path stringlengths 11 71 | content stringlengths 75 124k |
|---|---|
Analysis\Analytic\IsolatedZeros.lean | /-
Copyright (c) 2022 Vincent Beffara. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Vincent Beffara
-/
import Mathlib.Analysis.Analytic.Constructions
import Mathlib.Analysis.Calculus.Dslope
import Mathlib.Analysis.Calculus.FDeriv.Analytic
import Mathlib.Analysis.Analytic.Uniqueness
/-!
# Principle of isolated zeros
This file proves the fact that the zeros of a non-constant analytic function of one variable are
isolated. It also introduces a little bit of API in the `HasFPowerSeriesAt` namespace that is
useful in this setup.
## Main results
* `AnalyticAt.eventually_eq_zero_or_eventually_ne_zero` is the main statement that if a function is
  analytic at `z₀`, then either it is identically zero in a neighborhood of `z₀`, or it does not
  vanish in a punctured neighborhood of `z₀`.
* `AnalyticOn.eqOn_of_preconnected_of_frequently_eq` is the identity theorem for analytic
  functions: if a function `f` is analytic on a connected set `U` and is zero on a set with an
  accumulation point in `U` then `f` is identically `0` on `U`.
-/
open scoped Classical
open Filter Function Nat FormalMultilinearSeries EMetric Set
open scoped Topology
-- Ambient data: a complete-enough scalar field `𝕜`, a normed space `E`, candidate power series
-- `p q`, functions `f g`, and base points `z z₀` used throughout the file.
variable {𝕜 : Type*} [NontriviallyNormedField 𝕜] {E : Type*} [NormedAddCommGroup E]
  [NormedSpace 𝕜 E] {s : E} {p q : FormalMultilinearSeries 𝕜 𝕜 E} {f g : 𝕜 → E} {n : ℕ} {z z₀ : 𝕜}
namespace HasSum

variable {a : ℕ → E}

/-- Evaluating the power series `∑ n, z ^ n • a n` at `z = 0`: only the `n = 0` term survives,
so the sum is `a 0`. -/
theorem hasSum_at_zero (a : ℕ → E) : HasSum (fun n => (0 : 𝕜) ^ n • a n) (a 0) := by
  convert hasSum_single (α := E) 0 fun b h ↦ _ <;> simp [*]

/-- If the first `n` coefficients of a convergent power series vanish, then the sum factors as
`z ^ n` times the sum of the series with indices shifted down by `n`. -/
theorem exists_hasSum_smul_of_apply_eq_zero (hs : HasSum (fun m => z ^ m • a m) s)
    (ha : ∀ k < n, a k = 0) : ∃ t : E, z ^ n • t = s ∧ HasSum (fun m => z ^ m • a (m + n)) t := by
  obtain rfl | hn := n.eq_zero_or_pos
  · simpa
  by_cases h : z = 0
  -- at `z = 0` the sum is forced to be `0` and the witness is `a n`
  · have : s = 0 := hs.unique (by simpa [ha 0 hn, h] using hasSum_at_zero a)
    exact ⟨a n, by simp [h, hn.ne', this], by simpa [h] using hasSum_at_zero fun m => a (m + n)⟩
  -- for `z ≠ 0`, divide out `z ^ n` explicitly
  · refine ⟨(z ^ n)⁻¹ • s, by field_simp [smul_smul], ?_⟩
    have h1 : ∑ i ∈ Finset.range n, z ^ i • a i = 0 :=
      Finset.sum_eq_zero fun k hk => by simp [ha k (Finset.mem_range.mp hk)]
    have h2 : HasSum (fun m => z ^ (m + n) • a (m + n)) s := by
      simpa [h1] using (hasSum_nat_add_iff' n).mpr hs
    convert h2.const_smul (z⁻¹ ^ n) using 1
    · field_simp [pow_add, smul_smul]
    · simp only [inv_pow]

end HasSum
namespace HasFPowerSeriesAt

/-- If `p` is a power series for `f` at `z₀`, then `p.fslope` is a power series for the divided
difference `dslope f z₀` at `z₀`. -/
theorem has_fpower_series_dslope_fslope (hp : HasFPowerSeriesAt f p z₀) :
    HasFPowerSeriesAt (dslope f z₀) p.fslope z₀ := by
  have hpd : deriv f z₀ = p.coeff 1 := hp.deriv
  have hp0 : p.coeff 0 = f z₀ := hp.coeff_zero 1
  simp only [hasFPowerSeriesAt_iff, apply_eq_pow_smul_coeff, coeff_fslope] at hp ⊢
  refine hp.mono fun x hx => ?_
  by_cases h : x = 0
  · convert hasSum_single (α := E) 0 _ <;> intros <;> simp [*]
  · have hxx : ∀ n : ℕ, x⁻¹ * x ^ (n + 1) = x ^ n := fun n => by field_simp [h, _root_.pow_succ]
    suffices HasSum (fun n => x⁻¹ • x ^ (n + 1) • p.coeff (n + 1)) (x⁻¹ • (f (z₀ + x) - f z₀)) by
      simpa [dslope, slope, h, smul_smul, hxx] using this
    simpa [hp0] using ((hasSum_nat_add_iff' 1).mpr hx).const_smul x⁻¹

/-- Iterated version of `has_fpower_series_dslope_fslope`. -/
theorem has_fpower_series_iterate_dslope_fslope (n : ℕ) (hp : HasFPowerSeriesAt f p z₀) :
    HasFPowerSeriesAt ((swap dslope z₀)^[n] f) (fslope^[n] p) z₀ := by
  induction' n with n ih generalizing f p
  · exact hp
  · simpa using ih (has_fpower_series_dslope_fslope hp)

/-- After dividing out the order of `p`, the iterated `dslope` is nonzero at `z₀`. -/
theorem iterate_dslope_fslope_ne_zero (hp : HasFPowerSeriesAt f p z₀) (h : p ≠ 0) :
    (swap dslope z₀)^[p.order] f z₀ ≠ 0 := by
  rw [← coeff_zero (has_fpower_series_iterate_dslope_fslope p.order hp) 1]
  simpa [coeff_eq_zero] using apply_order_ne_zero h

/-- Local factorization: near `z₀`, `f z = (z - z₀) ^ p.order • (dslope-iterate) z`. -/
theorem eq_pow_order_mul_iterate_dslope (hp : HasFPowerSeriesAt f p z₀) :
    ∀ᶠ z in 𝓝 z₀, f z = (z - z₀) ^ p.order • (swap dslope z₀)^[p.order] f z := by
  have hq := hasFPowerSeriesAt_iff'.mp (has_fpower_series_iterate_dslope_fslope p.order hp)
  filter_upwards [hq, hasFPowerSeriesAt_iff'.mp hp] with x hx1 hx2
  have : ∀ k < p.order, p.coeff k = 0 := fun k hk => by
    simpa [coeff_eq_zero] using apply_eq_zero_of_lt_order hk
  obtain ⟨s, hs1, hs2⟩ := HasSum.exists_hasSum_smul_of_apply_eq_zero hx2 this
  convert hs1.symm
  simp only [coeff_iterate_fslope] at hx1
  exact hx1.unique hs2

/-- If `f` has a nonzero power series at `z₀`, then `f` does not vanish on a punctured
neighbourhood of `z₀`. -/
theorem locally_ne_zero (hp : HasFPowerSeriesAt f p z₀) (h : p ≠ 0) : ∀ᶠ z in 𝓝[≠] z₀, f z ≠ 0 := by
  rw [eventually_nhdsWithin_iff]
  have h2 := (has_fpower_series_iterate_dslope_fslope p.order hp).continuousAt
  have h3 := h2.eventually_ne (iterate_dslope_fslope_ne_zero hp h)
  filter_upwards [eq_pow_order_mul_iterate_dslope hp, h3] with z e1 e2 e3
  simpa [e1, e2, e3] using pow_ne_zero p.order (sub_ne_zero.mpr e3)

/-- `f` vanishes near `z₀` iff its power series there is the zero series. -/
theorem locally_zero_iff (hp : HasFPowerSeriesAt f p z₀) : (∀ᶠ z in 𝓝 z₀, f z = 0) ↔ p = 0 :=
  ⟨fun hf => hp.eq_zero_of_eventually hf, fun h => eventually_eq_zero (by rwa [h] at hp)⟩

end HasFPowerSeriesAt
namespace AnalyticAt

/-- The *principle of isolated zeros* for an analytic function, local version: if a function is
analytic at `z₀`, then either it is identically zero in a neighborhood of `z₀`, or it does not
vanish in a punctured neighborhood of `z₀`. -/
theorem eventually_eq_zero_or_eventually_ne_zero (hf : AnalyticAt 𝕜 f z₀) :
    (∀ᶠ z in 𝓝 z₀, f z = 0) ∨ ∀ᶠ z in 𝓝[≠] z₀, f z ≠ 0 := by
  rcases hf with ⟨p, hp⟩
  by_cases h : p = 0
  · exact Or.inl (HasFPowerSeriesAt.eventually_eq_zero (by rwa [h] at hp))
  · exact Or.inr (hp.locally_ne_zero h)

/-- Two functions analytic at `z₀` either agree near `z₀` or differ on a punctured nhd. -/
theorem eventually_eq_or_eventually_ne (hf : AnalyticAt 𝕜 f z₀) (hg : AnalyticAt 𝕜 g z₀) :
    (∀ᶠ z in 𝓝 z₀, f z = g z) ∨ ∀ᶠ z in 𝓝[≠] z₀, f z ≠ g z := by
  simpa [sub_eq_zero] using (hf.sub hg).eventually_eq_zero_or_eventually_ne_zero

/-- For `f` analytic at `w`, frequent vanishing near `w` upgrades to eventual vanishing. -/
theorem frequently_zero_iff_eventually_zero {f : 𝕜 → E} {w : 𝕜} (hf : AnalyticAt 𝕜 f w) :
    (∃ᶠ z in 𝓝[≠] w, f z = 0) ↔ ∀ᶠ z in 𝓝 w, f z = 0 :=
  ⟨hf.eventually_eq_zero_or_eventually_ne_zero.resolve_right, fun h =>
    (h.filter_mono nhdsWithin_le_nhds).frequently⟩

/-- For `f g` analytic at `z₀`, frequent agreement near `z₀` upgrades to eventual agreement. -/
theorem frequently_eq_iff_eventually_eq (hf : AnalyticAt 𝕜 f z₀) (hg : AnalyticAt 𝕜 g z₀) :
    (∃ᶠ z in 𝓝[≠] z₀, f z = g z) ↔ ∀ᶠ z in 𝓝 z₀, f z = g z := by
  simpa [sub_eq_zero] using frequently_zero_iff_eventually_zero (hf.sub hg)
/-- For a function `f` on `𝕜`, and `z₀ ∈ 𝕜`, there exists at most one `n` such that on a punctured
neighbourhood of `z₀` we have `f z = (z - z₀) ^ n • g z`, with `g` analytic and nonvanishing at
`z₀`. We formulate this with `n : ℤ`, and deduce the case `n : ℕ` later, for applications to
meromorphic functions. -/
lemma unique_eventuallyEq_zpow_smul_nonzero {m n : ℤ}
    (hm : ∃ g, AnalyticAt 𝕜 g z₀ ∧ g z₀ ≠ 0 ∧ ∀ᶠ z in 𝓝[≠] z₀, f z = (z - z₀) ^ m • g z)
    (hn : ∃ g, AnalyticAt 𝕜 g z₀ ∧ g z₀ ≠ 0 ∧ ∀ᶠ z in 𝓝[≠] z₀, f z = (z - z₀) ^ n • g z) :
    m = n := by
  wlog h_le : n ≤ m generalizing m n
  · exact ((this hn hm) (not_le.mp h_le).le).symm
  let ⟨g, hg_an, _, hg_eq⟩ := hm
  let ⟨j, hj_an, hj_ne, hj_eq⟩ := hn
  contrapose! hj_ne
  -- if `m ≠ n` then `j` agrees with `(z - z₀) ^ (m - n) • g z`, which vanishes at `z₀`
  have : ∃ᶠ z in 𝓝[≠] z₀, j z = (z - z₀) ^ (m - n) • g z := by
    apply Filter.Eventually.frequently
    rw [eventually_nhdsWithin_iff] at hg_eq hj_eq ⊢
    filter_upwards [hg_eq, hj_eq] with z hfz hfz' hz
    rw [← add_sub_cancel_left n m, add_sub_assoc, zpow_add₀ <| sub_ne_zero.mpr hz, mul_smul,
      hfz' hz, smul_right_inj <| zpow_ne_zero _ <| sub_ne_zero.mpr hz] at hfz
    exact hfz hz
  rw [frequently_eq_iff_eventually_eq hj_an] at this
  · rw [EventuallyEq.eq_of_nhds this, sub_self, zero_zpow _ (sub_ne_zero.mpr hj_ne), zero_smul]
  -- side goal from `rw`: analyticity of the comparison function (rewrite `zpow` into `pow` first)
  conv => enter [2, z, 1]; rw [← Int.toNat_sub_of_le h_le, zpow_natCast]
  exact (((analyticAt_id _ _).sub analyticAt_const).pow _).smul hg_an

/-- For a function `f` on `𝕜`, and `z₀ ∈ 𝕜`, there exists at most one `n` such that on a
neighbourhood of `z₀` we have `f z = (z - z₀) ^ n • g z`, with `g` analytic and nonvanishing at
`z₀`. -/
lemma unique_eventuallyEq_pow_smul_nonzero {m n : ℕ}
    (hm : ∃ g, AnalyticAt 𝕜 g z₀ ∧ g z₀ ≠ 0 ∧ ∀ᶠ z in 𝓝 z₀, f z = (z - z₀) ^ m • g z)
    (hn : ∃ g, AnalyticAt 𝕜 g z₀ ∧ g z₀ ≠ 0 ∧ ∀ᶠ z in 𝓝 z₀, f z = (z - z₀) ^ n • g z) :
    m = n := by
  simp_rw [← zpow_natCast] at hm hn
  exact Int.ofNat_inj.mp <| unique_eventuallyEq_zpow_smul_nonzero
    (let ⟨g, h₁, h₂, h₃⟩ := hm; ⟨g, h₁, h₂, h₃.filter_mono nhdsWithin_le_nhds⟩)
    (let ⟨g, h₁, h₂, h₃⟩ := hn; ⟨g, h₁, h₂, h₃.filter_mono nhdsWithin_le_nhds⟩)
/-- If `f` is analytic at `z₀`, then exactly one of the following two possibilities occurs: either
`f` vanishes identically near `z₀`, or locally around `z₀` it has the form `z ↦ (z - z₀) ^ n • g z`
for some `n` and some `g` which is analytic and non-vanishing at `z₀`. -/
theorem exists_eventuallyEq_pow_smul_nonzero_iff (hf : AnalyticAt 𝕜 f z₀) :
    (∃ (n : ℕ), ∃ (g : 𝕜 → E), AnalyticAt 𝕜 g z₀ ∧ g z₀ ≠ 0 ∧
    ∀ᶠ z in 𝓝 z₀, f z = (z - z₀) ^ n • g z) ↔ (¬∀ᶠ z in 𝓝 z₀, f z = 0) := by
  constructor
  · rintro ⟨n, g, hg_an, hg_ne, hg_eq⟩
    contrapose! hg_ne
    apply EventuallyEq.eq_of_nhds
    rw [EventuallyEq, ← AnalyticAt.frequently_eq_iff_eventually_eq hg_an analyticAt_const]
    refine (eventually_nhdsWithin_iff.mpr ?_).frequently
    filter_upwards [hg_eq, hg_ne] with z hf_eq hf0 hz
    rwa [hf0, eq_comm, smul_eq_zero_iff_right] at hf_eq
    exact pow_ne_zero _ (sub_ne_zero.mpr hz)
  · intro hf_ne
    rcases hf with ⟨p, hp⟩
    -- take `n = p.order` and `g` the iterated `dslope` of `f`
    exact ⟨p.order, _, ⟨_, hp.has_fpower_series_iterate_dslope_fslope p.order⟩,
      hp.iterate_dslope_fslope_ne_zero (hf_ne.imp hp.locally_zero_iff.mpr),
      hp.eq_pow_order_mul_iterate_dslope⟩

/-- The order of vanishing of `f` at `z₀`, as an element of `ℕ∞`.
This is defined to be `∞` if `f` is identically 0 on a neighbourhood of `z₀`, and otherwise the
unique `n` such that `f z = (z - z₀) ^ n • g z` with `g` analytic and non-vanishing at `z₀`. See
`AnalyticAt.order_eq_top_iff` and `AnalyticAt.order_eq_nat_iff` for these equivalences. -/
noncomputable def order (hf : AnalyticAt 𝕜 f z₀) : ENat :=
  if h : ∀ᶠ z in 𝓝 z₀, f z = 0 then ⊤
  else ↑(hf.exists_eventuallyEq_pow_smul_nonzero_iff.mpr h).choose

lemma order_eq_top_iff (hf : AnalyticAt 𝕜 f z₀) : hf.order = ⊤ ↔ ∀ᶠ z in 𝓝 z₀, f z = 0 := by
  unfold order
  split_ifs with h
  · rwa [eq_self, true_iff]
  · simpa only [ne_eq, ENat.coe_ne_top, false_iff] using h

lemma order_eq_nat_iff (hf : AnalyticAt 𝕜 f z₀) (n : ℕ) : hf.order = ↑n ↔
    ∃ (g : 𝕜 → E), AnalyticAt 𝕜 g z₀ ∧ g z₀ ≠ 0 ∧ ∀ᶠ z in 𝓝 z₀, f z = (z - z₀) ^ n • g z := by
  unfold order
  split_ifs with h
  · simp only [ENat.top_ne_coe, false_iff]
    contrapose! h
    rw [← hf.exists_eventuallyEq_pow_smul_nonzero_iff]
    exact ⟨n, h⟩
  · rw [← hf.exists_eventuallyEq_pow_smul_nonzero_iff] at h
    refine ⟨fun hn ↦ (WithTop.coe_inj.mp hn : h.choose = n) ▸ h.choose_spec, fun h' ↦ ?_⟩
    rw [unique_eventuallyEq_pow_smul_nonzero h.choose_spec h']

end AnalyticAt
namespace AnalyticOn

variable {U : Set 𝕜}

/-- The *principle of isolated zeros* for an analytic function, global version: if a function is
analytic on a connected set `U` and vanishes in arbitrary neighborhoods of a point `z₀ ∈ U`, then
it is identically zero in `U`.
For higher-dimensional versions requiring that the function vanishes in a neighborhood of `z₀`,
see `AnalyticOn.eqOn_zero_of_preconnected_of_eventuallyEq_zero`. -/
theorem eqOn_zero_of_preconnected_of_frequently_eq_zero (hf : AnalyticOn 𝕜 f U)
    (hU : IsPreconnected U) (h₀ : z₀ ∈ U) (hfw : ∃ᶠ z in 𝓝[≠] z₀, f z = 0) : EqOn f 0 U :=
  hf.eqOn_zero_of_preconnected_of_eventuallyEq_zero hU h₀
    ((hf z₀ h₀).frequently_zero_iff_eventually_zero.1 hfw)

/-- Variant phrased via accumulation: `z₀` lies in the closure of the other zeros of `f`. -/
theorem eqOn_zero_of_preconnected_of_mem_closure (hf : AnalyticOn 𝕜 f U) (hU : IsPreconnected U)
    (h₀ : z₀ ∈ U) (hfz₀ : z₀ ∈ closure ({z | f z = 0} \ {z₀})) : EqOn f 0 U :=
  hf.eqOn_zero_of_preconnected_of_frequently_eq_zero hU h₀
    (mem_closure_ne_iff_frequently_within.mp hfz₀)

/-- The *identity principle* for analytic functions, global version: if two functions are
analytic on a connected set `U` and coincide at points which accumulate to a point `z₀ ∈ U`, then
they coincide globally in `U`.
For higher-dimensional versions requiring that the functions coincide in a neighborhood of `z₀`,
see `AnalyticOn.eqOn_of_preconnected_of_eventuallyEq`. -/
theorem eqOn_of_preconnected_of_frequently_eq (hf : AnalyticOn 𝕜 f U) (hg : AnalyticOn 𝕜 g U)
    (hU : IsPreconnected U) (h₀ : z₀ ∈ U) (hfg : ∃ᶠ z in 𝓝[≠] z₀, f z = g z) : EqOn f g U := by
  have hfg' : ∃ᶠ z in 𝓝[≠] z₀, (f - g) z = 0 :=
    hfg.mono fun z h => by rw [Pi.sub_apply, h, sub_self]
  simpa [sub_eq_zero] using fun z hz =>
    (hf.sub hg).eqOn_zero_of_preconnected_of_frequently_eq_zero hU h₀ hfg' hz

/-- Identity principle, accumulation-point formulation. -/
theorem eqOn_of_preconnected_of_mem_closure (hf : AnalyticOn 𝕜 f U) (hg : AnalyticOn 𝕜 g U)
    (hU : IsPreconnected U) (h₀ : z₀ ∈ U) (hfg : z₀ ∈ closure ({z | f z = g z} \ {z₀})) :
    EqOn f g U :=
  hf.eqOn_of_preconnected_of_frequently_eq hg hU h₀ (mem_closure_ne_iff_frequently_within.mp hfg)

/-- The *identity principle* for analytic functions, global version: if two functions on a normed
field `𝕜` are analytic everywhere and coincide at points which accumulate to a point `z₀`, then
they coincide globally.
For higher-dimensional versions requiring that the functions coincide in a neighborhood of `z₀`,
see `AnalyticOn.eq_of_eventuallyEq`. -/
theorem eq_of_frequently_eq [ConnectedSpace 𝕜] (hf : AnalyticOn 𝕜 f univ) (hg : AnalyticOn 𝕜 g univ)
    (hfg : ∃ᶠ z in 𝓝[≠] z₀, f z = g z) : f = g :=
  funext fun x =>
    eqOn_of_preconnected_of_frequently_eq hf hg isPreconnected_univ (mem_univ z₀) hfg (mem_univ x)

end AnalyticOn
|
Analysis\Analytic\Linear.lean | /-
Copyright (c) 2021 Yury G. Kudryashov. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yury G. Kudryashov
-/
import Mathlib.Analysis.Analytic.Basic
/-!
# Linear functions are analytic
In this file we prove that a `ContinuousLinearMap` defines an analytic function with
the formal power series `f x = f a + f (x - a)`. We also prove similar results for multilinear maps.
-/
-- Ambient data: scalar field `𝕜` and three normed spaces `E`, `F`, `G` over it.
variable {𝕜 : Type*} [NontriviallyNormedField 𝕜] {E : Type*} [NormedAddCommGroup E]
  [NormedSpace 𝕜 E] {F : Type*} [NormedAddCommGroup F] [NormedSpace 𝕜 F] {G : Type*}
  [NormedAddCommGroup G] [NormedSpace 𝕜 G]
open scoped Topology NNReal ENNReal
open Set Filter Asymptotics
noncomputable section
namespace ContinuousLinearMap

/-- The power series of a continuous linear map converges everywhere: its radius is `∞`. -/
@[simp]
theorem fpowerSeries_radius (f : E →L[𝕜] F) (x : E) : (f.fpowerSeries x).radius = ∞ :=
  (f.fpowerSeries x).radius_eq_top_of_forall_image_add_eq_zero 2 fun _ => rfl

protected theorem hasFPowerSeriesOnBall (f : E →L[𝕜] F) (x : E) :
    HasFPowerSeriesOnBall f (f.fpowerSeries x) x ∞ :=
  { r_le := by simp
    r_pos := ENNReal.coe_lt_top
    hasSum := fun _ => (hasSum_nat_add_iff' 2).1 <| by
      -- terms of degree ≥ 2 vanish, leaving `f x + f (y - x)`
      simp [Finset.sum_range_succ, ← sub_sub, hasSum_zero, fpowerSeries] }

protected theorem hasFPowerSeriesAt (f : E →L[𝕜] F) (x : E) :
    HasFPowerSeriesAt f (f.fpowerSeries x) x :=
  ⟨∞, f.hasFPowerSeriesOnBall x⟩

/-- A continuous linear map is analytic at every point. -/
protected theorem analyticAt (f : E →L[𝕜] F) (x : E) : AnalyticAt 𝕜 f x :=
  (f.hasFPowerSeriesAt x).analyticAt
/-- Reinterpret a bilinear map `f : E →L[𝕜] F →L[𝕜] G` as a multilinear map
`(E × F) [×2]→L[𝕜] G`. This multilinear map is the second term in the formal
multilinear series expansion of `uncurry f`. It is given by
`f.uncurryBilinear ![(x, y), (x', y')] = f x y'`. -/
def uncurryBilinear (f : E →L[𝕜] F →L[𝕜] G) : E × F [×2]→L[𝕜] G :=
  @ContinuousLinearMap.uncurryLeft 𝕜 1 (fun _ => E × F) G _ _ _ _ _ <|
    (↑(continuousMultilinearCurryFin1 𝕜 (E × F) G).symm : (E × F →L[𝕜] G) →L[𝕜] _).comp <|
      f.bilinearComp (fst _ _ _) (snd _ _ _)

@[simp]
theorem uncurryBilinear_apply (f : E →L[𝕜] F →L[𝕜] G) (m : Fin 2 → E × F) :
    f.uncurryBilinear m = f (m 0).1 (m 1).2 :=
  rfl

/-- Formal multilinear series expansion of a bilinear function `f : E →L[𝕜] F →L[𝕜] G`. -/
def fpowerSeriesBilinear (f : E →L[𝕜] F →L[𝕜] G) (x : E × F) : FormalMultilinearSeries 𝕜 (E × F) G
  | 0 => ContinuousMultilinearMap.curry0 𝕜 _ (f x.1 x.2)
  | 1 => (continuousMultilinearCurryFin1 𝕜 (E × F) G).symm (f.deriv₂ x)
  | 2 => f.uncurryBilinear
  | _ => 0

theorem fpowerSeriesBilinear_apply_zero (f : E →L[𝕜] F →L[𝕜] G) (x : E × F) :
    fpowerSeriesBilinear f x 0 = ContinuousMultilinearMap.curry0 𝕜 _ (f x.1 x.2) :=
  rfl

theorem fpowerSeriesBilinear_apply_one (f : E →L[𝕜] F →L[𝕜] G) (x : E × F) :
    fpowerSeriesBilinear f x 1 = (continuousMultilinearCurryFin1 𝕜 (E × F) G).symm (f.deriv₂ x) :=
  rfl

theorem fpowerSeriesBilinear_apply_two (f : E →L[𝕜] F →L[𝕜] G) (x : E × F) :
    fpowerSeriesBilinear f x 2 = f.uncurryBilinear :=
  rfl

theorem fpowerSeriesBilinear_apply_add_three (f : E →L[𝕜] F →L[𝕜] G) (x : E × F) (n) :
    fpowerSeriesBilinear f x (n + 3) = 0 :=
  rfl

attribute
  [eqns
    fpowerSeriesBilinear_apply_zero
    fpowerSeriesBilinear_apply_one
    fpowerSeriesBilinear_apply_two
    fpowerSeriesBilinear_apply_add_three] fpowerSeriesBilinear

attribute [simp] fpowerSeriesBilinear
@[simp]
theorem fpowerSeriesBilinear_radius (f : E →L[𝕜] F →L[𝕜] G) (x : E × F) :
    (f.fpowerSeriesBilinear x).radius = ∞ :=
  (f.fpowerSeriesBilinear x).radius_eq_top_of_forall_image_add_eq_zero 3 fun _ => rfl

protected theorem hasFPowerSeriesOnBall_bilinear (f : E →L[𝕜] F →L[𝕜] G) (x : E × F) :
    HasFPowerSeriesOnBall (fun x : E × F => f x.1 x.2) (f.fpowerSeriesBilinear x) x ∞ :=
  { r_le := by simp
    r_pos := ENNReal.coe_lt_top
    hasSum := fun _ =>
      (hasSum_nat_add_iff' 3).1 <| by
        -- only the degrees 0, 1, 2 contribute; they reassemble `f (x + y).1 (x + y).2`
        simp only [Finset.sum_range_succ, Finset.sum_range_one, Prod.fst_add, Prod.snd_add,
          f.map_add_add]
        simp [fpowerSeriesBilinear, hasSum_zero] }

protected theorem hasFPowerSeriesAt_bilinear (f : E →L[𝕜] F →L[𝕜] G) (x : E × F) :
    HasFPowerSeriesAt (fun x : E × F => f x.1 x.2) (f.fpowerSeriesBilinear x) x :=
  ⟨∞, f.hasFPowerSeriesOnBall_bilinear x⟩

/-- A continuous bilinear map is analytic (jointly in both variables) at every point. -/
protected theorem analyticAt_bilinear (f : E →L[𝕜] F →L[𝕜] G) (x : E × F) :
    AnalyticAt 𝕜 (fun x : E × F => f x.1 x.2) x :=
  (f.hasFPowerSeriesAt_bilinear x).analyticAt

end ContinuousLinearMap
variable (𝕜)

lemma analyticAt_id (z : E) : AnalyticAt 𝕜 (id : E → E) z :=
  (ContinuousLinearMap.id 𝕜 E).analyticAt z

/-- `id` is entire -/
theorem analyticOn_id {s : Set E} : AnalyticOn 𝕜 (fun x : E ↦ x) s :=
  fun _ _ ↦ analyticAt_id _ _

/-- `fst` is analytic -/
theorem analyticAt_fst {p : E × F} : AnalyticAt 𝕜 (fun p : E × F ↦ p.fst) p :=
  (ContinuousLinearMap.fst 𝕜 E F).analyticAt p

/-- `snd` is analytic -/
theorem analyticAt_snd {p : E × F} : AnalyticAt 𝕜 (fun p : E × F ↦ p.snd) p :=
  (ContinuousLinearMap.snd 𝕜 E F).analyticAt p

/-- `fst` is entire -/
theorem analyticOn_fst {s : Set (E × F)} : AnalyticOn 𝕜 (fun p : E × F ↦ p.fst) s :=
  fun _ _ ↦ analyticAt_fst _

/-- `snd` is entire -/
theorem analyticOn_snd {s : Set (E × F)} : AnalyticOn 𝕜 (fun p : E × F ↦ p.snd) s :=
  fun _ _ ↦ analyticAt_snd _
|
Analysis\Analytic\Meromorphic.lean | /-
Copyright (c) 2024 David Loeffler. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: David Loeffler
-/
import Mathlib.Analysis.Analytic.IsolatedZeros
import Mathlib.Algebra.Order.AddGroupWithTop
/-!
# Meromorphic functions
Main statements:
* `MeromorphicAt`: definition of meromorphy at a point
* `MeromorphicAt.iff_eventuallyEq_zpow_smul_analyticAt`: `f` is meromorphic at `z₀` iff we have
  `f z = (z - z₀) ^ n • g z` on a punctured nhd of `z₀`, for some `n : ℤ` and `g` analytic at `z₀`.
* `MeromorphicAt.order`: order of vanishing at `z₀`, as an element of `ℤ ∪ {∞}`
-/
open Filter
open scoped Topology
-- Ambient data: scalar field `𝕜` and a normed `𝕜`-vector space `E` of values.
variable {𝕜 : Type*} [NontriviallyNormedField 𝕜]
  {E : Type*} [NormedAddCommGroup E] [NormedSpace 𝕜 E]
/-- Meromorphy of `f` at `x` (more precisely, on a punctured neighbourhood of `x`; the value at
`x` itself is irrelevant): some power of `z - x` cancels the pole, i.e. `(z - x) ^ n • f z` is
analytic at `x` for some `n : ℕ`. -/
def MeromorphicAt (f : 𝕜 → E) (x : 𝕜) :=
  ∃ (n : ℕ), AnalyticAt 𝕜 (fun z ↦ (z - x) ^ n • f z) x

/-- Analytic functions are meromorphic (take `n = 0` in the definition). -/
lemma AnalyticAt.meromorphicAt {f : 𝕜 → E} {x : 𝕜} (hf : AnalyticAt 𝕜 f x) :
    MeromorphicAt f x :=
  ⟨0, by simpa only [pow_zero, one_smul]⟩
namespace MeromorphicAt

lemma id (x : 𝕜) : MeromorphicAt id x := (analyticAt_id 𝕜 x).meromorphicAt

lemma const (e : E) (x : 𝕜) : MeromorphicAt (fun _ ↦ e) x :=
  analyticAt_const.meromorphicAt

/-- The sum of two meromorphic functions is meromorphic: clear both poles with `max m n`. -/
lemma add {f g : 𝕜 → E} {x : 𝕜} (hf : MeromorphicAt f x) (hg : MeromorphicAt g x) :
    MeromorphicAt (f + g) x := by
  rcases hf with ⟨m, hf⟩
  rcases hg with ⟨n, hg⟩
  refine ⟨max m n, ?_⟩
  have : (fun z ↦ (z - x) ^ max m n • (f + g) z) = fun z ↦ (z - x) ^ (max m n - m) •
      ((z - x) ^ m • f z) + (z - x) ^ (max m n - n) • ((z - x) ^ n • g z) := by
    simp_rw [← mul_smul, ← pow_add, Nat.sub_add_cancel (Nat.le_max_left _ _),
      Nat.sub_add_cancel (Nat.le_max_right _ _), Pi.add_apply, smul_add]
  rw [this]
  exact ((((analyticAt_id 𝕜 x).sub analyticAt_const).pow _).smul hf).add
    ((((analyticAt_id 𝕜 x).sub analyticAt_const).pow _).smul hg)
/-- Scalar products of meromorphic functions are meromorphic: pole orders add. -/
lemma smul {f : 𝕜 → 𝕜} {g : 𝕜 → E} {x : 𝕜} (hf : MeromorphicAt f x) (hg : MeromorphicAt g x) :
    MeromorphicAt (f • g) x := by
  rcases hf with ⟨m, hf⟩
  rcases hg with ⟨n, hg⟩
  refine ⟨m + n, ?_⟩
  convert hf.smul hg using 2 with z
  rw [smul_eq_mul, ← mul_smul, mul_assoc, mul_comm (f z), ← mul_assoc, pow_add,
    ← smul_eq_mul (a' := f z), smul_assoc, Pi.smul_apply']

lemma mul {f g : 𝕜 → 𝕜} {x : 𝕜} (hf : MeromorphicAt f x) (hg : MeromorphicAt g x) :
    MeromorphicAt (f * g) x :=
  hf.smul hg

lemma neg {f : 𝕜 → E} {x : 𝕜} (hf : MeromorphicAt f x) : MeromorphicAt (-f) x := by
  convert (MeromorphicAt.const (-1 : 𝕜) x).smul hf using 1
  ext1 z
  simp only [Pi.neg_apply, Pi.smul_apply', neg_smul, one_smul]

@[simp]
lemma neg_iff {f : 𝕜 → E} {x : 𝕜} :
    MeromorphicAt (-f) x ↔ MeromorphicAt f x :=
  ⟨fun h ↦ by simpa only [neg_neg] using h.neg, MeromorphicAt.neg⟩

lemma sub {f g : 𝕜 → E} {x : 𝕜} (hf : MeromorphicAt f x) (hg : MeromorphicAt g x) :
    MeromorphicAt (f - g) x := by
  convert hf.add hg.neg using 1
  ext1 z
  simp_rw [Pi.sub_apply, Pi.add_apply, Pi.neg_apply, sub_eq_add_neg]
/-- With our definitions, `MeromorphicAt f x` depends only on the values of `f` on a punctured
neighbourhood of `x` (not on `f x`). The extra factor `z - x` (hence `m + 1` instead of `m`)
absorbs any discrepancy at the point `x` itself. -/
lemma congr {f g : 𝕜 → E} {x : 𝕜} (hf : MeromorphicAt f x) (hfg : f =ᶠ[𝓝[≠] x] g) :
    MeromorphicAt g x := by
  rcases hf with ⟨m, hf⟩
  refine ⟨m + 1, ?_⟩
  have : AnalyticAt 𝕜 (fun z ↦ z - x) x := (analyticAt_id 𝕜 x).sub analyticAt_const
  refine (this.smul hf).congr ?_
  rw [eventuallyEq_nhdsWithin_iff] at hfg
  filter_upwards [hfg] with z hz
  rcases eq_or_ne z x with rfl | hn
  · simp
  · rw [hz (Set.mem_compl_singleton_iff.mp hn), pow_succ', mul_smul]
/-- The inverse of a meromorphic (scalar-valued) function is meromorphic. -/
lemma inv {f : 𝕜 → 𝕜} {x : 𝕜} (hf : MeromorphicAt f x) : MeromorphicAt f⁻¹ x := by
  rcases hf with ⟨m, hf⟩
  by_cases h_eq : (fun z ↦ (z - x) ^ m • f z) =ᶠ[𝓝 x] 0
  · -- silly case: f locally 0 near x
    refine (MeromorphicAt.const 0 x).congr ?_
    rw [eventuallyEq_nhdsWithin_iff]
    filter_upwards [h_eq] with z hfz hz
    rw [Pi.inv_apply, (smul_eq_zero_iff_right <| pow_ne_zero _ (sub_ne_zero.mpr hz)).mp hfz,
      inv_zero]
  · -- interesting case: use local formula for `f`
    obtain ⟨n, g, hg_an, hg_ne, hg_eq⟩ := hf.exists_eventuallyEq_pow_smul_nonzero_iff.mpr h_eq
    have : AnalyticAt 𝕜 (fun z ↦ (z - x) ^ (m + 1)) x :=
      ((analyticAt_id 𝕜 x).sub analyticAt_const).pow _
    -- use `m + 1` rather than `m` to damp out any silly issues with the value at `z = x`
    refine ⟨n + 1, (this.smul <| hg_an.inv hg_ne).congr ?_⟩
    filter_upwards [hg_eq, hg_an.continuousAt.eventually_ne hg_ne] with z hfg hg_ne'
    rcases eq_or_ne z x with rfl | hz_ne
    · simp only [sub_self, pow_succ, mul_zero, zero_smul]
    · simp_rw [smul_eq_mul] at hfg ⊢
      have aux1 : f z ≠ 0 := by
        have : (z - x) ^ n * g z ≠ 0 := mul_ne_zero (pow_ne_zero _ (sub_ne_zero.mpr hz_ne)) hg_ne'
        rw [← hfg, mul_ne_zero_iff] at this
        exact this.2
      field_simp [sub_ne_zero.mpr hz_ne]
      rw [pow_succ', mul_assoc, hfg]
      ring
@[simp]
lemma inv_iff {f : 𝕜 → 𝕜} {x : 𝕜} :
    MeromorphicAt f⁻¹ x ↔ MeromorphicAt f x :=
  ⟨fun h ↦ by simpa only [inv_inv] using h.inv, MeromorphicAt.inv⟩

lemma div {f g : 𝕜 → 𝕜} {x : 𝕜} (hf : MeromorphicAt f x) (hg : MeromorphicAt g x) :
    MeromorphicAt (f / g) x :=
  (div_eq_mul_inv f g).symm ▸ (hf.mul hg.inv)

lemma pow {f : 𝕜 → 𝕜} {x : 𝕜} (hf : MeromorphicAt f x) (n : ℕ) : MeromorphicAt (f ^ n) x := by
  induction' n with m hm
  · simpa only [Nat.zero_eq, pow_zero] using MeromorphicAt.const 1 x
  · simpa only [pow_succ] using hm.mul hf

lemma zpow {f : 𝕜 → 𝕜} {x : 𝕜} (hf : MeromorphicAt f x) (n : ℤ) : MeromorphicAt (f ^ n) x := by
  induction' n with m m
  · simpa only [Int.ofNat_eq_coe, zpow_natCast] using hf.pow m
  · simpa only [zpow_negSucc, inv_iff] using hf.pow (m + 1)
/-- The order of vanishing of a meromorphic function, as an element of `ℤ ∪ {∞}` (to include the
case of functions identically 0 near `x`). Computed from the `order` of the analytic function
`(z - x) ^ n • f z` produced by the defining existential, minus the chosen exponent `n`. -/
noncomputable def order {f : 𝕜 → E} {x : 𝕜} (hf : MeromorphicAt f x) : WithTop ℤ :=
  (hf.choose_spec.order.map (↑· : ℕ → ℤ)) - hf.choose

open WithTop.LinearOrderedAddCommGroup

lemma order_eq_top_iff {f : 𝕜 → E} {x : 𝕜} (hf : MeromorphicAt f x) :
    hf.order = ⊤ ↔ ∀ᶠ z in 𝓝[≠] x, f z = 0 := by
  unfold order
  by_cases h : hf.choose_spec.order = ⊤
  · rw [h, WithTop.map_top, ← WithTop.coe_natCast,
      top_sub, eq_self, true_iff, eventually_nhdsWithin_iff]
    rw [AnalyticAt.order_eq_top_iff] at h
    filter_upwards [h] with z hf hz
    rwa [smul_eq_zero_iff_right <| pow_ne_zero _ (sub_ne_zero.mpr hz)] at hf
  · obtain ⟨m, hm⟩ := WithTop.ne_top_iff_exists.mp h
    rw [← hm, WithTop.map_coe, sub_eq_top_iff, eq_false_intro WithTop.coe_ne_top, false_or]
    simp only [WithTop.natCast_ne_top, false_iff]
    contrapose! h
    rw [AnalyticAt.order_eq_top_iff]
    rw [← hf.choose_spec.frequently_eq_iff_eventually_eq analyticAt_const]
    apply Eventually.frequently
    filter_upwards [h] with z hfz
    rw [hfz, smul_zero]
/-- `hf.order = n` iff `f` locally factors as `(z - x) ^ n • g z` with `g` analytic and
non-vanishing at `x`. -/
lemma order_eq_int_iff {f : 𝕜 → E} {x : 𝕜} (hf : MeromorphicAt f x) (n : ℤ) : hf.order = n ↔
    ∃ g : 𝕜 → E, AnalyticAt 𝕜 g x ∧ g x ≠ 0 ∧ ∀ᶠ z in 𝓝[≠] x, f z = (z - x) ^ n • g z := by
  unfold order
  by_cases h : hf.choose_spec.order = ⊤
  · rw [h, WithTop.map_top, ← WithTop.coe_natCast, top_sub,
      eq_false_intro WithTop.top_ne_coe, false_iff]
    rw [AnalyticAt.order_eq_top_iff] at h
    refine fun ⟨g, hg_an, hg_ne, hg_eq⟩ ↦ hg_ne ?_
    apply EventuallyEq.eq_of_nhds
    rw [EventuallyEq, ← AnalyticAt.frequently_eq_iff_eventually_eq hg_an analyticAt_const]
    apply Eventually.frequently
    rw [eventually_nhdsWithin_iff] at hg_eq ⊢
    filter_upwards [h, hg_eq] with z hfz hfz_eq hz
    rwa [hfz_eq hz, ← mul_smul, smul_eq_zero_iff_right] at hfz
    exact mul_ne_zero (pow_ne_zero _ (sub_ne_zero.mpr hz)) (zpow_ne_zero _ (sub_ne_zero.mpr hz))
  · obtain ⟨m, h⟩ := WithTop.ne_top_iff_exists.mp h
    rw [← h, WithTop.map_coe, ← WithTop.coe_natCast, ← coe_sub, WithTop.coe_inj]
    obtain ⟨g, hg_an, hg_ne, hg_eq⟩ := (AnalyticAt.order_eq_nat_iff _ _).mp h.symm
    replace hg_eq : ∀ᶠ (z : 𝕜) in 𝓝[≠] x, f z = (z - x) ^ (↑m - ↑hf.choose : ℤ) • g z := by
      rw [eventually_nhdsWithin_iff]
      filter_upwards [hg_eq] with z hg_eq hz
      rwa [← smul_right_inj <| zpow_ne_zero _ (sub_ne_zero.mpr hz), ← mul_smul,
        ← zpow_add₀ (sub_ne_zero.mpr hz), ← add_sub_assoc, add_sub_cancel_left, zpow_natCast,
        zpow_natCast]
    exact ⟨fun h ↦ ⟨g, hg_an, hg_ne, h ▸ hg_eq⟩,
      AnalyticAt.unique_eventuallyEq_zpow_smul_nonzero ⟨g, hg_an, hg_ne, hg_eq⟩⟩
/-- Compatibility of notions of `order` for analytic and meromorphic functions. -/
lemma _root_.AnalyticAt.meromorphicAt_order {f : 𝕜 → E} {x : 𝕜} (hf : AnalyticAt 𝕜 f x) :
    hf.meromorphicAt.order = hf.order.map (↑) := by
  rcases eq_or_ne hf.order ⊤ with ho | ho
  · rw [ho, WithTop.map_top, order_eq_top_iff]
    exact (hf.order_eq_top_iff.mp ho).filter_mono nhdsWithin_le_nhds
  · obtain ⟨n, hn⟩ := WithTop.ne_top_iff_exists.mp ho
    simp_rw [← hn, WithTop.map_coe, order_eq_int_iff, zpow_natCast]
    rcases (hf.order_eq_nat_iff _).mp hn.symm with ⟨g, h1, h2, h3⟩
    exact ⟨g, h1, h2, h3.filter_mono nhdsWithin_le_nhds⟩

/-- `f` is meromorphic at `x` iff it locally has the form `(z - x) ^ n • g z` on a punctured
neighbourhood of `x`, for some `n : ℤ` and `g` analytic at `x`. -/
lemma iff_eventuallyEq_zpow_smul_analyticAt {f : 𝕜 → E} {x : 𝕜} : MeromorphicAt f x ↔
    ∃ (n : ℤ) (g : 𝕜 → E), AnalyticAt 𝕜 g x ∧ ∀ᶠ z in 𝓝[≠] x, f z = (z - x) ^ n • g z := by
  refine ⟨fun ⟨n, hn⟩ ↦ ⟨-n, _, ⟨hn, eventually_nhdsWithin_iff.mpr ?_⟩⟩, ?_⟩
  · filter_upwards with z hz
    rw [← mul_smul, ← zpow_natCast, ← zpow_add₀ (sub_ne_zero.mpr hz), add_left_neg,
      zpow_zero, one_smul]
  · refine fun ⟨n, g, hg_an, hg_eq⟩ ↦ MeromorphicAt.congr ?_ (EventuallyEq.symm hg_eq)
    exact (((MeromorphicAt.id x).sub (.const _ x)).zpow _).smul hg_an.meromorphicAt

end MeromorphicAt
/-- Meromorphy of a function on a set: meromorphy at every point of the set. -/
def MeromorphicOn (f : 𝕜 → E) (U : Set 𝕜) : Prop := ∀ x ∈ U, MeromorphicAt f x

lemma AnalyticOn.meromorphicOn {f : 𝕜 → E} {U : Set 𝕜} (hf : AnalyticOn 𝕜 f U) :
    MeromorphicOn f U :=
  fun x hx ↦ (hf x hx).meromorphicAt

namespace MeromorphicOn

variable {s t : 𝕜 → 𝕜} {f g : 𝕜 → E} {U : Set 𝕜}
  (hs : MeromorphicOn s U) (ht : MeromorphicOn t U)
  (hf : MeromorphicOn f U) (hg : MeromorphicOn g U)

lemma id {U : Set 𝕜} : MeromorphicOn id U := fun x _ ↦ .id x

lemma const (e : E) {U : Set 𝕜} : MeromorphicOn (fun _ ↦ e) U :=
  fun x _ ↦ .const e x

section arithmetic

lemma mono_set {V : Set 𝕜} (hv : V ⊆ U) : MeromorphicOn f V := fun x hx ↦ hf x (hv hx)

lemma add : MeromorphicOn (f + g) U := fun x hx ↦ (hf x hx).add (hg x hx)

lemma sub : MeromorphicOn (f - g) U := fun x hx ↦ (hf x hx).sub (hg x hx)

lemma neg : MeromorphicOn (-f) U := fun x hx ↦ (hf x hx).neg

@[simp] lemma neg_iff : MeromorphicOn (-f) U ↔ MeromorphicOn f U :=
  ⟨fun h ↦ by simpa only [neg_neg] using h.neg, neg⟩

lemma smul : MeromorphicOn (s • f) U := fun x hx ↦ (hs x hx).smul (hf x hx)

lemma mul : MeromorphicOn (s * t) U := fun x hx ↦ (hs x hx).mul (ht x hx)

lemma inv : MeromorphicOn s⁻¹ U := fun x hx ↦ (hs x hx).inv

@[simp] lemma inv_iff : MeromorphicOn s⁻¹ U ↔ MeromorphicOn s U :=
  ⟨fun h ↦ by simpa only [inv_inv] using h.inv, inv⟩

lemma div : MeromorphicOn (s / t) U := fun x hx ↦ (hs x hx).div (ht x hx)

lemma pow (n : ℕ) : MeromorphicOn (s ^ n) U := fun x hx ↦ (hs x hx).pow _

lemma zpow (n : ℤ) : MeromorphicOn (s ^ n) U := fun x hx ↦ (hs x hx).zpow _

end arithmetic

/-- On an open set, meromorphy only depends on the values of the function on that set. -/
lemma congr (h_eq : Set.EqOn f g U) (hu : IsOpen U) : MeromorphicOn g U := by
  refine fun x hx ↦ (hf x hx).congr (EventuallyEq.filter_mono ?_ nhdsWithin_le_nhds)
  exact eventually_of_mem (hu.mem_nhds hx) h_eq

end MeromorphicOn
|
Analysis\Analytic\Polynomial.lean | /-
Copyright (c) 2023 Junyan Xu. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Junyan Xu
-/
import Mathlib.Algebra.Polynomial.AlgebraMap
import Mathlib.Algebra.MvPolynomial.Basic
import Mathlib.Analysis.Analytic.Constructions
import Mathlib.Topology.Algebra.Module.FiniteDimension
/-!
# Polynomials are analytic
This file combines the analysis and algebra libraries and shows that evaluation of a polynomial
is an analytic function.
-/
-- Ambient data: scalar field `𝕜`, normed space `E` (domain), coefficient semiring `A`,
-- and (per-section) a normed algebra `B` in which polynomials are evaluated.
variable {𝕜 E A B : Type*} [NontriviallyNormedField 𝕜] [NormedAddCommGroup E] [NormedSpace 𝕜 E]
  [CommSemiring A] {z : E} {s : Set E}
section Polynomial

open Polynomial

variable [NormedRing B] [NormedAlgebra 𝕜 B] [Algebra A B] {f : E → B}

/-- Polynomial evaluation at an analytic function is analytic (induction on the polynomial). -/
theorem AnalyticAt.aeval_polynomial (hf : AnalyticAt 𝕜 f z) (p : A[X]) :
    AnalyticAt 𝕜 (fun x ↦ aeval (f x) p) z := by
  refine p.induction_on (fun k ↦ ?_) (fun p q hp hq ↦ ?_) fun p i hp ↦ ?_
  · simp_rw [aeval_C]; apply analyticAt_const
  · simp_rw [aeval_add]; exact hp.add hq
  · convert hp.mul hf
    simp_rw [pow_succ, aeval_mul, ← mul_assoc, aeval_X]

theorem AnalyticOn.aeval_polynomial (hf : AnalyticOn 𝕜 f s) (p : A[X]) :
    AnalyticOn 𝕜 (fun x ↦ aeval (f x) p) s := fun x hx ↦ (hf x hx).aeval_polynomial p

/-- Evaluation of a polynomial is an entire function of the evaluation point. -/
theorem AnalyticOn.eval_polynomial {A} [NormedCommRing A] [NormedAlgebra 𝕜 A] (p : A[X]) :
    AnalyticOn 𝕜 (eval · p) Set.univ := (analyticOn_id 𝕜).aeval_polynomial p

end Polynomial
section MvPolynomial

open MvPolynomial

variable [NormedCommRing B] [NormedAlgebra 𝕜 B] [Algebra A B] {σ : Type*} {f : E → σ → B}

/-- Multivariate polynomial evaluation at a family of analytic functions is analytic. -/
theorem AnalyticAt.aeval_mvPolynomial (hf : ∀ i, AnalyticAt 𝕜 (f · i) z) (p : MvPolynomial σ A) :
    AnalyticAt 𝕜 (fun x ↦ aeval (f x) p) z := by
  apply p.induction_on (fun k ↦ ?_) (fun p q hp hq ↦ ?_) fun p i hp ↦ ?_ -- `refine` doesn't work
  · simp_rw [aeval_C]; apply analyticAt_const
  · simp_rw [map_add]; exact hp.add hq
  · simp_rw [map_mul, aeval_X]; exact hp.mul (hf i)

theorem AnalyticOn.aeval_mvPolynomial (hf : ∀ i, AnalyticOn 𝕜 (f · i) s) (p : MvPolynomial σ A) :
    AnalyticOn 𝕜 (fun x ↦ aeval (f x) p) s := fun x hx ↦ .aeval_mvPolynomial (hf · x hx) p

theorem AnalyticOn.eval_continuousLinearMap (f : E →L[𝕜] σ → B) (p : MvPolynomial σ B) :
    AnalyticOn 𝕜 (fun x ↦ eval (f x) p) Set.univ :=
  fun x _ ↦ .aeval_mvPolynomial (fun i ↦ ((ContinuousLinearMap.proj i).comp f).analyticAt x) p

theorem AnalyticOn.eval_continuousLinearMap' (f : σ → E →L[𝕜] B) (p : MvPolynomial σ B) :
    AnalyticOn 𝕜 (fun x ↦ eval (f · x) p) Set.univ :=
  fun x _ ↦ .aeval_mvPolynomial (fun i ↦ (f i).analyticAt x) p

-- In finite dimension every linear map is automatically continuous, so the continuous-linear-map
-- statements transfer to plain linear maps.
variable [CompleteSpace 𝕜] [T2Space E] [FiniteDimensional 𝕜 E]

theorem AnalyticOn.eval_linearMap (f : E →ₗ[𝕜] σ → B) (p : MvPolynomial σ B) :
    AnalyticOn 𝕜 (fun x ↦ eval (f x) p) Set.univ :=
  AnalyticOn.eval_continuousLinearMap { f with cont := f.continuous_of_finiteDimensional } p

theorem AnalyticOn.eval_linearMap' (f : σ → E →ₗ[𝕜] B) (p : MvPolynomial σ B) :
    AnalyticOn 𝕜 (fun x ↦ eval (f · x) p) Set.univ := AnalyticOn.eval_linearMap (.pi f) p

theorem AnalyticOn.eval_mvPolynomial [Fintype σ] (p : MvPolynomial σ 𝕜) :
    AnalyticOn 𝕜 (eval · p) Set.univ := AnalyticOn.eval_linearMap (.id (R := 𝕜) (M := σ → 𝕜)) p

end MvPolynomial
|
Analysis\Analytic\RadiusLiminf.lean | /-
Copyright (c) 2020 Yury Kudryashov. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yury Kudryashov
-/
import Mathlib.Analysis.Analytic.Basic
import Mathlib.Analysis.SpecialFunctions.Pow.NNReal
/-!
# Representation of `FormalMultilinearSeries.radius` as a `liminf`
In this file we prove that the radius of convergence of a `FormalMultilinearSeries` is equal to
$\liminf_{n\to\infty} \frac{1}{\sqrt[n]{âp nâ}}$. This lemma can't go to `Analysis.Analytic.Basic`
because this would create a circular dependency once we redefine `exp` using
`FormalMultilinearSeries`.
-/
variable {ð : Type*} [NontriviallyNormedField ð] {E : Type*} [NormedAddCommGroup E]
[NormedSpace ð E] {F : Type*} [NormedAddCommGroup F] [NormedSpace ð F]
open scoped Topology NNReal ENNReal
open Filter Asymptotics
namespace FormalMultilinearSeries

variable (p : FormalMultilinearSeries ð E F)

/-- The radius of a formal multilinear series is equal to
$\liminf_{n\to\infty} \frac{1}{\sqrt[n]{âp nâ}}$. The actual statement uses `ââ¥0` and some
coercions. -/
theorem radius_eq_liminf :
    p.radius = liminf (fun n => (1 / (âp nââ ^ (1 / (n : â)) : ââ¥0) : ââ¥0â)) atTop := by
  -- Porting note: added type ascription to make elaborated statement match Lean 3 version
  -- Key reformulation: for `n > 0`, the bound `r †1 / âp nâ ^ (1/n)` is equivalent to
  -- `âp nâ * r ^ n †1`.
  have :
      â (r : ââ¥0) {n : â},
        0 < n â ((r : ââ¥0â) †1 / â(âp nââ ^ (1 / (n : â))) â âp nââ * r ^ n †1) := by
    intro r n hn
    have : 0 < (n : â) := Nat.cast_pos.2 hn
    conv_lhs =>
      rw [one_div, ENNReal.le_inv_iff_mul_le, â ENNReal.coe_mul, ENNReal.coe_le_one_iff, one_div, â
        NNReal.rpow_one r, â mul_inv_cancel this.ne', NNReal.rpow_mul, â NNReal.mul_rpow, â
        NNReal.one_rpow nâ»Â¹, NNReal.rpow_le_rpow_iff (inv_pos.2 this), mul_comm,
        NNReal.rpow_natCast]
  -- Prove the two inequalities separately, each by bounding an arbitrary `ââ¥0` below the LHS.
  apply le_antisymm <;> refine ENNReal.le_of_forall_nnreal_lt fun r hr => ?_
  · have := ((TFAE_exists_lt_isLittleO_pow (fun n => âp nâ * r ^ n) 1).out 1 7).1
      (p.isLittleO_of_lt_radius hr)
    obtain âša, ha, Hâ© := this
    apply le_liminf_of_le
    · infer_param
    · rw [â eventually_map]
      refine
        H.mp ((eventually_gt_atTop 0).mono fun n hnâ hn => (this _ hnâ).2 (NNReal.coe_le_coe.1 ?_))
      push_cast
      exact (le_abs_self _).trans (hn.trans (pow_le_one _ ha.1.le ha.2.le))
  · refine p.le_radius_of_isBigO (IsBigO.of_bound 1 ?_)
    refine (eventually_lt_of_lt_liminf hr).mp ((eventually_gt_atTop 0).mono fun n hnâ hn => ?_)
    simpa using NNReal.coe_le_coe.2 ((this _ hnâ).1 hn.le)

end FormalMultilinearSeries
|
Analysis\Analytic\Uniqueness.lean | /-
Copyright (c) 2022 Sébastien Gouëzel. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Sébastien Gouëzel
-/
import Mathlib.Analysis.Analytic.Linear
import Mathlib.Analysis.Analytic.Composition
import Mathlib.Analysis.Normed.Module.Completion
/-!
# Uniqueness principle for analytic functions
We show that two analytic functions which coincide around a point coincide on whole connected sets,
in `AnalyticOn.eqOn_of_preconnected_of_eventuallyEq`.
-/
variable {ð : Type*} [NontriviallyNormedField ð] {E : Type*} [NormedAddCommGroup E]
[NormedSpace ð E] {F : Type*} [NormedAddCommGroup F] [NormedSpace ð F]
open Set
open scoped Topology ENNReal
namespace AnalyticOn
/-- If an analytic function vanishes around a point, then it is uniformly zero along
a connected set. Superseded by `eqOn_zero_of_preconnected_of_locally_zero` which does not assume
completeness of the target space. -/
theorem eqOn_zero_of_preconnected_of_eventuallyEq_zero_aux [CompleteSpace F] {f : E â F} {U : Set E}
    (hf : AnalyticOn ð f U) (hU : IsPreconnected U) {zâ : E} (hâ : zâ â U) (hfzâ : f =á¶ [ð zâ] 0) :
    EqOn f 0 U := by
  /- Let `u` be the set of points around which `f` vanishes. It is clearly open. We have to show
    that its limit points in `U` still belong to it, from which the inclusion `U â u` will follow
    by connectedness. -/
  let u := {x | f =á¶ [ð x] 0}
  suffices main : closure u â© U â u by
    have Uu : U â u :=
      hU.subset_of_closure_inter_subset isOpen_setOf_eventually_nhds âšzâ, hâ, hfzââ© main
    intro z hz
    simpa using mem_of_mem_nhds (Uu hz)
  /- Take a limit point `x`, then a ball `B (x, r)` on which it has a power series expansion, and
    then `y â B (x, r/2) â© u`. Then `f` has a power series expansion on `B (y, r/2)` as it is
    contained in `B (x, r)`. All the coefficients in this series expansion vanish, as `f` is zero
    on a neighborhood of `y`. Therefore, `f` is zero on `B (y, r/2)`. As this ball contains `x`,
    it follows that `f` vanishes on a neighborhood of `x`, proving the claim. -/
  rintro x âšxu, xUâ©
  rcases hf x xU with âšp, r, hpâ©
  obtain âšy, yu, hxyâ© : â y â u, edist x y < r / 2 :=
    EMetric.mem_closure_iff.1 xu (r / 2) (ENNReal.half_pos hp.r_pos.ne')
  -- Recenter the power series of `f` at `y`; it still converges on a ball of radius `r / 2`.
  let q := p.changeOrigin (y - x)
  have has_series : HasFPowerSeriesOnBall f q y (r / 2) := by
    have A : (ây - xââ : ââ¥0â) < r / 2 := by rwa [edist_comm, edist_eq_coe_nnnorm_sub] at hxy
    have := hp.changeOrigin (A.trans_le ENNReal.half_le_self)
    simp only [add_sub_cancel] at this
    apply this.mono (ENNReal.half_pos hp.r_pos.ne')
    apply ENNReal.le_sub_of_add_le_left ENNReal.coe_ne_top
    apply (add_le_add A.le (le_refl (r / 2))).trans (le_of_eq _)
    exact ENNReal.add_halves _
  have M : EMetric.ball y (r / 2) â ð x := EMetric.isOpen_ball.mem_nhds hxy
  filter_upwards [M] with z hz
  -- The recentered series sums to `f z`, but every coefficient vanishes since `f = 0` near `y`.
  have A : HasSum (fun n : â => q n fun _ : Fin n => z - y) (f z) := has_series.hasSum_sub hz
  have B : HasSum (fun n : â => q n fun _ : Fin n => z - y) 0 := by
    have : HasFPowerSeriesAt 0 q y := has_series.hasFPowerSeriesAt.congr yu
    convert hasSum_zero (α := F) using 2
    ext n
    exact this.apply_eq_zero n _
  exact HasSum.unique A B
/-- The *identity principle* for analytic functions: If an analytic function vanishes in a whole
neighborhood of a point `zâ`, then it is uniformly zero along a connected set. For a one-dimensional
version assuming only that the function vanishes at some points arbitrarily close to `zâ`, see
`eqOn_zero_of_preconnected_of_frequently_eq_zero`. -/
theorem eqOn_zero_of_preconnected_of_eventuallyEq_zero {f : E â F} {U : Set E}
    (hf : AnalyticOn ð f U) (hU : IsPreconnected U) {zâ : E} (hâ : zâ â U) (hfzâ : f =á¶ [ð zâ] 0) :
    EqOn f 0 U := by
  -- Reduce to the complete case by composing with the embedding `e` into the completion of `F`.
  let F' := UniformSpace.Completion F
  set e : F âL[ð] F' := UniformSpace.Completion.toComplL
  have : AnalyticOn ð (e â f) U := fun x hx => (e.analyticAt _).comp (hf x hx)
  have A : EqOn (e â f) 0 U := by
    apply eqOn_zero_of_preconnected_of_eventuallyEq_zero_aux this hU hâ
    filter_upwards [hfzâ] with x hx
    simp only [hx, Function.comp_apply, Pi.zero_apply, map_zero]
  -- Conclude using injectivity of the coercion into the completion.
  intro z hz
  have : e (f z) = e 0 := by simpa only using A hz
  exact UniformSpace.Completion.coe_injective F this
/-- The *identity principle* for analytic functions: If two analytic functions coincide in a whole
neighborhood of a point `zâ`, then they coincide globally along a connected set.
For a one-dimensional version assuming only that the functions coincide at some points
arbitrarily close to `zâ`, see `eqOn_of_preconnected_of_frequently_eq`. -/
theorem eqOn_of_preconnected_of_eventuallyEq {f g : E â F} {U : Set E} (hf : AnalyticOn ð f U)
    (hg : AnalyticOn ð g U) (hU : IsPreconnected U) {zâ : E} (hâ : zâ â U) (hfg : f =á¶ [ð zâ] g) :
    EqOn f g U := by
  -- Apply the vanishing version to the difference `f - g`.
  have hfg' : f - g =á¶ [ð zâ] 0 := hfg.mono fun z h => by simp [h]
  simpa [sub_eq_zero] using fun z hz =>
    (hf.sub hg).eqOn_zero_of_preconnected_of_eventuallyEq_zero hU hâ hfg' hz
/-- The *identity principle* for analytic functions: If two analytic functions on a normed space
coincide in a neighborhood of a point `zâ`, then they coincide everywhere.
For a one-dimensional version assuming only that the functions coincide at some points
arbitrarily close to `zâ`, see `eq_of_frequently_eq`. -/
theorem eq_of_eventuallyEq {f g : E â F} [PreconnectedSpace E] (hf : AnalyticOn ð f univ)
    (hg : AnalyticOn ð g univ) {zâ : E} (hfg : f =á¶ [ð zâ] g) : f = g :=
  -- Specialize the preconnected-set version to `U = univ`.
  funext fun x =>
    eqOn_of_preconnected_of_eventuallyEq hf hg isPreconnected_univ (mem_univ zâ) hfg (mem_univ x)
end AnalyticOn
|
Analysis\Analytic\Within.lean | /-
Copyright (c) 2024 Geoffrey Irving. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Geoffrey Irving
-/
import Mathlib.Analysis.Analytic.Constructions
import Mathlib.Analysis.Calculus.FDeriv.Analytic
/-!
# Properties of analyticity restricted to a set
From `Mathlib.Analysis.Analytic.Basic`, we have the definitions
1. `AnalyticWithinAt ð f s x` means a power series at `x` converges to `f` on `ð[s] x`, and
`f` is continuous within `s` at `x`.
2. `AnalyticWithinOn ð f s t` means `â x â t, AnalyticWithinAt ð f s x`.
This means there exists an extension of `f` which is analytic and agrees with `f` on `s ⪠{x}`, but
`f` is allowed to be arbitrary elsewhere. Requiring `ContinuousWithinAt` is essential if `x â s`:
it is required for composition and smoothness to follow without extra hypotheses (we could
alternately require convergence at `x` even if `x â s`).
Here we prove basic properties of these definitions. Where convenient we assume completeness of the
ambient space, which allows us to relate `AnalyticWithinAt` to analyticity of a local extension.
-/
noncomputable section
open Topology Filter ENNReal
open Set Filter
variable {ð : Type*} [NontriviallyNormedField ð]
variable {E F G H : Type*} [NormedAddCommGroup E] [NormedSpace ð E] [NormedAddCommGroup F]
[NormedSpace ð F] [NormedAddCommGroup G] [NormedSpace ð G] [NormedAddCommGroup H]
[NormedSpace ð H]
/-!
### Basic properties
-/
/-- Over `univ`, a power series within a set on a ball is the same as an unrestricted one. -/
@[simp] lemma hasFPowerSeriesWithinOnBall_univ {f : E â F} {p : FormalMultilinearSeries ð E F}
    {x : E} {r : ââ¥0â} :
    HasFPowerSeriesWithinOnBall f p univ x r â HasFPowerSeriesOnBall f p x r := by
  constructor
  · intro h
    exact âšh.r_le, h.r_pos, fun {y} m ⊠h.hasSum (mem_univ _) mâ©
  · intro h
    refine âšh.r_le, h.r_pos, fun {y} _ m => h.hasSum m, ?_â©
    -- The required continuity within `univ` follows from continuity on the convergence ball.
    exact (h.continuousOn.continuousAt (EMetric.ball_mem_nhds x h.r_pos)).continuousWithinAt

/-- Over `univ`, a power series within a set at a point is the same as an unrestricted one. -/
@[simp] lemma hasFPowerSeriesWithinAt_univ {f : E â F} {p : FormalMultilinearSeries ð E F} {x : E} :
    HasFPowerSeriesWithinAt f p univ x â HasFPowerSeriesAt f p x := by
  simp only [HasFPowerSeriesWithinAt, hasFPowerSeriesWithinOnBall_univ, HasFPowerSeriesAt]

/-- Analyticity within `univ` is plain analyticity. -/
@[simp] lemma analyticWithinAt_univ {f : E â F} {x : E} :
    AnalyticWithinAt ð f univ x â AnalyticAt ð f x := by
  simp only [AnalyticWithinAt, hasFPowerSeriesWithinAt_univ, AnalyticAt]

/-- Analyticity within `univ`, on `univ`, is plain analyticity on `univ`. -/
lemma analyticWithinOn_univ {f : E â F} :
    AnalyticWithinOn ð f univ â AnalyticOn ð f univ := by
  simp only [AnalyticWithinOn, analyticWithinAt_univ, AnalyticOn]

/-- A power series within `s` at `x` forces continuity within `s` at `x`. -/
lemma HasFPowerSeriesWithinAt.continuousWithinAt {f : E â F} {p : FormalMultilinearSeries ð E F}
    {s : Set E} {x : E} (h : HasFPowerSeriesWithinAt f p s x) : ContinuousWithinAt f s x := by
  rcases h with âšr, hâ©
  exact h.continuousWithinAt

/-- Analyticity within `s` at `x` forces continuity within `s` at `x`. -/
lemma AnalyticWithinAt.continuousWithinAt {f : E â F} {s : Set E} {x : E}
    (h : AnalyticWithinAt ð f s x) : ContinuousWithinAt f s x := by
  rcases h with âšp, hâ©
  exact h.continuousWithinAt
/-- `AnalyticWithinAt` is trivial if `{x} â ð[s] x` -/
lemma analyticWithinAt_of_singleton_mem {f : E â F} {s : Set E} {x : E} (h : {x} â ð[s] x) :
    AnalyticWithinAt ð f s x := by
  -- `f` is continuous within `s` at `x`, as `ð[s] x` is below the pure filter at `x`.
  have fc : ContinuousWithinAt f s x :=
    Filter.Tendsto.mono_left (tendsto_pure_nhds _ _) (Filter.le_pure_iff.mpr h)
  -- Extract an open `t â ð x` with `s â© t â {x}`, then a metric ball of radius `r` inside `t`.
  rcases mem_nhdsWithin.mp h with âšt, ot, xt, stâ©
  rcases Metric.mem_nhds_iff.mp (ot.mem_nhds xt) with âšr, r0, rtâ©
  -- The constant series with value `f x` works on this ball, since the only relevant point is `x`.
  exact âšconstFormalMultilinearSeries ð E (f x), .ofReal r, {
    r_le := by simp only [FormalMultilinearSeries.constFormalMultilinearSeries_radius, le_top]
    r_pos := by positivity
    hasSum := by
      intro y ys yr
      simp only [subset_singleton_iff, mem_inter_iff, and_imp] at st
      specialize st (x + y) (rt (by simpa using yr)) ys
      simp only [st]
      apply (hasFPowerSeriesOnBall_const (e := 0)).hasSum
      simp only [Metric.emetric_ball_top, mem_univ]
    continuousWithinAt := fc
  }â©
/-- Analyticity implies analyticity within any `s` -/
lemma AnalyticAt.analyticWithinAt {f : E â F} {s : Set E} {x : E} (h : AnalyticAt ð f x) :
    AnalyticWithinAt ð f s x := by
  rcases h with âšp, r, hpâ©
  -- Reuse the same series and radius, discarding the (unneeded) membership hypothesis.
  exact âšp, r, {
    r_le := hp.r_le
    r_pos := hp.r_pos
    hasSum := fun {y} _ yr ⊠hp.hasSum yr
    continuousWithinAt :=
      (hp.continuousOn.continuousAt (EMetric.ball_mem_nhds x hp.r_pos)).continuousWithinAt
  }â©

/-- Analyticity on `s` implies analyticity within `s` -/
lemma AnalyticOn.analyticWithinOn {f : E â F} {s : Set E} (h : AnalyticOn ð f s) :
    AnalyticWithinOn ð f s :=
  fun x m ⊠(h x m).analyticWithinAt

/-- `AnalyticWithinOn` implies continuity on the set. -/
lemma AnalyticWithinOn.continuousOn {f : E â F} {s : Set E} (h : AnalyticWithinOn ð f s) :
    ContinuousOn f s :=
  fun x m ⊠(h x m).continuousWithinAt
/-- If `f` is `AnalyticWithinOn` near each point in a set, it is `AnalyticWithinOn` the set -/
lemma analyticWithinOn_of_locally_analyticWithinOn {f : E â F} {s : Set E}
    (h : â x â s, â u, IsOpen u â§ x â u â§ AnalyticWithinOn ð f (s â© u)) :
    AnalyticWithinOn ð f s := by
  intro x m
  rcases h x m with âšu, ou, xu, fuâ©
  rcases Metric.mem_nhds_iff.mp (ou.mem_nhds xu) with âšr, r0, ruâ©
  rcases fu x âšm, xuâ© with âšp, t, fpâ©
  -- Shrink the radius so the ball stays inside `u`, where the local series is valid.
  exact âšp, min (.ofReal r) t, {
    r_pos := lt_min (by positivity) fp.r_pos
    r_le := min_le_of_right_le fp.r_le
    hasSum := by
      intro y ys yr
      simp only [EMetric.mem_ball, lt_min_iff, edist_lt_ofReal, dist_zero_right] at yr
      apply fp.hasSum âšys, ru ?_â©
      · simp only [EMetric.mem_ball, yr]
      · simp only [Metric.mem_ball, dist_self_add_left, yr]
    continuousWithinAt := by
      refine (fu.continuousOn x âšm, xuâ©).mono_left (le_of_eq ?_)
      exact nhdsWithin_eq_nhdsWithin xu ou (by simp only [inter_assoc, inter_self])
  }â©

/-- On open sets, `AnalyticOn` and `AnalyticWithinOn` coincide -/
@[simp] lemma IsOpen.analyticWithinOn_iff_analyticOn {f : E â F} {s : Set E} (hs : IsOpen s) :
    AnalyticWithinOn ð f s â AnalyticOn ð f s := by
  refine âš?_, AnalyticOn.analyticWithinOnâ©
  intro hf x m
  rcases Metric.mem_nhds_iff.mp (hs.mem_nhds m) with âšr, r0, rsâ©
  rcases hf x m with âšp, t, fpâ©
  -- Shrink the radius so the ball stays inside `s`; then the membership side condition is free.
  exact âšp, min (.ofReal r) t, {
    r_pos := lt_min (by positivity) fp.r_pos
    r_le := min_le_of_right_le fp.r_le
    hasSum := by
      intro y ym
      simp only [EMetric.mem_ball, lt_min_iff, edist_lt_ofReal, dist_zero_right] at ym
      refine fp.hasSum (rs ?_) ym.2
      simp only [Metric.mem_ball, dist_self_add_left, ym.1]
  }â©
/-!
### Equivalence to analyticity of a local extension
We show that `HasFPowerSeriesWithinOnBall`, `HasFPowerSeriesWithinAt`, and `AnalyticWithinAt` are
equivalent to the existence of a local extension with full analyticity. We do not yet show a
result for `AnalyticWithinOn`, as this requires a bit more work to show that local extensions can
be stitched together.
-/
/-- `f` has power series `p` at `x` iff some local extension of `f` has that series -/
lemma hasFPowerSeriesWithinOnBall_iff_exists_hasFPowerSeriesOnBall [CompleteSpace F] {f : E â F}
    {p : FormalMultilinearSeries ð E F} {s : Set E} {x : E} {r : ââ¥0â} :
    HasFPowerSeriesWithinOnBall f p s x r â
      ContinuousWithinAt f s x â§ â g, EqOn f g (s â© EMetric.ball x r) â§
        HasFPowerSeriesOnBall g p x r := by
  constructor
  · intro h
    -- Use the sum of the series itself as the analytic extension `g`.
    refine âšh.continuousWithinAt, fun y ⊠p.sum (y - x), ?_, ?_â©
    · intro y âšys,ybâ©
      simp only [EMetric.mem_ball, edist_eq_coe_nnnorm_sub] at yb
      -- Compare the two sums at `y - x` and use uniqueness of limits.
      have e0 := p.hasSum (x := y - x) ?_
      have e1 := (h.hasSum (y := y - x) ?_ ?_)
      · simp only [add_sub_cancel] at e1
        exact e1.unique e0
      · simpa only [add_sub_cancel]
      · simpa only [EMetric.mem_ball, edist_eq_coe_nnnorm]
      · simp only [EMetric.mem_ball, edist_eq_coe_nnnorm]
        exact lt_of_lt_of_le yb h.r_le
    · refine âšh.r_le, h.r_pos, ?_â©
      intro y lt
      simp only [add_sub_cancel_left]
      apply p.hasSum
      simp only [EMetric.mem_ball] at lt â¢
      exact lt_of_lt_of_le lt h.r_le
  · intro âšmem, g, hfg, hgâ©
    refine âšhg.r_le, hg.r_pos, ?_, memâ©
    intro y ys lt
    -- Transfer the sum from `g` back to `f`, valid on `s â© ball x r`.
    rw [hfg]
    · exact hg.hasSum lt
    · refine âšys, ?_â©
      simpa only [EMetric.mem_ball, edist_eq_coe_nnnorm_sub, add_sub_cancel_left, sub_zero] using lt
/-- `f` has power series `p` at `x` iff some local extension of `f` has that series -/
lemma hasFPowerSeriesWithinAt_iff_exists_hasFPowerSeriesAt [CompleteSpace F] {f : E â F}
    {p : FormalMultilinearSeries ð E F} {s : Set E} {x : E} :
    HasFPowerSeriesWithinAt f p s x â
      ContinuousWithinAt f s x â§ â g, f =á¶ [ð[s] x] g â§ HasFPowerSeriesAt g p x := by
  constructor
  · intro âšr, hâ©
    rcases hasFPowerSeriesWithinOnBall_iff_exists_hasFPowerSeriesOnBall.mp h with âšfc, g, e, hâ©
    refine âšfc, g, ?_, âšr, hâ©â©
    -- `EqOn` on `s â© ball x r` upgrades to eventual equality in `ð[s] x`.
    refine Filter.eventuallyEq_iff_exists_mem.mpr âš_, ?_, eâ©
    exact inter_mem_nhdsWithin _ (EMetric.ball_mem_nhds _ h.r_pos)
  · intro âšmem, g, hfg, âšr, hgâ©â©
    simp only [eventuallyEq_nhdsWithin_iff, Metric.eventually_nhds_iff] at hfg
    rcases hfg with âše, e0, hfgâ©
    -- Shrink the radius so the eventual equality `f = g` holds on the whole ball.
    refine âšmin r (.ofReal e), ?_â©
    refine hasFPowerSeriesWithinOnBall_iff_exists_hasFPowerSeriesOnBall.mpr âšmem, g, ?_, ?_â©
    · intro y âšys, xyâ©
      refine hfg ?_ ys
      simp only [EMetric.mem_ball, lt_min_iff, edist_lt_ofReal] at xy
      exact xy.2
    · exact hg.mono (lt_min hg.r_pos (by positivity)) (min_le_left _ _)
/-- `f` is analytic within `s` at `x` iff some local extension of `f` is analytic at `x` -/
lemma analyticWithinAt_iff_exists_analyticAt [CompleteSpace F] {f : E â F} {s : Set E} {x : E} :
    AnalyticWithinAt ð f s x â
      ContinuousWithinAt f s x â§ â g, f =á¶ [ð[s] x] g â§ AnalyticAt ð g x := by
  simp only [AnalyticWithinAt, AnalyticAt, hasFPowerSeriesWithinAt_iff_exists_hasFPowerSeriesAt]
  tauto

/-- If `f` is analytic within `s` at `x`, some local extension of `f` is analytic at `x` -/
lemma AnalyticWithinAt.exists_analyticAt [CompleteSpace F] {f : E â F} {s : Set E} {x : E}
    (h : AnalyticWithinAt ð f s x) : â g, f x = g x â§ f =á¶ [ð[s] x] g â§ AnalyticAt ð g x := by
  by_cases s0 : ð[s] x = â¥
  -- If the within-filter is trivial, the constant extension works vacuously.
  · refine âšfun _ ⊠f x, rfl, ?_, analyticAt_constâ©
    simp only [EventuallyEq, s0, eventually_bot]
  · rcases analyticWithinAt_iff_exists_analyticAt.mp h with âš_, g, fg, hgâ©
    refine âšg, ?_, fg, hgâ©
    -- Nontriviality of `ð[s] x` lets us identify `f x` and `g x` by uniqueness of limits.
    exact tendsto_nhds_unique' âšs0â© h.continuousWithinAt
      (hg.continuousAt.continuousWithinAt.congr' fg.symm)
/-!
### Congruence
We require completeness to use equivalence to locally extensions, but this is nonessential.
-/
/-- Analyticity within `s` at `x` is stable under changing the function on `ð[s] x`, provided the
values at `x` itself agree. -/
lemma AnalyticWithinAt.congr_of_eventuallyEq [CompleteSpace F] {f g : E â F} {s : Set E} {x : E}
    (hf : AnalyticWithinAt ð f s x) (hs : f =á¶ [ð[s] x] g) (hx : f x = g x) :
    AnalyticWithinAt ð g s x := by
  rcases hf.exists_analyticAt with âšf', fx, ef, hf'â©
  rw [analyticWithinAt_iff_exists_analyticAt]
  -- The analytic local extension `f'` of `f` also extends `g`.
  have eg := hs.symm.trans ef
  refine âš?_, f', eg, hf'â©
  exact hf'.continuousAt.continuousWithinAt.congr_of_eventuallyEq eg (hx.symm.trans fx)

/-- Analyticity within `s` at `x` is stable under changing the function on `s`, provided the
values at `x` itself agree. -/
lemma AnalyticWithinAt.congr [CompleteSpace F] {f g : E â F} {s : Set E} {x : E}
    (hf : AnalyticWithinAt ð f s x) (hs : EqOn f g s) (hx : f x = g x) :
    AnalyticWithinAt ð g s x :=
  hf.congr_of_eventuallyEq hs.eventuallyEq_nhdsWithin hx

/-- Analyticity within `s` is stable under changing the function on `s`. -/
lemma AnalyticWithinOn.congr [CompleteSpace F] {f g : E â F} {s : Set E}
    (hf : AnalyticWithinOn ð f s) (hs : EqOn f g s) :
    AnalyticWithinOn ð g s :=
  fun x m ⊠(hf x m).congr hs (hs m)
/-!
### Monotonicity w.r.t. the set we're analytic within
-/
/-- A power series within `t` on a ball is also a power series within any subset `s â t`. -/
lemma HasFPowerSeriesWithinOnBall.mono {f : E â F} {p : FormalMultilinearSeries ð E F}
    {s t : Set E} {x : E} {r : ââ¥0â} (h : HasFPowerSeriesWithinOnBall f p t x r)
    (hs : s â t) : HasFPowerSeriesWithinOnBall f p s x r where
  r_le := h.r_le
  r_pos := h.r_pos
  hasSum {_} ys yb := h.hasSum (hs ys) yb
  continuousWithinAt := h.continuousWithinAt.mono hs

/-- A power series within `t` at `x` is also a power series within any subset `s â t`. -/
lemma HasFPowerSeriesWithinAt.mono {f : E â F} {p : FormalMultilinearSeries ð E F}
    {s t : Set E} {x : E} (h : HasFPowerSeriesWithinAt f p t x)
    (hs : s â t) : HasFPowerSeriesWithinAt f p s x := by
  rcases h with âšr, hrâ©
  exact âšr, hr.mono hsâ©

/-- Analyticity within `t` at `x` implies analyticity within any subset `s â t` at `x`. -/
lemma AnalyticWithinAt.mono {f : E â F} {s t : Set E} {x : E} (h : AnalyticWithinAt ð f t x)
    (hs : s â t) : AnalyticWithinAt ð f s x := by
  rcases h with âšp, hpâ©
  exact âšp, hp.mono hsâ©

/-- Analyticity within `t` implies analyticity within any subset `s â t`. -/
lemma AnalyticWithinOn.mono {f : E â F} {s t : Set E} (h : AnalyticWithinOn ð f t)
    (hs : s â t) : AnalyticWithinOn ð f s :=
  fun _ m ⊠(h _ (hs m)).mono hs
/-!
### Analyticity within respects composition
Currently we require `CompleteSpace`s to use equivalence to local extensions, but this is not
essential.
-/
/-- Composition: if `f` is analytic within `s` at `g x` and `g` is analytic within `t` at `x`,
with `g` mapping `t` into `s`, then `f â g` is analytic within `t` at `x`. -/
lemma AnalyticWithinAt.comp [CompleteSpace F] [CompleteSpace G] {f : F â G} {g : E â F} {s : Set F}
    {t : Set E} {x : E} (hf : AnalyticWithinAt ð f s (g x)) (hg : AnalyticWithinAt ð g t x)
    (h : MapsTo g t s) : AnalyticWithinAt ð (f â g) t x := by
  -- Pass to analytic local extensions `f'` and `g'` and compose those.
  rcases hf.exists_analyticAt with âšf', _, ef, hf'â©
  rcases hg.exists_analyticAt with âšg', gx, eg, hg'â©
  refine analyticWithinAt_iff_exists_analyticAt.mpr âš?_, f' â g', ?_, ?_â©
  · exact hf.continuousWithinAt.comp hg.continuousWithinAt h
  · have gt := hg.continuousWithinAt.tendsto_nhdsWithin h
    filter_upwards [eg, gt.eventually ef]
    intro y gy fgy
    simp only [Function.comp_apply, fgy, â gy]
  · exact hf'.comp_of_eq hg' gx.symm

/-- Set version of `AnalyticWithinAt.comp`. -/
lemma AnalyticWithinOn.comp [CompleteSpace F] [CompleteSpace G] {f : F â G} {g : E â F} {s : Set F}
    {t : Set E} (hf : AnalyticWithinOn ð f s) (hg : AnalyticWithinOn ð g t) (h : MapsTo g t s) :
    AnalyticWithinOn ð (f â g) t :=
  fun x m ⊠(hf _ (h m)).comp (hg x m) h
/-!
### Analyticity within implies smoothness
-/
/-- Analyticity within `s` at `x` gives `ContDiffWithinAt` of any order. -/
lemma AnalyticWithinAt.contDiffWithinAt [CompleteSpace F] {f : E â F} {s : Set E} {x : E}
    (h : AnalyticWithinAt ð f s x) {n : ââ} : ContDiffWithinAt ð n f s x := by
  -- Smoothness of an analytic local extension transfers back to `f`.
  rcases h.exists_analyticAt with âšg, fx, fg, hgâ©
  exact hg.contDiffAt.contDiffWithinAt.congr_of_eventuallyEq fg fx

/-- Analyticity within `s` gives `ContDiffOn` of any order. -/
lemma AnalyticWithinOn.contDiffOn [CompleteSpace F] {f : E â F} {s : Set E}
    (h : AnalyticWithinOn ð f s) {n : ââ} : ContDiffOn ð n f s :=
  fun x m ⊠(h x m).contDiffWithinAt
/-!
### Analyticity within respects products
-/
/-- Two power series within a set can be paired, on the minimum of the two radii. -/
lemma HasFPowerSeriesWithinOnBall.prod {e : E} {f : E â F} {g : E â G} {s : Set E} {r t : ââ¥0â}
    {p : FormalMultilinearSeries ð E F} {q : FormalMultilinearSeries ð E G}
    (hf : HasFPowerSeriesWithinOnBall f p s e r) (hg : HasFPowerSeriesWithinOnBall g q s e t) :
    HasFPowerSeriesWithinOnBall (fun x ⊠(f x, g x)) (p.prod q) s e (min r t) where
  r_le := by
    rw [p.radius_prod_eq_min]
    exact min_le_min hf.r_le hg.r_le
  r_pos := lt_min hf.r_pos hg.r_pos
  hasSum := by
    intro y m hy
    simp_rw [FormalMultilinearSeries.prod, ContinuousMultilinearMap.prod_apply]
    -- Each component sums on its own ball; `min r t` is inside both.
    refine (hf.hasSum m ?_).prod_mk (hg.hasSum m ?_)
    · exact EMetric.mem_ball.mpr (lt_of_lt_of_le hy (min_le_left _ _))
    · exact EMetric.mem_ball.mpr (lt_of_lt_of_le hy (min_le_right _ _))
  continuousWithinAt := hf.continuousWithinAt.prod hg.continuousWithinAt

/-- Two power series within a set at a point can be paired. -/
lemma HasFPowerSeriesWithinAt.prod {e : E} {f : E â F} {g : E â G} {s : Set E}
    {p : FormalMultilinearSeries ð E F} {q : FormalMultilinearSeries ð E G}
    (hf : HasFPowerSeriesWithinAt f p s e) (hg : HasFPowerSeriesWithinAt g q s e) :
    HasFPowerSeriesWithinAt (fun x ⊠(f x, g x)) (p.prod q) s e := by
  rcases hf with âš_, hfâ©
  rcases hg with âš_, hgâ©
  exact âš_, hf.prod hgâ©

/-- The pair of two functions analytic within `s` at `e` is analytic within `s` at `e`. -/
lemma AnalyticWithinAt.prod {e : E} {f : E â F} {g : E â G} {s : Set E}
    (hf : AnalyticWithinAt ð f s e) (hg : AnalyticWithinAt ð g s e) :
    AnalyticWithinAt ð (fun x ⊠(f x, g x)) s e := by
  rcases hf with âš_, hfâ©
  rcases hg with âš_, hgâ©
  exact âš_, hf.prod hgâ©

/-- The pair of two functions analytic within `s` is analytic within `s`. -/
lemma AnalyticWithinOn.prod {f : E â F} {g : E â G} {s : Set E}
    (hf : AnalyticWithinOn ð f s) (hg : AnalyticWithinOn ð g s) :
    AnalyticWithinOn ð (fun x ⊠(f x, g x)) s :=
  fun x hx ⊠(hf x hx).prod (hg x hx)
|
Analysis\Asymptotics\AsymptoticEquivalent.lean | /-
Copyright (c) 2020 Anatole Dedecker. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Anatole Dedecker
-/
import Mathlib.Analysis.Asymptotics.Asymptotics
import Mathlib.Analysis.Asymptotics.Theta
import Mathlib.Analysis.Normed.Order.Basic
/-!
# Asymptotic equivalence
In this file, we define the relation `IsEquivalent l u v`, which means that `u-v` is little o of
`v` along the filter `l`.
Unlike `Is(Little|Big)O` relations, this one requires `u` and `v` to have the same codomain `β`.
While the definition only requires `β` to be a `NormedAddCommGroup`, most interesting properties
require it to be a `NormedField`.
## Notations
We introduce the notation `u ~[l] v := IsEquivalent l u v`, which you can use by opening the
`Asymptotics` locale.
## Main results
If `β` is a `NormedAddCommGroup` :
- `_ ~[l] _` is an equivalence relation
- Equivalent statements for `u ~[l] const _ c` :
- If `c â 0`, this is true iff `Tendsto u l (ð c)` (see `isEquivalent_const_iff_tendsto`)
- For `c = 0`, this is true iff `u =á¶ [l] 0` (see `isEquivalent_zero_iff_eventually_zero`)
If `β` is a `NormedField` :
- Alternative characterization of the relation (see `isEquivalent_iff_exists_eq_mul`) :
`u ~[l] v â â (Ï : α â β) (hÏ : Tendsto Ï l (ð 1)), u =á¶ [l] Ï * v`
- Provided some non-vanishing hypothesis, this can be seen as `u ~[l] v â Tendsto (u/v) l (ð 1)`
(see `isEquivalent_iff_tendsto_one`)
- For any constant `c`, `u ~[l] v` implies `Tendsto u l (ð c) â Tendsto v l (ð c)`
(see `IsEquivalent.tendsto_nhds_iff`)
- `*` and `/` are compatible with `_ ~[l] _` (see `IsEquivalent.mul` and `IsEquivalent.div`)
If `β` is a `NormedLinearOrderedField` :
- If `u ~[l] v`, we have `Tendsto u l atTop â Tendsto v l atTop`
(see `IsEquivalent.tendsto_atTop_iff`)
## Implementation Notes
Note that `IsEquivalent` takes the parameters `(l : Filter α) (u v : α â β)` in that order.
This is to enable `calc` support, as `calc` requires that the last two explicit arguments are `u v`.
-/
namespace Asymptotics
open Filter Function
open Topology
section NormedAddCommGroup
variable {α β : Type*} [NormedAddCommGroup β]
/-- Two functions `u` and `v` are said to be asymptotically equivalent along a filter `l` when
`u x - v x = o(v x)` as `x` converges along `l`. -/
def IsEquivalent (l : Filter α) (u v : α â β) :=
  (u - v) =o[l] v

@[inherit_doc] scoped notation:50 u " ~[" l:50 "] " v:50 => Asymptotics.IsEquivalent l u v

variable {u v w : α â β} {l : Filter α}

/-- Unfold `IsEquivalent` to the underlying little-o statement. -/
theorem IsEquivalent.isLittleO (h : u ~[l] v) : (u - v) =o[l] v := h

/-- Asymptotically equivalent functions are big-O of each other: this direction. -/
nonrec theorem IsEquivalent.isBigO (h : u ~[l] v) : u =O[l] v :=
  (IsBigO.congr_of_sub h.isBigO.symm).mp (isBigO_refl _ _)

/-- Asymptotically equivalent functions are big-O of each other: the symmetric direction. -/
theorem IsEquivalent.isBigO_symm (h : u ~[l] v) : v =O[l] u := by
  convert h.isLittleO.right_isBigO_add
  simp

/-- Asymptotically equivalent functions are big-theta of each other. -/
theorem IsEquivalent.isTheta (h : u ~[l] v) : u =Î[l] v :=
  âšh.isBigO, h.isBigO_symmâ©

/-- Asymptotically equivalent functions are big-theta of each other (symmetric direction). -/
theorem IsEquivalent.isTheta_symm (h : u ~[l] v) : v =Î[l] u :=
  âšh.isBigO_symm, h.isBigOâ©

/-- `~[l]` is reflexive. -/
@[refl]
theorem IsEquivalent.refl : u ~[l] u := by
  rw [IsEquivalent, sub_self]
  exact isLittleO_zero _ _

/-- `~[l]` is symmetric. -/
@[symm]
theorem IsEquivalent.symm (h : u ~[l] v) : v ~[l] u :=
  (h.isLittleO.trans_isBigO h.isBigO_symm).symm

/-- `~[l]` is transitive. -/
@[trans]
theorem IsEquivalent.trans {l : Filter α} {u v w : α â β} (huv : u ~[l] v) (hvw : v ~[l] w) :
    u ~[l] w :=
  (huv.isLittleO.trans_isBigO hvw.isBigO).triangle hvw.isLittleO

/-- `· ~[l] v` is stable under eventual equality on the left. -/
theorem IsEquivalent.congr_left {u v w : α â β} {l : Filter α} (huv : u ~[l] v) (huw : u =á¶ [l] w) :
    w ~[l] v :=
  huv.congr' (huw.sub (EventuallyEq.refl _ _)) (EventuallyEq.refl _ _)

/-- `u ~[l] ·` is stable under eventual equality on the right. -/
theorem IsEquivalent.congr_right {u v w : α â β} {l : Filter α} (huv : u ~[l] v) (hvw : v =á¶ [l] w) :
    u ~[l] w :=
  (huv.symm.congr_left hvw).symm
/-- `u ~[l] 0` iff `u` is eventually zero along `l`. -/
theorem isEquivalent_zero_iff_eventually_zero : u ~[l] 0 â u =á¶ [l] 0 := by
  rw [IsEquivalent, sub_zero]
  exact isLittleO_zero_right_iff

/-- `u ~[l] 0` iff `u` is big-O of the zero function. -/
theorem isEquivalent_zero_iff_isBigO_zero : u ~[l] 0 â u =O[l] (0 : α â β) := by
  refine âšIsEquivalent.isBigO, fun h ⊠?_â©
  rw [isEquivalent_zero_iff_eventually_zero, eventuallyEq_iff_exists_mem]
  exact âš{ x : α | u x = 0 }, isBigO_zero_right_iff.mp h, fun x hx ⊠hxâ©

/-- For a nonzero constant `c`, `u ~[l] const _ c` iff `u` tends to `c` along `l`. -/
theorem isEquivalent_const_iff_tendsto {c : β} (h : c â 0) :
    u ~[l] const _ c â Tendsto u l (ð c) := by
  simp (config := { unfoldPartialApp := true }) only [IsEquivalent, const, isLittleO_const_iff h]
  -- Both directions translate `u - c â 0` into `u â c` by adding/subtracting the constant.
  constructor <;> intro h
  · have := h.sub (tendsto_const_nhds (x := -c))
    simp only [Pi.sub_apply, sub_neg_eq_add, sub_add_cancel, zero_add] at this
    exact this
  · have := h.sub (tendsto_const_nhds (x := c))
    rwa [sub_self] at this

/-- Equivalence to a constant function forces convergence to that constant (including `c = 0`). -/
theorem IsEquivalent.tendsto_const {c : β} (hu : u ~[l] const _ c) : Tendsto u l (ð c) := by
  rcases em <| c = 0 with rfl | h
  · exact (tendsto_congr' <| isEquivalent_zero_iff_eventually_zero.mp hu).mpr tendsto_const_nhds
  · exact (isEquivalent_const_iff_tendsto h).mp hu

/-- Equivalent functions converge to the same limits. -/
theorem IsEquivalent.tendsto_nhds {c : β} (huv : u ~[l] v) (hu : Tendsto u l (ð c)) :
    Tendsto v l (ð c) := by
  by_cases h : c = 0
  · subst c
    rw [â isLittleO_one_iff â] at hu â¢
    simpa using (huv.symm.isLittleO.trans hu).add hu
  · rw [â isEquivalent_const_iff_tendsto h] at hu â¢
    exact huv.symm.trans hu

/-- Equivalent functions converge to the same limits (iff version). -/
theorem IsEquivalent.tendsto_nhds_iff {c : β} (huv : u ~[l] v) :
    Tendsto u l (ð c) â Tendsto v l (ð c) :=
  âšhuv.tendsto_nhds, huv.symm.tendsto_nhdsâ©

/-- Adding a little-o of `v` preserves equivalence to `v`. -/
theorem IsEquivalent.add_isLittleO (huv : u ~[l] v) (hwv : w =o[l] v) : u + w ~[l] v := by
  simpa only [IsEquivalent, add_sub_right_comm] using huv.add hwv

/-- Subtracting a little-o of `v` preserves equivalence to `v`. -/
theorem IsEquivalent.sub_isLittleO (huv : u ~[l] v) (hwv : w =o[l] v) : u - w ~[l] v := by
  simpa only [sub_eq_add_neg] using huv.add_isLittleO hwv.neg_left

/-- Adding a little-o of `w` on the left preserves equivalence to `w`. -/
theorem IsLittleO.add_isEquivalent (hu : u =o[l] w) (hv : v ~[l] w) : u + v ~[l] w :=
  add_comm v u âž hv.add_isLittleO hu

/-- The little-o statement `(u - v) =o[l] v` is definitionally `u ~[l] v`. -/
theorem IsLittleO.isEquivalent (huv : (u - v) =o[l] v) : u ~[l] v := huv

/-- Asymptotic equivalence is preserved by pointwise negation. -/
theorem IsEquivalent.neg (huv : u ~[l] v) : (fun x ⊠-u x) ~[l] fun x ⊠-v x := by
  rw [IsEquivalent]
  convert huv.isLittleO.neg_left.neg_right
  simp [neg_add_eq_sub]
end NormedAddCommGroup
open Asymptotics
section NormedField
variable {α β : Type*} [NormedField β] {t u v w : α â β} {l : Filter α}
/-- Over a normed field, `u ~[l] v` iff `u` is eventually a multiplicative perturbation
`Ï * v` of `v` with `Ï â 1`. -/
theorem isEquivalent_iff_exists_eq_mul :
    u ~[l] v â â (Ï : α â β) (_ : Tendsto Ï l (ð 1)), u =á¶ [l] Ï * v := by
  rw [IsEquivalent, isLittleO_iff_exists_eq_mul]
  -- Translate between `Ï â 0` (for `u - v`) and `Ï â 1` (for `u`) by shifting by `1`.
  constructor <;> rintro âšÏ, hÏ, hâ© <;> [refine âšÏ + 1, ?_, ?_â©; refine âšÏ - 1, ?_, ?_â©]
  · conv in ð _ => rw [â zero_add (1 : β)]
    exact hÏ.add tendsto_const_nhds
  · convert h.add (EventuallyEq.refl l v) <;> simp [add_mul]
  · conv in ð _ => rw [â sub_self (1 : β)]
    exact hÏ.sub tendsto_const_nhds
  · convert h.sub (EventuallyEq.refl l v); simp [sub_mul]

/-- Forward direction of `isEquivalent_iff_exists_eq_mul`. -/
theorem IsEquivalent.exists_eq_mul (huv : u ~[l] v) :
    â (Ï : α â β) (_ : Tendsto Ï l (ð 1)), u =á¶ [l] Ï * v :=
  isEquivalent_iff_exists_eq_mul.mp huv

/-- If `u / v â 1` and `u` vanishes wherever `v` eventually does, then `u ~[l] v`. -/
theorem isEquivalent_of_tendsto_one (hz : âá¶ x in l, v x = 0 â u x = 0)
    (huv : Tendsto (u / v) l (ð 1)) : u ~[l] v := by
  rw [isEquivalent_iff_exists_eq_mul]
  exact âšu / v, huv, hz.mono fun x hz' ⊠(div_mul_cancel_of_imp hz').symmâ©

/-- Variant of `isEquivalent_of_tendsto_one` with a pointwise vanishing hypothesis. -/
theorem isEquivalent_of_tendsto_one' (hz : â x, v x = 0 â u x = 0) (huv : Tendsto (u / v) l (ð 1)) :
    u ~[l] v :=
  isEquivalent_of_tendsto_one (eventually_of_forall hz) huv

/-- When `v` is eventually nonzero along `l`, `u ~[l] v` is exactly `u / v â 1`. -/
theorem isEquivalent_iff_tendsto_one (hz : âá¶ x in l, v x â 0) :
    u ~[l] v â Tendsto (u / v) l (ð 1) := by
  constructor
  · intro hequiv
    have := hequiv.isLittleO.tendsto_div_nhds_zero
    simp only [Pi.sub_apply, sub_div] at this
    -- `u/v = (u - v)/v + v/v`, where the first summand tends to `0` and the second to `1`.
    have key : Tendsto (fun x ⊠v x / v x) l (ð 1) :=
      (tendsto_congr' <| hz.mono fun x hnz ⊠@div_self _ _ (v x) hnz).mpr tendsto_const_nhds
    convert this.add key
    · simp
    · norm_num
  · exact isEquivalent_of_tendsto_one (hz.mono fun x hnvz hz ⊠(hnvz hz).elim)
end NormedField
section SMul
/-- Asymptotic equivalence is compatible with scalar multiplication: if `a ~[l] b` and
`u ~[l] v`, then `a ⢠u ~[l] b ⢠v`. -/
theorem IsEquivalent.smul {α E ð : Type*} [NormedField ð] [NormedAddCommGroup E] [NormedSpace ð E]
    {a b : α â ð} {u v : α â E} {l : Filter α} (hab : a ~[l] b) (huv : u ~[l] v) :
    (fun x ⊠a x ⢠u x) ~[l] fun x ⊠b x ⢠v x := by
  -- Write `a = Ï * b` with `Ï â 1`; the difference then factors as `b ⢠(Ï ⢠u - v)`.
  rcases hab.exists_eq_mul with âšÏ, hÏ, habÏâ©
  have : ((fun x ⊠a x ⢠u x) - (fun x ⊠b x ⢠v x)) =á¶ [l] fun x ⊠b x ⢠(Ï x ⢠u x - v x) := by
    -- Porting note: `convert` has become too strong, so we need to specify `using 1`.
    convert (habÏ.compâ (· ⢠·) <| EventuallyEq.refl _ u).sub
      (EventuallyEq.refl _ fun x ⊠b x ⢠v x) using 1
    ext
    rw [Pi.mul_apply, mul_comm, mul_smul, â smul_sub]
  refine (isLittleO_congr this.symm <| EventuallyEq.rfl).mp ((isBigO_refl b l).smul_isLittleO ?_)
  -- It remains to show `Ï ⢠u - v =o[l] v`; unfold everything to explicit `ε`-bounds.
  rcases huv.isBigO.exists_pos with âšC, hC, hCuvâ©
  rw [IsEquivalent] at *
  rw [isLittleO_iff] at *
  rw [IsBigOWith] at hCuv
  simp only [Metric.tendsto_nhds, dist_eq_norm] at hÏ
  intro c hc
  -- Split the `c`-bound into a `c/2` contribution from `Ï - 1` and one from `u - v`.
  specialize hÏ (c / 2 / C) (div_pos (div_pos hc zero_lt_two) hC)
  specialize huv (div_pos hc zero_lt_two)
  refine hÏ.mp (huv.mp <| hCuv.mono fun x hCuvx huvx hÏx ⊠?_)
  have key :=
    calc
      âÏ x - 1â * âu xâ †c / 2 / C * âu xâ := by gcongr
      _ †c / 2 / C * (C * âv xâ) := by gcongr
      _ = c / 2 * âv xâ := by
        field_simp [hC.ne.symm]
        ring
  calc
    â((fun x : α âŠ Ï x ⢠u x) - v) xâ = â(Ï x - 1) ⢠u x + (u x - v x)â := by
      simp [sub_smul, sub_add]
    _ †â(Ï x - 1) ⢠u xâ + âu x - v xâ := norm_add_le _ _
    _ = âÏ x - 1â * âu xâ + âu x - v xâ := by rw [norm_smul]
    _ †c / 2 * âv xâ + âu x - v xâ := by gcongr
    _ †c / 2 * âv xâ + c / 2 * âv xâ := by gcongr; exact huvx
    _ = c * âv xâ := by ring
end SMul
section mul_inv
variable {α β : Type*} [NormedField β] {t u v w : α â β} {l : Filter α}
/-- Asymptotic equivalence is compatible with pointwise multiplication. -/
theorem IsEquivalent.mul (htu : t ~[l] u) (hvw : v ~[l] w) : t * v ~[l] u * w :=
  htu.smul hvw

/-- Asymptotic equivalence is compatible with pointwise inversion. -/
theorem IsEquivalent.inv (huv : u ~[l] v) : (fun x ⊠(u x)â»Â¹) ~[l] fun x ⊠(v x)â»Â¹ := by
  rw [isEquivalent_iff_exists_eq_mul] at *
  rcases huv with âšÏ, hÏ, hâ©
  rw [â inv_one]
  -- If `u = Ï * v` eventually with `Ï â 1`, then `uâ»Â¹ = Ïâ»Â¹ * vâ»Â¹` eventually with `Ïâ»Â¹ â 1`.
  refine âšfun x ⊠(Ï x)â»Â¹, Tendsto.invâ hÏ (by norm_num), ?_â©
  convert h.inv
  simp [mul_comm]

/-- Asymptotic equivalence is compatible with pointwise division. -/
theorem IsEquivalent.div (htu : t ~[l] u) (hvw : v ~[l] w) :
    (fun x ⊠t x / v x) ~[l] fun x ⊠u x / w x := by
  simpa only [div_eq_mul_inv] using htu.mul hvw.inv
end mul_inv
section NormedLinearOrderedField
variable {α β : Type*} [NormedLinearOrderedField β] {u v : α â β} {l : Filter α}
/-- If `u ~[l] v` and `u` tends to `+â` along `l`, then so does `v`. -/
theorem IsEquivalent.tendsto_atTop [OrderTopology β] (huv : u ~[l] v) (hu : Tendsto u l atTop) :
    Tendsto v l atTop :=
  let âšÏ, hÏ, hâ© := huv.symm.exists_eq_mul
  Tendsto.congr' h.symm (mul_comm u Ï âž hu.atTop_mul zero_lt_one hÏ)

/-- For equivalent functions, tending to `+â` is a shared property. -/
theorem IsEquivalent.tendsto_atTop_iff [OrderTopology β] (huv : u ~[l] v) :
    Tendsto u l atTop â Tendsto v l atTop :=
  âšhuv.tendsto_atTop, huv.symm.tendsto_atTopâ©

/-- If `u ~[l] v` and `u` tends to `-â` along `l`, then so does `v`. -/
theorem IsEquivalent.tendsto_atBot [OrderTopology β] (huv : u ~[l] v) (hu : Tendsto u l atBot) :
    Tendsto v l atBot := by
  -- Reduce to the `atTop` statement via negation.
  convert tendsto_neg_atTop_atBot.comp (huv.neg.tendsto_atTop <| tendsto_neg_atBot_atTop.comp hu)
  ext
  simp

/-- For equivalent functions, tending to `-â` is a shared property. -/
theorem IsEquivalent.tendsto_atBot_iff [OrderTopology β] (huv : u ~[l] v) :
    Tendsto u l atBot â Tendsto v l atBot :=
  âšhuv.tendsto_atBot, huv.symm.tendsto_atBotâ©
end NormedLinearOrderedField
end Asymptotics
open Filter Asymptotics
open Asymptotics
variable {α β β₂ : Type*} [NormedAddCommGroup β] [Norm β₂] {l : Filter α}

/-- Eventually equal functions are asymptotically equivalent. -/
theorem Filter.EventuallyEq.isEquivalent {u v : α → β} (h : u =ᶠ[l] v) : u ~[l] v :=
  IsEquivalent.congr_right (isLittleO_refl_left _ _) h

/-- `calc`-friendly transitivity: an eventual equality followed by an equivalence. -/
@[trans]
theorem Filter.EventuallyEq.trans_isEquivalent {f g₁ g₂ : α → β} (h : f =ᶠ[l] g₁)
    (h₂ : g₁ ~[l] g₂) : f ~[l] g₂ :=
  h.isEquivalent.trans h₂
namespace Asymptotics

-- `Trans` instances below let `calc` blocks chain `~[l]` with itself, with `=ᶠ[l]`,
-- and with `=O[l]`, `=o[l]`, `=Θ[l]` in either order.

instance transIsEquivalentIsEquivalent :
    @Trans (α → β) (α → β) (α → β) (IsEquivalent l) (IsEquivalent l) (IsEquivalent l) where
  trans := IsEquivalent.trans

instance transEventuallyEqIsEquivalent :
    @Trans (α → β) (α → β) (α → β) (EventuallyEq l) (IsEquivalent l) (IsEquivalent l) where
  trans := EventuallyEq.trans_isEquivalent

/-- An equivalence followed by an eventual equality is an equivalence. -/
@[trans]
theorem IsEquivalent.trans_eventuallyEq {f g₁ g₂ : α → β} (h : f ~[l] g₁)
    (h₂ : g₁ =ᶠ[l] g₂) : f ~[l] g₂ :=
  h.trans h₂.isEquivalent

instance transIsEquivalentEventuallyEq :
    @Trans (α → β) (α → β) (α → β) (IsEquivalent l) (EventuallyEq l) (IsEquivalent l) where
  trans := IsEquivalent.trans_eventuallyEq

/-- An equivalence can be chained with a big-O bound. -/
@[trans]
theorem IsEquivalent.trans_isBigO {f g₁ : α → β} {g₂ : α → β₂} (h : f ~[l] g₁) (h₂ : g₁ =O[l] g₂) :
    f =O[l] g₂ :=
  IsBigO.trans h.isBigO h₂

instance transIsEquivalentIsBigO :
    @Trans (α → β) (α → β) (α → β₂) (IsEquivalent l) (IsBigO l) (IsBigO l) where
  trans := IsEquivalent.trans_isBigO

/-- A big-O bound can be chained with an equivalence. -/
@[trans]
theorem IsBigO.trans_isEquivalent {f : α → β₂} {g₁ g₂ : α → β} (h : f =O[l] g₁) (h₂ : g₁ ~[l] g₂) :
    f =O[l] g₂ :=
  IsBigO.trans h h₂.isBigO

instance transIsBigOIsEquivalent :
    @Trans (α → β₂) (α → β) (α → β) (IsBigO l) (IsEquivalent l) (IsBigO l) where
  trans := IsBigO.trans_isEquivalent

/-- An equivalence can be chained with a little-o bound. -/
@[trans]
theorem IsEquivalent.trans_isLittleO {f g₁ : α → β} {g₂ : α → β₂} (h : f ~[l] g₁)
    (h₂ : g₁ =o[l] g₂) : f =o[l] g₂ :=
  IsBigO.trans_isLittleO h.isBigO h₂

instance transIsEquivalentIsLittleO :
    @Trans (α → β) (α → β) (α → β₂) (IsEquivalent l) (IsLittleO l) (IsLittleO l) where
  trans := IsEquivalent.trans_isLittleO

/-- A little-o bound can be chained with an equivalence. -/
@[trans]
theorem IsLittleO.trans_isEquivalent {f : α → β₂} {g₁ g₂ : α → β} (h : f =o[l] g₁)
    (h₂ : g₁ ~[l] g₂) : f =o[l] g₂ :=
  IsLittleO.trans_isBigO h h₂.isBigO

instance transIsLittleOIsEquivalent :
    @Trans (α → β₂) (α → β) (α → β) (IsLittleO l) (IsEquivalent l) (IsLittleO l) where
  trans := IsLittleO.trans_isEquivalent

/-- An equivalence can be chained with a big-Theta bound. -/
@[trans]
theorem IsEquivalent.trans_isTheta {f g₁ : α → β} {g₂ : α → β₂} (h : f ~[l] g₁)
    (h₂ : g₁ =Θ[l] g₂) : f =Θ[l] g₂ :=
  IsTheta.trans h.isTheta h₂

instance transIsEquivalentIsTheta :
    @Trans (α → β) (α → β) (α → β₂) (IsEquivalent l) (IsTheta l) (IsTheta l) where
  trans := IsEquivalent.trans_isTheta

/-- A big-Theta bound can be chained with an equivalence. -/
@[trans]
theorem IsTheta.trans_isEquivalent {f : α → β₂} {g₁ g₂ : α → β} (h : f =Θ[l] g₁)
    (h₂ : g₁ ~[l] g₂) : f =Θ[l] g₂ :=
  IsTheta.trans h h₂.isTheta

instance transIsThetaIsEquivalent :
    @Trans (α → β₂) (α → β) (α → β) (IsTheta l) (IsEquivalent l) (IsTheta l) where
  trans := IsTheta.trans_isEquivalent

end Asymptotics
|
Analysis\Asymptotics\Asymptotics.lean | /-
Copyright (c) 2019 Jeremy Avigad. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Jeremy Avigad, Yury Kudryashov
-/
import Mathlib.Analysis.Normed.Group.InfiniteSum
import Mathlib.Analysis.Normed.MulAction
import Mathlib.Topology.Algebra.Order.LiminfLimsup
import Mathlib.Topology.PartialHomeomorph
/-!
# Asymptotics
We introduce these relations:
* `IsBigOWith c l f g` : "f is big O of g along l with constant c";
* `f =O[l] g` : "f is big O of g along l";
* `f =o[l] g` : "f is little o of g along l".
Here `l` is any filter on the domain of `f` and `g`, which are assumed to be the same. The codomains
of `f` and `g` do not need to be the same; all that is needed that there is a norm associated with
these types, and it is the norm that is compared asymptotically.
The relation `IsBigOWith c` is introduced to factor out common algebraic arguments in the proofs of
similar properties of `IsBigO` and `IsLittleO`. Usually proofs outside of this file should use
`IsBigO` instead.
Often the ranges of `f` and `g` will be the real numbers, in which case the norm is the absolute
value. In general, we have
`f =O[l] g â (fun x ⊠âf xâ) =O[l] (fun x ⊠âg xâ)`,
and similarly for `IsLittleO`. But our setup allows us to use the notions e.g. with functions
to the integers, rationals, complex numbers, or any normed vector space without mentioning the
norm explicitly.
If `f` and `g` are functions to a normed field like the reals or complex numbers and `g` is always
nonzero, we have
`f =o[l] g â Tendsto (fun x ⊠f x / (g x)) l (ð 0)`.
In fact, the right-to-left direction holds without the hypothesis on `g`, and in the other direction
it suffices to assume that `f` is zero wherever `g` is. (This generalization is useful in defining
the Fréchet derivative.)
-/
open Filter Set
open scoped Classical
open Topology Filter NNReal
namespace Asymptotics
variable {α : Type*} {β : Type*} {E : Type*} {F : Type*} {G : Type*} {E' : Type*}
{F' : Type*} {G' : Type*} {E'' : Type*} {F'' : Type*} {G'' : Type*} {E''' : Type*}
{R : Type*} {R' : Type*} {ð : Type*} {ð' : Type*}
variable [Norm E] [Norm F] [Norm G]
variable [SeminormedAddCommGroup E'] [SeminormedAddCommGroup F'] [SeminormedAddCommGroup G']
[NormedAddCommGroup E''] [NormedAddCommGroup F''] [NormedAddCommGroup G''] [SeminormedRing R]
[SeminormedAddGroup E''']
[SeminormedRing R']
variable [NormedDivisionRing ð] [NormedDivisionRing ð']
variable {c c' câ câ : â} {f : α â E} {g : α â F} {k : α â G}
variable {f' : α â E'} {g' : α â F'} {k' : α â G'}
variable {f'' : α â E''} {g'' : α â F''} {k'' : α â G''}
variable {l l' : Filter α}
section Defs

/-! ### Definitions -/

/-- This version of the Landau notation `IsBigOWith C l f g` where `f` and `g` are two functions on
a type `α` and `l` is a filter on `α`, means that eventually for `l`, `‖f‖` is bounded by `C * ‖g‖`.
In other words, `‖f‖ / ‖g‖` is eventually bounded by `C`, modulo division by zero issues that are
avoided by this definition. Probably you want to use `IsBigO` instead of this relation. -/
irreducible_def IsBigOWith (c : ℝ) (l : Filter α) (f : α → E) (g : α → F) : Prop :=
  ∀ᶠ x in l, ‖f x‖ ≤ c * ‖g x‖

/-- Definition of `IsBigOWith`. We record it in a lemma as `IsBigOWith` is irreducible. -/
theorem isBigOWith_iff : IsBigOWith c l f g ↔ ∀ᶠ x in l, ‖f x‖ ≤ c * ‖g x‖ := by rw [IsBigOWith_def]

alias ⟨IsBigOWith.bound, IsBigOWith.of_bound⟩ := isBigOWith_iff

/-- The Landau notation `f =O[l] g` where `f` and `g` are two functions on a type `α` and `l` is
a filter on `α`, means that eventually for `l`, `‖f‖` is bounded by a constant multiple of `‖g‖`.
In other words, `‖f‖ / ‖g‖` is eventually bounded, modulo division by zero issues that are avoided
by this definition. -/
irreducible_def IsBigO (l : Filter α) (f : α → E) (g : α → F) : Prop :=
  ∃ c : ℝ, IsBigOWith c l f g

@[inherit_doc]
notation:100 f " =O[" l "] " g:100 => IsBigO l f g

/-- Definition of `IsBigO` in terms of `IsBigOWith`. We record it in a lemma as `IsBigO` is
irreducible. -/
theorem isBigO_iff_isBigOWith : f =O[l] g ↔ ∃ c : ℝ, IsBigOWith c l f g := by rw [IsBigO_def]

/-- Definition of `IsBigO` in terms of filters. -/
theorem isBigO_iff : f =O[l] g ↔ ∃ c : ℝ, ∀ᶠ x in l, ‖f x‖ ≤ c * ‖g x‖ := by
  simp only [IsBigO_def, IsBigOWith_def]

/-- Definition of `IsBigO` in terms of filters, with a positive constant. -/
theorem isBigO_iff' {g : α → E'''} :
    f =O[l] g ↔ ∃ c > 0, ∀ᶠ x in l, ‖f x‖ ≤ c * ‖g x‖ := by
  refine ⟨fun h => ?mp, fun h => ?mpr⟩
  case mp =>
    rw [isBigO_iff] at h
    obtain ⟨c, hc⟩ := h
    -- `max c 1` keeps the bound valid while forcing positivity
    refine ⟨max c 1, zero_lt_one.trans_le (le_max_right _ _), ?_⟩
    filter_upwards [hc] with x hx
    apply hx.trans
    gcongr
    exact le_max_left _ _
  case mpr =>
    rw [isBigO_iff]
    obtain ⟨c, ⟨_, hc⟩⟩ := h
    exact ⟨c, hc⟩

/-- Definition of `IsBigO` in terms of filters, with the constant in the lower bound. -/
theorem isBigO_iff'' {g : α → E'''} :
    f =O[l] g ↔ ∃ c > 0, ∀ᶠ x in l, c * ‖f x‖ ≤ ‖g x‖ := by
  refine ⟨fun h => ?mp, fun h => ?mpr⟩
  case mp =>
    rw [isBigO_iff'] at h
    obtain ⟨c, ⟨hc_pos, hc⟩⟩ := h
    refine ⟨c⁻¹, ⟨by positivity, ?_⟩⟩
    filter_upwards [hc] with x hx
    rwa [inv_mul_le_iff (by positivity)]
  case mpr =>
    rw [isBigO_iff']
    obtain ⟨c, ⟨hc_pos, hc⟩⟩ := h
    refine ⟨c⁻¹, ⟨by positivity, ?_⟩⟩
    filter_upwards [hc] with x hx
    rwa [← inv_inv c, inv_mul_le_iff (by positivity)] at hx

theorem IsBigO.of_bound (c : ℝ) (h : ∀ᶠ x in l, ‖f x‖ ≤ c * ‖g x‖) : f =O[l] g :=
  isBigO_iff.2 ⟨c, h⟩

theorem IsBigO.of_bound' (h : ∀ᶠ x in l, ‖f x‖ ≤ ‖g x‖) : f =O[l] g :=
  IsBigO.of_bound 1 <| by
    simp_rw [one_mul]
    exact h

theorem IsBigO.bound : f =O[l] g → ∃ c : ℝ, ∀ᶠ x in l, ‖f x‖ ≤ c * ‖g x‖ :=
  isBigO_iff.1

/-- The Landau notation `f =o[l] g` where `f` and `g` are two functions on a type `α` and `l` is
a filter on `α`, means that eventually for `l`, `‖f‖` is bounded by an arbitrarily small constant
multiple of `‖g‖`. In other words, `‖f‖ / ‖g‖` tends to `0` along `l`, modulo division by zero
issues that are avoided by this definition. -/
irreducible_def IsLittleO (l : Filter α) (f : α → E) (g : α → F) : Prop :=
  ∀ ⦃c : ℝ⦄, 0 < c → IsBigOWith c l f g

@[inherit_doc]
notation:100 f " =o[" l "] " g:100 => IsLittleO l f g

/-- Definition of `IsLittleO` in terms of `IsBigOWith`. -/
theorem isLittleO_iff_forall_isBigOWith : f =o[l] g ↔ ∀ ⦃c : ℝ⦄, 0 < c → IsBigOWith c l f g := by
  rw [IsLittleO_def]

alias ⟨IsLittleO.forall_isBigOWith, IsLittleO.of_isBigOWith⟩ := isLittleO_iff_forall_isBigOWith

/-- Definition of `IsLittleO` in terms of filters. -/
theorem isLittleO_iff : f =o[l] g ↔ ∀ ⦃c : ℝ⦄, 0 < c → ∀ᶠ x in l, ‖f x‖ ≤ c * ‖g x‖ := by
  simp only [IsLittleO_def, IsBigOWith_def]

alias ⟨IsLittleO.bound, IsLittleO.of_bound⟩ := isLittleO_iff

theorem IsLittleO.def (h : f =o[l] g) (hc : 0 < c) : ∀ᶠ x in l, ‖f x‖ ≤ c * ‖g x‖ :=
  isLittleO_iff.1 h hc

theorem IsLittleO.def' (h : f =o[l] g) (hc : 0 < c) : IsBigOWith c l f g :=
  isBigOWith_iff.2 <| isLittleO_iff.1 h hc

/-- A little-o bound gives an eventual norm inequality (take `c = 1`). -/
theorem IsLittleO.eventuallyLE (h : f =o[l] g) : ∀ᶠ x in l, ‖f x‖ ≤ ‖g x‖ := by
  simpa using h.def zero_lt_one

end Defs
/-! ### Conversions -/

theorem IsBigOWith.isBigO (h : IsBigOWith c l f g) : f =O[l] g := by rw [IsBigO_def]; exact ⟨c, h⟩

/-- A little-o bound is in particular a big-O bound with constant `1`. -/
theorem IsLittleO.isBigOWith (hgf : f =o[l] g) : IsBigOWith 1 l f g :=
  hgf.def' zero_lt_one

theorem IsLittleO.isBigO (hgf : f =o[l] g) : f =O[l] g :=
  hgf.isBigOWith.isBigO

theorem IsBigO.isBigOWith : f =O[l] g → ∃ c : ℝ, IsBigOWith c l f g :=
  isBigO_iff_isBigOWith.1

/-- A big-O bound may always be weakened to a larger constant. -/
theorem IsBigOWith.weaken (h : IsBigOWith c l f g') (hc : c ≤ c') : IsBigOWith c' l f g' :=
  IsBigOWith.of_bound <|
    mem_of_superset h.bound fun x hx =>
      calc
        ‖f x‖ ≤ c * ‖g' x‖ := hx
        _ ≤ _ := by gcongr

theorem IsBigOWith.exists_pos (h : IsBigOWith c l f g') :
    ∃ c' > 0, IsBigOWith c' l f g' :=
  ⟨max c 1, lt_of_lt_of_le zero_lt_one (le_max_right c 1), h.weaken <| le_max_left c 1⟩

theorem IsBigO.exists_pos (h : f =O[l] g') : ∃ c > 0, IsBigOWith c l f g' :=
  let ⟨_c, hc⟩ := h.isBigOWith
  hc.exists_pos

theorem IsBigOWith.exists_nonneg (h : IsBigOWith c l f g') :
    ∃ c' ≥ 0, IsBigOWith c' l f g' :=
  let ⟨c, cpos, hc⟩ := h.exists_pos
  ⟨c, le_of_lt cpos, hc⟩

theorem IsBigO.exists_nonneg (h : f =O[l] g') : ∃ c ≥ 0, IsBigOWith c l f g' :=
  let ⟨_c, hc⟩ := h.isBigOWith
  hc.exists_nonneg

/-- `f = O(g)` if and only if `IsBigOWith c f g` for all sufficiently large `c`. -/
theorem isBigO_iff_eventually_isBigOWith : f =O[l] g' ↔ ∀ᶠ c in atTop, IsBigOWith c l f g' :=
  isBigO_iff_isBigOWith.trans
    ⟨fun ⟨c, hc⟩ => mem_atTop_sets.2 ⟨c, fun _c' hc' => hc.weaken hc'⟩, fun h => h.exists⟩

/-- `f = O(g)` if and only if `∀ᶠ x in l, ‖f x‖ ≤ c * ‖g x‖` for all sufficiently large `c`. -/
theorem isBigO_iff_eventually : f =O[l] g' ↔ ∀ᶠ c in atTop, ∀ᶠ x in l, ‖f x‖ ≤ c * ‖g' x‖ :=
  isBigO_iff_eventually_isBigOWith.trans <| by simp only [IsBigOWith_def]

/-- A big-O bound can be restated on the sets of any basis of the filter. -/
theorem IsBigO.exists_mem_basis {ι} {p : ι → Prop} {s : ι → Set α} (h : f =O[l] g')
    (hb : l.HasBasis p s) :
    ∃ c > 0, ∃ i : ι, p i ∧ ∀ x ∈ s i, ‖f x‖ ≤ c * ‖g' x‖ :=
  flip Exists.imp h.exists_pos fun c h => by
    simpa only [isBigOWith_iff, hb.eventually_iff, exists_prop] using h

theorem isBigOWith_inv (hc : 0 < c) : IsBigOWith c⁻¹ l f g ↔ ∀ᶠ x in l, c * ‖f x‖ ≤ ‖g x‖ := by
  simp only [IsBigOWith_def, ← div_eq_inv_mul, le_div_iff' hc]
-- We prove this lemma with strange assumptions to get two lemmas below automatically
theorem isLittleO_iff_nat_mul_le_aux (h₀ : (∀ x, 0 ≤ ‖f x‖) ∨ ∀ x, 0 ≤ ‖g x‖) :
    f =o[l] g ↔ ∀ n : ℕ, ∀ᶠ x in l, ↑n * ‖f x‖ ≤ ‖g x‖ := by
  constructor
  · rintro H (_ | n)
    -- `n = 0`: the bound is trivial from nonnegativity
    · refine (H.def one_pos).mono fun x h₀' => ?_
      rw [Nat.cast_zero, zero_mul]
      refine h₀.elim (fun hf => (hf x).trans ?_) fun hg => hg x
      rwa [one_mul] at h₀'
    -- `n + 1`: apply the little-o bound with constant `(n + 1)⁻¹`
    · have : (0 : ℝ) < n.succ := Nat.cast_pos.2 n.succ_pos
      exact (isBigOWith_inv this).1 (H.def' <| inv_pos.2 this)
  · refine fun H => isLittleO_iff.2 fun ε ε0 => ?_
    -- pick `n > ε⁻¹`, so that `n⁻¹ < ε`
    rcases exists_nat_gt ε⁻¹ with ⟨n, hn⟩
    have hn₀ : (0 : ℝ) < n := (inv_pos.2 ε0).trans hn
    refine ((isBigOWith_inv hn₀).2 (H n)).bound.mono fun x hfg => ?_
    refine hfg.trans (mul_le_mul_of_nonneg_right (inv_le_of_inv_le ε0 hn.le) ?_)
    refine h₀.elim (fun hf => nonneg_of_mul_nonneg_right ((hf x).trans hfg) ?_) fun h => h x
    exact inv_pos.2 hn₀

/-- `f = o(g)` iff `n ‖f x‖ ≤ ‖g x‖` eventually, for every natural `n`. -/
theorem isLittleO_iff_nat_mul_le : f =o[l] g' ↔ ∀ n : ℕ, ∀ᶠ x in l, ↑n * ‖f x‖ ≤ ‖g' x‖ :=
  isLittleO_iff_nat_mul_le_aux (Or.inr fun _x => norm_nonneg _)

/-- Variant of `isLittleO_iff_nat_mul_le` with the seminorm hypothesis on the left. -/
theorem isLittleO_iff_nat_mul_le' : f' =o[l] g ↔ ∀ n : ℕ, ∀ᶠ x in l, ↑n * ‖f' x‖ ≤ ‖g x‖ :=
  isLittleO_iff_nat_mul_le_aux (Or.inl fun _x => norm_nonneg _)
/-! ### Subsingleton -/

/-- In a subsingleton codomain every function is `o` of every function. -/
@[nontriviality]
theorem isLittleO_of_subsingleton [Subsingleton E'] : f' =o[l] g' :=
  IsLittleO.of_bound fun c hc => by simp [Subsingleton.elim (f' _) 0, mul_nonneg hc.le]

/-- In a subsingleton codomain every function is `O` of every function. -/
@[nontriviality]
theorem isBigO_of_subsingleton [Subsingleton E'] : f' =O[l] g' :=
  isLittleO_of_subsingleton.isBigO
section congr

variable {f₁ f₂ : α → E} {g₁ g₂ : α → F}

/-! ### Congruence -/

/-- Big-O-with is invariant under eventual equality of both sides. -/
theorem isBigOWith_congr (hc : c₁ = c₂) (hf : f₁ =ᶠ[l] f₂) (hg : g₁ =ᶠ[l] g₂) :
    IsBigOWith c₁ l f₁ g₁ ↔ IsBigOWith c₂ l f₂ g₂ := by
  simp only [IsBigOWith_def]
  subst c₂
  apply Filter.eventually_congr
  filter_upwards [hf, hg] with _ e₁ e₂
  rw [e₁, e₂]

theorem IsBigOWith.congr' (h : IsBigOWith c₁ l f₁ g₁) (hc : c₁ = c₂) (hf : f₁ =ᶠ[l] f₂)
    (hg : g₁ =ᶠ[l] g₂) : IsBigOWith c₂ l f₂ g₂ :=
  (isBigOWith_congr hc hf hg).mp h

theorem IsBigOWith.congr (h : IsBigOWith c₁ l f₁ g₁) (hc : c₁ = c₂) (hf : ∀ x, f₁ x = f₂ x)
    (hg : ∀ x, g₁ x = g₂ x) : IsBigOWith c₂ l f₂ g₂ :=
  h.congr' hc (univ_mem' hf) (univ_mem' hg)

theorem IsBigOWith.congr_left (h : IsBigOWith c l f₁ g) (hf : ∀ x, f₁ x = f₂ x) :
    IsBigOWith c l f₂ g :=
  h.congr rfl hf fun _ => rfl

theorem IsBigOWith.congr_right (h : IsBigOWith c l f g₁) (hg : ∀ x, g₁ x = g₂ x) :
    IsBigOWith c l f g₂ :=
  h.congr rfl (fun _ => rfl) hg

theorem IsBigOWith.congr_const (h : IsBigOWith c₁ l f g) (hc : c₁ = c₂) : IsBigOWith c₂ l f g :=
  h.congr hc (fun _ => rfl) fun _ => rfl

/-- Big-O is invariant under eventual equality of both sides. -/
theorem isBigO_congr (hf : f₁ =ᶠ[l] f₂) (hg : g₁ =ᶠ[l] g₂) : f₁ =O[l] g₁ ↔ f₂ =O[l] g₂ := by
  simp only [IsBigO_def]
  exact exists_congr fun c => isBigOWith_congr rfl hf hg

theorem IsBigO.congr' (h : f₁ =O[l] g₁) (hf : f₁ =ᶠ[l] f₂) (hg : g₁ =ᶠ[l] g₂) : f₂ =O[l] g₂ :=
  (isBigO_congr hf hg).mp h

theorem IsBigO.congr (h : f₁ =O[l] g₁) (hf : ∀ x, f₁ x = f₂ x) (hg : ∀ x, g₁ x = g₂ x) :
    f₂ =O[l] g₂ :=
  h.congr' (univ_mem' hf) (univ_mem' hg)

theorem IsBigO.congr_left (h : f₁ =O[l] g) (hf : ∀ x, f₁ x = f₂ x) : f₂ =O[l] g :=
  h.congr hf fun _ => rfl

theorem IsBigO.congr_right (h : f =O[l] g₁) (hg : ∀ x, g₁ x = g₂ x) : f =O[l] g₂ :=
  h.congr (fun _ => rfl) hg

/-- Little-o is invariant under eventual equality of both sides. -/
theorem isLittleO_congr (hf : f₁ =ᶠ[l] f₂) (hg : g₁ =ᶠ[l] g₂) : f₁ =o[l] g₁ ↔ f₂ =o[l] g₂ := by
  simp only [IsLittleO_def]
  exact forall₂_congr fun c _hc => isBigOWith_congr (Eq.refl c) hf hg

theorem IsLittleO.congr' (h : f₁ =o[l] g₁) (hf : f₁ =ᶠ[l] f₂) (hg : g₁ =ᶠ[l] g₂) : f₂ =o[l] g₂ :=
  (isLittleO_congr hf hg).mp h

theorem IsLittleO.congr (h : f₁ =o[l] g₁) (hf : ∀ x, f₁ x = f₂ x) (hg : ∀ x, g₁ x = g₂ x) :
    f₂ =o[l] g₂ :=
  h.congr' (univ_mem' hf) (univ_mem' hg)

theorem IsLittleO.congr_left (h : f₁ =o[l] g) (hf : ∀ x, f₁ x = f₂ x) : f₂ =o[l] g :=
  h.congr hf fun _ => rfl

theorem IsLittleO.congr_right (h : f =o[l] g₁) (hg : ∀ x, g₁ x = g₂ x) : f =o[l] g₂ :=
  h.congr (fun _ => rfl) hg

@[trans]
theorem _root_.Filter.EventuallyEq.trans_isBigO {f₁ f₂ : α → E} {g : α → F} (hf : f₁ =ᶠ[l] f₂)
    (h : f₂ =O[l] g) : f₁ =O[l] g :=
  h.congr' hf.symm EventuallyEq.rfl

instance transEventuallyEqIsBigO :
    @Trans (α → E) (α → E) (α → F) (· =ᶠ[l] ·) (· =O[l] ·) (· =O[l] ·) where
  trans := Filter.EventuallyEq.trans_isBigO

@[trans]
theorem _root_.Filter.EventuallyEq.trans_isLittleO {f₁ f₂ : α → E} {g : α → F} (hf : f₁ =ᶠ[l] f₂)
    (h : f₂ =o[l] g) : f₁ =o[l] g :=
  h.congr' hf.symm EventuallyEq.rfl

instance transEventuallyEqIsLittleO :
    @Trans (α → E) (α → E) (α → F) (· =ᶠ[l] ·) (· =o[l] ·) (· =o[l] ·) where
  trans := Filter.EventuallyEq.trans_isLittleO

@[trans]
theorem IsBigO.trans_eventuallyEq {f : α → E} {g₁ g₂ : α → F} (h : f =O[l] g₁) (hg : g₁ =ᶠ[l] g₂) :
    f =O[l] g₂ :=
  h.congr' EventuallyEq.rfl hg

instance transIsBigOEventuallyEq :
    @Trans (α → E) (α → F) (α → F) (· =O[l] ·) (· =ᶠ[l] ·) (· =O[l] ·) where
  trans := IsBigO.trans_eventuallyEq

@[trans]
theorem IsLittleO.trans_eventuallyEq {f : α → E} {g₁ g₂ : α → F} (h : f =o[l] g₁)
    (hg : g₁ =ᶠ[l] g₂) : f =o[l] g₂ :=
  h.congr' EventuallyEq.rfl hg

instance transIsLittleOEventuallyEq :
    @Trans (α → E) (α → F) (α → F) (· =o[l] ·) (· =ᶠ[l] ·) (· =o[l] ·) where
  trans := IsLittleO.trans_eventuallyEq

end congr
/-! ### Filter operations and transitivity -/

theorem IsBigOWith.comp_tendsto (hcfg : IsBigOWith c l f g) {k : β → α} {l' : Filter β}
    (hk : Tendsto k l' l) : IsBigOWith c l' (f ∘ k) (g ∘ k) :=
  IsBigOWith.of_bound <| hk hcfg.bound

theorem IsBigO.comp_tendsto (hfg : f =O[l] g) {k : β → α} {l' : Filter β} (hk : Tendsto k l' l) :
    (f ∘ k) =O[l'] (g ∘ k) :=
  isBigO_iff_isBigOWith.2 <| hfg.isBigOWith.imp fun _c h => h.comp_tendsto hk

theorem IsLittleO.comp_tendsto (hfg : f =o[l] g) {k : β → α} {l' : Filter β} (hk : Tendsto k l' l) :
    (f ∘ k) =o[l'] (g ∘ k) :=
  IsLittleO.of_isBigOWith fun _c cpos => (hfg.forall_isBigOWith cpos).comp_tendsto hk

@[simp]
theorem isBigOWith_map {k : β → α} {l : Filter β} :
    IsBigOWith c (map k l) f g ↔ IsBigOWith c l (f ∘ k) (g ∘ k) := by
  simp only [IsBigOWith_def]
  exact eventually_map

@[simp]
theorem isBigO_map {k : β → α} {l : Filter β} : f =O[map k l] g ↔ (f ∘ k) =O[l] (g ∘ k) := by
  simp only [IsBigO_def, isBigOWith_map]

@[simp]
theorem isLittleO_map {k : β → α} {l : Filter β} : f =o[map k l] g ↔ (f ∘ k) =o[l] (g ∘ k) := by
  simp only [IsLittleO_def, isBigOWith_map]

theorem IsBigOWith.mono (h : IsBigOWith c l' f g) (hl : l ≤ l') : IsBigOWith c l f g :=
  IsBigOWith.of_bound <| hl h.bound

theorem IsBigO.mono (h : f =O[l'] g) (hl : l ≤ l') : f =O[l] g :=
  isBigO_iff_isBigOWith.2 <| h.isBigOWith.imp fun _c h => h.mono hl

theorem IsLittleO.mono (h : f =o[l'] g) (hl : l ≤ l') : f =o[l] g :=
  IsLittleO.of_isBigOWith fun _c cpos => (h.forall_isBigOWith cpos).mono hl

/-- Transitivity of `IsBigOWith`: the constants multiply. -/
theorem IsBigOWith.trans (hfg : IsBigOWith c l f g) (hgk : IsBigOWith c' l g k) (hc : 0 ≤ c) :
    IsBigOWith (c * c') l f k := by
  simp only [IsBigOWith_def] at *
  filter_upwards [hfg, hgk] with x hx hx'
  calc
    ‖f x‖ ≤ c * ‖g x‖ := hx
    _ ≤ c * (c' * ‖k x‖) := by gcongr
    _ = c * c' * ‖k x‖ := (mul_assoc _ _ _).symm

@[trans]
theorem IsBigO.trans {f : α → E} {g : α → F'} {k : α → G} (hfg : f =O[l] g) (hgk : g =O[l] k) :
    f =O[l] k :=
  let ⟨_c, cnonneg, hc⟩ := hfg.exists_nonneg
  let ⟨_c', hc'⟩ := hgk.isBigOWith
  (hc.trans hc' cnonneg).isBigO

instance transIsBigOIsBigO :
    @Trans (α → E) (α → F') (α → G) (· =O[l] ·) (· =O[l] ·) (· =O[l] ·) where
  trans := IsBigO.trans

theorem IsLittleO.trans_isBigOWith (hfg : f =o[l] g) (hgk : IsBigOWith c l g k) (hc : 0 < c) :
    f =o[l] k := by
  simp only [IsLittleO_def] at *
  intro c' c'pos
  have : 0 < c' / c := div_pos c'pos hc
  exact ((hfg this).trans hgk this.le).congr_const (div_mul_cancel₀ _ hc.ne')

@[trans]
theorem IsLittleO.trans_isBigO {f : α → E} {g : α → F} {k : α → G'} (hfg : f =o[l] g)
    (hgk : g =O[l] k) : f =o[l] k :=
  let ⟨_c, cpos, hc⟩ := hgk.exists_pos
  hfg.trans_isBigOWith hc cpos

instance transIsLittleOIsBigO :
    @Trans (α → E) (α → F) (α → G') (· =o[l] ·) (· =O[l] ·) (· =o[l] ·) where
  trans := IsLittleO.trans_isBigO

theorem IsBigOWith.trans_isLittleO (hfg : IsBigOWith c l f g) (hgk : g =o[l] k) (hc : 0 < c) :
    f =o[l] k := by
  simp only [IsLittleO_def] at *
  intro c' c'pos
  have : 0 < c' / c := div_pos c'pos hc
  exact (hfg.trans (hgk this) hc.le).congr_const (mul_div_cancel₀ _ hc.ne')

@[trans]
theorem IsBigO.trans_isLittleO {f : α → E} {g : α → F'} {k : α → G} (hfg : f =O[l] g)
    (hgk : g =o[l] k) : f =o[l] k :=
  let ⟨_c, cpos, hc⟩ := hfg.exists_pos
  hc.trans_isLittleO hgk cpos

instance transIsBigOIsLittleO :
    @Trans (α → E) (α → F') (α → G) (· =O[l] ·) (· =o[l] ·) (· =o[l] ·) where
  trans := IsBigO.trans_isLittleO

@[trans]
theorem IsLittleO.trans {f : α → E} {g : α → F} {k : α → G} (hfg : f =o[l] g) (hgk : g =o[l] k) :
    f =o[l] k :=
  hfg.trans_isBigOWith hgk.isBigOWith one_pos

instance transIsLittleOIsLittleO :
    @Trans (α → E) (α → F) (α → G) (· =o[l] ·) (· =o[l] ·) (· =o[l] ·) where
  trans := IsLittleO.trans

theorem _root_.Filter.Eventually.trans_isBigO {f : α → E} {g : α → F'} {k : α → G}
    (hfg : ∀ᶠ x in l, ‖f x‖ ≤ ‖g x‖) (hgk : g =O[l] k) : f =O[l] k :=
  (IsBigO.of_bound' hfg).trans hgk

theorem _root_.Filter.Eventually.isBigO {f : α → E} {g : α → ℝ} {l : Filter α}
    (hfg : ∀ᶠ x in l, ‖f x‖ ≤ g x) : f =O[l] g :=
  IsBigO.of_bound' <| hfg.mono fun _x hx => hx.trans <| Real.le_norm_self _
section

variable (l)

theorem isBigOWith_of_le' (hfg : ∀ x, ‖f x‖ ≤ c * ‖g x‖) : IsBigOWith c l f g :=
  IsBigOWith.of_bound <| univ_mem' hfg

theorem isBigOWith_of_le (hfg : ∀ x, ‖f x‖ ≤ ‖g x‖) : IsBigOWith 1 l f g :=
  isBigOWith_of_le' l fun x => by
    rw [one_mul]
    exact hfg x

theorem isBigO_of_le' (hfg : ∀ x, ‖f x‖ ≤ c * ‖g x‖) : f =O[l] g :=
  (isBigOWith_of_le' l hfg).isBigO

theorem isBigO_of_le (hfg : ∀ x, ‖f x‖ ≤ ‖g x‖) : f =O[l] g :=
  (isBigOWith_of_le l hfg).isBigO

end

theorem isBigOWith_refl (f : α → E) (l : Filter α) : IsBigOWith 1 l f f :=
  isBigOWith_of_le l fun _ => le_rfl

theorem isBigO_refl (f : α → E) (l : Filter α) : f =O[l] f :=
  (isBigOWith_refl f l).isBigO

theorem _root_.Filter.EventuallyEq.isBigO {f₁ f₂ : α → E} (hf : f₁ =ᶠ[l] f₂) : f₁ =O[l] f₂ :=
  hf.trans_isBigO (isBigO_refl _ _)

theorem IsBigOWith.trans_le (hfg : IsBigOWith c l f g) (hgk : ∀ x, ‖g x‖ ≤ ‖k x‖) (hc : 0 ≤ c) :
    IsBigOWith c l f k :=
  (hfg.trans (isBigOWith_of_le l hgk) hc).congr_const <| mul_one c

theorem IsBigO.trans_le (hfg : f =O[l] g') (hgk : ∀ x, ‖g' x‖ ≤ ‖k x‖) : f =O[l] k :=
  hfg.trans (isBigO_of_le l hgk)

theorem IsLittleO.trans_le (hfg : f =o[l] g) (hgk : ∀ x, ‖g x‖ ≤ ‖k x‖) : f =o[l] k :=
  hfg.trans_isBigOWith (isBigOWith_of_le _ hgk) zero_lt_one

/-- A function whose norm is frequently nonzero is not little-o of itself. -/
theorem isLittleO_irrefl' (h : ∃ᶠ x in l, ‖f' x‖ ≠ 0) : ¬f' =o[l] f' := by
  intro ho
  rcases ((ho.bound one_half_pos).and_frequently h).exists with ⟨x, hle, hne⟩
  rw [one_div, ← div_eq_inv_mul] at hle
  exact (half_lt_self (lt_of_le_of_ne (norm_nonneg _) hne.symm)).not_le hle

theorem isLittleO_irrefl (h : ∃ᶠ x in l, f'' x ≠ 0) : ¬f'' =o[l] f'' :=
  isLittleO_irrefl' <| h.mono fun _x => norm_ne_zero_iff.mpr

theorem IsBigO.not_isLittleO (h : f'' =O[l] g') (hf : ∃ᶠ x in l, f'' x ≠ 0) :
    ¬g' =o[l] f'' := fun h' =>
  isLittleO_irrefl hf (h.trans_isLittleO h')

theorem IsLittleO.not_isBigO (h : f'' =o[l] g') (hf : ∃ᶠ x in l, f'' x ≠ 0) :
    ¬g' =O[l] f'' := fun h' =>
  isLittleO_irrefl hf (h.trans_isBigO h')
section Bot

variable (c f g)

@[simp]
theorem isBigOWith_bot : IsBigOWith c ⊥ f g :=
  IsBigOWith.of_bound <| trivial

@[simp]
theorem isBigO_bot : f =O[⊥] g :=
  (isBigOWith_bot 1 f g).isBigO

@[simp]
theorem isLittleO_bot : f =o[⊥] g :=
  IsLittleO.of_isBigOWith fun c _ => isBigOWith_bot c f g

end Bot

@[simp]
theorem isBigOWith_pure {x} : IsBigOWith c (pure x) f g ↔ ‖f x‖ ≤ c * ‖g x‖ :=
  isBigOWith_iff

theorem IsBigOWith.sup (h : IsBigOWith c l f g) (h' : IsBigOWith c l' f g) :
    IsBigOWith c (l ⊔ l') f g :=
  IsBigOWith.of_bound <| mem_sup.2 ⟨h.bound, h'.bound⟩

/-- Combine two `IsBigOWith` bounds on different filters with the max of the constants. -/
theorem IsBigOWith.sup' (h : IsBigOWith c l f g') (h' : IsBigOWith c' l' f g') :
    IsBigOWith (max c c') (l ⊔ l') f g' :=
  IsBigOWith.of_bound <|
    mem_sup.2 ⟨(h.weaken <| le_max_left c c').bound, (h'.weaken <| le_max_right c c').bound⟩

theorem IsBigO.sup (h : f =O[l] g') (h' : f =O[l'] g') : f =O[l ⊔ l'] g' :=
  let ⟨_c, hc⟩ := h.isBigOWith
  let ⟨_c', hc'⟩ := h'.isBigOWith
  (hc.sup' hc').isBigO

theorem IsLittleO.sup (h : f =o[l] g) (h' : f =o[l'] g) : f =o[l ⊔ l'] g :=
  IsLittleO.of_isBigOWith fun _c cpos => (h.forall_isBigOWith cpos).sup (h'.forall_isBigOWith cpos)

@[simp]
theorem isBigO_sup : f =O[l ⊔ l'] g' ↔ f =O[l] g' ∧ f =O[l'] g' :=
  ⟨fun h => ⟨h.mono le_sup_left, h.mono le_sup_right⟩, fun h => h.1.sup h.2⟩

@[simp]
theorem isLittleO_sup : f =o[l ⊔ l'] g ↔ f =o[l] g ∧ f =o[l'] g :=
  ⟨fun h => ⟨h.mono le_sup_left, h.mono le_sup_right⟩, fun h => h.1.sup h.2⟩

/-- A pointwise bound at `x` lets one add `x` itself to the punctured set of a
within-filter big-O-with bound. -/
theorem isBigOWith_insert [TopologicalSpace α] {x : α} {s : Set α} {C : ℝ} {g : α → E} {g' : α → F}
    (h : ‖g x‖ ≤ C * ‖g' x‖) : IsBigOWith C (𝓝[insert x s] x) g g' ↔
    IsBigOWith C (𝓝[s] x) g g' := by
  simp_rw [IsBigOWith_def, nhdsWithin_insert, eventually_sup, eventually_pure, h, true_and_iff]

protected theorem IsBigOWith.insert [TopologicalSpace α] {x : α} {s : Set α} {C : ℝ} {g : α → E}
    {g' : α → F} (h1 : IsBigOWith C (𝓝[s] x) g g') (h2 : ‖g x‖ ≤ C * ‖g' x‖) :
    IsBigOWith C (𝓝[insert x s] x) g g' :=
  (isBigOWith_insert h2).mpr h1

/-- If `g x = 0` then `x` can be added to the set of a within-filter little-o bound. -/
theorem isLittleO_insert [TopologicalSpace α] {x : α} {s : Set α} {g : α → E'} {g' : α → F'}
    (h : g x = 0) : g =o[𝓝[insert x s] x] g' ↔ g =o[𝓝[s] x] g' := by
  simp_rw [IsLittleO_def]
  refine forall_congr' fun c => forall_congr' fun hc => ?_
  rw [isBigOWith_insert]
  rw [h, norm_zero]
  exact mul_nonneg hc.le (norm_nonneg _)

protected theorem IsLittleO.insert [TopologicalSpace α] {x : α} {s : Set α} {g : α → E'}
    {g' : α → F'} (h1 : g =o[𝓝[s] x] g') (h2 : g x = 0) : g =o[𝓝[insert x s] x] g' :=
  (isLittleO_insert h2).mpr h1
/-! ### Simplification : norm, abs -/

section NormAbs

variable {u v : α → ℝ}

-- Wrapping either side of an asymptotic relation in `‖·‖` (or `|·|` over `ℝ`)
-- does not change the relation, since `‖‖x‖‖ = ‖x‖`.

@[simp]
theorem isBigOWith_norm_right : (IsBigOWith c l f fun x => ‖g' x‖) ↔ IsBigOWith c l f g' := by
  simp only [IsBigOWith_def, norm_norm]

@[simp]
theorem isBigOWith_abs_right : (IsBigOWith c l f fun x => |u x|) ↔ IsBigOWith c l f u :=
  @isBigOWith_norm_right _ _ _ _ _ _ f u l

alias ⟨IsBigOWith.of_norm_right, IsBigOWith.norm_right⟩ := isBigOWith_norm_right

alias ⟨IsBigOWith.of_abs_right, IsBigOWith.abs_right⟩ := isBigOWith_abs_right

@[simp]
theorem isBigO_norm_right : (f =O[l] fun x => ‖g' x‖) ↔ f =O[l] g' := by
  simp only [IsBigO_def]
  exact exists_congr fun _ => isBigOWith_norm_right

@[simp]
theorem isBigO_abs_right : (f =O[l] fun x => |u x|) ↔ f =O[l] u :=
  @isBigO_norm_right _ _ ℝ _ _ _ _ _

alias ⟨IsBigO.of_norm_right, IsBigO.norm_right⟩ := isBigO_norm_right

alias ⟨IsBigO.of_abs_right, IsBigO.abs_right⟩ := isBigO_abs_right

@[simp]
theorem isLittleO_norm_right : (f =o[l] fun x => ‖g' x‖) ↔ f =o[l] g' := by
  simp only [IsLittleO_def]
  exact forall₂_congr fun _ _ => isBigOWith_norm_right

@[simp]
theorem isLittleO_abs_right : (f =o[l] fun x => |u x|) ↔ f =o[l] u :=
  @isLittleO_norm_right _ _ ℝ _ _ _ _ _

alias ⟨IsLittleO.of_norm_right, IsLittleO.norm_right⟩ := isLittleO_norm_right

alias ⟨IsLittleO.of_abs_right, IsLittleO.abs_right⟩ := isLittleO_abs_right

@[simp]
theorem isBigOWith_norm_left : IsBigOWith c l (fun x => ‖f' x‖) g ↔ IsBigOWith c l f' g := by
  simp only [IsBigOWith_def, norm_norm]

@[simp]
theorem isBigOWith_abs_left : IsBigOWith c l (fun x => |u x|) g ↔ IsBigOWith c l u g :=
  @isBigOWith_norm_left _ _ _ _ _ _ g u l

alias ⟨IsBigOWith.of_norm_left, IsBigOWith.norm_left⟩ := isBigOWith_norm_left

alias ⟨IsBigOWith.of_abs_left, IsBigOWith.abs_left⟩ := isBigOWith_abs_left

@[simp]
theorem isBigO_norm_left : (fun x => ‖f' x‖) =O[l] g ↔ f' =O[l] g := by
  simp only [IsBigO_def]
  exact exists_congr fun _ => isBigOWith_norm_left

@[simp]
theorem isBigO_abs_left : (fun x => |u x|) =O[l] g ↔ u =O[l] g :=
  @isBigO_norm_left _ _ _ _ _ g u l

alias ⟨IsBigO.of_norm_left, IsBigO.norm_left⟩ := isBigO_norm_left

alias ⟨IsBigO.of_abs_left, IsBigO.abs_left⟩ := isBigO_abs_left

@[simp]
theorem isLittleO_norm_left : (fun x => ‖f' x‖) =o[l] g ↔ f' =o[l] g := by
  simp only [IsLittleO_def]
  exact forall₂_congr fun _ _ => isBigOWith_norm_left

@[simp]
theorem isLittleO_abs_left : (fun x => |u x|) =o[l] g ↔ u =o[l] g :=
  @isLittleO_norm_left _ _ _ _ _ g u l

alias ⟨IsLittleO.of_norm_left, IsLittleO.norm_left⟩ := isLittleO_norm_left

alias ⟨IsLittleO.of_abs_left, IsLittleO.abs_left⟩ := isLittleO_abs_left

theorem isBigOWith_norm_norm :
    (IsBigOWith c l (fun x => ‖f' x‖) fun x => ‖g' x‖) ↔ IsBigOWith c l f' g' :=
  isBigOWith_norm_left.trans isBigOWith_norm_right

theorem isBigOWith_abs_abs :
    (IsBigOWith c l (fun x => |u x|) fun x => |v x|) ↔ IsBigOWith c l u v :=
  isBigOWith_abs_left.trans isBigOWith_abs_right

alias ⟨IsBigOWith.of_norm_norm, IsBigOWith.norm_norm⟩ := isBigOWith_norm_norm

alias ⟨IsBigOWith.of_abs_abs, IsBigOWith.abs_abs⟩ := isBigOWith_abs_abs

theorem isBigO_norm_norm : ((fun x => ‖f' x‖) =O[l] fun x => ‖g' x‖) ↔ f' =O[l] g' :=
  isBigO_norm_left.trans isBigO_norm_right

theorem isBigO_abs_abs : ((fun x => |u x|) =O[l] fun x => |v x|) ↔ u =O[l] v :=
  isBigO_abs_left.trans isBigO_abs_right

alias ⟨IsBigO.of_norm_norm, IsBigO.norm_norm⟩ := isBigO_norm_norm

alias ⟨IsBigO.of_abs_abs, IsBigO.abs_abs⟩ := isBigO_abs_abs

theorem isLittleO_norm_norm : ((fun x => ‖f' x‖) =o[l] fun x => ‖g' x‖) ↔ f' =o[l] g' :=
  isLittleO_norm_left.trans isLittleO_norm_right

theorem isLittleO_abs_abs : ((fun x => |u x|) =o[l] fun x => |v x|) ↔ u =o[l] v :=
  isLittleO_abs_left.trans isLittleO_abs_right

alias ⟨IsLittleO.of_norm_norm, IsLittleO.norm_norm⟩ := isLittleO_norm_norm

alias ⟨IsLittleO.of_abs_abs, IsLittleO.abs_abs⟩ := isLittleO_abs_abs

end NormAbs
/-! ### Simplification: negate -/

-- Negating either side of an asymptotic relation does not change it,
-- since `‖-x‖ = ‖x‖`.

@[simp]
theorem isBigOWith_neg_right : (IsBigOWith c l f fun x => -g' x) ↔ IsBigOWith c l f g' := by
  simp only [IsBigOWith_def, norm_neg]

alias ⟨IsBigOWith.of_neg_right, IsBigOWith.neg_right⟩ := isBigOWith_neg_right

@[simp]
theorem isBigO_neg_right : (f =O[l] fun x => -g' x) ↔ f =O[l] g' := by
  simp only [IsBigO_def]
  exact exists_congr fun _ => isBigOWith_neg_right

alias ⟨IsBigO.of_neg_right, IsBigO.neg_right⟩ := isBigO_neg_right

@[simp]
theorem isLittleO_neg_right : (f =o[l] fun x => -g' x) ↔ f =o[l] g' := by
  simp only [IsLittleO_def]
  exact forall₂_congr fun _ _ => isBigOWith_neg_right

alias ⟨IsLittleO.of_neg_right, IsLittleO.neg_right⟩ := isLittleO_neg_right

@[simp]
theorem isBigOWith_neg_left : IsBigOWith c l (fun x => -f' x) g ↔ IsBigOWith c l f' g := by
  simp only [IsBigOWith_def, norm_neg]

alias ⟨IsBigOWith.of_neg_left, IsBigOWith.neg_left⟩ := isBigOWith_neg_left

@[simp]
theorem isBigO_neg_left : (fun x => -f' x) =O[l] g ↔ f' =O[l] g := by
  simp only [IsBigO_def]
  exact exists_congr fun _ => isBigOWith_neg_left

alias ⟨IsBigO.of_neg_left, IsBigO.neg_left⟩ := isBigO_neg_left

@[simp]
theorem isLittleO_neg_left : (fun x => -f' x) =o[l] g ↔ f' =o[l] g := by
  simp only [IsLittleO_def]
  exact forall₂_congr fun _ _ => isBigOWith_neg_left

alias ⟨IsLittleO.of_neg_left, IsLittleO.neg_left⟩ := isLittleO_neg_left
/-! ### Product of functions (right)

Each component of a pair-valued function is `O` of the pair, with constant `1`, because the
norm on a product is the max of the component norms.
-/

theorem isBigOWith_fst_prod : IsBigOWith 1 l f' fun x => (f' x, g' x) :=
  isBigOWith_of_le l fun _x => le_max_left _ _

theorem isBigOWith_snd_prod : IsBigOWith 1 l g' fun x => (f' x, g' x) :=
  isBigOWith_of_le l fun _x => le_max_right _ _

theorem isBigO_fst_prod : f' =O[l] fun x => (f' x, g' x) :=
  isBigOWith_fst_prod.isBigO

theorem isBigO_snd_prod : g' =O[l] fun x => (f' x, g' x) :=
  isBigOWith_snd_prod.isBigO

/-- Variant of `isBigO_fst_prod` phrased for a single product-valued function. -/
theorem isBigO_fst_prod' {f' : α → E' × F'} : (fun x => (f' x).1) =O[l] f' := by
  simpa [IsBigO_def, IsBigOWith_def] using isBigO_fst_prod (E' := E') (F' := F')

/-- Variant of `isBigO_snd_prod` phrased for a single product-valued function. -/
theorem isBigO_snd_prod' {f' : α → E' × F'} : (fun x => (f' x).2) =O[l] f' := by
  simpa [IsBigO_def, IsBigOWith_def] using isBigO_snd_prod (E' := E') (F' := F')
section

variable (f' k')

/-- An asymptotic bound is preserved when the right-hand side is padded with an extra
second component, with the same constant. -/
theorem IsBigOWith.prod_rightl (h : IsBigOWith c l f g') (hc : 0 ≤ c) :
    IsBigOWith c l f fun x => (g' x, k' x) :=
  (h.trans isBigOWith_fst_prod hc).congr_const (mul_one c)

theorem IsBigO.prod_rightl (h : f =O[l] g') : f =O[l] fun x => (g' x, k' x) :=
  let ⟨_c, cnonneg, hc⟩ := h.exists_nonneg
  (hc.prod_rightl k' cnonneg).isBigO

theorem IsLittleO.prod_rightl (h : f =o[l] g') : f =o[l] fun x => (g' x, k' x) :=
  IsLittleO.of_isBigOWith fun _c cpos => (h.forall_isBigOWith cpos).prod_rightl k' cpos.le

/-- An asymptotic bound is preserved when the right-hand side is padded with an extra
first component, with the same constant. -/
theorem IsBigOWith.prod_rightr (h : IsBigOWith c l f g') (hc : 0 ≤ c) :
    IsBigOWith c l f fun x => (f' x, g' x) :=
  (h.trans isBigOWith_snd_prod hc).congr_const (mul_one c)

theorem IsBigO.prod_rightr (h : f =O[l] g') : f =O[l] fun x => (f' x, g' x) :=
  let ⟨_c, cnonneg, hc⟩ := h.exists_nonneg
  (hc.prod_rightr f' cnonneg).isBigO

theorem IsLittleO.prod_rightr (h : f =o[l] g') : f =o[l] fun x => (f' x, g' x) :=
  IsLittleO.of_isBigOWith fun _c cpos => (h.forall_isBigOWith cpos).prod_rightr f' cpos.le

end
/-- If both components satisfy a bound with the same constant `c`, so does the pair,
since the product norm is the max of the component norms. -/
theorem IsBigOWith.prod_left_same (hf : IsBigOWith c l f' k') (hg : IsBigOWith c l g' k') :
    IsBigOWith c l (fun x => (f' x, g' x)) k' := by
  rw [isBigOWith_iff] at *; filter_upwards [hf, hg] with x using max_le

/-- Different constants for the two components combine into `max c c'` for the pair. -/
theorem IsBigOWith.prod_left (hf : IsBigOWith c l f' k') (hg : IsBigOWith c' l g' k') :
    IsBigOWith (max c c') l (fun x => (f' x, g' x)) k' :=
  (hf.weaken <| le_max_left c c').prod_left_same (hg.weaken <| le_max_right c c')

theorem IsBigOWith.prod_left_fst (h : IsBigOWith c l (fun x => (f' x, g' x)) k') :
    IsBigOWith c l f' k' :=
  (isBigOWith_fst_prod.trans h zero_le_one).congr_const <| one_mul c

theorem IsBigOWith.prod_left_snd (h : IsBigOWith c l (fun x => (f' x, g' x)) k') :
    IsBigOWith c l g' k' :=
  (isBigOWith_snd_prod.trans h zero_le_one).congr_const <| one_mul c

/-- A pair satisfies an `IsBigOWith` bound iff both components do, with the same constant. -/
theorem isBigOWith_prod_left :
    IsBigOWith c l (fun x => (f' x, g' x)) k' ↔ IsBigOWith c l f' k' ∧ IsBigOWith c l g' k' :=
  ⟨fun h => ⟨h.prod_left_fst, h.prod_left_snd⟩, fun h => h.1.prod_left_same h.2⟩

theorem IsBigO.prod_left (hf : f' =O[l] k') (hg : g' =O[l] k') : (fun x => (f' x, g' x)) =O[l] k' :=
  let ⟨_c, hf⟩ := hf.isBigOWith
  let ⟨_c', hg⟩ := hg.isBigOWith
  (hf.prod_left hg).isBigO

theorem IsBigO.prod_left_fst : (fun x => (f' x, g' x)) =O[l] k' → f' =O[l] k' :=
  IsBigO.trans isBigO_fst_prod

theorem IsBigO.prod_left_snd : (fun x => (f' x, g' x)) =O[l] k' → g' =O[l] k' :=
  IsBigO.trans isBigO_snd_prod

/-- A pair is `O` of `k'` iff both components are. -/
@[simp]
theorem isBigO_prod_left : (fun x => (f' x, g' x)) =O[l] k' ↔ f' =O[l] k' ∧ g' =O[l] k' :=
  ⟨fun h => ⟨h.prod_left_fst, h.prod_left_snd⟩, fun h => h.1.prod_left h.2⟩

theorem IsLittleO.prod_left (hf : f' =o[l] k') (hg : g' =o[l] k') :
    (fun x => (f' x, g' x)) =o[l] k' :=
  IsLittleO.of_isBigOWith fun _c hc =>
    (hf.forall_isBigOWith hc).prod_left_same (hg.forall_isBigOWith hc)

theorem IsLittleO.prod_left_fst : (fun x => (f' x, g' x)) =o[l] k' → f' =o[l] k' :=
  IsBigO.trans_isLittleO isBigO_fst_prod

theorem IsLittleO.prod_left_snd : (fun x => (f' x, g' x)) =o[l] k' → g' =o[l] k' :=
  IsBigO.trans_isLittleO isBigO_snd_prod

/-- A pair is `o` of `k'` iff both components are. -/
@[simp]
theorem isLittleO_prod_left : (fun x => (f' x, g' x)) =o[l] k' ↔ f' =o[l] k' ∧ g' =o[l] k' :=
  ⟨fun h => ⟨h.prod_left_fst, h.prod_left_snd⟩, fun h => h.1.prod_left h.2⟩

/-- If `f'' = O(g'')` then, eventually, `f''` vanishes wherever `g''` does. -/
theorem IsBigOWith.eq_zero_imp (h : IsBigOWith c l f'' g'') : ∀ᶠ x in l, g'' x = 0 → f'' x = 0 :=
  Eventually.mono h.bound fun x hx hg => norm_le_zero_iff.1 <| by simpa [hg] using hx

theorem IsBigO.eq_zero_imp (h : f'' =O[l] g'') : ∀ᶠ x in l, g'' x = 0 → f'' x = 0 :=
  let ⟨_C, hC⟩ := h.isBigOWith
  hC.eq_zero_imp
/-! ### Addition and subtraction -/

section add_sub

variable {f₁ f₂ : α → E'} {g₁ g₂ : α → F'}

/-- Bounds add: constants `c₁`, `c₂` for `f₁`, `f₂` give the constant `c₁ + c₂` for the sum. -/
theorem IsBigOWith.add (h₁ : IsBigOWith c₁ l f₁ g) (h₂ : IsBigOWith c₂ l f₂ g) :
    IsBigOWith (c₁ + c₂) l (fun x => f₁ x + f₂ x) g := by
  rw [IsBigOWith_def] at *
  filter_upwards [h₁, h₂] with x hx₁ hx₂ using
    calc
      ‖f₁ x + f₂ x‖ ≤ c₁ * ‖g x‖ + c₂ * ‖g x‖ := norm_add_le_of_le hx₁ hx₂
      _ = (c₁ + c₂) * ‖g x‖ := (add_mul _ _ _).symm

theorem IsBigO.add (h₁ : f₁ =O[l] g) (h₂ : f₂ =O[l] g) : (fun x => f₁ x + f₂ x) =O[l] g :=
  let ⟨_c₁, hc₁⟩ := h₁.isBigOWith
  let ⟨_c₂, hc₂⟩ := h₂.isBigOWith
  (hc₁.add hc₂).isBigO

-- To get the bound `c` for the sum, apply each hypothesis with constant `c / 2`.
theorem IsLittleO.add (h₁ : f₁ =o[l] g) (h₂ : f₂ =o[l] g) : (fun x => f₁ x + f₂ x) =o[l] g :=
  IsLittleO.of_isBigOWith fun c cpos =>
    ((h₁.forall_isBigOWith <| half_pos cpos).add (h₂.forall_isBigOWith <|
      half_pos cpos)).congr_const (add_halves c)

theorem IsLittleO.add_add (h₁ : f₁ =o[l] g₁) (h₂ : f₂ =o[l] g₂) :
    (fun x => f₁ x + f₂ x) =o[l] fun x => ‖g₁ x‖ + ‖g₂ x‖ := by
  refine (h₁.trans_le fun x => ?_).add (h₂.trans_le ?_) <;> simp [abs_of_nonneg, add_nonneg]

theorem IsBigO.add_isLittleO (h₁ : f₁ =O[l] g) (h₂ : f₂ =o[l] g) : (fun x => f₁ x + f₂ x) =O[l] g :=
  h₁.add h₂.isBigO

theorem IsLittleO.add_isBigO (h₁ : f₁ =o[l] g) (h₂ : f₂ =O[l] g) : (fun x => f₁ x + f₂ x) =O[l] g :=
  h₁.isBigO.add h₂

/-- Adding an `o(g)` term to an `IsBigOWith c₁` bound only requires enlarging the constant
to any `c₂ > c₁`: the little-o term absorbs the slack `c₂ - c₁`. -/
theorem IsBigOWith.add_isLittleO (h₁ : IsBigOWith c₁ l f₁ g) (h₂ : f₂ =o[l] g) (hc : c₁ < c₂) :
    IsBigOWith c₂ l (fun x => f₁ x + f₂ x) g :=
  (h₁.add (h₂.forall_isBigOWith (sub_pos.2 hc))).congr_const (add_sub_cancel _ _)

theorem IsLittleO.add_isBigOWith (h₁ : f₁ =o[l] g) (h₂ : IsBigOWith c₁ l f₂ g) (hc : c₁ < c₂) :
    IsBigOWith c₂ l (fun x => f₁ x + f₂ x) g :=
  (h₂.add_isLittleO h₁ hc).congr_left fun _ => add_comm _ _

-- Subtraction lemmas reduce to the addition ones via `sub_eq_add_neg` and `neg_left`.
theorem IsBigOWith.sub (h₁ : IsBigOWith c₁ l f₁ g) (h₂ : IsBigOWith c₂ l f₂ g) :
    IsBigOWith (c₁ + c₂) l (fun x => f₁ x - f₂ x) g := by
  simpa only [sub_eq_add_neg] using h₁.add h₂.neg_left

theorem IsBigOWith.sub_isLittleO (h₁ : IsBigOWith c₁ l f₁ g) (h₂ : f₂ =o[l] g) (hc : c₁ < c₂) :
    IsBigOWith c₂ l (fun x => f₁ x - f₂ x) g := by
  simpa only [sub_eq_add_neg] using h₁.add_isLittleO h₂.neg_left hc

theorem IsBigO.sub (h₁ : f₁ =O[l] g) (h₂ : f₂ =O[l] g) : (fun x => f₁ x - f₂ x) =O[l] g := by
  simpa only [sub_eq_add_neg] using h₁.add h₂.neg_left

theorem IsLittleO.sub (h₁ : f₁ =o[l] g) (h₂ : f₂ =o[l] g) : (fun x => f₁ x - f₂ x) =o[l] g := by
  simpa only [sub_eq_add_neg] using h₁.add h₂.neg_left

end add_sub
/-!
### Lemmas about `IsBigO (f₁ - f₂) g l` / `IsLittleO (f₁ - f₂) g l` treated as a binary relation

Viewing "`f₁ - f₂` is `O(g)`" as a relation between `f₁` and `f₂`, these lemmas show it is
symmetric (`symm`) and transitive (`triangle`), and that it identifies the `=O[l] g`
status of `f₁` and `f₂` (`congr_of_sub`).
-/

section IsBigOOAsRel

variable {f₁ f₂ f₃ : α → E'}

theorem IsBigOWith.symm (h : IsBigOWith c l (fun x => f₁ x - f₂ x) g) :
    IsBigOWith c l (fun x => f₂ x - f₁ x) g :=
  h.neg_left.congr_left fun _x => neg_sub _ _

theorem isBigOWith_comm :
    IsBigOWith c l (fun x => f₁ x - f₂ x) g ↔ IsBigOWith c l (fun x => f₂ x - f₁ x) g :=
  ⟨IsBigOWith.symm, IsBigOWith.symm⟩

theorem IsBigO.symm (h : (fun x => f₁ x - f₂ x) =O[l] g) : (fun x => f₂ x - f₁ x) =O[l] g :=
  h.neg_left.congr_left fun _x => neg_sub _ _

theorem isBigO_comm : (fun x => f₁ x - f₂ x) =O[l] g ↔ (fun x => f₂ x - f₁ x) =O[l] g :=
  ⟨IsBigO.symm, IsBigO.symm⟩

theorem IsLittleO.symm (h : (fun x => f₁ x - f₂ x) =o[l] g) : (fun x => f₂ x - f₁ x) =o[l] g := by
  simpa only [neg_sub] using h.neg_left

theorem isLittleO_comm : (fun x => f₁ x - f₂ x) =o[l] g ↔ (fun x => f₂ x - f₁ x) =o[l] g :=
  ⟨IsLittleO.symm, IsLittleO.symm⟩

/-- Transitivity of the relation, with constants adding up, via
`(f₁ - f₂) + (f₂ - f₃) = f₁ - f₃`. -/
theorem IsBigOWith.triangle (h₁ : IsBigOWith c l (fun x => f₁ x - f₂ x) g)
    (h₂ : IsBigOWith c' l (fun x => f₂ x - f₃ x) g) :
    IsBigOWith (c + c') l (fun x => f₁ x - f₃ x) g :=
  (h₁.add h₂).congr_left fun _x => sub_add_sub_cancel _ _ _

theorem IsBigO.triangle (h₁ : (fun x => f₁ x - f₂ x) =O[l] g)
    (h₂ : (fun x => f₂ x - f₃ x) =O[l] g) : (fun x => f₁ x - f₃ x) =O[l] g :=
  (h₁.add h₂).congr_left fun _x => sub_add_sub_cancel _ _ _

theorem IsLittleO.triangle (h₁ : (fun x => f₁ x - f₂ x) =o[l] g)
    (h₂ : (fun x => f₂ x - f₃ x) =o[l] g) : (fun x => f₁ x - f₃ x) =o[l] g :=
  (h₁.add h₂).congr_left fun _x => sub_add_sub_cancel _ _ _

/-- If `f₁ - f₂ = O(g)`, then `f₁ = O(g)` iff `f₂ = O(g)`. -/
theorem IsBigO.congr_of_sub (h : (fun x => f₁ x - f₂ x) =O[l] g) : f₁ =O[l] g ↔ f₂ =O[l] g :=
  ⟨fun h' => (h'.sub h).congr_left fun _x => sub_sub_cancel _ _, fun h' =>
    (h.add h').congr_left fun _x => sub_add_cancel _ _⟩

/-- If `f₁ - f₂ = o(g)`, then `f₁ = o(g)` iff `f₂ = o(g)`. -/
theorem IsLittleO.congr_of_sub (h : (fun x => f₁ x - f₂ x) =o[l] g) : f₁ =o[l] g ↔ f₂ =o[l] g :=
  ⟨fun h' => (h'.sub h).congr_left fun _x => sub_sub_cancel _ _, fun h' =>
    (h.add h').congr_left fun _x => sub_add_cancel _ _⟩

end IsBigOOAsRel
/-! ### Zero, one, and other constants -/

section ZeroConst

variable (g g' l)

/-- The zero function is `o` of anything. -/
theorem isLittleO_zero : (fun _x => (0 : E')) =o[l] g' :=
  IsLittleO.of_bound fun c hc =>
    univ_mem' fun x => by simpa using mul_nonneg hc.le (norm_nonneg <| g' x)

theorem isBigOWith_zero (hc : 0 ≤ c) : IsBigOWith c l (fun _x => (0 : E')) g' :=
  IsBigOWith.of_bound <| univ_mem' fun x => by simpa using mul_nonneg hc (norm_nonneg <| g' x)

theorem isBigOWith_zero' : IsBigOWith 0 l (fun _x => (0 : E')) g :=
  IsBigOWith.of_bound <| univ_mem' fun x => by simp

theorem isBigO_zero : (fun _x => (0 : E')) =O[l] g :=
  isBigO_iff_isBigOWith.2 ⟨0, isBigOWith_zero' _ _⟩

theorem isBigO_refl_left : (fun x => f' x - f' x) =O[l] g' :=
  (isBigO_zero g' l).congr_left fun _x => (sub_self _).symm

theorem isLittleO_refl_left : (fun x => f' x - f' x) =o[l] g' :=
  (isLittleO_zero g' l).congr_left fun _x => (sub_self _).symm

variable {g g' l}

/-- A function is `O`-with-constant of the zero function iff it is eventually zero. -/
@[simp]
theorem isBigOWith_zero_right_iff : (IsBigOWith c l f'' fun _x => (0 : F')) ↔ f'' =ᶠ[l] 0 := by
  simp only [IsBigOWith_def, exists_prop, true_and_iff, norm_zero, mul_zero,
    norm_le_zero_iff, EventuallyEq, Pi.zero_apply]

/-- A function is `O` of the zero function iff it is eventually zero. -/
@[simp]
theorem isBigO_zero_right_iff : (f'' =O[l] fun _x => (0 : F')) ↔ f'' =ᶠ[l] 0 :=
  ⟨fun h =>
    let ⟨_c, hc⟩ := h.isBigOWith
    isBigOWith_zero_right_iff.1 hc,
    fun h => (isBigOWith_zero_right_iff.2 h : IsBigOWith 1 _ _ _).isBigO⟩

/-- A function is `o` of the zero function iff it is eventually zero. -/
@[simp]
theorem isLittleO_zero_right_iff : (f'' =o[l] fun _x => (0 : F')) ↔ f'' =ᶠ[l] 0 :=
  ⟨fun h => isBigO_zero_right_iff.1 h.isBigO,
    fun h => IsLittleO.of_isBigOWith fun _c _hc => isBigOWith_zero_right_iff.2 h⟩

/-- A nonzero constant bounds any constant, with constant `‖c‖ / ‖c'‖`. -/
theorem isBigOWith_const_const (c : E) {c' : F''} (hc' : c' ≠ 0) (l : Filter α) :
    IsBigOWith (‖c‖ / ‖c'‖) l (fun _x : α => c) fun _x => c' := by
  simp only [IsBigOWith_def]
  apply univ_mem'
  intro x
  rw [mem_setOf, div_mul_cancel₀ _ (norm_ne_zero_iff.mpr hc')]

theorem isBigO_const_const (c : E) {c' : F''} (hc' : c' ≠ 0) (l : Filter α) :
    (fun _x : α => c) =O[l] fun _x => c' :=
  (isBigOWith_const_const c hc' l).isBigO

/-- On a nontrivial filter, a constant is `O` of another constant unless the right one is
zero while the left one is not. -/
@[simp]
theorem isBigO_const_const_iff {c : E''} {c' : F''} (l : Filter α) [l.NeBot] :
    ((fun _x : α => c) =O[l] fun _x => c') ↔ c' = 0 → c = 0 := by
  rcases eq_or_ne c' 0 with (rfl | hc')
  · simp [EventuallyEq]
  · simp [hc', isBigO_const_const _ hc']

-- Along `pure x` both functions behave like the constants `f'' x`, `g'' x`.
@[simp]
theorem isBigO_pure {x} : f'' =O[pure x] g'' ↔ g'' x = 0 → f'' x = 0 :=
  calc
    f'' =O[pure x] g'' ↔ (fun _y : α => f'' x) =O[pure x] fun _ => g'' x := isBigO_congr rfl rfl
    _ ↔ g'' x = 0 → f'' x = 0 := isBigO_const_const_iff _

end ZeroConst
-- Along a principal filter `𝓟 s` the "eventually" in the definitions becomes "for all `x ∈ s`".
@[simp]
theorem isBigOWith_principal {s : Set α} : IsBigOWith c (𝓟 s) f g ↔ ∀ x ∈ s, ‖f x‖ ≤ c * ‖g x‖ := by
  rw [IsBigOWith_def, eventually_principal]

theorem isBigO_principal {s : Set α} : f =O[𝓟 s] g ↔ ∃ c, ∀ x ∈ s, ‖f x‖ ≤ c * ‖g x‖ := by
  simp_rw [isBigO_iff, eventually_principal]

/-- Along a principal filter, `f'' = o(g')` forces `f''` to vanish on the whole set:
the bound `‖f'' x‖ ≤ c * ‖g' x‖` holds for every `c > 0`, so `‖f'' x‖ ≤ 0` in the limit. -/
@[simp]
theorem isLittleO_principal {s : Set α} : f'' =o[𝓟 s] g' ↔ ∀ x ∈ s, f'' x = 0 := by
  refine ⟨fun h x hx ↦ norm_le_zero_iff.1 ?_, fun h ↦ ?_⟩
  · simp only [isLittleO_iff, isBigOWith_principal] at h
    have : Tendsto (fun c : ℝ => c * ‖g' x‖) (𝓝[>] 0) (𝓝 0) :=
      ((continuous_id.mul continuous_const).tendsto' _ _ (zero_mul _)).mono_left
        inf_le_left
    apply le_of_tendsto_of_tendsto tendsto_const_nhds this
    apply eventually_nhdsWithin_iff.2 (eventually_of_forall (fun c hc ↦ ?_))
    exact eventually_principal.1 (h hc) x hx
  · apply (isLittleO_zero g' _).congr' ?_ EventuallyEq.rfl
    exact fun x hx ↦ (h x hx).symm

-- The `⊤` filter is the principal filter of `univ`: the conditions become universal.
@[simp]
theorem isBigOWith_top : IsBigOWith c ⊤ f g ↔ ∀ x, ‖f x‖ ≤ c * ‖g x‖ := by
  rw [IsBigOWith_def, eventually_top]

@[simp]
theorem isBigO_top : f =O[⊤] g ↔ ∃ C, ∀ x, ‖f x‖ ≤ C * ‖g x‖ := by
  simp_rw [isBigO_iff, eventually_top]

@[simp]
theorem isLittleO_top : f'' =o[⊤] g' ↔ ∀ x, f'' x = 0 := by
  simp only [← principal_univ, isLittleO_principal, mem_univ, forall_true_left]
section

variable (F)
variable [One F] [NormOneClass F]

/-- Any constant is `O(1)` with constant its own norm. -/
theorem isBigOWith_const_one (c : E) (l : Filter α) :
    IsBigOWith ‖c‖ l (fun _x : α => c) fun _x => (1 : F) := by simp [isBigOWith_iff]

theorem isBigO_const_one (c : E) (l : Filter α) : (fun _x : α => c) =O[l] fun _x => (1 : F) :=
  (isBigOWith_const_one F c l).isBigO

/-- Being `o` of a nonzero constant is the same as being `o(1)`. -/
theorem isLittleO_const_iff_isLittleO_one {c : F''} (hc : c ≠ 0) :
    (f =o[l] fun _x => c) ↔ f =o[l] fun _x => (1 : F) :=
  ⟨fun h => h.trans_isBigOWith (isBigOWith_const_one _ _ _) (norm_pos_iff.2 hc),
    fun h => h.trans_isBigO <| isBigO_const_const _ hc _⟩

/-- `f' = o(1)` iff `f'` tends to zero. -/
@[simp]
theorem isLittleO_one_iff : f' =o[l] (fun _x => 1 : α → F) ↔ Tendsto f' l (𝓝 0) := by
  simp only [isLittleO_iff, norm_one, mul_one, Metric.nhds_basis_closedBall.tendsto_right_iff,
    Metric.mem_closedBall, dist_zero_right]

/-- `f = O(1)` iff `‖f‖` is eventually bounded. -/
@[simp]
theorem isBigO_one_iff : f =O[l] (fun _x => 1 : α → F) ↔
    IsBoundedUnder (· ≤ ·) l fun x => ‖f x‖ := by
  simp only [isBigO_iff, norm_one, mul_one, IsBoundedUnder, IsBounded, eventually_map]

alias ⟨_, _root_.Filter.IsBoundedUnder.isBigO_one⟩ := isBigO_one_iff

/-- `1 = o(f)` iff `‖f‖` tends to infinity. -/
@[simp]
theorem isLittleO_one_left_iff : (fun _x => 1 : α → F) =o[l] f ↔ Tendsto (fun x => ‖f x‖) l atTop :=
  calc
    (fun _x => 1 : α → F) =o[l] f ↔ ∀ n : ℕ, ∀ᶠ x in l, ↑n * ‖(1 : F)‖ ≤ ‖f x‖ :=
      isLittleO_iff_nat_mul_le_aux <| Or.inl fun _x => by simp only [norm_one, zero_le_one]
    _ ↔ ∀ n : ℕ, True → ∀ᶠ x in l, ‖f x‖ ∈ Ici (n : ℝ) := by
      simp only [norm_one, mul_one, true_imp_iff, mem_Ici]
    _ ↔ Tendsto (fun x => ‖f x‖) l atTop :=
      atTop_hasCountableBasis_of_archimedean.1.tendsto_right_iff.symm

/-- A function converging to a (finite) limit is `O(1)`. -/
theorem _root_.Filter.Tendsto.isBigO_one {c : E'} (h : Tendsto f' l (𝓝 c)) :
    f' =O[l] (fun _x => 1 : α → F) :=
  h.norm.isBoundedUnder_le.isBigO_one F

theorem IsBigO.trans_tendsto_nhds (hfg : f =O[l] g') {y : F'} (hg : Tendsto g' l (𝓝 y)) :
    f =O[l] (fun _x => 1 : α → F) :=
  hfg.trans <| hg.isBigO_one F

/-- The condition `f = O[𝓝[≠] a] 1` is equivalent to `f = O[𝓝 a] 1`. -/
lemma isBigO_one_nhds_ne_iff [TopologicalSpace α] {a : α} :
    f =O[𝓝[≠] a] (fun _ ↦ 1 : α → F) ↔ f =O[𝓝 a] (fun _ ↦ 1 : α → F) := by
  refine ⟨fun h ↦ ?_, fun h ↦ h.mono nhdsWithin_le_nhds⟩
  simp only [isBigO_one_iff, IsBoundedUnder, IsBounded, eventually_map] at h ⊢
  obtain ⟨c, hc⟩ := h
  -- Enlarge the punctured-neighborhood bound `c` to also cover the missing point `a`.
  use max c ‖f a‖
  filter_upwards [eventually_nhdsWithin_iff.mp hc] with b hb
  rcases eq_or_ne b a with rfl | hb'
  · apply le_max_right
  · exact (hb hb').trans (le_max_left ..)

end
/-- `f'' = o(c)` for a nonzero constant `c` iff `f''` tends to zero. -/
theorem isLittleO_const_iff {c : F''} (hc : c ≠ 0) :
    (f'' =o[l] fun _x => c) ↔ Tendsto f'' l (𝓝 0) :=
  (isLittleO_const_iff_isLittleO_one ℝ hc).trans (isLittleO_one_iff _)

theorem isLittleO_id_const {c : F''} (hc : c ≠ 0) : (fun x : E'' => x) =o[𝓝 0] fun _x => c :=
  (isLittleO_const_iff hc).mpr (continuous_id.tendsto 0)

/-- A function with eventually bounded norm is `O` of any nonzero constant. -/
theorem _root_.Filter.IsBoundedUnder.isBigO_const (h : IsBoundedUnder (· ≤ ·) l (norm ∘ f))
    {c : F''} (hc : c ≠ 0) : f =O[l] fun _x => c :=
  (h.isBigO_one ℝ).trans (isBigO_const_const _ hc _)

theorem isBigO_const_of_tendsto {y : E''} (h : Tendsto f'' l (𝓝 y)) {c : F''} (hc : c ≠ 0) :
    f'' =O[l] fun _x => c :=
  h.norm.isBoundedUnder_le.isBigO_const hc

/-- Conversely, `f = O(c)` implies `‖f‖` is eventually bounded (by `c' * ‖c‖`). -/
theorem IsBigO.isBoundedUnder_le {c : F} (h : f =O[l] fun _x => c) :
    IsBoundedUnder (· ≤ ·) l (norm ∘ f) :=
  let ⟨c', hc'⟩ := h.bound
  ⟨c' * ‖c‖, eventually_map.2 hc'⟩

theorem isBigO_const_of_ne {c : F''} (hc : c ≠ 0) :
    (f =O[l] fun _x => c) ↔ IsBoundedUnder (· ≤ ·) l (norm ∘ f) :=
  ⟨fun h => h.isBoundedUnder_le, fun h => h.isBigO_const hc⟩

/-- Characterisation of `f'' = O(c)` covering the degenerate case `c = 0` as well. -/
theorem isBigO_const_iff {c : F''} : (f'' =O[l] fun _x => c) ↔
    (c = 0 → f'' =ᶠ[l] 0) ∧ IsBoundedUnder (· ≤ ·) l fun x => ‖f'' x‖ := by
  refine ⟨fun h => ⟨fun hc => isBigO_zero_right_iff.1 (by rwa [← hc]), h.isBoundedUnder_le⟩, ?_⟩
  rintro ⟨hcf, hf⟩
  rcases eq_or_ne c 0 with (hc | hc)
  exacts [(hcf hc).trans_isBigO (isBigO_zero _ _), hf.isBigO_const hc]

/-- When `g''` is eventually nonzero, `f = O(g'')` iff the quotient of norms is bounded. -/
theorem isBigO_iff_isBoundedUnder_le_div (h : ∀ᶠ x in l, g'' x ≠ 0) :
    f =O[l] g'' ↔ IsBoundedUnder (· ≤ ·) l fun x => ‖f x‖ / ‖g'' x‖ := by
  simp only [isBigO_iff, IsBoundedUnder, IsBounded, eventually_map]
  exact
    exists_congr fun c =>
      eventually_congr <| h.mono fun x hx => (div_le_iff <| norm_pos_iff.2 hx).symm

/-- `(fun x ↦ c) =O[l] f` if and only if `f` is bounded away from zero. -/
theorem isBigO_const_left_iff_pos_le_norm {c : E''} (hc : c ≠ 0) :
    (fun _x => c) =O[l] f' ↔ ∃ b, 0 < b ∧ ∀ᶠ x in l, b ≤ ‖f' x‖ := by
  constructor
  · intro h
    rcases h.exists_pos with ⟨C, hC₀, hC⟩
    refine ⟨‖c‖ / C, div_pos (norm_pos_iff.2 hc) hC₀, ?_⟩
    exact hC.bound.mono fun x => (div_le_iff' hC₀).2
  · rintro ⟨b, hb₀, hb⟩
    refine IsBigO.of_bound (‖c‖ / b) (hb.mono fun x hx => ?_)
    rw [div_mul_eq_mul_div, mul_div_assoc]
    exact le_mul_of_one_le_right (norm_nonneg _) ((one_le_div hb₀).2 hx)

/-- A function that is `O` of a function tending to zero also tends to zero. -/
theorem IsBigO.trans_tendsto (hfg : f'' =O[l] g'') (hg : Tendsto g'' l (𝓝 0)) :
    Tendsto f'' l (𝓝 0) :=
  (isLittleO_one_iff ℝ).1 <| hfg.trans_isLittleO <| (isLittleO_one_iff ℝ).2 hg

theorem IsLittleO.trans_tendsto (hfg : f'' =o[l] g'') (hg : Tendsto g'' l (𝓝 0)) :
    Tendsto f'' l (𝓝 0) :=
  hfg.isBigO.trans_tendsto hg
/-! ### Multiplication by a constant

Multiplying either side of an asymptotic relation by a fixed scalar. On a general seminormed
ring `R` the scalar must be a unit for the iff versions; over the field `𝕜` nonzero suffices.
-/

theorem isBigOWith_const_mul_self (c : R) (f : α → R) (l : Filter α) :
    IsBigOWith ‖c‖ l (fun x => c * f x) f :=
  isBigOWith_of_le' _ fun _x => norm_mul_le _ _

theorem isBigO_const_mul_self (c : R) (f : α → R) (l : Filter α) : (fun x => c * f x) =O[l] f :=
  (isBigOWith_const_mul_self c f l).isBigO

theorem IsBigOWith.const_mul_left {f : α → R} (h : IsBigOWith c l f g) (c' : R) :
    IsBigOWith (‖c'‖ * c) l (fun x => c' * f x) g :=
  (isBigOWith_const_mul_self c' f l).trans h (norm_nonneg c')

theorem IsBigO.const_mul_left {f : α → R} (h : f =O[l] g) (c' : R) : (fun x => c' * f x) =O[l] g :=
  let ⟨_c, hc⟩ := h.isBigOWith
  (hc.const_mul_left c').isBigO

/-- `f` is bounded by `u * f` (for a unit `u`) with constant `‖u⁻¹‖`, by cancelling `u`. -/
theorem isBigOWith_self_const_mul' (u : Rˣ) (f : α → R) (l : Filter α) :
    IsBigOWith ‖(↑u⁻¹ : R)‖ l f fun x => ↑u * f x :=
  (isBigOWith_const_mul_self ↑u⁻¹ (fun x ↦ ↑u * f x) l).congr_left
    fun x ↦ u.inv_mul_cancel_left (f x)

theorem isBigOWith_self_const_mul (c : 𝕜) (hc : c ≠ 0) (f : α → 𝕜) (l : Filter α) :
    IsBigOWith ‖c‖⁻¹ l f fun x => c * f x :=
  (isBigOWith_self_const_mul' (Units.mk0 c hc) f l).congr_const <| norm_inv c

theorem isBigO_self_const_mul' {c : R} (hc : IsUnit c) (f : α → R) (l : Filter α) :
    f =O[l] fun x => c * f x :=
  let ⟨u, hu⟩ := hc
  hu ▸ (isBigOWith_self_const_mul' u f l).isBigO

theorem isBigO_self_const_mul (c : 𝕜) (hc : c ≠ 0) (f : α → 𝕜) (l : Filter α) :
    f =O[l] fun x => c * f x :=
  isBigO_self_const_mul' (IsUnit.mk0 c hc) f l

/-- Multiplying the left side by a unit does not change the `=O` relation. -/
theorem isBigO_const_mul_left_iff' {f : α → R} {c : R} (hc : IsUnit c) :
    (fun x => c * f x) =O[l] g ↔ f =O[l] g :=
  ⟨(isBigO_self_const_mul' hc f l).trans, fun h => h.const_mul_left c⟩

theorem isBigO_const_mul_left_iff {f : α → 𝕜} {c : 𝕜} (hc : c ≠ 0) :
    (fun x => c * f x) =O[l] g ↔ f =O[l] g :=
  isBigO_const_mul_left_iff' <| IsUnit.mk0 c hc

theorem IsLittleO.const_mul_left {f : α → R} (h : f =o[l] g) (c : R) : (fun x => c * f x) =o[l] g :=
  (isBigO_const_mul_self c f l).trans_isLittleO h

theorem isLittleO_const_mul_left_iff' {f : α → R} {c : R} (hc : IsUnit c) :
    (fun x => c * f x) =o[l] g ↔ f =o[l] g :=
  ⟨(isBigO_self_const_mul' hc f l).trans_isLittleO, fun h => h.const_mul_left c⟩

theorem isLittleO_const_mul_left_iff {f : α → 𝕜} {c : 𝕜} (hc : c ≠ 0) :
    (fun x => c * f x) =o[l] g ↔ f =o[l] g :=
  isLittleO_const_mul_left_iff' <| IsUnit.mk0 c hc

/-- Removing a constant factor on the right costs a factor `‖c‖` in the bound. -/
theorem IsBigOWith.of_const_mul_right {g : α → R} {c : R} (hc' : 0 ≤ c')
    (h : IsBigOWith c' l f fun x => c * g x) : IsBigOWith (c' * ‖c‖) l f g :=
  h.trans (isBigOWith_const_mul_self c g l) hc'

theorem IsBigO.of_const_mul_right {g : α → R} {c : R} (h : f =O[l] fun x => c * g x) : f =O[l] g :=
  let ⟨_c, cnonneg, hc⟩ := h.exists_nonneg
  (hc.of_const_mul_right cnonneg).isBigO

theorem IsBigOWith.const_mul_right' {g : α → R} {u : Rˣ} {c' : ℝ} (hc' : 0 ≤ c')
    (h : IsBigOWith c' l f g) : IsBigOWith (c' * ‖(↑u⁻¹ : R)‖) l f fun x => ↑u * g x :=
  h.trans (isBigOWith_self_const_mul' _ _ _) hc'

theorem IsBigOWith.const_mul_right {g : α → 𝕜} {c : 𝕜} (hc : c ≠ 0) {c' : ℝ} (hc' : 0 ≤ c')
    (h : IsBigOWith c' l f g) : IsBigOWith (c' * ‖c‖⁻¹) l f fun x => c * g x :=
  h.trans (isBigOWith_self_const_mul c hc g l) hc'

theorem IsBigO.const_mul_right' {g : α → R} {c : R} (hc : IsUnit c) (h : f =O[l] g) :
    f =O[l] fun x => c * g x :=
  h.trans (isBigO_self_const_mul' hc g l)

theorem IsBigO.const_mul_right {g : α → 𝕜} {c : 𝕜} (hc : c ≠ 0) (h : f =O[l] g) :
    f =O[l] fun x => c * g x :=
  h.const_mul_right' <| IsUnit.mk0 c hc

/-- Multiplying the right side by a unit does not change the `=O` relation. -/
theorem isBigO_const_mul_right_iff' {g : α → R} {c : R} (hc : IsUnit c) :
    (f =O[l] fun x => c * g x) ↔ f =O[l] g :=
  ⟨fun h => h.of_const_mul_right, fun h => h.const_mul_right' hc⟩

theorem isBigO_const_mul_right_iff {g : α → 𝕜} {c : 𝕜} (hc : c ≠ 0) :
    (f =O[l] fun x => c * g x) ↔ f =O[l] g :=
  isBigO_const_mul_right_iff' <| IsUnit.mk0 c hc

theorem IsLittleO.of_const_mul_right {g : α → R} {c : R} (h : f =o[l] fun x => c * g x) :
    f =o[l] g :=
  h.trans_isBigO (isBigO_const_mul_self c g l)

theorem IsLittleO.const_mul_right' {g : α → R} {c : R} (hc : IsUnit c) (h : f =o[l] g) :
    f =o[l] fun x => c * g x :=
  h.trans_isBigO (isBigO_self_const_mul' hc g l)

theorem IsLittleO.const_mul_right {g : α → 𝕜} {c : 𝕜} (hc : c ≠ 0) (h : f =o[l] g) :
    f =o[l] fun x => c * g x :=
  h.const_mul_right' <| IsUnit.mk0 c hc

theorem isLittleO_const_mul_right_iff' {g : α → R} {c : R} (hc : IsUnit c) :
    (f =o[l] fun x => c * g x) ↔ f =o[l] g :=
  ⟨fun h => h.of_const_mul_right, fun h => h.const_mul_right' hc⟩

theorem isLittleO_const_mul_right_iff {g : α → 𝕜} {c : 𝕜} (hc : c ≠ 0) :
    (f =o[l] fun x => c * g x) ↔ f =o[l] g :=
  isLittleO_const_mul_right_iff' <| IsUnit.mk0 c hc
/-! ### Multiplication -/

/-- Asymptotic bounds multiply: the product of the functions is bounded by the product of the
right-hand sides, with the product of the constants. -/
theorem IsBigOWith.mul {f₁ f₂ : α → R} {g₁ g₂ : α → 𝕜} {c₁ c₂ : ℝ} (h₁ : IsBigOWith c₁ l f₁ g₁)
    (h₂ : IsBigOWith c₂ l f₂ g₂) :
    IsBigOWith (c₁ * c₂) l (fun x => f₁ x * f₂ x) fun x => g₁ x * g₂ x := by
  simp only [IsBigOWith_def] at *
  filter_upwards [h₁, h₂] with _ hx₁ hx₂
  apply le_trans (norm_mul_le _ _)
  convert mul_le_mul hx₁ hx₂ (norm_nonneg _) (le_trans (norm_nonneg _) hx₁) using 1
  rw [norm_mul, mul_mul_mul_comm]

theorem IsBigO.mul {f₁ f₂ : α → R} {g₁ g₂ : α → 𝕜} (h₁ : f₁ =O[l] g₁) (h₂ : f₂ =O[l] g₂) :
    (fun x => f₁ x * f₂ x) =O[l] fun x => g₁ x * g₂ x :=
  let ⟨_c, hc⟩ := h₁.isBigOWith
  let ⟨_c', hc'⟩ := h₂.isBigOWith
  (hc.mul hc').isBigO

/-- `O` times `o` is `o`: split the target constant `c` as `c' * (c / c')`. -/
theorem IsBigO.mul_isLittleO {f₁ f₂ : α → R} {g₁ g₂ : α → 𝕜} (h₁ : f₁ =O[l] g₁) (h₂ : f₂ =o[l] g₂) :
    (fun x => f₁ x * f₂ x) =o[l] fun x => g₁ x * g₂ x := by
  simp only [IsLittleO_def] at *
  intro c cpos
  rcases h₁.exists_pos with ⟨c', c'pos, hc'⟩
  exact (hc'.mul (h₂ (div_pos cpos c'pos))).congr_const (mul_div_cancel₀ _ (ne_of_gt c'pos))

/-- `o` times `O` is `o`. -/
theorem IsLittleO.mul_isBigO {f₁ f₂ : α → R} {g₁ g₂ : α → 𝕜} (h₁ : f₁ =o[l] g₁) (h₂ : f₂ =O[l] g₂) :
    (fun x => f₁ x * f₂ x) =o[l] fun x => g₁ x * g₂ x := by
  simp only [IsLittleO_def] at *
  intro c cpos
  rcases h₂.exists_pos with ⟨c', c'pos, hc'⟩
  exact ((h₁ (div_pos cpos c'pos)).mul hc').congr_const (div_mul_cancel₀ _ (ne_of_gt c'pos))

theorem IsLittleO.mul {f₁ f₂ : α → R} {g₁ g₂ : α → 𝕜} (h₁ : f₁ =o[l] g₁) (h₂ : f₂ =o[l] g₂) :
    (fun x => f₁ x * f₂ x) =o[l] fun x => g₁ x * g₂ x :=
  h₁.mul_isBigO h₂.isBigO

/-- Powers of an `IsBigOWith` bound. The constant is `‖(1 : R)‖` for `n = 0` (the ring may
not be `NormOneClass`) and `c ^ n` for `n ≥ 1`. -/
theorem IsBigOWith.pow' {f : α → R} {g : α → 𝕜} (h : IsBigOWith c l f g) :
    ∀ n : ℕ, IsBigOWith (Nat.casesOn n ‖(1 : R)‖ fun n => c ^ (n + 1))
      l (fun x => f x ^ n) fun x => g x ^ n
  | 0 => by simpa using isBigOWith_const_const (1 : R) (one_ne_zero' 𝕜) l
  | 1 => by simpa
  | n + 2 => by simpa [pow_succ] using (IsBigOWith.pow' h (n + 1)).mul h

/-- Over a `NormOneClass` ring the constant is uniformly `c ^ n`. -/
theorem IsBigOWith.pow [NormOneClass R] {f : α → R} {g : α → 𝕜} (h : IsBigOWith c l f g) :
    ∀ n : ℕ, IsBigOWith (c ^ n) l (fun x => f x ^ n) fun x => g x ^ n
  | 0 => by simpa using h.pow' 0
  | n + 1 => h.pow' (n + 1)

/-- Extract an `IsBigOWith` bound for `f` from one for `f ^ n` by taking `n`-th roots of the
pointwise inequality. -/
theorem IsBigOWith.of_pow {n : ℕ} {f : α → 𝕜} {g : α → R} (h : IsBigOWith c l (f ^ n) (g ^ n))
    (hn : n ≠ 0) (hc : c ≤ c' ^ n) (hc' : 0 ≤ c') : IsBigOWith c' l f g :=
  IsBigOWith.of_bound <| (h.weaken hc).bound.mono fun x hx ↦
    le_of_pow_le_pow_left hn (by positivity) <|
      calc
        ‖f x‖ ^ n = ‖f x ^ n‖ := (norm_pow _ _).symm
        _ ≤ c' ^ n * ‖g x ^ n‖ := hx
        _ ≤ c' ^ n * ‖g x‖ ^ n := by gcongr; exact norm_pow_le' _ hn.bot_lt
        _ = (c' * ‖g x‖) ^ n := (mul_pow _ _ _).symm

theorem IsBigO.pow {f : α → R} {g : α → 𝕜} (h : f =O[l] g) (n : ℕ) :
    (fun x => f x ^ n) =O[l] fun x => g x ^ n :=
  let ⟨_C, hC⟩ := h.isBigOWith
  isBigO_iff_isBigOWith.2 ⟨_, hC.pow' n⟩

/-- `f = O(g)` follows from `f ^ n = O(g ^ n)` for `n ≠ 0`: choose `c ≥ 0` with `C ≤ c ^ n`. -/
theorem IsBigO.of_pow {f : α → 𝕜} {g : α → R} {n : ℕ} (hn : n ≠ 0) (h : (f ^ n) =O[l] (g ^ n)) :
    f =O[l] g := by
  rcases h.exists_pos with ⟨C, _hC₀, hC⟩
  obtain ⟨c : ℝ, hc₀ : 0 ≤ c, hc : C ≤ c ^ n⟩ :=
    ((eventually_ge_atTop _).and <| (tendsto_pow_atTop hn).eventually_ge_atTop C).exists
  exact (hC.of_pow hn hc hc₀).isBigO

theorem IsLittleO.pow {f : α → R} {g : α → 𝕜} (h : f =o[l] g) {n : ℕ} (hn : 0 < n) :
    (fun x => f x ^ n) =o[l] fun x => g x ^ n := by
  obtain ⟨n, rfl⟩ := Nat.exists_eq_succ_of_ne_zero hn.ne'; clear hn
  induction' n with n ihn
  · simpa only [Nat.zero_eq, ← Nat.one_eq_succ_zero, pow_one]
  · convert ihn.mul h <;> simp [pow_succ]

theorem IsLittleO.of_pow {f : α → 𝕜} {g : α → R} {n : ℕ} (h : (f ^ n) =o[l] (g ^ n)) (hn : n ≠ 0) :
    f =o[l] g :=
  IsLittleO.of_isBigOWith fun _c hc => (h.def' <| pow_pos hc _).of_pow hn le_rfl hc.le
/-! ### Inverse -/

/-- Inverting both sides reverses an asymptotic bound, provided zeros of `f` are also zeros
of `g` eventually (so that the degenerate `0⁻¹ = 0` cases line up). -/
theorem IsBigOWith.inv_rev {f : α → 𝕜} {g : α → 𝕜'} (h : IsBigOWith c l f g)
    (h₀ : ∀ᶠ x in l, f x = 0 → g x = 0) : IsBigOWith c l (fun x => (g x)⁻¹) fun x => (f x)⁻¹ := by
  refine IsBigOWith.of_bound (h.bound.mp (h₀.mono fun x h₀ hle => ?_))
  rcases eq_or_ne (f x) 0 with hx | hx
  · simp only [hx, h₀ hx, inv_zero, norm_zero, mul_zero, le_rfl]
  · have hc : 0 < c := pos_of_mul_pos_left ((norm_pos_iff.2 hx).trans_le hle) (norm_nonneg _)
    replace hle := inv_le_inv_of_le (norm_pos_iff.2 hx) hle
    simpa only [norm_inv, mul_inv, ← div_eq_inv_mul, div_le_iff hc] using hle

theorem IsBigO.inv_rev {f : α → 𝕜} {g : α → 𝕜'} (h : f =O[l] g)
    (h₀ : ∀ᶠ x in l, f x = 0 → g x = 0) : (fun x => (g x)⁻¹) =O[l] fun x => (f x)⁻¹ :=
  let ⟨_c, hc⟩ := h.isBigOWith
  (hc.inv_rev h₀).isBigO

theorem IsLittleO.inv_rev {f : α → 𝕜} {g : α → 𝕜'} (h : f =o[l] g)
    (h₀ : ∀ᶠ x in l, f x = 0 → g x = 0) : (fun x => (g x)⁻¹) =o[l] fun x => (f x)⁻¹ :=
  IsLittleO.of_isBigOWith fun _c hc => (h.def' hc).inv_rev h₀
/-! ### Scalar multiplication -/

section SMulConst

variable [Module R E'] [BoundedSMul R E']

/-- Scalar multiplication by a fixed `c'` is `O`-with-constant `‖c'‖` of the original. -/
theorem IsBigOWith.const_smul_self (c' : R) :
    IsBigOWith (‖c'‖) l (fun x => c' • f' x) f' :=
  isBigOWith_of_le' _ fun _ => norm_smul_le _ _

theorem IsBigO.const_smul_self (c' : R) : (fun x => c' • f' x) =O[l] f' :=
  (IsBigOWith.const_smul_self _).isBigO

theorem IsBigOWith.const_smul_left (h : IsBigOWith c l f' g) (c' : R) :
    IsBigOWith (‖c'‖ * c) l (fun x => c' • f' x) g :=
  .trans (.const_smul_self _) h (norm_nonneg _)

theorem IsBigO.const_smul_left (h : f' =O[l] g) (c : R) : (c • f') =O[l] g :=
  let ⟨_b, hb⟩ := h.isBigOWith
  (hb.const_smul_left _).isBigO

theorem IsLittleO.const_smul_left (h : f' =o[l] g) (c : R) : (c • f') =o[l] g :=
  (IsBigO.const_smul_self _).trans_isLittleO h

variable [Module 𝕜 E'] [BoundedSMul 𝕜 E']

-- Over the field `𝕜` scalar multiplication by a nonzero constant is invertible,
-- so it can be removed from either side; reduce to the `const_mul` lemmas via `norm_smul`.
theorem isBigO_const_smul_left {c : 𝕜} (hc : c ≠ 0) : (fun x => c • f' x) =O[l] g ↔ f' =O[l] g := by
  have cne0 : ‖c‖ ≠ 0 := norm_ne_zero_iff.mpr hc
  rw [← isBigO_norm_left]
  simp only [norm_smul]
  rw [isBigO_const_mul_left_iff cne0, isBigO_norm_left]

theorem isLittleO_const_smul_left {c : 𝕜} (hc : c ≠ 0) :
    (fun x => c • f' x) =o[l] g ↔ f' =o[l] g := by
  have cne0 : ‖c‖ ≠ 0 := norm_ne_zero_iff.mpr hc
  rw [← isLittleO_norm_left]
  simp only [norm_smul]
  rw [isLittleO_const_mul_left_iff cne0, isLittleO_norm_left]

theorem isBigO_const_smul_right {c : 𝕜} (hc : c ≠ 0) :
    (f =O[l] fun x => c • f' x) ↔ f =O[l] f' := by
  have cne0 : ‖c‖ ≠ 0 := norm_ne_zero_iff.mpr hc
  rw [← isBigO_norm_right]
  simp only [norm_smul]
  rw [isBigO_const_mul_right_iff cne0, isBigO_norm_right]

theorem isLittleO_const_smul_right {c : 𝕜} (hc : c ≠ 0) :
    (f =o[l] fun x => c • f' x) ↔ f =o[l] f' := by
  have cne0 : ‖c‖ ≠ 0 := norm_ne_zero_iff.mpr hc
  rw [← isLittleO_norm_right]
  simp only [norm_smul]
  rw [isLittleO_const_mul_right_iff cne0, isLittleO_norm_right]

end SMulConst
section SMul

variable [Module R E'] [BoundedSMul R E'] [Module 𝕜' F'] [BoundedSMul 𝕜' F']
variable {k₁ : α → R} {k₂ : α → 𝕜'}

/-- Pointwise scalar multiplication of asymptotic bounds: constants multiply.
Analogue of `IsBigOWith.mul` with `•` in place of `*`. -/
theorem IsBigOWith.smul (h₁ : IsBigOWith c l k₁ k₂) (h₂ : IsBigOWith c' l f' g') :
    IsBigOWith (c * c') l (fun x => k₁ x • f' x) fun x => k₂ x • g' x := by
  simp only [IsBigOWith_def] at *
  filter_upwards [h₁, h₂] with _ hx₁ hx₂
  apply le_trans (norm_smul_le _ _)
  convert mul_le_mul hx₁ hx₂ (norm_nonneg _) (le_trans (norm_nonneg _) hx₁) using 1
  rw [norm_smul, mul_mul_mul_comm]

theorem IsBigO.smul (h₁ : k₁ =O[l] k₂) (h₂ : f' =O[l] g') :
    (fun x => k₁ x • f' x) =O[l] fun x => k₂ x • g' x := by
  obtain ⟨c₁, h₁⟩ := h₁.isBigOWith
  obtain ⟨c₂, h₂⟩ := h₂.isBigOWith
  exact (h₁.smul h₂).isBigO

/-- `O` smul `o` is `o`: split the target constant `c` as `c' * (c / c')`. -/
theorem IsBigO.smul_isLittleO (h₁ : k₁ =O[l] k₂) (h₂ : f' =o[l] g') :
    (fun x => k₁ x • f' x) =o[l] fun x => k₂ x • g' x := by
  simp only [IsLittleO_def] at *
  intro c cpos
  rcases h₁.exists_pos with ⟨c', c'pos, hc'⟩
  exact (hc'.smul (h₂ (div_pos cpos c'pos))).congr_const (mul_div_cancel₀ _ (ne_of_gt c'pos))

/-- `o` smul `O` is `o`. -/
theorem IsLittleO.smul_isBigO (h₁ : k₁ =o[l] k₂) (h₂ : f' =O[l] g') :
    (fun x => k₁ x • f' x) =o[l] fun x => k₂ x • g' x := by
  simp only [IsLittleO_def] at *
  intro c cpos
  rcases h₂.exists_pos with ⟨c', c'pos, hc'⟩
  exact ((h₁ (div_pos cpos c'pos)).smul hc').congr_const (div_mul_cancel₀ _ (ne_of_gt c'pos))

theorem IsLittleO.smul (h₁ : k₁ =o[l] k₂) (h₂ : f' =o[l] g') :
    (fun x => k₁ x • f' x) =o[l] fun x => k₂ x • g' x :=
  h₁.smul_isBigO h₂.isBigO

end SMul
/-! ### Sum -/

section Sum

variable {ι : Type*} {A : ι → α → E'} {C : ι → ℝ} {s : Finset ι}

/-- A finite sum of `IsBigOWith` bounds is an `IsBigOWith` bound with the summed constant.
Proof by induction on the finset, using `IsBigOWith.add`. -/
theorem IsBigOWith.sum (h : ∀ i ∈ s, IsBigOWith (C i) l (A i) g) :
    IsBigOWith (∑ i ∈ s, C i) l (fun x => ∑ i ∈ s, A i x) g := by
  induction' s using Finset.induction_on with i s is IH
  · simp only [isBigOWith_zero', Finset.sum_empty, forall_true_iff]
  · simp only [is, Finset.sum_insert, not_false_iff]
    exact (h _ (Finset.mem_insert_self i s)).add (IH fun j hj => h _ (Finset.mem_insert_of_mem hj))

theorem IsBigO.sum (h : ∀ i ∈ s, A i =O[l] g) : (fun x => ∑ i ∈ s, A i x) =O[l] g := by
  simp only [IsBigO_def] at *
  choose! C hC using h
  exact ⟨_, IsBigOWith.sum hC⟩

theorem IsLittleO.sum (h : ∀ i ∈ s, A i =o[l] g') : (fun x => ∑ i ∈ s, A i x) =o[l] g' := by
  induction' s using Finset.induction_on with i s is IH
  · simp only [isLittleO_zero, Finset.sum_empty, forall_true_iff]
  · simp only [is, Finset.sum_insert, not_false_iff]
    exact (h _ (Finset.mem_insert_self i s)).add (IH fun j hj => h _ (Finset.mem_insert_of_mem hj))

end Sum
/-! ### Relation between `f = o(g)` and `f / g → 0` -/

/-- If `f = o(g)` then `f / g` tends to zero: `f / g = o(g / g) = o(1)`, and `g / g` is
bounded by `1` even where `g` vanishes (there the quotient is `0`). -/
theorem IsLittleO.tendsto_div_nhds_zero {f g : α → 𝕜} (h : f =o[l] g) :
    Tendsto (fun x => f x / g x) l (𝓝 0) :=
  (isLittleO_one_iff 𝕜).mp <| by
    calc
      (fun x => f x / g x) =o[l] fun x => g x / g x := by
        simpa only [div_eq_mul_inv] using h.mul_isBigO (isBigO_refl _ _)
      _ =O[l] fun _x => (1 : 𝕜) := isBigO_of_le _ fun x => by simp [div_self_le_one]

theorem IsLittleO.tendsto_inv_smul_nhds_zero [Module 𝕜 E'] [BoundedSMul 𝕜 E']
    {f : α → E'} {g : α → 𝕜}
    {l : Filter α} (h : f =o[l] g) : Tendsto (fun x => (g x)⁻¹ • f x) l (𝓝 0) := by
  simpa only [div_eq_inv_mul, ← norm_inv, ← norm_smul, ← tendsto_zero_iff_norm_tendsto_zero] using
    h.norm_norm.tendsto_div_nhds_zero

/-- Conversely, `f / g → 0` gives `f = o(g)` when zeros of `g` are (eventually) zeros of `f`,
so that multiplying the quotient back by `g` recovers `f`. -/
theorem isLittleO_iff_tendsto' {f g : α → 𝕜} (hgf : ∀ᶠ x in l, g x = 0 → f x = 0) :
    f =o[l] g ↔ Tendsto (fun x => f x / g x) l (𝓝 0) :=
  ⟨IsLittleO.tendsto_div_nhds_zero, fun h =>
    (((isLittleO_one_iff _).mpr h).mul_isBigO (isBigO_refl g l)).congr'
      (hgf.mono fun _x => div_mul_cancel_of_imp) (eventually_of_forall fun _x => one_mul _)⟩

theorem isLittleO_iff_tendsto {f g : α → 𝕜} (hgf : ∀ x, g x = 0 → f x = 0) :
    f =o[l] g ↔ Tendsto (fun x => f x / g x) l (𝓝 0) :=
  isLittleO_iff_tendsto' (eventually_of_forall hgf)

alias ⟨_, isLittleO_of_tendsto'⟩ := isLittleO_iff_tendsto'

alias ⟨_, isLittleO_of_tendsto⟩ := isLittleO_iff_tendsto

/-- A nonzero constant is `o(g)` iff `‖g‖` tends to infinity. -/
theorem isLittleO_const_left_of_ne {c : E''} (hc : c ≠ 0) :
    (fun _x => c) =o[l] g ↔ Tendsto (fun x => ‖g x‖) l atTop := by
  simp only [← isLittleO_one_left_iff ℝ]
  exact ⟨(isBigO_const_const (1 : ℝ) hc l).trans_isLittleO,
    (isBigO_const_one ℝ c l).trans_isLittleO⟩

/-- A constant is `o(g'')` iff it is zero or `‖g''‖` tends to infinity. -/
@[simp]
theorem isLittleO_const_left {c : E''} :
    (fun _x => c) =o[l] g'' ↔ c = 0 ∨ Tendsto (norm ∘ g'') l atTop := by
  rcases eq_or_ne c 0 with (rfl | hc)
  · simp only [isLittleO_zero, eq_self_iff_true, true_or_iff]
  · simp only [hc, false_or_iff, isLittleO_const_left_of_ne hc]; rfl

/-- On a nontrivial filter, a constant is `o` of a constant only if it is zero: a constant
norm cannot tend to infinity. -/
@[simp 1001] -- Porting note: increase priority so that this triggers before `isLittleO_const_left`
theorem isLittleO_const_const_iff [NeBot l] {d : E''} {c : F''} :
    ((fun _x => d) =o[l] fun _x => c) ↔ d = 0 := by
  have : ¬Tendsto (Function.const α ‖c‖) l atTop :=
    not_tendsto_atTop_of_tendsto_nhds tendsto_const_nhds
  simp only [isLittleO_const_left, or_iff_left_iff_imp]
  exact fun h => (this h).elim

@[simp]
theorem isLittleO_pure {x} : f'' =o[pure x] g'' ↔ f'' x = 0 :=
  calc
    f'' =o[pure x] g'' ↔ (fun _y : α => f'' x) =o[pure x] fun _ => g'' x := isLittleO_congr rfl rfl
    _ ↔ f'' x = 0 := isLittleO_const_const_iff

theorem isLittleO_const_id_cobounded (c : F'') :
    (fun _ => c) =o[Bornology.cobounded E''] id :=
  isLittleO_const_left.2 <| .inr tendsto_norm_cobounded_atTop

theorem isLittleO_const_id_atTop (c : E'') : (fun _x : ℝ => c) =o[atTop] id :=
  isLittleO_const_left.2 <| Or.inr tendsto_abs_atTop_atTop

theorem isLittleO_const_id_atBot (c : E'') : (fun _x : ℝ => c) =o[atBot] id :=
  isLittleO_const_left.2 <| Or.inr tendsto_abs_atBot_atTop
/-!
### Eventually (u / v) * v = u

If `u` and `v` are linked by an `IsBigOWith` relation, then we
eventually have `(u / v) * v = u`, even if `v` vanishes.
-/

section EventuallyMulDivCancel

variable {u v : α â ð}

/-- If `IsBigOWith c l u v` holds, then `(u / v) * v = u` eventually along `l`
(wherever `v` vanishes, the bound forces `u` to vanish too). -/
theorem IsBigOWith.eventually_mul_div_cancel (h : IsBigOWith c l u v) : u / v * v =á¶ [l] u :=
  Eventually.mono h.bound fun y hy => div_mul_cancel_of_imp fun hv => by simpa [hv] using hy

/-- If `u = O(v)` along `l`, then `(u / v) * v = u` eventually at `l`. -/
theorem IsBigO.eventually_mul_div_cancel (h : u =O[l] v) : u / v * v =á¶ [l] u :=
  let âš_c, hcâ© := h.isBigOWith
  hc.eventually_mul_div_cancel

/-- If `u = o(v)` along `l`, then `(u / v) * v = u` eventually at `l`. -/
theorem IsLittleO.eventually_mul_div_cancel (h : u =o[l] v) : u / v * v =á¶ [l] u :=
  (h.forall_isBigOWith zero_lt_one).eventually_mul_div_cancel

end EventuallyMulDivCancel

/-! ### Equivalent definitions of the form `â Ï, u =á¶ [l] Ï * v` in a `NormedField`. -/

section ExistsMulEq

variable {u v : α â ð}

/-- If `âÏâ` is eventually bounded by `c`, and `u =á¶ [l] Ï * v`, then we have `IsBigOWith c u v l`.
This does not require any assumptions on `c`, which is why we keep this version along with
`IsBigOWith_iff_exists_eq_mul`. -/
theorem isBigOWith_of_eq_mul {u v : α â R} (Ï : α â R) (hÏ : âá¶ x in l, âÏ xâ †c)
    (h : u =á¶ [l] Ï * v) :
    IsBigOWith c l u v := by
  simp only [IsBigOWith_def]
  refine h.symm.rw (fun x a => âaâ †c * âv xâ) (hÏ.mono fun x hx => ?_)
  simp only [Pi.mul_apply]
  refine (norm_mul_le _ _).trans ?_
  gcongr

/-- For a nonnegative constant `c`, `IsBigOWith c l u v` is equivalent to factoring `u` as
`Ï * v` with `âÏâ` eventually bounded by `c`. -/
theorem isBigOWith_iff_exists_eq_mul (hc : 0 †c) :
    IsBigOWith c l u v â â Ï : α â ð, (âá¶ x in l, âÏ xâ †c) â§ u =á¶ [l] Ï * v := by
  constructor
  · intro h
    -- The witness is the pointwise quotient `u / v`.
    use fun x => u x / v x
    refine âšEventually.mono h.bound fun y hy => ?_, h.eventually_mul_div_cancel.symmâ©
    simpa using div_le_of_nonneg_of_le_mul (norm_nonneg _) hc hy
  · rintro âšÏ, hÏ, hâ©
    exact isBigOWith_of_eq_mul Ï hÏ h

theorem IsBigOWith.exists_eq_mul (h : IsBigOWith c l u v) (hc : 0 †c) :
    â Ï : α â ð, (âá¶ x in l, âÏ xâ †c) â§ u =á¶ [l] Ï * v :=
  (isBigOWith_iff_exists_eq_mul hc).mp h

/-- `u =O[l] v` iff `u` factors eventually as `Ï * v` with `âÏâ` bounded along `l`. -/
theorem isBigO_iff_exists_eq_mul :
    u =O[l] v â â Ï : α â ð, l.IsBoundedUnder (· †·) (norm â Ï) â§ u =á¶ [l] Ï * v := by
  constructor
  · rintro h
    rcases h.exists_nonneg with âšc, hnnc, hcâ©
    rcases hc.exists_eq_mul hnnc with âšÏ, hÏ, huvÏâ©
    exact âšÏ, âšc, hÏâ©, huvÏâ©
  · rintro âšÏ, âšc, hÏâ©, huvÏâ©
    exact isBigO_iff_isBigOWith.2 âšc, isBigOWith_of_eq_mul Ï hÏ huvÏâ©

alias âšIsBigO.exists_eq_mul, _â© := isBigO_iff_exists_eq_mul

/-- `u =o[l] v` iff `u` factors eventually as `Ï * v` with `Ï` tending to `0` along `l`. -/
theorem isLittleO_iff_exists_eq_mul :
    u =o[l] v â â Ï : α â ð, Tendsto Ï l (ð 0) â§ u =á¶ [l] Ï * v := by
  constructor
  · exact fun h => âšfun x => u x / v x, h.tendsto_div_nhds_zero, h.eventually_mul_div_cancel.symmâ©
  · simp only [IsLittleO_def]
    rintro âšÏ, hÏ, huvÏâ© c hpos
    rw [NormedAddCommGroup.tendsto_nhds_zero] at hÏ
    exact isBigOWith_of_eq_mul _ ((hÏ c hpos).mono fun x => le_of_lt) huvÏ

alias âšIsLittleO.exists_eq_mul, _â© := isLittleO_iff_exists_eq_mul

end ExistsMulEq
/-! ### Miscellaneous lemmas -/

/-- If `f =O[l] g`, then the quotient norm `âf / gâ` is bounded along `l`. -/
theorem div_isBoundedUnder_of_isBigO {α : Type*} {l : Filter α} {f g : α â ð} (h : f =O[l] g) :
    IsBoundedUnder (· †·) l fun x => âf x / g xâ := by
  obtain âšc, hâ, hcâ© := h.exists_nonneg
  refine âšc, eventually_map.2 (hc.bound.mono fun x hx => ?_)â©
  rw [norm_div]
  exact div_le_of_nonneg_of_le_mul (norm_nonneg _) hâ hx

/-- Conversely, boundedness of `âf / gâ` gives `f =O[l] g`, provided `g` vanishes only
where `f` does, eventually along `l`. -/
theorem isBigO_iff_div_isBoundedUnder {α : Type*} {l : Filter α} {f g : α â ð}
    (hgf : âá¶ x in l, g x = 0 â f x = 0) :
    f =O[l] g â IsBoundedUnder (· †·) l fun x => âf x / g xâ := by
  refine âšdiv_isBoundedUnder_of_isBigO, fun h => ?_â©
  obtain âšc, hcâ© := h
  simp only [eventually_map, norm_div] at hc
  refine IsBigO.of_bound c (hc.mp <| hgf.mono fun x hxâ hxâ => ?_)
  -- Split on whether `g x` vanishes; if it does, so does `f x` by hypothesis.
  by_cases hgx : g x = 0
  · simp [hxâ hgx, hgx]
  · exact (div_le_iff (norm_pos_iff.2 hgx)).mp hxâ

/-- If `f / g` converges to any limit along `l`, then `f =O[l] g`. -/
theorem isBigO_of_div_tendsto_nhds {α : Type*} {l : Filter α} {f g : α â ð}
    (hgf : âá¶ x in l, g x = 0 â f x = 0) (c : ð) (H : Filter.Tendsto (f / g) l (ð c)) :
    f =O[l] g :=
  (isBigO_iff_div_isBoundedUnder hgf).2 <| H.norm.isBoundedUnder_le

/-- If `u =o[l] v` and `v` converges, then `u` tends to `0`. -/
theorem IsLittleO.tendsto_zero_of_tendsto {α E ð : Type*} [NormedAddCommGroup E] [NormedField ð]
    {u : α â E} {v : α â ð} {l : Filter α} {y : ð} (huv : u =o[l] v) (hv : Tendsto v l (ð y)) :
    Tendsto u l (ð 0) := by
  suffices h : u =o[l] fun _x => (1 : ð) by
    rwa [isLittleO_one_iff] at h
  exact huv.trans_isBigO (hv.isBigO_one ð)

/-- Near `0`, a higher power of `x` is little-o of a lower power. -/
theorem isLittleO_pow_pow {m n : â} (h : m < n) : (fun x : ð => x ^ n) =o[ð 0] fun x => x ^ m := by
  -- Write `n = m + p` with `p > 0` and factor out `x ^ m`.
  rcases lt_iff_exists_add.1 h with âšp, hp0 : 0 < p, rflâ©
  suffices (fun x : ð => x ^ m * x ^ p) =o[ð 0] fun x => x ^ m * 1 ^ p by
    simpa only [pow_add, one_pow, mul_one]
  exact IsBigO.mul_isLittleO (isBigO_refl _ _)
    (IsLittleO.pow ((isLittleO_one_iff _).2 tendsto_id) hp0)

/-- Norm version of `isLittleO_pow_pow`. -/
theorem isLittleO_norm_pow_norm_pow {m n : â} (h : m < n) :
    (fun x : E' => âxâ ^ n) =o[ð 0] fun x => âxâ ^ m :=
  (isLittleO_pow_pow h).comp_tendsto tendsto_norm_zero

/-- Near `0`, `x ^ n` with `n > 1` is little-o of `x` itself. -/
theorem isLittleO_pow_id {n : â} (h : 1 < n) : (fun x : ð => x ^ n) =o[ð 0] fun x => x := by
  convert isLittleO_pow_pow h (ð := ð)
  simp only [pow_one]

/-- Norm version of `isLittleO_pow_id`. -/
theorem isLittleO_norm_pow_id {n : â} (h : 1 < n) :
    (fun x : E' => âxâ ^ n) =o[ð 0] fun x => x := by
  have := @isLittleO_norm_pow_norm_pow E' _ _ _ h
  simp only [pow_one] at this
  exact isLittleO_norm_right.mp this

/-- If `f` is dominated by `âx - xââ ^ n` (with `n â 0`) within `s` near `xâ â s`,
then `f xâ = 0`. -/
theorem IsBigO.eq_zero_of_norm_pow_within {f : E'' â F''} {s : Set E''} {xâ : E''} {n : â}
    (h : f =O[ð[s] xâ] fun x => âx - xââ ^ n) (hxâ : xâ â s) (hn : n â 0) : f xâ = 0 :=
  mem_of_mem_nhdsWithin hxâ h.eq_zero_imp <| by simp_rw [sub_self, norm_zero, zero_pow hn]

/-- Unrestricted-neighborhood version of `IsBigO.eq_zero_of_norm_pow_within`. -/
theorem IsBigO.eq_zero_of_norm_pow {f : E'' â F''} {xâ : E''} {n : â}
    (h : f =O[ð xâ] fun x => âx - xââ ^ n) (hn : n â 0) : f xâ = 0 := by
  rw [â nhdsWithin_univ] at h
  exact h.eq_zero_of_norm_pow_within (mem_univ _) hn

/-- Near `xâ`, a higher power of `âx - xââ` is little-o of a lower power. -/
theorem isLittleO_pow_sub_pow_sub (xâ : E') {n m : â} (h : n < m) :
    (fun x => âx - xââ ^ m) =o[ð xâ] fun x => âx - xââ ^ n :=
  haveI : Tendsto (fun x => âx - xââ) (ð xâ) (ð 0) := by
    apply tendsto_norm_zero.comp
    rw [â sub_self xâ]
    exact tendsto_id.sub tendsto_const_nhds
  (isLittleO_pow_pow h).comp_tendsto this

/-- Near `xâ`, `âx - xââ ^ m` with `m > 1` is little-o of `x - xâ`. -/
theorem isLittleO_pow_sub_sub (xâ : E') {m : â} (h : 1 < m) :
    (fun x => âx - xââ ^ m) =o[ð xâ] fun x => x - xâ := by
  simpa only [isLittleO_norm_right, pow_one] using isLittleO_pow_sub_pow_sub xâ h

/-- From an `IsBigOWith` bound with constant `c < 1`, `fâ` is also controlled by the
difference `fâ - fâ`, with constant `1 / (1 - c)`. -/
theorem IsBigOWith.right_le_sub_of_lt_one {fâ fâ : α â E'} (h : IsBigOWith c l fâ fâ) (hc : c < 1) :
    IsBigOWith (1 / (1 - c)) l fâ fun x => fâ x - fâ x :=
  IsBigOWith.of_bound <|
    mem_of_superset h.bound fun x hx => by
      simp only [mem_setOf_eq] at hx â¢
      rw [mul_comm, one_div, â div_eq_mul_inv, _root_.le_div_iff, mul_sub, mul_one, mul_comm]
      · exact le_trans (sub_le_sub_left hx _) (norm_sub_norm_le _ _)
      · exact sub_pos.2 hc

/-- Additive analogue of `IsBigOWith.right_le_sub_of_lt_one`. -/
theorem IsBigOWith.right_le_add_of_lt_one {fâ fâ : α â E'} (h : IsBigOWith c l fâ fâ) (hc : c < 1) :
    IsBigOWith (1 / (1 - c)) l fâ fun x => fâ x + fâ x :=
  (h.neg_right.right_le_sub_of_lt_one hc).neg_right.of_neg_left.congr rfl (fun x ⊠rfl) fun x ⊠by
    rw [neg_sub, sub_neg_eq_add]

-- Deprecated `lt_1` spellings, kept as aliases for backwards compatibility.
@[deprecated (since := "2024-01-31")]
alias IsBigOWith.right_le_sub_of_lt_1 := IsBigOWith.right_le_sub_of_lt_one

@[deprecated (since := "2024-01-31")]
alias IsBigOWith.right_le_add_of_lt_1 := IsBigOWith.right_le_add_of_lt_one

/-- If `fâ =o[l] fâ`, then `fâ` is big-O of the difference `fâ - fâ`. -/
theorem IsLittleO.right_isBigO_sub {fâ fâ : α â E'} (h : fâ =o[l] fâ) :
    fâ =O[l] fun x => fâ x - fâ x :=
  ((h.def' one_half_pos).right_le_sub_of_lt_one one_half_lt_one).isBigO

/-- If `fâ =o[l] fâ`, then `fâ` is big-O of the sum `fâ + fâ`. -/
theorem IsLittleO.right_isBigO_add {fâ fâ : α â E'} (h : fâ =o[l] fâ) :
    fâ =O[l] fun x => fâ x + fâ x :=
  ((h.def' one_half_pos).right_le_add_of_lt_one one_half_lt_one).isBigO

/-- Variant of `IsLittleO.right_isBigO_add` stated with pointwise function addition. -/
theorem IsLittleO.right_isBigO_add' {fâ fâ : α â E'} (h : fâ =o[l] fâ) :
    fâ =O[l] (fâ + fâ) :=
  add_comm fâ fâ âž h.right_isBigO_add
/-- If `f x = O(g x)` along `cofinite`, then there exists a positive constant `C` such that
`âf xâ †C * âg xâ` whenever `g x â 0`. -/
theorem bound_of_isBigO_cofinite (h : f =O[cofinite] g'') :
    â C > 0, â âŠxâŠ, g'' x â 0 â âf xâ †C * âg'' xâ := by
  rcases h.exists_pos with âšC, Câ, hCâ©
  rw [IsBigOWith_def, eventually_cofinite] at hC
  -- Bound the finitely many exceptional points by the max of their quotients.
  rcases (hC.toFinset.image fun x => âf xâ / âg'' xâ).exists_le with âšC', hC'â©
  have : â x, C * âg'' xâ < âf xâ â âf xâ / âg'' xâ †C' := by simpa using hC'
  refine âšmax C C', lt_max_iff.2 (Or.inl Câ), fun x hâ => ?_â©
  rw [max_mul_of_nonneg _ _ (norm_nonneg _), le_max_iff, or_iff_not_imp_left, not_le]
  exact fun hx => (div_le_iff (norm_pos_iff.2 hâ)).1 (this _ hx)

/-- Along `cofinite`, big-O is equivalent to a global pointwise bound, provided `g''`
vanishes only where `f''` does. -/
theorem isBigO_cofinite_iff (h : â x, g'' x = 0 â f'' x = 0) :
    f'' =O[cofinite] g'' â â C, â x, âf'' xâ †C * âg'' xâ :=
  âšfun h' =>
    let âšC, _Câ, hCâ© := bound_of_isBigO_cofinite h'
    âšC, fun x => if hx : g'' x = 0 then by simp [h _ hx, hx] else hC hxâ©,
    fun h => (isBigO_top.2 h).mono le_topâ©

/-- Sequence version of `bound_of_isBigO_cofinite`, using `atTop` on `â`. -/
theorem bound_of_isBigO_nat_atTop {f : â â E} {g'' : â â E''} (h : f =O[atTop] g'') :
    â C > 0, â âŠxâŠ, g'' x â 0 â âf xâ †C * âg'' xâ :=
  bound_of_isBigO_cofinite <| by rwa [Nat.cofinite_eq_atTop]

/-- Sequence version of `isBigO_cofinite_iff`, using `atTop` on `â`. -/
theorem isBigO_nat_atTop_iff {f : â â E''} {g : â â F''} (h : â x, g x = 0 â f x = 0) :
    f =O[atTop] g â â C, â x, âf xâ †C * âg xâ := by
  rw [â Nat.cofinite_eq_atTop, isBigO_cofinite_iff h]

/-- A sequence is `O(1)` at `atTop` iff its norms admit a uniform bound. -/
theorem isBigO_one_nat_atTop_iff {f : â â E''} :
    f =O[atTop] (fun _n => 1 : â â â) â â C, â n, âf nâ †C :=
  Iff.trans (isBigO_nat_atTop_iff fun n h => (one_ne_zero h).elim) <| by
    simp only [norm_one, mul_one]

/-- An `IsBigOWith` bound into a finite product space holds iff it holds componentwise
(for a nonnegative constant). -/
theorem isBigOWith_pi {ι : Type*} [Fintype ι] {E' : ι â Type*} [â i, NormedAddCommGroup (E' i)]
    {f : α â â i, E' i} {C : â} (hC : 0 †C) :
    IsBigOWith C l f g' â â i, IsBigOWith C l (fun x => f x i) g' := by
  have : â x, 0 †C * âg' xâ := fun x => mul_nonneg hC (norm_nonneg _)
  simp only [isBigOWith_iff, pi_norm_le_iff_of_nonneg (this _), eventually_all]

/-- Big-O into a finite product space is equivalent to componentwise big-O. -/
@[simp]
theorem isBigO_pi {ι : Type*} [Fintype ι] {E' : ι â Type*} [â i, NormedAddCommGroup (E' i)]
    {f : α â â i, E' i} : f =O[l] g' â â i, (fun x => f x i) =O[l] g' := by
  simp only [isBigO_iff_eventually_isBigOWith, â eventually_all]
  exact eventually_congr (eventually_atTop.2 âš0, fun c => isBigOWith_piâ©)

/-- Little-o into a finite product space is equivalent to componentwise little-o. -/
@[simp]
theorem isLittleO_pi {ι : Type*} [Fintype ι] {E' : ι â Type*} [â i, NormedAddCommGroup (E' i)]
    {f : α â â i, E' i} : f =o[l] g' â â i, (fun x => f x i) =o[l] g' := by
  simp (config := { contextual := true }) only [IsLittleO_def, isBigOWith_pi, le_of_lt]
  exact âšfun h i c hc => h hc i, fun h c hc i => h i hcâ©

/-- Big-O at `atTop` descends along the natural-number cast into `R`. -/
theorem IsBigO.natCast_atTop {R : Type*} [StrictOrderedSemiring R] [Archimedean R]
    {f : R â E} {g : R â F} (h : f =O[atTop] g) :
    (fun (n : â) => f n) =O[atTop] (fun n => g n) :=
  IsBigO.comp_tendsto h tendsto_natCast_atTop_atTop

@[deprecated (since := "2024-04-17")]
alias IsBigO.nat_cast_atTop := IsBigO.natCast_atTop

/-- Little-o at `atTop` descends along the natural-number cast into `R`. -/
theorem IsLittleO.natCast_atTop {R : Type*} [StrictOrderedSemiring R] [Archimedean R]
    {f : R â E} {g : R â F} (h : f =o[atTop] g) :
    (fun (n : â) => f n) =o[atTop] (fun n => g n) :=
  IsLittleO.comp_tendsto h tendsto_natCast_atTop_atTop

@[deprecated (since := "2024-04-17")]
alias IsLittleO.nat_cast_atTop := IsLittleO.natCast_atTop

/-- Big-O at `atTop` restated as: eventually there is a constant working from that point on. -/
theorem isBigO_atTop_iff_eventually_exists {α : Type*} [SemilatticeSup α] [Nonempty α]
    {f : α â E} {g : α â F} : f =O[atTop] g â âá¶ nâ in atTop, â c, â n ⥠nâ, âf nâ †c * âg nâ := by
  rw [isBigO_iff, exists_eventually_atTop]

/-- Variant of `isBigO_atTop_iff_eventually_exists` with a positive constant on the left. -/
theorem isBigO_atTop_iff_eventually_exists_pos {α : Type*}
    [SemilatticeSup α] [Nonempty α] {f : α â G} {g : α â G'} :
    f =O[atTop] g â âá¶ nâ in atTop, â c > 0, â n ⥠nâ, c * âf nâ †âg nâ := by
  simp_rw [isBigO_iff'', â exists_prop, Subtype.exists', exists_eventually_atTop]
end Asymptotics

open Asymptotics

/-- A function that is big-O (along `cofinite`) of a summable function is summable. -/
theorem summable_of_isBigO {ι E} [SeminormedAddCommGroup E] [CompleteSpace E]
    {f : ι â E} {g : ι â â} (hg : Summable g) (h : f =O[cofinite] g) : Summable f :=
  let âšC, hCâ© := h.isBigOWith
  .of_norm_bounded_eventually (fun x => C * âg xâ) (hg.abs.mul_left _) hC.bound

/-- Sequence version of `summable_of_isBigO`, using `atTop` on `â`. -/
theorem summable_of_isBigO_nat {E} [SeminormedAddCommGroup E] [CompleteSpace E]
    {f : â â E} {g : â â â} (hg : Summable g) (h : f =O[atTop] g) : Summable f :=
  summable_of_isBigO hg <| Nat.cofinite_eq_atTop.symm âž h

/-- Post-composing with a map that is `O(id)` at `0` preserves summability of norms. -/
lemma Asymptotics.IsBigO.comp_summable_norm {ι E F : Type*}
    [SeminormedAddCommGroup E] [SeminormedAddCommGroup F] {f : E â F} {g : ι â E}
    (hf : f =O[ð 0] id) (hg : Summable (âg ·â)) : Summable (âf <| g ·â) :=
  summable_of_isBigO hg <| hf.norm_norm.comp_tendsto <|
    tendsto_zero_iff_norm_tendsto_zero.2 hg.tendsto_cofinite_zero
namespace PartialHomeomorph

variable {α : Type*} {β : Type*} [TopologicalSpace α] [TopologicalSpace β]
variable {E : Type*} [Norm E] {F : Type*} [Norm F]

/-- Transfer `IsBigOWith` over a `PartialHomeomorph`. -/
theorem isBigOWith_congr (e : PartialHomeomorph α β) {b : β} (hb : b â e.target) {f : β â E}
    {g : β â F} {C : â} : IsBigOWith C (ð b) f g â IsBigOWith C (ð (e.symm b)) (f â e) (g â e) :=
  âšfun h =>
    -- Forward: compose with `e`, which is continuous at `e.symm b` and maps it to `b`.
    h.comp_tendsto <| by
      have := e.continuousAt (e.map_target hb)
      rwa [ContinuousAt, e.rightInvOn hb] at this,
    fun h =>
    -- Backward: compose with `e.symm` and cancel `e â e.symm` eventually near `b`.
    (h.comp_tendsto (e.continuousAt_symm hb)).congr' rfl
      ((e.eventually_right_inverse hb).mono fun x hx => congr_arg f hx)
      ((e.eventually_right_inverse hb).mono fun x hx => congr_arg g hx)â©

/-- Transfer `IsBigO` over a `PartialHomeomorph`. -/
theorem isBigO_congr (e : PartialHomeomorph α β) {b : β} (hb : b â e.target) {f : β â E}
    {g : β â F} : f =O[ð b] g â (f â e) =O[ð (e.symm b)] (g â e) := by
  simp only [IsBigO_def]
  exact exists_congr fun C => e.isBigOWith_congr hb

/-- Transfer `IsLittleO` over a `PartialHomeomorph`. -/
theorem isLittleO_congr (e : PartialHomeomorph α β) {b : β} (hb : b â e.target) {f : β â E}
    {g : β â F} : f =o[ð b] g â (f â e) =o[ð (e.symm b)] (g â e) := by
  simp only [IsLittleO_def]
  exact forallâ_congr fun c _hc => e.isBigOWith_congr hb

end PartialHomeomorph
namespace Homeomorph

variable {α : Type*} {β : Type*} [TopologicalSpace α] [TopologicalSpace β]
variable {E : Type*} [Norm E] {F : Type*} [Norm F]

open Asymptotics

/-- Transfer `IsBigOWith` over a `Homeomorph`. -/
theorem isBigOWith_congr (e : α ââ β) {b : β} {f : β â E} {g : β â F} {C : â} :
    IsBigOWith C (ð b) f g â IsBigOWith C (ð (e.symm b)) (f â e) (g â e) :=
  -- A global homeomorphism is a partial homeomorphism with full target.
  e.toPartialHomeomorph.isBigOWith_congr trivial

/-- Transfer `IsBigO` over a `Homeomorph`. -/
theorem isBigO_congr (e : α ââ β) {b : β} {f : β â E} {g : β â F} :
    f =O[ð b] g â (f â e) =O[ð (e.symm b)] (g â e) := by
  simp only [IsBigO_def]
  exact exists_congr fun C => e.isBigOWith_congr

/-- Transfer `IsLittleO` over a `Homeomorph`. -/
theorem isLittleO_congr (e : α ââ β) {b : β} {f : β â E} {g : β â F} :
    f =o[ð b] g â (f â e) =o[ð (e.symm b)] (g â e) := by
  simp only [IsLittleO_def]
  exact forallâ_congr fun c _hc => e.isBigOWith_congr

end Homeomorph
|
Analysis\Asymptotics\SpecificAsymptotics.lean | /-
Copyright (c) 2021 Anatole Dedecker. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Anatole Dedecker
-/
import Mathlib.Analysis.Normed.Order.Basic
import Mathlib.Analysis.Asymptotics.Asymptotics
import Mathlib.Analysis.Normed.Module.Basic
/-!
# A collection of specific asymptotic results
This file contains specific lemmas about asymptotics which don't have their place in the general
theory developed in `Mathlib.Analysis.Asymptotics.Asymptotics`.
-/
open Filter Asymptotics
open Topology
section NormedField

/-- If `f : ð â E` is bounded in a punctured neighborhood of `a`, then `f(x) = o((x - a)â»Â¹)` as
`x â a`, `x â a`. -/
theorem Filter.IsBoundedUnder.isLittleO_sub_self_inv {ð E : Type*} [NormedField ð] [Norm E] {a : ð}
    {f : ð â E} (h : IsBoundedUnder (· †·) (ð[â ] a) (norm â f)) :
    f =o[ð[â ] a] fun x => (x - a)â»Â¹ := by
  -- A bounded function is `O(1)`, and `1 = o((x - a)â»Â¹)` since that inverse blows up at `a`.
  refine (h.isBigO_const (one_ne_zero' â)).trans_isLittleO (isLittleO_const_left.2 <| Or.inr ?_)
  simp only [(· â ·), norm_inv]
  exact (tendsto_norm_sub_self_punctured_nhds a).inv_tendsto_zero

end NormedField
section LinearOrderedField

variable {ð : Type*} [LinearOrderedField ð]

/-- Eventually at `atTop` (where `x > 0`, hence `x â 0`), `x ^ p / x ^ q` agrees with the
integer power `x ^ (p - q)`. -/
theorem pow_div_pow_eventuallyEq_atTop {p q : â} :
    (fun x : ð => x ^ p / x ^ q) =á¶ [atTop] fun x => x ^ ((p : â€) - q) := by
  apply (eventually_gt_atTop (0 : ð)).mono fun x hx => _
  intro x hx
  simp [zpow_subâ hx.ne']

/-- `atBot` analogue of `pow_div_pow_eventuallyEq_atTop`, using `x < 0` to get `x â 0`. -/
theorem pow_div_pow_eventuallyEq_atBot {p q : â} :
    (fun x : ð => x ^ p / x ^ q) =á¶ [atBot] fun x => x ^ ((p : â€) - q) := by
  apply (eventually_lt_atBot (0 : ð)).mono fun x hx => _
  intro x hx
  simp [zpow_subâ hx.ne]

/-- If `q < p`, the quotient `x ^ p / x ^ q` tends to infinity at `atTop`. -/
theorem tendsto_pow_div_pow_atTop_atTop {p q : â} (hpq : q < p) :
    Tendsto (fun x : ð => x ^ p / x ^ q) atTop atTop := by
  rw [tendsto_congr' pow_div_pow_eventuallyEq_atTop]
  apply tendsto_zpow_atTop_atTop
  omega

/-- If `p < q`, the quotient `x ^ p / x ^ q` tends to zero at `atTop`. -/
theorem tendsto_pow_div_pow_atTop_zero [TopologicalSpace ð] [OrderTopology ð] {p q : â}
    (hpq : p < q) : Tendsto (fun x : ð => x ^ p / x ^ q) atTop (ð 0) := by
  rw [tendsto_congr' pow_div_pow_eventuallyEq_atTop]
  apply tendsto_zpow_atTop_zero
  omega

end LinearOrderedField
section NormedLinearOrderedField

variable {ð : Type*} [NormedLinearOrderedField ð]

/-- At `atTop`, a smaller power of `x` is little-o of a larger power. -/
theorem Asymptotics.isLittleO_pow_pow_atTop_of_lt [OrderTopology ð] {p q : â} (hpq : p < q) :
    (fun x : ð => x ^ p) =o[atTop] fun x => x ^ q := by
  refine (isLittleO_iff_tendsto' ?_).mpr (tendsto_pow_div_pow_atTop_zero hpq)
  -- Eventually `x > 0`, so `x ^ q` never vanishes and the division criterion applies.
  exact (eventually_gt_atTop 0).mono fun x hx hxq => (pow_ne_zero q hx.ne' hxq).elim

/-- If `u =O[l] v` and `âuâ` tends to infinity along `l`, then so does `âvâ`. -/
theorem Asymptotics.IsBigO.trans_tendsto_norm_atTop {α : Type*} {u v : α â ð} {l : Filter α}
    (huv : u =O[l] v) (hu : Tendsto (fun x => âu xâ) l atTop) :
    Tendsto (fun x => âv xâ) l atTop := by
  rcases huv.exists_pos with âšc, hc, hcuvâ©
  rw [IsBigOWith] at hcuv
  convert Tendsto.atTop_div_const hc (tendsto_atTop_mono' l hcuv hu)
  rw [mul_div_cancel_leftâ _ hc.ne.symm]

end NormedLinearOrderedField
section Real

open Finset

/-- Summing little-o over `range n`: if `f =o[atTop] g` with `g` nonnegative and partial sums of
`g` tending to infinity, the partial sums of `f` are little-o of the partial sums of `g`. -/
theorem Asymptotics.IsLittleO.sum_range {α : Type*} [NormedAddCommGroup α] {f : â â α} {g : â â â}
    (h : f =o[atTop] g) (hg : 0 †g) (h'g : Tendsto (fun n => â i â range n, g i) atTop atTop) :
    (fun n => â i â range n, f i) =o[atTop] fun n => â i â range n, g i := by
  have A : â i, âg iâ = g i := fun i => Real.norm_of_nonneg (hg i)
  have B : â n, ââ i â range n, g iâ = â i â range n, g i := fun n => by
    rwa [Real.norm_eq_abs, abs_sum_of_nonneg']
  apply isLittleO_iff.2 fun ε εpos => _
  intro ε εpos
  -- Beyond some threshold `N`, each individual term satisfies `âf bâ †ε / 2 * g b`.
  obtain âšN, hNâ© : â N : â, â b : â, N †b â âf bâ †ε / 2 * g b := by
    simpa only [A, eventually_atTop] using isLittleO_iff.mp h (half_pos εpos)
  -- The fixed initial sum up to `N` is negligible against the divergent sums of `g`.
  have : (fun _ : â => â i â range N, f i) =o[atTop] fun n : â => â i â range n, g i := by
    apply isLittleO_const_left.2
    exact Or.inr (h'g.congr fun n => (B n).symm)
  filter_upwards [isLittleO_iff.1 this (half_pos εpos), Ici_mem_atTop N] with n hn Nn
  -- Split the sum at `N` and bound each part by `ε / 2` times the sum of `g`.
  calc
    ââ i â range n, f iâ = â(â i â range N, f i) + â i â Ico N n, f iâ := by
      rw [sum_range_add_sum_Ico _ Nn]
    _ †ââ i â range N, f iâ + ââ i â Ico N n, f iâ := norm_add_le _ _
    _ †ââ i â range N, f iâ + â i â Ico N n, ε / 2 * g i :=
      (add_le_add le_rfl (norm_sum_le_of_le _ fun i hi => hN _ (mem_Ico.1 hi).1))
    _ †ââ i â range N, f iâ + â i â range n, ε / 2 * g i := by
      gcongr
      apply sum_le_sum_of_subset_of_nonneg
      · rw [range_eq_Ico]
        exact Ico_subset_Ico (zero_le _) le_rfl
      · intro i _ _
        exact mul_nonneg (half_pos εpos).le (hg i)
    _ †ε / 2 * ââ i â range n, g iâ + ε / 2 * â i â range n, g i := by rw [â mul_sum]; gcongr
    _ = ε * ââ i â range n, g iâ := by
      simp only [B]
      ring

/-- If a sequence tends to zero, its partial sums are `o(n)`. -/
theorem Asymptotics.isLittleO_sum_range_of_tendsto_zero {α : Type*} [NormedAddCommGroup α]
    {f : â â α} (h : Tendsto f atTop (ð 0)) :
    (fun n => â i â range n, f i) =o[atTop] fun n => (n : â) := by
  have := ((isLittleO_one_iff â).2 h).sum_range fun i => zero_le_one
  simp only [sum_const, card_range, Nat.smul_one_eq_cast] at this
  exact this tendsto_natCast_atTop_atTop

/-- The Cesaro average of a converging sequence converges to the same limit. -/
theorem Filter.Tendsto.cesaro_smul {E : Type*} [NormedAddCommGroup E] [NormedSpace â E] {u : â â E}
    {l : E} (h : Tendsto u atTop (ð l)) :
    Tendsto (fun n : â => (nâ»Â¹ : â) ⢠â i â range n, u i) atTop (ð l) := by
  -- Reduce to: the averages of `u - l` tend to zero, via `isLittleO_sum_range_of_tendsto_zero`.
  rw [â tendsto_sub_nhds_zero_iff, â isLittleO_one_iff â]
  have := Asymptotics.isLittleO_sum_range_of_tendsto_zero (tendsto_sub_nhds_zero_iff.2 h)
  apply ((isBigO_refl (fun n : â => (n : â)â»Â¹) atTop).smul_isLittleO this).congr' _ _
  · filter_upwards [Ici_mem_atTop 1] with n npos
    have nposâ : (0 : â) < n := Nat.cast_pos.2 npos
    simp only [smul_sub, sum_sub_distrib, sum_const, card_range, sub_right_inj]
    rw [â Nat.cast_smul_eq_nsmul â, smul_smul, inv_mul_cancel nposâ.ne', one_smul]
  · filter_upwards [Ici_mem_atTop 1] with n npos
    have nposâ : (0 : â) < n := Nat.cast_pos.2 npos
    rw [Algebra.id.smul_eq_mul, inv_mul_cancel nposâ.ne']

/-- The Cesaro average of a converging sequence converges to the same limit. -/
theorem Filter.Tendsto.cesaro {u : â â â} {l : â} (h : Tendsto u atTop (ð l)) :
    Tendsto (fun n : â => (nâ»Â¹ : â) * â i â range n, u i) atTop (ð l) :=
  h.cesaro_smul

end Real
|
Analysis\Asymptotics\SuperpolynomialDecay.lean | /-
Copyright (c) 2021 Devon Tuma. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Devon Tuma
-/
import Mathlib.Algebra.Polynomial.Eval
import Mathlib.Analysis.Asymptotics.Asymptotics
import Mathlib.Analysis.Normed.Order.Basic
import Mathlib.Topology.Algebra.Order.LiminfLimsup
/-!
# Super-Polynomial Function Decay
This file defines a predicate `Asymptotics.SuperpolynomialDecay f` for a function satisfying
one of following equivalent definitions (The definition is in terms of the first condition):
* `x ^ n * f` tends to `ð 0` for all (or sufficiently large) naturals `n`
* `|x ^ n * f|` tends to `ð 0` for all naturals `n` (`superpolynomialDecay_iff_abs_tendsto_zero`)
* `|x ^ n * f|` is bounded for all naturals `n` (`superpolynomialDecay_iff_abs_isBoundedUnder`)
* `f` is `o(x ^ c)` for all integers `c` (`superpolynomialDecay_iff_isLittleO`)
* `f` is `O(x ^ c)` for all integers `c` (`superpolynomialDecay_iff_isBigO`)
These conditions are all equivalent to conditions in terms of polynomials, replacing `x ^ c` with
`p(x)` or `p(x)â»Â¹` as appropriate, since asymptotically `p(x)` behaves like `X ^ p.natDegree`.
These further equivalences are not proven in mathlib but would be good future projects.
The definition of superpolynomial decay for `f : α â β` is relative to a parameter `k : α â β`.
Super-polynomial decay then means `f x` decays faster than `(k x) ^ c` for all integers `c`.
Equivalently `f x` decays faster than `p.eval (k x)` for all polynomials `p : β[X]`.
The definition is also relative to a filter `l : Filter α` where the decay rate is compared.
When the map `k` is given by `n ⊠ân : â â â` this defines negligible functions:
https://en.wikipedia.org/wiki/Negligible_function
When the map `k` is given by `(râ,...,râ) ⊠râ*...*râ : ââ¿ â â` this is equivalent
to the definition of rapidly decreasing functions given here:
https://ncatlab.org/nlab/show/rapidly+decreasing+function
# Main Theorems
* `SuperpolynomialDecay.polynomial_mul` says that if `f(x)` is negligible,
then so is `p(x) * f(x)` for any polynomial `p`.
* `superpolynomialDecay_iff_zpow_tendsto_zero` gives an equivalence between definitions in terms
of decaying faster than `k(x) ^ n` for all naturals `n` or `k(x) ^ c` for all integer `c`.
-/
namespace Asymptotics

open Topology Polynomial

open Filter

/-- `f` has superpolynomial decay in parameter `k` along filter `l` if
`k ^ n * f` tends to zero at `l` for all naturals `n` -/
def SuperpolynomialDecay {α β : Type*} [TopologicalSpace β] [CommSemiring β] (l : Filter α)
    (k : α â β) (f : α â β) :=
  â n : â, Tendsto (fun a : α => k a ^ n * f a) l (ð 0)
variable {α β : Type*} {l : Filter α} {k : α â β} {f g g' : α â β}

section CommSemiring

variable [TopologicalSpace β] [CommSemiring β]

/-- Superpolynomial decay is stable under eventual equality along `l`. -/
theorem SuperpolynomialDecay.congr' (hf : SuperpolynomialDecay l k f) (hfg : f =á¶ [l] g) :
    SuperpolynomialDecay l k g := fun z =>
  (hf z).congr' (EventuallyEq.mul (EventuallyEq.refl l _) hfg)

/-- Superpolynomial decay is stable under pointwise equality. -/
theorem SuperpolynomialDecay.congr (hf : SuperpolynomialDecay l k f) (hfg : â x, f x = g x) :
    SuperpolynomialDecay l k g := fun z =>
  (hf z).congr fun x => (congr_arg fun a => k x ^ z * a) <| hfg x

/-- The zero function has superpolynomial decay for any filter and parameter. -/
@[simp]
theorem superpolynomialDecay_zero (l : Filter α) (k : α â β) : SuperpolynomialDecay l k 0 :=
  fun z => by simpa only [Pi.zero_apply, mul_zero] using tendsto_const_nhds

/-- The sum of two superpolynomially-decaying functions decays superpolynomially. -/
theorem SuperpolynomialDecay.add [ContinuousAdd β] (hf : SuperpolynomialDecay l k f)
    (hg : SuperpolynomialDecay l k g) : SuperpolynomialDecay l k (f + g) := fun z => by
  simpa only [mul_add, add_zero, Pi.add_apply] using (hf z).add (hg z)

/-- The product of two superpolynomially-decaying functions decays superpolynomially
(all the powers of `k` are put on the first factor). -/
theorem SuperpolynomialDecay.mul [ContinuousMul β] (hf : SuperpolynomialDecay l k f)
    (hg : SuperpolynomialDecay l k g) : SuperpolynomialDecay l k (f * g) := fun z => by
  simpa only [mul_assoc, one_mul, mul_zero, pow_zero] using (hf z).mul (hg 0)

theorem SuperpolynomialDecay.mul_const [ContinuousMul β] (hf : SuperpolynomialDecay l k f) (c : β) :
    SuperpolynomialDecay l k fun n => f n * c := fun z => by
  simpa only [â mul_assoc, zero_mul] using Tendsto.mul_const c (hf z)

theorem SuperpolynomialDecay.const_mul [ContinuousMul β] (hf : SuperpolynomialDecay l k f) (c : β) :
    SuperpolynomialDecay l k fun n => c * f n :=
  (hf.mul_const c).congr fun _ => mul_comm _ _

/-- Multiplying by the parameter `k` preserves superpolynomial decay:
the extra factor is absorbed into the exponent via `k ^ z * (k * f) = k ^ (z + 1) * f`. -/
theorem SuperpolynomialDecay.param_mul (hf : SuperpolynomialDecay l k f) :
    SuperpolynomialDecay l k (k * f) := fun z =>
  tendsto_nhds.2 fun s hs hs0 =>
    l.sets_of_superset ((tendsto_nhds.1 (hf <| z + 1)) s hs hs0) fun x hx => by
      simpa only [Set.mem_preimage, Pi.mul_apply, â mul_assoc, â pow_succ] using hx

theorem SuperpolynomialDecay.mul_param (hf : SuperpolynomialDecay l k f) :
    SuperpolynomialDecay l k (f * k) :=
  hf.param_mul.congr fun _ => mul_comm _ _

/-- Iterated version of `SuperpolynomialDecay.param_mul`, for natural powers of `k`. -/
theorem SuperpolynomialDecay.param_pow_mul (hf : SuperpolynomialDecay l k f) (n : â) :
    SuperpolynomialDecay l k (k ^ n * f) := by
  induction' n with n hn
  · simpa only [Nat.zero_eq, one_mul, pow_zero] using hf
  · simpa only [pow_succ', mul_assoc] using hn.param_mul

theorem SuperpolynomialDecay.mul_param_pow (hf : SuperpolynomialDecay l k f) (n : â) :
    SuperpolynomialDecay l k (f * k ^ n) :=
  (hf.param_pow_mul n).congr fun _ => mul_comm _ _

/-- Multiplying by any polynomial evaluated at `k` preserves superpolynomial decay,
by induction on the monomials of `p`. -/
theorem SuperpolynomialDecay.polynomial_mul [ContinuousAdd β] [ContinuousMul β]
    (hf : SuperpolynomialDecay l k f) (p : β[X]) :
    SuperpolynomialDecay l k fun x => (p.eval <| k x) * f x :=
  Polynomial.induction_on' p (fun p q hp hq => by simpa [add_mul] using hp.add hq) fun n c => by
    simpa [mul_assoc] using (hf.param_pow_mul n).const_mul c

theorem SuperpolynomialDecay.mul_polynomial [ContinuousAdd β] [ContinuousMul β]
    (hf : SuperpolynomialDecay l k f) (p : β[X]) :
    SuperpolynomialDecay l k fun x => f x * (p.eval <| k x) :=
  (hf.polynomial_mul p).congr fun _ => mul_comm _ _

end CommSemiring
section OrderedCommSemiring

variable [TopologicalSpace β] [OrderedCommSemiring β] [OrderTopology β]

/-- Squeeze lemma: if `f` is eventually sandwiched between two superpolynomially-decaying
functions (and `k` is eventually nonnegative), then `f` decays superpolynomially. -/
theorem SuperpolynomialDecay.trans_eventuallyLE (hk : 0 â€á¶ [l] k) (hg : SuperpolynomialDecay l k g)
    (hg' : SuperpolynomialDecay l k g') (hfg : g â€á¶ [l] f) (hfg' : f â€á¶ [l] g') :
    SuperpolynomialDecay l k f := fun z =>
  tendsto_of_tendsto_of_tendsto_of_le_of_le' (hg z) (hg' z)
    (hfg.mp (hk.mono fun _ hx hx' => mul_le_mul_of_nonneg_left hx' (pow_nonneg hx z)))
    (hfg'.mp (hk.mono fun _ hx hx' => mul_le_mul_of_nonneg_left hx' (pow_nonneg hx z)))

end OrderedCommSemiring

section LinearOrderedCommRing

variable [TopologicalSpace β] [LinearOrderedCommRing β] [OrderTopology β]
variable (l k f)

/-- Superpolynomial decay can be stated via absolute values tending to zero. -/
theorem superpolynomialDecay_iff_abs_tendsto_zero :
    SuperpolynomialDecay l k f â â n : â, Tendsto (fun a : α => |k a ^ n * f a|) l (ð 0) :=
  âšfun h z => (tendsto_zero_iff_abs_tendsto_zero _).1 (h z), fun h z =>
    (tendsto_zero_iff_abs_tendsto_zero _).2 (h z)â©

/-- Superpolynomial decay of `f` in `k` is equivalent to that of `|f|` in `|k|`. -/
theorem superpolynomialDecay_iff_superpolynomialDecay_abs :
    SuperpolynomialDecay l k f â SuperpolynomialDecay l (fun a => |k a|) fun a => |f a| :=
  (superpolynomialDecay_iff_abs_tendsto_zero l k f).trans
    (by simp_rw [SuperpolynomialDecay, abs_mul, abs_pow])

variable {l k f}

/-- Eventual domination in absolute value transfers superpolynomial decay. -/
theorem SuperpolynomialDecay.trans_eventually_abs_le (hf : SuperpolynomialDecay l k f)
    (hfg : abs â g â€á¶ [l] abs â f) : SuperpolynomialDecay l k g := by
  rw [superpolynomialDecay_iff_abs_tendsto_zero] at hf â¢
  refine fun z =>
    tendsto_of_tendsto_of_tendsto_of_le_of_le' tendsto_const_nhds (hf z)
      (eventually_of_forall fun x => abs_nonneg _) (hfg.mono fun x hx => ?_)
  calc
    |k x ^ z * g x| = |k x ^ z| * |g x| := abs_mul (k x ^ z) (g x)
    _ †|k x ^ z| * |f x| := by gcongr _ * ?_; exact hx
    _ = |k x ^ z * f x| := (abs_mul (k x ^ z) (f x)).symm

/-- Pointwise domination in absolute value transfers superpolynomial decay. -/
theorem SuperpolynomialDecay.trans_abs_le (hf : SuperpolynomialDecay l k f)
    (hfg : â x, |g x| †|f x|) : SuperpolynomialDecay l k g :=
  hf.trans_eventually_abs_le (eventually_of_forall hfg)

end LinearOrderedCommRing

section Field

variable [TopologicalSpace β] [Field β] (l k f)

/-- Multiplying on the right by a nonzero constant does not affect superpolynomial decay. -/
theorem superpolynomialDecay_mul_const_iff [ContinuousMul β] {c : β} (hc0 : c â 0) :
    (SuperpolynomialDecay l k fun n => f n * c) â SuperpolynomialDecay l k f :=
  âšfun h => (h.mul_const câ»Â¹).congr fun x => by simp [mul_assoc, mul_inv_cancel hc0], fun h =>
    h.mul_const câ©

/-- Multiplying on the left by a nonzero constant does not affect superpolynomial decay. -/
theorem superpolynomialDecay_const_mul_iff [ContinuousMul β] {c : β} (hc0 : c â 0) :
    (SuperpolynomialDecay l k fun n => c * f n) â SuperpolynomialDecay l k f :=
  âšfun h => (h.const_mul câ»Â¹).congr fun x => by simp [â mul_assoc, inv_mul_cancel hc0], fun h =>
    h.const_mul câ©

variable {l k f}

end Field
section LinearOrderedField
variable [TopologicalSpace β] [LinearOrderedField β] [OrderTopology β]
variable (f)
theorem superpolynomialDecay_iff_abs_isBoundedUnder (hk : Tendsto k l atTop) :
SuperpolynomialDecay l k f â
â z : â, IsBoundedUnder (· †·) l fun a : α => |k a ^ z * f a| := by
refine
âšfun h z => Tendsto.isBoundedUnder_le (Tendsto.abs (h z)), fun h =>
(superpolynomialDecay_iff_abs_tendsto_zero l k f).2 fun z => ?_â©
obtain âšm, hmâ© := h (z + 1)
have h1 : Tendsto (fun _ : α => (0 : β)) l (ð 0) := tendsto_const_nhds
have h2 : Tendsto (fun a : α => |(k a)â»Â¹| * m) l (ð 0) :=
zero_mul m âž
Tendsto.mul_const m ((tendsto_zero_iff_abs_tendsto_zero _).1 hk.inv_tendsto_atTop)
refine
tendsto_of_tendsto_of_tendsto_of_le_of_le' h1 h2 (eventually_of_forall fun x => abs_nonneg _)
((eventually_map.1 hm).mp ?_)
refine (hk.eventually_ne_atTop 0).mono fun x hk0 hx => ?_
refine Eq.trans_le ?_ (mul_le_mul_of_nonneg_left hx <| abs_nonneg (k x)â»Â¹)
rw [â abs_mul, â mul_assoc, pow_succ', â mul_assoc, inv_mul_cancel hk0, one_mul]
theorem superpolynomialDecay_iff_zpow_tendsto_zero (hk : Tendsto k l atTop) :
SuperpolynomialDecay l k f â â z : â€, Tendsto (fun a : α => k a ^ z * f a) l (ð 0) := by
refine âšfun h z => ?_, fun h n => by simpa only [zpow_natCast] using h (n : â€)â©
by_cases hz : 0 †z
· unfold Tendsto
lift z to â using hz
simpa using h z
· have : Tendsto (fun a => k a ^ z) l (ð 0) :=
Tendsto.comp (tendsto_zpow_atTop_zero (not_le.1 hz)) hk
have h : Tendsto f l (ð 0) := by simpa using h 0
exact zero_mul (0 : β) ➠this.mul h
variable {f}

/-- Multiplying by any fixed integer power of the parameter `k` preserves superpolynomial decay:
the exponent shift is absorbed by quantifying over all exponents. -/
theorem SuperpolynomialDecay.param_zpow_mul (hk : Tendsto k l atTop)
    (hf : SuperpolynomialDecay l k f) (z : ℀) :
    SuperpolynomialDecay l k fun a => k a ^ z * f a := by
  rw [superpolynomialDecay_iff_zpow_tendsto_zero _ hk] at hf ⊢
  refine fun z' => (hf <| z' + z).congr' ((hk.eventually_ne_atTop 0).mono fun x hx => ?_)
  simp [zpow_add₀ hx, mul_assoc, Pi.mul_apply]

/-- Right-multiplication version of `SuperpolynomialDecay.param_zpow_mul`. -/
theorem SuperpolynomialDecay.mul_param_zpow (hk : Tendsto k l atTop)
    (hf : SuperpolynomialDecay l k f) (z : ℀) : SuperpolynomialDecay l k fun a => f a * k a ^ z :=
  (hf.param_zpow_mul hk z).congr fun _ => mul_comm _ _

/-- Dividing by the parameter on the left preserves superpolynomial decay. -/
theorem SuperpolynomialDecay.inv_param_mul (hk : Tendsto k l atTop)
    (hf : SuperpolynomialDecay l k f) : SuperpolynomialDecay l k (k⁻¹ * f) := by
  simpa using hf.param_zpow_mul hk (-1)

/-- Dividing by the parameter on the right preserves superpolynomial decay. -/
theorem SuperpolynomialDecay.param_inv_mul (hk : Tendsto k l atTop)
    (hf : SuperpolynomialDecay l k f) : SuperpolynomialDecay l k (f * k⁻¹) :=
  (hf.inv_param_mul hk).congr fun _ => mul_comm _ _

variable (f)

/-- When `k → atTop`, multiplying by `k` neither creates nor destroys superpolynomial decay. -/
theorem superpolynomialDecay_param_mul_iff (hk : Tendsto k l atTop) :
    SuperpolynomialDecay l k (k * f) ↔ SuperpolynomialDecay l k f :=
  ⟚fun h =>
    (h.inv_param_mul hk).congr'
      ((hk.eventually_ne_atTop 0).mono fun x hx => by simp [← mul_assoc, inv_mul_cancel hx]),
    fun h => h.param_mul⟩

/-- Right-multiplication version of `superpolynomialDecay_param_mul_iff`. -/
theorem superpolynomialDecay_mul_param_iff (hk : Tendsto k l atTop) :
    SuperpolynomialDecay l k (f * k) ↔ SuperpolynomialDecay l k f := by
  simpa [mul_comm k] using superpolynomialDecay_param_mul_iff f hk

/-- Iterated version of `superpolynomialDecay_param_mul_iff`, by induction on `n`. -/
theorem superpolynomialDecay_param_pow_mul_iff (hk : Tendsto k l atTop) (n : ℕ) :
    SuperpolynomialDecay l k (k ^ n * f) ↔ SuperpolynomialDecay l k f := by
  induction' n with n hn
  · simp
  · simpa [pow_succ, ← mul_comm k, mul_assoc,
      superpolynomialDecay_param_mul_iff (k ^ n * f) hk] using hn

/-- Right-multiplication version of `superpolynomialDecay_param_pow_mul_iff`. -/
theorem superpolynomialDecay_mul_param_pow_iff (hk : Tendsto k l atTop) (n : ℕ) :
    SuperpolynomialDecay l k (f * k ^ n) ↔ SuperpolynomialDecay l k f := by
  simpa [mul_comm f] using superpolynomialDecay_param_pow_mul_iff f hk n

variable {f}
end LinearOrderedField

section NormedLinearOrderedField

variable [TopologicalSpace β] [NormedLinearOrderedField β]
variable (l k f)

/-- Superpolynomial decay is equivalent to the norms `‖k ^ n * f‖` tending to `0`. -/
theorem superpolynomialDecay_iff_norm_tendsto_zero :
    SuperpolynomialDecay l k f ↔ ∀ n : ℕ, Tendsto (fun a : α => ‖k a ^ n * f a‖) l (𝓝 0) :=
  ⟚fun h z => tendsto_zero_iff_norm_tendsto_zero.1 (h z), fun h z =>
    tendsto_zero_iff_norm_tendsto_zero.2 (h z)⟩

/-- Superpolynomial decay of `f` against `k` is equivalent to superpolynomial decay of `‖f ·‖`
against `‖k ·‖`. -/
theorem superpolynomialDecay_iff_superpolynomialDecay_norm :
    SuperpolynomialDecay l k f ↔ SuperpolynomialDecay l (fun a => ‖k a‖) fun a => ‖f a‖ :=
  (superpolynomialDecay_iff_norm_tendsto_zero l k f).trans (by simp [SuperpolynomialDecay])

variable {l k}
variable [OrderTopology β]

/-- Superpolynomial decay is equivalent to being `O(k ^ z)` for every integer `z`. -/
theorem superpolynomialDecay_iff_isBigO (hk : Tendsto k l atTop) :
    SuperpolynomialDecay l k f ↔ ∀ z : ℀, f =O[l] fun a : α => k a ^ z := by
  refine (superpolynomialDecay_iff_zpow_tendsto_zero f hk).trans ?_
  have hk0 : ∀ᶠ x in l, k x ≠ 0 := hk.eventually_ne_atTop 0
  refine ⟚fun h z => ?_, fun h z => ?_⟩
  · -- `f / k ^ z → 0` (rewrite the quotient as `k ^ (-z) * f`), hence `f =O[l] k ^ z`.
    refine isBigO_of_div_tendsto_nhds (hk0.mono fun x hx hxz ↊ absurd hxz (zpow_ne_zero _ hx)) 0 ?_
    have : (fun a : α => k a ^ z)⁻¹ = fun a : α => k a ^ (-z) := funext fun x => by simp
    rw [div_eq_mul_inv, mul_comm f, this]
    exact h (-z)
  · -- `k ^ z * f =O[l] k⁻¹` and `k⁻¹ → 0` together give the required limit.
    suffices (fun a : α => k a ^ z * f a) =O[l] fun a : α => (k a)⁻¹ from
      IsBigO.trans_tendsto this hk.inv_tendsto_atTop
    refine
      ((isBigO_refl (fun a => k a ^ z) l).mul (h (-(z + 1)))).trans
        (IsBigO.of_bound 1 <| hk0.mono fun a ha0 => ?_)
    simp only [one_mul, neg_add z 1, zpow_add₀ ha0, ← mul_assoc, zpow_neg,
      mul_inv_cancel (zpow_ne_zero z ha0), zpow_one]
    rfl

/-- Superpolynomial decay is equivalent to being `o(k ^ z)` for every integer `z`: the big-O
bound against `k ^ (z - 1)` self-improves to little-o against `k ^ z`. -/
theorem superpolynomialDecay_iff_isLittleO (hk : Tendsto k l atTop) :
    SuperpolynomialDecay l k f ↔ ∀ z : ℀, f =o[l] fun a : α => k a ^ z := by
  refine ⟚fun h z => ?_, fun h => (superpolynomialDecay_iff_isBigO f hk).2 fun z => (h z).isBigO⟩
  have hk0 : ∀ᶠ x in l, k x ≠ 0 := hk.eventually_ne_atTop 0
  have : (fun _ : α => (1 : β)) =o[l] k :=
    isLittleO_of_tendsto' (hk0.mono fun x hkx hkx' => absurd hkx' hkx)
      (by simpa using hk.inv_tendsto_atTop)
  have : f =o[l] fun x : α => k x * k x ^ (z - 1) := by
    simpa using this.mul_isBigO ((superpolynomialDecay_iff_isBigO f hk).1 h <| z - 1)
  refine this.trans_isBigO (IsBigO.of_bound 1 (hk0.mono fun x hkx => le_of_eq ?_))
  rw [one_mul, zpow_sub_one₀ hkx, mul_comm (k x), mul_assoc, inv_mul_cancel hkx, mul_one]

end NormedLinearOrderedField
end Asymptotics
|
Analysis\Asymptotics\Theta.lean | /-
Copyright (c) 2022 Yury G. Kudryashov. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yury G. Kudryashov
-/
import Mathlib.Analysis.Asymptotics.Asymptotics
import Mathlib.Analysis.Normed.Module.Basic
/-!
# Asymptotic equivalence up to a constant
In this file we define `Asymptotics.IsTheta l f g` (notation: `f =Î[l] g`) as
`f =O[l] g â§ g =O[l] f`, then prove basic properties of this equivalence relation.
-/
open Filter
open Topology
namespace Asymptotics
variable {α : Type*} {β : Type*} {E : Type*} {F : Type*} {G : Type*} {E' : Type*}
{F' : Type*} {G' : Type*} {E'' : Type*} {F'' : Type*} {G'' : Type*} {R : Type*}
{R' : Type*} {ð : Type*} {ð' : Type*}
variable [Norm E] [Norm F] [Norm G]
variable [SeminormedAddCommGroup E'] [SeminormedAddCommGroup F'] [SeminormedAddCommGroup G']
[NormedAddCommGroup E''] [NormedAddCommGroup F''] [NormedAddCommGroup G''] [SeminormedRing R]
[SeminormedRing R']
variable [NormedField ð] [NormedField ð']
variable {c c' câ câ : â} {f : α â E} {g : α â F} {k : α â G}
variable {f' : α â E'} {g' : α â F'} {k' : α â G'}
variable {f'' : α â E''} {g'' : α â F''}
variable {l l' : Filter α}
/-- We say that `f` is `Θ(g)` along a filter `l` (notation: `f =Θ[l] g`) if `f =O[l] g` and
`g =O[l] f`. -/
def IsTheta (l : Filter α) (f : α → E) (g : α → F) : Prop :=
  IsBigO l f g ∧ IsBigO l g f

@[inherit_doc]
notation:100 f " =Θ[" l "] " g:100 => IsTheta l f g

/-- Mutual big-O bounds upgrade to a `Θ` equivalence. -/
theorem IsBigO.antisymm (h₁ : f =O[l] g) (h₂ : g =O[l] f) : f =Θ[l] g :=
  ⟚h₁, h₂⟩

-- Projections of the defining conjunction.
lemma IsTheta.isBigO (h : f =Θ[l] g) : f =O[l] g := h.1

lemma IsTheta.isBigO_symm (h : f =Θ[l] g) : g =O[l] f := h.2

/-- `Θ` is reflexive. -/
@[refl]
theorem isTheta_refl (f : α → E) (l : Filter α) : f =Θ[l] f :=
  ⟚isBigO_refl _ _, isBigO_refl _ _⟩

theorem isTheta_rfl : f =Θ[l] f :=
  isTheta_refl _ _

/-- `Θ` is symmetric (swap the two big-O bounds). -/
@[symm]
nonrec theorem IsTheta.symm (h : f =Θ[l] g) : g =Θ[l] f :=
  h.symm

theorem isTheta_comm : f =Θ[l] g ↔ g =Θ[l] f :=
  ⟚fun h ↊ h.symm, fun h ↊ h.symm⟩
-- `Θ` is transitive and mixes with `O`, `o`, and `=ᶠ` in the expected ways; the accompanying
-- `Trans` instances make these lemmas available to `calc` blocks.
@[trans]
theorem IsTheta.trans {f : α → E} {g : α → F'} {k : α → G} (h₁ : f =Θ[l] g) (h₂ : g =Θ[l] k) :
    f =Θ[l] k :=
  ⟚h₁.1.trans h₂.1, h₂.2.trans h₁.2⟩

-- Porting note (#10754): added instance
instance : Trans (α := α → E) (β := α → F') (γ := α → G) (IsTheta l) (IsTheta l) (IsTheta l) :=
  ⟚IsTheta.trans⟩

@[trans]
theorem IsBigO.trans_isTheta {f : α → E} {g : α → F'} {k : α → G} (h₁ : f =O[l] g)
    (h₂ : g =Θ[l] k) : f =O[l] k :=
  h₁.trans h₂.1

-- Porting note (#10754): added instance
instance : Trans (α := α → E) (β := α → F') (γ := α → G) (IsBigO l) (IsTheta l) (IsBigO l) :=
  ⟚IsBigO.trans_isTheta⟩

@[trans]
theorem IsTheta.trans_isBigO {f : α → E} {g : α → F'} {k : α → G} (h₁ : f =Θ[l] g)
    (h₂ : g =O[l] k) : f =O[l] k :=
  h₁.1.trans h₂

-- Porting note (#10754): added instance
instance : Trans (α := α → E) (β := α → F') (γ := α → G) (IsTheta l) (IsBigO l) (IsBigO l) :=
  ⟚IsTheta.trans_isBigO⟩

@[trans]
theorem IsLittleO.trans_isTheta {f : α → E} {g : α → F} {k : α → G'} (h₁ : f =o[l] g)
    (h₂ : g =Θ[l] k) : f =o[l] k :=
  h₁.trans_isBigO h₂.1

-- Porting note (#10754): added instance
instance : Trans (α := α → E) (β := α → F') (γ := α → G') (IsLittleO l) (IsTheta l) (IsLittleO l) :=
  ⟚IsLittleO.trans_isTheta⟩

@[trans]
theorem IsTheta.trans_isLittleO {f : α → E} {g : α → F'} {k : α → G} (h₁ : f =Θ[l] g)
    (h₂ : g =o[l] k) : f =o[l] k :=
  h₁.1.trans_isLittleO h₂

-- Porting note (#10754): added instance
instance : Trans (α := α → E) (β := α → F') (γ := α → G) (IsTheta l) (IsLittleO l) (IsLittleO l) :=
  ⟚IsTheta.trans_isLittleO⟩

@[trans]
theorem IsTheta.trans_eventuallyEq {f : α → E} {g₁ g₂ : α → F} (h : f =Θ[l] g₁) (hg : g₁ =ᶠ[l] g₂) :
    f =Θ[l] g₂ :=
  ⟚h.1.trans_eventuallyEq hg, hg.symm.trans_isBigO h.2⟩

-- Porting note (#10754): added instance
instance : Trans (α := α → E) (β := α → F) (γ := α → F) (IsTheta l) (EventuallyEq l) (IsTheta l) :=
  ⟚IsTheta.trans_eventuallyEq⟩

@[trans]
theorem _root_.Filter.EventuallyEq.trans_isTheta {f₁ f₂ : α → E} {g : α → F} (hf : f₁ =ᶠ[l] f₂)
    (h : f₂ =Θ[l] g) : f₁ =Θ[l] g :=
  ⟚hf.trans_isBigO h.1, h.2.trans_eventuallyEq hf.symm⟩

-- Porting note (#10754): added instance
instance : Trans (α := α → E) (β := α → E) (γ := α → F) (EventuallyEq l) (IsTheta l) (IsTheta l) :=
  ⟚EventuallyEq.trans_isTheta⟩

/-- Eventually equal functions are `Θ`-equivalent. -/
lemma _root_.Filter.EventuallyEq.isTheta {f g : α → E} (h : f =ᶠ[l] g) : f =Θ[l] g :=
  h.trans_isTheta isTheta_rfl
-- Taking norms on either side does not change the `Θ` class.
@[simp]
theorem isTheta_norm_left : (fun x ↊ ‖f' x‖) =Θ[l] g ↔ f' =Θ[l] g := by simp [IsTheta]

@[simp]
theorem isTheta_norm_right : (f =Θ[l] fun x ↊ ‖g' x‖) ↔ f =Θ[l] g' := by simp [IsTheta]

alias ⟚IsTheta.of_norm_left, IsTheta.norm_left⟩ := isTheta_norm_left

alias ⟚IsTheta.of_norm_right, IsTheta.norm_right⟩ := isTheta_norm_right

/-- Functions with eventually equal norms are `Θ`-equivalent (with constant `1` both ways). -/
theorem isTheta_of_norm_eventuallyEq (h : (fun x ↊ ‖f x‖) =ᶠ[l] fun x ↊ ‖g x‖) : f =Θ[l] g :=
  ⟚IsBigO.of_bound 1 <| by simpa only [one_mul] using h.le,
    IsBigO.of_bound 1 <| by simpa only [one_mul] using h.symm.le⟩

/-- Variant of `isTheta_of_norm_eventuallyEq` with a real-valued right-hand side. -/
theorem isTheta_of_norm_eventuallyEq' {g : α → ℝ} (h : (fun x ↊ ‖f' x‖) =ᶠ[l] g) : f' =Θ[l] g :=
  isTheta_of_norm_eventuallyEq <| h.mono fun x hx ↊ by simp only [← hx, norm_norm]

-- `Θ`-equivalent functions may be substituted on either side of `o` and `O`.
theorem IsTheta.isLittleO_congr_left (h : f' =Θ[l] g') : f' =o[l] k ↔ g' =o[l] k :=
  ⟚h.symm.trans_isLittleO, h.trans_isLittleO⟩

theorem IsTheta.isLittleO_congr_right (h : g' =Θ[l] k') : f =o[l] g' ↔ f =o[l] k' :=
  ⟚fun H ↊ H.trans_isTheta h, fun H ↊ H.trans_isTheta h.symm⟩

theorem IsTheta.isBigO_congr_left (h : f' =Θ[l] g') : f' =O[l] k ↔ g' =O[l] k :=
  ⟚h.symm.trans_isBigO, h.trans_isBigO⟩

theorem IsTheta.isBigO_congr_right (h : g' =Θ[l] k') : f =O[l] g' ↔ f =O[l] k' :=
  ⟚fun H ↊ H.trans_isTheta h, fun H ↊ H.trans_isTheta h.symm⟩

lemma IsTheta.isTheta_congr_left (h : f' =Θ[l] g') : f' =Θ[l] k ↔ g' =Θ[l] k :=
  h.isBigO_congr_left.and h.isBigO_congr_right

lemma IsTheta.isTheta_congr_right (h : f' =Θ[l] g') : k =Θ[l] f' ↔ k =Θ[l] g' :=
  h.isBigO_congr_right.and h.isBigO_congr_left

/-- `Θ` along `l` restricts to any smaller filter `l'`. -/
theorem IsTheta.mono (h : f =Θ[l] g) (hl : l' ≀ l) : f =Θ[l'] g :=
  ⟚h.1.mono hl, h.2.mono hl⟩

/-- `Θ` along two filters combines to `Θ` along their supremum. -/
theorem IsTheta.sup (h : f' =Θ[l] g') (h' : f' =Θ[l'] g') : f' =Θ[l ⊔ l'] g' :=
  ⟚h.1.sup h'.1, h.2.sup h'.2⟩

@[simp]
theorem isTheta_sup : f' =Θ[l ⊔ l'] g' ↔ f' =Θ[l] g' ∧ f' =Θ[l'] g' :=
  ⟚fun h ↊ ⟚h.mono le_sup_left, h.mono le_sup_right⟩, fun h ↊ h.1.sup h.2⟩
/-- `Θ`-equivalent functions (into normed groups) vanish at the same (eventual) points. -/
theorem IsTheta.eq_zero_iff (h : f'' =Θ[l] g'') : ∀ᶠ x in l, f'' x = 0 ↔ g'' x = 0 :=
  h.1.eq_zero_imp.mp <| h.2.eq_zero_imp.mono fun _ ↊ Iff.intro

/-- `Θ`-equivalent functions tend to `0` together. -/
theorem IsTheta.tendsto_zero_iff (h : f'' =Θ[l] g'') :
    Tendsto f'' l (𝓝 0) ↔ Tendsto g'' l (𝓝 0) := by
  simp only [← isLittleO_one_iff ℝ, h.isLittleO_congr_left]

/-- `Θ`-equivalent functions blow up in norm together. -/
theorem IsTheta.tendsto_norm_atTop_iff (h : f' =Θ[l] g') :
    Tendsto (norm ∘ f') l atTop ↔ Tendsto (norm ∘ g') l atTop := by
  simp only [Function.comp, ← isLittleO_const_left_of_ne (one_ne_zero' ℝ), h.isLittleO_congr_right]

/-- `Θ`-equivalent functions are norm-bounded together. -/
theorem IsTheta.isBoundedUnder_le_iff (h : f' =Θ[l] g') :
    IsBoundedUnder (· ≀ ·) l (norm ∘ f') ↔ IsBoundedUnder (· ≀ ·) l (norm ∘ g') := by
  simp only [← isBigO_const_of_ne (one_ne_zero' ℝ), h.isBigO_congr_left]

/-- `Θ` is compatible with pointwise scalar multiplication. -/
theorem IsTheta.smul [NormedSpace 𝕜 E'] [NormedSpace 𝕜' F'] {f₁ : α → 𝕜} {f₂ : α → 𝕜'} {g₁ : α → E'}
    {g₂ : α → F'} (hf : f₁ =Θ[l] f₂) (hg : g₁ =Θ[l] g₂) :
    (fun x ↊ f₁ x ⹠g₁ x) =Θ[l] fun x ↊ f₂ x ⹠g₂ x :=
  ⟚hf.1.smul hg.1, hf.2.smul hg.2⟩

/-- `Θ` is compatible with pointwise multiplication. -/
theorem IsTheta.mul {f₁ f₂ : α → 𝕜} {g₁ g₂ : α → 𝕜'} (h₁ : f₁ =Θ[l] g₁) (h₂ : f₂ =Θ[l] g₂) :
    (fun x ↊ f₁ x * f₂ x) =Θ[l] fun x ↊ g₁ x * g₂ x :=
  h₁.smul h₂

/-- `Θ` is compatible with pointwise inversion; the mutual `O`-bounds supply the required
vanishing implications for `IsBigO.inv_rev`. -/
theorem IsTheta.inv {f : α → 𝕜} {g : α → 𝕜'} (h : f =Θ[l] g) :
    (fun x ↊ (f x)⁻¹) =Θ[l] fun x ↊ (g x)⁻¹ :=
  ⟚h.2.inv_rev h.1.eq_zero_imp, h.1.inv_rev h.2.eq_zero_imp⟩

@[simp]
theorem isTheta_inv {f : α → 𝕜} {g : α → 𝕜'} :
    ((fun x ↊ (f x)⁻¹) =Θ[l] fun x ↊ (g x)⁻¹) ↔ f =Θ[l] g :=
  ⟚fun h ↊ by simpa only [inv_inv] using h.inv, IsTheta.inv⟩

/-- `Θ` is compatible with pointwise division. -/
theorem IsTheta.div {f₁ f₂ : α → 𝕜} {g₁ g₂ : α → 𝕜'} (h₁ : f₁ =Θ[l] g₁) (h₂ : f₂ =Θ[l] g₂) :
    (fun x ↊ f₁ x / f₂ x) =Θ[l] fun x ↊ g₁ x / g₂ x := by
  simpa only [div_eq_mul_inv] using h₁.mul h₂.inv

/-- `Θ` is compatible with natural powers. -/
theorem IsTheta.pow {f : α → 𝕜} {g : α → 𝕜'} (h : f =Θ[l] g) (n : ℕ) :
    (fun x ↊ f x ^ n) =Θ[l] fun x ↊ g x ^ n :=
  ⟚h.1.pow n, h.2.pow n⟩

/-- `Θ` is compatible with integer powers (split on the sign of the exponent). -/
theorem IsTheta.zpow {f : α → 𝕜} {g : α → 𝕜'} (h : f =Θ[l] g) (n : ℀) :
    (fun x ↊ f x ^ n) =Θ[l] fun x ↊ g x ^ n := by
  cases n
  · simpa only [Int.ofNat_eq_coe, zpow_natCast] using h.pow _
  · simpa only [zpow_negSucc] using (h.pow _).inv
/-- Any two nonzero constants are `Θ`-equivalent. -/
theorem isTheta_const_const {c₁ : E''} {c₂ : F''} (h₁ : c₁ ≠ 0) (h₂ : c₂ ≠ 0) :
    (fun _ : α ↊ c₁) =Θ[l] fun _ ↊ c₂ :=
  ⟚isBigO_const_const _ h₂ _, isBigO_const_const _ h₁ _⟩

@[simp]
theorem isTheta_const_const_iff [NeBot l] {c₁ : E''} {c₂ : F''} :
    ((fun _ : α ↊ c₁) =Θ[l] fun _ ↊ c₂) ↔ (c₁ = 0 ↔ c₂ = 0) := by
  simpa only [IsTheta, isBigO_const_const_iff, ← iff_def] using Iff.comm

-- Only (eventually) zero functions are `Θ`-equivalent to the zero function.
@[simp]
theorem isTheta_zero_left : (fun _ ↊ (0 : E')) =Θ[l] g'' ↔ g'' =ᶠ[l] 0 := by
  simp only [IsTheta, isBigO_zero, isBigO_zero_right_iff, true_and_iff]

@[simp]
theorem isTheta_zero_right : (f'' =Θ[l] fun _ ↊ (0 : F')) ↔ f'' =ᶠ[l] 0 :=
  isTheta_comm.trans isTheta_zero_left

/-- Scaling by a nonzero constant does not change the `Θ` class (left-hand side). -/
theorem isTheta_const_smul_left [NormedSpace 𝕜 E'] {c : 𝕜} (hc : c ≠ 0) :
    (fun x ↊ c ⹠f' x) =Θ[l] g ↔ f' =Θ[l] g :=
  and_congr (isBigO_const_smul_left hc) (isBigO_const_smul_right hc)

alias ⟚IsTheta.of_const_smul_left, IsTheta.const_smul_left⟩ := isTheta_const_smul_left

/-- Scaling by a nonzero constant does not change the `Θ` class (right-hand side). -/
theorem isTheta_const_smul_right [NormedSpace 𝕜 F'] {c : 𝕜} (hc : c ≠ 0) :
    (f =Θ[l] fun x ↊ c ⹠g' x) ↔ f =Θ[l] g' :=
  and_congr (isBigO_const_smul_right hc) (isBigO_const_smul_left hc)

alias ⟚IsTheta.of_const_smul_right, IsTheta.const_smul_right⟩ := isTheta_const_smul_right

-- Multiplicative versions of the two lemmas above, via `smul_eq_mul`.
theorem isTheta_const_mul_left {c : 𝕜} {f : α → 𝕜} (hc : c ≠ 0) :
    (fun x ↊ c * f x) =Θ[l] g ↔ f =Θ[l] g := by
  simpa only [← smul_eq_mul] using isTheta_const_smul_left hc

alias ⟚IsTheta.of_const_mul_left, IsTheta.const_mul_left⟩ := isTheta_const_mul_left

theorem isTheta_const_mul_right {c : 𝕜} {g : α → 𝕜} (hc : c ≠ 0) :
    (f =Θ[l] fun x ↊ c * g x) ↔ f =Θ[l] g := by
  simpa only [← smul_eq_mul] using isTheta_const_smul_right hc

alias ⟚IsTheta.of_const_mul_right, IsTheta.const_mul_right⟩ := isTheta_const_mul_right

/-- Adding an `o`-small perturbation does not change the `Θ` class: if `f₁ =o[l] f₂` then
`f₂ =Θ[l] f₁ + f₂`. -/
theorem IsLittleO.right_isTheta_add {f₁ f₂ : α → E'} (h : f₁ =o[l] f₂) :
    f₂ =Θ[l] (f₁ + f₂) :=
  ⟚h.right_isBigO_add, h.add_isBigO (isBigO_refl _ _)⟩

/-- Commuted version of `IsLittleO.right_isTheta_add`. -/
theorem IsLittleO.right_isTheta_add' {f₁ f₂ : α → E'} (h : f₁ =o[l] f₂) :
    f₂ =Θ[l] (f₂ + f₁) :=
  add_comm f₁ f₂ ▞ h.right_isTheta_add

lemma IsTheta.add_isLittleO {f₁ f₂ : α → E'} {g : α → F}
    (hΘ : f₁ =Θ[l] g) (ho : f₂ =o[l] g) : (f₁ + f₂) =Θ[l] g :=
  (ho.trans_isTheta hΘ.symm).right_isTheta_add'.symm.trans hΘ

lemma IsLittleO.add_isTheta {f₁ f₂ : α → E'} {g : α → F}
    (ho : f₁ =o[l] g) (hΘ : f₂ =Θ[l] g) : (f₁ + f₂) =Θ[l] g :=
  add_comm f₁ f₂ ▞ hΘ.add_isLittleO ho
end Asymptotics
|
Analysis\BoxIntegral\Basic.lean | /-
Copyright (c) 2021 Yury Kudryashov. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yury Kudryashov
-/
import Mathlib.Analysis.BoxIntegral.Partition.Filter
import Mathlib.Analysis.BoxIntegral.Partition.Measure
import Mathlib.Analysis.Oscillation
import Mathlib.Topology.UniformSpace.Compact
import Mathlib.Data.Bool.Basic
/-!
# Integrals of Riemann, Henstock-Kurzweil, and McShane
In this file we define the integral of a function over a box in `ââ¿`. The same definition works for
Riemann, Henstock-Kurzweil, and McShane integrals.
As usual, we represent `ââ¿` as the type of functions `ι â â` for some finite type `ι`. A rectangular
box `(l, u]` in `ââ¿` is defined to be the set `{x : ι â â | â i, l i < x i â§ x i †u i}`, see
`BoxIntegral.Box`.
Let `vol` be a box-additive function on boxes in `ââ¿` with codomain `E âL[â] F`. Given a function
`f : ââ¿ â E`, a box `I` and a tagged partition `Ï` of this box, the *integral sum* of `f` over `Ï`
with respect to the volume `vol` is the sum of `vol J (f (Ï.tag J))` over all boxes of `Ï`. Here
`Ï.tag J` is the point (tag) in `ââ¿` associated with the box `J`.
The integral is defined as the limit of integral sums along a filter. Different filters correspond
to different integration theories. In order to avoid code duplication, all our definitions and
theorems take an argument `l : BoxIntegral.IntegrationParams`. This is a type that holds three
boolean values, and encodes eight filters including those corresponding to Riemann,
Henstock-Kurzweil, and McShane integrals.
Following the design of infinite sums (see `hasSum` and `tsum`), we define a predicate
`BoxIntegral.HasIntegral` and a function `BoxIntegral.integral` that returns a vector satisfying
the predicate or zero if the function is not integrable.
Then we prove some basic properties of box integrals (linearity, a formula for the integral of a
constant). We also prove a version of the Henstock-Sacks inequality (see
`BoxIntegral.Integrable.dist_integralSum_le_of_memBaseSet` and
`BoxIntegral.Integrable.dist_integralSum_sum_integral_le_of_memBaseSet_of_iUnion_eq`), prove
integrability of continuous functions, and provide a criterion for integrability w.r.t. a
non-Riemann filter (e.g., Henstock-Kurzweil and McShane).
## Notation
- `ââ¿`: local notation for `ι â â`
## Tags
integral
-/
open scoped Topology NNReal Filter Uniformity BoxIntegral
open Set Finset Function Filter Metric BoxIntegral.IntegrationParams
noncomputable section
namespace BoxIntegral
universe u v w
variable {ι : Type u} {E : Type v} {F : Type w} [NormedAddCommGroup E] [NormedSpace â E]
[NormedAddCommGroup F] [NormedSpace â F] {I J : Box ι} {Ï : TaggedPrepartition I}
open TaggedPrepartition
local notation "ââ¿" => ι â â
/-!
### Integral sum and its basic properties
-/
/-- The integral sum of `f : ℝⁿ → E` over a tagged prepartition `π` w.r.t. box-additive volume
`vol` with codomain `E →L[ℝ] F` is the sum of `vol J (f (π.tag J))` over all boxes of `π`. -/
def integralSum (f : ℝⁿ → E) (vol : ι →ᵇᵃ E →L[ℝ] F) (π : TaggedPrepartition I) : F :=
  ∑ J ∈ π.boxes, vol J (f (π.tag J))

/-- The integral sum over `π.biUnionTagged πi` splits as a sum of integral sums over the pieces:
tags of the refined partition agree with the tags chosen by `πi`. -/
theorem integralSum_biUnionTagged (f : ℝⁿ → E) (vol : ι →ᵇᵃ E →L[ℝ] F) (π : Prepartition I)
    (πi : ∀ J, TaggedPrepartition J) :
    integralSum f vol (π.biUnionTagged πi) = ∑ J ∈ π.boxes, integralSum f vol (πi J) := by
  refine (π.sum_biUnion_boxes _ _).trans <| sum_congr rfl fun J hJ => sum_congr rfl fun J' hJ' => ?_
  rw [π.tag_biUnionTagged hJ hJ']

/-- Refining each box of a tagged prepartition by a *partition* of that box (keeping the original
tags) does not change the integral sum, by box-additivity of `vol`. -/
theorem integralSum_biUnion_partition (f : ℝⁿ → E) (vol : ι →ᵇᵃ E →L[ℝ] F)
    (π : TaggedPrepartition I) (πi : ∀ J, Prepartition J) (hπi : ∀ J ∈ π, (πi J).IsPartition) :
    integralSum f vol (π.biUnionPrepartition πi) = integralSum f vol π := by
  refine (π.sum_biUnion_boxes _ _).trans (sum_congr rfl fun J hJ => ?_)
  calc
    -- The tag of each refined box is the tag of the original box containing it,
    -- so the inner sum collapses to `vol J` applied to a single value of `f`.
    (∑ J' ∈ (πi J).boxes, vol J' (f (π.tag <| π.toPrepartition.biUnionIndex πi J'))) =
        ∑ J' ∈ (πi J).boxes, vol J' (f (π.tag J)) :=
      sum_congr rfl fun J' hJ' => by rw [Prepartition.biUnionIndex_of_mem _ hJ hJ']
    _ = vol J (f (π.tag J)) :=
      (vol.map ⟚⟚fun g : E →L[ℝ] F => g (f (π.tag J)), rfl⟩, fun _ _ => rfl⟩).sum_partition_boxes
        le_top (hπi J hJ)

/-- Taking the infimum with a partition of `I` does not change the integral sum. -/
theorem integralSum_inf_partition (f : ℝⁿ → E) (vol : ι →ᵇᵃ E →L[ℝ] F) (π : TaggedPrepartition I)
    {π' : Prepartition I} (h : π'.IsPartition) :
    integralSum f vol (π.infPrepartition π') = integralSum f vol π :=
  integralSum_biUnion_partition f vol π _ fun _J hJ => h.restrict (Prepartition.le_of_mem _ hJ)

open Classical in
/-- Grouping the boxes of `π` by the fibers of any function `g` and summing the corresponding
integral sums recovers the integral sum over `π`. -/
theorem integralSum_fiberwise {α} (g : Box ι → α) (f : ℝⁿ → E) (vol : ι →ᵇᵃ E →L[ℝ] F)
    (π : TaggedPrepartition I) :
    (∑ y ∈ π.boxes.image g, integralSum f vol (π.filter (g · = y))) = integralSum f vol π :=
  π.sum_fiberwise g fun J => vol J (f <| π.tag J)

/-- The difference of the integral sums over two partitions of `I` is a single sum over the boxes
of their common refinement, comparing the values of `f` at the two tags of each box. -/
theorem integralSum_sub_partitions (f : ℝⁿ → E) (vol : ι →ᵇᵃ E →L[ℝ] F)
    {π₁ π₂ : TaggedPrepartition I} (h₁ : π₁.IsPartition) (h₂ : π₂.IsPartition) :
    integralSum f vol π₁ - integralSum f vol π₂ =
      ∑ J ∈ (π₁.toPrepartition ⊓ π₂.toPrepartition).boxes,
        (vol J (f <| (π₁.infPrepartition π₂.toPrepartition).tag J) -
          vol J (f <| (π₂.infPrepartition π₁.toPrepartition).tag J)) := by
  rw [← integralSum_inf_partition f vol π₁ h₂, ← integralSum_inf_partition f vol π₂ h₁,
    integralSum, integralSum, Finset.sum_sub_distrib]
  simp only [infPrepartition_toPrepartition, inf_comm]

/-- Integral sums are additive over disjoint unions of tagged prepartitions. -/
@[simp]
theorem integralSum_disjUnion (f : ℝⁿ → E) (vol : ι →ᵇᵃ E →L[ℝ] F) {π₁ π₂ : TaggedPrepartition I}
    (h : Disjoint π₁.iUnion π₂.iUnion) :
    integralSum f vol (π₁.disjUnion π₂ h) = integralSum f vol π₁ + integralSum f vol π₂ := by
  refine (Prepartition.sum_disj_union_boxes h _).trans
    (congr_arg₂ (· + ·) (sum_congr rfl fun J hJ => ?_) (sum_congr rfl fun J hJ => ?_))
  · rw [disjUnion_tag_of_mem_left _ hJ]
  · rw [disjUnion_tag_of_mem_right _ hJ]

-- The integral sum is linear in `f` (pointwise addition, negation, and real scalar action).
@[simp]
theorem integralSum_add (f g : ℝⁿ → E) (vol : ι →ᵇᵃ E →L[ℝ] F) (π : TaggedPrepartition I) :
    integralSum (f + g) vol π = integralSum f vol π + integralSum g vol π := by
  simp only [integralSum, Pi.add_apply, (vol _).map_add, Finset.sum_add_distrib]

@[simp]
theorem integralSum_neg (f : ℝⁿ → E) (vol : ι →ᵇᵃ E →L[ℝ] F) (π : TaggedPrepartition I) :
    integralSum (-f) vol π = -integralSum f vol π := by
  simp only [integralSum, Pi.neg_apply, (vol _).map_neg, Finset.sum_neg_distrib]

@[simp]
theorem integralSum_smul (c : ℝ) (f : ℝⁿ → E) (vol : ι →ᵇᵃ E →L[ℝ] F) (π : TaggedPrepartition I) :
    integralSum (c ⹠f) vol π = c ⹠integralSum f vol π := by
  simp only [integralSum, Finset.smul_sum, Pi.smul_apply, ContinuousLinearMap.map_smul]
variable [Fintype ι]

/-!
### Basic integrability theory
-/

/-- The predicate `HasIntegral I l f vol y` says that `y` is the integral of `f` over `I` along `l`
w.r.t. volume `vol`. This means that integral sums of `f` tend to `𝓝 y` along
`BoxIntegral.IntegrationParams.toFilteriUnion I ⊀`. -/
def HasIntegral (I : Box ι) (l : IntegrationParams) (f : ℝⁿ → E) (vol : ι →ᵇᵃ E →L[ℝ] F) (y : F) :
    Prop :=
  Tendsto (integralSum f vol) (l.toFilteriUnion I ⊀) (𝓝 y)

/-- A function is integrable if there exists a vector that satisfies the `HasIntegral`
predicate. -/
def Integrable (I : Box ι) (l : IntegrationParams) (f : ℝⁿ → E) (vol : ι →ᵇᵃ E →L[ℝ] F) :=
  ∃ y, HasIntegral I l f vol y

open Classical in
/-- The integral of a function `f` over a box `I` along a filter `l` w.r.t. a volume `vol`.
Returns zero on non-integrable functions. -/
def integral (I : Box ι) (l : IntegrationParams) (f : ℝⁿ → E) (vol : ι →ᵇᵃ E →L[ℝ] F) :=
  if h : Integrable I l f vol then h.choose else 0
-- Porting note: using the above notation ℝⁿ here causes the theorem below to be silently ignored
-- see https://leanprover.zulipchat.com/#narrow/stream/287929-mathlib4/topic/Lean.204.20doesn't.20add.20lemma.20to.20the.20environment/near/363764522
-- and https://github.com/leanprover/lean4/issues/2257
variable {l : IntegrationParams} {f g : (ι → ℝ) → E} {vol : ι →ᵇᵃ E →L[ℝ] F} {y y' : F}

/-- Reinterpret `BoxIntegral.HasIntegral` as `Filter.Tendsto`, e.g., dot-notation theorems
that are shadowed in the `BoxIntegral.HasIntegral` namespace. -/
theorem HasIntegral.tendsto (h : HasIntegral I l f vol y) :
    Tendsto (integralSum f vol) (l.toFilteriUnion I ⊀) (𝓝 y) :=
  h

/-- The `ε`-`Ύ` definition of `BoxIntegral.HasIntegral`. -/
theorem hasIntegral_iff : HasIntegral I l f vol y ↔
    ∀ ε > (0 : ℝ), ∃ r : ℝ≥0 → ℝⁿ → Ioi (0 : ℝ), (∀ c, l.RCond (r c)) ∧
      ∀ c π, l.MemBaseSet I c (r c) π → IsPartition π → dist (integralSum f vol π) y ≀ ε :=
  ((l.hasBasis_toFilteriUnion_top I).tendsto_iff nhds_basis_closedBall).trans <| by
    simp [@forall_swap ℝ≥0 (TaggedPrepartition I)]

/-- Quite often it is more natural to prove an estimate of the form `a * ε`, not `ε` in the RHS of
`BoxIntegral.hasIntegral_iff`, so we provide this auxiliary lemma. -/
theorem HasIntegral.of_mul (a : ℝ)
    (h : ∀ ε : ℝ, 0 < ε → ∃ r : ℝ≥0 → ℝⁿ → Ioi (0 : ℝ), (∀ c, l.RCond (r c)) ∧ ∀ c π,
      l.MemBaseSet I c (r c) π → IsPartition π → dist (integralSum f vol π) y ≀ a * ε) :
    HasIntegral I l f vol y := by
  refine hasIntegral_iff.2 fun ε hε => ?_
  -- Pick `ε'` with `a * ε' < ε` and apply the hypothesis at `ε'`.
  rcases exists_pos_mul_lt hε a with ⟚ε', hε', ha⟩
  rcases h ε' hε' with ⟚r, hr, H⟩
  exact ⟚r, hr, fun c π hπ hπp => (H c π hπ hπp).trans ha.le⟩

/-- In a complete space, integrability is the Cauchy property of the net of integral sums. -/
theorem integrable_iff_cauchy [CompleteSpace F] :
    Integrable I l f vol ↔ Cauchy ((l.toFilteriUnion I ⊀).map (integralSum f vol)) :=
  cauchy_map_iff_exists_tendsto.symm

/-- In a complete space, a function is integrable if and only if its integral sums form a Cauchy
net. Here we restate this fact in terms of `∀ ε > 0, ∃ r, ...`. -/
theorem integrable_iff_cauchy_basis [CompleteSpace F] : Integrable I l f vol ↔
    ∀ ε > (0 : ℝ), ∃ r : ℝ≥0 → ℝⁿ → Ioi (0 : ℝ), (∀ c, l.RCond (r c)) ∧
      ∀ c₁ c₂ π₁ π₂, l.MemBaseSet I c₁ (r c₁) π₁ → π₁.IsPartition → l.MemBaseSet I c₂ (r c₂) π₂ →
        π₂.IsPartition → dist (integralSum f vol π₁) (integralSum f vol π₂) ≀ ε := by
  rw [integrable_iff_cauchy, cauchy_map_iff',
    (l.hasBasis_toFilteriUnion_top _).prod_self.tendsto_iff uniformity_basis_dist_le]
  refine forall₂_congr fun ε _ => exists_congr fun r => ?_
  simp only [exists_prop, Prod.forall, Set.mem_iUnion, exists_imp, prod_mk_mem_set_prod_eq, and_imp,
    mem_inter_iff, mem_setOf_eq]
  -- Reorder the universally quantified variables to match the stated form.
  exact
    and_congr Iff.rfl
      ⟚fun H c₁ c₂ π₁ π₂ h₁ hU₁ h₂ hU₂ => H π₁ π₂ c₁ h₁ hU₁ c₂ h₂ hU₂,
        fun H π₁ π₂ c₁ h₁ hU₁ c₂ h₂ hU₂ => H c₁ c₂ π₁ π₂ h₁ hU₁ h₂ hU₂⟩
/-- A `HasIntegral` statement transfers from an integration theory to any weaker one. -/
theorem HasIntegral.mono {l₁ l₂ : IntegrationParams} (h : HasIntegral I l₁ f vol y) (hl : l₂ ≀ l₁) :
    HasIntegral I l₂ f vol y :=
  h.mono_left <| IntegrationParams.toFilteriUnion_mono _ hl _

/-- An integrable function has integral `integral I l f vol` (unfold the `dif` and use the chosen
witness). -/
protected theorem Integrable.hasIntegral (h : Integrable I l f vol) :
    HasIntegral I l f vol (integral I l f vol) := by
  rw [integral, dif_pos h]
  exact Classical.choose_spec h

theorem Integrable.mono {l'} (h : Integrable I l f vol) (hle : l' ≀ l) : Integrable I l' f vol :=
  ⟚_, h.hasIntegral.mono hle⟩

/-- The box integral is unique, by uniqueness of limits. -/
theorem HasIntegral.unique (h : HasIntegral I l f vol y) (h' : HasIntegral I l f vol y') : y = y' :=
  tendsto_nhds_unique h h'

theorem HasIntegral.integrable (h : HasIntegral I l f vol y) : Integrable I l f vol :=
  ⟚_, h⟩

theorem HasIntegral.integral_eq (h : HasIntegral I l f vol y) : integral I l f vol = y :=
  h.integrable.hasIntegral.unique h

-- Linearity of the box integral in `f`: sums, negation, and differences.
nonrec theorem HasIntegral.add (h : HasIntegral I l f vol y) (h' : HasIntegral I l g vol y') :
    HasIntegral I l (f + g) vol (y + y') := by
  simpa only [HasIntegral, ← integralSum_add] using h.add h'

theorem Integrable.add (hf : Integrable I l f vol) (hg : Integrable I l g vol) :
    Integrable I l (f + g) vol :=
  (hf.hasIntegral.add hg.hasIntegral).integrable

theorem integral_add (hf : Integrable I l f vol) (hg : Integrable I l g vol) :
    integral I l (f + g) vol = integral I l f vol + integral I l g vol :=
  (hf.hasIntegral.add hg.hasIntegral).integral_eq

nonrec theorem HasIntegral.neg (hf : HasIntegral I l f vol y) : HasIntegral I l (-f) vol (-y) := by
  simpa only [HasIntegral, ← integralSum_neg] using hf.neg

theorem Integrable.neg (hf : Integrable I l f vol) : Integrable I l (-f) vol :=
  hf.hasIntegral.neg.integrable

theorem Integrable.of_neg (hf : Integrable I l (-f) vol) : Integrable I l f vol :=
  neg_neg f ▞ hf.neg

@[simp]
theorem integrable_neg : Integrable I l (-f) vol ↔ Integrable I l f vol :=
  ⟚fun h => h.of_neg, fun h => h.neg⟩

@[simp]
theorem integral_neg : integral I l (-f) vol = -integral I l f vol := by
  classical
  -- Split on integrability: when `f` is not integrable, neither is `-f`, and both sides are `0`.
  exact if h : Integrable I l f vol then h.hasIntegral.neg.integral_eq
    else by rw [integral, integral, dif_neg h, dif_neg (mt Integrable.of_neg h), neg_zero]

theorem HasIntegral.sub (h : HasIntegral I l f vol y) (h' : HasIntegral I l g vol y') :
    HasIntegral I l (f - g) vol (y - y') := by simpa only [sub_eq_add_neg] using h.add h'.neg

theorem Integrable.sub (hf : Integrable I l f vol) (hg : Integrable I l g vol) :
    Integrable I l (f - g) vol :=
  (hf.hasIntegral.sub hg.hasIntegral).integrable

theorem integral_sub (hf : Integrable I l f vol) (hg : Integrable I l g vol) :
    integral I l (f - g) vol = integral I l f vol - integral I l g vol :=
  (hf.hasIntegral.sub hg.hasIntegral).integral_eq
/-- A constant function integrates to `vol I c`: for every tagged *partition* the integral sum is
exactly `vol I c` by box-additivity, and partitions occur eventually along the filter. -/
theorem hasIntegral_const (c : E) : HasIntegral I l (fun _ => c) vol (vol I c) :=
  tendsto_const_nhds.congr' <| (l.eventually_isPartition I).mono fun _π hπ => Eq.symm <|
    (vol.map ⟚⟚fun g : E →L[ℝ] F ↊ g c, rfl⟩, fun _ _ ↊ rfl⟩).sum_partition_boxes le_top hπ

@[simp]
theorem integral_const (c : E) : integral I l (fun _ => c) vol = vol I c :=
  (hasIntegral_const c).integral_eq

theorem integrable_const (c : E) : Integrable I l (fun _ => c) vol :=
  ⟚_, hasIntegral_const c⟩

theorem hasIntegral_zero : HasIntegral I l (fun _ => (0 : E)) vol 0 := by
  simpa only [← (vol I).map_zero] using hasIntegral_const (0 : E)

theorem integrable_zero : Integrable I l (fun _ => (0 : E)) vol :=
  ⟚0, hasIntegral_zero⟩

theorem integral_zero : integral I l (fun _ => (0 : E)) vol = 0 :=
  hasIntegral_zero.integral_eq

/-- A finite sum of functions with box integrals has the sum of the integrals as its integral. -/
theorem HasIntegral.sum {α : Type*} {s : Finset α} {f : α → ℝⁿ → E} {g : α → F}
    (h : ∀ i ∈ s, HasIntegral I l (f i) vol (g i)) :
    HasIntegral I l (fun x => ∑ i ∈ s, f i x) vol (∑ i ∈ s, g i) := by
  classical
  induction' s using Finset.induction_on with a s ha ihs; · simp [hasIntegral_zero]
  simp only [Finset.sum_insert ha]; rw [Finset.forall_mem_insert] at h
  exact h.1.add (ihs h.2)

/-- The box integral commutes with real scalar multiplication. -/
theorem HasIntegral.smul (hf : HasIntegral I l f vol y) (c : ℝ) :
    HasIntegral I l (c ⹠f) vol (c ⹠y) := by
  simpa only [HasIntegral, ← integralSum_smul] using
    (tendsto_const_nhds : Tendsto _ _ (𝓝 c)).smul hf

theorem Integrable.smul (hf : Integrable I l f vol) (c : ℝ) : Integrable I l (c ⹠f) vol :=
  (hf.hasIntegral.smul c).integrable

theorem Integrable.of_smul {c : ℝ} (hf : Integrable I l (c ⹠f) vol) (hc : c ≠ 0) :
    Integrable I l f vol := by
  -- Undo the scaling by multiplying with `c⁻¹`.
  simpa [inv_smul_smul₀ hc] using hf.smul c⁻¹

@[simp]
theorem integral_smul (c : ℝ) : integral I l (fun x => c ⹠f x) vol = c ⹠integral I l f vol := by
  rcases eq_or_ne c 0 with (rfl | hc); · simp only [zero_smul, integral_zero]
  by_cases hf : Integrable I l f vol
  · exact (hf.hasIntegral.smul c).integral_eq
  · -- Non-integrable case: `c ⹠f` is non-integrable too (as `c ≠ 0`), so both sides are `0`.
    have : ¬Integrable I l (fun x => c ⹠f x) vol := mt (fun h => h.of_smul hc) hf
    rw [integral, integral, dif_neg hf, dif_neg this, smul_zero]
open MeasureTheory

/-- The integral of a nonnegative function w.r.t. a volume generated by a locally-finite measure is
nonnegative. -/
theorem integral_nonneg {g : ℝⁿ → ℝ} (hg : ∀ x ∈ Box.Icc I, 0 ≀ g x) (ÎŒ : Measure ℝⁿ)
    [IsLocallyFiniteMeasure ÎŒ] : 0 ≀ integral I l g ÎŒ.toBoxAdditive.toSMul := by
  by_cases hgi : Integrable I l g ÎŒ.toBoxAdditive.toSMul
  · -- Each integral sum is a sum of products of nonnegative numbers.
    refine ge_of_tendsto' hgi.hasIntegral fun π => sum_nonneg fun J _ => ?_
    exact mul_nonneg ENNReal.toReal_nonneg (hg _ <| π.tag_mem_Icc _)
  · rw [integral, dif_neg hgi]

/-- If `‖f x‖ ≀ g x` on `[l, u]` and `g` is integrable, then the norm of the integral of `f` is less
than or equal to the integral of `g`. -/
theorem norm_integral_le_of_norm_le {g : ℝⁿ → ℝ} (hle : ∀ x ∈ Box.Icc I, ‖f x‖ ≀ g x)
    (ÎŒ : Measure ℝⁿ) [IsLocallyFiniteMeasure ÎŒ] (hg : Integrable I l g ÎŒ.toBoxAdditive.toSMul) :
    ‖(integral I l f ÎŒ.toBoxAdditive.toSMul : E)‖ ≀ integral I l g ÎŒ.toBoxAdditive.toSMul := by
  by_cases hfi : Integrable.{u, v, v} I l f ÎŒ.toBoxAdditive.toSMul
  · -- Compare the two integral sums termwise and pass to the limit.
    refine le_of_tendsto_of_tendsto' hfi.hasIntegral.norm hg.hasIntegral fun π => ?_
    refine norm_sum_le_of_le _ fun J _ => ?_
    simp only [BoxAdditiveMap.toSMul_apply, norm_smul, smul_eq_mul, Real.norm_eq_abs,
      ÎŒ.toBoxAdditive_apply, abs_of_nonneg ENNReal.toReal_nonneg]
    exact mul_le_mul_of_nonneg_left (hle _ <| π.tag_mem_Icc _) ENNReal.toReal_nonneg
  · -- If `f` is not integrable, its "integral" is `0`, and the RHS is nonnegative.
    rw [integral, dif_neg hfi, norm_zero]
    exact integral_nonneg (fun x hx => (norm_nonneg _).trans (hle x hx)) ÎŒ

/-- A uniform bound `‖f x‖ ≀ c` on the box gives the bound `(ÎŒ I).toReal * c` on the norm of the
integral (special case of `norm_integral_le_of_norm_le` with constant `g`). -/
theorem norm_integral_le_of_le_const {c : ℝ}
    (hc : ∀ x ∈ Box.Icc I, ‖f x‖ ≀ c) (ÎŒ : Measure ℝⁿ) [IsLocallyFiniteMeasure ÎŒ] :
    ‖(integral I l f ÎŒ.toBoxAdditive.toSMul : E)‖ ≀ (ÎŒ I).toReal * c := by
  simpa only [integral_const] using norm_integral_le_of_norm_le hc ÎŒ (integrable_const c)
/-!
# Henstock-Sacks inequality and integrability on subboxes
Henstock-Sacks inequality for Henstock-Kurzweil integral says the following. Let `f` be a function
integrable on a box `I`; let `r : ââ¿ â (0, â)` be a function such that for any tagged partition of
`I` subordinate to `r`, the integral sum over this partition is `ε`-close to the integral. Then for
any tagged prepartition (i.e. a finite collections of pairwise disjoint subboxes of `I` with tagged
points) `Ï`, the integral sum over `Ï` differs from the integral of `f` over the part of `I` covered
by `Ï` by at most `ε`. The actual statement in the library is a bit more complicated to make it work
for any `BoxIntegral.IntegrationParams`. We formalize several versions of this inequality in
`BoxIntegral.Integrable.dist_integralSum_le_of_memBaseSet`,
`BoxIntegral.Integrable.dist_integralSum_sum_integral_le_of_memBaseSet_of_iUnion_eq`, and
`BoxIntegral.Integrable.dist_integralSum_sum_integral_le_of_memBaseSet`.
Instead of using predicate assumptions on `r`, we define
`BoxIntegral.Integrable.convergenceR (h : integrable I l f vol) (ε : â) (c : ââ¥0) : ââ¿ â (0, â)`
to be a function `r` such that
- if `l.bRiemann`, then `r` is a constant;
- if `ε > 0`, then for any tagged partition `Ï` of `I` subordinate to `r` (more precisely,
satisfying the predicate `l.mem_base_set I c r`), the integral sum of `f` over `Ï` differs from
the integral of `f` over `I` by at most `ε`.
The proof is mostly based on
[Russel A. Gordon, *The integrals of Lebesgue, Denjoy, Perron, and Henstock*][Gordon55].
-/
namespace Integrable
/-- If `ε > 0`, then `BoxIntegral.Integrable.convergenceR` is a function `r : ââ¥0 â ââ¿ â (0, â)`
such that for every `c : ââ¥0`, for every tagged partition `Ï` subordinate to `r` (and satisfying
additional distortion estimates if `BoxIntegral.IntegrationParams.bDistortion l = true`), the
corresponding integral sum is `ε`-close to the integral.
If `BoxIntegral.IntegrationParams.bRiemann = true`, then `r c x` does not depend on `x`. If
`ε †0`, then we use `r c x = 1`. -/
def convergenceR (h : Integrable I l f vol) (ε : â) : ââ¥0 â ââ¿ â Ioi (0 : â) :=
  -- For a positive tolerance, extract a gauge from the `ε`-characterization of `HasIntegral`;
  -- for a nonpositive tolerance any value works, so return the constant function `1`.
  if hε : 0 < ε then (hasIntegral_iff.1 h.hasIntegral ε hε).choose
  else fun _ _ => âš1, Set.mem_Ioi.2 zero_lt_oneâ©
variable {c câ câ : ââ¥0} {ε εâ εâ : â} {Ïâ Ïâ : TaggedPrepartition I}
/-- The gauge `h.convergenceR ε c` satisfies the side condition `l.RCond`: if `l.bRiemann`,
then it is a constant function. -/
theorem convergenceR_cond (h : Integrable I l f vol) (ε : â) (c : ââ¥0) :
    l.RCond (h.convergenceR ε c) := by
  -- Unfold the definition and treat the two `if` branches separately: the chosen witness
  -- satisfies `RCond` by its specification, and a constant function satisfies it trivially.
  rw [convergenceR]; split_ifs with hâ
  exacts [(hasIntegral_iff.1 h.hasIntegral ε hâ).choose_spec.1 _, fun _ x => rfl]
/-- For a positive tolerance `ε`, the integral sum of `f` over any tagged partition of `I` in the
base set determined by the gauge `h.convergenceR ε c` is `ε`-close to the integral of `f`. -/
theorem dist_integralSum_integral_le_of_memBaseSet (h : Integrable I l f vol) (hâ : 0 < ε)
    (hÏ : l.MemBaseSet I c (h.convergenceR ε c) Ï) (hÏp : Ï.IsPartition) :
    dist (integralSum f vol Ï) (integral I l f vol) †ε := by
  -- Since `0 < ε`, `convergenceR` is the witness chosen from `hasIntegral_iff`, whose
  -- specification gives exactly the required estimate.
  rw [convergenceR, dif_pos hâ] at hÏ
  exact (hasIntegral_iff.1 h.hasIntegral ε hâ).choose_spec.2 c _ hÏ hÏp
/-- **Henstock-Sacks inequality**. Let `râ râ : ââ¿ â (0, â)` be a function such that for any tagged
*partition* of `I` subordinate to `râ`, `k=1,2`, the integral sum of `f` over this partition differs
from the integral of `f` by at most `εâ`. Then for any two tagged *prepartition* `Ïâ Ïâ` subordinate
to `râ` and `râ` respectively and covering the same part of `I`, the integral sums of `f` over these
prepartitions differ from each other by at most `ε₁ + ε₂`.
The actual statement
- uses `BoxIntegral.Integrable.convergenceR` instead of a predicate assumption on `r`;
- uses `BoxIntegral.IntegrationParams.MemBaseSet` instead of “subordinate to `r`” to
  account for additional requirements like being a Henstock partition or having a bounded
  distortion.
See also `BoxIntegral.Integrable.dist_integralSum_sum_integral_le_of_memBaseSet_of_iUnion_eq` and
`BoxIntegral.Integrable.dist_integralSum_sum_integral_le_of_memBaseSet`.
-/
theorem dist_integralSum_le_of_memBaseSet (h : Integrable I l f vol) (hposâ : 0 < εâ)
    (hposâ : 0 < εâ) (hâ : l.MemBaseSet I câ (h.convergenceR εâ câ) Ïâ)
    (hâ : l.MemBaseSet I câ (h.convergenceR εâ câ) Ïâ) (HU : Ïâ.iUnion = Ïâ.iUnion) :
    dist (integralSum f vol Ïâ) (integralSum f vol Ïâ) †εâ + εâ := by
  -- Complete both prepartitions to partitions of `I` using a common prepartition `Ï` of the
  -- uncovered part, tagged subordinately to the pointwise minimum `r` of the two gauges.
  rcases hâ.exists_common_compl hâ HU with âšÏ, hÏU, hÏcâ, hÏcââ©
  set r : ââ¿ â Ioi (0 : â) := fun x => min (h.convergenceR εâ câ x) (h.convergenceR εâ câ x)
  set Ïr := Ï.toSubordinate r
  -- Each completed partition is subordinate to the corresponding gauge, so each integral sum
  -- is close to the integral of `f` over `I`.
  have Hâ :
      dist (integralSum f vol (Ïâ.unionComplToSubordinate Ï hÏU r)) (integral I l f vol) †εâ :=
    h.dist_integralSum_integral_le_of_memBaseSet hposâ
      (hâ.unionComplToSubordinate (fun _ _ => min_le_left _ _) hÏU hÏcâ)
      (isPartition_unionComplToSubordinate _ _ _ _)
  rw [HU] at hÏU
  have Hâ :
      dist (integralSum f vol (Ïâ.unionComplToSubordinate Ï hÏU r)) (integral I l f vol) †εâ :=
    h.dist_integralSum_integral_le_of_memBaseSet hposâ
      (hâ.unionComplToSubordinate (fun _ _ => min_le_right _ _) hÏU hÏcâ)
      (isPartition_unionComplToSubordinate _ _ _ _)
  -- The contributions of the common complement coincide, so the triangle inequality through the
  -- integral finishes the proof.
  simpa [unionComplToSubordinate] using (dist_triangle_right _ _ _).trans (add_le_add Hâ Hâ)
/-- If `f` is integrable on `I` along `l`, then for two sufficiently fine tagged prepartitions
(in the sense of the filter `BoxIntegral.IntegrationParams.toFilter l I`) such that they cover
the same part of `I`, the integral sums of `f` over `Ïâ` and `Ïâ` are very close to each other. -/
theorem tendsto_integralSum_toFilter_prod_self_inf_iUnion_eq_uniformity (h : Integrable I l f vol) :
    Tendsto (fun Ï : TaggedPrepartition I Ã TaggedPrepartition I =>
      (integralSum f vol Ï.1, integralSum f vol Ï.2))
      ((l.toFilter I ÃË¢ l.toFilter I) â ð {Ï | Ï.1.iUnion = Ï.2.iUnion}) (ð€ F) := by
  -- Rewrite the goal in terms of the filter bases of `toFilter` and of the uniformity, then
  -- apply the Henstock-Sacks inequality with tolerance `ε / 2` for each prepartition.
  refine (((l.hasBasis_toFilter I).prod_self.inf_principal _).tendsto_iff
    uniformity_basis_dist_le).2 fun ε ε0 => ?_
  replace ε0 := half_pos ε0
  use h.convergenceR (ε / 2), h.convergenceR_cond (ε / 2); rintro âšÏâ, Ïââ© âšâšhâ, hââ©, hUâ©
  rw [â add_halves ε]
  exact h.dist_integralSum_le_of_memBaseSet ε0 ε0 hâ.choose_spec hâ.choose_spec hU
/-- If `f` is integrable on a box `I` along `l`, then for any fixed subset `s` of `I` that can be
represented as a finite union of boxes, the integral sums of `f` over tagged prepartitions that
cover exactly `s` form a Cauchy “sequence” along `l`. -/
theorem cauchy_map_integralSum_toFilteriUnion (h : Integrable I l f vol) (Ïâ : Prepartition I) :
    Cauchy ((l.toFilteriUnion I Ïâ).map (integralSum f vol)) := by
  refine âšinferInstance, ?_â©
  -- Reduce to the `Tendsto` statement of the previous lemma: two prepartitions that both cover
  -- exactly `Ïâ.iUnion` in particular cover the same part of `I`.
  rw [prod_map_map_eq, â toFilter_inf_iUnion_eq, â prod_inf_prod, prod_principal_principal]
  exact h.tendsto_integralSum_toFilter_prod_self_inf_iUnion_eq_uniformity.mono_left
    (inf_le_inf_left _ <| principal_mono.2 fun Ï h => h.1.trans h.2.symm)
variable [CompleteSpace F]
/-- Auxiliary lemma for `BoxIntegral.Integrable.to_subbox` and
`BoxIntegral.Integrable.tendsto_integralSum_toFilteriUnion_single`: for any subbox `J †I` there
is a value `y` that is both the integral of `f` over `J` and the limit of the integral sums of `f`
over tagged prepartitions of `I` covering exactly `J`. -/
theorem to_subbox_aux (h : Integrable I l f vol) (hJ : J †I) :
    â y : F, HasIntegral J l f vol y â§
      Tendsto (integralSum f vol) (l.toFilteriUnion I (Prepartition.single I J hJ)) (ð y) := by
  -- The integral sums form a Cauchy filter, hence converge in the complete space `F`.
  refine (cauchy_map_iff_exists_tendsto.1
    (h.cauchy_map_integralSum_toFilteriUnion (.single I J hJ))).imp fun y hy ⊠âš?_, hyâ©
  convert hy.comp (l.tendsto_embedBox_toFilteriUnion_top hJ) -- faster than `exact` here
/-- Integrability restricts to subboxes: if `f` is integrable on `I` along `l`, then it is
integrable on every box `J †I`. -/
theorem to_subbox (h : Integrable I l f vol) (hJ : J †I) : Integrable J l f vol :=
  -- Keep only the `HasIntegral` half of the existence statement produced by `to_subbox_aux`.
  Exists.imp (fun _ hy => hy.1) (h.to_subbox_aux hJ)
/-- If `f` is integrable on a box `I`, then integral sums of `f` over tagged prepartitions
that cover exactly a subbox `J †I` tend to the integral of `f` over `J` along `l`. -/
theorem tendsto_integralSum_toFilteriUnion_single (h : Integrable I l f vol) (hJ : J †I) :
    Tendsto (integralSum f vol) (l.toFilteriUnion I (Prepartition.single I J hJ))
      (ð <| integral J l f vol) :=
  -- The limit provided by `to_subbox_aux` is precisely the integral of `f` over `J`.
  let âš_y, hâ, hââ© := h.to_subbox_aux hJ
  hâ.integral_eq.symm âž hâ
/-- **Henstock-Sacks inequality**. Let `r : ââ¿ â (0, â)` be a function such that for any tagged
*partition* of `I` subordinate to `r`, the integral sum of `f` over this partition differs from the
integral of `f` by at most `ε`. Then for any tagged *prepartition* `Ï` subordinate to `r`, the
integral sum of `f` over this prepartition differs from the integral of `f` over the part of `I`
covered by `Ï` by at most `ε`.
The actual statement
- uses `BoxIntegral.Integrable.convergenceR` instead of a predicate assumption on `r`;
- uses `BoxIntegral.IntegrationParams.MemBaseSet` instead of “subordinate to `r`” to
  account for additional requirements like being a Henstock partition or having a bounded
  distortion;
- takes an extra argument `Ïâ : Prepartition I` and an assumption `Ï.iUnion = Ïâ.iUnion` instead of
  using `Ï.toPrepartition`.
-/
theorem dist_integralSum_sum_integral_le_of_memBaseSet_of_iUnion_eq (h : Integrable I l f vol)
    (h0 : 0 < ε) (hÏ : l.MemBaseSet I c (h.convergenceR ε c) Ï) {Ïâ : Prepartition I}
    (hU : Ï.iUnion = Ïâ.iUnion) :
    dist (integralSum f vol Ï) (â J â Ïâ.boxes, integral J l f vol) †ε := by
  -- Let us prove that the distance is less than or equal to `ε + Ύ` for all positive `Ύ`.
  refine le_of_forall_pos_le_add fun ÎŽ ÎŽ0 => ?_
  -- First we choose some constants.
  set ÎŽ' : â := ÎŽ / (Ïâ.boxes.card + 1)
  have H0 : 0 < (Ïâ.boxes.card + 1 : â) := Nat.cast_add_one_pos _
  have ÎŽ'0 : 0 < ÎŽ' := div_pos ÎŽ0 H0
  set C := max Ïâ.distortion Ïâ.compl.distortion
  /- Next we choose a tagged partition of each `J â Ïâ` such that the integral sum of `f` over this
  partition is `ÎŽ'`-close to the integral of `f` over `J`. -/
  have : â J â Ïâ, â Ïi : TaggedPrepartition J,
      Ïi.IsPartition â§ dist (integralSum f vol Ïi) (integral J l f vol) †Ύ' â§
        l.MemBaseSet J C (h.convergenceR ÎŽ' C) Ïi := by
    intro J hJ
    have Hle : J †I := Ïâ.le_of_mem hJ
    -- `f` is integrable on the subbox `J`; use the pointwise minimum of the gauges for `I`
    -- and for `J`, so the resulting partition is in both base sets.
    have HJi : Integrable J l f vol := h.to_subbox Hle
    set r := fun x => min (h.convergenceR ÎŽ' C x) (HJi.convergenceR ÎŽ' C x)
    have hJd : J.distortion †C := le_trans (Finset.le_sup hJ) (le_max_left _ _)
    rcases l.exists_memBaseSet_isPartition J hJd r with âšÏJ, hC, hpâ©
    have hCâ : l.MemBaseSet J C (HJi.convergenceR ÎŽ' C) ÏJ := by
      refine hC.mono J le_rfl le_rfl fun x _ => ?_; exact min_le_right _ _
    have hCâ : l.MemBaseSet J C (h.convergenceR ÎŽ' C) ÏJ := by
      refine hC.mono J le_rfl le_rfl fun x _ => ?_; exact min_le_left _ _
    exact âšÏJ, hp, HJi.dist_integralSum_integral_le_of_memBaseSet ÎŽ'0 hCâ hp, hCââ©
  /- Now we combine these tagged partitions into a tagged prepartition of `I` that covers the
  same part of `I` as `Ïâ` and apply `BoxIntegral.dist_integralSum_le_of_memBaseSet` to
  `Ï` and this prepartition. -/
  choose! Ïi hÏip hÏiÎŽ' hÏiC using this
  have : l.MemBaseSet I C (h.convergenceR ÎŽ' C) (Ïâ.biUnionTagged Ïi) :=
    biUnionTagged_memBaseSet hÏiC hÏip fun _ => le_max_right _ _
  have hU' : Ï.iUnion = (Ïâ.biUnionTagged Ïi).iUnion :=
    hU.trans (Prepartition.iUnion_biUnion_partition _ hÏip).symm
  have := h.dist_integralSum_le_of_memBaseSet h0 ÎŽ'0 hÏ this hU'
  rw [integralSum_biUnionTagged] at this
  -- Conclude via the triangle inequality: `Ï` versus the combined prepartition contributes
  -- `ε + ÎŽ'`, and the combined prepartition versus the sum of integrals contributes one `ÎŽ'`
  -- per box of `Ïâ`.
  calc
    dist (integralSum f vol Ï) (â J â Ïâ.boxes, integral J l f vol) â€
        dist (integralSum f vol Ï) (â J â Ïâ.boxes, integralSum f vol (Ïi J)) +
          dist (â J â Ïâ.boxes, integralSum f vol (Ïi J)) (â J â Ïâ.boxes, integral J l f vol) :=
      dist_triangle _ _ _
    _ †ε + ÎŽ' + â _J â Ïâ.boxes, ÎŽ' := add_le_add this (dist_sum_sum_le_of_le _ hÏiÎŽ')
    _ = ε + Ύ := by field_simp [Ύ']; ring
/-- **Henstock-Sacks inequality**. Let `r : ââ¿ â (0, â)` be a function such that for any tagged
*partition* of `I` subordinate to `r`, the integral sum of `f` over this partition differs from the
integral of `f` by at most `ε`. Then for any tagged *prepartition* `Ï` subordinate to `r`, the
integral sum of `f` over this prepartition differs from the integral of `f` over the part of `I`
covered by `Ï` by at most `ε`.
The actual statement
- uses `BoxIntegral.Integrable.convergenceR` instead of a predicate assumption on `r`;
- uses `BoxIntegral.IntegrationParams.MemBaseSet` instead of “subordinate to `r`” to
  account for additional requirements like being a Henstock partition or having a bounded
  distortion.
-/
theorem dist_integralSum_sum_integral_le_of_memBaseSet (h : Integrable I l f vol) (h0 : 0 < ε)
    (hÏ : l.MemBaseSet I c (h.convergenceR ε c) Ï) :
    dist (integralSum f vol Ï) (â J â Ï.boxes, integral J l f vol) †ε :=
  -- Specialize the previous lemma to `Ïâ = Ï.toPrepartition`; the covering condition is `rfl`.
  h.dist_integralSum_sum_integral_le_of_memBaseSet_of_iUnion_eq h0 hÏ rfl
/-- Integral sum of `f` over a tagged prepartition `Ï` such that `Ï.iUnion = Ïâ.iUnion` tends to
the sum of integrals of `f` over the boxes of `Ïâ`. -/
theorem tendsto_integralSum_sum_integral (h : Integrable I l f vol) (Ïâ : Prepartition I) :
    Tendsto (integralSum f vol) (l.toFilteriUnion I Ïâ)
      (ð <| â J â Ïâ.boxes, integral J l f vol) := by
  -- Unfold the filter basis; for every `ε > 0` the gauge `h.convergenceR ε` works by the
  -- Henstock-Sacks inequality above.
  refine ((l.hasBasis_toFilteriUnion I Ïâ).tendsto_iff nhds_basis_closedBall).2 fun ε ε0 => ?_
  refine âšh.convergenceR ε, h.convergenceR_cond ε, ?_â©
  simp only [mem_inter_iff, Set.mem_iUnion, mem_setOf_eq]
  rintro Ï âšc, hc, hUâ©
  exact h.dist_integralSum_sum_integral_le_of_memBaseSet_of_iUnion_eq ε0 hc hU
/-- If `f` is integrable on `I`, then `fun J ⊠integral J l f vol` is box-additive on subboxes of
`I`: if `Ïâ`, `Ïâ` are two prepartitions of `I` covering the same part of `I`, the sum of integrals
of `f` over the boxes of `Ïâ` is equal to the sum of integrals of `f` over the boxes of `Ïâ`.
See also `BoxIntegral.Integrable.toBoxAdditive` for a bundled version. -/
theorem sum_integral_congr (h : Integrable I l f vol) {Ïâ Ïâ : Prepartition I}
    (hU : Ïâ.iUnion = Ïâ.iUnion) :
    â J â Ïâ.boxes, integral J l f vol = â J â Ïâ.boxes, integral J l f vol := by
  -- Both sums are limits of the same filter of integral sums, hence equal by uniqueness of limits.
  refine tendsto_nhds_unique (h.tendsto_integralSum_sum_integral Ïâ) ?_
  rw [l.toFilteriUnion_congr _ hU]
  exact h.tendsto_integralSum_sum_integral Ïâ
/-- If `f` is integrable on `I`, then `fun J ⊠integral J l f vol` is box-additive on subboxes of
`I`: if `Ïâ`, `Ïâ` are two prepartitions of `I` covering the same part of `I`, the sum of integrals
of `f` over the boxes of `Ïâ` is equal to the sum of integrals of `f` over the boxes of `Ïâ`.
See also `BoxIntegral.Integrable.sum_integral_congr` for an unbundled version. -/
@[simps]
def toBoxAdditive (h : Integrable I l f vol) : ι âáµáµ[I] F where
  toFun J := integral J l f vol
  sum_partition_boxes' J hJ Ï hÏ := by
    -- A partition of `J` covers all of `J`, so `sum_integral_congr` compares it with the trivial
    -- prepartition `†: Prepartition J`, whose only box is `J` itself.
    replace hÏ := hÏ.iUnion_eq; rw [â Prepartition.iUnion_top] at hÏ
    rw [(h.to_subbox (WithTop.coe_le_coe.1 hJ)).sum_integral_congr hÏ, Prepartition.top_boxes,
      sum_singleton]
end Integrable
open MeasureTheory
/-!
### Integrability conditions
-/
open Prepartition EMetric ENNReal BoxAdditiveMap Finset Metric TaggedPrepartition
variable (l)
/-- A function that is bounded and a.e. continuous on a box `I` is integrable on `I`. -/
theorem integrable_of_bounded_and_ae_continuousWithinAt [CompleteSpace E] {I : Box ι} {f : ââ¿ â E}
    (hb : â C : â, â x â Box.Icc I, âf xâ †C) (ÎŒ : Measure ââ¿) [IsLocallyFiniteMeasure ÎŒ]
    (hc : âáµ x â(ÎŒ.restrict (Box.Icc I)), ContinuousWithinAt f (Box.Icc I) x) :
    Integrable I l f Ό.toBoxAdditive.toSMul := by
  /- We prove that f is integrable by proving that we can ensure that the integralSums over any
  two tagged prepartitions Ïâ and Ïâ can be made ε-close by making the partitions
  sufficiently fine.
  Start by defining some constants C, ε₁, ε₂ that will be useful later. -/
  refine integrable_iff_cauchy_basis.2 fun ε ε0 ⊠?_
  rcases exists_pos_mul_lt ε0 (2 * ÎŒ.toBoxAdditive I) with âšÎµâ, εâ0, hεââ©
  rcases hb with âšC, hCâ©
  -- `C` is nonnegative because it bounds the norm of `f` at some point of the nonempty box.
  have C0 : 0 †C := by
    obtain âšx, hxâ© := BoxIntegral.Box.nonempty_coe I
    exact le_trans (norm_nonneg (f x)) <| hC x (I.coe_subset_Icc hx)
  rcases exists_pos_mul_lt ε0 (4 * C) with âšÎµâ, εâ0, hεââ©
  have εâ0' : ENNReal.ofReal εâ â 0 := ne_of_gt <| ofReal_pos.2 εâ0
  -- The set of discontinuities of f is contained in an open set U with ÎŒ U < εâ.
  let D := { x â Box.Icc I | ¬ ContinuousWithinAt f (Box.Icc I) x }
  let Ό' := Ό.restrict (Box.Icc I)
  -- The a.e. continuity assumption says exactly that `D` is `Ό'`-null.
  have Ό'D : Ό' D = 0 := by
    rcases eventually_iff_exists_mem.1 hc with âšV, ae, hVâ©
    exact eq_of_le_of_not_lt (mem_ae_iff.1 ae ➠(Ό'.mono <| fun x h xV ⊠h.2 (hV x xV))) not_lt_zero
  obtain âšU, UD, Uopen, hUâ© := Set.exists_isOpen_lt_add D (show ÎŒ' D â †by simp [ÎŒ'D]) εâ0'
  rw [Ό'D, zero_add] at hU
  /- Box.Icc I \ U is compact and avoids discontinuities of f, so there exists r > 0 such that for
  every x â Box.Icc I \ U, the oscillation (within Box.Icc I) of f on the ball of radius r
  centered at x is †εâ -/
  have comp : IsCompact (Box.Icc I \ U) :=
    I.isCompact_Icc.of_isClosed_subset (I.isCompact_Icc.isClosed.sdiff Uopen) Set.diff_subset
  have : â x â (Box.Icc I \ U), oscillationWithin f (Box.Icc I) x < (ENNReal.ofReal εâ) := by
    intro x hx
    suffices oscillationWithin f (Box.Icc I) x = 0 by rw [this]; exact ofReal_pos.2 뵉0
    simpa [OscillationWithin.eq_zero_iff_continuousWithinAt, D, hx.1] using hx.2 â (fun a ⊠UD a)
  rcases comp.uniform_oscillationWithin this with âšr, r0, hrâ©
  /- We prove the claim for partitions Ïâ and Ïâ subordinate to r/2, by writing the difference as
  an integralSum over Ïâ â Ïâ and considering separately the boxes of Ïâ â Ïâ which are/aren't
  fully contained within U. -/
  refine âšfun _ _ ⊠âšr / 2, half_pos r0â©, fun _ _ _ ⊠rfl, fun câ câ Ïâ Ïâ hâ hâp hâ hâp ⊠?_â©
  simp only [dist_eq_norm, integralSum_sub_partitions _ _ hâp hâp, toSMul_apply, â smul_sub]
  have ΌI : Ό I < †:= lt_of_le_of_lt (Ό.mono I.coe_subset_Icc) I.isCompact_Icc.measure_lt_top
  let tâ (J : Box ι) : ââ¿ := (Ïâ.infPrepartition Ïâ.toPrepartition).tag J
  let tâ (J : Box ι) : ââ¿ := (Ïâ.infPrepartition Ïâ.toPrepartition).tag J
  let B := (Ïâ.toPrepartition â Ïâ.toPrepartition).boxes
  classical
  -- `B'` is the subset of boxes of the common refinement that lie entirely inside `U`.
  let B' := B.filter (fun J ⊠J.toSet â U)
  have hB' : B' â B := B.filter_subset (fun J ⊠J.toSet â U)
  have ÎŒJ_ne_top : â J â B, ÎŒ J â †:=
    fun J hJ ⊠lt_top_iff_ne_top.1 <| lt_of_le_of_lt (Ό.mono (Prepartition.le_of_mem' _ J hJ)) ΌI
  have un : â S â B, â J â S, J.toSet â I.toSet :=
    fun S hS ⊠iUnion_subset_iff.2 (fun J ⊠iUnion_subset_iff.2 fun hJ ⊠le_of_mem' _ J (hS hJ))
  -- Split the sum over `B` into boxes outside `U` (small oscillation of `f`) and boxes inside
  -- `U` (small total measure); each part is bounded by `ε / 2`.
  rw [â sum_sdiff hB', â add_halves ε]
  apply le_trans (norm_add_le _ _) (add_le_add ?_ ?_)
  /- If a box J is not contained within U, then the oscillation of f on J is small, which bounds
  the contribution of J to the overall sum. -/
  · have : â J â B \ B', âÎŒ.toBoxAdditive J ⢠(f (tâ J) - f (tâ J))â †Ό.toBoxAdditive J * εâ := by
      intro J hJ
      rw [mem_sdiff, B.mem_filter, not_and] at hJ
      rw [norm_smul, Ό.toBoxAdditive_apply, Real.norm_of_nonneg toReal_nonneg]
      refine mul_le_mul_of_nonneg_left ?_ toReal_nonneg
      obtain âšx, xJ, xnUâ© : â x â J, x â U := Set.not_subset.1 (hJ.2 hJ.1)
      have hx : x â Box.Icc I \ U := âšBox.coe_subset_Icc ((le_of_mem' _ J hJ.1) xJ), xnUâ©
      -- Both tags of `J` lie in the `r`-ball around `x`, so the distance between the two values
      -- of `f` is bounded by the oscillation of `f` near `x`.
      have ineq : edist (f (tâ J)) (f (tâ J)) †EMetric.diam (f '' (ball x r â© (Box.Icc I))) := by
        apply edist_le_diam_of_mem <;>
          refine Set.mem_image_of_mem f âš?_, tag_mem_Icc _ Jâ© <;>
            refine closedBall_subset_ball (div_two_lt_of_pos r0) <| mem_closedBall_comm.1 ?_
        · exact hâ.isSubordinate.infPrepartition Ïâ.toPrepartition J hJ.1 (Box.coe_subset_Icc xJ)
        · exact hâ.isSubordinate.infPrepartition Ïâ.toPrepartition J
            ((Ïâ.mem_infPrepartition_comm).1 hJ.1) (Box.coe_subset_Icc xJ)
      rw [â emetric_ball] at ineq
      simpa only [edist_le_ofReal (le_of_lt 뵉0), dist_eq_norm, hJ.1] using ineq.trans (hr x hx)
    refine (norm_sum_le _ _).trans <| (sum_le_sum this).trans ?_
    rw [â sum_mul]
    trans ÎŒ.toBoxAdditive I * εâ; swap
    · linarith
    simp_rw [mul_le_mul_right εâ0, ÎŒ.toBoxAdditive_apply]
    refine le_trans ?_ <| toReal_mono (lt_top_iff_ne_top.1 ΌI) <| Ό.mono <| un (B \ B') sdiff_subset
    rw [â toReal_sum (fun J hJ ⊠ΌJ_ne_top J (mem_sdiff.1 hJ).1), â Finset.tsum_subtype]
    refine (toReal_mono <| ne_of_lt <| lt_of_le_of_lt (Ό.mono <| un (B \ B') sdiff_subset) ΌI) ?_
    refine le_of_eq (measure_biUnion (countable_toSet _) ?_ (fun J _ ⊠J.measurableSet_coe)).symm
    exact fun J hJ J' hJ' hJJ' ⊠pairwiseDisjoint _ (mem_sdiff.1 hJ).1 (mem_sdiff.1 hJ').1 hJJ'
  -- The contribution of the boxes contained within U is bounded because f is bounded and ÎŒ U < εâ.
  · have : â J â B', âÎŒ.toBoxAdditive J ⢠(f (tâ J) - f (tâ J))â †Ό.toBoxAdditive J * (2 * C) := by
      intro J _
      rw [norm_smul, Ό.toBoxAdditive_apply, Real.norm_of_nonneg toReal_nonneg, two_mul]
      refine mul_le_mul_of_nonneg_left (le_trans (norm_sub_le _ _) (add_le_add ?_ ?_)) (by simp) <;>
        exact hC _ (TaggedPrepartition.tag_mem_Icc _ J)
    apply (norm_sum_le_of_le B' this).trans
    simp_rw [â sum_mul, ÎŒ.toBoxAdditive_apply, â toReal_sum (fun J hJ ⊠ΌJ_ne_top J (hB' hJ))]
    suffices (â J in B', ÎŒ J).toReal †εâ by
      linarith [mul_le_mul_of_nonneg_right this <| (mul_nonneg_iff_of_pos_left two_pos).2 C0]
    rw [â toReal_ofReal (le_of_lt εâ0)]
    refine toReal_mono ofReal_ne_top (le_trans ?_ (le_of_lt hU))
    trans ÎŒ' (â J â B', J)
    · simp only [Ό', Ό.restrict_eq_self <| (un _ hB').trans I.coe_subset_Icc]
      exact le_of_eq <| Eq.symm <| measure_biUnion_finset
        (fun J hJ K hK hJK ⊠pairwiseDisjoint _ (hB' hJ) (hB' hK) hJK) fun J _ ⊠J.measurableSet_coe
    · apply Ό'.mono
      simp_rw [iUnion_subset_iff]
      exact fun J hJ ⊠(mem_filter.1 hJ).2
/-- A function that is bounded on a box `I` and a.e. continuous is integrable on `I`.
This is a version of `integrable_of_bounded_and_ae_continuousWithinAt` with a stronger continuity
assumption so that the user does not need to specialize the continuity assumption to each box on
which the theorem is to be applied. -/
theorem integrable_of_bounded_and_ae_continuous [CompleteSpace E] {I : Box ι} {f : ââ¿ â E}
    (hb : â C : â, â x â Box.Icc I, âf xâ †C) (ÎŒ : Measure ââ¿) [IsLocallyFiniteMeasure ÎŒ]
    (hc : âáµ x âÎŒ, ContinuousAt f x) : Integrable I l f ÎŒ.toBoxAdditive.toSMul :=
  -- Continuity at a point implies continuity within any set at that point, and an a.e. property
  -- for `Ό` implies the same property a.e. for the restricted measure.
  integrable_of_bounded_and_ae_continuousWithinAt l hb Ό <|
    Eventually.filter_mono (ae_mono Ό.restrict_le_self) (hc.mono fun _ h ⊠h.continuousWithinAt)
/-- A continuous function is box-integrable with respect to any locally finite measure.
This is true for any volume with bounded variation. -/
theorem integrable_of_continuousOn [CompleteSpace E] {I : Box ι} {f : ââ¿ â E}
    (hc : ContinuousOn f (Box.Icc I)) (ÎŒ : Measure ââ¿) [IsLocallyFiniteMeasure ÎŒ] :
    Integrable.{u, v, v} I l f Ό.toBoxAdditive.toSMul := by
  apply integrable_of_bounded_and_ae_continuousWithinAt
  -- `f` is bounded on `Box.Icc I`: the image of the compact box is bounded, hence contained in a
  -- closed ball around the origin.
  · obtain âšC, hCâ© := (NormedSpace.isBounded_iff_subset_smul_closedBall â).1
      (I.isCompact_Icc.image_of_continuousOn hc).isBounded
    use âCâ, fun x hx ⊠by
      simpa only [smul_closedUnitBall, mem_closedBall_zero_iff] using hC (Set.mem_image_of_mem f hx)
  -- Continuity on `Box.Icc I` gives continuity within the box at every point, which is (more
  -- than) the required a.e. continuity for the restricted measure.
  · refine eventually_of_mem ?_ (fun x hx ⊠hc.continuousWithinAt hx)
    rw [mem_ae_iff, Ό.restrict_apply] <;> simp [MeasurableSet.compl_iff.2 I.measurableSet_Icc]
variable {l}
/-- This is an auxiliary lemma used to prove two statements at once. Use one of the next two
lemmas instead. -/
theorem HasIntegral.of_bRiemann_eq_false_of_forall_isLittleO (hl : l.bRiemann = false)
    (B : ι âáµáµ[I] â) (hB0 : â J, 0 †B J) (g : ι âáµáµ[I] F) (s : Set ââ¿) (hs : s.Countable)
    (hlH : s.Nonempty â l.bHenstock = true)
    (Hâ : â (c : ââ¥0), â x â Box.Icc I â© s, â ε > (0 : â),
      â ÎŽ > 0, â J †I, Box.Icc J â Metric.closedBall x ÎŽ â x â Box.Icc J â
        (l.bDistortion â J.distortion †c) â dist (vol J (f x)) (g J) †ε)
    (Hâ : â (c : ââ¥0), â x â Box.Icc I \ s, â ε > (0 : â),
      â ÎŽ > 0, â J †I, Box.Icc J â Metric.closedBall x ÎŽ â (l.bHenstock â x â Box.Icc J) â
        (l.bDistortion â J.distortion †c) â dist (vol J (f x)) (g J) †ε * B J) :
    HasIntegral I l f vol (g I) := by
  /- We choose `r x` differently for `x â s` and `x â s`.
  For `x â s`, we choose `εs` such that `â' x : s, εs x < ε / 2 / 2 ^ #ι`, then choose `r x` so
  that `dist (vol J (f x)) (g J) †εs x` for `J` in the `r x`-neighborhood of `x`. This guarantees
  that the sum of these distances over boxes `J` such that `Ï.tag J â s` is less than `ε / 2`. We
  need an additional multiplier `2 ^ #ι` because different boxes can have the same tag.
  For `x â s`, we choose `r x` so that `dist (vol (J (f x))) (g J) †(ε / 2 / B I) * B J` for a
  box `J` in the `ÎŽ`-neighborhood of `x`. -/
  refine ((l.hasBasis_toFilteriUnion_top _).tendsto_iff Metric.nhds_basis_closedBall).2 ?_
  intro ε ε0
  simp only [â exists_prop, gt_iff_lt, Subtype.exists'] at Hâ Hâ
  choose! ÎŽâ HÎŽâ using Hâ
  choose! ÎŽâ HÎŽâ using Hâ
  have ε0' := half_pos ε0; have H0 : 0 < (2 : â) ^ Fintype.card ι := pow_pos zero_lt_two _
  -- Pick per-point tolerances `εs` on the countable set `s` with total sum under `ε / 2 / 2 ^ #ι`.
  rcases hs.exists_pos_forall_sum_le (div_pos ε0' H0) with âšÎµs, hεs0, hεsâ©
  simp only [le_div_iff' H0, mul_sum] at hεs
  rcases exists_pos_mul_lt ε0' (B I) with âšÎµ', ε'0, hεIâ©
  classical
  -- Combine the two choices of radii into a single gauge `Ύ`.
  set ÎŽ : ââ¥0 â ââ¿ â Ioi (0 : â) := fun c x => if x â s then ÎŽâ c x (εs x) else (ÎŽâ c) x ε'
  refine âšÎŽ, fun c => l.rCond_of_bRiemann_eq_false hl, ?_â©
  simp only [Set.mem_iUnion, mem_inter_iff, mem_setOf_eq]
  rintro Ï âšc, hÏÎŽ, hÏpâ©
  -- Now we split the sum into two parts based on whether `Ï.tag J` belongs to `s` or not.
  rw [â g.sum_partition_boxes le_rfl hÏp, Metric.mem_closedBall, integralSum,
    â sum_filter_add_sum_filter_not Ï.boxes fun J => Ï.tag J â s,
    â sum_filter_add_sum_filter_not Ï.boxes fun J => Ï.tag J â s, â add_halves ε]
  refine dist_add_add_le_of_le ?_ ?_
  · rcases s.eq_empty_or_nonempty with (rfl | hsne); · simp [ε0'.le]
    /- For the boxes such that `Ï.tag J â s`, we use the fact that at most `2 ^ #ι` boxes have the
    same tag. -/
    specialize hlH hsne
    have : â J â Ï.boxes.filter fun J => Ï.tag J â s,
        dist (vol J (f <| Ï.tag J)) (g J) †εs (Ï.tag J) := fun J hJ ⊠by
      rw [Finset.mem_filter] at hJ; cases' hJ with hJ hJs
      refine HÎŽâ c _ âšÏ.tag_mem_Icc _, hJsâ© _ (hεs0 _) _ (Ï.le_of_mem' _ hJ) ?_
        (hÏÎŽ.2 hlH J hJ) fun hD => (Finset.le_sup hJ).trans (hÏÎŽ.3 hD)
      convert hÏÎŽ.1 J hJ using 3; exact (if_pos hJs).symm
    refine (dist_sum_sum_le_of_le _ this).trans ?_
    rw [sum_comp]
    refine (sum_le_sum ?_).trans (hεs _ ?_)
    · rintro b -
      rw [â Nat.cast_two, â Nat.cast_pow, â nsmul_eq_mul]
      -- Each tag is shared by at most `2 ^ #ι` boxes of a Henstock partition.
      refine nsmul_le_nsmul_left (hεs0 _).le ?_
      refine (Finset.card_le_card ?_).trans ((hÏÎŽ.isHenstock hlH).card_filter_tag_eq_le b)
      exact filter_subset_filter _ (filter_subset _ _)
    · rw [Finset.coe_image, Set.image_subset_iff]
      exact fun J hJ => (Finset.mem_filter.1 hJ).2
  /- Now we deal with boxes such that `Ï.tag J â s`.
  In this case the estimate is straightforward. -/
  -- Porting note: avoided strange elaboration issues by rewriting using `calc`
  calc
    dist (â J â Ï.boxes.filter (¬tag Ï Â· â s), vol J (f (tag Ï J)))
        (â J â Ï.boxes.filter (¬tag Ï Â· â s), g J)
      †â J â Ï.boxes.filter (¬tag Ï Â· â s), ε' * B J := dist_sum_sum_le_of_le _ fun J hJ ⊠by
        rw [Finset.mem_filter] at hJ; cases' hJ with hJ hJs
        refine HÎŽâ c _ âšÏ.tag_mem_Icc _, hJsâ© _ ε'0 _ (Ï.le_of_mem' _ hJ) ?_ (fun hH => hÏÎŽ.2 hH J hJ)
          fun hD => (Finset.le_sup hJ).trans (hÏÎŽ.3 hD)
        convert hÏÎŽ.1 J hJ using 3; exact (if_neg hJs).symm
    _ †â J â Ï.boxes, ε' * B J := sum_le_sum_of_subset_of_nonneg (filter_subset _ _) fun _ _ _ âŠ
        mul_nonneg ε'0.le (hB0 _)
    _ = B I * ε' := by rw [â mul_sum, B.sum_partition_boxes le_rfl hÏp, mul_comm]
    _ †ε / 2 := hεI.le
/-- A function `f` has Henstock (or `â¥`) integral over `I` equal to the value of a box-additive
function `g` on `I` provided that `vol J (f x)` is sufficiently close to `g J` for sufficiently
small boxes `J â x`. This lemma is useful, e.g., to prove the Divergence theorem for
integral along `â¥`.
Let `l` be either `BoxIntegral.IntegrationParams.Henstock` or `â¥`. Let `g` a box-additive function
on subboxes of `I`. Suppose that there exists a nonnegative box-additive function `B` and a
countable set `s` with the following property.
For every `c : ââ¥0`, a point `x â I.Icc`, and a positive `ε` there exists `ÎŽ > 0` such that for any
box `J †I` such that
- `x â J.Icc â Metric.closedBall x ÎŽ`;
- if `l.bDistortion` (i.e., `l = â¥`), then the distortion of `J` is less than or equal to `c`,
the distance between the term `vol J (f x)` of an integral sum corresponding to `J` and `g J` is
less than or equal to `ε` if `x â s` and is less than or equal to `ε * B J` otherwise.
Then `f` is integrable on `I` along `l` with integral `g I`. -/
theorem HasIntegral.of_le_Henstock_of_forall_isLittleO (hl : l †Henstock) (B : ι âáµáµ[I] â)
    (hB0 : â J, 0 †B J) (g : ι âáµáµ[I] F) (s : Set ââ¿) (hs : s.Countable)
    (Hâ : â (c : ââ¥0), â x â Box.Icc I â© s, â ε > (0 : â),
      â ÎŽ > 0, â J †I, Box.Icc J â Metric.closedBall x ÎŽ â x â Box.Icc J â
        (l.bDistortion â J.distortion †c) â dist (vol J (f x)) (g J) †ε)
    (Hâ : â (c : ââ¥0), â x â Box.Icc I \ s, â ε > (0 : â),
      â ÎŽ > 0, â J †I, Box.Icc J â Metric.closedBall x ÎŽ â x â Box.Icc J â
        (l.bDistortion â J.distortion †c) â dist (vol J (f x)) (g J) †ε * B J) :
    HasIntegral I l f vol (g I) :=
  -- `l †Henstock` forces `l.bHenstock = true` and `l.bRiemann = false`, so the auxiliary
  -- lemma above applies; its Henstock-conditional hypothesis `Hâ` simplifies accordingly.
  have A : l.bHenstock := Bool.eq_true_of_true_le hl.2.1
  HasIntegral.of_bRiemann_eq_false_of_forall_isLittleO (Bool.eq_false_of_le_false hl.1) B hB0 _ s hs
    (fun _ => A) Hâ <| by simpa only [A, true_imp_iff] using Hâ
/-- Suppose that there exists a nonnegative box-additive function `B` with the following property.
For every `c : ââ¥0`, a point `x â I.Icc`, and a positive `ε` there exists `ÎŽ > 0` such that for any
box `J †I` such that
- `J.Icc â Metric.closedBall x ÎŽ`;
- if `l.bDistortion` (i.e., `l = â¥`), then the distortion of `J` is less than or equal to `c`,
the distance between the term `vol J (f x)` of an integral sum corresponding to `J` and `g J` is
less than or equal to `ε * B J`.
Then `f` is McShane integrable on `I` with integral `g I`. -/
theorem HasIntegral.mcShane_of_forall_isLittleO (B : ι âáµáµ[I] â) (hB0 : â J, 0 †B J)
    (g : ι âáµáµ[I] F) (H : â (c : ââ¥0), â x â Box.Icc I, â ε > (0 : â), â ÎŽ > 0, â J †I,
      Box.Icc J â Metric.closedBall x ÎŽ â dist (vol J (f x)) (g J) †ε * B J) :
    HasIntegral I McShane f vol (g I) :=
  -- Apply the auxiliary lemma with `s = â`: the countable-set hypotheses hold vacuously, and
  -- McShane integration imposes no Henstock tag condition to discharge.
  (HasIntegral.of_bRiemann_eq_false_of_forall_isLittleO (l := McShane) rfl B hB0 g â
      countable_empty
      (fun âš_x, hxâ© => hx.elim) fun c x hx => hx.2.elim) <| by
    simpa only [McShane, Bool.coe_sort_false, false_imp_iff, true_imp_iff, diff_empty] using H
end BoxIntegral
|
Analysis\BoxIntegral\DivergenceTheorem.lean | /-
Copyright (c) 2021 Yury Kudryashov. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yury Kudryashov
-/
import Mathlib.Analysis.BoxIntegral.Basic
import Mathlib.Analysis.BoxIntegral.Partition.Additive
import Mathlib.Analysis.Calculus.FDeriv.Prod
/-!
# Divergence integral for Henstock-Kurzweil integral
In this file we prove the Divergence Theorem for a Henstock-Kurzweil style integral. The theorem
says the following. Let `f : ââ¿ â Eâ¿` be a function differentiable on a closed rectangular box
`I` with derivative `f' x : ââ¿ âL[â] Eâ¿` at `x â I`. Then the divergence `fun x ⊠â k, f' x eâ k`,
where `eâ = Pi.single k 1` is the `k`-th basis vector, is integrable on `I`, and its integral is
equal to the sum of integrals of `f` over the faces of `I` taken with appropriate signs.
To make the proof work, we had to ban tagged partitions with “long and thin” boxes. More precisely,
we use the following generalization of one-dimensional Henstock-Kurzweil integral to functions
defined on a box in `ââ¿` (it corresponds to the value `BoxIntegral.IntegrationParams.GP = â¥` of
`BoxIntegral.IntegrationParams` in the definition of `BoxIntegral.HasIntegral`).
We say that `f : ââ¿ â E` has integral `y : E` over a box `I â ââ¿` if for an arbitrarily small
positive `ε` and an arbitrarily large `c`, there exists a function `r : ââ¿ â (0, â)` such that for
any tagged partition `Ï` of `I` such that
* `Ï` is a Henstock partition, i.e., each tag belongs to its box;
* `Ï` is subordinate to `r`;
* for every box of `Ï`, the maximum of the ratios of its sides is less than or equal to `c`,
the integral sum of `f` over `Ï` is `ε`-close to `y`. In case of dimension one, the last condition
trivially holds for any `c ⥠1`, so this definition is equivalent to the standard definition of
Henstock-Kurzweil integral.
## Tags
Henstock-Kurzweil integral, integral, Stokes theorem, divergence theorem
-/
-- Scoped notation for `ââ¥0`, `ââ¥0â`, neighborhood filters, and box-integral sums.
open scoped NNReal ENNReal Topology BoxIntegral
open ContinuousLinearMap (lsmul)
open Filter Set Finset Metric
-- `GP` is the integration parameter used throughout this file; `gp_le` relates it to the
-- other integration parameters (used below to transfer Henstock-style results).
open BoxIntegral.IntegrationParams (GP gp_le)
noncomputable section
universe u
-- `E` is the codomain (a real normed space); `n + 1` will be the dimension of the domain.
variable {E : Type u} [NormedAddCommGroup E] [NormedSpace â E] {n : â}
namespace BoxIntegral
-- `I` is the ambient box in dimension `n + 1`; `i` indexes a coordinate (hence a pair of faces).
variable [CompleteSpace E] (I : Box (Fin (n + 1))) {i : Fin (n + 1)}
open MeasureTheory
/-- Auxiliary lemma for the divergence theorem.

If on `Box.Icc I` the function `f` is approximated by the affine map `y ⊠a + f' (y - x)` with
error at most `ε` times the distance from `y` to `x` (hypothesis `hε`), and the distortion of `I`
is at most `c`, then the difference between the volume of `I` times `f' (Pi.single i 1)` and the
difference of the integrals of `f` over the upper and lower `i`-th faces of `I` has norm at most
`2 * ε * c` times the volume of `I`. -/
theorem norm_volume_sub_integral_face_upper_sub_lower_smul_le {f : (Fin (n + 1) â â) â E}
{f' : (Fin (n + 1) â â) âL[â] E} (hfc : ContinuousOn f (Box.Icc I)) {x : Fin (n + 1) â â}
(hxI : x â (Box.Icc I)) {a : E} {ε : â} (h0 : 0 < ε)
(hε : â y â (Box.Icc I), âf y - a - f' (y - x)â †ε * ây - xâ) {c : ââ¥0}
(hc : I.distortion †c) :
â(â j, (I.upper j - I.lower j)) ⢠f' (Pi.single i 1) -
(integral (I.face i) ⥠(f â i.insertNth (α := fun _ ⊠â) (I.upper i)) BoxAdditiveMap.volume -
integral (I.face i) ⥠(f â i.insertNth (α := fun _ ⊠â) (I.lower i))
BoxAdditiveMap.volume)â â€
2 * ε * c * â j, (I.upper j - I.lower j) := by
-- Porting note: Lean fails to find `α` in the next line
-- `e z y` inserts `z` as the `i`-th coordinate of a face point `y`, landing in the full box.
set e : â â (Fin n â â) â (Fin (n + 1) â â) := i.insertNth (α := fun _ ⊠â)
/- **Plan of the proof**. The difference of the integrals of the affine function
`fun y ⊠a + f' (y - x)` over the faces `x i = I.upper i` and `x i = I.lower i` is equal to the
volume of `I` multiplied by `f' (Pi.single i 1)`, so it suffices to show that the integral of
`f y - a - f' (y - x)` over each of these faces is less than or equal to `ε * c * vol I`. We
integrate a function of the norm `†ε * diam I.Icc` over a box of volume
`â j â i, (I.upper j - I.lower j)`. Since `diam I.Icc †c * (I.upper i - I.lower i)`, we get the
required estimate. -/
-- The two endpoints of the `i`-th edge belong to that (closed) edge.
have Hl : I.lower i â Icc (I.lower i) (I.upper i) := Set.left_mem_Icc.2 (I.lower_le_upper i)
have Hu : I.upper i â Icc (I.lower i) (I.upper i) := Set.right_mem_Icc.2 (I.lower_le_upper i)
-- `f` restricted to any slice `x i = const` of the box is integrable over the face,
-- by continuity of `f` on the closed box.
have Hi : â x â Icc (I.lower i) (I.upper i),
Integrable.{0, u, u} (I.face i) ⥠(f â e x) BoxAdditiveMap.volume := fun x hx =>
integrable_of_continuousOn _ (Box.continuousOn_face_Icc hfc hx) volume
/- We start with an estimate: the difference of the values of `f` at the corresponding points
of the faces `x i = I.lower i` and `x i = I.upper i` is `(2 * ε * diam I.Icc)`-close to the
value of `f'` on `Pi.single i (I.upper i - I.lower i) = lᵢ ⢠eᵢ`, where
`láµ¢ = I.upper i - I.lower i` is the length of `i`-th edge of `I` and `eáµ¢ = Pi.single i 1` is the
`i`-th unit vector. -/
have : â y â Box.Icc (I.face i),
âf' (Pi.single i (I.upper i - I.lower i)) -
(f (e (I.upper i) y) - f (e (I.lower i) y))â â€
2 * ε * diam (Box.Icc I) := fun y hy ⊠by
-- Write `f` as its affine approximation plus the error term `g`.
set g := fun y => f y - a - f' (y - x) with hg
change â y â (Box.Icc I), âg yâ †ε * ây - xâ at hε
clear_value g; obtain rfl : f = fun y => a + f' (y - x) + g y := by simp [hg]
-- The affine parts cancel, leaving only the difference of the error terms.
convert_to âg (e (I.lower i) y) - g (e (I.upper i) y)â †_
· congr 1
have := Fin.insertNth_sub_same (α := fun _ ⊠â) i (I.upper i) (I.lower i) y
simp only [â this, f'.map_sub]; abel
· have : â z â Icc (I.lower i) (I.upper i), e z y â (Box.Icc I) := fun z hz =>
I.mapsTo_insertNth_face_Icc hz hy
-- Each error term is bounded by `ε * diam I.Icc` because every point of the box is within
-- `diam I.Icc` of `x`.
replace hε : â y â (Box.Icc I), âg yâ †ε * diam (Box.Icc I) := by
intro y hy
refine (hε y hy).trans (mul_le_mul_of_nonneg_left ?_ h0.le)
rw [â dist_eq_norm]
exact dist_le_diam_of_mem I.isCompact_Icc.isBounded hy hxI
rw [two_mul, add_mul]
exact norm_sub_le_of_le (hε _ (this _ Hl)) (hε _ (this _ Hu))
calc
â(â j, (I.upper j - I.lower j)) ⢠f' (Pi.single i 1) -
(integral (I.face i) ⥠(f â e (I.upper i)) BoxAdditiveMap.volume -
integral (I.face i) ⥠(f â e (I.lower i)) BoxAdditiveMap.volume)â =
âintegral.{0, u, u} (I.face i) â¥
(fun x : Fin n â â =>
f' (Pi.single i (I.upper i - I.lower i)) -
(f (e (I.upper i) x) - f (e (I.lower i) x)))
BoxAdditiveMap.volumeâ := by
rw [â integral_sub (Hi _ Hu) (Hi _ Hl), â Box.volume_face_mul i, mul_smul, â Box.volume_apply,
â BoxAdditiveMap.toSMul_apply, â integral_const, â BoxAdditiveMap.volume,
â integral_sub (integrable_const _) ((Hi _ Hu).sub (Hi _ Hl))]
simp only [(· â ·), Pi.sub_def, â f'.map_smul, â Pi.single_smul', smul_eq_mul, mul_one]
_ †(volume (I.face i : Set (Fin n â â))).toReal * (2 * ε * c * (I.upper i - I.lower i)) := by
-- The hard part of the estimate was done above, here we just replace `diam I.Icc`
-- with `c * (I.upper i - I.lower i)`
refine norm_integral_le_of_le_const (fun y hy => (this y hy).trans ?_) volume
rw [mul_assoc (2 * ε)]
gcongr
exact I.diam_Icc_le_of_distortion_le i hc
_ = 2 * ε * c * â j, (I.upper j - I.lower j) := by
rw [â Measure.toBoxAdditive_apply, Box.volume_apply, â I.volume_face_mul i]
ac_rfl
/-- If `f : ââ¿âºÂ¹ â E` is differentiable on a closed rectangular box `I` with derivative `f'`, then
the partial derivative `fun x ⊠f' x (Pi.single i 1)` is Henstock-Kurzweil integrable with integral
equal to the difference of integrals of `f` over the faces `x i = I.upper i` and `x i = I.lower i`.
More precisely, we use a non-standard generalization of the Henstock-Kurzweil integral and
we allow `f` to be non-differentiable (but still continuous) at a countable set of points.
TODO: If `n > 0`, then the condition at `x â s` can be replaced by a much weaker estimate but this
requires either better integrability theorems, or usage of a filter depending on the countable set
`s` (we need to ensure that none of the faces of a partition contain a point from `s`). -/
theorem hasIntegral_GP_pderiv (f : (Fin (n + 1) â â) â E)
(f' : (Fin (n + 1) â â) â (Fin (n + 1) â â) âL[â] E) (s : Set (Fin (n + 1) â â))
(hs : s.Countable) (Hs : â x â s, ContinuousWithinAt f (Box.Icc I) x)
(Hd : â x â (Box.Icc I) \ s, HasFDerivWithinAt f (f' x) (Box.Icc I) x) (i : Fin (n + 1)) :
HasIntegral.{0, u, u} I GP (fun x => f' x (Pi.single i 1)) BoxAdditiveMap.volume
(integral.{0, u, u} (I.face i) GP (fun x => f (i.insertNth (I.upper i) x))
BoxAdditiveMap.volume -
integral.{0, u, u} (I.face i) GP (fun x => f (i.insertNth (I.lower i) x))
BoxAdditiveMap.volume) := by
/- Note that `f` is continuous on `I.Icc`, hence it is integrable on the faces of all boxes
`J †I`, thus the difference of integrals over `x i = J.upper i` and `x i = J.lower i` is a
box-additive function of `J †I`. -/
-- `f` is continuous at each point of the box: by hypothesis `Hs` on `s`, and by
-- differentiability away from `s`.
have Hc : ContinuousOn f (Box.Icc I) := fun x hx ⊠by
by_cases hxs : x â s
exacts [Hs x hxs, (Hd x âšhx, hxsâ©).continuousWithinAt]
-- `fI y J` is the integral of `f` over the slice `x i = y` of the face-box `J`.
set fI : â â Box (Fin n) â E := fun y J =>
integral.{0, u, u} J GP (fun x => f (i.insertNth y x)) BoxAdditiveMap.volume
set fb : Icc (I.lower i) (I.upper i) â Fin n âáµáµ[â(I.face i)] E := fun x =>
(integrable_of_continuousOn GP (Box.continuousOn_face_Icc Hc x.2) volume).toBoxAdditive
-- `F J` = (integral of `f` over upper `i`-face of `J`) - (over lower `i`-face); box-additive.
set F : Fin (n + 1) âáµáµ[I] E := BoxAdditiveMap.upperSubLower I i fI fb fun x _ J => rfl
-- Thus our statement follows from some local estimates.
change HasIntegral I GP (fun x => f' x (Pi.single i 1)) _ (F I)
-- Reduce the global statement to local estimates: one near points of `s`, one away from `s`.
refine HasIntegral.of_le_Henstock_of_forall_isLittleO gp_le ?_ ?_ _ s hs ?_ ?_
·-- We use the volume as an upper estimate.
exact (volume : Measure (Fin (n + 1) â â)).toBoxAdditive.restrict _ le_top
· exact fun J => ENNReal.toReal_nonneg
· intro c x hx ε ε0
/- Near `x â s` we choose `ÎŽ` so that both vectors are small. `volume J ⢠eáµ¢` is small because
`volume J †(2 * Ύ) ^ (n + 1)` is small, and the difference of the integrals is small
because each of the integrals is close to `volume (J.face i) ⢠f x`.
TODO: there should be a shorter and more readable way to formalize this simple proof. -/
have : âá¶ ÎŽ in ð[>] (0 : â), ÎŽ â Ioc (0 : â) (1 / 2) â§
(âáµ (yâ â closedBall x ÎŽ â© (Box.Icc I)) (yâ â closedBall x ÎŽ â© (Box.Icc I)),
âf yâ - f yââ †ε / 2) â§ (2 * ÎŽ) ^ (n + 1) * âf' x (Pi.single i 1)â †ε / 2 := by
refine .and ?_ (.and ?_ ?_)
· exact Ioc_mem_nhdsWithin_Ioi âšle_rfl, one_half_posâ©
· rcases ((nhdsWithin_hasBasis nhds_basis_closedBall _).tendsto_iff nhds_basis_closedBall).1
(Hs x hx.2) _ (half_pos <| half_pos ε0) with âšÎŽâ, ÎŽâ0, hÎŽââ©
filter_upwards [Ioc_mem_nhdsWithin_Ioi âšle_rfl, ÎŽâ0â©] with ÎŽ hÎŽ yâ hyâ yâ hyâ
have : closedBall x ÎŽ â© (Box.Icc I) â closedBall x ÎŽâ â© (Box.Icc I) := by gcongr; exact hÎŽ.2
rw [â dist_eq_norm]
calc
dist (f yâ) (f yâ) †dist (f yâ) (f x) + dist (f yâ) (f x) := dist_triangle_right _ _ _
_ †ε / 2 / 2 + ε / 2 / 2 := add_le_add (hÎŽâ _ <| this hyâ) (hÎŽâ _ <| this hyâ)
_ = ε / 2 := add_halves _
· have : ContinuousWithinAt (fun ÎŽ : â => (2 * ÎŽ) ^ (n + 1) * âf' x (Pi.single i 1)â)
(Ioi 0) 0 := ((continuousWithinAt_id.const_mul _).pow _).mul_const _
refine this.eventually (ge_mem_nhds ?_)
simpa using half_pos ε0
-- Fix a `Ύ` satisfying all three conditions simultaneously.
rcases this.exists with âšÎŽ, âšhÎŽ0, hÎŽ12â©, hdfÎŽ, hÎŽâ©
refine âšÎŽ, hÎŽ0, fun J hJI hJÎŽ _ _ => add_halves ε âž ?_â©
have Hl : J.lower i â Icc (J.lower i) (J.upper i) := Set.left_mem_Icc.2 (J.lower_le_upper i)
have Hu : J.upper i â Icc (J.lower i) (J.upper i) := Set.right_mem_Icc.2 (J.lower_le_upper i)
have Hi : â x â Icc (J.lower i) (J.upper i),
Integrable.{0, u, u} (J.face i) GP (fun y => f (i.insertNth x y))
BoxAdditiveMap.volume := fun x hx =>
integrable_of_continuousOn _ (Box.continuousOn_face_Icc (Hc.mono <| Box.le_iff_Icc.1 hJI) hx)
volume
have hJÎŽ' : Box.Icc J â closedBall x ÎŽ â© (Box.Icc I) := subset_inter hJÎŽ (Box.le_iff_Icc.1 hJI)
have Hmaps : â z â Icc (J.lower i) (J.upper i),
MapsTo (i.insertNth z) (Box.Icc (J.face i)) (closedBall x ÎŽ â© (Box.Icc I)) := fun z hz =>
(J.mapsTo_insertNth_face_Icc hz).mono Subset.rfl hJÎŽ'
simp only [dist_eq_norm]; dsimp [F]
rw [â integral_sub (Hi _ Hu) (Hi _ Hl)]
-- Estimate the two summands separately: the volume term and the face-integral term.
refine (norm_sub_le _ _).trans (add_le_add ?_ ?_)
· simp_rw [BoxAdditiveMap.volume_apply, norm_smul, Real.norm_eq_abs, abs_prod]
refine (mul_le_mul_of_nonneg_right ?_ <| norm_nonneg _).trans hÎŽ
-- Every edge of `J` has length at most `2 * Ύ` since `J.Icc` lies in `closedBall x Ύ`.
have : â j, |J.upper j - J.lower j| †2 * ÎŽ := fun j âŠ
calc
dist (J.upper j) (J.lower j) †dist J.upper J.lower := dist_le_pi_dist _ _ _
_ †dist J.upper x + dist J.lower x := dist_triangle_right _ _ _
_ †Ύ + Ύ := add_le_add (hJΎ J.upper_mem_Icc) (hJΎ J.lower_mem_Icc)
_ = 2 * ÎŽ := (two_mul ÎŽ).symm
calc
â j, |J.upper j - J.lower j| †â j : Fin (n + 1), 2 * ÎŽ :=
prod_le_prod (fun _ _ => abs_nonneg _) fun j _ => this j
_ = (2 * ÎŽ) ^ (n + 1) := by simp
· refine (norm_integral_le_of_le_const (fun y hy => hdfΎ _ (Hmaps _ Hu hy) _
(Hmaps _ Hl hy)) volume).trans ?_
refine (mul_le_mul_of_nonneg_right ?_ (half_pos ε0).le).trans_eq (one_mul _)
rw [Box.coe_eq_pi, Real.volume_pi_Ioc_toReal (Box.lower_le_upper _)]
-- The face volume is at most `1` because each edge has length `†2Ύ †1`.
refine prod_le_one (fun _ _ => sub_nonneg.2 <| Box.lower_le_upper _ _) fun j _ => ?_
calc
J.upper (i.succAbove j) - J.lower (i.succAbove j) â€
dist (J.upper (i.succAbove j)) (J.lower (i.succAbove j)) :=
le_abs_self _
_ †dist J.upper J.lower := dist_le_pi_dist J.upper J.lower (i.succAbove j)
_ †dist J.upper x + dist J.lower x := dist_triangle_right _ _ _
_ †Ύ + Ύ := add_le_add (hJΎ J.upper_mem_Icc) (hJΎ J.lower_mem_Icc)
_ †1 / 2 + 1 / 2 := by gcongr
_ = 1 := add_halves 1
· intro c x hx ε ε0
/- At a point `x â s`, we unfold the definition of Fréchet differentiability, then use
an estimate we proved earlier in this file. -/
rcases exists_pos_mul_lt ε0 (2 * c) with âšÎµ', ε'0, hltâ©
rcases (nhdsWithin_hasBasis nhds_basis_closedBall _).mem_iff.1
((Hd x hx).isLittleO.def ε'0) with âšÎŽ, ÎŽ0, HÎŽâ©
refine âšÎŽ, ÎŽ0, fun J hle hJÎŽ hxJ hJc => ?_â©
simp only [BoxAdditiveMap.volume_apply, Box.volume_apply, dist_eq_norm]
-- Apply the auxiliary lemma proved above with `ε := ε'`.
refine (norm_volume_sub_integral_face_upper_sub_lower_smul_le _
(Hc.mono <| Box.le_iff_Icc.1 hle) hxJ ε'0 (fun y hy => HΎ ?_) (hJc rfl)).trans ?_
· exact âšhJÎŽ hy, Box.le_iff_Icc.1 hle hyâ©
· rw [mul_right_comm (2 : â), â Box.volume_apply]
exact mul_le_mul_of_nonneg_right hlt.le ENNReal.toReal_nonneg
/-- Divergence theorem for a Henstock-Kurzweil style integral.
If `f : ââ¿âºÂ¹ â Eâ¿âºÂ¹` is differentiable on a closed rectangular box `I` with derivative `f'`, then
the divergence `â i, f' x (Pi.single i 1) i` is Henstock-Kurzweil integrable with integral equal to
the sum of integrals of `f` over the faces of `I` taken with appropriate signs.
More precisely, we use a non-standard generalization of the Henstock-Kurzweil integral and
we allow `f` to be non-differentiable (but still continuous) at a countable set of points. -/
theorem hasIntegral_GP_divergence_of_forall_hasDerivWithinAt
(f : (Fin (n + 1) â â) â Fin (n + 1) â E)
(f' : (Fin (n + 1) â â) â (Fin (n + 1) â â) âL[â] (Fin (n + 1) â E))
(s : Set (Fin (n + 1) â â)) (hs : s.Countable)
(Hs : â x â s, ContinuousWithinAt f (Box.Icc I) x)
(Hd : â x â (Box.Icc I) \ s, HasFDerivWithinAt f (f' x) (Box.Icc I) x) :
HasIntegral.{0, u, u} I GP (fun x => â i, f' x (Pi.single i 1) i) BoxAdditiveMap.volume
(â i,
(integral.{0, u, u} (I.face i) GP (fun x => f (i.insertNth (I.upper i) x) i)
BoxAdditiveMap.volume -
integral.{0, u, u} (I.face i) GP (fun x => f (i.insertNth (I.lower i) x) i)
BoxAdditiveMap.volume)) := by
-- Sum the single-coordinate statement `hasIntegral_GP_pderiv` over all coordinates `i`.
refine HasIntegral.sum fun i _ => ?_
-- Split the vector-valued continuity/differentiability hypotheses into coordinates.
simp only [hasFDerivWithinAt_pi', continuousWithinAt_pi] at Hd Hs
exact hasIntegral_GP_pderiv I _ _ s hs (fun x hx => Hs x hx i) (fun x hx => Hd x hx i) i
end BoxIntegral
|
Analysis\BoxIntegral\Integrability.lean | /-
Copyright (c) 2021 Yury Kudryashov. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yury Kudryashov
-/
import Mathlib.Analysis.BoxIntegral.Basic
import Mathlib.MeasureTheory.Integral.SetIntegral
import Mathlib.Tactic.Generalize
/-!
# McShane integrability vs Bochner integrability
In this file we prove that any Bochner integrable function is McShane integrable (hence, it is
Henstock and `GP` integrable) with the same integral. The proof is based on
[Russel A. Gordon, *The integrals of Lebesgue, Denjoy, Perron, and Henstock*][Gordon55].
We deduce that the same is true for the Riemann integral for continuous functions.
## Tags
integral, McShane integral, Bochner integral
-/
open scoped NNReal ENNReal Topology
universe u v
variable {ι : Type u} {E : Type v} [Fintype ι] [NormedAddCommGroup E] [NormedSpace â E]
open MeasureTheory Metric Set Finset Filter BoxIntegral
namespace BoxIntegral
/-- The indicator function of a measurable set is McShane integrable with respect to any
locally-finite measure.  The value of the integral is `(Ό (s ⩠I)).toReal ⢠y`. -/
theorem hasIntegralIndicatorConst (l : IntegrationParams) (hl : l.bRiemann = false)
{s : Set (ι â â)} (hs : MeasurableSet s) (I : Box ι) (y : E) (ÎŒ : Measure (ι â â))
[IsLocallyFiniteMeasure Ό] :
HasIntegral.{u, v, v} I l (s.indicator fun _ => y) Ό.toBoxAdditive.toSMul
((Ό (s ⩠I)).toReal ⢠y) := by
-- It suffices to bound integral sums within `ε * âyâ` for every `ε > 0`.
refine HasIntegral.of_mul âyâ fun ε ε0 => ?_
lift ε to ââ¥0 using ε0.le; rw [NNReal.coe_pos] at ε0
/- First we choose a closed set `F â s â© I.Icc` and an open set `U â s` such that
both `(s ⩠I.Icc) \ F` and `U \ s` have measure less than `ε`. -/
have A : ÎŒ (s â© Box.Icc I) â â :=
((measure_mono Set.inter_subset_right).trans_lt (I.measure_Icc_lt_top Ό)).ne
have B : ÎŒ (s â© I) â â :=
((measure_mono Set.inter_subset_right).trans_lt (I.measure_coe_lt_top Ό)).ne
obtain âšF, hFs, hFc, hÎŒFâ© : â F, F â s â© Box.Icc I â§ IsClosed F â§ ÎŒ ((s â© Box.Icc I) \ F) < ε :=
(hs.inter I.measurableSet_Icc).exists_isClosed_diff_lt A (ENNReal.coe_pos.2 ε0).ne'
obtain âšU, hsU, hUo, hUt, hÎŒUâ© :
â U, s â© Box.Icc I â U â§ IsOpen U â§ ÎŒ U < â â§ ÎŒ (U \ (s â© Box.Icc I)) < ε :=
(hs.inter I.measurableSet_Icc).exists_isOpen_diff_lt A (ENNReal.coe_pos.2 ε0).ne'
/- Then we choose `r` so that `closed_ball x (r x) â U` whenever `x â s â© I.Icc` and
`closed_ball x (r x)` is disjoint with `F` otherwise. -/
have : â x â s â© Box.Icc I, â r : Ioi (0 : â), closedBall x r â U := fun x hx => by
rcases nhds_basis_closedBall.mem_iff.1 (hUo.mem_nhds <| hsU hx) with âšr, hrâ, hrâ©
exact âšâšr, hrââ©, hrâ©
choose! rs hrsU using this
have : â x â Box.Icc I \ s, â r : Ioi (0 : â), closedBall x r â Fá¶ := fun x hx => by
obtain âšr, hrâ, hrâ© :=
nhds_basis_closedBall.mem_iff.1 (hFc.isOpen_compl.mem_nhds fun hx' => hx.2 (hFs hx').1)
exact âšâšr, hrââ©, hrâ©
choose! rs' hrs'F using this
classical
-- Combine the two radius functions: use `rs` on `s` and `rs'` off `s`.
set r : (ι â â) â Ioi (0 : â) := s.piecewise rs rs'
refine âšfun _ => r, fun c => l.rCond_of_bRiemann_eq_false hl, fun c Ï hÏ hÏp => ?_â©; rw [mul_comm]
/- Then the union of boxes `J â Ï` such that `Ï.tag â s` includes `F` and is included by `U`,
hence its measure is `ε`-close to the measure of `s`. -/
dsimp [integralSum]
simp only [mem_closedBall, dist_eq_norm, â indicator_const_smul_apply,
sum_indicator_eq_sum_filter, â sum_smul, â sub_smul, norm_smul, Real.norm_eq_abs, â
Prepartition.filter_boxes, â Prepartition.measure_iUnion_toReal]
gcongr
set t := (Ï.filter (Ï.tag · â s)).iUnion
change abs ((Ό t).toReal - (Ό (s ⩠I)).toReal) †ε
have htU : t â U â© I := by
simp only [t, TaggedPrepartition.iUnion_def, iUnion_subset_iff, TaggedPrepartition.mem_filter,
and_imp]
refine fun J hJ hJs x hx => âšhrsU _ âšhJs, Ï.tag_mem_Icc Jâ© ?_, Ï.le_of_mem' J hJ hxâ©
simpa only [r, s.piecewise_eq_of_mem _ _ hJs] using hÏ.1 J hJ (Box.coe_subset_Icc hx)
-- Compare `Ό t` with `Ό (s ⩠I)` in both directions.
refine abs_sub_le_iff.2 âš?_, ?_â©
· refine (ENNReal.le_toReal_sub B).trans (ENNReal.toReal_le_coe_of_le_coe ?_)
refine (tsub_le_tsub (measure_mono htU) le_rfl).trans (le_measure_diff.trans ?_)
refine (measure_mono fun x hx => ?_).trans hÎŒU.le
exact âšhx.1.1, fun hx' => hx.2 âšhx'.1, hx.1.2â©â©
· have hÎŒt : ÎŒ t â â := ((measure_mono (htU.trans inter_subset_left)).trans_lt hUt).ne
refine (ENNReal.le_toReal_sub hÎŒt).trans (ENNReal.toReal_le_coe_of_le_coe ?_)
refine le_measure_diff.trans ((measure_mono ?_).trans hÎŒF.le)
rintro x âšâšhxs, hxIâ©, hxtâ©
refine âšâšhxs, Box.coe_subset_Icc hxIâ©, fun hxF => hxt ?_â©
simp only [t, TaggedPrepartition.iUnion_def, TaggedPrepartition.mem_filter, Set.mem_iUnion]
rcases hÏp x hxI with âšJ, hJÏ, hxJâ©
refine âšJ, âšhJÏ, ?_â©, hxJâ©
contrapose hxF
refine hrs'F _ âšÏ.tag_mem_Icc J, hxFâ© ?_
simpa only [r, s.piecewise_eq_of_not_mem _ _ hxF] using hÏ.1 J hJÏ (Box.coe_subset_Icc hxJ)
/-- If `f` is a.e. equal to zero on a rectangular box, then it has McShane integral zero on this
box. -/
theorem HasIntegral.of_aeEq_zero {l : IntegrationParams} {I : Box ι} {f : (ι â â) â E}
{ÎŒ : Measure (ι â â)} [IsLocallyFiniteMeasure ÎŒ] (hf : f =áµ[ÎŒ.restrict I] 0)
(hl : l.bRiemann = false) : HasIntegral.{u, v, v} I l f Ό.toBoxAdditive.toSMul 0 := by
/- Each set `{x | n < âf xâ †n + 1}`, `n : â`, has measure zero. We cover it by an open set of
measure less than `ε / 2 ^ n / (n + 1)`. Then the norm of the integral sum is less than `ε`. -/
refine hasIntegral_iff.2 fun ε ε0 => ?_
lift ε to ââ¥0 using ε0.lt.le; rw [gt_iff_lt, NNReal.coe_pos] at ε0
-- Choose a summable series `Ύ` of positive terms with sum `c < ε`.
rcases NNReal.exists_pos_sum_of_countable ε0.ne' â with âšÎŽ, ÎŽ0, c, hÎŽc, hcεâ©
haveI := Fact.mk (I.measure_coe_lt_top Ό)
change ÎŒ.restrict I {x | f x â 0} = 0 at hf
-- `N x` is the ceiling of `âf xâ`; by `N0`, `N x = 0` exactly when `f x = 0`.
set N : (ι â â) â â := fun x => ââf xâââ
have N0 : â {x}, N x = 0 â f x = 0 := by simp [N]
-- Cover each level set `N â»Â¹' {n}` by an open set of measure `< ÎŽ n / n`.
have : â n, â U, N â»Â¹' {n} â U â§ IsOpen U â§ ÎŒ.restrict I U < ÎŽ n / n := fun n ⊠by
refine (N â»Â¹' {n}).exists_isOpen_lt_of_lt _ ?_
cases' n with n
· simpa [ENNReal.div_zero (ENNReal.coe_pos.2 (Ύ0 _)).ne'] using measure_lt_top (Ό.restrict I) _
· refine (measure_mono_null ?_ hf).le.trans_lt ?_
· exact fun x hxN hxf => n.succ_ne_zero ((Eq.symm hxN).trans <| N0.2 hxf)
· simp [(Ύ0 _).ne']
choose U hNU hUo hÎŒU using this
-- At each `x`, pick a radius whose closed ball lies inside the open cover of its level set.
have : â x, â r : Ioi (0 : â), closedBall x r â U (N x) := fun x => by
obtain âšr, hrâ, hrâ© := nhds_basis_closedBall.mem_iff.1 ((hUo _).mem_nhds (hNU _ rfl))
exact âšâšr, hrââ©, hrâ©
choose r hrU using this
refine âšfun _ => r, fun c => l.rCond_of_bRiemann_eq_false hl, fun c Ï hÏ _ => ?_â©
-- Group the integral sum by the value of `N` at the tags, then bound each group by `Ύ n`.
rw [dist_eq_norm, sub_zero, â integralSum_fiberwise fun J => N (Ï.tag J)]
refine le_trans ?_ (NNReal.coe_lt_coe.2 hcε).le
refine (norm_sum_le_of_le _ ?_).trans
(sum_le_hasSum _ (fun n _ => (ÎŽ n).2) (NNReal.hasSum_coe.2 hÎŽc))
rintro n -
dsimp [integralSum]
-- On the fiber `N (Ï.tag J) = n`, every summand is bounded by `(ÎŒ J).toReal * n`.
have : â J â Ï.filter fun J => N (Ï.tag J) = n,
â(ÎŒ âJ).toReal ⢠f (Ï.tag J)â †(ÎŒ J).toReal * n := fun J hJ ⊠by
rw [TaggedPrepartition.mem_filter] at hJ
rw [norm_smul, Real.norm_eq_abs, abs_of_nonneg ENNReal.toReal_nonneg]
gcongr
exact hJ.2 âž Nat.le_ceil _
refine (norm_sum_le_of_le _ this).trans ?_; clear this
rw [â sum_mul, â Prepartition.measure_iUnion_toReal]
let m := ÎŒ (Ï.filter fun J => N (Ï.tag J) = n).iUnion
show m.toReal * ân †â(ÎŽ n)
-- The union of the fiber's boxes is inside the chosen open set, so `m < ÎŽ n / n`.
have : m < ÎŽ n / n := by
simp only [Measure.restrict_apply (hUo _).measurableSet] at hÎŒU
refine (measure_mono ?_).trans_lt (hÎŒU _)
simp only [Set.subset_def, TaggedPrepartition.mem_iUnion, TaggedPrepartition.mem_filter]
rintro x âšJ, âšhJ, rflâ©, hxâ©
exact âšhrU _ (hÏ.1 _ hJ (Box.coe_subset_Icc hx)), Ï.le_of_mem' J hJ hxâ©
clear_value m
lift m to ââ¥0 using ne_top_of_lt this
rw [ENNReal.coe_toReal, â NNReal.coe_natCast, â NNReal.coe_mul, NNReal.coe_le_coe, â
ENNReal.coe_le_coe, ENNReal.coe_mul, ENNReal.coe_natCast, mul_comm]
exact (mul_le_mul_left' this.le _).trans ENNReal.mul_div_le
/-- If `f` has integral `y` on a box `I` with respect to a locally finite measure `Ό` and `g` is
a.e. equal to `f` on `I`, then `g` has the same integral on `I`. -/
theorem HasIntegral.congr_ae {l : IntegrationParams} {I : Box ι} {y : E} {f g : (ι â â) â E}
{ÎŒ : Measure (ι â â)} [IsLocallyFiniteMeasure ÎŒ]
(hf : HasIntegral.{u, v, v} I l f ÎŒ.toBoxAdditive.toSMul y) (hfg : f =áµ[ÎŒ.restrict I] g)
(hl : l.bRiemann = false) : HasIntegral.{u, v, v} I l g Ό.toBoxAdditive.toSMul y := by
-- `g = f + (g - f)` where `g - f` is a.e. zero on `I`, hence has box integral zero.
have : g - f =áµ[ÎŒ.restrict I] 0 := hfg.mono fun x hx => sub_eq_zero.2 hx.symm
simpa using hf.add (HasIntegral.of_aeEq_zero this hl)
end BoxIntegral
namespace MeasureTheory
namespace SimpleFunc
/-- A simple function is McShane integrable w.r.t. any locally finite measure; the value of the
box integral is the `SimpleFunc.integral` over the restricted measure. -/
theorem hasBoxIntegral (f : SimpleFunc (ι â â) E) (ÎŒ : Measure (ι â â)) [IsLocallyFiniteMeasure ÎŒ]
(I : Box ι) (l : IntegrationParams) (hl : l.bRiemann = false) :
HasIntegral.{u, v, v} I l f Ό.toBoxAdditive.toSMul (f.integral (Ό.restrict I)) := by
-- Induct over the structure of the simple function: indicators, then sums.
induction' f using MeasureTheory.SimpleFunc.induction with y s hs f g _ hfi hgi
-- Base case: a constant times an indicator; this is `hasIntegralIndicatorConst`.
· simpa only [Measure.restrict_apply hs, const_zero, integral_piecewise_zero, integral_const,
Measure.restrict_apply, MeasurableSet.univ, Set.univ_inter] using
BoxIntegral.hasIntegralIndicatorConst l hl hs I y Ό
-- Inductive step: both summands are integrable, so their integrals add.
· borelize E; haveI := Fact.mk (I.measure_coe_lt_top Ό)
rw [integral_add]
exacts [hfi.add hgi, integrable_iff.2 fun _ _ => measure_lt_top _ _,
integrable_iff.2 fun _ _ => measure_lt_top _ _]
/-- For a simple function, its McShane (or Henstock, or `â¥`) box integral is equal to its
integral in the sense of `MeasureTheory.SimpleFunc.integral`.  Immediate from
`SimpleFunc.hasBoxIntegral`. -/
theorem box_integral_eq_integral (f : SimpleFunc (ι â â) E) (ÎŒ : Measure (ι â â))
[IsLocallyFiniteMeasure Ό] (I : Box ι) (l : IntegrationParams) (hl : l.bRiemann = false) :
BoxIntegral.integral.{u, v, v} I l f Ό.toBoxAdditive.toSMul = f.integral (Ό.restrict I) :=
(f.hasBoxIntegral Ό I l hl).integral_eq
end SimpleFunc
open TopologicalSpace
/-- If `f : ââ¿ â E` is Bochner integrable w.r.t. a locally finite measure `ÎŒ` on a rectangular box
`I`, then it is McShane integrable on `I` with the same integral. -/
theorem IntegrableOn.hasBoxIntegral [CompleteSpace E] {f : (ι â â) â E} {ÎŒ : Measure (ι â â)}
[IsLocallyFiniteMeasure Ό] {I : Box ι} (hf : IntegrableOn f I Ό) (l : IntegrationParams)
(hl : l.bRiemann = false) :
HasIntegral.{u, v, v} I l f ÎŒ.toBoxAdditive.toSMul (â« x in I, f x âÎŒ) := by
borelize E
-- First we replace an `ae_strongly_measurable` function by a measurable one.
rcases hf.aestronglyMeasurable with âšg, hg, hfgâ©
haveI : SeparableSpace (range g ⪠{0} : Set E) := hg.separableSpace_range_union_singleton
rw [integral_congr_ae hfg]; have hgi : IntegrableOn g I Ό := (integrable_congr hfg).1 hf
-- It suffices to prove the statement for `g`, since `f = g` a.e. on `I`.
refine BoxIntegral.HasIntegral.congr_ae ?_ hfg.symm hl
clear! f
/- Now consider the sequence of simple functions
`SimpleFunc.approxOn g hg.measurable (range g ⪠{0}) 0 (by simp)`
approximating `g`. Recall some properties of this sequence. -/
set f : â â SimpleFunc (ι â â) E :=
SimpleFunc.approxOn g hg.measurable (range g ⪠{0}) 0 (by simp)
have hfi : â n, IntegrableOn (f n) I ÎŒ :=
SimpleFunc.integrable_approxOn_range hg.measurable hgi
have hfi' := fun n => ((f n).hasBoxIntegral Ό I l hl).integrable
-- The approximation error `âf n x - g xâ` is monotone nonincreasing in `n`, pointwise.
have hfg_mono : â (x) {m n}, m †n â âf n x - g xâ †âf m x - g xâ := by
intro x m n hmn
rw [â dist_eq_norm, â dist_eq_norm, dist_nndist, dist_nndist, NNReal.coe_le_coe, â
ENNReal.coe_le_coe, â edist_nndist, â edist_nndist]
exact SimpleFunc.edist_approxOn_mono hg.measurable _ x hmn
/- Now consider `ε > 0`. We need to find `r` such that for any tagged partition subordinate
to `r`, the integral sum is `(Ό I + 1 + 1) * ε`-close to the Bochner integral. -/
refine HasIntegral.of_mul ((Ό I).toReal + 1 + 1) fun ε ε0 => ?_
lift ε to ââ¥0 using ε0.le; rw [NNReal.coe_pos] at ε0; have ε0' := ENNReal.coe_pos.2 ε0
-- Choose `N` such that the integral of `âf N x - g xâ` is less than or equal to `ε`.
obtain âšNâ, hNââ© : â N : â, â« x in I, âf N x - g xâ âÎŒ †ε := by
have : Tendsto (fun n => â«â» x in I, âf n x - g xââ âÎŒ) atTop (ð 0) :=
SimpleFunc.tendsto_approxOn_range_L1_nnnorm hg.measurable hgi
refine (this.eventually (ge_mem_nhds ε0')).exists.imp fun N hN => ?_
exact integral_coe_le_of_lintegral_coe_le hN
-- For each `x`, we choose `Nx x ⥠Nâ` such that `dist (f Nx x) (g x) †ε`.
have : â x, â Nâ, Nâ †Nâ â§ dist (f Nâ x) (g x) †ε := fun x ⊠by
have : Tendsto (f · x) atTop (ð <| g x) :=
SimpleFunc.tendsto_approxOn hg.measurable _ (subset_closure (by simp))
exact ((eventually_ge_atTop Nâ).and <| this <| closedBall_mem_nhds _ ε0).exists
choose Nx hNx hNxε using this
-- We also choose a convergent series with `â' i : â, ÎŽ i < ε`.
rcases NNReal.exists_pos_sum_of_countable ε0.ne' â with âšÎŽ, ÎŽ0, c, hÎŽc, hcεâ©
/- Since each simple function `fáµ¢` is integrable, there exists `ráµ¢ : ââ¿ â (0, â)` such that
the integral sum of `f` over any tagged prepartition is `ÎŽáµ¢`-close to the sum of integrals
of `fáµ¢` over the boxes of this prepartition. For each `x`, we choose `r (Nx x)` as the radius
at `x`. -/
set r : ââ¥0 â (ι â â) â Ioi (0 : â) := fun c x => (hfi' <| Nx x).convergenceR (ÎŽ <| Nx x) c x
refine âšr, fun c => l.rCond_of_bRiemann_eq_false hl, fun c Ï hÏ hÏp => ?_â©
/- Now we prove the estimate in 3 "jumps": first we replace `g x` in the formula for the
integral sum by `f (Nx x)`; then we replace each `ÎŒ J ⢠f (Nx (Ï.tag J)) (Ï.tag J)`
by the Bochner integral of `f (Nx (Ï.tag J)) x` over `J`, then we jump to the Bochner
integral of `g`. -/
refine (dist_triangle4 _ (â J â Ï.boxes, (ÎŒ J).toReal ⢠f (Nx <| Ï.tag J) (Ï.tag J))
(â J â Ï.boxes, â« x in J, f (Nx <| Ï.tag J) x âÎŒ) _).trans ?_
rw [add_mul, add_mul, one_mul]
refine add_le_add_three ?_ ?_ ?_
· /- Since each `f (Nx <| Ï.tag J)` is `ε`-close to `g (Ï.tag J)`, replacing the latter with
the former in the formula for the integral sum changes the sum at most by `Ό I * ε`. -/
rw [â hÏp.iUnion_eq, Ï.measure_iUnion_toReal, sum_mul, integralSum]
refine dist_sum_sum_le_of_le _ fun J _ => ?_; dsimp
rw [dist_eq_norm, â smul_sub, norm_smul, Real.norm_eq_abs, abs_of_nonneg ENNReal.toReal_nonneg]
gcongr
rw [â dist_eq_norm']; exact hNxε _
· /- We group the terms of both sums by the values of `Nx (Ï.tag J)`.
For each `N`, the sum of Bochner integrals over the boxes is equal
to the sum of box integrals, and the sum of box integrals is `ÎŽáµ¢`-close
to the corresponding integral sum due to the Henstock-Sacks inequality. -/
rw [â Ï.sum_fiberwise fun J => Nx (Ï.tag J), â Ï.sum_fiberwise fun J => Nx (Ï.tag J)]
refine le_trans ?_ (NNReal.coe_lt_coe.2 hcε).le
refine
(dist_sum_sum_le_of_le _ fun n hn => ?_).trans
(sum_le_hasSum _ (fun n _ => (ÎŽ n).2) (NNReal.hasSum_coe.2 hÎŽc))
have hNxn : â J â Ï.filter fun J => Nx (Ï.tag J) = n, Nx (Ï.tag J) = n := fun J hJ =>
(Ï.mem_filter.1 hJ).2
-- On the fiber, the chosen radius `r c` agrees with the convergence radius of `f n`.
have hrn : â J â Ï.filter fun J => Nx (Ï.tag J) = n,
r c (Ï.tag J) = (hfi' n).convergenceR (ÎŽ n) c (Ï.tag J) := fun J hJ ⊠by
obtain rfl := hNxn J hJ
rfl
have :
l.MemBaseSet I c ((hfi' n).convergenceR (ÎŽ n) c) (Ï.filter fun J => Nx (Ï.tag J) = n) :=
(hÏ.filter _).mono' _ le_rfl le_rfl fun J hJ => (hrn J hJ).le
convert (hfi' n).dist_integralSum_sum_integral_le_of_memBaseSet (ÎŽ0 _) this using 2
· refine sum_congr rfl fun J hJ => ?_
simp [hNxn J hJ]
· refine sum_congr rfl fun J hJ => ?_
rw [â SimpleFunc.integral_eq_integral, SimpleFunc.box_integral_eq_integral _ _ _ _ hl,
hNxn J hJ]
exact (hfi _).mono_set (Prepartition.le_of_mem _ hJ)
· /- For the last jump, we use the fact that the distance between `f (Nx x) x` and `g x` is less
than or equal to the distance between `f Nâ x` and `g x` and the integral of
`âf Nâ x - g xâ` is less than or equal to `ε`. -/
refine le_trans ?_ hNâ
have hfi : â (n), â J â Ï, IntegrableOn (f n) (âJ) ÎŒ := fun n J hJ =>
(hfi n).mono_set (Ï.le_of_mem' J hJ)
have hgi : â J â Ï, IntegrableOn g (âJ) ÎŒ := fun J hJ => hgi.mono_set (Ï.le_of_mem' J hJ)
have hfgi : â (n), â J â Ï, IntegrableOn (fun x => âf n x - g xâ) J ÎŒ := fun n J hJ =>
((hfi n J hJ).sub (hgi J hJ)).norm
-- Split both integrals over the (pairwise disjoint) boxes of the prepartition.
rw [â hÏp.iUnion_eq, Prepartition.iUnion_def',
integral_finset_biUnion Ï.boxes (fun J _ => J.measurableSet_coe) Ï.pairwiseDisjoint hgi,
integral_finset_biUnion Ï.boxes (fun J _ => J.measurableSet_coe) Ï.pairwiseDisjoint (hfgi _)]
refine dist_sum_sum_le_of_le _ fun J hJ => ?_
rw [dist_eq_norm, â integral_sub (hfi _ J hJ) (hgi J hJ)]
refine norm_integral_le_of_norm_le (hfgi _ J hJ) (eventually_of_forall fun x => ?_)
exact hfg_mono x (hNx (Ï.tag J))
/-- If `f : ââ¿ â E` is continuous on a rectangular box `I`, then it is Box integrable on `I`
w.r.t. a locally finite measure `Ό` with the same integral. -/
theorem ContinuousOn.hasBoxIntegral [CompleteSpace E] {f : (ι â â) â E} (ÎŒ : Measure (ι â â))
[IsLocallyFiniteMeasure Ό] {I : Box ι} (hc : ContinuousOn f (Box.Icc I))
(l : IntegrationParams) :
HasIntegral.{u, v, v} I l f ÎŒ.toBoxAdditive.toSMul (â« x in I, f x âÎŒ) := by
-- `f` is box integrable for `l` by continuity; it remains to identify the value `y`.
obtain âšy, hyâ© := BoxIntegral.integrable_of_continuousOn l hc ÎŒ
convert hy
-- `f` is Bochner integrable on `I` (continuous on the compact `I.Icc`), so its McShane
-- integral equals the Bochner integral; both box integrals agree by uniqueness.
have : IntegrableOn f I Ό :=
IntegrableOn.mono_set (hc.integrableOn_compact I.isCompact_Icc) Box.coe_subset_Icc
exact HasIntegral.unique (IntegrableOn.hasBoxIntegral this ⥠rfl) (HasIntegral.mono hy bot_le)
/-- If `f : ââ¿ â E` is a.e. continuous and bounded on a rectangular box `I`, then it is Box
integrable on `I` w.r.t. a locally finite measure `Ό` with the same integral. -/
theorem AEContinuous.hasBoxIntegral [CompleteSpace E] {f : (ι â â) â E} (ÎŒ : Measure (ι â â))
[IsLocallyFiniteMeasure ÎŒ] {I : Box ι} (hb : â C : â, â x â Box.Icc I, âf xâ †C)
(hc : âáµ x âÎŒ, ContinuousAt f x) (l : IntegrationParams) :
HasIntegral.{u, v, v} I l f ÎŒ.toBoxAdditive.toSMul (â« x in I, f x âÎŒ) := by
-- `f` has some box integral `y`; identify it with the Bochner integral by uniqueness.
obtain âšy, hyâ© := integrable_of_bounded_and_ae_continuous l hb ÎŒ hc
convert hy
refine HasIntegral.unique (IntegrableOn.hasBoxIntegral ?_ ⥠rfl) (HasIntegral.mono hy bot_le)
-- `IntegrableOn f I ÎŒ` splits into a.e. strong measurability and finite integral.
constructor
-- Measurability: `f` is continuous on the (full-measure) set where it is continuous.
· let v := {x : (ι â â) | ContinuousAt f x}
have : AEStronglyMeasurable f (Ό.restrict v) :=
(ContinuousAt.continuousOn fun _ h ⊠h).aestronglyMeasurable (measurableSet_of_continuousAt f)
refine this.mono_measure (Measure.le_iff.2 fun s hs ⊠?_)
repeat rw [Ό.restrict_apply hs]
apply le_of_le_of_eq <| Ό.mono s.inter_subset_left
refine measure_eq_measure_of_null_diff s.inter_subset_left ?_ |>.symm
rw [diff_self_inter, Set.diff_eq]
refine (le_antisymm (zero_le (ÎŒ (s â© vá¶))) ?_).symm
exact le_trans (Ό.mono s.inter_subset_right) (nonpos_iff_eq_zero.2 hc)
-- Finite integral: `f` is bounded by `C` on `I` and `Ό` restricted to `I` is finite.
· have : IsFiniteMeasure (Ό.restrict (Box.Icc I)) :=
{ measure_univ_lt_top := by simp [I.isCompact_Icc.measure_lt_top (Ό := Ό)] }
have : IsFiniteMeasure (Ό.restrict I) :=
isFiniteMeasure_of_le (Ό.restrict (Box.Icc I))
(Ό.restrict_mono Box.coe_subset_Icc (le_refl Ό))
obtain âšC, hCâ© := hb
refine hasFiniteIntegral_of_bounded (C := C) (Filter.eventually_iff_exists_mem.2 ?_)
use I, self_mem_ae_restrict I.measurableSet_coe, fun y hy ⊠hC y (I.coe_subset_Icc hy)
end MeasureTheory
|
Analysis\BoxIntegral\Box\Basic.lean | /-
Copyright (c) 2021 Yury Kudryashov. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yury Kudryashov
-/
import Mathlib.Order.Fin.Tuple
import Mathlib.Order.Interval.Set.Monotone
import Mathlib.Topology.MetricSpace.Basic
import Mathlib.Topology.MetricSpace.Bounded
import Mathlib.Topology.Order.MonotoneConvergence
import Mathlib.Topology.MetricSpace.Pseudo.Lemmas
/-!
# Rectangular boxes in `ℝⁿ`
In this file we define rectangular boxes in `ℝⁿ`. As usual, we represent `ℝⁿ` as the type of
functions `ι → ℝ` (usually `ι = Fin n` for some `n`). When we need to interpret a box `[l, u]` as a
set, we use the product `{x | ∀ i, l i < x i ∧ x i ≤ u i}` of half-open intervals `(l i, u i]`. We
exclude `l i` because this way boxes of a partition are disjoint as sets in `ℝⁿ`.
Currently, the only use cases for these constructions are the definitions of Riemann-style integrals
(Riemann, Henstock-Kurzweil, McShane).
## Main definitions
We use the same structure `BoxIntegral.Box` both for ambient boxes and for elements of a partition.
Each box is stored as two points `lower upper : ι â â` and a proof of `â i, lower i < upper i`. We
define instances `Membership (ι â â) (Box ι)` and `CoeTC (Box ι) (Set <| ι â â)` so that each box is
interpreted as the set `{x | â i, x i â Set.Ioc (I.lower i) (I.upper i)}`. This way boxes of a
partition are pairwise disjoint and their union is exactly the original box.
We require boxes to be nonempty, because this way coercion to sets is injective. The empty box can
be represented as `⥠: WithBot (BoxIntegral.Box ι)`.
We define the following operations on boxes:
* coercion to `Set (ι â â)` and `Membership (ι â â) (BoxIntegral.Box ι)` as described above;
* `PartialOrder` and `SemilatticeSup` instances such that `I †J` is equivalent to
`(I : Set (ι â â)) â J`;
* `Lattice` instances on `WithBot (BoxIntegral.Box ι)`;
* `BoxIntegral.Box.Icc`: the closed box `Set.Icc I.lower I.upper`; defined as a bundled monotone
map from `Box ι` to `Set (ι â â)`;
* `BoxIntegral.Box.face I i : Box (Fin n)`: a hyperface of `I : BoxIntegral.Box (Fin (n + 1))`;
* `BoxIntegral.Box.distortion`: the maximal ratio of two lengths of edges of a box; defined as the
supremum of `nndist I.lower I.upper / nndist (I.lower i) (I.upper i)`.
We also provide a convenience constructor `BoxIntegral.Box.mk' (l u : ι â â) : WithBot (Box ι)`
that returns the box `âšl, u, _â©` if it is nonempty and `â¥` otherwise.
## Tags
rectangular box
-/
open Set Function Metric Filter
noncomputable section
open scoped Classical
open NNReal Topology
namespace BoxIntegral
variable {ι : Type*}
/-!
### Rectangular box: definition and partial order
-/
/-- A nontrivial rectangular box in `ι â â` with corners `lower` and `upper`. Represents the product
of half-open intervals `(lower i, upper i]`. -/
structure Box (ι : Type*) where
/-- coordinates of the lower and upper corners of the box -/
(lower upper : ι â â)
/-- Each lower coordinate is less than its upper coordinate: i.e., the box is non-empty -/
lower_lt_upper : â i, lower i < upper i
-- Every box is nonempty by construction, so `simp` may use this fact freely.
attribute [simp] Box.lower_lt_upper
namespace Box
variable (I J : Box ι) {x y : ι â â}
-- The half-open unit cube witnesses that boxes exist for any index type.
instance : Inhabited (Box ι) :=
âšâš0, 1, fun _ ⊠zero_lt_oneâ©â©
theorem lower_le_upper : I.lower †I.upper :=
fun i ⊠(I.lower_lt_upper i).le
theorem lower_ne_upper (i) : I.lower i â I.upper i :=
(I.lower_lt_upper i).ne
-- A point is in a box iff each coordinate lies in the half-open interval `Ioc (lower i) (upper i)`.
instance : Membership (ι â â) (Box ι) :=
âšfun x I ⊠â i, x i â Ioc (I.lower i) (I.upper i)â©
-- Porting note: added
/-- The set of points in this box: this is the product of half-open intervals `(lower i, upper i]`,
where `lower` and `upper` are this box' corners. -/
@[coe]
def toSet (I : Box ι) : Set (ι â â) := { x | x â I }
instance : CoeTC (Box ι) (Set <| ι â â) :=
âštoSetâ©
@[simp]
theorem mem_mk {l u x : ι â â} {H} : x â mk l u H â â i, x i â Ioc (l i) (u i) := Iff.rfl
@[simp, norm_cast]
theorem mem_coe : x â (I : Set (ι â â)) â x â I := Iff.rfl
theorem mem_def : x â I â â i, x i â Ioc (I.lower i) (I.upper i) := Iff.rfl
-- Membership in the box is the same as membership in the product of its coordinate intervals.
theorem mem_univ_Ioc {I : Box ι} : (x â pi univ fun i ⊠Ioc (I.lower i) (I.upper i)) â x â I :=
mem_univ_pi
theorem coe_eq_pi : (I : Set (ι â â)) = pi univ fun i ⊠Ioc (I.lower i) (I.upper i) :=
Set.ext fun _ ⊠mem_univ_Ioc.symm
@[simp]
theorem upper_mem : I.upper â I :=
fun i ⊠right_mem_Ioc.2 <| I.lower_lt_upper i
-- A box is nonempty: its upper corner always belongs to it.
theorem exists_mem : â x, x â I :=
âš_, I.upper_memâ©
theorem nonempty_coe : Set.Nonempty (I : Set (ι â â)) :=
I.exists_mem
@[simp]
theorem coe_ne_empty : (I : Set (ι â â)) â â
:=
I.nonempty_coe.ne_empty
@[simp]
theorem empty_ne_coe : â
â (I : Set (ι â â)) :=
I.coe_ne_empty.symm
-- Pointwise order on boxes; `le_TFAE` below gives several equivalent characterizations.
instance : LE (Box ι) :=
âšfun I J ⊠â âŠxâŠ, x â I â x â Jâ©
theorem le_def : I †J â â x â I, x â J := Iff.rfl
-- Equivalent descriptions of `I †J`: set inclusion, inclusion of closed boxes,
-- and comparison of corners.
theorem le_TFAE : List.TFAE [I †J, (I : Set (ι â â)) â J,
Icc I.lower I.upper â Icc J.lower J.upper, J.lower †I.lower â§ I.upper †J.upper] := by
tfae_have 1 â 2
-- `I †J` is literally inclusion of the coerced sets.
· exact Iff.rfl
tfae_have 2 â 3
-- Taking closures: the closure of the product of `Ioc`s is the product of `Icc`s.
· intro h
simpa [coe_eq_pi, closure_pi_set, lower_ne_upper] using closure_mono h
tfae_have 3 â 4
· exact Icc_subset_Icc_iff I.lower_le_upper
tfae_have 4 â 2
· exact fun h x hx i ⊠Ioc_subset_Ioc (h.1 i) (h.2 i) (hx i)
tfae_finish
variable {I J}
@[simp, norm_cast]
theorem coe_subset_coe : (I : Set (ι â â)) â J â I †J := Iff.rfl
theorem le_iff_bounds : I †J â J.lower †I.lower â§ I.upper †J.upper :=
(le_TFAE I J).out 0 3
-- Interpreting boxes as sets is injective: both corners are recovered from the set.
theorem injective_coe : Injective ((â) : Box ι â Set (ι â â)) := by
rintro âšlâ, uâ, hââ© âšlâ, uâ, hââ© h
simp only [Subset.antisymm_iff, coe_subset_coe, le_iff_bounds] at h
congr
exacts [le_antisymm h.2.1 h.1.1, le_antisymm h.1.2 h.2.2]
@[simp, norm_cast]
theorem coe_inj : (I : Set (ι â â)) = J â I = J :=
injective_coe.eq_iff
@[ext]
theorem ext (H : â x, x â I â x â J) : I = J :=
injective_coe <| Set.ext H
theorem ne_of_disjoint_coe (h : Disjoint (I : Set (ι â â)) J) : I â J :=
mt coe_inj.2 <| h.ne I.coe_ne_empty
-- The partial order on boxes is pulled back from sets along the injective coercion.
instance : PartialOrder (Box ι) :=
{ PartialOrder.lift ((â) : Box ι â Set (ι â â)) injective_coe with le := (· †·) }
/-- Closed box corresponding to `I : BoxIntegral.Box ι`. -/
protected def Icc : Box ι âªo Set (ι â â) :=
OrderEmbedding.ofMapLEIff (fun I : Box ι ⊠Icc I.lower I.upper) fun I J ⊠(le_TFAE I J).out 2 0
theorem Icc_def : Box.Icc I = Icc I.lower I.upper := rfl
@[simp]
theorem upper_mem_Icc (I : Box ι) : I.upper â Box.Icc I :=
right_mem_Icc.2 I.lower_le_upper
@[simp]
theorem lower_mem_Icc (I : Box ι) : I.lower â Box.Icc I :=
left_mem_Icc.2 I.lower_le_upper
-- The closed box is compact, being a closed interval in a finite product of copies of the reals.
protected theorem isCompact_Icc (I : Box ι) : IsCompact (Box.Icc I) :=
isCompact_Icc
theorem Icc_eq_pi : Box.Icc I = pi univ fun i ⊠Icc (I.lower i) (I.upper i) :=
(pi_univ_Icc _ _).symm
theorem le_iff_Icc : I †J â Box.Icc I â Box.Icc J :=
(le_TFAE I J).out 0 2
theorem antitone_lower : Antitone fun I : Box ι ⊠I.lower :=
fun _ _ H ⊠(le_iff_bounds.1 H).1
theorem monotone_upper : Monotone fun I : Box ι ⊠I.upper :=
fun _ _ H ⊠(le_iff_bounds.1 H).2
-- Every box is contained in its closed-box closure.
theorem coe_subset_Icc : âI â Box.Icc I :=
fun _ hx ⊠âšfun i ⊠(hx i).1.le, fun i ⊠(hx i).2â©
/-!
### Supremum of two boxes
-/
/-- `I â J` is the least box that includes both `I` and `J`. Since `âI ⪠âJ` is usually not a box,
`â(I â J)` is larger than `âI ⪠âJ`. -/
instance : Sup (Box ι) :=
-- The sup takes the pointwise min of lower corners and the pointwise max of upper corners;
-- nonemptiness follows since `min lower †I.lower < I.upper †max upper` in each coordinate.
âšfun I J ⊠âšI.lower â J.lower, I.upper â J.upper,
fun i ⊠(min_le_left _ _).trans_lt <| (I.lower_lt_upper i).trans_le (le_max_left _ _)â©â©
-- Boxes form a sup-semilattice (there is no `Inf` on `Box ι` itself: an intersection of two
-- boxes can be empty; see the `WithBot (Box ι)` lattice below).
instance : SemilatticeSup (Box ι) :=
{ (inferInstance : PartialOrder (Box ι)),
(inferInstance : Sup (Box ι)) with
le_sup_left := fun _ _ ⊠le_iff_bounds.2 âšinf_le_left, le_sup_leftâ©
le_sup_right := fun _ _ ⊠le_iff_bounds.2 âšinf_le_right, le_sup_rightâ©
sup_le := fun _ _ _ hâ hâ ⊠le_iff_bounds.2
âšle_inf (antitone_lower hâ) (antitone_lower hâ),
sup_le (monotone_upper hâ) (monotone_upper hâ)â© }
/-!
### `WithBot (Box ι)`
In this section we define coercion from `WithBot (Box ι)` to `Set (ι â â)` by sending `â¥` to `â
`.
-/
-- Porting note: added
/-- The set underlying this box: `â¥` is mapped to `â
`. -/
@[coe]
def withBotToSet (o : WithBot (Box ι)) : Set (ι â â) := o.elim â
(â)
instance withBotCoe : CoeTC (WithBot (Box ι)) (Set (ι â â)) :=
âšwithBotToSetâ©
@[simp, norm_cast]
theorem coe_bot : ((⥠: WithBot (Box ι)) : Set (ι â â)) = â
:= rfl
@[simp, norm_cast]
theorem coe_coe : ((I : WithBot (Box ι)) : Set (ι â â)) = I := rfl
-- A `WithBot` box is "some box" exactly when its underlying set is nonempty.
theorem isSome_iff : â {I : WithBot (Box ι)}, I.isSome â (I : Set (ι â â)).Nonempty
| ⥠=> by
erw [Option.isSome]
simp
| (I : Box ι) => by
erw [Option.isSome]
simp [I.nonempty_coe]
theorem biUnion_coe_eq_coe (I : WithBot (Box ι)) :
â (J : Box ι) (_ : âJ = I), (J : Set (ι â â)) = I := by
induction I <;> simp [WithBot.coe_eq_coe]
-- Coercion to sets is an order embedding of `WithBot (Box ι)` into sets.
@[simp, norm_cast]
theorem withBotCoe_subset_iff {I J : WithBot (Box ι)} : (I : Set (ι â â)) â J â I †J := by
induction I; · simp
induction J; · simp [subset_empty_iff]
simp [le_def]
@[simp, norm_cast]
theorem withBotCoe_inj {I J : WithBot (Box ι)} : (I : Set (ι â â)) = J â I = J := by
simp only [Subset.antisymm_iff, â le_antisymm_iff, withBotCoe_subset_iff]
/-- Make a `WithBot (Box ι)` from a pair of corners `l u : ι â â`. If `l i < u i` for all `i`,
then the result is `âšl, u, _â© : Box ι`, otherwise it is `â¥`. In any case, the result interpreted
as a set in `ι â â` is the set `{x : ι â â | â i, x i â Ioc (l i) (u i)}`. -/
def mk' (l u : ι â â) : WithBot (Box ι) :=
if h : â i, l i < u i then â(âšl, u, hâ© : Box ι) else â¥
@[simp]
theorem mk'_eq_bot {l u : ι â â} : mk' l u = ⥠â â i, u i †l i := by
rw [mk']
split_ifs with h <;> simpa using h
@[simp]
theorem mk'_eq_coe {l u : ι â â} : mk' l u = I â l = I.lower â§ u = I.upper := by
cases' I with lI uI hI; rw [mk']; split_ifs with h
· simp [WithBot.coe_eq_coe]
-- In the degenerate case the corners of `I` cannot both match, since `I` is nonempty.
· suffices l = lI â u â uI by simpa
rintro rfl rfl
exact h hI
@[simp]
theorem coe_mk' (l u : ι â â) : (mk' l u : Set (ι â â)) = pi univ fun i ⊠Ioc (l i) (u i) := by
rw [mk']; split_ifs with h
· exact coe_eq_pi _
-- Degenerate case: some factor `Ioc (l i) (u i)` is empty, hence so is the whole product.
· rcases not_forall.mp h with âši, hiâ©
rw [coe_bot, univ_pi_eq_empty]
exact Ioc_eq_empty hi
-- The inf of two boxes: intersect coordinatewise and collapse to `â¥` when the result is empty.
instance WithBot.inf : Inf (WithBot (Box ι)) :=
âšfun I âŠ
WithBot.recBotCoe (fun _ ⊠â¥)
(fun I J ⊠WithBot.recBotCoe ⥠(fun J ⊠mk' (I.lower â J.lower) (I.upper â J.upper)) J) Iâ©
-- On sets, the inf of boxes is exactly intersection.
@[simp]
theorem coe_inf (I J : WithBot (Box ι)) : (â(I â J) : Set (ι â â)) = (I : Set _) â© J := by
induction I
· change â
= _
simp
induction J
· change â
= _
simp
change ((mk' _ _ : WithBot (Box ι)) : Set (ι â â)) = _
simp only [coe_eq_pi, â pi_inter_distrib, Ioc_inter_Ioc, Pi.sup_apply, Pi.inf_apply, coe_mk',
coe_coe]
-- With `â¥` adjoined, boxes form a lattice; all laws are checked on the underlying sets.
instance : Lattice (WithBot (Box ι)) :=
{ WithBot.semilatticeSup,
Box.WithBot.inf with
inf_le_left := fun I J ⊠by
rw [â withBotCoe_subset_iff, coe_inf]
exact inter_subset_left
inf_le_right := fun I J ⊠by
rw [â withBotCoe_subset_iff, coe_inf]
exact inter_subset_right
le_inf := fun I Jâ Jâ hâ hâ ⊠by
simp only [â withBotCoe_subset_iff, coe_inf] at *
exact subset_inter hâ hâ }
@[simp, norm_cast]
theorem disjoint_withBotCoe {I J : WithBot (Box ι)} :
Disjoint (I : Set (ι â â)) J â Disjoint I J := by
simp only [disjoint_iff_inf_le, â withBotCoe_subset_iff, coe_inf]
rfl
theorem disjoint_coe : Disjoint (I : WithBot (Box ι)) J â Disjoint (I : Set (ι â â)) J :=
disjoint_withBotCoe.symm
theorem not_disjoint_coe_iff_nonempty_inter :
¬Disjoint (I : WithBot (Box ι)) J â (I â© J : Set (ι â â)).Nonempty := by
rw [disjoint_coe, Set.not_disjoint_iff_nonempty_inter]
/-!
### Hyperface of a box in `ââ¿âºÂ¹ = Fin (n + 1) â â`
-/
/-- Face of a box in `ââ¿âºÂ¹ = Fin (n + 1) â â`: the box in `ââ¿ = Fin n â â` with corners at
`I.lower â Fin.succAbove i` and `I.upper â Fin.succAbove i`. -/
@[simps (config := { simpRhs := true })]
def face {n} (I : Box (Fin (n + 1))) (i : Fin (n + 1)) : Box (Fin n) :=
-- Drop coordinate `i`; `Fin.succAbove i` enumerates the remaining coordinates.
âšI.lower â Fin.succAbove i, I.upper â Fin.succAbove i, fun _ ⊠I.lower_lt_upper _â©
@[simp]
theorem face_mk {n} (l u : Fin (n + 1) â â) (h : â i, l i < u i) (i : Fin (n + 1)) :
face âšl, u, hâ© i = âšl â Fin.succAbove i, u â Fin.succAbove i, fun _ ⊠h _â© := rfl
-- Taking the `i`-th face preserves the order on boxes.
@[mono]
theorem face_mono {n} {I J : Box (Fin (n + 1))} (h : I †J) (i : Fin (n + 1)) :
face I i †face J i :=
fun _ hx _ ⊠Ioc_subset_Ioc ((le_iff_bounds.1 h).1 _) ((le_iff_bounds.1 h).2 _) (hx _)
theorem monotone_face {n} (i : Fin (n + 1)) : Monotone fun I ⊠face I i :=
fun _ _ h ⊠face_mono h i
-- Re-inserting a coordinate value from the closed interval maps the closed face into the
-- closed box.
theorem mapsTo_insertNth_face_Icc {n} (I : Box (Fin (n + 1))) {i : Fin (n + 1)} {x : â}
(hx : x â Icc (I.lower i) (I.upper i)) :
MapsTo (i.insertNth x) (Box.Icc (I.face i)) (Box.Icc I) :=
fun _ hy ⊠Fin.insertNth_mem_Icc.2 âšhx, hyâ©
-- Same statement for the half-open boxes, assuming the inserted value lies in the half-open
-- coordinate interval.
theorem mapsTo_insertNth_face {n} (I : Box (Fin (n + 1))) {i : Fin (n + 1)} {x : â}
(hx : x â Ioc (I.lower i) (I.upper i)) :
MapsTo (i.insertNth x) (I.face i : Set (_ â _)) (I : Set (_ â _)) := by
intro y hy
simp_rw [mem_coe, mem_def, i.forall_iff_succAbove, Fin.insertNth_apply_same,
Fin.insertNth_apply_succAbove]
exact âšhx, hyâ©
-- A function continuous on a closed box restricts to a continuous function on each closed face.
theorem continuousOn_face_Icc {X} [TopologicalSpace X] {n} {f : (Fin (n + 1) â â) â X}
{I : Box (Fin (n + 1))} (h : ContinuousOn f (Box.Icc I)) {i : Fin (n + 1)} {x : â}
(hx : x â Icc (I.lower i) (I.upper i)) :
ContinuousOn (f â i.insertNth x) (Box.Icc (I.face i)) :=
h.comp (continuousOn_const.fin_insertNth i continuousOn_id) (I.mapsTo_insertNth_face_Icc hx)
/-!
### Covering of the interior of a box by a monotone sequence of smaller boxes
-/
/-- The interior of a box. -/
protected def Ioo : Box ι âo Set (ι â â) where
toFun I := pi univ fun i ⊠Ioo (I.lower i) (I.upper i)
monotone' _ _ h :=
pi_mono fun i _ ⊠Ioo_subset_Ioo ((le_iff_bounds.1 h).1 i) ((le_iff_bounds.1 h).2 i)
theorem Ioo_subset_coe (I : Box ι) : Box.Ioo I â I :=
fun _ hx i ⊠Ioo_subset_Ioc_self (hx i trivial)
protected theorem Ioo_subset_Icc (I : Box ι) : Box.Ioo I â Box.Icc I :=
I.Ioo_subset_coe.trans coe_subset_Icc
-- If a monotone sequence of boxes has corners converging to the corners of `I`, then the union
-- of their interiors is the interior of `I`.
theorem iUnion_Ioo_of_tendsto [Finite ι] {I : Box ι} {J : â â Box ι} (hJ : Monotone J)
(hl : Tendsto (lower â J) atTop (ð I.lower)) (hu : Tendsto (upper â J) atTop (ð I.upper)) :
â n, Box.Ioo (J n) = Box.Ioo I :=
-- Coordinatewise, the lower corners decrease and the upper corners increase.
have hl' : â i, Antitone fun n ⊠(J n).lower i :=
fun i ⊠(monotone_eval i).comp_antitone (antitone_lower.comp_monotone hJ)
have hu' : â i, Monotone fun n ⊠(J n).upper i :=
fun i ⊠(monotone_eval i).comp (monotone_upper.comp hJ)
calc
â n, Box.Ioo (J n) = pi univ fun i ⊠â n, Ioo ((J n).lower i) ((J n).upper i) :=
iUnion_univ_pi_of_monotone fun i ⊠(hl' i).Ioo (hu' i)
_ = Box.Ioo I :=
-- In each coordinate the union of the growing open intervals is the limiting open interval.
pi_congr rfl fun i _ âŠ
iUnion_Ioo_of_mono_of_isGLB_of_isLUB (hl' i) (hu' i)
(isGLB_of_tendsto_atTop (hl' i) (tendsto_pi_nhds.1 hl _))
(isLUB_of_tendsto_atTop (hu' i) (tendsto_pi_nhds.1 hu _))
-- Any box admits a monotone sequence of boxes whose closures sit inside its interior and whose
-- corners converge to its corners.
theorem exists_seq_mono_tendsto (I : Box ι) :
â J : â âo Box ι,
(â n, Box.Icc (J n) â Box.Ioo I) â§
Tendsto (lower â J) atTop (ð I.lower) â§ Tendsto (upper â J) atTop (ð I.upper) := by
-- Pick, in each coordinate, strictly monotone sequences approaching the corners from inside.
choose a b ha_anti hb_mono ha_mem hb_mem hab ha_tendsto hb_tendsto using
fun i ⊠exists_seq_strictAnti_strictMono_tendsto (I.lower_lt_upper i)
exact
âšâšfun k ⊠âšflip a k, flip b k, fun i ⊠hab _ _ _â©, fun k l hkl âŠ
le_iff_bounds.2 âšfun i ⊠(ha_anti i).antitone hkl, fun i ⊠(hb_mono i).monotone hklâ©â©,
fun n x hx i _ ⊠âš(ha_mem _ _).1.trans_le (hx.1 _), (hx.2 _).trans_lt (hb_mem _ _).2â©,
tendsto_pi_nhds.2 ha_tendsto, tendsto_pi_nhds.2 hb_tendstoâ©
section Distortion
variable [Fintype ι]
/-- The distortion of a box `I` is the maximum of the ratios of the lengths of its edges.
It is defined as the maximum of the ratios
`nndist I.lower I.upper / nndist (I.lower i) (I.upper i)`. -/
def distortion (I : Box ι) : ââ¥0 :=
Finset.univ.sup fun i : ι ⊠nndist I.lower I.upper / nndist (I.lower i) (I.upper i)
-- Distortion is invariant under rescaling all edges by the same (necessarily positive) factor.
theorem distortion_eq_of_sub_eq_div {I J : Box ι} {r : â}
(h : â i, I.upper i - I.lower i = (J.upper i - J.lower i) / r) :
distortion I = distortion J := by
simp only [distortion, nndist_pi_def, Real.nndist_eq', h, map_divâ]
congr 1 with i
-- The scaling factor must be positive, since both boxes have positive edge lengths.
have : 0 < r := by
by_contra hr
have := div_nonpos_of_nonneg_of_nonpos (sub_nonneg.2 <| J.lower_le_upper i) (not_lt.1 hr)
rw [â h] at this
exact this.not_lt (sub_pos.2 <| I.lower_lt_upper i)
have hn0 := (map_ne_zero Real.nnabs).2 this.ne'
simp_rw [NNReal.finset_sup_div, div_div_div_cancel_right _ hn0]
-- The diagonal of a box is bounded by the distortion times any edge length (`ââ¥0` version).
theorem nndist_le_distortion_mul (I : Box ι) (i : ι) :
nndist I.lower I.upper †I.distortion * nndist (I.lower i) (I.upper i) :=
calc
nndist I.lower I.upper =
nndist I.lower I.upper / nndist (I.lower i) (I.upper i) * nndist (I.lower i) (I.upper i) :=
(div_mul_cancelâ _ <| mt nndist_eq_zero.1 (I.lower_lt_upper i).ne).symm
_ †I.distortion * nndist (I.lower i) (I.upper i) := by
apply mul_le_mul_right'
apply Finset.le_sup (Finset.mem_univ i)
-- Same bound, phrased with real distances and the edge length `upper i - lower i`.
theorem dist_le_distortion_mul (I : Box ι) (i : ι) :
dist I.lower I.upper †I.distortion * (I.upper i - I.lower i) := by
have A : I.lower i - I.upper i < 0 := sub_neg.2 (I.lower_lt_upper i)
simpa only [â NNReal.coe_le_coe, â dist_nndist, NNReal.coe_mul, Real.dist_eq, abs_of_neg A,
neg_sub] using I.nndist_le_distortion_mul i
-- A distortion bound yields a diameter bound for the closed box in terms of any single edge.
theorem diam_Icc_le_of_distortion_le (I : Box ι) (i : ι) {c : ââ¥0} (h : I.distortion †c) :
diam (Box.Icc I) †c * (I.upper i - I.lower i) :=
have : (0 : â) †c * (I.upper i - I.lower i) :=
mul_nonneg c.coe_nonneg (sub_nonneg.2 <| I.lower_le_upper _)
diam_le_of_forall_dist_le this fun x hx y hy âŠ
calc
dist x y †dist I.lower I.upper := Real.dist_le_of_mem_pi_Icc hx hy
_ †I.distortion * (I.upper i - I.lower i) := I.dist_le_distortion_mul i
_ †c * (I.upper i - I.lower i) := by gcongr; exact sub_nonneg.2 (I.lower_le_upper i)
end Distortion
end Box
end BoxIntegral
|
Analysis\BoxIntegral\Box\SubboxInduction.lean | /-
Copyright (c) 2021 Yury Kudryashov. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yury Kudryashov
-/
import Mathlib.Analysis.BoxIntegral.Box.Basic
import Mathlib.Analysis.SpecificLimits.Basic
/-!
# Induction on subboxes
In this file we prove the following induction principle for `BoxIntegral.Box`, see
`BoxIntegral.Box.subbox_induction_on`. Let `p` be a predicate on `BoxIntegral.Box ι`, let `I` be a
box. Suppose that the following two properties hold true.
* Consider a smaller box `J †I`. The hyperplanes passing through the center of `J` split it into
`2 ^ n` boxes. If `p` holds true on each of these boxes, then it is true on `J`.
* For each `z` in the closed box `I.Icc` there exists a neighborhood `U` of `z` within `I.Icc` such
that for every box `J †I` such that `z â J.Icc â U`, if `J` is homothetic to `I` with a
coefficient of the form `1 / 2 ^ m`, then `p` is true on `J`.
Then `p I` is true.
## Tags
rectangular box, induction
-/
open Set Function Filter Topology
noncomputable section
namespace BoxIntegral
namespace Box
variable {ι : Type*} {I J : Box ι}
open Classical in
/-- For a box `I`, the hyperplanes passing through its center split `I` into `2 ^ card ι` boxes.
`BoxIntegral.Box.splitCenterBox I s` is one of these boxes. See also
`BoxIntegral.Partition.splitCenter` for the corresponding `BoxIntegral.Partition`. -/
def splitCenterBox (I : Box ι) (s : Set ι) : Box ι where
-- `s` records in which coordinates we keep the *upper* half; in the others we keep the lower.
lower := s.piecewise (fun i ⊠(I.lower i + I.upper i) / 2) I.lower
upper := s.piecewise I.upper fun i ⊠(I.lower i + I.upper i) / 2
lower_lt_upper i := by
dsimp only [Set.piecewise]
split_ifs <;> simp only [left_lt_add_div_two, add_div_two_lt_right, I.lower_lt_upper]
-- Membership in a half-box: membership in `I` plus the correct side of the center hyperplane
-- in every coordinate.
theorem mem_splitCenterBox {s : Set ι} {y : ι â â} :
y â I.splitCenterBox s â y â I â§ â i, (I.lower i + I.upper i) / 2 < y i â i â s := by
simp only [splitCenterBox, mem_def, â forall_and]
refine forall_congr' fun i ⊠?_
dsimp only [Set.piecewise]
split_ifs with hs <;> simp only [hs, iff_true_iff, iff_false_iff, not_lt]
exacts [âšfun H ⊠âšâš(left_lt_add_div_two.2 (I.lower_lt_upper i)).trans H.1, H.2â©, H.1â©,
fun H ⊠âšH.2, H.1.2â©â©,
âšfun H ⊠âšâšH.1, H.2.trans (add_div_two_lt_right.2 (I.lower_lt_upper i)).leâ©, H.2â©,
fun H ⊠âšH.1.1, H.2â©â©]
theorem splitCenterBox_le (I : Box ι) (s : Set ι) : I.splitCenterBox s †I :=
fun _ hx ⊠(mem_splitCenterBox.1 hx).1
-- Distinct choices of `s` yield disjoint half-boxes: a common point would force `s = t`.
theorem disjoint_splitCenterBox (I : Box ι) {s t : Set ι} (h : s â t) :
Disjoint (I.splitCenterBox s : Set (ι â â)) (I.splitCenterBox t) := by
rw [disjoint_iff_inf_le]
rintro y âšhs, htâ©; apply h
ext i
rw [mem_coe, mem_splitCenterBox] at hs ht
rw [â hs.2, â ht.2]
theorem injective_splitCenterBox (I : Box ι) : Injective I.splitCenterBox := fun _ _ H âŠ
by_contra fun Hne ⊠(I.disjoint_splitCenterBox Hne).ne (nonempty_coe _).ne_empty (H ➠rfl)
-- Every point of `I` lies in exactly one half-box, namely the one determined by which side of
-- each center hyperplane it is on.
@[simp]
theorem exists_mem_splitCenterBox {I : Box ι} {x : ι â â} : (â s, x â I.splitCenterBox s) â x â I :=
âšfun âšs, hs⩠⊠I.splitCenterBox_le s hs, fun hx âŠ
âš{ i | (I.lower i + I.upper i) / 2 < x i }, mem_splitCenterBox.2 âšhx, fun _ ⊠Iff.rflâ©â©â©
/-- `BoxIntegral.Box.splitCenterBox` bundled as a `Function.Embedding`. -/
@[simps]
def splitCenterBoxEmb (I : Box ι) : Set ι ⪠Box ι :=
âšsplitCenterBox I, injective_splitCenterBox Iâ©
@[simp]
theorem iUnion_coe_splitCenterBox (I : Box ι) : â s, (I.splitCenterBox s : Set (ι â â)) = I := by
ext x
simp
-- Splitting at the center halves every edge length.
@[simp]
theorem upper_sub_lower_splitCenterBox (I : Box ι) (s : Set ι) (i : ι) :
(I.splitCenterBox s).upper i - (I.splitCenterBox s).lower i = (I.upper i - I.lower i) / 2 := by
by_cases i â s <;> field_simp [splitCenterBox] <;> field_simp [mul_two, two_mul]
/-- Let `p` be a predicate on `Box ι`, let `I` be a box. Suppose that the following two properties
hold true.
* `H_ind` : Consider a smaller box `J †I`. The hyperplanes passing through the center of `J` split
it into `2 ^ n` boxes. If `p` holds true on each of these boxes, then it true on `J`.
* `H_nhds` : For each `z` in the closed box `I.Icc` there exists a neighborhood `U` of `z` within
`I.Icc` such that for every box `J †I` such that `z â J.Icc â U`, if `J` is homothetic to `I`
with a coefficient of the form `1 / 2 ^ m`, then `p` is true on `J`.
Then `p I` is true. See also `BoxIntegral.Box.subbox_induction_on` for a version using
`BoxIntegral.Prepartition.splitCenter` instead of `BoxIntegral.Box.splitCenterBox`.
The proof still works if we assume `H_ind` only for subboxes `J †I` that are homothetic to `I` with
a coefficient of the form `2â»áµ` but we do not need this generalization yet. -/
@[elab_as_elim]
theorem subbox_induction_on' {p : Box ι â Prop} (I : Box ι)
(H_ind : â J †I, (â s, p (splitCenterBox J s)) â p J)
(H_nhds : â z â Box.Icc I, â U â ð[Box.Icc I] z, â J †I, â (m : â), z â Box.Icc J â
Box.Icc J â U â (â i, J.upper i - J.lower i = (I.upper i - I.lower i) / 2 ^ m) â p J) :
p I := by
by_contra hpI
-- First we use `H_ind` to construct a decreasing sequence of boxes such that `â m, ¬p (J m)`.
replace H_ind := fun J hJ ⊠not_imp_not.2 (H_ind J hJ)
simp only [exists_imp, not_forall] at H_ind
-- `s J` chooses, for each bad box `J`, a half-box on which `p` still fails.
choose! s hs using H_ind
set J : â â Box ι := fun m ⊠(fun J ⊠splitCenterBox J (s J))^[m] I
have J_succ : â m, J (m + 1) = splitCenterBox (J m) (s <| J m) :=
fun m ⊠iterate_succ_apply' _ _ _
-- Now we prove some properties of `J`
have hJmono : Antitone J :=
antitone_nat_of_succ_le fun n ⊠by simpa [J_succ] using splitCenterBox_le _ _
have hJle : â m, J m †I := fun m ⊠hJmono (zero_le m)
have hJp : â m, ¬p (J m) :=
fun m ⊠Nat.recOn m hpI fun m ⊠by simpa only [J_succ] using hs (J m) (hJle m)
-- Each `J m` is homothetic to `I` with coefficient `1 / 2 ^ m`: every step halves every edge.
have hJsub : â m i, (J m).upper i - (J m).lower i = (I.upper i - I.lower i) / 2 ^ m := by
intro m i
induction' m with m ihm
· simp [J, Nat.zero_eq]
simp only [pow_succ, J_succ, upper_sub_lower_splitCenterBox, ihm, div_div]
have h0 : J 0 = I := rfl
clear_value J
clear hpI hs J_succ s
-- Let `z` be the unique common point of all `(J m).Icc`. Then `H_nhds` proves `p (J m)` for
-- sufficiently large `m`. This contradicts `hJp`.
set z : ι â â := âš m, (J m).lower
have hzJ : â m, z â Box.Icc (J m) :=
mem_iInter.1 (ciSup_mem_iInter_Icc_of_antitone_Icc
((@Box.Icc ι).monotone.comp_antitone hJmono) fun m ⊠(J m).lower_le_upper)
have hJl_mem : â m, (J m).lower â Box.Icc I := fun m ⊠le_iff_Icc.1 (hJle m) (J m).lower_mem_Icc
have hJu_mem : â m, (J m).upper â Box.Icc I := fun m ⊠le_iff_Icc.1 (hJle m) (J m).upper_mem_Icc
have hJlz : Tendsto (fun m ⊠(J m).lower) atTop (ð z) :=
tendsto_atTop_ciSup (antitone_lower.comp hJmono) âšI.upper, fun x âšm, hm⩠⊠hm âž (hJl_mem m).2â©
-- The upper corners converge to `z` as well, because the edge lengths tend to `0`.
have hJuz : Tendsto (fun m ⊠(J m).upper) atTop (ð z) := by
suffices Tendsto (fun m ⊠(J m).upper - (J m).lower) atTop (ð 0) by simpa using hJlz.add this
refine tendsto_pi_nhds.2 fun i ⊠?_
simpa [hJsub] using
tendsto_const_nhds.div_atTop (tendsto_pow_atTop_atTop_of_one_lt _root_.one_lt_two)
replace hJlz : Tendsto (fun m ⊠(J m).lower) atTop (ð[Icc I.lower I.upper] z) :=
tendsto_nhdsWithin_of_tendsto_nhds_of_eventually_within _ hJlz (eventually_of_forall hJl_mem)
replace hJuz : Tendsto (fun m ⊠(J m).upper) atTop (ð[Icc I.lower I.upper] z) :=
tendsto_nhdsWithin_of_tendsto_nhds_of_eventually_within _ hJuz (eventually_of_forall hJu_mem)
rcases H_nhds z (h0 âž hzJ 0) with âšU, hUz, hUâ©
-- Eventually `(J m).Icc â U`, so `H_nhds` applies to `J m`, contradicting `hJp m`.
rcases (tendsto_lift'.1 (hJlz.Icc hJuz) U hUz).exists with âšm, hUmâ©
exact hJp m (hU (J m) (hJle m) m (hzJ m) hUm (hJsub m))
end Box
end BoxIntegral
|
Analysis\BoxIntegral\Partition\Additive.lean | /-
Copyright (c) 2021 Yury Kudryashov. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yury Kudryashov
-/
import Mathlib.Analysis.BoxIntegral.Partition.Split
import Mathlib.Analysis.NormedSpace.OperatorNorm.Mul
/-!
# Box additive functions
We say that a function `f : Box ι â M` from boxes in `ââ¿` to a commutative additive monoid `M` is
*box additive* on subboxes of `Iâ : WithTop (Box ι)` if for any box `J`, `âJ †Iâ`, and a partition
`Ï` of `J`, `f J = â J' â Ï.boxes, f J'`. We use `Iâ : WithTop (Box ι)` instead of `Iâ : Box ι` to
use the same definition for functions box additive on subboxes of a box and for functions box
additive on all boxes.
Examples of box-additive functions include the measure of a box and the integral of a fixed
integrable function over a box.
In this file we define box-additive functions and prove that a function such that
`f J = f (J ⩠{x | x i < y}) + f (J ⩠{x | y †x i})` is box-additive.
## Tags
rectangular box, additive function
-/
noncomputable section
open scoped Classical
open Function Set
namespace BoxIntegral
variable {ι M : Type*} {n : â}
/-- A function on `Box ι` is called box additive if for every box `J` and a partition `Ï` of `J`
we have `f J = â Ji â Ï.boxes, f Ji`. A function is called box additive on subboxes of `I : Box ι`
if the same property holds for `J †I`. We formalize these two notions in the same definition
using `I : WithBot (Box ι)`: the value `I = â€` corresponds to functions box additive on the whole
space. -/
structure BoxAdditiveMap (ι M : Type*) [AddCommMonoid M] (I : WithTop (Box ι)) where
/-- The function underlying this additive map. -/
toFun : Box ι â M
-- The defining property: summing over any partition of a subbox `J` recovers `toFun J`.
sum_partition_boxes' : â J : Box ι, âJ †I â â Ï : Prepartition J, Ï.IsPartition â
â Ji â Ï.boxes, toFun Ji = toFun J
/-- A function on `Box ι` is called box additive if for every box `J` and a partition `Ï` of `J`
we have `f J = â Ji â Ï.boxes, f Ji`. -/
scoped notation:25 ι " âáµáµ " M => BoxIntegral.BoxAdditiveMap ι M â€
@[inherit_doc] scoped notation:25 ι " âáµáµ[" I "] " M => BoxIntegral.BoxAdditiveMap ι M I
namespace BoxAdditiveMap
open Box Prepartition Finset
variable {N : Type*} [AddCommMonoid M] [AddCommMonoid N] {Iâ : WithTop (Box ι)} {I J : Box ι}
{i : ι}
-- Box-additive maps can be applied directly to boxes via the `FunLike` machinery.
instance : FunLike (ι âáµáµ[Iâ] M) (Box ι) M where
coe := toFun
coe_injective' f g h := by cases f; cases g; congr
initialize_simps_projections BoxIntegral.BoxAdditiveMap (toFun â apply)
@[simp]
theorem coe_mk (f h) : â(mk f h : ι âáµáµ[Iâ] M) = f := rfl
theorem coe_injective : Injective fun (f : ι âáµáµ[Iâ] M) x => f x :=
DFunLike.coe_injective
-- Porting note (#10618): was @[simp], now can be proved by `simp`
theorem coe_inj {f g : ι âáµáµ[Iâ] M} : (f : Box ι â M) = g â f = g := DFunLike.coe_fn_eq
-- Restatement of the defining property with the bundled coercion.
theorem sum_partition_boxes (f : ι âáµáµ[Iâ] M) (hI : âI †Iâ) {Ï : Prepartition I}
(h : Ï.IsPartition) : â J â Ï.boxes, f J = f I :=
f.sum_partition_boxes' I hI Ï h
@[simps (config := .asFn)]
instance : Zero (ι âáµáµ[Iâ] M) :=
âšâš0, fun _ _ _ _ => sum_const_zeroâ©â©
instance : Inhabited (ι âáµáµ[Iâ] M) :=
âš0â©
-- Pointwise sum of box-additive maps is box additive.
instance : Add (ι âáµáµ[Iâ] M) :=
âšfun f g =>
âšf + g, fun I hI Ï hÏ => by
simp only [Pi.add_apply, sum_add_distrib, sum_partition_boxes _ hI hÏ]â©â©
-- A scalar multiple of a box-additive map is box additive.
instance {R} [Monoid R] [DistribMulAction R M] : SMul R (ι âáµáµ[Iâ] M) :=
âšfun r f =>
âšr ⢠(f : Box ι â M), fun I hI Ï hÏ => by
simp only [Pi.smul_apply, â smul_sum, sum_partition_boxes _ hI hÏ]â©â©
-- The monoid structure is transported from functions along the injective coercion.
instance : AddCommMonoid (ι âáµáµ[Iâ] M) :=
Function.Injective.addCommMonoid _ coe_injective rfl (fun _ _ => rfl) fun _ _ => rfl
-- Splitting a box by the hyperplane `xáµ¢ = x` preserves the total value of `f`.
@[simp]
theorem map_split_add (f : ι âáµáµ[Iâ] M) (hI : âI †Iâ) (i : ι) (x : â) :
(I.splitLower i x).elim' 0 f + (I.splitUpper i x).elim' 0 f = f I := by
rw [â f.sum_partition_boxes hI (isPartitionSplit I i x), sum_split_boxes]
/-- If `f` is box-additive on subboxes of `Iâ`, then it is box-additive on subboxes of any
`I †Iâ`. -/
@[simps]
def restrict (f : ι âáµáµ[Iâ] M) (I : WithTop (Box ι)) (hI : I †Iâ) : ι âáµáµ[I] M :=
âšf, fun J hJ => f.2 J (hJ.trans hI)â©
/-- If `f : Box ι â M` is box additive on partitions of the form `split I i x`, then it is box
additive. -/
def ofMapSplitAdd [Finite ι] (f : Box ι â M) (Iâ : WithTop (Box ι))
(hf : â I : Box ι, âI †Iâ â â {i x}, x â Ioo (I.lower i) (I.upper i) â
(I.splitLower i x).elim' 0 f + (I.splitUpper i x).elim' 0 f = f I) :
ι âáµáµ[Iâ] M := by
refine âšf, ?_â©
-- First upgrade additivity under a single hyperplane split to additivity under any
-- `splitMany` (a finite family of hyperplane splits), by induction on the family.
replace hf : â I : Box ι, âI †Iâ â â s, (â J â (splitMany I s).boxes, f J) = f I := by
intro I hI s
induction' s using Finset.induction_on with a s _ ihs
· simp
rw [splitMany_insert, inf_split, â ihs, biUnion_boxes, sum_biUnion_boxes]
refine Finset.sum_congr rfl fun J' hJ' => ?_
-- A split is nontrivial only when the hyperplane meets the interior of `J'`.
by_cases h : a.2 â Ioo (J'.lower a.1) (J'.upper a.1)
· rw [sum_split_boxes]
exact hf _ ((WithTop.coe_le_coe.2 <| le_of_mem _ hJ').trans hI) h
· rw [split_of_not_mem_Ioo h, top_boxes, Finset.sum_singleton]
-- Any partition is refined by some `splitMany`, which reduces the general case to the above.
intro I hI Ï hÏ
have Hle : â J â Ï, âJ †Iâ := fun J hJ => (WithTop.coe_le_coe.2 <| Ï.le_of_mem hJ).trans hI
rcases hÏ.exists_splitMany_le with âšs, hsâ©
rw [â hf _ hI, â inf_of_le_right hs, inf_splitMany, biUnion_boxes, sum_biUnion_boxes]
exact Finset.sum_congr rfl fun J hJ => (hf _ (Hle _ hJ) _).symm
/-- If `g : M â N` is an additive map and `f` is a box additive map, then `g â f` is a box additive
map. -/
@[simps (config := .asFn)]
def map (f : ι âáµáµ[Iâ] M) (g : M â+ N) : ι âáµáµ[Iâ] N where
toFun := g â f
sum_partition_boxes' I hI Ï hÏ := by simp_rw [comp, â map_sum, f.sum_partition_boxes hI hÏ]
/-- If `f` is a box additive function on subboxes of `I` and `Ïâ`, `Ïâ` are two prepartitions of
`I` that cover the same part of `I`, then `â J â Ïâ.boxes, f J = â J â Ïâ.boxes, f J`. -/
theorem sum_boxes_congr [Finite ι] (f : ι âáµáµ[Iâ] M) (hI : âI †Iâ) {Ïâ Ïâ : Prepartition I}
(h : Ïâ.iUnion = Ïâ.iUnion) : â J â Ïâ.boxes, f J = â J â Ïâ.boxes, f J := by
-- Choose a single `splitMany` refining both prepartitions on their common union.
rcases exists_splitMany_inf_eq_filter_of_finite {Ïâ, Ïâ} ((finite_singleton _).insert _) with
âšs, hsâ©
simp only [inf_splitMany] at hs
rcases hs _ (Or.inl rfl), hs _ (Or.inr rfl) with âšhâ, hââ©; clear hs
rw [h] at hâ
-- Refine both sums to the common refinement, where they agree term by term.
calc
â J â Ïâ.boxes, f J = â J â Ïâ.boxes, â J' â (splitMany J s).boxes, f J' :=
Finset.sum_congr rfl fun J hJ => (f.sum_partition_boxes ?_ (isPartition_splitMany _ _)).symm
_ = â J â (Ïâ.biUnion fun J => splitMany J s).boxes, f J := (sum_biUnion_boxes _ _ _).symm
_ = â J â (Ïâ.biUnion fun J => splitMany J s).boxes, f J := by rw [hâ, hâ]
_ = â J â Ïâ.boxes, â J' â (splitMany J s).boxes, f J' := sum_biUnion_boxes _ _ _
_ = â J â Ïâ.boxes, f J :=
Finset.sum_congr rfl fun J hJ => f.sum_partition_boxes ?_ (isPartition_splitMany _ _)
exacts [(WithTop.coe_le_coe.2 <| Ïâ.le_of_mem hJ).trans hI,
(WithTop.coe_le_coe.2 <| Ïâ.le_of_mem hJ).trans hI]
section ToSMul
variable {E : Type*} [NormedAddCommGroup E] [NormedSpace â E]
/-- If `f` is a box-additive map, then so is the map sending `I` to the scalar multiplication
by `f I` as a continuous linear map from `E` to itself. -/
def toSMul (f : ι âáµáµ[Iâ] â) : ι âáµáµ[Iâ] E âL[â] E :=
f.map (ContinuousLinearMap.lsmul â â).toLinearMap.toAddMonoidHom
@[simp]
theorem toSMul_apply (f : ι âáµáµ[Iâ] â) (I : Box ι) (x : E) : f.toSMul I x = f I ⢠x := rfl
end ToSMul
/-- Given a box `Iâ` in `ââ¿âºÂ¹`, `f x : Box (Fin n) â G` is a family of functions indexed by a real
`x` and for `x â [Iâ.lower i, Iâ.upper i]`, `f x` is box-additive on subboxes of the `i`-th face of
`Iâ`, then `fun J ⊠f (J.upper i) (J.face i) - f (J.lower i) (J.face i)` is box-additive on subboxes
of `Iâ`. -/
@[simps!]
def upperSubLower.{u} {G : Type u} [AddCommGroup G] (Iâ : Box (Fin (n + 1))) (i : Fin (n + 1))
(f : â â Box (Fin n) â G) (fb : Icc (Iâ.lower i) (Iâ.upper i) â Fin n âáµáµ[Iâ.face i] G)
(hf : â (x) (hx : x â Icc (Iâ.lower i) (Iâ.upper i)) (J), f x J = fb âšx, hxâ© J) :
Fin (n + 1) âáµáµ[Iâ] G :=
-- It suffices to verify additivity under a single hyperplane split, by `ofMapSplitAdd`.
ofMapSplitAdd (fun J : Box (Fin (n + 1)) => f (J.upper i) (J.face i) - f (J.lower i) (J.face i))
Iâ
(by
intro J hJ j x
rw [WithTop.coe_le_coe] at hJ
-- Split the split direction `j` into two cases: `j = i` and `j` in the face of `i`.
refine i.succAboveCases (fun hx => ?_) (fun j hx => ?_) j
-- Case `j = i`: the face is unchanged; the middle terms telescope away.
· simp only [Box.splitLower_def hx, Box.splitUpper_def hx, update_same, â WithBot.some_eq_coe,
Option.elim', Box.face, (· â ·), update_noteq (Fin.succAbove_ne _ _)]
abel
-- Case `j â i`: the split descends to a split of the face, where `fb` is box additive.
· have : (J.face i : WithTop (Box (Fin n))) †Iâ.face i :=
WithTop.coe_le_coe.2 (face_mono hJ i)
rw [le_iff_Icc, @Box.Icc_eq_pi _ Iâ] at hJ
simp only
rw [hf _ (hJ J.upper_mem_Icc _ trivial), hf _ (hJ J.lower_mem_Icc _ trivial),
â (fb _).map_split_add this j x, â (fb _).map_split_add this j x]
have hx' : x â Ioo ((J.face i).lower j) ((J.face i).upper j) := hx
simp only [Box.splitLower_def hx, Box.splitUpper_def hx, Box.splitLower_def hx',
Box.splitUpper_def hx', â WithBot.some_eq_coe, Option.elim', Box.face_mk,
update_noteq (Fin.succAbove_ne _ _).symm, sub_add_sub_comm,
update_comp_eq_of_injective _ (Fin.strictMono_succAbove i).injective j x, â hf]
simp only [Box.face])
end BoxAdditiveMap
end BoxIntegral
|
Analysis\BoxIntegral\Partition\Basic.lean | /-
Copyright (c) 2021 Yury Kudryashov. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yury Kudryashov
-/
import Mathlib.Algebra.BigOperators.Option
import Mathlib.Analysis.BoxIntegral.Box.Basic
import Mathlib.Data.Set.Pairwise.Lattice
/-!
# Partitions of rectangular boxes in `ℝⁿ`
In this file we define (pre)partitions of rectangular boxes in `ℝⁿ`. A partition of a box `I` in
`ℝⁿ` (see `BoxIntegral.Prepartition` and `BoxIntegral.Prepartition.IsPartition`) is a finite set
of pairwise disjoint boxes such that their union is exactly `I`. We use `boxes : Finset (Box ι)` to
store the set of boxes.
Many lemmas about box integrals deal with pairwise disjoint collections of subboxes, so we define a
structure `BoxIntegral.Prepartition (I : BoxIntegral.Box ι)` that stores a collection of boxes
such that
* each box `J ∈ boxes` is a subbox of `I`;
* the boxes are pairwise disjoint as sets in `ℝⁿ`.
Then we define a predicate `BoxIntegral.Prepartition.IsPartition`; `π.IsPartition` means that the
boxes of `π` actually cover the whole `I`. We also define some operations on prepartitions:
* `BoxIntegral.Prepartition.biUnion`: split each box of a partition into smaller boxes;
* `BoxIntegral.Prepartition.restrict`: restrict a partition to a smaller box.
We also define a `SemilatticeInf` structure on `BoxIntegral.Prepartition I` for all
`I : BoxIntegral.Box ι`.
## Tags
rectangular box, partition
-/
open Set Finset Function
open scoped Classical
open NNReal
noncomputable section
namespace BoxIntegral
variable {ι : Type*}
/-- A prepartition of `I : BoxIntegral.Box ι` is a finite set of pairwise disjoint subboxes of
`I`. -/
structure Prepartition (I : Box ι) where
/-- The underlying set of boxes -/
boxes : Finset (Box ι)
/-- Each box is a sub-box of `I` -/
le_of_mem' : â J â boxes, J †I
/-- The boxes in a prepartition are pairwise disjoint. -/
pairwiseDisjoint : Set.Pairwise (âboxes) (Disjoint on ((â) : Box ι â Set (ι â â)))
namespace Prepartition
variable {I J Jâ Jâ : Box ι} (Ï : Prepartition I) {Ïâ Ïâ : Prepartition I} {x : ι â â}
instance : Membership (Box ι) (Prepartition I) :=
âšfun J Ï => J â Ï.boxesâ©
@[simp]
theorem mem_boxes : J â Ï.boxes â J â Ï := Iff.rfl
@[simp]
theorem mem_mk {s hâ hâ} : J â (mk s hâ hâ : Prepartition I) â J â s := Iff.rfl
theorem disjoint_coe_of_mem (hâ : Jâ â Ï) (hâ : Jâ â Ï) (h : Jâ â Jâ) :
Disjoint (Jâ : Set (ι â â)) Jâ :=
Ï.pairwiseDisjoint hâ hâ h
theorem eq_of_mem_of_mem (hâ : Jâ â Ï) (hâ : Jâ â Ï) (hxâ : x â Jâ) (hxâ : x â Jâ) : Jâ = Jâ :=
by_contra fun H => (Ï.disjoint_coe_of_mem hâ hâ H).le_bot âšhxâ, hxââ©
theorem eq_of_le_of_le (hâ : Jâ â Ï) (hâ : Jâ â Ï) (hleâ : J †Jâ) (hleâ : J †Jâ) : Jâ = Jâ :=
Ï.eq_of_mem_of_mem hâ hâ (hleâ J.upper_mem) (hleâ J.upper_mem)
theorem eq_of_le (hâ : Jâ â Ï) (hâ : Jâ â Ï) (hle : Jâ †Jâ) : Jâ = Jâ :=
Ï.eq_of_le_of_le hâ hâ le_rfl hle
theorem le_of_mem (hJ : J â Ï) : J †I :=
Ï.le_of_mem' J hJ
theorem lower_le_lower (hJ : J â Ï) : I.lower †J.lower :=
Box.antitone_lower (Ï.le_of_mem hJ)
theorem upper_le_upper (hJ : J â Ï) : J.upper †I.upper :=
Box.monotone_upper (Ï.le_of_mem hJ)
theorem injective_boxes : Function.Injective (boxes : Prepartition I â Finset (Box ι)) := by
rintro âšsâ, hâ, hâ'â© âšsâ, hâ, hâ'â© (rfl : sâ = sâ)
rfl
@[ext]
theorem ext (h : â J, J â Ïâ â J â Ïâ) : Ïâ = Ïâ :=
injective_boxes <| Finset.ext h
/-- The singleton prepartition `{J}`, `J †I`. -/
@[simps]
def single (I J : Box ι) (h : J †I) : Prepartition I :=
âš{J}, by simpa, by simpâ©
@[simp]
theorem mem_single {J'} (h : J †I) : J' â single I J h â J' = J :=
mem_singleton
/-- We say that `π ≤ π'` if each box of `π` is a subbox of some box of `π'`. -/
instance : LE (Prepartition I) :=
âšfun Ï Ï' => â âŠIâŠ, I â Ï â â I' â Ï', I †I'â©
instance partialOrder : PartialOrder (Prepartition I) where
le := (· †·)
le_refl Ï I hI := âšI, hI, le_rflâ©
le_trans Ïâ Ïâ Ïâ hââ hââ Iâ hIâ :=
let âšIâ, hIâ, hIâââ© := hââ hIâ
let âšIâ, hIâ, hIâââ© := hââ hIâ
âšIâ, hIâ, hIââ.trans hIâââ©
le_antisymm := by
suffices â {Ïâ Ïâ : Prepartition I}, Ïâ †Ïâ â Ïâ †Ïâ â Ïâ.boxes â Ïâ.boxes from
fun Ïâ Ïâ hâ hâ => injective_boxes (Subset.antisymm (this hâ hâ) (this hâ hâ))
intro Ïâ Ïâ hâ hâ J hJ
rcases hâ hJ with âšJ', hJ', hleâ©; rcases hâ hJ' with âšJ'', hJ'', hle'â©
obtain rfl : J = J'' := Ïâ.eq_of_le hJ hJ'' (hle.trans hle')
obtain rfl : J' = J := le_antisymm â¹_⺠â¹_âº
assumption
instance : OrderTop (Prepartition I) where
top := single I I le_rfl
le_top Ï J hJ := âšI, by simp, Ï.le_of_mem hJâ©
instance : OrderBot (Prepartition I) where
bot := âšâ
,
fun _ hJ => (Finset.not_mem_empty _ hJ).elim,
fun _ hJ => (Set.not_mem_empty _ <| Finset.coe_empty âž hJ).elimâ©
bot_le _ _ hJ := (Finset.not_mem_empty _ hJ).elim
instance : Inhabited (Prepartition I) := âšâ€â©
theorem le_def : Ïâ †Ïâ â â J â Ïâ, â J' â Ïâ, J †J' := Iff.rfl
@[simp]
theorem mem_top : J â (†: Prepartition I) â J = I :=
mem_singleton
@[simp]
theorem top_boxes : (†: Prepartition I).boxes = {I} := rfl
@[simp]
theorem not_mem_bot : J â (⥠: Prepartition I) :=
Finset.not_mem_empty _
@[simp]
theorem bot_boxes : (⥠: Prepartition I).boxes = â
:= rfl
/-- An auxiliary lemma used to prove that the same point can't belong to more than
`2 ^ Fintype.card ι` closed boxes of a prepartition. -/
theorem injOn_setOf_mem_Icc_setOf_lower_eq (x : ι â â) :
InjOn (fun J : Box ι => { i | J.lower i = x i }) { J | J â Ï â§ x â Box.Icc J } := by
rintro Jâ âšhâ, hxââ© Jâ âšhâ, hxââ© (H : { i | Jâ.lower i = x i } = { i | Jâ.lower i = x i })
suffices â i, (Ioc (Jâ.lower i) (Jâ.upper i) â© Ioc (Jâ.lower i) (Jâ.upper i)).Nonempty by
choose y hyâ hyâ using this
exact Ï.eq_of_mem_of_mem hâ hâ hyâ hyâ
intro i
simp only [Set.ext_iff, mem_setOf] at H
rcases (hxâ.1 i).eq_or_lt with hiâ | hiâ
· have hiâ : Jâ.lower i = x i := (H _).1 hiâ
have Hâ : x i < Jâ.upper i := by simpa only [hiâ] using Jâ.lower_lt_upper i
have Hâ : x i < Jâ.upper i := by simpa only [hiâ] using Jâ.lower_lt_upper i
rw [Ioc_inter_Ioc, hiâ, hiâ, sup_idem, Set.nonempty_Ioc]
exact lt_min Hâ Hâ
· have hiâ : Jâ.lower i < x i := (hxâ.1 i).lt_of_ne (mt (H _).2 hiâ.ne)
exact âšx i, âšhiâ, hxâ.2 iâ©, âšhiâ, hxâ.2 iâ©â©
/-- The set of boxes of a prepartition that contain `x` in their closures has cardinality
at most `2 ^ Fintype.card ι`. -/
theorem card_filter_mem_Icc_le [Fintype ι] (x : ι â â) :
(Ï.boxes.filter fun J : Box ι => x â Box.Icc J).card †2 ^ Fintype.card ι := by
rw [â Fintype.card_set]
refine Finset.card_le_card_of_injOn (fun J : Box ι => { i | J.lower i = x i })
(fun _ _ => Finset.mem_univ _) ?_
simpa using Ï.injOn_setOf_mem_Icc_setOf_lower_eq x
/-- Given a prepartition `π : BoxIntegral.Prepartition I`, `π.iUnion` is the part of `I` covered by
the boxes of `π`. -/
protected def iUnion : Set (ι â â) :=
â J â Ï, âJ
theorem iUnion_def : Ï.iUnion = â J â Ï, âJ := rfl
theorem iUnion_def' : Ï.iUnion = â J â Ï.boxes, âJ := rfl
-- Porting note: Previous proof was `:= Set.mem_iUnionâ`
@[simp]
theorem mem_iUnion : x â Ï.iUnion â â J â Ï, x â J := by
convert Set.mem_iUnionâ
rw [Box.mem_coe, exists_prop]
@[simp]
theorem iUnion_single (h : J †I) : (single I J h).iUnion = J := by simp [iUnion_def]
@[simp]
theorem iUnion_top : (†: Prepartition I).iUnion = I := by simp [Prepartition.iUnion]
@[simp]
theorem iUnion_eq_empty : Ïâ.iUnion = â
â Ïâ = ⥠:= by
simp [â injective_boxes.eq_iff, Finset.ext_iff, Prepartition.iUnion, imp_false]
@[simp]
theorem iUnion_bot : (⥠: Prepartition I).iUnion = â
:=
iUnion_eq_empty.2 rfl
theorem subset_iUnion (h : J â Ï) : âJ â Ï.iUnion :=
subset_biUnion_of_mem h
theorem iUnion_subset : Ï.iUnion â I :=
iUnionâ_subset Ï.le_of_mem'
@[mono]
theorem iUnion_mono (h : Ïâ †Ïâ) : Ïâ.iUnion â Ïâ.iUnion := fun _ hx =>
let âš_, hJâ, hxâ© := Ïâ.mem_iUnion.1 hx
let âšJâ, hJâ, hleâ© := h hJâ
Ïâ.mem_iUnion.2 âšJâ, hJâ, hle hxâ©
theorem disjoint_boxes_of_disjoint_iUnion (h : Disjoint Ïâ.iUnion Ïâ.iUnion) :
Disjoint Ïâ.boxes Ïâ.boxes :=
Finset.disjoint_left.2 fun J hâ hâ =>
Disjoint.le_bot (h.mono (Ïâ.subset_iUnion hâ) (Ïâ.subset_iUnion hâ)) âšJ.upper_mem, J.upper_memâ©
theorem le_iff_nonempty_imp_le_and_iUnion_subset :
Ïâ †Ïâ â
(â J â Ïâ, â J' â Ïâ, (J â© J' : Set (ι â â)).Nonempty â J †J') â§ Ïâ.iUnion â Ïâ.iUnion := by
constructor
· refine fun H => âšfun J hJ J' hJ' Hne => ?_, iUnion_mono Hâ©
rcases H hJ with âšJ'', hJ'', Hleâ©
rcases Hne with âšx, hx, hx'â©
rwa [Ïâ.eq_of_mem_of_mem hJ' hJ'' hx' (Hle hx)]
· rintro âšH, HUâ© J hJ
simp only [Set.subset_def, mem_iUnion] at HU
rcases HU J.upper âšJ, hJ, J.upper_memâ© with âšJâ, hJâ, hxâ©
exact âšJâ, hJâ, H _ hJ _ hJâ âš_, J.upper_mem, hxâ©â©
theorem eq_of_boxes_subset_iUnion_superset (hâ : Ïâ.boxes â Ïâ.boxes) (hâ : Ïâ.iUnion â Ïâ.iUnion) :
Ïâ = Ïâ :=
le_antisymm (fun J hJ => âšJ, hâ hJ, le_rflâ©) <|
le_iff_nonempty_imp_le_and_iUnion_subset.2
âšfun _ hJâ _ hJâ Hne =>
(Ïâ.eq_of_mem_of_mem hJâ (hâ hJâ) Hne.choose_spec.1 Hne.choose_spec.2).le, hââ©
/-- Given a prepartition `π` of a box `I` and a collection of prepartitions `πi J` of all boxes
`J ∈ π`, returns the prepartition of `I` into the union of the boxes of all `πi J`.
Though we only use the values of `πi` on the boxes of `π`, we require `πi` to be a globally defined
function. -/
@[simps]
def biUnion (Ïi : â J : Box ι, Prepartition J) : Prepartition I where
boxes := Ï.boxes.biUnion fun J => (Ïi J).boxes
le_of_mem' J hJ := by
simp only [Finset.mem_biUnion, exists_prop, mem_boxes] at hJ
rcases hJ with âšJ', hJ', hJâ©
exact ((Ïi J').le_of_mem hJ).trans (Ï.le_of_mem hJ')
pairwiseDisjoint := by
simp only [Set.Pairwise, Finset.mem_coe, Finset.mem_biUnion]
rintro Jâ' âšJâ, hJâ, hJâ'â© Jâ' âšJâ, hJâ, hJâ'â© Hne
rw [Function.onFun, Set.disjoint_left]
rintro x hxâ hxâ; apply Hne
obtain rfl : Jâ = Jâ :=
Ï.eq_of_mem_of_mem hJâ hJâ ((Ïi Jâ).le_of_mem hJâ' hxâ) ((Ïi Jâ).le_of_mem hJâ' hxâ)
exact (Ïi Jâ).eq_of_mem_of_mem hJâ' hJâ' hxâ hxâ
variable {Ïi Ïiâ Ïiâ : â J : Box ι, Prepartition J}
@[simp]
theorem mem_biUnion : J â Ï.biUnion Ïi â â J' â Ï, J â Ïi J' := by simp [biUnion]
theorem biUnion_le (Ïi : â J, Prepartition J) : Ï.biUnion Ïi â€ Ï := fun _ hJ =>
let âšJ', hJ', hJâ© := Ï.mem_biUnion.1 hJ
âšJ', hJ', (Ïi J').le_of_mem hJâ©
@[simp]
theorem biUnion_top : (Ï.biUnion fun _ => â€) = Ï := by
ext
simp
@[congr]
theorem biUnion_congr (h : Ïâ = Ïâ) (hi : â J â Ïâ, Ïiâ J = Ïiâ J) :
Ïâ.biUnion Ïiâ = Ïâ.biUnion Ïiâ := by
subst Ïâ
ext J
simp only [mem_biUnion]
constructor <;> exact fun âšJ', hâ, hââ© => âšJ', hâ, hi J' hâ âž hââ©
theorem biUnion_congr_of_le (h : Ïâ = Ïâ) (hi : â J †I, Ïiâ J = Ïiâ J) :
Ïâ.biUnion Ïiâ = Ïâ.biUnion Ïiâ :=
biUnion_congr h fun J hJ => hi J (Ïâ.le_of_mem hJ)
@[simp]
theorem iUnion_biUnion (Ïi : â J : Box ι, Prepartition J) :
(Ï.biUnion Ïi).iUnion = â J â Ï, (Ïi J).iUnion := by simp [Prepartition.iUnion]
@[simp]
theorem sum_biUnion_boxes {M : Type*} [AddCommMonoid M] (Ï : Prepartition I)
(Ïi : â J, Prepartition J) (f : Box ι â M) :
(â J â Ï.boxes.biUnion fun J => (Ïi J).boxes, f J) =
â J â Ï.boxes, â J' â (Ïi J).boxes, f J' := by
refine Finset.sum_biUnion fun Jâ hâ Jâ hâ hne => Finset.disjoint_left.2 fun J' hâ' hâ' => ?_
exact hne (Ï.eq_of_le_of_le hâ hâ ((Ïi Jâ).le_of_mem hâ') ((Ïi Jâ).le_of_mem hâ'))
/-- Given a box `J ∈ π.biUnion πi`, returns the box `J' ∈ π` such that `J ∈ πi J'`.
For `J ∉ π.biUnion πi`, returns `I`. -/
def biUnionIndex (Ïi : â (J : Box ι), Prepartition J) (J : Box ι) : Box ι :=
if hJ : J â Ï.biUnion Ïi then (Ï.mem_biUnion.1 hJ).choose else I
theorem biUnionIndex_mem (hJ : J â Ï.biUnion Ïi) : Ï.biUnionIndex Ïi J â Ï := by
rw [biUnionIndex, dif_pos hJ]
exact (Ï.mem_biUnion.1 hJ).choose_spec.1
theorem biUnionIndex_le (Ïi : â J, Prepartition J) (J : Box ι) : Ï.biUnionIndex Ïi J †I := by
by_cases hJ : J â Ï.biUnion Ïi
· exact Ï.le_of_mem (Ï.biUnionIndex_mem hJ)
· rw [biUnionIndex, dif_neg hJ]
theorem mem_biUnionIndex (hJ : J â Ï.biUnion Ïi) : J â Ïi (Ï.biUnionIndex Ïi J) := by
convert (Ï.mem_biUnion.1 hJ).choose_spec.2 <;> exact dif_pos hJ
theorem le_biUnionIndex (hJ : J â Ï.biUnion Ïi) : J †Ï.biUnionIndex Ïi J :=
le_of_mem _ (Ï.mem_biUnionIndex hJ)
/-- Uniqueness property of `BoxIntegral.Prepartition.biUnionIndex`. -/
theorem biUnionIndex_of_mem (hJ : J â Ï) {J'} (hJ' : J' â Ïi J) : Ï.biUnionIndex Ïi J' = J :=
have : J' â Ï.biUnion Ïi := Ï.mem_biUnion.2 âšJ, hJ, hJ'â©
Ï.eq_of_le_of_le (Ï.biUnionIndex_mem this) hJ (Ï.le_biUnionIndex this) (le_of_mem _ hJ')
theorem biUnion_assoc (Ïi : â J, Prepartition J) (Ïi' : Box ι â â J : Box ι, Prepartition J) :
(Ï.biUnion fun J => (Ïi J).biUnion (Ïi' J)) =
(Ï.biUnion Ïi).biUnion fun J => Ïi' (Ï.biUnionIndex Ïi J) J := by
ext J
simp only [mem_biUnion, exists_prop]
constructor
· rintro âšJâ, hJâ, Jâ, hJâ, hJâ©
refine âšJâ, âšJâ, hJâ, hJââ©, ?_â©
rwa [Ï.biUnionIndex_of_mem hJâ hJâ]
· rintro âšJâ, âšJâ, hJâ, hJââ©, hJâ©
refine âšJâ, hJâ, Jâ, hJâ, ?_â©
rwa [Ï.biUnionIndex_of_mem hJâ hJâ] at hJ
/-- Create a `BoxIntegral.Prepartition` from a collection of possibly empty boxes by filtering out
the empty one if it exists. -/
def ofWithBot (boxes : Finset (WithBot (Box ι)))
(le_of_mem : â J â boxes, (J : WithBot (Box ι)) †I)
(pairwise_disjoint : Set.Pairwise (boxes : Set (WithBot (Box ι))) Disjoint) :
Prepartition I where
boxes := Finset.eraseNone boxes
le_of_mem' J hJ := by
rw [mem_eraseNone] at hJ
simpa only [WithBot.some_eq_coe, WithBot.coe_le_coe] using le_of_mem _ hJ
pairwiseDisjoint Jâ hâ Jâ hâ hne := by
simp only [mem_coe, mem_eraseNone] at hâ hâ
exact Box.disjoint_coe.1 (pairwise_disjoint hâ hâ (mt Option.some_inj.1 hne))
@[simp]
theorem mem_ofWithBot {boxes : Finset (WithBot (Box ι))} {hâ hâ} :
J â (ofWithBot boxes hâ hâ : Prepartition I) â (J : WithBot (Box ι)) â boxes :=
mem_eraseNone
@[simp]
theorem iUnion_ofWithBot (boxes : Finset (WithBot (Box ι)))
(le_of_mem : â J â boxes, (J : WithBot (Box ι)) †I)
(pairwise_disjoint : Set.Pairwise (boxes : Set (WithBot (Box ι))) Disjoint) :
(ofWithBot boxes le_of_mem pairwise_disjoint).iUnion = â J â boxes, âJ := by
suffices â (J : Box ι) (_ : âJ â boxes), âJ = â J â boxes, (J : Set (ι â â)) by
simpa [ofWithBot, Prepartition.iUnion]
simp only [â Box.biUnion_coe_eq_coe, @iUnion_comm _ _ (Box ι), @iUnion_comm _ _ (@Eq _ _ _),
iUnion_iUnion_eq_right]
theorem ofWithBot_le {boxes : Finset (WithBot (Box ι))}
{le_of_mem : â J â boxes, (J : WithBot (Box ι)) †I}
{pairwise_disjoint : Set.Pairwise (boxes : Set (WithBot (Box ι))) Disjoint}
(H : â J â boxes, J â ⥠â â J' â Ï, J †âJ') :
ofWithBot boxes le_of_mem pairwise_disjoint â€ Ï := by
have : â J : Box ι, âJ â boxes â â J' â Ï, J †J' := fun J hJ => by
simpa only [WithBot.coe_le_coe] using H J hJ WithBot.coe_ne_bot
simpa [ofWithBot, le_def]
theorem le_ofWithBot {boxes : Finset (WithBot (Box ι))}
{le_of_mem : â J â boxes, (J : WithBot (Box ι)) †I}
{pairwise_disjoint : Set.Pairwise (boxes : Set (WithBot (Box ι))) Disjoint}
(H : â J â Ï, â J' â boxes, âJ †J') : Ï â€ ofWithBot boxes le_of_mem pairwise_disjoint := by
intro J hJ
rcases H J hJ with âšJ', J'mem, hleâ©
lift J' to Box ι using ne_bot_of_le_ne_bot WithBot.coe_ne_bot hle
exact âšJ', mem_ofWithBot.2 J'mem, WithBot.coe_le_coe.1 hleâ©
theorem ofWithBot_mono {boxesâ : Finset (WithBot (Box ι))}
{le_of_memâ : â J â boxesâ, (J : WithBot (Box ι)) †I}
{pairwise_disjointâ : Set.Pairwise (boxesâ : Set (WithBot (Box ι))) Disjoint}
{boxesâ : Finset (WithBot (Box ι))} {le_of_memâ : â J â boxesâ, (J : WithBot (Box ι)) †I}
{pairwise_disjointâ : Set.Pairwise (boxesâ : Set (WithBot (Box ι))) Disjoint}
(H : â J â boxesâ, J â ⥠â â J' â boxesâ, J †J') :
ofWithBot boxesâ le_of_memâ pairwise_disjointâ â€
ofWithBot boxesâ le_of_memâ pairwise_disjointâ :=
le_ofWithBot _ fun J hJ => H J (mem_ofWithBot.1 hJ) WithBot.coe_ne_bot
theorem sum_ofWithBot {M : Type*} [AddCommMonoid M] (boxes : Finset (WithBot (Box ι)))
(le_of_mem : â J â boxes, (J : WithBot (Box ι)) †I)
(pairwise_disjoint : Set.Pairwise (boxes : Set (WithBot (Box ι))) Disjoint) (f : Box ι â M) :
(â J â (ofWithBot boxes le_of_mem pairwise_disjoint).boxes, f J) =
â J â boxes, Option.elim' 0 f J :=
Finset.sum_eraseNone _ _
/-- Restrict a prepartition to a box. -/
def restrict (Ï : Prepartition I) (J : Box ι) : Prepartition J :=
ofWithBot (Ï.boxes.image fun J' : Box ι => J â J')
(fun J' hJ' => by
rcases Finset.mem_image.1 hJ' with âšJ', -, rflâ©
exact inf_le_left)
(by
simp only [Set.Pairwise, onFun, Finset.mem_coe, Finset.mem_image]
rintro _ âšJâ, hâ, rflâ© _ âšJâ, hâ, rflâ© Hne
have : Jâ â Jâ := by
rintro rfl
exact Hne rfl
exact ((Box.disjoint_coe.2 <| Ï.disjoint_coe_of_mem hâ hâ this).inf_left' _).inf_right' _)
@[simp]
theorem mem_restrict : Jâ â Ï.restrict J â â J' â Ï, (Jâ : WithBot (Box ι)) = âJ â âJ' := by
simp [restrict, eq_comm]
theorem mem_restrict' : Jâ â Ï.restrict J â â J' â Ï, (Jâ : Set (ι â â)) = âJ â© âJ' := by
simp only [mem_restrict, â Box.withBotCoe_inj, Box.coe_inf, Box.coe_coe]
@[mono]
theorem restrict_mono {Ïâ Ïâ : Prepartition I} (Hle : Ïâ †Ïâ) : Ïâ.restrict J †Ïâ.restrict J := by
refine ofWithBot_mono fun Jâ hJâ hne => ?_
rw [Finset.mem_image] at hJâ; rcases hJâ with âšJâ, hJâ, rflâ©
rcases Hle hJâ with âšJâ, hJâ, hleâ©
exact âš_, Finset.mem_image_of_mem _ hJâ, inf_le_inf_left _ <| WithBot.coe_le_coe.2 hleâ©
theorem monotone_restrict : Monotone fun Ï : Prepartition I => restrict Ï J :=
fun _ _ => restrict_mono
/-- Restricting to a larger box does not change the set of boxes. We cannot claim equality
of prepartitions because they have different types. -/
theorem restrict_boxes_of_le (Ï : Prepartition I) (h : I †J) : (Ï.restrict J).boxes = Ï.boxes := by
simp only [restrict, ofWithBot, eraseNone_eq_biUnion]
refine Finset.image_biUnion.trans ?_
refine (Finset.biUnion_congr rfl ?_).trans Finset.biUnion_singleton_eq_self
intro J' hJ'
rw [inf_of_le_right, â WithBot.some_eq_coe, Option.toFinset_some]
exact WithBot.coe_le_coe.2 ((Ï.le_of_mem hJ').trans h)
@[simp]
theorem restrict_self : Ï.restrict I = Ï :=
injective_boxes <| restrict_boxes_of_le Ï le_rfl
@[simp]
theorem iUnion_restrict : (Ï.restrict J).iUnion = (J : Set (ι â â)) â© (Ï.iUnion) := by
simp [restrict, â inter_iUnion, â iUnion_def]
@[simp]
theorem restrict_biUnion (Ïi : â J, Prepartition J) (hJ : J â Ï) :
(Ï.biUnion Ïi).restrict J = Ïi J := by
refine (eq_of_boxes_subset_iUnion_superset (fun Jâ hâ => ?_) ?_).symm
· refine (mem_restrict _).2 âšJâ, Ï.mem_biUnion.2 âšJ, hJ, hââ©, (inf_of_le_right ?_).symmâ©
exact WithBot.coe_le_coe.2 (le_of_mem _ hâ)
· simp only [iUnion_restrict, iUnion_biUnion, Set.subset_def, Set.mem_inter_iff, Set.mem_iUnion]
rintro x âšhxJ, Jâ, hâ, hxâ©
obtain rfl : J = Jâ := Ï.eq_of_mem_of_mem hJ hâ hxJ (iUnion_subset _ hx)
exact hx
theorem biUnion_le_iff {Ïi : â J, Prepartition J} {Ï' : Prepartition I} :
Ï.biUnion Ïi †Ï' â â J â Ï, Ïi J †Ï'.restrict J := by
constructor <;> intro H J hJ
· rw [â Ï.restrict_biUnion Ïi hJ]
exact restrict_mono H
· rw [mem_biUnion] at hJ
rcases hJ with âšJâ, hâ, hJâ©
rcases H Jâ hâ hJ with âšJâ, hâ, Hleâ©
rcases Ï'.mem_restrict.mp hâ with âšJâ, hâ, Hâ©
exact âšJâ, hâ, Hle.trans <| WithBot.coe_le_coe.1 <| H.trans_le inf_le_rightâ©
theorem le_biUnion_iff {Ïi : â J, Prepartition J} {Ï' : Prepartition I} :
Ï' †Ï.biUnion Ïi â Ï' â€ Ï â§ â J â Ï, Ï'.restrict J †Ïi J := by
refine âšfun H => âšH.trans (Ï.biUnion_le Ïi), fun J hJ => ?_â©, ?_â©
· rw [â Ï.restrict_biUnion Ïi hJ]
exact restrict_mono H
· rintro âšH, Hiâ© J' hJ'
rcases H hJ' with âšJ, hJ, hleâ©
have : J' â Ï'.restrict J :=
Ï'.mem_restrict.2 âšJ', hJ', (inf_of_le_right <| WithBot.coe_le_coe.2 hle).symmâ©
rcases Hi J hJ this with âšJi, hJi, hleiâ©
exact âšJi, Ï.mem_biUnion.2 âšJ, hJ, hJiâ©, hleiâ©
instance inf : Inf (Prepartition I) :=
âšfun Ïâ Ïâ => Ïâ.biUnion fun J => Ïâ.restrict Jâ©
theorem inf_def (Ïâ Ïâ : Prepartition I) : Ïâ â Ïâ = Ïâ.biUnion fun J => Ïâ.restrict J := rfl
@[simp]
theorem mem_inf {Ïâ Ïâ : Prepartition I} :
J â Ïâ â Ïâ â â Jâ â Ïâ, â Jâ â Ïâ, (J : WithBot (Box ι)) = âJâ â âJâ := by
simp only [inf_def, mem_biUnion, mem_restrict]
@[simp]
theorem iUnion_inf (Ïâ Ïâ : Prepartition I) : (Ïâ â Ïâ).iUnion = Ïâ.iUnion â© Ïâ.iUnion := by
simp only [inf_def, iUnion_biUnion, iUnion_restrict, â iUnion_inter, â iUnion_def]
instance : SemilatticeInf (Prepartition I) :=
{ Prepartition.inf,
Prepartition.partialOrder with
inf_le_left := fun Ïâ _ => Ïâ.biUnion_le _
inf_le_right := fun _ _ => (biUnion_le_iff _).2 fun _ _ => le_rfl
le_inf := fun _ Ïâ _ hâ hâ => Ïâ.le_biUnion_iff.2 âšhâ, fun _ _ => restrict_mono hââ© }
/-- The prepartition with boxes `{J ∈ π | p J}`. -/
@[simps]
def filter (Ï : Prepartition I) (p : Box ι â Prop) : Prepartition I where
boxes := Ï.boxes.filter p
le_of_mem' _ hJ := Ï.le_of_mem (mem_filter.1 hJ).1
pairwiseDisjoint _ hâ _ hâ := Ï.disjoint_coe_of_mem (mem_filter.1 hâ).1 (mem_filter.1 hâ).1
@[simp]
theorem mem_filter {p : Box ι â Prop} : J â Ï.filter p â J â Ï â§ p J :=
Finset.mem_filter
theorem filter_le (Ï : Prepartition I) (p : Box ι â Prop) : Ï.filter p â€ Ï := fun J hJ =>
let âšhÏ, _â© := Ï.mem_filter.1 hJ
âšJ, hÏ, le_rflâ©
theorem filter_of_true {p : Box ι â Prop} (hp : â J â Ï, p J) : Ï.filter p = Ï := by
ext J
simpa using hp J
@[simp]
theorem filter_true : (Ï.filter fun _ => True) = Ï :=
Ï.filter_of_true fun _ _ => trivial
@[simp]
theorem iUnion_filter_not (Ï : Prepartition I) (p : Box ι â Prop) :
(Ï.filter fun J => ¬p J).iUnion = Ï.iUnion \ (Ï.filter p).iUnion := by
simp only [Prepartition.iUnion]
convert (@Set.biUnion_diff_biUnion_eq (ι â â) (Box ι) Ï.boxes (Ï.filter p).boxes (â) _).symm
· simp (config := { contextual := true })
· rw [Set.PairwiseDisjoint]
convert Ï.pairwiseDisjoint
rw [Set.union_eq_left, filter_boxes, coe_filter]
exact fun _ âšh, _â© => h
theorem sum_fiberwise {α M} [AddCommMonoid M] (Ï : Prepartition I) (f : Box ι â α) (g : Box ι â M) :
(â y â Ï.boxes.image f, â J â (Ï.filter fun J => f J = y).boxes, g J) =
â J â Ï.boxes, g J := by
convert sum_fiberwise_of_maps_to (fun _ => Finset.mem_image_of_mem f) g
/-- Union of two disjoint prepartitions. -/
@[simps]
def disjUnion (Ïâ Ïâ : Prepartition I) (h : Disjoint Ïâ.iUnion Ïâ.iUnion) : Prepartition I where
boxes := Ïâ.boxes ⪠Ïâ.boxes
le_of_mem' J hJ := (Finset.mem_union.1 hJ).elim Ïâ.le_of_mem Ïâ.le_of_mem
pairwiseDisjoint :=
suffices â Jâ â Ïâ, â Jâ â Ïâ, Jâ â Jâ â Disjoint (Jâ : Set (ι â â)) Jâ by
simpa [pairwise_union_of_symmetric (symmetric_disjoint.comap _), pairwiseDisjoint]
fun Jâ hâ Jâ hâ _ => h.mono (Ïâ.subset_iUnion hâ) (Ïâ.subset_iUnion hâ)
@[simp]
theorem mem_disjUnion (H : Disjoint Ïâ.iUnion Ïâ.iUnion) :
J â Ïâ.disjUnion Ïâ H â J â Ïâ âš J â Ïâ :=
Finset.mem_union
@[simp]
theorem iUnion_disjUnion (h : Disjoint Ïâ.iUnion Ïâ.iUnion) :
(Ïâ.disjUnion Ïâ h).iUnion = Ïâ.iUnion ⪠Ïâ.iUnion := by
simp [disjUnion, Prepartition.iUnion, iUnion_or, iUnion_union_distrib]
@[simp]
theorem sum_disj_union_boxes {M : Type*} [AddCommMonoid M] (h : Disjoint Ïâ.iUnion Ïâ.iUnion)
(f : Box ι â M) :
â J â Ïâ.boxes ⪠Ïâ.boxes, f J = (â J â Ïâ.boxes, f J) + â J â Ïâ.boxes, f J :=
sum_union <| disjoint_boxes_of_disjoint_iUnion h
section Distortion
variable [Fintype ι]
/-- The distortion of a prepartition is the maximum of the distortions of the boxes of this
prepartition. -/
def distortion : ââ¥0 :=
Ï.boxes.sup Box.distortion
theorem distortion_le_of_mem (h : J â Ï) : J.distortion †Ï.distortion :=
le_sup h
theorem distortion_le_iff {c : ââ¥0} : Ï.distortion †c â â J â Ï, Box.distortion J †c :=
Finset.sup_le_iff
theorem distortion_biUnion (Ï : Prepartition I) (Ïi : â J, Prepartition J) :
(Ï.biUnion Ïi).distortion = Ï.boxes.sup fun J => (Ïi J).distortion :=
sup_biUnion _ _
@[simp]
theorem distortion_disjUnion (h : Disjoint Ïâ.iUnion Ïâ.iUnion) :
(Ïâ.disjUnion Ïâ h).distortion = max Ïâ.distortion Ïâ.distortion :=
sup_union
theorem distortion_of_const {c} (hâ : Ï.boxes.Nonempty) (hâ : â J â Ï, Box.distortion J = c) :
Ï.distortion = c :=
(sup_congr rfl hâ).trans (sup_const hâ _)
@[simp]
theorem distortion_top (I : Box ι) : distortion (†: Prepartition I) = I.distortion :=
sup_singleton
@[simp]
theorem distortion_bot (I : Box ι) : distortion (⥠: Prepartition I) = 0 :=
sup_empty
end Distortion
/-- A prepartition `π` of `I` is a partition if the boxes of `π` cover the whole `I`. -/
def IsPartition (Ï : Prepartition I) :=
â x â I, â J â Ï, x â J
theorem isPartition_iff_iUnion_eq {Ï : Prepartition I} : Ï.IsPartition â Ï.iUnion = I := by
simp_rw [IsPartition, Set.Subset.antisymm_iff, Ï.iUnion_subset, true_and_iff, Set.subset_def,
mem_iUnion, Box.mem_coe]
@[simp]
theorem isPartition_single_iff (h : J †I) : IsPartition (single I J h) â J = I := by
simp [isPartition_iff_iUnion_eq]
theorem isPartitionTop (I : Box ι) : IsPartition (†: Prepartition I) :=
fun _ hx => âšI, mem_top.2 rfl, hxâ©
namespace IsPartition
variable {Ï}
theorem iUnion_eq (h : Ï.IsPartition) : Ï.iUnion = I :=
isPartition_iff_iUnion_eq.1 h
theorem iUnion_subset (h : Ï.IsPartition) (Ïâ : Prepartition I) : Ïâ.iUnion â Ï.iUnion :=
h.iUnion_eq.symm âž Ïâ.iUnion_subset
protected theorem existsUnique (h : Ï.IsPartition) (hx : x â I) :
â! J â Ï, x â J := by
rcases h x hx with âšJ, h, hxâ©
exact ExistsUnique.intro J âšh, hxâ© fun J' âšh', hx'â© => Ï.eq_of_mem_of_mem h' h hx' hx
theorem nonempty_boxes (h : Ï.IsPartition) : Ï.boxes.Nonempty :=
let âšJ, hJ, _â© := h _ I.upper_mem
âšJ, hJâ©
theorem eq_of_boxes_subset (hâ : Ïâ.IsPartition) (hâ : Ïâ.boxes â Ïâ.boxes) : Ïâ = Ïâ :=
eq_of_boxes_subset_iUnion_superset hâ <| hâ.iUnion_subset _
theorem le_iff (h : Ïâ.IsPartition) :
Ïâ †Ïâ â â J â Ïâ, â J' â Ïâ, (J â© J' : Set (ι â â)).Nonempty â J †J' :=
le_iff_nonempty_imp_le_and_iUnion_subset.trans <| and_iff_left <| h.iUnion_subset _
protected theorem biUnion (h : IsPartition Ï) (hi : â J â Ï, IsPartition (Ïi J)) :
IsPartition (Ï.biUnion Ïi) := fun x hx =>
let âšJ, hJ, hxiâ© := h x hx
let âšJi, hJi, hxâ© := hi J hJ x hxi
âšJi, Ï.mem_biUnion.2 âšJ, hJ, hJiâ©, hxâ©
protected theorem restrict (h : IsPartition Ï) (hJ : J †I) : IsPartition (Ï.restrict J) :=
isPartition_iff_iUnion_eq.2 <| by simp [h.iUnion_eq, hJ]
protected theorem inf (hâ : IsPartition Ïâ) (hâ : IsPartition Ïâ) : IsPartition (Ïâ â Ïâ) :=
isPartition_iff_iUnion_eq.2 <| by simp [hâ.iUnion_eq, hâ.iUnion_eq]
end IsPartition
theorem iUnion_biUnion_partition (h : â J â Ï, (Ïi J).IsPartition) :
(Ï.biUnion Ïi).iUnion = Ï.iUnion :=
(iUnion_biUnion _ _).trans <|
iUnion_congr_of_surjective id surjective_id fun J =>
iUnion_congr_of_surjective id surjective_id fun hJ => (h J hJ).iUnion_eq
theorem isPartitionDisjUnionOfEqDiff (h : Ïâ.iUnion = âI \ Ïâ.iUnion) :
IsPartition (Ïâ.disjUnion Ïâ <| h.symm âž disjoint_sdiff_self_right) :=
isPartition_iff_iUnion_eq.2 <| (iUnion_disjUnion _).trans <| by simp [h, Ïâ.iUnion_subset]
end Prepartition
end BoxIntegral
|
Analysis\BoxIntegral\Partition\Filter.lean | /-
Copyright (c) 2021 Yury Kudryashov. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yury Kudryashov
-/
import Mathlib.Analysis.BoxIntegral.Partition.SubboxInduction
import Mathlib.Analysis.BoxIntegral.Partition.Split
/-!
# Filters used in box-based integrals
First we define a structure `BoxIntegral.IntegrationParams`. This structure will be used as an
argument in the definition of `BoxIntegral.integral` in order to use the same definition for a few
well-known definitions of integrals based on partitions of a rectangular box into subboxes (Riemann
integral, Henstock-Kurzweil integral, and McShane integral).
This structure holds three boolean values (see below), and encodes eight different sets of
parameters; only four of these values are used somewhere in `mathlib4`. Three of them correspond to
the integration theories listed above, and one is a generalization of the one-dimensional
Henstock-Kurzweil integral such that the divergence theorem works without additional integrability
assumptions.
Finally, for each set of parameters `l : BoxIntegral.IntegrationParams` and a rectangular box
`I : BoxIntegral.Box ι`, we define several `Filter`s that will be used either in the definition of
the corresponding integral, or in the proofs of its properties. We equip
`BoxIntegral.IntegrationParams` with a `BoundedOrder` structure such that larger
`IntegrationParams` produce larger filters.
## Main definitions
### Integration parameters
The structure `BoxIntegral.IntegrationParams` has 3 boolean fields with the following meaning:
* `bRiemann`: the value `true` means that the filter corresponds to a Riemann-style integral, i.e.
in the definition of integrability we require a constant upper estimate `r` on the size of boxes
of a tagged partition; the value `false` means that the estimate may depend on the position of the
tag.
* `bHenstock`: the value `true` means that we require that each tag belongs to its own closed box;
the value `false` means that we only require that tags belong to the ambient box.
* `bDistortion`: the value `true` means that `r` can depend on the maximal ratio of sides of the
same box of a partition. The presence of this case makes quite a few proofs harder, but we can
prove the divergence theorem only for the filter `BoxIntegral.IntegrationParams.GP = ⊥ =
{bRiemann := false, bHenstock := true, bDistortion := true}`.
### Well-known sets of parameters
Out of eight possible values of `BoxIntegral.IntegrationParams`, the following four are used in
the library.
* `BoxIntegral.IntegrationParams.Riemann` (`bRiemann = true`, `bHenstock = true`,
`bDistortion = false`): this value corresponds to the Riemann integral; in the corresponding
filter, we require that the diameters of all boxes `J` of a tagged partition are bounded from
above by a constant upper estimate that may not depend on the geometry of `J`, and each tag
belongs to the corresponding closed box.
* `BoxIntegral.IntegrationParams.Henstock` (`bRiemann = false`, `bHenstock = true`,
`bDistortion = false`): this value corresponds to the most natural generalization of
Henstock-Kurzweil integral to higher dimension; the only (but important!) difference between this
theory and Riemann integral is that instead of a constant upper estimate on the size of all boxes
of a partition, we require that the partition is *subordinate* to a possibly discontinuous
function `r : (ι → ℝ) → {x : ℝ | 0 < x}`, i.e. each box `J` is included in a closed ball with
center `π.tag J` and radius `r J`.
* `BoxIntegral.IntegrationParams.McShane` (`bRiemann = false`, `bHenstock = false`,
`bDistortion = false`): this value corresponds to the McShane integral; the only difference with
the Henstock integral is that we allow tags to be outside of their boxes; the tags still have to
be in the ambient closed box, and the partition still has to be subordinate to a function.
* `BoxIntegral.IntegrationParams.GP = â¥` (`bRiemann = false`, `bHenstock = true`,
`bDistortion = true`): this is the least integration theory in our list, i.e., all functions
integrable in any other theory are integrable in this one as well. This is a non-standard
generalization of the Henstock-Kurzweil integral to higher dimension. In dimension one, it
generates the same filter as `Henstock`. In higher dimension, this generalization defines an
integration theory such that the divergence of any Fréchet differentiable function `f` is
integrable, and its integral is equal to the sum of integrals of `f` over the faces of the box,
taken with appropriate signs.
A function `f` is `GP`-integrable if for any `ε > 0` and `c : ââ¥0` there exists
`r : (ι â â) â {x : â | 0 < x}` such that for any tagged partition `Ï` subordinate to `r`, if each
tag belongs to the corresponding closed box and for each box `J â Ï`, the maximal ratio of its
sides is less than or equal to `c`, then the integral sum of `f` over `Ï` is `ε`-close to the
integral.
### Filters and predicates on `TaggedPrepartition I`
For each value of `IntegrationParams` and a rectangular box `I`, we define a few filters on
`TaggedPrepartition I`. First, we define a predicate
```
structure BoxIntegral.IntegrationParams.MemBaseSet (l : BoxIntegral.IntegrationParams)
(I : BoxIntegral.Box ι) (c : ââ¥0) (r : (ι â â) â Ioi (0 : â))
(Ï : BoxIntegral.TaggedPrepartition I) : Prop where
```
This predicate says that
* if `l.bHenstock`, then `Ï` is a Henstock prepartition, i.e. each tag belongs to the corresponding
closed box;
* `Ï` is subordinate to `r`;
* if `l.bDistortion`, then the distortion of each box in `Ï` is less than or equal to `c`;
* if `l.bDistortion`, then there exists a prepartition `Ï'` with distortion `†c` that covers
exactly `I \ Ï.iUnion`.
The last condition is always true for `c > 1`, see TODO section for more details.
Then we define a predicate `BoxIntegral.IntegrationParams.RCond` on functions
`r : (ι â â) â {x : â | 0 < x}`. If `l.bRiemann`, then this predicate requires `r` to be a constant
function, otherwise it imposes no restrictions on `r`. We introduce this definition to prove a few
dot-notation lemmas: e.g., `BoxIntegral.IntegrationParams.RCond.min` says that the pointwise
minimum of two functions that satisfy this condition satisfies this condition as well.
Then we define four filters on `BoxIntegral.TaggedPrepartition I`.
* `BoxIntegral.IntegrationParams.toFilterDistortion`: an auxiliary filter that takes parameters
`(l : BoxIntegral.IntegrationParams) (I : BoxIntegral.Box ι) (c : ââ¥0)` and returns the
filter generated by all sets `{Ï | MemBaseSet l I c r Ï}`, where `r` is a function satisfying
the predicate `BoxIntegral.IntegrationParams.RCond l`;
* `BoxIntegral.IntegrationParams.toFilter l I`: the supremum of `l.toFilterDistortion I c`
over all `c : ââ¥0`;
* `BoxIntegral.IntegrationParams.toFilterDistortioniUnion l I c Ïâ`, where `Ïâ` is a
prepartition of `I`: the infimum of `l.toFilterDistortion I c` and the principal filter
generated by `{Ï | Ï.iUnion = Ïâ.iUnion}`;
* `BoxIntegral.IntegrationParams.toFilteriUnion l I Ïâ`: the supremum of
`l.toFilterDistortioniUnion l I c Ïâ` over all `c : ââ¥0`. This is the filter (in the case
`Ïâ = â€` is the one-box partition of `I`) used in the definition of the integral of a function
over a box.
## Implementation details
* Later we define the integral of a function over a rectangular box as the limit (if it exists) of
the integral sums along `BoxIntegral.IntegrationParams.toFilteriUnion l I â€`. While it is
possible to define the integral with a general filter on `BoxIntegral.TaggedPrepartition I` as a
parameter, many lemmas (e.g., Sacks-Henstock lemma and most results about integrability of
functions) require the filter to have a predictable structure. So, instead of adding assumptions
about the filter here and there, we define this auxiliary type that can encode all integration
theories we need in practice.
* While the definition of the integral only uses the filter
`BoxIntegral.IntegrationParams.toFilteriUnion l I â€` and partitions of a box, some lemmas
(e.g., the Henstock-Sacks lemmas) are best formulated in terms of the predicate `MemBaseSet` and
other filters defined above.
* We use `Bool` instead of `Prop` for the fields of `IntegrationParams` in order to have decidable
equality and inequalities.
## TODO
Currently, `BoxIntegral.IntegrationParams.MemBaseSet` explicitly requires that there exists a
partition of the complement `I \ Ï.iUnion` with distortion `†c`. For `c > 1`, this condition is
always true but the proof of this fact requires more API about
`BoxIntegral.Prepartition.splitMany`. We should formalize this fact, then either require `c > 1`
everywhere, or replace `†c` with `< c` so that we automatically get `c > 1` for a non-trivial
prepartition (and consider the special case `Ï = â¥` separately if needed).
## Tags
integral, rectangular box, partition, filter
-/
open Set Function Filter Metric Finset Bool
open scoped Classical
open Topology Filter NNReal
noncomputable section
namespace BoxIntegral
variable {ι : Type*} [Fintype ι] {I J : Box ι} {c câ câ : ââ¥0}
open TaggedPrepartition
/-- An `IntegrationParams` is a structure holding 3 boolean values used to define a filter to be
used in the definition of a box-integrable function.

* `bRiemann`: the value `true` means that the filter corresponds to a Riemann-style integral, i.e.
in the definition of integrability we require a constant upper estimate `r` on the size of boxes
of a tagged partition; the value `false` means that the estimate may depend on the position of the
tag.

* `bHenstock`: the value `true` means that we require that each tag belongs to its own closed box;
the value `false` means that we only require that tags belong to the ambient box.

* `bDistortion`: the value `true` means that `r` can depend on the maximal ratio of sides of the
same box of a partition. Presence of this case makes quite a few proofs harder but we can prove
the divergence theorem only for the filter `BoxIntegral.IntegrationParams.GP = ⥠=
{bRiemann := false, bHenstock := true, bDistortion := true}`.
-/
@[ext]
structure IntegrationParams : Type where
  -- The fields are `Bool` (not `Prop`) so that equality and `†` are decidable; see the
  -- instances below.
  (bRiemann bHenstock bDistortion : Bool)
variable {l lâ lâ : IntegrationParams}
namespace IntegrationParams
-- `bRiemann` is compared covariantly, while `bHenstock` and `bDistortion` are sent to `Booláµáµ`:
-- turning the latter flags on shrinks the base sets, hence yields a smaller filter.
/-- Auxiliary equivalence with a product type used to lift an order. -/
def equivProd : IntegrationParams â Bool à Booláµáµ à Booláµáµ where
  toFun l := âšl.1, OrderDual.toDual l.2, OrderDual.toDual l.3â©
  invFun l := âšl.1, OrderDual.ofDual l.2.1, OrderDual.ofDual l.2.2â©
  left_inv _ := rfl
  right_inv _ := rfl

-- The partial order is pulled back along the injection `equivProd`.
instance : PartialOrder IntegrationParams :=
  PartialOrder.lift equivProd equivProd.injective

/-- Auxiliary `OrderIso` with a product type used to lift a `BoundedOrder` structure. -/
def isoProd : IntegrationParams âo Bool à Booláµáµ à Booláµáµ :=
  âšequivProd, Iff.rflâ©

instance : BoundedOrder IntegrationParams :=
  isoProd.symm.toGaloisInsertion.liftBoundedOrder

/-- The value `BoxIntegral.IntegrationParams.GP = â¥`
(`bRiemann = false`, `bHenstock = true`, `bDistortion = true`)
corresponds to a generalization of the Henstock integral such that the Divergence theorem holds true
without additional integrability assumptions, see the module docstring for details. -/
instance : Inhabited IntegrationParams :=
  âšâ¥â©

-- Decidability comes for free because the fields are `Bool` rather than `Prop`.
instance : DecidableRel ((· †·) : IntegrationParams â IntegrationParams â Prop) :=
  fun _ _ => And.decidable

instance : DecidableEq IntegrationParams :=
  fun _ _ => decidable_of_iff _ IntegrationParams.ext_iff.symm
/-- The `BoxIntegral.IntegrationParams` corresponding to the Riemann integral. In the
corresponding filter, we require that the diameters of all boxes `J` of a tagged partition are
bounded from above by a constant upper estimate that may not depend on the geometry of `J`, and each
tag belongs to the corresponding closed box. -/
def Riemann : IntegrationParams :=
  -- fields: `âšbRiemann, bHenstock, bDistortionâ©`
  âštrue, true, falseâ©
/-- The `BoxIntegral.IntegrationParams` corresponding to the Henstock-Kurzweil integral. In the
corresponding filter, we require that the tagged partition is subordinate to a (possibly,
discontinuous) positive function `r` and each tag belongs to the corresponding closed box. -/
def Henstock : IntegrationParams where
  bRiemann := false
  bHenstock := true
  bDistortion := false
/-- The `BoxIntegral.IntegrationParams` corresponding to the McShane integral. In the
corresponding filter, we require that the tagged partition is subordinate to a (possibly,
discontinuous) positive function `r`; the tags may be outside of the corresponding closed box
(but still inside the ambient closed box `I.Icc`). -/
def McShane : IntegrationParams where
  bRiemann := false
  bHenstock := false
  bDistortion := false
/-- The `BoxIntegral.IntegrationParams` corresponding to the generalized Perron integral. In the
corresponding filter, we require that the tagged partition is subordinate to a (possibly,
discontinuous) positive function `r` and each tag belongs to the corresponding closed box. We also
require an upper estimate on the distortion of all boxes of the partition. -/
def GP : IntegrationParams := â¥

-- The inequalities below follow by comparing the boolean fields componentwise.
theorem henstock_le_riemann : Henstock †Riemann := by trivial

theorem henstock_le_mcShane : Henstock †McShane := by trivial

-- `GP` is the bottom element, so it is below every set of parameters.
theorem gp_le : GP †l :=
  bot_le
/-- The predicate corresponding to a base set of the filter defined by an
`IntegrationParams`. It says that

* if `l.bHenstock`, then `Ï` is a Henstock prepartition, i.e. each tag belongs to the corresponding
  closed box;
* `Ï` is subordinate to `r`;
* if `l.bDistortion`, then the distortion of each box in `Ï` is less than or equal to `c`;
* if `l.bDistortion`, then there exists a prepartition `Ï'` with distortion `†c` that covers
  exactly `I \ Ï.iUnion`.

The last condition is automatically verified for partitions, and is used in the proof of the
Sacks-Henstock inequality to compare two prepartitions covering the same part of the box.

It is also automatically satisfied for any `c > 1`, see TODO section of the module docstring for
details. -/
structure MemBaseSet (l : IntegrationParams) (I : Box ι) (c : ââ¥0) (r : (ι â â) â Ioi (0 : â))
    (Ï : TaggedPrepartition I) : Prop where
  /-- The prepartition `Ï` is subordinate to the radius function `r`. -/
  protected isSubordinate : Ï.IsSubordinate r
  /-- If required by `l`, each tag belongs to its own closed box. -/
  protected isHenstock : l.bHenstock â Ï.IsHenstock
  /-- If required by `l`, the distortion of every box of `Ï` is at most `c`. -/
  protected distortion_le : l.bDistortion â Ï.distortion †c
  /-- If required by `l`, the uncovered part of `I` can be covered by a prepartition with
  distortion at most `c`. -/
  protected exists_compl : l.bDistortion â â Ï' : Prepartition I,
    Ï'.iUnion = âI \ Ï.iUnion â§ Ï'.distortion †c
/-- A predicate saying that in case `l.bRiemann = true`, the function `r` is a constant. -/
def RCond {ι : Type*} (l : IntegrationParams) (r : (ι â â) â Ioi (0 : â)) : Prop :=
  l.bRiemann â â x, r x = r 0

/-- A set `s : Set (TaggedPrepartition I)` belongs to `l.toFilterDistortion I c` if there exists
a function `r : ââ¿ â (0, â)` (or a constant `r` if `l.bRiemann = true`) such that `s` contains each
prepartition `Ï` such that `l.MemBaseSet I c r Ï`. -/
def toFilterDistortion (l : IntegrationParams) (I : Box ι) (c : ââ¥0) :
    Filter (TaggedPrepartition I) :=
  -- infimum over all admissible radius functions of the principal filter of the base set
  âš (r : (ι â â) â Ioi (0 : â)) (_ : l.RCond r), ð { Ï | l.MemBaseSet I c r Ï }

/-- A set `s : Set (TaggedPrepartition I)` belongs to `l.toFilter I` if for any `c : ââ¥0` there
exists a function `r : ââ¿ â (0, â)` (or a constant `r` if `l.bRiemann = true`) such that
`s` contains each prepartition `Ï` such that `l.MemBaseSet I c r Ï`. -/
def toFilter (l : IntegrationParams) (I : Box ι) : Filter (TaggedPrepartition I) :=
  âš c : ââ¥0, l.toFilterDistortion I c

/-- A set `s : Set (TaggedPrepartition I)` belongs to `l.toFilterDistortioniUnion I c Ïâ` if
there exists a function `r : ââ¿ â (0, â)` (or a constant `r` if `l.bRiemann = true`) such that `s`
contains each prepartition `Ï` such that `l.MemBaseSet I c r Ï` and `Ï.iUnion = Ïâ.iUnion`. -/
def toFilterDistortioniUnion (l : IntegrationParams) (I : Box ι) (c : ââ¥0) (Ïâ : Prepartition I) :=
  l.toFilterDistortion I c â ð { Ï | Ï.iUnion = Ïâ.iUnion }

/-- A set `s : Set (TaggedPrepartition I)` belongs to `l.toFilteriUnion I Ïâ` if for any `c : ââ¥0`
there exists a function `r : ââ¿ â (0, â)` (or a constant `r` if `l.bRiemann = true`) such that `s`
contains each prepartition `Ï` such that `l.MemBaseSet I c r Ï` and `Ï.iUnion = Ïâ.iUnion`. -/
def toFilteriUnion (l : IntegrationParams) (I : Box ι) (Ïâ : Prepartition I) :=
  âš c : ââ¥0, l.toFilterDistortioniUnion I c Ïâ
/-- If `l.bRiemann = false`, then every radius function satisfies `l.RCond` (no constancy
requirement). -/
theorem rCond_of_bRiemann_eq_false {ι} (l : IntegrationParams) (hl : l.bRiemann = false)
    {r : (ι â â) â Ioi (0 : â)} : l.RCond r := by
  simp [RCond, hl]

/-- Restricting `l.toFilter I` to prepartitions with union `Ïâ.iUnion` gives
`l.toFilteriUnion I Ïâ`. -/
theorem toFilter_inf_iUnion_eq (l : IntegrationParams) (I : Box ι) (Ïâ : Prepartition I) :
    l.toFilter I â ð { Ï | Ï.iUnion = Ïâ.iUnion } = l.toFilteriUnion I Ïâ :=
  (iSup_inf_principal _ _).symm
variable {r râ râ : (ι â â) â Ioi (0 : â)} {Ï Ïâ Ïâ : TaggedPrepartition I}
variable (I) in
/-- Monotonicity of `MemBaseSet` in the integration parameters, the distortion bound and the
radius function, where the radius functions need to be compared only at the tags of `Ï`. -/
theorem MemBaseSet.mono' (h : lâ †lâ) (hc : câ †câ)
    (hr : â J â Ï, râ (Ï.tag J) †râ (Ï.tag J)) (hÏ : lâ.MemBaseSet I câ râ Ï) :
    lâ.MemBaseSet I câ râ Ï :=
  âšhÏ.1.mono' hr, fun hâ => hÏ.2 (le_iff_imp.1 h.2.1 hâ),
    fun hD => (hÏ.3 (le_iff_imp.1 h.2.2 hD)).trans hc,
    fun hD => (hÏ.4 (le_iff_imp.1 h.2.2 hD)).imp fun _ hÏ => âšhÏ.1, hÏ.2.trans hcâ©â©
variable (I) in
/-- Monotonicity of `MemBaseSet`, with the radius functions compared on the whole closed box
`I.Icc`. -/
@[mono]
theorem MemBaseSet.mono (h : lâ †lâ) (hc : câ †câ)
    (hr : â x â Box.Icc I, râ x †râ x) (hÏ : lâ.MemBaseSet I câ râ Ï) : lâ.MemBaseSet I câ râ Ï :=
  hÏ.mono' I h hc fun J _ => hr _ <| Ï.tag_mem_Icc J
/-- Two prepartitions in base sets (for possibly different distortion bounds) with the same union
admit a common prepartition of the complement whose distortion satisfies both bounds. -/
theorem MemBaseSet.exists_common_compl (hâ : l.MemBaseSet I câ râ Ïâ) (hâ : l.MemBaseSet I câ râ Ïâ)
    (hU : Ïâ.iUnion = Ïâ.iUnion) :
    â Ï : Prepartition I, Ï.iUnion = âI \ Ïâ.iUnion â§
      (l.bDistortion â Ï.distortion †câ) â§ (l.bDistortion â Ï.distortion †câ) := by
  -- it suffices to treat the case `câ †câ`; the other case follows by symmetry
  wlog hc : câ †câ with H
  · simpa [hU, _root_.and_comm] using
      @H _ _ I J c câ câ _ lâ lâ r râ râ Ï Ïâ Ïâ hâ hâ hU.symm (le_of_not_le hc)
  by_cases hD : (l.bDistortion : Prop)
  -- if a distortion bound is required, the complement provided by `hâ` satisfies both bounds
  · rcases hâ.4 hD with âšÏ, hÏU, hÏcâ©
    exact âšÏ, hÏU, fun _ => hÏc, fun _ => hÏc.trans hcâ©
  -- otherwise the distortion conditions are vacuous and any complement works
  · exact âšÏâ.toPrepartition.compl, Ïâ.toPrepartition.iUnion_compl,
      fun h => (hD h).elim, fun h => (hD h).elimâ©
/-- Extending a prepartition in the base set by a tagged version of a prepartition of the
complement (subordinate to `râ`) stays in the base set for the smaller radius function `râ`. -/
protected theorem MemBaseSet.unionComplToSubordinate (hÏâ : l.MemBaseSet I c râ Ïâ)
    (hle : â x â Box.Icc I, râ x †râ x) {Ïâ : Prepartition I} (hU : Ïâ.iUnion = âI \ Ïâ.iUnion)
    (hc : l.bDistortion â Ïâ.distortion †c) :
    l.MemBaseSet I c râ (Ïâ.unionComplToSubordinate Ïâ hU râ) :=
  âšhÏâ.1.disjUnion ((Ïâ.isSubordinate_toSubordinate râ).mono hle) _,
    fun h => (hÏâ.2 h).disjUnion (Ïâ.isHenstock_toSubordinate _) _,
    fun h => (distortion_unionComplToSubordinate _ _ _ _).trans_le (max_le (hÏâ.3 h) (hc h)),
    -- the extended prepartition covers all of `I`, so `â¥` covers its (empty) complement
    fun _ => âšâ¥, by simpâ©â©
/-- Restricting a tagged prepartition in the base set to the boxes satisfying a predicate `p`
keeps it in the base set. -/
protected theorem MemBaseSet.filter (hÏ : l.MemBaseSet I c r Ï) (p : Box ι â Prop) :
    l.MemBaseSet I c r (Ï.filter p) := by
  refine âšfun J hJ => hÏ.1 J (Ï.mem_filter.1 hJ).1, fun hH J hJ => hÏ.2 hH J (Ï.mem_filter.1 hJ).1,
    fun hD => (distortion_filter_le _ _).trans (hÏ.3 hD), fun hD => ?_â©
  -- For the complement condition: combine a complement `Ïâ` of the original `Ï` with the boxes
  -- of `Ï` that were filtered out.
  rcases hÏ.4 hD with âšÏâ, hÏâU, hcâ©
  set Ïâ := Ï.filter fun J => ¬p J
  have : Disjoint Ïâ.iUnion Ïâ.iUnion := by
    simpa [Ïâ, hÏâU] using disjoint_sdiff_self_left.mono_right sdiff_le
  refine âšÏâ.disjUnion Ïâ.toPrepartition this, ?_, ?_â©
  -- the union of the combined prepartition is exactly the complement of the filtered union
  · suffices âI \ Ï.iUnion ⪠Ï.iUnion \ (Ï.filter p).iUnion = âI \ (Ï.filter p).iUnion by
      simp [Ïâ, *]
    have h : (Ï.filter p).iUnion â Ï.iUnion :=
      biUnion_subset_biUnion_left (Finset.filter_subset _ _)
    ext x
    fconstructor
    · rintro (âšhxI, hxÏâ© | âšhxÏ, hxpâ©)
      exacts [âšhxI, mt (@h x) hxÏâ©, âšÏ.iUnion_subset hxÏ, hxpâ©]
    · rintro âšhxI, hxpâ©
      by_cases hxÏ : x â Ï.iUnion
      exacts [Or.inr âšhxÏ, hxpâ©, Or.inl âšhxI, hxÏâ©]
  -- the filtered-out boxes inherit the distortion bound from `Ï`
  · have : (Ï.filter fun J => ¬p J).distortion †c := (distortion_filter_le _ _).trans (hÏ.3 hD)
    simpa [hc]
/-- A tagged prepartition assembled from tagged partitions of the boxes of a prepartition `Ï`
belongs to the base set, provided each piece does and the complement of `Ï` has distortion at
most `c`. -/
theorem biUnionTagged_memBaseSet {Ï : Prepartition I} {Ïi : â J, TaggedPrepartition J}
    (h : â J â Ï, l.MemBaseSet J c r (Ïi J)) (hp : â J â Ï, (Ïi J).IsPartition)
    (hc : l.bDistortion â Ï.compl.distortion †c) : l.MemBaseSet I c r (Ï.biUnionTagged Ïi) := by
  refine âšTaggedPrepartition.isSubordinate_biUnionTagged.2 fun J hJ => (h J hJ).1,
    fun hH => TaggedPrepartition.isHenstock_biUnionTagged.2 fun J hJ => (h J hJ).2 hH,
    fun hD => ?_, fun hD => ?_â©
  -- distortion of the bUnion is the `sup` of the distortions of the pieces
  · rw [Prepartition.distortion_biUnionTagged, Finset.sup_le_iff]
    exact fun J hJ => (h J hJ).3 hD
  -- `Ï.compl` covers the uncovered part, since each `Ïi J` is a partition of `J`
  · refine âš_, ?_, hc hDâ©
    rw [Ï.iUnion_compl, â Ï.iUnion_biUnion_partition hp]
    rfl
/-- `RCond` is antitone in the `IntegrationParams` argument. -/
@[mono]
theorem RCond.mono {ι : Type*} {r : (ι â â) â Ioi (0 : â)} (h : lâ †lâ) (hr : lâ.RCond r) :
    lâ.RCond r :=
  fun hR => hr (le_iff_imp.1 h.1 hR)

/-- The pointwise minimum of two functions satisfying `RCond` satisfies it as well. -/
nonrec theorem RCond.min {ι : Type*} {râ râ : (ι â â) â Ioi (0 : â)} (hâ : l.RCond râ)
    (hâ : l.RCond râ) : l.RCond fun x => min (râ x) (râ x) :=
  fun hR x => congr_argâ min (hâ hR x) (hâ hR x)
/-- `toFilterDistortion` is monotone in the integration parameters and the distortion bound. -/
@[mono]
theorem toFilterDistortion_mono (I : Box ι) (h : lâ †lâ) (hc : câ †câ) :
    lâ.toFilterDistortion I câ †lâ.toFilterDistortion I câ :=
  iInf_mono fun _ =>
    iInf_mono' fun hr =>
      âšhr.mono h, principal_mono.2 fun _ => MemBaseSet.mono I h hc fun _ _ => le_rflâ©

/-- `toFilter` is monotone in the integration parameters. -/
@[mono]
theorem toFilter_mono (I : Box ι) {lâ lâ : IntegrationParams} (h : lâ †lâ) :
    lâ.toFilter I †lâ.toFilter I :=
  iSup_mono fun _ => toFilterDistortion_mono I h le_rfl

/-- `toFilteriUnion` is monotone in the integration parameters. -/
@[mono]
theorem toFilteriUnion_mono (I : Box ι) {lâ lâ : IntegrationParams} (h : lâ †lâ)
    (Ïâ : Prepartition I) : lâ.toFilteriUnion I Ïâ †lâ.toFilteriUnion I Ïâ :=
  iSup_mono fun _ => inf_le_inf_right _ <| toFilterDistortion_mono _ h le_rfl

/-- `toFilteriUnion` depends only on the union of the prepartition `Ïâ`. -/
theorem toFilteriUnion_congr (I : Box ι) (l : IntegrationParams) {Ïâ Ïâ : Prepartition I}
    (h : Ïâ.iUnion = Ïâ.iUnion) : l.toFilteriUnion I Ïâ = l.toFilteriUnion I Ïâ := by
  simp only [toFilteriUnion, toFilterDistortioniUnion, h]
/-- `l.toFilterDistortion I c` has a basis indexed by the admissible radius functions, with basis
sets the corresponding base sets. -/
theorem hasBasis_toFilterDistortion (l : IntegrationParams) (I : Box ι) (c : ââ¥0) :
    (l.toFilterDistortion I c).HasBasis l.RCond fun r => { Ï | l.MemBaseSet I c r Ï } :=
  hasBasis_biInf_principal'
    -- the base sets are directed downwards: use the pointwise `min` of two radius functions
    (fun _ hrâ _ hrâ =>
      âš_, hrâ.min hrâ, fun _ => MemBaseSet.mono _ le_rfl le_rfl fun _ _ => min_le_left _ _,
        fun _ => MemBaseSet.mono _ le_rfl le_rfl fun _ _ => min_le_right _ _â©)
    -- the index set is nonempty: the constant function `1` is admissible
    âšfun _ => âš1, Set.mem_Ioi.2 zero_lt_oneâ©, fun _ _ => rflâ©

/-- Basis of `l.toFilterDistortioniUnion I c Ïâ`: the base sets, intersected with the constraint
`Ï.iUnion = Ïâ.iUnion`. -/
theorem hasBasis_toFilterDistortioniUnion (l : IntegrationParams) (I : Box ι) (c : ââ¥0)
    (Ïâ : Prepartition I) :
    (l.toFilterDistortioniUnion I c Ïâ).HasBasis l.RCond fun r =>
      { Ï | l.MemBaseSet I c r Ï â§ Ï.iUnion = Ïâ.iUnion } :=
  (l.hasBasis_toFilterDistortion I c).inf_principal _

/-- Basis of `l.toFilteriUnion I Ïâ`, indexed by a family of radius functions, one for each
distortion bound `c`. -/
theorem hasBasis_toFilteriUnion (l : IntegrationParams) (I : Box ι) (Ïâ : Prepartition I) :
    (l.toFilteriUnion I Ïâ).HasBasis (fun r : ââ¥0 â (ι â â) â Ioi (0 : â) => â c, l.RCond (r c))
      fun r => { Ï | â c, l.MemBaseSet I c (r c) Ï â§ Ï.iUnion = Ïâ.iUnion } := by
  have := fun c => l.hasBasis_toFilterDistortioniUnion I c Ïâ
  simpa only [setOf_and, setOf_exists] using hasBasis_iSup this

/-- Special case of `hasBasis_toFilteriUnion` for `Ïâ = â€`: the union constraint becomes being a
partition of `I`. -/
theorem hasBasis_toFilteriUnion_top (l : IntegrationParams) (I : Box ι) :
    (l.toFilteriUnion I â€).HasBasis (fun r : ââ¥0 â (ι â â) â Ioi (0 : â) => â c, l.RCond (r c))
      fun r => { Ï | â c, l.MemBaseSet I c (r c) Ï â§ Ï.IsPartition } := by
  simpa only [TaggedPrepartition.isPartition_iff_iUnion_eq, Prepartition.iUnion_top] using
    l.hasBasis_toFilteriUnion I â€

/-- Basis of `l.toFilter I`, with no constraint on the union. -/
theorem hasBasis_toFilter (l : IntegrationParams) (I : Box ι) :
    (l.toFilter I).HasBasis (fun r : ââ¥0 â (ι â â) â Ioi (0 : â) => â c, l.RCond (r c))
      fun r => { Ï | â c, l.MemBaseSet I c (r c) Ï } := by
  simpa only [setOf_exists] using hasBasis_iSup (l.hasBasis_toFilterDistortion I)
/-- Embedding tagged partitions of `I` into a larger box `J †I` maps the filter
`l.toFilteriUnion I â€` to the filter associated with the single-box prepartition `{I}` of `J`. -/
theorem tendsto_embedBox_toFilteriUnion_top (l : IntegrationParams) (h : I †J) :
    Tendsto (TaggedPrepartition.embedBox I J h) (l.toFilteriUnion I â€)
      (l.toFilteriUnion J (Prepartition.single J I h)) := by
  simp only [toFilteriUnion, tendsto_iSup]; intro c
  set Ïâ := Prepartition.single J I h
  -- enlarge the distortion bound so that the complement of `{I}` inside `J` is also admissible
  refine le_iSup_of_le (max c Ïâ.compl.distortion) ?_
  refine ((l.hasBasis_toFilterDistortioniUnion I c â€).tendsto_iff
    (l.hasBasis_toFilterDistortioniUnion J _ _)).2 fun r hr => ?_
  -- the same radius function works for the embedded prepartitions
  refine âšr, hr, fun Ï hÏ => ?_â©
  rw [mem_setOf_eq, Prepartition.iUnion_top] at hÏ
  refine âšâšhÏ.1.1, hÏ.1.2, fun hD => le_trans (hÏ.1.3 hD) (le_max_left _ _), fun _ => ?_â©, ?_â©
  -- the complement of the embedded prepartition is the complement of `{I}` in `J`
  · refine âš_, Ïâ.iUnion_compl.trans ?_, le_max_right _ _â©
    congr 1
    exact (Prepartition.iUnion_single h).trans hÏ.2.symm
  · exact hÏ.2.trans (Prepartition.iUnion_single _).symm
/-- Given a prepartition `Ïâ` whose distortion and complement distortion are bounded by `c`, there
exists a tagged prepartition in the base set that refines `Ïâ` and covers the same set. -/
theorem exists_memBaseSet_le_iUnion_eq (l : IntegrationParams) (Ïâ : Prepartition I)
    (hcâ : Ïâ.distortion †c) (hcâ : Ïâ.compl.distortion †c) (r : (ι â â) â Ioi (0 : â)) :
    â Ï, l.MemBaseSet I c r Ï â§ Ï.toPrepartition †Ïâ â§ Ï.iUnion = Ïâ.iUnion := by
  rcases Ïâ.exists_tagged_le_isHenstock_isSubordinate_iUnion_eq r with âšÏ, hle, hH, hr, hd, hUâ©
  refine âšÏ, âšhr, fun _ => hH, fun _ => hd.trans_le hcâ, fun _ => âšÏâ.compl, ?_, hcââ©â©, âšhle, hUâ©â©
  -- the complement of `Ïâ` also covers the complement of `Ï`, since their unions agree
  exact Prepartition.compl_congr hU âž Ï.toPrepartition.iUnion_compl

/-- For any box `I` with distortion at most `c`, the base set contains a genuine partition. -/
theorem exists_memBaseSet_isPartition (l : IntegrationParams) (I : Box ι) (hc : I.distortion †c)
    (r : (ι â â) â Ioi (0 : â)) : â Ï, l.MemBaseSet I c r Ï â§ Ï.IsPartition := by
  rw [â Prepartition.distortion_top] at hc
  have hc' : (†: Prepartition I).compl.distortion †c := by simp
  simpa [isPartition_iff_iUnion_eq] using l.exists_memBaseSet_le_iUnion_eq †hc hc' r
/-- `l.toFilterDistortioniUnion I c Ïâ` is nontrivial once `c` dominates the distortions of `Ïâ`
and of its complement. -/
theorem toFilterDistortioniUnion_neBot (l : IntegrationParams) (I : Box ι) (Ïâ : Prepartition I)
    (hcâ : Ïâ.distortion †c) (hcâ : Ïâ.compl.distortion †c) :
    (l.toFilterDistortioniUnion I c Ïâ).NeBot :=
  ((l.hasBasis_toFilterDistortion I _).inf_principal _).neBot_iff.2
    fun {r} _ => (l.exists_memBaseSet_le_iUnion_eq Ïâ hcâ hcâ r).imp fun _ hÏ => âšhÏ.1, hÏ.2.2â©

-- The canonical choice `c = max Ïâ.distortion Ïâ.compl.distortion` always works.
instance toFilterDistortioniUnion_neBot' (l : IntegrationParams) (I : Box ι) (Ïâ : Prepartition I) :
    (l.toFilterDistortioniUnion I (max Ïâ.distortion Ïâ.compl.distortion) Ïâ).NeBot :=
  l.toFilterDistortioniUnion_neBot I Ïâ (le_max_left _ _) (le_max_right _ _)

instance toFilterDistortion_neBot (l : IntegrationParams) (I : Box ι) :
    (l.toFilterDistortion I I.distortion).NeBot := by
  simpa using (l.toFilterDistortioniUnion_neBot' I â€).mono inf_le_left

instance toFilter_neBot (l : IntegrationParams) (I : Box ι) : (l.toFilter I).NeBot :=
  (l.toFilterDistortion_neBot I).mono <| le_iSup _ _

instance toFilteriUnion_neBot (l : IntegrationParams) (I : Box ι) (Ïâ : Prepartition I) :
    (l.toFilteriUnion I Ïâ).NeBot :=
  (l.toFilterDistortioniUnion_neBot' I Ïâ).mono <|
    le_iSup (fun c => l.toFilterDistortioniUnion I c Ïâ) _

/-- In `l.toFilteriUnion I â€`, eventually every tagged prepartition is a partition of `I`. -/
theorem eventually_isPartition (l : IntegrationParams) (I : Box ι) :
    âá¶ Ï in l.toFilteriUnion I â€, TaggedPrepartition.IsPartition Ï :=
  eventually_iSup.2 fun _ =>
    eventually_inf_principal.2 <|
      eventually_of_forall fun Ï h =>
        Ï.isPartition_iff_iUnion_eq.2 (h.trans Prepartition.iUnion_top)
end IntegrationParams
end BoxIntegral
|
Analysis\BoxIntegral\Partition\Measure.lean | /-
Copyright (c) 2021 Yury Kudryashov. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yury Kudryashov
-/
import Mathlib.Analysis.BoxIntegral.Partition.Additive
import Mathlib.MeasureTheory.Measure.Lebesgue.Basic
/-!
# Box-additive functions defined by measures
In this file we prove a few simple facts about rectangular boxes, partitions, and measures:
- given a box `I : Box ι`, its coercion to `Set (ι â â)` and `I.Icc` are measurable sets;
- if `ÎŒ` is a locally finite measure, then `(I : Set (ι â â))` and `I.Icc` have finite measure;
- if `Ό` is a locally finite measure, then `fun J ⊠(Ό J).toReal` is a box additive function.
For the last statement, we both prove it as a proposition and define a bundled
`BoxIntegral.BoxAdditiveMap` function.
## Tags
rectangular box, measure
-/
open Set
noncomputable section
open scoped ENNReal BoxIntegral
variable {ι : Type*}
namespace BoxIntegral
open MeasureTheory
namespace Box
variable (I : Box ι)
/-- The closed box `I.Icc` has finite measure w.r.t. any locally finite measure. -/
theorem measure_Icc_lt_top (ÎŒ : Measure (ι â â)) [IsLocallyFiniteMeasure ÎŒ] : ÎŒ (Box.Icc I) < â :=
  -- `I.Icc` is compact, and compact sets have finite locally finite measure
  show ÎŒ (Icc I.lower I.upper) < â from I.isCompact_Icc.measure_lt_top

/-- A box has finite measure w.r.t. any locally finite measure. -/
theorem measure_coe_lt_top (ÎŒ : Measure (ι â â)) [IsLocallyFiniteMeasure ÎŒ] : ÎŒ I < â :=
  (measure_mono <| coe_subset_Icc).trans_lt (I.measure_Icc_lt_top Ό)

section Countable

variable [Countable ι]

/-- A box, viewed as a set in `ι â â`, is measurable. -/
theorem measurableSet_coe : MeasurableSet (I : Set (ι â â)) := by
  rw [coe_eq_pi]
  exact MeasurableSet.univ_pi fun i => measurableSet_Ioc

/-- The closed box `I.Icc` is measurable. -/
theorem measurableSet_Icc : MeasurableSet (Box.Icc I) :=
  _root_.measurableSet_Icc

/-- The open box `I.Ioo` is measurable. -/
theorem measurableSet_Ioo : MeasurableSet (Box.Ioo I) :=
  MeasurableSet.univ_pi fun _ => _root_.measurableSet_Ioo

end Countable

variable [Fintype ι]

/-- A box is a.e. equal (w.r.t. `volume`) to the corresponding closed box. -/
theorem coe_ae_eq_Icc : (I : Set (ι â â)) =áµ[volume] Box.Icc I := by
  rw [coe_eq_pi]
  exact Measure.univ_pi_Ioc_ae_eq_Icc

/-- The open box `I.Ioo` is a.e. equal (w.r.t. `volume`) to the closed box `I.Icc`. -/
theorem Ioo_ae_eq_Icc : Box.Ioo I =áµ[volume] Box.Icc I :=
  Measure.univ_pi_Ioo_ae_eq_Icc
end Box
/-- The real-valued measure of the union of a prepartition is the sum of the measures of its
boxes. -/
theorem Prepartition.measure_iUnion_toReal [Finite ι] {I : Box ι} (Ï : Prepartition I)
    (ÎŒ : Measure (ι â â)) [IsLocallyFiniteMeasure ÎŒ] :
    (ÎŒ Ï.iUnion).toReal = â J â Ï.boxes, (ÎŒ J).toReal := by
  -- the boxes are pairwise disjoint measurable sets of finite measure
  erw [â ENNReal.toReal_sum, Ï.iUnion_def, measure_biUnion_finset Ï.pairwiseDisjoint]
  exacts [fun J _ => J.measurableSet_coe, fun J _ => (J.measure_coe_lt_top Ό).ne]
end BoxIntegral
open BoxIntegral BoxIntegral.Box
namespace MeasureTheory
namespace Measure
/-- If `ÎŒ` is a locally finite measure on `ââ¿`, then `fun J ⊠(ÎŒ J).toReal` is a box-additive
function. -/
@[simps]
def toBoxAdditive [Finite ι] (ÎŒ : Measure (ι â â)) [IsLocallyFiniteMeasure ÎŒ] : ι âáµáµ[â€] â where
  toFun J := (Ό J).toReal
  -- additivity follows from `Prepartition.measure_iUnion_toReal` applied to a partition
  sum_partition_boxes' J _ Ï hÏ := by rw [â Ï.measure_iUnion_toReal, hÏ.iUnion_eq]
end Measure
end MeasureTheory
namespace BoxIntegral
open MeasureTheory
namespace Box
variable [Fintype ι]
-- @[simp] -- Porting note: simp normal form is `volume_apply'`
/-- The box-additive map induced by `volume` sends a box to the product of its side lengths. -/
theorem volume_apply (I : Box ι) :
    (volume : Measure (ι â â)).toBoxAdditive I = â i, (I.upper i - I.lower i) := by
  rw [Measure.toBoxAdditive_apply, coe_eq_pi, Real.volume_pi_Ioc_toReal I.lower_le_upper]

/-- The volume of a box is the product of its side lengths. -/
@[simp]
theorem volume_apply' (I : Box ι) :
    ((volume : Measure (ι â â)) I).toReal = â i, (I.upper i - I.lower i) := by
  rw [coe_eq_pi, Real.volume_pi_Ioc_toReal I.lower_le_upper]

/-- The volume of a box equals the volume of its `i`-th face times its `i`-th side length. -/
theorem volume_face_mul {n} (i : Fin (n + 1)) (I : Box (Fin (n + 1))) :
    (â j, ((I.face i).upper j - (I.face i).lower j)) * (I.upper i - I.lower i) =
      â j, (I.upper j - I.lower j) := by
  simp only [face_lower, face_upper, (· â ·), Fin.prod_univ_succAbove _ i, mul_comm]
end Box
namespace BoxAdditiveMap
variable [Fintype ι]
/-- Box-additive map sending each box `I` to the continuous linear endomorphism
`x ⊠(volume I).toReal ⢠x`. -/
protected def volume {E : Type*} [NormedAddCommGroup E] [NormedSpace â E] : ι âáµáµ E âL[â] E :=
  (volume : Measure (ι â â)).toBoxAdditive.toSMul

/-- `BoxAdditiveMap.volume` acts by scaling by the product of the side lengths of the box. -/
theorem volume_apply {E : Type*} [NormedAddCommGroup E] [NormedSpace â E] (I : Box ι) (x : E) :
    BoxAdditiveMap.volume I x = (â j, (I.upper j - I.lower j)) ⢠x := by
  rw [BoxAdditiveMap.volume, toSMul_apply]
  exact congr_argâ (· ⢠·) I.volume_apply rfl
end BoxAdditiveMap
end BoxIntegral
|
Analysis\BoxIntegral\Partition\Split.lean | /-
Copyright (c) 2021 Yury Kudryashov. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yury Kudryashov
-/
import Mathlib.Analysis.BoxIntegral.Partition.Basic
/-!
# Split a box along one or more hyperplanes
## Main definitions
A hyperplane `{x : ι â â | x i = a}` splits a rectangular box `I : BoxIntegral.Box ι` into two
smaller boxes. If `a â Ioo (I.lower i, I.upper i)`, then one of these boxes is empty, so it is not a
box in the sense of `BoxIntegral.Box`.
We introduce the following definitions.
* `BoxIntegral.Box.splitLower I i a` and `BoxIntegral.Box.splitUpper I i a` are these boxes (as
`WithBot (BoxIntegral.Box ι)`);
* `BoxIntegral.Prepartition.split I i a` is the partition of `I` made of these two boxes (or of one
box `I` if one of these boxes is empty);
* `BoxIntegral.Prepartition.splitMany I s`, where `s : Finset (ι à â)` is a finite set of
hyperplanes `{x : ι â â | x i = a}` encoded as pairs `(i, a)`, is the partition of `I` made by
cutting it along all the hyperplanes in `s`.
## Main results
The main result `BoxIntegral.Prepartition.exists_iUnion_eq_diff` says that any prepartition `Ï` of
`I` admits a prepartition `Ï'` of `I` that covers exactly `I \ Ï.iUnion`. One of these prepartitions
is available as `BoxIntegral.Prepartition.compl`.
## Tags
rectangular box, partition, hyperplane
-/
noncomputable section
open scoped Classical
open Filter
open Function Set Filter
namespace BoxIntegral
variable {ι M : Type*} {n : â}
namespace Box
variable {I : Box ι} {i : ι} {x : â} {y : ι â â}
/-- Given a box `I` and `x â (I.lower i, I.upper i)`, the hyperplane `{y : ι â â | y i = x}` splits
`I` into two boxes. `BoxIntegral.Box.splitLower I i x` is the box `I ⩠{y | y i †x}`
(if it is nonempty). As usual, we represent a box that may be empty as
`WithBot (BoxIntegral.Box ι)`. -/
def splitLower (I : Box ι) (i : ι) (x : â) : WithBot (Box ι) :=
  -- clipping with `min` keeps the upper bound valid even when `x` is above the box
  mk' I.lower (update I.upper i (min x (I.upper i)))

/-- The set underlying `I.splitLower i x` is the part of `I` (weakly) below the hyperplane. -/
@[simp]
theorem coe_splitLower : (splitLower I i x : Set (ι â â)) = âI â© { y | y i †x } := by
  rw [splitLower, coe_mk']
  ext y
  simp only [mem_univ_pi, mem_Ioc, mem_inter_iff, mem_coe, mem_setOf_eq, forall_and, â Pi.le_def,
    le_update_iff, le_min_iff, and_assoc, and_forall_ne (p := fun j => y j †upper I j) i, mem_def]
  rw [and_comm (a := y i †x)]

/-- The lower part of a split is contained in the original box. -/
theorem splitLower_le : I.splitLower i x †I :=
  withBotCoe_subset_iff.1 <| by simp

/-- The lower part is empty iff the splitting value is (weakly) below the box. -/
@[simp]
theorem splitLower_eq_bot {i x} : I.splitLower i x = ⥠â x †I.lower i := by
  rw [splitLower, mk'_eq_bot, exists_update_iff I.upper fun j y => y †I.lower j]
  simp [(I.lower_lt_upper _).not_le]

/-- The lower part is the whole box iff the splitting value is (weakly) above the box. -/
@[simp]
theorem splitLower_eq_self : I.splitLower i x = I â I.upper i †x := by
  simp [splitLower, update_eq_iff]

/-- Explicit description of the lower part when the hyperplane genuinely crosses the box. -/
theorem splitLower_def [DecidableEq ι] {i x} (h : x â Ioo (I.lower i) (I.upper i))
    (h' : â j, I.lower j < update I.upper i x j :=
      (forall_update_iff I.upper fun j y => I.lower j < y).2
        âšh.1, fun j _ => I.lower_lt_upper _â©) :
    I.splitLower i x = (âšI.lower, update I.upper i x, h'â© : Box ι) := by
  simp (config := { unfoldPartialApp := true }) only [splitLower, mk'_eq_coe, min_eq_left h.2.le,
    update, and_self]
/-- Given a box `I` and `x â (I.lower i, I.upper i)`, the hyperplane `{y : ι â â | y i = x}` splits
`I` into two boxes. `BoxIntegral.Box.splitUpper I i x` is the box `I â© {y | x < y i}`
(if it is nonempty). As usual, we represent a box that may be empty as
`WithBot (BoxIntegral.Box ι)`. -/
def splitUpper (I : Box ι) (i : ι) (x : â) : WithBot (Box ι) :=
  -- clipping with `max` keeps the lower bound valid even when `x` is below the box
  mk' (update I.lower i (max x (I.lower i))) I.upper

/-- The set underlying `I.splitUpper i x` is the part of `I` strictly above the hyperplane. -/
@[simp]
theorem coe_splitUpper : (splitUpper I i x : Set (ι â â)) = âI â© { y | x < y i } := by
  rw [splitUpper, coe_mk']
  ext y
  simp only [mem_univ_pi, mem_Ioc, mem_inter_iff, mem_coe, mem_setOf_eq, forall_and,
    forall_update_iff I.lower fun j z => z < y j, max_lt_iff, and_assoc (a := x < y i),
    and_forall_ne (p := fun j => lower I j < y j) i, mem_def]
  exact and_comm

/-- The upper part of a split is contained in the original box. -/
theorem splitUpper_le : I.splitUpper i x †I :=
  withBotCoe_subset_iff.1 <| by simp

/-- The upper part is empty iff the splitting value is (weakly) above the box. -/
@[simp]
theorem splitUpper_eq_bot {i x} : I.splitUpper i x = ⥠â I.upper i †x := by
  rw [splitUpper, mk'_eq_bot, exists_update_iff I.lower fun j y => I.upper j †y]
  simp [(I.lower_lt_upper _).not_le]

/-- The upper part is the whole box iff the splitting value is (weakly) below the box. -/
@[simp]
theorem splitUpper_eq_self : I.splitUpper i x = I â x †I.lower i := by
  simp [splitUpper, update_eq_iff]

/-- Explicit description of the upper part when the hyperplane genuinely crosses the box. -/
theorem splitUpper_def [DecidableEq ι] {i x} (h : x â Ioo (I.lower i) (I.upper i))
    (h' : â j, update I.lower i x j < I.upper j :=
      (forall_update_iff I.lower fun j y => y < I.upper j).2
        âšh.2, fun j _ => I.lower_lt_upper _â©) :
    I.splitUpper i x = (âšupdate I.lower i x, I.upper, h'â© : Box ι) := by
  simp (config := { unfoldPartialApp := true }) only [splitUpper, mk'_eq_coe, max_eq_left h.1.le,
    update, and_self]
/-- The lower and upper parts of a split are disjoint. -/
theorem disjoint_splitLower_splitUpper (I : Box ι) (i : ι) (x : â) :
    Disjoint (I.splitLower i x) (I.splitUpper i x) := by
  rw [â disjoint_withBotCoe, coe_splitLower, coe_splitUpper]
  refine (Disjoint.inf_left' _ ?_).inf_right' _
  rw [Set.disjoint_left]
  -- a point cannot satisfy `y i †x` and `x < y i` at the same time
  exact fun y (hle : y i †x) hlt => not_lt_of_le hle hlt

/-- The two parts of a split are never equal (even when one of them is empty). -/
theorem splitLower_ne_splitUpper (I : Box ι) (i : ι) (x : â) :
    I.splitLower i x â I.splitUpper i x := by
  cases' le_or_lt x (I.lower i) with h
  -- if `x †I.lower i`, then the lower part is `â¥` while the upper part is all of `I`
  · rw [splitUpper_eq_self.2 h, splitLower_eq_bot.2 h]
    exact WithBot.bot_ne_coe
  -- otherwise the lower part is nonempty, and disjoint nonempty values are distinct
  · refine (disjoint_splitLower_splitUpper I i x).ne ?_
    rwa [Ne, splitLower_eq_bot, not_le]
end Box
namespace Prepartition
variable {I J : Box ι} {i : ι} {x : â}
/-- The partition of `I : Box ι` into the boxes `I ∩ {y | y i ≤ x}` and `I ∩ {y | x < y i}`.
One of these boxes can be empty, then this partition is just the single-box partition `⊤`. -/
def split (I : Box ι) (i : ι) (x : ℝ) : Prepartition I :=
  -- `ofWithBot` drops any `⊥` entries, so an empty half simply disappears.
  ofWithBot {I.splitLower i x, I.splitUpper i x}
    (by
      simp only [Finset.mem_insert, Finset.mem_singleton]
      rintro J (rfl | rfl)
      exacts [Box.splitLower_le, Box.splitUpper_le])
    (by
      simp only [Finset.coe_insert, Finset.coe_singleton, true_and_iff, Set.mem_singleton_iff,
        pairwise_insert_of_symmetric symmetric_disjoint, pairwise_singleton]
      rintro J rfl -
      exact I.disjoint_splitLower_splitUpper i x)

-- Membership in `split` characterized on the `WithBot` coercion.
@[simp]
theorem mem_split_iff : J ∈ split I i x ↔ ↑J = I.splitLower i x ∨ ↑J = I.splitUpper i x := by
  simp [split]

-- Membership in `split` characterized on underlying sets.
theorem mem_split_iff' : J ∈ split I i x ↔
    (J : Set (ι → ℝ)) = ↑I ∩ { y | y i ≤ x } ∨ (J : Set (ι → ℝ)) = ↑I ∩ { y | x < y i } := by
  simp [mem_split_iff, ← Box.withBotCoe_inj]

-- The two halves cover all of `I`, so `split` is a partition (see below).
@[simp]
theorem iUnion_split (I : Box ι) (i : ι) (x : ℝ) : (split I i x).iUnion = I := by
  simp [split, ← inter_union_distrib_left, ← setOf_or, le_or_lt]

theorem isPartitionSplit (I : Box ι) (i : ι) (x : ℝ) : IsPartition (split I i x) :=
  isPartition_iff_iUnion_eq.2 <| iUnion_split I i x

-- Porting note: In the type, changed `Option.elim` to `Option.elim'`
-- A sum over the boxes of `split` reduces to (at most) two terms; `elim' 0 f`
-- contributes `0` for an empty half.
theorem sum_split_boxes {M : Type*} [AddCommMonoid M] (I : Box ι) (i : ι) (x : ℝ) (f : Box ι → M) :
    (∑ J ∈ (split I i x).boxes, f J) =
      (I.splitLower i x).elim' 0 f + (I.splitUpper i x).elim' 0 f := by
  rw [split, sum_ofWithBot, Finset.sum_pair (I.splitLower_ne_splitUpper i x)]

/-- If `x ∉ (I.lower i, I.upper i)`, then the hyperplane `{y | y i = x}` does not split `I`. -/
theorem split_of_not_mem_Ioo (h : x ∉ Ioo (I.lower i) (I.upper i)) : split I i x = ⊤ := by
  refine ((isPartitionTop I).eq_of_boxes_subset fun J hJ => ?_).symm
  rcases mem_top.1 hJ with rfl; clear hJ
  rw [mem_boxes, mem_split_iff]
  rw [mem_Ioo, not_and_or, not_lt, not_lt] at h
  -- `x ≤ lower i` makes the upper half all of `I`; `upper i ≤ x` the lower half.
  cases h <;> [right; left]
  · rwa [eq_comm, Box.splitUpper_eq_self]
  · rwa [eq_comm, Box.splitLower_eq_self]

-- A box of `split I i x` containing a point with `y i ≤ x` must be the lower half.
theorem coe_eq_of_mem_split_of_mem_le {y : ι → ℝ} (h₁ : J ∈ split I i x) (h₂ : y ∈ J)
    (h₃ : y i ≤ x) : (J : Set (ι → ℝ)) = ↑I ∩ { y | y i ≤ x } := by
  refine (mem_split_iff'.1 h₁).resolve_right fun H => ?_
  rw [← Box.mem_coe, H] at h₂
  exact h₃.not_lt h₂.2

-- A box of `split I i x` containing a point with `x < y i` must be the upper half.
theorem coe_eq_of_mem_split_of_lt_mem {y : ι → ℝ} (h₁ : J ∈ split I i x) (h₂ : y ∈ J)
    (h₃ : x < y i) : (J : Set (ι → ℝ)) = ↑I ∩ { y | x < y i } := by
  refine (mem_split_iff'.1 h₁).resolve_left fun H => ?_
  rw [← Box.mem_coe, H] at h₂
  exact h₃.not_le h₂.2

-- Restricting a split of an ambient box `J` to a sub-box `I` gives the split of `I`.
@[simp]
theorem restrict_split (h : I ≤ J) (i : ι) (x : ℝ) : (split J i x).restrict I = split I i x := by
  refine ((isPartitionSplit J i x).restrict h).eq_of_boxes_subset ?_
  simp only [Finset.subset_iff, mem_boxes, mem_restrict', exists_prop, mem_split_iff']
  have : ∀ s, (I ∩ s : Set (ι → ℝ)) ⊆ J := fun s => inter_subset_left.trans h
  rintro J₁ ⟨J₂, H₂ | H₂, H₁⟩ <;> [left; right] <;>
    simp [H₁, H₂, inter_left_comm (I : Set (ι → ℝ)), this]

-- Intersecting a prepartition with a split is the same as splitting each of its boxes.
theorem inf_split (π : Prepartition I) (i : ι) (x : ℝ) :
    π ⊓ split I i x = π.biUnion fun J => split J i x :=
  biUnion_congr_of_le rfl fun _ hJ => restrict_split hJ i x
/-- Split a box along many hyperplanes `{y | y i = x}`; each hyperplane is given by the pair
`(i, x)`. -/
def splitMany (I : Box ι) (s : Finset (ι × ℝ)) : Prepartition I :=
  -- The infimum over `s` of the single-hyperplane splits.
  s.inf fun p => split I p.1 p.2

-- No hyperplanes: the trivial single-box partition.
@[simp]
theorem splitMany_empty (I : Box ι) : splitMany I ∅
    = ⊤ :=
  Finset.inf_empty

@[simp]
theorem splitMany_insert (I : Box ι) (s : Finset (ι × ℝ)) (p : ι × ℝ) :
    splitMany I (insert p s) = splitMany I s ⊓ split I p.1 p.2 := by
  rw [splitMany, Finset.inf_insert, inf_comm, splitMany]

-- `splitMany` refines the split along every hyperplane in `s`.
theorem splitMany_le_split (I : Box ι) {s : Finset (ι × ℝ)} {p : ι × ℝ} (hp : p ∈ s) :
    splitMany I s ≤ split I p.1 p.2 :=
  Finset.inf_le hp

-- Induction on `s`: each added hyperplane splits each existing box into a
-- partition, preserving the partition property.
theorem isPartition_splitMany (I : Box ι) (s : Finset (ι × ℝ)) : IsPartition (splitMany I s) :=
  Finset.induction_on s (by simp only [splitMany_empty, isPartitionTop]) fun a s _ hs => by
    simpa only [splitMany_insert, inf_split] using hs.biUnion fun J _ => isPartitionSplit _ _ _

@[simp]
theorem iUnion_splitMany (I : Box ι) (s : Finset (ι × ℝ)) : (splitMany I s).iUnion = I :=
  (isPartition_splitMany I s).iUnion_eq

-- Intersecting with `splitMany` is the same as splitting each box of `π`.
theorem inf_splitMany {I : Box ι} (π : Prepartition I) (s : Finset (ι × ℝ)) :
    π ⊓ splitMany I s = π.biUnion fun J => splitMany J s := by
  induction' s using Finset.induction_on with p s _ ihp
  · simp
  · simp_rw [splitMany_insert, ← inf_assoc, ihp, inf_split, biUnion_assoc]

/-- Let `s : Finset (ι × ℝ)` be a set of hyperplanes `{x : ι → ℝ | x i = r}` in `ι → ℝ` encoded as
pairs `(i, r)`. Suppose that this set contains all faces of a box `J`. The hyperplanes of `s` split
a box `I` into subboxes. Let `Js` be one of them. If `J` and `Js` have nonempty intersection, then
`Js` is a subbox of `J`. -/
theorem not_disjoint_imp_le_of_subset_of_mem_splitMany {I J Js : Box ι} {s : Finset (ι × ℝ)}
    (H : ∀ i, {(i, J.lower i), (i, J.upper i)} ⊆ s) (HJs : Js ∈ splitMany I s)
    (Hn : ¬Disjoint (J : WithBot (Box ι)) Js) : Js ≤ J := by
  simp only [Finset.insert_subset_iff, Finset.singleton_subset_iff] at H
  -- `x` witnesses a common point of `J` and `Js`.
  rcases Box.not_disjoint_coe_iff_nonempty_inter.mp Hn with ⟨x, hx, hxs⟩
  -- Show `J.lower i < y i` and `y i ≤ J.upper i` coordinatewise for `y ∈ Js`.
  refine fun y hy i => ⟨?_, ?_⟩
  · rcases splitMany_le_split I (H i).1 HJs with ⟨Jl, Hmem : Jl ∈ split I i (J.lower i), Hle⟩
    have := Hle hxs
    -- Since `x ∈ Js` lies strictly above the `J.lower i` hyperplane, so does
    -- the whole box `Jl ⊇ Js` of the split.
    rw [← Box.coe_subset_coe, coe_eq_of_mem_split_of_lt_mem Hmem this (hx i).1] at Hle
    exact (Hle hy).2
  · rcases splitMany_le_split I (H i).2 HJs with ⟨Jl, Hmem : Jl ∈ split I i (J.upper i), Hle⟩
    have := Hle hxs
    rw [← Box.coe_subset_coe, coe_eq_of_mem_split_of_mem_le Hmem this (hx i).2] at Hle
    exact (Hle hy).2
section Finite
variable [Finite ι]
/-- Let `s` be a finite set of boxes in `ℝⁿ = ι → ℝ`. Then there exists a finite set `t₀` of
hyperplanes (namely, the set of all hyperfaces of boxes in `s`) such that for any `t ⊇ t₀`
and any box `I` in `ℝⁿ` the following holds. The hyperplanes from `t` split `I` into subboxes.
Let `J'` be one of them, and let `J` be one of the boxes in `s`. If these boxes have a nonempty
intersection, then `J' ≤ J`. -/
theorem eventually_not_disjoint_imp_le_of_mem_splitMany (s : Finset (Box ι)) :
    ∀ᶠ t : Finset (ι × ℝ) in atTop, ∀ (I : Box ι), ∀ J ∈ s, ∀ J' ∈ splitMany I t,
      ¬Disjoint (J : WithBot (Box ι)) J' → J' ≤ J := by
  cases nonempty_fintype ι
  -- The witness `t₀`: all hyperfaces of all boxes of `s`.
  refine eventually_atTop.2
    ⟨s.biUnion fun J => Finset.univ.biUnion fun i => {(i, J.lower i), (i, J.upper i)},
      fun t ht I J hJ J' hJ' => not_disjoint_imp_le_of_subset_of_mem_splitMany (fun i => ?_) hJ'⟩
  exact fun p hp =>
    ht (Finset.mem_biUnion.2 ⟨J, hJ, Finset.mem_biUnion.2 ⟨i, Finset.mem_univ _, hp⟩⟩)

-- For large enough `t`, intersecting with `splitMany I t` equals filtering the
-- boxes of `splitMany I t` that lie inside `π.iUnion`.
theorem eventually_splitMany_inf_eq_filter (π : Prepartition I) :
    ∀ᶠ t : Finset (ι × ℝ) in atTop,
      π ⊓ splitMany I t = (splitMany I t).filter fun J => ↑J ⊆ π.iUnion := by
  refine (eventually_not_disjoint_imp_le_of_mem_splitMany π.boxes).mono fun t ht => ?_
  refine le_antisymm ((biUnion_le_iff _).2 fun J hJ => ?_) (le_inf (fun J hJ => ?_) (filter_le _ _))
  · refine ofWithBot_mono ?_
    simp only [Finset.mem_image, exists_prop, mem_boxes, mem_filter]
    rintro _ ⟨J₁, h₁, rfl⟩ hne
    refine ⟨_, ⟨J₁, ⟨h₁, Subset.trans ?_ (π.subset_iUnion hJ)⟩, rfl⟩, le_rfl⟩
    exact ht I J hJ J₁ h₁ (mt disjoint_iff.1 hne)
  · rw [mem_filter] at hJ
    -- `J.upper ∈ J ⊆ π.iUnion` locates a box `J'` of `π` meeting `J`.
    rcases Set.mem_iUnion₂.1 (hJ.2 J.upper_mem) with ⟨J', hJ', hmem⟩
    refine ⟨J', hJ', ht I _ hJ' _ hJ.1 <| Box.not_disjoint_coe_iff_nonempty_inter.2 ?_⟩
    exact ⟨J.upper, hmem, J.upper_mem⟩

-- Finitely many prepartitions admit a single `t` working for all of them.
theorem exists_splitMany_inf_eq_filter_of_finite (s : Set (Prepartition I)) (hs : s.Finite) :
    ∃ t : Finset (ι × ℝ),
      ∀ π ∈ s, π ⊓ splitMany I t = (splitMany I t).filter fun J => ↑J ⊆ π.iUnion :=
  haveI := fun π (_ : π ∈ s) => eventually_splitMany_inf_eq_filter π
  (hs.eventually_all.2 this).exists

/-- If `π` is a partition of `I`, then there exists a finite set `s` of hyperplanes such that
`splitMany I s ≤ π`. -/
theorem IsPartition.exists_splitMany_le {I : Box ι} {π : Prepartition I} (h : IsPartition π) :
    ∃ s, splitMany I s ≤ π := by
  refine (eventually_splitMany_inf_eq_filter π).exists.imp fun s hs => ?_
  -- Since `π` covers `I`, the filter keeps every box and the inf collapses.
  rwa [h.iUnion_eq, filter_of_true, inf_eq_right] at hs
  exact fun J hJ => le_of_mem _ hJ

/-- For every prepartition `π` of `I` there exists a prepartition that covers exactly
`I \ π.iUnion`. -/
theorem exists_iUnion_eq_diff (π : Prepartition I) :
    ∃ π' : Prepartition I, π'.iUnion = ↑I \ π.iUnion := by
  rcases π.eventually_splitMany_inf_eq_filter.exists with ⟨s, hs⟩
  -- Keep exactly the boxes of `splitMany I s` NOT inside `π.iUnion`.
  use (splitMany I s).filter fun J => ¬(J : Set (ι → ℝ)) ⊆ π.iUnion
  simp [← hs]

/-- If `π` is a prepartition of `I`, then `π.compl` is a prepartition of `I`
such that `π.compl.iUnion = I \ π.iUnion`. -/
def compl (π : Prepartition I) : Prepartition I :=
  -- Noncomputable choice; see `compl_congr` for the resulting congruence lemma.
  π.exists_iUnion_eq_diff.choose

@[simp]
theorem iUnion_compl (π : Prepartition I) : π.compl.iUnion = ↑I \ π.iUnion :=
  π.exists_iUnion_eq_diff.choose_spec

/-- Since the definition of `BoxIntegral.Prepartition.compl` uses `Exists.choose`,
the result depends only on `π.iUnion`. -/
theorem compl_congr {π₁ π₂ : Prepartition I} (h : π₁.iUnion = π₂.iUnion) : π₁.compl = π₂.compl := by
  dsimp only [compl]
  congr 1
  rw [h]

-- The complement of a full partition covers nothing.
theorem IsPartition.compl_eq_bot {π : Prepartition I} (h : IsPartition π) : π.compl = ⊥ := by
  rw [← iUnion_eq_empty, iUnion_compl, h.iUnion_eq, diff_self]

@[simp]
theorem compl_top : (⊤ : Prepartition I).compl = ⊥ :=
  (isPartitionTop I).compl_eq_bot
end Finite
end Prepartition
end BoxIntegral
|
Analysis\BoxIntegral\Partition\SubboxInduction.lean | /-
Copyright (c) 2021 Yury Kudryashov. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yury Kudryashov
-/
import Mathlib.Analysis.BoxIntegral.Box.SubboxInduction
import Mathlib.Analysis.BoxIntegral.Partition.Tagged
/-!
# Induction on subboxes
In this file we prove (see
`BoxIntegral.Box.exists_taggedPartition_isHenstock_isSubordinate_homothetic`) that for every box `I`
in `ââ¿` and a function `r : ââ¿ â â` positive on `I` there exists a tagged partition `Ï` of `I` such
that
* `Ï` is a Henstock partition;
* `Ï` is subordinate to `r`;
* each box in `Ï` is homothetic to `I` with coefficient of the form `1 / 2 ^ n`.
Later we will use this lemma to prove that the Henstock filter is nontrivial, hence the Henstock
integral is well-defined.
## Tags
partition, tagged partition, Henstock integral
-/
namespace BoxIntegral
open Set Metric
open scoped Classical
open Topology
noncomputable section
variable {ι : Type*} [Fintype ι] {I J : Box ι}
namespace Prepartition
/-- Split a box in `ℝⁿ` into `2 ^ n` boxes by hyperplanes passing through its center. -/
def splitCenter (I : Box ι) : Prepartition I where
  -- One sub-box per subset `s : Set ι` (which coordinates take the upper half),
  -- via the embedding `Box.splitCenterBoxEmb`.
  boxes := Finset.univ.map (Box.splitCenterBoxEmb I)
  le_of_mem' := by simp [I.splitCenterBox_le]
  pairwiseDisjoint := by
    rw [Finset.coe_map, Finset.coe_univ, image_univ]
    rintro _ ⟨s, rfl⟩ _ ⟨t, rfl⟩ Hne
    exact I.disjoint_splitCenterBox (mt (congr_arg _) Hne)

-- Membership: exactly the boxes of the form `I.splitCenterBox s`.
@[simp]
theorem mem_splitCenter : J ∈ splitCenter I ↔ ∃ s, I.splitCenterBox s = J := by simp [splitCenter]

theorem isPartition_splitCenter (I : Box ι) : IsPartition (splitCenter I) := fun x hx => by
  simp [hx]

-- Every box of `splitCenter I` has half the side length of `I` in each direction.
theorem upper_sub_lower_of_mem_splitCenter (h : J ∈ splitCenter I) (i : ι) :
    J.upper i - J.lower i = (I.upper i - I.lower i) / 2 :=
  let ⟨s, hs⟩ := mem_splitCenter.1 h
  hs ▸ I.upper_sub_lower_splitCenterBox s i
end Prepartition
namespace Box
open Prepartition TaggedPrepartition
/-- Let `p` be a predicate on `Box ι`, let `I` be a box. Suppose that the following two properties
hold true.
* Consider a smaller box `J ≤ I`. The hyperplanes passing through the center of `J` split it into
  `2 ^ n` boxes. If `p` holds true on each of these boxes, then it is true on `J`.
* For each `z` in the closed box `I.Icc` there exists a neighborhood `U` of `z` within `I.Icc` such
  that for every box `J ≤ I` such that `z ∈ J.Icc ⊆ U`, if `J` is homothetic to `I` with a
  coefficient of the form `1 / 2 ^ m`, then `p` is true on `J`.
Then `p I` is true. See also `BoxIntegral.Box.subbox_induction_on'` for a version using
`BoxIntegral.Box.splitCenterBox` instead of `BoxIntegral.Prepartition.splitCenter`. -/
@[elab_as_elim]
theorem subbox_induction_on {p : Box ι → Prop} (I : Box ι)
    (H_ind : ∀ J ≤ I, (∀ J' ∈ splitCenter J, p J') → p J)
    (H_nhds : ∀ z ∈ Box.Icc I, ∃ U ∈ 𝓝[Box.Icc I] z, ∀ J ≤ I, ∀ (m : ℕ),
      z ∈ Box.Icc J → Box.Icc J ⊆ U →
        (∀ i, J.upper i - J.lower i = (I.upper i - I.lower i) / 2 ^ m) → p J) :
    p I := by
  -- Reduce to the `splitCenterBox`-phrased induction principle: membership in
  -- `splitCenter J` is exactly being some `J.splitCenterBox s`.
  refine subbox_induction_on' I (fun J hle hs => H_ind J hle fun J' h' => ?_) H_nhds
  rcases mem_splitCenter.1 h' with ⟨s, rfl⟩
  exact hs s
/-- Given a box `I` in `ℝⁿ` and a function `r : ℝⁿ → (0, ∞)`, there exists a tagged partition `π` of
`I` such that
* `π` is a Henstock partition;
* `π` is subordinate to `r`;
* each box in `π` is homothetic to `I` with coefficient of the form `1 / 2 ^ m`.
This lemma implies that the Henstock filter is nontrivial, hence the Henstock integral is
well-defined. -/
theorem exists_taggedPartition_isHenstock_isSubordinate_homothetic (I : Box ι)
    (r : (ι → ℝ) → Ioi (0 : ℝ)) :
    ∃ π : TaggedPrepartition I, π.IsPartition ∧ π.IsHenstock ∧ π.IsSubordinate r ∧
      (∀ J ∈ π, ∃ m : ℕ, ∀ i, (J : _).upper i - J.lower i = (I.upper i - I.lower i) / 2 ^ m) ∧
      π.distortion = I.distortion := by
  -- Induction on subboxes via repeated center-splitting.
  refine subbox_induction_on I (fun J _ hJ => ?_) fun z _ => ?_
  -- Inductive step: glue the partitions chosen for the `2 ^ n` center sub-boxes.
  · choose! πi hP hHen hr Hn _ using hJ
    choose! n hn using Hn
    have hP : ((splitCenter J).biUnionTagged πi).IsPartition :=
      (isPartition_splitCenter _).biUnionTagged hP
    -- Homothety coefficients: one more halving than in the sub-box.
    have hsub : ∀ J' ∈ (splitCenter J).biUnionTagged πi, ∃ n : ℕ, ∀ i,
        (J' : _).upper i - J'.lower i = (J.upper i - J.lower i) / 2 ^ n := by
      intro J' hJ'
      rcases (splitCenter J).mem_biUnionTagged.1 hJ' with ⟨J₁, h₁, h₂⟩
      refine ⟨n J₁ J' + 1, fun i => ?_⟩
      simp only [hn J₁ h₁ J' h₂, upper_sub_lower_of_mem_splitCenter h₁, pow_succ', div_div]
    refine ⟨_, hP, isHenstock_biUnionTagged.2 hHen, isSubordinate_biUnionTagged.2 hr, hsub, ?_⟩
    -- All boxes are homothetic to `J`, so the distortion is constant.
    refine TaggedPrepartition.distortion_of_const _ hP.nonempty_boxes fun J' h' => ?_
    rcases hsub J' h' with ⟨n, hn⟩
    exact Box.distortion_eq_of_sub_eq_div hn
  -- Base step: near `z`, the single-box partition tagged at `z` works, using the
  -- neighborhood `Icc I ∩ closedBall z (r z)`.
  · refine ⟨Box.Icc I ∩ closedBall z (r z),
      inter_mem_nhdsWithin _ (closedBall_mem_nhds _ (r z).coe_prop), ?_⟩
    intro J _ n Hmem HIcc Hsub
    rw [Set.subset_inter_iff] at HIcc
    refine ⟨single _ _ le_rfl _ Hmem, isPartition_single _, isHenstock_single _,
      (isSubordinate_single _ _).2 HIcc.2, ?_, distortion_single _ _⟩
    simp only [TaggedPrepartition.mem_single, forall_eq]
    refine ⟨0, fun i => ?_⟩
    simp
end Box
namespace Prepartition
open TaggedPrepartition Finset Function
/-- Given a box `I` in `ℝⁿ`, a function `r : ℝⁿ → (0, ∞)`, and a prepartition `π` of `I`, there
exists a tagged prepartition `π'` of `I` such that
* each box of `π'` is included in some box of `π`;
* `π'` is a Henstock partition;
* `π'` is subordinate to `r`;
* `π'` covers exactly the same part of `I` as `π`;
* the distortion of `π'` is equal to the distortion of `π`.
-/
theorem exists_tagged_le_isHenstock_isSubordinate_iUnion_eq {I : Box ι} (r : (ι → ℝ) → Ioi (0 : ℝ))
    (π : Prepartition I) :
    ∃ π' : TaggedPrepartition I, π'.toPrepartition ≤ π ∧ π'.IsHenstock ∧ π'.IsSubordinate r ∧
      π'.distortion = π.distortion ∧ π'.iUnion = π.iUnion := by
  -- Pick a good tagged partition for each box of `π` and glue them.
  have := fun J => Box.exists_taggedPartition_isHenstock_isSubordinate_homothetic J r
  choose! πi πip πiH πir _ πid using this
  refine ⟨π.biUnionTagged πi, biUnion_le _ _, isHenstock_biUnionTagged.2 fun J _ => πiH J,
    isSubordinate_biUnionTagged.2 fun J _ => πir J, ?_, π.iUnion_biUnion_partition fun J _ => πip J⟩
  rw [distortion_biUnionTagged]
  exact sup_congr rfl fun J _ => πid J
/-- Given a prepartition `π` of a box `I` and a function `r : ℝⁿ → (0, ∞)`, `π.toSubordinate r`
is a tagged partition `π'` such that
* each box of `π'` is included in some box of `π`;
* `π'` is a Henstock partition;
* `π'` is subordinate to `r`;
* `π'` covers exactly the same part of `I` as `π`;
* the distortion of `π'` is equal to the distortion of `π`.
-/
def toSubordinate (π : Prepartition I) (r : (ι → ℝ) → Ioi (0 : ℝ)) : TaggedPrepartition I :=
  -- Noncomputable choice; the four lemmas below unfold the `choose_spec`.
  (π.exists_tagged_le_isHenstock_isSubordinate_iUnion_eq r).choose

theorem toSubordinate_toPrepartition_le (π : Prepartition I) (r : (ι → ℝ) → Ioi (0 : ℝ)) :
    (π.toSubordinate r).toPrepartition ≤ π :=
  (π.exists_tagged_le_isHenstock_isSubordinate_iUnion_eq r).choose_spec.1

theorem isHenstock_toSubordinate (π : Prepartition I) (r : (ι → ℝ) → Ioi (0 : ℝ)) :
    (π.toSubordinate r).IsHenstock :=
  (π.exists_tagged_le_isHenstock_isSubordinate_iUnion_eq r).choose_spec.2.1

theorem isSubordinate_toSubordinate (π : Prepartition I) (r : (ι → ℝ) → Ioi (0 : ℝ)) :
    (π.toSubordinate r).IsSubordinate r :=
  (π.exists_tagged_le_isHenstock_isSubordinate_iUnion_eq r).choose_spec.2.2.1

@[simp]
theorem distortion_toSubordinate (π : Prepartition I) (r : (ι → ℝ) → Ioi (0 : ℝ)) :
    (π.toSubordinate r).distortion = π.distortion :=
  (π.exists_tagged_le_isHenstock_isSubordinate_iUnion_eq r).choose_spec.2.2.2.1

@[simp]
theorem iUnion_toSubordinate (π : Prepartition I) (r : (ι → ℝ) → Ioi (0 : ℝ)) :
    (π.toSubordinate r).iUnion = π.iUnion :=
  (π.exists_tagged_le_isHenstock_isSubordinate_iUnion_eq r).choose_spec.2.2.2.2
end Prepartition
namespace TaggedPrepartition
/-- Given a tagged prepartition `π₁`, a prepartition `π₂` that covers exactly `I \ π₁.iUnion`, and
a function `r : ℝⁿ → (0, ∞)`, returns the union of `π₁` and `π₂.toSubordinate r`. This partition
`π` has the following properties:
* `π` is a partition, i.e. it covers the whole `I`;
* `π₁.boxes ⊆ π.boxes`;
* `π.tag J = π₁.tag J` whenever `J ∈ π₁`;
* `π` is Henstock outside of `π₁`: `π.tag J ∈ J.Icc` whenever `J ∈ π`, `J ∉ π₁`;
* `π` is subordinate to `r` outside of `π₁`;
* the distortion of `π` is equal to the maximum of the distortions of `π₁` and `π₂`.
-/
def unionComplToSubordinate (π₁ : TaggedPrepartition I) (π₂ : Prepartition I)
    (hU : π₂.iUnion = ↑I \ π₁.iUnion) (r : (ι → ℝ) → Ioi (0 : ℝ)) : TaggedPrepartition I :=
  -- Disjointness follows because `π₂.toSubordinate r` covers `I \ π₁.iUnion`.
  π₁.disjUnion (π₂.toSubordinate r)
    (((π₂.iUnion_toSubordinate r).trans hU).symm ▸ disjoint_sdiff_self_right)

theorem isPartition_unionComplToSubordinate (π₁ : TaggedPrepartition I) (π₂ : Prepartition I)
    (hU : π₂.iUnion = ↑I \ π₁.iUnion) (r : (ι → ℝ) → Ioi (0 : ℝ)) :
    IsPartition (π₁.unionComplToSubordinate π₂ hU r) :=
  Prepartition.isPartitionDisjUnionOfEqDiff ((π₂.iUnion_toSubordinate r).trans hU)

@[simp]
theorem unionComplToSubordinate_boxes (π₁ : TaggedPrepartition I) (π₂ : Prepartition I)
    (hU : π₂.iUnion = ↑I \ π₁.iUnion) (r : (ι → ℝ) → Ioi (0 : ℝ)) :
    (π₁.unionComplToSubordinate π₂ hU r).boxes = π₁.boxes ∪ (π₂.toSubordinate r).boxes := rfl

@[simp]
theorem iUnion_unionComplToSubordinate_boxes (π₁ : TaggedPrepartition I) (π₂ : Prepartition I)
    (hU : π₂.iUnion = ↑I \ π₁.iUnion) (r : (ι → ℝ) → Ioi (0 : ℝ)) :
    (π₁.unionComplToSubordinate π₂ hU r).iUnion = I :=
  (isPartition_unionComplToSubordinate _ _ _ _).iUnion_eq

@[simp]
theorem distortion_unionComplToSubordinate (π₁ : TaggedPrepartition I) (π₂ : Prepartition I)
    (hU : π₂.iUnion = ↑I \ π₁.iUnion) (r : (ι → ℝ) → Ioi (0 : ℝ)) :
    (π₁.unionComplToSubordinate π₂ hU r).distortion = max π₁.distortion π₂.distortion := by
  simp [unionComplToSubordinate]
end TaggedPrepartition
end
end BoxIntegral
|
Analysis\BoxIntegral\Partition\Tagged.lean | /-
Copyright (c) 2021 Yury Kudryashov. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yury Kudryashov
-/
import Mathlib.Analysis.BoxIntegral.Partition.Basic
/-!
# Tagged partitions
A tagged (pre)partition is a (pre)partition `Ï` enriched with a tagged point for each box of
`Ï`. For simplicity we require that the function `BoxIntegral.TaggedPrepartition.tag` is defined
on all boxes `J : Box ι` but use its values only on boxes of the partition. Given
`Ï : BoxIntegral.TaggedPrepartition I`, we require that each `BoxIntegral.TaggedPrepartition Ï J`
belongs to `BoxIntegral.Box.Icc I`. If for every `J â Ï`, `Ï.tag J` belongs to `J.Icc`, then `Ï` is
called a *Henstock* partition. We do not include this assumption into the definition of a tagged
(pre)partition because McShane integral is defined as a limit along tagged partitions without this
requirement.
## Tags
rectangular box, box partition
-/
noncomputable section
open scoped Classical
open ENNReal NNReal
open Set Function
namespace BoxIntegral
variable {ι : Type*}
/-- A tagged prepartition is a prepartition enriched with a tagged point for each box of the
prepartition. For simplicity we require that `tag` is defined for all boxes in `ι → ℝ` but
we will use only the values of `tag` on the boxes of the partition. -/
structure TaggedPrepartition (I : Box ι) extends Prepartition I where
  /-- Choice of tagged point of each box in this prepartition:
  we extend this to a total function, on all boxes in `ι → ℝ`. -/
  tag : Box ι → ι → ℝ
  /-- Each tagged point belongs to the closed box `I.Icc` (not necessarily to its own box:
  that stronger property is `IsHenstock`). -/
  tag_mem_Icc : ∀ J, tag J ∈ Box.Icc I
namespace TaggedPrepartition
variable {I J Jâ Jâ : Box ι} (Ï : TaggedPrepartition I) {x : ι â â}
-- `J ∈ π` means `J` is one of the boxes of the underlying prepartition.
instance : Membership (Box ι) (TaggedPrepartition I) :=
  ⟨fun J π => J ∈ π.boxes⟩

@[simp]
theorem mem_toPrepartition {π : TaggedPrepartition I} : J ∈ π.toPrepartition ↔ J ∈ π := Iff.rfl

@[simp]
theorem mem_mk (π : Prepartition I) (f h) : J ∈ mk π f h ↔ J ∈ π := Iff.rfl

/-- Union of all boxes of a tagged prepartition. -/
def iUnion : Set (ι → ℝ) :=
  π.toPrepartition.iUnion

theorem iUnion_def : π.iUnion = ⋃ J ∈ π, ↑J := rfl

@[simp]
theorem iUnion_mk (π : Prepartition I) (f h) : (mk π f h).iUnion = π.iUnion := rfl

@[simp]
theorem iUnion_toPrepartition : π.toPrepartition.iUnion = π.iUnion := rfl

-- Porting note: Previous proof was `:= Set.mem_iUnion₂`
@[simp]
theorem mem_iUnion : x ∈ π.iUnion ↔ ∃ J ∈ π, x ∈ J := by
  convert Set.mem_iUnion₂
  rw [Box.mem_coe, mem_toPrepartition, exists_prop]

theorem subset_iUnion (h : J ∈ π) : ↑J ⊆ π.iUnion :=
  subset_biUnion_of_mem h

theorem iUnion_subset : π.iUnion ⊆ I :=
  iUnion₂_subset π.le_of_mem'

/-- A tagged prepartition is a partition if it covers the whole box. -/
def IsPartition :=
  π.toPrepartition.IsPartition

theorem isPartition_iff_iUnion_eq : IsPartition π ↔ π.iUnion = I :=
  Prepartition.isPartition_iff_iUnion_eq

/-- The tagged partition made of boxes of `π` that satisfy predicate `p`. -/
@[simps! (config := .asFn)]
def filter (p : Box ι → Prop) : TaggedPrepartition I :=
  -- Filtering keeps the same (total) tag function and its membership proof.
  ⟨π.1.filter p, π.2, π.3⟩

@[simp]
theorem mem_filter {p : Box ι → Prop} : J ∈ π.filter p ↔ J ∈ π ∧ p J :=
  Finset.mem_filter

@[simp]
theorem iUnion_filter_not (π : TaggedPrepartition I) (p : Box ι → Prop) :
    (π.filter fun J => ¬p J).iUnion = π.iUnion \ (π.filter p).iUnion :=
  π.toPrepartition.iUnion_filter_not p
end TaggedPrepartition
namespace Prepartition
variable {I J : Box ι}
/-- Given a partition `π` of `I : BoxIntegral.Box ι` and a collection of tagged partitions
`πi J` of all boxes `J ∈ π`, returns the tagged partition of `I` into all the boxes of `πi J`
with tags coming from `(πi J).tag`. -/
def biUnionTagged (π : Prepartition I) (πi : ∀ J : Box ι, TaggedPrepartition J) :
    TaggedPrepartition I where
  toPrepartition := π.biUnion fun J => (πi J).toPrepartition
  -- `biUnionIndex` recovers, for each box, the box of `π` it came from; we take
  -- the tag assigned by that box's tagged partition.
  tag J := (πi (π.biUnionIndex (fun J => (πi J).toPrepartition) J)).tag J
  tag_mem_Icc _ := Box.le_iff_Icc.1 (π.biUnionIndex_le _ _) ((πi _).tag_mem_Icc _)

@[simp]
theorem mem_biUnionTagged (π : Prepartition I) {πi : ∀ J, TaggedPrepartition J} :
    J ∈ π.biUnionTagged πi ↔ ∃ J' ∈ π, J ∈ πi J' :=
  π.mem_biUnion

-- The tag of a box of `πi J` inside the glued partition is its original tag.
theorem tag_biUnionTagged (π : Prepartition I) {πi : ∀ J, TaggedPrepartition J} (hJ : J ∈ π) {J'}
    (hJ' : J' ∈ πi J) : (π.biUnionTagged πi).tag J' = (πi J).tag J' := by
  rw [← π.biUnionIndex_of_mem (πi := fun J => (πi J).toPrepartition) hJ hJ']
  rfl

@[simp]
theorem iUnion_biUnionTagged (π : Prepartition I) (πi : ∀ J, TaggedPrepartition J) :
    (π.biUnionTagged πi).iUnion = ⋃ J ∈ π, (πi J).iUnion :=
  iUnion_biUnion _ _

-- A tag/box predicate holds over the glued partition iff it holds over each piece.
theorem forall_biUnionTagged (p : (ι → ℝ) → Box ι → Prop) (π : Prepartition I)
    (πi : ∀ J, TaggedPrepartition J) :
    (∀ J ∈ π.biUnionTagged πi, p ((π.biUnionTagged πi).tag J) J) ↔
      ∀ J ∈ π, ∀ J' ∈ πi J, p ((πi J).tag J') J' := by
  simp only [mem_biUnionTagged]
  refine ⟨fun H J hJ J' hJ' => ?_, fun H J' ⟨J, hJ, hJ'⟩ => ?_⟩
  · rw [← π.tag_biUnionTagged hJ hJ']
    exact H J' ⟨J, hJ, hJ'⟩
  · rw [π.tag_biUnionTagged hJ hJ']
    exact H J hJ J' hJ'

theorem IsPartition.biUnionTagged {π : Prepartition I} (h : IsPartition π)
    {πi : ∀ J, TaggedPrepartition J} (hi : ∀ J ∈ π, (πi J).IsPartition) :
    (π.biUnionTagged πi).IsPartition :=
  h.biUnion hi
end Prepartition
namespace TaggedPrepartition
variable {I J : Box ι} {Ï Ïâ Ïâ : TaggedPrepartition I} {x : ι â â}
/-- Given a tagged partition `π` of `I` and a (not tagged) partition `πi J hJ` of each `J ∈ π`,
returns the tagged partition of `I` into all the boxes of all `πi J hJ`. The tag of a box `J`
is defined to be the `π.tag` of the box of the partition `π` that includes `J`.
Note that usually the result is not a Henstock partition. -/
@[simps (config := .asFn) tag]
def biUnionPrepartition (π : TaggedPrepartition I) (πi : ∀ J : Box ι, Prepartition J) :
    TaggedPrepartition I where
  toPrepartition := π.toPrepartition.biUnion πi
  -- Tags are inherited from the enclosing box of `π`, not from `πi`.
  tag J := π.tag (π.toPrepartition.biUnionIndex πi J)
  tag_mem_Icc _ := π.tag_mem_Icc _

theorem IsPartition.biUnionPrepartition {π : TaggedPrepartition I} (h : IsPartition π)
    {πi : ∀ J, Prepartition J} (hi : ∀ J ∈ π, (πi J).IsPartition) :
    (π.biUnionPrepartition πi).IsPartition :=
  h.biUnion hi

/-- Given two partitions `π₁` and `π₂`, one of them tagged and the other is not, returns the tagged
partition with `toPrepartition = π₁.toPrepartition ⊓ π₂` and tags coming from `π₁`.
Note that usually the result is not a Henstock partition. -/
def infPrepartition (π : TaggedPrepartition I) (π' : Prepartition I) : TaggedPrepartition I :=
  π.biUnionPrepartition fun J => π'.restrict J

@[simp]
theorem infPrepartition_toPrepartition (π : TaggedPrepartition I) (π' : Prepartition I) :
    (π.infPrepartition π').toPrepartition = π.toPrepartition ⊓ π' := rfl

-- Membership in the inf is symmetric in the two prepartitions (the tags differ,
-- but the underlying boxes agree by commutativity of `⊓`).
theorem mem_infPrepartition_comm :
    J ∈ π₁.infPrepartition π₂.toPrepartition ↔ J ∈ π₂.infPrepartition π₁.toPrepartition := by
  simp only [← mem_toPrepartition, infPrepartition_toPrepartition, inf_comm]

theorem IsPartition.infPrepartition (h₁ : π₁.IsPartition) {π₂ : Prepartition I}
    (h₂ : π₂.IsPartition) : (π₁.infPrepartition π₂).IsPartition :=
  h₁.inf h₂
open Metric
/-- A tagged partition is said to be a Henstock partition if for each `J ∈ π`, the tag of `J`
belongs to `J.Icc`. -/
def IsHenstock (π : TaggedPrepartition I) : Prop :=
  ∀ J ∈ π, π.tag J ∈ Box.Icc J

@[simp]
theorem isHenstock_biUnionTagged {π : Prepartition I} {πi : ∀ J, TaggedPrepartition J} :
    IsHenstock (π.biUnionTagged πi) ↔ ∀ J ∈ π, (πi J).IsHenstock :=
  π.forall_biUnionTagged (fun x J => x ∈ Box.Icc J) πi

/-- In a Henstock prepartition, there are at most `2 ^ Fintype.card ι` boxes with a given tag. -/
theorem IsHenstock.card_filter_tag_eq_le [Fintype ι] (h : π.IsHenstock) (x : ι → ℝ) :
    (π.boxes.filter fun J => π.tag J = x).card ≤ 2 ^ Fintype.card ι :=
  calc
    -- Boxes tagged `x` are among the boxes whose closure contains `x` …
    (π.boxes.filter fun J => π.tag J = x).card ≤
        (π.boxes.filter fun J : Box ι => x ∈ Box.Icc J).card := by
      refine Finset.card_le_card fun J hJ => ?_
      rw [Finset.mem_filter] at hJ ⊢; rcases hJ with ⟨hJ, rfl⟩
      exact ⟨hJ, h J hJ⟩
    -- … and those are at most `2 ^ n` since the interiors are pairwise disjoint.
    _ ≤ 2 ^ Fintype.card ι := π.toPrepartition.card_filter_mem_Icc_le x

/-- A tagged partition `π` is subordinate to `r : (ι → ℝ) → ℝ` if each box `J ∈ π` is included in
the closed ball with center `π.tag J` and radius `r (π.tag J)`. -/
def IsSubordinate [Fintype ι] (π : TaggedPrepartition I) (r : (ι → ℝ) → Ioi (0 : ℝ)) : Prop :=
  ∀ J ∈ π, Box.Icc J ⊆ closedBall (π.tag J) (r <| π.tag J)

variable {r r₁ r₂ : (ι → ℝ) → Ioi (0 : ℝ)}

@[simp]
theorem isSubordinate_biUnionTagged [Fintype ι] {π : Prepartition I}
    {πi : ∀ J, TaggedPrepartition J} :
    IsSubordinate (π.biUnionTagged πi) r ↔ ∀ J ∈ π, (πi J).IsSubordinate r :=
  π.forall_biUnionTagged (fun x J => Box.Icc J ⊆ closedBall x (r x)) πi

-- Subordination survives refining by prepartitions, since each smaller box sits
-- inside the box of `π` whose tag it inherits.
theorem IsSubordinate.biUnionPrepartition [Fintype ι] (h : IsSubordinate π r)
    (πi : ∀ J, Prepartition J) : IsSubordinate (π.biUnionPrepartition πi) r :=
  fun _ hJ => Subset.trans (Box.le_iff_Icc.1 <| π.toPrepartition.le_biUnionIndex hJ) <|
    h _ <| π.toPrepartition.biUnionIndex_mem hJ

theorem IsSubordinate.infPrepartition [Fintype ι] (h : IsSubordinate π r) (π' : Prepartition I) :
    IsSubordinate (π.infPrepartition π') r :=
  h.biUnionPrepartition _

-- Monotonicity in `r`, assuming the bound only at the tags actually used.
theorem IsSubordinate.mono' [Fintype ι] {π : TaggedPrepartition I} (hr₁ : π.IsSubordinate r₁)
    (h : ∀ J ∈ π, r₁ (π.tag J) ≤ r₂ (π.tag J)) : π.IsSubordinate r₂ :=
  fun _ hJ _ hx => closedBall_subset_closedBall (h _ hJ) (hr₁ _ hJ hx)

-- Monotonicity in `r`, assuming the bound on all of `I.Icc`.
theorem IsSubordinate.mono [Fintype ι] {π : TaggedPrepartition I} (hr₁ : π.IsSubordinate r₁)
    (h : ∀ x ∈ Box.Icc I, r₁ x ≤ r₂ x) : π.IsSubordinate r₂ :=
  hr₁.mono' fun J _ => h _ <| π.tag_mem_Icc J

-- Each box of a subordinate partition has diameter at most the ball's diameter.
theorem IsSubordinate.diam_le [Fintype ι] {π : TaggedPrepartition I} (h : π.IsSubordinate r)
    (hJ : J ∈ π.boxes) : diam (Box.Icc J) ≤ 2 * r (π.tag J) :=
  calc
    diam (Box.Icc J) ≤ diam (closedBall (π.tag J) (r <| π.tag J)) :=
      diam_mono (h J hJ) isBounded_closedBall
    _ ≤ 2 * r (π.tag J) := diam_closedBall (le_of_lt (r _).2)
/-- Tagged prepartition with single box and prescribed tag. -/
@[simps! (config := .asFn)]
def single (I J : Box ι) (hJ : J ≤ I) (x : ι → ℝ) (h : x ∈ Box.Icc I) : TaggedPrepartition I :=
  -- Every box (not just `J`) gets the tag `x`, since `tag` must be total.
  ⟨Prepartition.single I J hJ, fun _ => x, fun _ => h⟩

@[simp]
theorem mem_single {J'} (hJ : J ≤ I) (h : x ∈ Box.Icc I) : J' ∈ single I J hJ x h ↔ J' = J :=
  Finset.mem_singleton

-- Default instance: the whole box `I`, tagged at its upper corner.
instance (I : Box ι) : Inhabited (TaggedPrepartition I) :=
  ⟨single I I le_rfl I.upper I.upper_mem_Icc⟩

theorem isPartition_single_iff (hJ : J ≤ I) (h : x ∈ Box.Icc I) :
    (single I J hJ x h).IsPartition ↔ J = I :=
  Prepartition.isPartition_single_iff hJ

theorem isPartition_single (h : x ∈ Box.Icc I) : (single I I le_rfl x h).IsPartition :=
  Prepartition.isPartitionTop I

-- Universal property: a tag/box predicate over `single` reduces to `p x J`.
theorem forall_mem_single (p : (ι → ℝ) → Box ι → Prop) (hJ : J ≤ I) (h : x ∈ Box.Icc I) :
    (∀ J' ∈ single I J hJ x h, p ((single I J hJ x h).tag J') J') ↔ p x J := by simp

@[simp]
theorem isHenstock_single_iff (hJ : J ≤ I) (h : x ∈ Box.Icc I) :
    IsHenstock (single I J hJ x h) ↔ x ∈ Box.Icc J :=
  forall_mem_single (fun x J => x ∈ Box.Icc J) hJ h

--@[simp] -- Porting note: Commented out, because `simp only [isHenstock_single_iff]` simplifies it
theorem isHenstock_single (h : x ∈ Box.Icc I) : IsHenstock (single I I le_rfl x h) :=
  (isHenstock_single_iff (le_refl I) h).2 h

@[simp]
theorem isSubordinate_single [Fintype ι] (hJ : J ≤ I) (h : x ∈ Box.Icc I) :
    IsSubordinate (single I J hJ x h) r ↔ Box.Icc J ⊆ closedBall x (r x) :=
  forall_mem_single (fun x J => Box.Icc J ⊆ closedBall x (r x)) hJ h

@[simp]
theorem iUnion_single (hJ : J ≤ I) (h : x ∈ Box.Icc I) : (single I J hJ x h).iUnion = J :=
  Prepartition.iUnion_single hJ
/-- Union of two tagged prepartitions with disjoint unions of boxes. -/
def disjUnion (π₁ π₂ : TaggedPrepartition I) (h : Disjoint π₁.iUnion π₂.iUnion) :
    TaggedPrepartition I where
  toPrepartition := π₁.toPrepartition.disjUnion π₂.toPrepartition h
  -- Boxes of `π₁` keep their `π₁`-tag; everything else falls back to `π₂.tag`.
  tag := π₁.boxes.piecewise π₁.tag π₂.tag
  tag_mem_Icc J := by
    dsimp only [Finset.piecewise]
    split_ifs
    exacts [π₁.tag_mem_Icc J, π₂.tag_mem_Icc J]

@[simp]
theorem disjUnion_boxes (h : Disjoint π₁.iUnion π₂.iUnion) :
    (π₁.disjUnion π₂ h).boxes = π₁.boxes ∪ π₂.boxes := rfl

@[simp]
theorem mem_disjUnion (h : Disjoint π₁.iUnion π₂.iUnion) :
    J ∈ π₁.disjUnion π₂ h ↔ J ∈ π₁ ∨ J ∈ π₂ :=
  Finset.mem_union

@[simp]
theorem iUnion_disjUnion (h : Disjoint π₁.iUnion π₂.iUnion) :
    (π₁.disjUnion π₂ h).iUnion = π₁.iUnion ∪ π₂.iUnion :=
  Prepartition.iUnion_disjUnion h

theorem disjUnion_tag_of_mem_left (h : Disjoint π₁.iUnion π₂.iUnion) (hJ : J ∈ π₁) :
    (π₁.disjUnion π₂ h).tag J = π₁.tag J :=
  dif_pos hJ

-- A box of `π₂` cannot also belong to `π₁` (their unions are disjoint and
-- `J.upper ∈ J`), so the piecewise falls through to `π₂.tag`.
theorem disjUnion_tag_of_mem_right (h : Disjoint π₁.iUnion π₂.iUnion) (hJ : J ∈ π₂) :
    (π₁.disjUnion π₂ h).tag J = π₂.tag J :=
  dif_neg fun h₁ => h.le_bot ⟨π₁.subset_iUnion h₁ J.upper_mem, π₂.subset_iUnion hJ J.upper_mem⟩

theorem IsSubordinate.disjUnion [Fintype ι] (h₁ : IsSubordinate π₁ r) (h₂ : IsSubordinate π₂ r)
    (h : Disjoint π₁.iUnion π₂.iUnion) : IsSubordinate (π₁.disjUnion π₂ h) r := by
  refine fun J hJ => (Finset.mem_union.1 hJ).elim (fun hJ => ?_) fun hJ => ?_
  · rw [disjUnion_tag_of_mem_left _ hJ]
    exact h₁ _ hJ
  · rw [disjUnion_tag_of_mem_right _ hJ]
    exact h₂ _ hJ

theorem IsHenstock.disjUnion (h₁ : IsHenstock π₁) (h₂ : IsHenstock π₂)
    (h : Disjoint π₁.iUnion π₂.iUnion) : IsHenstock (π₁.disjUnion π₂ h) := by
  refine fun J hJ => (Finset.mem_union.1 hJ).elim (fun hJ => ?_) fun hJ => ?_
  · rw [disjUnion_tag_of_mem_left _ hJ]
    exact h₁ _ hJ
  · rw [disjUnion_tag_of_mem_right _ hJ]
    exact h₂ _ hJ
/-- If `I †J`, then every tagged prepartition of `I` is a tagged prepartition of `J`. -/
def embedBox (I J : Box ι) (h : I †J) : TaggedPrepartition I ⪠TaggedPrepartition J where
toFun Ï :=
{ Ï with
le_of_mem' := fun J' hJ' => (Ï.le_of_mem' J' hJ').trans h
tag_mem_Icc := fun J => Box.le_iff_Icc.1 h (Ï.tag_mem_Icc J) }
inj' := by
rintro âšâšbâ, hâle, hâdâ©, tâ, htââ© âšâšbâ, hâle, hâdâ©, tâ, htââ© H
simpa using H
section Distortion

variable [Fintype ι] (Ï)

open Finset

/-- The distortion of a tagged prepartition is the maximum of distortions of its boxes. -/
def distortion : ââ¥0 :=
  Ï.toPrepartition.distortion

/-- Each box of a tagged prepartition has distortion at most the prepartition's distortion. -/
theorem distortion_le_of_mem (h : J â Ï) : J.distortion †Ï.distortion :=
  le_sup h

/-- The distortion is at most `c` iff every box's distortion is at most `c`. -/
theorem distortion_le_iff {c : ââ¥0} : Ï.distortion †c â â J â Ï, Box.distortion J †c :=
  Finset.sup_le_iff

@[simp]
theorem _root_.BoxIntegral.Prepartition.distortion_biUnionTagged (Ï : Prepartition I)
    (Ïi : â J, TaggedPrepartition J) :
    (Ï.biUnionTagged Ïi).distortion = Ï.boxes.sup fun J => (Ïi J).distortion :=
  sup_biUnion _ _

@[simp]
theorem distortion_biUnionPrepartition (Ï : TaggedPrepartition I) (Ïi : â J, Prepartition J) :
    (Ï.biUnionPrepartition Ïi).distortion = Ï.boxes.sup fun J => (Ïi J).distortion :=
  sup_biUnion _ _

@[simp]
theorem distortion_disjUnion (h : Disjoint Ïâ.iUnion Ïâ.iUnion) :
    (Ïâ.disjUnion Ïâ h).distortion = max Ïâ.distortion Ïâ.distortion :=
  sup_union

/-- If every box of a nonempty tagged prepartition has distortion `c`, so does the
prepartition. -/
theorem distortion_of_const {c} (hâ : Ï.boxes.Nonempty) (hâ : â J â Ï, Box.distortion J = c) :
    Ï.distortion = c :=
  (sup_congr rfl hâ).trans (sup_const hâ _)

@[simp]
theorem distortion_single (hJ : J †I) (h : x â Box.Icc I) :
    distortion (single I J hJ x h) = J.distortion :=
  sup_singleton

/-- Filtering boxes can only decrease the distortion (the `sup` runs over fewer boxes). -/
theorem distortion_filter_le (p : Box ι â Prop) : (Ï.filter p).distortion †Ï.distortion :=
  sup_mono (filter_subset _ _)

end Distortion

end TaggedPrepartition

end BoxIntegral
|
Analysis\Calculus\Darboux.lean | /-
Copyright (c) 2020 Yury Kudryashov. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yury Kudryashov
-/
import Mathlib.Analysis.Calculus.Deriv.Add
import Mathlib.Analysis.Calculus.Deriv.Mul
import Mathlib.Analysis.Calculus.LocalExtr.Basic
/-!
# Darboux's theorem
In this file we prove that the derivative of a differentiable function on an interval takes all
intermediate values. The proof is based on the
[Wikipedia](https://en.wikipedia.org/wiki/Darboux%27s_theorem_(analysis)) page about this theorem.
-/
open Filter Set
open scoped Topology Classical
variable {a b : â} {f f' : â â â}
/-- **Darboux's theorem**: if `a †b` and `f' a < m < f' b`, then `f' c = m` for some
`c â (a, b)`. -/
theorem exists_hasDerivWithinAt_eq_of_gt_of_lt (hab : a †b)
(hf : â x â Icc a b, HasDerivWithinAt f (f' x) (Icc a b) x) {m : â} (hma : f' a < m)
(hmb : m < f' b) : m â f' '' Ioo a b := by
rcases hab.eq_or_lt with (rfl | hab')
· exact (lt_asymm hma hmb).elim
set g : â â â := fun x => f x - m * x
have hg : â x â Icc a b, HasDerivWithinAt g (f' x - m) (Icc a b) x := by
intro x hx
simpa using (hf x hx).sub ((hasDerivWithinAt_id x _).const_mul m)
obtain âšc, cmem, hcâ© : â c â Icc a b, IsMinOn g (Icc a b) c :=
isCompact_Icc.exists_isMinOn (nonempty_Icc.2 <| hab) fun x hx => (hg x hx).continuousWithinAt
have cmem' : c â Ioo a b := by
rcases cmem.1.eq_or_lt with (rfl | hac)
-- Show that `c` can't be equal to `a`
· refine absurd (sub_nonneg.1 <| nonneg_of_mul_nonneg_right ?_ (sub_pos.2 hab'))
(not_le_of_lt hma)
have : b - a â posTangentConeAt (Icc a b) a :=
sub_mem_posTangentConeAt_of_segment_subset (segment_eq_Icc hab âž Subset.rfl)
simpa only [ContinuousLinearMap.smulRight_apply, ContinuousLinearMap.one_apply]
using hc.localize.hasFDerivWithinAt_nonneg (hg a (left_mem_Icc.2 hab)) this
rcases cmem.2.eq_or_gt with (rfl | hcb)
-- Show that `c` can't be equal to `b`
· refine absurd (sub_nonpos.1 <| nonpos_of_mul_nonneg_right ?_ (sub_lt_zero.2 hab'))
(not_le_of_lt hmb)
have : a - b â posTangentConeAt (Icc a b) b :=
sub_mem_posTangentConeAt_of_segment_subset (by rw [segment_symm, segment_eq_Icc hab])
simpa only [ContinuousLinearMap.smulRight_apply, ContinuousLinearMap.one_apply]
using hc.localize.hasFDerivWithinAt_nonneg (hg b (right_mem_Icc.2 hab)) this
exact âšhac, hcbâ©
use c, cmem'
rw [â sub_eq_zero]
have : Icc a b â ð c := by rwa [â mem_interior_iff_mem_nhds, interior_Icc]
exact (hc.isLocalMin this).hasDerivAt_eq_zero ((hg c cmem).hasDerivAt this)
/-- **Darboux's theorem**: if `a †b` and `f' b < m < f' a`, then `f' c = m` for some `c â (a, b)`.
-/
theorem exists_hasDerivWithinAt_eq_of_lt_of_gt (hab : a †b)
    (hf : â x â Icc a b, HasDerivWithinAt f (f' x) (Icc a b) x) {m : â} (hma : m < f' a)
    (hmb : f' b < m) : m â f' '' Ioo a b :=
  -- Apply the increasing version to `-f` (whose derivative is `-f'`), then negate back.
  let âšc, cmem, hcâ© :=
    exists_hasDerivWithinAt_eq_of_gt_of_lt hab (fun x hx => (hf x hx).neg) (neg_lt_neg hma)
      (neg_lt_neg hmb)
  âšc, cmem, neg_injective hcâ©
/-- **Darboux's theorem**: the image of a `Set.OrdConnected` set under `f'` is a `Set.OrdConnected`
set, `HasDerivWithinAt` version. -/
theorem Set.OrdConnected.image_hasDerivWithinAt {s : Set â} (hs : OrdConnected s)
    (hf : â x â s, HasDerivWithinAt f (f' x) s x) : OrdConnected (f' '' s) := by
  apply ordConnected_of_Ioo
  rintro _ âša, ha, rflâ© _ âšb, hb, rflâ© - m âšhma, hmbâ©
  -- Split on the order of the endpoints and use the matching Darboux variant on `[a, b]`
  -- (or `[b, a]`), which lies inside `s` by order-connectedness.
  rcases le_total a b with hab | hab
  · have : Icc a b â s := hs.out ha hb
    rcases exists_hasDerivWithinAt_eq_of_gt_of_lt hab (fun x hx => (hf x <| this hx).mono this) hma
        hmb with
      âšc, cmem, hcâ©
    exact âšc, this <| Ioo_subset_Icc_self cmem, hcâ©
  · have : Icc b a â s := hs.out hb ha
    rcases exists_hasDerivWithinAt_eq_of_lt_of_gt hab (fun x hx => (hf x <| this hx).mono this) hmb
        hma with
      âšc, cmem, hcâ©
    exact âšc, this <| Ioo_subset_Icc_self cmem, hcâ©
/-- **Darboux's theorem**: the image of a `Set.OrdConnected` set under `f'` is a `Set.OrdConnected`
set, `derivWithin` version. -/
theorem Set.OrdConnected.image_derivWithin {s : Set â} (hs : OrdConnected s)
    (hf : DifferentiableOn â f s) : OrdConnected (derivWithin f s '' s) :=
  hs.image_hasDerivWithinAt fun x hx => (hf x hx).hasDerivWithinAt

/-- **Darboux's theorem**: the image of a `Set.OrdConnected` set under `f'` is a `Set.OrdConnected`
set, `deriv` version. -/
theorem Set.OrdConnected.image_deriv {s : Set â} (hs : OrdConnected s)
    (hf : â x â s, DifferentiableAt â f x) : OrdConnected (deriv f '' s) :=
  hs.image_hasDerivWithinAt fun x hx => (hf x hx).hasDerivAt.hasDerivWithinAt

/-- **Darboux's theorem**: the image of a convex set under `f'` is a convex set,
`HasDerivWithinAt` version. -/
theorem Convex.image_hasDerivWithinAt {s : Set â} (hs : Convex â s)
    (hf : â x â s, HasDerivWithinAt f (f' x) s x) : Convex â (f' '' s) :=
  -- In `â`, a set is convex iff it is order-connected, so this reduces to the previous lemma.
  (hs.ordConnected.image_hasDerivWithinAt hf).convex

/-- **Darboux's theorem**: the image of a convex set under `f'` is a convex set,
`derivWithin` version. -/
theorem Convex.image_derivWithin {s : Set â} (hs : Convex â s) (hf : DifferentiableOn â f s) :
    Convex â (derivWithin f s '' s) :=
  (hs.ordConnected.image_derivWithin hf).convex

/-- **Darboux's theorem**: the image of a convex set under `f'` is a convex set,
`deriv` version. -/
theorem Convex.image_deriv {s : Set â} (hs : Convex â s) (hf : â x â s, DifferentiableAt â f x) :
    Convex â (deriv f '' s) :=
  (hs.ordConnected.image_deriv hf).convex
/-- **Darboux's theorem**: if `a †b` and `f' a †m †f' b`, then `f' c = m` for some
`c â [a, b]`. -/
theorem exists_hasDerivWithinAt_eq_of_ge_of_le (hab : a †b)
    (hf : â x â Icc a b, HasDerivWithinAt f (f' x) (Icc a b) x) {m : â} (hma : f' a †m)
    (hmb : m †f' b) : m â f' '' Icc a b :=
  -- `f' '' [a, b]` is order-connected and contains `f' a` and `f' b`, hence everything between.
  (ordConnected_Icc.image_hasDerivWithinAt hf).out (mem_image_of_mem _ (left_mem_Icc.2 hab))
    (mem_image_of_mem _ (right_mem_Icc.2 hab)) âšhma, hmbâ©
/-- **Darboux's theorem**: if `a †b` and `f' b †m †f' a`, then `f' c = m` for some
`c â [a, b]`. -/
theorem exists_hasDerivWithinAt_eq_of_le_of_ge (hab : a †b)
    (hf : â x â Icc a b, HasDerivWithinAt f (f' x) (Icc a b) x) {m : â} (hma : m †f' a)
    (hmb : f' b †m) : m â f' '' Icc a b :=
  -- The image `f' '' [a, b]` is order-connected; here `m` lies between `f' b` and `f' a`,
  -- so the membership arguments are given in the order `b`, `a` (cf. `..._of_ge_of_le`).
  (ordConnected_Icc.image_hasDerivWithinAt hf).out (mem_image_of_mem _ (right_mem_Icc.2 hab))
    (mem_image_of_mem _ (left_mem_Icc.2 hab)) âšhmb, hmaâ©
/-- If the derivative of a function is never equal to `m`, then either
it is always greater than `m`, or it is always less than `m`. -/
theorem hasDerivWithinAt_forall_lt_or_forall_gt_of_forall_ne {s : Set â} (hs : Convex â s)
    (hf : â x â s, HasDerivWithinAt f (f' x) s x) {m : â} (hf' : â x â s, f' x â m) :
    (â x â s, f' x < m) âš â x â s, m < f' x := by
  -- Contrapositive: if neither alternative holds, Darboux produces a point where `f' = m`.
  contrapose! hf'
  rcases hf' with âšâšb, hb, hmbâ©, âša, ha, hmaâ©â©
  exact (hs.ordConnected.image_hasDerivWithinAt hf).out (mem_image_of_mem f' ha)
    (mem_image_of_mem f' hb) âšhma, hmbâ©
|
Analysis\Calculus\DiffContOnCl.lean | /-
Copyright (c) 2022 Yury G. Kudryashov. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yury G. Kudryashov
-/
import Mathlib.Analysis.Calculus.Deriv.Inv
import Mathlib.Analysis.NormedSpace.Real
/-!
# Functions differentiable on a domain and continuous on its closure
Many theorems in complex analysis assume that a function is complex differentiable on a domain and
is continuous on its closure. In this file we define a predicate `DiffContOnCl` that expresses
this property and prove basic facts about this predicate.
-/
open Set Filter Metric
open scoped Topology
variable (ð : Type*) {E F G : Type*} [NontriviallyNormedField ð] [NormedAddCommGroup E]
[NormedAddCommGroup F] [NormedSpace ð E] [NormedSpace ð F] [NormedAddCommGroup G]
[NormedSpace ð G] {f g : E â F} {s t : Set E} {x : E}
/-- A predicate saying that a function is differentiable on a set and is continuous on its
closure. This is a common assumption in complex analysis. -/
structure DiffContOnCl (f : E â F) (s : Set E) : Prop where
  -- Differentiability on `s` itself (not on the closure).
  protected differentiableOn : DifferentiableOn ð f s
  -- Continuity on the closure of `s`.
  protected continuousOn : ContinuousOn f (closure s)

variable {ð}

/-- Differentiability on the closure implies `DiffContOnCl` (differentiability gives
continuity). -/
theorem DifferentiableOn.diffContOnCl (h : DifferentiableOn ð f (closure s)) : DiffContOnCl ð f s :=
  âšh.mono subset_closure, h.continuousOnâ©

/-- A globally differentiable function is `DiffContOnCl` on every set. -/
theorem Differentiable.diffContOnCl (h : Differentiable ð f) : DiffContOnCl ð f s :=
  âšh.differentiableOn, h.continuous.continuousOnâ©

/-- On a closed set the continuity condition is redundant. -/
theorem IsClosed.diffContOnCl_iff (hs : IsClosed s) : DiffContOnCl ð f s â DifferentiableOn ð f s :=
  âšfun h => h.differentiableOn, fun h => âšh, hs.closure_eq.symm âž h.continuousOnâ©â©

theorem diffContOnCl_univ : DiffContOnCl ð f univ â Differentiable ð f :=
  isClosed_univ.diffContOnCl_iff.trans differentiableOn_univ

theorem diffContOnCl_const {c : F} : DiffContOnCl ð (fun _ : E => c) s :=
  âšdifferentiableOn_const c, continuousOn_constâ©
namespace DiffContOnCl

/-- Composition of `DiffContOnCl` functions, provided `g` maps `t` into `s`. -/
theorem comp {g : G â E} {t : Set G} (hf : DiffContOnCl ð f s) (hg : DiffContOnCl ð g t)
    (h : MapsTo g t s) : DiffContOnCl ð (f â g) t :=
  âšhf.1.comp hg.1 h, hf.2.comp hg.2 <| h.closure_of_continuousOn hg.2â©

/-- A `DiffContOnCl` function on an open ball is continuous on the corresponding closed ball. -/
theorem continuousOn_ball [NormedSpace â E] {x : E} {r : â} (h : DiffContOnCl ð f (ball x r)) :
    ContinuousOn f (closedBall x r) := by
  rcases eq_or_ne r 0 with (rfl | hr)
  -- Degenerate case: `closedBall x 0 = {x}` while `ball x 0 = â
`, so argue directly.
  · rw [closedBall_zero]
    exact continuousOn_singleton f x
  -- For `r â 0` the closed ball is the closure of the open ball.
  · rw [â closure_ball x hr]
    exact h.continuousOn

/-- Build a `DiffContOnCl` on an open ball from differentiability on the ball and continuity on
the closed ball. -/
theorem mk_ball {x : E} {r : â} (hd : DifferentiableOn ð f (ball x r))
    (hc : ContinuousOn f (closedBall x r)) : DiffContOnCl ð f (ball x r) :=
  âšhd, hc.mono <| closure_ball_subset_closedBallâ©

protected theorem differentiableAt (h : DiffContOnCl ð f s) (hs : IsOpen s) (hx : x â s) :
    DifferentiableAt ð f x :=
  h.differentiableOn.differentiableAt <| hs.mem_nhds hx

theorem differentiableAt' (h : DiffContOnCl ð f s) (hx : s â ð x) : DifferentiableAt ð f x :=
  h.differentiableOn.differentiableAt hx

protected theorem mono (h : DiffContOnCl ð f s) (ht : t â s) : DiffContOnCl ð f t :=
  âšh.differentiableOn.mono ht, h.continuousOn.mono (closure_mono ht)â©

theorem add (hf : DiffContOnCl ð f s) (hg : DiffContOnCl ð g s) : DiffContOnCl ð (f + g) s :=
  âšhf.1.add hg.1, hf.2.add hg.2â©

theorem add_const (hf : DiffContOnCl ð f s) (c : F) : DiffContOnCl ð (fun x => f x + c) s :=
  hf.add diffContOnCl_const

theorem const_add (hf : DiffContOnCl ð f s) (c : F) : DiffContOnCl ð (fun x => c + f x) s :=
  diffContOnCl_const.add hf

theorem neg (hf : DiffContOnCl ð f s) : DiffContOnCl ð (-f) s :=
  âšhf.1.neg, hf.2.negâ©

theorem sub (hf : DiffContOnCl ð f s) (hg : DiffContOnCl ð g s) : DiffContOnCl ð (f - g) s :=
  âšhf.1.sub hg.1, hf.2.sub hg.2â©

theorem sub_const (hf : DiffContOnCl ð f s) (c : F) : DiffContOnCl ð (fun x => f x - c) s :=
  hf.sub diffContOnCl_const

theorem const_sub (hf : DiffContOnCl ð f s) (c : F) : DiffContOnCl ð (fun x => c - f x) s :=
  diffContOnCl_const.sub hf

theorem const_smul {R : Type*} [Semiring R] [Module R F] [SMulCommClass ð R F]
    [ContinuousConstSMul R F] (hf : DiffContOnCl ð f s) (c : R) : DiffContOnCl ð (c ⢠f) s :=
  âšhf.1.const_smul c, hf.2.const_smul câ©

theorem smul {ð' : Type*} [NontriviallyNormedField ð'] [NormedAlgebra ð ð'] [NormedSpace ð' F]
    [IsScalarTower ð ð' F] {c : E â ð'} {f : E â F} {s : Set E} (hc : DiffContOnCl ð c s)
    (hf : DiffContOnCl ð f s) : DiffContOnCl ð (fun x => c x ⢠f x) s :=
  âšhc.1.smul hf.1, hc.2.smul hf.2â©

theorem smul_const {ð' : Type*} [NontriviallyNormedField ð'] [NormedAlgebra ð ð']
    [NormedSpace ð' F] [IsScalarTower ð ð' F] {c : E â ð'} {s : Set E} (hc : DiffContOnCl ð c s)
    (y : F) : DiffContOnCl ð (fun x => c x ⢠y) s :=
  hc.smul diffContOnCl_const

/-- The inverse of a `DiffContOnCl` function that does not vanish on the closure. -/
theorem inv {f : E â ð} (hf : DiffContOnCl ð f s) (hâ : â x â closure s, f x â 0) :
    DiffContOnCl ð fâ»Â¹ s :=
  âšdifferentiableOn_inv.comp hf.1 fun _ hx => hâ _ (subset_closure hx), hf.2.invâ hââ©

end DiffContOnCl
/-- Post-composing a `DiffContOnCl` function with a globally differentiable map preserves the
predicate. -/
theorem Differentiable.comp_diffContOnCl {g : G â E} {t : Set G} (hf : Differentiable ð f)
    (hg : DiffContOnCl ð g t) : DiffContOnCl ð (f â g) t :=
  hf.diffContOnCl.comp hg (mapsTo_image _ _)

/-- A function differentiable on a set containing `closedBall c R` is `DiffContOnCl` on
`ball c R`. -/
theorem DifferentiableOn.diffContOnCl_ball {U : Set E} {c : E} {R : â} (hf : DifferentiableOn ð f U)
    (hc : closedBall c R â U) : DiffContOnCl ð f (ball c R) :=
  DiffContOnCl.mk_ball (hf.mono (ball_subset_closedBall.trans hc)) (hf.continuousOn.mono hc)
|
Analysis\Calculus\Dslope.lean | /-
Copyright (c) 2022 Yury Kudryashov. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yury Kudryashov
-/
import Mathlib.Analysis.Calculus.Deriv.Slope
import Mathlib.Analysis.Calculus.Deriv.Inv
/-!
# Slope of a differentiable function
Given a function `f : ð â E` from a nontrivially normed field to a normed space over this field,
`dslope f a b` is defined as `slope f a b = (b - a)â»Â¹ ⢠(f b - f a)` for `a â b` and as `deriv f a`
for `a = b`.
In this file we define `dslope` and prove some basic lemmas about its continuity and
differentiability.
-/
open scoped Topology Filter
open Function Set Filter
variable {ð E : Type*} [NontriviallyNormedField ð] [NormedAddCommGroup E] [NormedSpace ð E]
open Classical in
/-- `dslope f a b` is defined as `slope f a b = (b - a)â»Â¹ ⢠(f b - f a)` for `a â b` and
`deriv f a` for `a = b`. -/
noncomputable def dslope (f : ð â E) (a : ð) : ð â E :=
  update (slope f a) a (deriv f a)

/-- At the base point `a` itself, `dslope f a` takes the value `deriv f a`. -/
@[simp]
theorem dslope_same (f : ð â E) (a : ð) : dslope f a a = deriv f a := by
  classical
  exact update_same _ _ _

variable {f : ð â E} {a b : ð} {s : Set ð}

/-- Away from the base point, `dslope f a` agrees with `slope f a`. -/
theorem dslope_of_ne (f : ð â E) (h : b â a) : dslope f a b = slope f a b := by
  classical
  exact update_noteq h _ _
/-- A continuous linear map commutes with `dslope`, provided `g` is differentiable at `a`
whenever the two points coincide (so that `deriv` of the composition is defined). -/
theorem ContinuousLinearMap.dslope_comp {F : Type*} [NormedAddCommGroup F] [NormedSpace ð F]
    (f : E âL[ð] F) (g : ð â E) (a b : ð) (H : a = b â DifferentiableAt ð g a) :
    dslope (f â g) a b = f (dslope g a b) := by
  rcases eq_or_ne b a with (rfl | hne)
  -- Diagonal case: both sides reduce to derivatives; use the chain rule for `f â g`.
  · simp only [dslope_same]
    exact (f.hasFDerivAt.comp_hasDerivAt b (H rfl).hasDerivAt).deriv
  -- Off-diagonal case: `dslope` is `slope` and linear maps commute with `slope`.
  · simpa only [dslope_of_ne _ hne] using f.toLinearMap.slope_comp g a b

/-- `dslope f a` and `slope f a` agree on the complement of `{a}`. -/
theorem eqOn_dslope_slope (f : ð â E) (a : ð) : EqOn (dslope f a) (slope f a) {a}á¶ := fun _ =>
  dslope_of_ne f

theorem dslope_eventuallyEq_slope_of_ne (f : ð â E) (h : b â a) : dslope f a =á¶ [ð b] slope f a :=
  (eqOn_dslope_slope f a).eventuallyEq_of_mem (isOpen_ne.mem_nhds h)

theorem dslope_eventuallyEq_slope_punctured_nhds (f : ð â E) : dslope f a =á¶ [ð[â ] a] slope f a :=
  (eqOn_dslope_slope f a).eventuallyEq_of_mem self_mem_nhdsWithin

/-- `(b - a) ⢠dslope f a b = f b - f a`, including the degenerate case `b = a`. -/
@[simp]
theorem sub_smul_dslope (f : ð â E) (a b : ð) : (b - a) ⢠dslope f a b = f b - f a := by
  rcases eq_or_ne b a with (rfl | hne) <;> simp [dslope_of_ne, *]

theorem dslope_sub_smul_of_ne (f : ð â E) (h : b â a) :
    dslope (fun x => (x - a) ⢠f x) a b = f b := by
  rw [dslope_of_ne _ h, slope_sub_smul _ h.symm]

theorem eqOn_dslope_sub_smul (f : ð â E) (a : ð) :
    EqOn (dslope (fun x => (x - a) ⢠f x) a) f {a}ᶠ:= fun _ => dslope_sub_smul_of_ne f

theorem dslope_sub_smul [DecidableEq ð] (f : ð â E) (a : ð) :
    dslope (fun x => (x - a) ⢠f x) a = update f a (deriv (fun x => (x - a) ⢠f x) a) :=
  eq_update_iff.2 âšdslope_same _ _, eqOn_dslope_sub_smul f aâ©
/-- `dslope f a` is continuous at `a` iff `f` is differentiable at `a`. -/
@[simp]
theorem continuousAt_dslope_same : ContinuousAt (dslope f a) a â DifferentiableAt ð f a := by
  simp only [dslope, continuousAt_update_same, â hasDerivAt_deriv_iff, hasDerivAt_iff_tendsto_slope]

/-- Continuity of `dslope f a` transfers back to `f` via `f x = (x - a) ⢠dslope f a x + f a`. -/
theorem ContinuousWithinAt.of_dslope (h : ContinuousWithinAt (dslope f a) s b) :
    ContinuousWithinAt f s b := by
  have : ContinuousWithinAt (fun x => (x - a) ⢠dslope f a x + f a) s b :=
    ((continuousWithinAt_id.sub continuousWithinAt_const).smul h).add continuousWithinAt_const
  simpa only [sub_smul_dslope, sub_add_cancel] using this

theorem ContinuousAt.of_dslope (h : ContinuousAt (dslope f a) b) : ContinuousAt f b :=
  (continuousWithinAt_univ _ _).1 h.continuousWithinAt.of_dslope

theorem ContinuousOn.of_dslope (h : ContinuousOn (dslope f a) s) : ContinuousOn f s := fun x hx =>
  (h x hx).of_dslope

/-- Away from the base point, continuity of `dslope f a` is equivalent to continuity of `f`. -/
theorem continuousWithinAt_dslope_of_ne (h : b â a) :
    ContinuousWithinAt (dslope f a) s b â ContinuousWithinAt f s b := by
  refine âšContinuousWithinAt.of_dslope, fun hc => ?_â©
  classical
  simp only [dslope, continuousWithinAt_update_of_ne h]
  -- `slope f a x = (x - a)â»Â¹ ⢠(f x - f a)` is continuous when `x â a` and `f` is continuous.
  exact ((continuousWithinAt_id.sub continuousWithinAt_const).invâ (sub_ne_zero.2 h)).smul
    (hc.sub continuousWithinAt_const)

theorem continuousAt_dslope_of_ne (h : b â a) : ContinuousAt (dslope f a) b â ContinuousAt f b := by
  simp only [â continuousWithinAt_univ, continuousWithinAt_dslope_of_ne h]

/-- On a neighborhood of `a`, continuity of `dslope f a` is equivalent to continuity of `f`
together with differentiability of `f` at `a`. -/
theorem continuousOn_dslope (h : s â ð a) :
    ContinuousOn (dslope f a) s â ContinuousOn f s â§ DifferentiableAt ð f a := by
  refine âšfun hc => âšhc.of_dslope, continuousAt_dslope_same.1 <| hc.continuousAt hâ©, ?_â©
  rintro âšhc, hdâ© x hx
  rcases eq_or_ne x a with (rfl | hne)
  exacts [(continuousAt_dslope_same.2 hd).continuousWithinAt,
    (continuousWithinAt_dslope_of_ne hne).2 (hc x hx)]
/-- Differentiability of `dslope f a` transfers back to `f` via
`f x = (x - a) ⢠dslope f a x + f a`. -/
theorem DifferentiableWithinAt.of_dslope (h : DifferentiableWithinAt ð (dslope f a) s b) :
    DifferentiableWithinAt ð f s b := by
  simpa only [id, sub_smul_dslope f a, sub_add_cancel] using
    ((differentiableWithinAt_id.sub_const a).smul h).add_const (f a)

theorem DifferentiableAt.of_dslope (h : DifferentiableAt ð (dslope f a) b) :
    DifferentiableAt ð f b :=
  differentiableWithinAt_univ.1 h.differentiableWithinAt.of_dslope

theorem DifferentiableOn.of_dslope (h : DifferentiableOn ð (dslope f a) s) :
    DifferentiableOn ð f s := fun x hx => (h x hx).of_dslope

/-- Away from the base point, differentiability of `dslope f a` is equivalent to
differentiability of `f`. -/
theorem differentiableWithinAt_dslope_of_ne (h : b â a) :
    DifferentiableWithinAt ð (dslope f a) s b â DifferentiableWithinAt ð f s b := by
  refine âšDifferentiableWithinAt.of_dslope, fun hd => ?_â©
  -- `dslope f a` agrees with the differentiable expression `(x - a)â»Â¹ ⢠(f x - f a)` near `b`.
  refine (((differentiableWithinAt_id.sub_const a).inv (sub_ne_zero.2 h)).smul
    (hd.sub_const (f a))).congr_of_eventuallyEq ?_ (dslope_of_ne _ h)
  refine (eqOn_dslope_slope _ _).eventuallyEq_of_mem ?_
  exact mem_nhdsWithin_of_mem_nhds (isOpen_ne.mem_nhds h)

/-- On a set avoiding the base point, differentiability of `dslope f a` and of `f` coincide. -/
theorem differentiableOn_dslope_of_nmem (h : a â s) :
    DifferentiableOn ð (dslope f a) s â DifferentiableOn ð f s :=
  forall_congr' fun _ =>
    forall_congr' fun hx => differentiableWithinAt_dslope_of_ne <| ne_of_mem_of_not_mem hx h

theorem differentiableAt_dslope_of_ne (h : b â a) :
    DifferentiableAt ð (dslope f a) b â DifferentiableAt ð f b := by
  simp only [â differentiableWithinAt_univ, differentiableWithinAt_dslope_of_ne h]
|
Analysis\Calculus\FormalMultilinearSeries.lean | /-
Copyright (c) 2019 Sébastien Gouëzel. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Sébastien Gouëzel
-/
import Mathlib.Analysis.NormedSpace.Multilinear.Curry
/-!
# Formal multilinear series
In this file we define `FormalMultilinearSeries ð E F` to be a family of `n`-multilinear maps for
all `n`, designed to model the sequence of derivatives of a function. In other files we use this
notion to define `C^n` functions (called `contDiff` in `mathlib`) and analytic functions.
## Notations
We use the notation `E [Ãn]âL[ð] F` for the space of continuous multilinear maps on `E^n` with
values in `F`. This is the space in which the `n`-th derivative of a function from `E` to `F` lives.
## Tags
multilinear, formal series
-/
noncomputable section
open Set Fin Topology
-- Porting note: added explicit universes to fix compile
universe u u' v w x
variable {ð : Type u} {ð' : Type u'} {E : Type v} {F : Type w} {G : Type x}
section
variable [Ring ð] [AddCommGroup E] [Module ð E] [TopologicalSpace E] [TopologicalAddGroup E]
[ContinuousConstSMul ð E] [AddCommGroup F] [Module ð F] [TopologicalSpace F]
[TopologicalAddGroup F] [ContinuousConstSMul ð F] [AddCommGroup G] [Module ð G]
[TopologicalSpace G] [TopologicalAddGroup G] [ContinuousConstSMul ð G]
/-- A formal multilinear series over a field `ð`, from `E` to `F`, is given by a family of
multilinear maps from `E^n` to `F` for all `n`. -/
@[nolint unusedArguments]
def FormalMultilinearSeries (ð : Type*) (E : Type*) (F : Type*) [Ring ð] [AddCommGroup E]
    [Module ð E] [TopologicalSpace E] [TopologicalAddGroup E] [ContinuousConstSMul ð E]
    [AddCommGroup F] [Module ð F] [TopologicalSpace F] [TopologicalAddGroup F]
    [ContinuousConstSMul ð F] :=
  â n : â, E[Ãn]âL[ð] F

-- Porting note: was `deriving`
-- The algebraic structure is inherited pointwise from the product of multilinear-map spaces.
instance : AddCommGroup (FormalMultilinearSeries ð E F) :=
  inferInstanceAs <| AddCommGroup <| â n : â, E[Ãn]âL[ð] F

instance : Inhabited (FormalMultilinearSeries ð E F) :=
  âš0â©

section Module

instance (ð') [Semiring ð'] [Module ð' F] [ContinuousConstSMul ð' F] [SMulCommClass ð ð' F] :
    Module ð' (FormalMultilinearSeries ð E F) :=
  inferInstanceAs <| Module ð' <| â n : â, E[Ãn]âL[ð] F

end Module
namespace FormalMultilinearSeries

#adaptation_note
/--
After https://github.com/leanprover/lean4/pull/4481
the `simpNF` linter incorrectly claims this lemma can't be applied by `simp`.
-/
@[simp, nolint simpNF]
theorem zero_apply (n : â) : (0 : FormalMultilinearSeries ð E F) n = 0 := rfl

@[simp]
theorem neg_apply (f : FormalMultilinearSeries ð E F) (n : â) : (-f) n = - f n := rfl

@[ext]
protected theorem ext {p q : FormalMultilinearSeries ð E F} (h : â n, p n = q n) : p = q :=
  funext h

protected theorem ne_iff {p q : FormalMultilinearSeries ð E F} : p â q â â n, p n â q n :=
  Function.ne_iff

/-- Cartesian product of two formal multilinear series (with the same field `ð` and the same source
space, but possibly different target spaces). -/
def prod (p : FormalMultilinearSeries ð E F) (q : FormalMultilinearSeries ð E G) :
    FormalMultilinearSeries ð E (F Ã G)
  | n => (p n).prod (q n)

/-- Killing the zeroth coefficient in a formal multilinear series -/
def removeZero (p : FormalMultilinearSeries ð E F) : FormalMultilinearSeries ð E F
  | 0 => 0
  | n + 1 => p (n + 1)

@[simp]
theorem removeZero_coeff_zero (p : FormalMultilinearSeries ð E F) : p.removeZero 0 = 0 :=
  rfl

@[simp]
theorem removeZero_coeff_succ (p : FormalMultilinearSeries ð E F) (n : â) :
    p.removeZero (n + 1) = p (n + 1) :=
  rfl

theorem removeZero_of_pos (p : FormalMultilinearSeries ð E F) {n : â} (h : 0 < n) :
    p.removeZero n = p n := by
  -- Rewrite `n` as a successor so that the second equation of `removeZero` applies.
  rw [â Nat.succ_pred_eq_of_pos h]
  rfl

/-- Convenience congruence lemma stating in a dependent setting that, if the arguments to a formal
multilinear series are equal, then the values are also equal. -/
theorem congr (p : FormalMultilinearSeries ð E F) {m n : â} {v : Fin m â E} {w : Fin n â E}
    (h1 : m = n) (h2 : â (i : â) (him : i < m) (hin : i < n), v âši, himâ© = w âši, hinâ©) :
    p m v = p n w := by
  subst n
  congr with âši, hiâ©
  exact h2 i hi hi

/-- Composing each term `pâ` in a formal multilinear series with `(u, ..., u)` where `u` is a fixed
continuous linear map, gives a new formal multilinear series `p.compContinuousLinearMap u`. -/
def compContinuousLinearMap (p : FormalMultilinearSeries ð F G) (u : E âL[ð] F) :
    FormalMultilinearSeries ð E G := fun n => (p n).compContinuousLinearMap fun _ : Fin n => u

@[simp]
theorem compContinuousLinearMap_apply (p : FormalMultilinearSeries ð F G) (u : E âL[ð] F) (n : â)
    (v : Fin n â E) : (p.compContinuousLinearMap u) n v = p n (u â v) :=
  rfl

variable (ð) [Ring ð'] [SMul ð ð']
variable [Module ð' E] [ContinuousConstSMul ð' E] [IsScalarTower ð ð' E]
variable [Module ð' F] [ContinuousConstSMul ð' F] [IsScalarTower ð ð' F]

/-- Reinterpret a formal `ð'`-multilinear series as a formal `ð`-multilinear series. -/
@[simp]
protected def restrictScalars (p : FormalMultilinearSeries ð' E F) :
    FormalMultilinearSeries ð E F := fun n => (p n).restrictScalars ð

end FormalMultilinearSeries

end
namespace FormalMultilinearSeries

variable [NontriviallyNormedField ð] [NormedAddCommGroup E] [NormedSpace ð E] [NormedAddCommGroup F]
  [NormedSpace ð F] [NormedAddCommGroup G] [NormedSpace ð G]
variable (p : FormalMultilinearSeries ð E F)

/-- Forgetting the zeroth term in a formal multilinear series, and interpreting the following terms
as multilinear maps into `E âL[ð] F`. If `p` is the Taylor series (`HasFTaylorSeriesUpTo`) of a
function, then `p.shift` is the Taylor series of the derivative of the function. Note that the
`p.sum` of a Taylor series `p` does not give the original function; for a formal multilinear
series that sums to the derivative of `p.sum`, see `HasFPowerSeriesOnBall.fderiv`. -/
def shift : FormalMultilinearSeries ð E (E âL[ð] F) := fun n => (p n.succ).curryRight

/-- Adding a zeroth term to a formal multilinear series taking values in `E âL[ð] F`. This
corresponds to starting from a Taylor series (`HasFTaylorSeriesUpTo`) for the derivative of a
function, and building a Taylor series for the function itself. -/
def unshift (q : FormalMultilinearSeries ð E (E âL[ð] F)) (z : F) : FormalMultilinearSeries ð E F
  | 0 => (continuousMultilinearCurryFin0 ð E F).symm z
  | n + 1 => -- Porting note: added type hint here and explicit universes to fix compile
    (continuousMultilinearCurryRightEquiv' ð n E F :
      (E [Ãn]âL[ð] E âL[ð] F) â (E [Ãn.succ]âL[ð] F)) (q n)

end FormalMultilinearSeries
section

variable [Ring ð] [AddCommGroup E] [Module ð E] [TopologicalSpace E] [TopologicalAddGroup E]
  [ContinuousConstSMul ð E] [AddCommGroup F] [Module ð F] [TopologicalSpace F]
  [TopologicalAddGroup F] [ContinuousConstSMul ð F] [AddCommGroup G] [Module ð G]
  [TopologicalSpace G] [TopologicalAddGroup G] [ContinuousConstSMul ð G]

namespace ContinuousLinearMap

/-- Composing each term `pâ` in a formal multilinear series with a continuous linear map `f` on the
left gives a new formal multilinear series `f.compFormalMultilinearSeries p` whose general term
is `f â pâ`. -/
def compFormalMultilinearSeries (f : F âL[ð] G) (p : FormalMultilinearSeries ð E F) :
    FormalMultilinearSeries ð E G := fun n => f.compContinuousMultilinearMap (p n)

@[simp]
theorem compFormalMultilinearSeries_apply (f : F âL[ð] G) (p : FormalMultilinearSeries ð E F)
    (n : â) : (f.compFormalMultilinearSeries p) n = f.compContinuousMultilinearMap (p n) :=
  rfl

theorem compFormalMultilinearSeries_apply' (f : F âL[ð] G) (p : FormalMultilinearSeries ð E F)
    (n : â) (v : Fin n â E) : (f.compFormalMultilinearSeries p) n v = f (p n v) :=
  rfl

end ContinuousLinearMap

namespace ContinuousMultilinearMap

variable {ι : Type*} {E : ι â Type*} [â i, AddCommGroup (E i)] [â i, Module ð (E i)]
  [â i, TopologicalSpace (E i)] [â i, TopologicalAddGroup (E i)]
  [â i, ContinuousConstSMul ð (E i)] [Fintype ι] (f : ContinuousMultilinearMap ð E F)

/-- Realize a ContinuousMultilinearMap on `â i : ι, E i` as the evaluation of a
FormalMultilinearSeries by choosing an arbitrary identification `ι â Fin (Fintype.card ι)`. -/
noncomputable def toFormalMultilinearSeries : FormalMultilinearSeries ð (â i, E i) F :=
  -- Only the term of degree `Fintype.card ι` is nonzero; all other degrees are `0`.
  fun n ⊠if h : Fintype.card ι = n then
    (f.compContinuousLinearMap .proj).domDomCongr (Fintype.equivFinOfCardEq h)
  else 0

end ContinuousMultilinearMap

end
namespace FormalMultilinearSeries

section Order

variable [Ring ð] {n : â} [AddCommGroup E] [Module ð E] [TopologicalSpace E]
  [TopologicalAddGroup E] [ContinuousConstSMul ð E] [AddCommGroup F] [Module ð F]
  [TopologicalSpace F] [TopologicalAddGroup F] [ContinuousConstSMul ð F]
  {p : FormalMultilinearSeries ð E F}

/-- The index of the first non-zero coefficient in `p` (or `0` if all coefficients are zero). This
is the order of the isolated zero of an analytic function `f` at a point if `p` is the Taylor
series of `f` at that point. -/
noncomputable def order (p : FormalMultilinearSeries ð E F) : â :=
  sInf { n | p n â 0 }

@[simp]
theorem order_zero : (0 : FormalMultilinearSeries ð E F).order = 0 := by simp [order]

theorem ne_zero_of_order_ne_zero (hp : p.order â 0) : p â 0 := fun h => by simp [h] at hp

-- `sInf` over a nonempty decidable set agrees with `Nat.find`.
theorem order_eq_find [DecidablePred fun n => p n â 0] (hp : â n, p n â 0) :
    p.order = Nat.find hp := by convert Nat.sInf_def hp

theorem order_eq_find' [DecidablePred fun n => p n â 0] (hp : p â 0) :
    p.order = Nat.find (FormalMultilinearSeries.ne_iff.mp hp) :=
  order_eq_find _

/-- `p.order = 0` iff `p` vanishes identically or has nonzero constant term (recall that `sInf` of
the empty set is `0`). -/
theorem order_eq_zero_iff' : p.order = 0 â p = 0 âš p 0 â 0 := by
  simpa [order, Nat.sInf_eq_zero, FormalMultilinearSeries.ext_iff, eq_empty_iff_forall_not_mem]
    using or_comm

theorem order_eq_zero_iff (hp : p â 0) : p.order = 0 â p 0 â 0 := by
  simp [order_eq_zero_iff', hp]

/-- For `p â 0` the coefficient at index `p.order` is nonzero. -/
theorem apply_order_ne_zero (hp : p â 0) : p p.order â 0 :=
  Nat.sInf_mem (FormalMultilinearSeries.ne_iff.1 hp)

theorem apply_order_ne_zero' (hp : p.order â 0) : p p.order â 0 :=
  apply_order_ne_zero (ne_zero_of_order_ne_zero hp)

/-- All coefficients below the order vanish. -/
theorem apply_eq_zero_of_lt_order (hp : n < p.order) : p n = 0 :=
  by_contra <| Nat.not_mem_of_lt_sInf hp

end Order

section Coef

variable [NontriviallyNormedField ð] [NormedAddCommGroup E] [NormedSpace ð E] {s : E}
  {p : FormalMultilinearSeries ð ð E} {f : ð â E} {n : â} {z zâ : ð} {y : Fin n â ð}

/-- The `n`th coefficient of `p` when seen as a power series. -/
def coeff (p : FormalMultilinearSeries ð ð E) (n : â) : E :=
  p n 1

/-- A multilinear map on `ðâ¿` is determined by its value at `(1, ..., 1)`. -/
theorem mkPiRing_coeff_eq (p : FormalMultilinearSeries ð ð E) (n : â) :
    ContinuousMultilinearMap.mkPiRing ð (Fin n) (p.coeff n) = p n :=
  (p n).mkPiRing_apply_one_eq_self

@[simp]
theorem apply_eq_prod_smul_coeff : p n y = (â i, y i) ⢠p.coeff n := by
  convert (p n).toMultilinearMap.map_smul_univ y 1
  simp only [Pi.one_apply, Algebra.id.smul_eq_mul, mul_one]

theorem coeff_eq_zero : p.coeff n = 0 â p n = 0 := by
  rw [â mkPiRing_coeff_eq p, ContinuousMultilinearMap.mkPiRing_eq_zero_iff]

theorem apply_eq_pow_smul_coeff : (p n fun _ => z) = z ^ n ⢠p.coeff n := by simp

@[simp]
theorem norm_apply_eq_norm_coef : âp nâ = âcoeff p nâ := by
  rw [â mkPiRing_coeff_eq p, ContinuousMultilinearMap.norm_mkPiRing]

end Coef

section Fslope

variable [NontriviallyNormedField ð] [NormedAddCommGroup E] [NormedSpace ð E]
  {p : FormalMultilinearSeries ð ð E} {n : â}

/-- The formal counterpart of `dslope`, corresponding to the expansion of `(f z - f 0) / z`. If `f`
has `p` as a power series, then `dslope f` has `fslope p` as a power series. -/
noncomputable def fslope (p : FormalMultilinearSeries ð ð E) : FormalMultilinearSeries ð ð E :=
  fun n => (p (n + 1)).curryLeft 1

/-- `fslope` shifts the coefficients of a power series down by one. -/
@[simp]
theorem coeff_fslope : p.fslope.coeff n = p.coeff (n + 1) := by
  simp only [fslope, coeff, ContinuousMultilinearMap.curryLeft_apply]
  congr 1
  exact Fin.cons_self_tail 1

@[simp]
theorem coeff_iterate_fslope (k n : â) : (fslope^[k] p).coeff n = p.coeff (n + k) := by
  induction k generalizing p with
  | zero => rfl
  | succ k ih => simp [ih, add_assoc]

end Fslope

end FormalMultilinearSeries
section Const

/-- The formal multilinear series where all terms of positive degree are equal to zero, and the term
of degree zero is `c`. It is the power series expansion of the constant function equal to `c`
everywhere. -/
def constFormalMultilinearSeries (ð : Type*) [NontriviallyNormedField ð] (E : Type*)
    [NormedAddCommGroup E] [NormedSpace ð E] [ContinuousConstSMul ð E] [TopologicalAddGroup E]
    {F : Type*} [NormedAddCommGroup F] [TopologicalAddGroup F] [NormedSpace ð F]
    [ContinuousConstSMul ð F] (c : F) : FormalMultilinearSeries ð E F
  | 0 => ContinuousMultilinearMap.curry0 _ _ c
  | _ => 0

@[simp]
theorem constFormalMultilinearSeries_apply [NontriviallyNormedField ð] [NormedAddCommGroup E]
    [NormedAddCommGroup F] [NormedSpace ð E] [NormedSpace ð F] {c : F} {n : â} (hn : n â 0) :
    constFormalMultilinearSeries ð E c n = 0 :=
  -- Case on `n`: the `0` case is refuted by `hn`, the successor case holds definitionally.
  Nat.casesOn n (fun hn => (hn rfl).elim) (fun _ _ => rfl) hn

@[simp]
lemma constFormalMultilinearSeries_zero [NontriviallyNormedField ð] [NormedAddCommGroup E ]
    [NormedAddCommGroup F] [NormedSpace ð E] [NormedSpace ð F] :
    constFormalMultilinearSeries ð E (0 : F) = 0 := by
  ext n x
  simp only [FormalMultilinearSeries.zero_apply, ContinuousMultilinearMap.zero_apply,
    constFormalMultilinearSeries]
  -- Both equations of the definition evaluate to `0` when `c = 0`.
  induction n
  · simp only [Nat.zero_eq, ContinuousMultilinearMap.curry0_apply]
  · simp only [constFormalMultilinearSeries.match_1.eq_2, ContinuousMultilinearMap.zero_apply]

end Const
section Linear

variable [NontriviallyNormedField ð]
  [NormedAddCommGroup E] [NormedSpace ð E]
  [NormedAddCommGroup F] [NormedSpace ð F]

namespace ContinuousLinearMap

/-- Formal power series of a continuous linear map `f : E âL[ð] F` at `x : E`:
`f y = f x + f (y - x)`. -/
def fpowerSeries (f : E âL[ð] F) (x : E) : FormalMultilinearSeries ð E F
  | 0 => ContinuousMultilinearMap.curry0 ð _ (f x)
  | 1 => (continuousMultilinearCurryFin1 ð E F).symm f
  | _ => 0

/-- The degree-zero term of `fpowerSeries f x` is the constant `f x`. -/
theorem fpowerSeries_apply_zero (f : E âL[ð] F) (x : E) :
    f.fpowerSeries x 0 = ContinuousMultilinearMap.curry0 ð _ (f x) :=
  rfl

/-- The degree-one term of `fpowerSeries f x` is `f` itself. -/
theorem fpowerSeries_apply_one (f : E âL[ð] F) (x : E) :
    f.fpowerSeries x 1 = (continuousMultilinearCurryFin1 ð E F).symm f :=
  rfl

/-- All terms of degree two or more of `fpowerSeries f x` vanish. -/
theorem fpowerSeries_apply_add_two (f : E âL[ð] F) (x : E) (n : â) : f.fpowerSeries x (n + 2) = 0 :=
  rfl

-- Register the three lemmas above as the defining equations of `fpowerSeries`.
attribute
  [eqns fpowerSeries_apply_zero fpowerSeries_apply_one fpowerSeries_apply_add_two] fpowerSeries

attribute [simp] fpowerSeries

end ContinuousLinearMap

end Linear
|
Analysis\Calculus\Implicit.lean | /-
Copyright (c) 2020 Yury Kudryashov. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yury Kudryashov
-/
import Mathlib.Analysis.Calculus.InverseFunctionTheorem.FDeriv
import Mathlib.Analysis.Calculus.FDeriv.Add
import Mathlib.Analysis.Calculus.FDeriv.Prod
import Mathlib.Analysis.Normed.Module.Complemented
/-!
# Implicit function theorem
We prove three versions of the implicit function theorem. First we define a structure
`ImplicitFunctionData` that holds arguments for the most general version of the implicit function
theorem, see `ImplicitFunctionData.implicitFunction` and
`ImplicitFunctionData.implicitFunction_hasStrictFDerivAt`. This version allows a user to choose a
specific implicit function but provides only a little convenience over the inverse function theorem.
Then we define `HasStrictFDerivAt.implicitFunctionDataOfComplemented`: implicit function defined by
`f (g z y) = z`, where `f : E â F` is a function strictly differentiable at `a` such that its
derivative `f'` is surjective and has a `complemented` kernel.
Finally, if the codomain of `f` is a finite dimensional space, then we can automatically prove
that the kernel of `f'` is complemented, hence the only assumptions are `HasStrictFDerivAt`
and `f'.range = â€`. This version is named `HasStrictFDerivAt.implicitFunction`.
## TODO
* Add a version for a function `f : E Ã F â G` such that $$\frac{\partial f}{\partial y}$$ is
invertible.
* Add a version for `f : ð Ã ð â ð` proving `HasStrictDerivAt` and `deriv Ï = ...`.
* Prove that in a real vector space the implicit function has the same smoothness as the original
one.
* If the original function is differentiable in a neighborhood, then the implicit function is
differentiable in a neighborhood as well. Current setup only proves differentiability at one
point for the implicit function constructed in this file (as opposed to an unspecified implicit
function). One of the ways to overcome this difficulty is to use uniqueness of the implicit
function in the general version of the theorem. Another way is to prove that *any* implicit
function satisfying some predicate is strictly differentiable.
## Tags
implicit function, inverse function
-/
noncomputable section
open scoped Topology
open Filter
open ContinuousLinearMap (fst snd smulRight ker_prod)
open ContinuousLinearEquiv (ofBijective)
open LinearMap (ker range)
/-!
### General version
Consider two functions `f : E â F` and `g : E â G` and a point `a` such that
* both functions are strictly differentiable at `a`;
* the derivatives are surjective;
* the kernels of the derivatives are complementary subspaces of `E`.
Note that the map `x ⊠(f x, g x)` has a bijective derivative, hence it is a partial homeomorphism
between `E` and `F Ã G`. We use this fact to define a function `Ï : F â G â E`
(see `ImplicitFunctionData.implicitFunction`) such that for `(y, z)` close enough to `(f a, g a)`
we have `f (Ï y z) = y` and `g (Ï y z) = z`.
We also prove a formula for $$\frac{\partial\varphi}{\partial z}.$$
Though this statement is almost symmetric with respect to `F`, `G`, we interpret it in the following
way. Consider a family of surfaces `{x | f x = y}`, `y â ð (f a)`. Each of these surfaces is
parametrized by `Ï y`.
There are many ways to choose a (differentiable) function `Ï` such that `f (Ï y z) = y` but the
extra condition `g (Ï y z) = z` allows a user to select one of these functions. If we imagine
that the level surfaces `f = const` form a local horizontal foliation, then the choice of
`g` fixes a transverse foliation `g = const`, and `Ï` is the inverse function of the projection
of `{x | f x = y}` along this transverse foliation.
This version of the theorem is used to prove the other versions and can be used if a user
needs to have a complete control over the choice of the implicit function.
-/
/-- Data for the general version of the implicit function theorem. It holds two functions
`f : E â F` and `g : E â G` (named `leftFun` and `rightFun`) and a point `a` (named `pt`) such that
* both functions are strictly differentiable at `a`;
* the derivatives are surjective;
* the kernels of the derivatives are complementary subspaces of `E`. -/
-- Porting note(#5171): linter not yet ported @[nolint has_nonempty_instance]
structure ImplicitFunctionData (ð : Type*) [NontriviallyNormedField ð] (E : Type*)
    [NormedAddCommGroup E] [NormedSpace ð E] [CompleteSpace E] (F : Type*) [NormedAddCommGroup F]
    [NormedSpace ð F] [CompleteSpace F] (G : Type*) [NormedAddCommGroup G] [NormedSpace ð G]
    [CompleteSpace G] where
  /-- The first map, `f` in the informal description above. -/
  leftFun : E â F
  /-- The derivative of `leftFun` at `pt`. -/
  leftDeriv : E âL[ð] F
  /-- The second map, `g` in the informal description above. -/
  rightFun : E â G
  /-- The derivative of `rightFun` at `pt`. -/
  rightDeriv : E âL[ð] G
  /-- The base point at which both maps are strictly differentiable. -/
  pt : E
  /-- `leftFun` is strictly differentiable at `pt` with derivative `leftDeriv`. -/
  left_has_deriv : HasStrictFDerivAt leftFun leftDeriv pt
  /-- `rightFun` is strictly differentiable at `pt` with derivative `rightDeriv`. -/
  right_has_deriv : HasStrictFDerivAt rightFun rightDeriv pt
  /-- `leftDeriv` is surjective. -/
  left_range : range leftDeriv = â€
  /-- `rightDeriv` is surjective. -/
  right_range : range rightDeriv = â€
  /-- The kernels of the two derivatives are complementary subspaces of `E`. -/
  isCompl_ker : IsCompl (ker leftDeriv) (ker rightDeriv)
namespace ImplicitFunctionData

variable {ð : Type*} [NontriviallyNormedField ð] {E : Type*} [NormedAddCommGroup E]
  [NormedSpace ð E] [CompleteSpace E] {F : Type*} [NormedAddCommGroup F] [NormedSpace ð F]
  [CompleteSpace F] {G : Type*} [NormedAddCommGroup G] [NormedSpace ð G] [CompleteSpace G]
  (Ï : ImplicitFunctionData ð E F G)

/-- The function given by `x ⊠(leftFun x, rightFun x)`. -/
def prodFun (x : E) : F Ã G :=
  (Ï.leftFun x, Ï.rightFun x)

@[simp]
theorem prodFun_apply (x : E) : Ï.prodFun x = (Ï.leftFun x, Ï.rightFun x) :=
  rfl

/-- `prodFun` is strictly differentiable at `pt`; its derivative is the continuous linear
equivalence built from the two surjective derivatives with complementary kernels. -/
protected theorem hasStrictFDerivAt :
    HasStrictFDerivAt Ï.prodFun
      (Ï.leftDeriv.equivProdOfSurjectiveOfIsCompl Ï.rightDeriv Ï.left_range Ï.right_range
          Ï.isCompl_ker :
        E âL[ð] F Ã G)
      Ï.pt :=
  Ï.left_has_deriv.prod Ï.right_has_deriv

/-- Implicit function theorem. If `f : E â F` and `g : E â G` are two maps strictly differentiable
at `a`, their derivatives `f'`, `g'` are surjective, and the kernels of these derivatives are
complementary subspaces of `E`, then `x ⊠(f x, g x)` defines a partial homeomorphism between
`E` and `F Ã G`. In particular, `{x | f x = f a}` is locally homeomorphic to `G`. -/
def toPartialHomeomorph : PartialHomeomorph E (F Ã G) :=
  Ï.hasStrictFDerivAt.toPartialHomeomorph _

/-- Implicit function theorem. If `f : E â F` and `g : E â G` are two maps strictly differentiable
at `a`, their derivatives `f'`, `g'` are surjective, and the kernels of these derivatives are
complementary subspaces of `E`, then `implicitFunction` is the unique (germ of a) map
`Ï : F â G â E` such that `f (Ï y z) = y` and `g (Ï y z) = z`. -/
def implicitFunction : F â G â E :=
  Function.curry <| Ï.toPartialHomeomorph.symm

@[simp]
theorem toPartialHomeomorph_coe : âÏ.toPartialHomeomorph = Ï.prodFun :=
  rfl

theorem toPartialHomeomorph_apply (x : E) : Ï.toPartialHomeomorph x = (Ï.leftFun x, Ï.rightFun x) :=
  rfl

/-- The base point lies in the source of `toPartialHomeomorph`. -/
theorem pt_mem_toPartialHomeomorph_source : Ï.pt â Ï.toPartialHomeomorph.source :=
  Ï.hasStrictFDerivAt.mem_toPartialHomeomorph_source

/-- The image of the base point lies in the target of `toPartialHomeomorph`. -/
theorem map_pt_mem_toPartialHomeomorph_target :
    (Ï.leftFun Ï.pt, Ï.rightFun Ï.pt) â Ï.toPartialHomeomorph.target :=
  Ï.toPartialHomeomorph.map_source <| Ï.pt_mem_toPartialHomeomorph_source

/-- Near `prodFun pt`, the implicit function is a right inverse of `prodFun`. -/
theorem prod_map_implicitFunction :
    âá¶ p : F Ã G in ð (Ï.prodFun Ï.pt), Ï.prodFun (Ï.implicitFunction p.1 p.2) = p :=
  Ï.hasStrictFDerivAt.eventually_right_inverse.mono fun âš_, _â© h => h

/-- Near `prodFun pt`, `leftFun` recovers the first coordinate of the implicit function's input. -/
theorem left_map_implicitFunction :
    âá¶ p : F Ã G in ð (Ï.prodFun Ï.pt), Ï.leftFun (Ï.implicitFunction p.1 p.2) = p.1 :=
  Ï.prod_map_implicitFunction.mono fun _ => congr_arg Prod.fst

/-- Near `prodFun pt`, `rightFun` recovers the second coordinate of the implicit function's
input. -/
theorem right_map_implicitFunction :
    âá¶ p : F Ã G in ð (Ï.prodFun Ï.pt), Ï.rightFun (Ï.implicitFunction p.1 p.2) = p.2 :=
  Ï.prod_map_implicitFunction.mono fun _ => congr_arg Prod.snd

/-- Near `pt`, the implicit function is a left inverse of `x ⊠(leftFun x, rightFun x)`. -/
theorem implicitFunction_apply_image :
    âá¶ x in ð Ï.pt, Ï.implicitFunction (Ï.leftFun x) (Ï.rightFun x) = x :=
  Ï.hasStrictFDerivAt.eventually_left_inverse

/-- `leftFun` maps a neighborhood of `pt` onto a neighborhood of `leftFun pt`. -/
theorem map_nhds_eq : map Ï.leftFun (ð Ï.pt) = ð (Ï.leftFun Ï.pt) :=
  show map (Prod.fst â Ï.prodFun) (ð Ï.pt) = ð (Ï.prodFun Ï.pt).1 by
    rw [â map_map, Ï.hasStrictFDerivAt.map_nhds_eq_of_equiv, map_fst_nhds]

/-- The implicit function `Ï.implicitFunction (leftFun pt)` is strictly differentiable at
`rightFun pt`; its derivative is any given right inverse `g'inv` of `rightDeriv` that is killed
by `leftDeriv`. -/
theorem implicitFunction_hasStrictFDerivAt (g'inv : G âL[ð] E)
    (hg'inv : Ï.rightDeriv.comp g'inv = ContinuousLinearMap.id ð G)
    (hg'invf : Ï.leftDeriv.comp g'inv = 0) :
    HasStrictFDerivAt (Ï.implicitFunction (Ï.leftFun Ï.pt)) g'inv (Ï.rightFun Ï.pt) := by
  have := Ï.hasStrictFDerivAt.to_localInverse
  simp only [prodFun] at this
  convert this.comp (Ï.rightFun Ï.pt) ((hasStrictFDerivAt_const _ _).prod (hasStrictFDerivAt_id _))
  -- Porting note: added parentheses to help `simp`
  simp only [ContinuousLinearMap.ext_iff, (ContinuousLinearMap.comp_apply)] at hg'inv hg'invf â¢
  -- porting note (#10745): was `simp [ContinuousLinearEquiv.eq_symm_apply]`;
  -- both `simp` and `rw` fail here, `erw` works
  intro x
  erw [ContinuousLinearEquiv.eq_symm_apply]
  simp [*]

end ImplicitFunctionData
namespace HasStrictFDerivAt
section Complemented
/-!
### Case of a complemented kernel
In this section we prove the following version of the implicit function theorem. Consider a map
`f : E â F` and a point `a : E` such that `f` is strictly differentiable at `a`, its derivative `f'`
is surjective and the kernel of `f'` is a complemented subspace of `E` (i.e., it has a closed
complementary subspace). Then there exists a function `Ï : F â ker f' â E` such that for `(y, z)`
close to `(f a, 0)` we have `f (Ï y z) = y` and the derivative of `Ï (f a)` at zero is the
embedding `ker f' â E`.
Note that a map with these properties is not unique. E.g., different choices of a subspace
complementary to `ker f'` lead to different maps `Ï`.
-/
variable {ð : Type*} [NontriviallyNormedField ð] {E : Type*} [NormedAddCommGroup E]
[NormedSpace ð E] [CompleteSpace E] {F : Type*} [NormedAddCommGroup F] [NormedSpace ð F]
[CompleteSpace F] {f : E â F} {f' : E âL[ð] F} {a : E}
section Defs

variable (f f')

/-- Data used to apply the generic implicit function theorem to the case of a strictly
differentiable map such that its derivative is surjective and has a complemented kernel. -/
@[simp]
def implicitFunctionDataOfComplemented (hf : HasStrictFDerivAt f f' a) (hf' : range f' = â€)
    (hker : (ker f').ClosedComplemented) : ImplicitFunctionData ð E F (ker f') where
  leftFun := f
  leftDeriv := f'
  -- `Classical.choose hker` is the continuous projection of `E` onto `ker f'` provided by
  -- the `ClosedComplemented` hypothesis; we apply it to the displacement from `a`.
  rightFun x := Classical.choose hker (x - a)
  rightDeriv := Classical.choose hker
  pt := a
  left_has_deriv := hf
  right_has_deriv :=
    (Classical.choose hker).hasStrictFDerivAt.comp a ((hasStrictFDerivAt_id a).sub_const a)
  left_range := hf'
  right_range := LinearMap.range_eq_of_proj (Classical.choose_spec hker)
  isCompl_ker := LinearMap.isCompl_of_proj (Classical.choose_spec hker)

/-- A partial homeomorphism between `E` and `F Ã f'.ker` sending level surfaces of `f`
to vertical subspaces. -/
def implicitToPartialHomeomorphOfComplemented (hf : HasStrictFDerivAt f f' a) (hf' : range f' = â€)
    (hker : (ker f').ClosedComplemented) : PartialHomeomorph E (F Ã ker f') :=
  (implicitFunctionDataOfComplemented f f' hf hf' hker).toPartialHomeomorph

/-- Implicit function `g` defined by `f (g z y) = z`. -/
def implicitFunctionOfComplemented (hf : HasStrictFDerivAt f f' a) (hf' : range f' = â€)
    (hker : (ker f').ClosedComplemented) : F â ker f' â E :=
  (implicitFunctionDataOfComplemented f f' hf hf' hker).implicitFunction

end Defs
/-- The first component of the partial homeomorphism is `f` itself. -/
@[simp]
theorem implicitToPartialHomeomorphOfComplemented_fst (hf : HasStrictFDerivAt f f' a)
    (hf' : range f' = â€) (hker : (ker f').ClosedComplemented) (x : E) :
    (hf.implicitToPartialHomeomorphOfComplemented f f' hf' hker x).fst = f x :=
  rfl

/-- The partial homeomorphism sends `y` to `(f y, proj (y - a))`, where `proj` is the chosen
projection onto `ker f'`. -/
theorem implicitToPartialHomeomorphOfComplemented_apply (hf : HasStrictFDerivAt f f' a)
    (hf' : range f' = â€) (hker : (ker f').ClosedComplemented) (y : E) :
    hf.implicitToPartialHomeomorphOfComplemented f f' hf' hker y =
      (f y, Classical.choose hker (y - a)) :=
  rfl

/-- On points of the form `y + a` with `y` in `ker f'`, the second component is `y` itself. -/
@[simp]
theorem implicitToPartialHomeomorphOfComplemented_apply_ker (hf : HasStrictFDerivAt f f' a)
    (hf' : range f' = â€) (hker : (ker f').ClosedComplemented) (y : ker f') :
    hf.implicitToPartialHomeomorphOfComplemented f f' hf' hker (y + a) = (f (y + a), y) := by
  simp only [implicitToPartialHomeomorphOfComplemented_apply, add_sub_cancel_right,
    Classical.choose_spec hker]

/-- The base point `a` is sent to `(f a, 0)`. -/
@[simp]
theorem implicitToPartialHomeomorphOfComplemented_self (hf : HasStrictFDerivAt f f' a)
    (hf' : range f' = â€) (hker : (ker f').ClosedComplemented) :
    hf.implicitToPartialHomeomorphOfComplemented f f' hf' hker a = (f a, 0) := by
  simp [hf.implicitToPartialHomeomorphOfComplemented_apply]

/-- The base point `a` lies in the source of the partial homeomorphism. -/
theorem mem_implicitToPartialHomeomorphOfComplemented_source (hf : HasStrictFDerivAt f f' a)
    (hf' : range f' = â€) (hker : (ker f').ClosedComplemented) :
    a â (hf.implicitToPartialHomeomorphOfComplemented f f' hf' hker).source :=
  ImplicitFunctionData.pt_mem_toPartialHomeomorph_source _

/-- The pair `(f a, 0)` lies in the target of the partial homeomorphism. -/
theorem mem_implicitToPartialHomeomorphOfComplemented_target (hf : HasStrictFDerivAt f f' a)
    (hf' : range f' = â€) (hker : (ker f').ClosedComplemented) :
    (f a, (0 : ker f')) â (hf.implicitToPartialHomeomorphOfComplemented f f' hf' hker).target := by
  simpa only [implicitToPartialHomeomorphOfComplemented_self] using
    (hf.implicitToPartialHomeomorphOfComplemented f f' hf' hker).map_source <|
      hf.mem_implicitToPartialHomeomorphOfComplemented_source hf' hker

/-- `HasStrictFDerivAt.implicitFunctionOfComplemented` sends `(z, y)` to a point in `f â»Â¹' z`. -/
theorem map_implicitFunctionOfComplemented_eq (hf : HasStrictFDerivAt f f' a) (hf' : range f' = â€)
    (hker : (ker f').ClosedComplemented) :
    âá¶ p : F Ã ker f' in ð (f a, 0),
      f (hf.implicitFunctionOfComplemented f f' hf' hker p.1 p.2) = p.1 :=
  ((hf.implicitToPartialHomeomorphOfComplemented f f' hf' hker).eventually_right_inverse <|
      hf.mem_implicitToPartialHomeomorphOfComplemented_target hf' hker).mono
    fun âš_, _â© h => congr_arg Prod.fst h

/-- Any point in some neighborhood of `a` can be represented as
`HasStrictFDerivAt.implicitFunctionOfComplemented` of some point. -/
theorem eq_implicitFunctionOfComplemented (hf : HasStrictFDerivAt f f' a) (hf' : range f' = â€)
    (hker : (ker f').ClosedComplemented) :
    âá¶ x in ð a, hf.implicitFunctionOfComplemented f f' hf' hker (f x)
      (hf.implicitToPartialHomeomorphOfComplemented f f' hf' hker x).snd = x :=
  (implicitFunctionDataOfComplemented f f' hf hf' hker).implicitFunction_apply_image

/-- At the base pair `(f a, 0)`, the implicit function returns `a` itself. -/
@[simp]
theorem implicitFunctionOfComplemented_apply_image (hf : HasStrictFDerivAt f f' a)
    (hf' : range f' = â€) (hker : (ker f').ClosedComplemented) :
    hf.implicitFunctionOfComplemented f f' hf' hker (f a) 0 = a := by
  simpa only [implicitToPartialHomeomorphOfComplemented_self] using
    (hf.implicitToPartialHomeomorphOfComplemented f f' hf' hker).left_inv
      (hf.mem_implicitToPartialHomeomorphOfComplemented_source hf' hker)

/-- The implicit function is strictly differentiable at `0`, with derivative the inclusion of
`ker f'` into `E`. -/
theorem to_implicitFunctionOfComplemented (hf : HasStrictFDerivAt f f' a) (hf' : range f' = â€)
    (hker : (ker f').ClosedComplemented) :
    HasStrictFDerivAt (hf.implicitFunctionOfComplemented f f' hf' hker (f a))
      (ker f').subtypeL 0 := by
  -- Apply the general derivative formula with `g'inv` the inclusion of the kernel; the two
  -- side conditions are discharged by the `swap`ped goals below.
  convert (implicitFunctionDataOfComplemented f f' hf hf' hker).implicitFunction_hasStrictFDerivAt
    (ker f').subtypeL _ _
  swap
  · ext
    -- Porting note: added parentheses to help `simp`
    simp only [Classical.choose_spec hker, implicitFunctionDataOfComplemented,
      ContinuousLinearMap.comp_apply, Submodule.coe_subtypeL', Submodule.coeSubtype,
      ContinuousLinearMap.id_apply]
  swap
  · ext
    -- Porting note: added parentheses to help `simp`
    simp only [(ContinuousLinearMap.comp_apply), Submodule.coe_subtypeL', Submodule.coeSubtype,
      LinearMap.map_coe_ker, (ContinuousLinearMap.zero_apply)]
  simp only [implicitFunctionDataOfComplemented, map_sub, sub_self]
end Complemented
/-!
### Finite dimensional case
In this section we prove the following version of the implicit function theorem. Consider a map
`f : E â F` from a Banach normed space to a finite dimensional space.
Take a point `a : E` such that `f` is strictly differentiable at `a` and its derivative `f'`
is surjective. Then there exists a function `Ï : F â ker f' â E` such that for `(y, z)`
close to `(f a, 0)` we have `f (Ï y z) = y` and the derivative of `Ï (f a)` at zero is the
embedding `ker f' â E`.
This version deduces that `ker f'` is a complemented subspace from the fact that `F` is a finite
dimensional space, then applies the previous version.
Note that a map with these properties is not unique. E.g., different choices of a subspace
complementary to `ker f'` lead to different maps `Ï`.
-/
section FiniteDimensional
variable {ð : Type*} [NontriviallyNormedField ð] [CompleteSpace ð] {E : Type*}
[NormedAddCommGroup E] [NormedSpace ð E] [CompleteSpace E] {F : Type*} [NormedAddCommGroup F]
[NormedSpace ð F] [FiniteDimensional ð F] (f : E â F) (f' : E âL[ð] F) {a : E}
/-- Given a map `f : E â F` to a finite dimensional space with a surjective derivative `f'`,
returns a partial homeomorphism between `E` and `F Ã ker f'`. -/
def implicitToPartialHomeomorph (hf : HasStrictFDerivAt f f' a) (hf' : range f' = â€) :
    PartialHomeomorph E (F Ã ker f') :=
  -- `F` is complete because it is finite dimensional over the complete field `ð`.
  haveI := FiniteDimensional.complete ð F
  hf.implicitToPartialHomeomorphOfComplemented f f' hf'
    f'.ker_closedComplemented_of_finiteDimensional_range

/-- Implicit function `g` defined by `f (g z y) = z`. -/
def implicitFunction (hf : HasStrictFDerivAt f f' a) (hf' : range f' = â€) : F â ker f' â E :=
  Function.curry <| (hf.implicitToPartialHomeomorph f f' hf').symm
variable {f f'}
/-- The first component of the partial homeomorphism is `f` itself. -/
@[simp]
theorem implicitToPartialHomeomorph_fst (hf : HasStrictFDerivAt f f' a) (hf' : range f' = â€)
    (x : E) : (hf.implicitToPartialHomeomorph f f' hf' x).fst = f x :=
  rfl

/-- On points of the form `y + a` with `y` in `ker f'`, the second component is `y` itself. -/
@[simp]
theorem implicitToPartialHomeomorph_apply_ker (hf : HasStrictFDerivAt f f' a) (hf' : range f' = â€)
    (y : ker f') : hf.implicitToPartialHomeomorph f f' hf' (y + a) = (f (y + a), y) :=
  -- Porting note: had to add `haveI` (here and below)
  haveI := FiniteDimensional.complete ð F
  implicitToPartialHomeomorphOfComplemented_apply_ker ..

/-- The base point `a` is sent to `(f a, 0)`. -/
@[simp]
theorem implicitToPartialHomeomorph_self (hf : HasStrictFDerivAt f f' a) (hf' : range f' = â€) :
    hf.implicitToPartialHomeomorph f f' hf' a = (f a, 0) :=
  haveI := FiniteDimensional.complete ð F
  implicitToPartialHomeomorphOfComplemented_self ..

/-- The base point `a` lies in the source of `implicitToPartialHomeomorph`. -/
theorem mem_implicitToPartialHomeomorph_source (hf : HasStrictFDerivAt f f' a)
    (hf' : range f' = â€) : a â (hf.implicitToPartialHomeomorph f f' hf').source :=
  haveI := FiniteDimensional.complete ð F
  ImplicitFunctionData.pt_mem_toPartialHomeomorph_source _

/-- The pair `(f a, 0)` lies in the target of `implicitToPartialHomeomorph`. -/
theorem mem_implicitToPartialHomeomorph_target (hf : HasStrictFDerivAt f f' a)
    (hf' : range f' = â€) : (f a, (0 : ker f')) â (hf.implicitToPartialHomeomorph f f' hf').target :=
  haveI := FiniteDimensional.complete ð F
  mem_implicitToPartialHomeomorphOfComplemented_target ..

/-- Continuity of the implicit function at the base pair: it tends to `a` along any pair of
functions tending to `(f a, 0)`. -/
theorem tendsto_implicitFunction (hf : HasStrictFDerivAt f f' a) (hf' : range f' = â€) {α : Type*}
    {l : Filter α} {gâ : α â F} {gâ : α â ker f'} (hâ : Tendsto gâ l (ð <| f a))
    (hâ : Tendsto gâ l (ð 0)) :
    Tendsto (fun t => hf.implicitFunction f f' hf' (gâ t) (gâ t)) l (ð a) := by
  refine ((hf.implicitToPartialHomeomorph f f' hf').tendsto_symm
    (hf.mem_implicitToPartialHomeomorph_source hf')).comp ?_
  rw [implicitToPartialHomeomorph_self]
  exact hâ.prod_mk_nhds hâ

alias _root_.Filter.Tendsto.implicitFunction := tendsto_implicitFunction

/-- `HasStrictFDerivAt.implicitFunction` sends `(z, y)` to a point in `f â»Â¹' z`. -/
theorem map_implicitFunction_eq (hf : HasStrictFDerivAt f f' a) (hf' : range f' = â€) :
    âá¶ p : F Ã ker f' in ð (f a, 0), f (hf.implicitFunction f f' hf' p.1 p.2) = p.1 :=
  haveI := FiniteDimensional.complete ð F
  map_implicitFunctionOfComplemented_eq ..

/-- At the base pair `(f a, 0)`, the implicit function returns `a` itself. -/
@[simp]
theorem implicitFunction_apply_image (hf : HasStrictFDerivAt f f' a) (hf' : range f' = â€) :
    hf.implicitFunction f f' hf' (f a) 0 = a := by
  haveI := FiniteDimensional.complete ð F
  apply implicitFunctionOfComplemented_apply_image

/-- Any point in some neighborhood of `a` can be represented as `HasStrictFDerivAt.implicitFunction`
of some point. -/
theorem eq_implicitFunction (hf : HasStrictFDerivAt f f' a) (hf' : range f' = â€) :
    âá¶ x in ð a,
      hf.implicitFunction f f' hf' (f x) (hf.implicitToPartialHomeomorph f f' hf' x).snd = x :=
  haveI := FiniteDimensional.complete ð F
  eq_implicitFunctionOfComplemented ..

/-- The implicit function is strictly differentiable at `0`, with derivative the inclusion of
`ker f'` into `E`. -/
theorem to_implicitFunction (hf : HasStrictFDerivAt f f' a) (hf' : range f' = â€) :
    HasStrictFDerivAt (hf.implicitFunction f f' hf' (f a)) (ker f').subtypeL 0 :=
  haveI := FiniteDimensional.complete ð F
  to_implicitFunctionOfComplemented ..
end FiniteDimensional
end HasStrictFDerivAt
|
Analysis\Calculus\LagrangeMultipliers.lean | /-
Copyright (c) 2021 Yury Kudryashov. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yury Kudryashov
-/
import Mathlib.Analysis.Calculus.FDeriv.Prod
import Mathlib.Analysis.Calculus.InverseFunctionTheorem.FDeriv
import Mathlib.LinearAlgebra.Dual
/-!
# Lagrange multipliers
In this file we formalize the
[Lagrange multipliers](https://en.wikipedia.org/wiki/Lagrange_multiplier) method of solving
conditional extremum problems: if a function `Ï` has a local extremum at `xâ` on the set
`f â»Â¹' {f xâ}`, `f x = (fâ x, ..., fâââ x)`, then the differentials of `fâ` and `Ï` are linearly
dependent. First we formulate a geometric version of this theorem which does not rely on the
target space being `ââ¿`, then restate it in terms of coordinates.
## TODO
Formalize Karush-Kuhn-Tucker theorem
## Tags
lagrange multiplier, local extremum
-/
open Filter Set
open scoped Topology Filter
variable {E F : Type*} [NormedAddCommGroup E] [NormedSpace â E] [CompleteSpace E]
[NormedAddCommGroup F] [NormedSpace â F] [CompleteSpace F] {f : E â F} {Ï : E â â} {xâ : E}
{f' : E âL[â] F} {Ï' : E âL[â] â}
/-- Lagrange multipliers theorem: if `Ï : E â â` has a local extremum on the set `{x | f x = f xâ}`
at `xâ`, both `f : E â F` and `Ï` are strictly differentiable at `xâ`, and the codomain of `f` is
a complete space, then the linear map `x ⊠(f' x, Ï' x)` is not surjective. -/
theorem IsLocalExtrOn.range_ne_top_of_hasStrictFDerivAt
    (hextr : IsLocalExtrOn Ï {x | f x = f xâ} xâ) (hf' : HasStrictFDerivAt f f' xâ)
    (hÏ' : HasStrictFDerivAt Ï Ï' xâ) : LinearMap.range (f'.prod Ï') â †:= by
  intro htop
  set fÏ := fun x => (f x, Ï x)
  -- If the combined derivative were surjective, `Ï` would map the filter of neighborhoods of `xâ`
  -- within the level set of `f` onto a full neighborhood of `Ï xâ`.
  have A : map Ï (ð[f â»Â¹' {f xâ}] xâ) = ð (Ï xâ) := by
    change map (Prod.snd â fÏ) (ð[fÏ â»Â¹' {p | p.1 = f xâ}] xâ) = ð (Ï xâ)
    rw [â map_map, nhdsWithin, map_inf_principal_preimage, (hf'.prod hÏ').map_nhds_eq_of_surj htop]
    exact map_snd_nhdsWithin _
  -- That contradicts `xâ` being a local extremum of `Ï` on this set.
  exact hextr.not_nhds_le_map A.ge
/-- Lagrange multipliers theorem: if `Ï : E â â` has a local extremum on the set `{x | f x = f xâ}`
at `xâ`, both `f : E â F` and `Ï` are strictly differentiable at `xâ`, and the codomain of `f` is
a complete space, then there exist `Î : dual â F` and `Îâ : â` such that `(Î, Îâ) â 0` and
`Î (f' x) + Îâ ⢠Ï' x = 0` for all `x`. -/
theorem IsLocalExtrOn.exists_linear_map_of_hasStrictFDerivAt
    (hextr : IsLocalExtrOn Ï {x | f x = f xâ} xâ) (hf' : HasStrictFDerivAt f f' xâ)
    (hÏ' : HasStrictFDerivAt Ï Ï' xâ) :
    â (Î : Module.Dual â F) (Îâ : â), (Î, Îâ) â 0 â§ â x, Î (f' x) + Îâ ⢠Ï' x = 0 := by
  -- The range of the combined derivative is a proper subspace, so it is contained in the
  -- kernel of some nonzero linear functional `Î'` on `F Ã â`.
  rcases Submodule.exists_le_ker_of_lt_top _
      (lt_top_iff_ne_top.2 <| hextr.range_ne_top_of_hasStrictFDerivAt hf' hÏ') with
    âšÎ', h0, hÎ'â©
  -- Split `Î'` into a functional `Î` on `F` and a scalar `Îâ` via the coproduct equivalence.
  set e : ((F ââ[â] â) Ã â) ââ[â] F Ã â ââ[â] â :=
    ((LinearEquiv.refl â (F ââ[â] â)).prod (LinearMap.ringLmapEquivSelf â â â).symm).trans
      (LinearMap.coprodEquiv â)
  rcases e.surjective Î' with âšâšÎ, Îââ©, rflâ©
  refine âšÎ, Îâ, e.map_ne_zero_iff.1 h0, fun x => ?_â©
  convert LinearMap.congr_fun (LinearMap.range_le_ker_iff.1 hÎ') x using 1
  -- squeezed `simp [mul_comm]` to speed up elaboration
  simp only [e, smul_eq_mul, LinearEquiv.trans_apply, LinearEquiv.prod_apply,
    LinearEquiv.refl_apply, LinearMap.ringLmapEquivSelf_symm_apply, LinearMap.coprodEquiv_apply,
    ContinuousLinearMap.coe_prod, LinearMap.coprod_comp_prod, LinearMap.add_apply,
    LinearMap.coe_comp, ContinuousLinearMap.coe_coe, Function.comp_apply, LinearMap.coe_smulRight,
    LinearMap.one_apply, mul_comm]
/-- Lagrange multipliers theorem: if `Ï : E â â` has a local extremum on the set `{x | f x = f xâ}`
at `xâ`, and both `f : E â â` and `Ï` are strictly differentiable at `xâ`, then there exist
`a b : â` such that `(a, b) â 0` and `a ⢠f' + b ⢠Ï' = 0`. -/
theorem IsLocalExtrOn.exists_multipliers_of_hasStrictFDerivAt_1d {f : E â â} {f' : E âL[â] â}
    (hextr : IsLocalExtrOn Ï {x | f x = f xâ} xâ) (hf' : HasStrictFDerivAt f f' xâ)
    (hÏ' : HasStrictFDerivAt Ï Ï' xâ) : â a b : â, (a, b) â 0 â§ a ⢠f' + b ⢠Ï' = 0 := by
  obtain âšÎ, Îâ, hÎ, hfÎâ© := hextr.exists_linear_map_of_hasStrictFDerivAt hf' hÏ'
  -- The multipliers are `Î 1` and `Îâ`: a functional on `â` is determined by its value at `1`.
  refine âšÎ 1, Îâ, ?_, ?_â©
  · contrapose! hÎ
    simp only [Prod.mk_eq_zero] at hÎ â¢
    refine âšLinearMap.ext fun x => ?_, hÎ.2â©
    simpa [hÎ.1] using Î.map_smul x 1
  · ext x
    have Hâ : Î (f' x) = f' x * Î 1 := by
      simpa only [mul_one, Algebra.id.smul_eq_mul] using Î.map_smul (f' x) 1
    have Hâ : f' x * Î 1 + Îâ * Ï' x = 0 := by simpa only [Algebra.id.smul_eq_mul, Hâ] using hfÎ x
    simpa [mul_comm] using Hâ
/-- Lagrange multipliers theorem, 1d version. Let `f : ι â E â â` be a finite family of functions.
Suppose that `Ï : E â â` has a local extremum on the set `{x | â i, f i x = f i xâ}` at `xâ`.
Suppose that all functions `f i` as well as `Ï` are strictly differentiable at `xâ`.
Then the derivatives `f' i : E âL[â] â` and `Ï' : E âL[â] â` are linearly dependent:
there exist `Î : ι â â` and `Îâ : â`, `(Î, Îâ) â 0`, such that `â i, Î i ⢠f' i + Îâ ⢠Ï' = 0`.
See also `IsLocalExtrOn.linear_dependent_of_hasStrictFDerivAt` for a version that
states `¬LinearIndependent â _` instead of existence of `Î` and `Îâ`. -/
theorem IsLocalExtrOn.exists_multipliers_of_hasStrictFDerivAt {ι : Type*} [Fintype ι]
    {f : ι â E â â} {f' : ι â E âL[â] â} (hextr : IsLocalExtrOn Ï {x | â i, f i x = f i xâ} xâ)
    (hf' : â i, HasStrictFDerivAt (f i) (f' i) xâ) (hÏ' : HasStrictFDerivAt Ï Ï' xâ) :
    â (Î : ι â â) (Îâ : â), (Î, Îâ) â 0 â§ (â i, Î i ⢠f' i) + Îâ ⢠Ï' = 0 := by
  letI := Classical.decEq ι
  -- Bundle the family into a single map valued in `ι â â` so the general theorem applies.
  replace hextr : IsLocalExtrOn Ï {x | (fun i => f i x) = fun i => f i xâ} xâ := by
    simpa only [Function.funext_iff] using hextr
  rcases hextr.exists_linear_map_of_hasStrictFDerivAt (hasStrictFDerivAt_pi.2 fun i => hf' i)
      hÏ' with
    âšÎ, Îâ, h0, hsumâ©
  -- A functional on `ι â â` is given by a family of coefficients `Î : ι â â`.
  rcases (LinearEquiv.piRing â â ι â).symm.surjective Î with âšÎ, rflâ©
  refine âšÎ, Îâ, ?_, ?_â©
  · simpa only [Ne, Prod.ext_iff, LinearEquiv.map_eq_zero_iff, Prod.fst_zero] using h0
  · ext x; simpa [mul_comm] using hsum x
/-- Lagrange multipliers theorem. Let `f : ι â E â â` be a finite family of functions.
Suppose that `Ï : E â â` has a local extremum on the set `{x | â i, f i x = f i xâ}` at `xâ`.
Suppose that all functions `f i` as well as `Ï` are strictly differentiable at `xâ`.
Then the derivatives `f' i : E âL[â] â` and `Ï' : E âL[â] â` are linearly dependent.
See also `IsLocalExtrOn.exists_multipliers_of_hasStrictFDerivAt` for a version that
states existence of Lagrange multipliers `Î` and `Îâ` instead of using
`¬LinearIndependent â _`. -/
theorem IsLocalExtrOn.linear_dependent_of_hasStrictFDerivAt {ι : Type*} [Finite ι] {f : ι â E â â}
    {f' : ι â E âL[â] â} (hextr : IsLocalExtrOn Ï {x | â i, f i x = f i xâ} xâ)
    (hf' : â i, HasStrictFDerivAt (f i) (f' i) xâ) (hÏ' : HasStrictFDerivAt Ï Ï' xâ) :
    ¬LinearIndependent â (Option.elim' Ï' f' : Option ι â E âL[â] â) := by
  cases nonempty_fintype ι
  -- Linear dependence of finitely many vectors means a nontrivial vanishing combination.
  rw [Fintype.linearIndependent_iff]; push_neg
  rcases hextr.exists_multipliers_of_hasStrictFDerivAt hf' hÏ' with âšÎ, Îâ, hÎ, hÎfâ©
  -- `Îâ` is the coefficient of `Ï'` (the `none` case); `Î i` that of `f' i`.
  refine âšOption.elim' Îâ Î, ?_, ?_â©
  · simpa [add_comm] using hÎf
  · simpa only [Function.funext_iff, not_and_or, or_comm, Option.exists, Prod.mk_eq_zero, Ne,
      not_forall] using hÎ
|
Analysis\Calculus\LHopital.lean | /-
Copyright (c) 2020 Anatole Dedecker. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Anatole Dedecker
-/
import Mathlib.Analysis.Calculus.MeanValue
import Mathlib.Analysis.Calculus.Deriv.Inv
/-!
# L'HÃŽpital's rule for 0/0 indeterminate forms
In this file, we prove several forms of "L'HÃŽpital's rule" for computing 0/0
indeterminate forms. The proof of `HasDerivAt.lhopital_zero_right_on_Ioo`
is based on the one given in the corresponding
[Wikibooks](https://en.wikibooks.org/wiki/Calculus/L%27H%C3%B4pital%27s_Rule)
chapter, and all other statements are derived from this one by composing by
carefully chosen functions.
Note that the filter that `f'/g'` tends to isn't required to be one of `ð a`,
`atTop` or `atBot`. In fact, we give a slightly stronger statement by
allowing it to be any filter on `â`.
Each statement is available in a `HasDerivAt` form and a `deriv` form, which
is denoted by each statement being in either the `HasDerivAt` or the `deriv`
namespace.
## Tags
L'HÃŽpital's rule, L'Hopital's rule
-/
open Filter Set
open scoped Filter Topology Pointwise
variable {a b : â} {l : Filter â} {f f' g g' : â â â}
/-!
## Interval-based versions
We start by proving statements where all conditions (derivability, `g' â 0`) have
to be satisfied on an explicitly-provided interval.
-/
namespace HasDerivAt
theorem lhopital_zero_right_on_Ioo (hab : a < b) (hff' : â x â Ioo a b, HasDerivAt f (f' x) x)
(hgg' : â x â Ioo a b, HasDerivAt g (g' x) x) (hg' : â x â Ioo a b, g' x â 0)
(hfa : Tendsto f (ð[>] a) (ð 0)) (hga : Tendsto g (ð[>] a) (ð 0))
(hdiv : Tendsto (fun x => f' x / g' x) (ð[>] a) l) :
Tendsto (fun x => f x / g x) (ð[>] a) l := by
have sub : â x â Ioo a b, Ioo a x â Ioo a b := fun x hx =>
Ioo_subset_Ioo (le_refl a) (le_of_lt hx.2)
have hg : â x â Ioo a b, g x â 0 := by
intro x hx h
have : Tendsto g (ð[<] x) (ð 0) := by
rw [â h, â nhdsWithin_Ioo_eq_nhdsWithin_Iio hx.1]
exact ((hgg' x hx).continuousAt.continuousWithinAt.mono <| sub x hx).tendsto
obtain âšy, hyx, hyâ© : â c â Ioo a x, g' c = 0 :=
exists_hasDerivAt_eq_zero' hx.1 hga this fun y hy => hgg' y <| sub x hx hy
exact hg' y (sub x hx hyx) hy
have : â x â Ioo a b, â c â Ioo a x, f x * g' c = g x * f' c := by
intro x hx
rw [â sub_zero (f x), â sub_zero (g x)]
exact exists_ratio_hasDerivAt_eq_ratio_slope' g g' hx.1 f f' (fun y hy => hgg' y <| sub x hx hy)
(fun y hy => hff' y <| sub x hx hy) hga hfa
(tendsto_nhdsWithin_of_tendsto_nhds (hgg' x hx).continuousAt.tendsto)
(tendsto_nhdsWithin_of_tendsto_nhds (hff' x hx).continuousAt.tendsto)
choose! c hc using this
have : â x â Ioo a b, ((fun x' => f' x' / g' x') â c) x = f x / g x := by
intro x hx
rcases hc x hx with âšhâ, hââ©
field_simp [hg x hx, hg' (c x) ((sub x hx) hâ)]
simp only [hâ]
rw [mul_comm]
have cmp : â x â Ioo a b, a < c x â§ c x < x := fun x hx => (hc x hx).1
rw [â nhdsWithin_Ioo_eq_nhdsWithin_Ioi hab]
apply tendsto_nhdsWithin_congr this
apply hdiv.comp
refine tendsto_nhdsWithin_of_tendsto_nhds_of_eventually_within _
(tendsto_of_tendsto_of_tendsto_of_le_of_le' tendsto_const_nhds
(tendsto_nhdsWithin_of_tendsto_nhds tendsto_id) ?_ ?_) ?_
all_goals
apply eventually_nhdsWithin_of_forall
intro x hx
have := cmp x hx
try simp
linarith [this]
theorem lhopital_zero_right_on_Ico (hab : a < b) (hff' : â x â Ioo a b, HasDerivAt f (f' x) x)
(hgg' : â x â Ioo a b, HasDerivAt g (g' x) x) (hcf : ContinuousOn f (Ico a b))
(hcg : ContinuousOn g (Ico a b)) (hg' : â x â Ioo a b, g' x â 0) (hfa : f a = 0) (hga : g a = 0)
(hdiv : Tendsto (fun x => f' x / g' x) (ð[>] a) l) :
Tendsto (fun x => f x / g x) (ð[>] a) l := by
refine lhopital_zero_right_on_Ioo hab hff' hgg' hg' ?_ ?_ hdiv
· rw [â hfa, â nhdsWithin_Ioo_eq_nhdsWithin_Ioi hab]
exact ((hcf a <| left_mem_Ico.mpr hab).mono Ioo_subset_Ico_self).tendsto
· rw [â hga, â nhdsWithin_Ioo_eq_nhdsWithin_Ioi hab]
exact ((hcg a <| left_mem_Ico.mpr hab).mono Ioo_subset_Ico_self).tendsto
theorem lhopital_zero_left_on_Ioo (hab : a < b) (hff' : â x â Ioo a b, HasDerivAt f (f' x) x)
(hgg' : â x â Ioo a b, HasDerivAt g (g' x) x) (hg' : â x â Ioo a b, g' x â 0)
(hfb : Tendsto f (ð[<] b) (ð 0)) (hgb : Tendsto g (ð[<] b) (ð 0))
(hdiv : Tendsto (fun x => f' x / g' x) (ð[<] b) l) :
Tendsto (fun x => f x / g x) (ð[<] b) l := by
-- Here, we essentially compose by `Neg.neg`. The following is mostly technical details.
have hdnf : â x â -Ioo a b, HasDerivAt (f â Neg.neg) (f' (-x) * -1) x := fun x hx =>
comp x (hff' (-x) hx) (hasDerivAt_neg x)
have hdng : â x â -Ioo a b, HasDerivAt (g â Neg.neg) (g' (-x) * -1) x := fun x hx =>
comp x (hgg' (-x) hx) (hasDerivAt_neg x)
rw [preimage_neg_Ioo] at hdnf
rw [preimage_neg_Ioo] at hdng
have := lhopital_zero_right_on_Ioo (neg_lt_neg hab) hdnf hdng (by
intro x hx h
apply hg' _ (by rw [â preimage_neg_Ioo] at hx; exact hx)
rwa [mul_comm, â neg_eq_neg_one_mul, neg_eq_zero] at h)
(hfb.comp tendsto_neg_nhdsWithin_Ioi_neg) (hgb.comp tendsto_neg_nhdsWithin_Ioi_neg)
(by
simp only [neg_div_neg_eq, mul_one, mul_neg]
exact (tendsto_congr fun x => rfl).mp (hdiv.comp tendsto_neg_nhdsWithin_Ioi_neg))
have := this.comp tendsto_neg_nhdsWithin_Iio
unfold Function.comp at this
simpa only [neg_neg]
theorem lhopital_zero_left_on_Ioc (hab : a < b) (hff' : â x â Ioo a b, HasDerivAt f (f' x) x)
(hgg' : â x â Ioo a b, HasDerivAt g (g' x) x) (hcf : ContinuousOn f (Ioc a b))
(hcg : ContinuousOn g (Ioc a b)) (hg' : â x â Ioo a b, g' x â 0) (hfb : f b = 0) (hgb : g b = 0)
(hdiv : Tendsto (fun x => f' x / g' x) (ð[<] b) l) :
Tendsto (fun x => f x / g x) (ð[<] b) l := by
refine lhopital_zero_left_on_Ioo hab hff' hgg' hg' ?_ ?_ hdiv
· rw [â hfb, â nhdsWithin_Ioo_eq_nhdsWithin_Iio hab]
exact ((hcf b <| right_mem_Ioc.mpr hab).mono Ioo_subset_Ioc_self).tendsto
· rw [â hgb, â nhdsWithin_Ioo_eq_nhdsWithin_Iio hab]
exact ((hcg b <| right_mem_Ioc.mpr hab).mono Ioo_subset_Ioc_self).tendsto
theorem lhopital_zero_atTop_on_Ioi (hff' : â x â Ioi a, HasDerivAt f (f' x) x)
(hgg' : â x â Ioi a, HasDerivAt g (g' x) x) (hg' : â x â Ioi a, g' x â 0)
(hftop : Tendsto f atTop (ð 0)) (hgtop : Tendsto g atTop (ð 0))
(hdiv : Tendsto (fun x => f' x / g' x) atTop l) : Tendsto (fun x => f x / g x) atTop l := by
obtain âša', haa', ha'â© : â a', a < a' â§ 0 < a' := âš1 + max a 0,
âšlt_of_le_of_lt (le_max_left a 0) (lt_one_add _),
lt_of_le_of_lt (le_max_right a 0) (lt_one_add _)â©â©
have fact1 : â x : â, x â Ioo 0 a'â»Â¹ â x â 0 := fun _ hx => (ne_of_lt hx.1).symm
have fact2 : â x â Ioo 0 a'â»Â¹, a < xâ»Â¹ := fun _ hx => lt_trans haa' ((lt_inv ha' hx.1).mpr hx.2)
have hdnf : â x â Ioo 0 a'â»Â¹, HasDerivAt (f â Inv.inv) (f' xâ»Â¹ * -(x ^ 2)â»Â¹) x := fun x hx =>
comp x (hff' xâ»Â¹ <| fact2 x hx) (hasDerivAt_inv <| fact1 x hx)
have hdng : â x â Ioo 0 a'â»Â¹, HasDerivAt (g â Inv.inv) (g' xâ»Â¹ * -(x ^ 2)â»Â¹) x := fun x hx =>
comp x (hgg' xâ»Â¹ <| fact2 x hx) (hasDerivAt_inv <| fact1 x hx)
have := lhopital_zero_right_on_Ioo (inv_pos.mpr ha') hdnf hdng
(by
intro x hx
refine mul_ne_zero ?_ (neg_ne_zero.mpr <| inv_ne_zero <| pow_ne_zero _ <| fact1 x hx)
exact hg' _ (fact2 x hx))
(hftop.comp tendsto_inv_zero_atTop) (hgtop.comp tendsto_inv_zero_atTop)
(by
refine (tendsto_congr' ?_).mp (hdiv.comp tendsto_inv_zero_atTop)
rw [eventuallyEq_iff_exists_mem]
use Ioi 0, self_mem_nhdsWithin
intro x hx
unfold Function.comp
simp only
erw [mul_div_mul_right]
exact neg_ne_zero.mpr (inv_ne_zero <| pow_ne_zero _ <| ne_of_gt hx))
have := this.comp tendsto_inv_atTop_zero'
unfold Function.comp at this
simpa only [inv_inv]
theorem lhopital_zero_atBot_on_Iio (hff' : â x â Iio a, HasDerivAt f (f' x) x)
(hgg' : â x â Iio a, HasDerivAt g (g' x) x) (hg' : â x â Iio a, g' x â 0)
(hfbot : Tendsto f atBot (ð 0)) (hgbot : Tendsto g atBot (ð 0))
(hdiv : Tendsto (fun x => f' x / g' x) atBot l) : Tendsto (fun x => f x / g x) atBot l := by
-- Here, we essentially compose by `Neg.neg`. The following is mostly technical details.
have hdnf : â x â -Iio a, HasDerivAt (f â Neg.neg) (f' (-x) * -1) x := fun x hx =>
comp x (hff' (-x) hx) (hasDerivAt_neg x)
have hdng : â x â -Iio a, HasDerivAt (g â Neg.neg) (g' (-x) * -1) x := fun x hx =>
comp x (hgg' (-x) hx) (hasDerivAt_neg x)
rw [preimage_neg_Iio] at hdnf
rw [preimage_neg_Iio] at hdng
have := lhopital_zero_atTop_on_Ioi hdnf hdng
(by
intro x hx h
apply hg' _ (by rw [â preimage_neg_Iio] at hx; exact hx)
rwa [mul_comm, â neg_eq_neg_one_mul, neg_eq_zero] at h)
(hfbot.comp tendsto_neg_atTop_atBot) (hgbot.comp tendsto_neg_atTop_atBot)
(by
simp only [mul_one, mul_neg, neg_div_neg_eq]
exact (tendsto_congr fun x => rfl).mp (hdiv.comp tendsto_neg_atTop_atBot))
have := this.comp tendsto_neg_atBot_atTop
unfold Function.comp at this
simpa only [neg_neg]
end HasDerivAt
namespace deriv
theorem lhopital_zero_right_on_Ioo (hab : a < b) (hdf : DifferentiableOn â f (Ioo a b))
(hg' : â x â Ioo a b, deriv g x â 0) (hfa : Tendsto f (ð[>] a) (ð 0))
(hga : Tendsto g (ð[>] a) (ð 0))
(hdiv : Tendsto (fun x => (deriv f) x / (deriv g) x) (ð[>] a) l) :
Tendsto (fun x => f x / g x) (ð[>] a) l := by
have hdf : â x â Ioo a b, DifferentiableAt â f x := fun x hx =>
(hdf x hx).differentiableAt (Ioo_mem_nhds hx.1 hx.2)
have hdg : â x â Ioo a b, DifferentiableAt â g x := fun x hx =>
by_contradiction fun h => hg' x hx (deriv_zero_of_not_differentiableAt h)
exact HasDerivAt.lhopital_zero_right_on_Ioo hab (fun x hx => (hdf x hx).hasDerivAt)
(fun x hx => (hdg x hx).hasDerivAt) hg' hfa hga hdiv
theorem lhopital_zero_right_on_Ico (hab : a < b) (hdf : DifferentiableOn â f (Ioo a b))
(hcf : ContinuousOn f (Ico a b)) (hcg : ContinuousOn g (Ico a b))
(hg' : â x â Ioo a b, (deriv g) x â 0) (hfa : f a = 0) (hga : g a = 0)
(hdiv : Tendsto (fun x => (deriv f) x / (deriv g) x) (ð[>] a) l) :
Tendsto (fun x => f x / g x) (ð[>] a) l := by
refine lhopital_zero_right_on_Ioo hab hdf hg' ?_ ?_ hdiv
· rw [â hfa, â nhdsWithin_Ioo_eq_nhdsWithin_Ioi hab]
exact ((hcf a <| left_mem_Ico.mpr hab).mono Ioo_subset_Ico_self).tendsto
· rw [â hga, â nhdsWithin_Ioo_eq_nhdsWithin_Ioi hab]
exact ((hcg a <| left_mem_Ico.mpr hab).mono Ioo_subset_Ico_self).tendsto
theorem lhopital_zero_left_on_Ioo (hab : a < b) (hdf : DifferentiableOn â f (Ioo a b))
(hg' : â x â Ioo a b, (deriv g) x â 0) (hfb : Tendsto f (ð[<] b) (ð 0))
(hgb : Tendsto g (ð[<] b) (ð 0))
(hdiv : Tendsto (fun x => (deriv f) x / (deriv g) x) (ð[<] b) l) :
Tendsto (fun x => f x / g x) (ð[<] b) l := by
have hdf : â x â Ioo a b, DifferentiableAt â f x := fun x hx =>
(hdf x hx).differentiableAt (Ioo_mem_nhds hx.1 hx.2)
have hdg : â x â Ioo a b, DifferentiableAt â g x := fun x hx =>
by_contradiction fun h => hg' x hx (deriv_zero_of_not_differentiableAt h)
exact HasDerivAt.lhopital_zero_left_on_Ioo hab (fun x hx => (hdf x hx).hasDerivAt)
(fun x hx => (hdg x hx).hasDerivAt) hg' hfb hgb hdiv
theorem lhopital_zero_atTop_on_Ioi (hdf : DifferentiableOn â f (Ioi a))
(hg' : â x â Ioi a, (deriv g) x â 0) (hftop : Tendsto f atTop (ð 0))
(hgtop : Tendsto g atTop (ð 0)) (hdiv : Tendsto (fun x => (deriv f) x / (deriv g) x) atTop l) :
Tendsto (fun x => f x / g x) atTop l := by
have hdf : â x â Ioi a, DifferentiableAt â f x := fun x hx =>
(hdf x hx).differentiableAt (Ioi_mem_nhds hx)
have hdg : â x â Ioi a, DifferentiableAt â g x := fun x hx =>
by_contradiction fun h => hg' x hx (deriv_zero_of_not_differentiableAt h)
exact HasDerivAt.lhopital_zero_atTop_on_Ioi (fun x hx => (hdf x hx).hasDerivAt)
(fun x hx => (hdg x hx).hasDerivAt) hg' hftop hgtop hdiv
theorem lhopital_zero_atBot_on_Iio (hdf : DifferentiableOn â f (Iio a))
(hg' : â x â Iio a, (deriv g) x â 0) (hfbot : Tendsto f atBot (ð 0))
(hgbot : Tendsto g atBot (ð 0)) (hdiv : Tendsto (fun x => (deriv f) x / (deriv g) x) atBot l) :
Tendsto (fun x => f x / g x) atBot l := by
have hdf : â x â Iio a, DifferentiableAt â f x := fun x hx =>
(hdf x hx).differentiableAt (Iio_mem_nhds hx)
have hdg : â x â Iio a, DifferentiableAt â g x := fun x hx =>
by_contradiction fun h => hg' x hx (deriv_zero_of_not_differentiableAt h)
exact HasDerivAt.lhopital_zero_atBot_on_Iio (fun x hx => (hdf x hx).hasDerivAt)
(fun x hx => (hdg x hx).hasDerivAt) hg' hfbot hgbot hdiv
end deriv
/-!
## Generic versions
The following statements no longer mention any explicit interval, as they only require
conditions holding eventually.
-/
namespace HasDerivAt
/-- L'HÃŽpital's rule for approaching a real from the right, `HasDerivAt` version -/
theorem lhopital_zero_nhds_right (hff' : âá¶ x in ð[>] a, HasDerivAt f (f' x) x)
(hgg' : âá¶ x in ð[>] a, HasDerivAt g (g' x) x) (hg' : âá¶ x in ð[>] a, g' x â 0)
(hfa : Tendsto f (ð[>] a) (ð 0)) (hga : Tendsto g (ð[>] a) (ð 0))
(hdiv : Tendsto (fun x => f' x / g' x) (ð[>] a) l) :
Tendsto (fun x => f x / g x) (ð[>] a) l := by
rw [eventually_iff_exists_mem] at *
rcases hff' with âšsâ, hsâ, hff'â©
rcases hgg' with âšsâ, hsâ, hgg'â©
rcases hg' with âšsâ, hsâ, hg'â©
let s := sâ â© sâ â© sâ
have hs : s â ð[>] a := inter_mem (inter_mem hsâ hsâ) hsâ
rw [mem_nhdsWithin_Ioi_iff_exists_Ioo_subset] at hs
rcases hs with âšu, hau, huâ©
refine lhopital_zero_right_on_Ioo hau ?_ ?_ ?_ hfa hga hdiv <;>
intro x hx <;> apply_assumption <;>
first | exact (hu hx).1.1 | exact (hu hx).1.2 | exact (hu hx).2
/-- L'HÃŽpital's rule for approaching a real from the left, `HasDerivAt` version -/
theorem lhopital_zero_nhds_left (hff' : âá¶ x in ð[<] a, HasDerivAt f (f' x) x)
(hgg' : âá¶ x in ð[<] a, HasDerivAt g (g' x) x) (hg' : âá¶ x in ð[<] a, g' x â 0)
(hfa : Tendsto f (ð[<] a) (ð 0)) (hga : Tendsto g (ð[<] a) (ð 0))
(hdiv : Tendsto (fun x => f' x / g' x) (ð[<] a) l) :
Tendsto (fun x => f x / g x) (ð[<] a) l := by
rw [eventually_iff_exists_mem] at *
rcases hff' with âšsâ, hsâ, hff'â©
rcases hgg' with âšsâ, hsâ, hgg'â©
rcases hg' with âšsâ, hsâ, hg'â©
let s := sâ â© sâ â© sâ
have hs : s â ð[<] a := inter_mem (inter_mem hsâ hsâ) hsâ
rw [mem_nhdsWithin_Iio_iff_exists_Ioo_subset] at hs
rcases hs with âšl, hal, hlâ©
refine lhopital_zero_left_on_Ioo hal ?_ ?_ ?_ hfa hga hdiv <;> intro x hx <;> apply_assumption <;>
first | exact (hl hx).1.1| exact (hl hx).1.2| exact (hl hx).2
/-- L'HÃŽpital's rule for approaching a real, `HasDerivAt` version. This
does not require anything about the situation at `a` -/
theorem lhopital_zero_nhds' (hff' : âá¶ x in ð[â ] a, HasDerivAt f (f' x) x)
(hgg' : âá¶ x in ð[â ] a, HasDerivAt g (g' x) x) (hg' : âá¶ x in ð[â ] a, g' x â 0)
(hfa : Tendsto f (ð[â ] a) (ð 0)) (hga : Tendsto g (ð[â ] a) (ð 0))
(hdiv : Tendsto (fun x => f' x / g' x) (ð[â ] a) l) :
Tendsto (fun x => f x / g x) (ð[â ] a) l := by
simp only [â Iio_union_Ioi, nhdsWithin_union, tendsto_sup, eventually_sup] at *
exact âšlhopital_zero_nhds_left hff'.1 hgg'.1 hg'.1 hfa.1 hga.1 hdiv.1,
lhopital_zero_nhds_right hff'.2 hgg'.2 hg'.2 hfa.2 hga.2 hdiv.2â©
/-- **L'HÃŽpital's rule** for approaching a real, `HasDerivAt` version -/
theorem lhopital_zero_nhds (hff' : âá¶ x in ð a, HasDerivAt f (f' x) x)
(hgg' : âá¶ x in ð a, HasDerivAt g (g' x) x) (hg' : âá¶ x in ð a, g' x â 0)
(hfa : Tendsto f (ð a) (ð 0)) (hga : Tendsto g (ð a) (ð 0))
(hdiv : Tendsto (fun x => f' x / g' x) (ð a) l) : Tendsto (fun x => f x / g x) (ð[â ] a) l := by
apply @lhopital_zero_nhds' _ _ _ f' _ g' <;>
(first | apply eventually_nhdsWithin_of_eventually_nhds |
apply tendsto_nhdsWithin_of_tendsto_nhds) <;> assumption
/-- L'HÃŽpital's rule for approaching +â, `HasDerivAt` version -/
theorem lhopital_zero_atTop (hff' : âá¶ x in atTop, HasDerivAt f (f' x) x)
(hgg' : âá¶ x in atTop, HasDerivAt g (g' x) x) (hg' : âá¶ x in atTop, g' x â 0)
(hftop : Tendsto f atTop (ð 0)) (hgtop : Tendsto g atTop (ð 0))
(hdiv : Tendsto (fun x => f' x / g' x) atTop l) : Tendsto (fun x => f x / g x) atTop l := by
rw [eventually_iff_exists_mem] at *
rcases hff' with âšsâ, hsâ, hff'â©
rcases hgg' with âšsâ, hsâ, hgg'â©
rcases hg' with âšsâ, hsâ, hg'â©
let s := sâ â© sâ â© sâ
have hs : s â atTop := inter_mem (inter_mem hsâ hsâ) hsâ
rw [mem_atTop_sets] at hs
rcases hs with âšl, hlâ©
have hl' : Ioi l â s := fun x hx => hl x (le_of_lt hx)
refine lhopital_zero_atTop_on_Ioi ?_ ?_ (fun x hx => hg' x <| (hl' hx).2) hftop hgtop hdiv <;>
intro x hx <;> apply_assumption <;> first | exact (hl' hx).1.1| exact (hl' hx).1.2
/-- L'HÃŽpital's rule for approaching -â, `HasDerivAt` version -/
theorem lhopital_zero_atBot (hff' : âá¶ x in atBot, HasDerivAt f (f' x) x)
(hgg' : âá¶ x in atBot, HasDerivAt g (g' x) x) (hg' : âá¶ x in atBot, g' x â 0)
(hfbot : Tendsto f atBot (ð 0)) (hgbot : Tendsto g atBot (ð 0))
(hdiv : Tendsto (fun x => f' x / g' x) atBot l) : Tendsto (fun x => f x / g x) atBot l := by
rw [eventually_iff_exists_mem] at *
rcases hff' with âšsâ, hsâ, hff'â©
rcases hgg' with âšsâ, hsâ, hgg'â©
rcases hg' with âšsâ, hsâ, hg'â©
let s := sâ â© sâ â© sâ
have hs : s â atBot := inter_mem (inter_mem hsâ hsâ) hsâ
rw [mem_atBot_sets] at hs
rcases hs with âšl, hlâ©
have hl' : Iio l â s := fun x hx => hl x (le_of_lt hx)
refine lhopital_zero_atBot_on_Iio ?_ ?_ (fun x hx => hg' x <| (hl' hx).2) hfbot hgbot hdiv <;>
intro x hx <;> apply_assumption <;> first | exact (hl' hx).1.1| exact (hl' hx).1.2
end HasDerivAt
namespace deriv
/-- **L'HÃŽpital's rule** for approaching a real from the right, `deriv` version -/
theorem lhopital_zero_nhds_right (hdf : âá¶ x in ð[>] a, DifferentiableAt â f x)
(hg' : âá¶ x in ð[>] a, deriv g x â 0) (hfa : Tendsto f (ð[>] a) (ð 0))
(hga : Tendsto g (ð[>] a) (ð 0))
(hdiv : Tendsto (fun x => (deriv f) x / (deriv g) x) (ð[>] a) l) :
Tendsto (fun x => f x / g x) (ð[>] a) l := by
have hdg : âá¶ x in ð[>] a, DifferentiableAt â g x :=
hg'.mp (eventually_of_forall fun _ hg' =>
by_contradiction fun h => hg' (deriv_zero_of_not_differentiableAt h))
have hdf' : âá¶ x in ð[>] a, HasDerivAt f (deriv f x) x :=
hdf.mp (eventually_of_forall fun _ => DifferentiableAt.hasDerivAt)
have hdg' : âá¶ x in ð[>] a, HasDerivAt g (deriv g x) x :=
hdg.mp (eventually_of_forall fun _ => DifferentiableAt.hasDerivAt)
exact HasDerivAt.lhopital_zero_nhds_right hdf' hdg' hg' hfa hga hdiv
/-- **L'HÃŽpital's rule** for approaching a real from the left, `deriv` version -/
theorem lhopital_zero_nhds_left (hdf : âá¶ x in ð[<] a, DifferentiableAt â f x)
(hg' : âá¶ x in ð[<] a, deriv g x â 0) (hfa : Tendsto f (ð[<] a) (ð 0))
(hga : Tendsto g (ð[<] a) (ð 0))
(hdiv : Tendsto (fun x => (deriv f) x / (deriv g) x) (ð[<] a) l) :
Tendsto (fun x => f x / g x) (ð[<] a) l := by
have hdg : âá¶ x in ð[<] a, DifferentiableAt â g x :=
hg'.mp (eventually_of_forall fun _ hg' =>
by_contradiction fun h => hg' (deriv_zero_of_not_differentiableAt h))
have hdf' : âá¶ x in ð[<] a, HasDerivAt f (deriv f x) x :=
hdf.mp (eventually_of_forall fun _ => DifferentiableAt.hasDerivAt)
have hdg' : âá¶ x in ð[<] a, HasDerivAt g (deriv g x) x :=
hdg.mp (eventually_of_forall fun _ => DifferentiableAt.hasDerivAt)
exact HasDerivAt.lhopital_zero_nhds_left hdf' hdg' hg' hfa hga hdiv
/-- **L'HÃŽpital's rule** for approaching a real, `deriv` version. This
does not require anything about the situation at `a` -/
theorem lhopital_zero_nhds' (hdf : âá¶ x in ð[â ] a, DifferentiableAt â f x)
(hg' : âá¶ x in ð[â ] a, deriv g x â 0) (hfa : Tendsto f (ð[â ] a) (ð 0))
(hga : Tendsto g (ð[â ] a) (ð 0))
(hdiv : Tendsto (fun x => (deriv f) x / (deriv g) x) (ð[â ] a) l) :
Tendsto (fun x => f x / g x) (ð[â ] a) l := by
simp only [â Iio_union_Ioi, nhdsWithin_union, tendsto_sup, eventually_sup] at *
exact âšlhopital_zero_nhds_left hdf.1 hg'.1 hfa.1 hga.1 hdiv.1,
lhopital_zero_nhds_right hdf.2 hg'.2 hfa.2 hga.2 hdiv.2â©
/-- **L'HÃŽpital's rule** for approaching a real, `deriv` version -/
theorem lhopital_zero_nhds (hdf : âá¶ x in ð a, DifferentiableAt â f x)
(hg' : âá¶ x in ð a, deriv g x â 0) (hfa : Tendsto f (ð a) (ð 0)) (hga : Tendsto g (ð a) (ð 0))
(hdiv : Tendsto (fun x => (deriv f) x / (deriv g) x) (ð a) l) :
Tendsto (fun x => f x / g x) (ð[â ] a) l := by
apply lhopital_zero_nhds' <;>
(first | apply eventually_nhdsWithin_of_eventually_nhds |
apply tendsto_nhdsWithin_of_tendsto_nhds) <;> assumption
/-- **L'HÃŽpital's rule** for approaching +â, `deriv` version -/
theorem lhopital_zero_atTop (hdf : âá¶ x : â in atTop, DifferentiableAt â f x)
(hg' : âá¶ x : â in atTop, deriv g x â 0) (hftop : Tendsto f atTop (ð 0))
(hgtop : Tendsto g atTop (ð 0)) (hdiv : Tendsto (fun x => (deriv f) x / (deriv g) x) atTop l) :
Tendsto (fun x => f x / g x) atTop l := by
have hdg : âá¶ x in atTop, DifferentiableAt â g x := hg'.mp
(eventually_of_forall fun _ hg' =>
by_contradiction fun h => hg' (deriv_zero_of_not_differentiableAt h))
have hdf' : âá¶ x in atTop, HasDerivAt f (deriv f x) x :=
hdf.mp (eventually_of_forall fun _ => DifferentiableAt.hasDerivAt)
have hdg' : âá¶ x in atTop, HasDerivAt g (deriv g x) x :=
hdg.mp (eventually_of_forall fun _ => DifferentiableAt.hasDerivAt)
exact HasDerivAt.lhopital_zero_atTop hdf' hdg' hg' hftop hgtop hdiv
/-- **L'HÃŽpital's rule** for approaching -â, `deriv` version -/
theorem lhopital_zero_atBot (hdf : âá¶ x : â in atBot, DifferentiableAt â f x)
(hg' : âá¶ x : â in atBot, deriv g x â 0) (hfbot : Tendsto f atBot (ð 0))
(hgbot : Tendsto g atBot (ð 0)) (hdiv : Tendsto (fun x => (deriv f) x / (deriv g) x) atBot l) :
Tendsto (fun x => f x / g x) atBot l := by
have hdg : âá¶ x in atBot, DifferentiableAt â g x :=
hg'.mp (eventually_of_forall fun _ hg' =>
by_contradiction fun h => hg' (deriv_zero_of_not_differentiableAt h))
have hdf' : âá¶ x in atBot, HasDerivAt f (deriv f x) x :=
hdf.mp (eventually_of_forall fun _ => DifferentiableAt.hasDerivAt)
have hdg' : âá¶ x in atBot, HasDerivAt g (deriv g x) x :=
hdg.mp (eventually_of_forall fun _ => DifferentiableAt.hasDerivAt)
exact HasDerivAt.lhopital_zero_atBot hdf' hdg' hg' hfbot hgbot hdiv
end deriv
|
Analysis\Calculus\LogDeriv.lean | /-
Copyright (c) 2024 Chris Birkbeck. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Chris Birkbeck
-/
import Mathlib.Analysis.Calculus.Deriv.ZPow
/-!
# Logarithmic Derivatives
We define the logarithmic derivative of a function f as `deriv f / f`. We then prove some basic
facts about this, including how it changes under multiplication and composition.
-/
noncomputable section
open Filter Function
open scoped Topology BigOperators Classical
variable {ð ð': Type*} [NontriviallyNormedField ð] [NontriviallyNormedField ð']
[NormedAlgebra ð ð']
/-- The logarithmic derivative of a function defined as `deriv f /f`. Note that it will be zero
at `x` if `f` is not DifferentiableAt `x`. -/
def logDeriv (f : ð â ð') :=
deriv f / f
theorem logDeriv_apply (f : ð â ð') (x : ð) : logDeriv f x = deriv f x / f x := rfl
lemma logDeriv_eq_zero_of_not_differentiableAt (f : ð â ð') (x : ð) (h : ¬DifferentiableAt ð f x) :
logDeriv f x = 0 := by
simp only [logDeriv_apply, deriv_zero_of_not_differentiableAt h, zero_div]
@[simp]
theorem logDeriv_id (x : ð) : logDeriv id x = 1 / x := by
simp [logDeriv_apply]
@[simp] theorem logDeriv_id' (x : ð) : logDeriv (·) x = 1 / x := logDeriv_id x
@[simp]
theorem logDeriv_const (a : ð') : logDeriv (fun _ : ð ⊠a) = 0 := by
ext
simp [logDeriv_apply]
theorem logDeriv_mul {f g : ð â ð'} (x : ð) (hf : f x â 0) (hg : g x â 0)
(hdf : DifferentiableAt ð f x) (hdg : DifferentiableAt ð g x) :
logDeriv (fun z => f z * g z) x = logDeriv f x + logDeriv g x := by
simp only [logDeriv_apply, deriv_mul hdf hdg]
field_simp [mul_comm]
theorem logDeriv_mul_const {f : ð â ð'} (x : ð) (a : ð') (ha : a â 0):
logDeriv (fun z => f z * a) x = logDeriv f x := by
simp only [logDeriv_apply, deriv_mul_const_field, mul_div_mul_right _ _ ha]
theorem logDeriv_const_mul {f : ð â ð'} (x : ð) (a : ð') (ha : a â 0):
logDeriv (fun z => a * f z) x = logDeriv f x := by
simp only [logDeriv_apply, deriv_const_mul_field, mul_div_mul_left _ _ ha]
/-- The logarithmic derivative of a finite product is the sum of the logarithmic derivatives. -/
theorem logDeriv_prod {ι : Type*} (s : Finset ι) (f : ι â ð â ð') (x : ð) (hf : â i â s, f i x â 0)
(hd : â i â s, DifferentiableAt ð (f i) x) :
logDeriv (â i â s, f i ·) x = â i â s, logDeriv (f i) x := by
induction s using Finset.cons_induction with
| empty => simp
| cons a s ha ih =>
rw [Finset.forall_mem_cons] at hf hd
simp_rw [Finset.prod_cons, Finset.sum_cons]
rw [logDeriv_mul, ih hf.2 hd.2]
· exact hf.1
· simpa [Finset.prod_eq_zero_iff] using hf.2
· exact hd.1
· exact .finset_prod hd.2
lemma logDeriv_fun_zpow {f : ð â ð'} {x : ð} (hdf : DifferentiableAt ð f x) (n : â€) :
logDeriv (f · ^ n) x = n * logDeriv f x := by
rcases eq_or_ne n 0 with rfl | hn; · simp
rcases eq_or_ne (f x) 0 with hf | hf
· simp [logDeriv_apply, zero_zpow, *]
· rw [logDeriv_apply, â comp_def (·^n), deriv.comp _ (differentiableAt_zpow.2 <| .inl hf) hdf,
deriv_zpow, logDeriv_apply]
field_simp [zpow_ne_zero, zpow_sub_oneâ hf]
ring
lemma logDeriv_fun_pow {f : ð â ð'} {x : ð} (hdf : DifferentiableAt ð f x) (n : â) :
logDeriv (f · ^ n) x = n * logDeriv f x :=
mod_cast logDeriv_fun_zpow hdf n
@[simp]
lemma logDeriv_zpow (x : ð) (n : â€) : logDeriv (· ^ n) x = n / x := by
rw [logDeriv_fun_zpow (by fun_prop), logDeriv_id', mul_one_div]
@[simp]
lemma logDeriv_pow (x : ð) (n : â) : logDeriv (· ^ n) x = n / x :=
mod_cast logDeriv_zpow x n
@[simp] lemma logDeriv_inv (x : ð) : logDeriv (·â»Â¹) x = -1 / x := by
simpa using logDeriv_zpow x (-1)
theorem logDeriv_comp {f : ð' â ð'} {g : ð â ð'} {x : ð} (hf : DifferentiableAt ð' f (g x))
(hg : DifferentiableAt ð g x) : logDeriv (f â g) x = logDeriv f (g x) * deriv g x := by
simp only [logDeriv, Pi.div_apply, deriv.comp _ hf hg, comp_apply]
ring
|
Analysis\Calculus\MeanValue.lean | /-
Copyright (c) 2019 Sébastien Gouëzel. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Sébastien Gouëzel, Yury Kudryashov
-/
import Mathlib.Analysis.Calculus.Deriv.AffineMap
import Mathlib.Analysis.Calculus.Deriv.Slope
import Mathlib.Analysis.Calculus.Deriv.Mul
import Mathlib.Analysis.Calculus.Deriv.Comp
import Mathlib.Analysis.Calculus.LocalExtr.Rolle
import Mathlib.Analysis.Convex.Normed
import Mathlib.Analysis.RCLike.Basic
/-!
# The mean value inequality and equalities
In this file we prove the following facts:
* `Convex.norm_image_sub_le_of_norm_deriv_le` : if `f` is differentiable on a convex set `s`
and the norm of its derivative is bounded by `C`, then `f` is Lipschitz continuous on `s` with
constant `C`; also a variant in which what is bounded by `C` is the norm of the difference of the
derivative from a fixed linear map. This lemma and its versions are formulated using `RCLike`,
so they work both for real and complex derivatives.
* `image_le_of*`, `image_norm_le_of_*` : several similar lemmas deducing `f x †B x` or
`âf xâ †B x` from upper estimates on `f'` or `âf'â`, respectively. These lemmas differ by
their assumptions:
* `of_liminf_*` lemmas assume that limit inferior of some ratio is less than `B' x`;
* `of_deriv_right_*`, `of_norm_deriv_right_*` lemmas assume that the right derivative
or its norm is less than `B' x`;
* `of_*_lt_*` lemmas assume a strict inequality whenever `f x = B x` or `âf xâ = B x`;
* `of_*_le_*` lemmas assume a non-strict inequality everywhere on `[a, b)`;
* name of a lemma ends with `'` if (1) it assumes that `B` is continuous on `[a, b]`
and has a right derivative at every point of `[a, b)`, and (2) the lemma has
a counterpart assuming that `B` is differentiable everywhere on `â`
* `norm_image_sub_le_*_segment` : if derivative of `f` on `[a, b]` is bounded above
by a constant `C`, then `âf x - f aâ †C * âx - aâ`; several versions deal with
right derivative and derivative within `[a, b]` (`HasDerivWithinAt` or `derivWithin`).
* `Convex.is_const_of_fderivWithin_eq_zero` : if a function has derivative `0` on a convex set `s`,
then it is a constant on `s`.
* `exists_ratio_hasDerivAt_eq_ratio_slope` and `exists_ratio_deriv_eq_ratio_slope` :
Cauchy's Mean Value Theorem.
* `exists_hasDerivAt_eq_slope` and `exists_deriv_eq_slope` : Lagrange's Mean Value Theorem.
* `domain_mvt` : Lagrange's Mean Value Theorem, applied to a segment in a convex domain.
* `Convex.image_sub_lt_mul_sub_of_deriv_lt`, `Convex.mul_sub_lt_image_sub_of_lt_deriv`,
`Convex.image_sub_le_mul_sub_of_deriv_le`, `Convex.mul_sub_le_image_sub_of_le_deriv`,
if `â x, C (</â€/>/â¥) (f' x)`, then `C * (y - x) (</â€/>/â¥) (f y - f x)` whenever `x < y`.
* `monotoneOn_of_deriv_nonneg`, `antitoneOn_of_deriv_nonpos`,
`strictMono_of_deriv_pos`, `strictAnti_of_deriv_neg` :
if the derivative of a function is non-negative/non-positive/positive/negative, then
the function is monotone/antitone/strictly monotone/strictly monotonically
decreasing.
* `convexOn_of_deriv`, `convexOn_of_deriv2_nonneg` : if the derivative of a function
is increasing or its second derivative is nonnegative, then the original function is convex.
* `hasStrictFDerivAt_of_hasFDerivAt_of_continuousAt` : a C^1 function over the reals is
strictly differentiable. (This is a corollary of the mean value inequality.)
-/
variable {E : Type*} [NormedAddCommGroup E] [NormedSpace â E] {F : Type*} [NormedAddCommGroup F]
[NormedSpace â F]
open Metric Set Asymptotics ContinuousLinearMap Filter
open scoped Topology NNReal
/-! ### One-dimensional fencing inequalities -/
/-- General fencing theorem for continuous functions with an estimate on the derivative.
Let `f` and `B` be continuous functions on `[a, b]` such that
* `f a †B a`;
* `B` has right derivative `B'` at every point of `[a, b)`;
* for each `x â [a, b)` the right-side limit inferior of `(f z - f x) / (z - x)`
is bounded above by a function `f'`;
* we have `f' x < B' x` whenever `f x = B x`.
Then `f x †B x` everywhere on `[a, b]`. -/
theorem image_le_of_liminf_slope_right_lt_deriv_boundary' {f f' : â â â} {a b : â}
(hf : ContinuousOn f (Icc a b))
-- `hf'` actually says `liminf (f z - f x) / (z - x) †f' x`
(hf' : â x â Ico a b, â r, f' x < r â âá¶ z in ð[>] x, slope f x z < r)
{B B' : â â â} (ha : f a †B a) (hB : ContinuousOn B (Icc a b))
(hB' : â x â Ico a b, HasDerivWithinAt B (B' x) (Ici x) x)
(bound : â x â Ico a b, f x = B x â f' x < B' x) : â âŠxâŠ, x â Icc a b â f x †B x := by
change Icc a b â { x | f x †B x }
set s := { x | f x †B x } ⩠Icc a b
have A : ContinuousOn (fun x => (f x, B x)) (Icc a b) := hf.prod hB
have : IsClosed s := by
simp only [s, inter_comm]
exact A.preimage_isClosed_of_isClosed isClosed_Icc OrderClosedTopology.isClosed_le'
apply this.Icc_subset_of_forall_exists_gt ha
rintro x âšhxB : f x †B x, xabâ© y hy
cases' hxB.lt_or_eq with hxB hxB
· -- If `f x < B x`, then all we need is continuity of both sides
refine nonempty_of_mem (inter_mem ?_ (Ioc_mem_nhdsWithin_Ioi âšle_rfl, hyâ©))
have : âá¶ x in ð[Icc a b] x, f x < B x :=
A x (Ico_subset_Icc_self xab) (IsOpen.mem_nhds (isOpen_lt continuous_fst continuous_snd) hxB)
have : âá¶ x in ð[>] x, f x < B x := nhdsWithin_le_of_mem (Icc_mem_nhdsWithin_Ioi xab) this
exact this.mono fun y => le_of_lt
· rcases exists_between (bound x xab hxB) with âšr, hfr, hrBâ©
specialize hf' x xab r hfr
have HB : âá¶ z in ð[>] x, r < slope B x z :=
(hasDerivWithinAt_iff_tendsto_slope' <| lt_irrefl x).1 (hB' x xab).Ioi_of_Ici
(Ioi_mem_nhds hrB)
obtain âšz, hfz, hzB, hzâ© : â z, slope f x z < r â§ r < slope B x z â§ z â Ioc x y :=
(hf'.and_eventually (HB.and (Ioc_mem_nhdsWithin_Ioi âšle_rfl, hyâ©))).exists
refine âšz, ?_, hzâ©
have := (hfz.trans hzB).le
rwa [slope_def_field, slope_def_field, div_le_div_right (sub_pos.2 hz.1), hxB,
sub_le_sub_iff_right] at this
/-- General fencing theorem for continuous functions with an estimate on the derivative.
Let `f` and `B` be continuous functions on `[a, b]` such that
* `f a †B a`;
* `B` has derivative `B'` everywhere on `â`;
* for each `x â [a, b)` the right-side limit inferior of `(f z - f x) / (z - x)`
is bounded above by a function `f'`;
* we have `f' x < B' x` whenever `f x = B x`.
Then `f x †B x` everywhere on `[a, b]`. -/
theorem image_le_of_liminf_slope_right_lt_deriv_boundary {f f' : â â â} {a b : â}
(hf : ContinuousOn f (Icc a b))
-- `hf'` actually says `liminf (f z - f x) / (z - x) †f' x`
(hf' : â x â Ico a b, â r, f' x < r â âá¶ z in ð[>] x, slope f x z < r)
{B B' : â â â} (ha : f a †B a) (hB : â x, HasDerivAt B (B' x) x)
(bound : â x â Ico a b, f x = B x â f' x < B' x) : â âŠxâŠ, x â Icc a b â f x †B x :=
image_le_of_liminf_slope_right_lt_deriv_boundary' hf hf' ha
(fun x _ => (hB x).continuousAt.continuousWithinAt) (fun x _ => (hB x).hasDerivWithinAt) bound
/-- General fencing theorem for continuous functions with an estimate on the derivative.
Let `f` and `B` be continuous functions on `[a, b]` such that
* `f a †B a`;
* `B` has right derivative `B'` at every point of `[a, b)`;
* for each `x â [a, b)` the right-side limit inferior of `(f z - f x) / (z - x)`
  is bounded above by `B'`.
Then `f x †B x` everywhere on `[a, b]`. -/
theorem image_le_of_liminf_slope_right_le_deriv_boundary {f : â â â} {a b : â}
    (hf : ContinuousOn f (Icc a b)) {B B' : â â â} (ha : f a †B a) (hB : ContinuousOn B (Icc a b))
    (hB' : â x â Ico a b, HasDerivWithinAt B (B' x) (Ici x) x)
    -- `bound` actually says `liminf (f z - f x) / (z - x) †B' x`
    (bound : â x â Ico a b, â r, B' x < r â âá¶ z in ð[>] x, slope f x z < r) :
    â âŠxâŠ, x â Icc a b â f x †B x := by
  -- Step 1: for every `r > 0` the perturbed barrier `B x + r * (x - a)` has right derivative
  -- `B' x + r`, strictly above the slope bound, so the strict fencing theorem applies.
  have Hr : â x â Icc a b, â r > 0, f x †B x + r * (x - a) := fun x hx r hr => by
    apply image_le_of_liminf_slope_right_lt_deriv_boundary' hf bound
    · rwa [sub_self, mul_zero, add_zero]
    · exact hB.add (continuousOn_const.mul (continuousOn_id.sub continuousOn_const))
    · intro x hx
      exact (hB' x hx).add (((hasDerivWithinAt_id x (Ici x)).sub_const a).const_mul r)
    · intro x _ _
      rw [mul_one]
      exact (lt_add_iff_pos_right _).2 hr
    exact hx
  -- Step 2: let `r` tend to `0` from the right; the inequality passes to the limit
  -- by continuity of `r †B x + r * (x - a)` at `r = 0`.
  intro x hx
  have : ContinuousWithinAt (fun r => B x + r * (x - a)) (Ioi 0) 0 :=
    continuousWithinAt_const.add (continuousWithinAt_id.mul continuousWithinAt_const)
  convert continuousWithinAt_const.closure_le _ this (Hr x hx) using 1 <;> simp
/-- General fencing theorem for continuous functions with an estimate on the derivative.
Let `f` and `B` be continuous functions on `[a, b]` such that
* `f a †B a`;
* `B` has right derivative `B'` at every point of `[a, b)`;
* `f` has right derivative `f'` at every point of `[a, b)`;
* we have `f' x < B' x` whenever `f x = B x`.
Then `f x †B x` everywhere on `[a, b]`. -/
theorem image_le_of_deriv_right_lt_deriv_boundary' {f f' : â â â} {a b : â}
    (hf : ContinuousOn f (Icc a b)) (hf' : â x â Ico a b, HasDerivWithinAt f (f' x) (Ici x) x)
    {B B' : â â â} (ha : f a †B a) (hB : ContinuousOn B (Icc a b))
    (hB' : â x â Ico a b, HasDerivWithinAt B (B' x) (Ici x) x)
    (bound : â x â Ico a b, f x = B x â f' x < B' x) : â âŠxâŠ, x â Icc a b â f x †B x :=
  -- The right derivative of `f` dominates the right-slope liminf, so the liminf version applies.
  image_le_of_liminf_slope_right_lt_deriv_boundary' hf
    (fun x hx _ hr => (hf' x hx).liminf_right_slope_le hr) ha hB hB' bound
/-- General fencing theorem for continuous functions with an estimate on the derivative.
Let `f` and `B` be continuous functions on `[a, b]` such that
* `f a †B a`;
* `B` has derivative `B'` everywhere on `â`;
* `f` has right derivative `f'` at every point of `[a, b)`;
* we have `f' x < B' x` whenever `f x = B x`.
Then `f x †B x` everywhere on `[a, b]`. -/
theorem image_le_of_deriv_right_lt_deriv_boundary {f f' : â â â} {a b : â}
    (hf : ContinuousOn f (Icc a b)) (hf' : â x â Ico a b, HasDerivWithinAt f (f' x) (Ici x) x)
    {B B' : â â â} (ha : f a †B a) (hB : â x, HasDerivAt B (B' x) x)
    (bound : â x â Ico a b, f x = B x â f' x < B' x) : â âŠxâŠ, x â Icc a b â f x †B x :=
  -- A full derivative of `B` yields the continuity and right-derivative hypotheses
  -- of the primed version.
  image_le_of_deriv_right_lt_deriv_boundary' hf hf' ha
    (fun x _ => (hB x).continuousAt.continuousWithinAt) (fun x _ => (hB x).hasDerivWithinAt) bound
/-- General fencing theorem for continuous functions with an estimate on the derivative.
Let `f` and `B` be continuous functions on `[a, b]` such that
* `f a †B a`;
* `B` has right derivative `B'` at every point of `[a, b)`;
* `f` has right derivative `f'` at every point of `[a, b)`;
* we have `f' x †B' x` on `[a, b)`.
Then `f x †B x` everywhere on `[a, b]`. -/
theorem image_le_of_deriv_right_le_deriv_boundary {f f' : â â â} {a b : â}
    (hf : ContinuousOn f (Icc a b)) (hf' : â x â Ico a b, HasDerivWithinAt f (f' x) (Ici x) x)
    {B B' : â â â} (ha : f a †B a) (hB : ContinuousOn B (Icc a b))
    (hB' : â x â Ico a b, HasDerivWithinAt B (B' x) (Ici x) x)
    (bound : â x â Ico a b, f' x †B' x) : â âŠxâŠ, x â Icc a b â f x †B x :=
  -- The right derivative bounds the right-slope liminf, so the liminf version applies.
  image_le_of_liminf_slope_right_le_deriv_boundary hf ha hB hB' fun x hx _ hr =>
    (hf' x hx).liminf_right_slope_le (lt_of_le_of_lt (bound x hx) hr)
/-! ### Vector-valued functions `f : â â E` -/
section
variable {f : â â E} {a b : â}
/-- General fencing theorem for continuous functions with an estimate on the derivative.
Let `f` and `B` be continuous functions on `[a, b]` such that
* `âf aâ †B a`;
* `B` has right derivative `B'` at every point of `[a, b)`;
* for each `x â [a, b)` the right-side limit inferior of `(âf zâ - âf xâ) / (z - x)`
  is bounded above by a function `f'`;
* we have `f' x < B' x` whenever `âf xâ = B x`.
Then `âf xâ †B x` everywhere on `[a, b]`. -/
theorem image_norm_le_of_liminf_right_slope_norm_lt_deriv_boundary {E : Type*}
    [NormedAddCommGroup E] {f : â â E} {f' : â â â} (hf : ContinuousOn f (Icc a b))
    -- `hf'` actually says `liminf (âf zâ - âf xâ) / (z - x) †f' x`
    (hf' : â x â Ico a b, â r, f' x < r â âá¶ z in ð[>] x, slope (norm â f) x z < r)
    {B B' : â â â} (ha : âf aâ †B a) (hB : ContinuousOn B (Icc a b))
    (hB' : â x â Ico a b, HasDerivWithinAt B (B' x) (Ici x) x)
    (bound : â x â Ico a b, âf xâ = B x â f' x < B' x) : â âŠxâŠ, x â Icc a b â âf xâ †B x :=
  -- Apply the scalar fencing theorem to the continuous function `norm â f`.
  image_le_of_liminf_slope_right_lt_deriv_boundary' (continuous_norm.comp_continuousOn hf) hf' ha hB
    hB' bound
/-- General fencing theorem for continuous functions with an estimate on the norm of the derivative.
Let `f` and `B` be continuous functions on `[a, b]` such that
* `âf aâ †B a`;
* `f` and `B` have right derivatives `f'` and `B'` respectively at every point of `[a, b)`;
* the norm of `f'` is strictly less than `B'` whenever `âf xâ = B x`.
Then `âf xâ †B x` everywhere on `[a, b]`. We use one-sided derivatives in the assumptions
to make this theorem work for piecewise differentiable functions.
-/
theorem image_norm_le_of_norm_deriv_right_lt_deriv_boundary' {f' : â â E}
    (hf : ContinuousOn f (Icc a b)) (hf' : â x â Ico a b, HasDerivWithinAt f (f' x) (Ici x) x)
    {B B' : â â â} (ha : âf aâ †B a) (hB : ContinuousOn B (Icc a b))
    (hB' : â x â Ico a b, HasDerivWithinAt B (B' x) (Ici x) x)
    (bound : â x â Ico a b, âf xâ = B x â âf' xâ < B' x) : â âŠxâŠ, x â Icc a b â âf xâ †B x :=
  -- The norm of the right derivative bounds the right-slope liminf of `âfâ`.
  image_norm_le_of_liminf_right_slope_norm_lt_deriv_boundary hf
    (fun x hx _ hr => (hf' x hx).liminf_right_slope_norm_le hr) ha hB hB' bound
/-- General fencing theorem for continuous functions with an estimate on the norm of the derivative.
Let `f` and `B` be continuous functions on `[a, b]` such that
* `âf aâ †B a`;
* `f` has right derivative `f'` at every point of `[a, b)`;
* `B` has derivative `B'` everywhere on `â`;
* the norm of `f'` is strictly less than `B'` whenever `âf xâ = B x`.
Then `âf xâ †B x` everywhere on `[a, b]`. We use one-sided derivatives in the assumptions
to make this theorem work for piecewise differentiable functions.
-/
theorem image_norm_le_of_norm_deriv_right_lt_deriv_boundary {f' : â â E}
    (hf : ContinuousOn f (Icc a b)) (hf' : â x â Ico a b, HasDerivWithinAt f (f' x) (Ici x) x)
    {B B' : â â â} (ha : âf aâ †B a) (hB : â x, HasDerivAt B (B' x) x)
    (bound : â x â Ico a b, âf xâ = B x â âf' xâ < B' x) : â âŠxâŠ, x â Icc a b â âf xâ †B x :=
  -- A full derivative of `B` supplies the continuity and right-derivative hypotheses.
  image_norm_le_of_norm_deriv_right_lt_deriv_boundary' hf hf' ha
    (fun x _ => (hB x).continuousAt.continuousWithinAt) (fun x _ => (hB x).hasDerivWithinAt) bound
/-- General fencing theorem for continuous functions with an estimate on the norm of the derivative.
Let `f` and `B` be continuous functions on `[a, b]` such that
* `âf aâ †B a`;
* `f` and `B` have right derivatives `f'` and `B'` respectively at every point of `[a, b)`;
* we have `âf' xâ †B' x` everywhere on `[a, b)`.
Then `âf xâ †B x` everywhere on `[a, b]`. We use one-sided derivatives in the assumptions
to make this theorem work for piecewise differentiable functions.
-/
theorem image_norm_le_of_norm_deriv_right_le_deriv_boundary' {f' : â â E}
    (hf : ContinuousOn f (Icc a b)) (hf' : â x â Ico a b, HasDerivWithinAt f (f' x) (Ici x) x)
    {B B' : â â â} (ha : âf aâ †B a) (hB : ContinuousOn B (Icc a b))
    (hB' : â x â Ico a b, HasDerivWithinAt B (B' x) (Ici x) x)
    (bound : â x â Ico a b, âf' xâ †B' x) : â âŠxâŠ, x â Icc a b â âf xâ †B x :=
  -- Apply the scalar liminf fencing theorem to `âfâ`, via the right-slope norm estimate.
  image_le_of_liminf_slope_right_le_deriv_boundary (continuous_norm.comp_continuousOn hf) ha hB hB'
    fun x hx _ hr => (hf' x hx).liminf_right_slope_norm_le ((bound x hx).trans_lt hr)
/-- General fencing theorem for continuous functions with an estimate on the norm of the derivative.
Let `f` and `B` be continuous functions on `[a, b]` such that
* `âf aâ †B a`;
* `f` has right derivative `f'` at every point of `[a, b)`;
* `B` has derivative `B'` everywhere on `â`;
* we have `âf' xâ †B' x` everywhere on `[a, b)`.
Then `âf xâ †B x` everywhere on `[a, b]`. We use one-sided derivatives in the assumptions
to make this theorem work for piecewise differentiable functions.
-/
theorem image_norm_le_of_norm_deriv_right_le_deriv_boundary {f' : â â E}
    (hf : ContinuousOn f (Icc a b)) (hf' : â x â Ico a b, HasDerivWithinAt f (f' x) (Ici x) x)
    {B B' : â â â} (ha : âf aâ †B a) (hB : â x, HasDerivAt B (B' x) x)
    (bound : â x â Ico a b, âf' xâ †B' x) : â âŠxâŠ, x â Icc a b â âf xâ †B x :=
  -- A full derivative of `B` supplies the continuity and right-derivative hypotheses.
  image_norm_le_of_norm_deriv_right_le_deriv_boundary' hf hf' ha
    (fun x _ => (hB x).continuousAt.continuousWithinAt) (fun x _ => (hB x).hasDerivWithinAt) bound
/-- A function on `[a, b]` with the norm of the right derivative bounded by `C`
satisfies `âf x - f aâ †C * (x - a)`. -/
theorem norm_image_sub_le_of_norm_deriv_right_le_segment {f' : â â E} {C : â}
    (hf : ContinuousOn f (Icc a b)) (hf' : â x â Ico a b, HasDerivWithinAt f (f' x) (Ici x) x)
    (bound : â x â Ico a b, âf' xâ †C) : â x â Icc a b, âf x - f aâ †C * (x - a) := by
  -- Shift `f` so that it vanishes at `a`; the right derivative is unchanged.
  let g x := f x - f a
  have hg : ContinuousOn g (Icc a b) := hf.sub continuousOn_const
  have hg' : â x â Ico a b, HasDerivWithinAt g (f' x) (Ici x) x := by
    intro x hx
    simpa using (hf' x hx).sub (hasDerivWithinAt_const _ _ _)
  -- The linear barrier `B x = C * (x - a)` has constant derivative `C`.
  let B x := C * (x - a)
  have hB : â x, HasDerivAt B C x := by
    intro x
    simpa using (hasDerivAt_const x C).mul ((hasDerivAt_id x).sub (hasDerivAt_const x a))
  -- Fence `âgâ` below `B`; the base inequality `âg aâ †B a` reduces to `0 †0`.
  convert image_norm_le_of_norm_deriv_right_le_deriv_boundary hg hg' _ hB bound
  simp only [g, B]; rw [sub_self, norm_zero, sub_self, mul_zero]
/-- A function on `[a, b]` with the norm of the derivative within `[a, b]`
bounded by `C` satisfies `âf x - f aâ †C * (x - a)`, `HasDerivWithinAt`
version. -/
theorem norm_image_sub_le_of_norm_deriv_le_segment' {f' : â â E} {C : â}
    (hf : â x â Icc a b, HasDerivWithinAt f (f' x) (Icc a b) x)
    (bound : â x â Ico a b, âf' xâ †C) : â x â Icc a b, âf x - f aâ †C * (x - a) := by
  refine
    norm_image_sub_le_of_norm_deriv_right_le_segment (fun x hx => (hf x hx).continuousWithinAt)
      (fun x hx => ?_) bound
  -- Upgrade the derivative within `Icc a b` to one within `Ici x`: near `x`,
  -- `Icc a b` is a right-neighborhood of `x` inside `Ici x`.
  exact (hf x <| Ico_subset_Icc_self hx).mono_of_mem (Icc_mem_nhdsWithin_Ici hx)
/-- A function on `[a, b]` with the norm of the derivative within `[a, b]`
bounded by `C` satisfies `âf x - f aâ †C * (x - a)`, `derivWithin`
version. -/
theorem norm_image_sub_le_of_norm_deriv_le_segment {C : â} (hf : DifferentiableOn â f (Icc a b))
    (bound : â x â Ico a b, âderivWithin f (Icc a b) xâ †C) :
    â x â Icc a b, âf x - f aâ †C * (x - a) := by
  refine norm_image_sub_le_of_norm_deriv_le_segment' ?_ bound
  -- Differentiability within `Icc a b` provides the required `HasDerivWithinAt` witness.
  exact fun x hx => (hf x hx).hasDerivWithinAt
/-- A function on `[0, 1]` with the norm of the derivative within `[0, 1]`
bounded by `C` satisfies `âf 1 - f 0â †C`, `HasDerivWithinAt`
version. -/
theorem norm_image_sub_le_of_norm_deriv_le_segment_01' {f' : â â E} {C : â}
    (hf : â x â Icc (0 : â) 1, HasDerivWithinAt f (f' x) (Icc (0 : â) 1) x)
    (bound : â x â Ico (0 : â) 1, âf' xâ †C) : âf 1 - f 0â †C := by
  -- Specialize the segment estimate at `x = 1`; the factor `(1 - 0)` simplifies away.
  simpa only [sub_zero, mul_one] using
    norm_image_sub_le_of_norm_deriv_le_segment' hf bound 1 (right_mem_Icc.2 zero_le_one)
/-- A function on `[0, 1]` with the norm of the derivative within `[0, 1]`
bounded by `C` satisfies `âf 1 - f 0â †C`, `derivWithin` version. -/
theorem norm_image_sub_le_of_norm_deriv_le_segment_01 {C : â}
    (hf : DifferentiableOn â f (Icc (0 : â) 1))
    (bound : â x â Ico (0 : â) 1, âderivWithin f (Icc (0 : â) 1) xâ †C) : âf 1 - f 0â †C := by
  -- Specialize the segment estimate at `x = 1`; the factor `(1 - 0)` simplifies away.
  simpa only [sub_zero, mul_one] using
    norm_image_sub_le_of_norm_deriv_le_segment hf bound 1 (right_mem_Icc.2 zero_le_one)
/-- A function that is continuous on `[a, b]` and has zero right derivative at every point of
`[a, b)` is constant on `[a, b]`. -/
theorem constant_of_has_deriv_right_zero (hcont : ContinuousOn f (Icc a b))
    (hderiv : â x â Ico a b, HasDerivWithinAt f 0 (Ici x) x) : â x â Icc a b, f x = f a := by
  -- The segment estimate with `C = 0` forces `âf x - f aâ = 0`.
  have : â x â Icc a b, âf x - f aâ †0 * (x - a) := fun x hx =>
    norm_image_sub_le_of_norm_deriv_right_le_segment hcont hderiv (fun _ _ => norm_zero.le) x hx
  simpa only [zero_mul, norm_le_zero_iff, sub_eq_zero] using this
/-- A function that is differentiable on `[a, b]` whose derivative within `[a, b]` vanishes on
`[a, b)` is constant on `[a, b]`. -/
theorem constant_of_derivWithin_zero (hdiff : DifferentiableOn â f (Icc a b))
    (hderiv : â x â Ico a b, derivWithin f (Icc a b) x = 0) : â x â Icc a b, f x = f a := by
  -- Rephrase the zero-derivative hypothesis as a norm bound by `0`.
  have H : â x â Ico a b, âderivWithin f (Icc a b) xâ †0 := by
    simpa only [norm_le_zero_iff] using fun x hx => hderiv x hx
  simpa only [zero_mul, norm_le_zero_iff, sub_eq_zero] using fun x hx =>
    norm_image_sub_le_of_norm_deriv_le_segment hdiff H x hx
variable {f' g : â â E}
/-- If two continuous functions on `[a, b]` have the same right derivative and are equal at `a`,
then they are equal everywhere on `[a, b]`. -/
theorem eq_of_has_deriv_right_eq (derivf : â x â Ico a b, HasDerivWithinAt f (f' x) (Ici x) x)
    (derivg : â x â Ico a b, HasDerivWithinAt g (f' x) (Ici x) x) (fcont : ContinuousOn f (Icc a b))
    (gcont : ContinuousOn g (Icc a b)) (hi : f a = g a) : â y â Icc a b, f y = g y := by
  -- Restate both the hypothesis and the goal as the vanishing of `f - g`.
  simp only [â @sub_eq_zero _ _ (f _)] at hi â¢
  -- `f - g` has zero right derivative, hence is constant, equal to `f a - g a = 0`.
  exact hi âž constant_of_has_deriv_right_zero (fcont.sub gcont) fun y hy => by
    simpa only [sub_self] using (derivf y hy).sub (derivg y hy)
/-- If two differentiable functions on `[a, b]` have the same derivative within `[a, b]` everywhere
on `[a, b)` and are equal at `a`, then they are equal everywhere on `[a, b]`. -/
theorem eq_of_derivWithin_eq (fdiff : DifferentiableOn â f (Icc a b))
    (gdiff : DifferentiableOn â g (Icc a b))
    (hderiv : EqOn (derivWithin f (Icc a b)) (derivWithin g (Icc a b)) (Ico a b)) (hi : f a = g a) :
    â y â Icc a b, f y = g y := by
  -- Convert differentiability within `Icc a b` into right derivatives on `Ico a b`.
  have A : â y â Ico a b, HasDerivWithinAt f (derivWithin f (Icc a b) y) (Ici y) y := fun y hy =>
    (fdiff y (mem_Icc_of_Ico hy)).hasDerivWithinAt.mono_of_mem (Icc_mem_nhdsWithin_Ici hy)
  have B : â y â Ico a b, HasDerivWithinAt g (derivWithin g (Icc a b) y) (Ici y) y := fun y hy =>
    (gdiff y (mem_Icc_of_Ico hy)).hasDerivWithinAt.mono_of_mem (Icc_mem_nhdsWithin_Ici hy)
  exact
    eq_of_has_deriv_right_eq A (fun y hy => (hderiv hy).symm âž B y hy) fdiff.continuousOn
      gdiff.continuousOn hi
end
/-!
### Vector-valued functions `f : E â G`
Theorems in this section work both for real and complex differentiable functions. We use assumptions
`[RCLike ð] [NormedSpace ð E] [NormedSpace ð G]` to achieve this result. For the domain `E` we
also assume `[NormedSpace â E]` to have a notion of a `Convex` set. -/
section
variable {ð G : Type*} [RCLike ð] [NormedSpace ð E] [NormedAddCommGroup G] [NormedSpace ð G]
namespace Convex
variable {f g : E â G} {C : â} {s : Set E} {x y : E} {f' g' : E â E âL[ð] G} {Ï : E âL[ð] G}
/-- The mean value theorem on a convex set: if the derivative of a function is bounded by `C`, then
the function is `C`-Lipschitz. Version with `HasFDerivWithinAt`. -/
theorem norm_image_sub_le_of_norm_hasFDerivWithin_le
    (hf : â x â s, HasFDerivWithinAt f (f' x) s x) (bound : â x â s, âf' xâ †C) (hs : Convex â s)
    (xs : x â s) (ys : y â s) : âf y - f xâ †C * ây - xâ := by
  letI : NormedSpace â G := RestrictScalars.normedSpace â ð G
  /- By composition with `AffineMap.lineMap x y`, we reduce to a statement for functions defined
    on `[0,1]`, for which it is proved in `norm_image_sub_le_of_norm_deriv_le_segment`.
    We just have to check the differentiability of the composition and bounds on its derivative,
    which is straightforward but tedious for lack of automation. -/
  set g := (AffineMap.lineMap x y : â â E)
  -- By convexity, the segment parametrization `g` maps `[0, 1]` into `s`.
  have segm : MapsTo g (Icc 0 1 : Set â) s := hs.mapsTo_lineMap xs ys
  -- The composition `f â g` has right derivative `f' (g t) (y - x)` on `[0, 1]`.
  have hD : â t â Icc (0 : â) 1,
      HasDerivWithinAt (f â g) (f' (g t) (y - x)) (Icc 0 1) t := fun t ht => by
    simpa using ((hf (g t) (segm ht)).restrictScalars â).comp_hasDerivWithinAt _
      AffineMap.hasDerivWithinAt_lineMap segm
  -- The operator-norm bound on `f'` bounds the derivative of the composition.
  have bound : â t â Ico (0 : â) 1, âf' (g t) (y - x)â †C * ây - xâ := fun t ht =>
    le_of_opNorm_le _ (bound _ <| segm <| Ico_subset_Icc_self ht) _
  simpa [g] using norm_image_sub_le_of_norm_deriv_le_segment_01' hD bound
/-- The mean value theorem on a convex set: if the derivative of a function is bounded by `C` on
`s`, then the function is `C`-Lipschitz on `s`. Version with `HasFDerivWithinAt` and
`LipschitzOnWith`. -/
theorem lipschitzOnWith_of_nnnorm_hasFDerivWithin_le {C : ââ¥0}
    (hf : â x â s, HasFDerivWithinAt f (f' x) s x) (bound : â x â s, âf' xââ †C)
    (hs : Convex â s) : LipschitzOnWith C f s := by
  -- Unfold the Lipschitz condition to a pointwise norm inequality and apply the MVT bound.
  rw [lipschitzOnWith_iff_norm_sub_le]
  intro x x_in y y_in
  exact hs.norm_image_sub_le_of_norm_hasFDerivWithin_le hf bound y_in x_in
/-- Let `s` be a convex set in a real normed vector space `E`, let `f : E â G` be a function
differentiable within `s` in a neighborhood of `x : E` with derivative `f'`. Suppose that `f'` is
continuous within `s` at `x`. Then for any number `K : ââ¥0` larger than `âf' xââ`, `f` is
`K`-Lipschitz on some neighborhood of `x` within `s`. See also
`Convex.exists_nhdsWithin_lipschitzOnWith_of_hasFDerivWithinAt` for a version that claims
existence of `K` instead of an explicit estimate. -/
theorem exists_nhdsWithin_lipschitzOnWith_of_hasFDerivWithinAt_of_nnnorm_lt (hs : Convex â s)
    {f : E â G} (hder : âá¶ y in ð[s] x, HasFDerivWithinAt f (f' y) s y)
    (hcont : ContinuousWithinAt f' s x) (K : ââ¥0) (hK : âf' xââ < K) :
    â t â ð[s] x, LipschitzOnWith K f t := by
  -- Pick a ball around `x` on which `f` is differentiable within `s` and `âf' yââ < K`.
  obtain âšÎµ, ε0, hε⩠: â ε > 0,
      ball x ε â© s â { y | HasFDerivWithinAt f (f' y) s y â§ âf' yââ < K } :=
    mem_nhdsWithin_iff.1 (hder.and <| hcont.nnnorm.eventually (gt_mem_nhds hK))
  rw [inter_comm] at hε
  refine âšs â© ball x ε, inter_mem_nhdsWithin _ (ball_mem_nhds _ ε0), ?_â©
  -- `s â© ball x ε` is convex, so the mean value inequality gives the Lipschitz bound there.
  exact
    (hs.inter (convex_ball _ _)).lipschitzOnWith_of_nnnorm_hasFDerivWithin_le
      (fun y hy => (hε hy).1.mono inter_subset_left) fun y hy => (hε hy).2.le
/-- Let `s` be a convex set in a real normed vector space `E`, let `f : E â G` be a function
differentiable within `s` in a neighborhood of `x : E` with derivative `f'`. Suppose that `f'` is
continuous within `s` at `x`. Then for any number `K : ââ¥0` larger than `âf' xââ`, `f` is Lipschitz
on some neighborhood of `x` within `s`. See also
`Convex.exists_nhdsWithin_lipschitzOnWith_of_hasFDerivWithinAt_of_nnnorm_lt` for a version
with an explicit estimate on the Lipschitz constant. -/
theorem exists_nhdsWithin_lipschitzOnWith_of_hasFDerivWithinAt (hs : Convex â s) {f : E â G}
    (hder : âá¶ y in ð[s] x, HasFDerivWithinAt f (f' y) s y) (hcont : ContinuousWithinAt f' s x) :
    â K, â t â ð[s] x, LipschitzOnWith K f t :=
  -- Choose any `K > âf' xââ` and apply the quantitative version.
  (exists_gt _).imp <|
    hs.exists_nhdsWithin_lipschitzOnWith_of_hasFDerivWithinAt_of_nnnorm_lt hder hcont
/-- The mean value theorem on a convex set: if the derivative of a function within this set is
bounded by `C`, then the function is `C`-Lipschitz. Version with `fderivWithin`. -/
theorem norm_image_sub_le_of_norm_fderivWithin_le (hf : DifferentiableOn ð f s)
    (bound : â x â s, âfderivWithin ð f s xâ †C) (hs : Convex â s) (xs : x â s) (ys : y â s) :
    âf y - f xâ †C * ây - xâ :=
  -- `DifferentiableOn` provides the `HasFDerivWithinAt` witnesses.
  hs.norm_image_sub_le_of_norm_hasFDerivWithin_le (fun x hx => (hf x hx).hasFDerivWithinAt) bound
    xs ys
/-- The mean value theorem on a convex set: if the derivative of a function is bounded by `C` on
`s`, then the function is `C`-Lipschitz on `s`. Version with `fderivWithin` and
`LipschitzOnWith`. -/
theorem lipschitzOnWith_of_nnnorm_fderivWithin_le {C : ââ¥0} (hf : DifferentiableOn ð f s)
    (bound : â x â s, âfderivWithin ð f s xââ †C) (hs : Convex â s) : LipschitzOnWith C f s :=
  -- `DifferentiableOn` provides the `HasFDerivWithinAt` witnesses.
  hs.lipschitzOnWith_of_nnnorm_hasFDerivWithin_le (fun x hx => (hf x hx).hasFDerivWithinAt) bound
/-- The mean value theorem on a convex set: if the derivative of a function is bounded by `C`,
then the function is `C`-Lipschitz. Version with `fderiv`. -/
theorem norm_image_sub_le_of_norm_fderiv_le (hf : â x â s, DifferentiableAt ð f x)
    (bound : â x â s, âfderiv ð f xâ †C) (hs : Convex â s) (xs : x â s) (ys : y â s) :
    âf y - f xâ †C * ây - xâ :=
  -- A full derivative restricts to a derivative within `s`.
  hs.norm_image_sub_le_of_norm_hasFDerivWithin_le
    (fun x hx => (hf x hx).hasFDerivAt.hasFDerivWithinAt) bound xs ys
/-- The mean value theorem on a convex set: if the derivative of a function is bounded by `C` on
`s`, then the function is `C`-Lipschitz on `s`. Version with `fderiv` and `LipschitzOnWith`. -/
theorem lipschitzOnWith_of_nnnorm_fderiv_le {C : ââ¥0} (hf : â x â s, DifferentiableAt ð f x)
    (bound : â x â s, âfderiv ð f xââ †C) (hs : Convex â s) : LipschitzOnWith C f s :=
  -- A full derivative restricts to a derivative within `s`.
  hs.lipschitzOnWith_of_nnnorm_hasFDerivWithin_le
    (fun x hx => (hf x hx).hasFDerivAt.hasFDerivWithinAt) bound
/-- The mean value theorem: if the derivative of a function is bounded by `C`, then the function is
`C`-Lipschitz. Version with `fderiv` and `LipschitzWith`. -/
theorem _root_.lipschitzWith_of_nnnorm_fderiv_le
    {E : Type*} [NormedAddCommGroup E] [NormedSpace ð E] {f : E â G}
    {C : ââ¥0} (hf : Differentiable ð f)
    (bound : â x, âfderiv ð f xââ †C) : LipschitzWith C f := by
  -- Restrict scalars so `E` carries a real normed-space structure; then `univ` is convex.
  let A : NormedSpace â E := RestrictScalars.normedSpace â ð E
  rw [â lipschitzOnWith_univ]
  exact lipschitzOnWith_of_nnnorm_fderiv_le (fun x _ ⊠hf x) (fun x _ ⊠bound x) convex_univ
/-- Variant of the mean value inequality on a convex set, using a bound on the difference between
the derivative and a fixed linear map, rather than a bound on the derivative itself. Version with
`HasFDerivWithinAt`. -/
theorem norm_image_sub_le_of_norm_hasFDerivWithin_le'
    (hf : â x â s, HasFDerivWithinAt f (f' x) s x) (bound : â x â s, âf' x - Ïâ †C)
    (hs : Convex â s) (xs : x â s) (ys : y â s) : âf y - f x - Ï (y - x)â †C * ây - xâ := by
  /- We subtract `Ï` to define a new function `g` for which `g' = 0`, for which the previous theorem
    applies, `Convex.norm_image_sub_le_of_norm_hasFDerivWithin_le`. Then, we just need to glue
    together the pieces, expressing back `f` in terms of `g`. -/
  let g y := f y - Ï y
  -- `g` has derivative `f' x - Ï` within `s`, which is bounded by `C` in norm.
  have hg : â x â s, HasFDerivWithinAt g (f' x - Ï) s x := fun x xs =>
    (hf x xs).sub Ï.hasFDerivWithinAt
  calc
    âf y - f x - Ï (y - x)â = âf y - f x - (Ï y - Ï x)â := by simp
    _ = âf y - Ï y - (f x - Ï x)â := by congr 1; abel
    _ = âg y - g xâ := by simp
    _ †C * ây - xâ := Convex.norm_image_sub_le_of_norm_hasFDerivWithin_le hg bound hs xs ys
/-- Variant of the mean value inequality on a convex set. Version with `fderivWithin`. -/
theorem norm_image_sub_le_of_norm_fderivWithin_le' (hf : DifferentiableOn ð f s)
    (bound : â x â s, âfderivWithin ð f s x - Ïâ †C) (hs : Convex â s) (xs : x â s) (ys : y â s) :
    âf y - f x - Ï (y - x)â †C * ây - xâ :=
  -- `DifferentiableOn` provides the `HasFDerivWithinAt` witnesses.
  hs.norm_image_sub_le_of_norm_hasFDerivWithin_le' (fun x hx => (hf x hx).hasFDerivWithinAt) bound
    xs ys
/-- Variant of the mean value inequality on a convex set. Version with `fderiv`. -/
theorem norm_image_sub_le_of_norm_fderiv_le' (hf : â x â s, DifferentiableAt ð f x)
    (bound : â x â s, âfderiv ð f x - Ïâ †C) (hs : Convex â s) (xs : x â s) (ys : y â s) :
    âf y - f x - Ï (y - x)â †C * ây - xâ :=
  -- A full derivative restricts to a derivative within `s`.
  hs.norm_image_sub_le_of_norm_hasFDerivWithin_le'
    (fun x hx => (hf x hx).hasFDerivAt.hasFDerivWithinAt) bound xs ys
/-- If a function has zero Fréchet derivative at every point of a convex set,
then it is a constant on this set. -/
theorem is_const_of_fderivWithin_eq_zero (hs : Convex â s) (hf : DifferentiableOn ð f s)
    (hf' : â x â s, fderivWithin ð f s x = 0) (hx : x â s) (hy : y â s) : f x = f y := by
  -- A zero derivative is bounded in norm by `0`, so `f` is `0`-Lipschitz on `s`.
  have bound : â x â s, âfderivWithin ð f s xâ †0 := fun x hx => by
    simp only [hf' x hx, norm_zero, le_rfl]
  simpa only [(dist_eq_norm _ _).symm, zero_mul, dist_le_zero, eq_comm] using
    hs.norm_image_sub_le_of_norm_fderivWithin_le hf bound hx hy
/-- If a function has zero Fréchet derivative at every point, then it is constant. -/
theorem _root_.is_const_of_fderiv_eq_zero
    {E : Type*} [NormedAddCommGroup E] [NormedSpace ð E] {f : E â G}
    (hf : Differentiable ð f) (hf' : â x, fderiv ð f x = 0)
    (x y : E) : f x = f y := by
  -- Restrict scalars so that `univ` is a convex set in a real normed space.
  let A : NormedSpace â E := RestrictScalars.normedSpace â ð E
  exact convex_univ.is_const_of_fderivWithin_eq_zero hf.differentiableOn
    (fun x _ => by rw [fderivWithin_univ]; exact hf' x) trivial trivial
/-- If two functions have equal Fréchet derivatives at every point of a convex set, and are equal at
one point in that set, then they are equal on that set. -/
theorem eqOn_of_fderivWithin_eq (hs : Convex â s) (hf : DifferentiableOn ð f s)
    (hg : DifferentiableOn ð g s) (hs' : UniqueDiffOn ð s)
    (hf' : â x â s, fderivWithin ð f s x = fderivWithin ð g s x) (hx : x â s) (hfgx : f x = g x) :
    s.EqOn f g := fun y hy => by
  -- It suffices to show that `f - g` takes the same value at `x` and `y`.
  suffices f x - g x = f y - g y by rwa [hfgx, sub_self, eq_comm, sub_eq_zero] at this
  -- `f - g` has zero derivative within `s`, hence is constant on `s`.
  refine hs.is_const_of_fderivWithin_eq_zero (hf.sub hg) (fun z hz => ?_) hx hy
  rw [fderivWithin_sub (hs' _ hz) (hf _ hz) (hg _ hz), sub_eq_zero, hf' _ hz]
/-- If two functions have equal Fréchet derivatives everywhere and agree at one point,
then they are equal. -/
theorem _root_.eq_of_fderiv_eq
    {E : Type*} [NormedAddCommGroup E] [NormedSpace ð E] {f g : E â G}
    (hf : Differentiable ð f) (hg : Differentiable ð g)
    (hf' : â x, fderiv ð f x = fderiv ð g x) (x : E) (hfgx : f x = g x) : f = g := by
  -- Restrict scalars so that `univ` is a convex set in a real normed space.
  let A : NormedSpace â E := RestrictScalars.normedSpace â ð E
  suffices Set.univ.EqOn f g from funext fun x => this <| mem_univ x
  exact convex_univ.eqOn_of_fderivWithin_eq hf.differentiableOn hg.differentiableOn
    uniqueDiffOn_univ (fun x _ => by simpa using hf' _) (mem_univ _) hfgx
end Convex
namespace Convex
variable {f f' : ð â G} {s : Set ð} {x y : ð}
/-- The mean value theorem on a convex set in dimension 1: if the derivative of a function is
bounded by `C`, then the function is `C`-Lipschitz. Version with `HasDerivWithinAt`. -/
theorem norm_image_sub_le_of_norm_hasDerivWithin_le {C : â}
    (hf : â x â s, HasDerivWithinAt f (f' x) s x) (bound : â x â s, âf' xâ †C) (hs : Convex â s)
    (xs : x â s) (ys : y â s) : âf y - f xâ †C * ây - xâ :=
  -- View each scalar derivative as a continuous linear map; its operator norm is `âf' xâ`.
  Convex.norm_image_sub_le_of_norm_hasFDerivWithin_le (fun x hx => (hf x hx).hasFDerivWithinAt)
    (fun x hx => le_trans (by simp) (bound x hx)) hs xs ys
/-- The mean value theorem on a convex set in dimension 1: if the derivative of a function is
bounded by `C` on `s`, then the function is `C`-Lipschitz on `s`.
Version with `HasDerivWithinAt` and `LipschitzOnWith`. -/
theorem lipschitzOnWith_of_nnnorm_hasDerivWithin_le {C : ââ¥0} (hs : Convex â s)
    (hf : â x â s, HasDerivWithinAt f (f' x) s x) (bound : â x â s, âf' xââ †C) :
    LipschitzOnWith C f s :=
  -- View each scalar derivative as a continuous linear map and reuse the Fréchet version.
  Convex.lipschitzOnWith_of_nnnorm_hasFDerivWithin_le (fun x hx => (hf x hx).hasFDerivWithinAt)
    (fun x hx => le_trans (by simp) (bound x hx)) hs
/-- The mean value theorem on a convex set in dimension 1: if the derivative of a function within
this set is bounded by `C`, then the function is `C`-Lipschitz. Version with `derivWithin` -/
theorem norm_image_sub_le_of_norm_derivWithin_le {C : â} (hf : DifferentiableOn ð f s)
    (bound : â x â s, âderivWithin f s xâ †C) (hs : Convex â s) (xs : x â s) (ys : y â s) :
    âf y - f xâ †C * ây - xâ :=
  -- `DifferentiableOn` provides the `HasDerivWithinAt` witnesses.
  hs.norm_image_sub_le_of_norm_hasDerivWithin_le (fun x hx => (hf x hx).hasDerivWithinAt) bound xs
    ys
/-- The mean value theorem on a convex set in dimension 1: if the derivative of a function is
bounded by `C` on `s`, then the function is `C`-Lipschitz on `s`.
Version with `derivWithin` and `LipschitzOnWith`. -/
theorem lipschitzOnWith_of_nnnorm_derivWithin_le {C : ââ¥0} (hs : Convex â s)
    (hf : DifferentiableOn ð f s) (bound : â x â s, âderivWithin f s xââ †C) :
    LipschitzOnWith C f s :=
  -- `DifferentiableOn` provides the `HasDerivWithinAt` witnesses.
  hs.lipschitzOnWith_of_nnnorm_hasDerivWithin_le (fun x hx => (hf x hx).hasDerivWithinAt) bound
/-- The mean value theorem on a convex set in dimension 1: if the derivative of a function is
bounded by `C`, then the function is `C`-Lipschitz. Version with `deriv`. -/
theorem norm_image_sub_le_of_norm_deriv_le {C : â} (hf : â x â s, DifferentiableAt ð f x)
    (bound : â x â s, âderiv f xâ †C) (hs : Convex â s) (xs : x â s) (ys : y â s) :
    âf y - f xâ †C * ây - xâ :=
  -- A full derivative restricts to a derivative within `s`.
  hs.norm_image_sub_le_of_norm_hasDerivWithin_le
    (fun x hx => (hf x hx).hasDerivAt.hasDerivWithinAt) bound xs ys
/-- The mean value theorem on a convex set in dimension 1: if the derivative of a function is
bounded by `C` on `s`, then the function is `C`-Lipschitz on `s`.
Version with `deriv` and `LipschitzOnWith`. -/
theorem lipschitzOnWith_of_nnnorm_deriv_le {C : ââ¥0} (hf : â x â s, DifferentiableAt ð f x)
    (bound : â x â s, âderiv f xââ †C) (hs : Convex â s) : LipschitzOnWith C f s :=
  -- A full derivative restricts to a derivative within `s`.
  hs.lipschitzOnWith_of_nnnorm_hasDerivWithin_le
    (fun x hx => (hf x hx).hasDerivAt.hasDerivWithinAt) bound
/-- The mean value theorem in dimension 1: if the derivative of a function is bounded by `C`,
then the function is `C`-Lipschitz. Version with `deriv` and `LipschitzWith`. -/
theorem _root_.lipschitzWith_of_nnnorm_deriv_le {C : ââ¥0} (hf : Differentiable ð f)
    (bound : â x, âderiv f xââ †C) : LipschitzWith C f :=
  -- Specialize the convex-set version to `univ`.
  lipschitzOnWith_univ.1 <|
    convex_univ.lipschitzOnWith_of_nnnorm_deriv_le (fun x _ => hf x) fun x _ => bound x
/-- If `f : ð â G`, `ð = â` or `ð = â`, is differentiable everywhere and its derivative equal zero,
then it is a constant function. -/
theorem _root_.is_const_of_deriv_eq_zero (hf : Differentiable ð f) (hf' : â x, deriv f x = 0)
    (x y : ð) : f x = f y :=
  -- Translate the one-dimensional derivative into a Fréchet derivative and reuse the general fact.
  is_const_of_fderiv_eq_zero hf (fun z => by ext; simp [â deriv_fderiv, hf']) _ _
end Convex
end
/-! ### Functions `[a, b] â â`. -/
section Interval
-- Declare all variables here to make sure they come in a correct order
variable (f f' : â â â) {a b : â} (hab : a < b) (hfc : ContinuousOn f (Icc a b))
(hff' : â x â Ioo a b, HasDerivAt f (f' x) x) (hfd : DifferentiableOn â f (Ioo a b))
(g g' : â â â) (hgc : ContinuousOn g (Icc a b)) (hgg' : â x â Ioo a b, HasDerivAt g (g' x) x)
(hgd : DifferentiableOn â g (Ioo a b))
/-- Cauchy's **Mean Value Theorem**, `HasDerivAt` version. -/
theorem exists_ratio_hasDerivAt_eq_ratio_slope :
    â c â Ioo a b, (g b - g a) * f' c = (f b - f a) * g' c := by
  -- The auxiliary function `h` takes equal values at `a` and `b`.
  let h x := (g b - g a) * f x - (f b - f a) * g x
  have hI : h a = h b := by simp only [h]; ring
  let h' x := (g b - g a) * f' x - (f b - f a) * g' x
  have hhh' : â x â Ioo a b, HasDerivAt h (h' x) x := fun x hx =>
    ((hff' x hx).const_mul (g b - g a)).sub ((hgg' x hx).const_mul (f b - f a))
  have hhc : ContinuousOn h (Icc a b) :=
    (continuousOn_const.mul hfc).sub (continuousOn_const.mul hgc)
  -- Rolle's theorem gives a critical point of `h`, which is the desired `c`.
  rcases exists_hasDerivAt_eq_zero hab hhc hI hhh' with âšc, cmem, hcâ©
  exact âšc, cmem, sub_eq_zero.1 hcâ©
/-- Cauchy's **Mean Value Theorem**, extended `HasDerivAt` version. -/
theorem exists_ratio_hasDerivAt_eq_ratio_slope' {lfa lga lfb lgb : â}
    (hff' : â x â Ioo a b, HasDerivAt f (f' x) x) (hgg' : â x â Ioo a b, HasDerivAt g (g' x) x)
    (hfa : Tendsto f (ð[>] a) (ð lfa)) (hga : Tendsto g (ð[>] a) (ð lga))
    (hfb : Tendsto f (ð[<] b) (ð lfb)) (hgb : Tendsto g (ð[<] b) (ð lgb)) :
    â c â Ioo a b, (lgb - lga) * f' c = (lfb - lfa) * g' c := by
  -- The auxiliary function `h` tends to the same limit at both endpoints.
  let h x := (lgb - lga) * f x - (lfb - lfa) * g x
  have hha : Tendsto h (ð[>] a) (ð <| lgb * lfa - lfb * lga) := by
    have : Tendsto h (ð[>] a) (ð <| (lgb - lga) * lfa - (lfb - lfa) * lga) :=
      (tendsto_const_nhds.mul hfa).sub (tendsto_const_nhds.mul hga)
    convert this using 2
    ring
  have hhb : Tendsto h (ð[<] b) (ð <| lgb * lfa - lfb * lga) := by
    have : Tendsto h (ð[<] b) (ð <| (lgb - lga) * lfb - (lfb - lfa) * lgb) :=
      (tendsto_const_nhds.mul hfb).sub (tendsto_const_nhds.mul hgb)
    convert this using 2
    ring
  let h' x := (lgb - lga) * f' x - (lfb - lfa) * g' x
  have hhh' : â x â Ioo a b, HasDerivAt h (h' x) x := by
    intro x hx
    exact ((hff' x hx).const_mul _).sub ((hgg' x hx).const_mul _)
  -- Rolle's theorem for limits at the endpoints produces a critical point of `h`.
  rcases exists_hasDerivAt_eq_zero' hab hha hhb hhh' with âšc, cmem, hcâ©
  exact âšc, cmem, sub_eq_zero.1 hcâ©
/-- Lagrange's Mean Value Theorem, `HasDerivAt` version -/
theorem exists_hasDerivAt_eq_slope : â c â Ioo a b, f' c = (f b - f a) / (b - a) := by
  -- Apply Cauchy's MVT with `g = id`, whose derivative is the constant `1`.
  obtain âšc, cmem, hcâ© : â c â Ioo a b, (b - a) * f' c = (f b - f a) * 1 :=
    exists_ratio_hasDerivAt_eq_ratio_slope f f' hab hfc hff' id 1 continuousOn_id
      fun x _ => hasDerivAt_id x
  use c, cmem
  rwa [mul_one, mul_comm, â eq_div_iff (sub_ne_zero.2 hab.ne')] at hc
/-- Cauchy's Mean Value Theorem, `deriv` version. -/
theorem exists_ratio_deriv_eq_ratio_slope :
    â c â Ioo a b, (g b - g a) * deriv f c = (f b - f a) * deriv g c :=
  -- Upgrade pointwise differentiability on the open interval to `HasDerivAt` (the interval
  -- is a neighborhood of each of its points) and apply the `HasDerivAt` version.
  exists_ratio_hasDerivAt_eq_ratio_slope f (deriv f) hab hfc
    (fun x hx => ((hfd x hx).differentiableAt <| IsOpen.mem_nhds isOpen_Ioo hx).hasDerivAt) g
    (deriv g) hgc fun x hx =>
    ((hgd x hx).differentiableAt <| IsOpen.mem_nhds isOpen_Ioo hx).hasDerivAt
/-- Cauchy's Mean Value Theorem, extended `deriv` version: continuity on `[a, b]` is replaced
by one-sided limits of `f` and `g` at the endpoints. -/
theorem exists_ratio_deriv_eq_ratio_slope' {lfa lga lfb lgb : â}
    (hdf : DifferentiableOn â f <| Ioo a b) (hdg : DifferentiableOn â g <| Ioo a b)
    (hfa : Tendsto f (ð[>] a) (ð lfa)) (hga : Tendsto g (ð[>] a) (ð lga))
    (hfb : Tendsto f (ð[<] b) (ð lfb)) (hgb : Tendsto g (ð[<] b) (ð lgb)) :
    â c â Ioo a b, (lgb - lga) * deriv f c = (lfb - lfa) * deriv g c :=
  -- Reduce to the extended `HasDerivAt` version via `DifferentiableAt.hasDerivAt`.
  exists_ratio_hasDerivAt_eq_ratio_slope' _ _ hab _ _
    (fun x hx => ((hdf x hx).differentiableAt <| Ioo_mem_nhds hx.1 hx.2).hasDerivAt)
    (fun x hx => ((hdg x hx).differentiableAt <| Ioo_mem_nhds hx.1 hx.2).hasDerivAt) hfa hga hfb hgb
/-- Lagrange's **Mean Value Theorem**, `deriv` version. -/
theorem exists_deriv_eq_slope : â c â Ioo a b, deriv f c = (f b - f a) / (b - a) :=
  -- Same reduction as above: differentiability on the open interval gives `HasDerivAt`.
  exists_hasDerivAt_eq_slope f (deriv f) hab hfc fun x hx =>
    ((hfd x hx).differentiableAt <| IsOpen.mem_nhds isOpen_Ioo hx).hasDerivAt
/-- Lagrange's **Mean Value Theorem**, `deriv` version, with the conclusion stated using
`slope f a b` instead of the explicit quotient `(f b - f a) / (b - a)`. -/
theorem exists_deriv_eq_slope' : â c â Ioo a b, deriv f c = slope f a b := by
  -- `slope f a b` unfolds (over a field) to exactly the quotient in `exists_deriv_eq_slope`.
  rw [slope_def_field]
  exact exists_deriv_eq_slope f hab hfc hfd
/-- A real function whose derivative tends to infinity from the right at a point is not
differentiable on the right at that point -/
theorem not_differentiableWithinAt_of_deriv_tendsto_atTop_Ioi (f : â â â) {a : â}
    (hf : Tendsto (deriv f) (ð[>] a) atTop) : ¬ DifferentiableWithinAt â f (Ioi a) a := by
  -- To the right of `a`, the set `Ioi a` is a neighborhood of each point, so `deriv f` and
  -- `derivWithin f (Ioi a)` agree there; restate the hypothesis in terms of `derivWithin`.
  replace hf : Tendsto (derivWithin f (Ioi a)) (ð[>] a) atTop := by
    refine hf.congr' ?_
    filter_upwards [eventually_mem_nhdsWithin] with x hx
    have : Ioi a â ð x := by simp [â mem_interior_iff_mem_nhds, hx]
    exact (derivWithin_of_mem_nhds this).symm
  by_cases hcont_at_a : ContinuousWithinAt f (Ici a) a
  -- Easy case: if `f` is not right-continuous at `a`, it cannot be right-differentiable there.
  case neg =>
    intro hcontra
    have := hcontra.continuousWithinAt
    rw [â ContinuousWithinAt.diff_iff this] at hcont_at_a
    simp at hcont_at_a
  case pos =>
    intro hdiff
    replace hdiff := hdiff.hasDerivWithinAt
    rw [hasDerivWithinAt_iff_tendsto_slope, Set.diff_singleton_eq_self not_mem_Ioi_self] at hdiff
    -- `hâ`: eventually (for `b` close to `a` on the right) the within-derivative exceeds
    -- the fixed bound `max (derivWithin f (Ioi a) a + 1) 0` on all of `Ioc a b`.
    have hâ : âá¶ b in ð[>] a,
        â x â Ioc a b, max (derivWithin f (Ioi a) a + 1) 0 < derivWithin f (Ioi a) x := by
      rw [(nhdsWithin_Ioi_basis a).eventually_iff]
      rw [(nhdsWithin_Ioi_basis a).tendsto_left_iff] at hf
      obtain âšb, hab, hbâ© := hf (Ioi (max (derivWithin f (Ioi a) a + 1) 0)) (Ioi_mem_atTop _)
      refine âšb, hab, fun x hx z hz => ?_â©
      simp only [MapsTo, mem_Ioo, mem_Ioi, and_imp] at hb
      exact hb hz.1 <| hz.2.trans_lt hx.2
    -- `hâ`: by assumed right-differentiability, the slope from `a` stays eventually below
    -- `derivWithin f (Ioi a) a + 1`.
    have hâ : âá¶ b in ð[>] a, slope f a b < derivWithin f (Ioi a) a + 1 := by
      rw [(nhds_basis_Ioo _).tendsto_right_iff] at hdiff
      specialize hdiff âšderivWithin f (Ioi a) a - 1, derivWithin f (Ioi a) a + 1â© <| by simp
      filter_upwards [hdiff] with z hz using hz.2
    -- Combine `hâ` and `hâ` via the mean value theorem on `[a, b]` to reach a contradiction.
    have hcontra : âá¶ _ in ð[>] a, False := by
      filter_upwards [hâ, hâ, eventually_mem_nhdsWithin] with b hb hslope (hab : a < b)
      -- `f` is differentiable on `Ioc a b` since its within-derivative there is nonzero
      -- (it exceeds a nonnegative bound by `hb`).
      have hdiff' : DifferentiableOn â f (Ioc a b) := fun z hz => by
        refine DifferentiableWithinAt.mono (t := Ioi a) ?_ Ioc_subset_Ioi_self
        have : derivWithin f (Ioi a) z â 0 := ne_of_gt <| by
          simp_all only [mem_Ioo, and_imp, mem_Ioc, max_lt_iff]
        exact differentiableWithinAt_of_derivWithin_ne_zero this
      -- Continuity on `Icc a b`: at points of `Ioc a b` it follows from differentiability
      -- (after identifying the two within-neighborhood filters), at `a` from `hcont_at_a`.
      have hcont_Ioc : â z â Ioc a b, ContinuousWithinAt f (Icc a b) z := by
        intro z hz''
        refine (hdiff'.continuousOn z hz'').mono_of_mem ?_
        have hfinal : ð[Ioc a b] z = ð[Icc a b] z := by
          refine nhdsWithin_eq_nhdsWithin' (s := Ioi a) (Ioi_mem_nhds hz''.1) ?_
          simp only [Ioc_inter_Ioi, le_refl, sup_of_le_left]
          ext y
          exact âšfun h => âšmem_Icc_of_Ioc h, mem_of_mem_inter_left hâ©, fun âšH1, H2â© => âšH2, H1.2â©â©
        rw [â hfinal]
        exact self_mem_nhdsWithin
      have hcont : ContinuousOn f (Icc a b) := by
        intro z hz
        by_cases hz' : z = a
        · rw [hz']
          exact hcont_at_a.mono Icc_subset_Ici_self
        · exact hcont_Ioc z âšlt_of_le_of_ne hz.1 (Ne.symm hz'), hz.2â©
      -- Mean value theorem: some `x â Ioo a b` has `deriv f x = slope f a b`.
      obtain âšx, hxâ, hxââ© :=
        exists_deriv_eq_slope' f hab hcont (hdiff'.mono (Ioo_subset_Ioc_self))
      specialize hb x âšhxâ.1, le_of_lt hxâ.2â©
      replace hxâ : derivWithin f (Ioi a) x = slope f a b := by
        have : Ioi a â ð x := by simp [â mem_interior_iff_mem_nhds, hxâ.1]
        rwa [derivWithin_of_mem_nhds this]
      -- Now `hb` bounds the slope from below while `hslope` bounds it from above: absurd.
      rw [hxâ, max_lt_iff] at hb
      linarith
    -- `a` lies in the closure of `Ioi a`, so the filter is nontrivial and
    -- "eventually `False`" is a contradiction.
    simp [Filter.eventually_false_iff_eq_bot, â not_mem_closure_iff_nhdsWithin_eq_bot] at hcontra
/-- A real function whose derivative tends to minus infinity from the right at a point is not
differentiable on the right at that point -/
theorem not_differentiableWithinAt_of_deriv_tendsto_atBot_Ioi (f : â â â) {a : â}
    (hf : Tendsto (deriv f) (ð[>] a) atBot) : ¬ DifferentiableWithinAt â f (Ioi a) a := by
  intro h
  -- Reduce to the `atTop` statement by replacing `f` with `-f`, whose derivative tends to `+â`.
  have hf' : Tendsto (deriv (-f)) (ð[>] a) atTop := by
    rw [Pi.neg_def, deriv.neg']
    exact tendsto_neg_atBot_atTop.comp hf
  exact not_differentiableWithinAt_of_deriv_tendsto_atTop_Ioi (-f) hf' h.neg
/-- A real function whose derivative tends to minus infinity from the left at a point is not
differentiable on the left at that point -/
theorem not_differentiableWithinAt_of_deriv_tendsto_atBot_Iio (f : â â â) {a : â}
    (hf : Tendsto (deriv f) (ð[<] a) atBot) : ¬ DifferentiableWithinAt â f (Iio a) a := by
  -- Reflect through `x ⊠-x`: the behavior of `f` on the left of `a` becomes the behavior
  -- of `f' := f â Neg.neg` on the right of `-a`.
  let f' := f â Neg.neg
  -- Chain rule on a right-neighborhood of `-a`: there `deriv f (-x) †-1`, so `f` is
  -- differentiable at `-x` and `deriv f' x = -(deriv f (-x))`.
  have hderiv : deriv f' =á¶ [ð[>] (-a)] -(deriv f â Neg.neg) := by
    rw [atBot_basis.tendsto_right_iff] at hf
    specialize hf (-1) trivial
    rw [(nhdsWithin_Iio_basis a).eventually_iff] at hf
    rw [EventuallyEq, (nhdsWithin_Ioi_basis (-a)).eventually_iff]
    obtain âšb, hbâ, hbââ© := hf
    refine âš-b, by linarith, fun x hx => ?_â©
    simp only [Pi.neg_apply, Function.comp_apply]
    suffices deriv f' x = deriv f (-x) * deriv (Neg.neg : â â â) x by simpa using this
    refine deriv.comp x (differentiableAt_of_deriv_ne_zero ?_) (by fun_prop)
    rw [mem_Ioo] at hx
    have hâ : -x â Ioo b a := âšby linarith, by linarithâ©
    have hâ : deriv f (-x) †-1 := hbâ hâ
    exact ne_of_lt (by linarith)
  -- Hence `deriv f'` tends to `+â` from the right at `-a`, so `f'` is not right-differentiable.
  have hmain : ¬ DifferentiableWithinAt â f' (Ioi (-a)) (-a) := by
    refine not_differentiableWithinAt_of_deriv_tendsto_atTop_Ioi f' <| Tendsto.congr' hderiv.symm ?_
    refine Tendsto.comp (g := -deriv f) ?_ tendsto_neg_nhdsWithin_Ioi_neg
    exact Tendsto.comp (g := Neg.neg) tendsto_neg_atBot_atTop hf
  intro h
  -- If `f` were left-differentiable at `a`, composing with negation would make `f'`
  -- right-differentiable at `-a`, contradicting `hmain`.
  have : DifferentiableWithinAt â f' (Ioi (-a)) (-a) := by
    refine DifferentiableWithinAt.comp (g := f) (f := Neg.neg) (t := Iio a) (-a) ?_ ?_ ?_
    · simp [h]
    · fun_prop
    · intro x
      simp [neg_lt]
  exact hmain this
/-- A real function whose derivative tends to infinity from the left at a point is not
differentiable on the left at that point -/
theorem not_differentiableWithinAt_of_deriv_tendsto_atTop_Iio (f : â â â) {a : â}
    (hf : Tendsto (deriv f) (ð[<] a) atTop) : ¬ DifferentiableWithinAt â f (Iio a) a := by
  intro h
  -- Reduce to the `atBot`/`Iio` statement by replacing `f` with `-f`.
  have hf' : Tendsto (deriv (-f)) (ð[<] a) atBot := by
    rw [Pi.neg_def, deriv.neg']
    exact tendsto_neg_atTop_atBot.comp hf
  exact not_differentiableWithinAt_of_deriv_tendsto_atBot_Iio (-f) hf' h.neg
end Interval
/-- Let `f` be a function continuous on a convex (or, equivalently, connected) subset `D`
of the real line. If `f` is differentiable on the interior of `D` and `C < f'`, then
`f` grows faster than `C * x` on `D`, i.e., `C * (y - x) < f y - f x` whenever `x, y â D`,
`x < y`. -/
theorem Convex.mul_sub_lt_image_sub_of_lt_deriv {D : Set â} (hD : Convex â D) {f : â â â}
    (hf : ContinuousOn f D) (hf' : DifferentiableOn â f (interior D)) {C}
    (hf'_gt : â x â interior D, C < deriv f x) :
    âáµ (x â D) (y â D), x < y â C * (y - x) < f y - f x := by
  intro x hx y hy hxy
  -- By convexity, the whole segment `[x, y]` lies in `D`, and its interior in `interior D`.
  have hxyD : Icc x y â D := hD.ordConnected.out hx hy
  have hxyD' : Ioo x y â interior D :=
    subset_sUnion_of_mem âšisOpen_Ioo, Ioo_subset_Icc_self.trans hxyDâ©
  -- MVT gives a point where the derivative equals the slope; the slope then exceeds `C`.
  obtain âša, a_mem, haâ© : â a â Ioo x y, deriv f a = (f y - f x) / (y - x) :=
    exists_deriv_eq_slope f hxy (hf.mono hxyD) (hf'.mono hxyD')
  have : C < (f y - f x) / (y - x) := ha âž hf'_gt _ (hxyD' a_mem)
  exact (lt_div_iff (sub_pos.2 hxy)).1 this
/-- Let `f : â â â` be a differentiable function. If `C < f'`, then `f` grows faster than
`C * x`, i.e., `C * (y - x) < f y - f x` whenever `x < y`. -/
theorem mul_sub_lt_image_sub_of_lt_deriv {f : â â â} (hf : Differentiable â f) {C}
    (hf'_gt : â x, C < deriv f x) âŠx y⊠(hxy : x < y) : C * (y - x) < f y - f x :=
  -- Global version: apply the convex-set version with `D = univ`.
  convex_univ.mul_sub_lt_image_sub_of_lt_deriv hf.continuous.continuousOn hf.differentiableOn
    (fun x _ => hf'_gt x) x trivial y trivial hxy
/-- Let `f` be a function continuous on a convex (or, equivalently, connected) subset `D`
of the real line. If `f` is differentiable on the interior of `D` and `C †f'`, then
`f` grows at least as fast as `C * x` on `D`, i.e., `C * (y - x) †f y - f x` whenever `x, y â D`,
`x †y`. -/
theorem Convex.mul_sub_le_image_sub_of_le_deriv {D : Set â} (hD : Convex â D) {f : â â â}
    (hf : ContinuousOn f D) (hf' : DifferentiableOn â f (interior D)) {C}
    (hf'_ge : â x â interior D, C †deriv f x) :
    âáµ (x â D) (y â D), x †y â C * (y - x) †f y - f x := by
  intro x hx y hy hxy
  -- The case `x = y` is trivial (both sides vanish); otherwise run the strict argument.
  cases' eq_or_lt_of_le hxy with hxy' hxy'
  · rw [hxy', sub_self, sub_self, mul_zero]
  have hxyD : Icc x y â D := hD.ordConnected.out hx hy
  have hxyD' : Ioo x y â interior D :=
    subset_sUnion_of_mem âšisOpen_Ioo, Ioo_subset_Icc_self.trans hxyDâ©
  -- MVT: the slope equals some derivative value, hence is at least `C`.
  obtain âša, a_mem, haâ© : â a â Ioo x y, deriv f a = (f y - f x) / (y - x) :=
    exists_deriv_eq_slope f hxy' (hf.mono hxyD) (hf'.mono hxyD')
  have : C †(f y - f x) / (y - x) := ha ➠hf'_ge _ (hxyD' a_mem)
  exact (le_div_iff (sub_pos.2 hxy')).1 this
/-- Let `f : â â â` be a differentiable function. If `C †f'`, then `f` grows at least as fast
as `C * x`, i.e., `C * (y - x) †f y - f x` whenever `x †y`. -/
theorem mul_sub_le_image_sub_of_le_deriv {f : â â â} (hf : Differentiable â f) {C}
    (hf'_ge : â x, C †deriv f x) âŠx y⊠(hxy : x †y) : C * (y - x) †f y - f x :=
  -- Global version: apply the convex-set version with `D = univ`.
  convex_univ.mul_sub_le_image_sub_of_le_deriv hf.continuous.continuousOn hf.differentiableOn
    (fun x _ => hf'_ge x) x trivial y trivial hxy
/-- Let `f` be a function continuous on a convex (or, equivalently, connected) subset `D`
of the real line. If `f` is differentiable on the interior of `D` and `f' < C`, then
`f` grows slower than `C * x` on `D`, i.e., `f y - f x < C * (y - x)` whenever `x, y â D`,
`x < y`. -/
theorem Convex.image_sub_lt_mul_sub_of_deriv_lt {D : Set â} (hD : Convex â D) {f : â â â}
    (hf : ContinuousOn f D) (hf' : DifferentiableOn â f (interior D)) {C}
    (lt_hf' : â x â interior D, deriv f x < C) (x : â) (hx : x â D) (y : â) (hy : y â D)
    (hxy : x < y) : f y - f x < C * (y - x) :=
  -- Dualize: apply the lower-bound version to `-f`, whose derivative exceeds `-C`.
  have hf'_gt : â x â interior D, -C < deriv (fun y => -f y) x := fun x hx => by
    rw [deriv.neg, neg_lt_neg_iff]
    exact lt_hf' x hx
  by linarith [hD.mul_sub_lt_image_sub_of_lt_deriv hf.neg hf'.neg hf'_gt x hx y hy hxy]
/-- Let `f : â â â` be a differentiable function. If `f' < C`, then `f` grows slower than
`C * x` on `D`, i.e., `f y - f x < C * (y - x)` whenever `x < y`. -/
theorem image_sub_lt_mul_sub_of_deriv_lt {f : â â â} (hf : Differentiable â f) {C}
    (lt_hf' : â x, deriv f x < C) âŠx y⊠(hxy : x < y) : f y - f x < C * (y - x) :=
  -- Global version: apply the convex-set version with `D = univ`.
  convex_univ.image_sub_lt_mul_sub_of_deriv_lt hf.continuous.continuousOn hf.differentiableOn
    (fun x _ => lt_hf' x) x trivial y trivial hxy
/-- Let `f` be a function continuous on a convex (or, equivalently, connected) subset `D`
of the real line. If `f` is differentiable on the interior of `D` and `f' †C`, then
`f` grows at most as fast as `C * x` on `D`, i.e., `f y - f x †C * (y - x)` whenever `x, y â D`,
`x †y`. -/
theorem Convex.image_sub_le_mul_sub_of_deriv_le {D : Set â} (hD : Convex â D) {f : â â â}
    (hf : ContinuousOn f D) (hf' : DifferentiableOn â f (interior D)) {C}
    (le_hf' : â x â interior D, deriv f x †C) (x : â) (hx : x â D) (y : â) (hy : y â D)
    (hxy : x †y) : f y - f x †C * (y - x) :=
  -- Dualize: apply the lower-bound version to `-f`, whose derivative is at least `-C`.
  have hf'_ge : â x â interior D, -C †deriv (fun y => -f y) x := fun x hx => by
    rw [deriv.neg, neg_le_neg_iff]
    exact le_hf' x hx
  by linarith [hD.mul_sub_le_image_sub_of_le_deriv hf.neg hf'.neg hf'_ge x hx y hy hxy]
/-- Let `f : â â â` be a differentiable function. If `f' †C`, then `f` grows at most as fast
as `C * x`, i.e., `f y - f x †C * (y - x)` whenever `x †y`. -/
theorem image_sub_le_mul_sub_of_deriv_le {f : â â â} (hf : Differentiable â f) {C}
    (le_hf' : â x, deriv f x †C) âŠx y⊠(hxy : x †y) : f y - f x †C * (y - x) :=
  -- Global version: apply the convex-set version with `D = univ`.
  convex_univ.image_sub_le_mul_sub_of_deriv_le hf.continuous.continuousOn hf.differentiableOn
    (fun x _ => le_hf' x) x trivial y trivial hxy
/-- Let `f` be a function continuous on a convex (or, equivalently, connected) subset `D`
of the real line. If `f` is differentiable on the interior of `D` and `f'` is positive, then
`f` is a strictly monotone function on `D`.
Note that we don't require differentiability explicitly as it already implied by the derivative
being strictly positive. -/
theorem strictMonoOn_of_deriv_pos {D : Set â} (hD : Convex â D) {f : â â â}
    (hf : ContinuousOn f D) (hf' : â x â interior D, 0 < deriv f x) : StrictMonoOn f D := by
  intro x hx y hy
  -- A nonzero derivative forces differentiability (`deriv` of a non-differentiable point is 0).
  have : DifferentiableOn â f (interior D) := fun z hz =>
    (differentiableAt_of_deriv_ne_zero (hf' z hz).ne').differentiableWithinAt
  -- Take `C = 0` in the strict growth bound.
  simpa only [zero_mul, sub_pos] using
    hD.mul_sub_lt_image_sub_of_lt_deriv hf this hf' x hx y hy
/-- Let `f : â â â` be a differentiable function. If `f'` is positive, then
`f` is a strictly monotone function.
Note that we don't require differentiability explicitly as it already implied by the derivative
being strictly positive. -/
theorem strictMono_of_deriv_pos {f : â â â} (hf' : â x, 0 < deriv f x) : StrictMono f :=
  -- Continuity is also automatic: nonzero derivative ⹠differentiable ⹠continuous.
  strictMonoOn_univ.1 <| strictMonoOn_of_deriv_pos convex_univ (fun z _ =>
    (differentiableAt_of_deriv_ne_zero (hf' z).ne').differentiableWithinAt.continuousWithinAt)
    fun x _ => hf' x
/-- Let `f` be a function continuous on a convex (or, equivalently, connected) subset `D`
of the real line. If `f` is differentiable on the interior of `D` and `f'` is strictly positive,
then `f` is a strictly monotone function on `D`. -/
lemma strictMonoOn_of_hasDerivWithinAt_pos {D : Set â} (hD : Convex â D) {f f' : â â â}
    (hf : ContinuousOn f D) (hf' : â x â interior D, HasDerivWithinAt f (f' x) (interior D) x)
    (hf'â : â x â interior D, 0 < f' x) : StrictMonoOn f D :=
  -- On the open set `interior D`, `deriv f` agrees with the given `f'`.
  strictMonoOn_of_deriv_pos hD hf fun x hx ⊠by
    rw [deriv_eqOn isOpen_interior hf' hx]; exact hf'â _ hx
@[deprecated (since := "2024-03-02")]
alias StrictMonoOn_of_hasDerivWithinAt_pos := strictMonoOn_of_hasDerivWithinAt_pos
/-- Let `f : â â â` be a differentiable function. If `f'` is strictly positive, then
`f` is a strictly monotone function. -/
lemma strictMono_of_hasDerivAt_pos {f f' : â â â} (hf : â x, HasDerivAt f (f' x) x)
    (hf' : â x, 0 < f' x) : StrictMono f :=
  -- `HasDerivAt.deriv` identifies `deriv f x` with the given `f' x`.
  strictMono_of_deriv_pos fun x ⊠by rw [(hf _).deriv]; exact hf' _
/-- Let `f` be a function continuous on a convex (or, equivalently, connected) subset `D`
of the real line. If `f` is differentiable on the interior of `D` and `f'` is nonnegative, then
`f` is a monotone function on `D`. -/
theorem monotoneOn_of_deriv_nonneg {D : Set â} (hD : Convex â D) {f : â â â}
    (hf : ContinuousOn f D) (hf' : DifferentiableOn â f (interior D))
    (hf'_nonneg : â x â interior D, 0 †deriv f x) : MonotoneOn f D := fun x hx y hy hxy => by
  -- Take `C = 0` in the non-strict growth bound.
  simpa only [zero_mul, sub_nonneg] using
    hD.mul_sub_le_image_sub_of_le_deriv hf hf' hf'_nonneg x hx y hy hxy
/-- Let `f : â â â` be a differentiable function. If `f'` is nonnegative, then
`f` is a monotone function. -/
theorem monotone_of_deriv_nonneg {f : â â â} (hf : Differentiable â f) (hf' : â x, 0 †deriv f x) :
    Monotone f :=
  -- Global version: apply the convex-set version with `D = univ`.
  monotoneOn_univ.1 <|
    monotoneOn_of_deriv_nonneg convex_univ hf.continuous.continuousOn hf.differentiableOn fun x _ =>
      hf' x
/-- Let `f` be a function continuous on a convex (or, equivalently, connected) subset `D`
of the real line. If `f` is differentiable on the interior of `D` and `f'` is nonnegative, then
`f` is a monotone function on `D`. -/
lemma monotoneOn_of_hasDerivWithinAt_nonneg {D : Set â} (hD : Convex â D) {f f' : â â â}
    (hf : ContinuousOn f D) (hf' : â x â interior D, HasDerivWithinAt f (f' x) (interior D) x)
    (hf'â : â x â interior D, 0 †f' x) : MonotoneOn f D :=
  -- On the open set `interior D`, `deriv f` agrees with the given `f'`.
  monotoneOn_of_deriv_nonneg hD hf (fun x hx ⊠(hf' _ hx).differentiableWithinAt) fun x hx ⊠by
    rw [deriv_eqOn isOpen_interior hf' hx]; exact hf'â _ hx
/-- Let `f : â â â` be a differentiable function. If `f'` is nonnegative, then
`f` is a monotone function. -/
lemma monotone_of_hasDerivAt_nonneg {f f' : â â â} (hf : â x, HasDerivAt f (f' x) x)
    (hf' : 0 †f') : Monotone f :=
  -- `HasDerivAt.deriv` identifies `deriv f x` with the given `f' x`.
  monotone_of_deriv_nonneg (fun x ⊠(hf _).differentiableAt) fun x ⊠by
    rw [(hf _).deriv]; exact hf' _
/-- Let `f` be a function continuous on a convex (or, equivalently, connected) subset `D`
of the real line. If `f` is differentiable on the interior of `D` and `f'` is negative, then
`f` is a strictly antitone function on `D`. -/
theorem strictAntiOn_of_deriv_neg {D : Set â} (hD : Convex â D) {f : â â â}
    (hf : ContinuousOn f D) (hf' : â x â interior D, deriv f x < 0) : StrictAntiOn f D :=
  fun x hx y => by
  -- Take `C = 0` in the strict upper growth bound; differentiability again follows
  -- from the derivative being nonzero.
  simpa only [zero_mul, sub_lt_zero] using
    hD.image_sub_lt_mul_sub_of_deriv_lt hf
      (fun z hz => (differentiableAt_of_deriv_ne_zero (hf' z hz).ne).differentiableWithinAt) hf' x
      hx y
/-- Let `f : â â â` be a differentiable function. If `f'` is negative, then
`f` is a strictly antitone function.
Note that we don't require differentiability explicitly as it already implied by the derivative
being strictly negative. -/
theorem strictAnti_of_deriv_neg {f : â â â} (hf' : â x, deriv f x < 0) : StrictAnti f :=
  -- Continuity is automatic: nonzero derivative ⹠differentiable ⹠continuous.
  strictAntiOn_univ.1 <| strictAntiOn_of_deriv_neg convex_univ
    (fun z _ =>
      (differentiableAt_of_deriv_ne_zero (hf' z).ne).differentiableWithinAt.continuousWithinAt)
    fun x _ => hf' x
/-- Let `f` be a function continuous on a convex (or, equivalently, connected) subset `D`
of the real line. If `f` is differentiable on the interior of `D` and `f'` is strictly negative,
then `f` is a strictly antitone function on `D`. -/
lemma strictAntiOn_of_hasDerivWithinAt_neg {D : Set â} (hD : Convex â D) {f f' : â â â}
    (hf : ContinuousOn f D) (hf' : â x â interior D, HasDerivWithinAt f (f' x) (interior D) x)
    (hf'â : â x â interior D, f' x < 0) : StrictAntiOn f D :=
  -- On the open set `interior D`, `deriv f` agrees with the given `f'`.
  strictAntiOn_of_deriv_neg hD hf fun x hx ⊠by
    rw [deriv_eqOn isOpen_interior hf' hx]; exact hf'â _ hx
@[deprecated (since := "2024-03-02")]
alias StrictAntiOn_of_hasDerivWithinAt_pos := strictAntiOn_of_hasDerivWithinAt_neg
/-- Let `f : â â â` be a differentiable function. If `f'` is strictly negative, then
`f` is a strictly antitone function. -/
lemma strictAnti_of_hasDerivAt_neg {f f' : â â â} (hf : â x, HasDerivAt f (f' x) x)
    (hf' : â x, f' x < 0) : StrictAnti f :=
  -- `HasDerivAt.deriv` identifies `deriv f x` with the given `f' x`.
  strictAnti_of_deriv_neg fun x ⊠by rw [(hf _).deriv]; exact hf' _
@[deprecated (since := "2024-03-02")]
alias strictAnti_of_hasDerivAt_pos := strictAnti_of_hasDerivAt_neg
/-- Let `f` be a function continuous on a convex (or, equivalently, connected) subset `D`
of the real line. If `f` is differentiable on the interior of `D` and `f'` is nonpositive, then
`f` is an antitone function on `D`. -/
theorem antitoneOn_of_deriv_nonpos {D : Set â} (hD : Convex â D) {f : â â â}
    (hf : ContinuousOn f D) (hf' : DifferentiableOn â f (interior D))
    (hf'_nonpos : â x â interior D, deriv f x †0) : AntitoneOn f D := fun x hx y hy hxy => by
  -- Take `C = 0` in the non-strict upper growth bound.
  simpa only [zero_mul, sub_nonpos] using
    hD.image_sub_le_mul_sub_of_deriv_le hf hf' hf'_nonpos x hx y hy hxy
/-- Let `f : â â â` be a differentiable function. If `f'` is nonpositive, then
`f` is an antitone function. -/
theorem antitone_of_deriv_nonpos {f : â â â} (hf : Differentiable â f) (hf' : â x, deriv f x †0) :
    Antitone f :=
  -- Global version: apply the convex-set version with `D = univ`.
  antitoneOn_univ.1 <|
    antitoneOn_of_deriv_nonpos convex_univ hf.continuous.continuousOn hf.differentiableOn fun x _ =>
      hf' x
/-- Let `f` be a function continuous on a convex (or, equivalently, connected) subset `D`
of the real line. If `f` is differentiable on the interior of `D` and `f'` is nonpositive, then
`f` is an antitone function on `D`. -/
lemma antitoneOn_of_hasDerivWithinAt_nonpos {D : Set â} (hD : Convex â D) {f f' : â â â}
    (hf : ContinuousOn f D) (hf' : â x â interior D, HasDerivWithinAt f (f' x) (interior D) x)
    (hf'â : â x â interior D, f' x †0) : AntitoneOn f D :=
  -- On the open set `interior D`, `deriv f` agrees with the given `f'`.
  antitoneOn_of_deriv_nonpos hD hf (fun x hx ⊠(hf' _ hx).differentiableWithinAt) fun x hx ⊠by
    rw [deriv_eqOn isOpen_interior hf' hx]; exact hf'â _ hx
/-- Let `f : â â â` be a differentiable function. If `f'` is nonpositive, then `f` is an antitone
function. -/
lemma antitone_of_hasDerivAt_nonpos {f f' : â â â} (hf : â x, HasDerivAt f (f' x) x)
    (hf' : f' †0) : Antitone f :=
  -- `HasDerivAt.deriv` identifies `deriv f x` with the given `f' x`.
  antitone_of_deriv_nonpos (fun x ⊠(hf _).differentiableAt) fun x ⊠by
    rw [(hf _).deriv]; exact hf' _
/-! ### Functions `f : E â â` -/
/-- Lagrange's **Mean Value Theorem**, applied to convex domains: if `f : E â â` has derivative
`f'` within a convex set `s`, then for any `x, y â s` there is a point `z` on the segment
`[x, y]` with `f y - f x = f' z (y - x)`. -/
theorem domain_mvt {f : E â â} {s : Set E} {x y : E} {f' : E â E âL[â] â}
    (hf : â x â s, HasFDerivWithinAt f (f' x) s x) (hs : Convex â s) (xs : x â s) (ys : y â s) :
    â z â segment â x y, f y - f x = f' z (y - x) := by
  -- Use `g = AffineMap.lineMap x y` to parametrize the segment
  set g : â â E := fun t => AffineMap.lineMap x y t
  set I := Icc (0 : â) 1
  have hsub : Ioo (0 : â) 1 â I := Ioo_subset_Icc_self
  -- Convexity guarantees the parametrized segment stays inside `s`.
  have hmaps : MapsTo g I s := hs.mapsTo_lineMap xs ys
  -- The one-variable function `f â g` has derivative `f' (g t) (y - x)` at each `t â I`
  have hfg : â t â I, HasDerivWithinAt (f â g) (f' (g t) (y - x)) I t := fun t ht =>
    (hf _ (hmaps ht)).comp_hasDerivWithinAt t AffineMap.hasDerivWithinAt_lineMap hmaps
  -- apply 1-variable mean value theorem to pullback
  have hMVT : â t â Ioo (0 : â) 1, f' (g t) (y - x) = (f (g 1) - f (g 0)) / (1 - 0) := by
    refine exists_hasDerivAt_eq_slope (f â g) _ (by norm_num) ?_ ?_
    · exact fun t Ht => (hfg t Ht).continuousWithinAt
    · exact fun t Ht => (hfg t <| hsub Ht).hasDerivAt (Icc_mem_nhds Ht.1 Ht.2)
  -- reinterpret on domain
  rcases hMVT with âšt, Ht, hMVT'â©
  rw [segment_eq_image_lineMap, exists_mem_image]
  refine âšt, hsub Ht, ?_â©
  simpa [g] using hMVT'.symm
section RCLike
/-!
### Vector-valued functions `f : E â F`. Strict differentiability.
A `C^1` function is strictly differentiable, when the field is `â` or `â`. This follows from the
mean value inequality on balls, which is a particular case of the above results after restricting
the scalars to `â`. Note that it does not make sense to talk of a convex set over `â`, but balls
make sense and are enough. Many formulations of the mean value inequality could be generalized to
balls over `â` or `â`. For now, we only include the ones that we need.
-/
variable {ð : Type*} [RCLike ð] {G : Type*} [NormedAddCommGroup G] [NormedSpace ð G] {H : Type*}
[NormedAddCommGroup H] [NormedSpace ð H] {f : G â H} {f' : G â G âL[ð] H} {x : G}
/-- Over the reals or the complexes, a continuously differentiable function is strictly
differentiable. -/
theorem hasStrictFDerivAt_of_hasFDerivAt_of_continuousAt
    (hder : âá¶ y in ð x, HasFDerivAt f (f' y) y) (hcont : ContinuousAt f' x) :
    HasStrictFDerivAt f (f' x) x := by
  -- turn little-o definition of strict_fderiv into an epsilon-delta statement
  refine isLittleO_iff.mpr fun c hc => Metric.eventually_nhds_iff_ball.mpr ?_
  -- the correct ε is the modulus of continuity of f'
  rcases Metric.mem_nhds_iff.mp (inter_mem hder (hcont <| ball_mem_nhds _ hc)) with âšÎµ, ε0, hεâ©
  refine âšÎµ, ε0, ?_â©
  -- simplify formulas involving the product E Ã E
  rintro âša, bâ© h
  rw [â ball_prod_same, prod_mk_mem_set_prod_eq] at h
  -- exploit the choice of ε as the modulus of continuity of f'
  have hf' : â x' â ball x ε, âf' x' - f' xâ †c := fun x' H' => by
    rw [â dist_eq_norm]
    exact le_of_lt (hε H').2
  -- apply mean value theorem
  -- (restrict scalars to â so that the real mean value inequality on balls applies)
  letI : NormedSpace â G := RestrictScalars.normedSpace â ð G
  refine (convex_ball _ _).norm_image_sub_le_of_norm_hasFDerivWithin_le' ?_ hf' h.2 h.1
  exact fun y hy => (hε hy).1.hasFDerivWithinAt
/-- Over the reals or the complexes, a continuously differentiable function is strictly
differentiable. -/
theorem hasStrictDerivAt_of_hasDerivAt_of_continuousAt {f f' : ð â G} {x : ð}
    (hder : âá¶ y in ð x, HasDerivAt f (f' y) y) (hcont : ContinuousAt f' x) :
    HasStrictDerivAt f (f' x) x :=
  -- One-dimensional corollary: translate `HasDerivAt` into `HasFDerivAt` via `smulRight`.
  hasStrictFDerivAt_of_hasFDerivAt_of_continuousAt (hder.mono fun _ hy => hy.hasFDerivAt) <|
    (smulRightL ð ð G 1).continuous.continuousAt.comp hcont
end RCLike
|
Analysis\Calculus\Monotone.lean | /-
Copyright (c) 2022 Sébastien Gouëzel. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Sébastien Gouëzel
-/
import Mathlib.Analysis.Calculus.Deriv.Slope
import Mathlib.MeasureTheory.Covering.OneDim
import Mathlib.Order.Monotone.Extension
/-!
# Differentiability of monotone functions
We show that a monotone function `f : â â â` is differentiable almost everywhere, in
`Monotone.ae_differentiableAt`. (We also give a version for a function monotone on a set, in
`MonotoneOn.ae_differentiableWithinAt`.)
If the function `f` is continuous, this follows directly from general differentiation of measure
theorems. Let `Ό` be the Stieltjes measure associated to `f`. Then, almost everywhere,
`Ό [x, y] / Leb [x, y]` (resp. `Ό [y, x] / Leb [y, x]`) converges to the Radon-Nikodym derivative
of `ÎŒ` with respect to Lebesgue when `y` tends to `x` in `(x, +â)` (resp. `(-â, x)`), by
`VitaliFamily.ae_tendsto_rnDeriv`. As `Ό [x, y] = f y - f x` and `Leb [x, y] = y - x`, this
gives differentiability right away.
When `f` is only monotone, the same argument works up to small adjustments, as the associated
Stieltjes measure satisfies `Ό [x, y] = f (y^+) - f (x^-)` (the right and left limits of `f` at `y`
and `x` respectively). One argues that `f (x^-) = f x` almost everywhere (in fact away from a
countable set), and moreover `f ((y - (y-x)^2)^+) †f y †f (y^+)`. This is enough to deduce the
limit of `(f y - f x) / (y - x)` by a lower and upper approximation argument from the known
behavior of `Ό [x, y]`.
-/
open Set Filter Function Metric MeasureTheory MeasureTheory.Measure IsUnifLocDoublingMeasure
open scoped Topology
/-- If `(f y - f x) / (y - x)` converges to a limit as `y` tends to `x`, then the same goes if
`y` is shifted a little bit, i.e., `f (y + (y-x)^2) - f x) / (y - x)` converges to the same limit.
This lemma contains a slightly more general version of this statement (where one considers
convergence along some subfilter, typically `ð[<] x` or `ð[>] x`) tailored to the application
to almost everywhere differentiability of monotone functions. -/
theorem tendsto_apply_add_mul_sq_div_sub {f : â â â} {x a c d : â} {l : Filter â} (hl : l †ð[â ] x)
    (hf : Tendsto (fun y => (f y - d) / (y - x)) l (ð a))
    (h' : Tendsto (fun y => y + c * (y - x) ^ 2) l l) :
    Tendsto (fun y => (f (y + c * (y - x) ^ 2) - d) / (y - x)) l (ð a) := by
  -- The ratio of the shifted increment to the original increment tends to `1`.
  have L : Tendsto (fun y => (y + c * (y - x) ^ 2 - x) / (y - x)) l (ð 1) := by
    have : Tendsto (fun y => 1 + c * (y - x)) l (ð (1 + c * (x - x))) := by
      apply Tendsto.mono_left _ (hl.trans nhdsWithin_le_nhds)
      exact ((tendsto_id.sub_const x).const_mul c).const_add 1
    simp only [_root_.sub_self, add_zero, mul_zero] at this
    -- Away from `x`, the two expressions are algebraically equal (cancel `y - x`).
    apply Tendsto.congr' (Eventually.filter_mono hl _) this
    filter_upwards [self_mem_nhdsWithin] with y hy
    field_simp [sub_ne_zero.2 hy]
    ring
  -- The target function is (eventually) the product of the composed difference quotient and `L`.
  have Z := (hf.comp h').mul L
  rw [mul_one] at Z
  apply Tendsto.congr' _ Z
  have : âá¶ y in l, y + c * (y - x) ^ 2 â x := by apply Tendsto.mono_right h' hl self_mem_nhdsWithin
  filter_upwards [this] with y hy
  field_simp [sub_ne_zero.2 hy]
/-- A Stieltjes function is almost everywhere differentiable, with derivative equal to the
Radon-Nikodym derivative of the associated Stieltjes measure with respect to Lebesgue. -/
theorem StieltjesFunction.ae_hasDerivAt (f : StieltjesFunction) :
    âáµ x, HasDerivAt f (rnDeriv f.measure volume x).toReal x := by
  /- Denote by `Ό` the Stieltjes measure associated to `f`.
  The general theorem `VitaliFamily.ae_tendsto_rnDeriv` ensures that `Ό [x, y] / (y - x)` tends
  to the Radon-Nikodym derivative as `y` tends to `x` from the right. As `Ό [x,y] = f y - f (x^-)`
  and `f (x^-) = f x` almost everywhere, this gives differentiability on the right.
  On the left, `Ό [y, x] / (x - y)` again tends to the Radon-Nikodym derivative.
  As `Ό [y, x] = f x - f (y^-)`, this is not exactly the right result, so one uses a sandwiching
  argument to deduce the convergence for `(f x - f y) / (x - y)`. -/
  -- Work at a point `x` where: the Vitali derivative exists (`hx`), the Radon-Nikodym
  -- derivative is finite (`h'x`), and `f` has no left-jump (`h''x`).
  filter_upwards [VitaliFamily.ae_tendsto_rnDeriv (vitaliFamily (volume : Measure â) 1) f.measure,
    rnDeriv_lt_top f.measure volume, f.countable_leftLim_ne.ae_not_mem volume] with x hx h'x h''x
  -- Limit on the right, following from differentiation of measures
  have L1 :
    Tendsto (fun y => (f y - f x) / (y - x)) (ð[>] x) (ð (rnDeriv f.measure volume x).toReal) := by
    apply Tendsto.congr' _
      ((ENNReal.tendsto_toReal h'x.ne).comp (hx.comp (Real.tendsto_Icc_vitaliFamily_right x)))
    filter_upwards [self_mem_nhdsWithin]
    rintro y (hxy : x < y)
    -- `Ό [x, y] = f y - leftLim f x = f y - f x` since `x` is not a jump point.
    simp only [comp_apply, StieltjesFunction.measure_Icc, Real.volume_Icc, Classical.not_not.1 h''x]
    rw [â ENNReal.ofReal_div_of_pos (sub_pos.2 hxy), ENNReal.toReal_ofReal]
    exact div_nonneg (sub_nonneg.2 (f.mono hxy.le)) (sub_pos.2 hxy).le
  -- Limit on the left, following from differentiation of measures. Its form is not exactly the one
  -- we need, due to the appearance of a left limit.
  have L2 : Tendsto (fun y => (leftLim f y - f x) / (y - x)) (ð[<] x)
      (ð (rnDeriv f.measure volume x).toReal) := by
    apply Tendsto.congr' _
      ((ENNReal.tendsto_toReal h'x.ne).comp (hx.comp (Real.tendsto_Icc_vitaliFamily_left x)))
    filter_upwards [self_mem_nhdsWithin]
    rintro y (hxy : y < x)
    simp only [comp_apply, StieltjesFunction.measure_Icc, Real.volume_Icc]
    rw [â ENNReal.ofReal_div_of_pos (sub_pos.2 hxy), ENNReal.toReal_ofReal, â neg_neg (y - x),
      div_neg, neg_div', neg_sub, neg_sub]
    exact div_nonneg (sub_nonneg.2 (f.mono.leftLim_le hxy.le)) (sub_pos.2 hxy).le
  -- Shifting a little bit the limit on the left, by `(y - x)^2`.
  have L3 : Tendsto (fun y => (leftLim f (y + 1 * (y - x) ^ 2) - f x) / (y - x)) (ð[<] x)
      (ð (rnDeriv f.measure volume x).toReal) := by
    apply tendsto_apply_add_mul_sq_div_sub (nhds_left'_le_nhds_ne x) L2
    apply tendsto_nhdsWithin_of_tendsto_nhds_of_eventually_within
    · apply Tendsto.mono_left _ nhdsWithin_le_nhds
      have : Tendsto (fun y : â => y + â1 * (y - x) ^ 2) (ð x) (ð (x + â1 * (x - x) ^ 2)) :=
        tendsto_id.add (((tendsto_id.sub_const x).pow 2).const_mul â1)
      simpa using this
    -- For `y` close to `x` on the left, the shifted point `y + (y - x)^2` is still `< x`.
    · have : Ioo (x - 1) x â ð[<] x := by
        apply Ioo_mem_nhdsWithin_Iio; exact âšby linarith, le_refl _â©
      filter_upwards [this]
      rintro y âšhy : x - 1 < y, h'y : y < xâ©
      rw [mem_Iio]
      norm_num; nlinarith
  -- Deduce the correct limit on the left, by sandwiching.
  have L4 :
    Tendsto (fun y => (f y - f x) / (y - x)) (ð[<] x) (ð (rnDeriv f.measure volume x).toReal) := by
    -- For `y < x`: `leftLim f (y + (y-x)^2) †f y †leftLim f y` by monotonicity, and dividing
    -- by the negative quantity `y - x` reverses the inequalities.
    apply tendsto_of_tendsto_of_tendsto_of_le_of_le' L3 L2
    · filter_upwards [self_mem_nhdsWithin]
      rintro y (hy : y < x)
      refine div_le_div_of_nonpos_of_le (by linarith) ((sub_le_sub_iff_right _).2 ?_)
      apply f.mono.le_leftLim
      have : â0 < (x - y) ^ 2 := sq_pos_of_pos (sub_pos.2 hy)
      norm_num; linarith
    · filter_upwards [self_mem_nhdsWithin]
      rintro y (hy : y < x)
      refine div_le_div_of_nonpos_of_le (by linarith) ?_
      simpa only [sub_le_sub_iff_right] using f.mono.leftLim_le (le_refl y)
  -- prove the result by splitting into left and right limits.
  rw [hasDerivAt_iff_tendsto_slope, slope_fun_def_field, â nhds_left'_sup_nhds_right', tendsto_sup]
  exact âšL4, L1â©
/-- A monotone function is almost everywhere differentiable, with derivative equal to the
Radon-Nikodym derivative of the associated Stieltjes measure with respect to Lebesgue. -/
theorem Monotone.ae_hasDerivAt {f : â â â} (hf : Monotone f) :
âáµ x, HasDerivAt f (rnDeriv hf.stieltjesFunction.measure volume x).toReal x := by
/- We already know that the Stieltjes function associated to `f` (i.e., `g : x ⊠f (x^+)`) is
differentiable almost everywhere. We reduce to this statement by sandwiching values of `f` with
values of `g`, by shifting with `(y - x)^2` (which has no influence on the relevant
scale `y - x`.)-/
-- Work at a point `x` where `g` is differentiable and `f` is continuous; both sets are conull.
filter_upwards [hf.stieltjesFunction.ae_hasDerivAt,
hf.countable_not_continuousAt.ae_not_mem volume] with x hx h'x
-- At a continuity point of `f`, the right-continuous modification `g` agrees with `f`.
have A : hf.stieltjesFunction x = f x := by
rw [Classical.not_not, hf.continuousAt_iff_leftLim_eq_rightLim] at h'x
apply le_antisymm _ (hf.le_rightLim (le_refl _))
rw [â h'x]
exact hf.leftLim_le (le_refl _)
-- Split differentiability of `g` at `x` into left and right slope limits.
rw [hasDerivAt_iff_tendsto_slope, (nhds_left'_sup_nhds_right' x).symm, tendsto_sup,
slope_fun_def_field, A] at hx
-- prove differentiability on the right, by sandwiching with values of `g`
have L1 : Tendsto (fun y => (f y - f x) / (y - x)) (ð[>] x)
(ð (rnDeriv hf.stieltjesFunction.measure volume x).toReal) := by
-- limit of a helper function, with a small shift compared to `g`
have : Tendsto (fun y => (hf.stieltjesFunction (y + -1 * (y - x) ^ 2) - f x) / (y - x)) (ð[>] x)
(ð (rnDeriv hf.stieltjesFunction.measure volume x).toReal) := by
apply tendsto_apply_add_mul_sq_div_sub (nhds_right'_le_nhds_ne x) hx.2
apply tendsto_nhdsWithin_of_tendsto_nhds_of_eventually_within
· apply Tendsto.mono_left _ nhdsWithin_le_nhds
-- The shifted point `y - (y - x)^2` tends to `x` as `y` tends to `x`.
have : Tendsto (fun y : â => y + -â1 * (y - x) ^ 2) (ð x) (ð (x + -â1 * (x - x) ^ 2)) :=
tendsto_id.add (((tendsto_id.sub_const x).pow 2).const_mul (-1))
simpa using this
· have : Ioo x (x + 1) â ð[>] x := by
apply Ioo_mem_nhdsWithin_Ioi; exact âšle_refl _, by linarithâ©
filter_upwards [this]
rintro y âšhy : x < y, h'y : y < x + 1â©
rw [mem_Ioi]
-- For `y` close to `x` on the right, the shifted point stays to the right of `x`.
norm_num; nlinarith
-- apply the sandwiching argument, with the helper function and `g`
apply tendsto_of_tendsto_of_tendsto_of_le_of_le' this hx.2
· filter_upwards [self_mem_nhdsWithin] with y hy
rw [mem_Ioi, â sub_pos] at hy
gcongr
-- the shifted value of `g` lies below `f y`, by monotonicity of right limits
exact hf.rightLim_le (by nlinarith)
· filter_upwards [self_mem_nhdsWithin] with y hy
rw [mem_Ioi, â sub_pos] at hy
gcongr
-- `f y` lies below `g y = f (y^+)`
exact hf.le_rightLim le_rfl
-- prove differentiability on the left, by sandwiching with values of `g`
have L2 : Tendsto (fun y => (f y - f x) / (y - x)) (ð[<] x)
(ð (rnDeriv hf.stieltjesFunction.measure volume x).toReal) := by
-- limit of a helper function, with a small shift compared to `g`
have : Tendsto (fun y => (hf.stieltjesFunction (y + -1 * (y - x) ^ 2) - f x) / (y - x)) (ð[<] x)
(ð (rnDeriv hf.stieltjesFunction.measure volume x).toReal) := by
apply tendsto_apply_add_mul_sq_div_sub (nhds_left'_le_nhds_ne x) hx.1
apply tendsto_nhdsWithin_of_tendsto_nhds_of_eventually_within
· apply Tendsto.mono_left _ nhdsWithin_le_nhds
have : Tendsto (fun y : â => y + -â1 * (y - x) ^ 2) (ð x) (ð (x + -â1 * (x - x) ^ 2)) :=
tendsto_id.add (((tendsto_id.sub_const x).pow 2).const_mul (-1))
simpa using this
· have : Ioo (x - 1) x â ð[<] x := by
apply Ioo_mem_nhdsWithin_Iio; exact âšby linarith, le_refl _â©
filter_upwards [this]
rintro y hy
rw [mem_Ioo] at hy
rw [mem_Iio]
-- For `y` close to `x` on the left, the shifted point stays to the left of `x`.
norm_num; nlinarith
-- apply the sandwiching argument, with `g` and the helper function
apply tendsto_of_tendsto_of_tendsto_of_le_of_le' hx.1 this
· filter_upwards [self_mem_nhdsWithin]
rintro y hy
rw [mem_Iio, â sub_neg] at hy
-- Dividing by the negative quantity `y - x` reverses the inequality direction.
apply div_le_div_of_nonpos_of_le hy.le
exact (sub_le_sub_iff_right _).2 (hf.le_rightLim (le_refl _))
· filter_upwards [self_mem_nhdsWithin]
rintro y hy
rw [mem_Iio, â sub_neg] at hy
have : 0 < (y - x) ^ 2 := sq_pos_of_neg hy
apply div_le_div_of_nonpos_of_le hy.le
exact (sub_le_sub_iff_right _).2 (hf.rightLim_le (by norm_num; linarith))
-- conclude global differentiability
rw [hasDerivAt_iff_tendsto_slope, slope_fun_def_field, (nhds_left'_sup_nhds_right' x).symm,
tendsto_sup]
exact âšL2, L1â©
/-- A monotone real function is differentiable Lebesgue-almost everywhere. -/
theorem Monotone.ae_differentiableAt {f : â â â} (hf : Monotone f) :
âáµ x, DifferentiableAt â f x := by
-- Immediate from `Monotone.ae_hasDerivAt`: a derivative exists a.e., hence differentiability a.e.
filter_upwards [hf.ae_hasDerivAt] with x hx using hx.differentiableAt
/-- A real function which is monotone on a set is differentiable Lebesgue-almost everywhere on
this set. This version does not assume that `s` is measurable. For a formulation with
`volume.restrict s` assuming that `s` is measurable, see `MonotoneOn.ae_differentiableWithinAt`.
-/
theorem MonotoneOn.ae_differentiableWithinAt_of_mem {f : â â â} {s : Set â} (hf : MonotoneOn f s) :
âáµ x, x â s â DifferentiableWithinAt â f s x := by
/- We use a global monotone extension of `f`, and argue that this extension is differentiable
almost everywhere. Such an extension need not exist (think of `1/x` on `(0, +â)`), but it exists
if one restricts first the function to a compact interval `[a, b]`. -/
-- It suffices to prove the statement on each intersection `s â© Ioo a b`.
apply ae_of_mem_of_ae_of_mem_inter_Ioo
intro a b as bs _
-- Extend `f` from `s â© Icc a b` (where it is bounded above and below) to a monotone `g` on `â`.
obtain âšg, hg, gfâ© : â g : â â â, Monotone g â§ EqOn f g (s â© Icc a b) :=
(hf.mono inter_subset_left).exists_monotone_extension
(hf.map_bddBelow inter_subset_left âša, fun x hx => hx.2.1, asâ©)
(hf.map_bddAbove inter_subset_left âšb, fun x hx => hx.2.2, bsâ©)
-- `g` is differentiable a.e.; transfer this to `f` on the set where the two agree.
filter_upwards [hg.ae_differentiableAt] with x hx
intro h'x
apply hx.differentiableWithinAt.congr_of_eventuallyEq _ (gf âšh'x.1, h'x.2.1.le, h'x.2.2.leâ©)
-- `f = g` on a neighborhood of `x` within `s`, since `Ioo a b` is open around `x`.
have : Ioo a b â ð[s] x := nhdsWithin_le_nhds (Ioo_mem_nhds h'x.2.1 h'x.2.2)
filter_upwards [self_mem_nhdsWithin, this] with y hy h'y
exact gf âšhy, h'y.1.le, h'y.2.leâ©
/-- A real function which is monotone on a set is differentiable Lebesgue-almost everywhere on
this set. This version assumes that `s` is measurable and uses `volume.restrict s`.
For a formulation without measurability assumption,
see `MonotoneOn.ae_differentiableWithinAt_of_mem`. -/
theorem MonotoneOn.ae_differentiableWithinAt {f : â â â} {s : Set â} (hf : MonotoneOn f s)
(hs : MeasurableSet s) : âáµ x âvolume.restrict s, DifferentiableWithinAt â f s x := by
-- Rewrite the restricted a.e. statement as an implication, then apply the `of_mem` version.
rw [ae_restrict_iff' hs]
exact hf.ae_differentiableWithinAt_of_mem
|
Analysis\Calculus\ParametricIntegral.lean | /-
Copyright (c) 2021 Patrick Massot. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Patrick Massot
-/
import Mathlib.Analysis.Calculus.MeanValue
import Mathlib.MeasureTheory.Integral.DominatedConvergence
import Mathlib.MeasureTheory.Integral.SetIntegral
import Mathlib.Analysis.NormedSpace.HahnBanach.SeparatingDual
/-!
# Derivatives of integrals depending on parameters
A parametric integral is a function with shape `f = fun x : H ⊠⫠a : α, F x a âÎŒ` for some
`F : H â α â E`, where `H` and `E` are normed spaces and `α` is a measured space with measure `ÎŒ`.
We already know from `continuous_of_dominated` in `Mathlib/MeasureTheory/Integral/Bochner.lean` how
to guarantee that `f` is continuous using the dominated convergence theorem. In this file,
we want to express the derivative of `f` as the integral of the derivative of `F` with respect
to `x`.
## Main results
As explained above, all results express the derivative of a parametric integral as the integral of
a derivative. The variations come from the assumptions and from the different ways of expressing
derivative, especially Fréchet derivatives vs elementary derivative of function of one real
variable.
* `hasFDerivAt_integral_of_dominated_loc_of_lip`: this version assumes that
- `F x` is ae-measurable for x near `xâ`,
- `F xâ` is integrable,
- `fun x ⊠F x a` has derivative `F' a : H âL[â] E` at `xâ` which is ae-measurable,
- `fun x ⊠F x a` is locally Lipschitz near `xâ` for almost every `a`,
with a Lipschitz bound which is integrable with respect to `a`.
A subtle point is that the "near xâ" in the last condition has to be uniform in `a`. This is
controlled by a positive number `ε`.
* `hasFDerivAt_integral_of_dominated_of_fderiv_le`: this version assumes `fun x ⊠F x a` has
derivative `F' x a` for `x` near `xâ` and `F' x` is bounded by an integrable function independent
from `x` near `xâ`.
`hasDerivAt_integral_of_dominated_loc_of_lip` and
`hasDerivAt_integral_of_dominated_loc_of_deriv_le` are versions of the above two results that
assume `H = â` or `H = â` and use the high-school derivative `deriv` instead of Fréchet derivative
`fderiv`.
We also provide versions of these theorems for set integrals.
## Tags
integral, derivative
-/
noncomputable section
open TopologicalSpace MeasureTheory Filter Metric
open scoped Topology Filter
-- `α` is the integration variable's measure space, `ð` the scalar field (`â` or `â` via
-- `RCLike`), `E` the target space, and `H` the parameter space of the integral.
variable {α : Type*} [MeasurableSpace α] {ÎŒ : Measure α} {ð : Type*} [RCLike ð] {E : Type*}
[NormedAddCommGroup E] [NormedSpace â E] [NormedSpace ð E] {H : Type*}
[NormedAddCommGroup H] [NormedSpace ð H]
-- `F` is the parametrized integrand, `xâ` the differentiation point, `bound` a dominating
-- function, and `ε` a uniform ball radius around `xâ`.
variable {F : H â α â E} {xâ : H} {bound : α â â} {ε : â}
/-- Differentiation under integral of `x ⊠⫠F x a` at a given point `xâ`, assuming `F xâ` is
integrable, `âF x a - F xâ aâ †bound a * âx - xââ` for `x` in a ball around `xâ` for ae `a` with
integrable Lipschitz bound `bound` (with a ball radius independent of `a`), and `F x` is
ae-measurable for `x` in the same ball. See `hasFDerivAt_integral_of_dominated_loc_of_lip` for a
slightly less general but usually more useful version. -/
theorem hasFDerivAt_integral_of_dominated_loc_of_lip' {F' : α â H âL[ð] E} (ε_pos : 0 < ε)
(hF_meas : â x â ball xâ ε, AEStronglyMeasurable (F x) ÎŒ) (hF_int : Integrable (F xâ) ÎŒ)
(hF'_meas : AEStronglyMeasurable F' Ό)
(h_lipsch : âáµ a âÎŒ, â x â ball xâ ε, âF x a - F xâ aâ †bound a * âx - xââ)
(bound_integrable : Integrable (bound : α â â) ÎŒ)
(h_diff : âáµ a âÎŒ, HasFDerivAt (F · a) (F' a) xâ) :
Integrable F' ÎŒ â§ HasFDerivAt (fun x ⊠⫠a, F x a âÎŒ) (â« a, F' a âÎŒ) xâ := by
have xâ_in : xâ â ball xâ ε := mem_ball_self ε_pos
have nneg : â x, 0 †âx - xâââ»Â¹ := fun x ⊠inv_nonneg.mpr (norm_nonneg _)
-- Replace `bound` by its absolute value so nonnegativity can be used freely below.
set b : α â â := fun a ⊠|bound a|
have b_int : Integrable b Ό := bound_integrable.norm
have b_nonneg : â a, 0 †b a := fun a ⊠abs_nonneg _
replace h_lipsch : âáµ a âÎŒ, â x â ball xâ ε, âF x a - F xâ aâ †b a * âx - xââ :=
h_lipsch.mono fun a ha x hx âŠ
(ha x hx).trans <| mul_le_mul_of_nonneg_right (le_abs_self _) (norm_nonneg _)
-- Each `F x` for `x` in the ball is integrable, by comparison with the integrable `F xâ`.
have hF_int' : â x â ball xâ ε, Integrable (F x) ÎŒ := fun x x_in ⊠by
have : âáµ a âÎŒ, âF xâ a - F x aâ †ε * b a := by
simp only [norm_sub_rev (F xâ _)]
refine h_lipsch.mono fun a ha ⊠(ha x x_in).trans ?_
rw [mul_comm ε]
rw [mem_ball, dist_eq_norm] at x_in
exact mul_le_mul_of_nonneg_left x_in.le (b_nonneg _)
exact integrable_of_norm_sub_le (hF_meas x x_in) hF_int
(bound_integrable.norm.const_mul ε) this
-- `F'` is integrable: its norm is a.e. dominated by the Lipschitz bound `b`.
have hF'_int : Integrable F' Ό :=
have : âáµ a âÎŒ, âF' aâ †b a := by
apply (h_diff.and h_lipsch).mono
rintro a âšha_diff, ha_lipâ©
exact ha_diff.le_of_lip' (b_nonneg a) (mem_of_superset (ball_mem_nhds _ ε_pos) <| ha_lip)
b_int.mono' hF'_meas this
refine âšhF'_int, ?_â©
/- Discard the trivial case where `E` is not complete, as all integrals vanish. -/
by_cases hE : CompleteSpace E; swap
· rcases subsingleton_or_nontrivial H with hH|hH
· have : Subsingleton (H âL[ð] E) := inferInstance
convert hasFDerivAt_of_subsingleton _ xâ
· have : ¬(CompleteSpace (H âL[ð] E)) := by
simpa [SeparatingDual.completeSpace_continuousLinearMap_iff] using hE
simp only [integral, hE, âreduceDIte, this]
exact hasFDerivAt_const 0 xâ
have h_ball : ball xâ ε â ð xâ := ball_mem_nhds xâ ε_pos
-- Rewrite the normalized difference quotient of the integral as the norm of one integral.
have : âá¶ x in ð xâ, âx - xâââ»Â¹ * â((â« a, F x a âÎŒ) - â« a, F xâ a âÎŒ) - (â« a, F' a âÎŒ) (x - xâ)â =
ââ« a, âx - xâââ»Â¹ ⢠(F x a - F xâ a - F' a (x - xâ)) âÎŒâ := by
apply mem_of_superset (ball_mem_nhds _ ε_pos)
intro x x_in; simp only
rw [Set.mem_setOf_eq, â norm_smul_of_nonneg (nneg _), integral_smul, integral_sub, integral_sub,
â ContinuousLinearMap.integral_apply hF'_int]
exacts [hF_int' x x_in, hF_int, (hF_int' x x_in).sub hF_int,
hF'_int.apply_continuousLinearMap _]
-- Conclude via dominated convergence applied to the family of difference quotients.
rw [hasFDerivAt_iff_tendsto, tendsto_congr' this, â tendsto_zero_iff_norm_tendsto_zero, â
show (â« a : α, âxâ - xâââ»Â¹ ⢠(F xâ a - F xâ a - (F' a) (xâ - xâ)) âÎŒ) = 0 by simp]
apply tendsto_integral_filter_of_dominated_convergence
-- Measurability of the integrands for `x` near `xâ`.
· filter_upwards [h_ball] with _ x_in
apply AEStronglyMeasurable.const_smul
exact ((hF_meas _ x_in).sub (hF_meas _ xâ_in)).sub (hF'_meas.apply_continuousLinearMap _)
-- Uniform domination of the difference quotients by `b + âF'â`.
· refine mem_of_superset h_ball fun x hx ⊠?_
apply (h_diff.and h_lipsch).mono
on_goal 1 => rintro a âš-, ha_boundâ©
show ââx - xâââ»Â¹ ⢠(F x a - F xâ a - F' a (x - xâ))â †b a + âF' aâ
replace ha_bound : âF x a - F xâ aâ †b a * âx - xââ := ha_bound x hx
calc
ââx - xâââ»Â¹ ⢠(F x a - F xâ a - F' a (x - xâ))â =
ââx - xâââ»Â¹ ⢠(F x a - F xâ a) - âx - xâââ»Â¹ ⢠F' a (x - xâ)â := by rw [smul_sub]
_ †ââx - xâââ»Â¹ ⢠(F x a - F xâ a)â + ââx - xâââ»Â¹ ⢠F' a (x - xâ)â := norm_sub_le _ _
_ = âx - xâââ»Â¹ * âF x a - F xâ aâ + âx - xâââ»Â¹ * âF' a (x - xâ)â := by
rw [norm_smul_of_nonneg, norm_smul_of_nonneg] <;> exact nneg _
_ †âx - xâââ»Â¹ * (b a * âx - xââ) + âx - xâââ»Â¹ * (âF' aâ * âx - xââ) := by
gcongr; exact (F' a).le_opNorm _
_ †b a + âF' aâ := ?_
simp only [â div_eq_inv_mul]
apply_rules [add_le_add, div_le_of_nonneg_of_le_mul] <;> first | rfl | positivity
-- The dominating function itself is integrable.
· exact b_int.add hF'_int.norm
-- Pointwise a.e. convergence: exactly differentiability of `F · a` at `xâ`.
· apply h_diff.mono
intro a ha
suffices Tendsto (fun x ⊠âx - xâââ»Â¹ ⢠(F x a - F xâ a - F' a (x - xâ))) (ð xâ) (ð 0) by simpa
rw [tendsto_zero_iff_norm_tendsto_zero]
have : (fun x ⊠âx - xâââ»Â¹ * âF x a - F xâ a - F' a (x - xâ)â) = fun x âŠ
ââx - xâââ»Â¹ ⢠(F x a - F xâ a - F' a (x - xâ))â := by
ext x
rw [norm_smul_of_nonneg (nneg _)]
rwa [hasFDerivAt_iff_tendsto, this] at ha
/-- Differentiation under integral of `x ⊠⫠F x a` at a given point `xâ`, assuming
`F xâ` is integrable, `x ⊠F x a` is locally Lipschitz on a ball around `xâ` for ae `a`
(with a ball radius independent of `a`) with integrable Lipschitz bound, and `F x` is ae-measurable
for `x` in a possibly smaller neighborhood of `xâ`. -/
theorem hasFDerivAt_integral_of_dominated_loc_of_lip {F' : α â H âL[ð] E}
(ε_pos : 0 < ε) (hF_meas : âá¶ x in ð xâ, AEStronglyMeasurable (F x) ÎŒ)
(hF_int : Integrable (F xâ) ÎŒ) (hF'_meas : AEStronglyMeasurable F' ÎŒ)
(h_lip : âáµ a âÎŒ, LipschitzOnWith (Real.nnabs <| bound a) (F · a) (ball xâ ε))
(bound_integrable : Integrable (bound : α â â) ÎŒ)
(h_diff : âáµ a âÎŒ, HasFDerivAt (F · a) (F' a) xâ) :
Integrable F' ÎŒ â§ HasFDerivAt (fun x ⊠⫠a, F x a âÎŒ) (â« a, F' a âÎŒ) xâ := by
-- Shrink the ball to radius `Ύ` so that measurability holds everywhere on it, not just eventually.
obtain âšÎŽ, ÎŽ_pos, hÎŽâ© : â ÎŽ > 0, â x â ball xâ ÎŽ, AEStronglyMeasurable (F x) ÎŒ â§ x â ball xâ ε :=
eventually_nhds_iff_ball.mp (hF_meas.and (ball_mem_nhds xâ ε_pos))
choose hΎ_meas hΎε using hΎ
-- Turn the Lipschitz hypothesis into the pointwise bound used by the primed lemma.
replace h_lip : âáµ a : α âÎŒ, â x â ball xâ ÎŽ, âF x a - F xâ aâ †|bound a| * âx - xââ :=
h_lip.mono fun a lip x hx ⊠lip.norm_sub_le (hΎε x hx) (mem_ball_self ε_pos)
replace bound_integrable := bound_integrable.norm
-- Apply the primed version on the smaller ball of radius `ÎŽ`.
apply hasFDerivAt_integral_of_dominated_loc_of_lip' ÎŽ_pos <;> assumption
/-- Differentiation under integral of `x ⊠⫠x in a..b, F x t` at a given point `xâ â (a,b)`,
assuming `F xâ` is integrable on `(a,b)`, that `x ⊠F x t` is Lipschitz on a ball around `xâ`
for almost every `t` (with a ball radius independent of `t`) with integrable Lipschitz bound,
and `F x` is a.e.-measurable for `x` in a possibly smaller neighborhood of `xâ`. -/
theorem hasFDerivAt_integral_of_dominated_loc_of_lip_interval [NormedSpace â H] {ÎŒ : Measure â}
{F : H â â â E} {F' : â â H âL[â] E} {a b : â} {bound : â â â} (ε_pos : 0 < ε)
(hF_meas : âá¶ x in ð xâ, AEStronglyMeasurable (F x) <| ÎŒ.restrict (Î a b))
(hF_int : IntervalIntegrable (F xâ) ÎŒ a b)
(hF'_meas : AEStronglyMeasurable F' <| Ό.restrict (Πa b))
(h_lip : âáµ t âÎŒ.restrict (Î a b),
LipschitzOnWith (Real.nnabs <| bound t) (F · t) (ball xâ ε))
(bound_integrable : IntervalIntegrable bound Ό a b)
(h_diff : âáµ t âÎŒ.restrict (Î a b), HasFDerivAt (F · t) (F' t) xâ) :
IntervalIntegrable F' ÎŒ a b â§
HasFDerivAt (fun x ⊠⫠t in a..b, F x t âÎŒ) (â« t in a..b, F' t âÎŒ) xâ := by
-- Split every hypothesis over the two halves of the unordered interval `Î a b`.
simp_rw [AEStronglyMeasurable.aestronglyMeasurable_uIoc_iff, eventually_and] at hF_meas hF'_meas
rw [ae_restrict_uIoc_iff] at h_lip h_diff
have Hâ :=
hasFDerivAt_integral_of_dominated_loc_of_lip ε_pos hF_meas.1 hF_int.1 hF'_meas.1 h_lip.1
bound_integrable.1 h_diff.1
have Hâ :=
hasFDerivAt_integral_of_dominated_loc_of_lip ε_pos hF_meas.2 hF_int.2 hF'_meas.2 h_lip.2
bound_integrable.2 h_diff.2
-- The interval integral is the difference of the two one-sided set integrals.
exact âšâšHâ.1, Hâ.1â©, Hâ.2.sub Hâ.2â©
/-- Differentiation under integral of `x ⊠⫠F x a` at a given point `xâ`, assuming
`F xâ` is integrable, `x ⊠F x a` is differentiable on a ball around `xâ` for ae `a` with
derivative norm uniformly bounded by an integrable function (the ball radius is independent of `a`),
and `F x` is ae-measurable for `x` in a possibly smaller neighborhood of `xâ`. -/
theorem hasFDerivAt_integral_of_dominated_of_fderiv_le {F' : H â α â H âL[ð] E} (ε_pos : 0 < ε)
(hF_meas : âá¶ x in ð xâ, AEStronglyMeasurable (F x) ÎŒ) (hF_int : Integrable (F xâ) ÎŒ)
(hF'_meas : AEStronglyMeasurable (F' xâ) ÎŒ)
(h_bound : âáµ a âÎŒ, â x â ball xâ ε, âF' x aâ †bound a)
(bound_integrable : Integrable (bound : α â â) ÎŒ)
(h_diff : âáµ a âÎŒ, â x â ball xâ ε, HasFDerivAt (F · a) (F' x a) x) :
HasFDerivAt (fun x ⊠⫠a, F x a âÎŒ) (â« a, F' xâ a âÎŒ) xâ := by
-- View `H` as a real normed space so convexity of the ball is available below.
letI : NormedSpace â H := NormedSpace.restrictScalars â ð H
have xâ_in : xâ â ball xâ ε := mem_ball_self ε_pos
have diff_xâ : âáµ a âÎŒ, HasFDerivAt (F · a) (F' xâ a) xâ :=
h_diff.mono fun a ha ⊠ha xâ xâ_in
-- The mean value inequality turns the derivative bound into a Lipschitz bound on the ball.
have : âáµ a âÎŒ, LipschitzOnWith (Real.nnabs (bound a)) (F · a) (ball xâ ε) := by
apply (h_diff.and h_bound).mono
rintro a âšha_deriv, ha_boundâ©
refine (convex_ball _ _).lipschitzOnWith_of_nnnorm_hasFDerivWithin_le
(fun x x_in ⊠(ha_deriv x x_in).hasFDerivWithinAt) fun x x_in ⊠?_
rw [â NNReal.coe_le_coe, coe_nnnorm, Real.coe_nnabs]
exact (ha_bound x x_in).trans (le_abs_self _)
exact (hasFDerivAt_integral_of_dominated_loc_of_lip ε_pos hF_meas hF_int hF'_meas this
bound_integrable diff_xâ).2
/-- Differentiation under integral of `x ⊠⫠x in a..b, F x a` at a given point `xâ`, assuming
`F xâ` is integrable on `(a,b)`, `x ⊠F x a` is differentiable on a ball around `xâ` for ae `a` with
derivative norm uniformly bounded by an integrable function (the ball radius is independent of `a`),
and `F x` is ae-measurable for `x` in a possibly smaller neighborhood of `xâ`. -/
theorem hasFDerivAt_integral_of_dominated_of_fderiv_le'' [NormedSpace â H] {ÎŒ : Measure â}
{F : H â â â E} {F' : H â â â H âL[â] E} {a b : â} {bound : â â â} (ε_pos : 0 < ε)
(hF_meas : âá¶ x in ð xâ, AEStronglyMeasurable (F x) <| ÎŒ.restrict (Î a b))
(hF_int : IntervalIntegrable (F xâ) ÎŒ a b)
(hF'_meas : AEStronglyMeasurable (F' xâ) <| ÎŒ.restrict (Î a b))
(h_bound : âáµ t âÎŒ.restrict (Î a b), â x â ball xâ ε, âF' x tâ †bound t)
(bound_integrable : IntervalIntegrable bound Ό a b)
(h_diff : âáµ t âÎŒ.restrict (Î a b), â x â ball xâ ε, HasFDerivAt (F · t) (F' x t) x) :
HasFDerivAt (fun x ⊠⫠t in a..b, F x t âÎŒ) (â« t in a..b, F' xâ t âÎŒ) xâ := by
-- Split the hypotheses over both orientations of the interval and subtract the results.
rw [ae_restrict_uIoc_iff] at h_diff h_bound
simp_rw [AEStronglyMeasurable.aestronglyMeasurable_uIoc_iff, eventually_and] at hF_meas hF'_meas
exact
(hasFDerivAt_integral_of_dominated_of_fderiv_le ε_pos hF_meas.1 hF_int.1 hF'_meas.1 h_bound.1
bound_integrable.1 h_diff.1).sub
(hasFDerivAt_integral_of_dominated_of_fderiv_le ε_pos hF_meas.2 hF_int.2 hF'_meas.2 h_bound.2
bound_integrable.2 h_diff.2)
section
variable {F : ð â α â E} {xâ : ð}
/-- Derivative under integral of `x ⊠⫠F x a` at a given point `xâ : ð`, `ð = â` or `ð = â`,
assuming `F xâ` is integrable, `x ⊠F x a` is locally Lipschitz on a ball around `xâ` for ae `a`
(with ball radius independent of `a`) with integrable Lipschitz bound, and `F x` is
ae-measurable for `x` in a possibly smaller neighborhood of `xâ`. -/
theorem hasDerivAt_integral_of_dominated_loc_of_lip {F' : α â E} (ε_pos : 0 < ε)
(hF_meas : âá¶ x in ð xâ, AEStronglyMeasurable (F x) ÎŒ) (hF_int : Integrable (F xâ) ÎŒ)
(hF'_meas : AEStronglyMeasurable F' Ό)
(h_lipsch : âáµ a âÎŒ, LipschitzOnWith (Real.nnabs <| bound a) (F · a) (ball xâ ε))
(bound_integrable : Integrable (bound : α â â) ÎŒ)
(h_diff : âáµ a âÎŒ, HasDerivAt (F · a) (F' a) xâ) :
Integrable F' ÎŒ â§ HasDerivAt (fun x ⊠⫠a, F x a âÎŒ) (â« a, F' a âÎŒ) xâ := by
-- `L` embeds a vector of `E` as a continuous linear map on `ð`, translating the scalar
-- derivative statement into the Fréchet derivative statement proved above.
set L : E âL[ð] ð âL[ð] E := ContinuousLinearMap.smulRightL ð ð E 1
replace h_diff : âáµ a âÎŒ, HasFDerivAt (F · a) (L (F' a)) xâ :=
h_diff.mono fun x hx ⊠hx.hasFDerivAt
have hm : AEStronglyMeasurable (L â F') ÎŒ := L.continuous.comp_aestronglyMeasurable hF'_meas
cases'
hasFDerivAt_integral_of_dominated_loc_of_lip ε_pos hF_meas hF_int hm h_lipsch bound_integrable
h_diff with
hF'_int key
-- Transfer integrability of `L â F'` back to `F'`; the simp set rewrites `âL vâ` to `âvâ`.
replace hF'_int : Integrable F' Ό := by
rw [â integrable_norm_iff hm] at hF'_int
simpa only [L, (· â ·), integrable_norm_iff, hF'_meas, one_mul, norm_one,
ContinuousLinearMap.comp_apply, ContinuousLinearMap.coe_restrict_scalarsL',
ContinuousLinearMap.norm_restrictScalars, ContinuousLinearMap.norm_smulRightL_apply] using
hF'_int
refine âšhF'_int, ?_â©
-- Without completeness all Bochner integrals are defined to be zero, so the claim is trivial.
by_cases hE : CompleteSpace E; swap
· simpa [integral, hE] using hasDerivAt_const xâ 0
simp_rw [hasDerivAt_iff_hasFDerivAt] at h_diff â¢
-- Commute `L` with the integral to recover the scalar-derivative form of the conclusion.
simpa only [(· â ·), ContinuousLinearMap.integral_comp_comm _ hF'_int] using key
/-- Derivative under integral of `x ⊠⫠F x a` at a given point `xâ : â`, assuming
`F xâ` is integrable, `x ⊠F x a` is differentiable on an interval around `xâ` for ae `a`
(with interval radius independent of `a`) with derivative uniformly bounded by an integrable
function, and `F x` is ae-measurable for `x` in a possibly smaller neighborhood of `xâ`. -/
theorem hasDerivAt_integral_of_dominated_loc_of_deriv_le (ε_pos : 0 < ε)
(hF_meas : âá¶ x in ð xâ, AEStronglyMeasurable (F x) ÎŒ) (hF_int : Integrable (F xâ) ÎŒ)
{F' : ð â α â E} (hF'_meas : AEStronglyMeasurable (F' xâ) ÎŒ)
(h_bound : âáµ a âÎŒ, â x â ball xâ ε, âF' x aâ †bound a) (bound_integrable : Integrable bound ÎŒ)
(h_diff : âáµ a âÎŒ, â x â ball xâ ε, HasDerivAt (F · a) (F' x a) x) :
Integrable (F' xâ) ÎŒ â§ HasDerivAt (fun n ⊠⫠a, F n a âÎŒ) (â« a, F' xâ a âÎŒ) xâ := by
have xâ_in : xâ â ball xâ ε := mem_ball_self ε_pos
have diff_xâ : âáµ a âÎŒ, HasDerivAt (F · a) (F' xâ a) xâ :=
h_diff.mono fun a ha ⊠ha xâ xâ_in
-- The mean value inequality converts the uniform derivative bound into a Lipschitz bound.
have : âáµ a âÎŒ, LipschitzOnWith (Real.nnabs (bound a)) (fun x : ð ⊠F x a) (ball xâ ε) := by
apply (h_diff.and h_bound).mono
rintro a âšha_deriv, ha_boundâ©
refine (convex_ball _ _).lipschitzOnWith_of_nnnorm_hasDerivWithin_le
(fun x x_in ⊠(ha_deriv x x_in).hasDerivWithinAt) fun x x_in ⊠?_
rw [â NNReal.coe_le_coe, coe_nnnorm, Real.coe_nnabs]
exact (ha_bound x x_in).trans (le_abs_self _)
exact
hasDerivAt_integral_of_dominated_loc_of_lip ε_pos hF_meas hF_int hF'_meas this bound_integrable
diff_xâ
end
|
Analysis\Calculus\ParametricIntervalIntegral.lean | /-
Copyright (c) 2021 Yury Kudryashov. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yury Kudryashov
-/
import Mathlib.Analysis.Calculus.ParametricIntegral
import Mathlib.MeasureTheory.Integral.IntervalIntegral
/-!
# Derivatives of interval integrals depending on parameters
In this file we restate theorems about derivatives of integrals depending on parameters for interval
integrals. -/
open TopologicalSpace MeasureTheory Filter Metric
open scoped Topology Filter Interval
-- `ð` is the scalar field, `ÎŒ` a measure on `â`, `E` the complete target space, `H` the
-- parameter space, `a b` the interval endpoints, `ε` a ball radius, and `bound` a dominating
-- function.
variable {ð : Type*} [RCLike ð] {ÎŒ : Measure â} {E : Type*} [NormedAddCommGroup E]
[NormedSpace â E] [NormedSpace ð E] [CompleteSpace E] {H : Type*} [NormedAddCommGroup H]
[NormedSpace ð H] {a b ε : â} {bound : â â â}
namespace intervalIntegral
/-- Differentiation under integral of `x ⊠⫠t in a..b, F x t` at a given point `xâ`, assuming
`F xâ` is integrable, `x ⊠F x a` is locally Lipschitz on a ball around `xâ` for ae `a`
(with a ball radius independent of `a`) with integrable Lipschitz bound, and `F x` is ae-measurable
for `x` in a possibly smaller neighborhood of `xâ`. -/
nonrec theorem hasFDerivAt_integral_of_dominated_loc_of_lip
{F : H â â â E} {F' : â â H âL[ð] E} {xâ : H}
(ε_pos : 0 < ε) (hF_meas : âá¶ x in ð xâ, AEStronglyMeasurable (F x) (ÎŒ.restrict (Î a b)))
(hF_int : IntervalIntegrable (F xâ) ÎŒ a b)
(hF'_meas : AEStronglyMeasurable F' (Ό.restrict (Πa b)))
(h_lip : âáµ t âÎŒ, t â Î a b â
LipschitzOnWith (Real.nnabs <| bound t) (fun x => F x t) (ball xâ ε))
(bound_integrable : IntervalIntegrable bound Ό a b)
(h_diff : âáµ t âÎŒ, t â Î a b â HasFDerivAt (fun x => F x t) (F' t) xâ) :
IntervalIntegrable F' ÎŒ a b â§
HasFDerivAt (fun x => â« t in a..b, F x t âÎŒ) (â« t in a..b, F' t âÎŒ) xâ := by
-- Restate the hypotheses for the restricted measure and unfold interval integrability.
rw [â ae_restrict_iff' measurableSet_uIoc] at h_lip h_diff
simp only [intervalIntegrable_iff] at hF_int bound_integrable â¢
-- The interval integral is a set integral over `Î a b` up to a constant scalar sign.
simp only [intervalIntegral_eq_integral_uIoc]
have := hasFDerivAt_integral_of_dominated_loc_of_lip ε_pos hF_meas hF_int hF'_meas h_lip
bound_integrable h_diff
-- The sign factor is a constant scalar, preserved by differentiation.
exact âšthis.1, this.2.const_smul _â©
/-- Differentiation under integral of `x ⊠⫠F x a` at a given point `xâ`, assuming
`F xâ` is integrable, `x ⊠F x a` is differentiable on a ball around `xâ` for ae `a` with
derivative norm uniformly bounded by an integrable function (the ball radius is independent of `a`),
and `F x` is ae-measurable for `x` in a possibly smaller neighborhood of `xâ`. -/
nonrec theorem hasFDerivAt_integral_of_dominated_of_fderiv_le
{F : H â â â E} {F' : H â â â H âL[ð] E} {xâ : H} (ε_pos : 0 < ε)
(hF_meas : âá¶ x in ð xâ, AEStronglyMeasurable (F x) (ÎŒ.restrict (Î a b)))
(hF_int : IntervalIntegrable (F xâ) ÎŒ a b)
(hF'_meas : AEStronglyMeasurable (F' xâ) (ÎŒ.restrict (Î a b)))
(h_bound : âáµ t âÎŒ, t â Î a b â â x â ball xâ ε, âF' x tâ †bound t)
(bound_integrable : IntervalIntegrable bound Ό a b)
(h_diff : âáµ t âÎŒ, t â Î a b â â x â ball xâ ε, HasFDerivAt (fun x => F x t) (F' x t) x) :
HasFDerivAt (fun x => â« t in a..b, F x t âÎŒ) (â« t in a..b, F' xâ t âÎŒ) xâ := by
-- Restate hypotheses for the restricted measure and reduce to the set-integral version.
rw [â ae_restrict_iff' measurableSet_uIoc] at h_bound h_diff
simp only [intervalIntegrable_iff] at hF_int bound_integrable
-- The interval integral is a set integral up to a constant scalar sign factor.
simp only [intervalIntegral_eq_integral_uIoc]
exact (hasFDerivAt_integral_of_dominated_of_fderiv_le ε_pos hF_meas hF_int hF'_meas h_bound
bound_integrable h_diff).const_smul _
/-- Derivative under integral of `x ⊠⫠F x a` at a given point `xâ : ð`, `ð = â` or `ð = â`,
assuming `F xâ` is integrable, `x ⊠F x a` is locally Lipschitz on a ball around `xâ` for ae `a`
(with ball radius independent of `a`) with integrable Lipschitz bound, and `F x` is
ae-measurable for `x` in a possibly smaller neighborhood of `xâ`. -/
nonrec theorem hasDerivAt_integral_of_dominated_loc_of_lip {F : ð â â â E} {F' : â â E} {xâ : ð}
(ε_pos : 0 < ε) (hF_meas : âá¶ x in ð xâ, AEStronglyMeasurable (F x) (ÎŒ.restrict (Î a b)))
(hF_int : IntervalIntegrable (F xâ) ÎŒ a b)
(hF'_meas : AEStronglyMeasurable F' (Ό.restrict (Πa b)))
(h_lipsch : âáµ t âÎŒ, t â Î a b â
LipschitzOnWith (Real.nnabs <| bound t) (fun x => F x t) (ball xâ ε))
(bound_integrable : IntervalIntegrable (bound : â â â) ÎŒ a b)
(h_diff : âáµ t âÎŒ, t â Î a b â HasDerivAt (fun x => F x t) (F' t) xâ) :
IntervalIntegrable F' ÎŒ a b â§
HasDerivAt (fun x => â« t in a..b, F x t âÎŒ) (â« t in a..b, F' t âÎŒ) xâ := by
-- Restate hypotheses for the restricted measure and unfold interval integrability.
rw [â ae_restrict_iff' measurableSet_uIoc] at h_lipsch h_diff
simp only [intervalIntegrable_iff] at hF_int bound_integrable â¢
simp only [intervalIntegral_eq_integral_uIoc]
have := hasDerivAt_integral_of_dominated_loc_of_lip ε_pos hF_meas hF_int hF'_meas h_lipsch
bound_integrable h_diff
-- The orientation sign is a constant scalar, preserved by differentiation.
exact âšthis.1, this.2.const_smul _â©
/-- Derivative under integral of `x ⊠⫠F x a` at a given point `xâ : ð`, `ð = â` or `ð = â`,
assuming `F xâ` is integrable, `x ⊠F x a` is differentiable on an interval around `xâ` for ae `a`
(with interval radius independent of `a`) with derivative uniformly bounded by an integrable
function, and `F x` is ae-measurable for `x` in a possibly smaller neighborhood of `xâ`. -/
nonrec theorem hasDerivAt_integral_of_dominated_loc_of_deriv_le
{F : ð â â â E} {F' : ð â â â E} {xâ : ð}
(ε_pos : 0 < ε) (hF_meas : âá¶ x in ð xâ, AEStronglyMeasurable (F x) (ÎŒ.restrict (Î a b)))
(hF_int : IntervalIntegrable (F xâ) ÎŒ a b)
(hF'_meas : AEStronglyMeasurable (F' xâ) (ÎŒ.restrict (Î a b)))
(h_bound : âáµ t âÎŒ, t â Î a b â â x â ball xâ ε, âF' x tâ †bound t)
(bound_integrable : IntervalIntegrable bound Ό a b)
(h_diff : âáµ t âÎŒ, t â Î a b â â x â ball xâ ε, HasDerivAt (fun x => F x t) (F' x t) x) :
IntervalIntegrable (F' xâ) ÎŒ a b â§
HasDerivAt (fun x => â« t in a..b, F x t âÎŒ) (â« t in a..b, F' xâ t âÎŒ) xâ := by
-- Restate hypotheses for the restricted measure and unfold interval integrability.
rw [â ae_restrict_iff' measurableSet_uIoc] at h_bound h_diff
simp only [intervalIntegrable_iff] at hF_int bound_integrable â¢
simp only [intervalIntegral_eq_integral_uIoc]
have := hasDerivAt_integral_of_dominated_loc_of_deriv_le ε_pos hF_meas hF_int hF'_meas h_bound
bound_integrable h_diff
-- The orientation sign is a constant scalar, preserved by differentiation.
exact âšthis.1, this.2.const_smul _â©
end intervalIntegral
|
Analysis\Calculus\Rademacher.lean | /-
Copyright (c) 2023 Sébastien Gouëzel. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Sébastien Gouëzel
-/
import Mathlib.Analysis.Calculus.LineDeriv.Measurable
import Mathlib.Analysis.Normed.Module.FiniteDimension
import Mathlib.MeasureTheory.Measure.Lebesgue.EqHaar
import Mathlib.Analysis.BoundedVariation
import Mathlib.MeasureTheory.Group.Integral
import Mathlib.Analysis.Distribution.AEEqOfIntegralContDiff
import Mathlib.MeasureTheory.Measure.Haar.Disintegration
/-!
# Rademacher's theorem: a Lipschitz function is differentiable almost everywhere
This file proves Rademacher's theorem: a Lipschitz function between finite-dimensional real vector
spaces is differentiable almost everywhere with respect to the Lebesgue measure. This is the content
of `LipschitzWith.ae_differentiableAt`. Versions for functions which are Lipschitz on sets are also
given (see `LipschitzOnWith.ae_differentiableWithinAt`).
## Implementation
There are many proofs of Rademacher's theorem. We follow the one by Morrey, which is not the most
elementary but maybe the most elegant once necessary prerequisites are set up.
* Step 0: without loss of generality, one may assume that `f` is real-valued.
* Step 1: Since a one-dimensional Lipschitz function has bounded variation, it is differentiable
almost everywhere. With a Fubini argument, it follows that given any vector `v` then `f` is ae
differentiable in the direction of `v`. See `LipschitzWith.ae_lineDifferentiableAt`.
* Step 2: the line derivative `LineDeriv â f x v` is ae linear in `v`. Morrey proves this by a
duality argument, integrating against a smooth compactly supported function `g`, passing the
derivative to `g` by integration by parts, and using the linearity of the derivative of `g`.
See `LipschitzWith.ae_lineDeriv_sum_eq`.
* Step 3: consider a countable dense set `s` of directions. Almost everywhere, the function `f`
is line-differentiable in all these directions and the line derivative is linear. Approximating
any direction by a direction in `s` and using the fact that `f` is Lipschitz to control the error,
it follows that `f` is Fréchet-differentiable at these points.
See `LipschitzWith.hasFderivAt_of_hasLineDerivAt_of_closure`.
## References
* [Pertti Mattila, Geometry of sets and measures in Euclidean spaces, Theorem 7.3][Federer1996]
-/
open Filter MeasureTheory Measure FiniteDimensional Metric Set Asymptotics
open scoped NNReal ENNReal Topology
-- `E` is a finite-dimensional real normed space carrying an additive Haar measure `ÎŒ`,
-- `F` a target normed space, `C D` Lipschitz constants, and `f g : E â â` the maps studied.
variable {E : Type*} [NormedAddCommGroup E] [NormedSpace â E] [FiniteDimensional â E]
[MeasurableSpace E] [BorelSpace E]
{F : Type*} [NormedAddCommGroup F] [NormedSpace â F] {C D : ââ¥0} {f g : E â â} {s : Set E}
{Ό : Measure E} [IsAddHaarMeasure Ό]
namespace LipschitzWith
/-!
### Step 1: A Lipschitz function is ae differentiable in any given direction
This follows from the one-dimensional result that a Lipschitz function on `â` has bounded
variation, and is therefore ae differentiable, together with a Fubini argument.
-/
theorem ae_lineDifferentiableAt (hf : LipschitzWith C f) (v : E) :
âáµ p âÎŒ, LineDifferentiableAt â f p v := by
-- `L t = t ⢠v` parametrizes the direction; a Fubini-type lemma along `L` reduces the
-- a.e. statement on `E` to a one-dimensional statement on each line `p + â v`.
let L : â âL[â] E := ContinuousLinearMap.smulRight (1 : â âL[â] â) v
suffices A : â p, âáµ (t : â) âvolume, LineDifferentiableAt â f (p + t ⢠v) v from
ae_mem_of_ae_add_linearMap_mem L.toLinearMap volume Ό
(measurableSet_lineDifferentiableAt hf.continuous) A
intro p
-- On each line, `t ⊠f (p + t ⢠v)` is Lipschitz on `â`, hence a.e. differentiable.
have : âáµ (s : â), DifferentiableAt â (fun t ⊠f (p + t ⢠v)) s :=
(hf.comp ((LipschitzWith.const p).add L.lipschitz)).ae_differentiableAt_real
filter_upwards [this] with s hs
-- Translate differentiability of the one-variable map at `s` into line differentiability
-- of `f` at the point `p + s ⢠v` in direction `v`, by composing with `t ⊠s + t` at `0`.
have h's : DifferentiableAt â (fun t ⊠f (p + t ⢠v)) (s + 0) := by simpa using hs
have : DifferentiableAt â (fun t ⊠s + t) 0 := differentiableAt_id.const_add _
simp only [LineDifferentiableAt]
convert h's.comp 0 this with _ t
simp only [LineDifferentiableAt, add_assoc, Function.comp_apply, add_smul]
theorem memâp_lineDeriv (hf : LipschitzWith C f) (v : E) :
Memâp (fun x ⊠lineDeriv â f x v) â ÎŒ :=
-- The line derivative of a `C`-Lipschitz map is everywhere bounded in norm by `C * âvâ`,
-- so it lies in `Lâ`.
memâp_top_of_bound (aestronglyMeasurable_lineDeriv hf.continuous ÎŒ)
(C * âvâ) (eventually_of_forall (fun _x ⊠norm_lineDeriv_le_of_lipschitz â hf))
theorem locallyIntegrable_lineDeriv (hf : LipschitzWith C f) (v : E) :
LocallyIntegrable (fun x ⊠lineDeriv â f x v) ÎŒ :=
-- Membership in `Lâ` (from `memâp_lineDeriv`) implies local integrability.
(hf.memâp_lineDeriv v).locallyIntegrable le_top
/-!
### Step 2: the ae line derivative is linear
Surprisingly, this is the hardest step. We prove it using an elegant but slightly sophisticated
argument by Morrey, with a distributional flavor: we integrate against a smooth function, and push
the derivative to the smooth function by integration by parts. As the derivative of a smooth
function is linear, this gives the result.
-/
/-- For `f` Lipschitz and `g` integrable, the integrals of the difference quotients
`tâ»Â¹ ⢠(f (x + t ⢠v) - f x) * g x` converge, as `t â 0âº`, to the integral of
`lineDeriv â f x v * g x`. This follows from the dominated convergence theorem, the quotients
being uniformly bounded by `(C * âvâ) * âg xâ` thanks to the Lipschitz property. -/
theorem integral_inv_smul_sub_mul_tendsto_integral_lineDeriv_mul
(hf : LipschitzWith C f) (hg : Integrable g Ό) (v : E) :
Tendsto (fun (t : â) ⊠⫠x, (tâ»Â¹ ⢠(f (x + t ⢠v) - f x)) * g x âÎŒ) (ð[>] 0)
(ð (â« x, lineDeriv â f x v * g x âÎŒ)) := by
apply tendsto_integral_filter_of_dominated_convergence (fun x ⊠(C * âvâ) * âg xâ)
-- measurability of the integrands
· filter_upwards with t
apply AEStronglyMeasurable.mul ?_ hg.aestronglyMeasurable
apply aestronglyMeasurable_const.smul
apply AEStronglyMeasurable.sub _ hf.continuous.measurable.aestronglyMeasurable
apply AEMeasurable.aestronglyMeasurable
exact hf.continuous.measurable.comp_aemeasurable' (aemeasurable_id'.add_const _)
-- uniform pointwise bound by the dominating function, from the Lipschitz estimate
· filter_upwards [self_mem_nhdsWithin] with t (ht : 0 < t)
filter_upwards with x
calc âtâ»Â¹ ⢠(f (x + t ⢠v) - f x) * g xâ
= (tâ»Â¹ * âf (x + t ⢠v) - f xâ) * âg xâ := by simp [norm_mul, ht.le]
_ †(tâ»Â¹ * (C * â(x + t ⢠v) - xâ)) * âg xâ := by
gcongr; exact LipschitzWith.norm_sub_le hf (x + t ⢠v) x
_ = (C * âvâ) *âg xâ := by field_simp [norm_smul, abs_of_nonneg ht.le]; ring
-- integrability of the dominating function
· exact hg.norm.const_mul _
-- ae convergence of the difference quotients to the line derivative
· filter_upwards [hf.ae_lineDifferentiableAt v] with x hx
exact hx.hasLineDerivAt.tendsto_slope_zero_right.mul tendsto_const_nhds
/-- Variant of `integral_inv_smul_sub_mul_tendsto_integral_lineDeriv_mul` where `f` is assumed
compactly supported and `g` merely continuous: the dominating function is then supported in the
compact `âvâ`-thickening of the support of `f`, which restores integrability. -/
theorem integral_inv_smul_sub_mul_tendsto_integral_lineDeriv_mul'
(hf : LipschitzWith C f) (h'f : HasCompactSupport f) (hg : Continuous g) (v : E) :
Tendsto (fun (t : â) ⊠⫠x, (tâ»Â¹ ⢠(f (x + t ⢠v) - f x)) * g x âÎŒ) (ð[>] 0)
(ð (â« x, lineDeriv â f x v * g x âÎŒ)) := by
-- `K` contains the supports of all the difference quotients for `t †1`
let K := cthickening (âvâ) (tsupport f)
have K_compact : IsCompact K := IsCompact.cthickening h'f
apply tendsto_integral_filter_of_dominated_convergence
(K.indicator (fun x ⊠(C * âvâ) * âg xâ))
-- measurability of the integrands
· filter_upwards with t
apply AEStronglyMeasurable.mul ?_ hg.aestronglyMeasurable
apply aestronglyMeasurable_const.smul
apply AEStronglyMeasurable.sub _ hf.continuous.measurable.aestronglyMeasurable
apply AEMeasurable.aestronglyMeasurable
exact hf.continuous.measurable.comp_aemeasurable' (aemeasurable_id'.add_const _)
-- pointwise bound: on `K` use the Lipschitz estimate, off `K` both terms vanish
· filter_upwards [Ioc_mem_nhdsWithin_Ioi' zero_lt_one] with t ht
have t_pos : 0 < t := ht.1
filter_upwards with x
by_cases hx : x â K
· calc âtâ»Â¹ ⢠(f (x + t ⢠v) - f x) * g xâ
= (tâ»Â¹ * âf (x + t ⢠v) - f xâ) * âg xâ := by simp [norm_mul, t_pos.le]
_ †(tâ»Â¹ * (C * â(x + t ⢠v) - xâ)) * âg xâ := by
gcongr; exact LipschitzWith.norm_sub_le hf (x + t ⢠v) x
_ = (C * âvâ) *âg xâ := by field_simp [norm_smul, abs_of_nonneg t_pos.le]; ring
_ = K.indicator (fun x ⊠(C * âvâ) * âg xâ) x := by rw [indicator_of_mem hx]
-- outside `K`, both `f x` and `f (x + t ⢠v)` vanish since `t ⢠v` has norm at most `âvâ`
· have A : f x = 0 := by
rw [â Function.nmem_support]
contrapose! hx
exact self_subset_cthickening _ (subset_tsupport _ hx)
have B : f (x + t ⢠v) = 0 := by
rw [â Function.nmem_support]
contrapose! hx
apply mem_cthickening_of_dist_le _ _ (âvâ) (tsupport f) (subset_tsupport _ hx)
simp only [dist_eq_norm, sub_add_cancel_left, norm_neg, norm_smul, Real.norm_eq_abs,
abs_of_nonneg t_pos.le, norm_pos_iff]
exact mul_le_of_le_one_left (norm_nonneg v) ht.2
simp only [B, A, _root_.sub_self, smul_eq_mul, mul_zero, zero_mul, norm_zero]
exact indicator_nonneg (fun y _hy ⊠by positivity) _
-- integrability of the dominating function, from compactness of `K`
· rw [integrable_indicator_iff K_compact.measurableSet]
apply ContinuousOn.integrableOn_compact K_compact
exact (Continuous.mul continuous_const hg.norm).continuousOn
-- ae convergence of the difference quotients to the line derivative
· filter_upwards [hf.ae_lineDifferentiableAt v] with x hx
exact hx.hasLineDerivAt.tendsto_slope_zero_right.mul tendsto_const_nhds
/-- Integration by parts formula for the line derivative of Lipschitz functions, assuming one of
them is compactly supported. -/
theorem integral_lineDeriv_mul_eq
(hf : LipschitzWith C f) (hg : LipschitzWith D g) (h'g : HasCompactSupport g) (v : E) :
â« x, lineDeriv â f x v * g x âÎŒ = â« x, lineDeriv â g x (-v) * f x âÎŒ := by
/- Write down the line derivative as the limit of `(f (x + t v) - f x) / t` and
`(g (x - t v) - g x) / t`, and therefore the integrals as limits of the corresponding integrals
thanks to the dominated convergence theorem. At fixed positive `t`, the integrals coincide
(with the change of variables `y = x + t v`), so the limits also coincide. -/
have A : Tendsto (fun (t : â) ⊠⫠x, (tâ»Â¹ ⢠(f (x + t ⢠v) - f x)) * g x âÎŒ) (ð[>] 0)
(ð (â« x, lineDeriv â f x v * g x âÎŒ)) :=
integral_inv_smul_sub_mul_tendsto_integral_lineDeriv_mul
hf (hg.continuous.integrable_of_hasCompactSupport h'g) v
have B : Tendsto (fun (t : â) ⊠⫠x, (tâ»Â¹ ⢠(g (x + t ⢠(-v)) - g x)) * f x âÎŒ) (ð[>] 0)
(ð (â« x, lineDeriv â g x (-v) * f x âÎŒ)) :=
integral_inv_smul_sub_mul_tendsto_integral_lineDeriv_mul' hg h'g hf.continuous (-v)
-- it remains to identify the two approximating integrals at every fixed `t`
suffices S1 : â (t : â), â« x, (tâ»Â¹ ⢠(f (x + t ⢠v) - f x)) * g x âÎŒ =
â« x, (tâ»Â¹ ⢠(g (x + t ⢠(-v)) - g x)) * f x âÎŒ by
simp only [S1] at A; exact tendsto_nhds_unique A B
intro t
-- after pulling out the `tâ»Â¹` factor, this reduces to an identity without the scaling
suffices S2 : â« x, (f (x + t ⢠v) - f x) * g x âÎŒ = â« x, f x * (g (x + t ⢠(-v)) - g x) âÎŒ by
simp only [smul_eq_mul, mul_assoc, integral_mul_left, S2, mul_neg, mul_comm (f _)]
-- the cross terms agree by translation invariance of `Ό` (change of variables `y = x + t ⢠v`)
have S3 : â« x, f (x + t ⢠v) * g x âÎŒ = â« x, f x * g (x + t ⢠(-v)) âÎŒ := by
rw [â integral_add_right_eq_self _ (t ⢠(-v))]; simp
simp_rw [_root_.sub_mul, _root_.mul_sub]
rw [integral_sub, integral_sub, S3]
-- the four integrability side conditions, from continuity and compact support
· apply Continuous.integrable_of_hasCompactSupport
· exact hf.continuous.mul (hg.continuous.comp (continuous_add_right _))
· exact (h'g.comp_homeomorph (Homeomorph.addRight (t ⢠(-v)))).mul_left
· exact (hf.continuous.mul hg.continuous).integrable_of_hasCompactSupport h'g.mul_left
· apply Continuous.integrable_of_hasCompactSupport
· exact (hf.continuous.comp (continuous_add_right _)).mul hg.continuous
· exact h'g.mul_left
· exact (hf.continuous.mul hg.continuous).integrable_of_hasCompactSupport h'g.mul_left
/-- The line derivative of a Lipschitz function is almost everywhere linear with respect to fixed
coefficients. -/
theorem ae_lineDeriv_sum_eq
(hf : LipschitzWith C f) {ι : Type*} (s : Finset ι) (a : ι â â) (v : ι â E) :
âáµ x âÎŒ, lineDeriv â f x (â i â s, a i ⢠v i) = â i â s, a i ⢠lineDeriv â f x (v i) := by
/- Clever argument by Morrey: integrate against a smooth compactly supported function `g`, switch
the derivative to `g` by integration by parts, and use the linearity of the derivative of `g` to
conclude that the initial integrals coincide. -/
-- two locally integrable functions coinciding against every smooth compactly supported `g`
-- coincide almost everywhere
apply ae_eq_of_integral_contDiff_smul_eq (hf.locallyIntegrable_lineDeriv _)
(locallyIntegrable_finset_sum _ (fun i hi ⊠(hf.locallyIntegrable_lineDeriv (v i)).smul (a i)))
(fun g g_smooth g_comp ⊠?_)
simp_rw [Finset.smul_sum]
have A : â i â s, Integrable (fun x ⊠g x ⢠(a i ⢠fun x ⊠lineDeriv â f x (v i)) x) ÎŒ :=
fun i hi ⊠(g_smooth.continuous.integrable_of_hasCompactSupport g_comp).smul_of_top_left
((hf.memâp_lineDeriv (v i)).const_smul (a i))
rw [integral_finset_sum _ A]
-- reduce to the corresponding identity for the integrals against `g`
suffices S1 : â« x, lineDeriv â f x (â i â s, a i ⢠v i) * g x âÎŒ
= â i â s, a i * â« x, lineDeriv â f x (v i) * g x âÎŒ by
dsimp only [smul_eq_mul, Pi.smul_apply]
simp_rw [â mul_assoc, mul_comm _ (a _), mul_assoc, integral_mul_left, mul_comm (g _), S1]
-- integrate by parts to move the derivatives onto the smooth function `g`, whose (Fréchet)
-- derivative is genuinely linear in the direction
suffices S2 : â« x, (â i â s, a i * fderiv â g x (v i)) * f x âÎŒ =
â i â s, a i * â« x, fderiv â g x (v i) * f x âÎŒ by
obtain âšD, g_lipâ© : â D, LipschitzWith D g :=
ContDiff.lipschitzWith_of_hasCompactSupport g_comp g_smooth le_top
simp_rw [integral_lineDeriv_mul_eq hf g_lip g_comp]
simp_rw [(g_smooth.differentiable le_top).differentiableAt.lineDeriv_eq_fderiv]
simp only [map_neg, _root_.map_sum, _root_.map_smul, smul_eq_mul, neg_mul]
simp only [integral_neg, mul_neg, Finset.sum_neg_distrib, neg_inj]
exact S2
-- finally, exchange the finite sum and the integral
suffices B : â i â s, Integrable (fun x ⊠a i * (fderiv â g x (v i) * f x)) ÎŒ by
simp_rw [Finset.sum_mul, mul_assoc, integral_finset_sum s B, integral_mul_left]
intro i _hi
let L : (E âL[â] â) â â := fun f ⊠f (v i)
change Integrable (fun x ⊠a i * ((L â (fderiv â g)) x * f x)) ÎŒ
refine (Continuous.integrable_of_hasCompactSupport ?_ ?_).const_mul _
· exact ((g_smooth.continuous_fderiv le_top).clm_apply continuous_const).mul hf.continuous
· exact ((g_comp.fderiv â).comp_left rfl).mul_right
/-!
### Step 3: construct the derivative using the line derivatives along a basis
-/
/-- Almost everywhere, there is a single continuous linear map giving the line derivative of a
Lipschitz function in all the directions of a given countable set at once: take the linear map
determined by the line derivatives along a basis, and use the ae linearity of the line
derivative (`ae_lineDeriv_sum_eq`) to check it is correct in each direction of `s`. -/
theorem ae_exists_fderiv_of_countable
(hf : LipschitzWith C f) {s : Set E} (hs : s.Countable) :
âáµ x âÎŒ, â (L : E âL[â] â), â v â s, HasLineDerivAt â f (L v) x v := by
have B := Basis.ofVectorSpace â E
-- ae, the line derivative is linear on the decomposition of each `v â s` in the basis
have I1 : âáµ (x : E) âÎŒ, â v â s, lineDeriv â f x (â i, (B.repr v i) ⢠B i) =
â i, B.repr v i ⢠lineDeriv â f x (B i) :=
(ae_ball_iff hs).2 (fun v _ ⊠hf.ae_lineDeriv_sum_eq _ _ _)
-- ae, the line derivative exists in every direction of `s`
have I2 : âáµ (x : E) âÎŒ, â v â s, LineDifferentiableAt â f x v :=
(ae_ball_iff hs).2 (fun v _ ⊠hf.ae_lineDifferentiableAt v)
filter_upwards [I1, I2] with x hx h'x
-- the candidate linear map, determined by its values on the basis
let L : E âL[â] â :=
LinearMap.toContinuousLinearMap (B.constr â (fun i ⊠lineDeriv â f x (B i)))
refine âšL, fun v hv ⊠?_â©
have J : L v = lineDeriv â f x v := by convert (hx v hv).symm <;> simp [L, B.sum_repr v]
simpa [J] using (h'x v hv).hasLineDerivAt
/-- If a Lipschitz function has line derivatives in a dense set of directions, all of them given by
a single continuous linear map `L`, then it admits `L` as Fréchet derivative. -/
theorem hasFderivAt_of_hasLineDerivAt_of_closure {f : E â F}
(hf : LipschitzWith C f) {s : Set E} (hs : sphere 0 1 â closure s)
{L : E âL[â] F} {x : E} (hL : â v â s, HasLineDerivAt â f (L v) x v) :
HasFDerivAt f L x := by
rw [hasFDerivAt_iff_isLittleO_nhds_zero, isLittleO_iff]
intro ε εpos
obtain âšÎŽ, ÎŽpos, hÎŽâ© : â ÎŽ, 0 < ÎŽ â§ (C + âLâ + 1) * ÎŽ = ε :=
âšÎµ / (C + âLâ + 1), by positivity, mul_div_cancelâ ε (by positivity)â©
-- by compactness, cover the unit sphere by finitely many ÎŽ-balls centered in `s`
obtain âšq, hqs, q_fin, hqâ© : â q, q â s â§ q.Finite â§ sphere 0 1 â â y â q, ball y ÎŽ := by
have : sphere 0 1 â â y â s, ball y ÎŽ := by
apply hs.trans (fun z hz ⊠?_)
obtain âšy, ys, hyâ© : â y â s, dist z y < ÎŽ := Metric.mem_closure_iff.1 hz ÎŽ ÎŽpos
exact mem_biUnion ys hy
exact (isCompact_sphere 0 1).elim_finite_subcover_image (fun y _hy ⊠isOpen_ball) this
-- the finitely many directions of `q` share a quantitative smallness radius `r`
have I : âá¶ t in ð (0 : â), â v â q, âf (x + t ⢠v) - f x - t ⢠L vâ †Ύ * âtâ := by
apply (Finite.eventually_all q_fin).2 (fun v hv ⊠?_)
apply Asymptotics.IsLittleO.def ?_ ÎŽpos
exact hasLineDerivAt_iff_isLittleO_nhds_zero.1 (hL v (hqs hv))
obtain âšr, r_pos, hrâ© : â (r : â), 0 < r â§ â (t : â), âtâ < r â
â v â q, âf (x + t ⢠v) - f x - t ⢠L vâ †Ύ * âtâ := by
rcases Metric.mem_nhds_iff.1 I with âšr, r_pos, hrâ©
exact âšr, r_pos, fun t ht v hv ⊠hr (mem_ball_zero_iff.2 ht) v hvâ©
apply Metric.mem_nhds_iff.2 âšr, r_pos, fun v hv ⊠?_â©
rcases eq_or_ne v 0 with rfl|v_ne
· simp
-- write `v = Ï â¢ w` with `w` on the unit sphere and `Ï = âvâ`
obtain âšw, Ï, w_mem, hvw, hÏâ© : â w Ï, w â sphere 0 1 â§ v = Ï â¢ w â§ Ï = âvâ := by
refine âšâvââ»Â¹ ⢠v, âvâ, by simp [norm_smul, inv_mul_cancel (norm_ne_zero_iff.2 v_ne)], ?_, rflâ©
simp [smul_smul, mul_inv_cancel (norm_ne_zero_iff.2 v_ne)]
have norm_rho : âÏâ = Ï := by rw [hÏ, norm_norm]
have rho_pos : 0 â€ Ï := by simp [hÏ]
-- pick a direction `y â q` at distance at most `ÎŽ` from `w`
obtain âšy, yq, hyâ© : â y â q, âw - yâ < ÎŽ := by simpa [â dist_eq_norm] using hq w_mem
have : ây - wâ < ÎŽ := by rwa [norm_sub_rev]
-- compare `f` along `v` with `f` along the nearby controlled direction `Ï â¢ y`,
-- using the Lipschitz property for `f` and the operator norm bound for `L`
calc âf (x + v) - f x - L vâ
= âf (x + Ï â¢ w) - f x - Ï â¢ L wâ := by simp [hvw]
_ = â(f (x + Ï â¢ w) - f (x + Ï â¢ y)) + (Ï â¢ L y - Ï â¢ L w)
+ (f (x + Ï â¢ y) - f x - Ï â¢ L y)â := by congr; abel
_ †âf (x + Ï â¢ w) - f (x + Ï â¢ y)â + âÏ â¢ L y - Ï â¢ L wâ
+ âf (x + Ï â¢ y) - f x - Ï â¢ L yâ := norm_addâ_le _ _ _
_ †C * â(x + Ï â¢ w) - (x + Ï â¢ y)â + Ï * (âLâ * ây - wâ) + ÎŽ * Ï := by
gcongr
· exact hf.norm_sub_le _ _
· rw [â smul_sub, norm_smul, norm_rho]
gcongr
exact L.lipschitz.norm_sub_le _ _
· conv_rhs => rw [â norm_rho]
apply hr _ _ _ yq
simpa [norm_rho, hÏ] using hv
_ †C * (Ï * ÎŽ) + Ï * (âLâ * ÎŽ) + ÎŽ * Ï := by
simp only [add_sub_add_left_eq_sub, â smul_sub, norm_smul, norm_rho]; gcongr
_ = ((C + âLâ + 1) * ÎŽ) * Ï := by ring
_ = ε * âvâ := by rw [hÎŽ, hÏ]
/-- A real-valued function on a finite-dimensional space which is Lipschitz is
differentiable almost everywhere. Superseded by
`LipschitzWith.ae_differentiableAt` which works for functions taking value in any
finite-dimensional space. -/
theorem ae_differentiableAt_of_real (hf : LipschitzWith C f) :
âáµ x âÎŒ, DifferentiableAt â f x := by
-- apply the countable-directions result to a countable dense set, whose closure contains the
-- unit sphere, and upgrade line differentiability to Fréchet differentiability
obtain âšs, s_count, s_denseâ© : â (s : Set E), s.Countable â§ Dense s :=
TopologicalSpace.exists_countable_dense E
have hs : sphere 0 1 â closure s := by rw [s_dense.closure_eq]; exact subset_univ _
filter_upwards [hf.ae_exists_fderiv_of_countable s_count]
rintro x âšL, hLâ©
exact (hf.hasFderivAt_of_hasLineDerivAt_of_closure hs hL).differentiableAt
end LipschitzWith
variable [FiniteDimensional â F]
namespace LipschitzOnWith
/-- A real-valued function on a finite-dimensional space which is Lipschitz on a set is
differentiable almost everywhere in this set. Superseded by
`LipschitzOnWith.ae_differentiableWithinAt_of_mem` which works for functions taking value in any
finite-dimensional space. -/
theorem ae_differentiableWithinAt_of_mem_of_real (hf : LipschitzOnWith C f s) :
âáµ x âÎŒ, x â s â DifferentiableWithinAt â f s x := by
-- extend `f` to a globally Lipschitz function and apply the global result
obtain âšg, g_lip, hgâ© : â (g : E â â), LipschitzWith C g â§ EqOn f g s := hf.extend_real
filter_upwards [g_lip.ae_differentiableAt_of_real] with x hx xs
exact hx.differentiableWithinAt.congr hg (hg xs)
/-- A function on a finite-dimensional space which is Lipschitz on a set and taking values in a
product space is differentiable almost everywhere in this set. Superseded by
`LipschitzOnWith.ae_differentiableWithinAt_of_mem` which works for functions taking value in any
finite-dimensional space. -/
theorem ae_differentiableWithinAt_of_mem_pi
{ι : Type*} [Fintype ι] {f : E â ι â â} {s : Set E}
(hf : LipschitzOnWith C f s) : âáµ x âÎŒ, x â s â DifferentiableWithinAt â f s x := by
-- each coordinate of `f` is Lipschitz on `s`, hence ae differentiable by the scalar case
have A : â i : ι, LipschitzWith 1 (fun x : ι â â ⊠x i) := fun i => LipschitzWith.eval i
have : â i : ι, âáµ x âÎŒ, x â s â DifferentiableWithinAt â (fun x : E ⊠f x i) s x := fun i ⊠by
apply ae_differentiableWithinAt_of_mem_of_real
exact LipschitzWith.comp_lipschitzOnWith (A i) hf
-- differentiability coordinate-wise gives differentiability into the product
filter_upwards [ae_all_iff.2 this] with x hx xs
exact differentiableWithinAt_pi.2 (fun i ⊠hx i xs)
/-- *Rademacher's theorem*: a function between finite-dimensional real vector spaces which is
Lipschitz on a set is differentiable almost everywhere in this set. -/
theorem ae_differentiableWithinAt_of_mem {f : E â F} (hf : LipschitzOnWith C f s) :
âáµ x âÎŒ, x â s â DifferentiableWithinAt â f s x := by
-- identify `F` with a finite product of copies of `â` through a basis, and use the pi version
have A := (Basis.ofVectorSpace â F).equivFun.toContinuousLinearEquiv
suffices H : âáµ x âÎŒ, x â s â DifferentiableWithinAt â (A â f) s x by
filter_upwards [H] with x hx xs
have : f = (A.symm â A) â f := by
simp only [ContinuousLinearEquiv.symm_comp_self, Function.id_comp]
rw [this]
exact A.symm.differentiableAt.comp_differentiableWithinAt x (hx xs)
apply ae_differentiableWithinAt_of_mem_pi
exact A.lipschitz.comp_lipschitzOnWith hf
/-- *Rademacher's theorem*: a function between finite-dimensional real vector spaces which is
Lipschitz on a set is differentiable almost everywhere in this set, with respect to the
restricted measure. -/
theorem ae_differentiableWithinAt {f : E â F} (hf : LipschitzOnWith C f s)
(hs : MeasurableSet s) :
âáµ x â(ÎŒ.restrict s), DifferentiableWithinAt â f s x := by
rw [ae_restrict_iff' hs]
exact hf.ae_differentiableWithinAt_of_mem
end LipschitzOnWith
/-- *Rademacher's theorem*: a Lipschitz function between finite-dimensional real vector spaces is
differentiable almost everywhere. -/
theorem LipschitzWith.ae_differentiableAt {f : E â F} (h : LipschitzWith C f) :
âáµ x âÎŒ, DifferentiableAt â f x := by
-- specialize the on-set version to `s = univ`
rw [â lipschitzOnWith_univ] at h
simpa [differentiableWithinAt_univ] using h.ae_differentiableWithinAt_of_mem
|
Analysis\Calculus\SmoothSeries.lean | /-
Copyright (c) 2022 Sébastien Gouëzel. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Sébastien Gouëzel
-/
import Mathlib.Analysis.Calculus.ContDiff.Basic
import Mathlib.Analysis.Calculus.UniformLimitsDeriv
import Mathlib.Topology.Algebra.InfiniteSum.Module
import Mathlib.Analysis.NormedSpace.FunctionSeries
/-!
# Smoothness of series
We show that series of functions are differentiable, or smooth, when each individual
function in the series is, and additionally suitable uniform summable bounds are satisfied.
More specifically,
* `differentiable_tsum` ensures that a series of differentiable functions is differentiable.
* `contDiff_tsum` ensures that a series of smooth functions is smooth.
We also give versions of these statements which are localized to a set.
-/
open Set Metric TopologicalSpace Function Asymptotics Filter
open scoped Topology NNReal
variable {α β ð E F : Type*} [RCLike ð] [NormedAddCommGroup E] [NormedSpace ð E]
[NormedAddCommGroup F] [CompleteSpace F] {u : α â â}
/-! ### Differentiability -/
variable [NormedSpace ð F]
variable {f : α â E â F} {f' : α â E â E âL[ð] F} {g : α â ð â F} {g' : α â ð â F} {v : â â α â â}
{s : Set E} {t : Set ð} {xâ x : E} {yâ y : ð} {N : ââ}
/-- Consider a series of functions `â' n, f n x` on a preconnected open set. If the series converges
at a point, and all functions in the series are differentiable with a summable bound on the
derivatives, then the series converges everywhere on the set. -/
theorem summable_of_summable_hasFDerivAt_of_isPreconnected (hu : Summable u) (hs : IsOpen s)
(h's : IsPreconnected s) (hf : â n x, x â s â HasFDerivAt (f n) (f' n x) x)
(hf' : â n x, x â s â âf' n xâ †u n) (hxâ : xâ â s) (hf0 : Summable (f · xâ))
(hx : x â s) : Summable fun n => f n x := by
haveI := Classical.decEq α
rw [summable_iff_cauchySeq_finset] at hf0 â¢
-- the partial sums of the derivatives form a uniform Cauchy sequence on `s`, which propagates
-- the Cauchy property of the partial sums of `f` from `xâ` to all of `s`
have A : UniformCauchySeqOn (fun t : Finset α => fun x => â i â t, f' i x) atTop s :=
(tendstoUniformlyOn_tsum hu hf').uniformCauchySeqOn
-- Porting note: Lean 4 failed to find `f` by unification
refine cauchy_map_of_uniformCauchySeqOn_fderiv (f := fun t x ⊠â i â t, f i x)
hs h's A (fun t y hy => ?_) hxâ hx hf0
exact HasFDerivAt.sum fun i _ => hf i y hy
/-- Consider a series of functions `â' n, f n x` on a preconnected open set. If the series converges
at a point, and all functions in the series are differentiable with a summable bound on the
derivatives, then the series converges everywhere on the set. One-dimensional version of
`summable_of_summable_hasFDerivAt_of_isPreconnected`. -/
theorem summable_of_summable_hasDerivAt_of_isPreconnected (hu : Summable u) (ht : IsOpen t)
(h't : IsPreconnected t) (hg : â n y, y â t â HasDerivAt (g n) (g' n y) y)
(hg' : â n y, y â t â âg' n yâ †u n) (hyâ : yâ â t) (hg0 : Summable (g · yâ))
(hy : y â t) : Summable fun n => g n y := by
-- rephrase the derivatives as Fréchet derivatives and apply the general version
simp_rw [hasDerivAt_iff_hasFDerivAt] at hg
refine summable_of_summable_hasFDerivAt_of_isPreconnected hu ht h't hg ?_ hyâ hg0 hy
simpa? says simpa only [ContinuousLinearMap.norm_smulRight_apply, norm_one, one_mul]
/-- Consider a series of functions `â' n, f n x` on a preconnected open set. If the series converges
at a point, and all functions in the series are differentiable with a summable bound on the
derivatives, then the series is differentiable on the set and its derivative is the sum of the
derivatives. -/
theorem hasFDerivAt_tsum_of_isPreconnected (hu : Summable u) (hs : IsOpen s)
(h's : IsPreconnected s) (hf : â n x, x â s â HasFDerivAt (f n) (f' n x) x)
(hf' : â n x, x â s â âf' n xâ †u n) (hxâ : xâ â s) (hf0 : Summable fun n => f n xâ)
(hx : x â s) : HasFDerivAt (fun y => â' n, f n y) (â' n, f' n x) x := by
classical
-- the partial sums converge pointwise on `s`, by the previous summability result
have A :
â x : E, x â s â Tendsto (fun t : Finset α => â n â t, f n x) atTop (ð (â' n, f n x)) := by
intro y hy
apply Summable.hasSum
exact summable_of_summable_hasFDerivAt_of_isPreconnected hu hs h's hf hf' hxâ hf0 hy
-- combined with the uniform convergence of the derivative partial sums, the limit is
-- differentiable with the expected derivative
refine hasFDerivAt_of_tendstoUniformlyOn hs (tendstoUniformlyOn_tsum hu hf')
(fun t y hy => ?_) A _ hx
exact HasFDerivAt.sum fun n _ => hf n y hy
/-- Consider a series of functions `â' n, f n x` on a preconnected open set. If the series converges
at a point, and all functions in the series are differentiable with a summable bound on the
derivatives, then the series is differentiable on the set and its derivative is the sum of the
derivatives. One-dimensional version of `hasFDerivAt_tsum_of_isPreconnected`. -/
theorem hasDerivAt_tsum_of_isPreconnected (hu : Summable u) (ht : IsOpen t)
(h't : IsPreconnected t) (hg : â n y, y â t â HasDerivAt (g n) (g' n y) y)
(hg' : â n y, y â t â âg' n yâ †u n) (hyâ : yâ â t) (hg0 : Summable fun n => g n yâ)
(hy : y â t) : HasDerivAt (fun z => â' n, g n z) (â' n, g' n y) y := by
simp_rw [hasDerivAt_iff_hasFDerivAt] at hg â¢
convert hasFDerivAt_tsum_of_isPreconnected hu ht h't hg ?_ hyâ hg0 hy
-- identify the `smulRight` of the sum with the sum of the `smulRight`s
· exact (ContinuousLinearMap.smulRightL ð ð F 1).map_tsum <|
.of_norm_bounded u hu fun n ⊠hg' n y hy
· simpa? says simpa only [ContinuousLinearMap.norm_smulRight_apply, norm_one, one_mul]
/-- Consider a series of functions `â' n, f n x`. If the series converges at a
point, and all functions in the series are differentiable with a summable bound on the derivatives,
then the series converges everywhere. -/
theorem summable_of_summable_hasFDerivAt (hu : Summable u)
(hf : â n x, HasFDerivAt (f n) (f' n x) x) (hf' : â n x, âf' n xâ †u n)
(hf0 : Summable fun n => f n xâ) (x : E) : Summable fun n => f n x := by
-- restrict scalars so that `univ` is a preconnected open set over `â`
let _ : NormedSpace â E := NormedSpace.restrictScalars â ð _
exact summable_of_summable_hasFDerivAt_of_isPreconnected hu isOpen_univ isPreconnected_univ
(fun n x _ => hf n x) (fun n x _ => hf' n x) (mem_univ _) hf0 (mem_univ _)
/-- Consider a series of functions `â' n, f n x`. If the series converges at a
point, and all functions in the series are differentiable with a summable bound on the derivatives,
then the series converges everywhere. One-dimensional version of
`summable_of_summable_hasFDerivAt`. -/
theorem summable_of_summable_hasDerivAt (hu : Summable u)
(hg : â n y, HasDerivAt (g n) (g' n y) y) (hg' : â n y, âg' n yâ †u n)
(hg0 : Summable fun n => g n yâ) (y : ð) : Summable fun n => g n y := by
-- specialize the preconnected version to `univ`
exact summable_of_summable_hasDerivAt_of_isPreconnected hu isOpen_univ isPreconnected_univ
(fun n x _ => hg n x) (fun n x _ => hg' n x) (mem_univ _) hg0 (mem_univ _)
/-- Consider a series of functions `â' n, f n x`. If the series converges at a
point, and all functions in the series are differentiable with a summable bound on the derivatives,
then the series is differentiable and its derivative is the sum of the derivatives. -/
theorem hasFDerivAt_tsum (hu : Summable u) (hf : â n x, HasFDerivAt (f n) (f' n x) x)
(hf' : â n x, âf' n xâ †u n) (hf0 : Summable fun n => f n xâ) (x : E) :
HasFDerivAt (fun y => â' n, f n y) (â' n, f' n x) x := by
-- restrict scalars so that `univ` is a preconnected open set over `â`
let A : NormedSpace â E := NormedSpace.restrictScalars â ð _
exact hasFDerivAt_tsum_of_isPreconnected hu isOpen_univ isPreconnected_univ
(fun n x _ => hf n x) (fun n x _ => hf' n x) (mem_univ _) hf0 (mem_univ _)
/-- Consider a series of functions `â' n, f n x`. If the series converges at a
point, and all functions in the series are differentiable with a summable bound on the derivatives,
then the series is differentiable and its derivative is the sum of the derivatives.
One-dimensional version of `hasFDerivAt_tsum`. -/
theorem hasDerivAt_tsum (hu : Summable u) (hg : â n y, HasDerivAt (g n) (g' n y) y)
(hg' : â n y, âg' n yâ †u n) (hg0 : Summable fun n => g n yâ) (y : ð) :
HasDerivAt (fun z => â' n, g n z) (â' n, g' n y) y := by
-- specialize the preconnected version to `univ`
exact hasDerivAt_tsum_of_isPreconnected hu isOpen_univ isPreconnected_univ
(fun n y _ => hg n y) (fun n y _ => hg' n y) (mem_univ _) hg0 (mem_univ _)
/-- Consider a series of functions `â' n, f n x`. If all functions in the series are differentiable
with a summable bound on the derivatives, then the series is differentiable.
Note that our assumptions do not ensure the pointwise convergence, but if there is no pointwise
convergence then the series is zero everywhere so the result still holds. -/
theorem differentiable_tsum (hu : Summable u) (hf : â n x, HasFDerivAt (f n) (f' n x) x)
(hf' : â n x, âf' n xâ †u n) : Differentiable ð fun y => â' n, f n y := by
by_cases h : â xâ, Summable fun n => f n xâ
-- if the series converges somewhere, it converges (and is differentiable) everywhere
· rcases h with âšxâ, hf0â©
intro x
exact (hasFDerivAt_tsum hu hf hf' hf0 x).differentiableAt
-- otherwise the `tsum` is identically zero, which is trivially differentiable
· push_neg at h
have : (fun x => â' n, f n x) = 0 := by ext1 x; exact tsum_eq_zero_of_not_summable (h x)
rw [this]
exact differentiable_const 0
/-- Consider a series of functions `â' n, f n x`. If all functions in the series are differentiable
with a summable bound on the derivatives, then the series is differentiable.
Note that our assumptions do not ensure the pointwise convergence, but if there is no pointwise
convergence then the series is zero everywhere so the result still holds.
One-dimensional version of `differentiable_tsum`. -/
theorem differentiable_tsum' (hu : Summable u) (hg : â n y, HasDerivAt (g n) (g' n y) y)
(hg' : â n y, âg' n yâ †u n) : Differentiable ð fun z => â' n, g n z := by
simp_rw [hasDerivAt_iff_hasFDerivAt] at hg
refine differentiable_tsum hu hg ?_
simpa? says simpa only [ContinuousLinearMap.norm_smulRight_apply, norm_one, one_mul]
/-- Pointwise formula: under summable uniform bounds on the derivatives, the derivative of the
sum of a series at a point is the sum of the derivatives at that point. -/
theorem fderiv_tsum_apply (hu : Summable u) (hf : â n, Differentiable ð (f n))
(hf' : â n x, âfderiv ð (f n) xâ †u n) (hf0 : Summable fun n => f n xâ) (x : E) :
fderiv ð (fun y => â' n, f n y) x = â' n, fderiv ð (f n) x :=
(hasFDerivAt_tsum hu (fun n x => (hf n x).hasFDerivAt) hf' hf0 _).fderiv
/-- One-dimensional version of `fderiv_tsum_apply`. -/
theorem deriv_tsum_apply (hu : Summable u) (hg : â n, Differentiable ð (g n))
(hg' : â n y, âderiv (g n) yâ †u n) (hg0 : Summable fun n => g n yâ) (y : ð) :
deriv (fun z => â' n, g n z) y = â' n, deriv (g n) y :=
(hasDerivAt_tsum hu (fun n y => (hg n y).hasDerivAt) hg' hg0 _).deriv
/-- Under summable uniform bounds on the derivatives, the derivative of the sum of a series is
the sum of the derivatives, as functions. -/
theorem fderiv_tsum (hu : Summable u) (hf : â n, Differentiable ð (f n))
(hf' : â n x, âfderiv ð (f n) xâ †u n) (hf0 : Summable fun n => f n xâ) :
(fderiv ð fun y => â' n, f n y) = fun x => â' n, fderiv ð (f n) x := by
ext1 x
exact fderiv_tsum_apply hu hf hf' hf0 x
/-- One-dimensional version of `fderiv_tsum`. -/
theorem deriv_tsum (hu : Summable u) (hg : â n, Differentiable ð (g n))
(hg' : â n y, âderiv (g n) yâ †u n) (hg0 : Summable fun n => g n yâ) :
(deriv fun y => â' n, g n y) = fun y => â' n, deriv (g n) y := by
ext1 x
exact deriv_tsum_apply hu hg hg' hg0 x
/-! ### Higher smoothness -/
/-- Consider a series of smooth functions, with summable uniform bounds on the successive
derivatives. Then the iterated derivative of the sum is the sum of the iterated derivative.
The proof is by induction on the order `k` of the derivative, applying `fderiv_tsum` at each
step. -/
theorem iteratedFDeriv_tsum (hf : â i, ContDiff ð N (f i))
(hv : â k : â, (k : ââ) †N â Summable (v k))
(h'f : â (k : â) (i : α) (x : E), (k : ââ) †N â âiteratedFDeriv ð k (f i) xâ †v k i) {k : â}
(hk : (k : ââ) †N) :
(iteratedFDeriv ð k fun y => â' n, f n y) = fun x => â' n, iteratedFDeriv ð k (f n) x := by
induction' k with k IH
-- base case: the 0-th iterated derivative is the function itself, up to an isometry
· ext1 x
simp_rw [iteratedFDeriv_zero_eq_comp]
exact (continuousMultilinearCurryFin0 ð E F).symm.toContinuousLinearEquiv.map_tsum
-- inductive step: differentiate the identity at order `k` once more
· have h'k : (k : ââ) < N := lt_of_lt_of_le (WithTop.coe_lt_coe.2 (Nat.lt_succ_self _)) hk
-- the series of `k`-th derivatives converges at (say) `0`, as required by `fderiv_tsum`
have A : Summable fun n => iteratedFDeriv ð k (f n) 0 :=
.of_norm_bounded (v k) (hv k h'k.le) fun n => h'f k n 0 h'k.le
simp_rw [iteratedFDeriv_succ_eq_comp_left, IH h'k.le]
rw [fderiv_tsum (hv _ hk) (fun n => (hf n).differentiable_iteratedFDeriv h'k) _ A]
· ext1 x
exact (continuousMultilinearCurryLeftEquiv ð
(fun _ : Fin (k + 1) => E) F).toContinuousLinearEquiv.map_tsum
-- the norm bound at order `k + 1`, transported through the currying isometry
· intro n x
simpa only [iteratedFDeriv_succ_eq_comp_left, LinearIsometryEquiv.norm_map, comp_apply]
using h'f k.succ n x hk
/-- Consider a series of smooth functions, with summable uniform bounds on the successive
derivatives. Then the iterated derivative of the sum is the sum of the iterated derivative.
Pointwise version of `iteratedFDeriv_tsum`. -/
theorem iteratedFDeriv_tsum_apply (hf : â i, ContDiff ð N (f i))
(hv : â k : â, (k : ââ) †N â Summable (v k))
(h'f : â (k : â) (i : α) (x : E), (k : ââ) †N â âiteratedFDeriv ð k (f i) xâ †v k i) {k : â}
(hk : (k : ââ) †N) (x : E) :
iteratedFDeriv ð k (fun y => â' n, f n y) x = â' n, iteratedFDeriv ð k (f n) x := by
rw [iteratedFDeriv_tsum hf hv h'f hk]
/-- Consider a series of functions `â' i, f i x`. Assume that each individual function `f i` is of
class `C^N`, and moreover there is a uniform summable upper bound on the `k`-th derivative
for each `k †N`. Then the series is also `C^N`. -/
theorem contDiff_tsum (hf : â i, ContDiff ð N (f i)) (hv : â k : â, (k : ââ) †N â Summable (v k))
(h'f : â (k : â) (i : α) (x : E), (k : ââ) †N â âiteratedFDeriv ð k (f i) xâ †v k i) :
ContDiff ð N fun x => â' i, f i x := by
-- characterize `C^N` through continuity and differentiability of the iterated derivatives
rw [contDiff_iff_continuous_differentiable]
constructor
-- continuity of the `m`-th derivative, `m †N`: a uniformly converging series of
-- continuous functions
· intro m hm
rw [iteratedFDeriv_tsum hf hv h'f hm]
refine continuous_tsum ?_ (hv m hm) ?_
· intro i
exact ContDiff.continuous_iteratedFDeriv hm (hf i)
· intro n x
exact h'f _ _ _ hm
-- differentiability of the `m`-th derivative, `m < N`: apply `differentiable_tsum` with the
-- summable bounds at order `m + 1`
· intro m hm
have h'm : ((m + 1 : â) : ââ) †N := by
simpa only [ENat.coe_add, ENat.coe_one] using ENat.add_one_le_of_lt hm
rw [iteratedFDeriv_tsum hf hv h'f hm.le]
have A :
â n x, HasFDerivAt (iteratedFDeriv ð m (f n)) (fderiv ð (iteratedFDeriv ð m (f n)) x) x :=
fun n x => (ContDiff.differentiable_iteratedFDeriv hm (hf n)).differentiableAt.hasFDerivAt
refine differentiable_tsum (hv _ h'm) A fun n x => ?_
rw [fderiv_iteratedFDeriv, comp_apply, LinearIsometryEquiv.norm_map]
exact h'f _ _ _ h'm
/-- Consider a series of functions `â' i, f i x`. Assume that each individual function `f i` is of
class `C^N`, and moreover there is a uniform summable upper bound on the `k`-th derivative
for each `k †N` (except maybe for finitely many `i`s). Then the series is also `C^N`. -/
theorem contDiff_tsum_of_eventually (hf : â i, ContDiff ð N (f i))
(hv : â k : â, (k : ââ) †N â Summable (v k))
(h'f :
â k : â,
(k : ââ) †N â
âá¶ i in (Filter.cofinite : Filter α), â x : E, âiteratedFDeriv ð k (f i) xâ †v k i) :
ContDiff ð N fun x => â' i, f i x := by
classical
-- it suffices to prove the statement at every finite order `m †N`
refine contDiff_iff_forall_nat_le.2 fun m hm => ?_
-- `t` is the set of indices violating one of the bounds up to order `m`; it is finite by
-- the cofinite assumption
let t : Set α :=
{ i : α | ¬â k : â, k â Finset.range (m + 1) â â x, âiteratedFDeriv ð k (f i) xâ †v k i }
have ht : Set.Finite t :=
haveI A :
âá¶ i in (Filter.cofinite : Filter α),
â k : â, k â Finset.range (m + 1) â â x : E, âiteratedFDeriv ð k (f i) xâ †v k i := by
rw [eventually_all_finset]
intro i hi
apply h'f
simp only [Finset.mem_range_succ_iff] at hi
exact (WithTop.coe_le_coe.2 hi).trans hm
eventually_cofinite.2 A
let T : Finset α := ht.toFinset
-- split the series into the finitely many bad indices (a finite sum of `C^m` functions) and
-- the well-behaved tail, to which `contDiff_tsum` applies
have : (fun x => â' i, f i x) = (fun x => â i â T, f i x) +
fun x => â' i : { i // i â T }, f i x := by
ext1 x
refine (sum_add_tsum_subtype_compl ?_ T).symm
refine .of_norm_bounded_eventually _ (hv 0 (zero_le _)) ?_
filter_upwards [h'f 0 (zero_le _)] with i hi
simpa only [norm_iteratedFDeriv_zero] using hi x
rw [this]
apply (ContDiff.sum fun i _ => (hf i).of_le hm).add
have h'u : â k : â, (k : ââ) †m â Summable (v k â ((â) : { i // i â T } â α)) := fun k hk =>
(hv k (hk.trans hm)).subtype _
refine contDiff_tsum (fun i => (hf i).of_le hm) h'u ?_
-- indices outside `T` satisfy all the bounds up to order `m`, by definition of `t`
rintro k âši, hiâ© x hk
simp only [t, T, Finite.mem_toFinset, mem_setOf_eq, Finset.mem_range, not_forall, not_le,
exists_prop, not_exists, not_and, not_lt] at hi
exact hi k (Nat.lt_succ_iff.2 (WithTop.coe_le_coe.1 hk)) x
|
Analysis\Calculus\TangentCone.lean | /-
Copyright (c) 2019 Sébastien Gouëzel. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Sébastien Gouëzel
-/
import Mathlib.Analysis.Convex.Topology
import Mathlib.Analysis.Normed.Module.Basic
import Mathlib.Analysis.SpecificLimits.Basic
/-!
# Tangent cone
In this file, we define two predicates `UniqueDiffWithinAt ð s x` and `UniqueDiffOn ð s`
ensuring that, if a function has two derivatives, then they have to coincide. As a direct
definition of this fact (quantifying on all target types and all functions) would depend on
universes, we use a more intrinsic definition: if all the possible tangent directions to the set
`s` at the point `x` span a dense subset of the whole subset, it is easy to check that the
derivative has to be unique.
Therefore, we introduce the set of all tangent directions, named `tangentConeAt`,
and express `UniqueDiffWithinAt` and `UniqueDiffOn` in terms of it.
One should however think of this definition as an implementation detail: the only reason to
introduce the predicates `UniqueDiffWithinAt` and `UniqueDiffOn` is to ensure the uniqueness
of the derivative. This is why their names reflect their uses, and not how they are defined.
## Implementation details
Note that this file is imported by `Fderiv.Basic`. Hence, derivatives are not defined yet. The
property of uniqueness of the derivative is therefore proved in `Fderiv.Basic`, but based on the
properties of the tangent cone we prove here.
-/
variable (ð : Type*) [NontriviallyNormedField ð]
open Filter Set
open Topology
section TangentCone
variable {E : Type*} [AddCommMonoid E] [Module ð E] [TopologicalSpace E]
/-- The set of all tangent directions to the set `s` at the point `x`: a direction `y` is tangent
when there are points `x + d n` of `s` and scaling factors `c n` blowing up in norm such that the
rescaled directions `c n ⢠d n` converge to `y` (the blow-up of `c n` forces `d n` to become
small, so the points `x + d n` approach `x` inside `s`). -/
def tangentConeAt (s : Set E) (x : E) : Set E :=
{ y : E | â (c : â â ð) (d : â â E),
(âá¶ n in atTop, x + d n â s) â§
Tendsto (fun n => âc nâ) atTop atTop â§
Tendsto (fun n => c n ⢠d n) atTop (ð y) }
/-- A property ensuring that the tangent cone to `s` at `x` spans a dense subset of the whole space.
The main role of this property is to ensure that the differential within `s` at `x` is unique,
hence this name. The uniqueness it asserts is proved in `UniqueDiffWithinAt.eq` in `Fderiv.Basic`.
To avoid pathologies in dimension 0, we also require that `x` belongs to the closure of `s` (which
is automatic when `E` is not `0`-dimensional). -/
@[mk_iff]
structure UniqueDiffWithinAt (s : Set E) (x : E) : Prop where
  -- The linear span of the tangent directions at `x` is dense in `E`.
  dense_tangentCone : Dense (Submodule.span ð (tangentConeAt ð s x) : Set E)
  -- `x` is approachable from `s`; rules out degenerate cases in dimension 0.
  mem_closure : x â closure s
/-- A property ensuring that the tangent cone to `s` at any of its points spans a dense subset of
the whole space. The main role of this property is to ensure that the differential along `s` is
unique, hence this name. The uniqueness it asserts is proved in `UniqueDiffOn.eq` in
`Fderiv.Basic`. -/
def UniqueDiffOn (s : Set E) : Prop :=
  -- Pointwise version of `UniqueDiffWithinAt`, required at every point of `s`.
  â x â s, UniqueDiffWithinAt ð s x
end TangentCone
variable {E : Type*} [NormedAddCommGroup E] [NormedSpace ð E]
variable {F : Type*} [NormedAddCommGroup F] [NormedSpace ð F]
variable {G : Type*} [NormedAddCommGroup G] [NormedSpace â G]
variable {ð} {x y : E} {s t : Set E}
section TangentCone
-- This section is devoted to the properties of the tangent cone.
open NormedField
/-- Power-scaling criterion for tangency: if `x + r ^ n` smul `y` lies in `s` frequently for
some nonzero `r` of norm `< 1`, then `y` is a tangent direction to `s` at `x`.
Witnessed by `c n = (r ^ n)` inverse and `d n = r ^ n` smul `y`. -/
theorem mem_tangentConeAt_of_pow_smul {r : ð} (hrâ : r â 0) (hr : ârâ < 1)
    (hs : âá¶ n : â in atTop, x + r ^ n ⢠y â s) : y â tangentConeAt ð s x := by
  refine âšfun n ⊠(r ^ n)â»Â¹, fun n ⊠r ^ n ⢠y, hs, ?_, ?_â©
  -- The scalars blow up: their norms are powers of a quantity `> 1`.
  · simp only [norm_inv, norm_pow, â inv_pow]
    exact tendsto_pow_atTop_atTop_of_one_lt <| one_lt_inv (norm_pos_iff.2 hrâ) hr
  -- The rescaled sequence is constantly `y`, hence converges trivially.
  · simp only [inv_smul_smulâ (pow_ne_zero _ hrâ), tendsto_const_nhds]
/-- In the whole space, every vector is a tangent direction. -/
theorem tangentCone_univ : tangentConeAt ð univ x = univ :=
  -- Pick any scalar of norm strictly between 0 and 1 (possible in a nontrivially
  -- normed field) and apply the power-scaling criterion with trivial membership.
  let âš_r, hrâ, hrâ© := exists_norm_lt_one ð
  eq_univ_of_forall fun _ ⊠mem_tangentConeAt_of_pow_smul (norm_pos_iff.1 hrâ) hr <|
    eventually_of_forall fun _ ⊠mem_univ _
/-- The tangent cone is monotone with respect to set inclusion. -/
theorem tangentCone_mono (h : s â t) : tangentConeAt ð s x â tangentConeAt ð t x := by
  rintro y âšc, d, ds, ctop, climâ©
  -- Reuse the same witnesses; only the eventual-membership condition needs upgrading.
  exact âšc, d, mem_of_superset ds fun n hn => h hn, ctop, climâ©
/-- Auxiliary lemma ensuring that, under the assumptions defining the tangent cone,
the sequence `d` tends to 0 at infinity. -/
theorem tangentConeAt.lim_zero {α : Type*} (l : Filter α) {c : α â ð} {d : α â E}
    (hc : Tendsto (fun n => âc nâ) l atTop) (hd : Tendsto (fun n => c n ⢠d n) l (ð y)) :
    Tendsto d l (ð 0) := by
  -- The inverse norms of `c` tend to 0, and the norms of the rescaled sequence
  -- tend to the norm of `y`; hence their product tends to 0.
  have A : Tendsto (fun n => âc nââ»Â¹) l (ð 0) := tendsto_inv_atTop_zero.comp hc
  have B : Tendsto (fun n => âc n ⢠d nâ) l (ð âyâ) := (continuous_norm.tendsto _).comp hd
  have C : Tendsto (fun n => âc nââ»Â¹ * âc n ⢠d nâ) l (ð (0 * âyâ)) := A.mul B
  rw [zero_mul] at C
  -- Eventually `c n` is nonzero, so that product equals the norm of `d n` exactly.
  have : âá¶ n in l, âc nââ»Â¹ * âc n ⢠d nâ = âd nâ := by
    refine (eventually_ne_of_tendsto_norm_atTop hc 0).mono fun n hn => ?_
    rw [norm_smul, â mul_assoc, inv_mul_cancel, one_mul]
    rwa [Ne, norm_eq_zero]
  have D : Tendsto (fun n => âd nâ) l (ð 0) := Tendsto.congr' this C
  rw [tendsto_zero_iff_norm_tendsto_zero]
  exact D
/-- The tangent cone is monotone along the neighborhood-within filters:
only the germ of the set near `x` matters. -/
theorem tangentCone_mono_nhds (h : ð[s] x †ð[t] x) :
    tangentConeAt ð s x â tangentConeAt ð t x := by
  rintro y âšc, d, ds, ctop, climâ©
  refine âšc, d, ?_, ctop, climâ©
  -- It suffices that `x + d n` tends to `x` within `t`; membership then follows.
  suffices Tendsto (fun n => x + d n) atTop (ð[t] x) from
    tendsto_principal.1 (tendsto_inf.1 this).2
  refine (tendsto_inf.2 âš?_, tendsto_principal.2 dsâ©).mono_right h
  -- `d n` tends to 0 by `tangentConeAt.lim_zero`, so `x + d n` tends to `x`.
  simpa only [add_zero] using tendsto_const_nhds.add (tangentConeAt.lim_zero atTop ctop clim)
/-- Tangent cone of `s` at `x` depends only on `ð[s] x`. -/
theorem tangentCone_congr (h : ð[s] x = ð[t] x) : tangentConeAt ð s x = tangentConeAt ð t x :=
  -- Antisymmetry from monotonicity (`tangentCone_mono_nhds`) in both directions.
  Subset.antisymm (tangentCone_mono_nhds <| le_of_eq h) (tangentCone_mono_nhds <| le_of_eq h.symm)
/-- Intersecting with a neighborhood of the point does not change the tangent cone. -/
theorem tangentCone_inter_nhds (ht : t â ð x) : tangentConeAt ð (s â© t) x = tangentConeAt ð s x :=
  tangentCone_congr (nhdsWithin_restrict' _ ht).symm
/-- The tangent cone of a product contains the tangent cone of its left factor. -/
theorem subset_tangentCone_prod_left {t : Set F} {y : F} (ht : y â closure t) :
    LinearMap.inl ð E F '' tangentConeAt ð s x â tangentConeAt ð (s ÃË¢ t) (x, y) := by
  rintro _ âšv, âšc, d, hd, hc, hyâ©, rflâ©
  -- For the second factor, pick perturbations of `y` inside `t` that are so small that
  -- even after rescaling by `c n` they are bounded by `(1/2) ^ n`, hence squeezed to 0.
  have : â n, â d', y + d' â t â§ âc n ⢠d'â < ((1 : â) / 2) ^ n := by
    intro n
    rcases mem_closure_iff_nhds.1 ht _
        (eventually_nhds_norm_smul_sub_lt (c n) y (pow_pos one_half_pos n)) with
      âšz, hz, hztâ©
    exact âšz - y, by simpa using hzt, by simpa using hzâ©
  choose d' hd' using this
  refine âšc, fun n => (d n, d' n), ?_, hc, ?_â©
  -- Eventual membership in the product set, componentwise.
  · show âá¶ n in atTop, (x, y) + (d n, d' n) â s ÃË¢ t
    filter_upwards [hd] with n hn
    simp [hn, (hd' n).1]
  -- Convergence: first component tends to `v`, second is squeezed to 0.
  · apply Tendsto.prod_mk_nhds hy _
    refine squeeze_zero_norm (fun n => (hd' n).2.le) ?_
    exact tendsto_pow_atTop_nhds_zero_of_lt_one one_half_pos.le one_half_lt_one
/-- The tangent cone of a product contains the tangent cone of its right factor. -/
theorem subset_tangentCone_prod_right {t : Set F} {y : F} (hs : x â closure s) :
    LinearMap.inr ð E F '' tangentConeAt ð t y â tangentConeAt ð (s ÃË¢ t) (x, y) := by
  rintro _ âšw, âšc, d, hd, hc, hyâ©, rflâ©
  -- Mirror image of `subset_tangentCone_prod_left`: perturb the first coordinate
  -- inside `s` by vectors small enough to be squeezed to 0 after rescaling.
  have : â n, â d', x + d' â s â§ âc n ⢠d'â < ((1 : â) / 2) ^ n := by
    intro n
    rcases mem_closure_iff_nhds.1 hs _
        (eventually_nhds_norm_smul_sub_lt (c n) x (pow_pos one_half_pos n)) with
      âšz, hz, hzsâ©
    exact âšz - x, by simpa using hzs, by simpa using hzâ©
  choose d' hd' using this
  refine âšc, fun n => (d' n, d n), ?_, hc, ?_â©
  · show âá¶ n in atTop, (x, y) + (d' n, d n) â s ÃË¢ t
    filter_upwards [hd] with n hn
    simp [hn, (hd' n).1]
  · apply Tendsto.prod_mk_nhds _ hy
    refine squeeze_zero_norm (fun n => (hd' n).2.le) ?_
    exact tendsto_pow_atTop_nhds_zero_of_lt_one one_half_pos.le one_half_lt_one
/-- The tangent cone of a product contains the tangent cone of each factor. -/
theorem mapsTo_tangentCone_pi {ι : Type*} [DecidableEq ι] {E : ι â Type*}
    [â i, NormedAddCommGroup (E i)] [â i, NormedSpace ð (E i)] {s : â i, Set (E i)} {x : â i, E i}
    {i : ι} (hi : â j â i, x j â closure (s j)) :
    MapsTo (LinearMap.single i : E i ââ[ð] â j, E j) (tangentConeAt ð (s i) (x i))
      (tangentConeAt ð (Set.pi univ s) x) := by
  rintro w âšc, d, hd, hc, hyâ©
  -- For every coordinate other than `i`, choose perturbations in `s j` that stay
  -- bounded by `(1/2) ^ n` after rescaling, so they are squeezed to 0 in the limit.
  have : â n, â j â i, â d', x j + d' â s j â§ âc n ⢠d'â < (1 / 2 : â) ^ n := fun n j hj ⊠by
    rcases mem_closure_iff_nhds.1 (hi j hj) _
        (eventually_nhds_norm_smul_sub_lt (c n) (x j) (pow_pos one_half_pos n)) with
      âšz, hz, hzsâ©
    exact âšz - x j, by simpa using hzs, by simpa using hzâ©
  choose! d' hd's hcd' using this
  -- Combine: coordinate `i` follows the original sequence `d`, the others follow `d'`.
  refine âšc, fun n => Function.update (d' n) i (d n), hd.mono fun n hn j _ => ?_, hc,
    tendsto_pi_nhds.2 fun j => ?_â©
  · rcases em (j = i) with (rfl | hj) <;> simp [*]
  · rcases em (j = i) with (rfl | hj)
    · simp [hy]
    -- Off-diagonal coordinates converge to 0 by the squeeze theorem.
    · suffices Tendsto (fun n => c n ⢠d' n j) atTop (ð 0) by simpa [hj]
      refine squeeze_zero_norm (fun n => (hcd' n j hj).le) ?_
      exact tendsto_pow_atTop_nhds_zero_of_lt_one one_half_pos.le one_half_lt_one
/-- If a subset of a real vector space contains an open segment, then the direction of this
segment belongs to the tangent cone at its endpoints. -/
theorem mem_tangentCone_of_openSegment_subset {s : Set G} {x y : G} (h : openSegment â x y â s) :
    y - x â tangentConeAt â s x := by
  -- Use the power-scaling criterion with ratio 1/2: for nonzero `n`, the point at
  -- parameter `(1/2) ^ n` lies on the open segment, hence in `s`.
  refine mem_tangentConeAt_of_pow_smul one_half_pos.ne' (by norm_num) ?_
  refine (eventually_ne_atTop 0).mono fun n hn ⊠(h ?_)
  rw [openSegment_eq_image]
  refine âš(1 / 2) ^ n, âš?_, ?_â©, ?_â©
  · exact pow_pos one_half_pos _
  · exact pow_lt_one one_half_pos.le one_half_lt_one hn
  · simp only [sub_smul, one_smul, smul_sub]; abel
/-- If a subset of a real vector space contains a segment, then the direction of this
segment belongs to the tangent cone at its endpoints. -/
theorem mem_tangentCone_of_segment_subset {s : Set G} {x y : G} (h : segment â x y â s) :
    y - x â tangentConeAt â s x :=
  mem_tangentCone_of_openSegment_subset ((openSegment_subset_segment â x y).trans h)
end TangentCone
section UniqueDiff
/-!
### Properties of `UniqueDiffWithinAt` and `UniqueDiffOn`
This section is devoted to properties of the predicates `UniqueDiffWithinAt` and `UniqueDiffOn`. -/
theorem UniqueDiffOn.uniqueDiffWithinAt {s : Set E} {x} (hs : UniqueDiffOn ð s) (h : x â s) :
    UniqueDiffWithinAt ð s x :=
  hs x h
/-- The whole space has unique differentiability at every point. -/
theorem uniqueDiffWithinAt_univ : UniqueDiffWithinAt ð univ x := by
  rw [uniqueDiffWithinAt_iff, tangentCone_univ]
  simp
theorem uniqueDiffOn_univ : UniqueDiffOn ð (univ : Set E) :=
  fun _ _ => uniqueDiffWithinAt_univ
-- Vacuously true: there is no point of the empty set to check.
theorem uniqueDiffOn_empty : UniqueDiffOn ð (â
    : Set E) :=
  fun _ hx => hx.elim
theorem UniqueDiffWithinAt.congr_pt (h : UniqueDiffWithinAt ð s x) (hy : x = y) :
    UniqueDiffWithinAt ð s y := hy âž h
/-- Unique differentiability transfers along an inequality of neighborhood-within filters:
both the tangent cone and the closure condition depend only on these filters. -/
theorem UniqueDiffWithinAt.mono_nhds (h : UniqueDiffWithinAt ð s x) (st : ð[s] x †ð[t] x) :
    UniqueDiffWithinAt ð t x := by
  simp only [uniqueDiffWithinAt_iff] at *
  rw [mem_closure_iff_nhdsWithin_neBot] at h â¢
  exact âšh.1.mono <| Submodule.span_mono <| tangentCone_mono_nhds st, h.2.mono stâ©
theorem UniqueDiffWithinAt.mono (h : UniqueDiffWithinAt ð s x) (st : s â t) :
    UniqueDiffWithinAt ð t x :=
  h.mono_nhds <| nhdsWithin_mono _ st
theorem uniqueDiffWithinAt_congr (st : ð[s] x = ð[t] x) :
    UniqueDiffWithinAt ð s x â UniqueDiffWithinAt ð t x :=
  âšfun h => h.mono_nhds <| le_of_eq st, fun h => h.mono_nhds <| le_of_eq st.symmâ©
/-- Intersecting with a neighborhood of `x` does not affect unique differentiability at `x`. -/
theorem uniqueDiffWithinAt_inter (ht : t â ð x) :
    UniqueDiffWithinAt ð (s â© t) x â UniqueDiffWithinAt ð s x :=
  uniqueDiffWithinAt_congr <| (nhdsWithin_restrict' _ ht).symm
theorem UniqueDiffWithinAt.inter (hs : UniqueDiffWithinAt ð s x) (ht : t â ð x) :
    UniqueDiffWithinAt ð (s â© t) x :=
  (uniqueDiffWithinAt_inter ht).2 hs
-- Variant where `t` is only a neighborhood of `x` within `s`.
theorem uniqueDiffWithinAt_inter' (ht : t â ð[s] x) :
    UniqueDiffWithinAt ð (s â© t) x â UniqueDiffWithinAt ð s x :=
  uniqueDiffWithinAt_congr <| (nhdsWithin_restrict'' _ ht).symm
theorem UniqueDiffWithinAt.inter' (hs : UniqueDiffWithinAt ð s x) (ht : t â ð[s] x) :
    UniqueDiffWithinAt ð (s â© t) x :=
  (uniqueDiffWithinAt_inter' ht).2 hs
/-- A neighborhood of `x` is a set of unique differentiability at `x`. -/
theorem uniqueDiffWithinAt_of_mem_nhds (h : s â ð x) : UniqueDiffWithinAt ð s x := by
  simpa only [univ_inter] using uniqueDiffWithinAt_univ.inter h
theorem IsOpen.uniqueDiffWithinAt (hs : IsOpen s) (xs : x â s) : UniqueDiffWithinAt ð s x :=
  uniqueDiffWithinAt_of_mem_nhds (IsOpen.mem_nhds hs xs)
theorem UniqueDiffOn.inter (hs : UniqueDiffOn ð s) (ht : IsOpen t) : UniqueDiffOn ð (s â© t) :=
  fun x hx => (hs x hx.1).inter (IsOpen.mem_nhds ht hx.2)
theorem IsOpen.uniqueDiffOn (hs : IsOpen s) : UniqueDiffOn ð s :=
  fun _ hx => IsOpen.uniqueDiffWithinAt hs hx
/-- The product of two sets of unique differentiability at points `x` and `y` has unique
differentiability at `(x, y)`. -/
theorem UniqueDiffWithinAt.prod {t : Set F} {y : F} (hs : UniqueDiffWithinAt ð s x)
    (ht : UniqueDiffWithinAt ð t y) : UniqueDiffWithinAt ð (s ÃË¢ t) (x, y) := by
  rw [uniqueDiffWithinAt_iff] at hs ht â¢
  rw [closure_prod_eq]
  refine âš?_, hs.2, ht.2â©
  -- The span of the product cone contains both factor cones embedded via `inl`/`inr`;
  -- the span of that union is the product of the two (dense) spans.
  have : _ †Submodule.span ð (tangentConeAt ð (s ÃË¢ t) (x, y)) := Submodule.span_mono
    (union_subset (subset_tangentCone_prod_left ht.2) (subset_tangentCone_prod_right hs.2))
  rw [LinearMap.span_inl_union_inr, SetLike.le_def] at this
  exact (hs.1.prod ht.1).mono this
/-- A finite product of sets with unique differentiability (at each coordinate of `x`)
has unique differentiability at `x`. -/
theorem UniqueDiffWithinAt.univ_pi (ι : Type*) [Finite ι] (E : ι â Type*)
    [â i, NormedAddCommGroup (E i)] [â i, NormedSpace ð (E i)] (s : â i, Set (E i)) (x : â i, E i)
    (h : â i, UniqueDiffWithinAt ð (s i) (x i)) : UniqueDiffWithinAt ð (Set.pi univ s) x := by
  classical
  simp only [uniqueDiffWithinAt_iff, closure_pi_set] at h â¢
  refine âš(dense_pi univ fun i _ => (h i).1).mono ?_, fun i _ => (h i).2â©
  norm_cast
  -- The span of the product cone contains every coordinate embedding of the factor
  -- cones, by `mapsTo_tangentCone_pi`.
  simp only [â Submodule.iSup_map_single, iSup_le_iff, LinearMap.map_span, Submodule.span_le,
    â mapsTo']
  exact fun i => (mapsTo_tangentCone_pi fun j _ => (h j).2).mono Subset.rfl Submodule.subset_span
/-- Version of `UniqueDiffWithinAt.univ_pi` for a product over an arbitrary index set `I`. -/
theorem UniqueDiffWithinAt.pi (ι : Type*) [Finite ι] (E : ι â Type*)
    [â i, NormedAddCommGroup (E i)] [â i, NormedSpace ð (E i)] (s : â i, Set (E i)) (x : â i, E i)
    (I : Set ι) (h : â i â I, UniqueDiffWithinAt ð (s i) (x i)) :
    UniqueDiffWithinAt ð (Set.pi I s) x := by
  classical
  -- Reduce to a universal product by filling the missing coordinates with `univ`.
  rw [â Set.univ_pi_piecewise_univ]
  refine UniqueDiffWithinAt.univ_pi ι E _ _ fun i => ?_
  by_cases hi : i â I <;> simp [*, uniqueDiffWithinAt_univ]
/-- The product of two sets of unique differentiability is a set of unique differentiability. -/
theorem UniqueDiffOn.prod {t : Set F} (hs : UniqueDiffOn ð s) (ht : UniqueDiffOn ð t) :
    UniqueDiffOn ð (s ÃË¢ t) :=
  -- Apply the pointwise product lemma at each point of the product.
  fun âšx, yâ© h => UniqueDiffWithinAt.prod (hs x h.1) (ht y h.2)
/-- The finite product of a family of sets of unique differentiability is a set of unique
differentiability. -/
theorem UniqueDiffOn.pi (ι : Type*) [Finite ι] (E : ι â Type*) [â i, NormedAddCommGroup (E i)]
    [â i, NormedSpace ð (E i)] (s : â i, Set (E i)) (I : Set ι)
    (h : â i â I, UniqueDiffOn ð (s i)) : UniqueDiffOn ð (Set.pi I s) :=
  fun x hx => UniqueDiffWithinAt.pi _ _ _ _ _ fun i hi => h i hi (x i) (hx i hi)
/-- The finite product of a family of sets of unique differentiability is a set of unique
differentiability. -/
theorem UniqueDiffOn.univ_pi (ι : Type*) [Finite ι] (E : ι â Type*)
    [â i, NormedAddCommGroup (E i)] [â i, NormedSpace ð (E i)] (s : â i, Set (E i))
    (h : â i, UniqueDiffOn ð (s i)) : UniqueDiffOn ð (Set.pi univ s) :=
  UniqueDiffOn.pi _ _ _ _ fun i _ => h i
/-- In a real vector space, a convex set with nonempty interior is a set of unique
differentiability at every point of its closure. -/
theorem uniqueDiffWithinAt_convex {s : Set G} (conv : Convex â s) (hs : (interior s).Nonempty)
    {x : G} (hx : x â closure s) : UniqueDiffWithinAt â s x := by
  rcases hs with âšy, hyâ©
  -- It suffices that the tangent cone has nonempty interior: its span is then all of `G`.
  suffices y - x â interior (tangentConeAt â s x) by
    refine âšDense.of_closure ?_, hxâ©
    simp [(Submodule.span â (tangentConeAt â s x)).eq_top_of_nonempty_interior'
      âšy - x, interior_mono Submodule.subset_span thisâ©]
  rw [mem_interior_iff_mem_nhds]
  replace hy : interior s â ð y := IsOpen.mem_nhds isOpen_interior hy
  -- The translate of `interior s` by `-x` is a neighborhood of `y - x` consisting of
  -- tangent directions: open segments from `x` to interior points stay in the interior.
  apply mem_of_superset ((isOpenMap_sub_right x).image_mem_nhds hy)
  rintro _ âšz, zs, rflâ©
  refine mem_tangentCone_of_openSegment_subset (Subset.trans ?_ interior_subset)
  exact conv.openSegment_closure_interior_subset_interior hx zs
/-- In a real vector space, a convex set with nonempty interior is a set of unique
differentiability. -/
theorem uniqueDiffOn_convex {s : Set G} (conv : Convex â s) (hs : (interior s).Nonempty) :
    UniqueDiffOn â s :=
  fun _ xs => uniqueDiffWithinAt_convex conv hs (subset_closure xs)
theorem uniqueDiffOn_Ici (a : â) : UniqueDiffOn â (Ici a) :=
  uniqueDiffOn_convex (convex_Ici a) <| by simp only [interior_Ici, nonempty_Ioi]
theorem uniqueDiffOn_Iic (a : â) : UniqueDiffOn â (Iic a) :=
  uniqueDiffOn_convex (convex_Iic a) <| by simp only [interior_Iic, nonempty_Iio]
theorem uniqueDiffOn_Ioi (a : â) : UniqueDiffOn â (Ioi a) :=
  isOpen_Ioi.uniqueDiffOn
theorem uniqueDiffOn_Iio (a : â) : UniqueDiffOn â (Iio a) :=
  isOpen_Iio.uniqueDiffOn
theorem uniqueDiffOn_Icc {a b : â} (hab : a < b) : UniqueDiffOn â (Icc a b) :=
  uniqueDiffOn_convex (convex_Icc a b) <| by simp only [interior_Icc, nonempty_Ioo, hab]
-- For `Ico`/`Ioc` the convexity argument needs `a < b`; otherwise the interval is empty.
theorem uniqueDiffOn_Ico (a b : â) : UniqueDiffOn â (Ico a b) :=
  if hab : a < b then
    uniqueDiffOn_convex (convex_Ico a b) <| by simp only [interior_Ico, nonempty_Ioo, hab]
  else by simp only [Ico_eq_empty hab, uniqueDiffOn_empty]
theorem uniqueDiffOn_Ioc (a b : â) : UniqueDiffOn â (Ioc a b) :=
  if hab : a < b then
    uniqueDiffOn_convex (convex_Ioc a b) <| by simp only [interior_Ioc, nonempty_Ioo, hab]
  else by simp only [Ioc_eq_empty hab, uniqueDiffOn_empty]
theorem uniqueDiffOn_Ioo (a b : â) : UniqueDiffOn â (Ioo a b) :=
  isOpen_Ioo.uniqueDiffOn
/-- The real interval `[0, 1]` is a set of unique differentiability. -/
theorem uniqueDiffOn_Icc_zero_one : UniqueDiffOn â (Icc (0 : â) 1) :=
  uniqueDiffOn_Icc zero_lt_one
theorem uniqueDiffWithinAt_Ioo {a b t : â} (ht : t â Set.Ioo a b) :
    UniqueDiffWithinAt â (Set.Ioo a b) t :=
  IsOpen.uniqueDiffWithinAt isOpen_Ioo ht
-- At the endpoint `a` itself (a point of the closure), via the convexity lemma.
theorem uniqueDiffWithinAt_Ioi (a : â) : UniqueDiffWithinAt â (Ioi a) a :=
  uniqueDiffWithinAt_convex (convex_Ioi a) (by simp) (by simp)
theorem uniqueDiffWithinAt_Iio (a : â) : UniqueDiffWithinAt â (Iio a) a :=
  uniqueDiffWithinAt_convex (convex_Iio a) (by simp) (by simp)
end UniqueDiff
|
Analysis\Calculus\Taylor.lean | /-
Copyright (c) 2022 Moritz Doll. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Moritz Doll
-/
import Mathlib.Algebra.Polynomial.Module.Basic
import Mathlib.Analysis.Calculus.Deriv.Pow
import Mathlib.Analysis.Calculus.IteratedDeriv.Defs
import Mathlib.Analysis.Calculus.MeanValue
/-!
# Taylor's theorem
This file defines the Taylor polynomial of a real function `f : â â E`,
where `E` is a normed vector space over `â` and proves Taylor's theorem,
which states that if `f` is sufficiently smooth, then
`f` can be approximated by the Taylor polynomial up to an explicit error term.
## Main definitions
* `taylorCoeffWithin`: the Taylor coefficient using `iteratedDerivWithin`
* `taylorWithin`: the Taylor polynomial using `iteratedDerivWithin`
## Main statements
* `taylor_mean_remainder`: Taylor's theorem with the general form of the remainder term
* `taylor_mean_remainder_lagrange`: Taylor's theorem with the Lagrange remainder
* `taylor_mean_remainder_cauchy`: Taylor's theorem with the Cauchy remainder
* `exists_taylor_mean_remainder_bound`: Taylor's theorem for vector valued functions with a
polynomial bound on the remainder
## TODO
* the Peano form of the remainder
* the integral form of the remainder
* Generalization to higher dimensions
## Tags
Taylor polynomial, Taylor's theorem
-/
open scoped Interval Topology Nat
open Set
variable {ð E F : Type*}
variable [NormedAddCommGroup E] [NormedSpace â E]
/-- The `k`th coefficient of the Taylor polynomial. -/
noncomputable def taylorCoeffWithin (f : â â E) (k : â) (s : Set â) (xâ : â) : E :=
  -- The `k`-th within-set iterated derivative, divided by `k!`.
  (k ! : â)â»Â¹ ⢠iteratedDerivWithin k f s xâ
/-- The Taylor polynomial with derivatives inside of a set `s`.
The Taylor polynomial is given by
$$â_{k=0}^n \frac{(x - xâ)^k}{k!} f^{(k)}(xâ),$$
where $f^{(k)}(xâ)$ denotes the iterated derivative in the set `s`. -/
noncomputable def taylorWithin (f : â â E) (n : â) (s : Set â) (xâ : â) : PolynomialModule â E :=
  -- Sum of monomials of degree `k = 0, ..., n`, each precomposed with `X - C xâ`
  -- so that the polynomial is centered at `xâ`.
  (Finset.range (n + 1)).sum fun k =>
    PolynomialModule.comp (Polynomial.X - Polynomial.C xâ)
      (PolynomialModule.single â k (taylorCoeffWithin f k s xâ))
/-- The Taylor polynomial with derivatives inside of a set `s` considered as a function `â â E`-/
noncomputable def taylorWithinEval (f : â â E) (n : â) (s : Set â) (xâ x : â) : E :=
  PolynomialModule.eval x (taylorWithin f n s xâ)
/-- The Taylor polynomial of order `n + 1` is the order-`n` polynomial plus its
top-degree term. -/
theorem taylorWithin_succ (f : â â E) (n : â) (s : Set â) (xâ : â) :
    taylorWithin f (n + 1) s xâ = taylorWithin f n s xâ +
      PolynomialModule.comp (Polynomial.X - Polynomial.C xâ)
        (PolynomialModule.single â (n + 1) (taylorCoeffWithin f (n + 1) s xâ)) := by
  dsimp only [taylorWithin]
  rw [Finset.sum_range_succ]
/-- Evaluated form of `taylorWithin_succ`, with the coefficient spelled out as
`((n + 1) * n!)` inverse. -/
@[simp]
theorem taylorWithinEval_succ (f : â â E) (n : â) (s : Set â) (xâ x : â) :
    taylorWithinEval f (n + 1) s xâ x = taylorWithinEval f n s xâ x +
      (((n + 1 : â) * n !)â»Â¹ * (x - xâ) ^ (n + 1)) ⢠iteratedDerivWithin (n + 1) f s xâ := by
  simp_rw [taylorWithinEval, taylorWithin_succ, LinearMap.map_add, PolynomialModule.comp_eval]
  congr
  simp only [Polynomial.eval_sub, Polynomial.eval_X, Polynomial.eval_C,
    PolynomialModule.eval_single, mul_inv_rev]
  dsimp only [taylorCoeffWithin]
  -- Rewrite the inverse factorial `(n + 1)!` as `(n + 1) * n!` to match the statement.
  rw [â mul_smul, mul_comm, Nat.factorial_succ, Nat.cast_mul, Nat.cast_add, Nat.cast_one,
    mul_inv_rev]
/-- The Taylor polynomial of order zero evaluates to `f x`. -/
@[simp]
theorem taylor_within_zero_eval (f : â â E) (s : Set â) (xâ x : â) :
    taylorWithinEval f 0 s xâ x = f xâ := by
  dsimp only [taylorWithinEval]
  dsimp only [taylorWithin]
  dsimp only [taylorCoeffWithin]
  simp
/-- Evaluating the Taylor polynomial at `x = xâ` yields `f x`. -/
@[simp]
theorem taylorWithinEval_self (f : â â E) (n : â) (s : Set â) (xâ : â) :
    taylorWithinEval f n s xâ xâ = f xâ := by
  induction' n with k hk
  · exact taylor_within_zero_eval _ _ _ _
  -- The top-degree term vanishes at `x = xâ` since `(xâ - xâ) ^ (k + 1) = 0`.
  simp [hk]
/-- Explicit formula: the Taylor polynomial evaluated at `x` is the sum of its
monomial terms. -/
theorem taylor_within_apply (f : â â E) (n : â) (s : Set â) (xâ x : â) :
    taylorWithinEval f n s xâ x =
      â k â Finset.range (n + 1), ((k ! : â)â»Â¹ * (x - xâ) ^ k) ⢠iteratedDerivWithin k f s xâ := by
  induction' n with k hk
  · simp
  rw [taylorWithinEval_succ, Finset.sum_range_succ, hk]
  simp [Nat.factorial]
/-- If `f` is `n` times continuous differentiable on a set `s`, then the Taylor polynomial
`taylorWithinEval f n s xâ x` is continuous in `xâ`. -/
theorem continuousOn_taylorWithinEval {f : â â E} {x : â} {n : â} {s : Set â}
    (hs : UniqueDiffOn â s) (hf : ContDiffOn â n f s) :
    ContinuousOn (fun t => taylorWithinEval f n s t x) s := by
  simp_rw [taylor_within_apply]
  -- Each summand is (a polynomial in `t`) smul (the `i`-th iterated derivative);
  -- both factors are continuous on `s`.
  refine continuousOn_finset_sum (Finset.range (n + 1)) fun i hi => ?_
  refine (continuousOn_const.mul ((continuousOn_const.sub continuousOn_id).pow _)).smul ?_
  rw [contDiffOn_iff_continuousOn_differentiableOn_deriv hs] at hf
  cases' hf with hf_left
  specialize hf_left i
  simp only [Finset.mem_range] at hi
  refine hf_left ?_
  -- `i` is at most `n` since `i < n + 1`.
  simp only [WithTop.coe_le_coe, Nat.cast_le, Nat.lt_succ_iff.mp hi]
/-- Helper lemma for calculating the derivative of the monomial that appears in Taylor
expansions. -/
theorem monomial_has_deriv_aux (t x : â) (n : â) :
    HasDerivAt (fun y => (x - y) ^ (n + 1)) (-(n + 1) * (x - t) ^ n) t := by
  -- Rewrite `x - y` as `-y + x` and apply the power rule to the affine inner function.
  simp_rw [sub_eq_neg_add]
  rw [â neg_one_mul, mul_comm (-1 : â), mul_assoc, mul_comm (-1 : â), â mul_assoc]
  convert HasDerivAt.pow (n + 1) ((hasDerivAt_id t).neg.add_const x)
  simp only [Nat.cast_add, Nat.cast_one]
/-- Derivative (in the expansion point, within `t`) of the degree-`k + 1` term of the
Taylor polynomial: a product-rule computation producing the telescoping difference used
in `hasDerivWithinAt_taylorWithinEval`. -/
theorem hasDerivWithinAt_taylor_coeff_within {f : â â E} {x y : â} {k : â} {s t : Set â}
    (ht : UniqueDiffWithinAt â t y) (hs : s â ð[t] y)
    (hf : DifferentiableWithinAt â (iteratedDerivWithin (k + 1) f s) s y) :
    HasDerivWithinAt
      (fun z => (((k + 1 : â) * k !)â»Â¹ * (x - z) ^ (k + 1)) ⢠iteratedDerivWithin (k + 1) f s z)
      ((((k + 1 : â) * k !)â»Â¹ * (x - y) ^ (k + 1)) ⢠iteratedDerivWithin (k + 2) f s y -
        ((k ! : â)â»Â¹ * (x - y) ^ k) ⢠iteratedDerivWithin (k + 1) f s y) t y := by
  -- The `(k + 1)`-st iterated derivative differentiates, within `t`, to the `(k + 2)`-nd.
  replace hf :
      HasDerivWithinAt (iteratedDerivWithin (k + 1) f s) (iteratedDerivWithin (k + 2) f s y) t y := by
    convert (hf.mono_of_mem hs).hasDerivWithinAt using 1
    rw [iteratedDerivWithin_succ (ht.mono_nhds (nhdsWithin_le_iff.mpr hs))]
    exact (derivWithin_of_mem hs ht hf).symm
  -- Derivative of the scalar factor, computed via `monomial_has_deriv_aux`.
  have : HasDerivWithinAt (fun t => ((k + 1 : â) * k !)â»Â¹ * (x - t) ^ (k + 1))
      (-((k ! : â)â»Â¹ * (x - y) ^ k)) t y := by
    -- Commuting the factors:
    have : -((k ! : â)â»Â¹ * (x - y) ^ k) = ((k + 1 : â) * k !)â»Â¹ * (-(k + 1) * (x - y) ^ k) := by
      field_simp; ring
    rw [this]
    exact (monomial_has_deriv_aux y x _).hasDerivWithinAt.const_mul _
  -- Product (smul) rule, then rearrange the signs into the stated difference.
  convert this.smul hf using 1
  field_simp
  rw [neg_div, neg_smul, sub_eq_add_neg]
/-- Calculate the derivative of the Taylor polynomial with respect to `xâ`.
Version for arbitrary sets -/
theorem hasDerivWithinAt_taylorWithinEval {f : â â E} {x y : â} {n : â} {s s' : Set â}
    (hs'_unique : UniqueDiffWithinAt â s' y) (hs_unique : UniqueDiffOn â s) (hs' : s' â ð[s] y)
    (hy : y â s') (h : s' â s) (hf : ContDiffOn â n f s)
    (hf' : DifferentiableWithinAt â (iteratedDerivWithin n f s) s y) :
    HasDerivWithinAt (fun t => taylorWithinEval f n s t x)
      (((n ! : â)â»Â¹ * (x - y) ^ n) ⢠iteratedDerivWithin (n + 1) f s y) s' y := by
  induction' n with k hk
  -- Base case `n = 0`: the Taylor polynomial is `f` itself, so its derivative is `f'`.
  · simp only [taylor_within_zero_eval, Nat.factorial_zero, Nat.cast_one, inv_one, pow_zero,
      mul_one, zero_add, one_smul]
    simp only [iteratedDerivWithin_zero] at hf'
    rw [iteratedDerivWithin_one (hs_unique _ (h hy))]
    exact hf'.hasDerivWithinAt.mono h
  -- Inductive step: add the derivative of the top-degree term; the sum telescopes
  -- (only the highest-order term survives, via `add_sub_cancel`).
  simp_rw [Nat.add_succ, taylorWithinEval_succ]
  simp only [add_zero, Nat.factorial_succ, Nat.cast_mul, Nat.cast_add, Nat.cast_one]
  have coe_lt_succ : (k : WithTop â) < k.succ := Nat.cast_lt.2 k.lt_succ_self
  have hdiff : DifferentiableOn â (iteratedDerivWithin k f s) s' :=
    (hf.differentiableOn_iteratedDerivWithin coe_lt_succ hs_unique).mono h
  specialize hk hf.of_succ ((hdiff y hy).mono_of_mem hs')
  convert hk.add (hasDerivWithinAt_taylor_coeff_within hs'_unique
    (nhdsWithin_mono _ h self_mem_nhdsWithin) hf') using 1
  exact (add_sub_cancel _ _).symm
/-- Calculate the derivative of the Taylor polynomial with respect to `xâ`.
Version for open intervals -/
theorem taylorWithinEval_hasDerivAt_Ioo {f : â â E} {a b t : â} (x : â) {n : â} (hx : a < b)
    (ht : t â Ioo a b) (hf : ContDiffOn â n f (Icc a b))
    (hf' : DifferentiableOn â (iteratedDerivWithin n f (Icc a b)) (Ioo a b)) :
    HasDerivAt (fun y => taylorWithinEval f n (Icc a b) y x)
      (((n ! : â)â»Â¹ * (x - t) ^ n) ⢠iteratedDerivWithin (n + 1) f (Icc a b) t) t :=
  -- `Ioo a b` is an open neighborhood of `t`, so the within-set derivative upgrades
  -- to an unrestricted `HasDerivAt`.
  have h_nhds : Ioo a b â ð t := isOpen_Ioo.mem_nhds ht
  have h_nhds' : Ioo a b â ð[Icc a b] t := nhdsWithin_le_nhds h_nhds
  (hasDerivWithinAt_taylorWithinEval (uniqueDiffWithinAt_Ioo ht) (uniqueDiffOn_Icc hx) h_nhds' ht
    Ioo_subset_Icc_self hf <| (hf' t ht).mono_of_mem h_nhds').hasDerivAt h_nhds
/-- Calculate the derivative of the Taylor polynomial with respect to `xâ`.
Version for closed intervals -/
theorem hasDerivWithinAt_taylorWithinEval_at_Icc {f : â â E} {a b t : â} (x : â) {n : â}
    (hx : a < b) (ht : t â Icc a b) (hf : ContDiffOn â n f (Icc a b))
    (hf' : DifferentiableOn â (iteratedDerivWithin n f (Icc a b)) (Icc a b)) :
    HasDerivWithinAt (fun y => taylorWithinEval f n (Icc a b) y x)
      (((n ! : â)â»Â¹ * (x - t) ^ n) ⢠iteratedDerivWithin (n + 1) f (Icc a b) t) (Icc a b) t :=
  -- Specialization of the general version with both sets equal to `Icc a b`.
  hasDerivWithinAt_taylorWithinEval (uniqueDiffOn_Icc hx t ht) (uniqueDiffOn_Icc hx)
    self_mem_nhdsWithin ht rfl.subset hf (hf' t ht)
/-! ### Taylor's theorem with mean value type remainder estimate -/
/-- **Taylor's theorem** with the general mean value form of the remainder.
We assume that `f` is `n+1`-times continuously differentiable in the closed set `Icc xâ x` and
`n+1`-times differentiable on the open set `Ioo xâ x`, and `g` is a differentiable function on
`Ioo xâ x` and continuous on `Icc xâ x`. Then there exists an `x' â Ioo xâ x` such that
$$f(x) - (P_n f)(xâ, x) = \frac{(x - x')^n}{n!} \frac{g(x) - g(xâ)}{g' x'},$$
where $P_n f$ denotes the Taylor polynomial of degree $n$. -/
theorem taylor_mean_remainder {f : â â â} {g g' : â â â} {x xâ : â} {n : â} (hx : xâ < x)
    (hf : ContDiffOn â n f (Icc xâ x))
    (hf' : DifferentiableOn â (iteratedDerivWithin n f (Icc xâ x)) (Ioo xâ x))
    (gcont : ContinuousOn g (Icc xâ x))
    (gdiff : â x_1 : â, x_1 â Ioo xâ x â HasDerivAt g (g' x_1) x_1)
    (g'_ne : â x_1 : â, x_1 â Ioo xâ x â g' x_1 â 0) :
    â x' â Ioo xâ x, f x - taylorWithinEval f n (Icc xâ x) xâ x =
      ((x - x') ^ n / n ! * (g x - g xâ) / g' x') ⢠iteratedDerivWithin (n + 1) f (Icc xâ x) x' := by
  -- We apply the mean value theorem (generalized Cauchy form) to the map
  -- `t => taylorWithinEval f n (Icc xâ x) t x`, whose derivative in `t` was computed above.
  rcases exists_ratio_hasDerivAt_eq_ratio_slope (fun t => taylorWithinEval f n (Icc xâ x) t x)
      (fun t => ((n ! : â)â»Â¹ * (x - t) ^ n) ⢠iteratedDerivWithin (n + 1) f (Icc xâ x) t) hx
      (continuousOn_taylorWithinEval (uniqueDiffOn_Icc hx) hf)
      (fun _ hy => taylorWithinEval_hasDerivAt_Ioo x hx hy hf hf') g g' gcont gdiff with âšy, hy, hâ©
  use y, hy
  -- The rest is simplifications and trivial calculations
  simp only [taylorWithinEval_self] at h
  rw [mul_comm, â div_left_inj' (g'_ne y hy), mul_div_cancel_rightâ _ (g'_ne y hy)] at h
  rw [â h]
  field_simp [g'_ne y hy]
  ring
/-- **Taylor's theorem** with the Lagrange form of the remainder.
We assume that `f` is `n+1`-times continuously differentiable in the closed set `Icc xâ x` and
`n+1`-times differentiable on the open set `Ioo xâ x`. Then there exists an `x' â Ioo xâ x` such
that $$f(x) - (P_n f)(xâ, x) = \frac{f^{(n+1)}(x') (x - xâ)^{n+1}}{(n+1)!},$$
where $P_n f$ denotes the Taylor polynomial of degree $n$ and $f^{(n+1)}$ is the $n+1$-th iterated
derivative. -/
theorem taylor_mean_remainder_lagrange {f : â â â} {x xâ : â} {n : â} (hx : xâ < x)
    (hf : ContDiffOn â n f (Icc xâ x))
    (hf' : DifferentiableOn â (iteratedDerivWithin n f (Icc xâ x)) (Ioo xâ x)) :
    â x' â Ioo xâ x, f x - taylorWithinEval f n (Icc xâ x) xâ x =
      iteratedDerivWithin (n + 1) f (Icc xâ x) x' * (x - xâ) ^ (n + 1) / (n + 1)! := by
  have gcont : ContinuousOn (fun t : â => (x - t) ^ (n + 1)) (Icc xâ x) := by fun_prop
  -- On the open interval `x - y` is nonzero, needed to divide by `g'` later.
  have xy_ne : â y : â, y â Ioo xâ x â (x - y) ^ n â 0 := by
    intro y hy
    refine pow_ne_zero _ ?_
    rw [mem_Ioo] at hy
    rw [sub_ne_zero]
    exact hy.2.ne'
  have hg' : â y : â, y â Ioo xâ x â -(ân + 1) * (x - y) ^ n â 0 := fun y hy =>
    mul_ne_zero (neg_ne_zero.mpr (Nat.cast_add_one_ne_zero n)) (xy_ne y hy)
  -- We apply the general theorem with g(t) = (x - t)^(n+1)
  rcases taylor_mean_remainder hx hf hf' gcont (fun y _ => monomial_has_deriv_aux y x _) hg' with
    âšy, hy, hâ©
  use y, hy
  simp only [sub_self, zero_pow, Ne, Nat.succ_ne_zero, not_false_iff, zero_sub, mul_neg] at h
  rw [h, neg_div, â div_neg, neg_mul, neg_neg]
  field_simp [xy_ne y hy, Nat.factorial]; ring
/-- **Taylor's theorem** with the Cauchy form of the remainder.
We assume that `f` is `n+1`-times continuously differentiable on the closed set `Icc xâ x` and
`n+1`-times differentiable on the open set `Ioo xâ x`. Then there exists an `x' â Ioo xâ x` such
that $$f(x) - (P_n f)(xâ, x) = \frac{f^{(n+1)}(x') (x - x')^n (x-xâ)}{n!},$$
where $P_n f$ denotes the Taylor polynomial of degree $n$ and $f^{(n+1)}$ is the $n+1$-th iterated
derivative. -/
theorem taylor_mean_remainder_cauchy {f : â â â} {x xâ : â} {n : â} (hx : xâ < x)
    (hf : ContDiffOn â n f (Icc xâ x))
    (hf' : DifferentiableOn â (iteratedDerivWithin n f (Icc xâ x)) (Ioo xâ x)) :
    â x' â Ioo xâ x, f x - taylorWithinEval f n (Icc xâ x) xâ x =
      iteratedDerivWithin (n + 1) f (Icc xâ x) x' * (x - x') ^ n / n ! * (x - xâ) := by
  have gcont : ContinuousOn id (Icc xâ x) := by fun_prop
  have gdiff : â x_1 : â, x_1 â Ioo xâ x â HasDerivAt id ((fun _ : â => (1 : â)) x_1) x_1 :=
    fun _ _ => hasDerivAt_id _
  -- We apply the general theorem with g = id
  rcases taylor_mean_remainder hx hf hf' gcont gdiff fun _ _ => by simp with âšy, hy, hâ©
  use y, hy
  rw [h]
  field_simp [n.factorial_ne_zero]
  ring
/-- **Taylor's theorem** with a polynomial bound on the remainder
We assume that `f` is `n+1`-times continuously differentiable on the closed set `Icc a b`.
The difference of `f` and its `n`-th Taylor polynomial can be estimated by
`C * (x - a)^(n+1) / n!` where `C` is a bound for the `n+1`-th iterated derivative of `f`. -/
theorem taylor_mean_remainder_bound {f : â â E} {a b C x : â} {n : â} (hab : a †b)
    (hf : ContDiffOn â (n + 1) f (Icc a b)) (hx : x â Icc a b)
    (hC : â y â Icc a b, âiteratedDerivWithin (n + 1) f (Icc a b) yâ †C) :
    âf x - taylorWithinEval f n (Icc a b) a xâ †C * (x - a) ^ (n + 1) / n ! := by
  rcases eq_or_lt_of_le hab with (rfl | h)
  -- Degenerate case `a = b`: both sides are zero.
  · rw [Icc_self, mem_singleton_iff] at hx
    simp [hx]
  -- The nth iterated derivative is differentiable
  have hf' : DifferentiableOn â (iteratedDerivWithin n f (Icc a b)) (Icc a b) :=
    hf.differentiableOn_iteratedDerivWithin (WithTop.coe_lt_coe.mpr n.lt_succ_self)
      (uniqueDiffOn_Icc h)
  -- We can uniformly bound the derivative of the Taylor polynomial
  have h' : â y â Ico a x,
      â((n ! : â)â»Â¹ * (x - y) ^ n) ⢠iteratedDerivWithin (n + 1) f (Icc a b) yâ â€
        (n ! : â)â»Â¹ * |x - a| ^ n * C := by
    rintro y âšhay, hyxâ©
    rw [norm_smul, Real.norm_eq_abs]
    gcongr
    -- Bound the scalar factor using `|x - y| †|x - a|` on `Ico a x`.
    · rw [abs_mul, abs_pow, abs_inv, Nat.abs_cast]
      gcongr
      exact sub_nonneg.2 hyx.le
    -- Estimate the iterated derivative by `C`
    · exact hC y âšhay, hyx.le.trans hx.2â©
  -- Apply the mean value theorem for vector valued functions:
  have A : â t â Icc a x, HasDerivWithinAt (fun y => taylorWithinEval f n (Icc a b) y x)
      (((ân !)â»Â¹ * (x - t) ^ n) ⢠iteratedDerivWithin (n + 1) f (Icc a b) t) (Icc a x) t := by
    intro t ht
    have I : Icc a x â Icc a b := Icc_subset_Icc_right hx.2
    exact (hasDerivWithinAt_taylorWithinEval_at_Icc x h (I ht) hf.of_succ hf').mono I
  have := norm_image_sub_le_of_norm_deriv_le_segment' A h' x (right_mem_Icc.2 hx.1)
  simp only [taylorWithinEval_self] at this
  refine this.trans_eq ?_
  -- The rest is a trivial calculation
  rw [abs_of_nonneg (sub_nonneg.mpr hx.1)]
  ring
/-- **Taylor's theorem** with a polynomial bound on the remainder
We assume that `f` is `n+1`-times continuously differentiable on the closed set `Icc a b`.
There exists a constant `C` such that for all `x â Icc a b` the difference of `f` and its `n`-th
Taylor polynomial can be estimated by `C * (x - a)^(n+1)`. -/
theorem exists_taylor_mean_remainder_bound {f : â â E} {a b : â} {n : â} (hab : a †b)
    (hf : ContDiffOn â (n + 1) f (Icc a b)) :
    â C, â x â Icc a b, âf x - taylorWithinEval f n (Icc a b) a xâ †C * (x - a) ^ (n + 1) := by
  rcases eq_or_lt_of_le hab with (rfl | h)
  -- Degenerate interval: the remainder vanishes, so any constant (here `0`) works.
  · refine âš0, fun x hx => ?_â©
    have : x = a := by simpa [â le_antisymm_iff] using hx
    simp [â this]
  -- We estimate by the supremum of the norm of the iterated derivative
  let g : â â â := fun y => âiteratedDerivWithin (n + 1) f (Icc a b) yâ
  use SupSet.sSup (g '' Icc a b) / (n !)
  intro x hx
  rw [div_mul_eq_mul_divâ]
  refine taylor_mean_remainder_bound hab hf hx fun y => ?_
  -- The sup is a valid bound: the derivative's norm is continuous on the compact `Icc a b`.
  exact (hf.continuousOn_iteratedDerivWithin rfl.le <| uniqueDiffOn_Icc h).norm.le_sSup_image_Icc
|
Analysis\Calculus\UniformLimitsDeriv.lean | /-
Copyright (c) 2022 Kevin H. Wilson. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Kevin H. Wilson
-/
import Mathlib.Analysis.Calculus.MeanValue
import Mathlib.Analysis.NormedSpace.RCLike
import Mathlib.Order.Filter.Curry
/-!
# Swapping limits and derivatives via uniform convergence
The purpose of this file is to prove that the derivative of the pointwise limit of a sequence of
functions is the pointwise limit of the functions' derivatives when the derivatives converge
_uniformly_. The formal statement appears as `hasFDerivAt_of_tendstoLocallyUniformlyOn`.
## Main statements
* `uniformCauchySeqOnFilter_of_fderiv`: If
1. `f : â â E â G` is a sequence of functions which have derivatives
`f' : â â E â (E âL[ð] G)` on a neighborhood of `x`,
2. the functions `f` converge at `x`, and
3. the derivatives `f'` form a Cauchy sequence uniformly on a neighborhood of `x`,
then the `f` form a Cauchy sequence _uniformly_ on a neighborhood of `x`
* `hasFDerivAt_of_tendstoUniformlyOnFilter` : Suppose (1), (2), and (3) above are true. Let
  `g` (resp. `g'`) be the limiting function of the `f` (resp. `f'`). Then `g'` is the derivative of
  `g` on a neighborhood of `x`
* `hasFDerivAt_of_tendstoUniformlyOn`: An often-easier-to-use version of the above theorem when
*all* the derivatives exist and functions converge on a common open set and the derivatives
converge uniformly there.
Each of the above statements also has variations that support `deriv` instead of `fderiv`.
## Implementation notes
Our technique for proving the main result is the famous "`ε / 3` proof." In words, you can find it
explained, for instance, at [this StackExchange post](https://math.stackexchange.com/questions/214218/uniform-convergence-of-derivatives-tao-14-2-7).
The subtlety is that we want to prove that the difference quotients of the `g` converge to the `g'`.
That is, we want to prove something like:
```
â ε > 0, â ÎŽ > 0, â y â B_ÎŽ(x), |y - x|â»Â¹ * |(g y - g x) - g' x (y - x)| < ε.
```
To do so, we will need to introduce a pair of quantifiers
```lean
â ε > 0, â N, â n ⥠N, â ÎŽ > 0, â y â B_ÎŽ(x), |y - x|â»Â¹ * |(g y - g x) - g' x (y - x)| < ε.
```
So how do we write this in terms of filters? Well, the initial definition of the derivative is
```lean
tendsto (|y - x|â»Â¹ * |(g y - g x) - g' x (y - x)|) (ð x) (ð 0)
```
There are two ways we might introduce `n`. We could do:
```lean
âá¶ (n : â) in atTop, Tendsto (|y - x|â»Â¹ * |(g y - g x) - g' x (y - x)|) (ð x) (ð 0)
```
but this is equivalent to the quantifier order `â N, â n ⥠N, â ε > 0, â ÎŽ > 0, â y â B_ÎŽ(x)`,
which _implies_ our desired `â â â â â` but is _not_ equivalent to it. On the other hand, we might
try
```lean
Tendsto (|y - x|â»Â¹ * |(g y - g x) - g' x (y - x)|) (atTop ÃË¢ ð x) (ð 0)
```
but this is equivalent to the quantifier order `â ε > 0, â N, â ÎŽ > 0, â n ⥠N, â y â B_ÎŽ(x)`, which
again _implies_ our desired `â â â â â` but is not equivalent to it.
So to get the quantifier order we want, we need to introduce a new filter construction, which we
call a "curried filter"
```lean
Tendsto (|y - x|â»Â¹ * |(g y - g x) - g' x (y - x)|) (atTop.curry (ð x)) (ð 0)
```
Then the above implications are `Filter.Tendsto.curry` and
`Filter.Tendsto.mono_left Filter.curry_le_prod`. We will use both of these deductions as part of
our proof.
We note that if you loosen the assumptions of the main theorem then the proof becomes quite a bit
easier. In particular, if you assume there is a common neighborhood `s` where all of the three
assumptions of `hasFDerivAt_of_tendstoUniformlyOnFilter` hold and that the `f'` are
continuous, then you can avoid the mean value theorem and much of the work around curried filters.
## Tags
uniform convergence, limits of derivatives
-/
open Filter
open scoped uniformity Filter Topology
section LimitsOfDerivatives
variable {ι : Type*} {l : Filter ι} {E : Type*} [NormedAddCommGroup E] {ð : Type*} [RCLike ð]
[NormedSpace ð E] {G : Type*} [NormedAddCommGroup G] [NormedSpace ð G] {f : ι â E â G}
{g : E â G} {f' : ι â E â E âL[ð] G} {g' : E â E âL[ð] G} {x : E}
/-- If a sequence of real or complex functions is eventually differentiable on a
neighborhood of `x`, the functions are Cauchy _at_ `x`, and their derivatives
are a uniform Cauchy sequence in a neighborhood of `x`, then the functions form a uniform Cauchy
sequence in a neighborhood of `x`. -/
theorem uniformCauchySeqOnFilter_of_fderiv (hf' : UniformCauchySeqOnFilter f' l (ð x))
    (hf : âá¶ n : ι à E in l ÃË¢ ð x, HasFDerivAt (f n.1) (f' n.1 n.2) n.2)
    (hfg : Cauchy (map (fun n => f n x) l)) : UniformCauchySeqOnFilter f l (ð x) := by
  -- Restrict scalars to `â` so that the real mean value inequality applies.
  letI : NormedSpace â E := NormedSpace.restrictScalars â ð _
  rw [SeminormedAddGroup.uniformCauchySeqOnFilter_iff_tendstoUniformlyOnFilter_zero] at hf' â¢
  -- Split `f n.1 z - f n.2 z` into a part vanishing at `x` (handled by the mean value
  -- theorem) and the value at `x` (handled by the Cauchy assumption `hfg`).
  suffices
    TendstoUniformlyOnFilter (fun (n : ι à ι) (z : E) => f n.1 z - f n.2 z - (f n.1 x - f n.2 x)) 0
        (l ÃË¢ l) (ð x) â§
      TendstoUniformlyOnFilter (fun (n : ι à ι) (_ : E) => f n.1 x - f n.2 x) 0 (l ÃË¢ l) (ð x) by
    have := this.1.add this.2
    rw [add_zero] at this
    exact this.congr (by simp)
  constructor
  · -- This inequality follows from the mean value theorem. To apply it, we will need to shrink our
    -- neighborhood to small enough ball
    rw [Metric.tendstoUniformlyOnFilter_iff] at hf' â¢
    intro ε hε
    have := (tendsto_swap4_prod.eventually (hf.prod_mk hf)).diag_of_prod_right
    obtain âša, b, c, d, eâ© := eventually_prod_iff.1 ((hf' ε hε).and this)
    obtain âšR, hR, hR'â© := Metric.nhds_basis_ball.eventually_iff.mp d
    -- Intersect with the unit ball so that `ây - xâ < 1` holds on the chosen ball.
    let r := min 1 R
    have hr : 0 < r := by simp [r, hR]
    have hr' : â âŠy : EâŠ, y â Metric.ball x r â c y := fun y hy =>
      hR' (lt_of_lt_of_le (Metric.mem_ball.mp hy) (min_le_right _ _))
    have hxy : â y : E, y â Metric.ball x r â ây - xâ < 1 := by
      intro y hy
      rw [Metric.mem_ball, dist_eq_norm] at hy
      exact lt_of_lt_of_le hy (min_le_left _ _)
    have hxyε : â y : E, y â Metric.ball x r â ε * ây - xâ < ε := by
      intro y hy
      exact (mul_lt_iff_lt_one_right hε.lt).mpr (hxy y hy)
    -- With a small ball in hand, apply the mean value theorem
    refine
      eventually_prod_iff.mpr
        âš_, b, fun e : E => Metric.ball x r e,
          eventually_mem_set.mpr (Metric.nhds_basis_ball.mem_of_mem hr), fun {n} hn {y} hy => ?_â©
    simp only [Pi.zero_apply, dist_zero_left] at e â¢
    refine lt_of_le_of_lt ?_ (hxyε y hy)
    exact
      Convex.norm_image_sub_le_of_norm_hasFDerivWithin_le
        (fun y hy => ((e hn (hr' hy)).2.1.sub (e hn (hr' hy)).2.2).hasFDerivWithinAt)
        (fun y hy => (e hn (hr' hy)).1.le) (convex_ball x r) (Metric.mem_ball_self hr) hy
  · -- This is just `hfg` run through `eventually_prod_iff`
    refine Metric.tendstoUniformlyOnFilter_iff.mpr fun ε hε => ?_
    obtain âšt, ht, ht'â© := (Metric.cauchy_iff.mp hfg).2 ε hε
    exact
      eventually_prod_iff.mpr
        âšfun n : ι à ι => f n.1 x â t â§ f n.2 x â t,
          eventually_prod_iff.mpr âš_, ht, _, ht, fun {n} hn {n'} hn' => âšhn, hn'â©â©,
          fun _ => True,
          by simp,
          fun {n} hn {y} _ => by simpa [norm_sub_rev, dist_eq_norm] using ht' _ hn.1 _ hn.2â©
/-- A variant of the second fundamental theorem of calculus (FTC-2): If a sequence of functions
between real or complex normed spaces are differentiable on a ball centered at `x`, they
form a Cauchy sequence _at_ `x`, and their derivatives are Cauchy uniformly on the ball, then the
functions form a uniform Cauchy sequence on the ball.
NOTE: The fact that we work on a ball is typically all that is necessary to work with power series
and Dirichlet series (our primary use case). However, this can be generalized by replacing the ball
with any connected, bounded, open set and replacing uniform convergence with local uniform
convergence. See `cauchy_map_of_uniformCauchySeqOn_fderiv`.
-/
theorem uniformCauchySeqOn_ball_of_fderiv {r : â} (hf' : UniformCauchySeqOn f' l (Metric.ball x r))
    (hf : â n : ι, â y : E, y â Metric.ball x r â HasFDerivAt (f n) (f' n y) y)
    (hfg : Cauchy (map (fun n => f n x) l)) : UniformCauchySeqOn f l (Metric.ball x r) := by
  -- Restrict scalars to `â` so that the real mean value inequality applies.
  letI : NormedSpace â E := NormedSpace.restrictScalars â ð _
  have : NeBot l := (cauchy_map_iff.1 hfg).1
  -- If the radius is nonpositive the ball is empty and the claim is vacuous.
  rcases le_or_lt r 0 with (hr | hr)
  · simp only [Metric.ball_eq_empty.2 hr, UniformCauchySeqOn, Set.mem_empty_iff_false,
      IsEmpty.forall_iff, eventually_const, imp_true_iff]
  rw [SeminormedAddGroup.uniformCauchySeqOn_iff_tendstoUniformlyOn_zero] at hf' â¢
  -- As in the filter version: split off the value at the center `x`.
  suffices
    TendstoUniformlyOn (fun (n : ι à ι) (z : E) => f n.1 z - f n.2 z - (f n.1 x - f n.2 x)) 0
        (l ÃË¢ l) (Metric.ball x r) â§
      TendstoUniformlyOn (fun (n : ι à ι) (_ : E) => f n.1 x - f n.2 x) 0
        (l ÃË¢ l) (Metric.ball x r) by
    have := this.1.add this.2
    rw [add_zero] at this
    refine this.congr ?_
    filter_upwards with n z _ using (by simp)
  constructor
  · -- This inequality follows from the mean value theorem
    rw [Metric.tendstoUniformlyOn_iff] at hf' â¢
    intro ε hε
    -- Choose `q` with `q * r < ε` so a `q`-bound on the derivatives yields an `ε`-bound.
    obtain âšq, hqpos, hqâ© : â q : â, 0 < q â§ q * r < ε := by
      simp_rw [mul_comm]
      exact exists_pos_mul_lt hε.lt r
    apply (hf' q hqpos.gt).mono
    intro n hn y hy
    simp_rw [dist_eq_norm, Pi.zero_apply, zero_sub, norm_neg] at hn â¢
    have mvt :=
      Convex.norm_image_sub_le_of_norm_hasFDerivWithin_le
        (fun z hz => ((hf n.1 z hz).sub (hf n.2 z hz)).hasFDerivWithinAt) (fun z hz => (hn z hz).le)
        (convex_ball x r) (Metric.mem_ball_self hr) hy
    refine lt_of_le_of_lt mvt ?_
    have : q * ây - xâ < q * r :=
      mul_lt_mul' rfl.le (by simpa only [dist_eq_norm] using Metric.mem_ball.mp hy) (norm_nonneg _)
        hqpos
    exact this.trans hq
  · -- This is just `hfg` run through `eventually_prod_iff`
    refine Metric.tendstoUniformlyOn_iff.mpr fun ε hε => ?_
    obtain âšt, ht, ht'â© := (Metric.cauchy_iff.mp hfg).2 ε hε
    rw [eventually_prod_iff]
    refine âšfun n => f n x â t, ht, fun n => f n x â t, ht, ?_â©
    intro n hn n' hn' z _
    rw [dist_eq_norm, Pi.zero_apply, zero_sub, norm_neg, â dist_eq_norm]
    exact ht' _ hn _ hn'
/-- If a sequence of functions between real or complex normed spaces are differentiable on a
preconnected open set, they form a Cauchy sequence _at_ `x`, and their derivatives are Cauchy
uniformly on the set, then the functions form a Cauchy sequence at any point in the set. -/
theorem cauchy_map_of_uniformCauchySeqOn_fderiv {s : Set E} (hs : IsOpen s) (h's : IsPreconnected s)
    (hf' : UniformCauchySeqOn f' l s) (hf : â n : ι, â y : E, y â s â HasFDerivAt (f n) (f' n y) y)
    {xâ x : E} (hxâ : xâ â s) (hx : x â s) (hfg : Cauchy (map (fun n => f n xâ) l)) :
    Cauchy (map (fun n => f n x) l) := by
  have : NeBot l := (cauchy_map_iff.1 hfg).1
  -- `t` is the set of points of `s` where the conclusion holds; we show it is all of `s`
  -- via a connectedness (clopen) argument.
  let t := { y | y â s â§ Cauchy (map (fun n => f n y) l) }
  suffices H : s â t from (H hx).2
  -- Key step: a ball around a point of `t` contained in `s` lies entirely in `t`,
  -- by the ball version `uniformCauchySeqOn_ball_of_fderiv`.
  have A : â x ε, x â t â Metric.ball x ε â s â Metric.ball x ε â t := fun x ε xt hx y hy =>
    âšhx hy,
      (uniformCauchySeqOn_ball_of_fderiv (hf'.mono hx) (fun n y hy => hf n y (hx hy))
            xt.2).cauchy_map
        hyâ©
  have open_t : IsOpen t := by
    rw [Metric.isOpen_iff]
    intro x hx
    rcases Metric.isOpen_iff.1 hs x hx.1 with âšÎµ, εpos, hεâ©
    exact âšÎµ, εpos, A x ε hx hεâ©
  have st_nonempty : (s â© t).Nonempty := âšxâ, hxâ, âšhxâ, hfgâ©â©
  -- By preconnectedness, a nonempty open subset that is relatively closed in `s` is all of `s`.
  suffices H : closure t â© s â t from h's.subset_of_closure_inter_subset open_t st_nonempty H
  rintro x âšxt, xsâ©
  obtain âšÎµ, εpos, hε⩠: â (ε : â), ε > 0 â§ Metric.ball x ε â s := Metric.isOpen_iff.1 hs x xs
  obtain âšy, yt, hxyâ© : â (y : E), y â t â§ dist x y < ε / 2 :=
    Metric.mem_closure_iff.1 xt _ (half_pos εpos)
  have B : Metric.ball y (ε / 2) â Metric.ball x ε := by
    apply Metric.ball_subset_ball'; rw [dist_comm]; linarith
  exact A y (ε / 2) yt (B.trans hε) (Metric.mem_ball.2 hxy)
/-- If `f_n â g` pointwise and the derivatives `(f_n)' â h` _uniformly_ converge, then
in fact for a fixed `y`, the difference quotients `âz - yââ»Â¹ ⢠(f_n z - f_n y)` converge
_uniformly_ to `âz - yââ»Â¹ ⢠(g z - g y)` -/
theorem difference_quotients_converge_uniformly (hf' : TendstoUniformlyOnFilter f' g' l (ð x))
    (hf : âá¶ n : ι à E in l ÃË¢ ð x, HasFDerivAt (f n.1) (f' n.1 n.2) n.2)
    (hfg : âá¶ y : E in ð x, Tendsto (fun n => f n y) l (ð (g y))) :
    TendstoUniformlyOnFilter (fun n : ι => fun y : E => (ây - xââ»Â¹ : ð) ⢠(f n y - f n x))
      (fun y : E => (ây - xââ»Â¹ : ð) ⢠(g y - g x)) l (ð x) := by
  -- Restrict scalars to `â` so that the real mean value inequality applies.
  let A : NormedSpace â E := NormedSpace.restrictScalars â ð _
  -- The trivial filter case is immediate.
  rcases eq_or_ne l ⥠with (hl | hl)
  · simp only [hl, TendstoUniformlyOnFilter, bot_prod, eventually_bot, imp_true_iff]
  haveI : NeBot l := âšhlâ©
  -- It suffices that the difference quotients are uniformly Cauchy and converge pointwise.
  refine
    UniformCauchySeqOnFilter.tendstoUniformlyOnFilter_of_tendsto ?_
      ((hfg.and (eventually_const.mpr hfg.self_of_nhds)).mono fun y hy =>
        (hy.1.sub hy.2).const_smul _)
  rw [SeminormedAddGroup.uniformCauchySeqOnFilter_iff_tendstoUniformlyOnFilter_zero]
  rw [Metric.tendstoUniformlyOnFilter_iff]
  have hfg' := hf'.uniformCauchySeqOnFilter
  rw [SeminormedAddGroup.uniformCauchySeqOnFilter_iff_tendstoUniformlyOnFilter_zero] at hfg'
  rw [Metric.tendstoUniformlyOnFilter_iff] at hfg'
  intro ε hε
  -- Use a rational `q < ε` so the `â€` from the mean value theorem yields a strict `<`.
  obtain âšq, hqpos, hqε⩠:= exists_pos_rat_lt hε
  specialize hfg' (q : â) (by simp [hqpos])
  have := (tendsto_swap4_prod.eventually (hf.prod_mk hf)).diag_of_prod_right
  obtain âša, b, c, d, eâ© := eventually_prod_iff.1 (hfg'.and this)
  obtain âšr, hr, hr'â© := Metric.nhds_basis_ball.eventually_iff.mp d
  rw [eventually_prod_iff]
  refine
    âš_, b, fun e : E => Metric.ball x r e,
      eventually_mem_set.mpr (Metric.nhds_basis_ball.mem_of_mem hr), fun {n} hn {y} hy => ?_â©
  simp only [Pi.zero_apply, dist_zero_left]
  rw [â smul_sub, norm_smul, norm_inv, RCLike.norm_coe_norm]
  refine lt_of_le_of_lt ?_ hqε
  -- The degenerate point `y = x` is immediate.
  by_cases hyz' : x = y; · simp [hyz', hqpos.le]
  have hyz : 0 < ây - xâ := by rw [norm_pos_iff]; intro hy'; exact hyz' (eq_of_sub_eq_zero hy').symm
  rw [inv_mul_le_iff hyz, mul_comm, sub_sub_sub_comm]
  simp only [Pi.zero_apply, dist_zero_left] at e
  -- Mean value inequality on the ball bounds the difference of difference quotients.
  refine
    Convex.norm_image_sub_le_of_norm_hasFDerivWithin_le
      (fun y hy => ((e hn (hr' hy)).2.1.sub (e hn (hr' hy)).2.2).hasFDerivWithinAt)
      (fun y hy => (e hn (hr' hy)).1.le) (convex_ball x r) (Metric.mem_ball_self hr) hy
/-- `(d/dx) lim_{n â â} f n x = lim_{n â â} f' n x` when the `f' n` converge
_uniformly_ to their limit at `x`.
In words the assumptions mean the following:
* `hf'`: The `f'` converge "uniformly at" `x` to `g'`. This does not mean that the `f' n` even
  converge away from `x`!
* `hf`: For all `(y, n)` with `y` sufficiently close to `x` and `n` sufficiently large, `f' n` is
  the derivative of `f n`
* `hfg`: The `f n` converge pointwise to `g` on a neighborhood of `x` -/
theorem hasFDerivAt_of_tendstoUniformlyOnFilter [NeBot l]
    (hf' : TendstoUniformlyOnFilter f' g' l (ð x))
    (hf : âá¶ n : ι à E in l ÃË¢ ð x, HasFDerivAt (f n.1) (f' n.1 n.2) n.2)
    (hfg : âá¶ y in ð x, Tendsto (fun n => f n y) l (ð (g y))) : HasFDerivAt g (g' x) x := by
  -- The proof strategy follows several steps:
  -- 1. The quantifiers in the definition of the derivative are
  --    `â ε > 0, âÎŽ > 0, ây â B_ÎŽ(x)`. We will introduce a quantifier in the middle:
  --    `â ε > 0, âN, ân ⥠N, âÎŽ > 0, ây â B_ÎŽ(x)` which will allow us to introduce the `f(') n`
  -- 2. The order of the quantifiers `hfg` are opposite to what we need. We will be able to swap
  --    the quantifiers using the uniform convergence assumption
  rw [hasFDerivAt_iff_tendsto]
  -- Introduce extra quantifier via curried filters
  suffices
    Tendsto (fun y : ι à E => ây.2 - xââ»Â¹ * âg y.2 - g x - (g' x) (y.2 - x)â)
      (l.curry (ð x)) (ð 0) by
    rw [Metric.tendsto_nhds] at this â¢
    intro ε hε
    specialize this ε hε
    rw [eventually_curry_iff] at this
    simp only at this
    exact (eventually_const.mp this).mono (by simp only [imp_self, forall_const])
  -- With the new quantifier in hand, we can perform the famous `ε/3` proof. Specifically,
  -- we will break up the limit (the difference functions minus the derivative go to 0) into 3:
  -- * The difference functions of the `f n` converge *uniformly* to the difference functions
  --   of the `g n`
  -- * The `f' n` are the derivatives of the `f n`
  -- * The `f' n` converge to `g'` at `x`
  conv =>
    congr
    ext
    rw [â abs_norm, â abs_inv, â @RCLike.norm_ofReal ð _ _, RCLike.ofReal_inv, â norm_smul]
  rw [â tendsto_zero_iff_norm_tendsto_zero]
  -- Rewrite the target as the sum of the three `ε/3` pieces above.
  have :
    (fun a : ι à E => (âa.2 - xââ»Â¹ : ð) ⢠(g a.2 - g x - (g' x) (a.2 - x))) =
      ((fun a : ι à E => (âa.2 - xââ»Â¹ : ð) ⢠(g a.2 - g x - (f a.1 a.2 - f a.1 x))) +
          fun a : ι à E =>
            (âa.2 - xââ»Â¹ : ð) ⢠(f a.1 a.2 - f a.1 x - ((f' a.1 x) a.2 - (f' a.1 x) x))) +
        fun a : ι à E => (âa.2 - xââ»Â¹ : ð) ⢠(f' a.1 x - g' x) (a.2 - x) := by
    ext; simp only [Pi.add_apply]; rw [â smul_add, â smul_add]; congr
    simp only [map_sub, sub_add_sub_cancel, ContinuousLinearMap.coe_sub', Pi.sub_apply]
    -- Porting note: added
    abel
  simp_rw [this]
  have : ð (0 : G) = ð (0 + 0 + 0) := by simp only [add_zero]
  rw [this]
  refine Tendsto.add (Tendsto.add ?_ ?_) ?_
  · -- The difference quotients of the `f n` converge uniformly to those of `g`.
    have := difference_quotients_converge_uniformly hf' hf hfg
    rw [Metric.tendstoUniformlyOnFilter_iff] at this
    rw [Metric.tendsto_nhds]
    intro ε hε
    apply ((this ε hε).filter_mono curry_le_prod).mono
    intro n hn
    rw [dist_eq_norm] at hn â¢
    rw [â smul_sub] at hn
    rwa [sub_zero]
  · -- (Almost) the definition of the derivatives
    rw [Metric.tendsto_nhds]
    intro ε hε
    rw [eventually_curry_iff]
    refine hf.curry.mono fun n hn => ?_
    have := hn.self_of_nhds
    rw [hasFDerivAt_iff_tendsto, Metric.tendsto_nhds] at this
    refine (this ε hε).mono fun y hy => ?_
    rw [dist_eq_norm] at hy â¢
    simp only [sub_zero, map_sub, norm_mul, norm_inv, norm_norm] at hy â¢
    rw [norm_smul, norm_inv, RCLike.norm_coe_norm]
    exact hy
  · -- hfg' after specializing to `x` and applying the definition of the operator norm
    refine Tendsto.mono_left ?_ curry_le_prod
    have h1 : Tendsto (fun n : ι à E => g' n.2 - f' n.1 n.2) (l ÃË¢ ð x) (ð 0) := by
      rw [Metric.tendstoUniformlyOnFilter_iff] at hf'
      exact Metric.tendsto_nhds.mpr fun ε hε => by simpa using hf' ε hε
    have h2 : Tendsto (fun n : ι => g' x - f' n x) l (ð 0) := by
      rw [Metric.tendsto_nhds] at h1 â¢
      exact fun ε hε => (h1 ε hε).curry.mono fun n hn => hn.self_of_nhds
    -- Squeeze between `0` and the operator norms of `g' x - f' n x`, which tend to `0`.
    refine squeeze_zero_norm ?_
      (tendsto_zero_iff_norm_tendsto_zero.mp (tendsto_fst.comp (h2.prod_map tendsto_id)))
    intro n
    simp_rw [norm_smul, norm_inv, RCLike.norm_coe_norm]
    by_cases hx : x = n.2; · simp [hx]
    have hnx : 0 < ân.2 - xâ := by
      rw [norm_pos_iff]; intro hx'; exact hx (eq_of_sub_eq_zero hx').symm
    rw [inv_mul_le_iff hnx, mul_comm]
    simp only [Function.comp_apply, Prod.map_apply']
    rw [norm_sub_rev]
    exact (f' n.1 x - g' x).le_opNorm (n.2 - x)
/-- Version of `hasFDerivAt_of_tendstoUniformlyOnFilter` for local uniform convergence of the
derivatives on an open set containing `x`. -/
theorem hasFDerivAt_of_tendstoLocallyUniformlyOn [NeBot l] {s : Set E} (hs : IsOpen s)
    (hf' : TendstoLocallyUniformlyOn f' g' l s) (hf : â n, â x â s, HasFDerivAt (f n) (f' n x) x)
    (hfg : â x â s, Tendsto (fun n => f n x) l (ð (g x))) (hx : x â s) :
    HasFDerivAt g (g' x) x := by
  -- `s` is open, so it is a neighborhood of `x`; transfer all hypotheses to filters at `x`.
  have h1 : s â ð x := hs.mem_nhds hx
  have h3 : Set.univ ÃË¢ s â l ÃË¢ ð x := by simp only [h1, prod_mem_prod_iff, univ_mem, and_self_iff]
  have h4 : âá¶ n : ι à E in l ÃË¢ ð x, HasFDerivAt (f n.1) (f' n.1 n.2) n.2 :=
    eventually_of_mem h3 fun âšn, zâ© âš_, hzâ© => hf n z hz
  refine hasFDerivAt_of_tendstoUniformlyOnFilter ?_ h4 (eventually_of_mem h1 hfg)
  simpa [IsOpen.nhdsWithin_eq hs hx] using tendstoLocallyUniformlyOn_iff_filter.mp hf' x hx
/-- A slight variant of `hasFDerivAt_of_tendstoLocallyUniformlyOn` with the assumption stated
in terms of `DifferentiableOn` rather than `HasFDerivAt`. This makes a few proofs nicer in
complex analysis where holomorphicity is assumed but the derivative is not known a priori. -/
theorem hasFDerivAt_of_tendsto_locally_uniformly_on' [NeBot l] {s : Set E} (hs : IsOpen s)
    (hf' : TendstoLocallyUniformlyOn (fderiv ð â f) g' l s) (hf : â n, DifferentiableOn ð (f n) s)
    (hfg : â x â s, Tendsto (fun n => f n x) l (ð (g x))) (hx : x â s) :
    HasFDerivAt g (g' x) x := by
  -- Recover `HasFDerivAt` from differentiability on the open set `s`.
  refine hasFDerivAt_of_tendstoLocallyUniformlyOn hs hf' (fun n z hz => ?_) hfg hx
  exact ((hf n z hz).differentiableAt (hs.mem_nhds hz)).hasFDerivAt
/-- `(d/dx) lim_{n â â} f n x = lim_{n â â} f' n x` when the `f' n` converge
_uniformly_ to their limit on an open set containing `x`. -/
theorem hasFDerivAt_of_tendstoUniformlyOn [NeBot l] {s : Set E} (hs : IsOpen s)
    (hf' : TendstoUniformlyOn f' g' l s)
    (hf : â n : ι, â x : E, x â s â HasFDerivAt (f n) (f' n x) x)
    (hfg : â x : E, x â s â Tendsto (fun n => f n x) l (ð (g x))) :
    â x : E, x â s â HasFDerivAt g (g' x) x := fun _ =>
  -- Uniform convergence on `s` implies local uniform convergence on `s`.
  hasFDerivAt_of_tendstoLocallyUniformlyOn hs hf'.tendstoLocallyUniformlyOn hf hfg
/-- `(d/dx) lim_{n â â} f n x = lim_{n â â} f' n x` when the `f' n` converge
_uniformly_ to their limit. -/
theorem hasFDerivAt_of_tendstoUniformly [NeBot l] (hf' : TendstoUniformly f' g' l)
    (hf : â n : ι, â x : E, HasFDerivAt (f n) (f' n x) x)
    (hfg : â x : E, Tendsto (fun n => f n x) l (ð (g x))) : â x : E, HasFDerivAt g (g' x) x := by
  intro x
  -- Relativize all hypotheses to `Set.univ` and apply the on-set version.
  have hf : â n : ι, â x : E, x â Set.univ â HasFDerivAt (f n) (f' n x) x := by simp [hf]
  have hfg : â x : E, x â Set.univ â Tendsto (fun n => f n x) l (ð (g x)) := by simp [hfg]
  have hf' : TendstoUniformlyOn f' g' l Set.univ := by rwa [tendstoUniformlyOn_univ]
  exact hasFDerivAt_of_tendstoUniformlyOn isOpen_univ hf' hf hfg x (Set.mem_univ x)
end LimitsOfDerivatives
section deriv
/-! ### `deriv` versions of above theorems
In this section, we provide `deriv` equivalents of the `fderiv` lemmas in the previous section.
-/
variable {ι : Type*} {l : Filter ι} {ð : Type*} [RCLike ð] {G : Type*} [NormedAddCommGroup G]
[NormedSpace ð G] {f : ι â ð â G} {g : ð â G} {f' : ι â ð â G} {g' : ð â G} {x : ð}
/-- If our derivatives converge uniformly, then the Fréchet derivatives converge uniformly -/
theorem UniformCauchySeqOnFilter.one_smulRight {l' : Filter ð}
    (hf' : UniformCauchySeqOnFilter f' l l') :
    UniformCauchySeqOnFilter (fun n => fun z => (1 : ð âL[ð] ð).smulRight (f' n z)) l l' := by
  -- The tricky part of this proof is that operator norms are written in terms of `â€` whereas
  -- metrics are written in terms of `<`. So we need to shrink `ε` utilizing the archimedean
  -- property of `â`
  rw [SeminormedAddGroup.uniformCauchySeqOnFilter_iff_tendstoUniformlyOnFilter_zero,
    Metric.tendstoUniformlyOnFilter_iff] at hf' â¢
  intro ε hε
  -- Pick an intermediate `q` with `ε-bound > q > distance`, so `opNorm †q < ε`.
  obtain âšq, hq, hq'â© := exists_between hε.lt
  apply (hf' q hq).mono
  intro n hn
  refine lt_of_le_of_lt ?_ hq'
  simp only [dist_eq_norm, Pi.zero_apply, zero_sub, norm_neg] at hn â¢
  refine ContinuousLinearMap.opNorm_le_bound _ hq.le ?_
  intro z
  simp only [ContinuousLinearMap.coe_sub', Pi.sub_apply, ContinuousLinearMap.smulRight_apply,
    ContinuousLinearMap.one_apply]
  rw [â smul_sub, norm_smul, mul_comm]
  gcongr
/-- `deriv` version of `uniformCauchySeqOnFilter_of_fderiv`. -/
theorem uniformCauchySeqOnFilter_of_deriv (hf' : UniformCauchySeqOnFilter f' l (ð x))
    (hf : âá¶ n : ι à ð in l ÃË¢ ð x, HasDerivAt (f n.1) (f' n.1 n.2) n.2)
    (hfg : Cauchy (map (fun n => f n x) l)) : UniformCauchySeqOnFilter f l (ð x) := by
  -- Reduce to the Fréchet-derivative statement via `smulRight`.
  simp_rw [hasDerivAt_iff_hasFDerivAt] at hf
  exact uniformCauchySeqOnFilter_of_fderiv hf'.one_smulRight hf hfg
/-- `deriv` version of `uniformCauchySeqOn_ball_of_fderiv`. -/
theorem uniformCauchySeqOn_ball_of_deriv {r : â} (hf' : UniformCauchySeqOn f' l (Metric.ball x r))
    (hf : â n : ι, â y : ð, y â Metric.ball x r â HasDerivAt (f n) (f' n y) y)
    (hfg : Cauchy (map (fun n => f n x) l)) : UniformCauchySeqOn f l (Metric.ball x r) := by
  -- Translate the `deriv` hypotheses to `fderiv` form and reuse the Fréchet version.
  simp_rw [hasDerivAt_iff_hasFDerivAt] at hf
  rw [uniformCauchySeqOn_iff_uniformCauchySeqOnFilter] at hf'
  have hf' :
    UniformCauchySeqOn (fun n => fun z => (1 : ð âL[ð] ð).smulRight (f' n z)) l
      (Metric.ball x r) := by
    rw [uniformCauchySeqOn_iff_uniformCauchySeqOnFilter]
    exact hf'.one_smulRight
  exact uniformCauchySeqOn_ball_of_fderiv hf' hf hfg
/-- `deriv` version of `hasFDerivAt_of_tendstoUniformlyOnFilter`. -/
theorem hasDerivAt_of_tendstoUniformlyOnFilter [NeBot l]
    (hf' : TendstoUniformlyOnFilter f' g' l (ð x))
    (hf : âá¶ n : ι à ð in l ÃË¢ ð x, HasDerivAt (f n.1) (f' n.1 n.2) n.2)
    (hfg : âá¶ y in ð x, Tendsto (fun n => f n y) l (ð (g y))) : HasDerivAt g (g' x) x := by
  -- The first part of the proof rewrites `hf` and the goal to be functions so that Lean
  -- can recognize them when we apply `hasFDerivAt_of_tendstoUniformlyOnFilter`
  let F' n z := (1 : ð âL[ð] ð).smulRight (f' n z)
  let G' z := (1 : ð âL[ð] ð).smulRight (g' z)
  simp_rw [hasDerivAt_iff_hasFDerivAt] at hf â¢
  -- Now we need to rewrite hf' in terms of `ContinuousLinearMap`s. The tricky part is that
  -- operator norms are written in terms of `â€` whereas metrics are written in terms of `<`. So we
  -- need to shrink `ε` utilizing the archimedean property of `â`
  have hf' : TendstoUniformlyOnFilter F' G' l (ð x) := by
    rw [Metric.tendstoUniformlyOnFilter_iff] at hf' â¢
    intro ε hε
    obtain âšq, hq, hq'â© := exists_between hε.lt
    apply (hf' q hq).mono
    intro n hn
    refine lt_of_le_of_lt ?_ hq'
    simp only [dist_eq_norm] at hn â¢
    refine ContinuousLinearMap.opNorm_le_bound _ hq.le ?_
    intro z
    simp only [F', G', ContinuousLinearMap.coe_sub', Pi.sub_apply,
      ContinuousLinearMap.smulRight_apply, ContinuousLinearMap.one_apply]
    rw [â smul_sub, norm_smul, mul_comm]
    gcongr
  exact hasFDerivAt_of_tendstoUniformlyOnFilter hf' hf hfg
/-- `deriv` version of `hasFDerivAt_of_tendstoLocallyUniformlyOn`. -/
theorem hasDerivAt_of_tendstoLocallyUniformlyOn [NeBot l] {s : Set ð} (hs : IsOpen s)
    (hf' : TendstoLocallyUniformlyOn f' g' l s)
    (hf : âá¶ n in l, â x â s, HasDerivAt (f n) (f' n x) x)
    (hfg : â x â s, Tendsto (fun n => f n x) l (ð (g x))) (hx : x â s) : HasDerivAt g (g' x) x := by
  -- `s` is a neighborhood of `x`; package `hf` as an eventual statement on `l ÃË¢ ð x`.
  have h1 : s â ð x := hs.mem_nhds hx
  have h2 : âá¶ n : ι à ð in l ÃË¢ ð x, HasDerivAt (f n.1) (f' n.1 n.2) n.2 :=
    eventually_prod_iff.2 âš_, hf, fun x => x â s, h1, fun {n} => idâ©
  refine hasDerivAt_of_tendstoUniformlyOnFilter ?_ h2 (eventually_of_mem h1 hfg)
  simpa [IsOpen.nhdsWithin_eq hs hx] using tendstoLocallyUniformlyOn_iff_filter.mp hf' x hx
/-- A slight variant of `hasDerivAt_of_tendstoLocallyUniformlyOn` with the assumption stated in
terms of `DifferentiableOn` rather than `HasDerivAt`. This makes a few proofs nicer in complex
analysis where holomorphicity is assumed but the derivative is not known a priori. -/
theorem hasDerivAt_of_tendsto_locally_uniformly_on' [NeBot l] {s : Set ð} (hs : IsOpen s)
    (hf' : TendstoLocallyUniformlyOn (deriv â f) g' l s)
    (hf : âá¶ n in l, DifferentiableOn ð (f n) s)
    (hfg : â x â s, Tendsto (fun n => f n x) l (ð (g x))) (hx : x â s) : HasDerivAt g (g' x) x := by
  -- Recover eventual `HasDerivAt` from eventual differentiability on the open set `s`.
  refine hasDerivAt_of_tendstoLocallyUniformlyOn hs hf' ?_ hfg hx
  filter_upwards [hf] with n h z hz using ((h z hz).differentiableAt (hs.mem_nhds hz)).hasDerivAt
/-- `deriv` version of `hasFDerivAt_of_tendstoUniformlyOn`. -/
theorem hasDerivAt_of_tendstoUniformlyOn [NeBot l] {s : Set ð} (hs : IsOpen s)
    (hf' : TendstoUniformlyOn f' g' l s)
    (hf : âá¶ n in l, â x : ð, x â s â HasDerivAt (f n) (f' n x) x)
    (hfg : â x : ð, x â s â Tendsto (fun n => f n x) l (ð (g x))) :
    â x : ð, x â s â HasDerivAt g (g' x) x := fun _ =>
  -- Uniform convergence on `s` implies local uniform convergence on `s`.
  hasDerivAt_of_tendstoLocallyUniformlyOn hs hf'.tendstoLocallyUniformlyOn hf hfg
/-- `deriv` version of `hasFDerivAt_of_tendstoUniformly`. -/
theorem hasDerivAt_of_tendstoUniformly [NeBot l] (hf' : TendstoUniformly f' g' l)
    (hf : âá¶ n in l, â x : ð, HasDerivAt (f n) (f' n x) x)
    (hfg : â x : ð, Tendsto (fun n => f n x) l (ð (g x))) : â x : ð, HasDerivAt g (g' x) x := by
  intro x
  -- Relativize all hypotheses to `Set.univ` and apply the on-set version.
  have hf : âá¶ n in l, â x : ð, x â Set.univ â HasDerivAt (f n) (f' n x) x := by
    filter_upwards [hf] with n h x _ using h x
  have hfg : â x : ð, x â Set.univ â Tendsto (fun n => f n x) l (ð (g x)) := by simp [hfg]
  have hf' : TendstoUniformlyOn f' g' l Set.univ := by rwa [tendstoUniformlyOn_univ]
  exact hasDerivAt_of_tendstoUniformlyOn isOpen_univ hf' hf hfg x (Set.mem_univ x)
end deriv
|
Analysis\Calculus\AddTorsor\AffineMap.lean | /-
Copyright (c) 2021 Oliver Nash. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Oliver Nash
-/
import Mathlib.Analysis.NormedSpace.ContinuousAffineMap
import Mathlib.Analysis.Calculus.ContDiff.Basic
/-!
# Smooth affine maps
This file contains results about smoothness of affine maps.
## Main definitions:
* `ContinuousAffineMap.contDiff`: a continuous affine map is smooth
-/
namespace ContinuousAffineMap
variable {ð V W : Type*} [NontriviallyNormedField ð]
variable [NormedAddCommGroup V] [NormedSpace ð V]
variable [NormedAddCommGroup W] [NormedSpace ð W]
/-- A continuous affine map between normed vector spaces is smooth. -/
theorem contDiff {n : ââ} (f : V âᎬ[ð] W) : ContDiff ð n f := by
  -- Decompose `f` as its continuous linear part plus a constant; both summands are smooth.
  rw [f.decomp]
  apply f.contLinear.contDiff.add
  exact contDiff_const
end ContinuousAffineMap
|
Analysis\Calculus\AddTorsor\Coord.lean | /-
Copyright (c) 2021 Oliver Nash. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Oliver Nash
-/
import Mathlib.Analysis.Calculus.AddTorsor.AffineMap
import Mathlib.Analysis.NormedSpace.AddTorsorBases
/-!
# Barycentric coordinates are smooth
-/
variable {ι ð E P : Type*} [NontriviallyNormedField ð] [CompleteSpace ð]
variable [NormedAddCommGroup E] [NormedSpace ð E]
variable [MetricSpace P] [NormedAddTorsor E P]
variable [FiniteDimensional ð E]
/-- Each barycentric coordinate of an affine basis is smooth, since it is a continuous affine
map into the scalar field. -/
theorem smooth_barycentric_coord (b : AffineBasis ι ð E) (i : ι) : ContDiff ð †(b.coord i) :=
  (âšb.coord i, continuous_barycentric_coord b iâ© : E âᎬ[ð] ð).contDiff
|
Analysis\Calculus\BumpFunction\Basic.lean | /-
Copyright (c) 2020 Sébastien Gouëzel. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Sébastien Gouëzel, Yury Kudryashov
-/
import Mathlib.Analysis.Calculus.ContDiff.Basic
import Mathlib.Analysis.Normed.Module.FiniteDimension
/-!
# Infinitely smooth "bump" functions
A smooth bump function is an infinitely smooth function `f : E â â` supported on a ball
that is equal to `1` on a ball of smaller radius.
These functions have many uses in real analysis. E.g.,
- they can be used to construct a smooth partition of unity which is a very useful tool;
- they can be used to approximate a continuous function by infinitely smooth functions.
There are two classes of spaces where bump functions are guaranteed to exist:
inner product spaces and finite dimensional spaces.
In this file we define a typeclass `HasContDiffBump`
saying that a normed space has a family of smooth bump functions with certain properties.
We also define a structure `ContDiffBump` that holds the center and radii of the balls from above.
An element `f : ContDiffBump c` can be coerced to a function which is an infinitely smooth function
such that
- `f` is equal to `1` in `Metric.closedBall c f.rIn`;
- `support f = Metric.ball c f.rOut`;
- `0 †f x †1` for all `x`.
## Main Definitions
- `ContDiffBump (c : E)`: a structure holding data needed to construct
an infinitely smooth bump function.
- `ContDiffBumpBase (E : Type*)`: a family of infinitely smooth bump functions
that can be used to construct coercion of a `ContDiffBump (c : E)`
to a function.
- `HasContDiffBump (E : Type*)`: a typeclass saying that `E` has a `ContDiffBumpBase`.
Two instances of this typeclass (for inner product spaces and for finite dimensional spaces)
are provided elsewhere.
## Keywords
smooth function, smooth bump function
-/
noncomputable section
open Function Set Filter
open scoped Topology Filter
variable {E X : Type*}
/-- `f : ContDiffBump c`, where `c` is a point in a normed vector space, is a
bundled smooth function such that
- `f` is equal to `1` in `Metric.closedBall c f.rIn`;
- `support f = Metric.ball c f.rOut`;
- `0 †f x †1` for all `x`.
The structure `ContDiffBump` contains the data required to construct the function:
real numbers `rIn`, `rOut`, and proofs of `0 < rIn < rOut`. The function itself is available through
`CoeFun` when the space is nice enough, i.e., satisfies the `HasContDiffBump` typeclass. -/
structure ContDiffBump (c : E) where
  /-- real numbers `0 < rIn < rOut` -/
  (rIn rOut : â)
  /-- the inner radius is positive -/
  rIn_pos : 0 < rIn
  /-- the inner radius is strictly smaller than the outer radius -/
  rIn_lt_rOut : rIn < rOut
/-- The base function from which one will construct a family of bump functions. One could
add more properties if they are useful and satisfied in the examples of inner product spaces
and finite dimensional vector spaces, notably derivative norm control in terms of `R - 1`.
TODO: do we ever need `f x = 1 â âxâ †1`? -/
-- Porting note(#5171): linter not yet ported; was @[nolint has_nonempty_instance]
structure ContDiffBumpBase (E : Type*) [NormedAddCommGroup E] [NormedSpace â E] where
  /-- The function underlying this family of bump functions -/
  toFun : â â E â â
  /-- all values lie in `[0, 1]` -/
  mem_Icc : â (R : â) (x : E), toFun R x â Icc (0 : â) 1
  /-- the function is even in the space variable -/
  symmetric : â (R : â) (x : E), toFun R (-x) = toFun R x
  /-- the function is smooth jointly in `(R, x)` on `R > 1` -/
  smooth : ContDiffOn â †(uncurry toFun) (Ioi (1 : â) ÃË¢ (univ : Set E))
  /-- for `R > 1`, the function equals `1` on the closed unit ball -/
  eq_one : â R : â, 1 < R â â x : E, âxâ †1 â toFun R x = 1
  /-- for `R > 1`, the support is the open ball of radius `R` around the origin -/
  support : â R : â, 1 < R â Function.support (toFun R) = Metric.ball (0 : E) R
/-- A class registering that a real vector space admits bump functions. This will be instantiated
first for inner product spaces, and then for finite-dimensional normed spaces.
We use a specific class instead of `Nonempty (ContDiffBumpBase E)` for performance reasons. -/
class HasContDiffBump (E : Type*) [NormedAddCommGroup E] [NormedSpace â E] : Prop where
  /-- a witness that some family of bump functions exists on `E` -/
  out : Nonempty (ContDiffBumpBase E)
/-- In a space with `C^â` bump functions, register some function that will be used as a basis
to construct bump functions of arbitrary size around any point. -/
def someContDiffBumpBase (E : Type*) [NormedAddCommGroup E] [NormedSpace â E]
[hb : HasContDiffBump E] : ContDiffBumpBase E :=
Nonempty.some hb.out
namespace ContDiffBump

/-- The outer radius of a bump function is positive. -/
theorem rOut_pos {c : E} (f : ContDiffBump c) : 0 < f.rOut :=
  f.rIn_pos.trans f.rIn_lt_rOut

/-- The ratio of the outer to the inner radius is greater than `1`. -/
theorem one_lt_rOut_div_rIn {c : E} (f : ContDiffBump c) : 1 < f.rOut / f.rIn := by
  rw [one_lt_div f.rIn_pos]
  exact f.rIn_lt_rOut

instance (c : E) : Inhabited (ContDiffBump c) :=
  âšâš1, 2, zero_lt_one, one_lt_twoâ©â©

variable [NormedAddCommGroup E] [NormedSpace â E] [NormedAddCommGroup X] [NormedSpace â X]
  [HasContDiffBump E] {c : E} (f : ContDiffBump c) {x : E} {n : ââ}

/-- The function defined by `f : ContDiffBump c`. Use automatic coercion to
function instead. -/
@[coe] def toFun {c : E} (f : ContDiffBump c) : E â â :=
  -- rescale the base family: the parameter is `rOut / rIn` and the point is `(x - c) / rIn`
  (someContDiffBumpBase E).toFun (f.rOut / f.rIn) â fun x ⊠(f.rInâ»Â¹ ⢠(x - c))

instance : CoeFun (ContDiffBump c) fun _ => E â â :=
  âštoFunâ©

/-- Unfolding lemma for the coercion of `ContDiffBump` to a function. -/
protected theorem apply (x : E) :
    f x = (someContDiffBumpBase E).toFun (f.rOut / f.rIn) (f.rInâ»Â¹ ⢠(x - c)) :=
  rfl

/-- A bump function is symmetric around its center. -/
protected theorem sub (x : E) : f (c - x) = f (c + x) := by
  simp [f.apply, ContDiffBumpBase.symmetric]

/-- A bump function centered at `0` is even. -/
protected theorem neg (f : ContDiffBump (0 : E)) (x : E) : f (-x) = f x := by
  simp_rw [â zero_sub, f.sub, zero_add]

open Metric

/-- A bump function equals `1` on the closed inner ball. -/
theorem one_of_mem_closedBall (hx : x â closedBall c f.rIn) : f x = 1 := by
  apply ContDiffBumpBase.eq_one _ _ f.one_lt_rOut_div_rIn
  simpa only [norm_smul, Real.norm_eq_abs, abs_inv, abs_of_nonneg f.rIn_pos.le, â div_eq_inv_mul,
    div_le_one f.rIn_pos] using mem_closedBall_iff_norm.1 hx

/-- A bump function is nonnegative. -/
theorem nonneg : 0 †f x :=
  (ContDiffBumpBase.mem_Icc (someContDiffBumpBase E) _ _).1

/-- A version of `ContDiffBump.nonneg` with `x` explicit -/
theorem nonneg' (x : E) : 0 †f x := f.nonneg

/-- A bump function is bounded above by `1`. -/
theorem le_one : f x †1 :=
  (ContDiffBumpBase.mem_Icc (someContDiffBumpBase E) _ _).2

/-- The support of a bump function is the open outer ball. -/
theorem support_eq : Function.support f = Metric.ball c f.rOut := by
  simp only [toFun, support_comp_eq_preimage, ContDiffBumpBase.support _ _ f.one_lt_rOut_div_rIn]
  ext x
  simp only [mem_ball_iff_norm, sub_zero, norm_smul, mem_preimage, Real.norm_eq_abs, abs_inv,
    abs_of_pos f.rIn_pos, â div_eq_inv_mul, div_lt_div_right f.rIn_pos]

/-- The topological support of a bump function is the closed outer ball. -/
theorem tsupport_eq : tsupport f = closedBall c f.rOut := by
  simp_rw [tsupport, f.support_eq, closure_ball _ f.rOut_pos.ne']

/-- A bump function is positive on the open outer ball. -/
theorem pos_of_mem_ball (hx : x â ball c f.rOut) : 0 < f x :=
  f.nonneg.lt_of_ne' <| by rwa [â support_eq, mem_support] at hx

/-- A bump function vanishes at distance at least `rOut` from the center. -/
theorem zero_of_le_dist (hx : f.rOut †dist x c) : f x = 0 := by
  rwa [â nmem_support, support_eq, mem_ball, not_lt]

protected theorem hasCompactSupport [FiniteDimensional â E] : HasCompactSupport f := by
  simp_rw [HasCompactSupport, f.tsupport_eq, isCompact_closedBall]

/-- A bump function is eventually equal to `1` near any point of the open inner ball. -/
theorem eventuallyEq_one_of_mem_ball (h : x â ball c f.rIn) : f =á¶ [ð x] 1 :=
  mem_of_superset (closedBall_mem_nhds_of_mem h) fun _ ⊠f.one_of_mem_closedBall

/-- A bump function is eventually equal to `1` near its center. -/
theorem eventuallyEq_one : f =á¶ [ð c] 1 :=
  f.eventuallyEq_one_of_mem_ball (mem_ball_self f.rIn_pos)

/-- `ContDiffBump` is `ðâ¿` in all its arguments. -/
protected theorem _root_.ContDiffWithinAt.contDiffBump {c g : X â E} {s : Set X}
    {f : â x, ContDiffBump (c x)} {x : X} (hc : ContDiffWithinAt â n c s x)
    (hr : ContDiffWithinAt â n (fun x => (f x).rIn) s x)
    (hR : ContDiffWithinAt â n (fun x => (f x).rOut) s x)
    (hg : ContDiffWithinAt â n g s x) :
    ContDiffWithinAt â n (fun x => f x (g x)) s x := by
  -- rewrite as the smooth base family composed with a smooth map into the parameter space
  change ContDiffWithinAt â n (uncurry (someContDiffBumpBase E).toFun â fun x : X =>
    ((f x).rOut / (f x).rIn, (f x).rInâ»Â¹ ⢠(g x - c x))) s x
  refine (((someContDiffBumpBase E).smooth.contDiffAt ?_).of_le le_top).comp_contDiffWithinAt x ?_
  · exact prod_mem_nhds (Ioi_mem_nhds (f x).one_lt_rOut_div_rIn) univ_mem
  · exact (hR.div hr (f x).rIn_pos.ne').prod ((hr.inv (f x).rIn_pos.ne').smul (hg.sub hc))

/-- `ContDiffBump` is `ðâ¿` in all its arguments. -/
protected nonrec theorem _root_.ContDiffAt.contDiffBump {c g : X â E} {f : â x, ContDiffBump (c x)}
    {x : X} (hc : ContDiffAt â n c x) (hr : ContDiffAt â n (fun x => (f x).rIn) x)
    (hR : ContDiffAt â n (fun x => (f x).rOut) x) (hg : ContDiffAt â n g x) :
    ContDiffAt â n (fun x => f x (g x)) x :=
  hc.contDiffBump hr hR hg

/-- `ContDiffBump` is `ðâ¿` in all its arguments (global version). -/
theorem _root_.ContDiff.contDiffBump {c g : X â E} {f : â x, ContDiffBump (c x)}
    (hc : ContDiff â n c) (hr : ContDiff â n fun x => (f x).rIn)
    (hR : ContDiff â n fun x => (f x).rOut) (hg : ContDiff â n g) :
    ContDiff â n fun x => f x (g x) := by
  rw [contDiff_iff_contDiffAt] at *
  exact fun x => (hc x).contDiffBump (hr x) (hR x) (hg x)

protected theorem contDiff : ContDiff â n f :=
  contDiff_const.contDiffBump contDiff_const contDiff_const contDiff_id

protected theorem contDiffAt : ContDiffAt â n f x :=
  f.contDiff.contDiffAt

protected theorem contDiffWithinAt {s : Set E} : ContDiffWithinAt â n f s x :=
  f.contDiffAt.contDiffWithinAt

protected theorem continuous : Continuous f :=
  contDiff_zero.mp f.contDiff

end ContDiffBump
|
Analysis\Calculus\BumpFunction\Convolution.lean | /-
Copyright (c) 2022 Floris van Doorn. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Floris van Doorn
-/
import Mathlib.Analysis.Convolution
import Mathlib.Analysis.Calculus.BumpFunction.Normed
import Mathlib.MeasureTheory.Integral.Average
import Mathlib.MeasureTheory.Covering.Differentiation
import Mathlib.MeasureTheory.Covering.BesicovitchVectorSpace
import Mathlib.MeasureTheory.Measure.Haar.Unique
/-!
# Convolution with a bump function
In this file we prove lemmas about convolutions `(Ï.normed ÎŒ â[lsmul â â, ÎŒ] g) xâ`,
where `Ï : ContDiffBump 0` is a smooth bump function.
We prove that this convolution is equal to `g xâ`
if `g` is a constant on `Metric.ball xâ Ï.rOut`.
We also provide estimates in the case if `g x` is close to `g xâ` on this ball.
## Main results
- `ContDiffBump.convolution_tendsto_right_of_continuous`:
Let `g` be a continuous function; let `Ï i` be a family of `ContDiffBump 0` functions.
If `(Ï i).rOut` tends to zero along a filter `l`,
then `((Ï i).normed ÎŒ â[lsmul â â, ÎŒ] g) xâ` tends to `g xâ` along the same filter.
- `ContDiffBump.convolution_tendsto_right`: generalization of the above lemma.
- `ContDiffBump.ae_convolution_tendsto_right_of_locallyIntegrable`: let `g` be a locally
integrable function. Then the convolution of `g` with a family of bump functions with
support tending to `0` converges almost everywhere to `g`.
## Keywords
convolution, smooth function, bump function
-/
universe uG uE'
open ContinuousLinearMap Metric MeasureTheory Filter Function Measure Set
open scoped Convolution Topology
namespace ContDiffBump

variable {G : Type uG} {E' : Type uE'} [NormedAddCommGroup E'] {g : G â E'} [MeasurableSpace G]
  {ÎŒ : MeasureTheory.Measure G} [NormedSpace â E'] [NormedAddCommGroup G] [NormedSpace â G]
  [HasContDiffBump G] [CompleteSpace E'] {Ï : ContDiffBump (0 : G)} {xâ : G}

/-- If `Ï` is a bump function, compute `(Ï â g) xâ`
if `g` is constant on `Metric.ball xâ Ï.rOut`. -/
theorem convolution_eq_right {xâ : G} (hg : â x â ball xâ Ï.rOut, g x = g xâ) :
    (Ï â[lsmul â â, ÎŒ] g : G â E') xâ = integral ÎŒ Ï â¢ g xâ := by
  simp_rw [convolution_eq_right' _ Ï.support_eq.subset hg, lsmul_apply, integral_smul_const]

variable [BorelSpace G]
variable [IsLocallyFiniteMeasure Ό] [Ό.IsOpenPosMeasure]
variable [FiniteDimensional â G]

/-- If `Ï` is a normed bump function, compute `Ï â g`
if `g` is constant on `Metric.ball xâ Ï.rOut`. -/
theorem normed_convolution_eq_right {xâ : G} (hg : â x â ball xâ Ï.rOut, g x = g xâ) :
    (Ï.normed ÎŒ â[lsmul â â, ÎŒ] g : G â E') xâ = g xâ := by
  rw [convolution_eq_right' _ Ï.support_normed_eq.subset hg]
  exact integral_normed_smul Ï ÎŒ (g xâ)

variable [Ό.IsAddLeftInvariant]

/-- If `Ï` is a normed bump function, approximate `(Ï â g) xâ`
if `g` is near `g xâ` on a ball with radius `Ï.rOut` around `xâ`. -/
theorem dist_normed_convolution_le {xâ : G} {ε : â} (hmg : AEStronglyMeasurable g ÎŒ)
    (hg : â x â ball xâ Ï.rOut, dist (g x) (g xâ) †ε) :
    dist ((Ï.normed ÎŒ â[lsmul â â, ÎŒ] g : G â E') xâ) (g xâ) †ε :=
  dist_convolution_le (by simp_rw [â dist_self (g xâ), hg xâ (mem_ball_self Ï.rOut_pos)])
    Ï.support_normed_eq.subset Ï.nonneg_normed Ï.integral_normed hmg hg

/-- `(Ï i â g i) (k i)` tends to `zâ` as `i` tends to some filter `l` if
* `Ï` is a sequence of normed bump functions
  such that `(Ï i).rOut` tends to `0` as `i` tends to `l`;
* `g i` is `Ό`-a.e. strongly measurable as `i` tends to `l`;
* `g i x` tends to `zâ` as `(i, x)` tends to `l ÃË¢ ð xâ`;
* `k i` tends to `xâ`. -/
nonrec theorem convolution_tendsto_right {ι} {Ï : ι â ContDiffBump (0 : G)} {g : ι â G â E'}
    {k : ι â G} {xâ : G} {zâ : E'} {l : Filter ι} (hÏ : Tendsto (fun i => (Ï i).rOut) l (ð 0))
    (hig : âá¶ i in l, AEStronglyMeasurable (g i) ÎŒ) (hcg : Tendsto (uncurry g) (l ÃË¢ ð xâ) (ð zâ))
    (hk : Tendsto k l (ð xâ)) :
    Tendsto (fun i => ((Ï i).normed ÎŒ â[lsmul â â, ÎŒ] g i) (k i)) l (ð zâ) :=
  convolution_tendsto_right (eventually_of_forall fun i => (Ï i).nonneg_normed)
    (eventually_of_forall fun i => (Ï i).integral_normed) (tendsto_support_normed_smallSets hÏ) hig
    hcg hk

/-- Special case of `ContDiffBump.convolution_tendsto_right` where `g` is continuous,
and the limit is taken only in the first function. -/
theorem convolution_tendsto_right_of_continuous {ι} {Ï : ι â ContDiffBump (0 : G)} {l : Filter ι}
    (hÏ : Tendsto (fun i => (Ï i).rOut) l (ð 0)) (hg : Continuous g) (xâ : G) :
    Tendsto (fun i => ((Ï i).normed ÎŒ â[lsmul â â, ÎŒ] g) xâ) l (ð (g xâ)) :=
  convolution_tendsto_right hÏ (eventually_of_forall fun _ => hg.aestronglyMeasurable)
    ((hg.tendsto xâ).comp tendsto_snd) tendsto_const_nhds

/-- If a function `g` is locally integrable, then the convolution `Ï i * g` converges almost
everywhere to `g` if `Ï i` is a sequence of bump functions with support tending to `0`, provided
that the ratio between the inner and outer radii of `Ï i` remains bounded. -/
theorem ae_convolution_tendsto_right_of_locallyIntegrable
    {ι} {Ï : ι â ContDiffBump (0 : G)} {l : Filter ι} {K : â}
    (hÏ : Tendsto (fun i ⊠(Ï i).rOut) l (ð 0))
    (h'Ï : âá¶ i in l, (Ï i).rOut †K * (Ï i).rIn) (hg : LocallyIntegrable g ÎŒ) : âáµ xâ âÎŒ,
    Tendsto (fun i ⊠((Ï i).normed ÎŒ â[lsmul â â, ÎŒ] g) xâ) l (ð (g xâ)) := by
  have : IsAddHaarMeasure ÎŒ := âšâ©
  -- By Lebesgue differentiation theorem, the average of `g` on a small ball converges
  -- almost everywhere to the value of `g` as the radius shrinks to zero.
  -- We will see that this set of points satisfies the desired conclusion.
  filter_upwards [(Besicovitch.vitaliFamily ÎŒ).ae_tendsto_average_norm_sub hg] with xâ hâ
  simp only [convolution_eq_swap, lsmul_apply]
  have hÏ' : Tendsto (fun i ⊠(Ï i).rOut) l (ð[>] 0) :=
    tendsto_nhdsWithin_iff.2 âšhÏ, eventually_of_forall (fun i ⊠(Ï i).rOut_pos)â©
  have := (hâ.comp (Besicovitch.tendsto_filterAt ÎŒ xâ)).comp hÏ'
  simp only [Function.comp] at this
  apply tendsto_integral_smul_of_tendsto_average_norm_sub (K ^ (FiniteDimensional.finrank â G)) this
  -- integrability of `g` on closed balls, from local integrability
  · filter_upwards with i using
      hg.integrableOn_isCompact (isCompact_closedBall _ _)
  -- the normalized bump functions integrate to `1`
  · apply tendsto_const_nhds.congr (fun i ⊠?_)
    rw [â integral_neg_eq_self]
    simp only [sub_neg_eq_add, integral_add_left_eq_self, integral_normed]
  -- support control: the reflected bump function is supported in the closed outer ball
  · filter_upwards with i
    change support ((ContDiffBump.normed (Ï i) ÎŒ) â (fun y ⊠xâ - y)) â closedBall xâ (Ï i).rOut
    simp only [support_comp_eq_preimage, support_normed_eq]
    intro x hx
    simp only [mem_preimage, mem_ball, dist_zero_right] at hx
    simpa [dist_eq_norm_sub'] using hx.le
  -- uniform bound in terms of the measure of the ball, using the radius-ratio bound
  · filter_upwards [h'Ï] with i hi x
    rw [abs_of_nonneg (nonneg_normed _ _), addHaar_closedBall_center]
    exact (Ï i).normed_le_div_measure_closedBall_rOut _ _ hi _

end ContDiffBump
|
Analysis\Calculus\BumpFunction\FiniteDimension.lean | /-
Copyright (c) 2022 Sébastien Gouëzel. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Sébastien Gouëzel
-/
import Mathlib.Analysis.Calculus.SmoothSeries
import Mathlib.Analysis.Calculus.BumpFunction.InnerProduct
import Mathlib.Analysis.Convolution
import Mathlib.Analysis.InnerProductSpace.EuclideanDist
import Mathlib.Data.Set.Pointwise.Support
import Mathlib.MeasureTheory.Measure.Haar.NormedSpace
import Mathlib.MeasureTheory.Measure.Haar.Unique
/-!
# Bump functions in finite-dimensional vector spaces
Let `E` be a finite-dimensional real normed vector space. We show that any open set `s` in `E` is
exactly the support of a smooth function taking values in `[0, 1]`,
in `IsOpen.exists_smooth_support_eq`.
Then we use this construction to construct bump functions with nice behavior, by convolving
the indicator function of `closedBall 0 1` with a function as above with `s = ball 0 D`.
-/
noncomputable section
open Set Metric TopologicalSpace Function Asymptotics MeasureTheory FiniteDimensional
ContinuousLinearMap Filter MeasureTheory.Measure Bornology
open scoped Pointwise Topology NNReal Convolution
variable {E : Type*} [NormedAddCommGroup E]
section
variable [NormedSpace â E] [FiniteDimensional â E]
/-- If a set `s` is a neighborhood of `x`, then there exists a smooth function `f` taking
values in `[0, 1]`, supported in `s` and with `f x = 1`. -/
theorem exists_smooth_tsupport_subset {s : Set E} {x : E} (hs : s â ð x) :
    â f : E â â,
      tsupport f â s â§ HasCompactSupport f â§ ContDiff â †f â§ range f â Icc 0 1 â§ f x = 1 := by
  -- choose a Euclidean closed ball around `x` inside `s`
  obtain âšd : â, d_pos : 0 < d, hd : Euclidean.closedBall x d â sâ© :=
    Euclidean.nhds_basis_closedBall.mem_iff.1 hs
  -- take a bump function for the Euclidean structure and pull it back through `toEuclidean`
  let c : ContDiffBump (toEuclidean x) :=
    { rIn := d / 2
      rOut := d
      rIn_pos := half_pos d_pos
      rIn_lt_rOut := half_lt_self d_pos }
  let f : E â â := c â toEuclidean
  have f_supp : f.support â Euclidean.ball x d := by
    intro y hy
    have : toEuclidean y â Function.support c := by
      simpa only [Function.mem_support, Function.comp_apply, Ne] using hy
    rwa [c.support_eq] at this
  have f_tsupp : tsupport f â Euclidean.closedBall x d := by
    rw [tsupport, â Euclidean.closure_ball _ d_pos.ne']
    exact closure_mono f_supp
  refine âšf, f_tsupp.trans hd, ?_, ?_, ?_, ?_â©
  · refine isCompact_of_isClosed_isBounded isClosed_closure ?_
    have : IsBounded (Euclidean.closedBall x d) := Euclidean.isCompact_closedBall.isBounded
    refine this.subset (Euclidean.isClosed_closedBall.closure_subset_iff.2 ?_)
    exact f_supp.trans Euclidean.ball_subset_closedBall
  · apply c.contDiff.comp
    exact ContinuousLinearEquiv.contDiff _
  · rintro t âšy, rflâ©
    exact âšc.nonneg, c.le_oneâ©
  · apply c.one_of_mem_closedBall
    apply mem_closedBall_self
    exact (half_pos d_pos).le
/-- Given an open set `s` in a finite-dimensional real normed vector space, there exists a smooth
function with values in `[0, 1]` whose support is exactly `s`. -/
theorem IsOpen.exists_smooth_support_eq {s : Set E} (hs : IsOpen s) :
    â f : E â â, f.support = s â§ ContDiff â †f â§ Set.range f â Set.Icc 0 1 := by
  /- For any given point `x` in `s`, one can construct a smooth function with support in `s` and
    nonzero at `x`. By second-countability, it follows that we may cover `s` with the supports of
    countably many such functions, say `g i`.
    Then `â i, r i ⢠g i` will be the desired function if `r i` is a sequence of positive numbers
    tending quickly enough to zero. Indeed, this ensures that, for any `k †i`, the `k`-th
    derivative of `r i ⢠g i` is bounded by a prescribed (summable) sequence `u i`. From this, the
    summability of the series and of its successive derivatives follows. -/
  -- if `s` is empty, the zero function works
  rcases eq_empty_or_nonempty s with (rfl | h's)
  · exact
      âšfun _ => 0, Function.support_zero, contDiff_const, by
        simp only [range_const, singleton_subset_iff, left_mem_Icc, zero_le_one]â©
  let ι := { f : E â â // f.support â s â§ HasCompactSupport f â§ ContDiff â †f â§ range f â Icc 0 1 }
  -- cover `s` by the supports of countably many functions `g i`
  obtain âšT, T_count, hTâ© : â T : Set ι, T.Countable â§ â f â T, support (f : E â â) = s := by
    have : â f : ι, (f : E â â).support = s := by
      refine Subset.antisymm (iUnion_subset fun f => f.2.1) ?_
      intro x hx
      rcases exists_smooth_tsupport_subset (hs.mem_nhds hx) with âšf, hfâ©
      let g : ι := âšf, (subset_tsupport f).trans hf.1, hf.2.1, hf.2.2.1, hf.2.2.2.1â©
      have : x â support (g : E â â) := by
        simp only [hf.2.2.2.2, Subtype.coe_mk, mem_support, Ne, one_ne_zero, not_false_iff]
      exact mem_iUnion_of_mem _ this
    simp_rw [â this]
    apply isOpen_iUnion_countable
    rintro âšf, hfâ©
    exact hf.2.2.1.continuous.isOpen_support
  -- enumerate the countable cover by a sequence `g0 : â â ι`
  obtain âšg0, hgâ© : â g0 : â â ι, T = range g0 := by
    apply Countable.exists_eq_range T_count
    rcases eq_empty_or_nonempty T with (rfl | hT)
    · simp only [ι, iUnion_false, iUnion_empty] at hT
      simp only [â hT, mem_empty_iff_false, iUnion_of_empty, iUnion_empty, Set.not_nonempty_empty]
        at h's
    · exact hT
  let g : â â E â â := fun n => (g0 n).1
  have g_s : â n, support (g n) â s := fun n => (g0 n).2.1
  have s_g : â x â s, â n, x â support (g n) := fun x hx ⊠by
    rw [â hT] at hx
    obtain âši, iT, hiâ© : â i â T, x â support (i : E â â) := by
      simpa only [mem_iUnion, exists_prop] using hx
    rw [hg, mem_range] at iT
    rcases iT with âšn, hnâ©
    rw [â hn] at hi
    exact âšn, hiâ©
  have g_smooth : â n, ContDiff â †(g n) := fun n => (g0 n).2.2.2.1
  have g_comp_supp : â n, HasCompactSupport (g n) := fun n => (g0 n).2.2.1
  have g_nonneg : â n x, 0 †g n x := fun n x => ((g0 n).2.2.2.2 (mem_range_self x)).1
  -- pick a summable sequence of positive bounds `ÎŽ` with sum `< 1`
  obtain âšÎŽ, ÎŽpos, c, ÎŽc, c_ltâ© :
      â ÎŽ : â â ââ¥0, (â i : â, 0 < ÎŽ i) â§ â c : NNReal, HasSum ÎŽ c â§ c < 1 :=
    NNReal.exists_pos_sum_of_countable one_ne_zero â
  -- rescale each `g n` so its derivatives up to order `n` are bounded by `ÎŽ n`
  have : â n : â, â r : â, 0 < r â§ â i †n, â x, âiteratedFDeriv â i (r ⢠g n) xâ †Ύ n := by
    intro n
    have : â i, â R, â x, âiteratedFDeriv â i (fun x => g n x) xâ †R := by
      intro i
      have : BddAbove (range fun x => âiteratedFDeriv â i (fun x : E => g n x) xâ) := by
        apply
          ((g_smooth n).continuous_iteratedFDeriv le_top).norm.bddAbove_range_of_hasCompactSupport
        apply HasCompactSupport.comp_left _ norm_zero
        apply (g_comp_supp n).iteratedFDeriv
      rcases this with âšR, hRâ©
      exact âšR, fun x => hR (mem_range_self _)â©
    choose R hR using this
    let M := max (((Finset.range (n + 1)).image R).max' (by simp)) 1
    have ÎŽnpos : 0 < ÎŽ n := ÎŽpos n
    have IR : â i †n, R i †M := by
      intro i hi
      refine le_trans ?_ (le_max_left _ _)
      apply Finset.le_max'
      apply Finset.mem_image_of_mem
      -- Porting note: was
      -- simp only [Finset.mem_range]
      -- linarith
      simpa only [Finset.mem_range, Nat.lt_add_one_iff]
    refine âšMâ»Â¹ * ÎŽ n, by positivity, fun i hi x => ?_â©
    calc
      âiteratedFDeriv â i ((Mâ»Â¹ * ÎŽ n) ⢠g n) xâ = â(Mâ»Â¹ * ÎŽ n) ⢠iteratedFDeriv â i (g n) xâ := by
        rw [iteratedFDeriv_const_smul_apply]; exact (g_smooth n).of_le le_top
      _ = Mâ»Â¹ * ÎŽ n * âiteratedFDeriv â i (g n) xâ := by
        rw [norm_smul _ (iteratedFDeriv â i (g n) x), Real.norm_of_nonneg]; positivity
      _ †Mâ»Â¹ * ÎŽ n * M := (mul_le_mul_of_nonneg_left ((hR i x).trans (IR i hi)) (by positivity))
      _ = ÎŽ n := by field_simp
  choose r rpos hr using this
  have S : â x, Summable fun n => (r n ⢠g n) x := fun x ⊠by
    refine .of_nnnorm_bounded _ ÎŽc.summable fun n => ?_
    rw [â NNReal.coe_le_coe, coe_nnnorm]
    simpa only [norm_iteratedFDeriv_zero] using hr n 0 (zero_le n) x
  -- the sum of the rescaled functions is the desired function
  refine âšfun x => â' n, (r n ⢠g n) x, ?_, ?_, ?_â©
  · apply Subset.antisymm
    · intro x hx
      simp only [Pi.smul_apply, Algebra.id.smul_eq_mul, mem_support, Ne] at hx
      contrapose! hx
      have : â n, g n x = 0 := by
        intro n
        contrapose! hx
        exact g_s n hx
      simp only [this, mul_zero, tsum_zero]
    · intro x hx
      obtain âšn, hnâ© : â n, x â support (g n) := s_g x hx
      have I : 0 < r n * g n x := mul_pos (rpos n) (lt_of_le_of_ne (g_nonneg n x) (Ne.symm hn))
      exact ne_of_gt (tsum_pos (S x) (fun i => mul_nonneg (rpos i).le (g_nonneg i x)) n I)
  · refine
      contDiff_tsum_of_eventually (fun n => (g_smooth n).const_smul (r n))
        (fun k _ => (NNReal.hasSum_coe.2 ÎŽc).summable) ?_
    intro i _
    simp only [Nat.cofinite_eq_atTop, Pi.smul_apply, Algebra.id.smul_eq_mul,
      Filter.eventually_atTop]
    exact âši, fun n hn x => hr _ _ hn _â©
  · rintro - âšy, rflâ©
    refine âštsum_nonneg fun n => mul_nonneg (rpos n).le (g_nonneg n y), le_trans ?_ c_lt.leâ©
    have A : HasSum (fun n => (ÎŽ n : â)) c := NNReal.hasSum_coe.2 ÎŽc
    simp only [Pi.smul_apply, smul_eq_mul, NNReal.val_eq_coe, â A.tsum_eq]
    apply tsum_le_tsum _ (S y) A.summable
    intro n
    apply (le_abs_self _).trans
    simpa only [norm_iteratedFDeriv_zero] using hr n 0 (zero_le n) y
end
section
namespace ExistsContDiffBumpBase
/-- An auxiliary function to construct partitions of unity on finite-dimensional real vector spaces.
It is the characteristic function of the closed unit ball. -/
def Ï : E â â :=
  (closedBall (0 : E) 1).indicator fun _ => (1 : â)

variable [NormedSpace â E] [FiniteDimensional â E]

section HelperDefinitions

variable (E)

/-- There exists a smooth, even function with values in `[0, 1]` whose support is exactly the
open unit ball. -/
theorem u_exists :
    â u : E â â,
      ContDiff â †u â§ (â x, u x â Icc (0 : â) 1) â§ support u = ball 0 1 â§ â x, u (-x) = u x := by
  -- start from a smooth function supported exactly in the unit ball, then symmetrize it
  have A : IsOpen (ball (0 : E) 1) := isOpen_ball
  obtain âšf, f_support, f_smooth, f_rangeâ© :
      â f : E â â, f.support = ball (0 : E) 1 â§ ContDiff â †f â§ Set.range f â Set.Icc 0 1 :=
    A.exists_smooth_support_eq
  have B : â x, f x â Icc (0 : â) 1 := fun x => f_range (mem_range_self x)
  refine âšfun x => (f x + f (-x)) / 2, ?_, ?_, ?_, ?_â©
  · exact (f_smooth.add (f_smooth.comp contDiff_neg)).div_const _
  · intro x
    simp only [mem_Icc]
    constructor
    · linarith [(B x).1, (B (-x)).1]
    · linarith [(B x).2, (B (-x)).2]
  · refine support_eq_iff.2 âšfun x hx => ?_, fun x hx => ?_â©
    · apply ne_of_gt
      have : 0 < f x := by
        apply lt_of_le_of_ne (B x).1 (Ne.symm _)
        rwa [â f_support] at hx
      linarith [(B (-x)).1]
    · have I1 : x â support f := by rwa [f_support]
      have I2 : -x â support f := by
        rw [f_support]
        simpa using hx
      simp only [mem_support, Classical.not_not] at I1 I2
      simp only [I1, I2, add_zero, zero_div]
  · intro x; simp only [add_comm, neg_neg]
variable {E}

/-- An auxiliary function to construct partitions of unity on finite-dimensional real vector spaces,
which is smooth, symmetric, and with support equal to the unit ball. -/
def u (x : E) : â :=
  Classical.choose (u_exists E) x

variable (E)

/-- `u` is smooth. -/
theorem u_smooth : ContDiff â †(u : E â â) :=
  (Classical.choose_spec (u_exists E)).1

/-- `u` is continuous. -/
theorem u_continuous : Continuous (u : E â â) :=
  (u_smooth E).continuous

/-- The support of `u` is the open unit ball. -/
theorem u_support : support (u : E â â) = ball 0 1 :=
  (Classical.choose_spec (u_exists E)).2.2.1

/-- `u` has compact support. -/
theorem u_compact_support : HasCompactSupport (u : E â â) := by
  rw [hasCompactSupport_def, u_support, closure_ball (0 : E) one_ne_zero]
  exact isCompact_closedBall _ _

variable {E}

/-- `u` is nonnegative. -/
theorem u_nonneg (x : E) : 0 †u x :=
  ((Classical.choose_spec (u_exists E)).2.1 x).1

/-- `u` is bounded above by `1`. -/
theorem u_le_one (x : E) : u x †1 :=
  ((Classical.choose_spec (u_exists E)).2.1 x).2

/-- `u` is an even function. -/
theorem u_neg (x : E) : u (-x) = u x :=
  (Classical.choose_spec (u_exists E)).2.2.2 x

variable [MeasurableSpace E] [BorelSpace E]

local notation "Ό" => MeasureTheory.Measure.addHaar

variable (E)

/-- The integral of `u` against the additive Haar measure is positive. -/
theorem u_int_pos : 0 < â« x : E, u x âÎŒ := by
  refine (integral_pos_iff_support_of_nonneg u_nonneg ?_).mpr ?_
  · exact (u_continuous E).integrable_of_hasCompactSupport (u_compact_support E)
  · rw [u_support]; exact measure_ball_pos _ _ zero_lt_one

variable {E}
/-- An auxiliary function to construct partitions of unity on finite-dimensional real vector spaces,
which is smooth, symmetric, with support equal to the ball of radius `D` and integral `1`. -/
def w (D : â) (x : E) : â :=
  ((â« x : E, u x âÎŒ) * |D| ^ finrank â E)â»Â¹ ⢠u (Dâ»Â¹ ⢠x)

/-- Unfolding lemma for `w`. -/
theorem w_def (D : â) :
    (w D : E â â) = fun x => ((â« x : E, u x âÎŒ) * |D| ^ finrank â E)â»Â¹ ⢠u (Dâ»Â¹ ⢠x) := by
  ext1 x; rfl

/-- `w D` is nonnegative. -/
theorem w_nonneg (D : â) (x : E) : 0 †w D x := by
  apply mul_nonneg _ (u_nonneg _)
  apply inv_nonneg.2
  apply mul_nonneg (u_int_pos E).le
  norm_cast
  apply pow_nonneg (abs_nonneg D)

/-- The integrand appearing in the convolution defining `y` is nonnegative. -/
theorem w_mul_Ï_nonneg (D : â) (x y : E) : 0 †w D y * Ï (x - y) :=
  mul_nonneg (w_nonneg D y) (indicator_nonneg (by simp only [zero_le_one, imp_true_iff]) _)

variable (E)

/-- For `D > 0`, the function `w D` has total integral `1`. -/
theorem w_integral {D : â} (Dpos : 0 < D) : â« x : E, w D x âÎŒ = 1 := by
  simp_rw [w, integral_smul]
  rw [integral_comp_inv_smul_of_nonneg ÎŒ (u : E â â) Dpos.le, abs_of_nonneg Dpos.le, mul_comm]
  field_simp [(u_int_pos E).ne']

/-- For `D > 0`, the support of `w D` is the ball of radius `D`. -/
theorem w_support {D : â} (Dpos : 0 < D) : support (w D : E â â) = ball 0 D := by
  have B : D ⢠ball (0 : E) 1 = ball 0 D := by
    rw [smul_unitBall Dpos.ne', Real.norm_of_nonneg Dpos.le]
  have C : D ^ finrank â E â 0 := by
    norm_cast
    exact pow_ne_zero _ Dpos.ne'
  simp only [w_def, Algebra.id.smul_eq_mul, support_mul, support_inv, univ_inter,
    support_comp_inv_smulâ Dpos.ne', u_support, B, support_const (u_int_pos E).ne', support_const C,
    abs_of_nonneg Dpos.le]

/-- For `D > 0`, the function `w D` has compact support. -/
theorem w_compact_support {D : â} (Dpos : 0 < D) : HasCompactSupport (w D : E â â) := by
  rw [hasCompactSupport_def, w_support E Dpos, closure_ball (0 : E) Dpos.ne']
  exact isCompact_closedBall _ _

variable {E}
/-- An auxiliary function to construct partitions of unity on finite-dimensional real vector spaces.
It is the convolution between a smooth function of integral `1` supported in the ball of radius `D`,
with the indicator function of the closed unit ball. Therefore, it is smooth, equal to `1` on the
ball of radius `1 - D`, with support equal to the ball of radius `1 + D`. -/
def y (D : â) : E â â :=
  w D â[lsmul â â, ÎŒ] Ï

/-- `y D` is an even function. -/
theorem y_neg (D : â) (x : E) : y D (-x) = y D x := by
  apply convolution_neg_of_neg_eq
  · filter_upwards with x
    simp only [w_def, Real.rpow_natCast, mul_inv_rev, smul_neg, u_neg, smul_eq_mul, forall_const]
  · filter_upwards with x
    simp only [Ï, indicator, mem_closedBall, dist_zero_right, norm_neg, forall_const]

/-- `y D` equals `1` on the closed ball of radius `1 - D`. -/
theorem y_eq_one_of_mem_closedBall {D : â} {x : E} (Dpos : 0 < D)
    (hx : x â closedBall (0 : E) (1 - D)) : y D x = 1 := by
  change (w D â[lsmul â â, ÎŒ] Ï) x = 1
  -- on `ball x D` the indicator `Ï` is constantly `1`, so the convolution reduces
  -- to the integral of `w D`, which is `1`
  have B : â y : E, y â ball x D â Ï y = 1 := by
    have C : ball x D â ball 0 1 := by
      apply ball_subset_ball'
      simp only [mem_closedBall] at hx
      linarith only [hx]
    intro y hy
    simp only [Ï, indicator, mem_closedBall, ite_eq_left_iff, not_le, zero_ne_one]
    intro h'y
    linarith only [mem_ball.1 (C hy), h'y]
  have Bx : Ï x = 1 := B _ (mem_ball_self Dpos)
  have B' : â y, y â ball x D â Ï y = Ï x := by rw [Bx]; exact B
  rw [convolution_eq_right' _ (le_of_eq (w_support E Dpos)) B']
  simp only [lsmul_apply, Algebra.id.smul_eq_mul, integral_mul_right, w_integral E Dpos, Bx,
    one_mul]

/-- `y D` vanishes outside the ball of radius `1 + D`. -/
theorem y_eq_zero_of_not_mem_ball {D : â} {x : E} (Dpos : 0 < D) (hx : x â ball (0 : E) (1 + D)) :
    y D x = 0 := by
  change (w D â[lsmul â â, ÎŒ] Ï) x = 0
  -- on `ball x D` the indicator `Ï` vanishes identically, so the convolution vanishes
  have B : â y, y â ball x D â Ï y = 0 := by
    intro y hy
    simp only [Ï, indicator, mem_closedBall_zero_iff, ite_eq_right_iff, one_ne_zero]
    intro h'y
    have C : ball y D â ball 0 (1 + D) := by
      apply ball_subset_ball'
      rw [â dist_zero_right] at h'y
      linarith only [h'y]
    exact hx (C (mem_ball_comm.1 hy))
  have Bx : Ï x = 0 := B _ (mem_ball_self Dpos)
  have B' : â y, y â ball x D â Ï y = Ï x := by rw [Bx]; exact B
  rw [convolution_eq_right' _ (le_of_eq (w_support E Dpos)) B']
  simp only [lsmul_apply, Algebra.id.smul_eq_mul, Bx, mul_zero, integral_const]
/-- `y D` is nonnegative. -/
theorem y_nonneg (D : â) (x : E) : 0 †y D x :=
  integral_nonneg (w_mul_Ï_nonneg D x)

/-- `y D` is bounded above by `1`. -/
theorem y_le_one {D : â} (x : E) (Dpos : 0 < D) : y D x †1 := by
  -- compare the convolution against `Ï` with the convolution against the constant `1`
  have A : (w D â[lsmul â â, ÎŒ] Ï) x †(w D â[lsmul â â, ÎŒ] 1) x := by
    apply
      convolution_mono_right_of_nonneg _ (w_nonneg D) (indicator_le_self' fun x _ => zero_le_one)
        fun _ => zero_le_one
    refine
      (HasCompactSupport.convolutionExistsLeft _ (w_compact_support E Dpos) ?_
          (locallyIntegrable_const (1 : â)) x).integrable
    exact continuous_const.mul ((u_continuous E).comp (continuous_id.const_smul _))
  have B : (w D â[lsmul â â, ÎŒ] fun _ => (1 : â)) x = 1 := by
    simp only [convolution, ContinuousLinearMap.map_smul, mul_inv_rev, coe_smul', mul_one,
      lsmul_apply, Algebra.id.smul_eq_mul, integral_mul_left, w_integral E Dpos, Pi.smul_apply]
  exact A.trans (le_of_eq B)

/-- For `0 < D < 1`, the function `y D` is positive on the open ball of radius `1 + D`. -/
theorem y_pos_of_mem_ball {D : â} {x : E} (Dpos : 0 < D) (D_lt_one : D < 1)
    (hx : x â ball (0 : E) (1 + D)) : 0 < y D x := by
  simp only [mem_ball_zero_iff] at hx
  refine (integral_pos_iff_support_of_nonneg (w_mul_Ï_nonneg D x) ?_).2 ?_
  -- the integrand is integrable since `w D` has compact support and `Ï` is locally integrable
  · have F_comp : HasCompactSupport (w D) := w_compact_support E Dpos
    have B : LocallyIntegrable (Ï : E â â) ÎŒ :=
      (locallyIntegrable_const _).indicator measurableSet_closedBall
    have C : Continuous (w D : E â â) :=
      continuous_const.mul ((u_continuous E).comp (continuous_id.const_smul _))
    exact
      (HasCompactSupport.convolutionExistsLeft (lsmul â â : â âL[â] â âL[â] â) F_comp C B
        x).integrable
  -- a small explicit ball around `z` sits inside the support of the integrand,
  -- so the support has positive measure
  · set z := (D / (1 + D)) ⢠x with hz
    have B : 0 < 1 + D := by linarith
    have C : ball z (D * (1 + D - âxâ) / (1 + D)) â support fun y : E => w D y * Ï (x - y) := by
      intro y hy
      simp only [support_mul, w_support E Dpos]
      simp only [Ï, mem_inter_iff, mem_support, Ne, indicator_apply_eq_zero,
        mem_closedBall_zero_iff, one_ne_zero, not_forall, not_false_iff, exists_prop, and_true_iff]
      constructor
      · apply ball_subset_ball' _ hy
        simp only [hz, norm_smul, abs_of_nonneg Dpos.le, abs_of_nonneg B.le, dist_zero_right,
          Real.norm_eq_abs, abs_div]
        simp only [div_le_iff B, field_simps]
        ring_nf
        rfl
      · have ID : âD / (1 + D) - 1â = 1 / (1 + D) := by
          rw [Real.norm_of_nonpos]
          · simp only [B.ne', Ne, not_false_iff, mul_one, neg_sub, add_tsub_cancel_right,
              field_simps]
          · simp only [B.ne', Ne, not_false_iff, mul_one, field_simps]
            apply div_nonpos_of_nonpos_of_nonneg _ B.le
            linarith only
        rw [â mem_closedBall_iff_norm']
        apply closedBall_subset_closedBall' _ (ball_subset_closedBall hy)
        rw [â one_smul â x, dist_eq_norm, hz, â sub_smul, one_smul, norm_smul, ID]
        simp only [B.ne', div_le_iff B, field_simps]
        nlinarith only [hx, D_lt_one]
    apply lt_of_lt_of_le _ (measure_mono C)
    apply measure_ball_pos
    exact div_pos (mul_pos Dpos (by linarith only [hx])) B
variable (E)

/-- `y` is smooth jointly in the parameter (ranging over `Ioo 0 1`) and the point. -/
theorem y_smooth : ContDiffOn â †(uncurry y) (Ioo (0 : â) 1 ÃË¢ (univ : Set E)) := by
  have hs : IsOpen (Ioo (0 : â) (1 : â)) := isOpen_Ioo
  have hk : IsCompact (closedBall (0 : E) 1) := ProperSpace.isCompact_closedBall _ _
  -- smoothness of a convolution with a parameter, checking the three hypotheses in turn
  refine contDiffOn_convolution_left_with_param (lsmul â â) hs hk ?_ ?_ ?_
  -- `w p` vanishes outside the compact set `closedBall 0 1` for parameters `p` in `Ioo 0 1`
  · rintro p x hp hx
    simp only [w, mul_inv_rev, Algebra.id.smul_eq_mul, mul_eq_zero, inv_eq_zero]
    right
    contrapose! hx
    have : pâ»Â¹ ⢠x â support u := mem_support.2 hx
    simp only [u_support, norm_smul, mem_ball_zero_iff, Real.norm_eq_abs, abs_inv,
      abs_of_nonneg hp.1.le, â div_eq_inv_mul, div_lt_one hp.1] at this
    rw [mem_closedBall_zero_iff]
    exact this.le.trans hp.2.le
  -- `Ï` is locally integrable
  · exact (locallyIntegrable_const _).indicator measurableSet_closedBall
  -- the family `w` is jointly smooth on the parameter set
  · apply ContDiffOn.mul
    · norm_cast
      refine
        (contDiffOn_const.mul ?_).inv fun x hx =>
          ne_of_gt (mul_pos (u_int_pos E) (pow_pos (abs_pos_of_pos hx.1.1) (finrank â E)))
      apply ContDiffOn.pow
      simp_rw [â Real.norm_eq_abs]
      apply ContDiffOn.norm â
      · exact contDiffOn_fst
      · intro x hx; exact ne_of_gt hx.1.1
    · apply (u_smooth E).comp_contDiffOn
      exact ContDiffOn.smul (contDiffOn_fst.inv fun x hx => ne_of_gt hx.1.1) contDiffOn_snd

/-- For `0 < D < 1`, the support of `y D` is the ball of radius `1 + D`. -/
theorem y_support {D : â} (Dpos : 0 < D) (D_lt_one : D < 1) :
    support (y D : E â â) = ball (0 : E) (1 + D) :=
  support_eq_iff.2
    âšfun _ hx => (y_pos_of_mem_ball Dpos D_lt_one hx).ne', fun _ hx =>
      y_eq_zero_of_not_mem_ball Dpos hxâ©

variable {E}

end HelperDefinitions
/-- Any finite-dimensional real normed vector space admits smooth bump functions: rescale the
auxiliary family `y` in both the parameter (sending `Ioi 1` into `Ioo 0 1` via
`R ⊠(R - 1) / (R + 1)`) and the point (by the factor `(R + 1) / 2`). -/
instance (priority := 100) {E : Type*} [NormedAddCommGroup E] [NormedSpace â E]
    [FiniteDimensional â E] : HasContDiffBump E := by
  refine âšâš?_â©â©
  borelize E
  have IR : â R : â, 1 < R â 0 < (R - 1) / (R + 1) := by intro R hR; apply div_pos <;> linarith
  exact
    { toFun := fun R x => if 1 < R then y ((R - 1) / (R + 1)) (((R + 1) / 2)â»Â¹ ⢠x) else 0
      mem_Icc := fun R x => by
        simp only [mem_Icc]
        split_ifs with h
        · refine âšy_nonneg _ _, y_le_one _ (IR R h)â©
        · simp only [le_refl, zero_le_one, and_self]
      symmetric := fun R x => by
        simp only
        split_ifs
        · simp only [y_neg, smul_neg]
        · rfl
      smooth := by
        -- reduce to smoothness of `y` composed with the (smooth) reparametrization
        suffices
          ContDiffOn â â€
            (uncurry y â fun p : â à E => ((p.1 - 1) / (p.1 + 1), ((p.1 + 1) / 2)â»Â¹ ⢠p.2))
            (Ioi 1 ÃË¢ univ) by
          apply this.congr
          rintro âšR, xâ© âšhR : 1 < R, _â©
          simp only [hR, uncurry_apply_pair, if_true, Function.comp_apply]
        apply (y_smooth E).comp
        · apply ContDiffOn.prod
          · refine
              (contDiffOn_fst.sub contDiffOn_const).div (contDiffOn_fst.add contDiffOn_const) ?_
            rintro âšR, xâ© âšhR : 1 < R, _â©
            apply ne_of_gt
            dsimp only
            linarith
          · apply ContDiffOn.smul _ contDiffOn_snd
            refine ((contDiffOn_fst.add contDiffOn_const).div_const _).inv ?_
            rintro âšR, xâ© âšhR : 1 < R, _â©
            apply ne_of_gt
            dsimp only
            linarith
        · rintro âšR, xâ© âšhR : 1 < R, _â©
          have A : 0 < (R - 1) / (R + 1) := by apply div_pos <;> linarith
          have B : (R - 1) / (R + 1) < 1 := by apply (div_lt_one _).2 <;> linarith
          simp only [mem_preimage, prod_mk_mem_set_prod_eq, mem_Ioo, mem_univ, and_true_iff, A, B]
      eq_one := fun R hR x hx => by
        have A : 0 < R + 1 := by linarith
        simp only [hR, if_true]
        apply y_eq_one_of_mem_closedBall (IR R hR)
        simp only [norm_smul, inv_div, mem_closedBall_zero_iff, Real.norm_eq_abs, abs_div, abs_two,
          abs_of_nonneg A.le]
        calc
          2 / (R + 1) * âxâ †2 / (R + 1) := mul_le_of_le_one_right (by positivity) hx
          _ = 1 - (R - 1) / (R + 1) := by field_simp; ring
      support := fun R hR => by
        have A : 0 < (R + 1) / 2 := by linarith
        have C : (R - 1) / (R + 1) < 1 := by apply (div_lt_one _).2 <;> linarith
        simp only [hR, if_true, support_comp_inv_smulâ A.ne', y_support _ (IR R hR) C,
          _root_.smul_ball A.ne', Real.norm_of_nonneg A.le, smul_zero]
        refine congr (congr_arg ball (Eq.refl 0)) ?_
        field_simp; ring }

end ExistsContDiffBumpBase

end
|
Analysis\Calculus\BumpFunction\InnerProduct.lean | /-
Copyright (c) 2020 Sébastien Gouëzel. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Sébastien Gouëzel, Yury Kudryashov
-/
import Mathlib.Analysis.Calculus.BumpFunction.Basic
import Mathlib.Analysis.InnerProductSpace.Calculus
import Mathlib.Analysis.SpecialFunctions.SmoothTransition
/-!
# Smooth bump functions in inner product spaces
In this file we prove that a real inner product space has smooth bump functions,
see `hasContDiffBump_of_innerProductSpace`.
## Keywords
smooth function, bump function, inner product space
-/
open Function Real
open scoped Topology
variable (E : Type*) [NormedAddCommGroup E] [InnerProductSpace â E]
-- Porting note: this definition was hidden inside the next instance.
/-- A base bump function in an inner product space. This construction works in any space with a
norm smooth away from zero but we do not have a typeclass for this. -/
noncomputable def ContDiffBumpBase.ofInnerProductSpace : ContDiffBumpBase E where
-- The profile `smoothTransition ((R - âxâ) / (R - 1))` is `1` for `âxâ †1` (argument `℠1`)
-- and `0` for `âxâ ℠R` (argument `†0`), as required of a bump-function base.
toFun R x := smoothTransition ((R - âxâ) / (R - 1))
mem_Icc _ _ := âšsmoothTransition.nonneg _, smoothTransition.le_one _â©
symmetric _ _ := by simp only [norm_neg]
smooth := by
rintro âšR, xâ© âšhR : 1 < R, -â©
apply ContDiffAt.contDiffWithinAt
rw [â sub_pos] at hR
-- The norm is not differentiable at `0`, so the point `x = 0` needs a separate argument:
-- near `(R, 0)` the argument of `smoothTransition` is `℠1`, so the function is locally
-- the constant `1` and hence smooth there.
rcases eq_or_ne x 0 with rfl | hx
· have A : ContinuousAt (fun p : â à E ⊠(p.1 - âp.2â) / (p.1 - 1)) (R, 0) :=
(continuousAt_fst.sub continuousAt_snd.norm).div
(continuousAt_fst.sub continuousAt_const) hR.ne'
have B : âá¶ p in ð (R, (0 : E)), 1 †(p.1 - âp.2â) / (p.1 - 1) :=
A.eventually <| le_mem_nhds <| (one_lt_div hR).2 <| sub_lt_sub_left (by simp) _
refine (contDiffAt_const (c := 1)).congr_of_eventuallyEq <| B.mono fun _ âŠ
smoothTransition.one_of_one_le
-- Away from `0` the norm is smooth, so we may compose smooth functions directly.
· refine smoothTransition.contDiffAt.comp _ (ContDiffAt.div ?_ ?_ hR.ne')
· exact contDiffAt_fst.sub (contDiffAt_snd.norm â hx)
· exact contDiffAt_fst.sub contDiffAt_const
eq_one R hR x hx := smoothTransition.one_of_one_le <| (one_le_div <| sub_pos.2 hR).2 <|
sub_le_sub_left hx _
support R hR := by
ext x
-- Membership in the support unfolds to positivity of the transition's argument, which is
-- equivalent to `âxâ < R`.
rw [mem_support, Ne, smoothTransition.zero_iff_nonpos, not_le, mem_ball_zero_iff]
simp [div_pos_iff, sq_lt_sq, abs_of_pos (one_pos.trans hR), hR, hR.not_lt]
/-- Any inner product space has smooth bump functions. -/
-- NOTE(review): priority 100 presumably lets more specific `HasContDiffBump` instances win —
-- consistent with Mathlib convention for default instances, but confirm against other instances.
instance (priority := 100) hasContDiffBump_of_innerProductSpace : HasContDiffBump E :=
âšâš.ofInnerProductSpace Eâ©â©
|
Analysis\Calculus\BumpFunction\Normed.lean | /-
Copyright (c) 2022 Floris van Doorn. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Floris van Doorn
-/
import Mathlib.Analysis.Calculus.BumpFunction.Basic
import Mathlib.MeasureTheory.Integral.SetIntegral
import Mathlib.MeasureTheory.Measure.Lebesgue.EqHaar
/-!
# Normed bump function
In this file we define `ContDiffBump.normed f Ό` to be the bump function `f` normalized so that
`â« x, f.normed ÎŒ x âÎŒ = 1` and prove some properties of this function.
-/
noncomputable section
open Function Filter Set Metric MeasureTheory FiniteDimensional Measure
open scoped Topology
namespace ContDiffBump
variable {E : Type*} [NormedAddCommGroup E] [NormedSpace â E] [HasContDiffBump E]
[MeasurableSpace E] {c : E} (f : ContDiffBump c) {x : E} {n : ââ} {ÎŒ : Measure E}
/-- A bump function normed so that `â« x, f.normed ÎŒ x âÎŒ = 1`. -/
protected def normed (ÎŒ : Measure E) : E â â := fun x => f x / â« x, f x âÎŒ
-- `normed` unfolds definitionally to the pointwise quotient by the total integral.
theorem normed_def {ÎŒ : Measure E} (x : E) : f.normed ÎŒ x = f x / â« x, f x âÎŒ :=
rfl
-- The normalized bump is nonnegative: both `f` and its total integral are nonnegative.
theorem nonneg_normed (x : E) : 0 †f.normed Ό x :=
div_nonneg f.nonneg <| integral_nonneg f.nonneg'
-- Dividing by the (constant) integral preserves smoothness ...
theorem contDiff_normed {n : ââ} : ContDiff â n (f.normed ÎŒ) :=
f.contDiff.div_const _
-- ... and continuity.
theorem continuous_normed : Continuous (f.normed Ό) :=
f.continuous.div_const _
-- `f.normed` inherits the reflection symmetry of `f` about the center `c`.
theorem normed_sub (x : E) : f.normed Ό (c - x) = f.normed Ό (c + x) := by
simp_rw [f.normed_def, f.sub]
-- A bump centered at `0` is an even function, hence so is its normalization.
theorem normed_neg (f : ContDiffBump (0 : E)) (x : E) : f.normed Ό (-x) = f.normed Ό x := by
simp_rw [f.normed_def, f.neg]
variable [BorelSpace E] [FiniteDimensional â E] [IsLocallyFiniteMeasure ÎŒ]
-- A continuous, compactly supported function is integrable (w.r.t. a locally finite measure).
protected theorem integrable : Integrable f Ό :=
f.continuous.integrable_of_hasCompactSupport f.hasCompactSupport
-- Normalization only divides by a constant, so integrability is preserved.
protected theorem integrable_normed : Integrable (f.normed Ό) Ό :=
f.integrable.div_const _
variable [Ό.IsOpenPosMeasure]
-- For a measure positive on open sets, the integral of the bump is positive: its support
-- is the (nonempty, open) ball of radius `rOut` around `c`, which has positive measure.
theorem integral_pos : 0 < â« x, f x âÎŒ := by
refine (integral_pos_iff_support_of_nonneg f.nonneg' f.integrable).mpr ?_
rw [f.support_eq]
exact measure_ball_pos Ό c f.rOut_pos
-- The defining property of `f.normed`: its total integral is `1`.
theorem integral_normed : â« x, f.normed ÎŒ x âÎŒ = 1 := by
simp_rw [ContDiffBump.normed, div_eq_mul_inv, mul_comm (f _), â smul_eq_mul, integral_smul]
exact inv_mul_cancel f.integral_pos.ne'
-- Normalizing does not change the support: it is still the open ball of radius `rOut`
-- (dividing by the nonzero constant `â« f` does not create or destroy zeros).
theorem support_normed_eq : Function.support (f.normed Ό) = Metric.ball c f.rOut := by
unfold ContDiffBump.normed
rw [support_div, f.support_eq, support_const f.integral_pos.ne', inter_univ]
-- The topological support (closure of the support) is the corresponding closed ball.
theorem tsupport_normed_eq : tsupport (f.normed Ό) = Metric.closedBall c f.rOut := by
rw [tsupport, f.support_normed_eq, closure_ball _ f.rOut_pos.ne']
-- The closed ball is compact here (finite-dimensional space), so the normalized bump has
-- compact support.
theorem hasCompactSupport_normed : HasCompactSupport (f.normed Ό) := by
simp only [HasCompactSupport, f.tsupport_normed_eq (Ό := Ό), isCompact_closedBall]
-- If the outer radii `(Ï i).rOut` tend to `0` along `l`, the supports of the normalized
-- bumps shrink into arbitrarily small neighborhoods of the center `c`.
theorem tendsto_support_normed_smallSets {ι} {Ï : ι â ContDiffBump c} {l : Filter ι}
(hÏ : Tendsto (fun i => (Ï i).rOut) l (ð 0)) :
Tendsto (fun i => Function.support fun x => (Ï i).normed ÎŒ x) l (ð c).smallSets := by
-- Since `rOut > 0`, convergence to `0` in norm is just `rOut < ε` eventually.
simp_rw [NormedAddCommGroup.tendsto_nhds_zero, Real.norm_eq_abs,
abs_eq_self.mpr (Ï _).rOut_pos.le] at hÏ
rw [nhds_basis_ball.smallSets.tendsto_right_iff]
-- For each `ε > 0`, eventually `rOut < ε`, so the support ball fits inside `ball c ε`.
refine fun ε hε ⊠(hÏ Îµ hε).mono fun i hi ⊠?_
rw [(Ï i).support_normed_eq]
exact ball_subset_ball hi.le
variable (Ό)
-- Integrating `f.normed Ό x ⢠z` rescales `z` by the total integral, which is `1`.
theorem integral_normed_smul {X} [NormedAddCommGroup X] [NormedSpace â X]
[CompleteSpace X] (z : X) : â« x, f.normed ÎŒ x ⢠z âÎŒ = z := by
simp_rw [integral_smul_const, f.integral_normed (Ό := Ό), one_smul]
-- Lower bound for the integral: `f = 1` on the closed inner ball, so the integral of `f`
-- is at least the measure of that ball.
theorem measure_closedBall_le_integral : (ÎŒ (closedBall c f.rIn)).toReal †⫠x, f x âÎŒ := by calc
(ÎŒ (closedBall c f.rIn)).toReal = â« x in closedBall c f.rIn, 1 âÎŒ := by simp
_ = â« x in closedBall c f.rIn, f x âÎŒ := setIntegral_congr measurableSet_closedBall
(fun x hx ⊠(one_of_mem_closedBall f hx).symm)
_ †⫠x, f x âÎŒ := setIntegral_le_integral f.integrable (eventually_of_forall (fun x ⊠f.nonneg))
-- Pointwise bound: since `f †1` and `â« f ℠Ό (closedBall c rIn)`, the normalized bump
-- is bounded by the reciprocal of the measure of the inner ball.
theorem normed_le_div_measure_closedBall_rIn (x : E) :
f.normed Ό x †1 / (Ό (closedBall c f.rIn)).toReal := by
rw [normed_def]
gcongr
· exact ENNReal.toReal_pos (measure_closedBall_pos _ _ f.rIn_pos).ne' measure_closedBall_lt_top.ne
· exact f.le_one
· exact f.measure_closedBall_le_integral Ό
-- Upper bound for the integral: `f` vanishes outside the closed outer ball and is `†1`
-- on it, so the integral is at most the measure of that ball.
theorem integral_le_measure_closedBall : â« x, f x âÎŒ †(ÎŒ (closedBall c f.rOut)).toReal := by calc
â« x, f x âÎŒ = â« x in closedBall c f.rOut, f x âÎŒ := by
apply (setIntegral_eq_integral_of_forall_compl_eq_zero (fun x hx ⊠?_)).symm
apply f.zero_of_le_dist (le_of_lt _)
simpa using hx
_ †⫠x in closedBall c f.rOut, 1 âÎŒ := by
apply setIntegral_mono f.integrable.integrableOn _ (fun x ⊠f.le_one)
simp [measure_closedBall_lt_top]
_ = (Ό (closedBall c f.rOut)).toReal := by simp
-- For an additive Haar measure, if `rOut †K * rIn` then `â« f ℠Ό (closedBall c rOut) / K ^ dim`:
-- Haar scaling of ball measures converts the inner-ball lower bound into one for the outer ball.
theorem measure_closedBall_div_le_integral [IsAddHaarMeasure ÎŒ] (K : â) (h : f.rOut †K * f.rIn) :
(ÎŒ (closedBall c f.rOut)).toReal / K ^ finrank â E †⫠x, f x âÎŒ := by
-- `K` is positive because `0 < rOut †K * rIn` with `rIn > 0`.
have K_pos : 0 < K := by
simpa [f.rIn_pos, not_lt.2 f.rIn_pos.le] using mul_pos_iff.1 (f.rOut_pos.trans_le h)
apply le_trans _ (f.measure_closedBall_le_integral Ό)
-- Rewrite both ball measures via `addHaar_closedBall'` and compare the radii.
rw [div_le_iff (pow_pos K_pos _), addHaar_closedBall' _ _ f.rIn_pos.le,
addHaar_closedBall' _ _ f.rOut_pos.le, ENNReal.toReal_mul, ENNReal.toReal_mul,
ENNReal.toReal_ofReal (pow_nonneg f.rOut_pos.le _),
ENNReal.toReal_ofReal (pow_nonneg f.rIn_pos.le _), mul_assoc, mul_comm _ (K ^ _), â mul_assoc,
â mul_pow, mul_comm _ K]
gcongr
exact f.rOut_pos.le
-- Pointwise bound on the normalized bump in terms of the outer ball, for Haar measures:
-- `f.normed Ό x †K ^ dim / Ό (closedBall c rOut)` whenever `rOut †K * rIn`.
theorem normed_le_div_measure_closedBall_rOut [IsAddHaarMeasure ÎŒ] (K : â) (h : f.rOut †K * f.rIn)
(x : E) :
f.normed ÎŒ x †K ^ finrank â E / (ÎŒ (closedBall c f.rOut)).toReal := by
have K_pos : 0 < K := by
simpa [f.rIn_pos, not_lt.2 f.rIn_pos.le] using mul_pos_iff.1 (f.rOut_pos.trans_le h)
-- First bound the numerator `f x` by `1` ...
have : f x / â« y, f y âÎŒ †1 / â« y, f y âÎŒ := by
gcongr
· exact f.integral_pos.le
· exact f.le_one
apply this.trans
-- ... then bound `1 / â« f` using `measure_closedBall_div_le_integral`.
rw [div_le_div_iff f.integral_pos, one_mul, â div_le_iff' (pow_pos K_pos _)]
· exact f.measure_closedBall_div_le_integral Ό K h
· exact ENNReal.toReal_pos (measure_closedBall_pos _ _ f.rOut_pos).ne'
measure_closedBall_lt_top.ne
end ContDiffBump
|
Analysis\Calculus\Conformal\InnerProduct.lean | /-
Copyright (c) 2021 Yourong Zang. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yourong Zang
-/
import Mathlib.Analysis.Calculus.Conformal.NormedSpace
import Mathlib.Analysis.InnerProductSpace.ConformalLinearMap
/-!
# Conformal maps between inner product spaces
A function between inner product spaces which has a derivative at `x`
is conformal at `x` iff the derivative preserves inner products up to a scalar multiple.
-/
noncomputable section
variable {E F : Type*}
variable [NormedAddCommGroup E] [NormedAddCommGroup F]
variable [InnerProductSpace â E] [InnerProductSpace â F]
open RealInnerProductSpace
/-- A real differentiable map `f` is conformal at point `x` if and only if its
differential `fderiv â f x` at that point scales every inner product by a positive scalar. -/
theorem conformalAt_iff' {f : E â F} {x : E} : ConformalAt f x â
â c : â, 0 < c â§ â u v : E, âªfderiv â f x u, fderiv â f x vâ« = c * âªu, vâ« := by
-- Reduce to the inner-product characterization of conformal linear maps.
rw [conformalAt_iff_isConformalMap_fderiv, isConformalMap_iff]
/-- A real differentiable map `f` is conformal at point `x` if and only if its
differential `f'` at that point scales every inner product by a positive scalar. -/
theorem conformalAt_iff {f : E â F} {x : E} {f' : E âL[â] F} (h : HasFDerivAt f f' x) :
ConformalAt f x â â c : â, 0 < c â§ â u v : E, âªf' u, f' vâ« = c * âªu, vâ« := by
-- `h.fderiv` identifies the given derivative `f'` with `fderiv â f x`.
simp only [conformalAt_iff', h.fderiv]
/-- The conformal factor of a conformal map at some point `x`. Some authors refer to this function
as the characteristic function of the conformal map. -/
def conformalFactorAt {f : E â F} {x : E} (h : ConformalAt f x) : â :=
-- Extracted noncomputably from the existential in `conformalAt_iff'`.
Classical.choose (conformalAt_iff'.mp h)
-- The conformal factor is positive, by the first component of the chosen witness.
theorem conformalFactorAt_pos {f : E â F} {x : E} (h : ConformalAt f x) : 0 < conformalFactorAt h :=
(Classical.choose_spec <| conformalAt_iff'.mp h).1
-- The derivative scales inner products by exactly the conformal factor.
theorem conformalFactorAt_inner_eq_mul_inner' {f : E â F} {x : E} (h : ConformalAt f x) (u v : E) :
âª(fderiv â f x) u, (fderiv â f x) vâ« = (conformalFactorAt h : â) * âªu, vâ« :=
(Classical.choose_spec <| conformalAt_iff'.mp h).2 u v
-- Version for an arbitrary derivative `f'` of `f` at `x`, identified with `fderiv â f x`
-- by uniqueness of derivatives.
theorem conformalFactorAt_inner_eq_mul_inner {f : E â F} {x : E} {f' : E âL[â] F}
(h : HasFDerivAt f f' x) (H : ConformalAt f x) (u v : E) :
âªf' u, f' vâ« = (conformalFactorAt H : â) * âªu, vâ« :=
H.differentiableAt.hasFDerivAt.unique h âž conformalFactorAt_inner_eq_mul_inner' H u v
|
Analysis\Calculus\Conformal\NormedSpace.lean | /-
Copyright (c) 2021 Yourong Zang. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yourong Zang
-/
import Mathlib.Analysis.NormedSpace.ConformalLinearMap
import Mathlib.Analysis.Calculus.FDeriv.Add
/-!
# Conformal Maps
A continuous linear map between real normed spaces `X` and `Y` is `ConformalAt` some point `x`
if it is real differentiable at that point and its differential is a conformal linear map.
## Main definitions
* `ConformalAt`: the main definition of conformal maps
* `Conformal`: maps that are conformal at every point
## Main results
* The conformality of the composition of two conformal maps, the identity map
and multiplications by nonzero constants
* `conformalAt_iff_isConformalMap_fderiv`: an equivalent definition of the conformality of a map
In `Analysis.Calculus.Conformal.InnerProduct`:
* `conformalAt_iff`: an equivalent definition of the conformality of a map
In `Geometry.Euclidean.Angle.Unoriented.Conformal`:
* `ConformalAt.preserves_angle`: if a map is conformal at `x`, then its differential preserves
all angles at `x`
## Tags
conformal
## Warning
The definition of conformality in this file does NOT require the maps to be orientation-preserving.
Maps such as the complex conjugate are considered to be conformal.
-/
noncomputable section
variable {X Y Z : Type*} [NormedAddCommGroup X] [NormedAddCommGroup Y] [NormedAddCommGroup Z]
[NormedSpace â X] [NormedSpace â Y] [NormedSpace â Z]
section LocConformality
open LinearIsometry ContinuousLinearMap
/-- A map `f` is said to be conformal if it has a conformal differential `f'`. -/
def ConformalAt (f : X â Y) (x : X) :=
â f' : X âL[â] Y, HasFDerivAt f f' x â§ IsConformalMap f'
-- The identity is conformal everywhere: its derivative is the identity, a conformal map.
theorem conformalAt_id (x : X) : ConformalAt _root_.id x :=
âšid â X, hasFDerivAt_id _, isConformalMap_idâ©
-- Scaling by a nonzero constant is conformal everywhere.
theorem conformalAt_const_smul {c : â} (h : c â 0) (x : X) : ConformalAt (fun x' : X => c ⢠x') x :=
âšc ⢠ContinuousLinearMap.id â X, (hasFDerivAt_id x).const_smul c, isConformalMap_const_smul hâ©
-- In a subsingleton domain every map is conformal (the zero derivative works); this lets
-- the `nontriviality` tactic dispatch degenerate cases.
@[nontriviality]
theorem Subsingleton.conformalAt [Subsingleton X] (f : X â Y) (x : X) : ConformalAt f x :=
âš0, hasFDerivAt_of_subsingleton _ _, isConformalMap_of_subsingleton _â©
/-- A function is a conformal map if and only if its differential is a conformal linear map. -/
theorem conformalAt_iff_isConformalMap_fderiv {f : X â Y} {x : X} :
ConformalAt f x â IsConformalMap (fderiv â f x) := by
constructor
· rintro âšf', hf, hf'â©
rwa [hf.fderiv]
· intro H
by_cases h : DifferentiableAt â f x
· exact âšfderiv â f x, h.hasFDerivAt, Hâ©
-- If `f` is not differentiable at `x`, then `fderiv â f x = 0`, which (in a nontrivial
-- space) is not conformal — contradicting `H`.
· nontriviality X
exact absurd (fderiv_zero_of_not_differentiableAt h) H.ne_zero
namespace ConformalAt
-- A conformal map is in particular differentiable at the point.
theorem differentiableAt {f : X â Y} {x : X} (h : ConformalAt f x) : DifferentiableAt â f x :=
let âš_, hâ, _â© := h
hâ.differentiableAt
-- Conformality at `x` only depends on the germ of the function near `x`.
theorem congr {f g : X â Y} {x : X} {u : Set X} (hx : x â u) (hu : IsOpen u) (hf : ConformalAt f x)
(h : â x : X, x â u â g x = f x) : ConformalAt g x :=
let âšf', hfderiv, hf'â© := hf
âšf', hfderiv.congr_of_eventuallyEq ((hu.eventually_mem hx).mono h), hf'â©
-- Conformal maps compose: the chain rule composes the derivatives, and conformal linear
-- maps are closed under composition.
theorem comp {f : X â Y} {g : Y â Z} (x : X) (hg : ConformalAt g (f x)) (hf : ConformalAt f x) :
ConformalAt (g â f) x := by
rcases hf with âšf', hfâ, cfâ©
rcases hg with âšg', hgâ, cgâ©
exact âšg'.comp f', hgâ.comp x hfâ, cg.comp cfâ©
-- Scaling a conformal map by a nonzero constant stays conformal.
theorem const_smul {f : X â Y} {x : X} {c : â} (hc : c â 0) (hf : ConformalAt f x) :
ConformalAt (c ⢠f) x :=
(conformalAt_const_smul hc <| f x).comp x hf
end ConformalAt
end LocConformality
section GlobalConformality
/-- A map `f` is conformal if it's conformal at every point. -/
def Conformal (f : X â Y) :=
â x : X, ConformalAt f x
-- Global versions of the pointwise statements above.
theorem conformal_id : Conformal (id : X â X) := fun x => conformalAt_id x
theorem conformal_const_smul {c : â} (h : c â 0) : Conformal fun x : X => c ⢠x := fun x =>
conformalAt_const_smul h x
namespace Conformal
-- Specialize a global conformality assumption to a point.
theorem conformalAt {f : X â Y} (h : Conformal f) (x : X) : ConformalAt f x :=
h x
-- A conformal map is differentiable everywhere.
theorem differentiable {f : X â Y} (h : Conformal f) : Differentiable â f := fun x =>
(h x).differentiableAt
-- Global conformality is preserved by composition ...
theorem comp {f : X â Y} {g : Y â Z} (hf : Conformal f) (hg : Conformal g) : Conformal (g â f) :=
fun x => (hg <| f x).comp x (hf x)
-- ... and by scaling by a nonzero constant.
theorem const_smul {f : X â Y} (hf : Conformal f) {c : â} (hc : c â 0) : Conformal (c ⢠f) :=
fun x => (hf x).const_smul hc
end Conformal
end GlobalConformality
|
Analysis\Calculus\ContDiff\Basic.lean | /-
Copyright (c) 2019 Sébastien Gouëzel. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Sébastien Gouëzel, Floris van Doorn
-/
import Mathlib.Analysis.Calculus.ContDiff.Defs
import Mathlib.Analysis.Calculus.FDeriv.Add
import Mathlib.Analysis.Calculus.FDeriv.Mul
import Mathlib.Analysis.Calculus.Deriv.Inverse
/-!
# Higher differentiability of usual operations
We prove that the usual operations (addition, multiplication, difference, composition, and
so on) preserve `C^n` functions. We also expand the API around `C^n` functions.
## Main results
* `ContDiff.comp` states that the composition of two `C^n` functions is `C^n`.
Similar results are given for `C^n` functions on domains.
## Notations
We use the notation `E [Ãn]âL[ð] F` for the space of continuous multilinear maps on `E^n` with
values in `F`. This is the space in which the `n`-th derivative of a function from `E` to `F` lives.
In this file, we denote `†: ââ` with `â`.
## Tags
derivative, differentiability, higher derivative, `C^n`, multilinear, Taylor series, formal series
-/
noncomputable section
open scoped NNReal Nat
local notation "â" => (†: ââ)
universe u v w uD uE uF uG
attribute [local instance 1001]
NormedAddCommGroup.toAddCommGroup NormedSpace.toModule' AddCommGroup.toAddCommMonoid
open Set Fin Filter Function
open scoped Topology
variable {ð : Type*} [NontriviallyNormedField ð] {D : Type uD} [NormedAddCommGroup D]
[NormedSpace ð D] {E : Type uE} [NormedAddCommGroup E] [NormedSpace ð E] {F : Type uF}
[NormedAddCommGroup F] [NormedSpace ð F] {G : Type uG} [NormedAddCommGroup G] [NormedSpace ð G]
{X : Type*} [NormedAddCommGroup X] [NormedSpace ð X] {s sâ t u : Set E} {f fâ : E â F}
{g : F â G} {x xâ : E} {c : F} {b : E Ã F â G} {m n : ââ} {p : E â FormalMultilinearSeries ð E F}
/-! ### Constants -/
@[simp]
theorem iteratedFDerivWithin_zero_fun (hs : UniqueDiffOn ð s) (hx : x â s) {i : â} :
iteratedFDerivWithin ð i (fun _ : E ⊠(0 : F)) s x = 0 := by
-- Induct on the order `i`: each differentiation of the constant `0` yields `0` again.
induction i generalizing x with
| zero => ext; simp
| succ i IH =>
ext m
rw [iteratedFDerivWithin_succ_apply_left, fderivWithin_congr (fun _ ⊠IH) (IH hx)]
rw [fderivWithin_const_apply _ (hs x hx)]
rfl
@[simp]
theorem iteratedFDeriv_zero_fun {n : â} : (iteratedFDeriv ð n fun _ : E ⊠(0 : F)) = 0 :=
-- Deduce the global statement from the `Within` version on `univ`.
funext fun x ⊠by simpa [â iteratedFDerivWithin_univ] using
iteratedFDerivWithin_zero_fun uniqueDiffOn_univ (mem_univ x)
-- The zero function is `C^n` for every `n`: all its iterated derivatives vanish.
theorem contDiff_zero_fun : ContDiff ð n fun _ : E => (0 : F) :=
contDiff_of_differentiable_iteratedFDeriv fun m _ => by
rw [iteratedFDeriv_zero_fun]
exact differentiable_const (0 : E[Ãm]âL[ð] F)
/-- Constants are `C^â`.
-/
theorem contDiff_const {c : F} : ContDiff ð n fun _ : E => c := by
-- It suffices to prove the `C^â` statement and weaken to `n`.
suffices h : ContDiff ð â fun _ : E => c from h.of_le le_top
rw [contDiff_top_iff_fderiv]
refine âšdifferentiable_const c, ?_â©
rw [fderiv_const]
exact contDiff_zero_fun
-- Specializations of `contDiff_const` to the `On`/`At`/`WithinAt` variants.
theorem contDiffOn_const {c : F} {s : Set E} : ContDiffOn ð n (fun _ : E => c) s :=
contDiff_const.contDiffOn
theorem contDiffAt_const {c : F} : ContDiffAt ð n (fun _ : E => c) x :=
contDiff_const.contDiffAt
theorem contDiffWithinAt_const {c : F} : ContDiffWithinAt ð n (fun _ : E => c) s x :=
contDiffAt_const.contDiffWithinAt
-- In a subsingleton codomain every function equals the constant `0`, hence is `C^n`;
-- these lemmas let the `nontriviality` tactic dispatch degenerate cases.
@[nontriviality]
theorem contDiff_of_subsingleton [Subsingleton F] : ContDiff ð n f := by
rw [Subsingleton.elim f fun _ => 0]; exact contDiff_const
@[nontriviality]
theorem contDiffAt_of_subsingleton [Subsingleton F] : ContDiffAt ð n f x := by
rw [Subsingleton.elim f fun _ => 0]; exact contDiffAt_const
@[nontriviality]
theorem contDiffWithinAt_of_subsingleton [Subsingleton F] : ContDiffWithinAt ð n f s x := by
rw [Subsingleton.elim f fun _ => 0]; exact contDiffWithinAt_const
@[nontriviality]
theorem contDiffOn_of_subsingleton [Subsingleton F] : ContDiffOn ð n f s := by
rw [Subsingleton.elim f fun _ => 0]; exact contDiffOn_const
-- Iterated derivatives of order `℠1` of a constant function vanish.
theorem iteratedFDerivWithin_succ_const (n : â) (c : F) (hs : UniqueDiffOn ð s) (hx : x â s) :
iteratedFDerivWithin ð (n + 1) (fun _ : E ⊠c) s x = 0 := by
ext m
-- Peel the innermost derivative from the right: it is the derivative of a constant.
rw [iteratedFDerivWithin_succ_apply_right hs hx]
rw [iteratedFDerivWithin_congr (fun y hy ⊠fderivWithin_const_apply c (hs y hy)) hx]
rw [iteratedFDerivWithin_zero_fun hs hx]
simp [ContinuousMultilinearMap.zero_apply (R := ð)]
theorem iteratedFDeriv_succ_const (n : â) (c : F) :
(iteratedFDeriv ð (n + 1) fun _ : E ⊠c) = 0 :=
funext fun x ⊠by simpa [â iteratedFDerivWithin_univ] using
iteratedFDerivWithin_succ_const n c uniqueDiffOn_univ (mem_univ x)
-- Restatement with the hypothesis `n â 0` instead of a successor pattern.
theorem iteratedFDerivWithin_const_of_ne {n : â} (hn : n â 0) (c : F)
(hs : UniqueDiffOn ð s) (hx : x â s) :
iteratedFDerivWithin ð n (fun _ : E ⊠c) s x = 0 := by
cases n with
| zero => contradiction
| succ n => exact iteratedFDerivWithin_succ_const n c hs hx
theorem iteratedFDeriv_const_of_ne {n : â} (hn : n â 0) (c : F) :
(iteratedFDeriv ð n fun _ : E ⊠c) = 0 :=
funext fun x ⊠by simpa [â iteratedFDerivWithin_univ] using
iteratedFDerivWithin_const_of_ne hn c uniqueDiffOn_univ (mem_univ x)
/-! ### Smoothness of linear functions -/
/-- Unbundled bounded linear functions are `C^â`.
-/
theorem IsBoundedLinearMap.contDiff (hf : IsBoundedLinearMap ð f) : ContDiff ð n f := by
suffices h : ContDiff ð â f from h.of_le le_top
rw [contDiff_top_iff_fderiv]
refine âšhf.differentiable, ?_â©
-- The derivative of a bounded linear map is constant (equal to the map itself).
simp_rw [hf.fderiv]
exact contDiff_const
-- Bundled continuous linear maps, equivs, and (equiv) isometries are all `C^n`.
theorem ContinuousLinearMap.contDiff (f : E âL[ð] F) : ContDiff ð n f :=
f.isBoundedLinearMap.contDiff
theorem ContinuousLinearEquiv.contDiff (f : E âL[ð] F) : ContDiff ð n f :=
(f : E âL[ð] F).contDiff
theorem LinearIsometry.contDiff (f : E ââáµ¢[ð] F) : ContDiff ð n f :=
f.toContinuousLinearMap.contDiff
theorem LinearIsometryEquiv.contDiff (f : E ââáµ¢[ð] F) : ContDiff ð n f :=
(f : E âL[ð] F).contDiff
/-- The identity is `C^â`.
-/
theorem contDiff_id : ContDiff ð n (id : E â E) :=
IsBoundedLinearMap.id.contDiff
theorem contDiffWithinAt_id {s x} : ContDiffWithinAt ð n (id : E â E) s x :=
contDiff_id.contDiffWithinAt
theorem contDiffAt_id {x} : ContDiffAt ð n (id : E â E) x :=
contDiff_id.contDiffAt
theorem contDiffOn_id {s} : ContDiffOn ð n (id : E â E) s :=
contDiff_id.contDiffOn
/-- Bilinear functions are `C^â`.
-/
theorem IsBoundedBilinearMap.contDiff (hb : IsBoundedBilinearMap ð b) : ContDiff ð n b := by
suffices h : ContDiff ð â b from h.of_le le_top
rw [contDiff_top_iff_fderiv]
refine âšhb.differentiable, ?_â©
-- The derivative of a bounded bilinear map is a bounded linear map, hence itself smooth.
simp only [hb.fderiv]
exact hb.isBoundedLinearMap_deriv.contDiff
/-- If `f` admits a Taylor series `p` in a set `s`, and `g` is linear, then `g â f` admits a Taylor
series whose `k`-th term is given by `g â (p k)`. -/
theorem HasFTaylorSeriesUpToOn.continuousLinearMap_comp (g : F âL[ð] G)
(hf : HasFTaylorSeriesUpToOn n f p s) :
HasFTaylorSeriesUpToOn n (g â f) (fun x k => g.compContinuousMultilinearMap (p x k)) s where
zero_eq x hx := congr_arg g (hf.zero_eq x hx)
-- Differentiation commutes with postcomposition by the continuous linear map `g`,
-- mediated by the bundled operator `compContinuousMultilinearMapL`.
fderivWithin m hm x hx := (ContinuousLinearMap.compContinuousMultilinearMapL ð
(fun _ : Fin m => E) F G g).hasFDerivAt.comp_hasFDerivWithinAt x (hf.fderivWithin m hm x hx)
cont m hm := (ContinuousLinearMap.compContinuousMultilinearMapL ð
(fun _ : Fin m => E) F G g).continuous.comp_continuousOn (hf.cont m hm)
/-- Composition by continuous linear maps on the left preserves `C^n` functions in a domain
at a point. -/
theorem ContDiffWithinAt.continuousLinearMap_comp (g : F âL[ð] G)
(hf : ContDiffWithinAt ð n f s x) : ContDiffWithinAt ð n (g â f) s x := fun m hm ⊠by
-- Compose the local Taylor series of `f` with `g` on the left.
rcases hf m hm with âšu, hu, p, hpâ©
exact âšu, hu, _, hp.continuousLinearMap_comp gâ©
/-- Composition by continuous linear maps on the left preserves `C^n` functions in a domain
at a point. -/
theorem ContDiffAt.continuousLinearMap_comp (g : F âL[ð] G) (hf : ContDiffAt ð n f x) :
ContDiffAt ð n (g â f) x :=
ContDiffWithinAt.continuousLinearMap_comp g hf
/-- Composition by continuous linear maps on the left preserves `C^n` functions on domains. -/
theorem ContDiffOn.continuousLinearMap_comp (g : F âL[ð] G) (hf : ContDiffOn ð n f s) :
ContDiffOn ð n (g â f) s := fun x hx => (hf x hx).continuousLinearMap_comp g
/-- Composition by continuous linear maps on the left preserves `C^n` functions. -/
theorem ContDiff.continuousLinearMap_comp {f : E â F} (g : F âL[ð] G) (hf : ContDiff ð n f) :
ContDiff ð n fun x => g (f x) :=
contDiffOn_univ.1 <| ContDiffOn.continuousLinearMap_comp _ (contDiffOn_univ.2 hf)
/-- The iterated derivative within a set of the composition with a linear map on the left is
obtained by applying the linear map to the iterated derivative. -/
theorem ContinuousLinearMap.iteratedFDerivWithin_comp_left {f : E â F} (g : F âL[ð] G)
(hf : ContDiffOn ð n f s) (hs : UniqueDiffOn ð s) (hx : x â s) {i : â} (hi : (i : ââ) †n) :
iteratedFDerivWithin ð i (g â f) s x =
g.compContinuousMultilinearMap (iteratedFDerivWithin ð i f s x) :=
-- Both sides are the `i`-th term of a Taylor series for `g â f`; on a set with unique
-- differentiability, such a series is determined and equals the iterated derivative.
(((hf.ftaylorSeriesWithin hs).continuousLinearMap_comp g).eq_iteratedFDerivWithin_of_uniqueDiffOn
hi hs hx).symm
/-- The iterated derivative of the composition with a linear map on the left is
obtained by applying the linear map to the iterated derivative. -/
theorem ContinuousLinearMap.iteratedFDeriv_comp_left {f : E â F} (g : F âL[ð] G)
(hf : ContDiff ð n f) (x : E) {i : â} (hi : (i : ââ) †n) :
iteratedFDeriv ð i (g â f) x = g.compContinuousMultilinearMap (iteratedFDeriv ð i f x) := by
simp only [â iteratedFDerivWithin_univ]
exact g.iteratedFDerivWithin_comp_left hf.contDiffOn uniqueDiffOn_univ (mem_univ x) hi
/-- The iterated derivative within a set of the composition with a linear equiv on the left is
obtained by applying the linear equiv to the iterated derivative. This is true without
differentiability assumptions. -/
theorem ContinuousLinearEquiv.iteratedFDerivWithin_comp_left (g : F âL[ð] G) (f : E â F)
(hs : UniqueDiffOn ð s) (hx : x â s) (i : â) :
iteratedFDerivWithin ð i (g â f) s x =
(g : F âL[ð] G).compContinuousMultilinearMap (iteratedFDerivWithin ð i f s x) := by
-- Induct on `i`; since `g` is an equivalence, no differentiability of `f` is needed.
induction' i with i IH generalizing x
· ext1 m
simp only [Nat.zero_eq, iteratedFDerivWithin_zero_apply, comp_apply,
ContinuousLinearMap.compContinuousMultilinearMap_coe, coe_coe]
· ext1 m
rw [iteratedFDerivWithin_succ_apply_left]
-- Rewrite the order-`i` iterated derivative using the inductive hypothesis before
-- differentiating one more time.
have Z : fderivWithin ð (iteratedFDerivWithin ð i (g â f) s) s x =
fderivWithin ð (g.compContinuousMultilinearMapL (fun _ : Fin i => E) â
iteratedFDerivWithin ð i f s) s x :=
fderivWithin_congr' (@IH) hx
simp_rw [Z]
-- `comp_fderivWithin` holds for the equiv-induced operator without differentiability.
rw [(g.compContinuousMultilinearMapL fun _ : Fin i => E).comp_fderivWithin (hs x hx)]
simp only [ContinuousLinearMap.coe_comp', ContinuousLinearEquiv.coe_coe, comp_apply,
ContinuousLinearEquiv.compContinuousMultilinearMapL_apply,
ContinuousLinearMap.compContinuousMultilinearMap_coe, EmbeddingLike.apply_eq_iff_eq]
rw [iteratedFDerivWithin_succ_apply_left]
/-- Composition with a linear isometry on the left preserves the norm of the iterated
derivative within a set. -/
theorem LinearIsometry.norm_iteratedFDerivWithin_comp_left {f : E â F} (g : F ââáµ¢[ð] G)
(hf : ContDiffOn ð n f s) (hs : UniqueDiffOn ð s) (hx : x â s) {i : â} (hi : (i : ââ) †n) :
âiteratedFDerivWithin ð i (g â f) s xâ = âiteratedFDerivWithin ð i f s xâ := by
-- Identify the iterated derivative of `g â f` as `g` applied to that of `f`, then use
-- that postcomposition by an isometry preserves the norm of a multilinear map.
have :
iteratedFDerivWithin ð i (g â f) s x =
g.toContinuousLinearMap.compContinuousMultilinearMap (iteratedFDerivWithin ð i f s x) :=
g.toContinuousLinearMap.iteratedFDerivWithin_comp_left hf hs hx hi
rw [this]
apply LinearIsometry.norm_compContinuousMultilinearMap
/-- Composition with a linear isometry on the left preserves the norm of the iterated
derivative. -/
theorem LinearIsometry.norm_iteratedFDeriv_comp_left {f : E â F} (g : F ââáµ¢[ð] G)
(hf : ContDiff ð n f) (x : E) {i : â} (hi : (i : ââ) †n) :
âiteratedFDeriv ð i (g â f) xâ = âiteratedFDeriv ð i f xâ := by
simp only [â iteratedFDerivWithin_univ]
exact g.norm_iteratedFDerivWithin_comp_left hf.contDiffOn uniqueDiffOn_univ (mem_univ x) hi
/-- Composition with a linear isometry equiv on the left preserves the norm of the iterated
derivative within a set. -/
theorem LinearIsometryEquiv.norm_iteratedFDerivWithin_comp_left (g : F ââáµ¢[ð] G) (f : E â F)
(hs : UniqueDiffOn ð s) (hx : x â s) (i : â) :
âiteratedFDerivWithin ð i (g â f) s xâ = âiteratedFDerivWithin ð i f s xâ := by
-- For an equivalence, no differentiability assumption on `f` is needed.
have :
iteratedFDerivWithin ð i (g â f) s x =
(g : F âL[ð] G).compContinuousMultilinearMap (iteratedFDerivWithin ð i f s x) :=
g.toContinuousLinearEquiv.iteratedFDerivWithin_comp_left f hs hx i
rw [this]
apply LinearIsometry.norm_compContinuousMultilinearMap g.toLinearIsometry
/-- Composition with a linear isometry equiv on the left preserves the norm of the iterated
derivative. -/
theorem LinearIsometryEquiv.norm_iteratedFDeriv_comp_left (g : F ââáµ¢[ð] G) (f : E â F) (x : E)
(i : â) : âiteratedFDeriv ð i (g â f) xâ = âiteratedFDeriv ð i f xâ := by
rw [â iteratedFDerivWithin_univ, â iteratedFDerivWithin_univ]
apply g.norm_iteratedFDerivWithin_comp_left f uniqueDiffOn_univ (mem_univ x) i
/-- Composition by continuous linear equivs on the left respects higher differentiability at a
point in a domain. -/
theorem ContinuousLinearEquiv.comp_contDiffWithinAt_iff (e : F âL[ð] G) :
ContDiffWithinAt ð n (e â f) s x â ContDiffWithinAt ð n f s x :=
-- For the forward direction, recover `f` from `e â f` by composing with `e.symm`.
âšfun H => by
simpa only [(· â ·), e.symm.coe_coe, e.symm_apply_apply] using
H.continuousLinearMap_comp (e.symm : G âL[ð] F),
fun H => H.continuousLinearMap_comp (e : F âL[ð] G)â©
/-- Composition by continuous linear equivs on the left respects higher differentiability at a
point. -/
theorem ContinuousLinearEquiv.comp_contDiffAt_iff (e : F âL[ð] G) :
ContDiffAt ð n (e â f) x â ContDiffAt ð n f x := by
simp only [â contDiffWithinAt_univ, e.comp_contDiffWithinAt_iff]
/-- Composition by continuous linear equivs on the left respects higher differentiability on
domains. -/
theorem ContinuousLinearEquiv.comp_contDiffOn_iff (e : F âL[ð] G) :
ContDiffOn ð n (e â f) s â ContDiffOn ð n f s := by
simp [ContDiffOn, e.comp_contDiffWithinAt_iff]
/-- Composition by continuous linear equivs on the left respects higher differentiability. -/
theorem ContinuousLinearEquiv.comp_contDiff_iff (e : F âL[ð] G) :
ContDiff ð n (e â f) â ContDiff ð n f := by
simp only [â contDiffOn_univ, e.comp_contDiffOn_iff]
/-- If `f` admits a Taylor series `p` in a set `s`, and `g` is linear, then `f â g` admits a Taylor
series in `g â»Â¹' s`, whose `k`-th term is given by `p k (g vâ, ..., g vâ)` . -/
theorem HasFTaylorSeriesUpToOn.compContinuousLinearMap (hf : HasFTaylorSeriesUpToOn n f p s)
(g : G âL[ð] E) :
HasFTaylorSeriesUpToOn n (f â g) (fun x k => (p (g x) k).compContinuousLinearMap fun _ => g)
(g â»Â¹' s) := by
-- `A m` precomposes an `m`-multilinear map with `g` in every slot; it is a bounded
-- linear operation, which yields the differentiability of the new series.
let A : â m : â, (E[Ãm]âL[ð] F) â G[Ãm]âL[ð] F := fun m h => h.compContinuousLinearMap fun _ => g
have hA : â m, IsBoundedLinearMap ð (A m) := fun m =>
isBoundedLinearMap_continuousMultilinearMap_comp_linear g
constructor
-- Zeroth term: the series value at `x` is `f (g x)`.
· intro x hx
simp only [(hf.zero_eq (g x) hx).symm, Function.comp_apply]
change (p (g x) 0 fun _ : Fin 0 => g 0) = p (g x) 0 0
rw [ContinuousLinearMap.map_zero]
rfl
-- Derivative clause: chain rule, differentiating the series of `f` at `g x` through `g`.
· intro m hm x hx
convert (hA m).hasFDerivAt.comp_hasFDerivWithinAt x
((hf.fderivWithin m hm (g x) hx).comp x g.hasFDerivWithinAt (Subset.refl _))
ext y v
change p (g x) (Nat.succ m) (g â cons y v) = p (g x) m.succ (cons (g y) (g â v))
rw [comp_cons]
-- Continuity clause: composition of continuous maps.
· intro m hm
exact (hA m).continuous.comp_continuousOn <| (hf.cont m hm).comp g.continuous.continuousOn <|
Subset.refl _
/-- Composition by continuous linear maps on the right preserves `C^n` functions at a point on
a domain. -/
theorem ContDiffWithinAt.comp_continuousLinearMap {x : G} (g : G →L[𝕜] E)
    (hf : ContDiffWithinAt 𝕜 n f s (g x)) : ContDiffWithinAt 𝕜 n (f ∘ g) (g ⁻¹' s) x := by
  intro m hm
  -- Obtain a local Taylor series for `f` near `g x`, and pull it back along `g`.
  rcases hf m hm with ⟨u, hu, p, hp⟩
  refine ⟨g ⁻¹' u, ?_, _, hp.compContinuousLinearMap g⟩
  refine g.continuous.continuousWithinAt.tendsto_nhdsWithin ?_ hu
  exact (mapsTo_singleton.2 <| mem_singleton _).union_union (mapsTo_preimage _ _)

/-- Composition by continuous linear maps on the right preserves `C^n` functions on domains. -/
theorem ContDiffOn.comp_continuousLinearMap (hf : ContDiffOn 𝕜 n f s) (g : G →L[𝕜] E) :
    ContDiffOn 𝕜 n (f ∘ g) (g ⁻¹' s) := fun x hx => (hf (g x) hx).comp_continuousLinearMap g

/-- Composition by continuous linear maps on the right preserves `C^n` functions. -/
theorem ContDiff.comp_continuousLinearMap {f : E → F} {g : G →L[𝕜] E} (hf : ContDiff 𝕜 n f) :
    ContDiff 𝕜 n (f ∘ g) :=
  contDiffOn_univ.1 <| ContDiffOn.comp_continuousLinearMap (contDiffOn_univ.2 hf) _
/-- The iterated derivative within a set of the composition with a linear map on the right is
obtained by composing the iterated derivative with the linear map. -/
theorem ContinuousLinearMap.iteratedFDerivWithin_comp_right {f : E → F} (g : G →L[𝕜] E)
    (hf : ContDiffOn 𝕜 n f s) (hs : UniqueDiffOn 𝕜 s) (h's : UniqueDiffOn 𝕜 (g ⁻¹' s)) {x : G}
    (hx : g x ∈ s) {i : ℕ} (hi : (i : ℕ∞) ≤ n) :
    iteratedFDerivWithin 𝕜 i (f ∘ g) (g ⁻¹' s) x =
      (iteratedFDerivWithin 𝕜 i f s (g x)).compContinuousLinearMap fun _ => g :=
  -- The pulled-back Taylor series of `f` is a Taylor series of `f ∘ g`; by uniqueness of
  -- derivatives on `g ⁻¹' s`, its terms agree with `iteratedFDerivWithin` there.
  (((hf.ftaylorSeriesWithin hs).compContinuousLinearMap g).eq_iteratedFDerivWithin_of_uniqueDiffOn
    hi h's hx).symm
/-- The iterated derivative within a set of the composition with a linear equiv on the right is
obtained by composing the iterated derivative with the linear equiv. -/
theorem ContinuousLinearEquiv.iteratedFDerivWithin_comp_right (g : G ≃L[𝕜] E) (f : E → F)
    (hs : UniqueDiffOn 𝕜 s) {x : G} (hx : g x ∈ s) (i : ℕ) :
    iteratedFDerivWithin 𝕜 i (f ∘ g) (g ⁻¹' s) x =
      (iteratedFDerivWithin 𝕜 i f s (g x)).compContinuousLinearMap fun _ => g := by
  -- Induction on the order `i`; since `g` is an equivalence, no smoothness of `f` is needed.
  induction' i with i IH generalizing x
  · -- Order `0`: both sides are the value `f (g x)`.
    ext1
    simp only [Nat.zero_eq, iteratedFDerivWithin_zero_apply, comp_apply,
      ContinuousMultilinearMap.compContinuousLinearMap_apply]
  · -- Order `i + 1`: rewrite the inner iterated derivative via the inductive hypothesis, then
    -- differentiate through the linear reindexing equivalence and through `g` on the right.
    ext1 m
    simp only [ContinuousMultilinearMap.compContinuousLinearMap_apply,
      ContinuousLinearEquiv.coe_coe, iteratedFDerivWithin_succ_apply_left]
    have : fderivWithin 𝕜 (iteratedFDerivWithin 𝕜 i (f ∘ g) (g ⁻¹' s)) (g ⁻¹' s) x =
        fderivWithin 𝕜
          (ContinuousMultilinearMap.compContinuousLinearMapEquivL _ (fun _x : Fin i => g) ∘
            (iteratedFDerivWithin 𝕜 i f s ∘ g)) (g ⁻¹' s) x :=
      fderivWithin_congr' (@IH) hx
    rw [this, ContinuousLinearEquiv.comp_fderivWithin _ (g.uniqueDiffOn_preimage_iff.2 hs x hx)]
    simp only [ContinuousLinearMap.coe_comp', ContinuousLinearEquiv.coe_coe, comp_apply,
      ContinuousMultilinearMap.compContinuousLinearMapEquivL_apply,
      ContinuousMultilinearMap.compContinuousLinearMap_apply]
    rw [ContinuousLinearEquiv.comp_right_fderivWithin _ (g.uniqueDiffOn_preimage_iff.2 hs x hx),
      ContinuousLinearMap.coe_comp', coe_coe, comp_apply, tail_def, tail_def]
/-- The iterated derivative of the composition with a linear map on the right is
obtained by composing the iterated derivative with the linear map. -/
theorem ContinuousLinearMap.iteratedFDeriv_comp_right (g : G →L[𝕜] E) {f : E → F}
    (hf : ContDiff 𝕜 n f) (x : G) {i : ℕ} (hi : (i : ℕ∞) ≤ n) :
    iteratedFDeriv 𝕜 i (f ∘ g) x =
      (iteratedFDeriv 𝕜 i f (g x)).compContinuousLinearMap fun _ => g := by
  -- Reduce to the `Within` version on `univ`.
  simp only [← iteratedFDerivWithin_univ]
  exact g.iteratedFDerivWithin_comp_right hf.contDiffOn uniqueDiffOn_univ uniqueDiffOn_univ
    (mem_univ _) hi

/-- Composition with a linear isometry on the right preserves the norm of the iterated derivative
within a set. -/
theorem LinearIsometryEquiv.norm_iteratedFDerivWithin_comp_right (g : G ≃ₗᵢ[𝕜] E) (f : E → F)
    (hs : UniqueDiffOn 𝕜 s) {x : G} (hx : g x ∈ s) (i : ℕ) :
    ‖iteratedFDerivWithin 𝕜 i (f ∘ g) (g ⁻¹' s) x‖ = ‖iteratedFDerivWithin 𝕜 i f s (g x)‖ := by
  -- Identify the iterated derivative of `f ∘ g` via the linear-equiv version, then use that
  -- composing a multilinear map with a linear isometry equiv preserves the norm.
  have : iteratedFDerivWithin 𝕜 i (f ∘ g) (g ⁻¹' s) x =
      (iteratedFDerivWithin 𝕜 i f s (g x)).compContinuousLinearMap fun _ => g :=
    g.toContinuousLinearEquiv.iteratedFDerivWithin_comp_right f hs hx i
  rw [this, ContinuousMultilinearMap.norm_compContinuous_linearIsometryEquiv]

/-- Composition with a linear isometry on the right preserves the norm of the iterated
derivative. -/
theorem LinearIsometryEquiv.norm_iteratedFDeriv_comp_right (g : G ≃ₗᵢ[𝕜] E) (f : E → F) (x : G)
    (i : ℕ) : ‖iteratedFDeriv 𝕜 i (f ∘ g) x‖ = ‖iteratedFDeriv 𝕜 i f (g x)‖ := by
  simp only [← iteratedFDerivWithin_univ]
  apply g.norm_iteratedFDerivWithin_comp_right f uniqueDiffOn_univ (mem_univ (g x)) i
/-- Composition by continuous linear equivs on the right respects higher differentiability at a
point in a domain. -/
theorem ContinuousLinearEquiv.contDiffWithinAt_comp_iff (e : G ≃L[𝕜] E) :
    ContDiffWithinAt 𝕜 n (f ∘ e) (e ⁻¹' s) (e.symm x) ↔ ContDiffWithinAt 𝕜 n f s x := by
  constructor
  · -- Precompose with `e.symm` to undo the composition with `e`.
    intro H
    simpa [← preimage_comp, (· ∘ ·)] using H.comp_continuousLinearMap (e.symm : E →L[𝕜] G)
  · -- Precompose with `e`, after rewriting the base point as `e (e.symm x)`.
    intro H
    rw [← e.apply_symm_apply x, ← e.coe_coe] at H
    exact H.comp_continuousLinearMap _

/-- Composition by continuous linear equivs on the right respects higher differentiability at a
point. -/
theorem ContinuousLinearEquiv.contDiffAt_comp_iff (e : G ≃L[𝕜] E) :
    ContDiffAt 𝕜 n (f ∘ e) (e.symm x) ↔ ContDiffAt 𝕜 n f x := by
  rw [← contDiffWithinAt_univ, ← contDiffWithinAt_univ, ← preimage_univ]
  exact e.contDiffWithinAt_comp_iff

/-- Composition by continuous linear equivs on the right respects higher differentiability on
domains. -/
theorem ContinuousLinearEquiv.contDiffOn_comp_iff (e : G ≃L[𝕜] E) :
    ContDiffOn 𝕜 n (f ∘ e) (e ⁻¹' s) ↔ ContDiffOn 𝕜 n f s :=
  ⟨fun H => by simpa [(· ∘ ·)] using H.comp_continuousLinearMap (e.symm : E →L[𝕜] G), fun H =>
    H.comp_continuousLinearMap (e : G →L[𝕜] E)⟩

/-- Composition by continuous linear equivs on the right respects higher differentiability. -/
theorem ContinuousLinearEquiv.contDiff_comp_iff (e : G ≃L[𝕜] E) :
    ContDiff 𝕜 n (f ∘ e) ↔ ContDiff 𝕜 n f := by
  rw [← contDiffOn_univ, ← contDiffOn_univ, ← preimage_univ]
  exact e.contDiffOn_comp_iff
/-- If two functions `f` and `g` admit Taylor series `p` and `q` in a set `s`, then the cartesian
product of `f` and `g` admits the cartesian product of `p` and `q` as a Taylor series. -/
theorem HasFTaylorSeriesUpToOn.prod (hf : HasFTaylorSeriesUpToOn n f p s) {g : E → G}
    {q : E → FormalMultilinearSeries 𝕜 E G} (hg : HasFTaylorSeriesUpToOn n g q s) :
    HasFTaylorSeriesUpToOn n (fun y => (f y, g y)) (fun y k => (p y k).prod (q y k)) s := by
  -- `L m` pairs two `m`-multilinear maps into one with product target; it is linear, so we can
  -- differentiate and take continuity through it.
  set L := fun m => ContinuousMultilinearMap.prodL 𝕜 (fun _ : Fin m => E) F G
  constructor
  · intro x hx; rw [← hf.zero_eq x hx, ← hg.zero_eq x hx]; rfl
  · -- Differentiate the paired series through the linear map `L m`.
    intro m hm x hx
    convert (L m).hasFDerivAt.comp_hasFDerivWithinAt x
      ((hf.fderivWithin m hm x hx).prod (hg.fderivWithin m hm x hx))
  · -- Continuity of each paired term.
    intro m hm
    exact (L m).continuous.comp_continuousOn ((hf.cont m hm).prod (hg.cont m hm))

/-- The cartesian product of `C^n` functions at a point in a domain is `C^n`. -/
theorem ContDiffWithinAt.prod {s : Set E} {f : E → F} {g : E → G} (hf : ContDiffWithinAt 𝕜 n f s x)
    (hg : ContDiffWithinAt 𝕜 n g s x) : ContDiffWithinAt 𝕜 n (fun x : E => (f x, g x)) s x := by
  intro m hm
  -- Take Taylor series for `f` and `g` on neighborhoods `u` and `v`, and pair them on `u ∩ v`.
  rcases hf m hm with ⟨u, hu, p, hp⟩
  rcases hg m hm with ⟨v, hv, q, hq⟩
  exact
    ⟨u ∩ v, Filter.inter_mem hu hv, _,
      (hp.mono inter_subset_left).prod (hq.mono inter_subset_right)⟩

/-- The cartesian product of `C^n` functions on domains is `C^n`. -/
theorem ContDiffOn.prod {s : Set E} {f : E → F} {g : E → G} (hf : ContDiffOn 𝕜 n f s)
    (hg : ContDiffOn 𝕜 n g s) : ContDiffOn 𝕜 n (fun x : E => (f x, g x)) s := fun x hx =>
  (hf x hx).prod (hg x hx)

/-- The cartesian product of `C^n` functions at a point is `C^n`. -/
theorem ContDiffAt.prod {f : E → F} {g : E → G} (hf : ContDiffAt 𝕜 n f x)
    (hg : ContDiffAt 𝕜 n g x) : ContDiffAt 𝕜 n (fun x : E => (f x, g x)) x :=
  contDiffWithinAt_univ.1 <|
    ContDiffWithinAt.prod (contDiffWithinAt_univ.2 hf) (contDiffWithinAt_univ.2 hg)

/-- The cartesian product of `C^n` functions is `C^n`. -/
theorem ContDiff.prod {f : E → F} {g : E → G} (hf : ContDiff 𝕜 n f) (hg : ContDiff 𝕜 n g) :
    ContDiff 𝕜 n fun x : E => (f x, g x) :=
  contDiffOn_univ.1 <| ContDiffOn.prod (contDiffOn_univ.2 hf) (contDiffOn_univ.2 hg)
/-!
### Composition of `C^n` functions
We show that the composition of `C^n` functions is `C^n`. One way to prove it would be to write
the `n`-th derivative of the composition (this is Faà di Bruno's formula) and check its continuity,
but this is very painful. Instead, we go for a simple inductive proof. Assume it is done for `n`.
Then, to check it for `n+1`, one needs to check that the derivative of `g â f` is `C^n`, i.e.,
that `Dg(f x) ⬠Df(x)` is `C^n`. The term `Dg (f x)` is the composition of two `C^n` functions, so
it is `C^n` by the inductive assumption. The term `Df(x)` is also `C^n`. Then, the matrix
multiplication is the application of a bilinear map (which is `C^â`, and therefore `C^n`) to
`x ⊠(Dg(f x), Df x)`. As the composition of two `C^n` maps, it is again `C^n`, and we are done.
There is a subtlety in this argument: we apply the inductive assumption to functions on other Banach
spaces. In maths, one would say: prove by induction over `n` that, for all `C^n` maps between all
pairs of Banach spaces, their composition is `C^n`. In Lean, this is fine as long as the spaces
stay in the same universe. This is not the case in the above argument: if `E` lives in universe `u`
and `F` lives in universe `v`, then the space of linear maps from `E` to `F` (to which the
derivative of `f` belongs) lives in universe `max u v`. If one could quantify over finitely many
universes, the above
proof would work fine, but this is not the case. One could still write the proof considering spaces
in any universe in `u, v, w, max u v, max v w, max u v w`, but it would be extremely tedious and
lead to a lot of duplication. Instead, we formulate the above proof when all spaces live in the same
universe (where everything is fine), and then we deduce the general result by lifting all our spaces
to a common universe through `ULift`. This lifting is done through a continuous linear equiv.
We have already proved that composing with such a linear equiv does not change the fact of
being `C^n`, which concludes the proof.
-/
/-- Auxiliary lemma proving that the composition of `C^n` functions on domains is `C^n` when all
spaces live in the same universe. Use instead `ContDiffOn.comp` which removes the universe
assumption (but is deduced from this one). -/
private theorem ContDiffOn.comp_same_univ {Eu : Type u} [NormedAddCommGroup Eu] [NormedSpace 𝕜 Eu]
    {Fu : Type u} [NormedAddCommGroup Fu] [NormedSpace 𝕜 Fu] {Gu : Type u} [NormedAddCommGroup Gu]
    [NormedSpace 𝕜 Gu] {s : Set Eu} {t : Set Fu} {g : Fu → Gu} {f : Eu → Fu}
    (hg : ContDiffOn 𝕜 n g t) (hf : ContDiffOn 𝕜 n f s) (st : s ⊆ f ⁻¹' t) :
    ContDiffOn 𝕜 n (g ∘ f) s := by
  -- Induction on the smoothness exponent `n` (cases `0`, `n + 1` and `∞`).
  induction' n using ENat.nat_induction with n IH Itop generalizing Eu Fu Gu
  · -- `C^0` means continuous, and continuity is preserved by composition.
    rw [contDiffOn_zero] at hf hg ⊢
    exact ContinuousOn.comp hg hf st
  · -- Successor case: it suffices to exhibit, near each point, a derivative of `g ∘ f` that is
    -- itself `C^n`; the candidate is `Dg (f y) ∘ Df y`, by the chain rule.
    rw [contDiffOn_succ_iff_hasFDerivWithinAt] at hg ⊢
    intro x hx
    rcases (contDiffOn_succ_iff_hasFDerivWithinAt.1 hf) x hx with ⟨u, hu, f', hf', f'_diff⟩
    rcases hg (f x) (st hx) with ⟨v, hv, g', hg', g'_diff⟩
    rw [insert_eq_of_mem hx] at hu ⊢
    have xu : x ∈ u := mem_of_mem_nhdsWithin hx hu
    -- `w` is where both local derivative data for `f` and `g` are simultaneously available.
    let w := s ∩ (u ∩ f ⁻¹' v)
    have wv : w ⊆ f ⁻¹' v := fun y hy => hy.2.2
    have wu : w ⊆ u := fun y hy => hy.2.1
    have ws : w ⊆ s := fun y hy => hy.1
    refine ⟨w, ?_, fun y => (g' (f y)).comp (f' y), ?_, ?_⟩
    · -- `w` is a neighborhood of `x` within `s`.
      show w ∈ 𝓝[s] x
      apply Filter.inter_mem self_mem_nhdsWithin
      apply Filter.inter_mem hu
      apply ContinuousWithinAt.preimage_mem_nhdsWithin'
      · rw [← continuousWithinAt_inter' hu]
        exact (hf' x xu).differentiableWithinAt.continuousWithinAt.mono inter_subset_right
      · apply nhdsWithin_mono _ _ hv
        exact Subset.trans (image_subset_iff.mpr st) (subset_insert (f x) t)
    · -- Chain rule: `g ∘ f` has derivative `g' (f y) ∘ f' y` at every point of `w`.
      show ∀ y ∈ w, HasFDerivWithinAt (g ∘ f) ((g' (f y)).comp (f' y)) w y
      rintro y ⟨-, yu, yv⟩
      exact (hg' (f y) yv).comp y ((hf' y yu).mono wu) wv
    · -- The derivative is `C^n`: combine the inductive hypothesis with smoothness of
      -- composition of continuous linear maps (a bounded bilinear map).
      show ContDiffOn 𝕜 n (fun y => (g' (f y)).comp (f' y)) w
      have A : ContDiffOn 𝕜 n (fun y => g' (f y)) w :=
        IH g'_diff ((hf.of_le (WithTop.coe_le_coe.2 (Nat.le_succ n))).mono ws) wv
      have B : ContDiffOn 𝕜 n f' w := f'_diff.mono wu
      have C : ContDiffOn 𝕜 n (fun y => (g' (f y), f' y)) w := A.prod B
      have D : ContDiffOn 𝕜 n (fun p : (Fu →L[𝕜] Gu) × (Eu →L[𝕜] Fu) => p.1.comp p.2) univ :=
        isBoundedBilinearMap_comp.contDiff.contDiffOn
      exact IH D C (subset_univ _)
  · -- `C^∞` reduces to `C^n` for every finite `n`.
    rw [contDiffOn_top] at hf hg ⊢
    exact fun n => Itop n (hg n) (hf n) st
/-- The composition of `C^n` functions on domains is `C^n`. -/
theorem ContDiffOn.comp {s : Set E} {t : Set F} {g : F → G} {f : E → F} (hg : ContDiffOn 𝕜 n g t)
    (hf : ContDiffOn 𝕜 n f s) (st : s ⊆ f ⁻¹' t) : ContDiffOn 𝕜 n (g ∘ f) s := by
  /- we lift all the spaces to a common universe, as we have already proved the result in this
    situation. -/
  let Eu : Type max uE uF uG := ULift.{max uF uG} E
  let Fu : Type max uE uF uG := ULift.{max uE uG} F
  let Gu : Type max uE uF uG := ULift.{max uE uF} G
  -- declare the isomorphisms
  have isoE : Eu ≃L[𝕜] E := ContinuousLinearEquiv.ulift
  have isoF : Fu ≃L[𝕜] F := ContinuousLinearEquiv.ulift
  have isoG : Gu ≃L[𝕜] G := ContinuousLinearEquiv.ulift
  -- lift the functions to the new spaces, check smoothness there, and then go back.
  let fu : Eu → Fu := (isoF.symm ∘ f) ∘ isoE
  have fu_diff : ContDiffOn 𝕜 n fu (isoE ⁻¹' s) := by
    rwa [isoE.contDiffOn_comp_iff, isoF.symm.comp_contDiffOn_iff]
  let gu : Fu → Gu := (isoG.symm ∘ g) ∘ isoF
  have gu_diff : ContDiffOn 𝕜 n gu (isoF ⁻¹' t) := by
    rwa [isoF.contDiffOn_comp_iff, isoG.symm.comp_contDiffOn_iff]
  -- the single-universe lemma applies to the lifted functions.
  have main : ContDiffOn 𝕜 n (gu ∘ fu) (isoE ⁻¹' s) := by
    apply ContDiffOn.comp_same_univ gu_diff fu_diff
    intro y hy
    simp only [fu, ContinuousLinearEquiv.coe_apply, Function.comp_apply, mem_preimage]
    rw [isoF.apply_symm_apply (f (isoE y))]
    exact st hy
  -- transport the conclusion back through the isomorphisms.
  have : gu ∘ fu = (isoG.symm ∘ g ∘ f) ∘ isoE := by
    ext y
    simp only [fu, gu, Function.comp_apply]
    rw [isoF.apply_symm_apply (f (isoE y))]
  rwa [this, isoE.contDiffOn_comp_iff, isoG.symm.comp_contDiffOn_iff] at main
/-- The composition of `C^n` functions on domains is `C^n`. -/
theorem ContDiffOn.comp' {s : Set E} {t : Set F} {g : F → G} {f : E → F} (hg : ContDiffOn 𝕜 n g t)
    (hf : ContDiffOn 𝕜 n f s) : ContDiffOn 𝕜 n (g ∘ f) (s ∩ f ⁻¹' t) :=
  hg.comp (hf.mono inter_subset_left) inter_subset_right

/-- The composition of a `C^n` function on a domain with a `C^n` function is `C^n`. -/
theorem ContDiff.comp_contDiffOn {s : Set E} {g : F → G} {f : E → F} (hg : ContDiff 𝕜 n g)
    (hf : ContDiffOn 𝕜 n f s) : ContDiffOn 𝕜 n (g ∘ f) s :=
  (contDiffOn_univ.2 hg).comp hf subset_preimage_univ

/-- The composition of `C^n` functions is `C^n`. -/
theorem ContDiff.comp {g : F → G} {f : E → F} (hg : ContDiff 𝕜 n g) (hf : ContDiff 𝕜 n f) :
    ContDiff 𝕜 n (g ∘ f) :=
  contDiffOn_univ.1 <| ContDiffOn.comp (contDiffOn_univ.2 hg) (contDiffOn_univ.2 hf) (subset_univ _)
/-- The composition of `C^n` functions at points in domains is `C^n`. -/
theorem ContDiffWithinAt.comp {s : Set E} {t : Set F} {g : F → G} {f : E → F} (x : E)
    (hg : ContDiffWithinAt 𝕜 n g t (f x)) (hf : ContDiffWithinAt 𝕜 n f s x) (st : s ⊆ f ⁻¹' t) :
    ContDiffWithinAt 𝕜 n (g ∘ f) s x := by
  intro m hm
  -- Get neighborhoods `u` of `f x` within `t` and `v` of `x` within `s` on which `g` and `f`
  -- are `C^m`, respectively.
  rcases hg.contDiffOn hm with ⟨u, u_nhd, _, hu⟩
  rcases hf.contDiffOn hm with ⟨v, v_nhd, vs, hv⟩
  have xmem : x ∈ f ⁻¹' u ∩ v :=
    ⟨(mem_of_mem_nhdsWithin (mem_insert (f x) _) u_nhd : _),
      mem_of_mem_nhdsWithin (mem_insert x s) v_nhd⟩
  -- `f ⁻¹' u` is a neighborhood of `x` within `insert x s`, by continuity of `f`.
  have : f ⁻¹' u ∈ 𝓝[insert x s] x := by
    apply hf.continuousWithinAt.insert_self.preimage_mem_nhdsWithin'
    apply nhdsWithin_mono _ _ u_nhd
    rw [image_insert_eq]
    exact insert_subset_insert (image_subset_iff.mpr st)
  -- The composition is `C^m` on `f ⁻¹' u ∩ v` by the `ContDiffOn` composition lemma.
  have Z :=
    (hu.comp (hv.mono inter_subset_right) inter_subset_left).contDiffWithinAt
      xmem m le_rfl
  -- `f ⁻¹' u ∩ v` and `insert x s` induce the same neighborhood filter at `x`.
  have : 𝓝[f ⁻¹' u ∩ v] x = 𝓝[insert x s] x := by
    have A : f ⁻¹' u ∩ v = insert x s ∩ (f ⁻¹' u ∩ v) := by
      apply Subset.antisymm _ inter_subset_right
      rintro y ⟨hy1, hy2⟩
      simpa only [mem_inter_iff, mem_preimage, hy2, and_true, true_and, vs hy2] using hy1
    rw [A, ← nhdsWithin_restrict'']
    exact Filter.inter_mem this v_nhd
  rwa [insert_eq_of_mem xmem, this] at Z
/-- The composition of `C^n` functions at points in domains is `C^n`,
with a weaker condition on `s` and `t`. -/
theorem ContDiffWithinAt.comp_of_mem {s : Set E} {t : Set F} {g : F → G} {f : E → F} (x : E)
    (hg : ContDiffWithinAt 𝕜 n g t (f x)) (hf : ContDiffWithinAt 𝕜 n f s x)
    (hs : t ∈ 𝓝[f '' s] f x) : ContDiffWithinAt 𝕜 n (g ∘ f) s x :=
  (hg.mono_of_mem hs).comp x hf (subset_preimage_image f s)

/-- The composition of `C^n` functions at points in domains is `C^n`. -/
theorem ContDiffWithinAt.comp' {s : Set E} {t : Set F} {g : F → G} {f : E → F} (x : E)
    (hg : ContDiffWithinAt 𝕜 n g t (f x)) (hf : ContDiffWithinAt 𝕜 n f s x) :
    ContDiffWithinAt 𝕜 n (g ∘ f) (s ∩ f ⁻¹' t) x :=
  hg.comp x (hf.mono inter_subset_left) inter_subset_right

/-- Composing a function that is `C^n` at a point with a function that is `C^n` within a set
at that point gives a composition that is `C^n` within the set. -/
theorem ContDiffAt.comp_contDiffWithinAt {n} (x : E) (hg : ContDiffAt 𝕜 n g (f x))
    (hf : ContDiffWithinAt 𝕜 n f s x) : ContDiffWithinAt 𝕜 n (g ∘ f) s x :=
  hg.comp x hf (mapsTo_univ _ _)

/-- The composition of `C^n` functions at points is `C^n`. -/
nonrec theorem ContDiffAt.comp (x : E) (hg : ContDiffAt 𝕜 n g (f x)) (hf : ContDiffAt 𝕜 n f x) :
    ContDiffAt 𝕜 n (g ∘ f) x :=
  hg.comp x hf subset_preimage_univ

/-- Composing a `C^n` function with a function that is `C^n` within a set. -/
theorem ContDiff.comp_contDiffWithinAt {g : F → G} {f : E → F} (h : ContDiff 𝕜 n g)
    (hf : ContDiffWithinAt 𝕜 n f t x) : ContDiffWithinAt 𝕜 n (g ∘ f) t x :=
  haveI : ContDiffWithinAt 𝕜 n g univ (f x) := h.contDiffAt.contDiffWithinAt
  this.comp x hf (subset_univ _)

/-- Composing a `C^n` function with a function that is `C^n` at a point. -/
theorem ContDiff.comp_contDiffAt {g : F → G} {f : E → F} (x : E) (hg : ContDiff 𝕜 n g)
    (hf : ContDiffAt 𝕜 n f x) : ContDiffAt 𝕜 n (g ∘ f) x :=
  hg.comp_contDiffWithinAt hf
/-!
### Smoothness of projections
-/
/-- The first projection in a product is `C^â`. -/
theorem contDiff_fst : ContDiff ð n (Prod.fst : E Ã F â E) :=
IsBoundedLinearMap.contDiff IsBoundedLinearMap.fst
/-- Postcomposing `f` with `Prod.fst` is `C^n` -/
theorem ContDiff.fst {f : E â F Ã G} (hf : ContDiff ð n f) : ContDiff ð n fun x => (f x).1 :=
contDiff_fst.comp hf
/-- Precomposing `f` with `Prod.fst` is `C^n` -/
theorem ContDiff.fst' {f : E â G} (hf : ContDiff ð n f) : ContDiff ð n fun x : E Ã F => f x.1 :=
hf.comp contDiff_fst
/-- The first projection on a domain in a product is `C^â`. -/
theorem contDiffOn_fst {s : Set (E Ã F)} : ContDiffOn ð n (Prod.fst : E Ã F â E) s :=
ContDiff.contDiffOn contDiff_fst
theorem ContDiffOn.fst {f : E â F Ã G} {s : Set E} (hf : ContDiffOn ð n f s) :
ContDiffOn ð n (fun x => (f x).1) s :=
contDiff_fst.comp_contDiffOn hf
/-- The first projection at a point in a product is `C^â`. -/
theorem contDiffAt_fst {p : E Ã F} : ContDiffAt ð n (Prod.fst : E Ã F â E) p :=
contDiff_fst.contDiffAt
/-- Postcomposing `f` with `Prod.fst` is `C^n` at `(x, y)` -/
theorem ContDiffAt.fst {f : E â F Ã G} {x : E} (hf : ContDiffAt ð n f x) :
ContDiffAt ð n (fun x => (f x).1) x :=
contDiffAt_fst.comp x hf
/-- Precomposing `f` with `Prod.fst` is `C^n` at `(x, y)` -/
theorem ContDiffAt.fst' {f : E â G} {x : E} {y : F} (hf : ContDiffAt ð n f x) :
ContDiffAt ð n (fun x : E Ã F => f x.1) (x, y) :=
ContDiffAt.comp (x, y) hf contDiffAt_fst
/-- Precomposing `f` with `Prod.fst` is `C^n` at `x : E Ã F` -/
theorem ContDiffAt.fst'' {f : E â G} {x : E Ã F} (hf : ContDiffAt ð n f x.1) :
ContDiffAt ð n (fun x : E Ã F => f x.1) x :=
hf.comp x contDiffAt_fst
/-- The first projection within a domain at a point in a product is `C^â`. -/
theorem contDiffWithinAt_fst {s : Set (E Ã F)} {p : E Ã F} :
ContDiffWithinAt ð n (Prod.fst : E Ã F â E) s p :=
contDiff_fst.contDiffWithinAt
/-- The second projection in a product is `C^â`. -/
theorem contDiff_snd : ContDiff ð n (Prod.snd : E Ã F â F) :=
IsBoundedLinearMap.contDiff IsBoundedLinearMap.snd
/-- Postcomposing `f` with `Prod.snd` is `C^n` -/
theorem ContDiff.snd {f : E â F Ã G} (hf : ContDiff ð n f) : ContDiff ð n fun x => (f x).2 :=
contDiff_snd.comp hf
/-- Precomposing `f` with `Prod.snd` is `C^n` -/
theorem ContDiff.snd' {f : F â G} (hf : ContDiff ð n f) : ContDiff ð n fun x : E Ã F => f x.2 :=
hf.comp contDiff_snd
/-- The second projection on a domain in a product is `C^â`. -/
theorem contDiffOn_snd {s : Set (E Ã F)} : ContDiffOn ð n (Prod.snd : E Ã F â F) s :=
ContDiff.contDiffOn contDiff_snd
theorem ContDiffOn.snd {f : E â F Ã G} {s : Set E} (hf : ContDiffOn ð n f s) :
ContDiffOn ð n (fun x => (f x).2) s :=
contDiff_snd.comp_contDiffOn hf
/-- The second projection at a point in a product is `C^â`. -/
theorem contDiffAt_snd {p : E Ã F} : ContDiffAt ð n (Prod.snd : E Ã F â F) p :=
contDiff_snd.contDiffAt
/-- Postcomposing `f` with `Prod.snd` is `C^n` at `x` -/
theorem ContDiffAt.snd {f : E â F Ã G} {x : E} (hf : ContDiffAt ð n f x) :
ContDiffAt ð n (fun x => (f x).2) x :=
contDiffAt_snd.comp x hf
/-- Precomposing `f` with `Prod.snd` is `C^n` at `(x, y)` -/
theorem ContDiffAt.snd' {f : F â G} {x : E} {y : F} (hf : ContDiffAt ð n f y) :
ContDiffAt ð n (fun x : E Ã F => f x.2) (x, y) :=
ContDiffAt.comp (x, y) hf contDiffAt_snd
/-- Precomposing `f` with `Prod.snd` is `C^n` at `x : E Ã F` -/
theorem ContDiffAt.snd'' {f : F â G} {x : E Ã F} (hf : ContDiffAt ð n f x.2) :
ContDiffAt ð n (fun x : E Ã F => f x.2) x :=
hf.comp x contDiffAt_snd
/-- The second projection within a domain at a point in a product is `C^â`. -/
theorem contDiffWithinAt_snd {s : Set (E Ã F)} {p : E Ã F} :
ContDiffWithinAt ð n (Prod.snd : E Ã F â F) s p :=
contDiff_snd.contDiffWithinAt
section NAry

variable {E₁ E₂ E₃ E₄ : Type*}
variable [NormedAddCommGroup E₁] [NormedAddCommGroup E₂] [NormedAddCommGroup E₃]
  [NormedAddCommGroup E₄] [NormedSpace 𝕜 E₁] [NormedSpace 𝕜 E₂] [NormedSpace 𝕜 E₃]
  [NormedSpace 𝕜 E₄]

/-- Composing a binary `C^n` function with two `C^n` argument functions is `C^n`. -/
theorem ContDiff.comp₂ {g : E₁ × E₂ → G} {f₁ : F → E₁} {f₂ : F → E₂} (hg : ContDiff 𝕜 n g)
    (hf₁ : ContDiff 𝕜 n f₁) (hf₂ : ContDiff 𝕜 n f₂) : ContDiff 𝕜 n fun x => g (f₁ x, f₂ x) :=
  hg.comp <| hf₁.prod hf₂

/-- Composing a ternary `C^n` function with three `C^n` argument functions is `C^n`. -/
theorem ContDiff.comp₃ {g : E₁ × E₂ × E₃ → G} {f₁ : F → E₁} {f₂ : F → E₂} {f₃ : F → E₃}
    (hg : ContDiff 𝕜 n g) (hf₁ : ContDiff 𝕜 n f₁) (hf₂ : ContDiff 𝕜 n f₂) (hf₃ : ContDiff 𝕜 n f₃) :
    ContDiff 𝕜 n fun x => g (f₁ x, f₂ x, f₃ x) :=
  hg.comp₂ hf₁ <| hf₂.prod hf₃

/-- Variant of `ContDiff.comp₂` for arguments that are only `C^n` on a domain. -/
theorem ContDiff.comp_contDiff_on₂ {g : E₁ × E₂ → G} {f₁ : F → E₁} {f₂ : F → E₂} {s : Set F}
    (hg : ContDiff 𝕜 n g) (hf₁ : ContDiffOn 𝕜 n f₁ s) (hf₂ : ContDiffOn 𝕜 n f₂ s) :
    ContDiffOn 𝕜 n (fun x => g (f₁ x, f₂ x)) s :=
  hg.comp_contDiffOn <| hf₁.prod hf₂

/-- Variant of `ContDiff.comp₃` for arguments that are only `C^n` on a domain. -/
theorem ContDiff.comp_contDiff_on₃ {g : E₁ × E₂ × E₃ → G} {f₁ : F → E₁} {f₂ : F → E₂} {f₃ : F → E₃}
    {s : Set F} (hg : ContDiff 𝕜 n g) (hf₁ : ContDiffOn 𝕜 n f₁ s) (hf₂ : ContDiffOn 𝕜 n f₂ s)
    (hf₃ : ContDiffOn 𝕜 n f₃ s) : ContDiffOn 𝕜 n (fun x => g (f₁ x, f₂ x, f₃ x)) s :=
  hg.comp_contDiff_on₂ hf₁ <| hf₂.prod hf₃

end NAry
section SpecificBilinearMaps

/-- Pointwise composition of two `C^n` families of continuous linear maps is `C^n`. -/
theorem ContDiff.clm_comp {g : X → F →L[𝕜] G} {f : X → E →L[𝕜] F} (hg : ContDiff 𝕜 n g)
    (hf : ContDiff 𝕜 n f) : ContDiff 𝕜 n fun x => (g x).comp (f x) :=
  isBoundedBilinearMap_comp.contDiff.comp₂ hg hf

/-- Pointwise composition of two families of continuous linear maps that are `C^n` on a domain
is `C^n` on that domain. -/
theorem ContDiffOn.clm_comp {g : X → F →L[𝕜] G} {f : X → E →L[𝕜] F} {s : Set X}
    (hg : ContDiffOn 𝕜 n g s) (hf : ContDiffOn 𝕜 n f s) :
    ContDiffOn 𝕜 n (fun x => (g x).comp (f x)) s :=
  isBoundedBilinearMap_comp.contDiff.comp_contDiff_on₂ hg hf

/-- Pointwise application of a `C^n` family of continuous linear maps to a `C^n` family of
vectors is `C^n`. -/
theorem ContDiff.clm_apply {f : E → F →L[𝕜] G} {g : E → F} {n : ℕ∞} (hf : ContDiff 𝕜 n f)
    (hg : ContDiff 𝕜 n g) : ContDiff 𝕜 n fun x => (f x) (g x) :=
  isBoundedBilinearMap_apply.contDiff.comp₂ hf hg

/-- Pointwise application of a family of continuous linear maps to a family of vectors, both
`C^n` on a domain, is `C^n` on that domain. -/
theorem ContDiffOn.clm_apply {f : E → F →L[𝕜] G} {g : E → F} {n : ℕ∞} (hf : ContDiffOn 𝕜 n f s)
    (hg : ContDiffOn 𝕜 n g s) : ContDiffOn 𝕜 n (fun x => (f x) (g x)) s :=
  isBoundedBilinearMap_apply.contDiff.comp_contDiff_on₂ hf hg

-- Porting note: In Lean 3 we had to give implicit arguments in proofs like the following,
-- to speed up elaboration. In Lean 4 this isn't necessary anymore.
/-- `fun x => (f x).smulRight (g x)` is `C^n` when `f` and `g` are. -/
theorem ContDiff.smulRight {f : E → F →L[𝕜] 𝕜} {g : E → G} {n : ℕ∞} (hf : ContDiff 𝕜 n f)
    (hg : ContDiff 𝕜 n g) : ContDiff 𝕜 n fun x => (f x).smulRight (g x) :=
  isBoundedBilinearMap_smulRight.contDiff.comp₂ hf hg

end SpecificBilinearMaps
section ClmApplyConst

/-- Application of a `ContinuousLinearMap` to a constant commutes with `iteratedFDerivWithin`. -/
theorem iteratedFDerivWithin_clm_apply_const_apply
    {s : Set E} (hs : UniqueDiffOn 𝕜 s) {n : ℕ∞} {c : E → F →L[𝕜] G} (hc : ContDiffOn 𝕜 n c s)
    {i : ℕ} (hi : i ≤ n) {x : E} (hx : x ∈ s) {u : F} {m : Fin i → E} :
    (iteratedFDerivWithin 𝕜 i (fun y ↦ (c y) u) s x) m = (iteratedFDerivWithin 𝕜 i c s x) m u := by
  induction i generalizing x with
  | zero => simp
  | succ i ih =>
    replace hi : i < n := lt_of_lt_of_le (by norm_cast; simp) hi
    -- Both `i`-th iterated derivatives below are differentiable on `s`, which justifies
    -- pulling the evaluations `(· u)` and `(· m)` through `fderivWithin`.
    have h_deriv_apply : DifferentiableOn 𝕜 (iteratedFDerivWithin 𝕜 i (fun y ↦ (c y) u) s) s :=
      (hc.clm_apply contDiffOn_const).differentiableOn_iteratedFDerivWithin hi hs
    have h_deriv : DifferentiableOn 𝕜 (iteratedFDerivWithin 𝕜 i c s) s :=
      hc.differentiableOn_iteratedFDerivWithin hi hs
    simp only [iteratedFDerivWithin_succ_apply_left]
    -- Commute the multilinear application with `fderivWithin`, apply the inductive hypothesis,
    -- then differentiate the pointwise application `y ↦ (c y) u` via the Leibniz rule (the
    -- `u`-argument is constant, so its derivative term vanishes).
    rw [← fderivWithin_continuousMultilinear_apply_const_apply (hs x hx) (h_deriv_apply x hx)]
    rw [fderivWithin_congr' (fun x hx ↦ ih hi.le hx) hx]
    rw [fderivWithin_clm_apply (hs x hx) (h_deriv.continuousMultilinear_apply_const _ x hx)
      (differentiableWithinAt_const u)]
    rw [fderivWithin_const_apply _ (hs x hx)]
    simp only [ContinuousLinearMap.flip_apply, ContinuousLinearMap.comp_zero, zero_add]
    rw [fderivWithin_continuousMultilinear_apply_const_apply (hs x hx) (h_deriv x hx)]

/-- Application of a `ContinuousLinearMap` to a constant commutes with `iteratedFDeriv`. -/
theorem iteratedFDeriv_clm_apply_const_apply
    {n : ℕ∞} {c : E → F →L[𝕜] G} (hc : ContDiff 𝕜 n c)
    {i : ℕ} (hi : i ≤ n) {x : E} {u : F} {m : Fin i → E} :
    (iteratedFDeriv 𝕜 i (fun y ↦ (c y) u) x) m = (iteratedFDeriv 𝕜 i c x) m u := by
  simp only [← iteratedFDerivWithin_univ]
  exact iteratedFDerivWithin_clm_apply_const_apply uniqueDiffOn_univ hc.contDiffOn hi (mem_univ _)

end ClmApplyConst
/-- The natural equivalence `(E Ã F) Ã G â E Ã (F Ã G)` is smooth.
Warning: if you think you need this lemma, it is likely that you can simplify your proof by
reformulating the lemma that you're applying next using the tips in
Note [continuity lemma statement]
-/
theorem contDiff_prodAssoc : ContDiff ð †<| Equiv.prodAssoc E F G :=
(LinearIsometryEquiv.prodAssoc ð E F G).contDiff
/-- The natural equivalence `E Ã (F Ã G) â (E Ã F) Ã G` is smooth.
Warning: see remarks attached to `contDiff_prodAssoc`
-/
theorem contDiff_prodAssoc_symm : ContDiff ð †<| (Equiv.prodAssoc E F G).symm :=
(LinearIsometryEquiv.prodAssoc ð E F G).symm.contDiff
/-! ### Bundled derivatives are smooth -/
/-- One direction of `contDiffWithinAt_succ_iff_hasFDerivWithinAt`, but where all derivatives
are taken within the same set. Version for partial derivatives / functions with parameters. If
`f x` is a `C^n+1` family of functions and `g x` is a `C^n` family of points, then the derivative
of `f x` at `g x` depends in a `C^n` way on `x`. We give a general version of this fact relative
to sets which may not have unique derivatives, in the following form. If `f : E × F → G` is
`C^n+1` at `(x₀, g(x₀))` in `(s ∪ {x₀}) × t ⊆ E × F` and `g : E → F` is `C^n` at `x₀` within some
set `s ⊆ E`, then there is a function `f' : E → F →L[𝕜] G` that is `C^n` at `x₀` within `s` such
that for all `x` sufficiently close to `x₀` within `s ∪ {x₀}` the function `y ↦ f x y` has
derivative `f' x` at `g x` within `t ⊆ F`. For convenience, we return an explicit set of `x`'s
where this holds that is a subset of `s ∪ {x₀}`. We need one additional condition, namely that
`t` is a neighborhood of `g(x₀)` within `g '' s`. -/
theorem ContDiffWithinAt.hasFDerivWithinAt_nhds {f : E → F → G} {g : E → F} {t : Set F} {n : ℕ}
    {x₀ : E} (hf : ContDiffWithinAt 𝕜 (n + 1) (uncurry f) (insert x₀ s ×ˢ t) (x₀, g x₀))
    (hg : ContDiffWithinAt 𝕜 n g s x₀) (hgt : t ∈ 𝓝[g '' s] g x₀) :
    ∃ v ∈ 𝓝[insert x₀ s] x₀, v ⊆ insert x₀ s ∧ ∃ f' : E → F →L[𝕜] G,
      (∀ x ∈ v, HasFDerivWithinAt (f x) (f' x) t (g x)) ∧
        ContDiffWithinAt 𝕜 n (fun x => f' x) s x₀ := by
  -- The graph map `x ↦ (x, g x)` carries the filter `𝓝[s] x₀` into `insert x₀ s ×ˢ t`.
  have hst : insert x₀ s ×ˢ t ∈ 𝓝[(fun x => (x, g x)) '' s] (x₀, g x₀) := by
    refine nhdsWithin_mono _ ?_ (nhdsWithin_prod self_mem_nhdsWithin hgt)
    simp_rw [image_subset_iff, mk_preimage_prod, preimage_id', subset_inter_iff, subset_insert,
      true_and_iff, subset_preimage_image]
  -- Unpack the derivative data for the uncurried function near `(x₀, g x₀)`.
  obtain ⟨v, hv, hvs, f', hvf', hf'⟩ := contDiffWithinAt_succ_iff_hasFDerivWithinAt'.mp hf
  refine
    ⟨(fun z => (z, g z)) ⁻¹' v ∩ insert x₀ s, ?_, inter_subset_right, fun z =>
      (f' (z, g z)).comp (ContinuousLinearMap.inr 𝕜 E F), ?_, ?_⟩
  · -- The chosen set is a neighborhood of `x₀` within `insert x₀ s`.
    refine inter_mem ?_ self_mem_nhdsWithin
    have := mem_of_mem_nhdsWithin (mem_insert _ _) hv
    refine mem_nhdsWithin_insert.mpr ⟨this, ?_⟩
    refine (continuousWithinAt_id.prod hg.continuousWithinAt).preimage_mem_nhdsWithin' ?_
    rw [← nhdsWithin_le_iff] at hst hv ⊢
    exact (hst.trans <| nhdsWithin_mono _ <| subset_insert _ _).trans hv
  · -- The partial derivative in the second variable: compose the full derivative with `inr`.
    intro z hz
    have := hvf' (z, g z) hz.1
    refine this.comp _ (hasFDerivAt_prod_mk_right _ _).hasFDerivWithinAt ?_
    exact mapsTo'.mpr (image_prod_mk_subset_prod_right hz.2)
  · -- Smoothness of `x ↦ f' (x, g x) ∘ inr`: compose `hf'` with continuous linear maps and
    -- with the `C^n` graph map `x ↦ (x, g x)`.
    exact (hf'.continuousLinearMap_comp <| (ContinuousLinearMap.compL 𝕜 F (E × F) G).flip
      (ContinuousLinearMap.inr 𝕜 E F)).comp_of_mem x₀ (contDiffWithinAt_id.prod hg) hst
/-- The most general lemma stating that `x ⊠fderivWithin ð (f x) t (g x)` is `C^n`
at a point within a set.
To show that `x ⊠D_yf(x,y)g(x)` (taken within `t`) is `C^m` at `xâ` within `s`, we require that
* `f` is `C^n` at `(xâ, g(xâ))` within `(s ⪠{xâ}) à t` for `n ⥠m+1`.
* `g` is `C^m` at `xâ` within `s`;
* Derivatives are unique at `g(x)` within `t` for `x` sufficiently close to `xâ` within `s ⪠{xâ}`;
* `t` is a neighborhood of `g(xâ)` within `g '' s`; -/
theorem ContDiffWithinAt.fderivWithin'' {f : E â F â G} {g : E â F} {t : Set F} {n : ââ}
(hf : ContDiffWithinAt ð n (Function.uncurry f) (insert xâ s ÃË¢ t) (xâ, g xâ))
(hg : ContDiffWithinAt ð m g s xâ)
(ht : âá¶ x in ð[insert xâ s] xâ, UniqueDiffWithinAt ð t (g x)) (hmn : m + 1 †n)
(hgt : t â ð[g '' s] g xâ) :
ContDiffWithinAt ð m (fun x => fderivWithin ð (f x) t (g x)) s xâ := by
have : â k : â, (k : ââ) †m â
ContDiffWithinAt ð k (fun x => fderivWithin ð (f x) t (g x)) s xâ := fun k hkm ⊠by
obtain âšv, hv, -, f', hvf', hf'â© :=
(hf.of_le <| (add_le_add_right hkm 1).trans hmn).hasFDerivWithinAt_nhds (hg.of_le hkm) hgt
refine hf'.congr_of_eventuallyEq_insert ?_
filter_upwards [hv, ht]
exact fun y hy h2y => (hvf' y hy).fderivWithin h2y
induction' m with m
· obtain rfl := eq_top_iff.mpr hmn
rw [contDiffWithinAt_top]
exact fun m => this m le_top
exact this _ le_rfl
/-- A special case of `ContDiffWithinAt.fderivWithin''` where we require that `s â gâ»Â¹(t)`. -/
theorem ContDiffWithinAt.fderivWithin' {f : E â F â G} {g : E â F} {t : Set F} {n : ââ}
(hf : ContDiffWithinAt ð n (Function.uncurry f) (insert xâ s ÃË¢ t) (xâ, g xâ))
(hg : ContDiffWithinAt ð m g s xâ)
(ht : âá¶ x in ð[insert xâ s] xâ, UniqueDiffWithinAt ð t (g x)) (hmn : m + 1 †n)
(hst : s â g â»Â¹' t) : ContDiffWithinAt ð m (fun x => fderivWithin ð (f x) t (g x)) s xâ :=
hf.fderivWithin'' hg ht hmn <| mem_of_superset self_mem_nhdsWithin <| image_subset_iff.mpr hst
/-- A special case of `ContDiffWithinAt.fderivWithin'` where we require that `xâ â s` and there
are unique derivatives everywhere within `t`. -/
protected theorem ContDiffWithinAt.fderivWithin {f : E â F â G} {g : E â F} {t : Set F} {n : ââ}
(hf : ContDiffWithinAt ð n (Function.uncurry f) (s ÃË¢ t) (xâ, g xâ))
(hg : ContDiffWithinAt ð m g s xâ) (ht : UniqueDiffOn ð t) (hmn : m + 1 †n) (hxâ : xâ â s)
(hst : s â g â»Â¹' t) : ContDiffWithinAt ð m (fun x => fderivWithin ð (f x) t (g x)) s xâ := by
rw [â insert_eq_self.mpr hxâ] at hf
refine hf.fderivWithin' hg ?_ hmn hst
rw [insert_eq_self.mpr hxâ]
exact eventually_of_mem self_mem_nhdsWithin fun x hx => ht _ (hst hx)
/-- `x ⊠fderivWithin ð (f x) t (g x) (k x)` is smooth at a point within a set. -/
theorem ContDiffWithinAt.fderivWithin_apply {f : E â F â G} {g k : E â F} {t : Set F} {n : ââ}
    (hf : ContDiffWithinAt ð n (Function.uncurry f) (s ÃË¢ t) (xâ, g xâ))
    (hg : ContDiffWithinAt ð m g s xâ) (hk : ContDiffWithinAt ð m k s xâ) (ht : UniqueDiffOn ð t)
    (hmn : m + 1 †n) (hxâ : xâ â s) (hst : s â g â»Â¹' t) :
    ContDiffWithinAt ð m (fun x => fderivWithin ð (f x) t (g x) (k x)) s xâ :=
  -- Compose the smooth evaluation map `(L, v) ⊠L v` with the pair (derivative, `k`).
  (contDiff_fst.clm_apply contDiff_snd).contDiffAt.comp_contDiffWithinAt xâ
    ((hf.fderivWithin hg ht hmn hxâ hst).prod hk)

/-- `fderivWithin ð f s` is smooth at `xâ` within `s`. -/
theorem ContDiffWithinAt.fderivWithin_right (hf : ContDiffWithinAt ð n f s xâ)
    (hs : UniqueDiffOn ð s) (hmn : (m + 1 : ââ) †n) (hxâs : xâ â s) :
    ContDiffWithinAt ð m (fderivWithin ð f s) s xâ :=
  -- Apply the parametric version with the constant parametrization `f x y = f y` and `g = id`.
  ContDiffWithinAt.fderivWithin
    (ContDiffWithinAt.comp (xâ, xâ) hf contDiffWithinAt_snd <| prod_subset_preimage_snd s s)
    contDiffWithinAt_id hs hmn hxâs (by rw [preimage_id'])

-- TODO: can we make a version of `ContDiffWithinAt.fderivWithin` for iterated derivatives?
/-- The `i`-th iterated derivative within `s` of a `C^n` function is `C^m` within `s` at `xâ`
whenever `m + i †n`. -/
theorem ContDiffWithinAt.iteratedFderivWithin_right {i : â} (hf : ContDiffWithinAt ð n f s xâ)
    (hs : UniqueDiffOn ð s) (hmn : (m + i : ââ) †n) (hxâs : xâ â s) :
    ContDiffWithinAt ð m (iteratedFDerivWithin ð i f s) s xâ := by
  induction' i with i hi generalizing m
  · -- Base case `i = 0`: the 0-th iterated derivative is `f` itself, up to a linear isomorphism.
    rw [ENat.coe_zero, add_zero] at hmn
    exact (hf.of_le hmn).continuousLinearMap_comp
      ((continuousMultilinearCurryFin0 ð E F).symm : _ âL[ð] E [Ã0]âL[ð] F)
  · -- Inductive step: differentiate the `i`-th iterated derivative once more and
    -- uncurry via the left currying isomorphism.
    rw [Nat.cast_succ, add_comm _ 1, â add_assoc] at hmn
    exact ((hi hmn).fderivWithin_right hs le_rfl hxâs).continuousLinearMap_comp
      (continuousMultilinearCurryLeftEquiv ð (fun _ : Fin (i+1) ⊠E) F : _ âL[ð] E [Ã(i+1)]âL[ð] F)
/-- `x ⊠fderiv ð (f x) (g x)` is smooth at `xâ`. -/
protected theorem ContDiffAt.fderiv {f : E â F â G} {g : E â F} {n : ââ}
    (hf : ContDiffAt ð n (Function.uncurry f) (xâ, g xâ)) (hg : ContDiffAt ð m g xâ)
    (hmn : m + 1 †n) : ContDiffAt ð m (fun x => fderiv ð (f x) (g x)) xâ := by
  -- Reduce to the `fderivWithin` statement on `univ`.
  simp_rw [â fderivWithin_univ]
  refine (ContDiffWithinAt.fderivWithin hf.contDiffWithinAt hg.contDiffWithinAt uniqueDiffOn_univ
    hmn (mem_univ xâ) ?_).contDiffAt univ_mem
  rw [preimage_univ]

/-- `fderiv ð f` is smooth at `xâ`. -/
theorem ContDiffAt.fderiv_right (hf : ContDiffAt ð n f xâ) (hmn : (m + 1 : ââ) †n) :
    ContDiffAt ð m (fderiv ð f) xâ :=
  ContDiffAt.fderiv (ContDiffAt.comp (xâ, xâ) hf contDiffAt_snd) contDiffAt_id hmn

/-- The `i`-th iterated derivative of a `C^n` function is `C^m` at `xâ` whenever `m + i †n`. -/
theorem ContDiffAt.iteratedFDeriv_right {i : â} (hf : ContDiffAt ð n f xâ)
    (hmn : (m + i : ââ) †n) : ContDiffAt ð m (iteratedFDeriv ð i f) xâ := by
  rw [â iteratedFDerivWithin_univ, â contDiffWithinAt_univ] at *
  exact hf.iteratedFderivWithin_right uniqueDiffOn_univ hmn trivial

/-- `x ⊠fderiv ð (f x) (g x)` is smooth. -/
protected theorem ContDiff.fderiv {f : E â F â G} {g : E â F} {n m : ââ}
    (hf : ContDiff ð m <| Function.uncurry f) (hg : ContDiff ð n g) (hnm : n + 1 †m) :
    ContDiff ð n fun x => fderiv ð (f x) (g x) :=
  contDiff_iff_contDiffAt.mpr fun _ => hf.contDiffAt.fderiv hg.contDiffAt hnm

/-- `fderiv ð f` is smooth. -/
theorem ContDiff.fderiv_right (hf : ContDiff ð n f) (hmn : (m + 1 : ââ) †n) :
    ContDiff ð m (fderiv ð f) :=
  contDiff_iff_contDiffAt.mpr fun _x => hf.contDiffAt.fderiv_right hmn

/-- The `i`-th iterated derivative of a `C^n` function is `C^m` whenever `m + i †n`. -/
theorem ContDiff.iteratedFDeriv_right {i : â} (hf : ContDiff ð n f)
    (hmn : (m + i : ââ) †n) : ContDiff ð m (iteratedFDeriv ð i f) :=
  contDiff_iff_contDiffAt.mpr fun _x => hf.contDiffAt.iteratedFDeriv_right hmn
/-- `x ⊠fderiv ð (f x) (g x)` is continuous. -/
theorem Continuous.fderiv {f : E â F â G} {g : E â F} {n : ââ}
    (hf : ContDiff ð n <| Function.uncurry f) (hg : Continuous g) (hn : 1 †n) :
    Continuous fun x => fderiv ð (f x) (g x) :=
  -- Continuity is `C^0` smoothness; apply the parametric derivative lemma at order 0.
  (hf.fderiv (contDiff_zero.mpr hg) hn).continuous

/-- `x ⊠fderiv ð (f x) (g x) (k x)` is smooth. -/
theorem ContDiff.fderiv_apply {f : E â F â G} {g k : E â F} {n m : ââ}
    (hf : ContDiff ð m <| Function.uncurry f) (hg : ContDiff ð n g) (hk : ContDiff ð n k)
    (hnm : n + 1 †m) : ContDiff ð n fun x => fderiv ð (f x) (g x) (k x) :=
  (hf.fderiv hg hnm).clm_apply hk

/-- The bundled derivative of a `C^{n+1}` function is `C^n`. -/
theorem contDiffOn_fderivWithin_apply {m n : ââ} {s : Set E} {f : E â F} (hf : ContDiffOn ð n f s)
    (hs : UniqueDiffOn ð s) (hmn : m + 1 †n) :
    ContDiffOn ð m (fun p : E Ã E => (fderivWithin ð f s p.1 : E âL[ð] F) p.2) (s ÃË¢ univ) :=
  -- Evaluate the `C^m` derivative map (composed with `fst`) at the second coordinate.
  ((hf.fderivWithin hs hmn).comp contDiffOn_fst (prod_subset_preimage_fst _ _)).clm_apply
    contDiffOn_snd

/-- If a function is at least `C^1`, its bundled derivative (mapping `(x, v)` to `Df(x) v`) is
continuous. -/
theorem ContDiffOn.continuousOn_fderivWithin_apply (hf : ContDiffOn ð n f s) (hs : UniqueDiffOn ð s)
    (hn : 1 †n) :
    ContinuousOn (fun p : E Ã E => (fderivWithin ð f s p.1 : E â F) p.2) (s ÃË¢ univ) :=
  (contDiffOn_fderivWithin_apply hf hs <| by rwa [zero_add]).continuousOn

/-- The bundled derivative of a `C^{n+1}` function is `C^n`. -/
theorem ContDiff.contDiff_fderiv_apply {f : E â F} (hf : ContDiff ð n f) (hmn : m + 1 †n) :
    ContDiff ð m fun p : E Ã E => (fderiv ð f p.1 : E âL[ð] F) p.2 := by
  -- Reduce the global statement to the `fderivWithin` version on `univ`.
  rw [â contDiffOn_univ] at hf â¢
  rw [â fderivWithin_univ, â univ_prod_univ]
  exact contDiffOn_fderivWithin_apply hf uniqueDiffOn_univ hmn
/-!
### Smoothness of functions `f : E â Î i, F' i`
-/
section Pi

variable {ι ι' : Type*} [Fintype ι] [Fintype ι'] {F' : ι â Type*} [â i, NormedAddCommGroup (F' i)]
  [â i, NormedSpace ð (F' i)] {Ï : â i, E â F' i} {p' : â i, E â FormalMultilinearSeries ð E (F' i)}
  {Ί : E â â i, F' i} {P' : E â FormalMultilinearSeries ð E (â i, F' i)}

/-- A function into a pi type has a Taylor series (built componentwise from the `p' i`) up to
order `n` on `s` iff each component `Ï i` has the Taylor series `p' i` up to order `n` on `s`. -/
theorem hasFTaylorSeriesUpToOn_pi :
    HasFTaylorSeriesUpToOn n (fun x i => Ï i x)
      (fun x m => ContinuousMultilinearMap.pi fun i => p' i x m) s â
      â i, HasFTaylorSeriesUpToOn n (Ï i) (p' i) s := by
  -- `pr i` projects onto the `i`-th factor; `L m` is the isometry identifying a family of
  -- multilinear maps with a multilinear map into the pi type.
  set pr := @ContinuousLinearMap.proj ð _ ι F' _ _ _
  letI : â (m : â) (i : ι), NormedSpace ð (E[Ãm]âL[ð] F' i) := fun m i => inferInstance
  set L : â m : â, (â i, E[Ãm]âL[ð] F' i) ââáµ¢[ð] E[Ãm]âL[ð] â i, F' i := fun m =>
    ContinuousMultilinearMap.piâáµ¢ _ _
  refine âšfun h i => ?_, fun h => âšfun x hx => ?_, ?_, ?_â©â©
  · -- Forward direction: post-compose with the projection onto the `i`-th coordinate.
    convert h.continuousLinearMap_comp (pr i)
  · -- Zeroth coefficient agrees with the function, componentwise.
    ext1 i
    exact (h i).zero_eq x hx
  · -- Each coefficient is differentiable with the expected derivative, via `L m`.
    intro m hm x hx
    have := hasFDerivWithinAt_pi.2 fun i => (h i).fderivWithin m hm x hx
    convert (L m).hasFDerivAt.comp_hasFDerivWithinAt x this
  · -- Each coefficient is continuous, via `L m`.
    intro m hm
    have := continuousOn_pi.2 fun i => (h i).cont m hm
    convert (L m).continuous.comp_continuousOn this

/-- Variant of `hasFTaylorSeriesUpToOn_pi`, phrased with a Taylor series `P'` of the bundled
function `Ί` and its compositions with the coordinate projections. -/
@[simp]
theorem hasFTaylorSeriesUpToOn_pi' :
    HasFTaylorSeriesUpToOn n Ί P' s â
      â i, HasFTaylorSeriesUpToOn n (fun x => Ί x i)
        (fun x m => (@ContinuousLinearMap.proj ð _ ι F' _ _ _ i).compContinuousMultilinearMap
          (P' x m)) s := by
  convert hasFTaylorSeriesUpToOn_pi (ð := ð) (Ï := fun i x ⊠Ί x i); ext; rfl

/-- A function into a pi type is `C^n` within `s` at `x` iff each component is. -/
theorem contDiffWithinAt_pi :
    ContDiffWithinAt ð n Ί s x â â i, ContDiffWithinAt ð n (fun x => Ί x i) s x := by
  set pr := @ContinuousLinearMap.proj ð _ ι F' _ _ _
  refine âšfun h i => h.continuousLinearMap_comp (pr i), fun h m hm => ?_â©
  -- For the converse: intersect the finitely many neighborhoods on which each
  -- component has a Taylor series.
  choose u hux p hp using fun i => h i m hm
  exact âšâ i, u i, Filter.iInter_mem.2 hux, _,
    hasFTaylorSeriesUpToOn_pi.2 fun i => (hp i).mono <| iInter_subset _ _â©

/-- A function into a pi type is `C^n` on `s` iff each component is. -/
theorem contDiffOn_pi : ContDiffOn ð n Ί s â â i, ContDiffOn ð n (fun x => Ί x i) s :=
  âšfun h _ x hx => contDiffWithinAt_pi.1 (h x hx) _, fun h x hx =>
    contDiffWithinAt_pi.2 fun i => h i x hxâ©

/-- A function into a pi type is `C^n` at `x` iff each component is. -/
theorem contDiffAt_pi : ContDiffAt ð n Ί x â â i, ContDiffAt ð n (fun x => Ί x i) x :=
  contDiffWithinAt_pi

/-- A function into a pi type is `C^n` iff each component is. -/
theorem contDiff_pi : ContDiff ð n Ί â â i, ContDiff ð n fun x => Ί x i := by
  simp only [â contDiffOn_univ, contDiffOn_pi]

/-- Updating the `i`-th coordinate of a fixed point `x` is a `C^k` function of the new value. -/
theorem contDiff_update [DecidableEq ι] (k : ââ) (x : â i, F' i) (i : ι) :
    ContDiff ð k (update x i) := by
  rw [contDiff_pi]
  intro j
  dsimp [Function.update]
  split_ifs with h
  · -- On the updated coordinate the map is the identity in the new value.
    subst h
    exact contDiff_id
  · -- All other coordinates are constant.
    exact contDiff_const

variable (F') in
/-- `Pi.single i : F' i â â i, F' i` is `C^k`. -/
theorem contDiff_single [DecidableEq ι] (k : ââ) (i : ι) :
    ContDiff ð k (Pi.single i : F' i â â i, F' i) :=
  contDiff_update k 0 i

variable (ð E)

/-- Evaluation at a fixed index `i` is `C^n` on `ι â E`. -/
theorem contDiff_apply (i : ι) : ContDiff ð n fun f : ι â E => f i :=
  contDiff_pi.mp contDiff_id i

/-- Double evaluation at fixed indices `(i, j)` is `C^n` on `ι â ι' â E`. -/
theorem contDiff_apply_apply (i : ι) (j : ι') : ContDiff ð n fun f : ι â ι' â E => f i j :=
  contDiff_pi.mp (contDiff_apply ð (ι' â E) i) j

end Pi
/-! ### Sum of two functions -/
section Add

/-- The sum of two functions admitting Taylor series `p` and `q` up to order `n` on `s`
admits the (termwise) sum `p + q` as a Taylor series up to order `n` on `s`. -/
theorem HasFTaylorSeriesUpToOn.add {q g} (hf : HasFTaylorSeriesUpToOn n f p s)
    (hg : HasFTaylorSeriesUpToOn n g q s) : HasFTaylorSeriesUpToOn n (f + g) (p + q) s := by
  -- Realize the sum as the composition with the linear map `(a, b) ⊠a + b`.
  convert HasFTaylorSeriesUpToOn.continuousLinearMap_comp
    (ContinuousLinearMap.fst ð F F + .snd ð F F) (hf.prod hg)

-- The sum is smooth.
theorem contDiff_add : ContDiff ð n fun p : F Ã F => p.1 + p.2 :=
  (IsBoundedLinearMap.fst.add IsBoundedLinearMap.snd).contDiff

/-- The sum of two `C^n` functions within a set at a point is `C^n` within this set
at this point. -/
theorem ContDiffWithinAt.add {s : Set E} {f g : E â F} (hf : ContDiffWithinAt ð n f s x)
    (hg : ContDiffWithinAt ð n g s x) : ContDiffWithinAt ð n (fun x => f x + g x) s x :=
  contDiff_add.contDiffWithinAt.comp x (hf.prod hg) subset_preimage_univ

/-- The sum of two `C^n` functions at a point is `C^n` at this point. -/
theorem ContDiffAt.add {f g : E â F} (hf : ContDiffAt ð n f x) (hg : ContDiffAt ð n g x) :
    ContDiffAt ð n (fun x => f x + g x) x := by
  rw [â contDiffWithinAt_univ] at *; exact hf.add hg

/-- The sum of two `C^n`functions is `C^n`. -/
theorem ContDiff.add {f g : E â F} (hf : ContDiff ð n f) (hg : ContDiff ð n g) :
    ContDiff ð n fun x => f x + g x :=
  contDiff_add.comp (hf.prod hg)

/-- The sum of two `C^n` functions on a domain is `C^n`. -/
theorem ContDiffOn.add {s : Set E} {f g : E â F} (hf : ContDiffOn ð n f s)
    (hg : ContDiffOn ð n g s) : ContDiffOn ð n (fun x => f x + g x) s := fun x hx =>
  (hf x hx).add (hg x hx)

variable {i : â}

/-- The iterated derivative of the sum of two functions is the sum of the iterated derivatives.
See also `iteratedFDerivWithin_add_apply'`, which uses the spelling `(fun x ⊠f x + g x)`
instead of `f + g`. -/
theorem iteratedFDerivWithin_add_apply {f g : E â F} (hf : ContDiffOn ð i f s)
    (hg : ContDiffOn ð i g s) (hu : UniqueDiffOn ð s) (hx : x â s) :
    iteratedFDerivWithin ð i (f + g) s x =
      iteratedFDerivWithin ð i f s x + iteratedFDerivWithin ð i g s x :=
  -- The sum of the two Taylor series is a Taylor series of `f + g`; with unique derivatives
  -- it must agree with `iteratedFDerivWithin` of the sum.
  Eq.symm <| ((hf.ftaylorSeriesWithin hu).add
    (hg.ftaylorSeriesWithin hu)).eq_iteratedFDerivWithin_of_uniqueDiffOn le_rfl hu hx

/-- The iterated derivative of the sum of two functions is the sum of the iterated derivatives.
This is the same as `iteratedFDerivWithin_add_apply`, but using the spelling `(fun x ⊠f x + g x)`
instead of `f + g`, which can be handy for some rewrites.
TODO: use one form consistently. -/
theorem iteratedFDerivWithin_add_apply' {f g : E â F} (hf : ContDiffOn ð i f s)
    (hg : ContDiffOn ð i g s) (hu : UniqueDiffOn ð s) (hx : x â s) :
    iteratedFDerivWithin ð i (fun x => f x + g x) s x =
      iteratedFDerivWithin ð i f s x + iteratedFDerivWithin ð i g s x :=
  iteratedFDerivWithin_add_apply hf hg hu hx

/-- The iterated derivative of the sum of two `C^i` functions is the sum of the
iterated derivatives. -/
theorem iteratedFDeriv_add_apply {i : â} {f g : E â F} (hf : ContDiff ð i f) (hg : ContDiff ð i g) :
    iteratedFDeriv ð i (f + g) x = iteratedFDeriv ð i f x + iteratedFDeriv ð i g x := by
  simp_rw [â contDiffOn_univ, â iteratedFDerivWithin_univ] at hf hg â¢
  exact iteratedFDerivWithin_add_apply hf hg uniqueDiffOn_univ (Set.mem_univ _)

/-- Variant of `iteratedFDeriv_add_apply` with the spelling `(fun x ⊠f x + g x)`. -/
theorem iteratedFDeriv_add_apply' {i : â} {f g : E â F} (hf : ContDiff ð i f)
    (hg : ContDiff ð i g) :
    iteratedFDeriv ð i (fun x => f x + g x) x = iteratedFDeriv ð i f x + iteratedFDeriv ð i g x :=
  iteratedFDeriv_add_apply hf hg

end Add
/-! ### Negative -/
section Neg

-- The negative is smooth.
theorem contDiff_neg : ContDiff ð n fun p : F => -p :=
  IsBoundedLinearMap.id.neg.contDiff

/-- The negative of a `C^n` function within a domain at a point is `C^n` within this domain at
this point. -/
theorem ContDiffWithinAt.neg {s : Set E} {f : E â F} (hf : ContDiffWithinAt ð n f s x) :
    ContDiffWithinAt ð n (fun x => -f x) s x :=
  contDiff_neg.contDiffWithinAt.comp x hf subset_preimage_univ

/-- The negative of a `C^n` function at a point is `C^n` at this point. -/
theorem ContDiffAt.neg {f : E â F} (hf : ContDiffAt ð n f x) :
    ContDiffAt ð n (fun x => -f x) x := by rw [â contDiffWithinAt_univ] at *; exact hf.neg

/-- The negative of a `C^n`function is `C^n`. -/
theorem ContDiff.neg {f : E â F} (hf : ContDiff ð n f) : ContDiff ð n fun x => -f x :=
  contDiff_neg.comp hf

/-- The negative of a `C^n` function on a domain is `C^n`. -/
theorem ContDiffOn.neg {s : Set E} {f : E â F} (hf : ContDiffOn ð n f s) :
    ContDiffOn ð n (fun x => -f x) s := fun x hx => (hf x hx).neg

variable {i : â}

-- Porting note (#11215): TODO: define `Neg` instance on `ContinuousLinearEquiv`,
-- prove it from `ContinuousLinearEquiv.iteratedFDerivWithin_comp_left`
/-- The iterated derivative within `s` of `-f` is the negative of that of `f`. -/
theorem iteratedFDerivWithin_neg_apply {f : E â F} (hu : UniqueDiffOn ð s) (hx : x â s) :
    iteratedFDerivWithin ð i (-f) s x = -iteratedFDerivWithin ð i f s x := by
  -- Induct on the order `i`, pushing the negation through one `fderivWithin` at a time.
  induction' i with i hi generalizing x
  · ext; simp
  · ext h
    calc
      iteratedFDerivWithin ð (i + 1) (-f) s x h =
          fderivWithin ð (iteratedFDerivWithin ð i (-f) s) s x (h 0) (Fin.tail h) :=
        rfl
      _ = fderivWithin ð (-iteratedFDerivWithin ð i f s) s x (h 0) (Fin.tail h) := by
        -- Replace the inner iterated derivative using the inductive hypothesis.
        rw [fderivWithin_congr' (@hi) hx]; rfl
      _ = -(fderivWithin ð (iteratedFDerivWithin ð i f s) s) x (h 0) (Fin.tail h) := by
        -- Pull the negation out of `fderivWithin`, using unique differentiability at `x`.
        rw [Pi.neg_def, fderivWithin_neg (hu x hx)]; rfl
      _ = -(iteratedFDerivWithin ð (i + 1) f s) x h := rfl

/-- The iterated derivative of `-f` is the negative of that of `f`. -/
theorem iteratedFDeriv_neg_apply {i : â} {f : E â F} :
    iteratedFDeriv ð i (-f) x = -iteratedFDeriv ð i f x := by
  simp_rw [â iteratedFDerivWithin_univ]
  exact iteratedFDerivWithin_neg_apply uniqueDiffOn_univ (Set.mem_univ _)

end Neg
/-! ### Subtraction -/
/-- The difference of two `C^n` functions within a set at a point is `C^n` within this set
at this point. -/
theorem ContDiffWithinAt.sub {s : Set E} {f g : E â F} (hf : ContDiffWithinAt ð n f s x)
    (hg : ContDiffWithinAt ð n g s x) : ContDiffWithinAt ð n (fun x => f x - g x) s x := by
  -- Reduce subtraction to addition of the negative.
  simpa only [sub_eq_add_neg] using hf.add hg.neg

/-- The difference of two `C^n` functions at a point is `C^n` at this point. -/
theorem ContDiffAt.sub {f g : E â F} (hf : ContDiffAt ð n f x) (hg : ContDiffAt ð n g x) :
    ContDiffAt ð n (fun x => f x - g x) x := by simpa only [sub_eq_add_neg] using hf.add hg.neg

/-- The difference of two `C^n` functions on a domain is `C^n`. -/
theorem ContDiffOn.sub {s : Set E} {f g : E â F} (hf : ContDiffOn ð n f s)
    (hg : ContDiffOn ð n g s) : ContDiffOn ð n (fun x => f x - g x) s := by
  simpa only [sub_eq_add_neg] using hf.add hg.neg

/-- The difference of two `C^n` functions is `C^n`. -/
theorem ContDiff.sub {f g : E â F} (hf : ContDiff ð n f) (hg : ContDiff ð n g) :
    ContDiff ð n fun x => f x - g x := by simpa only [sub_eq_add_neg] using hf.add hg.neg
/-! ### Sum of finitely many functions -/
/-- A finite sum of functions that are `C^n` within a set at a point is `C^n` within this set
at this point. -/
theorem ContDiffWithinAt.sum {ι : Type*} {f : ι â E â F} {s : Finset ι} {t : Set E} {x : E}
    (h : â i â s, ContDiffWithinAt ð n (fun x => f i x) t x) :
    ContDiffWithinAt ð n (fun x => â i â s, f i x) t x := by
  classical
  -- Induct on the finite index set: the empty sum is constant, and each insertion adds
  -- one `C^n` summand.
  induction' s using Finset.induction_on with i s is IH
  · simp [contDiffWithinAt_const]
  · simp only [is, Finset.sum_insert, not_false_iff]
    exact (h _ (Finset.mem_insert_self i s)).add
      (IH fun j hj => h _ (Finset.mem_insert_of_mem hj))

/-- A finite sum of functions that are `C^n` at a point is `C^n` at this point. -/
theorem ContDiffAt.sum {ι : Type*} {f : ι â E â F} {s : Finset ι} {x : E}
    (h : â i â s, ContDiffAt ð n (fun x => f i x) x) :
    ContDiffAt ð n (fun x => â i â s, f i x) x := by
  rw [â contDiffWithinAt_univ] at *; exact ContDiffWithinAt.sum h

/-- A finite sum of functions that are `C^n` on a domain is `C^n` on that domain. -/
theorem ContDiffOn.sum {ι : Type*} {f : ι â E â F} {s : Finset ι} {t : Set E}
    (h : â i â s, ContDiffOn ð n (fun x => f i x) t) :
    ContDiffOn ð n (fun x => â i â s, f i x) t := fun x hx =>
  ContDiffWithinAt.sum fun i hi => h i hi x hx

/-- A finite sum of `C^n` functions is `C^n`. -/
theorem ContDiff.sum {ι : Type*} {f : ι â E â F} {s : Finset ι}
    (h : â i â s, ContDiff ð n fun x => f i x) : ContDiff ð n fun x => â i â s, f i x := by
  simp only [â contDiffOn_univ] at *; exact ContDiffOn.sum h

/-- The iterated derivative within a set of a finite sum of `C^i` functions is the sum of the
iterated derivatives. -/
theorem iteratedFDerivWithin_sum_apply {ι : Type*} {f : ι â E â F} {u : Finset ι} {i : â} {x : E}
    (hs : UniqueDiffOn ð s) (hx : x â s) (h : â j â u, ContDiffOn ð i (f j) s) :
    iteratedFDerivWithin ð i (â j â u, f j ·) s x =
      â j â u, iteratedFDerivWithin ð i (f j) s x := by
  -- Induct on the finite index set, peeling off one summand with
  -- `iteratedFDerivWithin_add_apply'` at each step.
  induction u using Finset.cons_induction with
  | empty => ext; simp [hs, hx]
  | cons a u ha IH =>
    simp only [Finset.mem_cons, forall_eq_or_imp] at h
    simp only [Finset.sum_cons]
    rw [iteratedFDerivWithin_add_apply' h.1 (ContDiffOn.sum h.2) hs hx, IH h.2]

/-- The iterated derivative of a finite sum of `C^i` functions is the sum of the
iterated derivatives. -/
theorem iteratedFDeriv_sum {ι : Type*} {f : ι â E â F} {u : Finset ι} {i : â}
    (h : â j â u, ContDiff ð i (f j)) :
    iteratedFDeriv ð i (â j â u, f j ·) = â j â u, iteratedFDeriv ð i (f j) :=
  funext fun x ⊠by simpa [iteratedFDerivWithin_univ] using
    iteratedFDerivWithin_sum_apply uniqueDiffOn_univ (mem_univ x) fun j hj ⊠(h j hj).contDiffOn
/-! ### Product of two functions -/
section MulProd

variable {ðž ðž' ι ð' : Type*} [NormedRing ðž] [NormedAlgebra ð ðž] [NormedCommRing ðž']
  [NormedAlgebra ð ðž'] [NormedField ð'] [NormedAlgebra ð ð']

-- The product is smooth.
theorem contDiff_mul : ContDiff ð n fun p : ðž Ã ðž => p.1 * p.2 :=
  (ContinuousLinearMap.mul ð ðž).isBoundedBilinearMap.contDiff

/-- The product of two `C^n` functions within a set at a point is `C^n` within this set
at this point. -/
theorem ContDiffWithinAt.mul {s : Set E} {f g : E â ðž} (hf : ContDiffWithinAt ð n f s x)
    (hg : ContDiffWithinAt ð n g s x) : ContDiffWithinAt ð n (fun x => f x * g x) s x :=
  contDiff_mul.comp_contDiffWithinAt (hf.prod hg)

/-- The product of two `C^n` functions at a point is `C^n` at this point. -/
nonrec theorem ContDiffAt.mul {f g : E â ðž} (hf : ContDiffAt ð n f x) (hg : ContDiffAt ð n g x) :
    ContDiffAt ð n (fun x => f x * g x) x :=
  hf.mul hg

/-- The product of two `C^n` functions on a domain is `C^n`. -/
theorem ContDiffOn.mul {f g : E â ðž} (hf : ContDiffOn ð n f s) (hg : ContDiffOn ð n g s) :
    ContDiffOn ð n (fun x => f x * g x) s := fun x hx => (hf x hx).mul (hg x hx)

/-- The product of two `C^n`functions is `C^n`. -/
theorem ContDiff.mul {f g : E â ðž} (hf : ContDiff ð n f) (hg : ContDiff ð n g) :
    ContDiff ð n fun x => f x * g x :=
  contDiff_mul.comp (hf.prod hg)

/-- A finite product of `C^n` functions (within a set at a point) is `C^n`, phrased with the
pointwise product of functions `â i â t, f i`. -/
theorem contDiffWithinAt_prod' {t : Finset ι} {f : ι â E â ðž'}
    (h : â i â t, ContDiffWithinAt ð n (f i) s x) : ContDiffWithinAt ð n (â i â t, f i) s x :=
  Finset.prod_induction f (fun f => ContDiffWithinAt ð n f s x) (fun _ _ => ContDiffWithinAt.mul)
    (contDiffWithinAt_const (c := 1)) h

/-- A finite product of `C^n` functions (within a set at a point) is `C^n`. -/
theorem contDiffWithinAt_prod {t : Finset ι} {f : ι â E â ðž'}
    (h : â i â t, ContDiffWithinAt ð n (f i) s x) :
    ContDiffWithinAt ð n (fun y => â i â t, f i y) s x := by
  simpa only [â Finset.prod_apply] using contDiffWithinAt_prod' h

/-- A finite product of `C^n` functions (at a point) is `C^n`, phrased with the
pointwise product of functions `â i â t, f i`. -/
theorem contDiffAt_prod' {t : Finset ι} {f : ι â E â ðž'} (h : â i â t, ContDiffAt ð n (f i) x) :
    ContDiffAt ð n (â i â t, f i) x :=
  contDiffWithinAt_prod' h

/-- A finite product of `C^n` functions (at a point) is `C^n`. -/
theorem contDiffAt_prod {t : Finset ι} {f : ι â E â ðž'} (h : â i â t, ContDiffAt ð n (f i) x) :
    ContDiffAt ð n (fun y => â i â t, f i y) x :=
  contDiffWithinAt_prod h

/-- A finite product of `C^n` functions (on a domain) is `C^n`, phrased with the
pointwise product of functions `â i â t, f i`. -/
theorem contDiffOn_prod' {t : Finset ι} {f : ι â E â ðž'} (h : â i â t, ContDiffOn ð n (f i) s) :
    ContDiffOn ð n (â i â t, f i) s := fun x hx => contDiffWithinAt_prod' fun i hi => h i hi x hx

/-- A finite product of `C^n` functions (on a domain) is `C^n`. -/
theorem contDiffOn_prod {t : Finset ι} {f : ι â E â ðž'} (h : â i â t, ContDiffOn ð n (f i) s) :
    ContDiffOn ð n (fun y => â i â t, f i y) s := fun x hx =>
  contDiffWithinAt_prod fun i hi => h i hi x hx

/-- A finite product of `C^n` functions is `C^n`, phrased with the
pointwise product of functions `â i â t, f i`. -/
theorem contDiff_prod' {t : Finset ι} {f : ι â E â ðž'} (h : â i â t, ContDiff ð n (f i)) :
    ContDiff ð n (â i â t, f i) :=
  contDiff_iff_contDiffAt.mpr fun _ => contDiffAt_prod' fun i hi => (h i hi).contDiffAt

/-- A finite product of `C^n` functions is `C^n`. -/
theorem contDiff_prod {t : Finset ι} {f : ι â E â ðž'} (h : â i â t, ContDiff ð n (f i)) :
    ContDiff ð n fun y => â i â t, f i y :=
  contDiff_iff_contDiffAt.mpr fun _ => contDiffAt_prod fun i hi => (h i hi).contDiffAt

/-- A natural power of a `C^n` function is `C^n`. -/
theorem ContDiff.pow {f : E â ðž} (hf : ContDiff ð n f) : â m : â, ContDiff ð n fun x => f x ^ m
  | 0 => by simpa using contDiff_const
  | m + 1 => by simpa [pow_succ] using (hf.pow m).mul hf

/-- A natural power of a function that is `C^n` within a set at a point is `C^n`
within this set at this point. -/
theorem ContDiffWithinAt.pow {f : E â ðž} (hf : ContDiffWithinAt ð n f s x) (m : â) :
    ContDiffWithinAt ð n (fun y => f y ^ m) s x :=
  (contDiff_id.pow m).comp_contDiffWithinAt hf

/-- A natural power of a function that is `C^n` at a point is `C^n` at this point. -/
nonrec theorem ContDiffAt.pow {f : E â ðž} (hf : ContDiffAt ð n f x) (m : â) :
    ContDiffAt ð n (fun y => f y ^ m) x :=
  hf.pow m

/-- A natural power of a `C^n` function on a domain is `C^n`. -/
theorem ContDiffOn.pow {f : E â ðž} (hf : ContDiffOn ð n f s) (m : â) :
    ContDiffOn ð n (fun y => f y ^ m) s := fun y hy => (hf y hy).pow m

/-- Division by a constant of a function that is `C^n` within a set at a point is `C^n`
within this set at this point. -/
theorem ContDiffWithinAt.div_const {f : E â ð'} {n} (hf : ContDiffWithinAt ð n f s x) (c : ð') :
    ContDiffWithinAt ð n (fun x => f x / c) s x := by
  simpa only [div_eq_mul_inv] using hf.mul contDiffWithinAt_const

/-- Division by a constant of a function that is `C^n` at a point is `C^n` at this point. -/
nonrec theorem ContDiffAt.div_const {f : E â ð'} {n} (hf : ContDiffAt ð n f x) (c : ð') :
    ContDiffAt ð n (fun x => f x / c) x :=
  hf.div_const c

/-- Division by a constant of a `C^n` function on a domain is `C^n`. -/
theorem ContDiffOn.div_const {f : E â ð'} {n} (hf : ContDiffOn ð n f s) (c : ð') :
    ContDiffOn ð n (fun x => f x / c) s := fun x hx => (hf x hx).div_const c

/-- Division by a constant of a `C^n` function is `C^n`. -/
theorem ContDiff.div_const {f : E â ð'} {n} (hf : ContDiff ð n f) (c : ð') :
    ContDiff ð n fun x => f x / c := by simpa only [div_eq_mul_inv] using hf.mul contDiff_const

end MulProd
/-! ### Scalar multiplication -/
section SMul

-- The scalar multiplication is smooth.
theorem contDiff_smul : ContDiff ð n fun p : ð à F => p.1 ⢠p.2 :=
  isBoundedBilinearMap_smul.contDiff

/-- The scalar multiplication of two `C^n` functions within a set at a point is `C^n` within this
set at this point. -/
theorem ContDiffWithinAt.smul {s : Set E} {f : E â ð} {g : E â F} (hf : ContDiffWithinAt ð n f s x)
    (hg : ContDiffWithinAt ð n g s x) : ContDiffWithinAt ð n (fun x => f x ⢠g x) s x :=
  contDiff_smul.contDiffWithinAt.comp x (hf.prod hg) subset_preimage_univ

/-- The scalar multiplication of two `C^n` functions at a point is `C^n` at this point. -/
theorem ContDiffAt.smul {f : E â ð} {g : E â F} (hf : ContDiffAt ð n f x)
    (hg : ContDiffAt ð n g x) : ContDiffAt ð n (fun x => f x ⢠g x) x := by
  rw [â contDiffWithinAt_univ] at *; exact hf.smul hg

/-- The scalar multiplication of two `C^n` functions is `C^n`. -/
theorem ContDiff.smul {f : E â ð} {g : E â F} (hf : ContDiff ð n f) (hg : ContDiff ð n g) :
    ContDiff ð n fun x => f x ⢠g x :=
  contDiff_smul.comp (hf.prod hg)

/-- The scalar multiplication of two `C^n` functions on a domain is `C^n`. -/
theorem ContDiffOn.smul {s : Set E} {f : E â ð} {g : E â F} (hf : ContDiffOn ð n f s)
    (hg : ContDiffOn ð n g s) : ContDiffOn ð n (fun x => f x ⢠g x) s := fun x hx =>
  (hf x hx).smul (hg x hx)

end SMul
/-! ### Constant scalar multiplication
Porting note (#11215): TODO: generalize results in this section.
1. It should be possible to assume `[Monoid R] [DistribMulAction R F] [SMulCommClass ð R F]`.
2. If `c` is a unit (or `R` is a group), then one can drop `ContDiff*` assumptions in some
lemmas.
-/
section ConstSMul

variable {R : Type*} [Semiring R] [Module R F] [SMulCommClass ð R F]
variable [ContinuousConstSMul R F]

-- The scalar multiplication with a constant is smooth.
theorem contDiff_const_smul (c : R) : ContDiff ð n fun p : F => c ⢠p :=
  (c ⢠ContinuousLinearMap.id ð F).contDiff

/-- The scalar multiplication of a constant and a `C^n` function within a set at a point is `C^n`
within this set at this point. -/
theorem ContDiffWithinAt.const_smul {s : Set E} {f : E â F} {x : E} (c : R)
    (hf : ContDiffWithinAt ð n f s x) : ContDiffWithinAt ð n (fun y => c ⢠f y) s x :=
  (contDiff_const_smul c).contDiffAt.comp_contDiffWithinAt x hf

/-- The scalar multiplication of a constant and a `C^n` function at a point is `C^n` at this
point. -/
theorem ContDiffAt.const_smul {f : E â F} {x : E} (c : R) (hf : ContDiffAt ð n f x) :
    ContDiffAt ð n (fun y => c ⢠f y) x := by
  rw [â contDiffWithinAt_univ] at *; exact hf.const_smul c

/-- The scalar multiplication of a constant and a `C^n` function is `C^n`. -/
theorem ContDiff.const_smul {f : E â F} (c : R) (hf : ContDiff ð n f) :
    ContDiff ð n fun y => c ⢠f y :=
  (contDiff_const_smul c).comp hf

/-- The scalar multiplication of a constant and a `C^n` on a domain is `C^n`. -/
theorem ContDiffOn.const_smul {s : Set E} {f : E â F} (c : R) (hf : ContDiffOn ð n f s) :
    ContDiffOn ð n (fun y => c ⢠f y) s := fun x hx => (hf x hx).const_smul c

variable {i : â} {a : R}

/-- The iterated derivative within `s` of `a ⢠f` is `a` times that of `f`. -/
theorem iteratedFDerivWithin_const_smul_apply (hf : ContDiffOn ð i f s) (hu : UniqueDiffOn ð s)
    (hx : x â s) : iteratedFDerivWithin ð i (a ⢠f) s x = a ⢠iteratedFDerivWithin ð i f s x :=
  -- `a ⢠f` is the composition of `f` with the continuous linear map `a ⢠id`.
  (a ⢠(1 : F âL[ð] F)).iteratedFDerivWithin_comp_left hf hu hx le_rfl

/-- The iterated derivative of `a ⢠f` is `a` times that of `f`. -/
theorem iteratedFDeriv_const_smul_apply {x : E} (hf : ContDiff ð i f) :
    iteratedFDeriv ð i (a ⢠f) x = a ⢠iteratedFDeriv ð i f x := by
  simp_rw [â contDiffOn_univ, â iteratedFDerivWithin_univ] at *
  exact iteratedFDerivWithin_const_smul_apply hf uniqueDiffOn_univ (Set.mem_univ _)

/-- Variant of `iteratedFDeriv_const_smul_apply` with the spelling `(fun x ⊠a ⢠f x)`. -/
theorem iteratedFDeriv_const_smul_apply' {x : E} (hf : ContDiff ð i f) :
    iteratedFDeriv ð i (fun x ⊠a ⢠f x) x = a ⢠iteratedFDeriv ð i f x :=
  iteratedFDeriv_const_smul_apply hf

end ConstSMul
/-! ### Cartesian product of two functions -/
section prodMap

variable {E' : Type*} [NormedAddCommGroup E'] [NormedSpace ð E']
variable {F' : Type*} [NormedAddCommGroup F'] [NormedSpace ð F']

/-- The product map of two `C^n` functions within a set at a point is `C^n`
within the product set at the product point. -/
theorem ContDiffWithinAt.prod_map' {s : Set E} {t : Set E'} {f : E â F} {g : E' â F'} {p : E Ã E'}
    (hf : ContDiffWithinAt ð n f s p.1) (hg : ContDiffWithinAt ð n g t p.2) :
    ContDiffWithinAt ð n (Prod.map f g) (s ÃË¢ t) p :=
  (hf.comp p contDiffWithinAt_fst (prod_subset_preimage_fst _ _)).prod
    (hg.comp p contDiffWithinAt_snd (prod_subset_preimage_snd _ _))

/-- The product map of two `C^n` functions within sets at two points is `C^n`
within the product set at the pair of points. -/
theorem ContDiffWithinAt.prod_map {s : Set E} {t : Set E'} {f : E â F} {g : E' â F'} {x : E}
    {y : E'} (hf : ContDiffWithinAt ð n f s x) (hg : ContDiffWithinAt ð n g t y) :
    ContDiffWithinAt ð n (Prod.map f g) (s ÃË¢ t) (x, y) :=
  ContDiffWithinAt.prod_map' hf hg

/-- The product map of two `C^n` functions on a set is `C^n` on the product set. -/
theorem ContDiffOn.prod_map {E' : Type*} [NormedAddCommGroup E'] [NormedSpace ð E'] {F' : Type*}
    [NormedAddCommGroup F'] [NormedSpace ð F'] {s : Set E} {t : Set E'} {f : E â F} {g : E' â F'}
    (hf : ContDiffOn ð n f s) (hg : ContDiffOn ð n g t) : ContDiffOn ð n (Prod.map f g) (s ÃË¢ t) :=
  (hf.comp contDiffOn_fst (prod_subset_preimage_fst _ _)).prod
    (hg.comp contDiffOn_snd (prod_subset_preimage_snd _ _))

/-- The product map of two functions that are `C^n` at two points is `C^n`
at the pair of points. -/
theorem ContDiffAt.prod_map {f : E â F} {g : E' â F'} {x : E} {y : E'} (hf : ContDiffAt ð n f x)
    (hg : ContDiffAt ð n g y) : ContDiffAt ð n (Prod.map f g) (x, y) := by
  rw [ContDiffAt] at *
  convert hf.prod_map hg
  simp only [univ_prod_univ]

/-- The product map of two functions that are `C^n` at the components of a point `p`
is `C^n` at `p`. -/
theorem ContDiffAt.prod_map' {f : E â F} {g : E' â F'} {p : E Ã E'} (hf : ContDiffAt ð n f p.1)
    (hg : ContDiffAt ð n g p.2) : ContDiffAt ð n (Prod.map f g) p := by
  rcases p with âšâ©
  exact ContDiffAt.prod_map hf hg

/-- The product map of two `C^n` functions is `C^n`. -/
theorem ContDiff.prod_map {f : E â F} {g : E' â F'} (hf : ContDiff ð n f) (hg : ContDiff ð n g) :
    ContDiff ð n (Prod.map f g) := by
  rw [contDiff_iff_contDiffAt] at *
  exact fun âšx, yâ© => (hf x).prod_map (hg y)

/-- Pairing with a fixed second component is `C^n`. -/
theorem contDiff_prod_mk_left (fâ : F) : ContDiff ð n fun e : E => (e, fâ) :=
  contDiff_id.prod contDiff_const

/-- Pairing with a fixed first component is `C^n`. -/
theorem contDiff_prod_mk_right (eâ : E) : ContDiff ð n fun f : F => (eâ, f) :=
  contDiff_const.prod contDiff_id

end prodMap
/-! ### Inversion in a complete normed algebra -/
section AlgebraInverse

variable (ð) {R : Type*} [NormedRing R]
-- Porting note: this couldn't be on the same line as the binder type update of `ð`
variable [NormedAlgebra ð R]

open NormedRing ContinuousLinearMap Ring

/-- In a complete normed algebra, the operation of inversion is `C^n`, for all `n`, at each
invertible element. The proof is by induction, bootstrapping using an identity expressing the
derivative of inversion as a bilinear map of inversion itself. -/
theorem contDiffAt_ring_inverse [CompleteSpace R] (x : RË£) :
    ContDiffAt ð n Ring.inverse (x : R) := by
  induction' n using ENat.nat_induction with n IH Itop
  · -- Base case `n = 0`: `Ring.inverse` is continuous on the (open) set of units.
    intro m hm
    refine âš{ y : R | IsUnit y }, ?_, ?_â©
    · simpa [nhdsWithin_univ] using x.nhds
    · use ftaylorSeriesWithin ð inverse univ
      rw [le_antisymm hm bot_le, hasFTaylorSeriesUpToOn_zero_iff]
      constructor
      · rintro _ âšx', rflâ©
        exact (inverse_continuousAt x').continuousWithinAt
      · simp [ftaylorSeriesWithin]
  · -- Successor step: the derivative of inversion at `y` is `v ⊠-(yâ»Â¹ * v * yâ»Â¹)`,
    -- a bilinear expression in `Ring.inverse`, which is `C^n` by the inductive hypothesis.
    rw [contDiffAt_succ_iff_hasFDerivAt]
    refine âšfun x : R => -mulLeftRight ð R (inverse x) (inverse x), ?_, ?_â©
    · refine âš{ y : R | IsUnit y }, x.nhds, ?_â©
      rintro _ âšy, rflâ©
      simp_rw [inverse_unit]
      exact hasFDerivAt_ring_inverse y
    · convert (mulLeftRight_isBoundedBilinear ð R).contDiff.neg.comp_contDiffAt (x : R)
        (IH.prod IH)
  · -- `n = â€` follows from all finite orders.
    exact contDiffAt_top.mpr Itop

variable {ð' : Type*} [NormedField ð'] [NormedAlgebra ð ð'] [CompleteSpace ð']

/-- In a complete normed field, `Inv.inv` is `C^n` at every nonzero point. -/
theorem contDiffAt_inv {x : ð'} (hx : x â 0) {n} : ContDiffAt ð n Inv.inv x := by
  simpa only [Ring.inverse_eq_inv'] using contDiffAt_ring_inverse ð (Units.mk0 x hx)

/-- In a complete normed field, `Inv.inv` is `C^n` on the complement of `{0}`. -/
theorem contDiffOn_inv {n} : ContDiffOn ð n (Inv.inv : ð' â ð') {0}á¶ := fun _ hx =>
  (contDiffAt_inv ð hx).contDiffWithinAt

variable {ð}

-- TODO: the next few lemmas don't need `ð` or `ð'` to be complete
-- A good way to show this is to generalize `contDiffAt_ring_inverse` to the setting
-- of a function `f` such that `âá¶ x in ð a, x * f x = 1`.
/-- The pointwise inverse of a function that is `C^n` within a set at a point, and nonzero there,
is `C^n` within this set at this point. -/
theorem ContDiffWithinAt.inv {f : E â ð'} {n} (hf : ContDiffWithinAt ð n f s x) (hx : f x â 0) :
    ContDiffWithinAt ð n (fun x => (f x)â»Â¹) s x :=
  (contDiffAt_inv ð hx).comp_contDiffWithinAt x hf

/-- The pointwise inverse of a `C^n` function that is nowhere zero on a domain is `C^n`
on that domain. -/
theorem ContDiffOn.inv {f : E â ð'} {n} (hf : ContDiffOn ð n f s) (h : â x â s, f x â 0) :
    ContDiffOn ð n (fun x => (f x)â»Â¹) s := fun x hx => (hf.contDiffWithinAt hx).inv (h x hx)

/-- The pointwise inverse of a function that is `C^n` at a point and nonzero there is `C^n`
at this point. -/
nonrec theorem ContDiffAt.inv {f : E â ð'} {n} (hf : ContDiffAt ð n f x) (hx : f x â 0) :
    ContDiffAt ð n (fun x => (f x)â»Â¹) x :=
  hf.inv hx

/-- The pointwise inverse of a nowhere-vanishing `C^n` function is `C^n`. -/
theorem ContDiff.inv {f : E â ð'} {n} (hf : ContDiff ð n f) (h : â x, f x â 0) :
    ContDiff ð n fun x => (f x)â»Â¹ := by
  rw [contDiff_iff_contDiffAt]; exact fun x => hf.contDiffAt.inv (h x)

-- TODO: generalize to `f g : E â ð'`
/-- The quotient of two functions that are `C^n` within a set at a point, with nonzero
denominator there, is `C^n` within this set at this point. -/
theorem ContDiffWithinAt.div [CompleteSpace ð] {f g : E â ð} {n} (hf : ContDiffWithinAt ð n f s x)
    (hg : ContDiffWithinAt ð n g s x) (hx : g x â 0) :
    ContDiffWithinAt ð n (fun x => f x / g x) s x := by
  simpa only [div_eq_mul_inv] using hf.mul (hg.inv hx)

/-- The quotient of two `C^n` functions on a domain, with nowhere-vanishing denominator,
is `C^n` on that domain. -/
theorem ContDiffOn.div [CompleteSpace ð] {f g : E â ð} {n} (hf : ContDiffOn ð n f s)
    (hg : ContDiffOn ð n g s) (hâ : â x â s, g x â 0) : ContDiffOn ð n (f / g) s := fun x hx =>
  (hf x hx).div (hg x hx) (hâ x hx)

/-- The quotient of two functions that are `C^n` at a point, with nonzero denominator there,
is `C^n` at this point. -/
nonrec theorem ContDiffAt.div [CompleteSpace ð] {f g : E â ð} {n} (hf : ContDiffAt ð n f x)
    (hg : ContDiffAt ð n g x) (hx : g x â 0) : ContDiffAt ð n (fun x => f x / g x) x :=
  hf.div hg hx

/-- The quotient of two `C^n` functions, with nowhere-vanishing denominator, is `C^n`. -/
theorem ContDiff.div [CompleteSpace ð] {f g : E â ð} {n} (hf : ContDiff ð n f) (hg : ContDiff ð n g)
    (h0 : â x, g x â 0) : ContDiff ð n fun x => f x / g x := by
  simp only [contDiff_iff_contDiffAt] at *
  exact fun x => (hf x).div (hg x) (h0 x)

end AlgebraInverse
/-! ### Inversion of continuous linear maps between Banach spaces -/
section MapInverse

open ContinuousLinearMap

/-- At a continuous linear equivalence `e : E âL[ð] F` between Banach spaces, the operation of
inversion is `C^n`, for all `n`. -/
theorem contDiffAt_map_inverse [CompleteSpace E] (e : E âL[ð] F) :
    ContDiffAt ð n inverse (e : E âL[ð] F) := by
  nontriviality E
  -- first, we use the lemma `to_ring_inverse` to rewrite in terms of `Ring.inverse` in the ring
  -- `E âL[ð] E`
  let Oâ : (E âL[ð] E) â F âL[ð] E := fun f => f.comp (e.symm : F âL[ð] E)
  let Oâ : (E âL[ð] F) â E âL[ð] E := fun f => (e.symm : F âL[ð] E).comp f
  have : ContinuousLinearMap.inverse = Oâ â Ring.inverse â Oâ := funext (to_ring_inverse e)
  rw [this]
  -- `Oâ` and `Oâ` are `ContDiff`,
  -- so we reduce to proving that `Ring.inverse` is `ContDiff`
  have hâ : ContDiff ð n Oâ := contDiff_id.clm_comp contDiff_const
  have hâ : ContDiff ð n Oâ := contDiff_const.clm_comp contDiff_id
  refine hâ.contDiffAt.comp _ (ContDiffAt.comp _ ?_ hâ.contDiffAt)
  -- `Oâ` maps `e` to `1`, so it remains to show `Ring.inverse` is `C^n` at `1`.
  convert contDiffAt_ring_inverse ð (1 : (E âL[ð] E)Ë£)
  simp [Oâ, one_def]

end MapInverse
section FunctionInverse
open ContinuousLinearMap
/-- If `f` is a local homeomorphism and the point `a` is in its target,
and if `f` is `n` times continuously differentiable at `f.symm a`,
and if the derivative at `f.symm a` is a continuous linear equivalence,
then `f.symm` is `n` times continuously differentiable at the point `a`.
This is one of the easy parts of the inverse function theorem: it assumes that we already have
an inverse function. -/
theorem PartialHomeomorph.contDiffAt_symm [CompleteSpace E] (f : PartialHomeomorph E F)
{fâ' : E âL[ð] F} {a : F} (ha : a â f.target)
(hfâ' : HasFDerivAt f (fâ' : E âL[ð] F) (f.symm a)) (hf : ContDiffAt ð n f (f.symm a)) :
ContDiffAt ð n f.symm a := by
-- We prove this by induction on `n`
induction' n using ENat.nat_induction with n IH Itop
-- Base case `n = 0`: continuity of `f.symm` near `a` comes from the homeomorphism structure.
· rw [contDiffAt_zero]
exact âšf.target, IsOpen.mem_nhds f.open_target ha, f.continuousOn_invFunâ©
-- Successor case: extract a derivative `f'` of `f` valid on a neighborhood `u` of `f.symm a`.
· obtain âšf', âšu, hu, hff'â©, hf'â© := contDiffAt_succ_iff_hasFDerivAt.mp hf
rw [contDiffAt_succ_iff_hasFDerivAt]
-- For showing `n.succ` times continuous differentiability (the main inductive step), it
-- suffices to produce the derivative and show that it is `n` times continuously differentiable
have eq_fâ' : f' (f.symm a) = fâ' := (hff' (f.symm a) (mem_of_mem_nhds hu)).unique hfâ'
-- This follows by a bootstrapping formula expressing the derivative as a function of `f` itself
refine âšinverse â f' â f.symm, ?_, ?_â©
· -- We first check that the derivative of `f` is that formula
have h_nhds : { y : E | â e : E âL[ð] F, âe = f' y } â ð (f.symm a) := by
have hfâ' := fâ'.nhds
rw [â eq_fâ'] at hfâ'
exact hf'.continuousAt.preimage_mem_nhds hfâ'
obtain âšt, htu, ht, htfâ© := mem_nhds_iff.mp (Filter.inter_mem hu h_nhds)
use f.target â© f.symm â»Â¹' t
refine âšIsOpen.mem_nhds ?_ ?_, ?_â©
· exact f.isOpen_inter_preimage_symm ht
· exact mem_inter ha (mem_preimage.mpr htf)
intro x hx
obtain âšhxu, e, heâ© := htu hx.2
have h_deriv : HasFDerivAt f (e : E âL[ð] F) (f.symm x) := by
rw [he]
exact hff' (f.symm x) hxu
convert f.hasFDerivAt_symm hx.1 h_deriv
simp [â he]
· -- Then we check that the formula, being a composition of `ContDiff` pieces, is
-- itself `ContDiff`
have h_derivâ : ContDiffAt ð n inverse (f' (f.symm a)) := by
rw [eq_fâ']
exact contDiffAt_map_inverse _
have h_derivâ : ContDiffAt ð n f.symm a := by
refine IH (hf.of_le ?_)
norm_cast
exact Nat.le_succ n
exact (h_derivâ.comp _ hf').comp _ h_derivâ
-- Top case `n = â`: apply the finite cases at every natural number.
· refine contDiffAt_top.mpr ?_
intro n
exact Itop n (contDiffAt_top.mp hf n)
/-- If `f` is an `n` times continuously differentiable homeomorphism,
and if the derivative of `f` at each point is a continuous linear equivalence,
then `f.symm` is `n` times continuously differentiable.
This is one of the easy parts of the inverse function theorem: it assumes that we already have
an inverse function. -/
theorem Homeomorph.contDiff_symm [CompleteSpace E] (f : E ââ F) {fâ' : E â E âL[ð] F}
(hfâ' : â a, HasFDerivAt f (fâ' a : E âL[ð] F) a) (hf : ContDiff ð n (f : E â F)) :
ContDiff ð n (f.symm : F â E) :=
-- apply the local statement at every point, viewing `f` as a partial homeomorphism
contDiff_iff_contDiffAt.2 fun x =>
f.toPartialHomeomorph.contDiffAt_symm (mem_univ x) (hfâ' _) hf.contDiffAt
/-- Let `f` be a local homeomorphism of a nontrivially normed field, let `a` be a point in its
target. If `f` is `n` times continuously differentiable at `f.symm a`, and if the derivative at
`f.symm a` is nonzero, then `f.symm` is `n` times continuously differentiable at the point `a`.
This is one of the easy parts of the inverse function theorem: it assumes that we already have
an inverse function. -/
theorem PartialHomeomorph.contDiffAt_symm_deriv [CompleteSpace ð] (f : PartialHomeomorph ð ð)
{fâ' a : ð} (hâ : fâ' â 0) (ha : a â f.target) (hfâ' : HasDerivAt f fâ' (f.symm a))
(hf : ContDiffAt ð n f (f.symm a)) : ContDiffAt ð n f.symm a :=
-- a nonzero scalar derivative is a continuous linear equivalence
f.contDiffAt_symm ha (hfâ'.hasFDerivAt_equiv hâ) hf
/-- Let `f` be an `n` times continuously differentiable homeomorphism of a nontrivially normed
field. Suppose that the derivative of `f` is never equal to zero. Then `f.symm` is `n` times
continuously differentiable.
This is one of the easy parts of the inverse function theorem: it assumes that we already have
an inverse function. -/
theorem Homeomorph.contDiff_symm_deriv [CompleteSpace ð] (f : ð ââ ð) {f' : ð â ð}
(hâ : â x, f' x â 0) (hf' : â x, HasDerivAt f (f' x) x) (hf : ContDiff ð n (f : ð â ð)) :
ContDiff ð n (f.symm : ð â ð) :=
-- apply the pointwise statement at every point
contDiff_iff_contDiffAt.2 fun x =>
f.toPartialHomeomorph.contDiffAt_symm_deriv (hâ _) (mem_univ x) (hf' _) hf.contDiffAt
namespace PartialHomeomorph
variable (ð)
/-- Restrict a partial homeomorphism to the subsets of the source and target
that consist of points `x â f.source`, `y = f x â f.target`
such that `f` is `C^n` at `x` and `f.symm` is `C^n` at `y`.
Note that `n` is a natural number, not `â`,
because the set of points of `C^â`-smoothness of `f` is not guaranteed to be open. -/
@[simps! apply symm_apply source target]
def restrContDiff (f : PartialHomeomorph E F) (n : â) : PartialHomeomorph E F :=
-- the two smoothness conditions cut out corresponding subsets of source and target
haveI H : f.IsImage {x | ContDiffAt ð n f x â§ ContDiffAt ð n f.symm (f x)}
{y | ContDiffAt ð n f.symm y â§ ContDiffAt ð n f (f.symm y)} := fun x hx ⊠by
simp [hx, and_comm]
-- restricting requires openness, which holds since `C^n`-at is an open condition for finite `n`
H.restr <| isOpen_iff_mem_nhds.2 fun x âšhxs, hxf, hxf'â© âŠ
inter_mem (f.open_source.mem_nhds hxs) <| hxf.eventually.and <|
f.continuousAt hxs hxf'.eventually
/-- `f` is `C^n` on the source of the restriction `f.restrContDiff ð n`. -/
lemma contDiffOn_restrContDiff_source (f : PartialHomeomorph E F) (n : â) :
ContDiffOn ð n f (f.restrContDiff ð n).source := fun _x hx ⊠hx.2.1.contDiffWithinAt
/-- `f.symm` is `C^n` on the target of the restriction `f.restrContDiff ð n`. -/
lemma contDiffOn_restrContDiff_target (f : PartialHomeomorph E F) (n : â) :
ContDiffOn ð n f.symm (f.restrContDiff ð n).target := fun _x hx ⊠hx.2.1.contDiffWithinAt
end PartialHomeomorph
end FunctionInverse
section deriv
/-!
### One dimension
All results up to now have been expressed in terms of the general Fréchet derivative `fderiv`. For
maps defined on the field, the one-dimensional derivative `deriv` is often easier to use. In this
paragraph, we reformulate some higher smoothness results in terms of `deriv`.
-/
variable {fâ : ð â F} {sâ : Set ð}
open ContinuousLinearMap (smulRight)
/-- A function is `C^(n + 1)` on a domain with unique derivatives if and only if it is
differentiable there, and its derivative (formulated with `derivWithin`) is `C^n`. -/
theorem contDiffOn_succ_iff_derivWithin {n : â} (hs : UniqueDiffOn ð sâ) :
ContDiffOn ð (n + 1 : â) fâ sâ â
DifferentiableOn ð fâ sâ â§ ContDiffOn ð n (derivWithin fâ sâ) sâ := by
-- reduce to the corresponding statement for `fderivWithin`
rw [contDiffOn_succ_iff_fderivWithin hs, and_congr_right_iff]
intro _
constructor
· intro h
-- `derivWithin` is obtained from `fderivWithin` by evaluating at `1`
have : derivWithin fâ sâ = (fun u : ð âL[ð] F => u 1) â fderivWithin ð fâ sâ := by
ext x; rfl
simp_rw [this]
apply ContDiff.comp_contDiffOn _ h
exact (isBoundedBilinearMap_apply.isBoundedLinearMap_left _).contDiff
· intro h
-- conversely, `fderivWithin` is `smulRight 1` applied to `derivWithin`
have : fderivWithin ð fâ sâ = smulRight (1 : ð âL[ð] ð) â derivWithin fâ sâ := by
ext x; simp [derivWithin]
simp only [this]
apply ContDiff.comp_contDiffOn _ h
have : IsBoundedBilinearMap ð fun _ : (ð âL[ð] ð) Ã F => _ := isBoundedBilinearMap_smulRight
exact (this.isBoundedLinearMap_right _).contDiff
/-- A function is `C^(n + 1)` on an open domain if and only if it is
differentiable there, and its derivative (formulated with `deriv`) is `C^n`. -/
theorem contDiffOn_succ_iff_deriv_of_isOpen {n : â} (hs : IsOpen sâ) :
ContDiffOn ð (n + 1 : â) fâ sâ â DifferentiableOn ð fâ sâ â§ ContDiffOn ð n (deriv fâ) sâ := by
-- on an open set `derivWithin` coincides with `deriv`
rw [contDiffOn_succ_iff_derivWithin hs.uniqueDiffOn]
exact Iff.rfl.and (contDiffOn_congr fun _ => derivWithin_of_isOpen hs)
/-- A function is `C^â` on a domain with unique derivatives if and only if it is differentiable
there, and its derivative (formulated with `derivWithin`) is `C^â`. -/
theorem contDiffOn_top_iff_derivWithin (hs : UniqueDiffOn ð sâ) :
ContDiffOn ð â fâ sâ â DifferentiableOn ð fâ sâ â§ ContDiffOn ð â (derivWithin fâ sâ) sâ := by
constructor
· intro h
refine âšh.differentiableOn le_top, ?_â©
-- extract `C^n` of the derivative, for every finite `n`, from the successor statement
refine contDiffOn_top.2 fun n => ((contDiffOn_succ_iff_derivWithin hs).1 ?_).2
exact h.of_le le_top
· intro h
refine contDiffOn_top.2 fun n => ?_
have A : (n : ââ) †â := le_top
-- reassemble `C^(n+1)` of `f` from differentiability and `C^n` of the derivative
apply ((contDiffOn_succ_iff_derivWithin hs).2 âšh.1, h.2.of_le Aâ©).of_le
exact WithTop.coe_le_coe.2 (Nat.le_succ n)
/-- A function is `C^â` on an open domain if and only if it is differentiable
there, and its derivative (formulated with `deriv`) is `C^â`. -/
theorem contDiffOn_top_iff_deriv_of_isOpen (hs : IsOpen sâ) :
ContDiffOn ð â fâ sâ â DifferentiableOn ð fâ sâ â§ ContDiffOn ð â (deriv fâ) sâ := by
-- on an open set `derivWithin` coincides with `deriv`
rw [contDiffOn_top_iff_derivWithin hs.uniqueDiffOn]
exact Iff.rfl.and <| contDiffOn_congr fun _ => derivWithin_of_isOpen hs
/-- If `f` is `C^n` on a set `s` with unique derivatives and `m + 1 †n`, then `derivWithin f s`
is `C^m` on `s`. -/
protected theorem ContDiffOn.derivWithin (hf : ContDiffOn ð n fâ sâ) (hs : UniqueDiffOn ð sâ)
(hmn : m + 1 †n) : ContDiffOn ð m (derivWithin fâ sâ) sâ := by
cases m
-- case `m = â`: then `n = â` as well, use the `C^â` characterization
· change â + 1 †n at hmn
have : n = â := by simpa using hmn
rw [this] at hf
exact ((contDiffOn_top_iff_derivWithin hs).1 hf).2
-- case `m` finite: use the successor characterization after weakening `hf` to `C^(m+1)`
· change (Nat.succ _ : ââ) †n at hmn
exact ((contDiffOn_succ_iff_derivWithin hs).1 (hf.of_le hmn)).2
/-- If `f` is `C^n` on an open set `s` and `m + 1 †n`, then `deriv f` is `C^m` on `s`. -/
theorem ContDiffOn.deriv_of_isOpen (hf : ContDiffOn ð n fâ sâ) (hs : IsOpen sâ) (hmn : m + 1 †n) :
ContDiffOn ð m (deriv fâ) sâ :=
(hf.derivWithin hs.uniqueDiffOn hmn).congr fun _ hx => (derivWithin_of_isOpen hs hx).symm
/-- If `f` is at least `C^1` on a set `s` with unique derivatives, then `derivWithin f s` is
continuous on `s`. -/
theorem ContDiffOn.continuousOn_derivWithin (h : ContDiffOn ð n fâ sâ) (hs : UniqueDiffOn ð sâ)
(hn : 1 †n) : ContinuousOn (derivWithin fâ sâ) sâ :=
((contDiffOn_succ_iff_derivWithin hs).1 (h.of_le hn)).2.continuousOn
/-- If `f` is at least `C^1` on an open set `s`, then `deriv f` is continuous on `s`. -/
theorem ContDiffOn.continuousOn_deriv_of_isOpen (h : ContDiffOn ð n fâ sâ) (hs : IsOpen sâ)
(hn : 1 †n) : ContinuousOn (deriv fâ) sâ :=
((contDiffOn_succ_iff_deriv_of_isOpen hs).1 (h.of_le hn)).2.continuousOn
/-- A function is `C^(n + 1)` if and only if it is differentiable,
and its derivative (formulated in terms of `deriv`) is `C^n`. -/
theorem contDiff_succ_iff_deriv {n : â} :
ContDiff ð (n + 1 : â) fâ â Differentiable ð fâ â§ ContDiff ð n (deriv fâ) := by
-- specialize the `ContDiffOn` statement to `s = univ`
simp only [â contDiffOn_univ, contDiffOn_succ_iff_deriv_of_isOpen, isOpen_univ,
differentiableOn_univ]
/-- A function is `C^1` if and only if it is differentiable and its derivative is continuous. -/
theorem contDiff_one_iff_deriv : ContDiff ð 1 fâ â Differentiable ð fâ â§ Continuous (deriv fâ) :=
contDiff_succ_iff_deriv.trans <| Iff.rfl.and contDiff_zero
/-- A function is `C^â` if and only if it is differentiable,
and its derivative (formulated in terms of `deriv`) is `C^â`. -/
theorem contDiff_top_iff_deriv :
ContDiff ð â fâ â Differentiable ð fâ â§ ContDiff ð â (deriv fâ) := by
-- specialize the `ContDiffOn` statement to `s = univ`
simp only [â contDiffOn_univ, â differentiableOn_univ, â derivWithin_univ]
rw [contDiffOn_top_iff_derivWithin uniqueDiffOn_univ]
/-- If `f` is at least `C^1`, then its derivative is continuous. -/
theorem ContDiff.continuous_deriv (h : ContDiff ð n fâ) (hn : 1 †n) : Continuous (deriv fâ) :=
(contDiff_succ_iff_deriv.mp (h.of_le hn)).2.continuous
/-- Every iterated derivative of a `C^â` function is `C^â`. -/
theorem ContDiff.iterate_deriv :
â (n : â) {fâ : ð â F}, ContDiff ð â fâ â ContDiff ð â (deriv^[n] fâ)
| 0, _, hf => hf
| n + 1, _, hf => ContDiff.iterate_deriv n (contDiff_top_iff_deriv.mp hf).2
/-- If `f` is `C^(n + k)`, then its `k`-th iterated derivative is `C^n`. -/
theorem ContDiff.iterate_deriv' (n : â) :
â (k : â) {fâ : ð â F}, ContDiff ð (n + k : â) fâ â ContDiff ð n (deriv^[k] fâ)
| 0, _, hf => hf
| k + 1, _, hf => ContDiff.iterate_deriv' _ k (contDiff_succ_iff_deriv.mp hf).2
end deriv
section RestrictScalars
/-!
### Restricting from `â` to `â`, or generally from `ð'` to `ð`
If a function is `n` times continuously differentiable over `â`, then it is `n` times continuously
differentiable over `â`. In this paragraph, we give variants of this statement, in the general
situation where `â` and `â` are replaced respectively by `ð'` and `ð` where `ð'` is a normed algebra
over `ð`.
-/
variable (ð) {ð' : Type*} [NontriviallyNormedField ð']
-- Porting note: this couldn't be on the same line as the binder type update of `ð`
variable [NormedAlgebra ð ð']
variable [NormedSpace ð' E] [IsScalarTower ð ð' E]
variable [NormedSpace ð' F] [IsScalarTower ð ð' F]
variable {p' : E â FormalMultilinearSeries ð' E F}
/-- A Taylor series of `f` over `ð'` is also a Taylor series over the smaller field `ð`, after
restricting the scalars of each term of the series. -/
theorem HasFTaylorSeriesUpToOn.restrictScalars (h : HasFTaylorSeriesUpToOn n f p' s) :
HasFTaylorSeriesUpToOn n f (fun x => (p' x).restrictScalars ð) s where
zero_eq x hx := h.zero_eq x hx
fderivWithin m hm x hx := by
simpa only using -- Porting note: added `by simpa only using`
(ContinuousMultilinearMap.restrictScalarsLinear ð).hasFDerivAt.comp_hasFDerivWithinAt x <|
(h.fderivWithin m hm x hx).restrictScalars ð
cont m hm := ContinuousMultilinearMap.continuous_restrictScalars.comp_continuousOn (h.cont m hm)
/-- If `f` is `C^n` within `s` at `x` over `ð'`, then it is `C^n` within `s` at `x` over the
smaller field `ð`. -/
theorem ContDiffWithinAt.restrict_scalars (h : ContDiffWithinAt ð' n f s x) :
ContDiffWithinAt ð n f s x := fun m hm ⊠by
rcases h m hm with âšu, u_mem, p', hp'â©
exact âšu, u_mem, _, hp'.restrictScalars _â©
/-- If `f` is `C^n` on `s` over `ð'`, then it is `C^n` on `s` over the smaller field `ð`. -/
theorem ContDiffOn.restrict_scalars (h : ContDiffOn ð' n f s) : ContDiffOn ð n f s := fun x hx =>
(h x hx).restrict_scalars _
/-- If `f` is `C^n` at `x` over `ð'`, then it is `C^n` at `x` over the smaller field `ð`. -/
theorem ContDiffAt.restrict_scalars (h : ContDiffAt ð' n f x) : ContDiffAt ð n f x :=
contDiffWithinAt_univ.1 <| h.contDiffWithinAt.restrict_scalars _
/-- If `f` is `C^n` over `ð'`, then it is `C^n` over the smaller field `ð`. -/
theorem ContDiff.restrict_scalars (h : ContDiff ð' n f) : ContDiff ð n f :=
contDiff_iff_contDiffAt.2 fun _ => h.contDiffAt.restrict_scalars _
end RestrictScalars
|
Analysis\Calculus\ContDiff\Bounds.lean | /-
Copyright (c) 2019 Sébastien Gouëzel. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Sébastien Gouëzel, Floris van Doorn
-/
import Mathlib.Analysis.Calculus.ContDiff.Basic
import Mathlib.Data.Finset.Sym
import Mathlib.Data.Nat.Choose.Cast
import Mathlib.Data.Nat.Choose.Multinomial
/-!
# Bounds on higher derivatives
`norm_iteratedFDeriv_comp_le` gives the bound `n! * C * D ^ n` for the `n`-th derivative
of `g â f` assuming that the derivatives of `g` are bounded by `C` and the `i`-th
derivative of `f` is bounded by `D ^ i`.
-/
noncomputable section
open scoped NNReal Nat
universe u uD uE uF uG
open Set Fin Filter Function
variable {ð : Type*} [NontriviallyNormedField ð] {D : Type uD} [NormedAddCommGroup D]
[NormedSpace ð D] {E : Type uE} [NormedAddCommGroup E] [NormedSpace ð E] {F : Type uF}
[NormedAddCommGroup F] [NormedSpace ð F] {G : Type uG} [NormedAddCommGroup G] [NormedSpace ð G]
{s sâ t u : Set E}
/-!## Quantitative bounds -/
/-- Bounding the norm of the iterated derivative of `B (f x) (g x)` within a set in terms of the
iterated derivatives of `f` and `g` when `B` is bilinear. This lemma is an auxiliary version
assuming all spaces live in the same universe, to enable an induction. Use instead
`ContinuousLinearMap.norm_iteratedFDerivWithin_le_of_bilinear` that removes this assumption. -/
theorem ContinuousLinearMap.norm_iteratedFDerivWithin_le_of_bilinear_aux {Du Eu Fu Gu : Type u}
[NormedAddCommGroup Du] [NormedSpace ð Du] [NormedAddCommGroup Eu] [NormedSpace ð Eu]
[NormedAddCommGroup Fu] [NormedSpace ð Fu] [NormedAddCommGroup Gu] [NormedSpace ð Gu]
(B : Eu âL[ð] Fu âL[ð] Gu) {f : Du â Eu} {g : Du â Fu} {n : â} {s : Set Du} {x : Du}
(hf : ContDiffOn ð n f s) (hg : ContDiffOn ð n g s) (hs : UniqueDiffOn ð s) (hx : x â s) :
âiteratedFDerivWithin ð n (fun y => B (f y) (g y)) s xâ â€
âBâ * â i â Finset.range (n + 1), (n.choose i : â) * âiteratedFDerivWithin ð i f s xâ *
âiteratedFDerivWithin ð (n - i) g s xâ := by
/- We argue by induction on `n`. The bound is trivial for `n = 0`. For `n + 1`, we write
the `(n+1)`-th derivative as the `n`-th derivative of the derivative `B f g' + B f' g`,
and apply the inductive assumption to each of those two terms. For this induction to make sense,
the spaces of linear maps that appear in the induction should be in the same universe as the
original spaces, which explains why we assume in the lemma that all spaces live in the same
universe. -/
induction' n with n IH generalizing Eu Fu Gu
-- base case `n = 0`: the bound is just the operator norm inequality for `B`
· simp only [Nat.zero_eq, norm_iteratedFDerivWithin_zero, zero_add, Finset.range_one,
Finset.sum_singleton, Nat.choose_self, Nat.cast_one, one_mul, Nat.sub_zero, â mul_assoc]
apply B.le_opNormâ
· have In : (n : ââ) + 1 †n.succ := by simp only [Nat.cast_succ, le_refl]
-- Porting note: the next line is a hack allowing Lean to find the operator norm instance.
let norm := @ContinuousLinearMap.hasOpNorm _ _ Eu ((Du âL[ð] Fu) âL[ð] Du âL[ð] Gu) _ _ _ _ _ _
(RingHom.id ð)
-- `I1` bounds the term `B f g'` coming from differentiating `g`
have I1 :
âiteratedFDerivWithin ð n (fun y : Du => B.precompR Du (f y) (fderivWithin ð g s y)) s xâ â€
âBâ * â i â Finset.range (n + 1), n.choose i * âiteratedFDerivWithin ð i f s xâ *
âiteratedFDerivWithin ð (n + 1 - i) g s xâ := by
calc
âiteratedFDerivWithin ð n (fun y : Du => B.precompR Du (f y) (fderivWithin ð g s y)) s xâ â€
âB.precompR Duâ * â i â Finset.range (n + 1),
n.choose i * âiteratedFDerivWithin ð i f s xâ *
âiteratedFDerivWithin ð (n - i) (fderivWithin ð g s) s xâ :=
IH _ (hf.of_le (Nat.cast_le.2 (Nat.le_succ n))) (hg.fderivWithin hs In)
_ †âBâ * â i â Finset.range (n + 1), n.choose i * âiteratedFDerivWithin ð i f s xâ *
âiteratedFDerivWithin ð (n - i) (fderivWithin ð g s) s xâ :=
mul_le_mul_of_nonneg_right (B.norm_precompR_le Du) (by positivity)
_ = _ := by
congr 1
apply Finset.sum_congr rfl fun i hi => ?_
rw [Nat.succ_sub (Nat.lt_succ_iff.1 (Finset.mem_range.1 hi)),
â norm_iteratedFDerivWithin_fderivWithin hs hx]
-- Porting note: the next line is a hack allowing Lean to find the operator norm instance.
let norm := @ContinuousLinearMap.hasOpNorm _ _ (Du âL[ð] Eu) (Fu âL[ð] Du âL[ð] Gu) _ _ _ _ _ _
(RingHom.id ð)
-- `I2` bounds the term `B f' g` coming from differentiating `f`
have I2 :
âiteratedFDerivWithin ð n (fun y : Du => B.precompL Du (fderivWithin ð f s y) (g y)) s xâ â€
âBâ * â i â Finset.range (n + 1), n.choose i * âiteratedFDerivWithin ð (i + 1) f s xâ *
âiteratedFDerivWithin ð (n - i) g s xâ :=
calc
âiteratedFDerivWithin ð n (fun y : Du => B.precompL Du (fderivWithin ð f s y) (g y)) s xâ â€
âB.precompL Duâ * â i â Finset.range (n + 1),
n.choose i * âiteratedFDerivWithin ð i (fderivWithin ð f s) s xâ *
âiteratedFDerivWithin ð (n - i) g s xâ :=
IH _ (hf.fderivWithin hs In) (hg.of_le (Nat.cast_le.2 (Nat.le_succ n)))
_ †âBâ * â i â Finset.range (n + 1),
n.choose i * âiteratedFDerivWithin ð i (fderivWithin ð f s) s xâ *
âiteratedFDerivWithin ð (n - i) g s xâ :=
mul_le_mul_of_nonneg_right (B.norm_precompL_le Du) (by positivity)
_ = _ := by
congr 1
apply Finset.sum_congr rfl fun i _ => ?_
rw [â norm_iteratedFDerivWithin_fderivWithin hs hx]
-- `J`: Leibniz rule, rewriting the derivative of `y ⊠B (f y) (g y)` as the sum of two terms
have J : iteratedFDerivWithin ð n
(fun y : Du => fderivWithin ð (fun y : Du => B (f y) (g y)) s y) s x =
iteratedFDerivWithin ð n (fun y => B.precompR Du (f y)
(fderivWithin ð g s y) + B.precompL Du (fderivWithin ð f s y) (g y)) s x := by
apply iteratedFDerivWithin_congr (fun y hy => ?_) hx
have L : (1 : ââ) †n.succ := by
simpa only [ENat.coe_one, Nat.one_le_cast] using Nat.succ_pos n
exact B.fderivWithin_of_bilinear (hf.differentiableOn L y hy) (hg.differentiableOn L y hy)
(hs y hy)
rw [â norm_iteratedFDerivWithin_fderivWithin hs hx, J]
have A : ContDiffOn ð n (fun y => B.precompR Du (f y) (fderivWithin ð g s y)) s :=
(B.precompR Du).isBoundedBilinearMap.contDiff.comp_contDiff_onâ
(hf.of_le (Nat.cast_le.2 (Nat.le_succ n))) (hg.fderivWithin hs In)
have A' : ContDiffOn ð n (fun y => B.precompL Du (fderivWithin ð f s y) (g y)) s :=
(B.precompL Du).isBoundedBilinearMap.contDiff.comp_contDiff_onâ (hf.fderivWithin hs In)
(hg.of_le (Nat.cast_le.2 (Nat.le_succ n)))
rw [iteratedFDerivWithin_add_apply' A A' hs hx]
-- combine `I1` and `I2`, then regroup the binomial coefficients via Pascal's rule
apply (norm_add_le _ _).trans ((add_le_add I1 I2).trans (le_of_eq ?_))
simp_rw [â mul_add, mul_assoc]
congr 1
exact (Finset.sum_choose_succ_mul
(fun i j => âiteratedFDerivWithin ð i f s xâ * âiteratedFDerivWithin ð j g s xâ) n).symm
/-- Bounding the norm of the iterated derivative of `B (f x) (g x)` within a set in terms of the
iterated derivatives of `f` and `g` when `B` is bilinear:
`âD^n (x ⊠B (f x) (g x))â †âBâ â_{k †n} n.choose k âD^k fâ âD^{n-k} gâ` -/
theorem ContinuousLinearMap.norm_iteratedFDerivWithin_le_of_bilinear (B : E âL[ð] F âL[ð] G)
{f : D â E} {g : D â F} {N : ââ} {s : Set D} {x : D} (hf : ContDiffOn ð N f s)
(hg : ContDiffOn ð N g s) (hs : UniqueDiffOn ð s) (hx : x â s) {n : â} (hn : (n : ââ) †N) :
âiteratedFDerivWithin ð n (fun y => B (f y) (g y)) s xâ â€
âBâ * â i â Finset.range (n + 1), (n.choose i : â) * âiteratedFDerivWithin ð i f s xâ *
âiteratedFDerivWithin ð (n - i) g s xâ := by
/- We reduce the bound to the case where all spaces live in the same universe (in which we
already have proved the result), by using linear isometries between the spaces and their `ULift`
to a common universe. These linear isometries preserve the norm of the iterated derivative. -/
let Du : Type max uD uE uF uG := ULift.{max uE uF uG, uD} D
let Eu : Type max uD uE uF uG := ULift.{max uD uF uG, uE} E
let Fu : Type max uD uE uF uG := ULift.{max uD uE uG, uF} F
let Gu : Type max uD uE uF uG := ULift.{max uD uE uF, uG} G
have isoD : Du ââáµ¢[ð] D := LinearIsometryEquiv.ulift ð D
have isoE : Eu ââáµ¢[ð] E := LinearIsometryEquiv.ulift ð E
have isoF : Fu ââáµ¢[ð] F := LinearIsometryEquiv.ulift ð F
have isoG : Gu ââáµ¢[ð] G := LinearIsometryEquiv.ulift ð G
-- lift `f` and `g` to versions `fu` and `gu` on the lifted spaces.
set fu : Du â Eu := isoE.symm â f â isoD with hfu
set gu : Du â Fu := isoF.symm â g â isoD with hgu
-- lift the bilinear map `B` to a bilinear map `Bu` on the lifted spaces.
set Buâ : Eu âL[ð] Fu âL[ð] G := ((B.comp (isoE : Eu âL[ð] E)).flip.comp (isoF : Fu âL[ð] F)).flip
with hBuâ
let Bu : Eu âL[ð] Fu âL[ð] Gu :=
ContinuousLinearMap.compL ð Eu (Fu âL[ð] G) (Fu âL[ð] Gu)
(ContinuousLinearMap.compL ð Fu G Gu (isoG.symm : G âL[ð] Gu)) Buâ
have hBu : Bu = ContinuousLinearMap.compL ð Eu (Fu âL[ð] G) (Fu âL[ð] Gu)
(ContinuousLinearMap.compL ð Fu G Gu (isoG.symm : G âL[ð] Gu)) Buâ := rfl
have Bu_eq : (fun y => Bu (fu y) (gu y)) = isoG.symm â (fun y => B (f y) (g y)) â isoD := by
ext1 y
simp [hBu, hBuâ, hfu, hgu]
-- All norms are preserved by the lifting process.
have Bu_le : âBuâ †âBâ := by
refine ContinuousLinearMap.opNorm_le_bound _ (norm_nonneg B) fun y => ?_
refine ContinuousLinearMap.opNorm_le_bound _ (by positivity) fun x => ?_
simp only [hBu, hBuâ, compL_apply, coe_comp', Function.comp_apply,
ContinuousLinearEquiv.coe_coe, LinearIsometryEquiv.coe_coe, flip_apply,
LinearIsometryEquiv.norm_map]
calc
âB (isoE y) (isoF x)â †âB (isoE y)â * âisoF xâ := ContinuousLinearMap.le_opNorm _ _
_ †âBâ * âisoE yâ * âisoF xâ := by gcongr; apply ContinuousLinearMap.le_opNorm
_ = âBâ * âyâ * âxâ := by simp only [LinearIsometryEquiv.norm_map]
-- transport the set, the point and the smoothness assumptions along `isoD`
let su := isoD â»Â¹' s
have hsu : UniqueDiffOn ð su := isoD.toContinuousLinearEquiv.uniqueDiffOn_preimage_iff.2 hs
let xu := isoD.symm x
have hxu : xu â su := by
simpa only [xu, su, Set.mem_preimage, LinearIsometryEquiv.apply_symm_apply] using hx
have xu_x : isoD xu = x := by simp only [xu, LinearIsometryEquiv.apply_symm_apply]
have hfu : ContDiffOn ð n fu su :=
isoE.symm.contDiff.comp_contDiffOn
((hf.of_le hn).comp_continuousLinearMap (isoD : Du âL[ð] D))
have hgu : ContDiffOn ð n gu su :=
isoF.symm.contDiff.comp_contDiffOn
((hg.of_le hn).comp_continuousLinearMap (isoD : Du âL[ð] D))
-- norms of iterated derivatives are unchanged by the lifting isometries
have Nfu : â i, âiteratedFDerivWithin ð i fu su xuâ = âiteratedFDerivWithin ð i f s xâ := by
intro i
rw [LinearIsometryEquiv.norm_iteratedFDerivWithin_comp_left _ _ hsu hxu]
rw [LinearIsometryEquiv.norm_iteratedFDerivWithin_comp_right _ _ hs, xu_x]
rwa [â xu_x] at hx
have Ngu : â i, âiteratedFDerivWithin ð i gu su xuâ = âiteratedFDerivWithin ð i g s xâ := by
intro i
rw [LinearIsometryEquiv.norm_iteratedFDerivWithin_comp_left _ _ hsu hxu]
rw [LinearIsometryEquiv.norm_iteratedFDerivWithin_comp_right _ _ hs, xu_x]
rwa [â xu_x] at hx
have NBu :
âiteratedFDerivWithin ð n (fun y => Bu (fu y) (gu y)) su xuâ =
âiteratedFDerivWithin ð n (fun y => B (f y) (g y)) s xâ := by
rw [Bu_eq]
rw [LinearIsometryEquiv.norm_iteratedFDerivWithin_comp_left _ _ hsu hxu]
rw [LinearIsometryEquiv.norm_iteratedFDerivWithin_comp_right _ _ hs, xu_x]
rwa [â xu_x] at hx
-- state the bound for the lifted objects, and deduce the original bound from it.
have : âiteratedFDerivWithin ð n (fun y => Bu (fu y) (gu y)) su xuâ â€
âBuâ * â i â Finset.range (n + 1), (n.choose i : â) * âiteratedFDerivWithin ð i fu su xuâ *
âiteratedFDerivWithin ð (n - i) gu su xuâ :=
Bu.norm_iteratedFDerivWithin_le_of_bilinear_aux hfu hgu hsu hxu
simp only [Nfu, Ngu, NBu] at this
exact this.trans (mul_le_mul_of_nonneg_right Bu_le (by positivity))
/-- Bounding the norm of the iterated derivative of `B (f x) (g x)` in terms of the
iterated derivatives of `f` and `g` when `B` is bilinear:
`âD^n (x ⊠B (f x) (g x))â †âBâ â_{k †n} n.choose k âD^k fâ âD^{n-k} gâ` -/
theorem ContinuousLinearMap.norm_iteratedFDeriv_le_of_bilinear (B : E âL[ð] F âL[ð] G) {f : D â E}
{g : D â F} {N : ââ} (hf : ContDiff ð N f) (hg : ContDiff ð N g) (x : D) {n : â}
(hn : (n : ââ) †N) :
âiteratedFDeriv ð n (fun y => B (f y) (g y)) xâ †âBâ * â i â Finset.range (n + 1),
(n.choose i : â) * âiteratedFDeriv ð i f xâ * âiteratedFDeriv ð (n - i) g xâ := by
-- specialize the within-a-set version to `s = univ`
simp_rw [â iteratedFDerivWithin_univ]
exact B.norm_iteratedFDerivWithin_le_of_bilinear hf.contDiffOn hg.contDiffOn uniqueDiffOn_univ
(mem_univ x) hn
/-- Bounding the norm of the iterated derivative of `B (f x) (g x)` within a set in terms of the
iterated derivatives of `f` and `g` when `B` is bilinear of norm at most `1`:
`âD^n (x ⊠B (f x) (g x))â †â_{k †n} n.choose k âD^k fâ âD^{n-k} gâ` -/
theorem ContinuousLinearMap.norm_iteratedFDerivWithin_le_of_bilinear_of_le_one
(B : E âL[ð] F âL[ð] G) {f : D â E} {g : D â F} {N : ââ} {s : Set D} {x : D}
(hf : ContDiffOn ð N f s) (hg : ContDiffOn ð N g s) (hs : UniqueDiffOn ð s) (hx : x â s) {n : â}
(hn : (n : ââ) †N) (hB : âBâ †1) : âiteratedFDerivWithin ð n (fun y => B (f y) (g y)) s xâ â€
â i â Finset.range (n + 1), (n.choose i : â) * âiteratedFDerivWithin ð i f s xâ *
âiteratedFDerivWithin ð (n - i) g s xâ := by
-- absorb the factor `âBâ †1` into the bound
apply (B.norm_iteratedFDerivWithin_le_of_bilinear hf hg hs hx hn).trans
exact mul_le_of_le_one_left (by positivity) hB
/-- Bounding the norm of the iterated derivative of `B (f x) (g x)` in terms of the
iterated derivatives of `f` and `g` when `B` is bilinear of norm at most `1`:
`âD^n (x ⊠B (f x) (g x))â †â_{k †n} n.choose k âD^k fâ âD^{n-k} gâ` -/
theorem ContinuousLinearMap.norm_iteratedFDeriv_le_of_bilinear_of_le_one (B : E âL[ð] F âL[ð] G)
{f : D â E} {g : D â F} {N : ââ} (hf : ContDiff ð N f) (hg : ContDiff ð N g) (x : D) {n : â}
(hn : (n : ââ) †N) (hB : âBâ †1) : âiteratedFDeriv ð n (fun y => B (f y) (g y)) xâ â€
â i â Finset.range (n + 1),
(n.choose i : â) * âiteratedFDeriv ð i f xâ * âiteratedFDeriv ð (n - i) g xâ := by
-- specialize the within-a-set version to `s = univ`
simp_rw [â iteratedFDerivWithin_univ]
exact B.norm_iteratedFDerivWithin_le_of_bilinear_of_le_one hf.contDiffOn hg.contDiffOn
uniqueDiffOn_univ (mem_univ x) hn hB
section
variable {ð' : Type*} [NormedField ð'] [NormedAlgebra ð ð'] [NormedSpace ð' F]
[IsScalarTower ð ð' F]
/-- Leibniz-type bound for the iterated derivative, within a set, of a scalar multiple
`y ⊠f y ⢠g y`: the `n`-th derivative is bounded by the binomial sum of products of iterated
derivatives of `f` and `g`. Special case of the bilinear bound with `B = lsmul`. -/
theorem norm_iteratedFDerivWithin_smul_le {f : E â ð'} {g : E â F} {N : ââ}
(hf : ContDiffOn ð N f s) (hg : ContDiffOn ð N g s) (hs : UniqueDiffOn ð s) {x : E} (hx : x â s)
{n : â} (hn : (n : ââ) †N) : âiteratedFDerivWithin ð n (fun y => f y ⢠g y) s xâ â€
â i â Finset.range (n + 1), (n.choose i : â) * âiteratedFDerivWithin ð i f s xâ *
âiteratedFDerivWithin ð (n - i) g s xâ :=
(ContinuousLinearMap.lsmul ð ð' :
ð' âL[ð] F âL[ð] F).norm_iteratedFDerivWithin_le_of_bilinear_of_le_one
hf hg hs hx hn ContinuousLinearMap.opNorm_lsmul_le
/-- Leibniz-type bound for the iterated derivative of a scalar multiple `y ⊠f y ⢠g y`.
Special case of the bilinear bound with `B = lsmul`. -/
theorem norm_iteratedFDeriv_smul_le {f : E â ð'} {g : E â F} {N : ââ} (hf : ContDiff ð N f)
(hg : ContDiff ð N g) (x : E) {n : â} (hn : (n : ââ) †N) :
âiteratedFDeriv ð n (fun y => f y ⢠g y) xâ †â i â Finset.range (n + 1),
(n.choose i : â) * âiteratedFDeriv ð i f xâ * âiteratedFDeriv ð (n - i) g xâ :=
(ContinuousLinearMap.lsmul ð ð' : ð' âL[ð] F âL[ð] F).norm_iteratedFDeriv_le_of_bilinear_of_le_one
hf hg x hn ContinuousLinearMap.opNorm_lsmul_le
end
section
variable {ι : Type*} {A : Type*} [NormedRing A] [NormedAlgebra ð A] {A' : Type*} [NormedCommRing A']
[NormedAlgebra ð A']
/-- Leibniz-type bound for the iterated derivative, within a set, of a product `y ⊠f y * g y`
in a normed algebra. Special case of the bilinear bound with `B = mul`. -/
theorem norm_iteratedFDerivWithin_mul_le {f : E â A} {g : E â A} {N : ââ} (hf : ContDiffOn ð N f s)
(hg : ContDiffOn ð N g s) (hs : UniqueDiffOn ð s) {x : E} (hx : x â s) {n : â}
(hn : (n : ââ) †N) : âiteratedFDerivWithin ð n (fun y => f y * g y) s xâ â€
â i â Finset.range (n + 1), (n.choose i : â) * âiteratedFDerivWithin ð i f s xâ *
âiteratedFDerivWithin ð (n - i) g s xâ :=
(ContinuousLinearMap.mul ð A :
A âL[ð] A âL[ð] A).norm_iteratedFDerivWithin_le_of_bilinear_of_le_one
hf hg hs hx hn (ContinuousLinearMap.opNorm_mul_le _ _)
/-- Leibniz-type bound for the iterated derivative of a product `y ⊠f y * g y` in a normed
algebra. Special case of the bilinear bound with `B = mul`. -/
theorem norm_iteratedFDeriv_mul_le {f : E â A} {g : E â A} {N : ââ} (hf : ContDiff ð N f)
(hg : ContDiff ð N g) (x : E) {n : â} (hn : (n : ââ) †N) :
âiteratedFDeriv ð n (fun y => f y * g y) xâ †â i â Finset.range (n + 1),
(n.choose i : â) * âiteratedFDeriv ð i f xâ * âiteratedFDeriv ð (n - i) g xâ := by
-- specialize the within-a-set version to `s = univ`
simp_rw [â iteratedFDerivWithin_univ]
exact norm_iteratedFDerivWithin_mul_le
hf.contDiffOn hg.contDiffOn uniqueDiffOn_univ (mem_univ x) hn
-- TODO: Add `norm_iteratedFDeriv[Within]_list_prod_le` for non-commutative `NormedRing A`.
/-- Multinomial Leibniz-type bound: the `n`-th iterated derivative, within a set, of a finite
product `â j â u, f j` is bounded by a sum over `u.sym n` of multinomial coefficients times
products of iterated derivatives of the factors. -/
theorem norm_iteratedFDerivWithin_prod_le [DecidableEq ι] [NormOneClass A'] {u : Finset ι}
{f : ι â E â A'} {N : ââ} (hf : â i â u, ContDiffOn ð N (f i) s) (hs : UniqueDiffOn ð s) {x : E}
(hx : x â s) {n : â} (hn : (n : ââ) †N) :
âiteratedFDerivWithin ð n (â j â u, f j ·) s xâ â€
â p â u.sym n, (p : Multiset ι).multinomial *
â j â u, âiteratedFDerivWithin ð (Multiset.count j p) (f j) s xâ := by
-- induction on the finite set `u`, peeling off one factor at a time
induction u using Finset.induction generalizing n with
| empty =>
cases n with
| zero => simp [Sym.eq_nil_of_card_zero]
| succ n => simp [iteratedFDerivWithin_succ_const _ _ hs hx]
| @insert i u hi IH =>
conv => lhs; simp only [Finset.prod_insert hi]
simp only [Finset.mem_insert, forall_eq_or_imp] at hf
-- apply the binary Leibniz bound to `f i * â j â u, f j`
refine le_trans (norm_iteratedFDerivWithin_mul_le hf.1 (contDiffOn_prod hf.2) hs hx hn) ?_
-- reindex the sum over `(insert i u).sym n` via `Finset.symInsertEquiv`
rw [â Finset.sum_coe_sort (Finset.sym _ _)]
rw [Finset.sum_equiv (Finset.symInsertEquiv hi) (t := Finset.univ)
(g := (fun v ⊠v.multinomial *
â j â insert i u, âiteratedFDerivWithin ð (v.count j) (f j) s xâ) â
Sym.toMultiset â Subtype.val â (Finset.symInsertEquiv hi).symm)
(by simp) (by simp only [â comp_apply (g := Finset.symInsertEquiv hi), comp.assoc]; simp)]
rw [â Finset.univ_sigma_univ, Finset.sum_sigma, Finset.sum_range]
simp only [comp_apply, Finset.symInsertEquiv_symm_apply_coe]
refine Finset.sum_le_sum ?_
intro m _
-- the inductive hypothesis at order `n - m` handles the remaining factors
specialize IH hf.2 (n := n - m) (le_trans (WithTop.coe_le_coe.mpr (n.sub_le m)) hn)
refine le_trans (mul_le_mul_of_nonneg_left IH (by simp [mul_nonneg])) ?_
rw [Finset.mul_sum, â Finset.sum_coe_sort]
refine Finset.sum_le_sum ?_
simp only [Finset.mem_univ, forall_true_left, Subtype.forall, Finset.mem_sym_iff]
intro p hp
refine le_of_eq ?_
rw [Finset.prod_insert hi]
have hip : i â p := mt (hp i) hi
rw [Sym.count_coe_fill_self_of_not_mem hip, Sym.multinomial_coe_fill_of_not_mem hip]
suffices â j â u, âiteratedFDerivWithin ð (Multiset.count j p) (f j) s xâ =
â j â u, âiteratedFDerivWithin ð (Multiset.count j (Sym.fill i m p)) (f j) s xâ by
rw [this, Nat.cast_mul]
ring
refine Finset.prod_congr rfl ?_
intro j hj
have hji : j â i := mt (· âž hj) hi
rw [Sym.count_coe_fill_of_ne hji]
/-- Multinomial Leibniz-type bound for the `n`-th iterated derivative of a finite product
`â j â u, f j`. Global version of `norm_iteratedFDerivWithin_prod_le`. -/
theorem norm_iteratedFDeriv_prod_le [DecidableEq ι] [NormOneClass A'] {u : Finset ι}
{f : ι â E â A'} {N : ââ} (hf : â i â u, ContDiff ð N (f i)) {x : E} {n : â}
(hn : (n : ââ) †N) :
âiteratedFDeriv ð n (â j â u, f j ·) xâ â€
â p â u.sym n, (p : Multiset ι).multinomial *
â j â u, âiteratedFDeriv ð ((p : Multiset ι).count j) (f j) xâ := by
-- specialize the within-a-set version to `s = univ`
simpa [iteratedFDerivWithin_univ] using
norm_iteratedFDerivWithin_prod_le (fun i hi ⊠(hf i hi).contDiffOn) uniqueDiffOn_univ
(mem_univ x) hn
end
/-- If the derivatives within a set of `g` at `f x` are bounded by `C`, and the `i`-th derivative
within a set of `f` at `x` is bounded by `D^i` for all `1 †i †n`, then the `n`-th derivative
of `g â f` is bounded by `n! * C * D^n`.
This lemma proves this estimate assuming additionally that two of the spaces live in the same
universe, to make an induction possible. Use instead `norm_iteratedFDerivWithin_comp_le` that
removes this assumption. -/
theorem norm_iteratedFDerivWithin_comp_le_aux {Fu Gu : Type u} [NormedAddCommGroup Fu]
    [NormedSpace ð Fu] [NormedAddCommGroup Gu] [NormedSpace ð Gu] {g : Fu â Gu} {f : E â Fu} {n : â}
    {s : Set E} {t : Set Fu} {x : E} (hg : ContDiffOn ð n g t) (hf : ContDiffOn ð n f s)
    (ht : UniqueDiffOn ð t) (hs : UniqueDiffOn ð s) (hst : MapsTo f s t) (hx : x â s) {C : â}
    {D : â} (hC : â i, i †n â âiteratedFDerivWithin ð i g t (f x)â †C)
    (hD : â i, 1 †i â i †n â âiteratedFDerivWithin ð i f s xâ †D ^ i) :
    âiteratedFDerivWithin ð n (g â f) s xâ †n ! * C * D ^ n := by
  /- We argue by induction on `n`, using that `D^(n+1) (g â f) = D^n (g ' â f ⬠f')`. The successive
  derivatives of `g' â f` are controlled thanks to the inductive assumption, and those of `f'` are
  controlled by assumption.
  As composition of linear maps is a bilinear map, one may use
  `ContinuousLinearMap.norm_iteratedFDeriv_le_of_bilinear_of_le_one` to get from these a bound
  on `D^n (g ' â f ⬠f')`. -/
  induction' n using Nat.case_strong_induction_on with n IH generalizing Gu
  -- Base case `n = 0`: the claim reduces to the zeroth-order bound `hC 0`.
  · simpa [norm_iteratedFDerivWithin_zero, Nat.factorial_zero, algebraMap.coe_one, one_mul,
      pow_zero, mul_one, comp_apply] using hC 0 le_rfl
  have M : (n : ââ) < n.succ := Nat.cast_lt.2 n.lt_succ_self
  -- `C` and `D` are nonnegative since they dominate norms.
  have Cnonneg : 0 †C := (norm_nonneg _).trans (hC 0 bot_le)
  have Dnonneg : 0 †D := by
    have : 1 †n + 1 := by simp only [le_add_iff_nonneg_left, zero_le']
    simpa only [pow_one] using (norm_nonneg _).trans (hD 1 le_rfl this)
  -- use the inductive assumption to bound the derivatives of `g' â f`.
  have I : â i â Finset.range (n + 1),
      âiteratedFDerivWithin ð i (fderivWithin ð g t â f) s xâ †i ! * C * D ^ i := by
    intro i hi
    simp only [Finset.mem_range_succ_iff] at hi
    apply IH i hi
    · apply hg.fderivWithin ht
      simp only [Nat.cast_succ]
      exact add_le_add_right (Nat.cast_le.2 hi) _
    · apply hf.of_le (Nat.cast_le.2 (hi.trans n.le_succ))
    · intro j hj
      -- Derivatives of `g'` are derivatives of `g` shifted by one order.
      have : âiteratedFDerivWithin ð j (fderivWithin ð g t) t (f x)â =
          âiteratedFDerivWithin ð (j + 1) g t (f x)â := by
        rw [iteratedFDerivWithin_succ_eq_comp_right ht (hst hx), comp_apply,
          LinearIsometryEquiv.norm_map]
      rw [this]
      exact hC (j + 1) (add_le_add (hj.trans hi) le_rfl)
    · intro j hj h'j
      exact hD j hj (h'j.trans (hi.trans n.le_succ))
  -- reformulate `hD` as a bound for the derivatives of `f'`.
  have J : â i, âiteratedFDerivWithin ð (n - i) (fderivWithin ð f s) s xâ †D ^ (n - i + 1) := by
    intro i
    have : âiteratedFDerivWithin ð (n - i) (fderivWithin ð f s) s xâ =
        âiteratedFDerivWithin ð (n - i + 1) f s xâ := by
      rw [iteratedFDerivWithin_succ_eq_comp_right hs hx, comp_apply, LinearIsometryEquiv.norm_map]
    rw [this]
    apply hD
    · simp only [le_add_iff_nonneg_left, zero_le']
    · apply Nat.succ_le_succ tsub_le_self
  -- Now put these together: first, notice that we have to bound `D^n (g' â f ⬠f')`.
  calc
    âiteratedFDerivWithin ð (n + 1) (g â f) s xâ =
        âiteratedFDerivWithin ð n (fun y : E => fderivWithin ð (g â f) s y) s xâ := by
      rw [iteratedFDerivWithin_succ_eq_comp_right hs hx, comp_apply,
        LinearIsometryEquiv.norm_map]
    _ = âiteratedFDerivWithin ð n (fun y : E => ContinuousLinearMap.compL ð E Fu Gu
        (fderivWithin ð g t (f y)) (fderivWithin ð f s y)) s xâ := by
      have L : (1 : ââ) †n.succ := by simpa only [ENat.coe_one, Nat.one_le_cast] using n.succ_pos
      congr 1
      -- Chain rule: rewrite `(g â f)'` as the composition of the two derivatives.
      refine iteratedFDerivWithin_congr (fun y hy => ?_) hx _
      apply fderivWithin.comp _ _ _ hst (hs y hy)
      · exact hg.differentiableOn L _ (hst hy)
      · exact hf.differentiableOn L _ hy
    -- bound it using the fact that the composition of linear maps is a bilinear operation,
    -- for which we have bounds for the`n`-th derivative.
    _ †â i â Finset.range (n + 1),
        (n.choose i : â) * âiteratedFDerivWithin ð i (fderivWithin ð g t â f) s xâ *
          âiteratedFDerivWithin ð (n - i) (fderivWithin ð f s) s xâ := by
      have A : ContDiffOn ð n (fderivWithin ð g t â f) s := by
        apply ContDiffOn.comp _ (hf.of_le M.le) hst
        apply hg.fderivWithin ht
        simp only [Nat.cast_succ, le_refl]
      have B : ContDiffOn ð n (fderivWithin ð f s) s := by
        apply hf.fderivWithin hs
        simp only [Nat.cast_succ, le_refl]
      exact (ContinuousLinearMap.compL ð E Fu Gu).norm_iteratedFDerivWithin_le_of_bilinear_of_le_one
        A B hs hx le_rfl (ContinuousLinearMap.norm_compL_le ð E Fu Gu)
    -- bound each of the terms using the estimates on previous derivatives (that use the inductive
    -- assumption for `g' â f`).
    _ †â i â Finset.range (n + 1), (n.choose i : â) * (i ! * C * D ^ i) * D ^ (n - i + 1) := by
      gcongr with i hi
      · exact I i hi
      · exact J i
    -- We are left with trivial algebraic manipulations to see that this is smaller than
    -- the claimed bound.
    _ = â i â Finset.range (n + 1),
        -- Porting note: had to insert a few more explicit type ascriptions in this and similar
        -- expressions.
        (n ! : â) * ((i ! : â)â»Â¹ * i !) * C * (D ^ i * D ^ (n - i + 1)) * ((n - i)! : â)â»Â¹ := by
      congr! 1 with i hi
      simp only [Nat.cast_choose â (Finset.mem_range_succ_iff.1 hi), div_eq_inv_mul, mul_inv]
      ring
    _ = â i â Finset.range (n + 1), (n ! : â) * 1 * C * D ^ (n + 1) * ((n - i)! : â)â»Â¹ := by
      congr! with i hi
      · apply inv_mul_cancel
        simpa only [Ne, Nat.cast_eq_zero] using i.factorial_ne_zero
      · rw [â pow_add]
        congr 1
        rw [Nat.add_succ, Nat.succ_inj']
        exact Nat.add_sub_of_le (Finset.mem_range_succ_iff.1 hi)
    _ †â i â Finset.range (n + 1), (n ! : â) * 1 * C * D ^ (n + 1) * 1 := by
      gcongr with i
      -- `((n - i)!)â»Â¹ †1` since factorials are at least `1`.
      apply inv_le_one
      simpa only [Nat.one_le_cast] using (n - i).factorial_pos
    _ = (n + 1)! * C * D ^ (n + 1) := by
      simp only [mul_assoc, mul_one, Finset.sum_const, Finset.card_range, nsmul_eq_mul,
        Nat.factorial_succ, Nat.cast_mul]
/-- If the derivatives within a set of `g` at `f x` are bounded by `C`, and the `i`-th derivative
within a set of `f` at `x` is bounded by `D^i` for all `1 †i †n`, then the `n`-th derivative
of `g â f` is bounded by `n! * C * D^n`. -/
theorem norm_iteratedFDerivWithin_comp_le {g : F â G} {f : E â F} {n : â} {s : Set E} {t : Set F}
    {x : E} {N : ââ} (hg : ContDiffOn ð N g t) (hf : ContDiffOn ð N f s) (hn : (n : ââ) †N)
    (ht : UniqueDiffOn ð t) (hs : UniqueDiffOn ð s) (hst : MapsTo f s t) (hx : x â s) {C : â}
    {D : â} (hC : â i, i †n â âiteratedFDerivWithin ð i g t (f x)â †C)
    (hD : â i, 1 †i â i †n â âiteratedFDerivWithin ð i f s xâ †D ^ i) :
    âiteratedFDerivWithin ð n (g â f) s xâ †n ! * C * D ^ n := by
  /- We reduce the bound to the case where all spaces live in the same universe (in which we
  already have proved the result), by using linear isometries between the spaces and their `ULift`
  to a common universe. These linear isometries preserve the norm of the iterated derivative. -/
  let Fu : Type max uF uG := ULift.{uG, uF} F
  let Gu : Type max uF uG := ULift.{uF, uG} G
  have isoF : Fu ââáµ¢[ð] F := LinearIsometryEquiv.ulift ð F
  have isoG : Gu ââáµ¢[ð] G := LinearIsometryEquiv.ulift ð G
  -- lift `f` and `g` to versions `fu` and `gu` on the lifted spaces.
  let fu : E â Fu := isoF.symm â f
  let gu : Fu â Gu := isoG.symm â g â isoF
  let tu := isoF â»Â¹' t
  have htu : UniqueDiffOn ð tu := isoF.toContinuousLinearEquiv.uniqueDiffOn_preimage_iff.2 ht
  have hstu : MapsTo fu s tu := fun y hy ⊠by
    simpa only [fu, tu, mem_preimage, comp_apply, LinearIsometryEquiv.apply_symm_apply] using hst hy
  have Ffu : isoF (fu x) = f x := by
    simp only [fu, comp_apply, LinearIsometryEquiv.apply_symm_apply]
  -- All norms are preserved by the lifting process.
  have hfu : ContDiffOn ð n fu s := isoF.symm.contDiff.comp_contDiffOn (hf.of_le hn)
  have hgu : ContDiffOn ð n gu tu :=
    isoG.symm.contDiff.comp_contDiffOn
      ((hg.of_le hn).comp_continuousLinearMap (isoF : Fu âL[ð] F))
  -- Composing with a linear isometry does not change iterated-derivative norms.
  have Nfu : â i, âiteratedFDerivWithin ð i fu s xâ = âiteratedFDerivWithin ð i f s xâ := fun i ⊠by
    rw [LinearIsometryEquiv.norm_iteratedFDerivWithin_comp_left _ _ hs hx]
  simp_rw [â Nfu] at hD
  have Ngu : â i,
      âiteratedFDerivWithin ð i gu tu (fu x)â = âiteratedFDerivWithin ð i g t (f x)â := fun i ⊠by
    rw [LinearIsometryEquiv.norm_iteratedFDerivWithin_comp_left _ _ htu (hstu hx)]
    rw [LinearIsometryEquiv.norm_iteratedFDerivWithin_comp_right _ _ ht, Ffu]
    rw [Ffu]
    exact hst hx
  simp_rw [â Ngu] at hC
  have Nfgu :
      âiteratedFDerivWithin ð n (g â f) s xâ = âiteratedFDerivWithin ð n (gu â fu) s xâ := by
    have : gu â fu = isoG.symm â g â f := by
      ext x
      simp only [fu, gu, comp_apply, LinearIsometryEquiv.map_eq_iff,
        LinearIsometryEquiv.apply_symm_apply]
    rw [this, LinearIsometryEquiv.norm_iteratedFDerivWithin_comp_left _ _ hs hx]
  -- deduce the required bound from the one for `gu â fu`.
  rw [Nfgu]
  exact norm_iteratedFDerivWithin_comp_le_aux hgu hfu htu hs hstu hx hC hD
/-- If the derivatives of `g` at `f x` are bounded by `C`, and the `i`-th derivative
of `f` at `x` is bounded by `D^i` for all `1 †i †n`, then the `n`-th derivative
of `g â f` is bounded by `n! * C * D^n`. -/
theorem norm_iteratedFDeriv_comp_le {g : F â G} {f : E â F} {n : â} {N : ââ} (hg : ContDiff ð N g)
    (hf : ContDiff ð N f) (hn : (n : ââ) †N) (x : E) {C : â} {D : â}
    (hC : â i, i †n â âiteratedFDeriv ð i g (f x)â †C)
    (hD : â i, 1 †i â i †n â âiteratedFDeriv ð i f xâ †D ^ i) :
    âiteratedFDeriv ð n (g â f) xâ †n ! * C * D ^ n := by
  -- Specialize the `Within` version to `s = t = univ`.
  simp_rw [â iteratedFDerivWithin_univ] at hC hD â¢
  exact norm_iteratedFDerivWithin_comp_le hg.contDiffOn hf.contDiffOn hn uniqueDiffOn_univ
    uniqueDiffOn_univ (mapsTo_univ _ _) (mem_univ x) hC hD
section Apply
/-- Leibniz-type bound: the `n`-th derivative within `s` of `y ⊠(f y) (g y)`, where `f` takes
values in continuous linear maps, is bounded by the binomial sum of products of norms of iterated
derivatives of `f` and `g`. -/
theorem norm_iteratedFDerivWithin_clm_apply {f : E â F âL[ð] G} {g : E â F} {s : Set E} {x : E}
    {N : ââ} {n : â} (hf : ContDiffOn ð N f s) (hg : ContDiffOn ð N g s) (hs : UniqueDiffOn ð s)
    (hx : x â s) (hn : ân †N) : âiteratedFDerivWithin ð n (fun y => (f y) (g y)) s xâ â€
    â i â Finset.range (n + 1), â(n.choose i) * âiteratedFDerivWithin ð i f s xâ *
      âiteratedFDerivWithin ð (n - i) g s xâ := by
  -- Apply the general bilinear bound to the evaluation map `(L, v) ⊠L v`, which has norm `†1`.
  let B : (F âL[ð] G) âL[ð] F âL[ð] G := ContinuousLinearMap.flip (ContinuousLinearMap.apply ð G)
  have hB : âBâ †1 := by
    simp only [B, ContinuousLinearMap.opNorm_flip, ContinuousLinearMap.apply]
    refine ContinuousLinearMap.opNorm_le_bound _ zero_le_one fun f => ?_
    simp only [ContinuousLinearMap.coe_id', id, one_mul]
    rfl
  exact B.norm_iteratedFDerivWithin_le_of_bilinear_of_le_one hf hg hs hx hn hB
/-- Whole-space version of `norm_iteratedFDerivWithin_clm_apply`. -/
theorem norm_iteratedFDeriv_clm_apply {f : E â F âL[ð] G} {g : E â F} {N : ââ} {n : â}
    (hf : ContDiff ð N f) (hg : ContDiff ð N g) (x : E) (hn : ân †N) :
    âiteratedFDeriv ð n (fun y : E => (f y) (g y)) xâ †â i â Finset.range (n + 1),
      â(n.choose i) * âiteratedFDeriv ð i f xâ * âiteratedFDeriv ð (n - i) g xâ := by
  simp only [â iteratedFDerivWithin_univ]
  exact norm_iteratedFDerivWithin_clm_apply hf.contDiffOn hg.contDiffOn uniqueDiffOn_univ
    (Set.mem_univ x) hn
/-- Applying a family of continuous linear maps to a fixed vector `c` multiplies the norm of the
`n`-th derivative within `s` by at most `âcâ`. -/
theorem norm_iteratedFDerivWithin_clm_apply_const {f : E â F âL[ð] G} {c : F} {s : Set E} {x : E}
    {N : ââ} {n : â} (hf : ContDiffOn ð N f s) (hs : UniqueDiffOn ð s) (hx : x â s) (hn : ân †N) :
    âiteratedFDerivWithin ð n (fun y : E => (f y) c) s xâ â€
    âcâ * âiteratedFDerivWithin ð n f s xâ := by
  -- Post-compose with the linear evaluation-at-`c` map, whose operator norm is at most `âcâ`.
  let g : (F âL[ð] G) âL[ð] G := ContinuousLinearMap.apply ð G c
  have h := g.norm_compContinuousMultilinearMap_le (iteratedFDerivWithin ð n f s x)
  rw [â g.iteratedFDerivWithin_comp_left hf hs hx hn] at h
  refine h.trans (mul_le_mul_of_nonneg_right ?_ (norm_nonneg _))
  refine g.opNorm_le_bound (norm_nonneg _) fun f => ?_
  rw [ContinuousLinearMap.apply_apply, mul_comm]
  exact f.le_opNorm c
/-- Whole-space version of `norm_iteratedFDerivWithin_clm_apply_const`. -/
theorem norm_iteratedFDeriv_clm_apply_const {f : E â F âL[ð] G} {c : F} {x : E} {N : ââ} {n : â}
    (hf : ContDiff ð N f) (hn : ân †N) :
    âiteratedFDeriv ð n (fun y : E => (f y) c) xâ †âcâ * âiteratedFDeriv ð n f xâ := by
  simp only [â iteratedFDerivWithin_univ]
  exact norm_iteratedFDerivWithin_clm_apply_const hf.contDiffOn uniqueDiffOn_univ
    (Set.mem_univ x) hn
end Apply
|
Analysis\Calculus\ContDiff\Defs.lean | /-
Copyright (c) 2019 Sébastien Gouëzel. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Sébastien Gouëzel
-/
import Mathlib.Analysis.Calculus.FDeriv.Equiv
import Mathlib.Analysis.Calculus.FormalMultilinearSeries
/-!
# Higher differentiability
A function is `C^1` on a domain if it is differentiable there, and its derivative is continuous.
By induction, it is `C^n` if it is `C^{n-1}` and its (n-1)-th derivative is `C^1` there or,
equivalently, if it is `C^1` and its derivative is `C^{n-1}`.
Finally, it is `C^â` if it is `C^n` for all n.
We formalize these notions by defining iteratively the `n+1`-th derivative of a function as the
derivative of the `n`-th derivative. It is called `iteratedFDeriv ð n f x` where `ð` is the
field, `n` is the number of iterations, `f` is the function and `x` is the point, and it is given
as an `n`-multilinear map. We also define a version `iteratedFDerivWithin` relative to a domain,
as well as predicates `ContDiffWithinAt`, `ContDiffAt`, `ContDiffOn` and
`ContDiff` saying that the function is `C^n` within a set at a point, at a point, on a set
and on the whole space respectively.
To avoid the issue of choice when choosing a derivative in sets where the derivative is not
necessarily unique, `ContDiffOn` is not defined directly in terms of the
regularity of the specific choice `iteratedFDerivWithin ð n f s` inside `s`, but in terms of the
existence of a nice sequence of derivatives, expressed with a predicate
`HasFTaylorSeriesUpToOn`.
We prove basic properties of these notions.
## Main definitions and results
Let `f : E â F` be a map between normed vector spaces over a nontrivially normed field `ð`.
* `HasFTaylorSeriesUpTo n f p`: expresses that the formal multilinear series `p` is a sequence
of iterated derivatives of `f`, up to the `n`-th term (where `n` is a natural number or `â`).
* `HasFTaylorSeriesUpToOn n f p s`: same thing, but inside a set `s`. The notion of derivative
is now taken inside `s`. In particular, derivatives don't have to be unique.
* `ContDiff ð n f`: expresses that `f` is `C^n`, i.e., it admits a Taylor series up to
rank `n`.
* `ContDiffOn ð n f s`: expresses that `f` is `C^n` in `s`.
* `ContDiffAt ð n f x`: expresses that `f` is `C^n` around `x`.
* `ContDiffWithinAt ð n f s x`: expresses that `f` is `C^n` around `x` within the set `s`.
* `iteratedFDerivWithin ð n f s x` is an `n`-th derivative of `f` over the field `ð` on the
set `s` at the point `x`. It is a continuous multilinear map from `E^n` to `F`, defined as a
derivative within `s` of `iteratedFDerivWithin ð (n-1) f s` if one exists, and `0` otherwise.
* `iteratedFDeriv ð n f x` is the `n`-th derivative of `f` over the field `ð` at the point `x`.
It is a continuous multilinear map from `E^n` to `F`, defined as a derivative of
`iteratedFDeriv ð (n-1) f` if one exists, and `0` otherwise.
In sets of unique differentiability, `ContDiffOn ð n f s` can be expressed in terms of the
properties of `iteratedFDerivWithin ð m f s` for `m †n`. In the whole space,
`ContDiff ð n f` can be expressed in terms of the properties of `iteratedFDeriv ð m f`
for `m †n`.
## Implementation notes
The definitions in this file are designed to work on any field `ð`. They are sometimes slightly more
complicated than the naive definitions one would guess from the intuition over the real or complex
numbers, but they are designed to circumvent the lack of gluing properties and partitions of unity
in general. In the usual situations, they coincide with the usual definitions.
### Definition of `C^n` functions in domains
One could define `C^n` functions in a domain `s` by fixing an arbitrary choice of derivatives (this
is what we do with `iteratedFDerivWithin`) and requiring that all these derivatives up to `n` are
continuous. If the derivative is not unique, this could lead to strange behavior like two `C^n`
functions `f` and `g` on `s` whose sum is not `C^n`. A better definition is thus to say that a
function is `C^n` inside `s` if it admits a sequence of derivatives up to `n` inside `s`.
This definition still has the problem that a function which is locally `C^n` would not need to
be `C^n`, as different choices of sequences of derivatives around different points might possibly
not be glued together to give a globally defined sequence of derivatives. (Note that this issue
can not happen over reals, thanks to partition of unity, but the behavior over a general field is
not so clear, and we want a definition for general fields). Also, there are locality
problems for the order parameter: one could imagine a function which, for each `n`, has a nice
sequence of derivatives up to order `n`, but they do not coincide for varying `n` and can therefore
not be glued to give rise to an infinite sequence of derivatives. This would give a function
which is `C^n` for all `n`, but not `C^â`. We solve this issue by putting locality conditions
in space and order in our definition of `ContDiffWithinAt` and `ContDiffOn`.
The resulting definition is slightly more complicated to work with (in fact not so much), but it
gives rise to completely satisfactory theorems.
For instance, with this definition, a real function which is `C^m` (but not better) on `(-1/m, 1/m)`
for each natural `m` is by definition `C^â` at `0`.
There is another issue with the definition of `ContDiffWithinAt ð n f s x`. We can
require the existence and good behavior of derivatives up to order `n` on a neighborhood of `x`
within `s`. However, this does not imply continuity or differentiability within `s` of the function
at `x` when `x` does not belong to `s`. Therefore, we require such existence and good behavior on
a neighborhood of `x` within `s ⪠{x}` (which appears as `insert x s` in this file).
### Side of the composition, and universe issues
With a naïve direct definition, the `n`-th derivative of a function belongs to the space
`E âL[ð] (E âL[ð] (E ... F)...)))` where there are n iterations of `E âL[ð]`. This space
may also be seen as the space of continuous multilinear functions on `n` copies of `E` with
values in `F`, by uncurrying. This is the point of view that is usually adopted in textbooks,
and that we also use. This means that the definition and the first proofs are slightly involved,
as one has to keep track of the uncurrying operation. The uncurrying can be done from the
left or from the right, amounting to defining the `n+1`-th derivative either as the derivative of
the `n`-th derivative, or as the `n`-th derivative of the derivative.
For proofs, it would be more convenient to use the latter approach (from the right),
as it means to prove things at the `n+1`-th step we only need to understand well enough the
derivative in `E âL[ð] F` (contrary to the approach from the left, where one would need to know
enough on the `n`-th derivative to deduce things on the `n+1`-th derivative).
However, the definition from the right leads to a universe polymorphism problem: if we define
`iteratedFDeriv ð (n + 1) f x = iteratedFDeriv ð n (fderiv ð f) x` by induction, we need to
generalize over all spaces (as `f` and `fderiv ð f` don't take values in the same space). It is
only possible to generalize over all spaces in some fixed universe in an inductive definition.
For `f : E â F`, then `fderiv ð f` is a map `E â (E âL[ð] F)`. Therefore, the definition will only
work if `F` and `E âL[ð] F` are in the same universe.
This issue does not appear with the definition from the left, where one does not need to generalize
over all spaces. Therefore, we use the definition from the left. This means some proofs later on
become a little bit more complicated: to prove that a function is `C^n`, the most efficient approach
is to exhibit a formula for its `n`-th derivative and prove it is continuous (contrary to the
inductive approach where one would prove smoothness statements without giving a formula for the
derivative). In the end, this approach is still satisfactory as it is good to have formulas for the
iterated derivatives in various constructions.
One point where we depart from this explicit approach is in the proof of smoothness of a
composition: there is a formula for the `n`-th derivative of a composition (Faà di Bruno's formula),
but it is very complicated and barely usable, while the inductive proof is very simple. Thus, we
give the inductive proof. As explained above, it works by generalizing over the target space, hence
it only works well if all spaces belong to the same universe. To get the general version, we lift
things to a common universe using a trick.
### Variables management
The textbook definitions and proofs use various identifications and abuse of notations, for instance
when saying that the natural space in which the derivative lives, i.e.,
`E âL[ð] (E âL[ð] ( ... âL[ð] F))`, is the same as a space of multilinear maps. When doing things
formally, we need to provide explicit maps for these identifications, and chase some diagrams to see
everything is compatible with the identifications. In particular, one needs to check that taking the
derivative and then doing the identification, or first doing the identification and then taking the
derivative, gives the same result. The key point for this is that taking the derivative commutes
with continuous linear equivalences. Therefore, we need to implement all our identifications with
continuous linear equivs.
## Notations
We use the notation `E [Ãn]âL[ð] F` for the space of continuous multilinear maps on `E^n` with
values in `F`. This is the space in which the `n`-th derivative of a function from `E` to `F` lives.
In this file, we denote `†: ââ` with `â`.
## Tags
derivative, differentiability, higher derivative, `C^n`, multilinear, Taylor series, formal series
-/
noncomputable section
open scoped Classical
open NNReal Topology Filter
local notation "â" => (†: ââ)
/-
Porting note: These lines are not required in Mathlib4.
attribute [local instance 1001]
NormedAddCommGroup.toAddCommGroup NormedSpace.toModule' AddCommGroup.toAddCommMonoid
-/
open Set Fin Filter Function
universe u uE uF uG uX
variable {ð : Type u} [NontriviallyNormedField ð] {E : Type uE} [NormedAddCommGroup E]
[NormedSpace ð E] {F : Type uF} [NormedAddCommGroup F] [NormedSpace ð F] {G : Type uG}
[NormedAddCommGroup G] [NormedSpace ð G] {X : Type uX} [NormedAddCommGroup X] [NormedSpace ð X]
{s sâ t u : Set E} {f fâ : E â F} {g : F â G} {x xâ : E} {c : F} {m n : ââ}
{p : E â FormalMultilinearSeries ð E F}
/-! ### Functions with a Taylor series on a domain -/
/-- `HasFTaylorSeriesUpToOn n f p s` registers the fact that `p 0 = f` and `p (m+1)` is a
derivative of `p m` for `m < n`, and is continuous for `m †n`. This is a predicate analogous to
`HasFDerivWithinAt` but for higher order derivatives.
Notice that `p` does not sum up to `f` on the diagonal (`FormalMultilinearSeries.sum`), even if
`f` is analytic and `n = â`: an additional `1/m!` factor on the `m`th term is necessary for that. -/
structure HasFTaylorSeriesUpToOn (n : ââ) (f : E â F) (p : E â FormalMultilinearSeries ð E F)
    (s : Set E) : Prop where
  -- The zeroth term of the series agrees with `f` on `s` (as a `0`-multilinear map).
  zero_eq : â x â s, (p x 0).uncurry0 = f x
  -- For `m < n`, the `(m+1)`-th term is a derivative within `s` of the `m`-th term.
  protected fderivWithin : â m : â, (m : ââ) < n â â x â s,
    HasFDerivWithinAt (p · m) (p x m.succ).curryLeft s x
  -- For `m †n`, the `m`-th term is continuous on `s`.
  cont : â m : â, (m : ââ) †n â ContinuousOn (p · m) s
/-- Reformulation of the `zero_eq` field: on `s`, the zeroth term of the series is the image of
`f x` under the canonical isometry between `F` and `0`-multilinear maps. -/
theorem HasFTaylorSeriesUpToOn.zero_eq' (h : HasFTaylorSeriesUpToOn n f p s) {x : E} (hx : x â s) :
    p x 0 = (continuousMultilinearCurryFin0 ð E F).symm (f x) := by
  rw [â h.zero_eq x hx]
  exact (p x 0).uncurry0_curry0.symm
/-- If two functions coincide on a set `s`, then a Taylor series for the first one is as well a
Taylor series for the second one. -/
theorem HasFTaylorSeriesUpToOn.congr (h : HasFTaylorSeriesUpToOn n f p s)
    (hâ : â x â s, fâ x = f x) : HasFTaylorSeriesUpToOn n fâ p s := by
  -- Only the `zero_eq` field mentions the function itself; the other fields carry over as-is.
  refine âšfun x hx => ?_, h.fderivWithin, h.contâ©
  rw [hâ x hx]
  exact h.zero_eq x hx
/-- A Taylor series on `s` restricts to a Taylor series on any subset `t â s`. -/
theorem HasFTaylorSeriesUpToOn.mono (h : HasFTaylorSeriesUpToOn n f p s) {t : Set E} (hst : t â s) :
    HasFTaylorSeriesUpToOn n f p t :=
  âšfun x hx => h.zero_eq x (hst hx), fun m hm x hx => (h.fderivWithin m hm x (hst hx)).mono hst,
    fun m hm => (h.cont m hm).mono hstâ©
/-- A Taylor series up to order `n` is also a Taylor series up to any smaller order `m †n`. -/
theorem HasFTaylorSeriesUpToOn.of_le (h : HasFTaylorSeriesUpToOn n f p s) (hmn : m †n) :
    HasFTaylorSeriesUpToOn m f p s :=
  âšh.zero_eq, fun k hk x hx => h.fderivWithin k (lt_of_lt_of_le hk hmn) x hx, fun k hk =>
    h.cont k (le_trans hk hmn)â©
/-- A function admitting a Taylor series on `s` is continuous on `s`: its zeroth term is
continuous on `s` and agrees with `f` there. -/
theorem HasFTaylorSeriesUpToOn.continuousOn (h : HasFTaylorSeriesUpToOn n f p s) :
    ContinuousOn f s := by
  have := (h.cont 0 bot_le).congr fun x hx => (h.zero_eq' hx).symm
  rwa [â (continuousMultilinearCurryFin0 ð E F).symm.comp_continuousOn_iff]
/-- `p` is a Taylor series of `f` up to order `0` iff `f` is continuous on `s` and the zeroth
term of `p` agrees with `f` on `s`. -/
theorem hasFTaylorSeriesUpToOn_zero_iff :
    HasFTaylorSeriesUpToOn 0 f p s â ContinuousOn f s â§ â x â s, (p x 0).uncurry0 = f x := by
  refine âšfun H => âšH.continuousOn, H.zero_eqâ©, fun H =>
    âšH.2, fun m hm => False.elim (not_le.2 hm bot_le), fun m hm ⊠?_â©â©
  -- For the continuity field, only `m = 0` is possible when `n = 0`.
  obtain rfl : m = 0 := mod_cast hm.antisymm (zero_le _)
  have : EqOn (p · 0) ((continuousMultilinearCurryFin0 ð E F).symm â f) s := fun x hx âŠ
    (continuousMultilinearCurryFin0 ð E F).eq_symm_apply.2 (H.2 x hx)
  rw [continuousOn_congr this, LinearIsometryEquiv.comp_continuousOn_iff]
  exact H.1
/-- `p` is a Taylor series of `f` up to order `â` iff it is a Taylor series up to every finite
order. -/
theorem hasFTaylorSeriesUpToOn_top_iff :
    HasFTaylorSeriesUpToOn â f p s â â n : â, HasFTaylorSeriesUpToOn n f p s := by
  constructor
  · intro H n; exact H.of_le le_top
  · intro H
    constructor
    · exact (H 0).zero_eq
    · intro m _
      -- The derivative at order `m` is provided by the series up to order `m + 1`.
      apply (H m.succ).fderivWithin m (WithTop.coe_lt_coe.2 (lt_add_one m))
    · intro m _
      apply (H m).cont m le_rfl
/-- In the case that `n = â` we don't need the continuity assumption in
`HasFTaylorSeriesUpToOn`: differentiability at every order already implies it. -/
theorem hasFTaylorSeriesUpToOn_top_iff' :
    HasFTaylorSeriesUpToOn â f p s â
      (â x â s, (p x 0).uncurry0 = f x) â§
        â m : â, â x â s, HasFDerivWithinAt (fun y => p y m) (p x m.succ).curryLeft s x :=
  -- Everything except for the continuity is trivial:
  âšfun h => âšh.1, fun m => h.2 m (WithTop.coe_lt_top m)â©, fun h =>
    âšh.1, fun m _ => h.2 m, fun m _ x hx =>
      -- The continuity follows from the existence of a derivative:
      (h.2 m x hx).continuousWithinAtâ©â©
/-- If a function has a Taylor series at order at least `1`, then the term of order `1` of this
series is a derivative of `f`. -/
theorem HasFTaylorSeriesUpToOn.hasFDerivWithinAt (h : HasFTaylorSeriesUpToOn n f p s) (hn : 1 †n)
    (hx : x â s) : HasFDerivWithinAt f (continuousMultilinearCurryFin1 ð E F (p x 1)) s x := by
  have A : â y â s, f y = (continuousMultilinearCurryFin0 ð E F) (p y 0) := fun y hy âŠ
    (h.zero_eq y hy).symm
  -- It suffices to differentiate the zeroth term of the series, which agrees with `f` on `s`.
  suffices H : HasFDerivWithinAt (continuousMultilinearCurryFin0 ð E F â (p · 0))
    (continuousMultilinearCurryFin1 ð E F (p x 1)) s x from H.congr A (A x hx)
  rw [LinearIsometryEquiv.comp_hasFDerivWithinAt_iff']
  have : ((0 : â) : ââ) < n := zero_lt_one.trans_le hn
  convert h.fderivWithin _ this x hx
  ext y v
  -- Both sides evaluate `p x 1` at the same single-coordinate vector.
  change (p x 1) (snoc 0 y) = (p x 1) (cons y v)
  congr with i
  rw [Unique.eq_default (α := Fin 1) i]
  rfl
/-- A function with a Taylor series at order at least `1` is differentiable on `s`. -/
theorem HasFTaylorSeriesUpToOn.differentiableOn (h : HasFTaylorSeriesUpToOn n f p s) (hn : 1 †n) :
    DifferentiableOn ð f s := fun _x hx => (h.hasFDerivWithinAt hn hx).differentiableWithinAt
/-- If a function has a Taylor series at order at least `1` on a neighborhood of `x`, then the term
of order `1` of this series is a derivative of `f` at `x`. -/
theorem HasFTaylorSeriesUpToOn.hasFDerivAt (h : HasFTaylorSeriesUpToOn n f p s) (hn : 1 †n)
    (hx : s â ð x) : HasFDerivAt f (continuousMultilinearCurryFin1 ð E F (p x 1)) x :=
  -- A derivative within a set that is a neighborhood of `x` is a genuine derivative at `x`.
  (h.hasFDerivWithinAt hn (mem_of_mem_nhds hx)).hasFDerivAt hx
/-- If a function has a Taylor series at order at least `1` on a neighborhood of `x`, then
in a neighborhood of `x`, the term of order `1` of this series is a derivative of `f`. -/
theorem HasFTaylorSeriesUpToOn.eventually_hasFDerivAt (h : HasFTaylorSeriesUpToOn n f p s)
    (hn : 1 †n) (hx : s â ð x) :
    âá¶ y in ð x, HasFDerivAt f (continuousMultilinearCurryFin1 ð E F (p y 1)) y :=
  -- `s` is a neighborhood of every point of some neighborhood of `x`.
  (eventually_eventually_nhds.2 hx).mono fun _y hy => h.hasFDerivAt hn hy
/-- If a function has a Taylor series at order at least `1` on a neighborhood of `x`, then
it is differentiable at `x`. -/
theorem HasFTaylorSeriesUpToOn.differentiableAt (h : HasFTaylorSeriesUpToOn n f p s) (hn : 1 †n)
    (hx : s â ð x) : DifferentiableAt ð f x :=
  -- Immediate from the existence of the order-one derivative at `x`.
  (h.hasFDerivAt hn hx).differentiableAt
/-- `p` is a Taylor series of `f` up to `n+1` if and only if `p` is a Taylor series up to `n`, and
`p (n + 1)` is a derivative of `p n`. -/
theorem hasFTaylorSeriesUpToOn_succ_iff_left {n : â} :
    HasFTaylorSeriesUpToOn (n + 1) f p s â
      HasFTaylorSeriesUpToOn n f p s â§
        (â x â s, HasFDerivWithinAt (fun y => p y n) (p x n.succ).curryLeft s x) â§
          ContinuousOn (fun x => p x (n + 1)) s := by
  constructor
  · exact fun h ⊠âšh.of_le (WithTop.coe_le_coe.2 (Nat.le_succ n)),
      h.fderivWithin _ (WithTop.coe_lt_coe.2 (lt_add_one n)), h.cont (n + 1) le_rflâ©
  · intro h
    constructor
    · exact h.1.zero_eq
    -- Derivative field: split according to whether `m < n` or `m = n`.
    · intro m hm
      by_cases h' : m < n
      · exact h.1.fderivWithin m (WithTop.coe_lt_coe.2 h')
      · have : m = n := Nat.eq_of_lt_succ_of_not_lt (WithTop.coe_lt_coe.1 hm) h'
        rw [this]
        exact h.2.1
    -- Continuity field: split according to whether `m †n` or `m = n + 1`.
    · intro m hm
      by_cases h' : m †n
      · apply h.1.cont m (WithTop.coe_le_coe.2 h')
      · have : m = n + 1 := le_antisymm (WithTop.coe_le_coe.1 hm) (not_le.1 h')
        rw [this]
        exact h.2.2
#adaptation_note
/--
After https://github.com/leanprover/lean4/pull/4119,
without `set_option maxSynthPendingDepth 2` this proof needs substantial repair.
-/
set_option maxSynthPendingDepth 2 in
-- Porting note: this was split out from `hasFTaylorSeriesUpToOn_succ_iff_right` to avoid a timeout.
-- If `p` is a Taylor series of `f` up to order `n + 1`, then the shifted series `(p ·).shift`
-- is a Taylor series up to order `n` for the order-one term `x ⊠p x 1` (viewed in `E âL[ð] F`).
theorem HasFTaylorSeriesUpToOn.shift_of_succ
    {n : â} (H : HasFTaylorSeriesUpToOn (n + 1 : â) f p s) :
    (HasFTaylorSeriesUpToOn n (fun x => continuousMultilinearCurryFin1 ð E F (p x 1))
      (fun x => (p x).shift)) s := by
  constructor
  · intro x _
    rfl
  · intro m (hm : (m : ââ) < n) x (hx : x â s)
    have A : (m.succ : ââ) < n.succ := by
      rw [Nat.cast_lt] at hm â¢
      exact Nat.succ_lt_succ hm
    -- The `m`-th term of the shifted series is `p · m.succ`, transported through re-currying.
    change HasFDerivWithinAt ((continuousMultilinearCurryRightEquiv' ð m E F).symm â (p · m.succ))
      (p x m.succ.succ).curryRight.curryLeft s x
    rw [((continuousMultilinearCurryRightEquiv' ð m E F).symm).comp_hasFDerivWithinAt_iff']
    convert H.fderivWithin _ A x hx
    ext y v
    change p x (m + 2) (snoc (cons y (init v)) (v (last _))) = p x (m + 2) (cons y v)
    rw [â cons_snoc_eq_snoc_cons, snoc_init_self]
  · intro m (hm : (m : ââ) †n)
    -- Continuity of the shifted term follows from continuity of `p · (m + 1)`.
    suffices A : ContinuousOn (p · (m + 1)) s from
      ((continuousMultilinearCurryRightEquiv' ð m E F).symm).continuous.comp_continuousOn A
    refine H.cont _ ?_
    rw [Nat.cast_le] at hm â¢
    exact Nat.succ_le_succ hm
/-- `p` is a Taylor series of `f` up to `n+1` if and only if `p.shift` is a Taylor series up to `n`
for `p 1`, which is a derivative of `f`. -/
theorem hasFTaylorSeriesUpToOn_succ_iff_right {n : â} :
    HasFTaylorSeriesUpToOn (n + 1 : â) f p s â
      (â x â s, (p x 0).uncurry0 = f x) â§
        (â x â s, HasFDerivWithinAt (fun y => p y 0) (p x 1).curryLeft s x) â§
          HasFTaylorSeriesUpToOn n (fun x => continuousMultilinearCurryFin1 ð E F (p x 1))
            (fun x => (p x).shift) s := by
  constructor
  · intro H
    refine âšH.zero_eq, H.fderivWithin 0 (Nat.cast_lt.2 (Nat.succ_pos n)), ?_â©
    exact H.shift_of_succ
  · rintro âšHzero_eq, Hfderiv_zero, Htaylorâ©
    constructor
    · exact Hzero_eq
    · intro m (hm : (m : ââ) < n.succ) x (hx : x â s)
      cases' m with m
      -- Order `0`: this is exactly the hypothesis on `p · 0`.
      · exact Hfderiv_zero x hx
      -- Order `m + 1`: transported from the shifted series through the currying isometry.
      · have A : (m : ââ) < n := by
          rw [Nat.cast_lt] at hm â¢
          exact Nat.lt_of_succ_lt_succ hm
        have :
            HasFDerivWithinAt ((continuousMultilinearCurryRightEquiv' ð m E F).symm â (p · m.succ))
              ((p x).shift m.succ).curryLeft s x := Htaylor.fderivWithin _ A x hx
        rw [LinearIsometryEquiv.comp_hasFDerivWithinAt_iff'] at this
        convert this
        ext y v
        change
          (p x (Nat.succ (Nat.succ m))) (cons y v) =
            (p x m.succ.succ) (snoc (cons y (init v)) (v (last _)))
        rw [â cons_snoc_eq_snoc_cons, snoc_init_self]
    · intro m (hm : (m : ââ) †n.succ)
      cases' m with m
      -- Continuity at order `0` follows from differentiability.
      · have : DifferentiableOn ð (fun x => p x 0) s := fun x hx =>
          (Hfderiv_zero x hx).differentiableWithinAt
        exact this.continuousOn
      · refine (continuousMultilinearCurryRightEquiv' ð m E F).symm.comp_continuousOn_iff.mp ?_
        refine Htaylor.cont _ ?_
        rw [Nat.cast_le] at hm â¢
        exact Nat.lt_succ_iff.mp hm
/-! ### Smooth functions within a set around a point -/
variable (ð)
/-- A function is continuously differentiable up to order `n` within a set `s` at a point `x` if
it admits continuous derivatives up to order `n` in a neighborhood of `x` in `s ⪠{x}`.
For `n = â`, we only require that this holds up to any finite order (where the neighborhood may
depend on the finite order we consider).
For instance, a real function which is `C^m` on `(-1/m, 1/m)` for each natural `m`, but not
better, is `C^â` at `0` within `univ`.
-/
def ContDiffWithinAt (n : ââ) (f : E â F) (s : Set E) : Prop :=
-- For every finite order `m †n` there is a neighborhood of `x` in `s ⪠{x}` on which
-- `f` admits a formal Taylor series up to order `m`.
â m : â, (m : ââ) †n â â u â ð[insert x s] x,
â p : E â FormalMultilinearSeries ð E F, HasFTaylorSeriesUpToOn m f p u
variable {ð}
-- For a finite order `n`, the universal quantification over `m †n` collapses to `m = n`.
theorem contDiffWithinAt_nat {n : â} :
ContDiffWithinAt ð n f s x â â u â ð[insert x s] x,
â p : E â FormalMultilinearSeries ð E F, HasFTaylorSeriesUpToOn n f p u :=
âšfun H => H n le_rfl, fun âšu, hu, p, hpâ© _m hm => âšu, hu, p, hp.of_le hmâ©â©
-- Smoothness of order `n` implies smoothness of any order `m †n`.
theorem ContDiffWithinAt.of_le (h : ContDiffWithinAt ð n f s x) (hmn : m †n) :
ContDiffWithinAt ð m f s x := fun k hk => h k (le_trans hk hmn)
theorem contDiffWithinAt_iff_forall_nat_le :
ContDiffWithinAt ð n f s x â â m : â, âm †n â ContDiffWithinAt ð m f s x :=
âšfun H _m hm => H.of_le hm, fun H m hm => H m hm _ le_rflâ©
theorem contDiffWithinAt_top : ContDiffWithinAt ð â f s x â â n : â, ContDiffWithinAt ð n f s x :=
contDiffWithinAt_iff_forall_nat_le.trans <| by simp only [forall_prop_of_true, le_top]
-- Any `C^n` function is in particular continuous within the set at the point.
theorem ContDiffWithinAt.continuousWithinAt (h : ContDiffWithinAt ð n f s x) :
ContinuousWithinAt f s x := by
rcases h 0 bot_le with âšu, hu, p, Hâ©
rw [mem_nhdsWithin_insert] at hu
exact (H.continuousOn.continuousWithinAt hu.1).mono_of_mem hu.2
-- `ContDiffWithinAt` only depends on the germ of the function near the point (within the set).
theorem ContDiffWithinAt.congr_of_eventuallyEq (h : ContDiffWithinAt ð n f s x)
(hâ : fâ =á¶ [ð[s] x] f) (hx : fâ x = f x) : ContDiffWithinAt ð n fâ s x := fun m hm =>
let âšu, hu, p, Hâ© := h m hm
âš{ x â u | fâ x = f x }, Filter.inter_mem hu (mem_nhdsWithin_insert.2 âšhx, hââ©), p,
(H.mono (sep_subset _ _)).congr fun _ => And.rightâ©
theorem ContDiffWithinAt.congr_of_eventuallyEq_insert (h : ContDiffWithinAt ð n f s x)
(hâ : fâ =á¶ [ð[insert x s] x] f) : ContDiffWithinAt ð n fâ s x :=
h.congr_of_eventuallyEq (nhdsWithin_mono x (subset_insert x s) hâ)
(mem_of_mem_nhdsWithin (mem_insert x s) hâ : _)
theorem ContDiffWithinAt.congr_of_eventually_eq' (h : ContDiffWithinAt ð n f s x)
(hâ : fâ =á¶ [ð[s] x] f) (hx : x â s) : ContDiffWithinAt ð n fâ s x :=
h.congr_of_eventuallyEq hâ <| hâ.self_of_nhdsWithin hx
theorem Filter.EventuallyEq.contDiffWithinAt_iff (hâ : fâ =á¶ [ð[s] x] f) (hx : fâ x = f x) :
ContDiffWithinAt ð n fâ s x â ContDiffWithinAt ð n f s x :=
âšfun H => ContDiffWithinAt.congr_of_eventuallyEq H hâ.symm hx.symm, fun H =>
H.congr_of_eventuallyEq hâ hxâ©
theorem ContDiffWithinAt.congr (h : ContDiffWithinAt ð n f s x) (hâ : â y â s, fâ y = f y)
(hx : fâ x = f x) : ContDiffWithinAt ð n fâ s x :=
h.congr_of_eventuallyEq (Filter.eventuallyEq_of_mem self_mem_nhdsWithin hâ) hx
theorem ContDiffWithinAt.congr' (h : ContDiffWithinAt ð n f s x) (hâ : â y â s, fâ y = f y)
(hx : x â s) : ContDiffWithinAt ð n fâ s x :=
h.congr hâ (hâ _ hx)
-- Monotonicity: the property transfers to any set `t` such that `s` is a `t`-neighborhood of `x`.
theorem ContDiffWithinAt.mono_of_mem (h : ContDiffWithinAt ð n f s x) {t : Set E}
(hst : s â ð[t] x) : ContDiffWithinAt ð n f t x := by
intro m hm
rcases h m hm with âšu, hu, p, Hâ©
exact âšu, nhdsWithin_le_of_mem (insert_mem_nhdsWithin_insert hst) hu, p, Hâ©
theorem ContDiffWithinAt.mono (h : ContDiffWithinAt ð n f s x) {t : Set E} (hst : t â s) :
ContDiffWithinAt ð n f t x :=
h.mono_of_mem <| Filter.mem_of_superset self_mem_nhdsWithin hst
theorem ContDiffWithinAt.congr_nhds (h : ContDiffWithinAt ð n f s x) {t : Set E}
(hst : ð[s] x = ð[t] x) : ContDiffWithinAt ð n f t x :=
h.mono_of_mem <| hst âž self_mem_nhdsWithin
theorem contDiffWithinAt_congr_nhds {t : Set E} (hst : ð[s] x = ð[t] x) :
ContDiffWithinAt ð n f s x â ContDiffWithinAt ð n f t x :=
âšfun h => h.congr_nhds hst, fun h => h.congr_nhds hst.symmâ©
theorem contDiffWithinAt_inter' (h : t â ð[s] x) :
ContDiffWithinAt ð n f (s â© t) x â ContDiffWithinAt ð n f s x :=
contDiffWithinAt_congr_nhds <| Eq.symm <| nhdsWithin_restrict'' _ h
theorem contDiffWithinAt_inter (h : t â ð x) :
ContDiffWithinAt ð n f (s â© t) x â ContDiffWithinAt ð n f s x :=
contDiffWithinAt_inter' (mem_nhdsWithin_of_mem_nhds h)
-- Inserting the base point itself does not change the property (the definition already
-- works with `insert x s`).
theorem contDiffWithinAt_insert_self :
ContDiffWithinAt ð n f (insert x s) x â ContDiffWithinAt ð n f s x := by
simp_rw [ContDiffWithinAt, insert_idem]
theorem contDiffWithinAt_insert {y : E} :
ContDiffWithinAt ð n f (insert y s) x â ContDiffWithinAt ð n f s x := by
rcases eq_or_ne x y with (rfl | h)
· exact contDiffWithinAt_insert_self
simp_rw [ContDiffWithinAt, insert_comm x y, nhdsWithin_insert_of_ne h]
alias âšContDiffWithinAt.of_insert, ContDiffWithinAt.insert'â© := contDiffWithinAt_insert
protected theorem ContDiffWithinAt.insert (h : ContDiffWithinAt ð n f s x) :
ContDiffWithinAt ð n f (insert x s) x :=
h.insert'
/-- If a function is `C^n` within a set at a point, with `n ⥠1`, then it is differentiable
within this set at this point. -/
theorem ContDiffWithinAt.differentiable_within_at' (h : ContDiffWithinAt ð n f s x) (hn : 1 †n) :
DifferentiableWithinAt ð f (insert x s) x := by
-- Extract a first-order Taylor series on a neighborhood, then restrict to an open piece of it.
rcases h 1 hn with âšu, hu, p, Hâ©
rcases mem_nhdsWithin.1 hu with âšt, t_open, xt, tuâ©
rw [inter_comm] at tu
have := ((H.mono tu).differentiableOn le_rfl) x âšmem_insert x s, xtâ©
exact (differentiableWithinAt_inter (IsOpen.mem_nhds t_open xt)).1 this
-- Same statement without the `insert`, by monotonicity.
theorem ContDiffWithinAt.differentiableWithinAt (h : ContDiffWithinAt ð n f s x) (hn : 1 †n) :
DifferentiableWithinAt ð f s x :=
(h.differentiable_within_at' hn).mono (subset_insert x s)
/-- A function is `C^(n + 1)` on a domain iff locally, it has a derivative which is `C^n`. -/
theorem contDiffWithinAt_succ_iff_hasFDerivWithinAt {n : â} :
ContDiffWithinAt ð (n + 1 : â) f s x â â u â ð[insert x s] x, â f' : E â E âL[ð] F,
(â x â u, HasFDerivWithinAt f (f' x) u x) â§ ContDiffWithinAt ð n f' u x := by
constructor
-- Forward: take the `(n+1)`-series; its degree-one term gives the derivative `f'`,
-- and the shifted series witnesses smoothness of `f'`.
· intro h
rcases h n.succ le_rfl with âšu, hu, p, Hpâ©
refine
âšu, hu, fun y => (continuousMultilinearCurryFin1 ð E F) (p y 1), fun y hy =>
Hp.hasFDerivWithinAt (WithTop.coe_le_coe.2 (Nat.le_add_left 1 n)) hy, ?_â©
intro m hm
refine âšu, ?_, fun y : E => (p y).shift, ?_â©
· -- Porting note: without the explicit argument Lean is not sure of the type.
convert @self_mem_nhdsWithin _ _ x u
have : x â insert x s := by simp
exact insert_eq_of_mem (mem_of_mem_nhdsWithin this hu)
· rw [hasFTaylorSeriesUpToOn_succ_iff_right] at Hp
exact Hp.2.2.of_le hm
-- Reverse: from a series `p'` for `f'`, `unshift` (prepending the value of `f`) gives a
-- series of one order higher for `f` itself.
· rintro âšu, hu, f', f'_eq_deriv, Hf'â©
rw [contDiffWithinAt_nat]
rcases Hf' n le_rfl with âšv, hv, p', Hp'â©
refine âšv â© u, ?_, fun x => (p' x).unshift (f x), ?_â©
· apply Filter.inter_mem _ hu
apply nhdsWithin_le_of_mem hu
exact nhdsWithin_mono _ (subset_insert x u) hv
· rw [hasFTaylorSeriesUpToOn_succ_iff_right]
refine âšfun y _ => rfl, fun y hy => ?_, ?_â©
· change
HasFDerivWithinAt (fun z => (continuousMultilinearCurryFin0 ð E F).symm (f z))
(FormalMultilinearSeries.unshift (p' y) (f y) 1).curryLeft (v â© u) y
-- Porting note: needed `erw` here.
-- https://github.com/leanprover-community/mathlib4/issues/5164
erw [LinearIsometryEquiv.comp_hasFDerivWithinAt_iff']
convert (f'_eq_deriv y hy.2).mono inter_subset_right
rw [â Hp'.zero_eq y hy.1]
ext z
change ((p' y 0) (init (@cons 0 (fun _ => E) z 0))) (@cons 0 (fun _ => E) z 0 (last 0)) =
((p' y 0) 0) z
congr
norm_num [eq_iff_true_of_subsingleton]
· convert (Hp'.mono inter_subset_left).congr fun x hx => Hp'.zero_eq x hx.1 using 1
· ext x y
change p' x 0 (init (@snoc 0 (fun _ : Fin 1 => E) 0 y)) y = p' x 0 0 y
rw [init_snoc]
· ext x k v y
change p' x k (init (@snoc k (fun _ : Fin k.succ => E) v y))
(@snoc k (fun _ : Fin k.succ => E) v y (last k)) = p' x k v y
rw [snoc_last, init_snoc]
/-- A version of `contDiffWithinAt_succ_iff_hasFDerivWithinAt` where all derivatives
are taken within the same set. -/
theorem contDiffWithinAt_succ_iff_hasFDerivWithinAt' {n : â} :
ContDiffWithinAt ð (n + 1 : â) f s x â
â u â ð[insert x s] x, u â insert x s â§ â f' : E â E âL[ð] F,
(â x â u, HasFDerivWithinAt f (f' x) s x) â§ ContDiffWithinAt ð n f' s x := by
refine âšfun hf => ?_, ?_â©
-- Forward: shrink the neighborhood from the unprimed version so it sits inside `insert x s`.
· obtain âšu, hu, f', huf', hf'â© := contDiffWithinAt_succ_iff_hasFDerivWithinAt.mp hf
obtain âšw, hw, hxw, hwuâ© := mem_nhdsWithin.mp hu
rw [inter_comm] at hwu
refine âšinsert x s â© w, inter_mem_nhdsWithin _ (hw.mem_nhds hxw), inter_subset_left, f',
fun y hy => ?_, ?_â©
· refine ((huf' y <| hwu hy).mono hwu).mono_of_mem ?_
refine mem_of_superset ?_ (inter_subset_inter_left _ (subset_insert _ _))
exact inter_mem_nhdsWithin _ (hw.mem_nhds hy.2)
· exact hf'.mono_of_mem (nhdsWithin_mono _ (subset_insert _ _) hu)
-- Reverse: enlarge `s` to `insert x s` and apply the unprimed version.
· rw [â contDiffWithinAt_insert, contDiffWithinAt_succ_iff_hasFDerivWithinAt,
insert_eq_of_mem (mem_insert _ _)]
rintro âšu, hu, hus, f', huf', hf'â©
exact âšu, hu, f', fun y hy => (huf' y hy).insert'.mono hus, hf'.insert.mono husâ©
/-! ### Smooth functions within a set -/
variable (ð)
/-- A function is continuously differentiable up to `n` on `s` if, for any point `x` in `s`, it
admits continuous derivatives up to order `n` on a neighborhood of `x` in `s`.
For `n = â`, we only require that this holds up to any finite order (where the neighborhood may
depend on the finite order we consider).
-/
def ContDiffOn (n : ââ) (f : E â F) (s : Set E) : Prop :=
-- Pointwise: `C^n` within `s` at every point of `s`.
â x â s, ContDiffWithinAt ð n f s x
variable {ð}
-- A global Taylor series on `s` witnesses smoothness on all of `s` directly.
theorem HasFTaylorSeriesUpToOn.contDiffOn {f' : E â FormalMultilinearSeries ð E F}
(hf : HasFTaylorSeriesUpToOn n f f' s) : ContDiffOn ð n f s := by
intro x hx m hm
use s
simp only [Set.insert_eq_of_mem hx, self_mem_nhdsWithin, true_and_iff]
exact âšf', hf.of_le hmâ©
theorem ContDiffOn.contDiffWithinAt (h : ContDiffOn ð n f s) (hx : x â s) :
ContDiffWithinAt ð n f s x :=
h x hx
-- Pointwise smoothness yields smoothness on an open neighborhood intersected with the set.
theorem ContDiffWithinAt.contDiffOn' {m : â} (hm : (m : ââ) †n)
(h : ContDiffWithinAt ð n f s x) :
â u, IsOpen u â§ x â u â§ ContDiffOn ð m f (insert x s â© u) := by
rcases h m hm with âšt, ht, p, hpâ©
rcases mem_nhdsWithin.1 ht with âšu, huo, hxu, hutâ©
rw [inter_comm] at hut
exact âšu, huo, hxu, (hp.mono hut).contDiffOnâ©
theorem ContDiffWithinAt.contDiffOn {m : â} (hm : (m : ââ) †n) (h : ContDiffWithinAt ð n f s x) :
â u â ð[insert x s] x, u â insert x s â§ ContDiffOn ð m f u :=
let âš_u, uo, xu, hâ© := h.contDiffOn' hm
âš_, inter_mem_nhdsWithin _ (uo.mem_nhds xu), inter_subset_left, hâ©
-- `ContDiffWithinAt` (at a finite order) is an eventual property near the point.
protected theorem ContDiffWithinAt.eventually {n : â} (h : ContDiffWithinAt ð n f s x) :
âá¶ y in ð[insert x s] x, ContDiffWithinAt ð n f s y := by
rcases h.contDiffOn le_rfl with âšu, hu, _, hdâ©
have : âá¶ y : E in ð[insert x s] x, u â ð[insert x s] y â§ y â u :=
(eventually_nhdsWithin_nhdsWithin.2 hu).and hu
refine this.mono fun y hy => (hd y hy.2).mono_of_mem ?_
exact nhdsWithin_mono y (subset_insert _ _) hy.1
-- Basic monotonicity and congruence properties of `ContDiffOn`, inherited pointwise
-- from the corresponding `ContDiffWithinAt` lemmas.
theorem ContDiffOn.of_le (h : ContDiffOn ð n f s) (hmn : m †n) : ContDiffOn ð m f s := fun x hx =>
(h x hx).of_le hmn
theorem ContDiffOn.of_succ {n : â} (h : ContDiffOn ð (n + 1) f s) : ContDiffOn ð n f s :=
h.of_le <| WithTop.coe_le_coe.mpr le_self_add
theorem ContDiffOn.one_of_succ {n : â} (h : ContDiffOn ð (n + 1) f s) : ContDiffOn ð 1 f s :=
h.of_le <| WithTop.coe_le_coe.mpr le_add_self
theorem contDiffOn_iff_forall_nat_le : ContDiffOn ð n f s â â m : â, âm †n â ContDiffOn ð m f s :=
âšfun H _ hm => H.of_le hm, fun H x hx m hm => H m hm x hx m le_rflâ©
theorem contDiffOn_top : ContDiffOn ð â f s â â n : â, ContDiffOn ð n f s :=
contDiffOn_iff_forall_nat_le.trans <| by simp only [le_top, forall_prop_of_true]
theorem contDiffOn_all_iff_nat : (â n, ContDiffOn ð n f s) â â n : â, ContDiffOn ð n f s := by
refine âšfun H n => H n, ?_â©
rintro H (_ | n)
exacts [contDiffOn_top.2 H, H n]
theorem ContDiffOn.continuousOn (h : ContDiffOn ð n f s) : ContinuousOn f s := fun x hx =>
(h x hx).continuousWithinAt
theorem ContDiffOn.congr (h : ContDiffOn ð n f s) (hâ : â x â s, fâ x = f x) :
ContDiffOn ð n fâ s := fun x hx => (h x hx).congr hâ (hâ x hx)
theorem contDiffOn_congr (hâ : â x â s, fâ x = f x) : ContDiffOn ð n fâ s â ContDiffOn ð n f s :=
âšfun H => H.congr fun x hx => (hâ x hx).symm, fun H => H.congr hââ©
theorem ContDiffOn.mono (h : ContDiffOn ð n f s) {t : Set E} (hst : t â s) : ContDiffOn ð n f t :=
fun x hx => (h x (hst hx)).mono hst
theorem ContDiffOn.congr_mono (hf : ContDiffOn ð n f s) (hâ : â x â sâ, fâ x = f x) (hs : sâ â s) :
ContDiffOn ð n fâ sâ :=
(hf.mono hs).congr hâ
/-- If a function is `C^n` on a set with `n ⥠1`, then it is differentiable there. -/
theorem ContDiffOn.differentiableOn (h : ContDiffOn ð n f s) (hn : 1 †n) :
DifferentiableOn ð f s := fun x hx => (h x hx).differentiableWithinAt hn
/-- If a function is `C^n` around each point in a set, then it is `C^n` on the set. -/
theorem contDiffOn_of_locally_contDiffOn
(h : â x â s, â u, IsOpen u â§ x â u â§ ContDiffOn ð n f (s â© u)) : ContDiffOn ð n f s := by
intro x xs
rcases h x xs with âšu, u_open, xu, huâ©
apply (contDiffWithinAt_inter _).1 (hu x âšxs, xuâ©)
exact IsOpen.mem_nhds u_open xu
/-- A function is `C^(n + 1)` on a domain iff locally, it has a derivative which is `C^n`. -/
theorem contDiffOn_succ_iff_hasFDerivWithinAt {n : â} :
ContDiffOn ð (n + 1 : â) f s â
â x â s, â u â ð[insert x s] x, â f' : E â E âL[ð] F,
(â x â u, HasFDerivWithinAt f (f' x) u x) â§ ContDiffOn ð n f' u := by
constructor
-- Forward: at each point extract an `(n+1)`-series; its degree-one term is the derivative
-- and the shifted series makes it `C^n` on the whole neighborhood `u`.
· intro h x hx
rcases (h x hx) n.succ le_rfl with âšu, hu, p, Hpâ©
refine
âšu, hu, fun y => (continuousMultilinearCurryFin1 ð E F) (p y 1), fun y hy =>
Hp.hasFDerivWithinAt (WithTop.coe_le_coe.2 (Nat.le_add_left 1 n)) hy, ?_â©
rw [hasFTaylorSeriesUpToOn_succ_iff_right] at Hp
intro z hz m hm
refine âšu, ?_, fun x : E => (p x).shift, Hp.2.2.of_le hmâ©
-- Porting note: without the explicit arguments `convert` can not determine the type.
convert @self_mem_nhdsWithin _ _ z u
exact insert_eq_of_mem hz
-- Reverse: pointwise application of `contDiffWithinAt_succ_iff_hasFDerivWithinAt`.
· intro h x hx
rw [contDiffWithinAt_succ_iff_hasFDerivWithinAt]
rcases h x hx with âšu, u_nhbd, f', hu, hf'â©
have : x â u := mem_of_mem_nhdsWithin (mem_insert _ _) u_nhbd
exact âšu, u_nhbd, f', hu, hf' x thisâ©
/-! ### Iterated derivative within a set -/
variable (ð)
/-- The `n`-th derivative of a function along a set, defined inductively by saying that the `n+1`-th
derivative of `f` is the derivative of the `n`-th derivative of `f` along this set, together with
an uncurrying step to see it as a multilinear map in `n+1` variables.
-/
noncomputable def iteratedFDerivWithin (n : â) (f : E â F) (s : Set E) : E â E[Ãn]âL[ð] F :=
-- Base case: `f x` viewed as a `0`-multilinear map; step: uncurry the derivative of the
-- previous iterate.
Nat.recOn n (fun x => ContinuousMultilinearMap.curry0 ð E (f x)) fun _ rec x =>
ContinuousLinearMap.uncurryLeft (fderivWithin ð rec s x)
/-- Formal Taylor series associated to a function within a set. -/
def ftaylorSeriesWithin (f : E â F) (s : Set E) (x : E) : FormalMultilinearSeries ð E F := fun n =>
iteratedFDerivWithin ð n f s x
variable {ð}
-- Order-zero and left-currying `rfl`-lemmas for `iteratedFDerivWithin`.
@[simp]
theorem iteratedFDerivWithin_zero_apply (m : Fin 0 â E) :
(iteratedFDerivWithin ð 0 f s x : (Fin 0 â E) â F) m = f x :=
rfl
theorem iteratedFDerivWithin_zero_eq_comp :
iteratedFDerivWithin ð 0 f s = (continuousMultilinearCurryFin0 ð E F).symm â f :=
rfl
@[simp]
theorem norm_iteratedFDerivWithin_zero : âiteratedFDerivWithin ð 0 f s xâ = âf xâ := by
-- Porting note: added `comp_apply`.
rw [iteratedFDerivWithin_zero_eq_comp, comp_apply, LinearIsometryEquiv.norm_map]
theorem iteratedFDerivWithin_succ_apply_left {n : â} (m : Fin (n + 1) â E) :
(iteratedFDerivWithin ð (n + 1) f s x : (Fin (n + 1) â E) â F) m =
(fderivWithin ð (iteratedFDerivWithin ð n f s) s x : E â E[Ãn]âL[ð] F) (m 0) (tail m) :=
rfl
/-- Writing explicitly the `n+1`-th derivative as the composition of a currying linear equiv,
and the derivative of the `n`-th derivative. -/
theorem iteratedFDerivWithin_succ_eq_comp_left {n : â} :
iteratedFDerivWithin ð (n + 1) f s =
(continuousMultilinearCurryLeftEquiv ð (fun _ : Fin (n + 1) => E) F :
(E âL[ð] (E [Ãn]âL[ð] F)) â (E [Ãn.succ]âL[ð] F)) â
fderivWithin ð (iteratedFDerivWithin ð n f s) s :=
rfl
theorem fderivWithin_iteratedFDerivWithin {s : Set E} {n : â} :
fderivWithin ð (iteratedFDerivWithin ð n f s) s =
(continuousMultilinearCurryLeftEquiv ð (fun _ : Fin (n + 1) => E) F).symm â
iteratedFDerivWithin ð (n + 1) f s := by
rw [iteratedFDerivWithin_succ_eq_comp_left]
ext1 x
simp only [Function.comp_apply, LinearIsometryEquiv.symm_apply_apply]
theorem norm_fderivWithin_iteratedFDerivWithin {n : â} :
âfderivWithin ð (iteratedFDerivWithin ð n f s) s xâ =
âiteratedFDerivWithin ð (n + 1) f s xâ := by
-- Porting note: added `comp_apply`.
rw [iteratedFDerivWithin_succ_eq_comp_left, comp_apply, LinearIsometryEquiv.norm_map]
-- The dual of `iteratedFDerivWithin_succ_apply_left`: the `(n+1)`-th derivative can also be
-- computed as the `n`-th derivative of `fderivWithin ð f s`, evaluated at the first `n`
-- arguments and then at the last one. Requires unique differentiability of `s`.
theorem iteratedFDerivWithin_succ_apply_right {n : â} (hs : UniqueDiffOn ð s) (hx : x â s)
(m : Fin (n + 1) â E) :
(iteratedFDerivWithin ð (n + 1) f s x : (Fin (n + 1) â E) â F) m =
iteratedFDerivWithin ð n (fun y => fderivWithin ð f s y) s x (init m) (m (last n)) := by
induction' n with n IH generalizing x
· rw [iteratedFDerivWithin_succ_eq_comp_left, iteratedFDerivWithin_zero_eq_comp,
iteratedFDerivWithin_zero_apply, Function.comp_apply,
LinearIsometryEquiv.comp_fderivWithin _ (hs x hx)]
rfl
· let I := continuousMultilinearCurryRightEquiv' ð n E F
-- The induction hypothesis, as an equality of functions on `s`.
have A : â y â s, iteratedFDerivWithin ð n.succ f s y =
(I â iteratedFDerivWithin ð n (fun y => fderivWithin ð f s y) s) y := fun y hy ⊠by
ext m
rw [@IH y hy m]
rfl
calc
(iteratedFDerivWithin ð (n + 2) f s x : (Fin (n + 2) â E) â F) m =
(fderivWithin ð (iteratedFDerivWithin ð n.succ f s) s x : E â E[Ãn + 1]âL[ð] F) (m 0)
(tail m) :=
rfl
_ = (fderivWithin ð (I â iteratedFDerivWithin ð n (fderivWithin ð f s) s) s x :
E â E[Ãn + 1]âL[ð] F) (m 0) (tail m) := by
rw [fderivWithin_congr A (A x hx)]
_ = (I â fderivWithin ð (iteratedFDerivWithin ð n (fderivWithin ð f s) s) s x :
E â E[Ãn + 1]âL[ð] F) (m 0) (tail m) := by
#adaptation_note
/--
After https://github.com/leanprover/lean4/pull/4119 we need to either use
`set_option maxSynthPendingDepth 2 in`
or fill in an explicit argument as
```
simp only [LinearIsometryEquiv.comp_fderivWithin _
(f := iteratedFDerivWithin ð n (fderivWithin ð f s) s) (hs x hx)]
```
-/
set_option maxSynthPendingDepth 2 in
simp only [LinearIsometryEquiv.comp_fderivWithin _ (hs x hx)]
rfl
_ = (fderivWithin ð (iteratedFDerivWithin ð n (fun y => fderivWithin ð f s y) s) s x :
E â E[Ãn]âL[ð] E âL[ð] F) (m 0) (init (tail m)) ((tail m) (last n)) := rfl
_ = iteratedFDerivWithin ð (Nat.succ n) (fun y => fderivWithin ð f s y) s x (init m)
(m (last (n + 1))) := by
rw [iteratedFDerivWithin_succ_apply_left, tail_init_eq_init_tail]
rfl
/-- Writing explicitly the `n+1`-th derivative as the composition of a currying linear equiv,
and the `n`-th derivative of the derivative. -/
theorem iteratedFDerivWithin_succ_eq_comp_right {n : â} (hs : UniqueDiffOn ð s) (hx : x â s) :
iteratedFDerivWithin ð (n + 1) f s x =
(continuousMultilinearCurryRightEquiv' ð n E F â
iteratedFDerivWithin ð n (fun y => fderivWithin ð f s y) s)
x := by
ext m; rw [iteratedFDerivWithin_succ_apply_right hs hx]; rfl
-- Norms agree since the right-currying equivalence is an isometry.
theorem norm_iteratedFDerivWithin_fderivWithin {n : â} (hs : UniqueDiffOn ð s) (hx : x â s) :
âiteratedFDerivWithin ð n (fderivWithin ð f s) s xâ =
âiteratedFDerivWithin ð (n + 1) f s xâ := by
-- Porting note: added `comp_apply`.
rw [iteratedFDerivWithin_succ_eq_comp_right hs hx, comp_apply, LinearIsometryEquiv.norm_map]
@[simp]
theorem iteratedFDerivWithin_one_apply (h : UniqueDiffWithinAt ð s x) (m : Fin 1 â E) :
iteratedFDerivWithin ð 1 f s x m = fderivWithin ð f s x (m 0) := by
simp only [iteratedFDerivWithin_succ_apply_left, iteratedFDerivWithin_zero_eq_comp,
(continuousMultilinearCurryFin0 ð E F).symm.comp_fderivWithin h]
rfl
/-- On a set of unique differentiability, the second derivative is obtained by taking the
derivative of the derivative. -/
lemma iteratedFDerivWithin_two_apply (f : E â F) {z : E} (hs : UniqueDiffOn ð s) (hz : z â s)
(m : Fin 2 â E) :
iteratedFDerivWithin ð 2 f s z m = fderivWithin ð (fderivWithin ð f s) s z (m 0) (m 1) := by
simp only [iteratedFDerivWithin_succ_apply_right hs hz]
rfl
-- Iterated derivatives only depend on the germ of the function: if `fâ = f` eventually near `x`
-- within `s`, the same holds for all their iterated derivatives within `t â s`.
theorem Filter.EventuallyEq.iteratedFDerivWithin' (h : fâ =á¶ [ð[s] x] f) (ht : t â s) (n : â) :
iteratedFDerivWithin ð n fâ t =á¶ [ð[s] x] iteratedFDerivWithin ð n f t := by
induction' n with n ihn
· exact h.mono fun y hy => DFunLike.ext _ _ fun _ => hy
· have : fderivWithin ð _ t =á¶ [ð[s] x] fderivWithin ð _ t := ihn.fderivWithin' ht
apply this.mono
intro y hy
simp only [iteratedFDerivWithin_succ_eq_comp_left, hy, (· â ·)]
protected theorem Filter.EventuallyEq.iteratedFDerivWithin (h : fâ =á¶ [ð[s] x] f) (n : â) :
iteratedFDerivWithin ð n fâ s =á¶ [ð[s] x] iteratedFDerivWithin ð n f s :=
h.iteratedFDerivWithin' Subset.rfl n
/-- If two functions coincide in a neighborhood of `x` within a set `s` and at `x`, then their
iterated differentials within this set at `x` coincide. -/
theorem Filter.EventuallyEq.iteratedFDerivWithin_eq (h : fâ =á¶ [ð[s] x] f) (hx : fâ x = f x)
(n : â) : iteratedFDerivWithin ð n fâ s x = iteratedFDerivWithin ð n f s x :=
have : fâ =á¶ [ð[insert x s] x] f := by simpa [EventuallyEq, hx]
(this.iteratedFDerivWithin' (subset_insert _ _) n).self_of_nhdsWithin (mem_insert _ _)
/-- If two functions coincide on a set `s`, then their iterated differentials within this set
coincide. See also `Filter.EventuallyEq.iteratedFDerivWithin_eq` and
`Filter.EventuallyEq.iteratedFDerivWithin`. -/
theorem iteratedFDerivWithin_congr (hs : EqOn fâ f s) (hx : x â s) (n : â) :
iteratedFDerivWithin ð n fâ s x = iteratedFDerivWithin ð n f s x :=
(hs.eventuallyEq.filter_mono inf_le_right).iteratedFDerivWithin_eq (hs hx) _
/-- If two functions coincide on a set `s`, then their iterated differentials within this set
coincide. See also `Filter.EventuallyEq.iteratedFDerivWithin_eq` and
`Filter.EventuallyEq.iteratedFDerivWithin`. -/
protected theorem Set.EqOn.iteratedFDerivWithin (hs : EqOn fâ f s) (n : â) :
EqOn (iteratedFDerivWithin ð n fâ s) (iteratedFDerivWithin ð n f s) s := fun _x hx =>
iteratedFDerivWithin_congr hs hx n
-- Iterated derivatives also only depend on the germ of the *set*: sets that agree eventually
-- (away from a chosen point `y`) give eventually-equal iterated derivatives.
theorem iteratedFDerivWithin_eventually_congr_set' (y : E) (h : s =á¶ [ð[{y}á¶] x] t) (n : â) :
iteratedFDerivWithin ð n f s =á¶ [ð x] iteratedFDerivWithin ð n f t := by
induction' n with n ihn generalizing x
· rfl
· refine (eventually_nhds_nhdsWithin.2 h).mono fun y hy => ?_
simp only [iteratedFDerivWithin_succ_eq_comp_left, (· â ·)]
rw [(ihn hy).fderivWithin_eq_nhds, fderivWithin_congr_set' _ hy]
theorem iteratedFDerivWithin_eventually_congr_set (h : s =á¶ [ð x] t) (n : â) :
iteratedFDerivWithin ð n f s =á¶ [ð x] iteratedFDerivWithin ð n f t :=
iteratedFDerivWithin_eventually_congr_set' x (h.filter_mono inf_le_left) n
theorem iteratedFDerivWithin_congr_set (h : s =á¶ [ð x] t) (n : â) :
iteratedFDerivWithin ð n f s x = iteratedFDerivWithin ð n f t x :=
(iteratedFDerivWithin_eventually_congr_set h n).self_of_nhds
/-- The iterated differential within a set `s` at a point `x` is not modified if one intersects
`s` with a neighborhood of `x` within `s`. -/
theorem iteratedFDerivWithin_inter' {n : â} (hu : u â ð[s] x) :
iteratedFDerivWithin ð n f (s â© u) x = iteratedFDerivWithin ð n f s x :=
iteratedFDerivWithin_congr_set (nhdsWithin_eq_iff_eventuallyEq.1 <| nhdsWithin_inter_of_mem' hu) _
/-- The iterated differential within a set `s` at a point `x` is not modified if one intersects
`s` with a neighborhood of `x`. -/
theorem iteratedFDerivWithin_inter {n : â} (hu : u â ð x) :
iteratedFDerivWithin ð n f (s â© u) x = iteratedFDerivWithin ð n f s x :=
iteratedFDerivWithin_inter' (mem_nhdsWithin_of_mem_nhds hu)
/-- The iterated differential within a set `s` at a point `x` is not modified if one intersects
`s` with an open set containing `x`. -/
theorem iteratedFDerivWithin_inter_open {n : â} (hu : IsOpen u) (hx : x â u) :
iteratedFDerivWithin ð n f (s â© u) x = iteratedFDerivWithin ð n f s x :=
iteratedFDerivWithin_inter (hu.mem_nhds hx)
-- Order-zero smoothness is just continuity.
@[simp]
theorem contDiffOn_zero : ContDiffOn ð 0 f s â ContinuousOn f s := by
refine âšfun H => H.continuousOn, fun H => ?_â©
intro x hx m hm
have : (m : ââ) = 0 := le_antisymm hm bot_le
rw [this]
-- The Taylor series of order `0` is just the function itself; continuity supplies the rest.
refine âšinsert x s, self_mem_nhdsWithin, ftaylorSeriesWithin ð f s, ?_â©
rw [hasFTaylorSeriesUpToOn_zero_iff]
exact âšby rwa [insert_eq_of_mem hx], fun x _ => by simp [ftaylorSeriesWithin]â©
theorem contDiffWithinAt_zero (hx : x â s) :
ContDiffWithinAt ð 0 f s x â â u â ð[s] x, ContinuousOn f (s â© u) := by
constructor
· intro h
obtain âšu, H, p, hpâ© := h 0 le_rfl
refine âšu, ?_, ?_â©
· simpa [hx] using H
· simp only [Nat.cast_zero, hasFTaylorSeriesUpToOn_zero_iff] at hp
exact hp.1.mono inter_subset_right
· rintro âšu, H, huâ©
rw [â contDiffWithinAt_inter' H]
have h' : x â s â© u := âšhx, mem_of_mem_nhdsWithin hx Hâ©
exact (contDiffOn_zero.mpr hu).contDiffWithinAt h'
/-- On a set with unique differentiability, any choice of iterated differential has to coincide
with the one we have chosen in `iteratedFDerivWithin ð m f s`. -/
theorem HasFTaylorSeriesUpToOn.eq_iteratedFDerivWithin_of_uniqueDiffOn
(h : HasFTaylorSeriesUpToOn n f p s) {m : â} (hmn : (m : ââ) †n) (hs : UniqueDiffOn ð s)
(hx : x â s) : p x m = iteratedFDerivWithin ð m f s x := by
-- Induction on `m`: each `p x (m+1)` is the (unique) derivative of `p · m`, which by the
-- induction hypothesis equals `iteratedFDerivWithin ð m f s` on `s`.
induction' m with m IH generalizing x
· rw [h.zero_eq' hx, iteratedFDerivWithin_zero_eq_comp]; rfl
· have A : (m : ââ) < n := lt_of_lt_of_le (WithTop.coe_lt_coe.2 (lt_add_one m)) hmn
have :
HasFDerivWithinAt (fun y : E => iteratedFDerivWithin ð m f s y)
(ContinuousMultilinearMap.curryLeft (p x (Nat.succ m))) s x :=
(h.fderivWithin m A x hx).congr (fun y hy => (IH (le_of_lt A) hy).symm)
(IH (le_of_lt A) hx).symm
rw [iteratedFDerivWithin_succ_eq_comp_left, Function.comp_apply, this.fderivWithin (hs x hx)]
exact (ContinuousMultilinearMap.uncurry_curryLeft _).symm
@[deprecated (since := "2024-03-28")]
alias HasFTaylorSeriesUpToOn.eq_ftaylor_series_of_uniqueDiffOn :=
HasFTaylorSeriesUpToOn.eq_iteratedFDerivWithin_of_uniqueDiffOn
/-- When a function is `C^n` in a set `s` of unique differentiability, it admits
`ftaylorSeriesWithin ð f s` as a Taylor series up to order `n` in `s`. -/
protected theorem ContDiffOn.ftaylorSeriesWithin (h : ContDiffOn ð n f s) (hs : UniqueDiffOn ð s) :
HasFTaylorSeriesUpToOn n f (ftaylorSeriesWithin ð f s) s := by
constructor
· intro x _
simp only [ftaylorSeriesWithin, ContinuousMultilinearMap.uncurry0_apply,
iteratedFDerivWithin_zero_apply]
-- Derivative condition: locally identify the abstract series `p` with the canonical
-- iterated derivatives (via uniqueness on `s â© o`), then transfer the derivative statement.
· intro m hm x hx
rcases (h x hx) m.succ (ENat.add_one_le_of_lt hm) with âšu, hu, p, Hpâ©
rw [insert_eq_of_mem hx] at hu
rcases mem_nhdsWithin.1 hu with âšo, o_open, xo, hoâ©
rw [inter_comm] at ho
have : p x m.succ = ftaylorSeriesWithin ð f s x m.succ := by
change p x m.succ = iteratedFDerivWithin ð m.succ f s x
rw [â iteratedFDerivWithin_inter_open o_open xo]
exact (Hp.mono ho).eq_iteratedFDerivWithin_of_uniqueDiffOn le_rfl (hs.inter o_open) âšhx, xoâ©
rw [â this, â hasFDerivWithinAt_inter (IsOpen.mem_nhds o_open xo)]
have A : â y â s â© o, p y m = ftaylorSeriesWithin ð f s y m := by
rintro y âšhy, yoâ©
change p y m = iteratedFDerivWithin ð m f s y
rw [â iteratedFDerivWithin_inter_open o_open yo]
exact
(Hp.mono ho).eq_iteratedFDerivWithin_of_uniqueDiffOn (WithTop.coe_le_coe.2 (Nat.le_succ m))
(hs.inter o_open) âšhy, yoâ©
exact
((Hp.mono ho).fderivWithin m (WithTop.coe_lt_coe.2 (lt_add_one m)) x âšhx, xoâ©).congr
(fun y hy => (A y hy).symm) (A x âšhx, xoâ©).symm
-- Continuity condition: the same local identification reduces it to continuity of `p · m`.
· intro m hm
apply continuousOn_of_locally_continuousOn
intro x hx
rcases h x hx m hm with âšu, hu, p, Hpâ©
rcases mem_nhdsWithin.1 hu with âšo, o_open, xo, hoâ©
rw [insert_eq_of_mem hx] at ho
rw [inter_comm] at ho
refine âšo, o_open, xo, ?_â©
have A : â y â s â© o, p y m = ftaylorSeriesWithin ð f s y m := by
rintro y âšhy, yoâ©
change p y m = iteratedFDerivWithin ð m f s y
rw [â iteratedFDerivWithin_inter_open o_open yo]
exact (Hp.mono ho).eq_iteratedFDerivWithin_of_uniqueDiffOn le_rfl (hs.inter o_open) âšhy, yoâ©
exact ((Hp.mono ho).cont m le_rfl).congr fun y hy => (A y hy).symm
-- Sufficient condition for smoothness: continuity of iterated derivatives up to order `n`
-- and differentiability strictly below `n`.
theorem contDiffOn_of_continuousOn_differentiableOn
(Hcont : â m : â, (m : ââ) †n â ContinuousOn (fun x => iteratedFDerivWithin ð m f s x) s)
(Hdiff : â m : â, (m : ââ) < n â
DifferentiableOn ð (fun x => iteratedFDerivWithin ð m f s x) s) :
ContDiffOn ð n f s := by
intro x hx m hm
rw [insert_eq_of_mem hx]
refine âšs, self_mem_nhdsWithin, ftaylorSeriesWithin ð f s, ?_â©
constructor
· intro y _
simp only [ftaylorSeriesWithin, ContinuousMultilinearMap.uncurry0_apply,
iteratedFDerivWithin_zero_apply]
· intro k hk y hy
convert (Hdiff k (lt_of_lt_of_le hk hm) y hy).hasFDerivWithinAt
· intro k hk
exact Hcont k (le_trans hk hm)
theorem contDiffOn_of_differentiableOn
(h : â m : â, (m : ââ) †n â DifferentiableOn ð (iteratedFDerivWithin ð m f s) s) :
ContDiffOn ð n f s :=
contDiffOn_of_continuousOn_differentiableOn (fun m hm => (h m hm).continuousOn) fun m hm =>
h m (le_of_lt hm)
-- Converse directions, via the canonical Taylor series on a set of unique differentiability.
theorem ContDiffOn.continuousOn_iteratedFDerivWithin {m : â} (h : ContDiffOn ð n f s)
(hmn : (m : ââ) †n) (hs : UniqueDiffOn ð s) : ContinuousOn (iteratedFDerivWithin ð m f s) s :=
(h.ftaylorSeriesWithin hs).cont m hmn
theorem ContDiffOn.differentiableOn_iteratedFDerivWithin {m : â} (h : ContDiffOn ð n f s)
(hmn : (m : ââ) < n) (hs : UniqueDiffOn ð s) :
DifferentiableOn ð (iteratedFDerivWithin ð m f s) s := fun x hx =>
((h.ftaylorSeriesWithin hs).fderivWithin m hmn x hx).differentiableWithinAt
-- Pointwise version: a `C^n` germ has differentiable `m`-th derivative for `m < n`, assuming
-- unique differentiability of `insert x s`.
theorem ContDiffWithinAt.differentiableWithinAt_iteratedFDerivWithin {m : â}
(h : ContDiffWithinAt ð n f s x) (hmn : (m : ââ) < n) (hs : UniqueDiffOn ð (insert x s)) :
DifferentiableWithinAt ð (iteratedFDerivWithin ð m f s) s x := by
rcases h.contDiffOn' (ENat.add_one_le_of_lt hmn) with âšu, uo, xu, huâ©
set t := insert x s â© u
-- `t` and `s` have the same germ at `x` away from `x`, so iterated derivatives agree near `x`.
have A : t =á¶ [ð[â ] x] s := by
simp only [set_eventuallyEq_iff_inf_principal, â nhdsWithin_inter']
rw [â inter_assoc, nhdsWithin_inter_of_mem', â diff_eq_compl_inter, insert_diff_of_mem,
diff_eq_compl_inter]
exacts [rfl, mem_nhdsWithin_of_mem_nhds (uo.mem_nhds xu)]
have B : iteratedFDerivWithin ð m f s =á¶ [ð x] iteratedFDerivWithin ð m f t :=
iteratedFDerivWithin_eventually_congr_set' _ A.symm _
have C : DifferentiableWithinAt ð (iteratedFDerivWithin ð m f t) t x :=
hu.differentiableOn_iteratedFDerivWithin (Nat.cast_lt.2 m.lt_succ_self) (hs.inter uo) x
âšmem_insert _ _, xuâ©
rw [differentiableWithinAt_congr_set' _ A] at C
exact C.congr_of_eventuallyEq (B.filter_mono inf_le_left) B.self_of_nhds
-- Characterization of `ContDiffOn` via its iterated derivatives on a set of unique
-- differentiability.
theorem contDiffOn_iff_continuousOn_differentiableOn (hs : UniqueDiffOn ð s) :
ContDiffOn ð n f s â
(â m : â, (m : ââ) †n â ContinuousOn (fun x => iteratedFDerivWithin ð m f s x) s) â§
â m : â, (m : ââ) < n â DifferentiableOn ð (fun x => iteratedFDerivWithin ð m f s x) s :=
âšfun h => âšfun _m hm => h.continuousOn_iteratedFDerivWithin hm hs, fun _m hm =>
h.differentiableOn_iteratedFDerivWithin hm hsâ©,
fun h => contDiffOn_of_continuousOn_differentiableOn h.1 h.2â©
-- Differentiability with a `C^n` derivative yields `C^(n+1)` (no unique-diff assumption needed
-- in this direction).
theorem contDiffOn_succ_of_fderivWithin {n : â} (hf : DifferentiableOn ð f s)
(h : ContDiffOn ð n (fun y => fderivWithin ð f s y) s) : ContDiffOn ð (n + 1 : â) f s := by
intro x hx
rw [contDiffWithinAt_succ_iff_hasFDerivWithinAt, insert_eq_of_mem hx]
exact
âšs, self_mem_nhdsWithin, fderivWithin ð f s, fun y hy => (hf y hy).hasFDerivWithinAt, h x hxâ©
/-- A function is `C^(n + 1)` on a domain with unique derivatives if and only if it is
differentiable there, and its derivative (expressed with `fderivWithin`) is `C^n`. -/
theorem contDiffOn_succ_iff_fderivWithin {n : â} (hs : UniqueDiffOn ð s) :
ContDiffOn ð (n + 1 : â) f s â
DifferentiableOn ð f s â§ ContDiffOn ð n (fun y => fderivWithin ð f s y) s := by
refine âšfun H => ?_, fun h => contDiffOn_succ_of_fderivWithin h.1 h.2â©
refine âšH.differentiableOn (WithTop.coe_le_coe.2 (Nat.le_add_left 1 n)), fun x hx => ?_â©
-- The local derivative `f'` from the `hasFDerivWithinAt` characterization agrees with
-- `fderivWithin` on `s â© o` by unique differentiability, so smoothness transfers.
rcases contDiffWithinAt_succ_iff_hasFDerivWithinAt.1 (H x hx) with âšu, hu, f', hff', hf'â©
rcases mem_nhdsWithin.1 hu with âšo, o_open, xo, hoâ©
rw [inter_comm, insert_eq_of_mem hx] at ho
have := hf'.mono ho
rw [contDiffWithinAt_inter' (mem_nhdsWithin_of_mem_nhds (IsOpen.mem_nhds o_open xo))] at this
apply this.congr_of_eventually_eq' _ hx
have : o â© s â ð[s] x := mem_nhdsWithin.2 âšo, o_open, xo, Subset.refl _â©
rw [inter_comm] at this
refine Filter.eventuallyEq_of_mem this fun y hy => ?_
have A : fderivWithin ð f (s â© o) y = f' y :=
((hff' y (ho hy)).mono ho).fderivWithin (hs.inter o_open y hy)
rwa [fderivWithin_inter (o_open.mem_nhds hy.2)] at A
-- Same statement, phrased with an arbitrary derivative function `f'`.
theorem contDiffOn_succ_iff_hasFDerivWithin {n : â} (hs : UniqueDiffOn ð s) :
ContDiffOn ð (n + 1 : â) f s â
â f' : E â E âL[ð] F, ContDiffOn ð n f' s â§ â x, x â s â HasFDerivWithinAt f (f' x) s x := by
rw [contDiffOn_succ_iff_fderivWithin hs]
refine âšfun h => âšfderivWithin ð f s, h.2, fun x hx => (h.1 x hx).hasFDerivWithinAtâ©, fun h => ?_â©
rcases h with âšf', h1, h2â©
refine âšfun x hx => (h2 x hx).differentiableWithinAt, fun x hx => ?_â©
exact (h1 x hx).congr' (fun y hy => (h2 y hy).fderivWithin (hs y hy)) hx
/-- A function is `C^(n + 1)` on an open domain if and only if it is
differentiable there, and its derivative (expressed with `fderiv`) is `C^n`. -/
theorem contDiffOn_succ_iff_fderiv_of_isOpen {n : â} (hs : IsOpen s) :
ContDiffOn ð (n + 1 : â) f s â
DifferentiableOn ð f s â§ ContDiffOn ð n (fun y => fderiv ð f y) s := by
-- On an open set, `fderivWithin` coincides with `fderiv`.
rw [contDiffOn_succ_iff_fderivWithin hs.uniqueDiffOn]
exact Iff.rfl.and (contDiffOn_congr fun x hx ⊠fderivWithin_of_isOpen hs hx)
/-- A function is `C^â` on a domain with unique derivatives if and only if it is differentiable
there, and its derivative (expressed with `fderivWithin`) is `C^â`. -/
theorem contDiffOn_top_iff_fderivWithin (hs : UniqueDiffOn ð s) :
ContDiffOn ð â f s â
DifferentiableOn ð f s â§ ContDiffOn ð â (fun y => fderivWithin ð f s y) s := by
constructor
· intro h
refine âšh.differentiableOn le_top, ?_â©
refine contDiffOn_top.2 fun n => ((contDiffOn_succ_iff_fderivWithin hs).1 ?_).2
exact h.of_le le_top
· intro h
refine contDiffOn_top.2 fun n => ?_
have A : (n : ââ) †â := le_top
apply ((contDiffOn_succ_iff_fderivWithin hs).2 âšh.1, h.2.of_le Aâ©).of_le
exact WithTop.coe_le_coe.2 (Nat.le_succ n)
/-- A function is `C^â` on an open domain if and only if it is differentiable there, and its
derivative (expressed with `fderiv`) is `C^â`. -/
theorem contDiffOn_top_iff_fderiv_of_isOpen (hs : IsOpen s) :
ContDiffOn ð â f s â DifferentiableOn ð f s â§ ContDiffOn ð â (fun y => fderiv ð f y) s := by
rw [contDiffOn_top_iff_fderivWithin hs.uniqueDiffOn]
exact Iff.rfl.and <| contDiffOn_congr fun x hx ⊠fderivWithin_of_isOpen hs hx
protected theorem ContDiffOn.fderivWithin (hf : ContDiffOn ð n f s) (hs : UniqueDiffOn ð s)
(hmn : m + 1 †n) : ContDiffOn ð m (fun y => fderivWithin ð f s y) s := by
cases' m with m
· change â + 1 †n at hmn
have : n = â := by simpa using hmn
rw [this] at hf
exact ((contDiffOn_top_iff_fderivWithin hs).1 hf).2
· change (m.succ : ââ) †n at hmn
exact ((contDiffOn_succ_iff_fderivWithin hs).1 (hf.of_le hmn)).2
theorem ContDiffOn.fderiv_of_isOpen (hf : ContDiffOn ð n f s) (hs : IsOpen s) (hmn : m + 1 †n) :
ContDiffOn ð m (fun y => fderiv ð f y) s :=
(hf.fderivWithin hs.uniqueDiffOn hmn).congr fun _ hx => (fderivWithin_of_isOpen hs hx).symm
theorem ContDiffOn.continuousOn_fderivWithin (h : ContDiffOn ð n f s) (hs : UniqueDiffOn ð s)
(hn : 1 †n) : ContinuousOn (fun x => fderivWithin ð f s x) s :=
((contDiffOn_succ_iff_fderivWithin hs).1 (h.of_le hn)).2.continuousOn
theorem ContDiffOn.continuousOn_fderiv_of_isOpen (h : ContDiffOn ð n f s) (hs : IsOpen s)
(hn : 1 †n) : ContinuousOn (fun x => fderiv ð f x) s :=
((contDiffOn_succ_iff_fderiv_of_isOpen hs).1 (h.of_le hn)).2.continuousOn
/-! ### Functions with a Taylor series on the whole space -/
/-- `HasFTaylorSeriesUpTo n f p` registers the fact that `p 0 = f` and `p (m+1)` is a
derivative of `p m` for `m < n`, and is continuous for `m †n`. This is a predicate analogous to
`HasFDerivAt` but for higher order derivatives.
Notice that `p` does not sum up to `f` on the diagonal (`FormalMultilinearSeries.sum`), even if
`f` is analytic and `n = â`: an addition `1/m!` factor on the `m`th term is necessary for that. -/
structure HasFTaylorSeriesUpTo (n : ââ) (f : E â F) (p : E â FormalMultilinearSeries ð E F) :
Prop where
zero_eq : â x, (p x 0).uncurry0 = f x
fderiv : â m : â, (m : ââ) < n â â x, HasFDerivAt (fun y => p y m) (p x m.succ).curryLeft x
cont : â m : â, (m : ââ) †n â Continuous fun x => p x m
theorem HasFTaylorSeriesUpTo.zero_eq' (h : HasFTaylorSeriesUpTo n f p) (x : E) :
p x 0 = (continuousMultilinearCurryFin0 ð E F).symm (f x) := by
rw [â h.zero_eq x]
exact (p x 0).uncurry0_curry0.symm
theorem hasFTaylorSeriesUpToOn_univ_iff :
HasFTaylorSeriesUpToOn n f p univ â HasFTaylorSeriesUpTo n f p := by
constructor
· intro H
constructor
· exact fun x => H.zero_eq x (mem_univ x)
· intro m hm x
rw [â hasFDerivWithinAt_univ]
exact H.fderivWithin m hm x (mem_univ x)
· intro m hm
rw [continuous_iff_continuousOn_univ]
exact H.cont m hm
· intro H
constructor
· exact fun x _ => H.zero_eq x
· intro m hm x _
rw [hasFDerivWithinAt_univ]
exact H.fderiv m hm x
· intro m hm
rw [â continuous_iff_continuousOn_univ]
exact H.cont m hm
theorem HasFTaylorSeriesUpTo.hasFTaylorSeriesUpToOn (h : HasFTaylorSeriesUpTo n f p) (s : Set E) :
HasFTaylorSeriesUpToOn n f p s :=
(hasFTaylorSeriesUpToOn_univ_iff.2 h).mono (subset_univ _)
theorem HasFTaylorSeriesUpTo.ofLe (h : HasFTaylorSeriesUpTo n f p) (hmn : m †n) :
HasFTaylorSeriesUpTo m f p := by
rw [â hasFTaylorSeriesUpToOn_univ_iff] at h â¢; exact h.of_le hmn
theorem HasFTaylorSeriesUpTo.continuous (h : HasFTaylorSeriesUpTo n f p) : Continuous f := by
rw [â hasFTaylorSeriesUpToOn_univ_iff] at h
rw [continuous_iff_continuousOn_univ]
exact h.continuousOn
theorem hasFTaylorSeriesUpTo_zero_iff :
HasFTaylorSeriesUpTo 0 f p â Continuous f â§ â x, (p x 0).uncurry0 = f x := by
simp [hasFTaylorSeriesUpToOn_univ_iff.symm, continuous_iff_continuousOn_univ,
hasFTaylorSeriesUpToOn_zero_iff]
theorem hasFTaylorSeriesUpTo_top_iff :
HasFTaylorSeriesUpTo â f p â â n : â, HasFTaylorSeriesUpTo n f p := by
simp only [â hasFTaylorSeriesUpToOn_univ_iff, hasFTaylorSeriesUpToOn_top_iff]
/-- In the case that `n = â` we don't need the continuity assumption in
`HasFTaylorSeriesUpTo`. -/
theorem hasFTaylorSeriesUpTo_top_iff' :
HasFTaylorSeriesUpTo â f p â
(â x, (p x 0).uncurry0 = f x) â§
â (m : â) (x), HasFDerivAt (fun y => p y m) (p x m.succ).curryLeft x := by
simp only [â hasFTaylorSeriesUpToOn_univ_iff, hasFTaylorSeriesUpToOn_top_iff', mem_univ,
forall_true_left, hasFDerivWithinAt_univ]
/-- If a function has a Taylor series at order at least `1`, then the term of order `1` of this
series is a derivative of `f`. -/
theorem HasFTaylorSeriesUpTo.hasFDerivAt (h : HasFTaylorSeriesUpTo n f p) (hn : 1 †n) (x : E) :
HasFDerivAt f (continuousMultilinearCurryFin1 ð E F (p x 1)) x := by
rw [â hasFDerivWithinAt_univ]
exact (hasFTaylorSeriesUpToOn_univ_iff.2 h).hasFDerivWithinAt hn (mem_univ _)
theorem HasFTaylorSeriesUpTo.differentiable (h : HasFTaylorSeriesUpTo n f p) (hn : 1 †n) :
Differentiable ð f := fun x => (h.hasFDerivAt hn x).differentiableAt
/-- `p` is a Taylor series of `f` up to `n+1` if and only if `p.shift` is a Taylor series up to `n`
for `p 1`, which is a derivative of `f`. -/
theorem hasFTaylorSeriesUpTo_succ_iff_right {n : â} :
HasFTaylorSeriesUpTo (n + 1 : â) f p â
(â x, (p x 0).uncurry0 = f x) â§
(â x, HasFDerivAt (fun y => p y 0) (p x 1).curryLeft x) â§
HasFTaylorSeriesUpTo n (fun x => continuousMultilinearCurryFin1 ð E F (p x 1)) fun x =>
(p x).shift := by
simp only [hasFTaylorSeriesUpToOn_succ_iff_right, â hasFTaylorSeriesUpToOn_univ_iff, mem_univ,
forall_true_left, hasFDerivWithinAt_univ]
/-! ### Smooth functions at a point -/
variable (ð)
/-- A function is continuously differentiable up to `n` at a point `x` if, for any integer `k †n`,
there is a neighborhood of `x` where `f` admits derivatives up to order `n`, which are continuous.
-/
def ContDiffAt (n : ââ) (f : E â F) (x : E) : Prop :=
ContDiffWithinAt ð n f univ x
variable {ð}
theorem contDiffWithinAt_univ : ContDiffWithinAt ð n f univ x â ContDiffAt ð n f x :=
Iff.rfl
theorem contDiffAt_top : ContDiffAt ð â f x â â n : â, ContDiffAt ð n f x := by
simp [â contDiffWithinAt_univ, contDiffWithinAt_top]
theorem ContDiffAt.contDiffWithinAt (h : ContDiffAt ð n f x) : ContDiffWithinAt ð n f s x :=
h.mono (subset_univ _)
theorem ContDiffWithinAt.contDiffAt (h : ContDiffWithinAt ð n f s x) (hx : s â ð x) :
ContDiffAt ð n f x := by rwa [ContDiffAt, â contDiffWithinAt_inter hx, univ_inter]
theorem ContDiffOn.contDiffAt (h : ContDiffOn ð n f s) (hx : s â ð x) :
ContDiffAt ð n f x :=
(h _ (mem_of_mem_nhds hx)).contDiffAt hx
theorem ContDiffAt.congr_of_eventuallyEq (h : ContDiffAt ð n f x) (hg : fâ =á¶ [ð x] f) :
ContDiffAt ð n fâ x :=
h.congr_of_eventually_eq' (by rwa [nhdsWithin_univ]) (mem_univ x)
theorem ContDiffAt.of_le (h : ContDiffAt ð n f x) (hmn : m †n) : ContDiffAt ð m f x :=
ContDiffWithinAt.of_le h hmn
theorem ContDiffAt.continuousAt (h : ContDiffAt ð n f x) : ContinuousAt f x := by
simpa [continuousWithinAt_univ] using h.continuousWithinAt
/-- If a function is `C^n` with `n ⥠1` at a point, then it is differentiable there. -/
theorem ContDiffAt.differentiableAt (h : ContDiffAt ð n f x) (hn : 1 †n) :
DifferentiableAt ð f x := by
simpa [hn, differentiableWithinAt_univ] using h.differentiableWithinAt
nonrec lemma ContDiffAt.contDiffOn {m : â} (h : ContDiffAt ð n f x) (hm : m †n) :
â u â ð x, ContDiffOn ð m f u := by
simpa [nhdsWithin_univ] using h.contDiffOn hm
/-- A function is `C^(n + 1)` at a point iff locally, it has a derivative which is `C^n`. -/
theorem contDiffAt_succ_iff_hasFDerivAt {n : â} :
ContDiffAt ð (n + 1 : â) f x â
â f' : E â E âL[ð] F, (â u â ð x, â x â u, HasFDerivAt f (f' x) x) â§ ContDiffAt ð n f' x := by
rw [â contDiffWithinAt_univ, contDiffWithinAt_succ_iff_hasFDerivWithinAt]
simp only [nhdsWithin_univ, exists_prop, mem_univ, insert_eq_of_mem]
constructor
· rintro âšu, H, f', h_fderiv, h_cont_diffâ©
rcases mem_nhds_iff.mp H with âšt, htu, ht, hxtâ©
refine âšf', âšt, ?_â©, h_cont_diff.contDiffAt Hâ©
refine âšmem_nhds_iff.mpr âšt, Subset.rfl, ht, hxtâ©, ?_â©
intro y hyt
refine (h_fderiv y (htu hyt)).hasFDerivAt ?_
exact mem_nhds_iff.mpr âšt, htu, ht, hytâ©
· rintro âšf', âšu, H, h_fderivâ©, h_cont_diffâ©
refine âšu, H, f', ?_, h_cont_diff.contDiffWithinAtâ©
intro x hxu
exact (h_fderiv x hxu).hasFDerivWithinAt
protected theorem ContDiffAt.eventually {n : â} (h : ContDiffAt ð n f x) :
âá¶ y in ð x, ContDiffAt ð n f y := by
simpa [nhdsWithin_univ] using ContDiffWithinAt.eventually h
/-! ### Smooth functions -/
variable (ð)
/-- A function is continuously differentiable up to `n` if it admits derivatives up to
order `n`, which are continuous. Contrary to the case of definitions in domains (where derivatives
might not be unique) we do not need to localize the definition in space or time.
-/
def ContDiff (n : ââ) (f : E â F) : Prop :=
â p : E â FormalMultilinearSeries ð E F, HasFTaylorSeriesUpTo n f p
variable {ð}
/-- If `f` has a Taylor series up to `n`, then it is `C^n`. -/
theorem HasFTaylorSeriesUpTo.contDiff {f' : E â FormalMultilinearSeries ð E F}
(hf : HasFTaylorSeriesUpTo n f f') : ContDiff ð n f :=
âšf', hfâ©
theorem contDiffOn_univ : ContDiffOn ð n f univ â ContDiff ð n f := by
constructor
· intro H
use ftaylorSeriesWithin ð f univ
rw [â hasFTaylorSeriesUpToOn_univ_iff]
exact H.ftaylorSeriesWithin uniqueDiffOn_univ
· rintro âšp, hpâ© x _ m hm
exact âšuniv, Filter.univ_sets _, p, (hp.hasFTaylorSeriesUpToOn univ).of_le hmâ©
theorem contDiff_iff_contDiffAt : ContDiff ð n f â â x, ContDiffAt ð n f x := by
simp [â contDiffOn_univ, ContDiffOn, ContDiffAt]
theorem ContDiff.contDiffAt (h : ContDiff ð n f) : ContDiffAt ð n f x :=
contDiff_iff_contDiffAt.1 h x
theorem ContDiff.contDiffWithinAt (h : ContDiff ð n f) : ContDiffWithinAt ð n f s x :=
h.contDiffAt.contDiffWithinAt
theorem contDiff_top : ContDiff ð â f â â n : â, ContDiff ð n f := by
simp [contDiffOn_univ.symm, contDiffOn_top]
theorem contDiff_all_iff_nat : (â n, ContDiff ð n f) â â n : â, ContDiff ð n f := by
simp only [â contDiffOn_univ, contDiffOn_all_iff_nat]
theorem ContDiff.contDiffOn (h : ContDiff ð n f) : ContDiffOn ð n f s :=
(contDiffOn_univ.2 h).mono (subset_univ _)
@[simp]
theorem contDiff_zero : ContDiff ð 0 f â Continuous f := by
rw [â contDiffOn_univ, continuous_iff_continuousOn_univ]
exact contDiffOn_zero
theorem contDiffAt_zero : ContDiffAt ð 0 f x â â u â ð x, ContinuousOn f u := by
rw [â contDiffWithinAt_univ]; simp [contDiffWithinAt_zero, nhdsWithin_univ]
theorem contDiffAt_one_iff :
ContDiffAt ð 1 f x â
â f' : E â E âL[ð] F, â u â ð x, ContinuousOn f' u â§ â x â u, HasFDerivAt f (f' x) x := by
simp_rw [show (1 : ââ) = (0 + 1 : â) from (zero_add 1).symm, contDiffAt_succ_iff_hasFDerivAt,
show ((0 : â) : ââ) = 0 from rfl, contDiffAt_zero,
exists_mem_and_iff antitone_bforall antitone_continuousOn, and_comm]
theorem ContDiff.of_le (h : ContDiff ð n f) (hmn : m †n) : ContDiff ð m f :=
contDiffOn_univ.1 <| (contDiffOn_univ.2 h).of_le hmn
theorem ContDiff.of_succ {n : â} (h : ContDiff ð (n + 1) f) : ContDiff ð n f :=
h.of_le <| WithTop.coe_le_coe.mpr le_self_add
theorem ContDiff.one_of_succ {n : â} (h : ContDiff ð (n + 1) f) : ContDiff ð 1 f :=
h.of_le <| WithTop.coe_le_coe.mpr le_add_self
theorem ContDiff.continuous (h : ContDiff ð n f) : Continuous f :=
contDiff_zero.1 (h.of_le bot_le)
/-- If a function is `C^n` with `n ⥠1`, then it is differentiable. -/
theorem ContDiff.differentiable (h : ContDiff ð n f) (hn : 1 †n) : Differentiable ð f :=
differentiableOn_univ.1 <| (contDiffOn_univ.2 h).differentiableOn hn
theorem contDiff_iff_forall_nat_le : ContDiff ð n f â â m : â, âm †n â ContDiff ð m f := by
simp_rw [â contDiffOn_univ]; exact contDiffOn_iff_forall_nat_le
/-- A function is `C^(n+1)` iff it has a `C^n` derivative. -/
theorem contDiff_succ_iff_hasFDerivAt {n : â} :
ContDiff ð (n + 1 : â) f â
â f' : E â E âL[ð] F, ContDiff ð n f' â§ â x, HasFDerivAt f (f' x) x := by
simp only [â contDiffOn_univ, â hasFDerivWithinAt_univ,
contDiffOn_succ_iff_hasFDerivWithin uniqueDiffOn_univ, Set.mem_univ, forall_true_left]
theorem contDiff_one_iff_hasFDerivAt : ContDiff ð 1 f â
â f' : E â E âL[ð] F, Continuous f' â§ â x, HasFDerivAt f (f' x) x := by
convert contDiff_succ_iff_hasFDerivAt using 4; simp
/-! ### Iterated derivative -/
variable (ð)
/-- The `n`-th derivative of a function, as a multilinear map, defined inductively. -/
noncomputable def iteratedFDeriv (n : â) (f : E â F) : E â E[Ãn]âL[ð] F :=
Nat.recOn n (fun x => ContinuousMultilinearMap.curry0 ð E (f x)) fun _ rec x =>
ContinuousLinearMap.uncurryLeft (fderiv ð rec x)
/-- Formal Taylor series associated to a function. -/
def ftaylorSeries (f : E â F) (x : E) : FormalMultilinearSeries ð E F := fun n =>
iteratedFDeriv ð n f x
variable {ð}
@[simp]
theorem iteratedFDeriv_zero_apply (m : Fin 0 â E) :
(iteratedFDeriv ð 0 f x : (Fin 0 â E) â F) m = f x :=
rfl
theorem iteratedFDeriv_zero_eq_comp :
iteratedFDeriv ð 0 f = (continuousMultilinearCurryFin0 ð E F).symm â f :=
rfl
@[simp]
theorem norm_iteratedFDeriv_zero : âiteratedFDeriv ð 0 f xâ = âf xâ := by
-- Porting note: added `comp_apply`.
rw [iteratedFDeriv_zero_eq_comp, comp_apply, LinearIsometryEquiv.norm_map]
theorem iteratedFDerivWithin_zero_eq : iteratedFDerivWithin ð 0 f s = iteratedFDeriv ð 0 f := rfl
theorem iteratedFDeriv_succ_apply_left {n : â} (m : Fin (n + 1) â E) :
(iteratedFDeriv ð (n + 1) f x : (Fin (n + 1) â E) â F) m =
(fderiv ð (iteratedFDeriv ð n f) x : E â E[Ãn]âL[ð] F) (m 0) (tail m) :=
rfl
/-- Writing explicitly the `n+1`-th derivative as the composition of a currying linear equiv,
and the derivative of the `n`-th derivative. -/
theorem iteratedFDeriv_succ_eq_comp_left {n : â} :
iteratedFDeriv ð (n + 1) f =
continuousMultilinearCurryLeftEquiv ð (fun _ : Fin (n + 1) => E) F â
fderiv ð (iteratedFDeriv ð n f) :=
rfl
/-- Writing explicitly the derivative of the `n`-th derivative as the composition of a currying
linear equiv, and the `n + 1`-th derivative. -/
theorem fderiv_iteratedFDeriv {n : â} :
fderiv ð (iteratedFDeriv ð n f) =
(continuousMultilinearCurryLeftEquiv ð (fun _ : Fin (n + 1) => E) F).symm â
iteratedFDeriv ð (n + 1) f := by
rw [iteratedFDeriv_succ_eq_comp_left]
ext1 x
simp only [Function.comp_apply, LinearIsometryEquiv.symm_apply_apply]
theorem tsupport_iteratedFDeriv_subset (n : â) : tsupport (iteratedFDeriv ð n f) â tsupport f := by
induction' n with n IH
· rw [iteratedFDeriv_zero_eq_comp]
exact closure_minimal ((support_comp_subset (LinearIsometryEquiv.map_zero _) _).trans
subset_closure) isClosed_closure
· rw [iteratedFDeriv_succ_eq_comp_left]
exact closure_minimal ((support_comp_subset (LinearIsometryEquiv.map_zero _) _).trans
((support_fderiv_subset ð).trans IH)) isClosed_closure
theorem support_iteratedFDeriv_subset (n : â) : support (iteratedFDeriv ð n f) â tsupport f :=
subset_closure.trans (tsupport_iteratedFDeriv_subset n)
theorem HasCompactSupport.iteratedFDeriv (hf : HasCompactSupport f) (n : â) :
HasCompactSupport (iteratedFDeriv ð n f) :=
hf.of_isClosed_subset isClosed_closure (tsupport_iteratedFDeriv_subset n)
theorem norm_fderiv_iteratedFDeriv {n : â} :
âfderiv ð (iteratedFDeriv ð n f) xâ = âiteratedFDeriv ð (n + 1) f xâ := by
-- Porting note: added `comp_apply`.
rw [iteratedFDeriv_succ_eq_comp_left, comp_apply, LinearIsometryEquiv.norm_map]
theorem iteratedFDerivWithin_univ {n : â} :
iteratedFDerivWithin ð n f univ = iteratedFDeriv ð n f := by
induction' n with n IH
· ext x; simp
· ext x m
rw [iteratedFDeriv_succ_apply_left, iteratedFDerivWithin_succ_apply_left, IH, fderivWithin_univ]
theorem HasFTaylorSeriesUpTo.eq_iteratedFDeriv
(h : HasFTaylorSeriesUpTo n f p) {m : â} (hmn : (m : ââ) †n) (x : E) :
p x m = iteratedFDeriv ð m f x := by
rw [â iteratedFDerivWithin_univ]
rw [â hasFTaylorSeriesUpToOn_univ_iff] at h
exact h.eq_iteratedFDerivWithin_of_uniqueDiffOn hmn uniqueDiffOn_univ (mem_univ _)
/-- In an open set, the iterated derivative within this set coincides with the global iterated
derivative. -/
theorem iteratedFDerivWithin_of_isOpen (n : â) (hs : IsOpen s) :
EqOn (iteratedFDerivWithin ð n f s) (iteratedFDeriv ð n f) s := by
induction' n with n IH
· intro x _
ext1
simp only [Nat.zero_eq, iteratedFDerivWithin_zero_apply, iteratedFDeriv_zero_apply]
· intro x hx
rw [iteratedFDeriv_succ_eq_comp_left, iteratedFDerivWithin_succ_eq_comp_left]
dsimp
congr 1
rw [fderivWithin_of_isOpen hs hx]
apply Filter.EventuallyEq.fderiv_eq
filter_upwards [hs.mem_nhds hx]
exact IH
theorem ftaylorSeriesWithin_univ : ftaylorSeriesWithin ð f univ = ftaylorSeries ð f := by
ext1 x; ext1 n
change iteratedFDerivWithin ð n f univ x = iteratedFDeriv ð n f x
rw [iteratedFDerivWithin_univ]
theorem iteratedFDeriv_succ_apply_right {n : â} (m : Fin (n + 1) â E) :
(iteratedFDeriv ð (n + 1) f x : (Fin (n + 1) â E) â F) m =
iteratedFDeriv ð n (fun y => fderiv ð f y) x (init m) (m (last n)) := by
rw [â iteratedFDerivWithin_univ, â iteratedFDerivWithin_univ, â fderivWithin_univ]
exact iteratedFDerivWithin_succ_apply_right uniqueDiffOn_univ (mem_univ _) _
/-- Writing explicitly the `n+1`-th derivative as the composition of a currying linear equiv,
and the `n`-th derivative of the derivative. -/
theorem iteratedFDeriv_succ_eq_comp_right {n : â} :
iteratedFDeriv ð (n + 1) f x =
(continuousMultilinearCurryRightEquiv' ð n E F â iteratedFDeriv ð n fun y => fderiv ð f y)
x := by
ext m; rw [iteratedFDeriv_succ_apply_right]; rfl
theorem norm_iteratedFDeriv_fderiv {n : â} :
âiteratedFDeriv ð n (fderiv ð f) xâ = âiteratedFDeriv ð (n + 1) f xâ := by
-- Porting note: added `comp_apply`.
rw [iteratedFDeriv_succ_eq_comp_right, comp_apply, LinearIsometryEquiv.norm_map]
@[simp]
theorem iteratedFDeriv_one_apply (m : Fin 1 â E) :
iteratedFDeriv ð 1 f x m = fderiv ð f x (m 0) := by
rw [iteratedFDeriv_succ_apply_right, iteratedFDeriv_zero_apply]; rfl
lemma iteratedFDeriv_two_apply (f : E â F) (z : E) (m : Fin 2 â E) :
iteratedFDeriv ð 2 f z m = fderiv ð (fderiv ð f) z (m 0) (m 1) := by
simp only [iteratedFDeriv_succ_apply_right]
rfl
/-- When a function is `C^n` in a set `s` of unique differentiability, it admits
`ftaylorSeriesWithin ð f s` as a Taylor series up to order `n` in `s`. -/
theorem contDiff_iff_ftaylorSeries :
ContDiff ð n f â HasFTaylorSeriesUpTo n f (ftaylorSeries ð f) := by
constructor
· rw [â contDiffOn_univ, â hasFTaylorSeriesUpToOn_univ_iff, â ftaylorSeriesWithin_univ]
exact fun h => ContDiffOn.ftaylorSeriesWithin h uniqueDiffOn_univ
· intro h; exact âšftaylorSeries ð f, hâ©
theorem contDiff_iff_continuous_differentiable :
ContDiff ð n f â
(â m : â, (m : ââ) †n â Continuous fun x => iteratedFDeriv ð m f x) â§
â m : â, (m : ââ) < n â Differentiable ð fun x => iteratedFDeriv ð m f x := by
simp [contDiffOn_univ.symm, continuous_iff_continuousOn_univ, differentiableOn_univ.symm,
iteratedFDerivWithin_univ, contDiffOn_iff_continuousOn_differentiableOn uniqueDiffOn_univ]
/-- If `f` is `C^n` then its `m`-times iterated derivative is continuous for `m †n`. -/
theorem ContDiff.continuous_iteratedFDeriv {m : â} (hm : (m : ââ) †n) (hf : ContDiff ð n f) :
Continuous fun x => iteratedFDeriv ð m f x :=
(contDiff_iff_continuous_differentiable.mp hf).1 m hm
/-- If `f` is `C^n` then its `m`-times iterated derivative is differentiable for `m < n`. -/
theorem ContDiff.differentiable_iteratedFDeriv {m : â} (hm : (m : ââ) < n) (hf : ContDiff ð n f) :
Differentiable ð fun x => iteratedFDeriv ð m f x :=
(contDiff_iff_continuous_differentiable.mp hf).2 m hm
theorem contDiff_of_differentiable_iteratedFDeriv
(h : â m : â, (m : ââ) †n â Differentiable ð (iteratedFDeriv ð m f)) : ContDiff ð n f :=
contDiff_iff_continuous_differentiable.2
âšfun m hm => (h m hm).continuous, fun m hm => h m (le_of_lt hm)â©
/-- A function is `C^(n + 1)` if and only if it is differentiable,
and its derivative (formulated in terms of `fderiv`) is `C^n`. -/
theorem contDiff_succ_iff_fderiv {n : â} :
ContDiff ð (n + 1 : â) f â Differentiable ð f â§ ContDiff ð n fun y => fderiv ð f y := by
simp only [â contDiffOn_univ, â differentiableOn_univ, â fderivWithin_univ,
contDiffOn_succ_iff_fderivWithin uniqueDiffOn_univ]
theorem contDiff_one_iff_fderiv : ContDiff ð 1 f â Differentiable ð f â§ Continuous (fderiv ð f) :=
contDiff_succ_iff_fderiv.trans <| Iff.rfl.and contDiff_zero
/-- A function is `C^â` if and only if it is differentiable,
and its derivative (formulated in terms of `fderiv`) is `C^â`. -/
theorem contDiff_top_iff_fderiv :
ContDiff ð â f â Differentiable ð f â§ ContDiff ð â fun y => fderiv ð f y := by
simp only [â contDiffOn_univ, â differentiableOn_univ, â fderivWithin_univ]
rw [contDiffOn_top_iff_fderivWithin uniqueDiffOn_univ]
theorem ContDiff.continuous_fderiv (h : ContDiff ð n f) (hn : 1 †n) :
Continuous fun x => fderiv ð f x :=
(contDiff_succ_iff_fderiv.1 (h.of_le hn)).2.continuous
/-- If a function is at least `C^1`, its bundled derivative (mapping `(x, v)` to `Df(x) v`) is
continuous. -/
theorem ContDiff.continuous_fderiv_apply (h : ContDiff ð n f) (hn : 1 †n) :
Continuous fun p : E Ã E => (fderiv ð f p.1 : E â F) p.2 :=
have A : Continuous fun q : (E âL[ð] F) Ã E => q.1 q.2 := isBoundedBilinearMap_apply.continuous
have B : Continuous fun p : E Ã E => (fderiv ð f p.1, p.2) :=
((h.continuous_fderiv hn).comp continuous_fst).prod_mk continuous_snd
A.comp B
|
Analysis\Calculus\ContDiff\FiniteDimension.lean | /-
Copyright (c) 2019 Sébastien Gouëzel. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Sébastien Gouëzel, Floris van Doorn
-/
import Mathlib.Analysis.Calculus.ContDiff.Basic
import Mathlib.Analysis.Normed.Module.FiniteDimension
/-!
# Higher differentiability in finite dimensions.
-/
noncomputable section
universe uD uE uF uG
variable {ð : Type*} [NontriviallyNormedField ð] {D : Type uD} [NormedAddCommGroup D]
[NormedSpace ð D] {E : Type uE} [NormedAddCommGroup E] [NormedSpace ð E] {F : Type uF}
[NormedAddCommGroup F] [NormedSpace ð F] {G : Type uG} [NormedAddCommGroup G] [NormedSpace ð G]
/-! ### Finite dimensional results -/
section FiniteDimensional
open Function FiniteDimensional
variable [CompleteSpace ð]
/-- A family of continuous linear maps is `C^n` on `s` if all its applications are. -/
theorem contDiffOn_clm_apply {n : ââ} {f : E â F âL[ð] G} {s : Set E} [FiniteDimensional ð F] :
ContDiffOn ð n f s â â y, ContDiffOn ð n (fun x => f x y) s := by
refine âšfun h y => h.clm_apply contDiffOn_const, fun h => ?_â©
let d := finrank ð F
have hd : d = finrank ð (Fin d â ð) := (finrank_fin_fun ð).symm
let eâ := ContinuousLinearEquiv.ofFinrankEq hd
let eâ := (eâ.arrowCongr (1 : G âL[ð] G)).trans (ContinuousLinearEquiv.piRing (Fin d))
rw [â id_comp f, â eâ.symm_comp_self]
exact eâ.symm.contDiff.comp_contDiffOn (contDiffOn_pi.mpr fun i => h _)
theorem contDiff_clm_apply_iff {n : ââ} {f : E â F âL[ð] G} [FiniteDimensional ð F] :
ContDiff ð n f â â y, ContDiff ð n fun x => f x y := by
simp_rw [â contDiffOn_univ, contDiffOn_clm_apply]
/-- This is a useful lemma to prove that a certain operation preserves functions being `C^n`.
When you do induction on `n`, this gives a useful characterization of a function being `C^(n+1)`,
assuming you have already computed the derivative. The advantage of this version over
`contDiff_succ_iff_fderiv` is that both occurrences of `ContDiff` are for functions with the same
domain and codomain (`E` and `F`). This is not the case for `contDiff_succ_iff_fderiv`, which
often requires an inconvenient need to generalize `F`, which results in universe issues
(see the discussion in the section of `ContDiff.comp`).
This lemma avoids these universe issues, but only applies for finite dimensional `E`. -/
theorem contDiff_succ_iff_fderiv_apply [FiniteDimensional ð E] {n : â} {f : E â F} :
ContDiff ð (n + 1 : â) f â Differentiable ð f â§ â y, ContDiff ð n fun x => fderiv ð f x y := by
rw [contDiff_succ_iff_fderiv, contDiff_clm_apply_iff]
theorem contDiffOn_succ_of_fderiv_apply [FiniteDimensional ð E] {n : â} {f : E â F} {s : Set E}
(hf : DifferentiableOn ð f s) (h : â y, ContDiffOn ð n (fun x => fderivWithin ð f s x y) s) :
ContDiffOn ð (n + 1 : â) f s :=
contDiffOn_succ_of_fderivWithin hf <| contDiffOn_clm_apply.mpr h
theorem contDiffOn_succ_iff_fderiv_apply [FiniteDimensional ð E] {n : â} {f : E â F} {s : Set E}
(hs : UniqueDiffOn ð s) :
ContDiffOn ð (n + 1 : â) f s â
DifferentiableOn ð f s â§ â y, ContDiffOn ð n (fun x => fderivWithin ð f s x y) s := by
rw [contDiffOn_succ_iff_fderivWithin hs, contDiffOn_clm_apply]
end FiniteDimensional
|
Analysis\Calculus\ContDiff\RCLike.lean | /-
Copyright (c) 2019 Sébastien Gouëzel. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Sébastien Gouëzel, Floris van Doorn
-/
import Mathlib.Analysis.Calculus.ContDiff.Defs
import Mathlib.Analysis.Calculus.MeanValue
/-!
# Higher differentiability over `â` or `â`
-/
noncomputable section
open Set Fin Filter Function
open scoped NNReal Topology
section Real
/-!
### Results over `â` or `â`
The results in this section rely on the Mean Value Theorem, and therefore hold only over `â` (and
its extension fields such as `â`).
-/
variable {n : ââ} {ð : Type*} [RCLike ð] {E' : Type*} [NormedAddCommGroup E'] [NormedSpace ð E']
{F' : Type*} [NormedAddCommGroup F'] [NormedSpace ð F']
/-- If a function has a Taylor series at order at least 1, then at points in the interior of the
domain of definition, the term of order 1 of this series is a strict derivative of `f`. -/
theorem HasFTaylorSeriesUpToOn.hasStrictFDerivAt {s : Set E'} {f : E' â F'} {x : E'}
{p : E' â FormalMultilinearSeries ð E' F'} (hf : HasFTaylorSeriesUpToOn n f p s) (hn : 1 †n)
(hs : s â ð x) : HasStrictFDerivAt f ((continuousMultilinearCurryFin1 ð E' F') (p x 1)) x :=
hasStrictFDerivAt_of_hasFDerivAt_of_continuousAt (hf.eventually_hasFDerivAt hn hs) <|
(continuousMultilinearCurryFin1 ð E' F').continuousAt.comp <| (hf.cont 1 hn).continuousAt hs
/-- If a function is `C^n` with `1 †n` around a point, and its derivative at that point is given to
us as `f'`, then `f'` is also a strict derivative. -/
theorem ContDiffAt.hasStrictFDerivAt' {f : E' â F'} {f' : E' âL[ð] F'} {x : E'}
(hf : ContDiffAt ð n f x) (hf' : HasFDerivAt f f' x) (hn : 1 †n) :
HasStrictFDerivAt f f' x := by
rcases hf 1 hn with âšu, H, p, hpâ©
simp only [nhdsWithin_univ, mem_univ, insert_eq_of_mem] at H
have := hp.hasStrictFDerivAt le_rfl H
rwa [hf'.unique this.hasFDerivAt]
/-- If a function is `C^n` with `1 †n` around a point, and its derivative at that point is given to
us as `f'`, then `f'` is also a strict derivative. -/
theorem ContDiffAt.hasStrictDerivAt' {f : ð â F'} {f' : F'} {x : ð} (hf : ContDiffAt ð n f x)
(hf' : HasDerivAt f f' x) (hn : 1 †n) : HasStrictDerivAt f f' x :=
hf.hasStrictFDerivAt' hf' hn
/-- If a function is `C^n` with `1 †n` around a point, then the derivative of `f` at this point
is also a strict derivative. -/
theorem ContDiffAt.hasStrictFDerivAt {f : E' â F'} {x : E'} (hf : ContDiffAt ð n f x) (hn : 1 †n) :
HasStrictFDerivAt f (fderiv ð f x) x :=
hf.hasStrictFDerivAt' (hf.differentiableAt hn).hasFDerivAt hn
/-- If a function is `C^n` with `1 †n` around a point, then the derivative of `f` at this point
is also a strict derivative. -/
theorem ContDiffAt.hasStrictDerivAt {f : ð â F'} {x : ð} (hf : ContDiffAt ð n f x) (hn : 1 †n) :
HasStrictDerivAt f (deriv f x) x :=
(hf.hasStrictFDerivAt hn).hasStrictDerivAt
/-- If a function is `C^n` with `1 †n`, then the derivative of `f` is also a strict derivative. -/
theorem ContDiff.hasStrictFDerivAt {f : E' â F'} {x : E'} (hf : ContDiff ð n f) (hn : 1 †n) :
HasStrictFDerivAt f (fderiv ð f x) x :=
hf.contDiffAt.hasStrictFDerivAt hn
/-- If a function is `C^n` with `1 †n`, then the derivative of `f` is also a strict derivative. -/
theorem ContDiff.hasStrictDerivAt {f : ð â F'} {x : ð} (hf : ContDiff ð n f) (hn : 1 †n) :
HasStrictDerivAt f (deriv f x) x :=
hf.contDiffAt.hasStrictDerivAt hn
/-- If `f` has a formal Taylor series `p` up to order `1` on `{x} ⪠s`, where `s` is a convex set,
and `âp x 1ââ < K`, then `f` is `K`-Lipschitz in a neighborhood of `x` within `s`. -/
theorem HasFTaylorSeriesUpToOn.exists_lipschitzOnWith_of_nnnorm_lt {E F : Type*}
[NormedAddCommGroup E] [NormedSpace â E] [NormedAddCommGroup F] [NormedSpace â F] {f : E â F}
{p : E â FormalMultilinearSeries â E F} {s : Set E} {x : E}
(hf : HasFTaylorSeriesUpToOn 1 f p (insert x s)) (hs : Convex â s) (K : ââ¥0)
(hK : âp x 1ââ < K) : â t â ð[s] x, LipschitzOnWith K f t := by
set f' := fun y => continuousMultilinearCurryFin1 â E F (p y 1)
have hder : â y â s, HasFDerivWithinAt f (f' y) s y := fun y hy =>
(hf.hasFDerivWithinAt le_rfl (subset_insert x s hy)).mono (subset_insert x s)
have hcont : ContinuousWithinAt f' s x :=
(continuousMultilinearCurryFin1 â E F).continuousAt.comp_continuousWithinAt
((hf.cont _ le_rfl _ (mem_insert _ _)).mono (subset_insert x s))
replace hK : âf' xââ < K := by simpa only [f', LinearIsometryEquiv.nnnorm_map]
exact
hs.exists_nhdsWithin_lipschitzOnWith_of_hasFDerivWithinAt_of_nnnorm_lt
(eventually_nhdsWithin_iff.2 <| eventually_of_forall hder) hcont K hK
/-- If `f` has a formal Taylor series `p` up to order `1` on `{x} ⪠s`, where `s` is a convex set,
then `f` is Lipschitz in a neighborhood of `x` within `s`. -/
theorem HasFTaylorSeriesUpToOn.exists_lipschitzOnWith {E F : Type*} [NormedAddCommGroup E]
[NormedSpace â E] [NormedAddCommGroup F] [NormedSpace â F] {f : E â F}
{p : E â FormalMultilinearSeries â E F} {s : Set E} {x : E}
(hf : HasFTaylorSeriesUpToOn 1 f p (insert x s)) (hs : Convex â s) :
â K, â t â ð[s] x, LipschitzOnWith K f t :=
(exists_gt _).imp <| hf.exists_lipschitzOnWith_of_nnnorm_lt hs
/-- If `f` is `C^1` within a convex set `s` at `x`, then it is Lipschitz on a neighborhood of `x`
within `s`. -/
theorem ContDiffWithinAt.exists_lipschitzOnWith {E F : Type*} [NormedAddCommGroup E]
[NormedSpace â E] [NormedAddCommGroup F] [NormedSpace â F] {f : E â F} {s : Set E} {x : E}
(hf : ContDiffWithinAt â 1 f s x) (hs : Convex â s) :
â K : ââ¥0, â t â ð[s] x, LipschitzOnWith K f t := by
rcases hf 1 le_rfl with âšt, hst, p, hpâ©
rcases Metric.mem_nhdsWithin_iff.mp hst with âšÎµ, ε0, hεâ©
replace hp : HasFTaylorSeriesUpToOn 1 f p (Metric.ball x ε ⩠insert x s) := hp.mono hε
clear hst hε t
rw [â insert_eq_of_mem (Metric.mem_ball_self ε0), â insert_inter_distrib] at hp
rcases hp.exists_lipschitzOnWith ((convex_ball _ _).inter hs) with âšK, t, hst, hftâ©
rw [inter_comm, â nhdsWithin_restrict' _ (Metric.ball_mem_nhds _ ε0)] at hst
exact âšK, t, hst, hftâ©
/-- If `f` is `C^1` at `x` and `K > âfderiv ð f xâ`, then `f` is `K`-Lipschitz in a neighborhood of
`x`. -/
theorem ContDiffAt.exists_lipschitzOnWith_of_nnnorm_lt {f : E' â F'} {x : E'}
(hf : ContDiffAt ð 1 f x) (K : ââ¥0) (hK : âfderiv ð f xââ < K) :
â t â ð x, LipschitzOnWith K f t :=
(hf.hasStrictFDerivAt le_rfl).exists_lipschitzOnWith_of_nnnorm_lt K hK
/-- If `f` is `C^1` at `x`, then `f` is Lipschitz in a neighborhood of `x`. -/
theorem ContDiffAt.exists_lipschitzOnWith {f : E' â F'} {x : E'} (hf : ContDiffAt ð 1 f x) :
â K, â t â ð x, LipschitzOnWith K f t :=
(hf.hasStrictFDerivAt le_rfl).exists_lipschitzOnWith
/-- If `f` is `C^1`, it is locally Lipschitz. -/
lemma ContDiff.locallyLipschitz {f : E' â F'} (hf : ContDiff ð 1 f) : LocallyLipschitz f := by
intro x
rcases hf.contDiffAt.exists_lipschitzOnWith with âšK, t, ht, hfâ©
use K, t
/-- A `C^1` function with compact support is Lipschitz. -/
theorem ContDiff.lipschitzWith_of_hasCompactSupport {f : E' â F'} {n : ââ}
(hf : HasCompactSupport f) (h'f : ContDiff ð n f) (hn : 1 †n) :
â C, LipschitzWith C f := by
obtain âšC, hCâ© := (hf.fderiv ð).exists_bound_of_continuous (h'f.continuous_fderiv hn)
refine âšâšmax C 0, le_max_right _ _â©, ?_â©
apply lipschitzWith_of_nnnorm_fderiv_le (h'f.differentiable hn) (fun x ⊠?_)
simp [â NNReal.coe_le_coe, hC x]
end Real
|
Analysis\Calculus\Deriv\Add.lean | /-
Copyright (c) 2019 Gabriel Ebner. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Gabriel Ebner, Sébastien Gouëzel, Yury Kudryashov, Anatole Dedecker
-/
import Mathlib.Analysis.Calculus.Deriv.Basic
import Mathlib.Analysis.Calculus.FDeriv.Add
/-!
# One-dimensional derivatives of sums etc
In this file we prove formulas about derivatives of `f + g`, `-f`, `f - g`, and `â i, f i x` for
functions from the base field to a normed space over this field.
For a more detailed overview of one-dimensional derivatives in mathlib, see the module docstring of
`Analysis/Calculus/Deriv/Basic`.
## Keywords
derivative
-/
universe u v w
open scoped Classical
open Topology Filter ENNReal
open Filter Asymptotics Set
variable {ð : Type u} [NontriviallyNormedField ð]
variable {F : Type v} [NormedAddCommGroup F] [NormedSpace ð F]
variable {E : Type w} [NormedAddCommGroup E] [NormedSpace ð E]
variable {f fâ fâ g : ð â F}
variable {f' fâ' fâ' g' : F}
variable {x : ð}
variable {s t : Set ð}
variable {L : Filter ð}
section Add
/-! ### Derivative of the sum of two functions -/
/-- **Sum rule**: if `f` and `g` have derivatives `f'` and `g'` along a filter, then `f + g` has
derivative `f' + g'` along that filter. -/
nonrec theorem HasDerivAtFilter.add (hf : HasDerivAtFilter f f' x L)
(hg : HasDerivAtFilter g g' x L) : HasDerivAtFilter (fun y => f y + g y) (f' + g') x L := by
-- Deduce the scalar statement from the corresponding rule for Frechet derivatives.
simpa using (hf.add hg).hasDerivAtFilter
/-- Sum rule for strict derivatives. -/
nonrec theorem HasStrictDerivAt.add (hf : HasStrictDerivAt f f' x) (hg : HasStrictDerivAt g g' x) :
HasStrictDerivAt (fun y => f y + g y) (f' + g') x := by simpa using (hf.add hg).hasStrictDerivAt
/-- Sum rule for derivatives within a set. -/
nonrec theorem HasDerivWithinAt.add (hf : HasDerivWithinAt f f' s x)
(hg : HasDerivWithinAt g g' s x) : HasDerivWithinAt (fun y => f y + g y) (f' + g') s x :=
hf.add hg
/-- Sum rule for derivatives at a point. -/
nonrec theorem HasDerivAt.add (hf : HasDerivAt f f' x) (hg : HasDerivAt g g' x) :
HasDerivAt (fun x => f x + g x) (f' + g') x :=
hf.add hg
/-- `derivWithin` is additive on functions differentiable within `s` at `x`, at a point where
the derivative within `s` is unique. -/
theorem derivWithin_add (hxs : UniqueDiffWithinAt ð s x) (hf : DifferentiableWithinAt ð f s x)
(hg : DifferentiableWithinAt ð g s x) :
derivWithin (fun y => f y + g y) s x = derivWithin f s x + derivWithin g s x :=
(hf.hasDerivWithinAt.add hg.hasDerivWithinAt).derivWithin hxs
/-- `deriv` is additive on functions differentiable at `x`. -/
@[simp]
theorem deriv_add (hf : DifferentiableAt ð f x) (hg : DifferentiableAt ð g x) :
deriv (fun y => f y + g y) x = deriv f x + deriv g x :=
(hf.hasDerivAt.add hg.hasDerivAt).deriv
/-- Adding a constant does not change the strict derivative. -/
theorem HasStrictDerivAt.add_const (c : F) (hf : HasStrictDerivAt f f' x) :
HasStrictDerivAt (fun y ⊠f y + c) f' x :=
add_zero f' âž hf.add (hasStrictDerivAt_const x c)
/-- Adding a constant does not change the derivative along a filter. -/
theorem HasDerivAtFilter.add_const (hf : HasDerivAtFilter f f' x L) (c : F) :
HasDerivAtFilter (fun y => f y + c) f' x L :=
add_zero f' âž hf.add (hasDerivAtFilter_const x L c)
/-- Adding a constant does not change the derivative within a set. -/
nonrec theorem HasDerivWithinAt.add_const (hf : HasDerivWithinAt f f' s x) (c : F) :
HasDerivWithinAt (fun y => f y + c) f' s x :=
hf.add_const c
/-- Adding a constant does not change the derivative at a point. -/
nonrec theorem HasDerivAt.add_const (hf : HasDerivAt f f' x) (c : F) :
HasDerivAt (fun x => f x + c) f' x :=
hf.add_const c
/-- Adding a constant does not change `derivWithin`. -/
theorem derivWithin_add_const (hxs : UniqueDiffWithinAt ð s x) (c : F) :
derivWithin (fun y => f y + c) s x = derivWithin f s x := by
simp only [derivWithin, fderivWithin_add_const hxs]
/-- Adding a constant does not change `deriv`. -/
theorem deriv_add_const (c : F) : deriv (fun y => f y + c) x = deriv f x := by
simp only [deriv, fderiv_add_const]
/-- Function-level version of `deriv_add_const`. -/
@[simp]
theorem deriv_add_const' (c : F) : (deriv fun y => f y + c) = deriv f :=
funext fun _ => deriv_add_const c
/-- Adding a constant on the left does not change the strict derivative. -/
theorem HasStrictDerivAt.const_add (c : F) (hf : HasStrictDerivAt f f' x) :
HasStrictDerivAt (fun y ⊠c + f y) f' x :=
zero_add f' âž (hasStrictDerivAt_const x c).add hf
/-- Adding a constant on the left does not change the derivative along a filter. -/
theorem HasDerivAtFilter.const_add (c : F) (hf : HasDerivAtFilter f f' x L) :
HasDerivAtFilter (fun y => c + f y) f' x L :=
zero_add f' âž (hasDerivAtFilter_const x L c).add hf
/-- Adding a constant on the left does not change the derivative within a set. -/
nonrec theorem HasDerivWithinAt.const_add (c : F) (hf : HasDerivWithinAt f f' s x) :
HasDerivWithinAt (fun y => c + f y) f' s x :=
hf.const_add c
/-- Adding a constant on the left does not change the derivative at a point. -/
nonrec theorem HasDerivAt.const_add (c : F) (hf : HasDerivAt f f' x) :
HasDerivAt (fun x => c + f x) f' x :=
hf.const_add c
/-- Adding a constant on the left does not change `derivWithin`. -/
theorem derivWithin_const_add (hxs : UniqueDiffWithinAt ð s x) (c : F) :
derivWithin (fun y => c + f y) s x = derivWithin f s x := by
simp only [derivWithin, fderivWithin_const_add hxs]
/-- Adding a constant on the left does not change `deriv`. -/
theorem deriv_const_add (c : F) : deriv (fun y => c + f y) x = deriv f x := by
simp only [deriv, fderiv_const_add]
/-- Function-level version of `deriv_const_add`. -/
@[simp]
theorem deriv_const_add' (c : F) : (deriv fun y => c + f y) = deriv f :=
funext fun _ => deriv_const_add c
end Add
section Sum
/-! ### Derivative of a finite sum of functions -/
variable {ι : Type*} {u : Finset ι} {A : ι â ð â F} {A' : ι â F}
/-- Derivative of a finite sum of functions, along a filter. -/
theorem HasDerivAtFilter.sum (h : â i â u, HasDerivAtFilter (A i) (A' i) x L) :
HasDerivAtFilter (fun y => â i â u, A i y) (â i â u, A' i) x L := by
-- Reduce to the Frechet statement and evaluate the sum of linear maps at the scalar `1`.
simpa [ContinuousLinearMap.sum_apply] using (HasFDerivAtFilter.sum h).hasDerivAtFilter
/-- Strict derivative of a finite sum of functions. -/
theorem HasStrictDerivAt.sum (h : â i â u, HasStrictDerivAt (A i) (A' i) x) :
HasStrictDerivAt (fun y => â i â u, A i y) (â i â u, A' i) x := by
simpa [ContinuousLinearMap.sum_apply] using (HasStrictFDerivAt.sum h).hasStrictDerivAt
/-- Derivative of a finite sum of functions, within a set. -/
theorem HasDerivWithinAt.sum (h : â i â u, HasDerivWithinAt (A i) (A' i) s x) :
HasDerivWithinAt (fun y => â i â u, A i y) (â i â u, A' i) s x :=
HasDerivAtFilter.sum h
/-- Derivative of a finite sum of functions, at a point. -/
theorem HasDerivAt.sum (h : â i â u, HasDerivAt (A i) (A' i) x) :
HasDerivAt (fun y => â i â u, A i y) (â i â u, A' i) x :=
HasDerivAtFilter.sum h
/-- `derivWithin` commutes with finite sums of differentiable functions. -/
theorem derivWithin_sum (hxs : UniqueDiffWithinAt ð s x)
(h : â i â u, DifferentiableWithinAt ð (A i) s x) :
derivWithin (fun y => â i â u, A i y) s x = â i â u, derivWithin (A i) s x :=
(HasDerivWithinAt.sum fun i hi => (h i hi).hasDerivWithinAt).derivWithin hxs
/-- `deriv` commutes with finite sums of differentiable functions. -/
@[simp]
theorem deriv_sum (h : â i â u, DifferentiableAt ð (A i) x) :
deriv (fun y => â i â u, A i y) x = â i â u, deriv (A i) x :=
(HasDerivAt.sum fun i hi => (h i hi).hasDerivAt).deriv
end Sum
section Neg
/-! ### Derivative of the negative of a function -/
/-- Derivative of the negative of a function, along a filter. -/
nonrec theorem HasDerivAtFilter.neg (h : HasDerivAtFilter f f' x L) :
HasDerivAtFilter (fun x => -f x) (-f') x L := by simpa using h.neg.hasDerivAtFilter
/-- Derivative of the negative of a function, within a set. -/
nonrec theorem HasDerivWithinAt.neg (h : HasDerivWithinAt f f' s x) :
HasDerivWithinAt (fun x => -f x) (-f') s x :=
h.neg
/-- Derivative of the negative of a function, at a point. -/
nonrec theorem HasDerivAt.neg (h : HasDerivAt f f' x) : HasDerivAt (fun x => -f x) (-f') x :=
h.neg
/-- Strict derivative of the negative of a function. -/
nonrec theorem HasStrictDerivAt.neg (h : HasStrictDerivAt f f' x) :
HasStrictDerivAt (fun x => -f x) (-f') x := by simpa using h.neg.hasStrictDerivAt
/-- `derivWithin` of the negative is the negative of `derivWithin`. -/
theorem derivWithin.neg (hxs : UniqueDiffWithinAt ð s x) :
derivWithin (fun y => -f y) s x = -derivWithin f s x := by
simp only [derivWithin, fderivWithin_neg hxs, ContinuousLinearMap.neg_apply]
/-- `deriv` of the negative is the negative of `deriv`. -/
theorem deriv.neg : deriv (fun y => -f y) x = -deriv f x := by
simp only [deriv, fderiv_neg, ContinuousLinearMap.neg_apply]
/-- Function-level version of `deriv.neg`. -/
@[simp]
theorem deriv.neg' : (deriv fun y => -f y) = fun x => -deriv f x :=
funext fun _ => deriv.neg
end Neg
section Neg2
/-! ### Derivative of the negation function (i.e `Neg.neg`) -/
variable (s x L)
/-- The negation function has derivative `-1` along any filter. -/
theorem hasDerivAtFilter_neg : HasDerivAtFilter Neg.neg (-1) x L :=
HasDerivAtFilter.neg <| hasDerivAtFilter_id _ _
/-- The negation function has derivative `-1` within any set. -/
theorem hasDerivWithinAt_neg : HasDerivWithinAt Neg.neg (-1) s x :=
hasDerivAtFilter_neg _ _
/-- The negation function has derivative `-1`. -/
theorem hasDerivAt_neg : HasDerivAt Neg.neg (-1) x :=
hasDerivAtFilter_neg _ _
/-- Lambda-expressed version of `hasDerivAt_neg`. -/
theorem hasDerivAt_neg' : HasDerivAt (fun x => -x) (-1) x :=
hasDerivAtFilter_neg _ _
/-- The negation function has strict derivative `-1`. -/
theorem hasStrictDerivAt_neg : HasStrictDerivAt Neg.neg (-1) x :=
HasStrictDerivAt.neg <| hasStrictDerivAt_id _
/-- `deriv` of the negation function is `-1`. -/
theorem deriv_neg : deriv Neg.neg x = -1 :=
HasDerivAt.deriv (hasDerivAt_neg x)
/-- Function-level version of `deriv_neg`. -/
@[simp]
theorem deriv_neg' : deriv (Neg.neg : ð â ð) = fun _ => -1 :=
funext deriv_neg
/-- Lambda-expressed version of `deriv_neg`. -/
@[simp]
theorem deriv_neg'' : deriv (fun x : ð => -x) x = -1 :=
deriv_neg x
/-- `derivWithin` of negation is `-1` at points of unique differentiability. -/
theorem derivWithin_neg (hxs : UniqueDiffWithinAt ð s x) : derivWithin Neg.neg s x = -1 :=
(hasDerivWithinAt_neg x s).derivWithin hxs
/-- The negation function is differentiable. -/
theorem differentiable_neg : Differentiable ð (Neg.neg : ð â ð) :=
Differentiable.neg differentiable_id
/-- The negation function is differentiable on any set. -/
theorem differentiableOn_neg : DifferentiableOn ð (Neg.neg : ð â ð) s :=
DifferentiableOn.neg differentiableOn_id
/-- The absolute value function is not differentiable at `0`. -/
theorem not_differentiableAt_abs_zero : ¬ DifferentiableAt â (abs : â â â) 0 := by
intro h
-- Were `abs` differentiable at `0`, both one-sided derivatives would equal the two-sided
-- one. On the right, `abs` agrees with the identity, so the derivative computes to `1` ...
have hâ : deriv abs (0 : â) = 1 :=
(uniqueDiffOn_Ici _ _ Set.left_mem_Ici).eq_deriv _ h.hasDerivAt.hasDerivWithinAt <|
(hasDerivWithinAt_id _ _).congr_of_mem (fun _ h ⊠abs_of_nonneg h) Set.left_mem_Ici
-- ... while on the left, `abs` agrees with negation, so it computes to `-1`: contradiction.
have hâ : deriv abs (0 : â) = -1 :=
(uniqueDiffOn_Iic _ _ Set.right_mem_Iic).eq_deriv _ h.hasDerivAt.hasDerivWithinAt <|
(hasDerivWithinAt_neg _ _).congr_of_mem (fun _ h ⊠abs_of_nonpos h) Set.right_mem_Iic
linarith
/-- `f` is differentiable at `-a` iff the composition of `f` with negation is differentiable
at `a`. -/
lemma differentiableAt_comp_neg_iff {a : ð} :
DifferentiableAt ð f (-a) â DifferentiableAt ð (fun x ⊠f (-x)) a := by
refine âšfun H ⊠H.comp a differentiable_neg.differentiableAt, fun H ⊠?_â©
-- For the converse, precompose with negation once more and simplify the double negation.
convert ((neg_neg a).symm âž H).comp (-a) differentiable_neg.differentiableAt
ext
simp only [Function.comp_apply, neg_neg]
end Neg2
section Sub
/-! ### Derivative of the difference of two functions -/
/-- **Difference rule**: derivative of `f - g` along a filter. -/
theorem HasDerivAtFilter.sub (hf : HasDerivAtFilter f f' x L) (hg : HasDerivAtFilter g g' x L) :
HasDerivAtFilter (fun x => f x - g x) (f' - g') x L := by
-- Rewrite the difference as a sum with the negative and combine the two rules.
simpa only [sub_eq_add_neg] using hf.add hg.neg
/-- Difference rule for derivatives within a set. -/
nonrec theorem HasDerivWithinAt.sub (hf : HasDerivWithinAt f f' s x)
(hg : HasDerivWithinAt g g' s x) : HasDerivWithinAt (fun x => f x - g x) (f' - g') s x :=
hf.sub hg
/-- Difference rule for derivatives at a point. -/
nonrec theorem HasDerivAt.sub (hf : HasDerivAt f f' x) (hg : HasDerivAt g g' x) :
HasDerivAt (fun x => f x - g x) (f' - g') x :=
hf.sub hg
/-- Difference rule for strict derivatives. -/
theorem HasStrictDerivAt.sub (hf : HasStrictDerivAt f f' x) (hg : HasStrictDerivAt g g' x) :
HasStrictDerivAt (fun x => f x - g x) (f' - g') x := by
simpa only [sub_eq_add_neg] using hf.add hg.neg
/-- `derivWithin` of a difference of differentiable functions. -/
theorem derivWithin_sub (hxs : UniqueDiffWithinAt ð s x) (hf : DifferentiableWithinAt ð f s x)
(hg : DifferentiableWithinAt ð g s x) :
derivWithin (fun y => f y - g y) s x = derivWithin f s x - derivWithin g s x :=
(hf.hasDerivWithinAt.sub hg.hasDerivWithinAt).derivWithin hxs
/-- `deriv` of a difference of differentiable functions. -/
@[simp]
theorem deriv_sub (hf : DifferentiableAt ð f x) (hg : DifferentiableAt ð g x) :
deriv (fun y => f y - g y) x = deriv f x - deriv g x :=
(hf.hasDerivAt.sub hg.hasDerivAt).deriv
/-- Subtracting a constant does not change the derivative along a filter. -/
theorem HasDerivAtFilter.sub_const (hf : HasDerivAtFilter f f' x L) (c : F) :
HasDerivAtFilter (fun x => f x - c) f' x L := by
simpa only [sub_eq_add_neg] using hf.add_const (-c)
/-- Subtracting a constant does not change the derivative within a set. -/
nonrec theorem HasDerivWithinAt.sub_const (hf : HasDerivWithinAt f f' s x) (c : F) :
HasDerivWithinAt (fun x => f x - c) f' s x :=
hf.sub_const c
/-- Subtracting a constant does not change the derivative at a point. -/
nonrec theorem HasDerivAt.sub_const (hf : HasDerivAt f f' x) (c : F) :
HasDerivAt (fun x => f x - c) f' x :=
hf.sub_const c
/-- Subtracting a constant does not change `derivWithin`. -/
theorem derivWithin_sub_const (hxs : UniqueDiffWithinAt ð s x) (c : F) :
derivWithin (fun y => f y - c) s x = derivWithin f s x := by
simp only [derivWithin, fderivWithin_sub_const hxs]
/-- Subtracting a constant does not change `deriv`. -/
theorem deriv_sub_const (c : F) : deriv (fun y => f y - c) x = deriv f x := by
simp only [deriv, fderiv_sub_const]
/-- Derivative of `c - f` is the negative of the derivative of `f`, along a filter. -/
theorem HasDerivAtFilter.const_sub (c : F) (hf : HasDerivAtFilter f f' x L) :
HasDerivAtFilter (fun x => c - f x) (-f') x L := by
simpa only [sub_eq_add_neg] using hf.neg.const_add c
/-- Derivative of `c - f`, within a set. -/
nonrec theorem HasDerivWithinAt.const_sub (c : F) (hf : HasDerivWithinAt f f' s x) :
HasDerivWithinAt (fun x => c - f x) (-f') s x :=
hf.const_sub c
/-- Strict derivative of `c - f`. -/
theorem HasStrictDerivAt.const_sub (c : F) (hf : HasStrictDerivAt f f' x) :
HasStrictDerivAt (fun x => c - f x) (-f') x := by
simpa only [sub_eq_add_neg] using hf.neg.const_add c
/-- Derivative of `c - f`, at a point. -/
nonrec theorem HasDerivAt.const_sub (c : F) (hf : HasDerivAt f f' x) :
HasDerivAt (fun x => c - f x) (-f') x :=
hf.const_sub c
/-- `derivWithin` of `c - f` is the negative of `derivWithin f`. -/
theorem derivWithin_const_sub (hxs : UniqueDiffWithinAt ð s x) (c : F) :
derivWithin (fun y => c - f y) s x = -derivWithin f s x := by
simp [derivWithin, fderivWithin_const_sub hxs]
/-- `deriv` of `c - f` is the negative of `deriv f`. -/
theorem deriv_const_sub (c : F) : deriv (fun y => c - f y) x = -deriv f x := by
-- Reduce to the `derivWithin` statement over `univ`.
simp only [â derivWithin_univ,
derivWithin_const_sub (uniqueDiffWithinAt_univ : UniqueDiffWithinAt ð _ _)]
end Sub
|
Analysis\Calculus\Deriv\AffineMap.lean | /-
Copyright (c) 2023 Yury Kudryashov. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yury Kudryashov
-/
import Mathlib.Analysis.Calculus.Deriv.Add
import Mathlib.Analysis.Calculus.Deriv.Linear
import Mathlib.LinearAlgebra.AffineSpace.AffineMap
/-!
# Derivatives of affine maps
In this file we prove formulas for one-dimensional derivatives of affine maps `f : ð âáµ[ð] E`. We
also specialise some of these results to `AffineMap.lineMap` because it is useful to transfer MVT
from dimension 1 to a domain in higher dimension.
## TODO
Add theorems about `deriv`s and `fderiv`s of `ContinuousAffineMap`s once they will be ported to
Mathlib 4.
## Keywords
affine map, derivative, differentiability
-/
variable {ð : Type*} [NontriviallyNormedField ð]
{E : Type*} [NormedAddCommGroup E] [NormedSpace ð E]
(f : ð âáµ[ð] E) {a b : E} {L : Filter ð} {s : Set ð} {x : ð}
namespace AffineMap
/-- An affine map has strict derivative `f.linear 1` at every point. -/
theorem hasStrictDerivAt : HasStrictDerivAt f (f.linear 1) x := by
-- Decompose `f` as its linear part plus the constant `f 0`.
rw [f.decomp]
exact f.linear.hasStrictDerivAt.add_const (f 0)
/-- An affine map has derivative `f.linear 1` along every filter. -/
theorem hasDerivAtFilter : HasDerivAtFilter f (f.linear 1) x L := by
rw [f.decomp]
exact f.linear.hasDerivAtFilter.add_const (f 0)
/-- An affine map has derivative `f.linear 1` within every set. -/
theorem hasDerivWithinAt : HasDerivWithinAt f (f.linear 1) s x := f.hasDerivAtFilter
/-- An affine map has derivative `f.linear 1` at every point. -/
theorem hasDerivAt : HasDerivAt f (f.linear 1) x := f.hasDerivAtFilter
/-- `derivWithin` of an affine map is `f.linear 1`. -/
protected theorem derivWithin (hs : UniqueDiffWithinAt ð s x) :
derivWithin f s x = f.linear 1 :=
f.hasDerivWithinAt.derivWithin hs
/-- `deriv` of an affine map is `f.linear 1`. -/
@[simp] protected theorem deriv : deriv f x = f.linear 1 := f.hasDerivAt.deriv
/-- Affine maps are differentiable at every point. -/
protected theorem differentiableAt : DifferentiableAt ð f x := f.hasDerivAt.differentiableAt
/-- Affine maps are differentiable. -/
protected theorem differentiable : Differentiable ð f := fun _ ⊠f.differentiableAt
/-- Affine maps are differentiable within any set. -/
protected theorem differentiableWithinAt : DifferentiableWithinAt ð f s x :=
f.differentiableAt.differentiableWithinAt
/-- Affine maps are differentiable on any set. -/
protected theorem differentiableOn : DifferentiableOn ð f s := fun _ _ ⊠f.differentiableWithinAt
/-!
### Line map
In this section we specialize some lemmas to `AffineMap.lineMap` because this map is very useful to
deduce higher dimensional lemmas from one-dimensional versions.
-/
/-- `lineMap a b` has strict derivative `b - a` at every point. -/
theorem hasStrictDerivAt_lineMap : HasStrictDerivAt (lineMap a b) (b - a) x := by
simpa using (lineMap a b : ð âáµ[ð] E).hasStrictDerivAt
/-- `lineMap a b` has derivative `b - a` at every point. -/
theorem hasDerivAt_lineMap : HasDerivAt (lineMap a b) (b - a) x :=
hasStrictDerivAt_lineMap.hasDerivAt
/-- `lineMap a b` has derivative `b - a` within every set. -/
theorem hasDerivWithinAt_lineMap : HasDerivWithinAt (lineMap a b) (b - a) s x :=
hasDerivAt_lineMap.hasDerivWithinAt
end AffineMap
|
Analysis\Calculus\Deriv\Basic.lean | /-
Copyright (c) 2019 Gabriel Ebner. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Gabriel Ebner, Sébastien Gouëzel
-/
import Mathlib.Analysis.Calculus.FDeriv.Basic
import Mathlib.Analysis.NormedSpace.OperatorNorm.NormedSpace
/-!
# One-dimensional derivatives
This file defines the derivative of a function `f : ð â F` where `ð` is a
normed field and `F` is a normed space over this field. The derivative of
such a function `f` at a point `x` is given by an element `f' : F`.
The theory is developed analogously to the [Fréchet
derivatives](./fderiv.html). We first introduce predicates defined in terms
of the corresponding predicates for Fréchet derivatives:
- `HasDerivAtFilter f f' x L` states that the function `f` has the
derivative `f'` at the point `x` as `x` goes along the filter `L`.
- `HasDerivWithinAt f f' s x` states that the function `f` has the
derivative `f'` at the point `x` within the subset `s`.
- `HasDerivAt f f' x` states that the function `f` has the derivative `f'`
at the point `x`.
- `HasStrictDerivAt f f' x` states that the function `f` has the derivative `f'`
at the point `x` in the sense of strict differentiability, i.e.,
`f y - f z = (y - z) ⢠f' + o (y - z)` as `y, z â x`.
For the last two notions we also define a functional version:
- `derivWithin f s x` is a derivative of `f` at `x` within `s`. If the
derivative does not exist, then `derivWithin f s x` equals zero.
- `deriv f x` is a derivative of `f` at `x`. If the derivative does not
exist, then `deriv f x` equals zero.
The theorems `fderivWithin_derivWithin` and `fderiv_deriv` show that the
one-dimensional derivatives coincide with the general Fréchet derivatives.
We also show the existence and compute the derivatives of:
- constants
- the identity function
- linear maps (in `Linear.lean`)
- addition (in `Add.lean`)
- sum of finitely many functions (in `Add.lean`)
- negation (in `Add.lean`)
- subtraction (in `Add.lean`)
- star (in `Star.lean`)
- multiplication of two functions in `ð â ð` (in `Mul.lean`)
- multiplication of a function in `ð â ð` and of a function in `ð â E` (in `Mul.lean`)
- powers of a function (in `Pow.lean` and `ZPow.lean`)
- inverse `x â xâ»Â¹` (in `Inv.lean`)
- division (in `Inv.lean`)
- composition of a function in `ð â F` with a function in `ð â ð` (in `Comp.lean`)
- composition of a function in `F â E` with a function in `ð â F` (in `Comp.lean`)
- inverse function (assuming that it exists; the inverse function theorem is in `Inverse.lean`)
- polynomials (in `Polynomial.lean`)
For most binary operations we also define `const_op` and `op_const` theorems for the cases when
the first or second argument is a constant. This makes writing chains of `HasDerivAt`'s easier,
and they more frequently lead to the desired result.
We set up the simplifier so that it can compute the derivative of simple functions. For instance,
```lean
example (x : â) :
deriv (fun x ⊠cos (sin x) * exp x) x = (cos(sin(x))-sin(sin(x))*cos(x))*exp(x) := by
simp; ring
```
The relationship between the derivative of a function and its definition from a standard
undergraduate course as the limit of the slope `(f y - f x) / (y - x)` as `y` tends to `ð[â ] x`
is developed in the file `Slope.lean`.
## Implementation notes
Most of the theorems are direct restatements of the corresponding theorems
for Fréchet derivatives.
The strategy to construct simp lemmas that give the simplifier the possibility to compute
derivatives is the same as the one for differentiability statements, as explained in
`FDeriv/Basic.lean`. See the explanations there.
-/
universe u v w
noncomputable section
open scoped Topology ENNReal NNReal
open Filter Asymptotics Set
open ContinuousLinearMap (smulRight smulRight_one_eq_iff)
variable {ð : Type u} [NontriviallyNormedField ð]
variable {F : Type v} [NormedAddCommGroup F] [NormedSpace ð F]
variable {E : Type w} [NormedAddCommGroup E] [NormedSpace ð E]
/-- `f` has the derivative `f'` at the point `x` as `x` goes along the filter `L`.
That is, `f x' = f x + (x' - x) ⢠f' + o(x' - x)` where `x'` converges along the filter `L`.
-/
def HasDerivAtFilter (f : ð â F) (f' : F) (x : ð) (L : Filter ð) :=
-- Reuses the Frechet predicate with the linear map `smulRight 1 f'` (scalar times `f'`).
HasFDerivAtFilter f (smulRight (1 : ð âL[ð] ð) f') x L
/-- `f` has the derivative `f'` at the point `x` within the subset `s`.
That is, `f x' = f x + (x' - x) ⢠f' + o(x' - x)` where `x'` converges to `x` inside `s`.
-/
def HasDerivWithinAt (f : ð â F) (f' : F) (s : Set ð) (x : ð) :=
-- Specialization of `HasDerivAtFilter` to the filter of neighborhoods within `s`.
HasDerivAtFilter f f' x (ð[s] x)
/-- `f` has the derivative `f'` at the point `x`.
That is, `f x' = f x + (x' - x) ⢠f' + o(x' - x)` where `x'` converges to `x`.
-/
def HasDerivAt (f : ð â F) (f' : F) (x : ð) :=
-- Specialization of `HasDerivAtFilter` to the full neighborhood filter of `x`.
HasDerivAtFilter f f' x (ð x)
/-- `f` has the derivative `f'` at the point `x` in the sense of strict differentiability.
That is, `f y - f z = (y - z) ⢠f' + o(y - z)` as `y, z â x`. -/
def HasStrictDerivAt (f : ð â F) (f' : F) (x : ð) :=
HasStrictFDerivAt f (smulRight (1 : ð âL[ð] ð) f') x
/-- Derivative of `f` at the point `x` within the set `s`, if it exists. Zero otherwise.
If the derivative exists (i.e., `â f', HasDerivWithinAt f f' s x`), then
`f x' = f x + (x' - x) ⢠derivWithin f s x + o(x' - x)` where `x'` converges to `x` inside `s`.
-/
def derivWithin (f : ð â F) (s : Set ð) (x : ð) :=
-- Evaluate the Frechet derivative within `s` at the scalar `1`.
fderivWithin ð f s x 1
/-- Derivative of `f` at the point `x`, if it exists. Zero otherwise.
If the derivative exists (i.e., `â f', HasDerivAt f f' x`), then
`f x' = f x + (x' - x) ⢠deriv f x + o(x' - x)` where `x'` converges to `x`.
-/
def deriv (f : ð â F) (x : ð) :=
-- Evaluate the Frechet derivative at the scalar `1`.
fderiv ð f x 1
variable {f fâ fâ g : ð â F}
variable {f' fâ' fâ' g' : F}
variable {x : ð}
variable {s t : Set ð}
variable {L Lâ Lâ : Filter ð}
/-- Expressing `HasFDerivAtFilter f f' x L` in terms of `HasDerivAtFilter` -/
theorem hasFDerivAtFilter_iff_hasDerivAtFilter {f' : ð âL[ð] F} :
HasFDerivAtFilter f f' x L â HasDerivAtFilter f (f' 1) x L := by simp [HasDerivAtFilter]
/-- Forward direction of `hasFDerivAtFilter_iff_hasDerivAtFilter`. -/
theorem HasFDerivAtFilter.hasDerivAtFilter {f' : ð âL[ð] F} :
HasFDerivAtFilter f f' x L â HasDerivAtFilter f (f' 1) x L :=
hasFDerivAtFilter_iff_hasDerivAtFilter.mp
/-- Expressing `HasFDerivWithinAt f f' s x` in terms of `HasDerivWithinAt` -/
theorem hasFDerivWithinAt_iff_hasDerivWithinAt {f' : ð âL[ð] F} :
HasFDerivWithinAt f f' s x â HasDerivWithinAt f (f' 1) s x :=
hasFDerivAtFilter_iff_hasDerivAtFilter
/-- Expressing `HasDerivWithinAt f f' s x` in terms of `HasFDerivWithinAt` -/
theorem hasDerivWithinAt_iff_hasFDerivWithinAt {f' : F} :
HasDerivWithinAt f f' s x â HasFDerivWithinAt f (smulRight (1 : ð âL[ð] ð) f') s x :=
-- True by definition of `HasDerivWithinAt`.
Iff.rfl
/-- Forward direction of `hasFDerivWithinAt_iff_hasDerivWithinAt`. -/
theorem HasFDerivWithinAt.hasDerivWithinAt {f' : ð âL[ð] F} :
HasFDerivWithinAt f f' s x â HasDerivWithinAt f (f' 1) s x :=
hasFDerivWithinAt_iff_hasDerivWithinAt.mp
/-- Forward direction of `hasDerivWithinAt_iff_hasFDerivWithinAt`. -/
theorem HasDerivWithinAt.hasFDerivWithinAt {f' : F} :
HasDerivWithinAt f f' s x â HasFDerivWithinAt f (smulRight (1 : ð âL[ð] ð) f') s x :=
hasDerivWithinAt_iff_hasFDerivWithinAt.mp
/-- Expressing `HasFDerivAt f f' x` in terms of `HasDerivAt` -/
theorem hasFDerivAt_iff_hasDerivAt {f' : ð âL[ð] F} : HasFDerivAt f f' x â HasDerivAt f (f' 1) x :=
hasFDerivAtFilter_iff_hasDerivAtFilter
/-- Forward direction of `hasFDerivAt_iff_hasDerivAt`. -/
theorem HasFDerivAt.hasDerivAt {f' : ð âL[ð] F} : HasFDerivAt f f' x â HasDerivAt f (f' 1) x :=
hasFDerivAt_iff_hasDerivAt.mp
/-- Expressing `HasStrictFDerivAt` in terms of `HasStrictDerivAt`. -/
theorem hasStrictFDerivAt_iff_hasStrictDerivAt {f' : ð âL[ð] F} :
HasStrictFDerivAt f f' x â HasStrictDerivAt f (f' 1) x := by
simp [HasStrictDerivAt, HasStrictFDerivAt]
/-- Forward direction of `hasStrictFDerivAt_iff_hasStrictDerivAt`. -/
protected theorem HasStrictFDerivAt.hasStrictDerivAt {f' : ð âL[ð] F} :
HasStrictFDerivAt f f' x â HasStrictDerivAt f (f' 1) x :=
hasStrictFDerivAt_iff_hasStrictDerivAt.mp
/-- Expressing `HasStrictDerivAt` in terms of `HasStrictFDerivAt` (definitional). -/
theorem hasStrictDerivAt_iff_hasStrictFDerivAt :
HasStrictDerivAt f f' x â HasStrictFDerivAt f (smulRight (1 : ð âL[ð] ð) f') x :=
Iff.rfl
alias âšHasStrictDerivAt.hasStrictFDerivAt, _â© := hasStrictDerivAt_iff_hasStrictFDerivAt
/-- Expressing `HasDerivAt f f' x` in terms of `HasFDerivAt` -/
theorem hasDerivAt_iff_hasFDerivAt {f' : F} :
HasDerivAt f f' x â HasFDerivAt f (smulRight (1 : ð âL[ð] ð) f') x :=
Iff.rfl
alias âšHasDerivAt.hasFDerivAt, _â© := hasDerivAt_iff_hasFDerivAt
/-- If `f` is not differentiable within `s` at `x`, then `derivWithin` is `0` (junk value). -/
theorem derivWithin_zero_of_not_differentiableWithinAt (h : ¬DifferentiableWithinAt ð f s x) :
derivWithin f s x = 0 := by
unfold derivWithin
rw [fderivWithin_zero_of_not_differentiableWithinAt h]
simp
/-- `derivWithin` vanishes at points that are isolated in `s`. -/
theorem derivWithin_zero_of_isolated (h : ð[s \ {x}] x = â¥) : derivWithin f s x = 0 := by
rw [derivWithin, fderivWithin_zero_of_isolated h, ContinuousLinearMap.zero_apply]
/-- `derivWithin` vanishes outside the closure of `s`. -/
theorem derivWithin_zero_of_nmem_closure (h : x â closure s) : derivWithin f s x = 0 := by
rw [derivWithin, fderivWithin_zero_of_nmem_closure h, ContinuousLinearMap.zero_apply]
/-- A nonzero `derivWithin` certifies differentiability within the set. -/
theorem differentiableWithinAt_of_derivWithin_ne_zero (h : derivWithin f s x â 0) :
DifferentiableWithinAt ð f s x :=
not_imp_comm.1 derivWithin_zero_of_not_differentiableWithinAt h
/-- If `f` is not differentiable at `x`, then `deriv f x = 0` (junk value). -/
theorem deriv_zero_of_not_differentiableAt (h : ¬DifferentiableAt ð f x) : deriv f x = 0 := by
unfold deriv
rw [fderiv_zero_of_not_differentiableAt h]
simp
/-- A nonzero `deriv` certifies differentiability. -/
theorem differentiableAt_of_deriv_ne_zero (h : deriv f x â 0) : DifferentiableAt ð f x :=
not_imp_comm.1 deriv_zero_of_not_differentiableAt h
/-- Uniqueness of the derivative within a set, at a point of unique differentiability. -/
theorem UniqueDiffWithinAt.eq_deriv (s : Set ð) (H : UniqueDiffWithinAt ð s x)
(h : HasDerivWithinAt f f' s x) (hâ : HasDerivWithinAt f fâ' s x) : f' = fâ' :=
smulRight_one_eq_iff.mp <| UniqueDiffWithinAt.eq H h hâ
/-- Characterization of the derivative along a filter via a little-o estimate. -/
theorem hasDerivAtFilter_iff_isLittleO :
HasDerivAtFilter f f' x L â (fun x' : ð => f x' - f x - (x' - x) ⢠f') =o[L] fun x' => x' - x :=
hasFDerivAtFilter_iff_isLittleO ..
/-- Characterization of the derivative along a filter via a limit of normalized differences. -/
theorem hasDerivAtFilter_iff_tendsto :
HasDerivAtFilter f f' x L â
Tendsto (fun x' : ð => âx' - xââ»Â¹ * âf x' - f x - (x' - x) ⢠f'â) L (ð 0) :=
hasFDerivAtFilter_iff_tendsto
/-- Characterization of the derivative within a set via a little-o estimate. -/
theorem hasDerivWithinAt_iff_isLittleO :
HasDerivWithinAt f f' s x â
(fun x' : ð => f x' - f x - (x' - x) ⢠f') =o[ð[s] x] fun x' => x' - x :=
hasFDerivAtFilter_iff_isLittleO ..
/-- Characterization of the derivative within a set via a limit. -/
theorem hasDerivWithinAt_iff_tendsto :
HasDerivWithinAt f f' s x â
Tendsto (fun x' => âx' - xââ»Â¹ * âf x' - f x - (x' - x) ⢠f'â) (ð[s] x) (ð 0) :=
hasFDerivAtFilter_iff_tendsto
/-- Characterization of the derivative at a point via a little-o estimate. -/
theorem hasDerivAt_iff_isLittleO :
HasDerivAt f f' x â (fun x' : ð => f x' - f x - (x' - x) ⢠f') =o[ð x] fun x' => x' - x :=
hasFDerivAtFilter_iff_isLittleO ..
/-- Characterization of the derivative at a point via a limit. -/
theorem hasDerivAt_iff_tendsto :
HasDerivAt f f' x â Tendsto (fun x' => âx' - xââ»Â¹ * âf x' - f x - (x' - x) ⢠f'â) (ð x) (ð 0) :=
hasFDerivAtFilter_iff_tendsto
/-- A function with a derivative along `L` is big-O of `x' - x` along `L`. -/
theorem HasDerivAtFilter.isBigO_sub (h : HasDerivAtFilter f f' x L) :
(fun x' => f x' - f x) =O[L] fun x' => x' - x :=
HasFDerivAtFilter.isBigO_sub h
/-- If the derivative is nonzero, the reverse big-O estimate holds as well. -/
nonrec theorem HasDerivAtFilter.isBigO_sub_rev (hf : HasDerivAtFilter f f' x L) (hf' : f' â 0) :
(fun x' => x' - x) =O[L] fun x' => f x' - f x :=
-- The derivative map is antilipschitz when `f'` is nonzero, giving the reverse estimate.
suffices AntilipschitzWith âf'âââ»Â¹ (smulRight (1 : ð âL[ð] ð) f') from hf.isBigO_sub_rev this
AddMonoidHomClass.antilipschitz_of_bound (smulRight (1 : ð âL[ð] ð) f') fun x => by
simp [norm_smul, â div_eq_inv_mul, mul_div_cancel_rightâ _ (mt norm_eq_zero.1 hf')]
/-- Strict differentiability implies differentiability. -/
theorem HasStrictDerivAt.hasDerivAt (h : HasStrictDerivAt f f' x) : HasDerivAt f f' x :=
h.hasFDerivAt
/-- Derivatives within two sets that eventually coincide near `x` (away from `y`) agree. -/
theorem hasDerivWithinAt_congr_set' {s t : Set ð} (y : ð) (h : s =á¶ [ð[{y}á¶] x] t) :
HasDerivWithinAt f f' s x â HasDerivWithinAt f f' t x :=
hasFDerivWithinAt_congr_set' y h
/-- Derivatives within two sets that eventually coincide near `x` agree. -/
theorem hasDerivWithinAt_congr_set {s t : Set ð} (h : s =á¶ [ð x] t) :
HasDerivWithinAt f f' s x â HasDerivWithinAt f f' t x :=
hasFDerivWithinAt_congr_set h
alias âšHasDerivWithinAt.congr_set, _â© := hasDerivWithinAt_congr_set
/-- Removing the base point from the set does not change the derivative within it. -/
@[simp]
theorem hasDerivWithinAt_diff_singleton :
HasDerivWithinAt f f' (s \ {x}) x â HasDerivWithinAt f f' s x :=
hasFDerivWithinAt_diff_singleton _
/-- One-sided derivatives on `Ioi` and `Ici` agree. -/
@[simp]
theorem hasDerivWithinAt_Ioi_iff_Ici [PartialOrder ð] :
HasDerivWithinAt f f' (Ioi x) x â HasDerivWithinAt f f' (Ici x) x := by
rw [â Ici_diff_left, hasDerivWithinAt_diff_singleton]
alias âšHasDerivWithinAt.Ici_of_Ioi, HasDerivWithinAt.Ioi_of_Iciâ© := hasDerivWithinAt_Ioi_iff_Ici
/-- One-sided derivatives on `Iio` and `Iic` agree. -/
@[simp]
theorem hasDerivWithinAt_Iio_iff_Iic [PartialOrder ð] :
HasDerivWithinAt f f' (Iio x) x â HasDerivWithinAt f f' (Iic x) x := by
rw [â Iic_diff_right, hasDerivWithinAt_diff_singleton]
alias âšHasDerivWithinAt.Iic_of_Iio, HasDerivWithinAt.Iio_of_Iicâ© := hasDerivWithinAt_Iio_iff_Iic
/-- At the left endpoint, a derivative within `Ioo x y` is the same as within `Ioi x`. -/
theorem HasDerivWithinAt.Ioi_iff_Ioo [LinearOrder ð] [OrderClosedTopology ð] {x y : ð} (h : x < y) :
HasDerivWithinAt f f' (Ioo x y) x â HasDerivWithinAt f f' (Ioi x) x :=
hasFDerivWithinAt_inter <| Iio_mem_nhds h
alias âšHasDerivWithinAt.Ioi_of_Ioo, HasDerivWithinAt.Ioo_of_Ioiâ© := HasDerivWithinAt.Ioi_iff_Ioo
/-- Little-o characterization with the increment based at `0`. -/
theorem hasDerivAt_iff_isLittleO_nhds_zero :
HasDerivAt f f' x â (fun h => f (x + h) - f x - h ⢠f') =o[ð 0] fun h => h :=
hasFDerivAt_iff_isLittleO_nhds_zero
/-- Monotonicity of `HasDerivAtFilter` in the filter. -/
theorem HasDerivAtFilter.mono (h : HasDerivAtFilter f f' x Lâ) (hst : Lâ †Lâ) :
HasDerivAtFilter f f' x Lâ :=
HasFDerivAtFilter.mono h hst
/-- Monotonicity of `HasDerivWithinAt` in the set. -/
theorem HasDerivWithinAt.mono (h : HasDerivWithinAt f f' t x) (hst : s â t) :
HasDerivWithinAt f f' s x :=
HasFDerivWithinAt.mono h hst
/-- A derivative within `t` transfers to any set `s` such that `t â ð[s] x`. -/
theorem HasDerivWithinAt.mono_of_mem (h : HasDerivWithinAt f f' t x) (hst : t â ð[s] x) :
HasDerivWithinAt f f' s x :=
HasFDerivWithinAt.mono_of_mem h hst
/-- A derivative at `x` gives a derivative along any filter below the neighborhood filter. -/
theorem HasDerivAt.hasDerivAtFilter (h : HasDerivAt f f' x) (hL : L †ð x) :
HasDerivAtFilter f f' x L :=
HasFDerivAt.hasFDerivAtFilter h hL
/-- A derivative at `x` is in particular a derivative within any set. -/
theorem HasDerivAt.hasDerivWithinAt (h : HasDerivAt f f' x) : HasDerivWithinAt f f' s x :=
HasFDerivAt.hasFDerivWithinAt h
/-- Having a derivative within `s` implies differentiability within `s`. -/
theorem HasDerivWithinAt.differentiableWithinAt (h : HasDerivWithinAt f f' s x) :
DifferentiableWithinAt ð f s x :=
HasFDerivWithinAt.differentiableWithinAt h
/-- Having a derivative at `x` implies differentiability at `x`. -/
theorem HasDerivAt.differentiableAt (h : HasDerivAt f f' x) : DifferentiableAt ð f x :=
HasFDerivAt.differentiableAt h
/-- A derivative within `univ` is just a derivative. -/
@[simp]
theorem hasDerivWithinAt_univ : HasDerivWithinAt f f' univ x â HasDerivAt f f' x :=
hasFDerivWithinAt_univ
/-- Uniqueness of the derivative at a point. -/
theorem HasDerivAt.unique (hâ : HasDerivAt f fâ' x) (hâ : HasDerivAt f fâ' x) : fâ' = fâ' :=
smulRight_one_eq_iff.mp <| hâ.hasFDerivAt.unique hâ
/-- Intersecting with a member of `ð[s] x` does not change the derivative within `s`. -/
theorem hasDerivWithinAt_inter' (h : t â ð[s] x) :
HasDerivWithinAt f f' (s â© t) x â HasDerivWithinAt f f' s x :=
hasFDerivWithinAt_inter' h
/-- Intersecting with a neighborhood of `x` does not change the derivative within `s`. -/
theorem hasDerivWithinAt_inter (h : t â ð x) :
HasDerivWithinAt f f' (s â© t) x â HasDerivWithinAt f f' s x :=
hasFDerivWithinAt_inter h
/-- Derivatives within `s` and within `t` combine to a derivative within their union. -/
theorem HasDerivWithinAt.union (hs : HasDerivWithinAt f f' s x) (ht : HasDerivWithinAt f f' t x) :
HasDerivWithinAt f f' (s ⪠t) x :=
hs.hasFDerivWithinAt.union ht.hasFDerivWithinAt
/-- A derivative within a neighborhood of `x` is a genuine derivative at `x`. -/
theorem HasDerivWithinAt.hasDerivAt (h : HasDerivWithinAt f f' s x) (hs : s â ð x) :
HasDerivAt f f' x :=
HasFDerivWithinAt.hasFDerivAt h hs
/-- A function differentiable within `s` has `derivWithin f s x` as a derivative within `s`. -/
theorem DifferentiableWithinAt.hasDerivWithinAt (h : DifferentiableWithinAt ð f s x) :
HasDerivWithinAt f (derivWithin f s x) s x :=
h.hasFDerivWithinAt.hasDerivWithinAt
/-- A differentiable function has `deriv f x` as a derivative at `x`. -/
theorem DifferentiableAt.hasDerivAt (h : DifferentiableAt ð f x) : HasDerivAt f (deriv f x) x :=
h.hasFDerivAt.hasDerivAt
/-- `deriv f x` is a derivative of `f` at `x` iff `f` is differentiable at `x`. -/
@[simp]
theorem hasDerivAt_deriv_iff : HasDerivAt f (deriv f x) x â DifferentiableAt ð f x :=
âšfun h => h.differentiableAt, fun h => h.hasDerivAtâ©
/-- `derivWithin f s x` is a derivative within `s` iff `f` is differentiable within `s`. -/
@[simp]
theorem hasDerivWithinAt_derivWithin_iff :
HasDerivWithinAt f (derivWithin f s x) s x â DifferentiableWithinAt ð f s x :=
âšfun h => h.differentiableWithinAt, fun h => h.hasDerivWithinAtâ©
/-- Differentiability on a neighborhood of `x` gives a derivative at `x`. -/
theorem DifferentiableOn.hasDerivAt (h : DifferentiableOn ð f s) (hs : s â ð x) :
HasDerivAt f (deriv f x) x :=
(h.hasFDerivAt hs).hasDerivAt
/-- Any derivative of `f` at `x` equals `deriv f x`. -/
theorem HasDerivAt.deriv (h : HasDerivAt f f' x) : deriv f x = f' :=
h.differentiableAt.hasDerivAt.unique h
/-- If `f` has derivative `f' x` at every `x`, then `deriv f = f'`. -/
theorem deriv_eq {f' : ð â F} (h : â x, HasDerivAt f (f' x) x) : deriv f = f' :=
funext fun x => (h x).deriv
/-- Any derivative within `s` equals `derivWithin`, at points of unique differentiability. -/
theorem HasDerivWithinAt.derivWithin (h : HasDerivWithinAt f f' s x)
(hxs : UniqueDiffWithinAt ð s x) : derivWithin f s x = f' :=
hxs.eq_deriv _ h.differentiableWithinAt.hasDerivWithinAt h
/-- Evaluating `fderivWithin` at the scalar `1` gives `derivWithin` (definitional). -/
theorem fderivWithin_derivWithin : (fderivWithin ð f s x : ð â F) 1 = derivWithin f s x :=
rfl
/-- Reconstructing `fderivWithin` from `derivWithin`. -/
theorem derivWithin_fderivWithin :
smulRight (1 : ð âL[ð] ð) (derivWithin f s x) = fderivWithin ð f s x := by simp [derivWithin]
/-- Norms of `derivWithin` and `fderivWithin` agree. -/
theorem norm_derivWithin_eq_norm_fderivWithin : âderivWithin f s xâ = âfderivWithin ð f s xâ := by
simp [â derivWithin_fderivWithin]
/-- Evaluating `fderiv` at the scalar `1` gives `deriv` (definitional). -/
theorem fderiv_deriv : (fderiv ð f x : ð â F) 1 = deriv f x :=
rfl
/-- `fderiv` acts by scalar multiplication with `deriv`. -/
@[simp]
theorem fderiv_eq_smul_deriv (y : ð) : (fderiv ð f x : ð â F) y = y ⢠deriv f x := by
rw [â fderiv_deriv, â ContinuousLinearMap.map_smul]
simp only [smul_eq_mul, mul_one]
/-- Reconstructing `fderiv` from `deriv`. -/
theorem deriv_fderiv : smulRight (1 : ð âL[ð] ð) (deriv f x) = fderiv ð f x := by
simp only [deriv, ContinuousLinearMap.smulRight_one_one]
/-- For scalar functions, `fderiv` is multiplication by `deriv`. -/
lemma fderiv_eq_deriv_mul {f : ð â ð} {x y : ð} : (fderiv ð f x : ð â ð) y = (deriv f x) * y := by
simp [mul_comm]
/-- Norms of `deriv` and `fderiv` agree. -/
theorem norm_deriv_eq_norm_fderiv : âderiv f xâ = âfderiv ð f xâ := by
simp [â deriv_fderiv]
/-- At points of unique differentiability, `derivWithin` of a function differentiable at `x`
coincides with `deriv`. -/
theorem DifferentiableAt.derivWithin (h : DifferentiableAt ð f x) (hxs : UniqueDiffWithinAt ð s x) :
derivWithin f s x = deriv f x := by
unfold derivWithin deriv
rw [h.fderivWithin hxs]
/-- A vanishing derivative within `s` forces `deriv f x = 0`, under unique differentiability. -/
theorem HasDerivWithinAt.deriv_eq_zero (hd : HasDerivWithinAt f 0 s x)
(H : UniqueDiffWithinAt ð s x) : deriv f x = 0 :=
(em' (DifferentiableAt ð f x)).elim deriv_zero_of_not_differentiableAt fun h =>
H.eq_deriv _ h.hasDerivAt.hasDerivWithinAt hd
theorem derivWithin_of_mem (st : t â ð[s] x) (ht : UniqueDiffWithinAt ð s x)
(h : DifferentiableWithinAt ð f t x) : derivWithin f s x = derivWithin f t x :=
((DifferentiableWithinAt.hasDerivWithinAt h).mono_of_mem st).derivWithin ht
theorem derivWithin_subset (st : s â t) (ht : UniqueDiffWithinAt ð s x)
(h : DifferentiableWithinAt ð f t x) : derivWithin f s x = derivWithin f t x :=
((DifferentiableWithinAt.hasDerivWithinAt h).mono st).derivWithin ht
/-- `derivWithin` only depends on the set through its germ at `x` away from one point `y`. -/
theorem derivWithin_congr_set' (y : ð) (h : s =á¶ [ð[{y}á¶] x] t) :
derivWithin f s x = derivWithin f t x := by simp only [derivWithin, fderivWithin_congr_set' y h]
/-- `derivWithin` only depends on the set through its germ at `x`. -/
theorem derivWithin_congr_set (h : s =á¶ [ð x] t) : derivWithin f s x = derivWithin f t x := by
simp only [derivWithin, fderivWithin_congr_set h]
/-- Differentiating within the whole space is the same as differentiating globally. -/
@[simp]
theorem derivWithin_univ : derivWithin f univ = deriv f := by
ext
unfold derivWithin deriv
rw [fderivWithin_univ]
/-- Intersecting with a neighborhood of `x` does not change `derivWithin` at `x`. -/
theorem derivWithin_inter (ht : t â ð x) : derivWithin f (s â© t) x = derivWithin f s x := by
unfold derivWithin
rw [fderivWithin_inter ht]
/-- Within a set that is a neighborhood of `x`, `derivWithin` agrees with `deriv`. -/
theorem derivWithin_of_mem_nhds (h : s â ð x) : derivWithin f s x = deriv f x := by
simp only [derivWithin, deriv, fderivWithin_of_mem_nhds h]
/-- Within an open set containing `x`, `derivWithin` agrees with `deriv`. -/
theorem derivWithin_of_isOpen (hs : IsOpen s) (hx : x â s) : derivWithin f s x = deriv f x :=
derivWithin_of_mem_nhds (hs.mem_nhds hx)
/-- On an open set, a pointwise derivative candidate `f'` that works within the set is the
actual `deriv` of `f` at every point of the set. -/
lemma deriv_eqOn {f' : ð â F} (hs : IsOpen s) (hf' : â x â s, HasDerivWithinAt f (f' x) s x) :
s.EqOn (deriv f) f' := fun x hx ⊠by
rw [â derivWithin_of_isOpen hs hx, (hf' _ hx).derivWithin <| hs.uniqueDiffWithinAt hx]
/-- Membership of `deriv f x` in a set, split along differentiability of `f` at `x`
(recall `deriv` defaults to `0` when `f` is not differentiable). -/
theorem deriv_mem_iff {f : ð â F} {s : Set F} {x : ð} :
deriv f x â s â
DifferentiableAt ð f x â§ deriv f x â s ⚠¬DifferentiableAt ð f x â§ (0 : F) â s := by
by_cases hx : DifferentiableAt ð f x <;> simp [deriv_zero_of_not_differentiableAt, *]
/-- Membership of `derivWithin f t x` in a set, split along within-set differentiability
(recall `derivWithin` defaults to `0` when `f` is not differentiable within `t`). -/
theorem derivWithin_mem_iff {f : ð â F} {t : Set ð} {s : Set F} {x : ð} :
derivWithin f t x â s â
DifferentiableWithinAt ð f t x â§ derivWithin f t x â s âš
¬DifferentiableWithinAt ð f t x â§ (0 : F) â s := by
by_cases hx : DifferentiableWithinAt ð f t x <;>
simp [derivWithin_zero_of_not_differentiableWithinAt, *]
/-- One-sided differentiability on the open ray `Ioi x` is the same as on the closed ray
`Ici x`: the point `x` itself does not affect differentiability within the set. -/
theorem differentiableWithinAt_Ioi_iff_Ici [PartialOrder ð] :
DifferentiableWithinAt ð f (Ioi x) x â DifferentiableWithinAt ð f (Ici x) x :=
âšfun h => h.hasDerivWithinAt.Ici_of_Ioi.differentiableWithinAt, fun h =>
h.hasDerivWithinAt.Ioi_of_Ici.differentiableWithinAtâ©
-- Golfed while splitting the file
/-- The one-sided derivative within `Ioi x` equals the one within `Ici x`. Both the
differentiable case (via uniqueness of derivatives on `Ici`) and the non-differentiable case
(both sides default to `0`) are handled. -/
theorem derivWithin_Ioi_eq_Ici {E : Type*} [NormedAddCommGroup E] [NormedSpace â E] (f : â â E)
(x : â) : derivWithin f (Ioi x) x = derivWithin f (Ici x) x := by
by_cases H : DifferentiableWithinAt â f (Ioi x) x
· have A := H.hasDerivWithinAt.Ici_of_Ioi
have B := (differentiableWithinAt_Ioi_iff_Ici.1 H).hasDerivWithinAt
simpa using (uniqueDiffOn_Ici x).eq left_mem_Ici A B
· rw [derivWithin_zero_of_not_differentiableWithinAt H,
derivWithin_zero_of_not_differentiableWithinAt]
rwa [differentiableWithinAt_Ioi_iff_Ici] at H
section congr
/-! ### Congruence properties of derivatives -/
/-- `HasDerivAtFilter` is invariant under eventual equality of the functions along `L`
(together with equality at `x`) and equality of the candidate derivatives. -/
theorem Filter.EventuallyEq.hasDerivAtFilter_iff (hâ : fâ =á¶ [L] fâ) (hx : fâ x = fâ x)
(hâ : fâ' = fâ') : HasDerivAtFilter fâ fâ' x L â HasDerivAtFilter fâ fâ' x L :=
hâ.hasFDerivAtFilter_iff hx (by simp [hâ])
/-- Transfer `HasDerivAtFilter` to a function that agrees eventually along `L` and at `x`. -/
theorem HasDerivAtFilter.congr_of_eventuallyEq (h : HasDerivAtFilter f f' x L) (hL : fâ =á¶ [L] f)
(hx : fâ x = f x) : HasDerivAtFilter fâ f' x L := by rwa [hL.hasDerivAtFilter_iff hx rfl]
/-- Transfer `HasDerivWithinAt` to a smaller set on which the functions agree. -/
theorem HasDerivWithinAt.congr_mono (h : HasDerivWithinAt f f' s x) (ht : â x â t, fâ x = f x)
(hx : fâ x = f x) (hâ : t â s) : HasDerivWithinAt fâ f' t x :=
HasFDerivWithinAt.congr_mono h ht hx hâ
/-- Transfer `HasDerivWithinAt` to a function that agrees on `s` and at `x`. -/
theorem HasDerivWithinAt.congr (h : HasDerivWithinAt f f' s x) (hs : â x â s, fâ x = f x)
(hx : fâ x = f x) : HasDerivWithinAt fâ f' s x :=
h.congr_mono hs hx (Subset.refl _)
/-- As `HasDerivWithinAt.congr`, with agreement at `x` derived from `x â s`. -/
theorem HasDerivWithinAt.congr_of_mem (h : HasDerivWithinAt f f' s x) (hs : â x â s, fâ x = f x)
(hx : x â s) : HasDerivWithinAt fâ f' s x :=
h.congr hs (hs _ hx)
/-- Transfer `HasDerivWithinAt` along eventual equality in the within-`s` neighborhood filter. -/
theorem HasDerivWithinAt.congr_of_eventuallyEq (h : HasDerivWithinAt f f' s x)
(hâ : fâ =á¶ [ð[s] x] f) (hx : fâ x = f x) : HasDerivWithinAt fâ f' s x :=
HasDerivAtFilter.congr_of_eventuallyEq h hâ hx
/-- `HasDerivWithinAt` is invariant under eventual equality within `s` plus equality at `x`. -/
theorem Filter.EventuallyEq.hasDerivWithinAt_iff (hâ : fâ =á¶ [ð[s] x] f) (hx : fâ x = f x) :
HasDerivWithinAt fâ f' s x â HasDerivWithinAt f f' s x :=
âšfun h' ⊠h'.congr_of_eventuallyEq hâ.symm hx.symm, fun h' ⊠h'.congr_of_eventuallyEq hâ hxâ©
/-- As `HasDerivWithinAt.congr_of_eventuallyEq`, with equality at `x` deduced from `x â s`. -/
theorem HasDerivWithinAt.congr_of_eventuallyEq_of_mem (h : HasDerivWithinAt f f' s x)
(hâ : fâ =á¶ [ð[s] x] f) (hx : x â s) : HasDerivWithinAt fâ f' s x :=
h.congr_of_eventuallyEq hâ (hâ.eq_of_nhdsWithin hx)
/-- As `Filter.EventuallyEq.hasDerivWithinAt_iff`, with equality at `x` deduced from `x â s`. -/
theorem Filter.EventuallyEq.hasDerivWithinAt_iff_of_mem (hâ : fâ =á¶ [ð[s] x] f) (hx : x â s) :
HasDerivWithinAt fâ f' s x â HasDerivWithinAt f f' s x :=
âšfun h' ⊠h'.congr_of_eventuallyEq_of_mem hâ.symm hx,
fun h' ⊠h'.congr_of_eventuallyEq_of_mem hâ hxâ©
/-- Replace the candidate derivative in `HasStrictDerivAt` by an equal one. -/
theorem HasStrictDerivAt.congr_deriv (h : HasStrictDerivAt f f' x) (h' : f' = g') :
HasStrictDerivAt f g' x :=
h.congr_fderiv <| congr_arg _ h'
/-- Replace the candidate derivative in `HasDerivAt` by an equal one. -/
theorem HasDerivAt.congr_deriv (h : HasDerivAt f f' x) (h' : f' = g') : HasDerivAt f g' x :=
HasFDerivAt.congr_fderiv h <| congr_arg _ h'
/-- Replace the candidate derivative in `HasDerivWithinAt` by an equal one. -/
theorem HasDerivWithinAt.congr_deriv (h : HasDerivWithinAt f f' s x) (h' : f' = g') :
HasDerivWithinAt f g' s x :=
HasFDerivWithinAt.congr_fderiv h <| congr_arg _ h'
/-- Transfer `HasDerivAt` along eventual equality near `x` (agreement at `x` itself
follows since `fâ =á¶ [ð x] f` includes the point `x`). -/
theorem HasDerivAt.congr_of_eventuallyEq (h : HasDerivAt f f' x) (hâ : fâ =á¶ [ð x] f) :
HasDerivAt fâ f' x :=
HasDerivAtFilter.congr_of_eventuallyEq h hâ (mem_of_mem_nhds hâ : _)
/-- `HasDerivAt` is invariant under eventual equality near `x`. -/
theorem Filter.EventuallyEq.hasDerivAt_iff (h : fâ =á¶ [ð x] fâ) :
HasDerivAt fâ f' x â HasDerivAt fâ f' x :=
âšfun h' ⊠h'.congr_of_eventuallyEq h.symm, fun h' ⊠h'.congr_of_eventuallyEq hâ©
/-- `derivWithin` is invariant under eventual equality within `s` plus equality at `x`. -/
theorem Filter.EventuallyEq.derivWithin_eq (hs : fâ =á¶ [ð[s] x] f) (hx : fâ x = f x) :
derivWithin fâ s x = derivWithin f s x := by
unfold derivWithin
rw [hs.fderivWithin_eq hx]
/-- `derivWithin` is invariant under pointwise equality on `s` plus equality at `x`. -/
theorem derivWithin_congr (hs : EqOn fâ f s) (hx : fâ x = f x) :
derivWithin fâ s x = derivWithin f s x := by
unfold derivWithin
rw [fderivWithin_congr hs hx]
/-- `deriv` is invariant under eventual equality near `x`. -/
theorem Filter.EventuallyEq.deriv_eq (hL : fâ =á¶ [ð x] f) : deriv fâ x = deriv f x := by
unfold deriv
rwa [Filter.EventuallyEq.fderiv_eq]
/-- Eventually equal functions have eventually equal derivative functions near `x`. -/
protected theorem Filter.EventuallyEq.deriv (h : fâ =á¶ [ð x] f) : deriv fâ =á¶ [ð x] deriv f :=
h.eventuallyEq_nhds.mono fun _ h => h.deriv_eq
end congr
section id
/-! ### Derivative of the identity -/
variable (s x L)
/-- The identity function has derivative `1` along any filter. -/
theorem hasDerivAtFilter_id : HasDerivAtFilter id 1 x L :=
(hasFDerivAtFilter_id x L).hasDerivAtFilter
/-- The identity function has derivative `1` within any set. -/
theorem hasDerivWithinAt_id : HasDerivWithinAt id 1 s x :=
hasDerivAtFilter_id _ _
/-- The identity function has derivative `1` at every point. -/
theorem hasDerivAt_id : HasDerivAt id 1 x :=
hasDerivAtFilter_id _ _
/-- Lambda form of `hasDerivAt_id`, for statements written with `fun x => x`. -/
theorem hasDerivAt_id' : HasDerivAt (fun x : ð => x) 1 x :=
hasDerivAtFilter_id _ _
/-- The identity function has strict derivative `1` at every point. -/
theorem hasStrictDerivAt_id : HasStrictDerivAt id 1 x :=
(hasStrictFDerivAt_id x).hasStrictDerivAt
/-- `deriv` of the identity is `1`. -/
theorem deriv_id : deriv id x = 1 :=
HasDerivAt.deriv (hasDerivAt_id x)
/-- Function-level form of `deriv_id`. -/
@[simp]
theorem deriv_id' : deriv (@id ð) = fun _ => 1 :=
funext deriv_id
/-- Lambda form of `deriv_id'`. -/
@[simp]
theorem deriv_id'' : (deriv fun x : ð => x) = fun _ => 1 :=
deriv_id'
/-- `derivWithin` of the identity is `1` on any set with unique derivatives at `x`. -/
theorem derivWithin_id (hxs : UniqueDiffWithinAt ð s x) : derivWithin id s x = 1 :=
(hasDerivWithinAt_id x s).derivWithin hxs
end id
section Const
/-! ### Derivative of constant functions -/
variable (c : F) (s x L)
/-- A constant function has derivative `0` along any filter. -/
theorem hasDerivAtFilter_const : HasDerivAtFilter (fun _ => c) 0 x L :=
(hasFDerivAtFilter_const c x L).hasDerivAtFilter
/-- A constant function has strict derivative `0` at every point. -/
theorem hasStrictDerivAt_const : HasStrictDerivAt (fun _ => c) 0 x :=
(hasStrictFDerivAt_const c x).hasStrictDerivAt
/-- A constant function has derivative `0` within any set. -/
theorem hasDerivWithinAt_const : HasDerivWithinAt (fun _ => c) 0 s x :=
hasDerivAtFilter_const _ _ _
/-- A constant function has derivative `0` at every point. -/
theorem hasDerivAt_const : HasDerivAt (fun _ => c) 0 x :=
hasDerivAtFilter_const _ _ _
/-- `deriv` of a constant function is `0`. -/
theorem deriv_const : deriv (fun _ => c) x = 0 :=
HasDerivAt.deriv (hasDerivAt_const x c)
/-- Function-level form of `deriv_const`. -/
@[simp]
theorem deriv_const' : (deriv fun _ : ð => c) = fun _ => 0 :=
funext fun x => deriv_const x c
/-- `derivWithin` of a constant function is `0` on any set with unique derivatives at `x`. -/
theorem derivWithin_const (hxs : UniqueDiffWithinAt ð s x) : derivWithin (fun _ => c) s x = 0 :=
(hasDerivWithinAt_const _ _ _).derivWithin hxs
end Const
section Continuous
/-! ### Continuity of a function admitting a derivative -/
/-- A function with a derivative along a filter `L` finer than `ð x` tends to `f x` along `L`. -/
nonrec theorem HasDerivAtFilter.tendsto_nhds (hL : L †ð x) (h : HasDerivAtFilter f f' x L) :
Tendsto f L (ð (f x)) :=
h.tendsto_nhds hL
/-- Differentiability within `s` at `x` implies continuity within `s` at `x`. -/
theorem HasDerivWithinAt.continuousWithinAt (h : HasDerivWithinAt f f' s x) :
ContinuousWithinAt f s x :=
HasDerivAtFilter.tendsto_nhds inf_le_left h
/-- Differentiability at `x` implies continuity at `x`. -/
theorem HasDerivAt.continuousAt (h : HasDerivAt f f' x) : ContinuousAt f x :=
HasDerivAtFilter.tendsto_nhds le_rfl h
/-- A function differentiable at every point of `s` is continuous on `s`. -/
protected theorem HasDerivAt.continuousOn {f f' : ð â F} (hderiv : â x â s, HasDerivAt f (f' x) x) :
ContinuousOn f s := fun x hx => (hderiv x hx).continuousAt.continuousWithinAt
end Continuous
/-- Converse to the mean value inequality: if `f` is differentiable at `xâ` and `C`-lipschitz
on a neighborhood of `xâ` then its derivative at `xâ` has norm bounded by `C`. This version
only assumes that `âf x - f xââ †C * âx - xââ` in a neighborhood of `xâ`. -/
theorem HasDerivAt.le_of_lip' {f : ð â F} {f' : F} {xâ : ð} (hf : HasDerivAt f f' xâ)
{C : â} (hCâ : 0 †C) (hlip : âá¶ x in ð xâ, âf x - f xââ †C * âx - xââ) :
âf'â †C := by
simpa using HasFDerivAt.le_of_lip' hf.hasFDerivAt hCâ hlip
/-- Converse to the mean value inequality: if `f` is differentiable at `xâ` and `C`-lipschitz
on a neighborhood of `xâ` then its derivative at `xâ` has norm bounded by `C`. -/
theorem HasDerivAt.le_of_lipschitzOn {f : ð â F} {f' : F} {xâ : ð} (hf : HasDerivAt f f' xâ)
{s : Set ð} (hs : s â ð xâ) {C : ââ¥0} (hlip : LipschitzOnWith C f s) : âf'â †C := by
simpa using HasFDerivAt.le_of_lipschitzOn hf.hasFDerivAt hs hlip
/-- Converse to the mean value inequality: if `f` is differentiable at `xâ` and `C`-lipschitz
then its derivative at `xâ` has norm bounded by `C`. -/
theorem HasDerivAt.le_of_lipschitz {f : ð â F} {f' : F} {xâ : ð} (hf : HasDerivAt f f' xâ)
{C : ââ¥0} (hlip : LipschitzWith C f) : âf'â †C := by
simpa using HasFDerivAt.le_of_lipschitz hf.hasFDerivAt hlip
/-- Converse to the mean value inequality: if `f` is `C`-lipschitz
on a neighborhood of `xâ` then its derivative at `xâ` has norm bounded by `C`. This version
only assumes that `âf x - f xââ †C * âx - xââ` in a neighborhood of `xâ`.
(No differentiability is needed: when `f` is not differentiable, `deriv` defaults to `0`.) -/
theorem norm_deriv_le_of_lip' {f : ð â F} {xâ : ð}
{C : â} (hCâ : 0 †C) (hlip : âá¶ x in ð xâ, âf x - f xââ †C * âx - xââ) :
âderiv f xââ †C := by
simpa [norm_deriv_eq_norm_fderiv] using norm_fderiv_le_of_lip' ð hCâ hlip
/-- Converse to the mean value inequality: if `f` is `C`-lipschitz
on a neighborhood of `xâ` then its derivative at `xâ` has norm bounded by `C`.
Version using `deriv`. -/
theorem norm_deriv_le_of_lipschitzOn {f : ð â F} {xâ : ð} {s : Set ð} (hs : s â ð xâ)
{C : ââ¥0} (hlip : LipschitzOnWith C f s) : âderiv f xââ †C := by
simpa [norm_deriv_eq_norm_fderiv] using norm_fderiv_le_of_lipschitzOn ð hs hlip
/-- Converse to the mean value inequality: if `f` is `C`-lipschitz then
its derivative at `xâ` has norm bounded by `C`.
Version using `deriv`. -/
theorem norm_deriv_le_of_lipschitz {f : ð â F} {xâ : ð}
{C : ââ¥0} (hlip : LipschitzWith C f) : âderiv f xââ †C := by
simpa [norm_deriv_eq_norm_fderiv] using norm_fderiv_le_of_lipschitz ð hlip
|
Analysis\Calculus\Deriv\Comp.lean | /-
Copyright (c) 2019 Gabriel Ebner. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Gabriel Ebner, Sébastien Gouëzel, Yury Kudryashov, Yuyang Zhao
-/
import Mathlib.Analysis.Calculus.Deriv.Basic
import Mathlib.Analysis.Calculus.FDeriv.Comp
import Mathlib.Analysis.Calculus.FDeriv.RestrictScalars
/-!
# One-dimensional derivatives of compositions of functions
In this file we prove the chain rule for the following cases:
* `HasDerivAt.comp` etc: `f : ð' â ð'` composed with `g : ð â ð'`;
* `HasDerivAt.scomp` etc: `f : ð' â E` composed with `g : ð â ð'`;
* `HasFDerivAt.comp_hasDerivAt` etc: `f : E â F` composed with `g : ð â E`;
Here `ð` is the base normed field, `E` and `F` are normed spaces over `ð` and `ð'` is an algebra
over `ð` (e.g., `ð'=ð` or `ð=â`, `ð'=â`).
We also give versions with the `of_eq` suffix, which require an equality proof instead
of definitional equality of the different points used in the composition. These versions are
often more flexible to use.
For a more detailed overview of one-dimensional derivatives in mathlib, see the module docstring of
`Analysis/Calculus/Deriv/Basic`.
## Keywords
derivative, chain rule
-/
universe u v w
open scoped Classical
open Topology Filter ENNReal
open Filter Asymptotics Set
open ContinuousLinearMap (smulRight smulRight_one_eq_iff)
variable {ð : Type u} [NontriviallyNormedField ð]
variable {F : Type v} [NormedAddCommGroup F] [NormedSpace ð F]
variable {E : Type w} [NormedAddCommGroup E] [NormedSpace ð E]
variable {f fâ fâ g : ð â F}
variable {f' fâ' fâ' g' : F}
variable {x : ð}
variable {s t : Set ð}
variable {L Lâ Lâ : Filter ð}
section Composition
/-!
### Derivative of the composition of a vector function and a scalar function
We use `scomp` in lemmas on composition of vector valued and scalar valued functions, and `comp`
in lemmas on composition of scalar valued functions, in analogy for `smul` and `mul` (and also
because the `comp` version with the shorter name will show up much more often in applications).
The formula for the derivative involves `smul` in `scomp` lemmas, which can be reduced to
usual multiplication in `comp` lemmas.
-/
/- For composition lemmas, we put x explicit to help the elaborator, as otherwise Lean tends to
get confused since there are too many possibilities for composition -/
variable {ð' : Type*} [NontriviallyNormedField ð'] [NormedAlgebra ð ð'] [NormedSpace ð' F]
[IsScalarTower ð ð' F] {s' t' : Set ð'} {h : ð â ð'} {hâ : ð â ð} {hâ : ð' â ð'} {h' hâ' : ð'}
{hâ' : ð} {gâ : ð' â F} {gâ' : F} {L' : Filter ð'} {y : ð'} (x)
/-- Chain rule along filters, vector-valued outer function: the derivative of `gâ â h` is
the scalar derivative of `h` acting by `smul` on the derivative of `gâ`. -/
theorem HasDerivAtFilter.scomp (hg : HasDerivAtFilter gâ gâ' (h x) L')
(hh : HasDerivAtFilter h h' x L) (hL : Tendsto h L L') :
HasDerivAtFilter (gâ â h) (h' ⢠gâ') x L := by
simpa using ((hg.restrictScalars ð).comp x hh hL).hasDerivAtFilter
/-- As `HasDerivAtFilter.scomp`, with the base point of `hg` given by an equation `y = h x`
instead of definitional equality; often easier to apply. -/
theorem HasDerivAtFilter.scomp_of_eq (hg : HasDerivAtFilter gâ gâ' y L')
(hh : HasDerivAtFilter h h' x L) (hy : y = h x) (hL : Tendsto h L L') :
HasDerivAtFilter (gâ â h) (h' ⢠gâ') x L := by
rw [hy] at hg; exact hg.scomp x hh hL
/-- Chain rule when the outer function is only differentiable within a set `s'` that the inner
function maps into everywhere. -/
theorem HasDerivWithinAt.scomp_hasDerivAt (hg : HasDerivWithinAt gâ gâ' s' (h x))
(hh : HasDerivAt h h' x) (hs : â x, h x â s') : HasDerivAt (gâ â h) (h' ⢠gâ') x :=
hg.scomp x hh <| tendsto_inf.2 âšhh.continuousAt, tendsto_principal.2 <| eventually_of_forall hsâ©
/-- As `HasDerivWithinAt.scomp_hasDerivAt`, with the base point given by `y = h x`. -/
theorem HasDerivWithinAt.scomp_hasDerivAt_of_eq (hg : HasDerivWithinAt gâ gâ' s' y)
(hh : HasDerivAt h h' x) (hs : â x, h x â s') (hy : y = h x) :
HasDerivAt (gâ â h) (h' ⢠gâ') x := by
rw [hy] at hg; exact hg.scomp_hasDerivAt x hh hs
/-- Chain rule within sets: if `h` maps `s` into `t'`, the composition is differentiable
within `s`. -/
nonrec theorem HasDerivWithinAt.scomp (hg : HasDerivWithinAt gâ gâ' t' (h x))
(hh : HasDerivWithinAt h h' s x) (hst : MapsTo h s t') :
HasDerivWithinAt (gâ â h) (h' ⢠gâ') s x :=
hg.scomp x hh <| hh.continuousWithinAt.tendsto_nhdsWithin hst
/-- As `HasDerivWithinAt.scomp`, with the base point given by `y = h x`. -/
theorem HasDerivWithinAt.scomp_of_eq (hg : HasDerivWithinAt gâ gâ' t' y)
(hh : HasDerivWithinAt h h' s x) (hst : MapsTo h s t') (hy : y = h x) :
HasDerivWithinAt (gâ â h) (h' ⢠gâ') s x := by
rw [hy] at hg; exact hg.scomp x hh hst
/-- The chain rule: the derivative of `gâ â h` is `h' ⢠gâ'`. -/
nonrec theorem HasDerivAt.scomp (hg : HasDerivAt gâ gâ' (h x)) (hh : HasDerivAt h h' x) :
HasDerivAt (gâ â h) (h' ⢠gâ') x :=
hg.scomp x hh hh.continuousAt
/-- The chain rule, with the base point of `hg` given by `y = h x`. -/
theorem HasDerivAt.scomp_of_eq
(hg : HasDerivAt gâ gâ' y) (hh : HasDerivAt h h' x) (hy : y = h x) :
HasDerivAt (gâ â h) (h' ⢠gâ') x := by
rw [hy] at hg; exact hg.scomp x hh
/-- Strict-derivative version of the chain rule. -/
theorem HasStrictDerivAt.scomp (hg : HasStrictDerivAt gâ gâ' (h x)) (hh : HasStrictDerivAt h h' x) :
HasStrictDerivAt (gâ â h) (h' ⢠gâ') x := by
simpa using ((hg.restrictScalars ð).comp x hh).hasStrictDerivAt
/-- As `HasStrictDerivAt.scomp`, with the base point given by `y = h x`. -/
theorem HasStrictDerivAt.scomp_of_eq
(hg : HasStrictDerivAt gâ gâ' y) (hh : HasStrictDerivAt h h' x) (hy : y = h x) :
HasStrictDerivAt (gâ â h) (h' ⢠gâ') x := by
rw [hy] at hg; exact hg.scomp x hh
/-- Chain rule when only the inner function is restricted to a set `s`. -/
theorem HasDerivAt.scomp_hasDerivWithinAt (hg : HasDerivAt gâ gâ' (h x))
(hh : HasDerivWithinAt h h' s x) : HasDerivWithinAt (gâ â h) (h' ⢠gâ') s x :=
HasDerivWithinAt.scomp x hg.hasDerivWithinAt hh (mapsTo_univ _ _)
/-- As `HasDerivAt.scomp_hasDerivWithinAt`, with the base point given by `y = h x`. -/
theorem HasDerivAt.scomp_hasDerivWithinAt_of_eq (hg : HasDerivAt gâ gâ' y)
(hh : HasDerivWithinAt h h' s x) (hy : y = h x) :
HasDerivWithinAt (gâ â h) (h' ⢠gâ') s x := by
rw [hy] at hg; exact hg.scomp_hasDerivWithinAt x hh
/-- Chain-rule formula for `derivWithin` of a vector-valued composition. -/
theorem derivWithin.scomp (hg : DifferentiableWithinAt ð' gâ t' (h x))
(hh : DifferentiableWithinAt ð h s x) (hs : MapsTo h s t') (hxs : UniqueDiffWithinAt ð s x) :
derivWithin (gâ â h) s x = derivWithin h s x ⢠derivWithin gâ t' (h x) :=
(HasDerivWithinAt.scomp x hg.hasDerivWithinAt hh.hasDerivWithinAt hs).derivWithin hxs
/-- As `derivWithin.scomp`, with the base point given by `y = h x`. -/
theorem derivWithin.scomp_of_eq (hg : DifferentiableWithinAt ð' gâ t' y)
(hh : DifferentiableWithinAt ð h s x) (hs : MapsTo h s t') (hxs : UniqueDiffWithinAt ð s x)
(hy : y = h x) :
derivWithin (gâ â h) s x = derivWithin h s x ⢠derivWithin gâ t' (h x) := by
rw [hy] at hg; exact derivWithin.scomp x hg hh hs hxs
/-- Chain-rule formula for `deriv` of a vector-valued composition. -/
theorem deriv.scomp (hg : DifferentiableAt ð' gâ (h x)) (hh : DifferentiableAt ð h x) :
deriv (gâ â h) x = deriv h x ⢠deriv gâ (h x) :=
(HasDerivAt.scomp x hg.hasDerivAt hh.hasDerivAt).deriv
/-- As `deriv.scomp`, with the base point given by `y = h x`. -/
theorem deriv.scomp_of_eq
(hg : DifferentiableAt ð' gâ y) (hh : DifferentiableAt ð h x) (hy : y = h x) :
deriv (gâ â h) x = deriv h x ⢠deriv gâ (h x) := by
rw [hy] at hg; exact deriv.scomp x hg hh
/-! ### Derivative of the composition of a scalar and vector functions -/
/-- Chain rule with a scalar-valued outer function on a vector space: the Fréchet derivative of
`hâ â f` is the scalar derivative of `hâ` acting by `smul` on the Fréchet derivative of `f`. -/
theorem HasDerivAtFilter.comp_hasFDerivAtFilter {f : E â ð'} {f' : E âL[ð] ð'} (x) {L'' : Filter E}
(hhâ : HasDerivAtFilter hâ hâ' (f x) L') (hf : HasFDerivAtFilter f f' x L'')
(hL : Tendsto f L'' L') : HasFDerivAtFilter (hâ â f) (hâ' ⢠f') x L'' := by
convert (hhâ.restrictScalars ð).comp x hf hL
ext x
simp [mul_comm]
/-- As `HasDerivAtFilter.comp_hasFDerivAtFilter`, with the base point given by `y = f x`. -/
theorem HasDerivAtFilter.comp_hasFDerivAtFilter_of_eq
{f : E â ð'} {f' : E âL[ð] ð'} (x) {L'' : Filter E}
(hhâ : HasDerivAtFilter hâ hâ' y L') (hf : HasFDerivAtFilter f f' x L'')
(hL : Tendsto f L'' L') (hy : y = f x) : HasFDerivAtFilter (hâ â f) (hâ' ⢠f') x L'' := by
rw [hy] at hhâ; exact hhâ.comp_hasFDerivAtFilter x hf hL
/-- Strict-derivative version of `HasDerivAtFilter.comp_hasFDerivAtFilter`. -/
theorem HasStrictDerivAt.comp_hasStrictFDerivAt {f : E â ð'} {f' : E âL[ð] ð'} (x)
(hh : HasStrictDerivAt hâ hâ' (f x)) (hf : HasStrictFDerivAt f f' x) :
HasStrictFDerivAt (hâ â f) (hâ' ⢠f') x := by
rw [HasStrictDerivAt] at hh
convert (hh.restrictScalars ð).comp x hf
ext x
simp [mul_comm]
/-- As `HasStrictDerivAt.comp_hasStrictFDerivAt`, with the base point given by `y = f x`. -/
theorem HasStrictDerivAt.comp_hasStrictFDerivAt_of_eq {f : E â ð'} {f' : E âL[ð] ð'} (x)
(hh : HasStrictDerivAt hâ hâ' y) (hf : HasStrictFDerivAt f f' x) (hy : y = f x) :
HasStrictFDerivAt (hâ â f) (hâ' ⢠f') x := by
rw [hy] at hh; exact hh.comp_hasStrictFDerivAt x hf
/-- Pointwise version: the Fréchet derivative of `hâ â f` at `x` is `hâ' ⢠f'`. -/
theorem HasDerivAt.comp_hasFDerivAt {f : E â ð'} {f' : E âL[ð] ð'} (x)
(hh : HasDerivAt hâ hâ' (f x)) (hf : HasFDerivAt f f' x) : HasFDerivAt (hâ â f) (hâ' ⢠f') x :=
hh.comp_hasFDerivAtFilter x hf hf.continuousAt
/-- As `HasDerivAt.comp_hasFDerivAt`, with the base point given by `y = f x`. -/
theorem HasDerivAt.comp_hasFDerivAt_of_eq {f : E â ð'} {f' : E âL[ð] ð'} (x)
(hh : HasDerivAt hâ hâ' y) (hf : HasFDerivAt f f' x) (hy : y = f x) :
HasFDerivAt (hâ â f) (hâ' ⢠f') x := by
rw [hy] at hh; exact hh.comp_hasFDerivAt x hf
/-- Within-a-set version of `HasDerivAt.comp_hasFDerivAt`, the inner function restricted to `s`. -/
theorem HasDerivAt.comp_hasFDerivWithinAt {f : E â ð'} {f' : E âL[ð] ð'} {s} (x)
(hh : HasDerivAt hâ hâ' (f x)) (hf : HasFDerivWithinAt f f' s x) :
HasFDerivWithinAt (hâ â f) (hâ' ⢠f') s x :=
hh.comp_hasFDerivAtFilter x hf hf.continuousWithinAt
/-- As `HasDerivAt.comp_hasFDerivWithinAt`, with the base point given by `y = f x`. -/
theorem HasDerivAt.comp_hasFDerivWithinAt_of_eq {f : E â ð'} {f' : E âL[ð] ð'} {s} (x)
(hh : HasDerivAt hâ hâ' y) (hf : HasFDerivWithinAt f f' s x) (hy : y = f x) :
HasFDerivWithinAt (hâ â f) (hâ' ⢠f') s x := by
rw [hy] at hh; exact hh.comp_hasFDerivWithinAt x hf
/-- Both functions restricted to sets: `f` maps `s` into `t`, where `hâ` is differentiable. -/
theorem HasDerivWithinAt.comp_hasFDerivWithinAt {f : E â ð'} {f' : E âL[ð] ð'} {s t} (x)
(hh : HasDerivWithinAt hâ hâ' t (f x)) (hf : HasFDerivWithinAt f f' s x) (hst : MapsTo f s t) :
HasFDerivWithinAt (hâ â f) (hâ' ⢠f') s x :=
hh.comp_hasFDerivAtFilter x hf <| hf.continuousWithinAt.tendsto_nhdsWithin hst
/-- As `HasDerivWithinAt.comp_hasFDerivWithinAt`, with the base point given by `y = f x`. -/
theorem HasDerivWithinAt.comp_hasFDerivWithinAt_of_eq {f : E â ð'} {f' : E âL[ð] ð'} {s t} (x)
(hh : HasDerivWithinAt hâ hâ' t y) (hf : HasFDerivWithinAt f f' s x) (hst : MapsTo f s t)
(hy : y = f x) :
HasFDerivWithinAt (hâ â f) (hâ' ⢠f') s x := by
rw [hy] at hh; exact hh.comp_hasFDerivWithinAt x hf hst
/-! ### Derivative of the composition of two scalar functions -/
/-- Chain rule for scalar-valued functions along filters: `smul` of the `scomp` version
becomes multiplication. -/
theorem HasDerivAtFilter.comp (hhâ : HasDerivAtFilter hâ hâ' (h x) L')
(hh : HasDerivAtFilter h h' x L) (hL : Tendsto h L L') :
HasDerivAtFilter (hâ â h) (hâ' * h') x L := by
rw [mul_comm]
exact hhâ.scomp x hh hL
/-- As `HasDerivAtFilter.comp`, with the base point given by `y = h x`. -/
theorem HasDerivAtFilter.comp_of_eq (hhâ : HasDerivAtFilter hâ hâ' y L')
(hh : HasDerivAtFilter h h' x L) (hL : Tendsto h L L') (hy : y = h x) :
HasDerivAtFilter (hâ â h) (hâ' * h') x L := by
rw [hy] at hhâ; exact hhâ.comp x hh hL
/-- Chain rule for scalar-valued functions within sets, with `h` mapping `s` into `s'`. -/
theorem HasDerivWithinAt.comp (hhâ : HasDerivWithinAt hâ hâ' s' (h x))
(hh : HasDerivWithinAt h h' s x) (hst : MapsTo h s s') :
HasDerivWithinAt (hâ â h) (hâ' * h') s x := by
rw [mul_comm]
exact hhâ.scomp x hh hst
/-- As `HasDerivWithinAt.comp`, with the base point given by `y = h x`. -/
theorem HasDerivWithinAt.comp_of_eq (hhâ : HasDerivWithinAt hâ hâ' s' y)
(hh : HasDerivWithinAt h h' s x) (hst : MapsTo h s s') (hy : y = h x) :
HasDerivWithinAt (hâ â h) (hâ' * h') s x := by
rw [hy] at hhâ; exact hhâ.comp x hh hst
/-- The chain rule.
Note that the function `hâ` is a function on an algebra. If you are looking for the chain rule
with `hâ` taking values in a vector space, use `HasDerivAt.scomp`. -/
nonrec theorem HasDerivAt.comp (hhâ : HasDerivAt hâ hâ' (h x)) (hh : HasDerivAt h h' x) :
HasDerivAt (hâ â h) (hâ' * h') x :=
hhâ.comp x hh hh.continuousAt
/-- The chain rule.
Note that the function `hâ` is a function on an algebra. If you are looking for the chain rule
with `hâ` taking values in a vector space, use `HasDerivAt.scomp_of_eq`. -/
theorem HasDerivAt.comp_of_eq
(hhâ : HasDerivAt hâ hâ' y) (hh : HasDerivAt h h' x) (hy : y = h x) :
HasDerivAt (hâ â h) (hâ' * h') x := by
rw [hy] at hhâ; exact hhâ.comp x hh
/-- Strict-derivative version of the scalar chain rule. -/
theorem HasStrictDerivAt.comp (hhâ : HasStrictDerivAt hâ hâ' (h x)) (hh : HasStrictDerivAt h h' x) :
HasStrictDerivAt (hâ â h) (hâ' * h') x := by
rw [mul_comm]
exact hhâ.scomp x hh
/-- As `HasStrictDerivAt.comp`, with the base point given by `y = h x`. -/
theorem HasStrictDerivAt.comp_of_eq
(hhâ : HasStrictDerivAt hâ hâ' y) (hh : HasStrictDerivAt h h' x) (hy : y = h x) :
HasStrictDerivAt (hâ â h) (hâ' * h') x := by
rw [hy] at hhâ; exact hhâ.comp x hh
/-- Scalar chain rule with only the inner function restricted to a set. -/
theorem HasDerivAt.comp_hasDerivWithinAt (hhâ : HasDerivAt hâ hâ' (h x))
(hh : HasDerivWithinAt h h' s x) : HasDerivWithinAt (hâ â h) (hâ' * h') s x :=
hhâ.hasDerivWithinAt.comp x hh (mapsTo_univ _ _)
/-- As `HasDerivAt.comp_hasDerivWithinAt`, with the base point given by `y = h x`. -/
theorem HasDerivAt.comp_hasDerivWithinAt_of_eq (hhâ : HasDerivAt hâ hâ' y)
(hh : HasDerivWithinAt h h' s x) (hy : y = h x) :
HasDerivWithinAt (hâ â h) (hâ' * h') s x := by
rw [hy] at hhâ; exact hhâ.comp_hasDerivWithinAt x hh
/-- Chain-rule formula for `derivWithin` of a scalar composition. -/
theorem derivWithin.comp (hhâ : DifferentiableWithinAt ð' hâ s' (h x))
(hh : DifferentiableWithinAt ð h s x) (hs : MapsTo h s s') (hxs : UniqueDiffWithinAt ð s x) :
derivWithin (hâ â h) s x = derivWithin hâ s' (h x) * derivWithin h s x :=
(hhâ.hasDerivWithinAt.comp x hh.hasDerivWithinAt hs).derivWithin hxs
/-- As `derivWithin.comp`, with the base point given by `y = h x`. -/
theorem derivWithin.comp_of_eq (hhâ : DifferentiableWithinAt ð' hâ s' y)
(hh : DifferentiableWithinAt ð h s x) (hs : MapsTo h s s') (hxs : UniqueDiffWithinAt ð s x)
(hy : y = h x) :
derivWithin (hâ â h) s x = derivWithin hâ s' (h x) * derivWithin h s x := by
rw [hy] at hhâ; exact derivWithin.comp x hhâ hh hs hxs
/-- Chain-rule formula for `deriv` of a scalar composition. -/
theorem deriv.comp (hhâ : DifferentiableAt ð' hâ (h x)) (hh : DifferentiableAt ð h x) :
deriv (hâ â h) x = deriv hâ (h x) * deriv h x :=
(hhâ.hasDerivAt.comp x hh.hasDerivAt).deriv
/-- As `deriv.comp`, with the base point given by `y = h x`. -/
theorem deriv.comp_of_eq (hhâ : DifferentiableAt ð' hâ y) (hh : DifferentiableAt ð h x)
(hy : y = h x) :
deriv (hâ â h) x = deriv hâ (h x) * deriv h x := by
rw [hy] at hhâ; exact deriv.comp x hhâ hh
/-- At a fixed point `f x = x`, the `n`-th iterate of `f` has derivative `f' ^ n` along `L`. -/
protected nonrec theorem HasDerivAtFilter.iterate {f : ð â ð} {f' : ð}
(hf : HasDerivAtFilter f f' x L) (hL : Tendsto f L L) (hx : f x = x) (n : â) :
HasDerivAtFilter f^[n] (f' ^ n) x L := by
have := hf.iterate hL hx n
rwa [ContinuousLinearMap.smulRight_one_pow] at this
/-- At a fixed point `f x = x`, the `n`-th iterate of `f` has derivative `f' ^ n`. -/
protected nonrec theorem HasDerivAt.iterate {f : ð â ð} {f' : ð} (hf : HasDerivAt f f' x)
(hx : f x = x) (n : â) : HasDerivAt f^[n] (f' ^ n) x :=
hf.iterate _ (have := hf.tendsto_nhds le_rfl; by rwa [hx] at this) hx n
/-- Within-a-set version of `HasDerivAt.iterate`, assuming `f` maps `s` into itself. -/
protected theorem HasDerivWithinAt.iterate {f : ð â ð} {f' : ð} (hf : HasDerivWithinAt f f' s x)
(hx : f x = x) (hs : MapsTo f s s) (n : â) : HasDerivWithinAt f^[n] (f' ^ n) s x := by
have := HasFDerivWithinAt.iterate hf hx hs n
rwa [ContinuousLinearMap.smulRight_one_pow] at this
/-- Strict-derivative version of `HasDerivAt.iterate`. -/
protected nonrec theorem HasStrictDerivAt.iterate {f : ð â ð} {f' : ð}
(hf : HasStrictDerivAt f f' x) (hx : f x = x) (n : â) :
HasStrictDerivAt f^[n] (f' ^ n) x := by
have := hf.iterate hx n
rwa [ContinuousLinearMap.smulRight_one_pow] at this
end Composition
section CompositionVector
/-! ### Derivative of the composition of a function between vector spaces and a function on `ð` -/
open ContinuousLinearMap
variable {l : F â E} {l' : F âL[ð] E} {y : F}
variable (x)
/-- The composition `l â f` where `l : F â E` and `f : ð â F`, has a derivative within a set
equal to the Fréchet derivative of `l` applied to the derivative of `f`. -/
theorem HasFDerivWithinAt.comp_hasDerivWithinAt {t : Set F} (hl : HasFDerivWithinAt l l' t (f x))
(hf : HasDerivWithinAt f f' s x) (hst : MapsTo f s t) :
HasDerivWithinAt (l â f) (l' f') s x := by
simpa only [one_apply, one_smul, smulRight_apply, coe_comp', (· â ·)] using
(hl.comp x hf.hasFDerivWithinAt hst).hasDerivWithinAt
/-- The composition `l â f` where `l : F â E` and `f : ð â F`, has a derivative within a set
equal to the Fréchet derivative of `l` applied to the derivative of `f`.
Version with the base point given by `y = f x`. -/
theorem HasFDerivWithinAt.comp_hasDerivWithinAt_of_eq {t : Set F}
(hl : HasFDerivWithinAt l l' t y)
(hf : HasDerivWithinAt f f' s x) (hst : MapsTo f s t) (hy : y = f x) :
HasDerivWithinAt (l â f) (l' f') s x := by
rw [hy] at hl; exact hl.comp_hasDerivWithinAt x hf hst
/-- As `HasFDerivWithinAt.comp_hasDerivWithinAt`, with `l` differentiable at `f x` globally. -/
theorem HasFDerivAt.comp_hasDerivWithinAt (hl : HasFDerivAt l l' (f x))
(hf : HasDerivWithinAt f f' s x) : HasDerivWithinAt (l â f) (l' f') s x :=
hl.hasFDerivWithinAt.comp_hasDerivWithinAt x hf (mapsTo_univ _ _)
/-- As `HasFDerivAt.comp_hasDerivWithinAt`, with the base point given by `y = f x`. -/
theorem HasFDerivAt.comp_hasDerivWithinAt_of_eq (hl : HasFDerivAt l l' y)
(hf : HasDerivWithinAt f f' s x) (hy : y = f x) :
HasDerivWithinAt (l â f) (l' f') s x := by
rw [hy] at hl; exact hl.comp_hasDerivWithinAt x hf
/-- The composition `l â f` where `l : F â E` and `f : ð â F`, has a derivative equal to the
Fréchet derivative of `l` applied to the derivative of `f`. -/
theorem HasFDerivAt.comp_hasDerivAt (hl : HasFDerivAt l l' (f x)) (hf : HasDerivAt f f' x) :
HasDerivAt (l â f) (l' f') x :=
hasDerivWithinAt_univ.mp <| hl.comp_hasDerivWithinAt x hf.hasDerivWithinAt
/-- The composition `l â f` where `l : F â E` and `f : ð â F`, has a derivative equal to the
Fréchet derivative of `l` applied to the derivative of `f`.
Version with the base point given by `y = f x`. -/
theorem HasFDerivAt.comp_hasDerivAt_of_eq
(hl : HasFDerivAt l l' y) (hf : HasDerivAt f f' x) (hy : y = f x) :
HasDerivAt (l â f) (l' f') x := by
rw [hy] at hl; exact hl.comp_hasDerivAt x hf
/-- Strict-derivative version of `HasFDerivAt.comp_hasDerivAt`. -/
theorem HasStrictFDerivAt.comp_hasStrictDerivAt (hl : HasStrictFDerivAt l l' (f x))
(hf : HasStrictDerivAt f f' x) : HasStrictDerivAt (l â f) (l' f') x := by
simpa only [one_apply, one_smul, smulRight_apply, coe_comp', (· â ·)] using
(hl.comp x hf.hasStrictFDerivAt).hasStrictDerivAt
/-- As `HasStrictFDerivAt.comp_hasStrictDerivAt`, with the base point given by `y = f x`. -/
theorem HasStrictFDerivAt.comp_hasStrictDerivAt_of_eq (hl : HasStrictFDerivAt l l' y)
(hf : HasStrictDerivAt f f' x) (hy : y = f x) :
HasStrictDerivAt (l â f) (l' f') x := by
rw [hy] at hl; exact hl.comp_hasStrictDerivAt x hf
/-- Formula for `derivWithin` of the composition of a map between vector spaces with a curve. -/
theorem fderivWithin.comp_derivWithin {t : Set F} (hl : DifferentiableWithinAt ð l t (f x))
(hf : DifferentiableWithinAt ð f s x) (hs : MapsTo f s t) (hxs : UniqueDiffWithinAt ð s x) :
derivWithin (l â f) s x = (fderivWithin ð l t (f x) : F â E) (derivWithin f s x) :=
(hl.hasFDerivWithinAt.comp_hasDerivWithinAt x hf.hasDerivWithinAt hs).derivWithin hxs
/-- As `fderivWithin.comp_derivWithin`, with the base point given by `y = f x`. -/
theorem fderivWithin.comp_derivWithin_of_eq {t : Set F} (hl : DifferentiableWithinAt ð l t y)
(hf : DifferentiableWithinAt ð f s x) (hs : MapsTo f s t) (hxs : UniqueDiffWithinAt ð s x)
(hy : y = f x) :
derivWithin (l â f) s x = (fderivWithin ð l t (f x) : F â E) (derivWithin f s x) := by
rw [hy] at hl; exact fderivWithin.comp_derivWithin x hl hf hs hxs
/-- Formula for `deriv` of the composition of a map between vector spaces with a curve. -/
theorem fderiv.comp_deriv (hl : DifferentiableAt ð l (f x)) (hf : DifferentiableAt ð f x) :
deriv (l â f) x = (fderiv ð l (f x) : F â E) (deriv f x) :=
(hl.hasFDerivAt.comp_hasDerivAt x hf.hasDerivAt).deriv
/-- As `fderiv.comp_deriv`, with the base point given by `y = f x`. -/
theorem fderiv.comp_deriv_of_eq (hl : DifferentiableAt ð l y) (hf : DifferentiableAt ð f x)
(hy : y = f x) :
deriv (l â f) x = (fderiv ð l (f x) : F â E) (deriv f x) := by
rw [hy] at hl; exact fderiv.comp_deriv x hl hf
end CompositionVector
|
Analysis\Calculus\Deriv\Inv.lean | /-
Copyright (c) 2023 Sébastien Gouëzel. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Sébastien Gouëzel, Yury Kudryashov
-/
import Mathlib.Analysis.Calculus.Deriv.Mul
import Mathlib.Analysis.Calculus.Deriv.Comp
/-!
# Derivatives of `x ⊠xâ»Â¹` and `f x / g x`
In this file we prove `(xâ»Â¹)' = -1 / x ^ 2`, `((f x)â»Â¹)' = -f' x / (f x) ^ 2`, and
`(f x / g x)' = (f' x * g x - f x * g' x) / (g x) ^ 2` for different notions of derivative.
For a more detailed overview of one-dimensional derivatives in mathlib, see the module docstring of
`Analysis/Calculus/Deriv/Basic`.
## Keywords
derivative
-/
universe u v w
open scoped Classical
open Topology Filter ENNReal
open Filter Asymptotics Set
open ContinuousLinearMap (smulRight smulRight_one_eq_iff)
variable {ð : Type u} [NontriviallyNormedField ð]
variable {F : Type v} [NormedAddCommGroup F] [NormedSpace ð F]
variable {E : Type w} [NormedAddCommGroup E] [NormedSpace ð E]
variable {f fâ fâ g : ð â F}
variable {f' fâ' fâ' g' : F}
variable {x : ð}
variable {s t : Set ð}
variable {L : Filter ð}
section Inverse
/-! ### Derivative of `x ⊠xâ»Â¹` -/
/-- At a nonzero point, inversion has strict derivative `-(x ^ 2)â»Â¹`. The proof reduces the
difference quotient to a little-o estimate via the algebraic identity
`yâ»Â¹ - zâ»Â¹ = (z - y) / (y * z)` on a product neighborhood avoiding zero. -/
theorem hasStrictDerivAt_inv (hx : x â 0) : HasStrictDerivAt Inv.inv (-(x ^ 2)â»Â¹) x := by
suffices
(fun p : ð à ð => (p.1 - p.2) * ((x * x)â»Â¹ - (p.1 * p.2)â»Â¹)) =o[ð (x, x)] fun p =>
(p.1 - p.2) * 1 by
refine this.congr' ?_ (eventually_of_forall fun _ => mul_one _)
refine Eventually.mono ((isOpen_ne.prod isOpen_ne).mem_nhds âšhx, hxâ©) ?_
rintro âšy, zâ© âšhy, hzâ©
simp only [mem_setOf_eq] at hy hz
-- hy : y â 0, hz : z â 0
field_simp [hx, hy, hz]
ring
refine (isBigO_refl (fun p : ð Ã ð => p.1 - p.2) _).mul_isLittleO ((isLittleO_one_iff ð).2 ?_)
rw [â sub_self (x * x)â»Â¹]
exact tendsto_const_nhds.sub ((continuous_mul.tendsto (x, x)).invâ <| mul_ne_zero hx hx)
/-- At a nonzero point, inversion has derivative `-(x ^ 2)â»Â¹`. -/
theorem hasDerivAt_inv (x_ne_zero : x â 0) : HasDerivAt (fun y => yâ»Â¹) (-(x ^ 2)â»Â¹) x :=
(hasStrictDerivAt_inv x_ne_zero).hasDerivAt
/-- At a nonzero point, inversion has derivative `-(x ^ 2)â»Â¹` within any set. -/
theorem hasDerivWithinAt_inv (x_ne_zero : x â 0) (s : Set ð) :
HasDerivWithinAt (fun x => xâ»Â¹) (-(x ^ 2)â»Â¹) s x :=
(hasDerivAt_inv x_ne_zero).hasDerivWithinAt
/-- Inversion is differentiable at `x` exactly when `x â 0` (at `0` even continuity fails). -/
theorem differentiableAt_inv : DifferentiableAt ð (fun x => xâ»Â¹) x â x â 0 :=
âšfun H => NormedField.continuousAt_inv.1 H.continuousAt, fun H =>
(hasDerivAt_inv H).differentiableAtâ©
/-- Inversion is differentiable within any set at a nonzero point. -/
theorem differentiableWithinAt_inv (x_ne_zero : x â 0) :
DifferentiableWithinAt ð (fun x => xâ»Â¹) s x :=
(differentiableAt_inv.2 x_ne_zero).differentiableWithinAt
/-- Inversion is differentiable on the complement of `{0}`. -/
theorem differentiableOn_inv : DifferentiableOn ð (fun x : ð => xâ»Â¹) { x | x â 0 } := fun _x hx =>
differentiableWithinAt_inv hx
/-- `deriv` of inversion is `-(x ^ 2)â»Â¹` at every point: at `x = 0` both sides are `0`
(`deriv` defaults to `0` at a non-differentiability point and `(0 ^ 2)â»Â¹ = 0` in a field). -/
theorem deriv_inv : deriv (fun x => xâ»Â¹) x = -(x ^ 2)â»Â¹ := by
rcases eq_or_ne x 0 with (rfl | hne)
· simp [deriv_zero_of_not_differentiableAt (mt differentiableAt_inv.1 (not_not.2 rfl))]
· exact (hasDerivAt_inv hne).deriv
/-- Function-level form of `deriv_inv`. -/
@[simp]
theorem deriv_inv' : (deriv fun x : ð => xâ»Â¹) = fun x => -(x ^ 2)â»Â¹ :=
funext fun _ => deriv_inv
/-- `derivWithin` of inversion at a nonzero point, on a set with unique derivatives. -/
theorem derivWithin_inv (x_ne_zero : x â 0) (hxs : UniqueDiffWithinAt ð s x) :
derivWithin (fun x => xâ»Â¹) s x = -(x ^ 2)â»Â¹ := by
rw [DifferentiableAt.derivWithin (differentiableAt_inv.2 x_ne_zero) hxs]
exact deriv_inv
/-- Fréchet-derivative form of `hasDerivAt_inv`, packaging the derivative as `smulRight 1 _`. -/
theorem hasFDerivAt_inv (x_ne_zero : x â 0) :
HasFDerivAt (fun x => xâ»Â¹) (smulRight (1 : ð âL[ð] ð) (-(x ^ 2)â»Â¹) : ð âL[ð] ð) x :=
hasDerivAt_inv x_ne_zero
/-- Within-a-set version of `hasFDerivAt_inv`. -/
theorem hasFDerivWithinAt_inv (x_ne_zero : x â 0) :
HasFDerivWithinAt (fun x => xâ»Â¹) (smulRight (1 : ð âL[ð] ð) (-(x ^ 2)â»Â¹) : ð âL[ð] ð) s x :=
(hasFDerivAt_inv x_ne_zero).hasFDerivWithinAt
/-- `fderiv` of inversion; holds at every point (compare `deriv_inv`). -/
theorem fderiv_inv : fderiv ð (fun x => xâ»Â¹) x = smulRight (1 : ð âL[ð] ð) (-(x ^ 2)â»Â¹) := by
rw [â deriv_fderiv, deriv_inv]
/-- `fderivWithin` of inversion at a nonzero point, on a set with unique derivatives. -/
theorem fderivWithin_inv (x_ne_zero : x â 0) (hxs : UniqueDiffWithinAt ð s x) :
fderivWithin ð (fun x => xâ»Â¹) s x = smulRight (1 : ð âL[ð] ð) (-(x ^ 2)â»Â¹) := by
rw [DifferentiableAt.fderivWithin (differentiableAt_inv.2 x_ne_zero) hxs]
exact fderiv_inv
variable {c : ð â ð} {h : E â ð} {c' : ð} {z : E} {S : Set E}
/-- Chain rule for `fun y => (c y)â»Â¹` within a set: derivative is `-c' / c x ^ 2`. -/
theorem HasDerivWithinAt.inv (hc : HasDerivWithinAt c c' s x) (hx : c x â 0) :
HasDerivWithinAt (fun y => (c y)â»Â¹) (-c' / c x ^ 2) s x := by
convert (hasDerivAt_inv hx).comp_hasDerivWithinAt x hc using 1
field_simp
/-- Chain rule for `fun y => (c y)â»Â¹`: derivative is `-c' / c x ^ 2`. -/
theorem HasDerivAt.inv (hc : HasDerivAt c c' x) (hx : c x â 0) :
HasDerivAt (fun y => (c y)â»Â¹) (-c' / c x ^ 2) x := by
rw [â hasDerivWithinAt_univ] at *
exact hc.inv hx
/-- The reciprocal of a function differentiable within `S` at `z` is differentiable there,
provided the function does not vanish at `z`. -/
theorem DifferentiableWithinAt.inv (hf : DifferentiableWithinAt ð h S z) (hz : h z â 0) :
DifferentiableWithinAt ð (fun x => (h x)â»Â¹) S z :=
(differentiableAt_inv.mpr hz).comp_differentiableWithinAt z hf
/-- The reciprocal of a function differentiable at `z` is differentiable at `z`,
provided the function does not vanish there. -/
@[simp]
theorem DifferentiableAt.inv (hf : DifferentiableAt ð h z) (hz : h z â 0) :
DifferentiableAt ð (fun x => (h x)â»Â¹) z :=
(differentiableAt_inv.mpr hz).comp z hf
/-- The reciprocal of a nonvanishing differentiable function on `S` is differentiable on `S`. -/
theorem DifferentiableOn.inv (hf : DifferentiableOn ð h S) (hz : â x â S, h x â 0) :
DifferentiableOn ð (fun x => (h x)â»Â¹) S := fun x h => (hf x h).inv (hz x h)
/-- The reciprocal of a nowhere-vanishing differentiable function is differentiable. -/
@[simp]
theorem Differentiable.inv (hf : Differentiable ð h) (hz : â x, h x â 0) :
Differentiable ð fun x => (h x)â»Â¹ := fun x => (hf x).inv (hz x)
/-- Formula for `derivWithin` of a reciprocal. -/
theorem derivWithin_inv' (hc : DifferentiableWithinAt ð c s x) (hx : c x â 0)
(hxs : UniqueDiffWithinAt ð s x) :
derivWithin (fun x => (c x)â»Â¹) s x = -derivWithin c s x / c x ^ 2 :=
(hc.hasDerivWithinAt.inv hx).derivWithin hxs
/-- Formula for `deriv` of a reciprocal. -/
@[simp]
theorem deriv_inv'' (hc : DifferentiableAt ð c x) (hx : c x â 0) :
deriv (fun x => (c x)â»Â¹) x = -deriv c x / c x ^ 2 :=
(hc.hasDerivAt.inv hx).deriv
end Inverse
section Division
/-! ### Derivative of `x ⊠c x / d x` -/
variable {ð' : Type*} [NontriviallyNormedField ð'] [NormedAlgebra ð ð'] {c d : ð â ð'} {c' d' : ð'}
/-- Quotient rule, within a set: `(c / d)' = (c' * d x - c x * d') / d x ^ 2`. -/
theorem HasDerivWithinAt.div (hc : HasDerivWithinAt c c' s x) (hd : HasDerivWithinAt d d' s x)
    (hx : d x â 0) :
    HasDerivWithinAt (fun y => c y / d y) ((c' * d x - c x * d') / d x ^ 2) s x := by
  -- rewrite `c / d` as `c * d⁻¹` and combine the product and inverse rules
  convert hc.mul ((hasDerivAt_inv hx).comp_hasDerivWithinAt x hd) using 1
  · simp only [div_eq_mul_inv, (· â ·)]
  · field_simp
    ring

/-- Quotient rule for strict derivatives. -/
theorem HasStrictDerivAt.div (hc : HasStrictDerivAt c c' x) (hd : HasStrictDerivAt d d' x)
    (hx : d x â 0) : HasStrictDerivAt (fun y => c y / d y) ((c' * d x - c x * d') / d x ^ 2) x := by
  convert hc.mul ((hasStrictDerivAt_inv hx).comp x hd) using 1
  · simp only [div_eq_mul_inv, (· â ·)]
  · field_simp
    ring

/-- Quotient rule: `(c / d)' = (c' * d x - c x * d') / d x ^ 2`. -/
theorem HasDerivAt.div (hc : HasDerivAt c c' x) (hd : HasDerivAt d d' x) (hx : d x â 0) :
    HasDerivAt (fun y => c y / d y) ((c' * d x - c x * d') / d x ^ 2) x := by
  rw [â hasDerivWithinAt_univ] at *
  exact hc.div hd hx

/-- A quotient of functions differentiable within a set is differentiable within that set,
provided the denominator does not vanish at the point. -/
theorem DifferentiableWithinAt.div (hc : DifferentiableWithinAt ð c s x)
    (hd : DifferentiableWithinAt ð d s x) (hx : d x â 0) :
    DifferentiableWithinAt ð (fun x => c x / d x) s x :=
  (hc.hasDerivWithinAt.div hd.hasDerivWithinAt hx).differentiableWithinAt

/-- A quotient of differentiable functions is differentiable where the denominator is nonzero. -/
@[simp]
theorem DifferentiableAt.div (hc : DifferentiableAt ð c x) (hd : DifferentiableAt ð d x)
    (hx : d x â 0) : DifferentiableAt ð (fun x => c x / d x) x :=
  (hc.hasDerivAt.div hd.hasDerivAt hx).differentiableAt

/-- `DifferentiableOn` version of the quotient rule. -/
theorem DifferentiableOn.div (hc : DifferentiableOn ð c s) (hd : DifferentiableOn ð d s)
    (hx : â x â s, d x â 0) : DifferentiableOn ð (fun x => c x / d x) s := fun x h =>
  (hc x h).div (hd x h) (hx x h)

/-- Global version of the quotient rule for differentiability. -/
@[simp]
theorem Differentiable.div (hc : Differentiable ð c) (hd : Differentiable ð d) (hx : â x, d x â 0) :
    Differentiable ð fun x => c x / d x := fun x => (hc x).div (hd x) (hx x)

/-- Formula for `derivWithin` of a quotient. -/
theorem derivWithin_div (hc : DifferentiableWithinAt ð c s x) (hd : DifferentiableWithinAt ð d s x)
    (hx : d x â 0) (hxs : UniqueDiffWithinAt ð s x) :
    derivWithin (fun x => c x / d x) s x =
      (derivWithin c s x * d x - c x * derivWithin d s x) / d x ^ 2 :=
  (hc.hasDerivWithinAt.div hd.hasDerivWithinAt hx).derivWithin hxs

/-- Formula for `deriv` of a quotient. -/
@[simp]
theorem deriv_div (hc : DifferentiableAt ð c x) (hd : DifferentiableAt ð d x) (hx : d x â 0) :
    deriv (fun x => c x / d x) x = (deriv c x * d x - c x * deriv d x) / d x ^ 2 :=
  (hc.hasDerivAt.div hd.hasDerivAt hx).deriv
end Division
|
Analysis\Calculus\Deriv\Inverse.lean | /-
Copyright (c) 2021 Yury Kudryashov. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yury Kudryashov
-/
import Mathlib.Analysis.Calculus.Deriv.Comp
import Mathlib.Analysis.Calculus.FDeriv.Equiv
/-!
# Inverse function theorem - the easy half
In this file we prove that `g' (f x) = (f' x)â»Â¹` provided that `f` is strictly differentiable at
`x`, `f' x â 0`, and `g` is a local left inverse of `f` that is continuous at `f x`. This is the
easy half of the inverse function theorem: the harder half states that `g` exists.
For a more detailed overview of one-dimensional derivatives in mathlib, see the module docstring of
`Analysis/Calculus/Deriv/Basic`.
## Keywords
derivative, inverse function
-/
universe u v w
open scoped Classical
open Topology Filter ENNReal
open Filter Asymptotics Set
variable {ð : Type u} [NontriviallyNormedField ð]
variable {F : Type v} [NormedAddCommGroup F] [NormedSpace ð F]
variable {E : Type w} [NormedAddCommGroup E] [NormedSpace ð E]
variable {f fâ fâ g : ð â F}
variable {f' fâ' fâ' g' : F}
variable {x : ð}
variable {s t : Set ð}
variable {L Lâ Lâ : Filter ð}
/-- A function with a nonzero strict derivative `f'` at `x`, reinterpreted as having the
invertible continuous-linear-equiv derivative "multiplication by `f'`". -/
theorem HasStrictDerivAt.hasStrictFDerivAt_equiv {f : ð â ð} {f' x : ð}
    (hf : HasStrictDerivAt f f' x) (hf' : f' â 0) :
    HasStrictFDerivAt f (ContinuousLinearEquiv.unitsEquivAut ð (Units.mk0 f' hf') : ð âL[ð] ð) x :=
  hf

/-- A function with a nonzero derivative `f'` at `x`, reinterpreted as having the invertible
continuous-linear-equiv derivative "multiplication by `f'`". -/
theorem HasDerivAt.hasFDerivAt_equiv {f : ð â ð} {f' x : ð} (hf : HasDerivAt f f' x)
    (hf' : f' â 0) :
    HasFDerivAt f (ContinuousLinearEquiv.unitsEquivAut ð (Units.mk0 f' hf') : ð âL[ð] ð) x :=
  hf

/-- If `f (g y) = y` for `y` in some neighborhood of `a`, `g` is continuous at `a`, and `f` has an
invertible derivative `f'` at `g a` in the strict sense, then `g` has the derivative `f'â»Â¹` at `a`
in the strict sense.
This is one of the easy parts of the inverse function theorem: it assumes that we already have an
inverse function. -/
theorem HasStrictDerivAt.of_local_left_inverse {f g : ð â ð} {f' a : ð} (hg : ContinuousAt g a)
    (hf : HasStrictDerivAt f f' (g a)) (hf' : f' â 0) (hfg : âá¶ y in ð a, f (g y) = y) :
    HasStrictDerivAt g f'â»Â¹ a :=
  (hf.hasStrictFDerivAt_equiv hf').of_local_left_inverse hg hfg

/-- If `f` is a partial homeomorphism defined on a neighbourhood of `f.symm a`, and `f` has a
nonzero derivative `f'` at `f.symm a` in the strict sense, then `f.symm` has the derivative `f'â»Â¹`
at `a` in the strict sense.
This is one of the easy parts of the inverse function theorem: it assumes that we already have
an inverse function. -/
theorem PartialHomeomorph.hasStrictDerivAt_symm (f : PartialHomeomorph ð ð) {a f' : ð}
    (ha : a â f.target) (hf' : f' â 0) (htff' : HasStrictDerivAt f f' (f.symm a)) :
    HasStrictDerivAt f.symm f'â»Â¹ a :=
  htff'.of_local_left_inverse (f.symm.continuousAt ha) hf' (f.eventually_right_inverse ha)

/-- If `f (g y) = y` for `y` in some neighborhood of `a`, `g` is continuous at `a`, and `f` has an
invertible derivative `f'` at `g a`, then `g` has the derivative `f'â»Â¹` at `a`.
This is one of the easy parts of the inverse function theorem: it assumes that we already have
an inverse function. -/
theorem HasDerivAt.of_local_left_inverse {f g : ð â ð} {f' a : ð} (hg : ContinuousAt g a)
    (hf : HasDerivAt f f' (g a)) (hf' : f' â 0) (hfg : âá¶ y in ð a, f (g y) = y) :
    HasDerivAt g f'â»Â¹ a :=
  (hf.hasFDerivAt_equiv hf').of_local_left_inverse hg hfg

/-- If `f` is a partial homeomorphism defined on a neighbourhood of `f.symm a`, and `f` has a
nonzero derivative `f'` at `f.symm a`, then `f.symm` has the derivative `f'â»Â¹` at `a`.
This is one of the easy parts of the inverse function theorem: it assumes that we already have
an inverse function. -/
theorem PartialHomeomorph.hasDerivAt_symm (f : PartialHomeomorph ð ð) {a f' : ð} (ha : a â f.target)
    (hf' : f' â 0) (htff' : HasDerivAt f f' (f.symm a)) : HasDerivAt f.symm f'â»Â¹ a :=
  htff'.of_local_left_inverse (f.symm.continuousAt ha) hf' (f.eventually_right_inverse ha)

/-- If `f` has a nonzero derivative at `x`, then `f z â  f x` for `z` in a punctured
neighborhood of `x`. -/
theorem HasDerivAt.eventually_ne (h : HasDerivAt f f' x) (hf' : f' â 0) :
    âá¶ z in ð[â ] x, f z â f x :=
  (hasDerivAt_iff_hasFDerivAt.1 h).eventually_ne
    âšâf'ââ»Â¹, fun z => by field_simp [norm_smul, mt norm_eq_zero.1 hf']â©

/-- A function with a nonzero derivative at `x` maps punctured neighborhoods of `x` into
punctured neighborhoods of `f x`. -/
theorem HasDerivAt.tendsto_punctured_nhds (h : HasDerivAt f f' x) (hf' : f' â 0) :
    Tendsto f (ð[â ] x) (ð[â ] f x) :=
  tendsto_nhdsWithin_of_tendsto_nhds_of_eventually_within _ h.continuousAt.continuousWithinAt
    (h.eventually_ne hf')

/-- If `f` has derivative `0` within `t` at `g a` while `g` is a local right inverse of `f`
near `a` within `s`, then `g` cannot be differentiable within `s` at `a`: otherwise the chain
rule would force the derivative of `id` to vanish. -/
theorem not_differentiableWithinAt_of_local_left_inverse_hasDerivWithinAt_zero {f g : ð â ð} {a : ð}
    {s t : Set ð} (ha : a â s) (hsu : UniqueDiffWithinAt ð s a) (hf : HasDerivWithinAt f 0 t (g a))
    (hst : MapsTo g s t) (hfg : f â g =á¶ [ð[s] a] id) : ¬DifferentiableWithinAt ð g s a := by
  intro hg
  -- the composition `f ∘ g` has derivative `0`, yet it agrees with `id` near `a` within `s`
  have := (hf.comp a hg.hasDerivWithinAt hst).congr_of_eventuallyEq_of_mem hfg.symm ha
  simpa using hsu.eq_deriv _ this (hasDerivWithinAt_id _ _)

/-- If `f` has derivative `0` at `g a` while `g` is a local right inverse of `f` near `a`,
then `g` is not differentiable at `a`. -/
theorem not_differentiableAt_of_local_left_inverse_hasDerivAt_zero {f g : ð â ð} {a : ð}
    (hf : HasDerivAt f 0 (g a)) (hfg : f â g =á¶ [ð a] id) : ¬DifferentiableAt ð g a := by
  intro hg
  have := (hf.comp a hg.hasDerivAt).congr_of_eventuallyEq hfg.symm
  simpa using this.unique (hasDerivAt_id a)
|
Analysis\Calculus\Deriv\Linear.lean | /-
Copyright (c) 2019 Gabriel Ebner. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Gabriel Ebner, Yury Kudryashov
-/
import Mathlib.Analysis.Calculus.Deriv.Basic
import Mathlib.Analysis.Calculus.FDeriv.Linear
/-!
# Derivatives of continuous linear maps from the base field
In this file we prove that `f : ð âL[ð] E` (or `f : ð ââ[ð] E`) has derivative `f 1`.
For a more detailed overview of one-dimensional derivatives in mathlib, see the module docstring of
`Analysis/Calculus/Deriv/Basic`.
## Keywords
derivative, linear map
-/
universe u v w
open Topology Filter
open Filter Asymptotics Set
variable {ð : Type u} [NontriviallyNormedField ð]
variable {F : Type v} [NormedAddCommGroup F] [NormedSpace ð F]
variable {E : Type w} [NormedAddCommGroup E] [NormedSpace ð E]
variable {x : ð}
variable {s : Set ð}
variable {L : Filter ð}
section ContinuousLinearMap

/-! ### Derivative of continuous linear maps -/

variable (e : ð âL[ð] F)

/-- A continuous linear map from the base field has derivative `e 1` along any filter. -/
protected theorem ContinuousLinearMap.hasDerivAtFilter : HasDerivAtFilter e (e 1) x L :=
  e.hasFDerivAtFilter.hasDerivAtFilter

/-- Strict-derivative version: a continuous linear map has strict derivative `e 1`. -/
protected theorem ContinuousLinearMap.hasStrictDerivAt : HasStrictDerivAt e (e 1) x :=
  e.hasStrictFDerivAt.hasStrictDerivAt

/-- A continuous linear map has derivative `e 1` at every point. -/
protected theorem ContinuousLinearMap.hasDerivAt : HasDerivAt e (e 1) x :=
  e.hasDerivAtFilter

/-- A continuous linear map has derivative `e 1` within any set. -/
protected theorem ContinuousLinearMap.hasDerivWithinAt : HasDerivWithinAt e (e 1) s x :=
  e.hasDerivAtFilter

/-- `deriv` of a continuous linear map is `e 1`. -/
@[simp]
protected theorem ContinuousLinearMap.deriv : deriv e x = e 1 :=
  e.hasDerivAt.deriv

/-- `derivWithin` of a continuous linear map is `e 1` at points of unique differentiability. -/
protected theorem ContinuousLinearMap.derivWithin (hxs : UniqueDiffWithinAt ð s x) :
    derivWithin e s x = e 1 :=
  e.hasDerivWithinAt.derivWithin hxs

end ContinuousLinearMap

section LinearMap

/-! ### Derivative of bundled linear maps -/

variable (e : ð ââ[ð] F)

/-- A bundled linear map from the base field has derivative `e 1` along any filter
(it is automatically continuous in one dimension). -/
protected theorem LinearMap.hasDerivAtFilter : HasDerivAtFilter e (e 1) x L :=
  e.toContinuousLinearMapâ.hasDerivAtFilter

/-- Strict-derivative version of `LinearMap.hasDerivAtFilter`. -/
protected theorem LinearMap.hasStrictDerivAt : HasStrictDerivAt e (e 1) x :=
  e.toContinuousLinearMapâ.hasStrictDerivAt

/-- A bundled linear map has derivative `e 1` at every point. -/
protected theorem LinearMap.hasDerivAt : HasDerivAt e (e 1) x :=
  e.hasDerivAtFilter

/-- A bundled linear map has derivative `e 1` within any set. -/
protected theorem LinearMap.hasDerivWithinAt : HasDerivWithinAt e (e 1) s x :=
  e.hasDerivAtFilter

/-- `deriv` of a bundled linear map is `e 1`. -/
@[simp]
protected theorem LinearMap.deriv : deriv e x = e 1 :=
  e.hasDerivAt.deriv

/-- `derivWithin` of a bundled linear map is `e 1` at points of unique differentiability. -/
protected theorem LinearMap.derivWithin (hxs : UniqueDiffWithinAt ð s x) :
    derivWithin e s x = e 1 :=
  e.hasDerivWithinAt.derivWithin hxs

end LinearMap
|
Analysis\Calculus\Deriv\Mul.lean | /-
Copyright (c) 2019 Gabriel Ebner. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Gabriel Ebner, Anatole Dedecker, Yury Kudryashov
-/
import Mathlib.Analysis.Calculus.Deriv.Basic
import Mathlib.Analysis.Calculus.FDeriv.Mul
import Mathlib.Analysis.Calculus.FDeriv.Add
/-!
# Derivative of `f x * g x`
In this file we prove formulas for `(f x * g x)'` and `(f x ⢠g x)'`.
For a more detailed overview of one-dimensional derivatives in mathlib, see the module docstring of
`Analysis/Calculus/Deriv/Basic`.
## Keywords
derivative, multiplication
-/
universe u v w
noncomputable section
open scoped Topology Filter ENNReal
open Filter Asymptotics Set
open ContinuousLinearMap (smulRight smulRight_one_eq_iff)
variable {ð : Type u} [NontriviallyNormedField ð]
variable {F : Type v} [NormedAddCommGroup F] [NormedSpace ð F]
variable {E : Type w} [NormedAddCommGroup E] [NormedSpace ð E]
variable {G : Type*} [NormedAddCommGroup G] [NormedSpace ð G]
variable {f fâ fâ g : ð â F}
variable {f' fâ' fâ' g' : F}
variable {x : ð}
variable {s t : Set ð}
variable {L Lâ Lâ : Filter ð}
/-! ### Derivative of bilinear maps -/

namespace ContinuousLinearMap

variable {B : E âL[ð] F âL[ð] G} {u : ð â E} {v : ð â F} {u' : E} {v' : F}

/-- Leibniz rule for a bilinear map applied to two functions, within a set. -/
theorem hasDerivWithinAt_of_bilinear
    (hu : HasDerivWithinAt u u' s x) (hv : HasDerivWithinAt v v' s x) :
    HasDerivWithinAt (fun x ⊠B (u x) (v x)) (B (u x) v' + B u' (v x)) s x := by
  simpa using (B.hasFDerivWithinAt_of_bilinear
    hu.hasFDerivWithinAt hv.hasFDerivWithinAt).hasDerivWithinAt

/-- Leibniz rule for a bilinear map applied to two functions. -/
theorem hasDerivAt_of_bilinear (hu : HasDerivAt u u' x) (hv : HasDerivAt v v' x) :
    HasDerivAt (fun x ⊠B (u x) (v x)) (B (u x) v' + B u' (v x)) x := by
  simpa using (B.hasFDerivAt_of_bilinear hu.hasFDerivAt hv.hasFDerivAt).hasDerivAt

/-- Strict-derivative Leibniz rule for a bilinear map. -/
theorem hasStrictDerivAt_of_bilinear (hu : HasStrictDerivAt u u' x) (hv : HasStrictDerivAt v v' x) :
    HasStrictDerivAt (fun x ⊠B (u x) (v x)) (B (u x) v' + B u' (v x)) x := by
  simpa using
    (B.hasStrictFDerivAt_of_bilinear hu.hasStrictFDerivAt hv.hasStrictFDerivAt).hasStrictDerivAt

/-- `derivWithin` formula for a bilinear map applied to two functions. -/
theorem derivWithin_of_bilinear (hxs : UniqueDiffWithinAt ð s x)
    (hu : DifferentiableWithinAt ð u s x) (hv : DifferentiableWithinAt ð v s x) :
    derivWithin (fun y => B (u y) (v y)) s x =
      B (u x) (derivWithin v s x) + B (derivWithin u s x) (v x) :=
  (B.hasDerivWithinAt_of_bilinear hu.hasDerivWithinAt hv.hasDerivWithinAt).derivWithin hxs

/-- `deriv` formula for a bilinear map applied to two functions. -/
theorem deriv_of_bilinear (hu : DifferentiableAt ð u x) (hv : DifferentiableAt ð v x) :
    deriv (fun y => B (u y) (v y)) x = B (u x) (deriv v x) + B (deriv u x) (v x) :=
  (B.hasDerivAt_of_bilinear hu.hasDerivAt hv.hasDerivAt).deriv

end ContinuousLinearMap
section SMul

/-! ### Derivative of the multiplication of a scalar function and a vector function -/

variable {ð' : Type*} [NontriviallyNormedField ð'] [NormedAlgebra ð ð'] [NormedSpace ð' F]
  [IsScalarTower ð ð' F] {c : ð â ð'} {c' : ð'}

/-- Product rule for scalar multiplication, within a set. -/
theorem HasDerivWithinAt.smul (hc : HasDerivWithinAt c c' s x) (hf : HasDerivWithinAt f f' s x) :
    HasDerivWithinAt (fun y => c y ⢠f y) (c x ⢠f' + c' ⢠f x) s x := by
  simpa using (HasFDerivWithinAt.smul hc hf).hasDerivWithinAt

/-- Product rule for scalar multiplication. -/
theorem HasDerivAt.smul (hc : HasDerivAt c c' x) (hf : HasDerivAt f f' x) :
    HasDerivAt (fun y => c y ⢠f y) (c x ⢠f' + c' ⢠f x) x := by
  rw [â hasDerivWithinAt_univ] at *
  exact hc.smul hf

/-- Strict-derivative product rule for scalar multiplication. -/
nonrec theorem HasStrictDerivAt.smul (hc : HasStrictDerivAt c c' x) (hf : HasStrictDerivAt f f' x) :
    HasStrictDerivAt (fun y => c y ⢠f y) (c x ⢠f' + c' ⢠f x) x := by
  simpa using (hc.smul hf).hasStrictDerivAt

/-- `derivWithin` formula for a scalar-function times vector-function product. -/
theorem derivWithin_smul (hxs : UniqueDiffWithinAt ð s x) (hc : DifferentiableWithinAt ð c s x)
    (hf : DifferentiableWithinAt ð f s x) :
    derivWithin (fun y => c y ⢠f y) s x = c x ⢠derivWithin f s x + derivWithin c s x ⢠f x :=
  (hc.hasDerivWithinAt.smul hf.hasDerivWithinAt).derivWithin hxs

/-- `deriv` formula for a scalar-function times vector-function product. -/
theorem deriv_smul (hc : DifferentiableAt ð c x) (hf : DifferentiableAt ð f x) :
    deriv (fun y => c y ⢠f y) x = c x ⢠deriv f x + deriv c x ⢠f x :=
  (hc.hasDerivAt.smul hf.hasDerivAt).deriv

/-- Derivative of a scalar function times a constant vector, strict version. -/
theorem HasStrictDerivAt.smul_const (hc : HasStrictDerivAt c c' x) (f : F) :
    HasStrictDerivAt (fun y => c y ⢠f) (c' ⢠f) x := by
  -- specialize the product rule to the constant function `fun _ => f`
  have := hc.smul (hasStrictDerivAt_const x f)
  rwa [smul_zero, zero_add] at this

/-- Derivative of a scalar function times a constant vector, within a set. -/
theorem HasDerivWithinAt.smul_const (hc : HasDerivWithinAt c c' s x) (f : F) :
    HasDerivWithinAt (fun y => c y ⢠f) (c' ⢠f) s x := by
  have := hc.smul (hasDerivWithinAt_const x s f)
  rwa [smul_zero, zero_add] at this

/-- Derivative of a scalar function times a constant vector. -/
theorem HasDerivAt.smul_const (hc : HasDerivAt c c' x) (f : F) :
    HasDerivAt (fun y => c y ⢠f) (c' ⢠f) x := by
  rw [â hasDerivWithinAt_univ] at *
  exact hc.smul_const f

/-- `derivWithin` of a scalar function times a constant vector. -/
theorem derivWithin_smul_const (hxs : UniqueDiffWithinAt ð s x)
    (hc : DifferentiableWithinAt ð c s x) (f : F) :
    derivWithin (fun y => c y ⢠f) s x = derivWithin c s x ⢠f :=
  (hc.hasDerivWithinAt.smul_const f).derivWithin hxs

/-- `deriv` of a scalar function times a constant vector. -/
theorem deriv_smul_const (hc : DifferentiableAt ð c x) (f : F) :
    deriv (fun y => c y ⢠f) x = deriv c x ⢠f :=
  (hc.hasDerivAt.smul_const f).deriv

end SMul
section ConstSMul

variable {R : Type*} [Semiring R] [Module R F] [SMulCommClass ð R F] [ContinuousConstSMul R F]

/-- Scalar multiplication by a constant commutes with the strict derivative. -/
nonrec theorem HasStrictDerivAt.const_smul (c : R) (hf : HasStrictDerivAt f f' x) :
    HasStrictDerivAt (fun y => c ⢠f y) (c ⢠f') x := by
  simpa using (hf.const_smul c).hasStrictDerivAt

/-- Scalar multiplication by a constant commutes with the derivative along a filter. -/
nonrec theorem HasDerivAtFilter.const_smul (c : R) (hf : HasDerivAtFilter f f' x L) :
    HasDerivAtFilter (fun y => c ⢠f y) (c ⢠f') x L := by
  simpa using (hf.const_smul c).hasDerivAtFilter

/-- Scalar multiplication by a constant commutes with the derivative within a set. -/
nonrec theorem HasDerivWithinAt.const_smul (c : R) (hf : HasDerivWithinAt f f' s x) :
    HasDerivWithinAt (fun y => c ⢠f y) (c ⢠f') s x :=
  hf.const_smul c

/-- Scalar multiplication by a constant commutes with the derivative. -/
nonrec theorem HasDerivAt.const_smul (c : R) (hf : HasDerivAt f f' x) :
    HasDerivAt (fun y => c ⢠f y) (c ⢠f') x :=
  hf.const_smul c

/-- `derivWithin` commutes with scalar multiplication by a constant. -/
theorem derivWithin_const_smul (hxs : UniqueDiffWithinAt ð s x) (c : R)
    (hf : DifferentiableWithinAt ð f s x) :
    derivWithin (fun y => c ⢠f y) s x = c ⢠derivWithin f s x :=
  (hf.hasDerivWithinAt.const_smul c).derivWithin hxs

/-- `deriv` commutes with scalar multiplication by a constant. -/
theorem deriv_const_smul (c : R) (hf : DifferentiableAt ð f x) :
    deriv (fun y => c ⢠f y) x = c ⢠deriv f x :=
  (hf.hasDerivAt.const_smul c).deriv

/-- A variant of `deriv_const_smul` without differentiability assumption when the scalar
multiplication is by field elements. -/
lemma deriv_const_smul' {f : ð â F} {x : ð} {R : Type*} [Field R] [Module R F] [SMulCommClass ð R F]
    [ContinuousConstSMul R F] (c : R) :
    deriv (fun y ⊠c ⢠f y) x = c ⢠deriv f x := by
  by_cases hf : DifferentiableAt ð f x
  · exact deriv_const_smul c hf
  · rcases eq_or_ne c 0 with rfl | hc
    · simp only [zero_smul, deriv_const']
    -- for nonzero `c`, `c • f` is differentiable iff `f` is, so both `deriv`s are junk `0`
    · have H : ¬DifferentiableAt ð (fun y ⊠c ⢠f y) x := by
        contrapose! hf
        change DifferentiableAt ð (fun y ⊠f y) x
        conv => enter [2, y]; rw [â inv_smul_smulâ hc (f y)]
        exact DifferentiableAt.const_smul hf câ»Â¹
      rw [deriv_zero_of_not_differentiableAt hf, deriv_zero_of_not_differentiableAt H, smul_zero]

end ConstSMul
section Mul

/-! ### Derivative of the multiplication of two functions -/

variable {ð' ðž : Type*} [NormedField ð'] [NormedRing ðž] [NormedAlgebra ð ð'] [NormedAlgebra ð ðž]
  {c d : ð â ðž} {c' d' : ðž} {u v : ð â ð'}

/-- Product rule, within a set: `(c * d)' = c' * d x + c x * d'`. -/
theorem HasDerivWithinAt.mul (hc : HasDerivWithinAt c c' s x) (hd : HasDerivWithinAt d d' s x) :
    HasDerivWithinAt (fun y => c y * d y) (c' * d x + c x * d') s x := by
  -- specialize the Fréchet product rule and evaluate the resulting linear map at `1`
  have := (HasFDerivWithinAt.mul' hc hd).hasDerivWithinAt
  rwa [ContinuousLinearMap.add_apply, ContinuousLinearMap.smul_apply,
    ContinuousLinearMap.smulRight_apply, ContinuousLinearMap.smulRight_apply,
    ContinuousLinearMap.smulRight_apply, ContinuousLinearMap.one_apply, one_smul, one_smul,
    add_comm] at this

/-- Product rule: `(c * d)' = c' * d x + c x * d'`. -/
theorem HasDerivAt.mul (hc : HasDerivAt c c' x) (hd : HasDerivAt d d' x) :
    HasDerivAt (fun y => c y * d y) (c' * d x + c x * d') x := by
  rw [â hasDerivWithinAt_univ] at *
  exact hc.mul hd

/-- Strict-derivative product rule. -/
theorem HasStrictDerivAt.mul (hc : HasStrictDerivAt c c' x) (hd : HasStrictDerivAt d d' x) :
    HasStrictDerivAt (fun y => c y * d y) (c' * d x + c x * d') x := by
  have := (HasStrictFDerivAt.mul' hc hd).hasStrictDerivAt
  rwa [ContinuousLinearMap.add_apply, ContinuousLinearMap.smul_apply,
    ContinuousLinearMap.smulRight_apply, ContinuousLinearMap.smulRight_apply,
    ContinuousLinearMap.smulRight_apply, ContinuousLinearMap.one_apply, one_smul, one_smul,
    add_comm] at this

/-- `derivWithin` formula for a product of functions. -/
theorem derivWithin_mul (hxs : UniqueDiffWithinAt ð s x) (hc : DifferentiableWithinAt ð c s x)
    (hd : DifferentiableWithinAt ð d s x) :
    derivWithin (fun y => c y * d y) s x = derivWithin c s x * d x + c x * derivWithin d s x :=
  (hc.hasDerivWithinAt.mul hd.hasDerivWithinAt).derivWithin hxs

/-- `deriv` formula for a product of functions. -/
@[simp]
theorem deriv_mul (hc : DifferentiableAt ð c x) (hd : DifferentiableAt ð d x) :
    deriv (fun y => c y * d y) x = deriv c x * d x + c x * deriv d x :=
  (hc.hasDerivAt.mul hd.hasDerivAt).deriv

/-- Derivative of a function times a constant right factor, within a set. -/
theorem HasDerivWithinAt.mul_const (hc : HasDerivWithinAt c c' s x) (d : ðž) :
    HasDerivWithinAt (fun y => c y * d) (c' * d) s x := by
  convert hc.mul (hasDerivWithinAt_const x s d) using 1
  rw [mul_zero, add_zero]

/-- Derivative of a function times a constant right factor. -/
theorem HasDerivAt.mul_const (hc : HasDerivAt c c' x) (d : ðž) :
    HasDerivAt (fun y => c y * d) (c' * d) x := by
  rw [â hasDerivWithinAt_univ] at *
  exact hc.mul_const d

/-- The function `fun x => x * c` has derivative `c` everywhere. -/
theorem hasDerivAt_mul_const (c : ð) : HasDerivAt (fun x => x * c) c x := by
  simpa only [one_mul] using (hasDerivAt_id' x).mul_const c

/-- Strict-derivative version of `HasDerivAt.mul_const`. -/
theorem HasStrictDerivAt.mul_const (hc : HasStrictDerivAt c c' x) (d : ðž) :
    HasStrictDerivAt (fun y => c y * d) (c' * d) x := by
  convert hc.mul (hasStrictDerivAt_const x d) using 1
  rw [mul_zero, add_zero]

/-- `derivWithin` of a function times a constant right factor. -/
theorem derivWithin_mul_const (hxs : UniqueDiffWithinAt ð s x) (hc : DifferentiableWithinAt ð c s x)
    (d : ðž) : derivWithin (fun y => c y * d) s x = derivWithin c s x * d :=
  (hc.hasDerivWithinAt.mul_const d).derivWithin hxs

/-- `deriv` of a function times a constant right factor. -/
theorem deriv_mul_const (hc : DifferentiableAt ð c x) (d : ðž) :
    deriv (fun y => c y * d) x = deriv c x * d :=
  (hc.hasDerivAt.mul_const d).deriv

/-- Unconditional version for field-valued functions: if `u` is not differentiable, both sides
are `0` (for nonzero `v`, `u * v` is differentiable iff `u` is). -/
theorem deriv_mul_const_field (v : ð') : deriv (fun y => u y * v) x = deriv u x * v := by
  by_cases hu : DifferentiableAt ð u x
  · exact deriv_mul_const hu v
  · rw [deriv_zero_of_not_differentiableAt hu, zero_mul]
    rcases eq_or_ne v 0 with (rfl | hd)
    · simp only [mul_zero, deriv_const]
    -- nonzero `v`: differentiability of `u * v` would transfer back to `u`
    · refine deriv_zero_of_not_differentiableAt (mt (fun H => ?_) hu)
      simpa only [mul_inv_cancel_rightâ hd] using H.mul_const vâ»Â¹

/-- Function-level version of `deriv_mul_const_field`. -/
@[simp]
theorem deriv_mul_const_field' (v : ð') : (deriv fun x => u x * v) = fun x => deriv u x * v :=
  funext fun _ => deriv_mul_const_field v

/-- Derivative of a constant left factor times a function, within a set. -/
theorem HasDerivWithinAt.const_mul (c : ðž) (hd : HasDerivWithinAt d d' s x) :
    HasDerivWithinAt (fun y => c * d y) (c * d') s x := by
  convert (hasDerivWithinAt_const x s c).mul hd using 1
  rw [zero_mul, zero_add]

/-- Derivative of a constant left factor times a function. -/
theorem HasDerivAt.const_mul (c : ðž) (hd : HasDerivAt d d' x) :
    HasDerivAt (fun y => c * d y) (c * d') x := by
  rw [â hasDerivWithinAt_univ] at *
  exact hd.const_mul c

/-- Strict-derivative version of `HasDerivAt.const_mul`. -/
theorem HasStrictDerivAt.const_mul (c : ðž) (hd : HasStrictDerivAt d d' x) :
    HasStrictDerivAt (fun y => c * d y) (c * d') x := by
  convert (hasStrictDerivAt_const _ _).mul hd using 1
  rw [zero_mul, zero_add]

/-- `derivWithin` of a constant left factor times a function. -/
theorem derivWithin_const_mul (hxs : UniqueDiffWithinAt ð s x) (c : ðž)
    (hd : DifferentiableWithinAt ð d s x) :
    derivWithin (fun y => c * d y) s x = c * derivWithin d s x :=
  (hd.hasDerivWithinAt.const_mul c).derivWithin hxs

/-- `deriv` of a constant left factor times a function. -/
theorem deriv_const_mul (c : ðž) (hd : DifferentiableAt ð d x) :
    deriv (fun y => c * d y) x = c * deriv d x :=
  (hd.hasDerivAt.const_mul c).deriv

/-- Unconditional version for field-valued functions; see `deriv_mul_const_field`. -/
theorem deriv_const_mul_field (u : ð') : deriv (fun y => u * v y) x = u * deriv v x := by
  simp only [mul_comm u, deriv_mul_const_field]

/-- Function-level version of `deriv_const_mul_field`. -/
@[simp]
theorem deriv_const_mul_field' (u : ð') : (deriv fun x => u * v x) = fun x => u * deriv v x :=
  funext fun _ => deriv_const_mul_field u

end Mul
section Prod

section HasDeriv

variable {ι : Type*} [DecidableEq ι] {ðž' : Type*} [NormedCommRing ðž'] [NormedAlgebra ð ðž']
  {u : Finset ι} {f : ι â ð â ðž'} {f' : ι â ðž'}

/-- Leibniz rule for a finite product of functions: each summand differentiates one factor and
keeps the others evaluated at `x`. -/
theorem HasDerivAt.finset_prod (hf : â i â u, HasDerivAt (f i) (f' i) x) :
    HasDerivAt (â i â u, f i ·) (â i â u, (â j â u.erase i, f j x) ⢠f' i) x := by
  simpa [ContinuousLinearMap.sum_apply, ContinuousLinearMap.smul_apply] using
    (HasFDerivAt.finset_prod (fun i hi ⊠(hf i hi).hasFDerivAt)).hasDerivAt

/-- Leibniz rule for a finite product, within a set. -/
theorem HasDerivWithinAt.finset_prod (hf : â i â u, HasDerivWithinAt (f i) (f' i) s x) :
    HasDerivWithinAt (â i â u, f i ·) (â i â u, (â j â u.erase i, f j x) ⢠f' i) s x := by
  simpa [ContinuousLinearMap.sum_apply, ContinuousLinearMap.smul_apply] using
    (HasFDerivWithinAt.finset_prod (fun i hi ⊠(hf i hi).hasFDerivWithinAt)).hasDerivWithinAt

/-- Strict-derivative Leibniz rule for a finite product. -/
theorem HasStrictDerivAt.finset_prod (hf : â i â u, HasStrictDerivAt (f i) (f' i) x) :
    HasStrictDerivAt (â i â u, f i ·) (â i â u, (â j â u.erase i, f j x) ⢠f' i) x := by
  simpa [ContinuousLinearMap.sum_apply, ContinuousLinearMap.smul_apply] using
    (HasStrictFDerivAt.finset_prod (fun i hi ⊠(hf i hi).hasStrictFDerivAt)).hasStrictDerivAt

/-- `deriv` formula for a finite product of functions. -/
theorem deriv_finset_prod (hf : â i â u, DifferentiableAt ð (f i) x) :
    deriv (â i â u, f i ·) x = â i â u, (â j â u.erase i, f j x) ⢠deriv (f i) x :=
  (HasDerivAt.finset_prod fun i hi ⊠(hf i hi).hasDerivAt).deriv

/-- `derivWithin` formula for a finite product of functions. -/
theorem derivWithin_finset_prod (hxs : UniqueDiffWithinAt ð s x)
    (hf : â i â u, DifferentiableWithinAt ð (f i) s x) :
    derivWithin (â i â u, f i ·) s x =
      â i â u, (â j â u.erase i, f j x) ⢠derivWithin (f i) s x :=
  (HasDerivWithinAt.finset_prod fun i hi ⊠(hf i hi).hasDerivWithinAt).derivWithin hxs

end HasDeriv

variable {ι : Type*} {ðž' : Type*} [NormedCommRing ðž'] [NormedAlgebra ð ðž']
  {u : Finset ι} {f : ι â ð â ðž'} {f' : ι â ðž'}

/-- A finite product of functions differentiable at a point is differentiable there. -/
theorem DifferentiableAt.finset_prod (hd : â i â u, DifferentiableAt ð (f i) x) :
    DifferentiableAt ð (â i â u, f i ·) x := by
  -- `classical` supplies the `DecidableEq ι` instance required by `HasDerivAt.finset_prod`
  classical
  exact
    (HasDerivAt.finset_prod (fun i hi ⊠DifferentiableAt.hasDerivAt (hd i hi))).differentiableAt

/-- A finite product of functions differentiable within a set is differentiable within it. -/
theorem DifferentiableWithinAt.finset_prod (hd : â i â u, DifferentiableWithinAt ð (f i) s x) :
    DifferentiableWithinAt ð (â i â u, f i ·) s x := by
  classical
  exact (HasDerivWithinAt.finset_prod (fun i hi âŠ
    DifferentiableWithinAt.hasDerivWithinAt (hd i hi))).differentiableWithinAt

/-- `DifferentiableOn` version of `DifferentiableAt.finset_prod`. -/
theorem DifferentiableOn.finset_prod (hd : â i â u, DifferentiableOn ð (f i) s) :
    DifferentiableOn ð (â i â u, f i ·) s :=
  fun x hx ⊠.finset_prod (fun i hi ⊠hd i hi x hx)

/-- Global version of `DifferentiableAt.finset_prod`. -/
theorem Differentiable.finset_prod (hd : â i â u, Differentiable ð (f i)) :
    Differentiable ð (â i â u, f i ·) :=
  fun x ⊠.finset_prod (fun i hi ⊠hd i hi x)

end Prod
section Div

variable {ð' : Type*} [NontriviallyNormedField ð'] [NormedAlgebra ð ð'] {c d : ð â ð'} {c' d' : ð'}

/-- Derivative of a function divided by a constant. -/
theorem HasDerivAt.div_const (hc : HasDerivAt c c' x) (d : ð') :
    HasDerivAt (fun x => c x / d) (c' / d) x := by
  -- division by `d` is multiplication by the constant `d⁻¹`
  simpa only [div_eq_mul_inv] using hc.mul_const dâ»Â¹

/-- Within-a-set version of `HasDerivAt.div_const`. -/
theorem HasDerivWithinAt.div_const (hc : HasDerivWithinAt c c' s x) (d : ð') :
    HasDerivWithinAt (fun x => c x / d) (c' / d) s x := by
  simpa only [div_eq_mul_inv] using hc.mul_const dâ»Â¹

/-- Strict-derivative version of `HasDerivAt.div_const`. -/
theorem HasStrictDerivAt.div_const (hc : HasStrictDerivAt c c' x) (d : ð') :
    HasStrictDerivAt (fun x => c x / d) (c' / d) x := by
  simpa only [div_eq_mul_inv] using hc.mul_const dâ»Â¹

/-- Division by a constant preserves differentiability within a set. -/
theorem DifferentiableWithinAt.div_const (hc : DifferentiableWithinAt ð c s x) (d : ð') :
    DifferentiableWithinAt ð (fun x => c x / d) s x :=
  (hc.hasDerivWithinAt.div_const _).differentiableWithinAt

/-- Division by a constant preserves differentiability at a point. -/
@[simp]
theorem DifferentiableAt.div_const (hc : DifferentiableAt ð c x) (d : ð') :
    DifferentiableAt ð (fun x => c x / d) x :=
  (hc.hasDerivAt.div_const _).differentiableAt

/-- Division by a constant preserves differentiability on a set. -/
theorem DifferentiableOn.div_const (hc : DifferentiableOn ð c s) (d : ð') :
    DifferentiableOn ð (fun x => c x / d) s := fun x hx => (hc x hx).div_const d

/-- Division by a constant preserves differentiability everywhere. -/
@[simp]
theorem Differentiable.div_const (hc : Differentiable ð c) (d : ð') :
    Differentiable ð fun x => c x / d := fun x => (hc x).div_const d

/-- `derivWithin` of a function divided by a constant. -/
theorem derivWithin_div_const (hc : DifferentiableWithinAt ð c s x)
    (d : ð') (hxs : UniqueDiffWithinAt ð s x) :
    derivWithin (fun x => c x / d) s x = derivWithin c s x / d := by
  simp [div_eq_inv_mul, derivWithin_const_mul, hc, hxs]

/-- `deriv` of a function divided by a constant; no differentiability hypothesis is needed
thanks to `deriv_mul_const_field`. -/
@[simp]
theorem deriv_div_const (d : ð') : deriv (fun x => c x / d) x = deriv c x / d := by
  simp only [div_eq_mul_inv, deriv_mul_const_field]

end Div
section CLMCompApply

/-! ### Derivative of the pointwise composition/application of continuous linear maps -/

open ContinuousLinearMap

variable {G : Type*} [NormedAddCommGroup G] [NormedSpace ð G] {c : ð â F âL[ð] G} {c' : F âL[ð] G}
  {d : ð â E âL[ð] F} {d' : E âL[ð] F} {u : ð â F} {u' : F}

/-- Leibniz rule for the pointwise composition of continuous-linear-map-valued functions,
strict version. -/
theorem HasStrictDerivAt.clm_comp (hc : HasStrictDerivAt c c' x) (hd : HasStrictDerivAt d d' x) :
    HasStrictDerivAt (fun y => (c y).comp (d y)) (c'.comp (d x) + (c x).comp d') x := by
  -- specialize the Fréchet-level rule and evaluate the resulting derivative at `1`
  have := (hc.hasStrictFDerivAt.clm_comp hd.hasStrictFDerivAt).hasStrictDerivAt
  rwa [add_apply, comp_apply, comp_apply, smulRight_apply, smulRight_apply, one_apply, one_smul,
    one_smul, add_comm] at this

/-- Leibniz rule for pointwise composition, within a set. -/
theorem HasDerivWithinAt.clm_comp (hc : HasDerivWithinAt c c' s x)
    (hd : HasDerivWithinAt d d' s x) :
    HasDerivWithinAt (fun y => (c y).comp (d y)) (c'.comp (d x) + (c x).comp d') s x := by
  have := (hc.hasFDerivWithinAt.clm_comp hd.hasFDerivWithinAt).hasDerivWithinAt
  rwa [add_apply, comp_apply, comp_apply, smulRight_apply, smulRight_apply, one_apply, one_smul,
    one_smul, add_comm] at this

/-- Leibniz rule for pointwise composition. -/
theorem HasDerivAt.clm_comp (hc : HasDerivAt c c' x) (hd : HasDerivAt d d' x) :
    HasDerivAt (fun y => (c y).comp (d y)) (c'.comp (d x) + (c x).comp d') x := by
  rw [â hasDerivWithinAt_univ] at *
  exact hc.clm_comp hd

/-- `derivWithin` formula for pointwise composition of continuous linear maps. -/
theorem derivWithin_clm_comp (hc : DifferentiableWithinAt ð c s x)
    (hd : DifferentiableWithinAt ð d s x) (hxs : UniqueDiffWithinAt ð s x) :
    derivWithin (fun y => (c y).comp (d y)) s x =
      (derivWithin c s x).comp (d x) + (c x).comp (derivWithin d s x) :=
  (hc.hasDerivWithinAt.clm_comp hd.hasDerivWithinAt).derivWithin hxs

/-- `deriv` formula for pointwise composition of continuous linear maps. -/
theorem deriv_clm_comp (hc : DifferentiableAt ð c x) (hd : DifferentiableAt ð d x) :
    deriv (fun y => (c y).comp (d y)) x = (deriv c x).comp (d x) + (c x).comp (deriv d x) :=
  (hc.hasDerivAt.clm_comp hd.hasDerivAt).deriv

/-- Leibniz rule for the pointwise application of a continuous-linear-map-valued function to a
vector-valued function, strict version. -/
theorem HasStrictDerivAt.clm_apply (hc : HasStrictDerivAt c c' x) (hu : HasStrictDerivAt u u' x) :
    HasStrictDerivAt (fun y => (c y) (u y)) (c' (u x) + c x u') x := by
  have := (hc.hasStrictFDerivAt.clm_apply hu.hasStrictFDerivAt).hasStrictDerivAt
  rwa [add_apply, comp_apply, flip_apply, smulRight_apply, smulRight_apply, one_apply, one_smul,
    one_smul, add_comm] at this

/-- Leibniz rule for pointwise application, within a set. -/
theorem HasDerivWithinAt.clm_apply (hc : HasDerivWithinAt c c' s x)
    (hu : HasDerivWithinAt u u' s x) :
    HasDerivWithinAt (fun y => (c y) (u y)) (c' (u x) + c x u') s x := by
  have := (hc.hasFDerivWithinAt.clm_apply hu.hasFDerivWithinAt).hasDerivWithinAt
  rwa [add_apply, comp_apply, flip_apply, smulRight_apply, smulRight_apply, one_apply, one_smul,
    one_smul, add_comm] at this

/-- Leibniz rule for pointwise application. -/
theorem HasDerivAt.clm_apply (hc : HasDerivAt c c' x) (hu : HasDerivAt u u' x) :
    HasDerivAt (fun y => (c y) (u y)) (c' (u x) + c x u') x := by
  have := (hc.hasFDerivAt.clm_apply hu.hasFDerivAt).hasDerivAt
  rwa [add_apply, comp_apply, flip_apply, smulRight_apply, smulRight_apply, one_apply, one_smul,
    one_smul, add_comm] at this

/-- `derivWithin` formula for pointwise application. -/
theorem derivWithin_clm_apply (hxs : UniqueDiffWithinAt ð s x) (hc : DifferentiableWithinAt ð c s x)
    (hu : DifferentiableWithinAt ð u s x) :
    derivWithin (fun y => (c y) (u y)) s x = derivWithin c s x (u x) + c x (derivWithin u s x) :=
  (hc.hasDerivWithinAt.clm_apply hu.hasDerivWithinAt).derivWithin hxs

/-- `deriv` formula for pointwise application. -/
theorem deriv_clm_apply (hc : DifferentiableAt ð c x) (hu : DifferentiableAt ð u x) :
    deriv (fun y => (c y) (u y)) x = deriv c x (u x) + c x (deriv u x) :=
  (hc.hasDerivAt.clm_apply hu.hasDerivAt).deriv

end CLMCompApply
|
Analysis\Calculus\Deriv\Pi.lean | /-
Copyright (c) 2023 Floris van Doorn. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Floris van Doorn, Heather Macbeth
-/
import Mathlib.Analysis.Calculus.FDeriv.Pi
import Mathlib.Analysis.Calculus.Deriv.Basic
/-!
# One-dimensional derivatives on pi-types.
-/
variable {ð ι : Type*} [DecidableEq ι] [Fintype ι] [NontriviallyNormedField ð]
/-- `Function.update x i`, as a function of the updated value, has derivative `Pi.single i 1`:
only the `i`-th coordinate varies, with slope `1`. -/
theorem hasDerivAt_update (x : ι â ð) (i : ι) (y : ð) :
    HasDerivAt (Function.update x i) (Pi.single i (1 : ð)) y := by
  convert (hasFDerivAt_update x y).hasDerivAt
  -- identify the two candidate derivatives coordinatewise
  ext z j
  rw [Pi.single, Function.update_apply]
  split_ifs with h
  · simp [h]
  · simp [Pi.single_eq_of_ne h]

/-- `Pi.single i`, as a function of the value, has derivative `Pi.single i 1`. -/
theorem hasDerivAt_single (i : ι) (y : ð) :
    HasDerivAt (Pi.single (f := fun _ ⊠ð) i) (Pi.single i (1 : ð)) y :=
  hasDerivAt_update 0 i y

/-- `deriv` of `Function.update x i` is `Pi.single i 1`. -/
theorem deriv_update (x : ι â ð) (i : ι) (y : ð) :
    deriv (Function.update x i) y = Pi.single i (1 : ð) :=
  (hasDerivAt_update x i y).deriv

/-- `deriv` of `Pi.single i` is `Pi.single i 1`. -/
theorem deriv_single (i : ι) (y : ð) :
    deriv (Pi.single (f := fun _ ⊠ð) i) y = Pi.single i (1 : ð) :=
  deriv_update 0 i y
|
Analysis\Calculus\Deriv\Polynomial.lean | /-
Copyright (c) 2019 Sébastien Gouëzel. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Sébastien Gouëzel, Eric Wieser
-/
import Mathlib.Algebra.Polynomial.AlgebraMap
import Mathlib.Algebra.Polynomial.Derivative
import Mathlib.Analysis.Calculus.Deriv.Pow
import Mathlib.Analysis.Calculus.Deriv.Add
/-!
# Derivatives of polynomials
In this file we prove that derivatives of polynomials in the analysis sense agree with their
derivatives in the algebraic sense.
For a more detailed overview of one-dimensional derivatives in mathlib, see the module docstring of
`Analysis/Calculus/Deriv/Basic`.
## TODO
* Add results about multivariable polynomials.
* Generalize some (most?) results to an algebra over the base field.
## Keywords
derivative, polynomial
-/
universe u v w
open scoped Topology Filter ENNReal Polynomial
open Set
open ContinuousLinearMap (smulRight smulRight_one_eq_iff)
variable {ð : Type u} [NontriviallyNormedField ð]
variable {F : Type v} [NormedAddCommGroup F] [NormedSpace ð F]
variable {E : Type w} [NormedAddCommGroup E] [NormedSpace ð E]
variable {f fâ fâ g : ð â F}
variable {f' fâ' fâ' g' : F}
variable {x : ð}
variable {s t : Set ð}
variable {L Lâ Lâ : Filter ð}
namespace Polynomial
/-! ### Derivative of a polynomial -/
variable {R : Type*} [CommSemiring R] [Algebra R ð]
variable (p : ð[X]) (q : R[X])
/-- The derivative (in the analysis sense) of a polynomial `p` is given by `p.derivative`. -/
protected theorem hasStrictDerivAt (x : ð) :
HasStrictDerivAt (fun x => p.eval x) (p.derivative.eval x) x := by
-- Induct on the polynomial: additivity of the derivative handles sums, and the power rule
-- together with a constant factor handles monomials.
induction p using Polynomial.induction_on' with
| h_add p q hp hq => simpa using hp.add hq
| h_monomial n a => simpa [mul_assoc] using (hasStrictDerivAt_pow n x).const_mul a
/-- The strict derivative of `fun x => aeval x q` is `aeval x (derivative q)`: the analytic and
algebraic derivatives agree for polynomials over a scalar algebra. -/
protected theorem hasStrictDerivAt_aeval (x : ð) :
HasStrictDerivAt (fun x => aeval x q) (aeval x (derivative q)) x := by
-- Transport the statement along `algebraMap R ð` to reduce to polynomials over `ð`.
simpa only [aeval_def, evalâ_eq_eval_map, derivative_map] using
(q.map (algebraMap R ð)).hasStrictDerivAt x
/-- The derivative (in the analysis sense) of a polynomial `p` is given by `p.derivative`. -/
protected theorem hasDerivAt (x : ð) : HasDerivAt (fun x => p.eval x) (p.derivative.eval x) x :=
(p.hasStrictDerivAt x).hasDerivAt
/-- The derivative of `fun x => aeval x q` is `aeval x (derivative q)`; weakened form of the
strict-derivative statement. -/
protected theorem hasDerivAt_aeval (x : ð) :
HasDerivAt (fun x => aeval x q) (aeval x (derivative q)) x :=
(q.hasStrictDerivAt_aeval x).hasDerivAt
protected theorem hasDerivWithinAt (x : ð) (s : Set ð) :
HasDerivWithinAt (fun x => p.eval x) (p.derivative.eval x) s x :=
(p.hasDerivAt x).hasDerivWithinAt
protected theorem hasDerivWithinAt_aeval (x : ð) (s : Set ð) :
HasDerivWithinAt (fun x => aeval x q) (aeval x (derivative q)) s x :=
(q.hasDerivAt_aeval x).hasDerivWithinAt
protected theorem differentiableAt : DifferentiableAt ð (fun x => p.eval x) x :=
(p.hasDerivAt x).differentiableAt
protected theorem differentiableAt_aeval : DifferentiableAt ð (fun x => aeval x q) x :=
(q.hasDerivAt_aeval x).differentiableAt
protected theorem differentiableWithinAt : DifferentiableWithinAt ð (fun x => p.eval x) s x :=
p.differentiableAt.differentiableWithinAt
protected theorem differentiableWithinAt_aeval :
DifferentiableWithinAt ð (fun x => aeval x q) s x :=
q.differentiableAt_aeval.differentiableWithinAt
protected theorem differentiable : Differentiable ð fun x => p.eval x := fun _ => p.differentiableAt
protected theorem differentiable_aeval : Differentiable ð fun x : ð => aeval x q := fun _ =>
q.differentiableAt_aeval
protected theorem differentiableOn : DifferentiableOn ð (fun x => p.eval x) s :=
p.differentiable.differentiableOn
protected theorem differentiableOn_aeval : DifferentiableOn ð (fun x => aeval x q) s :=
q.differentiable_aeval.differentiableOn
@[simp]
protected theorem deriv : deriv (fun x => p.eval x) x = p.derivative.eval x :=
(p.hasDerivAt x).deriv
@[simp]
protected theorem deriv_aeval : deriv (fun x => aeval x q) x = aeval x (derivative q) :=
(q.hasDerivAt_aeval x).deriv
/-- The derivative within a set of a polynomial equals the evaluation of its algebraic
derivative, provided the set has the unique-differentiability property at the point. -/
protected theorem derivWithin (hxs : UniqueDiffWithinAt ð s x) :
derivWithin (fun x => p.eval x) s x = p.derivative.eval x := by
-- `derivWithin` agrees with `deriv` since `p.eval` is differentiable everywhere.
rw [DifferentiableAt.derivWithin p.differentiableAt hxs]
exact p.deriv
/-- The derivative within a set of `fun x => aeval x q` is `aeval x (derivative q)`, provided
the set has the unique-differentiability property at the point. -/
protected theorem derivWithin_aeval (hxs : UniqueDiffWithinAt ð s x) :
derivWithin (fun x => aeval x q) s x = aeval x (derivative q) := by
-- Reduce to the corresponding statement for the polynomial mapped into `ð`.
simpa only [aeval_def, evalâ_eq_eval_map, derivative_map] using
(q.map (algebraMap R ð)).derivWithin hxs
protected theorem hasFDerivAt (x : ð) :
HasFDerivAt (fun x => p.eval x) (smulRight (1 : ð âL[ð] ð) (p.derivative.eval x)) x :=
p.hasDerivAt x
protected theorem hasFDerivAt_aeval (x : ð) :
HasFDerivAt (fun x => aeval x q) (smulRight (1 : ð âL[ð] ð) (aeval x (derivative q))) x :=
q.hasDerivAt_aeval x
protected theorem hasFDerivWithinAt (x : ð) :
HasFDerivWithinAt (fun x => p.eval x) (smulRight (1 : ð âL[ð] ð) (p.derivative.eval x)) s x :=
(p.hasFDerivAt x).hasFDerivWithinAt
protected theorem hasFDerivWithinAt_aeval (x : ð) :
HasFDerivWithinAt (fun x => aeval x q) (smulRight (1 : ð âL[ð] ð)
(aeval x (derivative q))) s x :=
(q.hasFDerivAt_aeval x).hasFDerivWithinAt
@[simp]
protected theorem fderiv :
fderiv ð (fun x => p.eval x) x = smulRight (1 : ð âL[ð] ð) (p.derivative.eval x) :=
(p.hasFDerivAt x).fderiv
@[simp]
protected theorem fderiv_aeval :
fderiv ð (fun x => aeval x q) x = smulRight (1 : ð âL[ð] ð) (aeval x (derivative q)) :=
(q.hasFDerivAt_aeval x).fderiv
protected theorem fderivWithin (hxs : UniqueDiffWithinAt ð s x) :
fderivWithin ð (fun x => p.eval x) s x = smulRight (1 : ð âL[ð] ð) (p.derivative.eval x) :=
(p.hasFDerivWithinAt x).fderivWithin hxs
protected theorem fderivWithin_aeval (hxs : UniqueDiffWithinAt ð s x) :
fderivWithin ð (fun x => aeval x q) s x = smulRight (1 : ð âL[ð] ð) (aeval x (derivative q)) :=
(q.hasFDerivWithinAt_aeval x).fderivWithin hxs
end Polynomial
|
Analysis\Calculus\Deriv\Pow.lean | /-
Copyright (c) 2019 Sébastien Gouëzel. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Sébastien Gouëzel
-/
import Mathlib.Analysis.Calculus.Deriv.Mul
import Mathlib.Analysis.Calculus.Deriv.Comp
/-!
# Derivative of `(f x) ^ n`, `n : â`
In this file we prove that `(x ^ n)' = n * x ^ (n - 1)`, where `n` is a natural number.
For a more detailed overview of one-dimensional derivatives in mathlib, see the module docstring of
`Analysis/Calculus/Deriv/Basic`.
## Keywords
derivative, power
-/
universe u v w
open scoped Classical
open Topology Filter ENNReal
open Filter Asymptotics Set
variable {ð : Type u} [NontriviallyNormedField ð]
variable {F : Type v} [NormedAddCommGroup F] [NormedSpace ð F]
variable {E : Type w} [NormedAddCommGroup E] [NormedSpace ð E]
variable {f fâ fâ g : ð â F}
variable {f' fâ' fâ' g' : F}
variable {x : ð}
variable {s t : Set ð}
variable {L Lâ Lâ : Filter ð}
/-! ### Derivative of `x ⊠x^n` for `n : â` -/
variable {c : ð â ð} {c' : ð}
variable (n : â)
/-- Power rule, strict form: `x ^ n` has strict derivative `n * x ^ (n - 1)`. Note that `n - 1`
is natural-number subtraction, so the `n = 0` case gives derivative `0 * x ^ 0 = 0`. -/
theorem hasStrictDerivAt_pow :
â (n : â) (x : ð), HasStrictDerivAt (fun x : ð ⊠x ^ n) ((n : ð) * x ^ (n - 1)) x
-- Base cases: the constant function `1` and the identity.
| 0, x => by simp [hasStrictDerivAt_const]
| 1, x => by simpa using hasStrictDerivAt_id x
-- Inductive step: product rule applied to `x ^ (n + 1) * x`.
| n + 1 + 1, x => by
simpa [pow_succ, add_mul, mul_assoc] using
(hasStrictDerivAt_pow (n + 1) x).mul (hasStrictDerivAt_id x)
/-- Power rule: `x ^ n` has derivative `n * x ^ (n - 1)` at every point (with natural-number
subtraction in the exponent). -/
theorem hasDerivAt_pow (n : â) (x : ð) :
HasDerivAt (fun x : ð => x ^ n) ((n : ð) * x ^ (n - 1)) x :=
(hasStrictDerivAt_pow n x).hasDerivAt
theorem hasDerivWithinAt_pow (n : â) (x : ð) (s : Set ð) :
HasDerivWithinAt (fun x : ð => x ^ n) ((n : ð) * x ^ (n - 1)) s x :=
(hasDerivAt_pow n x).hasDerivWithinAt
theorem differentiableAt_pow : DifferentiableAt ð (fun x : ð => x ^ n) x :=
(hasDerivAt_pow n x).differentiableAt
theorem differentiableWithinAt_pow :
DifferentiableWithinAt ð (fun x : ð => x ^ n) s x :=
(differentiableAt_pow n).differentiableWithinAt
theorem differentiable_pow : Differentiable ð fun x : ð => x ^ n := fun _ => differentiableAt_pow n
theorem differentiableOn_pow : DifferentiableOn ð (fun x : ð => x ^ n) s :=
(differentiable_pow n).differentiableOn
theorem deriv_pow : deriv (fun x : ð => x ^ n) x = (n : ð) * x ^ (n - 1) :=
(hasDerivAt_pow n x).deriv
@[simp]
theorem deriv_pow' : (deriv fun x : ð => x ^ n) = fun x => (n : ð) * x ^ (n - 1) :=
funext fun _ => deriv_pow n
theorem derivWithin_pow (hxs : UniqueDiffWithinAt ð s x) :
derivWithin (fun x : ð => x ^ n) s x = (n : ð) * x ^ (n - 1) :=
(hasDerivWithinAt_pow n x s).derivWithin hxs
theorem HasDerivWithinAt.pow (hc : HasDerivWithinAt c c' s x) :
HasDerivWithinAt (fun y => c y ^ n) ((n : ð) * c x ^ (n - 1) * c') s x :=
(hasDerivAt_pow n (c x)).comp_hasDerivWithinAt x hc
/-- Chain rule for natural powers: if `c` has derivative `c'` at `x`, then `fun y => c y ^ n`
has derivative `n * c x ^ (n - 1) * c'` at `x`. -/
theorem HasDerivAt.pow (hc : HasDerivAt c c' x) :
HasDerivAt (fun y => c y ^ n) ((n : ð) * c x ^ (n - 1) * c') x := by
-- Reduce to the within-set statement on the whole space.
rw [â hasDerivWithinAt_univ] at *
exact hc.pow n
theorem derivWithin_pow' (hc : DifferentiableWithinAt ð c s x) (hxs : UniqueDiffWithinAt ð s x) :
derivWithin (fun x => c x ^ n) s x = (n : ð) * c x ^ (n - 1) * derivWithin c s x :=
(hc.hasDerivWithinAt.pow n).derivWithin hxs
@[simp]
theorem deriv_pow'' (hc : DifferentiableAt ð c x) :
deriv (fun x => c x ^ n) x = (n : ð) * c x ^ (n - 1) * deriv c x :=
(hc.hasDerivAt.pow n).deriv
|
Analysis\Calculus\Deriv\Prod.lean | /-
Copyright (c) 2019 Gabriel Ebner. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Gabriel Ebner, Yury Kudryashov
-/
import Mathlib.Analysis.Calculus.Deriv.Basic
import Mathlib.Analysis.Calculus.FDeriv.Prod
/-!
# Derivatives of functions taking values in product types
In this file we prove lemmas about derivatives of functions `f : ð â E Ã F` and of functions
`f : ð â (Î i, E i)`.
For a more detailed overview of one-dimensional derivatives in mathlib, see the module docstring of
`Analysis/Calculus/Deriv/Basic`.
## Keywords
derivative
-/
universe u v w
open scoped Classical
open Topology Filter
open Filter Asymptotics Set
variable {ð : Type u} [NontriviallyNormedField ð]
variable {F : Type v} [NormedAddCommGroup F] [NormedSpace ð F]
variable {E : Type w} [NormedAddCommGroup E] [NormedSpace ð E]
variable {f fâ fâ g : ð â F}
variable {f' fâ' fâ' g' : F}
variable {x : ð}
variable {s t : Set ð}
variable {L Lâ Lâ : Filter ð}
section CartesianProduct
/-! ### Derivative of the cartesian product of two functions -/
variable {G : Type w} [NormedAddCommGroup G] [NormedSpace ð G]
variable {fâ : ð â G} {fâ' : G}
nonrec theorem HasDerivAtFilter.prod (hfâ : HasDerivAtFilter fâ fâ' x L)
(hfâ : HasDerivAtFilter fâ fâ' x L) : HasDerivAtFilter (fun x => (fâ x, fâ x)) (fâ', fâ') x L :=
hfâ.prod hfâ
nonrec theorem HasDerivWithinAt.prod (hfâ : HasDerivWithinAt fâ fâ' s x)
(hfâ : HasDerivWithinAt fâ fâ' s x) : HasDerivWithinAt (fun x => (fâ x, fâ x)) (fâ', fâ') s x :=
hfâ.prod hfâ
nonrec theorem HasDerivAt.prod (hfâ : HasDerivAt fâ fâ' x) (hfâ : HasDerivAt fâ fâ' x) :
HasDerivAt (fun x => (fâ x, fâ x)) (fâ', fâ') x :=
hfâ.prod hfâ
nonrec theorem HasStrictDerivAt.prod (hfâ : HasStrictDerivAt fâ fâ' x)
(hfâ : HasStrictDerivAt fâ fâ' x) : HasStrictDerivAt (fun x => (fâ x, fâ x)) (fâ', fâ') x :=
hfâ.prod hfâ
end CartesianProduct
section Pi
/-! ### Derivatives of functions `f : ð â Î i, E i` -/
variable {ι : Type*} [Fintype ι] {E' : ι â Type*} [â i, NormedAddCommGroup (E' i)]
[â i, NormedSpace ð (E' i)] {Ï : ð â â i, E' i} {Ï' : â i, E' i}
/-- A function into a pi type has a strict derivative iff each coordinate function does, with
the coordinatewise derivative. -/
@[simp]
theorem hasStrictDerivAt_pi :
HasStrictDerivAt Ï Ï' x â â i, HasStrictDerivAt (fun x => Ï x i) (Ï' i) x :=
hasStrictFDerivAt_pi'
@[simp]
theorem hasDerivAtFilter_pi :
HasDerivAtFilter Ï Ï' x L â â i, HasDerivAtFilter (fun x => Ï x i) (Ï' i) x L :=
hasFDerivAtFilter_pi'
theorem hasDerivAt_pi : HasDerivAt Ï Ï' x â â i, HasDerivAt (fun x => Ï x i) (Ï' i) x :=
hasDerivAtFilter_pi
theorem hasDerivWithinAt_pi :
HasDerivWithinAt Ï Ï' s x â â i, HasDerivWithinAt (fun x => Ï x i) (Ï' i) s x :=
hasDerivAtFilter_pi
theorem derivWithin_pi (h : â i, DifferentiableWithinAt ð (fun x => Ï x i) s x)
(hs : UniqueDiffWithinAt ð s x) :
derivWithin Ï s x = fun i => derivWithin (fun x => Ï x i) s x :=
(hasDerivWithinAt_pi.2 fun i => (h i).hasDerivWithinAt).derivWithin hs
theorem deriv_pi (h : â i, DifferentiableAt ð (fun x => Ï x i) x) :
deriv Ï x = fun i => deriv (fun x => Ï x i) x :=
(hasDerivAt_pi.2 fun i => (h i).hasDerivAt).deriv
end Pi
|
Analysis\Calculus\Deriv\Shift.lean | /-
Copyright (c) 2023 Michael Stoll. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Michael Stoll
-/
import Mathlib.Analysis.Calculus.Deriv.Add
import Mathlib.Analysis.Calculus.Deriv.Comp
/-!
### Invariance of the derivative under translation
We show that if a function `h` has derivative `h'` at a point `a + x`, then `h (a + ·)`
has derivative `h'` at `x`. Similarly for `x + a`.
-/
/-- Translation in the domain does not change the derivative. -/
lemma HasDerivAt.comp_const_add {ð : Type*} [NontriviallyNormedField ð] (a x : ð) {ð' : Type*}
[NormedAddCommGroup ð'] [NormedSpace ð ð'] {h : ð â ð'} {h' : ð'}
(hh : HasDerivAt h h' (a + x)) :
HasDerivAt (fun x ⊠h (a + x)) h' x := by
simpa [Function.comp_def] using HasDerivAt.scomp (ð := ð) x hh <| hasDerivAt_id' x |>.const_add a
/-- Translation in the domain does not change the derivative. -/
lemma HasDerivAt.comp_add_const {ð : Type*} [NontriviallyNormedField ð] (x a : ð) {ð' : Type*}
[NormedAddCommGroup ð'] [NormedSpace ð ð'] {h : ð â ð'} {h' : ð'}
(hh : HasDerivAt h h' (x + a)) :
HasDerivAt (fun x ⊠h (x + a)) h' x := by
simpa [Function.comp_def] using HasDerivAt.scomp (ð := ð) x hh <| hasDerivAt_id' x |>.add_const a
/-- The derivative of `x ⊠f (-x)` at `a` is the negative of the derivative of `f` at `-a`. -/
lemma deriv_comp_neg {ð : Type*} [NontriviallyNormedField ð] {F : Type*} [NormedAddCommGroup F]
[NormedSpace ð F] (f : ð â F) (a : ð) : deriv (fun x ⊠f (-x)) a = -deriv f (-a) := by
by_cases h : DifferentiableAt ð f (-a)
-- Differentiable case: chain rule, where negation has derivative `-1`.
· simpa only [deriv_neg, neg_one_smul] using deriv.scomp a h (differentiable_neg _)
-- Non-differentiable case: both sides are `0` by the junk-value convention for `deriv`.
· rw [deriv_zero_of_not_differentiableAt (mt differentiableAt_comp_neg_iff.mpr h),
deriv_zero_of_not_differentiableAt h, neg_zero]
/-- Translation in the domain does not change the derivative. -/
lemma deriv_comp_const_add {ð : Type*} [NontriviallyNormedField ð] (a x : ð) {ð' : Type*}
[NormedAddCommGroup ð'] [NormedSpace ð ð'] {h : ð â ð'}
(hh : DifferentiableAt ð h (a + x)) :
deriv (fun x ⊠h (a + x)) x = deriv h (a + x) := HasDerivAt.deriv hh.hasDerivAt.comp_const_add
/-- Translation in the domain does not change the derivative. -/
lemma deriv_comp_add_const {ð : Type*} [NontriviallyNormedField ð] (a x : ð) {ð' : Type*}
[NormedAddCommGroup ð'] [NormedSpace ð ð'] {h : ð â ð'}
(hh : DifferentiableAt ð h (x + a)) :
deriv (fun x ⊠h (x + a)) x = deriv h (x + a) := HasDerivAt.deriv hh.hasDerivAt.comp_add_const
|
Analysis\Calculus\Deriv\Slope.lean | /-
Copyright (c) 2019 Yury Kudryashov. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yury Kudryashov
-/
import Mathlib.Analysis.Calculus.Deriv.Basic
import Mathlib.LinearAlgebra.AffineSpace.Slope
/-!
# Derivative as the limit of the slope
In this file we relate the derivative of a function with its definition from a standard
undergraduate course as the limit of the slope `(f y - f x) / (y - x)` as `y` tends to `ð[â ] x`.
Since we are talking about functions taking values in a normed space instead of the base field, we
use `slope f x y = (y - x)â»Â¹ ⢠(f y - f x)` instead of division.
We also prove some estimates on the upper/lower limits of the slope in terms of the derivative.
For a more detailed overview of one-dimensional derivatives in mathlib, see the module docstring of
`Analysis/Calculus/Deriv/Basic`.
## Keywords
derivative, slope
-/
universe u v w
noncomputable section
open Topology Filter TopologicalSpace
open Filter Set
section NormedField
variable {ð : Type u} [NontriviallyNormedField ð]
variable {F : Type v} [NormedAddCommGroup F] [NormedSpace ð F]
variable {E : Type w} [NormedAddCommGroup E] [NormedSpace ð E]
variable {f fâ fâ g : ð â F}
variable {f' fâ' fâ' g' : F}
variable {x : ð}
variable {s t : Set ð}
variable {L Lâ Lâ : Filter ð}
/-- If the domain has dimension one, then Fréchet derivative is equivalent to the classical
definition with a limit. In this version we have to take the limit along the subset `-{x}`,
because for `y=x` the slope equals zero due to the convention `0â»Â¹=0`. -/
theorem hasDerivAtFilter_iff_tendsto_slope {x : ð} {L : Filter ð} :
HasDerivAtFilter f f' x L â Tendsto (slope f x) (L â ð {x}á¶) (ð f') :=
calc HasDerivAtFilter f f' x L
â Tendsto (fun y ⊠slope f x y - (y - x)â»Â¹ ⢠(y - x) ⢠f') L (ð 0) := by
simp only [hasDerivAtFilter_iff_tendsto, â norm_inv, â norm_smul,
â tendsto_zero_iff_norm_tendsto_zero, slope_def_module, smul_sub]
_ â Tendsto (fun y ⊠slope f x y - (y - x)â»Â¹ ⢠(y - x) ⢠f') (L â ð {x}á¶) (ð 0) :=
.symm <| tendsto_inf_principal_nhds_iff_of_forall_eq <| by simp
_ â Tendsto (fun y ⊠slope f x y - f') (L â ð {x}á¶) (ð 0) := tendsto_congr' <| by
refine (EqOn.eventuallyEq fun y hy ⊠?_).filter_mono inf_le_right
rw [inv_smul_smulâ (sub_ne_zero.2 hy) f']
_ â Tendsto (slope f x) (L â ð {x}á¶) (ð f') := by
rw [â nhds_translation_sub f', tendsto_comap_iff]; rfl
/-- Derivative within a set as a limit of slopes: `f` has derivative `f'` within `s` at `x`
iff `slope f x` tends to `f'` along `s` with the point `x` removed. -/
theorem hasDerivWithinAt_iff_tendsto_slope :
HasDerivWithinAt f f' s x â Tendsto (slope f x) (ð[s \ {x}] x) (ð f') := by
simp only [HasDerivWithinAt, nhdsWithin, diff_eq, â inf_assoc, inf_principal.symm]
exact hasDerivAtFilter_iff_tendsto_slope
theorem hasDerivWithinAt_iff_tendsto_slope' (hs : x â s) :
HasDerivWithinAt f f' s x â Tendsto (slope f x) (ð[s] x) (ð f') := by
rw [hasDerivWithinAt_iff_tendsto_slope, diff_singleton_eq_self hs]
/-- Derivative as a limit of slopes: `f` has derivative `f'` at `x` iff `slope f x` tends to
`f'` along the punctured neighborhood filter of `x`. -/
theorem hasDerivAt_iff_tendsto_slope : HasDerivAt f f' x â Tendsto (slope f x) (ð[â ] x) (ð f') :=
hasDerivAtFilter_iff_tendsto_slope
theorem hasDerivAt_iff_tendsto_slope_zero :
HasDerivAt f f' x â Tendsto (fun t ⊠tâ»Â¹ ⢠(f (x + t) - f x)) (ð[â ] 0) (ð f') := by
have : ð[â ] x = Filter.map (fun t ⊠x + t) (ð[â ] 0) := by
simp [nhdsWithin, map_add_left_nhds_zero x, Filter.map_inf, add_right_injective x]
simp [hasDerivAt_iff_tendsto_slope, this, slope, Function.comp]
alias âšHasDerivAt.tendsto_slope_zero, _â© := hasDerivAt_iff_tendsto_slope_zero
theorem HasDerivAt.tendsto_slope_zero_right [PartialOrder ð] (h : HasDerivAt f f' x) :
Tendsto (fun t ⊠tâ»Â¹ ⢠(f (x + t) - f x)) (ð[>] 0) (ð f') :=
h.tendsto_slope_zero.mono_left (nhds_right'_le_nhds_ne 0)
theorem HasDerivAt.tendsto_slope_zero_left [PartialOrder ð] (h : HasDerivAt f f' x) :
Tendsto (fun t ⊠tâ»Â¹ ⢠(f (x + t) - f x)) (ð[<] 0) (ð f') :=
h.tendsto_slope_zero.mono_left (nhds_left'_le_nhds_ne 0)
/-- Given a set `t` such that `s â© t` is dense in `s`, then the range of `derivWithin f s` is
contained in the closure of the submodule spanned by the image of `t`. -/
theorem range_derivWithin_subset_closure_span_image
(f : ð â F) {s t : Set ð} (h : s â closure (s â© t)) :
range (derivWithin f s) â closure (Submodule.span ð (f '' t)) := by
rintro - âšx, rflâ©
rcases eq_or_neBot (ð[s \ {x}] x) with H|H
· simpa [derivWithin, fderivWithin, H] using subset_closure (zero_mem _)
by_cases H' : DifferentiableWithinAt ð f s x; swap
· rw [derivWithin_zero_of_not_differentiableWithinAt H']
exact subset_closure (zero_mem _)
have I : (ð[(s â© t) \ {x}] x).NeBot := by
rw [â mem_closure_iff_nhdsWithin_neBot] at H â¢
have A : closure (s \ {x}) â closure (closure (s â© t) \ {x}) :=
closure_mono (diff_subset_diff_left h)
have B : closure (s â© t) \ {x} â closure ((s â© t) \ {x}) := by
convert closure_diff; exact closure_singleton.symm
simpa using A.trans (closure_mono B) H
have : Tendsto (slope f x) (ð[(s â© t) \ {x}] x) (ð (derivWithin f s x)) := by
apply Tendsto.mono_left (hasDerivWithinAt_iff_tendsto_slope.1 H'.hasDerivWithinAt)
rw [inter_comm, inter_diff_assoc]
exact nhdsWithin_mono _ inter_subset_right
rw [â closure_closure, â Submodule.topologicalClosure_coe]
apply mem_closure_of_tendsto this
filter_upwards [self_mem_nhdsWithin] with y hy
simp only [slope, vsub_eq_sub, SetLike.mem_coe]
refine Submodule.smul_mem _ _ (Submodule.sub_mem _ ?_ ?_)
· apply Submodule.le_topologicalClosure
apply Submodule.subset_span
exact mem_image_of_mem _ hy.1.2
· apply Submodule.closure_subset_topologicalClosure_span
suffices A : f x â closure (f '' (s â© t)) from
closure_mono (image_subset _ inter_subset_right) A
apply ContinuousWithinAt.mem_closure_image
· apply H'.continuousWithinAt.mono inter_subset_left
rw [mem_closure_iff_nhdsWithin_neBot]
exact I.mono (nhdsWithin_mono _ diff_subset)
/-- Given a dense set `t`, then the range of `deriv f` is contained in the closure of the submodule
spanned by the image of `t`. -/
theorem range_deriv_subset_closure_span_image
(f : ð â F) {t : Set ð} (h : Dense t) :
range (deriv f) â closure (Submodule.span ð (f '' t)) := by
rw [â derivWithin_univ]
apply range_derivWithin_subset_closure_span_image
simp [dense_iff_closure_eq.1 h]
/-- Over a separable base field, the range of `derivWithin f s` is separable. -/
theorem isSeparable_range_derivWithin [SeparableSpace ð] (f : ð â F) (s : Set ð) :
IsSeparable (range (derivWithin f s)) := by
-- Pick a countable subset `t` of `s` that is dense in `s` ...
obtain âšt, ts, t_count, htâ© : â t, t â s â§ Set.Countable t â§ s â closure t :=
(IsSeparable.of_separableSpace s).exists_countable_dense_subset
have : s â closure (s â© t) := by rwa [inter_eq_self_of_subset_right ts]
-- ... then the range of the derivative lies in the closure of the span of the countable
-- image `f '' t`, which is separable.
apply IsSeparable.mono _ (range_derivWithin_subset_closure_span_image f this)
exact (Countable.image t_count f).isSeparable.span.closure
theorem isSeparable_range_deriv [SeparableSpace ð] (f : ð â F) :
IsSeparable (range (deriv f)) := by
rw [â derivWithin_univ]
exact isSeparable_range_derivWithin _ _
end NormedField
/-! ### Upper estimates on liminf and limsup -/
section Real
variable {f : â â â} {f' : â} {s : Set â} {x : â} {r : â}
theorem HasDerivWithinAt.limsup_slope_le (hf : HasDerivWithinAt f f' s x) (hr : f' < r) :
âá¶ z in ð[s \ {x}] x, slope f x z < r :=
hasDerivWithinAt_iff_tendsto_slope.1 hf (IsOpen.mem_nhds isOpen_Iio hr)
theorem HasDerivWithinAt.limsup_slope_le' (hf : HasDerivWithinAt f f' s x) (hs : x â s)
(hr : f' < r) : âá¶ z in ð[s] x, slope f x z < r :=
(hasDerivWithinAt_iff_tendsto_slope' hs).1 hf (IsOpen.mem_nhds isOpen_Iio hr)
theorem HasDerivWithinAt.liminf_right_slope_le (hf : HasDerivWithinAt f f' (Ici x) x)
(hr : f' < r) : âá¶ z in ð[>] x, slope f x z < r :=
(hf.Ioi_of_Ici.limsup_slope_le' (lt_irrefl x) hr).frequently
end Real
section RealSpace
open Metric
variable {E : Type u} [NormedAddCommGroup E] [NormedSpace â E] {f : â â E} {f' : E} {s : Set â}
{x r : â}
/-- If `f` has derivative `f'` within `s` at `x`, then for any `r > âf'â` the ratio
`âf z - f xâ / âz - xâ` is less than `r` in some neighborhood of `x` within `s`.
In other words, the limit superior of this ratio as `z` tends to `x` along `s`
is less than or equal to `âf'â`. -/
theorem HasDerivWithinAt.limsup_norm_slope_le (hf : HasDerivWithinAt f f' s x) (hr : âf'â < r) :
âá¶ z in ð[s] x, âz - xââ»Â¹ * âf z - f xâ < r := by
have hrâ : 0 < r := lt_of_le_of_lt (norm_nonneg f') hr
have A : âá¶ z in ð[s \ {x}] x, â(z - x)â»Â¹ ⢠(f z - f x)â â Iio r :=
(hasDerivWithinAt_iff_tendsto_slope.1 hf).norm (IsOpen.mem_nhds isOpen_Iio hr)
have B : âá¶ z in ð[{x}] x, â(z - x)â»Â¹ ⢠(f z - f x)â â Iio r :=
mem_of_superset self_mem_nhdsWithin (singleton_subset_iff.2 <| by simp [hrâ])
have C := mem_sup.2 âšA, Bâ©
rw [â nhdsWithin_union, diff_union_self, nhdsWithin_union, mem_sup] at C
filter_upwards [C.1]
simp only [norm_smul, mem_Iio, norm_inv]
exact fun _ => id
/-- If `f` has derivative `f'` within `s` at `x`, then for any `r > âf'â` the ratio
`(âf zâ - âf xâ) / âz - xâ` is less than `r` in some neighborhood of `x` within `s`.
In other words, the limit superior of this ratio as `z` tends to `x` along `s`
is less than or equal to `âf'â`.
This lemma is a weaker version of `HasDerivWithinAt.limsup_norm_slope_le`
where `âf zâ - âf xâ` is replaced by `âf z - f xâ`. -/
theorem HasDerivWithinAt.limsup_slope_norm_le (hf : HasDerivWithinAt f f' s x) (hr : âf'â < r) :
âá¶ z in ð[s] x, âz - xââ»Â¹ * (âf zâ - âf xâ) < r := by
apply (hf.limsup_norm_slope_le hr).mono
intro z hz
refine lt_of_le_of_lt (mul_le_mul_of_nonneg_left (norm_sub_norm_le _ _) ?_) hz
exact inv_nonneg.2 (norm_nonneg _)
/-- If `f` has derivative `f'` within `(x, +â)` at `x`, then for any `r > âf'â` the ratio
`âf z - f xâ / âz - xâ` is frequently less than `r` as `z â x+0`.
In other words, the limit inferior of this ratio as `z` tends to `x+0`
is less than or equal to `âf'â`. See also `HasDerivWithinAt.limsup_norm_slope_le`
for a stronger version using limit superior and any set `s`. -/
theorem HasDerivWithinAt.liminf_right_norm_slope_le (hf : HasDerivWithinAt f f' (Ici x) x)
(hr : âf'â < r) : âá¶ z in ð[>] x, âz - xââ»Â¹ * âf z - f xâ < r :=
(hf.Ioi_of_Ici.limsup_norm_slope_le hr).frequently
/-- If `f` has derivative `f'` within `(x, +â)` at `x`, then for any `r > âf'â` the ratio
`(âf zâ - âf xâ) / (z - x)` is frequently less than `r` as `z â x+0`.
In other words, the limit inferior of this ratio as `z` tends to `x+0`
is less than or equal to `âf'â`.
See also
* `HasDerivWithinAt.limsup_norm_slope_le` for a stronger version using
limit superior and any set `s`;
* `HasDerivWithinAt.liminf_right_norm_slope_le` for a stronger version using
`âf z - f xâ` instead of `âf zâ - âf xâ`. -/
theorem HasDerivWithinAt.liminf_right_slope_norm_le (hf : HasDerivWithinAt f f' (Ici x) x)
(hr : âf'â < r) : âá¶ z in ð[>] x, (z - x)â»Â¹ * (âf zâ - âf xâ) < r := by
have := (hf.Ioi_of_Ici.limsup_slope_norm_le hr).frequently
refine this.mp (Eventually.mono self_mem_nhdsWithin fun z hxz hz ⊠?_)
rwa [Real.norm_eq_abs, abs_of_pos (sub_pos_of_lt hxz)] at hz
end RealSpace
|
Analysis\Calculus\Deriv\Star.lean | /-
Copyright (c) 2023 Eric Wieser. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Eric Wieser
-/
import Mathlib.Analysis.Calculus.Deriv.Basic
import Mathlib.Analysis.Calculus.FDeriv.Star
/-!
# Star operations on derivatives
This file contains the usual formulas (and existence assertions) for the derivative of the star
operation. Note that these only apply when the field that the derivative is respect to has a trivial
star operation; which as should be expected rules out `ð = â`.
-/
universe u v w
variable {ð : Type u} [NontriviallyNormedField ð]
variable {F : Type v} [NormedAddCommGroup F] [NormedSpace ð F]
variable {f : ð â F}
/-! ### Derivative of `x ⊠star x` -/
variable [StarRing ð] [TrivialStar ð] [StarAddMonoid F] [ContinuousStar F]
variable [StarModule ð F] {f' : F} {s : Set ð} {x : ð} {L : Filter ð}
/-- If `f` has derivative `f'` at `x` along the filter `L`, then `fun x => star (f x)` has
derivative `star f'` (the base field is assumed to have a trivial star operation). -/
protected nonrec theorem HasDerivAtFilter.star (h : HasDerivAtFilter f f' x L) :
HasDerivAtFilter (fun x => star (f x)) (star f') x L := by
-- Deduce from the Fréchet-derivative version of the star rule.
simpa using h.star.hasDerivAtFilter
protected nonrec theorem HasDerivWithinAt.star (h : HasDerivWithinAt f f' s x) :
HasDerivWithinAt (fun x => star (f x)) (star f') s x :=
h.star
protected nonrec theorem HasDerivAt.star (h : HasDerivAt f f' x) :
HasDerivAt (fun x => star (f x)) (star f') x :=
h.star
protected nonrec theorem HasStrictDerivAt.star (h : HasStrictDerivAt f f' x) :
HasStrictDerivAt (fun x => star (f x)) (star f') x := by simpa using h.star.hasStrictDerivAt
protected theorem derivWithin.star (hxs : UniqueDiffWithinAt ð s x) :
derivWithin (fun y => star (f y)) s x = star (derivWithin f s x) :=
DFunLike.congr_fun (fderivWithin_star hxs) _
protected theorem deriv.star : deriv (fun y => star (f y)) x = star (deriv f x) :=
DFunLike.congr_fun fderiv_star _
@[simp]
protected theorem deriv.star' : (deriv fun y => star (f y)) = fun x => star (deriv f x) :=
funext fun _ => deriv.star
|
Analysis\Calculus\Deriv\Support.lean | /-
Copyright (c) 2022 Floris van Doorn. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Floris van Doorn
-/
import Mathlib.Analysis.Calculus.Deriv.Basic
/-!
# Support of the derivative of a function
In this file we prove that the (topological) support of a function includes the support of its
derivative. As a corollary, we show that the derivative of a function with compact support has
compact support.
## Keywords
derivative, support
-/
universe u v
variable {ð : Type u} [NontriviallyNormedField ð]
variable {E : Type v} [NormedAddCommGroup E] [NormedSpace ð E]
variable {f : ð â E}
/-! ### Support of derivatives -/
section Support
open Function
/-- The support of `deriv f` is contained in the topological (closed) support of `f`. -/
theorem support_deriv_subset : support (deriv f) â tsupport f := by
intro x
-- Contrapositive: outside `tsupport f`, `f` is eventually `0`, so its derivative at `x`
-- equals the derivative of the constant function `0`, which is `0`.
rw [â not_imp_not]
intro h2x
rw [not_mem_tsupport_iff_eventuallyEq] at h2x
exact nmem_support.mpr (h2x.deriv_eq.trans (deriv_const x 0))
/-- The derivative of a compactly supported function has compact support. -/
protected theorem HasCompactSupport.deriv (hf : HasCompactSupport f) :
HasCompactSupport (deriv f) :=
hf.mono' support_deriv_subset
end Support
|
Analysis\Calculus\Deriv\ZPow.lean | /-
Copyright (c) 2020 Sébastien Gouëzel. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Sébastien Gouëzel, Yury Kudryashov
-/
import Mathlib.Analysis.Calculus.Deriv.Pow
import Mathlib.Analysis.Calculus.Deriv.Inv
/-!
# Derivatives of `x ^ m`, `m : â€`
In this file we prove theorems about (iterated) derivatives of `x ^ m`, `m : â€`.
For a more detailed overview of one-dimensional derivatives in mathlib, see the module docstring of
`Analysis/Calculus/Deriv/Basic`.
## Keywords
derivative, power
-/
universe u v w
open scoped Classical
open Topology Filter
open Filter Asymptotics Set
variable {ð : Type u} [NontriviallyNormedField ð]
variable {E : Type v} [NormedAddCommGroup E] [NormedSpace ð E]
variable {x : ð}
variable {s : Set ð}
variable {m : â€}
/-! ### Derivative of `x ⊠x^m` for `m : â€` -/
theorem hasStrictDerivAt_zpow (m : â€) (x : ð) (h : x â 0 âš 0 †m) :
HasStrictDerivAt (fun x => x ^ m) ((m : ð) * x ^ (m - 1)) x := by
have : â m : â€, 0 < m â HasStrictDerivAt (· ^ m) ((m : ð) * x ^ (m - 1)) x := fun m hm ⊠by
lift m to â using hm.le
simp only [zpow_natCast, Int.cast_natCast]
convert hasStrictDerivAt_pow m x using 2
rw [â Int.ofNat_one, â Int.ofNat_sub, zpow_natCast]
norm_cast at hm
rcases lt_trichotomy m 0 with (hm | hm | hm)
· have hx : x â 0 := h.resolve_right hm.not_le
have := (hasStrictDerivAt_inv ?_).scomp _ (this (-m) (neg_pos.2 hm)) <;>
[skip; exact zpow_ne_zero _ hx]
simp only [(· â ·), zpow_neg, one_div, inv_inv, smul_eq_mul] at this
convert this using 1
rw [sq, mul_inv, inv_inv, Int.cast_neg, neg_mul, neg_mul_neg, â zpow_addâ hx, mul_assoc, â
zpow_addâ hx]
congr
abel
· simp only [hm, zpow_zero, Int.cast_zero, zero_mul, hasStrictDerivAt_const]
· exact this m hm
theorem hasDerivAt_zpow (m : â€) (x : ð) (h : x â 0 âš 0 †m) :
HasDerivAt (fun x => x ^ m) ((m : ð) * x ^ (m - 1)) x :=
(hasStrictDerivAt_zpow m x h).hasDerivAt
theorem hasDerivWithinAt_zpow (m : â€) (x : ð) (h : x â 0 âš 0 †m) (s : Set ð) :
HasDerivWithinAt (fun x => x ^ m) ((m : ð) * x ^ (m - 1)) s x :=
(hasDerivAt_zpow m x h).hasDerivWithinAt
theorem differentiableAt_zpow : DifferentiableAt ð (fun x => x ^ m) x â x â 0 âš 0 †m :=
âšfun H => NormedField.continuousAt_zpow.1 H.continuousAt, fun H =>
(hasDerivAt_zpow m x H).differentiableAtâ©
theorem differentiableWithinAt_zpow (m : â€) (x : ð) (h : x â 0 âš 0 †m) :
DifferentiableWithinAt ð (fun x => x ^ m) s x :=
(differentiableAt_zpow.mpr h).differentiableWithinAt
theorem differentiableOn_zpow (m : â€) (s : Set ð) (h : (0 : ð) â s âš 0 †m) :
DifferentiableOn ð (fun x => x ^ m) s := fun x hxs =>
differentiableWithinAt_zpow m x <| h.imp_left <| ne_of_mem_of_not_mem hxs
/-- Unconditional formula for the derivative of `x ^ m` with `m : â€`: even at `x = 0` with
`m < 0`, both sides are `0` by the junk-value conventions for `deriv` and `zpow`. -/
theorem deriv_zpow (m : â€) (x : ð) : deriv (fun x => x ^ m) x = m * x ^ (m - 1) := by
by_cases H : x â 0 âš 0 †m
· exact (hasDerivAt_zpow m x H).deriv
-- Remaining case: `x = 0` and `m < 0`. The function is not differentiable there, so the
-- left-hand side is `0`; and `0 ^ (m - 1) = 0` since `m - 1 â 0`, so the right-hand side
-- vanishes as well.
· rw [deriv_zero_of_not_differentiableAt (mt differentiableAt_zpow.1 H)]
push_neg at H
rcases H with âšrfl, hmâ©
rw [zero_zpow _ ((sub_one_lt _).trans hm).ne, mul_zero]
@[simp]
theorem deriv_zpow' (m : â€) : (deriv fun x : ð => x ^ m) = fun x => (m : ð) * x ^ (m - 1) :=
funext <| deriv_zpow m
theorem derivWithin_zpow (hxs : UniqueDiffWithinAt ð s x) (h : x â 0 âš 0 †m) :
derivWithin (fun x => x ^ m) s x = (m : ð) * x ^ (m - 1) :=
(hasDerivWithinAt_zpow m x h s).derivWithin hxs
/-- The `k`-th iterated derivative of `x ^ m` (integer `m`) is
`(â i â Finset.range k, (m - i)) * x ^ (m - k)`, by induction on `k`. -/
@[simp]
theorem iter_deriv_zpow' (m : â€) (k : â) :
    (deriv^[k] fun x : ð => x ^ m) =
      fun x => (â i â Finset.range k, ((m : ð) - i)) * x ^ (m - k) := by
  induction' k with k ihk
  · simp only [Nat.zero_eq, one_mul, Int.ofNat_zero, id, sub_zero, Finset.prod_range_zero,
      Function.iterate_zero]
  · simp only [Function.iterate_succ_apply', ihk, deriv_const_mul_field', deriv_zpow',
      Finset.prod_range_succ, Int.ofNat_succ, â sub_sub, Int.cast_sub, Int.cast_natCast, mul_assoc]

/-- Pointwise version of `iter_deriv_zpow'`. -/
theorem iter_deriv_zpow (m : â€) (x : ð) (k : â) :
    deriv^[k] (fun y => y ^ m) x = (â i â Finset.range k, ((m : ð) - i)) * x ^ (m - k) :=
  congr_fun (iter_deriv_zpow' m k) x

/-- Iterated derivative of a natural-number power, deduced from the `zpow` case.
When `k > n` the product contains the factor `n - n = 0`, so both sides vanish. -/
theorem iter_deriv_pow (n : â) (x : ð) (k : â) :
    deriv^[k] (fun x : ð => x ^ n) x = (â i â Finset.range k, ((n : ð) - i)) * x ^ (n - k) := by
  simp only [â zpow_natCast, iter_deriv_zpow, Int.cast_natCast]
  rcases le_or_lt k n with hkn | hnk
  · rw [Int.ofNat_sub hkn]
  · -- `k > n`: the product over `Finset.range k` contains the zero factor at `i = n`
    have : (â i â Finset.range k, (n - i : ð)) = 0 :=
      Finset.prod_eq_zero (Finset.mem_range.2 hnk) (sub_self _)
    simp only [this, zero_mul]

/-- Function-level version of `iter_deriv_pow`. -/
@[simp]
theorem iter_deriv_pow' (n k : â) :
    (deriv^[k] fun x : ð => x ^ n) =
      fun x => (â i â Finset.range k, ((n : ð) - i)) * x ^ (n - k) :=
  funext fun x => iter_deriv_pow n x k

/-- Iterated derivative of `Inv.inv`, as the special case `m = -1` of `iter_deriv_zpow`. -/
theorem iter_deriv_inv (k : â) (x : ð) :
    deriv^[k] Inv.inv x = (â i â Finset.range k, (-1 - i : ð)) * x ^ (-1 - k : â€) := by
  simpa only [zpow_neg_one, Int.cast_neg, Int.cast_one] using iter_deriv_zpow (-1) x k

/-- Function-level version of `iter_deriv_inv`. -/
@[simp]
theorem iter_deriv_inv' (k : â) :
    deriv^[k] Inv.inv = fun x : ð => (â i â Finset.range k, (-1 - i : ð)) * x ^ (-1 - k : â€) :=
  funext (iter_deriv_inv k)
-- Composition lemmas: `x ⊠f x ^ m` inherits differentiability from `f`,
-- under the usual `f a â 0 âš 0 †m` side condition at the relevant points.
variable {f : E â ð} {t : Set E} {a : E}

/-- Chain rule: `fun x => f x ^ m` is differentiable within `t` at `a` when `f` is. -/
theorem DifferentiableWithinAt.zpow (hf : DifferentiableWithinAt ð f t a) (h : f a â 0 âš 0 †m) :
    DifferentiableWithinAt ð (fun x => f x ^ m) t a :=
  (differentiableAt_zpow.2 h).comp_differentiableWithinAt a hf

/-- Chain rule: `fun x => f x ^ m` is differentiable at `a` when `f` is. -/
theorem DifferentiableAt.zpow (hf : DifferentiableAt ð f a) (h : f a â 0 âš 0 †m) :
    DifferentiableAt ð (fun x => f x ^ m) a :=
  (differentiableAt_zpow.2 h).comp a hf

/-- `fun x => f x ^ m` is differentiable on `t` when `f` is nonvanishing on `t` or `0 †m`. -/
theorem DifferentiableOn.zpow (hf : DifferentiableOn ð f t) (h : (â x â t, f x â 0) âš 0 †m) :
    DifferentiableOn ð (fun x => f x ^ m) t := fun x hx =>
  (hf x hx).zpow <| h.imp_left fun h => h x hx

/-- `fun x => f x ^ m` is differentiable when `f` is nonvanishing everywhere or `0 †m`. -/
theorem Differentiable.zpow (hf : Differentiable ð f) (h : (â x, f x â 0) âš 0 †m) :
    Differentiable ð fun x => f x ^ m := fun x => (hf x).zpow <| h.imp_left fun h => h x
|
Analysis\Calculus\FDeriv\Add.lean | /-
Copyright (c) 2019 Jeremy Avigad. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Jeremy Avigad, Sébastien Gouëzel, Yury Kudryashov
-/
import Mathlib.Analysis.Calculus.FDeriv.Linear
import Mathlib.Analysis.Calculus.FDeriv.Comp
/-!
# Additive operations on derivatives
For detailed documentation of the Fréchet derivative,
see the module docstring of `Analysis/Calculus/FDeriv/Basic.lean`.
This file contains the usual formulas (and existence assertions) for the derivative of
* sum of finitely many functions
* multiplication of a function by a scalar constant
* negative of a function
* subtraction of two functions
-/
open Filter Asymptotics ContinuousLinearMap Set Metric
open scoped Classical
open Topology NNReal Filter Asymptotics ENNReal
noncomputable section
section
variable {ð : Type*} [NontriviallyNormedField ð]
variable {E : Type*} [NormedAddCommGroup E] [NormedSpace ð E]
variable {F : Type*} [NormedAddCommGroup F] [NormedSpace ð F]
variable {G : Type*} [NormedAddCommGroup G] [NormedSpace ð G]
variable {G' : Type*} [NormedAddCommGroup G'] [NormedSpace ð G']
variable {f fâ fâ g : E â F}
variable {f' fâ' fâ' g' : E âL[ð] F}
variable (e : E âL[ð] F)
variable {x : E}
variable {s t : Set E}
variable {L Lâ Lâ : Filter E}
section ConstSMul

variable {R : Type*} [Semiring R] [Module R F] [SMulCommClass ð R F] [ContinuousConstSMul R F]

/-! ### Derivative of a function multiplied by a constant -/

/-- Scalar multiples: `c ⢠f` has strict derivative `c ⢠f'`, obtained by composing with the
continuous linear map `c ⢠id`. -/
@[fun_prop]
theorem HasStrictFDerivAt.const_smul (h : HasStrictFDerivAt f f' x) (c : R) :
    HasStrictFDerivAt (fun x => c ⢠f x) (c ⢠f') x :=
  (c ⢠(1 : F âL[ð] F)).hasStrictFDerivAt.comp x h

/-- Filter version: `c ⢠f` has derivative `c ⢠f'` along `L`. -/
theorem HasFDerivAtFilter.const_smul (h : HasFDerivAtFilter f f' x L) (c : R) :
    HasFDerivAtFilter (fun x => c ⢠f x) (c ⢠f') x L :=
  (c ⢠(1 : F âL[ð] F)).hasFDerivAtFilter.comp x h tendsto_map

@[fun_prop]
nonrec theorem HasFDerivWithinAt.const_smul (h : HasFDerivWithinAt f f' s x) (c : R) :
    HasFDerivWithinAt (fun x => c ⢠f x) (c ⢠f') s x :=
  h.const_smul c

@[fun_prop]
nonrec theorem HasFDerivAt.const_smul (h : HasFDerivAt f f' x) (c : R) :
    HasFDerivAt (fun x => c ⢠f x) (c ⢠f') x :=
  h.const_smul c

@[fun_prop]
theorem DifferentiableWithinAt.const_smul (h : DifferentiableWithinAt ð f s x) (c : R) :
    DifferentiableWithinAt ð (fun y => c ⢠f y) s x :=
  (h.hasFDerivWithinAt.const_smul c).differentiableWithinAt

@[fun_prop]
theorem DifferentiableAt.const_smul (h : DifferentiableAt ð f x) (c : R) :
    DifferentiableAt ð (fun y => c ⢠f y) x :=
  (h.hasFDerivAt.const_smul c).differentiableAt

@[fun_prop]
theorem DifferentiableOn.const_smul (h : DifferentiableOn ð f s) (c : R) :
    DifferentiableOn ð (fun y => c ⢠f y) s := fun x hx => (h x hx).const_smul c

@[fun_prop]
theorem Differentiable.const_smul (h : Differentiable ð f) (c : R) :
    Differentiable ð fun y => c ⢠f y := fun x => (h x).const_smul c

/-- `fderivWithin` commutes with constant scalar multiplication (at a point of unique
differentiability, for a differentiable `f`). -/
theorem fderivWithin_const_smul (hxs : UniqueDiffWithinAt ð s x)
    (h : DifferentiableWithinAt ð f s x) (c : R) :
    fderivWithin ð (fun y => c ⢠f y) s x = c ⢠fderivWithin ð f s x :=
  (h.hasFDerivWithinAt.const_smul c).fderivWithin hxs

/-- `fderiv` commutes with constant scalar multiplication (for a differentiable `f`). -/
theorem fderiv_const_smul (h : DifferentiableAt ð f x) (c : R) :
    fderiv ð (fun y => c ⢠f y) x = c ⢠fderiv ð f x :=
  (h.hasFDerivAt.const_smul c).fderiv

end ConstSMul
section Add

/-! ### Derivative of the sum of two functions -/

/-- The sum of two functions with strict derivatives `f'`, `g'` has strict derivative
`f' + g'`; the little-o estimate is rearranged with `abel`. -/
@[fun_prop]
nonrec theorem HasStrictFDerivAt.add (hf : HasStrictFDerivAt f f' x)
    (hg : HasStrictFDerivAt g g' x) : HasStrictFDerivAt (fun y => f y + g y) (f' + g') x :=
  (hf.add hg).congr_left fun y => by
    simp only [LinearMap.sub_apply, LinearMap.add_apply, map_sub, map_add, add_apply]
    abel

/-- Filter version of the sum rule. -/
theorem HasFDerivAtFilter.add (hf : HasFDerivAtFilter f f' x L)
    (hg : HasFDerivAtFilter g g' x L) : HasFDerivAtFilter (fun y => f y + g y) (f' + g') x L :=
  .of_isLittleO <| (hf.isLittleO.add hg.isLittleO).congr_left fun _ => by
    simp only [LinearMap.sub_apply, LinearMap.add_apply, map_sub, map_add, add_apply]
    abel

@[fun_prop]
nonrec theorem HasFDerivWithinAt.add (hf : HasFDerivWithinAt f f' s x)
    (hg : HasFDerivWithinAt g g' s x) : HasFDerivWithinAt (fun y => f y + g y) (f' + g') s x :=
  hf.add hg

@[fun_prop]
nonrec theorem HasFDerivAt.add (hf : HasFDerivAt f f' x) (hg : HasFDerivAt g g' x) :
    HasFDerivAt (fun x => f x + g x) (f' + g') x :=
  hf.add hg

@[fun_prop]
theorem DifferentiableWithinAt.add (hf : DifferentiableWithinAt ð f s x)
    (hg : DifferentiableWithinAt ð g s x) : DifferentiableWithinAt ð (fun y => f y + g y) s x :=
  (hf.hasFDerivWithinAt.add hg.hasFDerivWithinAt).differentiableWithinAt

@[simp, fun_prop]
theorem DifferentiableAt.add (hf : DifferentiableAt ð f x) (hg : DifferentiableAt ð g x) :
    DifferentiableAt ð (fun y => f y + g y) x :=
  (hf.hasFDerivAt.add hg.hasFDerivAt).differentiableAt

@[fun_prop]
theorem DifferentiableOn.add (hf : DifferentiableOn ð f s) (hg : DifferentiableOn ð g s) :
    DifferentiableOn ð (fun y => f y + g y) s := fun x hx => (hf x hx).add (hg x hx)

@[simp, fun_prop]
theorem Differentiable.add (hf : Differentiable ð f) (hg : Differentiable ð g) :
    Differentiable ð fun y => f y + g y := fun x => (hf x).add (hg x)

/-- `fderivWithin` is additive on differentiable functions. -/
theorem fderivWithin_add (hxs : UniqueDiffWithinAt ð s x) (hf : DifferentiableWithinAt ð f s x)
    (hg : DifferentiableWithinAt ð g s x) :
    fderivWithin ð (fun y => f y + g y) s x = fderivWithin ð f s x + fderivWithin ð g s x :=
  (hf.hasFDerivWithinAt.add hg.hasFDerivWithinAt).fderivWithin hxs

/-- `fderiv` is additive on differentiable functions. -/
theorem fderiv_add (hf : DifferentiableAt ð f x) (hg : DifferentiableAt ð g x) :
    fderiv ð (fun y => f y + g y) x = fderiv ð f x + fderiv ð g x :=
  (hf.hasFDerivAt.add hg.hasFDerivAt).fderiv

/-- Adding a constant does not change the (strict) derivative: `f + c` has derivative `f'`. -/
@[fun_prop]
theorem HasStrictFDerivAt.add_const (hf : HasStrictFDerivAt f f' x) (c : F) :
    HasStrictFDerivAt (fun y => f y + c) f' x :=
  add_zero f' âž hf.add (hasStrictFDerivAt_const _ _)

theorem HasFDerivAtFilter.add_const (hf : HasFDerivAtFilter f f' x L) (c : F) :
    HasFDerivAtFilter (fun y => f y + c) f' x L :=
  add_zero f' âž hf.add (hasFDerivAtFilter_const _ _ _)

@[fun_prop]
nonrec theorem HasFDerivWithinAt.add_const (hf : HasFDerivWithinAt f f' s x) (c : F) :
    HasFDerivWithinAt (fun y => f y + c) f' s x :=
  hf.add_const c

@[fun_prop]
nonrec theorem HasFDerivAt.add_const (hf : HasFDerivAt f f' x) (c : F) :
    HasFDerivAt (fun x => f x + c) f' x :=
  hf.add_const c

@[fun_prop]
theorem DifferentiableWithinAt.add_const (hf : DifferentiableWithinAt ð f s x) (c : F) :
    DifferentiableWithinAt ð (fun y => f y + c) s x :=
  (hf.hasFDerivWithinAt.add_const c).differentiableWithinAt

/-- Adding a constant does not affect differentiability (the reverse direction adds `-c`). -/
@[simp]
theorem differentiableWithinAt_add_const_iff (c : F) :
    DifferentiableWithinAt ð (fun y => f y + c) s x â DifferentiableWithinAt ð f s x :=
  âšfun h => by simpa using h.add_const (-c), fun h => h.add_const câ©

@[fun_prop]
theorem DifferentiableAt.add_const (hf : DifferentiableAt ð f x) (c : F) :
    DifferentiableAt ð (fun y => f y + c) x :=
  (hf.hasFDerivAt.add_const c).differentiableAt

@[simp]
theorem differentiableAt_add_const_iff (c : F) :
    DifferentiableAt ð (fun y => f y + c) x â DifferentiableAt ð f x :=
  âšfun h => by simpa using h.add_const (-c), fun h => h.add_const câ©

@[fun_prop]
theorem DifferentiableOn.add_const (hf : DifferentiableOn ð f s) (c : F) :
    DifferentiableOn ð (fun y => f y + c) s := fun x hx => (hf x hx).add_const c

@[simp]
theorem differentiableOn_add_const_iff (c : F) :
    DifferentiableOn ð (fun y => f y + c) s â DifferentiableOn ð f s :=
  âšfun h => by simpa using h.add_const (-c), fun h => h.add_const câ©

@[fun_prop]
theorem Differentiable.add_const (hf : Differentiable ð f) (c : F) :
    Differentiable ð fun y => f y + c := fun x => (hf x).add_const c

@[simp]
theorem differentiable_add_const_iff (c : F) :
    (Differentiable ð fun y => f y + c) â Differentiable ð f :=
  âšfun h => by simpa using h.add_const (-c), fun h => h.add_const câ©

/-- `fderivWithin` ignores an added constant. Holds unconditionally: when `f` is not
differentiable within `s` at `x`, both sides are zero. -/
theorem fderivWithin_add_const (hxs : UniqueDiffWithinAt ð s x) (c : F) :
    fderivWithin ð (fun y => f y + c) s x = fderivWithin ð f s x :=
  if hf : DifferentiableWithinAt ð f s x then (hf.hasFDerivWithinAt.add_const c).fderivWithin hxs
  else by
    rw [fderivWithin_zero_of_not_differentiableWithinAt hf,
      fderivWithin_zero_of_not_differentiableWithinAt]
    simpa

/-- `fderiv` ignores an added constant; unconditional like `fderivWithin_add_const`. -/
theorem fderiv_add_const (c : F) : fderiv ð (fun y => f y + c) x = fderiv ð f x := by
  simp only [â fderivWithin_univ, fderivWithin_add_const uniqueDiffWithinAt_univ]

/-- Adding a constant on the left does not change the (strict) derivative. -/
@[fun_prop]
theorem HasStrictFDerivAt.const_add (hf : HasStrictFDerivAt f f' x) (c : F) :
    HasStrictFDerivAt (fun y => c + f y) f' x :=
  zero_add f' âž (hasStrictFDerivAt_const _ _).add hf

theorem HasFDerivAtFilter.const_add (hf : HasFDerivAtFilter f f' x L) (c : F) :
    HasFDerivAtFilter (fun y => c + f y) f' x L :=
  zero_add f' âž (hasFDerivAtFilter_const _ _ _).add hf

@[fun_prop]
nonrec theorem HasFDerivWithinAt.const_add (hf : HasFDerivWithinAt f f' s x) (c : F) :
    HasFDerivWithinAt (fun y => c + f y) f' s x :=
  hf.const_add c

@[fun_prop]
nonrec theorem HasFDerivAt.const_add (hf : HasFDerivAt f f' x) (c : F) :
    HasFDerivAt (fun x => c + f x) f' x :=
  hf.const_add c

@[fun_prop]
theorem DifferentiableWithinAt.const_add (hf : DifferentiableWithinAt ð f s x) (c : F) :
    DifferentiableWithinAt ð (fun y => c + f y) s x :=
  (hf.hasFDerivWithinAt.const_add c).differentiableWithinAt

@[simp]
theorem differentiableWithinAt_const_add_iff (c : F) :
    DifferentiableWithinAt ð (fun y => c + f y) s x â DifferentiableWithinAt ð f s x :=
  âšfun h => by simpa using h.const_add (-c), fun h => h.const_add câ©

@[fun_prop]
theorem DifferentiableAt.const_add (hf : DifferentiableAt ð f x) (c : F) :
    DifferentiableAt ð (fun y => c + f y) x :=
  (hf.hasFDerivAt.const_add c).differentiableAt

@[simp]
theorem differentiableAt_const_add_iff (c : F) :
    DifferentiableAt ð (fun y => c + f y) x â DifferentiableAt ð f x :=
  âšfun h => by simpa using h.const_add (-c), fun h => h.const_add câ©

@[fun_prop]
theorem DifferentiableOn.const_add (hf : DifferentiableOn ð f s) (c : F) :
    DifferentiableOn ð (fun y => c + f y) s := fun x hx => (hf x hx).const_add c

@[simp]
theorem differentiableOn_const_add_iff (c : F) :
    DifferentiableOn ð (fun y => c + f y) s â DifferentiableOn ð f s :=
  âšfun h => by simpa using h.const_add (-c), fun h => h.const_add câ©

@[fun_prop]
theorem Differentiable.const_add (hf : Differentiable ð f) (c : F) :
    Differentiable ð fun y => c + f y := fun x => (hf x).const_add c

@[simp]
theorem differentiable_const_add_iff (c : F) :
    (Differentiable ð fun y => c + f y) â Differentiable ð f :=
  âšfun h => by simpa using h.const_add (-c), fun h => h.const_add câ©

/-- Left-constant version of `fderivWithin_add_const`, via commutativity of addition. -/
theorem fderivWithin_const_add (hxs : UniqueDiffWithinAt ð s x) (c : F) :
    fderivWithin ð (fun y => c + f y) s x = fderivWithin ð f s x := by
  simpa only [add_comm] using fderivWithin_add_const hxs c

theorem fderiv_const_add (c : F) : fderiv ð (fun y => c + f y) x = fderiv ð f x := by
  simp only [add_comm c, fderiv_add_const]

end Add
section Sum

/-! ### Derivative of a finite sum of functions -/

variable {ι : Type*} {u : Finset ι} {A : ι â E â F} {A' : ι â E âL[ð] F}

/-- A finite sum of functions with strict derivatives has the sum of the derivatives as its
strict derivative, by summing the little-o estimates. -/
@[fun_prop]
theorem HasStrictFDerivAt.sum (h : â i â u, HasStrictFDerivAt (A i) (A' i) x) :
    HasStrictFDerivAt (fun y => â i â u, A i y) (â i â u, A' i) x := by
  dsimp [HasStrictFDerivAt] at *
  convert IsLittleO.sum h
  simp [Finset.sum_sub_distrib, ContinuousLinearMap.sum_apply]

/-- Filter version of the finite-sum rule. -/
theorem HasFDerivAtFilter.sum (h : â i â u, HasFDerivAtFilter (A i) (A' i) x L) :
    HasFDerivAtFilter (fun y => â i â u, A i y) (â i â u, A' i) x L := by
  simp only [hasFDerivAtFilter_iff_isLittleO] at *
  convert IsLittleO.sum h
  simp [ContinuousLinearMap.sum_apply]

@[fun_prop]
theorem HasFDerivWithinAt.sum (h : â i â u, HasFDerivWithinAt (A i) (A' i) s x) :
    HasFDerivWithinAt (fun y => â i â u, A i y) (â i â u, A' i) s x :=
  HasFDerivAtFilter.sum h

@[fun_prop]
theorem HasFDerivAt.sum (h : â i â u, HasFDerivAt (A i) (A' i) x) :
    HasFDerivAt (fun y => â i â u, A i y) (â i â u, A' i) x :=
  HasFDerivAtFilter.sum h

@[fun_prop]
theorem DifferentiableWithinAt.sum (h : â i â u, DifferentiableWithinAt ð (A i) s x) :
    DifferentiableWithinAt ð (fun y => â i â u, A i y) s x :=
  HasFDerivWithinAt.differentiableWithinAt <|
    HasFDerivWithinAt.sum fun i hi => (h i hi).hasFDerivWithinAt

@[simp, fun_prop]
theorem DifferentiableAt.sum (h : â i â u, DifferentiableAt ð (A i) x) :
    DifferentiableAt ð (fun y => â i â u, A i y) x :=
  HasFDerivAt.differentiableAt <| HasFDerivAt.sum fun i hi => (h i hi).hasFDerivAt

@[fun_prop]
theorem DifferentiableOn.sum (h : â i â u, DifferentiableOn ð (A i) s) :
    DifferentiableOn ð (fun y => â i â u, A i y) s := fun x hx =>
  DifferentiableWithinAt.sum fun i hi => h i hi x hx

@[simp, fun_prop]
theorem Differentiable.sum (h : â i â u, Differentiable ð (A i)) :
    Differentiable ð fun y => â i â u, A i y := fun x => DifferentiableAt.sum fun i hi => h i hi x

/-- `fderivWithin` commutes with finite sums of differentiable functions. -/
theorem fderivWithin_sum (hxs : UniqueDiffWithinAt ð s x)
    (h : â i â u, DifferentiableWithinAt ð (A i) s x) :
    fderivWithin ð (fun y => â i â u, A i y) s x = â i â u, fderivWithin ð (A i) s x :=
  (HasFDerivWithinAt.sum fun i hi => (h i hi).hasFDerivWithinAt).fderivWithin hxs

/-- `fderiv` commutes with finite sums of differentiable functions. -/
theorem fderiv_sum (h : â i â u, DifferentiableAt ð (A i) x) :
    fderiv ð (fun y => â i â u, A i y) x = â i â u, fderiv ð (A i) x :=
  (HasFDerivAt.sum fun i hi => (h i hi).hasFDerivAt).fderiv

end Sum
section Neg

/-! ### Derivative of the negative of a function -/

/-- `-f` has strict derivative `-f'`, by composing with the continuous linear map `-id`. -/
@[fun_prop]
theorem HasStrictFDerivAt.neg (h : HasStrictFDerivAt f f' x) :
    HasStrictFDerivAt (fun x => -f x) (-f') x :=
  (-1 : F âL[ð] F).hasStrictFDerivAt.comp x h

theorem HasFDerivAtFilter.neg (h : HasFDerivAtFilter f f' x L) :
    HasFDerivAtFilter (fun x => -f x) (-f') x L :=
  (-1 : F âL[ð] F).hasFDerivAtFilter.comp x h tendsto_map

@[fun_prop]
nonrec theorem HasFDerivWithinAt.neg (h : HasFDerivWithinAt f f' s x) :
    HasFDerivWithinAt (fun x => -f x) (-f') s x :=
  h.neg

@[fun_prop]
nonrec theorem HasFDerivAt.neg (h : HasFDerivAt f f' x) : HasFDerivAt (fun x => -f x) (-f') x :=
  h.neg

@[fun_prop]
theorem DifferentiableWithinAt.neg (h : DifferentiableWithinAt ð f s x) :
    DifferentiableWithinAt ð (fun y => -f y) s x :=
  h.hasFDerivWithinAt.neg.differentiableWithinAt

/-- Negation does not affect differentiability (the reverse direction uses `neg_neg`). -/
@[simp]
theorem differentiableWithinAt_neg_iff :
    DifferentiableWithinAt ð (fun y => -f y) s x â DifferentiableWithinAt ð f s x :=
  âšfun h => by simpa only [neg_neg] using h.neg, fun h => h.negâ©

@[fun_prop]
theorem DifferentiableAt.neg (h : DifferentiableAt ð f x) : DifferentiableAt ð (fun y => -f y) x :=
  h.hasFDerivAt.neg.differentiableAt

@[simp]
theorem differentiableAt_neg_iff : DifferentiableAt ð (fun y => -f y) x â DifferentiableAt ð f x :=
  âšfun h => by simpa only [neg_neg] using h.neg, fun h => h.negâ©

@[fun_prop]
theorem DifferentiableOn.neg (h : DifferentiableOn ð f s) : DifferentiableOn ð (fun y => -f y) s :=
  fun x hx => (h x hx).neg

@[simp]
theorem differentiableOn_neg_iff : DifferentiableOn ð (fun y => -f y) s â DifferentiableOn ð f s :=
  âšfun h => by simpa only [neg_neg] using h.neg, fun h => h.negâ©

@[fun_prop]
theorem Differentiable.neg (h : Differentiable ð f) : Differentiable ð fun y => -f y := fun x =>
  (h x).neg

@[simp]
theorem differentiable_neg_iff : (Differentiable ð fun y => -f y) â Differentiable ð f :=
  âšfun h => by simpa only [neg_neg] using h.neg, fun h => h.negâ©

/-- `fderivWithin` commutes with negation. Holds unconditionally: when `f` is not
differentiable, both sides are zero. -/
theorem fderivWithin_neg (hxs : UniqueDiffWithinAt ð s x) :
    fderivWithin ð (fun y => -f y) s x = -fderivWithin ð f s x :=
  if h : DifferentiableWithinAt ð f s x then h.hasFDerivWithinAt.neg.fderivWithin hxs
  else by
    rw [fderivWithin_zero_of_not_differentiableWithinAt h,
      fderivWithin_zero_of_not_differentiableWithinAt, neg_zero]
    simpa

/-- `fderiv` commutes with negation, unconditionally. -/
@[simp]
theorem fderiv_neg : fderiv ð (fun y => -f y) x = -fderiv ð f x := by
  simp only [â fderivWithin_univ, fderivWithin_neg uniqueDiffWithinAt_univ]

end Neg
section Sub

/-! ### Derivative of the difference of two functions -/

/-- Subtraction rule, reduced to addition and negation via `sub_eq_add_neg`. -/
@[fun_prop]
theorem HasStrictFDerivAt.sub (hf : HasStrictFDerivAt f f' x) (hg : HasStrictFDerivAt g g' x) :
    HasStrictFDerivAt (fun x => f x - g x) (f' - g') x := by
  simpa only [sub_eq_add_neg] using hf.add hg.neg

theorem HasFDerivAtFilter.sub (hf : HasFDerivAtFilter f f' x L) (hg : HasFDerivAtFilter g g' x L) :
    HasFDerivAtFilter (fun x => f x - g x) (f' - g') x L := by
  simpa only [sub_eq_add_neg] using hf.add hg.neg

@[fun_prop]
nonrec theorem HasFDerivWithinAt.sub (hf : HasFDerivWithinAt f f' s x)
    (hg : HasFDerivWithinAt g g' s x) : HasFDerivWithinAt (fun x => f x - g x) (f' - g') s x :=
  hf.sub hg

@[fun_prop]
nonrec theorem HasFDerivAt.sub (hf : HasFDerivAt f f' x) (hg : HasFDerivAt g g' x) :
    HasFDerivAt (fun x => f x - g x) (f' - g') x :=
  hf.sub hg

@[fun_prop]
theorem DifferentiableWithinAt.sub (hf : DifferentiableWithinAt ð f s x)
    (hg : DifferentiableWithinAt ð g s x) : DifferentiableWithinAt ð (fun y => f y - g y) s x :=
  (hf.hasFDerivWithinAt.sub hg.hasFDerivWithinAt).differentiableWithinAt

@[simp, fun_prop]
theorem DifferentiableAt.sub (hf : DifferentiableAt ð f x) (hg : DifferentiableAt ð g x) :
    DifferentiableAt ð (fun y => f y - g y) x :=
  (hf.hasFDerivAt.sub hg.hasFDerivAt).differentiableAt

/-- If `g` is differentiable, then `f + g` is differentiable iff `f` is
(subtract `g` back off for the forward direction). -/
@[simp]
lemma DifferentiableAt.add_iff_left (hg : DifferentiableAt ð g x) :
    DifferentiableAt ð (fun y => f y + g y) x â DifferentiableAt ð f x := by
  refine âšfun h ⊠?_, fun hf ⊠hf.add hgâ©
  simpa only [add_sub_cancel_right] using h.sub hg

@[simp]
lemma DifferentiableAt.add_iff_right (hg : DifferentiableAt ð f x) :
    DifferentiableAt ð (fun y => f y + g y) x â DifferentiableAt ð g x := by
  simp only [add_comm (f _), hg.add_iff_left]

@[simp]
lemma DifferentiableAt.sub_iff_left (hg : DifferentiableAt ð g x) :
    DifferentiableAt ð (fun y => f y - g y) x â DifferentiableAt ð f x := by
  simp only [sub_eq_add_neg, differentiableAt_neg_iff, hg, add_iff_left]

@[simp]
lemma DifferentiableAt.sub_iff_right (hg : DifferentiableAt ð f x) :
    DifferentiableAt ð (fun y => f y - g y) x â DifferentiableAt ð g x := by
  simp only [sub_eq_add_neg, hg, add_iff_right, differentiableAt_neg_iff]

@[fun_prop]
theorem DifferentiableOn.sub (hf : DifferentiableOn ð f s) (hg : DifferentiableOn ð g s) :
    DifferentiableOn ð (fun y => f y - g y) s := fun x hx => (hf x hx).sub (hg x hx)

@[simp]
lemma DifferentiableOn.add_iff_left (hg : DifferentiableOn ð g s) :
    DifferentiableOn ð (fun y => f y + g y) s â DifferentiableOn ð f s := by
  refine âšfun h ⊠?_, fun hf ⊠hf.add hgâ©
  simpa only [add_sub_cancel_right] using h.sub hg

@[simp]
lemma DifferentiableOn.add_iff_right (hg : DifferentiableOn ð f s) :
    DifferentiableOn ð (fun y => f y + g y) s â DifferentiableOn ð g s := by
  simp only [add_comm (f _), hg.add_iff_left]

@[simp]
lemma DifferentiableOn.sub_iff_left (hg : DifferentiableOn ð g s) :
    DifferentiableOn ð (fun y => f y - g y) s â DifferentiableOn ð f s := by
  simp only [sub_eq_add_neg, differentiableOn_neg_iff, hg, add_iff_left]

@[simp]
lemma DifferentiableOn.sub_iff_right (hg : DifferentiableOn ð f s) :
    DifferentiableOn ð (fun y => f y - g y) s â DifferentiableOn ð g s := by
  simp only [sub_eq_add_neg, differentiableOn_neg_iff, hg, add_iff_right]

@[simp, fun_prop]
theorem Differentiable.sub (hf : Differentiable ð f) (hg : Differentiable ð g) :
    Differentiable ð fun y => f y - g y := fun x => (hf x).sub (hg x)

@[simp]
lemma Differentiable.add_iff_left (hg : Differentiable ð g) :
    Differentiable ð (fun y => f y + g y) â Differentiable ð f := by
  refine âšfun h ⊠?_, fun hf ⊠hf.add hgâ©
  simpa only [add_sub_cancel_right] using h.sub hg

@[simp]
lemma Differentiable.add_iff_right (hg : Differentiable ð f) :
    Differentiable ð (fun y => f y + g y) â Differentiable ð g := by
  simp only [add_comm (f _), hg.add_iff_left]

@[simp]
lemma Differentiable.sub_iff_left (hg : Differentiable ð g) :
    Differentiable ð (fun y => f y - g y) â Differentiable ð f := by
  simp only [sub_eq_add_neg, differentiable_neg_iff, hg, add_iff_left]

@[simp]
lemma Differentiable.sub_iff_right (hg : Differentiable ð f) :
    Differentiable ð (fun y => f y - g y) â Differentiable ð g := by
  simp only [sub_eq_add_neg, differentiable_neg_iff, hg, add_iff_right]

/-- `fderivWithin` is subtractive on differentiable functions. -/
theorem fderivWithin_sub (hxs : UniqueDiffWithinAt ð s x) (hf : DifferentiableWithinAt ð f s x)
    (hg : DifferentiableWithinAt ð g s x) :
    fderivWithin ð (fun y => f y - g y) s x = fderivWithin ð f s x - fderivWithin ð g s x :=
  (hf.hasFDerivWithinAt.sub hg.hasFDerivWithinAt).fderivWithin hxs

/-- `fderiv` is subtractive on differentiable functions. -/
theorem fderiv_sub (hf : DifferentiableAt ð f x) (hg : DifferentiableAt ð g x) :
    fderiv ð (fun y => f y - g y) x = fderiv ð f x - fderiv ð g x :=
  (hf.hasFDerivAt.sub hg.hasFDerivAt).fderiv

/-- Subtracting a constant does not change the (strict) derivative. -/
@[fun_prop]
theorem HasStrictFDerivAt.sub_const (hf : HasStrictFDerivAt f f' x) (c : F) :
    HasStrictFDerivAt (fun x => f x - c) f' x := by
  simpa only [sub_eq_add_neg] using hf.add_const (-c)

theorem HasFDerivAtFilter.sub_const (hf : HasFDerivAtFilter f f' x L) (c : F) :
    HasFDerivAtFilter (fun x => f x - c) f' x L := by
  simpa only [sub_eq_add_neg] using hf.add_const (-c)

@[fun_prop]
nonrec theorem HasFDerivWithinAt.sub_const (hf : HasFDerivWithinAt f f' s x) (c : F) :
    HasFDerivWithinAt (fun x => f x - c) f' s x :=
  hf.sub_const c

@[fun_prop]
nonrec theorem HasFDerivAt.sub_const (hf : HasFDerivAt f f' x) (c : F) :
    HasFDerivAt (fun x => f x - c) f' x :=
  hf.sub_const c

/-- The translation `· - c` has the identity as its (strict) derivative. -/
@[fun_prop]
theorem hasStrictFDerivAt_sub_const {x : F} (c : F) : HasStrictFDerivAt (· - c) (id ð F) x :=
  (hasStrictFDerivAt_id x).sub_const c

@[fun_prop]
theorem hasFDerivAt_sub_const {x : F} (c : F) : HasFDerivAt (· - c) (id ð F) x :=
  (hasFDerivAt_id x).sub_const c

@[fun_prop]
theorem DifferentiableWithinAt.sub_const (hf : DifferentiableWithinAt ð f s x) (c : F) :
    DifferentiableWithinAt ð (fun y => f y - c) s x :=
  (hf.hasFDerivWithinAt.sub_const c).differentiableWithinAt

@[simp]
theorem differentiableWithinAt_sub_const_iff (c : F) :
    DifferentiableWithinAt ð (fun y => f y - c) s x â DifferentiableWithinAt ð f s x := by
  simp only [sub_eq_add_neg, differentiableWithinAt_add_const_iff]

@[fun_prop]
theorem DifferentiableAt.sub_const (hf : DifferentiableAt ð f x) (c : F) :
    DifferentiableAt ð (fun y => f y - c) x :=
  (hf.hasFDerivAt.sub_const c).differentiableAt

-- The following `_const_iff` lemmas are deprecated in favor of the more general
-- `sub_iff_left` / `sub_iff_right` lemmas above.
@[deprecated DifferentiableAt.sub_iff_left (since := "2024-07-11")]
theorem differentiableAt_sub_const_iff (c : F) :
    DifferentiableAt ð (fun y => f y - c) x â DifferentiableAt ð f x :=
  (differentiableAt_const _).sub_iff_left

@[fun_prop]
theorem DifferentiableOn.sub_const (hf : DifferentiableOn ð f s) (c : F) :
    DifferentiableOn ð (fun y => f y - c) s := fun x hx => (hf x hx).sub_const c

@[deprecated DifferentiableOn.sub_iff_left (since := "2024-07-11")]
theorem differentiableOn_sub_const_iff (c : F) :
    DifferentiableOn ð (fun y => f y - c) s â DifferentiableOn ð f s :=
  (differentiableOn_const _).sub_iff_left

@[fun_prop]
theorem Differentiable.sub_const (hf : Differentiable ð f) (c : F) :
    Differentiable ð fun y => f y - c := fun x => (hf x).sub_const c

@[deprecated Differentiable.sub_iff_left (since := "2024-07-11")]
theorem differentiable_sub_const_iff (c : F) :
    (Differentiable ð fun y => f y - c) â Differentiable ð f :=
  (differentiable_const _).sub_iff_left

theorem fderivWithin_sub_const (hxs : UniqueDiffWithinAt ð s x) (c : F) :
    fderivWithin ð (fun y => f y - c) s x = fderivWithin ð f s x := by
  simp only [sub_eq_add_neg, fderivWithin_add_const hxs]

theorem fderiv_sub_const (c : F) : fderiv ð (fun y => f y - c) x = fderiv ð f x := by
  simp only [sub_eq_add_neg, fderiv_add_const]

/-- `c - f` has derivative `-f'`: subtracting from a constant negates the derivative. -/
@[fun_prop]
theorem HasStrictFDerivAt.const_sub (hf : HasStrictFDerivAt f f' x) (c : F) :
    HasStrictFDerivAt (fun x => c - f x) (-f') x := by
  simpa only [sub_eq_add_neg] using hf.neg.const_add c

theorem HasFDerivAtFilter.const_sub (hf : HasFDerivAtFilter f f' x L) (c : F) :
    HasFDerivAtFilter (fun x => c - f x) (-f') x L := by
  simpa only [sub_eq_add_neg] using hf.neg.const_add c

@[fun_prop]
nonrec theorem HasFDerivWithinAt.const_sub (hf : HasFDerivWithinAt f f' s x) (c : F) :
    HasFDerivWithinAt (fun x => c - f x) (-f') s x :=
  hf.const_sub c

@[fun_prop]
nonrec theorem HasFDerivAt.const_sub (hf : HasFDerivAt f f' x) (c : F) :
    HasFDerivAt (fun x => c - f x) (-f') x :=
  hf.const_sub c

@[fun_prop]
theorem DifferentiableWithinAt.const_sub (hf : DifferentiableWithinAt ð f s x) (c : F) :
    DifferentiableWithinAt ð (fun y => c - f y) s x :=
  (hf.hasFDerivWithinAt.const_sub c).differentiableWithinAt

@[simp]
theorem differentiableWithinAt_const_sub_iff (c : F) :
    DifferentiableWithinAt ð (fun y => c - f y) s x â DifferentiableWithinAt ð f s x := by
  simp [sub_eq_add_neg]

@[fun_prop]
theorem DifferentiableAt.const_sub (hf : DifferentiableAt ð f x) (c : F) :
    DifferentiableAt ð (fun y => c - f y) x :=
  (hf.hasFDerivAt.const_sub c).differentiableAt

@[deprecated DifferentiableAt.sub_iff_right (since := "2024-07-11")]
theorem differentiableAt_const_sub_iff (c : F) :
    DifferentiableAt ð (fun y => c - f y) x â DifferentiableAt ð f x :=
  (differentiableAt_const _).sub_iff_right

@[fun_prop]
theorem DifferentiableOn.const_sub (hf : DifferentiableOn ð f s) (c : F) :
    DifferentiableOn ð (fun y => c - f y) s := fun x hx => (hf x hx).const_sub c

@[deprecated DifferentiableOn.sub_iff_right (since := "2024-07-11")]
theorem differentiableOn_const_sub_iff (c : F) :
    DifferentiableOn ð (fun y => c - f y) s â DifferentiableOn ð f s :=
  (differentiableOn_const _).sub_iff_right

@[fun_prop]
theorem Differentiable.const_sub (hf : Differentiable ð f) (c : F) :
    Differentiable ð fun y => c - f y := fun x => (hf x).const_sub c

@[deprecated Differentiable.sub_iff_right (since := "2024-07-11")]
theorem differentiable_const_sub_iff (c : F) :
    (Differentiable ð fun y => c - f y) â Differentiable ð f :=
  (differentiable_const _).sub_iff_right

theorem fderivWithin_const_sub (hxs : UniqueDiffWithinAt ð s x) (c : F) :
    fderivWithin ð (fun y => c - f y) s x = -fderivWithin ð f s x := by
  simp only [sub_eq_add_neg, fderivWithin_const_add, fderivWithin_neg, hxs]

theorem fderiv_const_sub (c : F) : fderiv ð (fun y => c - f y) x = -fderiv ð f x := by
  simp only [â fderivWithin_univ, fderivWithin_const_sub uniqueDiffWithinAt_univ]

end Sub
end
|
Analysis\Calculus\FDeriv\Analytic.lean | /-
Copyright (c) 2021 Yury Kudryashov. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yury Kudryashov
-/
import Mathlib.Analysis.Analytic.Basic
import Mathlib.Analysis.Analytic.CPolynomial
import Mathlib.Analysis.Calculus.Deriv.Basic
import Mathlib.Analysis.Calculus.ContDiff.Defs
import Mathlib.Analysis.Calculus.FDeriv.Add
/-!
# Frechet derivatives of analytic functions.
A function expressible as a power series at a point has a Frechet derivative there.
Also the special case in terms of `deriv` when the domain is 1-dimensional.
As an application, we show that continuous multilinear maps are smooth. We also compute their
iterated derivatives, in `ContinuousMultilinearMap.iteratedFDeriv_eq`.
-/
open Filter Asymptotics
open scoped ENNReal
universe u v
variable {ð : Type*} [NontriviallyNormedField ð]
variable {E : Type u} [NormedAddCommGroup E] [NormedSpace ð E]
variable {F : Type v} [NormedAddCommGroup F] [NormedSpace ð F]
section fderiv
variable {p : FormalMultilinearSeries ð E F} {r : ââ¥0â}
variable {f : E â F} {x : E} {s : Set E}
theorem HasFPowerSeriesAt.hasStrictFDerivAt (h : HasFPowerSeriesAt f p x) :
HasStrictFDerivAt f (continuousMultilinearCurryFin1 ð E F (p 1)) x := by
refine h.isBigO_image_sub_norm_mul_norm_sub.trans_isLittleO (IsLittleO.of_norm_right ?_)
refine isLittleO_iff_exists_eq_mul.2 âšfun y => ây - (x, x)â, ?_, EventuallyEq.rflâ©
refine (continuous_id.sub continuous_const).norm.tendsto' _ _ ?_
rw [_root_.id, sub_self, norm_zero]
theorem HasFPowerSeriesAt.hasFDerivAt (h : HasFPowerSeriesAt f p x) :
HasFDerivAt f (continuousMultilinearCurryFin1 ð E F (p 1)) x :=
h.hasStrictFDerivAt.hasFDerivAt
theorem HasFPowerSeriesAt.differentiableAt (h : HasFPowerSeriesAt f p x) : DifferentiableAt ð f x :=
h.hasFDerivAt.differentiableAt
theorem AnalyticAt.differentiableAt : AnalyticAt ð f x â DifferentiableAt ð f x
| âš_, hpâ© => hp.differentiableAt
theorem AnalyticAt.differentiableWithinAt (h : AnalyticAt ð f x) : DifferentiableWithinAt ð f s x :=
h.differentiableAt.differentiableWithinAt
theorem HasFPowerSeriesAt.fderiv_eq (h : HasFPowerSeriesAt f p x) :
fderiv ð f x = continuousMultilinearCurryFin1 ð E F (p 1) :=
h.hasFDerivAt.fderiv
theorem HasFPowerSeriesOnBall.differentiableOn [CompleteSpace F]
(h : HasFPowerSeriesOnBall f p x r) : DifferentiableOn ð f (EMetric.ball x r) := fun _ hy =>
(h.analyticAt_of_mem hy).differentiableWithinAt
theorem AnalyticOn.differentiableOn (h : AnalyticOn ð f s) : DifferentiableOn ð f s := fun y hy =>
(h y hy).differentiableWithinAt
theorem HasFPowerSeriesOnBall.hasFDerivAt [CompleteSpace F] (h : HasFPowerSeriesOnBall f p x r)
{y : E} (hy : (âyââ : ââ¥0â) < r) :
HasFDerivAt f (continuousMultilinearCurryFin1 ð E F (p.changeOrigin y 1)) (x + y) :=
(h.changeOrigin hy).hasFPowerSeriesAt.hasFDerivAt
theorem HasFPowerSeriesOnBall.fderiv_eq [CompleteSpace F] (h : HasFPowerSeriesOnBall f p x r)
{y : E} (hy : (âyââ : ââ¥0â) < r) :
fderiv ð f (x + y) = continuousMultilinearCurryFin1 ð E F (p.changeOrigin y 1) :=
(h.hasFDerivAt hy).fderiv
/-- If a function has a power series on a ball, then so does its derivative. -/
theorem HasFPowerSeriesOnBall.fderiv [CompleteSpace F] (h : HasFPowerSeriesOnBall f p x r) :
HasFPowerSeriesOnBall (fderiv ð f) p.derivSeries x r := by
refine .congr (f := fun z ⊠continuousMultilinearCurryFin1 ð E F (p.changeOrigin (z - x) 1)) ?_
fun z hz ⊠?_
· refine continuousMultilinearCurryFin1 ð E F
|>.toContinuousLinearEquiv.toContinuousLinearMap.comp_hasFPowerSeriesOnBall ?_
simpa using ((p.hasFPowerSeriesOnBall_changeOrigin 1
(h.r_pos.trans_le h.r_le)).mono h.r_pos h.r_le).comp_sub x
dsimp only
rw [â h.fderiv_eq, add_sub_cancel]
simpa only [edist_eq_coe_nnnorm_sub, EMetric.mem_ball] using hz
/-- If a function is analytic on a set `s`, so is its Fréchet derivative. -/
theorem AnalyticOn.fderiv [CompleteSpace F] (h : AnalyticOn ð f s) :
AnalyticOn ð (fderiv ð f) s := by
intro y hy
rcases h y hy with âšp, r, hpâ©
exact hp.fderiv.analyticAt
/-- If a function is analytic on a set `s`, so are its successive Fréchet derivative. -/
theorem AnalyticOn.iteratedFDeriv [CompleteSpace F] (h : AnalyticOn ð f s) (n : â) :
AnalyticOn ð (iteratedFDeriv ð n f) s := by
induction' n with n IH
· rw [iteratedFDeriv_zero_eq_comp]
exact ((continuousMultilinearCurryFin0 ð E F).symm : F âL[ð] E[Ã0]âL[ð] F).comp_analyticOn h
· rw [iteratedFDeriv_succ_eq_comp_left]
-- Porting note: for reasons that I do not understand at all, `?g` cannot be inlined.
convert ContinuousLinearMap.comp_analyticOn ?g IH.fderiv
case g => exact â(continuousMultilinearCurryLeftEquiv ð (fun _ : Fin (n + 1) ⊠E) F)
simp
/-- An analytic function is infinitely differentiable. -/
theorem AnalyticOn.contDiffOn [CompleteSpace F] (h : AnalyticOn ð f s) {n : ââ} :
    ContDiffOn ð n f s :=
  -- Work on the set of all points of analyticity, which is open and contains `s`; on an open
  -- set, iterated derivatives within the set agree with plain iterated derivatives.
  let t := { x | AnalyticAt ð f x }
  suffices ContDiffOn ð n f t from this.mono h
  have H : AnalyticOn ð f t := fun _x hx ⊠hx
  have t_open : IsOpen t := isOpen_analyticAt ð f
  contDiffOn_of_continuousOn_differentiableOn
    (fun m _ ⊠(H.iteratedFDeriv m).continuousOn.congr
      fun _ hx ⊠iteratedFDerivWithin_of_isOpen _ t_open hx)
    (fun m _ ⊠(H.iteratedFDeriv m).differentiableOn.congr
      fun _ hx ⊠iteratedFDerivWithin_of_isOpen _ t_open hx)
/-- A function that is analytic at a point is infinitely differentiable there. -/
theorem AnalyticAt.contDiffAt [CompleteSpace F] (h : AnalyticAt ð f x) {n : ââ} :
    ContDiffAt ð n f x := by
  -- Analyticity at a point gives analyticity on a whole neighborhood; conclude from there.
  obtain âšs, hs, hfâ© := h.exists_mem_nhds_analyticOn
  exact hf.contDiffOn.contDiffAt hs
end fderiv
section deriv

variable {p : FormalMultilinearSeries ð ð F} {r : ââ¥0â}
variable {f : ð â F} {x : ð} {s : Set ð}

/-- A function with a power series at `x` is strictly differentiable there, with derivative the
degree-one coefficient of the series, evaluated at `1`. -/
protected theorem HasFPowerSeriesAt.hasStrictDerivAt (h : HasFPowerSeriesAt f p x) :
    HasStrictDerivAt f (p 1 fun _ => 1) x :=
  h.hasStrictFDerivAt.hasStrictDerivAt

/-- A function with a power series at `x` is differentiable there, with derivative the
degree-one coefficient of the series, evaluated at `1`. -/
protected theorem HasFPowerSeriesAt.hasDerivAt (h : HasFPowerSeriesAt f p x) :
    HasDerivAt f (p 1 fun _ => 1) x :=
  h.hasStrictDerivAt.hasDerivAt

/-- The derivative at `x` of a function with a power series at `x` is the degree-one
coefficient of the series, evaluated at `1`. -/
protected theorem HasFPowerSeriesAt.deriv (h : HasFPowerSeriesAt f p x) :
    deriv f x = p 1 fun _ => 1 :=
  h.hasDerivAt.deriv

/-- If a function is analytic on a set `s`, so is its derivative. -/
theorem AnalyticOn.deriv [CompleteSpace F] (h : AnalyticOn ð f s) : AnalyticOn ð (deriv f) s :=
  -- `deriv f` is `fderiv ð f` applied to `1`, a continuous linear operation.
  (ContinuousLinearMap.apply ð F (1 : ð)).comp_analyticOn h.fderiv

/-- If a function is analytic on a set `s`, so are its successive derivatives. -/
theorem AnalyticOn.iterated_deriv [CompleteSpace F] (h : AnalyticOn ð f s) (n : â) :
    AnalyticOn ð (_root_.deriv^[n] f) s := by
  induction' n with n IH
  · exact h
  · simpa only [Function.iterate_succ', Function.comp_apply] using IH.deriv

end deriv
section fderiv

variable {p : FormalMultilinearSeries ð E F} {r : ââ¥0â} {n : â}
variable {f : E â F} {x : E} {s : Set E}

/-! The case of continuously polynomial functions. We get the same differentiability
results as for analytic functions, but without the assumptions that `F` is complete. -/

/-- A function with a finite power series on a ball is differentiable on that ball. -/
theorem HasFiniteFPowerSeriesOnBall.differentiableOn
    (h : HasFiniteFPowerSeriesOnBall f p x n r) : DifferentiableOn ð f (EMetric.ball x r) :=
  fun _ hy ⊠(h.cPolynomialAt_of_mem hy).analyticAt.differentiableWithinAt

/-- Within the ball, `f` is differentiable at `x + y`, with derivative the degree-one term of
the series rebased at `y` (`p.changeOrigin y`). -/
theorem HasFiniteFPowerSeriesOnBall.hasFDerivAt (h : HasFiniteFPowerSeriesOnBall f p x n r)
    {y : E} (hy : (âyââ : ââ¥0â) < r) :
    HasFDerivAt f (continuousMultilinearCurryFin1 ð E F (p.changeOrigin y 1)) (x + y) :=
  (h.changeOrigin hy).toHasFPowerSeriesOnBall.hasFPowerSeriesAt.hasFDerivAt

/-- Within the ball, `fderiv ð f (x + y)` is the degree-one term of the series rebased at `y`. -/
theorem HasFiniteFPowerSeriesOnBall.fderiv_eq (h : HasFiniteFPowerSeriesOnBall f p x n r)
    {y : E} (hy : (âyââ : ââ¥0â) < r) :
    fderiv ð f (x + y) = continuousMultilinearCurryFin1 ð E F (p.changeOrigin y 1) :=
  (h.hasFDerivAt hy).fderiv

/-- If a function has a finite power series on a ball, then so does its derivative. -/
protected theorem HasFiniteFPowerSeriesOnBall.fderiv
    (h : HasFiniteFPowerSeriesOnBall f p x (n + 1) r) :
    HasFiniteFPowerSeriesOnBall (fderiv ð f) p.derivSeries x n r := by
  -- Same strategy as `HasFPowerSeriesOnBall.fderiv`: prove the statement for the explicit
  -- formula `z ⊠curry (p.changeOrigin (z - x) 1)`, then transfer it via `fderiv_eq`.
  refine .congr (f := fun z ⊠continuousMultilinearCurryFin1 ð E F (p.changeOrigin (z - x) 1)) ?_
    fun z hz ⊠?_
  · refine continuousMultilinearCurryFin1 ð E F
      |>.toContinuousLinearEquiv.toContinuousLinearMap.comp_hasFiniteFPowerSeriesOnBall ?_
    simpa using
      ((p.hasFiniteFPowerSeriesOnBall_changeOrigin 1 h.finite).mono h.r_pos le_top).comp_sub x
  dsimp only
  rw [â h.fderiv_eq, add_sub_cancel]
  simpa only [edist_eq_coe_nnnorm_sub, EMetric.mem_ball] using hz

/-- If a function has a finite power series on a ball, then so does its derivative.
This is a variant of `HasFiniteFPowerSeriesOnBall.fderiv` where the degree of `f` is `< n`
and not `< n + 1`. -/
theorem HasFiniteFPowerSeriesOnBall.fderiv' (h : HasFiniteFPowerSeriesOnBall f p x n r) :
    HasFiniteFPowerSeriesOnBall (fderiv ð f) p.derivSeries x (n - 1) r := by
  obtain rfl | hn := eq_or_ne n 0
  · -- Degree `0`: `f` vanishes on the ball, so its derivative is `0` there and the derivative
    -- series is identically zero.
    rw [zero_tsub]
    refine HasFiniteFPowerSeriesOnBall.bound_zero_of_eq_zero (fun y hy ⊠?_) h.r_pos fun n ⊠?_
    · rw [Filter.EventuallyEq.fderiv_eq (f := fun _ ⊠0)]
      · rw [fderiv_const, Pi.zero_apply]
      · exact Filter.eventuallyEq_iff_exists_mem.mpr âšEMetric.ball x r,
          EMetric.isOpen_ball.mem_nhds hy, fun z hz ⊠by rw [h.eq_zero_of_bound_zero z hz]â©
    · apply ContinuousMultilinearMap.ext; intro a
      change (continuousMultilinearCurryFin1 ð E F) (p.changeOriginSeries 1 n a) = 0
      rw [p.changeOriginSeries_finite_of_finite h.finite 1 (Nat.zero_le _)]
      exact map_zero _
  · -- Positive degree: rewrite `n` as `(n - 1) + 1` and apply the main statement.
    rw [â Nat.succ_pred hn] at h
    exact h.fderiv

/-- If a function is polynomial on a set `s`, so is its Fréchet derivative. -/
theorem CPolynomialOn.fderiv (h : CPolynomialOn ð f s) :
    CPolynomialOn ð (fderiv ð f) s := by
  intro y hy
  rcases h y hy with âšp, r, n, hpâ©
  exact hp.fderiv'.cPolynomialAt

/-- If a function is polynomial on a set `s`, so are its successive Fréchet derivatives. -/
theorem CPolynomialOn.iteratedFDeriv (h : CPolynomialOn ð f s) (n : â) :
    CPolynomialOn ð (iteratedFDeriv ð n f) s := by
  induction' n with n IH
  · rw [iteratedFDeriv_zero_eq_comp]
    exact ((continuousMultilinearCurryFin0 ð E F).symm : F âL[ð] E[Ã0]âL[ð] F).comp_cPolynomialOn h
  · rw [iteratedFDeriv_succ_eq_comp_left]
    convert ContinuousLinearMap.comp_cPolynomialOn ?g IH.fderiv
    case g => exact â(continuousMultilinearCurryLeftEquiv ð (fun _ : Fin (n + 1) ⊠E) F)
    simp

/-- A polynomial function is infinitely differentiable. -/
theorem CPolynomialOn.contDiffOn (h : CPolynomialOn ð f s) {n : ââ} :
    ContDiffOn ð n f s :=
  -- Same strategy as `AnalyticOn.contDiffOn`: work on the open set of all points where `f`
  -- is polynomial, which contains `s`.
  let t := { x | CPolynomialAt ð f x }
  suffices ContDiffOn ð n f t from this.mono h
  have H : CPolynomialOn ð f t := fun _x hx ⊠hx
  have t_open : IsOpen t := isOpen_cPolynomialAt ð f
  contDiffOn_of_continuousOn_differentiableOn
    (fun m _ ⊠(H.iteratedFDeriv m).continuousOn.congr
      fun _ hx ⊠iteratedFDerivWithin_of_isOpen _ t_open hx)
    (fun m _ ⊠(H.iteratedFDeriv m).analyticOn.differentiableOn.congr
      fun _ hx ⊠iteratedFDerivWithin_of_isOpen _ t_open hx)

/-- A function that is polynomial at a point is infinitely differentiable there. -/
theorem CPolynomialAt.contDiffAt (h : CPolynomialAt ð f x) {n : ââ} :
    ContDiffAt ð n f x :=
  let âš_, hs, hfâ© := h.exists_mem_nhds_cPolynomialOn
  hf.contDiffOn.contDiffAt hs

end fderiv
section deriv

variable {p : FormalMultilinearSeries ð ð F} {r : ââ¥0â}
variable {f : ð â F} {x : ð} {s : Set ð}

/-- If a function is polynomial on a set `s`, so is its derivative. -/
protected theorem CPolynomialOn.deriv (h : CPolynomialOn ð f s) : CPolynomialOn ð (deriv f) s :=
  -- `deriv f` is `fderiv ð f` applied to `1`, a continuous linear operation.
  (ContinuousLinearMap.apply ð F (1 : ð)).comp_cPolynomialOn h.fderiv

/-- If a function is polynomial on a set `s`, so are its successive derivatives. -/
theorem CPolynomialOn.iterated_deriv (h : CPolynomialOn ð f s) (n : â) :
    CPolynomialOn ð (deriv^[n] f) s := by
  induction' n with n IH
  · exact h
  · simpa only [Function.iterate_succ', Function.comp_apply] using IH.deriv

end deriv
namespace ContinuousMultilinearMap

variable {ι : Type*} {E : ι â Type*} [â i, NormedAddCommGroup (E i)] [â i, NormedSpace ð (E i)]
  [Fintype ι] (f : ContinuousMultilinearMap ð E F)

open FormalMultilinearSeries

/-- A continuous multilinear map coincides everywhere with its own formal multilinear series
(centered at `0`), which has degree bounded by `card ι + 1`. -/
protected theorem hasFiniteFPowerSeriesOnBall :
    HasFiniteFPowerSeriesOnBall f f.toFormalMultilinearSeries 0 (Fintype.card ι + 1) †:=
  .mk' (fun m hm ⊠dif_neg (Nat.succ_le_iff.mp hm).ne) ENNReal.zero_lt_top fun y _ ⊠by
    -- Only the term of degree `card ι` is nonzero in the series.
    rw [Finset.sum_eq_single_of_mem _ (Finset.self_mem_range_succ _), zero_add]
    · rw [toFormalMultilinearSeries, dif_pos rfl]; rfl
    · intro m _ ne; rw [toFormalMultilinearSeries, dif_neg ne.symm]; rfl

/-- The rebased series coefficients of the series of a continuous multilinear map vanish in any
total degree different from `card ι`. -/
theorem changeOriginSeries_support {k l : â} (h : k + l â Fintype.card ι) :
    f.toFormalMultilinearSeries.changeOriginSeries k l = 0 :=
  Finset.sum_eq_zero fun _ _ ⊠by
    simp_rw [FormalMultilinearSeries.changeOriginSeriesTerm,
      toFormalMultilinearSeries, dif_neg h.symm, LinearIsometryEquiv.map_zero]

variable {n : ââ} (x : â i, E i)

open Finset in
/-- The degree-one term of the series of a continuous multilinear map rebased at `x` is its
`linearDeriv` at `x`. -/
theorem changeOrigin_toFormalMultilinearSeries [DecidableEq ι] :
    continuousMultilinearCurryFin1 ð (â i, E i) F (f.toFormalMultilinearSeries.changeOrigin x 1) =
    f.linearDeriv x := by
  ext y
  rw [continuousMultilinearCurryFin1_apply, linearDeriv_apply,
    changeOrigin, FormalMultilinearSeries.sum]
  cases isEmpty_or_nonempty ι
  · -- Empty index type: every term of the rebased series vanishes, and so does `linearDeriv`.
    have (l) : 1 + l â Fintype.card ι := by
      rw [add_comm, Fintype.card_eq_zero]; exact Nat.succ_ne_zero _
    simp_rw [Fintype.sum_empty, changeOriginSeries_support _ (this _), zero_apply _, tsum_zero]; rfl
  -- Nonempty case: only the term of degree `card ι - 1` contributes to the sum.
  rw [tsum_eq_single (Fintype.card ι - 1), changeOriginSeries]; swap
  · intro m hm
    rw [Ne, eq_tsub_iff_add_eq_of_le (by exact Fintype.card_pos), add_comm] at hm
    rw [f.changeOriginSeries_support hm, zero_apply]
  rw [sum_apply, ContinuousMultilinearMap.sum_apply, Fin.snoc_zero]
  simp_rw [changeOriginSeriesTerm_apply]
  -- Re-index the sum over cardinality-`(card ι - 1)` subsets by the (singleton) complement.
  refine (Fintype.sum_bijective (?_ â Fintype.equivFinOfCardEq (Nat.add_sub_of_le
    Fintype.card_pos).symm) (.comp ?_ <| Equiv.bijective _) _ _ fun i ⊠?_).symm
  · exact (âš{·}á¶, by
      rw [card_compl, Fintype.card_fin, card_singleton, Nat.add_sub_cancel_left]â©)
  · use fun _ _ ⊠(singleton_injective <| compl_injective <| Subtype.ext_iff.mp ·)
    intro âšs, hsâ©
    have h : sá¶.card = 1 := by rw [card_compl, hs, Fintype.card_fin, Nat.add_sub_cancel]
    obtain âša, haâ© := card_eq_one.mp h
    exact âša, Subtype.ext (compl_eq_comm.mp ha)â©
  rw [Function.comp_apply, Subtype.coe_mk, compl_singleton, piecewise_erase_univ,
    toFormalMultilinearSeries, dif_pos (Nat.add_sub_of_le Fintype.card_pos).symm]
  simp_rw [domDomCongr_apply, compContinuousLinearMap_apply, ContinuousLinearMap.proj_apply,
    Function.update_apply, (Equiv.injective _).eq_iff, ite_apply]
  congr; ext j
  obtain rfl | hj := eq_or_ne j i
  · rw [Function.update_same, if_pos rfl]
  · rw [Function.update_noteq hj, if_neg hj]

/-- A continuous multilinear map is differentiable at every `x`, with derivative
`f.linearDeriv x`. -/
protected theorem hasFDerivAt [DecidableEq ι] : HasFDerivAt f (f.linearDeriv x) x := by
  rw [â changeOrigin_toFormalMultilinearSeries]
  convert f.hasFiniteFPowerSeriesOnBall.hasFDerivAt (y := x) ENNReal.coe_lt_top
  rw [zero_add]

/-- Technical lemma used in the proof of `hasFTaylorSeriesUpTo_iteratedFDeriv`, to compare sums
over embedding of `Fin k` and `Fin (k + 1)`. -/
private lemma _root_.Equiv.succ_embeddingFinSucc_fst_symm_apply {ι : Type*} [DecidableEq ι]
    {n : â} (e : Fin (n+1) ⪠ι) {k : ι}
    (h'k : k â Set.range (Equiv.embeddingFinSucc n ι e).1) (hk : k â Set.range e) :
    Fin.succ ((Equiv.embeddingFinSucc n ι e).1.toEquivRange.symm âšk, h'kâ©)
    = e.toEquivRange.symm âšk, hkâ© := by
  rcases hk with âšj, rflâ©
  -- `j` cannot be `0`, since `e j` lies in the range of the restricted embedding.
  have hj : j â 0 := by
    rintro rfl
    simp at h'k
  simp only [Function.Embedding.toEquivRange_symm_apply_self]
  have : e j = (Equiv.embeddingFinSucc n ι e).1 (Fin.pred j hj) := by simp
  simp_rw [this]
  simp [-Equiv.embeddingFinSucc_fst]

/-- A continuous multilinear function `f` admits a Taylor series, whose successive terms are given
by `f.iteratedFDeriv n`. This is the point of the definition of `f.iteratedFDeriv`. -/
theorem hasFTaylorSeriesUpTo_iteratedFDeriv :
    HasFTaylorSeriesUpTo †f (fun v n ⊠f.iteratedFDeriv n v) := by
  classical
  constructor
  · simp [ContinuousMultilinearMap.iteratedFDeriv]
  · rintro n - x
    -- The key step: the `(n+1)`-st term, curried on the left, is the derivative of the
    -- `n`-th term, obtained by differentiating each component via its `linearDeriv`.
    suffices H : curryLeft (f.iteratedFDeriv (Nat.succ n) x) = (â e : Fin n ⪠ι,
        ((iteratedFDerivComponent f e.toEquivRange).linearDeriv
        (Pi.compRightL ð _ Subtype.val x)) âL (Pi.compRightL ð _ Subtype.val)) by
      have A : HasFDerivAt (f.iteratedFDeriv n) (â e : Fin n ⪠ι,
          ((iteratedFDerivComponent f e.toEquivRange).linearDeriv (Pi.compRightL ð _ Subtype.val x))
          âL (Pi.compRightL ð _ Subtype.val)) x := by
        apply HasFDerivAt.sum (fun s _hs ⊠?_)
        exact (ContinuousMultilinearMap.hasFDerivAt _ _).comp x (ContinuousLinearMap.hasFDerivAt _)
      rwa [â H] at A
    ext v m
    simp only [ContinuousMultilinearMap.iteratedFDeriv, curryLeft_apply, sum_apply,
      iteratedFDerivComponent_apply, Finset.univ_sigma_univ,
      Pi.compRightL_apply, ContinuousLinearMap.coe_sum', ContinuousLinearMap.coe_comp',
      Finset.sum_apply, Function.comp_apply, linearDeriv_apply, Finset.sum_sigma']
    rw [â (Equiv.embeddingFinSucc n ι).sum_comp]
    congr with e
    congr with k
    by_cases hke : k â Set.range e
    · simp only [hke, âreduceDIte]
      split_ifs with hkf
      · simp only [â Equiv.succ_embeddingFinSucc_fst_symm_apply e hkf hke, Fin.cons_succ]
      · obtain rfl : k = e 0 := by
          rcases hke with âšj, rflâ©
          simpa using hkf
        simp only [Function.Embedding.toEquivRange_symm_apply_self, Fin.cons_zero, Function.update,
          Pi.compRightL_apply]
        split_ifs with h
        · congr!
        · exfalso
          apply h
          simp_rw [â Equiv.embeddingFinSucc_snd e]
    · have hkf : k â Set.range (Equiv.embeddingFinSucc n ι e).1 := by
        contrapose! hke
        rw [Equiv.embeddingFinSucc_fst] at hke
        exact Set.range_comp_subset_range _ _ hke
      simp only [hke, hkf, âreduceDIte, Pi.compRightL,
        ContinuousLinearMap.coe_mk', LinearMap.coe_mk, AddHom.coe_mk]
      rw [Function.update_noteq]
      contrapose! hke
      rw [show k = _ from Subtype.ext_iff_val.1 hke, Equiv.embeddingFinSucc_snd e]
      exact Set.mem_range_self _
  · rintro n -
    -- Each term of the Taylor series is continuous, being a finite sum of continuous maps.
    apply continuous_finset_sum _ (fun e _ ⊠?_)
    exact (ContinuousMultilinearMap.coe_continuous _).comp (ContinuousLinearMap.continuous _)

/-- The abstract iterated derivative of a continuous multilinear map coincides with the
explicitly defined `f.iteratedFDeriv`. -/
theorem iteratedFDeriv_eq (n : â) :
    iteratedFDeriv ð n f = f.iteratedFDeriv n :=
  funext fun x ⊠(f.hasFTaylorSeriesUpTo_iteratedFDeriv.eq_iteratedFDeriv (m := n) le_top x).symm

/-- Norm bound for the iterated derivatives of a continuous multilinear map. -/
theorem norm_iteratedFDeriv_le (n : â) (x : (i : ι) â E i) :
    âiteratedFDeriv ð n f xâ
      †Nat.descFactorial (Fintype.card ι) n * âfâ * âxâ ^ (Fintype.card ι - n) := by
  rw [f.iteratedFDeriv_eq]
  exact f.norm_iteratedFDeriv_le' n x

/-- A continuous multilinear map is polynomial at every point. -/
lemma cPolynomialAt : CPolynomialAt ð f x :=
  f.hasFiniteFPowerSeriesOnBall.cPolynomialAt_of_mem
    (by simp only [Metric.emetric_ball_top, Set.mem_univ])

-- NOTE(review): the name `cPolyomialOn` is missing an `n` (should be `cPolynomialOn`); renaming
-- would break existing callers, so it is only flagged here.
/-- A continuous multilinear map is polynomial on the whole space. -/
lemma cPolyomialOn : CPolynomialOn ð f †:= fun x _ ⊠f.cPolynomialAt x

/-- A continuous multilinear map is smooth at every point. -/
lemma contDiffAt : ContDiffAt ð n f x := (f.cPolynomialAt x).contDiffAt

/-- A continuous multilinear map is smooth. -/
lemma contDiff : ContDiff ð n f := contDiff_iff_contDiffAt.mpr f.contDiffAt

end ContinuousMultilinearMap
namespace FormalMultilinearSeries

variable (p : FormalMultilinearSeries ð E F)

open Fintype ContinuousLinearMap in
/-- Evaluating the `n`-th term of the derivative series of `p` on the diagonal (all arguments
equal to `x`) gives `(n + 1) ⢠p (n + 1)` on the diagonal. -/
theorem derivSeries_apply_diag (n : â) (x : E) :
    derivSeries p n (fun _ ⊠x) x = (n + 1) ⢠p (n + 1) fun _ ⊠x := by
  simp only [derivSeries, compFormalMultilinearSeries_apply, changeOriginSeries,
    compContinuousMultilinearMap_coe, ContinuousLinearEquiv.coe_coe, LinearIsometryEquiv.coe_coe,
    Function.comp_apply, ContinuousMultilinearMap.sum_apply, map_sum, coe_sum', Finset.sum_apply,
    continuousMultilinearCurryFin1_apply, Matrix.zero_empty]
  -- On the diagonal all summands agree, and there are `n + 1` of them (a binomial count).
  convert Finset.sum_const _
  · rw [Fin.snoc_zero, changeOriginSeriesTerm_apply, Finset.piecewise_same, add_comm]
  · rw [â card, card_subtype, â Finset.powerset_univ, â Finset.powersetCard_eq_filter,
      Finset.card_powersetCard, â card, card_fin, eq_comm, add_comm, Nat.choose_succ_self_right]

end FormalMultilinearSeries
namespace HasFPowerSeriesOnBall

open FormalMultilinearSeries ENNReal Nat

variable {p : FormalMultilinearSeries ð E F} {f : E â F} {x : E} {r : ââ¥0â}
  (h : HasFPowerSeriesOnBall f p x r) (y : E)

/-- The zeroth iterated derivative of `f` at the center of the ball is the constant
coefficient `p 0` of the series. -/
theorem iteratedFDeriv_zero_apply_diag : iteratedFDeriv ð 0 f x = p 0 := by
  ext
  convert (h.hasSum <| EMetric.mem_ball_self h.r_pos).tsum_eq.symm
  · rw [iteratedFDeriv_zero_apply, add_zero]
  · -- At the center, only the degree-zero term of the series survives.
    rw [tsum_eq_single 0 fun n hn ⊠by haveI := NeZero.mk hn; exact (p n).map_zero]
    exact congr(p 0 $(Subsingleton.elim _ _))

open ContinuousLinearMap

-- Auxiliary induction for `factorial_smul`, quantified over the target space so that the
-- inductive step can apply the hypothesis to `fderiv ð f` (see `ih h.fderiv`), whose values
-- live in a different space.
private theorem factorial_smul' {n : â} : â {F : Type max u v} [NormedAddCommGroup F]
    [NormedSpace ð F] [CompleteSpace F] {p : FormalMultilinearSeries ð E F}
    {f : E â F}, HasFPowerSeriesOnBall f p x r â
    n ! ⢠p n (fun _ ⊠y) = iteratedFDeriv ð n f x (fun _ ⊠y) := by
  induction' n with n ih <;> intro F _ _ _ p f h
  · rw [factorial_zero, one_smul, h.iteratedFDeriv_zero_apply_diag]
  · rw [factorial_succ, mul_comm, mul_smul, â derivSeries_apply_diag, â smul_apply,
      ih h.fderiv, iteratedFDeriv_succ_apply_right]
    rfl

variable [CompleteSpace F]

/-- On the diagonal, `n !` times the `n`-th coefficient of a power series of `f` is the `n`-th
iterated derivative of `f` at the center. -/
theorem factorial_smul (n : â) :
    n ! ⢠p n (fun _ ⊠y) = iteratedFDeriv ð n f x (fun _ ⊠y) := by
  cases n
  · rw [factorial_zero, one_smul, h.iteratedFDeriv_zero_apply_diag]
  · rw [factorial_succ, mul_comm, mul_smul, â derivSeries_apply_diag, â smul_apply,
      factorial_smul' _ h.fderiv, iteratedFDeriv_succ_apply_right]
    rfl

/-- Taylor expansion: inside the ball of convergence, `f (x + y)` is the sum over `n` of the
`n`-th iterated derivative of `f` at `x`, evaluated on the diagonal and divided by `n !`. -/
theorem hasSum_iteratedFDeriv [CharZero ð] {y : E} (hy : y â EMetric.ball 0 r) :
    HasSum (fun n ⊠(n ! : ð)â»Â¹ ⢠iteratedFDeriv ð n f x fun _ ⊠y) (f (x + y)) := by
  convert h.hasSum hy with n
  rw [â h.factorial_smul y n, smul_comm, â smul_assoc, nsmul_eq_mul,
    mul_inv_cancel <| cast_ne_zero.mpr n.factorial_ne_zero, one_smul]

end HasFPowerSeriesOnBall
|
Analysis\Calculus\FDeriv\Basic.lean | /-
Copyright (c) 2019 Jeremy Avigad. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Jeremy Avigad, Sébastien Gouëzel, Yury Kudryashov
-/
import Mathlib.Analysis.Calculus.TangentCone
import Mathlib.Analysis.NormedSpace.OperatorNorm.Asymptotics
/-!
# The Fréchet derivative
Let `E` and `F` be normed spaces, `f : E â F`, and `f' : E âL[ð] F` a
continuous ð-linear map, where `ð` is a non-discrete normed field. Then
`HasFDerivWithinAt f f' s x`
says that `f` has derivative `f'` at `x`, where the domain of interest
is restricted to `s`. We also have
`HasFDerivAt f f' x := HasFDerivWithinAt f f' x univ`
Finally,
`HasStrictFDerivAt f f' x`
means that `f : E â F` has derivative `f' : E âL[ð] F` in the sense of strict differentiability,
i.e., `f y - f z - f'(y - z) = o(y - z)` as `y, z â x`. This notion is used in the inverse
function theorem, and is defined here only to avoid proving theorems like
`IsBoundedBilinearMap.hasFDerivAt` twice: first for `HasFDerivAt`, then for
`HasStrictFDerivAt`.
## Main results
In addition to the definition and basic properties of the derivative,
the folder `Analysis/Calculus/FDeriv/` contains the usual formulas
(and existence assertions) for the derivative of
* constants
* the identity
* bounded linear maps (`Linear.lean`)
* bounded bilinear maps (`Bilinear.lean`)
* sum of two functions (`Add.lean`)
* sum of finitely many functions (`Add.lean`)
* multiplication of a function by a scalar constant (`Add.lean`)
* negative of a function (`Add.lean`)
* subtraction of two functions (`Add.lean`)
* multiplication of a function by a scalar function (`Mul.lean`)
* multiplication of two scalar functions (`Mul.lean`)
* composition of functions (the chain rule) (`Comp.lean`)
* inverse function (`Mul.lean`)
(assuming that it exists; the inverse function theorem is in `../Inverse.lean`)
For most binary operations we also define `const_op` and `op_const` theorems for the cases when
the first or second argument is a constant. This makes writing chains of `HasDerivAt`'s easier,
and they more frequently lead to the desired result.
One can also interpret the derivative of a function `f : ð â E` as an element of `E` (by identifying
a linear function from `ð` to `E` with its value at `1`). Results on the Fréchet derivative are
translated to this more elementary point of view on the derivative in the file `Deriv.lean`. The
derivative of polynomials is handled there, as it is naturally one-dimensional.
The simplifier is set up to prove automatically that some functions are differentiable, or
differentiable at a point (but not differentiable on a set or within a set at a point, as checking
automatically that the good domains are mapped one to the other when using composition is not
something the simplifier can easily do). This means that one can write
`example (x : â) : Differentiable â (fun x ⊠sin (exp (3 + x^2)) - 5 * cos x) := by simp`.
If there are divisions, one needs to supply to the simplifier proofs that the denominators do
not vanish, as in
```lean
example (x : â) (h : 1 + sin x â 0) : DifferentiableAt â (fun x ⊠exp x / (1 + sin x)) x := by
simp [h]
```
Of course, these examples only work once `exp`, `cos` and `sin` have been shown to be
differentiable, in `Analysis.SpecialFunctions.Trigonometric`.
The simplifier is not set up to compute the Fréchet derivative of maps (as these are in general
complicated multidimensional linear maps), but it will compute one-dimensional derivatives,
see `Deriv.lean`.
## Implementation details
The derivative is defined in terms of the `isLittleO` relation, but also
characterized in terms of the `Tendsto` relation.
We also introduce predicates `DifferentiableWithinAt ð f s x` (where `ð` is the base field,
`f` the function to be differentiated, `x` the point at which the derivative is asserted to exist,
and `s` the set along which the derivative is defined), as well as `DifferentiableAt ð f x`,
`DifferentiableOn ð f s` and `Differentiable ð f` to express the existence of a derivative.
To be able to compute with derivatives, we write `fderivWithin ð f s x` and `fderiv ð f x`
for some choice of a derivative if it exists, and the zero function otherwise. This choice only
behaves well along sets for which the derivative is unique, i.e., those for which the tangent
directions span a dense subset of the whole space. The predicates `UniqueDiffWithinAt s x` and
`UniqueDiffOn s`, defined in `TangentCone.lean` express this property. We prove that indeed
they imply the uniqueness of the derivative. This is satisfied for open subsets, and in particular
for `univ`. This uniqueness only holds when the field is non-discrete, which we request at the very
beginning: otherwise, a derivative can be defined, but it has no interesting properties whatsoever.
To make sure that the simplifier can prove automatically that functions are differentiable, we tag
many lemmas with the `simp` attribute, for instance those saying that the sum of differentiable
functions is differentiable, as well as their product, their cartesian product, and so on. A notable
exception is the chain rule: we do not mark as a simp lemma the fact that, if `f` and `g` are
differentiable, then their composition also is: `simp` would always be able to match this lemma,
by taking `f` or `g` to be the identity. Instead, for every reasonable function (say, `exp`),
we add a lemma that if `f` is differentiable then so is `(fun x ⊠exp (f x))`. This means adding
some boilerplate lemmas, but these can also be useful in their own right.
Tests for this ability of the simplifier (with more examples) are provided in
`Tests/Differentiable.lean`.
## Tags
derivative, differentiable, Fréchet, calculus
-/
open Filter Asymptotics ContinuousLinearMap Set Metric
open scoped Classical
open Topology NNReal Filter Asymptotics ENNReal
noncomputable section
section
variable {ð : Type*} [NontriviallyNormedField ð]
variable {E : Type*} [NormedAddCommGroup E] [NormedSpace ð E]
variable {F : Type*} [NormedAddCommGroup F] [NormedSpace ð F]
variable {G : Type*} [NormedAddCommGroup G] [NormedSpace ð G]
variable {G' : Type*} [NormedAddCommGroup G'] [NormedSpace ð G']
/-- A function `f` has the continuous linear map `f'` as derivative along the filter `L` if
`f x' = f x + f' (x' - x) + o (x' - x)` when `x'` converges along the filter `L`. This definition
is designed to be specialized for `L = ð x` (in `HasFDerivAt`), giving rise to the usual notion
of Fréchet derivative, and for `L = ð[s] x` (in `HasFDerivWithinAt`), giving rise to
the notion of Fréchet derivative along the set `s`. -/
@[mk_iff hasFDerivAtFilter_iff_isLittleO]
structure HasFDerivAtFilter (f : E â F) (f' : E âL[ð] F) (x : E) (L : Filter E) : Prop where
  -- The single field is the defining little-o estimate; build terms with `.of_isLittleO`.
  of_isLittleO :: isLittleO : (fun x' => f x' - f x - f' (x' - x)) =o[L] fun x' => x' - x
/-- A function `f` has the continuous linear map `f'` as derivative at `x` within a set `s` if
`f x' = f x + f' (x' - x) + o (x' - x)` when `x'` tends to `x` inside `s`. This is
`HasFDerivAtFilter` specialized to the filter `ð[s] x`. -/
@[fun_prop]
def HasFDerivWithinAt (f : E â F) (f' : E âL[ð] F) (s : Set E) (x : E) :=
  HasFDerivAtFilter f f' x (ð[s] x)
/-- A function `f` has the continuous linear map `f'` as derivative at `x` if
`f x' = f x + f' (x' - x) + o (x' - x)` when `x'` tends to `x`. This is
`HasFDerivAtFilter` specialized to the filter `ð x`. -/
@[fun_prop]
def HasFDerivAt (f : E â F) (f' : E âL[ð] F) (x : E) :=
  HasFDerivAtFilter f f' x (ð x)
/-- A function `f` has derivative `f'` at `a` in the sense of *strict differentiability*
if `f x - f y - f' (x - y) = o(x - y)` as `x, y â a`. This form of differentiability is required,
e.g., by the inverse function theorem. Any `C^1` function on a vector space over `â` is strictly
differentiable but this definition works, e.g., for vector spaces over `p`-adic numbers. -/
@[fun_prop]
def HasStrictFDerivAt (f : E â F) (f' : E âL[ð] F) (x : E) :=
  -- Note the little-o is along the product filter `ð (x, x)`: both points vary.
  (fun p : E Ã E => f p.1 - f p.2 - f' (p.1 - p.2)) =o[ð (x, x)] fun p : E Ã E => p.1 - p.2
variable (ð)
/-- A function `f` is differentiable at a point `x` within a set `s` if it admits a derivative
there (possibly non-unique). -/
@[fun_prop]
def DifferentiableWithinAt (f : E â F) (s : Set E) (x : E) :=
  â f' : E âL[ð] F, HasFDerivWithinAt f f' s x
/-- A function `f` is differentiable at a point `x` if it admits a derivative there (possibly
non-unique). -/
@[fun_prop]
def DifferentiableAt (f : E â F) (x : E) :=
  â f' : E âL[ð] F, HasFDerivAt f f' x
/-- If `f` has a derivative at `x` within `s`, then `fderivWithin ð f s x` is such a derivative.
Otherwise, it is set to `0`. If `x` is isolated in `s`, we take the derivative within `s` to
be zero for convenience. -/
irreducible_def fderivWithin (f : E â F) (s : Set E) (x : E) : E âL[ð] F :=
  -- `ð[s \ {x}] x = â¥` says `x` is isolated in `s`: any map would then qualify as a
  -- derivative, so the junk value is normalized to `0`.
  if ð[s \ {x}] x = ⥠then 0 else
  if h : â f', HasFDerivWithinAt f f' s x then Classical.choose h else 0
/-- If `f` has a derivative at `x`, then `fderiv ð f x` is such a derivative. Otherwise, it is
set to `0` (a junk value). -/
irreducible_def fderiv (f : E â F) (x : E) : E âL[ð] F :=
  if h : â f', HasFDerivAt f f' x then Classical.choose h else 0
/-- `DifferentiableOn ð f s` means that `f` is differentiable within `s` at any point of `s`. -/
@[fun_prop]
def DifferentiableOn (f : E â F) (s : Set E) :=
  â x â s, DifferentiableWithinAt ð f s x
/-- `Differentiable ð f` means that `f` is differentiable at any point. -/
@[fun_prop]
def Differentiable (f : E â F) :=
  â x, DifferentiableAt ð f x
variable {ð}
variable {f fâ fâ g : E â F}
variable {f' fâ' fâ' g' : E âL[ð] F}
variable (e : E âL[ð] F)
variable {x : E}
variable {s t : Set E}
variable {L Lâ Lâ : Filter E}
/-- At a point isolated in `s`, the derivative within `s` is the junk value `0`. -/
theorem fderivWithin_zero_of_isolated (h : ð[s \ {x}] x = â¥) : fderivWithin ð f s x = 0 := by
  rw [fderivWithin, if_pos h]
/-- Outside the closure of `s`, the derivative within `s` is the junk value `0`. -/
theorem fderivWithin_zero_of_nmem_closure (h : x â closure s) : fderivWithin ð f s x = 0 := by
  apply fderivWithin_zero_of_isolated
  -- `x â closure s` says `ð[s] x` is trivial, hence so is the punctured version.
  simp only [mem_closure_iff_nhdsWithin_neBot, neBot_iff, Ne, Classical.not_not] at h
  rw [eq_bot_iff, â h]
  exact nhdsWithin_mono _ diff_subset
/-- If `f` is not differentiable at `x` within `s`, `fderivWithin` is the junk value `0`. -/
theorem fderivWithin_zero_of_not_differentiableWithinAt (h : ¬DifferentiableWithinAt ð f s x) :
    fderivWithin ð f s x = 0 := by
  have : ‰ f', HasFDerivWithinAt f f' s x := h
  simp [fderivWithin, this]
/-- If `f` is not differentiable at `x`, `fderiv` is the junk value `0`. -/
theorem fderiv_zero_of_not_differentiableAt (h : ¬DifferentiableAt ð f x) : fderiv ð f x = 0 := by
  have : ‰ f', HasFDerivAt f f' x := h
  simp [fderiv, this]
section DerivativeUniqueness

/- In this section, we discuss the uniqueness of the derivative.
We prove that the definitions `UniqueDiffWithinAt` and `UniqueDiffOn` indeed imply the
uniqueness of the derivative. -/

/-- If a function f has a derivative f' at x, a rescaled version of f around x converges to f',
i.e., `n (f (x + (1/n) v) - f x)` converges to `f' v`. More generally, if `c n` tends to infinity
and `c n * d n` tends to `v`, then `c n * (f (x + d n) - f x)` tends to `f' v`. This lemma expresses
this fact, for functions having a derivative within a set. Its specific formulation is useful for
tangent cone related discussions. -/
theorem HasFDerivWithinAt.lim (h : HasFDerivWithinAt f f' s x) {α : Type*} (l : Filter α)
    {c : α â ð} {d : α â E} {v : E} (dtop : âá¶ n in l, x + d n â s)
    (clim : Tendsto (fun n => âc nâ) l atTop) (cdlim : Tendsto (fun n => c n ⢠d n) l (ð v)) :
    Tendsto (fun n => c n ⢠(f (x + d n) - f x)) l (ð (f' v)) := by
  -- First, `x + d n` tends to `x` within `s`, since `d n â 0`.
  have tendsto_arg : Tendsto (fun n => x + d n) l (ð[s] x) := by
    conv in ð[s] x => rw [â add_zero x]
    rw [nhdsWithin, tendsto_inf]
    constructor
    · apply tendsto_const_nhds.add (tangentConeAt.lim_zero l clim cdlim)
    · rwa [tendsto_principal]
  -- Compose the little-o estimate with this convergence, then rescale by `c n`.
  have : (fun y => f y - f x - f' (y - x)) =o[ð[s] x] fun y => y - x := h.isLittleO
  have : (fun n => f (x + d n) - f x - f' (x + d n - x)) =o[l] fun n => x + d n - x :=
    this.comp_tendsto tendsto_arg
  have : (fun n => f (x + d n) - f x - f' (d n)) =o[l] d := by simpa only [add_sub_cancel_left]
  have : (fun n => c n ⢠(f (x + d n) - f x - f' (d n))) =o[l] fun n => c n ⢠d n :=
    (isBigO_refl c l).smul_isLittleO this
  have : (fun n => c n ⢠(f (x + d n) - f x - f' (d n))) =o[l] fun _ => (1 : â) :=
    this.trans_isBigO (cdlim.isBigO_one â)
  have L1 : Tendsto (fun n => c n ⢠(f (x + d n) - f x - f' (d n))) l (ð 0) :=
    (isLittleO_one_iff â).1 this
  have L2 : Tendsto (fun n => f' (c n ⢠d n)) l (ð (f' v)) :=
    Tendsto.comp f'.cont.continuousAt cdlim
  -- Combine the two limits and simplify the resulting expression.
  have L3 :
    Tendsto (fun n => c n ⢠(f (x + d n) - f x - f' (d n)) + f' (c n ⢠d n)) l (ð (0 + f' v)) :=
    L1.add L2
  have :
    (fun n => c n ⢠(f (x + d n) - f x - f' (d n)) + f' (c n ⢠d n)) = fun n =>
      c n ⢠(f (x + d n) - f x) := by
    ext n
    simp [smul_add, smul_sub]
  rwa [this, zero_add] at L3

/-- If `f'` and `fâ'` are two derivatives of `f` within `s` at `x`, then they are equal on the
tangent cone to `s` at `x` -/
theorem HasFDerivWithinAt.unique_on (hf : HasFDerivWithinAt f f' s x)
    (hg : HasFDerivWithinAt f fâ' s x) : EqOn f' fâ' (tangentConeAt ð s x) :=
  fun _ âš_, _, dtop, clim, cdlimâ© =>
  tendsto_nhds_unique (hf.lim atTop dtop clim cdlim) (hg.lim atTop dtop clim cdlim)

/-- `UniqueDiffWithinAt` achieves its goal: it implies the uniqueness of the derivative. -/
theorem UniqueDiffWithinAt.eq (H : UniqueDiffWithinAt ð s x) (hf : HasFDerivWithinAt f f' s x)
    (hg : HasFDerivWithinAt f fâ' s x) : f' = fâ' :=
  -- Two continuous linear maps agreeing on a set spanning a dense subspace are equal.
  ContinuousLinearMap.ext_on H.1 (hf.unique_on hg)

/-- On a set of unique differentiability, the derivative within the set is unique. -/
theorem UniqueDiffOn.eq (H : UniqueDiffOn ð s) (hx : x â s) (h : HasFDerivWithinAt f f' s x)
    (hâ : HasFDerivWithinAt f fâ' s x) : f' = fâ' :=
  (H x hx).eq h hâ

end DerivativeUniqueness
section FDerivProperties
/-! ### Basic properties of the derivative -/
/-- Differentiability along a filter, reformulated as convergence of the normalized
difference quotient to `0`. -/
theorem hasFDerivAtFilter_iff_tendsto :
    HasFDerivAtFilter f f' x L â
    Tendsto (fun x' => âx' - xââ»Â¹ * âf x' - f x - f' (x' - x)â) L (ð 0) := by
  -- The quotient is well behaved even at `x' = x`, where both norms vanish.
  have h : â x', âx' - xâ = 0 â âf x' - f x - f' (x' - x)â = 0 := fun x' hx' => by
    rw [sub_eq_zero.1 (norm_eq_zero.1 hx')]
    simp
  rw [hasFDerivAtFilter_iff_isLittleO, â isLittleO_norm_left, â isLittleO_norm_right,
    isLittleO_iff_tendsto h]
  exact tendsto_congr fun _ => div_eq_inv_mul _ _
/-- Differentiability within a set, reformulated as convergence of the normalized
difference quotient to `0`. -/
theorem hasFDerivWithinAt_iff_tendsto :
    HasFDerivWithinAt f f' s x â
    Tendsto (fun x' => âx' - xââ»Â¹ * âf x' - f x - f' (x' - x)â) (ð[s] x) (ð 0) :=
  hasFDerivAtFilter_iff_tendsto
/-- Differentiability at a point, reformulated as convergence of the normalized
difference quotient to `0`. -/
theorem hasFDerivAt_iff_tendsto :
    HasFDerivAt f f' x â Tendsto (fun x' => âx' - xââ»Â¹ * âf x' - f x - f' (x' - x)â) (ð x) (ð 0) :=
  hasFDerivAtFilter_iff_tendsto
/-- Differentiability at `x`, reformulated as a little-o estimate in the increment `h` near `0`. -/
theorem hasFDerivAt_iff_isLittleO_nhds_zero :
    HasFDerivAt f f' x â (fun h : E => f (x + h) - f x - f' h) =o[ð 0] fun h => h := by
  -- Translate the filter `ð x` to `ð 0` by the substitution `x' = x + h`.
  rw [HasFDerivAt, hasFDerivAtFilter_iff_isLittleO, â map_add_left_nhds_zero x, isLittleO_map]
  simp [(· â ·)]
/-- Converse to the mean value inequality: if `f` is differentiable at `xâ` and `C`-lipschitz
on a neighborhood of `xâ` then its derivative at `xâ` has norm bounded by `C`. This version
only assumes that `âf x - f xââ †C * âx - xââ` in a neighborhood of `x`. -/
theorem HasFDerivAt.le_of_lip' {f : E â F} {f' : E âL[ð] F} {xâ : E} (hf : HasFDerivAt f f' xâ)
    {C : â} (hCâ : 0 †C) (hlip : âá¶ x in ð xâ, âf x - f xââ †C * âx - xââ) : âf'â †C := by
  -- It suffices to bound `âf'â` by `C + ε` for every `ε > 0`.
  refine le_of_forall_pos_le_add fun ε ε0 => opNorm_le_of_nhds_zero ?_ ?_
  · exact add_nonneg hCâ ε0.le
  rw [â map_add_left_nhds_zero xâ, eventually_map] at hlip
  filter_upwards [isLittleO_iff.1 (hasFDerivAt_iff_isLittleO_nhds_zero.1 hf) ε0, hlip] with y hy hyC
  rw [add_sub_cancel_left] at hyC
  calc
    âf' yâ †âf (xâ + y) - f xââ + âf (xâ + y) - f xâ - f' yâ := norm_le_insert _ _
    _ †C * âyâ + ε * âyâ := add_le_add hyC hy
    _ = (C + ε) * âyâ := (add_mul _ _ _).symm
/-- Converse to the mean value inequality: if `f` is differentiable at `xâ` and `C`-lipschitz
on a neighborhood of `xâ` then its derivative at `xâ` has norm bounded by `C`. -/
theorem HasFDerivAt.le_of_lipschitzOn
    {f : E â F} {f' : E âL[ð] F} {xâ : E} (hf : HasFDerivAt f f' xâ)
    {s : Set E} (hs : s â ð xâ) {C : ââ¥0} (hlip : LipschitzOnWith C f s) : âf'â †C := by
  refine hf.le_of_lip' C.coe_nonneg ?_
  filter_upwards [hs] with x hx using hlip.norm_sub_le hx (mem_of_mem_nhds hs)
/-- Converse to the mean value inequality: if `f` is differentiable at `xâ` and `C`-lipschitz
then its derivative at `xâ` has norm bounded by `C`. -/
theorem HasFDerivAt.le_of_lipschitz {f : E â F} {f' : E âL[ð] F} {xâ : E} (hf : HasFDerivAt f f' xâ)
    {C : ââ¥0} (hlip : LipschitzWith C f) : âf'â †C :=
  hf.le_of_lipschitzOn univ_mem (lipschitzOnWith_univ.2 hlip)
/-- A derivative along a filter is also a derivative along any smaller filter. -/
nonrec theorem HasFDerivAtFilter.mono (h : HasFDerivAtFilter f f' x Lâ) (hst : Lâ †Lâ) :
    HasFDerivAtFilter f f' x Lâ :=
  .of_isLittleO <| h.isLittleO.mono hst
/-- A derivative within `t` is a derivative within `s` whenever `t` is a neighborhood of `x`
within `s`. -/
theorem HasFDerivWithinAt.mono_of_mem (h : HasFDerivWithinAt f f' t x) (hst : t â ð[s] x) :
    HasFDerivWithinAt f f' s x :=
  h.mono <| nhdsWithin_le_iff.mpr hst
/-- A derivative within a set is also a derivative within any subset. -/
nonrec theorem HasFDerivWithinAt.mono (h : HasFDerivWithinAt f f' t x) (hst : s â t) :
    HasFDerivWithinAt f f' s x :=
  h.mono <| nhdsWithin_mono _ hst
/-- A derivative at `x` is a derivative along any filter smaller than `ð x`. -/
theorem HasFDerivAt.hasFDerivAtFilter (h : HasFDerivAt f f' x) (hL : L †ð x) :
    HasFDerivAtFilter f f' x L :=
  h.mono hL
/-- A full derivative at `x` is also a derivative within any set `s` at `x`. -/
@[fun_prop]
theorem HasFDerivAt.hasFDerivWithinAt (h : HasFDerivAt f f' x) : HasFDerivWithinAt f f' s x :=
  h.hasFDerivAtFilter inf_le_left
/-- Having some derivative within `s` at `x` witnesses differentiability within `s` at `x`. -/
@[fun_prop]
theorem HasFDerivWithinAt.differentiableWithinAt (h : HasFDerivWithinAt f f' s x) :
    DifferentiableWithinAt 𝕜 f s x :=
  ⟨f', h⟩
/-- Having some derivative at `x` witnesses differentiability at `x`. -/
@[fun_prop]
theorem HasFDerivAt.differentiableAt (h : HasFDerivAt f f' x) : DifferentiableAt 𝕜 f x :=
  ⟨f', h⟩
/-- Differentiation within `univ` is the same as unrestricted differentiation,
since `𝓝[univ] x = 𝓝 x`. -/
@[simp]
theorem hasFDerivWithinAt_univ : HasFDerivWithinAt f f' univ x ↔ HasFDerivAt f f' x := by
  simp only [HasFDerivWithinAt, nhdsWithin_univ]
  rfl
alias ⟨HasFDerivWithinAt.hasFDerivAt_of_univ, _⟩ := hasFDerivWithinAt_univ
/-- If `s` is a neighborhood of `x`, differentiation within `s` at `x` coincides with
unrestricted differentiation at `x`. -/
theorem hasFDerivWithinAt_of_mem_nhds (h : s ∈ 𝓝 x) :
    HasFDerivWithinAt f f' s x ↔ HasFDerivAt f f' x := by
  rw [HasFDerivAt, HasFDerivWithinAt, nhdsWithin_eq_nhds.mpr h]
/-- On an open set, differentiation within the set coincides with unrestricted
differentiation at each of its points. -/
lemma hasFDerivWithinAt_of_isOpen (h : IsOpen s) (hx : x ∈ s) :
    HasFDerivWithinAt f f' s x ↔ HasFDerivAt f f' x :=
  hasFDerivWithinAt_of_mem_nhds (h.mem_nhds hx)
theorem hasFDerivWithinAt_insert {y : E} :
HasFDerivWithinAt f f' (insert y s) x â HasFDerivWithinAt f f' s x := by
rcases eq_or_ne x y with (rfl | h)
· simp_rw [HasFDerivWithinAt, hasFDerivAtFilter_iff_isLittleO]
apply Asymptotics.isLittleO_insert
simp only [sub_self, map_zero]
refine âšfun h => h.mono <| subset_insert y s, fun hf => hf.mono_of_mem ?_â©
simp_rw [nhdsWithin_insert_of_ne h, self_mem_nhdsWithin]
alias âšHasFDerivWithinAt.of_insert, HasFDerivWithinAt.insert'â© := hasFDerivWithinAt_insert
protected theorem HasFDerivWithinAt.insert (h : HasFDerivWithinAt g g' s x) :
HasFDerivWithinAt g g' (insert x s) x :=
h.insert'
/-- Removing a single point from the set does not change differentiability within it. -/
theorem hasFDerivWithinAt_diff_singleton (y : E) :
    HasFDerivWithinAt f f' (s \ {y}) x ↔ HasFDerivWithinAt f f' s x := by
  rw [← hasFDerivWithinAt_insert, insert_diff_singleton, hasFDerivWithinAt_insert]
/-- A function with a strict derivative at `x` satisfies `f p.1 - f p.2 = O(p.1 - p.2)`
as `p → (x, x)`. -/
theorem HasStrictFDerivAt.isBigO_sub (hf : HasStrictFDerivAt f f' x) :
    (fun p : E × E => f p.1 - f p.2) =O[𝓝 (x, x)] fun p : E × E => p.1 - p.2 :=
  hf.isBigO.congr_of_sub.2 (f'.isBigO_comp _ _)
/-- A function with a derivative along `L` satisfies `f x' - f x = O(x' - x)` along `L`. -/
theorem HasFDerivAtFilter.isBigO_sub (h : HasFDerivAtFilter f f' x L) :
    (fun x' => f x' - f x) =O[L] fun x' => x' - x :=
  h.isLittleO.isBigO.congr_of_sub.2 (f'.isBigO_sub _ _)
@[fun_prop]
protected theorem HasStrictFDerivAt.hasFDerivAt (hf : HasStrictFDerivAt f f' x) :
HasFDerivAt f f' x := by
rw [HasFDerivAt, hasFDerivAtFilter_iff_isLittleO, isLittleO_iff]
exact fun c hc => tendsto_id.prod_mk_nhds tendsto_const_nhds (isLittleO_iff.1 hf hc)
protected theorem HasStrictFDerivAt.differentiableAt (hf : HasStrictFDerivAt f f' x) :
DifferentiableAt ð f x :=
hf.hasFDerivAt.differentiableAt
/-- If `f` is strictly differentiable at `x` with derivative `f'` and `K > âf'ââ`, then `f` is
`K`-Lipschitz in a neighborhood of `x`. -/
theorem HasStrictFDerivAt.exists_lipschitzOnWith_of_nnnorm_lt (hf : HasStrictFDerivAt f f' x)
(K : ââ¥0) (hK : âf'ââ < K) : â s â ð x, LipschitzOnWith K f s := by
have := hf.add_isBigOWith (f'.isBigOWith_comp _ _) hK
simp only [sub_add_cancel, IsBigOWith] at this
rcases exists_nhds_square this with âšU, Uo, xU, hUâ©
exact
âšU, Uo.mem_nhds xU, lipschitzOnWith_iff_norm_sub_le.2 fun x hx y hy => hU (mk_mem_prod hx hy)â©
/-- If `f` is strictly differentiable at `x` with derivative `f'`, then `f` is Lipschitz in a
neighborhood of `x`. See also `HasStrictFDerivAt.exists_lipschitzOnWith_of_nnnorm_lt` for a
more precise statement. -/
theorem HasStrictFDerivAt.exists_lipschitzOnWith (hf : HasStrictFDerivAt f f' x) :
â K, â s â ð x, LipschitzOnWith K f s :=
(exists_gt _).imp hf.exists_lipschitzOnWith_of_nnnorm_lt
/-- Directional derivative agrees with `HasFDeriv`. -/
theorem HasFDerivAt.lim (hf : HasFDerivAt f f' x) (v : E) {α : Type*} {c : α â ð} {l : Filter α}
(hc : Tendsto (fun n => âc nâ) l atTop) :
Tendsto (fun n => c n ⢠(f (x + (c n)â»Â¹ ⢠v) - f x)) l (ð (f' v)) := by
refine (hasFDerivWithinAt_univ.2 hf).lim _ univ_mem hc ?_
intro U hU
refine (eventually_ne_of_tendsto_norm_atTop hc (0 : ð)).mono fun y hy => ?_
convert mem_of_mem_nhds hU
dsimp only
rw [â mul_smul, mul_inv_cancel hy, one_smul]
/-- The (unrestricted) derivative of a function at a point is unique. -/
theorem HasFDerivAt.unique (h₀ : HasFDerivAt f f₀' x) (h₁ : HasFDerivAt f f₁' x) : f₀' = f₁' := by
  rw [← hasFDerivWithinAt_univ] at h₀ h₁
  exact uniqueDiffWithinAt_univ.eq h₀ h₁
/-- Intersecting with a set that belongs to `𝓝[s] x` does not change differentiability
within `s` at `x`. -/
theorem hasFDerivWithinAt_inter' (h : t ∈ 𝓝[s] x) :
    HasFDerivWithinAt f f' (s ∩ t) x ↔ HasFDerivWithinAt f f' s x := by
  simp [HasFDerivWithinAt, nhdsWithin_restrict'' s h]
/-- Intersecting with a neighborhood of `x` does not change differentiability
within `s` at `x`. -/
theorem hasFDerivWithinAt_inter (h : t ∈ 𝓝 x) :
    HasFDerivWithinAt f f' (s ∩ t) x ↔ HasFDerivWithinAt f f' s x := by
  simp [HasFDerivWithinAt, nhdsWithin_restrict' s h]
/-- A common derivative within `s` and within `t` is a derivative within `s ∪ t`,
since `𝓝[s ∪ t] x = 𝓝[s] x ⊔ 𝓝[t] x`. -/
theorem HasFDerivWithinAt.union (hs : HasFDerivWithinAt f f' s x)
    (ht : HasFDerivWithinAt f f' t x) : HasFDerivWithinAt f f' (s ∪ t) x := by
  simp only [HasFDerivWithinAt, nhdsWithin_union]
  exact .of_isLittleO <| hs.isLittleO.sup ht.isLittleO
/-- A derivative within a set that is a neighborhood of `x` upgrades to an unrestricted
derivative at `x`. -/
theorem HasFDerivWithinAt.hasFDerivAt (h : HasFDerivWithinAt f f' s x) (hs : s ∈ 𝓝 x) :
    HasFDerivAt f f' x := by
  rwa [← univ_inter s, hasFDerivWithinAt_inter hs, hasFDerivWithinAt_univ] at h
/-- Differentiability within a set that is a neighborhood of `x` upgrades to
unrestricted differentiability at `x`. -/
theorem DifferentiableWithinAt.differentiableAt (h : DifferentiableWithinAt 𝕜 f s x)
    (hs : s ∈ 𝓝 x) : DifferentiableAt 𝕜 f x :=
  h.imp fun _ hf' => hf'.hasFDerivAt hs
/-- If `x` is isolated in `s`, then `f` has any derivative at `x` within `s`,
as this statement is empty. -/
theorem HasFDerivWithinAt.of_nhdsWithin_eq_bot (h : ð[s\{x}] x = â¥) :
HasFDerivWithinAt f f' s x := by
rw [â hasFDerivWithinAt_diff_singleton x, HasFDerivWithinAt, h, hasFDerivAtFilter_iff_isLittleO]
apply isLittleO_bot
/-- If `x` is not in the closure of `s`, then `f` has any derivative at `x` within `s`,
as this statement is empty. -/
theorem hasFDerivWithinAt_of_nmem_closure (h : x â closure s) : HasFDerivWithinAt f f' s x :=
.of_nhdsWithin_eq_bot <| eq_bot_mono (nhdsWithin_mono _ diff_subset) <| by
rwa [mem_closure_iff_nhdsWithin_neBot, not_neBot] at h
/-- If `f` is differentiable within `s` at `x`, then `fderivWithin 𝕜 f s x` is indeed a
derivative of `f` within `s` at `x`. -/
theorem DifferentiableWithinAt.hasFDerivWithinAt (h : DifferentiableWithinAt 𝕜 f s x) :
    HasFDerivWithinAt f (fderivWithin 𝕜 f s x) s x := by
  -- When `x` is isolated in `s`, any map (in particular `fderivWithin`) is a derivative.
  by_cases H : 𝓝[s \ {x}] x = ⊥
  · exact .of_nhdsWithin_eq_bot H
  -- Otherwise `fderivWithin` is defined by choice from the differentiability witness.
  · unfold DifferentiableWithinAt at h
    rw [fderivWithin, if_neg H, dif_pos h]
    exact Classical.choose_spec h
/-- If `f` is differentiable at `x`, then `fderiv 𝕜 f x` is indeed a derivative of `f`
at `x`. -/
theorem DifferentiableAt.hasFDerivAt (h : DifferentiableAt 𝕜 f x) :
    HasFDerivAt f (fderiv 𝕜 f x) x := by
  dsimp only [DifferentiableAt] at h
  -- `fderiv` is defined by choice from the differentiability witness.
  rw [fderiv, dif_pos h]
  exact Classical.choose_spec h
/-- A function differentiable on a neighborhood of `x` has `fderiv 𝕜 f x` as derivative
at `x`. -/
theorem DifferentiableOn.hasFDerivAt (h : DifferentiableOn 𝕜 f s) (hs : s ∈ 𝓝 x) :
    HasFDerivAt f (fderiv 𝕜 f x) x :=
  ((h x (mem_of_mem_nhds hs)).differentiableAt hs).hasFDerivAt
/-- A function differentiable on a neighborhood of `x` is differentiable at `x`. -/
theorem DifferentiableOn.differentiableAt (h : DifferentiableOn 𝕜 f s) (hs : s ∈ 𝓝 x) :
    DifferentiableAt 𝕜 f x :=
  (h.hasFDerivAt hs).differentiableAt
/-- A function differentiable on a neighborhood of `x` is differentiable at every point
in some neighborhood of `x`. -/
theorem DifferentiableOn.eventually_differentiableAt (h : DifferentiableOn 𝕜 f s) (hs : s ∈ 𝓝 x) :
    ∀ᶠ y in 𝓝 x, DifferentiableAt 𝕜 f y :=
  (eventually_eventually_nhds.2 hs).mono fun _ => h.differentiableAt
/-- If `f` has derivative `f'` at `x`, then `fderiv 𝕜 f x` equals `f'`
(by uniqueness of derivatives). -/
protected theorem HasFDerivAt.fderiv (h : HasFDerivAt f f' x) : fderiv 𝕜 f x = f' := by
  ext
  rw [h.unique h.differentiableAt.hasFDerivAt]
/-- A global derivative assignment `f'` that works at every point is the `fderiv` of `f`. -/
theorem fderiv_eq {f' : E → E →L[𝕜] F} (h : ∀ x, HasFDerivAt f (f' x) x) : fderiv 𝕜 f = f' :=
  funext fun x => (h x).fderiv
variable (ð)
/-- Converse to the mean value inequality: if `f` is `C`-lipschitz
on a neighborhood of `xâ` then its derivative at `xâ` has norm bounded by `C`. This version
only assumes that `âf x - f xââ †C * âx - xââ` in a neighborhood of `x`. -/
theorem norm_fderiv_le_of_lip' {f : E â F} {xâ : E}
{C : â} (hCâ : 0 †C) (hlip : âá¶ x in ð xâ, âf x - f xââ †C * âx - xââ) :
âfderiv ð f xââ †C := by
by_cases hf : DifferentiableAt ð f xâ
· exact hf.hasFDerivAt.le_of_lip' hCâ hlip
· rw [fderiv_zero_of_not_differentiableAt hf]
simp [hCâ]
/-- Converse to the mean value inequality: if `f` is `C`-lipschitz
on a neighborhood of `xâ` then its derivative at `xâ` has norm bounded by `C`.
Version using `fderiv`. -/
-- Porting note: renamed so that dot-notation makes sense
theorem norm_fderiv_le_of_lipschitzOn {f : E â F} {xâ : E} {s : Set E} (hs : s â ð xâ)
{C : ââ¥0} (hlip : LipschitzOnWith C f s) : âfderiv ð f xââ †C := by
refine norm_fderiv_le_of_lip' ð C.coe_nonneg ?_
filter_upwards [hs] with x hx using hlip.norm_sub_le hx (mem_of_mem_nhds hs)
/-- Converse to the mean value inequality: if `f` is `C`-lipschitz then
its derivative at `xâ` has norm bounded by `C`.
Version using `fderiv`. -/
theorem norm_fderiv_le_of_lipschitz {f : E â F} {xâ : E}
{C : ââ¥0} (hlip : LipschitzWith C f) : âfderiv ð f xââ †C :=
norm_fderiv_le_of_lipschitzOn ð univ_mem (lipschitzOnWith_univ.2 hlip)
variable {ð}
/-- If `f` has derivative `f'` within `s` at `x` and `s` has the unique-differentiability
property at `x`, then `fderivWithin 𝕜 f s x` equals `f'`. -/
protected theorem HasFDerivWithinAt.fderivWithin (h : HasFDerivWithinAt f f' s x)
    (hxs : UniqueDiffWithinAt 𝕜 s x) : fderivWithin 𝕜 f s x = f' :=
  (hxs.eq h h.differentiableWithinAt.hasFDerivWithinAt).symm
/-- Differentiability within a set implies differentiability within any subset. -/
theorem DifferentiableWithinAt.mono (h : DifferentiableWithinAt 𝕜 f t x) (st : s ⊆ t) :
    DifferentiableWithinAt 𝕜 f s x := by
  rcases h with ⟨f', hf'⟩
  exact ⟨f', hf'.mono st⟩
/-- Differentiability within `s` at `x` transfers to any set `t` for which `s ∈ 𝓝[t] x`. -/
theorem DifferentiableWithinAt.mono_of_mem (h : DifferentiableWithinAt 𝕜 f s x) {t : Set E}
    (hst : s ∈ 𝓝[t] x) : DifferentiableWithinAt 𝕜 f t x :=
  (h.hasFDerivWithinAt.mono_of_mem hst).differentiableWithinAt
theorem differentiableWithinAt_univ :
DifferentiableWithinAt ð f univ x â DifferentiableAt ð f x := by
simp only [DifferentiableWithinAt, hasFDerivWithinAt_univ, DifferentiableAt]
theorem differentiableWithinAt_inter (ht : t â ð x) :
DifferentiableWithinAt ð f (s â© t) x â DifferentiableWithinAt ð f s x := by
simp only [DifferentiableWithinAt, hasFDerivWithinAt_inter ht]
theorem differentiableWithinAt_inter' (ht : t â ð[s] x) :
DifferentiableWithinAt ð f (s â© t) x â DifferentiableWithinAt ð f s x := by
simp only [DifferentiableWithinAt, hasFDerivWithinAt_inter' ht]
theorem DifferentiableAt.differentiableWithinAt (h : DifferentiableAt ð f x) :
DifferentiableWithinAt ð f s x :=
(differentiableWithinAt_univ.2 h).mono (subset_univ _)
@[fun_prop]
theorem Differentiable.differentiableAt (h : Differentiable ð f) : DifferentiableAt ð f x :=
h x
protected theorem DifferentiableAt.fderivWithin (h : DifferentiableAt ð f x)
(hxs : UniqueDiffWithinAt ð s x) : fderivWithin ð f s x = fderiv ð f x :=
h.hasFDerivAt.hasFDerivWithinAt.fderivWithin hxs
theorem DifferentiableOn.mono (h : DifferentiableOn ð f t) (st : s â t) : DifferentiableOn ð f s :=
fun x hx => (h x (st hx)).mono st
theorem differentiableOn_univ : DifferentiableOn ð f univ â Differentiable ð f := by
simp only [DifferentiableOn, Differentiable, differentiableWithinAt_univ, mem_univ,
forall_true_left]
@[fun_prop]
theorem Differentiable.differentiableOn (h : Differentiable ð f) : DifferentiableOn ð f s :=
(differentiableOn_univ.2 h).mono (subset_univ _)
theorem differentiableOn_of_locally_differentiableOn
(h : â x â s, â u, IsOpen u â§ x â u â§ DifferentiableOn ð f (s â© u)) :
DifferentiableOn ð f s := by
intro x xs
rcases h x xs with âšt, t_open, xt, htâ©
exact (differentiableWithinAt_inter (IsOpen.mem_nhds t_open xt)).1 (ht x âšxs, xtâ©)
theorem fderivWithin_of_mem (st : t â ð[s] x) (ht : UniqueDiffWithinAt ð s x)
(h : DifferentiableWithinAt ð f t x) : fderivWithin ð f s x = fderivWithin ð f t x :=
((DifferentiableWithinAt.hasFDerivWithinAt h).mono_of_mem st).fderivWithin ht
theorem fderivWithin_subset (st : s â t) (ht : UniqueDiffWithinAt ð s x)
(h : DifferentiableWithinAt ð f t x) : fderivWithin ð f s x = fderivWithin ð f t x :=
fderivWithin_of_mem (nhdsWithin_mono _ st self_mem_nhdsWithin) ht h
theorem fderivWithin_inter (ht : t â ð x) : fderivWithin ð f (s â© t) x = fderivWithin ð f s x := by
have A : ð[(s â© t) \ {x}] x = ð[s \ {x}] x := by
have : (s â© t) \ {x} = (s \ {x}) â© t := by rw [inter_comm, inter_diff_assoc, inter_comm]
rw [this, â nhdsWithin_restrict' _ ht]
simp [fderivWithin, A, hasFDerivWithinAt_inter ht]
@[simp]
theorem fderivWithin_univ : fderivWithin ð f univ = fderiv ð f := by
ext1 x
nontriviality E
have H : ð[univ \ {x}] x â ⥠:= by
rw [â compl_eq_univ_diff, â neBot_iff]
exact Module.punctured_nhds_neBot ð E x
simp [fderivWithin, fderiv, H]
/-- On a set that is a neighborhood of `x`, the within-set derivative coincides with the
full derivative at `x`. -/
theorem fderivWithin_of_mem_nhds (h : s ∈ 𝓝 x) : fderivWithin 𝕜 f s x = fderiv 𝕜 f x := by
  rw [← fderivWithin_univ, ← univ_inter s, fderivWithin_inter h]
/-- On an open set, the within-set derivative coincides with the full derivative at each
of its points. -/
theorem fderivWithin_of_isOpen (hs : IsOpen s) (hx : x ∈ s) : fderivWithin 𝕜 f s x = fderiv 𝕜 f x :=
  fderivWithin_of_mem_nhds (hs.mem_nhds hx)
/-- If `f` is differentiable at `x` and `s` has the unique-differentiability property at
`x`, then the within-set derivative agrees with the full derivative. -/
theorem fderivWithin_eq_fderiv (hs : UniqueDiffWithinAt 𝕜 s x) (h : DifferentiableAt 𝕜 f x) :
    fderivWithin 𝕜 f s x = fderiv 𝕜 f x := by
  rw [← fderivWithin_univ]
  exact fderivWithin_subset (subset_univ _) hs h.differentiableWithinAt
theorem fderiv_mem_iff {f : E â F} {s : Set (E âL[ð] F)} {x : E} : fderiv ð f x â s â
DifferentiableAt ð f x â§ fderiv ð f x â s ⚠¬DifferentiableAt ð f x â§ (0 : E âL[ð] F) â s := by
by_cases hx : DifferentiableAt ð f x <;> simp [fderiv_zero_of_not_differentiableAt, *]
theorem fderivWithin_mem_iff {f : E â F} {t : Set E} {s : Set (E âL[ð] F)} {x : E} :
fderivWithin ð f t x â s â
DifferentiableWithinAt ð f t x â§ fderivWithin ð f t x â s âš
¬DifferentiableWithinAt ð f t x â§ (0 : E âL[ð] F) â s := by
by_cases hx : DifferentiableWithinAt ð f t x <;>
simp [fderivWithin_zero_of_not_differentiableWithinAt, *]
theorem Asymptotics.IsBigO.hasFDerivWithinAt {s : Set E} {xâ : E} {n : â}
(h : f =O[ð[s] xâ] fun x => âx - xââ ^ n) (hxâ : xâ â s) (hn : 1 < n) :
HasFDerivWithinAt f (0 : E âL[ð] F) s xâ := by
simp_rw [HasFDerivWithinAt, hasFDerivAtFilter_iff_isLittleO,
h.eq_zero_of_norm_pow_within hxâ hn.ne_bot, zero_apply, sub_zero,
h.trans_isLittleO ((isLittleO_pow_sub_sub xâ hn).mono nhdsWithin_le_nhds)]
theorem Asymptotics.IsBigO.hasFDerivAt {xâ : E} {n : â} (h : f =O[ð xâ] fun x => âx - xââ ^ n)
(hn : 1 < n) : HasFDerivAt f (0 : E âL[ð] F) xâ := by
rw [â nhdsWithin_univ] at h
exact (h.hasFDerivWithinAt (mem_univ _) hn).hasFDerivAt_of_univ
nonrec theorem HasFDerivWithinAt.isBigO_sub {f : E â F} {s : Set E} {xâ : E} {f' : E âL[ð] F}
(h : HasFDerivWithinAt f f' s xâ) : (f · - f xâ) =O[ð[s] xâ] (· - xâ) :=
h.isBigO_sub
lemma DifferentiableWithinAt.isBigO_sub {f : E â F} {s : Set E} {xâ : E}
(h : DifferentiableWithinAt ð f s xâ) : (f · - f xâ) =O[ð[s] xâ] (· - xâ) :=
h.hasFDerivWithinAt.isBigO_sub
nonrec theorem HasFDerivAt.isBigO_sub {f : E â F} {xâ : E} {f' : E âL[ð] F}
(h : HasFDerivAt f f' xâ) : (f · - f xâ) =O[ð xâ] (· - xâ) :=
h.isBigO_sub
nonrec theorem DifferentiableAt.isBigO_sub {f : E â F} {xâ : E} (h : DifferentiableAt ð f xâ) :
(f · - f xâ) =O[ð xâ] (· - xâ) :=
h.hasFDerivAt.isBigO_sub
end FDerivProperties
section Continuous
/-! ### Deducing continuity from differentiability -/
theorem HasFDerivAtFilter.tendsto_nhds (hL : L †ð x) (h : HasFDerivAtFilter f f' x L) :
Tendsto f L (ð (f x)) := by
have : Tendsto (fun x' => f x' - f x) L (ð 0) := by
refine h.isBigO_sub.trans_tendsto (Tendsto.mono_left ?_ hL)
rw [â sub_self x]
exact tendsto_id.sub tendsto_const_nhds
have := this.add (tendsto_const_nhds (x := f x))
rw [zero_add (f x)] at this
exact this.congr (by simp only [sub_add_cancel, eq_self_iff_true, forall_const])
/-- Differentiability within a set at a point implies continuity within the set there. -/
theorem HasFDerivWithinAt.continuousWithinAt (h : HasFDerivWithinAt f f' s x) :
    ContinuousWithinAt f s x :=
  HasFDerivAtFilter.tendsto_nhds inf_le_left h
/-- Differentiability at a point implies continuity at that point. -/
theorem HasFDerivAt.continuousAt (h : HasFDerivAt f f' x) : ContinuousAt f x :=
  HasFDerivAtFilter.tendsto_nhds le_rfl h
@[fun_prop]
theorem DifferentiableWithinAt.continuousWithinAt (h : DifferentiableWithinAt ð f s x) :
ContinuousWithinAt f s x :=
let âš_, hf'â© := h
hf'.continuousWithinAt
@[fun_prop]
theorem DifferentiableAt.continuousAt (h : DifferentiableAt ð f x) : ContinuousAt f x :=
let âš_, hf'â© := h
hf'.continuousAt
@[fun_prop]
theorem DifferentiableOn.continuousOn (h : DifferentiableOn ð f s) : ContinuousOn f s := fun x hx =>
(h x hx).continuousWithinAt
@[fun_prop]
theorem Differentiable.continuous (h : Differentiable ð f) : Continuous f :=
continuous_iff_continuousAt.2 fun x => (h x).continuousAt
protected theorem HasStrictFDerivAt.continuousAt (hf : HasStrictFDerivAt f f' x) :
ContinuousAt f x :=
hf.hasFDerivAt.continuousAt
theorem HasStrictFDerivAt.isBigO_sub_rev {f' : E âL[ð] F}
(hf : HasStrictFDerivAt f (f' : E âL[ð] F) x) :
(fun p : E Ã E => p.1 - p.2) =O[ð (x, x)] fun p : E Ã E => f p.1 - f p.2 :=
((f'.isBigO_comp_rev _ _).trans (hf.trans_isBigO (f'.isBigO_comp_rev _ _)).right_isBigO_add).congr
(fun _ => rfl) fun _ => sub_add_cancel _ _
theorem HasFDerivAtFilter.isBigO_sub_rev (hf : HasFDerivAtFilter f f' x L) {C}
(hf' : AntilipschitzWith C f') : (fun x' => x' - x) =O[L] fun x' => f x' - f x :=
have : (fun x' => x' - x) =O[L] fun x' => f' (x' - x) :=
isBigO_iff.2 âšC, eventually_of_forall fun _ => ZeroHomClass.bound_of_antilipschitz f' hf' _â©
(this.trans (hf.isLittleO.trans_isBigO this).right_isBigO_add).congr (fun _ => rfl) fun _ =>
sub_add_cancel _ _
end Continuous
section congr
/-! ### congr properties of the derivative -/
theorem hasFDerivWithinAt_congr_set' (y : E) (h : s =á¶ [ð[{y}á¶] x] t) :
HasFDerivWithinAt f f' s x â HasFDerivWithinAt f f' t x :=
calc
HasFDerivWithinAt f f' s x â HasFDerivWithinAt f f' (s \ {y}) x :=
(hasFDerivWithinAt_diff_singleton _).symm
_ â HasFDerivWithinAt f f' (t \ {y}) x := by
suffices ð[s \ {y}] x = ð[t \ {y}] x by simp only [HasFDerivWithinAt, this]
simpa only [set_eventuallyEq_iff_inf_principal, â nhdsWithin_inter', diff_eq,
inter_comm] using h
_ â HasFDerivWithinAt f f' t x := hasFDerivWithinAt_diff_singleton _
theorem hasFDerivWithinAt_congr_set (h : s =á¶ [ð x] t) :
HasFDerivWithinAt f f' s x â HasFDerivWithinAt f f' t x :=
hasFDerivWithinAt_congr_set' x <| h.filter_mono inf_le_left
theorem differentiableWithinAt_congr_set' (y : E) (h : s =á¶ [ð[{y}á¶] x] t) :
DifferentiableWithinAt ð f s x â DifferentiableWithinAt ð f t x :=
exists_congr fun _ => hasFDerivWithinAt_congr_set' _ h
theorem differentiableWithinAt_congr_set (h : s =á¶ [ð x] t) :
DifferentiableWithinAt ð f s x â DifferentiableWithinAt ð f t x :=
exists_congr fun _ => hasFDerivWithinAt_congr_set h
theorem fderivWithin_congr_set' (y : E) (h : s =á¶ [ð[{y}á¶] x] t) :
fderivWithin ð f s x = fderivWithin ð f t x := by
have : s =á¶ [ð[{x}á¶] x] t := nhdsWithin_compl_singleton_le x y h
have : ð[s \ {x}] x = ð[t \ {x}] x := by
simpa only [set_eventuallyEq_iff_inf_principal, â nhdsWithin_inter', diff_eq,
inter_comm] using this
simp only [fderivWithin, hasFDerivWithinAt_congr_set' y h, this]
theorem fderivWithin_congr_set (h : s =á¶ [ð x] t) : fderivWithin ð f s x = fderivWithin ð f t x :=
fderivWithin_congr_set' x <| h.filter_mono inf_le_left
theorem fderivWithin_eventually_congr_set' (y : E) (h : s =á¶ [ð[{y}á¶] x] t) :
fderivWithin ð f s =á¶ [ð x] fderivWithin ð f t :=
(eventually_nhds_nhdsWithin.2 h).mono fun _ => fderivWithin_congr_set' y
theorem fderivWithin_eventually_congr_set (h : s =á¶ [ð x] t) :
fderivWithin ð f s =á¶ [ð x] fderivWithin ð f t :=
fderivWithin_eventually_congr_set' x <| h.filter_mono inf_le_left
theorem Filter.EventuallyEq.hasStrictFDerivAt_iff (h : fâ =á¶ [ð x] fâ) (h' : â y, fâ' y = fâ' y) :
HasStrictFDerivAt fâ fâ' x â HasStrictFDerivAt fâ fâ' x := by
refine isLittleO_congr ((h.prod_mk_nhds h).mono ?_) .rfl
rintro p âšhpâ, hpââ©
simp only [*]
theorem HasStrictFDerivAt.congr_fderiv (h : HasStrictFDerivAt f f' x) (h' : f' = g') :
HasStrictFDerivAt f g' x :=
h' âž h
theorem HasFDerivAt.congr_fderiv (h : HasFDerivAt f f' x) (h' : f' = g') : HasFDerivAt f g' x :=
h' âž h
theorem HasFDerivWithinAt.congr_fderiv (h : HasFDerivWithinAt f f' s x) (h' : f' = g') :
HasFDerivWithinAt f g' s x :=
h' âž h
theorem HasStrictFDerivAt.congr_of_eventuallyEq (h : HasStrictFDerivAt f f' x)
(hâ : f =á¶ [ð x] fâ) : HasStrictFDerivAt fâ f' x :=
(hâ.hasStrictFDerivAt_iff fun _ => rfl).1 h
theorem Filter.EventuallyEq.hasFDerivAtFilter_iff (hâ : fâ =á¶ [L] fâ) (hx : fâ x = fâ x)
(hâ : â x, fâ' x = fâ' x) : HasFDerivAtFilter fâ fâ' x L â HasFDerivAtFilter fâ fâ' x L := by
simp only [hasFDerivAtFilter_iff_isLittleO]
exact isLittleO_congr (hâ.mono fun y hy => by simp only [hy, hâ, hx]) .rfl
theorem HasFDerivAtFilter.congr_of_eventuallyEq (h : HasFDerivAtFilter f f' x L) (hL : fâ =á¶ [L] f)
(hx : fâ x = f x) : HasFDerivAtFilter fâ f' x L :=
(hL.hasFDerivAtFilter_iff hx fun _ => rfl).2 h
theorem Filter.EventuallyEq.hasFDerivAt_iff (h : fâ =á¶ [ð x] fâ) :
HasFDerivAt fâ f' x â HasFDerivAt fâ f' x :=
h.hasFDerivAtFilter_iff h.eq_of_nhds fun _ => _root_.rfl
theorem Filter.EventuallyEq.differentiableAt_iff (h : fâ =á¶ [ð x] fâ) :
DifferentiableAt ð fâ x â DifferentiableAt ð fâ x :=
exists_congr fun _ => h.hasFDerivAt_iff
theorem Filter.EventuallyEq.hasFDerivWithinAt_iff (h : fâ =á¶ [ð[s] x] fâ) (hx : fâ x = fâ x) :
HasFDerivWithinAt fâ f' s x â HasFDerivWithinAt fâ f' s x :=
h.hasFDerivAtFilter_iff hx fun _ => _root_.rfl
theorem Filter.EventuallyEq.hasFDerivWithinAt_iff_of_mem (h : fâ =á¶ [ð[s] x] fâ) (hx : x â s) :
HasFDerivWithinAt fâ f' s x â HasFDerivWithinAt fâ f' s x :=
h.hasFDerivWithinAt_iff (h.eq_of_nhdsWithin hx)
theorem Filter.EventuallyEq.differentiableWithinAt_iff (h : fâ =á¶ [ð[s] x] fâ) (hx : fâ x = fâ x) :
DifferentiableWithinAt ð fâ s x â DifferentiableWithinAt ð fâ s x :=
exists_congr fun _ => h.hasFDerivWithinAt_iff hx
theorem Filter.EventuallyEq.differentiableWithinAt_iff_of_mem (h : fâ =á¶ [ð[s] x] fâ) (hx : x â s) :
DifferentiableWithinAt ð fâ s x â DifferentiableWithinAt ð fâ s x :=
h.differentiableWithinAt_iff (h.eq_of_nhdsWithin hx)
theorem HasFDerivWithinAt.congr_mono (h : HasFDerivWithinAt f f' s x) (ht : EqOn fâ f t)
(hx : fâ x = f x) (hâ : t â s) : HasFDerivWithinAt fâ f' t x :=
HasFDerivAtFilter.congr_of_eventuallyEq (h.mono hâ) (Filter.mem_inf_of_right ht) hx
theorem HasFDerivWithinAt.congr (h : HasFDerivWithinAt f f' s x) (hs : EqOn fâ f s)
(hx : fâ x = f x) : HasFDerivWithinAt fâ f' s x :=
h.congr_mono hs hx (Subset.refl _)
theorem HasFDerivWithinAt.congr' (h : HasFDerivWithinAt f f' s x) (hs : EqOn fâ f s) (hx : x â s) :
HasFDerivWithinAt fâ f' s x :=
h.congr hs (hs hx)
theorem HasFDerivWithinAt.congr_of_eventuallyEq (h : HasFDerivWithinAt f f' s x)
(hâ : fâ =á¶ [ð[s] x] f) (hx : fâ x = f x) : HasFDerivWithinAt fâ f' s x :=
HasFDerivAtFilter.congr_of_eventuallyEq h hâ hx
theorem HasFDerivAt.congr_of_eventuallyEq (h : HasFDerivAt f f' x) (hâ : fâ =á¶ [ð x] f) :
HasFDerivAt fâ f' x :=
HasFDerivAtFilter.congr_of_eventuallyEq h hâ (mem_of_mem_nhds hâ : _)
theorem DifferentiableWithinAt.congr_mono (h : DifferentiableWithinAt ð f s x) (ht : EqOn fâ f t)
(hx : fâ x = f x) (hâ : t â s) : DifferentiableWithinAt ð fâ t x :=
(HasFDerivWithinAt.congr_mono h.hasFDerivWithinAt ht hx hâ).differentiableWithinAt
theorem DifferentiableWithinAt.congr (h : DifferentiableWithinAt ð f s x) (ht : â x â s, fâ x = f x)
(hx : fâ x = f x) : DifferentiableWithinAt ð fâ s x :=
DifferentiableWithinAt.congr_mono h ht hx (Subset.refl _)
theorem DifferentiableWithinAt.congr_of_eventuallyEq (h : DifferentiableWithinAt ð f s x)
(hâ : fâ =á¶ [ð[s] x] f) (hx : fâ x = f x) : DifferentiableWithinAt ð fâ s x :=
(h.hasFDerivWithinAt.congr_of_eventuallyEq hâ hx).differentiableWithinAt
theorem DifferentiableOn.congr_mono (h : DifferentiableOn ð f s) (h' : â x â t, fâ x = f x)
(hâ : t â s) : DifferentiableOn ð fâ t := fun x hx => (h x (hâ hx)).congr_mono h' (h' x hx) hâ
theorem DifferentiableOn.congr (h : DifferentiableOn ð f s) (h' : â x â s, fâ x = f x) :
DifferentiableOn ð fâ s := fun x hx => (h x hx).congr h' (h' x hx)
theorem differentiableOn_congr (h' : â x â s, fâ x = f x) :
DifferentiableOn ð fâ s â DifferentiableOn ð f s :=
âšfun h => DifferentiableOn.congr h fun y hy => (h' y hy).symm, fun h =>
DifferentiableOn.congr h h'â©
theorem DifferentiableAt.congr_of_eventuallyEq (h : DifferentiableAt ð f x) (hL : fâ =á¶ [ð x] f) :
DifferentiableAt ð fâ x :=
hL.differentiableAt_iff.2 h
theorem DifferentiableWithinAt.fderivWithin_congr_mono (h : DifferentiableWithinAt ð f s x)
(hs : EqOn fâ f t) (hx : fâ x = f x) (hxt : UniqueDiffWithinAt ð t x) (hâ : t â s) :
fderivWithin ð fâ t x = fderivWithin ð f s x :=
(HasFDerivWithinAt.congr_mono h.hasFDerivWithinAt hs hx hâ).fderivWithin hxt
theorem Filter.EventuallyEq.fderivWithin_eq (hs : fâ =á¶ [ð[s] x] f) (hx : fâ x = f x) :
fderivWithin ð fâ s x = fderivWithin ð f s x := by
simp only [fderivWithin, hs.hasFDerivWithinAt_iff hx]
theorem Filter.EventuallyEq.fderivWithin' (hs : fâ =á¶ [ð[s] x] f) (ht : t â s) :
fderivWithin ð fâ t =á¶ [ð[s] x] fderivWithin ð f t :=
(eventually_nhdsWithin_nhdsWithin.2 hs).mp <|
eventually_mem_nhdsWithin.mono fun _y hys hs =>
EventuallyEq.fderivWithin_eq (hs.filter_mono <| nhdsWithin_mono _ ht)
(hs.self_of_nhdsWithin hys)
protected theorem Filter.EventuallyEq.fderivWithin (hs : fâ =á¶ [ð[s] x] f) :
fderivWithin ð fâ s =á¶ [ð[s] x] fderivWithin ð f s :=
hs.fderivWithin' Subset.rfl
theorem Filter.EventuallyEq.fderivWithin_eq_nhds (h : fâ =á¶ [ð x] f) :
fderivWithin ð fâ s x = fderivWithin ð f s x :=
(h.filter_mono nhdsWithin_le_nhds).fderivWithin_eq h.self_of_nhds
theorem fderivWithin_congr (hs : EqOn fâ f s) (hx : fâ x = f x) :
fderivWithin ð fâ s x = fderivWithin ð f s x :=
(hs.eventuallyEq.filter_mono inf_le_right).fderivWithin_eq hx
theorem fderivWithin_congr' (hs : EqOn fâ f s) (hx : x â s) :
fderivWithin ð fâ s x = fderivWithin ð f s x :=
fderivWithin_congr hs (hs hx)
theorem Filter.EventuallyEq.fderiv_eq (h : fâ =á¶ [ð x] f) : fderiv ð fâ x = fderiv ð f x := by
rw [â fderivWithin_univ, â fderivWithin_univ, h.fderivWithin_eq_nhds]
protected theorem Filter.EventuallyEq.fderiv (h : fâ =á¶ [ð x] f) : fderiv ð fâ =á¶ [ð x] fderiv ð f :=
h.eventuallyEq_nhds.mono fun _ h => h.fderiv_eq
end congr
section id
/-! ### Derivative of the identity -/
@[fun_prop]
theorem hasStrictFDerivAt_id (x : E) : HasStrictFDerivAt id (id ð E) x :=
(isLittleO_zero _ _).congr_left <| by simp
/-- The identity function has the identity map as derivative along any filter. -/
theorem hasFDerivAtFilter_id (x : E) (L : Filter E) : HasFDerivAtFilter id (id 𝕜 E) x L :=
  .of_isLittleO <| (isLittleO_zero _ _).congr_left <| by simp
@[fun_prop]
theorem hasFDerivWithinAt_id (x : E) (s : Set E) : HasFDerivWithinAt id (id ð E) s x :=
hasFDerivAtFilter_id _ _
@[fun_prop]
theorem hasFDerivAt_id (x : E) : HasFDerivAt id (id ð E) x :=
hasFDerivAtFilter_id _ _
@[simp, fun_prop]
theorem differentiableAt_id : DifferentiableAt ð id x :=
(hasFDerivAt_id x).differentiableAt
@[simp]
theorem differentiableAt_id' : DifferentiableAt ð (fun x => x) x :=
(hasFDerivAt_id x).differentiableAt
@[fun_prop]
theorem differentiableWithinAt_id : DifferentiableWithinAt ð id s x :=
differentiableAt_id.differentiableWithinAt
@[simp, fun_prop]
theorem differentiable_id : Differentiable ð (id : E â E) := fun _ => differentiableAt_id
@[simp]
theorem differentiable_id' : Differentiable ð fun x : E => x := fun _ => differentiableAt_id
@[fun_prop]
theorem differentiableOn_id : DifferentiableOn ð id s :=
differentiable_id.differentiableOn
@[simp]
theorem fderiv_id : fderiv ð id x = id ð E :=
HasFDerivAt.fderiv (hasFDerivAt_id x)
@[simp]
theorem fderiv_id' : fderiv ð (fun x : E => x) x = ContinuousLinearMap.id ð E :=
fderiv_id
theorem fderivWithin_id (hxs : UniqueDiffWithinAt ð s x) : fderivWithin ð id s x = id ð E := by
rw [DifferentiableAt.fderivWithin differentiableAt_id hxs]
exact fderiv_id
theorem fderivWithin_id' (hxs : UniqueDiffWithinAt ð s x) :
fderivWithin ð (fun x : E => x) s x = ContinuousLinearMap.id ð E :=
fderivWithin_id hxs
end id
section Const
/-! ### Derivative of a constant function -/
@[fun_prop]
theorem hasStrictFDerivAt_const (c : F) (x : E) :
HasStrictFDerivAt (fun _ => c) (0 : E âL[ð] F) x :=
(isLittleO_zero _ _).congr_left fun _ => by simp only [zero_apply, sub_self]
/-- A constant function has the zero map as derivative along any filter. -/
theorem hasFDerivAtFilter_const (c : F) (x : E) (L : Filter E) :
    HasFDerivAtFilter (fun _ => c) (0 : E →L[𝕜] F) x L :=
  .of_isLittleO <| (isLittleO_zero _ _).congr_left fun _ => by simp only [zero_apply, sub_self]
@[fun_prop]
theorem hasFDerivWithinAt_const (c : F) (x : E) (s : Set E) :
HasFDerivWithinAt (fun _ => c) (0 : E âL[ð] F) s x :=
hasFDerivAtFilter_const _ _ _
@[fun_prop]
theorem hasFDerivAt_const (c : F) (x : E) : HasFDerivAt (fun _ => c) (0 : E âL[ð] F) x :=
hasFDerivAtFilter_const _ _ _
@[simp, fun_prop]
theorem differentiableAt_const (c : F) : DifferentiableAt ð (fun _ => c) x :=
âš0, hasFDerivAt_const c xâ©
@[fun_prop]
theorem differentiableWithinAt_const (c : F) : DifferentiableWithinAt ð (fun _ => c) s x :=
DifferentiableAt.differentiableWithinAt (differentiableAt_const _)
theorem fderiv_const_apply (c : F) : fderiv ð (fun _ => c) x = 0 :=
HasFDerivAt.fderiv (hasFDerivAt_const c x)
@[simp]
theorem fderiv_const (c : F) : (fderiv ð fun _ : E => c) = 0 := by
ext m
rw [fderiv_const_apply]
rfl
theorem fderivWithin_const_apply (c : F) (hxs : UniqueDiffWithinAt ð s x) :
fderivWithin ð (fun _ => c) s x = 0 := by
rw [DifferentiableAt.fderivWithin (differentiableAt_const _) hxs]
exact fderiv_const_apply _
@[simp, fun_prop]
theorem differentiable_const (c : F) : Differentiable ð fun _ : E => c := fun _ =>
differentiableAt_const _
@[simp, fun_prop]
theorem differentiableOn_const (c : F) : DifferentiableOn ð (fun _ => c) s :=
(differentiable_const _).differentiableOn
@[fun_prop]
theorem hasFDerivWithinAt_singleton (f : E â F) (x : E) :
HasFDerivWithinAt f (0 : E âL[ð] F) {x} x := by
simp only [HasFDerivWithinAt, nhdsWithin_singleton, hasFDerivAtFilter_iff_isLittleO,
isLittleO_pure, ContinuousLinearMap.zero_apply, sub_self]
@[fun_prop]
theorem hasFDerivAt_of_subsingleton [h : Subsingleton E] (f : E â F) (x : E) :
HasFDerivAt f (0 : E âL[ð] F) x := by
rw [â hasFDerivWithinAt_univ, subsingleton_univ.eq_singleton_of_mem (mem_univ x)]
exact hasFDerivWithinAt_singleton f x
@[fun_prop]
theorem differentiableOn_empty : DifferentiableOn ð f â
:= fun _ => False.elim
@[fun_prop]
theorem differentiableOn_singleton : DifferentiableOn ð f {x} :=
forall_eq.2 (hasFDerivWithinAt_singleton f x).differentiableWithinAt
@[fun_prop]
theorem Set.Subsingleton.differentiableOn (hs : s.Subsingleton) : DifferentiableOn ð f s :=
hs.induction_on differentiableOn_empty fun _ => differentiableOn_singleton
theorem hasFDerivAt_zero_of_eventually_const (c : F) (hf : f =á¶ [ð x] fun _ => c) :
HasFDerivAt f (0 : E âL[ð] F) x :=
(hasFDerivAt_const _ _).congr_of_eventuallyEq hf
end Const
end
/-! ### Support of derivatives -/
section Support
open Function
variable (ð : Type*) {E F : Type*} [NontriviallyNormedField ð] [NormedAddCommGroup E]
[NormedSpace ð E] [NormedAddCommGroup F] [NormedSpace ð F] {f : E â F} {x : E}
theorem HasStrictFDerivAt.of_nmem_tsupport (h : x â tsupport f) :
HasStrictFDerivAt f (0 : E âL[ð] F) x := by
rw [not_mem_tsupport_iff_eventuallyEq] at h
exact (hasStrictFDerivAt_const (0 : F) x).congr_of_eventuallyEq h.symm
theorem HasFDerivAt.of_nmem_tsupport (h : x â tsupport f) :
HasFDerivAt f (0 : E âL[ð] F) x :=
(HasStrictFDerivAt.of_nmem_tsupport ð h).hasFDerivAt
theorem HasFDerivWithinAt.of_not_mem_tsupport {s : Set E} {x : E} (h : x â tsupport f) :
HasFDerivWithinAt f (0 : E âL[ð] F) s x :=
(HasFDerivAt.of_nmem_tsupport ð h).hasFDerivWithinAt
theorem fderiv_of_not_mem_tsupport (h : x â tsupport f) : fderiv ð f x = 0 :=
(HasFDerivAt.of_nmem_tsupport ð h).fderiv
theorem support_fderiv_subset : support (fderiv ð f) â tsupport f := fun x ⊠by
rw [â not_imp_not, nmem_support]
exact fderiv_of_not_mem_tsupport _
theorem tsupport_fderiv_subset : tsupport (fderiv ð f) â tsupport f :=
closure_minimal (support_fderiv_subset ð) isClosed_closure
protected theorem HasCompactSupport.fderiv (hf : HasCompactSupport f) :
HasCompactSupport (fderiv ð f) :=
hf.mono' <| support_fderiv_subset ð
protected theorem HasCompactSupport.fderiv_apply (hf : HasCompactSupport f) (v : E) :
HasCompactSupport (fderiv ð f · v) :=
hf.fderiv ð |>.comp_left (g := fun L : E âL[ð] F ⊠L v) rfl
end Support
|
Analysis\Calculus\FDeriv\Bilinear.lean | /-
Copyright (c) 2019 Jeremy Avigad. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Jeremy Avigad, Sébastien Gouëzel, Yury Kudryashov
-/
import Mathlib.Analysis.Calculus.FDeriv.Prod
/-!
# The derivative of bounded bilinear maps
For detailed documentation of the Fréchet derivative,
see the module docstring of `Analysis/Calculus/Fderiv/Basic.lean`.
This file contains the usual formulas (and existence assertions) for the derivative of
bounded bilinear maps.
-/
open Filter Asymptotics ContinuousLinearMap Set Metric
open scoped Classical
open Topology NNReal Asymptotics ENNReal
noncomputable section
section
variable {ð : Type*} [NontriviallyNormedField ð]
variable {E : Type*} [NormedAddCommGroup E] [NormedSpace ð E]
variable {F : Type*} [NormedAddCommGroup F] [NormedSpace ð F]
variable {G : Type*} [NormedAddCommGroup G] [NormedSpace ð G]
variable {G' : Type*} [NormedAddCommGroup G'] [NormedSpace ð G']
variable {f fâ fâ g : E â F}
variable {f' fâ' fâ' g' : E âL[ð] F}
variable (e : E âL[ð] F)
variable {x : E}
variable {s t : Set E}
variable {L Lâ Lâ : Filter E}
section BilinearMap
/-! ### Derivative of a bounded bilinear map -/
variable {b : E Ã F â G} {u : Set (E Ã F)}
open NormedField
-- Porting note (#11215): TODO: rewrite/golf using analytic functions?
/-- A bounded bilinear map `b` is strictly differentiable at every point `p`, with derivative
`h.deriv p : (x, y) ↦ b (x, p.2) + b (p.1, y)`. The proof shifts the base point to `0` and
shows the second-order remainder `b (x.1 - x.2)` applied to a bounded argument is little-o
of `x.1 - x.2`. -/
@[fun_prop]
theorem IsBoundedBilinearMap.hasStrictFDerivAt (h : IsBoundedBilinearMap ð b) (p : E Ã F) :
HasStrictFDerivAt b (h.deriv p) p := by
simp only [HasStrictFDerivAt]
-- translate the filter so that the estimate is at `0` rather than at `(p, p)`
simp only [â map_add_left_nhds_zero (p, p), isLittleO_map]
set T := (E Ã F) Ã E Ã F
calc
-- step 1: the remainder is exactly the bilinear map of the difference against a bounded pair
_ = fun x ⊠h.deriv (x.1 - x.2) (x.2.1, x.1.2) := by
ext âšâšxâ, yââ©, âšxâ, yââ©â©
rcases p with âšx, yâ©
simp only [map_sub, deriv_apply, Function.comp_apply, Prod.mk_add_mk, h.add_right, h.add_left,
Prod.mk_sub_mk, h.map_sub_left, h.map_sub_right, sub_add_sub_cancel]
abel
-- _ =O[ð (0 : T)] fun x ⊠âx.1 - x.2â * â(x.2.1, x.1.2)â :=
-- h.toContinuousLinearMap.derivâ.isBoundedBilinearMap.isBigO_comp
-- _ = o[ð 0] fun x ⊠âx.1 - x.2â * 1 := _
-- step 2: that expression is little-o of the difference, since the second factor tends to `0`
_ =o[ð (0 : T)] fun x ⊠x.1 - x.2 := by
-- TODO : add 2 `calc` steps instead of the next 3 lines
refine h.toContinuousLinearMap.derivâ.isBoundedBilinearMap.isBigO_comp.trans_isLittleO ?_
suffices (fun x : T ⊠âx.1 - x.2â * â(x.2.1, x.1.2)â) =o[ð 0] fun x ⊠âx.1 - x.2â * 1 by
simpa only [mul_one, isLittleO_norm_right] using this
refine (isBigO_refl _ _).mul_isLittleO ((isLittleO_one_iff _).2 ?_)
-- TODO: `continuity` fails
exact (continuous_snd.fst.prod_mk continuous_fst.snd).norm.tendsto' _ _ (by simp)
_ = _ := by simp [(· â ·)]
-- The following lemmas are routine consequences of `IsBoundedBilinearMap.hasStrictFDerivAt`,
-- specializing strict differentiability down to the weaker notions.
/-- A bounded bilinear map is differentiable at every point. -/
@[fun_prop]
theorem IsBoundedBilinearMap.hasFDerivAt (h : IsBoundedBilinearMap ð b) (p : E Ã F) :
HasFDerivAt b (h.deriv p) p :=
(h.hasStrictFDerivAt p).hasFDerivAt
/-- A bounded bilinear map is differentiable at every point, within any set. -/
@[fun_prop]
theorem IsBoundedBilinearMap.hasFDerivWithinAt (h : IsBoundedBilinearMap ð b) (p : E Ã F) :
HasFDerivWithinAt b (h.deriv p) u p :=
(h.hasFDerivAt p).hasFDerivWithinAt
@[fun_prop]
theorem IsBoundedBilinearMap.differentiableAt (h : IsBoundedBilinearMap ð b) (p : E Ã F) :
DifferentiableAt ð b p :=
(h.hasFDerivAt p).differentiableAt
@[fun_prop]
theorem IsBoundedBilinearMap.differentiableWithinAt (h : IsBoundedBilinearMap ð b) (p : E Ã F) :
DifferentiableWithinAt ð b u p :=
(h.differentiableAt p).differentiableWithinAt
/-- The derivative of a bounded bilinear map at `p` is `h.deriv p`. -/
protected theorem IsBoundedBilinearMap.fderiv (h : IsBoundedBilinearMap ð b) (p : E Ã F) :
fderiv ð b p = h.deriv p :=
HasFDerivAt.fderiv (h.hasFDerivAt p)
/-- Within-set version of `IsBoundedBilinearMap.fderiv`; needs uniqueness of derivatives
within `u` at `p` to pin down `fderivWithin`. -/
protected theorem IsBoundedBilinearMap.fderivWithin (h : IsBoundedBilinearMap ð b) (p : E Ã F)
(hxs : UniqueDiffWithinAt ð u p) : fderivWithin ð b u p = h.deriv p := by
rw [DifferentiableAt.fderivWithin (h.differentiableAt p) hxs]
exact h.fderiv p
@[fun_prop]
theorem IsBoundedBilinearMap.differentiable (h : IsBoundedBilinearMap ð b) : Differentiable ð b :=
fun x => h.differentiableAt x
@[fun_prop]
theorem IsBoundedBilinearMap.differentiableOn (h : IsBoundedBilinearMap ð b) :
DifferentiableOn ð b u :=
h.differentiable.differentiableOn
variable (B : E âL[ð] F âL[ð] G)
-- Product rule for a continuous bilinear map `B` applied to two differentiable functions:
-- the derivative of `y ↦ B (f y) (g y)` is `B.precompR _ (f x) g' + B.precompL _ f' (g x)`,
-- i.e. "differentiate the second slot, then the first" (Leibniz rule).
/-- Leibniz rule for a continuous bilinear map, within a set. -/
@[fun_prop]
theorem ContinuousLinearMap.hasFDerivWithinAt_of_bilinear {f : G' â E} {g : G' â F}
{f' : G' âL[ð] E} {g' : G' âL[ð] F} {x : G'} {s : Set G'} (hf : HasFDerivWithinAt f f' s x)
(hg : HasFDerivWithinAt g g' s x) :
HasFDerivWithinAt (fun y => B (f y) (g y))
(B.precompR G' (f x) g' + B.precompL G' f' (g x)) s x :=
-- compose the derivative of the bilinear map with the derivative of `(f, g)`
(B.isBoundedBilinearMap.hasFDerivAt (f x, g x)).comp_hasFDerivWithinAt x (hf.prod hg)
/-- Leibniz rule for a continuous bilinear map. -/
@[fun_prop]
theorem ContinuousLinearMap.hasFDerivAt_of_bilinear {f : G' â E} {g : G' â F} {f' : G' âL[ð] E}
{g' : G' âL[ð] F} {x : G'} (hf : HasFDerivAt f f' x) (hg : HasFDerivAt g g' x) :
HasFDerivAt (fun y => B (f y) (g y)) (B.precompR G' (f x) g' + B.precompL G' f' (g x)) x :=
(B.isBoundedBilinearMap.hasFDerivAt (f x, g x)).comp x (hf.prod hg)
/-- Leibniz rule for a continuous bilinear map, in the strict sense. -/
@[fun_prop]
theorem ContinuousLinearMap.hasStrictFDerivAt_of_bilinear
{f : G' â E} {g : G' â F} {f' : G' âL[ð] E}
{g' : G' âL[ð] F} {x : G'} (hf : HasStrictFDerivAt f f' x) (hg : HasStrictFDerivAt g g' x) :
HasStrictFDerivAt (fun y => B (f y) (g y))
(B.precompR G' (f x) g' + B.precompL G' f' (g x)) x :=
(B.isBoundedBilinearMap.hasStrictFDerivAt (f x, g x)).comp x (hf.prod hg)
/-- Formula for `fderivWithin` of `y ↦ B (f y) (g y)`. -/
theorem ContinuousLinearMap.fderivWithin_of_bilinear {f : G' â E} {g : G' â F} {x : G'} {s : Set G'}
(hf : DifferentiableWithinAt ð f s x) (hg : DifferentiableWithinAt ð g s x)
(hs : UniqueDiffWithinAt ð s x) :
fderivWithin ð (fun y => B (f y) (g y)) s x =
B.precompR G' (f x) (fderivWithin ð g s x) + B.precompL G' (fderivWithin ð f s x) (g x) :=
(B.hasFDerivWithinAt_of_bilinear hf.hasFDerivWithinAt hg.hasFDerivWithinAt).fderivWithin hs
/-- Formula for `fderiv` of `y ↦ B (f y) (g y)`. -/
theorem ContinuousLinearMap.fderiv_of_bilinear {f : G' â E} {g : G' â F} {x : G'}
(hf : DifferentiableAt ð f x) (hg : DifferentiableAt ð g x) :
fderiv ð (fun y => B (f y) (g y)) x =
B.precompR G' (f x) (fderiv ð g x) + B.precompL G' (fderiv ð f x) (g x) :=
(B.hasFDerivAt_of_bilinear hf.hasFDerivAt hg.hasFDerivAt).fderiv
end BilinearMap
end
|
Analysis\Calculus\FDeriv\Comp.lean | /-
Copyright (c) 2019 Jeremy Avigad. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Jeremy Avigad, Sébastien Gouëzel, Yury Kudryashov
-/
import Mathlib.Analysis.Calculus.FDeriv.Basic
/-!
# The derivative of a composition (chain rule)
For detailed documentation of the Fréchet derivative,
see the module docstring of `Analysis/Calculus/FDeriv/Basic.lean`.
This file contains the usual formulas (and existence assertions) for the derivative of
composition of functions (the chain rule).
-/
open Filter Asymptotics ContinuousLinearMap Set Metric
open scoped Classical
open Topology NNReal Filter Asymptotics ENNReal
noncomputable section
section
variable {ð : Type*} [NontriviallyNormedField ð]
variable {E : Type*} [NormedAddCommGroup E] [NormedSpace ð E]
variable {F : Type*} [NormedAddCommGroup F] [NormedSpace ð F]
variable {G : Type*} [NormedAddCommGroup G] [NormedSpace ð G]
variable {G' : Type*} [NormedAddCommGroup G'] [NormedSpace ð G']
variable {f fâ fâ g : E â F}
variable {f' fâ' fâ' g' : E âL[ð] F}
variable (e : E âL[ð] F)
variable {x : E}
variable {s t : Set E}
variable {L Lâ Lâ : Filter E}
section Composition
/-!
### Derivative of the composition of two functions
For composition lemmas, we put `x` explicit to help the elaborator, as otherwise Lean tends to
get confused since there are too many possibilities for composition. -/
variable (x)
/-- Chain rule at the level of filters: if `g` is differentiable at `f x` along `L'`, `f` is
differentiable at `x` along `L`, and `f` maps `L` into `L'`, then `g ∘ f` is differentiable
at `x` along `L` with derivative `g'.comp f'`. This is the most general form, from which the
`HasFDerivAt`/`HasFDerivWithinAt` versions below are derived. -/
theorem HasFDerivAtFilter.comp {g : F â G} {g' : F âL[ð] G} {L' : Filter F}
(hg : HasFDerivAtFilter g g' (f x) L') (hf : HasFDerivAtFilter f f' x L) (hL : Tendsto f L L') :
HasFDerivAtFilter (g â f) (g'.comp f') x L := by
-- eqâ: the linearized error of `f` pushed through `g'` stays little-o
let eqâ := (g'.isBigO_comp _ _).trans_isLittleO hf.isLittleO
-- eqâ: the error of `g` composed with `f`, controlled by `‖f x' - f x‖ = O(‖x' - x‖)`
let eqâ := (hg.isLittleO.comp_tendsto hL).trans_isBigO hf.isBigO_sub
-- combine the two errors by the triangle inequality
refine .of_isLittleO <| eqâ.triangle <| eqâ.congr_left fun x' => ?_
simp
/- A readable version of the previous theorem, a general form of the chain rule. -/
example {g : F â G} {g' : F âL[ð] G} (hg : HasFDerivAtFilter g g' (f x) (L.map f))
(hf : HasFDerivAtFilter f f' x L) : HasFDerivAtFilter (g â f) (g'.comp f') x L := by
have :=
calc
(fun x' => g (f x') - g (f x) - g' (f x' - f x)) =o[L] fun x' => f x' - f x :=
hg.isLittleO.comp_tendsto le_rfl
_ =O[L] fun x' => x' - x := hf.isBigO_sub
refine .of_isLittleO <| this.triangle ?_
calc
(fun x' : E => g' (f x' - f x) - g'.comp f' (x' - x))
_ =á¶ [L] fun x' => g' (f x' - f x - f' (x' - x)) := eventually_of_forall fun x' => by simp
_ =O[L] fun x' => f x' - f x - f' (x' - x) := g'.isBigO_comp _ _
_ =o[L] fun x' => x' - x := hf.isLittleO
-- Specializations of `HasFDerivAtFilter.comp` to within-set and pointwise differentiability.
/-- Chain rule within sets: needs `f` to map `s` into `t`. -/
@[fun_prop]
theorem HasFDerivWithinAt.comp {g : F â G} {g' : F âL[ð] G} {t : Set F}
(hg : HasFDerivWithinAt g g' t (f x)) (hf : HasFDerivWithinAt f f' s x) (hst : MapsTo f s t) :
HasFDerivWithinAt (g â f) (g'.comp f') s x :=
HasFDerivAtFilter.comp x hg hf <| hf.continuousWithinAt.tendsto_nhdsWithin hst
/-- Chain rule with the outer function differentiable at the point (no set condition on `g`). -/
@[fun_prop]
theorem HasFDerivAt.comp_hasFDerivWithinAt {g : F â G} {g' : F âL[ð] G}
(hg : HasFDerivAt g g' (f x)) (hf : HasFDerivWithinAt f f' s x) :
HasFDerivWithinAt (g â f) (g'.comp f') s x :=
hg.comp x hf hf.continuousWithinAt
/-- Chain rule within sets with the weaker hypothesis that `f` tends to `𝓝[t] (f x)`. -/
@[fun_prop]
theorem HasFDerivWithinAt.comp_of_mem {g : F â G} {g' : F âL[ð] G} {t : Set F}
(hg : HasFDerivWithinAt g g' t (f x)) (hf : HasFDerivWithinAt f f' s x)
(hst : Tendsto f (ð[s] x) (ð[t] f x)) : HasFDerivWithinAt (g â f) (g'.comp f') s x :=
HasFDerivAtFilter.comp x hg hf hst
/-- The chain rule. -/
@[fun_prop]
theorem HasFDerivAt.comp {g : F â G} {g' : F âL[ð] G} (hg : HasFDerivAt g g' (f x))
(hf : HasFDerivAt f f' x) : HasFDerivAt (g â f) (g'.comp f') x :=
HasFDerivAtFilter.comp x hg hf hf.continuousAt
/-- Differentiability of a composition, within sets. -/
@[fun_prop]
theorem DifferentiableWithinAt.comp {g : F â G} {t : Set F}
(hg : DifferentiableWithinAt ð g t (f x)) (hf : DifferentiableWithinAt ð f s x)
(h : MapsTo f s t) : DifferentiableWithinAt ð (g â f) s x :=
(hg.hasFDerivWithinAt.comp x hf.hasFDerivWithinAt h).differentiableWithinAt
/-- Variant of `DifferentiableWithinAt.comp` with no `MapsTo` hypothesis; the set is shrunk
to `s ∩ f ⁻¹' t` instead. -/
@[fun_prop]
theorem DifferentiableWithinAt.comp' {g : F â G} {t : Set F}
(hg : DifferentiableWithinAt ð g t (f x)) (hf : DifferentiableWithinAt ð f s x) :
DifferentiableWithinAt ð (g â f) (s â© f â»Â¹' t) x :=
hg.comp x (hf.mono inter_subset_left) inter_subset_right
/-- Differentiability of a composition at a point. -/
@[fun_prop]
theorem DifferentiableAt.comp {g : F â G} (hg : DifferentiableAt ð g (f x))
(hf : DifferentiableAt ð f x) : DifferentiableAt ð (g â f) x :=
(hg.hasFDerivAt.comp x hf.hasFDerivAt).differentiableAt
@[fun_prop]
theorem DifferentiableAt.comp_differentiableWithinAt {g : F â G} (hg : DifferentiableAt ð g (f x))
(hf : DifferentiableWithinAt ð f s x) : DifferentiableWithinAt ð (g â f) s x :=
hg.differentiableWithinAt.comp x hf (mapsTo_univ _ _)
/-- Chain rule formula for `fderivWithin`: under `MapsTo f s t` and uniqueness of derivatives
within `s` at `x`, `fderivWithin` of `g ∘ f` is the composition of the two `fderivWithin`s. -/
theorem fderivWithin.comp {g : F â G} {t : Set F} (hg : DifferentiableWithinAt ð g t (f x))
(hf : DifferentiableWithinAt ð f s x) (h : MapsTo f s t) (hxs : UniqueDiffWithinAt ð s x) :
fderivWithin ð (g â f) s x = (fderivWithin ð g t (f x)).comp (fderivWithin ð f s x) :=
(hg.hasFDerivWithinAt.comp x hf.hasFDerivWithinAt h).fderivWithin hxs
/-- A version of `fderivWithin.comp` that is useful to rewrite the composition of two derivatives
into a single derivative. This version always applies, but creates a new side-goal `f x = y`. -/
theorem fderivWithin_fderivWithin {g : F â G} {f : E â F} {x : E} {y : F} {s : Set E} {t : Set F}
(hg : DifferentiableWithinAt ð g t y) (hf : DifferentiableWithinAt ð f s x) (h : MapsTo f s t)
(hxs : UniqueDiffWithinAt ð s x) (hy : f x = y) (v : E) :
fderivWithin ð g t y (fderivWithin ð f s x v) = fderivWithin ð (g â f) s x v := by
subst y
rw [fderivWithin.comp x hg hf h hxs, coe_comp', Function.comp_apply]
/-- Ternary version of `fderivWithin.comp`, with equality assumptions of basepoints added, in
order to apply more easily as a rewrite from right-to-left. -/
theorem fderivWithin.compâ {g' : G â G'} {g : F â G} {t : Set F} {u : Set G} {y : F} {y' : G}
(hg' : DifferentiableWithinAt ð g' u y') (hg : DifferentiableWithinAt ð g t y)
(hf : DifferentiableWithinAt ð f s x) (h2g : MapsTo g t u) (h2f : MapsTo f s t) (h3g : g y = y')
(h3f : f x = y) (hxs : UniqueDiffWithinAt ð s x) :
fderivWithin ð (g' â g â f) s x =
(fderivWithin ð g' u y').comp ((fderivWithin ð g t y).comp (fderivWithin ð f s x)) := by
-- eliminate the basepoint equalities, then apply the binary chain rule twice
substs h3g h3f
exact (hg'.hasFDerivWithinAt.comp x (hg.hasFDerivWithinAt.comp x hf.hasFDerivWithinAt h2f) <|
h2g.comp h2f).fderivWithin hxs
/-- Chain rule formula for `fderiv`. -/
theorem fderiv.comp {g : F â G} (hg : DifferentiableAt ð g (f x)) (hf : DifferentiableAt ð f x) :
fderiv ð (g â f) x = (fderiv ð g (f x)).comp (fderiv ð f x) :=
(hg.hasFDerivAt.comp x hf.hasFDerivAt).fderiv
/-- Mixed chain rule: outer `fderiv`, inner `fderivWithin`. -/
theorem fderiv.comp_fderivWithin {g : F â G} (hg : DifferentiableAt ð g (f x))
(hf : DifferentiableWithinAt ð f s x) (hxs : UniqueDiffWithinAt ð s x) :
fderivWithin ð (g â f) s x = (fderiv ð g (f x)).comp (fderivWithin ð f s x) :=
(hg.hasFDerivAt.comp_hasFDerivWithinAt x hf.hasFDerivWithinAt).fderivWithin hxs
/-- A composition of functions differentiable on sets is differentiable on the preimage set,
given `MapsTo f s t`. -/
@[fun_prop]
theorem DifferentiableOn.comp {g : F â G} {t : Set F} (hg : DifferentiableOn ð g t)
(hf : DifferentiableOn ð f s) (st : MapsTo f s t) : DifferentiableOn ð (g â f) s :=
fun x hx => DifferentiableWithinAt.comp x (hg (f x) (st hx)) (hf x hx) st
/-- A composition of globally differentiable functions is differentiable. -/
@[fun_prop]
theorem Differentiable.comp {g : F â G} (hg : Differentiable ð g) (hf : Differentiable ð f) :
Differentiable ð (g â f) :=
fun x => DifferentiableAt.comp x (hg (f x)) (hf x)
@[fun_prop]
theorem Differentiable.comp_differentiableOn {g : F â G} (hg : Differentiable ð g)
(hf : DifferentiableOn ð f s) : DifferentiableOn ð (g â f) s :=
hg.differentiableOn.comp hf (mapsTo_univ _ _)
/-- The chain rule for derivatives in the sense of strict differentiability. -/
@[fun_prop]
protected theorem HasStrictFDerivAt.comp {g : F â G} {g' : F âL[ð] G}
(hg : HasStrictFDerivAt g g' (f x)) (hf : HasStrictFDerivAt f f' x) :
HasStrictFDerivAt (fun x => g (f x)) (g'.comp f') x :=
-- split the error of `g ∘ f` into the error of `g` along `f` plus `g'` of the error of `f`
((hg.comp_tendsto (hf.continuousAt.prod_map' hf.continuousAt)).trans_isBigO
hf.isBigO_sub).triangle <| by
simpa only [g'.map_sub, f'.coe_comp'] using (g'.isBigO_comp _ _).trans_isLittleO hf
/-- Iterates of a differentiable self-map are differentiable. -/
@[fun_prop]
protected theorem Differentiable.iterate {f : E â E} (hf : Differentiable ð f) (n : â) :
Differentiable ð f^[n] :=
-- induction on `n`: `f^[0] = id`, `f^[n+1] = f^[n] ∘ f`
Nat.recOn n differentiable_id fun _ ihn => ihn.comp hf
/-- Iterates of a self-map differentiable on an invariant set are differentiable on it. -/
@[fun_prop]
protected theorem DifferentiableOn.iterate {f : E â E} (hf : DifferentiableOn ð f s)
(hs : MapsTo f s s) (n : â) : DifferentiableOn ð f^[n] s :=
Nat.recOn n differentiableOn_id fun _ ihn => ihn.comp hf hs
variable {x}
-- Derivatives of iterates at a fixed point `x` of `f`: the derivative of `f^[n]` at `x` is
-- the `n`-th power of the derivative of `f` at `x`.
/-- Filter version: at a fixed point `x` of `f` with `f` mapping `L` to itself, the derivative
of `f^[n]` along `L` is `f' ^ n`. -/
protected theorem HasFDerivAtFilter.iterate {f : E â E} {f' : E âL[ð] E}
(hf : HasFDerivAtFilter f f' x L) (hL : Tendsto f L L) (hx : f x = x) (n : â) :
HasFDerivAtFilter f^[n] (f' ^ n) x L := by
induction' n with n ihn
· exact hasFDerivAtFilter_id x L
· rw [Function.iterate_succ, pow_succ]
-- rewrite the inductive hypothesis at `f x` using the fixed-point equation
rw [â hx] at ihn
exact ihn.comp x hf hL
/-- At a fixed point `x` of `f`, the derivative of `f^[n]` is `f' ^ n`. -/
@[fun_prop]
protected theorem HasFDerivAt.iterate {f : E â E} {f' : E âL[ð] E} (hf : HasFDerivAt f f' x)
(hx : f x = x) (n : â) : HasFDerivAt f^[n] (f' ^ n) x := by
refine HasFDerivAtFilter.iterate hf ?_ hx n
-- Porting note: was `convert hf.continuousAt`
convert hf.continuousAt.tendsto
exact hx.symm
/-- Within-set version, assuming `s` is invariant under `f`. -/
@[fun_prop]
protected theorem HasFDerivWithinAt.iterate {f : E â E} {f' : E âL[ð] E}
(hf : HasFDerivWithinAt f f' s x) (hx : f x = x) (hs : MapsTo f s s) (n : â) :
HasFDerivWithinAt f^[n] (f' ^ n) s x := by
refine HasFDerivAtFilter.iterate hf ?_ hx n
rw [_root_.nhdsWithin] -- Porting note: Added `rw` to get rid of an error
-- `f` tends to `𝓝[s] x` since it is continuous within `s` and maps `s` into `s`
convert tendsto_inf.2 âšhf.continuousWithinAt, _â©
exacts [hx.symm, (tendsto_principal_principal.2 hs).mono_left inf_le_right]
/-- Strict version: at a fixed point, the strict derivative of `f^[n]` is `f' ^ n`. -/
@[fun_prop]
protected theorem HasStrictFDerivAt.iterate {f : E â E} {f' : E âL[ð] E}
(hf : HasStrictFDerivAt f f' x) (hx : f x = x) (n : â) :
HasStrictFDerivAt f^[n] (f' ^ n) x := by
induction' n with n ihn
· exact hasStrictFDerivAt_id x
· rw [Function.iterate_succ, pow_succ]
rw [â hx] at ihn
exact ihn.comp x hf
/-- Iterates are differentiable at a fixed point of a differentiable map. -/
@[fun_prop]
protected theorem DifferentiableAt.iterate {f : E â E} (hf : DifferentiableAt ð f x) (hx : f x = x)
(n : â) : DifferentiableAt ð f^[n] x :=
(hf.hasFDerivAt.iterate hx n).differentiableAt
/-- Within-set version of `DifferentiableAt.iterate`. -/
@[fun_prop]
protected theorem DifferentiableWithinAt.iterate {f : E â E} (hf : DifferentiableWithinAt ð f s x)
(hx : f x = x) (hs : MapsTo f s s) (n : â) : DifferentiableWithinAt ð f^[n] s x :=
(hf.hasFDerivWithinAt.iterate hx hs n).differentiableWithinAt
end Composition
end
|
Analysis\Calculus\FDeriv\Equiv.lean | /-
Copyright (c) 2019 Jeremy Avigad. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Jeremy Avigad, Sébastien Gouëzel, Yury Kudryashov
-/
import Mathlib.Analysis.Asymptotics.AsymptoticEquivalent
import Mathlib.Analysis.Calculus.FDeriv.Linear
import Mathlib.Analysis.Calculus.FDeriv.Comp
/-!
# The derivative of a linear equivalence
For detailed documentation of the Fréchet derivative,
see the module docstring of `Analysis/Calculus/FDeriv/Basic.lean`.
This file contains the usual formulas (and existence assertions) for the derivative of
continuous linear equivalences.
We also prove the usual formula for the derivative of the inverse function, assuming it exists.
The inverse function theorem is in `Mathlib/Analysis/Calculus/InverseFunctionTheorem/FDeriv.lean`.
-/
open Filter Asymptotics ContinuousLinearMap Set Metric
open scoped Classical
open Topology NNReal Filter Asymptotics ENNReal
noncomputable section
section
variable {ð : Type*} [NontriviallyNormedField ð]
variable {E : Type*} [NormedAddCommGroup E] [NormedSpace ð E]
variable {F : Type*} [NormedAddCommGroup F] [NormedSpace ð F]
variable {G : Type*} [NormedAddCommGroup G] [NormedSpace ð G]
variable {G' : Type*} [NormedAddCommGroup G'] [NormedSpace ð G']
variable {f fâ fâ g : E â F}
variable {f' fâ' fâ' g' : E âL[ð] F}
variable (e : E âL[ð] F)
variable {x : E}
variable {s t : Set E}
variable {L Lâ Lâ : Filter E}
namespace ContinuousLinearEquiv
/-! ### Differentiability of linear equivs, and invariance of differentiability -/
variable (iso : E âL[ð] F)
-- A continuous linear equivalence, viewed as a map, is differentiable everywhere and its
-- derivative at every point is the equivalence itself (as a continuous linear map). All the
-- lemmas below delegate to the corresponding facts for `iso.toContinuousLinearMap`.
@[fun_prop]
protected theorem hasStrictFDerivAt : HasStrictFDerivAt iso (iso : E âL[ð] F) x :=
iso.toContinuousLinearMap.hasStrictFDerivAt
@[fun_prop]
protected theorem hasFDerivWithinAt : HasFDerivWithinAt iso (iso : E âL[ð] F) s x :=
iso.toContinuousLinearMap.hasFDerivWithinAt
@[fun_prop]
protected theorem hasFDerivAt : HasFDerivAt iso (iso : E âL[ð] F) x :=
iso.toContinuousLinearMap.hasFDerivAtFilter
@[fun_prop]
protected theorem differentiableAt : DifferentiableAt ð iso x :=
iso.hasFDerivAt.differentiableAt
@[fun_prop]
protected theorem differentiableWithinAt : DifferentiableWithinAt ð iso s x :=
iso.differentiableAt.differentiableWithinAt
/-- The derivative of a continuous linear equivalence is itself. -/
protected theorem fderiv : fderiv ð iso x = iso :=
iso.hasFDerivAt.fderiv
protected theorem fderivWithin (hxs : UniqueDiffWithinAt ð s x) : fderivWithin ð iso s x = iso :=
iso.toContinuousLinearMap.fderivWithin hxs
@[fun_prop]
protected theorem differentiable : Differentiable ð iso := fun _ => iso.differentiableAt
@[fun_prop]
protected theorem differentiableOn : DifferentiableOn ð iso s :=
iso.differentiable.differentiableOn
-- Postcomposing with a continuous linear equivalence does not change differentiability:
-- all the `comp_*_iff` lemmas below are proved by composing with `iso.symm` to recover `f`.
/-- `iso ∘ f` is differentiable within `s` at `x` iff `f` is. -/
theorem comp_differentiableWithinAt_iff {f : G â E} {s : Set G} {x : G} :
DifferentiableWithinAt ð (iso â f) s x â DifferentiableWithinAt ð f s x := by
refine
âšfun H => ?_, fun H => iso.differentiable.differentiableAt.comp_differentiableWithinAt x Hâ©
-- compose with the inverse to get back `f`
have : DifferentiableWithinAt ð (iso.symm â iso â f) s x :=
iso.symm.differentiable.differentiableAt.comp_differentiableWithinAt x H
rwa [â Function.comp.assoc iso.symm iso f, iso.symm_comp_self] at this
theorem comp_differentiableAt_iff {f : G â E} {x : G} :
DifferentiableAt ð (iso â f) x â DifferentiableAt ð f x := by
rw [â differentiableWithinAt_univ, â differentiableWithinAt_univ,
iso.comp_differentiableWithinAt_iff]
theorem comp_differentiableOn_iff {f : G â E} {s : Set G} :
DifferentiableOn ð (iso â f) s â DifferentiableOn ð f s := by
rw [DifferentiableOn, DifferentiableOn]
simp only [iso.comp_differentiableWithinAt_iff]
theorem comp_differentiable_iff {f : G â E} : Differentiable ð (iso â f) â Differentiable ð f := by
rw [â differentiableOn_univ, â differentiableOn_univ]
exact iso.comp_differentiableOn_iff
/-- `iso ∘ f` has derivative `iso.comp f'` within `s` at `x` iff `f` has derivative `f'`. -/
theorem comp_hasFDerivWithinAt_iff {f : G â E} {s : Set G} {x : G} {f' : G âL[ð] E} :
HasFDerivWithinAt (iso â f) ((iso : E âL[ð] F).comp f') s x â HasFDerivWithinAt f f' s x := by
refine âšfun H => ?_, fun H => iso.hasFDerivAt.comp_hasFDerivWithinAt x Hâ©
-- rewrite `f` and `f'` via the inverse equivalence, then apply the chain rule
have A : f = iso.symm â iso â f := by
rw [â Function.comp.assoc, iso.symm_comp_self]
rfl
have B : f' = (iso.symm : F âL[ð] E).comp ((iso : E âL[ð] F).comp f') := by
rw [â ContinuousLinearMap.comp_assoc, iso.coe_symm_comp_coe, ContinuousLinearMap.id_comp]
rw [A, B]
exact iso.symm.hasFDerivAt.comp_hasFDerivWithinAt x H
theorem comp_hasStrictFDerivAt_iff {f : G â E} {x : G} {f' : G âL[ð] E} :
HasStrictFDerivAt (iso â f) ((iso : E âL[ð] F).comp f') x â HasStrictFDerivAt f f' x := by
refine âšfun H => ?_, fun H => iso.hasStrictFDerivAt.comp x Hâ©
convert iso.symm.hasStrictFDerivAt.comp x H using 1 <;>
ext z <;> apply (iso.symm_apply_apply _).symm
theorem comp_hasFDerivAt_iff {f : G â E} {x : G} {f' : G âL[ð] E} :
HasFDerivAt (iso â f) ((iso : E âL[ð] F).comp f') x â HasFDerivAt f f' x := by
simp_rw [â hasFDerivWithinAt_univ, iso.comp_hasFDerivWithinAt_iff]
/-- Variant of `comp_hasFDerivWithinAt_iff` with the candidate derivative on the other side. -/
theorem comp_hasFDerivWithinAt_iff' {f : G â E} {s : Set G} {x : G} {f' : G âL[ð] F} :
HasFDerivWithinAt (iso â f) f' s x â
HasFDerivWithinAt f ((iso.symm : F âL[ð] E).comp f') s x := by
rw [â iso.comp_hasFDerivWithinAt_iff, â ContinuousLinearMap.comp_assoc, iso.coe_comp_coe_symm,
ContinuousLinearMap.id_comp]
theorem comp_hasFDerivAt_iff' {f : G â E} {x : G} {f' : G âL[ð] F} :
HasFDerivAt (iso â f) f' x â HasFDerivAt f ((iso.symm : F âL[ð] E).comp f') x := by
simp_rw [â hasFDerivWithinAt_univ, iso.comp_hasFDerivWithinAt_iff']
/-- `fderivWithin` commutes with postcomposition by a continuous linear equivalence.
This holds without differentiability hypotheses: both sides are `0` when `f` is not
differentiable within `s` at `x`. -/
theorem comp_fderivWithin {f : G â E} {s : Set G} {x : G} (hxs : UniqueDiffWithinAt ð s x) :
fderivWithin ð (iso â f) s x = (iso : E âL[ð] F).comp (fderivWithin ð f s x) := by
by_cases h : DifferentiableWithinAt ð f s x
· rw [fderiv.comp_fderivWithin x iso.differentiableAt h hxs, iso.fderiv]
· have : ¬DifferentiableWithinAt ð (iso â f) s x := mt iso.comp_differentiableWithinAt_iff.1 h
rw [fderivWithin_zero_of_not_differentiableWithinAt h,
fderivWithin_zero_of_not_differentiableWithinAt this, ContinuousLinearMap.comp_zero]
/-- `fderiv` commutes with postcomposition by a continuous linear equivalence. -/
theorem comp_fderiv {f : G â E} {x : G} :
fderiv ð (iso â f) x = (iso : E âL[ð] F).comp (fderiv ð f x) := by
rw [â fderivWithin_univ, â fderivWithin_univ]
exact iso.comp_fderivWithin uniqueDiffWithinAt_univ
/-- Special case: postcomposition of continuous-linear-map-valued functions by a fixed
equivalence `L`, within a set. -/
lemma _root_.fderivWithin_continuousLinearEquiv_comp (L : G âL[ð] G') (f : E â (F âL[ð] G))
(hs : UniqueDiffWithinAt ð s x) :
fderivWithin ð (fun x ⊠(L : G âL[ð] G').comp (f x)) s x =
(((ContinuousLinearEquiv.refl ð F).arrowCongr L)) âL (fderivWithin ð f s x) := by
-- view postcomposition by `L` as the continuous linear equivalence `arrowCongr`
change fderivWithin ð (((ContinuousLinearEquiv.refl ð F).arrowCongr L) â f) s x = _
rw [ContinuousLinearEquiv.comp_fderivWithin _ hs]
lemma _root_.fderiv_continuousLinearEquiv_comp (L : G âL[ð] G') (f : E â (F âL[ð] G)) (x : E) :
fderiv ð (fun x ⊠(L : G âL[ð] G').comp (f x)) x =
(((ContinuousLinearEquiv.refl ð F).arrowCongr L)) âL (fderiv ð f x) := by
change fderiv ð (((ContinuousLinearEquiv.refl ð F).arrowCongr L) â f) x = _
rw [ContinuousLinearEquiv.comp_fderiv]
lemma _root_.fderiv_continuousLinearEquiv_comp' (L : G âL[ð] G') (f : E â (F âL[ð] G)) :
fderiv ð (fun x ⊠(L : G âL[ð] G').comp (f x)) =
fun x ⊠(((ContinuousLinearEquiv.refl ð F).arrowCongr L)) âL (fderiv ð f x) := by
ext x : 1
exact fderiv_continuousLinearEquiv_comp L f x
-- Precomposing with a continuous linear equivalence does not change differentiability either;
-- these `comp_right_*` lemmas are the mirror images of the `comp_*` lemmas above, proved by
-- precomposing with `iso.symm`.
/-- `f ∘ iso` is differentiable within `iso ⁻¹' s` at `x` iff `f` is differentiable within `s`
at `iso x`. -/
theorem comp_right_differentiableWithinAt_iff {f : F â G} {s : Set F} {x : E} :
DifferentiableWithinAt ð (f â iso) (iso â»Â¹' s) x â DifferentiableWithinAt ð f s (iso x) := by
refine âšfun H => ?_, fun H => H.comp x iso.differentiableWithinAt (mapsTo_preimage _ s)â©
have : DifferentiableWithinAt ð ((f â iso) â iso.symm) s (iso x) := by
rw [â iso.symm_apply_apply x] at H
apply H.comp (iso x) iso.symm.differentiableWithinAt
intro y hy
simpa only [mem_preimage, apply_symm_apply] using hy
rwa [Function.comp.assoc, iso.self_comp_symm] at this
theorem comp_right_differentiableAt_iff {f : F â G} {x : E} :
DifferentiableAt ð (f â iso) x â DifferentiableAt ð f (iso x) := by
simp only [â differentiableWithinAt_univ, â iso.comp_right_differentiableWithinAt_iff,
preimage_univ]
theorem comp_right_differentiableOn_iff {f : F â G} {s : Set F} :
DifferentiableOn ð (f â iso) (iso â»Â¹' s) â DifferentiableOn ð f s := by
refine âšfun H y hy => ?_, fun H y hy => iso.comp_right_differentiableWithinAt_iff.2 (H _ hy)â©
-- express `y ∈ s` as `iso x` for `x = iso.symm y` and reduce to the pointwise lemma
rw [â iso.apply_symm_apply y, â comp_right_differentiableWithinAt_iff]
apply H
simpa only [mem_preimage, apply_symm_apply] using hy
theorem comp_right_differentiable_iff {f : F â G} :
Differentiable ð (f â iso) â Differentiable ð f := by
simp only [â differentiableOn_univ, â iso.comp_right_differentiableOn_iff, preimage_univ]
/-- `f ∘ iso` has derivative `f'.comp iso` within `iso ⁻¹' s` at `x` iff `f` has derivative `f'`
within `s` at `iso x`. -/
theorem comp_right_hasFDerivWithinAt_iff {f : F â G} {s : Set F} {x : E} {f' : F âL[ð] G} :
HasFDerivWithinAt (f â iso) (f'.comp (iso : E âL[ð] F)) (iso â»Â¹' s) x â
HasFDerivWithinAt f f' s (iso x) := by
refine âšfun H => ?_, fun H => H.comp x iso.hasFDerivWithinAt (mapsTo_preimage _ s)â©
rw [â iso.symm_apply_apply x] at H
-- rewrite `f` and `f'` via the inverse equivalence, then apply the chain rule
have A : f = (f â iso) â iso.symm := by
rw [Function.comp.assoc, iso.self_comp_symm]
rfl
have B : f' = (f'.comp (iso : E âL[ð] F)).comp (iso.symm : F âL[ð] E) := by
rw [ContinuousLinearMap.comp_assoc, iso.coe_comp_coe_symm, ContinuousLinearMap.comp_id]
rw [A, B]
apply H.comp (iso x) iso.symm.hasFDerivWithinAt
intro y hy
simpa only [mem_preimage, apply_symm_apply] using hy
theorem comp_right_hasFDerivAt_iff {f : F â G} {x : E} {f' : F âL[ð] G} :
HasFDerivAt (f â iso) (f'.comp (iso : E âL[ð] F)) x â HasFDerivAt f f' (iso x) := by
simp only [â hasFDerivWithinAt_univ, â comp_right_hasFDerivWithinAt_iff, preimage_univ]
/-- Variant of `comp_right_hasFDerivWithinAt_iff` with the candidate derivative on the other
side. -/
theorem comp_right_hasFDerivWithinAt_iff' {f : F â G} {s : Set F} {x : E} {f' : E âL[ð] G} :
HasFDerivWithinAt (f â iso) f' (iso â»Â¹' s) x â
HasFDerivWithinAt f (f'.comp (iso.symm : F âL[ð] E)) s (iso x) := by
rw [â iso.comp_right_hasFDerivWithinAt_iff, ContinuousLinearMap.comp_assoc,
iso.coe_symm_comp_coe, ContinuousLinearMap.comp_id]
theorem comp_right_hasFDerivAt_iff' {f : F â G} {x : E} {f' : E âL[ð] G} :
HasFDerivAt (f â iso) f' x â HasFDerivAt f (f'.comp (iso.symm : F âL[ð] E)) (iso x) := by
simp only [â hasFDerivWithinAt_univ, â iso.comp_right_hasFDerivWithinAt_iff', preimage_univ]
/-- `fderivWithin` commutes with precomposition by a continuous linear equivalence. Holds
without differentiability hypotheses: both sides are `0` in the non-differentiable case. -/
theorem comp_right_fderivWithin {f : F â G} {s : Set F} {x : E}
(hxs : UniqueDiffWithinAt ð (iso â»Â¹' s) x) :
fderivWithin ð (f â iso) (iso â»Â¹' s) x =
(fderivWithin ð f s (iso x)).comp (iso : E âL[ð] F) := by
by_cases h : DifferentiableWithinAt ð f s (iso x)
· exact (iso.comp_right_hasFDerivWithinAt_iff.2 h.hasFDerivWithinAt).fderivWithin hxs
· have : ¬DifferentiableWithinAt ð (f â iso) (iso â»Â¹' s) x := by
intro h'
exact h (iso.comp_right_differentiableWithinAt_iff.1 h')
rw [fderivWithin_zero_of_not_differentiableWithinAt h,
fderivWithin_zero_of_not_differentiableWithinAt this, ContinuousLinearMap.zero_comp]
/-- `fderiv` commutes with precomposition by a continuous linear equivalence. -/
theorem comp_right_fderiv {f : F â G} {x : E} :
fderiv ð (f â iso) x = (fderiv ð f (iso x)).comp (iso : E âL[ð] F) := by
rw [â fderivWithin_univ, â fderivWithin_univ, â iso.comp_right_fderivWithin, preimage_univ]
exact uniqueDiffWithinAt_univ
end ContinuousLinearEquiv
namespace LinearIsometryEquiv
/-! ### Differentiability of linear isometry equivs, and invariance of differentiability -/
variable (iso : E ââáµ¢[ð] F)
-- A linear isometry equivalence is in particular a continuous linear equivalence, so it is
-- differentiable everywhere with derivative itself; every lemma below delegates to the
-- corresponding statement for the underlying continuous linear map.
@[fun_prop]
protected theorem hasStrictFDerivAt : HasStrictFDerivAt iso (iso : E âL[ð] F) x :=
(iso : E âL[ð] F).hasStrictFDerivAt
@[fun_prop]
protected theorem hasFDerivWithinAt : HasFDerivWithinAt iso (iso : E âL[ð] F) s x :=
(iso : E âL[ð] F).hasFDerivWithinAt
@[fun_prop]
protected theorem hasFDerivAt : HasFDerivAt iso (iso : E âL[ð] F) x :=
(iso : E âL[ð] F).hasFDerivAt
@[fun_prop]
protected theorem differentiableAt : DifferentiableAt ð iso x :=
iso.hasFDerivAt.differentiableAt
@[fun_prop]
protected theorem differentiableWithinAt : DifferentiableWithinAt ð iso s x :=
iso.differentiableAt.differentiableWithinAt
/-- The derivative of a linear isometry equivalence is itself. -/
protected theorem fderiv : fderiv ð iso x = iso :=
iso.hasFDerivAt.fderiv
protected theorem fderivWithin (hxs : UniqueDiffWithinAt ð s x) : fderivWithin ð iso s x = iso :=
(iso : E âL[ð] F).fderivWithin hxs
@[fun_prop]
protected theorem differentiable : Differentiable ð iso := fun _ => iso.differentiableAt
@[fun_prop]
protected theorem differentiableOn : DifferentiableOn ð iso s :=
iso.differentiable.differentiableOn
theorem comp_differentiableWithinAt_iff {f : G â E} {s : Set G} {x : G} :
DifferentiableWithinAt ð (iso â f) s x â DifferentiableWithinAt ð f s x :=
(iso : E âL[ð] F).comp_differentiableWithinAt_iff
theorem comp_differentiableAt_iff {f : G â E} {x : G} :
DifferentiableAt ð (iso â f) x â DifferentiableAt ð f x :=
(iso : E âL[ð] F).comp_differentiableAt_iff
theorem comp_differentiableOn_iff {f : G â E} {s : Set G} :
DifferentiableOn ð (iso â f) s â DifferentiableOn ð f s :=
(iso : E âL[ð] F).comp_differentiableOn_iff
theorem comp_differentiable_iff {f : G â E} : Differentiable ð (iso â f) â Differentiable ð f :=
(iso : E âL[ð] F).comp_differentiable_iff
theorem comp_hasFDerivWithinAt_iff {f : G â E} {s : Set G} {x : G} {f' : G âL[ð] E} :
HasFDerivWithinAt (iso â f) ((iso : E âL[ð] F).comp f') s x â HasFDerivWithinAt f f' s x :=
(iso : E âL[ð] F).comp_hasFDerivWithinAt_iff
theorem comp_hasStrictFDerivAt_iff {f : G â E} {x : G} {f' : G âL[ð] E} :
HasStrictFDerivAt (iso â f) ((iso : E âL[ð] F).comp f') x â HasStrictFDerivAt f f' x :=
(iso : E âL[ð] F).comp_hasStrictFDerivAt_iff
theorem comp_hasFDerivAt_iff {f : G â E} {x : G} {f' : G âL[ð] E} :
HasFDerivAt (iso â f) ((iso : E âL[ð] F).comp f') x â HasFDerivAt f f' x :=
(iso : E âL[ð] F).comp_hasFDerivAt_iff
theorem comp_hasFDerivWithinAt_iff' {f : G â E} {s : Set G} {x : G} {f' : G âL[ð] F} :
HasFDerivWithinAt (iso â f) f' s x â HasFDerivWithinAt f ((iso.symm : F âL[ð] E).comp f') s x :=
(iso : E âL[ð] F).comp_hasFDerivWithinAt_iff'
theorem comp_hasFDerivAt_iff' {f : G â E} {x : G} {f' : G âL[ð] F} :
HasFDerivAt (iso â f) f' x â HasFDerivAt f ((iso.symm : F âL[ð] E).comp f') x :=
(iso : E âL[ð] F).comp_hasFDerivAt_iff'
theorem comp_fderivWithin {f : G â E} {s : Set G} {x : G} (hxs : UniqueDiffWithinAt ð s x) :
fderivWithin ð (iso â f) s x = (iso : E âL[ð] F).comp (fderivWithin ð f s x) :=
(iso : E âL[ð] F).comp_fderivWithin hxs
theorem comp_fderiv {f : G â E} {x : G} :
fderiv ð (iso â f) x = (iso : E âL[ð] F).comp (fderiv ð f x) :=
(iso : E âL[ð] F).comp_fderiv
theorem comp_fderiv' {f : G â E} :
fderiv ð (iso â f) = fun x ⊠(iso : E âL[ð] F).comp (fderiv ð f x) := by
ext x : 1
exact LinearIsometryEquiv.comp_fderiv iso
end LinearIsometryEquiv
/-- If `f (g y) = y` for `y` in some neighborhood of `a`, `g` is continuous at `a`, and `f` has an
invertible derivative `f'` at `g a` in the strict sense, then `g` has the derivative `f'â»Â¹` at `a`
in the strict sense.
This is one of the easy parts of the inverse function theorem: it assumes that we already have an
inverse function. -/
theorem HasStrictFDerivAt.of_local_left_inverse {f : E â F} {f' : E âL[ð] F} {g : F â E} {a : F}
    (hg : ContinuousAt g a) (hf : HasStrictFDerivAt f (f' : E âL[ð] F) (g a))
    (hfg : âá¶ y in ð a, f (g y) = y) : HasStrictFDerivAt g (f'.symm : F âL[ð] E) a := by
  -- strict differentiability quantifies over pairs of points, so move the hypotheses to pairs
  replace hg := hg.prod_map' hg
  replace hfg := hfg.prod_mk_nhds hfg
  -- step 1: the error of `g` is dominated by the corresponding error of `f` along `g`
  have :
    (fun p : F Ã F => g p.1 - g p.2 - f'.symm (p.1 - p.2)) =O[ð (a, a)] fun p : F Ã F =>
      f' (g p.1 - g p.2) - (p.1 - p.2) := by
    refine ((f'.symm : F âL[ð] E).isBigO_comp _ _).congr (fun x => ?_) fun _ => rfl
    simp
  refine this.trans_isLittleO ?_
  clear this
  -- step 2: that error is little-o of `g p.1 - g p.2`, using strict differentiability of `f`,
  -- and then big-O of `p.1 - p.2` by the reverse estimate
  refine ((hf.comp_tendsto hg).symm.congr'
    (hfg.mono ?_) (eventually_of_forall fun _ => rfl)).trans_isBigO ?_
  · rintro p âšhp1, hp2â©
    simp [hp1, hp2]
  · refine (hf.isBigO_sub_rev.comp_tendsto hg).congr' (eventually_of_forall fun _ => rfl)
      (hfg.mono ?_)
    rintro p âšhp1, hp2â©
    simp only [(· â ·), hp1, hp2]
/-- If `f (g y) = y` for `y` in some neighborhood of `a`, `g` is continuous at `a`, and `f` has an
invertible derivative `f'` at `g a`, then `g` has the derivative `f'â»Â¹` at `a`.
This is one of the easy parts of the inverse function theorem: it assumes that we already have
an inverse function. -/
theorem HasFDerivAt.of_local_left_inverse {f : E â F} {f' : E âL[ð] F} {g : F â E} {a : F}
    (hg : ContinuousAt g a) (hf : HasFDerivAt f (f' : E âL[ð] F) (g a))
    (hfg : âá¶ y in ð a, f (g y) = y) : HasFDerivAt g (f'.symm : F âL[ð] E) a := by
  -- same proof scheme as `HasStrictFDerivAt.of_local_left_inverse`, with single points
  -- instead of pairs of points
  have : (fun x : F => g x - g a - f'.symm (x - a)) =O[ð a]
      fun x : F => f' (g x - g a) - (x - a) := by
    refine ((f'.symm : F âL[ð] E).isBigO_comp _ _).congr (fun x => ?_) fun _ => rfl
    simp
  refine HasFDerivAtFilter.of_isLittleO <| this.trans_isLittleO ?_
  clear this
  refine ((hf.isLittleO.comp_tendsto hg).symm.congr' (hfg.mono ?_) .rfl).trans_isBigO ?_
  · intro p hp
    simp [hp, hfg.self_of_nhds]
  -- the antilipschitz estimate for `f'` converts the bound back to a bound in `x - a`
  · refine ((hf.isBigO_sub_rev f'.antilipschitz).comp_tendsto hg).congr'
      (eventually_of_forall fun _ => rfl) (hfg.mono ?_)
    rintro p hp
    simp only [(· â ·), hp, hfg.self_of_nhds]
/-- If `f` is a partial homeomorphism defined on a neighbourhood of `f.symm a`, and `f` has an
invertible derivative `f'` in the sense of strict differentiability at `f.symm a`, then `f.symm` has
the derivative `f'â»Â¹` at `a`.
This is one of the easy parts of the inverse function theorem: it assumes that we already have
an inverse function. -/
theorem PartialHomeomorph.hasStrictFDerivAt_symm (f : PartialHomeomorph E F) {f' : E âL[ð] F}
    {a : F} (ha : a â f.target) (htff' : HasStrictFDerivAt f (f' : E âL[ð] F) (f.symm a)) :
    HasStrictFDerivAt f.symm (f'.symm : F âL[ð] E) a :=
  -- `f.symm` is a continuous local left inverse of `f` near `a`, so the generic lemma applies
  htff'.of_local_left_inverse (f.symm.continuousAt ha) (f.eventually_right_inverse ha)
/-- If `f` is a partial homeomorphism defined on a neighbourhood of `f.symm a`, and `f` has an
invertible derivative `f'` at `f.symm a`, then `f.symm` has the derivative `f'â»Â¹` at `a`.
This is one of the easy parts of the inverse function theorem: it assumes that we already have
an inverse function. -/
theorem PartialHomeomorph.hasFDerivAt_symm (f : PartialHomeomorph E F) {f' : E âL[ð] F} {a : F}
    (ha : a â f.target) (htff' : HasFDerivAt f (f' : E âL[ð] F) (f.symm a)) :
    HasFDerivAt f.symm (f'.symm : F âL[ð] E) a :=
  -- non-strict analogue of `hasStrictFDerivAt_symm`, via the same left-inverse lemma
  htff'.of_local_left_inverse (f.symm.continuousAt ha) (f.eventually_right_inverse ha)
/-- If `f` has derivative `f'` within `s` at `x`, and `f'` is bounded below in the sense of the
hypothesis `hf'`, then `f` takes values different from `f x` in a punctured neighborhood of `x`
within `s`. -/
theorem HasFDerivWithinAt.eventually_ne (h : HasFDerivWithinAt f f' s x)
    (hf' : â C, â z, âzâ †C * âf' zâ) : âá¶ z in ð[s \ {x}] x, f z â f x := by
  rw [nhdsWithin, diff_eq, â inf_principal, â inf_assoc, eventually_inf_principal]
  -- `z - x` is dominated by `f' (z - x)`, thanks to the lower bound on `f'`
  have A : (fun z => z - x) =O[ð[s] x] fun z => f' (z - x) :=
    isBigO_iff.2 <| hf'.imp fun C hC => eventually_of_forall fun z => hC _
  -- hence `f z - f x` is equivalent to `f' (z - x)`, and dominates `z - x` as well
  have : (fun z => f z - f x) ~[ð[s] x] fun z => f' (z - x) := h.isLittleO.trans_isBigO A
  simpa [not_imp_not, sub_eq_zero] using (A.trans this.isBigO_symm).eq_zero_imp
/-- Full-neighborhood version of `HasFDerivWithinAt.eventually_ne`: with a bounded-below
derivative, `f` is eventually different from `f x` in a punctured neighborhood of `x`. -/
theorem HasFDerivAt.eventually_ne (h : HasFDerivAt f f' x) (hf' : â C, â z, âzâ †C * âf' zâ) :
    âá¶ z in ð[â ] x, f z â f x := by
  simpa only [compl_eq_univ_diff] using (hasFDerivWithinAt_univ.2 h).eventually_ne hf'
end
section

/-
In the special case of a normed space over the reals,
we can use scalar multiplication in the `tendsto` characterization
of the Fréchet derivative.
-/

variable {E : Type*} [NormedAddCommGroup E] [NormedSpace â E]
variable {F : Type*} [NormedAddCommGroup F] [NormedSpace â F]
variable {f : E â F} {f' : E âL[â] F} {x : E}

/-- Over the reals, the norm-quotient formulation of differentiability along a filter `L`
agrees with the scalar-multiplication formulation. -/
theorem has_fderiv_at_filter_real_equiv {L : Filter E} :
    Tendsto (fun x' : E => âx' - xââ»Â¹ * âf x' - f x - f' (x' - x)â) L (ð 0) â
      Tendsto (fun x' : E => âx' - xââ»Â¹ ⢠(f x' - f x - f' (x' - x))) L (ð 0) := by
  symm
  rw [tendsto_iff_norm_sub_tendsto_zero]
  refine tendsto_congr fun x' => ?_
  -- both sides reduce to the same expression by homogeneity of the norm under scalars
  simp [norm_smul]

/-- The value of the derivative on a vector `v` is the limit of the scaled difference
quotients along real scalars tending to infinity. -/
theorem HasFDerivAt.lim_real (hf : HasFDerivAt f f' x) (v : E) :
    Tendsto (fun c : â => c ⢠(f (x + câ»Â¹ ⢠v) - f x)) atTop (ð (f' v)) := by
  apply hf.lim v
  -- `atTop` on the reals tends to infinity in norm, as required by `hf.lim`
  rw [tendsto_atTop_atTop]
  exact fun b => âšb, fun a ha => le_trans ha (le_abs_self _)â©

end
section TangentCone

variable {ð : Type*} [NontriviallyNormedField ð] {E : Type*} [NormedAddCommGroup E]
  [NormedSpace ð E] {F : Type*} [NormedAddCommGroup F] [NormedSpace ð F] {f : E â F} {s : Set E}
  {f' : E âL[ð] F}

/-- The image of a tangent cone under the differential of a map is included in the tangent cone to
the image. -/
theorem HasFDerivWithinAt.mapsTo_tangent_cone {x : E} (h : HasFDerivWithinAt f f' s x) :
    MapsTo f' (tangentConeAt ð s x) (tangentConeAt ð (f '' s) (f x)) := by
  -- unpack a tangent direction `v` with its witnessing sequences `c` (scalars) and `d` (vectors)
  rintro v âšc, d, dtop, clim, cdlimâ©
  -- the image direction is witnessed by the same scalars and the differences `f (x + d n) - f x`
  refine
    âšc, fun n => f (x + d n) - f x, mem_of_superset dtop ?_, clim, h.lim atTop dtop clim cdlimâ©
  simp (config := { contextual := true }) [-mem_image, mem_image_of_mem]

/-- If a set has the unique differentiability property at a point x, then the image of this set
under a map with onto derivative has also the unique differentiability property at the image point.
-/
theorem HasFDerivWithinAt.uniqueDiffWithinAt {x : E} (h : HasFDerivWithinAt f f' s x)
    (hs : UniqueDiffWithinAt ð s x) (h' : DenseRange f') : UniqueDiffWithinAt ð (f '' s) (f x) := by
  refine âšh'.dense_of_mapsTo f'.continuous hs.1 ?_, h.continuousWithinAt.mem_closure_image hs.2â©
  show
    Submodule.span ð (tangentConeAt ð s x) â€
      (Submodule.span ð (tangentConeAt ð (f '' s) (f x))).comap f'
  rw [Submodule.span_le]
  -- the tangent cone of the image contains the image of the tangent cone
  exact h.mapsTo_tangent_cone.mono Subset.rfl Submodule.subset_span

/-- Set version of `HasFDerivWithinAt.uniqueDiffWithinAt`: the image of a set with the unique
differentiability property, under maps whose derivatives have dense range, again has it. -/
theorem UniqueDiffOn.image {f' : E â E âL[ð] F} (hs : UniqueDiffOn ð s)
    (hf' : â x â s, HasFDerivWithinAt f (f' x) s x) (hd : â x â s, DenseRange (f' x)) :
    UniqueDiffOn ð (f '' s) :=
  forall_mem_image.2 fun x hx => (hf' x hx).uniqueDiffWithinAt (hs x hx) (hd x hx)

theorem HasFDerivWithinAt.uniqueDiffWithinAt_of_continuousLinearEquiv {x : E} (e' : E âL[ð] F)
    (h : HasFDerivWithinAt f (e' : E âL[ð] F) s x) (hs : UniqueDiffWithinAt ð s x) :
    UniqueDiffWithinAt ð (f '' s) (f x) :=
  -- an equivalence is surjective, hence has dense range
  h.uniqueDiffWithinAt hs e'.surjective.denseRange

theorem ContinuousLinearEquiv.uniqueDiffOn_image (e : E âL[ð] F) (h : UniqueDiffOn ð s) :
    UniqueDiffOn ð (e '' s) :=
  h.image (fun _ _ => e.hasFDerivWithinAt) fun _ _ => e.surjective.denseRange

@[simp]
theorem ContinuousLinearEquiv.uniqueDiffOn_image_iff (e : E âL[ð] F) :
    UniqueDiffOn ð (e '' s) â UniqueDiffOn ð s :=
  -- forward direction: apply the previous lemma to `e.symm`
  âšfun h => e.symm_image_image s âž e.symm.uniqueDiffOn_image h, e.uniqueDiffOn_imageâ©

@[simp]
theorem ContinuousLinearEquiv.uniqueDiffOn_preimage_iff (e : F âL[ð] E) :
    UniqueDiffOn ð (e â»Â¹' s) â UniqueDiffOn ð s := by
  -- the preimage under `e` is the image under `e.symm`
  rw [â e.image_symm_eq_preimage, e.symm.uniqueDiffOn_image_iff]

end TangentCone
|
Analysis\Calculus\FDeriv\Extend.lean | /-
Copyright (c) 2019 Sébastien Gouëzel. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Sébastien Gouëzel
-/
import Mathlib.Analysis.Calculus.MeanValue
/-!
# Extending differentiability to the boundary
We investigate how differentiable functions inside a set extend to differentiable functions
on the boundary. For this, it suffices that the function and its derivative admit limits there.
A general version of this statement is given in `hasFDerivWithinAt_closure_of_tendsto_fderiv`.
One-dimensional versions, in which one wants to obtain differentiability at the left endpoint or
the right endpoint of an interval, are given in `hasDerivWithinAt_Ici_of_tendsto_deriv` and
`hasDerivWithinAt_Iic_of_tendsto_deriv`. These versions are formulated in terms of the
one-dimensional derivative `deriv â f`.
-/
variable {E : Type*} [NormedAddCommGroup E] [NormedSpace â E] {F : Type*} [NormedAddCommGroup F]
[NormedSpace â F]
open Filter Set Metric ContinuousLinearMap
open scoped Topology
/-- If a function `f` is differentiable in a convex open set and continuous on its closure, and its
derivative converges to a limit `f'` at a point on the boundary, then `f` is differentiable there
with derivative `f'`. -/
theorem hasFDerivWithinAt_closure_of_tendsto_fderiv {f : E â F} {s : Set E} {x : E} {f' : E âL[â] F}
    (f_diff : DifferentiableOn â f s) (s_conv : Convex â s) (s_open : IsOpen s)
    (f_cont : â y â closure s, ContinuousWithinAt f s y)
    (h : Tendsto (fun y => fderiv â f y) (ð[s] x) (ð f')) :
    HasFDerivWithinAt f f' (closure s) x := by
  classical
  -- one can assume without loss of generality that `x` belongs to the closure of `s`, as the
  -- statement is empty otherwise
  by_cases hx : x â closure s
  · rw [â closure_closure] at hx; exact hasFDerivWithinAt_of_nmem_closure hx
  push_neg at hx
  rw [HasFDerivWithinAt, hasFDerivAtFilter_iff_isLittleO, Asymptotics.isLittleO_iff]
  /- One needs to show that `âf y - f x - f' (y - x)â †ε ây - xâ` for `y` close to `x` in
    `closure s`, where `ε` is an arbitrary positive constant. By continuity of the functions, it
    suffices to prove this for nearby points inside `s`. In a neighborhood of `x`, the derivative
    of `f` is arbitrarily close to `f'` by assumption. The mean value inequality completes the
    proof. -/
  intro ε ε_pos
  -- pick a radius on which the derivative of `f` is `ε`-close to `f'`
  obtain âšÎŽ, ÎŽ_pos, hÎŽâ© : â ÎŽ > 0, â y â s, dist y x < ÎŽ â âfderiv â f y - f'â < ε := by
    simpa [dist_zero_right] using tendsto_nhdsWithin_nhds.1 h ε ε_pos
  set B := ball x ÎŽ
  -- it suffices to prove the linear estimate on `B` intersected with the closure of `s`
  suffices â y â B â© closure s, âf y - f x - (f' y - f' x)â †ε * ây - xâ from
    mem_nhdsWithin_iff.2 âšÎŽ, ÎŽ_pos, fun y hy => by simpa using this y hyâ©
  -- and, by continuity, it suffices to prove it on the closure of the product of `B â© s`
  suffices
    â p : E Ã E,
      p â closure ((B â© s) ÃË¢ (B â© s)) â âf p.2 - f p.1 - (f' p.2 - f' p.1)â †ε * âp.2 - p.1â by
    rw [closure_prod_eq] at this
    intro y y_in
    apply this âšx, yâ©
    have : B â© closure s â closure (B â© s) := isOpen_ball.inter_closure
    exact âšthis âšmem_ball_self ÎŽ_pos, hxâ©, this y_inâ©
  -- key estimate inside `B â© s`, coming from the mean value inequality on a convex set
  have key : â p : E Ã E, p â (B â© s) ÃË¢ (B â© s) â
      âf p.2 - f p.1 - (f' p.2 - f' p.1)â †ε * âp.2 - p.1â := by
    rintro âšu, vâ© âšu_in, v_inâ©
    have conv : Convex â (B â© s) := (convex_ball _ _).inter s_conv
    have diff : DifferentiableOn â f (B â© s) := f_diff.mono inter_subset_right
    -- uniform bound on the difference of derivatives inside `B â© s`
    have bound : â z â B â© s, âfderivWithin â f (B â© s) z - f'â †ε := by
      intro z z_in
      have h := hÎŽ z
      have : fderivWithin â f (B â© s) z = fderiv â f z := by
        have op : IsOpen (B â© s) := isOpen_ball.inter s_open
        rw [DifferentiableAt.fderivWithin _ (op.uniqueDiffOn z z_in)]
        exact (diff z z_in).differentiableAt (IsOpen.mem_nhds op z_in)
      rw [â this] at h
      exact le_of_lt (h z_in.2 z_in.1)
    simpa using conv.norm_image_sub_le_of_norm_fderivWithin_le' diff bound u_in v_in
  -- extend `key` to the closure, by continuity of both sides of the inequality
  rintro âšu, vâ© uv_in
  have f_cont' : â y â closure s, ContinuousWithinAt (f - âf') s y := by
    intro y y_in
    exact Tendsto.sub (f_cont y y_in) f'.cont.continuousWithinAt
  refine ContinuousWithinAt.closure_le uv_in ?_ ?_ key
  all_goals
    -- common start for both continuity proofs
    have : (B â© s) ÃË¢ (B â© s) â s ÃË¢ s := by gcongr <;> exact inter_subset_right
    obtain âšu_in, v_inâ© : u â closure s â§ v â closure s := by
      simpa [closure_prod_eq] using closure_mono this uv_in
    apply ContinuousWithinAt.mono _ this
    simp only [ContinuousWithinAt]
  -- continuity of the left-hand side of the inequality
  · rw [nhdsWithin_prod_eq]
    have : â u v, f v - f u - (f' v - f' u) = f v - f' v - (f u - f' u) := by intros; abel
    simp only [this]
    exact
      Tendsto.comp continuous_norm.continuousAt
        ((Tendsto.comp (f_cont' v v_in) tendsto_snd).sub <|
          Tendsto.comp (f_cont' u u_in) tendsto_fst)
  -- continuity of the right-hand side of the inequality
  · apply tendsto_nhdsWithin_of_tendsto_nhds
    rw [nhds_prod_eq]
    exact
      tendsto_const_nhds.mul
        (Tendsto.comp continuous_norm.continuousAt <| tendsto_snd.sub tendsto_fst)

@[deprecated (since := "2024-07-10")] alias has_fderiv_at_boundary_of_tendsto_fderiv :=
  hasFDerivWithinAt_closure_of_tendsto_fderiv
/-- If a function is differentiable on the right of a point `a : â`, continuous at `a`, and
its derivative also converges at `a`, then `f` is differentiable on the right at `a`. -/
theorem hasDerivWithinAt_Ici_of_tendsto_deriv {s : Set â} {e : E} {a : â} {f : â â E}
    (f_diff : DifferentiableOn â f s) (f_lim : ContinuousWithinAt f s a) (hs : s â ð[>] a)
    (f_lim' : Tendsto (fun x => deriv f x) (ð[>] a) (ð e)) : HasDerivWithinAt f e (Ici a) a := by
  /- This is a specialization of `hasFDerivWithinAt_closure_of_tendsto_fderiv`. To be in the
    setting of this theorem, we need to work on an open interval with closure contained in
    `s ⪠{a}`, that we call `t = (a, b)`. Then, we check all the assumptions of this theorem and
    we apply it. -/
  obtain âšb, ab : a < b, sab : Ioc a b â sâ© := mem_nhdsWithin_Ioi_iff_exists_Ioc_subset.1 hs
  let t := Ioo a b
  have ts : t â s := Subset.trans Ioo_subset_Ioc_self sab
  have t_diff : DifferentiableOn â f t := f_diff.mono ts
  have t_conv : Convex â t := convex_Ioo a b
  have t_open : IsOpen t := isOpen_Ioo
  have t_closure : closure t = Icc a b := closure_Ioo ab.ne
  -- continuity of `f` on the closure of `t`: at `a` this is the hypothesis `f_lim`;
  -- elsewhere it follows from differentiability of `f` on `s`
  have t_cont : â y â closure t, ContinuousWithinAt f t y := by
    rw [t_closure]
    intro y hy
    by_cases h : y = a
    · rw [h]; exact f_lim.mono ts
    · have : y â s := sab âšlt_of_le_of_ne hy.1 (Ne.symm h), hy.2â©
      exact (f_diff.continuousOn y this).mono ts
  -- upgrade the convergence of `deriv f` to convergence of the full derivative `fderiv`
  have t_diff' : Tendsto (fun x => fderiv â f x) (ð[t] a) (ð (smulRight (1 : â âL[â] â) e)) := by
    simp only [deriv_fderiv.symm]
    exact Tendsto.comp
      (isBoundedBilinearMap_smulRight : IsBoundedBilinearMap â _).continuous_right.continuousAt
      (tendsto_nhdsWithin_mono_left Ioo_subset_Ioi_self f_lim')
  -- now we can apply `hasFDerivWithinAt_closure_of_tendsto_fderiv`
  have : HasDerivWithinAt f e (Icc a b) a := by
    rw [hasDerivWithinAt_iff_hasFDerivWithinAt, â t_closure]
    exact hasFDerivWithinAt_closure_of_tendsto_fderiv t_diff t_conv t_open t_cont t_diff'
  exact this.mono_of_mem (Icc_mem_nhdsWithin_Ici <| left_mem_Ico.2 ab)

@[deprecated (since := "2024-07-10")] alias has_deriv_at_interval_left_endpoint_of_tendsto_deriv :=
  hasDerivWithinAt_Ici_of_tendsto_deriv
/-- If a function is differentiable on the left of a point `a : â`, continuous at `a`, and
its derivative also converges at `a`, then `f` is differentiable on the left at `a`. -/
theorem hasDerivWithinAt_Iic_of_tendsto_deriv {s : Set â} {e : E} {a : â}
    {f : â â E} (f_diff : DifferentiableOn â f s) (f_lim : ContinuousWithinAt f s a)
    (hs : s â ð[<] a) (f_lim' : Tendsto (fun x => deriv f x) (ð[<] a) (ð e)) :
    HasDerivWithinAt f e (Iic a) a := by
  /- This is a specialization of `hasFDerivWithinAt_closure_of_tendsto_fderiv`. To be in the
    setting of this theorem, we need to work on an open interval with closure contained in
    `s ⪠{a}`, that we call `t = (b, a)`. Then, we check all the assumptions of this theorem and we
    apply it. -/
  obtain âšb, ba, sabâ© : â b â Iio a, Ico b a â s := mem_nhdsWithin_Iio_iff_exists_Ico_subset.1 hs
  let t := Ioo b a
  have ts : t â s := Subset.trans Ioo_subset_Ico_self sab
  have t_diff : DifferentiableOn â f t := f_diff.mono ts
  have t_conv : Convex â t := convex_Ioo b a
  have t_open : IsOpen t := isOpen_Ioo
  have t_closure : closure t = Icc b a := closure_Ioo (ne_of_lt ba)
  -- continuity of `f` on the closure of `t`: at `a` by the hypothesis `f_lim`,
  -- elsewhere by differentiability of `f` on `s`
  have t_cont : â y â closure t, ContinuousWithinAt f t y := by
    rw [t_closure]
    intro y hy
    by_cases h : y = a
    · rw [h]; exact f_lim.mono ts
    · have : y â s := sab âšhy.1, lt_of_le_of_ne hy.2 hâ©
      exact (f_diff.continuousOn y this).mono ts
  -- upgrade the convergence of `deriv f` to convergence of the full derivative `fderiv`
  have t_diff' : Tendsto (fun x => fderiv â f x) (ð[t] a) (ð (smulRight (1 : â âL[â] â) e)) := by
    simp only [deriv_fderiv.symm]
    exact Tendsto.comp
      (isBoundedBilinearMap_smulRight : IsBoundedBilinearMap â _).continuous_right.continuousAt
      (tendsto_nhdsWithin_mono_left Ioo_subset_Iio_self f_lim')
  -- now we can apply `hasFDerivWithinAt_closure_of_tendsto_fderiv`
  have : HasDerivWithinAt f e (Icc b a) a := by
    rw [hasDerivWithinAt_iff_hasFDerivWithinAt, â t_closure]
    exact hasFDerivWithinAt_closure_of_tendsto_fderiv t_diff t_conv t_open t_cont t_diff'
  exact this.mono_of_mem (Icc_mem_nhdsWithin_Iic <| right_mem_Ioc.2 ba)

@[deprecated (since := "2024-07-10")] alias has_deriv_at_interval_right_endpoint_of_tendsto_deriv :=
  hasDerivWithinAt_Iic_of_tendsto_deriv
/-- If a real function `f` has a derivative `g` everywhere but at a point, and `f` and `g` are
continuous at this point, then `g` is also the derivative of `f` at this point. -/
theorem hasDerivAt_of_hasDerivAt_of_ne {f g : â â E} {x : â}
    (f_diff : â y â x, HasDerivAt f (g y) y) (hf : ContinuousAt f x)
    (hg : ContinuousAt g x) : HasDerivAt f (g x) x := by
  -- right derivative at `x`, via the one-sided extension lemma on `Ici x`
  have A : HasDerivWithinAt f (g x) (Ici x) x := by
    have diff : DifferentiableOn â f (Ioi x) := fun y hy =>
      (f_diff y (ne_of_gt hy)).differentiableAt.differentiableWithinAt
    -- next line is the nontrivial bit of this proof, appealing to differentiability
    -- extension results.
    apply
      hasDerivWithinAt_Ici_of_tendsto_deriv diff hf.continuousWithinAt
        self_mem_nhdsWithin
    -- the derivative of `f` agrees with `g` away from `x`, and `g` converges by continuity
    have : Tendsto g (ð[>] x) (ð (g x)) := tendsto_inf_left hg
    apply this.congr' _
    apply mem_of_superset self_mem_nhdsWithin fun y hy => _
    intros y hy
    exact (f_diff y (ne_of_gt hy)).deriv.symm
  -- left derivative at `x`, symmetrically on `Iic x`
  have B : HasDerivWithinAt f (g x) (Iic x) x := by
    have diff : DifferentiableOn â f (Iio x) := fun y hy =>
      (f_diff y (ne_of_lt hy)).differentiableAt.differentiableWithinAt
    -- next line is the nontrivial bit of this proof, appealing to differentiability
    -- extension results.
    apply
      hasDerivWithinAt_Iic_of_tendsto_deriv diff hf.continuousWithinAt
        self_mem_nhdsWithin
    have : Tendsto g (ð[<] x) (ð (g x)) := tendsto_inf_left hg
    apply this.congr' _
    apply mem_of_superset self_mem_nhdsWithin fun y hy => _
    intros y hy
    exact (f_diff y (ne_of_lt hy)).deriv.symm
  -- glue the two one-sided derivatives into a two-sided one
  simpa using B.union A
/-- If a real function `f` has a derivative `g` everywhere but at a point, and `f` and `g` are
continuous at this point, then `g` is the derivative of `f` everywhere. -/
theorem hasDerivAt_of_hasDerivAt_of_ne' {f g : â â E} {x : â}
    (f_diff : â y â x, HasDerivAt f (g y) y) (hf : ContinuousAt f x)
    (hg : ContinuousAt g x) (y : â) : HasDerivAt f (g y) y := by
  rcases eq_or_ne y x with (rfl | hne)
  -- at the exceptional point, use the extension lemma above
  · exact hasDerivAt_of_hasDerivAt_of_ne f_diff hf hg
  -- elsewhere, this is exactly the hypothesis
  · exact f_diff y hne
|
Analysis\Calculus\FDeriv\Linear.lean | /-
Copyright (c) 2019 Jeremy Avigad. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Jeremy Avigad, Sébastien Gouëzel, Yury Kudryashov
-/
import Mathlib.Analysis.Calculus.FDeriv.Basic
import Mathlib.Analysis.Normed.Operator.BoundedLinearMaps
/-!
# The derivative of bounded linear maps
For detailed documentation of the Fréchet derivative,
see the module docstring of `Analysis/Calculus/FDeriv/Basic.lean`.
This file contains the usual formulas (and existence assertions) for the derivative of
bounded linear maps.
-/
open Filter Asymptotics ContinuousLinearMap Set Metric
open scoped Classical
open Topology NNReal Filter Asymptotics ENNReal
noncomputable section
section
variable {ð : Type*} [NontriviallyNormedField ð]
variable {E : Type*} [NormedAddCommGroup E] [NormedSpace ð E]
variable {F : Type*} [NormedAddCommGroup F] [NormedSpace ð F]
variable {G : Type*} [NormedAddCommGroup G] [NormedSpace ð G]
variable {G' : Type*} [NormedAddCommGroup G'] [NormedSpace ð G']
variable {f fâ fâ g : E â F}
variable {f' fâ' fâ' g' : E âL[ð] F}
variable (e : E âL[ð] F)
variable {x : E}
variable {s t : Set E}
variable {L Lâ Lâ : Filter E}
section ContinuousLinearMap

/-!
### Continuous linear maps
There are currently two variants of these in mathlib, the bundled version
(named `ContinuousLinearMap`, and denoted `E âL[ð] F`), and the unbundled version (with a
predicate `IsBoundedLinearMap`). We give statements for both versions. -/

-- A continuous linear map is its own derivative: the error term vanishes identically,
-- by linearity.

@[fun_prop]
protected theorem ContinuousLinearMap.hasStrictFDerivAt {x : E} : HasStrictFDerivAt e e x :=
  (isLittleO_zero _ _).congr_left fun x => by simp only [e.map_sub, sub_self]

protected theorem ContinuousLinearMap.hasFDerivAtFilter : HasFDerivAtFilter e e x L :=
  .of_isLittleO <| (isLittleO_zero _ _).congr_left fun x => by simp only [e.map_sub, sub_self]

@[fun_prop]
protected theorem ContinuousLinearMap.hasFDerivWithinAt : HasFDerivWithinAt e e s x :=
  e.hasFDerivAtFilter

@[fun_prop]
protected theorem ContinuousLinearMap.hasFDerivAt : HasFDerivAt e e x :=
  e.hasFDerivAtFilter

@[simp, fun_prop]
protected theorem ContinuousLinearMap.differentiableAt : DifferentiableAt ð e x :=
  e.hasFDerivAt.differentiableAt

@[fun_prop]
protected theorem ContinuousLinearMap.differentiableWithinAt : DifferentiableWithinAt ð e s x :=
  e.differentiableAt.differentiableWithinAt

@[simp]
protected theorem ContinuousLinearMap.fderiv : fderiv ð e x = e :=
  e.hasFDerivAt.fderiv

protected theorem ContinuousLinearMap.fderivWithin (hxs : UniqueDiffWithinAt ð s x) :
    fderivWithin ð e s x = e := by
  rw [DifferentiableAt.fderivWithin e.differentiableAt hxs]
  exact e.fderiv

@[simp, fun_prop]
protected theorem ContinuousLinearMap.differentiable : Differentiable ð e := fun _ =>
  e.differentiableAt

@[fun_prop]
protected theorem ContinuousLinearMap.differentiableOn : DifferentiableOn ð e s :=
  e.differentiable.differentiableOn

-- Unbundled versions: each statement reduces to the bundled one applied to
-- `h.toContinuousLinearMap`.

theorem IsBoundedLinearMap.hasFDerivAtFilter (h : IsBoundedLinearMap ð f) :
    HasFDerivAtFilter f h.toContinuousLinearMap x L :=
  h.toContinuousLinearMap.hasFDerivAtFilter

@[fun_prop]
theorem IsBoundedLinearMap.hasFDerivWithinAt (h : IsBoundedLinearMap ð f) :
    HasFDerivWithinAt f h.toContinuousLinearMap s x :=
  h.hasFDerivAtFilter

@[fun_prop]
theorem IsBoundedLinearMap.hasFDerivAt (h : IsBoundedLinearMap ð f) :
    HasFDerivAt f h.toContinuousLinearMap x :=
  h.hasFDerivAtFilter

@[fun_prop]
theorem IsBoundedLinearMap.differentiableAt (h : IsBoundedLinearMap ð f) : DifferentiableAt ð f x :=
  h.hasFDerivAt.differentiableAt

@[fun_prop]
theorem IsBoundedLinearMap.differentiableWithinAt (h : IsBoundedLinearMap ð f) :
    DifferentiableWithinAt ð f s x :=
  h.differentiableAt.differentiableWithinAt

theorem IsBoundedLinearMap.fderiv (h : IsBoundedLinearMap ð f) :
    fderiv ð f x = h.toContinuousLinearMap :=
  HasFDerivAt.fderiv h.hasFDerivAt

theorem IsBoundedLinearMap.fderivWithin (h : IsBoundedLinearMap ð f)
    (hxs : UniqueDiffWithinAt ð s x) : fderivWithin ð f s x = h.toContinuousLinearMap := by
  rw [DifferentiableAt.fderivWithin h.differentiableAt hxs]
  exact h.fderiv

@[fun_prop]
theorem IsBoundedLinearMap.differentiable (h : IsBoundedLinearMap ð f) : Differentiable ð f :=
  fun _ => h.differentiableAt

@[fun_prop]
theorem IsBoundedLinearMap.differentiableOn (h : IsBoundedLinearMap ð f) : DifferentiableOn ð f s :=
  h.differentiable.differentiableOn

end ContinuousLinearMap
end
|
Analysis\Calculus\FDeriv\Measurable.lean | /-
Copyright (c) 2020 Sébastien Gouëzel. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Sébastien Gouëzel, Yury Kudryashov
-/
import Mathlib.Analysis.Calculus.Deriv.Basic
import Mathlib.Analysis.Calculus.Deriv.Slope
import Mathlib.Analysis.Normed.Operator.BoundedLinearMaps
import Mathlib.Analysis.Normed.Module.FiniteDimension
import Mathlib.MeasureTheory.Constructions.BorelSpace.ContinuousLinearMap
import Mathlib.MeasureTheory.Function.StronglyMeasurable.Basic
/-!
# Derivative is measurable
In this file we prove that the derivative of any function with complete codomain is a measurable
function. Namely, we prove:
* `measurableSet_of_differentiableAt`: the set `{x | DifferentiableAt ð f x}` is measurable;
* `measurable_fderiv`: the function `fderiv ð f` is measurable;
* `measurable_fderiv_apply_const`: for a fixed vector `y`, the function `fun x ⊠fderiv ð f x y`
is measurable;
* `measurable_deriv`: the function `deriv f` is measurable (for `f : ð â F`).
We also show the same results for the right derivative on the real line
(see `measurable_derivWithin_Ici` and `measurable_derivWithin_Ioi`), following the same
proof strategy.
We also prove measurability statements for functions depending on a parameter: for `f : α â E â F`,
we show the measurability of `(p : α à E) ⊠fderiv ð (f p.1) p.2`. This requires additional
assumptions. We give versions of the above statements (appending `with_param` to their names) when
`f` is continuous and `E` is locally compact.
## Implementation
We give a proof that avoids second-countability issues, by expressing the differentiability set
as a function of open sets in the following way. Define `A (L, r, ε)` to be the set of points
where, on a ball of radius roughly `r` around `x`, the function is uniformly approximated by the
linear map `L`, up to `ε r`. It is an open set.
Let also `B (L, r, s, ε) = A (L, r, ε) ⩠A (L, s, ε)`: we require that at two possibly different
scales `r` and `s`, the function is well approximated by the linear map `L`. It is also open.
We claim that the differentiability set of `f` is exactly
`D = â ε > 0, â ÎŽ > 0, â r, s < ÎŽ, â L, B (L, r, s, ε)`.
In other words, for any `ε > 0`, we require that there is a size `Ύ` such that, for any two scales
below this size, the function is well approximated by a linear map, common to the two scales.
The set `â L, B (L, r, s, ε)` is open, as a union of open sets. Converting the intersections and
unions to countable ones (using real numbers of the form `2 ^ (-n)`), it follows that the
differentiability set is measurable.
To prove the claim, there are two inclusions. One is trivial: if the function is differentiable
at `x`, then `x` belongs to `D` (just take `L` to be the derivative, and use that the
differentiability exactly says that the map is well approximated by `L`). This is proved in
`mem_A_of_differentiable` and `differentiable_set_subset_D`.
For the other direction, the difficulty is that `L` in the union may depend on `ε, r, s`. The key
point is that, in fact, it doesn't depend too much on them. First, if `x` belongs both to
`A (L, r, ε)` and `A (L', r, ε)`, then `L` and `L'` have to be close on a shell, and thus
`âL - L'â` is bounded by `ε` (see `norm_sub_le_of_mem_A`). Assume now `x â D`. If one has two maps
`L` and `L'` such that `x` belongs to `A (L, r, ε)` and to `A (L', r', ε')`, one deduces that `L` is
close to `L'` by arguing as follows. Consider another scale `s` smaller than `r` and `r'`. Take a
linear map `Lâ` that approximates `f` around `x` both at scales `r` and `s` w.r.t. `ε` (it exists as
`x` belongs to `D`). Take also `Lâ` that approximates `f` around `x` both at scales `r'` and `s`
w.r.t. `ε'`. Then `Lâ` is close to `L` (as they are close on a shell of radius `r`), and `Lâ` is
close to `Lâ` (as they are close on a shell of radius `s`), and `L'` is close to `Lâ` (as they are
close on a shell of radius `r'`). It follows that `L` is close to `L'`, as we claimed.
It follows that the different approximating linear maps that show up form a Cauchy sequence when
`ε` tends to `0`. When the target space is complete, this sequence converges, to a limit `f'`.
With the same kind of arguments, one checks that `f` is differentiable with derivative `f'`.
To show that the derivative itself is measurable, add in the definition of `B` and `D` a set
`K` of continuous linear maps to which `L` should belong. Then, when `K` is complete, the set `D K`
is exactly the set of points where `f` is differentiable with a derivative in `K`.
## Tags
derivative, measurable function, Borel Ï-algebra
-/
noncomputable section
open Set Metric Asymptotics Filter ContinuousLinearMap MeasureTheory TopologicalSpace
open scoped Topology
namespace ContinuousLinearMap

variable {ð E F : Type*} [NontriviallyNormedField ð] [NormedAddCommGroup E] [NormedSpace ð E]
  [NormedAddCommGroup F] [NormedSpace ð F]

/-- Evaluation of a continuous linear map at a vector, as a function of the pair, is measurable:
it is continuous, being a bounded bilinear map. -/
theorem measurable_applyâ [MeasurableSpace E] [OpensMeasurableSpace E]
    [SecondCountableTopologyEither (E âL[ð] F) E]
    [MeasurableSpace F] [BorelSpace F] : Measurable fun p : (E âL[ð] F) Ã E => p.1 p.2 :=
  isBoundedBilinearMap_apply.continuous.measurable

end ContinuousLinearMap
section fderiv
variable {ð : Type*} [NontriviallyNormedField ð]
variable {E : Type*} [NormedAddCommGroup E] [NormedSpace ð E]
variable {F : Type*} [NormedAddCommGroup F] [NormedSpace ð F]
variable {f : E â F} (K : Set (E âL[ð] F))
namespace FDerivMeasurableAux
/-- The set `A f L r ε` is the set of points `x` around which the function `f` is well approximated
at scale `r` by the linear map `L`, up to an error `ε`. We tweak the definition to make sure that
this is an open set. -/
def A (f : E â F) (L : E âL[ð] F) (r ε : â) : Set E :=
  -- the radius `r'` is quantified existentially in the half-open interval, making the set open
  { x | â r' â Ioc (r / 2) r, â y â ball x r', â z â ball x r', âf z - f y - L (z - y)â < ε * r }
/-- The set `B f K r s ε` is the set of points `x` around which there exists a continuous linear map
`L` belonging to `K` (a given set of continuous linear maps) that approximates well the
function `f` (up to an error `ε`), simultaneously at scales `r` and `s`. -/
def B (f : E â F) (K : Set (E âL[ð] F)) (r s ε : â) : Set E :=
  -- a union over `L â K` of intersections of two `A` sets; crucially the same `L` serves twice
  â L â K, A f L r ε â© A f L s ε
/-- The set `D f K` is a complicated set constructed using countable intersections and unions. Its
main use is that, when `K` is complete, it is exactly the set of points where `f` is differentiable,
with a derivative in `K`. -/
def D (f : E â F) (K : Set (E âL[ð] F)) : Set E :=
  -- countable quantifiers over powers of `1 / 2` keep the construction measurable
  â e : â, â n : â, â (p ⥠n) (q ⥠n), B f K ((1 / 2) ^ p) ((1 / 2) ^ q) ((1 / 2) ^ e)
theorem isOpen_A (L : E âL[ð] F) (r ε : â) : IsOpen (A f L r ε) := by
rw [Metric.isOpen_iff]
rintro x âšr', r'_mem, hr'â©
obtain âšs, s_gt, s_ltâ© : â s : â, r / 2 < s â§ s < r' := exists_between r'_mem.1
have : s â Ioc (r / 2) r := âšs_gt, le_of_lt (s_lt.trans_le r'_mem.2)â©
refine âšr' - s, by linarith, fun x' hx' => âšs, this, ?_â©â©
have B : ball x' s â ball x r' := ball_subset (le_of_lt hx')
intro y hy z hz
exact hr' y (B hy) z (B hz)
theorem isOpen_B {K : Set (E âL[ð] F)} {r s ε : â} : IsOpen (B f K r s ε) := by
simp [B, isOpen_biUnion, IsOpen.inter, isOpen_A]
theorem A_mono (L : E âL[ð] F) (r : â) {ε ÎŽ : â} (h : ε †Ύ) : A f L r ε â A f L r ÎŽ := by
rintro x âšr', r'r, hr'â©
refine âšr', r'r, fun y hy z hz => (hr' y hy z hz).trans_le (mul_le_mul_of_nonneg_right h ?_)â©
linarith [mem_ball.1 hy, r'r.2, @dist_nonneg _ _ y x]
theorem le_of_mem_A {r ε : â} {L : E âL[ð] F} {x : E} (hx : x â A f L r ε) {y z : E}
(hy : y â closedBall x (r / 2)) (hz : z â closedBall x (r / 2)) :
âf z - f y - L (z - y)â †ε * r := by
rcases hx with âšr', r'mem, hr'â©
apply le_of_lt
exact hr' _ ((mem_closedBall.1 hy).trans_lt r'mem.1) _ ((mem_closedBall.1 hz).trans_lt r'mem.1)
theorem mem_A_of_differentiable {ε : â} (hε : 0 < ε) {x : E} (hx : DifferentiableAt ð f x) :
â R > 0, â r â Ioo (0 : â) R, x â A f (fderiv ð f x) r ε := by
let Ύ := (ε / 2) / 2
obtain âšR, R_pos, hRâ© :
â R > 0, â y â ball x R, âf y - f x - fderiv ð f x (y - x)â †Ύ * ây - xâ :=
eventually_nhds_iff_ball.1 <| hx.hasFDerivAt.isLittleO.bound <| by positivity
refine âšR, R_pos, fun r hr => ?_â©
have : r â Ioc (r / 2) r := right_mem_Ioc.2 <| half_lt_self hr.1
refine âšr, this, fun y hy z hz => ?_â©
calc
âf z - f y - (fderiv ð f x) (z - y)â =
âf z - f x - (fderiv ð f x) (z - x) - (f y - f x - (fderiv ð f x) (y - x))â := by
simp only [map_sub]; abel_nf
_ †âf z - f x - (fderiv ð f x) (z - x)â + âf y - f x - (fderiv ð f x) (y - x)â :=
norm_sub_le _ _
_ †Ύ * âz - xâ + ÎŽ * ây - xâ :=
add_le_add (hR _ (ball_subset_ball hr.2.le hz)) (hR _ (ball_subset_ball hr.2.le hy))
_ †Ύ * r + Ύ * r := by rw [mem_ball_iff_norm] at hz hy; gcongr
_ = (ε / 2) * r := by ring
_ < ε * r := by gcongr; exacts [hr.1, half_lt_self hε]
theorem norm_sub_le_of_mem_A {c : ð} (hc : 1 < âcâ) {r ε : â} (hε : 0 < ε) (hr : 0 < r) {x : E}
{Lâ Lâ : E âL[ð] F} (hâ : x â A f Lâ r ε) (hâ : x â A f Lâ r ε) : âLâ - Lââ †4 * âcâ * ε := by
refine opNorm_le_of_shell (half_pos hr) (by positivity) hc ?_
intro y ley ylt
rw [div_div, div_le_iff' (mul_pos (by norm_num : (0 : â) < 2) (zero_lt_one.trans hc))] at ley
calc
â(Lâ - Lâ) yâ = âf (x + y) - f x - Lâ (x + y - x) - (f (x + y) - f x - Lâ (x + y - x))â := by
simp
_ †âf (x + y) - f x - Lâ (x + y - x)â + âf (x + y) - f x - Lâ (x + y - x)â := norm_sub_le _ _
_ †ε * r + ε * r := by
apply add_le_add
· apply le_of_mem_A hâ
· simp only [le_of_lt (half_pos hr), mem_closedBall, dist_self]
· simp only [dist_eq_norm, add_sub_cancel_left, mem_closedBall, ylt.le]
· apply le_of_mem_A hâ
· simp only [le_of_lt (half_pos hr), mem_closedBall, dist_self]
· simp only [dist_eq_norm, add_sub_cancel_left, mem_closedBall, ylt.le]
_ = 2 * ε * r := by ring
_ †2 * ε * (2 * âcâ * âyâ) := by gcongr
_ = 4 * âcâ * ε * âyâ := by ring
/-- Easy inclusion: a differentiability point with derivative in `K` belongs to `D f K`. -/
theorem differentiable_set_subset_D :
{ x | DifferentiableAt ð f x â§ fderiv ð f x â K } â D f K := by
intro x hx
rw [D, mem_iInter]
intro e
have : (0 : â) < (1 / 2) ^ e := by positivity
rcases mem_A_of_differentiable this hx.1 with âšR, R_pos, hRâ©
obtain âšn, hnâ© : â n : â, (1 / 2) ^ n < R :=
exists_pow_lt_of_lt_one R_pos (by norm_num : (1 : â) / 2 < 1)
simp only [mem_iUnion, mem_iInter, B, mem_inter_iff]
refine âšn, fun p hp q hq => âšfderiv ð f x, hx.2, âš?_, ?_â©â©â© <;>
· refine hR _ âšpow_pos (by norm_num) _, lt_of_le_of_lt ?_ hnâ©
exact pow_le_pow_of_le_one (by norm_num) (by norm_num) (by assumption)
/-- Harder inclusion: at a point in `D f K`, the function `f` has a derivative, in `K`. -/
theorem D_subset_differentiable_set {K : Set (E âL[ð] F)} (hK : IsComplete K) :
D f K â { x | DifferentiableAt ð f x â§ fderiv ð f x â K } := by
have P : â {n : â}, (0 : â) < (1 / 2) ^ n := fun {n} => pow_pos (by norm_num) n
rcases NormedField.exists_one_lt_norm ð with âšc, hcâ©
intro x hx
have :
â e : â, â n : â, â p q, n †p â n †q â
â L â K, x â A f L ((1 / 2) ^ p) ((1 / 2) ^ e) â© A f L ((1 / 2) ^ q) ((1 / 2) ^ e) := by
intro e
have := mem_iInter.1 hx e
rcases mem_iUnion.1 this with âšn, hnâ©
refine âšn, fun p q hp hq => ?_â©
simp only [mem_iInter] at hn
rcases mem_iUnion.1 (hn p hp q hq) with âšL, hLâ©
exact âšL, exists_prop.mp <| mem_iUnion.1 hLâ©
/- Recast the assumptions: for each `e`, there exist `n e` and linear maps `L e p q` in `K`
such that, for `p, q ⥠n e`, then `f` is well approximated by `L e p q` at scale `2 ^ (-p)` and
`2 ^ (-q)`, with an error `2 ^ (-e)`. -/
choose! n L hn using this
/- All the operators `L e p q` that show up are close to each other. To prove this, we argue
that `L e p q` is close to `L e p r` (where `r` is large enough), as both approximate `f` at
scale `2 ^(- p)`. And `L e p r` is close to `L e' p' r` as both approximate `f` at scale
`2 ^ (- r)`. And `L e' p' r` is close to `L e' p' q'` as both approximate `f` at scale
`2 ^ (- p')`. -/
have M :
â e p q e' p' q',
n e †p â
n e †q â
n e' †p' â n e' †q' â e †e' â âL e p q - L e' p' q'â †12 * âcâ * (1 / 2) ^ e := by
intro e p q e' p' q' hp hq hp' hq' he'
let r := max (n e) (n e')
have I : ((1 : â) / 2) ^ e' †(1 / 2) ^ e :=
pow_le_pow_of_le_one (by norm_num) (by norm_num) he'
have J1 : âL e p q - L e p râ †4 * âcâ * (1 / 2) ^ e := by
have I1 : x â A f (L e p q) ((1 / 2) ^ p) ((1 / 2) ^ e) := (hn e p q hp hq).2.1
have I2 : x â A f (L e p r) ((1 / 2) ^ p) ((1 / 2) ^ e) := (hn e p r hp (le_max_left _ _)).2.1
exact norm_sub_le_of_mem_A hc P P I1 I2
have J2 : âL e p r - L e' p' râ †4 * âcâ * (1 / 2) ^ e := by
have I1 : x â A f (L e p r) ((1 / 2) ^ r) ((1 / 2) ^ e) := (hn e p r hp (le_max_left _ _)).2.2
have I2 : x â A f (L e' p' r) ((1 / 2) ^ r) ((1 / 2) ^ e') :=
(hn e' p' r hp' (le_max_right _ _)).2.2
exact norm_sub_le_of_mem_A hc P P I1 (A_mono _ _ I I2)
have J3 : âL e' p' r - L e' p' q'â †4 * âcâ * (1 / 2) ^ e := by
have I1 : x â A f (L e' p' r) ((1 / 2) ^ p') ((1 / 2) ^ e') :=
(hn e' p' r hp' (le_max_right _ _)).2.1
have I2 : x â A f (L e' p' q') ((1 / 2) ^ p') ((1 / 2) ^ e') := (hn e' p' q' hp' hq').2.1
exact norm_sub_le_of_mem_A hc P P (A_mono _ _ I I1) (A_mono _ _ I I2)
calc
âL e p q - L e' p' q'â =
âL e p q - L e p r + (L e p r - L e' p' r) + (L e' p' r - L e' p' q')â := by
congr 1; abel
_ †âL e p q - L e p râ + âL e p r - L e' p' râ + âL e' p' r - L e' p' q'â :=
norm_addâ_le _ _ _
_ †4 * âcâ * (1 / 2) ^ e + 4 * âcâ * (1 / 2) ^ e + 4 * âcâ * (1 / 2) ^ e := by gcongr
_ = 12 * âcâ * (1 / 2) ^ e := by ring
/- For definiteness, use `L0 e = L e (n e) (n e)`, to have a single sequence. We claim that this
is a Cauchy sequence. -/
let L0 : â â E âL[ð] F := fun e => L e (n e) (n e)
have : CauchySeq L0 := by
rw [Metric.cauchySeq_iff']
intro ε εpos
obtain âše, heâ© : â e : â, (1 / 2) ^ e < ε / (12 * âcâ) :=
exists_pow_lt_of_lt_one (by positivity) (by norm_num)
refine âše, fun e' he' => ?_â©
rw [dist_comm, dist_eq_norm]
calc
âL0 e - L0 e'â †12 * âcâ * (1 / 2) ^ e := M _ _ _ _ _ _ le_rfl le_rfl le_rfl le_rfl he'
_ < 12 * âcâ * (ε / (12 * âcâ)) := by gcongr
_ = ε := by field_simp
-- As it is Cauchy, the sequence `L0` converges, to a limit `f'` in `K`.
obtain âšf', f'K, hf'â© : â f' â K, Tendsto L0 atTop (ð f') :=
cauchySeq_tendsto_of_isComplete hK (fun e => (hn e (n e) (n e) le_rfl le_rfl).1) this
have Lf' : â e p, n e †p â âL e (n e) p - f'â †12 * âcâ * (1 / 2) ^ e := by
intro e p hp
apply le_of_tendsto (tendsto_const_nhds.sub hf').norm
rw [eventually_atTop]
exact âše, fun e' he' => M _ _ _ _ _ _ le_rfl hp le_rfl le_rfl he'â©
-- Let us show that `f` has derivative `f'` at `x`.
have : HasFDerivAt f f' x := by
simp only [hasFDerivAt_iff_isLittleO_nhds_zero, isLittleO_iff]
/- to get an approximation with a precision `ε`, we will replace `f` with `L e (n e) m` for
some large enough `e` (yielding a small error by uniform approximation). As one can vary `m`,
this makes it possible to cover all scales, and thus to obtain a good linear approximation in
the whole ball of radius `(1/2)^(n e)`. -/
intro ε εpos
have pos : 0 < 4 + 12 * âcâ := by positivity
obtain âše, heâ© : â e : â, (1 / 2) ^ e < ε / (4 + 12 * âcâ) :=
exists_pow_lt_of_lt_one (div_pos εpos pos) (by norm_num)
rw [eventually_nhds_iff_ball]
refine âš(1 / 2) ^ (n e + 1), P, fun y hy => ?_â©
-- We need to show that `f (x + y) - f x - f' y` is small. For this, we will work at scale
-- `k` where `k` is chosen with `âyâ ⌠2 ^ (-k)`.
by_cases y_pos : y = 0
· simp [y_pos]
have yzero : 0 < âyâ := norm_pos_iff.mpr y_pos
have y_lt : âyâ < (1 / 2) ^ (n e + 1) := by simpa using mem_ball_iff_norm.1 hy
have yone : âyâ †1 := le_trans y_lt.le (pow_le_one _ (by norm_num) (by norm_num))
-- define the scale `k`.
obtain âšk, hk, h'kâ© : â k : â, (1 / 2) ^ (k + 1) < âyâ â§ âyâ †(1 / 2) ^ k :=
exists_nat_pow_near_of_lt_one yzero yone (by norm_num : (0 : â) < 1 / 2)
(by norm_num : (1 : â) / 2 < 1)
-- the scale is large enough (as `y` is small enough)
have k_gt : n e < k := by
have : ((1 : â) / 2) ^ (k + 1) < (1 / 2) ^ (n e + 1) := lt_trans hk y_lt
rw [pow_lt_pow_iff_right_of_lt_one (by norm_num : (0 : â) < 1 / 2) (by norm_num)] at this
omega
set m := k - 1
have m_ge : n e †m := Nat.le_sub_one_of_lt k_gt
have km : k = m + 1 := (Nat.succ_pred_eq_of_pos (lt_of_le_of_lt (zero_le _) k_gt)).symm
rw [km] at hk h'k
-- `f` is well approximated by `L e (n e) k` at the relevant scale
-- (in fact, we use `m = k - 1` instead of `k` because of the precise definition of `A`).
have J1 : âf (x + y) - f x - L e (n e) m (x + y - x)â †(1 / 2) ^ e * (1 / 2) ^ m := by
apply le_of_mem_A (hn e (n e) m le_rfl m_ge).2.2
· simp only [mem_closedBall, dist_self]
positivity
· simpa only [dist_eq_norm, add_sub_cancel_left, mem_closedBall, pow_succ, mul_one_div] using
h'k
have J2 : âf (x + y) - f x - L e (n e) m yâ †4 * (1 / 2) ^ e * âyâ :=
calc
âf (x + y) - f x - L e (n e) m yâ †(1 / 2) ^ e * (1 / 2) ^ m := by
simpa only [add_sub_cancel_left] using J1
_ = 4 * (1 / 2) ^ e * (1 / 2) ^ (m + 2) := by field_simp; ring
_ †4 * (1 / 2) ^ e * âyâ := by gcongr
-- use the previous estimates to see that `f (x + y) - f x - f' y` is small.
calc
âf (x + y) - f x - f' yâ = âf (x + y) - f x - L e (n e) m y + (L e (n e) m - f') yâ :=
congr_arg _ (by simp)
_ †4 * (1 / 2) ^ e * âyâ + 12 * âcâ * (1 / 2) ^ e * âyâ :=
norm_add_le_of_le J2 <| (le_opNorm _ _).trans <| by gcongr; exact Lf' _ _ m_ge
_ = (4 + 12 * âcâ) * âyâ * (1 / 2) ^ e := by ring
_ †(4 + 12 * âcâ) * âyâ * (ε / (4 + 12 * âcâ)) := by gcongr
_ = ε * âyâ := by field_simp [ne_of_gt pos]; ring
rw [â this.fderiv] at f'K
exact âšthis.differentiableAt, f'Kâ©
theorem differentiable_set_eq_D (hK : IsComplete K) :
{ x | DifferentiableAt ð f x â§ fderiv ð f x â K } = D f K :=
Subset.antisymm (differentiable_set_subset_D _) (D_subset_differentiable_set hK)
end FDerivMeasurableAux
open FDerivMeasurableAux
variable [MeasurableSpace E] [OpensMeasurableSpace E]
variable (ð f)
/-- The set of differentiability points of a function, with derivative in a given complete set,
is Borel-measurable. -/
theorem measurableSet_of_differentiableAt_of_isComplete {K : Set (E âL[ð] F)} (hK : IsComplete K) :
MeasurableSet { x | DifferentiableAt ð f x â§ fderiv ð f x â K } := by
-- Porting note: was
-- simp [differentiable_set_eq_D K hK, D, isOpen_B.measurableSet, MeasurableSet.iInter,
-- MeasurableSet.iUnion]
simp only [D, differentiable_set_eq_D K hK]
repeat apply_rules [MeasurableSet.iUnion, MeasurableSet.iInter] <;> intro
exact isOpen_B.measurableSet
variable [CompleteSpace F]
/-- The set of differentiability points of a function taking values in a complete space is
Borel-measurable. -/
theorem measurableSet_of_differentiableAt : MeasurableSet { x | DifferentiableAt ð f x } := by
have : IsComplete (univ : Set (E âL[ð] F)) := complete_univ
convert measurableSet_of_differentiableAt_of_isComplete ð f this
simp
@[measurability, fun_prop]
theorem measurable_fderiv : Measurable (fderiv ð f) := by
refine measurable_of_isClosed fun s hs => ?_
have :
fderiv ð f â»Â¹' s =
{ x | DifferentiableAt ð f x â§ fderiv ð f x â s } âª
{ x | ¬DifferentiableAt ð f x } â© { _x | (0 : E âL[ð] F) â s } :=
Set.ext fun x => mem_preimage.trans fderiv_mem_iff
rw [this]
exact
(measurableSet_of_differentiableAt_of_isComplete _ _ hs.isComplete).union
((measurableSet_of_differentiableAt _ _).compl.inter (MeasurableSet.const _))
@[measurability, fun_prop]
theorem measurable_fderiv_apply_const [MeasurableSpace F] [BorelSpace F] (y : E) :
Measurable fun x => fderiv ð f x y :=
(ContinuousLinearMap.measurable_apply y).comp (measurable_fderiv ð f)
variable {ð}
@[measurability, fun_prop]
theorem measurable_deriv [MeasurableSpace ð] [OpensMeasurableSpace ð] [MeasurableSpace F]
[BorelSpace F] (f : ð â F) : Measurable (deriv f) := by
simpa only [fderiv_deriv] using measurable_fderiv_apply_const ð f 1
theorem stronglyMeasurable_deriv [MeasurableSpace ð] [OpensMeasurableSpace ð]
[h : SecondCountableTopologyEither ð F] (f : ð â F) : StronglyMeasurable (deriv f) := by
borelize F
rcases h.out with hð|hF
· exact stronglyMeasurable_iff_measurable_separable.2
âšmeasurable_deriv f, isSeparable_range_deriv _â©
· exact (measurable_deriv f).stronglyMeasurable
theorem aemeasurable_deriv [MeasurableSpace ð] [OpensMeasurableSpace ð] [MeasurableSpace F]
[BorelSpace F] (f : ð â F) (ÎŒ : Measure ð) : AEMeasurable (deriv f) ÎŒ :=
(measurable_deriv f).aemeasurable
theorem aestronglyMeasurable_deriv [MeasurableSpace ð] [OpensMeasurableSpace ð]
[SecondCountableTopologyEither ð F] (f : ð â F) (ÎŒ : Measure ð) :
AEStronglyMeasurable (deriv f) Ό :=
(stronglyMeasurable_deriv f).aestronglyMeasurable
end fderiv
section RightDeriv

variable {F : Type*} [NormedAddCommGroup F] [NormedSpace â F]
variable {f : â â F} (K : Set F)

namespace RightDerivMeasurableAux

/-- The set `A f L r ε` is the set of points `x` around which the function `f` is well approximated
at scale `r` by the linear map `h ⊠h ⢠L`, up to an error `ε`. We tweak the definition to
make sure that this is open on the right. -/
def A (f : â â F) (L : F) (r ε : â) : Set â :=
  { x | â r' â Ioc (r / 2) r, âáµ (y â Icc x (x + r')) (z â Icc x (x + r')),
    âf z - f y - (z - y) ⢠Lâ †ε * r }

/-- The set `B f K r s ε` is the set of points `x` around which there exists a vector
`L` belonging to `K` (a given set of vectors) such that `h ⢠L` approximates well `f (x + h)`
(up to an error `ε`), simultaneously at scales `r` and `s`. -/
def B (f : â â F) (K : Set F) (r s ε : â) : Set â :=
  â L â K, A f L r ε â© A f L s ε

/-- The set `D f K` is a complicated set constructed using countable intersections and unions. Its
main use is that, when `K` is complete, it is exactly the set of points where `f` is differentiable,
with a derivative in `K`. -/
def D (f : â â F) (K : Set F) : Set â :=
  â e : â, â n : â, â (p ⥠n) (q ⥠n), B f K ((1 / 2) ^ p) ((1 / 2) ^ q) ((1 / 2) ^ e)

/-- The set `A f L r ε` is a right neighborhood of each of its points: points slightly to the
right of `x` still satisfy the estimate at a slightly smaller scale `s`. -/
theorem A_mem_nhdsWithin_Ioi {L : F} {r ε x : â} (hx : x â A f L r ε) : A f L r ε â ð[>] x := by
  rcases hx with âšr', rr', hr'â©
  rw [mem_nhdsWithin_Ioi_iff_exists_Ioo_subset]
  -- pick an intermediate scale `s` strictly between `r / 2` and `r'`
  obtain âšs, s_gt, s_ltâ© : â s : â, r / 2 < s â§ s < r' := exists_between rr'.1
  have : s â Ioc (r / 2) r := âšs_gt, le_of_lt (s_lt.trans_le rr'.2)â©
  refine âšx + r' - s, by simp only [mem_Ioi]; linarith, fun x' hx' => âšs, this, ?_â©â©
  -- the interval at `x'` of length `s` sits inside the interval at `x` of length `r'`
  have A : Icc x' (x' + s) â Icc x (x + r') := by
    apply Icc_subset_Icc hx'.1.le
    linarith [hx'.2]
  intro y hy z hz
  exact hr' y (A hy) z (A hz)

/-- `B f K r s ε` is a right neighborhood of each of its points, as an intersection of two
sets `A` with this property, for a common witness `L â K`. -/
theorem B_mem_nhdsWithin_Ioi {K : Set F} {r s ε x : â} (hx : x â B f K r s ε) :
    B f K r s ε â ð[>] x := by
  obtain âšL, LK, hLâ, hLââ© : â L : F, L â K â§ x â A f L r ε â§ x â A f L s ε := by
    simpa only [B, mem_iUnion, mem_inter_iff, exists_prop] using hx
  filter_upwards [A_mem_nhdsWithin_Ioi hLâ, A_mem_nhdsWithin_Ioi hLâ] with y hyâ hyâ
  simp only [B, mem_iUnion, mem_inter_iff, exists_prop]
  exact âšL, LK, hyâ, hyââ©

/-- `B f K r s ε` is measurable, being a right neighborhood of each of its points. -/
theorem measurableSet_B {K : Set F} {r s ε : â} : MeasurableSet (B f K r s ε) :=
  measurableSet_of_mem_nhdsWithin_Ioi fun _ hx => B_mem_nhdsWithin_Ioi hx

/-- `A f L r ε` is monotone in the error parameter: a smaller error bound gives a smaller set. -/
theorem A_mono (L : F) (r : â) {ε ÎŽ : â} (h : ε †Ύ) : A f L r ε â A f L r ÎŽ := by
  rintro x âšr', r'r, hr'â©
  refine âšr', r'r, fun y hy z hz => (hr' y hy z hz).trans (mul_le_mul_of_nonneg_right h ?_)â©
  linarith [hy.1, hy.2, r'r.2]

/-- For points in `Icc x (x + r / 2)` (which lies inside `Icc x (x + r')` for every `r' > r / 2`
from the definition of `A`), membership in `A f L r ε` yields the approximation estimate. -/
theorem le_of_mem_A {r ε : â} {L : F} {x : â} (hx : x â A f L r ε) {y z : â}
    (hy : y â Icc x (x + r / 2)) (hz : z â Icc x (x + r / 2)) :
    âf z - f y - (z - y) ⢠Lâ †ε * r := by
  rcases hx with âšr', r'mem, hr'â©
  have A : x + r / 2 †x + r' := by linarith [r'mem.1]
  exact hr' _ ((Icc_subset_Icc le_rfl A) hy) _ ((Icc_subset_Icc le_rfl A) hz)

/-- At a point of right differentiability, `x` belongs to `A f (derivWithin f (Ici x) x) r ε`
for all small enough scales `r`. -/
theorem mem_A_of_differentiable {ε : â} (hε : 0 < ε) {x : â}
    (hx : DifferentiableWithinAt â f (Ici x) x) :
    â R > 0, â r â Ioo (0 : â) R, x â A f (derivWithin f (Ici x) x) r ε := by
  have := hx.hasDerivWithinAt
  simp_rw [hasDerivWithinAt_iff_isLittleO, isLittleO_iff] at this
  -- the Taylor estimate with error `ε / 2` holds on some right interval `[x, m)`
  rcases mem_nhdsWithin_Ici_iff_exists_Ico_subset.1 (this (half_pos hε)) with âšm, xm, hmâ©
  refine âšm - x, by linarith [show x < m from xm], fun r hr => ?_â©
  have : r â Ioc (r / 2) r := âšhalf_lt_self hr.1, le_rflâ©
  refine âšr, this, fun y hy z hz => ?_â©
  calc
    âf z - f y - (z - y) ⢠derivWithin f (Ici x) xâ =
        âf z - f x - (z - x) ⢠derivWithin f (Ici x) x -
          (f y - f x - (y - x) ⢠derivWithin f (Ici x) x)â := by
      congr 1; simp only [sub_smul]; abel
    _ â€
        âf z - f x - (z - x) ⢠derivWithin f (Ici x) xâ +
          âf y - f x - (y - x) ⢠derivWithin f (Ici x) xâ :=
      (norm_sub_le _ _)
    _ †ε / 2 * âz - xâ + ε / 2 * ây - xâ :=
      (add_le_add (hm âšhz.1, hz.2.trans_lt (by linarith [hr.2])â©)
        (hm âšhy.1, hy.2.trans_lt (by linarith [hr.2])â©))
    _ †ε / 2 * r + ε / 2 * r := by
      gcongr
      · rw [Real.norm_of_nonneg] <;> linarith [hz.1, hz.2]
      · rw [Real.norm_of_nonneg] <;> linarith [hy.1, hy.2]
    _ = ε * r := by ring

/-- Two vectors that both approximate `f` well at the same scale `r` are at distance at most
`4 * ε` from each other: test the estimates at the single point `x + r / 2`. -/
theorem norm_sub_le_of_mem_A {r x : â} (hr : 0 < r) (ε : â) {Lâ Lâ : F} (hâ : x â A f Lâ r ε)
    (hâ : x â A f Lâ r ε) : âLâ - Lââ †4 * ε := by
  suffices H : â(r / 2) ⢠(Lâ - Lâ)â †r / 2 * (4 * ε) by
    rwa [norm_smul, Real.norm_of_nonneg (half_pos hr).le, mul_le_mul_left (half_pos hr)] at H
  calc
    â(r / 2) ⢠(Lâ - Lâ)â =
        âf (x + r / 2) - f x - (x + r / 2 - x) ⢠Lâ -
          (f (x + r / 2) - f x - (x + r / 2 - x) ⢠Lâ)â := by
      simp [smul_sub]
    _ †âf (x + r / 2) - f x - (x + r / 2 - x) ⢠Lââ +
        âf (x + r / 2) - f x - (x + r / 2 - x) ⢠Lââ :=
      norm_sub_le _ _
    _ †ε * r + ε * r := by
      apply add_le_add
      · apply le_of_mem_A hâ <;> simp [(half_pos hr).le]
      · apply le_of_mem_A hâ <;> simp [(half_pos hr).le]
    _ = r / 2 * (4 * ε) := by ring

/-- Easy inclusion: a differentiability point with derivative in `K` belongs to `D f K`. -/
theorem differentiable_set_subset_D :
    { x | DifferentiableWithinAt â f (Ici x) x â§ derivWithin f (Ici x) x â K } â D f K := by
  intro x hx
  rw [D, mem_iInter]
  intro e
  have : (0 : â) < (1 / 2) ^ e := pow_pos (by norm_num) _
  -- `mem_A_of_differentiable` gives a threshold `R` below which every scale works
  rcases mem_A_of_differentiable this hx.1 with âšR, R_pos, hRâ©
  obtain âšn, hnâ© : â n : â, (1 / 2) ^ n < R :=
    exists_pow_lt_of_lt_one R_pos (by norm_num : (1 : â) / 2 < 1)
  simp only [mem_iUnion, mem_iInter, B, mem_inter_iff]
  -- the right derivative itself witnesses membership at every pair of scales below `(1/2) ^ n`
  refine âšn, fun p hp q hq => âšderivWithin f (Ici x) x, hx.2, âš?_, ?_â©â©â© <;>
  · refine hR _ âšpow_pos (by norm_num) _, lt_of_le_of_lt ?_ hnâ©
    exact pow_le_pow_of_le_one (by norm_num) (by norm_num) (by assumption)

/-- Harder inclusion: at a point in `D f K`, the function `f` has a derivative, in `K`. -/
theorem D_subset_differentiable_set {K : Set F} (hK : IsComplete K) :
    D f K â { x | DifferentiableWithinAt â f (Ici x) x â§ derivWithin f (Ici x) x â K } := by
  -- `P`: positivity of all the scales `(1 / 2) ^ n` used throughout the proof
  have P : â {n : â}, (0 : â) < (1 / 2) ^ n := fun {n} => pow_pos (by norm_num) n
  intro x hx
  -- unfold membership in `D f K` into an explicit approximation statement
  have :
      â e : â, â n : â, â p q, n †p â n †q â
        â L â K, x â A f L ((1 / 2) ^ p) ((1 / 2) ^ e) â© A f L ((1 / 2) ^ q) ((1 / 2) ^ e) := by
    intro e
    have := mem_iInter.1 hx e
    rcases mem_iUnion.1 this with âšn, hnâ©
    refine âšn, fun p q hp hq => ?_â©
    simp only [mem_iInter] at hn
    rcases mem_iUnion.1 (hn p hp q hq) with âšL, hLâ©
    exact âšL, exists_prop.mp <| mem_iUnion.1 hLâ©
  /- Recast the assumptions: for each `e`, there exist `n e` and linear maps `L e p q` in `K`
    such that, for `p, q ⥠n e`, then `f` is well approximated by `L e p q` at scale `2 ^ (-p)` and
    `2 ^ (-q)`, with an error `2 ^ (-e)`. -/
  choose! n L hn using this
  /- All the operators `L e p q` that show up are close to each other. To prove this, we argue
    that `L e p q` is close to `L e p r` (where `r` is large enough), as both approximate `f` at
    scale `2 ^(- p)`. And `L e p r` is close to `L e' p' r` as both approximate `f` at scale
    `2 ^ (- r)`. And `L e' p' r` is close to `L e' p' q'` as both approximate `f` at scale
    `2 ^ (- p')`. -/
  have M :
      â e p q e' p' q',
        n e †p â
          n e †q â n e' †p' â n e' †q' â e †e' â âL e p q - L e' p' q'â †12 * (1 / 2) ^ e := by
    intro e p q e' p' q' hp hq hp' hq' he'
    let r := max (n e) (n e')
    have I : ((1 : â) / 2) ^ e' †(1 / 2) ^ e :=
      pow_le_pow_of_le_one (by norm_num) (by norm_num) he'
    -- first jump: change the second scale from `q` to `r`, at fixed `e`, `p`
    have J1 : âL e p q - L e p râ †4 * (1 / 2) ^ e := by
      have I1 : x â A f (L e p q) ((1 / 2) ^ p) ((1 / 2) ^ e) := (hn e p q hp hq).2.1
      have I2 : x â A f (L e p r) ((1 / 2) ^ p) ((1 / 2) ^ e) := (hn e p r hp (le_max_left _ _)).2.1
      exact norm_sub_le_of_mem_A P _ I1 I2
    -- second jump: change `(e, p)` to `(e', p')` through the common scale `r`
    have J2 : âL e p r - L e' p' râ †4 * (1 / 2) ^ e := by
      have I1 : x â A f (L e p r) ((1 / 2) ^ r) ((1 / 2) ^ e) := (hn e p r hp (le_max_left _ _)).2.2
      have I2 : x â A f (L e' p' r) ((1 / 2) ^ r) ((1 / 2) ^ e') :=
        (hn e' p' r hp' (le_max_right _ _)).2.2
      exact norm_sub_le_of_mem_A P _ I1 (A_mono _ _ I I2)
    -- third jump: change the second scale back from `r` to `q'`
    have J3 : âL e' p' r - L e' p' q'â †4 * (1 / 2) ^ e := by
      have I1 : x â A f (L e' p' r) ((1 / 2) ^ p') ((1 / 2) ^ e') :=
        (hn e' p' r hp' (le_max_right _ _)).2.1
      have I2 : x â A f (L e' p' q') ((1 / 2) ^ p') ((1 / 2) ^ e') := (hn e' p' q' hp' hq').2.1
      exact norm_sub_le_of_mem_A P _ (A_mono _ _ I I1) (A_mono _ _ I I2)
    calc
      âL e p q - L e' p' q'â =
          âL e p q - L e p r + (L e p r - L e' p' r) + (L e' p' r - L e' p' q')â := by
        congr 1; abel
      _ †âL e p q - L e p râ + âL e p r - L e' p' râ + âL e' p' r - L e' p' q'â :=
        (le_trans (norm_add_le _ _) (add_le_add_right (norm_add_le _ _) _))
      _ †4 * (1 / 2) ^ e + 4 * (1 / 2) ^ e + 4 * (1 / 2) ^ e := by gcongr
      -- Porting note: proof was `by apply_rules [add_le_add]`
      _ = 12 * (1 / 2) ^ e := by ring
  /- For definiteness, use `L0 e = L e (n e) (n e)`, to have a single sequence. We claim that this
    is a Cauchy sequence. -/
  let L0 : â â F := fun e => L e (n e) (n e)
  have : CauchySeq L0 := by
    rw [Metric.cauchySeq_iff']
    intro ε εpos
    obtain âše, heâ© : â e : â, (1 / 2) ^ e < ε / 12 :=
      exists_pow_lt_of_lt_one (div_pos εpos (by norm_num)) (by norm_num)
    refine âše, fun e' he' => ?_â©
    rw [dist_comm, dist_eq_norm]
    calc
      âL0 e - L0 e'â †12 * (1 / 2) ^ e := M _ _ _ _ _ _ le_rfl le_rfl le_rfl le_rfl he'
      _ < 12 * (ε / 12) := mul_lt_mul' le_rfl he (le_of_lt P) (by norm_num)
      _ = ε := by field_simp [(by norm_num : (12 : â) â 0)]
  -- As it is Cauchy, the sequence `L0` converges, to a limit `f'` in `K`.
  obtain âšf', f'K, hf'â© : â f' â K, Tendsto L0 atTop (ð f') :=
    cauchySeq_tendsto_of_isComplete hK (fun e => (hn e (n e) (n e) le_rfl le_rfl).1) this
  -- all the `L e (n e) p` are within `12 * (1 / 2) ^ e` of the limit `f'`
  have Lf' : â e p, n e †p â âL e (n e) p - f'â †12 * (1 / 2) ^ e := by
    intro e p hp
    apply le_of_tendsto (tendsto_const_nhds.sub hf').norm
    rw [eventually_atTop]
    exact âše, fun e' he' => M _ _ _ _ _ _ le_rfl hp le_rfl le_rfl he'â©
  -- Let us show that `f` has right derivative `f'` at `x`.
  have : HasDerivWithinAt f f' (Ici x) x := by
    simp only [hasDerivWithinAt_iff_isLittleO, isLittleO_iff]
    /- to get an approximation with a precision `ε`, we will replace `f` with `L e (n e) m` for
      some large enough `e` (yielding a small error by uniform approximation). As one can vary `m`,
      this makes it possible to cover all scales, and thus to obtain a good linear approximation in
      the whole interval of length `(1/2)^(n e)`. -/
    intro ε εpos
    obtain âše, heâ© : â e : â, (1 / 2) ^ e < ε / 16 :=
      exists_pow_lt_of_lt_one (div_pos εpos (by norm_num)) (by norm_num)
    have xmem : x â Ico x (x + (1 / 2) ^ (n e + 1)) := by
      simp only [one_div, left_mem_Ico, lt_add_iff_pos_right, inv_pos, pow_pos, zero_lt_two,
        zero_lt_one]
    filter_upwards [Icc_mem_nhdsWithin_Ici xmem] with y hy
    -- We need to show that `f y - f x - f' (y - x)` is small. For this, we will work at scale
    -- `k` where `k` is chosen with `ây - xâ ⌠2 ^ (-k)`.
    rcases eq_or_lt_of_le hy.1 with (rfl | xy)
    · simp only [sub_self, zero_smul, norm_zero, mul_zero, le_rfl]
    have yzero : 0 < y - x := sub_pos.2 xy
    have y_le : y - x †(1 / 2) ^ (n e + 1) := by linarith [hy.2]
    have yone : y - x †1 := le_trans y_le (pow_le_one _ (by norm_num) (by norm_num))
    -- define the scale `k`.
    obtain âšk, hk, h'kâ© : â k : â, (1 / 2) ^ (k + 1) < y - x â§ y - x †(1 / 2) ^ k :=
      exists_nat_pow_near_of_lt_one yzero yone (by norm_num : (0 : â) < 1 / 2)
        (by norm_num : (1 : â) / 2 < 1)
    -- the scale is large enough (as `y - x` is small enough)
    have k_gt : n e < k := by
      have : ((1 : â) / 2) ^ (k + 1) < (1 / 2) ^ (n e + 1) := lt_of_lt_of_le hk y_le
      rw [pow_lt_pow_iff_right_of_lt_one (by norm_num : (0 : â) < 1 / 2) (by norm_num)] at this
      omega
    set m := k - 1
    have m_ge : n e †m := Nat.le_sub_one_of_lt k_gt
    have km : k = m + 1 := (Nat.succ_pred_eq_of_pos (lt_of_le_of_lt (zero_le _) k_gt)).symm
    rw [km] at hk h'k
    -- `f` is well approximated by `L e (n e) k` at the relevant scale
    -- (in fact, we use `m = k - 1` instead of `k` because of the precise definition of `A`).
    have J : âf y - f x - (y - x) ⢠L e (n e) mâ †4 * (1 / 2) ^ e * ây - xâ :=
      calc
        âf y - f x - (y - x) ⢠L e (n e) mâ †(1 / 2) ^ e * (1 / 2) ^ m := by
          apply le_of_mem_A (hn e (n e) m le_rfl m_ge).2.2
          · simp only [one_div, inv_pow, left_mem_Icc, le_add_iff_nonneg_right]
            positivity
          · simp only [pow_add, tsub_le_iff_left] at h'k
            simpa only [hy.1, mem_Icc, true_and_iff, one_div, pow_one] using h'k
        _ = 4 * (1 / 2) ^ e * (1 / 2) ^ (m + 2) := by field_simp; ring
        _ †4 * (1 / 2) ^ e * (y - x) := by gcongr
        _ = 4 * (1 / 2) ^ e * ây - xâ := by rw [Real.norm_of_nonneg yzero.le]
    -- combine `J` with the closeness of `L e (n e) m` to `f'`
    calc
      âf y - f x - (y - x) ⢠f'â =
          âf y - f x - (y - x) ⢠L e (n e) m + (y - x) ⢠(L e (n e) m - f')â := by
        simp only [smul_sub, sub_add_sub_cancel]
      _ †4 * (1 / 2) ^ e * ây - xâ + ây - xâ * (12 * (1 / 2) ^ e) :=
        norm_add_le_of_le J <| by rw [norm_smul]; gcongr; exact Lf' _ _ m_ge
      _ = 16 * ây - xâ * (1 / 2) ^ e := by ring
      _ †16 * ây - xâ * (ε / 16) := by gcongr
      _ = ε * ây - xâ := by ring
  rw [â this.derivWithin (uniqueDiffOn_Ici x x Set.left_mem_Ici)] at f'K
  exact âšthis.differentiableWithinAt, f'Kâ©

/-- The right-differentiability set with derivative in a complete set `K` is exactly `D f K`. -/
theorem differentiable_set_eq_D (hK : IsComplete K) :
    { x | DifferentiableWithinAt â f (Ici x) x â§ derivWithin f (Ici x) x â K } = D f K :=
  Subset.antisymm (differentiable_set_subset_D _) (D_subset_differentiable_set hK)

end RightDerivMeasurableAux

open RightDerivMeasurableAux

variable (f)

/-- The set of right differentiability points of a function, with derivative in a given complete
set, is Borel-measurable. -/
theorem measurableSet_of_differentiableWithinAt_Ici_of_isComplete {K : Set F} (hK : IsComplete K) :
    MeasurableSet { x | DifferentiableWithinAt â f (Ici x) x â§ derivWithin f (Ici x) x â K } := by
  -- simp [differentiable_set_eq_d K hK, D, measurableSet_b, MeasurableSet.iInter,
  --   MeasurableSet.iUnion]
  -- rewrite to `D f K`, then peel off the countable intersections/unions down to `B`
  simp only [differentiable_set_eq_D K hK, D]
  repeat apply_rules [MeasurableSet.iUnion, MeasurableSet.iInter] <;> intro
  exact measurableSet_B

variable [CompleteSpace F]

/-- The set of right differentiability points of a function taking values in a complete space is
Borel-measurable. -/
theorem measurableSet_of_differentiableWithinAt_Ici :
    MeasurableSet { x | DifferentiableWithinAt â f (Ici x) x } := by
  -- apply the previous result with `K = univ`, which is complete since `F` is
  have : IsComplete (univ : Set F) := complete_univ
  convert measurableSet_of_differentiableWithinAt_Ici_of_isComplete f this
  simp

/-- The right derivative `x ⊠derivWithin f (Ici x) x` is measurable. -/
@[measurability, fun_prop]
theorem measurable_derivWithin_Ici [MeasurableSpace F] [BorelSpace F] :
    Measurable fun x => derivWithin f (Ici x) x := by
  refine measurable_of_isClosed fun s hs => ?_
  -- split the preimage according to right differentiability; `derivWithin` is `0` otherwise
  have :
      (fun x => derivWithin f (Ici x) x) â»Â¹' s =
        { x | DifferentiableWithinAt â f (Ici x) x â§ derivWithin f (Ici x) x â s } âª
          { x | ¬DifferentiableWithinAt â f (Ici x) x } â© { _x | (0 : F) â s } :=
    Set.ext fun x => mem_preimage.trans derivWithin_mem_iff
  rw [this]
  exact
    (measurableSet_of_differentiableWithinAt_Ici_of_isComplete _ hs.isComplete).union
      ((measurableSet_of_differentiableWithinAt_Ici _).compl.inter (MeasurableSet.const _))

/-- The right derivative `x ⊠derivWithin f (Ici x) x` is strongly measurable: its range is
contained in the closure of the span of `f` applied to a countable dense set, hence separable. -/
theorem stronglyMeasurable_derivWithin_Ici :
    StronglyMeasurable (fun x ⊠derivWithin f (Ici x) x) := by
  borelize F
  apply stronglyMeasurable_iff_measurable_separable.2 âšmeasurable_derivWithin_Ici f, ?_â©
  obtain âšt, t_count, htâ© : â t : Set â, t.Countable â§ Dense t := exists_countable_dense â
  -- it suffices to trap the range in a separable closed subspace
  suffices H : range (fun x ⊠derivWithin f (Ici x) x) â closure (Submodule.span â (f '' t)) from
    IsSeparable.mono (t_count.image f).isSeparable.span.closure H
  rintro - âšx, rflâ©
  suffices H' : range (fun y ⊠derivWithin f (Ici x) y) â closure (Submodule.span â (f '' t)) from
    H' (mem_range_self _)
  apply range_derivWithin_subset_closure_span_image
  -- `t â© Ici x` is dense enough in `Ici x`, by density of `t` and openness of `Ioi x`
  calc Ici x
      = closure (Ioi x â© closure t) := by simp [dense_iff_closure_eq.1 ht]
    _ â closure (closure (Ioi x â© t)) := by
        apply closure_mono
        simpa [inter_comm] using (isOpen_Ioi (a := x)).closure_inter (s := t)
    _ â closure (Ici x â© t) := by
        rw [closure_closure]
        exact closure_mono (inter_subset_inter_left _ Ioi_subset_Ici_self)

/-- The right derivative is almost-everywhere measurable. -/
theorem aemeasurable_derivWithin_Ici [MeasurableSpace F] [BorelSpace F] (ÎŒ : Measure â) :
    AEMeasurable (fun x => derivWithin f (Ici x) x) Ό :=
  (measurable_derivWithin_Ici f).aemeasurable

/-- The right derivative is almost-everywhere strongly measurable. -/
theorem aestronglyMeasurable_derivWithin_Ici (ÎŒ : Measure â) :
    AEStronglyMeasurable (fun x => derivWithin f (Ici x) x) Ό :=
  (stronglyMeasurable_derivWithin_Ici f).aestronglyMeasurable

/-- The set of right differentiability points of a function taking values in a complete space is
Borel-measurable. -/
theorem measurableSet_of_differentiableWithinAt_Ioi :
    MeasurableSet { x | DifferentiableWithinAt â f (Ioi x) x } := by
  simpa [differentiableWithinAt_Ioi_iff_Ici] using measurableSet_of_differentiableWithinAt_Ici f

/-- The `Ioi` version of the right derivative is measurable, by reduction to the `Ici` version. -/
@[measurability, fun_prop]
theorem measurable_derivWithin_Ioi [MeasurableSpace F] [BorelSpace F] :
    Measurable fun x => derivWithin f (Ioi x) x := by
  simpa [derivWithin_Ioi_eq_Ici] using measurable_derivWithin_Ici f

/-- The `Ioi` version of the right derivative is strongly measurable. -/
theorem stronglyMeasurable_derivWithin_Ioi :
    StronglyMeasurable (fun x ⊠derivWithin f (Ioi x) x) := by
  simpa [derivWithin_Ioi_eq_Ici] using stronglyMeasurable_derivWithin_Ici f

/-- The `Ioi` version of the right derivative is almost-everywhere measurable. -/
theorem aemeasurable_derivWithin_Ioi [MeasurableSpace F] [BorelSpace F] (ÎŒ : Measure â) :
    AEMeasurable (fun x => derivWithin f (Ioi x) x) Ό :=
  (measurable_derivWithin_Ioi f).aemeasurable

/-- The `Ioi` version of the right derivative is almost-everywhere strongly measurable. -/
theorem aestronglyMeasurable_derivWithin_Ioi (ÎŒ : Measure â) :
    AEStronglyMeasurable (fun x => derivWithin f (Ioi x) x) Ό :=
  (stronglyMeasurable_derivWithin_Ioi f).aestronglyMeasurable

end RightDeriv
section WithParam
/- In this section, we prove the measurability of the derivative in a context with parameters:
given `f : α â E â F`, we want to show that `p ⊠fderiv ð (f p.1) p.2` is measurable. Contrary
to the previous sections, some assumptions are needed for this: if `f p.1` depends arbitrarily on
`p.1`, this is obviously false. We require that `f` is continuous and `E` is locally compact --
then the proofs in the previous sections adapt readily, as the set `A` defined above is open, so
that the differentiability set `D` is measurable. -/
variable {ð : Type*} [NontriviallyNormedField ð]
{E : Type*} [NormedAddCommGroup E] [NormedSpace ð E] [LocallyCompactSpace E]
{F : Type*} [NormedAddCommGroup F] [NormedSpace ð F]
{α : Type*} [TopologicalSpace α] [MeasurableSpace α] [MeasurableSpace E]
[OpensMeasurableSpace α] [OpensMeasurableSpace E]
{f : α â E â F} (K : Set (E âL[ð] F))
namespace FDerivMeasurableAux
open Uniformity
/-- For a function `f` depending continuously on a parameter, the set of pairs `(a, x)` such that
`x` belongs to the set `A (f a) L r s` (the function `f a` is well approximated by the linear map
`L` at scales close to `r`, with precision controlled by `s`) is open in `α à E`. -/
lemma isOpen_A_with_param {r s : â} (hf : Continuous f.uncurry) (L : E âL[ð] F) :
    IsOpen {p : α à E | p.2 â A (f p.1) L r s} := by
  have : ProperSpace E := .of_locallyCompactSpace ð
  simp only [A, half_lt_self_iff, not_lt, mem_Ioc, mem_ball, map_sub, mem_setOf_eq]
  apply isOpen_iff_mem_nhds.2
  rintro âša, xâ© âšr', âšIrr', Ir'râ©, hrâ©
  have ha : Continuous (f a) := hf.uncurry_left a
  -- Insert two intermediate radii `r < t' < t < r'` to leave room for perturbing both `a` and `x`.
  rcases exists_between Irr' with âšt, hrt, htr'â©
  rcases exists_between hrt with âšt', hrt', ht'tâ©
  -- By compactness, the approximation error of `f a` by `L` on `closedBall x t` attains its
  -- maximum `b`, and `b < s * r` since `t < r'`.
  obtain âšb, b_lt, hbâ© : â b, b < s * r â§ â y â closedBall x t, â z â closedBall x t,
      âf a z - f a y - (L z - L y)â †b := by
    have B : Continuous (fun (p : E à E) ⊠âf a p.2 - f a p.1 - (L p.2 - L p.1)â) := by fun_prop
    have C : (closedBall x t ÃË¢ closedBall x t).Nonempty := by simp; linarith
    rcases ((isCompact_closedBall x t).prod (isCompact_closedBall x t)).exists_isMaxOn
      C B.continuousOn with âšp, pt, hpâ©
    simp only [mem_prod, mem_closedBall] at pt
    refine âšâf a p.2 - f a p.1 - (L p.2 - L p.1)â,
      hr p.1 (pt.1.trans_lt htr') p.2 (pt.2.trans_lt htr'), fun y hy z hz ⊠?_â©
    have D : (y, z) â closedBall x t ÃË¢ closedBall x t := mem_prod.2 âšhy, hzâ©
    exact hp D
  -- Choose `ε` absorbing two perturbation errors while keeping the total below `s * r`.
  obtain âšÎµ, εpos, hε⩠: â ε, 0 < ε â§ b + 2 * ε < s * r :=
    âš(s * r - b) / 3, by linarith, by linarithâ©
  -- By uniform continuity on a compact set (via the tube lemma), `f a'` stays `ε`-close to `f a`
  -- on `closedBall x t` for `a'` in some open neighborhood `u` of `a`.
  obtain âšu, u_open, au, huâ© : â u, IsOpen u â§ a â u â§ â (p : α à E),
      p.1 â u â p.2 â closedBall x t â dist (f.uncurry p) (f.uncurry (a, p.2)) < ε := by
    have C : Continuous (fun (p : α à E) ⊠f a p.2) := by fun_prop
    have D : ({a} ÃË¢ closedBall x t).EqOn f.uncurry (fun p ⊠f a p.2) := by
      rintro âšb, yâ© âšhb, -â©
      simp only [mem_singleton_iff] at hb
      simp [hb]
    obtain âšv, v_open, sub_v, hvâ© : â v, IsOpen v â§ {a} ÃË¢ closedBall x t â v â§
        â p â v, dist (Function.uncurry f p) (f a p.2) < ε :=
      Uniform.exists_is_open_mem_uniformity_of_forall_mem_eq (s := {a} ÃË¢ closedBall x t)
        (fun p _ ⊠hf.continuousAt) (fun p _ ⊠C.continuousAt) D (dist_mem_uniformity εpos)
    obtain âšw, w', w_open, -, sub_w, sub_w', hww'â© : â (w : Set α) (w' : Set E),
        IsOpen w â§ IsOpen w' â§ {a} â w â§ closedBall x t â w' â§ w ÃË¢ w' â v :=
      generalized_tube_lemma isCompact_singleton (isCompact_closedBall x t) v_open sub_v
    refine âšw, w_open, sub_w rfl, ?_â©
    rintro âšb, yâ© h hby
    exact hv _ (hww' âšh, sub_w' hbyâ©)
  -- The open product neighborhood on which the conclusion will hold.
  have : u ÃË¢ ball x (t - t') â ð (a, x) :=
    prod_mem_nhds (u_open.mem_nhds au) (ball_mem_nhds _ (sub_pos.2 ht't))
  filter_upwards [this]
  rintro âša', x'â© ha'x'
  simp only [mem_prod, mem_ball] at ha'x'
  refine âšt', âšhrt', ht't.le.trans (htr'.le.trans Ir'r)â©, fun y hy z hz ⊠?_â©
  have dyx : dist y x †t := by linarith [dist_triangle y x' x]
  have dzx : dist z x †t := by linarith [dist_triangle z x' x]
  -- Compare `f a'` with `f a` at `y` and `z`, then use the bound `b` for `f a` itself.
  calc
    âf a' z - f a' y - (L z - L y)â =
        â(f a' z - f a z) + (f a y - f a' y) + (f a z - f a y - (L z - L y))â := by congr; abel
    _ †âf a' z - f a zâ + âf a y - f a' yâ + âf a z - f a y - (L z - L y)â := norm_addâ_le _ _ _
    _ †ε + ε + b := by
      gcongr
      · rw [â dist_eq_norm]
        change dist (f.uncurry (a', z)) (f.uncurry (a, z)) †ε
        apply (hu _ _ _).le
        · exact ha'x'.1
        · simp [dzx]
      · rw [â dist_eq_norm']
        change dist (f.uncurry (a', y)) (f.uncurry (a, y)) †ε
        apply (hu _ _ _).le
        · exact ha'x'.1
        · simp [dyx]
      · simp [hb, dyx, dzx]
    _ < s * r := by linarith
/-- For a function `f` depending continuously on a parameter, the set of pairs `(a, x)` with
`x â B (f a) K r s t` is open: it is a union over `L â K` of intersections of the open sets
handled by `isOpen_A_with_param`. -/
lemma isOpen_B_with_param {r s t : â} (hf : Continuous f.uncurry) (K : Set (E âL[ð] F)) :
    IsOpen {p : α à E | p.2 â B (f p.1) K r s t} := by
  suffices H : IsOpen (â L â K,
      {p : α à E | p.2 â A (f p.1) L r t â§ p.2 â A (f p.1) L s t}) by
    convert H; ext p; simp [B]
  refine isOpen_biUnion (fun L _ ⊠?_)
  exact (isOpen_A_with_param hf L).inter (isOpen_A_with_param hf L)
end FDerivMeasurableAux
open FDerivMeasurableAux
/-- The set of pairs `(a, x)` such that `f a` is differentiable at `x` with derivative belonging
to a complete set `K` is Borel-measurable, for `f` continuous in both variables jointly. -/
theorem measurableSet_of_differentiableAt_of_isComplete_with_param
    (hf : Continuous f.uncurry) {K : Set (E âL[ð] F)} (hK : IsComplete K) :
    MeasurableSet {p : α à E | DifferentiableAt ð (f p.1) p.2 â§ fderiv ð (f p.1) p.2 â K} := by
  -- Rewrite the target set using the combinatorial description `D` of the differentiability set.
  have : {p : α à E | DifferentiableAt ð (f p.1) p.2 â§ fderiv ð (f p.1) p.2 â K}
      = {p : α à E | p.2 â D (f p.1) K} := by simp [â differentiable_set_eq_D K hK]
  rw [this]
  simp only [D, mem_iInter, mem_iUnion]
  simp only [setOf_forall, setOf_exists]
  -- `D` is a countable combination of the sets `B`, which are open by `isOpen_B_with_param`.
  refine MeasurableSet.iInter (fun _ ⊠?_)
  refine MeasurableSet.iUnion (fun _ ⊠?_)
  refine MeasurableSet.iInter (fun _ ⊠?_)
  refine MeasurableSet.iInter (fun _ ⊠?_)
  refine MeasurableSet.iInter (fun _ ⊠?_)
  refine MeasurableSet.iInter (fun _ ⊠?_)
  have : ProperSpace E := .of_locallyCompactSpace ð
  exact (isOpen_B_with_param hf K).measurableSet
variable (ð)
variable [CompleteSpace F]
/-- The set of differentiability points of a continuous function depending on a parameter taking
values in a complete space is Borel-measurable. -/
theorem measurableSet_of_differentiableAt_with_param (hf : Continuous f.uncurry) :
    MeasurableSet {p : α à E | DifferentiableAt ð (f p.1) p.2} := by
  -- Specialize the previous result to `K = univ`, which is complete since `F` is.
  have : IsComplete (univ : Set (E âL[ð] F)) := complete_univ
  convert measurableSet_of_differentiableAt_of_isComplete_with_param hf this
  simp
/-- The map `(a, x) ⊠fderiv ð (f a) x` is measurable, for `f` continuous in both variables
jointly. -/
theorem measurable_fderiv_with_param (hf : Continuous f.uncurry) :
    Measurable (fun (p : α à E) ⊠fderiv ð (f p.1) p.2) := by
  -- It suffices to show preimages of closed sets are measurable.
  refine measurable_of_isClosed (fun s hs ⊠?_)
  -- Split the preimage according to differentiability: at non-differentiability points the
  -- derivative is the junk value `0`, handled by the second component of the union.
  have :
    (fun (p : α à E) ⊠fderiv ð (f p.1) p.2) â»Â¹' s =
      {p | DifferentiableAt ð (f p.1) p.2 â§ fderiv ð (f p.1) p.2 â s } âª
        { p | ¬DifferentiableAt ð (f p.1) p.2} â© { _p | (0 : E âL[ð] F) â s} :=
    Set.ext (fun x ⊠mem_preimage.trans fderiv_mem_iff)
  rw [this]
  exact
    (measurableSet_of_differentiableAt_of_isComplete_with_param hf hs.isComplete).union
      ((measurableSet_of_differentiableAt_with_param _ hf).compl.inter (MeasurableSet.const _))
/-- The map `(a, x) ⊠fderiv ð (f a) x y` is measurable for any fixed direction `y`, for `f`
continuous in both variables jointly. -/
theorem measurable_fderiv_apply_const_with_param [MeasurableSpace F] [BorelSpace F]
    (hf : Continuous f.uncurry) (y : E) :
    Measurable (fun (p : α à E) ⊠fderiv ð (f p.1) p.2 y) :=
  (ContinuousLinearMap.measurable_apply y).comp (measurable_fderiv_with_param ð hf)
variable {ð}
/-- The map `(a, x) ⊠deriv (f a) x` is measurable, for `f : α â ð â F` continuous in both
variables jointly. -/
theorem measurable_deriv_with_param [LocallyCompactSpace ð] [MeasurableSpace ð]
    [OpensMeasurableSpace ð] [MeasurableSpace F]
    [BorelSpace F] {f : α â ð â F} (hf : Continuous f.uncurry) :
    Measurable (fun (p : α à ð) ⊠deriv (f p.1) p.2) := by
  -- `deriv` is the Frechet derivative applied to `1`, so the previous lemma applies.
  simpa only [fderiv_deriv] using measurable_fderiv_apply_const_with_param ð hf 1
/-- The map `(a, x) ⊠deriv (f a) x` is strongly measurable, for `f` continuous in both
variables jointly, when either `α` or `F` is second countable. -/
theorem stronglyMeasurable_deriv_with_param [LocallyCompactSpace ð] [MeasurableSpace ð]
    [OpensMeasurableSpace ð] [h : SecondCountableTopologyEither α F]
    {f : α â ð â F} (hf : Continuous f.uncurry) :
    StronglyMeasurable (fun (p : α à ð) ⊠deriv (f p.1) p.2) := by
  borelize F
  rcases h.out with hα|hF
  -- Case `α` second countable: show the range of the derivative is separable, as it sits inside
  -- the closure of the span of the (separable) range of `f.uncurry`.
  · have : ProperSpace ð := .of_locallyCompactSpace ð
    apply stronglyMeasurable_iff_measurable_separable.2 âšmeasurable_deriv_with_param hf, ?_â©
    have : range (fun (p : α à ð) ⊠deriv (f p.1) p.2)
        â closure (Submodule.span ð (range f.uncurry)) := by
      rintro - âšp, rflâ©
      have A : deriv (f p.1) p.2 â closure (Submodule.span ð (range (f p.1))) := by
        rw [â image_univ]
        apply range_deriv_subset_closure_span_image _ dense_univ (mem_range_self _)
      have B : range (f p.1) â range (f.uncurry) := by
        rintro - âšx, rflâ©
        exact mem_range_self (p.1, x)
      exact closure_mono (Submodule.span_mono B) A
    exact (isSeparable_range hf).span.closure.mono this
  -- Case `F` second countable: measurability already gives strong measurability.
  · exact (measurable_deriv_with_param hf).stronglyMeasurable
/-- The map `(a, x) ⊠deriv (f a) x` is almost everywhere measurable with respect to any measure
on `α à ð`, for `f` continuous in both variables jointly. -/
theorem aemeasurable_deriv_with_param [LocallyCompactSpace ð] [MeasurableSpace ð]
    [OpensMeasurableSpace ð] [MeasurableSpace F]
    [BorelSpace F] {f : α â ð â F} (hf : Continuous f.uncurry) (ÎŒ : Measure (α à ð)) :
    AEMeasurable (fun (p : α à ð) ⊠deriv (f p.1) p.2) ÎŒ :=
  (measurable_deriv_with_param hf).aemeasurable
/-- The map `(a, x) ⊠deriv (f a) x` is almost everywhere strongly measurable with respect to
any measure on `α à ð`, for `f` continuous in both variables jointly. -/
theorem aestronglyMeasurable_deriv_with_param [LocallyCompactSpace ð] [MeasurableSpace ð]
    [OpensMeasurableSpace ð] [SecondCountableTopologyEither α F]
    {f : α â ð â F} (hf : Continuous f.uncurry) (ÎŒ : Measure (α à ð)) :
    AEStronglyMeasurable (fun (p : α à ð) ⊠deriv (f p.1) p.2) ÎŒ :=
  (stronglyMeasurable_deriv_with_param hf).aestronglyMeasurable
end WithParam
|
Analysis\Calculus\FDeriv\Mul.lean | /-
Copyright (c) 2019 Jeremy Avigad. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Jeremy Avigad, Sébastien Gouëzel, Yury Kudryashov
-/
import Mathlib.Analysis.Calculus.FDeriv.Bilinear
/-!
# Multiplicative operations on derivatives
For detailed documentation of the Fréchet derivative,
see the module docstring of `Mathlib/Analysis/Calculus/FDeriv/Basic.lean`.
This file contains the usual formulas (and existence assertions) for the derivative of
* multiplication of a function by a scalar function
* product of finitely many scalar functions
* taking the pointwise multiplicative inverse (i.e. `Inv.inv` or `Ring.inverse`) of a function
-/
open scoped Classical
open Filter Asymptotics ContinuousLinearMap Set Metric Topology NNReal ENNReal
noncomputable section
section
variable {ð : Type*} [NontriviallyNormedField ð]
variable {E : Type*} [NormedAddCommGroup E] [NormedSpace ð E]
variable {F : Type*} [NormedAddCommGroup F] [NormedSpace ð F]
variable {G : Type*} [NormedAddCommGroup G] [NormedSpace ð G]
variable {G' : Type*} [NormedAddCommGroup G'] [NormedSpace ð G']
variable {f fâ fâ g : E â F}
variable {f' fâ' fâ' g' : E âL[ð] F}
variable (e : E âL[ð] F)
variable {x : E}
variable {s t : Set E}
variable {L Lâ Lâ : Filter E}
section CLMCompApply
/-! ### Derivative of the pointwise composition/application of continuous linear maps -/
variable {H : Type*} [NormedAddCommGroup H] [NormedSpace ð H] {c : E â G âL[ð] H}
{c' : E âL[ð] G âL[ð] H} {d : E â F âL[ð] G} {d' : E âL[ð] F âL[ð] G} {u : E â G} {u' : E âL[ð] G}
/-- Pointwise composition of two differentiable families of continuous linear maps is
differentiable, with a Leibniz-type derivative (strict version). -/
@[fun_prop]
theorem HasStrictFDerivAt.clm_comp (hc : HasStrictFDerivAt c c' x) (hd : HasStrictFDerivAt d d' x) :
    HasStrictFDerivAt (fun y => (c y).comp (d y))
      ((compL ð F G H (c x)).comp d' + ((compL ð F G H).flip (d x)).comp c') x :=
  (isBoundedBilinearMap_comp.hasStrictFDerivAt (c x, d x)).comp x <| hc.prod hd

/-- Version of `HasStrictFDerivAt.clm_comp` for the derivative within a set. -/
@[fun_prop]
theorem HasFDerivWithinAt.clm_comp (hc : HasFDerivWithinAt c c' s x)
    (hd : HasFDerivWithinAt d d' s x) :
    HasFDerivWithinAt (fun y => (c y).comp (d y))
      ((compL ð F G H (c x)).comp d' + ((compL ð F G H).flip (d x)).comp c') s x :=
  (isBoundedBilinearMap_comp.hasFDerivAt (c x, d x)).comp_hasFDerivWithinAt x <| hc.prod hd

/-- Version of `HasStrictFDerivAt.clm_comp` for the plain (non-strict) derivative. -/
@[fun_prop]
theorem HasFDerivAt.clm_comp (hc : HasFDerivAt c c' x) (hd : HasFDerivAt d d' x) :
    HasFDerivAt (fun y => (c y).comp (d y))
      ((compL ð F G H (c x)).comp d' + ((compL ð F G H).flip (d x)).comp c') x :=
  (isBoundedBilinearMap_comp.hasFDerivAt (c x, d x)).comp x <| hc.prod hd

@[fun_prop]
theorem DifferentiableWithinAt.clm_comp (hc : DifferentiableWithinAt ð c s x)
    (hd : DifferentiableWithinAt ð d s x) :
    DifferentiableWithinAt ð (fun y => (c y).comp (d y)) s x :=
  (hc.hasFDerivWithinAt.clm_comp hd.hasFDerivWithinAt).differentiableWithinAt

@[fun_prop]
theorem DifferentiableAt.clm_comp (hc : DifferentiableAt ð c x) (hd : DifferentiableAt ð d x) :
    DifferentiableAt ð (fun y => (c y).comp (d y)) x :=
  (hc.hasFDerivAt.clm_comp hd.hasFDerivAt).differentiableAt

@[fun_prop]
theorem DifferentiableOn.clm_comp (hc : DifferentiableOn ð c s) (hd : DifferentiableOn ð d s) :
    DifferentiableOn ð (fun y => (c y).comp (d y)) s := fun x hx => (hc x hx).clm_comp (hd x hx)

@[fun_prop]
theorem Differentiable.clm_comp (hc : Differentiable ð c) (hd : Differentiable ð d) :
    Differentiable ð fun y => (c y).comp (d y) := fun x => (hc x).clm_comp (hd x)

/-- Formula for `fderivWithin` of a pointwise composition of continuous linear maps. -/
theorem fderivWithin_clm_comp (hxs : UniqueDiffWithinAt ð s x) (hc : DifferentiableWithinAt ð c s x)
    (hd : DifferentiableWithinAt ð d s x) :
    fderivWithin ð (fun y => (c y).comp (d y)) s x =
      (compL ð F G H (c x)).comp (fderivWithin ð d s x) +
        ((compL ð F G H).flip (d x)).comp (fderivWithin ð c s x) :=
  (hc.hasFDerivWithinAt.clm_comp hd.hasFDerivWithinAt).fderivWithin hxs

/-- Formula for `fderiv` of a pointwise composition of continuous linear maps. -/
theorem fderiv_clm_comp (hc : DifferentiableAt ð c x) (hd : DifferentiableAt ð d x) :
    fderiv ð (fun y => (c y).comp (d y)) x =
      (compL ð F G H (c x)).comp (fderiv ð d x) +
        ((compL ð F G H).flip (d x)).comp (fderiv ð c x) :=
  (hc.hasFDerivAt.clm_comp hd.hasFDerivAt).fderiv
/-- Derivative of the pointwise application `y ⊠(c y) (u y)` of a family of continuous linear
maps to a family of vectors, strict version. -/
@[fun_prop]
theorem HasStrictFDerivAt.clm_apply (hc : HasStrictFDerivAt c c' x)
    (hu : HasStrictFDerivAt u u' x) :
    HasStrictFDerivAt (fun y => (c y) (u y)) ((c x).comp u' + c'.flip (u x)) x :=
  (isBoundedBilinearMap_apply.hasStrictFDerivAt (c x, u x)).comp x (hc.prod hu)

@[fun_prop]
theorem HasFDerivWithinAt.clm_apply (hc : HasFDerivWithinAt c c' s x)
    (hu : HasFDerivWithinAt u u' s x) :
    HasFDerivWithinAt (fun y => (c y) (u y)) ((c x).comp u' + c'.flip (u x)) s x :=
  (isBoundedBilinearMap_apply.hasFDerivAt (c x, u x)).comp_hasFDerivWithinAt x (hc.prod hu)

@[fun_prop]
theorem HasFDerivAt.clm_apply (hc : HasFDerivAt c c' x) (hu : HasFDerivAt u u' x) :
    HasFDerivAt (fun y => (c y) (u y)) ((c x).comp u' + c'.flip (u x)) x :=
  (isBoundedBilinearMap_apply.hasFDerivAt (c x, u x)).comp x (hc.prod hu)

@[fun_prop]
theorem DifferentiableWithinAt.clm_apply (hc : DifferentiableWithinAt ð c s x)
    (hu : DifferentiableWithinAt ð u s x) : DifferentiableWithinAt ð (fun y => (c y) (u y)) s x :=
  (hc.hasFDerivWithinAt.clm_apply hu.hasFDerivWithinAt).differentiableWithinAt

@[fun_prop]
theorem DifferentiableAt.clm_apply (hc : DifferentiableAt ð c x) (hu : DifferentiableAt ð u x) :
    DifferentiableAt ð (fun y => (c y) (u y)) x :=
  (hc.hasFDerivAt.clm_apply hu.hasFDerivAt).differentiableAt

@[fun_prop]
theorem DifferentiableOn.clm_apply (hc : DifferentiableOn ð c s) (hu : DifferentiableOn ð u s) :
    DifferentiableOn ð (fun y => (c y) (u y)) s := fun x hx => (hc x hx).clm_apply (hu x hx)

@[fun_prop]
theorem Differentiable.clm_apply (hc : Differentiable ð c) (hu : Differentiable ð u) :
    Differentiable ð fun y => (c y) (u y) := fun x => (hc x).clm_apply (hu x)

/-- Formula for `fderivWithin` of the pointwise application of continuous linear maps. -/
theorem fderivWithin_clm_apply (hxs : UniqueDiffWithinAt ð s x)
    (hc : DifferentiableWithinAt ð c s x) (hu : DifferentiableWithinAt ð u s x) :
    fderivWithin ð (fun y => (c y) (u y)) s x =
      (c x).comp (fderivWithin ð u s x) + (fderivWithin ð c s x).flip (u x) :=
  (hc.hasFDerivWithinAt.clm_apply hu.hasFDerivWithinAt).fderivWithin hxs

/-- Formula for `fderiv` of the pointwise application of continuous linear maps. -/
theorem fderiv_clm_apply (hc : DifferentiableAt ð c x) (hu : DifferentiableAt ð u x) :
    fderiv ð (fun y => (c y) (u y)) x = (c x).comp (fderiv ð u x) + (fderiv ð c x).flip (u x) :=
  (hc.hasFDerivAt.clm_apply hu.hasFDerivAt).fderiv
end CLMCompApply
section ContinuousMultilinearApplyConst
/-! ### Derivative of the application of continuous multilinear maps to a constant -/
variable {ι : Type*} [Fintype ι]
{M : ι â Type*} [â i, NormedAddCommGroup (M i)] [â i, NormedSpace ð (M i)]
{H : Type*} [NormedAddCommGroup H] [NormedSpace ð H]
{c : E â ContinuousMultilinearMap ð M H}
{c' : E âL[ð] ContinuousMultilinearMap ð M H}
/-- Derivative of the application of a family of continuous multilinear maps to a fixed tuple of
arguments `u`, strict version. -/
@[fun_prop]
theorem HasStrictFDerivAt.continuousMultilinear_apply_const (hc : HasStrictFDerivAt c c' x)
    (u : â i, M i) : HasStrictFDerivAt (fun y ⊠(c y) u) (c'.flipMultilinear u) x :=
  (ContinuousMultilinearMap.apply ð M H u).hasStrictFDerivAt.comp x hc

@[fun_prop]
theorem HasFDerivWithinAt.continuousMultilinear_apply_const (hc : HasFDerivWithinAt c c' s x)
    (u : â i, M i) :
    HasFDerivWithinAt (fun y ⊠(c y) u) (c'.flipMultilinear u) s x :=
  (ContinuousMultilinearMap.apply ð M H u).hasFDerivAt.comp_hasFDerivWithinAt x hc

@[fun_prop]
theorem HasFDerivAt.continuousMultilinear_apply_const (hc : HasFDerivAt c c' x) (u : â i, M i) :
    HasFDerivAt (fun y ⊠(c y) u) (c'.flipMultilinear u) x :=
  (ContinuousMultilinearMap.apply ð M H u).hasFDerivAt.comp x hc

@[fun_prop]
theorem DifferentiableWithinAt.continuousMultilinear_apply_const
    (hc : DifferentiableWithinAt ð c s x) (u : â i, M i) :
    DifferentiableWithinAt ð (fun y ⊠(c y) u) s x :=
  (hc.hasFDerivWithinAt.continuousMultilinear_apply_const u).differentiableWithinAt

@[fun_prop]
theorem DifferentiableAt.continuousMultilinear_apply_const (hc : DifferentiableAt ð c x)
    (u : â i, M i) :
    DifferentiableAt ð (fun y ⊠(c y) u) x :=
  (hc.hasFDerivAt.continuousMultilinear_apply_const u).differentiableAt

@[fun_prop]
theorem DifferentiableOn.continuousMultilinear_apply_const (hc : DifferentiableOn ð c s)
    (u : â i, M i) : DifferentiableOn ð (fun y ⊠(c y) u) s :=
  fun x hx ⊠(hc x hx).continuousMultilinear_apply_const u

@[fun_prop]
theorem Differentiable.continuousMultilinear_apply_const (hc : Differentiable ð c) (u : â i, M i) :
    Differentiable ð fun y ⊠(c y) u := fun x ⊠(hc x).continuousMultilinear_apply_const u

/-- Formula for `fderivWithin` of the application of a family of continuous multilinear maps to a
constant tuple of arguments. -/
theorem fderivWithin_continuousMultilinear_apply_const (hxs : UniqueDiffWithinAt ð s x)
    (hc : DifferentiableWithinAt ð c s x) (u : â i, M i) :
    fderivWithin ð (fun y ⊠(c y) u) s x = ((fderivWithin ð c s x).flipMultilinear u) :=
  (hc.hasFDerivWithinAt.continuousMultilinear_apply_const u).fderivWithin hxs

/-- Formula for `fderiv` of the application of a family of continuous multilinear maps to a
constant tuple of arguments. -/
theorem fderiv_continuousMultilinear_apply_const (hc : DifferentiableAt ð c x) (u : â i, M i) :
    (fderiv ð (fun y ⊠(c y) u) x) = (fderiv ð c x).flipMultilinear u :=
  (hc.hasFDerivAt.continuousMultilinear_apply_const u).fderiv

/-- Application of a `ContinuousMultilinearMap` to a constant commutes with `fderivWithin`. -/
theorem fderivWithin_continuousMultilinear_apply_const_apply (hxs : UniqueDiffWithinAt ð s x)
    (hc : DifferentiableWithinAt ð c s x) (u : â i, M i) (m : E) :
    (fderivWithin ð (fun y ⊠(c y) u) s x) m = (fderivWithin ð c s x) m u := by
  simp [fderivWithin_continuousMultilinear_apply_const hxs hc]

/-- Application of a `ContinuousMultilinearMap` to a constant commutes with `fderiv`. -/
theorem fderiv_continuousMultilinear_apply_const_apply (hc : DifferentiableAt ð c x)
    (u : â i, M i) (m : E) :
    (fderiv ð (fun y ⊠(c y) u) x) m = (fderiv ð c x) m u := by
  simp [fderiv_continuousMultilinear_apply_const hc]
end ContinuousMultilinearApplyConst
section SMul
/-! ### Derivative of the product of a scalar-valued function and a vector-valued function
If `c` is a differentiable scalar-valued function and `f` is a differentiable vector-valued
function, then `fun x ⊠c x ⢠f x` is differentiable as well. Lemmas in this section works for
function `c` taking values in the base field, as well as in a normed algebra over the base
field: e.g., they work for `c : E â â` and `f : E â F` provided that `F` is a complex
normed vector space.
-/
variable {ð' : Type*} [NontriviallyNormedField ð'] [NormedAlgebra ð ð'] [NormedSpace ð' F]
[IsScalarTower ð ð' F]
variable {c : E â ð'} {c' : E âL[ð] ð'}
/-- Leibniz rule for the scalar multiple `y ⊠c y ⢠f y` of a vector-valued function by a
scalar-valued function, strict version. -/
@[fun_prop]
theorem HasStrictFDerivAt.smul (hc : HasStrictFDerivAt c c' x) (hf : HasStrictFDerivAt f f' x) :
    HasStrictFDerivAt (fun y => c y ⢠f y) (c x ⢠f' + c'.smulRight (f x)) x :=
  (isBoundedBilinearMap_smul.hasStrictFDerivAt (c x, f x)).comp x <| hc.prod hf

@[fun_prop]
theorem HasFDerivWithinAt.smul (hc : HasFDerivWithinAt c c' s x) (hf : HasFDerivWithinAt f f' s x) :
    HasFDerivWithinAt (fun y => c y ⢠f y) (c x ⢠f' + c'.smulRight (f x)) s x :=
  (isBoundedBilinearMap_smul.hasFDerivAt (c x, f x)).comp_hasFDerivWithinAt x <| hc.prod hf

@[fun_prop]
theorem HasFDerivAt.smul (hc : HasFDerivAt c c' x) (hf : HasFDerivAt f f' x) :
    HasFDerivAt (fun y => c y ⢠f y) (c x ⢠f' + c'.smulRight (f x)) x :=
  (isBoundedBilinearMap_smul.hasFDerivAt (c x, f x)).comp x <| hc.prod hf

@[fun_prop]
theorem DifferentiableWithinAt.smul (hc : DifferentiableWithinAt ð c s x)
    (hf : DifferentiableWithinAt ð f s x) : DifferentiableWithinAt ð (fun y => c y ⢠f y) s x :=
  (hc.hasFDerivWithinAt.smul hf.hasFDerivWithinAt).differentiableWithinAt

@[simp, fun_prop]
theorem DifferentiableAt.smul (hc : DifferentiableAt ð c x) (hf : DifferentiableAt ð f x) :
    DifferentiableAt ð (fun y => c y ⢠f y) x :=
  (hc.hasFDerivAt.smul hf.hasFDerivAt).differentiableAt

@[fun_prop]
theorem DifferentiableOn.smul (hc : DifferentiableOn ð c s) (hf : DifferentiableOn ð f s) :
    DifferentiableOn ð (fun y => c y ⢠f y) s := fun x hx => (hc x hx).smul (hf x hx)

@[simp, fun_prop]
theorem Differentiable.smul (hc : Differentiable ð c) (hf : Differentiable ð f) :
    Differentiable ð fun y => c y ⢠f y := fun x => (hc x).smul (hf x)

/-- Formula for `fderivWithin` of `y ⊠c y ⢠f y`. -/
theorem fderivWithin_smul (hxs : UniqueDiffWithinAt ð s x) (hc : DifferentiableWithinAt ð c s x)
    (hf : DifferentiableWithinAt ð f s x) :
    fderivWithin ð (fun y => c y ⢠f y) s x =
      c x ⢠fderivWithin ð f s x + (fderivWithin ð c s x).smulRight (f x) :=
  (hc.hasFDerivWithinAt.smul hf.hasFDerivWithinAt).fderivWithin hxs

/-- Formula for `fderiv` of `y ⊠c y ⢠f y`. -/
theorem fderiv_smul (hc : DifferentiableAt ð c x) (hf : DifferentiableAt ð f x) :
    fderiv ð (fun y => c y ⢠f y) x = c x ⢠fderiv ð f x + (fderiv ð c x).smulRight (f x) :=
  (hc.hasFDerivAt.smul hf.hasFDerivAt).fderiv
/-- Derivative of `y ⊠c y ⢠f` for a fixed vector `f`, strict version: follows from
`HasStrictFDerivAt.smul` since the constant factor has zero derivative. -/
@[fun_prop]
theorem HasStrictFDerivAt.smul_const (hc : HasStrictFDerivAt c c' x) (f : F) :
    HasStrictFDerivAt (fun y => c y ⢠f) (c'.smulRight f) x := by
  simpa only [smul_zero, zero_add] using hc.smul (hasStrictFDerivAt_const f x)

@[fun_prop]
theorem HasFDerivWithinAt.smul_const (hc : HasFDerivWithinAt c c' s x) (f : F) :
    HasFDerivWithinAt (fun y => c y ⢠f) (c'.smulRight f) s x := by
  simpa only [smul_zero, zero_add] using hc.smul (hasFDerivWithinAt_const f x s)

@[fun_prop]
theorem HasFDerivAt.smul_const (hc : HasFDerivAt c c' x) (f : F) :
    HasFDerivAt (fun y => c y ⢠f) (c'.smulRight f) x := by
  simpa only [smul_zero, zero_add] using hc.smul (hasFDerivAt_const f x)

@[fun_prop]
theorem DifferentiableWithinAt.smul_const (hc : DifferentiableWithinAt ð c s x) (f : F) :
    DifferentiableWithinAt ð (fun y => c y ⢠f) s x :=
  (hc.hasFDerivWithinAt.smul_const f).differentiableWithinAt

@[fun_prop]
theorem DifferentiableAt.smul_const (hc : DifferentiableAt ð c x) (f : F) :
    DifferentiableAt ð (fun y => c y ⢠f) x :=
  (hc.hasFDerivAt.smul_const f).differentiableAt

@[fun_prop]
theorem DifferentiableOn.smul_const (hc : DifferentiableOn ð c s) (f : F) :
    DifferentiableOn ð (fun y => c y ⢠f) s := fun x hx => (hc x hx).smul_const f

@[fun_prop]
theorem Differentiable.smul_const (hc : Differentiable ð c) (f : F) :
    Differentiable ð fun y => c y ⢠f := fun x => (hc x).smul_const f

/-- Formula for `fderivWithin` of `y ⊠c y ⢠f` with `f` a constant vector. -/
theorem fderivWithin_smul_const (hxs : UniqueDiffWithinAt ð s x)
    (hc : DifferentiableWithinAt ð c s x) (f : F) :
    fderivWithin ð (fun y => c y ⢠f) s x = (fderivWithin ð c s x).smulRight f :=
  (hc.hasFDerivWithinAt.smul_const f).fderivWithin hxs

/-- Formula for `fderiv` of `y ⊠c y ⢠f` with `f` a constant vector. -/
theorem fderiv_smul_const (hc : DifferentiableAt ð c x) (f : F) :
    fderiv ð (fun y => c y ⢠f) x = (fderiv ð c x).smulRight f :=
  (hc.hasFDerivAt.smul_const f).fderiv
end SMul
section Mul
/-! ### Derivative of the product of two functions -/
variable {ðž ðž' : Type*} [NormedRing ðž] [NormedCommRing ðž'] [NormedAlgebra ð ðž] [NormedAlgebra ð ðž']
{a b : E â ðž} {a' b' : E âL[ð] ðž} {c d : E â ðž'} {c' d' : E âL[ð] ðž'}
/-- Leibniz rule for the product of two functions with values in a (possibly noncommutative)
normed ring, strict version. The primed name marks the noncommutative form of the derivative. -/
@[fun_prop]
theorem HasStrictFDerivAt.mul' {x : E} (ha : HasStrictFDerivAt a a' x)
    (hb : HasStrictFDerivAt b b' x) :
    HasStrictFDerivAt (fun y => a y * b y) (a x ⢠b' + a'.smulRight (b x)) x :=
  ((ContinuousLinearMap.mul ð ðž).isBoundedBilinearMap.hasStrictFDerivAt (a x, b x)).comp x
    (ha.prod hb)

/-- Leibniz rule for the product of two functions with values in a commutative normed ring,
strict version: commutativity lets both terms be written as scalar multiples. -/
@[fun_prop]
theorem HasStrictFDerivAt.mul (hc : HasStrictFDerivAt c c' x) (hd : HasStrictFDerivAt d d' x) :
    HasStrictFDerivAt (fun y => c y * d y) (c x ⢠d' + d x ⢠c') x := by
  convert hc.mul' hd
  ext z
  apply mul_comm

@[fun_prop]
theorem HasFDerivWithinAt.mul' (ha : HasFDerivWithinAt a a' s x) (hb : HasFDerivWithinAt b b' s x) :
    HasFDerivWithinAt (fun y => a y * b y) (a x ⢠b' + a'.smulRight (b x)) s x :=
  ((ContinuousLinearMap.mul ð ðž).isBoundedBilinearMap.hasFDerivAt (a x, b x)).comp_hasFDerivWithinAt
    x (ha.prod hb)

@[fun_prop]
theorem HasFDerivWithinAt.mul (hc : HasFDerivWithinAt c c' s x) (hd : HasFDerivWithinAt d d' s x) :
    HasFDerivWithinAt (fun y => c y * d y) (c x ⢠d' + d x ⢠c') s x := by
  convert hc.mul' hd
  ext z
  apply mul_comm

@[fun_prop]
theorem HasFDerivAt.mul' (ha : HasFDerivAt a a' x) (hb : HasFDerivAt b b' x) :
    HasFDerivAt (fun y => a y * b y) (a x ⢠b' + a'.smulRight (b x)) x :=
  ((ContinuousLinearMap.mul ð ðž).isBoundedBilinearMap.hasFDerivAt (a x, b x)).comp x (ha.prod hb)

@[fun_prop]
theorem HasFDerivAt.mul (hc : HasFDerivAt c c' x) (hd : HasFDerivAt d d' x) :
    HasFDerivAt (fun y => c y * d y) (c x ⢠d' + d x ⢠c') x := by
  convert hc.mul' hd
  ext z
  apply mul_comm

@[fun_prop]
theorem DifferentiableWithinAt.mul (ha : DifferentiableWithinAt ð a s x)
    (hb : DifferentiableWithinAt ð b s x) : DifferentiableWithinAt ð (fun y => a y * b y) s x :=
  (ha.hasFDerivWithinAt.mul' hb.hasFDerivWithinAt).differentiableWithinAt

@[simp, fun_prop]
theorem DifferentiableAt.mul (ha : DifferentiableAt ð a x) (hb : DifferentiableAt ð b x) :
    DifferentiableAt ð (fun y => a y * b y) x :=
  (ha.hasFDerivAt.mul' hb.hasFDerivAt).differentiableAt

@[fun_prop]
theorem DifferentiableOn.mul (ha : DifferentiableOn ð a s) (hb : DifferentiableOn ð b s) :
    DifferentiableOn ð (fun y => a y * b y) s := fun x hx => (ha x hx).mul (hb x hx)

@[simp, fun_prop]
theorem Differentiable.mul (ha : Differentiable ð a) (hb : Differentiable ð b) :
    Differentiable ð fun y => a y * b y := fun x => (ha x).mul (hb x)

/-- Natural powers of a differentiable function are differentiable within a set, by induction on
the exponent using the product rule. -/
@[fun_prop]
theorem DifferentiableWithinAt.pow (ha : DifferentiableWithinAt ð a s x) :
    â n : â, DifferentiableWithinAt ð (fun x => a x ^ n) s x
  | 0 => by simp only [pow_zero, differentiableWithinAt_const]
  | n + 1 => by simp only [pow_succ', DifferentiableWithinAt.pow ha n, ha.mul]

@[simp, fun_prop]
theorem DifferentiableAt.pow (ha : DifferentiableAt ð a x) (n : â) :
    DifferentiableAt ð (fun x => a x ^ n) x :=
  differentiableWithinAt_univ.mp <| ha.differentiableWithinAt.pow n

@[fun_prop]
theorem DifferentiableOn.pow (ha : DifferentiableOn ð a s) (n : â) :
    DifferentiableOn ð (fun x => a x ^ n) s := fun x h => (ha x h).pow n

@[simp, fun_prop]
theorem Differentiable.pow (ha : Differentiable ð a) (n : â) : Differentiable ð fun x => a x ^ n :=
  fun x => (ha x).pow n
/-- Formula for `fderivWithin` of a product, noncommutative version. -/
theorem fderivWithin_mul' (hxs : UniqueDiffWithinAt ð s x) (ha : DifferentiableWithinAt ð a s x)
    (hb : DifferentiableWithinAt ð b s x) :
    fderivWithin ð (fun y => a y * b y) s x =
      a x ⢠fderivWithin ð b s x + (fderivWithin ð a s x).smulRight (b x) :=
  (ha.hasFDerivWithinAt.mul' hb.hasFDerivWithinAt).fderivWithin hxs

/-- Formula for `fderivWithin` of a product, commutative version. -/
theorem fderivWithin_mul (hxs : UniqueDiffWithinAt ð s x) (hc : DifferentiableWithinAt ð c s x)
    (hd : DifferentiableWithinAt ð d s x) :
    fderivWithin ð (fun y => c y * d y) s x =
      c x ⢠fderivWithin ð d s x + d x ⢠fderivWithin ð c s x :=
  (hc.hasFDerivWithinAt.mul hd.hasFDerivWithinAt).fderivWithin hxs

/-- Formula for `fderiv` of a product, noncommutative version. -/
theorem fderiv_mul' (ha : DifferentiableAt ð a x) (hb : DifferentiableAt ð b x) :
    fderiv ð (fun y => a y * b y) x = a x ⢠fderiv ð b x + (fderiv ð a x).smulRight (b x) :=
  (ha.hasFDerivAt.mul' hb.hasFDerivAt).fderiv

/-- Formula for `fderiv` of a product, commutative version. -/
theorem fderiv_mul (hc : DifferentiableAt ð c x) (hd : DifferentiableAt ð d x) :
    fderiv ð (fun y => c y * d y) x = c x ⢠fderiv ð d x + d x ⢠fderiv ð c x :=
  (hc.hasFDerivAt.mul hd.hasFDerivAt).fderiv
/-- Derivative of `y ⊠a y * b` for a constant right factor `b`, noncommutative version. -/
@[fun_prop]
theorem HasStrictFDerivAt.mul_const' (ha : HasStrictFDerivAt a a' x) (b : ðž) :
    HasStrictFDerivAt (fun y => a y * b) (a'.smulRight b) x :=
  ((ContinuousLinearMap.mul ð ðž).flip b).hasStrictFDerivAt.comp x ha

/-- Derivative of `y ⊠c y * d` for a constant right factor `d`, commutative version. -/
@[fun_prop]
theorem HasStrictFDerivAt.mul_const (hc : HasStrictFDerivAt c c' x) (d : ðž') :
    HasStrictFDerivAt (fun y => c y * d) (d ⢠c') x := by
  convert hc.mul_const' d
  ext z
  apply mul_comm

@[fun_prop]
theorem HasFDerivWithinAt.mul_const' (ha : HasFDerivWithinAt a a' s x) (b : ðž) :
    HasFDerivWithinAt (fun y => a y * b) (a'.smulRight b) s x :=
  ((ContinuousLinearMap.mul ð ðž).flip b).hasFDerivAt.comp_hasFDerivWithinAt x ha

@[fun_prop]
theorem HasFDerivWithinAt.mul_const (hc : HasFDerivWithinAt c c' s x) (d : ðž') :
    HasFDerivWithinAt (fun y => c y * d) (d ⢠c') s x := by
  convert hc.mul_const' d
  ext z
  apply mul_comm

@[fun_prop]
theorem HasFDerivAt.mul_const' (ha : HasFDerivAt a a' x) (b : ðž) :
    HasFDerivAt (fun y => a y * b) (a'.smulRight b) x :=
  ((ContinuousLinearMap.mul ð ðž).flip b).hasFDerivAt.comp x ha

@[fun_prop]
theorem HasFDerivAt.mul_const (hc : HasFDerivAt c c' x) (d : ðž') :
    HasFDerivAt (fun y => c y * d) (d ⢠c') x := by
  convert hc.mul_const' d
  ext z
  apply mul_comm

@[fun_prop]
theorem DifferentiableWithinAt.mul_const (ha : DifferentiableWithinAt ð a s x) (b : ðž) :
    DifferentiableWithinAt ð (fun y => a y * b) s x :=
  (ha.hasFDerivWithinAt.mul_const' b).differentiableWithinAt

@[fun_prop]
theorem DifferentiableAt.mul_const (ha : DifferentiableAt ð a x) (b : ðž) :
    DifferentiableAt ð (fun y => a y * b) x :=
  (ha.hasFDerivAt.mul_const' b).differentiableAt

@[fun_prop]
theorem DifferentiableOn.mul_const (ha : DifferentiableOn ð a s) (b : ðž) :
    DifferentiableOn ð (fun y => a y * b) s := fun x hx => (ha x hx).mul_const b

@[fun_prop]
theorem Differentiable.mul_const (ha : Differentiable ð a) (b : ðž) :
    Differentiable ð fun y => a y * b := fun x => (ha x).mul_const b

/-- Formula for `fderivWithin` of right multiplication by a constant, noncommutative version. -/
theorem fderivWithin_mul_const' (hxs : UniqueDiffWithinAt ð s x)
    (ha : DifferentiableWithinAt ð a s x) (b : ðž) :
    fderivWithin ð (fun y => a y * b) s x = (fderivWithin ð a s x).smulRight b :=
  (ha.hasFDerivWithinAt.mul_const' b).fderivWithin hxs

/-- Formula for `fderivWithin` of right multiplication by a constant, commutative version. -/
theorem fderivWithin_mul_const (hxs : UniqueDiffWithinAt ð s x)
    (hc : DifferentiableWithinAt ð c s x) (d : ðž') :
    fderivWithin ð (fun y => c y * d) s x = d ⢠fderivWithin ð c s x :=
  (hc.hasFDerivWithinAt.mul_const d).fderivWithin hxs

/-- Formula for `fderiv` of right multiplication by a constant, noncommutative version. -/
theorem fderiv_mul_const' (ha : DifferentiableAt ð a x) (b : ðž) :
    fderiv ð (fun y => a y * b) x = (fderiv ð a x).smulRight b :=
  (ha.hasFDerivAt.mul_const' b).fderiv

/-- Formula for `fderiv` of right multiplication by a constant, commutative version. -/
theorem fderiv_mul_const (hc : DifferentiableAt ð c x) (d : ðž') :
    fderiv ð (fun y => c y * d) x = d ⢠fderiv ð c x :=
  (hc.hasFDerivAt.mul_const d).fderiv
/-- Derivative of `y ⊠b * a y` for a constant left factor `b`: left multiplication is a
continuous linear map, so the chain rule applies directly. -/
@[fun_prop]
theorem HasStrictFDerivAt.const_mul (ha : HasStrictFDerivAt a a' x) (b : ðž) :
    HasStrictFDerivAt (fun y => b * a y) (b ⢠a') x :=
  ((ContinuousLinearMap.mul ð ðž) b).hasStrictFDerivAt.comp x ha

@[fun_prop]
theorem HasFDerivWithinAt.const_mul (ha : HasFDerivWithinAt a a' s x) (b : ðž) :
    HasFDerivWithinAt (fun y => b * a y) (b ⢠a') s x :=
  ((ContinuousLinearMap.mul ð ðž) b).hasFDerivAt.comp_hasFDerivWithinAt x ha

@[fun_prop]
theorem HasFDerivAt.const_mul (ha : HasFDerivAt a a' x) (b : ðž) :
    HasFDerivAt (fun y => b * a y) (b ⢠a') x :=
  ((ContinuousLinearMap.mul ð ðž) b).hasFDerivAt.comp x ha

@[fun_prop]
theorem DifferentiableWithinAt.const_mul (ha : DifferentiableWithinAt ð a s x) (b : ðž) :
    DifferentiableWithinAt ð (fun y => b * a y) s x :=
  (ha.hasFDerivWithinAt.const_mul b).differentiableWithinAt

@[fun_prop]
theorem DifferentiableAt.const_mul (ha : DifferentiableAt ð a x) (b : ðž) :
    DifferentiableAt ð (fun y => b * a y) x :=
  (ha.hasFDerivAt.const_mul b).differentiableAt

@[fun_prop]
theorem DifferentiableOn.const_mul (ha : DifferentiableOn ð a s) (b : ðž) :
    DifferentiableOn ð (fun y => b * a y) s := fun x hx => (ha x hx).const_mul b

@[fun_prop]
theorem Differentiable.const_mul (ha : Differentiable ð a) (b : ðž) :
    Differentiable ð fun y => b * a y := fun x => (ha x).const_mul b

/-- Formula for `fderivWithin` of left multiplication by a constant. -/
theorem fderivWithin_const_mul (hxs : UniqueDiffWithinAt ð s x)
    (ha : DifferentiableWithinAt ð a s x) (b : ðž) :
    fderivWithin ð (fun y => b * a y) s x = b ⢠fderivWithin ð a s x :=
  (ha.hasFDerivWithinAt.const_mul b).fderivWithin hxs

/-- Formula for `fderiv` of left multiplication by a constant. -/
theorem fderiv_const_mul (ha : DifferentiableAt ð a x) (b : ðž) :
    fderiv ð (fun y => b * a y) x = b ⢠fderiv ð a x :=
  (ha.hasFDerivAt.const_mul b).fderiv
end Mul
section Prod
/-! ### Derivative of a finite product of functions -/
variable {ι : Type*} {ðž ðž' : Type*} [NormedRing ðž] [NormedCommRing ðž'] [NormedAlgebra ð ðž]
[NormedAlgebra ð ðž'] {u : Finset ι} {f : ι â E â ðž} {f' : ι â E âL[ð] ðž} {g : ι â E â ðž'}
{g' : ι â E âL[ð] ðž'}
/-- Strict derivative of `x ⊠(l.map x).prod`: a sum over list positions, where the `i`-th term
differentiates the `i`-th factor and keeps the prefix product `(l.take i).map x` and the suffix
product `(l.drop (i+1)).map x` fixed. Proved by induction on the list, using the product rule. -/
@[fun_prop]
theorem hasStrictFDerivAt_list_prod' [Fintype ι] {l : List ι} {x : ι â ðž} :
    HasStrictFDerivAt (ð := ð) (fun x ⊠(l.map x).prod)
      (â i : Fin l.length, ((l.take i).map x).prod â¢
        smulRight (proj (l.get i)) ((l.drop (.succ i)).map x).prod) x := by
  induction l with
  | nil => simp [hasStrictFDerivAt_const]
  | cons a l IH =>
    simp only [List.map_cons, List.prod_cons, â proj_apply (R := ð) (Ï := fun _ : ι ⊠ðž) a]
    exact .congr_fderiv (.mul' (ContinuousLinearMap.hasStrictFDerivAt _) IH)
      (by ext; simp [Fin.sum_univ_succ, Finset.mul_sum, mul_assoc, add_comm])

/-- Specialization of `hasStrictFDerivAt_list_prod'` to the list `List.finRange n`. -/
@[fun_prop]
theorem hasStrictFDerivAt_list_prod_finRange' {n : â} {x : Fin n â ðž} :
    HasStrictFDerivAt (ð := ð) (fun x ⊠((List.finRange n).map x).prod)
      (â i : Fin n, (((List.finRange n).take i).map x).prod â¢
        smulRight (proj i) (((List.finRange n).drop (.succ i)).map x).prod) x :=
  hasStrictFDerivAt_list_prod'.congr_fderiv <|
    Finset.sum_equiv (finCongr (List.length_finRange n)) (by simp) (by simp [Fin.forall_iff])

/-- Specialization of `hasStrictFDerivAt_list_prod'` to attached lists `l.attach`. -/
@[fun_prop]
theorem hasStrictFDerivAt_list_prod_attach' [DecidableEq ι] {l : List ι} {x : {i // i â l} â ðž} :
    HasStrictFDerivAt (ð := ð) (fun x ⊠(l.attach.map x).prod)
      (â i : Fin l.length, ((l.attach.take i).map x).prod â¢
        smulRight (proj (l.attach.get (i.cast l.length_attach.symm)))
          ((l.attach.drop (.succ i)).map x).prod) x :=
  hasStrictFDerivAt_list_prod'.congr_fderiv <| Eq.symm <|
    Finset.sum_equiv (finCongr l.length_attach.symm) (by simp) (by simp)

/-- Non-strict version of `hasStrictFDerivAt_list_prod'`. -/
@[fun_prop]
theorem hasFDerivAt_list_prod' [Fintype ι] {l : List ι} {x : ι â ðž'} :
    HasFDerivAt (ð := ð) (fun x ⊠(l.map x).prod)
      (â i : Fin l.length, ((l.take i).map x).prod â¢
        smulRight (proj (l.get i)) ((l.drop (.succ i)).map x).prod) x :=
  hasStrictFDerivAt_list_prod'.hasFDerivAt

/-- Non-strict version of `hasStrictFDerivAt_list_prod_finRange'`. -/
@[fun_prop]
theorem hasFDerivAt_list_prod_finRange' {n : â} {x : Fin n â ðž} :
    HasFDerivAt (ð := ð) (fun x ⊠((List.finRange n).map x).prod)
      (â i : Fin n, (((List.finRange n).take i).map x).prod â¢
        smulRight (proj i) (((List.finRange n).drop (.succ i)).map x).prod) x :=
  (hasStrictFDerivAt_list_prod_finRange').hasFDerivAt

/-- Non-strict version of `hasStrictFDerivAt_list_prod_attach'`. -/
@[fun_prop]
theorem hasFDerivAt_list_prod_attach' [DecidableEq ι] {l : List ι} {x : {i // i â l} â ðž} :
    HasFDerivAt (ð := ð) (fun x ⊠(l.attach.map x).prod)
      (â i : Fin l.length, ((l.attach.take i).map x).prod â¢
        smulRight (proj (l.attach.get (i.cast l.length_attach.symm)))
          ((l.attach.drop (.succ i)).map x).prod) x :=
  hasStrictFDerivAt_list_prod_attach'.hasFDerivAt
/--
Auxiliary lemma for `hasStrictFDerivAt_multiset_prod`.
For `NormedCommRing ðž'`, can rewrite as `Multiset` using `Multiset.prod_coe`.

In the commutative case the prefix/suffix products of `hasStrictFDerivAt_list_prod'`
collapse into a single product over `l.erase i`.
-/
@[fun_prop]
theorem hasStrictFDerivAt_list_prod [DecidableEq ι] [Fintype ι] {l : List ι} {x : ι â ðž'} :
    HasStrictFDerivAt (ð := ð) (fun x ⊠(l.map x).prod)
      (l.map fun i ⊠((l.erase i).map x).prod ⢠proj i).sum x := by
  -- Start from the non-commutative statement, then use commutativity (`ring`) to merge
  -- each prefix/suffix pair into the product over the list with position `i` erased.
  refine .congr_fderiv hasStrictFDerivAt_list_prod' ?_
  conv_rhs => arg 1; arg 2; rw [â List.finRange_map_get l]
  simp only [List.map_map, â List.sum_toFinset _ (List.nodup_finRange _), List.toFinset_finRange,
    Function.comp_def, ((List.erase_get _).map _).prod_eq, List.eraseIdx_eq_take_drop_succ,
    List.map_append, List.prod_append]
  exact Finset.sum_congr rfl fun i _ ⊠by
    ext; simp only [smul_apply, smulRight_apply, smul_eq_mul]; ring

/-- Multiset version of `hasStrictFDerivAt_list_prod`, obtained by induction on a
representing list. -/
@[fun_prop]
theorem hasStrictFDerivAt_multiset_prod [DecidableEq ι] [Fintype ι] {u : Multiset ι} {x : ι â ðž'} :
    HasStrictFDerivAt (ð := ð) (fun x ⊠(u.map x).prod)
      (u.map (fun i ⊠((u.erase i).map x).prod ⢠proj i)).sum x :=
  u.inductionOn fun l ⊠by simpa using hasStrictFDerivAt_list_prod

@[fun_prop]
theorem hasFDerivAt_multiset_prod [DecidableEq ι] [Fintype ι] {u : Multiset ι} {x : ι â ðž'} :
    HasFDerivAt (ð := ð) (fun x ⊠(u.map x).prod)
      (Multiset.sum (u.map (fun i ⊠((u.erase i).map x).prod ⢠proj i))) x :=
  hasStrictFDerivAt_multiset_prod.hasFDerivAt

/-- Strict derivative of a product over a `Finset`, with the usual
"product over the erased set" coefficient on each projection. -/
theorem hasStrictFDerivAt_finset_prod [DecidableEq ι] [Fintype ι] {x : ι â ðž'} :
    HasStrictFDerivAt (ð := ð) (â i â u, · i) (â i â u, (â j â u.erase i, x j) ⢠proj i) x := by
  simp only [Finset.sum_eq_multiset_sum, Finset.prod_eq_multiset_prod]
  exact hasStrictFDerivAt_multiset_prod

theorem hasFDerivAt_finset_prod [DecidableEq ι] [Fintype ι] {x : ι â ðž'} :
    HasFDerivAt (ð := ð) (â i â u, · i) (â i â u, (â j â u.erase i, x j) ⢠proj i) x :=
  hasStrictFDerivAt_finset_prod.hasFDerivAt
section Comp
/-- Composition version of `hasStrictFDerivAt_list_prod'`: if each factor `f i` is strictly
differentiable at `x`, so is the list product `fun x => (l.map (f · x)).prod`, with the
non-commutative Leibniz derivative. -/
@[fun_prop]
theorem HasStrictFDerivAt.list_prod' {l : List ι} {x : E}
    (h : â i â l, HasStrictFDerivAt (f i ·) (f' i) x) :
    HasStrictFDerivAt (fun x ⊠(l.map (f · x)).prod)
      (â i : Fin l.length, ((l.take i).map (f · x)).prod â¢
        smulRight (f' (l.get i)) ((l.drop (.succ i)).map (f · x)).prod) x := by
  -- Reduce to the `finRange` primitive composed with the pi-map of the factors.
  simp only [â List.finRange_map_get l, List.map_map]
  refine .congr_fderiv (hasStrictFDerivAt_list_prod_finRange'.comp x
    (hasStrictFDerivAt_pi.mpr fun i ⊠h (l.get i) (l.get_mem i i.isLt))) ?_
  ext m
  simp [â Function.comp_def (f · x) (l.get ·), â List.map_map, List.map_take, List.map_drop]

/--
Unlike `HasFDerivAt.finset_prod`, supports non-commutative multiply and duplicate elements.
-/
@[fun_prop]
theorem HasFDerivAt.list_prod' {l : List ι} {x : E}
    (h : â i â l, HasFDerivAt (f i ·) (f' i) x) :
    HasFDerivAt (fun x ⊠(l.map (f · x)).prod)
      (â i : Fin l.length, ((l.take i).map (f · x)).prod â¢
        smulRight (f' (l.get i)) ((l.drop (.succ i)).map (f · x)).prod) x := by
  simp only [â List.finRange_map_get l, List.map_map]
  refine .congr_fderiv (hasFDerivAt_list_prod_finRange'.comp x
    (hasFDerivAt_pi.mpr fun i ⊠h (l.get i) (l.get_mem i i.isLt))) ?_
  ext m
  simp [â Function.comp_def (f · x) (l.get ·), â List.map_map, List.map_take, List.map_drop]

/-- Within-a-set analogue of `HasFDerivAt.list_prod'`. -/
@[fun_prop]
theorem HasFDerivWithinAt.list_prod' {l : List ι} {x : E}
    (h : â i â l, HasFDerivWithinAt (f i ·) (f' i) s x) :
    HasFDerivWithinAt (fun x ⊠(l.map (f · x)).prod)
      (â i : Fin l.length, ((l.take i).map (f · x)).prod â¢
        smulRight (f' (l.get i)) ((l.drop (.succ i)).map (f · x)).prod) s x := by
  simp only [â List.finRange_map_get l, List.map_map]
  refine .congr_fderiv (hasFDerivAt_list_prod_finRange'.comp_hasFDerivWithinAt x
    (hasFDerivWithinAt_pi.mpr fun i ⊠h (l.get i) (l.get_mem i i.isLt))) ?_
  ext m
  simp [â Function.comp_def (f · x) (l.get ·), â List.map_map, List.map_take, List.map_drop]

/-- `fderiv` formula corresponding to `HasFDerivAt.list_prod'`. -/
theorem fderiv_list_prod' {l : List ι} {x : E}
    (h : â i â l, DifferentiableAt ð (f i ·) x) :
    fderiv ð (fun x ⊠(l.map (f · x)).prod) x =
      â i : Fin l.length, ((l.take i).map (f · x)).prod â¢
        smulRight (fderiv ð (fun x ⊠f (l.get i) x) x)
          ((l.drop (.succ i)).map (f · x)).prod :=
  (HasFDerivAt.list_prod' fun i hi ⊠(h i hi).hasFDerivAt).fderiv

/-- `fderivWithin` formula corresponding to `HasFDerivWithinAt.list_prod'`. -/
theorem fderivWithin_list_prod' {l : List ι} {x : E}
    (hxs : UniqueDiffWithinAt ð s x) (h : â i â l, DifferentiableWithinAt ð (f i ·) s x) :
    fderivWithin ð (fun x ⊠(l.map (f · x)).prod) s x =
      â i : Fin l.length, ((l.take i).map (f · x)).prod â¢
        smulRight (fderivWithin ð (fun x ⊠f (l.get i) x) s x)
          ((l.drop (.succ i)).map (f · x)).prod :=
  (HasFDerivWithinAt.list_prod' fun i hi ⊠(h i hi).hasFDerivWithinAt).fderivWithin hxs
/-- Composition version of `hasStrictFDerivAt_multiset_prod`: strict differentiability of a
multiset product of functions, with "product over the erased multiset" coefficients. -/
@[fun_prop]
theorem HasStrictFDerivAt.multiset_prod [DecidableEq ι] {u : Multiset ι} {x : E}
    (h : â i â u, HasStrictFDerivAt (g i ·) (g' i) x) :
    HasStrictFDerivAt (fun x ⊠(u.map (g · x)).prod)
      (u.map fun i ⊠((u.erase i).map (g · x)).prod ⢠g' i).sum x := by
  -- Pass through `u.attach` so the point lemma (indexed by distinct elements) applies.
  simp only [â Multiset.attach_map_val u, Multiset.map_map]
  exact .congr_fderiv
    (hasStrictFDerivAt_multiset_prod.comp x <| hasStrictFDerivAt_pi.mpr fun i ⊠h i i.prop)
    (by ext; simp [Finset.sum_multiset_map_count, u.erase_attach_map (g · x)])

/--
Unlike `HasFDerivAt.finset_prod`, supports duplicate elements.
-/
@[fun_prop]
theorem HasFDerivAt.multiset_prod [DecidableEq ι] {u : Multiset ι} {x : E}
    (h : â i â u, HasFDerivAt (g i ·) (g' i) x) :
    HasFDerivAt (fun x ⊠(u.map (g · x)).prod)
      (u.map fun i ⊠((u.erase i).map (g · x)).prod ⢠g' i).sum x := by
  simp only [â Multiset.attach_map_val u, Multiset.map_map]
  exact .congr_fderiv
    (hasFDerivAt_multiset_prod.comp x <| hasFDerivAt_pi.mpr fun i ⊠h i i.prop)
    (by ext; simp [Finset.sum_multiset_map_count, u.erase_attach_map (g · x)])

/-- Within-a-set analogue of `HasFDerivAt.multiset_prod`. -/
@[fun_prop]
theorem HasFDerivWithinAt.multiset_prod [DecidableEq ι] {u : Multiset ι} {x : E}
    (h : â i â u, HasFDerivWithinAt (g i ·) (g' i) s x) :
    HasFDerivWithinAt (fun x ⊠(u.map (g · x)).prod)
      (u.map fun i ⊠((u.erase i).map (g · x)).prod ⢠g' i).sum s x := by
  simp only [â Multiset.attach_map_val u, Multiset.map_map]
  exact .congr_fderiv
    (hasFDerivAt_multiset_prod.comp_hasFDerivWithinAt x <|
      hasFDerivWithinAt_pi.mpr fun i ⊠h i i.prop)
    (by ext; simp [Finset.sum_multiset_map_count, u.erase_attach_map (g · x)])

/-- `fderiv` formula corresponding to `HasFDerivAt.multiset_prod`. -/
theorem fderiv_multiset_prod [DecidableEq ι] {u : Multiset ι} {x : E}
    (h : â i â u, DifferentiableAt ð (g i ·) x) :
    fderiv ð (fun x ⊠(u.map (g · x)).prod) x =
      (u.map fun i ⊠((u.erase i).map (g · x)).prod ⢠fderiv ð (g i) x).sum :=
  (HasFDerivAt.multiset_prod fun i hi ⊠(h i hi).hasFDerivAt).fderiv

/-- `fderivWithin` formula corresponding to `HasFDerivWithinAt.multiset_prod`. -/
theorem fderivWithin_multiset_prod [DecidableEq ι] {u : Multiset ι} {x : E}
    (hxs : UniqueDiffWithinAt ð s x) (h : â i â u, DifferentiableWithinAt ð (g i ·) s x) :
    fderivWithin ð (fun x ⊠(u.map (g · x)).prod) s x =
      (u.map fun i ⊠((u.erase i).map (g · x)).prod ⢠fderivWithin ð (g i) s x).sum :=
  (HasFDerivWithinAt.multiset_prod fun i hi ⊠(h i hi).hasFDerivWithinAt).fderivWithin hxs

/-- Strict derivative of a finite product of functions over a `Finset` (commutative case). -/
theorem HasStrictFDerivAt.finset_prod [DecidableEq ι] {x : E}
    (hg : â i â u, HasStrictFDerivAt (g i) (g' i) x) :
    HasStrictFDerivAt (â i â u, g i ·) (â i â u, (â j â u.erase i, g j x) ⢠g' i) x := by
  simpa [â Finset.prod_attach u] using .congr_fderiv
    (hasStrictFDerivAt_finset_prod.comp x <| hasStrictFDerivAt_pi.mpr fun i ⊠hg i i.prop)
    (by ext; simp [Finset.prod_erase_attach (g · x), â u.sum_attach])

theorem HasFDerivAt.finset_prod [DecidableEq ι] {x : E}
    (hg : â i â u, HasFDerivAt (g i) (g' i) x) :
    HasFDerivAt (â i â u, g i ·) (â i â u, (â j â u.erase i, g j x) ⢠g' i) x := by
  simpa [â Finset.prod_attach u] using .congr_fderiv
    (hasFDerivAt_finset_prod.comp x <| hasFDerivAt_pi.mpr fun i ⊠hg i i.prop)
    (by ext; simp [Finset.prod_erase_attach (g · x), â u.sum_attach])

theorem HasFDerivWithinAt.finset_prod [DecidableEq ι] {x : E}
    (hg : â i â u, HasFDerivWithinAt (g i) (g' i) s x) :
    HasFDerivWithinAt (â i â u, g i ·) (â i â u, (â j â u.erase i, g j x) ⢠g' i) s x := by
  simpa [â Finset.prod_attach u] using .congr_fderiv
    (hasFDerivAt_finset_prod.comp_hasFDerivWithinAt x <|
      hasFDerivWithinAt_pi.mpr fun i ⊠hg i i.prop)
    (by ext; simp [Finset.prod_erase_attach (g · x), â u.sum_attach])

/-- `fderiv` formula for a `Finset` product of functions. -/
theorem fderiv_finset_prod [DecidableEq ι] {x : E} (hg : â i â u, DifferentiableAt ð (g i) x) :
    fderiv ð (â i â u, g i ·) x = â i â u, (â j â u.erase i, (g j x)) ⢠fderiv ð (g i) x :=
  (HasFDerivAt.finset_prod fun i hi ⊠(hg i hi).hasFDerivAt).fderiv

/-- `fderivWithin` formula for a `Finset` product of functions. -/
theorem fderivWithin_finset_prod [DecidableEq ι] {x : E} (hxs : UniqueDiffWithinAt ð s x)
    (hg : â i â u, DifferentiableWithinAt ð (g i) s x) :
    fderivWithin ð (â i â u, g i ·) s x =
      â i â u, (â j â u.erase i, (g j x)) ⢠fderivWithin ð (g i) s x :=
  (HasFDerivWithinAt.finset_prod fun i hi ⊠(hg i hi).hasFDerivWithinAt).fderivWithin hxs
end Comp
end Prod
section AlgebraInverse
variable {R : Type*} [NormedRing R] [NormedAlgebra ð R] [CompleteSpace R]
open NormedRing ContinuousLinearMap Ring
/-- At an invertible element `x` of a normed algebra `R`, the Fréchet derivative of the inversion
operation `Ring.inverse` is the linear map sending `t` to `-x⁻¹ * t * x⁻¹`.

TODO: prove that `Ring.inverse` is analytic and use it to prove a `HasStrictFDerivAt` lemma.
TODO (low prio): prove a version without assumption `[CompleteSpace R]` but within the set of
units. -/
@[fun_prop]
theorem hasFDerivAt_ring_inverse (x : RË£) :
    HasFDerivAt Ring.inverse (-mulLeftRight ð R âxâ»Â¹ âxâ»Â¹) x :=
  -- The second-order estimate for `inverse (x + t)` gives exactly the little-o
  -- condition defining the derivative at `x`.
  have : (fun t : R => Ring.inverse (âx + t) - âxâ»Â¹ + âxâ»Â¹ * t * âxâ»Â¹) =o[ð 0] id :=
    (inverse_add_norm_diff_second_order x).trans_isLittleO (isLittleO_norm_pow_id one_lt_two)
  by simpa [hasFDerivAt_iff_isLittleO_nhds_zero] using this

/-- `Ring.inverse` is differentiable at any unit of `R`. -/
@[fun_prop]
theorem differentiableAt_inverse {x : R} (hx : IsUnit x) :
    DifferentiableAt ð (@Ring.inverse R _) x :=
  let âšu, huâ© := hx; hu âž (hasFDerivAt_ring_inverse u).differentiableAt

@[fun_prop]
theorem differentiableWithinAt_inverse {x : R} (hx : IsUnit x) (s : Set R) :
    DifferentiableWithinAt ð (@Ring.inverse R _) s x :=
  (differentiableAt_inverse hx).differentiableWithinAt

/-- `Ring.inverse` is differentiable on the set of units. -/
@[fun_prop]
theorem differentiableOn_inverse : DifferentiableOn ð (@Ring.inverse R _) {x | IsUnit x} :=
  fun _x hx => differentiableWithinAt_inverse hx _

theorem fderiv_inverse (x : RË£) : fderiv ð (@Ring.inverse R _) x = -mulLeftRight ð R âxâ»Â¹ âxâ»Â¹ :=
  (hasFDerivAt_ring_inverse x).fderiv

variable {h : E â R} {z : E} {S : Set E}

-- Composition rules: `Ring.inverse â h` is differentiable wherever `h` is and `h` takes
-- unit values.
@[fun_prop]
theorem DifferentiableWithinAt.inverse (hf : DifferentiableWithinAt ð h S z) (hz : IsUnit (h z)) :
    DifferentiableWithinAt ð (fun x => Ring.inverse (h x)) S z :=
  (differentiableAt_inverse hz).comp_differentiableWithinAt z hf

@[simp, fun_prop]
theorem DifferentiableAt.inverse (hf : DifferentiableAt ð h z) (hz : IsUnit (h z)) :
    DifferentiableAt ð (fun x => Ring.inverse (h x)) z :=
  (differentiableAt_inverse hz).comp z hf

@[fun_prop]
theorem DifferentiableOn.inverse (hf : DifferentiableOn ð h S) (hz : â x â S, IsUnit (h x)) :
    DifferentiableOn ð (fun x => Ring.inverse (h x)) S := fun x h => (hf x h).inverse (hz x h)

@[simp, fun_prop]
theorem Differentiable.inverse (hf : Differentiable ð h) (hz : â x, IsUnit (h x)) :
    Differentiable ð fun x => Ring.inverse (h x) := fun x => (hf x).inverse (hz x)
end AlgebraInverse
/-! ### Derivative of the inverse in a division ring
Note these lemmas are primed as they need `CompleteSpace R`, whereas the other lemmas in
`Mathlib/Analysis/Calculus/Deriv/Inv.lean` do not, but instead need `NontriviallyNormedField R`.
-/
section DivisionRingInverse
variable {R : Type*} [NormedDivisionRing R] [NormedAlgebra ð R] [CompleteSpace R]
open NormedRing ContinuousLinearMap Ring
/-- At an invertible element `x` of a normed division algebra `R`, the Fréchet derivative of the
inversion operation is the linear map sending `t` to `-x⁻¹ * t * x⁻¹`. -/
@[fun_prop]
theorem hasFDerivAt_inv' {x : R} (hx : x â 0) :
    HasFDerivAt Inv.inv (-mulLeftRight ð R xâ»Â¹ xâ»Â¹) x := by
  -- In a division ring every nonzero element is a unit, so this follows from the
  -- `Ring.inverse` version applied to `Units.mk0`.
  simpa using hasFDerivAt_ring_inverse (Units.mk0 _ hx)

@[fun_prop]
theorem differentiableAt_inv' {x : R} (hx : x â 0) : DifferentiableAt ð Inv.inv x :=
  (hasFDerivAt_inv' hx).differentiableAt

@[fun_prop]
theorem differentiableWithinAt_inv' {x : R} (hx : x â 0) (s : Set R) :
    DifferentiableWithinAt ð (fun x => xâ»Â¹) s x :=
  (differentiableAt_inv' hx).differentiableWithinAt

/-- Inversion is differentiable away from zero. -/
@[fun_prop]
theorem differentiableOn_inv' : DifferentiableOn ð (fun x : R => xâ»Â¹) {x | x â 0} := fun _x hx =>
  differentiableWithinAt_inv' hx _

/-- Non-commutative version of `fderiv_inv` -/
theorem fderiv_inv' {x : R} (hx : x â 0) : fderiv ð Inv.inv x = -mulLeftRight ð R xâ»Â¹ xâ»Â¹ :=
  (hasFDerivAt_inv' hx).fderiv

/-- Non-commutative version of `fderivWithin_inv` -/
theorem fderivWithin_inv' {s : Set R} {x : R} (hx : x â 0) (hxs : UniqueDiffWithinAt ð s x) :
    fderivWithin ð (fun x => xâ»Â¹) s x = -mulLeftRight ð R xâ»Â¹ xâ»Â¹ := by
  rw [DifferentiableAt.fderivWithin (differentiableAt_inv' hx) hxs]
  exact fderiv_inv' hx

variable {h : E â R} {z : E} {S : Set E}

-- Composition rules: `(h ·)â»Â¹` is differentiable wherever `h` is and `h` is nonzero.
@[fun_prop]
theorem DifferentiableWithinAt.inv' (hf : DifferentiableWithinAt ð h S z) (hz : h z â 0) :
    DifferentiableWithinAt ð (fun x => (h x)â»Â¹) S z :=
  (differentiableAt_inv' hz).comp_differentiableWithinAt z hf

@[simp, fun_prop]
theorem DifferentiableAt.inv' (hf : DifferentiableAt ð h z) (hz : h z â 0) :
    DifferentiableAt ð (fun x => (h x)â»Â¹) z :=
  (differentiableAt_inv' hz).comp z hf

@[fun_prop]
theorem DifferentiableOn.inv' (hf : DifferentiableOn ð h S) (hz : â x â S, h x â 0) :
    DifferentiableOn ð (fun x => (h x)â»Â¹) S := fun x h => (hf x h).inv' (hz x h)

@[simp, fun_prop]
theorem Differentiable.inv' (hf : Differentiable ð h) (hz : â x, h x â 0) :
    Differentiable ð fun x => (h x)â»Â¹ := fun x => (hf x).inv' (hz x)
end DivisionRingInverse
end
|
Analysis\Calculus\FDeriv\Pi.lean | /-
Copyright (c) 2023 Floris van Doorn. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Floris van Doorn, Heather Macbeth
-/
import Mathlib.Analysis.Calculus.FDeriv.Add
/-!
# Derivatives on pi-types.
-/
variable {ð ι : Type*} [DecidableEq ι] [Fintype ι] [NontriviallyNormedField ð]
variable {E : ι â Type*} [â i, NormedAddCommGroup (E i)] [â i, NormedSpace ð (E i)]
variable {F : Type*} [NormedAddCommGroup F] [NormedSpace ð F]
/-- Updating the `i`-th coordinate of a fixed point `x` is differentiable in the new value `y`;
the derivative is the continuous linear inclusion of `E i` into the `i`-th coordinate of the
pi-type (zero in all other coordinates). -/
@[fun_prop]
theorem hasFDerivAt_update (x : â i, E i) {i : ι} (y : E i) :
    HasFDerivAt (Function.update x i) (.pi (Pi.single i (.id ð (E i)))) y := by
  set l := (ContinuousLinearMap.pi (Pi.single i (.id ð (E i))))
  -- `Function.update x i` is the constant `x` plus the linear inclusion applied to `· - x i`,
  -- so its derivative is that of the linear part.
  have update_eq : Function.update x i = (fun _ ⊠x) + l â (· - x i) := by
    ext t j
    dsimp [l, Pi.single, Function.update]
    split_ifs with hji
    · subst hji
      simp
    · simp
  rw [update_eq]
  convert (hasFDerivAt_const _ _).add (l.hasFDerivAt.comp y (hasFDerivAt_sub_const (x i)))
  rw [zero_add, ContinuousLinearMap.comp_id]

/-- `Pi.single i` is differentiable: it is `Function.update` of the zero function. -/
@[fun_prop]
theorem hasFDerivAt_single {i : ι} (y : E i) :
    HasFDerivAt (Pi.single i) (.pi (Pi.single i (.id ð (E i)))) y :=
  hasFDerivAt_update 0 y

theorem fderiv_update (x : â i, E i) {i : ι} (y : E i) :
    fderiv ð (Function.update x i) y = .pi (Pi.single i (.id ð (E i))) :=
  (hasFDerivAt_update x y).fderiv

theorem fderiv_single {i : ι} (y : E i) :
    fderiv ð (Pi.single i) y = .pi (Pi.single i (.id ð (E i))) :=
  fderiv_update 0 y
|
Analysis\Calculus\FDeriv\Prod.lean | /-
Copyright (c) 2019 Jeremy Avigad. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Jeremy Avigad, Sébastien Gouëzel, Yury Kudryashov
-/
import Mathlib.Analysis.Calculus.FDeriv.Linear
import Mathlib.Analysis.Calculus.FDeriv.Comp
/-!
# Derivative of the cartesian product of functions
For detailed documentation of the Fréchet derivative,
see the module docstring of `Analysis/Calculus/FDeriv/Basic.lean`.
This file contains the usual formulas (and existence assertions) for the derivative of
cartesian products of functions, and functions into Pi-types.
-/
open Filter Asymptotics ContinuousLinearMap Set Metric
open scoped Classical
open Topology NNReal Filter Asymptotics ENNReal
noncomputable section
section
variable {ð : Type*} [NontriviallyNormedField ð]
variable {E : Type*} [NormedAddCommGroup E] [NormedSpace ð E]
variable {F : Type*} [NormedAddCommGroup F] [NormedSpace ð F]
variable {G : Type*} [NormedAddCommGroup G] [NormedSpace ð G]
variable {G' : Type*} [NormedAddCommGroup G'] [NormedSpace ð G']
variable {f fâ fâ g : E â F}
variable {f' fâ' fâ' g' : E âL[ð] F}
variable (e : E âL[ð] F)
variable {x : E}
variable {s t : Set E}
variable {L Lâ Lâ : Filter E}
section CartesianProduct
/-! ### Derivative of the cartesian product of two functions -/
section Prod
variable {fâ : E â G} {fâ' : E âL[ð] G}
/-- The pair of two strictly differentiable functions is strictly differentiable, with the
product (pairing) of the two derivatives. -/
protected theorem HasStrictFDerivAt.prod (hfâ : HasStrictFDerivAt fâ fâ' x)
    (hfâ : HasStrictFDerivAt fâ fâ' x) :
    HasStrictFDerivAt (fun x => (fâ x, fâ x)) (fâ'.prod fâ') x :=
  hfâ.prod_left hfâ

theorem HasFDerivAtFilter.prod (hfâ : HasFDerivAtFilter fâ fâ' x L)
    (hfâ : HasFDerivAtFilter fâ fâ' x L) :
    HasFDerivAtFilter (fun x => (fâ x, fâ x)) (fâ'.prod fâ') x L :=
  .of_isLittleO <| hfâ.isLittleO.prod_left hfâ.isLittleO

@[fun_prop]
nonrec theorem HasFDerivWithinAt.prod (hfâ : HasFDerivWithinAt fâ fâ' s x)
    (hfâ : HasFDerivWithinAt fâ fâ' s x) :
    HasFDerivWithinAt (fun x => (fâ x, fâ x)) (fâ'.prod fâ') s x :=
  hfâ.prod hfâ

@[fun_prop]
nonrec theorem HasFDerivAt.prod (hfâ : HasFDerivAt fâ fâ' x) (hfâ : HasFDerivAt fâ fâ' x) :
    HasFDerivAt (fun x => (fâ x, fâ x)) (fâ'.prod fâ') x :=
  hfâ.prod hfâ

/-- `fun e => (e, fâ)` has the left inclusion `inl` as derivative. -/
@[fun_prop]
theorem hasFDerivAt_prod_mk_left (eâ : E) (fâ : F) :
    HasFDerivAt (fun e : E => (e, fâ)) (inl ð E F) eâ :=
  (hasFDerivAt_id eâ).prod (hasFDerivAt_const fâ eâ)

/-- `fun f => (eâ, f)` has the right inclusion `inr` as derivative. -/
@[fun_prop]
theorem hasFDerivAt_prod_mk_right (eâ : E) (fâ : F) :
    HasFDerivAt (fun f : F => (eâ, f)) (inr ð E F) fâ :=
  (hasFDerivAt_const eâ fâ).prod (hasFDerivAt_id fâ)

-- Differentiability of pairs, in every flavor (within at / at / on / global).
@[fun_prop]
theorem DifferentiableWithinAt.prod (hfâ : DifferentiableWithinAt ð fâ s x)
    (hfâ : DifferentiableWithinAt ð fâ s x) :
    DifferentiableWithinAt ð (fun x : E => (fâ x, fâ x)) s x :=
  (hfâ.hasFDerivWithinAt.prod hfâ.hasFDerivWithinAt).differentiableWithinAt

@[simp, fun_prop]
theorem DifferentiableAt.prod (hfâ : DifferentiableAt ð fâ x) (hfâ : DifferentiableAt ð fâ x) :
    DifferentiableAt ð (fun x : E => (fâ x, fâ x)) x :=
  (hfâ.hasFDerivAt.prod hfâ.hasFDerivAt).differentiableAt

@[fun_prop]
theorem DifferentiableOn.prod (hfâ : DifferentiableOn ð fâ s) (hfâ : DifferentiableOn ð fâ s) :
    DifferentiableOn ð (fun x : E => (fâ x, fâ x)) s := fun x hx =>
  DifferentiableWithinAt.prod (hfâ x hx) (hfâ x hx)

@[simp, fun_prop]
theorem Differentiable.prod (hfâ : Differentiable ð fâ) (hfâ : Differentiable ð fâ) :
    Differentiable ð fun x : E => (fâ x, fâ x) := fun x => DifferentiableAt.prod (hfâ x) (hfâ x)

/-- The derivative of a pair is the pair of the derivatives. -/
theorem DifferentiableAt.fderiv_prod (hfâ : DifferentiableAt ð fâ x)
    (hfâ : DifferentiableAt ð fâ x) :
    fderiv ð (fun x : E => (fâ x, fâ x)) x = (fderiv ð fâ x).prod (fderiv ð fâ x) :=
  (hfâ.hasFDerivAt.prod hfâ.hasFDerivAt).fderiv

theorem DifferentiableWithinAt.fderivWithin_prod (hfâ : DifferentiableWithinAt ð fâ s x)
    (hfâ : DifferentiableWithinAt ð fâ s x) (hxs : UniqueDiffWithinAt ð s x) :
    fderivWithin ð (fun x : E => (fâ x, fâ x)) s x =
      (fderivWithin ð fâ s x).prod (fderivWithin ð fâ s x) :=
  (hfâ.hasFDerivWithinAt.prod hfâ.hasFDerivWithinAt).fderivWithin hxs
end Prod
section Fst
variable {fâ : E â F Ã G} {fâ' : E âL[ð] F Ã G} {p : E Ã F}
-- `Prod.fst` is a continuous linear map, hence differentiable in every sense, with
-- derivative the projection `fst ð E F`.  The `HasâŠ.fst` lemmas compose this with a
-- product-valued function `fâ`.
@[fun_prop]
theorem hasStrictFDerivAt_fst : HasStrictFDerivAt (@Prod.fst E F) (fst ð E F) p :=
  (fst ð E F).hasStrictFDerivAt

@[fun_prop]
protected theorem HasStrictFDerivAt.fst (h : HasStrictFDerivAt fâ fâ' x) :
    HasStrictFDerivAt (fun x => (fâ x).1) ((fst ð F G).comp fâ') x :=
  hasStrictFDerivAt_fst.comp x h

theorem hasFDerivAtFilter_fst {L : Filter (E Ã F)} :
    HasFDerivAtFilter (@Prod.fst E F) (fst ð E F) p L :=
  (fst ð E F).hasFDerivAtFilter

protected theorem HasFDerivAtFilter.fst (h : HasFDerivAtFilter fâ fâ' x L) :
    HasFDerivAtFilter (fun x => (fâ x).1) ((fst ð F G).comp fâ') x L :=
  hasFDerivAtFilter_fst.comp x h tendsto_map

@[fun_prop]
theorem hasFDerivAt_fst : HasFDerivAt (@Prod.fst E F) (fst ð E F) p :=
  hasFDerivAtFilter_fst

@[fun_prop]
protected nonrec theorem HasFDerivAt.fst (h : HasFDerivAt fâ fâ' x) :
    HasFDerivAt (fun x => (fâ x).1) ((fst ð F G).comp fâ') x :=
  h.fst

@[fun_prop]
theorem hasFDerivWithinAt_fst {s : Set (E Ã F)} :
    HasFDerivWithinAt (@Prod.fst E F) (fst ð E F) s p :=
  hasFDerivAtFilter_fst

@[fun_prop]
protected nonrec theorem HasFDerivWithinAt.fst (h : HasFDerivWithinAt fâ fâ' s x) :
    HasFDerivWithinAt (fun x => (fâ x).1) ((fst ð F G).comp fâ') s x :=
  h.fst

@[fun_prop]
theorem differentiableAt_fst : DifferentiableAt ð Prod.fst p :=
  hasFDerivAt_fst.differentiableAt

@[simp, fun_prop]
protected theorem DifferentiableAt.fst (h : DifferentiableAt ð fâ x) :
    DifferentiableAt ð (fun x => (fâ x).1) x :=
  differentiableAt_fst.comp x h

@[fun_prop]
theorem differentiable_fst : Differentiable ð (Prod.fst : E Ã F â E) := fun _ =>
  differentiableAt_fst

@[simp, fun_prop]
protected theorem Differentiable.fst (h : Differentiable ð fâ) :
    Differentiable ð fun x => (fâ x).1 :=
  differentiable_fst.comp h

@[fun_prop]
theorem differentiableWithinAt_fst {s : Set (E Ã F)} : DifferentiableWithinAt ð Prod.fst s p :=
  differentiableAt_fst.differentiableWithinAt

@[fun_prop]
protected theorem DifferentiableWithinAt.fst (h : DifferentiableWithinAt ð fâ s x) :
    DifferentiableWithinAt ð (fun x => (fâ x).1) s x :=
  differentiableAt_fst.comp_differentiableWithinAt x h

@[fun_prop]
theorem differentiableOn_fst {s : Set (E Ã F)} : DifferentiableOn ð Prod.fst s :=
  differentiable_fst.differentiableOn

@[fun_prop]
protected theorem DifferentiableOn.fst (h : DifferentiableOn ð fâ s) :
    DifferentiableOn ð (fun x => (fâ x).1) s :=
  differentiable_fst.comp_differentiableOn h

-- `fderiv`/`fderivWithin` formulas for first projections.
theorem fderiv_fst : fderiv ð Prod.fst p = fst ð E F :=
  hasFDerivAt_fst.fderiv

theorem fderiv.fst (h : DifferentiableAt ð fâ x) :
    fderiv ð (fun x => (fâ x).1) x = (fst ð F G).comp (fderiv ð fâ x) :=
  h.hasFDerivAt.fst.fderiv

theorem fderivWithin_fst {s : Set (E Ã F)} (hs : UniqueDiffWithinAt ð s p) :
    fderivWithin ð Prod.fst s p = fst ð E F :=
  hasFDerivWithinAt_fst.fderivWithin hs

theorem fderivWithin.fst (hs : UniqueDiffWithinAt ð s x) (h : DifferentiableWithinAt ð fâ s x) :
    fderivWithin ð (fun x => (fâ x).1) s x = (fst ð F G).comp (fderivWithin ð fâ s x) :=
  h.hasFDerivWithinAt.fst.fderivWithin hs
end Fst
section Snd
variable {fâ : E â F Ã G} {fâ' : E âL[ð] F Ã G} {p : E Ã F}
-- Mirror of the `Fst` section for `Prod.snd`: the second projection is continuous linear,
-- hence differentiable in every sense, with derivative `snd ð E F`.
@[fun_prop]
theorem hasStrictFDerivAt_snd : HasStrictFDerivAt (@Prod.snd E F) (snd ð E F) p :=
  (snd ð E F).hasStrictFDerivAt

@[fun_prop]
protected theorem HasStrictFDerivAt.snd (h : HasStrictFDerivAt fâ fâ' x) :
    HasStrictFDerivAt (fun x => (fâ x).2) ((snd ð F G).comp fâ') x :=
  hasStrictFDerivAt_snd.comp x h

theorem hasFDerivAtFilter_snd {L : Filter (E Ã F)} :
    HasFDerivAtFilter (@Prod.snd E F) (snd ð E F) p L :=
  (snd ð E F).hasFDerivAtFilter

protected theorem HasFDerivAtFilter.snd (h : HasFDerivAtFilter fâ fâ' x L) :
    HasFDerivAtFilter (fun x => (fâ x).2) ((snd ð F G).comp fâ') x L :=
  hasFDerivAtFilter_snd.comp x h tendsto_map

@[fun_prop]
theorem hasFDerivAt_snd : HasFDerivAt (@Prod.snd E F) (snd ð E F) p :=
  hasFDerivAtFilter_snd

@[fun_prop]
protected nonrec theorem HasFDerivAt.snd (h : HasFDerivAt fâ fâ' x) :
    HasFDerivAt (fun x => (fâ x).2) ((snd ð F G).comp fâ') x :=
  h.snd

@[fun_prop]
theorem hasFDerivWithinAt_snd {s : Set (E Ã F)} :
    HasFDerivWithinAt (@Prod.snd E F) (snd ð E F) s p :=
  hasFDerivAtFilter_snd

@[fun_prop]
protected nonrec theorem HasFDerivWithinAt.snd (h : HasFDerivWithinAt fâ fâ' s x) :
    HasFDerivWithinAt (fun x => (fâ x).2) ((snd ð F G).comp fâ') s x :=
  h.snd

@[fun_prop]
theorem differentiableAt_snd : DifferentiableAt ð Prod.snd p :=
  hasFDerivAt_snd.differentiableAt

@[simp, fun_prop]
protected theorem DifferentiableAt.snd (h : DifferentiableAt ð fâ x) :
    DifferentiableAt ð (fun x => (fâ x).2) x :=
  differentiableAt_snd.comp x h

@[fun_prop]
theorem differentiable_snd : Differentiable ð (Prod.snd : E Ã F â F) := fun _ =>
  differentiableAt_snd

@[simp, fun_prop]
protected theorem Differentiable.snd (h : Differentiable ð fâ) :
    Differentiable ð fun x => (fâ x).2 :=
  differentiable_snd.comp h

@[fun_prop]
theorem differentiableWithinAt_snd {s : Set (E Ã F)} : DifferentiableWithinAt ð Prod.snd s p :=
  differentiableAt_snd.differentiableWithinAt

@[fun_prop]
protected theorem DifferentiableWithinAt.snd (h : DifferentiableWithinAt ð fâ s x) :
    DifferentiableWithinAt ð (fun x => (fâ x).2) s x :=
  differentiableAt_snd.comp_differentiableWithinAt x h

@[fun_prop]
theorem differentiableOn_snd {s : Set (E Ã F)} : DifferentiableOn ð Prod.snd s :=
  differentiable_snd.differentiableOn

@[fun_prop]
protected theorem DifferentiableOn.snd (h : DifferentiableOn ð fâ s) :
    DifferentiableOn ð (fun x => (fâ x).2) s :=
  differentiable_snd.comp_differentiableOn h

-- `fderiv`/`fderivWithin` formulas for second projections.
theorem fderiv_snd : fderiv ð Prod.snd p = snd ð E F :=
  hasFDerivAt_snd.fderiv

theorem fderiv.snd (h : DifferentiableAt ð fâ x) :
    fderiv ð (fun x => (fâ x).2) x = (snd ð F G).comp (fderiv ð fâ x) :=
  h.hasFDerivAt.snd.fderiv

theorem fderivWithin_snd {s : Set (E Ã F)} (hs : UniqueDiffWithinAt ð s p) :
    fderivWithin ð Prod.snd s p = snd ð E F :=
  hasFDerivWithinAt_snd.fderivWithin hs

theorem fderivWithin.snd (hs : UniqueDiffWithinAt ð s x) (h : DifferentiableWithinAt ð fâ s x) :
    fderivWithin ð (fun x => (fâ x).2) s x = (snd ð F G).comp (fderivWithin ð fâ s x) :=
  h.hasFDerivWithinAt.snd.fderivWithin hs
end Snd
section prodMap
variable {fâ : G â G'} {fâ' : G âL[ð] G'} {y : G} (p : E Ã G)
/-- `Prod.map f fâ` is strictly differentiable at `p` when each component is strictly
differentiable at the corresponding coordinate of `p`; the derivative is the product map
of the two derivatives. -/
@[fun_prop]
protected theorem HasStrictFDerivAt.prodMap (hf : HasStrictFDerivAt f f' p.1)
    (hfâ : HasStrictFDerivAt fâ fâ' p.2) : HasStrictFDerivAt (Prod.map f fâ) (f'.prodMap fâ') p :=
  (hf.comp p hasStrictFDerivAt_fst).prod (hfâ.comp p hasStrictFDerivAt_snd)

@[fun_prop]
protected theorem HasFDerivAt.prodMap (hf : HasFDerivAt f f' p.1) (hfâ : HasFDerivAt fâ fâ' p.2) :
    HasFDerivAt (Prod.map f fâ) (f'.prodMap fâ') p :=
  (hf.comp p hasFDerivAt_fst).prod (hfâ.comp p hasFDerivAt_snd)

@[simp, fun_prop]
protected theorem DifferentiableAt.prod_map (hf : DifferentiableAt ð f p.1)
    (hfâ : DifferentiableAt ð fâ p.2) : DifferentiableAt ð (fun p : E Ã G => (f p.1, fâ p.2)) p :=
  (hf.comp p differentiableAt_fst).prod (hfâ.comp p differentiableAt_snd)
end prodMap
section Pi
/-!
### Derivatives of functions `f : E â Î i, F' i`
In this section we formulate `has*FDeriv*_pi` theorems as `iff`s, and provide two versions of each
theorem:
* the version without `'` deals with `Ï : Î i, E â F' i` and `Ï' : Î i, E âL[ð] F' i`
and is designed to deduce differentiability of `fun x i âŠ Ï i x` from differentiability
of each `Ï i`;
* the version with `'` deals with `Ί : E â Î i, F' i` and `Ί' : E âL[ð] Î i, F' i`
and is designed to deduce differentiability of the components `fun x ⊠Ί x i` from
differentiability of `Ί`.
-/
variable {ι : Type*} [Fintype ι] {F' : ι â Type*} [â i, NormedAddCommGroup (F' i)]
[â i, NormedSpace ð (F' i)] {Ï : â i, E â F' i} {Ï' : â i, E âL[ð] F' i} {Ί : E â â i, F' i}
{Ί' : E âL[ð] â i, F' i}
/-- A map into a pi-type is strictly differentiable iff each coordinate function is,
with derivative the corresponding projection of the candidate derivative. -/
@[simp]
theorem hasStrictFDerivAt_pi' :
    HasStrictFDerivAt Ί Ί' x â â i, HasStrictFDerivAt (fun x => Ί x i) ((proj i).comp Ί') x := by
  simp only [HasStrictFDerivAt, ContinuousLinearMap.coe_pi]
  exact isLittleO_pi

/-- `fun_prop`-friendly direction of `hasStrictFDerivAt_pi'`. -/
@[fun_prop]
theorem hasStrictFDerivAt_pi'' (hÏ : â i, HasStrictFDerivAt (fun x => Ί x i) ((proj i).comp Ί') x) :
    HasStrictFDerivAt Ί Ί' x := hasStrictFDerivAt_pi'.2 hÏ

/-- Evaluation at a fixed index `i` is strictly differentiable, with derivative `proj i`. -/
@[fun_prop]
theorem hasStrictFDerivAt_apply (i : ι) (f : â i, F' i) :
    HasStrictFDerivAt (ð := ð) (fun f : â i, F' i => f i) (proj i) f := by
  -- Specialize `hasStrictFDerivAt_pi'` with the identity map as `Ί`.
  let id' := ContinuousLinearMap.id ð (â i, F' i)
  have h := ((hasStrictFDerivAt_pi'
    (Ί := fun (f : â i, F' i) (i' : ι) => f i') (Ί' := id') (x := f))).1
  have h' : comp (proj i) id' = proj i := by rfl
  rw [â h']; apply h; apply hasStrictFDerivAt_id

/-- `Ï`-form of `hasStrictFDerivAt_pi'`: `fun x i => Ï i x` is strictly differentiable iff
every `Ï i` is. -/
@[simp 1100] -- Porting note: increased priority to make lint happy
theorem hasStrictFDerivAt_pi :
    HasStrictFDerivAt (fun x i => Ï i x) (ContinuousLinearMap.pi Ï') x â
      â i, HasStrictFDerivAt (Ï i) (Ï' i) x :=
  hasStrictFDerivAt_pi'

@[simp]
theorem hasFDerivAtFilter_pi' :
    HasFDerivAtFilter Ί Ί' x L â
      â i, HasFDerivAtFilter (fun x => Ί x i) ((proj i).comp Ί') x L := by
  simp only [hasFDerivAtFilter_iff_isLittleO, ContinuousLinearMap.coe_pi]
  exact isLittleO_pi

theorem hasFDerivAtFilter_pi :
    HasFDerivAtFilter (fun x i => Ï i x) (ContinuousLinearMap.pi Ï') x L â
      â i, HasFDerivAtFilter (Ï i) (Ï' i) x L :=
  hasFDerivAtFilter_pi'

@[simp]
theorem hasFDerivAt_pi' :
    HasFDerivAt Ί Ί' x â â i, HasFDerivAt (fun x => Ί x i) ((proj i).comp Ί') x :=
  hasFDerivAtFilter_pi'

@[fun_prop]
theorem hasFDerivAt_pi'' (hÏ : â i, HasFDerivAt (fun x => Ί x i) ((proj i).comp Ί') x) :
    HasFDerivAt Ί Ί' x := hasFDerivAt_pi'.2 hÏ

/-- Evaluation at a fixed index is differentiable, by weakening the strict version. -/
@[fun_prop]
theorem hasFDerivAt_apply (i : ι) (f : â i, F' i) :
    HasFDerivAt (ð := ð) (fun f : â i, F' i => f i) (proj i) f := by
  apply HasStrictFDerivAt.hasFDerivAt
  apply hasStrictFDerivAt_apply

theorem hasFDerivAt_pi :
    HasFDerivAt (fun x i => Ï i x) (ContinuousLinearMap.pi Ï') x â
      â i, HasFDerivAt (Ï i) (Ï' i) x :=
  hasFDerivAtFilter_pi
-- Within-a-set and `Differentiable*` versions of the pi-type lemmas above, all derived
-- from the filter-level equivalence `hasFDerivAtFilter_pi'`.
@[simp]
theorem hasFDerivWithinAt_pi' :
    HasFDerivWithinAt Ί Ί' s x â â i, HasFDerivWithinAt (fun x => Ί x i) ((proj i).comp Ί') s x :=
  hasFDerivAtFilter_pi'

@[fun_prop]
theorem hasFDerivWithinAt_pi''
    (hÏ : â i, HasFDerivWithinAt (fun x => Ί x i) ((proj i).comp Ί') s x) :
    HasFDerivWithinAt Ί Ί' s x := hasFDerivWithinAt_pi'.2 hÏ

/-- Evaluation at a fixed index is differentiable within any set, with derivative `proj i`. -/
@[fun_prop]
theorem hasFDerivWithinAt_apply (i : ι) (f : â i, F' i) (s' : Set (â i, F' i)) :
    HasFDerivWithinAt (ð := ð) (fun f : â i, F' i => f i) (proj i) s' f := by
  -- Same specialization trick as `hasStrictFDerivAt_apply`, with the identity as `Ί`.
  let id' := ContinuousLinearMap.id ð (â i, F' i)
  have h := ((hasFDerivWithinAt_pi'
    (Ί := fun (f : â i, F' i) (i' : ι) => f i') (Ί' := id') (x := f) (s := s'))).1
  have h' : comp (proj i) id' = proj i := by rfl
  rw [â h']; apply h; apply hasFDerivWithinAt_id

theorem hasFDerivWithinAt_pi :
    HasFDerivWithinAt (fun x i => Ï i x) (ContinuousLinearMap.pi Ï') s x â
      â i, HasFDerivWithinAt (Ï i) (Ï' i) s x :=
  hasFDerivAtFilter_pi

/-- A map into a pi-type is differentiable within a set iff each coordinate is. -/
@[simp]
theorem differentiableWithinAt_pi :
    DifferentiableWithinAt ð Ί s x â â i, DifferentiableWithinAt ð (fun x => Ί x i) s x :=
  âšfun h i => (hasFDerivWithinAt_pi'.1 h.hasFDerivWithinAt i).differentiableWithinAt, fun h =>
    (hasFDerivWithinAt_pi.2 fun i => (h i).hasFDerivWithinAt).differentiableWithinAtâ©

@[fun_prop]
theorem differentiableWithinAt_pi'' (hÏ : â i, DifferentiableWithinAt ð (fun x => Ί x i) s x) :
    DifferentiableWithinAt ð Ί s x := differentiableWithinAt_pi.2 hÏ

@[fun_prop]
theorem differentiableWithinAt_apply (i : ι) (f : â i, F' i) (s' : Set (â i, F' i)) :
    DifferentiableWithinAt (ð := ð) (fun f : â i, F' i => f i) s' f := by
  apply HasFDerivWithinAt.differentiableWithinAt
  fun_prop

/-- A map into a pi-type is differentiable at a point iff each coordinate is. -/
@[simp]
theorem differentiableAt_pi : DifferentiableAt ð Ί x â â i, DifferentiableAt ð (fun x => Ί x i) x :=
  âšfun h i => (hasFDerivAt_pi'.1 h.hasFDerivAt i).differentiableAt, fun h =>
    (hasFDerivAt_pi.2 fun i => (h i).hasFDerivAt).differentiableAtâ©

@[fun_prop]
theorem differentiableAt_pi'' (hÏ : â i, DifferentiableAt ð (fun x => Ί x i) x) :
    DifferentiableAt ð Ί x := differentiableAt_pi.2 hÏ

@[fun_prop]
theorem differentiableAt_apply (i : ι) (f : â i, F' i) :
    DifferentiableAt (ð := ð) (fun f : â i, F' i => f i) f := by
  have h := ((differentiableAt_pi (ð := ð)
    (Ί := fun (f : â i, F' i) (i' : ι) => f i') (x := f))).1
  apply h; apply differentiableAt_id

theorem differentiableOn_pi : DifferentiableOn ð Ί s â â i, DifferentiableOn ð (fun x => Ί x i) s :=
  âšfun h i x hx => differentiableWithinAt_pi.1 (h x hx) i, fun h x hx =>
    differentiableWithinAt_pi.2 fun i => h i x hxâ©

@[fun_prop]
theorem differentiableOn_pi'' (hÏ : â i, DifferentiableOn ð (fun x => Ί x i) s) :
    DifferentiableOn ð Ί s := differentiableOn_pi.2 hÏ

@[fun_prop]
theorem differentiableOn_apply (i : ι) (s' : Set (â i, F' i)) :
    DifferentiableOn (ð := ð) (fun f : â i, F' i => f i) s' := by
  have h := ((differentiableOn_pi (ð := ð)
    (Ί := fun (f : â i, F' i) (i' : ι) => f i') (s := s'))).1
  apply h; apply differentiableOn_id

theorem differentiable_pi : Differentiable ð Ί â â i, Differentiable ð fun x => Ί x i :=
  âšfun h i x => differentiableAt_pi.1 (h x) i, fun h x => differentiableAt_pi.2 fun i => h i xâ©

@[fun_prop]
theorem differentiable_pi'' (hÏ : â i, Differentiable ð fun x => Ί x i) :
    Differentiable ð Ί := differentiable_pi.2 hÏ

@[fun_prop]
theorem differentiable_apply (i : ι) :
    Differentiable (ð := ð) (fun f : â i, F' i => f i) := by intro x; apply differentiableAt_apply

-- TODO: find out which version (`Ï` or `Ί`) works better with `rw`/`simp`
/-- `fderivWithin` of a map into a pi-type is the pi of the coordinate derivatives. -/
theorem fderivWithin_pi (h : â i, DifferentiableWithinAt ð (Ï i) s x)
    (hs : UniqueDiffWithinAt ð s x) :
    fderivWithin ð (fun x i => Ï i x) s x = pi fun i => fderivWithin ð (Ï i) s x :=
  (hasFDerivWithinAt_pi.2 fun i => (h i).hasFDerivWithinAt).fderivWithin hs

/-- `fderiv` of a map into a pi-type is the pi of the coordinate derivatives. -/
theorem fderiv_pi (h : â i, DifferentiableAt ð (Ï i) x) :
    fderiv ð (fun x i => Ï i x) x = pi fun i => fderiv ð (Ï i) x :=
  (hasFDerivAt_pi.2 fun i => (h i).hasFDerivAt).fderiv
end Pi
end CartesianProduct
end
|
Analysis\Calculus\FDeriv\RestrictScalars.lean | /-
Copyright (c) 2019 Jeremy Avigad. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Jeremy Avigad, Sébastien Gouëzel, Yury Kudryashov
-/
import Mathlib.Analysis.Calculus.FDeriv.Basic
/-!
# The derivative of the scalar restriction of a linear map
For detailed documentation of the Fréchet derivative,
see the module docstring of `Analysis/Calculus/FDeriv/Basic.lean`.
This file contains the usual formulas (and existence assertions) for the derivative of
the scalar restriction of a linear map.
-/
open Filter Asymptotics ContinuousLinearMap Set Metric
open scoped Classical
open Topology NNReal Filter Asymptotics ENNReal
noncomputable section
section RestrictScalars
/-!
### Restricting from `â` to `â`, or generally from `ð'` to `ð`
If a function is differentiable over `â`, then it is differentiable over `â`. In this paragraph,
we give variants of this statement, in the general situation where `â` and `â` are replaced
respectively by `ð'` and `ð` where `ð'` is a normed algebra over `ð`.
-/
variable (ð : Type*) [NontriviallyNormedField ð]
variable {ð' : Type*} [NontriviallyNormedField ð'] [NormedAlgebra ð ð']
variable {E : Type*} [NormedAddCommGroup E] [NormedSpace ð E] [NormedSpace ð' E]
variable [IsScalarTower ð ð' E]
variable {F : Type*} [NormedAddCommGroup F] [NormedSpace ð F] [NormedSpace ð' F]
variable [IsScalarTower ð ð' F]
variable {f : E â F} {f' : E âL[ð'] F} {s : Set E} {x : E}
/-- Strict differentiability over `ð'` implies strict differentiability over the smaller
field `ð`, with the scalar-restricted derivative. The proof is definitional. -/
@[fun_prop]
theorem HasStrictFDerivAt.restrictScalars (h : HasStrictFDerivAt f f' x) :
HasStrictFDerivAt f (f'.restrictScalars ð) x :=
h
/-- Differentiability along a filter over `ð'` implies the same over `ð`. -/
theorem HasFDerivAtFilter.restrictScalars {L} (h : HasFDerivAtFilter f f' x L) :
HasFDerivAtFilter f (f'.restrictScalars ð) x L :=
.of_isLittleO h.1
/-- Differentiability at a point over `ð'` implies the same over `ð`. -/
@[fun_prop]
theorem HasFDerivAt.restrictScalars (h : HasFDerivAt f f' x) :
HasFDerivAt f (f'.restrictScalars ð) x :=
.of_isLittleO h.1
/-- Differentiability within a set over `ð'` implies the same over `ð`. -/
@[fun_prop]
theorem HasFDerivWithinAt.restrictScalars (h : HasFDerivWithinAt f f' s x) :
HasFDerivWithinAt f (f'.restrictScalars ð) s x :=
.of_isLittleO h.1
/-- A function differentiable at a point over `ð'` is differentiable there over `ð`. -/
@[fun_prop]
theorem DifferentiableAt.restrictScalars (h : DifferentiableAt ð' f x) : DifferentiableAt ð f x :=
(h.hasFDerivAt.restrictScalars ð).differentiableAt
/-- A function differentiable within a set over `ð'` is differentiable there over `ð`. -/
@[fun_prop]
theorem DifferentiableWithinAt.restrictScalars (h : DifferentiableWithinAt ð' f s x) :
DifferentiableWithinAt ð f s x :=
(h.hasFDerivWithinAt.restrictScalars ð).differentiableWithinAt
/-- A function differentiable on a set over `ð'` is differentiable on it over `ð`. -/
@[fun_prop]
theorem DifferentiableOn.restrictScalars (h : DifferentiableOn ð' f s) : DifferentiableOn ð f s :=
fun x hx => (h x hx).restrictScalars ð
/-- A function differentiable over `ð'` is differentiable over `ð`. -/
@[fun_prop]
theorem Differentiable.restrictScalars (h : Differentiable ð' f) : Differentiable ð f := fun x =>
(h x).restrictScalars ð
/-- If `f` has a `ð`-derivative within a set and that derivative is the scalar restriction of a
`ð'`-linear map `f'`, then `f` has `f'` as a `ð'`-derivative within the set. -/
@[fun_prop]
theorem HasFDerivWithinAt.of_restrictScalars {g' : E âL[ð] F} (h : HasFDerivWithinAt f g' s x)
(H : f'.restrictScalars ð = g') : HasFDerivWithinAt f f' s x := by
rw [â H] at h
exact .of_isLittleO h.1
/-- Pointwise version of `HasFDerivWithinAt.of_restrictScalars`. -/
@[fun_prop]
theorem hasFDerivAt_of_restrictScalars {g' : E âL[ð] F} (h : HasFDerivAt f g' x)
(H : f'.restrictScalars ð = g') : HasFDerivAt f f' x := by
rw [â H] at h
exact .of_isLittleO h.1
/-- For a function differentiable over `ð'`, the `ð`-derivative is the scalar restriction of
the `ð'`-derivative. -/
theorem DifferentiableAt.fderiv_restrictScalars (h : DifferentiableAt ð' f x) :
fderiv ð f x = (fderiv ð' f x).restrictScalars ð :=
(h.hasFDerivAt.restrictScalars ð).fderiv
/-- A function differentiable within a set over `ð` is differentiable over `ð'` iff its
`ð`-derivative arises as the restriction of some `ð'`-linear map; requires uniqueness of
derivatives within the set. -/
theorem differentiableWithinAt_iff_restrictScalars (hf : DifferentiableWithinAt ð f s x)
(hs : UniqueDiffWithinAt ð s x) : DifferentiableWithinAt ð' f s x â
â g' : E âL[ð'] F, g'.restrictScalars ð = fderivWithin ð f s x := by
constructor
· rintro âšg', hg'â©
exact âšg', hs.eq (hg'.restrictScalars ð) hf.hasFDerivWithinAtâ©
· rintro âšf', hf'â©
exact âšf', hf.hasFDerivWithinAt.of_restrictScalars ð hf'â©
/-- Pointwise version of `differentiableWithinAt_iff_restrictScalars`, obtained by
specializing to the whole space. -/
theorem differentiableAt_iff_restrictScalars (hf : DifferentiableAt ð f x) :
DifferentiableAt ð' f x â â g' : E âL[ð'] F, g'.restrictScalars ð = fderiv ð f x := by
rw [â differentiableWithinAt_univ, â fderivWithin_univ]
exact
differentiableWithinAt_iff_restrictScalars ð hf.differentiableWithinAt uniqueDiffWithinAt_univ
end RestrictScalars
|
Analysis\Calculus\FDeriv\Star.lean | /-
Copyright (c) 2023 Eric Wieser. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Eric Wieser
-/
import Mathlib.Analysis.Calculus.FDeriv.Linear
import Mathlib.Analysis.Calculus.FDeriv.Comp
import Mathlib.Analysis.Calculus.FDeriv.Equiv
import Mathlib.Topology.Algebra.Module.Star
/-!
# Star operations on derivatives
For detailed documentation of the Fréchet derivative,
see the module docstring of `Analysis/Calculus/FDeriv/Basic.lean`.
This file contains the usual formulas (and existence assertions) for the derivative of the star
operation. Note that these only apply when the field that the derivative is taken with respect to
has a trivial star operation; as should be expected, this rules out `ð = â`.
-/
open scoped Classical
variable {ð : Type*} [NontriviallyNormedField ð] [StarRing ð] [TrivialStar ð]
variable {E : Type*} [NormedAddCommGroup E] [NormedSpace ð E]
variable {F : Type*} [NormedAddCommGroup F] [StarAddMonoid F] [NormedSpace ð F] [StarModule ð F]
[ContinuousStar F]
variable {f : E â F} {f' : E âL[ð] F} (e : E âL[ð] F) {x : E} {s : Set E} {L : Filter E}
/-- Strict differentiability is preserved by postcomposition with `star`; the derivative is
composed with the (antilinear, here linear since `star` on `ð` is trivial) map `starL'`. -/
@[fun_prop]
theorem HasStrictFDerivAt.star (h : HasStrictFDerivAt f f' x) :
HasStrictFDerivAt (fun x => star (f x)) (((starL' ð : F âL[ð] F) : F âL[ð] F) âL f') x :=
(starL' ð : F âL[ð] F).toContinuousLinearMap.hasStrictFDerivAt.comp x h
/-- Differentiability along a filter is preserved by postcomposition with `star`. -/
theorem HasFDerivAtFilter.star (h : HasFDerivAtFilter f f' x L) :
HasFDerivAtFilter (fun x => star (f x)) (((starL' ð : F âL[ð] F) : F âL[ð] F) âL f') x L :=
(starL' ð : F âL[ð] F).toContinuousLinearMap.hasFDerivAtFilter.comp x h Filter.tendsto_map
/-- Differentiability within a set is preserved by postcomposition with `star`. -/
@[fun_prop]
nonrec theorem HasFDerivWithinAt.star (h : HasFDerivWithinAt f f' s x) :
HasFDerivWithinAt (fun x => star (f x)) (((starL' ð : F âL[ð] F) : F âL[ð] F) âL f') s x :=
h.star
/-- Differentiability at a point is preserved by postcomposition with `star`. -/
@[fun_prop]
nonrec theorem HasFDerivAt.star (h : HasFDerivAt f f' x) :
HasFDerivAt (fun x => star (f x)) (((starL' ð : F âL[ð] F) : F âL[ð] F) âL f') x :=
h.star
/-- The pointwise star of a function differentiable within a set is differentiable there. -/
@[fun_prop]
theorem DifferentiableWithinAt.star (h : DifferentiableWithinAt ð f s x) :
DifferentiableWithinAt ð (fun y => star (f y)) s x :=
h.hasFDerivWithinAt.star.differentiableWithinAt
/-- Since `starL'` is a continuous linear equivalence, starring does not change
differentiability within a set. -/
@[simp]
theorem differentiableWithinAt_star_iff :
DifferentiableWithinAt ð (fun y => star (f y)) s x â DifferentiableWithinAt ð f s x :=
(starL' ð : F âL[ð] F).comp_differentiableWithinAt_iff
/-- The pointwise star of a function differentiable at a point is differentiable there. -/
@[fun_prop]
theorem DifferentiableAt.star (h : DifferentiableAt ð f x) :
DifferentiableAt ð (fun y => star (f y)) x :=
h.hasFDerivAt.star.differentiableAt
/-- Starring does not change differentiability at a point. -/
@[simp]
theorem differentiableAt_star_iff :
DifferentiableAt ð (fun y => star (f y)) x â DifferentiableAt ð f x :=
(starL' ð : F âL[ð] F).comp_differentiableAt_iff
/-- The pointwise star of a function differentiable on a set is differentiable on it. -/
@[fun_prop]
theorem DifferentiableOn.star (h : DifferentiableOn ð f s) :
DifferentiableOn ð (fun y => star (f y)) s := fun x hx => (h x hx).star
/-- Starring does not change differentiability on a set. -/
@[simp]
theorem differentiableOn_star_iff :
DifferentiableOn ð (fun y => star (f y)) s â DifferentiableOn ð f s :=
(starL' ð : F âL[ð] F).comp_differentiableOn_iff
/-- The pointwise star of a differentiable function is differentiable. -/
@[fun_prop]
theorem Differentiable.star (h : Differentiable ð f) : Differentiable ð fun y => star (f y) :=
fun x => (h x).star
/-- Starring does not change global differentiability. -/
@[simp]
theorem differentiable_star_iff : (Differentiable ð fun y => star (f y)) â Differentiable ð f :=
(starL' ð : F âL[ð] F).comp_differentiable_iff
/-- The derivative within a set of `star â f` is `starL'` composed with the derivative of `f`;
holds unconditionally since `starL'` is a linear equivalence (the non-differentiable case gives
`0 = starL' âL 0`). -/
theorem fderivWithin_star (hxs : UniqueDiffWithinAt ð s x) :
fderivWithin ð (fun y => star (f y)) s x =
((starL' ð : F âL[ð] F) : F âL[ð] F) âL fderivWithin ð f s x :=
(starL' ð : F âL[ð] F).comp_fderivWithin hxs
/-- The derivative of `star â f` is `starL'` composed with the derivative of `f`. -/
@[simp]
theorem fderiv_star :
fderiv ð (fun y => star (f y)) x = ((starL' ð : F âL[ð] F) : F âL[ð] F) âL fderiv ð f x :=
(starL' ð : F âL[ð] F).comp_fderiv
|
Analysis\Calculus\FDeriv\Symmetric.lean | /-
Copyright (c) 2021 Sébastien Gouëzel. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Sébastien Gouëzel
-/
import Mathlib.Analysis.Calculus.Deriv.Pow
import Mathlib.Analysis.Calculus.MeanValue
/-!
# Symmetry of the second derivative
We show that, over the reals, the second derivative is symmetric.
The most precise result is `Convex.second_derivative_within_at_symmetric`. It asserts that,
if a function is differentiable inside a convex set `s` with nonempty interior, and has a second
derivative within `s` at a point `x`, then this second derivative at `x` is symmetric. Note that
this result does not require continuity of the first derivative.
The following particular cases of this statement are especially relevant:
`second_derivative_symmetric_of_eventually` asserts that, if a function is differentiable on a
neighborhood of `x`, and has a second derivative at `x`, then this second derivative is symmetric.
`second_derivative_symmetric` asserts that, if a function is differentiable, and has a second
derivative at `x`, then this second derivative is symmetric.
## Implementation note
For the proof, we obtain an asymptotic expansion to order two of `f (x + v + w) - f (x + v)`, by
using the mean value inequality applied to a suitable function along the
segment `[x + v, x + v + w]`. This expansion involves `f'' ⬠w` as we move along a segment directed
by `w` (see `Convex.taylor_approx_two_segment`).
Consider the alternate sum `f (x + v + w) + f x - f (x + v) - f (x + w)`, corresponding to the
values of `f` along a rectangle based at `x` with sides `v` and `w`. One can write it using the two
sides directed by `w`, as `(f (x + v + w) - f (x + v)) - (f (x + w) - f x)`. Together with the
previous asymptotic expansion, one deduces that it equals `f'' v w + o(1)` when `v, w` tends to `0`.
Exchanging the roles of `v` and `w`, one instead gets an asymptotic expansion `f'' w v`, from which
the equality `f'' v w = f'' w v` follows.
In our most general statement, we only assume that `f` is differentiable inside a convex set `s`, so
a few modifications have to be made. Since we don't assume continuity of `f` at `x`, we consider
instead the rectangle based at `x + v + w` with sides `v` and `w`,
in `Convex.isLittleO_alternate_sum_square`, but the argument is essentially the same. It only works
when `v` and `w` both point towards the interior of `s`, to make sure that all the sides of the
rectangle are contained in `s` by convexity. The general case follows by linearity, though.
-/
open Asymptotics Set
open scoped Topology
variable {E F : Type*} [NormedAddCommGroup E] [NormedSpace â E] [NormedAddCommGroup F]
[NormedSpace â F] {s : Set E} (s_conv : Convex â s) {f : E â F} {f' : E â E âL[â] F}
{f'' : E âL[â] E âL[â] F} (hf : â x â interior s, HasFDerivAt f (f' x) x) {x : E} (xs : x â s)
(hx : HasFDerivWithinAt f' f'' (interior s) x)
/-- Assume that `f` is differentiable inside a convex set `s`, and that its derivative `f'` is
differentiable at a point `x`. Then, given two vectors `v` and `w` pointing inside `s`, one can
Taylor-expand to order two the function `f` on the segment `[x + h v, x + h (v + w)]`, giving a
bilinear estimate for `f (x + hv + hw) - f (x + hv)` in terms of `f' w` and of `f'' ⬠w`, up to
`o(h^2)`.
This is a technical statement used to show that the second derivative is symmetric. -/
theorem Convex.taylor_approx_two_segment {v w : E} (hv : x + v â interior s)
(hw : x + v + w â interior s) :
(fun h : â => f (x + h ⢠v + h ⢠w)
- f (x + h ⢠v) - h ⢠f' x w - h ^ 2 ⢠f'' v w - (h ^ 2 / 2) ⢠f'' w w) =o[ð[>] 0]
fun h => h ^ 2 := by
-- it suffices to check that the expression is bounded by `ε * ((âvâ + âwâ) * âwâ) * h^2` for
-- small enough `h`, for any positive `ε`.
refine IsLittleO.trans_isBigO
(isLittleO_iff.2 fun ε εpos => ?_) (isBigO_const_mul_self ((âvâ + âwâ) * âwâ) _ _)
-- consider a ball of radius `ÎŽ` around `x` in which the Taylor approximation for `f''` is
-- good up to `ÎŽ`.
rw [HasFDerivWithinAt, hasFDerivAtFilter_iff_isLittleO, isLittleO_iff] at hx
rcases Metric.mem_nhdsWithin_iff.1 (hx εpos) with âšÎŽ, ÎŽpos, sÎŽâ©
-- `E1`: for `h` small enough, the increment `h ⢠v + (t * h) ⢠w` stays in the `Ύ`-ball.
have E1 : âá¶ h in ð[>] (0 : â), h * (âvâ + âwâ) < ÎŽ := by
have : Filter.Tendsto (fun h => h * (âvâ + âwâ)) (ð[>] (0 : â)) (ð (0 * (âvâ + âwâ))) :=
(continuous_id.mul continuous_const).continuousWithinAt
apply (tendsto_order.1 this).2 ÎŽ
simpa only [zero_mul] using ÎŽpos
-- `E2`: we may also assume `h < 1`, needed for the convexity arguments below.
have E2 : âá¶ h in ð[>] (0 : â), (h : â) < 1 :=
mem_nhdsWithin_Ioi_iff_exists_Ioo_subset.2
âš(1 : â), by simp only [mem_Ioi, zero_lt_one], fun x hx => hx.2â©
filter_upwards [E1, E2, self_mem_nhdsWithin] with h hÎŽ h_lt_1 hpos
-- we consider `h` small enough that all points under consideration belong to this ball,
-- and also with `0 < h < 1`.
replace hpos : 0 < h := hpos
-- every point of the segment `[x + h v, x + h (v + w)]` lies in the interior of `s`,
-- by convexity.
have xt_mem : â t â Icc (0 : â) 1, x + h ⢠v + (t * h) ⢠w â interior s := by
intro t ht
have : x + h ⢠v â interior s := s_conv.add_smul_mem_interior xs hv âšhpos, h_lt_1.leâ©
rw [â smul_smul]
apply s_conv.interior.add_smul_mem this _ ht
rw [add_assoc] at hw
rw [add_assoc, â smul_add]
exact s_conv.add_smul_mem_interior xs hw âšhpos, h_lt_1.leâ©
-- define a function `g` on `[0,1]` (identified with `[v, v + w]`) such that `g 1 - g 0` is the
-- quantity to be estimated. We will check that its derivative is given by an explicit
-- expression `g'`, that we can bound. Then the desired bound for `g 1 - g 0` follows from the
-- mean value inequality.
let g t :=
f (x + h ⢠v + (t * h) ⢠w) - (t * h) ⢠f' x w - (t * h ^ 2) ⢠f'' v w -
((t * h) ^ 2 / 2) ⢠f'' w w
set g' := fun t =>
f' (x + h ⢠v + (t * h) ⢠w) (h ⢠w) - h ⢠f' x w - h ^ 2 ⢠f'' v w - (t * h ^ 2) ⢠f'' w w
with hg'
-- check that `g'` is the derivative of `g`, by a straightforward computation
have g_deriv : â t â Icc (0 : â) 1, HasDerivWithinAt g (g' t) (Icc 0 1) t := by
intro t ht
apply_rules [HasDerivWithinAt.sub, HasDerivWithinAt.add]
· refine (hf _ ?_).comp_hasDerivWithinAt _ ?_
· exact xt_mem t ht
apply_rules [HasDerivAt.hasDerivWithinAt, HasDerivAt.const_add, HasDerivAt.smul_const,
hasDerivAt_mul_const]
· apply_rules [HasDerivAt.hasDerivWithinAt, HasDerivAt.smul_const, hasDerivAt_mul_const]
· apply_rules [HasDerivAt.hasDerivWithinAt, HasDerivAt.smul_const, hasDerivAt_mul_const]
· suffices H : HasDerivWithinAt (fun u => ((u * h) ^ 2 / 2) ⢠f'' w w)
((((2 : â) : â) * (t * h) ^ (2 - 1) * (1 * h) / 2) ⢠f'' w w) (Icc 0 1) t by
convert H using 2
ring
apply_rules [HasDerivAt.hasDerivWithinAt, HasDerivAt.smul_const, hasDerivAt_id',
HasDerivAt.pow, HasDerivAt.mul_const]
-- check that `g'` is uniformly bounded, with a suitable bound `ε * ((âvâ + âwâ) * âwâ) * h^2`.
have g'_bound : â t â Ico (0 : â) 1, âg' tâ †ε * ((âvâ + âwâ) * âwâ) * h ^ 2 := by
intro t ht
have I : âh ⢠v + (t * h) ⢠wâ †h * (âvâ + âwâ) :=
calc
âh ⢠v + (t * h) ⢠wâ †âh ⢠vâ + â(t * h) ⢠wâ := norm_add_le _ _
_ = h * âvâ + t * (h * âwâ) := by
simp only [norm_smul, Real.norm_eq_abs, hpos.le, abs_of_nonneg, abs_mul, ht.left,
mul_assoc]
_ †h * âvâ + 1 * (h * âwâ) := by gcongr; exact ht.2.le
_ = h * (âvâ + âwâ) := by ring
-- rewrite `g' t` as the defect of the linear approximation of `f'`, applied to `h ⢠w`,
-- then bound it using the `Ύ`-ball estimate `sΎ`.
calc
âg' tâ = â(f' (x + h ⢠v + (t * h) ⢠w) - f' x - f'' (h ⢠v + (t * h) ⢠w)) (h ⢠w)â := by
rw [hg']
have : h * (t * h) = t * (h * h) := by ring
simp only [ContinuousLinearMap.coe_sub', ContinuousLinearMap.map_add, pow_two,
ContinuousLinearMap.add_apply, Pi.smul_apply, smul_sub, smul_add, smul_smul, â sub_sub,
ContinuousLinearMap.coe_smul', Pi.sub_apply, ContinuousLinearMap.map_smul, this]
_ †âf' (x + h ⢠v + (t * h) ⢠w) - f' x - f'' (h ⢠v + (t * h) ⢠w)â * âh ⢠wâ :=
(ContinuousLinearMap.le_opNorm _ _)
_ †ε * âh ⢠v + (t * h) ⢠wâ * âh ⢠wâ := by
apply mul_le_mul_of_nonneg_right _ (norm_nonneg _)
have H : x + h ⢠v + (t * h) ⢠w â Metric.ball x ÎŽ â© interior s := by
refine âš?_, xt_mem t âšht.1, ht.2.leâ©â©
rw [add_assoc, add_mem_ball_iff_norm]
exact I.trans_lt hÎŽ
simpa only [mem_setOf_eq, add_assoc x, add_sub_cancel_left] using sÎŽ H
_ †ε * (âh ⢠vâ + âh ⢠wâ) * âh ⢠wâ := by
gcongr
apply (norm_add_le _ _).trans
gcongr
simp only [norm_smul, Real.norm_eq_abs, abs_mul, abs_of_nonneg, ht.1, hpos.le, mul_assoc]
exact mul_le_of_le_one_left (mul_nonneg hpos.le (norm_nonneg _)) ht.2.le
_ = ε * ((âvâ + âwâ) * âwâ) * h ^ 2 := by
simp only [norm_smul, Real.norm_eq_abs, abs_mul, abs_of_nonneg, hpos.le]; ring
-- conclude using the mean value inequality
have I : âg 1 - g 0â †ε * ((âvâ + âwâ) * âwâ) * h ^ 2 := by
simpa only [mul_one, sub_zero] using
norm_image_sub_le_of_norm_deriv_le_segment' g_deriv g'_bound 1 (right_mem_Icc.2 zero_le_one)
-- `g 1 - g 0` is exactly the expression in the statement, up to rearrangement.
convert I using 1
· congr 1
simp only [g, Nat.one_ne_zero, add_zero, one_mul, zero_div, zero_mul, sub_zero,
zero_smul, Ne, not_false_iff, zero_pow]
abel
· simp only [Real.norm_eq_abs, abs_mul, add_nonneg (norm_nonneg v) (norm_nonneg w), abs_of_nonneg,
hpos.le, mul_assoc, norm_nonneg, abs_pow]
/-- One can get `f'' v w` as the limit of `h ^ (-2)` times the alternate sum of the values of `f`
along the vertices of a quadrilateral with sides `h v` and `h w` based at `x`.
In a setting where `f` is not guaranteed to be continuous at `x`, we can still
get this if we use a quadrilateral based at `h v + h w`. -/
theorem Convex.isLittleO_alternate_sum_square {v w : E} (h4v : x + (4 : â) ⢠v â interior s)
(h4w : x + (4 : â) ⢠w â interior s) :
(fun h : â => f (x + h ⢠(2 ⢠v + 2 ⢠w)) + f (x + h ⢠(v + w))
- f (x + h ⢠(2 ⢠v + w)) - f (x + h ⢠(v + 2 ⢠w)) - h ^ 2 ⢠f'' v w) =o[ð[>] 0]
fun h => h ^ 2 := by
have A : (1 : â) / 2 â Ioc (0 : â) 1 := âšby norm_num, by norm_numâ©
have B : (1 : â) / 2 â Icc (0 : â) 1 := âšby norm_num, by norm_numâ©
-- `C` converts between natural-number and real scalar multiples of `2`.
have C : â w : E, (2 : â) ⢠w = 2 ⢠w := fun w => by simp only [two_smul]
-- The following `have`s show that the various vertices of the quadrilateral (and the
-- midpoints used below) lie in the interior of `s`, by convexity from `h4v` and `h4w`.
have h2v2w : x + (2 : â) ⢠v + (2 : â) ⢠w â interior s := by
convert s_conv.interior.add_smul_sub_mem h4v h4w B using 1
simp only [smul_sub, smul_smul, one_div, add_sub_add_left_eq_sub, mul_add, add_smul]
norm_num
simp only [show (4 : â) = (2 : â) + (2 : â) by norm_num, _root_.add_smul]
abel
have h2vww : x + (2 ⢠v + w) + w â interior s := by
convert h2v2w using 1
simp only [two_smul]
abel
have h2v : x + (2 : â) ⢠v â interior s := by
convert s_conv.add_smul_sub_mem_interior xs h4v A using 1
simp only [smul_smul, one_div, add_sub_cancel_left, add_right_inj]
norm_num
have h2w : x + (2 : â) ⢠w â interior s := by
convert s_conv.add_smul_sub_mem_interior xs h4w A using 1
simp only [smul_smul, one_div, add_sub_cancel_left, add_right_inj]
norm_num
have hvw : x + (v + w) â interior s := by
convert s_conv.add_smul_sub_mem_interior xs h2v2w A using 1
simp only [smul_smul, one_div, add_sub_cancel_left, add_right_inj, smul_add, smul_sub]
norm_num
abel
have h2vw : x + (2 ⢠v + w) â interior s := by
convert s_conv.interior.add_smul_sub_mem h2v h2v2w B using 1
simp only [smul_add, smul_sub, smul_smul, â C]
norm_num
abel
have hvww : x + (v + w) + w â interior s := by
convert s_conv.interior.add_smul_sub_mem h2w h2v2w B using 1
rw [one_div, add_sub_add_right_eq_sub, add_sub_cancel_left, inv_smul_smulâ two_ne_zero,
two_smul]
abel
-- apply the two-segment Taylor estimate along the two sides directed by `w`, then subtract.
have TA1 := s_conv.taylor_approx_two_segment hf xs hx h2vw h2vww
have TA2 := s_conv.taylor_approx_two_segment hf xs hx hvw hvww
convert TA1.sub TA2 using 1
ext h
simp only [two_smul, smul_add, â add_assoc, ContinuousLinearMap.map_add,
ContinuousLinearMap.add_apply, Pi.smul_apply, ContinuousLinearMap.coe_smul',
ContinuousLinearMap.map_smul]
abel
/-- Assume that `f` is differentiable inside a convex set `s`, and that its derivative `f'` is
differentiable at a point `x`. Then, given two vectors `v` and `w` pointing inside `s`, one
has `f'' v w = f'' w v`. Superseded by `Convex.second_derivative_within_at_symmetric`, which
removes the assumption that `v` and `w` point inside `s`. -/
theorem Convex.second_derivative_within_at_symmetric_of_mem_interior {v w : E}
(h4v : x + (4 : â) ⢠v â interior s) (h4w : x + (4 : â) ⢠w â interior s) :
f'' w v = f'' v w := by
-- the alternate sum over the quadrilateral is symmetric in `v` and `w`, so subtracting the two
-- little-o estimates shows `h ^ 2 ⢠(f'' w v - f'' v w) = o(h ^ 2)`.
have A : (fun h : â => h ^ 2 ⢠(f'' w v - f'' v w)) =o[ð[>] 0] fun h => h ^ 2 := by
convert (s_conv.isLittleO_alternate_sum_square hf xs hx h4v h4w).sub
(s_conv.isLittleO_alternate_sum_square hf xs hx h4w h4v) using 1
ext h
simp only [add_comm, smul_add, smul_sub]
abel
-- divide out `h ^ 2` to get that the constant `f'' w v - f'' v w` is `o(1)`, hence zero.
have B : (fun _ : â => f'' w v - f'' v w) =o[ð[>] 0] fun _ => (1 : â) := by
have : (fun h : â => 1 / h ^ 2) =O[ð[>] 0] fun h => 1 / h ^ 2 := isBigO_refl _ _
have C := this.smul_isLittleO A
apply C.congr' _ _
· filter_upwards [self_mem_nhdsWithin]
intro h (hpos : 0 < h)
rw [â one_smul â (f'' w v - f'' v w), smul_smul, smul_smul]
congr 1
field_simp [LT.lt.ne' hpos]
· filter_upwards [self_mem_nhdsWithin] with h (hpos : 0 < h)
field_simp [LT.lt.ne' hpos, SMul.smul]
simpa only [sub_eq_zero] using isLittleO_const_const_iff.1 B
/-- If a function is differentiable inside a convex set with nonempty interior, and has a second
derivative at a point of this convex set, then this second derivative is symmetric. -/
theorem Convex.second_derivative_within_at_symmetric {s : Set E} (s_conv : Convex â s)
(hne : (interior s).Nonempty) {f : E â F} {f' : E â E âL[â] F} {f'' : E âL[â] E âL[â] F}
(hf : â x â interior s, HasFDerivAt f (f' x) x) {x : E} (xs : x â s)
(hx : HasFDerivWithinAt f' f'' (interior s) x) (v w : E) : f'' v w = f'' w v := by
/- we work around a point `x + 4 z` in the interior of `s`. For any vector `m`,
then `x + 4 (z + t m)` also belongs to the interior of `s` for small enough `t`. This means that
we will be able to apply `second_derivative_within_at_symmetric_of_mem_interior` to show
that `f''` is symmetric, after cancelling all the contributions due to `z`. -/
rcases hne with âšy, hyâ©
obtain âšz, hzâ© : â z, z = ((1 : â) / 4) ⢠(y - x) := âš((1 : â) / 4) ⢠(y - x), rflâ©
-- `x + 4 ⢠(z + t ⢠m)` tends to `y` as `t â 0`, so it eventually stays in `interior s`.
have A : â m : E, Filter.Tendsto (fun t : â => x + (4 : â) ⢠(z + t ⢠m)) (ð 0) (ð y) := by
intro m
have : x + (4 : â) ⢠(z + (0 : â) ⢠m) = y := by simp [hz]
rw [â this]
refine tendsto_const_nhds.add <| tendsto_const_nhds.smul <| tendsto_const_nhds.add ?_
exact continuousAt_id.smul continuousAt_const
have B : â m : E, âá¶ t in ð[>] (0 : â), x + (4 : â) ⢠(z + t ⢠m) â interior s := by
intro m
apply nhdsWithin_le_nhds
apply A m
rw [mem_interior_iff_mem_nhds] at hy
exact interior_mem_nhds.2 hy
-- we choose `t m > 0` such that `x + 4 (z + (t m) m)` belongs to the interior of `s`, for any
-- vector `m`.
choose t ts tpos using fun m => ((B m).and self_mem_nhdsWithin).exists
-- applying `second_derivative_within_at_symmetric_of_mem_interior` to the vectors `z`
-- and `z + (t m) m`, we deduce that `f'' m z = f'' z m` for all `m`.
have C : â m : E, f'' m z = f'' z m := by
intro m
have : f'' (z + t m ⢠m) (z + t 0 ⢠(0 : E)) = f'' (z + t 0 ⢠(0 : E)) (z + t m ⢠m) :=
s_conv.second_derivative_within_at_symmetric_of_mem_interior hf xs hx (ts 0) (ts m)
simp only [ContinuousLinearMap.map_add, ContinuousLinearMap.map_smul, add_right_inj,
ContinuousLinearMap.add_apply, Pi.smul_apply, ContinuousLinearMap.coe_smul', add_zero,
ContinuousLinearMap.zero_apply, smul_zero, ContinuousLinearMap.map_zero] at this
exact smul_right_injective F (tpos m).ne' this
-- applying `second_derivative_within_at_symmetric_of_mem_interior` to the vectors `z + (t v) v`
-- and `z + (t w) w`, we deduce that `f'' v w = f'' w v`. Cross terms involving `z` can be
-- eliminated thanks to the fact proved above that `f'' m z = f'' z m`.
have : f'' (z + t v ⢠v) (z + t w ⢠w) = f'' (z + t w ⢠w) (z + t v ⢠v) :=
s_conv.second_derivative_within_at_symmetric_of_mem_interior hf xs hx (ts w) (ts v)
simp only [ContinuousLinearMap.map_add, ContinuousLinearMap.map_smul, smul_add, smul_smul,
ContinuousLinearMap.add_apply, Pi.smul_apply, ContinuousLinearMap.coe_smul', C] at this
rw [add_assoc, add_assoc, add_right_inj, add_left_comm, add_right_inj, add_right_inj, mul_comm]
at this
-- cancel the positive scalar `t v * t w` to conclude.
apply smul_right_injective F _ this
simp [(tpos v).ne', (tpos w).ne']
/-- If a function is differentiable around `x`, and has two derivatives at `x`, then the second
derivative is symmetric. -/
theorem second_derivative_symmetric_of_eventually {f : E â F} {f' : E â E âL[â] F}
{f'' : E âL[â] E âL[â] F} (hf : âá¶ y in ð x, HasFDerivAt f (f' y) y) (hx : HasFDerivAt f' f'' x)
(v w : E) : f'' v w = f'' w v := by
-- reduce to the convex-set statement, applied to a small ball around `x`.
rcases Metric.mem_nhds_iff.1 hf with âšÎµ, εpos, hεâ©
have A : (interior (Metric.ball x ε)).Nonempty := by
rwa [Metric.isOpen_ball.interior_eq, Metric.nonempty_ball]
exact
Convex.second_derivative_within_at_symmetric (convex_ball x ε) A
(fun y hy => hε (interior_subset hy)) (Metric.mem_ball_self εpos) hx.hasFDerivWithinAt v w
/-- If a function is differentiable, and has two derivatives at `x`, then the second
derivative is symmetric. -/
theorem second_derivative_symmetric {f : E â F} {f' : E â E âL[â] F} {f'' : E âL[â] E âL[â] F}
(hf : â y, HasFDerivAt f (f' y) y) (hx : HasFDerivAt f' f'' x) (v w : E) : f'' v w = f'' w v :=
second_derivative_symmetric_of_eventually (Filter.eventually_of_forall hf) hx v w
|
Analysis\Calculus\Gradient\Basic.lean | /-
Copyright (c) 2023 Ziyu Wang. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Ziyu Wang, Chenyi Li, Sébastien Gouëzel, Penghao Yu, Zhipeng Cao
-/
import Mathlib.Analysis.InnerProductSpace.Dual
import Mathlib.Analysis.Calculus.FDeriv.Basic
import Mathlib.Analysis.Calculus.Deriv.Basic
/-!
# Gradient
## Main Definitions
Let `f` be a function from a Hilbert space `F` to `ð` (`ð` is `â` or `â`), `x` be a point in `F`,
and `f'` be a vector in `F`. Then
`HasGradientWithinAt f f' s x`
says that `f` has a gradient `f'` at `x`, where the domain of interest
is restricted to `s`. We also have
`HasGradientAt f f' x := HasGradientWithinAt f f' x univ`
## Main results
This file contains the following parts of gradient.
* the definition of gradient.
* the theorems translating between `HasGradientAtFilter` and `HasFDerivAtFilter`,
`HasGradientWithinAt` and `HasFDerivWithinAt`, `HasGradientAt` and `HasFDerivAt`,
`Gradient` and `fderiv`.
* theorems about the uniqueness of the gradient.
* the theorems translating between `HasGradientAtFilter` and `HasDerivAtFilter`,
`HasGradientAt` and `HasDerivAt`, `Gradient` and `deriv` when `F = ð`.
* the theorems about the congruence of the gradient.
* the theorems about the gradient of constant function.
* the theorems about the continuity of a function admitting a gradient.
-/
open Topology InnerProductSpace Set
noncomputable section
variable {ð F : Type*} [RCLike ð]
variable [NormedAddCommGroup F] [InnerProductSpace ð F] [CompleteSpace F]
variable {f : F â ð} {f' x : F}
/-- A function `f` has the gradient `f'` as derivative along the filter `L` if
`f x' = f x + âšf', x' - xâ© + o (x' - x)` when `x'` converges along the filter `L`.
This is `HasFDerivAtFilter` with the derivative expressed via the Riesz representation
`toDual`. -/
def HasGradientAtFilter (f : F â ð) (f' x : F) (L : Filter F) :=
HasFDerivAtFilter f (toDual ð F f') x L
/-- `f` has the gradient `f'` at the point `x` within the subset `s` if
`f x' = f x + âšf', x' - xâ© + o (x' - x)` where `x'` converges to `x` inside `s`. -/
def HasGradientWithinAt (f : F â ð) (f' : F) (s : Set F) (x : F) :=
HasGradientAtFilter f f' x (ð[s] x)
/-- `f` has the gradient `f'` at the point `x` if
`f x' = f x + âšf', x' - xâ© + o (x' - x)` where `x'` converges to `x`. -/
def HasGradientAt (f : F â ð) (f' x : F) :=
HasGradientAtFilter f f' x (ð x)
/-- Gradient of `f` at the point `x` within the set `s`, if it exists. Zero otherwise.
If the derivative exists (i.e., `â f', HasGradientWithinAt f f' s x`), then
`f x' = f x + âšf', x' - xâ© + o (x' - x)` where `x'` converges to `x` inside `s`. -/
def gradientWithin (f : F â ð) (s : Set F) (x : F) : F :=
(toDual ð F).symm (fderivWithin ð f s x)
/-- Gradient of `f` at the point `x`, if it exists. Zero otherwise.
If the derivative exists (i.e., `â f', HasGradientAt f f' x`), then
`f x' = f x + âšf', x' - xâ© + o (x' - x)` where `x'` converges to `x`. -/
def gradient (f : F â ð) (x : F) : F :=
(toDual ð F).symm (fderiv ð f x)
@[inherit_doc]
scoped[Gradient] notation "â" => gradient
-- Local inner-product notation, fixing the scalar field to `ð`.
local notation "âª" x ", " y "â«" => @inner ð _ _ x y
open scoped Gradient
variable {s : Set F} {L : Filter F}
/-- Having a gradient within a set is, by definition, having the corresponding dual map as a
Fréchet derivative within the set. -/
theorem hasGradientWithinAt_iff_hasFDerivWithinAt {s : Set F} :
HasGradientWithinAt f f' s x â HasFDerivWithinAt f (toDual ð F f') s x :=
Iff.rfl
/-- A Fréchet derivative within a set corresponds to the gradient obtained by applying the
inverse of the Riesz representation `toDual`. -/
theorem hasFDerivWithinAt_iff_hasGradientWithinAt {frechet : F âL[ð] ð} {s : Set F} :
HasFDerivWithinAt f frechet s x â HasGradientWithinAt f ((toDual ð F).symm frechet) s x := by
rw [hasGradientWithinAt_iff_hasFDerivWithinAt, (toDual ð F).apply_symm_apply frechet]
/-- Pointwise analogue of `hasGradientWithinAt_iff_hasFDerivWithinAt`. -/
theorem hasGradientAt_iff_hasFDerivAt :
HasGradientAt f f' x â HasFDerivAt f (toDual ð F f') x :=
Iff.rfl
/-- Pointwise analogue of `hasFDerivWithinAt_iff_hasGradientWithinAt`. -/
theorem hasFDerivAt_iff_hasGradientAt {frechet : F âL[ð] ð} :
HasFDerivAt f frechet x â HasGradientAt f ((toDual ð F).symm frechet) x := by
rw [hasGradientAt_iff_hasFDerivAt, (toDual ð F).apply_symm_apply frechet]
-- One-directional aliases for the iff lemmas above, for dot notation.
alias âšHasGradientWithinAt.hasFDerivWithinAt, _â© := hasGradientWithinAt_iff_hasFDerivWithinAt
alias âšHasFDerivWithinAt.hasGradientWithinAt, _â© := hasFDerivWithinAt_iff_hasGradientWithinAt
alias âšHasGradientAt.hasFDerivAt, _â© := hasGradientAt_iff_hasFDerivAt
alias âšHasFDerivAt.hasGradientAt, _â© := hasFDerivAt_iff_hasGradientAt
theorem gradient_eq_zero_of_not_differentiableAt (h : ¬DifferentiableAt ð f x) : â f x = 0 := by
rw [gradient, fderiv_zero_of_not_differentiableAt h, map_zero]
theorem HasGradientAt.unique {gradf gradg : F}
(hf : HasGradientAt f gradf x) (hg : HasGradientAt f gradg x) :
gradf = gradg :=
(toDual ð F).injective (hf.hasFDerivAt.unique hg.hasFDerivAt)
theorem DifferentiableAt.hasGradientAt (h : DifferentiableAt ð f x) :
HasGradientAt f (â f x) x := by
rw [hasGradientAt_iff_hasFDerivAt, gradient, (toDual ð F).apply_symm_apply (fderiv ð f x)]
exact h.hasFDerivAt
theorem HasGradientAt.differentiableAt (h : HasGradientAt f f' x) :
DifferentiableAt ð f x :=
h.hasFDerivAt.differentiableAt
theorem DifferentiableWithinAt.hasGradientWithinAt (h : DifferentiableWithinAt ð f s x) :
HasGradientWithinAt f (gradientWithin f s x) s x := by
rw [hasGradientWithinAt_iff_hasFDerivWithinAt, gradientWithin,
(toDual ð F).apply_symm_apply (fderivWithin ð f s x)]
exact h.hasFDerivWithinAt
theorem HasGradientWithinAt.differentiableWithinAt (h : HasGradientWithinAt f f' s x) :
DifferentiableWithinAt ð f s x :=
h.hasFDerivWithinAt.differentiableWithinAt
/-- Having a gradient within the whole space is the same as having a gradient at the point. -/
@[simp]
theorem hasGradientWithinAt_univ : HasGradientWithinAt f f' univ x â HasGradientAt f f' x := by
rw [hasGradientWithinAt_iff_hasFDerivWithinAt, hasGradientAt_iff_hasFDerivAt]
exact hasFDerivWithinAt_univ
/-- A function differentiable on a neighborhood `s` of `x` has the gradient `â f x` at `x`. -/
theorem DifferentiableOn.hasGradientAt (h : DifferentiableOn ð f s) (hs : s â ð x) :
HasGradientAt f (â f x) x :=
(h.hasFDerivAt hs).hasGradientAt
/-- If `f` has gradient `f'` at `x`, then the gradient `â f x` equals `f'`. -/
theorem HasGradientAt.gradient (h : HasGradientAt f f' x) : â f x = f' :=
h.differentiableAt.hasGradientAt.unique h
/-- If `f` has gradient `f' x` at every point `x`, then `â f` equals `f'` as a function. -/
theorem gradient_eq {f' : F â F} (h : â x, HasGradientAt f (f' x) x) : â f = f' :=
funext fun x => (h x).gradient
section OneDimension
/-! In dimension one the duality pairing involves complex conjugation, so the gradient of a
scalar function is the conjugate (`starRingEnd`) of its derivative. -/
variable {g : ð â ð} {g' u : ð} {L' : Filter ð}
/-- In dimension one, a gradient statement along a filter yields a derivative statement with
the conjugate of the gradient as derivative. -/
theorem HasGradientAtFilter.hasDerivAtFilter (h : HasGradientAtFilter g g' u L') :
HasDerivAtFilter g (starRingEnd ð g') u L' := by
-- Identify the rank-one derivative map with the dual functional of `g'`.
have : ContinuousLinearMap.smulRight (1 : ð âL[ð] ð) (starRingEnd ð g') = (toDual ð ð) g' := by
ext; simp
rwa [HasDerivAtFilter, this]
/-- Conversely, a derivative statement along a filter yields a gradient statement with the
conjugate of the derivative as gradient. -/
theorem HasDerivAtFilter.hasGradientAtFilter (h : HasDerivAtFilter g g' u L') :
HasGradientAtFilter g (starRingEnd ð g') u L' := by
have : ContinuousLinearMap.smulRight (1 : ð âL[ð] ð) g' = (toDual ð ð) (starRingEnd ð g') := by
ext; simp
rwa [HasGradientAtFilter, â this]
/-- In dimension one, having gradient `g'` at a point gives derivative `conj g'` there. -/
theorem HasGradientAt.hasDerivAt (h : HasGradientAt g g' u) :
HasDerivAt g (starRingEnd ð g') u := by
rw [hasGradientAt_iff_hasFDerivAt, hasFDerivAt_iff_hasDerivAt] at h
simpa using h
/-- In dimension one, having derivative `g'` at a point gives gradient `conj g'` there. -/
theorem HasDerivAt.hasGradientAt (h : HasDerivAt g g' u) :
HasGradientAt g (starRingEnd ð g') u := by
rw [hasGradientAt_iff_hasFDerivAt, hasFDerivAt_iff_hasDerivAt]
simpa
/-- The gradient of a scalar function is the conjugate of its derivative. This also holds
(as `0 = 0`) at points of non-differentiability, thanks to the junk-value conventions. -/
theorem gradient_eq_deriv : â g u = starRingEnd ð (deriv g u) := by
by_cases h : DifferentiableAt ð g u
· rw [h.hasGradientAt.hasDerivAt.deriv, RCLike.conj_conj]
· rw [gradient_eq_zero_of_not_differentiableAt h, deriv_zero_of_not_differentiableAt h, map_zero]
end OneDimension
section OneDimensionReal
/-! Over the reals, conjugation is the identity, so gradient and derivative of a function of
one real variable agree on the nose. These primed lemmas specialize the section above. -/
variable {g : â â â} {g' u : â} {L' : Filter â}
/-- Over the reals, a gradient statement along a filter is literally a derivative statement. -/
theorem HasGradientAtFilter.hasDerivAtFilter' (h : HasGradientAtFilter g g' u L') :
HasDerivAtFilter g g' u L' := h.hasDerivAtFilter
/-- Over the reals, a derivative statement along a filter is literally a gradient statement. -/
theorem HasDerivAtFilter.hasGradientAtFilter' (h : HasDerivAtFilter g g' u L') :
HasGradientAtFilter g g' u L' := h.hasGradientAtFilter
/-- Over the reals, having a gradient at a point is having that same derivative there. -/
theorem HasGradientAt.hasDerivAt' (h : HasGradientAt g g' u) :
HasDerivAt g g' u := h.hasDerivAt
/-- Over the reals, having a derivative at a point is having that same gradient there. -/
theorem HasDerivAt.hasGradientAt' (h : HasDerivAt g g' u) :
HasGradientAt g g' u := h.hasGradientAt
/-- Over the reals, the gradient of a function of one variable equals its derivative. -/
theorem gradient_eq_deriv' : â g u = deriv g u := gradient_eq_deriv
end OneDimensionReal
open Filter
section GradientProperties
/-- Little-o characterization of the gradient along a filter: the first-order remainder of `f`
against the pairing with `f'` is negligible compared to the increment. -/
theorem hasGradientAtFilter_iff_isLittleO :
HasGradientAtFilter f f' x L â
(fun x' : F => f x' - f x - âªf', x' - xâ«) =o[L] fun x' => x' - x :=
hasFDerivAtFilter_iff_isLittleO ..
/-- Little-o characterization of the gradient within a set. -/
theorem hasGradientWithinAt_iff_isLittleO :
HasGradientWithinAt f f' s x â
(fun x' : F => f x' - f x - âªf', x' - xâ«) =o[ð[s] x] fun x' => x' - x :=
hasGradientAtFilter_iff_isLittleO
/-- The gradient within a set, characterized by the normalized remainder tending to `0`. -/
theorem hasGradientWithinAt_iff_tendsto :
HasGradientWithinAt f f' s x â
Tendsto (fun x' => âx' - xââ»Â¹ * âf x' - f x - âªf', x' - xâ«â) (ð[s] x) (ð 0) :=
hasFDerivAtFilter_iff_tendsto
/-- Little-o characterization of the gradient at a point. -/
theorem hasGradientAt_iff_isLittleO : HasGradientAt f f' x â
(fun x' : F => f x' - f x - âªf', x' - xâ«) =o[ð x] fun x' => x' - x :=
hasGradientAtFilter_iff_isLittleO
/-- The gradient at a point, characterized by the normalized remainder tending to `0`. -/
theorem hasGradientAt_iff_tendsto :
HasGradientAt f f' x â
Tendsto (fun x' => âx' - xââ»Â¹ * âf x' - f x - âªf', x' - xâ«â) (ð x) (ð 0) :=
hasFDerivAtFilter_iff_tendsto
/-- A function with a gradient along `L` satisfies a big-O bound on its increments. -/
theorem HasGradientAtFilter.isBigO_sub (h : HasGradientAtFilter f f' x L) :
(fun x' => f x' - f x) =O[L] fun x' => x' - x :=
HasFDerivAtFilter.isBigO_sub h
/-- Having a gradient within a set is invariant under sets that eventually agree away from a
chosen point `y`. -/
theorem hasGradientWithinAt_congr_set' {s t : Set F} (y : F) (h : s =á¶ [ð[{y}á¶] x] t) :
HasGradientWithinAt f f' s x â HasGradientWithinAt f f' t x :=
hasFDerivWithinAt_congr_set' y h
/-- Having a gradient within a set is invariant under replacing the set by one agreeing with
it near the base point. -/
theorem hasGradientWithinAt_congr_set {s t : Set F} (h : s =á¶ [ð x] t) :
HasGradientWithinAt f f' s x â HasGradientWithinAt f f' t x :=
hasFDerivWithinAt_congr_set h
/-- Little-o characterization of the gradient with the increment `h` as the variable. -/
theorem hasGradientAt_iff_isLittleO_nhds_zero : HasGradientAt f f' x â
(fun h => f (x + h) - f x - âªf', hâ«) =o[ð 0] fun h => h :=
hasFDerivAt_iff_isLittleO_nhds_zero
end GradientProperties
section congr
/-! ### Congruence properties of the Gradient -/
variable {fâ fâ : F â ð} {fâ' fâ' : F} {xâ xâ : F} {sâ sâ t : Set F} {Lâ Lâ : Filter F}
/-- Having a gradient along a filter is invariant under eventual equality of the functions
(agreeing at the base point) and equality of the gradient vectors. -/
theorem Filter.EventuallyEq.hasGradientAtFilter_iff (hâ : fâ =á¶ [L] fâ) (hx : fâ x = fâ x)
(hâ : fâ' = fâ') : HasGradientAtFilter fâ fâ' x L â HasGradientAtFilter fâ fâ' x L :=
hâ.hasFDerivAtFilter_iff hx (by simp [hâ])
/-- Transfer a gradient along a filter to a function that eventually agrees with `f`. -/
theorem HasGradientAtFilter.congr_of_eventuallyEq (h : HasGradientAtFilter f f' x L)
(hL : fâ =á¶ [L] f) (hx : fâ x = f x) : HasGradientAtFilter fâ f' x L := by
rwa [hL.hasGradientAtFilter_iff hx rfl]
/-- Transfer a gradient within `s` to a function agreeing with `f` on a subset `t` of `s`. -/
theorem HasGradientWithinAt.congr_mono (h : HasGradientWithinAt f f' s x) (ht : â x â t, fâ x = f x)
(hx : fâ x = f x) (hâ : t â s) : HasGradientWithinAt fâ f' t x :=
HasFDerivWithinAt.congr_mono h ht hx hâ
/-- Transfer a gradient within `s` to a function agreeing with `f` on `s`. -/
theorem HasGradientWithinAt.congr (h : HasGradientWithinAt f f' s x) (hs : â x â s, fâ x = f x)
(hx : fâ x = f x) : HasGradientWithinAt fâ f' s x :=
h.congr_mono hs hx (by tauto)
/-- Variant of `HasGradientWithinAt.congr` assuming the base point belongs to the set. -/
theorem HasGradientWithinAt.congr_of_mem (h : HasGradientWithinAt f f' s x)
(hs : â x â s, fâ x = f x) (hx : x â s) : HasGradientWithinAt fâ f' s x :=
h.congr hs (hs _ hx)
/-- Transfer a gradient within `s` to a function eventually agreeing with `f` near `x` in `s`. -/
theorem HasGradientWithinAt.congr_of_eventuallyEq (h : HasGradientWithinAt f f' s x)
(hâ : fâ =á¶ [ð[s] x] f) (hx : fâ x = f x) : HasGradientWithinAt fâ f' s x :=
HasGradientAtFilter.congr_of_eventuallyEq h hâ hx
/-- Variant of `HasGradientWithinAt.congr_of_eventuallyEq` assuming membership of the base
point in the set. -/
theorem HasGradientWithinAt.congr_of_eventuallyEq_of_mem (h : HasGradientWithinAt f f' s x)
(hâ : fâ =á¶ [ð[s] x] f) (hx : x â s) : HasGradientWithinAt fâ f' s x :=
h.congr_of_eventuallyEq hâ (hâ.eq_of_nhdsWithin hx)
/-- Transfer a gradient at a point to a function eventually agreeing with `f` near it. -/
theorem HasGradientAt.congr_of_eventuallyEq (h : HasGradientAt f f' x) (hâ : fâ =á¶ [ð x] f) :
HasGradientAt fâ f' x :=
HasGradientAtFilter.congr_of_eventuallyEq h hâ (mem_of_mem_nhds hâ : _)
/-- Functions that eventually agree near `x` have the same gradient at `x`. -/
theorem Filter.EventuallyEq.gradient_eq (hL : fâ =á¶ [ð x] f) : â fâ x = â f x := by
unfold gradient
rwa [Filter.EventuallyEq.fderiv_eq]
/-- Functions that eventually agree near `x` have eventually equal gradients near `x`. -/
protected theorem Filter.EventuallyEq.gradient (h : fâ =á¶ [ð x] f) : â fâ =á¶ [ð x] â f :=
h.eventuallyEq_nhds.mono fun _ h => h.gradient_eq
end congr
/-! ### The Gradient of constant functions -/
section Const
variable (c : ð) (s x L)
/-- A constant function has gradient `0` along any filter. -/
theorem hasGradientAtFilter_const : HasGradientAtFilter (fun _ => c) 0 x L := by
rw [HasGradientAtFilter, map_zero]; apply hasFDerivAtFilter_const c x L
/-- A constant function has gradient `0` within any set. -/
theorem hasGradientWithinAt_const : HasGradientWithinAt (fun _ => c) 0 s x :=
hasGradientAtFilter_const _ _ _
/-- A constant function has gradient `0` at every point. -/
theorem hasGradientAt_const : HasGradientAt (fun _ => c) 0 x :=
hasGradientAtFilter_const _ _ _
/-- The gradient of a constant function vanishes at every point. -/
theorem gradient_const : â (fun _ => c) x = 0 := by
rw [gradient, fderiv_const, Pi.zero_apply, map_zero]
/-- The gradient of a constant function is the zero function. -/
@[simp]
theorem gradient_const' : (â fun _ : ð => c) = fun _ => 0 :=
funext fun x => gradient_const x c
end Const
section Continuous
/-! ### Continuity of a function admitting a gradient -/
/-- A function with a gradient along a filter finer than the neighborhood filter tends to
`f x` along that filter. -/
nonrec theorem HasGradientAtFilter.tendsto_nhds (hL : L †ð x) (h : HasGradientAtFilter f f' x L) :
Tendsto f L (ð (f x)) :=
h.tendsto_nhds hL
/-- A function with a gradient within `s` at `x` is continuous within `s` at `x`. -/
theorem HasGradientWithinAt.continuousWithinAt (h : HasGradientWithinAt f f' s x) :
ContinuousWithinAt f s x :=
HasGradientAtFilter.tendsto_nhds inf_le_left h
/-- A function with a gradient at `x` is continuous at `x`. -/
theorem HasGradientAt.continuousAt (h : HasGradientAt f f' x) : ContinuousAt f x :=
HasGradientAtFilter.tendsto_nhds le_rfl h
/-- A function with a gradient at every point of `s` is continuous on `s`. -/
protected theorem HasGradientAt.continuousOn {f' : F â F} (h : â x â s, HasGradientAt f (f' x) x) :
ContinuousOn f s :=
fun x hx => (h x hx).continuousAt.continuousWithinAt
end Continuous
|
Analysis\Calculus\InverseFunctionTheorem\ApproximatesLinearOn.lean | /-
Copyright (c) 2020 Yury Kudryashov. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yury Kudryashov, Sébastien Gouëzel
-/
import Mathlib.Analysis.Normed.Operator.Banach
import Mathlib.Analysis.NormedSpace.OperatorNorm.NormedSpace
import Mathlib.Topology.PartialHomeomorph
/-!
# Non-linear maps close to affine maps
In this file we study a map `f` such that `âf x - f y - f' (x - y)â †c * âx - yâ` on an open set
`s`, where `f' : E âL[ð] F` is a continuous linear map and `c` is suitably small. Maps of this type
behave like `f a + f' (x - a)` near each `a â s`.
When `f'` is onto, we show that `f` is locally onto.
When `f'` is a continuous linear equiv, we show that `f` is a homeomorphism
between `s` and `f '' s`. More precisely, we define `ApproximatesLinearOn.toPartialHomeomorph` to
be a `PartialHomeomorph` with `toFun = f`, `source = s`, and `target = f '' s`.
Maps of this type naturally appear in the proof of the inverse function theorem (see next section),
and `ApproximatesLinearOn.toPartialHomeomorph` will imply that the local inverse function
exists.
We define this auxiliary notion to split the proof of the inverse function theorem into small
lemmas. This approach makes it possible
- to prove a lower estimate on the size of the domain of the inverse function;
- to reuse parts of the proofs in the case if a function is not strictly differentiable. E.g., for a
function `f : E Ã F â G` with estimates on `f x yâ - f x yâ` but not on `f xâ y - f xâ y`.
## Notations
We introduce some `local notation` to make formulas shorter:
* by `N` we denote `âf'â»Â¹â`;
* by `g` we denote the auxiliary contracting map `x ⊠x + f'.symm (y - f x)` used to prove that
`{x | f x = y}` is nonempty.
-/
open Function Set Filter Metric
open scoped Topology NNReal
noncomputable section
variable {ð : Type*} [NontriviallyNormedField ð]
variable {E : Type*} [NormedAddCommGroup E] [NormedSpace ð E]
variable {F : Type*} [NormedAddCommGroup F] [NormedSpace ð F]
variable {G : Type*} [NormedAddCommGroup G] [NormedSpace ð G]
variable {G' : Type*} [NormedAddCommGroup G'] [NormedSpace ð G']
variable {ε : â}
open Filter Metric Set
open ContinuousLinearMap (id)
/-- We say that `f` approximates a continuous linear map `f'` on `s` with constant `c`,
if `âf x - f y - f' (x - y)â †c * âx - yâ` whenever `x, y â s`.
This predicate is defined to facilitate the splitting of the inverse function theorem into small
lemmas. Some of these lemmas can be useful, e.g., to prove that the inverse function is defined
on a specific set. -/
def ApproximatesLinearOn (f : E â F) (f' : E âL[ð] F) (s : Set E) (c : ââ¥0) : Prop :=
â x â s, â y â s, âf x - f y - f' (x - y)â †c * âx - yâ
/-- Every map approximates every continuous linear map, with any constant, on the empty set. -/
@[simp]
theorem approximatesLinearOn_empty (f : E â F) (f' : E âL[ð] F) (c : ââ¥0) :
ApproximatesLinearOn f f' â
c := by simp [ApproximatesLinearOn]
namespace ApproximatesLinearOn
variable {f : E â F}
/-! First we prove some properties of a function that `ApproximatesLinearOn` a (not necessarily
invertible) continuous linear map. -/
section
variable {f' : E âL[ð] F} {s t : Set E} {c c' : ââ¥0}
/-- Monotonicity in the constant: an approximation with constant `c` is also an approximation
with any larger constant `c'`. -/
theorem mono_num (hc : c †c') (hf : ApproximatesLinearOn f f' s c) :
ApproximatesLinearOn f f' s c' := fun x hx y hy =>
le_trans (hf x hx y hy) (mul_le_mul_of_nonneg_right hc <| norm_nonneg _)
/-- Antitonicity in the set: an approximation on `t` restricts to any subset `s` of `t`. -/
theorem mono_set (hst : s â t) (hf : ApproximatesLinearOn f f' t c) :
ApproximatesLinearOn f f' s c := fun x hx y hy => hf x (hst hx) y (hst hy)
/-- `f` approximates `f'` on `s` with constant `c` if and only if the difference `f - f'` is
Lipschitz on `s` with constant `c`. -/
theorem approximatesLinearOn_iff_lipschitzOnWith {f : E â F} {f' : E âL[ð] F} {s : Set E}
{c : ââ¥0} : ApproximatesLinearOn f f' s c â LipschitzOnWith c (f - âf') s := by
have : â x y, f x - f y - f' (x - y) = (f - f') x - (f - f') y := fun x y ⊠by
simp only [map_sub, Pi.sub_apply]; abel
simp only [this, lipschitzOnWith_iff_norm_sub_le, ApproximatesLinearOn]
alias âšlipschitzOnWith, _root_.LipschitzOnWith.approximatesLinearOnâ© :=
approximatesLinearOn_iff_lipschitzOnWith
/-- The difference `f - f'` is Lipschitz with constant `c`, as a map on the subtype of `s`. -/
theorem lipschitz_sub (hf : ApproximatesLinearOn f f' s c) :
LipschitzWith c fun x : s => f x - f' x :=
hf.lipschitzOnWith.to_restrict
/-- An approximating map is Lipschitz on `s`, with constant the norm of `f'` plus `c`. -/
protected theorem lipschitz (hf : ApproximatesLinearOn f f' s c) :
LipschitzWith (âf'ââ + c) (s.restrict f) := by
simpa only [restrict_apply, add_sub_cancel] using
(f'.lipschitz.restrict s).add hf.lipschitz_sub
/-- An approximating map is continuous as a map from the subtype of `s`. -/
protected theorem continuous (hf : ApproximatesLinearOn f f' s c) : Continuous (s.restrict f) :=
hf.lipschitz.continuous
/-- An approximating map is continuous on `s`. -/
protected theorem continuousOn (hf : ApproximatesLinearOn f f' s c) : ContinuousOn f s :=
continuousOn_iff_continuous_restrict.2 hf.continuous
end
section LocallyOnto
/-!
We prove that a function which is linearly approximated by a continuous linear map with a nonlinear
right inverse is locally onto. This will apply to the case where the approximating map is a linear
equivalence, for the local inverse theorem, but also whenever the approximating map is onto,
by Banach's open mapping theorem. -/
variable [CompleteSpace E] {s : Set E} {c : ââ¥0} {f' : E âL[ð] F}
/-- If a function is linearly approximated by a continuous linear map with a (possibly nonlinear)
right inverse, then it is locally onto: a ball of an explicit radius is included in the image
of the map. -/
theorem surjOn_closedBall_of_nonlinearRightInverse
(hf : ApproximatesLinearOn f f' s c)
(f'symm : f'.NonlinearRightInverse) {ε : â} {b : E} (ε0 : 0 †ε) (hε : closedBall b ε â s) :
SurjOn f (closedBall b ε) (closedBall (f b) (((f'symm.nnnorm : â)â»Â¹ - c) * ε)) := by
intro y hy
rcases le_or_lt (f'symm.nnnorm : â)â»Â¹ c with hc | hc
· refine âšb, by simp [ε0], ?_â©
have : dist y (f b) †0 :=
(mem_closedBall.1 hy).trans (mul_nonpos_of_nonpos_of_nonneg (by linarith) ε0)
simp only [dist_le_zero] at this
rw [this]
have If' : (0 : â) < f'symm.nnnorm := by rw [â inv_pos]; exact (NNReal.coe_nonneg _).trans_lt hc
have Icf' : (c : â) * f'symm.nnnorm < 1 := by rwa [inv_eq_one_div, lt_div_iff If'] at hc
have Jf' : (f'symm.nnnorm : â) â 0 := ne_of_gt If'
have Jcf' : (1 : â) - c * f'symm.nnnorm â 0 := by apply ne_of_gt; linarith
/- We have to show that `y` can be written as `f x` for some `x â closedBall b ε`.
The idea of the proof is to apply the Banach contraction principle to the map
`g : x ⊠x + f'symm (y - f x)`, as a fixed point of this map satisfies `f x = y`.
When `f'symm` is a genuine linear inverse, `g` is a contracting map. In our case, since `f'symm`
is nonlinear, this map is not contracting (it is not even continuous), but still the proof of
the contraction theorem holds: `uâ = gâ¿ b` is a Cauchy sequence, converging exponentially fast
to the desired point `x`. Instead of appealing to general results, we check this by hand.
The main point is that `f (u n)` becomes exponentially close to `y`, and therefore
`dist (u (n+1)) (u n)` becomes exponentially small, making it possible to get an inductive
bound on `dist (u n) b`, from which one checks that `u n` stays in the ball on which one has a
control. Therefore, the bound can be checked at the next step, and so on inductively.
-/
set g := fun x => x + f'symm (y - f x) with hg
set u := fun n : â => g^[n] b with hu
have usucc : â n, u (n + 1) = g (u n) := by simp [hu, â iterate_succ_apply' g _ b]
-- First bound: if `f z` is close to `y`, then `g z` is close to `z` (i.e., almost a fixed point).
have A : â z, dist (g z) z †f'symm.nnnorm * dist (f z) y := by
intro z
rw [dist_eq_norm, hg, add_sub_cancel_left, dist_eq_norm']
exact f'symm.bound _
-- Second bound: if `z` and `g z` are in the set with good control, then `f (g z)` becomes closer
-- to `y` than `f z` was (this uses the linear approximation property, and is the reason for the
-- choice of the formula for `g`).
have B :
â z â closedBall b ε,
g z â closedBall b ε â dist (f (g z)) y †c * f'symm.nnnorm * dist (f z) y := by
intro z hz hgz
set v := f'symm (y - f z)
calc
dist (f (g z)) y = âf (z + v) - yâ := by rw [dist_eq_norm]
_ = âf (z + v) - f z - f' v + f' v - (y - f z)â := by congr 1; abel
_ = âf (z + v) - f z - f' (z + v - z)â := by
simp only [v, ContinuousLinearMap.NonlinearRightInverse.right_inv, add_sub_cancel_left,
sub_add_cancel]
_ †c * âz + v - zâ := hf _ (hε hgz) _ (hε hz)
_ †c * (f'symm.nnnorm * dist (f z) y) := by
gcongr
simpa [dist_eq_norm'] using f'symm.bound (y - f z)
_ = c * f'symm.nnnorm * dist (f z) y := by ring
-- Third bound: a complicated bound on `dist w b` (that will show up in the induction) is enough
-- to check that `w` is in the ball on which one has controls. Will be used to check that `u n`
-- belongs to this ball for all `n`.
have C : â (n : â) (w : E), dist w b †f'symm.nnnorm * (1 - ((c : â) * f'symm.nnnorm) ^ n) /
(1 - c * f'symm.nnnorm) * dist (f b) y â w â closedBall b ε := fun n w hw ⊠by
apply hw.trans
rw [div_mul_eq_mul_div, div_le_iff]; swap; · linarith
calc
(f'symm.nnnorm : â) * (1 - ((c : â) * f'symm.nnnorm) ^ n) * dist (f b) y =
f'symm.nnnorm * dist (f b) y * (1 - ((c : â) * f'symm.nnnorm) ^ n) := by
ring
_ †f'symm.nnnorm * dist (f b) y * 1 := by
gcongr
rw [sub_le_self_iff]
positivity
_ †f'symm.nnnorm * (((f'symm.nnnorm : â)â»Â¹ - c) * ε) := by
rw [mul_one]
gcongr
exact mem_closedBall'.1 hy
_ = ε * (1 - c * f'symm.nnnorm) := by field_simp; ring
/- Main inductive control: `f (u n)` becomes exponentially close to `y`, and therefore
`dist (u (n+1)) (u n)` becomes exponentially small, making it possible to get an inductive
bound on `dist (u n) b`, from which one checks that `u n` remains in the ball on which we
have estimates. -/
have D : â n : â, dist (f (u n)) y †((c : â) * f'symm.nnnorm) ^ n * dist (f b) y â§
dist (u n) b †f'symm.nnnorm * (1 - ((c : â) * f'symm.nnnorm) ^ n) /
(1 - (c : â) * f'symm.nnnorm) * dist (f b) y := fun n ⊠by
induction' n with n IH; · simp [hu, le_refl]
rw [usucc]
have Ign : dist (g (u n)) b †f'symm.nnnorm * (1 - ((c : â) * f'symm.nnnorm) ^ n.succ) /
(1 - c * f'symm.nnnorm) * dist (f b) y :=
calc
dist (g (u n)) b †dist (g (u n)) (u n) + dist (u n) b := dist_triangle _ _ _
_ †f'symm.nnnorm * dist (f (u n)) y + dist (u n) b := add_le_add (A _) le_rfl
_ †f'symm.nnnorm * (((c : â) * f'symm.nnnorm) ^ n * dist (f b) y) +
f'symm.nnnorm * (1 - ((c : â) * f'symm.nnnorm) ^ n) / (1 - c * f'symm.nnnorm) *
dist (f b) y := by
gcongr
· exact IH.1
· exact IH.2
_ = f'symm.nnnorm * (1 - ((c : â) * f'symm.nnnorm) ^ n.succ) /
(1 - (c : â) * f'symm.nnnorm) * dist (f b) y := by
field_simp [Jcf', pow_succ]; ring
refine âš?_, Ignâ©
calc
dist (f (g (u n))) y †c * f'symm.nnnorm * dist (f (u n)) y :=
B _ (C n _ IH.2) (C n.succ _ Ign)
_ †(c : â) * f'symm.nnnorm * (((c : â) * f'symm.nnnorm) ^ n * dist (f b) y) := by
gcongr
apply IH.1
_ = ((c : â) * f'symm.nnnorm) ^ n.succ * dist (f b) y := by simp only [pow_succ']; ring
-- Deduce from the inductive bound that `uâ` is a Cauchy sequence, therefore converging.
have : CauchySeq u := by
refine cauchySeq_of_le_geometric _ (âf'symm.nnnorm * dist (f b) y) Icf' fun n ⊠?_
calc
dist (u n) (u (n + 1)) = dist (g (u n)) (u n) := by rw [usucc, dist_comm]
_ †f'symm.nnnorm * dist (f (u n)) y := A _
_ †f'symm.nnnorm * (((c : â) * f'symm.nnnorm) ^ n * dist (f b) y) := by
gcongr
exact (D n).1
_ = f'symm.nnnorm * dist (f b) y * ((c : â) * f'symm.nnnorm) ^ n := by ring
obtain âšx, hxâ© : â x, Tendsto u atTop (ð x) := cauchySeq_tendsto_of_complete this
-- As all the `uâ` belong to the ball `closedBall b ε`, so does their limit `x`.
have xmem : x â closedBall b ε :=
isClosed_ball.mem_of_tendsto hx (eventually_of_forall fun n => C n _ (D n).2)
refine âšx, xmem, ?_â©
-- It remains to check that `f x = y`. This follows from continuity of `f` on `closedBall b ε`
-- and from the fact that `f uâ` is converging to `y` by construction.
have hx' : Tendsto u atTop (ð[closedBall b ε] x) := by
simp only [nhdsWithin, tendsto_inf, hx, true_and_iff, tendsto_principal]
exact eventually_of_forall fun n => C n _ (D n).2
have T1 : Tendsto (f â u) atTop (ð (f x)) :=
(hf.continuousOn.mono hε x xmem).tendsto.comp hx'
have T2 : Tendsto (f â u) atTop (ð y) := by
rw [tendsto_iff_dist_tendsto_zero]
refine squeeze_zero (fun _ => dist_nonneg) (fun n => (D n).1) ?_
simpa using (tendsto_pow_atTop_nhds_zero_of_lt_one (by positivity) Icf').mul tendsto_const_nhds
exact tendsto_nhds_unique T1 T2
/-- A map approximating, with small enough constant, a linear map with a nonlinear right
inverse sends open sets to open sets (it is locally onto by
`surjOn_closedBall_of_nonlinearRightInverse`). -/
theorem open_image (hf : ApproximatesLinearOn f f' s c) (f'symm : f'.NonlinearRightInverse)
(hs : IsOpen s) (hc : Subsingleton F âš c < f'symm.nnnormâ»Â¹) : IsOpen (f '' s) := by
cases' hc with hE hc
-- In a subsingleton codomain every set is open.
· exact isOpen_discrete _
simp only [isOpen_iff_mem_nhds, nhds_basis_closedBall.mem_iff, forall_mem_image] at hs â¢
intro x hx
rcases hs x hx with âšÎµ, ε0, hεâ©
refine âš(f'symm.nnnormâ»Â¹ - c) * ε, mul_pos (sub_pos.2 hc) ε0, ?_â©
exact (hf.surjOn_closedBall_of_nonlinearRightInverse f'symm (le_of_lt ε0) hε).mono hε Subset.rfl
/-- If `s` is a neighborhood of `x`, then `f '' s` is a neighborhood of `f x`, for a map
approximating (with small enough constant) a linear map with a nonlinear right inverse. -/
theorem image_mem_nhds (hf : ApproximatesLinearOn f f' s c) (f'symm : f'.NonlinearRightInverse)
{x : E} (hs : s â ð x) (hc : Subsingleton F âš c < f'symm.nnnormâ»Â¹) : f '' s â ð (f x) := by
obtain âšt, hts, ht, xtâ© : â t, t â s â§ IsOpen t â§ x â t := _root_.mem_nhds_iff.1 hs
have := IsOpen.mem_nhds ((hf.mono_set hts).open_image f'symm ht hc) (mem_image_of_mem _ xt)
exact mem_of_superset this (image_subset _ hts)
/-- Under the assumptions of `image_mem_nhds`, `f` maps the neighborhood filter of `x`
exactly onto the neighborhood filter of `f x`. -/
theorem map_nhds_eq (hf : ApproximatesLinearOn f f' s c) (f'symm : f'.NonlinearRightInverse) {x : E}
(hs : s â ð x) (hc : Subsingleton F âš c < f'symm.nnnormâ»Â¹) : map f (ð x) = ð (f x) := by
refine
le_antisymm ((hf.continuousOn x (mem_of_mem_nhds hs)).continuousAt hs) (le_map fun t ht => ?_)
have : f '' (s â© t) â ð (f x) :=
(hf.mono_set inter_subset_left).image_mem_nhds f'symm (inter_mem hs ht) hc
exact mem_of_superset this (image_subset _ inter_subset_right)
end LocallyOnto
/-!
From now on we assume that `f` approximates an invertible continuous linear map `f' : E âL[ð] F`.
We also assume that either `E = {0}`, or `c < âf'â»Â¹ââ»Â¹`. We use `N` as an abbreviation for `âf'â»Â¹â`.
-/
variable {f' : E âL[ð] F} {s : Set E} {c : ââ¥0}
local notation "N" => â(f'.symm : F âL[ð] E)ââ
/-- A map approximating a linear equivalence with small enough constant is anti-Lipschitz on
`s`: distances between points can be recovered, up to a factor, from their images. -/
protected theorem antilipschitz (hf : ApproximatesLinearOn f (f' : E âL[ð] F) s c)
(hc : Subsingleton E âš c < Nâ»Â¹) : AntilipschitzWith (Nâ»Â¹ - c)â»Â¹ (s.restrict f) := by
cases' hc with hE hc
· exact AntilipschitzWith.of_subsingleton
convert (f'.antilipschitz.restrict s).add_lipschitzWith hf.lipschitz_sub hc
simp [restrict]
/-- A map approximating a linear equivalence with small enough constant is injective, as a map
from the subtype of `s`. Follows from the anti-Lipschitz estimate. -/
protected theorem injective (hf : ApproximatesLinearOn f (f' : E âL[ð] F) s c)
(hc : Subsingleton E âš c < Nâ»Â¹) : Injective (s.restrict f) :=
(hf.antilipschitz hc).injective
/-- A map approximating a linear equivalence with small enough constant is injective on `s`. -/
protected theorem injOn (hf : ApproximatesLinearOn f (f' : E âL[ð] F) s c)
(hc : Subsingleton E âš c < Nâ»Â¹) : InjOn f s :=
injOn_iff_injective.2 <| hf.injective hc
/-- A map approximating a linear equivalence on the whole space, with small enough constant,
is surjective: arbitrarily large closed balls around `f 0` are contained in the range. -/
protected theorem surjective [CompleteSpace E] (hf : ApproximatesLinearOn f (f' : E âL[ð] F) univ c)
(hc : Subsingleton E âš c < Nâ»Â¹) : Surjective f := by
cases' hc with hE hc
· haveI : Subsingleton F := (Equiv.subsingleton_congr f'.toEquiv).1 hE
exact surjective_to_subsingleton _
· apply forall_of_forall_mem_closedBall (fun y : F => â a, f a = y) (f 0) _
have hc' : (0 : â) < Nâ»Â¹ - c := by rw [sub_pos]; exact hc
let p : â â Prop := fun R => closedBall (f 0) R â Set.range f
-- Closed balls of radius `(Nâ»Â¹ - c) * r` lie in the range, for arbitrarily large `r`.
have hp : âá¶ r : â in atTop, p ((Nâ»Â¹ - c) * r) := by
have hr : âá¶ r : â in atTop, 0 †r := eventually_ge_atTop 0
refine hr.mono fun r hr => Subset.trans ?_ (image_subset_range f (closedBall 0 r))
refine hf.surjOn_closedBall_of_nonlinearRightInverse f'.toNonlinearRightInverse hr ?_
exact subset_univ _
refine ((tendsto_id.const_mul_atTop hc').frequently hp.frequently).mono ?_
exact fun R h y hy => h hy
/-- A map approximating a linear equivalence on a set defines a partial equivalence on this set.
Should not be used outside of this file, because it is superseded by `toPartialHomeomorph` below.
This is a first step towards the inverse function. -/
def toPartialEquiv (hf : ApproximatesLinearOn f (f' : E âL[ð] F) s c)
(hc : Subsingleton E âš c < Nâ»Â¹) : PartialEquiv E F :=
-- Injectivity on `s` is the only ingredient needed to build the partial equivalence.
(hf.injOn hc).toPartialEquiv _ _
/-- The inverse function is continuous on `f '' s`.
Use properties of `PartialHomeomorph` instead. -/
theorem inverse_continuousOn (hf : ApproximatesLinearOn f (f' : E âL[ð] F) s c)
(hc : Subsingleton E âš c < Nâ»Â¹) : ContinuousOn (hf.toPartialEquiv hc).symm (f '' s) := by
apply continuousOn_iff_continuous_restrict.2
-- Continuity of the inverse follows from the anti-Lipschitz estimate for `f`.
refine ((hf.antilipschitz hc).to_rightInvOn' ?_ (hf.toPartialEquiv hc).right_inv').continuous
exact fun x hx => (hf.toPartialEquiv hc).map_target hx
/-- The inverse function is approximated linearly on `f '' s` by `f'.symm`. -/
theorem to_inv (hf : ApproximatesLinearOn f (f' : E âL[ð] F) s c) (hc : Subsingleton E âš c < Nâ»Â¹) :
ApproximatesLinearOn (hf.toPartialEquiv hc).symm (f'.symm : F âL[ð] E) (f '' s)
(N * (Nâ»Â¹ - c)â»Â¹ * c) := fun x hx y hy ⊠by
set A := hf.toPartialEquiv hc
have Af : â z, A z = f z := fun z => rfl
-- Write the two points of `f '' s` as images of points `x'`, `y'` of `s`; the partial
-- equivalence then sends them back to `x'` and `y'`.
rcases (mem_image _ _ _).1 hx with âšx', x's, rflâ©
rcases (mem_image _ _ _).1 hy with âšy', y's, rflâ©
rw [â Af x', â Af y', A.left_inv x's, A.left_inv y's]
-- Bound the remainder through `f'`, use the approximation property of `f`, then come back to
-- distances between image points via the anti-Lipschitz estimate.
calc
âx' - y' - f'.symm (A x' - A y')â †N * âf' (x' - y' - f'.symm (A x' - A y'))â :=
(f' : E âL[ð] F).bound_of_antilipschitz f'.antilipschitz _
_ = N * âA y' - A x' - f' (y' - x')â := by
congr 2
simp only [ContinuousLinearEquiv.apply_symm_apply, ContinuousLinearEquiv.map_sub]
abel
_ †N * (c * ây' - x'â) := mul_le_mul_of_nonneg_left (hf _ y's _ x's) (NNReal.coe_nonneg _)
_ †N * (c * (((Nâ»Â¹ - c)â»Â¹ : ââ¥0) * âA y' - A x'â)) := by
gcongr
rw [â dist_eq_norm, â dist_eq_norm]
exact (hf.antilipschitz hc).le_mul_dist âšy', y'sâ© âšx', x'sâ©
_ = (N * (Nâ»Â¹ - c)â»Â¹ * c : ââ¥0) * âA x' - A y'â := by
simp only [norm_sub_rev, NNReal.coe_mul]; ring
variable [CompleteSpace E]
section
variable (f s)
/-- Given a function `f` that approximates a linear equivalence on an open set `s`,
returns a partial homeomorphism with `toFun = f` and `source = s`. -/
def toPartialHomeomorph (hf : ApproximatesLinearOn f (f' : E âL[ð] F) s c)
(hc : Subsingleton E âš c < Nâ»Â¹) (hs : IsOpen s) : PartialHomeomorph E F where
toPartialEquiv := hf.toPartialEquiv hc
open_source := hs
-- Openness of the target uses the open-mapping property via the nonlinear right inverse.
open_target := hf.open_image f'.toNonlinearRightInverse hs <| by
rwa [f'.toEquiv.subsingleton_congr] at hc
continuousOn_toFun := hf.continuousOn
continuousOn_invFun := hf.inverse_continuousOn hc
/-- The forward map of `toPartialHomeomorph` is `f` itself. -/
@[simp]
theorem toPartialHomeomorph_coe (hf : ApproximatesLinearOn f (f' : E âL[ð] F) s c)
(hc : Subsingleton E âš c < Nâ»Â¹) (hs : IsOpen s) :
(hf.toPartialHomeomorph f s hc hs : E â F) = f :=
rfl
/-- The source of `toPartialHomeomorph` is the set `s`. -/
@[simp]
theorem toPartialHomeomorph_source (hf : ApproximatesLinearOn f (f' : E âL[ð] F) s c)
(hc : Subsingleton E âš c < Nâ»Â¹) (hs : IsOpen s) :
(hf.toPartialHomeomorph f s hc hs).source = s :=
rfl
/-- The target of `toPartialHomeomorph` is the image `f '' s`. -/
@[simp]
theorem toPartialHomeomorph_target (hf : ApproximatesLinearOn f (f' : E âL[ð] F) s c)
(hc : Subsingleton E âš c < Nâ»Â¹) (hs : IsOpen s) :
(hf.toPartialHomeomorph f s hc hs).target = f '' s :=
rfl
/-- A function `f` that approximates a linear equivalence on the whole space is a homeomorphism. -/
def toHomeomorph (hf : ApproximatesLinearOn f (f' : E âL[ð] F) univ c)
(hc : Subsingleton E âš c < Nâ»Â¹) : E ââ F := by
refine (hf.toPartialHomeomorph _ _ hc isOpen_univ).toHomeomorphOfSourceEqUnivTargetEqUniv rfl ?_
-- The target is all of `F` because `f` is surjective on the whole space.
rw [toPartialHomeomorph_target, image_univ, range_iff_surjective]
exact hf.surjective hc
end
/-- An explicit closed ball around `f b` contained in the target of `toPartialHomeomorph`,
provided the closed ball of radius `ε` around `b` is contained in `s`. -/
theorem closedBall_subset_target (hf : ApproximatesLinearOn f (f' : E âL[ð] F) s c)
(hc : Subsingleton E âš c < Nâ»Â¹) (hs : IsOpen s) {b : E} (ε0 : 0 †ε) (hε : closedBall b ε â s) :
closedBall (f b) ((Nâ»Â¹ - c) * ε) â (hf.toPartialHomeomorph f s hc hs).target :=
(hf.surjOn_closedBall_of_nonlinearRightInverse f'.toNonlinearRightInverse ε0 hε).mono hε
Subset.rfl
end ApproximatesLinearOn
|
Analysis\Calculus\InverseFunctionTheorem\ContDiff.lean | /-
Copyright (c) 2020 Heather Macbeth. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Heather Macbeth
-/
import Mathlib.Analysis.Calculus.ContDiff.Basic
import Mathlib.Analysis.Calculus.ContDiff.RCLike
import Mathlib.Analysis.Calculus.InverseFunctionTheorem.FDeriv
/-!
# Inverse function theorem, smooth case
In this file we specialize the inverse function theorem to `C^r`-smooth functions.
-/
noncomputable section
namespace ContDiffAt
variable {ð : Type*} [RCLike ð]
variable {E : Type*} [NormedAddCommGroup E] [NormedSpace ð E]
variable {F : Type*} [NormedAddCommGroup F] [NormedSpace ð F]
variable [CompleteSpace E] (f : E â F) {f' : E âL[ð] F} {a : E}
/-- Given a `ContDiff` function over `ð` (which is `â` or `â`) with an invertible
derivative at `a`, returns a `PartialHomeomorph` with `to_fun = f` and `a â source`. -/
def toPartialHomeomorph {n : ââ} (hf : ContDiffAt ð n f a) (hf' : HasFDerivAt f (f' : E âL[ð] F) a)
(hn : 1 †n) : PartialHomeomorph E F :=
(hf.hasStrictFDerivAt' hf' hn).toPartialHomeomorph f
variable {f}
/-- The forward map of `ContDiffAt.toPartialHomeomorph` is `f` itself. -/
@[simp]
theorem toPartialHomeomorph_coe {n : ââ} (hf : ContDiffAt ð n f a)
(hf' : HasFDerivAt f (f' : E âL[ð] F) a) (hn : 1 †n) :
(hf.toPartialHomeomorph f hf' hn : E â F) = f :=
rfl
/-- The base point `a` belongs to the source of the partial homeomorphism built from a `C^n`
function with invertible derivative at `a`. -/
theorem mem_toPartialHomeomorph_source {n : ââ} (hf : ContDiffAt ð n f a)
(hf' : HasFDerivAt f (f' : E âL[ð] F) a) (hn : 1 †n) :
a â (hf.toPartialHomeomorph f hf' hn).source :=
(hf.hasStrictFDerivAt' hf' hn).mem_toPartialHomeomorph_source
/-- The image `f a` belongs to the target of the partial homeomorphism built from a `C^n`
function with invertible derivative at `a`. -/
theorem image_mem_toPartialHomeomorph_target {n : ââ} (hf : ContDiffAt ð n f a)
(hf' : HasFDerivAt f (f' : E âL[ð] F) a) (hn : 1 †n) :
f a â (hf.toPartialHomeomorph f hf' hn).target :=
(hf.hasStrictFDerivAt' hf' hn).image_mem_toPartialHomeomorph_target
/-- Given a `ContDiff` function over `ð` (which is `â` or `â`) with an invertible derivative
at `a`, returns a function that is locally inverse to `f`. -/
def localInverse {n : ââ} (hf : ContDiffAt ð n f a) (hf' : HasFDerivAt f (f' : E âL[ð] F) a)
(hn : 1 †n) : F â E :=
(hf.hasStrictFDerivAt' hf' hn).localInverse f f' a
/-- The local inverse produced by `ContDiffAt.localInverse` sends `f a` back to `a`. -/
theorem localInverse_apply_image {n : ââ} (hf : ContDiffAt ð n f a)
(hf' : HasFDerivAt f (f' : E âL[ð] F) a) (hn : 1 †n) : hf.localInverse hf' hn (f a) = a :=
(hf.hasStrictFDerivAt' hf' hn).localInverse_apply_image
/-- Given a `ContDiff` function over `ð` (which is `â` or `â`) with an invertible derivative
at `a`, the inverse function (produced by `ContDiff.toPartialHomeomorph`) is
also `ContDiff`. -/
theorem to_localInverse {n : ââ} (hf : ContDiffAt ð n f a)
(hf' : HasFDerivAt f (f' : E âL[ð] F) a) (hn : 1 †n) :
ContDiffAt ð n (hf.localInverse hf' hn) (f a) := by
have := hf.localInverse_apply_image hf' hn
-- Smoothness of the inverse follows from smoothness of the forward map of the partial
-- homeomorphism, applied at the point `f a` of its target.
apply (hf.toPartialHomeomorph f hf' hn).contDiffAt_symm
(image_mem_toPartialHomeomorph_target hf hf' hn)
· convert hf'
· convert hf
|
Analysis\Calculus\InverseFunctionTheorem\Deriv.lean | /-
Copyright (c) 2020 Yury G. Kudryashov. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yury G. Kudryashov
-/
import Mathlib.Analysis.Calculus.Deriv.Inverse
import Mathlib.Analysis.Calculus.InverseFunctionTheorem.FDeriv
/-!
# Inverse function theorem, 1D case
In this file we prove a version of the inverse function theorem for maps `f : ð â ð`.
We use `ContinuousLinearEquiv.unitsEquivAut` to translate `HasStrictDerivAt f f' a` and
`f' â 0` into `HasStrictFDerivAt f (_ : ð âL[ð] ð) a`.
-/
open Filter
open scoped Topology
variable {ð : Type*} [NontriviallyNormedField ð] [CompleteSpace ð] (f : ð â ð)
noncomputable section
namespace HasStrictDerivAt
variable (f' a : ð) (hf : HasStrictDerivAt f f' a) (hf' : f' â 0)
/-- A function that is inverse to `f` near `a`. -/
abbrev localInverse : ð â ð :=
(hf.hasStrictFDerivAt_equiv hf').localInverse _ _ _
variable {f f' a}
/-- A map with nonzero strict derivative at `a` sends the neighborhood filter of `a` exactly
onto the neighborhood filter of `f a`. -/
theorem map_nhds_eq : map f (ð a) = ð (f a) :=
(hf.hasStrictFDerivAt_equiv hf').map_nhds_eq_of_equiv
/-- The local inverse of `f` has strict derivative the inverse of `f'` at `f a`. -/
theorem to_localInverse : HasStrictDerivAt (hf.localInverse f f' a hf') f'â»Â¹ (f a) :=
(hf.hasStrictFDerivAt_equiv hf').to_localInverse
/-- Any function `g` that is locally a left inverse of `f` near `a` has strict derivative the
inverse of `f'` at `f a`. -/
theorem to_local_left_inverse {g : ð â ð} (hg : âá¶ x in ð a, g (f x) = x) :
HasStrictDerivAt g f'â»Â¹ (f a) :=
(hf.hasStrictFDerivAt_equiv hf').to_local_left_inverse hg
end HasStrictDerivAt
variable {f}
/-- If a function has a non-zero strict derivative at all points, then it is an open map. -/
theorem isOpenMap_of_hasStrictDerivAt {f' : ð â ð}
(hf : â x, HasStrictDerivAt f (f' x) x) (h0 : â x, f' x â 0) : IsOpenMap f :=
-- openness follows pointwise from `map_nhds_eq` applied at each `x`
isOpenMap_iff_nhds_le.2 fun x => ((hf x).map_nhds_eq (h0 x)).ge
-- Deprecated compatibility alias; use `isOpenMap_of_hasStrictDerivAt` instead.
@[deprecated (since := "2024-03-23")]
alias open_map_of_strict_deriv := isOpenMap_of_hasStrictDerivAt
|
Analysis\Calculus\InverseFunctionTheorem\FDeriv.lean | /-
Copyright (c) 2020 Yury Kudryashov. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yury Kudryashov, Sébastien Gouëzel
-/
import Mathlib.Analysis.Calculus.FDeriv.Equiv
import Mathlib.Analysis.Calculus.InverseFunctionTheorem.ApproximatesLinearOn
/-!
# Inverse function theorem
In this file we prove the inverse function theorem. It says that if a map `f : E â F`
has an invertible strict derivative `f'` at `a`, then it is locally invertible,
and the inverse function has derivative `f' â»Â¹`.
We define `HasStrictFDerivAt.toPartialHomeomorph` that repacks a function `f`
with a `hf : HasStrictFDerivAt f f' a`, `f' : E âL[ð] F`, into a `PartialHomeomorph`.
The `toFun` of this `PartialHomeomorph` is defeq to `f`, so one can apply theorems
about `PartialHomeomorph` to `hf.toPartialHomeomorph f`, and get statements about `f`.
Then we define `HasStrictFDerivAt.localInverse` to be the `invFun` of this `PartialHomeomorph`,
and prove two versions of the inverse function theorem:
* `HasStrictFDerivAt.to_localInverse`: if `f` has an invertible derivative `f'` at `a` in the
strict sense (`hf`), then `hf.localInverse f f' a` has derivative `f'.symm` at `f a` in the
strict sense;
* `HasStrictFDerivAt.to_local_left_inverse`: if `f` has an invertible derivative `f'` at `a` in
the strict sense and `g` is locally left inverse to `f` near `a`, then `g` has derivative
`f'.symm` at `f a` in the strict sense.
Some related theorems, providing the derivative and higher regularity assuming that we already know
the inverse function, are formulated in the `Analysis/Calculus/FDeriv` and `Analysis/Calculus/Deriv`
folders, and in `ContDiff.lean`.
## Tags
derivative, strictly differentiable, continuously differentiable, smooth, inverse function
-/
open Function Set Filter Metric
open scoped Topology NNReal
noncomputable section
variable {ð : Type*} [NontriviallyNormedField ð]
variable {E : Type*} [NormedAddCommGroup E] [NormedSpace ð E]
variable {F : Type*} [NormedAddCommGroup F] [NormedSpace ð F]
variable {G : Type*} [NormedAddCommGroup G] [NormedSpace ð G]
variable {G' : Type*} [NormedAddCommGroup G'] [NormedSpace ð G']
variable {ε : â}
open Asymptotics Filter Metric Set
open ContinuousLinearMap (id)
/-!
### Inverse function theorem
Let `f : E â F` be a map defined on a complete vector
space `E`. Assume that `f` has an invertible derivative `f' : E âL[ð] F` at `a : E` in the strict
sense. Then `f` approximates `f'` in the sense of `ApproximatesLinearOn` on an open neighborhood
of `a`, and we can apply `ApproximatesLinearOn.toPartialHomeomorph` to construct the inverse
function. -/
namespace HasStrictFDerivAt
/-- If `f` has derivative `f'` at `a` in the strict sense and `c > 0`, then `f` approximates `f'`
with constant `c` on some neighborhood of `a`. -/
theorem approximates_deriv_on_nhds {f : E â F} {f' : E âL[ð] F} {a : E}
(hf : HasStrictFDerivAt f f' a) {c : ââ¥0} (hc : Subsingleton E âš 0 < c) :
â s â ð a, ApproximatesLinearOn f f' s c := by
cases' hc with hE hc
-- degenerate case: in a subsingleton space every pair of points coincides, so the
-- whole space works with any constant
· refine âšuniv, IsOpen.mem_nhds isOpen_univ trivial, fun x _ y _ => ?_â©
simp [@Subsingleton.elim E hE x y]
-- main case: unfold strict differentiability at `c` and extract a product
-- neighborhood `s Ã s` from the resulting eventual bound
have := hf.def hc
rw [nhds_prod_eq, Filter.Eventually, mem_prod_same_iff] at this
rcases this with âšs, has, hsâ©
exact âšs, has, fun x hx y hy => hs (mk_mem_prod hx hy)â©
/-- Surjectivity version of the inverse function theorem: if `f` has a strict derivative `f'`
at `a` whose range is all of `F` (both spaces complete), then `f` maps the neighborhood
filter of `a` onto the neighborhood filter of `f a`. -/
theorem map_nhds_eq_of_surj [CompleteSpace E] [CompleteSpace F] {f : E â F} {f' : E âL[ð] F} {a : E}
(hf : HasStrictFDerivAt f (f' : E âL[ð] F) a) (h : LinearMap.range f' = â€) :
map f (ð a) = ð (f a) := by
-- a (possibly nonlinear) right inverse of `f'`, available since `f'` is surjective
let f'symm := f'.nonlinearRightInverseOfSurjective h
-- approximation constant: half the reciprocal of the right inverse's norm, so that
-- the hypothesis of `ApproximatesLinearOn.map_nhds_eq` below is satisfied
set c : ââ¥0 := f'symm.nnnormâ»Â¹ / 2 with hc
have f'symm_pos : 0 < f'symm.nnnorm := f'.nonlinearRightInverseOfSurjective_nnnorm_pos h
have cpos : 0 < c := by simp [hc, half_pos, inv_pos, f'symm_pos]
obtain âšs, s_nhds, hsâ© : â s â ð a, ApproximatesLinearOn f f' s c :=
hf.approximates_deriv_on_nhds (Or.inr cpos)
apply hs.map_nhds_eq f'symm s_nhds (Or.inr (NNReal.half_lt_self _))
simp [ne_of_gt f'symm_pos]
variable [CompleteSpace E] {f : E â F} {f' : E âL[ð] F} {a : E}
/-- `f` approximates its invertible strict derivative `f'` on some *open* neighborhood of `a`,
with constant half the reciprocal of `âf'.symmâ`. This is the form of the approximation
used to build the partial homeomorphism below. -/
theorem approximates_deriv_on_open_nhds (hf : HasStrictFDerivAt f (f' : E âL[ð] F) a) :
â s : Set E, a â s â§ IsOpen s â§
ApproximatesLinearOn f (f' : E âL[ð] F) s (â(f'.symm : F âL[ð] E)âââ»Â¹ / 2) := by
simp only [â and_assoc]
-- refine the neighborhood from `approximates_deriv_on_nhds` to an open one, using that
-- the approximation property is monotone under shrinking the set
refine ((nhds_basis_opens a).exists_iff fun s t => ApproximatesLinearOn.mono_set).1 ?_
exact
hf.approximates_deriv_on_nhds <|
f'.subsingleton_or_nnnorm_symm_pos.imp id fun hf' => half_pos <| inv_pos.2 hf'
variable (f)
/-- Given a function with an invertible strict derivative at `a`, returns a `PartialHomeomorph`
with `toFun = f` and `a â source`. This is a part of the inverse function theorem.
The other part `HasStrictFDerivAt.to_localInverse` states that the inverse function
of this `PartialHomeomorph` has derivative `f'.symm`. -/
def toPartialHomeomorph (hf : HasStrictFDerivAt f (f' : E âL[ð] F) a) : PartialHomeomorph E F :=
-- the open set and its properties are extracted (noncomputably, via `Classical.choose`)
-- from `approximates_deriv_on_open_nhds`
ApproximatesLinearOn.toPartialHomeomorph f (Classical.choose hf.approximates_deriv_on_open_nhds)
(Classical.choose_spec hf.approximates_deriv_on_open_nhds).2.2
(f'.subsingleton_or_nnnorm_symm_pos.imp id fun hf' =>
NNReal.half_lt_self <| ne_of_gt <| inv_pos.2 hf')
(Classical.choose_spec hf.approximates_deriv_on_open_nhds).2.1
variable {f}
/-- The forward map of `hf.toPartialHomeomorph f` is (definitionally) `f` itself. -/
@[simp]
theorem toPartialHomeomorph_coe (hf : HasStrictFDerivAt f (f' : E âL[ð] F) a) :
(hf.toPartialHomeomorph f : E â F) = f :=
rfl
/-- The base point `a` lies in the source of the partial homeomorphism. -/
theorem mem_toPartialHomeomorph_source (hf : HasStrictFDerivAt f (f' : E âL[ð] F) a) :
a â (hf.toPartialHomeomorph f).source :=
(Classical.choose_spec hf.approximates_deriv_on_open_nhds).1
/-- The image `f a` lies in the target of the partial homeomorphism. -/
theorem image_mem_toPartialHomeomorph_target (hf : HasStrictFDerivAt f (f' : E âL[ð] F) a) :
f a â (hf.toPartialHomeomorph f).target :=
(hf.toPartialHomeomorph f).map_source hf.mem_toPartialHomeomorph_source
/-- `f` maps the neighborhood filter of `a` onto the neighborhood filter of `f a`. -/
theorem map_nhds_eq_of_equiv (hf : HasStrictFDerivAt f (f' : E âL[ð] F) a) :
map f (ð a) = ð (f a) :=
(hf.toPartialHomeomorph f).map_nhds_eq hf.mem_toPartialHomeomorph_source
variable (f f' a)
/-- Given a function `f` with an invertible derivative, returns a function that is locally
inverse to `f`: the inverse map of the `PartialHomeomorph` built by `toPartialHomeomorph`. -/
def localInverse (hf : HasStrictFDerivAt f (f' : E âL[ð] F) a) : F â E :=
(hf.toPartialHomeomorph f).symm
variable {f f' a}
/-- `localInverse` is (definitionally) the inverse of the partial homeomorphism. -/
theorem localInverse_def (hf : HasStrictFDerivAt f (f' : E âL[ð] F) a) :
hf.localInverse f _ _ = (hf.toPartialHomeomorph f).symm :=
rfl
/-- The local inverse is a left inverse of `f` in a neighborhood of `a`. -/
theorem eventually_left_inverse (hf : HasStrictFDerivAt f (f' : E âL[ð] F) a) :
âá¶ x in ð a, hf.localInverse f f' a (f x) = x :=
(hf.toPartialHomeomorph f).eventually_left_inverse hf.mem_toPartialHomeomorph_source
/-- At the base point itself, the local inverse undoes `f`. -/
@[simp]
theorem localInverse_apply_image (hf : HasStrictFDerivAt f (f' : E âL[ð] F) a) :
hf.localInverse f f' a (f a) = a :=
hf.eventually_left_inverse.self_of_nhds
/-- The local inverse is a right inverse of `f` in a neighborhood of `f a`. -/
theorem eventually_right_inverse (hf : HasStrictFDerivAt f (f' : E âL[ð] F) a) :
âá¶ y in ð (f a), f (hf.localInverse f f' a y) = y :=
(hf.toPartialHomeomorph f).eventually_right_inverse' hf.mem_toPartialHomeomorph_source
/-- The local inverse is continuous at `f a`. -/
theorem localInverse_continuousAt (hf : HasStrictFDerivAt f (f' : E âL[ð] F) a) :
ContinuousAt (hf.localInverse f f' a) (f a) :=
(hf.toPartialHomeomorph f).continuousAt_symm hf.image_mem_toPartialHomeomorph_target
/-- The local inverse tends to `a` along the neighborhood filter of `f a`. -/
theorem localInverse_tendsto (hf : HasStrictFDerivAt f (f' : E âL[ð] F) a) :
Tendsto (hf.localInverse f f' a) (ð <| f a) (ð a) :=
(hf.toPartialHomeomorph f).tendsto_symm hf.mem_toPartialHomeomorph_source
/-- Any local left inverse `g` of `f` near `a` agrees with `localInverse` near `f a`. -/
theorem localInverse_unique (hf : HasStrictFDerivAt f (f' : E âL[ð] F) a) {g : F â E}
(hg : âá¶ x in ð a, g (f x) = x) : âá¶ y in ð (f a), g y = localInverse f f' a hf y :=
eventuallyEq_of_left_inv_of_right_inv hg hf.eventually_right_inverse <|
(hf.toPartialHomeomorph f).tendsto_symm hf.mem_toPartialHomeomorph_source
/-- If `f` has an invertible derivative `f'` at `a` in the sense of strict differentiability `(hf)`,
then the inverse function `hf.localInverse f` has derivative `f'.symm` at `f a`. -/
theorem to_localInverse (hf : HasStrictFDerivAt f (f' : E âL[ð] F) a) :
HasStrictFDerivAt (hf.localInverse f f' a) (f'.symm : F âL[ð] E) (f a) :=
-- apply the derivative-of-inverse lemma for partial homeomorphisms; `simpa` rewrites
-- the symm of the homeomorphism back into `localInverse`
(hf.toPartialHomeomorph f).hasStrictFDerivAt_symm hf.image_mem_toPartialHomeomorph_target <| by
simpa [â localInverse_def] using hf
/-- If `f : E â F` has an invertible derivative `f'` at `a` in the sense of strict differentiability
and `g (f x) = x` in a neighborhood of `a`, then `g` has derivative `f'.symm` at `f a`.
For a version assuming `f (g y) = y` and continuity of `g` at `f a` but not `[CompleteSpace E]`
see `of_local_left_inverse`. -/
theorem to_local_left_inverse (hf : HasStrictFDerivAt f (f' : E âL[ð] F) a) {g : F â E}
(hg : âá¶ x in ð a, g (f x) = x) : HasStrictFDerivAt g (f'.symm : F âL[ð] E) (f a) :=
-- `g` agrees with `localInverse` near `f a` (by `localInverse_unique`), so it inherits
-- the strict derivative via `congr_of_eventuallyEq`
hf.to_localInverse.congr_of_eventuallyEq <| (hf.localInverse_unique hg).mono fun _ => Eq.symm
end HasStrictFDerivAt
/-- If a function has an invertible strict derivative at all points, then it is an open map. -/
theorem isOpenMap_of_hasStrictFDerivAt_equiv [CompleteSpace E] {f : E â F} {f' : E â E âL[ð] F}
(hf : â x, HasStrictFDerivAt f (f' x : E âL[ð] F) x) : IsOpenMap f :=
-- openness follows pointwise from `map_nhds_eq_of_equiv` applied at each `x`
isOpenMap_iff_nhds_le.2 fun x => (hf x).map_nhds_eq_of_equiv.ge
-- Deprecated compatibility alias; use `isOpenMap_of_hasStrictFDerivAt_equiv` instead.
@[deprecated (since := "2024-03-23")]
alias open_map_of_strict_fderiv_equiv := isOpenMap_of_hasStrictFDerivAt_equiv
|
Analysis\Calculus\InverseFunctionTheorem\FiniteDimensional.lean | /-
Copyright (c) 2022 Sébastien Gouëzel. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Sébastien Gouëzel
-/
import Mathlib.Analysis.Calculus.InverseFunctionTheorem.ApproximatesLinearOn
import Mathlib.Analysis.Normed.Module.FiniteDimension
/-!
# A lemma about `ApproximatesLinearOn` that needs `FiniteDimensional`
In this file we prove that in a real vector space,
a function `f` that approximates a linear equivalence on a subset `s`
can be extended to a homeomorphism of the whole space.
This used to be the only lemma in `Mathlib/Analysis/Calculus/Inverse`
depending on `FiniteDimensional`, so it was moved to a new file when the original file got split.
-/
open Set
open scoped NNReal
namespace ApproximatesLinearOn
/-- In a real vector space, a function `f` that approximates a linear equivalence on a subset `s`
can be extended to a homeomorphism of the whole space. -/
theorem exists_homeomorph_extension {E : Type*} [NormedAddCommGroup E] [NormedSpace â E]
{F : Type*} [NormedAddCommGroup F] [NormedSpace â F] [FiniteDimensional â F] {s : Set E}
{f : E â F} {f' : E âL[â] F} {c : ââ¥0} (hf : ApproximatesLinearOn f (f' : E âL[â] F) s c)
(hc : Subsingleton E âš lipschitzExtensionConstant F * c < â(f'.symm : F âL[â] E)âââ»Â¹) :
â g : E ââ F, EqOn f g s := by
-- the difference `f - f'` is Lipschitz on `s`. It can be extended to a Lipschitz function `u`
-- on the whole space, with a slightly worse Lipschitz constant. Then `f' + u` will be the
-- desired homeomorphism.
obtain âšu, hu, ufâ© :
â u : E â F, LipschitzWith (lipschitzExtensionConstant F * c) u â§ EqOn (f - âf') u s :=
hf.lipschitzOnWith.extend_finite_dimension
-- correct the linear model `f'` by the extended perturbation `u`
let g : E â F := fun x => f' x + u x
have fg : EqOn f g s := fun x hx => by simp_rw [g, â uf hx, Pi.sub_apply, add_sub_cancel]
-- `g` still approximates `f'`, but now on all of `E`
have hg : ApproximatesLinearOn g (f' : E âL[â] F) univ (lipschitzExtensionConstant F * c) := by
apply LipschitzOnWith.approximatesLinearOn
rw [lipschitzOnWith_univ]
convert hu
ext x
simp only [g, add_sub_cancel_left, ContinuousLinearEquiv.coe_coe, Pi.sub_apply]
-- `E` is finite-dimensional as well, transported through the linear equivalence `f'.symm`
haveI : FiniteDimensional â E := f'.symm.finiteDimensional
exact âšhg.toHomeomorph g hc, fgâ©
end ApproximatesLinearOn
|
Analysis\Calculus\IteratedDeriv\Defs.lean | /-
Copyright (c) 2020 Sébastien Gouëzel. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Sébastien Gouëzel
-/
import Mathlib.Analysis.Calculus.Deriv.Basic
import Mathlib.Analysis.Calculus.ContDiff.Defs
/-!
# One-dimensional iterated derivatives
We define the `n`-th derivative of a function `f : ð â F` as a function
`iteratedDeriv n f : ð â F`, as well as a version on domains `iteratedDerivWithin n f s : ð â F`,
and prove their basic properties.
## Main definitions and results
Let `ð` be a nontrivially normed field, and `F` a normed vector space over `ð`. Let `f : ð â F`.
* `iteratedDeriv n f` is the `n`-th derivative of `f`, seen as a function from `ð` to `F`.
It is defined as the `n`-th Fréchet derivative (which is a multilinear map) applied to the
vector `(1, ..., 1)`, to take advantage of all the existing framework, but we show that it
coincides with the naive iterative definition.
* `iteratedDeriv_eq_iterate` states that the `n`-th derivative of `f` is obtained by starting
from `f` and differentiating it `n` times.
* `iteratedDerivWithin n f s` is the `n`-th derivative of `f` within the domain `s`. It only
behaves well when `s` has the unique derivative property.
* `iteratedDerivWithin_eq_iterate` states that the `n`-th derivative of `f` in the domain `s` is
obtained by starting from `f` and differentiating it `n` times within `s`. This only holds when
`s` has the unique derivative property.
## Implementation details
The results are deduced from the corresponding results for the more general (multilinear) iterated
Fréchet derivative. For this, we write `iteratedDeriv n f` as the composition of
`iteratedFDeriv ð n f` and a continuous linear equiv. As continuous linear equivs respect
differentiability and commute with differentiation, this makes it possible to prove readily that
the derivative of the `n`-th derivative is the `n+1`-th derivative in `iteratedDerivWithin_succ`,
by translating the corresponding result `iteratedFDerivWithin_succ_apply_left` for the
iterated Fréchet derivative.
-/
noncomputable section
open scoped Topology
open Filter Asymptotics Set
variable {ð : Type*} [NontriviallyNormedField ð]
variable {F : Type*} [NormedAddCommGroup F] [NormedSpace ð F]
variable {E : Type*} [NormedAddCommGroup E] [NormedSpace ð E]
/-- The `n`-th iterated derivative of a function from `ð` to `F`, as a function from `ð` to `F`.
It is defined by evaluating the (multilinear) `n`-th Fréchet derivative on the constant
vector `(1, ..., 1)`. -/
def iteratedDeriv (n : â) (f : ð â F) (x : ð) : F :=
(iteratedFDeriv ð n f x : (Fin n â ð) â F) fun _ : Fin n => 1
/-- The `n`-th iterated derivative of a function from `ð` to `F` within a set `s`, as a function
from `ð` to `F`. Defined like `iteratedDeriv`, using the iterated Fréchet derivative within
`s` evaluated at `(1, ..., 1)`. -/
def iteratedDerivWithin (n : â) (f : ð â F) (s : Set ð) (x : ð) : F :=
(iteratedFDerivWithin ð n f s x : (Fin n â ð) â F) fun _ : Fin n => 1
variable {n : â} {f : ð â F} {s : Set ð} {x : ð}
/-- The iterated derivative within the whole space coincides with the global iterated
derivative. -/
theorem iteratedDerivWithin_univ : iteratedDerivWithin n f univ = iteratedDeriv n f := by
ext x
rw [iteratedDerivWithin, iteratedDeriv, iteratedFDerivWithin_univ]
/-! ### Properties of the iterated derivative within a set -/
/-- Unfolding lemma: the iterated derivative within `s` is the iterated Fréchet derivative
within `s` evaluated at `(1, ..., 1)` (true by definition). -/
theorem iteratedDerivWithin_eq_iteratedFDerivWithin : iteratedDerivWithin n f s x =
(iteratedFDerivWithin ð n f s x : (Fin n â ð) â F) fun _ : Fin n => 1 :=
rfl
/-- Write the iterated derivative as the composition of a continuous linear equiv and the iterated
Fréchet derivative -/
theorem iteratedDerivWithin_eq_equiv_comp : iteratedDerivWithin n f s =
(ContinuousMultilinearMap.piFieldEquiv ð (Fin n) F).symm â iteratedFDerivWithin ð n f s := by
ext x; rfl
/-- Write the iterated Fréchet derivative as the composition of a continuous linear equiv and the
iterated derivative. -/
theorem iteratedFDerivWithin_eq_equiv_comp :
iteratedFDerivWithin ð n f s =
ContinuousMultilinearMap.piFieldEquiv ð (Fin n) F â iteratedDerivWithin n f s := by
-- invert the previous lemma by cancelling the equivalence against its inverse
rw [iteratedDerivWithin_eq_equiv_comp, â Function.comp.assoc, LinearIsometryEquiv.self_comp_symm,
Function.id_comp]
/-- The `n`-th Fréchet derivative applied to a vector `(m 0, ..., m (n-1))` is the derivative
multiplied by the product of the `m i`s. -/
theorem iteratedFDerivWithin_apply_eq_iteratedDerivWithin_mul_prod {m : Fin n â ð} :
(iteratedFDerivWithin ð n f s x : (Fin n â ð) â F) m =
(â i, m i) ⢠iteratedDerivWithin n f s x := by
-- pull the scalars out of the multilinear map one coordinate at a time
rw [iteratedDerivWithin_eq_iteratedFDerivWithin, â ContinuousMultilinearMap.map_smul_univ]
simp
/-- The iterated Fréchet derivative and the iterated derivative within `s` have the same norm. -/
theorem norm_iteratedFDerivWithin_eq_norm_iteratedDerivWithin :
âiteratedFDerivWithin ð n f s xâ = âiteratedDerivWithin n f s xâ := by
rw [iteratedDerivWithin_eq_equiv_comp, Function.comp_apply, LinearIsometryEquiv.norm_map]
/-- The `0`-th iterated derivative within `s` is the function itself. -/
@[simp]
theorem iteratedDerivWithin_zero : iteratedDerivWithin 0 f s = f := by
ext x
simp [iteratedDerivWithin]
/-- The first iterated derivative within `s` is `derivWithin`, at points where `s` has the
unique differentiability property. -/
@[simp]
theorem iteratedDerivWithin_one {x : ð} (h : UniqueDiffWithinAt ð s x) :
iteratedDerivWithin 1 f s x = derivWithin f s x := by
simp only [iteratedDerivWithin, iteratedFDerivWithin_one_apply h]; rfl
/-- If the first `n` derivatives within a set of a function are continuous, and its first `n-1`
derivatives are differentiable, then the function is `C^n`. This is not an equivalence in general,
but this is an equivalence when the set has unique derivatives, see
`contDiffOn_iff_continuousOn_differentiableOn_deriv`. -/
theorem contDiffOn_of_continuousOn_differentiableOn_deriv {n : ââ}
(Hcont : â m : â, (m : ââ) †n â ContinuousOn (fun x => iteratedDerivWithin m f s x) s)
(Hdiff : â m : â, (m : ââ) < n â DifferentiableOn ð (fun x => iteratedDerivWithin m f s x) s) :
ContDiffOn ð n f s := by
apply contDiffOn_of_continuousOn_differentiableOn
-- translate both hypotheses to the Fréchet side through the linear isometry equivalence,
-- which preserves continuity and differentiability
· simpa only [iteratedFDerivWithin_eq_equiv_comp, LinearIsometryEquiv.comp_continuousOn_iff]
· simpa only [iteratedFDerivWithin_eq_equiv_comp, LinearIsometryEquiv.comp_differentiableOn_iff]
/-- To check that a function is `n` times continuously differentiable, it suffices to check that its
first `n` derivatives are differentiable. This is slightly too strong as the condition we
require on the `n`-th derivative is differentiability instead of continuity, but it has the
advantage of avoiding the discussion of continuity in the proof (and for `n = â` this is optimal).
-/
theorem contDiffOn_of_differentiableOn_deriv {n : ââ}
(h : â m : â, (m : ââ) †n â DifferentiableOn ð (iteratedDerivWithin m f s) s) :
ContDiffOn ð n f s := by
apply contDiffOn_of_differentiableOn
simpa only [iteratedFDerivWithin_eq_equiv_comp, LinearIsometryEquiv.comp_differentiableOn_iff]
/-- On a set with unique derivatives, a `C^n` function has derivatives up to `n` which are
continuous. -/
theorem ContDiffOn.continuousOn_iteratedDerivWithin {n : ââ} {m : â} (h : ContDiffOn ð n f s)
(hmn : (m : ââ) †n) (hs : UniqueDiffOn ð s) : ContinuousOn (iteratedDerivWithin m f s) s := by
simpa only [iteratedDerivWithin_eq_equiv_comp, LinearIsometryEquiv.comp_continuousOn_iff] using
h.continuousOn_iteratedFDerivWithin hmn hs
/-- Pointwise version: a function `C^n` within `s` at `x` has differentiable iterated
derivatives of order `m < n` within `s` at `x`, when `insert x s` has unique derivatives. -/
theorem ContDiffWithinAt.differentiableWithinAt_iteratedDerivWithin {n : ââ} {m : â}
(h : ContDiffWithinAt ð n f s x) (hmn : (m : ââ) < n) (hs : UniqueDiffOn ð (insert x s)) :
DifferentiableWithinAt ð (iteratedDerivWithin m f s) s x := by
simpa only [iteratedDerivWithin_eq_equiv_comp,
LinearIsometryEquiv.comp_differentiableWithinAt_iff] using
h.differentiableWithinAt_iteratedFDerivWithin hmn hs
/-- On a set with unique derivatives, a `C^n` function has derivatives less than `n` which are
differentiable. -/
theorem ContDiffOn.differentiableOn_iteratedDerivWithin {n : ââ} {m : â} (h : ContDiffOn ð n f s)
(hmn : (m : ââ) < n) (hs : UniqueDiffOn ð s) :
DifferentiableOn ð (iteratedDerivWithin m f s) s := fun x hx =>
(h x hx).differentiableWithinAt_iteratedDerivWithin hmn <| by rwa [insert_eq_of_mem hx]
/-- The property of being `C^n`, initially defined in terms of the Fréchet derivative, can be
reformulated in terms of the one-dimensional derivative on sets with unique derivatives. -/
theorem contDiffOn_iff_continuousOn_differentiableOn_deriv {n : ââ} (hs : UniqueDiffOn ð s) :
ContDiffOn ð n f s â (â m : â, (m : ââ) †n â ContinuousOn (iteratedDerivWithin m f s) s) â§
â m : â, (m : ââ) < n â DifferentiableOn ð (iteratedDerivWithin m f s) s := by
simp only [contDiffOn_iff_continuousOn_differentiableOn hs, iteratedFDerivWithin_eq_equiv_comp,
LinearIsometryEquiv.comp_continuousOn_iff, LinearIsometryEquiv.comp_differentiableOn_iff]
/-- The `n+1`-th iterated derivative within a set with unique derivatives can be obtained by
differentiating the `n`-th iterated derivative. -/
theorem iteratedDerivWithin_succ {x : ð} (hxs : UniqueDiffWithinAt ð s x) :
iteratedDerivWithin (n + 1) f s x = derivWithin (iteratedDerivWithin n f s) s x := by
rw [iteratedDerivWithin_eq_iteratedFDerivWithin, iteratedFDerivWithin_succ_apply_left,
iteratedFDerivWithin_eq_equiv_comp, LinearIsometryEquiv.comp_fderivWithin _ hxs, derivWithin]
-- unfold the `mkPiRing` packaging of the derivative and evaluate it at `1`
change ((ContinuousMultilinearMap.mkPiRing ð (Fin n) ((fderivWithin ð
(iteratedDerivWithin n f s) s x : ð â F) 1) : (Fin n â ð) â F) fun i : Fin n => 1) =
(fderivWithin ð (iteratedDerivWithin n f s) s x : ð â F) 1
simp
/-- The `n`-th iterated derivative within a set with unique derivatives can be obtained by
iterating `n` times the differentiation operation. -/
theorem iteratedDerivWithin_eq_iterate {x : ð} (hs : UniqueDiffOn ð s) (hx : x â s) :
iteratedDerivWithin n f s x = (fun g : ð â F => derivWithin g s)^[n] f x := by
induction' n with n IH generalizing x
· simp
-- the inductive hypothesis holds on all of `s`, so we can rewrite under `derivWithin`
· rw [iteratedDerivWithin_succ (hs x hx), Function.iterate_succ']
exact derivWithin_congr (fun y hy => IH hy) (IH hx)
/-- The `n+1`-th iterated derivative within a set with unique derivatives can be obtained by
taking the `n`-th derivative of the derivative. -/
theorem iteratedDerivWithin_succ' {x : ð} (hxs : UniqueDiffOn ð s) (hx : x â s) :
iteratedDerivWithin (n + 1) f s x = (iteratedDerivWithin n (derivWithin f s) s) x := by
rw [iteratedDerivWithin_eq_iterate hxs hx, iteratedDerivWithin_eq_iterate hxs hx]; rfl
/-! ### Properties of the iterated derivative on the whole space -/
/-- Unfolding lemma: the iterated derivative is the iterated Fréchet derivative evaluated
at `(1, ..., 1)` (true by definition). -/
theorem iteratedDeriv_eq_iteratedFDeriv :
iteratedDeriv n f x = (iteratedFDeriv ð n f x : (Fin n â ð) â F) fun _ : Fin n => 1 :=
rfl
/-- Write the iterated derivative as the composition of a continuous linear equiv and the iterated
Fréchet derivative -/
theorem iteratedDeriv_eq_equiv_comp : iteratedDeriv n f =
(ContinuousMultilinearMap.piFieldEquiv ð (Fin n) F).symm â iteratedFDeriv ð n f := by
ext x; rfl
/-- Write the iterated Fréchet derivative as the composition of a continuous linear equiv and the
iterated derivative. -/
theorem iteratedFDeriv_eq_equiv_comp : iteratedFDeriv ð n f =
ContinuousMultilinearMap.piFieldEquiv ð (Fin n) F â iteratedDeriv n f := by
-- invert the previous lemma by cancelling the equivalence against its inverse
rw [iteratedDeriv_eq_equiv_comp, â Function.comp.assoc, LinearIsometryEquiv.self_comp_symm,
Function.id_comp]
/-- The `n`-th Fréchet derivative applied to a vector `(m 0, ..., m (n-1))` is the derivative
multiplied by the product of the `m i`s. -/
theorem iteratedFDeriv_apply_eq_iteratedDeriv_mul_prod {m : Fin n â ð} :
(iteratedFDeriv ð n f x : (Fin n â ð) â F) m = (â i, m i) ⢠iteratedDeriv n f x := by
rw [iteratedDeriv_eq_iteratedFDeriv, â ContinuousMultilinearMap.map_smul_univ]; simp
/-- The iterated Fréchet derivative and the iterated derivative have the same norm. -/
theorem norm_iteratedFDeriv_eq_norm_iteratedDeriv :
âiteratedFDeriv ð n f xâ = âiteratedDeriv n f xâ := by
rw [iteratedDeriv_eq_equiv_comp, Function.comp_apply, LinearIsometryEquiv.norm_map]
/-- The `0`-th iterated derivative is the function itself. -/
@[simp]
theorem iteratedDeriv_zero : iteratedDeriv 0 f = f := by ext x; simp [iteratedDeriv]
/-- The first iterated derivative is the usual derivative. -/
@[simp]
theorem iteratedDeriv_one : iteratedDeriv 1 f = deriv f := by ext x; simp [iteratedDeriv]
/-- The property of being `C^n`, initially defined in terms of the Fréchet derivative, can be
reformulated in terms of the one-dimensional derivative. -/
theorem contDiff_iff_iteratedDeriv {n : ââ} : ContDiff ð n f â
(â m : â, (m : ââ) †n â Continuous (iteratedDeriv m f)) â§
â m : â, (m : ââ) < n â Differentiable ð (iteratedDeriv m f) := by
simp only [contDiff_iff_continuous_differentiable, iteratedFDeriv_eq_equiv_comp,
LinearIsometryEquiv.comp_continuous_iff, LinearIsometryEquiv.comp_differentiable_iff]
/-- To check that a function is `n` times continuously differentiable, it suffices to check that its
first `n` derivatives are differentiable. This is slightly too strong as the condition we
require on the `n`-th derivative is differentiability instead of continuity, but it has the
advantage of avoiding the discussion of continuity in the proof (and for `n = â` this is optimal).
-/
theorem contDiff_of_differentiable_iteratedDeriv {n : ââ}
(h : â m : â, (m : ââ) †n â Differentiable ð (iteratedDeriv m f)) : ContDiff ð n f :=
contDiff_iff_iteratedDeriv.2 âšfun m hm => (h m hm).continuous, fun m hm => h m (le_of_lt hm)â©
/-- A `C^n` function has continuous iterated derivatives up to order `n`. -/
theorem ContDiff.continuous_iteratedDeriv {n : ââ} (m : â) (h : ContDiff ð n f)
(hmn : (m : ââ) †n) : Continuous (iteratedDeriv m f) :=
(contDiff_iff_iteratedDeriv.1 h).1 m hmn
/-- A `C^n` function has differentiable iterated derivatives of order strictly less than `n`. -/
theorem ContDiff.differentiable_iteratedDeriv {n : ââ} (m : â) (h : ContDiff ð n f)
(hmn : (m : ââ) < n) : Differentiable ð (iteratedDeriv m f) :=
(contDiff_iff_iteratedDeriv.1 h).2 m hmn
/-- The `n+1`-th iterated derivative can be obtained by differentiating the `n`-th
iterated derivative. -/
theorem iteratedDeriv_succ : iteratedDeriv (n + 1) f = deriv (iteratedDeriv n f) := by
ext x
-- reduce to the `Within` version on `univ`, where `univ` has unique derivatives
rw [â iteratedDerivWithin_univ, â iteratedDerivWithin_univ, â derivWithin_univ]
exact iteratedDerivWithin_succ uniqueDiffWithinAt_univ
/-- The `n`-th iterated derivative can be obtained by iterating `n` times the
differentiation operation. -/
theorem iteratedDeriv_eq_iterate : iteratedDeriv n f = deriv^[n] f := by
ext x
rw [â iteratedDerivWithin_univ]
convert iteratedDerivWithin_eq_iterate uniqueDiffOn_univ (F := F) (mem_univ x)
simp [derivWithin_univ]
/-- The `n+1`-th iterated derivative can be obtained by taking the `n`-th derivative of the
derivative. -/
theorem iteratedDeriv_succ' : iteratedDeriv (n + 1) f = iteratedDeriv n (deriv f) := by
rw [iteratedDeriv_eq_iterate, iteratedDeriv_eq_iterate]; rfl
|
Analysis\Calculus\IteratedDeriv\Lemmas.lean | /-
Copyright (c) 2023 Chris Birkbeck. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Chris Birkbeck, Ruben Van de Velde
-/
import Mathlib.Analysis.Calculus.ContDiff.Basic
import Mathlib.Analysis.Calculus.Deriv.Mul
import Mathlib.Analysis.Calculus.Deriv.Shift
import Mathlib.Analysis.Calculus.IteratedDeriv.Defs
/-!
# One-dimensional iterated derivatives
This file contains a number of further results on `iteratedDerivWithin` that need more imports
than are available in `Mathlib/Analysis/Calculus/IteratedDeriv/Defs.lean`.
-/
variable
{ð : Type*} [NontriviallyNormedField ð]
{F : Type*} [NormedAddCommGroup F] [NormedSpace ð F]
{R : Type*} [Semiring R] [Module R F] [SMulCommClass ð R F] [ContinuousConstSMul R F]
{n : â} {x : ð} {s : Set ð} (hx : x â s) (h : UniqueDiffOn ð s) {f g : ð â F}
/-- The iterated derivative within `s` of a sum of two `C^n` functions is the sum of their
iterated derivatives. -/
theorem iteratedDerivWithin_add (hf : ContDiffOn ð n f s) (hg : ContDiffOn ð n g s) :
iteratedDerivWithin n (f + g) s x =
iteratedDerivWithin n f s x + iteratedDerivWithin n g s x := by
simp_rw [iteratedDerivWithin, iteratedFDerivWithin_add_apply hf hg h hx,
ContinuousMultilinearMap.add_apply]
/-- Functions that agree on `s` have equal iterated derivatives within `s` on `s`. -/
theorem iteratedDerivWithin_congr (hfg : Set.EqOn f g s) :
Set.EqOn (iteratedDerivWithin n f s) (iteratedDerivWithin n g s) s := by
induction n generalizing f g with
| zero => rwa [iteratedDerivWithin_zero]
| succ n IH =>
intro y hy
have : UniqueDiffWithinAt ð s y := h.uniqueDiffWithinAt hy
rw [iteratedDerivWithin_succ this, iteratedDerivWithin_succ this]
exact derivWithin_congr (IH hfg) (IH hfg hy)
/-- For `n > 0`, adding a constant does not change the `n`-th iterated derivative within `s`. -/
theorem iteratedDerivWithin_const_add (hn : 0 < n) (c : F) :
iteratedDerivWithin n (fun z => c + f z) s x = iteratedDerivWithin n f s x := by
-- peel off one derivative: the first `derivWithin` kills the constant
obtain âšn, rflâ© := n.exists_eq_succ_of_ne_zero hn.ne'
rw [iteratedDerivWithin_succ' h hx, iteratedDerivWithin_succ' h hx]
refine iteratedDerivWithin_congr h ?_ hx
intro y hy
exact derivWithin_const_add (h.uniqueDiffWithinAt hy) _
/-- For `n > 0`, the `n`-th iterated derivative within `s` of `c - f` equals that of `-f`. -/
theorem iteratedDerivWithin_const_neg (hn : 0 < n) (c : F) :
iteratedDerivWithin n (fun z => c - f z) s x = iteratedDerivWithin n (fun z => -f z) s x := by
-- peel off one derivative: the first `derivWithin` kills the constant and flips the sign
obtain âšn, rflâ© := n.exists_eq_succ_of_ne_zero hn.ne'
rw [iteratedDerivWithin_succ' h hx, iteratedDerivWithin_succ' h hx]
refine iteratedDerivWithin_congr h ?_ hx
intro y hy
have : UniqueDiffWithinAt ð s y := h.uniqueDiffWithinAt hy
rw [derivWithin.neg this]
exact derivWithin_const_sub this _
/-- Scalar multiples commute with the iterated derivative within `s` of a `C^n` function. -/
theorem iteratedDerivWithin_const_smul (c : R) (hf : ContDiffOn ð n f s) :
iteratedDerivWithin n (c ⢠f) s x = c ⢠iteratedDerivWithin n f s x := by
simp_rw [iteratedDerivWithin]
rw [iteratedFDerivWithin_const_smul_apply hf h hx]
simp only [ContinuousMultilinearMap.smul_apply]
/-- Multiplication by a constant commutes with the iterated derivative within `s` of a
scalar-valued `C^n` function. -/
theorem iteratedDerivWithin_const_mul (c : ð) {f : ð â ð} (hf : ContDiffOn ð n f s) :
iteratedDerivWithin n (fun z => c * f z) s x = c * iteratedDerivWithin n f s x := by
simpa using iteratedDerivWithin_const_smul (F := ð) hx h c hf
variable (f) in
/-- The iterated derivative within `s` of `-f` is the negative of that of `f`. -/
theorem iteratedDerivWithin_neg :
iteratedDerivWithin n (-f) s x = -iteratedDerivWithin n f s x := by
rw [iteratedDerivWithin, iteratedDerivWithin, iteratedFDerivWithin_neg_apply h hx,
ContinuousMultilinearMap.neg_apply]
variable (f) in
/-- Pointwise (lambda) form of `iteratedDerivWithin_neg`. -/
theorem iteratedDerivWithin_neg' :
iteratedDerivWithin n (fun z => -f z) s x = -iteratedDerivWithin n f s x :=
iteratedDerivWithin_neg hx h f
/-- The iterated derivative within `s` of a difference of two `C^n` functions is the
difference of their iterated derivatives. -/
theorem iteratedDerivWithin_sub (hf : ContDiffOn ð n f s) (hg : ContDiffOn ð n g s) :
iteratedDerivWithin n (f - g) s x =
iteratedDerivWithin n f s x - iteratedDerivWithin n g s x := by
rw [sub_eq_add_neg, sub_eq_add_neg, Pi.neg_def, iteratedDerivWithin_add hx h hf hg.neg,
iteratedDerivWithin_neg' hx h]
/-- Chain rule for scaling the argument: the `n`-th iterated derivative of `x ⊠f (c * x)` is
`c ^ n` times the `n`-th iterated derivative of `f` evaluated at `c * x`. -/
theorem iteratedDeriv_const_smul {n : â} {f : ð â F} (h : ContDiff ð n f) (c : ð) :
iteratedDeriv n (fun x => f (c * x)) = fun x => c ^ n ⢠iteratedDeriv n f (c * x) := by
induction n with
| zero => simp
| succ n ih =>
funext x
-- differentiability facts needed to apply `deriv_const_smul` and the chain rule below
have hâ : DifferentiableAt ð (iteratedDeriv n f) (c * x) :=
h.differentiable_iteratedDeriv n (Nat.cast_lt.mpr n.lt_succ_self) |>.differentiableAt
have hâ : DifferentiableAt ð (fun x => iteratedDeriv n f (c * x)) x := by
rw [â Function.comp_def]
apply DifferentiableAt.comp
· exact h.differentiable_iteratedDeriv n (Nat.cast_lt.mpr n.lt_succ_self) |>.differentiableAt
· exact differentiableAt_id'.const_mul _
-- apply the inductive hypothesis and one chain-rule step, collecting powers of `c`
rw [iteratedDeriv_succ, ih h.of_succ, deriv_const_smul _ hâ, iteratedDeriv_succ,
â Function.comp_def, deriv.scomp x hâ (differentiableAt_id'.const_mul _),
deriv_const_mul _ differentiableAt_id', deriv_id'', smul_smul, mul_one, pow_succ]
/-- Scalar-valued specialization of `iteratedDeriv_const_smul`, with `â¢` replaced by `*`. -/
theorem iteratedDeriv_const_mul {n : â} {f : ð â ð} (h : ContDiff ð n f) (c : ð) :
iteratedDeriv n (fun x => f (c * x)) = fun x => c ^ n * iteratedDeriv n f (c * x) := by
simpa only [smul_eq_mul] using iteratedDeriv_const_smul h c
/-- The iterated derivative of `-f` is the negative of the iterated derivative of `f`. -/
lemma iteratedDeriv_neg (n : â) (f : ð â F) (a : ð) :
iteratedDeriv n (fun x ⊠-(f x)) a = -(iteratedDeriv n f a) := by
simp_rw [â iteratedDerivWithin_univ, iteratedDerivWithin_neg' (Set.mem_univ a) uniqueDiffOn_univ]
/-- Reflecting the argument: the `n`-th iterated derivative of `x ⊠f (-x)` at `a` is
`(-1) ^ n` times the `n`-th iterated derivative of `f` at `-a`. -/
lemma iteratedDeriv_comp_neg (n : â) (f : ð â F) (a : ð) :
iteratedDeriv n (fun x ⊠f (-x)) a = (-1 : ð) ^ n ⢠iteratedDeriv n f (-a) := by
induction' n with n ih generalizing a
· simp only [Nat.zero_eq, iteratedDeriv_zero, pow_zero, one_smul]
-- promote the pointwise inductive hypothesis to a function equality so it can be
-- rewritten under `deriv`
· have ih' : iteratedDeriv n (fun x ⊠f (-x)) = fun x ⊠(-1 : ð) ^ n ⢠iteratedDeriv n f (-x) :=
funext ih
rw [iteratedDeriv_succ, iteratedDeriv_succ, ih', pow_succ', neg_mul, one_mul,
deriv_comp_neg (f := fun x ⊠(-1 : ð) ^ n ⢠iteratedDeriv n f x), deriv_const_smul',
neg_smul]
|
Analysis\Calculus\LineDeriv\Basic.lean | /-
Copyright (c) 2023 Sébastien Gouëzel. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Sébastien Gouëzel
-/
import Mathlib.Analysis.Calculus.Deriv.Comp
import Mathlib.Analysis.Calculus.Deriv.Add
import Mathlib.Analysis.Calculus.Deriv.Mul
import Mathlib.Analysis.Calculus.Deriv.Slope
/-!
# Line derivatives
We define the line derivative of a function `f : E â F`, at a point `x : E` along a vector `v : E`,
as the element `f' : F` such that `f (x + t ⢠v) = f x + t ⢠f' + o (t)` as `t` tends to `0` in
the scalar field `ð`, if it exists. It is denoted by `lineDeriv ð f x v`.
This notion is generally less well behaved than the full Fréchet derivative (for instance, the
composition of functions which are line-differentiable is not line-differentiable in general).
The Fréchet derivative should therefore be favored over this one in general, although the line
derivative may sometimes prove handy.
The line derivative in direction `v` is also called the Gateaux derivative in direction `v`,
although the term "Gateaux derivative" is sometimes reserved for the situation where there is
such a derivative in all directions, for the map `v ⊠lineDeriv ð f x v` (which doesn't have to be
linear in general).
## Main definition and results
We mimic the definitions and statements for the Fréchet derivative and the one-dimensional
derivative. We define in particular the following objects:
* `LineDifferentiableWithinAt ð f s x v`
* `LineDifferentiableAt ð f x v`
* `HasLineDerivWithinAt ð f f' s x v`
* `HasLineDerivAt ð f s x v`
* `lineDerivWithin ð f s x v`
* `lineDeriv ð f x v`
and develop about them a basic API inspired by the one for the Fréchet derivative.
We depart from the Fréchet derivative in two places, as the dependence of the following predicates
on the direction would make them barely usable:
* We do not define an analogue of the predicate `UniqueDiffOn`;
* We do not define `LineDifferentiableOn` nor `LineDifferentiable`.
-/
noncomputable section
open scoped Topology Filter ENNReal NNReal
open Filter Asymptotics Set
variable {ð : Type*} [NontriviallyNormedField ð]
variable {F : Type*} [NormedAddCommGroup F] [NormedSpace ð F]
section Module
/-!
Results that do not rely on a topological structure on `E`
-/
variable (ð)
variable {E : Type*} [AddCommGroup E] [Module ð E]
/-- `f` has the derivative `f'` at the point `x` along the direction `v` in the set `s`.
That is, `f (x + t v) = f x + t ⢠f' + o (t)` when `t` tends to `0` and `x + t v â s`.
Note that this definition is less well behaved than the total Fréchet derivative, which
should generally be favored over this one. -/
def HasLineDerivWithinAt (f : E â F) (f' : F) (s : Set E) (x : E) (v : E) :=
  -- one-dimensional derivative of the restriction of `f` to the parametrized line,
  -- within the preimage of `s` under the parametrization
  HasDerivWithinAt (fun t ⊠f (x + t ⢠v)) f' ((fun t ⊠x + t ⢠v) â»Â¹' s) (0 : ð)

/-- `f` has the derivative `f'` at the point `x` along the direction `v`.
That is, `f (x + t v) = f x + t ⢠f' + o (t)` when `t` tends to `0`.
Note that this definition is less well behaved than the total Fréchet derivative, which
should generally be favored over this one. -/
def HasLineDerivAt (f : E â F) (f' : F) (x : E) (v : E) :=
  HasDerivAt (fun t ⊠f (x + t ⢠v)) f' (0 : ð)

/-- `f` is line-differentiable at the point `x` in the direction `v` in the set `s` if there
exists `f'` such that `f (x + t v) = f x + t ⢠f' + o (t)` when `t` tends to `0` and `x + t v â s`.
-/
def LineDifferentiableWithinAt (f : E â F) (s : Set E) (x : E) (v : E) : Prop :=
  DifferentiableWithinAt ð (fun t ⊠f (x + t ⢠v)) ((fun t ⊠x + t ⢠v) â»Â¹' s) (0 : ð)

/-- `f` is line-differentiable at the point `x` in the direction `v` if there
exists `f'` such that `f (x + t v) = f x + t ⢠f' + o (t)` when `t` tends to `0`. -/
def LineDifferentiableAt (f : E â F) (x : E) (v : E) : Prop :=
  DifferentiableAt ð (fun t ⊠f (x + t ⢠v)) (0 : ð)

/-- Line derivative of `f` at the point `x` in the direction `v` within the set `s`, if it exists.
Zero otherwise.
If the line derivative exists (i.e., `â f', HasLineDerivWithinAt ð f f' s x v`), then
`f (x + t v) = f x + t lineDerivWithin ð f s x v + o (t)` when `t` tends to `0` and `x + t v â s`.
-/
def lineDerivWithin (f : E â F) (s : Set E) (x : E) (v : E) : F :=
  -- `derivWithin` returns the junk value `0` when no derivative exists
  derivWithin (fun t ⊠f (x + t ⢠v)) ((fun t ⊠x + t ⢠v) â»Â¹' s) (0 : ð)

/-- Line derivative of `f` at the point `x` in the direction `v`, if it exists. Zero otherwise.
If the line derivative exists (i.e., `â f', HasLineDerivAt ð f f' x v`), then
`f (x + t v) = f x + t lineDeriv ð f x v + o (t)` when `t` tends to `0`.
-/
def lineDeriv (f : E â F) (x : E) (v : E) : F :=
  deriv (fun t ⊠f (x + t ⢠v)) (0 : ð)
variable {ð}
variable {f fâ : E â F} {f' fâ' fâ' : F} {s t : Set E} {x v : E}
-- Transfer lemmas: each statement below reduces a line-derivative fact to the
-- corresponding fact about one-dimensional derivatives of the map `t => f (x + t v)`.

/-- Restricting the set preserves `HasLineDerivWithinAt`. -/
lemma HasLineDerivWithinAt.mono (hf : HasLineDerivWithinAt ð f f' s x v) (hst : t â s) :
    HasLineDerivWithinAt ð f f' t x v :=
  HasDerivWithinAt.mono hf (preimage_mono hst)

/-- An unrestricted line derivative is in particular a line derivative within any set. -/
lemma HasLineDerivAt.hasLineDerivWithinAt (hf : HasLineDerivAt ð f f' x v) (s : Set E) :
    HasLineDerivWithinAt ð f f' s x v :=
  HasDerivAt.hasDerivWithinAt hf

/-- Having a line derivative within a set implies line differentiability within it. -/
lemma HasLineDerivWithinAt.lineDifferentiableWithinAt (hf : HasLineDerivWithinAt ð f f' s x v) :
    LineDifferentiableWithinAt ð f s x v :=
  HasDerivWithinAt.differentiableWithinAt hf

/-- Having a line derivative implies line differentiability. -/
theorem HasLineDerivAt.lineDifferentiableAt (hf : HasLineDerivAt ð f f' x v) :
    LineDifferentiableAt ð f x v :=
  HasDerivAt.differentiableAt hf

/-- A line-differentiable function has `lineDerivWithin` as a line derivative. -/
theorem LineDifferentiableWithinAt.hasLineDerivWithinAt (h : LineDifferentiableWithinAt ð f s x v) :
    HasLineDerivWithinAt ð f (lineDerivWithin ð f s x v) s x v :=
  DifferentiableWithinAt.hasDerivWithinAt h

/-- A line-differentiable function has `lineDeriv` as a line derivative. -/
theorem LineDifferentiableAt.hasLineDerivAt (h : LineDifferentiableAt ð f x v) :
    HasLineDerivAt ð f (lineDeriv ð f x v) x v :=
  DifferentiableAt.hasDerivAt h

/-- Within the whole space, the line derivative coincides with the unrestricted one. -/
@[simp] lemma hasLineDerivWithinAt_univ :
    HasLineDerivWithinAt ð f f' univ x v â HasLineDerivAt ð f f' x v := by
  simp only [HasLineDerivWithinAt, HasLineDerivAt, preimage_univ, hasDerivWithinAt_univ]

/-- `lineDerivWithin` takes the junk value `0` when `f` is not line differentiable. -/
theorem lineDerivWithin_zero_of_not_lineDifferentiableWithinAt
    (h : ¬LineDifferentiableWithinAt ð f s x v) :
    lineDerivWithin ð f s x v = 0 :=
  derivWithin_zero_of_not_differentiableWithinAt h

/-- `lineDeriv` takes the junk value `0` when `f` is not line differentiable. -/
theorem lineDeriv_zero_of_not_lineDifferentiableAt (h : ¬LineDifferentiableAt ð f x v) :
    lineDeriv ð f x v = 0 :=
  deriv_zero_of_not_differentiableAt h

/-- Characterization of the line derivative in little-o terms near `0`. -/
theorem hasLineDerivAt_iff_isLittleO_nhds_zero :
    HasLineDerivAt ð f f' x v â
      (fun t : ð => f (x + t ⢠v) - f x - t ⢠f') =o[ð 0] fun t => t := by
  simp only [HasLineDerivAt, hasDerivAt_iff_isLittleO_nhds_zero, zero_add, zero_smul, add_zero]

/-- Uniqueness of the line derivative at a point in a given direction. -/
theorem HasLineDerivAt.unique (hâ : HasLineDerivAt ð f fâ' x v) (hâ : HasLineDerivAt ð f fâ' x v) :
    fâ' = fâ' :=
  HasDerivAt.unique hâ hâ

/-- If `f` has line derivative `f'`, then `lineDeriv` computes to `f'`. -/
protected theorem HasLineDerivAt.lineDeriv (h : HasLineDerivAt ð f f' x v) :
    lineDeriv ð f x v = f' := by
  rw [h.unique h.lineDifferentiableAt.hasLineDerivAt]
/-- Line differentiability within `univ` is plain line differentiability. -/
theorem lineDifferentiableWithinAt_univ :
    LineDifferentiableWithinAt ð f univ x v â LineDifferentiableAt ð f x v := by
  simp only [LineDifferentiableWithinAt, LineDifferentiableAt, preimage_univ,
    differentiableWithinAt_univ]

/-- Line differentiability implies line differentiability within any set. -/
theorem LineDifferentiableAt.lineDifferentiableWithinAt (h : LineDifferentiableAt ð f x v) :
    LineDifferentiableWithinAt ð f s x v :=
  (differentiableWithinAt_univ.2 h).mono (subset_univ _)

/-- `lineDerivWithin` on `univ` is `lineDeriv`. -/
@[simp]
theorem lineDerivWithin_univ : lineDerivWithin ð f univ x v = lineDeriv ð f x v := by
  simp [lineDerivWithin, lineDeriv]

/-- Monotonicity of line differentiability with respect to the set. -/
theorem LineDifferentiableWithinAt.mono (h : LineDifferentiableWithinAt ð f t x v) (st : s â t) :
    LineDifferentiableWithinAt ð f s x v :=
  (h.hasLineDerivWithinAt.mono st).lineDifferentiableWithinAt

/-- A function agreeing with `f` on a smaller set `t` (and at `x`) inherits the line
derivative of `f` on `t`. -/
theorem HasLineDerivWithinAt.congr_mono (h : HasLineDerivWithinAt ð f f' s x v) (ht : EqOn fâ f t)
    (hx : fâ x = f x) (hâ : t â s) : HasLineDerivWithinAt ð fâ f' t x v :=
  HasDerivWithinAt.congr_mono h (fun y hy ⊠ht hy) (by simpa using hx) (preimage_mono hâ)

/-- A function agreeing with `f` on `s` and at `x` inherits the line derivative of `f`. -/
theorem HasLineDerivWithinAt.congr (h : HasLineDerivWithinAt ð f f' s x v) (hs : EqOn fâ f s)
    (hx : fâ x = f x) : HasLineDerivWithinAt ð fâ f' s x v :=
  h.congr_mono hs hx (Subset.refl _)

/-- Variant of `HasLineDerivWithinAt.congr` assuming membership instead of equality at `x`. -/
theorem HasLineDerivWithinAt.congr' (h : HasLineDerivWithinAt ð f f' s x v)
    (hs : EqOn fâ f s) (hx : x â s) :
    HasLineDerivWithinAt ð fâ f' s x v :=
  h.congr hs (hs hx)

/-- Congruence lemma for `LineDifferentiableWithinAt`, with a smaller set. -/
theorem LineDifferentiableWithinAt.congr_mono (h : LineDifferentiableWithinAt ð f s x v)
    (ht : EqOn fâ f t) (hx : fâ x = f x) (hâ : t â s) :
    LineDifferentiableWithinAt ð fâ t x v :=
  (HasLineDerivWithinAt.congr_mono h.hasLineDerivWithinAt ht hx hâ).differentiableWithinAt

/-- Congruence lemma for `LineDifferentiableWithinAt`. -/
theorem LineDifferentiableWithinAt.congr (h : LineDifferentiableWithinAt ð f s x v)
    (ht : â x â s, fâ x = f x) (hx : fâ x = f x) :
    LineDifferentiableWithinAt ð fâ s x v :=
  LineDifferentiableWithinAt.congr_mono h ht hx (Subset.refl _)

/-- Functions agreeing on `s` and at `x` have the same `lineDerivWithin`. -/
theorem lineDerivWithin_congr (hs : EqOn fâ f s) (hx : fâ x = f x) :
    lineDerivWithin ð fâ s x v = lineDerivWithin ð f s x v :=
  derivWithin_congr (fun y hy ⊠hs hy) (by simpa using hx)

/-- Variant of `lineDerivWithin_congr` assuming membership instead of equality at `x`. -/
theorem lineDerivWithin_congr' (hs : EqOn fâ f s) (hx : x â s) :
    lineDerivWithin ð fâ s x v = lineDerivWithin ð f s x v :=
  lineDerivWithin_congr hs (hs hx)
/-- The line derivative is the limit of difference quotients along the line. -/
theorem hasLineDerivAt_iff_tendsto_slope_zero :
    HasLineDerivAt ð f f' x v â
      Tendsto (fun (t : ð) ⊠tâ»Â¹ ⢠(f (x + t ⢠v) - f x)) (ð[â ] 0) (ð f') := by
  simp only [HasLineDerivAt, hasDerivAt_iff_tendsto_slope_zero, zero_add,
    zero_smul, add_zero]

alias âšHasLineDerivAt.tendsto_slope_zero, _â© := hasLineDerivAt_iff_tendsto_slope_zero

/-- One-sided (right) version of `HasLineDerivAt.tendsto_slope_zero`. -/
theorem HasLineDerivAt.tendsto_slope_zero_right [PartialOrder ð] (h : HasLineDerivAt ð f f' x v) :
    Tendsto (fun (t : ð) ⊠tâ»Â¹ ⢠(f (x + t ⢠v) - f x)) (ð[>] 0) (ð f') :=
  h.tendsto_slope_zero.mono_left (nhds_right'_le_nhds_ne 0)

/-- One-sided (left) version of `HasLineDerivAt.tendsto_slope_zero`. -/
theorem HasLineDerivAt.tendsto_slope_zero_left [PartialOrder ð] (h : HasLineDerivAt ð f f' x v) :
    Tendsto (fun (t : ð) ⊠tâ»Â¹ ⢠(f (x + t ⢠v) - f x)) (ð[<] 0) (ð f') :=
  h.tendsto_slope_zero.mono_left (nhds_left'_le_nhds_ne 0)

/-- If the line through `x` in direction `v` stays in `s` for `t` near `0`, a line
derivative within `s` upgrades to an unrestricted line derivative. -/
theorem HasLineDerivWithinAt.hasLineDerivAt'
    (h : HasLineDerivWithinAt ð f f' s x v) (hs : âá¶ t : ð in ð 0, x + t ⢠v â s) :
    HasLineDerivAt ð f f' x v :=
  h.hasDerivAt hs
end Module
section NormedSpace
/-!
Results that need a normed space structure on `E`
-/
variable {E : Type*} [NormedAddCommGroup E] [NormedSpace ð E]
{f fâ fâ : E â F} {f' : F} {s t : Set E} {x v : E} {L : E âL[ð] F}
/-- A line derivative within `t` is also one within `s` as soon as `t` is a neighborhood
of `x` within `s`. -/
theorem HasLineDerivWithinAt.mono_of_mem
    (h : HasLineDerivWithinAt ð f f' t x v) (hst : t â ð[s] x) :
    HasLineDerivWithinAt ð f f' s x v := by
  apply HasDerivWithinAt.mono_of_mem h
  -- pull the neighborhood condition back along the continuous parametrization of the line
  apply ContinuousWithinAt.preimage_mem_nhdsWithin'' _ hst (by simp)
  apply Continuous.continuousWithinAt; fun_prop

/-- A line derivative within a set which is a neighborhood of `x` is an unrestricted
line derivative. -/
theorem HasLineDerivWithinAt.hasLineDerivAt
    (h : HasLineDerivWithinAt ð f f' s x v) (hs : s â ð x) :
    HasLineDerivAt ð f f' x v :=
  h.hasLineDerivAt' <| (Continuous.tendsto' (by fun_prop) 0 _ (by simp)).eventually hs

/-- Line differentiability within a neighborhood of `x` gives line differentiability
at `x`. -/
theorem LineDifferentiableWithinAt.lineDifferentiableAt (h : LineDifferentiableWithinAt ð f s x v)
    (hs : s â ð x) : LineDifferentiableAt ð f x v :=
  (h.hasLineDerivWithinAt.hasLineDerivAt hs).lineDifferentiableAt

/-- A Fréchet derivative `L` within a set gives a line derivative within the set in any
direction `v`, with value `L v`. -/
lemma HasFDerivWithinAt.hasLineDerivWithinAt (hf : HasFDerivWithinAt f L s x) (v : E) :
    HasLineDerivWithinAt ð f (L v) s x v := by
  let F := fun (t : ð) ⊠x + t ⢠v
  rw [show x = F (0 : ð) by simp [F]] at hf
  -- the parametrization of the line has derivative `v` at `0`
  have A : HasDerivWithinAt F (0 + (1 : ð) ⢠v) (F â»Â¹' s) 0 :=
    ((hasDerivAt_const (0 : ð) x).add ((hasDerivAt_id' (0 : ð)).smul_const v)).hasDerivWithinAt
  simp only [one_smul, zero_add] at A
  -- chain rule: compose `f` with the parametrization of the line
  exact hf.comp_hasDerivWithinAt (x := (0 : ð)) A (mapsTo_preimage F s)

/-- A Fréchet derivative `L` gives a line derivative in any direction `v`, with
value `L v`. -/
lemma HasFDerivAt.hasLineDerivAt (hf : HasFDerivAt f L x) (v : E) :
    HasLineDerivAt ð f (L v) x v := by
  rw [â hasLineDerivWithinAt_univ]
  exact hf.hasFDerivWithinAt.hasLineDerivWithinAt v

/-- For a differentiable function, the line derivative is the Fréchet derivative applied
to the direction. -/
lemma DifferentiableAt.lineDeriv_eq_fderiv (hf : DifferentiableAt ð f x) :
    lineDeriv ð f x v = fderiv ð f x v :=
  (hf.hasFDerivAt.hasLineDerivAt v).lineDeriv

/-- Transfer line differentiability from `s` to `t` when `s` is a neighborhood of `x`
within `t`. -/
theorem LineDifferentiableWithinAt.mono_of_mem (h : LineDifferentiableWithinAt ð f s x v)
    (hst : s â ð[t] x) : LineDifferentiableWithinAt ð f t x v :=
  (h.hasLineDerivWithinAt.mono_of_mem hst).lineDifferentiableWithinAt

/-- On a neighborhood of `x`, `lineDerivWithin` agrees with `lineDeriv`. -/
theorem lineDerivWithin_of_mem_nhds (h : s â ð x) :
    lineDerivWithin ð f s x v = lineDeriv ð f x v := by
  apply derivWithin_of_mem_nhds
  apply (Continuous.continuousAt _).preimage_mem_nhds (by simpa using h)
  fun_prop

/-- On an open set, `lineDerivWithin` agrees with `lineDeriv`. -/
theorem lineDerivWithin_of_isOpen (hs : IsOpen s) (hx : x â s) :
    lineDerivWithin ð f s x v = lineDeriv ð f x v :=
  lineDerivWithin_of_mem_nhds (hs.mem_nhds hx)
/-- `HasLineDerivWithinAt` only depends on the germ of the set at `x`. -/
theorem hasLineDerivWithinAt_congr_set (h : s =á¶ [ð x] t) :
    HasLineDerivWithinAt ð f f' s x v â HasLineDerivWithinAt ð f f' t x v := by
  apply hasDerivWithinAt_congr_set
  let F := fun (t : ð) ⊠x + t ⢠v
  have B : ContinuousAt F 0 := by apply Continuous.continuousAt; fun_prop
  -- the eventual equality of sets at `x` pulls back along the continuous parametrization
  have : s =á¶ [ð (F 0)] t := by convert h; simp [F]
  exact B.preimage_mem_nhds this

/-- `LineDifferentiableWithinAt` only depends on the germ of the set at `x`. -/
theorem lineDifferentiableWithinAt_congr_set (h : s =á¶ [ð x] t) :
    LineDifferentiableWithinAt ð f s x v â LineDifferentiableWithinAt ð f t x v :=
  âšfun h' ⊠((hasLineDerivWithinAt_congr_set h).1
    h'.hasLineDerivWithinAt).lineDifferentiableWithinAt,
  fun h' ⊠((hasLineDerivWithinAt_congr_set h.symm).1
    h'.hasLineDerivWithinAt).lineDifferentiableWithinAtâ©

/-- `lineDerivWithin` only depends on the germ of the set at `x`. -/
theorem lineDerivWithin_congr_set (h : s =á¶ [ð x] t) :
    lineDerivWithin ð f s x v = lineDerivWithin ð f t x v := by
  apply derivWithin_congr_set
  let F := fun (t : ð) ⊠x + t ⢠v
  have B : ContinuousAt F 0 := by apply Continuous.continuousAt; fun_prop
  have : s =á¶ [ð (F 0)] t := by convert h; simp [F]
  exact B.preimage_mem_nhds this

/-- `HasLineDerivAt` only depends on the germ of the function at `x`. -/
theorem Filter.EventuallyEq.hasLineDerivAt_iff (h : fâ =á¶ [ð x] fâ) :
    HasLineDerivAt ð fâ f' x v â HasLineDerivAt ð fâ f' x v := by
  apply hasDerivAt_iff
  let F := fun (t : ð) ⊠x + t ⢠v
  have B : ContinuousAt F 0 := by apply Continuous.continuousAt; fun_prop
  -- the eventual equality of functions pulls back along the parametrization of the line
  have : fâ =á¶ [ð (F 0)] fâ := by convert h; simp [F]
  exact B.preimage_mem_nhds this

/-- Line differentiability only depends on the germ of the function at `x`. -/
theorem Filter.EventuallyEq.lineDifferentiableAt_iff (h : fâ =á¶ [ð x] fâ) :
    LineDifferentiableAt ð fâ x v â LineDifferentiableAt ð fâ x v :=
  âšfun h' ⊠(h.hasLineDerivAt_iff.1 h'.hasLineDerivAt).lineDifferentiableAt,
  fun h' ⊠(h.hasLineDerivAt_iff.2 h'.hasLineDerivAt).lineDifferentiableAtâ©

/-- `HasLineDerivWithinAt` only depends on the germ of the function within `s` at `x`. -/
theorem Filter.EventuallyEq.hasLineDerivWithinAt_iff (h : fâ =á¶ [ð[s] x] fâ) (hx : fâ x = fâ x) :
    HasLineDerivWithinAt ð fâ f' s x v â HasLineDerivWithinAt ð fâ f' s x v := by
  apply hasDerivWithinAt_iff
  · have A : Continuous (fun (t : ð) ⊠x + t ⢠v) := by fun_prop
    exact A.continuousWithinAt.preimage_mem_nhdsWithin'' h (by simp)
  · simpa using hx

/-- Variant of `Filter.EventuallyEq.hasLineDerivWithinAt_iff` assuming membership. -/
theorem Filter.EventuallyEq.hasLineDerivWithinAt_iff_of_mem (h : fâ =á¶ [ð[s] x] fâ) (hx : x â s) :
    HasLineDerivWithinAt ð fâ f' s x v â HasLineDerivWithinAt ð fâ f' s x v :=
  h.hasLineDerivWithinAt_iff (h.eq_of_nhdsWithin hx)

/-- `LineDifferentiableWithinAt` only depends on the germ of the function within `s`
at `x`. -/
theorem Filter.EventuallyEq.lineDifferentiableWithinAt_iff
    (h : fâ =á¶ [ð[s] x] fâ) (hx : fâ x = fâ x) :
    LineDifferentiableWithinAt ð fâ s x v â LineDifferentiableWithinAt ð fâ s x v :=
  âšfun h' ⊠((h.hasLineDerivWithinAt_iff hx).1 h'.hasLineDerivWithinAt).lineDifferentiableWithinAt,
  fun h' ⊠((h.hasLineDerivWithinAt_iff hx).2 h'.hasLineDerivWithinAt).lineDifferentiableWithinAtâ©

/-- Variant of `Filter.EventuallyEq.lineDifferentiableWithinAt_iff` assuming membership. -/
theorem Filter.EventuallyEq.lineDifferentiableWithinAt_iff_of_mem
    (h : fâ =á¶ [ð[s] x] fâ) (hx : x â s) :
    LineDifferentiableWithinAt ð fâ s x v â LineDifferentiableWithinAt ð fâ s x v :=
  h.lineDifferentiableWithinAt_iff (h.eq_of_nhdsWithin hx)
/-- Replace `f` by an eventually-within-equal function in `HasLineDerivWithinAt`. -/
lemma HasLineDerivWithinAt.congr_of_eventuallyEq (hf : HasLineDerivWithinAt ð f f' s x v)
    (h'f : fâ =á¶ [ð[s] x] f) (hx : fâ x = f x) : HasLineDerivWithinAt ð fâ f' s x v := by
  apply HasDerivWithinAt.congr_of_eventuallyEq hf _ (by simp [hx])
  -- pull the eventual equality back along the continuous parametrization of the line
  have A : Continuous (fun (t : ð) ⊠x + t ⢠v) := by fun_prop
  exact A.continuousWithinAt.preimage_mem_nhdsWithin'' h'f (by simp)

/-- Replace `f` by an eventually-equal function in `HasLineDerivAt`. -/
theorem HasLineDerivAt.congr_of_eventuallyEq (h : HasLineDerivAt ð f f' x v) (hâ : fâ =á¶ [ð x] f) :
    HasLineDerivAt ð fâ f' x v := by
  apply HasDerivAt.congr_of_eventuallyEq h
  let F := fun (t : ð) ⊠x + t ⢠v
  rw [show x = F 0 by simp [F]] at hâ
  exact (Continuous.continuousAt (by fun_prop)).preimage_mem_nhds hâ

/-- Replace `f` by an eventually-within-equal function in `LineDifferentiableWithinAt`. -/
theorem LineDifferentiableWithinAt.congr_of_eventuallyEq (h : LineDifferentiableWithinAt ð f s x v)
    (hâ : fâ =á¶ [ð[s] x] f) (hx : fâ x = f x) : LineDifferentiableWithinAt ð fâ s x v :=
  (h.hasLineDerivWithinAt.congr_of_eventuallyEq hâ hx).differentiableWithinAt

/-- Replace `f` by an eventually-equal function in `LineDifferentiableAt`. -/
theorem LineDifferentiableAt.congr_of_eventuallyEq
    (h : LineDifferentiableAt ð f x v) (hL : fâ =á¶ [ð x] f) :
    LineDifferentiableAt ð fâ x v := by
  apply DifferentiableAt.congr_of_eventuallyEq h
  let F := fun (t : ð) ⊠x + t ⢠v
  rw [show x = F 0 by simp [F]] at hL
  exact (Continuous.continuousAt (by fun_prop)).preimage_mem_nhds hL

/-- Eventually-within-equal functions have the same `lineDerivWithin`. -/
theorem Filter.EventuallyEq.lineDerivWithin_eq (hs : fâ =á¶ [ð[s] x] f) (hx : fâ x = f x) :
    lineDerivWithin ð fâ s x v = lineDerivWithin ð f s x v := by
  apply derivWithin_eq ?_ (by simpa using hx)
  have A : Continuous (fun (t : ð) ⊠x + t ⢠v) := by fun_prop
  exact A.continuousWithinAt.preimage_mem_nhdsWithin'' hs (by simp)

/-- Eventually-equal functions have the same `lineDerivWithin`. -/
theorem Filter.EventuallyEq.lineDerivWithin_eq_nhds (h : fâ =á¶ [ð x] f) :
    lineDerivWithin ð fâ s x v = lineDerivWithin ð f s x v :=
  (h.filter_mono nhdsWithin_le_nhds).lineDerivWithin_eq h.self_of_nhds

/-- Eventually-equal functions have the same `lineDeriv`. -/
theorem Filter.EventuallyEq.lineDeriv_eq (h : fâ =á¶ [ð x] f) :
    lineDeriv ð fâ x v = lineDeriv ð f x v := by
  rw [â lineDerivWithin_univ, â lineDerivWithin_univ, h.lineDerivWithin_eq_nhds]
/-- Converse to the mean value inequality: if `f` is line differentiable at `xâ` and `C`-lipschitz
on a neighborhood of `xâ` then its line derivative at `xâ` in the direction `v` has norm
bounded by `C * âvâ`. This version only assumes that `âf x - f xââ †C * âx - xââ` in a
neighborhood of `x`. -/
theorem HasLineDerivAt.le_of_lip' {f : E â F} {f' : F} {xâ : E} (hf : HasLineDerivAt ð f f' xâ v)
    {C : â} (hCâ : 0 †C) (hlip : âá¶ x in ð xâ, âf x - f xââ †C * âx - xââ) :
    âf'â †C * âvâ := by
  apply HasDerivAt.le_of_lip' hf (by positivity)
  -- transfer the Lipschitz-type bound along the parametrization of the line:
  -- the one-dimensional restriction satisfies the analogous bound with constant `C * âvâ`
  have A : Continuous (fun (t : ð) ⊠xâ + t ⢠v) := by fun_prop
  have : âá¶ x in ð (xâ + (0 : ð) ⢠v), âf x - f xââ †C * âx - xââ := by simpa using hlip
  filter_upwards [(A.continuousAt (x := 0)).preimage_mem_nhds this] with t ht
  simp only [preimage_setOf_eq, add_sub_cancel_left, norm_smul, mem_setOf_eq, mul_comm (âtâ)] at ht
  simpa [mul_assoc] using ht

/-- Converse to the mean value inequality: if `f` is line differentiable at `xâ` and `C`-lipschitz
on a neighborhood of `xâ` then its line derivative at `xâ` in the direction `v` has norm
bounded by `C * âvâ`. This version only assumes that `âf x - f xââ †C * âx - xââ` in a
neighborhood of `x`. -/
theorem HasLineDerivAt.le_of_lipschitzOn
    {f : E â F} {f' : F} {xâ : E} (hf : HasLineDerivAt ð f f' xâ v)
    {s : Set E} (hs : s â ð xâ) {C : ââ¥0} (hlip : LipschitzOnWith C f s) :
    âf'â †C * âvâ := by
  -- the Lipschitz condition on `s` gives the pointwise bound required by `le_of_lip'`
  refine hf.le_of_lip' C.coe_nonneg ?_
  filter_upwards [hs] with x hx using hlip.norm_sub_le hx (mem_of_mem_nhds hs)

/-- Converse to the mean value inequality: if `f` is line differentiable at `xâ` and `C`-lipschitz
then its line derivative at `xâ` in the direction `v` has norm bounded by `C * âvâ`. -/
theorem HasLineDerivAt.le_of_lipschitz
    {f : E â F} {f' : F} {xâ : E} (hf : HasLineDerivAt ð f f' xâ v)
    {C : ââ¥0} (hlip : LipschitzWith C f) : âf'â †C * âvâ :=
  hf.le_of_lipschitzOn univ_mem (lipschitzOnWith_univ.2 hlip)

variable (ð)

/-- Converse to the mean value inequality: if `f` is `C`-lipschitz
on a neighborhood of `xâ` then its line derivative at `xâ` in the direction `v` has norm
bounded by `C * âvâ`. This version only assumes that `âf x - f xââ †C * âx - xââ` in a
neighborhood of `x`.
Version using `lineDeriv`. -/
theorem norm_lineDeriv_le_of_lip' {f : E â F} {xâ : E}
    {C : â} (hCâ : 0 †C) (hlip : âá¶ x in ð xâ, âf x - f xââ †C * âx - xââ) :
    âlineDeriv ð f xâ vâ †C * âvâ := by
  -- no differentiability assumption: `lineDeriv` is `0` (hence bounded) otherwise
  apply norm_deriv_le_of_lip' (by positivity)
  have A : Continuous (fun (t : ð) ⊠xâ + t ⢠v) := by fun_prop
  have : âá¶ x in ð (xâ + (0 : ð) ⢠v), âf x - f xââ †C * âx - xââ := by simpa using hlip
  filter_upwards [(A.continuousAt (x := 0)).preimage_mem_nhds this] with t ht
  simp only [preimage_setOf_eq, add_sub_cancel_left, norm_smul, mem_setOf_eq, mul_comm (âtâ)] at ht
  simpa [mul_assoc] using ht

/-- Converse to the mean value inequality: if `f` is `C`-lipschitz on a neighborhood of `xâ`
then its line derivative at `xâ` in the direction `v` has norm bounded by `C * âvâ`.
Version using `lineDeriv`. -/
theorem norm_lineDeriv_le_of_lipschitzOn {f : E â F} {xâ : E} {s : Set E} (hs : s â ð xâ)
    {C : ââ¥0} (hlip : LipschitzOnWith C f s) : âlineDeriv ð f xâ vâ †C * âvâ := by
  refine norm_lineDeriv_le_of_lip' ð C.coe_nonneg ?_
  filter_upwards [hs] with x hx using hlip.norm_sub_le hx (mem_of_mem_nhds hs)

/-- Converse to the mean value inequality: if `f` is `C`-lipschitz then
its line derivative at `xâ` in the direction `v` has norm bounded by `C * âvâ`.
Version using `lineDeriv`. -/
theorem norm_lineDeriv_le_of_lipschitz {f : E â F} {xâ : E}
    {C : ââ¥0} (hlip : LipschitzWith C f) : âlineDeriv ð f xâ vâ †C * âvâ :=
  norm_lineDeriv_le_of_lipschitzOn ð univ_mem (lipschitzOnWith_univ.2 hlip)
variable {ð}
end NormedSpace
section Zero
variable {E : Type*} [AddCommGroup E] [Module ð E] {f : E â F} {s : Set E} {x : E}
/-- Along the direction `0`, every function trivially has line derivative `0` within a set:
the restriction to the line is constant. -/
theorem hasLineDerivWithinAt_zero : HasLineDerivWithinAt ð f 0 s x 0 := by
  simp [HasLineDerivWithinAt, hasDerivWithinAt_const]

/-- Along the direction `0`, every function trivially has line derivative `0`. -/
theorem hasLineDerivAt_zero : HasLineDerivAt ð f 0 x 0 := by
  simp [HasLineDerivAt, hasDerivAt_const]

/-- Every function is line differentiable within any set in the direction `0`. -/
theorem lineDifferentiableWithinAt_zero : LineDifferentiableWithinAt ð f s x 0 :=
  hasLineDerivWithinAt_zero.lineDifferentiableWithinAt

/-- Every function is line differentiable in the direction `0`. -/
theorem lineDifferentiableAt_zero : LineDifferentiableAt ð f x 0 :=
  hasLineDerivAt_zero.lineDifferentiableAt

/-- The line derivative in the direction `0` vanishes. -/
theorem lineDeriv_zero : lineDeriv ð f x 0 = 0 :=
  hasLineDerivAt_zero.lineDeriv
end Zero
section CompRight
variable {E : Type*} [AddCommGroup E] [Module ð E]
{E' : Type*} [AddCommGroup E'] [Module ð E']
{f : E â F} {f' : F} {x v : E'} {L : E' ââ[ð] E}
/-- A line derivative of `f` precomposed with a linear map `L`, at `x` in direction `v`,
gives a line derivative of `f` at `L x` in direction `L v`: the two parametrized lines
coincide since `L` is linear. -/
theorem HasLineDerivAt.of_comp {v : E'} (hf : HasLineDerivAt ð (f â L) f' x v) :
    HasLineDerivAt ð f f' (L x) (L v) := by
  simpa [HasLineDerivAt] using hf

/-- Line differentiability of `f` precomposed with a linear map `L` transfers to `f`
at `L x` in direction `L v`. -/
theorem LineDifferentiableAt.of_comp {v : E'} (hf : LineDifferentiableAt ð (f â L) x v) :
    LineDifferentiableAt ð f (L x) (L v) :=
  hf.hasLineDerivAt.of_comp.lineDifferentiableAt
end CompRight
section SMul
variable {E : Type*} [AddCommGroup E] [Module ð E] {f : E â F} {s : Set E} {x v : E} {f' : F}
/-- Scaling the direction by `c` scales the line derivative by `c`. -/
theorem HasLineDerivWithinAt.smul (h : HasLineDerivWithinAt ð f f' s x v) (c : ð) :
    HasLineDerivWithinAt ð f (c ⢠f') s x (c ⢠v) := by
  simp only [HasLineDerivWithinAt] at h â¢
  -- reparametrize the line by `t => c * t` and apply the one-dimensional chain rule
  let g := fun (t : ð) ⊠c ⢠t
  let s' := (fun (t : ð) ⊠x + t ⢠v) â»Â¹' s
  have A : HasDerivAt g c 0 := by simpa using (hasDerivAt_id (0 : ð)).const_smul c
  have B : HasDerivWithinAt (fun t ⊠f (x + t ⢠v)) f' s' (g 0) := by simpa [g] using h
  have Z := B.scomp (0 : ð) A.hasDerivWithinAt (mapsTo_preimage g s')
  simp only [g, s', Function.comp, smul_eq_mul, mul_comm c, â smul_smul] at Z
  convert Z
  ext t
  simp [â smul_smul]

/-- For nonzero `c`, having a line derivative in the scaled direction is equivalent to
having one in the original direction (within a set). -/
theorem hasLineDerivWithinAt_smul_iff {c : ð} (hc : c â 0) :
    HasLineDerivWithinAt ð f (c ⢠f') s x (c ⢠v) â HasLineDerivWithinAt ð f f' s x v :=
  âšfun h ⊠by simpa [smul_smul, inv_mul_cancel hc] using h.smul (c â»Â¹), fun h ⊠h.smul câ©

/-- Version of `HasLineDerivWithinAt.smul` for unrestricted line derivatives. -/
theorem HasLineDerivAt.smul (h : HasLineDerivAt ð f f' x v) (c : ð) :
    HasLineDerivAt ð f (c ⢠f') x (c ⢠v) := by
  simp only [â hasLineDerivWithinAt_univ] at h â¢
  exact HasLineDerivWithinAt.smul h c

/-- For nonzero `c`, scaling the direction gives an equivalence of `HasLineDerivAt`. -/
theorem hasLineDerivAt_smul_iff {c : ð} (hc : c â 0) :
    HasLineDerivAt ð f (c ⢠f') x (c ⢠v) â HasLineDerivAt ð f f' x v :=
  âšfun h ⊠by simpa [smul_smul, inv_mul_cancel hc] using h.smul (c â»Â¹), fun h ⊠h.smul câ©

/-- Line differentiability within a set is preserved by scaling the direction. -/
theorem LineDifferentiableWithinAt.smul (h : LineDifferentiableWithinAt ð f s x v) (c : ð) :
    LineDifferentiableWithinAt ð f s x (c ⢠v) :=
  (h.hasLineDerivWithinAt.smul c).lineDifferentiableWithinAt

/-- For nonzero `c`, scaling the direction gives an equivalence of line differentiability
within a set. -/
theorem lineDifferentiableWithinAt_smul_iff {c : ð} (hc : c â 0) :
    LineDifferentiableWithinAt ð f s x (c ⢠v) â LineDifferentiableWithinAt ð f s x v :=
  âšfun h ⊠by simpa [smul_smul, inv_mul_cancel hc] using h.smul (c â»Â¹), fun h ⊠h.smul câ©

/-- Line differentiability is preserved by scaling the direction. -/
theorem LineDifferentiableAt.smul (h : LineDifferentiableAt ð f x v) (c : ð) :
    LineDifferentiableAt ð f x (c ⢠v) :=
  (h.hasLineDerivAt.smul c).lineDifferentiableAt

/-- For nonzero `c`, scaling the direction gives an equivalence of line differentiability. -/
theorem lineDifferentiableAt_smul_iff {c : ð} (hc : c â 0) :
    LineDifferentiableAt ð f x (c ⢠v) â LineDifferentiableAt ð f x v :=
  âšfun h ⊠by simpa [smul_smul, inv_mul_cancel hc] using h.smul (c â»Â¹), fun h ⊠h.smul câ©

/-- The line derivative is homogeneous in the direction, without any differentiability
assumption: in the non-differentiable case both sides are the junk value `0`. -/
theorem lineDeriv_smul {c : ð} : lineDeriv ð f x (c ⢠v) = c ⢠lineDeriv ð f x v := by
  rcases eq_or_ne c 0 with rfl|hc
  · simp [lineDeriv_zero]
  by_cases H : LineDifferentiableAt ð f x v
  · exact (H.hasLineDerivAt.smul c).lineDeriv
  · -- both sides are `0` in the non-differentiable case
    have H' : ¬ (LineDifferentiableAt ð f x (c ⢠v)) := by
      simpa [lineDifferentiableAt_smul_iff hc] using H
    simp [lineDeriv_zero_of_not_lineDifferentiableAt, H, H']

/-- The line derivative is odd in the direction. -/
theorem lineDeriv_neg : lineDeriv ð f x (-v) = - lineDeriv ð f x v := by
  rw [â neg_one_smul (R := ð) v, lineDeriv_smul, neg_one_smul]
end SMul
|
Analysis\Calculus\LineDeriv\IntegrationByParts.lean | /-
Copyright (c) 2024 Sébastien Gouëzel. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Sébastien Gouëzel
-/
import Mathlib.Analysis.Calculus.LineDeriv.Basic
import Mathlib.MeasureTheory.Integral.IntegralEqImproper
/-!
# Integration by parts for line derivatives
Let `f, g : E â â` be two differentiable functions on a real vector space endowed with a Haar
measure. Then `â« f * g' = - â« f' * g`, where `f'` and `g'` denote the derivatives of `f` and `g`
in a given direction `v`, provided that `f * g`, `f' * g` and `f * g'` are all integrable.
In this file, we prove this theorem as well as more general versions where the multiplication is
replaced by a general continuous bilinear form, giving versions both for the line derivative and
the Fréchet derivative. These results are derived from the one-dimensional version and a Fubini
argument.
## Main statements
* `integral_bilinear_hasLineDerivAt_right_eq_neg_left_of_integrable`: integration by parts
in terms of line derivatives, with `HasLineDerivAt` assumptions and general bilinear form.
* `integral_bilinear_hasFDerivAt_right_eq_neg_left_of_integrable`: integration by parts
in terms of Fréchet derivatives, with `HasFDerivAt` assumptions and general bilinear form.
* `integral_bilinear_fderiv_right_eq_neg_left_of_integrable`: integration by parts
in terms of Fréchet derivatives, written with `fderiv` assumptions and general bilinear form.
* `integral_smul_fderiv_eq_neg_fderiv_smul_of_integrable`: integration by parts for scalar
action, in terms of Fréchet derivatives, written with `fderiv` assumptions.
* `integral_mul_fderiv_eq_neg_fderiv_mul_of_integrable`: integration by parts for scalar
multiplication, in terms of Fréchet derivatives, written with `fderiv` assumptions.
## Implementation notes
A standard set of assumptions for integration by parts in a finite-dimensional real vector
space (without boundary term) is that the functions tend to zero at infinity and have integrable
derivatives. In this file, we instead assume that the functions are integrable and have integrable
derivatives. These sets of assumptions are not directly comparable (an integrable function with
integrable derivative does *not* have to tend to zero at infinity). The one we use is geared
towards applications to Fourier transforms.
TODO: prove similar theorems assuming that the functions tend to zero at infinity and have
integrable derivatives.
-/
open MeasureTheory Measure FiniteDimensional
variable {E F G W : Type*} [NormedAddCommGroup E] [NormedSpace â E] [NormedAddCommGroup F]
[NormedSpace â F] [NormedAddCommGroup G] [NormedSpace â G] [NormedAddCommGroup W]
[NormedSpace â W] [MeasurableSpace E] [BorelSpace E] {ÎŒ : Measure E}
/-- Auxiliary version of the integration by parts formula for line derivatives, on a product
space `E Ã â` with direction `(0, 1)` and a product measure `ÎŒ.prod volume`.  Deduced from
the one-dimensional integration by parts formula by a Fubini argument. -/
lemma integral_bilinear_hasLineDerivAt_right_eq_neg_left_of_integrable_aux1 [SigmaFinite Ό]
    {f f' : E Ã â â F} {g g' : E Ã â â G} {B : F âL[â] G âL[â] W}
    (hf'g : Integrable (fun x ⊠B (f' x) (g x)) (Ό.prod volume))
    (hfg' : Integrable (fun x ⊠B (f x) (g' x)) (Ό.prod volume))
    (hfg : Integrable (fun x ⊠B (f x) (g x)) (Ό.prod volume))
    (hf : â x, HasLineDerivAt â f (f' x) x (0, 1)) (hg : â x, HasLineDerivAt â g (g' x) x (0, 1)) :
    â« x, B (f x) (g' x) â(ÎŒ.prod volume) = - â« x, B (f' x) (g x) â(ÎŒ.prod volume) := calc
  â« x, B (f x) (g' x) â(ÎŒ.prod volume)
    = â« x, (â« t, B (f (x, t)) (g' (x, t))) âÎŒ := integral_prod _ hfg'
  _ = â« x, (- â« t, B (f' (x, t)) (g (x, t))) âÎŒ := by
      -- apply the one-dimensional statement on almost every vertical slice
      apply integral_congr_ae
      filter_upwards [hf'g.prod_right_ae, hfg'.prod_right_ae, hfg.prod_right_ae]
        with x hf'gx hfg'x hfgx
      apply integral_bilinear_hasDerivAt_right_eq_neg_left_of_integrable ?_ ?_ hfg'x hf'gx hfgx
      -- the line derivative in direction `(0, 1)` restricts, on each slice, to the
      -- ordinary derivative in the second coordinate
      · intro t
        convert (hf (x, t)).scomp_of_eq t ((hasDerivAt_id t).add (hasDerivAt_const t (-t))) (by simp)
          <;> simp
      · intro t
        convert (hg (x, t)).scomp_of_eq t ((hasDerivAt_id t).add (hasDerivAt_const t (-t))) (by simp)
          <;> simp
  _ = - â« x, B (f' x) (g x) â(ÎŒ.prod volume) := by rw [integral_neg, integral_prod _ hf'g]
/-- Auxiliary version of the integration by parts formula for an arbitrary additive Haar
measure on `E Ã â`: reduced to the product-measure case `aux1` via uniqueness of Haar
measure up to a scalar factor. -/
lemma integral_bilinear_hasLineDerivAt_right_eq_neg_left_of_integrable_aux2
    [FiniteDimensional â E] {ÎŒ : Measure (E à â)} [IsAddHaarMeasure ÎŒ]
    {f f' : E Ã â â F} {g g' : E Ã â â G} {B : F âL[â] G âL[â] W}
    (hf'g : Integrable (fun x ⊠B (f' x) (g x)) Ό)
    (hfg' : Integrable (fun x ⊠B (f x) (g' x)) Ό)
    (hfg : Integrable (fun x ⊠B (f x) (g x)) Ό)
    (hf : â x, HasLineDerivAt â f (f' x) x (0, 1)) (hg : â x, HasLineDerivAt â g (g' x) x (0, 1)) :
    â« x, B (f x) (g' x) âÎŒ = - â« x, B (f' x) (g x) âÎŒ := by
  -- compare `ÎŒ` with the product of a Haar measure on `E` and Lebesgue measure on `â`;
  -- they agree up to a positive scalar factor
  let Μ : Measure E := addHaar
  have A : Μ.prod volume = (addHaarScalarFactor (Μ.prod volume) Ό) ⢠Ό :=
    isAddLeftInvariant_eq_smul _ _
  -- transport the integrability hypotheses to the product measure
  have Hf'g : Integrable (fun x ⊠B (f' x) (g x)) (Μ.prod volume) := by
    rw [A]; exact hf'g.smul_measure_nnreal
  have Hfg' : Integrable (fun x ⊠B (f x) (g' x)) (Μ.prod volume) := by
    rw [A]; exact hfg'.smul_measure_nnreal
  have Hfg : Integrable (fun x ⊠B (f x) (g x)) (Μ.prod volume) := by
    rw [A]; exact hfg.smul_measure_nnreal
  -- both integrals rescale by the same factor, so the identity transfers back to `Ό`
  rw [isAddLeftInvariant_eq_smul Ό (Μ.prod volume)]
  simp [integral_bilinear_hasLineDerivAt_right_eq_neg_left_of_integrable_aux1 Hf'g Hfg' Hfg hf hg]
variable [FiniteDimensional â E] [IsAddHaarMeasure ÎŒ]
/-- **Integration by parts for line derivatives**
Version with a general bilinear form `B`.
If `B f g` is integrable, as well as `B f' g` and `B f g'` where `f'` and `g'` are derivatives
of `f` and `g` in a given direction `v`, then `â« B f g' = - â« B f' g`. -/
theorem integral_bilinear_hasLineDerivAt_right_eq_neg_left_of_integrable
    {f f' : E â F} {g g' : E â G} {v : E} {B : F âL[â] G âL[â] W}
    (hf'g : Integrable (fun x ⊠B (f' x) (g x)) Ό) (hfg' : Integrable (fun x ⊠B (f x) (g' x)) Ό)
    (hfg : Integrable (fun x ⊠B (f x) (g x)) Ό)
    (hf : â x, HasLineDerivAt â f (f' x) x v) (hg : â x, HasLineDerivAt â g (g' x) x v) :
    â« x, B (f x) (g' x) âÎŒ = - â« x, B (f' x) (g x) âÎŒ := by
  -- if the target space is not complete, all Bochner integrals are `0` by convention
  by_cases hW : CompleteSpace W; swap
  · simp [integral, hW]
  -- degenerate case `v = 0`: both line derivatives vanish identically
  rcases eq_or_ne v 0 with rfl|hv
  · have Hf' x : f' x = 0 := by
      simpa [(hasLineDerivAt_zero (f := f) (x := x)).lineDeriv] using (hf x).lineDeriv.symm
    have Hg' x : g' x = 0 := by
      simpa [(hasLineDerivAt_zero (f := g) (x := x)).lineDeriv] using (hg x).lineDeriv.symm
    simp [Hf', Hg']
  have : Nontrivial E := nontrivial_iff.2 âšv, 0, hvâ©
  -- choose a continuous linear equivalence `L : E â E' Ã â` sending `v` to `(0, 1)`,
  -- reducing the statement to the product-space case handled by `aux2`
  let n := finrank â E
  let E' := Fin (n - 1) â â
  obtain âšL, hLâ© : â L : E âL[â] (E' Ã â), L v = (0, 1) := by
    have : finrank â (E' Ã â) = n := by simpa [this, E'] using Nat.sub_add_cancel finrank_pos
    have Lâ : E âL[â] (E' Ã â) := (ContinuousLinearEquiv.ofFinrankEq this).symm
    obtain âšM, hMâ© : â M : (E' Ã â) âL[â] (E' Ã â), M (Lâ v) = (0, 1) := by
      apply SeparatingDual.exists_continuousLinearEquiv_apply_eq
      · simpa using hv
      · simp
    exact âšLâ.trans M, by simp [hM]â©
  -- transport the statement along `L`, working with the pushforward measure
  let Μ := Measure.map L Ό
  suffices H : â« (x : E' à â), (B (f (L.symm x))) (g' (L.symm x)) âΜ =
      -â« (x : E' à â), (B (f' (L.symm x))) (g (L.symm x)) âΜ by
    have : Ό = Measure.map L.symm Μ := by
      simp [Measure.map_map L.symm.continuous.measurable L.continuous.measurable]
    have hL : ClosedEmbedding L.symm := L.symm.toHomeomorph.closedEmbedding
    simpa [this, hL.integral_map] using H
  have L_emb : MeasurableEmbedding L := L.toHomeomorph.measurableEmbedding
  apply integral_bilinear_hasLineDerivAt_right_eq_neg_left_of_integrable_aux2
  -- the integrability hypotheses transfer through the measurable embedding `L`
  · simpa [L_emb.integrable_map_iff, Function.comp] using hf'g
  · simpa [L_emb.integrable_map_iff, Function.comp] using hfg'
  · simpa [L_emb.integrable_map_iff, Function.comp] using hfg
  -- the line-derivative hypotheses transfer by precomposing with the linear equivalence
  · intro x
    have : f = (f â L.symm) â (L : E ââ[â] (E' Ã â)) := by ext y; simp
    specialize hf (L.symm x)
    rw [this] at hf
    convert hf.of_comp using 1
    · simp
    · simp [â hL]
  · intro x
    have : g = (g â L.symm) â (L : E ââ[â] (E' Ã â)) := by ext y; simp
    specialize hg (L.symm x)
    rw [this] at hg
    convert hg.of_comp using 1
    · simp
    · simp [â hL]
/-- **Integration by parts for Fréchet derivatives**
Version with a general bilinear form `B`.
If `B f g` is integrable, as well as `B f' g` and `B f g'` where `f'` and `g'` are derivatives
of `f` and `g` in a given direction `v`, then `∫ B f g' = - ∫ B f' g`. -/
theorem integral_bilinear_hasFDerivAt_right_eq_neg_left_of_integrable
{f : E â F} {f' : E â (E âL[â] F)}
{g : E â G} {g' : E â (E âL[â] G)} {v : E} {B : F âL[â] G âL[â] W}
(hf'g : Integrable (fun x ⊠B (f' x v) (g x)) Ό)
(hfg' : Integrable (fun x ⊠B (f x) (g' x v)) Ό)
(hfg : Integrable (fun x ⊠B (f x) (g x)) Ό)
(hf : â x, HasFDerivAt f (f' x) x) (hg : â x, HasFDerivAt g (g' x) x) :
â« x, B (f x) (g' x v) âÎŒ = - â« x, B (f' x v) (g x) âÎŒ :=
-- A Fréchet derivative yields a line derivative in any direction, so this reduces
-- to the line-derivative version of integration by parts.
integral_bilinear_hasLineDerivAt_right_eq_neg_left_of_integrable hf'g hfg' hfg
(fun x ⊠(hf x).hasLineDerivAt v) (fun x ⊠(hg x).hasLineDerivAt v)
/-- **Integration by parts for Fréchet derivatives**
Version with a general bilinear form `B`.
If `B f g` is integrable, as well as `B f' g` and `B f g'` where `f'` and `g'` are the derivatives
of `f` and `g` in a given direction `v`, then `∫ B f g' = - ∫ B f' g`. -/
theorem integral_bilinear_fderiv_right_eq_neg_left_of_integrable
{f : E â F} {g : E â G} {v : E} {B : F âL[â] G âL[â] W}
(hf'g : Integrable (fun x ⊠B (fderiv â f x v) (g x)) ÎŒ)
(hfg' : Integrable (fun x ⊠B (f x) (fderiv â g x v)) ÎŒ)
(hfg : Integrable (fun x ⊠B (f x) (g x)) Ό)
(hf : Differentiable â f) (hg : Differentiable â g) :
â« x, B (f x) (fderiv â g x v) âÎŒ = - â« x, B (fderiv â f x v) (g x) âÎŒ :=
-- Specialization of the `HasFDerivAt` version to the `fderiv` of differentiable functions.
integral_bilinear_hasFDerivAt_right_eq_neg_left_of_integrable hf'g hfg' hfg
(fun x ⊠(hf x).hasFDerivAt) (fun x ⊠(hg x).hasFDerivAt)
variable {ð : Type*} [NormedField ð] [NormedAlgebra â ð]
[NormedSpace ð G] [IsScalarTower â ð G]
/-- **Integration by parts for Fréchet derivatives**
Version with a scalar function: `∫ f • g' = - ∫ f' • g` when `f • g'` and `f' • g` and `f • g`
are integrable, where `f'` and `g'` are the derivatives of `f` and `g` in a given direction `v`. -/
theorem integral_smul_fderiv_eq_neg_fderiv_smul_of_integrable
{f : E â ð} {g : E â G} {v : E}
(hf'g : Integrable (fun x ⊠fderiv â f x v ⢠g x) ÎŒ)
(hfg' : Integrable (fun x ⊠f x ⢠fderiv â g x v) ÎŒ)
(hfg : Integrable (fun x ⊠f x ⢠g x) Ό)
(hf : Differentiable â f) (hg : Differentiable â g) :
â« x, f x ⢠fderiv â g x v âÎŒ = - â« x, fderiv â f x v ⢠g x âÎŒ :=
-- Apply the bilinear version to the scalar multiplication bilinear map.
integral_bilinear_fderiv_right_eq_neg_left_of_integrable
(B := ContinuousLinearMap.lsmul â ð) hf'g hfg' hfg hf hg
/-- **Integration by parts for Fréchet derivatives**
Version with two scalar functions: `∫ f * g' = - ∫ f' * g` when `f * g'` and `f' * g` and `f * g`
are integrable, where `f'` and `g'` are the derivatives of `f` and `g` in a given direction `v`. -/
theorem integral_mul_fderiv_eq_neg_fderiv_mul_of_integrable
{f : E â ð} {g : E â ð} {v : E}
(hf'g : Integrable (fun x ⊠fderiv â f x v * g x) ÎŒ)
(hfg' : Integrable (fun x ⊠f x * fderiv â g x v) ÎŒ)
(hfg : Integrable (fun x ⊠f x * g x) Ό)
(hf : Differentiable â f) (hg : Differentiable â g) :
â« x, f x * fderiv â g x v âÎŒ = - â« x, fderiv â f x v * g x âÎŒ :=
-- Apply the bilinear version to the multiplication bilinear map.
integral_bilinear_fderiv_right_eq_neg_left_of_integrable
(B := ContinuousLinearMap.mul â ð) hf'g hfg' hfg hf hg
|
Analysis\Calculus\LineDeriv\Measurable.lean | /-
Copyright (c) 2023 Sébastien Gouëzel. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Sébastien Gouëzel
-/
import Mathlib.Analysis.Calculus.LineDeriv.Basic
import Mathlib.Analysis.Calculus.FDeriv.Measurable
/-! # Measurability of the line derivative
We prove in `measurable_lineDeriv` that the line derivative of a function (with respect to a
locally compact scalar field) is measurable, provided the function is continuous.
In `measurable_lineDeriv_uncurry`, assuming additionally that the source space is second countable,
we show that `(x, v) ↦ lineDeriv 𝕜 f x v` is also measurable.
An assumption such as continuity is necessary, as otherwise one could alternate in a non-measurable
way between differentiable and non-differentiable functions along the various lines
directed by `v`.
-/
open MeasureTheory
variable {ð : Type*} [NontriviallyNormedField ð] [LocallyCompactSpace ð]
{E : Type*} [NormedAddCommGroup E] [NormedSpace ð E] [MeasurableSpace E] [OpensMeasurableSpace E]
{F : Type*} [NormedAddCommGroup F] [NormedSpace ð F] [CompleteSpace F]
{f : E â F} {v : E}
/-!
Measurability of the line derivative `lineDeriv 𝕜 f x v` with respect to a fixed direction `v`.
-/
/-- If `f` is continuous, then the set of points where `f` is line-differentiable
in the fixed direction `v` is measurable. -/
theorem measurableSet_lineDifferentiableAt (hf : Continuous f) :
MeasurableSet {x : E | LineDifferentiableAt ð f x v} := by
borelize ð
-- Line differentiability of `f` at `x` in direction `v` is one-dimensional
-- differentiability at `0` of `t ↊ f (x + t • v)`, with `x` as a parameter.
let g : E â ð â F := fun x t ⊠f (x + t ⢠v)
have hg : Continuous g.uncurry := by fun_prop
exact measurable_prod_mk_right (measurableSet_of_differentiableAt_with_param ð hg)
/-- If `f` is continuous, then `x ↊ lineDeriv ð f x v` is measurable,
for a fixed direction `v`. -/
theorem measurable_lineDeriv [MeasurableSpace F] [BorelSpace F]
(hf : Continuous f) : Measurable (fun x ⊠lineDeriv ð f x v) := by
borelize ð
-- The line derivative is the one-dimensional derivative of `t ↊ f (x + t • v)` at `0`.
let g : E â ð â F := fun x t ⊠f (x + t ⢠v)
have hg : Continuous g.uncurry := by fun_prop
exact (measurable_deriv_with_param hg).comp measurable_prod_mk_right
/-- If `f` is continuous, then `x ↊ lineDeriv ð f x v` is strongly measurable,
for a fixed direction `v`. -/
theorem stronglyMeasurable_lineDeriv [SecondCountableTopologyEither E F] (hf : Continuous f) :
StronglyMeasurable (fun x ⊠lineDeriv ð f x v) := by
borelize ð
-- The line derivative is the one-dimensional derivative of `t ↊ f (x + t • v)` at `0`.
let g : E â ð â F := fun x t ⊠f (x + t ⢠v)
have hg : Continuous g.uncurry := by fun_prop
exact (stronglyMeasurable_deriv_with_param hg).comp_measurable measurable_prod_mk_right
/-- If `f` is continuous, then `x ↊ lineDeriv ð f x v` is almost everywhere measurable
with respect to any measure, for a fixed direction `v`. -/
theorem aemeasurable_lineDeriv [MeasurableSpace F] [BorelSpace F]
(hf : Continuous f) (Ό : Measure E) :
AEMeasurable (fun x ⊠lineDeriv ð f x v) ÎŒ :=
(measurable_lineDeriv hf).aemeasurable
/-- If `f` is continuous, then `x ↊ lineDeriv ð f x v` is almost everywhere strongly measurable
with respect to any measure, for a fixed direction `v`. -/
theorem aestronglyMeasurable_lineDeriv [SecondCountableTopologyEither E F]
(hf : Continuous f) (Ό : Measure E) :
AEStronglyMeasurable (fun x ⊠lineDeriv ð f x v) ÎŒ :=
(stronglyMeasurable_lineDeriv hf).aestronglyMeasurable
/-!
Measurability of the line derivative `lineDeriv 𝕜 f x v` when varying both `x` and `v`. For this,
we need an additional second countability assumption on `E` to make sure that open sets are
measurable in `E × E`.
-/
variable [SecondCountableTopology E]
/-- If `f` is continuous, then the set of pairs `(x, v)` such that `f` is line-differentiable
at `x` in the direction `v` is measurable. -/
theorem measurableSet_lineDifferentiableAt_uncurry (hf : Continuous f) :
MeasurableSet {p : E Ã E | LineDifferentiableAt ð f p.1 p.2} := by
borelize ð
-- View the pair `(x, v)` as a parameter of the one-dimensional map `t ↊ f (x + t • v)`.
let g : (E à E) â ð â F := fun p t ⊠f (p.1 + t ⢠p.2)
have : Continuous g.uncurry :=
hf.comp <| (continuous_fst.comp continuous_fst).add
<| continuous_snd.smul (continuous_snd.comp continuous_fst)
have M_meas : MeasurableSet {q : (E Ã E) Ã ð | DifferentiableAt ð (g q.1) q.2} :=
measurableSet_of_differentiableAt_with_param ð this
exact measurable_prod_mk_right M_meas
/-- If `f` is continuous, then `(x, v) ↊ lineDeriv ð f x v` is measurable. -/
theorem measurable_lineDeriv_uncurry [MeasurableSpace F] [BorelSpace F]
(hf : Continuous f) : Measurable (fun (p : E à E) ⊠lineDeriv ð f p.1 p.2) := by
borelize ð
-- View the pair `(x, v)` as a parameter of the one-dimensional map `t ↊ f (x + t • v)`.
let g : (E à E) â ð â F := fun p t ⊠f (p.1 + t ⢠p.2)
have : Continuous g.uncurry :=
hf.comp <| (continuous_fst.comp continuous_fst).add
<| continuous_snd.smul (continuous_snd.comp continuous_fst)
exact (measurable_deriv_with_param this).comp measurable_prod_mk_right
/-- If `f` is continuous, then `(x, v) ↊ lineDeriv ð f x v` is strongly measurable. -/
theorem stronglyMeasurable_lineDeriv_uncurry (hf : Continuous f) :
StronglyMeasurable (fun (p : E à E) ⊠lineDeriv ð f p.1 p.2) := by
borelize ð
-- View the pair `(x, v)` as a parameter of the one-dimensional map `t ↊ f (x + t • v)`.
let g : (E à E) â ð â F := fun p t ⊠f (p.1 + t ⢠p.2)
have : Continuous g.uncurry :=
hf.comp <| (continuous_fst.comp continuous_fst).add
<| continuous_snd.smul (continuous_snd.comp continuous_fst)
exact (stronglyMeasurable_deriv_with_param this).comp_measurable measurable_prod_mk_right
/-- If `f` is continuous, then `(x, v) ↊ lineDeriv ð f x v` is almost everywhere measurable
with respect to any measure on `E × E`. -/
theorem aemeasurable_lineDeriv_uncurry [MeasurableSpace F] [BorelSpace F]
(hf : Continuous f) (Ό : Measure (E à E)) :
AEMeasurable (fun (p : E à E) ⊠lineDeriv ð f p.1 p.2) ÎŒ :=
(measurable_lineDeriv_uncurry hf).aemeasurable
/-- If `f` is continuous, then `(x, v) ↊ lineDeriv ð f x v` is almost everywhere strongly
measurable with respect to any measure on `E × E`. -/
theorem aestronglyMeasurable_lineDeriv_uncurry (hf : Continuous f) (Ό : Measure (E à E)) :
AEStronglyMeasurable (fun (p : E à E) ⊠lineDeriv ð f p.1 p.2) ÎŒ :=
(stronglyMeasurable_lineDeriv_uncurry hf).aestronglyMeasurable
|
Analysis\Calculus\LineDeriv\QuadraticMap.lean | /-
Copyright (c) 2024 Yury Kudryashov. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yury Kudryashov
-/
import Mathlib.Analysis.Calculus.LineDeriv.Basic
import Mathlib.Analysis.Calculus.Deriv.Mul
import Mathlib.LinearAlgebra.QuadraticForm.Basic
/-!
# Quadratic forms are line (Gateaux) differentiable
In this file we prove that a quadratic form is line differentiable,
with the line derivative given by the polar bilinear form.
Note that this statement does not need topology on the domain.
In particular, it applies to discontinuous quadratic forms on infinite dimensional spaces.
-/
variable {ð E F : Type*} [NontriviallyNormedField ð] [AddCommGroup E] [Module ð E]
[NormedAddCommGroup F] [NormedSpace ð F]
namespace QuadraticMap
/-- A quadratic map has a line derivative at every point `a` in every direction `b`,
equal to its polar form `polar f a b`. No topology on the domain is needed. -/
theorem hasLineDerivAt (f : QuadraticMap ð E F) (a b : E) :
HasLineDerivAt ð f (polar f a b) a b := by
-- Expand `f (a + t • b)` via additivity/homogeneity of `f`; the resulting polynomial in `t`
-- is differentiated using the basic one-dimensional derivative lemmas.
simpa [HasLineDerivAt, QuadraticMap.map_add, f.map_smul] using
((hasDerivAt_const (0 : ð) (f a)).add <|
((hasDerivAt_id 0).mul (hasDerivAt_id 0)).smul (hasDerivAt_const 0 (f b))).add
((hasDerivAt_id 0).smul (hasDerivAt_const 0 (polar f a b)))
/-- A quadratic map is line-differentiable at every point in every direction. -/
theorem lineDifferentiableAt (f : QuadraticMap ð E F) (a b : E) : LineDifferentiableAt ð f a b :=
(f.hasLineDerivAt a b).lineDifferentiableAt
/-- The line derivative of a quadratic map is given by its polar bilinear form. -/
@[simp]
protected theorem lineDeriv (f : QuadraticMap ð E F) : lineDeriv ð f = polar f := by
ext a b
exact (f.hasLineDerivAt a b).lineDeriv
end QuadraticMap
|
Analysis\Calculus\LocalExtr\Basic.lean | /-
Copyright (c) 2019 Yury Kudryashov. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yury Kudryashov
-/
import Mathlib.Analysis.Calculus.Deriv.Add
/-!
# Local extrema of differentiable functions
## Main definitions
In a real normed space `E` we define `posTangentConeAt (s : Set E) (x : E)`.
This would be the same as `tangentConeAt ℝ≥0 s x` if we had a theory of normed semifields.
This set is used in the proof of Fermat's Theorem (see below), and can be used to formalize
[Lagrange multipliers](https://en.wikipedia.org/wiki/Lagrange_multiplier) and/or
[Karush–Kuhn–Tucker conditions](https://en.wikipedia.org/wiki/Karush%E2%80%93Kuhn%E2%80%93Tucker_conditions).
## Main statements
For each theorem name listed below,
we also prove similar theorems for `min`, `extr` (if applicable),
and `fderiv`/`deriv` instead of `HasFDerivAt`/`HasDerivAt`.
* `IsLocalMaxOn.hasFDerivWithinAt_nonpos` : `f' y †0` whenever `a` is a local maximum
of `f` on `s`, `f` has derivative `f'` at `a` within `s`, and `y` belongs to the positive tangent
cone of `s` at `a`.
* `IsLocalMaxOn.hasFDerivWithinAt_eq_zero` : In the settings of the previous theorem, if both
`y` and `-y` belong to the positive tangent cone, then `f' y = 0`.
* `IsLocalMax.hasFDerivAt_eq_zero` :
[Fermat's Theorem](https://en.wikipedia.org/wiki/Fermat's_theorem_(stationary_points)),
the derivative of a differentiable function at a local extremum point equals zero.
## Implementation notes
For each mathematical fact we prove several versions of its formalization:
* for maxima and minima;
* using `HasFDeriv*`/`HasDeriv*` or `fderiv*`/`deriv*`.
For the `fderiv*`/`deriv*` versions we omit the differentiability condition whenever it is possible
due to the fact that `fderiv` and `deriv` are defined to be zero for non-differentiable functions.
## References
* [Fermat's Theorem](https://en.wikipedia.org/wiki/Fermat's_theorem_(stationary_points));
* [Tangent cone](https://en.wikipedia.org/wiki/Tangent_cone);
## Tags
local extremum, tangent cone, Fermat's Theorem
-/
universe u v
open Filter Set
open scoped Topology Convex
section Module
variable {E : Type u} [NormedAddCommGroup E] [NormedSpace â E]
{f : E â â} {f' : E âL[â] â} {s : Set E} {a x y : E}
/-!
### Positive tangent cone
-/
/-- "Positive" tangent cone to `s` at `x`; the only difference from `tangentConeAt`
is that we require `c n → ∞` instead of `‖c n‖ → ∞`. One can think about `posTangentConeAt`
as `tangentConeAt NNReal` but we have no theory of normed semifields yet. -/
def posTangentConeAt (s : Set E) (x : E) : Set E :=
{ y : E | â (c : â â â) (d : â â E), (âá¶ n in atTop, x + d n â s) â§
Tendsto c atTop atTop â§ Tendsto (fun n => c n ⢠d n) atTop (ð y) }
/-- The positive tangent cone at a point is monotone with respect to inclusion of sets. -/
theorem posTangentConeAt_mono : Monotone fun s => posTangentConeAt s a := by
rintro s t hst y âšc, d, hd, hc, hcdâ©
exact âšc, d, mem_of_superset hd fun h hn => hst hn, hc, hcdâ©
/-- If `x + t • y` belongs to `s` for `t` in a set that frequently accumulates at `0`
from the right, then `y` belongs to the positive tangent cone of `s` at `x`. -/
theorem mem_posTangentConeAt_of_frequently_mem (h : âá¶ t : â in ð[>] 0, x + t ⢠y â s) :
y â posTangentConeAt s x := by
-- Extract a sequence `a n → 0+` witnessing the frequent membership,
-- and use `c n = (a n)⁻¹`, `d n = a n • y` in the definition of the cone.
obtain âša, ha, hasâ© := Filter.exists_seq_forall_of_frequently h
refine âšaâ»Â¹, (a · ⢠y), eventually_of_forall has, tendsto_inv_zero_atTop.comp ha, ?_â©
refine tendsto_const_nhds.congr' ?_
filter_upwards [(tendsto_nhdsWithin_iff.1 ha).2] with n (hn : 0 < a n)
simp [ne_of_gt hn]
/-- If `[x -[ℝ] x + y] ⊆ s`, then `y` belongs to the positive tangent cone of `s`.

Before 2024-07-13, this lemma used to be called `mem_posTangentConeAt_of_segment_subset'`.
See also `sub_mem_posTangentConeAt_of_segment_subset`
for the lemma that used to be called `mem_posTangentConeAt_of_segment_subset`. -/
theorem mem_posTangentConeAt_of_segment_subset (h : [x -[â] x + y] â s) :
y â posTangentConeAt s x := by
refine mem_posTangentConeAt_of_frequently_mem (Eventually.frequently ?_)
rw [eventually_nhdsWithin_iff]
filter_upwards [ge_mem_nhds one_pos] with t htâ htâ
apply h
rw [segment_eq_image', add_sub_cancel_left]
exact mem_image_of_mem _ âšle_of_lt htâ, htââ©
@[deprecated (since := "2024-07-13")] -- cleanup docstrings when we drop this alias
alias mem_posTangentConeAt_of_segment_subset' := mem_posTangentConeAt_of_segment_subset
/-- If the segment from `x` to `y` is contained in `s`, then `y - x` belongs to the positive
tangent cone of `s` at `x`. -/
theorem sub_mem_posTangentConeAt_of_segment_subset (h : segment â x y â s) :
y - x â posTangentConeAt s x :=
mem_posTangentConeAt_of_segment_subset <| by rwa [add_sub_cancel]
/-- The positive tangent cone to the whole space at any point is the whole space. -/
@[simp]
theorem posTangentConeAt_univ : posTangentConeAt univ a = univ :=
eq_univ_of_forall fun _ => mem_posTangentConeAt_of_segment_subset (subset_univ _)
/-!
### Fermat's Theorem (vector space)
-/
/-- If `f` has a local max on `s` at `a`, `f'` is the derivative of `f` at `a` within `s`, and
`y` belongs to the positive tangent cone of `s` at `a`, then `f' y ≤ 0`. -/
theorem IsLocalMaxOn.hasFDerivWithinAt_nonpos (h : IsLocalMaxOn f s a)
(hf : HasFDerivWithinAt f f' s a) (hy : y â posTangentConeAt s a) : f' y †0 := by
rcases hy with âšc, d, hd, hc, hcdâ©
have hc' : Tendsto (âc ·â) atTop atTop := tendsto_abs_atTop_atTop.comp hc
-- `f' y` is the limit of `c n • (f (a + d n) - f a)`, so it suffices to bound these terms by `0`.
suffices âá¶ n in atTop, c n ⢠(f (a + d n) - f a) †0 from
le_of_tendsto (hf.lim atTop hd hc' hcd) this
replace hd : Tendsto (fun n => a + d n) atTop (ð[s] (a + 0)) :=
tendsto_nhdsWithin_iff.2 âštendsto_const_nhds.add (tangentConeAt.lim_zero _ hc' hcd), hdâ©
rw [add_zero] at hd
-- Eventually `c n ≥ 0` and `f (a + d n) ≤ f a` by the local maximum, giving the bound.
filter_upwards [hd.eventually h, hc.eventually_ge_atTop 0] with n hfn hcn
exact mul_nonpos_of_nonneg_of_nonpos hcn (sub_nonpos.2 hfn)
/-- If `f` has a local max on `s` at `a` and `y` belongs to the positive tangent cone
of `s` at `a`, then `f' y ≤ 0`. -/
theorem IsLocalMaxOn.fderivWithin_nonpos (h : IsLocalMaxOn f s a)
(hy : y â posTangentConeAt s a) : (fderivWithin â f s a : E â â) y †0 := by
classical
-- If `f` is not differentiable within `s` at `a`, `fderivWithin` is `0` by convention,
-- so the inequality holds trivially.
exact
if hf : DifferentiableWithinAt â f s a then h.hasFDerivWithinAt_nonpos hf.hasFDerivWithinAt hy
else by rw [fderivWithin_zero_of_not_differentiableWithinAt hf]; rfl
/-- If `f` has a local max on `s` at `a`, `f'` is a derivative of `f` at `a` within `s`, and
both `y` and `-y` belong to the positive tangent cone of `s` at `a`, then `f' y = 0`. -/
theorem IsLocalMaxOn.hasFDerivWithinAt_eq_zero (h : IsLocalMaxOn f s a)
(hf : HasFDerivWithinAt f f' s a) (hy : y â posTangentConeAt s a)
(hy' : -y â posTangentConeAt s a) : f' y = 0 :=
-- Apply the nonpositivity lemma in the two opposite directions `y` and `-y`.
le_antisymm (h.hasFDerivWithinAt_nonpos hf hy) <| by simpa using h.hasFDerivWithinAt_nonpos hf hy'
/-- If `f` has a local max on `s` at `a` and both `y` and `-y` belong to the positive tangent cone
of `s` at `a`, then `f' y = 0`. -/
theorem IsLocalMaxOn.fderivWithin_eq_zero (h : IsLocalMaxOn f s a)
(hy : y â posTangentConeAt s a) (hy' : -y â posTangentConeAt s a) :
(fderivWithin â f s a : E â â) y = 0 := by
classical
-- If `f` is not differentiable within `s` at `a`, `fderivWithin` is `0` by convention.
exact if hf : DifferentiableWithinAt â f s a then
h.hasFDerivWithinAt_eq_zero hf.hasFDerivWithinAt hy hy'
else by rw [fderivWithin_zero_of_not_differentiableWithinAt hf]; rfl
/-- If `f` has a local min on `s` at `a`, `f'` is the derivative of `f` at `a` within `s`, and
`y` belongs to the positive tangent cone of `s` at `a`, then `0 ≤ f' y`. -/
theorem IsLocalMinOn.hasFDerivWithinAt_nonneg (h : IsLocalMinOn f s a)
(hf : HasFDerivWithinAt f f' s a) (hy : y â posTangentConeAt s a) : 0 †f' y := by
-- Reduce to the local-max statement by negating `f`.
simpa using h.neg.hasFDerivWithinAt_nonpos hf.neg hy
/-- If `f` has a local min on `s` at `a` and `y` belongs to the positive tangent cone
of `s` at `a`, then `0 ≤ f' y`. -/
theorem IsLocalMinOn.fderivWithin_nonneg (h : IsLocalMinOn f s a)
(hy : y â posTangentConeAt s a) : (0 : â) †(fderivWithin â f s a : E â â) y := by
classical
-- If `f` is not differentiable within `s` at `a`, `fderivWithin` is `0` by convention.
exact
if hf : DifferentiableWithinAt â f s a then h.hasFDerivWithinAt_nonneg hf.hasFDerivWithinAt hy
else by rw [fderivWithin_zero_of_not_differentiableWithinAt hf]; rfl
/-- If `f` has a local min on `s` at `a`, `f'` is a derivative of `f` at `a` within `s`, and
both `y` and `-y` belong to the positive tangent cone of `s` at `a`, then `f' y = 0`. -/
theorem IsLocalMinOn.hasFDerivWithinAt_eq_zero (h : IsLocalMinOn f s a)
(hf : HasFDerivWithinAt f f' s a) (hy : y â posTangentConeAt s a)
(hy' : -y â posTangentConeAt s a) : f' y = 0 := by
-- Reduce to the local-max statement by negating `f`.
simpa using h.neg.hasFDerivWithinAt_eq_zero hf.neg hy hy'
/-- If `f` has a local min on `s` at `a` and both `y` and `-y` belong to the positive tangent cone
of `s` at `a`, then `f' y = 0`. -/
theorem IsLocalMinOn.fderivWithin_eq_zero (h : IsLocalMinOn f s a)
(hy : y â posTangentConeAt s a) (hy' : -y â posTangentConeAt s a) :
(fderivWithin â f s a : E â â) y = 0 := by
classical
-- If `f` is not differentiable within `s` at `a`, `fderivWithin` is `0` by convention.
exact if hf : DifferentiableWithinAt â f s a then
h.hasFDerivWithinAt_eq_zero hf.hasFDerivWithinAt hy hy'
else by rw [fderivWithin_zero_of_not_differentiableWithinAt hf]; rfl
/-- **Fermat's Theorem**: the derivative of a function at a local minimum equals zero. -/
theorem IsLocalMin.hasFDerivAt_eq_zero (h : IsLocalMin f a) (hf : HasFDerivAt f f' a) : f' = 0 := by
ext y
-- Over `univ` the positive tangent cone is the whole space, so every `y` and `-y` qualify.
apply (h.on univ).hasFDerivWithinAt_eq_zero hf.hasFDerivWithinAt <;>
rw [posTangentConeAt_univ] <;>
apply mem_univ
/-- **Fermat's Theorem**: the derivative of a function at a local minimum equals zero. -/
theorem IsLocalMin.fderiv_eq_zero (h : IsLocalMin f a) : fderiv â f a = 0 := by
classical
-- If `f` is not differentiable at `a`, `fderiv` is `0` by convention.
exact if hf : DifferentiableAt â f a then h.hasFDerivAt_eq_zero hf.hasFDerivAt
else fderiv_zero_of_not_differentiableAt hf
/-- **Fermat's Theorem**: the derivative of a function at a local maximum equals zero. -/
theorem IsLocalMax.hasFDerivAt_eq_zero (h : IsLocalMax f a) (hf : HasFDerivAt f f' a) : f' = 0 :=
-- Reduce to the local-minimum statement by negating `f`.
neg_eq_zero.1 <| h.neg.hasFDerivAt_eq_zero hf.neg
/-- **Fermat's Theorem**: the derivative of a function at a local maximum equals zero. -/
theorem IsLocalMax.fderiv_eq_zero (h : IsLocalMax f a) : fderiv â f a = 0 := by
classical
-- If `f` is not differentiable at `a`, `fderiv` is `0` by convention.
exact if hf : DifferentiableAt â f a then h.hasFDerivAt_eq_zero hf.hasFDerivAt
else fderiv_zero_of_not_differentiableAt hf
/-- **Fermat's Theorem**: the derivative of a function at a local extremum equals zero. -/
theorem IsLocalExtr.hasFDerivAt_eq_zero (h : IsLocalExtr f a) : HasFDerivAt f f' a â f' = 0 :=
h.elim IsLocalMin.hasFDerivAt_eq_zero IsLocalMax.hasFDerivAt_eq_zero
/-- **Fermat's Theorem**: the derivative of a function at a local extremum equals zero. -/
theorem IsLocalExtr.fderiv_eq_zero (h : IsLocalExtr f a) : fderiv â f a = 0 :=
h.elim IsLocalMin.fderiv_eq_zero IsLocalMax.fderiv_eq_zero
end Module
/-!
### Fermat's Theorem
-/
section Real
variable {f : â â â} {f' : â} {s : Set â} {a b : â}
/-- `1` belongs to the positive tangent cone of `s ⊆ ℝ` at `a` if and only if `a` belongs to the
closure of the part of `s` lying strictly to the right of `a`. -/
lemma one_mem_posTangentConeAt_iff_mem_closure :
1 â posTangentConeAt s a â a â closure (Ioi a â© s) := by
constructor
-- Forward direction: from a witness `(c, d)`, the points `a + d n` eventually lie in
-- `Ioi a ∩ s` and converge to `a`.
· rintro âšc, d, hs, hc, hcdâ©
have : Tendsto (a + d ·) atTop (ð a) := by
simpa only [add_zero] using tendsto_const_nhds.add
(tangentConeAt.lim_zero _ (tendsto_abs_atTop_atTop.comp hc) hcd)
apply mem_closure_of_tendsto this
filter_upwards [hc.eventually_gt_atTop 0, hcd.eventually (lt_mem_nhds one_pos), hs]
with n hcn hcdn hdn
simp_all
-- Reverse direction: closure membership yields frequent membership near `a` on the right.
· intro h
apply mem_posTangentConeAt_of_frequently_mem
rw [mem_closure_iff_frequently, â map_add_left_nhds_zero, frequently_map] at h
simpa [nhdsWithin, frequently_inf_principal] using h
/-- `1` belongs to the positive tangent cone of `s ⊆ ℝ` at `a` if and only if `s` contains points
arbitrarily close to `a` on the right. -/
lemma one_mem_posTangentConeAt_iff_frequently :
1 â posTangentConeAt s a â âá¶ x in ð[>] a, x â s := by
rw [one_mem_posTangentConeAt_iff_mem_closure, mem_closure_iff_frequently,
frequently_nhdsWithin_iff, inter_comm]
simp_rw [mem_inter_iff]
/-- **Fermat's Theorem**: the derivative of a function at a local minimum equals zero. -/
theorem IsLocalMin.hasDerivAt_eq_zero (h : IsLocalMin f a) (hf : HasDerivAt f f' a) : f' = 0 := by
-- The one-dimensional derivative is the Fréchet derivative evaluated at `1`.
simpa using DFunLike.congr_fun (h.hasFDerivAt_eq_zero (hasDerivAt_iff_hasFDerivAt.1 hf)) 1
/-- **Fermat's Theorem**: the derivative of a function at a local minimum equals zero. -/
theorem IsLocalMin.deriv_eq_zero (h : IsLocalMin f a) : deriv f a = 0 := by
classical
-- If `f` is not differentiable at `a`, `deriv` is `0` by convention.
exact if hf : DifferentiableAt â f a then h.hasDerivAt_eq_zero hf.hasDerivAt
else deriv_zero_of_not_differentiableAt hf
/-- **Fermat's Theorem**: the derivative of a function at a local maximum equals zero. -/
theorem IsLocalMax.hasDerivAt_eq_zero (h : IsLocalMax f a) (hf : HasDerivAt f f' a) : f' = 0 :=
-- Reduce to the local-minimum statement by negating `f`.
neg_eq_zero.1 <| h.neg.hasDerivAt_eq_zero hf.neg
/-- **Fermat's Theorem**: the derivative of a function at a local maximum equals zero. -/
theorem IsLocalMax.deriv_eq_zero (h : IsLocalMax f a) : deriv f a = 0 := by
classical
-- If `f` is not differentiable at `a`, `deriv` is `0` by convention.
exact if hf : DifferentiableAt â f a then h.hasDerivAt_eq_zero hf.hasDerivAt
else deriv_zero_of_not_differentiableAt hf
/-- **Fermat's Theorem**: the derivative of a function at a local extremum equals zero. -/
theorem IsLocalExtr.hasDerivAt_eq_zero (h : IsLocalExtr f a) : HasDerivAt f f' a â f' = 0 :=
h.elim IsLocalMin.hasDerivAt_eq_zero IsLocalMax.hasDerivAt_eq_zero
/-- **Fermat's Theorem**: the derivative of a function at a local extremum equals zero. -/
theorem IsLocalExtr.deriv_eq_zero (h : IsLocalExtr f a) : deriv f a = 0 :=
h.elim IsLocalMin.deriv_eq_zero IsLocalMax.deriv_eq_zero
end Real
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.