text
stringlengths 0
1.25M
| meta
stringlengths 47
1.89k
|
|---|---|
/-
Copyright (c) 2019 Scott Morrison. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Scott Morrison, Bhavik Mehta
! This file was ported from Lean 3 source module category_theory.limits.shapes.terminal
! leanprover-community/mathlib commit f47581155c818e6361af4e4fda60d27d020c226b
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathbin.CategoryTheory.Pempty
import Mathbin.CategoryTheory.Limits.HasLimits
import Mathbin.CategoryTheory.EpiMono
import Mathbin.CategoryTheory.Category.Preorder
/-!
# Initial and terminal objects in a category.
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> Any changes to this file require a corresponding PR to mathlib4.
## References
* [Stacks: Initial and final objects](https://stacks.math.columbia.edu/tag/002B)
-/
noncomputable section
universe w w' v v₁ v₂ u u₁ u₂
open CategoryTheory
namespace CategoryTheory.Limits
variable {C : Type u₁} [Category.{v₁} C]
attribute [local tidy] tactic.discrete_cases
#print CategoryTheory.Limits.asEmptyCone /-
/-- Construct a cone over the empty diagram with apex `X`.

The legs `π` are a natural transformation out of an empty (discrete `PEmpty`)
index category, so there is nothing to supply; `tidy` discharges it. -/
@[simps]
def asEmptyCone (X : C) : Cone (Functor.empty.{0} C) :=
  { -- The apex of the cone is the given object `X`.
    pt := X
    π := by tidy }
#align category_theory.limits.as_empty_cone CategoryTheory.Limits.asEmptyCone
-/
#print CategoryTheory.Limits.asEmptyCocone /-
/-- Construct a cocone under the empty diagram with nadir `X`.

Dual to `asEmptyCone`: the legs `ι` come from the empty index category,
so `tidy` discharges the (vacuous) naturality obligations. -/
@[simps]
def asEmptyCocone (X : C) : Cocone (Functor.empty.{0} C) :=
  { -- The nadir of the cocone is the given object `X`.
    pt := X
    ι := by tidy }
#align category_theory.limits.as_empty_cocone CategoryTheory.Limits.asEmptyCocone
-/
#print CategoryTheory.Limits.IsTerminal /-
/-- `X` is terminal if the cone it induces on the empty diagram is limiting.

This is an `abbrev` for `IsLimit (asEmptyCone X)`, so the full `IsLimit` API
(uniqueness of cone morphisms, `uniqueUpToIso`, etc.) applies directly. -/
abbrev IsTerminal (X : C) :=
IsLimit (asEmptyCone X)
#align category_theory.limits.is_terminal CategoryTheory.Limits.IsTerminal
-/
#print CategoryTheory.Limits.IsInitial /-
/-- `X` is initial if the cocone it induces on the empty diagram is colimiting.

Dual to `IsTerminal`: an `abbrev` for `IsColimit (asEmptyCocone X)`, so the
full `IsColimit` API applies directly. -/
abbrev IsInitial (X : C) :=
IsColimit (asEmptyCocone X)
#align category_theory.limits.is_initial CategoryTheory.Limits.IsInitial
-/
/- warning: category_theory.limits.is_terminal_equiv_unique -> CategoryTheory.Limits.isTerminalEquivUnique is a dubious translation:
lean 3 declaration is
forall {C : Type.{u2}} [_inst_1 : CategoryTheory.Category.{u1, u2} C] (F : CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (Y : C), Equiv.{max 1 (succ u2) (succ u1), max (succ u2) (succ u1)} (CategoryTheory.Limits.IsLimit.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 F (CategoryTheory.Limits.Cone.mk.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 F Y (CategoryTheory.NatTrans.mk.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 (CategoryTheory.Functor.obj.{u1, u1, u2, max u1 u2} C _inst_1 (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.const.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) Y) F (CategoryTheory.Limits.isTerminalEquivUnique._aux1.{u2, u1} C _inst_1 F Y) (CategoryTheory.Limits.isTerminalEquivUnique._proof_1.{u2, u1} C _inst_1 F Y)))) (forall (X : C), Unique.{succ u1} (Quiver.Hom.{succ u1, u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) X Y))
but is expected to have type
forall {C : Type.{u2}} [_inst_1 : CategoryTheory.Category.{u1, u2} C] (F : CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (Y : C), Equiv.{max (succ u2) (succ u1), max (succ u2) (succ u1)} (CategoryTheory.Limits.IsLimit.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 F (CategoryTheory.Limits.Cone.mk.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 F Y (CategoryTheory.NatTrans.mk.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 (Prefunctor.obj.{succ u1, succ u1, u2, max u1 u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.CategoryStruct.toQuiver.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Category.toCategoryStruct.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1))) (CategoryTheory.Functor.toPrefunctor.{u1, u1, u2, max u2 u1} C _inst_1 (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.const.{0, u1, 0, u2} 
(CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1)) Y) F (fun (h._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.858 : CategoryTheory.Discrete.{0} PEmpty.{1}) => CategoryTheory.Limits.asEmptyCone.match_1.{succ u1} (fun (h._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.858.871 : CategoryTheory.Discrete.{0} PEmpty.{1}) => [mdata noImplicitLambda:1 Quiver.Hom.{succ u1, u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (Prefunctor.obj.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 (Prefunctor.obj.{succ u1, succ u1, u2, max u1 u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.CategoryStruct.toQuiver.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Category.toCategoryStruct.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1))) 
(CategoryTheory.Functor.toPrefunctor.{u1, u1, u2, max u2 u1} C _inst_1 (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.const.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1)) Y)) h._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.858.871) (Prefunctor.obj.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 F) h._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.858.871)]) h._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.858 (fun (a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.830 : PEmpty.{1}) => PEmpty.casesOn.{succ u1, 1} (fun (t._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.886 : PEmpty.{1}) => (Eq.{1} PEmpty.{1} a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.830 t._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.886) -> ([mdata noImplicitLambda:1 Quiver.Hom.{succ u1, u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (Prefunctor.obj.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} 
(CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 (Prefunctor.obj.{succ u1, succ u1, u2, max u1 u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.CategoryStruct.toQuiver.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Category.toCategoryStruct.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1))) (CategoryTheory.Functor.toPrefunctor.{u1, u1, u2, max u2 u1} C _inst_1 (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.const.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1)) Y)) (CategoryTheory.Discrete.mk.{0} PEmpty.{1} a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.830)) (Prefunctor.obj.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) 
(CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 F) (CategoryTheory.Discrete.mk.{0} PEmpty.{1} a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.830))])) a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.830 (Eq.refl.{1} PEmpty.{1} a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.830))) (fun {{h._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.943 : CategoryTheory.Discrete.{0} PEmpty.{1}}} => CategoryTheory.Limits.asEmptyCone.match_2 (fun (h._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.943.956 : CategoryTheory.Discrete.{0} PEmpty.{1}) => [mdata noImplicitLambda:1 forall {{Y_1 : CategoryTheory.Discrete.{0} PEmpty.{1}}} (f : Quiver.Hom.{1, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) h._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.943.956 Y_1), Eq.{succ u1} (Quiver.Hom.{succ u1, u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (Prefunctor.obj.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, 
u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 (Prefunctor.obj.{succ u1, succ u1, u2, max u1 u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.CategoryStruct.toQuiver.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Category.toCategoryStruct.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1))) (CategoryTheory.Functor.toPrefunctor.{u1, u1, u2, max u2 u1} C _inst_1 (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.const.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1)) Y)) h._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.943.956) (Prefunctor.obj.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) 
(CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 F) Y_1)) (CategoryTheory.CategoryStruct.comp.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1) (Prefunctor.obj.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 (Prefunctor.obj.{succ u1, succ u1, u2, max u1 u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.CategoryStruct.toQuiver.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Category.toCategoryStruct.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1))) (CategoryTheory.Functor.toPrefunctor.{u1, u1, u2, max u2 u1} C _inst_1 (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) 
(CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.const.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1)) Y)) h._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.943.956) (Prefunctor.obj.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 (Prefunctor.obj.{succ u1, succ u1, u2, max u1 u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.CategoryStruct.toQuiver.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Category.toCategoryStruct.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1))) (CategoryTheory.Functor.toPrefunctor.{u1, u1, u2, max u2 u1} C _inst_1 (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} 
PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.const.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1)) Y)) Y_1) (Prefunctor.obj.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 F) Y_1) (Prefunctor.map.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 (Prefunctor.obj.{succ u1, succ u1, u2, max u1 u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.CategoryStruct.toQuiver.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Category.toCategoryStruct.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} 
(CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1))) (CategoryTheory.Functor.toPrefunctor.{u1, u1, u2, max u2 u1} C _inst_1 (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.const.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1)) Y)) h._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.943.956 Y_1 f) (CategoryTheory.Limits.asEmptyCone.match_1.{succ u1} (fun (h._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.858.871 : CategoryTheory.Discrete.{0} PEmpty.{1}) => [mdata noImplicitLambda:1 Quiver.Hom.{succ u1, u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (Prefunctor.obj.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 (Prefunctor.obj.{succ u1, succ u1, u2, max u1 u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) 
(CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.CategoryStruct.toQuiver.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Category.toCategoryStruct.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1))) (CategoryTheory.Functor.toPrefunctor.{u1, u1, u2, max u2 u1} C _inst_1 (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.const.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1)) Y)) h._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.858.871) (Prefunctor.obj.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 F) h._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.858.871)]) Y_1 (fun (a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.830 : PEmpty.{1}) => PEmpty.casesOn.{succ u1, 1} (fun 
(t._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.886 : PEmpty.{1}) => (Eq.{1} PEmpty.{1} a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.830 t._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.886) -> ([mdata noImplicitLambda:1 Quiver.Hom.{succ u1, u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (Prefunctor.obj.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 (Prefunctor.obj.{succ u1, succ u1, u2, max u1 u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.CategoryStruct.toQuiver.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Category.toCategoryStruct.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1))) (CategoryTheory.Functor.toPrefunctor.{u1, u1, u2, max u2 u1} C _inst_1 (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) 
(CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.const.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1)) Y)) (CategoryTheory.Discrete.mk.{0} PEmpty.{1} a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.830)) (Prefunctor.obj.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 F) (CategoryTheory.Discrete.mk.{0} PEmpty.{1} a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.830))])) a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.830 (Eq.refl.{1} PEmpty.{1} a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.830)))) (CategoryTheory.CategoryStruct.comp.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1) (Prefunctor.obj.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 
(Prefunctor.obj.{succ u1, succ u1, u2, max u1 u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.CategoryStruct.toQuiver.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Category.toCategoryStruct.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1))) (CategoryTheory.Functor.toPrefunctor.{u1, u1, u2, max u2 u1} C _inst_1 (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.const.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1)) Y)) h._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.943.956) (Prefunctor.obj.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C 
_inst_1 F) h._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.943.956) (Prefunctor.obj.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 F) Y_1) (CategoryTheory.Limits.asEmptyCone.match_1.{succ u1} (fun (h._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.858.871 : CategoryTheory.Discrete.{0} PEmpty.{1}) => [mdata noImplicitLambda:1 Quiver.Hom.{succ u1, u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (Prefunctor.obj.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 (Prefunctor.obj.{succ u1, succ u1, u2, max u1 u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.CategoryStruct.toQuiver.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} 
PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Category.toCategoryStruct.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1))) (CategoryTheory.Functor.toPrefunctor.{u1, u1, u2, max u2 u1} C _inst_1 (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.const.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1)) Y)) h._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.858.871) (Prefunctor.obj.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 F) h._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.858.871)]) h._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.943.956 (fun (a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.830 : PEmpty.{1}) => PEmpty.casesOn.{succ u1, 1} (fun (t._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.886 : PEmpty.{1}) => (Eq.{1} PEmpty.{1} a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.830 
t._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.886) -> ([mdata noImplicitLambda:1 Quiver.Hom.{succ u1, u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (Prefunctor.obj.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 (Prefunctor.obj.{succ u1, succ u1, u2, max u1 u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.CategoryStruct.toQuiver.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Category.toCategoryStruct.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1))) (CategoryTheory.Functor.toPrefunctor.{u1, u1, u2, max u2 u1} C _inst_1 (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} 
PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.const.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1)) Y)) (CategoryTheory.Discrete.mk.{0} PEmpty.{1} a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.830)) (Prefunctor.obj.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 F) (CategoryTheory.Discrete.mk.{0} PEmpty.{1} a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.830))])) a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.830 (Eq.refl.{1} PEmpty.{1} a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.830))) (Prefunctor.map.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 F) h._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.943.956 Y_1 f))]) h._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.943 (fun (a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.915 : PEmpty.{1}) => PEmpty.casesOn.{0, 1} (fun (t._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.971 : 
PEmpty.{1}) => (Eq.{1} PEmpty.{1} a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.915 t._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.971) -> ([mdata noImplicitLambda:1 forall {{Y_1 : CategoryTheory.Discrete.{0} PEmpty.{1}}} (f : Quiver.Hom.{1, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) (CategoryTheory.Discrete.mk.{0} PEmpty.{1} a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.915) Y_1), Eq.{succ u1} (Quiver.Hom.{succ u1, u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (Prefunctor.obj.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 (Prefunctor.obj.{succ u1, succ u1, u2, max u1 u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.CategoryStruct.toQuiver.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Category.toCategoryStruct.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} 
(CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1))) (CategoryTheory.Functor.toPrefunctor.{u1, u1, u2, max u2 u1} C _inst_1 (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.const.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1)) Y)) (CategoryTheory.Discrete.mk.{0} PEmpty.{1} a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.915)) (Prefunctor.obj.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 F) Y_1)) (CategoryTheory.CategoryStruct.comp.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1) (Prefunctor.obj.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) 
(CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 (Prefunctor.obj.{succ u1, succ u1, u2, max u1 u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.CategoryStruct.toQuiver.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Category.toCategoryStruct.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1))) (CategoryTheory.Functor.toPrefunctor.{u1, u1, u2, max u2 u1} C _inst_1 (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.const.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1)) Y)) (CategoryTheory.Discrete.mk.{0} PEmpty.{1} a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.915)) (Prefunctor.obj.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C 
(CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 (Prefunctor.obj.{succ u1, succ u1, u2, max u1 u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.CategoryStruct.toQuiver.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Category.toCategoryStruct.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1))) (CategoryTheory.Functor.toPrefunctor.{u1, u1, u2, max u2 u1} C _inst_1 (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.const.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1)) Y)) Y_1) (Prefunctor.obj.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C 
_inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 F) Y_1) (Prefunctor.map.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 (Prefunctor.obj.{succ u1, succ u1, u2, max u1 u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.CategoryStruct.toQuiver.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Category.toCategoryStruct.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1))) (CategoryTheory.Functor.toPrefunctor.{u1, u1, u2, max u2 u1} C _inst_1 (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.const.{0, u1, 0, u2} 
(CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1)) Y)) (CategoryTheory.Discrete.mk.{0} PEmpty.{1} a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.915) Y_1 f) (CategoryTheory.Limits.asEmptyCone.match_1.{succ u1} (fun (h._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.858.871 : CategoryTheory.Discrete.{0} PEmpty.{1}) => [mdata noImplicitLambda:1 Quiver.Hom.{succ u1, u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (Prefunctor.obj.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 (Prefunctor.obj.{succ u1, succ u1, u2, max u1 u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.CategoryStruct.toQuiver.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Category.toCategoryStruct.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1))) 
(CategoryTheory.Functor.toPrefunctor.{u1, u1, u2, max u2 u1} C _inst_1 (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.const.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1)) Y)) h._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.858.871) (Prefunctor.obj.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 F) h._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.858.871)]) Y_1 (fun (a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.830 : PEmpty.{1}) => PEmpty.casesOn.{succ u1, 1} (fun (t._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.886 : PEmpty.{1}) => (Eq.{1} PEmpty.{1} a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.830 t._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.886) -> ([mdata noImplicitLambda:1 Quiver.Hom.{succ u1, u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (Prefunctor.obj.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) 
(CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 (Prefunctor.obj.{succ u1, succ u1, u2, max u1 u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.CategoryStruct.toQuiver.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Category.toCategoryStruct.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1))) (CategoryTheory.Functor.toPrefunctor.{u1, u1, u2, max u2 u1} C _inst_1 (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.const.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1)) Y)) (CategoryTheory.Discrete.mk.{0} PEmpty.{1} a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.830)) (Prefunctor.obj.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} 
(CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 F) (CategoryTheory.Discrete.mk.{0} PEmpty.{1} a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.830))])) a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.830 (Eq.refl.{1} PEmpty.{1} a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.830)))) (CategoryTheory.CategoryStruct.comp.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1) (Prefunctor.obj.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 (Prefunctor.obj.{succ u1, succ u1, u2, max u1 u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.CategoryStruct.toQuiver.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Category.toCategoryStruct.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C 
_inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1))) (CategoryTheory.Functor.toPrefunctor.{u1, u1, u2, max u2 u1} C _inst_1 (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.const.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1)) Y)) (CategoryTheory.Discrete.mk.{0} PEmpty.{1} a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.915)) (Prefunctor.obj.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 F) (CategoryTheory.Discrete.mk.{0} PEmpty.{1} a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.915)) (Prefunctor.obj.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) 
(CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 F) Y_1) (CategoryTheory.Limits.asEmptyCone.match_1.{succ u1} (fun (h._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.858.871 : CategoryTheory.Discrete.{0} PEmpty.{1}) => [mdata noImplicitLambda:1 Quiver.Hom.{succ u1, u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (Prefunctor.obj.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 (Prefunctor.obj.{succ u1, succ u1, u2, max u1 u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.CategoryStruct.toQuiver.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Category.toCategoryStruct.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1))) (CategoryTheory.Functor.toPrefunctor.{u1, u1, u2, max u2 u1} C _inst_1 (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) 
(CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.const.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1)) Y)) h._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.858.871) (Prefunctor.obj.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 F) h._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.858.871)]) (CategoryTheory.Discrete.mk.{0} PEmpty.{1} a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.915) (fun (a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.830 : PEmpty.{1}) => PEmpty.casesOn.{succ u1, 1} (fun (t._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.886 : PEmpty.{1}) => (Eq.{1} PEmpty.{1} a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.830 t._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.886) -> ([mdata noImplicitLambda:1 Quiver.Hom.{succ u1, u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (Prefunctor.obj.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C 
(CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 (Prefunctor.obj.{succ u1, succ u1, u2, max u1 u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.CategoryStruct.toQuiver.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Category.toCategoryStruct.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1))) (CategoryTheory.Functor.toPrefunctor.{u1, u1, u2, max u2 u1} C _inst_1 (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.const.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1)) Y)) (CategoryTheory.Discrete.mk.{0} PEmpty.{1} a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.830)) (Prefunctor.obj.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) 
(CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 F) (CategoryTheory.Discrete.mk.{0} PEmpty.{1} a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.830))])) a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.830 (Eq.refl.{1} PEmpty.{1} a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.830))) (Prefunctor.map.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 F) (CategoryTheory.Discrete.mk.{0} PEmpty.{1} a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.915) Y_1 f))])) a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.915 (Eq.refl.{1} PEmpty.{1} a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.915)))))) (forall (X : C), Unique.{succ u1} (Quiver.Hom.{succ u1, u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) X Y))
Case conversion may be inaccurate. Consider using '#align category_theory.limits.is_terminal_equiv_unique CategoryTheory.Limits.isTerminalEquivUniqueₓ'. -/
/-- An object `Y` is terminal iff for every `X` there is a unique morphism `X ⟶ Y`. -/
def isTerminalEquivUnique (F : Discrete.{0} PEmpty.{1} ⥤ C) (Y : C) :
IsLimit (⟨Y, by tidy⟩ : Cone F) ≃ ∀ X : C, Unique (X ⟶ Y)
where
-- Forward direction: a limiting cone on the empty diagram with point `Y` yields,
-- for each `X`, the morphism `X ⟶ Y` as the lift of the (empty) cone on `X`;
-- uniqueness is exactly the uniqueness clause of `IsLimit`.
toFun t X :=
{ default := t.lift ⟨X, by tidy⟩
uniq := fun f => t.uniq ⟨X, by tidy⟩ f (by tidy) }
-- Reverse direction: given a unique morphism out of every object, the lift of a
-- cone `s` is the default morphism out of its point `s.pt`, and `IsLimit.uniq`
-- follows from the `uniq` field of the `Unique` instance.
invFun u :=
{ lift := fun s => (u s.pt).default
uniq := fun s _ _ => (u s.pt).2 _ }
-- Both round trips are proof-/subsingleton-valued, so `tidy` discharges them.
left_inv := by tidy
right_inv := by tidy
#align category_theory.limits.is_terminal_equiv_unique CategoryTheory.Limits.isTerminalEquivUnique
#print CategoryTheory.Limits.IsTerminal.ofUnique /-
/-- An object `Y` is terminal if for every `X` there is a unique morphism `X ⟶ Y`
(as an instance).

Compare with `is_terminal_equiv_unique`, which states the same fact as an
equivalence rather than taking the `unique` hypothesis as an instance. -/
def IsTerminal.ofUnique (Y : C) [h : ∀ X : C, Unique (X ⟶ Y)] : IsTerminal Y
    -- Only `lift` is supplied; the remaining `is_limit` fields are filled in
    -- automatically (presumably by their auto_params — the diagram is empty,
    -- so the compatibility conditions are vacuous).
    where lift s := (h s.pt).default
#align category_theory.limits.is_terminal.of_unique CategoryTheory.Limits.IsTerminal.ofUnique
-/
/- warning: category_theory.limits.is_terminal_top -> CategoryTheory.Limits.isTerminalTop is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_2 : Preorder.{u1} α] [_inst_3 : OrderTop.{u1} α (Preorder.toLE.{u1} α _inst_2)], CategoryTheory.Limits.IsTerminal.{u1, u1} α (Preorder.smallCategory.{u1} α _inst_2) (Top.top.{u1} α (OrderTop.toHasTop.{u1} α (Preorder.toLE.{u1} α _inst_2) _inst_3))
but is expected to have type
forall {α : Type.{u1}} [_inst_2 : Preorder.{u1} α] [_inst_3 : OrderTop.{u1} α (Preorder.toLE.{u1} α _inst_2)], CategoryTheory.Limits.IsTerminal.{u1, u1} α (Preorder.smallCategory.{u1} α _inst_2) (Top.top.{u1} α (OrderTop.toTop.{u1} α (Preorder.toLE.{u1} α _inst_2) _inst_3))
Case conversion may be inaccurate. Consider using '#align category_theory.limits.is_terminal_top CategoryTheory.Limits.isTerminalTopₓ'. -/
/-- If `α` is a preorder with top, then `⊤` is a terminal object.

In the preorder category the hom-set `X ⟶ ⊤` corresponds to a proof of
`X ≤ ⊤`; instance search supplies the required `unique` instance for the `_`
placeholder (morphisms in a preorder category form a subsingleton, and
`le_top` provides inhabitedness). -/
def isTerminalTop {α : Type _} [Preorder α] [OrderTop α] : IsTerminal (⊤ : α) :=
IsTerminal.ofUnique _
#align category_theory.limits.is_terminal_top CategoryTheory.Limits.isTerminalTop
#print CategoryTheory.Limits.IsTerminal.ofIso /-
/-- Transport a term of type `is_terminal` across an isomorphism.

An isomorphism `i : Y ≅ Z` induces an isomorphism of the corresponding empty
cones, so `is_limit.of_iso_limit` turns a terminal structure on `Y` into one
on `Z`. -/
def IsTerminal.ofIso {Y Z : C} (hY : IsTerminal Y) (i : Y ≅ Z) : IsTerminal Z :=
IsLimit.ofIsoLimit hY
-- The cone isomorphism: its `hom`/`inv` components are cone morphisms whose
-- underlying morphisms are `i.hom` and `i.inv`; the leg-compatibility
-- conditions are vacuous since the diagram is empty.
{ Hom := { Hom := i.Hom }
inv := { Hom := i.inv } }
#align category_theory.limits.is_terminal.of_iso CategoryTheory.Limits.IsTerminal.ofIso
-/
/- warning: category_theory.limits.is_initial_equiv_unique -> CategoryTheory.Limits.isInitialEquivUnique is a dubious translation:
lean 3 declaration is
forall {C : Type.{u2}} [_inst_1 : CategoryTheory.Category.{u1, u2} C] (F : CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (X : C), Equiv.{max 1 (succ u2) (succ u1), max (succ u2) (succ u1)} (CategoryTheory.Limits.IsColimit.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 F (CategoryTheory.Limits.Cocone.mk.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 F X (CategoryTheory.NatTrans.mk.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 F (CategoryTheory.Functor.obj.{u1, u1, u2, max u1 u2} C _inst_1 (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.const.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) X) (CategoryTheory.Limits.isInitialEquivUnique._aux1.{u2, u1} C _inst_1 F X) (CategoryTheory.Limits.isInitialEquivUnique._proof_1.{u2, u1} C _inst_1 F X)))) (forall (Y : C), Unique.{succ u1} (Quiver.Hom.{succ u1, u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) X Y))
but is expected to have type
forall {C : Type.{u2}} [_inst_1 : CategoryTheory.Category.{u1, u2} C] (F : CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (X : C), Equiv.{max (succ u2) (succ u1), max (succ u2) (succ u1)} (CategoryTheory.Limits.IsColimit.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 F (CategoryTheory.Limits.Cocone.mk.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 F X (CategoryTheory.NatTrans.mk.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 F (Prefunctor.obj.{succ u1, succ u1, u2, max u1 u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.CategoryStruct.toQuiver.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Category.toCategoryStruct.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1))) (CategoryTheory.Functor.toPrefunctor.{u1, u1, u2, max u2 u1} C _inst_1 (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.const.{0, u1, 0, u2} 
(CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1)) X) (fun (h._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2127 : CategoryTheory.Discrete.{0} PEmpty.{1}) => CategoryTheory.Limits.asEmptyCone.match_1.{succ u1} (fun (h._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2127.2140 : CategoryTheory.Discrete.{0} PEmpty.{1}) => [mdata noImplicitLambda:1 Quiver.Hom.{succ u1, u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (Prefunctor.obj.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 F) h._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2127.2140) (Prefunctor.obj.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 (Prefunctor.obj.{succ u1, succ u1, u2, max u1 u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.{0, u1, 0, u2} 
(CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.CategoryStruct.toQuiver.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Category.toCategoryStruct.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1))) (CategoryTheory.Functor.toPrefunctor.{u1, u1, u2, max u2 u1} C _inst_1 (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.const.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1)) X)) h._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2127.2140)]) h._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2127 (fun (a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2099 : PEmpty.{1}) => PEmpty.casesOn.{succ u1, 1} (fun (t._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2155 : PEmpty.{1}) => (Eq.{1} PEmpty.{1} a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2099 t._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2155) -> ([mdata noImplicitLambda:1 Quiver.Hom.{succ u1, u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (Prefunctor.obj.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) 
(CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 F) (CategoryTheory.Discrete.mk.{0} PEmpty.{1} a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2099)) (Prefunctor.obj.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 (Prefunctor.obj.{succ u1, succ u1, u2, max u1 u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.CategoryStruct.toQuiver.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Category.toCategoryStruct.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1))) (CategoryTheory.Functor.toPrefunctor.{u1, u1, u2, max u2 
u1} C _inst_1 (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.const.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1)) X)) (CategoryTheory.Discrete.mk.{0} PEmpty.{1} a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2099))])) a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2099 (Eq.refl.{1} PEmpty.{1} a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2099))) (fun {{h._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2212 : CategoryTheory.Discrete.{0} PEmpty.{1}}} => CategoryTheory.Limits.asEmptyCone.match_2 (fun (h._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2212.2225 : CategoryTheory.Discrete.{0} PEmpty.{1}) => [mdata noImplicitLambda:1 forall {{Y : CategoryTheory.Discrete.{0} PEmpty.{1}}} (f : Quiver.Hom.{1, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) h._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2212.2225 Y), Eq.{succ u1} (Quiver.Hom.{succ u1, u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (Prefunctor.obj.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C 
_inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 F) h._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2212.2225) (Prefunctor.obj.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 (Prefunctor.obj.{succ u1, succ u1, u2, max u1 u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.CategoryStruct.toQuiver.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Category.toCategoryStruct.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1))) (CategoryTheory.Functor.toPrefunctor.{u1, u1, u2, max u2 u1} C _inst_1 (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} 
PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.const.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1)) X)) Y)) (CategoryTheory.CategoryStruct.comp.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1) (Prefunctor.obj.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 F) h._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2212.2225) (Prefunctor.obj.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 F) Y) (Prefunctor.obj.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} 
(CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 (Prefunctor.obj.{succ u1, succ u1, u2, max u1 u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.CategoryStruct.toQuiver.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Category.toCategoryStruct.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1))) (CategoryTheory.Functor.toPrefunctor.{u1, u1, u2, max u2 u1} C _inst_1 (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.const.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1)) X)) Y) (Prefunctor.map.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) 
(CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 F) h._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2212.2225 Y f) (CategoryTheory.Limits.asEmptyCone.match_1.{succ u1} (fun (h._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2127.2140 : CategoryTheory.Discrete.{0} PEmpty.{1}) => [mdata noImplicitLambda:1 Quiver.Hom.{succ u1, u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (Prefunctor.obj.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 F) h._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2127.2140) (Prefunctor.obj.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 (Prefunctor.obj.{succ u1, succ u1, u2, max u1 u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) 
(CategoryTheory.CategoryStruct.toQuiver.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Category.toCategoryStruct.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1))) (CategoryTheory.Functor.toPrefunctor.{u1, u1, u2, max u2 u1} C _inst_1 (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.const.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1)) X)) h._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2127.2140)]) Y (fun (a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2099 : PEmpty.{1}) => PEmpty.casesOn.{succ u1, 1} (fun (t._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2155 : PEmpty.{1}) => (Eq.{1} PEmpty.{1} a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2099 t._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2155) -> ([mdata noImplicitLambda:1 Quiver.Hom.{succ u1, u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (Prefunctor.obj.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C 
(CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 F) (CategoryTheory.Discrete.mk.{0} PEmpty.{1} a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2099)) (Prefunctor.obj.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 (Prefunctor.obj.{succ u1, succ u1, u2, max u1 u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.CategoryStruct.toQuiver.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Category.toCategoryStruct.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1))) (CategoryTheory.Functor.toPrefunctor.{u1, u1, u2, max u2 u1} C _inst_1 (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, 
u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.const.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1)) X)) (CategoryTheory.Discrete.mk.{0} PEmpty.{1} a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2099))])) a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2099 (Eq.refl.{1} PEmpty.{1} a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2099)))) (CategoryTheory.CategoryStruct.comp.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1) (Prefunctor.obj.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 F) h._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2212.2225) (Prefunctor.obj.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 (Prefunctor.obj.{succ u1, succ u1, u2, max u1 u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, 
u2} C _inst_1)) (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.CategoryStruct.toQuiver.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Category.toCategoryStruct.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1))) (CategoryTheory.Functor.toPrefunctor.{u1, u1, u2, max u2 u1} C _inst_1 (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.const.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1)) X)) h._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2212.2225) (Prefunctor.obj.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 (Prefunctor.obj.{succ u1, succ u1, u2, max u1 u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C 
(CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.CategoryStruct.toQuiver.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Category.toCategoryStruct.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1))) (CategoryTheory.Functor.toPrefunctor.{u1, u1, u2, max u2 u1} C _inst_1 (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.const.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1)) X)) Y) (CategoryTheory.Limits.asEmptyCone.match_1.{succ u1} (fun (h._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2127.2140 : CategoryTheory.Discrete.{0} PEmpty.{1}) => [mdata noImplicitLambda:1 Quiver.Hom.{succ u1, u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (Prefunctor.obj.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C 
(CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 F) h._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2127.2140) (Prefunctor.obj.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 (Prefunctor.obj.{succ u1, succ u1, u2, max u1 u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.CategoryStruct.toQuiver.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Category.toCategoryStruct.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1))) (CategoryTheory.Functor.toPrefunctor.{u1, u1, u2, max u2 u1} C _inst_1 (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} 
PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.const.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1)) X)) h._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2127.2140)]) h._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2212.2225 (fun (a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2099 : PEmpty.{1}) => PEmpty.casesOn.{succ u1, 1} (fun (t._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2155 : PEmpty.{1}) => (Eq.{1} PEmpty.{1} a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2099 t._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2155) -> ([mdata noImplicitLambda:1 Quiver.Hom.{succ u1, u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (Prefunctor.obj.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 F) (CategoryTheory.Discrete.mk.{0} PEmpty.{1} a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2099)) (Prefunctor.obj.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) 
(CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 (Prefunctor.obj.{succ u1, succ u1, u2, max u1 u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.CategoryStruct.toQuiver.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Category.toCategoryStruct.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1))) (CategoryTheory.Functor.toPrefunctor.{u1, u1, u2, max u2 u1} C _inst_1 (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.const.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1)) X)) (CategoryTheory.Discrete.mk.{0} PEmpty.{1} a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2099))])) a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2099 (Eq.refl.{1} PEmpty.{1} a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2099))) (Prefunctor.map.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} 
(CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 (Prefunctor.obj.{succ u1, succ u1, u2, max u1 u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.CategoryStruct.toQuiver.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Category.toCategoryStruct.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1))) (CategoryTheory.Functor.toPrefunctor.{u1, u1, u2, max u2 u1} C _inst_1 (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.const.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1)) X)) h._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2212.2225 Y f))]) h._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2212 (fun (a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2184 : PEmpty.{1}) => PEmpty.casesOn.{0, 1} (fun 
(t._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2240 : PEmpty.{1}) => (Eq.{1} PEmpty.{1} a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2184 t._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2240) -> ([mdata noImplicitLambda:1 forall {{Y : CategoryTheory.Discrete.{0} PEmpty.{1}}} (f : Quiver.Hom.{1, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) (CategoryTheory.Discrete.mk.{0} PEmpty.{1} a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2184) Y), Eq.{succ u1} (Quiver.Hom.{succ u1, u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (Prefunctor.obj.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 F) (CategoryTheory.Discrete.mk.{0} PEmpty.{1} a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2184)) (Prefunctor.obj.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) 
(CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 (Prefunctor.obj.{succ u1, succ u1, u2, max u1 u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.CategoryStruct.toQuiver.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Category.toCategoryStruct.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1))) (CategoryTheory.Functor.toPrefunctor.{u1, u1, u2, max u2 u1} C _inst_1 (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.const.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1)) X)) Y)) (CategoryTheory.CategoryStruct.comp.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1) (Prefunctor.obj.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C 
(CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 F) (CategoryTheory.Discrete.mk.{0} PEmpty.{1} a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2184)) (Prefunctor.obj.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 F) Y) (Prefunctor.obj.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 (Prefunctor.obj.{succ u1, succ u1, u2, max u1 u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.CategoryStruct.toQuiver.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) 
(CategoryTheory.Category.toCategoryStruct.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1))) (CategoryTheory.Functor.toPrefunctor.{u1, u1, u2, max u2 u1} C _inst_1 (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.const.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1)) X)) Y) (Prefunctor.map.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 F) (CategoryTheory.Discrete.mk.{0} PEmpty.{1} a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2184) Y f) (CategoryTheory.Limits.asEmptyCone.match_1.{succ u1} (fun (h._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2127.2140 : CategoryTheory.Discrete.{0} PEmpty.{1}) => [mdata noImplicitLambda:1 Quiver.Hom.{succ u1, u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (Prefunctor.obj.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) 
(CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 F) h._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2127.2140) (Prefunctor.obj.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 (Prefunctor.obj.{succ u1, succ u1, u2, max u1 u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.CategoryStruct.toQuiver.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Category.toCategoryStruct.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1))) 
(CategoryTheory.Functor.toPrefunctor.{u1, u1, u2, max u2 u1} C _inst_1 (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.const.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1)) X)) h._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2127.2140)]) Y (fun (a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2099 : PEmpty.{1}) => PEmpty.casesOn.{succ u1, 1} (fun (t._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2155 : PEmpty.{1}) => (Eq.{1} PEmpty.{1} a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2099 t._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2155) -> ([mdata noImplicitLambda:1 Quiver.Hom.{succ u1, u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (Prefunctor.obj.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 F) (CategoryTheory.Discrete.mk.{0} PEmpty.{1} a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2099)) (Prefunctor.obj.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} 
(CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 (Prefunctor.obj.{succ u1, succ u1, u2, max u1 u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.CategoryStruct.toQuiver.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Category.toCategoryStruct.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1))) (CategoryTheory.Functor.toPrefunctor.{u1, u1, u2, max u2 u1} C _inst_1 (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.const.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1)) X)) (CategoryTheory.Discrete.mk.{0} PEmpty.{1} a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2099))])) a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2099 (Eq.refl.{1} PEmpty.{1} a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2099)))) 
(CategoryTheory.CategoryStruct.comp.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1) (Prefunctor.obj.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 F) (CategoryTheory.Discrete.mk.{0} PEmpty.{1} a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2184)) (Prefunctor.obj.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 (Prefunctor.obj.{succ u1, succ u1, u2, max u1 u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.CategoryStruct.toQuiver.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Category.toCategoryStruct.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} 
PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1))) (CategoryTheory.Functor.toPrefunctor.{u1, u1, u2, max u2 u1} C _inst_1 (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.const.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1)) X)) (CategoryTheory.Discrete.mk.{0} PEmpty.{1} a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2184)) (Prefunctor.obj.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 (Prefunctor.obj.{succ u1, succ u1, u2, max u1 u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.CategoryStruct.toQuiver.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Category.toCategoryStruct.{u1, max u2 u1} 
(CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1))) (CategoryTheory.Functor.toPrefunctor.{u1, u1, u2, max u2 u1} C _inst_1 (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.const.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1)) X)) Y) (CategoryTheory.Limits.asEmptyCone.match_1.{succ u1} (fun (h._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2127.2140 : CategoryTheory.Discrete.{0} PEmpty.{1}) => [mdata noImplicitLambda:1 Quiver.Hom.{succ u1, u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (Prefunctor.obj.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 F) h._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2127.2140) (Prefunctor.obj.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} 
(CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 (Prefunctor.obj.{succ u1, succ u1, u2, max u1 u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.CategoryStruct.toQuiver.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Category.toCategoryStruct.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1))) (CategoryTheory.Functor.toPrefunctor.{u1, u1, u2, max u2 u1} C _inst_1 (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.const.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1)) X)) h._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2127.2140)]) (CategoryTheory.Discrete.mk.{0} PEmpty.{1} a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2184) (fun (a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2099 : PEmpty.{1}) => 
PEmpty.casesOn.{succ u1, 1} (fun (t._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2155 : PEmpty.{1}) => (Eq.{1} PEmpty.{1} a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2099 t._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2155) -> ([mdata noImplicitLambda:1 Quiver.Hom.{succ u1, u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (Prefunctor.obj.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 F) (CategoryTheory.Discrete.mk.{0} PEmpty.{1} a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2099)) (Prefunctor.obj.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 (Prefunctor.obj.{succ u1, succ u1, u2, max u1 u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) 
(CategoryTheory.CategoryStruct.toQuiver.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Category.toCategoryStruct.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1))) (CategoryTheory.Functor.toPrefunctor.{u1, u1, u2, max u2 u1} C _inst_1 (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.const.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1)) X)) (CategoryTheory.Discrete.mk.{0} PEmpty.{1} a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2099))])) a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2099 (Eq.refl.{1} PEmpty.{1} a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2099))) (Prefunctor.map.{1, succ u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.CategoryStruct.toQuiver.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.Category.toCategoryStruct.{0, 0} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}))) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1 (Prefunctor.obj.{succ u1, succ u1, u2, max u1 u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C 
(CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.CategoryStruct.toQuiver.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Category.toCategoryStruct.{u1, max u2 u1} (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1))) (CategoryTheory.Functor.toPrefunctor.{u1, u1, u2, max u2 u1} C _inst_1 (CategoryTheory.Functor.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.category.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1) (CategoryTheory.Functor.const.{0, u1, 0, u2} (CategoryTheory.Discrete.{0} PEmpty.{1}) (CategoryTheory.discreteCategory.{0} PEmpty.{1}) C _inst_1)) X)) (CategoryTheory.Discrete.mk.{0} PEmpty.{1} a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2184) Y f))])) a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2184 (Eq.refl.{1} PEmpty.{1} a._@.Mathlib.CategoryTheory.Limits.Shapes.Terminal._hyg.2184)))))) (forall (Y : C), Unique.{succ u1} (Quiver.Hom.{succ u1, u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_1)) X Y))
Case conversion may be inaccurate. Consider using '#align category_theory.limits.is_initial_equiv_unique CategoryTheory.Limits.isInitialEquivUniqueₓ'. -/
/-- An object `X` is initial iff for every `Y` there is a unique morphism `X ⟶ Y`.

Here `F` is an arbitrary functor out of the empty category (not just
`functor.empty C`), and the cocone on `F` with point `X` is built on the fly
by `tidy`. -/
def isInitialEquivUnique (F : Discrete.{0} PEmpty.{1} ⥤ C) (X : C) :
    IsColimit (⟨X, by tidy⟩ : Cocone F) ≃ ∀ Y : C, Unique (X ⟶ Y)
    where
  -- NOTE: the binder `X` below shadows the outer `X`; it plays the role of the
  -- target object `Y` in the statement.  Given a colimit structure `t`, the
  -- canonical morphism is obtained by descending to the cocone with that point.
  toFun t X :=
    { default := t.desc ⟨X, by tidy⟩
      uniq := fun f => t.uniq ⟨X, by tidy⟩ f (by tidy) }
  -- Conversely, uniqueness of morphisms out of `X` yields a colimit structure:
  -- `desc` picks the default morphism; `uniq` is immediate since any two
  -- morphisms `X ⟶ s.pt` agree (`(u s.pt).2`).
  invFun u :=
    { desc := fun s => (u s.pt).default
      uniq := fun s _ _ => (u s.pt).2 _ }
  -- Both round trips are proof-irrelevant and closed by `tidy`.
  left_inv := by tidy
  right_inv := by tidy
#align category_theory.limits.is_initial_equiv_unique CategoryTheory.Limits.isInitialEquivUnique
#print CategoryTheory.Limits.IsInitial.ofUnique /-
/-- An object `X` is initial if for every `Y` there is a unique morphism `X ⟶ Y`
(as an instance). -/
-- Only `desc` is given; the remaining colimit fields are filled by auto-params.
def IsInitial.ofUnique (X : C) [h : ∀ Y : C, Unique (X ⟶ Y)] : IsInitial X
    where desc s := (h s.pt).default
#align category_theory.limits.is_initial.of_unique CategoryTheory.Limits.IsInitial.ofUnique
-/
/- warning: category_theory.limits.is_initial_bot -> CategoryTheory.Limits.isInitialBot is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_2 : Preorder.{u1} α] [_inst_3 : OrderBot.{u1} α (Preorder.toLE.{u1} α _inst_2)], CategoryTheory.Limits.IsInitial.{u1, u1} α (Preorder.smallCategory.{u1} α _inst_2) (Bot.bot.{u1} α (OrderBot.toHasBot.{u1} α (Preorder.toLE.{u1} α _inst_2) _inst_3))
but is expected to have type
forall {α : Type.{u1}} [_inst_2 : Preorder.{u1} α] [_inst_3 : OrderBot.{u1} α (Preorder.toLE.{u1} α _inst_2)], CategoryTheory.Limits.IsInitial.{u1, u1} α (Preorder.smallCategory.{u1} α _inst_2) (Bot.bot.{u1} α (OrderBot.toBot.{u1} α (Preorder.toLE.{u1} α _inst_2) _inst_3))
Case conversion may be inaccurate. Consider using '#align category_theory.limits.is_initial_bot CategoryTheory.Limits.isInitialBotₓ'. -/
/-- If `α` is a preorder with bot, then `⊥` is an initial object. -/
-- `⊥ ≤ Y` for all `Y` gives the required unique morphism in the preorder category.
def isInitialBot {α : Type _} [Preorder α] [OrderBot α] : IsInitial (⊥ : α) :=
  IsInitial.ofUnique _
#align category_theory.limits.is_initial_bot CategoryTheory.Limits.isInitialBot
#print CategoryTheory.Limits.IsInitial.ofIso /-
/-- Transport a term of type `is_initial` across an isomorphism. -/
-- The cocone morphism in each direction is just the (co)component of `i`.
def IsInitial.ofIso {X Y : C} (hX : IsInitial X) (i : X ≅ Y) : IsInitial Y :=
  IsColimit.ofIsoColimit hX
    { Hom := { Hom := i.Hom }
      inv := { Hom := i.inv } }
#align category_theory.limits.is_initial.of_iso CategoryTheory.Limits.IsInitial.ofIso
-/
#print CategoryTheory.Limits.IsTerminal.from /-
/-- Give the morphism to a terminal object from any other. -/
def IsTerminal.from {X : C} (t : IsTerminal X) (Y : C) : Y ⟶ X :=
  t.lift (asEmptyCone Y)
#align category_theory.limits.is_terminal.from CategoryTheory.Limits.IsTerminal.from
-/

#print CategoryTheory.Limits.IsTerminal.hom_ext /-
/-- Any two morphisms to a terminal object are equal. -/
-- Follows from uniqueness of limit cone morphisms; the hypothesis is vacuous over `pempty`.
theorem IsTerminal.hom_ext {X Y : C} (t : IsTerminal X) (f g : Y ⟶ X) : f = g :=
  t.hom_ext (by tidy)
#align category_theory.limits.is_terminal.hom_ext CategoryTheory.Limits.IsTerminal.hom_ext
-/

#print CategoryTheory.Limits.IsTerminal.comp_from /-
-- Composing any `f` with the canonical map to a terminal object is the canonical map.
@[simp]
theorem IsTerminal.comp_from {Z : C} (t : IsTerminal Z) {X Y : C} (f : X ⟶ Y) :
    f ≫ t.from Y = t.from X :=
  t.hom_ext _ _
#align category_theory.limits.is_terminal.comp_from CategoryTheory.Limits.IsTerminal.comp_from
-/

#print CategoryTheory.Limits.IsTerminal.from_self /-
-- The canonical map from a terminal object to itself is the identity.
@[simp]
theorem IsTerminal.from_self {X : C} (t : IsTerminal X) : t.from X = 𝟙 X :=
  t.hom_ext _ _
#align category_theory.limits.is_terminal.from_self CategoryTheory.Limits.IsTerminal.from_self
-/
#print CategoryTheory.Limits.IsInitial.to /-
/-- Give the morphism from an initial object to any other. -/
def IsInitial.to {X : C} (t : IsInitial X) (Y : C) : X ⟶ Y :=
  t.desc (asEmptyCocone Y)
#align category_theory.limits.is_initial.to CategoryTheory.Limits.IsInitial.to
-/

#print CategoryTheory.Limits.IsInitial.hom_ext /-
/-- Any two morphisms from an initial object are equal. -/
-- Dual of `is_terminal.hom_ext`; the compatibility hypothesis is vacuous over `pempty`.
theorem IsInitial.hom_ext {X Y : C} (t : IsInitial X) (f g : X ⟶ Y) : f = g :=
  t.hom_ext (by tidy)
#align category_theory.limits.is_initial.hom_ext CategoryTheory.Limits.IsInitial.hom_ext
-/

#print CategoryTheory.Limits.IsInitial.to_comp /-
-- Post-composing the canonical map out of an initial object gives the canonical map.
@[simp]
theorem IsInitial.to_comp {X : C} (t : IsInitial X) {Y Z : C} (f : Y ⟶ Z) : t.to Y ≫ f = t.to Z :=
  t.hom_ext _ _
#align category_theory.limits.is_initial.to_comp CategoryTheory.Limits.IsInitial.to_comp
-/

#print CategoryTheory.Limits.IsInitial.to_self /-
-- The canonical map from an initial object to itself is the identity.
@[simp]
theorem IsInitial.to_self {X : C} (t : IsInitial X) : t.to X = 𝟙 X :=
  t.hom_ext _ _
#align category_theory.limits.is_initial.to_self CategoryTheory.Limits.IsInitial.to_self
-/
#print CategoryTheory.Limits.IsTerminal.isSplitMono_from /-
/-- Any morphism from a terminal object is split mono. -/
-- The retraction is the canonical map back to the terminal object.
theorem IsTerminal.isSplitMono_from {X Y : C} (t : IsTerminal X) (f : X ⟶ Y) : IsSplitMono f :=
  IsSplitMono.mk' ⟨t.from _, t.hom_ext _ _⟩
#align category_theory.limits.is_terminal.is_split_mono_from CategoryTheory.Limits.IsTerminal.isSplitMono_from
-/

#print CategoryTheory.Limits.IsInitial.isSplitEpi_to /-
/-- Any morphism to an initial object is split epi. -/
-- Dually, the section is the canonical map out of the initial object.
theorem IsInitial.isSplitEpi_to {X Y : C} (t : IsInitial X) (f : Y ⟶ X) : IsSplitEpi f :=
  IsSplitEpi.mk' ⟨t.to _, t.hom_ext _ _⟩
#align category_theory.limits.is_initial.is_split_epi_to CategoryTheory.Limits.IsInitial.isSplitEpi_to
-/

#print CategoryTheory.Limits.IsTerminal.mono_from /-
/-- Any morphism from a terminal object is mono. -/
-- Split mono implies mono; register the split-mono instance and let typeclass search finish.
theorem IsTerminal.mono_from {X Y : C} (t : IsTerminal X) (f : X ⟶ Y) : Mono f := by
  haveI := t.is_split_mono_from f <;> infer_instance
#align category_theory.limits.is_terminal.mono_from CategoryTheory.Limits.IsTerminal.mono_from
-/

#print CategoryTheory.Limits.IsInitial.epi_to /-
/-- Any morphism to an initial object is epi. -/
-- Split epi implies epi, by the same instance-search pattern as `mono_from`.
theorem IsInitial.epi_to {X Y : C} (t : IsInitial X) (f : Y ⟶ X) : Epi f := by
  haveI := t.is_split_epi_to f <;> infer_instance
#align category_theory.limits.is_initial.epi_to CategoryTheory.Limits.IsInitial.epi_to
-/
#print CategoryTheory.Limits.IsTerminal.uniqueUpToIso /-
/-- If `T` and `T'` are terminal, they are isomorphic. -/
-- `hom_inv_id`/`inv_hom_id` are discharged by the auto-param (`hom_ext`).
@[simps]
def IsTerminal.uniqueUpToIso {T T' : C} (hT : IsTerminal T) (hT' : IsTerminal T') : T ≅ T'
    where
  Hom := hT'.from _
  inv := hT.from _
#align category_theory.limits.is_terminal.unique_up_to_iso CategoryTheory.Limits.IsTerminal.uniqueUpToIso
-/

#print CategoryTheory.Limits.IsInitial.uniqueUpToIso /-
/-- If `I` and `I'` are initial, they are isomorphic. -/
@[simps]
def IsInitial.uniqueUpToIso {I I' : C} (hI : IsInitial I) (hI' : IsInitial I') : I ≅ I'
    where
  Hom := hI.to _
  inv := hI'.to _
#align category_theory.limits.is_initial.unique_up_to_iso CategoryTheory.Limits.IsInitial.uniqueUpToIso
-/
variable (C)

#print CategoryTheory.Limits.HasTerminal /-
/-- A category has a terminal object if it has a limit over the empty diagram.
Use `has_terminal_of_unique` to construct instances.
-/
abbrev HasTerminal :=
  HasLimitsOfShape (Discrete.{0} PEmpty) C
#align category_theory.limits.has_terminal CategoryTheory.Limits.HasTerminal
-/

#print CategoryTheory.Limits.HasInitial /-
/-- A category has an initial object if it has a colimit over the empty diagram.
Use `has_initial_of_unique` to construct instances.
-/
abbrev HasInitial :=
  HasColimitsOfShape (Discrete.{0} PEmpty) C
#align category_theory.limits.has_initial CategoryTheory.Limits.HasInitial
-/
section Univ

-- `F₁`/`F₂` are empty diagrams living in possibly different universes `w`, `w'`.
variable (X : C) {F₁ : Discrete.{w} PEmpty ⥤ C} {F₂ : Discrete.{w'} PEmpty ⥤ C}

#print CategoryTheory.Limits.isLimitChangeEmptyCone /-
/-- Being terminal is independent of the empty diagram, its universe, and the cone over it,
as long as the cone points are isomorphic. -/
def isLimitChangeEmptyCone {c₁ : Cone F₁} (hl : IsLimit c₁) (c₂ : Cone F₂) (hi : c₁.pt ≅ c₂.pt) :
    IsLimit c₂ where
  lift c := hl.lift ⟨c.pt, by tidy⟩ ≫ hi.Hom
  -- the empty diagram has no objects, so factorisation conditions are vacuous
  fac _ j := j.as.elim
  uniq c f _ := by
    -- NOTE(review): mathport emitted `c.X` here but `c.pt` above — confirm both elaborate
    erw [← hl.uniq ⟨c.X, by tidy⟩ (f ≫ hi.inv) fun j => j.as.elim]
    simp
#align category_theory.limits.is_limit_change_empty_cone CategoryTheory.Limits.isLimitChangeEmptyCone
-/

#print CategoryTheory.Limits.isLimitEmptyConeEquiv /-
/-- Replacing an empty cone in `is_limit` by another with the same cone point
is an equivalence. -/
def isLimitEmptyConeEquiv (c₁ : Cone F₁) (c₂ : Cone F₂) (h : c₁.pt ≅ c₂.pt) :
    IsLimit c₁ ≃ IsLimit c₂
    where
  toFun hl := isLimitChangeEmptyCone C hl c₂ h
  invFun hl := isLimitChangeEmptyCone C hl c₁ h.symm
  left_inv := by tidy
  right_inv := by tidy
#align category_theory.limits.is_limit_empty_cone_equiv CategoryTheory.Limits.isLimitEmptyConeEquiv
-/
#print CategoryTheory.Limits.hasTerminalChangeDiagram /-
/-- Having a limit for one empty diagram gives a limit for any other empty diagram. -/
theorem hasTerminalChangeDiagram (h : HasLimit F₁) : HasLimit F₂ :=
  ⟨⟨⟨⟨limit F₁, by tidy⟩, isLimitChangeEmptyCone C (limit.isLimit F₁) _ (eqToIso rfl)⟩⟩⟩
#align category_theory.limits.has_terminal_change_diagram CategoryTheory.Limits.hasTerminalChangeDiagram
-/

#print CategoryTheory.Limits.hasTerminalChangeUniverse /-
/-- Having a terminal object is independent of the universe of the empty indexing category. -/
theorem hasTerminalChangeUniverse [h : HasLimitsOfShape (Discrete.{w} PEmpty) C] :
    HasLimitsOfShape (Discrete.{w'} PEmpty) C :=
  {
    HasLimit := fun J =>
      hasTerminalChangeDiagram C
        (let f := h.1
        f (Functor.empty C)) }
#align category_theory.limits.has_terminal_change_universe CategoryTheory.Limits.hasTerminalChangeUniverse
-/
#print CategoryTheory.Limits.isColimitChangeEmptyCocone /-
/-- Being initial is independent of the empty diagram, its universe, and the cocone over it,
as long as the cocone points are isomorphic. -/
def isColimitChangeEmptyCocone {c₁ : Cocone F₁} (hl : IsColimit c₁) (c₂ : Cocone F₂)
    (hi : c₁.pt ≅ c₂.pt) : IsColimit c₂
    where
  desc c := hi.inv ≫ hl.desc ⟨c.pt, by tidy⟩
  -- no objects in the empty diagram, so the compatibility condition is vacuous
  fac _ j := j.as.elim
  uniq c f _ := by
    -- NOTE(review): mathport emitted `c.X` here but `c.pt` above — confirm both elaborate
    erw [← hl.uniq ⟨c.X, by tidy⟩ (hi.hom ≫ f) fun j => j.as.elim]
    simp
#align category_theory.limits.is_colimit_change_empty_cocone CategoryTheory.Limits.isColimitChangeEmptyCocone
-/

#print CategoryTheory.Limits.isColimitEmptyCoconeEquiv /-
/-- Replacing an empty cocone in `is_colimit` by another with the same cocone point
is an equivalence. -/
def isColimitEmptyCoconeEquiv (c₁ : Cocone F₁) (c₂ : Cocone F₂) (h : c₁.pt ≅ c₂.pt) :
    IsColimit c₁ ≃ IsColimit c₂
    where
  toFun hl := isColimitChangeEmptyCocone C hl c₂ h
  invFun hl := isColimitChangeEmptyCocone C hl c₁ h.symm
  left_inv := by tidy
  right_inv := by tidy
#align category_theory.limits.is_colimit_empty_cocone_equiv CategoryTheory.Limits.isColimitEmptyCoconeEquiv
-/
#print CategoryTheory.Limits.hasInitialChangeDiagram /-
/-- Having a colimit for one empty diagram gives a colimit for any other empty diagram. -/
theorem hasInitialChangeDiagram (h : HasColimit F₁) : HasColimit F₂ :=
  ⟨⟨⟨⟨colimit F₁, by tidy⟩, isColimitChangeEmptyCocone C (colimit.isColimit F₁) _ (eqToIso rfl)⟩⟩⟩
#align category_theory.limits.has_initial_change_diagram CategoryTheory.Limits.hasInitialChangeDiagram
-/

#print CategoryTheory.Limits.hasInitialChangeUniverse /-
/-- Having an initial object is independent of the universe of the empty indexing category. -/
theorem hasInitialChangeUniverse [h : HasColimitsOfShape (Discrete.{w} PEmpty) C] :
    HasColimitsOfShape (Discrete.{w'} PEmpty) C :=
  {
    HasColimit := fun J =>
      hasInitialChangeDiagram C
        (let f := h.1
        f (Functor.empty C)) }
#align category_theory.limits.has_initial_change_universe CategoryTheory.Limits.hasInitialChangeUniverse
-/
end Univ

#print CategoryTheory.Limits.terminal /-
/-- An arbitrary choice of terminal object, if one exists.
You can use the notation `⊤_ C`.
This object is characterized by having a unique morphism from any object.
-/
abbrev terminal [HasTerminal C] : C :=
  limit (Functor.empty.{0} C)
#align category_theory.limits.terminal CategoryTheory.Limits.terminal
-/

#print CategoryTheory.Limits.initial /-
/-- An arbitrary choice of initial object, if one exists.
You can use the notation `⊥_ C`.
This object is characterized by having a unique morphism to any object.
-/
abbrev initial [HasInitial C] : C :=
  colimit (Functor.empty.{0} C)
#align category_theory.limits.initial CategoryTheory.Limits.initial
-/

-- mathport name: «expr⊤_ »
notation "⊤_ " C:20 => terminal C

-- mathport name: «expr⊥_ »
notation "⊥_ " C:20 => initial C
section

variable {C}

#print CategoryTheory.Limits.hasTerminal_of_unique /-
/-- We can more explicitly show that a category has a terminal object by specifying the object,
and showing there is a unique morphism to it from any other object. -/
theorem hasTerminal_of_unique (X : C) [h : ∀ Y : C, Unique (Y ⟶ X)] : HasTerminal C :=
  { HasLimit := fun F => HasLimit.mk ⟨_, (isTerminalEquivUnique F X).invFun h⟩ }
#align category_theory.limits.has_terminal_of_unique CategoryTheory.Limits.hasTerminal_of_unique
-/

#print CategoryTheory.Limits.IsTerminal.hasTerminal /-
-- A concrete witness of terminality yields the `has_terminal` typeclass.
theorem IsTerminal.hasTerminal {X : C} (h : IsTerminal X) : HasTerminal C :=
  { HasLimit := fun F => HasLimit.mk ⟨⟨X, by tidy⟩, isLimitChangeEmptyCone _ h _ (Iso.refl _)⟩ }
#align category_theory.limits.is_terminal.has_terminal CategoryTheory.Limits.IsTerminal.hasTerminal
-/

#print CategoryTheory.Limits.hasInitial_of_unique /-
/-- We can more explicitly show that a category has an initial object by specifying the object,
and showing there is a unique morphism from it to any other object. -/
theorem hasInitial_of_unique (X : C) [h : ∀ Y : C, Unique (X ⟶ Y)] : HasInitial C :=
  { HasColimit := fun F => HasColimit.mk ⟨_, (isInitialEquivUnique F X).invFun h⟩ }
#align category_theory.limits.has_initial_of_unique CategoryTheory.Limits.hasInitial_of_unique
-/

#print CategoryTheory.Limits.IsInitial.hasInitial /-
-- Dually, a concrete witness of initiality yields `has_initial`.
theorem IsInitial.hasInitial {X : C} (h : IsInitial X) : HasInitial C :=
  {
    HasColimit := fun F =>
      HasColimit.mk ⟨⟨X, by tidy⟩, isColimitChangeEmptyCocone _ h _ (Iso.refl _)⟩ }
#align category_theory.limits.is_initial.has_initial CategoryTheory.Limits.IsInitial.hasInitial
-/
#print CategoryTheory.Limits.terminal.from /-
/-- The map from an object to the terminal object. -/
abbrev terminal.from [HasTerminal C] (P : C) : P ⟶ ⊤_ C :=
  limit.lift (Functor.empty C) (asEmptyCone P)
#align category_theory.limits.terminal.from CategoryTheory.Limits.terminal.from
-/

#print CategoryTheory.Limits.initial.to /-
/-- The map to an object from the initial object. -/
abbrev initial.to [HasInitial C] (P : C) : ⊥_ C ⟶ P :=
  colimit.desc (Functor.empty C) (asEmptyCocone P)
#align category_theory.limits.initial.to CategoryTheory.Limits.initial.to
-/

#print CategoryTheory.Limits.terminalIsTerminal /-
/-- A terminal object is terminal. -/
def terminalIsTerminal [HasTerminal C] : IsTerminal (⊤_ C) where lift s := terminal.from _
#align category_theory.limits.terminal_is_terminal CategoryTheory.Limits.terminalIsTerminal
-/

#print CategoryTheory.Limits.initialIsInitial /-
/-- An initial object is initial. -/
def initialIsInitial [HasInitial C] : IsInitial (⊥_ C) where desc s := initial.to _
#align category_theory.limits.initial_is_initial CategoryTheory.Limits.initialIsInitial
-/
#print CategoryTheory.Limits.uniqueToTerminal /-
-- Registers the `unique` instance for hom-sets into the chosen terminal object.
instance uniqueToTerminal [HasTerminal C] (P : C) : Unique (P ⟶ ⊤_ C) :=
  isTerminalEquivUnique _ (⊤_ C) terminalIsTerminal P
#align category_theory.limits.unique_to_terminal CategoryTheory.Limits.uniqueToTerminal
-/

#print CategoryTheory.Limits.uniqueFromInitial /-
-- Registers the `unique` instance for hom-sets out of the chosen initial object.
instance uniqueFromInitial [HasInitial C] (P : C) : Unique (⊥_ C ⟶ P) :=
  isInitialEquivUnique _ (⊥_ C) initialIsInitial P
#align category_theory.limits.unique_from_initial CategoryTheory.Limits.uniqueFromInitial
-/

#print CategoryTheory.Limits.terminal.comp_from /-
@[simp]
theorem terminal.comp_from [HasTerminal C] {P Q : C} (f : P ⟶ Q) :
    f ≫ terminal.from Q = terminal.from P := by tidy
#align category_theory.limits.terminal.comp_from CategoryTheory.Limits.terminal.comp_from
-/

#print CategoryTheory.Limits.initial.to_comp /-
@[simp]
theorem initial.to_comp [HasInitial C] {P Q : C} (f : P ⟶ Q) : initial.to P ≫ f = initial.to Q := by
  tidy
#align category_theory.limits.initial.to_comp CategoryTheory.Limits.initial.to_comp
-/
#print CategoryTheory.Limits.initialIsoIsInitial /-
/-- The (unique) isomorphism between the chosen initial object and any other initial object. -/
@[simp]
def initialIsoIsInitial [HasInitial C] {P : C} (t : IsInitial P) : ⊥_ C ≅ P :=
  initialIsInitial.uniqueUpToIso t
#align category_theory.limits.initial_iso_is_initial CategoryTheory.Limits.initialIsoIsInitial
-/

#print CategoryTheory.Limits.terminalIsoIsTerminal /-
/-- The (unique) isomorphism between the chosen terminal object and any other terminal object. -/
@[simp]
def terminalIsoIsTerminal [HasTerminal C] {P : C} (t : IsTerminal P) : ⊤_ C ≅ P :=
  terminalIsTerminal.uniqueUpToIso t
#align category_theory.limits.terminal_iso_is_terminal CategoryTheory.Limits.terminalIsoIsTerminal
-/

#print CategoryTheory.Limits.terminal.isSplitMono_from /-
/-- Any morphism from a terminal object is split mono. -/
-- Instance version of `is_terminal.is_split_mono_from` for the chosen `⊤_ C`.
instance terminal.isSplitMono_from {Y : C} [HasTerminal C] (f : ⊤_ C ⟶ Y) : IsSplitMono f :=
  IsTerminal.isSplitMono_from terminalIsTerminal _
#align category_theory.limits.terminal.is_split_mono_from CategoryTheory.Limits.terminal.isSplitMono_from
-/

#print CategoryTheory.Limits.initial.isSplitEpi_to /-
/-- Any morphism to an initial object is split epi. -/
-- Instance version of `is_initial.is_split_epi_to` for the chosen `⊥_ C`.
instance initial.isSplitEpi_to {Y : C} [HasInitial C] (f : Y ⟶ ⊥_ C) : IsSplitEpi f :=
  IsInitial.isSplitEpi_to initialIsInitial _
#align category_theory.limits.initial.is_split_epi_to CategoryTheory.Limits.initial.isSplitEpi_to
-/
#print CategoryTheory.Limits.terminalOpOfInitial /-
/-- An initial object is terminal in the opposite category. -/
-- Lift/uniqueness transfer along `op`/`unop` of morphisms.
def terminalOpOfInitial {X : C} (t : IsInitial X) : IsTerminal (Opposite.op X)
    where
  lift s := (t.to s.pt.unop).op
  uniq s m w := Quiver.Hom.unop_inj (t.hom_ext _ _)
#align category_theory.limits.terminal_op_of_initial CategoryTheory.Limits.terminalOpOfInitial
-/

#print CategoryTheory.Limits.terminalUnopOfInitial /-
/-- An initial object in the opposite category is terminal in the original category. -/
def terminalUnopOfInitial {X : Cᵒᵖ} (t : IsInitial X) : IsTerminal X.unop
    where
  lift s := (t.to (Opposite.op s.pt)).unop
  uniq s m w := Quiver.Hom.op_inj (t.hom_ext _ _)
#align category_theory.limits.terminal_unop_of_initial CategoryTheory.Limits.terminalUnopOfInitial
-/

#print CategoryTheory.Limits.initialOpOfTerminal /-
/-- A terminal object is initial in the opposite category. -/
def initialOpOfTerminal {X : C} (t : IsTerminal X) : IsInitial (Opposite.op X)
    where
  desc s := (t.from s.pt.unop).op
  uniq s m w := Quiver.Hom.unop_inj (t.hom_ext _ _)
#align category_theory.limits.initial_op_of_terminal CategoryTheory.Limits.initialOpOfTerminal
-/

#print CategoryTheory.Limits.initialUnopOfTerminal /-
/-- A terminal object in the opposite category is initial in the original category. -/
def initialUnopOfTerminal {X : Cᵒᵖ} (t : IsTerminal X) : IsInitial X.unop
    where
  desc s := (t.from (Opposite.op s.pt)).unop
  uniq s m w := Quiver.Hom.op_inj (t.hom_ext _ _)
#align category_theory.limits.initial_unop_of_terminal CategoryTheory.Limits.initialUnopOfTerminal
-/
#print CategoryTheory.Limits.hasInitial_op_of_hasTerminal /-
-- A terminal object in `C` gives an initial object in `Cᵒᵖ` (instance).
instance hasInitial_op_of_hasTerminal [HasTerminal C] : HasInitial Cᵒᵖ :=
  (initialOpOfTerminal terminalIsTerminal).HasInitial
#align category_theory.limits.has_initial_op_of_has_terminal CategoryTheory.Limits.hasInitial_op_of_hasTerminal
-/

#print CategoryTheory.Limits.hasTerminal_op_of_hasInitial /-
-- An initial object in `C` gives a terminal object in `Cᵒᵖ` (instance).
instance hasTerminal_op_of_hasInitial [HasInitial C] : HasTerminal Cᵒᵖ :=
  (terminalOpOfInitial initialIsInitial).HasTerminal
#align category_theory.limits.has_terminal_op_of_has_initial CategoryTheory.Limits.hasTerminal_op_of_hasInitial
-/

#print CategoryTheory.Limits.hasTerminal_of_hasInitial_op /-
-- Converse direction: kept as a theorem, not an instance, to avoid instance loops.
theorem hasTerminal_of_hasInitial_op [HasInitial Cᵒᵖ] : HasTerminal C :=
  (terminalUnopOfInitial initialIsInitial).HasTerminal
#align category_theory.limits.has_terminal_of_has_initial_op CategoryTheory.Limits.hasTerminal_of_hasInitial_op
-/

#print CategoryTheory.Limits.hasInitial_of_hasTerminal_op /-
-- Converse direction: kept as a theorem, not an instance, to avoid instance loops.
theorem hasInitial_of_hasTerminal_op [HasTerminal Cᵒᵖ] : HasInitial C :=
  (initialUnopOfTerminal terminalIsTerminal).HasInitial
#align category_theory.limits.has_initial_of_has_terminal_op CategoryTheory.Limits.hasInitial_of_hasTerminal_op
-/
-- The constant functor at `⊤_ C` has a limit: the cone with point `⊤_ C` itself.
instance {J : Type _} [Category J] {C : Type _} [Category C] [HasTerminal C] :
    HasLimit ((CategoryTheory.Functor.const J).obj (⊤_ C)) :=
  HasLimit.mk
    { Cone :=
        { pt := ⊤_ C
          π := { app := fun _ => terminal.from _ } }
      IsLimit := { lift := fun s => terminal.from _ } }
/- warning: category_theory.limits.limit_const_terminal -> CategoryTheory.Limits.limitConstTerminal is a dubious translation:
lean 3 declaration is
forall {J : Type.{u1}} [_inst_2 : CategoryTheory.Category.{u2, u1} J] {C : Type.{u3}} [_inst_3 : CategoryTheory.Category.{u4, u3} C] [_inst_4 : CategoryTheory.Limits.HasTerminal.{u4, u3} C _inst_3], CategoryTheory.Iso.{u4, u3} C _inst_3 (CategoryTheory.Limits.limit.{u2, u1, u4, u3} J _inst_2 C _inst_3 (CategoryTheory.Functor.obj.{u4, max u1 u4, u3, max u2 u4 u1 u3} C _inst_3 (CategoryTheory.Functor.{u2, u4, u1, u3} J _inst_2 C _inst_3) (CategoryTheory.Functor.category.{u2, u4, u1, u3} J _inst_2 C _inst_3) (CategoryTheory.Functor.const.{u2, u4, u1, u3} J _inst_2 C _inst_3) (CategoryTheory.Limits.terminal.{u4, u3} C _inst_3 _inst_4)) (CategoryTheory.Limits.Obj.hasLimit.{u1, u2, u3, u4} J _inst_2 C _inst_3 _inst_4)) (CategoryTheory.Limits.terminal.{u4, u3} C _inst_3 _inst_4)
but is expected to have type
forall {J : Type.{u1}} [_inst_2 : CategoryTheory.Category.{u2, u1} J] {C : Type.{u3}} [_inst_3 : CategoryTheory.Category.{u4, u3} C] [_inst_4 : CategoryTheory.Limits.HasTerminal.{u4, u3} C _inst_3], CategoryTheory.Iso.{u4, u3} C _inst_3 (CategoryTheory.Limits.limit.{u2, u1, u4, u3} J _inst_2 C _inst_3 (Prefunctor.obj.{succ u4, max (succ u1) (succ u4), u3, max (max (max u1 u2) u4) u3} C (CategoryTheory.CategoryStruct.toQuiver.{u4, u3} C (CategoryTheory.Category.toCategoryStruct.{u4, u3} C _inst_3)) (CategoryTheory.Functor.{u2, u4, u1, u3} J _inst_2 C _inst_3) (CategoryTheory.CategoryStruct.toQuiver.{max u1 u4, max (max (max u1 u2) u3) u4} (CategoryTheory.Functor.{u2, u4, u1, u3} J _inst_2 C _inst_3) (CategoryTheory.Category.toCategoryStruct.{max u1 u4, max (max (max u1 u2) u3) u4} (CategoryTheory.Functor.{u2, u4, u1, u3} J _inst_2 C _inst_3) (CategoryTheory.Functor.category.{u2, u4, u1, u3} J _inst_2 C _inst_3))) (CategoryTheory.Functor.toPrefunctor.{u4, max u1 u4, u3, max (max (max u1 u2) u3) u4} C _inst_3 (CategoryTheory.Functor.{u2, u4, u1, u3} J _inst_2 C _inst_3) (CategoryTheory.Functor.category.{u2, u4, u1, u3} J _inst_2 C _inst_3) (CategoryTheory.Functor.const.{u2, u4, u1, u3} J _inst_2 C _inst_3)) (CategoryTheory.Limits.terminal.{u4, u3} C _inst_3 _inst_4)) (CategoryTheory.Limits.instHasLimitObjToQuiverToCategoryStructFunctorToQuiverToCategoryStructCategoryToPrefunctorConstTerminal.{u1, u2, u3, u4} J _inst_2 C _inst_3 _inst_4)) (CategoryTheory.Limits.terminal.{u4, u3} C _inst_3 _inst_4)
Case conversion may be inaccurate. Consider using '#align category_theory.limits.limit_const_terminal CategoryTheory.Limits.limitConstTerminalₓ'. -/
/-- The limit of the constant `⊤_ C` functor is `⊤_ C`. -/
@[simps Hom]
def limitConstTerminal {J : Type _} [Category J] {C : Type _} [Category C] [HasTerminal C] :
    limit ((CategoryTheory.Functor.const J).obj (⊤_ C)) ≅ ⊤_ C
    where
  Hom := terminal.from _
  -- inverse: lift the cone whose every component is the canonical map to `⊤_ C`
  inv :=
    limit.lift ((CategoryTheory.Functor.const J).obj (⊤_ C))
      { pt := ⊤_ C
        π := { app := fun j => terminal.from _ } }
#align category_theory.limits.limit_const_terminal CategoryTheory.Limits.limitConstTerminal
/- warning: category_theory.limits.limit_const_terminal_inv_π -> CategoryTheory.Limits.limitConstTerminal_inv_π is a dubious translation:
lean 3 declaration is
forall {J : Type.{u1}} [_inst_2 : CategoryTheory.Category.{u2, u1} J] {C : Type.{u3}} [_inst_3 : CategoryTheory.Category.{u4, u3} C] [_inst_4 : CategoryTheory.Limits.HasTerminal.{u4, u3} C _inst_3] {j : J}, Eq.{succ u4} (Quiver.Hom.{succ u4, u3} C (CategoryTheory.CategoryStruct.toQuiver.{u4, u3} C (CategoryTheory.Category.toCategoryStruct.{u4, u3} C _inst_3)) (CategoryTheory.Limits.terminal.{u4, u3} C _inst_3 _inst_4) (CategoryTheory.Functor.obj.{u2, u4, u1, u3} J _inst_2 C _inst_3 (CategoryTheory.Functor.obj.{u4, max u1 u4, u3, max u2 u4 u1 u3} C _inst_3 (CategoryTheory.Functor.{u2, u4, u1, u3} J _inst_2 C _inst_3) (CategoryTheory.Functor.category.{u2, u4, u1, u3} J _inst_2 C _inst_3) (CategoryTheory.Functor.const.{u2, u4, u1, u3} J _inst_2 C _inst_3) (CategoryTheory.Limits.terminal.{u4, u3} C _inst_3 _inst_4)) j)) (CategoryTheory.CategoryStruct.comp.{u4, u3} C (CategoryTheory.Category.toCategoryStruct.{u4, u3} C _inst_3) (CategoryTheory.Limits.terminal.{u4, u3} C _inst_3 _inst_4) (CategoryTheory.Limits.limit.{u2, u1, u4, u3} J _inst_2 C _inst_3 (CategoryTheory.Functor.obj.{u4, max u1 u4, u3, max u2 u4 u1 u3} C _inst_3 (CategoryTheory.Functor.{u2, u4, u1, u3} J _inst_2 C _inst_3) (CategoryTheory.Functor.category.{u2, u4, u1, u3} J _inst_2 C _inst_3) (CategoryTheory.Functor.const.{u2, u4, u1, u3} J _inst_2 C _inst_3) (CategoryTheory.Limits.terminal.{u4, u3} C _inst_3 _inst_4)) (CategoryTheory.Limits.Obj.hasLimit.{u1, u2, u3, u4} J _inst_2 C _inst_3 _inst_4)) (CategoryTheory.Functor.obj.{u2, u4, u1, u3} J _inst_2 C _inst_3 (CategoryTheory.Functor.obj.{u4, max u1 u4, u3, max u2 u4 u1 u3} C _inst_3 (CategoryTheory.Functor.{u2, u4, u1, u3} J _inst_2 C _inst_3) (CategoryTheory.Functor.category.{u2, u4, u1, u3} J _inst_2 C _inst_3) (CategoryTheory.Functor.const.{u2, u4, u1, u3} J _inst_2 C _inst_3) (CategoryTheory.Limits.terminal.{u4, u3} C _inst_3 _inst_4)) j) (CategoryTheory.Iso.inv.{u4, u3} C _inst_3 (CategoryTheory.Limits.limit.{u2, u1, u4, u3} J _inst_2 C _inst_3 
(CategoryTheory.Functor.obj.{u4, max u1 u4, u3, max u2 u4 u1 u3} C _inst_3 (CategoryTheory.Functor.{u2, u4, u1, u3} J _inst_2 C _inst_3) (CategoryTheory.Functor.category.{u2, u4, u1, u3} J _inst_2 C _inst_3) (CategoryTheory.Functor.const.{u2, u4, u1, u3} J _inst_2 C _inst_3) (CategoryTheory.Limits.terminal.{u4, u3} C _inst_3 _inst_4)) (CategoryTheory.Limits.Obj.hasLimit.{u1, u2, u3, u4} J _inst_2 C _inst_3 _inst_4)) (CategoryTheory.Limits.terminal.{u4, u3} C _inst_3 _inst_4) (CategoryTheory.Limits.limitConstTerminal.{u1, u2, u3, u4} J _inst_2 C _inst_3 _inst_4)) (CategoryTheory.Limits.limit.π.{u2, u1, u4, u3} J _inst_2 C _inst_3 (CategoryTheory.Functor.obj.{u4, max u1 u4, u3, max u2 u4 u1 u3} C _inst_3 (CategoryTheory.Functor.{u2, u4, u1, u3} J _inst_2 C _inst_3) (CategoryTheory.Functor.category.{u2, u4, u1, u3} J _inst_2 C _inst_3) (CategoryTheory.Functor.const.{u2, u4, u1, u3} J _inst_2 C _inst_3) (CategoryTheory.Limits.terminal.{u4, u3} C _inst_3 _inst_4)) (CategoryTheory.Limits.Obj.hasLimit.{u1, u2, u3, u4} J _inst_2 C _inst_3 _inst_4) j)) (CategoryTheory.Limits.terminal.from.{u4, u3} C _inst_3 _inst_4 (CategoryTheory.Limits.terminal.{u4, u3} C _inst_3 _inst_4))
but is expected to have type
forall {J : Type.{u4}} [_inst_2 : CategoryTheory.Category.{u3, u4} J] {C : Type.{u2}} [_inst_3 : CategoryTheory.Category.{u1, u2} C] [_inst_4 : CategoryTheory.Limits.HasTerminal.{u1, u2} C _inst_3] {j : J}, Eq.{succ u1} (Quiver.Hom.{succ u1, u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_3)) (CategoryTheory.Limits.terminal.{u1, u2} C _inst_3 _inst_4) (Prefunctor.obj.{succ u3, succ u1, u4, u2} J (CategoryTheory.CategoryStruct.toQuiver.{u3, u4} J (CategoryTheory.Category.toCategoryStruct.{u3, u4} J _inst_2)) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_3)) (CategoryTheory.Functor.toPrefunctor.{u3, u1, u4, u2} J _inst_2 C _inst_3 (Prefunctor.obj.{succ u1, max (succ u4) (succ u1), u2, max (max (max u4 u3) u1) u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_3)) (CategoryTheory.Functor.{u3, u1, u4, u2} J _inst_2 C _inst_3) (CategoryTheory.CategoryStruct.toQuiver.{max u4 u1, max (max (max u4 u3) u2) u1} (CategoryTheory.Functor.{u3, u1, u4, u2} J _inst_2 C _inst_3) (CategoryTheory.Category.toCategoryStruct.{max u4 u1, max (max (max u4 u3) u2) u1} (CategoryTheory.Functor.{u3, u1, u4, u2} J _inst_2 C _inst_3) (CategoryTheory.Functor.category.{u3, u1, u4, u2} J _inst_2 C _inst_3))) (CategoryTheory.Functor.toPrefunctor.{u1, max u4 u1, u2, max (max (max u4 u3) u2) u1} C _inst_3 (CategoryTheory.Functor.{u3, u1, u4, u2} J _inst_2 C _inst_3) (CategoryTheory.Functor.category.{u3, u1, u4, u2} J _inst_2 C _inst_3) (CategoryTheory.Functor.const.{u3, u1, u4, u2} J _inst_2 C _inst_3)) (CategoryTheory.Limits.terminal.{u1, u2} C _inst_3 _inst_4))) j)) (CategoryTheory.CategoryStruct.comp.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_3) (CategoryTheory.Limits.terminal.{u1, u2} C _inst_3 _inst_4) (CategoryTheory.Limits.limit.{u3, u4, u1, u2} J _inst_2 C _inst_3 
(Prefunctor.obj.{succ u1, max (succ u4) (succ u1), u2, max (max (max u4 u3) u1) u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_3)) (CategoryTheory.Functor.{u3, u1, u4, u2} J _inst_2 C _inst_3) (CategoryTheory.CategoryStruct.toQuiver.{max u4 u1, max (max (max u4 u3) u2) u1} (CategoryTheory.Functor.{u3, u1, u4, u2} J _inst_2 C _inst_3) (CategoryTheory.Category.toCategoryStruct.{max u4 u1, max (max (max u4 u3) u2) u1} (CategoryTheory.Functor.{u3, u1, u4, u2} J _inst_2 C _inst_3) (CategoryTheory.Functor.category.{u3, u1, u4, u2} J _inst_2 C _inst_3))) (CategoryTheory.Functor.toPrefunctor.{u1, max u4 u1, u2, max (max (max u4 u3) u2) u1} C _inst_3 (CategoryTheory.Functor.{u3, u1, u4, u2} J _inst_2 C _inst_3) (CategoryTheory.Functor.category.{u3, u1, u4, u2} J _inst_2 C _inst_3) (CategoryTheory.Functor.const.{u3, u1, u4, u2} J _inst_2 C _inst_3)) (CategoryTheory.Limits.terminal.{u1, u2} C _inst_3 _inst_4)) (CategoryTheory.Limits.instHasLimitObjToQuiverToCategoryStructFunctorToQuiverToCategoryStructCategoryToPrefunctorConstTerminal.{u4, u3, u2, u1} J _inst_2 C _inst_3 _inst_4)) (Prefunctor.obj.{succ u3, succ u1, u4, u2} J (CategoryTheory.CategoryStruct.toQuiver.{u3, u4} J (CategoryTheory.Category.toCategoryStruct.{u3, u4} J _inst_2)) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_3)) (CategoryTheory.Functor.toPrefunctor.{u3, u1, u4, u2} J _inst_2 C _inst_3 (Prefunctor.obj.{succ u1, max (succ u4) (succ u1), u2, max (max (max u4 u3) u1) u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_3)) (CategoryTheory.Functor.{u3, u1, u4, u2} J _inst_2 C _inst_3) (CategoryTheory.CategoryStruct.toQuiver.{max u4 u1, max (max (max u4 u3) u2) u1} (CategoryTheory.Functor.{u3, u1, u4, u2} J _inst_2 C _inst_3) (CategoryTheory.Category.toCategoryStruct.{max u4 u1, max (max (max u4 u3) u2) u1} 
(CategoryTheory.Functor.{u3, u1, u4, u2} J _inst_2 C _inst_3) (CategoryTheory.Functor.category.{u3, u1, u4, u2} J _inst_2 C _inst_3))) (CategoryTheory.Functor.toPrefunctor.{u1, max u4 u1, u2, max (max (max u4 u3) u2) u1} C _inst_3 (CategoryTheory.Functor.{u3, u1, u4, u2} J _inst_2 C _inst_3) (CategoryTheory.Functor.category.{u3, u1, u4, u2} J _inst_2 C _inst_3) (CategoryTheory.Functor.const.{u3, u1, u4, u2} J _inst_2 C _inst_3)) (CategoryTheory.Limits.terminal.{u1, u2} C _inst_3 _inst_4))) j) (CategoryTheory.Iso.inv.{u1, u2} C _inst_3 (CategoryTheory.Limits.limit.{u3, u4, u1, u2} J _inst_2 C _inst_3 (Prefunctor.obj.{succ u1, max (succ u4) (succ u1), u2, max (max (max u4 u3) u1) u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_3)) (CategoryTheory.Functor.{u3, u1, u4, u2} J _inst_2 C _inst_3) (CategoryTheory.CategoryStruct.toQuiver.{max u4 u1, max (max (max u4 u3) u2) u1} (CategoryTheory.Functor.{u3, u1, u4, u2} J _inst_2 C _inst_3) (CategoryTheory.Category.toCategoryStruct.{max u4 u1, max (max (max u4 u3) u2) u1} (CategoryTheory.Functor.{u3, u1, u4, u2} J _inst_2 C _inst_3) (CategoryTheory.Functor.category.{u3, u1, u4, u2} J _inst_2 C _inst_3))) (CategoryTheory.Functor.toPrefunctor.{u1, max u4 u1, u2, max (max (max u4 u3) u2) u1} C _inst_3 (CategoryTheory.Functor.{u3, u1, u4, u2} J _inst_2 C _inst_3) (CategoryTheory.Functor.category.{u3, u1, u4, u2} J _inst_2 C _inst_3) (CategoryTheory.Functor.const.{u3, u1, u4, u2} J _inst_2 C _inst_3)) (CategoryTheory.Limits.terminal.{u1, u2} C _inst_3 _inst_4)) (CategoryTheory.Limits.instHasLimitObjToQuiverToCategoryStructFunctorToQuiverToCategoryStructCategoryToPrefunctorConstTerminal.{u4, u3, u2, u1} J _inst_2 C _inst_3 _inst_4)) (CategoryTheory.Limits.terminal.{u1, u2} C _inst_3 _inst_4) (CategoryTheory.Limits.limitConstTerminal.{u4, u3, u2, u1} J _inst_2 C _inst_3 _inst_4)) (CategoryTheory.Limits.limit.π.{u3, u4, u1, u2} J _inst_2 C _inst_3 
(Prefunctor.obj.{succ u1, max (succ u4) (succ u1), u2, max (max (max u4 u3) u1) u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_3)) (CategoryTheory.Functor.{u3, u1, u4, u2} J _inst_2 C _inst_3) (CategoryTheory.CategoryStruct.toQuiver.{max u4 u1, max (max (max u4 u3) u2) u1} (CategoryTheory.Functor.{u3, u1, u4, u2} J _inst_2 C _inst_3) (CategoryTheory.Category.toCategoryStruct.{max u4 u1, max (max (max u4 u3) u2) u1} (CategoryTheory.Functor.{u3, u1, u4, u2} J _inst_2 C _inst_3) (CategoryTheory.Functor.category.{u3, u1, u4, u2} J _inst_2 C _inst_3))) (CategoryTheory.Functor.toPrefunctor.{u1, max u4 u1, u2, max (max (max u4 u3) u2) u1} C _inst_3 (CategoryTheory.Functor.{u3, u1, u4, u2} J _inst_2 C _inst_3) (CategoryTheory.Functor.category.{u3, u1, u4, u2} J _inst_2 C _inst_3) (CategoryTheory.Functor.const.{u3, u1, u4, u2} J _inst_2 C _inst_3)) (CategoryTheory.Limits.terminal.{u1, u2} C _inst_3 _inst_4)) (CategoryTheory.Limits.instHasLimitObjToQuiverToCategoryStructFunctorToQuiverToCategoryStructCategoryToPrefunctorConstTerminal.{u4, u3, u2, u1} J _inst_2 C _inst_3 _inst_4) j)) (CategoryTheory.Limits.terminal.from.{u1, u2} C _inst_3 _inst_4 (CategoryTheory.Limits.terminal.{u1, u2} C _inst_3 _inst_4))
Case conversion may be inaccurate. Consider using '#align category_theory.limits.limit_const_terminal_inv_π CategoryTheory.Limits.limitConstTerminal_inv_πₓ'. -/
/-- Composing the inverse of `limit_const_terminal` with any limit projection of the constant
`⊤_ C` diagram is the unique morphism to the terminal object. -/
@[simp, reassoc.1]
theorem limitConstTerminal_inv_π {J : Type _} [Category J] {C : Type _} [Category C] [HasTerminal C]
{j : J} :
limitConstTerminal.inv ≫ limit.π ((CategoryTheory.Functor.const J).obj (⊤_ C)) j =
terminal.from _ :=
by ext ⟨⟨⟩⟩
#align category_theory.limits.limit_const_terminal_inv_π CategoryTheory.Limits.limitConstTerminal_inv_π
-- The constant functor at `⊥_ C` has a colimit: `⊥_ C` itself is the cocone point, with
-- `initial.to` supplying both the cocone legs and the descent morphisms (each is the unique
-- map out of the initial object).
instance {J : Type _} [Category J] {C : Type _} [Category C] [HasInitial C] :
HasColimit ((CategoryTheory.Functor.const J).obj (⊥_ C)) :=
HasColimit.mk
{ Cocone :=
{ pt := ⊥_ C
ι := { app := fun _ => initial.to _ } }
IsColimit := { desc := fun s => initial.to _ } }
/- warning: category_theory.limits.colimit_const_initial -> CategoryTheory.Limits.colimitConstInitial is a dubious translation:
lean 3 declaration is
forall {J : Type.{u1}} [_inst_2 : CategoryTheory.Category.{u2, u1} J] {C : Type.{u3}} [_inst_3 : CategoryTheory.Category.{u4, u3} C] [_inst_4 : CategoryTheory.Limits.HasInitial.{u4, u3} C _inst_3], CategoryTheory.Iso.{u4, u3} C _inst_3 (CategoryTheory.Limits.colimit.{u2, u1, u4, u3} J _inst_2 C _inst_3 (CategoryTheory.Functor.obj.{u4, max u1 u4, u3, max u2 u4 u1 u3} C _inst_3 (CategoryTheory.Functor.{u2, u4, u1, u3} J _inst_2 C _inst_3) (CategoryTheory.Functor.category.{u2, u4, u1, u3} J _inst_2 C _inst_3) (CategoryTheory.Functor.const.{u2, u4, u1, u3} J _inst_2 C _inst_3) (CategoryTheory.Limits.initial.{u4, u3} C _inst_3 _inst_4)) (CategoryTheory.Limits.Obj.hasColimit.{u1, u2, u3, u4} J _inst_2 C _inst_3 _inst_4)) (CategoryTheory.Limits.initial.{u4, u3} C _inst_3 _inst_4)
but is expected to have type
forall {J : Type.{u1}} [_inst_2 : CategoryTheory.Category.{u2, u1} J] {C : Type.{u3}} [_inst_3 : CategoryTheory.Category.{u4, u3} C] [_inst_4 : CategoryTheory.Limits.HasInitial.{u4, u3} C _inst_3], CategoryTheory.Iso.{u4, u3} C _inst_3 (CategoryTheory.Limits.colimit.{u2, u1, u4, u3} J _inst_2 C _inst_3 (Prefunctor.obj.{succ u4, max (succ u1) (succ u4), u3, max (max (max u1 u2) u4) u3} C (CategoryTheory.CategoryStruct.toQuiver.{u4, u3} C (CategoryTheory.Category.toCategoryStruct.{u4, u3} C _inst_3)) (CategoryTheory.Functor.{u2, u4, u1, u3} J _inst_2 C _inst_3) (CategoryTheory.CategoryStruct.toQuiver.{max u1 u4, max (max (max u1 u2) u3) u4} (CategoryTheory.Functor.{u2, u4, u1, u3} J _inst_2 C _inst_3) (CategoryTheory.Category.toCategoryStruct.{max u1 u4, max (max (max u1 u2) u3) u4} (CategoryTheory.Functor.{u2, u4, u1, u3} J _inst_2 C _inst_3) (CategoryTheory.Functor.category.{u2, u4, u1, u3} J _inst_2 C _inst_3))) (CategoryTheory.Functor.toPrefunctor.{u4, max u1 u4, u3, max (max (max u1 u2) u3) u4} C _inst_3 (CategoryTheory.Functor.{u2, u4, u1, u3} J _inst_2 C _inst_3) (CategoryTheory.Functor.category.{u2, u4, u1, u3} J _inst_2 C _inst_3) (CategoryTheory.Functor.const.{u2, u4, u1, u3} J _inst_2 C _inst_3)) (CategoryTheory.Limits.initial.{u4, u3} C _inst_3 _inst_4)) (CategoryTheory.Limits.instHasColimitObjToQuiverToCategoryStructFunctorToQuiverToCategoryStructCategoryToPrefunctorConstInitial.{u1, u2, u3, u4} J _inst_2 C _inst_3 _inst_4)) (CategoryTheory.Limits.initial.{u4, u3} C _inst_3 _inst_4)
Case conversion may be inaccurate. Consider using '#align category_theory.limits.colimit_const_initial CategoryTheory.Limits.colimitConstInitialₓ'. -/
/-- The colimit of the constant `⊥_ C` functor is `⊥_ C`. -/
@[simps inv]
def colimitConstInitial {J : Type _} [Category J] {C : Type _} [Category C] [HasInitial C] :
colimit ((CategoryTheory.Functor.const J).obj (⊥_ C)) ≅ ⊥_ C
where
-- Descend out of the colimit via the cocone on `⊥_ C` whose legs are the unique morphisms
-- out of the initial object.
Hom :=
colimit.desc ((CategoryTheory.Functor.const J).obj (⊥_ C))
{ pt := ⊥_ C
ι := { app := fun j => initial.to _ } }
-- The inverse is the unique morphism out of the initial object.
inv := initial.to _
#align category_theory.limits.colimit_const_initial CategoryTheory.Limits.colimitConstInitial
/- warning: category_theory.limits.ι_colimit_const_initial_hom -> CategoryTheory.Limits.ι_colimitConstInitial_hom is a dubious translation:
lean 3 declaration is
forall {J : Type.{u1}} [_inst_2 : CategoryTheory.Category.{u2, u1} J] {C : Type.{u3}} [_inst_3 : CategoryTheory.Category.{u4, u3} C] [_inst_4 : CategoryTheory.Limits.HasInitial.{u4, u3} C _inst_3] {j : J}, Eq.{succ u4} (Quiver.Hom.{succ u4, u3} C (CategoryTheory.CategoryStruct.toQuiver.{u4, u3} C (CategoryTheory.Category.toCategoryStruct.{u4, u3} C _inst_3)) (CategoryTheory.Functor.obj.{u2, u4, u1, u3} J _inst_2 C _inst_3 (CategoryTheory.Functor.obj.{u4, max u1 u4, u3, max u2 u4 u1 u3} C _inst_3 (CategoryTheory.Functor.{u2, u4, u1, u3} J _inst_2 C _inst_3) (CategoryTheory.Functor.category.{u2, u4, u1, u3} J _inst_2 C _inst_3) (CategoryTheory.Functor.const.{u2, u4, u1, u3} J _inst_2 C _inst_3) (CategoryTheory.Limits.initial.{u4, u3} C _inst_3 _inst_4)) j) (CategoryTheory.Limits.initial.{u4, u3} C _inst_3 _inst_4)) (CategoryTheory.CategoryStruct.comp.{u4, u3} C (CategoryTheory.Category.toCategoryStruct.{u4, u3} C _inst_3) (CategoryTheory.Functor.obj.{u2, u4, u1, u3} J _inst_2 C _inst_3 (CategoryTheory.Functor.obj.{u4, max u1 u4, u3, max u2 u4 u1 u3} C _inst_3 (CategoryTheory.Functor.{u2, u4, u1, u3} J _inst_2 C _inst_3) (CategoryTheory.Functor.category.{u2, u4, u1, u3} J _inst_2 C _inst_3) (CategoryTheory.Functor.const.{u2, u4, u1, u3} J _inst_2 C _inst_3) (CategoryTheory.Limits.initial.{u4, u3} C _inst_3 _inst_4)) j) (CategoryTheory.Limits.colimit.{u2, u1, u4, u3} J _inst_2 C _inst_3 (CategoryTheory.Functor.obj.{u4, max u1 u4, u3, max u2 u4 u1 u3} C _inst_3 (CategoryTheory.Functor.{u2, u4, u1, u3} J _inst_2 C _inst_3) (CategoryTheory.Functor.category.{u2, u4, u1, u3} J _inst_2 C _inst_3) (CategoryTheory.Functor.const.{u2, u4, u1, u3} J _inst_2 C _inst_3) (CategoryTheory.Limits.initial.{u4, u3} C _inst_3 _inst_4)) (CategoryTheory.Limits.Obj.hasColimit.{u1, u2, u3, u4} J _inst_2 C _inst_3 _inst_4)) (CategoryTheory.Limits.initial.{u4, u3} C _inst_3 _inst_4) (CategoryTheory.Limits.colimit.ι.{u2, u1, u4, u3} J _inst_2 C _inst_3 (CategoryTheory.Functor.obj.{u4, max u1 u4, 
u3, max u2 u4 u1 u3} C _inst_3 (CategoryTheory.Functor.{u2, u4, u1, u3} J _inst_2 C _inst_3) (CategoryTheory.Functor.category.{u2, u4, u1, u3} J _inst_2 C _inst_3) (CategoryTheory.Functor.const.{u2, u4, u1, u3} J _inst_2 C _inst_3) (CategoryTheory.Limits.initial.{u4, u3} C _inst_3 _inst_4)) (CategoryTheory.Limits.Obj.hasColimit.{u1, u2, u3, u4} J _inst_2 C _inst_3 _inst_4) j) (CategoryTheory.Iso.hom.{u4, u3} C _inst_3 (CategoryTheory.Limits.colimit.{u2, u1, u4, u3} J _inst_2 C _inst_3 (CategoryTheory.Functor.obj.{u4, max u1 u4, u3, max u2 u4 u1 u3} C _inst_3 (CategoryTheory.Functor.{u2, u4, u1, u3} J _inst_2 C _inst_3) (CategoryTheory.Functor.category.{u2, u4, u1, u3} J _inst_2 C _inst_3) (CategoryTheory.Functor.const.{u2, u4, u1, u3} J _inst_2 C _inst_3) (CategoryTheory.Limits.initial.{u4, u3} C _inst_3 _inst_4)) (CategoryTheory.Limits.Obj.hasColimit.{u1, u2, u3, u4} J _inst_2 C _inst_3 _inst_4)) (CategoryTheory.Limits.initial.{u4, u3} C _inst_3 _inst_4) (CategoryTheory.Limits.colimitConstInitial.{u1, u2, u3, u4} J _inst_2 C _inst_3 _inst_4))) (CategoryTheory.Limits.initial.to.{u4, u3} C _inst_3 _inst_4 (CategoryTheory.Limits.initial.{u4, u3} C _inst_3 _inst_4))
but is expected to have type
forall {J : Type.{u4}} [_inst_2 : CategoryTheory.Category.{u3, u4} J] {C : Type.{u2}} [_inst_3 : CategoryTheory.Category.{u1, u2} C] [_inst_4 : CategoryTheory.Limits.HasInitial.{u1, u2} C _inst_3] {j : J}, Eq.{succ u1} (Quiver.Hom.{succ u1, u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_3)) (Prefunctor.obj.{succ u3, succ u1, u4, u2} J (CategoryTheory.CategoryStruct.toQuiver.{u3, u4} J (CategoryTheory.Category.toCategoryStruct.{u3, u4} J _inst_2)) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_3)) (CategoryTheory.Functor.toPrefunctor.{u3, u1, u4, u2} J _inst_2 C _inst_3 (Prefunctor.obj.{succ u1, max (succ u4) (succ u1), u2, max (max (max u4 u3) u1) u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_3)) (CategoryTheory.Functor.{u3, u1, u4, u2} J _inst_2 C _inst_3) (CategoryTheory.CategoryStruct.toQuiver.{max u4 u1, max (max (max u4 u3) u2) u1} (CategoryTheory.Functor.{u3, u1, u4, u2} J _inst_2 C _inst_3) (CategoryTheory.Category.toCategoryStruct.{max u4 u1, max (max (max u4 u3) u2) u1} (CategoryTheory.Functor.{u3, u1, u4, u2} J _inst_2 C _inst_3) (CategoryTheory.Functor.category.{u3, u1, u4, u2} J _inst_2 C _inst_3))) (CategoryTheory.Functor.toPrefunctor.{u1, max u4 u1, u2, max (max (max u4 u3) u2) u1} C _inst_3 (CategoryTheory.Functor.{u3, u1, u4, u2} J _inst_2 C _inst_3) (CategoryTheory.Functor.category.{u3, u1, u4, u2} J _inst_2 C _inst_3) (CategoryTheory.Functor.const.{u3, u1, u4, u2} J _inst_2 C _inst_3)) (CategoryTheory.Limits.initial.{u1, u2} C _inst_3 _inst_4))) j) (CategoryTheory.Limits.initial.{u1, u2} C _inst_3 _inst_4)) (CategoryTheory.CategoryStruct.comp.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_3) (Prefunctor.obj.{succ u3, succ u1, u4, u2} J (CategoryTheory.CategoryStruct.toQuiver.{u3, u4} J 
(CategoryTheory.Category.toCategoryStruct.{u3, u4} J _inst_2)) C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_3)) (CategoryTheory.Functor.toPrefunctor.{u3, u1, u4, u2} J _inst_2 C _inst_3 (Prefunctor.obj.{succ u1, max (succ u4) (succ u1), u2, max (max (max u4 u3) u1) u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_3)) (CategoryTheory.Functor.{u3, u1, u4, u2} J _inst_2 C _inst_3) (CategoryTheory.CategoryStruct.toQuiver.{max u4 u1, max (max (max u4 u3) u2) u1} (CategoryTheory.Functor.{u3, u1, u4, u2} J _inst_2 C _inst_3) (CategoryTheory.Category.toCategoryStruct.{max u4 u1, max (max (max u4 u3) u2) u1} (CategoryTheory.Functor.{u3, u1, u4, u2} J _inst_2 C _inst_3) (CategoryTheory.Functor.category.{u3, u1, u4, u2} J _inst_2 C _inst_3))) (CategoryTheory.Functor.toPrefunctor.{u1, max u4 u1, u2, max (max (max u4 u3) u2) u1} C _inst_3 (CategoryTheory.Functor.{u3, u1, u4, u2} J _inst_2 C _inst_3) (CategoryTheory.Functor.category.{u3, u1, u4, u2} J _inst_2 C _inst_3) (CategoryTheory.Functor.const.{u3, u1, u4, u2} J _inst_2 C _inst_3)) (CategoryTheory.Limits.initial.{u1, u2} C _inst_3 _inst_4))) j) (CategoryTheory.Limits.colimit.{u3, u4, u1, u2} J _inst_2 C _inst_3 (Prefunctor.obj.{succ u1, max (succ u4) (succ u1), u2, max (max (max u4 u3) u1) u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_3)) (CategoryTheory.Functor.{u3, u1, u4, u2} J _inst_2 C _inst_3) (CategoryTheory.CategoryStruct.toQuiver.{max u4 u1, max (max (max u4 u3) u2) u1} (CategoryTheory.Functor.{u3, u1, u4, u2} J _inst_2 C _inst_3) (CategoryTheory.Category.toCategoryStruct.{max u4 u1, max (max (max u4 u3) u2) u1} (CategoryTheory.Functor.{u3, u1, u4, u2} J _inst_2 C _inst_3) (CategoryTheory.Functor.category.{u3, u1, u4, u2} J _inst_2 C _inst_3))) (CategoryTheory.Functor.toPrefunctor.{u1, max u4 u1, u2, max (max (max 
u4 u3) u2) u1} C _inst_3 (CategoryTheory.Functor.{u3, u1, u4, u2} J _inst_2 C _inst_3) (CategoryTheory.Functor.category.{u3, u1, u4, u2} J _inst_2 C _inst_3) (CategoryTheory.Functor.const.{u3, u1, u4, u2} J _inst_2 C _inst_3)) (CategoryTheory.Limits.initial.{u1, u2} C _inst_3 _inst_4)) (CategoryTheory.Limits.instHasColimitObjToQuiverToCategoryStructFunctorToQuiverToCategoryStructCategoryToPrefunctorConstInitial.{u4, u3, u2, u1} J _inst_2 C _inst_3 _inst_4)) (CategoryTheory.Limits.initial.{u1, u2} C _inst_3 _inst_4) (CategoryTheory.Limits.colimit.ι.{u3, u4, u1, u2} J _inst_2 C _inst_3 (Prefunctor.obj.{succ u1, max (succ u4) (succ u1), u2, max (max (max u4 u3) u1) u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C (CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_3)) (CategoryTheory.Functor.{u3, u1, u4, u2} J _inst_2 C _inst_3) (CategoryTheory.CategoryStruct.toQuiver.{max u4 u1, max (max (max u4 u3) u2) u1} (CategoryTheory.Functor.{u3, u1, u4, u2} J _inst_2 C _inst_3) (CategoryTheory.Category.toCategoryStruct.{max u4 u1, max (max (max u4 u3) u2) u1} (CategoryTheory.Functor.{u3, u1, u4, u2} J _inst_2 C _inst_3) (CategoryTheory.Functor.category.{u3, u1, u4, u2} J _inst_2 C _inst_3))) (CategoryTheory.Functor.toPrefunctor.{u1, max u4 u1, u2, max (max (max u4 u3) u2) u1} C _inst_3 (CategoryTheory.Functor.{u3, u1, u4, u2} J _inst_2 C _inst_3) (CategoryTheory.Functor.category.{u3, u1, u4, u2} J _inst_2 C _inst_3) (CategoryTheory.Functor.const.{u3, u1, u4, u2} J _inst_2 C _inst_3)) (CategoryTheory.Limits.initial.{u1, u2} C _inst_3 _inst_4)) (CategoryTheory.Limits.instHasColimitObjToQuiverToCategoryStructFunctorToQuiverToCategoryStructCategoryToPrefunctorConstInitial.{u4, u3, u2, u1} J _inst_2 C _inst_3 _inst_4) j) (CategoryTheory.Iso.hom.{u1, u2} C _inst_3 (CategoryTheory.Limits.colimit.{u3, u4, u1, u2} J _inst_2 C _inst_3 (Prefunctor.obj.{succ u1, max (succ u4) (succ u1), u2, max (max (max u4 u3) u1) u2} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u2} C 
(CategoryTheory.Category.toCategoryStruct.{u1, u2} C _inst_3)) (CategoryTheory.Functor.{u3, u1, u4, u2} J _inst_2 C _inst_3) (CategoryTheory.CategoryStruct.toQuiver.{max u4 u1, max (max (max u4 u3) u2) u1} (CategoryTheory.Functor.{u3, u1, u4, u2} J _inst_2 C _inst_3) (CategoryTheory.Category.toCategoryStruct.{max u4 u1, max (max (max u4 u3) u2) u1} (CategoryTheory.Functor.{u3, u1, u4, u2} J _inst_2 C _inst_3) (CategoryTheory.Functor.category.{u3, u1, u4, u2} J _inst_2 C _inst_3))) (CategoryTheory.Functor.toPrefunctor.{u1, max u4 u1, u2, max (max (max u4 u3) u2) u1} C _inst_3 (CategoryTheory.Functor.{u3, u1, u4, u2} J _inst_2 C _inst_3) (CategoryTheory.Functor.category.{u3, u1, u4, u2} J _inst_2 C _inst_3) (CategoryTheory.Functor.const.{u3, u1, u4, u2} J _inst_2 C _inst_3)) (CategoryTheory.Limits.initial.{u1, u2} C _inst_3 _inst_4)) (CategoryTheory.Limits.instHasColimitObjToQuiverToCategoryStructFunctorToQuiverToCategoryStructCategoryToPrefunctorConstInitial.{u4, u3, u2, u1} J _inst_2 C _inst_3 _inst_4)) (CategoryTheory.Limits.initial.{u1, u2} C _inst_3 _inst_4) (CategoryTheory.Limits.colimitConstInitial.{u4, u3, u2, u1} J _inst_2 C _inst_3 _inst_4))) (CategoryTheory.Limits.initial.to.{u1, u2} C _inst_3 _inst_4 (CategoryTheory.Limits.initial.{u1, u2} C _inst_3 _inst_4))
Case conversion may be inaccurate. Consider using '#align category_theory.limits.ι_colimit_const_initial_hom CategoryTheory.Limits.ι_colimitConstInitial_homₓ'. -/
/-- Composing a colimit injection of the constant `⊥_ C` diagram with the `hom` of
`colimit_const_initial` is the unique morphism out of the initial object. -/
@[simp, reassoc.1]
theorem ι_colimitConstInitial_hom {J : Type _} [Category J] {C : Type _} [Category C] [HasInitial C]
{j : J} :
colimit.ι ((CategoryTheory.Functor.const J).obj (⊥_ C)) j ≫ colimitConstInitial.Hom =
initial.to _ :=
by ext ⟨⟨⟩⟩
#align category_theory.limits.ι_colimit_const_initial_hom CategoryTheory.Limits.ι_colimitConstInitial_hom
#print CategoryTheory.Limits.InitialMonoClass /-
/-- A category is a `initial_mono_class` if the canonical morphism of an initial object is a
monomorphism. In practice, this is most useful when given an arbitrary morphism out of the chosen
initial object, see `initial.mono_from`.
Given a terminal object, this is equivalent to the assumption that the unique morphism from initial
to terminal is a monomorphism, which is the second of Freyd's axioms for an AT category.
TODO: This is a condition satisfied by categories with zero objects and morphisms.
-/
class InitialMonoClass (C : Type u₁) [Category.{v₁} C] : Prop where
isInitial_mono_from : ∀ {I} (X : C) (hI : IsInitial I), Mono (hI.to X)
#align category_theory.limits.initial_mono_class CategoryTheory.Limits.InitialMonoClass
-/
#print CategoryTheory.Limits.IsInitial.mono_from /-
theorem IsInitial.mono_from [InitialMonoClass C] {I} {X : C} (hI : IsInitial I) (f : I ⟶ X) :
Mono f := by
rw [hI.hom_ext f (hI.to X)]
apply initial_mono_class.is_initial_mono_from
#align category_theory.limits.is_initial.mono_from CategoryTheory.Limits.IsInitial.mono_from
-/
#print CategoryTheory.Limits.initial.mono_from /-
instance (priority := 100) initial.mono_from [HasInitial C] [InitialMonoClass C] (X : C)
(f : ⊥_ C ⟶ X) : Mono f :=
initialIsInitial.mono_from f
#align category_theory.limits.initial.mono_from CategoryTheory.Limits.initial.mono_from
-/
#print CategoryTheory.Limits.InitialMonoClass.of_isInitial /-
/-- To show a category is a `initial_mono_class` it suffices to give an initial object such that
every morphism out of it is a monomorphism. -/
theorem InitialMonoClass.of_isInitial {I : C} (hI : IsInitial I) (h : ∀ X, Mono (hI.to X)) :
InitialMonoClass C :=
{
isInitial_mono_from := fun I' X hI' =>
by
rw [hI'.hom_ext (hI'.to X) ((hI'.unique_up_to_iso hI).Hom ≫ hI.to X)]
apply mono_comp }
#align category_theory.limits.initial_mono_class.of_is_initial CategoryTheory.Limits.InitialMonoClass.of_isInitial
-/
#print CategoryTheory.Limits.InitialMonoClass.of_initial /-
/-- To show a category is a `initial_mono_class` it suffices to show every morphism out of the
initial object is a monomorphism. -/
theorem InitialMonoClass.of_initial [HasInitial C] (h : ∀ X : C, Mono (initial.to X)) :
InitialMonoClass C :=
InitialMonoClass.of_isInitial initialIsInitial h
#align category_theory.limits.initial_mono_class.of_initial CategoryTheory.Limits.InitialMonoClass.of_initial
-/
#print CategoryTheory.Limits.InitialMonoClass.of_isTerminal /-
/-- To show a category is a `initial_mono_class` it suffices to show the unique morphism from an
initial object to a terminal object is a monomorphism. -/
theorem InitialMonoClass.of_isTerminal {I T : C} (hI : IsInitial I) (hT : IsTerminal T)
(f : Mono (hI.to T)) : InitialMonoClass C :=
InitialMonoClass.of_isInitial hI fun X => mono_of_mono_fac (hI.hom_ext (_ ≫ hT.from X) (hI.to T))
#align category_theory.limits.initial_mono_class.of_is_terminal CategoryTheory.Limits.InitialMonoClass.of_isTerminal
-/
#print CategoryTheory.Limits.InitialMonoClass.of_terminal /-
/-- To show a category is a `initial_mono_class` it suffices to show the unique morphism from the
initial object to a terminal object is a monomorphism. -/
theorem InitialMonoClass.of_terminal [HasInitial C] [HasTerminal C] (h : Mono (initial.to (⊤_ C))) :
InitialMonoClass C :=
InitialMonoClass.of_isTerminal initialIsInitial terminalIsTerminal h
#align category_theory.limits.initial_mono_class.of_terminal CategoryTheory.Limits.InitialMonoClass.of_terminal
-/
section Comparison
variable {D : Type u₂} [Category.{v₂} D] (G : C ⥤ D)
/- warning: category_theory.limits.terminal_comparison -> CategoryTheory.Limits.terminalComparison is a dubious translation:
lean 3 declaration is
forall {C : Type.{u3}} [_inst_1 : CategoryTheory.Category.{u1, u3} C] {D : Type.{u4}} [_inst_2 : CategoryTheory.Category.{u2, u4} D] (G : CategoryTheory.Functor.{u1, u2, u3, u4} C _inst_1 D _inst_2) [_inst_3 : CategoryTheory.Limits.HasTerminal.{u1, u3} C _inst_1] [_inst_4 : CategoryTheory.Limits.HasTerminal.{u2, u4} D _inst_2], Quiver.Hom.{succ u2, u4} D (CategoryTheory.CategoryStruct.toQuiver.{u2, u4} D (CategoryTheory.Category.toCategoryStruct.{u2, u4} D _inst_2)) (CategoryTheory.Functor.obj.{u1, u2, u3, u4} C _inst_1 D _inst_2 G (CategoryTheory.Limits.terminal.{u1, u3} C _inst_1 _inst_3)) (CategoryTheory.Limits.terminal.{u2, u4} D _inst_2 _inst_4)
but is expected to have type
forall {C : Type.{u3}} [_inst_1 : CategoryTheory.Category.{u1, u3} C] {D : Type.{u4}} [_inst_2 : CategoryTheory.Category.{u2, u4} D] (G : CategoryTheory.Functor.{u1, u2, u3, u4} C _inst_1 D _inst_2) [_inst_3 : CategoryTheory.Limits.HasTerminal.{u1, u3} C _inst_1] [_inst_4 : CategoryTheory.Limits.HasTerminal.{u2, u4} D _inst_2], Quiver.Hom.{succ u2, u4} D (CategoryTheory.CategoryStruct.toQuiver.{u2, u4} D (CategoryTheory.Category.toCategoryStruct.{u2, u4} D _inst_2)) (Prefunctor.obj.{succ u1, succ u2, u3, u4} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} C (CategoryTheory.Category.toCategoryStruct.{u1, u3} C _inst_1)) D (CategoryTheory.CategoryStruct.toQuiver.{u2, u4} D (CategoryTheory.Category.toCategoryStruct.{u2, u4} D _inst_2)) (CategoryTheory.Functor.toPrefunctor.{u1, u2, u3, u4} C _inst_1 D _inst_2 G) (CategoryTheory.Limits.terminal.{u1, u3} C _inst_1 _inst_3)) (CategoryTheory.Limits.terminal.{u2, u4} D _inst_2 _inst_4)
Case conversion may be inaccurate. Consider using '#align category_theory.limits.terminal_comparison CategoryTheory.Limits.terminalComparisonₓ'. -/
/-- The comparison morphism from the image of a terminal object to the terminal object in the target
category.
This is an isomorphism iff `G` preserves terminal objects, see
`category_theory.limits.preserves_terminal.of_iso_comparison`.
-/
def terminalComparison [HasTerminal C] [HasTerminal D] : G.obj (⊤_ C) ⟶ ⊤_ D :=
-- `terminal.from` is the unique morphism from `G.obj (⊤_ C)` into the terminal object of `D`.
terminal.from _
#align category_theory.limits.terminal_comparison CategoryTheory.Limits.terminalComparison
/- warning: category_theory.limits.initial_comparison -> CategoryTheory.Limits.initialComparison is a dubious translation:
lean 3 declaration is
forall {C : Type.{u3}} [_inst_1 : CategoryTheory.Category.{u1, u3} C] {D : Type.{u4}} [_inst_2 : CategoryTheory.Category.{u2, u4} D] (G : CategoryTheory.Functor.{u1, u2, u3, u4} C _inst_1 D _inst_2) [_inst_3 : CategoryTheory.Limits.HasInitial.{u1, u3} C _inst_1] [_inst_4 : CategoryTheory.Limits.HasInitial.{u2, u4} D _inst_2], Quiver.Hom.{succ u2, u4} D (CategoryTheory.CategoryStruct.toQuiver.{u2, u4} D (CategoryTheory.Category.toCategoryStruct.{u2, u4} D _inst_2)) (CategoryTheory.Limits.initial.{u2, u4} D _inst_2 _inst_4) (CategoryTheory.Functor.obj.{u1, u2, u3, u4} C _inst_1 D _inst_2 G (CategoryTheory.Limits.initial.{u1, u3} C _inst_1 _inst_3))
but is expected to have type
forall {C : Type.{u3}} [_inst_1 : CategoryTheory.Category.{u1, u3} C] {D : Type.{u4}} [_inst_2 : CategoryTheory.Category.{u2, u4} D] (G : CategoryTheory.Functor.{u1, u2, u3, u4} C _inst_1 D _inst_2) [_inst_3 : CategoryTheory.Limits.HasInitial.{u1, u3} C _inst_1] [_inst_4 : CategoryTheory.Limits.HasInitial.{u2, u4} D _inst_2], Quiver.Hom.{succ u2, u4} D (CategoryTheory.CategoryStruct.toQuiver.{u2, u4} D (CategoryTheory.Category.toCategoryStruct.{u2, u4} D _inst_2)) (CategoryTheory.Limits.initial.{u2, u4} D _inst_2 _inst_4) (Prefunctor.obj.{succ u1, succ u2, u3, u4} C (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} C (CategoryTheory.Category.toCategoryStruct.{u1, u3} C _inst_1)) D (CategoryTheory.CategoryStruct.toQuiver.{u2, u4} D (CategoryTheory.Category.toCategoryStruct.{u2, u4} D _inst_2)) (CategoryTheory.Functor.toPrefunctor.{u1, u2, u3, u4} C _inst_1 D _inst_2 G) (CategoryTheory.Limits.initial.{u1, u3} C _inst_1 _inst_3))
Case conversion may be inaccurate. Consider using '#align category_theory.limits.initial_comparison CategoryTheory.Limits.initialComparisonₓ'. -/
-- TODO: Show this is an isomorphism if and only if `G` preserves initial objects.
/--
The comparison morphism from the initial object in the target category to the image of the initial
object.
-/
def initialComparison [HasInitial C] [HasInitial D] : ⊥_ D ⟶ G.obj (⊥_ C) :=
-- `initial.to` is the unique morphism out of the initial object of `D` into `G.obj (⊥_ C)`.
initial.to _
#align category_theory.limits.initial_comparison CategoryTheory.Limits.initialComparison
end Comparison
variable {J : Type u} [Category.{v} J]
#print CategoryTheory.Limits.coneOfDiagramInitial /-
/-- From a functor `F : J ⥤ C`, given an initial object of `J`, construct a cone for `J`.
In `limit_of_diagram_initial` we show it is a limit cone. -/
@[simps]
def coneOfDiagramInitial {X : J} (tX : IsInitial X) (F : J ⥤ C) : Cone F
where
pt := F.obj X
π :=
{ app := fun j => F.map (tX.to j)
naturality' := fun j j' k => by
dsimp
rw [← F.map_comp, category.id_comp, tX.hom_ext (tX.to j ≫ k) (tX.to j')] }
#align category_theory.limits.cone_of_diagram_initial CategoryTheory.Limits.coneOfDiagramInitial
-/
#print CategoryTheory.Limits.limitOfDiagramInitial /-
/-- From a functor `F : J ⥤ C`, given an initial object of `J`, show the cone
`cone_of_diagram_initial` is a limit. -/
def limitOfDiagramInitial {X : J} (tX : IsInitial X) (F : J ⥤ C) :
IsLimit (coneOfDiagramInitial tX F)
where
lift s := s.π.app X
uniq s m w := by
rw [← w X, cone_of_diagram_initial_π_app, tX.hom_ext (tX.to X) (𝟙 _)]
dsimp; simp
#align category_theory.limits.limit_of_diagram_initial CategoryTheory.Limits.limitOfDiagramInitial
-/
/- warning: category_theory.limits.limit_of_initial -> CategoryTheory.Limits.limitOfInitial is a dubious translation:
lean 3 declaration is
forall {C : Type.{u4}} [_inst_1 : CategoryTheory.Category.{u2, u4} C] {J : Type.{u3}} [_inst_2 : CategoryTheory.Category.{u1, u3} J] (F : CategoryTheory.Functor.{u1, u2, u3, u4} J _inst_2 C _inst_1) [_inst_3 : CategoryTheory.Limits.HasInitial.{u1, u3} J _inst_2] [_inst_4 : CategoryTheory.Limits.HasLimit.{u1, u3, u2, u4} J _inst_2 C _inst_1 F], CategoryTheory.Iso.{u2, u4} C _inst_1 (CategoryTheory.Limits.limit.{u1, u3, u2, u4} J _inst_2 C _inst_1 F _inst_4) (CategoryTheory.Functor.obj.{u1, u2, u3, u4} J _inst_2 C _inst_1 F (CategoryTheory.Limits.initial.{u1, u3} J _inst_2 _inst_3))
but is expected to have type
forall {C : Type.{u4}} [_inst_1 : CategoryTheory.Category.{u2, u4} C] {J : Type.{u3}} [_inst_2 : CategoryTheory.Category.{u1, u3} J] (F : CategoryTheory.Functor.{u1, u2, u3, u4} J _inst_2 C _inst_1) [_inst_3 : CategoryTheory.Limits.HasInitial.{u1, u3} J _inst_2] [_inst_4 : CategoryTheory.Limits.HasLimit.{u1, u3, u2, u4} J _inst_2 C _inst_1 F], CategoryTheory.Iso.{u2, u4} C _inst_1 (CategoryTheory.Limits.limit.{u1, u3, u2, u4} J _inst_2 C _inst_1 F _inst_4) (Prefunctor.obj.{succ u1, succ u2, u3, u4} J (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} J (CategoryTheory.Category.toCategoryStruct.{u1, u3} J _inst_2)) C (CategoryTheory.CategoryStruct.toQuiver.{u2, u4} C (CategoryTheory.Category.toCategoryStruct.{u2, u4} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{u1, u2, u3, u4} J _inst_2 C _inst_1 F) (CategoryTheory.Limits.initial.{u1, u3} J _inst_2 _inst_3))
Case conversion may be inaccurate. Consider using '#align category_theory.limits.limit_of_initial CategoryTheory.Limits.limitOfInitialₓ'. -/
-- See note [dsimp, simp]
-- This is reducible to allow usage of lemmas about `cone_point_unique_up_to_iso`.
/-- For a functor `F : J ⥤ C`, if `J` has an initial object then the image of it is isomorphic
to the limit of `F`. -/
@[reducible]
def limitOfInitial (F : J ⥤ C) [HasInitial J] [HasLimit F] : limit F ≅ F.obj (⊥_ J) :=
-- `limit.cone F` and `cone_of_diagram_initial` are both limit cones for `F`, so their cone
-- points (`limit F` and `F.obj (⊥_ J)`) are uniquely isomorphic.
IsLimit.conePointUniqueUpToIso (limit.isLimit _) (limitOfDiagramInitial initialIsInitial F)
#align category_theory.limits.limit_of_initial CategoryTheory.Limits.limitOfInitial
/- warning: category_theory.limits.cone_of_diagram_terminal -> CategoryTheory.Limits.coneOfDiagramTerminal is a dubious translation:
lean 3 declaration is
forall {C : Type.{u4}} [_inst_1 : CategoryTheory.Category.{u2, u4} C] {J : Type.{u3}} [_inst_2 : CategoryTheory.Category.{u1, u3} J] {X : J}, (CategoryTheory.Limits.IsTerminal.{u1, u3} J _inst_2 X) -> (forall (F : CategoryTheory.Functor.{u1, u2, u3, u4} J _inst_2 C _inst_1) [_inst_3 : forall (i : J) (j : J) (f : Quiver.Hom.{succ u1, u3} J (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} J (CategoryTheory.Category.toCategoryStruct.{u1, u3} J _inst_2)) i j), CategoryTheory.IsIso.{u2, u4} C _inst_1 (CategoryTheory.Functor.obj.{u1, u2, u3, u4} J _inst_2 C _inst_1 F i) (CategoryTheory.Functor.obj.{u1, u2, u3, u4} J _inst_2 C _inst_1 F j) (CategoryTheory.Functor.map.{u1, u2, u3, u4} J _inst_2 C _inst_1 F i j f)], CategoryTheory.Limits.Cone.{u1, u2, u3, u4} J _inst_2 C _inst_1 F)
but is expected to have type
forall {C : Type.{u4}} [_inst_1 : CategoryTheory.Category.{u2, u4} C] {J : Type.{u3}} [_inst_2 : CategoryTheory.Category.{u1, u3} J] {X : J}, (CategoryTheory.Limits.IsTerminal.{u1, u3} J _inst_2 X) -> (forall (F : CategoryTheory.Functor.{u1, u2, u3, u4} J _inst_2 C _inst_1) [_inst_3 : forall (i : J) (j : J) (f : Quiver.Hom.{succ u1, u3} J (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} J (CategoryTheory.Category.toCategoryStruct.{u1, u3} J _inst_2)) i j), CategoryTheory.IsIso.{u2, u4} C _inst_1 (Prefunctor.obj.{succ u1, succ u2, u3, u4} J (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} J (CategoryTheory.Category.toCategoryStruct.{u1, u3} J _inst_2)) C (CategoryTheory.CategoryStruct.toQuiver.{u2, u4} C (CategoryTheory.Category.toCategoryStruct.{u2, u4} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{u1, u2, u3, u4} J _inst_2 C _inst_1 F) i) (Prefunctor.obj.{succ u1, succ u2, u3, u4} J (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} J (CategoryTheory.Category.toCategoryStruct.{u1, u3} J _inst_2)) C (CategoryTheory.CategoryStruct.toQuiver.{u2, u4} C (CategoryTheory.Category.toCategoryStruct.{u2, u4} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{u1, u2, u3, u4} J _inst_2 C _inst_1 F) j) (Prefunctor.map.{succ u1, succ u2, u3, u4} J (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} J (CategoryTheory.Category.toCategoryStruct.{u1, u3} J _inst_2)) C (CategoryTheory.CategoryStruct.toQuiver.{u2, u4} C (CategoryTheory.Category.toCategoryStruct.{u2, u4} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{u1, u2, u3, u4} J _inst_2 C _inst_1 F) i j f)], CategoryTheory.Limits.Cone.{u1, u2, u3, u4} J _inst_2 C _inst_1 F)
Case conversion may be inaccurate. Consider using '#align category_theory.limits.cone_of_diagram_terminal CategoryTheory.Limits.coneOfDiagramTerminalₓ'. -/
/-- From a functor `F : J ⥤ C`, given a terminal object of `J`, construct a cone for `J`,
provided that the morphisms in the diagram are isomorphisms.
In `limit_of_diagram_terminal` we show it is a limit cone. -/
@[simps]
def coneOfDiagramTerminal {X : J} (hX : IsTerminal X) (F : J ⥤ C)
[∀ (i j : J) (f : i ⟶ j), IsIso (F.map f)] : Cone F
where
pt := F.obj X
π :=
-- The leg at `i` inverts the image of the unique morphism `i ⟶ X` (an iso by hypothesis).
{ app := fun i => inv (F.map (hX.from _))
naturality' := by
intro i j f
dsimp
-- Rearrange with the `is_iso` cancellation lemmas, then use uniqueness of morphisms into
-- the terminal object to identify `hX.from i` with `f ≫ hX.from j`.
simp only [is_iso.eq_inv_comp, is_iso.comp_inv_eq, category.id_comp, ← F.map_comp,
hX.hom_ext (hX.from i) (f ≫ hX.from j)] }
#align category_theory.limits.cone_of_diagram_terminal CategoryTheory.Limits.coneOfDiagramTerminal
/- warning: category_theory.limits.limit_of_diagram_terminal -> CategoryTheory.Limits.limitOfDiagramTerminal is a dubious translation:
lean 3 declaration is
forall {C : Type.{u4}} [_inst_1 : CategoryTheory.Category.{u2, u4} C] {J : Type.{u3}} [_inst_2 : CategoryTheory.Category.{u1, u3} J] {X : J} (hX : CategoryTheory.Limits.IsTerminal.{u1, u3} J _inst_2 X) (F : CategoryTheory.Functor.{u1, u2, u3, u4} J _inst_2 C _inst_1) [_inst_3 : forall (i : J) (j : J) (f : Quiver.Hom.{succ u1, u3} J (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} J (CategoryTheory.Category.toCategoryStruct.{u1, u3} J _inst_2)) i j), CategoryTheory.IsIso.{u2, u4} C _inst_1 (CategoryTheory.Functor.obj.{u1, u2, u3, u4} J _inst_2 C _inst_1 F i) (CategoryTheory.Functor.obj.{u1, u2, u3, u4} J _inst_2 C _inst_1 F j) (CategoryTheory.Functor.map.{u1, u2, u3, u4} J _inst_2 C _inst_1 F i j f)], CategoryTheory.Limits.IsLimit.{u1, u2, u3, u4} J _inst_2 C _inst_1 F (CategoryTheory.Limits.coneOfDiagramTerminal.{u1, u2, u3, u4} C _inst_1 J _inst_2 X hX F (CategoryTheory.Limits.limitOfDiagramTerminal._proof_1.{u3, u4, u1, u2} C _inst_1 J _inst_2 F _inst_3))
but is expected to have type
forall {C : Type.{u4}} [_inst_1 : CategoryTheory.Category.{u2, u4} C] {J : Type.{u3}} [_inst_2 : CategoryTheory.Category.{u1, u3} J] {X : J} (hX : CategoryTheory.Limits.IsTerminal.{u1, u3} J _inst_2 X) (F : CategoryTheory.Functor.{u1, u2, u3, u4} J _inst_2 C _inst_1) [_inst_3 : forall (i : J) (j : J) (f : Quiver.Hom.{succ u1, u3} J (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} J (CategoryTheory.Category.toCategoryStruct.{u1, u3} J _inst_2)) i j), CategoryTheory.IsIso.{u2, u4} C _inst_1 (Prefunctor.obj.{succ u1, succ u2, u3, u4} J (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} J (CategoryTheory.Category.toCategoryStruct.{u1, u3} J _inst_2)) C (CategoryTheory.CategoryStruct.toQuiver.{u2, u4} C (CategoryTheory.Category.toCategoryStruct.{u2, u4} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{u1, u2, u3, u4} J _inst_2 C _inst_1 F) i) (Prefunctor.obj.{succ u1, succ u2, u3, u4} J (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} J (CategoryTheory.Category.toCategoryStruct.{u1, u3} J _inst_2)) C (CategoryTheory.CategoryStruct.toQuiver.{u2, u4} C (CategoryTheory.Category.toCategoryStruct.{u2, u4} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{u1, u2, u3, u4} J _inst_2 C _inst_1 F) j) (Prefunctor.map.{succ u1, succ u2, u3, u4} J (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} J (CategoryTheory.Category.toCategoryStruct.{u1, u3} J _inst_2)) C (CategoryTheory.CategoryStruct.toQuiver.{u2, u4} C (CategoryTheory.Category.toCategoryStruct.{u2, u4} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{u1, u2, u3, u4} J _inst_2 C _inst_1 F) i j f)], CategoryTheory.Limits.IsLimit.{u1, u2, u3, u4} J _inst_2 C _inst_1 F (CategoryTheory.Limits.coneOfDiagramTerminal.{u1, u2, u3, u4} C _inst_1 J _inst_2 X hX F (fun (i : J) (j : J) (f : Quiver.Hom.{succ u1, u3} J (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} J (CategoryTheory.Category.toCategoryStruct.{u1, u3} J _inst_2)) i j) => _inst_3 i j f))
Case conversion may be inaccurate. Consider using '#align category_theory.limits.limit_of_diagram_terminal CategoryTheory.Limits.limitOfDiagramTerminalₓ'. -/
/-- From a functor `F : J ⥤ C`, given a terminal object of `J` and that the morphisms in the
diagram are isomorphisms, show the cone `cone_of_diagram_terminal` is a limit. -/
def limitOfDiagramTerminal {X : J} (hX : IsTerminal X) (F : J ⥤ C)
[∀ (i j : J) (f : i ⟶ j), IsIso (F.map f)] : IsLimit (coneOfDiagramTerminal hX F)
-- A cone `S` lifts via its leg at the terminal object `X`; the remaining `IsLimit` fields are
-- presumably discharged by the structure's auto-param tactics — confirm against mathlib.
where lift S := S.π.app _
#align category_theory.limits.limit_of_diagram_terminal CategoryTheory.Limits.limitOfDiagramTerminal
/- warning: category_theory.limits.limit_of_terminal -> CategoryTheory.Limits.limitOfTerminal is a dubious translation:
lean 3 declaration is
forall {C : Type.{u4}} [_inst_1 : CategoryTheory.Category.{u2, u4} C] {J : Type.{u3}} [_inst_2 : CategoryTheory.Category.{u1, u3} J] (F : CategoryTheory.Functor.{u1, u2, u3, u4} J _inst_2 C _inst_1) [_inst_3 : CategoryTheory.Limits.HasTerminal.{u1, u3} J _inst_2] [_inst_4 : CategoryTheory.Limits.HasLimit.{u1, u3, u2, u4} J _inst_2 C _inst_1 F] [_inst_5 : forall (i : J) (j : J) (f : Quiver.Hom.{succ u1, u3} J (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} J (CategoryTheory.Category.toCategoryStruct.{u1, u3} J _inst_2)) i j), CategoryTheory.IsIso.{u2, u4} C _inst_1 (CategoryTheory.Functor.obj.{u1, u2, u3, u4} J _inst_2 C _inst_1 F i) (CategoryTheory.Functor.obj.{u1, u2, u3, u4} J _inst_2 C _inst_1 F j) (CategoryTheory.Functor.map.{u1, u2, u3, u4} J _inst_2 C _inst_1 F i j f)], CategoryTheory.Iso.{u2, u4} C _inst_1 (CategoryTheory.Limits.limit.{u1, u3, u2, u4} J _inst_2 C _inst_1 F _inst_4) (CategoryTheory.Functor.obj.{u1, u2, u3, u4} J _inst_2 C _inst_1 F (CategoryTheory.Limits.terminal.{u1, u3} J _inst_2 _inst_3))
but is expected to have type
forall {C : Type.{u4}} [_inst_1 : CategoryTheory.Category.{u2, u4} C] {J : Type.{u3}} [_inst_2 : CategoryTheory.Category.{u1, u3} J] (F : CategoryTheory.Functor.{u1, u2, u3, u4} J _inst_2 C _inst_1) [_inst_3 : CategoryTheory.Limits.HasTerminal.{u1, u3} J _inst_2] [_inst_4 : CategoryTheory.Limits.HasLimit.{u1, u3, u2, u4} J _inst_2 C _inst_1 F] [_inst_5 : forall (i : J) (j : J) (f : Quiver.Hom.{succ u1, u3} J (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} J (CategoryTheory.Category.toCategoryStruct.{u1, u3} J _inst_2)) i j), CategoryTheory.IsIso.{u2, u4} C _inst_1 (Prefunctor.obj.{succ u1, succ u2, u3, u4} J (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} J (CategoryTheory.Category.toCategoryStruct.{u1, u3} J _inst_2)) C (CategoryTheory.CategoryStruct.toQuiver.{u2, u4} C (CategoryTheory.Category.toCategoryStruct.{u2, u4} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{u1, u2, u3, u4} J _inst_2 C _inst_1 F) i) (Prefunctor.obj.{succ u1, succ u2, u3, u4} J (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} J (CategoryTheory.Category.toCategoryStruct.{u1, u3} J _inst_2)) C (CategoryTheory.CategoryStruct.toQuiver.{u2, u4} C (CategoryTheory.Category.toCategoryStruct.{u2, u4} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{u1, u2, u3, u4} J _inst_2 C _inst_1 F) j) (Prefunctor.map.{succ u1, succ u2, u3, u4} J (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} J (CategoryTheory.Category.toCategoryStruct.{u1, u3} J _inst_2)) C (CategoryTheory.CategoryStruct.toQuiver.{u2, u4} C (CategoryTheory.Category.toCategoryStruct.{u2, u4} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{u1, u2, u3, u4} J _inst_2 C _inst_1 F) i j f)], CategoryTheory.Iso.{u2, u4} C _inst_1 (CategoryTheory.Limits.limit.{u1, u3, u2, u4} J _inst_2 C _inst_1 F _inst_4) (Prefunctor.obj.{succ u1, succ u2, u3, u4} J (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} J (CategoryTheory.Category.toCategoryStruct.{u1, u3} J _inst_2)) C (CategoryTheory.CategoryStruct.toQuiver.{u2, u4} C 
(CategoryTheory.Category.toCategoryStruct.{u2, u4} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{u1, u2, u3, u4} J _inst_2 C _inst_1 F) (CategoryTheory.Limits.terminal.{u1, u3} J _inst_2 _inst_3))
Case conversion may be inaccurate. Consider using '#align category_theory.limits.limit_of_terminal CategoryTheory.Limits.limitOfTerminalₓ'. -/
-- This is reducible to allow usage of lemmas about `cone_point_unique_up_to_iso`.
/-- For a functor `F : J ⥤ C`, if `J` has a terminal object and all the morphisms in the diagram
are isomorphisms, then the image of the terminal object is isomorphic to the limit of `F`.

This is the isomorphism `limit F ≅ F.obj (⊤_ J)` obtained by comparing the two limit cones:
the chosen one and `cone_of_diagram_terminal`. -/
@[reducible]
def limitOfTerminal (F : J ⥤ C) [HasTerminal J] [HasLimit F]
    [∀ (i j : J) (f : i ⟶ j), IsIso (F.map f)] : limit F ≅ F.obj (⊤_ J) :=
  IsLimit.conePointUniqueUpToIso (limit.isLimit _) (limitOfDiagramTerminal terminalIsTerminal F)
#align category_theory.limits.limit_of_terminal CategoryTheory.Limits.limitOfTerminal
#print CategoryTheory.Limits.coconeOfDiagramTerminal /-
/-- From a functor `F : J ⥤ C`, given a terminal object of `J`, construct a cocone for `F`
whose point is the image of the terminal object, with legs `F.map (tX.from j)`.
In `colimit_of_diagram_terminal` we show it is a colimit cocone. -/
@[simps]
def coconeOfDiagramTerminal {X : J} (tX : IsTerminal X) (F : J ⥤ C) : Cocone F
    where
  -- The cocone point is the image of the terminal object of `J`.
  pt := F.obj X
  ι :=
    { app := fun j => F.map (tX.from j)
      -- Naturality follows since any two maps into the terminal object agree.
      naturality' := fun j j' k => by
        dsimp
        rw [← F.map_comp, category.comp_id, tX.hom_ext (k ≫ tX.from j') (tX.from j)] }
#align category_theory.limits.cocone_of_diagram_terminal CategoryTheory.Limits.coconeOfDiagramTerminal
-/
#print CategoryTheory.Limits.colimitOfDiagramTerminal /-
/-- From a functor `F : J ⥤ C`, given a terminal object of `J`, show the cocone
`cocone_of_diagram_terminal` is a colimit.

The descent map for a cocone `s` is its leg at the terminal object. -/
def colimitOfDiagramTerminal {X : J} (tX : IsTerminal X) (F : J ⥤ C) :
    IsColimit (coconeOfDiagramTerminal tX F)
    where
  desc s := s.ι.app X
  -- Uniqueness: rewrite `m` via its compatibility at `X`, where the cocone leg is
  -- `F.map` of the (unique) endomorphism of `X`, i.e. the identity.
  uniq s m w :=
    by
    rw [← w X, cocone_of_diagram_terminal_ι_app, tX.hom_ext (tX.from X) (𝟙 _)]
    simp
#align category_theory.limits.colimit_of_diagram_terminal CategoryTheory.Limits.colimitOfDiagramTerminal
-/
/- warning: category_theory.limits.colimit_of_terminal -> CategoryTheory.Limits.colimitOfTerminal is a dubious translation:
lean 3 declaration is
forall {C : Type.{u4}} [_inst_1 : CategoryTheory.Category.{u2, u4} C] {J : Type.{u3}} [_inst_2 : CategoryTheory.Category.{u1, u3} J] (F : CategoryTheory.Functor.{u1, u2, u3, u4} J _inst_2 C _inst_1) [_inst_3 : CategoryTheory.Limits.HasTerminal.{u1, u3} J _inst_2] [_inst_4 : CategoryTheory.Limits.HasColimit.{u1, u3, u2, u4} J _inst_2 C _inst_1 F], CategoryTheory.Iso.{u2, u4} C _inst_1 (CategoryTheory.Limits.colimit.{u1, u3, u2, u4} J _inst_2 C _inst_1 F _inst_4) (CategoryTheory.Functor.obj.{u1, u2, u3, u4} J _inst_2 C _inst_1 F (CategoryTheory.Limits.terminal.{u1, u3} J _inst_2 _inst_3))
but is expected to have type
forall {C : Type.{u4}} [_inst_1 : CategoryTheory.Category.{u2, u4} C] {J : Type.{u3}} [_inst_2 : CategoryTheory.Category.{u1, u3} J] (F : CategoryTheory.Functor.{u1, u2, u3, u4} J _inst_2 C _inst_1) [_inst_3 : CategoryTheory.Limits.HasTerminal.{u1, u3} J _inst_2] [_inst_4 : CategoryTheory.Limits.HasColimit.{u1, u3, u2, u4} J _inst_2 C _inst_1 F], CategoryTheory.Iso.{u2, u4} C _inst_1 (CategoryTheory.Limits.colimit.{u1, u3, u2, u4} J _inst_2 C _inst_1 F _inst_4) (Prefunctor.obj.{succ u1, succ u2, u3, u4} J (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} J (CategoryTheory.Category.toCategoryStruct.{u1, u3} J _inst_2)) C (CategoryTheory.CategoryStruct.toQuiver.{u2, u4} C (CategoryTheory.Category.toCategoryStruct.{u2, u4} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{u1, u2, u3, u4} J _inst_2 C _inst_1 F) (CategoryTheory.Limits.terminal.{u1, u3} J _inst_2 _inst_3))
Case conversion may be inaccurate. Consider using '#align category_theory.limits.colimit_of_terminal CategoryTheory.Limits.colimitOfTerminalₓ'. -/
-- This is reducible to allow usage of lemmas about `cocone_point_unique_up_to_iso`.
/-- For a functor `F : J ⥤ C`, if `J` has a terminal object then the image of it is isomorphic
to the colimit of `F`.

Note no isomorphism hypothesis on the maps of the diagram is needed here, since
`cocone_of_diagram_terminal` is a colimit cocone unconditionally. -/
@[reducible]
def colimitOfTerminal (F : J ⥤ C) [HasTerminal J] [HasColimit F] : colimit F ≅ F.obj (⊤_ J) :=
  IsColimit.coconePointUniqueUpToIso (colimit.isColimit _)
    (colimitOfDiagramTerminal terminalIsTerminal F)
#align category_theory.limits.colimit_of_terminal CategoryTheory.Limits.colimitOfTerminal
/- warning: category_theory.limits.cocone_of_diagram_initial -> CategoryTheory.Limits.coconeOfDiagramInitial is a dubious translation:
lean 3 declaration is
forall {C : Type.{u4}} [_inst_1 : CategoryTheory.Category.{u2, u4} C] {J : Type.{u3}} [_inst_2 : CategoryTheory.Category.{u1, u3} J] {X : J}, (CategoryTheory.Limits.IsInitial.{u1, u3} J _inst_2 X) -> (forall (F : CategoryTheory.Functor.{u1, u2, u3, u4} J _inst_2 C _inst_1) [_inst_3 : forall (i : J) (j : J) (f : Quiver.Hom.{succ u1, u3} J (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} J (CategoryTheory.Category.toCategoryStruct.{u1, u3} J _inst_2)) i j), CategoryTheory.IsIso.{u2, u4} C _inst_1 (CategoryTheory.Functor.obj.{u1, u2, u3, u4} J _inst_2 C _inst_1 F i) (CategoryTheory.Functor.obj.{u1, u2, u3, u4} J _inst_2 C _inst_1 F j) (CategoryTheory.Functor.map.{u1, u2, u3, u4} J _inst_2 C _inst_1 F i j f)], CategoryTheory.Limits.Cocone.{u1, u2, u3, u4} J _inst_2 C _inst_1 F)
but is expected to have type
forall {C : Type.{u4}} [_inst_1 : CategoryTheory.Category.{u2, u4} C] {J : Type.{u3}} [_inst_2 : CategoryTheory.Category.{u1, u3} J] {X : J}, (CategoryTheory.Limits.IsInitial.{u1, u3} J _inst_2 X) -> (forall (F : CategoryTheory.Functor.{u1, u2, u3, u4} J _inst_2 C _inst_1) [_inst_3 : forall (i : J) (j : J) (f : Quiver.Hom.{succ u1, u3} J (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} J (CategoryTheory.Category.toCategoryStruct.{u1, u3} J _inst_2)) i j), CategoryTheory.IsIso.{u2, u4} C _inst_1 (Prefunctor.obj.{succ u1, succ u2, u3, u4} J (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} J (CategoryTheory.Category.toCategoryStruct.{u1, u3} J _inst_2)) C (CategoryTheory.CategoryStruct.toQuiver.{u2, u4} C (CategoryTheory.Category.toCategoryStruct.{u2, u4} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{u1, u2, u3, u4} J _inst_2 C _inst_1 F) i) (Prefunctor.obj.{succ u1, succ u2, u3, u4} J (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} J (CategoryTheory.Category.toCategoryStruct.{u1, u3} J _inst_2)) C (CategoryTheory.CategoryStruct.toQuiver.{u2, u4} C (CategoryTheory.Category.toCategoryStruct.{u2, u4} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{u1, u2, u3, u4} J _inst_2 C _inst_1 F) j) (Prefunctor.map.{succ u1, succ u2, u3, u4} J (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} J (CategoryTheory.Category.toCategoryStruct.{u1, u3} J _inst_2)) C (CategoryTheory.CategoryStruct.toQuiver.{u2, u4} C (CategoryTheory.Category.toCategoryStruct.{u2, u4} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{u1, u2, u3, u4} J _inst_2 C _inst_1 F) i j f)], CategoryTheory.Limits.Cocone.{u1, u2, u3, u4} J _inst_2 C _inst_1 F)
Case conversion may be inaccurate. Consider using '#align category_theory.limits.cocone_of_diagram_initial CategoryTheory.Limits.coconeOfDiagramInitialₓ'. -/
/-- From a functor `F : J ⥤ C`, given an initial object of `J`, construct a cocone for `F`,
provided that the morphisms in the diagram are isomorphisms.
The point is the image of the initial object, and the leg at `i` is the inverse of
`F.map (hX.to i)`.
In `colimit_of_diagram_initial` we show it is a colimit cocone. -/
@[simps]
def coconeOfDiagramInitial {X : J} (hX : IsInitial X) (F : J ⥤ C)
    [∀ (i j : J) (f : i ⟶ j), IsIso (F.map f)] : Cocone F
    where
  -- The cocone point is the image of the initial object of `J`.
  pt := F.obj X
  ι :=
    { app := fun i => inv (F.map (hX.to _))
      -- Naturality follows since any two maps out of the initial object agree.
      naturality' := by
        intro i j f
        dsimp
        simp only [is_iso.eq_inv_comp, is_iso.comp_inv_eq, category.comp_id, ← F.map_comp,
          hX.hom_ext (hX.to i ≫ f) (hX.to j)] }
#align category_theory.limits.cocone_of_diagram_initial CategoryTheory.Limits.coconeOfDiagramInitial
/- warning: category_theory.limits.colimit_of_diagram_initial -> CategoryTheory.Limits.colimitOfDiagramInitial is a dubious translation:
lean 3 declaration is
forall {C : Type.{u4}} [_inst_1 : CategoryTheory.Category.{u2, u4} C] {J : Type.{u3}} [_inst_2 : CategoryTheory.Category.{u1, u3} J] {X : J} (hX : CategoryTheory.Limits.IsInitial.{u1, u3} J _inst_2 X) (F : CategoryTheory.Functor.{u1, u2, u3, u4} J _inst_2 C _inst_1) [_inst_3 : forall (i : J) (j : J) (f : Quiver.Hom.{succ u1, u3} J (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} J (CategoryTheory.Category.toCategoryStruct.{u1, u3} J _inst_2)) i j), CategoryTheory.IsIso.{u2, u4} C _inst_1 (CategoryTheory.Functor.obj.{u1, u2, u3, u4} J _inst_2 C _inst_1 F i) (CategoryTheory.Functor.obj.{u1, u2, u3, u4} J _inst_2 C _inst_1 F j) (CategoryTheory.Functor.map.{u1, u2, u3, u4} J _inst_2 C _inst_1 F i j f)], CategoryTheory.Limits.IsColimit.{u1, u2, u3, u4} J _inst_2 C _inst_1 F (CategoryTheory.Limits.coconeOfDiagramInitial.{u1, u2, u3, u4} C _inst_1 J _inst_2 X hX F (CategoryTheory.Limits.colimitOfDiagramInitial._proof_1.{u3, u4, u1, u2} C _inst_1 J _inst_2 F _inst_3))
but is expected to have type
forall {C : Type.{u4}} [_inst_1 : CategoryTheory.Category.{u2, u4} C] {J : Type.{u3}} [_inst_2 : CategoryTheory.Category.{u1, u3} J] {X : J} (hX : CategoryTheory.Limits.IsInitial.{u1, u3} J _inst_2 X) (F : CategoryTheory.Functor.{u1, u2, u3, u4} J _inst_2 C _inst_1) [_inst_3 : forall (i : J) (j : J) (f : Quiver.Hom.{succ u1, u3} J (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} J (CategoryTheory.Category.toCategoryStruct.{u1, u3} J _inst_2)) i j), CategoryTheory.IsIso.{u2, u4} C _inst_1 (Prefunctor.obj.{succ u1, succ u2, u3, u4} J (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} J (CategoryTheory.Category.toCategoryStruct.{u1, u3} J _inst_2)) C (CategoryTheory.CategoryStruct.toQuiver.{u2, u4} C (CategoryTheory.Category.toCategoryStruct.{u2, u4} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{u1, u2, u3, u4} J _inst_2 C _inst_1 F) i) (Prefunctor.obj.{succ u1, succ u2, u3, u4} J (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} J (CategoryTheory.Category.toCategoryStruct.{u1, u3} J _inst_2)) C (CategoryTheory.CategoryStruct.toQuiver.{u2, u4} C (CategoryTheory.Category.toCategoryStruct.{u2, u4} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{u1, u2, u3, u4} J _inst_2 C _inst_1 F) j) (Prefunctor.map.{succ u1, succ u2, u3, u4} J (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} J (CategoryTheory.Category.toCategoryStruct.{u1, u3} J _inst_2)) C (CategoryTheory.CategoryStruct.toQuiver.{u2, u4} C (CategoryTheory.Category.toCategoryStruct.{u2, u4} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{u1, u2, u3, u4} J _inst_2 C _inst_1 F) i j f)], CategoryTheory.Limits.IsColimit.{u1, u2, u3, u4} J _inst_2 C _inst_1 F (CategoryTheory.Limits.coconeOfDiagramInitial.{u1, u2, u3, u4} C _inst_1 J _inst_2 X hX F (fun (i : J) (j : J) (f : Quiver.Hom.{succ u1, u3} J (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} J (CategoryTheory.Category.toCategoryStruct.{u1, u3} J _inst_2)) i j) => _inst_3 i j f))
Case conversion may be inaccurate. Consider using '#align category_theory.limits.colimit_of_diagram_initial CategoryTheory.Limits.colimitOfDiagramInitialₓ'. -/
/-- From a functor `F : J ⥤ C`, given an initial object of `J` and that the morphisms in the
diagram are isomorphisms, show the cocone `cocone_of_diagram_initial` is a colimit.

The descent of a cocone `S` is just its leg at the initial object; the remaining
obligations are discharged by the structure's auto params. -/
def colimitOfDiagramInitial {X : J} (hX : IsInitial X) (F : J ⥤ C)
    [∀ (i j : J) (f : i ⟶ j), IsIso (F.map f)] : IsColimit (coconeOfDiagramInitial hX F)
    where desc S := S.ι.app _
#align category_theory.limits.colimit_of_diagram_initial CategoryTheory.Limits.colimitOfDiagramInitial
/- warning: category_theory.limits.colimit_of_initial -> CategoryTheory.Limits.colimitOfInitial is a dubious translation:
lean 3 declaration is
forall {C : Type.{u4}} [_inst_1 : CategoryTheory.Category.{u2, u4} C] {J : Type.{u3}} [_inst_2 : CategoryTheory.Category.{u1, u3} J] (F : CategoryTheory.Functor.{u1, u2, u3, u4} J _inst_2 C _inst_1) [_inst_3 : CategoryTheory.Limits.HasInitial.{u1, u3} J _inst_2] [_inst_4 : CategoryTheory.Limits.HasColimit.{u1, u3, u2, u4} J _inst_2 C _inst_1 F] [_inst_5 : forall (i : J) (j : J) (f : Quiver.Hom.{succ u1, u3} J (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} J (CategoryTheory.Category.toCategoryStruct.{u1, u3} J _inst_2)) i j), CategoryTheory.IsIso.{u2, u4} C _inst_1 (CategoryTheory.Functor.obj.{u1, u2, u3, u4} J _inst_2 C _inst_1 F i) (CategoryTheory.Functor.obj.{u1, u2, u3, u4} J _inst_2 C _inst_1 F j) (CategoryTheory.Functor.map.{u1, u2, u3, u4} J _inst_2 C _inst_1 F i j f)], CategoryTheory.Iso.{u2, u4} C _inst_1 (CategoryTheory.Limits.colimit.{u1, u3, u2, u4} J _inst_2 C _inst_1 F _inst_4) (CategoryTheory.Functor.obj.{u1, u2, u3, u4} J _inst_2 C _inst_1 F (CategoryTheory.Limits.initial.{u1, u3} J _inst_2 _inst_3))
but is expected to have type
forall {C : Type.{u4}} [_inst_1 : CategoryTheory.Category.{u2, u4} C] {J : Type.{u3}} [_inst_2 : CategoryTheory.Category.{u1, u3} J] (F : CategoryTheory.Functor.{u1, u2, u3, u4} J _inst_2 C _inst_1) [_inst_3 : CategoryTheory.Limits.HasInitial.{u1, u3} J _inst_2] [_inst_4 : CategoryTheory.Limits.HasColimit.{u1, u3, u2, u4} J _inst_2 C _inst_1 F] [_inst_5 : forall (i : J) (j : J) (f : Quiver.Hom.{succ u1, u3} J (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} J (CategoryTheory.Category.toCategoryStruct.{u1, u3} J _inst_2)) i j), CategoryTheory.IsIso.{u2, u4} C _inst_1 (Prefunctor.obj.{succ u1, succ u2, u3, u4} J (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} J (CategoryTheory.Category.toCategoryStruct.{u1, u3} J _inst_2)) C (CategoryTheory.CategoryStruct.toQuiver.{u2, u4} C (CategoryTheory.Category.toCategoryStruct.{u2, u4} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{u1, u2, u3, u4} J _inst_2 C _inst_1 F) i) (Prefunctor.obj.{succ u1, succ u2, u3, u4} J (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} J (CategoryTheory.Category.toCategoryStruct.{u1, u3} J _inst_2)) C (CategoryTheory.CategoryStruct.toQuiver.{u2, u4} C (CategoryTheory.Category.toCategoryStruct.{u2, u4} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{u1, u2, u3, u4} J _inst_2 C _inst_1 F) j) (Prefunctor.map.{succ u1, succ u2, u3, u4} J (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} J (CategoryTheory.Category.toCategoryStruct.{u1, u3} J _inst_2)) C (CategoryTheory.CategoryStruct.toQuiver.{u2, u4} C (CategoryTheory.Category.toCategoryStruct.{u2, u4} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{u1, u2, u3, u4} J _inst_2 C _inst_1 F) i j f)], CategoryTheory.Iso.{u2, u4} C _inst_1 (CategoryTheory.Limits.colimit.{u1, u3, u2, u4} J _inst_2 C _inst_1 F _inst_4) (Prefunctor.obj.{succ u1, succ u2, u3, u4} J (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} J (CategoryTheory.Category.toCategoryStruct.{u1, u3} J _inst_2)) C (CategoryTheory.CategoryStruct.toQuiver.{u2, u4} C 
(CategoryTheory.Category.toCategoryStruct.{u2, u4} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{u1, u2, u3, u4} J _inst_2 C _inst_1 F) (CategoryTheory.Limits.initial.{u1, u3} J _inst_2 _inst_3))
Case conversion may be inaccurate. Consider using '#align category_theory.limits.colimit_of_initial CategoryTheory.Limits.colimitOfInitialₓ'. -/
-- This is reducible to allow usage of lemmas about `cocone_point_unique_up_to_iso`.
/-- For a functor `F : J ⥤ C`, if `J` has an initial object and all the morphisms in the diagram
are isomorphisms, then the image of the initial object is isomorphic to the colimit of `F`.

This is the isomorphism `colimit F ≅ F.obj (⊥_ J)` obtained by comparing the two colimit
cocones: the chosen one and `cocone_of_diagram_initial`. -/
@[reducible]
def colimitOfInitial (F : J ⥤ C) [HasInitial J] [HasColimit F]
    [∀ (i j : J) (f : i ⟶ j), IsIso (F.map f)] : colimit F ≅ F.obj (⊥_ J) :=
  IsColimit.coconePointUniqueUpToIso (colimit.isColimit _)
    (colimitOfDiagramInitial initialIsInitial _)
#align category_theory.limits.colimit_of_initial CategoryTheory.Limits.colimitOfInitial
/- warning: category_theory.limits.is_iso_π_of_is_initial -> CategoryTheory.Limits.isIso_π_of_isInitial is a dubious translation:
lean 3 declaration is
forall {C : Type.{u4}} [_inst_1 : CategoryTheory.Category.{u2, u4} C] {J : Type.{u3}} [_inst_2 : CategoryTheory.Category.{u1, u3} J] {j : J}, (CategoryTheory.Limits.IsInitial.{u1, u3} J _inst_2 j) -> (forall (F : CategoryTheory.Functor.{u1, u2, u3, u4} J _inst_2 C _inst_1) [_inst_3 : CategoryTheory.Limits.HasLimit.{u1, u3, u2, u4} J _inst_2 C _inst_1 F], CategoryTheory.IsIso.{u2, u4} C _inst_1 (CategoryTheory.Limits.limit.{u1, u3, u2, u4} J _inst_2 C _inst_1 F _inst_3) (CategoryTheory.Functor.obj.{u1, u2, u3, u4} J _inst_2 C _inst_1 F j) (CategoryTheory.Limits.limit.π.{u1, u3, u2, u4} J _inst_2 C _inst_1 F _inst_3 j))
but is expected to have type
forall {C : Type.{u4}} [_inst_1 : CategoryTheory.Category.{u2, u4} C] {J : Type.{u3}} [_inst_2 : CategoryTheory.Category.{u1, u3} J] {j : J}, (CategoryTheory.Limits.IsInitial.{u1, u3} J _inst_2 j) -> (forall (F : CategoryTheory.Functor.{u1, u2, u3, u4} J _inst_2 C _inst_1) [_inst_3 : CategoryTheory.Limits.HasLimit.{u1, u3, u2, u4} J _inst_2 C _inst_1 F], CategoryTheory.IsIso.{u2, u4} C _inst_1 (CategoryTheory.Limits.limit.{u1, u3, u2, u4} J _inst_2 C _inst_1 F _inst_3) (Prefunctor.obj.{succ u1, succ u2, u3, u4} J (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} J (CategoryTheory.Category.toCategoryStruct.{u1, u3} J _inst_2)) C (CategoryTheory.CategoryStruct.toQuiver.{u2, u4} C (CategoryTheory.Category.toCategoryStruct.{u2, u4} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{u1, u2, u3, u4} J _inst_2 C _inst_1 F) j) (CategoryTheory.Limits.limit.π.{u1, u3, u2, u4} J _inst_2 C _inst_1 F _inst_3 j))
Case conversion may be inaccurate. Consider using '#align category_theory.limits.is_iso_π_of_is_initial CategoryTheory.Limits.isIso_π_of_isInitialₓ'. -/
/-- If `j` is initial in the index category, then the map `limit.π F j` is an isomorphism.

The inverse is the lift of the cone `cone_of_diagram_initial I F`; both composites are
identified with identities by `ext`/`simp`. -/
theorem isIso_π_of_isInitial {j : J} (I : IsInitial j) (F : J ⥤ C) [HasLimit F] :
    IsIso (limit.π F j) :=
  ⟨⟨limit.lift _ (coneOfDiagramInitial I F),
      ⟨by
        ext
        simp, by simp⟩⟩⟩
#align category_theory.limits.is_iso_π_of_is_initial CategoryTheory.Limits.isIso_π_of_isInitial
/- warning: category_theory.limits.is_iso_π_initial -> CategoryTheory.Limits.isIso_π_initial is a dubious translation:
lean 3 declaration is
forall {C : Type.{u4}} [_inst_1 : CategoryTheory.Category.{u2, u4} C] {J : Type.{u3}} [_inst_2 : CategoryTheory.Category.{u1, u3} J] [_inst_3 : CategoryTheory.Limits.HasInitial.{u1, u3} J _inst_2] (F : CategoryTheory.Functor.{u1, u2, u3, u4} J _inst_2 C _inst_1) [_inst_4 : CategoryTheory.Limits.HasLimit.{u1, u3, u2, u4} J _inst_2 C _inst_1 F], CategoryTheory.IsIso.{u2, u4} C _inst_1 (CategoryTheory.Limits.limit.{u1, u3, u2, u4} J _inst_2 C _inst_1 F _inst_4) (CategoryTheory.Functor.obj.{u1, u2, u3, u4} J _inst_2 C _inst_1 F (CategoryTheory.Limits.initial.{u1, u3} J _inst_2 _inst_3)) (CategoryTheory.Limits.limit.π.{u1, u3, u2, u4} J _inst_2 C _inst_1 F _inst_4 (CategoryTheory.Limits.initial.{u1, u3} J _inst_2 _inst_3))
but is expected to have type
forall {C : Type.{u4}} [_inst_1 : CategoryTheory.Category.{u2, u4} C] {J : Type.{u3}} [_inst_2 : CategoryTheory.Category.{u1, u3} J] [_inst_3 : CategoryTheory.Limits.HasInitial.{u1, u3} J _inst_2] (F : CategoryTheory.Functor.{u1, u2, u3, u4} J _inst_2 C _inst_1) [_inst_4 : CategoryTheory.Limits.HasLimit.{u1, u3, u2, u4} J _inst_2 C _inst_1 F], CategoryTheory.IsIso.{u2, u4} C _inst_1 (CategoryTheory.Limits.limit.{u1, u3, u2, u4} J _inst_2 C _inst_1 F _inst_4) (Prefunctor.obj.{succ u1, succ u2, u3, u4} J (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} J (CategoryTheory.Category.toCategoryStruct.{u1, u3} J _inst_2)) C (CategoryTheory.CategoryStruct.toQuiver.{u2, u4} C (CategoryTheory.Category.toCategoryStruct.{u2, u4} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{u1, u2, u3, u4} J _inst_2 C _inst_1 F) (CategoryTheory.Limits.initial.{u1, u3} J _inst_2 _inst_3)) (CategoryTheory.Limits.limit.π.{u1, u3, u2, u4} J _inst_2 C _inst_1 F _inst_4 (CategoryTheory.Limits.initial.{u1, u3} J _inst_2 _inst_3))
Case conversion may be inaccurate. Consider using '#align category_theory.limits.is_iso_π_initial CategoryTheory.Limits.isIso_π_initialₓ'. -/
-- Specialization of `is_iso_π_of_is_initial` to the chosen initial object `⊥_ J`,
-- registered as an instance so typeclass search can find it.
instance isIso_π_initial [HasInitial J] (F : J ⥤ C) [HasLimit F] : IsIso (limit.π F (⊥_ J)) :=
  isIso_π_of_isInitial initialIsInitial F
#align category_theory.limits.is_iso_π_initial CategoryTheory.Limits.isIso_π_initial
/- warning: category_theory.limits.is_iso_π_of_is_terminal -> CategoryTheory.Limits.isIso_π_of_isTerminal is a dubious translation:
lean 3 declaration is
forall {C : Type.{u4}} [_inst_1 : CategoryTheory.Category.{u2, u4} C] {J : Type.{u3}} [_inst_2 : CategoryTheory.Category.{u1, u3} J] {j : J}, (CategoryTheory.Limits.IsTerminal.{u1, u3} J _inst_2 j) -> (forall (F : CategoryTheory.Functor.{u1, u2, u3, u4} J _inst_2 C _inst_1) [_inst_3 : CategoryTheory.Limits.HasLimit.{u1, u3, u2, u4} J _inst_2 C _inst_1 F] [_inst_4 : forall (i : J) (j : J) (f : Quiver.Hom.{succ u1, u3} J (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} J (CategoryTheory.Category.toCategoryStruct.{u1, u3} J _inst_2)) i j), CategoryTheory.IsIso.{u2, u4} C _inst_1 (CategoryTheory.Functor.obj.{u1, u2, u3, u4} J _inst_2 C _inst_1 F i) (CategoryTheory.Functor.obj.{u1, u2, u3, u4} J _inst_2 C _inst_1 F j) (CategoryTheory.Functor.map.{u1, u2, u3, u4} J _inst_2 C _inst_1 F i j f)], CategoryTheory.IsIso.{u2, u4} C _inst_1 (CategoryTheory.Limits.limit.{u1, u3, u2, u4} J _inst_2 C _inst_1 F _inst_3) (CategoryTheory.Functor.obj.{u1, u2, u3, u4} J _inst_2 C _inst_1 F j) (CategoryTheory.Limits.limit.π.{u1, u3, u2, u4} J _inst_2 C _inst_1 F _inst_3 j))
but is expected to have type
forall {C : Type.{u4}} [_inst_1 : CategoryTheory.Category.{u2, u4} C] {J : Type.{u3}} [_inst_2 : CategoryTheory.Category.{u1, u3} J] {j : J}, (CategoryTheory.Limits.IsTerminal.{u1, u3} J _inst_2 j) -> (forall (F : CategoryTheory.Functor.{u1, u2, u3, u4} J _inst_2 C _inst_1) [_inst_3 : CategoryTheory.Limits.HasLimit.{u1, u3, u2, u4} J _inst_2 C _inst_1 F] [_inst_4 : forall (i : J) (j : J) (f : Quiver.Hom.{succ u1, u3} J (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} J (CategoryTheory.Category.toCategoryStruct.{u1, u3} J _inst_2)) i j), CategoryTheory.IsIso.{u2, u4} C _inst_1 (Prefunctor.obj.{succ u1, succ u2, u3, u4} J (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} J (CategoryTheory.Category.toCategoryStruct.{u1, u3} J _inst_2)) C (CategoryTheory.CategoryStruct.toQuiver.{u2, u4} C (CategoryTheory.Category.toCategoryStruct.{u2, u4} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{u1, u2, u3, u4} J _inst_2 C _inst_1 F) i) (Prefunctor.obj.{succ u1, succ u2, u3, u4} J (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} J (CategoryTheory.Category.toCategoryStruct.{u1, u3} J _inst_2)) C (CategoryTheory.CategoryStruct.toQuiver.{u2, u4} C (CategoryTheory.Category.toCategoryStruct.{u2, u4} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{u1, u2, u3, u4} J _inst_2 C _inst_1 F) j) (Prefunctor.map.{succ u1, succ u2, u3, u4} J (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} J (CategoryTheory.Category.toCategoryStruct.{u1, u3} J _inst_2)) C (CategoryTheory.CategoryStruct.toQuiver.{u2, u4} C (CategoryTheory.Category.toCategoryStruct.{u2, u4} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{u1, u2, u3, u4} J _inst_2 C _inst_1 F) i j f)], CategoryTheory.IsIso.{u2, u4} C _inst_1 (CategoryTheory.Limits.limit.{u1, u3, u2, u4} J _inst_2 C _inst_1 F _inst_3) (Prefunctor.obj.{succ u1, succ u2, u3, u4} J (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} J (CategoryTheory.Category.toCategoryStruct.{u1, u3} J _inst_2)) C (CategoryTheory.CategoryStruct.toQuiver.{u2, u4} C 
(CategoryTheory.Category.toCategoryStruct.{u2, u4} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{u1, u2, u3, u4} J _inst_2 C _inst_1 F) j) (CategoryTheory.Limits.limit.π.{u1, u3, u2, u4} J _inst_2 C _inst_1 F _inst_3 j))
Case conversion may be inaccurate. Consider using '#align category_theory.limits.is_iso_π_of_is_terminal CategoryTheory.Limits.isIso_π_of_isTerminalₓ'. -/
/-- If `j` is terminal in the index category and every morphism in the diagram is an
isomorphism, then the map `limit.π F j` is an isomorphism, with inverse the lift of
the cone `cone_of_diagram_terminal I F`. -/
theorem isIso_π_of_isTerminal {j : J} (I : IsTerminal j) (F : J ⥤ C) [HasLimit F]
    [∀ (i j : J) (f : i ⟶ j), IsIso (F.map f)] : IsIso (limit.π F j) :=
  ⟨⟨limit.lift _ (coneOfDiagramTerminal I F), by
      ext
      simp, by simp⟩⟩
#align category_theory.limits.is_iso_π_of_is_terminal CategoryTheory.Limits.isIso_π_of_isTerminal
/- warning: category_theory.limits.is_iso_π_terminal -> CategoryTheory.Limits.isIso_π_terminal is a dubious translation:
lean 3 declaration is
forall {C : Type.{u4}} [_inst_1 : CategoryTheory.Category.{u2, u4} C] {J : Type.{u3}} [_inst_2 : CategoryTheory.Category.{u1, u3} J] [_inst_3 : CategoryTheory.Limits.HasTerminal.{u1, u3} J _inst_2] (F : CategoryTheory.Functor.{u1, u2, u3, u4} J _inst_2 C _inst_1) [_inst_4 : CategoryTheory.Limits.HasLimit.{u1, u3, u2, u4} J _inst_2 C _inst_1 F] [_inst_5 : forall (i : J) (j : J) (f : Quiver.Hom.{succ u1, u3} J (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} J (CategoryTheory.Category.toCategoryStruct.{u1, u3} J _inst_2)) i j), CategoryTheory.IsIso.{u2, u4} C _inst_1 (CategoryTheory.Functor.obj.{u1, u2, u3, u4} J _inst_2 C _inst_1 F i) (CategoryTheory.Functor.obj.{u1, u2, u3, u4} J _inst_2 C _inst_1 F j) (CategoryTheory.Functor.map.{u1, u2, u3, u4} J _inst_2 C _inst_1 F i j f)], CategoryTheory.IsIso.{u2, u4} C _inst_1 (CategoryTheory.Limits.limit.{u1, u3, u2, u4} J _inst_2 C _inst_1 F _inst_4) (CategoryTheory.Functor.obj.{u1, u2, u3, u4} J _inst_2 C _inst_1 F (CategoryTheory.Limits.terminal.{u1, u3} J _inst_2 _inst_3)) (CategoryTheory.Limits.limit.π.{u1, u3, u2, u4} J _inst_2 C _inst_1 F _inst_4 (CategoryTheory.Limits.terminal.{u1, u3} J _inst_2 _inst_3))
but is expected to have type
forall {C : Type.{u4}} [_inst_1 : CategoryTheory.Category.{u2, u4} C] {J : Type.{u3}} [_inst_2 : CategoryTheory.Category.{u1, u3} J] [_inst_3 : CategoryTheory.Limits.HasTerminal.{u1, u3} J _inst_2] (F : CategoryTheory.Functor.{u1, u2, u3, u4} J _inst_2 C _inst_1) [_inst_4 : CategoryTheory.Limits.HasLimit.{u1, u3, u2, u4} J _inst_2 C _inst_1 F] [_inst_5 : forall (i : J) (j : J) (f : Quiver.Hom.{succ u1, u3} J (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} J (CategoryTheory.Category.toCategoryStruct.{u1, u3} J _inst_2)) i j), CategoryTheory.IsIso.{u2, u4} C _inst_1 (Prefunctor.obj.{succ u1, succ u2, u3, u4} J (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} J (CategoryTheory.Category.toCategoryStruct.{u1, u3} J _inst_2)) C (CategoryTheory.CategoryStruct.toQuiver.{u2, u4} C (CategoryTheory.Category.toCategoryStruct.{u2, u4} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{u1, u2, u3, u4} J _inst_2 C _inst_1 F) i) (Prefunctor.obj.{succ u1, succ u2, u3, u4} J (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} J (CategoryTheory.Category.toCategoryStruct.{u1, u3} J _inst_2)) C (CategoryTheory.CategoryStruct.toQuiver.{u2, u4} C (CategoryTheory.Category.toCategoryStruct.{u2, u4} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{u1, u2, u3, u4} J _inst_2 C _inst_1 F) j) (Prefunctor.map.{succ u1, succ u2, u3, u4} J (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} J (CategoryTheory.Category.toCategoryStruct.{u1, u3} J _inst_2)) C (CategoryTheory.CategoryStruct.toQuiver.{u2, u4} C (CategoryTheory.Category.toCategoryStruct.{u2, u4} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{u1, u2, u3, u4} J _inst_2 C _inst_1 F) i j f)], CategoryTheory.IsIso.{u2, u4} C _inst_1 (CategoryTheory.Limits.limit.{u1, u3, u2, u4} J _inst_2 C _inst_1 F _inst_4) (Prefunctor.obj.{succ u1, succ u2, u3, u4} J (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} J (CategoryTheory.Category.toCategoryStruct.{u1, u3} J _inst_2)) C (CategoryTheory.CategoryStruct.toQuiver.{u2, u4} C 
(CategoryTheory.Category.toCategoryStruct.{u2, u4} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{u1, u2, u3, u4} J _inst_2 C _inst_1 F) (CategoryTheory.Limits.terminal.{u1, u3} J _inst_2 _inst_3)) (CategoryTheory.Limits.limit.π.{u1, u3, u2, u4} J _inst_2 C _inst_1 F _inst_4 (CategoryTheory.Limits.terminal.{u1, u3} J _inst_2 _inst_3))
Case conversion may be inaccurate. Consider using '#align category_theory.limits.is_iso_π_terminal CategoryTheory.Limits.isIso_π_terminalₓ'. -/
-- Specialization of `is_iso_π_of_is_terminal` to the chosen terminal object `⊤_ J`,
-- registered as an instance so typeclass search can find it.
instance isIso_π_terminal [HasTerminal J] (F : J ⥤ C) [HasLimit F]
    [∀ (i j : J) (f : i ⟶ j), IsIso (F.map f)] : IsIso (limit.π F (⊤_ J)) :=
  isIso_π_of_isTerminal terminalIsTerminal F
#align category_theory.limits.is_iso_π_terminal CategoryTheory.Limits.isIso_π_terminal
/- warning: category_theory.limits.is_iso_ι_of_is_terminal -> CategoryTheory.Limits.isIso_ι_of_isTerminal is a dubious translation:
lean 3 declaration is
forall {C : Type.{u4}} [_inst_1 : CategoryTheory.Category.{u2, u4} C] {J : Type.{u3}} [_inst_2 : CategoryTheory.Category.{u1, u3} J] {j : J}, (CategoryTheory.Limits.IsTerminal.{u1, u3} J _inst_2 j) -> (forall (F : CategoryTheory.Functor.{u1, u2, u3, u4} J _inst_2 C _inst_1) [_inst_3 : CategoryTheory.Limits.HasColimit.{u1, u3, u2, u4} J _inst_2 C _inst_1 F], CategoryTheory.IsIso.{u2, u4} C _inst_1 (CategoryTheory.Functor.obj.{u1, u2, u3, u4} J _inst_2 C _inst_1 F j) (CategoryTheory.Limits.colimit.{u1, u3, u2, u4} J _inst_2 C _inst_1 F _inst_3) (CategoryTheory.Limits.colimit.ι.{u1, u3, u2, u4} J _inst_2 C _inst_1 F _inst_3 j))
but is expected to have type
forall {C : Type.{u4}} [_inst_1 : CategoryTheory.Category.{u2, u4} C] {J : Type.{u3}} [_inst_2 : CategoryTheory.Category.{u1, u3} J] {j : J}, (CategoryTheory.Limits.IsTerminal.{u1, u3} J _inst_2 j) -> (forall (F : CategoryTheory.Functor.{u1, u2, u3, u4} J _inst_2 C _inst_1) [_inst_3 : CategoryTheory.Limits.HasColimit.{u1, u3, u2, u4} J _inst_2 C _inst_1 F], CategoryTheory.IsIso.{u2, u4} C _inst_1 (Prefunctor.obj.{succ u1, succ u2, u3, u4} J (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} J (CategoryTheory.Category.toCategoryStruct.{u1, u3} J _inst_2)) C (CategoryTheory.CategoryStruct.toQuiver.{u2, u4} C (CategoryTheory.Category.toCategoryStruct.{u2, u4} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{u1, u2, u3, u4} J _inst_2 C _inst_1 F) j) (CategoryTheory.Limits.colimit.{u1, u3, u2, u4} J _inst_2 C _inst_1 F _inst_3) (CategoryTheory.Limits.colimit.ι.{u1, u3, u2, u4} J _inst_2 C _inst_1 F _inst_3 j))
Case conversion may be inaccurate. Consider using '#align category_theory.limits.is_iso_ι_of_is_terminal CategoryTheory.Limits.isIso_ι_of_isTerminalₓ'. -/
/-- If `j` is terminal in the index category, then the map `colimit.ι F j` is an isomorphism.
-/
theorem isIso_ι_of_isTerminal {j : J} (I : IsTerminal j) (F : J ⥤ C) [HasColimit F] :
    IsIso (colimit.ι F j) :=
  -- The two-sided inverse is the map out of the colimit induced by the cocone on `F`
  -- with apex `F.obj j` coming from the terminal object `j`.
  ⟨⟨colimit.desc _ (coconeOfDiagramTerminal I F),
      ⟨by simp, by
        ext
        simp⟩⟩⟩
#align category_theory.limits.is_iso_ι_of_is_terminal CategoryTheory.Limits.isIso_ι_of_isTerminal
/- warning: category_theory.limits.is_iso_ι_terminal -> CategoryTheory.Limits.isIso_ι_terminal is a dubious translation:
lean 3 declaration is
forall {C : Type.{u4}} [_inst_1 : CategoryTheory.Category.{u2, u4} C] {J : Type.{u3}} [_inst_2 : CategoryTheory.Category.{u1, u3} J] [_inst_3 : CategoryTheory.Limits.HasTerminal.{u1, u3} J _inst_2] (F : CategoryTheory.Functor.{u1, u2, u3, u4} J _inst_2 C _inst_1) [_inst_4 : CategoryTheory.Limits.HasColimit.{u1, u3, u2, u4} J _inst_2 C _inst_1 F], CategoryTheory.IsIso.{u2, u4} C _inst_1 (CategoryTheory.Functor.obj.{u1, u2, u3, u4} J _inst_2 C _inst_1 F (CategoryTheory.Limits.terminal.{u1, u3} J _inst_2 _inst_3)) (CategoryTheory.Limits.colimit.{u1, u3, u2, u4} J _inst_2 C _inst_1 F _inst_4) (CategoryTheory.Limits.colimit.ι.{u1, u3, u2, u4} J _inst_2 C _inst_1 F _inst_4 (CategoryTheory.Limits.terminal.{u1, u3} J _inst_2 _inst_3))
but is expected to have type
forall {C : Type.{u4}} [_inst_1 : CategoryTheory.Category.{u2, u4} C] {J : Type.{u3}} [_inst_2 : CategoryTheory.Category.{u1, u3} J] [_inst_3 : CategoryTheory.Limits.HasTerminal.{u1, u3} J _inst_2] (F : CategoryTheory.Functor.{u1, u2, u3, u4} J _inst_2 C _inst_1) [_inst_4 : CategoryTheory.Limits.HasColimit.{u1, u3, u2, u4} J _inst_2 C _inst_1 F], CategoryTheory.IsIso.{u2, u4} C _inst_1 (Prefunctor.obj.{succ u1, succ u2, u3, u4} J (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} J (CategoryTheory.Category.toCategoryStruct.{u1, u3} J _inst_2)) C (CategoryTheory.CategoryStruct.toQuiver.{u2, u4} C (CategoryTheory.Category.toCategoryStruct.{u2, u4} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{u1, u2, u3, u4} J _inst_2 C _inst_1 F) (CategoryTheory.Limits.terminal.{u1, u3} J _inst_2 _inst_3)) (CategoryTheory.Limits.colimit.{u1, u3, u2, u4} J _inst_2 C _inst_1 F _inst_4) (CategoryTheory.Limits.colimit.ι.{u1, u3, u2, u4} J _inst_2 C _inst_1 F _inst_4 (CategoryTheory.Limits.terminal.{u1, u3} J _inst_2 _inst_3))
Case conversion may be inaccurate. Consider using '#align category_theory.limits.is_iso_ι_terminal CategoryTheory.Limits.isIso_ι_terminalₓ'. -/
/-- If the index category `J` has a terminal object, the coprojection `colimit.ι F (⊤_ J)` into
the colimit is an isomorphism. -/
instance isIso_ι_terminal [HasTerminal J] (F : J ⥤ C) [HasColimit F] : IsIso (colimit.ι F (⊤_ J)) :=
  isIso_ι_of_isTerminal terminalIsTerminal F
#align category_theory.limits.is_iso_ι_terminal CategoryTheory.Limits.isIso_ι_terminal
/- warning: category_theory.limits.is_iso_ι_of_is_initial -> CategoryTheory.Limits.isIso_ι_of_isInitial is a dubious translation:
lean 3 declaration is
forall {C : Type.{u4}} [_inst_1 : CategoryTheory.Category.{u2, u4} C] {J : Type.{u3}} [_inst_2 : CategoryTheory.Category.{u1, u3} J] {j : J}, (CategoryTheory.Limits.IsInitial.{u1, u3} J _inst_2 j) -> (forall (F : CategoryTheory.Functor.{u1, u2, u3, u4} J _inst_2 C _inst_1) [_inst_3 : CategoryTheory.Limits.HasColimit.{u1, u3, u2, u4} J _inst_2 C _inst_1 F] [_inst_4 : forall (i : J) (j : J) (f : Quiver.Hom.{succ u1, u3} J (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} J (CategoryTheory.Category.toCategoryStruct.{u1, u3} J _inst_2)) i j), CategoryTheory.IsIso.{u2, u4} C _inst_1 (CategoryTheory.Functor.obj.{u1, u2, u3, u4} J _inst_2 C _inst_1 F i) (CategoryTheory.Functor.obj.{u1, u2, u3, u4} J _inst_2 C _inst_1 F j) (CategoryTheory.Functor.map.{u1, u2, u3, u4} J _inst_2 C _inst_1 F i j f)], CategoryTheory.IsIso.{u2, u4} C _inst_1 (CategoryTheory.Functor.obj.{u1, u2, u3, u4} J _inst_2 C _inst_1 F j) (CategoryTheory.Limits.colimit.{u1, u3, u2, u4} J _inst_2 C _inst_1 F _inst_3) (CategoryTheory.Limits.colimit.ι.{u1, u3, u2, u4} J _inst_2 C _inst_1 F _inst_3 j))
but is expected to have type
forall {C : Type.{u4}} [_inst_1 : CategoryTheory.Category.{u2, u4} C] {J : Type.{u3}} [_inst_2 : CategoryTheory.Category.{u1, u3} J] {j : J}, (CategoryTheory.Limits.IsInitial.{u1, u3} J _inst_2 j) -> (forall (F : CategoryTheory.Functor.{u1, u2, u3, u4} J _inst_2 C _inst_1) [_inst_3 : CategoryTheory.Limits.HasColimit.{u1, u3, u2, u4} J _inst_2 C _inst_1 F] [_inst_4 : forall (i : J) (j : J) (f : Quiver.Hom.{succ u1, u3} J (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} J (CategoryTheory.Category.toCategoryStruct.{u1, u3} J _inst_2)) i j), CategoryTheory.IsIso.{u2, u4} C _inst_1 (Prefunctor.obj.{succ u1, succ u2, u3, u4} J (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} J (CategoryTheory.Category.toCategoryStruct.{u1, u3} J _inst_2)) C (CategoryTheory.CategoryStruct.toQuiver.{u2, u4} C (CategoryTheory.Category.toCategoryStruct.{u2, u4} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{u1, u2, u3, u4} J _inst_2 C _inst_1 F) i) (Prefunctor.obj.{succ u1, succ u2, u3, u4} J (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} J (CategoryTheory.Category.toCategoryStruct.{u1, u3} J _inst_2)) C (CategoryTheory.CategoryStruct.toQuiver.{u2, u4} C (CategoryTheory.Category.toCategoryStruct.{u2, u4} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{u1, u2, u3, u4} J _inst_2 C _inst_1 F) j) (Prefunctor.map.{succ u1, succ u2, u3, u4} J (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} J (CategoryTheory.Category.toCategoryStruct.{u1, u3} J _inst_2)) C (CategoryTheory.CategoryStruct.toQuiver.{u2, u4} C (CategoryTheory.Category.toCategoryStruct.{u2, u4} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{u1, u2, u3, u4} J _inst_2 C _inst_1 F) i j f)], CategoryTheory.IsIso.{u2, u4} C _inst_1 (Prefunctor.obj.{succ u1, succ u2, u3, u4} J (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} J (CategoryTheory.Category.toCategoryStruct.{u1, u3} J _inst_2)) C (CategoryTheory.CategoryStruct.toQuiver.{u2, u4} C (CategoryTheory.Category.toCategoryStruct.{u2, u4} C _inst_1)) 
(CategoryTheory.Functor.toPrefunctor.{u1, u2, u3, u4} J _inst_2 C _inst_1 F) j) (CategoryTheory.Limits.colimit.{u1, u3, u2, u4} J _inst_2 C _inst_1 F _inst_3) (CategoryTheory.Limits.colimit.ι.{u1, u3, u2, u4} J _inst_2 C _inst_1 F _inst_3 j))
Case conversion may be inaccurate. Consider using '#align category_theory.limits.is_iso_ι_of_is_initial CategoryTheory.Limits.isIso_ι_of_isInitialₓ'. -/
/-- If `j` is initial in the index category and every morphism in the diagram `F` is sent to an
isomorphism, then `colimit.ι F j` is an isomorphism. -/
theorem isIso_ι_of_isInitial {j : J} (I : IsInitial j) (F : J ⥤ C) [HasColimit F]
    [∀ (i j : J) (f : i ⟶ j), IsIso (F.map f)] : IsIso (colimit.ι F j) :=
  -- The inverse is the map out of the colimit induced by the cocone built from the
  -- initial object `j`.
  ⟨⟨colimit.desc _ (coconeOfDiagramInitial I F),
      ⟨by tidy, by
        ext
        simp⟩⟩⟩
#align category_theory.limits.is_iso_ι_of_is_initial CategoryTheory.Limits.isIso_ι_of_isInitial
/- warning: category_theory.limits.is_iso_ι_initial -> CategoryTheory.Limits.isIso_ι_initial is a dubious translation:
lean 3 declaration is
forall {C : Type.{u4}} [_inst_1 : CategoryTheory.Category.{u2, u4} C] {J : Type.{u3}} [_inst_2 : CategoryTheory.Category.{u1, u3} J] [_inst_3 : CategoryTheory.Limits.HasInitial.{u1, u3} J _inst_2] (F : CategoryTheory.Functor.{u1, u2, u3, u4} J _inst_2 C _inst_1) [_inst_4 : CategoryTheory.Limits.HasColimit.{u1, u3, u2, u4} J _inst_2 C _inst_1 F] [_inst_5 : forall (i : J) (j : J) (f : Quiver.Hom.{succ u1, u3} J (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} J (CategoryTheory.Category.toCategoryStruct.{u1, u3} J _inst_2)) i j), CategoryTheory.IsIso.{u2, u4} C _inst_1 (CategoryTheory.Functor.obj.{u1, u2, u3, u4} J _inst_2 C _inst_1 F i) (CategoryTheory.Functor.obj.{u1, u2, u3, u4} J _inst_2 C _inst_1 F j) (CategoryTheory.Functor.map.{u1, u2, u3, u4} J _inst_2 C _inst_1 F i j f)], CategoryTheory.IsIso.{u2, u4} C _inst_1 (CategoryTheory.Functor.obj.{u1, u2, u3, u4} J _inst_2 C _inst_1 F (CategoryTheory.Limits.initial.{u1, u3} J _inst_2 _inst_3)) (CategoryTheory.Limits.colimit.{u1, u3, u2, u4} J _inst_2 C _inst_1 F _inst_4) (CategoryTheory.Limits.colimit.ι.{u1, u3, u2, u4} J _inst_2 C _inst_1 F _inst_4 (CategoryTheory.Limits.initial.{u1, u3} J _inst_2 _inst_3))
but is expected to have type
forall {C : Type.{u4}} [_inst_1 : CategoryTheory.Category.{u2, u4} C] {J : Type.{u3}} [_inst_2 : CategoryTheory.Category.{u1, u3} J] [_inst_3 : CategoryTheory.Limits.HasInitial.{u1, u3} J _inst_2] (F : CategoryTheory.Functor.{u1, u2, u3, u4} J _inst_2 C _inst_1) [_inst_4 : CategoryTheory.Limits.HasColimit.{u1, u3, u2, u4} J _inst_2 C _inst_1 F] [_inst_5 : forall (i : J) (j : J) (f : Quiver.Hom.{succ u1, u3} J (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} J (CategoryTheory.Category.toCategoryStruct.{u1, u3} J _inst_2)) i j), CategoryTheory.IsIso.{u2, u4} C _inst_1 (Prefunctor.obj.{succ u1, succ u2, u3, u4} J (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} J (CategoryTheory.Category.toCategoryStruct.{u1, u3} J _inst_2)) C (CategoryTheory.CategoryStruct.toQuiver.{u2, u4} C (CategoryTheory.Category.toCategoryStruct.{u2, u4} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{u1, u2, u3, u4} J _inst_2 C _inst_1 F) i) (Prefunctor.obj.{succ u1, succ u2, u3, u4} J (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} J (CategoryTheory.Category.toCategoryStruct.{u1, u3} J _inst_2)) C (CategoryTheory.CategoryStruct.toQuiver.{u2, u4} C (CategoryTheory.Category.toCategoryStruct.{u2, u4} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{u1, u2, u3, u4} J _inst_2 C _inst_1 F) j) (Prefunctor.map.{succ u1, succ u2, u3, u4} J (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} J (CategoryTheory.Category.toCategoryStruct.{u1, u3} J _inst_2)) C (CategoryTheory.CategoryStruct.toQuiver.{u2, u4} C (CategoryTheory.Category.toCategoryStruct.{u2, u4} C _inst_1)) (CategoryTheory.Functor.toPrefunctor.{u1, u2, u3, u4} J _inst_2 C _inst_1 F) i j f)], CategoryTheory.IsIso.{u2, u4} C _inst_1 (Prefunctor.obj.{succ u1, succ u2, u3, u4} J (CategoryTheory.CategoryStruct.toQuiver.{u1, u3} J (CategoryTheory.Category.toCategoryStruct.{u1, u3} J _inst_2)) C (CategoryTheory.CategoryStruct.toQuiver.{u2, u4} C (CategoryTheory.Category.toCategoryStruct.{u2, u4} C _inst_1)) 
(CategoryTheory.Functor.toPrefunctor.{u1, u2, u3, u4} J _inst_2 C _inst_1 F) (CategoryTheory.Limits.initial.{u1, u3} J _inst_2 _inst_3)) (CategoryTheory.Limits.colimit.{u1, u3, u2, u4} J _inst_2 C _inst_1 F _inst_4) (CategoryTheory.Limits.colimit.ι.{u1, u3, u2, u4} J _inst_2 C _inst_1 F _inst_4 (CategoryTheory.Limits.initial.{u1, u3} J _inst_2 _inst_3))
Case conversion may be inaccurate. Consider using '#align category_theory.limits.is_iso_ι_initial CategoryTheory.Limits.isIso_ι_initialₓ'. -/
/-- If the index category `J` has an initial object and every morphism in the diagram `F` is sent
to an isomorphism, then the coprojection `colimit.ι F (⊥_ J)` is an isomorphism. -/
instance isIso_ι_initial [HasInitial J] (F : J ⥤ C) [HasColimit F]
    [∀ (i j : J) (f : i ⟶ j), IsIso (F.map f)] : IsIso (colimit.ι F (⊥_ J)) :=
  isIso_ι_of_isInitial initialIsInitial F
#align category_theory.limits.is_iso_ι_initial CategoryTheory.Limits.isIso_ι_initial
end
end CategoryTheory.Limits
|
{"author": "leanprover-community", "repo": "mathlib3port", "sha": "62505aa236c58c8559783b16d33e30df3daa54f4", "save_path": "github-repos/lean/leanprover-community-mathlib3port", "path": "github-repos/lean/leanprover-community-mathlib3port/mathlib3port-62505aa236c58c8559783b16d33e30df3daa54f4/Mathbin/CategoryTheory/Limits/Shapes/Terminal.lean"}
|
C     Returns as SS the integral of the external function FUNC between
C     the limits A and B, by ten-point Gauss-Legendre quadrature.
C     X holds the five positive abscissas and W the matching weights of
C     the 10-point rule on (-1,1); each abscissa is used symmetrically
C     about the interval midpoint (Numerical Recipes, QGAUS).
      SUBROUTINE QGAUS(FUNC,A,B,SS)
      DIMENSION X(5),W(5)
      DATA X/.1488743389,.4333953941,.6794095682,.8650633666,.9739065285
     */
      DATA W/.2955242247,.2692667193,.2190863625,.1494513491,.0666713443
     */
C     XM is the midpoint of (A,B); XR the half-length used to rescale
C     the abscissas from (-1,1) onto the requested interval.
      XM=0.5*(B+A)
      XR=0.5*(B-A)
      SS=0
      DO 11 J=1,5
      DX=XR*X(J)
C     each node contributes at XM+DX and its mirror image XM-DX
      SS=SS+W(J)*(FUNC(XM+DX)+FUNC(XM-DX))
   11 CONTINUE
C     scale by the half-length to account for the change of variable
      SS=XR*SS
      RETURN
      END
|
{"hexsha": "6dd325cbf9b4187b672a614da5ce1f0bd8fb96d7", "size": 408, "ext": "for", "lang": "FORTRAN", "max_stars_repo_path": "Math3/NumRec/source/qgaus.for", "max_stars_repo_name": "domijin/MM3", "max_stars_repo_head_hexsha": "cf696d0cf26ea8e8e24c86287cf8856cab7eaf77", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Math3/NumRec/source/qgaus.for", "max_issues_repo_name": "domijin/MM3", "max_issues_repo_head_hexsha": "cf696d0cf26ea8e8e24c86287cf8856cab7eaf77", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Math3/NumRec/source/qgaus.for", "max_forks_repo_name": "domijin/MM3", "max_forks_repo_head_hexsha": "cf696d0cf26ea8e8e24c86287cf8856cab7eaf77", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.0, "max_line_length": 72, "alphanum_fraction": 0.5416666667, "num_tokens": 166}
|
import numpy as np
import cv2
import os

# Live demo of edge/gradient operators (Laplacian, Sobel, Canny) on frames
# captured from the default camera (device 0). Press "q" to quit.
video = cv2.VideoCapture(0)
# NOTE(review): these flags are never read below — presumably left over from
# experimentation; kept so importers of this module still find them.
ORANGE = False
DARK = False
try:
    while True:
        # check the success flag: a failed read returns (False, None) and
        # the original code crashed inside cvtColor on the None frame
        ok, original = video.read()
        if not ok:
            break
        gray = cv2.cvtColor(original, cv2.COLOR_BGR2GRAY)
        # gradient / edge operators for side-by-side comparison
        laplacian = cv2.Laplacian(original, cv2.CV_64F)
        sobelx = cv2.Sobel(original, cv2.CV_64F, 1, 0, ksize=5)
        sobely = cv2.Sobel(original, cv2.CV_64F, 0, 1, ksize=5)
        edges = cv2.Canny(gray, 120, 150)
        cv2.imshow("Me", original)
        cv2.imshow("laplacian", laplacian)
        # cv2.imshow("sobelx", sobelx)
        # cv2.imshow("sobely", sobely)
        cv2.imshow("canny", edges)
        key = cv2.waitKey(delay=10)
        if key == ord("q"):
            break
finally:
    # always free the camera handle and close the preview windows, even on
    # error or Ctrl-C — the original script leaked the capture device
    video.release()
    cv2.destroyAllWindows()
|
{"hexsha": "a729176274351437c1761ff800d50feea5fe5de8", "size": 708, "ext": "py", "lang": "Python", "max_stars_repo_path": "modules/open_cv/edge_and_gradients.py", "max_stars_repo_name": "GrzegorzKrug/face_morph", "max_stars_repo_head_hexsha": "64e5e47207d30ac8968a0b1b73e11a8ae74b3fec", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "modules/open_cv/edge_and_gradients.py", "max_issues_repo_name": "GrzegorzKrug/face_morph", "max_issues_repo_head_hexsha": "64e5e47207d30ac8968a0b1b73e11a8ae74b3fec", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "modules/open_cv/edge_and_gradients.py", "max_forks_repo_name": "GrzegorzKrug/face_morph", "max_forks_repo_head_hexsha": "64e5e47207d30ac8968a0b1b73e11a8ae74b3fec", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.8387096774, "max_line_length": 59, "alphanum_fraction": 0.6440677966, "include": true, "reason": "import numpy", "num_tokens": 243}
|
"""
AbstractZero <: AbstractDifferential
This is zero-like differential types.
If a AD system encounter a propagator taking as input only subtypes of `AbstractZero` then
it can stop performing any AD operations, as all propagator are linear functions, and thus
the final result will be zero.
All `AbstractZero` subtypes are singleton types.
There are two of them [`Zero()`](@ref) and [`DoesNotExist()`](@ref).
"""
abstract type AbstractZero <: AbstractDifferential end
Base.iszero(::AbstractZero) = true
Base.iterate(x::AbstractZero) = (x, nothing)
Base.iterate(::AbstractZero, ::Any) = nothing
Base.Broadcast.broadcastable(x::AbstractZero) = Ref(x)
Base.Broadcast.broadcasted(::Type{T}) where T<:AbstractZero = T()
# Linear operators
Base.adjoint(z::AbstractZero) = z
Base.transpose(z::AbstractZero) = z
Base.:/(z::AbstractZero, ::Any) = z
"""
Zero() <: AbstractZero
The additive identity for differentials.
This is basically the same as `0`.
A derivative of `Zero()`. does not propagate through the primal function.
"""
struct Zero <: AbstractZero end
extern(x::Zero) = false # false is a strong 0. E.g. `false * NaN = 0.0`
Base.eltype(::Type{Zero}) = Zero
Base.zero(::AbstractDifferential) = Zero()
Base.zero(::Type{<:AbstractDifferential}) = Zero()
"""
DoesNotExist() <: AbstractZero
This differential indicates that the derivative does not exist.
It is the differential for a Primal type that is not differentiable.
Such an Integer, or Boolean (when not being used as a represention of a value that normally
would be a floating point.)
The only valid way to pertube such a values is to not change it at all.
As such, `DoesNotExist` is functionally identical to `Zero()`,
but provides additional semantic information.
If you are adding this differential to a primal then something is wrong.
A optimization package making use of this might like to check for such a case.
!!! note:
This does not indicate that the derivative it is not implemented,
but rather that mathematically it is not defined.
This mostly shows up as the deriviative with respect to dimension, index, or size
arguments.
```
function rrule(fill, x, len::Int)
y = fill(x, len)
fill_pullback(ȳ) = (NO_FIELDS, @thunk(sum(Ȳ)), DoesNotExist())
return y, fill_pullback
end
```
"""
struct DoesNotExist <: AbstractZero end
function extern(x::DoesNotExist)
throw(ArgumentError("Derivative does not exit. Cannot be converted to an external type."))
end
|
{"hexsha": "75bd7472701c199a41415772f34ed58ac8ec636a", "size": 2490, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/differentials/abstract_zero.jl", "max_stars_repo_name": "piever/ChainRulesCore.jl", "max_stars_repo_head_hexsha": "b90ee242228d9cda7c0a90deab816621da93cfaf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/differentials/abstract_zero.jl", "max_issues_repo_name": "piever/ChainRulesCore.jl", "max_issues_repo_head_hexsha": "b90ee242228d9cda7c0a90deab816621da93cfaf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/differentials/abstract_zero.jl", "max_forks_repo_name": "piever/ChainRulesCore.jl", "max_forks_repo_head_hexsha": "b90ee242228d9cda7c0a90deab816621da93cfaf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.2, "max_line_length": 94, "alphanum_fraction": 0.7333333333, "num_tokens": 600}
|
!> Small matrix utilities shared by the shape-function routines.
module mod_monolis_shape_util
  use mod_monolis_prm
contains

  !> Computes the determinant and inverse of a 2x2 matrix.
  !>
  !> xj      : input 2x2 matrix (e.g. a Jacobian)
  !> inv     : output inverse of xj
  !> det     : output determinant of xj
  !> is_fail : optional; when present it is set to .true. if det < 0
  !>           instead of stopping the program
  subroutine monolis_get_inverse_matrix_2d(xj, inv, det, is_fail)
    implicit none
    real(kdouble) :: xj(2,2), inv(2,2), det, detinv
    logical, optional :: is_fail

    !> initialize the flag so callers never read an undefined value
    !> (mirrors monolis_get_inverse_matrix_3d, which already did this)
    if(present(is_fail)) is_fail = .false.

    det = xj(1,1) * xj(2,2) &
        - xj(2,1) * xj(1,2)

    if(det < 0.0d0)then
      if(present(is_fail))then
        is_fail = .true.
      else
        stop "determinant < 0.0"
      endif
    endif

    !> NOTE(review): det == 0 is not rejected, so detinv may overflow;
    !> this matches the original behavior
    detinv = 1.0d0/det
    inv(1,1) =  xj(2,2)*detinv
    inv(1,2) = -xj(1,2)*detinv
    inv(2,1) = -xj(2,1)*detinv
    inv(2,2) =  xj(1,1)*detinv
  end subroutine monolis_get_inverse_matrix_2d

  !> Computes the determinant and inverse of a 3x3 matrix.
  !> Same contract as monolis_get_inverse_matrix_2d.
  subroutine monolis_get_inverse_matrix_3d(xj, inv, det, is_fail)
    implicit none
    real(kdouble) :: xj(3,3), inv(3,3), det, detinv
    logical, optional :: is_fail

    if(present(is_fail)) is_fail = .false.

    !> cofactor expansion of the 3x3 determinant
    det = xj(1,1) * xj(2,2) * xj(3,3) &
        + xj(2,1) * xj(3,2) * xj(1,3) &
        + xj(3,1) * xj(1,2) * xj(2,3) &
        - xj(3,1) * xj(2,2) * xj(1,3) &
        - xj(2,1) * xj(1,2) * xj(3,3) &
        - xj(1,1) * xj(3,2) * xj(2,3)

    if(det < 0.0d0)then
      if(present(is_fail))then
        is_fail = .true.
      else
        stop "determinant < 0.0"
      endif
    endif

    !> adjugate matrix divided by the determinant
    detinv = 1.0d0/det
    inv(1,1) = detinv * ( xj(2,2)*xj(3,3) - xj(3,2)*xj(2,3))
    inv(1,2) = detinv * (-xj(1,2)*xj(3,3) + xj(3,2)*xj(1,3))
    inv(1,3) = detinv * ( xj(1,2)*xj(2,3) - xj(2,2)*xj(1,3))
    inv(2,1) = detinv * (-xj(2,1)*xj(3,3) + xj(3,1)*xj(2,3))
    inv(2,2) = detinv * ( xj(1,1)*xj(3,3) - xj(3,1)*xj(1,3))
    inv(2,3) = detinv * (-xj(1,1)*xj(2,3) + xj(2,1)*xj(1,3))
    inv(3,1) = detinv * ( xj(2,1)*xj(3,2) - xj(3,1)*xj(2,2))
    inv(3,2) = detinv * (-xj(1,1)*xj(3,2) + xj(3,1)*xj(1,2))
    inv(3,3) = detinv * ( xj(1,1)*xj(2,2) - xj(2,1)*xj(1,2))
  end subroutine monolis_get_inverse_matrix_3d

end module mod_monolis_shape_util
|
{"hexsha": "38f9782ab5c08416714dbb84aff8cd91dee971f9", "size": 1888, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/shape/shape_util.f90", "max_stars_repo_name": "nqomorita/monolis", "max_stars_repo_head_hexsha": "55d746a480fd7b9639216be19e0a253e6137dfe9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2020-03-11T20:24:40.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-24T02:31:06.000Z", "max_issues_repo_path": "src/shape/shape_util.f90", "max_issues_repo_name": "nqomorita/monolis", "max_issues_repo_head_hexsha": "55d746a480fd7b9639216be19e0a253e6137dfe9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/shape/shape_util.f90", "max_forks_repo_name": "nqomorita/monolis", "max_forks_repo_head_hexsha": "55d746a480fd7b9639216be19e0a253e6137dfe9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-08-01T09:34:26.000Z", "max_forks_repo_forks_event_max_datetime": "2019-08-01T09:34:26.000Z", "avg_line_length": 29.5, "max_line_length": 65, "alphanum_fraction": 0.531779661, "num_tokens": 937}
|
import pandas as pd
import numpy as np
from scipy import optimize
import re
import copy
from datetime import timedelta
from collections import defaultdict
import logging
from .pool import Uniswapv3Pool
from uniswapv3_simulator.tick import MIN_TICK, MAX_TICK
from .math import *
logger = logging.getLogger('uniswap-v3.utils')
def pool_init_price(token0, token1, tick_upper, tick_lower, liquidity_delta,
                    token0_decimals, token1_decimals):
    """Back out the pool's initial price from its first mint.

    Uses formula 6.29 to solve for sqrt(P) from the token1 amount, then
    verifies the result against the token0 amount via formula 6.30.

    :param token0: token0 amount deposited in the initializing mint.
    :param token1: token1 amount deposited in the initializing mint.
    :param tick_upper: upper tick of the minted range.
    :param tick_lower: lower tick of the minted range.
    :param liquidity_delta: liquidity minted; can be read off etherscan.io
        from the txn logs.
    :param token0_decimals: decimals convention of token0.
    :param token1_decimals: decimals convention of token1.
    :return: the implied initial pool price (token1 per token0).
    :raises ValueError: if either token amount is zero, i.e. the tick range
        does not contain the initial price.
    """
    if token0 == 0 or token1 == 0:
        raise ValueError('Tick range does not span the initial price.')
    lower_sqrt = tick_to_sqrt_price(tick_lower)
    upper_sqrt = tick_to_sqrt_price(tick_upper)
    # rescale amounts when the two tokens use different decimal conventions
    scale0 = 10.0 ** max(token1_decimals - token0_decimals, 0)
    scale1 = 10.0 ** max(token0_decimals - token1_decimals, 0)
    amt0 = token0 / scale0
    amt1 = token1 / scale1
    # formula 6.29: solve for sqrt(P) from the token1 side
    sqrt_p = amt1 / liquidity_delta + lower_sqrt
    # formula 6.30: token0 amount implied by that price
    implied0 = liquidity_delta * (1 / sqrt_p - 1 / upper_sqrt)
    # the implied token0 must agree with the observed deposit
    assert np.isclose(amt0, implied0, atol=1e-12, rtol=1e-8), (
        f'Calculated token0 {implied0:,.4f} does not match input '
        f'token0 {amt0:,.4f}.'
    )
    return sqrt_p ** 2
def solve_for_liquidity_delta(token0, token1, tick_lower, tick_upper,
                              sqrt_price, token0_decimals, token1_decimals,
                              check_res=False):
    """Solve for the liquidity change implied by a mint/burn's token amounts.

    Picks the applicable Uniswap v3 formula depending on whether the current
    price is below, above, or inside the position's tick range.

    :param token0: token0 amount of the liquidity event.
    :param token1: token1 amount of the liquidity event.
    :param tick_lower: lower tick of the position.
    :param tick_upper: upper tick of the position.
    :param sqrt_price: current sqrt price of the pool.
    :param token0_decimals: decimals convention of token0.
    :param token1_decimals: decimals convention of token1.
    :param check_res: if True, cross-check the result using the token0-side
        formula.
    :return: the implied liquidity delta.
    """
    lower_sqrt = tick_to_sqrt_price(tick_lower)
    upper_sqrt = tick_to_sqrt_price(tick_upper)
    current_tick = sqrt_price_to_tick(sqrt_price)
    # rescale amounts when the two tokens use different decimal conventions
    scale0 = 10.0 ** max(token1_decimals - token0_decimals, 0)
    scale1 = 10.0 ** max(token0_decimals - token1_decimals, 0)
    amt0 = token0 / scale0
    amt1 = token1 / scale1
    if current_tick < tick_lower:
        # price below the range: the position is entirely in token0
        assert amt1 == 0, f'Expected token1 to be 0, not {amt1:,.4f}.'
        liquidity_delta = amt0 / (1 / lower_sqrt - 1 / upper_sqrt)
    elif current_tick >= tick_upper:
        # price above the range: the position is entirely in token1
        assert amt0 == 0, f'Expected token0 to be 0, not {amt0:,.4f}.'
        liquidity_delta = amt1 / (upper_sqrt - lower_sqrt)
    else:
        # price inside the range: solve from the token1 side
        liquidity_delta = amt1 / (sqrt_price - lower_sqrt)
    if check_res:
        # cross-check with the token0-side formula
        alt = amt0 / (1 / sqrt_price - 1 / upper_sqrt)
        assert np.isclose(liquidity_delta, alt), (
            f'liquidity_delta {liquidity_delta:,.4f} does not match check '
            f'value {alt:,.4f}.'
        )
    return liquidity_delta
def set_positions(pool, liquidity_fn, position_width, min_price, max_price,
                  min_liquidity=1, position_id='pos_id', separate_pos=False):
    """Lay down LP positions of equal price width across a price range.

    For each price bucket, the liquidity to mint is taken from
    ``liquidity_fn`` evaluated at the bucket's midpoint price; buckets whose
    liquidity falls below ``min_liquidity`` are skipped.

    :param pool: pool object supporting ``set_position``.
    :param liquidity_fn: maps a midpoint price to a liquidity amount.
    :param position_width: price width of each position.
    :param min_price: lower edge of the covered range.
    :param max_price: upper edge of the covered range.
    :param min_liquidity: skip buckets below this liquidity.
    :param position_id: base id; a bucket index is appended when
        ``separate_pos`` is True.
    :param separate_pos: give each bucket its own position id.
    :return: dict mapping position id to its accumulated token0/token1.
    """
    result = {}
    n_edges = int((max_price - min_price) / position_width) + 1
    edges = np.linspace(min_price, max_price, n_edges)
    for idx, lo_price in enumerate(edges[:-1]):
        # a zero price has no finite tick, so nudge it slightly positive
        lo_sqrt = ((lo_price + 1e-8) ** 0.5 if lo_price == 0
                   else lo_price ** 0.5)
        tick_lo = sqrt_price_to_tick(lo_sqrt)
        tick_hi = sqrt_price_to_tick((lo_price + position_width) ** 0.5)
        mid_tick = int((tick_lo + tick_hi) / 2)
        mid_price = tick_to_sqrt_price(mid_tick) ** 2
        liq = liquidity_fn(mid_price)
        if liq < min_liquidity:
            continue
        pid = position_id + str(idx + 1) if separate_pos else position_id
        t0, t1 = pool.set_position(pid, tick_lo, tick_hi, liq)
        entry = result.setdefault(pid, {'token0': 0, 'token1': 0})
        entry['token0'] += t0
        entry['token1'] += t1
    return result
def close_all_positions(pool, account_id=None):
    """Withdraw all liquidity and collect all fees from a pool's positions.

    :param pool: pool object exposing ``position_map``, ``account_map``,
        ``set_position``, and ``collect_fees_earned``.
    :param account_id: if given, only close that account's positions;
        otherwise close every position in the pool.
    :return: tuple of (total token0, total token1, total token0 fees,
        total token1 fees) returned to the LP(s).
    """
    if account_id is None:
        targets = list(pool.position_map.values())
    else:
        targets = list(pool.account_map[account_id])
    tot_t0 = tot_t1 = 0
    tot_f0 = tot_f1 = 0
    for pos in targets:
        # burn the position's full liquidity ...
        t0, t1 = pool.set_position(
            pos.account_id,
            pos.tick_lower,
            pos.tick_upper,
            -pos.liquidity
        )
        # ... then sweep any fees it has earned
        f0, f1 = pool.collect_fees_earned(
            pos.account_id,
            pos.tick_lower,
            pos.tick_upper,
        )
        tot_t0 += t0
        tot_t1 += t1
        tot_f0 += f0
        tot_f1 += f1
    return tot_t0, tot_t1, tot_f0, tot_f1
def organize_txns(liquidity, swaps, max_date=None):
    """Merge liquidity events and swaps into one chronologically ordered frame.

    :param liquidity: DataFrame with 'tx_hash', 'txn_time', and
        'liquidity_event' columns.
    :param swaps: DataFrame with 'tx_hash' and 'swap_time' columns.
    :param max_date: if given, drop transactions after this timestamp.
    :return: DataFrame with 'orig_idx', 'tx_hash', 'txn_time', and 'event'
        columns, sorted by (txn_time, event).
    """
    liq = liquidity[['tx_hash', 'txn_time', 'liquidity_event']].copy()
    liq = liq.reset_index(drop=False).rename(
        columns={'liquidity_event': 'event', 'index': 'orig_idx'}
    )
    swp = swaps[['tx_hash', 'swap_time']].copy()
    swp = swp.reset_index(drop=False).rename(
        columns={'swap_time': 'txn_time', 'index': 'orig_idx'}
    )
    swp['event'] = 'SWAP'
    combined = pd.concat([liq, swp], axis=0)
    # When several transactions share a timestamp we process liquidity adds,
    # then removes, then swaps — exactly ascending alphabetical order on the
    # event name. Quite a few large liquidity transactions are immediately
    # reversed, and doing adds before subtracts processes those correctly.
    combined = combined.sort_values(['txn_time', 'event']).reset_index(drop=True)
    if max_date is not None:
        combined = combined.drop(
            combined.index[combined['txn_time'] > max_date], axis=0
        )
    return combined
def run_historical_pool(init_price, all_txn, liquidity, swaps,
                        save_freq='D', position_id='generic_LP',
                        checks_on=False, verbose=True,
                        token0_tols=None, token1_tols=None,
                        liquidity_tols=None):
    """Replay a history of liquidity events and swaps through a fresh pool.

    :param init_price: initial price used to construct the pool.
    :param all_txn: ordered transactions (see ``organize_txns``) with
        'txn_time', 'event', and 'orig_idx' columns.
    :param liquidity: liquidity (mint/burn) events; its first row also
        supplies the pool fee, tick spacing, and token decimals.
    :param swaps: swap events with 'token_0_amount'/'token_1_amount'.
    :param save_freq: pandas offset alias; a deep copy of the pool is saved
        whenever the floored txn time rolls over to a new period.
    :param position_id: account id used for every replayed LP position.
    :param checks_on: if True, assert replayed amounts match the data.
    :param verbose: if True, print the pool and per-transaction progress.
    :param token0_tols: np.isclose kwargs for the token0 check; defaults to
        {'atol': 1e-12, 'rtol': 1e-8}.
    :param token1_tols: np.isclose kwargs for the token1 check; defaults to
        {'atol': 1e-12, 'rtol': 1e-8}.
    :param liquidity_tols: np.isclose kwargs for the liquidity_delta check;
        defaults to {'atol': 1e-8, 'rtol': 1e-5}.
    :return: (pool_snapshots, tx_results) — snapshots keyed by
        'YYYY-MM-DD HH:MM:SS' strings, and a list of dicts with the
        post-transaction 'sqrt_price' and 'liquidity'.
    """
    # Default the tolerance kwargs here instead of using mutable dict
    # defaults in the signature (shared-mutable-default hazard).
    if token0_tols is None:
        token0_tols = {'atol': 1e-12, 'rtol': 1e-8}
    if token1_tols is None:
        token1_tols = {'atol': 1e-12, 'rtol': 1e-8}
    if liquidity_tols is None:
        liquidity_tols = {'atol': 1e-8, 'rtol': 1e-5}
    # pool-level parameters are constant, so read them off the first row
    fee = liquidity.at[0, 'pool_fee'] / 1e+6
    tick_spacing = liquidity.at[0, 'pool_tick_spacing']
    token0_decimals = liquidity.at[0, 'contract_decimals_token_0']
    token1_decimals = liquidity.at[0, 'contract_decimals_token_1']
    pool = Uniswapv3Pool(fee, tick_spacing, init_price,
                         token0_decimals=token0_decimals,
                         token1_decimals=token1_decimals)
    if verbose:
        print(f'{pool}')
    tx_results = []
    pool_snapshots = {}
    for i, row in all_txn.iterrows():
        logger.debug(f'Transaction {i}.')
        current_time = row['txn_time'].floor(save_freq)
        txn = row['event']
        idx = row['orig_idx']
        if 'LIQUIDITY' in txn:
            token0 = liquidity.at[idx, 'token_0_amount']
            token1 = liquidity.at[idx, 'token_1_amount']
            # burns remove tokens from the pool, so flip the signs
            if txn == 'REMOVE_LIQUIDITY':
                token0 = -1 * token0
                token1 = -1 * token1
            tick_lower = liquidity.at[idx, 'price_tick_lower']
            tick_upper = liquidity.at[idx, 'price_tick_upper']
            liquidity_delta = liquidity.at[idx, 'liquidity']
            if pd.isnull(liquidity_delta):
                # the data is missing the liquidity amount; derive it from
                # the token amounts and the current pool price
                liquidity_delta = solve_for_liquidity_delta(
                    token0,
                    token1,
                    tick_lower,
                    tick_upper,
                    pool.sqrt_price,
                    token0_decimals,
                    token1_decimals
                )
            elif checks_on:
                ld_calc = solve_for_liquidity_delta(
                    token0,
                    token1,
                    tick_lower,
                    tick_upper,
                    pool.sqrt_price,
                    token0_decimals,
                    token1_decimals
                )
                assert np.isclose(liquidity_delta, ld_calc, **liquidity_tols), (
                    f'Calculated liquidity_delta {ld_calc:,.12e} does '
                    f'not match liquidity_delta per the data '
                    f'{liquidity_delta:,.12e}.'
                )
            position = pool.position_map[(position_id, tick_lower, tick_upper)]
            # If the liquidity_delta is very, very close to the position's
            # total liquidity, set liquidity_delta to the total liquidity to
            # completely close out the position
            if np.isclose(-position.liquidity, liquidity_delta):
                liquidity_delta = -position.liquidity
            # we also make sure that liquidity_delta cannot be less than the
            # position's total liquidity
            if liquidity_delta < 0:
                if position.liquidity + liquidity_delta < -1:
                    logger.warning(
                        'Transaction could have negative liquidity. Limiting '
                        'liquidity_delta to the current position liquidity.'
                    )
                liquidity_delta = max(liquidity_delta, -position.liquidity)
            token0_calc, token1_calc = pool.set_position(
                position_id,
                tick_lower,
                tick_upper,
                liquidity_delta
            )
        elif txn == 'SWAP':
            token0 = swaps.at[idx, 'token_0_amount']
            token1 = swaps.at[idx, 'token_1_amount']
            # the positive amount identifies which token was swapped in
            token = 0 if token0 > 0 else 1
            tokens_in = token0 if token == 0 else token1
            token0_calc, token1_calc = pool.swap(token, tokens_in)
        else:
            raise ValueError(f'{txn} is not a valid transaction type.')
        if checks_on:
            assert np.isclose(token0, -token0_calc, **token0_tols), (
                f'Transaction {i:,}: token0 output {-token0_calc:,.12e} does '
                f'not match token0 in the data {token0:,.12e}.'
            )
            assert np.isclose(token1, -token1_calc, **token1_tols), (
                f'Transaction {i:,}: token1 output {-token1_calc:,.12e} does '
                f'not match token1 in the data {token1:,.12e}.'
            )
        if i + 1 < all_txn.shape[0]:
            # if the next date is different than the current date, save the
            # pool so we have the starting liquidity curve for the next day
            next_time = all_txn.at[i + 1, 'txn_time'].floor(save_freq)
            if next_time > current_time:
                date_key = next_time.strftime('%Y-%m-%d %H:%M:%S')
                pool_snapshots[date_key] = copy.deepcopy(pool)
        tx_results.append({
            'sqrt_price': pool.sqrt_price,
            'liquidity': pool.liquidity
        })
        if verbose:
            print(f'Completed transaction {i}.')
    return pool_snapshots, tx_results
def get_bin_ticks(price_bins, pool):
    """
    Map a sequence of prices to the nearest valid pool ticks.

    A price of 0 maps to the lowest usable tick and np.inf to the highest;
    every other price is converted via its square root with
    sqrt_price_to_tick and rounded to the pool's tick spacing.
    """
    spacing = pool.tick_spacing
    # Undo the token-decimal scaling so prices match the pool's raw units.
    scale = pool.token1_multiplier / pool.token0_multiplier

    def to_tick(price):
        if price == 0:
            return spacing * int(np.ceil(MIN_TICK / spacing))
        if price == np.inf:
            return spacing * int(np.floor(MAX_TICK / spacing))
        raw_tick = sqrt_price_to_tick(price ** 0.5)
        return spacing * int(np.round(raw_tick / spacing))

    return [to_tick(price / scale) for price in price_bins]
def split_position(position_tuple, bin_ticks):
    """
    Split a single liquidity position into one position per price bin.

    Parameters
    ----------
    position_tuple : tuple
        (account_id, tick_lower, tick_upper, liquidity_delta).
    bin_ticks : list
        Ascending bin-edge ticks covering the position's tick range.

    Returns
    -------
    list of tuple
        (position_id, tick_lower, tick_upper, liquidity_delta) per bin the
        position overlaps; position_id encodes the bin index and edges.

    Raises
    ------
    ValueError
        If the position extends beyond the first or last bin tick.
    """
    account_id, tick_lower, tick_upper, liq_delta = position_tuple
    if tick_lower < bin_ticks[0]:
        raise ValueError('Position tick lower is less than the min bin tick.')
    if tick_upper > bin_ticks[-1]:
        raise ValueError('Position tick upper is greater than the max bin tick.')
    split = []
    start = tick_lower
    for i, tick in enumerate(bin_ticks):
        # Skip bin edges at or below the current slice start.
        if tick <= start:
            continue
        # If this bin edge passes the position's upper tick, the slice ends
        # at the position's upper tick and this is the final piece.
        last_piece = tick > tick_upper
        end = tick_upper if last_piece else tick
        if start == end:
            logger.warning('Cannot set position if tick_lower == tick_upper.')
            continue
        pos_id = f'{account_id}_bin{i}_{bin_ticks[i - 1]}_{tick}'
        split.append((pos_id, start, end, liq_delta))
        if last_piece:
            break
        start = tick
    return split
def set_binned_positions(pool, position_map, price_bins):
    """
    Re-create every position in ``position_map`` on ``pool``, split by bin.

    Each source position is split across the price bins with
    ``split_position`` and opened on the pool. Returns a nested mapping
    {binned_position_id: {'token0': amount, 'token1': amount}} accumulating
    the token amounts returned by ``pool.set_position`` for each bin.
    """
    bin_ticks = get_bin_ticks(price_bins, pool)
    bin_tokens = defaultdict(lambda: defaultdict(lambda: 0))
    for position in position_map.values():
        source = (
            position.account_id,
            position.tick_lower,
            position.tick_upper,
            position.liquidity,
        )
        for binned in split_position(source, bin_ticks):
            amount0, amount1 = pool.set_position(*binned)
            totals = bin_tokens[binned[0]]
            totals['token0'] += amount0
            totals['token1'] += amount1
    return bin_tokens
def get_hours(timestamp):
    """
    Convert a timestamp's time-of-day to fractional hours.

    Parameters
    ----------
    timestamp : datetime-like
        Object exposing ``hour``, ``minute`` and ``second`` attributes.

    Returns
    -------
    float
        Hours since midnight, e.g. 02:30:36 -> 2.51.
    """
    # Bug fix: seconds convert to hours via 3600, not 360 (the original
    # overweighted the seconds component by a factor of 10).
    return timestamp.hour + timestamp.minute / 60 + timestamp.second / 3600
def calc_irr(cash_flows, times, init_guess=0.01):
    """
    Solve for the internal rate of return of a series of cash flows.

    Finds the rate r such that sum(cash_flows / (1 + r) ** times) == 0
    using scipy's fsolve, starting from ``init_guess``. Logs a warning if
    the solver reports non-convergence.
    """
    def npv(rate, flows, ts):
        # Net present value of the flows discounted at rate[0].
        return np.sum(flows / (1 + rate[0]) ** ts)

    solution, _, status, message = optimize.fsolve(
        npv,
        np.array([init_guess]),
        args=(cash_flows, times),
        full_output=True
    )
    if status != 1:
        # fsolve did not converge; surface its diagnostic message.
        logger.warning(f'Warning: {message} The solution may not be accurate.')
    return solution[0]
def calc_token_value(token0, token1, price, numeraire_token=1):
    """
    Express a (token0, token1) pair as a single value in the numeraire token.

    Parameters
    ----------
    token0, token1 : float
        Token amounts.
    price : float
        Price of token0 quoted in token1.
    numeraire_token : int, optional
        1 (default) values the pair in token1; 0 values it in token0.

    Returns
    -------
    float
        Combined value in the chosen numeraire.

    Raises
    ------
    ValueError
        If ``numeraire_token`` is not 0 or 1. (The original fell through
        and raised an opaque UnboundLocalError on ``value``.)
    """
    if numeraire_token == 1:
        return token1 + token0 * price
    if numeraire_token == 0:
        return token0 + token1 / price
    raise ValueError(f'numeraire_token must be 0 or 1, got {numeraire_token}.')
def calc_irr_per_bin(start_pool, price_bins, all_txn, liquidity, swaps,
                     period_start, period_end, numeraire_token=1,
                     position_id='generic_LP'):
    """
    Replay one period of transactions and compute the IRR for each price bin.

    The positions in ``start_pool`` are re-created on a fresh pool, split by
    ``price_bins``. Liquidity adds/removes and swaps from ``all_txn`` are
    then replayed; cash flows per bin are recorded (valued in the numeraire
    token) and every position is closed at the end of the period. Times are
    expressed as fractions of [period_start, period_end], so the returned
    IRRs are per-period rates.

    Returns
    -------
    dict
        Mapping of bin account_id -> IRR for the period.
    """
    token0_decimals = liquidity.at[0, 'contract_decimals_token_0']
    token1_decimals = liquidity.at[0, 'contract_decimals_token_1']
    multiplier = start_pool.token1_multiplier / start_pool.token0_multiplier
    # initialize a new pool object
    pool = Uniswapv3Pool(
        start_pool.fee,
        start_pool.tick_spacing,
        start_pool.price / multiplier,
        token0_decimals=token0_decimals,
        token1_decimals=token1_decimals
    )
    # reset the positions in the start_pool, splitting them by the different
    # price bins
    bin_ticks = get_bin_ticks(price_bins, pool)
    bin_tokens = set_binned_positions(pool, start_pool.position_map, price_bins)
    cash_flows = defaultdict(list)
    times = defaultdict(list)
    # calculate the starting value in the numeraire token for each bin
    for pos_bin, token_dict in bin_tokens.items():
        init_value = calc_token_value(token_dict['token0'], token_dict['token1'],
                                      pool.price, numeraire_token=numeraire_token)
        cash_flows[pos_bin].append(init_value)
        times[pos_bin].append(0)
    # iterate through the transactions
    for i, row in all_txn.iterrows():
        # use the module-level `logger` (consistent with the rest of this
        # module) rather than the root `logging` functions
        logger.info(f'Transaction {i}.')
        txn = row['event']
        idx = row['orig_idx']
        txn_time = row['txn_time']
        if 'LIQUIDITY' in txn:
            token0 = liquidity.at[idx, 'token_0_amount']
            token1 = liquidity.at[idx, 'token_1_amount']
            if txn == 'REMOVE_LIQUIDITY':
                token0 = -1 * token0
                token1 = -1 * token1
            tick_lower = liquidity.at[idx, 'price_tick_lower']
            tick_upper = liquidity.at[idx, 'price_tick_upper']
            liquidity_delta = liquidity.at[idx, 'liquidity']
            if pd.isnull(liquidity_delta):
                liquidity_delta = solve_for_liquidity_delta(
                    token0,
                    token1,
                    tick_lower,
                    tick_upper,
                    pool.sqrt_price,
                    token0_decimals,
                    token1_decimals
                )
            position_tuple = (position_id, tick_lower, tick_upper, liquidity_delta)
            for new_position in split_position(position_tuple, bin_ticks):
                position = pool.position_map[tuple(new_position[:3])]
                liquidity_delta = new_position[3]
                # If the liquidity_delta is very, very close to the position's
                # total liquidity, set liquidity_delta to the total liquidity
                # to completely close out the position
                if np.isclose(-position.liquidity, liquidity_delta):
                    liquidity_delta = -position.liquidity
                # we also make sure that liquidity_delta cannot be less than the
                # position's total liquidity
                if liquidity_delta < 0:
                    if position.liquidity + liquidity_delta < -1:
                        logger.warning(
                            'Transaction could have negative liquidity. '
                            'Limiting liquidity_delta to the current position '
                            'liquidity.'
                        )
                    liquidity_delta = max(liquidity_delta, -position.liquidity)
                token0, token1 = pool.set_position(
                    new_position[0],
                    new_position[1],
                    new_position[2],
                    liquidity_delta
                )
                # add any liquidity adds/removes to the cash flows for the bin
                cf = calc_token_value(token0, token1, pool.price,
                                      numeraire_token=numeraire_token)
                cash_flows[new_position[0]].append(cf)
                time_since_start = (
                    (txn_time - period_start) /
                    (period_end - period_start)
                )
                assert time_since_start <= 1.0, 'Time since start cannot be >1.0.'
                times[new_position[0]].append(time_since_start)
        elif txn == 'SWAP':
            token0 = swaps.at[idx, 'token_0_amount']
            token1 = swaps.at[idx, 'token_1_amount']
            # positive token0 means token0 flows into the pool
            token = 0 if token0 > 0 else 1
            tokens_in = token0 if token == 0 else token1
            _, _ = pool.swap(token, tokens_in)
        else:
            raise ValueError(f'{txn} is not a valid transaction type.')
    # close all positions for each bin (i.e., account_id)
    for account_id in pool.account_map.keys():
        tokens = close_all_positions(pool, account_id=account_id)
        total_token0, total_token1, total_fees_token0, total_fees_token1 = tokens
        token0 = total_token0 + total_fees_token0
        token1 = total_token1 + total_fees_token1
        # add the cash flows from the position to the cash flows
        cf = calc_token_value(token0, token1, pool.price,
                              numeraire_token=numeraire_token)
        cash_flows[account_id].append(cf)
        times[account_id].append(1.0)
    # calculate the IRR for each position; look the times up by key instead
    # of zipping two .items() iterators so the pairing cannot silently
    # misalign if insertion orders ever differ
    irrs = {}
    for account_id, cfs in cash_flows.items():
        irr = calc_irr(cfs, times[account_id], init_guess=0.01)
        irrs[account_id] = irr
    return irrs
def calc_all_returns_per_bin(pool_snapshots, all_txn, liquidity, swaps,
                             freq='D', sigma=0.04, numeraire_token=1):
    """
    Compute per-bin IRRs for every period between saved pool snapshots.

    For each period of length ``freq``, a price grid is built around the
    snapshot price (scaled by 1 + k*sigma for k in -10..10, padded with 0
    and np.inf edge bins) and ``calc_irr_per_bin`` is run on that period's
    transactions. Periods with a missing snapshot, periods whose replay
    fails its internal consistency assertions, and periods producing an
    IRR above 100% are skipped with a warning.

    NOTE(review): this function mutates ``all_txn``, ``liquidity`` and
    ``swaps`` in place by stripping timezone info from their time columns.

    Returns
    -------
    dict
        Mapping of period start timestamp -> {bin account_id: IRR}.
    """
    all_returns = {}
    date_range = pd.date_range(
        min(pool_snapshots.keys()),
        max(pool_snapshots.keys()),
        freq=freq
    )
    # make sure the timestamps have no timezone so we can perform various
    # operations on the timestamps
    all_txn['txn_time'] = all_txn['txn_time'].dt.tz_localize(None)
    liquidity['txn_time'] = liquidity['txn_time'].dt.tz_localize(None)
    swaps['swap_time'] = swaps['swap_time'].dt.tz_localize(None)
    # only iterate through first len(date_range) - 1 items as the last item
    # will not have a full period
    for i, period_start in enumerate(date_range[:-1]):
        period_end = date_range[i + 1]
        pool_key = period_start.strftime('%Y-%m-%d %H:%M:%S')
        if pool_key not in pool_snapshots:
            logger.warning(f'{pool_key} not included in the pool snapshots.')
            continue
        start_pool = pool_snapshots[pool_key]
        # price grid: snapshot price scaled by (1 + k*sigma), k = -10..10,
        # padded with 0 and inf catch-all edge bins
        price_bins = np.array(
            [0]
            + [start_pool.price * (1 + i * sigma) for i in range(-10, 11)]
            + [np.inf]
        )
        period_idx = (
            (all_txn['txn_time'] >= period_start) &
            (all_txn['txn_time'] < period_end)
        )
        txns = all_txn.loc[period_idx]
        # There are a few days for the DAI-WETH-500 pool where the swaps move
        # to an area of slightly less than 0 liquidity (due to rounding errors),
        # so we wrap this in a try/except. For simplicity, we just skip
        # these days.
        try:
            irrs = calc_irr_per_bin(
                start_pool,
                price_bins,
                txns,
                liquidity,
                swaps,
                period_start,
                period_end,
                numeraire_token=numeraire_token,
                position_id='generic_LP'
            )
            # There are a few days with extremely large IRRs, which appear
            # to be due to LPs creating artificial limit orders. For simplicity,
            # we just skip these days as well.
            add_to_returns = True
            for irr in irrs.values():
                if irr > 1:
                    logger.warning(f'{period_start}: Abnormal IRR {irr:,.2%}.')
                    add_to_returns = False
                    break
            if add_to_returns:
                all_returns[period_start] = irrs
        except AssertionError as e:
            logger.warning(f'{period_start}: {e}')
    return all_returns
|
{"hexsha": "37669f1aca62cca78cfdc6e86ddddc92e0f1e16a", "size": 24085, "ext": "py", "lang": "Python", "max_stars_repo_path": "uniswapv3_simulator/utils.py", "max_stars_repo_name": "pradeeptadas/uniswap-v3-project", "max_stars_repo_head_hexsha": "8f938dc5602fdb6e58b2cf42393a01994f48682d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "uniswapv3_simulator/utils.py", "max_issues_repo_name": "pradeeptadas/uniswap-v3-project", "max_issues_repo_head_hexsha": "8f938dc5602fdb6e58b2cf42393a01994f48682d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "uniswapv3_simulator/utils.py", "max_forks_repo_name": "pradeeptadas/uniswap-v3-project", "max_forks_repo_head_hexsha": "8f938dc5602fdb6e58b2cf42393a01994f48682d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.399068323, "max_line_length": 83, "alphanum_fraction": 0.5933983807, "include": true, "reason": "import numpy,from scipy", "num_tokens": 5559}
|
# ---------------------------------------------------------------------------------------------------------------------
# system
import sys
from math import sqrt
# ---------------------------------------------------------------------------------------------------------------------
# scientific
import numpy as np
# ---------------------------------------------------------------------------------------------------------------------
# PyQuantum.TC_Lindblad
from PyQuantum.TC_Lindblad.Cavity import Cavity
from PyQuantum.TC_Lindblad.Hamiltonian import Hamiltonian
from PyQuantum.TC_Lindblad.WaveFunction import WaveFunction
from PyQuantum.TC_Lindblad.DensityMatrix import DensityMatrix
from PyQuantum.TC_Lindblad.Evolution import run
import PyQuantum.TC_Lindblad.config as config
# ---------------------------------------------------------------------------------------------------------------------
# PyQuantum.TC3
from PyQuantum.TC.states_collection import *
# ---------------------------------------------------------------------------------------------------------------------
# PyQuantum.Tools
from PyQuantum.Tools.LoadPackage import load_pkg
from PyQuantum.Tools.Assert import *
from PyQuantum.Tools.Print import hr
from PyQuantum.Tools.MkDir import *
from PyQuantum.Tools.CSV import *
from PyQuantum.Tools.Units import *
from PyQuantum.Tools.Pickle import *
# ---------------------------------------------------------------------------------------------------------------------
# PyQuantum.Common
from PyQuantum.Common.Quantum.Operators import operator_a
# ---------------------------------------------------------------------------------------------------------------------
# -----------------------------------------------
# Simulation parameters: decay rate, total time and time step.
l = config.g * 0.01
T = 1 * config.ms
# dt = 0.01 / l
# dt = 1 * config.ns
dt = 10 * config.ns
# dt = 1 * config.ns / 10
# dt = (0.001/l)
# Assert(dt <= 0.01/l, 'dt > 0.01/l')
# Number of time steps (the original assigned this twice; once suffices).
nt = int(T/dt)
cprint('T:', 'green', end='')
print(time_unit_full(T))
cprint('dt:', 'green', end='')
# Print the time-step value so the 'dt:' label is not left dangling on the
# console (the original commented this out, gluing 'dt:' to the next label).
print(time_unit_full(dt))
cprint('nt:', 'green', end='')
print(nt)
# -----------------------------------------------
# Build Hamiltonians and initial wave functions for the two initial states.
H_1_00 = get_H_1_00()
w0_1_00 = get_w0_1_00(H_1_00)
H_1_D = get_H_1_D()
w0_1_D = get_w0_1_D(H_1_D)
# ---------------------------------------------------------------------------------------------------------------------
# Output directories for the pickled results.
mkdir('sink')
mkdir('sink/1ms_l001g')
# ---------------------------------------------------------------------------------------------------------------------
for state in [
    {
        'name': '1_00',
        'w0': w0_1_00,
        'H': H_1_00,
    },
    {
        'name': '1_D',
        'w0': w0_1_D,
        'H': H_1_D,
    },
]:
    # -----------------------------------------------------------------------------------------------------------------
    # Evolve the density matrix under a Lindblad equation with a single
    # dissipation channel (operator_a of the Hamiltonian, rate l), recording
    # times and sink populations until 'sink_limit' is reached.
    state['w0'].normalize()
    ro_0 = DensityMatrix(state['w0'])
    T_list = []
    sink_list = []
    run({
        "ro_0": ro_0,
        "H": state['H'],
        "dt": dt,
        "sink_list": sink_list,
        "T_list": T_list,
        "precision": 1e-3,
        'sink_limit': 1,
        "thres": 0.001,
        'lindblad': {
            'out': {
                'L': operator_a(state['H']),
                'l': l
            },
        },
    })
    # MkDir('sink')
    # Persist results for later plotting/analysis.
    pickle_dump(T_list, 'sink/1ms_l001g/T_list_' + state['name'] + '.pkl')
    pickle_dump(sink_list, 'sink/1ms_l001g/sink_list_' +
                state['name'] + '.pkl')
    # -----------------------------------------------------------------------------------------------------------------
# ---------------------------------------------------------------------------------------------------------------------
# =====================================================================================================================
|
{"hexsha": "f36d0cffb2f07d82043a476029519f507c2fab53", "size": 3844, "ext": "py", "lang": "Python", "max_stars_repo_path": "prepare_sink.py", "max_stars_repo_name": "alexfmsu/pyquantum", "max_stars_repo_head_hexsha": "78b09987cbfecf549e67b919bb5cb2046b21ad44", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "prepare_sink.py", "max_issues_repo_name": "alexfmsu/pyquantum", "max_issues_repo_head_hexsha": "78b09987cbfecf549e67b919bb5cb2046b21ad44", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "prepare_sink.py", "max_forks_repo_name": "alexfmsu/pyquantum", "max_forks_repo_head_hexsha": "78b09987cbfecf549e67b919bb5cb2046b21ad44", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-07-28T08:40:06.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-16T23:04:58.000Z", "avg_line_length": 32.3025210084, "max_line_length": 119, "alphanum_fraction": 0.3514568158, "include": true, "reason": "import numpy", "num_tokens": 777}
|
from scipy import interpolate as interp
from typing import Union, Callable
import multiprocessing as mp
import numpy as np
import pyslm
class parallelprocess(mp.Process):
def __init__(self, inData, isPlayed, params):
# Inheriting the class multiprocessing.Process()
mp.Process.__init__(self)
# Other initializations for event flags
self.params = params
self.inData = inData
self.isPlayed = isPlayed
self.results = mp.Queue(self.params['numSamples']//2)
# Checking software version parameters
if self.params['version'] == 'AdvFreqAnalyzer':
# Configuring filters
self._set_band_filter()
# Set parameters
self.leq_bands_sliding = np.zeros(shape=self.bandfilter.fnom.shape)
self.Leq_bands = np.empty(shape=self.bandfilter.fnom.shape)
self.L_max_bands = np.ones(shape=self.bandfilter.fnom.shape) * -10e3
self.L_min_bands = np.ones(shape=self.bandfilter.fnom.shape) * 10e3
# Set parameters
self.time_interval = 1
self.leq_global_sliding = 0
self.lAeq_global_sliding = 0
self.sel_global_sliding = self.params['tau']
self.Leq_global = 0
self.Lglobal = np.array([])
self.Lpeak = 0
# Set filters
self.weightingfilter = pyslm.weighting(
fs=self.params['fs'],
tau=self.params['tau'],
kind=self.params['fweighting']
)
self.weightingPeak = pyslm.weighting(
fs=self.params['fs'],
tau=self.params['tau'],
kind='C'
)
self.weightingSEL = pyslm.weighting(
fs=self.params['fs'],
tau=self.params['tau'],
kind='A'
)
# Other variables
self.FC = float()
self.idMax = None
self.refPressure = 2e-05
if self.params['applyMicCorr'] or self.params['applyAdcCorr']:
if params['micCorr'] is not None or params['micCorr'] is not None:
self.corr = True
else:
self.corr = False
else:
self.corr = False
pass
# The multiprocessing class needs a run () method
if self.params['template'] == 'stand-by':
if self.params['version'] == 'AdvFreqAnalyzer':
self.run = self.stand_by
elif self.params['version'] == 'DataLogger':
self.run = self.stand_by_datalogger
else:
pass
elif self.params['template'] == 'spl':
self.run = self.datalogger
elif self.params['template'] == 'frequencyAnalyzer':
self.run = self.frequencyAnalyzer
elif self.params['template'] == 'reverberationTime':
self.run = self.reverberationTime
elif self.params['template'] == 'calibration':
self.run = self.calibration
else:
pass
    def stand_by(self) -> Callable:
        """
        Description
        -----------
        Process loop for the 'stand-by' template (AdvFreqAnalyzer version).
        Pulls raw microphone frames [Pa] from ``self.inData`` while the
        ``isPlayed`` event is set, computing the global sound pressure
        level and per-band levels for each frame and pushing them onto
        ``self.results``.

        Processing steps
        ----------------
        (1) Apply spectral correction if correction files exist
        (2) Frequency weighting filter (A, C and Z)
        (3) Filter in octave bands (1/1 or 1/3)
        (4) Sound level by frequency bands
        (5) Time weight filter (Impulse, Fast and Slow)
        (6) Global sound level

        Queued results
        --------------
        dict with keys 'Lp_global' (float), 'Lp_bands' (np.ndarray) and
        the plot metadata 'strBands', 'x_axis', 'bands'.
        """
        try:
            # Busy-wait until the measurement is started.
            while not self.isPlayed.is_set():
                continue
            while self.isPlayed.is_set():
                if not self.inData.empty():
                    rawData = self.inData.get_nowait()
                    # Applying calibration factor
                    rawData = rawData * self.params['calibFactor']
                    # Getting global and band levels
                    signal = rawData[:, 0]
                    # 1) Apply spectral correction if correction files exist
                    if self.corr:
                        signal = self._apply_correction(signal=signal, domain='time')
                    # 2) Applying frequency weighting filter
                    signal_freq_weighting = self.weightingfilter.frequency(
                        signal=signal)
                    # 3) Applying octave band filter
                    filteredSignal = self.bandfilter.filter(data=signal_freq_weighting)
                    # 4) Calculating sound pressure level by bands
                    # NOTE(review): `rms` is not defined in this module's
                    # visible imports -- presumably provided by pyslm; verify.
                    Lp_bands = np.round(10 * \
                        np.log10(rms(a=filteredSignal**2, #signal_time_weighting,
                                     axis=0)**2/self.refPressure**2), 2)
                    # 5) Applying time weighting filter
                    signal_time_weighting = self.weightingfilter.time(
                        signal=signal_freq_weighting**2, reshape=False)
                    # 6) Calculating overall sound pressure level
                    Lp_global = np.round(10 * np.log10(rms(a=signal_time_weighting, axis=0)**2/self.refPressure**2), 2)
                    # Queuing results
                    self.results.put_nowait({'Lp_global': Lp_global,
                                             'Lp_bands': Lp_bands,
                                             'strBands': self.strBands,
                                             'x_axis': self.x_axis,
                                             'bands': self.bands})
                else:
                    if self.isPlayed.is_set():
                        continue
                    else:
                        break
            else:
                pass
        except Exception as E:
            print("parallelprocess.run(): ", E, "\n")
        return
    def stand_by_datalogger(self) -> Callable:
        """
        Description
        -----------
        Process loop for the 'stand-by' template (DataLogger version).
        Pulls raw microphone frames [Pa] from ``self.inData`` while the
        ``isPlayed`` event is set and queues each frame's global sound
        pressure level on ``self.results``.

        Processing steps
        ----------------
        (1) Apply spectral correction if correction files exist
        (2) Frequency weighting filter (A, C and Z)
        (3) Time weight filter (Impulse, Fast and Slow)
        (4) Global sound level

        Queued results
        --------------
        dict with key 'Lp_global' (float).
        """
        try:
            # Busy-wait until the measurement is started.
            while not self.isPlayed.is_set():
                continue
            while self.isPlayed.is_set():
                if not self.inData.empty():
                    rawData = self.inData.get_nowait()
                    # Applying calibration factor
                    rawData = rawData * self.params['calibFactor']
                    # Getting global and band levels
                    signal = rawData[:, 0]
                    # 1) Apply spectral correction if correction files exist
                    if self.corr:
                        signal = self._apply_correction(signal=signal, domain='time')
                    # 2) Applying frequency weighting filter
                    signal_freq_weighting = self.weightingfilter.frequency(
                        signal=signal)
                    # 3) Applying time weighting filter
                    signal_time_weighting = self.weightingfilter.time(
                        signal=signal_freq_weighting**2, reshape=False)
                    # 4) Calculating overall sound pressure level
                    Lp_global = np.round(10 * np.log10(rms(a=signal_time_weighting, axis=0)**2/self.refPressure**2), 2)
                    # Queuing results
                    self.results.put_nowait({'Lp_global': Lp_global})
                else:
                    if self.isPlayed.is_set():
                        continue
                    else:
                        break
            else:
                pass
        except Exception as E:
            print("parallelprocess.run(): ", E, "\n")
        return
    def datalogger(self) -> Callable:
        """
        Description
        -----------
        Process loop for the 'spl' template. Pulls raw microphone frames
        [Pa] from ``self.inData`` while the ``isPlayed`` event is set and
        queues the instantaneous, equivalent-continuous, peak and
        sound-exposure levels on ``self.results``.

        Processing steps
        ----------------
        (1) Apply spectral correction if correction files exist
        (2) Frequency weighting filter (A, C and Z)
        (3) Time weight filter (Impulse, Fast and Slow)
        (4) Global sound level
        (5) C-weighted peak sound level
        (6) Equivalent continuous sound level (Leq)
        (7) A-weighted sound exposure level (SEL)

        Queued results
        --------------
        dict with keys 'Lp_global', 'Leq_global', 'Lpeak', 'Lglobal'
        (level history array), 'SEL' and 'signal' (the raw frame).
        """
        try:
            while not self.isPlayed.is_set():
                continue
            while self.isPlayed.is_set():
                if not self.inData.empty():
                    rawData, _ = self.inData.get_nowait()
                    # Applying calibration factor
                    rawData = rawData * self.params['calibFactor']
                    # Getting global and band levels
                    signal = rawData[:, 0]
                    # 1) Apply spectral correction if correction files exist
                    if self.corr:
                        signal = self._apply_correction(signal=signal, domain='time')
                    # 2) Applying frequency weighting filter
                    signal_freq_weighting = self.weightingfilter.frequency(
                        signal=signal)
                    # 3) Applying time weighting filter
                    signal_time_weighting = self.weightingfilter.time(
                        signal=signal_freq_weighting**2, reshape=False)
                    # 4) Calculating overall sound pressure level
                    Lp_global = np.round(10*np.log10(rms(a=signal_time_weighting, axis=0)**2/self.refPressure**2), 2)
                    self.Lglobal = np.append(self.Lglobal, Lp_global)
                    # 5) Peak sound level (skipped on the first frame)
                    if self.time_interval > 1:
                        C_weighting_Peak = self.weightingPeak.frequency(signal=signal)
                        C_Peak = np.max(np.abs(C_weighting_Peak))
                        Lpeak = np.round(10*np.log10(C_Peak**2/self.refPressure**2), 2)
                        if Lpeak > self.Lpeak:
                            self.Lpeak = Lpeak
                        else:
                            pass
                    else:
                        pass
                    # 6) Calculating equivalent continuous sound level
                    self.leq_global_sliding += 10**(Lp_global/10)
                    self.Leq_global = np.round(10*np.log10(1/self.time_interval * self.leq_global_sliding), 2)
                    # 7) Sound Exposure Level A-weighted
                    signal_freq_weighting_SEL = self.weightingSEL.frequency(signal=signal)
                    signal_time_weighting_SEL = self.weightingSEL.time(signal=signal_freq_weighting_SEL**2, reshape=False)
                    self.lAeq_global_sliding += rms(a=signal_time_weighting_SEL, axis=0)**2/self.refPressure**2
                    LAeq_global = np.round(10*np.log10(1/self.time_interval * self.lAeq_global_sliding), 2)
                    SEL = np.round(LAeq_global + 10*np.log10(self.sel_global_sliding), 2)
                    self.sel_global_sliding += self.params['tau']
                    self.time_interval += 1
                    # Queuing results
                    self.results.put_nowait({'Lp_global': Lp_global,
                                             'Leq_global': self.Leq_global,
                                             'Lpeak': self.Lpeak,
                                             'Lglobal': self.Lglobal,
                                             'SEL': SEL,
                                             'signal': rawData})
                else:
                    if self.isPlayed.is_set():
                        continue
                    else:
                        break
            else:
                pass
        except Exception as E:
            print("parallelprocess.run(): ", E, "\n")
        return
    def frequencyAnalyzer(self) -> Callable:
        """
        Description
        -----------
        Process loop for the 'frequencyAnalyzer' template. Computes global
        and per-band sound pressure levels plus running min/max/Leq
        statistics per band, the C-weighted peak and the A-weighted sound
        exposure level, and queues everything on ``self.results``.

        Processing steps
        ----------------
        (1) Apply spectral correction if correction files exist
        (2) Frequency weighting filter (A, C and Z)
        (3) Filter in octave bands (1/1 or 1/3)
        (4) Sound level by frequency bands (with running min/max)
        (5) Time weight filter (Impulse, Fast and Slow)
        (6) Global sound level
        (7) C-weighted peak sound level
        (8) Equivalent continuous sound levels (per band and global)
        (9) A-weighted sound exposure level (SEL)

        Queued results
        --------------
        dict with keys 'Lp_global', 'Lp_bands', 'L_max_bands',
        'L_min_bands', 'Leq_bands', 'Leq_global', 'Lpeak', 'Lglobal',
        'SEL', 'signal', 'strBands', 'x_axis' and 'bands'.
        """
        try:
            while not self.isPlayed.is_set():
                continue
            while self.isPlayed.is_set():
                if not self.inData.empty():
                    rawData, _ = self.inData.get_nowait()
                    # Applying calibration factor
                    rawData = rawData * self.params['calibFactor']
                    # Getting global and band levels
                    signal = rawData[:, 0]
                    # 1) Apply spectral correction if correction files exist
                    if self.corr:
                        signal = self._apply_correction(signal=signal, domain='time')
                    # 2) Applying frequency weighting filter
                    signal_freq_weighting = self.weightingfilter.frequency(
                        signal=signal)
                    # 3) Applying octave band filter
                    filteredSignal = self.bandfilter.filter(data=signal_freq_weighting)
                    # 4) Calculating sound pressure level by bands
                    Lp_bands = np.round(10*np.log10(rms(a=filteredSignal**2, axis=0)**2/self.refPressure**2), 2)
                    # Track the running min/max level per band.
                    for i in range(self.bands.size):
                        if Lp_bands[i] > self.L_max_bands[i]:
                            self.L_max_bands[i] = Lp_bands[i]
                        if Lp_bands[i] < self.L_min_bands[i]:
                            self.L_min_bands[i] = Lp_bands[i]
                    # 5) Applying time weighting filter
                    signal_time_weighting = self.weightingfilter.time(
                        signal=signal_freq_weighting**2, reshape=False)
                    # 6) Calculating overall sound pressure level
                    Lp_global = np.round(10*np.log10(rms(a=signal_time_weighting, axis=0)**2/self.refPressure**2), 2)
                    self.Lglobal = np.append(self.Lglobal, Lp_global)
                    # 7) Peak sound level (skipped on the first frame)
                    if self.time_interval > 1:
                        C_weighting_Peak = self.weightingPeak.frequency(signal=signal)
                        C_Peak = np.max(np.abs(C_weighting_Peak))
                        Lpeak = np.round(10*np.log10(C_Peak**2/self.refPressure**2), 2)
                        if Lpeak > self.Lpeak:
                            self.Lpeak = Lpeak
                        else:
                            pass
                    else:
                        pass
                    # 8) Calculating equivalent continuous sound level
                    self.leq_bands_sliding += 10**(Lp_bands/10)
                    self.leq_global_sliding += 10**(Lp_global/10)
                    self.Leq_bands = np.round(10*np.log10(1/self.time_interval * self.leq_bands_sliding), 2)
                    self.Leq_global = np.round(10*np.log10(1/self.time_interval * self.leq_global_sliding), 2)
                    # 9) Sound Exposure Level A-weighted
                    signal_freq_weighting_SEL = self.weightingSEL.frequency(signal=signal)
                    signal_time_weighting_SEL = self.weightingSEL.time(signal=signal_freq_weighting_SEL**2, reshape=False)
                    self.lAeq_global_sliding += rms(a=signal_time_weighting_SEL, axis=0)**2/self.refPressure**2
                    LAeq_global = np.round(10*np.log10(1/self.time_interval * self.lAeq_global_sliding), 2)
                    SEL = np.round(LAeq_global + 10*np.log10(self.sel_global_sliding), 2)
                    self.sel_global_sliding += self.params['tau']
                    self.time_interval += 1
                    # Queuing results
                    self.results.put_nowait({'Lp_global': Lp_global,
                                             'Lp_bands': Lp_bands,
                                             'L_max_bands': self.L_max_bands,
                                             'L_min_bands': self.L_min_bands,
                                             'Leq_bands': self.Leq_bands,
                                             'Leq_global': self.Leq_global,
                                             'Lpeak': self.Lpeak,
                                             'Lglobal': self.Lglobal,
                                             'SEL': SEL,
                                             'signal': rawData,
                                             'strBands': self.strBands,
                                             'x_axis': self.x_axis,
                                             'bands': self.bands})
                else:
                    if self.isPlayed.is_set():
                        continue
                    else:
                        break
            else:
                pass
        except Exception as E:
            print("parallelprocess.run(): ", E, "\n")
        return
    def reverberationTime(self) -> Callable:
        """
        Description
        -----------
        Process loop for the 'reverberationTime' template. Computes the
        same global and per-band levels as the stand-by loop and forwards
        them together with the raw frame, the frame counter and the decay
        counter needed by the RT post-processing.

        Processing steps
        ----------------
        (1) Apply spectral correction if correction files exist
        (2) Frequency weighting filter (A, C and Z)
        (3) Filter in octave bands (1/1 or 1/3)
        (4) Sound level by frequency bands
        (5) Time weight filter (Impulse, Fast and Slow)
        (6) Global sound level

        Queued results
        --------------
        dict with keys 'Lp_global', 'Lp_bands', 'strBands', 'x_axis',
        'signal', 'framesRead', 'countDecay' and 'bands'.
        """
        try:
            while not self.isPlayed.is_set():
                continue
            while self.isPlayed.is_set():
                if not self.inData.empty():
                    rawData, framesRead, countDecay = self.inData.get_nowait()
                    # Applying calibration factor
                    rawData = rawData * self.params['calibFactor']
                    # Getting global and band levels
                    signal = rawData[:, 0]
                    # 1) Apply spectral correction if correction files exist
                    if self.corr:
                        signal = self._apply_correction(signal=signal, domain='time')
                    # 2) Applying frequency weighting filter
                    signal_freq_weighting = self.weightingfilter.frequency(
                        signal=signal)
                    # 3) Applying octave band filter
                    filteredSignal = self.bandfilter.filter(data=signal_freq_weighting)
                    # 4) Calculating sound pressure level by bands
                    Lp_bands = np.round(10 * \
                        np.log10(rms(a=filteredSignal**2, #signal_time_weighting,
                                     axis=0)**2/self.refPressure**2), 2)
                    # 5) Applying time weighting filter
                    signal_time_weighting = self.weightingfilter.time(
                        signal=signal_freq_weighting**2, reshape=False)
                    # 6) Calculating overall sound pressure level
                    Lp_global = np.round(10 * np.log10(rms(a=signal_time_weighting, axis=0)**2/self.refPressure**2), 2)
                    # Queuing results
                    self.results.put_nowait({'Lp_global': Lp_global,
                                             'Lp_bands': Lp_bands,
                                             'strBands': self.strBands,
                                             'x_axis': self.x_axis,
                                             'signal': rawData,
                                             'framesRead': framesRead,
                                             'countDecay': countDecay,
                                             'bands': self.bands})
                else:
                    if self.isPlayed.is_set():
                        continue
                    else:
                        break
            else:
                pass
        except Exception as E:
            print("parallelprocess.run(): ", E, "\n")
        return
    def calibration(self) -> Callable:
        """
        Description
        -----------
        Process loop for the 'calibration' template. Computes the FFT of
        each frame, locates the calibrator tone near ``params['fCalib']``
        and derives the microphone sensitivity, the calibration factor FC
        and the spectrum for display on the calibration screen.

        Queued results
        --------------
        dict with keys 'sensitivity' (scaled by 1000), 'correction', 'FC',
        'signal' and 'framesRead'; plus 'SPL', 'freqVector', 'SPLmax' and
        'freqmax' for the AdvFreqAnalyzer version only.
        """
        try:
            while not self.isPlayed.is_set():
                continue
            while self.isPlayed.is_set():
                if not self.inData.empty():
                    self._inData, framesRead = self.inData.get_nowait()
                    # Getting global and band levels
                    signal = self._inData[:, 0]
                    # 1) Apply spectral correction if correction files exist
                    if self.corr:
                        freqSignal = self._apply_correction(
                            signal=signal, domain='freq')
                    else:
                        freqSignal = np.fft.rfft(signal, axis=0, norm=None)
                    numSamples = len(signal)
                    # Scale to a single-sided RMS spectrum.
                    freqSignal /= 2**0.5
                    freqSignal /= len(freqSignal)
                    freqVector = np.linspace(0, (numSamples - 1) *
                                             self.params['fs'] /
                                             (2*numSamples),
                                             (int(numSamples/2)+1)
                                             if numSamples % 2 == 0
                                             else int((numSamples+1)/2))
                    # Search window of +/- 50 Hz around the calibrator tone.
                    a = np.where(freqVector >= self.params['fCalib'] - 50)[0][0]
                    b = np.where(freqVector <= self.params['fCalib'] + 50)[0][-1]
                    sensitivity = np.abs(freqSignal[a:b]).max()
                    # NOTE(review): the 104 dB threshold presumably separates
                    # 94 dB from 114 dB calibrators -- confirm.
                    if 20 * np.log10(sensitivity/self.refPressure) > 104:
                        FC = 10/sensitivity
                    else:
                        FC = 1/sensitivity
                    with np.errstate(divide='ignore'):
                        SPL = 20 * np.log10(np.abs(freqSignal)/self.refPressure)
                    sensitivity = np.round(sensitivity, 2)
                    correction = np.round(np.abs(10*np.log10(sensitivity)) -
                                          np.abs(10*np.log10(1/self.params['calibFactor'])), 2)
                    idMax = np.where(SPL == SPL[a:b].max())[0][0]
                    SPLmax = np.round(SPL[idMax], 2)
                    freqmax = np.round(freqVector[idMax], 2)
                    # Queuing results
                    results = {}
                    if self.params['version'] == 'AdvFreqAnalyzer':
                        results['SPL'] = SPL
                        results['freqVector'] = freqVector
                        results['SPLmax'] = SPLmax
                        results['freqmax'] = freqmax
                    results['sensitivity'] = sensitivity*1000
                    results['correction'] = correction
                    results['FC'] = FC
                    results['signal'] = self._inData
                    results['framesRead'] = framesRead
                    self.results.put_nowait(results)
                else:
                    if self.isPlayed.is_set():
                        continue
                    else:
                        break
            else:
                pass
        except Exception as E:
            print("parallelprocess.run(): ", E, "\n")
        return
def _set_band_filter(self) -> None:
    """
    Description
    -----------
    Configures the fractional-octave band filter used by the analyzer and
    builds the list of frequency-axis labels used by the plots.

    Side effects
    ------------
    Sets ``self.bandfilter`` (a ``pyslm.OctFilter``), ``self.bands`` (its
    nominal centre frequencies), ``self.x_axis`` (integer plot positions)
    and ``self.strBands`` (a list of ``(index, label)`` tuples, with an
    empty label for bands without a standard nominal name).

    Returns
    -------
    None
        The original annotation claimed ``Callable`` but the method only
        mutates attributes and returns ``None``.
    """
    try:
        # Fractional-octave filter built from the measurement setup.
        self.bandfilter = pyslm.OctFilter(
            fstart=self.params['fstart'],
            fend=self.params['fend'],
            b=self.params['b'],
            fs=self.params['fs']
        )
        # Nominal frequencies used on the "x" axis of the plots.
        self.bands = self.bandfilter.fnom
        self.x_axis = np.asarray(range(self.bands.size), dtype=np.int32)
        # Labels for the standard octave-band centre frequencies.
        freq = {
            31.5: '31.5',
            63.: '63',
            125.: '125',
            250.: '250',
            500.: '500',
            1000.: '1k',
            2000.: '2k',
            4000.: '4k',
            8000.: '8k',
            16000.: '16k'
        }
        self.strBands = []
        for i in range(self.bands.size):
            # dict.get supplies '' for bands without a nominal label.
            self.strBands.append((i, freq.get(self.bands[i], '')))
    except Exception as E:
        print("parallelprocess._set_band_filter(): ", E, "\n")
    return
def _apply_correction(self, signal: np.ndarray, domain: str = 'time') -> np.ndarray:
    """
    Description
    -----------
    Function that applies spectral correction to the audio signal, removing
    the spectral 'coloring' of the microphone and/or the analog to digital
    converter. The correction curves are read from ``self.params['micCorr']``
    and ``self.params['adcCorr']`` and are assumed to be magnitude values (dB)
    already sampled on the signal's frequency grid.

    Parameters
    ----------
    signal : np.ndarray
        Audio time series in which spectrum correction will be applied
    domain : str
        Signal domain of the returned value: 'time' returns a corrected
        time series, 'freq' the corrected complex spectrum.

    Returns
    -------
    correctedSignal : numpy.ndarray
        Audio signal with correction applied

    Raises
    ------
    AttributeError
        If ``domain`` is neither 'time' nor 'freq'.
    """
    # Bug fix: the original built the AttributeError without raising it, and
    # raising inside the try would have been swallowed by the except below
    # (then `return correctedSignal` would fail with UnboundLocalError).
    # Validate up-front instead.
    if domain.lower() not in ('time', 'freq'):
        raise AttributeError(
            "Unsupported domain, please try domain = 'freq'" +
            " if the `signal` parameter is the signal power spectrum," +
            " or try domain = 'time' if the` signal` parameter is the signal" +
            " measured in the time domain."
        )
    try:
        with np.errstate(divide='ignore'):
            freqSignal = np.fft.rfft(signal, axis=0, norm=None)
            # Magnitude spectrum in dB.
            MagfreqSignal = 20 * np.log10(np.abs(freqSignal))
            correctedMagfreqSignal = MagfreqSignal
            # Subtract the microphone response, if present and enabled.
            if self.params['micCorr'] is not None and self.params['applyMicCorr']:
                correctedMagfreqSignal -= self.params['micCorr']
            # Subtract the ADC response, if present and enabled.
            if self.params['adcCorr'] is not None and self.params['applyAdcCorr']:
                correctedMagfreqSignal -= self.params['adcCorr']
            # Rebuild the complex spectrum from the corrected magnitude and
            # the original phase.
            r = 10**(correctedMagfreqSignal / 20)
            teta = np.angle(freqSignal)
            correctedfreqSignal = r*(np.cos(teta) + np.sin(teta)*1j)
            if domain.lower() == 'time':
                # Get the inverse Fourier transform (ifft).
                correctedSignal = np.fft.irfft(a=correctedfreqSignal)
            else:
                correctedSignal = correctedfreqSignal
    except Exception as E:
        print("parallelprocess._apply_correction(): ", E, "\n")
    return correctedSignal
class finalprocessing(object):
    """
    Post-processing applied once a measurement finishes.

    Depending on ``params['template']`` the constructor runs one routine and
    stores its output in ``self.results``:

    - ``'spl'`` / ``'frequencyAnalyzer'`` -> :meth:`statisticallevels`
    - ``'reverberationTime'``             -> :meth:`reverberationTime`
    - ``'calibration'``                   -> :meth:`calibration`

    Parameters
    ----------
    inData : np.ndarray
        Measured data: Leq vector, impulse response or calibration signal,
        depending on the template.
    params : dict
        Measurement setup parameters.
    bandfilter, weightingfilter
        Filter objects kept as attributes for the processing routines.

    Raises
    ------
    AttributeError
        If ``params['template']`` is not one of the supported templates.
    """

    def __init__(self, inData, params, bandfilter, weightingfilter):
        self.params = params
        self.inData = inData
        self.bandfilter = bandfilter
        self.weightingfilter = weightingfilter
        # Reference sound pressure (20 uPa) used for dB conversions.
        self.refPressure = 2e-05
        if self.params['template'] in ['spl', 'frequencyAnalyzer']:
            self.results = self.statisticallevels()
        elif self.params['template'] == 'reverberationTime':
            self.results = self.reverberationTime()
        elif self.params['template'] == 'calibration':
            self.results = self.calibration()
        else:
            # Bug fix: the original created the exception without `raise`, and
            # the `%` operator bound only to the second string literal (which
            # has no `%s`), so formatting itself would have raised TypeError.
            raise AttributeError(
                "Template %s not supported, please try 'frequencyAnalyzer', "
                "'reverberationTime' or 'calibration'." % self.params['template']
            )
        return

    def statisticallevels(self) -> dict:
        """
        Description:
        ------------
        Function that returns the statistical levels L10, L50 and L90.

        Uses ``self.inData`` as the vector of Leq values recorded during
        the measurement. L10 is the level exceeded 10% of the time (the
        90th percentile), L50 the median, L90 the 10th percentile.

        Returns
        -------
        StatisticalLevels : dict
            Statistical levels (L10, L50 and L90) based on the Leq levels
            recorded during the measurement, rounded to 2 decimal places.
        """
        try:
            Leq = self.inData
            L10 = np.round(np.percentile(Leq, 90), 2)
            L50 = np.round(np.percentile(Leq, 50), 2)
            L90 = np.round(np.percentile(Leq, 10), 2)
            StatisticalLevels = {'L10': L10,
                                 'L50': L50,
                                 'L90': L90}
        except Exception as E:
            print("finalprocessing.StatisticalLevels(): ", E, "\n")
        return StatisticalLevels

    def reverberationTime(self) -> dict:
        """
        Computes reverberation-time results from the measured impulse
        response via ``pyslm.rooms`` and returns its ``results`` dict.
        """
        try:
            # Load the impulse response into a vector of type numpy.ndarray.
            IR = self.inData
            # Sampling rate.
            fs = self.params['fs']
            # Initial frequency of octave band.
            fstart = self.params['fstart']
            # Final frequency of octave band.
            fend = self.params['fend']
            # Octave band filter fraction.
            b = self.params['b']
            # Key for use Lundeby method.
            bypassLundeby = False
            # Key to plot the decay curve and Lundeby parameters.
            plotLundebyResults = False
            # Key to not suppress warning messages.
            suppressWarnings = False
            # Final cut-off time in seconds for the impulsive
            # response in the background noise level.
            IREndManualCut = None
            # Instantiate the room class with the input parameters.
            roomsParams = pyslm.rooms(IR=IR,
                                      fs=fs,
                                      fstart=fstart,
                                      fend=fend,
                                      b=b,
                                      bypassLundeby=bypassLundeby,
                                      plotLundebyResults=plotLundebyResults,
                                      suppressWarnings=suppressWarnings,
                                      IREndManualCut=IREndManualCut)
        except Exception as E:
            print("finalprocessing.reverberationTime(): ", E, "\n")
        return roomsParams.results

    def calibration(self) -> dict:
        """
        Description:
        ------------
        Function that performs the transformation of the Fourier, returning
        the vectors of complex amplitude and frequency to display on the
        calibration screen.

        Uses ``self.inData`` as the signal measured by the microphone [Pa]
        and searches for the spectral peak within +/- 50 Hz of
        ``params['fCalib']``.

        Returns
        -------
        results : dict
            Dictionary containing results of the calibration procedure
            (populated only for the 'AdvFreqAnalyzer' version).
        """
        try:
            signal = self.inData
            numSamples = len(signal)
            freqSignal = np.fft.rfft(signal, axis=0, norm=None)
            # Scale to RMS single-sided amplitude.
            freqSignal /= 2**0.5
            freqSignal /= len(freqSignal)
            freqVector = np.linspace(0, (numSamples - 1) *
                                     self.params['fs'] /
                                     (2*numSamples),
                                     (int(numSamples/2)+1)
                                     if numSamples % 2 == 0
                                     else int((numSamples+1)/2))
            # Search window of +/- 50 Hz around the calibrator frequency.
            a = np.where(freqVector >= self.params['fCalib'] - 50)[0][0]
            b = np.where(freqVector <= self.params['fCalib'] + 50)[0][-1]
            with np.errstate(divide='ignore'):
                sensitivity = np.abs(freqSignal[a:b]).max()
                # Above 104 dB the calibrator is presumably on its 10x range.
                if 20 * np.log10(sensitivity/self.refPressure) > 104:
                    FC = 10/sensitivity
                else:
                    FC = 1/sensitivity
                SPL = 20 * np.log10(np.abs(freqSignal)/self.refPressure)
            sensitivity = np.round(sensitivity, 2)
            correction = np.round(np.abs(10*np.log10(sensitivity)) -
                                  np.abs(10*np.log10(1/self.params['calibFactor'])), 2)
            idMax = np.where(SPL == SPL[a:b].max())[0][0]
            SPLmax = np.round(SPL[idMax], 2)
            freqmax = np.round(freqVector[idMax], 2)
            results = {}
            if self.params['version'] == 'AdvFreqAnalyzer':
                results['SPL'] = SPL
                results['freqVector'] = freqVector
                results['SPLmax'] = SPLmax
                results['freqmax'] = freqmax
                results['sensitivity'] = sensitivity*1000
                results['correction'] = correction
                results['FC'] = FC
        except Exception as E:
            print("finalprocessing.calibration(): ", E, "\n")
        return results
def ImpulseResponse(signal: np.ndarray, fs: int, excitTime: int,
                    numDecay: int, scapeTime: int, method: str,
                    excitation: Union[None, np.ndarray] = None) -> np.ndarray:
    """
    Description
    -----------
    Recovers an impulse response from a recorded measurement.

    For noise excitations ('pinkNoise'/'whiteNoise') the recording is cut
    after the escape and excitation times and the response is taken from
    the energy peak onward. For any other method the response is obtained
    by spectral division of the recording by the excitation signal.

    Parameters
    ----------
    signal : numpy.ndarray
        Recorded signal; 2-D with one column per decay when numDecay > 1.
    fs : int
        Sampling rate [Hz].
    excitTime : int
        Duration of the excitation [s].
    numDecay : int
        Number of recorded decays; > 1 triggers averaging across decays.
    scapeTime : int
        Time [s] discarded at the start of the recording.
    method : str
        'pinkNoise', 'whiteNoise', or any other value for the
        deconvolution (sweep) path, which requires `excitation`.
    excitation : None | numpy.ndarray
        Excitation time series, required when method is not a noise type.

    Returns
    -------
    impulseResponse : numpy.ndarray
        Impulse response starting at its energy peak.
    """
    try:
        if numDecay > 1:
            # Ensure decays run down the rows before averaging them.
            if signal.shape[-1] > signal.shape[0]:
                signal = signal.transpose()
            signal = np.mean(a=signal, axis=1)
        else:
            signal = signal[:, 0]
        if method in ['pinkNoise', 'whiteNoise']:
            time = np.arange(0, signal.size/fs, 1/fs)
            # Discard escape time and the excitation itself.
            cutPoint = np.where(time >= scapeTime + excitTime)[0][0]
            signal = signal[cutPoint:]
            # The impulse response starts at the energy peak.
            square = signal**2
            maxPoint = np.where(square == square.max())[0][0]
            impulseResponse = signal[maxPoint:]
        else:
            # Zero-pad the shorter of the two series so both have equal length.
            if signal.size > excitation.size:
                size = signal.size - excitation.size
                zeros = np.zeros(shape=(size))
                excitation = np.concatenate((excitation, zeros), axis=0)
            elif excitation.size > signal.size:
                size = excitation.size - signal.size
                zeros = np.zeros(shape=(size))
                # Bug fix: the original assigned the padded *signal* to
                # `excitation`, discarding the excitation and leaving the
                # recording short. Pad the signal instead.
                signal = np.concatenate((signal, zeros), axis=0)
            time = np.arange(0, signal.size/fs, 1/fs)
            cutPoint = np.where(time >= scapeTime)[0][0]
            signal = signal[cutPoint:]
            excitation = excitation[cutPoint:]
            # Deconvolution in the frequency domain: IR = S / E.
            freqSignal = np.fft.rfft(signal, axis=0, norm=None)
            freqExcitation = np.fft.rfft(excitation, axis=0, norm=None)
            freqIR = freqSignal/freqExcitation
            impulseResponse = np.fft.irfft(a=freqIR)
            square = impulseResponse**2
            maxPoint = np.where(square == square.max())[0][0]
            impulseResponse = impulseResponse[maxPoint:]
    except Exception as E:
        print("ImpulseResponse(): ", E, "\n")
    return impulseResponse
def rms(a: np.ndarray, axis: int) -> np.ndarray:
    """
    Description
    -----------
    Function that calculates the root mean square of the sound pressure

    Parameters
    ----------
    a : np.ndarray
        Quadratic sound pressure (e.g. pressure**2)
    axis : int
        Vector calculation axis

    Returns
    -------
    root_mean_square : np.ndarray
        root mean square of the sound pressure
    """
    try:
        # Average the squared values along the requested axis first,
        # then take the square root of the result.
        mean_square = np.mean(a=a, axis=axis)
        root_mean_square = np.sqrt(mean_square)
    except Exception as E:
        print("rms(): ", E, "\n")
    return root_mean_square
def apply_correction(signal: np.ndarray, fs: int, micCorr: Union[None, np.ndarray],
                     adcCorr: Union[None, np.ndarray], applyMicCorr: bool,
                     applyAdcCorr: bool) -> np.ndarray:
    """
    Description
    -----------
    Applies spectral correction to an audio time series, removing the
    spectral 'coloring' of the microphone and/or the analog to digital
    converter. Each correction is given as a 2-column array of
    (frequency [Hz], magnitude [dB]) pairs and is interpolated onto the
    signal's frequency grid before being subtracted from the magnitude
    spectrum; the original phase is preserved.

    Parameters
    ----------
    signal : numpy.ndarray
        Audio time series to correct.
    fs : int
        Sampling rate [Hz].
    micCorr : None | numpy.ndarray
        Microphone correction curve, columns (freq, magnitude dB), or None.
    adcCorr : None | numpy.ndarray
        ADC correction curve, columns (freq, magnitude dB), or None.
    applyMicCorr : bool
        Whether to apply the microphone correction.
    applyAdcCorr : bool
        Whether to apply the ADC correction.

    Returns
    -------
    correctedSignal : numpy.ndarray
        Time-domain signal with the corrections applied. When no correction
        is applied the signal round-trips through the FFT unchanged.
    """
    try:
        refPressure = 2e-05
        freqSignal = np.fft.rfft(signal, axis=0, norm=None)
        freqVector = np.linspace(0, (signal.size - 1) *
                                 fs / (2*signal.size),
                                 (int(signal.size/2)+1)
                                 if signal.size % 2 == 0
                                 else int((signal.size+1)/2))
        # Magnitude spectrum in dB re 20 uPa.
        MagfreqSignal = 20 * \
            np.log10(np.abs(freqSignal)/refPressure)
        correctedMagfreqSignal = MagfreqSignal
        # Loading microphone data.
        if micCorr is not None and applyMicCorr:
            freq = micCorr[:, 0]
            mag = micCorr[:, 1]
            # Microphone response interpolation onto the signal's grid.
            interp_func = interp.interp1d(
                freq, mag, fill_value='extrapolate')
            mag_interp = interp_func(freqVector)
            # Apply magnitude correction.
            correctedMagfreqSignal -= mag_interp
        # Loading ADC data.
        if adcCorr is not None and applyAdcCorr:
            freq = adcCorr[:, 0]
            mag = adcCorr[:, 1]
            # Response interpolation of the digital analog converter.
            interp_func = interp.interp1d(
                freq, mag, fill_value='extrapolate')
            mag_interp = interp_func(freqVector)
            # Apply magnitude correction.
            correctedMagfreqSignal -= mag_interp
        # Rebuild the complex amplitude vector with corrected magnitude
        # and the original phase.
        correctedfreqSignal = 10**(correctedMagfreqSignal /
                                   20) * refPressure
        r = correctedfreqSignal
        teta = np.angle(freqSignal)
        correctedfreqSignal = r*(np.cos(teta) + np.sin(teta)*1j)
        correctedSignal = np.fft.irfft(a=correctedfreqSignal)
    except Exception as E:
        print("apply_correction(): ", E, "\n")
    return correctedSignal
|
{"hexsha": "680576a78a89a3028799e4c1cb5054d7eaac2691", "size": 41665, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyslm/processing.py", "max_stars_repo_name": "leonardojacomussi/PySLM", "max_stars_repo_head_hexsha": "94283afa022cd113129b4f42f1745e22ad3730f9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-07-23T17:13:40.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-23T17:13:40.000Z", "max_issues_repo_path": "pyslm/processing.py", "max_issues_repo_name": "leonardojacomussi/PySLM", "max_issues_repo_head_hexsha": "94283afa022cd113129b4f42f1745e22ad3730f9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pyslm/processing.py", "max_forks_repo_name": "leonardojacomussi/PySLM", "max_forks_repo_head_hexsha": "94283afa022cd113129b4f42f1745e22ad3730f9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-06-14T10:47:44.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-29T16:29:30.000Z", "avg_line_length": 41.2933597621, "max_line_length": 122, "alphanum_fraction": 0.496123845, "include": true, "reason": "import numpy,from scipy", "num_tokens": 8350}
|
import numpy as np
import random
from torch.utils import data as data
import os.path as osp
from basicsr.utils import FileClient, imfrombytes, img2tensor
from basicsr.utils.registry import DATASET_REGISTRY
import os
import torch
import cv2
@DATASET_REGISTRY.register()
class SlidingWindowDataset(data.Dataset):
    """Temporal sliding-window dataset over a directory of LQ frames.

    For a requested frame ``index`` it loads ``num_frame`` consecutive
    frames centred on that index (clamped at the sequence borders, so
    border frames are repeated) and returns them stacked as a
    ``(t, c, h, w)`` float tensor.

    Args:
        opt (dict): Configuration with keys:
            num_frame (int): Size of the sliding window.
            dataroot_lq (str): Directory of frames named ``{:08d}.png``.
            io_backend (dict): FileClient backend options (must contain
                a 'type' key).
    """

    def __init__(self, opt):
        super(SlidingWindowDataset, self).__init__()
        self.opt = opt
        self.num_frame = opt['num_frame']
        self.num_half_frames = opt['num_frame'] // 2
        self.dataroot_lq = opt["dataroot_lq"]
        # Sequence length: one file per frame in the LQ directory.
        self.length = len(os.listdir(self.dataroot_lq))
        # File client (io backend); created lazily in __getitem__.
        self.file_client = None
        self.io_backend_opt = opt['io_backend']

    def __getitem__(self, index):
        if self.file_client is None:
            self.file_client = FileClient(self.io_backend_opt.pop('type'), **self.io_backend_opt)
        # Bug fix: the original reused the name `index` as the loop
        # variable, shadowing the method argument; it also round-tripped
        # the index through a formatted string for no effect.
        center_frame_idx = index
        # Neighbouring frame indices, clamped to [0, length - 1] so the
        # window never leaves the sequence.
        neighbor_list = [
            min(max(center_frame_idx + offset, 0), self.length - 1)
            for offset in range(-self.num_half_frames, self.num_half_frames + 1)
        ]
        # Load and decode each neighbouring LQ frame.
        img_lqs = []
        for neighbor in neighbor_list:
            img_lq_path = osp.join(self.dataroot_lq, f'{neighbor:08d}.png')
            img_bytes = self.file_client.get(img_lq_path, 'lq')
            img_lq = imfrombytes(img_bytes, float32=True)
            # numpy to tensor
            img_lqs.append(img2tensor(img_lq))
        # img_lqs: (t, c, h, w)
        return torch.stack(img_lqs, dim=0)

    def __len__(self):
        return len(os.listdir(self.dataroot_lq))
|
{"hexsha": "5cf80e5f1dedd44e3acdf23dfe50a1dbe560ea32", "size": 2107, "ext": "py", "lang": "Python", "max_stars_repo_path": "basicsr/data/sliding_window_dataset.py", "max_stars_repo_name": "ACALJJ32/BasicSR_ACALJJ32", "max_stars_repo_head_hexsha": "81235ad78102e194f7442ff600ff2dfd9992f857", "max_stars_repo_licenses": ["Apache-2.0", "MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-11-21T01:42:56.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-21T11:54:06.000Z", "max_issues_repo_path": "basicsr/data/sliding_window_dataset.py", "max_issues_repo_name": "ACALJJ32/BasicSR_ACALJJ32", "max_issues_repo_head_hexsha": "81235ad78102e194f7442ff600ff2dfd9992f857", "max_issues_repo_licenses": ["Apache-2.0", "MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "basicsr/data/sliding_window_dataset.py", "max_forks_repo_name": "ACALJJ32/BasicSR_ACALJJ32", "max_forks_repo_head_hexsha": "81235ad78102e194f7442ff600ff2dfd9992f857", "max_forks_repo_licenses": ["Apache-2.0", "MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.4444444444, "max_line_length": 97, "alphanum_fraction": 0.6160417655, "include": true, "reason": "import numpy", "num_tokens": 509}
|
#!/usr/bin/env python
"""
https://en.wikipedia.org/wiki/Trapezoidal_rule
Integral ( f(x) ) dx ~ (b - a) 0.5(f(a) + f(b))
a->b
https://numpy.org/doc/stable/reference/generated/numpy.trapz.html
"""
import numpy as np
import matplotlib.pyplot as plt
if __name__ == '__main__':
    # Sample f(x) = x**2 on the integers 0..9.
    xs = np.arange(10, dtype=np.float64)
    ys = xs * xs

    def antiderivative(t):
        # Exact antiderivative of x**2, namely x**3 / 3.
        return np.power(t, 3) / 3.

    # Trapezoidal approximation versus the exact integral over [0, 9].
    area_approx = np.trapz(ys, xs)
    area = antiderivative(xs[-1]) - antiderivative(xs[0])

    fig, ax = plt.subplots()
    fig.suptitle(" np.trapz(y,x) area_approx : %10.4f area : %10.4f " % (area_approx, area))
    ax.plot(xs, ys)
    ax.scatter(xs, ys)
    fig.show()
|
{"hexsha": "a2ade0b8b3a3aa5408fec86e3fcc47b6fb081d90", "size": 652, "ext": "py", "lang": "Python", "max_stars_repo_path": "ana/trapz.py", "max_stars_repo_name": "hanswenzel/opticks", "max_stars_repo_head_hexsha": "b75b5929b6cf36a5eedeffb3031af2920f75f9f0", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2020-07-05T02:39:32.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-20T18:52:44.000Z", "max_issues_repo_path": "ana/trapz.py", "max_issues_repo_name": "hanswenzel/opticks", "max_issues_repo_head_hexsha": "b75b5929b6cf36a5eedeffb3031af2920f75f9f0", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ana/trapz.py", "max_forks_repo_name": "hanswenzel/opticks", "max_forks_repo_head_hexsha": "b75b5929b6cf36a5eedeffb3031af2920f75f9f0", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2020-09-03T20:36:32.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-19T07:42:21.000Z", "avg_line_length": 19.7575757576, "max_line_length": 99, "alphanum_fraction": 0.5659509202, "include": true, "reason": "import numpy", "num_tokens": 223}
|
import cv2
import numpy as np
import os
def empty(x):
    # No-op trackbar callback: OpenCV requires one, but the slider
    # values are polled in the main loop instead.
    return None
# Interactive HSV threshold tuner: six trackbars set the lower/upper HSV
# bounds of a mask applied to a static image, redrawn in a live loop.
# NOTE(review): windows are never destroyed on exit — consider adding
# cv2.destroyAllWindows() after the loop.
cv2.namedWindow("TrackBars")
cv2.resizeWindow("TrackBars",600,300)
# One trackbar per HSV channel bound (OpenCV hue range is 0-179).
cv2.createTrackbar("Hue Min","TrackBars", 0, 179, empty)
cv2.createTrackbar("Hue Max","TrackBars", 179, 179, empty)
cv2.createTrackbar("Sat Min","TrackBars", 0, 255, empty)
cv2.createTrackbar("Sat Max","TrackBars", 255, 255, empty)
cv2.createTrackbar("Val Min","TrackBars", 0, 255, empty)
cv2.createTrackbar("Val Max","TrackBars", 255, 255, empty)
# Test image; swap in one of the commented alternatives to tune others.
img = cv2.imread('shooting-blue-1.png')
# img = cv2.imread('four-rings.jpg')
# img = cv2.imread('no-rings.jpg')
# img = cv2.imread('one-ring.jpg')
# imgResize = cv2.resize(img,(img.shape[1]//4, img.shape[0]//4), interpolation=cv2.INTER_LINEAR)
# cv2.imshow('Image', imgResize)
# Thresholding is done in HSV space, converted once up front.
imgHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
cv2.imshow('HSV Image', imgHSV)
while True:
    # Poll the current slider positions each frame.
    h_min = cv2.getTrackbarPos("Hue Min","TrackBars")
    h_max = cv2.getTrackbarPos("Hue Max","TrackBars")
    s_min = cv2.getTrackbarPos("Sat Min","TrackBars")
    s_max = cv2.getTrackbarPos("Sat Max","TrackBars")
    v_min = cv2.getTrackbarPos("Val Min","TrackBars")
    v_max = cv2.getTrackbarPos("Val Max","TrackBars")
    # Printed so the tuned values can be copied into other scripts.
    print(h_min, h_max, s_min, s_max, v_min, v_max)
    lower = np.array([h_min, s_min, v_min])
    upper = np.array([h_max, s_max, v_max])
    mask = cv2.inRange(imgHSV, lower, upper)
    # 3x3 kernel for morphological clean-up of the raw mask.
    kernel = np.ones((3, 3), np.uint8)
    # Repeat the erosion and dilation by changing iterations.
    mask_erode = cv2.erode(mask, kernel, iterations=1)
    blurMask = cv2.dilate(mask_erode, kernel, iterations=1)
    # blurMask = cv2.medianBlur(mask,5)
    cv2.imshow('Mask', mask)
    cv2.imshow('Blur Mask', blurMask)
    # 'q' quits; waitKey(1) also services the HighGUI event loop.
    if cv2.waitKey(1) == ord('q'):
        break
|
{"hexsha": "beaeea3bede99775bba34af53137c585c7c9dcb3", "size": 1763, "ext": "py", "lang": "Python", "max_stars_repo_path": "opencv/findgoal.py", "max_stars_repo_name": "ftc8569/2020-ftc-vision", "max_stars_repo_head_hexsha": "ecf6bacb840059351a0734a037e9bca5dbaf3b7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-09-16T22:57:01.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-16T22:57:18.000Z", "max_issues_repo_path": "opencv/findgoal.py", "max_issues_repo_name": "ftc8569/2020-ftc-vision", "max_issues_repo_head_hexsha": "ecf6bacb840059351a0734a037e9bca5dbaf3b7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "opencv/findgoal.py", "max_forks_repo_name": "ftc8569/2020-ftc-vision", "max_forks_repo_head_hexsha": "ecf6bacb840059351a0734a037e9bca5dbaf3b7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.3965517241, "max_line_length": 96, "alphanum_fraction": 0.6738513897, "include": true, "reason": "import numpy", "num_tokens": 556}
|
/-
Copyright (c) 2018 Scott Morrison. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Scott Morrison, Markus Himmel, Bhavik Mehta, Andrew Yang
-/
import category_theory.limits.shapes.wide_pullbacks
import category_theory.limits.shapes.binary_products
/-!
# Pullbacks
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> Any changes to this file require a corresponding PR to mathlib4.
We define a category `walking_cospan` (resp. `walking_span`), which is the index category
for the given data for a pullback (resp. pushout) diagram. Convenience methods `cospan f g`
and `span f g` construct functors from the walking (co)span, hitting the given morphisms.
We define `pullback f g` and `pushout f g` as limits and colimits of such functors.
## References
* [Stacks: Fibre products](https://stacks.math.columbia.edu/tag/001U)
* [Stacks: Pushouts](https://stacks.math.columbia.edu/tag/0025)
-/
noncomputable theory
open category_theory
namespace category_theory.limits
universes w v₁ v₂ v u u₂
local attribute [tidy] tactic.case_bash
/--
The type of objects for the diagram indexing a pullback, defined as a special case of
`wide_pullback_shape`.
-/
abbreviation walking_cospan : Type := wide_pullback_shape walking_pair

/-- The left point of the walking cospan. -/
@[pattern] abbreviation walking_cospan.left : walking_cospan := some walking_pair.left
/-- The right point of the walking cospan. -/
@[pattern] abbreviation walking_cospan.right : walking_cospan := some walking_pair.right
/-- The central point of the walking cospan. -/
@[pattern] abbreviation walking_cospan.one : walking_cospan := none

/--
The type of objects for the diagram indexing a pushout, defined as a special case of
`wide_pushout_shape`.
-/
abbreviation walking_span : Type := wide_pushout_shape walking_pair

/-- The left point of the walking span. -/
@[pattern] abbreviation walking_span.left : walking_span := some walking_pair.left
/-- The right point of the walking span. -/
@[pattern] abbreviation walking_span.right : walking_span := some walking_pair.right
/-- The central point of the walking span. -/
@[pattern] abbreviation walking_span.zero : walking_span := none

namespace walking_cospan

/-- The type of arrows for the diagram indexing a pullback. -/
abbreviation hom : walking_cospan → walking_cospan → Type := wide_pullback_shape.hom

/-- The left arrow of the walking cospan. -/
@[pattern] abbreviation hom.inl : left ⟶ one := wide_pullback_shape.hom.term _
/-- The right arrow of the walking cospan. -/
@[pattern] abbreviation hom.inr : right ⟶ one := wide_pullback_shape.hom.term _
/-- The identity arrows of the walking cospan. -/
@[pattern] abbreviation hom.id (X : walking_cospan) : X ⟶ X := wide_pullback_shape.hom.id X

-- Hom-sets of the walking cospan have at most one element, so any
-- diagram of this shape commutes automatically.
instance (X Y : walking_cospan) : subsingleton (X ⟶ Y) := by tidy

end walking_cospan

namespace walking_span

/-- The type of arrows for the diagram indexing a pushout. -/
abbreviation hom : walking_span → walking_span → Type := wide_pushout_shape.hom

/-- The left arrow of the walking span. -/
@[pattern] abbreviation hom.fst : zero ⟶ left := wide_pushout_shape.hom.init _
/-- The right arrow of the walking span. -/
@[pattern] abbreviation hom.snd : zero ⟶ right := wide_pushout_shape.hom.init _
/-- The identity arrows of the walking span. -/
@[pattern] abbreviation hom.id (X : walking_span) : X ⟶ X := wide_pushout_shape.hom.id X

-- As for the walking cospan, hom-sets here are subsingletons.
instance (X Y : walking_span) : subsingleton (X ⟶ Y) := by tidy

end walking_span
open walking_span.hom walking_cospan.hom wide_pullback_shape.hom wide_pushout_shape.hom

variables {C : Type u} [category.{v} C]

/-- To construct an isomorphism of cones over the walking cospan,
it suffices to construct an isomorphism
of the cone points and check it commutes with the legs to `left` and `right`. -/
def walking_cospan.ext {F : walking_cospan ⥤ C} {s t : cone F} (i : s.X ≅ t.X)
  (w₁ : s.π.app walking_cospan.left = i.hom ≫ t.π.app walking_cospan.left)
  (w₂ : s.π.app walking_cospan.right = i.hom ≫ t.π.app walking_cospan.right) :
  s ≅ t :=
begin
  apply cones.ext i,
  -- Case on the three objects; the leg at the central object is derived
  -- from `w₁` together with naturality of both cones along `inl`.
  rintro (⟨⟩|⟨⟨⟩⟩),
  { have h₁ := s.π.naturality walking_cospan.hom.inl,
    dsimp at h₁, simp only [category.id_comp] at h₁,
    have h₂ := t.π.naturality walking_cospan.hom.inl,
    dsimp at h₂, simp only [category.id_comp] at h₂,
    simp_rw [h₂, ←category.assoc, ←w₁, ←h₁], },
  { exact w₁, },
  { exact w₂, },
end

/-- To construct an isomorphism of cocones over the walking span,
it suffices to construct an isomorphism
of the cocone points and check it commutes with the legs from `left` and `right`. -/
-- NOTE(review): the hypotheses below are stated via `walking_cospan.left/right`;
-- this presumably elaborates because the span and cospan shapes share the same
-- underlying objects — confirm it is intentional rather than `walking_span.*`.
def walking_span.ext {F : walking_span ⥤ C} {s t : cocone F} (i : s.X ≅ t.X)
  (w₁ : s.ι.app walking_cospan.left ≫ i.hom = t.ι.app walking_cospan.left)
  (w₂ : s.ι.app walking_cospan.right ≫ i.hom = t.ι.app walking_cospan.right) :
  s ≅ t :=
begin
  apply cocones.ext i,
  -- Dual argument to `walking_cospan.ext`, using naturality along `fst`.
  rintro (⟨⟩|⟨⟨⟩⟩),
  { have h₁ := s.ι.naturality walking_span.hom.fst,
    dsimp at h₁, simp only [category.comp_id] at h₁,
    have h₂ := t.ι.naturality walking_span.hom.fst,
    dsimp at h₂, simp only [category.comp_id] at h₂,
    simp_rw [←h₁, category.assoc, w₁, h₂], },
  { exact w₁, },
  { exact w₂, },
end
/-- `cospan f g` is the functor from the walking cospan hitting `f` and `g`. -/
def cospan {X Y Z : C} (f : X ⟶ Z) (g : Y ⟶ Z) : walking_cospan ⥤ C :=
wide_pullback_shape.wide_cospan Z
  (λ j, walking_pair.cases_on j X Y) (λ j, walking_pair.cases_on j f g)

/-- `span f g` is the functor from the walking span hitting `f` and `g`. -/
def span {X Y Z : C} (f : X ⟶ Y) (g : X ⟶ Z) : walking_span ⥤ C :=
wide_pushout_shape.wide_span X
  (λ j, walking_pair.cases_on j Y Z) (λ j, walking_pair.cases_on j f g)

-- The following lemmas record, definitionally (all proofs are `rfl`), how
-- `cospan` and `span` act on the objects and morphisms of their shapes.
@[simp] lemma cospan_left {X Y Z : C} (f : X ⟶ Z) (g : Y ⟶ Z) :
  (cospan f g).obj walking_cospan.left = X := rfl
@[simp] lemma span_left {X Y Z : C} (f : X ⟶ Y) (g : X ⟶ Z) :
  (span f g).obj walking_span.left = Y := rfl

@[simp] lemma cospan_right {X Y Z : C} (f : X ⟶ Z) (g : Y ⟶ Z) :
  (cospan f g).obj walking_cospan.right = Y := rfl
@[simp] lemma span_right {X Y Z : C} (f : X ⟶ Y) (g : X ⟶ Z) :
  (span f g).obj walking_span.right = Z := rfl

@[simp] lemma cospan_one {X Y Z : C} (f : X ⟶ Z) (g : Y ⟶ Z) :
  (cospan f g).obj walking_cospan.one = Z := rfl
@[simp] lemma span_zero {X Y Z : C} (f : X ⟶ Y) (g : X ⟶ Z) :
  (span f g).obj walking_span.zero = X := rfl

@[simp] lemma cospan_map_inl {X Y Z : C} (f : X ⟶ Z) (g : Y ⟶ Z) :
  (cospan f g).map walking_cospan.hom.inl = f := rfl
@[simp] lemma span_map_fst {X Y Z : C} (f : X ⟶ Y) (g : X ⟶ Z) :
  (span f g).map walking_span.hom.fst = f := rfl

@[simp] lemma cospan_map_inr {X Y Z : C} (f : X ⟶ Z) (g : Y ⟶ Z) :
  (cospan f g).map walking_cospan.hom.inr = g := rfl
@[simp] lemma span_map_snd {X Y Z : C} (f : X ⟶ Y) (g : X ⟶ Z) :
  (span f g).map walking_span.hom.snd = g := rfl

-- Not marked `@[simp]`: identities are already simplified away elsewhere.
lemma cospan_map_id {X Y Z : C} (f : X ⟶ Z) (g : Y ⟶ Z) (w : walking_cospan) :
  (cospan f g).map (walking_cospan.hom.id w) = 𝟙 _ := rfl
lemma span_map_id {X Y Z : C} (f : X ⟶ Y) (g : X ⟶ Z) (w : walking_span) :
  (span f g).map (walking_span.hom.id w) = 𝟙 _ := rfl

/-- Every diagram indexing an pullback is naturally isomorphic (actually, equal) to a `cospan` -/
@[simps {rhs_md := semireducible}]
def diagram_iso_cospan (F : walking_cospan ⥤ C) :
  F ≅ cospan (F.map inl) (F.map inr) :=
nat_iso.of_components (λ j, eq_to_iso (by tidy)) (by tidy)

/-- Every diagram indexing a pushout is naturally isomorphic (actually, equal) to a `span` -/
@[simps {rhs_md := semireducible}]
def diagram_iso_span (F : walking_span ⥤ C) :
  F ≅ span (F.map fst) (F.map snd) :=
nat_iso.of_components (λ j, eq_to_iso (by tidy)) (by tidy)
variables {D : Type u₂} [category.{v₂} D]

/-- A functor applied to a cospan is a cospan. -/
def cospan_comp_iso (F : C ⥤ D) {X Y Z : C} (f : X ⟶ Z) (g : Y ⟶ Z) :
  cospan f g ⋙ F ≅ cospan (F.map f) (F.map g) :=
nat_iso.of_components (by rintros (⟨⟩|⟨⟨⟩⟩); exact iso.refl _)
  (by rintros (⟨⟩|⟨⟨⟩⟩) (⟨⟩|⟨⟨⟩⟩) ⟨⟩; repeat { dsimp, simp, })

section

variables (F : C ⥤ D) {X Y Z : C} (f : X ⟶ Z) (g : Y ⟶ Z)

-- Every component of `cospan_comp_iso` is an identity isomorphism, and all
-- of the following equalities hold by `rfl`.
@[simp] lemma cospan_comp_iso_app_left :
  (cospan_comp_iso F f g).app walking_cospan.left = iso.refl _ :=
rfl

@[simp] lemma cospan_comp_iso_app_right :
  (cospan_comp_iso F f g).app walking_cospan.right = iso.refl _ :=
rfl

@[simp] lemma cospan_comp_iso_app_one :
  (cospan_comp_iso F f g).app walking_cospan.one = iso.refl _ :=
rfl

@[simp] lemma cospan_comp_iso_hom_app_left :
  (cospan_comp_iso F f g).hom.app walking_cospan.left = 𝟙 _ :=
rfl

@[simp] lemma cospan_comp_iso_hom_app_right :
  (cospan_comp_iso F f g).hom.app walking_cospan.right = 𝟙 _ :=
rfl

@[simp] lemma cospan_comp_iso_hom_app_one :
  (cospan_comp_iso F f g).hom.app walking_cospan.one = 𝟙 _ :=
rfl

@[simp] lemma cospan_comp_iso_inv_app_left :
  (cospan_comp_iso F f g).inv.app walking_cospan.left = 𝟙 _ :=
rfl

@[simp] lemma cospan_comp_iso_inv_app_right :
  (cospan_comp_iso F f g).inv.app walking_cospan.right = 𝟙 _ :=
rfl

@[simp] lemma cospan_comp_iso_inv_app_one :
  (cospan_comp_iso F f g).inv.app walking_cospan.one = 𝟙 _ :=
rfl

end
/-- A functor applied to a span is a span. -/
def span_comp_iso (F : C ⥤ D) {X Y Z : C} (f : X ⟶ Y) (g : X ⟶ Z) :
  span f g ⋙ F ≅ span (F.map f) (F.map g) :=
nat_iso.of_components (by rintros (⟨⟩|⟨⟨⟩⟩); exact iso.refl _)
  (by rintros (⟨⟩|⟨⟨⟩⟩) (⟨⟩|⟨⟨⟩⟩) ⟨⟩; repeat { dsimp, simp, })

section

variables (F : C ⥤ D) {X Y Z : C} (f : X ⟶ Y) (g : X ⟶ Z)

-- Dual to the `cospan_comp_iso` lemmas above: every component is an
-- identity isomorphism and everything holds by `rfl`.
@[simp] lemma span_comp_iso_app_left : (span_comp_iso F f g).app walking_span.left = iso.refl _ :=
rfl

@[simp] lemma span_comp_iso_app_right : (span_comp_iso F f g).app walking_span.right = iso.refl _ :=
rfl

@[simp] lemma span_comp_iso_app_zero : (span_comp_iso F f g).app walking_span.zero = iso.refl _ :=
rfl

@[simp] lemma span_comp_iso_hom_app_left : (span_comp_iso F f g).hom.app walking_span.left = 𝟙 _ :=
rfl

@[simp] lemma span_comp_iso_hom_app_right :
  (span_comp_iso F f g).hom.app walking_span.right = 𝟙 _ :=
rfl

@[simp] lemma span_comp_iso_hom_app_zero : (span_comp_iso F f g).hom.app walking_span.zero = 𝟙 _ :=
rfl

@[simp] lemma span_comp_iso_inv_app_left : (span_comp_iso F f g).inv.app walking_span.left = 𝟙 _ :=
rfl

@[simp] lemma span_comp_iso_inv_app_right :
  (span_comp_iso F f g).inv.app walking_span.right = 𝟙 _ :=
rfl

@[simp] lemma span_comp_iso_inv_app_zero : (span_comp_iso F f g).inv.app walking_span.zero = 𝟙 _ :=
rfl

end
section

variables {X Y Z X' Y' Z' : C} (iX : X ≅ X') (iY : Y ≅ Y') (iZ : Z ≅ Z')

section

variables {f : X ⟶ Z} {g : Y ⟶ Z} {f' : X' ⟶ Z'} {g' : Y' ⟶ Z'}

/-- Construct an isomorphism of cospans from components. -/
def cospan_ext (wf : iX.hom ≫ f' = f ≫ iZ.hom) (wg : iY.hom ≫ g' = g ≫ iZ.hom) :
  cospan f g ≅ cospan f' g' :=
nat_iso.of_components (by { rintros (⟨⟩|⟨⟨⟩⟩), exacts [iZ, iX, iY], })
  (by rintros (⟨⟩|⟨⟨⟩⟩) (⟨⟩|⟨⟨⟩⟩) ⟨⟩; repeat { dsimp, simp [wf, wg], })

variables (wf : iX.hom ≫ f' = f ≫ iZ.hom) (wg : iY.hom ≫ g' = g ≫ iZ.hom)

-- Component lemmas for `cospan_ext`, unfolded via `dsimp`/`simp`.
@[simp] lemma cospan_ext_app_left : (cospan_ext iX iY iZ wf wg).app walking_cospan.left = iX :=
by { dsimp [cospan_ext], simp, }

@[simp] lemma cospan_ext_app_right : (cospan_ext iX iY iZ wf wg).app walking_cospan.right = iY :=
by { dsimp [cospan_ext], simp, }

@[simp] lemma cospan_ext_app_one : (cospan_ext iX iY iZ wf wg).app walking_cospan.one = iZ :=
by { dsimp [cospan_ext], simp, }

@[simp] lemma cospan_ext_hom_app_left :
  (cospan_ext iX iY iZ wf wg).hom.app walking_cospan.left = iX.hom :=
by { dsimp [cospan_ext], simp, }

@[simp] lemma cospan_ext_hom_app_right :
  (cospan_ext iX iY iZ wf wg).hom.app walking_cospan.right = iY.hom :=
by { dsimp [cospan_ext], simp, }

@[simp] lemma cospan_ext_hom_app_one :
  (cospan_ext iX iY iZ wf wg).hom.app walking_cospan.one = iZ.hom :=
by { dsimp [cospan_ext], simp, }

@[simp] lemma cospan_ext_inv_app_left :
  (cospan_ext iX iY iZ wf wg).inv.app walking_cospan.left = iX.inv :=
by { dsimp [cospan_ext], simp, }

@[simp] lemma cospan_ext_inv_app_right :
  (cospan_ext iX iY iZ wf wg).inv.app walking_cospan.right = iY.inv :=
by { dsimp [cospan_ext], simp, }

@[simp] lemma cospan_ext_inv_app_one :
  (cospan_ext iX iY iZ wf wg).inv.app walking_cospan.one = iZ.inv :=
by { dsimp [cospan_ext], simp, }

end

section

variables {f : X ⟶ Y} {g : X ⟶ Z} {f' : X' ⟶ Y'} {g' : X' ⟶ Z'}

/-- Construct an isomorphism of spans from components. -/
def span_ext (wf : iX.hom ≫ f' = f ≫ iY.hom) (wg : iX.hom ≫ g' = g ≫ iZ.hom) :
  span f g ≅ span f' g' :=
nat_iso.of_components (by { rintros (⟨⟩|⟨⟨⟩⟩), exacts [iX, iY, iZ], })
  (by rintros (⟨⟩|⟨⟨⟩⟩) (⟨⟩|⟨⟨⟩⟩) ⟨⟩; repeat { dsimp, simp [wf, wg], })

variables (wf : iX.hom ≫ f' = f ≫ iY.hom) (wg : iX.hom ≫ g' = g ≫ iZ.hom)

-- Component lemmas for `span_ext`.
@[simp] lemma span_ext_app_left : (span_ext iX iY iZ wf wg).app walking_span.left = iY :=
by { dsimp [span_ext], simp, }

@[simp] lemma span_ext_app_right : (span_ext iX iY iZ wf wg).app walking_span.right = iZ :=
by { dsimp [span_ext], simp, }

-- NOTE(review): this lemma's name says `one`, but the central object of the
-- walking span is `zero` (compare `cospan_ext_app_one`); a rename would be a
-- breaking change, so it is only flagged here.
@[simp] lemma span_ext_app_one : (span_ext iX iY iZ wf wg).app walking_span.zero = iX :=
by { dsimp [span_ext], simp, }

@[simp] lemma span_ext_hom_app_left :
  (span_ext iX iY iZ wf wg).hom.app walking_span.left = iY.hom :=
by { dsimp [span_ext], simp, }

@[simp] lemma span_ext_hom_app_right :
  (span_ext iX iY iZ wf wg).hom.app walking_span.right = iZ.hom :=
by { dsimp [span_ext], simp, }

@[simp] lemma span_ext_hom_app_zero :
  (span_ext iX iY iZ wf wg).hom.app walking_span.zero = iX.hom :=
by { dsimp [span_ext], simp, }

@[simp] lemma span_ext_inv_app_left :
  (span_ext iX iY iZ wf wg).inv.app walking_span.left = iY.inv :=
by { dsimp [span_ext], simp, }

@[simp] lemma span_ext_inv_app_right :
  (span_ext iX iY iZ wf wg).inv.app walking_span.right = iZ.inv :=
by { dsimp [span_ext], simp, }

@[simp] lemma span_ext_inv_app_zero :
  (span_ext iX iY iZ wf wg).inv.app walking_span.zero = iX.inv :=
by { dsimp [span_ext], simp, }

end

end
variables {W X Y Z : C}
/-- A pullback cone is just a cone on the cospan formed by two morphisms `f : X ⟶ Z` and
`g : Y ⟶ Z`.-/
abbreviation pullback_cone (f : X ⟶ Z) (g : Y ⟶ Z) := cone (cospan f g)
namespace pullback_cone
variables {f : X ⟶ Z} {g : Y ⟶ Z}
/-- The first projection of a pullback cone. -/
abbreviation fst (t : pullback_cone f g) : t.X ⟶ X := t.π.app walking_cospan.left
/-- The second projection of a pullback cone. -/
abbreviation snd (t : pullback_cone f g) : t.X ⟶ Y := t.π.app walking_cospan.right
@[simp] lemma π_app_left (c : pullback_cone f g) : c.π.app walking_cospan.left = c.fst := rfl
@[simp] lemma π_app_right (c : pullback_cone f g) : c.π.app walking_cospan.right = c.snd := rfl
@[simp] lemma condition_one (t : pullback_cone f g) : t.π.app walking_cospan.one = t.fst ≫ f :=
begin
have w := t.π.naturality walking_cospan.hom.inl,
dsimp at w, simpa using w,
end
/-- This is a slightly more convenient method to verify that a pullback cone is a limit cone. It
only asks for a proof of facts that carry any mathematical content -/
def is_limit_aux (t : pullback_cone f g) (lift : Π (s : pullback_cone f g), s.X ⟶ t.X)
(fac_left : ∀ (s : pullback_cone f g), lift s ≫ t.fst = s.fst)
(fac_right : ∀ (s : pullback_cone f g), lift s ≫ t.snd = s.snd)
(uniq : ∀ (s : pullback_cone f g) (m : s.X ⟶ t.X)
(w : ∀ j : walking_cospan, m ≫ t.π.app j = s.π.app j), m = lift s) :
is_limit t :=
{ lift := lift,
fac' := λ s j, option.cases_on j
(by { rw [← s.w inl, ← t.w inl, ←category.assoc], congr, exact fac_left s, } )
(λ j', walking_pair.cases_on j' (fac_left s) (fac_right s)),
uniq' := uniq }
/-- This is another convenient method to verify that a pullback cone is a limit cone. It
only asks for a proof of facts that carry any mathematical content, and allows access to the
same `s` for all parts. -/
def is_limit_aux' (t : pullback_cone f g)
(create : Π (s : pullback_cone f g),
{l // l ≫ t.fst = s.fst ∧ l ≫ t.snd = s.snd ∧
∀ {m}, m ≫ t.fst = s.fst → m ≫ t.snd = s.snd → m = l}) :
limits.is_limit t :=
pullback_cone.is_limit_aux t
(λ s, (create s).1)
(λ s, (create s).2.1)
(λ s, (create s).2.2.1)
(λ s m w, (create s).2.2.2 (w walking_cospan.left) (w walking_cospan.right))
/-- A pullback cone on `f` and `g` is determined by morphisms `fst : W ⟶ X` and `snd : W ⟶ Y`
such that `fst ≫ f = snd ≫ g`. -/
@[simps]
def mk {W : C} (fst : W ⟶ X) (snd : W ⟶ Y) (eq : fst ≫ f = snd ≫ g) : pullback_cone f g :=
{ X := W,
π := { app := λ j, option.cases_on j (fst ≫ f) (λ j', walking_pair.cases_on j' fst snd) } }
@[simp] lemma mk_π_app_left {W : C} (fst : W ⟶ X) (snd : W ⟶ Y) (eq : fst ≫ f = snd ≫ g) :
(mk fst snd eq).π.app walking_cospan.left = fst := rfl
@[simp] lemma mk_π_app_right {W : C} (fst : W ⟶ X) (snd : W ⟶ Y) (eq : fst ≫ f = snd ≫ g) :
(mk fst snd eq).π.app walking_cospan.right = snd := rfl
@[simp] lemma mk_π_app_one {W : C} (fst : W ⟶ X) (snd : W ⟶ Y) (eq : fst ≫ f = snd ≫ g) :
(mk fst snd eq).π.app walking_cospan.one = fst ≫ f := rfl
@[simp] lemma mk_fst {W : C} (fst : W ⟶ X) (snd : W ⟶ Y) (eq : fst ≫ f = snd ≫ g) :
(mk fst snd eq).fst = fst := rfl
@[simp] lemma mk_snd {W : C} (fst : W ⟶ X) (snd : W ⟶ Y) (eq : fst ≫ f = snd ≫ g) :
(mk fst snd eq).snd = snd := rfl
@[reassoc] lemma condition (t : pullback_cone f g) : fst t ≫ f = snd t ≫ g :=
(t.w inl).trans (t.w inr).symm
/-- To check whether a morphism is equalized by the maps of a pullback cone, it suffices to check
it for `fst t` and `snd t` -/
lemma equalizer_ext (t : pullback_cone f g) {W : C} {k l : W ⟶ t.X}
(h₀ : k ≫ fst t = l ≫ fst t) (h₁ : k ≫ snd t = l ≫ snd t) :
∀ (j : walking_cospan), k ≫ t.π.app j = l ≫ t.π.app j
| (some walking_pair.left) := h₀
| (some walking_pair.right) := h₁
| none := by rw [← t.w inl, reassoc_of h₀]
lemma is_limit.hom_ext {t : pullback_cone f g} (ht : is_limit t) {W : C} {k l : W ⟶ t.X}
(h₀ : k ≫ fst t = l ≫ fst t) (h₁ : k ≫ snd t = l ≫ snd t) : k = l :=
ht.hom_ext $ equalizer_ext _ h₀ h₁
lemma mono_snd_of_is_pullback_of_mono {t : pullback_cone f g} (ht : is_limit t) [mono f] :
mono t.snd :=
⟨λ W h k i, is_limit.hom_ext ht (by simp [←cancel_mono f, t.condition, reassoc_of i]) i⟩
lemma mono_fst_of_is_pullback_of_mono {t : pullback_cone f g} (ht : is_limit t) [mono g] :
mono t.fst :=
⟨λ W h k i, is_limit.hom_ext ht i (by simp [←cancel_mono g, ←t.condition, reassoc_of i])⟩
/-- To construct an isomorphism of pullback cones, it suffices to construct an isomorphism
of the cone points and check it commutes with `fst` and `snd`. -/
def ext {s t : pullback_cone f g} (i : s.X ≅ t.X)
(w₁ : s.fst = i.hom ≫ t.fst) (w₂ : s.snd = i.hom ≫ t.snd) :
s ≅ t :=
walking_cospan.ext i w₁ w₂
/-- If `t` is a limit pullback cone over `f` and `g` and `h : W ⟶ X` and `k : W ⟶ Y` are such that
`h ≫ f = k ≫ g`, then we have `l : W ⟶ t.X` satisfying `l ≫ fst t = h` and `l ≫ snd t = k`.
-/
def is_limit.lift' {t : pullback_cone f g} (ht : is_limit t) {W : C} (h : W ⟶ X) (k : W ⟶ Y)
(w : h ≫ f = k ≫ g) : {l : W ⟶ t.X // l ≫ fst t = h ∧ l ≫ snd t = k} :=
⟨ht.lift $ pullback_cone.mk _ _ w, ht.fac _ _, ht.fac _ _⟩
/--
This is a more convenient formulation to show that a `pullback_cone` constructed using
`pullback_cone.mk` is a limit cone.
-/
def is_limit.mk {W : C} {fst : W ⟶ X} {snd : W ⟶ Y} (eq : fst ≫ f = snd ≫ g)
(lift : Π (s : pullback_cone f g), s.X ⟶ W)
(fac_left : ∀ (s : pullback_cone f g), lift s ≫ fst = s.fst)
(fac_right : ∀ (s : pullback_cone f g), lift s ≫ snd = s.snd)
(uniq : ∀ (s : pullback_cone f g) (m : s.X ⟶ W)
(w_fst : m ≫ fst = s.fst) (w_snd : m ≫ snd = s.snd), m = lift s) :
is_limit (mk fst snd eq) :=
is_limit_aux _ lift fac_left fac_right
(λ s m w, uniq s m (w walking_cospan.left) (w walking_cospan.right))
/-- The flip of a pullback square is a pullback square. -/
def flip_is_limit {W : C} {h : W ⟶ X} {k : W ⟶ Y}
{comm : h ≫ f = k ≫ g} (t : is_limit (mk _ _ comm.symm)) :
is_limit (mk _ _ comm) :=
is_limit_aux' _ $ λ s,
begin
refine ⟨(is_limit.lift' t _ _ s.condition.symm).1,
(is_limit.lift' t _ _ _).2.2,
(is_limit.lift' t _ _ _).2.1, λ m m₁ m₂, t.hom_ext _⟩,
apply (mk k h _).equalizer_ext,
{ rwa (is_limit.lift' t _ _ _).2.1 },
{ rwa (is_limit.lift' t _ _ _).2.2 },
end
/--
The pullback cone `(𝟙 X, 𝟙 X)` for the pair `(f, f)` is a limit if `f` is a mono. The converse is
shown in `mono_of_pullback_is_id`.
-/
def is_limit_mk_id_id (f : X ⟶ Y) [mono f] :
is_limit (mk (𝟙 X) (𝟙 X) rfl : pullback_cone f f) :=
is_limit.mk _
(λ s, s.fst)
(λ s, category.comp_id _)
(λ s, by rw [←cancel_mono f, category.comp_id, s.condition])
(λ s m m₁ m₂, by simpa using m₁)
/--
`f` is a mono if the pullback cone `(𝟙 X, 𝟙 X)` is a limit for the pair `(f, f)`. The converse is
given in `pullback_cone.is_id_of_mono`.
-/
lemma mono_of_is_limit_mk_id_id (f : X ⟶ Y)
(t : is_limit (mk (𝟙 X) (𝟙 X) rfl : pullback_cone f f)) :
mono f :=
⟨λ Z g h eq, by { rcases pullback_cone.is_limit.lift' t _ _ eq with ⟨_, rfl, rfl⟩, refl } ⟩
/-- Suppose `f` and `g` are two morphisms with a common codomain and `s` is a limit cone over the
diagram formed by `f` and `g`. Suppose `f` and `g` both factor through a monomorphism `h` via
`x` and `y`, respectively. Then `s` is also a limit cone over the diagram formed by `x` and
`y`. -/
def is_limit_of_factors (f : X ⟶ Z) (g : Y ⟶ Z) (h : W ⟶ Z) [mono h]
(x : X ⟶ W) (y : Y ⟶ W) (hxh : x ≫ h = f) (hyh : y ≫ h = g) (s : pullback_cone f g)
(hs : is_limit s) : is_limit (pullback_cone.mk _ _ (show s.fst ≫ x = s.snd ≫ y,
from (cancel_mono h).1 $ by simp only [category.assoc, hxh, hyh, s.condition])) :=
pullback_cone.is_limit_aux' _ $ λ t,
⟨hs.lift (pullback_cone.mk t.fst t.snd $ by rw [←hxh, ←hyh, reassoc_of t.condition]),
⟨hs.fac _ walking_cospan.left, hs.fac _ walking_cospan.right, λ r hr hr',
begin
apply pullback_cone.is_limit.hom_ext hs;
simp only [pullback_cone.mk_fst, pullback_cone.mk_snd] at ⊢ hr hr';
simp only [hr, hr'];
symmetry,
exacts [hs.fac _ walking_cospan.left, hs.fac _ walking_cospan.right]
end⟩⟩
/-- If `W` is the pullback of `f, g`,
it is also the pullback of `f ≫ i, g ≫ i` for any mono `i`. -/
def is_limit_of_comp_mono (f : X ⟶ W) (g : Y ⟶ W) (i : W ⟶ Z) [mono i]
(s : pullback_cone f g) (H : is_limit s) :
is_limit (pullback_cone.mk _ _ (show s.fst ≫ f ≫ i = s.snd ≫ g ≫ i,
by rw [← category.assoc, ← category.assoc, s.condition])) :=
begin
apply pullback_cone.is_limit_aux',
intro s,
rcases pullback_cone.is_limit.lift' H s.fst s.snd
((cancel_mono i).mp (by simpa using s.condition)) with ⟨l, h₁, h₂⟩,
refine ⟨l,h₁,h₂,_⟩,
intros m hm₁ hm₂,
exact (pullback_cone.is_limit.hom_ext H (hm₁.trans h₁.symm) (hm₂.trans h₂.symm) : _)
end
end pullback_cone
/-- A pushout cocone is just a cocone on the span formed by two morphisms `f : X ⟶ Y` and
`g : X ⟶ Z`.-/
abbreviation pushout_cocone (f : X ⟶ Y) (g : X ⟶ Z) := cocone (span f g)
namespace pushout_cocone
variables {f : X ⟶ Y} {g : X ⟶ Z}
/-- The first inclusion of a pushout cocone. -/
abbreviation inl (t : pushout_cocone f g) : Y ⟶ t.X := t.ι.app walking_span.left
/-- The second inclusion of a pushout cocone. -/
abbreviation inr (t : pushout_cocone f g) : Z ⟶ t.X := t.ι.app walking_span.right
@[simp] lemma ι_app_left (c : pushout_cocone f g) : c.ι.app walking_span.left = c.inl := rfl
@[simp] lemma ι_app_right (c : pushout_cocone f g) : c.ι.app walking_span.right = c.inr := rfl
@[simp] lemma condition_zero (t : pushout_cocone f g) : t.ι.app walking_span.zero = f ≫ t.inl :=
begin
have w := t.ι.naturality walking_span.hom.fst,
dsimp at w, simpa using w.symm,
end
/-- This is a slightly more convenient method to verify that a pushout cocone is a colimit cocone.
It only asks for a proof of facts that carry any mathematical content -/
def is_colimit_aux (t : pushout_cocone f g) (desc : Π (s : pushout_cocone f g), t.X ⟶ s.X)
(fac_left : ∀ (s : pushout_cocone f g), t.inl ≫ desc s = s.inl)
(fac_right : ∀ (s : pushout_cocone f g), t.inr ≫ desc s = s.inr)
(uniq : ∀ (s : pushout_cocone f g) (m : t.X ⟶ s.X)
(w : ∀ j : walking_span, t.ι.app j ≫ m = s.ι.app j), m = desc s) :
is_colimit t :=
{ desc := desc,
fac' := λ s j, option.cases_on j (by { simp [← s.w fst, ← t.w fst, fac_left s] } )
(λ j', walking_pair.cases_on j' (fac_left s) (fac_right s)),
uniq' := uniq }
/-- This is another convenient method to verify that a pushout cocone is a colimit cocone. It
only asks for a proof of facts that carry any mathematical content, and allows access to the
same `s` for all parts. -/
def is_colimit_aux' (t : pushout_cocone f g)
(create : Π (s : pushout_cocone f g),
{l // t.inl ≫ l = s.inl ∧ t.inr ≫ l = s.inr ∧
∀ {m}, t.inl ≫ m = s.inl → t.inr ≫ m = s.inr → m = l}) :
is_colimit t :=
is_colimit_aux t
(λ s, (create s).1)
(λ s, (create s).2.1)
(λ s, (create s).2.2.1)
(λ s m w, (create s).2.2.2 (w walking_cospan.left) (w walking_cospan.right))
/-- A pushout cocone on `f` and `g` is determined by morphisms `inl : Y ⟶ W` and `inr : Z ⟶ W` such
that `f ≫ inl = g ↠ inr`. -/
@[simps]
def mk {W : C} (inl : Y ⟶ W) (inr : Z ⟶ W) (eq : f ≫ inl = g ≫ inr) : pushout_cocone f g :=
{ X := W,
ι := { app := λ j, option.cases_on j (f ≫ inl) (λ j', walking_pair.cases_on j' inl inr) } }
@[simp] lemma mk_ι_app_left {W : C} (inl : Y ⟶ W) (inr : Z ⟶ W) (eq : f ≫ inl = g ≫ inr) :
(mk inl inr eq).ι.app walking_span.left = inl := rfl
@[simp] lemma mk_ι_app_right {W : C} (inl : Y ⟶ W) (inr : Z ⟶ W) (eq : f ≫ inl = g ≫ inr) :
(mk inl inr eq).ι.app walking_span.right = inr := rfl
@[simp] lemma mk_ι_app_zero {W : C} (inl : Y ⟶ W) (inr : Z ⟶ W) (eq : f ≫ inl = g ≫ inr) :
(mk inl inr eq).ι.app walking_span.zero = f ≫ inl := rfl
@[simp] lemma mk_inl {W : C} (inl : Y ⟶ W) (inr : Z ⟶ W) (eq : f ≫ inl = g ≫ inr) :
(mk inl inr eq).inl = inl := rfl
@[simp] lemma mk_inr {W : C} (inl : Y ⟶ W) (inr : Z ⟶ W) (eq : f ≫ inl = g ≫ inr) :
(mk inl inr eq).inr = inr := rfl
@[reassoc] lemma condition (t : pushout_cocone f g) : f ≫ (inl t) = g ≫ (inr t) :=
(t.w fst).trans (t.w snd).symm
/-- To check whether a morphism is coequalized by the maps of a pushout cocone, it suffices to check
it for `inl t` and `inr t` -/
lemma coequalizer_ext (t : pushout_cocone f g) {W : C} {k l : t.X ⟶ W}
(h₀ : inl t ≫ k = inl t ≫ l) (h₁ : inr t ≫ k = inr t ≫ l) :
∀ (j : walking_span), t.ι.app j ≫ k = t.ι.app j ≫ l
| (some walking_pair.left) := h₀
| (some walking_pair.right) := h₁
| none := by rw [← t.w fst, category.assoc, category.assoc, h₀]
lemma is_colimit.hom_ext {t : pushout_cocone f g} (ht : is_colimit t) {W : C} {k l : t.X ⟶ W}
(h₀ : inl t ≫ k = inl t ≫ l) (h₁ : inr t ≫ k = inr t ≫ l) : k = l :=
ht.hom_ext $ coequalizer_ext _ h₀ h₁
/-- If `t` is a colimit pushout cocone over `f` and `g` and `h : Y ⟶ W` and `k : Z ⟶ W` are
morphisms satisfying `f ≫ h = g ≫ k`, then we have a factorization `l : t.X ⟶ W` such that
`inl t ≫ l = h` and `inr t ≫ l = k`. -/
def is_colimit.desc' {t : pushout_cocone f g} (ht : is_colimit t) {W : C} (h : Y ⟶ W) (k : Z ⟶ W)
(w : f ≫ h = g ≫ k) : {l : t.X ⟶ W // inl t ≫ l = h ∧ inr t ≫ l = k } :=
⟨ht.desc $ pushout_cocone.mk _ _ w, ht.fac _ _, ht.fac _ _⟩
lemma epi_inr_of_is_pushout_of_epi {t : pushout_cocone f g} (ht : is_colimit t) [epi f] :
epi t.inr :=
⟨λ W h k i, is_colimit.hom_ext ht (by simp [←cancel_epi f, t.condition_assoc, i]) i⟩
lemma epi_inl_of_is_pushout_of_epi {t : pushout_cocone f g} (ht : is_colimit t) [epi g] :
epi t.inl :=
⟨λ W h k i, is_colimit.hom_ext ht i (by simp [←cancel_epi g, ←t.condition_assoc, i])⟩
/-- To construct an isomorphism of pushout cocones, it suffices to construct an isomorphism
of the cocone points and check it commutes with `inl` and `inr`. -/
def ext {s t : pushout_cocone f g} (i : s.X ≅ t.X)
(w₁ : s.inl ≫ i.hom = t.inl) (w₂ : s.inr ≫ i.hom = t.inr) :
s ≅ t :=
walking_span.ext i w₁ w₂
/--
This is a more convenient formulation to show that a `pushout_cocone` constructed using
`pushout_cocone.mk` is a colimit cocone.
-/
def is_colimit.mk {W : C} {inl : Y ⟶ W} {inr : Z ⟶ W} (eq : f ≫ inl = g ≫ inr)
(desc : Π (s : pushout_cocone f g), W ⟶ s.X)
(fac_left : ∀ (s : pushout_cocone f g), inl ≫ desc s = s.inl)
(fac_right : ∀ (s : pushout_cocone f g), inr ≫ desc s = s.inr)
(uniq : ∀ (s : pushout_cocone f g) (m : W ⟶ s.X)
(w_inl : inl ≫ m = s.inl) (w_inr : inr ≫ m = s.inr), m = desc s) :
is_colimit (mk inl inr eq) :=
is_colimit_aux _ desc fac_left fac_right
(λ s m w, uniq s m (w walking_cospan.left) (w walking_cospan.right))
/-- The flip of a pushout square is a pushout square. -/
def flip_is_colimit {W : C} {h : Y ⟶ W} {k : Z ⟶ W}
{comm : f ≫ h = g ≫ k} (t : is_colimit (mk _ _ comm.symm)) :
is_colimit (mk _ _ comm) :=
is_colimit_aux' _ $ λ s,
begin
refine ⟨(is_colimit.desc' t _ _ s.condition.symm).1,
(is_colimit.desc' t _ _ _).2.2,
(is_colimit.desc' t _ _ _).2.1, λ m m₁ m₂, t.hom_ext _⟩,
apply (mk k h _).coequalizer_ext,
{ rwa (is_colimit.desc' t _ _ _).2.1 },
{ rwa (is_colimit.desc' t _ _ _).2.2 },
end
/--
The pushout cocone `(𝟙 X, 𝟙 X)` for the pair `(f, f)` is a colimit if `f` is an epi. The converse is
shown in `epi_of_is_colimit_mk_id_id`.
-/
def is_colimit_mk_id_id (f : X ⟶ Y) [epi f] :
is_colimit (mk (𝟙 Y) (𝟙 Y) rfl : pushout_cocone f f) :=
is_colimit.mk _
(λ s, s.inl)
(λ s, category.id_comp _)
(λ s, by rw [←cancel_epi f, category.id_comp, s.condition])
(λ s m m₁ m₂, by simpa using m₁)
/--
`f` is an epi if the pushout cocone `(𝟙 X, 𝟙 X)` is a colimit for the pair `(f, f)`.
The converse is given in `pushout_cocone.is_colimit_mk_id_id`.
-/
lemma epi_of_is_colimit_mk_id_id (f : X ⟶ Y)
(t : is_colimit (mk (𝟙 Y) (𝟙 Y) rfl : pushout_cocone f f)) :
epi f :=
⟨λ Z g h eq, by { rcases pushout_cocone.is_colimit.desc' t _ _ eq with ⟨_, rfl, rfl⟩, refl }⟩
/-- Suppose `f` and `g` are two morphisms with a common domain and `s` is a colimit cocone over the
diagram formed by `f` and `g`. Suppose `f` and `g` both factor through an epimorphism `h` via
`x` and `y`, respectively. Then `s` is also a colimit cocone over the diagram formed by `x` and
`y`. -/
def is_colimit_of_factors (f : X ⟶ Y) (g : X ⟶ Z) (h : X ⟶ W) [epi h]
(x : W ⟶ Y) (y : W ⟶ Z) (hhx : h ≫ x = f) (hhy : h ≫ y = g) (s : pushout_cocone f g)
(hs : is_colimit s) : is_colimit (pushout_cocone.mk _ _ (show x ≫ s.inl = y ≫ s.inr,
from (cancel_epi h).1 $ by rw [reassoc_of hhx, reassoc_of hhy, s.condition])) :=
pushout_cocone.is_colimit_aux' _ $ λ t,
⟨hs.desc (pushout_cocone.mk t.inl t.inr $
by rw [←hhx, ←hhy, category.assoc, category.assoc, t.condition]),
⟨hs.fac _ walking_span.left, hs.fac _ walking_span.right, λ r hr hr',
begin
apply pushout_cocone.is_colimit.hom_ext hs;
simp only [pushout_cocone.mk_inl, pushout_cocone.mk_inr] at ⊢ hr hr';
simp only [hr, hr'];
symmetry,
exacts [hs.fac _ walking_span.left, hs.fac _ walking_span.right]
end⟩⟩
/-- If `W` is the pushout of `f, g`,
it is also the pushout of `h ≫ f, h ≫ g` for any epi `h`. -/
def is_colimit_of_epi_comp (f : X ⟶ Y) (g : X ⟶ Z) (h : W ⟶ X) [epi h]
(s : pushout_cocone f g) (H : is_colimit s) :
is_colimit (pushout_cocone.mk _ _ (show (h ≫ f) ≫ s.inl = (h ≫ g) ≫ s.inr,
by rw [category.assoc, category.assoc, s.condition])) :=
begin
apply pushout_cocone.is_colimit_aux',
intro s,
rcases pushout_cocone.is_colimit.desc' H s.inl s.inr
((cancel_epi h).mp (by simpa using s.condition)) with ⟨l, h₁, h₂⟩,
refine ⟨l,h₁,h₂,_⟩,
intros m hm₁ hm₂,
exact (pushout_cocone.is_colimit.hom_ext H (hm₁.trans h₁.symm) (hm₂.trans h₂.symm) : _)
end
end pushout_cocone
/-- This is a helper construction that can be useful when verifying that a category has all
pullbacks. Given `F : walking_cospan ⥤ C`, which is really the same as
`cospan (F.map inl) (F.map inr)`, and a pullback cone on `F.map inl` and `F.map inr`, we
get a cone on `F`.
If you're thinking about using this, have a look at `has_pullbacks_of_has_limit_cospan`,
which you may find to be an easier way of achieving your goal. -/
@[simps]
def cone.of_pullback_cone
{F : walking_cospan ⥤ C} (t : pullback_cone (F.map inl) (F.map inr)) : cone F :=
{ X := t.X,
π := t.π ≫ (diagram_iso_cospan F).inv }
/-- This is a helper construction that can be useful when verifying that a category has all
pushout. Given `F : walking_span ⥤ C`, which is really the same as
`span (F.map fst) (F.mal snd)`, and a pushout cocone on `F.map fst` and `F.map snd`,
we get a cocone on `F`.
If you're thinking about using this, have a look at `has_pushouts_of_has_colimit_span`, which
you may find to be an easiery way of achieving your goal. -/
@[simps]
def cocone.of_pushout_cocone
{F : walking_span ⥤ C} (t : pushout_cocone (F.map fst) (F.map snd)) : cocone F :=
{ X := t.X,
ι := (diagram_iso_span F).hom ≫ t.ι }
/-- Given `F : walking_cospan ⥤ C`, which is really the same as `cospan (F.map inl) (F.map inr)`,
and a cone on `F`, we get a pullback cone on `F.map inl` and `F.map inr`. -/
@[simps]
def pullback_cone.of_cone
{F : walking_cospan ⥤ C} (t : cone F) : pullback_cone (F.map inl) (F.map inr) :=
{ X := t.X,
π := t.π ≫ (diagram_iso_cospan F).hom }
/-- A diagram `walking_cospan ⥤ C` is isomorphic to some `pullback_cone.mk` after
composing with `diagram_iso_cospan`. -/
@[simps] def pullback_cone.iso_mk {F : walking_cospan ⥤ C} (t : cone F) :
(cones.postcompose (diagram_iso_cospan.{v} _).hom).obj t ≅
pullback_cone.mk (t.π.app walking_cospan.left) (t.π.app walking_cospan.right)
((t.π.naturality inl).symm.trans (t.π.naturality inr : _)) :=
cones.ext (iso.refl _) $ by rintro (_|(_|_)); { dsimp, simp }
/-- Given `F : walking_span ⥤ C`, which is really the same as `span (F.map fst) (F.map snd)`,
and a cocone on `F`, we get a pushout cocone on `F.map fst` and `F.map snd`. -/
@[simps]
def pushout_cocone.of_cocone
{F : walking_span ⥤ C} (t : cocone F) : pushout_cocone (F.map fst) (F.map snd) :=
{ X := t.X,
ι := (diagram_iso_span F).inv ≫ t.ι }
/-- A diagram `walking_span ⥤ C` is isomorphic to some `pushout_cocone.mk` after composing with
`diagram_iso_span`. -/
@[simps] def pushout_cocone.iso_mk {F : walking_span ⥤ C} (t : cocone F) :
(cocones.precompose (diagram_iso_span.{v} _).inv).obj t ≅
pushout_cocone.mk (t.ι.app walking_span.left) (t.ι.app walking_span.right)
((t.ι.naturality fst).trans (t.ι.naturality snd).symm) :=
cocones.ext (iso.refl _) $ by rintro (_|(_|_)); { dsimp, simp }
/--
`has_pullback f g` represents a particular choice of limiting cone
for the pair of morphisms `f : X ⟶ Z` and `g : Y ⟶ Z`.
-/
abbreviation has_pullback {X Y Z : C} (f : X ⟶ Z) (g : Y ⟶ Z) := has_limit (cospan f g)
/--
`has_pushout f g` represents a particular choice of colimiting cocone
for the pair of morphisms `f : X ⟶ Y` and `g : X ⟶ Z`.
-/
abbreviation has_pushout {X Y Z : C} (f : X ⟶ Y) (g : X ⟶ Z) := has_colimit (span f g)
/-- `pullback f g` computes the pullback of a pair of morphisms with the same target. -/
abbreviation pullback {X Y Z : C} (f : X ⟶ Z) (g : Y ⟶ Z) [has_pullback f g] :=
limit (cospan f g)
/-- `pushout f g` computes the pushout of a pair of morphisms with the same source. -/
abbreviation pushout {X Y Z : C} (f : X ⟶ Y) (g : X ⟶ Z) [has_pushout f g] :=
colimit (span f g)
/-- The first projection of the pullback of `f` and `g`. -/
abbreviation pullback.fst {X Y Z : C} {f : X ⟶ Z} {g : Y ⟶ Z} [has_pullback f g] :
pullback f g ⟶ X :=
limit.π (cospan f g) walking_cospan.left
/-- The second projection of the pullback of `f` and `g`. -/
abbreviation pullback.snd {X Y Z : C} {f : X ⟶ Z} {g : Y ⟶ Z} [has_pullback f g] :
pullback f g ⟶ Y :=
limit.π (cospan f g) walking_cospan.right
/-- The first inclusion into the pushout of `f` and `g`. -/
abbreviation pushout.inl {X Y Z : C} {f : X ⟶ Y} {g : X ⟶ Z} [has_pushout f g] :
Y ⟶ pushout f g :=
colimit.ι (span f g) walking_span.left
/-- The second inclusion into the pushout of `f` and `g`. -/
abbreviation pushout.inr {X Y Z : C} {f : X ⟶ Y} {g : X ⟶ Z} [has_pushout f g] :
Z ⟶ pushout f g :=
colimit.ι (span f g) walking_span.right
/-- A pair of morphisms `h : W ⟶ X` and `k : W ⟶ Y` satisfying `h ≫ f = k ≫ g` induces a morphism
`pullback.lift : W ⟶ pullback f g`. -/
abbreviation pullback.lift {W X Y Z : C} {f : X ⟶ Z} {g : Y ⟶ Z} [has_pullback f g]
(h : W ⟶ X) (k : W ⟶ Y) (w : h ≫ f = k ≫ g) : W ⟶ pullback f g :=
limit.lift _ (pullback_cone.mk h k w)
/-- A pair of morphisms `h : Y ⟶ W` and `k : Z ⟶ W` satisfying `f ≫ h = g ≫ k` induces a morphism
`pushout.desc : pushout f g ⟶ W`. -/
abbreviation pushout.desc {W X Y Z : C} {f : X ⟶ Y} {g : X ⟶ Z} [has_pushout f g]
(h : Y ⟶ W) (k : Z ⟶ W) (w : f ≫ h = g ≫ k) : pushout f g ⟶ W :=
colimit.desc _ (pushout_cocone.mk h k w)
@[simp]
lemma pullback_cone.fst_colimit_cocone {X Y Z : C} (f : X ⟶ Z) (g : Y ⟶ Z)
[has_limit (cospan f g)] : pullback_cone.fst (limit.cone (cospan f g)) = pullback.fst :=
rfl
@[simp]
lemma pullback_cone.snd_colimit_cocone {X Y Z : C} (f : X ⟶ Z) (g : Y ⟶ Z)
[has_limit (cospan f g)] : pullback_cone.snd (limit.cone (cospan f g)) = pullback.snd :=
rfl
@[simp]
lemma pushout_cocone.inl_colimit_cocone {X Y Z : C} (f : Z ⟶ X) (g : Z ⟶ Y)
[has_colimit (span f g)] : pushout_cocone.inl (colimit.cocone (span f g)) = pushout.inl :=
rfl
@[simp]
lemma pushout_cocone.inr_colimit_cocone {X Y Z : C} (f : Z ⟶ X) (g : Z ⟶ Y)
[has_colimit (span f g)] : pushout_cocone.inr (colimit.cocone (span f g)) = pushout.inr :=
rfl
@[simp, reassoc]
lemma pullback.lift_fst {W X Y Z : C} {f : X ⟶ Z} {g : Y ⟶ Z} [has_pullback f g]
(h : W ⟶ X) (k : W ⟶ Y) (w : h ≫ f = k ≫ g) : pullback.lift h k w ≫ pullback.fst = h :=
limit.lift_π _ _
@[simp, reassoc]
lemma pullback.lift_snd {W X Y Z : C} {f : X ⟶ Z} {g : Y ⟶ Z} [has_pullback f g]
(h : W ⟶ X) (k : W ⟶ Y) (w : h ≫ f = k ≫ g) : pullback.lift h k w ≫ pullback.snd = k :=
limit.lift_π _ _
@[simp, reassoc]
lemma pushout.inl_desc {W X Y Z : C} {f : X ⟶ Y} {g : X ⟶ Z} [has_pushout f g]
(h : Y ⟶ W) (k : Z ⟶ W) (w : f ≫ h = g ≫ k) : pushout.inl ≫ pushout.desc h k w = h :=
colimit.ι_desc _ _
@[simp, reassoc]
lemma pushout.inr_desc {W X Y Z : C} {f : X ⟶ Y} {g : X ⟶ Z} [has_pushout f g]
(h : Y ⟶ W) (k : Z ⟶ W) (w : f ≫ h = g ≫ k) : pushout.inr ≫ pushout.desc h k w = k :=
colimit.ι_desc _ _
/-- A pair of morphisms `h : W ⟶ X` and `k : W ⟶ Y` satisfying `h ≫ f = k ≫ g` induces a morphism
`l : W ⟶ pullback f g` such that `l ≫ pullback.fst = h` and `l ≫ pullback.snd = k`. -/
def pullback.lift' {W X Y Z : C} {f : X ⟶ Z} {g : Y ⟶ Z} [has_pullback f g]
(h : W ⟶ X) (k : W ⟶ Y) (w : h ≫ f = k ≫ g) :
{l : W ⟶ pullback f g // l ≫ pullback.fst = h ∧ l ≫ pullback.snd = k} :=
⟨pullback.lift h k w, pullback.lift_fst _ _ _, pullback.lift_snd _ _ _⟩
/-- A pair of morphisms `h : Y ⟶ W` and `k : Z ⟶ W` satisfying `f ≫ h = g ≫ k` induces a morphism
`l : pushout f g ⟶ W` such that `pushout.inl ≫ l = h` and `pushout.inr ≫ l = k`. -/
def pullback.desc' {W X Y Z : C} {f : X ⟶ Y} {g : X ⟶ Z} [has_pushout f g]
(h : Y ⟶ W) (k : Z ⟶ W) (w : f ≫ h = g ≫ k) :
{l : pushout f g ⟶ W // pushout.inl ≫ l = h ∧ pushout.inr ≫ l = k} :=
⟨pushout.desc h k w, pushout.inl_desc _ _ _, pushout.inr_desc _ _ _⟩
@[reassoc]
lemma pullback.condition {X Y Z : C} {f : X ⟶ Z} {g : Y ⟶ Z} [has_pullback f g] :
(pullback.fst : pullback f g ⟶ X) ≫ f = pullback.snd ≫ g :=
pullback_cone.condition _
@[reassoc]
lemma pushout.condition {X Y Z : C} {f : X ⟶ Y} {g : X ⟶ Z} [has_pushout f g] :
f ≫ (pushout.inl : Y ⟶ pushout f g) = g ≫ pushout.inr :=
pushout_cocone.condition _
/--
Given such a diagram, then there is a natural morphism `W ×ₛ X ⟶ Y ×ₜ Z`.
W ⟶ Y
↘ ↘
S ⟶ T
↗ ↗
X ⟶ Z
-/
abbreviation pullback.map {W X Y Z S T : C} (f₁ : W ⟶ S) (f₂ : X ⟶ S) [has_pullback f₁ f₂]
(g₁ : Y ⟶ T) (g₂ : Z ⟶ T) [has_pullback g₁ g₂] (i₁ : W ⟶ Y) (i₂ : X ⟶ Z) (i₃ : S ⟶ T)
(eq₁ : f₁ ≫ i₃ = i₁ ≫ g₁) (eq₂ : f₂ ≫ i₃ = i₂ ≫ g₂) : pullback f₁ f₂ ⟶ pullback g₁ g₂ :=
pullback.lift (pullback.fst ≫ i₁) (pullback.snd ≫ i₂)
(by simp [← eq₁, ← eq₂, pullback.condition_assoc])
/-- The canonical map `X ×ₛ Y ⟶ X ×ₜ Y` given `S ⟶ T`. -/
abbreviation pullback.map_desc {X Y S T : C} (f : X ⟶ S) (g : Y ⟶ S) (i : S ⟶ T)
[has_pullback f g] [has_pullback (f ≫ i) (g ≫ i)] :
pullback f g ⟶ pullback (f ≫ i) (g ≫ i) :=
pullback.map f g (f ≫ i) (g ≫ i) (𝟙 _) (𝟙 _) i (category.id_comp _).symm (category.id_comp _).symm
/--
Given such a diagram, then there is a natural morphism `W ⨿ₛ X ⟶ Y ⨿ₜ Z`.
W ⟶ Y
↗ ↗
S ⟶ T
↘ ↘
X ⟶ Z
-/
abbreviation pushout.map {W X Y Z S T : C} (f₁ : S ⟶ W) (f₂ : S ⟶ X) [has_pushout f₁ f₂]
(g₁ : T ⟶ Y) (g₂ : T ⟶ Z) [has_pushout g₁ g₂] (i₁ : W ⟶ Y) (i₂ : X ⟶ Z) (i₃ : S ⟶ T)
(eq₁ : f₁ ≫ i₁ = i₃ ≫ g₁) (eq₂ : f₂ ≫ i₂ = i₃ ≫ g₂) : pushout f₁ f₂ ⟶ pushout g₁ g₂ :=
pushout.desc (i₁ ≫ pushout.inl) (i₂ ≫ pushout.inr)
(by { simp only [← category.assoc, eq₁, eq₂], simp [pushout.condition] })
/-- The canonical map `X ⨿ₛ Y ⟶ X ⨿ₜ Y` given `S ⟶ T`. -/
abbreviation pushout.map_lift {X Y S T : C} (f : T ⟶ X) (g : T ⟶ Y) (i : S ⟶ T)
[has_pushout f g] [has_pushout (i ≫ f) (i ≫ g)] :
pushout (i ≫ f) (i ≫ g) ⟶ pushout f g :=
pushout.map (i ≫ f) (i ≫ g) f g (𝟙 _) (𝟙 _) i (category.comp_id _) (category.comp_id _)
/-- Two morphisms into a pullback are equal if their compositions with the pullback morphisms are
equal -/
@[ext] lemma pullback.hom_ext {X Y Z : C} {f : X ⟶ Z} {g : Y ⟶ Z} [has_pullback f g]
{W : C} {k l : W ⟶ pullback f g} (h₀ : k ≫ pullback.fst = l ≫ pullback.fst)
(h₁ : k ≫ pullback.snd = l ≫ pullback.snd) : k = l :=
limit.hom_ext $ pullback_cone.equalizer_ext _ h₀ h₁
/-- The pullback cone built from the pullback projections is a pullback. -/
def pullback_is_pullback {X Y Z : C} (f : X ⟶ Z) (g : Y ⟶ Z) [has_pullback f g] :
is_limit (pullback_cone.mk (pullback.fst : pullback f g ⟶ _) pullback.snd pullback.condition) :=
pullback_cone.is_limit.mk _ (λ s, pullback.lift s.fst s.snd s.condition)
(by simp) (by simp) (by tidy)
/-- The pullback of a monomorphism is a monomorphism -/
instance pullback.fst_of_mono {X Y Z : C} {f : X ⟶ Z} {g : Y ⟶ Z} [has_pullback f g]
[mono g] : mono (pullback.fst : pullback f g ⟶ X) :=
pullback_cone.mono_fst_of_is_pullback_of_mono (limit.is_limit _)
/-- The pullback of a monomorphism is a monomorphism -/
instance pullback.snd_of_mono {X Y Z : C} {f : X ⟶ Z} {g : Y ⟶ Z} [has_pullback f g]
[mono f] : mono (pullback.snd : pullback f g ⟶ Y) :=
pullback_cone.mono_snd_of_is_pullback_of_mono (limit.is_limit _)
/-- The map `X ×[Z] Y ⟶ X × Y` is mono. -/
instance mono_pullback_to_prod {C : Type*} [category C] {X Y Z : C} (f : X ⟶ Z) (g : Y ⟶ Z)
[has_pullback f g] [has_binary_product X Y] :
mono (prod.lift pullback.fst pullback.snd : pullback f g ⟶ _) :=
⟨λ W i₁ i₂ h, begin
ext,
{ simpa using congr_arg (λ f, f ≫ prod.fst) h },
{ simpa using congr_arg (λ f, f ≫ prod.snd) h }
end⟩
/-- Two morphisms out of a pushout are equal if their compositions with the pushout morphisms are
equal -/
@[ext] lemma pushout.hom_ext {X Y Z : C} {f : X ⟶ Y} {g : X ⟶ Z} [has_pushout f g]
{W : C} {k l : pushout f g ⟶ W} (h₀ : pushout.inl ≫ k = pushout.inl ≫ l)
(h₁ : pushout.inr ≫ k = pushout.inr ≫ l) : k = l :=
colimit.hom_ext $ pushout_cocone.coequalizer_ext _ h₀ h₁
/-- The pushout cocone built from the pushout coprojections is a pushout. -/
def pushout_is_pushout {X Y Z : C} (f : X ⟶ Y) (g : X ⟶ Z) [has_pushout f g] :
is_colimit (pushout_cocone.mk (pushout.inl : _ ⟶ pushout f g) pushout.inr pushout.condition) :=
pushout_cocone.is_colimit.mk _ (λ s, pushout.desc s.inl s.inr s.condition)
(by simp) (by simp) (by tidy)
/-- The pushout of an epimorphism is an epimorphism -/
instance pushout.inl_of_epi {X Y Z : C} {f : X ⟶ Y} {g : X ⟶ Z} [has_pushout f g] [epi g] :
epi (pushout.inl : Y ⟶ pushout f g) :=
pushout_cocone.epi_inl_of_is_pushout_of_epi (colimit.is_colimit _)
/-- The pushout of an epimorphism is an epimorphism -/
instance pushout.inr_of_epi {X Y Z : C} {f : X ⟶ Y} {g : X ⟶ Z} [has_pushout f g] [epi f] :
epi (pushout.inr : Z ⟶ pushout f g) :=
pushout_cocone.epi_inr_of_is_pushout_of_epi (colimit.is_colimit _)
/-- The map ` X ⨿ Y ⟶ X ⨿[Z] Y` is epi. -/
instance epi_coprod_to_pushout {C : Type*} [category C] {X Y Z : C} (f : X ⟶ Y) (g : X ⟶ Z)
[has_pushout f g] [has_binary_coproduct Y Z] :
epi (coprod.desc pushout.inl pushout.inr : _ ⟶ pushout f g) :=
⟨λ W i₁ i₂ h, begin
ext,
{ simpa using congr_arg (λ f, coprod.inl ≫ f) h },
{ simpa using congr_arg (λ f, coprod.inr ≫ f) h }
end⟩
instance pullback.map_is_iso {W X Y Z S T : C} (f₁ : W ⟶ S) (f₂ : X ⟶ S) [has_pullback f₁ f₂]
(g₁ : Y ⟶ T) (g₂ : Z ⟶ T) [has_pullback g₁ g₂] (i₁ : W ⟶ Y) (i₂ : X ⟶ Z) (i₃ : S ⟶ T)
(eq₁ : f₁ ≫ i₃ = i₁ ≫ g₁) (eq₂ : f₂ ≫ i₃ = i₂ ≫ g₂) [is_iso i₁] [is_iso i₂] [is_iso i₃] :
is_iso (pullback.map f₁ f₂ g₁ g₂ i₁ i₂ i₃ eq₁ eq₂) :=
begin
refine ⟨⟨pullback.map _ _ _ _ (inv i₁) (inv i₂) (inv i₃) _ _, _, _⟩⟩,
{ rw [is_iso.comp_inv_eq, category.assoc, eq₁, is_iso.inv_hom_id_assoc] },
{ rw [is_iso.comp_inv_eq, category.assoc, eq₂, is_iso.inv_hom_id_assoc] },
tidy
end
/-- If `f₁ = f₂` and `g₁ = g₂`, we may construct a canonical
isomorphism `pullback f₁ g₁ ≅ pullback f₂ g₂` -/
@[simps hom]
def pullback.congr_hom {X Y Z : C} {f₁ f₂ : X ⟶ Z} {g₁ g₂ : Y ⟶ Z}
(h₁ : f₁ = f₂) (h₂ : g₁ = g₂) [has_pullback f₁ g₁] [has_pullback f₂ g₂] :
pullback f₁ g₁ ≅ pullback f₂ g₂ :=
as_iso $ pullback.map _ _ _ _ (𝟙 _) (𝟙 _) (𝟙 _) (by simp [h₁]) (by simp [h₂])
@[simp]
lemma pullback.congr_hom_inv {X Y Z : C} {f₁ f₂ : X ⟶ Z} {g₁ g₂ : Y ⟶ Z}
(h₁ : f₁ = f₂) (h₂ : g₁ = g₂) [has_pullback f₁ g₁] [has_pullback f₂ g₂] :
(pullback.congr_hom h₁ h₂).inv =
pullback.map _ _ _ _ (𝟙 _) (𝟙 _) (𝟙 _) (by simp [h₁]) (by simp [h₂]) :=
begin
apply pullback.hom_ext,
{ erw pullback.lift_fst,
rw iso.inv_comp_eq,
erw pullback.lift_fst_assoc,
rw [category.comp_id, category.comp_id] },
{ erw pullback.lift_snd,
rw iso.inv_comp_eq,
erw pullback.lift_snd_assoc,
rw [category.comp_id, category.comp_id] },
end
/-- The induced morphism `pushout.map f₁ f₂ g₁ g₂ i₁ i₂ i₃ _ _` between pushouts is an
isomorphism whenever the three comparison morphisms `i₁`, `i₂`, `i₃` are isomorphisms. -/
instance pushout.map_is_iso {W X Y Z S T : C} (f₁ : S ⟶ W) (f₂ : S ⟶ X) [has_pushout f₁ f₂]
(g₁ : T ⟶ Y) (g₂ : T ⟶ Z) [has_pushout g₁ g₂] (i₁ : W ⟶ Y) (i₂ : X ⟶ Z) (i₃ : S ⟶ T)
(eq₁ : f₁ ≫ i₁ = i₃ ≫ g₁) (eq₂ : f₂ ≫ i₂ = i₃ ≫ g₂) [is_iso i₁] [is_iso i₂] [is_iso i₃] :
is_iso (pushout.map f₁ f₂ g₁ g₂ i₁ i₂ i₃ eq₁ eq₂) :=
begin
-- The inverse is the map induced by the inverses of `i₁`, `i₂`, `i₃`;
-- the commutativity conditions follow from `eq₁`, `eq₂` by rearranging with `is_iso` lemmas.
refine ⟨⟨pushout.map _ _ _ _ (inv i₁) (inv i₂) (inv i₃) _ _, _, _⟩⟩,
{ rw [is_iso.comp_inv_eq, category.assoc, eq₁, is_iso.inv_hom_id_assoc] },
{ rw [is_iso.comp_inv_eq, category.assoc, eq₂, is_iso.inv_hom_id_assoc] },
tidy
end
lemma pullback.map_desc_comp {X Y S T S' : C} (f : X ⟶ T) (g : Y ⟶ T) (i : T ⟶ S)
(i' : S ⟶ S') [has_pullback f g] [has_pullback (f ≫ i) (g ≫ i)]
[has_pullback (f ≫ i ≫ i') (g ≫ i ≫ i')] [has_pullback ((f ≫ i) ≫ i') ((g ≫ i) ≫ i')] :
pullback.map_desc f g (i ≫ i') = pullback.map_desc f g i ≫ pullback.map_desc _ _ i' ≫
(pullback.congr_hom (category.assoc _ _ _) (category.assoc _ _ _)).hom :=
by { ext; simp }
/-- If `f₁ = f₂` and `g₁ = g₂`, we may construct a canonical
isomorphism `pushout f₁ g₁ ≅ pushout f₂ g₂` -/
@[simps hom]
def pushout.congr_hom {X Y Z : C} {f₁ f₂ : X ⟶ Y} {g₁ g₂ : X ⟶ Z}
(h₁ : f₁ = f₂) (h₂ : g₁ = g₂) [has_pushout f₁ g₁] [has_pushout f₂ g₂] :
pushout f₁ g₁ ≅ pushout f₂ g₂ :=
as_iso $ pushout.map _ _ _ _ (𝟙 _) (𝟙 _) (𝟙 _) (by simp [h₁]) (by simp [h₂])
@[simp]
lemma pushout.congr_hom_inv {X Y Z : C} {f₁ f₂ : X ⟶ Y} {g₁ g₂ : X ⟶ Z}
(h₁ : f₁ = f₂) (h₂ : g₁ = g₂) [has_pushout f₁ g₁] [has_pushout f₂ g₂] :
(pushout.congr_hom h₁ h₂).inv =
pushout.map _ _ _ _ (𝟙 _) (𝟙 _) (𝟙 _) (by simp [h₁]) (by simp [h₂]) :=
begin
apply pushout.hom_ext,
{ erw pushout.inl_desc,
rw [iso.comp_inv_eq, category.id_comp],
erw pushout.inl_desc,
rw category.id_comp },
{ erw pushout.inr_desc,
rw [iso.comp_inv_eq, category.id_comp],
erw pushout.inr_desc,
rw category.id_comp }
end
lemma pushout.map_lift_comp {X Y S T S' : C} (f : T ⟶ X) (g : T ⟶ Y) (i : S ⟶ T)
(i' : S' ⟶ S) [has_pushout f g] [has_pushout (i ≫ f) (i ≫ g)]
[has_pushout (i' ≫ i ≫ f) (i' ≫ i ≫ g)] [has_pushout ((i' ≫ i) ≫ f) ((i' ≫ i) ≫ g)] :
pushout.map_lift f g (i' ≫ i) =
(pushout.congr_hom (category.assoc _ _ _) (category.assoc _ _ _)).hom ≫
pushout.map_lift _ _ i' ≫ pushout.map_lift f g i :=
by { ext; simp }
section
variables (G : C ⥤ D)
/--
The comparison morphism for the pullback of `f,g`.
This is an isomorphism iff `G` preserves the pullback of `f,g`; see
`category_theory/limits/preserves/shapes/pullbacks.lean`
-/
def pullback_comparison (f : X ⟶ Z) (g : Y ⟶ Z)
[has_pullback f g] [has_pullback (G.map f) (G.map g)] :
G.obj (pullback f g) ⟶ pullback (G.map f) (G.map g) :=
pullback.lift (G.map pullback.fst) (G.map pullback.snd)
(by simp only [←G.map_comp, pullback.condition])
@[simp, reassoc]
lemma pullback_comparison_comp_fst (f : X ⟶ Z) (g : Y ⟶ Z)
[has_pullback f g] [has_pullback (G.map f) (G.map g)] :
pullback_comparison G f g ≫ pullback.fst = G.map pullback.fst :=
pullback.lift_fst _ _ _
@[simp, reassoc]
lemma pullback_comparison_comp_snd (f : X ⟶ Z) (g : Y ⟶ Z)
[has_pullback f g] [has_pullback (G.map f) (G.map g)] :
pullback_comparison G f g ≫ pullback.snd = G.map pullback.snd :=
pullback.lift_snd _ _ _
@[simp, reassoc]
lemma map_lift_pullback_comparison (f : X ⟶ Z) (g : Y ⟶ Z)
[has_pullback f g] [has_pullback (G.map f) (G.map g)]
{W : C} {h : W ⟶ X} {k : W ⟶ Y} (w : h ≫ f = k ≫ g) :
G.map (pullback.lift _ _ w) ≫ pullback_comparison G f g =
pullback.lift (G.map h) (G.map k) (by simp only [←G.map_comp, w]) :=
by { ext; simp [← G.map_comp] }
/--
The comparison morphism for the pushout of `f,g`.
This is an isomorphism iff `G` preserves the pushout of `f,g`; see
`category_theory/limits/preserves/shapes/pullbacks.lean`
-/
def pushout_comparison (f : X ⟶ Y) (g : X ⟶ Z)
[has_pushout f g] [has_pushout (G.map f) (G.map g)] :
pushout (G.map f) (G.map g) ⟶ G.obj (pushout f g) :=
pushout.desc (G.map pushout.inl) (G.map pushout.inr)
(by simp only [←G.map_comp, pushout.condition])
@[simp, reassoc]
lemma inl_comp_pushout_comparison (f : X ⟶ Y) (g : X ⟶ Z)
[has_pushout f g] [has_pushout (G.map f) (G.map g)] :
pushout.inl ≫ pushout_comparison G f g = G.map pushout.inl :=
pushout.inl_desc _ _ _
@[simp, reassoc]
lemma inr_comp_pushout_comparison (f : X ⟶ Y) (g : X ⟶ Z)
[has_pushout f g] [has_pushout (G.map f) (G.map g)] :
pushout.inr ≫ pushout_comparison G f g = G.map pushout.inr :=
pushout.inr_desc _ _ _
@[simp, reassoc]
lemma pushout_comparison_map_desc (f : X ⟶ Y) (g : X ⟶ Z)
[has_pushout f g] [has_pushout (G.map f) (G.map g)]
{W : C} {h : Y ⟶ W} {k : Z ⟶ W} (w : f ≫ h = g ≫ k) :
pushout_comparison G f g ≫ G.map (pushout.desc _ _ w) =
pushout.desc (G.map h) (G.map k) (by simp only [←G.map_comp, w]) :=
by { ext; simp [← G.map_comp] }
end
section pullback_symmetry
open walking_cospan
variables (f : X ⟶ Z) (g : Y ⟶ Z)
/-- The pullback of `g, f` exists whenever the pullback of `f, g` does.
Making this a global instance would make the typeclass search go in an infinite loop. -/
lemma has_pullback_symmetry [has_pullback f g] : has_pullback g f :=
⟨⟨⟨pullback_cone.mk _ _ pullback.condition.symm,
pullback_cone.flip_is_limit (pullback_is_pullback _ _)⟩⟩⟩
local attribute [instance] has_pullback_symmetry
/-- The isomorphism `X ×[Z] Y ≅ Y ×[Z] X`. -/
def pullback_symmetry [has_pullback f g] :
pullback f g ≅ pullback g f :=
is_limit.cone_point_unique_up_to_iso
(pullback_cone.flip_is_limit (pullback_is_pullback f g) :
is_limit (pullback_cone.mk _ _ pullback.condition.symm))
(limit.is_limit _)
@[simp, reassoc] lemma pullback_symmetry_hom_comp_fst [has_pullback f g] :
(pullback_symmetry f g).hom ≫ pullback.fst = pullback.snd := by simp [pullback_symmetry]
@[simp, reassoc] lemma pullback_symmetry_hom_comp_snd [has_pullback f g] :
(pullback_symmetry f g).hom ≫ pullback.snd = pullback.fst := by simp [pullback_symmetry]
@[simp, reassoc] lemma pullback_symmetry_inv_comp_fst [has_pullback f g] :
(pullback_symmetry f g).inv ≫ pullback.fst = pullback.snd := by simp [iso.inv_comp_eq]
@[simp, reassoc] lemma pullback_symmetry_inv_comp_snd [has_pullback f g] :
(pullback_symmetry f g).inv ≫ pullback.snd = pullback.fst := by simp [iso.inv_comp_eq]
end pullback_symmetry
section pushout_symmetry
open walking_cospan
variables (f : X ⟶ Y) (g : X ⟶ Z)
/-- The pushout of `g, f` exists whenever the pushout of `f, g` does.
Making this a global instance would make the typeclass search go in an infinite loop. -/
lemma has_pushout_symmetry [has_pushout f g] : has_pushout g f :=
⟨⟨⟨pushout_cocone.mk _ _ pushout.condition.symm,
pushout_cocone.flip_is_colimit (pushout_is_pushout _ _)⟩⟩⟩
local attribute [instance] has_pushout_symmetry
/-- The isomorphism `Y ⨿[X] Z ≅ Z ⨿[X] Y`. -/
def pushout_symmetry [has_pushout f g] :
pushout f g ≅ pushout g f :=
is_colimit.cocone_point_unique_up_to_iso
(pushout_cocone.flip_is_colimit (pushout_is_pushout f g) :
is_colimit (pushout_cocone.mk _ _ pushout.condition.symm))
(colimit.is_colimit _)
@[simp, reassoc] lemma inl_comp_pushout_symmetry_hom [has_pushout f g] :
pushout.inl ≫ (pushout_symmetry f g).hom = pushout.inr :=
(colimit.is_colimit (span f g)).comp_cocone_point_unique_up_to_iso_hom
(pushout_cocone.flip_is_colimit (pushout_is_pushout g f)) _
@[simp, reassoc] lemma inr_comp_pushout_symmetry_hom [has_pushout f g] :
pushout.inr ≫ (pushout_symmetry f g).hom = pushout.inl :=
(colimit.is_colimit (span f g)).comp_cocone_point_unique_up_to_iso_hom
(pushout_cocone.flip_is_colimit (pushout_is_pushout g f)) _
@[simp, reassoc] lemma inl_comp_pushout_symmetry_inv [has_pushout f g] :
pushout.inl ≫ (pushout_symmetry f g).inv = pushout.inr := by simp [iso.comp_inv_eq]
@[simp, reassoc] lemma inr_comp_pushout_symmetry_inv [has_pushout f g] :
pushout.inr ≫ (pushout_symmetry f g).inv = pushout.inl := by simp [iso.comp_inv_eq]
end pushout_symmetry
section pullback_left_iso
open walking_cospan
/-- The pullback of `f, g` is also the pullback of `f ≫ i, g ≫ i` for any mono `i`. -/
noncomputable
def pullback_is_pullback_of_comp_mono (f : X ⟶ W) (g : Y ⟶ W) (i : W ⟶ Z)
[mono i] [has_pullback f g] :
is_limit (pullback_cone.mk pullback.fst pullback.snd _) :=
pullback_cone.is_limit_of_comp_mono f g i _ (limit.is_limit (cospan f g))
instance has_pullback_of_comp_mono (f : X ⟶ W) (g : Y ⟶ W) (i : W ⟶ Z)
[mono i] [has_pullback f g] : has_pullback (f ≫ i) (g ≫ i) :=
⟨⟨⟨_,pullback_is_pullback_of_comp_mono f g i⟩⟩⟩
variables (f : X ⟶ Z) (g : Y ⟶ Z) [is_iso f]
/-- If `f : X ⟶ Z` is iso, then `X ×[Z] Y ≅ Y`. This is the explicit limit cone. -/
def pullback_cone_of_left_iso : pullback_cone f g :=
pullback_cone.mk (g ≫ inv f) (𝟙 _) $ by simp
@[simp] lemma pullback_cone_of_left_iso_X :
(pullback_cone_of_left_iso f g).X = Y := rfl
@[simp] lemma pullback_cone_of_left_iso_fst :
(pullback_cone_of_left_iso f g).fst = g ≫ inv f := rfl
@[simp] lemma pullback_cone_of_left_iso_snd :
(pullback_cone_of_left_iso f g).snd = 𝟙 _ := rfl
@[simp] lemma pullback_cone_of_left_iso_π_app_none :
(pullback_cone_of_left_iso f g).π.app none = g := by { delta pullback_cone_of_left_iso, simp }
@[simp] lemma pullback_cone_of_left_iso_π_app_left :
(pullback_cone_of_left_iso f g).π.app left = g ≫ inv f := rfl
@[simp] lemma pullback_cone_of_left_iso_π_app_right :
(pullback_cone_of_left_iso f g).π.app right = 𝟙 _ := rfl
/-- Verify that the constructed limit cone is indeed a limit. -/
def pullback_cone_of_left_iso_is_limit :
is_limit (pullback_cone_of_left_iso f g) :=
pullback_cone.is_limit_aux' _ (λ s, ⟨s.snd, by simp [← s.condition_assoc]⟩)
lemma has_pullback_of_left_iso : has_pullback f g :=
⟨⟨⟨_, pullback_cone_of_left_iso_is_limit f g⟩⟩⟩
local attribute [instance] has_pullback_of_left_iso
instance pullback_snd_iso_of_left_iso : is_iso (pullback.snd : pullback f g ⟶ _) :=
begin
refine ⟨⟨pullback.lift (g ≫ inv f) (𝟙 _) (by simp), _, by simp⟩⟩,
ext,
{ simp [← pullback.condition_assoc] },
{ simp [pullback.condition_assoc] },
end
variables (i : Z ⟶ W) [mono i]
instance has_pullback_of_right_factors_mono (f : X ⟶ Z) : has_pullback i (f ≫ i) :=
by { conv { congr, rw ←category.id_comp i, }, apply_instance }
instance pullback_snd_iso_of_right_factors_mono (f : X ⟶ Z) :
is_iso (pullback.snd : pullback i (f ≫ i) ⟶ _) :=
begin
convert (congr_arg is_iso (show _ ≫ pullback.snd = _,
from limit.iso_limit_cone_hom_π ⟨_,pullback_is_pullback_of_comp_mono (𝟙 _) f i⟩
walking_cospan.right)).mp infer_instance;
exact (category.id_comp _).symm
end
end pullback_left_iso
section pullback_right_iso
open walking_cospan
variables (f : X ⟶ Z) (g : Y ⟶ Z) [is_iso g]
/-- If `g : Y ⟶ Z` is iso, then `X ×[Z] Y ≅ X`. This is the explicit limit cone. -/
def pullback_cone_of_right_iso : pullback_cone f g :=
pullback_cone.mk (𝟙 _) (f ≫ inv g) $ by simp
@[simp] lemma pullback_cone_of_right_iso_X :
(pullback_cone_of_right_iso f g).X = X := rfl
@[simp] lemma pullback_cone_of_right_iso_fst :
(pullback_cone_of_right_iso f g).fst = 𝟙 _ := rfl
@[simp] lemma pullback_cone_of_right_iso_snd :
(pullback_cone_of_right_iso f g).snd = f ≫ inv g := rfl
@[simp] lemma pullback_cone_of_right_iso_π_app_none :
(pullback_cone_of_right_iso f g).π.app none = f := category.id_comp _
@[simp] lemma pullback_cone_of_right_iso_π_app_left :
(pullback_cone_of_right_iso f g).π.app left = 𝟙 _ := rfl
@[simp] lemma pullback_cone_of_right_iso_π_app_right :
(pullback_cone_of_right_iso f g).π.app right = f ≫ inv g := rfl
/-- Verify that the constructed limit cone is indeed a limit. -/
def pullback_cone_of_right_iso_is_limit :
is_limit (pullback_cone_of_right_iso f g) :=
pullback_cone.is_limit_aux' _ (λ s, ⟨s.fst, by simp [s.condition_assoc]⟩)
lemma has_pullback_of_right_iso : has_pullback f g :=
⟨⟨⟨_, pullback_cone_of_right_iso_is_limit f g⟩⟩⟩
local attribute [instance] has_pullback_of_right_iso
-- NOTE(review): despite the `_snd_` in the name, this instance provides `is_iso pullback.fst`
-- (compare `pullback_snd_iso_of_left_iso` above, which really is about `pullback.snd`).
-- The name is kept unchanged for compatibility with existing callers.
instance pullback_snd_iso_of_right_iso : is_iso (pullback.fst : pullback f g ⟶ _) :=
begin
refine ⟨⟨pullback.lift (𝟙 _) (f ≫ inv g) (by simp), _, by simp⟩⟩,
ext,
{ simp },
{ simp [pullback.condition_assoc] },
end
variables (i : Z ⟶ W) [mono i]
instance has_pullback_of_left_factors_mono (f : X ⟶ Z) : has_pullback (f ≫ i) i :=
by { conv { congr, skip, rw ←category.id_comp i, }, apply_instance }
-- NOTE(review): despite the `_snd_` in the name, this instance provides `is_iso pullback.fst`.
-- The name is kept unchanged for compatibility with existing callers.
instance pullback_snd_iso_of_left_factors_mono (f : X ⟶ Z) :
is_iso (pullback.fst : pullback (f ≫ i) i ⟶ _) :=
begin
convert (congr_arg is_iso (show _ ≫ pullback.fst = _,
from limit.iso_limit_cone_hom_π ⟨_,pullback_is_pullback_of_comp_mono f (𝟙 _) i⟩
walking_cospan.left)).mp infer_instance;
exact (category.id_comp _).symm
end
end pullback_right_iso
section pushout_left_iso
open walking_span
/-- The pushout of `f, g` is also the pushout of `h ≫ f, h ≫ g` for any epi `h`. -/
noncomputable
def pushout_is_pushout_of_epi_comp (f : X ⟶ Y) (g : X ⟶ Z) (h : W ⟶ X)
[epi h] [has_pushout f g] :
is_colimit (pushout_cocone.mk pushout.inl pushout.inr _) :=
pushout_cocone.is_colimit_of_epi_comp f g h _ (colimit.is_colimit (span f g))
/-- Precomposing both legs of a pushout with an epimorphism preserves existence of the pushout. -/
instance has_pushout_of_epi_comp (f : X ⟶ Y) (g : X ⟶ Z) (h : W ⟶ X)
[epi h] [has_pushout f g] : has_pushout (h ≫ f) (h ≫ g) :=
⟨⟨⟨_,pushout_is_pushout_of_epi_comp f g h⟩⟩⟩
variables (f : X ⟶ Y) (g : X ⟶ Z) [is_iso f]
/-- If `f : X ⟶ Y` is iso, then `Y ⨿[X] Z ≅ Z`. This is the explicit colimit cocone. -/
def pushout_cocone_of_left_iso : pushout_cocone f g :=
pushout_cocone.mk (inv f ≫ g) (𝟙 _) $ by simp
@[simp] lemma pushout_cocone_of_left_iso_X :
(pushout_cocone_of_left_iso f g).X = Z := rfl
@[simp] lemma pushout_cocone_of_left_iso_inl :
(pushout_cocone_of_left_iso f g).inl = inv f ≫ g := rfl
@[simp] lemma pushout_cocone_of_left_iso_inr :
(pushout_cocone_of_left_iso f g).inr = 𝟙 _ := rfl
@[simp] lemma pushout_cocone_of_left_iso_ι_app_none :
(pushout_cocone_of_left_iso f g).ι.app none = g := by { delta pushout_cocone_of_left_iso, simp }
@[simp] lemma pushout_cocone_of_left_iso_ι_app_left :
(pushout_cocone_of_left_iso f g).ι.app left = inv f ≫ g := rfl
@[simp] lemma pushout_cocone_of_left_iso_ι_app_right :
(pushout_cocone_of_left_iso f g).ι.app right = 𝟙 _ := rfl
/-- Verify that the constructed cocone is indeed a colimit.
(NOTE(review): the name says `is_limit` although the result is an `is_colimit`;
kept unchanged for compatibility with existing callers.) -/
def pushout_cocone_of_left_iso_is_limit :
is_colimit (pushout_cocone_of_left_iso f g) :=
pushout_cocone.is_colimit_aux' _ (λ s, ⟨s.inr, by simp [← s.condition]⟩)
lemma has_pushout_of_left_iso : has_pushout f g :=
⟨⟨⟨_, pushout_cocone_of_left_iso_is_limit f g⟩⟩⟩
local attribute [instance] has_pushout_of_left_iso
instance pushout_inr_iso_of_left_iso : is_iso (pushout.inr : _ ⟶ pushout f g) :=
begin
refine ⟨⟨pushout.desc (inv f ≫ g) (𝟙 _) (by simp), (by simp), _⟩⟩,
ext,
{ simp [← pushout.condition] },
{ simp [pushout.condition_assoc] },
end
variables (h : W ⟶ X) [epi h]
instance has_pushout_of_right_factors_epi (f : X ⟶ Y) : has_pushout h (h ≫ f) :=
by { conv { congr, rw ←category.comp_id h, }, apply_instance }
instance pushout_inr_iso_of_right_factors_epi (f : X ⟶ Y) :
is_iso (pushout.inr : _ ⟶ pushout h (h ≫ f)) :=
begin
convert (congr_arg is_iso (show pushout.inr ≫ _ = _,
from colimit.iso_colimit_cocone_ι_inv ⟨_, pushout_is_pushout_of_epi_comp (𝟙 _) f h⟩
walking_span.right)).mp infer_instance;
exact (category.comp_id _).symm
end
end pushout_left_iso
section pushout_right_iso
open walking_span
variables (f : X ⟶ Y) (g : X ⟶ Z) [is_iso g]
/-- If `g : X ⟶ Z` is iso, then `Y ⨿[X] Z ≅ Y`. This is the explicit colimit cocone. -/
def pushout_cocone_of_right_iso : pushout_cocone f g :=
pushout_cocone.mk (𝟙 _) (inv g ≫ f) $ by simp
@[simp] lemma pushout_cocone_of_right_iso_X :
(pushout_cocone_of_right_iso f g).X = Y := rfl
@[simp] lemma pushout_cocone_of_right_iso_inl :
(pushout_cocone_of_right_iso f g).inl = 𝟙 _ := rfl
@[simp] lemma pushout_cocone_of_right_iso_inr :
(pushout_cocone_of_right_iso f g).inr = inv g ≫ f := rfl
@[simp] lemma pushout_cocone_of_right_iso_ι_app_none :
(pushout_cocone_of_right_iso f g).ι.app none = f := by { delta pushout_cocone_of_right_iso, simp }
@[simp] lemma pushout_cocone_of_right_iso_ι_app_left :
(pushout_cocone_of_right_iso f g).ι.app left = 𝟙 _ := rfl
@[simp] lemma pushout_cocone_of_right_iso_ι_app_right :
(pushout_cocone_of_right_iso f g).ι.app right = inv g ≫ f := rfl
/-- Verify that the constructed cocone is indeed a colimit.
(NOTE(review): the name says `is_limit` although the result is an `is_colimit`;
kept unchanged for compatibility with existing callers.) -/
def pushout_cocone_of_right_iso_is_limit :
is_colimit (pushout_cocone_of_right_iso f g) :=
pushout_cocone.is_colimit_aux' _ (λ s, ⟨s.inl, by simp [←s.condition]⟩)
lemma has_pushout_of_right_iso : has_pushout f g :=
⟨⟨⟨_, pushout_cocone_of_right_iso_is_limit f g⟩⟩⟩
local attribute [instance] has_pushout_of_right_iso
instance pushout_inl_iso_of_right_iso : is_iso (pushout.inl : _ ⟶ pushout f g) :=
begin
refine ⟨⟨pushout.desc (𝟙 _) (inv g ≫ f) (by simp), (by simp), _⟩⟩,
ext,
{ simp [←pushout.condition] },
{ simp [pushout.condition] },
end
variables (h : W ⟶ X) [epi h]
instance has_pushout_of_left_factors_epi (f : X ⟶ Y) : has_pushout (h ≫ f) h :=
by { conv { congr, skip, rw ←category.comp_id h, }, apply_instance }
instance pushout_inl_iso_of_left_factors_epi (f : X ⟶ Y) :
is_iso (pushout.inl : _ ⟶ pushout (h ≫ f) h) :=
begin
convert (congr_arg is_iso (show pushout.inl ≫ _ = _,
from colimit.iso_colimit_cocone_ι_inv ⟨_, pushout_is_pushout_of_epi_comp f (𝟙 _) h⟩
walking_span.left)).mp infer_instance;
exact (category.comp_id _).symm
end
end pushout_right_iso
section
open walking_cospan
variable (f : X ⟶ Y)
instance has_kernel_pair_of_mono [mono f] : has_pullback f f :=
⟨⟨⟨_, pullback_cone.is_limit_mk_id_id f⟩⟩⟩
lemma fst_eq_snd_of_mono_eq [mono f] : (pullback.fst : pullback f f ⟶ _) = pullback.snd :=
((pullback_cone.is_limit_mk_id_id f).fac (get_limit_cone (cospan f f)).cone left).symm.trans
((pullback_cone.is_limit_mk_id_id f).fac (get_limit_cone (cospan f f)).cone right : _)
@[simp] lemma pullback_symmetry_hom_of_mono_eq [mono f] :
(pullback_symmetry f f).hom = 𝟙 _ := by ext; simp [fst_eq_snd_of_mono_eq]
instance fst_iso_of_mono_eq [mono f] : is_iso (pullback.fst : pullback f f ⟶ _) :=
begin
refine ⟨⟨pullback.lift (𝟙 _) (𝟙 _) (by simp), _, by simp⟩⟩,
ext,
{ simp },
{ simp [fst_eq_snd_of_mono_eq] }
end
instance snd_iso_of_mono_eq [mono f] : is_iso (pullback.snd : pullback f f ⟶ _) :=
by { rw ← fst_eq_snd_of_mono_eq, apply_instance }
end
section
open walking_span
variable (f : X ⟶ Y)
instance has_cokernel_pair_of_epi [epi f] : has_pushout f f :=
⟨⟨⟨_, pushout_cocone.is_colimit_mk_id_id f⟩⟩⟩
lemma inl_eq_inr_of_epi_eq [epi f] : (pushout.inl : _ ⟶ pushout f f) = pushout.inr :=
((pushout_cocone.is_colimit_mk_id_id f).fac
(get_colimit_cocone (span f f)).cocone left).symm.trans
((pushout_cocone.is_colimit_mk_id_id f).fac
(get_colimit_cocone (span f f)).cocone right : _)
-- NOTE(review): despite the `pullback_` prefix, this lemma is about `pushout_symmetry`;
-- the name is kept unchanged for compatibility with existing callers.
@[simp] lemma pullback_symmetry_hom_of_epi_eq [epi f] :
(pushout_symmetry f f).hom = 𝟙 _ := by ext; simp [inl_eq_inr_of_epi_eq]
instance inl_iso_of_epi_eq [epi f] : is_iso (pushout.inl : _ ⟶ pushout f f) :=
begin
refine ⟨⟨pushout.desc (𝟙 _) (𝟙 _) (by simp), by simp, _⟩⟩,
ext,
{ simp },
{ simp [inl_eq_inr_of_epi_eq] }
end
instance inr_iso_of_epi_eq [epi f] : is_iso (pushout.inr : _ ⟶ pushout f f) :=
by { rw ← inl_eq_inr_of_epi_eq, apply_instance }
end
section paste_lemma
variables {X₁ X₂ X₃ Y₁ Y₂ Y₃ : C} (f₁ : X₁ ⟶ X₂) (f₂ : X₂ ⟶ X₃) (g₁ : Y₁ ⟶ Y₂) (g₂ : Y₂ ⟶ Y₃)
variables (i₁ : X₁ ⟶ Y₁) (i₂ : X₂ ⟶ Y₂) (i₃ : X₃ ⟶ Y₃)
variables (h₁ : i₁ ≫ g₁ = f₁ ≫ i₂) (h₂ : i₂ ≫ g₂ = f₂ ≫ i₃)
/--
Given
X₁ - f₁ -> X₂ - f₂ -> X₃
| | |
i₁ i₂ i₃
∨ ∨ ∨
Y₁ - g₁ -> Y₂ - g₂ -> Y₃
Then the big square is a pullback if both the small squares are.
-/
def big_square_is_pullback (H : is_limit (pullback_cone.mk _ _ h₂))
(H' : is_limit (pullback_cone.mk _ _ h₁)) :
is_limit (pullback_cone.mk _ _ (show i₁ ≫ g₁ ≫ g₂ = (f₁ ≫ f₂) ≫ i₃,
by rw [← category.assoc, h₁, category.assoc, h₂, category.assoc])) :=
begin
fapply pullback_cone.is_limit_aux',
intro s,
have : (s.fst ≫ g₁) ≫ g₂ = s.snd ≫ i₃ := by rw [← s.condition, category.assoc],
rcases pullback_cone.is_limit.lift' H (s.fst ≫ g₁) s.snd this with ⟨l₁, hl₁, hl₁'⟩,
rcases pullback_cone.is_limit.lift' H' s.fst l₁ hl₁.symm with ⟨l₂, hl₂, hl₂'⟩,
use l₂,
use hl₂,
use show l₂ ≫ f₁ ≫ f₂ = s.snd, by { rw [← hl₁', ← hl₂', category.assoc], refl },
intros m hm₁ hm₂,
apply pullback_cone.is_limit.hom_ext H',
{ erw [hm₁, hl₂] },
{ apply pullback_cone.is_limit.hom_ext H,
{ erw [category.assoc, ← h₁, ← category.assoc, hm₁, ← hl₂,
category.assoc, category.assoc, h₁], refl },
{ erw [category.assoc, hm₂, ← hl₁', ← hl₂'] } }
end
/--
Given
X₁ - f₁ -> X₂ - f₂ -> X₃
| | |
i₁ i₂ i₃
∨ ∨ ∨
Y₁ - g₁ -> Y₂ - g₂ -> Y₃
Then the big square is a pushout if both the small squares are.
-/
def big_square_is_pushout (H : is_colimit (pushout_cocone.mk _ _ h₂))
(H' : is_colimit (pushout_cocone.mk _ _ h₁)) :
is_colimit (pushout_cocone.mk _ _ (show i₁ ≫ g₁ ≫ g₂ = (f₁ ≫ f₂) ≫ i₃,
by rw [← category.assoc, h₁, category.assoc, h₂, category.assoc])) :=
begin
fapply pushout_cocone.is_colimit_aux',
intro s,
have : i₁ ≫ s.inl = f₁ ≫ (f₂ ≫ s.inr) := by rw [s.condition, category.assoc],
rcases pushout_cocone.is_colimit.desc' H' s.inl (f₂ ≫ s.inr) this with ⟨l₁, hl₁, hl₁'⟩,
rcases pushout_cocone.is_colimit.desc' H l₁ s.inr hl₁' with ⟨l₂, hl₂, hl₂'⟩,
use l₂,
use show (g₁ ≫ g₂) ≫ l₂ = s.inl, by { rw [← hl₁, ← hl₂, category.assoc], refl },
use hl₂',
intros m hm₁ hm₂,
apply pushout_cocone.is_colimit.hom_ext H,
{ apply pushout_cocone.is_colimit.hom_ext H',
{ erw [← category.assoc, hm₁, hl₂, hl₁] },
{ erw [← category.assoc, h₂, category.assoc, hm₂, ← hl₂',
← category.assoc, ← category.assoc, ← h₂], refl } },
{ erw [hm₂, hl₂'] }
end
/--
Given
X₁ - f₁ -> X₂ - f₂ -> X₃
| | |
i₁ i₂ i₃
∨ ∨ ∨
Y₁ - g₁ -> Y₂ - g₂ -> Y₃
Then the left square is a pullback if the right square and the big square are.
-/
def left_square_is_pullback (H : is_limit (pullback_cone.mk _ _ h₂))
(H' : is_limit (pullback_cone.mk _ _ (show i₁ ≫ g₁ ≫ g₂ = (f₁ ≫ f₂) ≫ i₃,
by rw [← category.assoc, h₁, category.assoc, h₂, category.assoc]))) :
is_limit (pullback_cone.mk _ _ h₁) :=
begin
fapply pullback_cone.is_limit_aux',
intro s,
have : s.fst ≫ g₁ ≫ g₂ = (s.snd ≫ f₂) ≫ i₃ :=
by { rw [← category.assoc, s.condition, category.assoc, category.assoc, h₂] },
rcases pullback_cone.is_limit.lift' H' s.fst (s.snd ≫ f₂) this with ⟨l₁, hl₁, hl₁'⟩,
use l₁,
use hl₁,
split,
{ apply pullback_cone.is_limit.hom_ext H,
{ erw [category.assoc, ← h₁, ← category.assoc, hl₁, s.condition], refl },
{ erw [category.assoc, hl₁'], refl } },
{ intros m hm₁ hm₂,
apply pullback_cone.is_limit.hom_ext H',
{ erw [hm₁, hl₁] },
{ erw [hl₁', ← hm₂], exact (category.assoc _ _ _).symm } }
end
/--
Given
X₁ - f₁ -> X₂ - f₂ -> X₃
| | |
i₁ i₂ i₃
∨ ∨ ∨
Y₁ - g₁ -> Y₂ - g₂ -> Y₃
Then the right square is a pushout if the left square and the big square are.
-/
def right_square_is_pushout (H : is_colimit (pushout_cocone.mk _ _ h₁))
(H' : is_colimit (pushout_cocone.mk _ _ (show i₁ ≫ g₁ ≫ g₂ = (f₁ ≫ f₂) ≫ i₃,
by rw [← category.assoc, h₁, category.assoc, h₂, category.assoc]))) :
is_colimit (pushout_cocone.mk _ _ h₂) :=
begin
fapply pushout_cocone.is_colimit_aux',
intro s,
have : i₁ ≫ g₁ ≫ s.inl = (f₁ ≫ f₂) ≫ s.inr :=
by { rw [category.assoc, ← s.condition, ← category.assoc, ← category.assoc, h₁] },
rcases pushout_cocone.is_colimit.desc' H' (g₁ ≫ s.inl) s.inr this with ⟨l₁, hl₁, hl₁'⟩,
dsimp at *,
use l₁,
refine ⟨_,_,_⟩,
{ apply pushout_cocone.is_colimit.hom_ext H,
{ erw [← category.assoc, hl₁], refl },
{ erw [← category.assoc, h₂, category.assoc, hl₁', s.condition] } },
{ exact hl₁' },
{ intros m hm₁ hm₂,
apply pushout_cocone.is_colimit.hom_ext H',
{ erw [hl₁, category.assoc, hm₁] },
{ erw [hm₂, hl₁'] } }
end
end paste_lemma
section
variables (f : X ⟶ Z) (g : Y ⟶ Z) (f' : W ⟶ X)
variables [has_pullback f g] [has_pullback f' (pullback.fst : pullback f g ⟶ _)]
variables [has_pullback (f' ≫ f) g]
/-- The canonical isomorphism `W ×[X] (X ×[Z] Y) ≅ W ×[Z] Y` -/
noncomputable
def pullback_right_pullback_fst_iso :
pullback f' (pullback.fst : pullback f g ⟶ _) ≅ pullback (f' ≫ f) g :=
begin
let := big_square_is_pullback
(pullback.snd : pullback f' (pullback.fst : pullback f g ⟶ _) ⟶ _) pullback.snd
f' f pullback.fst pullback.fst g pullback.condition pullback.condition
(pullback_is_pullback _ _) (pullback_is_pullback _ _),
exact (this.cone_point_unique_up_to_iso (pullback_is_pullback _ _) : _)
end
@[simp, reassoc]
lemma pullback_right_pullback_fst_iso_hom_fst :
(pullback_right_pullback_fst_iso f g f').hom ≫ pullback.fst = pullback.fst :=
is_limit.cone_point_unique_up_to_iso_hom_comp _ _ walking_cospan.left
@[simp, reassoc]
lemma pullback_right_pullback_fst_iso_hom_snd :
(pullback_right_pullback_fst_iso f g f').hom ≫ pullback.snd = pullback.snd ≫ pullback.snd :=
is_limit.cone_point_unique_up_to_iso_hom_comp _ _ walking_cospan.right
@[simp, reassoc]
lemma pullback_right_pullback_fst_iso_inv_fst :
(pullback_right_pullback_fst_iso f g f').inv ≫ pullback.fst = pullback.fst :=
is_limit.cone_point_unique_up_to_iso_inv_comp _ _ walking_cospan.left
@[simp, reassoc]
lemma pullback_right_pullback_fst_iso_inv_snd_snd :
(pullback_right_pullback_fst_iso f g f').inv ≫ pullback.snd ≫ pullback.snd = pullback.snd :=
is_limit.cone_point_unique_up_to_iso_inv_comp _ _ walking_cospan.right
@[simp, reassoc]
lemma pullback_right_pullback_fst_iso_inv_snd_fst :
(pullback_right_pullback_fst_iso f g f').inv ≫ pullback.snd ≫ pullback.fst = pullback.fst ≫ f' :=
begin
rw ← pullback.condition,
exact pullback_right_pullback_fst_iso_inv_fst_assoc _ _ _ _
end
end
section
variables (f : X ⟶ Y) (g : X ⟶ Z) (g' : Z ⟶ W)
variables [has_pushout f g] [has_pushout (pushout.inr : _ ⟶ pushout f g) g']
variables [has_pushout f (g ≫ g')]
/-- The canonical isomorphism `(Y ⨿[X] Z) ⨿[Z] W ≅ Y ⨿[X] W` -/
noncomputable
def pushout_left_pushout_inr_iso :
pushout (pushout.inr : _ ⟶ pushout f g) g' ≅ pushout f (g ≫ g') :=
((big_square_is_pushout g g' _ _ f _ _ pushout.condition pushout.condition
(pushout_is_pushout _ _) (pushout_is_pushout _ _))
.cocone_point_unique_up_to_iso (pushout_is_pushout _ _) : _)
@[simp, reassoc]
lemma inl_pushout_left_pushout_inr_iso_inv :
pushout.inl ≫ (pushout_left_pushout_inr_iso f g g').inv = pushout.inl ≫ pushout.inl :=
((big_square_is_pushout g g' _ _ f _ _ pushout.condition pushout.condition
(pushout_is_pushout _ _) (pushout_is_pushout _ _))
.comp_cocone_point_unique_up_to_iso_inv (pushout_is_pushout _ _) walking_span.left : _)
@[simp, reassoc]
lemma inr_pushout_left_pushout_inr_iso_hom :
pushout.inr ≫ (pushout_left_pushout_inr_iso f g g').hom = pushout.inr :=
((big_square_is_pushout g g' _ _ f _ _ pushout.condition pushout.condition
(pushout_is_pushout _ _) (pushout_is_pushout _ _))
.comp_cocone_point_unique_up_to_iso_hom (pushout_is_pushout _ _) walking_span.right : _)
@[simp, reassoc]
lemma inr_pushout_left_pushout_inr_iso_inv :
pushout.inr ≫ (pushout_left_pushout_inr_iso f g g').inv = pushout.inr :=
by rw [iso.comp_inv_eq, inr_pushout_left_pushout_inr_iso_hom]
@[simp, reassoc]
lemma inl_inl_pushout_left_pushout_inr_iso_hom :
pushout.inl ≫ pushout.inl ≫ (pushout_left_pushout_inr_iso f g g').hom = pushout.inl :=
by rw [← category.assoc, ← iso.eq_comp_inv, inl_pushout_left_pushout_inr_iso_inv]
@[simp, reassoc]
lemma inr_inl_pushout_left_pushout_inr_iso_hom :
pushout.inr ≫ pushout.inl ≫ (pushout_left_pushout_inr_iso f g g').hom = g' ≫ pushout.inr :=
by rw [← category.assoc, ← iso.eq_comp_inv, category.assoc,
inr_pushout_left_pushout_inr_iso_inv, pushout.condition]
end
section pullback_assoc
/-
The objects and morphisms are as follows:
Z₂ - g₄ -> X₃
| |
g₃ f₄
∨ ∨
Z₁ - g₂ -> X₂ - f₃ -> Y₂
| |
g₁ f₂
∨ ∨
X₁ - f₁ -> Y₁
where the two squares are pullbacks.
We can then construct the pullback squares
W - l₂ -> Z₂ - g₄ -> X₃
| |
l₁ f₄
∨ ∨
Z₁ - g₂ -> X₂ - f₃ -> Y₂
and
W' - l₂' -> Z₂
| |
l₁' g₃
∨ ∨
Z₁ X₂
| |
g₁ f₂
∨ ∨
X₁ - f₁ -> Y₁
We will show that both `W` and `W'` are pullbacks over `g₁, g₂`, and thus we may construct a
canonical isomorphism between them. -/
variables {X₁ X₂ X₃ Y₁ Y₂ : C} (f₁ : X₁ ⟶ Y₁) (f₂ : X₂ ⟶ Y₁) (f₃ : X₂ ⟶ Y₂)
variables (f₄ : X₃ ⟶ Y₂) [has_pullback f₁ f₂] [has_pullback f₃ f₄]
include f₁ f₂ f₃ f₄
local notation `Z₁` := pullback f₁ f₂
local notation `Z₂` := pullback f₃ f₄
local notation `g₁` := (pullback.fst : Z₁ ⟶ X₁)
local notation `g₂` := (pullback.snd : Z₁ ⟶ X₂)
local notation `g₃` := (pullback.fst : Z₂ ⟶ X₂)
local notation `g₄` := (pullback.snd : Z₂ ⟶ X₃)
local notation `W` := pullback (g₂ ≫ f₃) f₄
local notation `W'` := pullback f₁ (g₃ ≫ f₂)
local notation `l₁` := (pullback.fst : W ⟶ Z₁)
local notation `l₂` := (pullback.lift (pullback.fst ≫ g₂) pullback.snd
((category.assoc _ _ _).trans pullback.condition) : W ⟶ Z₂)
local notation `l₁'`:= (pullback.lift pullback.fst (pullback.snd ≫ g₃)
(pullback.condition.trans (category.assoc _ _ _).symm) : W' ⟶ Z₁)
local notation `l₂'`:= (pullback.snd : W' ⟶ Z₂)
/-- `(X₁ ×[Y₁] X₂) ×[Y₂] X₃` is the pullback `(X₁ ×[Y₁] X₂) ×[X₂] (X₂ ×[Y₂] X₃)`. -/
def pullback_pullback_left_is_pullback [has_pullback (g₂ ≫ f₃) f₄] :
is_limit (pullback_cone.mk l₁ l₂ (show l₁ ≫ g₂ = l₂ ≫ g₃, from (pullback.lift_fst _ _ _).symm)) :=
begin
apply left_square_is_pullback,
exact pullback_is_pullback f₃ f₄,
convert pullback_is_pullback (g₂ ≫ f₃) f₄,
rw pullback.lift_snd
end
/-- `(X₁ ×[Y₁] X₂) ×[Y₂] X₃` is the pullback `X₁ ×[Y₁] (X₂ ×[Y₂] X₃)`. -/
def pullback_assoc_is_pullback [has_pullback (g₂ ≫ f₃) f₄] :
is_limit (pullback_cone.mk (l₁ ≫ g₁) l₂ (show (l₁ ≫ g₁) ≫ f₁ = l₂ ≫ (g₃ ≫ f₂),
by rw [pullback.lift_fst_assoc, category.assoc, category.assoc, pullback.condition])) :=
begin
apply pullback_cone.flip_is_limit,
apply big_square_is_pullback,
{ apply pullback_cone.flip_is_limit,
exact pullback_is_pullback f₁ f₂ },
{ apply pullback_cone.flip_is_limit,
apply pullback_pullback_left_is_pullback },
{ exact pullback.lift_fst _ _ _ },
{ exact pullback.condition.symm }
end
/-- `X₁ ×[Y₁] (X₂ ×[Y₂] X₃)` is the pullback `(X₁ ×[Y₁] X₂) ×[X₂] (X₂ ×[Y₂] X₃)`. -/
def pullback_pullback_right_is_pullback [has_pullback f₁ (g₃ ≫ f₂)] :
is_limit (pullback_cone.mk l₁' l₂' (show l₁' ≫ g₂ = l₂' ≫ g₃, from pullback.lift_snd _ _ _)) :=
begin
apply pullback_cone.flip_is_limit,
apply left_square_is_pullback,
{ apply pullback_cone.flip_is_limit,
exact pullback_is_pullback f₁ f₂ },
{ apply pullback_cone.flip_is_limit,
convert pullback_is_pullback f₁ (g₃ ≫ f₂),
rw pullback.lift_fst },
{ exact pullback.condition.symm }
end
/-- `X₁ ×[Y₁] (X₂ ×[Y₂] X₃)` is the pullback `(X₁ ×[Y₁] X₂) ×[Y₂] X₃`. -/
def pullback_assoc_symm_is_pullback [has_pullback f₁ (g₃ ≫ f₂)] :
is_limit (pullback_cone.mk l₁' (l₂' ≫ g₄) (show l₁' ≫ (g₂ ≫ f₃) = (l₂' ≫ g₄) ≫ f₄,
by rw [pullback.lift_snd_assoc, category.assoc, category.assoc, pullback.condition])) :=
begin
apply big_square_is_pullback,
exact pullback_is_pullback f₃ f₄,
apply pullback_pullback_right_is_pullback
end
lemma has_pullback_assoc_symm [has_pullback f₁ (g₃ ≫ f₂)] :
has_pullback (g₂ ≫ f₃) f₄ :=
⟨⟨⟨_, pullback_assoc_symm_is_pullback f₁ f₂ f₃ f₄⟩⟩⟩
variables [has_pullback (g₂ ≫ f₃) f₄] [has_pullback f₁ (g₃ ≫ f₂)]
/-- The canonical isomorphism `(X₁ ×[Y₁] X₂) ×[Y₂] X₃ ≅ X₁ ×[Y₁] (X₂ ×[Y₂] X₃)`. -/
noncomputable
def pullback_assoc :
pullback (pullback.snd ≫ f₃ : pullback f₁ f₂ ⟶ _) f₄ ≅
pullback f₁ (pullback.fst ≫ f₂ : pullback f₃ f₄ ⟶ _) :=
(pullback_pullback_left_is_pullback f₁ f₂ f₃ f₄).cone_point_unique_up_to_iso
(pullback_pullback_right_is_pullback f₁ f₂ f₃ f₄)
@[simp, reassoc]
lemma pullback_assoc_inv_fst_fst :
(pullback_assoc f₁ f₂ f₃ f₄).inv ≫ pullback.fst ≫ pullback.fst = pullback.fst :=
begin
transitivity l₁' ≫ pullback.fst,
rw ← category.assoc,
congr' 1,
exact is_limit.cone_point_unique_up_to_iso_inv_comp _ _ walking_cospan.left,
exact pullback.lift_fst _ _ _,
end
/-- `pullback_assoc` followed by the first projection. -/
@[simp, reassoc]
lemma pullback_assoc_hom_fst :
  (pullback_assoc f₁ f₂ f₃ f₄).hom ≫ pullback.fst = pullback.fst ≫ pullback.fst :=
by rw [← iso.eq_inv_comp, pullback_assoc_inv_fst_fst]
/-- `pullback_assoc` relates the second-then-first projection to the first-then-second. -/
@[simp, reassoc]
lemma pullback_assoc_hom_snd_fst :
  (pullback_assoc f₁ f₂ f₃ f₄).hom ≫ pullback.snd ≫ pullback.fst = pullback.fst ≫ pullback.snd :=
begin
  transitivity l₂ ≫ pullback.fst,
  rw ← category.assoc,
  congr' 1,
  exact is_limit.cone_point_unique_up_to_iso_hom_comp _ _ walking_cospan.right,
  exact pullback.lift_fst _ _ _,
end
/-- `pullback_assoc` commutes with the second projections. -/
@[simp, reassoc]
lemma pullback_assoc_hom_snd_snd :
  (pullback_assoc f₁ f₂ f₃ f₄).hom ≫ pullback.snd ≫ pullback.snd = pullback.snd :=
begin
  transitivity l₂ ≫ pullback.snd,
  rw ← category.assoc,
  congr' 1,
  exact is_limit.cone_point_unique_up_to_iso_hom_comp _ _ walking_cospan.right,
  exact pullback.lift_snd _ _ _,
end
/-- `pullback_assoc⁻¹` relates the first-then-second projection to the second-then-first. -/
@[simp, reassoc]
lemma pullback_assoc_inv_fst_snd :
  (pullback_assoc f₁ f₂ f₃ f₄).inv ≫ pullback.fst ≫ pullback.snd = pullback.snd ≫ pullback.fst :=
by rw [iso.inv_comp_eq, pullback_assoc_hom_snd_fst]
/-- `pullback_assoc⁻¹` followed by the second projection. -/
@[simp, reassoc]
lemma pullback_assoc_inv_snd :
  (pullback_assoc f₁ f₂ f₃ f₄).inv ≫ pullback.snd = pullback.snd ≫ pullback.snd :=
by rw [iso.inv_comp_eq, pullback_assoc_hom_snd_snd]
end pullback_assoc
section pushout_assoc
/-
The objects and morphisms are as follows:
Z₂ - g₄ -> X₃
| |
g₃ f₄
∨ ∨
Z₁ - g₂ -> X₂ - f₃ -> Y₂
| |
g₁ f₂
∨ ∨
X₁ - f₁ -> Y₁
where the two squares are pushouts.
We can then construct the pushout squares
Z₁ - g₂ -> X₂ - f₃ -> Y₂
| |
g₁ l₂
∨ ∨
X₁ - f₁ -> Y₁ - l₁ -> W
and
Z₂ - g₄ -> X₃
| |
g₃ f₄
∨ ∨
X₂ Y₂
| |
f₂ l₂'
∨ ∨
Y₁ - l₁' -> W'
We will show that both `W` and `W'` are pushouts over `f₂, f₃`, and thus we may construct a
canonical isomorphism between them. -/
variables {X₁ X₂ X₃ Z₁ Z₂ : C} (g₁ : Z₁ ⟶ X₁) (g₂ : Z₁ ⟶ X₂) (g₃ : Z₂ ⟶ X₂)
variables (g₄ : Z₂ ⟶ X₃) [has_pushout g₁ g₂] [has_pushout g₃ g₄]
include g₁ g₂ g₃ g₄
local notation `Y₁` := pushout g₁ g₂
local notation `Y₂` := pushout g₃ g₄
local notation `f₁` := (pushout.inl : X₁ ⟶ Y₁)
local notation `f₂` := (pushout.inr : X₂ ⟶ Y₁)
local notation `f₃` := (pushout.inl : X₂ ⟶ Y₂)
local notation `f₄` := (pushout.inr : X₃ ⟶ Y₂)
local notation `W` := pushout g₁ (g₂ ≫ f₃)
local notation `W'` := pushout (g₃ ≫ f₂) g₄
local notation `l₁` := (pushout.desc pushout.inl (f₃ ≫ pushout.inr)
(pushout.condition.trans (category.assoc _ _ _)) : Y₁ ⟶ W)
local notation `l₂` := (pushout.inr : Y₂ ⟶ W)
local notation `l₁'`:= (pushout.inl : Y₁ ⟶ W')
local notation `l₂'`:= (pushout.desc (f₂ ≫ pushout.inl) pushout.inr
((category.assoc _ _ _).symm.trans pushout.condition) : Y₂ ⟶ W')
/-- `(X₁ ⨿[Z₁] X₂) ⨿[Z₂] X₃` is the pushout `(X₁ ⨿[Z₁] X₂) ⨿[X₂] (X₂ ⨿[Z₂] X₃)`. -/
def pushout_pushout_left_is_pushout [has_pushout (g₃ ≫ f₂) g₄] :
  is_colimit (pushout_cocone.mk l₁' l₂'
    (show f₂ ≫ l₁' = f₃ ≫ l₂', from (pushout.inl_desc _ _ _).symm)) :=
begin
  -- Flip so the pasting lemma for pushout squares applies on the right.
  apply pushout_cocone.flip_is_colimit,
  apply right_square_is_pushout,
  { apply pushout_cocone.flip_is_colimit,
    exact pushout_is_pushout _ _ },
  { apply pushout_cocone.flip_is_colimit,
    -- The big square is the chosen pushout of `g₃ ≫ f₂` and `g₄`.
    convert pushout_is_pushout (g₃ ≫ f₂) g₄,
    exact pushout.inr_desc _ _ _ },
  { exact pushout.condition.symm }
end
/-- `(X₁ ⨿[Z₁] X₂) ⨿[Z₂] X₃` is the pushout `X₁ ⨿[Z₁] (X₂ ⨿[Z₂] X₃)`. -/
def pushout_assoc_is_pushout [has_pushout (g₃ ≫ f₂) g₄] :
  is_colimit (pushout_cocone.mk (f₁ ≫ l₁') l₂' (show g₁ ≫ (f₁ ≫ l₁') = (g₂ ≫ f₃) ≫ l₂',
    by rw [category.assoc, pushout.inl_desc, pushout.condition_assoc])) :=
begin
  -- Paste the chosen pushout of `g₁, g₂` onto `pushout_pushout_left_is_pushout`.
  apply big_square_is_pushout,
  { apply pushout_pushout_left_is_pushout },
  { exact pushout_is_pushout _ _ }
end
/-- The pushout `X₁ ⨿[Z₁] (X₂ ⨿[Z₂] X₃)` exists, given a pushout of `g₃ ≫ f₂` and `g₄`. -/
lemma has_pushout_assoc [has_pushout (g₃ ≫ f₂) g₄] :
  has_pushout g₁ (g₂ ≫ f₃) :=
⟨⟨⟨_, pushout_assoc_is_pushout g₁ g₂ g₃ g₄⟩⟩⟩
/-- `X₁ ⨿[Z₁] (X₂ ⨿[Z₂] X₃)` is the pushout `(X₁ ⨿[Z₁] X₂) ⨿[X₂] (X₂ ⨿[Z₂] X₃)`. -/
def pushout_pushout_right_is_pushout [has_pushout g₁ (g₂ ≫ f₃)] :
  is_colimit (pushout_cocone.mk l₁ l₂ (show f₂ ≫ l₁ = f₃ ≫ l₂, from pushout.inr_desc _ _ _)) :=
begin
  apply right_square_is_pushout,
  { exact pushout_is_pushout _ _ },
  { -- The big square is the chosen pushout of `g₁` and `g₂ ≫ f₃`.
    convert pushout_is_pushout g₁ (g₂ ≫ f₃),
    rw pushout.inl_desc }
end
/-- `X₁ ⨿[Z₁] (X₂ ⨿[Z₂] X₃)` is the pushout `(X₁ ⨿[Z₁] X₂) ⨿[Z₂] X₃`. -/
def pushout_assoc_symm_is_pushout [has_pushout g₁ (g₂ ≫ f₃)] :
  is_colimit (pushout_cocone.mk l₁ (f₄ ≫ l₂) ((show (g₃ ≫ f₂) ≫ l₁ = g₄ ≫ (f₄ ≫ l₂),
    by rw [category.assoc, pushout.inr_desc, pushout.condition_assoc]))) :=
begin
  -- Flip, then paste the chosen pushout of `g₃, g₄` onto
  -- `pushout_pushout_right_is_pushout`.
  apply pushout_cocone.flip_is_colimit,
  apply big_square_is_pushout,
  { apply pushout_cocone.flip_is_colimit,
    apply pushout_pushout_right_is_pushout },
  { apply pushout_cocone.flip_is_colimit,
    exact pushout_is_pushout _ _ },
  { exact pushout.condition.symm },
  { exact (pushout.inr_desc _ _ _).symm }
end
/-- The pushout `(X₁ ⨿[Z₁] X₂) ⨿[Z₂] X₃` exists, given a pushout of `g₁` and `g₂ ≫ f₃`. -/
lemma has_pushout_assoc_symm [has_pushout g₁ (g₂ ≫ f₃)] :
  has_pushout (g₃ ≫ f₂) g₄ :=
⟨⟨⟨_, pushout_assoc_symm_is_pushout g₁ g₂ g₃ g₄⟩⟩⟩
variables [has_pushout (g₃ ≫ f₂) g₄] [has_pushout g₁ (g₂ ≫ f₃)]
/-- The canonical isomorphism `(X₁ ⨿[Z₁] X₂) ⨿[Z₂] X₃ ≅ X₁ ⨿[Z₁] (X₂ ⨿[Z₂] X₃)`. -/
noncomputable
def pushout_assoc :
  pushout (g₃ ≫ pushout.inr : _ ⟶ pushout g₁ g₂) g₄ ≅
    pushout g₁ (g₂ ≫ pushout.inl : _ ⟶ pushout g₃ g₄) :=
-- Both sides are colimit cocone points over the same span, hence uniquely isomorphic.
(pushout_pushout_left_is_pushout g₁ g₂ g₃ g₄).cocone_point_unique_up_to_iso
  (pushout_pushout_right_is_pushout g₁ g₂ g₃ g₄)
/-- `pushout_assoc` commutes with the first injections. -/
@[simp, reassoc]
lemma inl_inl_pushout_assoc_hom :
  pushout.inl ≫ pushout.inl ≫ (pushout_assoc g₁ g₂ g₃ g₄).hom = pushout.inl :=
begin
  transitivity f₁ ≫ l₁,
  { congr' 1,
    exact (pushout_pushout_left_is_pushout g₁ g₂ g₃ g₄)
      .comp_cocone_point_unique_up_to_iso_hom _ walking_cospan.left },
  { exact pushout.inl_desc _ _ _ }
end
/-- The second-then-first injection composed with `pushout_assoc`. -/
@[simp, reassoc]
lemma inr_inl_pushout_assoc_hom :
  pushout.inr ≫ pushout.inl ≫ (pushout_assoc g₁ g₂ g₃ g₄).hom = pushout.inl ≫ pushout.inr :=
begin
  transitivity f₂ ≫ l₁,
  { congr' 1,
    exact (pushout_pushout_left_is_pushout g₁ g₂ g₃ g₄)
      .comp_cocone_point_unique_up_to_iso_hom _ walking_cospan.left },
  { exact pushout.inr_desc _ _ _ }
end
/-- `pushout_assoc⁻¹` commutes with the second injections. -/
@[simp, reassoc]
lemma inr_inr_pushout_assoc_inv :
  pushout.inr ≫ pushout.inr ≫ (pushout_assoc g₁ g₂ g₃ g₄).inv = pushout.inr :=
begin
  transitivity f₄ ≫ l₂',
  { congr' 1,
    exact (pushout_pushout_left_is_pushout g₁ g₂ g₃ g₄).comp_cocone_point_unique_up_to_iso_inv
      (pushout_pushout_right_is_pushout g₁ g₂ g₃ g₄) walking_cospan.right },
  { exact pushout.inr_desc _ _ _ }
end
/-- The first injection composed with `pushout_assoc⁻¹`. -/
@[simp, reassoc]
lemma inl_pushout_assoc_inv :
  pushout.inl ≫ (pushout_assoc g₁ g₂ g₃ g₄).inv = pushout.inl ≫ pushout.inl :=
by rw [iso.comp_inv_eq, category.assoc, inl_inl_pushout_assoc_hom]
/-- The first-then-second injection composed with `pushout_assoc⁻¹`. -/
@[simp, reassoc]
lemma inl_inr_pushout_assoc_inv :
  pushout.inl ≫ pushout.inr ≫ (pushout_assoc g₁ g₂ g₃ g₄).inv = pushout.inr ≫ pushout.inl :=
by rw [← category.assoc, iso.comp_inv_eq, category.assoc, inr_inl_pushout_assoc_hom]
/-- The second injection composed with `pushout_assoc`. -/
@[simp, reassoc]
lemma inr_pushout_assoc_hom :
  pushout.inr ≫ (pushout_assoc g₁ g₂ g₃ g₄).hom = pushout.inr ≫ pushout.inr :=
by rw [← iso.eq_comp_inv, category.assoc, inr_inr_pushout_assoc_inv]
end pushout_assoc
variables (C)
/--
`has_pullbacks` represents a choice of pullback for every pair of morphisms,
i.e. all limits of shape `walking_cospan`.

See <https://stacks.math.columbia.edu/tag/001W>
-/
abbreviation has_pullbacks := has_limits_of_shape walking_cospan C
/-- `has_pushouts` represents a choice of pushout for every pair of morphisms,
i.e. all colimits of shape `walking_span`. -/
abbreviation has_pushouts := has_colimits_of_shape walking_span C
/-- If `C` has all limits of diagrams `cospan f g`, then it has all pullbacks -/
lemma has_pullbacks_of_has_limit_cospan
  [Π {X Y Z : C} {f : X ⟶ Z} {g : Y ⟶ Z}, has_limit (cospan f g)] :
  has_pullbacks C :=
-- Every `walking_cospan` diagram is isomorphic to a `cospan`, so its limit exists.
{ has_limit := λ F, has_limit_of_iso (diagram_iso_cospan F).symm }
/-- If `C` has all colimits of diagrams `span f g`, then it has all pushouts -/
lemma has_pushouts_of_has_colimit_span
  [Π {X Y Z : C} {f : X ⟶ Y} {g : X ⟶ Z}, has_colimit (span f g)] :
  has_pushouts C :=
-- Every `walking_span` diagram is isomorphic to a `span`, so its colimit exists.
{ has_colimit := λ F, has_colimit_of_iso (diagram_iso_span F) }
/-- The duality equivalence `walking_spanᵒᵖ ≌ walking_cospan` -/
@[simps]
def walking_span_op_equiv : walking_spanᵒᵖ ≌ walking_cospan :=
-- A span is a wide pushout shape over two objects; dualise the general statement.
wide_pushout_shape_op_equiv _
/-- The duality equivalence `walking_cospanᵒᵖ ≌ walking_span` -/
@[simps]
def walking_cospan_op_equiv : walking_cospanᵒᵖ ≌ walking_span :=
-- A cospan is a wide pullback shape over two objects; dualise the general statement.
wide_pullback_shape_op_equiv _
/-- Having wide pullback at any universe level implies having binary pullbacks. -/
@[priority 100] -- see Note [lower instance priority]
instance has_pullbacks_of_has_wide_pullbacks [has_wide_pullbacks.{w} C] : has_pullbacks C :=
begin
  -- Shrink the universe of the wide pullback shapes down to `0`.
  haveI := has_wide_pullbacks_shrink.{0 w} C,
  apply_instance
end
variable {C}
/-- Given a morphism `f : X ⟶ Y`, we can take morphisms over `Y` to morphisms over `X` via
pullbacks. This is right adjoint to `over.map` (TODO) -/
@[simps obj_left obj_hom map_left {rhs_md := semireducible, simp_rhs := tt}]
def base_change [has_pullbacks C] {X Y : C} (f : X ⟶ Y) : over Y ⥤ over X :=
{ obj := λ g, over.mk (pullback.snd : pullback g.hom f ⟶ _),
  -- Functoriality is induced by the universal property via `pullback.map`.
  map := λ g₁ g₂ i, over.hom_mk (pullback.map _ _ _ _ i.left (𝟙 _) (𝟙 _) (by simp) (by simp))
    (by simp) }
end category_theory.limits
|
{"author": "leanprover-community", "repo": "mathlib", "sha": "5e526d18cea33550268dcbbddcb822d5cde40654", "save_path": "github-repos/lean/leanprover-community-mathlib", "path": "github-repos/lean/leanprover-community-mathlib/mathlib-5e526d18cea33550268dcbbddcb822d5cde40654/src/category_theory/limits/shapes/pullbacks.lean"}
|
#pragma once
#ifndef TRANSFORM_HPP
#define TRANSFORM_HPP
#include <stdio.h>
#include <armadillo>
// Factory for 3D transformation matrices built on Armadillo dense matrices.
// NOTE(review): the exact matrix dimensions (presumably 4x4 homogeneous) and
// angle units of R() are determined by the implementation file -- confirm there.
class Transform
{
public:
Transform();
// Translation matrix for offset (tx, ty, tz).
arma::dmat T(double tx, double ty, double tz);
// Scaling matrix with factors (sx, sy, sz).
arma::dmat S(double sx, double sy, double sz);
// Rotation matrix about axis (ax, ay, az) by `angle`.
arma::dmat R(double ax, double ay, double az, double angle);
};
#endif // TRANSFORM_HPP
|
{"hexsha": "0ddd0a494783ff37a400bb168b323c224a94166e", "size": 333, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "proyecto/Transform.hpp", "max_stars_repo_name": "Pedejeca135/GRAFICACION_UASLP", "max_stars_repo_head_hexsha": "51674129cc3a853450509acc7e8c579bb167da11", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "proyecto/Transform.hpp", "max_issues_repo_name": "Pedejeca135/GRAFICACION_UASLP", "max_issues_repo_head_hexsha": "51674129cc3a853450509acc7e8c579bb167da11", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "proyecto/Transform.hpp", "max_forks_repo_name": "Pedejeca135/GRAFICACION_UASLP", "max_forks_repo_head_hexsha": "51674129cc3a853450509acc7e8c579bb167da11", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 20.8125, "max_line_length": 62, "alphanum_fraction": 0.6876876877, "num_tokens": 85}
|
import json
import os
from random import choice
import jams
import librosa
import numpy as np
import scipy
from massage import SF_PATH, ACOUSTIC_SF_MFCC
def compute_avg_mfcc(fpath=None, y=None, sr=None):
    """Compute the time-averaged MFCC vector of a signal.

    Parameters
    ----------
    fpath : str, default=None
        Path to an audio file. When given, the file is loaded as mono and
        takes precedence over ``y``/``sr``.
    y : ndarray, default=None
        Array containing a mono audio signal.
    sr : float
        Sample rate.

    Returns
    -------
    avg_mfcc : ndarray
        Average of 40 MFCCs over time, with the first coefficient discarded.
    """
    if fpath is not None:
        y, sr = librosa.load(fpath, mono=True)
    # Dropping the first row before averaging is equivalent to averaging
    # first and then discarding the first bin.
    mfccs = librosa.feature.mfcc(y=y, sr=sr, n_mfcc=40)
    return mfccs[1:].mean(axis=1)
def onset_offset(y=None, sr=None, hop_length=512,
                 feature=librosa.feature.melspectrogram):
    """Given a signal, compute onsets and offsets based on a spectral feature.

    Parameters
    ----------
    y : ndarray, default=None
        Mono signal.
    sr : float, default=None
        Sample rate.
    hop_length : int, default=512
        Hop length associated with the feature computation.
    feature : function, default=librosa.feature.melspectrogram
        Feature function to be used in computing onsets/offsets.

    Returns
    -------
    onsets_t : ndarray
        Times (seconds) of the detected onsets.
    offsets_t : ndarray
        Times (seconds) of the detected offsets.
    onset_strength : ndarray
        Strength of each detected onset.
    """
    onsets = librosa.onset.onset_detect(y=y, sr=sr, hop_length=hop_length)
    X = feature(y=y, sr=sr, hop_length=hop_length)
    # Sub-segment each inter-onset interval in two; the extra boundaries that
    # are not onsets themselves are taken to be the offsets.
    boundaries = librosa.segment.subsegment(X, onsets, n_segments=2)
    # Throw out everything before the first onset
    boundaries = boundaries[boundaries > np.min(onsets)]
    offsets = np.setdiff1d(boundaries, onsets)
    onset_strength = librosa.onset.onset_strength(S=X)
    onsets_t = librosa.core.frames_to_time(
        onsets, hop_length=hop_length, sr=sr)
    offsets_t = librosa.core.frames_to_time(
        offsets, hop_length=hop_length, sr=sr)
    # Return strengths sampled only at the detected onset frames.
    return onsets_t, offsets_t, onset_strength[onsets]
def compute_envelope(y_input, thresh=0.01, lpf_cutoff=0.03, alpha=20.0,
                     win_length=4096, theta=0.15):
    """Compute the amplitude envelope of a single-channel signal.

    Parameters
    ----------
    y_input : ndarray
        Mono signal.
    thresh : float, default=0.01
        Threshold below which normalized frame energy is zeroed out.
    lpf_cutoff : float, default=0.03
        Cutoff of the smoothing low-pass filter (normalized frequency, as
        used by ``scipy.signal.butter``).
    alpha : float, default=20.0
        Controls the steepness of the envelope via a sigmoid:
        higher alpha gives a steeper envelope transition,
        lower alpha gives a more gradual envelope transition.
    win_length : int, default=4096
        Window size used for the STFT analysis.
    theta : float, default=0.15
        Bias on the smoothed signal inside the logistic function:
        higher theta reduces envelope activation sensitivity,
        lower theta increases envelope activation sensitivity.

    Returns
    -------
    y_env : ndarray
        Vector (one value per input sample) specifying the amplitude envelope.
    """
    # Non-overlapping analysis frames: hop_length equals win_length.
    S = librosa.stft(
        y_input, n_fft=win_length, hop_length=win_length,
        win_length=win_length)
    # Sample index of each STFT frame, for interpolating back to samples.
    S_samples = librosa.core.frames_to_samples(
        range(len(S[0])), hop_length=win_length)
    y_smooth = np.mean(np.abs(S), axis=0)
    # normalization (to overall energy)
    if np.max(np.abs(y_smooth)) > 0:
        y_smooth = y_smooth / np.max(np.abs(y_smooth))
    # binary thresholding for low overall energy events
    y_smooth[y_smooth < thresh] = 0
    # LP filter
    b_coeff, a_coeff = scipy.signal.butter(2, lpf_cutoff, 'low')
    y_smooth = scipy.signal.filtfilt(b_coeff, a_coeff, y_smooth)
    # logistic function to semi-binarize the output; confidence value
    y_conf = 1.0 - (1.0 / (1.0 + np.exp(np.dot(alpha, (y_smooth - theta)))))
    # Interpolate per-frame confidences up to per-sample resolution.
    energy_interpolator = scipy.interpolate.interp1d(
        S_samples, y_conf, bounds_error=False, fill_value='extrapolate')
    y_env = energy_interpolator(np.arange(len(y_input)))
    return y_env
def get_energy_envelope(y):
    """Generate the energy envelope per channel of a signal y.

    Parameters
    ----------
    y : ndarray
        The audio signal, either mono (shape ``(samples,)``) or
        multi-channel (shape ``(num_ch, samples)``).

    Returns
    -------
    energy_array : ndarray
        Energy envelope of the audio signal, shape ``(num_ch, samples)``;
        mono input yields shape ``(1, samples)``.
    """
    # Promote mono input to a single-row 2D array so mono and multi-channel
    # share one code path (replaces the previous IndexError-driven fallback).
    y_2d = np.atleast_2d(y)
    max_amplitudes = np.max(np.abs(y_2d), axis=1)

    energy_list = []
    for ch in range(y_2d.shape[0]):
        energy_ch = compute_envelope(y_2d[ch, :])
        # Rescale each channel's envelope back to its original peak amplitude.
        if np.max(energy_ch) > 0.0:
            energy_ch = max_amplitudes[ch] * energy_ch / np.max(energy_ch)
        energy_list.append(energy_ch)

    return np.array(energy_list)
def pick_sf(y, sr, instrument):
    """Pick the soundfont closest to the audio signal by average MFCC.

    Parameters
    ----------
    y : ndarray of float
        Audio signal, y.shape = num_ch * samples.
    sr : float
        Sampling rate of the audio.
    instrument : str
        Label of the instrument family, used to pick which group of MFCCs
        this algorithm will look in. Must contain one of the following:
        - 'acoustic guitar'

    Returns
    -------
    soundfont_path : str
        The path to the sf2 file.
    program : int
        The MIDI program number to use for this specific soundfont.

    Raises
    ------
    ValueError
        If ``instrument`` does not match a known instrument family.
    """
    y_mono = librosa.core.to_mono(y)
    avg_mfcc = compute_avg_mfcc(y=y_mono, sr=sr)
    if 'acoustic guitar' in instrument:
        sf_mfcc = ACOUSTIC_SF_MFCC
    else:
        raise ValueError("invalid instrument {}".format(instrument))
    # sound font Matching: z-score the stored MFCC matrix and the query
    # vector, then pick the nearest row by Euclidean distance.
    z_avg = np.mean(sf_mfcc['matrix'], axis=0)
    z_std = np.std(sf_mfcc['matrix'], axis=0)
    z_mat = (sf_mfcc['matrix'] - z_avg) / z_std
    z_current = (avg_mfcc - z_avg) / z_std
    mfcc_diff = np.linalg.norm(z_mat - z_current, axis=1)
    min_diff_idx = np.argmin(mfcc_diff)
    program = sf_mfcc['programs'][min_diff_idx]
    soundfont_name = sf_mfcc['soundfonts'][min_diff_idx]
    soundfont_path = os.path.join(SF_PATH, "{}.sf2".format(soundfont_name))
    return soundfont_path, program
def amplitude_to_velocity(energies, max_velo=120, min_velo=60):
    """Map amplitude values to sensible MIDI velocities.

    Energies are scaled by their peak absolute value, so the result lies in
    [min_velo, max_velo]: the peak maps to max_velo, its negation to
    min_velo, and zero energy to the midpoint.

    Parameters
    ----------
    energies : ndarray of float
        An array of energy values.
    max_velo : int
        Maximum allowed MIDI velocity value.
    min_velo : int
        Minimum allowed MIDI velocity value.

    Returns
    -------
    velocities : ndarray of int
        An array of velocities in the range [min_velo, max_velo].
        The range is chosen to be restricted on purpose.
    """
    energies = np.asarray(energies, dtype=float)
    v_range = float(max_velo - min_velo)
    v_mean = np.mean([max_velo, min_velo])
    peak = np.max(np.abs(energies)) if energies.size else 0.0
    if peak == 0.0:
        # All-zero (or empty) input previously divided by zero and produced
        # NaN velocities; map zero energy to the midpoint velocity instead.
        normalized = np.zeros_like(energies)
    else:
        normalized = energies / peak
    velocities = v_range / 2.0 * normalized + v_mean
    return np.round(velocities).astype(int)
def midi_to_jams(midi_data):
    """Convert a PrettyMIDI object into a JAMS object (notes only).

    Each instrument becomes one ``pitch_midi`` annotation; velocities in
    [0, 127] are mapped to confidences in [0.0, 1.0].

    Parameters
    ----------
    midi_data : PrettyMIDI
        MIDI data with notes.

    Returns
    -------
    jam : JAMS
        JAMS object with one note annotation per instrument.
    """
    jam = jams.JAMS()
    for instrument in midi_data.instruments:
        inst_dur = instrument.get_end_time()
        # Track the file duration as the longest instrument seen so far.
        if jam.file_metadata.duration is None:
            jam.file_metadata.duration = 0
        if jam.file_metadata.duration < inst_dur:
            jam.file_metadata.duration = inst_dur
        # One annotation container per instrument.
        annotation = jams.Annotation(
            namespace='pitch_midi', time=0, duration=inst_dur)
        for note in instrument.notes:
            annotation.append(
                time=note.start, value=note.pitch,
                duration=note.end - note.start,
                confidence=note.velocity / 127.0)
        jam.annotations.append(annotation)
    return jam
def voicing_dist(previous_voicing, voicing_candidate):
    """Find the 'distance' between the previous voicing and the candidate.

    For each note of the previous voicing, take the distance to the closest
    candidate note; the voicing distance is the mean of these.

    Parameters
    ----------
    previous_voicing : list of int
        List elements should be MIDI values from 0 to 127, indicating
        pitches in a chord voicing.
    voicing_candidate : list of int
        List elements should be MIDI values from 0 to 127, indicating
        pitches in a chord voicing.

    Returns
    -------
    dist : float
        Average of min distance between notes.
    """
    previous_voicing = np.asarray(previous_voicing)
    voicing_candidate = np.asarray(voicing_candidate)
    # The nearest-candidate lookup in the old implementation reduced to the
    # minimum absolute difference; compute that directly (also avoids the
    # deprecated assignment of a size-1 array into a scalar slot).
    note_dists = [np.min(np.abs(note - voicing_candidate))
                  for note in previous_voicing]
    return np.mean(note_dists)
def get_all_voicings(voicing_file):
    """Load chord voicings from a JSON file.

    Parameters
    ----------
    voicing_file : str
        Path to a JSON file of voicings.

    Returns
    -------
    voicings : dict
        Keys are chord names, values are lists of voicings (lists). Each
        voicing is a sorted, de-duplicated list of up to 6 MIDI note
        numbers. All voicings that include notes lower than E2 are
        discarded.
    """
    with open(voicing_file, 'r') as f_handle:
        voicings = json.load(f_handle)

    min_note = librosa.note_to_midi('E2')
    for chord_name in voicings.keys():
        playable = [
            sorted(list(set(voicing)))
            for voicing in voicings[chord_name]
            if np.min(voicing) >= min_note
        ]
        voicings[chord_name] = playable
    return voicings
def choose_voicing(chord_name, voicings, prev_voicing=None):
    """Pick the best voicing for a chord, optionally close to a previous one.

    Parameters
    ----------
    chord_name : str
        Chord name of the form C:maj6, G:dim7, etc.
    prev_voicing : list of int
        Optional - previous voicing; when given, the candidate minimizing
        ``voicing_dist`` is chosen, otherwise one is picked at random.
    voicings : dict
        Dictionary of possible voicings (keyed with flat spellings:
        Eb, Ab, Bb).

    Returns
    -------
    voicing : list of int
        Best voicing for the given chord name.
        List members are int midi values.
    """
    parts = chord_name.split(':')
    root, quality = parts[0], parts[1]
    # The voicing table uses flat spellings, so translate sharp roots.
    flat_spelling = {'D#': 'Eb', 'G#': 'Ab', 'A#': 'Bb'}
    root = flat_spelling.get(root, root)
    candidates = voicings['{}:{}'.format(root, quality)]

    if prev_voicing is None:
        return choice(candidates)

    distances = [voicing_dist(prev_voicing, cand) for cand in candidates]
    return candidates[int(np.argmin(distances))]
|
{"hexsha": "1722e4f6b66fa2a8aabd8ef9322b933bc9ee30e4", "size": 11662, "ext": "py", "lang": "Python", "max_stars_repo_path": "massage/resynth/util.py", "max_stars_repo_name": "justinsalamon/massage", "max_stars_repo_head_hexsha": "b92888f3f8f14d2ad57aef7844e2cd03e6598b42", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 17, "max_stars_repo_stars_event_min_datetime": "2017-07-07T07:36:11.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-05T00:57:25.000Z", "max_issues_repo_path": "massage/resynth/util.py", "max_issues_repo_name": "justinsalamon/massage", "max_issues_repo_head_hexsha": "b92888f3f8f14d2ad57aef7844e2cd03e6598b42", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2017-07-07T20:40:58.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-08T14:23:49.000Z", "max_forks_repo_path": "massage/resynth/util.py", "max_forks_repo_name": "justinsalamon/massage", "max_forks_repo_head_hexsha": "b92888f3f8f14d2ad57aef7844e2cd03e6598b42", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-06-10T08:03:09.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-18T08:27:08.000Z", "avg_line_length": 31.5189189189, "max_line_length": 79, "alphanum_fraction": 0.6470588235, "include": true, "reason": "import numpy,import scipy", "num_tokens": 2911}
|
from __future__ import annotations # postpone evaluation of annotations
import logging
from typing import Any, Dict, List, Optional, Tuple
import cv2
import numpy as np
import numpy.typing as npt
from pyquaternion import Quaternion
from scipy import ndimage
from scipy.spatial.transform import Rotation as R
from sqlalchemy import Column, inspect
from sqlalchemy.orm import relationship
from sqlalchemy.schema import ForeignKey
from sqlalchemy.types import Float, Integer
from nuplan.database.common import sql_types
from nuplan.database.common.utils import simple_repr
from nuplan.database.maps_db.gpkg_mapsdb import GPKGMapsDB
from nuplan.database.maps_db.utils import build_lane_segments_from_blps, connect_blp_predecessor, connect_blp_successor
from nuplan.database.nuplan_db.models import Base, Image, generate_multi_scale_connections
from nuplan.database.nuplan_db.utils import crop_rect, get_candidates
from nuplan.database.nuplan_db.vector_map_np import VectorMapNp
logger = logging.getLogger()
class EgoPose(Base):
    """
    Ego vehicle pose at a particular timestamp. Given with respect to global coordinate system.
    """

    __tablename__ = "ego_pose"

    # Primary key.
    token = Column(sql_types.HexLen8, primary_key=True)  # type: str
    # Integer timestamp of the pose (unit not shown here; presumably
    # microseconds -- confirm against the data loader).
    timestamp = Column(Integer)  # field type: int
    # Translation in the global frame.
    x = Column(Float)  # type: float
    y = Column(Float)  # type: float
    z = Column(Float)  # type: float
    # Orientation quaternion components, consumed as (w, x, y, z) by the
    # `quaternion` property below.
    qw: float = Column(Float)
    qx: float = Column(Float)
    qy: float = Column(Float)
    qz: float = Column(Float)
    # Velocity components.
    vx = Column(Float)  # type: float
    vy = Column(Float)  # type: float
    vz = Column(Float)  # type: float
    # Acceleration components.
    acceleration_x = Column(Float)  # type: float
    acceleration_y = Column(Float)  # type: float
    acceleration_z = Column(Float)  # type: float
    # Angular rate components.
    angular_rate_x = Column(Float)  # type: float
    angular_rate_y = Column(Float)  # type: float
    angular_rate_z = Column(Float)  # type: float
    # Presumably the EPSG code of the coordinate reference system -- verify.
    epsg = Column(Integer)  # type: int
    # Foreign key to the log this pose belongs to.
    log_token = Column(sql_types.HexLen8, ForeignKey("log.token"), nullable=False)  # type: str

    # One-to-one link (uselist=False) to the image captured at this pose.
    image = relationship(
        "Image", foreign_keys="Image.ego_pose_token", back_populates="ego_pose", uselist=False
    )  # type: Image
@property
def _session(self) -> Any:
    """
    Get the underlying session.
    :return: The underlying session (via SQLAlchemy runtime inspection).
    """
    return inspect(self).session
def __repr__(self) -> str:
    """
    Return the string representation.
    :return: The string representation.
    """
    # Delegate to the shared simple_repr helper used across these models.
    desc: str = simple_repr(self)
    return desc
@property
def quaternion(self) -> Quaternion:
    """
    Get the orientation of ego vehicle as quaternion respect to global coordinate system.
    :return: The orientation in quaternion.
    """
    # Component order is (w, x, y, z), matching pyquaternion's constructor.
    return Quaternion(self.qw, self.qx, self.qy, self.qz)
@property
def translation_np(self) -> npt.NDArray[np.float64]:
    """
    Position of ego vehicle respect to global coordinate system.
    :return: <np.float: 3> Translation as (x, y, z).
    """
    return np.array([self.x, self.y, self.z])
@property
def trans_matrix(self) -> npt.NDArray[np.float64]:
    """
    Get the transformation matrix.
    :return: <np.float: 4, 4>. Transformation matrix.
    """
    # Start from the 4x4 homogeneous matrix of the orientation quaternion,
    # then install the translation in the last column.
    transform: npt.NDArray[np.float64] = self.quaternion.transformation_matrix
    transform[:3, 3] = self.translation_np
    return transform
@property
def trans_matrix_inv(self) -> npt.NDArray[np.float64]:
    """
    Get the inverse transformation matrix.
    :return: <np.float: 4, 4>. Inverse transformation matrix.
    """
    # Invert [R | t] analytically: the inverse rotation is R^T and the
    # inverse translation is -R^T t (cheaper than np.linalg.inv).
    tm: npt.NDArray[np.float64] = np.eye(4)
    rot_inv = self.quaternion.rotation_matrix.T
    tm[:3, :3] = rot_inv
    # `-self.translation_np` is 1-D, so the previous np.transpose() call
    # was a no-op and has been removed.
    tm[:3, 3] = rot_inv.dot(-self.translation_np)
    return tm
def rotate_2d_points2d_to_ego_vehicle_frame(self, points2d: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
    """
    Rotate 2D points from global frame to ego-vehicle frame.
    :param points2d: <np.float: num_points, 2>. 2D points in global frame.
    :return: <np.float: num_points, 2>. 2D points rotated to ego-vehicle frame.
    """
    # Add zeros to the z dimension to make them 3D points.
    points3d: npt.NDArray[np.float32] = np.concatenate((points2d, np.zeros_like(points2d[:, 0:1])), axis=-1)
    # We need to extract the rotation around the z-axis only, since the points are 2D.
    # Construct scipy rotation instance using the (transposed, i.e. inverse)
    # rotation matrix from the quaternion.
    rotation = R.from_matrix(self.quaternion.rotation_matrix.T)
    # Extract the angle of rotation around z-axis from the rotation.
    ego_rotation_angle = rotation.as_euler('zxy', degrees=True)[0]
    # Construct scipy rotation instance using ego_rotation_angle.
    xy_rotation = R.from_euler('z', ego_rotation_angle, degrees=True)
    # Rotate the points to align with ego pose.
    rotated_points3d = xy_rotation.apply(points3d)
    # Remove the z dimension.
    rotated_points2d: npt.NDArray[np.float64] = rotated_points3d[:, :2]
    return rotated_points2d
def get_map_crop(
    self,
    maps_db: Optional[GPKGMapsDB],
    xrange: Tuple[float, float],
    yrange: Tuple[float, float],
    map_layer_name: str,
    rotate_face_up: bool,
    target_imsize_xy: Optional[Tuple[float, float]] = None,
) -> Tuple[Optional[npt.NDArray[np.float64]], npt.NDArray[np.float64], Tuple[float, ...]]:
    """
    This function returns the crop of the map centered at the current ego-pose with the given xrange and yrange.
    :param maps_db: Map database associated with this database. When None, no map crop is produced
        (the returned crop is None) and translation is computed with identity pixel coordinates.
    :param xrange: The range in x direction in meters relative to the current ego-pose. Eg: (-60, 60]).
    :param yrange: The range in y direction in meters relative to the current ego-pose Eg: (-60, 60).
    :param map_layer_name: A relevant map layer. Eg: 'drivable_area' or 'intensity'.
    :param rotate_face_up: Boolean indicating whether to rotate the image face up with respect to ego-pose.
    :param target_imsize_xy: The target grid xy dimensions for the output array. The xy resolution in meters / grid
        may be scaled by zooming to the desired dimensions.
    :return: (map_crop, map_translation, map_scale). Where:
        map_crop: The desired crop of the map (None when no map layer is available).
        map_translation: The translation in map coordinates from the origin to the ego-pose.
        map_scale: Map scale (inverse of the map precision). This will be a tuple specifying the zoom in both the x
            and y direction if the target_imsize_xy parameter was set, which causes the resolution to change.
        map_scale and map_translation are useful for transforming objects like pointcloud/boxes to the map_crop.
        Refer to render_on_map().
    """
    if maps_db is None:
        # Bug fix: `map_layer` must also be defined on this branch, otherwise
        # the `if map_layer is None` checks below raise UnboundLocalError.
        map_layer = None
        precision: float = 1

        def to_pixel_coords(x: float, y: float) -> Tuple[float, float]:
            """
            Get the image coordinates given the x-y coordinates of point. This implementation simply returns the
            same coordinates.
            :param x: Global x coordinate.
            :param y: Global y coordinate.
            :return: Pixel coordinates in map.
            """
            return x, y

    else:
        map_layer = maps_db.load_layer(self.log.map_version, map_layer_name)
        precision = map_layer.precision
        to_pixel_coords = map_layer.to_pixel_coords

    map_scale: Tuple[float, ...] = (1.0 / precision, 1.0 / precision, 1.0)

    # Pixel coordinates of the ego-pose in the map layer.
    ego_translation = self.translation_np
    center_x, center_y = to_pixel_coords(ego_translation[0], ego_translation[1])
    center_x, center_y = int(center_x), int(center_y)

    # Crop corners relative to the ego-pose, in pixels.
    top_left = int(xrange[0] * map_scale[0]), int(yrange[0] * map_scale[1])
    bottom_right = int(xrange[1] * map_scale[0]), int(yrange[1] * map_scale[1])

    # We need to extract the rotation around the z-axis only, since we are cropping a 2D map.
    # Construct scipy rotation instance using the rotation matrix from quaternion.
    rotation = R.from_matrix(self.quaternion.rotation_matrix.T)
    # Extract the angle of rotation around z-axis from the rotation.
    ego_rotation_angle = rotation.as_euler('zxy', degrees=True)[0]
    # Construct scipy rotation instance using ego_rotation_angle.
    xy_rotation = R.from_euler('z', ego_rotation_angle, degrees=True)

    # Rotate the corner points of the desired map crop to align with ego pose.
    rotated = xy_rotation.apply(
        [
            [top_left[0], top_left[1], 0],
            [top_left[0], bottom_right[1], 0],
            [bottom_right[0], top_left[1], 0],
            [bottom_right[0], bottom_right[1], 0],
        ]
    )[:, :2]

    # Construct minAreaRect using the 4 corner points.
    rect = cv2.minAreaRect(np.hstack([rotated[:, :1] + center_x, rotated[:, 1:] + center_y]).astype(int))
    rect_angle = rect[2]

    # Due to rounding error, the dimensions returned by cv2 may be off by 1, therefore it's better to manually
    # calculate the cropped dimensions instead of relying on the values returned by cv2 in rect[1].
    cropped_dimensions: npt.NDArray[np.float32] = np.array(
        [map_scale[0] * (xrange[1] - xrange[0]), map_scale[1] * (yrange[1] - yrange[0])]
    )

    # In OpenCV 4.4, the angle returned by cv2.minAreaRect is [-90,0). In OpenCV 4.5, the angle returned
    # appears to be [0, 90), though this isn't documented anywhere. To be compatible with both versions,
    # we adjust the angle to be [-90,0) if it isn't already.
    if rect_angle >= 0:
        rect = (rect[0], cropped_dimensions, rect_angle - 90)
    else:
        rect = (rect[0], cropped_dimensions, rect_angle)

    # We construct rect using cv2.minAreaRect, which takes only 4 unordered corner points, and can not consider
    # the angle of the required rect. The range of 'angle' in cv2.minAreaRect is [-90,0).
    # A good explanation for the angle can be found at :
    # https://namkeenman.wordpress.com/2015/12/18/open-cv-determine-angle-of-rotatedrect-minarearect/
    # Hence, we have to manually rotate the map after cropping based on the initial rotation angle.
    map_rotate = 0
    if ego_rotation_angle < -90:
        map_rotate = -90
    if -90 < ego_rotation_angle < 0:
        map_rotate = 0
    if 0 < ego_rotation_angle < 90:
        map_rotate = 90
    if 90 < ego_rotation_angle < 180:
        map_rotate = 180

    if map_layer is None:
        map_crop = None
    else:
        # Crop the rect using minAreaRect.
        map_crop = crop_rect(map_layer.data, rect)
        # Rotate the cropped map using adjusted angles,
        # since the angle is reset in cv2.minAreaRect every 90 degrees.
        map_crop = ndimage.rotate(map_crop, map_rotate, reshape=False)
        if rotate_face_up:
            # The map_crop is aligned with the ego_pose, but ego_pose is facing towards the right of the canvas,
            # but we need ego_pose to be facing up, hence rotating an extra 90 degrees.
            map_crop = np.rot90(map_crop)

    # These are in units of pixels, where x points to the right and y points *down*.
    if map_layer is None:
        map_upper_left_offset_from_global_coordinate_origin = np.zeros((2,))
    else:
        map_upper_left_offset_from_global_coordinate_origin = np.array(
            [-map_layer.transform_matrix[0, -1], map_layer.transform_matrix[1, -1]]
        )
    ego_offset_from_map_upper_left: npt.NDArray[np.float32] = np.array([center_x, -center_y])
    crop_upper_left_offset_from_ego: npt.NDArray[np.float32] = np.array(
        [xrange[0] * map_scale[0], yrange[0] * map_scale[1]]
    )
    map_translation: npt.NDArray[np.float64] = (
        -map_upper_left_offset_from_global_coordinate_origin
        - ego_offset_from_map_upper_left
        - crop_upper_left_offset_from_ego
    )
    map_translation_with_z: npt.NDArray[np.float64] = np.array(
        [map_translation[0], map_translation[1], 0]
    )  # add z-coordinate

    if target_imsize_xy is not None:
        # Zoom to the requested output grid; the effective resolution changes,
        # so report the zoom factors as the new map scale.
        zoom_size_x = target_imsize_xy[0] / cropped_dimensions[0]
        zoom_size_y = target_imsize_xy[1] / cropped_dimensions[1]
        if map_crop is not None:
            # Guard: with no map layer there is nothing to zoom.
            map_crop = ndimage.zoom(map_crop, [zoom_size_x, zoom_size_y])
        map_scale = (zoom_size_x, zoom_size_y)

    return map_crop, map_translation_with_z, map_scale
def get_vector_map(
    self,
    maps_db: Optional[GPKGMapsDB],
    xrange: Tuple[float, float],
    yrange: Tuple[float, float],
    connection_scales: Optional[List[int]] = None,
) -> VectorMapNp:
    """
    This function returns the crop of baseline paths (blps) map centered at the current ego-pose with
    the given xrange and yrange.
    :param maps_db: Map database associated with this database.
    :param xrange: The range in x direction in meters relative to the current ego-pose. Eg: [-60, 60].
    :param yrange: The range in y direction in meters relative to the current ego-pose. Eg: [-60, 60].
    :param connection_scales: Connection scales to generate. Use the 1-hop connections if it's left empty.
    :return: Vector map data including lane segment coordinates and connections within the given range.
    """
    # load geopandas data
    # NOTE(review): maps_db is typed Optional but is dereferenced unconditionally
    # below — confirm that callers never pass None here.
    map_version = self.lidar_pc.log.map_version.replace('.gpkg', '')
    blps_gdf = maps_db.load_vector_layer(map_version, 'baseline_paths')  # type: ignore
    lane_poly_gdf = maps_db.load_vector_layer(map_version, 'lanes_polygons')  # type: ignore
    intersections_gdf = maps_db.load_vector_layer(map_version, 'intersections')  # type: ignore
    lane_connectors_gdf = maps_db.load_vector_layer(map_version, 'lane_connectors')  # type: ignore
    lane_groups_gdf = maps_db.load_vector_layer(map_version, 'lane_groups_polygons')  # type: ignore
    if (
        (blps_gdf is None)
        or (lane_poly_gdf is None)
        or (intersections_gdf is None)
        or (lane_connectors_gdf is None)
        or (lane_groups_gdf is None)
    ):
        # This sample has no vector map: return empty coordinate and
        # connection arrays with the expected shapes/dtypes.
        coords: npt.NDArray[np.float32] = np.empty([0, 2, 2], dtype=np.float32)
        if not connection_scales:
            # Use the 1-hop connections if connection_scales is not specified.
            connection_scales = [1]
        multi_scale_connections: Dict[int, Any] = {
            scale: np.empty([0, 2], dtype=np.int64) for scale in connection_scales
        }
        return VectorMapNp(
            coords=coords,
            multi_scale_connections=multi_scale_connections,
        )
    # data enhancement
    # Split baseline paths into those belonging to lanes vs. lane connectors
    # (intersections), based on which foreign-key column is populated.
    blps_in_lanes = blps_gdf[blps_gdf['lane_fid'].notna()]
    blps_in_intersections = blps_gdf[blps_gdf['lane_connector_fid'].notna()]
    # enhance blps_in_lanes: attach the lane-group id of each lane.
    lane_group_info = lane_poly_gdf[['lane_fid', 'lane_group_fid']]
    blps_in_lanes = blps_in_lanes.merge(lane_group_info, on='lane_fid', how='outer')
    # enhance blps_in_intersections: attach intersection and entry/exit lane ids.
    lane_connectors_gdf['lane_connector_fid'] = lane_connectors_gdf['fid']
    lane_conns_info = lane_connectors_gdf[
        ['lane_connector_fid', 'intersection_fid', 'exit_lane_fid', 'entry_lane_fid']
    ]
    # Convert the lane_connector_fid field of both data frames to the same dtype for merging.
    lane_conns_info = lane_conns_info.astype({'lane_connector_fid': int})
    blps_in_intersections = blps_in_intersections.astype({'lane_connector_fid': int})
    blps_in_intersections = blps_in_intersections.merge(lane_conns_info, on='lane_connector_fid', how='outer')
    # enhance blps_connection info: map each connector's exit/entry lane to the
    # corresponding baseline-path fid so connectors can be linked blp-to-blp.
    lane_blps_info = blps_in_lanes[['fid', 'lane_fid']]
    from_blps_info = lane_blps_info.rename(columns={'fid': 'from_blp', 'lane_fid': 'exit_lane_fid'})
    to_blps_info = lane_blps_info.rename(columns={'fid': 'to_blp', 'lane_fid': 'entry_lane_fid'})
    blps_in_intersections = blps_in_intersections.merge(from_blps_info, on='exit_lane_fid', how='inner')
    blps_in_intersections = blps_in_intersections.merge(to_blps_info, on='entry_lane_fid', how='inner')
    # Select in-range blps by first selecting in-range lane groups / intersections.
    candidate_lane_groups, candidate_intersections = get_candidates(
        self.translation_np, xrange, yrange, lane_groups_gdf, intersections_gdf
    )
    candidate_blps_in_lanes = blps_in_lanes[
        blps_in_lanes['lane_group_fid'].isin(candidate_lane_groups['fid'].astype(int))
    ]
    candidate_blps_in_intersections = blps_in_intersections[
        blps_in_intersections['intersection_fid'].isin(candidate_intersections['fid'].astype(int))
    ]
    # Accumulators filled in-place by the build_lane_segments_from_blps helpers.
    ls_coordinates_list: List[List[List[float]]] = []
    ls_connections_list: List[List[int]] = []
    ls_groupings_list: List[List[int]] = []
    cross_blp_connection: Dict[str, List[int]] = dict()
    # generate lane_segments from blps in lanes
    build_lane_segments_from_blps(
        candidate_blps_in_lanes, ls_coordinates_list, ls_connections_list, ls_groupings_list, cross_blp_connection
    )
    # generate lane_segments from blps in intersections
    build_lane_segments_from_blps(
        candidate_blps_in_intersections,
        ls_coordinates_list,
        ls_connections_list,
        ls_groupings_list,
        cross_blp_connection,
    )
    # generate connections between blps
    # NOTE(review): only the keys are used in this loop; blp_info is unused.
    for blp_id, blp_info in cross_blp_connection.items():
        # Add predecessors
        connect_blp_predecessor(blp_id, candidate_blps_in_intersections, cross_blp_connection, ls_connections_list)
        # Add successors
        connect_blp_successor(blp_id, candidate_blps_in_intersections, cross_blp_connection, ls_connections_list)
    ls_coordinates: npt.NDArray[np.float64] = np.asarray(ls_coordinates_list, self.translation_np.dtype)
    ls_connections: npt.NDArray[np.int64] = np.asarray(ls_connections_list, np.int64)
    # Transform the lane coordinates from global frame to ego vehicle frame.
    # Flatten ls_coordinates from (num_ls, 2, 2) to (num_ls * 2, 2) for easier processing.
    ls_coordinates = ls_coordinates.reshape(-1, 2)
    ls_coordinates = ls_coordinates - self.translation_np[:2]
    ls_coordinates = self.rotate_2d_points2d_to_ego_vehicle_frame(ls_coordinates)
    ls_coordinates = ls_coordinates.reshape(-1, 2, 2).astype(np.float32)
    if connection_scales:
        # Generate multi-scale connections.
        multi_scale_connections = generate_multi_scale_connections(ls_connections, connection_scales)
    else:
        # Use the 1-hop connections if connection_scales is not specified.
        multi_scale_connections = {1: ls_connections}
    return VectorMapNp(
        coords=ls_coordinates,
        multi_scale_connections=multi_scale_connections,
    )
# Late-bound relationship: attach the `ego_pose` attribute to Image after both
# mapped classes exist, pairing with the `back_populates="image"` side on EgoPose.
# NOTE(review): presumably a SQLAlchemy relationship() assigned after the class
# definitions to avoid a circular import between the modules — confirm.
Image.ego_pose = relationship("EgoPose", foreign_keys=[Image.ego_pose_token], back_populates="image")
|
{"hexsha": "86ac9bfbee96df45102f38fd78fc1b01bd9b2d4d", "size": 19894, "ext": "py", "lang": "Python", "max_stars_repo_path": "nuplan/database/nuplan_db/ego_pose.py", "max_stars_repo_name": "motional/nuplan-devkit", "max_stars_repo_head_hexsha": "e39029e788b17f47f2fcadb774098ef8fbdd0d67", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 128, "max_stars_repo_stars_event_min_datetime": "2021-12-06T15:41:14.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T13:16:32.000Z", "max_issues_repo_path": "nuplan/database/nuplan_db/ego_pose.py", "max_issues_repo_name": "motional/nuplan-devkit", "max_issues_repo_head_hexsha": "e39029e788b17f47f2fcadb774098ef8fbdd0d67", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 28, "max_issues_repo_issues_event_min_datetime": "2021-12-11T08:11:31.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-25T02:35:43.000Z", "max_forks_repo_path": "nuplan/database/nuplan_db/ego_pose.py", "max_forks_repo_name": "motional/nuplan-devkit", "max_forks_repo_head_hexsha": "e39029e788b17f47f2fcadb774098ef8fbdd0d67", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 14, "max_forks_repo_forks_event_min_datetime": "2021-12-11T04:12:26.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-24T06:38:30.000Z", "avg_line_length": 47.8221153846, "max_line_length": 119, "alphanum_fraction": 0.6614054489, "include": true, "reason": "import numpy,from scipy", "num_tokens": 4734}
|
[STATEMENT]
lemma used_appIR: "X \<in> used evs \<Longrightarrow> X \<in> used (evs @ evs')"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. X \<in> used evs \<Longrightarrow> X \<in> used (evs @ evs')
[PROOF STEP]
(* Monotonicity of `used` under appending events: the proof eliminates the
   premise with used_sub_app (used evs <= used (evs @ evs')) via subsetD. *)
by (erule used_sub_app [THEN subsetD])
|
{"llama_tokens": 105, "file": null, "length": 1}
|
import matplotlib.pyplot as plt
import numpy as np
from scipy import fftpack
from scipy import signal
import math
def chromagram_stft(data, rate=1.0, winlen=2048, scale='sharp', winn='ret'):
    '''
    Calculates the chromagram of an audio sample.

    For each STFT frame, the spectral power inside a narrow band around every
    equal-tempered semitone frequency (from A0 = 27.5 Hz up to 5 kHz or the
    maximum STFT frequency, whichever comes first) is accumulated into one of
    the 12 chroma classes; each frame with energy is then normalized to sum
    to 1.

    Args:
        data: array of time series of an audio sample.
        rate: sample frequency of 'data'.
        winlen: length of each STFT segment.
        scale: type of chroma-scale labels ('sharp', 'flat' or 'number').
        winn: type of band window - 'ret' (rectangular), 'gauss', 'hann',
            'flattop' or 'exp'.

    Returns:
        chromas: array of chroma-scale labels (C, C#, D, ..., B).
        t: time scale array.
        chromagram: (12, len(t)) ndarray of the chromagram. Last axis is the
            time scale.

    Raises:
        ValueError: if 'winn' or 'scale' is an unrecognized string.
        TypeError: if 'winn' or 'scale' is not a string.
    '''
    f, t, Stf = signal.stft(data, fs=rate, nperseg=winlen, nfft=int(32 * winlen))
    # Better results with the power spectrum (abs^2) than with the magnitude.
    Stf = np.abs(Stf) ** 2
    fmax = f[len(f) - 1]
    chromagram = np.zeros(shape=(12, len(t)))
    # Loop invariants for the Hz -> bin-index conversion, hoisted out of the loops.
    nbins = len(f) - 1
    fspan = fmax - f[0]
    for timeidx in range(len(t)):
        spectrum = Stf[:, timeidx]
        # First band center is an A at 27.5Hz (A0).
        center = 27.5
        # Half-width of each band as a fraction of its center frequency
        # (about a quarter tone: 2**(1/24) - 1 ~= 0.029).
        devfreq = 0.028060
        chromaidx = 9  # A is chroma index 9 (C = 0, ..., B = 11).
        centeridx = 0
        while center * (1.0 + devfreq) < fmax and center * (1.0 + devfreq) < 5e3:
            # Convert the band edges from Hz to spectrum bin indices.
            loidx = center * (1.0 - devfreq)
            loidx = math.ceil(nbins * (loidx - f[0]) / fspan)
            upidx = center * (1.0 + devfreq)
            upidx = math.floor(nbins * (upidx - f[0]) / fspan)
            winfilter = np.zeros(shape=spectrum.shape)
            width = upidx - loidx + 1
            if winn == 'gauss':
                # Gaussian window spanning the band (+-3 sigma).
                sigma = width / 6.0
                g = signal.windows.gaussian(width, std=sigma)
                winfilter[loidx:upidx + 1] = g[0:width]
            elif winn == 'hann':
                h = signal.windows.hann(width)
                winfilter[loidx:upidx + 1] = h[0:width]
            elif winn == 'flattop':
                flat = signal.windows.flattop(width)
                winfilter[loidx:upidx + 1] = flat[0:width]
            elif winn == 'exp':
                avg = width / 4.0
                e = signal.windows.exponential(width, tau=avg)
                winfilter[loidx:upidx + 1] = e[0:width]
            elif winn == 'ret':
                # Rectangular window: pass the band through unchanged.
                winfilter[loidx:upidx + 1] = 1.0
            # Throwing errors on winn argument
            elif isinstance(winn, str):
                raise ValueError(
                    'window format \'{}\' is not implemented.'.format(winn))
            else:
                raise TypeError(
                    'a string was expected. Received: {}'.format(type(winn)))
            # Accumulate the band's power into its chroma class.
            chromagram[chromaidx, timeidx] += np.dot(spectrum, winfilter)
            chromaidx = (chromaidx + 1) % 12
            centeridx += 1
            center = 27.5 * np.power(2, centeridx / 12)
        # Normalize the frame to a distribution over the 12 chromas.
        if sum(chromagram[:, timeidx]) > 0:
            chromagram[:, timeidx] /= sum(chromagram[:, timeidx])
    if scale == 'sharp':
        chromas = np.array(['C', 'C#', 'D', 'D#', 'E', 'F',
                            'F#', 'G', 'G#', 'A', 'A#', 'B'])
    elif scale == 'flat':
        chromas = np.array(['C', 'Db', 'D', 'Eb', 'E', 'F',
                            'Gb', 'G', 'Ab', 'A', 'Bb', 'B'])
    elif scale == 'number':
        chromas = np.array([i for i in range(12)])
    # Throwing errors on scale argument
    elif isinstance(scale, str):
        raise ValueError(
            'scale format \'{}\' is not implemented.'.format(scale))
    else:
        raise TypeError(
            'a string was expected. Received: {}'.format(type(scale)))
    return chromas, t, chromagram
def chromaplot(t, scale, chroma, **kwargs):
    """Render a chromagram as a heat map with chroma labels on the y axis.

    Args:
        t: time scale array; its first/last values set the x extent.
        scale: sequence of chroma labels used as y tick labels.
        chroma: 2D array, one row per chroma, one column per time frame.
        **kwargs: extra keyword arguments forwarded to plt.imshow.

    Returns:
        The image object returned by plt.imshow.
    """
    # NOTE(review): interpolation=None selects matplotlib's rcParams default,
    # not "no interpolation"; if the latter was intended, the string 'none'
    # should be passed instead — confirm.
    ax = plt.imshow(chroma, cmap='hot', interpolation=None, extent=[
        t[0], t[len(t)-1], -0.5, len(scale)-0.5], origin='lower', aspect='auto', **kwargs)
    # One tick per chroma row, labelled with the provided scale names.
    plt.yticks(
        ticks=[i for i in range(len(scale))], labels=scale)
    return ax
def __gaussian(x, mu, sig):
return (1.0/(sig*np.sqrt(2.0*np.pi)))*np.exp((((x-mu)/sig)**2)/(-2.0))
|
{"hexsha": "f255ccebccfea944ca990cf6b631c783cfb47142", "size": 4369, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/chroma.py", "max_stars_repo_name": "rodrigocaus/chord_classifier", "max_stars_repo_head_hexsha": "2b9991d828bfa86685fdd971a3e7da5dfaba5699", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/chroma.py", "max_issues_repo_name": "rodrigocaus/chord_classifier", "max_issues_repo_head_hexsha": "2b9991d828bfa86685fdd971a3e7da5dfaba5699", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2020-03-24T17:17:28.000Z", "max_issues_repo_issues_event_max_datetime": "2021-02-02T22:22:15.000Z", "max_forks_repo_path": "src/chroma.py", "max_forks_repo_name": "rodrigocaus/chord_classifier", "max_forks_repo_head_hexsha": "2b9991d828bfa86685fdd971a3e7da5dfaba5699", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.3603603604, "max_line_length": 99, "alphanum_fraction": 0.500343328, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1218}
|
#
# Copyright (c) 2021 Tobias Thummerer, Lars Mikelsons, Josef Kircher
# Licensed under the MIT license. See LICENSE file in the project root for details.
#
# What is included in the file `FMI2_comp_wraps.jl` (FMU component wrappers)?
# - wrappers to call fmi2ComponentFunctions from FMUs (FMI-functions, last instantiated component is used) [exported]
# - wrappers to call fmi2ComponentFunctions from FMUs (additional functions, last instantiated component is used) [exported]
# FMI-spec
# Simulation entry points taking a whole FMU: each forwards to the component-level
# method, passing `nothing` as the component argument.
# NOTE(review): how `nothing` is resolved (e.g. default component selection) is
# defined by the component-level fmi2Simulate* methods, not visible here — confirm.
function fmi2Simulate(fmu::FMU2, args...; kwargs...)
    return fmi2Simulate(fmu, nothing, args...; kwargs...)
end
# Co-simulation variant of the wrapper above.
function fmi2SimulateCS(fmu::FMU2, args...; kwargs...)
    return fmi2SimulateCS(fmu, nothing, args...; kwargs...)
end
# Model-exchange variant of the wrapper above.
function fmi2SimulateME(fmu::FMU2, args...; kwargs...)
    return fmi2SimulateME(fmu, nothing, args...; kwargs...)
end
# Instance lifecycle wrappers. Each forwards to the same-named fmi2* function on
# the most recently instantiated component (`fmu.components[end]`), after
# asserting that at least one component exists.
# NOTE(review): the assert message is written as a one-element vector literal
# (["..."]); the conventional Julia form is a plain string: @assert cond "msg".
function fmi2FreeInstance!(fmu::FMU2)
    @assert length(fmu.components) > 0 ["No FMU instance allocated, have you already called fmiInstantiate?"]
    fmi2FreeInstance!(fmu.components[end]) # this command also removes the component from the array
end
function fmi2SetDebugLogging(fmu::FMU2)
    @assert length(fmu.components) > 0 ["No FMU instance allocated, have you already called fmiInstantiate?"]
    fmi2SetDebugLogging(fmu.components[end])
end
function fmi2SetupExperiment(fmu::FMU2, args...; kwargs...)
    @assert length(fmu.components) > 0 ["No FMU instance allocated, have you already called fmiInstantiate?"]
    fmi2SetupExperiment(fmu.components[end], args...; kwargs...)
end
function fmi2EnterInitializationMode(fmu::FMU2)
    @assert length(fmu.components) > 0 ["No FMU instance allocated, have you already called fmiInstantiate?"]
    fmi2EnterInitializationMode(fmu.components[end])
end
function fmi2ExitInitializationMode(fmu::FMU2)
    @assert length(fmu.components) > 0 ["No FMU instance allocated, have you already called fmiInstantiate?"]
    fmi2ExitInitializationMode(fmu.components[end])
end
function fmi2Terminate(fmu::FMU2)
    @assert length(fmu.components) > 0 ["No FMU instance allocated, have you already called fmiInstantiate?"]
    fmi2Terminate(fmu.components[end])
end
function fmi2Reset(fmu::FMU2)
    @assert length(fmu.components) > 0 ["No FMU instance allocated, have you already called fmiInstantiate?"]
    fmi2Reset(fmu.components[end])
end
# Variable access wrappers (getters/setters), all forwarding to the latest
# component (`fmu.components[end]`) with the same assert-then-forward pattern.
function fmi2GetReal(fmu::FMU2, args...; kwargs...)
    @assert length(fmu.components) > 0 ["No FMU instance allocated, have you already called fmiInstantiate?"]
    fmi2GetReal(fmu.components[end], args...; kwargs...)
end
function fmi2GetReal!(fmu::FMU2, args...; kwargs...)
    @assert length(fmu.components) > 0 ["No FMU instance allocated, have you already called fmiInstantiate?"]
    fmi2GetReal!(fmu.components[end], args...; kwargs...)
end
# NOTE(review): the next three wrappers are named fmiGet/fmiGet!/fmiSet while
# every sibling in this file uses the fmi2 prefix; they forward to
# fmi2Get/fmi2Get!/fmi2Set. Renaming would break existing callers, so the
# inconsistency is only flagged here.
function fmiGet(fmu::FMU2, args...; kwargs...)
    @assert length(fmu.components) > 0 ["No FMU instance allocated, have you already called fmiInstantiate?"]
    fmi2Get(fmu.components[end], args...; kwargs...)
end
function fmiGet!(fmu::FMU2, args...; kwargs...)
    @assert length(fmu.components) > 0 ["No FMU instance allocated, have you already called fmiInstantiate?"]
    fmi2Get!(fmu.components[end], args...; kwargs...)
end
function fmiSet(fmu::FMU2, args...; kwargs...)
    @assert length(fmu.components) > 0 ["No FMU instance allocated, have you already called fmiInstantiate?"]
    fmi2Set(fmu.components[end], args...; kwargs...)
end
function fmi2GetRealOutputDerivatives(fmu::FMU2, args...; kwargs...)
    @assert length(fmu.components) > 0 ["No FMU instance allocated, have you already called fmiInstantiate?"]
    fmi2GetRealOutputDerivatives(fmu.components[end], args...; kwargs...)
end
function fmi2SetReal(fmu::FMU2, args...; kwargs...)
    @assert length(fmu.components) > 0 ["No FMU instance allocated, have you already called fmiInstantiate?"]
    fmi2SetReal(fmu.components[end], args...; kwargs...)
end
function fmi2SetRealInputDerivatives(fmu::FMU2, args...; kwargs...)
    @assert length(fmu.components) > 0 ["No FMU instance allocated, have you already called fmiInstantiate?"]
    fmi2SetRealInputDerivatives(fmu.components[end], args...; kwargs...)
end
function fmi2GetInteger(fmu::FMU2, args...; kwargs...)
    @assert length(fmu.components) > 0 ["No FMU instance allocated, have you already called fmiInstantiate?"]
    fmi2GetInteger(fmu.components[end], args...; kwargs...)
end
function fmi2GetInteger!(fmu::FMU2, args...; kwargs...)
    @assert length(fmu.components) > 0 ["No FMU instance allocated, have you already called fmiInstantiate?"]
    fmi2GetInteger!(fmu.components[end], args...; kwargs...)
end
function fmi2SetInteger(fmu::FMU2, args...; kwargs...)
    @assert length(fmu.components) > 0 ["No FMU instance allocated, have you already called fmiInstantiate?"]
    fmi2SetInteger(fmu.components[end], args...; kwargs...)
end
function fmi2GetBoolean(fmu::FMU2, args...; kwargs...)
    @assert length(fmu.components) > 0 ["No FMU instance allocated, have you already called fmiInstantiate?"]
    fmi2GetBoolean(fmu.components[end], args...; kwargs...)
end
function fmi2GetBoolean!(fmu::FMU2, args...; kwargs...)
    @assert length(fmu.components) > 0 ["No FMU instance allocated, have you already called fmiInstantiate?"]
    fmi2GetBoolean!(fmu.components[end], args...; kwargs...)
end
function fmi2SetBoolean(fmu::FMU2, args...; kwargs...)
    @assert length(fmu.components) > 0 ["No FMU instance allocated, have you already called fmiInstantiate?"]
    fmi2SetBoolean(fmu.components[end], args...; kwargs...)
end
function fmi2GetString(fmu::FMU2, args...; kwargs...)
    @assert length(fmu.components) > 0 ["No FMU instance allocated, have you already called fmiInstantiate?"]
    fmi2GetString(fmu.components[end], args...; kwargs...)
end
function fmi2GetString!(fmu::FMU2, args...; kwargs...)
    @assert length(fmu.components) > 0 ["No FMU instance allocated, have you already called fmiInstantiate?"]
    fmi2GetString!(fmu.components[end], args...; kwargs...)
end
function fmi2SetString(fmu::FMU2, args...; kwargs...)
    @assert length(fmu.components) > 0 ["No FMU instance allocated, have you already called fmiInstantiate?"]
    fmi2SetString(fmu.components[end], args...; kwargs...)
end
# FMU-state and derivative/Jacobian wrappers, same assert-then-forward pattern
# on the most recently instantiated component.
function fmi2GetFMUstate(fmu::FMU2, args...; kwargs...)
    @assert length(fmu.components) > 0 ["No FMU instance allocated, have you already called fmiInstantiate?"]
    fmi2GetFMUstate(fmu.components[end], args...; kwargs...)
end
function fmi2SetFMUstate(fmu::FMU2, args...; kwargs...)
    @assert length(fmu.components) > 0 ["No FMU instance allocated, have you already called fmiInstantiate?"]
    fmi2SetFMUstate(fmu.components[end], args...; kwargs...)
end
function fmi2FreeFMUstate!(fmu::FMU2, args...; kwargs...)
    @assert length(fmu.components) > 0 ["No FMU instance allocated, have you already called fmiInstantiate?"]
    fmi2FreeFMUstate!(fmu.components[end], args...; kwargs...)
end
function fmi2SerializedFMUstateSize(fmu::FMU2, args...; kwargs...)
    @assert length(fmu.components) > 0 ["No FMU instance allocated, have you already called fmiInstantiate?"]
    fmi2SerializedFMUstateSize(fmu.components[end], args...; kwargs...)
end
function fmi2SerializeFMUstate(fmu::FMU2, args...; kwargs...)
    @assert length(fmu.components) > 0 ["No FMU instance allocated, have you already called fmiInstantiate?"]
    fmi2SerializeFMUstate(fmu.components[end], args...; kwargs...)
end
function fmi2DeSerializeFMUstate(fmu::FMU2, args...; kwargs...)
    @assert length(fmu.components) > 0 ["No FMU instance allocated, have you already called fmiInstantiate?"]
    fmi2DeSerializeFMUstate(fmu.components[end], args...; kwargs...)
end
function fmi2GetDirectionalDerivative!(fmu::FMU2, args...; kwargs...)
    @assert length(fmu.components) > 0 ["No FMU instance allocated, have you already called fmiInstantiate?"]
    fmi2GetDirectionalDerivative!(fmu.components[end], args...; kwargs...)
end
function fmi2GetDirectionalDerivative(fmu::FMU2, args...; kwargs...)
    @assert length(fmu.components) > 0 ["No FMU instance allocated, have you already called fmiInstantiate?"]
    fmi2GetDirectionalDerivative(fmu.components[end], args...; kwargs...)
end
function fmi2SampleDirectionalDerivative!(fmu::FMU2, args...; kwargs...)
    @assert length(fmu.components) > 0 ["No FMU instance allocated, have you already called fmiInstantiate?"]
    fmi2SampleDirectionalDerivative!(fmu.components[end], args...; kwargs...)
end
function fmi2SampleDirectionalDerivative(fmu::FMU2, args...; kwargs...)
    @assert length(fmu.components) > 0 ["No FMU instance allocated, have you already called fmiInstantiate?"]
    fmi2SampleDirectionalDerivative(fmu.components[end], args...; kwargs...)
end
function fmi2GetJacobian!(fmu::FMU2, args...; kwargs...)
    @assert length(fmu.components) > 0 ["No FMU instance allocated, have you already called fmiInstantiate?"]
    fmi2GetJacobian!(fmu.components[end], args...; kwargs...)
end
function fmi2GetJacobian(fmu::FMU2, args...; kwargs...)
    @assert length(fmu.components) > 0 ["No FMU instance allocated, have you already called fmiInstantiate?"]
    fmi2GetJacobian(fmu.components[end], args...; kwargs...)
end
# Co-simulation and model-exchange stepping/status wrappers, same
# assert-then-forward pattern on the most recently instantiated component.
function fmi2DoStep(fmu::FMU2, args...; kwargs...)
    @assert length(fmu.components) > 0 ["No FMU instance allocated, have you already called fmiInstantiate?"]
    fmi2DoStep(fmu.components[end], args...; kwargs...)
end
function fmi2CancelStep(fmu::FMU2)
    @assert length(fmu.components) > 0 ["No FMU instance allocated, have you already called fmiInstantiate?"]
    fmi2CancelStep(fmu.components[end])
end
function fmi2GetStatus(fmu::FMU2, args...; kwargs...)
    @assert length(fmu.components) > 0 ["No FMU instance allocated, have you already called fmiInstantiate?"]
    fmi2GetStatus(fmu.components[end], args...; kwargs...)
end
function fmi2GetRealStatus(fmu::FMU2, args...; kwargs...)
    @assert length(fmu.components) > 0 ["No FMU instance allocated, have you already called fmiInstantiate?"]
    fmi2GetRealStatus(fmu.components[end], args...; kwargs...)
end
function fmi2GetIntegerStatus(fmu::FMU2, args...; kwargs...)
    @assert length(fmu.components) > 0 ["No FMU instance allocated, have you already called fmiInstantiate?"]
    fmi2GetIntegerStatus(fmu.components[end], args...; kwargs...)
end
function fmi2GetBooleanStatus(fmu::FMU2, args...; kwargs...)
    @assert length(fmu.components) > 0 ["No FMU instance allocated, have you already called fmiInstantiate?"]
    fmi2GetBooleanStatus(fmu.components[end], args...; kwargs...)
end
function fmi2GetStringStatus(fmu::FMU2, args...; kwargs...)
    @assert length(fmu.components) > 0 ["No FMU instance allocated, have you already called fmiInstantiate?"]
    fmi2GetStringStatus(fmu.components[end], args...; kwargs...)
end
function fmi2SetTime(fmu::FMU2, args...; kwargs...)
    @assert length(fmu.components) > 0 ["No FMU instance allocated, have you already called fmiInstantiate?"]
    fmi2SetTime(fmu.components[end], args...; kwargs...)
end
function fmi2SetContinuousStates(fmu::FMU2, args...; kwargs...)
    @assert length(fmu.components) > 0 ["No FMU instance allocated, have you already called fmiInstantiate?"]
    fmi2SetContinuousStates(fmu.components[end], args...; kwargs...)
end
function fmi2EnterEventMode(fmu::FMU2)
    @assert length(fmu.components) > 0 ["No FMU instance allocated, have you already called fmiInstantiate?"]
    fmi2EnterEventMode(fmu.components[end])
end
function fmi2NewDiscreteStates(fmu::FMU2, args...; kwargs...)
    @assert length(fmu.components) > 0 ["No FMU instance allocated, have you already called fmiInstantiate?"]
    fmi2NewDiscreteStates(fmu.components[end], args...; kwargs...)
end
function fmi2EnterContinuousTimeMode(fmu::FMU2)
    @assert length(fmu.components) > 0 ["No FMU instance allocated, have you already called fmiInstantiate?"]
    fmi2EnterContinuousTimeMode(fmu.components[end])
end
function fmi2CompletedIntegratorStep(fmu::FMU2, args...; kwargs...)
    @assert length(fmu.components) > 0 ["No FMU instance allocated, have you already called fmiInstantiate?"]
    fmi2CompletedIntegratorStep(fmu.components[end], args...; kwargs...)
end
function fmi2GetDerivatives(fmu::FMU2)
    @assert length(fmu.components) > 0 ["No FMU instance allocated, have you already called fmiInstantiate?"]
    fmi2GetDerivatives(fmu.components[end])
end
function fmi2GetEventIndicators(fmu::FMU2)
    @assert length(fmu.components) > 0 ["No FMU instance allocated, have you already called fmiInstantiate?"]
    fmi2GetEventIndicators(fmu.components[end])
end
function fmi2GetContinuousStates(fmu::FMU2)
    @assert length(fmu.components) > 0 ["No FMU instance allocated, have you already called fmiInstantiate?"]
    fmi2GetContinuousStates(fmu.components[end])
end
function fmi2GetNominalsOfContinuousStates(fmu::FMU2)
    @assert length(fmu.components) > 0 ["No FMU instance allocated, have you already called fmiInstantiate?"]
    fmi2GetNominalsOfContinuousStates(fmu.components[end])
end
# additionals
# Non-spec convenience wrapper, same assert-then-forward pattern.
function fmi2GetStartValue(fmu::FMU2, args...; kwargs...)
    @assert length(fmu.components) > 0 ["No FMU instance allocated, have you already called fmiInstantiate?"]
    fmi2GetStartValue(fmu.components[end], args...; kwargs...)
end
|
{"hexsha": "e563391382e28f86eaf1b6eb8ed0a9d102407bc3", "size": 13287, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/FMI2_comp_wraps.jl", "max_stars_repo_name": "adribrune/FMI.jl", "max_stars_repo_head_hexsha": "7fb390e56822fc7c99b2824bc4a3e59c61361d08", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/FMI2_comp_wraps.jl", "max_issues_repo_name": "adribrune/FMI.jl", "max_issues_repo_head_hexsha": "7fb390e56822fc7c99b2824bc4a3e59c61361d08", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/FMI2_comp_wraps.jl", "max_forks_repo_name": "adribrune/FMI.jl", "max_forks_repo_head_hexsha": "7fb390e56822fc7c99b2824bc4a3e59c61361d08", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.193877551, "max_line_length": 124, "alphanum_fraction": 0.7315421088, "num_tokens": 3650}
|
!
! AtmProfile_netCDF_IO
!
! Module containing routines to read and write AtmProfile netCDF
! format files.
!
!
! CREATION HISTORY:
! Written by: Paul van Delst, CIMSS/SSEC 08-Jul-2002
! paul.vandelst@noaa.gov
!
MODULE AtmProfile_netCDF_IO
! -----------------
! Environment setup
! -----------------
! Module use
USE Type_Kinds , ONLY: Long, Double
USE Message_Handler, ONLY: SUCCESS, FAILURE, WARNING, INFORMATION, &
Display_Message
USE String_Utility, ONLY: StrClean
USE AtmProfile_Define, ONLY: ATMPROFILE_ABSORBER_UNITS_NAME, &
ATMPROFILE_ABSORBER_UNITS_CHAR, &
ATMPROFILE_FP_INVALID, &
AtmProfileDateTime_type, &
AtmProfile_type, &
Associated_AtmProfile, &
Destroy_AtmProfile, &
Allocate_AtmProfile, &
CheckRelease_AtmProfile, &
Info_AtmProfile
USE netcdf
! Disable implicit typing
IMPLICIT NONE
! ------------
! Visibilities
! ------------
PRIVATE
PUBLIC :: Inquire_AtmProfile_netCDF
PUBLIC :: Write_AtmProfile_netCDF
PUBLIC :: Read_AtmProfile_netCDF
! -----------------
! Module parameters
! -----------------
! Module RCS Id string
CHARACTER(*), PARAMETER :: MODULE_RCS_ID = &
! Keyword set value
INTEGER, PARAMETER :: SET = 1
! msg string length
INTEGER, PARAMETER :: ML = 512
! Literal constants
REAL(Double), PARAMETER :: ZERO = 0.0_Double
REAL(Double), PARAMETER :: ONE = 1.0_Double
! Global attribute names. Case sensitive
CHARACTER(*), PARAMETER :: TITLE_GATTNAME = 'title'
CHARACTER(*), PARAMETER :: HISTORY_GATTNAME = 'history'
CHARACTER(*), PARAMETER :: COMMENT_GATTNAME = 'comment'
CHARACTER(*), PARAMETER :: ID_TAG_GATTNAME = 'id_tag'
CHARACTER(*), PARAMETER :: RELEASE_GATTNAME = 'Release'
CHARACTER(*), PARAMETER :: VERSION_GATTNAME = 'Version'
! Dimension names
CHARACTER(*), PARAMETER :: LEVEL_DIMNAME = 'n_levels'
CHARACTER(*), PARAMETER :: LAYER_DIMNAME = 'n_layers'
CHARACTER(*), PARAMETER :: ABSORBER_DIMNAME = 'n_absorbers'
CHARACTER(*), PARAMETER :: PROFILE_DIMNAME = 'n_profiles'
CHARACTER(*), PARAMETER :: DESCRIPTION_DIMNAME = 'pdsl'
! Variable names
CHARACTER(*), PARAMETER :: DESCRIPTION_VARNAME = 'profile_description'
CHARACTER(*), PARAMETER :: CLIMATOLOGY_MODEL_VARNAME = 'climatology_model'
CHARACTER(*), PARAMETER :: DATETIME_VARNAME = 'date_time'
CHARACTER(*), PARAMETER :: LATITUDE_VARNAME = 'latitude'
CHARACTER(*), PARAMETER :: LONGITUDE_VARNAME = 'longitude'
CHARACTER(*), PARAMETER :: SURFACE_ALTITUDE_VARNAME = 'surface_altitude'
CHARACTER(*), PARAMETER :: ABSORBER_ID_VARNAME = 'absorber_id'
CHARACTER(*), PARAMETER :: ABSORBER_UNITS_ID_VARNAME = 'absorber_units_id'
CHARACTER(*), PARAMETER :: LEVEL_PRESSURE_VARNAME = 'level_pressure'
CHARACTER(*), PARAMETER :: LEVEL_TEMPERATURE_VARNAME = 'level_temperature'
CHARACTER(*), PARAMETER :: LEVEL_ABSORBER_VARNAME = 'level_absorber'
CHARACTER(*), PARAMETER :: LEVEL_ALTITUDE_VARNAME = 'level_altitude'
CHARACTER(*), PARAMETER :: LAYER_PRESSURE_VARNAME = 'layer_pressure'
CHARACTER(*), PARAMETER :: LAYER_TEMPERATURE_VARNAME = 'layer_temperature'
CHARACTER(*), PARAMETER :: LAYER_ABSORBER_VARNAME = 'layer_absorber'
CHARACTER(*), PARAMETER :: LAYER_DELTA_Z_VARNAME = 'layer_delta_z'
! Variable long name attribute.
CHARACTER(*), PARAMETER :: LONGNAME_ATTNAME = 'long_name'
CHARACTER(*), PARAMETER :: DESCRIPTION_LONGNAME = 'Profile Description'
CHARACTER(*), PARAMETER :: CLIMATOLOGY_MODEL_LONGNAME = 'Climatology Model'
CHARACTER(*), PARAMETER :: DATETIME_LONGNAME = 'Date/Time'
CHARACTER(*), PARAMETER :: LATITUDE_LONGNAME = 'Latitude'
CHARACTER(*), PARAMETER :: LONGITUDE_LONGNAME = 'Longitude'
CHARACTER(*), PARAMETER :: SURFACE_ALTITUDE_LONGNAME = 'Surface Altitude'
CHARACTER(*), PARAMETER :: ABSORBER_ID_LONGNAME = 'Absorber ID'
CHARACTER(*), PARAMETER :: ABSORBER_UNITS_ID_LONGNAME = 'Absorber Units ID'
CHARACTER(*), PARAMETER :: LEVEL_PRESSURE_LONGNAME = 'Level pressure'
CHARACTER(*), PARAMETER :: LEVEL_TEMPERATURE_LONGNAME = 'Level temperature'
CHARACTER(*), PARAMETER :: LEVEL_ABSORBER_LONGNAME = 'Level absorber'
CHARACTER(*), PARAMETER :: LEVEL_ALTITUDE_LONGNAME = 'Level altitude'
CHARACTER(*), PARAMETER :: LAYER_PRESSURE_LONGNAME = 'Layer pressure'
CHARACTER(*), PARAMETER :: LAYER_TEMPERATURE_LONGNAME = 'Layer temperature'
CHARACTER(*), PARAMETER :: LAYER_ABSORBER_LONGNAME = 'Layer absorber'
CHARACTER(*), PARAMETER :: LAYER_DELTA_Z_LONGNAME = 'Layer thickness'
! Variable description attribute.
CHARACTER(*), PARAMETER :: DESCRIPTION_ATTNAME = 'description'
CHARACTER(*), PARAMETER :: DESCRIPTION_DESCRIPTION = 'Description of atmospheric profile and modification'
CHARACTER(*), PARAMETER :: CLIMATOLOGY_MODEL_DESCRIPTION = 'Climatology model associated with profile date/time/location.'
CHARACTER(*), PARAMETER :: DATETIME_DESCRIPTION = 'Date/Time at which profile was measured(sonde) or generated(model)'
CHARACTER(*), PARAMETER :: LATITUDE_DESCRIPTION = 'Latitude of profile location'
CHARACTER(*), PARAMETER :: LONGITUDE_DESCRIPTION = 'Longitude of profile location'
CHARACTER(*), PARAMETER :: SURFACE_ALTITUDE_DESCRIPTION = 'Surface altitude of profile'
CHARACTER(*), PARAMETER :: ABSORBER_ID_DESCRIPTION = 'HITRAN/LBLRTM absorber ID number for atmospheric absorbers'
CHARACTER(*), PARAMETER :: ABSORBER_UNITS_ID_DESCRIPTION = 'LBLRTM absorber units ID number'
CHARACTER(*), PARAMETER :: LEVEL_PRESSURE_DESCRIPTION = 'Level pressure'
CHARACTER(*), PARAMETER :: LEVEL_TEMPERATURE_DESCRIPTION = 'Level temperature'
CHARACTER(*), PARAMETER :: LEVEL_ABSORBER_DESCRIPTION = 'Level absorber amount'
CHARACTER(*), PARAMETER :: LEVEL_ALTITUDE_DESCRIPTION = 'Level geopotential altitude'
CHARACTER(*), PARAMETER :: LAYER_PRESSURE_DESCRIPTION = 'Average layer pressure'
CHARACTER(*), PARAMETER :: LAYER_TEMPERATURE_DESCRIPTION = 'Average layer temperature'
CHARACTER(*), PARAMETER :: LAYER_ABSORBER_DESCRIPTION = 'Average layer absorber amount'
CHARACTER(*), PARAMETER :: LAYER_DELTA_Z_DESCRIPTION = 'Layer thickness'
! Variable units attribute.
! NOTE: these strings are written verbatim into the netCDF 'units' attribute
! of each variable, so their spelling is user-visible file metadata.
CHARACTER(*), PARAMETER :: UNITS_ATTNAME = 'units'
CHARACTER(*), PARAMETER :: DESCRIPTION_UNITS = 'N/A'
CHARACTER(*), PARAMETER :: CLIMATOLOGY_MODEL_UNITS = 'N/A'
CHARACTER(*), PARAMETER :: DATETIME_UNITS = 'YYYYMMDD.HH'
! BUGFIX: corrected misspelling 'degress' -> 'degrees' in the two entries below.
CHARACTER(*), PARAMETER :: LATITUDE_UNITS = 'degrees North (-90->+90)'
CHARACTER(*), PARAMETER :: LONGITUDE_UNITS = 'degrees East (0->360)'
CHARACTER(*), PARAMETER :: SURFACE_ALTITUDE_UNITS = 'metres (m)'
CHARACTER(*), PARAMETER :: ABSORBER_ID_UNITS = 'N/A'
CHARACTER(*), PARAMETER :: ABSORBER_UNITS_ID_UNITS = 'N/A'
CHARACTER(*), PARAMETER :: LEVEL_PRESSURE_UNITS = 'hectoPascals (hPa)'
CHARACTER(*), PARAMETER :: LEVEL_TEMPERATURE_UNITS = 'Kelvin (K)'
CHARACTER(*), PARAMETER :: LEVEL_ABSORBER_UNITS = 'Variable (see Absorber_Units_ID)'
CHARACTER(*), PARAMETER :: LEVEL_ALTITUDE_UNITS = 'metres (m)'
CHARACTER(*), PARAMETER :: LAYER_PRESSURE_UNITS = 'hectoPascals (hPa)'
CHARACTER(*), PARAMETER :: LAYER_TEMPERATURE_UNITS = 'Kelvin (K)'
CHARACTER(*), PARAMETER :: LAYER_ABSORBER_UNITS = 'Variable (see Absorber_Units_ID)'
CHARACTER(*), PARAMETER :: LAYER_DELTA_Z_UNITS = 'metres (m)'
! Variable fill value attribute
CHARACTER(*), PARAMETER :: FILLVALUE_ATTNAME = '_FillValue'
CHARACTER(*) , PARAMETER :: DESCRIPTION_FILLVALUE = NF90_FILL_CHAR
INTEGER(Long), PARAMETER :: CLIMATOLOGY_MODEL_FILLVALUE = 0
REAL(Double) , PARAMETER :: DATETIME_FILLVALUE = ZERO
REAL(Double) , PARAMETER :: LATITUDE_FILLVALUE = ATMPROFILE_FP_INVALID
REAL(Double) , PARAMETER :: LONGITUDE_FILLVALUE = ATMPROFILE_FP_INVALID
REAL(Double) , PARAMETER :: SURFACE_ALTITUDE_FILLVALUE = ATMPROFILE_FP_INVALID
INTEGER(Long), PARAMETER :: ABSORBER_ID_FILLVALUE = 0
INTEGER(Long), PARAMETER :: ABSORBER_UNITS_ID_FILLVALUE = 0
REAL(Double) , PARAMETER :: LEVEL_PRESSURE_FILLVALUE = ATMPROFILE_FP_INVALID
REAL(Double) , PARAMETER :: LEVEL_TEMPERATURE_FILLVALUE = ATMPROFILE_FP_INVALID
REAL(Double) , PARAMETER :: LEVEL_ABSORBER_FILLVALUE = ATMPROFILE_FP_INVALID
REAL(Double) , PARAMETER :: LEVEL_ALTITUDE_FILLVALUE = ATMPROFILE_FP_INVALID
REAL(Double) , PARAMETER :: LAYER_PRESSURE_FILLVALUE = ATMPROFILE_FP_INVALID
REAL(Double) , PARAMETER :: LAYER_TEMPERATURE_FILLVALUE = ATMPROFILE_FP_INVALID
REAL(Double) , PARAMETER :: LAYER_ABSORBER_FILLVALUE = ATMPROFILE_FP_INVALID
REAL(Double) , PARAMETER :: LAYER_DELTA_Z_FILLVALUE = ATMPROFILE_FP_INVALID
! Variable datatypes
INTEGER, PARAMETER :: DESCRIPTION_TYPE = NF90_CHAR
INTEGER, PARAMETER :: CLIMATOLOGY_MODEL_TYPE = NF90_INT
INTEGER, PARAMETER :: DATETIME_TYPE = NF90_DOUBLE
INTEGER, PARAMETER :: LATITUDE_TYPE = NF90_DOUBLE
INTEGER, PARAMETER :: LONGITUDE_TYPE = NF90_DOUBLE
INTEGER, PARAMETER :: SURFACE_ALTITUDE_TYPE = NF90_DOUBLE
INTEGER, PARAMETER :: ABSORBER_ID_TYPE = NF90_INT
INTEGER, PARAMETER :: ABSORBER_UNITS_ID_TYPE = NF90_INT
INTEGER, PARAMETER :: LEVEL_PRESSURE_TYPE = NF90_DOUBLE
INTEGER, PARAMETER :: LEVEL_TEMPERATURE_TYPE = NF90_DOUBLE
INTEGER, PARAMETER :: LEVEL_ABSORBER_TYPE = NF90_DOUBLE
INTEGER, PARAMETER :: LEVEL_ALTITUDE_TYPE = NF90_DOUBLE
INTEGER, PARAMETER :: LAYER_PRESSURE_TYPE = NF90_DOUBLE
INTEGER, PARAMETER :: LAYER_TEMPERATURE_TYPE = NF90_DOUBLE
INTEGER, PARAMETER :: LAYER_ABSORBER_TYPE = NF90_DOUBLE
INTEGER, PARAMETER :: LAYER_DELTA_Z_TYPE = NF90_DOUBLE
CONTAINS
!##################################################################################
!##################################################################################
!## ##
!## ## PUBLIC MODULE ROUTINES ## ##
!## ##
!##################################################################################
!##################################################################################
!------------------------------------------------------------------------------
!:sdoc+:
!
! NAME:
! Inquire_AtmProfile_netCDF
!
! PURPOSE:
! Function to inquire a netCDF AtmProfile format file to obtain the
! dimensions and global attributes.
!
! CALLING SEQUENCE:
! Error_Status = Inquire_AtmProfile_netCDF( NC_Filename , & ! Input
! n_Layers =n_Layers , & ! Optional output
! n_Absorbers=n_Absorbers, & ! Optional output
! n_Profiles =n_Profiles , & ! Optional output
! Release =Release , & ! Optional output
! Version =Version , & ! Optional output
! ID_Tag =ID_Tag , & ! Optional output
! Title =Title , & ! Optional output
! History =History , & ! Optional output
! Comment =Comment , & ! Optional output
! RCS_Id =RCS_Id , & ! Revision control
! Message_Log=Message_Log ) ! Error messaging
!
! INPUT ARGUMENTS:
! NC_Filename: Character string specifying the name of the netCDF
! format AtmProfile data file to inquire.
! UNITS: N/A
! TYPE: CHARACTER(*)
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(IN)
!
! OPTIONAL INPUT ARGUMENTS:
! Message_Log: Character string specifying a filename in which any
! Messages will be logged. If not specified, or if an
! error occurs opening the log file, the default action
! is to output Messages to standard output.
! UNITS: N/A
! TYPE: CHARACTER(*)
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(IN), OPTIONAL
!
! OPTIONAL OUTPUT ARGUMENTS:
! n_Layers: The number of atmospheric layers dimension of the
! atmospheric profile data.
! UNITS: N/A
! TYPE: INTEGER
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(OUT), OPTIONAL
!
! n_Absorbers: The number of molecular absorbers dimension of the
! atmospheric profile data.
! UNITS: N/A
! TYPE: INTEGER
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(OUT), OPTIONAL
!
! n_Profiles: The number of profiles contained in the netCDF
! dataset.
! UNITS: N/A
! TYPE: INTEGER
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(OUT), OPTIONAL
!
! Release: The release number of the netCDF AtmProfile file.
! UNITS: N/A
! TYPE: INTEGER
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(OUT), OPTIONAL
!
! Version: The version number of the netCDF AtmProfile file.
! UNITS: N/A
! TYPE: INTEGER
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(OUT), OPTIONAL
!
! ID_Tag: Character string written into the ID_TAG global
! attribute field of the netCDF AtmProfile file.
! UNITS: N/A
! TYPE: CHARACTER(*)
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(OUT), OPTIONAL
!
! Title: Character string written into the TITLE global
! attribute field of the netCDF AtmProfile file.
! UNITS: N/A
! TYPE: CHARACTER(*)
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(OUT), OPTIONAL
!
! History: Character string written into the HISTORY global
! attribute field of the netCDF AtmProfile file.
! UNITS: N/A
! TYPE: CHARACTER(*)
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(OUT), OPTIONAL
!
! Comment: Character string written into the COMMENT global
! attribute field of the netCDF AtmProfile file.
! UNITS: N/A
! TYPE: CHARACTER(*)
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(OUT), OPTIONAL
!
! RCS_Id: Character string containing the Revision Control
! System Id field for the module.
! UNITS: N/A
! TYPE: CHARACTER(*)
! DIMENSION: Scalar
! ATTRIBUTES: OPTIONAL, INTENT(OUT)
!
! FUNCTION RESULT:
! Error_Status: The return value is an integer defining the error status.
! The error codes are defined in the ERROR_HANDLER module.
! If == SUCCESS the netCDF file inquiry was successful.
! == FAILURE - an error occurred opening the netCDF file, or
! - an error occurred reading any of the requested
! dimension or variable data.
! == WARNING - an error occurred reading any of the requested
! global file attributes, or
! - an error occurred closing the netCDF file.
! UNITS: N/A
! TYPE: INTEGER
! DIMENSION: Scalar
!
!:sdoc-:
!------------------------------------------------------------------------------
FUNCTION Inquire_AtmProfile_netCDF( NC_Filename, & ! Input
                                    n_Layers   , & ! Optional output
                                    n_Absorbers, & ! Optional output
                                    n_Profiles , & ! Optional output
                                    Release    , & ! Optional output
                                    Version    , & ! Optional output
                                    ID_Tag     , & ! Optional output
                                    Title      , & ! Optional output
                                    History    , & ! Optional output
                                    Comment    , & ! Optional output
                                    RCS_Id     , & ! Revision control
                                    Message_Log) & ! Error messaging
                                  RESULT( Error_Status )
  ! Arguments
  CHARACTER(*),           INTENT(IN)  :: NC_Filename
  INTEGER     , OPTIONAL, INTENT(OUT) :: n_Layers
  INTEGER     , OPTIONAL, INTENT(OUT) :: n_Absorbers
  INTEGER     , OPTIONAL, INTENT(OUT) :: n_Profiles
  INTEGER     , OPTIONAL, INTENT(OUT) :: Release
  INTEGER     , OPTIONAL, INTENT(OUT) :: Version
  CHARACTER(*), OPTIONAL, INTENT(OUT) :: ID_Tag
  CHARACTER(*), OPTIONAL, INTENT(OUT) :: Title
  CHARACTER(*), OPTIONAL, INTENT(OUT) :: History
  CHARACTER(*), OPTIONAL, INTENT(OUT) :: Comment
  CHARACTER(*), OPTIONAL, INTENT(OUT) :: RCS_Id
  CHARACTER(*), OPTIONAL, INTENT(IN)  :: Message_Log
  ! Function result
  INTEGER :: Error_Status
  ! Function parameters
  CHARACTER(*), PARAMETER :: ROUTINE_NAME = 'Inquire_AtmProfile_netCDF'
  ! Function variables
  CHARACTER(ML) :: msg
  INTEGER :: NC_FileID
  INTEGER :: NF90_Status
  INTEGER :: n

  ! Set up
  ! ------
  Error_Status = SUCCESS
  IF ( PRESENT(RCS_Id) ) RCS_Id = MODULE_RCS_ID

  ! Open the file for read-only access
  ! ----------------------------------
  NF90_Status = NF90_OPEN( NC_Filename,NF90_NOWRITE,NC_FileId )
  IF ( NF90_Status /= NF90_NOERR ) THEN
    msg = 'Error opening '//TRIM(NC_Filename)//' for read access - '// &
          TRIM(NF90_STRERROR( NF90_Status ))
    CALL Inquire_Cleanup(); RETURN
  END IF

  ! Get the dimensions
  ! ------------------
  ! The repeated id-inquire/value-read sequence is factored into the
  ! GetDim internal function, which fills msg on failure.
  ! Layer dimension
  IF ( GetDim( LAYER_DIMNAME, n ) /= SUCCESS ) THEN
    CALL Inquire_Cleanup(Close_File=.TRUE.); RETURN
  END IF
  IF ( PRESENT(n_Layers) ) n_Layers = n
  ! Absorber dimension
  IF ( GetDim( ABSORBER_DIMNAME, n ) /= SUCCESS ) THEN
    CALL Inquire_Cleanup(Close_File=.TRUE.); RETURN
  END IF
  IF ( PRESENT(n_Absorbers) ) n_Absorbers = n
  ! Profile dimension
  IF ( GetDim( PROFILE_DIMNAME, n ) /= SUCCESS ) THEN
    CALL Inquire_Cleanup(Close_File=.TRUE.); RETURN
  END IF
  IF ( PRESENT(n_Profiles) ) n_Profiles = n

  ! Get the global attributes
  ! -------------------------
  Error_Status = ReadGAtts( NC_Filename , &
                            NC_FileID   , &
                            Release    =Release    , &
                            Version    =Version    , &
                            ID_Tag     =ID_Tag     , &
                            Title      =Title      , &
                            History    =History    , &
                            Comment    =Comment    , &
                            Message_Log=Message_Log )
  IF ( Error_Status /= SUCCESS ) THEN
    msg = 'Error reading global attributes from '//TRIM(NC_Filename)
    ! BUGFIX: the file is open at this point, so it must be closed during
    ! cleanup (previously the Close_File argument was omitted here, leaking
    ! the open netCDF file handle).
    CALL Inquire_Cleanup(Close_File=.TRUE.); RETURN
  END IF

  ! Close the file
  ! --------------
  NF90_Status = NF90_CLOSE( NC_FileId )
  IF ( NF90_Status /= NF90_NOERR ) THEN
    msg = 'Error closing input file - '//TRIM(NF90_STRERROR( NF90_Status ))
    CALL Inquire_Cleanup(); RETURN
  END IF

CONTAINS

  ! Inquire the id of the named dimension and read its value.
  ! On failure, fills the host-associated msg and returns FAILURE;
  ! the caller decides how to clean up.
  FUNCTION GetDim( DimName, DimValue ) RESULT( Status )
    CHARACTER(*), INTENT(IN)  :: DimName
    INTEGER     , INTENT(OUT) :: DimValue
    INTEGER :: Status
    INTEGER :: DimId
    Status = SUCCESS
    ! Get the dimension id
    NF90_Status = NF90_INQ_DIMID( NC_FileId,DimName,DimId )
    IF ( NF90_Status /= NF90_NOERR ) THEN
      msg = 'Error inquiring dimension ID for '//DimName//' - '// &
            TRIM(NF90_STRERROR( NF90_Status ))
      Status = FAILURE
      RETURN
    END IF
    ! Get the dimension value
    NF90_Status = NF90_INQUIRE_DIMENSION( NC_FileId,DimId,Len=DimValue )
    IF ( NF90_Status /= NF90_NOERR ) THEN
      msg = 'Error reading dimension value for '//DimName//' - '// &
            TRIM(NF90_STRERROR( NF90_Status ))
      Status = FAILURE
    END IF
  END FUNCTION GetDim

  ! Error handler: optionally close the file, then flag FAILURE and
  ! report the accumulated msg.
  SUBROUTINE Inquire_CleanUp( Close_File )
    LOGICAL, OPTIONAL, INTENT(IN) :: Close_File
    ! Close file if necessary
    IF ( PRESENT(Close_File) ) THEN
      IF ( Close_File ) THEN
        NF90_Status = NF90_CLOSE( NC_FileId )
        IF ( NF90_Status /= NF90_NOERR ) &
          msg = TRIM(msg)//'; Error closing input file during error cleanup - '//&
                TRIM(NF90_STRERROR( NF90_Status ))
      END IF
    END IF
    ! Set error status and print error msg
    Error_Status = FAILURE
    CALL Display_Message( ROUTINE_NAME,TRIM(msg),Error_Status,Message_Log=Message_Log )
  END SUBROUTINE Inquire_CleanUp

END FUNCTION Inquire_AtmProfile_netCDF
!------------------------------------------------------------------------------
!:sdoc+:
!
! NAME:
! Write_AtmProfile_netCDF
!
! PURPOSE:
! Function to write AtmProfile data to a netCDF format AtmProfile file.
!
! CALLING SEQUENCE:
! Error_Status = Write_AtmProfile_netCDF( NC_Filename , & ! Input
! AtmProfile , & ! Input
! Quiet =Quiet , & ! Optional input
! ID_Tag =ID_Tag , & ! Optional input
! Title =Title , & ! Optional input
! History =History , & ! Optional input
! Comment =Comment , & ! Optional input
! RCS_Id =RCS_Id , & ! Revision control
! Message_Log=Message_Log ) ! Error messaging
!
! INPUT ARGUMENTS:
! NC_Filename: Character string specifying the name of the netCDF
! format AtmProfile data file to write data into.
! UNITS: N/A
! TYPE: CHARACTER(*)
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(IN)
!
! AtmProfile: Structure containing the AtmProfile data
! to write to file.
! UNITS: N/A
! TYPE: TYPE(AtmProfile_type)
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(IN)
!
! OPTIONAL INPUT ARGUMENTS:
! Quiet: Set this keyword to suppress information msgs being
! printed to standard output (or the msg log file if
! the Message_Log optional argument is used.) By default,
! information msgs are printed.
! If QUIET = 0, information msgs are OUTPUT.
! QUIET = 1, information msgs are SUPPRESSED.
! UNITS: N/A
! TYPE: Integer
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(IN), OPTIONAL
!
! ID_Tag: Character string written into the ID_TAG global
! attribute field of the netCDF AtmProfile file.
! Identifies the dependent profile set.
! UNITS: N/A
! TYPE: CHARACTER(*)
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(IN), OPTIONAL
!
! Title: Character string written into the TITLE global
! attribute field of the netCDF AtmProfile file.
! UNITS: N/A
! TYPE: CHARACTER(*)
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(IN), OPTIONAL
!
! History: Character string written into the HISTORY global
! attribute field of the netCDF AtmProfile file.
! UNITS: N/A
! TYPE: CHARACTER(*)
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(IN), OPTIONAL
!
! Comment: Character string written into the COMMENT global
! attribute field of the netCDF AtmProfile file.
! UNITS: N/A
! TYPE: CHARACTER(*)
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(IN), OPTIONAL
!
! Message_Log: Character string specifying a filename in which any
! msgs will be logged. If not specified, or if an
! error occurs opening the log file, the default action
! is to output msgs to standard output.
! UNITS: N/A
! TYPE: CHARACTER(*)
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(IN), OPTIONAL
!
! OPTIONAL OUTPUT ARGUMENTS:
! RCS_Id: Character string containing the Revision Control
! System Id field for the module.
! UNITS: N/A
! TYPE: CHARACTER(*)
! DIMENSION: Scalar
! ATTRIBUTES: OPTIONAL, INTENT(OUT)
!
! FUNCTION RESULT:
! Error_Status: The return value is an integer defining the error status.
! The error codes are defined in the Message_Handler module.
! If == SUCCESS the netCDF data write was successful
! == FAILURE an unrecoverable error occurred.
! UNITS: N/A
! TYPE: INTEGER
! DIMENSION: Scalar
!
!:sdoc-:
!------------------------------------------------------------------------------
FUNCTION Write_AtmProfile_netCDF( NC_Filename , & ! Input
                                  AtmProfile  , & ! Input
                                  Quiet       , & ! Optional input
                                  ID_Tag      , & ! Optional input
                                  Title       , & ! Optional input
                                  History     , & ! Optional input
                                  Comment     , & ! Optional input
                                  RCS_Id      , & ! Revision control
                                  Message_Log ) & ! Error messaging
                                RESULT( Error_Status )
  ! Arguments
  CHARACTER(*)         ,           INTENT(IN)  :: NC_Filename
  TYPE(AtmProfile_type),           INTENT(IN)  :: AtmProfile
  INTEGER              , OPTIONAL, INTENT(IN)  :: Quiet
  CHARACTER(*)         , OPTIONAL, INTENT(IN)  :: ID_Tag
  CHARACTER(*)         , OPTIONAL, INTENT(IN)  :: Title
  CHARACTER(*)         , OPTIONAL, INTENT(IN)  :: History
  CHARACTER(*)         , OPTIONAL, INTENT(IN)  :: Comment
  CHARACTER(*)         , OPTIONAL, INTENT(OUT) :: RCS_Id
  CHARACTER(*)         , OPTIONAL, INTENT(IN)  :: Message_Log
  ! Function result
  INTEGER :: Error_Status
  ! Local parameters
  CHARACTER(*), PARAMETER :: ROUTINE_NAME = 'Write_AtmProfile_netCDF'
  ! Local variables
  CHARACTER(ML) :: msg
  LOGICAL :: Noisy
  INTEGER :: NC_FileID
  INTEGER :: NF90_Status

  ! Set up
  ! ------
  Error_Status = SUCCESS
  IF ( PRESENT(RCS_Id) ) RCS_Id = MODULE_RCS_ID
  ! Output informational msgs unless the QUIET keyword is set
  Noisy = .TRUE.
  IF ( PRESENT(Quiet) ) THEN
    IF ( Quiet == SET ) Noisy = .FALSE.
  END IF
  ! All pointer members of the input structure must be associated
  IF ( .NOT. Associated_AtmProfile( AtmProfile ) ) THEN
    msg = 'Some or all INPUT AtmProfile pointer members are NOT associated.'
    CALL Write_Cleanup(); RETURN
  END IF

  ! Create the output data file
  ! ---------------------------
  Error_Status = CreateFile( NC_Filename            , & ! Input
                             AtmProfile%n_Layers    , & ! Input
                             AtmProfile%n_Absorbers , & ! Input
                             AtmProfile%n_Profiles  , & ! Input
                             NC_FileID              , & ! Output
                             Version    =AtmProfile%Version, & ! Optional input
                             ID_Tag     =ID_Tag     , & ! Optional input
                             Title      =Title      , & ! Optional input
                             History    =History    , & ! Optional input
                             Comment    =Comment    , & ! Optional input
                             Message_Log=Message_Log ) ! Error messaging
  IF ( Error_Status /= SUCCESS ) THEN
    msg = 'Error creating output file '//TRIM(NC_Filename)
    CALL Write_Cleanup(); RETURN
  END IF

  ! Write the AtmProfile data
  ! -------------------------
  Error_Status = WriteVar( NC_Filename , &
                           NC_FileID   , &
                           AtmProfile  , &
                           Message_Log=Message_Log )
  IF ( Error_Status /= SUCCESS ) THEN
    msg = 'Error writing AtmProfile variables to output file '//TRIM(NC_Filename)
    CALL Write_Cleanup(Close_File=.TRUE.); RETURN
  END IF

  ! Close the file
  ! --------------
  NF90_Status = NF90_CLOSE( NC_FileId )
  IF ( NF90_Status /= NF90_NOERR ) THEN
    ! BUGFIX: this routine closes the OUTPUT file; the message used to
    ! incorrectly say "input file".
    msg = 'Error closing output file - '//TRIM(NF90_STRERROR( NF90_Status ))
    CALL Write_Cleanup(); RETURN
  END IF

  ! Output an info msg
  ! ------------------
  IF ( Noisy ) THEN
    CALL Info_AtmProfile( AtmProfile, msg )
    CALL Display_Message( ROUTINE_NAME, &
                          'FILE: '//TRIM(NC_Filename)//'; '//TRIM(msg), &
                          INFORMATION, &
                          Message_Log=Message_Log )
  END IF

CONTAINS

  ! Error handler: optionally close the file, then flag FAILURE and
  ! report the accumulated msg.
  SUBROUTINE Write_CleanUp( Close_File )
    LOGICAL, OPTIONAL, INTENT(IN) :: Close_File
    ! Close file if necessary
    IF ( PRESENT(Close_File) ) THEN
      IF ( Close_File ) THEN
        NF90_Status = NF90_CLOSE( NC_FileId )
        IF ( NF90_Status /= NF90_NOERR ) &
          ! BUGFIX: message corrected from "input file" to "output file".
          msg = TRIM(msg)//'; Error closing output file during error cleanup - '//&
                TRIM(NF90_STRERROR( NF90_Status ))
      END IF
    END IF
    ! Set error status and print error msg
    Error_Status = FAILURE
    CALL Display_Message( ROUTINE_NAME,TRIM(msg),Error_Status,Message_Log=Message_Log )
  END SUBROUTINE Write_CleanUp

END FUNCTION Write_AtmProfile_netCDF
!------------------------------------------------------------------------------
!:sdoc+:
!
! NAME:
! Read_AtmProfile_netCDF
!
! PURPOSE:
! Function to read data from a netCDF format AtmProfile file.
!
! CALLING SEQUENCE:
! Error_Status = Read_AtmProfile_netCDF( NC_Filename , & ! Input
! AtmProfile , & ! Output
! Quiet =Quiet , & ! Optional input
! Reverse =Reverse , & ! Optional input
! ID_Tag =ID_Tag , & ! Optional output
! Title =Title , & ! Optional output
! History =History , & ! Optional output
! Comment =Comment , & ! Optional output
! RCS_Id =RCS_Id , & ! Revision control
! Message_Log=Message_Log ) ! Error messaging
!
! INPUT ARGUMENTS:
! NC_Filename: Character string specifying the name of the
! netCDF format AtmProfile data file to read.
! UNITS: N/A
! TYPE: CHARACTER(*)
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(IN)
!
! OUTPUT ARGUMENTS:
! AtmProfile: Structure to contain the AtmProfile data
! read from file.
! UNITS: N/A
! TYPE: TYPE(AtmProfile_type)
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(OUT)
!
! OPTIONAL INPUT ARGUMENTS:
! Quiet: Set this keyword to suppress information messages being
! printed to standard output (or the msg log file if
! the Message_Log optional argument is used.) By default,
! information messages are printed.
! If QUIET = 0, information messages are OUTPUT.
! QUIET = 1, information messages are SUPPRESSED.
! UNITS: N/A
! TYPE: INTEGER
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(IN), OPTIONAL
!
! Reverse: Set this keyword to reverse the order of the profile data
! arrays in the K index (vertical) dimension.
! If REVERSE = 0, arrays are returned as they are stored in
! the netCDF input file (DEFAULT)
! REVERSE = 1, arrays are returned in reverse order to how
! they are stored in the netCDF input file.
! UNITS: N/A
! TYPE: INTEGER
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(IN), OPTIONAL
!
! Message_Log: Character string specifying a filename in which any
! msgs will be logged. If not specified, or if an
! error occurs opening the log file, the default action
! is to output msgs to standard output.
! UNITS: N/A
! TYPE: CHARACTER(*)
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(IN), OPTIONAL
!
! OPTIONAL OUTPUT ARGUMENTS:
! ID_Tag: Character string written into the ID_TAG global
! attribute field of the netCDF AtmProfile file.
! Identifies the dependent profile set.
! UNITS: N/A
! TYPE: CHARACTER(*)
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(OUT), OPTIONAL
!
! Title: Character string written into the TITLE global
! attribute field of the netCDF AtmProfile file.
! UNITS: N/A
! TYPE: CHARACTER(*)
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(OUT), OPTIONAL
!
! History: Character string written into the HISTORY global
! attribute field of the netCDF AtmProfile file.
! UNITS: N/A
! TYPE: CHARACTER(*)
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(OUT), OPTIONAL
!
! Comment: Character string written into the COMMENT global
! attribute field of the netCDF AtmProfile file.
! UNITS: N/A
! TYPE: CHARACTER(*)
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(OUT), OPTIONAL
!
! RCS_Id: Character string containing the Revision Control
! System Id field for the module.
! UNITS: N/A
! TYPE: CHARACTER(*)
! DIMENSION: Scalar
! ATTRIBUTES: OPTIONAL, INTENT(OUT)
!
! FUNCTION RESULT:
! Error_Status: The return value is an integer defining the error status.
! The error codes are defined in the Message_Handler module.
! If == SUCCESS the netCDF data read was successful.
! == FAILURE an unrecoverable error occurred.
! == WARNING an error occurred closing the netCDF
! input file after a successful read.
! UNITS: N/A
! TYPE: INTEGER
! DIMENSION: Scalar
!
! COMMENTS:
! If specified as the output data type, the INTENT on the output AtmProfile
! structure argument is IN OUT rather than just OUT. This is necessary
! because the argument may be defined on input. To prevent memory leaks,
! the IN OUT INTENT is a must.
!
!:sdoc-:
!------------------------------------------------------------------------------
FUNCTION Read_AtmProfile_netCDF( NC_Filename, & ! Input
AtmProfile , & ! Output
Quiet , & ! Optional input
Reverse , & ! Optional input
ID_Tag , & ! Optional output
Title , & ! Optional output
History , & ! Optional output
Comment , & ! Optional output
RCS_Id , & ! Revision control
Message_Log) & ! Error messaging
RESULT( Error_Status )
! Arguments
CHARACTER(*), INTENT(IN) :: NC_Filename
TYPE(AtmProfile_type) , INTENT(IN OUT) :: AtmProfile
INTEGER, OPTIONAL, INTENT(IN) :: Quiet
INTEGER, OPTIONAL, INTENT(IN) :: Reverse
CHARACTER(*), OPTIONAL, INTENT(OUT) :: ID_Tag
CHARACTER(*), OPTIONAL, INTENT(OUT) :: Title
CHARACTER(*), OPTIONAL, INTENT(OUT) :: History
CHARACTER(*), OPTIONAL, INTENT(OUT) :: Comment
CHARACTER(*), OPTIONAL, INTENT(OUT) :: RCS_Id
CHARACTER(*), OPTIONAL, INTENT(IN) :: Message_Log
! Function result
INTEGER :: Error_Status
! Function parameters
CHARACTER(*), PARAMETER :: ROUTINE_NAME = 'Read_AtmProfile_netCDF'
! Function variables
CHARACTER(ML) :: msg
LOGICAL :: Noisy, ReverseProfile
INTEGER :: NC_FileID
INTEGER :: NF90_Status
INTEGER :: n_Layers , k
INTEGER :: n_Absorbers, j
INTEGER :: n_Profiles
! Set up
! ------
Error_Status = SUCCESS
IF ( PRESENT(RCS_Id) ) RCS_Id = MODULE_RCS_ID
! Output informational msgs....
Noisy = .TRUE.
! ....unless the QUIET keyword is set.
IF ( PRESENT(Quiet) ) THEN
IF ( Quiet == SET ) Noisy = .FALSE.
END IF
! Do NOT reverse profile....
ReverseProfile = .FALSE.
! ....unless the REVERSE keyword is set.
IF ( PRESENT(Reverse) ) THEN
IF ( Reverse == SET ) ReverseProfile = .TRUE.
END IF
! Allocate the structure for the netCDF read
! ------------------------------------------
! Read the dimension values
! NOTE: this performs its own open/close of the file; the file is
! re-opened for the variable read below.
Error_Status = Inquire_AtmProfile_netCDF( NC_Filename , &
n_Layers =n_Layers , &
n_Absorbers=n_Absorbers, &
n_Profiles =n_Profiles , &
Message_Log=Message_Log )
IF ( Error_Status /= SUCCESS ) THEN
msg = 'Error obtaining AtmProfile dimensions from '//TRIM(NC_Filename)
CALL Read_Cleanup(); RETURN
END IF
! Allocate the structure
Error_Status = Allocate_AtmProfile( n_Layers,n_Absorbers,n_Profiles, &
AtmProfile,Message_Log=Message_Log )
IF ( Error_Status /= SUCCESS ) THEN
msg = 'Error occurred allocating AtmProfile structure.'
CALL Read_Cleanup(); RETURN
END IF
! Open the netCDF file for reading
! --------------------------------
NF90_Status = NF90_OPEN( NC_Filename,NF90_NOWRITE,NC_FileId )
IF ( NF90_Status /= NF90_NOERR ) THEN
msg = 'Error opening '//TRIM(NC_Filename)//' for read access - '//&
TRIM(NF90_STRERROR( NF90_Status ))
CALL Read_Cleanup(Destroy_Structure=.TRUE.); RETURN
END IF
! Read the global attributes
! --------------------------
Error_Status = ReadGAtts( NC_Filename , &
NC_FileID , &
! Release =AtmProfile%Release, &
! Version =AtmProfile%Version, &
ID_Tag =ID_Tag , &
Title =Title , &
History =History , &
Comment =Comment , &
Message_Log=Message_Log )
IF ( Error_Status /= SUCCESS ) THEN
msg = 'Error reading global attribute from '//TRIM(NC_Filename)
CALL Read_Cleanup(Close_File=.TRUE.,Destroy_Structure=.TRUE.); RETURN
END IF
! Check the release
! NOTE(review): the Release/Version reads above are commented out, so this
! check runs against the structure's current (allocated/default) Release
! value rather than the value stored in the file - confirm this is intended.
Error_Status = CheckRelease_AtmProfile( AtmProfile,Message_Log=Message_Log )
IF ( Error_Status /= SUCCESS ) THEN
msg = 'AtmProfile Release check failed for '//TRIM(NC_Filename)
CALL Read_Cleanup(Close_File=.TRUE.,Destroy_Structure=.TRUE.); RETURN
END IF
! Read the AtmProfile data
! ------------------------
Error_Status = ReadVar( NC_Filename , &
NC_FileID , &
AtmProfile , &
Message_Log=Message_Log )
IF ( Error_Status /= SUCCESS ) THEN
msg = 'Error reading AtmProfile variables from '//TRIM(NC_Filename)
CALL Read_Cleanup(Close_File=.TRUE.,Destroy_Structure=.TRUE.); RETURN
END IF
! Close the file
! --------------
NF90_Status = NF90_CLOSE( NC_FileId )
IF ( NF90_Status /= NF90_NOERR ) THEN
msg = 'Error closing input file - '//TRIM(NF90_STRERROR( NF90_Status ))
CALL Read_Cleanup(Destroy_Structure=.TRUE.); RETURN
END IF
! Finish up with the data structure
! ---------------------------------
! Fill the other Absorber_Units structure members
! (derived from the Absorber_Units_ID values read from file, via the
! module lookup tables)
DO j = 1, AtmProfile%n_Absorbers
AtmProfile%Absorber_Units_Name(j) = ATMPROFILE_ABSORBER_UNITS_NAME(AtmProfile%Absorber_Units_ID(j))
AtmProfile%Absorber_Units_LBLRTM(j) = ATMPROFILE_ABSORBER_UNITS_CHAR(AtmProfile%Absorber_Units_ID(j))
END DO
! Reverse the profile data direction if required
! (in-place flip of the vertical (K) dimension of every profile array)
IF ( ReverseProfile ) THEN
! Level data
k = AtmProfile%n_Levels
AtmProfile%Level_Pressure(1:k,:) = AtmProfile%Level_Pressure(k:1:-1,:)
AtmProfile%Level_Temperature(1:k,:) = AtmProfile%Level_Temperature(k:1:-1,:)
AtmProfile%Level_Absorber(1:k,:,:) = AtmProfile%Level_Absorber(k:1:-1,:,:)
AtmProfile%Level_Altitude(1:k,:) = AtmProfile%Level_Altitude(k:1:-1,:)
! Layer data
k = AtmProfile%n_Layers
AtmProfile%Layer_Pressure(1:k,:) = AtmProfile%Layer_Pressure(k:1:-1,:)
AtmProfile%Layer_Temperature(1:k,:) = AtmProfile%Layer_Temperature(k:1:-1,:)
AtmProfile%Layer_Absorber(1:k,:,:) = AtmProfile%Layer_Absorber(k:1:-1,:,:)
AtmProfile%Layer_Delta_Z(1:k,:) = AtmProfile%Layer_Delta_Z(k:1:-1,:)
END IF
! Output an info message
! ----------------------
IF ( Noisy ) THEN
CALL Info_AtmProfile( AtmProfile, msg )
CALL Display_Message( ROUTINE_NAME, &
'FILE: '//TRIM(NC_Filename)//'; '//TRIM(msg), &
INFORMATION, &
Message_Log=Message_Log )
END IF
CONTAINS
! Error handler: optionally close the file and/or destroy the partially
! filled output structure, then flag FAILURE and report the accumulated msg.
SUBROUTINE Read_CleanUp( Close_File, Destroy_Structure )
LOGICAL, OPTIONAL, INTENT(IN) :: Close_File
LOGICAL, OPTIONAL, INTENT(IN) :: Destroy_Structure
! Close file if necessary
IF ( PRESENT(Close_File) ) THEN
IF ( Close_File ) THEN
NF90_Status = NF90_CLOSE( NC_FileId )
IF ( NF90_Status /= NF90_NOERR ) &
msg = TRIM(msg)//'; Error closing input file during error cleanup - '//&
TRIM(NF90_STRERROR( NF90_Status ))
END IF
END IF
! Destroy the structure if necessary
IF ( PRESENT(Destroy_Structure) ) THEN
IF ( Destroy_Structure ) THEN
Error_Status = Destroy_AtmProfile(AtmProfile, Message_Log=Message_Log)
IF ( Error_Status /= SUCCESS ) &
msg = TRIM(msg)//'; Error destroying AtmProfile during error cleanup.'
END IF
END IF
! Set error status and print error msg
Error_Status = FAILURE
CALL Display_Message( ROUTINE_NAME,TRIM(msg),Error_Status,Message_Log=Message_Log )
END SUBROUTINE Read_CleanUp
END FUNCTION Read_AtmProfile_netCDF
!##################################################################################
!##################################################################################
!## ##
!## ## PRIVATE MODULE ROUTINES ## ##
!## ##
!##################################################################################
!##################################################################################
!------------------------------------------------------------------------------
!
! NAME:
! Convert_DateTime_to_Double
!
! PURPOSE:
! Subroutine to convert the data in the AtmProfileDateTime structure
! to a double precision value of YYYYMMDD.HH
!
! CALLING SEQUENCE:
! CALL Convert_DateTime_to_Double( AtmProfileDateTime, & ! Input
! DoubleDateTime ) ! Output
!
! INPUT ARGUMENTS:
! AtmProfileDateTime: Structure array containing date and time information.
! UNITS: N/A
! TYPE: AtmProfileDateTime_type
! DIMENSION: Rank-1
! ATTRIBUTES: INTENT(IN)
!
! OUTPUT ARGUMENTS:
! DoubleDateTime: Double precision floating point array holding
! the converted time in the format YYYYMMDD.HH
! where YYYY = year
! MM = month
! DD = day of month
! HH = hour of day (0-23)
! UNITS: N/A
! TYPE: REAL(Double)
! DIMENSION: Rank-1 (same size as AtmProfileDateTime)
! ATTRIBUTES: INTENT(OUT)
!
!------------------------------------------------------------------------------
! Pack each date/time structure into a double of the form YYYYMMDD.HH,
! where the integer part encodes year/month/day and the two fractional
! digits encode the hour of day.
SUBROUTINE Convert_DateTime_to_Double( aDT, dDT )
  TYPE(AtmProfileDateTime_type), INTENT(IN)  :: aDT(:)
  REAL(Double),                  INTENT(OUT) :: dDT(:)
  INTEGER :: i
  INTEGER :: yyyymmdd
  DO i = 1, SIZE(aDT)
    ! Integer part: e.g. year 2001, month 12, day 31 -> 20011231
    yyyymmdd = aDT(i)%Year*10000 + aDT(i)%Month*100 + aDT(i)%Day
    ! Fractional part: hour 0-23 stored as two decimal digits
    dDT(i) = REAL(yyyymmdd,Double) + REAL(aDT(i)%Hour,Double)/100.0_Double
  END DO
END SUBROUTINE Convert_DateTime_to_Double
!------------------------------------------------------------------------------
!
! NAME:
! Convert_DateTime_to_Type
!
! PURPOSE:
! Subroutine to convert a double precision date/time to an
! AtmProfileDateTime data type
!
! CALLING SEQUENCE:
! CALL Convert_DateTime_to_Type( DoubleDateTime , & ! Input
! AtmProfileDateTime ) ! Output
!
! INPUT ARGUMENTS:
! DoubleDateTime: Double precision floating point array holding
! the date and time in the format YYYYMMDD.HH
! where YYYY = year
! MM = month
! DD = day of month
! HH = hour of day (0-23)
! UNITS: N/A
! TYPE: REAL(Double)
! DIMENSION: Rank-1
! ATTRIBUTES: INTENT(IN)
! OUTPUT ARGUMENTS:
! AtmProfileDateTime: Structure array containing date and time information.
! UNITS: N/A
! TYPE: AtmProfileDateTime_type
! DIMENSION: Rank-1 (same size as DoubleDateTime)
! ATTRIBUTES: INTENT(OUT)
!
!
!------------------------------------------------------------------------------
! Unpack doubles of the form YYYYMMDD.HH into date/time structures.
! Note: (x - MOD(x,k))/k is exactly integer division x/k in Fortran
! (MOD takes the sign of x), so plain division is used here.
SUBROUTINE Convert_DateTime_to_Type( dDT, aDT )
  REAL(Double),                  INTENT(IN)  :: dDT(:)
  TYPE(AtmProfileDateTime_type), INTENT(OUT) :: aDT(:)
  INTEGER(Long) :: whole
  INTEGER :: i
  DO i = 1, SIZE( dDT )
    ! Integer part holds the date as YYYYMMDD
    whole = INT(dDT(i),Long)
    aDT(i)%Year  = whole / 10000_Long
    aDT(i)%Month = MOD(whole,10000_Long) / 100_Long
    aDT(i)%Day   = MOD(whole,100_Long)
    ! Fractional part holds the hour of day as two decimal digits
    aDT(i)%Hour  = NINT( MOD(dDT(i),ONE) * 100.0_Double )
  END DO
END SUBROUTINE Convert_DateTime_to_Type
!--------------------------------------------------------------------------------
!
! NAME:
! WriteGAtts
!
! PURPOSE:
! Function to write the global attributes to a netCDF AtmProfile
! data file.
!
! CALLING SEQUENCE:
! Error_Status = WriteGAtts( NC_Filename , & ! Input
! NC_FileID , & ! Input
! Version =Version , & ! Optional input
! Title =Title , & ! Optional input
! History =History , & ! Optional input
! Comment =Comment , & ! Optional input
! ID_Tag =ID_Tag , & ! Optional input
! Message_Log=Message_Log ) ! Error messaging
!
! INPUT ARGUMENTS:
! NC_Filename: Character string specifying the name of the
! netCDF AtmProfile format data file to create.
! UNITS: N/A
! TYPE: CHARACTER(*)
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(IN)
!
! NC_FileID: NetCDF file ID number returned from the
! Open_ or Create_AtmProfile_netCDF() function.
! UNITS: N/A
! TYPE: Integer
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(IN)
!
!
! OPTIONAL INPUT ARGUMENTS:
! Version: The version number of the netCDF AtmProfile file.
! UNITS: N/A
! TYPE: INTEGER
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(IN), OPTIONAL
! Title: Character string written into the TITLE global
! attribute field of the netCDF AtmProfile file.
! Should contain a succinct description of what
! is in the netCDF datafile.
! UNITS: N/A
! TYPE: CHARACTER(*)
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(IN), OPTIONAL
!
! History: Character string written into the HISTORY global
! attribute field of the netCDF AtmProfile file.
! UNITS: N/A
! TYPE: CHARACTER(*)
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(IN), OPTIONAL
!
! Comment: Character string written into the COMMENT global
! attribute field of the netCDF AtmProfile file.
! UNITS: N/A
! TYPE: CHARACTER(*)
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(IN), OPTIONAL
!
! ID_Tag: Character string written into the ID_TAG global
! attribute field of the netCDF AtmProfile file.
! Should contain a short tag used to identify the
! profile set.
! UNITS: N/A
! TYPE: CHARACTER(*)
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(IN), OPTIONAL
!
! Message_Log: Character string specifying a filename in which
! any Messages will be logged. If not specified,
! or if an error occurs opening the log file, the
! default action is to output Messages to standard
! output.
! UNITS: N/A
! TYPE: CHARACTER(*)
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(IN), OPTIONAL
!
! FUNCTION RESULT:
! Error_Status: The return value is an integer defining the error status.
!                     The error codes are defined in the Message_Handler module.
!                     If == SUCCESS the global attribute write was successful.
!                        == FAILURE an error occurred writing the supplied
!                                   global attributes.
! UNITS: N/A
! TYPE: INTEGER
! DIMENSION: Scalar
!
!--------------------------------------------------------------------------------
FUNCTION WriteGAtts( NC_Filename, & ! Input
NC_FileID , & ! Input
Version , & ! Optional input
Title , & ! Optional input
History , & ! Optional input
Comment , & ! Optional input
ID_Tag , & ! Optional input
Message_Log) & ! Error messaging
RESULT( Error_Status )
! Write the mandatory (write-module history, creation date/time, Release)
! and any supplied optional (Version, ID_Tag, Title, History, Comment)
! global attributes to the open netCDF file NC_FileID.
! Returns SUCCESS, or FAILURE if any attribute write fails.
! NOTE(review): on any failure the contained cleanup routine CLOSES the
! netCDF file before returning.
! Arguments
CHARACTER(*), INTENT(IN) :: NC_Filename
INTEGER , INTENT(IN) :: NC_FileID
INTEGER , OPTIONAL, INTENT(IN) :: Version
CHARACTER(*), OPTIONAL, INTENT(IN) :: Title
CHARACTER(*), OPTIONAL, INTENT(IN) :: History
CHARACTER(*), OPTIONAL, INTENT(IN) :: Comment
CHARACTER(*), OPTIONAL, INTENT(IN) :: ID_Tag
CHARACTER(*), OPTIONAL, INTENT(IN) :: Message_Log
! Function result
INTEGER :: Error_Status
! Local parameters
CHARACTER(*), PARAMETER :: ROUTINE_NAME = 'Write_AtmProfile_GAtts'
CHARACTER(*), PARAMETER :: WRITE_MODULE_HISTORY_GATTNAME = 'write_module_history'
CHARACTER(*), PARAMETER :: CREATION_DATE_AND_TIME_GATTNAME = 'creation_date_and_time'
! Local variables
CHARACTER(ML) :: msg
CHARACTER(ML) :: GAttName
CHARACTER(8) :: cdate
CHARACTER(10) :: ctime
CHARACTER(5) :: czone
INTEGER :: Ver
INTEGER :: NF90_Status
! Default-initialised structure; used only as the source of the
! default Release and Version values.
TYPE(AtmProfile_type) :: AtmProfile_Default
! Set up
! ------
Error_Status = SUCCESS
msg = ' '
! Mandatory global attributes
! ---------------------------
! Software ID
GAttName = WRITE_MODULE_HISTORY_GATTNAME
NF90_Status = NF90_PUT_ATT( NC_FileID, &
NF90_GLOBAL, &
TRIM(GAttName), &
MODULE_RCS_ID )
IF ( NF90_Status /= NF90_NOERR ) THEN
CALL WriteGAtts_Cleanup(); RETURN
END IF
! Creation date, written as "YYYY/MM/DD, hh:mm:ss <zone>UTC"
CALL DATE_AND_TIME( cdate, ctime, czone )
GAttName = CREATION_DATE_AND_TIME_GATTNAME
NF90_Status = NF90_PUT_ATT( NC_FileID, &
NF90_GLOBAL, &
TRIM(GAttName), &
cdate(1:4)//'/'//cdate(5:6)//'/'//cdate(7:8)//', '// &
ctime(1:2)//':'//ctime(3:4)//':'//ctime(5:6)//' '// &
czone//'UTC' )
IF ( NF90_Status /= NF90_NOERR ) THEN
CALL WriteGAtts_Cleanup(); RETURN
END IF
! The Release (always the structure default)
GAttName = RELEASE_GATTNAME
NF90_Status = NF90_PUT_ATT( NC_FileId, &
NF90_GLOBAL, &
TRIM(GAttName), &
AtmProfile_Default%Release )
IF ( NF90_Status /= NF90_NOERR ) THEN
CALL WriteGAtts_Cleanup(); RETURN
END IF
! Optional global attributes
! --------------------------
! The Version. The structure default is used if the optional
! argument was not supplied.
IF ( PRESENT(Version) ) THEN
Ver = Version
ELSE
Ver = AtmProfile_Default%Version
END IF
GAttName = VERSION_GATTNAME
NF90_Status = NF90_PUT_ATT( NC_FileId, &
NF90_GLOBAL, &
TRIM(GAttName), &
Ver )
IF ( NF90_Status /= NF90_NOERR ) THEN
CALL WriteGAtts_Cleanup(); RETURN
END IF
! The ID_Tag
IF ( PRESENT(ID_Tag) ) THEN
GAttName = ID_TAG_GATTNAME
NF90_Status = NF90_PUT_ATT( NC_FileID, &
NF90_GLOBAL, &
TRIM(GAttName), &
ID_Tag )
IF ( NF90_Status /= NF90_NOERR ) THEN
CALL WriteGAtts_Cleanup(); RETURN
END IF
END IF
! The Title
IF ( PRESENT(Title) ) THEN
GAttName = TITLE_GATTNAME
NF90_Status = NF90_PUT_ATT( NC_FileID, &
NF90_GLOBAL, &
TRIM(GAttName), &
Title )
IF ( NF90_Status /= NF90_NOERR ) THEN
CALL WriteGAtts_Cleanup(); RETURN
END IF
END IF
! The History
IF ( PRESENT(History) ) THEN
GAttName = HISTORY_GATTNAME
NF90_Status = NF90_PUT_ATT( NC_FileID, &
NF90_GLOBAL, &
TRIM(GAttName), &
History )
IF ( NF90_Status /= NF90_NOERR ) THEN
CALL WriteGAtts_Cleanup(); RETURN
END IF
END IF
! The Comment
IF ( PRESENT(Comment) ) THEN
GAttName = COMMENT_GATTNAME
NF90_Status = NF90_PUT_ATT( NC_FileID, &
NF90_GLOBAL, &
TRIM(GAttName), &
Comment )
IF ( NF90_Status /= NF90_NOERR ) THEN
CALL WriteGAtts_Cleanup(); RETURN
END IF
END IF
CONTAINS
! Error handler: closes the netCDF file, sets Error_Status to FAILURE,
! and logs a message naming the attribute (GAttName) that failed.
SUBROUTINE WriteGAtts_CleanUp()
! Close file
NF90_Status = NF90_CLOSE( NC_FileID )
IF ( NF90_Status /= NF90_NOERR ) &
msg = '; Error closing input file during error cleanup - '//&
TRIM(NF90_STRERROR( NF90_Status ) )
! Set error status and print error msg
Error_Status = FAILURE
CALL Display_Message( ROUTINE_NAME, &
'Error writing '//TRIM(GAttName)//' attribute to '//&
TRIM(NC_Filename)//' - '// &
TRIM(NF90_STRERROR( NF90_Status ) )//TRIM(msg), &
Error_Status, &
Message_Log=Message_Log )
END SUBROUTINE WriteGAtts_CleanUp
END FUNCTION WriteGAtts
!------------------------------------------------------------------------------
!
! NAME:
! ReadGAtts
!
! PURPOSE:
! Function to read the global attributes from a netCDF AtmProfile
! data file.
!
! CALLING SEQUENCE:
! Error_Status = ReadGAtts( NC_Filename , & ! Input
! NC_FileID , & ! Input
! Release =Release , & ! Optional output
! Version =Version , & ! Optional output
! ID_Tag =ID_Tag , & ! Optional output
! Title =Title , & ! Optional output
! History =History , & ! Optional output
! Comment =Comment , & ! Optional output
! Message_Log=Message_Log ) ! Error messaging
!
! INPUT ARGUMENTS:
! NC_Filename: Character string specifying the name of the
! netCDF AtmProfile format data file to read from.
! UNITS: N/A
! TYPE: CHARACTER(*)
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(IN)
!
!       NC_FileID:        NetCDF file ID number returned from the
!                         Open_ or Create_AtmProfile_netCDF() function.
! UNITS: N/A
! TYPE: Integer
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(IN)
!
! OPTIONAL INPUT ARGUMENTS:
! Message_Log: Character string specifying a filename in which
! any msgs will be logged. If not specified,
! or if an error occurs opening the log file, the
! default action is to output msgs to standard
! output.
! UNITS: N/A
! TYPE: CHARACTER(*)
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(IN), OPTIONAL
!
! OPTIONAL OUTPUT ARGUMENTS:
! Release: The release number of the netCDF AtmProfile file.
! UNITS: N/A
! TYPE: INTEGER
! DIMENSION: Scalar
!                     ATTRIBUTES: INTENT(OUT), OPTIONAL
!
! Version: The version number of the netCDF AtmProfile file.
! UNITS: N/A
! TYPE: INTEGER
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(OUT), OPTIONAL
!
! ID_Tag: Character string written into the ID_TAG global
! attribute field of the netCDF AtmProfile file.
! Should contain a short tag used to identify the
! dependent profile set.
! UNITS: N/A
! TYPE: CHARACTER(*)
! DIMENSION: Scalar
! ATTRIBUTES: OPTIONAL, INTENT(OUT)
!
! Title: Character string written into the TITLE global
! attribute field of the netCDF AtmProfile file.
! Should contain a succinct description of what
! is in the netCDF datafile.
! UNITS: N/A
! TYPE: CHARACTER(*)
! DIMENSION: Scalar
! ATTRIBUTES: OPTIONAL, INTENT(OUT)
!
! History: Character string written into the HISTORY global
! attribute field of the netCDF AtmProfile file.
! UNITS: N/A
! TYPE: CHARACTER(*)
! DIMENSION: Scalar
! ATTRIBUTES: OPTIONAL, INTENT(OUT)
!
! Comment: Character string written into the COMMENT global
! attribute field of the netCDF AtmProfile file.
! UNITS: N/A
! TYPE: CHARACTER(*)
! DIMENSION: Scalar
! ATTRIBUTES: OPTIONAL, INTENT(OUT)
!
! FUNCTION RESULT:
! Error_Status: The return value is an integer defining the error status.
! The error codes are defined in the Message_Handler module.
! If == SUCCESS the global attribute read was successful.
! == FAILURE an error occurred.
! UNITS: N/A
! TYPE: INTEGER
! DIMENSION: Scalar
!
!------------------------------------------------------------------------------
FUNCTION ReadGAtts( NC_Filename , & ! Input
NC_FileID , & ! Input
Release , & ! Optional output
Version , & ! Optional output
ID_Tag , & ! Optional output
Title , & ! Optional output
History , & ! Optional output
Comment , & ! Optional output
Message_Log ) & ! Error messaging
RESULT( Error_Status )
! Read the requested global attributes from the open netCDF file
! NC_FileID. If Release is requested, the file's value is also CHECKED
! against the structure default; a mismatch causes a FAILURE return.
! Character attributes are truncated to the length of the supplied
! output argument. Returns SUCCESS, or FAILURE on any read error.
! Arguments
CHARACTER(*), INTENT(IN) :: NC_Filename
INTEGER, INTENT(IN) :: NC_FileID
INTEGER , OPTIONAL, INTENT(OUT) :: Release
INTEGER , OPTIONAL, INTENT(OUT) :: Version
CHARACTER(*), OPTIONAL, INTENT(OUT) :: ID_Tag
CHARACTER(*), OPTIONAL, INTENT(OUT) :: Title
CHARACTER(*), OPTIONAL, INTENT(OUT) :: History
CHARACTER(*), OPTIONAL, INTENT(OUT) :: Comment
CHARACTER(*), OPTIONAL, INTENT(IN) :: Message_Log
! Function result
INTEGER :: Error_Status
! Local parameters
CHARACTER(*), PARAMETER :: ROUTINE_NAME = 'ReadGAtts'
! Local variables
CHARACTER(256) :: GAttName
! Fixed-size read buffer for character-valued attributes
CHARACTER(5000) :: GAttString
INTEGER :: Rel
INTEGER :: NF90_Status
! Default-initialised structure; provides the expected Release value
TYPE(AtmProfile_type) :: AtmProfile_Default
! Set up
! ------
Error_Status = SUCCESS
! The mandatory GAtts for checking
! --------------------------------
! The Release. Fails if the file value does not match the default.
IF ( PRESENT(Release) ) THEN
GAttName = RELEASE_GATTNAME
NF90_Status = NF90_GET_ATT( NC_FileId, &
NF90_GLOBAL, &
TRIM(GAttName), &
Rel )
IF ( NF90_Status /= NF90_NOERR .OR. Rel /= AtmProfile_Default%Release) THEN
CALL ReadGAtts_Cleanup(); RETURN
END IF
! At this point the file value equals the default, so the default
! is returned.
Release = AtmProfile_Default%Release
END IF
! The optional GAtts
! ------------------
! The Version
IF ( PRESENT(Version) ) THEN
GAttName = VERSION_GATTNAME
NF90_Status = NF90_GET_ATT( NC_FileID, &
NF90_GLOBAL, &
TRIM(GAttName), &
Version )
IF ( NF90_Status /= NF90_NOERR ) THEN
CALL ReadGAtts_Cleanup(); RETURN
END IF
END IF
! The ID_Tag
IF ( PRESENT(ID_Tag) ) THEN
GAttString = ' '; ID_Tag = ' '
GAttName = ID_TAG_GATTNAME
NF90_Status = NF90_GET_ATT( NC_FileID, &
NF90_GLOBAL, &
TRIM(GAttName), &
GAttString )
IF ( NF90_Status /= NF90_NOERR ) THEN
CALL ReadGAtts_Cleanup(); RETURN
END IF
! NOTE(review): StrClean presumably removes non-printing characters
! from the read buffer -- confirm against its definition.
CALL StrClean( GAttString )
ID_Tag = GAttString(1:MIN( LEN(ID_Tag), LEN_TRIM(GAttString) ))
END IF
! The Title
IF ( PRESENT(Title) ) THEN
GAttString = ' '; Title = ' '
GAttName = TITLE_GATTNAME
NF90_Status = NF90_GET_ATT( NC_FileID, &
NF90_GLOBAL, &
TRIM(GAttName), &
GAttString )
IF ( NF90_Status /= NF90_NOERR ) THEN
CALL ReadGAtts_Cleanup(); RETURN
END IF
CALL StrClean( GAttString )
Title = GAttString(1:MIN( LEN(Title), LEN_TRIM(GAttString) ))
END IF
! The History
IF ( PRESENT(History) ) THEN
GAttString = ' '; History = ' '
GAttName = HISTORY_GATTNAME
NF90_Status = NF90_GET_ATT( NC_FileID, &
NF90_GLOBAL, &
TRIM(GAttName), &
GAttString )
IF ( NF90_Status /= NF90_NOERR ) THEN
CALL ReadGAtts_Cleanup(); RETURN
END IF
CALL StrClean( GAttString )
History = GAttString(1:MIN( LEN(History), LEN_TRIM(GAttString) ))
END IF
! The Comment
IF ( PRESENT(Comment) ) THEN
GAttString = ' '; Comment = ' '
GAttName = COMMENT_GATTNAME
NF90_Status = NF90_GET_ATT( NC_FileID, &
NF90_GLOBAL, &
TRIM(GAttName), &
GAttString )
IF ( NF90_Status /= NF90_NOERR ) THEN
CALL ReadGAtts_Cleanup(); RETURN
END IF
CALL StrClean( GAttString )
Comment = GAttString(1:MIN( LEN(Comment), LEN_TRIM(GAttString) ))
END IF
CONTAINS
! Error handler: sets Error_Status to FAILURE and logs a message naming
! the attribute (GAttName) that failed. Does NOT close the file.
SUBROUTINE ReadGAtts_CleanUp()
Error_Status = FAILURE
CALL Display_Message( ROUTINE_NAME, &
'Error reading '//TRIM(GAttName)//&
' attribute from '//TRIM(NC_Filename)//' - '// &
TRIM(NF90_STRERROR( NF90_Status ) ), &
Error_Status, &
Message_Log=Message_Log )
END SUBROUTINE ReadGAtts_CleanUp
END FUNCTION ReadGAtts
!------------------------------------------------------------------------------
!
! NAME:
! DefineVar
!
! PURPOSE:
! Function to define the AtmProfile variables in an output
! netCDF file.
!
! CALLING SEQUENCE:
! Error_Status = DefineVar( NC_Filename , & ! Input
! NC_FileID , & ! Input
! Level_DimID , & ! Input
! Layer_DimID , & ! Input
! Absorber_DimID , & ! Input
! Profile_DimID , & ! Input
! PL_DimID , & ! Input
! Message_Log=Message_Log ) ! Error messaging
!
! INPUT ARGUMENTS
! NC_Filename: Character string specifying the name of the
! already created netCDF AtmProfile format file.
! UNITS: N/A
! TYPE: CHARACTER(*)
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(IN)
!
! NC_FileID: NetCDF file ID number of the file in which
!                         the variables are to be defined.
! UNITS: N/A
! TYPE: INTEGER
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(IN)
!
! Level_DimID: NetCDF dimension ID of the number of levels
! (n_Levels).
! UNITS: N/A
! TYPE: INTEGER
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(IN)
!
! Layer_DimID: NetCDF dimension ID of the number of layers
! (n_Layers).
! UNITS: N/A
! TYPE: INTEGER
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(IN)
!
! Absorber_DimID: NetCDF dimension ID of the number of absorbers
! (n_Absorbers).
! UNITS: N/A
! TYPE: INTEGER
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(IN)
!
! Profile_DimID: NetCDF dimension ID of the number of profiles
! (n_Profiles).
! UNITS: N/A
! TYPE: INTEGER
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(IN)
!
! PL_DimID: NetCDF dimension ID for the string length of
! the profile description.
! UNITS: N/A
! TYPE: INTEGER
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(IN)
!
! OPTIONAL INPUT ARGUMENTS
! Message_Log: Character string specifying a filename in which any
! messages will be logged. If not specified, or if an
! error occurs opening the log file, the default action
! is to output messages to standard output.
! UNITS: N/A
! TYPE: CHARACTER(*)
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(IN), OPTIONAL
!
!------------------------------------------------------------------------------
FUNCTION DefineVar( NC_Filename , & ! Input
NC_FileID , & ! Input
Level_DimID , & ! Input
Layer_DimID , & ! Input
Absorber_DimID, & ! Input
Profile_DimID , & ! Input
PL_DimID , & ! Input
Message_Log ) & ! Error messaging
RESULT( Error_Status )
! Define all the AtmProfile variables, and write the longname,
! description, units, and fillvalue attributes of each, in the output
! netCDF file identified by NC_FileID (which must be in define mode).
! Returns SUCCESS, or FAILURE (after closing the file) on any error.
!
! BUGFIX: the attribute-write checks previously compared the netCDF
! return codes against SUCCESS (a Message_Handler constant) rather than
! NF90_NOERR; that only worked because both happen to be zero. All
! checks now compare against NF90_NOERR.
! Arguments
CHARACTER(*), INTENT(IN) :: NC_Filename
INTEGER , INTENT(IN) :: NC_FileID
INTEGER , INTENT(IN) :: Level_DimID
INTEGER , INTENT(IN) :: Layer_DimID
INTEGER , INTENT(IN) :: Absorber_DimID
INTEGER , INTENT(IN) :: Profile_DimID
INTEGER , INTENT(IN) :: PL_DimID
CHARACTER(*), OPTIONAL, INTENT(IN) :: Message_Log
! Function result
INTEGER :: Error_Status
! Local parameters
CHARACTER(*), PARAMETER :: ROUTINE_NAME = 'DefineVar'
! Local variables
CHARACTER(ML) :: msg
! Element 1 holds the NF90_DEF_VAR status; elements 1-4 hold the four
! NF90_PUT_ATT statuses for each variable.
INTEGER :: NF90_Status(4)
INTEGER :: varID
! Set up
! ------
Error_Status = SUCCESS
! Begin all the variable definitions
! ----------------------------------
! The profile description (string length x profile)
NF90_Status(1) = NF90_DEF_VAR( NC_FileID,DESCRIPTION_VARNAME,DESCRIPTION_TYPE, &
dimIDs=(/PL_DimID,Profile_DimID/),varID=VarID )
IF ( NF90_Status(1) /= NF90_NOERR ) THEN
msg = 'Error defining '//DESCRIPTION_VARNAME//' variable in '//&
TRIM(NC_Filename)//' - '//TRIM(NF90_STRERROR( NF90_Status(1) ))
CALL DefineVar_Cleanup(); RETURN
END IF
NF90_Status(1) = NF90_PUT_ATT( NC_FileID,VarID,LONGNAME_ATTNAME,DESCRIPTION_LONGNAME )
NF90_Status(2) = NF90_PUT_ATT( NC_FileID,VarID,DESCRIPTION_ATTNAME,DESCRIPTION_DESCRIPTION )
NF90_Status(3) = NF90_PUT_ATT( NC_FileID,VarID,UNITS_ATTNAME,DESCRIPTION_UNITS )
NF90_Status(4) = NF90_PUT_ATT( NC_FileID,VarID,FILLVALUE_ATTNAME,DESCRIPTION_FILLVALUE )
IF ( ANY(NF90_Status /= NF90_NOERR) ) THEN
msg = 'Error writing '//DESCRIPTION_VARNAME//' variable attributes to '//TRIM(NC_Filename)
CALL DefineVar_Cleanup(); RETURN
END IF
! The climatology model (profile)
NF90_Status(1) = NF90_DEF_VAR( NC_FileID,CLIMATOLOGY_MODEL_VARNAME,CLIMATOLOGY_MODEL_TYPE, &
dimIDs=(/Profile_DimID/),varID=VarID )
IF ( NF90_Status(1) /= NF90_NOERR ) THEN
msg = 'Error defining '//CLIMATOLOGY_MODEL_VARNAME//' variable in '//&
TRIM(NC_Filename)//' - '//TRIM(NF90_STRERROR( NF90_Status(1) ))
CALL DefineVar_Cleanup(); RETURN
END IF
NF90_Status(1) = NF90_PUT_ATT( NC_FileID,VarID,LONGNAME_ATTNAME,CLIMATOLOGY_MODEL_LONGNAME )
NF90_Status(2) = NF90_PUT_ATT( NC_FileID,VarID,DESCRIPTION_ATTNAME,CLIMATOLOGY_MODEL_DESCRIPTION )
NF90_Status(3) = NF90_PUT_ATT( NC_FileID,VarID,UNITS_ATTNAME,CLIMATOLOGY_MODEL_UNITS )
NF90_Status(4) = NF90_PUT_ATT( NC_FileID,VarID,FILLVALUE_ATTNAME,CLIMATOLOGY_MODEL_FILLVALUE )
IF ( ANY(NF90_Status /= NF90_NOERR) ) THEN
msg = 'Error writing '//CLIMATOLOGY_MODEL_VARNAME//' variable attributes to '//TRIM(NC_Filename)
CALL DefineVar_Cleanup(); RETURN
END IF
! The date/time (profile)
NF90_Status(1) = NF90_DEF_VAR( NC_FileID,DATETIME_VARNAME,DATETIME_TYPE, &
dimIDs=(/Profile_DimID/),varID=VarID )
IF ( NF90_Status(1) /= NF90_NOERR ) THEN
msg = 'Error defining '//DATETIME_VARNAME//' variable in '//&
TRIM(NC_Filename)//' - '//TRIM(NF90_STRERROR( NF90_Status(1) ))
CALL DefineVar_Cleanup(); RETURN
END IF
NF90_Status(1) = NF90_PUT_ATT( NC_FileID,VarID,LONGNAME_ATTNAME,DATETIME_LONGNAME )
NF90_Status(2) = NF90_PUT_ATT( NC_FileID,VarID,DESCRIPTION_ATTNAME,DATETIME_DESCRIPTION )
NF90_Status(3) = NF90_PUT_ATT( NC_FileID,VarID,UNITS_ATTNAME,DATETIME_UNITS )
NF90_Status(4) = NF90_PUT_ATT( NC_FileID,VarID,FILLVALUE_ATTNAME,DATETIME_FILLVALUE )
IF ( ANY(NF90_Status /= NF90_NOERR) ) THEN
msg = 'Error writing '//DATETIME_VARNAME//' variable attributes to '//TRIM(NC_Filename)
CALL DefineVar_Cleanup(); RETURN
END IF
! The latitude (profile)
NF90_Status(1) = NF90_DEF_VAR( NC_FileID,LATITUDE_VARNAME,LATITUDE_TYPE, &
dimIDs=(/Profile_DimID/),varID=VarID )
IF ( NF90_Status(1) /= NF90_NOERR ) THEN
msg = 'Error defining '//LATITUDE_VARNAME//' variable in '//&
TRIM(NC_Filename)//' - '//TRIM(NF90_STRERROR( NF90_Status(1) ))
CALL DefineVar_Cleanup(); RETURN
END IF
NF90_Status(1) = NF90_PUT_ATT( NC_FileID,VarID,LONGNAME_ATTNAME,LATITUDE_LONGNAME )
NF90_Status(2) = NF90_PUT_ATT( NC_FileID,VarID,DESCRIPTION_ATTNAME,LATITUDE_DESCRIPTION )
NF90_Status(3) = NF90_PUT_ATT( NC_FileID,VarID,UNITS_ATTNAME,LATITUDE_UNITS )
NF90_Status(4) = NF90_PUT_ATT( NC_FileID,VarID,FILLVALUE_ATTNAME,LATITUDE_FILLVALUE )
IF ( ANY(NF90_Status /= NF90_NOERR) ) THEN
msg = 'Error writing '//LATITUDE_VARNAME//' variable attributes to '//TRIM(NC_Filename)
CALL DefineVar_Cleanup(); RETURN
END IF
! The longitude (profile)
NF90_Status(1) = NF90_DEF_VAR( NC_FileID,LONGITUDE_VARNAME,LONGITUDE_TYPE, &
dimIDs=(/Profile_DimID/),varID=VarID )
IF ( NF90_Status(1) /= NF90_NOERR ) THEN
msg = 'Error defining '//LONGITUDE_VARNAME//' variable in '//&
TRIM(NC_Filename)//' - '//TRIM(NF90_STRERROR( NF90_Status(1) ))
CALL DefineVar_Cleanup(); RETURN
END IF
NF90_Status(1) = NF90_PUT_ATT( NC_FileID,VarID,LONGNAME_ATTNAME,LONGITUDE_LONGNAME )
NF90_Status(2) = NF90_PUT_ATT( NC_FileID,VarID,DESCRIPTION_ATTNAME,LONGITUDE_DESCRIPTION )
NF90_Status(3) = NF90_PUT_ATT( NC_FileID,VarID,UNITS_ATTNAME,LONGITUDE_UNITS )
NF90_Status(4) = NF90_PUT_ATT( NC_FileID,VarID,FILLVALUE_ATTNAME,LONGITUDE_FILLVALUE )
IF ( ANY(NF90_Status /= NF90_NOERR) ) THEN
msg = 'Error writing '//LONGITUDE_VARNAME//' variable attributes to '//TRIM(NC_Filename)
CALL DefineVar_Cleanup(); RETURN
END IF
! The surface altitude (profile)
NF90_Status(1) = NF90_DEF_VAR( NC_FileID,SURFACE_ALTITUDE_VARNAME,SURFACE_ALTITUDE_TYPE, &
dimIDs=(/Profile_DimID/),varID=VarID )
IF ( NF90_Status(1) /= NF90_NOERR ) THEN
msg = 'Error defining '//SURFACE_ALTITUDE_VARNAME//' variable in '//&
TRIM(NC_Filename)//' - '//TRIM(NF90_STRERROR( NF90_Status(1) ))
CALL DefineVar_Cleanup(); RETURN
END IF
NF90_Status(1) = NF90_PUT_ATT( NC_FileID,VarID,LONGNAME_ATTNAME,SURFACE_ALTITUDE_LONGNAME )
NF90_Status(2) = NF90_PUT_ATT( NC_FileID,VarID,DESCRIPTION_ATTNAME,SURFACE_ALTITUDE_DESCRIPTION )
NF90_Status(3) = NF90_PUT_ATT( NC_FileID,VarID,UNITS_ATTNAME,SURFACE_ALTITUDE_UNITS )
NF90_Status(4) = NF90_PUT_ATT( NC_FileID,VarID,FILLVALUE_ATTNAME,SURFACE_ALTITUDE_FILLVALUE )
IF ( ANY(NF90_Status /= NF90_NOERR) ) THEN
msg = 'Error writing '//SURFACE_ALTITUDE_VARNAME//' variable attributes to '//TRIM(NC_Filename)
CALL DefineVar_Cleanup(); RETURN
END IF
! The absorber ID (absorber)
NF90_Status(1) = NF90_DEF_VAR( NC_FileID,ABSORBER_ID_VARNAME,ABSORBER_ID_TYPE, &
dimIDs=(/Absorber_DimID/),varID=VarID )
IF ( NF90_Status(1) /= NF90_NOERR ) THEN
msg = 'Error defining '//ABSORBER_ID_VARNAME//' variable in '//&
TRIM(NC_Filename)//' - '//TRIM(NF90_STRERROR( NF90_Status(1) ))
CALL DefineVar_Cleanup(); RETURN
END IF
NF90_Status(1) = NF90_PUT_ATT( NC_FileID,VarID,LONGNAME_ATTNAME,ABSORBER_ID_LONGNAME )
NF90_Status(2) = NF90_PUT_ATT( NC_FileID,VarID,DESCRIPTION_ATTNAME,ABSORBER_ID_DESCRIPTION )
NF90_Status(3) = NF90_PUT_ATT( NC_FileID,VarID,UNITS_ATTNAME,ABSORBER_ID_UNITS )
NF90_Status(4) = NF90_PUT_ATT( NC_FileID,VarID,FILLVALUE_ATTNAME,ABSORBER_ID_FILLVALUE )
IF ( ANY(NF90_Status /= NF90_NOERR) ) THEN
msg = 'Error writing '//ABSORBER_ID_VARNAME//' variable attributes to '//TRIM(NC_Filename)
CALL DefineVar_Cleanup(); RETURN
END IF
! The absorber units ID (absorber)
NF90_Status(1) = NF90_DEF_VAR( NC_FileID,ABSORBER_UNITS_ID_VARNAME,ABSORBER_UNITS_ID_TYPE, &
dimIDs=(/Absorber_DimID/),varID=VarID )
IF ( NF90_Status(1) /= NF90_NOERR ) THEN
msg = 'Error defining '//ABSORBER_UNITS_ID_VARNAME//' variable in '//&
TRIM(NC_Filename)//' - '//TRIM(NF90_STRERROR( NF90_Status(1) ))
CALL DefineVar_Cleanup(); RETURN
END IF
NF90_Status(1) = NF90_PUT_ATT( NC_FileID,VarID,LONGNAME_ATTNAME,ABSORBER_UNITS_ID_LONGNAME )
NF90_Status(2) = NF90_PUT_ATT( NC_FileID,VarID,DESCRIPTION_ATTNAME,ABSORBER_UNITS_ID_DESCRIPTION )
NF90_Status(3) = NF90_PUT_ATT( NC_FileID,VarID,UNITS_ATTNAME,ABSORBER_UNITS_ID_UNITS )
NF90_Status(4) = NF90_PUT_ATT( NC_FileID,VarID,FILLVALUE_ATTNAME,ABSORBER_UNITS_ID_FILLVALUE )
IF ( ANY(NF90_Status /= NF90_NOERR) ) THEN
msg = 'Error writing '//ABSORBER_UNITS_ID_VARNAME//' variable attributes to '//TRIM(NC_Filename)
CALL DefineVar_Cleanup(); RETURN
END IF
! The level pressure (level x profile)
NF90_Status(1) = NF90_DEF_VAR( NC_FileID,LEVEL_PRESSURE_VARNAME,LEVEL_PRESSURE_TYPE, &
dimIDs=(/Level_DimID,Profile_DimID/),varID=VarID )
IF ( NF90_Status(1) /= NF90_NOERR ) THEN
msg = 'Error defining '//LEVEL_PRESSURE_VARNAME//' variable in '//&
TRIM(NC_Filename)//' - '//TRIM(NF90_STRERROR( NF90_Status(1) ))
CALL DefineVar_Cleanup(); RETURN
END IF
NF90_Status(1) = NF90_PUT_ATT( NC_FileID,VarID,LONGNAME_ATTNAME,LEVEL_PRESSURE_LONGNAME )
NF90_Status(2) = NF90_PUT_ATT( NC_FileID,VarID,DESCRIPTION_ATTNAME,LEVEL_PRESSURE_DESCRIPTION )
NF90_Status(3) = NF90_PUT_ATT( NC_FileID,VarID,UNITS_ATTNAME,LEVEL_PRESSURE_UNITS )
NF90_Status(4) = NF90_PUT_ATT( NC_FileID,VarID,FILLVALUE_ATTNAME,LEVEL_PRESSURE_FILLVALUE )
IF ( ANY(NF90_Status /= NF90_NOERR) ) THEN
msg = 'Error writing '//LEVEL_PRESSURE_VARNAME//' variable attributes to '//TRIM(NC_Filename)
CALL DefineVar_Cleanup(); RETURN
END IF
! The level temperature (level x profile)
NF90_Status(1) = NF90_DEF_VAR( NC_FileID,LEVEL_TEMPERATURE_VARNAME,LEVEL_TEMPERATURE_TYPE, &
dimIDs=(/Level_DimID,Profile_DimID/),varID=VarID )
IF ( NF90_Status(1) /= NF90_NOERR ) THEN
msg = 'Error defining '//LEVEL_TEMPERATURE_VARNAME//' variable in '//&
TRIM(NC_Filename)//' - '//TRIM(NF90_STRERROR( NF90_Status(1) ))
CALL DefineVar_Cleanup(); RETURN
END IF
NF90_Status(1) = NF90_PUT_ATT( NC_FileID,VarID,LONGNAME_ATTNAME,LEVEL_TEMPERATURE_LONGNAME )
NF90_Status(2) = NF90_PUT_ATT( NC_FileID,VarID,DESCRIPTION_ATTNAME,LEVEL_TEMPERATURE_DESCRIPTION )
NF90_Status(3) = NF90_PUT_ATT( NC_FileID,VarID,UNITS_ATTNAME,LEVEL_TEMPERATURE_UNITS )
NF90_Status(4) = NF90_PUT_ATT( NC_FileID,VarID,FILLVALUE_ATTNAME,LEVEL_TEMPERATURE_FILLVALUE )
IF ( ANY(NF90_Status /= NF90_NOERR) ) THEN
msg = 'Error writing '//LEVEL_TEMPERATURE_VARNAME//' variable attributes to '//TRIM(NC_Filename)
CALL DefineVar_Cleanup(); RETURN
END IF
! The level absorber amount (level x absorber x profile)
NF90_Status(1) = NF90_DEF_VAR( NC_FileID,LEVEL_ABSORBER_VARNAME,LEVEL_ABSORBER_TYPE, &
dimIDs=(/Level_DimID,Absorber_DimID,Profile_DimID/),varID=VarID )
IF ( NF90_Status(1) /= NF90_NOERR ) THEN
msg = 'Error defining '//LEVEL_ABSORBER_VARNAME//' variable in '//&
TRIM(NC_Filename)//' - '//TRIM(NF90_STRERROR( NF90_Status(1) ))
CALL DefineVar_Cleanup(); RETURN
END IF
NF90_Status(1) = NF90_PUT_ATT( NC_FileID,VarID,LONGNAME_ATTNAME,LEVEL_ABSORBER_LONGNAME )
NF90_Status(2) = NF90_PUT_ATT( NC_FileID,VarID,DESCRIPTION_ATTNAME,LEVEL_ABSORBER_DESCRIPTION )
NF90_Status(3) = NF90_PUT_ATT( NC_FileID,VarID,UNITS_ATTNAME,LEVEL_ABSORBER_UNITS )
NF90_Status(4) = NF90_PUT_ATT( NC_FileID,VarID,FILLVALUE_ATTNAME,LEVEL_ABSORBER_FILLVALUE )
IF ( ANY(NF90_Status /= NF90_NOERR) ) THEN
msg = 'Error writing '//LEVEL_ABSORBER_VARNAME//' variable attributes to '//TRIM(NC_Filename)
CALL DefineVar_Cleanup(); RETURN
END IF
! The level altitude (level x profile)
NF90_Status(1) = NF90_DEF_VAR( NC_FileID,LEVEL_ALTITUDE_VARNAME,LEVEL_ALTITUDE_TYPE, &
dimIDs=(/Level_DimID,Profile_DimID/),varID=VarID )
IF ( NF90_Status(1) /= NF90_NOERR ) THEN
msg = 'Error defining '//LEVEL_ALTITUDE_VARNAME//' variable in '//&
TRIM(NC_Filename)//' - '//TRIM(NF90_STRERROR( NF90_Status(1) ))
CALL DefineVar_Cleanup(); RETURN
END IF
NF90_Status(1) = NF90_PUT_ATT( NC_FileID,VarID,LONGNAME_ATTNAME,LEVEL_ALTITUDE_LONGNAME )
NF90_Status(2) = NF90_PUT_ATT( NC_FileID,VarID,DESCRIPTION_ATTNAME,LEVEL_ALTITUDE_DESCRIPTION )
NF90_Status(3) = NF90_PUT_ATT( NC_FileID,VarID,UNITS_ATTNAME,LEVEL_ALTITUDE_UNITS )
NF90_Status(4) = NF90_PUT_ATT( NC_FileID,VarID,FILLVALUE_ATTNAME,LEVEL_ALTITUDE_FILLVALUE )
IF ( ANY(NF90_Status /= NF90_NOERR) ) THEN
msg = 'Error writing '//LEVEL_ALTITUDE_VARNAME//' variable attributes to '//TRIM(NC_Filename)
CALL DefineVar_Cleanup(); RETURN
END IF
! The layer pressure (layer x profile)
NF90_Status(1) = NF90_DEF_VAR( NC_FileID,LAYER_PRESSURE_VARNAME,LAYER_PRESSURE_TYPE, &
dimIDs=(/Layer_DimID,Profile_DimID/),varID=VarID )
IF ( NF90_Status(1) /= NF90_NOERR ) THEN
msg = 'Error defining '//LAYER_PRESSURE_VARNAME//' variable in '//&
TRIM(NC_Filename)//' - '//TRIM(NF90_STRERROR( NF90_Status(1) ))
CALL DefineVar_Cleanup(); RETURN
END IF
NF90_Status(1) = NF90_PUT_ATT( NC_FileID,VarID,LONGNAME_ATTNAME,LAYER_PRESSURE_LONGNAME )
NF90_Status(2) = NF90_PUT_ATT( NC_FileID,VarID,DESCRIPTION_ATTNAME,LAYER_PRESSURE_DESCRIPTION )
NF90_Status(3) = NF90_PUT_ATT( NC_FileID,VarID,UNITS_ATTNAME,LAYER_PRESSURE_UNITS )
NF90_Status(4) = NF90_PUT_ATT( NC_FileID,VarID,FILLVALUE_ATTNAME,LAYER_PRESSURE_FILLVALUE )
IF ( ANY(NF90_Status /= NF90_NOERR) ) THEN
msg = 'Error writing '//LAYER_PRESSURE_VARNAME//' variable attributes to '//TRIM(NC_Filename)
CALL DefineVar_Cleanup(); RETURN
END IF
! The layer temperature (layer x profile)
NF90_Status(1) = NF90_DEF_VAR( NC_FileID,LAYER_TEMPERATURE_VARNAME,LAYER_TEMPERATURE_TYPE, &
dimIDs=(/Layer_DimID,Profile_DimID/),varID=VarID )
IF ( NF90_Status(1) /= NF90_NOERR ) THEN
msg = 'Error defining '//LAYER_TEMPERATURE_VARNAME//' variable in '//&
TRIM(NC_Filename)//' - '//TRIM(NF90_STRERROR( NF90_Status(1) ))
CALL DefineVar_Cleanup(); RETURN
END IF
NF90_Status(1) = NF90_PUT_ATT( NC_FileID,VarID,LONGNAME_ATTNAME,LAYER_TEMPERATURE_LONGNAME )
NF90_Status(2) = NF90_PUT_ATT( NC_FileID,VarID,DESCRIPTION_ATTNAME,LAYER_TEMPERATURE_DESCRIPTION )
NF90_Status(3) = NF90_PUT_ATT( NC_FileID,VarID,UNITS_ATTNAME,LAYER_TEMPERATURE_UNITS )
NF90_Status(4) = NF90_PUT_ATT( NC_FileID,VarID,FILLVALUE_ATTNAME,LAYER_TEMPERATURE_FILLVALUE )
IF ( ANY(NF90_Status /= NF90_NOERR) ) THEN
msg = 'Error writing '//LAYER_TEMPERATURE_VARNAME//' variable attributes to '//TRIM(NC_Filename)
CALL DefineVar_Cleanup(); RETURN
END IF
! The layer absorber amount (layer x absorber x profile)
NF90_Status(1) = NF90_DEF_VAR( NC_FileID,LAYER_ABSORBER_VARNAME,LAYER_ABSORBER_TYPE, &
dimIDs=(/Layer_DimID,Absorber_DimID,Profile_DimID/),varID=VarID )
IF ( NF90_Status(1) /= NF90_NOERR ) THEN
msg = 'Error defining '//LAYER_ABSORBER_VARNAME//' variable in '//&
TRIM(NC_Filename)//' - '//TRIM(NF90_STRERROR( NF90_Status(1) ))
CALL DefineVar_Cleanup(); RETURN
END IF
NF90_Status(1) = NF90_PUT_ATT( NC_FileID,VarID,LONGNAME_ATTNAME,LAYER_ABSORBER_LONGNAME )
NF90_Status(2) = NF90_PUT_ATT( NC_FileID,VarID,DESCRIPTION_ATTNAME,LAYER_ABSORBER_DESCRIPTION )
NF90_Status(3) = NF90_PUT_ATT( NC_FileID,VarID,UNITS_ATTNAME,LAYER_ABSORBER_UNITS )
NF90_Status(4) = NF90_PUT_ATT( NC_FileID,VarID,FILLVALUE_ATTNAME,LAYER_ABSORBER_FILLVALUE )
IF ( ANY(NF90_Status /= NF90_NOERR) ) THEN
msg = 'Error writing '//LAYER_ABSORBER_VARNAME//' variable attributes to '//TRIM(NC_Filename)
CALL DefineVar_Cleanup(); RETURN
END IF
! The layer thickness (layer x profile)
NF90_Status(1) = NF90_DEF_VAR( NC_FileID,LAYER_DELTA_Z_VARNAME,LAYER_DELTA_Z_TYPE, &
dimIDs=(/Layer_DimID,Profile_DimID/),varID=VarID )
IF ( NF90_Status(1) /= NF90_NOERR ) THEN
msg = 'Error defining '//LAYER_DELTA_Z_VARNAME//' variable in '//&
TRIM(NC_Filename)//' - '//TRIM(NF90_STRERROR( NF90_Status(1) ))
CALL DefineVar_Cleanup(); RETURN
END IF
NF90_Status(1) = NF90_PUT_ATT( NC_FileID,VarID,LONGNAME_ATTNAME,LAYER_DELTA_Z_LONGNAME )
NF90_Status(2) = NF90_PUT_ATT( NC_FileID,VarID,DESCRIPTION_ATTNAME,LAYER_DELTA_Z_DESCRIPTION )
NF90_Status(3) = NF90_PUT_ATT( NC_FileID,VarID,UNITS_ATTNAME,LAYER_DELTA_Z_UNITS )
NF90_Status(4) = NF90_PUT_ATT( NC_FileID,VarID,FILLVALUE_ATTNAME,LAYER_DELTA_Z_FILLVALUE )
IF ( ANY(NF90_Status /= NF90_NOERR) ) THEN
msg = 'Error writing '//LAYER_DELTA_Z_VARNAME//' variable attributes to '//TRIM(NC_Filename)
CALL DefineVar_Cleanup(); RETURN
END IF
CONTAINS
! Error handler: closes the netCDF file, sets Error_Status to FAILURE,
! and logs the message built by the caller before invocation.
SUBROUTINE DefineVar_CleanUp()
! Close file
NF90_Status(1) = NF90_CLOSE( NC_FileID )
IF ( NF90_Status(1) /= NF90_NOERR ) &
msg = TRIM(msg)//'; Error closing input file during error cleanup - '//&
TRIM(NF90_STRERROR( NF90_Status(1) ) )
! Set error status and print error msg
Error_Status = FAILURE
CALL Display_Message( ROUTINE_NAME,TRIM(msg),Error_Status,Message_Log=Message_Log )
END SUBROUTINE DefineVar_CleanUp
END FUNCTION DefineVar
!------------------------------------------------------------------------------
!
! NAME:
! WriteVar
!
! PURPOSE:
! Function to write the AtmProfile variables in an output
! netCDF file in which they have been defined.
!
! CALLING SEQUENCE:
! Error_Status = WriteVar( NC_Filename , & ! Input
! NC_FileID , & ! Input
! AtmProfile , & ! Input
! RCS_Id =RCS_Id , & ! Revision control
! Message_Log=Message_Log ) ! Error messaging
!
! INPUT ARGUMENTS
! NC_Filename: Character string specifying the name of the
! already created netCDF AtmProfile format file.
! UNITS: N/A
! TYPE: CHARACTER(*)
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(IN)
!
! NC_FileID: NetCDF file ID number of the file in which
! the variables are to be written.
! UNITS: N/A
! TYPE: INTEGER
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(IN)
!
! AtmProfile: Structure containing the data to write to file.
! UNITS: N/A
! TYPE: TYPE(AtmProfile_type)
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(IN)
!
! OPTIONAL INPUT ARGUMENTS
! Message_Log: Character string specifying a filename in which any
! msgs will be logged. If not specified, or if an
! error occurs opening the log file, the default action
! is to output msgs to standard output.
! UNITS: N/A
! TYPE: CHARACTER(*)
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(IN), OPTIONAL
!
!
! OPTIONAL OUTPUT ARGUMENTS:
! RCS_Id: Character string containing the Revision Control
! System Id field for the module.
! UNITS: N/A
! TYPE: CHARACTER(*)
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(OUT), OPTIONAL
!
! SIDE EFFECTS:
! If an error occurs, the netCDF file is closed.
!
!------------------------------------------------------------------------------
FUNCTION WriteVar( NC_Filename, & ! Input
NC_FileID , & ! Input
AtmProfile , & ! Input
RCS_Id , & ! Revision control
Message_Log) & ! Error messaging
RESULT( Error_Status )
! Arguments
CHARACTER(*) , INTENT(IN) :: NC_Filename
INTEGER , INTENT(IN) :: NC_FileID
TYPE(AtmProfile_type) , INTENT(IN) :: AtmProfile
CHARACTER(*), OPTIONAL, INTENT(OUT) :: RCS_Id
CHARACTER(*), OPTIONAL, INTENT(IN) :: Message_Log
! Function result
INTEGER :: Error_Status
! Local parameters
CHARACTER(*), PARAMETER :: ROUTINE_NAME = 'WriteVar'
! Local variables
CHARACTER(ML) :: msg
INTEGER :: NF90_Status
INTEGER :: VarId
REAL(Double) :: DateTime(AtmProfile%n_Profiles)
! Set up
! ------
Error_Status = SUCCESS
IF ( PRESENT(RCS_Id) ) RCS_Id = MODULE_RCS_ID
! Write the variable data
! -----------------------
! The Absorber_ID
NF90_Status = NF90_INQ_VARID( NC_FileId,ABSORBER_ID_VARNAME,VarId )
IF ( NF90_Status /= NF90_NOERR ) THEN
msg = 'Error inquiring '//TRIM(NC_Filename)//' for '//ABSORBER_ID_VARNAME//&
' variable ID - '//TRIM(NF90_STRERROR( NF90_Status ))
CALL WriteVar_Cleanup(); RETURN
END IF
NF90_Status = NF90_PUT_VAR( NC_FileId,VarID,AtmProfile%Absorber_ID )
IF ( NF90_Status /= NF90_NOERR ) THEN
msg = 'Error writing '//ABSORBER_ID_VARNAME//' to '//TRIM(NC_Filename)//&
' - '//TRIM(NF90_STRERROR( NF90_Status ))
CALL WriteVar_Cleanup(); RETURN
END IF
! The Absorber_Units_ID
NF90_Status = NF90_INQ_VARID( NC_FileId,ABSORBER_UNITS_ID_VARNAME,VarId )
IF ( NF90_Status /= NF90_NOERR ) THEN
msg = 'Error inquiring '//TRIM(NC_Filename)//' for '//ABSORBER_UNITS_ID_VARNAME//&
' variable ID - '//TRIM(NF90_STRERROR( NF90_Status ))
CALL WriteVar_Cleanup(); RETURN
END IF
NF90_Status = NF90_PUT_VAR( NC_FileId,VarID,AtmProfile%Absorber_Units_ID )
IF ( NF90_Status /= NF90_NOERR ) THEN
msg = 'Error writing '//ABSORBER_UNITS_ID_VARNAME//' to '//TRIM(NC_Filename)//&
' - '//TRIM(NF90_STRERROR( NF90_Status ))
CALL WriteVar_Cleanup(); RETURN
END IF
! The Description
NF90_Status = NF90_INQ_VARID( NC_FileId,DESCRIPTION_VARNAME,VarId )
IF ( NF90_Status /= NF90_NOERR ) THEN
msg = 'Error inquiring '//TRIM(NC_Filename)//' for '//DESCRIPTION_VARNAME//&
' variable ID - '//TRIM(NF90_STRERROR( NF90_Status ))
CALL WriteVar_Cleanup(); RETURN
END IF
NF90_Status = NF90_PUT_VAR( NC_FileId,VarID,AtmProfile%Description )
IF ( NF90_Status /= NF90_NOERR ) THEN
msg = 'Error writing '//DESCRIPTION_VARNAME//' to '//TRIM(NC_Filename)//&
' - '//TRIM(NF90_STRERROR( NF90_Status ))
CALL WriteVar_Cleanup(); RETURN
END IF
! The Climatology_Model
NF90_Status = NF90_INQ_VARID( NC_FileId,CLIMATOLOGY_MODEL_VARNAME,VarId )
IF ( NF90_Status /= NF90_NOERR ) THEN
msg = 'Error inquiring '//TRIM(NC_Filename)//' for '//CLIMATOLOGY_MODEL_VARNAME//&
' variable ID - '//TRIM(NF90_STRERROR( NF90_Status ))
CALL WriteVar_Cleanup(); RETURN
END IF
NF90_Status = NF90_PUT_VAR( NC_FileId,VarID,AtmProfile%Climatology_Model )
IF ( NF90_Status /= NF90_NOERR ) THEN
msg = 'Error writing '//CLIMATOLOGY_MODEL_VARNAME//' to '//TRIM(NC_Filename)//&
' - '//TRIM(NF90_STRERROR( NF90_Status ))
CALL WriteVar_Cleanup(); RETURN
END IF
! The DateTime
NF90_Status = NF90_INQ_VARID( NC_FileId,DATETIME_VARNAME,VarId )
IF ( NF90_Status /= NF90_NOERR ) THEN
msg = 'Error inquiring '//TRIM(NC_Filename)//' for '//DATETIME_VARNAME//&
' variable ID - '//TRIM(NF90_STRERROR( NF90_Status ))
CALL WriteVar_Cleanup(); RETURN
END IF
CALL Convert_DateTime_to_Double( AtmProfile%DateTime, DateTime )
NF90_Status = NF90_PUT_VAR( NC_FileId,VarID,DateTime )
IF ( NF90_Status /= NF90_NOERR ) THEN
msg = 'Error writing '//DATETIME_VARNAME//' to '//TRIM(NC_Filename)//&
' - '//TRIM(NF90_STRERROR( NF90_Status ))
CALL WriteVar_Cleanup(); RETURN
END IF
! The Latitude
NF90_Status = NF90_INQ_VARID( NC_FileId,LATITUDE_VARNAME,VarId )
IF ( NF90_Status /= NF90_NOERR ) THEN
msg = 'Error inquiring '//TRIM(NC_Filename)//' for '//LATITUDE_VARNAME//&
' variable ID - '//TRIM(NF90_STRERROR( NF90_Status ))
CALL WriteVar_Cleanup(); RETURN
END IF
NF90_Status = NF90_PUT_VAR( NC_FileId,VarID,AtmProfile%Location%Latitude )
IF ( NF90_Status /= NF90_NOERR ) THEN
msg = 'Error writing '//LATITUDE_VARNAME//' to '//TRIM(NC_Filename)//&
' - '//TRIM(NF90_STRERROR( NF90_Status ))
CALL WriteVar_Cleanup(); RETURN
END IF
! The Longitude
NF90_Status = NF90_INQ_VARID( NC_FileId,LONGITUDE_VARNAME,VarId )
IF ( NF90_Status /= NF90_NOERR ) THEN
msg = 'Error inquiring '//TRIM(NC_Filename)//' for '//LONGITUDE_VARNAME//&
' variable ID - '//TRIM(NF90_STRERROR( NF90_Status ))
CALL WriteVar_Cleanup(); RETURN
END IF
NF90_Status = NF90_PUT_VAR( NC_FileId,VarID,AtmProfile%Location%Longitude )
IF ( NF90_Status /= NF90_NOERR ) THEN
msg = 'Error writing '//LONGITUDE_VARNAME//' to '//TRIM(NC_Filename)//&
' - '//TRIM(NF90_STRERROR( NF90_Status ))
CALL WriteVar_Cleanup(); RETURN
END IF
! The Surface_Altitude
NF90_Status = NF90_INQ_VARID( NC_FileId,SURFACE_ALTITUDE_VARNAME,VarId )
IF ( NF90_Status /= NF90_NOERR ) THEN
msg = 'Error inquiring '//TRIM(NC_Filename)//' for '//SURFACE_ALTITUDE_VARNAME//&
' variable ID - '//TRIM(NF90_STRERROR( NF90_Status ))
CALL WriteVar_Cleanup(); RETURN
END IF
NF90_Status = NF90_PUT_VAR( NC_FileId,VarID,AtmProfile%Location%Surface_Altitude )
IF ( NF90_Status /= NF90_NOERR ) THEN
msg = 'Error writing '//SURFACE_ALTITUDE_VARNAME//' to '//TRIM(NC_Filename)//&
' - '//TRIM(NF90_STRERROR( NF90_Status ))
CALL WriteVar_Cleanup(); RETURN
END IF
! The Level_Pressure
NF90_Status = NF90_INQ_VARID( NC_FileId,LEVEL_PRESSURE_VARNAME,VarId )
IF ( NF90_Status /= NF90_NOERR ) THEN
msg = 'Error inquiring '//TRIM(NC_Filename)//' for '//LEVEL_PRESSURE_VARNAME//&
' variable ID - '//TRIM(NF90_STRERROR( NF90_Status ))
CALL WriteVar_Cleanup(); RETURN
END IF
NF90_Status = NF90_PUT_VAR( NC_FileId,VarID,AtmProfile%Level_Pressure )
IF ( NF90_Status /= NF90_NOERR ) THEN
msg = 'Error writing '//LEVEL_PRESSURE_VARNAME//' to '//TRIM(NC_Filename)//&
' - '//TRIM(NF90_STRERROR( NF90_Status ))
CALL WriteVar_Cleanup(); RETURN
END IF
! The Level_Temperature
NF90_Status = NF90_INQ_VARID( NC_FileId,LEVEL_TEMPERATURE_VARNAME,VarId )
IF ( NF90_Status /= NF90_NOERR ) THEN
msg = 'Error inquiring '//TRIM(NC_Filename)//' for '//LEVEL_TEMPERATURE_VARNAME//&
' variable ID - '//TRIM(NF90_STRERROR( NF90_Status ))
CALL WriteVar_Cleanup(); RETURN
END IF
NF90_Status = NF90_PUT_VAR( NC_FileId,VarID,AtmProfile%Level_Temperature )
IF ( NF90_Status /= NF90_NOERR ) THEN
msg = 'Error writing '//LEVEL_TEMPERATURE_VARNAME//' to '//TRIM(NC_Filename)//&
' - '//TRIM(NF90_STRERROR( NF90_Status ))
CALL WriteVar_Cleanup(); RETURN
END IF
! The Level_Absorber
NF90_Status = NF90_INQ_VARID( NC_FileId,LEVEL_ABSORBER_VARNAME,VarId )
IF ( NF90_Status /= NF90_NOERR ) THEN
msg = 'Error inquiring '//TRIM(NC_Filename)//' for '//LEVEL_ABSORBER_VARNAME//&
' variable ID - '//TRIM(NF90_STRERROR( NF90_Status ))
CALL WriteVar_Cleanup(); RETURN
END IF
NF90_Status = NF90_PUT_VAR( NC_FileId,VarID,AtmProfile%Level_Absorber )
IF ( NF90_Status /= NF90_NOERR ) THEN
msg = 'Error writing '//LEVEL_ABSORBER_VARNAME//' to '//TRIM(NC_Filename)//&
' - '//TRIM(NF90_STRERROR( NF90_Status ))
CALL WriteVar_Cleanup(); RETURN
END IF
! The Level_Altitude
NF90_Status = NF90_INQ_VARID( NC_FileId,LEVEL_ALTITUDE_VARNAME,VarId )
IF ( NF90_Status /= NF90_NOERR ) THEN
msg = 'Error inquiring '//TRIM(NC_Filename)//' for '//LEVEL_ALTITUDE_VARNAME//&
' variable ID - '//TRIM(NF90_STRERROR( NF90_Status ))
CALL WriteVar_Cleanup(); RETURN
END IF
NF90_Status = NF90_PUT_VAR( NC_FileId,VarID,AtmProfile%Level_Altitude )
IF ( NF90_Status /= NF90_NOERR ) THEN
msg = 'Error writing '//LEVEL_ALTITUDE_VARNAME//' to '//TRIM(NC_Filename)//&
' - '//TRIM(NF90_STRERROR( NF90_Status ))
CALL WriteVar_Cleanup(); RETURN
END IF
! The Layer_Pressure
NF90_Status = NF90_INQ_VARID( NC_FileId,LAYER_PRESSURE_VARNAME,VarId )
IF ( NF90_Status /= NF90_NOERR ) THEN
msg = 'Error inquiring '//TRIM(NC_Filename)//' for '//LAYER_PRESSURE_VARNAME//&
' variable ID - '//TRIM(NF90_STRERROR( NF90_Status ))
CALL WriteVar_Cleanup(); RETURN
END IF
NF90_Status = NF90_PUT_VAR( NC_FileId,VarID,AtmProfile%Layer_Pressure )
IF ( NF90_Status /= NF90_NOERR ) THEN
msg = 'Error writing '//LAYER_PRESSURE_VARNAME//' to '//TRIM(NC_Filename)//&
' - '//TRIM(NF90_STRERROR( NF90_Status ))
CALL WriteVar_Cleanup(); RETURN
END IF
! The Layer_Temperature
NF90_Status = NF90_INQ_VARID( NC_FileId,LAYER_TEMPERATURE_VARNAME,VarId )
IF ( NF90_Status /= NF90_NOERR ) THEN
msg = 'Error inquiring '//TRIM(NC_Filename)//' for '//LAYER_TEMPERATURE_VARNAME//&
' variable ID - '//TRIM(NF90_STRERROR( NF90_Status ))
CALL WriteVar_Cleanup(); RETURN
END IF
NF90_Status = NF90_PUT_VAR( NC_FileId,VarID,AtmProfile%Layer_Temperature )
IF ( NF90_Status /= NF90_NOERR ) THEN
msg = 'Error writing '//LAYER_TEMPERATURE_VARNAME//' to '//TRIM(NC_Filename)//&
' - '//TRIM(NF90_STRERROR( NF90_Status ))
CALL WriteVar_Cleanup(); RETURN
END IF
! The Layer_Absorber
NF90_Status = NF90_INQ_VARID( NC_FileId,LAYER_ABSORBER_VARNAME,VarId )
IF ( NF90_Status /= NF90_NOERR ) THEN
msg = 'Error inquiring '//TRIM(NC_Filename)//' for '//LAYER_ABSORBER_VARNAME//&
' variable ID - '//TRIM(NF90_STRERROR( NF90_Status ))
CALL WriteVar_Cleanup(); RETURN
END IF
NF90_Status = NF90_PUT_VAR( NC_FileId,VarID,AtmProfile%Layer_Absorber )
IF ( NF90_Status /= NF90_NOERR ) THEN
msg = 'Error writing '//LAYER_ABSORBER_VARNAME//' to '//TRIM(NC_Filename)//&
' - '//TRIM(NF90_STRERROR( NF90_Status ))
CALL WriteVar_Cleanup(); RETURN
END IF
! The Layer_Delta_Z
NF90_Status = NF90_INQ_VARID( NC_FileId,LAYER_DELTA_Z_VARNAME,VarId )
IF ( NF90_Status /= NF90_NOERR ) THEN
msg = 'Error inquiring '//TRIM(NC_Filename)//' for '//LAYER_DELTA_Z_VARNAME//&
' variable ID - '//TRIM(NF90_STRERROR( NF90_Status ))
CALL WriteVar_Cleanup(); RETURN
END IF
NF90_Status = NF90_PUT_VAR( NC_FileId,VarID,AtmProfile%Layer_Delta_Z )
IF ( NF90_Status /= NF90_NOERR ) THEN
msg = 'Error writing '//LAYER_DELTA_Z_VARNAME//' to '//TRIM(NC_Filename)//&
' - '//TRIM(NF90_STRERROR( NF90_Status ))
CALL WriteVar_Cleanup(); RETURN
END IF
CONTAINS
SUBROUTINE WriteVar_CleanUp()
! Close file
NF90_Status = NF90_CLOSE( NC_FileID )
IF ( NF90_Status /= NF90_NOERR ) &
msg = TRIM(msg)//'; Error closing input file during error cleanup - '//&
TRIM(NF90_STRERROR( NF90_Status ) )
! Set error status and print error msg
Error_Status = FAILURE
CALL Display_Message( ROUTINE_NAME,TRIM(msg),Error_Status,Message_Log=Message_Log )
END SUBROUTINE WriteVar_CleanUp
END FUNCTION WriteVar
!------------------------------------------------------------------------------
!
! NAME:
! ReadVar
!
! PURPOSE:
! Function to read the AtmProfile variables from any input
! netCDF file in which they have been defined.
!
! CALLING SEQUENCE:
! Error_Status = ReadVar( NC_Filename , & ! Input
! NC_FileID , & ! Input
! AtmProfile , & ! Output
! RCS_Id =RCS_Id , & ! Revision control
! Message_Log=Message_Log ) ! Error messaging
!
! INPUT ARGUMENTS:
! NC_Filename: Character string specifying the name of the
! already created netCDF AtmProfile format file.
! UNITS: N/A
! TYPE: CHARACTER(*)
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(IN)
!
! NC_FileID: NetCDF file ID number of the file from which
! the variables are to be read.
! UNITS: N/A
! TYPE: INTEGER
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(IN)
!
! OUTPUT ARGUMENTS:
! AtmProfile: Structure containing the data that was read
! from file.
! UNITS: N/A
! TYPE: TYPE(AtmProfile_type)
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(IN OUT)
!
! OPTIONAL INPUT ARGUMENTS:
! Message_Log: Character string specifying a filename in which any
! msgs will be logged. If not specified, or if an
! error occurs opening the log file, the default action
! is to output msgs to standard output.
! UNITS: N/A
! TYPE: CHARACTER(*)
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(IN), OPTIONAL
!
!
! OPTIONAL OUTPUT ARGUMENTS:
! RCS_Id: Character string containing the Revision Control
! System Id field for the module.
! UNITS: N/A
! TYPE: CHARACTER(*)
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(OUT), OPTIONAL
!
! SIDE EFFECTS:
! If an error occurs, the netCDF file is closed.
!
! COMMENTS:
! The INTENT on the output AtmProfile argument is IN OUT rather
! than just OUT. This is necessary because the argument may be defined
! upon input. To prevent memory leaks, the IN OUT INTENT is a must.
!
!------------------------------------------------------------------------------
  FUNCTION ReadVar( NC_Filename, & ! Input
                    NC_FileID  , & ! Input
                    AtmProfile , & ! Output
                    RCS_Id     , & ! Revision control
                    Message_Log) & ! Error messaging
                  RESULT( Error_Status )
    ! Reads the AtmProfile variable data from an already-open netCDF file
    ! into the supplied AtmProfile structure. For every variable the
    ! pattern is the same: inquire for the variable ID, read the data, and
    ! on any netCDF error branch to ReadVar_CleanUp(), which closes the
    ! file, sets Error_Status to FAILURE and logs the accumulated message.
    ! NOTE: AtmProfile is INTENT(IN OUT), not OUT, because its (allocated)
    ! components may already be defined on input; an OUT intent would
    ! risk memory leaks (see the routine's header documentation).
    ! Arguments
    CHARACTER(*)          , INTENT(IN)     :: NC_Filename  ! File name; used only for error messages
    INTEGER               , INTENT(IN)     :: NC_FileID    ! ID of the open netCDF file to read from
    TYPE(AtmProfile_type) , INTENT(IN OUT) :: AtmProfile   ! Structure receiving the data
    CHARACTER(*), OPTIONAL, INTENT(OUT)    :: RCS_Id       ! Module revision control ID
    CHARACTER(*), OPTIONAL, INTENT(IN)     :: Message_Log  ! Optional log filename for error messages
    ! Function result
    INTEGER :: Error_Status
    ! Local parameters
    CHARACTER(*), PARAMETER :: ROUTINE_NAME = 'ReadVar'
    ! Local variables
    CHARACTER(ML) :: msg          ! Error message buffer
    INTEGER :: NF90_Status        ! Return status of the last netCDF call
    INTEGER :: VarId              ! netCDF variable ID (reused for each variable)
    REAL(Double) :: DateTime(AtmProfile%n_Profiles)  ! Staging buffer for the numeric date/times
    ! Set up
    ! ------
    Error_Status = SUCCESS
    IF ( PRESENT(RCS_Id) ) RCS_Id = MODULE_RCS_ID
    ! Read the variable data
    ! ----------------------
    ! The Absorber_ID
    NF90_Status = NF90_INQ_VARID( NC_FileId,ABSORBER_ID_VARNAME,VarId )
    IF ( NF90_Status /= NF90_NOERR ) THEN
      msg = 'Error inquiring '//TRIM(NC_Filename)//' for '//ABSORBER_ID_VARNAME//&
            ' variable ID - '//TRIM(NF90_STRERROR( NF90_Status ))
      CALL ReadVar_Cleanup(); RETURN
    END IF
    NF90_Status = NF90_GET_VAR( NC_FileId,VarID,AtmProfile%Absorber_ID )
    IF ( NF90_Status /= NF90_NOERR ) THEN
      msg = 'Error reading '//ABSORBER_ID_VARNAME//' from '//TRIM(NC_Filename)//&
            ' - '//TRIM(NF90_STRERROR( NF90_Status ))
      CALL ReadVar_Cleanup(); RETURN
    END IF
    ! The Absorber_Units_ID
    NF90_Status = NF90_INQ_VARID( NC_FileId,ABSORBER_UNITS_ID_VARNAME,VarId )
    IF ( NF90_Status /= NF90_NOERR ) THEN
      msg = 'Error inquiring '//TRIM(NC_Filename)//' for '//ABSORBER_UNITS_ID_VARNAME//&
            ' variable ID - '//TRIM(NF90_STRERROR( NF90_Status ))
      CALL ReadVar_Cleanup(); RETURN
    END IF
    NF90_Status = NF90_GET_VAR( NC_FileId,VarID,AtmProfile%Absorber_Units_ID )
    IF ( NF90_Status /= NF90_NOERR ) THEN
      msg = 'Error reading '//ABSORBER_UNITS_ID_VARNAME//' from '//TRIM(NC_Filename)//&
            ' - '//TRIM(NF90_STRERROR( NF90_Status ))
      CALL ReadVar_Cleanup(); RETURN
    END IF
    ! The Description
    NF90_Status = NF90_INQ_VARID( NC_FileId,DESCRIPTION_VARNAME,VarId )
    IF ( NF90_Status /= NF90_NOERR ) THEN
      msg = 'Error inquiring '//TRIM(NC_Filename)//' for '//DESCRIPTION_VARNAME//&
            ' variable ID - '//TRIM(NF90_STRERROR( NF90_Status ))
      CALL ReadVar_Cleanup(); RETURN
    END IF
    NF90_Status = NF90_GET_VAR( NC_FileId,VarID,AtmProfile%Description )
    IF ( NF90_Status /= NF90_NOERR ) THEN
      msg = 'Error reading '//DESCRIPTION_VARNAME//' from '//TRIM(NC_Filename)//&
            ' - '//TRIM(NF90_STRERROR( NF90_Status ))
      CALL ReadVar_Cleanup(); RETURN
    END IF
    ! The Climatology_Model
    NF90_Status = NF90_INQ_VARID( NC_FileId,CLIMATOLOGY_MODEL_VARNAME,VarId )
    IF ( NF90_Status /= NF90_NOERR ) THEN
      msg = 'Error inquiring '//TRIM(NC_Filename)//' for '//CLIMATOLOGY_MODEL_VARNAME//&
            ' variable ID - '//TRIM(NF90_STRERROR( NF90_Status ))
      CALL ReadVar_Cleanup(); RETURN
    END IF
    NF90_Status = NF90_GET_VAR( NC_FileId,VarID,AtmProfile%Climatology_Model )
    IF ( NF90_Status /= NF90_NOERR ) THEN
      msg = 'Error reading '//CLIMATOLOGY_MODEL_VARNAME//' from '//TRIM(NC_Filename)//&
            ' - '//TRIM(NF90_STRERROR( NF90_Status ))
      CALL ReadVar_Cleanup(); RETURN
    END IF
    ! The DateTime. The raw double precision values are read into a local
    ! buffer and then converted into the structure's DateTime components.
    NF90_Status = NF90_INQ_VARID( NC_FileId,DATETIME_VARNAME,VarId )
    IF ( NF90_Status /= NF90_NOERR ) THEN
      msg = 'Error inquiring '//TRIM(NC_Filename)//' for '//DATETIME_VARNAME//&
            ' variable ID - '//TRIM(NF90_STRERROR( NF90_Status ))
      CALL ReadVar_Cleanup(); RETURN
    END IF
    NF90_Status = NF90_GET_VAR( NC_FileId,VarID,DateTime )
    IF ( NF90_Status /= NF90_NOERR ) THEN
      msg = 'Error reading '//DATETIME_VARNAME//' from '//TRIM(NC_Filename)//&
            ' - '//TRIM(NF90_STRERROR( NF90_Status ))
      CALL ReadVar_Cleanup(); RETURN
    END IF
    CALL Convert_DateTime_to_Type( DateTime, AtmProfile%DateTime )
    ! The Latitude
    NF90_Status = NF90_INQ_VARID( NC_FileId,LATITUDE_VARNAME,VarId )
    IF ( NF90_Status /= NF90_NOERR ) THEN
      msg = 'Error inquiring '//TRIM(NC_Filename)//' for '//LATITUDE_VARNAME//&
            ' variable ID - '//TRIM(NF90_STRERROR( NF90_Status ))
      CALL ReadVar_Cleanup(); RETURN
    END IF
    NF90_Status = NF90_GET_VAR( NC_FileId,VarID,AtmProfile%Location%Latitude )
    IF ( NF90_Status /= NF90_NOERR ) THEN
      msg = 'Error reading '//LATITUDE_VARNAME//' from '//TRIM(NC_Filename)//&
            ' - '//TRIM(NF90_STRERROR( NF90_Status ))
      CALL ReadVar_Cleanup(); RETURN
    END IF
    ! The Longitude
    NF90_Status = NF90_INQ_VARID( NC_FileId,LONGITUDE_VARNAME,VarId )
    IF ( NF90_Status /= NF90_NOERR ) THEN
      msg = 'Error inquiring '//TRIM(NC_Filename)//' for '//LONGITUDE_VARNAME//&
            ' variable ID - '//TRIM(NF90_STRERROR( NF90_Status ))
      CALL ReadVar_Cleanup(); RETURN
    END IF
    NF90_Status = NF90_GET_VAR( NC_FileId,VarID,AtmProfile%Location%Longitude )
    IF ( NF90_Status /= NF90_NOERR ) THEN
      msg = 'Error reading '//LONGITUDE_VARNAME//' from '//TRIM(NC_Filename)//&
            ' - '//TRIM(NF90_STRERROR( NF90_Status ))
      CALL ReadVar_Cleanup(); RETURN
    END IF
    ! The Surface_Altitude
    NF90_Status = NF90_INQ_VARID( NC_FileId,SURFACE_ALTITUDE_VARNAME,VarId )
    IF ( NF90_Status /= NF90_NOERR ) THEN
      msg = 'Error inquiring '//TRIM(NC_Filename)//' for '//SURFACE_ALTITUDE_VARNAME//&
            ' variable ID - '//TRIM(NF90_STRERROR( NF90_Status ))
      CALL ReadVar_Cleanup(); RETURN
    END IF
    NF90_Status = NF90_GET_VAR( NC_FileId,VarID,AtmProfile%Location%Surface_Altitude )
    IF ( NF90_Status /= NF90_NOERR ) THEN
      msg = 'Error reading '//SURFACE_ALTITUDE_VARNAME//' from '//TRIM(NC_Filename)//&
            ' - '//TRIM(NF90_STRERROR( NF90_Status ))
      CALL ReadVar_Cleanup(); RETURN
    END IF
    ! The Level_Pressure
    NF90_Status = NF90_INQ_VARID( NC_FileId,LEVEL_PRESSURE_VARNAME,VarId )
    IF ( NF90_Status /= NF90_NOERR ) THEN
      msg = 'Error inquiring '//TRIM(NC_Filename)//' for '//LEVEL_PRESSURE_VARNAME//&
            ' variable ID - '//TRIM(NF90_STRERROR( NF90_Status ))
      CALL ReadVar_Cleanup(); RETURN
    END IF
    NF90_Status = NF90_GET_VAR( NC_FileId,VarID,AtmProfile%Level_Pressure )
    IF ( NF90_Status /= NF90_NOERR ) THEN
      msg = 'Error reading '//LEVEL_PRESSURE_VARNAME//' from '//TRIM(NC_Filename)//&
            ' - '//TRIM(NF90_STRERROR( NF90_Status ))
      CALL ReadVar_Cleanup(); RETURN
    END IF
    ! The Level_Temperature
    NF90_Status = NF90_INQ_VARID( NC_FileId,LEVEL_TEMPERATURE_VARNAME,VarId )
    IF ( NF90_Status /= NF90_NOERR ) THEN
      msg = 'Error inquiring '//TRIM(NC_Filename)//' for '//LEVEL_TEMPERATURE_VARNAME//&
            ' variable ID - '//TRIM(NF90_STRERROR( NF90_Status ))
      CALL ReadVar_Cleanup(); RETURN
    END IF
    NF90_Status = NF90_GET_VAR( NC_FileId,VarID,AtmProfile%Level_Temperature )
    IF ( NF90_Status /= NF90_NOERR ) THEN
      msg = 'Error reading '//LEVEL_TEMPERATURE_VARNAME//' from '//TRIM(NC_Filename)//&
            ' - '//TRIM(NF90_STRERROR( NF90_Status ))
      CALL ReadVar_Cleanup(); RETURN
    END IF
    ! The Level_Absorber
    NF90_Status = NF90_INQ_VARID( NC_FileId,LEVEL_ABSORBER_VARNAME,VarId )
    IF ( NF90_Status /= NF90_NOERR ) THEN
      msg = 'Error inquiring '//TRIM(NC_Filename)//' for '//LEVEL_ABSORBER_VARNAME//&
            ' variable ID - '//TRIM(NF90_STRERROR( NF90_Status ))
      CALL ReadVar_Cleanup(); RETURN
    END IF
    NF90_Status = NF90_GET_VAR( NC_FileId,VarID,AtmProfile%Level_Absorber )
    IF ( NF90_Status /= NF90_NOERR ) THEN
      msg = 'Error reading '//LEVEL_ABSORBER_VARNAME//' from '//TRIM(NC_Filename)//&
            ' - '//TRIM(NF90_STRERROR( NF90_Status ))
      CALL ReadVar_Cleanup(); RETURN
    END IF
    ! The Level_Altitude
    NF90_Status = NF90_INQ_VARID( NC_FileId,LEVEL_ALTITUDE_VARNAME,VarId )
    IF ( NF90_Status /= NF90_NOERR ) THEN
      msg = 'Error inquiring '//TRIM(NC_Filename)//' for '//LEVEL_ALTITUDE_VARNAME//&
            ' variable ID - '//TRIM(NF90_STRERROR( NF90_Status ))
      CALL ReadVar_Cleanup(); RETURN
    END IF
    NF90_Status = NF90_GET_VAR( NC_FileId,VarID,AtmProfile%Level_Altitude )
    IF ( NF90_Status /= NF90_NOERR ) THEN
      msg = 'Error reading '//LEVEL_ALTITUDE_VARNAME//' from '//TRIM(NC_Filename)//&
            ' - '//TRIM(NF90_STRERROR( NF90_Status ))
      CALL ReadVar_Cleanup(); RETURN
    END IF
    ! The Layer_Pressure
    NF90_Status = NF90_INQ_VARID( NC_FileId,LAYER_PRESSURE_VARNAME,VarId )
    IF ( NF90_Status /= NF90_NOERR ) THEN
      msg = 'Error inquiring '//TRIM(NC_Filename)//' for '//LAYER_PRESSURE_VARNAME//&
            ' variable ID - '//TRIM(NF90_STRERROR( NF90_Status ))
      CALL ReadVar_Cleanup(); RETURN
    END IF
    NF90_Status = NF90_GET_VAR( NC_FileId,VarID,AtmProfile%Layer_Pressure )
    IF ( NF90_Status /= NF90_NOERR ) THEN
      msg = 'Error reading '//LAYER_PRESSURE_VARNAME//' from '//TRIM(NC_Filename)//&
            ' - '//TRIM(NF90_STRERROR( NF90_Status ))
      CALL ReadVar_Cleanup(); RETURN
    END IF
    ! The Layer_Temperature
    NF90_Status = NF90_INQ_VARID( NC_FileId,LAYER_TEMPERATURE_VARNAME,VarId )
    IF ( NF90_Status /= NF90_NOERR ) THEN
      msg = 'Error inquiring '//TRIM(NC_Filename)//' for '//LAYER_TEMPERATURE_VARNAME//&
            ' variable ID - '//TRIM(NF90_STRERROR( NF90_Status ))
      CALL ReadVar_Cleanup(); RETURN
    END IF
    NF90_Status = NF90_GET_VAR( NC_FileId,VarID,AtmProfile%Layer_Temperature )
    IF ( NF90_Status /= NF90_NOERR ) THEN
      msg = 'Error reading '//LAYER_TEMPERATURE_VARNAME//' from '//TRIM(NC_Filename)//&
            ' - '//TRIM(NF90_STRERROR( NF90_Status ))
      CALL ReadVar_Cleanup(); RETURN
    END IF
    ! The Layer_Absorber
    NF90_Status = NF90_INQ_VARID( NC_FileId,LAYER_ABSORBER_VARNAME,VarId )
    IF ( NF90_Status /= NF90_NOERR ) THEN
      msg = 'Error inquiring '//TRIM(NC_Filename)//' for '//LAYER_ABSORBER_VARNAME//&
            ' variable ID - '//TRIM(NF90_STRERROR( NF90_Status ))
      CALL ReadVar_Cleanup(); RETURN
    END IF
    NF90_Status = NF90_GET_VAR( NC_FileId,VarID,AtmProfile%Layer_Absorber )
    IF ( NF90_Status /= NF90_NOERR ) THEN
      msg = 'Error reading '//LAYER_ABSORBER_VARNAME//' from '//TRIM(NC_Filename)//&
            ' - '//TRIM(NF90_STRERROR( NF90_Status ))
      CALL ReadVar_Cleanup(); RETURN
    END IF
    ! The Layer_Delta_Z
    NF90_Status = NF90_INQ_VARID( NC_FileId,LAYER_DELTA_Z_VARNAME,VarId )
    IF ( NF90_Status /= NF90_NOERR ) THEN
      msg = 'Error inquiring '//TRIM(NC_Filename)//' for '//LAYER_DELTA_Z_VARNAME//&
            ' variable ID - '//TRIM(NF90_STRERROR( NF90_Status ))
      CALL ReadVar_Cleanup(); RETURN
    END IF
    NF90_Status = NF90_GET_VAR( NC_FileId,VarID,AtmProfile%Layer_Delta_Z )
    IF ( NF90_Status /= NF90_NOERR ) THEN
      msg = 'Error reading '//LAYER_DELTA_Z_VARNAME//' from '//TRIM(NC_Filename)//&
            ' - '//TRIM(NF90_STRERROR( NF90_Status ))
      CALL ReadVar_Cleanup(); RETURN
    END IF
  CONTAINS
    ! Local error handler: closes the input file, sets the FAILURE status
    ! and logs the accumulated error message.
    SUBROUTINE ReadVar_CleanUp()
      ! Close file
      NF90_Status = NF90_CLOSE( NC_FileID )
      IF ( NF90_Status /= NF90_NOERR ) &
        msg = TRIM(msg)//'; Error closing input file during error cleanup - '//&
              TRIM(NF90_STRERROR( NF90_Status ) )
      ! Set error status and print error msg
      Error_Status = FAILURE
      CALL Display_Message( ROUTINE_NAME,TRIM(msg),Error_Status,Message_Log=Message_Log )
    END SUBROUTINE ReadVar_CleanUp
  END FUNCTION ReadVar
!------------------------------------------------------------------------------
!
! NAME:
! CreateFile
!
! PURPOSE:
! Function to create a netCDF AtmProfile data file for writing.
!
! CALLING SEQUENCE:
! Error_Status = CreateFile( NC_Filename , & ! Input
! n_Layers , & ! Input
! n_Absorbers , & ! Input
! n_Profiles , & ! Input
! NC_FileID , & ! Output
! Version =Version , & ! Optional input
! ID_Tag =ID_Tag , & ! Optional input
! Title =Title , & ! Optional input
! History =History , & ! Optional input
! Comment =Comment , & ! Optional input
! Message_Log=Message_Log ) ! Error messaging
!
! INPUT ARGUMENTS:
! NC_Filename: Character string specifying the name of the
! netCDF AtmProfile format data file to create.
! UNITS: N/A
! TYPE: CHARACTER(*)
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(IN)
!
! n_Layers: Number of profile layers.
! Must be > 0.
! UNITS: N/A
! TYPE: INTEGER
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(IN)
!
! n_Absorbers: Number of profile absorbers.
! Must be > 0.
! UNITS: N/A
! TYPE: INTEGER
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(IN)
!
! n_Profiles: Number of profiles.
! Must be > 0.
! UNITS: N/A
! TYPE: INTEGER
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(IN)
!
! OUTPUT ARGUMENTS:
! NC_FileID: NetCDF file ID number to be used for subsequent
! writing to the output file.
! UNITS: N/A
! TYPE: INTEGER
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(OUT)
!
! OPTIONAL INPUT ARGUMENTS:
! Version: The version number of the netCDF AtmProfile file.
! UNITS: N/A
! TYPE: INTEGER
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(IN), OPTIONAL
!
! ID_Tag: Character string written into the ID_TAG global
! attribute field of the netCDF AtmProfile file.
! Should contain a short tag used to identify the
! dependent profile set.
! UNITS: N/A
! TYPE: CHARACTER(*)
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(IN), OPTIONAL
!
! Title: Character string written into the TITLE global
! attribute field of the netCDF AtmProfile file.
! Should contain a succinct description of what
! is in the netCDF datafile.
! UNITS: N/A
! TYPE: CHARACTER(*)
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(IN), OPTIONAL
!
! History: Character string written into the HISTORY global
! attribute field of the netCDF AtmProfile file.
! UNITS: N/A
! TYPE: CHARACTER(*)
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(IN), OPTIONAL
!
! Comment: Character string written into the COMMENT global
! attribute field of the netCDF AtmProfile file.
! UNITS: N/A
! TYPE: CHARACTER(*)
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(IN), OPTIONAL
!
! Message_Log: Character string specifying a filename in which
! any msgs will be logged. If not specified,
! or if an error occurs opening the log file, the
! default action is to output msgs to standard
! output.
! UNITS: N/A
! TYPE: CHARACTER(*)
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(IN), OPTIONAL
!
! FUNCTION RESULT:
! Error_Status: The return value is an integer defining the error status.
! The error codes are defined in the Message_Handler module.
! If == SUCCESS the netCDF file creation was successful.
! == FAILURE an unrecoverable error occurred.
! == WARNING - an error occurred writing any of the
! supplied global attributes.
! - an error occurred closing the netCDF file.
! UNITS: N/A
! TYPE: INTEGER
! DIMENSION: Scalar
!
!------------------------------------------------------------------------------
FUNCTION CreateFile( NC_Filename, & ! Input
n_Layers , & ! Input
n_Absorbers, & ! Input
n_Profiles , & ! Input
NC_FileID , & ! Output
Version , & ! Optional input
ID_Tag , & ! Optional input
Title , & ! Optional input
History , & ! Optional input
Comment , & ! Optional input
Message_Log) & ! Error messaging
RESULT( Error_Status )
! Arguments
CHARACTER(*) , INTENT(IN) :: NC_Filename
INTEGER , INTENT(IN) :: n_Layers
INTEGER , INTENT(IN) :: n_Absorbers
INTEGER , INTENT(IN) :: n_Profiles
INTEGER , INTENT(OUT) :: NC_FileID
INTEGER , OPTIONAL, INTENT(IN) :: Version
CHARACTER(*), OPTIONAL, INTENT(IN) :: ID_Tag
CHARACTER(*), OPTIONAL, INTENT(IN) :: Title
CHARACTER(*), OPTIONAL, INTENT(IN) :: History
CHARACTER(*), OPTIONAL, INTENT(IN) :: Comment
CHARACTER(*), OPTIONAL, INTENT(IN) :: Message_Log
! Function result
INTEGER :: Error_Status
! Local parameters
CHARACTER(*), PARAMETER :: ROUTINE_NAME = 'CreateFile'
! Local variables
CHARACTER(ML) :: msg
INTEGER :: NF90_Status
INTEGER :: n_Levels
INTEGER :: Level_DimID
INTEGER :: Layer_DimID
INTEGER :: Absorber_DimID
INTEGER :: Profile_DimID
INTEGER :: PL_DimID
TYPE(AtmProfile_type) :: Dummy
! Set up
! ------
Error_Status = SUCCESS
! Check input
IF ( n_Layers < 1 .OR. &
n_Absorbers < 1 .OR. &
n_Profiles < 1 ) THEN
msg = 'Invalid dimension input detected.'
CALL Create_Cleanup(); RETURN
END IF
n_Levels = n_Layers+1
! Create the data file
! --------------------
NF90_Status = NF90_CREATE( NC_Filename,NF90_CLOBBER,NC_FileID )
IF ( NF90_Status /= NF90_NOERR ) THEN
msg = 'Error creating '//TRIM(NC_Filename)//' - '//&
TRIM(NF90_STRERROR( NF90_Status ))
CALL Create_Cleanup(); RETURN
END IF
! Define the dimensions
! ---------------------
NF90_Status = NF90_DEF_DIM( NC_FileID,LEVEL_DIMNAME,n_Levels,Level_DimID )
IF ( NF90_Status /= NF90_NOERR ) THEN
msg = 'Error defining '//LEVEL_DIMNAME//' dimension in '//&
TRIM(NC_Filename)//' - '//TRIM(NF90_STRERROR( NF90_Status ))
CALL Create_Cleanup(Close_File=.TRUE.); RETURN
END IF
NF90_Status = NF90_DEF_DIM( NC_FileID,LAYER_DIMNAME,n_Layers,Layer_DimID )
IF ( NF90_Status /= NF90_NOERR ) THEN
msg = 'Error defining '//LAYER_DIMNAME//' dimension in '//&
TRIM(NC_Filename)//' - '//TRIM(NF90_STRERROR( NF90_Status ))
CALL Create_Cleanup(Close_File=.TRUE.); RETURN
END IF
NF90_Status = NF90_DEF_DIM( NC_FileID,ABSORBER_DIMNAME,n_Absorbers,Absorber_DimID )
IF ( NF90_Status /= NF90_NOERR ) THEN
msg = 'Error defining '//ABSORBER_DIMNAME//' dimension in '//&
TRIM(NC_Filename)//' - '//TRIM(NF90_STRERROR( NF90_Status ))
CALL Create_Cleanup(Close_File=.TRUE.); RETURN
END IF
NF90_Status = NF90_DEF_DIM( NC_FileID,PROFILE_DIMNAME,n_Profiles,Profile_DimID )
IF ( NF90_Status /= NF90_NOERR ) THEN
msg = 'Error defining '//PROFILE_DIMNAME//' dimension in '//&
TRIM(NC_Filename)//' - '//TRIM(NF90_STRERROR( NF90_Status ))
CALL Create_Cleanup(Close_File=.TRUE.); RETURN
END IF
NF90_Status = NF90_DEF_DIM( NC_FileID,DESCRIPTION_DIMNAME,LEN(Dummy%Description),PL_DimID )
IF ( NF90_Status /= NF90_NOERR ) THEN
msg = 'Error defining '//DESCRIPTION_DIMNAME//' dimension in '//&
TRIM(NC_Filename)//' - '//TRIM(NF90_STRERROR( NF90_Status ))
CALL Create_Cleanup(Close_File=.TRUE.); RETURN
END IF
! Write the global attributes
! ---------------------------
Error_Status = WriteGAtts( NC_Filename , &
NC_FileID , &
Version =Version , &
ID_Tag =ID_Tag , &
Title =Title , &
History =History , &
Comment =Comment , &
Message_Log =Message_Log )
IF ( Error_Status /= SUCCESS ) THEN
msg = 'Error writing global attributes to '//TRIM(NC_Filename)
CALL Create_Cleanup(); RETURN
END IF
! Define the AtmProfile variables
! -------------------------------
Error_Status = DefineVar( NC_Filename , & ! Input
NC_FileID , & ! Input
Level_DimID , & ! Input
Layer_DimID , & ! Input
Absorber_DimID , & ! Input
Profile_DimID , & ! Input
PL_DimID , & ! Input
Message_Log=Message_Log ) ! Error messaging
IF ( Error_Status /= SUCCESS ) THEN
msg = 'Error defining variables in '//TRIM(NC_Filename)
CALL Create_Cleanup(); RETURN
END IF
! Take netCDF file out of define mode
! -----------------------------------
NF90_Status = NF90_ENDDEF( NC_FileID )
IF ( NF90_Status /= NF90_NOERR ) THEN
msg = 'Error taking '//TRIM(NC_Filename)//' out of define mode.'
CALL Create_Cleanup(Close_File=.TRUE.); RETURN
END IF
CONTAINS
SUBROUTINE Create_CleanUp( Close_File )
LOGICAL, OPTIONAL, INTENT(IN) :: Close_File
! Close file if necessary
IF ( PRESENT(Close_File) ) THEN
IF ( Close_File ) THEN
NF90_Status = NF90_CLOSE( NC_FileID )
IF ( NF90_Status /= NF90_NOERR ) &
msg = TRIM(msg)//'; Error closing input file during error cleanup - '//&
TRIM(NF90_STRERROR( NF90_Status ) )
END IF
END IF
! Set error status and print error msg
Error_Status = FAILURE
CALL Display_Message( ROUTINE_NAME,TRIM(msg),Error_Status,Message_Log=Message_Log )
END SUBROUTINE Create_CleanUp
END FUNCTION CreateFile
END MODULE AtmProfile_netCDF_IO
|
{"hexsha": "eb590bd4eff0add30ed9d26318583e9651e4db16", "size": 136195, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/TauRegress/ODAS/ODAS_Regress/TmpAtmProfile_netCDF_IO.f90", "max_stars_repo_name": "hsbadr/crtm", "max_stars_repo_head_hexsha": "bfeb9955637f361fc69fa0b7af0e8d92d40718b1", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2020-11-19T10:00:22.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-10T02:42:18.000Z", "max_issues_repo_path": "src/TauRegress/ODAS/ODAS_Regress/TmpAtmProfile_netCDF_IO.f90", "max_issues_repo_name": "hsbadr/crtm", "max_issues_repo_head_hexsha": "bfeb9955637f361fc69fa0b7af0e8d92d40718b1", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2020-11-05T21:04:37.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-18T18:23:10.000Z", "max_forks_repo_path": "src/TauRegress/ODAS/ODAS_Regress/TmpAtmProfile_netCDF_IO.f90", "max_forks_repo_name": "hsbadr/crtm", "max_forks_repo_head_hexsha": "bfeb9955637f361fc69fa0b7af0e8d92d40718b1", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": 12, "max_forks_repo_forks_event_min_datetime": "2020-10-29T17:54:31.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-29T08:42:45.000Z", "avg_line_length": 46.1365176152, "max_line_length": 129, "alphanum_fraction": 0.5391681046, "num_tokens": 32491}
|
"""
Support for example tables wrapping data stored on a PostgreSQL server.
"""
import functools
import logging
import threading
import warnings
from contextlib import contextmanager
from itertools import islice
from time import strftime
import numpy as np
from Orange.data import Table, Domain, Value, Instance, filter
from Orange.data.sql import filter as sql_filter
from Orange.data.sql.backend import Backend
from Orange.data.sql.backend.base import TableDesc, BackendError
from Orange.misc import import_late_warning
# Tables with (approximately) more rows than this are sampled with
# sample_time() before statistics/distributions/contingencies are computed.
LARGE_TABLE = 100000
# Row limit used when the X/Y/metas/W/ids properties auto-download data.
AUTO_DL_LIMIT = 10000
# Time budget in seconds for the automatic time-limited sampling above.
DEFAULT_SAMPLE_TIME = 1
# Module-level logger for SQL-related diagnostics.
sql_log = logging.getLogger("sql_log")
sql_log.debug("Logging started: {}".format(strftime("%Y-%m-%d %H:%M:%S")))
class SqlTable(Table):
    """An Orange Table whose data stays on an SQL server.

    Rows are fetched lazily through ``self.backend``; filtering only appends
    SQL WHERE clauses instead of materializing data in memory.
    """

    # Quoted table name or "(query) as my_table" wrapper this proxy reads from.
    table_name = None
    # Domain inferred from the table's columns (see get_domain).
    domain = None
    # Tuple of sql_filter.*Sql objects combined into the WHERE clause.
    row_filters = ()

    def __new__(cls, *args, **kwargs):
        # We do not (yet) need the magic of the Table.__new__, so we call it
        # with no parameters.
        return super().__new__(cls)
    def __init__(
        self,
        connection_params,
        table_or_sql,
        backend=None,
        type_hints=None,
        inspect_values=False,
    ):
        """
        Create a new proxy for an SQL table.

        connection_params: a database name (str) or a dict of connection
            parameters; for documentation about the parameters see
            http://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-PARAMKEYWORDS

        table_or_sql: a TableDesc, a table name, or any string containing
            "select", which is treated as a raw SQL query, e.g.::

                table = SqlTable('database_name', 'table_name')
                table = SqlTable('database_name', 'SELECT * FROM table')

        backend: a Backend subclass to instantiate; when None, every
            available backend is tried until one connects.

        type_hints: a Domain whose variables are used for columns with
            matching names; types of the remaining columns are inferred.

        inspect_values: when True, column values are inspected and int/string
            columns with less than 21 values are interpreted as discrete
            features; the (very quick) default treats numeric columns as
            continuous and everything else as meta strings.
        """
        if isinstance(connection_params, str):
            connection_params = dict(database=connection_params)
        if backend is None:
            # Try every registered backend until one accepts the connection.
            for backend in Backend.available_backends():
                try:
                    self.backend = backend(connection_params)
                    break
                except BackendError as ex:
                    # NOTE(review): connection failures go to stdout; consider
                    # routing through sql_log instead — confirm intent.
                    print(ex)
            else:
                raise ValueError("No backend could connect to server")
        else:
            self.backend = backend(connection_params)
        if table_or_sql is not None:
            if isinstance(table_or_sql, TableDesc):
                table = table_or_sql.sql
            elif "select" in table_or_sql.lower():
                # Raw SQL query: wrap it so it can appear in a FROM clause.
                table = "(%s) as my_table" % table_or_sql.strip("; ")
            else:
                table = self.backend.quote_identifier(table_or_sql)
            self.table_name = table
            self.domain = self.get_domain(type_hints, inspect_values)
            self.name = table
    @property
    def connection_params(self):
        """Deprecated; use ``self.backend.connection_params`` instead."""
        warnings.warn("Use backend.connection_params", DeprecationWarning)
        return self.backend.connection_params
def get_domain(self, type_hints=None, inspect_values=False):
table_name = self.table_name
if type_hints is None:
type_hints = Domain([])
inspect_table = table_name if inspect_values else None
attrs, class_vars, metas = [], [], []
for field_name, *field_metadata in self.backend.get_fields(table_name):
var = self.backend.create_variable(
field_name, field_metadata, type_hints, inspect_table
)
if var.is_string:
metas.append(var)
else:
if var in type_hints.class_vars:
class_vars.append(var)
elif var in type_hints.metas:
metas.append(var)
else:
attrs.append(var)
return Domain(attrs, class_vars, metas)
    def __getitem__(self, key):
        """ Indexing of SqlTable is performed in the following way:

        If a single row is requested, it is fetched from the database and
        returned as a SqlRowInstance.

        A new SqlTable with appropriate filters is constructed and returned
        otherwise.
        """
        if isinstance(key, int):
            # one row
            return self._fetch_row(key)
        if not isinstance(key, tuple):
            # row filter
            key = (key, Ellipsis)
        if len(key) != 2:
            raise IndexError("Table indices must be one- or two-dimensional")
        row_idx, col_idx = key
        if isinstance(row_idx, int):
            try:
                # Single cell: resolve the column and fetch just that value.
                col_idx = self.domain.index(col_idx)
                var = self.domain[col_idx]
                return Value(var, next(self._query([var], rows=[row_idx]))[0])
            except TypeError:
                # col_idx was not a single-column specifier; fall through to
                # the table-construction path below.
                pass
        elif not (row_idx is Ellipsis or row_idx == slice(None)):
            # TODO if row_idx specify multiple rows, one of the following must
            # happen
            #  - the new table remembers which rows are selected (implement
            #    table.limit_rows and whatever else is necessary)
            #  - return an ordinary (non-SQL) Table
            #  - raise an exception
            raise NotImplementedError("Row indices must be integers.")
        # multiple rows OR single row but multiple columns:
        # construct a new table
        table = self.copy()
        table.domain = self.domain.select_columns(col_idx)
        # table.limit_rows(row_idx)
        return table
    @functools.lru_cache(maxsize=128)
    def _fetch_row(self, row_index):
        """Fetch one row by offset and wrap it in a SqlRowInstance.

        Raises IndexError when the query yields no row at that offset.
        NOTE(review): ``lru_cache`` on an instance method stores ``self`` in a
        class-wide cache, keeping tables alive and sharing the 128-entry
        budget across all instances — confirm this is intended.
        """
        attributes = self.domain.variables + self.domain.metas
        rows = [row_index]
        values = list(self._query(attributes, rows=rows))
        if not values:
            raise IndexError(
                "Could not retrieve row {} from table {}".format(row_index, self.name)
            )
        return SqlRowInstance(self.domain, values[0])
def __iter__(self):
""" Iterating through the rows executes the query using a cursor and
then yields resulting rows as SqlRowInstances as they are requested.
"""
attributes = self.domain.variables + self.domain.metas
for row in self._query(attributes):
yield SqlRowInstance(self.domain, row)
    def _query(self, attributes=None, filters=(), rows=None):
        """Execute a SELECT over this table and yield raw result tuples.

        attributes: variables providing ``to_sql`` (None selects ``*``).
        filters: extra filter objects appended to the table's row_filters.
        rows: a slice or a collection of row indices (see TODO below).
        """
        if attributes is not None:
            fields = []
            for attr in attributes:
                assert hasattr(
                    attr, "to_sql"
                ), "Cannot use ordinary attributes with sql backend"
                field_str = '(%s) AS "%s"' % (attr.to_sql(), attr.name)
                fields.append(field_str)
            if not fields:
                raise ValueError("No fields selected.")
        else:
            fields = ["*"]
        filters = [f.to_sql() for f in filters]
        # Translate the requested rows into OFFSET/LIMIT.
        offset = limit = None
        if rows is not None:
            if isinstance(rows, slice):
                offset = rows.start or 0
                if rows.stop is not None:
                    limit = rows.stop - offset
            else:
                rows = list(rows)
                offset, stop = min(rows), max(rows)
                limit = stop - offset + 1
                # TODO: this returns all rows between min(rows) and max(rows): fix!
        query = self._sql_query(fields, filters, offset=offset, limit=limit)
        with self.backend.execute_sql_query(query) as cur:
            while True:
                row = cur.fetchone()
                if row is None:
                    break
                yield row
def copy(self):
"""Return a copy of the SqlTable"""
table = SqlTable.__new__(SqlTable)
table.backend = self.backend
table.domain = self.domain
table.row_filters = self.row_filters
table.table_name = self.table_name
table.name = self.name
return table
def __bool__(self):
"""Return True if the SqlTable is not empty."""
query = self._sql_query(["1"], limit=1)
with self.backend.execute_sql_query(query) as cur:
return cur.fetchone() is not None
    # Cached exact row count; None until __len__/download_data computes it.
    _cached__len__ = None

    def __len__(self):
        """
        Return number of rows in the table. The value is cached so it is
        computed only the first time the length is requested.
        """
        if self._cached__len__ is None:
            return self._count_rows()
        return self._cached__len__

    def _count_rows(self):
        # Issue SELECT COUNT(*) with the current filters and cache the result.
        query = self._sql_query(["COUNT(*)"])
        with self.backend.execute_sql_query(query) as cur:
            self._cached__len__ = cur.fetchone()[0]
        return self._cached__len__
    def approx_len(self, get_exact=False):
        """Return an estimate of the row count.

        Uses the cached exact count when available, otherwise the backend's
        planner-based estimate; if that raises NotImplementedError the exact
        count is computed instead. With ``get_exact`` the exact count is also
        computed in a background thread to warm the cache.
        """
        if self._cached__len__ is not None:
            return self._cached__len__
        approx_len = None
        try:
            query = self._sql_query(["*"])
            approx_len = self.backend.count_approx(query)
            if get_exact:
                # len(self) populates _cached__len__ as a side effect.
                threading.Thread(target=len, args=(self,)).start()
        except NotImplementedError:
            pass
        if approx_len is None:
            approx_len = len(self)
        return approx_len
    # Lazily downloaded numpy buffers; None until download_data() fills them.
    _X = None
    _Y = None
    _metas = None
    _W = None
    _ids = None

    def download_data(self, limit=None, partial=False):
        """Download SQL data and store it in memory as numpy matrices."""
        # Refuse a full (non-partial) download when the table is
        # (approximately) larger than the requested limit.
        if limit and not partial and self.approx_len() > limit:
            raise ValueError("Too many rows to download the data into memory.")
        X = [np.empty((0, len(self.domain.attributes)))]
        Y = [np.empty((0, len(self.domain.class_vars)))]
        metas = [np.empty((0, len(self.domain.metas)))]
        for row in islice(self, limit):
            X.append(row._x)
            Y.append(row._y)
            metas.append(row._metas)
        self._X = np.vstack(X).astype(np.float64)
        self._Y = np.vstack(Y).astype(np.float64)
        self._metas = np.vstack(metas).astype(object)
        # No weights are stored server-side: zero-column weight matrix.
        self._W = np.empty((self._X.shape[0], 0))
        self._init_ids(self)
        # Parses as ``(not partial) or (limit and fewer rows than limit)``:
        # either way we know we saw every row, so fix the cached length.
        if not partial or limit and self._X.shape[0] < limit:
            self._cached__len__ = self._X.shape[0]
    @property
    def X(self):
        """Numpy array with attribute values."""
        if self._X is None:
            self.download_data(AUTO_DL_LIMIT)
        return self._X

    @property
    def Y(self):
        """Numpy array with class values."""
        if self._Y is None:
            self.download_data(AUTO_DL_LIMIT)
        return self._Y

    @property
    def metas(self):
        """Numpy array with meta attribute values."""
        if self._metas is None:
            self.download_data(AUTO_DL_LIMIT)
        return self._metas

    @property
    def W(self):
        """Numpy array with instance weights (always zero columns here)."""
        if self._W is None:
            self.download_data(AUTO_DL_LIMIT)
        return self._W

    @property
    def ids(self):
        """Numpy array with row identifiers."""
        if self._ids is None:
            self.download_data(AUTO_DL_LIMIT)
        return self._ids

    @ids.setter
    def ids(self, value):
        # Allows Table machinery (e.g. _init_ids) to assign row ids directly.
        self._ids = value

    @ids.deleter
    def ids(self):
        del self._ids
    def has_weights(self):
        """SQL tables never carry instance weights."""
        return False
    def _compute_basic_stats(
        self, columns=None, include_metas=False, compute_var=False
    ):
        """Compute per-column basic statistics on the server.

        ``compute_var`` is accepted for interface compatibility but unused
        here. Large tables are first reduced with a time-limited sample.
        """
        if self.approx_len() > LARGE_TABLE:
            self = self.sample_time(DEFAULT_SAMPLE_TIME)
        if columns is not None:
            columns = [self.domain[col] for col in columns]
        else:
            columns = self.domain.variables
            if include_metas:
                columns += self.domain.metas
        return self._get_stats(columns)

    def _get_stats(self, columns):
        """Run one aggregate query and unpack the flat result per column.

        Continuous columns contribute 6 values (min, max, mean, stddev,
        #nulls, #non-nulls); discrete columns contribute 2 (#nulls,
        #non-nulls), padded with four leading Nones for a uniform shape.
        """
        columns = [(c.to_sql(), c.is_continuous) for c in columns]
        sql_fields = []
        for field_name, continuous in columns:
            stats = self.CONTINUOUS_STATS if continuous else self.DISCRETE_STATS
            sql_fields.append(stats % dict(field_name=field_name))
        query = self._sql_query(sql_fields)
        with self.backend.execute_sql_query(query) as cur:
            results = cur.fetchone()
        stats = []
        i = 0
        for ci, (field_name, continuous) in enumerate(columns):
            if continuous:
                stats.append(results[i : i + 6])
                i += 6
            else:
                stats.append((None,) * 4 + results[i : i + 2])
                i += 2
        return stats
    def _compute_distributions(self, columns=None):
        """Compute value distributions for the given columns (default: all
        variables), sampling the table first when it is large."""
        if self.approx_len() > LARGE_TABLE:
            self = self.sample_time(DEFAULT_SAMPLE_TIME)
        if columns is not None:
            columns = [self.domain[col] for col in columns]
        else:
            columns = self.domain.variables
        return self._get_distributions(columns)

    def _get_distributions(self, columns):
        """Run one GROUP BY count query per column.

        Continuous columns yield a 2xN array of (value, count) pairs;
        discrete columns yield only the count vector. Each result is paired
        with an empty "unknowns" list.
        """
        dists = []
        for col in columns:
            field_name = col.to_sql()
            fields = field_name, "COUNT(%s)" % field_name
            query = self._sql_query(
                fields,
                filters=["%s IS NOT NULL" % field_name],
                group_by=[field_name],
                order_by=[field_name],
            )
            with self.backend.execute_sql_query(query) as cur:
                dist = np.array(cur.fetchall())
            if col.is_continuous:
                dists.append((dist.T, []))
            else:
                dists.append((dist[:, 1].T, []))
        return dists
    def _compute_contingency(self, col_vars=None, row_var=None):
        """Compute contingencies of each column variable against ``row_var``
        (which must be discrete) with GROUP BY queries.

        Returns ``(contingencies, None)``; each contingency is paired with
        an empty "unknowns" list. Currently only a single column variable is
        supported.
        """
        if self.approx_len() > LARGE_TABLE:
            self = self.sample_time(DEFAULT_SAMPLE_TIME)
        if col_vars is None:
            col_vars = range(len(self.domain.variables))
        if len(col_vars) != 1:
            raise NotImplementedError(
                "Contingency for multiple columns " "has not yet been implemented."
            )
        if row_var is None:
            raise NotImplementedError("Defaults have not been implemented yet")
        row = self.domain[row_var]
        if not row.is_discrete:
            raise TypeError("Row variable must be discrete")
        columns = [self.domain[var] for var in col_vars]
        if any(not (var.is_continuous or var.is_discrete) for var in columns):
            raise ValueError(
                "contingency can be computed only for discrete " "and continuous values"
            )
        row_field = row.to_sql()
        all_contingencies = [None] * len(columns)
        for i, column in enumerate(columns):
            column_field = column.to_sql()
            # Count rows per (row value, column value) pair, NULLs excluded,
            # ordered by column value as _continuous_contingencies requires.
            fields = [row_field, column_field, "COUNT(%s)" % column_field]
            group_by = [row_field, column_field]
            order_by = [column_field]
            filters = ["%s IS NOT NULL" % f for f in (row_field, column_field)]
            query = self._sql_query(
                fields, filters=filters, group_by=group_by, order_by=order_by
            )
            with self.backend.execute_sql_query(query) as cur:
                data = list(cur.fetchall())
                if column.is_continuous:
                    all_contingencies[i] = (
                        self._continuous_contingencies(data, row),
                        [],
                    )
                else:
                    all_contingencies[i] = (
                        self._discrete_contingencies(data, row, column),
                        [],
                    )
        return all_contingencies, None
    def _continuous_contingencies(self, data, row):
        """Fold (row_value, column_value, count) triples — ordered by column
        value — into a ``(values, counts)`` pair.

        ``values`` holds the distinct column values in order; ``counts`` is
        indexed by (row category, column value position).
        NOTE(review): both arrays are allocated with len(data) columns but
        only the first i+1 are filled when column values repeat; callers
        presumably tolerate the trailing zeros — confirm.
        """
        values = np.zeros(len(data))
        counts = np.zeros((len(row.values), len(data)))
        last = None
        i = -1
        for row_value, column_value, count in data:
            if column_value == last:
                counts[row.to_val(row_value), i] += count
            else:
                # New distinct column value: open the next column slot.
                i += 1
                last = column_value
                values[i] = column_value
                counts[row.to_val(row_value), i] += count
        return (values, counts)
def _discrete_contingencies(self, data, row, column):
conts = np.zeros((len(row.values), len(column.values)))
for row_value, col_value, count in data:
row_index = row.to_val(row_value)
col_index = column.to_val(col_value)
conts[row_index, col_index] = count
return conts
    def X_density(self):
        # Downloaded matrices are always dense numpy arrays.
        return self.DENSE

    def Y_density(self):
        return self.DENSE

    def metas_density(self):
        return self.DENSE
    # Filters
    def _filter_is_defined(self, columns=None, negate=False):
        """Return a copy filtered to rows where the given columns (default:
        all variables) are non-NULL; ``negate`` inverts the condition."""
        if columns is None:
            columns = range(len(self.domain.variables))
        columns = [self.domain[i].to_sql() for i in columns]
        t2 = self.copy()
        t2.row_filters += (sql_filter.IsDefinedSql(columns, negate),)
        return t2

    def _filter_has_class(self, negate=False):
        """Return a copy filtered to rows whose class columns are non-NULL."""
        columns = [c.to_sql() for c in self.domain.class_vars]
        t2 = self.copy()
        t2.row_filters += (sql_filter.IsDefinedSql(columns, negate),)
        return t2
    def _filter_same_value(self, column, value, negate=False):
        """Return a copy filtered to rows where ``column`` equals ``value``.

        Discrete values are converted to their quoted SQL representation;
        None and other values are passed through unchanged.
        NOTE(review): non-discrete values are not quoted/escaped here —
        confirm the sql_filter layer handles that safely.
        """
        var = self.domain[column]
        if value is None:
            pass
        elif var.is_discrete:
            value = var.to_val(value)
            value = "'%s'" % var.repr_val(value)
        else:
            pass
        t2 = self.copy()
        t2.row_filters += (sql_filter.SameValueSql(var.to_sql(), value, negate),)
        return t2
    def _filter_values(self, f):
        """Translate an Orange ``filter.Values`` object into SQL filter
        conditions and return a filtered copy of the table."""
        conditions = []
        for cond in f.conditions:
            var = self.domain[cond.column]
            if isinstance(cond, filter.FilterDiscrete):
                if cond.values is None:
                    values = None
                else:
                    # Quote each discrete value via its string representation.
                    values = ["'%s'" % var.repr_val(var.to_val(v)) for v in cond.values]
                new_condition = sql_filter.FilterDiscreteSql(
                    column=var.to_sql(), values=values
                )
            elif isinstance(cond, filter.FilterContinuous):
                new_condition = sql_filter.FilterContinuousSql(
                    position=var.to_sql(), oper=cond.oper, ref=cond.ref, max=cond.max
                )
            elif isinstance(cond, filter.FilterString):
                new_condition = sql_filter.FilterString(
                    var.to_sql(),
                    oper=cond.oper,
                    ref=cond.ref,
                    max=cond.max,
                    case_sensitive=cond.case_sensitive,
                )
            elif isinstance(cond, filter.FilterStringList):
                new_condition = sql_filter.FilterStringList(
                    column=var.to_sql(),
                    values=cond.values,
                    case_sensitive=cond.case_sensitive,
                )
            else:
                raise ValueError("Invalid condition %s" % type(cond))
            conditions.append(new_condition)
        t2 = self.copy()
        t2.row_filters += (
            sql_filter.ValuesSql(
                conditions=conditions, conjunction=f.conjunction, negate=f.negate
            ),
        )
        return t2
    @classmethod
    def from_table(cls, domain, source, row_indices=...):
        """Return a copy of ``source`` with its domain replaced by ``domain``.

        Row selection is not supported for SQL tables, hence the assert.
        """
        assert row_indices is ...
        table = source.copy()
        table.domain = domain
        return table
    # sql queries
    def _sql_query(
        self,
        fields,
        filters=(),
        group_by=None,
        order_by=None,
        offset=None,
        limit=None,
        use_time_sample=None,
    ):
        """Compose a SELECT statement via the backend, combining the table's
        accumulated ``row_filters`` with the extra ``filters``."""
        row_filters = [f.to_sql() for f in self.row_filters]
        row_filters.extend(filters)
        return self.backend.create_sql_query(
            self.table_name,
            fields,
            row_filters,
            group_by,
            order_by,
            offset,
            limit,
            use_time_sample,
        )
DISCRETE_STATS = (
"SUM(CASE TRUE WHEN %(field_name)s IS NULL THEN 1 "
"ELSE 0 END), "
"SUM(CASE TRUE WHEN %(field_name)s IS NULL THEN 0 "
"ELSE 1 END)"
)
CONTINUOUS_STATS = (
"MIN(%(field_name)s)::double precision, "
"MAX(%(field_name)s)::double precision, "
"AVG(%(field_name)s)::double precision, "
"STDDEV(%(field_name)s)::double precision, " + DISCRETE_STATS
)
    def sample_percentage(self, percentage, no_cache=False):
        """Return a table sampled with TABLESAMPLE "system" at the given
        percentage; 100 or more returns self unchanged."""
        if percentage >= 100:
            return self
        return self._sample("system", percentage, no_cache=no_cache)

    def sample_time(self, time_in_seconds, no_cache=False):
        """Return a table sampled with TABLESAMPLE "system_time", limited to
        roughly ``time_in_seconds`` of sampling (passed on in milliseconds)."""
        return self._sample(
            "system_time", int(time_in_seconds * 1000), no_cache=no_cache
        )
def _sample(self, method, parameter, no_cache=False):
import psycopg2
if "," in self.table_name:
raise NotImplementedError("Sampling of complex queries is not supported")
parameter = str(parameter)
if "." in self.table_name:
schema, name = self.table_name.split(".")
sample_name = "__%s_%s_%s" % (
self.backend.unquote_identifier(name),
method,
parameter.replace(".", "_").replace("-", "_"),
)
sample_table_q = ".".join(
[schema, self.backend.quote_identifier(sample_name)]
)
else:
sample_table = "__%s_%s_%s" % (
self.backend.unquote_identifier(self.table_name),
method,
parameter.replace(".", "_").replace("-", "_"),
)
sample_table_q = self.backend.quote_identifier(sample_table)
create = False
try:
query = "SELECT * FROM " + sample_table_q + " LIMIT 0;"
with self.backend.execute_sql_query(query):
pass
if no_cache:
query = "DROP TABLE " + sample_table_q
with self.backend.execute_sql_query(query):
pass
create = True
except BackendError:
create = True
if create:
with self.backend.execute_sql_query(
" ".join(
[
"CREATE TABLE",
sample_table_q,
"AS",
"SELECT * FROM",
self.table_name,
"TABLESAMPLE",
method,
"(",
parameter,
")",
]
)
):
pass
sampled_table = self.copy()
sampled_table.table_name = sample_table_q
with sampled_table.backend.execute_sql_query("ANALYZE" + sample_table_q):
pass
return sampled_table
    @contextmanager
    def _execute_sql_query(self, query, param=None):
        """Deprecated; use ``self.backend.execute_sql_query`` instead."""
        warnings.warn("Use backend.execute_sql_query", DeprecationWarning)
        with self.backend.execute_sql_query(query, param) as cur:
            yield cur

    def checksum(self, include_metas=True):
        # Checksums are not defined for server-side data.
        return np.nan
class SqlRowInstance(Instance):
    """
    Extends :obj:`Orange.data.Instance` to correctly handle values of meta
    attributes.
    """

    def __init__(self, domain, data=None):
        """Split ``data`` into variable values and trailing meta values."""
        n_vars = len(domain.variables)
        head, tail = data[:n_vars], data[n_vars:]
        super().__init__(domain, head)
        if len(tail) > 0:
            self._metas = np.asarray(tail, dtype=object)
|
{"hexsha": "8dd48f406b84d8aa82f770b85a91233bc395bf81", "size": 24284, "ext": "py", "lang": "Python", "max_stars_repo_path": "orange3/Orange/data/sql/table.py", "max_stars_repo_name": "rgschmitz1/BioDepot-workflow-builder", "max_stars_repo_head_hexsha": "f74d904eeaf91ec52ec9b703d9fb38e9064e5a66", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 54, "max_stars_repo_stars_event_min_datetime": "2017-01-08T17:21:49.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-02T08:46:07.000Z", "max_issues_repo_path": "orange3/Orange/data/sql/table.py", "max_issues_repo_name": "Synthia-3/BioDepot-workflow-builder", "max_issues_repo_head_hexsha": "4ee93abe2d79465755e82a145af3b6a6e1e79fd4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 22, "max_issues_repo_issues_event_min_datetime": "2017-03-28T06:03:14.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-28T05:43:55.000Z", "max_forks_repo_path": "orange3/Orange/data/sql/table.py", "max_forks_repo_name": "Synthia-3/BioDepot-workflow-builder", "max_forks_repo_head_hexsha": "4ee93abe2d79465755e82a145af3b6a6e1e79fd4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 21, "max_forks_repo_forks_event_min_datetime": "2017-01-26T21:12:09.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-31T21:34:59.000Z", "avg_line_length": 34.2028169014, "max_line_length": 92, "alphanum_fraction": 0.566669412, "include": true, "reason": "import numpy", "num_tokens": 5090}
|
	SUBROUTINE GH_KGST ( iblk, iret )
C************************************************************************
C* GH_KGST                                                              *
C*                                                                      *
C* This subroutine plots the U.S. two character state identifiers and   *
C* the tropical / Caribbean country and Pacific island names on the     *
C* current map.                                                         *
C*                                                                      *
C* GH_KGST ( IBLK, IRET )                                               *
C*                                                                      *
C* Input parameters:                                                    *
C*      IBLK            INTEGER         Black color value               *
C*                                                                      *
C* Output parameters:                                                   *
C*      IRET            INTEGER         Return code                     *
C*                                        0 = normal return             *
C**                                                                     *
C* Log:                                                                 *
C* A. Hardy/SAIC         8/01   Created                                 *
C* D. Kidwell/NCEP       4/02   Moved P.R. lon -66 to -65               *
C* D. Kidwell/NCEP       8/02   Corrected Columbia to Colombia          *
C* S. Gilbert/NCEP      04/07   Added labels for islands (etc) for CPHC *
C************************************************************************
C*	NMCON is the number of country/island labels in the tables below.
	PARAMETER	( NMCON = 25 )
C*
	CHARACTER	states(50)*2, cdproj*20, stid*2,
     +			contry(NMCON)*22
	REAL		conlat(NMCON), conlon(NMCON), fsize(NMCON)
	INTEGER		ixoff(NMCON), iyoff(NMCON)
C*
C*	Two-letter identifiers for the 50 U.S. states.
	DATA states / 'AL', 'AK', 'AZ', 'AR', 'CA', 'CO', 'CT', 'DE',
     +		      'FL', 'GA', 'HI', 'ID', 'IL', 'IN', 'IA', 'KS',
     +		      'KY', 'LA', 'ME', 'MD', 'MA', 'MI', 'MN', 'MS',
     +		      'MO', 'MT', 'NE', 'NV', 'NH', 'NJ','NM', 'NY',
     +		      'NC', 'ND', 'OH', 'OK', 'OR', 'PA', 'RI', 'SC',
     +		      'SD', 'TN', 'TX', 'UT', 'VT', 'VA', 'WA', 'WV',
     +		      'WI', 'WY' /
C
C*	Country/island label strings ('\n' is a soft line break).
	DATA contry / 'Bahamas', 'Bermuda', 'Belize', 'Colombia',
     +		      'Costa Rica', 'Cuba', 'Dominican Rep.',
     +		      'El Salvador', 'Guatemala', 'Haiti', 'Honduras',
     +		      'Jamaica', 'Mexico', 'Nicaragua', 'Panama',
     +		      'Puerto Rico', 'Venezuela', 'Johnston Atoll',
     +		      'Nihoa', 'French Frigate\nShoals', 'Maro Reef',
     +		      'Lisianski', 'Pearl and\nHermes Atoll',
     +		      'Midway', 'Kure Atoll' /
C
C*	Label latitudes (degrees north), one per entry in contry.
	DATA conlat / 24.50, 32.00, 17.10, 9.00,
     +		      9.90, 22.00, 18.70,
     +		      13.78, 15.50, 19.30, 14.90,
     +		      17.50, 23.50, 12.80, 8.50,
     +		      18.90, 9.00, 16.75,
     +		      23.06, 23.85, 25.33,
     +		      26.08, 27.83,
     +		      28.21, 28.41 /
C
C*	Label longitudes (degrees east; negative values are west).
	DATA conlon / -73.80, -63.70, -88.50, -75.00,
     +		      -84.00, -79.50, -69.50,
     +		      -88.90, -90.30, -72.50, -86.70,
     +		      -77.00, -100.30, -85.50, -80.00,
     +		      -65.00, -67.50, -169.51,
     +		      -161.91, -166.26, -170.50,
     +		      -174.00, -175.83,
     +		      -177.36, -178.33 /
C
C*	Text size multiplier per label (smaller for Pacific islands).
	DATA fsize /  1.0, 1.0, 1.0, 1.0,
     +		      1.0, 1.0, 1.0,
     +		      1.0, 1.0, 1.0, 1.0,
     +		      1.0, 1.0, 1.0, 1.0,
     +		      1.0, 0.8, 0.8,
     +		      0.8, 0.8, 0.8,
     +		      0.8, 0.8,
     +		      0.8, 0.8 /
C
C*	X text offsets for labels that need nudging away from features.
	DATA ixoff /  0, 0, 0, 0,
     +		      0, 0, 0,
     +		      0, 0, 0, 0,
     +		      0, 0, 0, 0,
     +		      0, 0, 0,
     +		      0, 0, 0,
     +		      0, 0,
     +		      6, -10 /
C
C*	Y text offsets, matching ixoff.
	DATA iyoff /  0, 0, 0, 0,
     +		      0, 0, 0,
     +		      0, 0, 0, 0,
     +		      0, 0, 0, 0,
     +		      0, 0, 0,
     +		      0, 0, 0,
     +		      0, -2,
     +		      1, 1 /
C------------------------------------------------------------------------
	iret = 0
C
C*	Query color and line attributes.
C
	CALL GH_SAVE ( ier )
C
C*	Set text attributes.
C
	CALL GSCOLR ( iblk, ier )
	CALL GSTEXT ( 2, 2, 1.0, 1, 111, 1, 2, ier )
C
C*	Find each state ID in the geographic file list and label the
C*	state at its geographic center.
C
	DO ii = 1, 50
	    stid = states(ii)
	    CALL TB_FGEO ( stid, tlatll, tlonll, tlatur, tlonur,
     +			   cdproj, cenlat, cenlon, ier )
	    CALL GTEXT ( 'M', cenlat, cenlon, stid, 0.0,
     +			 0, 0, ier )
	END DO
C
C*	Plot each country/island label at its tabulated position, with
C*	its own text size and pixel offsets.
C
	DO ii = 1, NMCON
	    CALL ST_LSTR ( contry(ii), lens, ier )
	    CALL GSTEXT ( 2, 2, fsize(ii), 1, 111, 1, 2, ier )
	    CALL GTEXT ( 'M', conlat(ii), conlon(ii), contry(ii)(:lens),
     +			 0.0, ixoff(ii), iyoff(ii), ier )
	END DO
C
C*	Restore color and line attributes.
C
	CALL GH_REST ( ier )
C*
	RETURN
	END
|
{"hexsha": "492ee5dc7219e61cdcabd11a6120a587afabc851", "size": 4813, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "gempak/source/cgemlib/gh/ghkgst.f", "max_stars_repo_name": "oxelson/gempak", "max_stars_repo_head_hexsha": "e7c477814d7084c87d3313c94e192d13d8341fa1", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 42, "max_stars_repo_stars_event_min_datetime": "2015-06-03T15:26:21.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-28T22:36:03.000Z", "max_issues_repo_path": "gempak/source/cgemlib/gh/ghkgst.f", "max_issues_repo_name": "oxelson/gempak", "max_issues_repo_head_hexsha": "e7c477814d7084c87d3313c94e192d13d8341fa1", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 60, "max_issues_repo_issues_event_min_datetime": "2015-05-11T21:36:08.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-29T16:22:42.000Z", "max_forks_repo_path": "gempak/source/cgemlib/gh/ghkgst.f", "max_forks_repo_name": "oxelson/gempak", "max_forks_repo_head_hexsha": "e7c477814d7084c87d3313c94e192d13d8341fa1", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 27, "max_forks_repo_forks_event_min_datetime": "2016-06-06T21:55:14.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-18T18:23:28.000Z", "avg_line_length": 37.8976377953, "max_line_length": 73, "alphanum_fraction": 0.3448992312, "num_tokens": 1791}
|
"""
import_bibtex(file::String)
Import a BibTeX file and convert it to the internal bibliography format.
"""
function import_bibtex(file::String)
return BibParser.parse_file(file)
end
"""
    int_to_spaces(n::Int)

Return a string of `n` spaces. Uses string repetition (`^`) instead of the
original O(n) concatenation loop; `n ≤ 0` yields the empty string, matching
the loop's behavior for an empty range.
"""
function int_to_spaces(n::Int)
    return n > 0 ? " "^n : ""
end
# Dictionary mapping each known BibTeX field name to the padding string that
# aligns its "=" sign on export (precomputed from BibInternal.space).
const spaces = Dict{String,String}(map(
    s -> (string(s) => int_to_spaces(BibInternal.space(s))),
    BibInternal.fields)
)
# Render one ` key = {value},\n` BibTeX field line with alignment padding.
# Returns "" for empty values and for internal "swp"-prefixed field names.
function field_to_bibtex(
    key::String,
    value::String
)
    pad = get(spaces, key, int_to_spaces(BibInternal.space(Symbol(key))))
    internal = length(key) > 3 && startswith(key, "swp")
    return (value == "" || internal) ? "" : " $key$pad = {$value},\n"
end
# Format a single name as "particle last, junior, first middle", emitting
# each separator only when both neighbouring parts are present.
function name_to_string(name::BibInternal.Name)
    out = name.particle
    if !isempty(out) && !isempty(name.last)
        out *= " "
    end
    out *= name.last
    if !isempty(name.junior)
        out *= ", $(name.junior)"
    end
    if !isempty(name.first)
        out *= ", $(name.first)"
    end
    if !isempty(name.middle)
        out *= " $(name.middle)"
    end
    return out
end
"""
    names_to_strings(names::BibInternal.Names)

Join all `names`, each formatted by `name_to_string`, with the BibTeX
`" and "` separator. Returns the empty string for an empty collection
(the original concatenation loop raised `UndefVarError` in that case,
since `str` was only assigned when at least one name existed).
"""
function names_to_strings(names::BibInternal.Names)
    return join((name_to_string(name) for name in names), " and ")
end
# Copy the access-related fields (doi, howpublished, url) of `a` into `fields`.
function access_to_bibtex!(
    fields::BibInternal.Fields,
    a::BibInternal.Access
)
    fields["doi"] = a.doi
    fields["howpublished"] = a.howpublished
    fields["url"] = a.url
end

# Copy the date components (day, month, year) of `d` into `fields`.
function date_to_bibtex!(
    fields::BibInternal.Fields,
    d::BibInternal.Date
)
    fields["day"] = d.day
    fields["month"] = d.month
    fields["year"] = d.year
end

# Copy the eprint information (archive prefix, id, primary class) of `e`
# into `fields`, using the canonical BibTeX key capitalization.
function eprint_to_bibtex!(
    fields::BibInternal.Fields,
    e::BibInternal.Eprint
)
    fields["archivePrefix"] = e.archive_prefix
    fields["eprint"] = e.eprint
    fields["primaryClass"] = e.primary_class
end

# Copy the publication-venue fields of `i` ("in" data: journal, publisher,
# pages, volume, ...) into `fields`.
function in_to_bibtex!(
    fields::BibInternal.Fields,
    i::BibInternal.In
)
    fields["address"] = i.address
    fields["chapter"] = i.chapter
    fields["edition"] = i.edition
    fields["institution"] = i.institution
    fields["journal"] = i.journal
    fields["number"] = i.number
    fields["organization"] = i.organization
    fields["pages"] = i.pages
    fields["publisher"] = i.publisher
    fields["school"] = i.school
    fields["series"] = i.series
    fields["volume"] = i.volume
end
"""
    export_bibtex(e::Entry)

Render one bibliography `Entry` as a BibTeX string. Access, date, eprint,
venue and name information are first flattened into `e.fields` (mutating the
entry), then every non-empty field is emitted except internal ones whose
names start with `"swp-"`; the last field's trailing `,\\n` is trimmed
before the closing brace.
"""
function export_bibtex(e::Entry)
    access_to_bibtex!(e.fields, e.access)
    e.fields["author"] = names_to_strings(e.authors)
    date_to_bibtex!(e.fields, e.date)
    e.fields["editor"] = names_to_strings(e.editors)
    eprint_to_bibtex!(e.fields, e.eprint)
    in_to_bibtex!(e.fields, e.in)
    e.fields["title"] = e.title
    str = "@$(e.type){$(e.id),\n"
    for (name, value) in collect(e.fields)
        # Skip internal bookkeeping fields prefixed with "swp-".
        m = match(r"swp-",name)
        if m === nothing || m.offset > 1
            str *= value == "" ? "" : field_to_bibtex(name, value)
        end
    end
    return str[1:end - 2] * "\n}"
end
# Export a whole bibliography: the entries, in order, joined by newlines.
function export_bibtex(bibliography::DataStructures.OrderedDict{String,Entry})
    return join((export_bibtex(entry) for entry in values(bibliography)), "\n")
end
|
{"hexsha": "4114b6e32bf25294fa3768e79c4bd751c939723b", "size": 3321, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/bibtex.jl", "max_stars_repo_name": "charleskawczynski/Bibliography.jl", "max_stars_repo_head_hexsha": "8d3f159387bc43df1ae899b68d3022de445bc5cc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/bibtex.jl", "max_issues_repo_name": "charleskawczynski/Bibliography.jl", "max_issues_repo_head_hexsha": "8d3f159387bc43df1ae899b68d3022de445bc5cc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/bibtex.jl", "max_forks_repo_name": "charleskawczynski/Bibliography.jl", "max_forks_repo_head_hexsha": "8d3f159387bc43df1ae899b68d3022de445bc5cc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.1590909091, "max_line_length": 78, "alphanum_fraction": 0.6145739235, "num_tokens": 956}
|
import numpy as np
import pandas as pd
from build.chart_data_functions import get_cumulative_cases_chart_data
from build.chart_data_functions import get_cumulative_tests_chart_data
from build.chart_data_functions import get_hospital_data
from build.chart_data_functions import get_in_intensive_data
from build.chart_data_functions import get_new_cases_per_day_chart_data
from build.chart_data_functions import get_on_ventilation_data
from build.chart_data_functions import get_tests_per_day_chart_data
from build.constants import DATE_SETTINGS, STATS_BAR_PATH, POPULATION
from build.constants import DEATHS_PATH
from build.constants import HOSPITALIZATION_PATH
from build.constants import MANUAL_DATA_PATH
from build.constants import TEST_RESULTS_PATH
from build.constants import TODAY_DMYHM
from build.constants import VACCINATIONS_PATH
from build.constants import YESTERDAY_YMD
from build.utils import analyze_memory
from build.utils import analyze_time
from build.utils import logger
from build.utils import read_json_from_file
from build.utils import save_as_json
@analyze_time
@analyze_memory
def main():
    """Build the dashboard stats-bar JSON from local data files.

    Loads test-result, hospitalization, vaccination, death and manual
    override data from disk, derives the headline statistics and chart
    aggregates, and writes the final dictionary to STATS_BAR_PATH.
    """
    # Log status
    logger.info("Loading local data files")
    test_results = read_json_from_file(TEST_RESULTS_PATH)
    hospitalization = read_json_from_file(HOSPITALIZATION_PATH)
    vaccination = read_json_from_file(VACCINATIONS_PATH)
    deaths = read_json_from_file(DEATHS_PATH)
    manual_data = read_json_from_file(MANUAL_DATA_PATH)
    # Log status
    logger.info("Calculating main statistics")
    # Statsbar
    # Find count of confirmed cases ("P" marks a positive result)
    n_confirmed_cases = np.sum([res["ResultValue"] == "P" for res in test_results])
    # Find total number of tests
    n_tests_administered = len(test_results)
    # Create date ranges for charts
    case_dates = pd.date_range(start=DATE_SETTINGS["firstCaseDate"], end=YESTERDAY_YMD)
    # Set recovered, deceased, hospitalized and ICU time-series
    hospital = get_hospital_data(hospitalization, DATE_SETTINGS["firstCaseDate"])
    recovered = hospital["discharged"]
    # `deaths` entries overwrite/extend the manually maintained series
    manual_data["deceased"].update(deaths)
    deceased = list(manual_data["deceased"].values())
    hospitalized = hospital["activeHospitalizations"]
    # TODO: Based on cross-checking with the hospitalization data published by TEHIK, the data listed
    # in the manual_data.json file with the field name "intensive" appears to show the number
    # of patients on ventilation. We should fix the terminology and make sure that the intensive
    # and on ventilation statistics are being calculated correctly.
    intensive = list(
        get_in_intensive_data(hospitalization, manual_data["intensive"]).values()
    )
    on_ventilation = list(get_on_ventilation_data(hospitalization).values())
    # latest cumulative death count and its day-over-day change
    n_deaths = deceased[-1]
    n_deaths_change = int(deceased[-1]) - int(deceased[-2])
    # Get data for each chart
    logger.info("Calculating data for charts")
    tests_per_day_chart_data = get_tests_per_day_chart_data(test_results, case_dates)
    cumulative_cases_chart_data = get_cumulative_cases_chart_data(
        test_results,
        recovered,
        deceased,
        hospitalized,
        intensive,
        on_ventilation,
        case_dates,
        test_per_day_chart_data=tests_per_day_chart_data,
    )
    new_cases_per_day_chart_data = get_new_cases_per_day_chart_data(
        cumulative_cases_chart_data
    )
    cumulative_tests_chart_data = get_cumulative_tests_chart_data(
        test_results, case_dates
    )
    n_active_cases = cumulative_cases_chart_data["active"][-1]
    n_active_cases_change = (
        cumulative_cases_chart_data["active"][-1]
        - cumulative_cases_chart_data["active"][-2]
    )
    # Calculate vaccination data
    logger.info("Calculating vaccination data")
    # last entries of the per-day vaccination series
    last_day_vaccination_data = [
        x for x in vaccination if x["MeasurementType"] == "Vaccinated"
    ][-1]
    last_day_completed_vaccination_data = [
        x for x in vaccination if x["MeasurementType"] == "FullyVaccinated"
    ][-1]
    # TODO: Doses administered
    completed_vaccination_number_total = last_day_completed_vaccination_data[
        "TotalCount"
    ]
    completed_vaccination_number_last_day = last_day_completed_vaccination_data[
        "DailyCount"
    ]
    all_vaccination_number_total = last_day_vaccination_data["TotalCount"]
    all_vaccination_number_last_day = last_day_vaccination_data["DailyCount"]
    # "vaccination" below means partially vaccinated (at least one dose,
    # not yet completed)
    vaccination_number_total = (
        all_vaccination_number_total - completed_vaccination_number_total
    )
    vaccination_number_last_day = (
        all_vaccination_number_last_day - completed_vaccination_number_last_day
    )
    fully_vaccinated_from_total_vaccinated_percentage = round(
        completed_vaccination_number_total * 100 / (all_vaccination_number_total), 2
    )
    raw_active_100k = (
        cumulative_cases_chart_data["active"][-1]
        - cumulative_cases_chart_data["active"][-2]
    )
    raw_confirmed_changed = new_cases_per_day_chart_data["confirmedCases"][-1]
    raw_per_hundred_changed = (
        cumulative_cases_chart_data["active100k"][-1]
        - cumulative_cases_chart_data["active100k"][-2]
    )
    confirmed_changed = new_cases_per_day_chart_data["confirmedCases"][-1]
    tests_changed = (
        cumulative_tests_chart_data["testsAdministered"][-1]
        - cumulative_tests_chart_data["testsAdministered"][-2]
    )
    # vaccinated-or-recovered estimate used for the "partially immunized" stat
    partially_immunized_total = all_vaccination_number_total + n_confirmed_cases
    partially_immunized_pct = round(
        (float(partially_immunized_total) / float(POPULATION) * 100), 2
    )
    # Create dictionary for final JSON
    logger.info("Compiling final JSON")
    final_json = {
        "updatedOn": TODAY_DMYHM,
        "activeCasesNumber": str(n_active_cases),
        "activeChanged": str(n_active_cases_change),
        "allVaccinationFromPopulationPercentage": last_day_vaccination_data[
            "PopulationCoverage"
        ],
        "allVaccinationNumberLastDay": all_vaccination_number_last_day,
        "allVaccinationNumberTotal": all_vaccination_number_total,
        "completedVaccinationNumberLastDay": completed_vaccination_number_last_day,
        "completedVaccinationNumberTotal": completed_vaccination_number_total,
        "completelyVaccinatedFromTotalVaccinatedPercentage": fully_vaccinated_from_total_vaccinated_percentage,
        "confirmedCasesNumber": str(n_confirmed_cases),
        "confirmedChanged": confirmed_changed,
        "deceasedChanged": str(n_deaths_change),
        "deceasedNumber": str(n_deaths),
        "hospitalizedChanged": str(
            hospital["activeHospitalizations"][-1]
            - hospital["activeHospitalizations"][-2]
        ),
        "hospitalizedNumber": hospital["activeHospitalizations"][-1],
        "partiallyImmunized": partially_immunized_total,
        "partiallyImmunizedPercentage": partially_immunized_pct,
        "perHundred": cumulative_cases_chart_data["active100k"][-1],
        "positiveTestAverage14Percent": tests_per_day_chart_data[
            "positiveTestAverage14Percent"
        ],
        "rawActiveChanged": raw_active_100k,
        "rawConfirmedChanged": raw_confirmed_changed,
        "rawPerHundredChanged": raw_per_hundred_changed,
        "recoveredChanged": str(
            hospital["discharged"][-1] - hospital["discharged"][-2]
        ),
        "recoveredNumber": hospital["discharged"][-1],
        "testsAdministeredNumber": str(n_tests_administered),
        "testsChanged": tests_changed,
    }
    # Dump JSON output
    save_as_json(STATS_BAR_PATH, final_json)
    # Log finish time
    logger.info("Finished update process")
if __name__ == "__main__":
    main()
|
{"hexsha": "d96f1e62bdd82d919520c1c24355fa78fd881e5b", "size": 7831, "ext": "py", "lang": "Python", "max_stars_repo_path": "build/generate_stats_bar.py", "max_stars_repo_name": "jtagcat/koroonakaart", "max_stars_repo_head_hexsha": "16a6eb24a19b286589b063742b03a123315feefc", "max_stars_repo_licenses": ["CC0-1.0", "MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-12-20T23:05:58.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-20T23:05:58.000Z", "max_issues_repo_path": "build/generate_stats_bar.py", "max_issues_repo_name": "jtagcat/koroonakaart", "max_issues_repo_head_hexsha": "16a6eb24a19b286589b063742b03a123315feefc", "max_issues_repo_licenses": ["CC0-1.0", "MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "build/generate_stats_bar.py", "max_forks_repo_name": "jtagcat/koroonakaart", "max_forks_repo_head_hexsha": "16a6eb24a19b286589b063742b03a123315feefc", "max_forks_repo_licenses": ["CC0-1.0", "MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-12-20T23:05:47.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-20T23:05:47.000Z", "avg_line_length": 41.0, "max_line_length": 111, "alphanum_fraction": 0.7433277998, "include": true, "reason": "import numpy", "num_tokens": 1775}
|
#encoding:utf-8
# -----------------------------------------------------------
# "Remote Sensing Cross-Modal Text-Image Retrieval Based on Global and Local Information"
# Yuan, Zhiqiang and Zhang, Wenkai and Changyuan Tian and Xuee, Rong and Zhengyuan Zhang and Wang, Hongqi and Fu, Kun and Sun, Xian
# Writen by YuanZhiqiang, 2021. Our code is depended on AMFMN
# ------------------------------------------------------------
import torch
import numpy as np
import sys
import math
from torch.autograd import Variable
from collections import OrderedDict
import torch.nn as nn
import shutil
import time
# Read data back from an .npy file.
def load_from_npy(filename):
    """Load a (possibly pickled) object or array from `filename`."""
    return np.load(filename, allow_pickle=True)
# Append results to a txt file.
def log_to_txt( contexts=None,filename="save.txt", mark=False,encoding='UTF-8',mode='a'):
    """Write `contexts` to `filename`.

    mark=True writes only a separator line; dicts are written as one
    "key | value" line per entry; lists are concatenated with no
    separator; anything else gets a trailing newline.
    """
    with open(filename, mode, encoding=encoding) as out:
        if mark:
            out.write("------------------------------------------------\n")
        elif isinstance(contexts, dict):
            out.write("".join("{} | {}\n".format(k, v) for k, v in contexts.items()))
        elif isinstance(contexts, list):
            # NOTE: list items are joined without newlines, matching the
            # original behaviour
            out.write("".join(str(c) for c in contexts))
        else:
            out.write(contexts + "\n")
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=0):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / (.0001 + self.count)
def __str__(self):
"""String representation for logging
"""
# for values that should be recorded exactly e.g. iteration number
if self.count == 0:
return str(self.val)
# for stats
return '%.4f (%.4f)' % (self.val, self.avg)
class LogCollector(object):
    """A collection of logging objects that can change from train to val"""

    def __init__(self):
        # OrderedDict keeps the order of logged variables deterministic
        self.meters = OrderedDict()

    def update(self, k, v, n=0):
        """Fold value `v` (seen `n` times) into the meter named `k`,
        creating the meter on first use."""
        self.meters.setdefault(k, AverageMeter()).update(v, n)

    def __str__(self):
        """Concatenate all meters into a single space-separated log line."""
        return ' '.join('%s %s' % (k, v) for k, v in self.meters.items())

    def tb_log(self, tb_logger, prefix='', step=None):
        """Push the current value of every meter to tensorboard."""
        for k, v in self.meters.items():
            tb_logger.log_value(prefix + k, v.val, step=step)
def update_values(dict_from, dict_to):
    """Recursively merge `dict_from` into `dict_to` in place.

    Nested dicts are descended into (the key must already exist in
    `dict_to`); None values are skipped so they never clobber defaults.
    Returns `dict_to` for convenience.
    """
    for key, value in dict_from.items():
        if isinstance(value, dict):
            update_values(value, dict_to[key])
        elif value is not None:
            dict_to[key] = value
    return dict_to
def params_count(model):
    """Return the total number of scalar parameters in a torch model.

    Sums the element counts of every tensor yielded by
    ``model.parameters()``.  Uses ``Tensor.numel()`` instead of the
    original hand-rolled product over ``p.size(i)``.
    """
    return sum(p.numel() for p in model.parameters())
def collect_match(input):
    """change the model output to the match matrix"""
    # input: (image_size, text_size, 2) score tensor -- presumably logits
    # over {no-match, match}; TODO confirm against the calling model
    image_size = input.size(0)
    # NOTE(review): text_size is computed but never used below
    text_size = input.size(1)
    # match_v = torch.zeros(image_size, text_size, 1)
    # match_v = match_v.view(image_size*text_size, 1)
    # log-softmax over the last dim, then keep only the index-1 ("match")
    # log-probability; requires a CUDA device (index tensor is moved to GPU)
    input_ = nn.LogSoftmax(2)(input)
    output = torch.index_select(input_, 2, Variable(torch.LongTensor([1])).cuda())
    return output
def collect_neg(input):
    """Collect the hard negative samples from a similarity matrix.

    input: square (batch, batch) score matrix whose diagonal holds the
    positive pairs.  Zeroes the diagonal in place, then returns the mean
    of the row-wise maxima plus the mean of the column-wise maxima
    (the hardest negatives in each direction).  Requires CUDA.
    """
    if input.dim() != 2:
        # NOTE(review): this *returns* ValueError instead of raising it,
        # so callers receive the class object rather than an exception
        return ValueError
    batch_size = input.size(0)
    # boolean diagonal mask; masked_fill_ mutates `input` in place
    mask = Variable(torch.eye(batch_size)>0.5).cuda()
    output = input.masked_fill_(mask, 0)
    output_r = output.max(1)[0]
    output_c = output.max(0)[0]
    loss_n = torch.mean(output_r) + torch.mean(output_c)
    return loss_n
def calcul_loss(scores, size, margin, max_violation=False):
    """Bidirectional triplet ranking loss over a similarity matrix.

    scores: (size, size) matrix whose diagonal holds the matched pairs.
    When `max_violation` is set only the hardest negative per row/column
    contributes; otherwise every violating negative is summed.
    """
    positives = scores.diag().view(size, 1)
    row_diag = positives.expand_as(scores)
    col_diag = positives.t().expand_as(scores)

    # caption retrieval: compare each score to the diagonal of its row
    cost_s = (margin + scores - row_diag).clamp(min=0)
    # image retrieval: compare each score to the diagonal of its column
    cost_im = (margin + scores - col_diag).clamp(min=0)

    # zero the diagonal so positive pairs never count as violations
    diag_mask = Variable(torch.eye(scores.size(0)) > .5)
    if torch.cuda.is_available():
        diag_mask = diag_mask.cuda()
    cost_s = cost_s.masked_fill_(diag_mask, 0)
    cost_im = cost_im.masked_fill_(diag_mask, 0)

    if max_violation:
        cost_s = cost_s.max(1)[0]
        cost_im = cost_im.max(0)[0]

    return cost_s.sum() + cost_im.sum()
def acc_train(input):
    """Accuracy, recall and precision of thresholded match scores.

    input: tensor of log-probabilities (one row per sample); entries
    above log(0.5) count as predicted matches.  Ground truth is the
    identity pairing (sample i matches target i).
    """
    predicted = input.squeeze().numpy()
    batch_size = predicted.shape[0]
    threshold = math.log(0.5)
    # binarize in place: above threshold -> 1, below -> 0
    predicted[predicted > threshold] = 1
    predicted[predicted < threshold] = 0
    target = np.eye(batch_size)
    n_hit = np.sum(predicted * target)
    recall = n_hit / np.sum(target)
    precision = n_hit / np.sum(predicted)
    acc = 1 - np.sum(abs(predicted - target)) / (target.shape[0] * target.shape[1])
    return acc, recall, precision
def acc_i2t(input):
    """Computes the precision@k for the specified values of k of i2t"""
    # input: (N, 5N) similarity matrix, image i matching captions 5i..5i+4
    image_size = input.shape[0]
    ranks = np.zeros(image_size)
    top1 = np.zeros(image_size)

    for index in range(image_size):
        order = np.argsort(input[index])[::-1]
        # best (lowest) position of any of the 5 ground-truth captions
        best = 1e20
        for cap_id in range(5 * index, 5 * index + 5):
            pos = np.where(order == cap_id)[0][0]
            if pos < best:
                best = pos
        if best == 1e20:
            print('error')
        ranks[index] = best
        top1[index] = order[0]

    # recall@k, median rank, mean rank (1-based)
    r1 = 100.0 * len(np.where(ranks < 1)[0]) / len(ranks)
    r5 = 100.0 * len(np.where(ranks < 5)[0]) / len(ranks)
    r10 = 100.0 * len(np.where(ranks < 10)[0]) / len(ranks)
    medr = np.floor(np.median(ranks)) + 1
    meanr = ranks.mean() + 1
    return (r1, r5, r10, medr, meanr), (ranks, top1)
def acc_t2i(input):
    """Computes the precision@k for the specified values of k of t2i"""
    # input: (N, 5N) similarity matrix; transpose -> (5N captions, N images)
    image_size = input.shape[0]
    ranks = np.zeros(5 * image_size)
    top1 = np.zeros(5 * image_size)

    sims = input.T
    for index in range(image_size):
        for i in range(5):
            cap = 5 * index + i
            order = np.argsort(sims[cap])[::-1]
            # position of the ground-truth image for this caption
            ranks[cap] = np.where(order == index)[0][0]
            top1[cap] = order[0]

    # recall@k, median rank, mean rank (1-based)
    r1 = 100.0 * len(np.where(ranks < 1)[0]) / len(ranks)
    r5 = 100.0 * len(np.where(ranks < 5)[0]) / len(ranks)
    r10 = 100.0 * len(np.where(ranks < 10)[0]) / len(ranks)
    medr = np.floor(np.median(ranks)) + 1
    meanr = ranks.mean() + 1
    return (r1, r5, r10, medr, meanr), (ranks, top1)
def shard_dis(images, captions, model, shard_size=128, lengths=None):
    """compute image-caption pairwise distance during validation and test"""
    # Evaluates the (n_images x n_captions) similarity matrix in
    # shard_size x shard_size tiles so the full batch never has to fit on
    # the GPU at once.  Requires CUDA.
    n_im_shard = (len(images) - 1) // shard_size + 1
    n_cap_shard = (len(captions) - 1) // shard_size + 1
    d = np.zeros((len(images), len(captions)))
    for i in range(n_im_shard):
        im_start, im_end = shard_size*i, min(shard_size*(i+1), len(images))
        # print("======================")
        # print("im_start:",im_start)
        # print("im_end:",im_end)
        for j in range(n_cap_shard):
            sys.stdout.write('\r>> shard_distance batch (%d,%d)' % (i,j))
            cap_start, cap_end = shard_size * j, min(shard_size * (j + 1), len(captions))
            # NOTE(review): volatile=True is the pre-0.4 PyTorch idiom for
            # inference mode (ignored with a warning on modern versions)
            im = Variable(torch.from_numpy(images[im_start:im_end]), volatile=True).float().cuda()
            s = Variable(torch.from_numpy(captions[cap_start:cap_end]), volatile=True).cuda()
            l = lengths[cap_start:cap_end]
            sim = model(im, s,l)
            sim = sim.squeeze()
            d[im_start:im_end, cap_start:cap_end] = sim.data.cpu().numpy()
    sys.stdout.write('\n')
    return d
def acc_i2t2(input):
    """Computes the precision@k for the specified values of k of i2t"""
    # Same as acc_i2t but without the unreachable-rank error print.
    image_size = input.shape[0]
    ranks = np.zeros(image_size)
    top1 = np.zeros(image_size)

    for index in range(image_size):
        order = np.argsort(input[index])[::-1]
        # lowest position among the 5 captions paired with this image
        best = 1e20
        for cap_id in range(5 * index, 5 * index + 5):
            pos = np.where(order == cap_id)[0][0]
            best = min(best, pos)
        ranks[index] = best
        top1[index] = order[0]

    # recall@k, median rank, mean rank (1-based)
    r1 = 100.0 * len(np.where(ranks < 1)[0]) / len(ranks)
    r5 = 100.0 * len(np.where(ranks < 5)[0]) / len(ranks)
    r10 = 100.0 * len(np.where(ranks < 10)[0]) / len(ranks)
    medr = np.floor(np.median(ranks)) + 1
    meanr = ranks.mean() + 1
    return (r1, r5, r10, medr, meanr), (ranks, top1)
def acc_t2i2(input):
    """Computes the precision@k for the specified values of k of t2i"""
    # input: (N, 5N) similarity matrix; transpose -> (5N captions, N images)
    image_size = input.shape[0]
    ranks = np.zeros(5 * image_size)
    top1 = np.zeros(5 * image_size)

    sims = input.T
    for index in range(image_size):
        for i in range(5):
            cap = 5 * index + i
            order = np.argsort(sims[cap])[::-1]
            ranks[cap] = np.where(order == index)[0][0]
            top1[cap] = order[0]

    # recall@k, median rank, mean rank (1-based)
    r1 = 100.0 * len(np.where(ranks < 1)[0]) / len(ranks)
    r5 = 100.0 * len(np.where(ranks < 5)[0]) / len(ranks)
    r10 = 100.0 * len(np.where(ranks < 10)[0]) / len(ranks)
    medr = np.floor(np.median(ranks)) + 1
    meanr = ranks.mean() + 1
    return (r1, r5, r10, medr, meanr), (ranks, top1)
def shard_dis_reg(images, captions, model, shard_size=128, lengths=None):
    """compute image-caption pairwise distance during validation and test"""
    # Regression variant: scores ONE image at a time against caption shards
    # and keeps only the model's column-1 output.  Requires CUDA.
    # NOTE(review): n_im_shard is computed but unused (the loop is per-image)
    n_im_shard = (len(images) - 1) // shard_size + 1
    n_cap_shard = (len(captions) - 1) // shard_size + 1
    d = np.zeros((len(images), len(captions)))
    for i in range(len(images)):
        # im_start, im_end = shard_size*i, min(shard_size*(i+1), len(images))
        # NOTE(review): im_index is assigned but never used
        im_index = i
        for j in range(n_cap_shard):
            sys.stdout.write('\r>> shard_distance batch (%d,%d)' % (i,j))
            cap_start, cap_end = shard_size * j, min(shard_size * (j + 1), len(captions))
            s = Variable(torch.from_numpy(captions[cap_start:cap_end]), volatile=True).cuda()
            # the single image is broadcast to the caption-shard batch size;
            # assumes 3x256x256 input frames -- TODO confirm
            im = Variable(torch.from_numpy(images[i]), volatile=True).float().unsqueeze(0).expand(len(s), 3, 256, 256).cuda()
            l = lengths[cap_start:cap_end]
            sim = model(im, s, l)[:, 1]
            sim = sim.squeeze()
            d[i, cap_start:cap_end] = sim.data.cpu().numpy()
    sys.stdout.write('\n')
    return d
def shard_dis_GaLR(images, input_local_rep, input_local_adj, captions, model, shard_size=128, lengths=None):
    """compute image-caption pairwise distance during validation and test"""
    # GaLR variant: the model additionally consumes per-image local feature
    # representations and adjacency matrices, sharded alongside the images.
    # Also reports average per-shard inference time.  Requires CUDA.
    n_im_shard = (len(images) - 1) // shard_size + 1
    n_cap_shard = (len(captions) - 1) // shard_size + 1
    d = np.zeros((len(images), len(captions)))
    all = []
    for i in range(n_im_shard):
        im_start, im_end = shard_size*i, min(shard_size*(i+1), len(images))
        print("======================")
        print("im_start:",im_start)
        print("im_end:",im_end)
        for j in range(n_cap_shard):
            sys.stdout.write('\r>> shard_distance batch (%d,%d)' % (i,j))
            cap_start, cap_end = shard_size * j, min(shard_size * (j + 1), len(captions))
            im = Variable(torch.from_numpy(images[im_start:im_end]), volatile=True).float().cuda()
            local_rep = Variable(torch.from_numpy(input_local_rep[im_start:im_end]), volatile=True).float().cuda()
            local_adj = Variable(torch.from_numpy(input_local_adj[im_start:im_end]), volatile=True).float().cuda()
            s = Variable(torch.from_numpy(captions[cap_start:cap_end]), volatile=True).cuda()
            l = lengths[cap_start:cap_end]
            # time a single forward pass for the inference-time report
            t1 = time.time()
            sim = model(im, local_rep, local_adj, s, l)
            t2 = time.time()
            all.append(t2-t1)
            sim = sim.squeeze()
            d[im_start:im_end, cap_start:cap_end] = sim.data.cpu().numpy()
    sys.stdout.write('\n')
    print("infer time:",np.average(all))
    return d
def save_checkpoint(state, is_best, filename, prefix='', model_name = None):
    """Persist a checkpoint, retrying on I/O errors.

    Only the best model is actually written, to
    `prefix + model_name + '_best.pth.tar'` (the per-epoch save is
    commented out); `filename` only appears in the failure log message.
    Retries up to 15 times and re-raises the last IOError on exhaustion.
    """
    tries = 15
    error = None
    # deal with unstable I/O. Usually not necessary.
    while tries:
        try:
            # torch.save(state, prefix + filename)
            if is_best:
                torch.save(state, prefix +model_name +'_best.pth.tar')
        except IOError as e:
            error = e
            tries -= 1
        else:
            break
        # reached only after a failed attempt: success breaks out above
        print('model save {} failed, remaining {} trials'.format(filename, tries))
    if not tries:
        raise error
def adjust_learning_rate(options, optimizer, epoch):
    """Decay every param group's learning rate.

    On the last epoch of each options['optim']['lr_update_epoch'] period
    the lr is multiplied by options['optim']['lr_decay_param']; other
    epochs leave it unchanged.  The current lr is always printed.
    """
    period = options['optim']['lr_update_epoch']
    at_decay_point = (epoch % period == period - 1)
    for param_group in optimizer.param_groups:
        if at_decay_point:
            param_group['lr'] = param_group['lr'] * options['optim']['lr_decay_param']
    print("Current lr: {}".format(optimizer.state_dict()['param_groups'][0]['lr']))
def load_from_txt(filename, encoding="utf-8"):
    """Read a text file and return its lines (trailing newlines kept).

    Fix: the original never closed the file handle; the `with` block
    guarantees it is closed even if readlines() raises.
    """
    with open(filename, 'r', encoding=encoding) as f:
        return f.readlines()
|
{"hexsha": "127740e7fd064366bbacb2b0187049c7d203b1af", "size": 14106, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils.py", "max_stars_repo_name": "xiaoyuan1996/GaLR", "max_stars_repo_head_hexsha": "77075507482453f59bab3ae65fa99ba1b61d716c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2021-09-27T03:22:27.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T02:11:37.000Z", "max_issues_repo_path": "utils.py", "max_issues_repo_name": "xiaoyuan1996/GaLR", "max_issues_repo_head_hexsha": "77075507482453f59bab3ae65fa99ba1b61d716c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "utils.py", "max_forks_repo_name": "xiaoyuan1996/GaLR", "max_forks_repo_head_hexsha": "77075507482453f59bab3ae65fa99ba1b61d716c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2021-11-07T03:13:38.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T10:44:57.000Z", "avg_line_length": 32.2791762014, "max_line_length": 131, "alphanum_fraction": 0.581880051, "include": true, "reason": "import numpy", "num_tokens": 3877}
|
from collections import deque
from multiprocessing import RawArray
from multiprocessing import Lock as mpLock
from multiprocessing import Process
from multiprocessing import Value
from ctypes import c_bool, c_ubyte, c_long
import time
import numpy as np
import cv2
from trtis_client import TrtisClient
def infer(trtis_client, state):
    """Preprocess a stack of frames, query TRTIS, and return the greedy
    (argmax Q-value) action index."""
    # state is a list of ndarray.
    # RGB -> grayscale, then resize to the standard 84x84 DQN input
    gray_state = [cv2.cvtColor(s, cv2.COLOR_RGB2GRAY) for s in state]
    dqn_state = [cv2.resize(s, (84, 84), interpolation=cv2.INTER_AREA) \
        for s in gray_state]
    # scale uint8 pixels to [0, 1] floats expected by the model
    input_tensor = np.array(dqn_state).astype(np.float32) / 255.
    q_value = trtis_client.infer(input_tensor)
    return np.argmax(q_value)
def show_trtis_client_stats(trtis_client):
    """Print mean/median timing statistics collected by a TRTIS client."""
    stats = trtis_client.get_time_stats()
    request_mean = stats['infer_request'][0]
    request_median = stats['infer_request'][1]
    whole_mean = stats['whole_inference'][0]
    whole_median = stats['whole_inference'][1]
    print('TRTIS inference request time[sec.]: mean, median = ({}, {})'.format(
        request_mean, request_median))
    print('TRTIS whole inference time[sec.]: mean, median = ({}, {})'.format(
        whole_mean, whole_median))
class AsyncAgent(Process):
    """Runs TRTIS inference in a child process.

    The parent pushes frames with `put_state` and polls the most recent
    chosen action with `get_action`; frames travel through a ring of
    shared-memory byte buffers guarded by a multiprocessing lock, so the
    environment loop is never blocked by inference latency.
    """
    def __init__(self,
                 host='localhost',
                 port=8001,
                 model='atari',
                 observation_shape=None,
                 n_stack_frames=4,
                 wait_interval_msec=30,
                 **kwargs):
        super(AsyncAgent, self).__init__(**kwargs)
        self._wait_interval_sec = wait_interval_msec / 1000.0
        self._observation_shape = observation_shape
        # bytes per raw frame; assumes a uint8 frame of this shape --
        # TODO confirm against the caller
        n_bytes = np.prod(observation_shape)
        self._n_stack_frames = n_stack_frames
        # ring of shared byte buffers, one slot per stacked frame
        self._state_buffers = [RawArray(c_ubyte, range(n_bytes)) \
            for _ in range(self._n_stack_frames)]
        # total frames ever written; write slot = n_frames % n_stack_frames
        self._n_frames = Value(c_long, 0)
        self._action_buffer = Value(c_long, 1)
        self._stop_signal = Value(c_bool, 0)
        self._state_lock = mpLock()
        self._trtis_client = TrtisClient(
            host=host,
            port=port,
            model_name=model)
    def put_state(self, state):
        """Copy one frame into the shared ring buffer (parent side)."""
        # This state management is the same as `FrameStack` below.
        # https://github.com/chainer/chainerrl/blob/master/chainerrl/wrappers/atari_wrappers.py
        flattened_state = state.ravel().tolist()
        self._state_lock.acquire()
        bidx = self._n_frames.value % self._n_stack_frames
        self._state_buffers[bidx][:] = flattened_state[:]
        self._n_frames.value = self._n_frames.value + 1
        self._state_lock.release()
    def get_action(self):
        """Return the most recently inferred action (parent side)."""
        # Note that in the default setting, multiprocessing.Value uses Lock, internally.
        # Therefore, the code below should be multiprocess-safe.
        return self._action_buffer.value
    def _get_state(self):
        """Snapshot the newest `n_stack_frames` frames, oldest first, or
        None while the ring has not filled up yet (child side)."""
        self._state_lock.acquire()
        if self._n_frames.value < self._n_stack_frames:
            state = None
        else:
            # Buffer has enough frames.
            n_frames = self._n_frames.value
            latest_frame_id = (n_frames - 1)
            oldest_frame_id = (latest_frame_id - (self._n_stack_frames-1))
            # ring-buffer slots in chronological order
            buf_idx_list = [i % self._n_stack_frames \
                for i in range(oldest_frame_id, latest_frame_id+1)]
            state = np.array(self._state_buffers)
        self._state_lock.release()
        if state is not None:
            # reorder and reshape outside the lock to keep the critical
            # section short (buf_idx_list is only bound when state is not None)
            state = [state[i] for i in buf_idx_list]
            state = [s.reshape(self._observation_shape).astype(np.uint8) for s in state]
        return state
    def _put_action(self, action):
        # publish the latest inferred action for the parent to poll
        self._action_buffer.value = action
    def stop(self):
        """Ask the inference loop to terminate (parent side)."""
        self._stop_signal.value = True
    def run(self):
        """Child-process main loop: poll state, infer, publish action."""
        # Note that setup() have to be called in a child process to avoid connection error.
        self._trtis_client.setup()
        while self._is_running():
            state = self._get_state()
            if state is None:
                # ring buffer not full yet; back off briefly
                time.sleep(self._wait_interval_sec)
            else:
                # Make a tensor to be sent to TRTIS.
                action = infer(self._trtis_client, state)
                self._put_action(action)
        self._trtis_client.shutdown()
        # Show stats.
        show_trtis_client_stats(self._trtis_client)
    def _is_running(self):
        return not self._stop_signal.value
class SyncAgent(object):
    """Synchronous counterpart of AsyncAgent.

    Exposes the same start/stop/join/put_state/get_action interface but
    runs inference inline inside `put_state` instead of in a child
    process, so the caller blocks for each TRTIS round trip.
    """
    def __init__(self,
                 host='localhost',
                 port=8001,
                 model='atari',
                 n_stack_frames=4):
        # deque with maxlen acts as the frame-stacking ring buffer
        self._state = deque([], maxlen=n_stack_frames)
        self._action = 0
        self._trtis_client = TrtisClient(
            host=host,
            port=port,
            model_name=model)
    def start(self):
        """Open the TRTIS connection (mirrors Process.start())."""
        self._trtis_client.setup()
    def stop(self):
        """Print timing stats and close the TRTIS connection."""
        # Show stats.
        show_trtis_client_stats(self._trtis_client)
        self._trtis_client.shutdown()
    def join(self):
        # no-op: kept only for interface parity with AsyncAgent (a Process)
        pass
    def get_action(self):
        """Return the action chosen by the most recent inference."""
        return self._action
    def put_state(self, state):
        """Append a frame and, once enough frames are stacked, run
        inference synchronously and cache the resulting action."""
        # Note: should devide this code to 2 parts:
        # putting state part and do inference part...
        self._state.append(state)
        if len(self._state) < self._state.maxlen:
            # Need to wait.
            return
        state = list(self._state)
        self._action = infer(self._trtis_client, state)
|
{"hexsha": "cdeb1e5fb20b277dcf65cfdbaa519b09a47834cd", "size": 5382, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/agent.py", "max_stars_repo_name": "lazykyama/atari_trtis_demo", "max_stars_repo_head_hexsha": "521615bdd00aa02836dae174be6dae63bdeb5eb8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/agent.py", "max_issues_repo_name": "lazykyama/atari_trtis_demo", "max_issues_repo_head_hexsha": "521615bdd00aa02836dae174be6dae63bdeb5eb8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/agent.py", "max_forks_repo_name": "lazykyama/atari_trtis_demo", "max_forks_repo_head_hexsha": "521615bdd00aa02836dae174be6dae63bdeb5eb8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.2275449102, "max_line_length": 95, "alphanum_fraction": 0.6179858789, "include": true, "reason": "import numpy", "num_tokens": 1253}
|
## Derivadas ##
```python
from sympy import *
x,f=symbols("x f")
f=-2*x**3-4*x**2+13*x-1
init_printing()
diff(f,x)
```
```python
from sympy import *
x,f=symbols("x f")
f=-2*x**5+23*x**3-7*x
diff(f,x)
```
```python
x,g=symbols("x g")
g=2*x+ln(x)
diff(g, x)
```
```python
x,h=symbols("x h")
h=sin(x)
diff(h, x)
```
```python
x,r=symbols("x r")
r=tan(x)
diff(r, x)
```
```python
x,q=symbols("x q")
q=sin(x)*cos(x)
diff(q, x)
```
```python
x,v=symbols("x v")
v=(x**2-5*x)**(1/2)
diff(v, x)
```
```python
x,t=symbols("x t")
t=(3*x**2-4*x)/(2*x**3+6)
diff(t, x)
```
```python
x,f=symbols("x f")
f=18*x**4-14*x**2+22*x-1
diff(f,x)
```
## Derivada segunda ##
```python
x,f=symbols("x f")
f=19*x**6-33*x**3+111*x+19
diff(f,x,2)
```
```python
from sympy import *
x,g=symbols("x g")
g=2*x+ln(x)
diff(g,x,2)
```
```python
x,h=symbols("x h")
h=sin(x)
diff(h,x,2)
```
# Máximos e Mínimos #
```python
from sympy import *
x,y=symbols("x y")
y=-4*x**2+4000*x-200000
df=diff(y,x)
d2f=diff(y,x,2)
p=solve(Eq(df,0))
l=y.subs(x,p[0])
ds=d2f.subs(x,p[0])
print('Preço ótimo:',p[0])
print('Lucro máximo:',l)
print('Derivada segunda:',ds)
```
Preço ótimo: 500
Lucro máximo: 800000
Derivada segunda: -8
```python
from sympy import *
x,y=symbols("x y")
y=-2*x**2+7000*x-12000
df=diff(y,x)
p=solve(Eq(df,0))
l=y.subs(x,p[0])
print('Lucro Máximo:',l)
```
Lucro Máximo: 6113000
#### O custo c referente á produção diária de x unidade de certo item correspondente a c(x)=x**2-20*x+300
Qual é o nível de produção que minimiza o custo ?
```python
from sympy import *
x,c=symbols("x c")
c=x**2-20*x+300
df=diff(c,x)
d2f=diff(c,x,2)
p=solve(Eq(df,0))
ds=d2f.subs(x,p[0])
print('Produção ótima:',p[0])
print('Derivada segunda:',ds)
```
Produção ótima: 10
Derivada segunda: 2
```python
import matplotlib.pyplot as plt
import numpy as np
x=np.linspace(0,25,100)
c=x**2-20*x+300
plt.plot(x,c)
plt.show()
```
### Uma indústria de carne chegou a conclusão que o lucro mensal L(x) é dado em função do preço x do quilo da carne congelada, e essa relação é descrita pela função L(x)=-120x**2+4800x
Determine para quais valores de x o lucro mensal é máximo
```python
from sympy import *
x,L=symbols("x L")
L=-120*x**2+4800*x
df=diff(L,x)
d2f=diff(L,x,2)
p=solve(Eq(df,0))
ds=d2f.subs(x,p[0])
print('Preço ótimo:',p[0])
print('Derivada segunda:',ds)
```
Preço ótimo: 20
Derivada segunda: -240
```python
x,L=symbols("x L")
L=1340*x-0.08*x**2
df=diff(L,x)
p=solve(Eq(df,0))
print('Preço:',p[0])
```
Preço: 8375.00000000000
```python
from sympy import *
s,t=symbols("s t")
s=2*t**2+3*t
ds=diff(s,t)
v=ds.subs(t,2)
print('Velocidade:%.2f m/s'%v)
```
Velocidade:11.00 m/s
## Otimização 3d
#### gráfico de uma função:
```python
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
xx=np.linspace(-5,5,100)
yy=np.linspace(-5,5,100)
x,y=np.meshgrid(xx,yy)
f=x**2+y**2
fig=plt.figure()
ax=plt.axes(projection='3d')
ax.plot_surface(x,y,f)
```
## INTEGRAlS
#### obs: não é sempre possível calcular uma integral indefinida
```python
from sympy import *
x,f=symbols("x f")
f=-2*x**3-4*x**2+13*x-1
integrate(f,x)
```
```python
from sympy import *
x,f=symbols("x f")
f=4*x**3+12*x-7
integrate(f,(x,1,2))
```
26
```python
x,q=symbols("x q")
q=sin(x)*cos(x)
integrate(q,x)
```
```python
x,v=symbols("x v")
v=(x**2-5*x)**(1/2)
integrate(v,x)
```
```python
x,f=symbols("x f")
f=-2*x**3-4*x**2+13*x-1
integrate(f,(x,1,2))
```
```python
x,f=symbols("x f")
f=5*x**2+3*x+1
integrate(f,(x,0,4))
```
## Áreas
```python
from sympy import *
x,f=symbols("x f")
f=x**2
integrate(f,(x,0,2))
```
```python
import matplotlib.pyplot as plt
import numpy as np
import sympy as sy
x=np.linspace(-1,3,1000)
f=x**2
plt.plot(x,f,color='blue')
plt.axhline(color='blue')
plt.fill_between(x,f,where=[(x>0)and (x<2) for x in x],color='green')
```
### Qual é a área abaixo da curva y=x**3, de x=1 a x=3?
```python
import matplotlib.pyplot as plt
from sympy import *
import numpy as np
x,f=symbols("x f")
y=x**3
A=integrate(y,(x,1,3))
x=np.linspace(0.5,3.5,1000)
f=x**3
plt.plot(x,f,color='blue')
plt.axhline(color='blue')
plt.fill_between(x,f,where=[(x>1)and (x<3)for x in x],color='magenta')
print('Área', A)
```
## Calcule a área limitada pelo gráfico da função y=-x**2+4*x+1 e pelo eixo x
```python
import matplotlib.pyplot as plt
from sympy import *
import numpy as np
x,f=symbols("x f")
f=-x**2+4*x+1
coeff=[-1,4,1]
r=np.roots(coeff)
A=integrate(f,(x,min(r), max(r)))
x=np.linspace(min(r)-0.5,max(r)+0.5,1000)
f=-x**2+4*x+1
plt.plot(x,f,color='blue')
plt.axhline(color='blue')
plt.fill_between(x,f,where=[(x>min(r))and(x<max(r)) for x in x],color='yellow')
print('Área:',A)
```
```python
x,f=symbols("x f")
f=-4*x**2+40*x-7
coeff=[-4,40,-7]
r=np.roots(coeff)
integrate(f,(x,min(r), max(r)))
```
```python
```
|
{"hexsha": "e6e8b33b82a7c328be4fb865da2c19c0d5c504c5", "size": 148946, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "Operations/Derivada e Integral.ipynb", "max_stars_repo_name": "Edu-Kobus/Exercise-in-python", "max_stars_repo_head_hexsha": "44321dcaa8bb6f8fd574b2bc603f93eafbf76ce1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-07-24T22:21:39.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-24T22:21:39.000Z", "max_issues_repo_path": "Operations/Derivada e Integral.ipynb", "max_issues_repo_name": "Edu-Kobus/Exercise-in-python", "max_issues_repo_head_hexsha": "44321dcaa8bb6f8fd574b2bc603f93eafbf76ce1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Operations/Derivada e Integral.ipynb", "max_forks_repo_name": "Edu-Kobus/Exercise-in-python", "max_forks_repo_head_hexsha": "44321dcaa8bb6f8fd574b2bc603f93eafbf76ce1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 207.7350069735, "max_line_length": 50958, "alphanum_fraction": 0.8861667987, "converted": true, "num_tokens": 1916}
|
import pandas as pd
import numpy as np
import gym
from gym import error, spaces, utils
from sklearn.preprocessing import StandardScaler
from mikasa import *
# Maps the discrete gym action indices to trading operations.
ACTION_LOOKUP = {
    0: 'nop',
    1: 'buy',
    2: 'sell'
}
class MikasaEnv(gym.Env):
    """OpenAI Gym environment driving the mikasa backtester over a CSV price series.

    Observations are the scaled price fields for the last ``look_back`` bars
    plus a flag telling whether a position is currently open.  Actions are the
    indices of ``ACTION_LOOKUP`` (0 = nop, 1 = buy, 2 = sell).
    """
    metadata = {'render.modes': ['human']}

    def __init__(self, source_filename=None, look_back=1, fields=None, balance=1000.0):
        """
        Args:
            source_filename: path to the CSV file with the price history (required).
            look_back: number of past bars included in each observation.
            fields: CSV columns fed to the agent; defaults to OHLC.
            balance: starting cash balance handed to the backtester.

        Raises:
            NotImplementedError: if no source file is given.
        """
        if fields is None:
            fields = [
                'open',
                'high',
                'low',
                'close'
            ]
        if not source_filename:
            # Bug fix: the original raised the `NotImplemented` singleton, which
            # is not an exception and itself raises TypeError under Python 3.
            raise NotImplementedError("MikasaEnv requires a source_filename")
        self.source_filename = source_filename
        self.fields = fields
        self.look_back = look_back
        self.balance = balance
        self.action_space = spaces.Discrete(3)
        # One scalar per field plus the "no open position" flag.
        self.observation_space = spaces.Box(low=-np.inf, high=np.inf, shape=(len(fields) + 1, ))
        self.scaler = StandardScaler()

    def _get_reward(self):
        # Profit reported by the backtester.
        return self.bt.get_profit()

    def _step(self, action):
        """Advance one bar; returns (observation, reward, done, info)."""
        reward = self._take_action(action)
        ob = self._get_observation()
        episode_over = self.ds.is_end()
        return ob, reward, episode_over, {}

    def _take_action(self, action):
        """Execute the chosen trade at the current close and step the backtester."""
        reward = 0.0
        # Undo the scaling to recover the raw close price (the last entry of
        # `fields` is 'close' in the default configuration -- TODO confirm for
        # custom `fields`).
        origin_close = self.scaler.inverse_transform([getattr(self.ds[0], field) for field in self.fields])[-1]
        if ACTION_LOOKUP[action] == 'buy' and not self.bt.position:
            self.bt.buy(origin_close, self.balance)
        if ACTION_LOOKUP[action] == 'sell' and self.bt.position:
            self.bt.sell(origin_close)
            # Reward is granted only when profit is realized on a sale.
            reward = self.bt.trades[-1].get_profit()
        if not self.bt.ds.is_end():
            self.bt.go()
        return reward

    def _reset(self):
        """Reload the CSV, refit the scaler and restart the backtester."""
        df = pd.read_csv(self.source_filename).rename(columns={
            'Close': 'close',
            'Date time': 'datetime',
            'Open': 'open',
            'High': 'high',
            'Low': 'low',
            'Volume': 'volume'
        })
        df[self.fields] = self.scaler.fit_transform(df[self.fields])
        # (Removed the original's no-op `self.balance = self.balance`.)
        self.ds = DataSeries(df, index=self.look_back)
        self.bt = BT(self.ds, self.balance)
        return self._get_observation()

    def _get_observation(self):
        """Scaled field values for the look-back window plus a position flag."""
        ob = []
        for n in range(self.look_back):
            for k in self.fields:
                ob.append(getattr(self.ds[n-(self.look_back-1)], k))
        # 1.0 while flat, 0.0 while a position is open.
        ob.append(1.0 if self.bt.position is None else 0.0)
        return ob

    def _render(self, mode='human', close=False):
        pass
|
{"hexsha": "623970068dc8b5fc0a2fae887197a2eae0e77e6f", "size": 2575, "ext": "py", "lang": "Python", "max_stars_repo_path": "mikasa_gym/mikasa_env.py", "max_stars_repo_name": "alifanov/mikasa_gym", "max_stars_repo_head_hexsha": "7cf23ec6ca69d835edbea980fc7959ea977b6668", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2017-07-30T08:33:08.000Z", "max_stars_repo_stars_event_max_datetime": "2017-07-30T08:33:08.000Z", "max_issues_repo_path": "mikasa_gym/mikasa_env.py", "max_issues_repo_name": "alifanov/mikasa_gym", "max_issues_repo_head_hexsha": "7cf23ec6ca69d835edbea980fc7959ea977b6668", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mikasa_gym/mikasa_env.py", "max_forks_repo_name": "alifanov/mikasa_gym", "max_forks_repo_head_hexsha": "7cf23ec6ca69d835edbea980fc7959ea977b6668", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.2941176471, "max_line_length": 111, "alphanum_fraction": 0.5759223301, "include": true, "reason": "import numpy", "num_tokens": 602}
|
import xgboost as xgb
import numpy as np
from sklearn.cross_validation import KFold, train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.grid_search import GridSearchCV
from sklearn.datasets import load_iris, load_digits, load_boston
import unittest
# Fixed-seed RNG for reproducible fixtures.
# NOTE(review): `rng` is not referenced in this chunk -- confirm it is used elsewhere.
rng = np.random.RandomState(1337)
class TestTrainingContinuation(unittest.TestCase):
    """Checks that boosting can resume from an in-memory model or a saved file."""

    xgb_params = {
        'colsample_bytree': 0.7,
        'silent': 1,
        'nthread': 1,
    }

    def test_training_continuation(self):
        # Binary subset of the digits data, wrapped as a DMatrix.
        digits = load_digits(2)
        labels = digits['target']
        dtrain = xgb.DMatrix(digits['data'], label=labels)

        # Reference: 10 rounds trained in one go.
        booster_full = xgb.train(self.xgb_params, dtrain, num_boost_round=10)
        assert len(booster_full.get_dump()) == 10

        # Continuation from an empty (0-round) model, both in memory and from disk.
        booster_empty = xgb.train(self.xgb_params, dtrain, num_boost_round=0)
        booster_empty.save_model('xgb_tc.model')
        resumed_mem = xgb.train(self.xgb_params, dtrain, num_boost_round=10, xgb_model=booster_empty)
        resumed_file = xgb.train(self.xgb_params, dtrain, num_boost_round=10, xgb_model="xgb_tc.model")
        assert len(resumed_mem.get_dump()) == 10
        assert len(resumed_file.get_dump()) == 10
        reference_mse = mean_squared_error(labels, booster_full.predict(dtrain))
        assert reference_mse == mean_squared_error(labels, resumed_mem.predict(dtrain))
        assert reference_mse == mean_squared_error(labels, resumed_file.predict(dtrain))

        # Continuation from a partially trained (3-round) model: 3 + 7 == 10 trees.
        booster_part = xgb.train(self.xgb_params, dtrain, num_boost_round=3)
        booster_part.save_model('xgb_tc.model')
        cont_mem = xgb.train(self.xgb_params, dtrain, num_boost_round=7, xgb_model=booster_part)
        cont_file = xgb.train(self.xgb_params, dtrain, num_boost_round=7, xgb_model="xgb_tc.model")
        assert len(cont_mem.get_dump()) == 10
        assert len(cont_file.get_dump()) == 10
        assert mean_squared_error(labels, cont_mem.predict(dtrain)) == \
            mean_squared_error(labels, cont_file.predict(dtrain))
|
{"hexsha": "fec7a6a62a4b496e2b47148d0eb15fa06f5222bb", "size": 2058, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/external/xgboost/tests/python/test_training_continuation.py", "max_stars_repo_name": "shreyasvj25/turicreate", "max_stars_repo_head_hexsha": "32e84ca16aef8d04aff3d49ae9984bd49326bffd", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 11356, "max_stars_repo_stars_event_min_datetime": "2017-12-08T19:42:32.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T16:55:25.000Z", "max_issues_repo_path": "src/external/xgboost/tests/python/test_training_continuation.py", "max_issues_repo_name": "shreyasvj25/turicreate", "max_issues_repo_head_hexsha": "32e84ca16aef8d04aff3d49ae9984bd49326bffd", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 2402, "max_issues_repo_issues_event_min_datetime": "2017-12-08T22:31:01.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-28T19:25:52.000Z", "max_forks_repo_path": "src/external/xgboost/tests/python/test_training_continuation.py", "max_forks_repo_name": "ZeroInfinite/turicreate", "max_forks_repo_head_hexsha": "dd210c2563930881abd51fd69cb73007955b33fd", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1343, "max_forks_repo_forks_event_min_datetime": "2017-12-08T19:47:19.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-26T11:31:36.000Z", "avg_line_length": 38.8301886792, "max_line_length": 113, "alphanum_fraction": 0.695335277, "include": true, "reason": "import numpy", "num_tokens": 611}
|
[STATEMENT]
lemma matchPres:
fixes P :: pi
and Q :: pi
and a :: name
and b :: name
assumes "P \<simeq>\<^sup>s Q"
shows "[a\<frown>b]P \<simeq>\<^sup>s [a\<frown>b]Q"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. [a\<frown>b]P \<simeq>\<^sup>s [a\<frown>b]Q
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
P \<simeq>\<^sup>s Q
goal (1 subgoal):
1. [a\<frown>b]P \<simeq>\<^sup>s [a\<frown>b]Q
[PROOF STEP]
by(auto simp add: weakCongruenceSubst_def intro: Weak_Early_Cong_Pres.matchPres)
|
{"llama_tokens": 242, "file": "Pi_Calculus_Weak_Early_Cong_Subst_Pres", "length": 2}
|
# Build the HTML documentation for the RxNav module with Documenter.jl.
using Documenter, RxNav
# prettyurls = false keeps plain .html links so the docs browse cleanly from disk.
makedocs(sitename = "RxNav Module Documentation", format = Documenter.HTML(prettyurls = false))
|
{"hexsha": "5ccc25aa6fc0b261183e21657079284ab1016ac0", "size": 124, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "docs/make.jl", "max_stars_repo_name": "wherrera10/RxNav.jl", "max_stars_repo_head_hexsha": "54874be9ecf09dd41983ef5a5dfe7de68f3e7a38", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-06-22T19:09:16.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-13T19:12:51.000Z", "max_issues_repo_path": "docs/make.jl", "max_issues_repo_name": "wherrera10/RxNav.jl", "max_issues_repo_head_hexsha": "54874be9ecf09dd41983ef5a5dfe7de68f3e7a38", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "docs/make.jl", "max_forks_repo_name": "wherrera10/RxNav.jl", "max_forks_repo_head_hexsha": "54874be9ecf09dd41983ef5a5dfe7de68f3e7a38", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.8, "max_line_length": 95, "alphanum_fraction": 0.7661290323, "num_tokens": 32}
|
\chapter{Software tester}
\input{missions/testgeneralites}
%\glspl{pois} \\
%\glspl{vache} \\
%\glspl{pigeon} \\
% \glspl{TEM} \\
% \gls{latex} \\
%\glspl{lvm}
\input{missions/testtravailrealise}
\input{missions/testsynthese}
\input{missions/testbilanpremiereperiode}
|
{"hexsha": "d85a46e5c2a730c7afaa4a061c0a076c7f8f8bf1", "size": 275, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "missions/Software-tester.tex", "max_stars_repo_name": "syncrase/reportContrat", "max_stars_repo_head_hexsha": "b9243c54744ace9e360fa48065ea5886851919ec", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "missions/Software-tester.tex", "max_issues_repo_name": "syncrase/reportContrat", "max_issues_repo_head_hexsha": "b9243c54744ace9e360fa48065ea5886851919ec", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "missions/Software-tester.tex", "max_forks_repo_name": "syncrase/reportContrat", "max_forks_repo_head_hexsha": "b9243c54744ace9e360fa48065ea5886851919ec", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 16.1764705882, "max_line_length": 41, "alphanum_fraction": 0.7127272727, "num_tokens": 99}
|
import numpy as np
import healpy as hp
from plancklens.qcinv.util import read_map
class template:
    """Abstract base for template projectors.

    Subclasses set ``self.nmodes`` and implement ``apply`` (in-place multiply
    of a map by a coefficient combination of the templates) and ``accum``
    (in-place add).  The base class may not be instantiated directly.
    """

    def __init__(self):
        self.nmodes = 0  # number of template modes; set by subclasses
        assert 0

    def apply(self, m, coeffs):
        # map -> map*[coeffs combination of templates]
        assert 0

    def apply_mode(self, m, mode):
        """Apply a single template mode (unit coefficient) to ``m`` in place."""
        assert 0 <= mode < self.nmodes
        unit = np.zeros(self.nmodes)
        unit[mode] = 1.0
        self.apply(m, unit)

    def accum(self, m, coeffs):
        assert 0

    def dot(self, m):
        """Return the list of projections of ``m`` onto each template mode."""
        def _project(mode):
            work = np.copy(m)
            self.apply_mode(work, mode)
            return np.sum(work)
        return [_project(mode) for mode in range(self.nmodes)]
class template_map(template):
    """Single-mode template given by a fixed map: the mode is the map itself."""

    def __init__(self, m):
        self.nmodes = 1  # exactly one template: the stored map
        self.map = m

    def apply(self, m, coeffs):
        """Multiply ``m`` in place by ``coeffs[0]`` times the template map."""
        assert len(coeffs) == self.nmodes
        m *= self.map * coeffs[0]

    def accum(self, m, coeffs):
        """Add ``coeffs[0]`` times the template map to ``m`` in place."""
        assert len(coeffs) == self.nmodes
        m += self.map * coeffs[0]

    def dot(self, m):
        """Projection of ``m`` onto the template map."""
        return [np.sum(self.map * m)]
class template_qmap(template):
    """Single-mode polarization template acting on the Q Stokes map.

    ``pmap`` arguments are sequences of maps: ``[Q, U]`` or ``[Q]`` alone.
    """
    def __init__(self, m):
        """Polarization Q template
        """
        self.nmodes = 1
        self.map = m  # template Q-map handle, resolved through read_map

    def apply(self, pmap, coeffs):
        """In place: Q *= coeff * template; U (if present) is zeroed."""
        assert (len(coeffs) == self.nmodes)
        if len(pmap) == 2:  # Q and U maps
            pmap[0] *= read_map(self.map) * coeffs[0]
            pmap[1] *= 0.
        elif len(pmap) == 1:  # Only Q
            pmap[0] *= read_map(self.map) * coeffs[0]
        else:
            assert 0

    def accum(self, pmap, coeffs):
        """In place: Q += coeff * template; U (if present) is untouched.

        Generalized to also accept a Q-only ``pmap`` of length 1, mirroring
        ``apply``/``dot`` above and ``template_umap.accum``; previously a
        Q-only input failed an assertion.
        """
        assert (len(coeffs) == self.nmodes)
        if len(pmap) in (1, 2):  # [Q, U] or Q-only: Q is always index 0
            pmap[0] += read_map(self.map) * coeffs[0]
        else:
            assert 0

    def dot(self, pmap):
        """Projection of the Q map onto the template."""
        return [np.sum(read_map(self.map) * pmap[0])]  # either Q, U or Q only
class template_umap(template):
    """Single-mode polarization template acting on the U Stokes map.

    ``pmap`` arguments are sequences of maps: ``[Q, U]`` or ``[U]`` alone.
    """

    def __init__(self, m):
        """Polarization U template
        """
        self.nmodes = 1
        self.map = m  # template U-map handle, resolved through read_map

    def apply(self, pmap, coeffs):
        """In place: U *= coeff * template; Q (if present) is zeroed."""
        assert len(coeffs) == self.nmodes
        assert len(pmap) in (1, 2)
        u = 1 if len(pmap) == 2 else 0  # U sits at index 1 only when Q is present
        pmap[u] *= read_map(self.map) * coeffs[0]
        if len(pmap) == 2:
            pmap[0] *= 0.  # a pure-U template kills the Q component

    def accum(self, pmap, coeffs):
        """In place: U += coeff * template; Q (if present) is untouched."""
        assert len(coeffs) == self.nmodes
        assert len(pmap) in (1, 2)
        u = 1 if len(pmap) == 2 else 0
        pmap[u] += read_map(self.map) * coeffs[0]

    def dot(self, pmap):
        """Projection of the U map onto the template."""
        assert len(pmap) in (1, 2)
        u = 1 if len(pmap) == 2 else 0
        return [np.sum(read_map(self.map) * pmap[u])]
class template_monopole(template):
    """Single-mode template for the constant (monopole) offset."""

    def __init__(self):
        self.nmodes = 1  # one mode: the uniform map

    def apply(self, m, coeffs):
        """Scale ``m`` in place by the monopole coefficient."""
        assert len(coeffs) == self.nmodes
        m *= coeffs[0]

    def accum(self, m, coeffs):
        """Add the constant ``coeffs[0]`` to every pixel of ``m`` in place."""
        m += coeffs[0]

    def dot(self, m):
        """Projection onto the monopole: the plain pixel sum."""
        return [np.sum(m)]
class template_dipole(template):
    """Three-mode template for the dipole (x, y, z) pattern on the sphere."""

    def __init__(self):
        self.nmodes = 3  # one mode per Cartesian dipole direction

    def apply(self, tmap, coeffs):
        """Multiply ``tmap`` in place by the dipole map with amplitudes ``coeffs``."""
        assert len(coeffs) == self.nmodes
        dipole_map = hp.alm2map(xyz_to_alm(coeffs), hp.npix2nside(len(tmap)))
        tmap *= dipole_map

    def accum(self, tmap, coeffs):
        """Add the dipole map with amplitudes ``coeffs`` to ``tmap`` in place."""
        assert len(coeffs) == self.nmodes
        dipole_map = hp.alm2map(xyz_to_alm(coeffs), hp.npix2nside(len(tmap)))
        tmap += dipole_map

    def dot(self, tmap):
        """(x, y, z) dipole amplitudes of ``tmap``, scaled by npix/3
        (alm -> template-amplitude convention -- TODO confirm)."""
        npix = len(tmap)
        return alm_to_xyz(hp.map2alm(tmap, lmax=1, iter=0)) * npix / 3.
def xyz_to_alm(xyz):
    """Pack Cartesian dipole amplitudes (x, y, z) into length-3 alm array.

    Index 0 stays zero; indices 1 and 2 hold the two l=1 coefficients
    (presumably healpy's lmax=1 (l, m) layout -- confirm against callers).
    """
    assert len(xyz) == 3
    x, y, z = xyz
    m0 = z * np.sqrt(4. * np.pi / 3.)
    m1 = (1.j * y - x) * np.sqrt(2. * np.pi / 3.)
    return np.array([0., m0, m1], dtype=complex)
def alm_to_xyz(alm):
    """Inverse of xyz_to_alm: recover (x, y, z) dipole amplitudes from l<=1 alms."""
    assert len(alm) == 3
    scale_m1 = np.sqrt(2. * np.pi / 3.)
    scale_m0 = np.sqrt(4. * np.pi / 3.)
    return np.array([-alm[2].real / scale_m1,
                     alm[2].imag / scale_m1,
                     alm[1].real / scale_m0])
|
{"hexsha": "1d34d3c221d93c9f60e97ccd1df573fc73aaf1f2", "size": 4202, "ext": "py", "lang": "Python", "max_stars_repo_path": "plancklens/qcinv/template_removal.py", "max_stars_repo_name": "louisl3grand/plancklens", "max_stars_repo_head_hexsha": "2a7d832e044da87f2833628816e0d74fe83743f7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "plancklens/qcinv/template_removal.py", "max_issues_repo_name": "louisl3grand/plancklens", "max_issues_repo_head_hexsha": "2a7d832e044da87f2833628816e0d74fe83743f7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "plancklens/qcinv/template_removal.py", "max_forks_repo_name": "louisl3grand/plancklens", "max_forks_repo_head_hexsha": "2a7d832e044da87f2833628816e0d74fe83743f7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.1616766467, "max_line_length": 78, "alphanum_fraction": 0.5283198477, "include": true, "reason": "import numpy", "num_tokens": 1294}
|
"""
This file defines the datatypes of variables to be used globally.
"""
import torch
import numpy as np
from functools import partial
mat = np.atleast_2d
tensor = partial(torch.tensor, dtype=torch.float32)
device = torch.device('cpu')
|
{"hexsha": "80c16a933ebd2265e275e5e8d67d7c504c89858b", "size": 241, "ext": "py", "lang": "Python", "max_stars_repo_path": "worlds/utils.py", "max_stars_repo_name": "forgi86/RNN-adaptation", "max_stars_repo_head_hexsha": "d32e8185c6a746060dd726a0f5080231e0c9439b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-01-13T10:50:02.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-13T10:50:02.000Z", "max_issues_repo_path": "worlds/utils.py", "max_issues_repo_name": "forgi86/RNN-adaptation", "max_issues_repo_head_hexsha": "d32e8185c6a746060dd726a0f5080231e0c9439b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "worlds/utils.py", "max_forks_repo_name": "forgi86/RNN-adaptation", "max_forks_repo_head_hexsha": "d32e8185c6a746060dd726a0f5080231e0c9439b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 17.2142857143, "max_line_length": 65, "alphanum_fraction": 0.755186722, "include": true, "reason": "import numpy", "num_tokens": 55}
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import unittest
import paddle
import os
__all__ = ['get_vocab_list', 'stable_softmax', 'cross_entropy']
def get_vocab_list(vocab_path):
    """Read a tab-separated vocab file and return the first column of each line.

    Args:
        vocab_path (str): path to a UTF-8 text file with one entry per line;
            columns are separated by tabs and only the first column is kept.

    Returns:
        list of str: vocabulary tokens in file order.
    """
    tokens = []
    with open(vocab_path, "r", encoding="utf-8") as reader:
        for line in reader:
            tokens.append(line.rstrip("\n").split("\t")[0])
    return tokens
def stable_softmax(x):
    """Softmax of vector ``x``, shifted by its max for numerical stability.

    The shifted values are clipped at -64 so that a later log(exp(.)) in a
    loss computation cannot underflow to log(0) = -inf.
    """
    shifted = np.clip(x - np.max(x), -64., None)
    weights = np.exp(shifted)
    return weights / weights.sum()
def cross_entropy(softmax, label, soft_label, axis, ignore_index=-1):
    """Reference cross-entropy on precomputed softmax probabilities.

    Args:
        softmax: array of probabilities; the class dimension sits at ``axis``.
        label: per-class probabilities shaped like ``softmax`` when
            ``soft_label`` is true; otherwise integer class ids whose shape
            matches ``softmax`` with the class axis reduced to length 1.
        soft_label: whether ``label`` holds per-class probabilities.
        axis: index of the class dimension (may be negative).
        ignore_index: class id whose entries contribute zero loss
            (hard labels only).

    Returns:
        Array of per-position losses shaped like ``label``.
    """
    if soft_label:
        return (-label * np.log(softmax)).sum(axis=axis, keepdims=True)
    shape = softmax.shape
    # Normalize a negative axis to its positive position.
    axis %= len(shape)
    # Collapse to (outer, classes, inner) so the loop below is shape-agnostic.
    n = int(np.prod(shape[:axis]))
    axis_dim = shape[axis]
    remain = int(np.prod(shape[axis + 1:]))
    softmax_reshape = softmax.reshape((n, axis_dim, remain))
    label_reshape = label.reshape((n, 1, remain))
    result = np.zeros_like(label_reshape, dtype=softmax.dtype)
    for i in range(n):
        for j in range(remain):
            lbl = label_reshape[i, 0, j]
            if lbl != ignore_index:
                # Negative log-likelihood of the labelled class.
                result[i, 0, j] -= np.log(softmax_reshape[i, lbl, j])
    return result.reshape(label.shape)
def softmax_with_cross_entropy(logits,
                               label,
                               soft_label=False,
                               axis=-1,
                               ignore_index=-1):
    """Reference softmax-cross-entropy: stable_softmax followed by cross_entropy.

    NOTE(review): the softmax is always taken along the last axis, while the
    cross-entropy reduces along ``axis`` -- these disagree whenever
    ``axis != -1``; confirm against callers whether a non-default ``axis``
    is ever used.
    """
    softmax = np.apply_along_axis(stable_softmax, -1, logits)
    return cross_entropy(softmax, label, soft_label, axis, ignore_index)
def assert_raises(Error=AssertionError):
    """Decorator factory for unittest methods that are expected to raise.

    The returned decorator wraps a test method so that it passes exactly when
    its body raises ``Error`` (checked through ``self.assertRaises``).
    """
    def decorate(method):
        def checked(self, *args, **kwargs):
            with self.assertRaises(Error):
                method(self, *args, **kwargs)
        return checked
    return decorate
def create_test_data(file=__file__):
    """Write a small Chinese/ASCII vocabulary file next to ``file``.

    The file is named ``dict.txt`` and holds one token per line.  It is
    written in UTF-8 explicitly (bug fix: the original relied on the locale's
    default encoding, which raises UnicodeEncodeError for the CJK entries on
    non-UTF-8 systems), matching the UTF-8 read in ``get_vocab_list``.

    Args:
        file: a file path whose directory receives ``dict.txt``; defaults
            to this module's path.

    Returns:
        str: path of the written ``dict.txt``.
    """
    dir_path = os.path.dirname(os.path.realpath(file))
    test_data_file = os.path.join(dir_path, 'dict.txt')
    with open(test_data_file, "w", encoding="utf-8") as f:
        vocab_list = [
            '[UNK]', 'AT&T', 'B超', 'c#', 'C#', 'c++', 'C++', 'T恤', 'A座', 'A股',
            'A型', 'A轮', 'AA制', 'AB型', 'B座', 'B股', 'B型', 'B轮', 'BB机', 'BP机',
            'C盘', 'C座', 'C语言', 'CD盒', 'CD机', 'CALL机', 'D盘', 'D座', 'D版', 'E盘',
            'E座', 'E化', 'E通', 'F盘', 'F座', 'G盘', 'H盘', 'H股', 'I盘', 'IC卡', 'IP卡',
            'IP电话', 'IP地址', 'K党', 'K歌之王', 'N年', 'O型', 'PC机', 'PH值', 'SIM卡',
            'U盘', 'VISA卡', 'Z盘', 'Q版', 'QQ号', 'RSS订阅', 'T盘', 'X光', 'X光线', 'X射线',
            'γ射线', 'T恤衫', 'T型台', 'T台', '4S店', '4s店', '江南style', '江南Style',
            '1号店', '小S', '大S', '阿Q', '一', '一一', '一一二', '一一例', '一一分', '一一列举',
            '一一对', '一一对应', '一一记', '一一道来', '一丁', '一丁不识', '一丁点', '一丁点儿', '一七',
            '一七八不', '一万', '一万一千', '一万一千五百二十颗', '一万一千八百八十斤', '一万一千多间',
            '一万一千零九十五册', '一万七千', '一万七千余', '一万七千多', '一万七千多户', '一万万'
        ]
        for vocab in vocab_list:
            f.write("{}\n".format(vocab))
    return test_data_file
|
{"hexsha": "be46d31aaf11383efcdcd6e2346affef8098aff1", "size": 3756, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/util.py", "max_stars_repo_name": "monika19950721/PaddleNLP", "max_stars_repo_head_hexsha": "d8931a5253dfc4f110daec73d96b0cc78d150042", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/util.py", "max_issues_repo_name": "monika19950721/PaddleNLP", "max_issues_repo_head_hexsha": "d8931a5253dfc4f110daec73d96b0cc78d150042", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/util.py", "max_forks_repo_name": "monika19950721/PaddleNLP", "max_forks_repo_head_hexsha": "d8931a5253dfc4f110daec73d96b0cc78d150042", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.9393939394, "max_line_length": 80, "alphanum_fraction": 0.5782747604, "include": true, "reason": "import numpy", "num_tokens": 1243}
|
#! /usr/bin/env python3
'''
Author : BCC
Date : 2022/03/31
'''
import argparse
import math
import sys
import re
import copy
import numpy as np
import gzip
import matplotlib.pyplot as plt
from matplotlib.pyplot import MultipleLocator #for setting of scale of separating along with x-axis & y-axis.
#########################
# Class-Definition #
#########################
class color:
    """ANSI terminal escape sequences for colored/bold console output."""
    PURPLE = '\033[1;35;48m'
    CYAN = '\033[1;36;48m'
    BOLD = '\033[1;37;48m'
    BLUE = '\033[1;34;48m'
    GREEN = '\033[1;32;48m'
    YELLOW = '\033[1;33;48m'
    RED = '\033[1;31;48m'
    BLACK = '\033[1;30;48m'
    UNDERLINE = '\033[4;37;48m'
    END = '\033[1;37;0m'  # reset sequence: restores default terminal styling
#########################
# Main-Routine #
#########################
def main():
    """Sample one point from each generator, print both, and plot histograms in debug mode."""
    #Process the argument
    (m, s, n, w, a, gaussian_meth, is_debug) = ArgumentParser()
    #Generate a random data point from Univariate Gaussian distribution
    uni_gauss_point = UnivariateGaussianRandomGenerator(m, s, gaussian_meth)
    #Generate a random data point from Polynomial Basis Linear Model Data Generator
    poly_point = PolynomialBasisRandomGenerator(n, w, a, gaussian_meth)
    #Print the result
    PrintResult(uni_gauss_point, poly_point)
    if(is_debug):
        # Visual sanity check of both sampling schemes
        # (1 = Box-Muller, 0 = sum of 12 uniforms).
        DrawGaussianDistribution(m, s, 1)
        DrawGaussianDistribution(m, s, 0)
#########################
# Sub-Routine #
#########################
def ArgumentParser():
    """Parse command-line options and return (m, s, n, w, a, gaussian_meth, is_debug).

    m, s          : mean and variance for the univariate Gaussian generator.
    n, w, a       : basis count, coefficient list and noise variance for the
                    polynomial-basis linear model generator.
    gaussian_meth : 0 = sum-of-12-uniforms approximation, 1 = Box-Muller.
    is_debug      : 1 enables debug prints and histogram plots.

    Exits the process with an error message if a required option is missing.
    """
    m = None
    s = None
    n = None
    w = None
    a = None
    gaussian_meth = None
    is_debug = 0
    parser = argparse.ArgumentParser()
    parser.add_argument("--m", "-m", help="The mean for univariate Gaussian data generator.")
    parser.add_argument("--s", "-s", help="The variance for univariate Gaussian data generator.")
    parser.add_argument("--n", "-n", help="The basis number of Polynomial basis linear model data generator.")
    parser.add_argument("--w", "-w", help="The n coefficient of Polynomial basis linear model data generator.")
    parser.add_argument("--a", "-a", help="The variance of noise of Polynomial basis linear model data generator.")
    parser.add_argument("--gaussian_meth", "-gm", help="Set '0' to use 12 unform distribution to approximate standard Gaussian. Set '1' to use Box-Muller method to generate standard Gaussian. Default is '0'.")
    parser.add_argument("--is_debug", "-isd", help="1 for debug mode; 0 for normal mode.")
    args = parser.parse_args()
    # Coerce the string-valued arguments to their working types.
    if(args.m):
        m = float(args.m)
    if(args.s):
        s = float(args.s)
    if(args.n):
        n = int(args.n)
    if(args.w):
        w = ConvertToList(args.w)
    if(args.a):
        a = float(args.a)
    if(args.gaussian_meth):
        gaussian_meth = int(args.gaussian_meth)
    if(args.is_debug):
        is_debug = int(args.is_debug)
    # Required options: bail out with an explanatory message if any is absent.
    if(m == None):
        print(f"Error: You should set '--m' or '-m' for the input of mean for univariate Gaussian data generator.")
        sys.exit()
    if(s == None):
        print(f"Error: You should set '--s' or '-s' for the input of variance for univariate Gaussian data generator.")
        sys.exit()
    if(n == None):
        print(f"Error: You should set '--n' or '-n' for the input of the number of basis for Polynomial basis linear model data generator.")
        sys.exit()
    if(w == None):
        print(f"Error: You should set '--w' or '-w' for the input of the vector W for Polynomial basis linear model data generator.")
        sys.exit()
    if(a == None):
        print(f"Error: You should set '--a' or '-a' for the input of the variance of noise e for Polynomial basis linear model data generator.")
        sys.exit()
    if(gaussian_meth == None):
        gaussian_meth = 0  # default: sum-of-uniforms approximation
    if(is_debug):
        print(f"m = {m}")
        print(f"s = {s}")
        print(f"n = {n}")
        print(f"w = {w}")
        print(f"a = {a}")
        print(f"gaussian_meth = {gaussian_meth}")
    return (m, s, n, w, a, gaussian_meth, is_debug)
def DrawGaussianDistribution(m, s, method):
    """Show a 100-bin histogram of 20000 draws from the Gaussian generator.

    ``method`` selects the sampling scheme (0: sum-of-uniforms, 1: Box-Muller).
    """
    # 20000 draws, matching the original's arange(-10000, 10000) sweep length.
    samples = [UnivariateGaussianRandomGenerator(m, s, method) for _ in range(20000)]
    plt.hist(samples, 100)
    plt.title(f"Gaussian distribution using method {method}")
    plt.show()
def UnivariateGaussianRandomGenerator(m, s, method=0):
    """Draw one sample from N(m, s), where ``s`` is the variance.

    method 0: central-limit approximation -- sum of 12 U(0,1) draws minus 6.
    method 1: Box-Muller transform from two independent U(0,1) draws.
    """
    if method == 0:
        standard_normal = sum(np.random.uniform(0, 1, 12)) - 6
    else:
        u = np.random.uniform(0, 1)
        v = np.random.uniform(0, 1)
        standard_normal = math.sqrt(-2 * math.log(u)) * math.sin(2 * math.pi * v)
    # Scale by the standard deviation and shift by the mean.
    return standard_normal * math.sqrt(s) + m
def PolynomialBasisRandomGenerator(n, w, a, gaussian_meth):
    """Sample (x, y) with x ~ U(-1,1) and y = sum_i w[i]*x^i + e, e ~ N(0, a).

    ``gaussian_meth`` selects the standard-normal scheme for the noise term.
    """
    x = np.random.uniform(-1, 1)
    # Evaluate the degree-(n-1) polynomial by accumulating powers of x.
    power = 1
    y = 0
    for i in range(n):
        if i > 0:
            power *= x
        y += w[i] * power
    # Additive Gaussian noise with zero mean and variance a.
    y += UnivariateGaussianRandomGenerator(0, a, gaussian_meth)
    return (x, y)
def PrintResult(uni_gauss_point, poly_point):
    """Print the sampled Gaussian value and the (x, y) polynomial-model point."""
    px, py = poly_point
    print(f"Data point from Univariate Gaussian Data Generator : {uni_gauss_point}")
    print(f"Data point from Polynomial Basis Linear Model Data Generator : ({px}, {py})")
def ConvertToList(w_string):
    """Parse a string such as "[1,2.5,3]" into a list of floats."""
    trimmed = w_string.strip('[]')
    return [float(part) for part in trimmed.split(',')]
#---------------Execution---------------#
if __name__ == '__main__':
main()
|
{"hexsha": "e156a5eff779a92906956542fc81922a7d0e99d1", "size": 5491, "ext": "py", "lang": "Python", "max_stars_repo_path": "HW3/python/Q1/hw3.py", "max_stars_repo_name": "Coslate/Machine_Learning", "max_stars_repo_head_hexsha": "fd1e51cfdb02e1249819aa7d54a18b91fcd4225e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "HW3/python/Q1/hw3.py", "max_issues_repo_name": "Coslate/Machine_Learning", "max_issues_repo_head_hexsha": "fd1e51cfdb02e1249819aa7d54a18b91fcd4225e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "HW3/python/Q1/hw3.py", "max_forks_repo_name": "Coslate/Machine_Learning", "max_forks_repo_head_hexsha": "fd1e51cfdb02e1249819aa7d54a18b91fcd4225e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.6845238095, "max_line_length": 209, "alphanum_fraction": 0.5982516846, "include": true, "reason": "import numpy", "num_tokens": 1452}
|
# JSON view partial: renders a single Todo as the API's item payload.
(t::Todo) -> begin
    el(
        # t.id is unwrapped via Base.get -- presumably a nullable DbId; confirm.
        id = t.id |> Base.get,
        todo = t.title,
        completed = t.completed
    )
end
|
{"hexsha": "00e56eeca40db091ffa00daca87838ee16f98f03", "size": 99, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "app/resources/todos/views/partials/item.json.jl", "max_stars_repo_name": "essenciary/genie-todo-mvc", "max_stars_repo_head_hexsha": "bc18a5c0ab61481050daabee34eb3949e75de837", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2017-05-09T16:15:53.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-31T21:55:33.000Z", "max_issues_repo_path": "app/resources/todos/views/partials/item.json.jl", "max_issues_repo_name": "essenciary/genie-todo-mvc", "max_issues_repo_head_hexsha": "bc18a5c0ab61481050daabee34eb3949e75de837", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2018-02-25T12:21:11.000Z", "max_issues_repo_issues_event_max_datetime": "2018-02-28T10:03:08.000Z", "max_forks_repo_path": "app/resources/todos/views/partials/item.json.jl", "max_forks_repo_name": "essenciary/genie-todo-mvc", "max_forks_repo_head_hexsha": "bc18a5c0ab61481050daabee34eb3949e75de837", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-09-29T10:06:12.000Z", "max_forks_repo_forks_event_max_datetime": "2020-09-29T10:06:12.000Z", "avg_line_length": 12.375, "max_line_length": 25, "alphanum_fraction": 0.5656565657, "num_tokens": 34}
|
import numpy as np
import pandas as pd
import tornado
from tornado.websocket import websocket_connect
from tornado.httpclient import HTTPRequest
import json
import sys
def message(**kwargs):
    """Serialize the given keyword arguments as a JSON object string."""
    return json.dumps(dict(kwargs))
class FlightGearProperty(object):
    """ Manages a property transmitted from FlightGear. """

    def __init__(self, node):
        self.node = node      # property-tree path, e.g. "/position/altitude-ft"
        self.values = []      # captured values, in arrival order
        self.indices = []     # matching simulation timestamps (seconds)

    def get_timeseries(self, tail_n=-1):
        """ Convert the captured values into a pandas.Series with a
            TimeDeltaIndex.

            tail_n: if > 0, drop that many of the most recent samples; any
            other value returns the full series.  (Bug fix: the original
            clamped with min(len, 0), which is always 0, so the parameter
            was silently ignored.)
        """
        tail_n = min(len(self.values), max(tail_n, 0))
        if tail_n > 0:
            return pd.Series(self.values[:-tail_n],
                             index=pd.to_timedelta(self.indices[:-tail_n], unit="s"))
        return pd.Series(self.values, index=pd.to_timedelta(self.indices, unit="s"))

    def update(self, data):
        """ Update the property with data from a websocket message.

            data: decoded JSON dict with keys 'value', 'type' ("double" and
            "int" are coerced, other types kept as-is) and 'ts' (seconds).
        """
        v = data['value']
        data_type = data['type']
        if data_type == "double":
            v = float(v)
        elif data_type == "int":
            v = int(v)
        t = data["ts"]
        self.values.append(v)
        self.indices.append(float(t))
class FlightGearConnection(object):
    """Owns the websocket connection to a running FlightGear instance and the
    FlightGearProperty objects fed by it."""

    def __init__(self, host="localhost", port=9015):
        """ When FlightGear is run with the --httpd=PORT option, it serves a
        Webapplication, and also a Websocket URL to get, set and listen to
        all properties inside the Simulation.
        The FlightGearConnection handles the lifetime of such a connection.
        The IPython Kernel uses Tornado to run asynchronously, which allows
        us to communicate with websockets very nicely.
        """
        self.connected = False
        self.host = host
        self.port = port
        # node path -> FlightGearProperty; listeners are (re)registered on connect.
        self.properties = {}

    def connect(self):
        """Open the websocket asynchronously; on_connected fires when ready."""
        self.connected = False
        request = HTTPRequest("ws://%s:%i/PropertyListener" % (self.host, self.port))
        self.future = websocket_connect(request, on_message_callback=self.on_message)
        self.future.add_done_callback(self.on_connected)

    def on_message(self, msg):
        """Dispatch one websocket frame to the property it belongs to."""
        if msg is None:
            # Tornado signals a closed connection with a None message.
            return
        data = json.loads(msg)
        node = data['path']
        try:
            self.properties[node].update(data)
        except KeyError:
            # This should not happen
            print("%s not found" % node)

    def on_connected(self, *args, **kwargs):
        """Completion callback: store the socket and attach queued listeners."""
        sys.stdout.flush()
        self.connected = True
        self.websocket = self.future.result()
        # Properties registered before the connection was up get their
        # listeners attached now.
        for node, prop in self.properties.items():
            self._add_listener(prop)

    def close(self):
        # Mark disconnected first so _add_listener stops writing.
        self.connected = False
        self.websocket.close()

    def _add_listener(self, prop):
        # Request the current value once, then subscribe to future changes.
        if self.connected:
            self.websocket.write_message(message(command="get", node=prop.node))
            self.websocket.write_message(message(command="addListener", node=prop.node))

    def listen(self, node):
        """Start capturing ``node``; returns the FlightGearProperty for it."""
        prop = FlightGearProperty(node)
        self.properties[node] = prop
        self._add_listener(prop)
        return prop
|
{"hexsha": "785331c5298d220ec38d7eb12f40ed1c977f66bf", "size": 3157, "ext": "py", "lang": "Python", "max_stars_repo_path": "flightgear.py", "max_stars_repo_name": "akloster/jupyter-flightgear", "max_stars_repo_head_hexsha": "7135884a31d648b0a768c151a11a47eff287f414", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 23, "max_stars_repo_stars_event_min_datetime": "2016-01-03T13:54:14.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-16T07:56:30.000Z", "max_issues_repo_path": "flightgear.py", "max_issues_repo_name": "akloster/jupyter-flightgear", "max_issues_repo_head_hexsha": "7135884a31d648b0a768c151a11a47eff287f414", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2016-07-14T20:16:37.000Z", "max_issues_repo_issues_event_max_datetime": "2016-07-15T17:31:58.000Z", "max_forks_repo_path": "flightgear.py", "max_forks_repo_name": "akloster/jupyter-flightgear", "max_forks_repo_head_hexsha": "7135884a31d648b0a768c151a11a47eff287f414", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2017-01-08T09:41:00.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-07T09:08:37.000Z", "avg_line_length": 31.2574257426, "max_line_length": 108, "alphanum_fraction": 0.6135571745, "include": true, "reason": "import numpy", "num_tokens": 663}
|
(*
Copyright (C) 2017 M.A.L. Marques
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
*)
(* type: lda_exc *)

(* Teter 93 LDA exchange-correlation functional: a Pade approximant in rs.
   a,  b  : numerator / denominator coefficients for the spin-unpolarized case
   ap, bp : spin-polarization corrections, scaled by f_zeta(zeta) below *)
a := [0.4581652932831429, 2.217058676663745, 0.7405551735357053, 0.01968227878617998 ]:
ap := [0.119086804055547, 0.6157402568883345, 0.1574201515892867, 0.003532336663397157]:
b := [1.0000000000000000, 4.504130959426697, 1.110667363742916, 0.02359291751427506 ]:
bp := [0.000000000000000, 0.2673612973836267, 0.2052004607777787, 0.004200005045691381]:

(* Energy per particle: -(sum_i c_i rs^(i-1)) / (sum_i d_i rs^i), where the
   effective coefficients interpolate between polarized/unpolarized via
   f_zeta(zeta) (defined in the shared maple utilities). *)
f := (rs, zeta) ->
  - add((a[i] + f_zeta(zeta)*ap[i])*rs^(i-1), i=1..4) /
    add((b[i] + f_zeta(zeta)*bp[i])*rs^i, i=1..4):
|
{"hexsha": "3280d970aaab5871a640bdce5899edc57baa54dc", "size": 752, "ext": "mpl", "lang": "Maple", "max_stars_repo_path": "libxc-5.1.6/maple/lda_exc/lda_xc_teter93.mpl", "max_stars_repo_name": "pwang234/lsms", "max_stars_repo_head_hexsha": "6044153b6138512093e457bdc0c15c699c831778", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 16, "max_stars_repo_stars_event_min_datetime": "2018-04-03T15:35:47.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-01T03:19:23.000Z", "max_issues_repo_path": "libxc-5.1.6/maple/lda_exc/lda_xc_teter93.mpl", "max_issues_repo_name": "pwang234/lsms", "max_issues_repo_head_hexsha": "6044153b6138512093e457bdc0c15c699c831778", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2019-07-30T13:59:18.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T17:43:35.000Z", "max_forks_repo_path": "libxc-5.1.6/maple/lda_exc/lda_xc_teter93.mpl", "max_forks_repo_name": "pwang234/lsms", "max_forks_repo_head_hexsha": "6044153b6138512093e457bdc0c15c699c831778", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2018-06-30T00:30:48.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-31T09:14:29.000Z", "avg_line_length": 39.5789473684, "max_line_length": 89, "alphanum_fraction": 0.6848404255, "num_tokens": 314}
|
#include "../Includes/USBReader.h"
/* Copyright (c) 2021 [Rick de Bondt] - USBReader.cpp */
#include <array>
#include <chrono>
#include <cstdio>
#include <cstring>
#include <iomanip>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>
// Not needed for this file but xlinkconnection needs it and this solves an ordering issue on windows
#include <boost/asio.hpp>
#include <sys/types.h>
// This garbage is needed for it to compile when building statically
#ifdef BUILD_STATIC
#ifdef _MSC_VER
#pragma comment(lib, "legacy_stdio_definitions.lib")
#ifdef __cplusplus
FILE iob[] = {*stdin, *stdout, *stderr};
extern "C" {
FILE* __cdecl _iob(void) { return iob; }
}
#endif
#endif
#endif
#include <libusb.h>
#include "../Includes/Logger.h"
#include "../Includes/NetConversionFunctions.h"
#include "../Includes/USBReceiveThread.h"
#include "../Includes/USBSendThread.h"
#include "../Includes/XLinkKaiConnection.h"
#if defined(_MSC_VER) && defined(__MINGW32__)
#include <hidclass.h>
#endif
namespace
{
constexpr unsigned int cPSPVID{0x54C};
constexpr unsigned int cPSPPID{0x1C9};
} // namespace
using namespace std::chrono_literals;
using namespace USB_Constants;
// Constructs the reader with its retry/timeout tuning parameters and
// initializes the default libusb context (paired with libusb_exit() in
// the destructor).
USBReader::USBReader(
    int aMaxBufferedMessages, int aMaxFatalRetries, int aMaxReadWriteRetries, int aReadTimeoutMS, int aWriteTimeoutMS) :
    mMaxBufferedMessages(aMaxBufferedMessages),
    mMaxFatalRetries(aMaxFatalRetries), mMaxReadWriteRetries(aMaxReadWriteRetries), mReadTimeoutMS(aReadTimeoutMS),
    mWriteTimeoutMS(aWriteTimeoutMS)
{
    libusb_init(nullptr);
}
/**
 * Checks if command is a debugprint command with the given mode.
 * @param aData - Data to check.
 * @param aLength - Length of data to check.
 * @return mode of the debugprint command (cAsyncModePacket or
 *         cAsyncModeDebug), or 0 if this is not a debugprint command.
 */
static inline int IsDebugPrintCommand(AsyncCommand& aData, int aLength)
{
    int lReturn{0};

    // A DebugPrint command needs at least the async header plus subheader.
    // (The old code also computed aLength - cAsyncHeaderSize into a local
    // that was never read afterwards; that dead store has been removed.)
    if (aLength > cAsyncHeaderAndSubHeaderSize) {
        Logger::GetInstance().Log("Size of packet:" + std::to_string(aLength), Logger::Level::TRACE);

        // The subheader sits directly behind the async header.
        auto* lSubHeader{reinterpret_cast<AsyncSubHeader*>(reinterpret_cast<char*>(&aData) + cAsyncHeaderSize)};

        if (lSubHeader->magic == DebugPrint) {
            if (lSubHeader->mode == cAsyncModePacket && lSubHeader->ref == cAsyncCommandSendPacket) {
                Logger::GetInstance().Log("Size reported: " + std::to_string(lSubHeader->size), Logger::Level::TRACE);
                lReturn = cAsyncModePacket;
            } else if (lSubHeader->mode == cAsyncModeDebug) {
                lReturn = cAsyncModeDebug;
            }
        }
    }
    return lReturn;
}
// Shuts the reader down: stops the receiver thread (or releases the USB
// device directly when no thread is running) and stops both worker threads.
void USBReader::Close()
{
    // Close thread nicely
    // The receiver loop sees mStopRequest, calls HandleClose() itself and
    // clears the flag again; we wait for that handshake before joining.
    mStopRequest = true;
    if (mUSBThread != nullptr) {
        while (mStopRequest && !mUSBThread->joinable()) {
            std::this_thread::sleep_for(1s);
        }
        mUSBThread->join();
        mUSBThread = nullptr;
    } else {
        // No receiver thread: release the device on this thread.
        HandleClose();
    }
    if (mUSBSendThread != nullptr) {
        mUSBSendThread->StopThread();
        mUSBSendThread = nullptr;
    }
    if (mUSBReceiveThread != nullptr) {
        mUSBReceiveThread->StopThread();
        mUSBReceiveThread = nullptr;
    }
}
// Handles one asynchronous transfer from the PSP on the user channel.
// Packets larger than a single USB transfer arrive split ("stitched") over
// several transfers; mReceiveStitching tracks whether we are mid-reassembly,
// mActualLength the advertised total payload size and mStitchingLength how
// much of it has been received so far. Complete fragments are queued on
// mUSBReceiveThread.
void USBReader::HandleAsynchronous(AsyncCommand& aData, int aLength)
{
    if (aData.channel == cAsyncUserChannel) {
        BinaryStitchUSBPacket lPacket{};
        if (!mReceiveStitching) {
            // Start of a new command: classify it first.
            int lPacketMode{IsDebugPrintCommand(aData, aLength)};
            if (lPacketMode > 0) {
                // We know it's a DebugPrint command, so we can skip past this header as well now
                unsigned int lLength = aLength - cAsyncHeaderAndSubHeaderSize;
                int lActualPacketLength{0};
                // We are a packet, so we can check if we can send it off
                switch (lPacketMode) {
                    case cAsyncModePacket:
                        // Grab the packet length from the packet
                        lActualPacketLength =
                            reinterpret_cast<AsyncSubHeader*>(reinterpret_cast<char*>(&aData) + cAsyncHeaderSize)->size;
                        // Stitching needed when the advertised payload does not
                        // fit in one USB transfer alongside both headers.
                        lPacket.stitch = lActualPacketLength > (cMaxUSBPacketSize - cAsyncHeaderAndSubHeaderSize);
                        mActualLength = lActualPacketLength;
                        mReceiveStitching = lPacket.stitch;
                        // Skip headers already
                        lPacket.length = aLength - cAsyncHeaderAndSubHeaderSize;
                        if (lPacket.stitch) {
                            mStitchingLength = lPacket.length;
                        }
                        memcpy(lPacket.data.data(),
                               reinterpret_cast<char*>(&aData) + cAsyncHeaderAndSubHeaderSize,
                               lPacket.length);
                        mUSBReceiveThread->AddToQueue(lPacket);
                        break;
                    case cAsyncModeDebug:
                        // We can just go ahead and print the debug data, I'm assuming it will never go past 512 bytes.
                        // If it does, we'll see when we get there :|
                        Logger::GetInstance().Log(
                            "PSP: " +
                                std::string(reinterpret_cast<char*>(&aData) + cAsyncHeaderAndSubHeaderSize, lLength),
                            Logger::Level::INFO);
                        break;
                    default:
                        // Don't know what we got
                        Logger::GetInstance().Log(
                            "Unknown data:" + PrettyHexString(std::string(reinterpret_cast<char*>(&aData), mLength)),
                            Logger::Level::DEBUG);
                }
            } else {
                // Don't know what we got
                Logger::GetInstance().Log(
                    "Unknown data:" + PrettyHexString(std::string(reinterpret_cast<char*>(&aData), mLength)),
                    Logger::Level::DEBUG);
            }
        } else {
            // Continuation fragment: only the async header precedes the
            // payload here (no subheader on follow-up transfers).
            Logger::GetInstance().Log("RecStitch: Old: " + std::to_string(mStitchingLength) +
                                          " , Add: " + std::to_string(aLength - cAsyncHeaderSize) +
                                          " of: " + std::to_string(mActualLength),
                                      Logger::Level::TRACE);
            mStitchingLength += aLength - cAsyncHeaderSize;
            // Keep stitching while this transfer was full-sized and the
            // advertised total has not been reached yet.
            lPacket.stitch = (aLength > (cMaxUSBPacketSize - cAsyncHeaderSize)) && (mStitchingLength < mActualLength);
            mReceiveStitching = lPacket.stitch;
            if (!lPacket.stitch) {
                mStitchingLength = 0;
            }
            // Skip headers already
            lPacket.length = aLength - cAsyncHeaderSize;
            memcpy(lPacket.data.data(), reinterpret_cast<char*>(&aData) + cAsyncHeaderSize, lPacket.length);
            mUSBReceiveThread->AddToQueue(lPacket);
        }
    }
}
// Drains the outgoing queue towards the PSP, stopping early when shutdown is
// requested. mSendStitching mirrors whether the last written packet was part
// of a split transfer (the receive loop checks it to keep send order intact).
void USBReader::HandleAsynchronousSend()
{
    while ((!mStopRequest) && mUSBSendThread->HasOutgoingData()) {
        BinaryStitchUSBPacket lFormattedPacket = mUSBSendThread->PopFromOutgoingQueue();
        mSendStitching = lFormattedPacket.stitch;
        if (USBBulkWrite(
                cUSBDataWriteEndpoint, lFormattedPacket.data.data(), lFormattedPacket.length, mWriteTimeoutMS) == -1) {
            // Back off and count the failure; too many in a row flags the
            // connection as broken so the main loop restarts the stack.
            std::this_thread::sleep_for(std::chrono::milliseconds(mWriteTimeoutMS));
            mReadWriteRetryCounter++;
            if (mReadWriteRetryCounter > mMaxReadWriteRetries) {
                mError = true;
            }
        }
    }
}
// Releases the USB device: resets it, gives the interface back to the kernel
// and reattaches the kernel driver so the OS can use the device again.
// Also acknowledges a pending stop request (see Close()).
void USBReader::HandleClose()
{
    if (mDeviceHandle != nullptr) {
        libusb_reset_device(mDeviceHandle);
        libusb_release_interface(mDeviceHandle, 0);
        libusb_attach_kernel_driver(mDeviceHandle, 0);
        libusb_close(mDeviceHandle);
        mDeviceHandle = nullptr;
    }
    // Signals Close() that the shutdown handshake completed.
    mStopRequest = false;
}
// Recovers from a fatal-looking USB error by fully closing and reopening the
// device, clearing all stitching/queue state, and bumping the retry counter
// (the main loop bails out after mMaxFatalRetries).
void USBReader::HandleError()
{
    // Do a full reset
    HandleClose();
    Open();
    mRetryCounter++;
    mError = false;
    mUSBCheckSuccessful = false;
    mReceiveStitching = false;
    mSendStitching = false;
    // Drop any half-transferred data; it is invalid after a device reset.
    mUSBSendThread->ClearQueues();
    mUSBReceiveThread->ClearQueues();
    Logger::GetInstance().Log("Ran into a snag, restarting stack!", Logger::Level::DEBUG);
    std::this_thread::sleep_for(1ms);
}
/**
 * Scans connected USB devices for a PSP (matching cPSPVID/cPSPPID), opens it,
 * selects configuration 1 and claims interface 0. On success mDeviceHandle is
 * set. The kernel driver is auto-detached while we hold the interface.
 *
 * Fixes over the previous version: the device list is now freed exactly once
 * after the scan (it used to be freed inside the loop, after which the loop
 * could keep iterating the freed list, and it leaked when no PSP was found),
 * and the error messages now match the call that actually failed.
 *
 * @return true when a device handle was acquired.
 */
bool USBReader::Open()
{
    libusb_device** lDevices{nullptr};

    int lAmountOfDevices = static_cast<int>(libusb_get_device_list(nullptr, &lDevices));
    if (lAmountOfDevices < 0 || lDevices == nullptr) {
        Logger::GetInstance().Log(
            std::string("Could not get device list: ") +
                libusb_strerror(static_cast<libusb_error>(lAmountOfDevices)),
            Logger::Level::ERROR);
        return false;
    }

    for (int lCount = 0; (lCount < lAmountOfDevices) && (mDeviceHandle == nullptr); lCount++) {
        libusb_device* lDevice{lDevices[lCount]};
        libusb_device_descriptor lDescriptor{};

        int lReturn{libusb_get_device_descriptor(lDevice, &lDescriptor)};
        if (lReturn < 0) {
            Logger::GetInstance().Log(std::string("Cannot query device descriptor: ") +
                                          libusb_strerror(static_cast<libusb_error>(lReturn)),
                                      Logger::Level::ERROR);
            continue;
        }

        if ((lDescriptor.idVendor != cPSPVID) || (lDescriptor.idProduct != cPSPPID)) {
            std::stringstream lVidPid;
            lVidPid << std::hex << std::setfill('0') << std::setw(4) << lDescriptor.idVendor << ":" << std::hex
                    << std::setfill('0') << std::setw(4) << lDescriptor.idProduct;
            Logger::GetInstance().Log(std::string("Non matching device found: ") + lVidPid.str(),
                                      Logger::Level::TRACE);
            continue;
        }

        // Found the PSP: try to open and set it up. Only one PSP is expected,
        // so stop scanning after this device either way.
        libusb_device_handle* lDeviceHandle{nullptr};
        lReturn = libusb_open(lDevice, &lDeviceHandle);
        if (lReturn < 0 || lDeviceHandle == nullptr) {
            Logger::GetInstance().Log(std::string("Could not open USB device: ") +
                                          libusb_strerror(static_cast<libusb_error>(lReturn)),
                                      Logger::Level::ERROR);
            break;
        }

        libusb_set_auto_detach_kernel_driver(lDeviceHandle, 1);
        lReturn = libusb_set_configuration(lDeviceHandle, 1);
        if (lReturn < 0) {
            Logger::GetInstance().Log(std::string("Could not set configuration: ") +
                                          libusb_strerror(static_cast<libusb_error>(lReturn)),
                                      Logger::Level::ERROR);
            libusb_close(lDeviceHandle);
            break;
        }

        lReturn = libusb_claim_interface(lDeviceHandle, 0);
        if (lReturn == 0) {
            mDeviceHandle = lDeviceHandle;
        } else {
            Logger::GetInstance().Log(std::string("Could not claim interface: ") +
                                          libusb_strerror(static_cast<libusb_error>(lReturn)),
                                      Logger::Level::ERROR);
            libusb_close(lDeviceHandle);
        }
        break;
    }

    // Free the list exactly once, unreferencing the devices.
    libusb_free_device_list(lDevices, 1);
    return (mDeviceHandle != nullptr);
}
// Stores the XLink Kai connection that received PSP packets are forwarded to
// (consumed when the receive thread is created in StartReceiverThread()).
void USBReader::SetIncomingConnection(std::shared_ptr<XLinkKaiConnection> aDevice)
{
    mIncomingConnection = aDevice;
}
// Performs a bulk read of up to aSize bytes into mTemporaryReceiveBuffer.
// Returns the number of bytes transferred on success, the negative libusb
// error code on transfer failure (any bytes partially transferred in that
// case are discarded from the return value), or -1 when no device is open.
int USBReader::USBBulkRead(int aEndpoint, int aSize, int aTimeOut)
{
    int lReturn{-1};
    int lError{0};
    if (mDeviceHandle != nullptr) {
        // libusb fills lReturn with the transferred byte count.
        lError = libusb_bulk_transfer(mDeviceHandle,
                                      aEndpoint,
                                      reinterpret_cast<unsigned char*>(mTemporaryReceiveBuffer.data()),
                                      aSize,
                                      &lReturn,
                                      aTimeOut);
        if (lError != 0) {
            // Propagate the (negative) libusb error code instead of a length.
            lReturn = lError;
        }
    } else {
        lReturn = -1;
    }
    return lReturn;
}
// Performs a bulk write of aSize bytes from aData to the given endpoint.
// Returns the number of bytes transferred, or -1 on failure (the libusb
// error is logged rather than returned) or when no device is open.
int USBReader::USBBulkWrite(int aEndpoint, char* aData, int aSize, int aTimeOut)
{
    int lReturn{-1};
    if (mDeviceHandle != nullptr) {
        // libusb fills lReturn with the transferred byte count.
        int lError = libusb_bulk_transfer(
            mDeviceHandle, aEndpoint, reinterpret_cast<unsigned char*>(aData), aSize, &lReturn, aTimeOut);
        if (lError < 0) {
            Logger::GetInstance().Log(
                std::string("Error during Bulk write: ") + libusb_strerror(static_cast<libusb_error>(lError)),
                Logger::Level::ERROR);
            lReturn = -1;
        }
    } else {
        lReturn = -1;
    }
    return lReturn;
}
// Probes the PSP by writing a HostFS magic to the hello endpoint; returns
// false when the write did not transfer exactly sizeof(int) bytes.
// NOTE(review): when no device handle is open this returns true (vacuous
// success); callers appear to rely on other checks for that case -- confirm
// before changing.
bool USBReader::USBCheckDevice()
{
    bool lReturn{true};
    Logger::GetInstance().Log("USBCheckDevice", Logger::Level::TRACE);
    if (mDeviceHandle != nullptr) {
        int lMagic = HostFS;
        int lLength =
            USBBulkWrite(cUSBHelloEndpoint, reinterpret_cast<char*>(&lMagic), sizeof(int), cMaxUSBHelloTimeout);
        if (lLength != sizeof(int)) {
            Logger::GetInstance().Log(std::string("Amount of bytes written did not match: ") + std::to_string(lLength),
                                      Logger::Level::WARNING);
            lReturn = false;
        }
    }
    return lReturn;
}
// Dispatches one received transfer (mLength bytes in mTemporaryReceiveBuffer)
// by its HostFS magic: answers Hello handshakes, forwards asynchronous data,
// and logs anything else. Resets mLength when done.
// Fix: the Bulk case previously fell through into default (missing break),
// so every Bulk transfer was also logged as "Magic not recognized".
void USBReader::ReceiveCallback()
{
    int lLength{mLength};

    // Length should be atleast the size of a command header
    if (lLength >= cHostFSHeaderSize) {
        auto* lCommand{reinterpret_cast<HostFsCommand*>(mTemporaryReceiveBuffer.data())};
        switch (static_cast<eMagicType>(lCommand->magic)) {
            case HostFS:
                // Handshake: the PSP should send Hello, which we answer.
                if (lCommand->command == (Hello)) {
                    SendHello();
                } else {
                    mError = true;
                    Logger::GetInstance().Log("PSP is being rude and not sending a Hello back :V. Disconnecting!" +
                                                  std::to_string(lCommand->command),
                                              Logger::Level::ERROR);
                }
                std::this_thread::sleep_for(100ms);
                break;
            case Asynchronous:
                // We know it's asynchronous data now
                HandleAsynchronous(*reinterpret_cast<AsyncCommand*>(lCommand), lLength);
                break;
            case Bulk:
                Logger::GetInstance().Log("Bulk received, weird", Logger::Level::DEBUG);
                break;
            default:
                Logger::GetInstance().Log("Magic not recognized: " + std::to_string(lCommand->magic),
                                          Logger::Level::DEBUG);
                break;
        }
    } else {
        Logger::GetInstance().Log("Packet too short to be usable", Logger::Level::DEBUG);
    }
    // Mark the buffer as consumed.
    mLength = 0;
}
// Replies to the PSP's Hello with a HostFS/Hello command, completing the
// handshake. Returns the number of bytes written, or -1 on failure (see
// USBBulkWrite).
// Fix: the trace log previously hardcoded a 12-byte length; it now uses
// cHostFSHeaderSize like the actual write does.
int USBReader::SendHello()
{
    HostFsCommand lResponse{};
    memset(&lResponse, 0, cHostFSHeaderSize);
    lResponse.magic = HostFS;
    lResponse.command = Hello;

    // Trace the exact header bytes about to be sent.
    Logger::GetInstance().Log(PrettyHexString(std::string(reinterpret_cast<char*>(&lResponse), cHostFSHeaderSize)),
                              Logger::Level::TRACE);

    return USBBulkWrite(cUSBHelloEndpoint, reinterpret_cast<char*>(&lResponse), cHostFSHeaderSize, cMaxUSBHelloTimeout);
}
// Spins up the receive/send worker threads plus the main USB loop, which
// alternates bulk reads and queued writes, performs the error/retry dance and
// shuts everything down when mStopRequest is set. Returns false when no
// device is open or a thread is already running.
bool USBReader::StartReceiverThread()
{
    bool lReturn{true};
    if (mDeviceHandle != nullptr && mUSBThread == nullptr) {
        mUSBReceiveThread = std::make_shared<USBReceiveThread>(*mIncomingConnection, mMaxBufferedMessages);
        mUSBReceiveThread->StartThread();
        mUSBSendThread = std::make_shared<USBSendThread>(mMaxBufferedMessages);
        mUSBSendThread->StartThread();
        mUSBThread = std::make_shared<std::thread>([&] {
            // If we didn't get a graceful disconnect retry making connection.
            while ((mDeviceHandle != nullptr) || ((mRetryCounter > 0) && !mStopRequest)) {
                if (mStopRequest) {
                    // Shutdown requested: release the device and clear the flag.
                    HandleClose();
                } else {
                    // Re-run the hello probe until it succeeds once.
                    if (!mUSBCheckSuccessful) {
                        mUSBCheckSuccessful = USBCheckDevice();
                    }
                    if ((mError && mRetryCounter < mMaxFatalRetries) || (!mUSBCheckSuccessful)) {
                        HandleError();
                    } else if (mRetryCounter >= mMaxFatalRetries) {
                        Logger::GetInstance().Log("Too many errors! Bailing out!", Logger::Level::ERROR);
                        HandleClose();
                        mRetryCounter = 0;
                    }
                    // While an outgoing packet is mid-stitch, keep writing
                    // before reading so the fragments stay contiguous.
                    if (!mSendStitching) {
                        // First read, then write
                        int lLength{USBBulkRead(cUSBDataReadEndpoint, cMaxUSBPacketSize, mReadTimeoutMS)};
                        if (lLength > 0) {
                            mLength = lLength;
                            mRetryCounter = 0;
                            ReceiveCallback();
                        } else if (lLength == LIBUSB_ERROR_TIMEOUT || lLength == LIBUSB_ERROR_BUSY) {
                            // Transient conditions: just wait and retry.
                            std::this_thread::sleep_for(std::chrono::milliseconds(mReadTimeoutMS));
                        } else if (mDeviceHandle == nullptr) {
                            mError = true;
                        } else {
                            std::this_thread::sleep_for(std::chrono::milliseconds(mReadTimeoutMS));
                            mReadWriteRetryCounter++;
                            if (mReadWriteRetryCounter > mMaxReadWriteRetries) {
                                mError = true;
                            }
                            // Probably fatal, try a restart of the device
                        }
                    }
                    // Likewise, do not interleave writes into an incoming
                    // stitched packet.
                    if (!mReceiveStitching) {
                        HandleAsynchronousSend();
                    }
                }
            }
        });
    } else {
        lReturn = false;
    }
    return lReturn;
}
// Queues a packet for transmission to the PSP; the actual USB write happens
// on the reader loop via HandleAsynchronousSend().
void USBReader::Send(std::string_view aData)
{
    // Handle in send thread
    mUSBSendThread->AddToQueue(aData);
}
// Stops all threads, releases the device (via Close()) and tears down the
// default libusb context initialized in the constructor.
USBReader::~USBReader()
{
    Close();
    libusb_exit(nullptr);
}
|
{"hexsha": "fbd60330f91893fdcd96678ed77a4ec5b2961376", "size": 18892, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "Sources/USBReader.cpp", "max_stars_repo_name": "codedwrench/PSPXLinkBridge", "max_stars_repo_head_hexsha": "a95ffc602547d5b39f29bac0acbeb245bc294bc5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3.0, "max_stars_repo_stars_event_min_datetime": "2021-07-02T12:53:38.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-29T10:40:10.000Z", "max_issues_repo_path": "Sources/USBReader.cpp", "max_issues_repo_name": "codedwrench/PSPXLinkBridge", "max_issues_repo_head_hexsha": "a95ffc602547d5b39f29bac0acbeb245bc294bc5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2021-01-19T11:05:05.000Z", "max_issues_repo_issues_event_max_datetime": "2021-01-21T23:46:27.000Z", "max_forks_repo_path": "Sources/USBReader.cpp", "max_forks_repo_name": "codedwrench/PSPXLinkBridge", "max_forks_repo_head_hexsha": "a95ffc602547d5b39f29bac0acbeb245bc294bc5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2021-03-01T18:54:46.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-01T18:54:46.000Z", "avg_line_length": 38.5551020408, "max_line_length": 120, "alphanum_fraction": 0.5410755876, "num_tokens": 4023}
|
[STATEMENT]
lemma [simp]: "(typeof\<^bsub>h\<^esub> v = Some Integer) = (\<exists>i. v = Intg i)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (typeof\<^bsub>h\<^esub> v = \<lfloor>Integer\<rfloor>) = (\<exists>i. v = Intg i)
[PROOF STEP]
(*<*)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (typeof\<^bsub>h\<^esub> v = \<lfloor>Integer\<rfloor>) = (\<exists>i. v = Intg i)
[PROOF STEP]
by(cases v) auto
|
{"llama_tokens": 180, "file": "Jinja_Common_Objects", "length": 2}
|
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from os import walk
import sys
import os
import math
from matplotlib.ticker import AutoMinorLocator
import fnmatch
def read_file():
    '''
    Prompt the user for the names (glob patterns allowed) of the measurement
    data file ('.dat') and the experiment-information file ('.txt'), both
    expected inside the 'data_iv' subdirectory of the current working
    directory. Empty answers default to '*.dat' and '*__LOG.txt'.

    User input: data file pattern, information file pattern
    Output: (location_and_data, location_and_information, data_file_name)

    Note: when no file matches, the return statement raises
    UnboundLocalError; the __main__ block relies on catching that.
    '''
    # Base everything on the current working directory's data_iv folder.
    base_dir = os.getcwd()
    location = os.path.join(base_dir, 'data_iv/')
    print("The data_iv directory contains following contents:\n", os.listdir(location))

    data_pattern = input("\nEnter the name of the file containing data : ")
    data_pattern = data_pattern or "*.dat"
    info_pattern = input("\nEnter the name of the file containing information : ")
    info_pattern = info_pattern or "*__LOG.txt"

    # Scan the folder; when several files match a pattern, the last one
    # encountered wins (same as before).
    for _, _, filenames in walk(location):
        for filename in filenames:
            if fnmatch.fnmatch(filename, data_pattern):
                location_and_data = location + filename
                data_file_name = filename.split('.')[0]
            if fnmatch.fnmatch(filename, info_pattern):
                location_and_information = location + filename
    return (location_and_data, location_and_information, data_file_name)
def read_content_of_file(location_and_data, location_and_information, data_file_name):
    '''
    Read the tab-separated measurement data and the experiment description,
    and concatenate the per-sweep columns into one long x/y series.

    Input: location_and_data, location_and_information, data_file_name
    Output: (total_x_axis, total_y_axis, information, data_file_name)
    '''
    # The first data row holds the units (strings); drop it before the
    # float conversion, which would otherwise fail.
    data = pd.read_table(location_and_data)
    data = pd.DataFrame(data).drop([0])
    data = data.astype(float)

    # Collect the free text written between pairs of '#$' markers in the
    # log file. Marker lines themselves are never copied; a line is kept
    # while the running marker count is odd. Use a context manager so the
    # file handle is closed (the old code leaked it).
    information = ''
    count = 0
    with open(location_and_information) as information_file:
        for line in information_file:
            if "#$" in line:
                count += 1
                continue
            if count % 2 != 0:
                information += line

    # Concatenate the sweep columns S1C1, S2C1, ... (x) and S1C2, S2C2, ...
    # (y) into single series. pd.concat replaces Series.append, which was
    # deprecated and removed in pandas 2.0.
    num_pairs = data.shape[1] // 2
    x_parts = [data['S' + str(i + 1) + 'C1'] for i in range(num_pairs)]
    y_parts = [data['S' + str(i + 1) + 'C2'] for i in range(num_pairs)]
    total_x_axis = pd.concat(x_parts) if x_parts else pd.Series(dtype='float64')
    total_y_axis = pd.concat(y_parts) if y_parts else pd.Series(dtype='float64')
    return (total_x_axis, total_y_axis, information, data_file_name)
def plotter(total_x_axis, total_y_axis, data_file_name, information=''):
    '''
    This program plots the figure based on the data received.
    Input: total_x_axis, total_y_axis, data_file_name, information = ''
    User input: current_unit, Title of the plot.
    Output: graph

    Fix: an unrecognised unit answer used to leave `current_unit` unbound
    and crash with UnboundLocalError; unknown input now falls back to nA.
    '''
    # --- Current unit selection (default: nA, i.e. 1e-9 A) ---------------
    str_current_unit = str.lower(input('Enter the current input (i.e., nA for 1e-9) : '))
    units = {
        '': (1e-9, 'nA'),
        'a': (1.0, 'A'),
        'ma': (1e-3, 'mA'),
        'ua': (1e-6, 'µA'),
        'na': (1e-9, 'nA'),
        'pa': (1e-12, 'pA'),
    }
    current_unit, str_current_unit = units.get(str_current_unit, (1e-9, 'nA'))
    current_unit = float(current_unit)

    # --- Plot title -------------------------------------------------------
    title_plot = input('Enter the title of the plot : ')
    title_plot = 'IV characteriscs' if title_plot == '' else title_plot

    # Position (in axes coordinates) of the information text block,
    # placed just below the x axis.
    left, width = 0.0, 0.0
    bottom, height = -0.15, 0.0

    fig, ax = plt.subplots(figsize=(8, 6))
    ax.set_title(title_plot)
    ax.plot(total_x_axis, total_y_axis / current_unit, 'k:', label="IV characteristics",)
    ax.grid(which='both')
    ax.set_xlabel('Voltage (V)')
    ax.set_ylabel('Current (' + str_current_unit + ')')
    ax.text(left, bottom, information, horizontalalignment='left', verticalalignment='top', transform=ax.transAxes)
    # Pad the axis limits by 5 units on each side of the data range.
    ax.set_ylim(math.floor(min(total_y_axis) / current_unit) - 5, math.ceil(max(total_y_axis) / current_unit) + 5)
    ax.set_xlim((min(total_x_axis) - 5), 5 + (max(total_x_axis)))
    ax.legend()
    ax.xaxis.set_minor_locator(AutoMinorLocator(2))
    ax.yaxis.set_minor_locator(AutoMinorLocator(2))
    ax.tick_params(which='both', width=2)
    ax.tick_params(which='major', length=7)
    ax.tick_params(which='minor', length=4, color='k')
    plt.show()
###---------------------------------------
if __name__ == '__main__':
    try:
        location_and_data, location_and_information, data_file_name = read_file()
    except UnboundLocalError:
        # read_file leaves its locals unbound when no file matched a pattern.
        print("File not found !")
        exit()
    else:
        # Unpack in the order read_content_of_file actually returns:
        # (x, y, information, name). The previous code swapped the last two
        # names here AND in the plotter call, which cancelled out but was
        # highly misleading.
        total_x_axis, total_y_axis, information, data_file_name = read_content_of_file(
            location_and_data, location_and_information, data_file_name)
        plotter(total_x_axis, total_y_axis, data_file_name, information)
|
{"hexsha": "6fa13e3932a5a8687c690b1e78563cf7937b09b6", "size": 7630, "ext": "py", "lang": "Python", "max_stars_repo_path": "labview_plot/labview_plot.py", "max_stars_repo_name": "Pankwings/rough_python_practice_", "max_stars_repo_head_hexsha": "3a1e62630c92223b1f02995afa3947504e509858", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "labview_plot/labview_plot.py", "max_issues_repo_name": "Pankwings/rough_python_practice_", "max_issues_repo_head_hexsha": "3a1e62630c92223b1f02995afa3947504e509858", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "labview_plot/labview_plot.py", "max_forks_repo_name": "Pankwings/rough_python_practice_", "max_forks_repo_head_hexsha": "3a1e62630c92223b1f02995afa3947504e509858", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.1546961326, "max_line_length": 198, "alphanum_fraction": 0.6263433814, "include": true, "reason": "import numpy", "num_tokens": 1800}
|
using LinearAlgebra
"""
implementation of the Limiter
"""
# Overwrites the degrees of freedom of variable `k` in `data` with the
# limited linear reconstruction  u(x, y) = a + b*(x - xc) + c*(y - yc).
#   data  = dofs of one cell (rows: quadrature points, columns: variables)
#   k     = variable number; which column in `data` to modify
#   basis = basis of the problem / node information (order, quadpoints)
#   a     = constant term (cell mean) of the linear reconstruction for k
#   b, c  = limited slopes in the x and y directions
#   cell  = the cell, providing its center and the local->global mapping
function update_solution(data, k, basis, a, b, c, cell)
    linearindex = vec(collect(Iterators.product(1:basis.order, 1:basis.order))) #linear indexing
    cellcenter = cell.center
    for i in 1:length(basis)
        # globalposition maps reference quadrature coordinates to physical
        # coordinates (assumes length(basis) == basis.order^2 -- TODO confirm)
        x, y = globalposition(cell, (basis.quadpoints[linearindex[i][1]], basis.quadpoints[linearindex[i][2]]))
        data[i,k] = a + b * ( x - cellcenter[1] ) + c * ( y - cellcenter[2] )
    end
end
# Minmod slope limiter: for every cell and every variable, compares the
# DG slope (s1) against the forward/backward mean-difference slopes towards
# the neighbours (s2, s3) per dimension; when they disagree in sign the slope
# is zeroed, otherwise the smallest-magnitude slope is kept, and the cell's
# dofs are rewritten as a limited linear function.
#
# Fixes over the previous version:
#  * `s2 = 0` / `s3 = 0` at boundary faces replaced a Vector with a scalar,
#    so the later `s2[k]` threw a BoundsError for every k > 1; they are now
#    zero vectors of length ndofs.
#  * `needs_limiting` was created with `BitArray(undef, ndofs)` (undefined
#    contents, never set to false), which could trigger spurious limiting;
#    it now starts as all-false.
function minmod_limiter(eq, scenario, dofs, grid, basis, ndofs, ndims)
    # Travelling each cell in a grid
    for i in eachindex(grid.cells)
        @views cell = grid.cells[i]
        facetypes = cell.facetypes
        @views data = dofs[:,:, cell.dataidx]
        # 2D tensor-product quadrature weights.
        weights = kron(basis.quadweights, basis.quadweights)

        # Cell (weighted) average of this cell: a = ∑_n w_n u_n
        uown = vec(data' * weights)

        # Cell averages of all four neighbouring cells.
        u_mean_neighbour = zeros(4, ndofs)
        for (n, neigh) in enumerate(cell.neighbors)
            @views dofsneigh = dofs[:,:,neigh.dataidx]
            u_mean_neighbour[n,:] = dofsneigh' * weights
        end

        needs_limiting = falses(ndofs)  # per-variable flag, all false initially
        s_limit = Array{Float64}(undef, ndofs, ndims)
        for j in 1:ndims
            # Reference coordinates along dimension j for every quad point,
            # used to evaluate (x - 1/2) resp. (y - 1/2).
            if j == 1
                points = repeat(basis.quadpoints, length(basis.quadpoints))
            else
                points = vcat(fill.(basis.quadpoints, length(basis.quadpoints))...)
            end
            # DG slope estimate, e.g. for x: s1 = 12 * ∑_n w_n (x_n - 1/2) u_n
            s1 = 12 * vec(sum(weights .* (points .- 0.5) .* data, dims=1))
            s1 /= cell.size
            # Backward difference towards neighbour on face j.
            s2 = (uown - u_mean_neighbour[j, :]) / cell.size
            if facetypes[j] == 2 # at boundaries set slope to 0
                s2 = zeros(ndofs)
            end
            # Forward difference towards neighbour on the opposite face.
            s3 = (u_mean_neighbour[j + 2, :] - uown) / cell.size
            if facetypes[j + 2] == 2 # at boundaries set slope to 0
                s3 = zeros(ndofs)
            end
            for k in 1:ndofs
                if s1[k]*s2[k] > 0 && s1[k]*s3[k] > 0 # if all have same sign
                    # minmod: keep the smallest slope, with the common sign
                    s_limit[k, j] = sign(s1[k]) * min(abs(s1[k]), abs(s2[k]), abs(s3[k]))
                else
                    s_limit[k, j] = 0
                end
                if s_limit[k, j] != s1[k]
                    needs_limiting[k] = true
                end
            end
        end
        # Rewrite only the variables whose slope actually changed.
        for k in 1:ndofs
            if needs_limiting[k]
                update_solution(data, k, basis, uown[k], s_limit[k,1], s_limit[k,2], cell)
            end
        end
    end
end
|
{"hexsha": "290219379de40effbc6f96b32ef1792f6fad94a8", "size": 3207, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/kernels/limiter.jl", "max_stars_repo_name": "ashishdarekar/Modern-Wave-Propagation-Discontinuous-Galerkin-Julia", "max_stars_repo_head_hexsha": "b0fa6cda2fc2bdf3ad422abcba7f29a6c168db64", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/kernels/limiter.jl", "max_issues_repo_name": "ashishdarekar/Modern-Wave-Propagation-Discontinuous-Galerkin-Julia", "max_issues_repo_head_hexsha": "b0fa6cda2fc2bdf3ad422abcba7f29a6c168db64", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/kernels/limiter.jl", "max_forks_repo_name": "ashishdarekar/Modern-Wave-Propagation-Discontinuous-Galerkin-Julia", "max_forks_repo_head_hexsha": "b0fa6cda2fc2bdf3ad422abcba7f29a6c168db64", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.4838709677, "max_line_length": 111, "alphanum_fraction": 0.5528531338, "num_tokens": 916}
|
# preprocess_dream_prior.py
#
#
import pandas as pd
import numpy as np
import argparse
import json
def build_weighted_adj(eda_filename):
    """Parse a DREAM EDA edge-score file into a dense weighted adjacency matrix.

    The file is space-separated; after reset_index the source antibody ends up
    in column ``level_0`` and the target in ``level_2``, with the weight in
    ``EdgeScore``.

    Returns (adj, antibody_ls): adj[i, j] is the EdgeScore from antibody i to
    antibody j, and antibody_ls maps index -> antibody name.
    """
    table = pd.read_csv(eda_filename, sep=" ")
    table.reset_index(inplace=True)

    names = table["level_0"].unique()
    print("ANTIBODIES: ", names)
    index_of = {name: idx for idx, name in enumerate(names)}

    size = len(index_of)
    adj = np.zeros((size, size))
    for _, row in table.iterrows():
        src = row["level_0"]
        dst = row["level_2"]
        adj[index_of[src], index_of[dst]] = row["EdgeScore"]
    print(adj)

    # Invert the name -> index map into an index-ordered list of names.
    ordered = [None] * size
    for name, idx in index_of.items():
        ordered[idx] = name
    return adj, ordered
if __name__ == "__main__":
    # CLI: read an EDA edge-score file, write the adjacency matrix as CSV and
    # the index->antibody mapping as JSON.
    parser = argparse.ArgumentParser(description="")
    parser.add_argument("eda_file", help="path to a DREAM challenge time series CSV file")
    parser.add_argument("output_file", help="path where the output CSV will be written")
    parser.add_argument("antibody_file", help="path to output JSON file containing the indices of antibodies")
    args = parser.parse_args()

    adj_mat, antibody_ls = build_weighted_adj(args.eda_file)

    df = pd.DataFrame(adj_mat)
    df.to_csv(args.output_file, sep=",", index=False, header=False)

    # Use a context manager so the JSON file is flushed and closed reliably
    # (the previous code left the handle open).
    with open(args.antibody_file, "w") as antibody_fh:
        json.dump(antibody_ls, antibody_fh)
|
{"hexsha": "a67c3ac738601af7f85782663a44a8da602ef6b6", "size": 1360, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/preprocess_dream_prior.py", "max_stars_repo_name": "gitter-lab/ssps", "max_stars_repo_head_hexsha": "8557cb1961bcd951c5f78102070925e945567600", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2020-05-13T03:57:19.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-29T02:45:13.000Z", "max_issues_repo_path": "scripts/preprocess_dream_prior.py", "max_issues_repo_name": "gitter-lab/ssps", "max_issues_repo_head_hexsha": "8557cb1961bcd951c5f78102070925e945567600", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 18, "max_issues_repo_issues_event_min_datetime": "2020-04-29T14:33:48.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-06T23:12:00.000Z", "max_forks_repo_path": "scripts/preprocess_dream_prior.py", "max_forks_repo_name": "gitter-lab/ssps", "max_forks_repo_head_hexsha": "8557cb1961bcd951c5f78102070925e945567600", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.6603773585, "max_line_length": 110, "alphanum_fraction": 0.6786764706, "include": true, "reason": "import numpy", "num_tokens": 356}
|
import numpy as np
import matplotlib
import datetime
matplotlib.use('TkAgg')
from fastai.text import *
import sys
sys.excepthook = sys.__excepthook__ # See https://groups.io/g/insync/topic/13778827?p=,,,20,0,0,0::recentpostdate%2Fsticky,,,20,2,0,13778827
import json
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
import traceback
import logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
def load_vader_analyzer():
    """Construct and return a fresh VADER SentimentIntensityAnalyzer."""
    analyzer = SentimentIntensityAnalyzer()
    return analyzer
def load_model(itos_filename, classifier_filename):
    """Load the classifier and int to string mapping

    Args:
        itos_filename (str): The filename of the int to string mapping file (usually called itos.pkl)
        classifier_filename (str): The filename of the trained classifier

    Returns:
        string to int mapping (defaultdict; unknown tokens map to 0),
        trained classifier model in evaluation mode
    """
    # load the int to string mapping file; use a context manager so the
    # file handle is closed (the original leaked the anonymous handle)
    with Path(itos_filename).open('rb') as itos_fh:
        itos = pickle.load(itos_fh)
    # turn it into a string to int mapping (which is what we need)
    stoi = collections.defaultdict(lambda: 0, {str(v): int(k) for k, v in enumerate(itos)})
    # these parameters aren't used, but this is the easiest way to get a model
    bptt, em_sz, nh, nl = 70, 400, 1150, 3
    dps = np.array([0.4, 0.5, 0.05, 0.3, 0.4]) * 0.5
    num_classes = 2  # this is the number of classes we want to predict
    vs = len(itos)
    model = get_rnn_classifer(bptt, 20 * 70, num_classes, vs, emb_sz=em_sz, n_hid=nh, n_layers=nl, pad_token=1,
                              layers=[em_sz * 3, 50, num_classes], drops=[dps[4], 0.1],
                              dropouti=dps[0], wdrop=dps[1], dropoute=dps[2], dropouth=dps[3])
    # load the trained classifier weights onto the CPU
    model.load_state_dict(torch.load(classifier_filename, map_location=lambda storage, loc: storage))
    # put the classifier into evaluation mode
    model.reset()
    model.eval()
    return stoi, model
def softmax(x):
    '''
    Numerically stable softmax, computed row-wise (1-D input is promoted to a
    single row). Via comments on https://gist.github.com/stober/1946926

    >>> res = softmax(np.array([0, 200, 10]))
    >>> np.sum(res)
    1.0
    >>> np.all(np.abs(res - np.array([0, 1, 0])) < 0.0001)
    True
    >>> res = softmax(np.array([[0, 200, 10], [0, 10, 200], [200, 0, 10]]))
    >>> np.sum(res, axis=1)
    array([ 1.,  1.,  1.])
    >>> res = softmax(np.array([[0, 200, 10], [0, 10, 200]]))
    >>> np.sum(res, axis=1)
    array([ 1.,  1.])
    '''
    if x.ndim == 1:
        x = x.reshape((1, -1))
    # Subtract the per-row maximum before exponentiating to avoid overflow.
    shifted = x - np.max(x, axis=1, keepdims=True)
    exps = np.exp(shifted)
    return exps / np.sum(exps, axis=1, keepdims=True)
# Warning: If there are not enough cores and try parallel, we may get the "ValueError: max_workers must be greater than 0" exception
def predict_text_sentiment(stoi, model, text, parallel=False):
    """Do the actual prediction on the text using the
    model and mapping files passed

    Args:
        stoi: string-to-int token mapping (unknown tokens fall back to 0)
        model: trained RNN classifier (see load_model)
        text (str): raw text to score
        parallel (bool): tokenize with multiprocessing when True

    Returns:
        1-D numpy array of class probabilities (softmax over the model's
        output for the single input; 2 classes per load_model's num_classes).
    """
    # prefix text with tokens:
    #   xbos: beginning of sentence
    #   xfld 1: we are using a single field here
    input_str = 'xbos xfld 1 ' + text
    # predictions are done on arrays of input.
    # We only have a single input, so turn it into a 1x1 array
    texts = [input_str]
    # tokenize using the fastai wrapper around spacy
    if parallel:
        tok = Tokenizer().proc_all_mp(partition_by_cores(texts))
    else:
        tok = Tokenizer().proc_all(texts, lang='en')
    # turn into integers for each word
    encoded = [stoi[p] for p in tok[0]]
    # we want a [x,1] array where x is the number
    # of words inputted (including the prefix tokens)
    ary = np.reshape(np.array(encoded), (-1, 1))
    # turn this array into a tensor
    tensor = torch.from_numpy(ary)
    # wrap in a torch Variable
    variable = Variable(tensor)
    # do the predictions
    predictions = model(variable)
    # convert back to numpy; predictions[0] is the classifier head output
    numpy_preds = predictions[0].data.numpy()
    return softmax(numpy_preds[0])[0]
def predict_text_sentiment_vader_normalized(vader_analyzer, text):
    """Score `text` with VADER and rescale the compound score from [-1, 1] to [0, 1]."""
    compound = vader_analyzer.polarity_scores(text)['compound']
    return (compound + 1) / 2
def calculate_bull_vader_sentiment(model_scores, vader_score):
    """Blend the model's bull probability with the VADER score (80/20 weighted average)."""
    bull_probability = model_scores[1]
    return bull_probability*0.8 + vader_score*0.2
def predict_record(
    record, stoi, model, vader_analyzer,
    itos_file_path, trained_classifier_file_path, input_data_file_path
):
    """Score one message record and attach a 'predictions' payload to it.

    Reads record['data']['text'], runs the RNN classifier and VADER,
    and writes model/provenance metadata plus the four sentiment values
    into record['predictions']. Mutates and returns the same record.
    """
    text = record['data']['text']
    # Softmax: model_scores[1] (bull) + model_scores[0] (bear) = 1.0
    scores = predict_text_sentiment(stoi, model, text, num_cpus() > 1)
    vader_score = predict_text_sentiment_vader_normalized(vader_analyzer, text)
    # Milliseconds since the Unix epoch, computed from a naive UTC timedelta.
    delta_ts = datetime.datetime.utcnow() - datetime.datetime(1970, 1, 1)
    prediction_processed_ts_ms = int((delta_ts.days * 24 * 60 * 60 + delta_ts.seconds) * 1000 + delta_ts.microseconds / 1000.0)
    record['predictions'] = {
        'model': trained_classifier_file_path,
        'context': [{'stoi': itos_file_path, 'inputDataset': input_data_file_path}],
        'predictionProcessedTsMs': prediction_processed_ts_ms,
        'values': {
            'bear_sentiment': float(scores[0]),
            'bull_sentiment': float(scores[1]),
            'vader_sentiment': float(vader_score),
            'bull_vader_sentiment': calculate_bull_vader_sentiment(scores, vader_score)
        }
    }
    # TODO: We'll do more robust / elegant field management once message processing matures in this project.
    # NOTE(review): assumes record['msgType'] contains the substring 'request' — confirm with producer.
    record['msgType'] = record['msgType'].replace('request', 'response')
    return record
|
{"hexsha": "e524d90ccc700a700708f2816a20b8927510a619", "size": 5677, "ext": "py", "lang": "Python", "max_stars_repo_path": "inference/sentiment_inference.py", "max_stars_repo_name": "MiguelPeralvo/teslamonitor", "max_stars_repo_head_hexsha": "7e81ec47d028e847d44183542a1bf6d2b2ce024c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "inference/sentiment_inference.py", "max_issues_repo_name": "MiguelPeralvo/teslamonitor", "max_issues_repo_head_hexsha": "7e81ec47d028e847d44183542a1bf6d2b2ce024c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "inference/sentiment_inference.py", "max_forks_repo_name": "MiguelPeralvo/teslamonitor", "max_forks_repo_head_hexsha": "7e81ec47d028e847d44183542a1bf6d2b2ce024c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.0432098765, "max_line_length": 141, "alphanum_fraction": 0.6580940638, "include": true, "reason": "import numpy", "num_tokens": 1605}
|
from dataloader.DataloaderApi import *
import torch.nn as nn
import torch.optim
import time
import difflib
import torch.nn.functional as F
import datetime
from collections import Counter
from GPT.RAdam.radam.radam import RAdam
from math import log
from numpy import array
from numpy import argmax
from collections import defaultdict
import copy
import re
class Candidate:
    """A partially expanded beam-search hypothesis.

    pre_ids: list of subword token ids accumulated so far.
    pro: cumulative probability of the prefix.
    is_complete: whether the hypothesis ends a whole token.
    """

    def __init__(self, pre_ids, pro, is_complete):
        self.pre_ids = pre_ids
        self.pro = pro
        self.is_complete = is_complete
class BestToken:
    """A completed beam-search token: its subword id prefix and probability."""

    def __init__(self, pre_ids, pro):
        self.pre_ids = pre_ids
        self.pro = pro
class Classifier:
def __init__(self, model, model_lstm, args, vocab, word_vocab, Rvocab, tokenizer):
self.model = model
self.model_lstm = model_lstm
self.vocab = vocab
self.args = args
self.word_vocab = word_vocab
self.Rvocab = Rvocab
self.counter = Counter()
self.next_api = 0
self.pad_id = tokenizer.pad_token_id
self.eos_id = tokenizer.eos_token_id
self.tokenizer = tokenizer
self.ep = 0
self.control_num = 0
    # Print the model architecture / parameters for inspection.
    def summary(self):
        """Print the wrapped model's repr to stdout."""
        print(self.model)
    # Training loop
    def train(self, train_data, dev_data, args_device, arg, train_batch_len=None,
              dev_batch_len=None):
        """Train the language model with RAdam and top-2-accuracy early stopping.

        Runs up to self.args.epoch epochs; after each epoch evaluates on
        dev_data via self.validate. Stops early once dev accuracy has failed
        to improve for more than 2 consecutive epochs. Optionally saves the
        model when arg.is_save is set.

        Returns:
            (best dev accuracy, last dev perplexity, epoch index reached).

        NOTE(review): dev_acc_list is only appended on improvement, so the
        final `dev_acc_list[-1]` raises IndexError if no epoch ever improves.
        NOTE(review): train_num / word_acc are never updated past their
        initial values, so the printed train_word_acc looks vestigial.
        """
        optimizer = RAdam(filter(lambda p: p.requires_grad, self.model.parameters()), lr=self.args.lr,
                          weight_decay=self.args.weight_decay)
        train_loss_list, train_acc_list = [], []
        best_acc = 0
        dev_loss_list, dev_acc_list = [], []
        patenice = 0
        for ep in range(self.args.epoch):
            self.ep = ep
            train_data_num = 1
            train_num = 1
            train_acc = 0
            train_acc_1 = 0
            train_loss = 0
            word_acc = 0
            # word_num = 0
            start_time = datetime.datetime.now()
            # put the model into training mode
            self.model.train()
            print("start train")
            for onebatch in get_batch_train(train_data, self.args.batch_size, arg, train_batch_len):
                words, tags, mask, seq_lengths = batch_numberize(onebatch, args_device, arg)
                # next-token targets: shift the input left by one position
                targets = words[:, 1:].contiguous()
                pred = self.model(words)
                pred = pred[:, :-1].contiguous()
                # backpropagate and compute the loss
                loss = self.compuate_loss(targets.view(-1), pred.view(-1, pred.shape[-1]))
                optimizer.zero_grad()
                loss.backward()
                # TODO(original author): "remember to change this back"
                optimizer.step()
                train_loss += loss.data.item()
                acc_1, num = self.compuate_acc(targets.view(-1), pred.view(-1, pred.shape[-1]))
                train_acc += acc_1
                train_data_num += num
            end_time = datetime.datetime.now()
            during_time = end_time - start_time
            print (len(dev_data))
            dev_acc, dev_loss, dev_data_num, dev_word_acc, dev_num, perplexity = self.validate(dev_data, args_device,
                                                                                              arg, dev_batch_len,
                                                                                              False)
            train_acc /= train_data_num
            train_loss /= train_data_num
            # train_acc_list.append(train_acc)
            train_loss_list.append(train_loss)
            # dev_acc_list.append(dev_acc)
            if patenice > 2:
                # self.ep = self.ep - patenice
                # # torch.save(self.model, f="data/API/trained_model_{}_jc".format(ep+1))
                break
            # early stopping: if dev accuracy hasn't improved for several epochs, stop training
            if dev_acc > best_acc:
                print(dev_acc, best_acc)
                dev_acc_list.append(dev_acc)
                best_acc = dev_acc_list[-1]
                patenice = 0
            else:
                patenice += 1
                print(patenice)
            print("[Epoch {}] train loss :{} train_acc:{} % Time:{} train_word_acc:{} 训练数据总数:{} 训练数据词总数:{}".format(
                ep + 1, train_loss, train_acc * 100, during_time, word_acc / train_num, train_data_num, train_num))
            print(
                "[Epoch {}] dev loss :{} dev_acc:{} dev_word_acc:{} % 测试数据总数:{} 测试数据词总数:{} 困惑度:{}".format(ep + 1,
                                                                                                      dev_loss,
                                                                                                      dev_acc * 100,
                                                                                                      dev_word_acc / dev_num,
                                                                                                      dev_data_num,
                                                                                                      dev_num,
                                                                                                      perplexity))
        s = datetime.datetime.now()
        print(datetime.datetime.now() - s)
        if arg.is_save:
            # torch.save(self.model.state_dict(), "data/API/trained_GPT_{}_jdbc_bpe".format(-1))
            torch.save(self.model, "data/API/data/trained_GPT_{}_swing_bpe_1".format(-1))
        return dev_acc_list[-1], perplexity, self.ep
def validate(self, dev_data, args_device, arg, batch_len, is_refine):
dev_loss = 0
dev_acc = 0
batch_num = 1
dev_data_num = 1
perplexity = 0.0
dev_word_acc = 0
num_1 = 1
num_2 = 0
self.model.eval()
# with torch.no_grad:
for line_num, onebatch in enumerate(get_batch_train(dev_data, arg.batch_size, arg, None)):
batch_num += 1
words, tags, mask, seq_lengths = batch_numberize(onebatch, args_device, arg)
targets = words[:, 1:].contiguous()
pred = self.model(words)
pred = pred[:, :-1].contiguous()
loss = self.compuate_loss(targets.view(-1), pred.view(-1, pred.shape[-1]))
dev_loss += loss.data.item()
acc_1, num = self.compuate_acc(targets.view(-1), pred.view(-1, pred.shape[-1]))
perplexity += torch.exp(loss).data.item()
dev_acc += acc_1
dev_data_num += num
dev_acc /= dev_data_num
dev_loss /= dev_data_num
perplexity /= batch_num
return dev_acc, dev_loss, dev_data_num, dev_word_acc, num_1, perplexity
    # Evaluate the model: API recommendation via beam search over subwords.
    def evluate(self, dev_data,train_cover_data, args_device, arg, batch_len, is_refine, search_word_dict):
        """Evaluate API recommendation quality with a best-first beam search.

        Three passes:
          1. over `train_cover_data`: collect the set of domain APIs seen in
             training (train_data_cover_api_counter) and fill
             `search_word_dict` (class name -> set of member names);
          2. over `dev_data`: fill `search_word_dict` the same way;
          3. over `dev_data`: run the actual recommendation, expanding
             subword beams (width arg.boundary, up to 5000 completed tokens
             per point) and accumulating top-1/3/5/10 hit counters, both
             overall and for domain APIs (tag == 1).

        Returns top-k accuracies, the number of recommendation points,
        domain and non-domain accuracy lists, the domain count, and
        cross-domain statistics.

        NOTE(review): cur_word is an expand()ed view (stride-0 batch dim);
        in-place writes to one row may alias the others — confirm intended.
        """
        reject_token = ["[EOS]","[BOS]","[PAD]","[UNK]"]
        appendControlNodesStrings = [
            "IF", "CONDITION", "THEN", "ELSE",
            "WHILE", "BODY",
            "TRY", "TRYBLOCK", "CATCH", "FINALLY",
            "FOR", "INITIALIZATION", "COMPARE", "UPDATE",
            "FOREACH", "VARIABLE", "ITERABLE",
        ]
        self.model.eval()
        control_node = 0
        k = 10
        beam_size = arg.boundary
        batch_num = 0
        # dev_data_num = 0
        # perplexity = 0.0
        # domain accuracy counters (tag == 1 positions)
        dev_word_acc_top1 = 0
        dev_word_acc_top10 = 0
        dev_word_acc_top3 = 0
        dev_word_acc_top5 = 0
        dev_word_acc_class_1 = 0
        dev_word_acc_class_3 = 0
        dev_word_acc_class_5 = 0
        dev_word_acc_class_10 = 0
        # non-control-node count
        num_1 = 1
        # control-node count
        num_2 = 0
        # correct non-control-node count
        num_3 = 0
        num_4 = 0
        num_5 = 0
        new_num = 0
        # recommendation point count
        rec_point_num = 0
        domain_count = 1
        correct_cross_api_counter = set()
        train_data_cover_api_counter = set()
        c_api_counter = set()
        cross_domain = 0
        tokensDone_list = []
        for line_num, onebatch in enumerate(get_batch_train(train_cover_data, 1, arg, None)):
            words, tags, mask, seq_lengths = batch_numberize(onebatch, args_device, arg)
            targets = words[:, 1:].contiguous()
            pred_index = 0
            for word_loc, word_len in enumerate(onebatch[0].word_index):
                true_token = []
                if word_loc == 0:
                    pred_index = word_len
                    continue
                for word_dex in range(word_len):
                    true_token.append(
                        self.tokenizer.convert_id_to_token(targets[0, pred_index:pred_index + 1].item()))
                    pred_index += 1
                true_api = "".join(true_token).replace("▁", "").replace("</t>","")
                # print(true_api)
                if onebatch[0].tags[word_loc] != 1:
                    continue
                else:
                    if true_api.find(".new") != -1:
                        continue
                    else:
                        if true_api in appendControlNodesStrings or true_api in reject_token:
                            continue
                        else:
                            if true_api.find(".") == -1:
                                continue
                            search_word_dict[true_api.split(".")[0]].add(true_api.split(".")[1])
                            train_data_cover_api_counter.add(true_api)
        print("cover api num:" ,len(train_data_cover_api_counter))
        for line_num, onebatch in enumerate(get_batch_train(dev_data, 1, arg, None)):
            words, tags, mask, seq_lengths = batch_numberize(onebatch, args_device, arg)
            targets = words[:, 1:].contiguous()
            pred_index = 0
            for word_loc, word_len in enumerate(onebatch[0].word_index):
                true_token = []
                if word_loc == 0:
                    pred_index = word_len
                    continue
                for word_dex in range(word_len):
                    true_token.append(
                        self.tokenizer.convert_id_to_token(targets[0, pred_index:pred_index + 1].item()))
                    pred_index += 1
                if onebatch[0].tags[word_loc] != 1:
                    continue
                true_api = "".join(true_token).replace("▁", "").replace("</t>","")
                # print(true_api)
                if onebatch[0].tags[word_loc] != 1:
                    continue
                else:
                    if true_api.find(".new") != -1:
                        continue
                    else:
                        if true_api in appendControlNodesStrings or true_api in reject_token:
                            continue
                        else:
                            if true_api.find(".") == -1:
                                continue
                            search_word_dict[true_api.split(".")[0]].add(true_api.split(".")[1])
        #API Recommendation process
        for line_num, onebatch in enumerate(get_batch_train(dev_data, 1, arg, None)):
            batch_num += 1
            words, tags, mask, seq_lengths = batch_numberize(onebatch, args_device, arg)
            targets = words[:, 1:].contiguous()
            if is_refine:
                true_seq = "".join(self.tokenizer.convert_ids_to_tokens(onebatch[0].input_ids))
                cahe_list =true_seq.replace("[BOS]","").replace("[PAD]","").replace("▁","").replace("</t>"," ").replace("[EOS]","").split(" ")
            pred_index_1 = 0
            pred_index = 0
            invalidate = []
            #iter words by words
            for word_loc, word_len in enumerate(onebatch[0].word_index):
                # count domain API recommendation points
                candidate_list = []
                bestToken_list = []
                beam_candidate_list = []
                tokensDone = 0
                iter = 0
                count = 0
                # if The probability of the best candidate is less than the worst current complete top-k tokens
                hope = True
                cur_word = words.contiguous().clone()
                cur_word = cur_word.expand(beam_size, 512)
                true_token = []
                if word_loc == 0:
                    # true_token1 = [self.tokenizer.convert_id_to_token(targets[0, index:index + 1].item()) for index
                    #                in range(word_len)]
                    pred_index_1 = word_len
                    pred_index = word_len
                    continue
                pred_index = pred_index_1
                varible_cut_dot = 0
                for word_dex in range(word_len):
                    if self.tokenizer.convert_id_to_token(targets[0, pred_index_1].item()) =="▁.":
                        varible_cut_dot = word_dex+1
                    true_token.append(
                        self.tokenizer.convert_id_to_token(targets[0, pred_index_1:pred_index_1 + 1].item()))
                    pred_index_1 += 1
                true_api = "".join(true_token).replace("▁", "")
                if true_api.find(".new") != -1:
                    if onebatch[0].tags[word_loc] == 0:
                        new_num += 1
                    continue
                #all_rec_point
                rec_point_num += 1
                if arg.is_only_domain:
                    if onebatch[0].tags[word_loc] == 1:
                        if true_api.find(".") == -1:
                            continue
                        domain_count += 1
                        c_api_counter.add(true_api)
                    else:
                        continue
                else:
                    if onebatch[0].tags[word_loc] == 1:
                        domain_count += 1
                        c_api_counter.add(true_api)
                true_api_nop = re.sub(u"\\(.*?\\)|\\{.*?}|\\[.*?]|\\<.*?>", "", true_api)
                true_api_nop = re.sub(u"\\(\\)|\\{|\\[\\]|\\>|\\<", "", true_api_nop)
                class_name = [words[0, index].item() for index in range(pred_index+1,pred_index+varible_cut_dot+1)]
                if len(class_name) == 0:
                    class_name_var ="".join(self.tokenizer.convert_ids_to_tokens(class_name)).replace("▁", "")
                else:
                    class_name_var = "".join(self.tokenizer.convert_ids_to_tokens(class_name)).replace("▁", "")
                pred_index = pred_index+varible_cut_dot
                append_info = [[words[0, pred_index].item(), 1]]
                while ((tokensDone <= 5000) and hope):
                    iter += 1
                    novalidate = 0
                    if len(beam_candidate_list) > 1:
                        if count >= 100:
                            break
                        for i in range(len(beam_candidate_list)):
                            if pred_index+ len(beam_candidate_list[i].pre_ids) >= 512:
                                # print()
                                count += 1
                                # tokensDone += 1
                                # print("over the limit")
                                continue
                            cur_word[i, pred_index:pred_index + len(beam_candidate_list[i].pre_ids)] = torch.tensor(
                                beam_candidate_list[i].pre_ids, dtype=torch.long)
                            currt_pred = self.model(cur_word[i:i + 1, :])
                            singel_word_pred = currt_pred[:, pred_index + len(beam_candidate_list[i].pre_ids)-1, :].clone()
                            singel_word_pred = F.softmax(singel_word_pred, dim=1)
                            subword_pro_order = torch.argsort(singel_word_pred, dim=1, descending=True)[0][:beam_size]
                            for pred_subword in subword_pro_order:
                                if self.tokenizer.convert_id_to_token(pred_subword.item()).find("</t>") != -1 or self.tokenizer.convert_id_to_token(pred_subword.item()) in reject_token:
                                    if self.tokenizer.convert_id_to_token(pred_subword.item()) in reject_token:
                                        continue
                                    tokensDone += 1
                                    update_list = [index for index in beam_candidate_list[i].pre_ids]
                                    # print(candidate_list[i].pre_ids)
                                    update_list.append(pred_subword.item())
                                    method_name ="".join([self.tokenizer.convert_id_to_token(index) for index in
                                                          update_list]).replace("▁", "").replace("</t>", "").replace(
                                        "[EOS]", "").replace("[UNK]", "").replace("[PAD]", "").replace("[BOS]","")
                                    if class_name_var != "":
                                        if method_name not in search_word_dict[class_name_var.replace(".","")]:
                                            novalidate += 1
                                            continue
                                        bestToken_list.append(BestToken(update_list,
                                                                        beam_candidate_list[i].pro * singel_word_pred[0][
                                                                            pred_subword.item()].item()))
                                    else:
                                        if method_name not in appendControlNodesStrings:
                                            continue
                                        bestToken_list.append(BestToken(update_list,
                                                                        beam_candidate_list[i].pro *
                                                                        singel_word_pred[0][
                                                                            pred_subword.item()].item()))
                                    # token_pro_sum += beam_candidate_list[i].pro * singel_word_pred[0][pred_subword.item()].item()
                                    bestToken_list = sorted(bestToken_list, key=lambda x: x.pro, reverse=True)
                                    if len(bestToken_list) > k :
                                        bestToken_list.pop(-1)
                                else:
                                    # if self.tokenizer.convert_id_to_token(pred_subword.item()) in reject_token:
                                    #     # print("end-end")
                                    #     # print(self.tokenizer.convert_id_to_token(pred_subword.item()))
                                    #     continue
                                    update_list = [index for index in beam_candidate_list[i].pre_ids]
                                    update_list.append(pred_subword.item())
                                    candidate_list.append(Candidate(update_list,
                                                                    beam_candidate_list[i].pro * singel_word_pred[0][
                                                                        pred_subword.item()].item(), False))
                            candidate_list = sorted(candidate_list, key=lambda x: x.pro, reverse=True)
                            token_pro_sum = sum([token.pro for token in bestToken_list])
                            if len(bestToken_list) >= 1 and len(candidate_list) != 0:
                                if candidate_list[0].pro < bestToken_list[-1].pro:
                                    hope = False
                            if len(candidate_list) < beam_size:
                                for i in range(len(candidate_list), 0, -1):
                                    beam_candidate_list[i - 1] = candidate_list.pop(i - 1)
                            else:
                                for i in range(beam_size,0,-1):
                                    beam_candidate_list[i-1] = candidate_list.pop(i-1)
                    else:
                        cur_word[0, pred_index] = append_info[0][0]
                        currt_pred = self.model(cur_word[0:1, :])
                        init_candidate_list, init_bestTokens = self.compuate_acc_2(
                            currt_pred[0, pred_index:pred_index + 1, :], append_info, k, reject_token,search_word_dict,class_name_var,appendControlNodesStrings,beam_size=beam_size)
                        candidate_list = [data for data in init_candidate_list]
                        bestToken_list =[data for data in init_bestTokens]
                        beam_candidate_list = [data for data in init_candidate_list]
                        # token_pro_sum = sum([token.pro for token in bestToken_list])
                        if len(bestToken_list) >= 1 and len(candidate_list) != 0:
                            if candidate_list[0].pro < bestToken_list[-1].pro:
                                hope = False
                        pred_index += 1
                        for i in range(beam_size,0,-1):
                            candidate_list.pop(i-1)
                invalidate.append(novalidate / tokensDone)
                tokensDone_list.append(tokensDone)
                final_result = []
                final_result_check = []
                final_result_nop = []
                final_class_result = []
                bestToken_list = sorted(bestToken_list, key=lambda x: x.pro, reverse=True)
                for best_token in bestToken_list[:10]:
                    final_result.append(class_name_var+"".join([self.tokenizer.convert_id_to_token(index) for index in best_token.pre_ids]).replace("▁", "").replace("</t>",""))
                    final_result_check.append(best_token.pro)
                    final_class_result.append("".join(
                        [self.tokenizer.convert_id_to_token(index) for index in best_token.pre_ids]).replace("▁",
                                                                                                             "").replace(
                        "</t>", "").replace("[EOS]", "").replace("[UNK]", "").split(".")[0])
                    raw_api = class_name_var + "".join([self.tokenizer.convert_id_to_token(index) for index in best_token.pre_ids]).replace("▁","").replace("</t>", "").replace("[EOS]", "").replace("[UNK]", "").replace("[PAD]", "")
                    final_result_nop.append('')
                if true_api.replace("</t>","") in final_result[:1]:
                    dev_word_acc_top1 += 1
                    if onebatch[0].tags[word_loc] == 1:
                        dev_word_acc_class_1 += 1
                else:
                    if true_api.replace("</t>","") not in appendControlNodesStrings:
                        pass
                    else:
                        # number of control nodes missed at top-1
                        control_node += 1
                        pass
                if true_api.replace("</t>","") in final_result[:3]:
                    dev_word_acc_top3 += 1
                    if onebatch[0].tags[word_loc] == 1:
                        dev_word_acc_class_3 += 1
                if true_api.replace("</t>", "") in final_result[:5]:
                    dev_word_acc_top5 += 1
                    if onebatch[0].tags[word_loc] == 1:
                        dev_word_acc_class_5 += 1
                if true_api.replace("</t>", "") in final_result:
                    if true_api.replace("</t>", "") not in appendControlNodesStrings:
                        num_3 += 1
                    else:
                        num_4 += 1
                    dev_word_acc_top10 += 1
                    if true_api not in train_data_cover_api_counter:
                        if onebatch[0].tags[word_loc] == 1:
                            correct_cross_api_counter.add(true_api)
                            cross_domain += 1
                    if onebatch[0].tags[word_loc] == 1:
                        dev_word_acc_class_10 += 1
                else:
                    # print(true_api)
                    # print(final_result)
                    if true_api.replace("</t>", "") not in appendControlNodesStrings:
                        num_1 += 1
                        if true_api_nop.replace("</t>", "") in final_result_nop:
                            print("参数错误")
                        else:
                            if onebatch[0].tags[word_loc] == 0:
                                num_5 += 1
                    else:
                        num_2 += 1
        word_acc_1 = dev_word_acc_top1 / rec_point_num
        word_acc_3 = dev_word_acc_top3 / rec_point_num
        word_acc_5 = dev_word_acc_top5 / rec_point_num
        word_acc_10 = dev_word_acc_top10 / rec_point_num
        dev_1 = dev_word_acc_class_1 / domain_count
        dev_3 = dev_word_acc_class_3 / domain_count
        dev_5 = dev_word_acc_class_5 / domain_count
        dev_10 = dev_word_acc_class_10 / domain_count
        nondev_1 = (dev_word_acc_top1 - dev_word_acc_class_1) / (rec_point_num - domain_count)
        nondev_3 = (dev_word_acc_top3 - dev_word_acc_class_3) / (rec_point_num - domain_count)
        nondev_5 = (dev_word_acc_top5 - dev_word_acc_class_5) / (rec_point_num - domain_count)
        nondev_10 = (dev_word_acc_top10 - dev_word_acc_class_10) / (rec_point_num - domain_count)
        print(dev_word_acc_top1,dev_word_acc_class_1)
        print(dev_word_acc_top10, dev_word_acc_class_10)
        print("非控制结构:",num_3,"控制结构",num_4)
        print("非参数错误:",num_5)
        print("非控制结构错误:", num_1, "控制结构错误", num_2)
        print("domain acc: top1:{} top3:{} top5:{} top10:{}".format(dev_1,dev_3,dev_5,dev_10))
        avg_npvalid = np.mean(invalidate)
        print("No:avg:",avg_npvalid)
        print(np.mean(tokensDone_list))
        print(np.max(tokensDone_list))
        d_api_counter = train_data_cover_api_counter & c_api_counter
        print("------------------------------")
        print("cross domain")
        print(correct_cross_api_counter)
        print (len(correct_cross_api_counter))
        correct_cross_api_per = cross_domain /domain_count
        correct_cross_api_per_1 = cross_domain / dev_word_acc_class_10
        # if len(c_api_counter) == 0:
        #     print ("coverage:",len(d_api_counter)/ 1)
        # else:
        #     print("coverage:", len(d_api_counter) / len(c_api_counter))
        return word_acc_1,word_acc_3,word_acc_5,word_acc_10, rec_point_num,[dev_1,dev_3,dev_5,dev_10],[nondev_1,nondev_3,nondev_5,nondev_10],domain_count,[len(correct_cross_api_counter),correct_cross_api_per,correct_cross_api_per_1]
# 评估模型
# 计算准确率
def compuate_acc(self, true_tags, logit):
# logit = F.softmax(logit,dim=1)
# true_tags = true_tags[:, :1]
# print(true_tags.shape)
# print(logit.shape)
correct_num = 0
# true_tags = true_tags[:, :1]
# true_tags = true_tags.squeeze(dim=1)
# 返回正确的item的数目,eq是返回一个矩阵,sum之后返回总数
# for i in range(logit.shape[0]):
# if true_tags[i] in torch.argsort(logit[i], descending=True)[: 5]:
# correct_num += 1
# true_tags = true_tags.squeeze(dim=1)
# if not self.model.training:
# for i in range(true_tags.shape[0]):
# if words[i][seq_lengths[i]-1].item() == 2:
# self.next_api += 1
# if torch.eq(torch.argmax(logit[i],dim=0),true_tags[i]).sum().item() == 0:
# # print(111)
# self.counter[true_tags[i].item()] += 1
select_index = []
for i in range(logit.shape[0]):
if true_tags[i].item() != 0:
# prediction[i] = logit[i]
select_index.append(i)
if len(select_index) == 0:
# print(true_tags)
return 0, 0
# print(len(select_index))
logit = torch.index_select(logit, 0, torch.tensor(select_index).long().to(self.args.device))
true_tags = torch.index_select(true_tags, 0, torch.tensor(select_index).long().to(self.args.device))
logit = F.softmax(logit, dim=1)
for i in range(logit.shape[0]):
if true_tags[i] in torch.argsort(logit[i], descending=True)[: 2]:
correct_num += 1
# 返回正确的item的数目,eq是返回一个矩阵,sum之后返回总数
# return torch.eq(torch.argmax(logit, dim=1), true_tags).sum().item(), true_tags.shape[0]
# 返回正确的item的数目,eq是返回一个矩阵,sum之后返回总数
# return torch.eq(torch.argmax(logit,dim=1),true_tags).sum().item()
return correct_num, true_tags.shape[0]
    def compuate_acc_1(self, true_tags, logit):
        """Top-5 accuracy plus next-step candidate ids.

        Counts targets found in the top-5 predictions of their row, and for
        every row appends the top `self.args.boundary` token ids of the LAST
        row (logit[-1]) to `append_info`.

        Returns:
            (correct_num, number of rows, append_info).

        NOTE(review): the inner loops reuse the loop variable `i`, shadowing
        the outer row index, and both branches append the same ids from
        logit[-1] once per row — so append_info holds rows*boundary
        duplicated entries. Looks unintended; confirm with the author.
        """
        # print(true_tags.shape)
        # print(logit.shape)
        correct_num = 0
        select_index = []
        append_info = []
        for i in range(true_tags.shape[0]):
            # if true_tags[i].item() != 0:
            # prediction[i] = logit[i]
            select_index.append(i)
        # print(select_index)
        # if len(select_index) == 0:
        #
        #     return 0, 0,0
        # print(len(select_index))
        logit = torch.index_select(logit, 0, torch.tensor(select_index).long().to(self.args.device))
        true_tags = torch.index_select(true_tags, 0, torch.tensor(select_index).long().to(self.args.device))
        logit = F.softmax(logit, dim=1)
        # print(torch.argsort(logit[i], descending=True)[: 5])
        for i in range(logit.shape[0]):
            if true_tags[i].item() in torch.argsort(logit[i], descending=True)[: 5].tolist():
                correct_num += 1
                # if true_tags[i].item() == torch.argsort(logit[i], descending=True)[0].item():
                #     append_info.append(true_tags[i].item())
                # # append_info.append(torch.argsort(logit[i], descending=True)[0].item())
                # # append_info.append(true_tags[i].item())
                # else:
                for i in range(self.args.boundary):
                    append_info.append(torch.argsort(logit[-1], descending=True)[i].item())
            else:
                for i in range(self.args.boundary):
                    append_info.append(torch.argsort(logit[-1], descending=True)[i].item())
        # if true_tags[-1] not in torch.argsort(logit[-1], descending=True)[: 5]:
        # if len(append_info) < 5:
        #     print(append_info)
        #     append_info = append_info + [append_info[0]] * (5-len(append_info))
        return correct_num, true_tags.shape[0], append_info
def compuate_acc_2(self, logit, pre_info, k, reject_token,search_dict,class_name,control_lable,beam_size,target=None):
bestTokens = []
pre_candidate = []
lowest_pro = 0.0
logit = F.softmax(logit, dim=1)
sort = torch.argsort(logit, dim=1, descending=True)
flag1 = False
flag2 = False
# acc_num = 0
# print(target)
# print(sort[0][:5].tolist())
# if target in sort[0][:5].tolist():
# acc_num += 1
if len(pre_info) != 1:
for i in range(logit.shape[0]):
for j in range(self.args.boundary):
append_info.append((sort[-1][j].item() % self.tokenizer.vocab_size,
logit[-1][sort[-1][j].item()].item() *
pre_info[int(sort[-1][j].item() / self.tokenizer.vocab_size)][1]))
pre_candidate.append(pre_info[int(sort[-1][j].item() / self.tokenizer.vocab_size)][0])
else:
# for i in range(logit.shape[0]):
for j in range(logit.shape[1]):
if flag1 and flag2:
break
# print(pre_info[0][1])
# print(logit[-1][sort[-1][j].item()])
# print(sort[-1][j].item())
# print(target)
if len(pre_candidate) < beam_size:
if self.tokenizer.convert_id_to_token(sort[0][j].item()).find(
"</t>") == -1 and self.tokenizer.convert_id_to_token(
sort[0][j].item()) not in reject_token:
pre_candidate.append(
Candidate([sort[0][j].item()], logit[0][sort[0][j].item()].item(), False))
else:
flag1 = True
if len(bestTokens) < k:
method_name = self.tokenizer.convert_id_to_token(sort[0][j].item()).replace("▁","")
if class_name == "":
# print(method_name)
if method_name.replace("</t>", "") in control_lable:
bestTokens.append(BestToken([sort[0][j].item()], logit[0][sort[0][j].item()].item()))
# bestTokens.append(BestToken([sort[0][j].item()], logit[0][sort[0][j].item()].item()))
else:
if method_name.find(
"</t>") != -1 or method_name in reject_token:
if method_name in reject_token:
continue
# bestTokens.append(BestToken([sort[0][j].item()], logit[0][sort[0][j].item()].item()))
else:
if method_name.replace("</t>","") in search_dict[class_name]:
bestTokens.append(BestToken([sort[0][j].item()], logit[0][sort[0][j].item()].item()))
else:
# print (len(bestTokens))
# print (11111)
# print (flag1)
flag2 = True
# pre_candidate.append(sort[-1][j].item() % 4000)
bestTokens = sorted(bestTokens, key=lambda x: x.pro, reverse=True)
# print(lowest_pro)
# print(len(bestTokens))
return pre_candidate, bestTokens
# 计算损失
def compuate_loss(self, true_tags, logit):
# CrossEntropyLoss = LogSoftmax + NLLoss
loss = nn.CrossEntropyLoss(ignore_index=self.pad_id)
loss = loss(logit, true_tags)
# loss = loss(logit.view(-1, logit.shape[-1]), true_tags.view(-1))
# print(loss.data)
return loss
    def is_validate(self, pred_sub_word_order, validate_class_name, search_word_dict):
        """Greedily extend a partial API name with the first subword that keeps
        it a prefix of some known member.

        Args:
            pred_sub_word_order: 1-D tensor of candidate subword ids, best first.
            validate_class_name: pair (current partial name, iterable of class
                names to search under).
            search_word_dict: class name -> iterable of member names.

        Returns:
            (currt_api, is_complete, currt_api_index, currt_api_search_path):
            the extended name, whether the chosen subword completed a token
            (contains "▁"), the chosen subword id (0 if none matched), and the
            class under which the match was found (None if none).
        """
        currt_api = validate_class_name[0]
        currt_api_index = 0
        currt_api_search_path = None
        is_complete = False
        flag = False
        for i in range(pred_sub_word_order.shape[0]):
            if flag:
                break
            # if validate_class_name[1] is None:
            #     print("validate_class is none")
            #     print(currt_api)
            #     is_complete = True
            #     break
            for validate_class in validate_class_name[1]:
                if flag:
                    break
                for c_class in search_word_dict[validate_class]:
                    if c_class.find(
                            currt_api + self.tokenizer.convert_id_to_token(pred_sub_word_order[i].item()).replace("▁",
                                                                                                                  "")) != -1:
                        currt_api_index = pred_sub_word_order[i].item()
                        currt_api = currt_api + self.tokenizer.convert_id_to_token(pred_sub_word_order[i].item())
                        currt_api_search_path = validate_class
                        # A "▁"-marked subword closes the current token.
                        if self.tokenizer.convert_id_to_token(currt_api_index).find("▁") != -1:
                            # print(pred_sub_word_order[i].item())
                            currt_api = currt_api.replace("▁", "")
                            is_complete = True
                            flag = True
                        break
                    else:
                        continue
        # if currt_api_index == 0:
        #     print()
        #     print(validate_class_name[1])
        #     print("--------------------")
        # print(currt_api_search_path)
        return currt_api, is_complete, currt_api_index, currt_api_search_path
    def found_validate_class(self, pred_subword, all_classes, n):
        """Fuzzy-match a predicted subword against known class names.

        Decodes `pred_subword` and uses difflib.get_close_matches over the
        keys of `all_classes` (up to 100 matches at cutoff 0.1), then filters:
        short queries (< 4 chars) keep matches that start with the query and
        are longer than 4 chars; longer queries keep matches containing it.
        Pads with "null" entries when nothing matched.

        Returns:
            list of candidate class names (intended length n).

        NOTE(review): in the long-query branch the trailing block re-appends
        every fuzzy match after the filtered loop, so the result can exceed
        `n` and contain duplicates — confirm whether that block should be
        under `if len(refine_words) == 0` instead.
        """
        # validate_class_name = ()
        refine_words = []
        # print(all_classes)
        pred_subword = self.tokenizer.convert_id_to_token(pred_subword.item()).replace("▁", "")
        # print(pred_subword)
        if len(pred_subword) < 4:
            refine_api = difflib.get_close_matches(pred_subword, all_classes.keys(), 100, 0.1)
            for can_dai in refine_api:
                if len(refine_words) == n:
                    break
                if can_dai.startswith(pred_subword, 0, len(pred_subword)) and len(can_dai) > 4:
                    refine_words.append(can_dai)
        else:
            refine_api = difflib.get_close_matches(pred_subword, all_classes.keys(), 100, 0.1)
            for can_dai in refine_api:
                if len(refine_words) == n:
                    break
                if can_dai.find(pred_subword) != -1:
                    refine_words.append(can_dai)
            if len(refine_api) == 0:
                refine_words = n * ["null"]
            else:
                for r_api in refine_api:
                    refine_words.append(r_api)
                refine_words = refine_words + (n - len(refine_api)) * ["null"]
        return refine_words
def refine(self, pred, true_tags):
appendControlNodesStrings = [
"IF", "CONDITION", "THEN", "ELSE",
"WHILE", "BODY",
"TRY", "TRYBLOCK", "CATCH", "FINALLY",
"FOR", "INITIALIZATION", "COMPARE", "UPDATE",
"FOREACH", "VARIABLE", "ITERABLE",
]
tokens = []
true_token = []
flag = False
refine_words = []
pred = F.softmax(pred, dim=1)
# print(true_tags.shape,pred.shape)
# 概率值top5
for j in range(pred.shape[0]):
true_token.append(self.tokenizer.convert_id_to_token(true_tags[j].item()).replace("▁", ""))
for i in range(10):
token = []
for j in range(pred.shape[0]):
top5 = torch.argsort(pred[j], descending=True)[: 10]
token.append(self.tokenizer.convert_id_to_token(top5[i].item()).replace("▁", ""))
word = "".join(token)
tokens.append(word)
# print(true_word)
refine_word = difflib.get_close_matches(word, self.word_vocab, 1, 0.6)
if len(refine_word) == 0:
refine_words.append("null")
else:
refine_words.append(refine_word[0])
true_word = "".join(true_token)
if true_word in appendControlNodesStrings:
self.control_num += 1
# print(word, true_word, refine_word)
# if len(refine_word) != 0:
if true_word in refine_words:
# print(true_word,tokens)
# print(true_word, refine_words)
flag = True
else:
# print(true_word, refine_words)
pass
return true_word, tokens
# def refine(self, pred, true_tags):
# token = []
# true_token = []
# flag = False
# refine_words = []
#
# for i in range(pred.shape[0]):
# # top5 = torch.argsort(pred[i], descending=True)[: 5]
# token.append(self.tokenizer.convert_id_to_token(torch.argmax(pred[i]).item()).replace("▁", ""))
# true_token.append(self.tokenizer.convert_id_to_token(true_tags[i].item()).replace("▁", ""))
# word = "".join(token)
# # tokens.append(word)
# true_word = "".join(true_token)
# # print(true_word)
# refine_word = difflib.get_close_matches(word, self.word_vocab, 1, 0.7)
# # if len(refine_word) == 0:
# # refine_words.append("null")
# # else:
# # refine_words.append(refine_word[0])
#
# # print(word, true_word, refine_word)
# if len(refine_word) != 0:
#
# if true_word == refine_word[0]:
# # print(true_word,word,refine_word[0])
# flag = True
# else:
# pass
# # print(true_word,refine_word[0])
# return flag
# def refine_for_detect(self,pred,true_tags):
# appendControlNodesStrings = [
# "IF", "CONDITION", "THEN", "ELSE",
# "WHILE", "BODY",
# "TRY", "TRYBLOCK", "CATCH", "FINALLY",
# "FOR", "INITIALIZATION", "COMPARE", "UPDATE",
# "FOREACH", "VARIABLE", "ITERABLE",
# ]
# tokens = []
# true_token = []
# words = []
# refine_words = []
# flag = False
# for data in reversed(pred):
# # print(data[1])
# tokens.append([self.tokenizer.convert_id_to_token(ids).replace("▁","") for ids in data[0]])
# for i in range(true_tags.shape[0]):
# true_token.append(self.tokenizer.convert_id_to_token(true_tags[i].item()).replace("▁",""))
# for token in tokens:
# print("".join(token))
# words.append("".join(token))
# true_word_1 = "".join(true_token)
# if true_word_1 in appendControlNodesStrings:
# self.control_num += 1
# # true_word = "".join(true_token)
# for word in words:
# # print(word)
# try:
# refine_words.append(difflib.get_close_matches(word,self.word_vocab,1,0.6)[0])
# except IndexError:
# refine_words.append("null")
#
# return refine_words,true_word_1,words
def refine_for_rec(self, pred):
appendControlNodesStrings = [
"IF", "CONDITION", "THEN", "ELSE",
"WHILE", "BODY",
"TRY", "TRYBLOCK", "CATCH", "FINALLY",
"FOR", "INITIALIZATION", "COMPARE", "UPDATE",
"FOREACH", "VARIABLE", "ITERABLE",
]
refine_words = []
for word in pred:
# print(word)
refine_api = difflib.get_close_matches(word.replace("▁", ""), self.word_vocab, 10, 0.05)
# try:
if len(refine_api) == 0:
refine_words.append("null")
else:
for r_api in refine_api:
refine_words.append(r_api)
# except IndexError:
return refine_words
def beam_search_decoder(self, data, k):
data = F.softmax(data, dim=1)
# print(data.shape)
sequences = [[list(), 1.0]]
# walk over each step in sequence
data = data.cpu().detach().numpy().tolist()
for row in data:
all_candidates = list()
# expand each current candidate
for i in range(len(sequences)):
seq, score = sequences[i]
for j in range(len(row)):
# print(row[j].item())
# a = -log(row[j].item())
try:
candidate = [seq + [j], score * -log(row[j])]
all_candidates.append(candidate)
except:
# print(row[j])
candidate = [seq + [j], score * -log(0.00001)]
all_candidates.append(candidate)
# order all candidates by score
ordered = sorted(all_candidates, key=lambda tup: tup[1])
# select k best
sequences = ordered[:k]
return sequences
def cache(self,top20_result,class_cahe_list):
for i,best_token in enumerate(top20_result):
token = "".join(self.tokenizer.convert_ids_to_tokens(best_token.pre_ids)).replace("▁", "").replace("</t>","").replace("[EOS]","").replace("[UNK]","").replace("[PAD]","")
if token.split(".")[0] in class_cahe_list:
# count = cur_cahe_list.count(token)
top20_result[i].pro = 0.7 * top20_result[i].pro + 0.3
else:
top20_result[i].pro = 0.7 * top20_result[i].pro
# count = 0
return top20_result
# def greedy_decoder(self,data):
# # index for largest probability each row
# return [argmax(s) for s in data]
# def beam_search(decoder, num_beams, max_len, *input):
# """
# a beam search implementation about seq2seq with attention
# :param decoder:
# :param num_beams: number of beam, int
# :param max_len: max length of result
# :param input: input of decoder
# :return: list of index
# """
# # init
# state = input[0] # state of decoder
# outputs = input[1] # outputs of encoder
# src_len = input[2] # length of encode sequence
# beams = [[[1], 1, state]]
#
# cur_pro = 0
# cur_seq = None
# for i in range(max_len):
# results = []
# for beam in beams:
# tgt = torch.LongTensor(beam[0][-1:]).unsqueeze(0).cuda()
# input = [tgt, beam[2], outputs, src_len, 1]
# output, state = decoder(*input)
# v, i = torch.topk(output.view(-1).data, k=num_beams)
# for m, n in zip(v, i):
# gen_seq = beam[0] + [n.item()]
# pro = beam[1] * m.item()
# results.append([gen_seq, pro, state])
#
# if n.item() == 2 and pro > cur_pro: # eos_token = 2
# cur_pro = pro
# cur_seq = gen_seq
#
# # filter beams
# beams = []
# for gen_seq, pro, state in results:
# if pro > cur_pro:
# beams.append([gen_seq, pro, state])
# # cut
# if len(beams) > num_beams:
# results = []
# pros = []
# for beam in beams:
# pros.append(beam[1])
# pros_idx = np.array(pros).argsort()[-1 * num_beams:]
# for pro_idx in pros_idx:
# results.append(beams[pro_idx])
# beams = results
#
# if len(beams) == 0:
# return cur_seq
#
# if cur_seq is not None:
# return cur_seq
# else:
# max_pro = 0
# max_seq = None
# for beam in beams:
# if beam[1] > max_pro:
# max_pro = beam[1]
# max_seq = beam[0]
# return max_seq
# return max_seq
|
{"hexsha": "6d108f093373a078c118f2d2b91b96e40e31d12d", "size": 48312, "ext": "py", "lang": "Python", "max_stars_repo_path": "finetune/Classifier6.py", "max_stars_repo_name": "yuningkang/APIRecX", "max_stars_repo_head_hexsha": "aaef5f3f0b669d7a907ddb3273e6658c9267c68a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "finetune/Classifier6.py", "max_issues_repo_name": "yuningkang/APIRecX", "max_issues_repo_head_hexsha": "aaef5f3f0b669d7a907ddb3273e6658c9267c68a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "finetune/Classifier6.py", "max_forks_repo_name": "yuningkang/APIRecX", "max_forks_repo_head_hexsha": "aaef5f3f0b669d7a907ddb3273e6658c9267c68a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.2783505155, "max_line_length": 234, "alphanum_fraction": 0.4813503891, "include": true, "reason": "from numpy", "num_tokens": 10632}
|
-- Iterated (multi-prompt) delimited control, staged: the answer-type list
-- rs tracks one answer type per enclosing delimiter, and values are staged
-- expressions (Exp) so reify/reflect can produce object-level code.
module Effekt.IteratedStaged
import Effekt.CpsStaged
import Effekt.IteratedUnstaged
-- STM rs a: with an empty stack this is a plain staged value Exp a; each
-- extra answer type r adds one CPS layer expecting a continuation.
STM : List Type -> Type -> Type
STM [] a = Exp a
STM (r :: rs) a = (Exp a -> STM rs r) -> STM rs r
-- Return a staged value at any stack depth.
pure : Exp a -> STM rs a
pure{ rs= []} a = a
pure{ rs= r :: rs} a = \k => k a
-- Pre-compose a continuation onto a one-layer-deeper computation.
push : (Exp a -> STM (r :: rs) b) -> (Exp b -> STM rs r) -> (Exp a -> STM rs r)
push f k = \a => f a k
-- Monadic bind, defined by recursion on the answer-type stack.
bind : STM rs a -> (Exp a -> STM rs b) -> STM rs b
bind{ rs= []} m f = f m
bind{ rs= r :: rs} m f = \k => m (push f k)
(>>=) : STM rs a -> (Exp a -> STM rs b) -> STM rs b
(>>=) = bind
-- Lifting under one more delimiter coincides with bind in this encoding.
lift : STM rs a -> STM (r :: rs) a
lift = bind
-- shift0/runIn0 are identities: STM (r :: rs) a is literally a function
-- awaiting its continuation.
shift0 : ((Exp a -> STM rs r) -> STM rs r) -> STM (r :: rs) a
shift0 = id
runIn0 : STM (r :: rs) a -> (Exp a -> STM rs r) -> STM rs r
runIn0 = id
-- Delimit a computation by running it with the trivial continuation.
reset0 : STM (a :: rs) a -> STM rs a
reset0 m = runIn0 m pure
-- reify/reflect convert between the meta-level STM and the object-level
-- Stm layer by layer, inserting staged lambdas/applications (Lam/App).
mutual
reify : STM rs a -> Exp (Stm rs a)
reify {rs = []} m = m
reify {rs = (q :: qs)} m =
(Lam $ \ k => reify (m (\a => reflect {a = q} {rs = qs} (App k a))))
reflect : Exp (Stm rs a) -> STM rs a
reflect {rs = []} m = m
reflect {rs = (q :: qs)} m =
\k => reflect (App m ((Lam $ \ a => reify (k a))))
-- Emit one Int onto the innermost List Int answer: capture the
-- continuation and cons the value onto its result.
emit : Exp Int -> STM (List Int :: rs) ()
emit {rs} a = shift0 (\c => do
as <- c Uni
pure {rs} (Con a as))
-- Emit to the two innermost List Int answers: once directly, once lifted.
emitTwice : Exp Int -> STM (List Int :: List Int :: rs) ()
emitTwice {rs} a =
bind {rs=List Int::List Int::rs} (emit {rs=List Int::rs} a) (\u =>
lift {r=List Int} {rs=List Int::rs} (emit a))
export
reifiedEmitTwice : Exp (Int -> Stm [List Int,List Int] ())
reifiedEmitTwice = Lam (\x => reify {rs=[List Int,List Int]} (emitTwice x))
export
reifiedEmitTwice' : Exp (Int -> Stm [List Int,List Int,()] ())
reifiedEmitTwice' = Lam (\x => reify {rs=[List Int,List Int,()]} (emitTwice {rs=[()]} x))
|
{"hexsha": "550454d1cb15f87fa2db8ae604cfe1a47b616af8", "size": 1774, "ext": "idr", "lang": "Idris", "max_stars_repo_path": "Effekt/IteratedStaged.idr", "max_stars_repo_name": "b-studios/idris-effekt", "max_stars_repo_head_hexsha": "d9c5094456677914e034ef1ade9525ac0c338108", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-10-14T00:28:55.000Z", "max_stars_repo_stars_event_max_datetime": "2020-10-14T00:28:55.000Z", "max_issues_repo_path": "Effekt/IteratedStaged.idr", "max_issues_repo_name": "b-studios/idris-effekt", "max_issues_repo_head_hexsha": "d9c5094456677914e034ef1ade9525ac0c338108", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Effekt/IteratedStaged.idr", "max_forks_repo_name": "b-studios/idris-effekt", "max_forks_repo_head_hexsha": "d9c5094456677914e034ef1ade9525ac0c338108", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.71875, "max_line_length": 89, "alphanum_fraction": 0.52367531, "num_tokens": 693}
|
/*
Copyright (C) 2020 Skandinaviska Enskilda Banken AB (publ)
All rights reserved.
This file is part of ORE, a free-software/open-source library
for transparent pricing and risk analysis - http://opensourcerisk.org
ORE is free software: you can redistribute it and/or modify it
under the terms of the Modified BSD License. You should have received a
copy of the license along with this program.
The license is also available online at <http://opensourcerisk.org>
This program is distributed on the basis that it will form a useful
contribution to risk analytics and model standardisation, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the license for more details.
*/
#include <ored/portfolio/commodityasianoption.hpp>
#include <boost/make_shared.hpp>
#include <qle/indexes/commodityindex.hpp>
#include <qle/termstructures/pricetermstructure.hpp>
#include <ql/errors.hpp>
#include <ored/portfolio/enginefactory.hpp>
#include <ored/utilities/log.hpp>
#include <ored/utilities/to_string.hpp>
namespace ore {
namespace data {
// Build the trade: validate the economics, fetch the commodity price curve,
// construct the commodity index (futures or spot) used for automatic
// exercise, then delegate to the generic Asian option builder.
void CommodityAsianOption::build(const boost::shared_ptr<EngineFactory>& engineFactory) {

    // Basic sanity checks on the trade economics.
    // (Fixed typo in the error message: "quatity" -> "quantity".)
    QL_REQUIRE(quantity_ > 0, "Commodity Asian option requires a positive quantity");
    QL_REQUIRE(strike_ >= 0, "Commodity Asian option requires a strike >= 0");

    // Get the price curve for the commodity.
    const boost::shared_ptr<Market>& market = engineFactory->market();
    Handle<QuantExt::PriceTermStructure> priceCurve =
        market->commodityPriceCurve(assetName_, engineFactory->configuration(MarketContext::pricing));

    // Populate the index_ in case the option is automatic exercise.
    // Intentionally use null calendar because we will ask for index value on the expiry date without adjustment.
    if (!isFuturePrice_ || *isFuturePrice_) {

        // Assume future price if isFuturePrice_ is not explicitly set or if it is and true.
        // If we are given an explicit future contract expiry date, use it, otherwise use option's expiry.
        Date expiryDate;
        if (futureExpiryDate_ != Date()) {
            expiryDate = futureExpiryDate_;
        } else {
            // Get the expiry date of the option. This is the expiry date of the commodity future index.
            const vector<string>& expiryDates = option_.exerciseDates();
            QL_REQUIRE(expiryDates.size() == 1, "Expected exactly one expiry date for CommodityAsianOption but got "
                                                    << expiryDates.size() << ".");
            expiryDate = parseDate(expiryDates[0]);
        }

        index_ = boost::make_shared<QuantExt::CommodityFuturesIndex>(assetName_, expiryDate, NullCalendar(), priceCurve);
    } else {
        // If the underlying is a commodity spot, create a spot index.
        index_ = boost::make_shared<QuantExt::CommoditySpotIndex>(assetName_, NullCalendar(), priceCurve);
    }

    AsianOptionTrade::build(engineFactory);
}
// The only underlying of a commodity Asian option is the commodity itself,
// reported under the COM asset class. The reference data manager is unused.
std::map<AssetClass, std::set<std::string>> CommodityAsianOption::underlyingIndices(const boost::shared_ptr<ReferenceDataManager>& referenceDataManager) const {
    std::set<std::string> names{assetName_};
    return {{AssetClass::COM, std::move(names)}};
}
// Deserialise the trade from its XML representation.
void CommodityAsianOption::fromXML(XMLNode* node) {
    // Populate the base trade data first.
    Trade::fromXML(node);

    XMLNode* dataNode = XMLUtils::getChildNode(node, "CommodityAsianOptionData");
    QL_REQUIRE(dataNode, "A commodity Asian option needs a 'CommodityAsianOptionData' node");

    option_.fromXML(XMLUtils::getChildNode(dataNode, "OptionData"));
    QL_REQUIRE(option_.payoffType() == "Asian", "Expected PayoffType Asian for CommodityAsianOption.");

    // Asian averaging details are optional.
    if (XMLNode* asianNode = XMLUtils::getChildNode(dataNode, "AsianData"))
        asianData_.fromXML(asianNode);

    observationDates_.fromXML(XMLUtils::getChildNode(dataNode, "ObservationDates"));

    // Accept either an "Underlying" node or the legacy "Name" node.
    XMLNode* underlyingNode = XMLUtils::getChildNode(dataNode, "Underlying");
    if (!underlyingNode)
        underlyingNode = XMLUtils::getChildNode(dataNode, "Name");
    commodityUnderlying_.fromXML(underlyingNode);
    assetName_ = commodityUnderlying_.name();

    currency_ = XMLUtils::getChildValue(dataNode, "Currency", true);

    // Strike and quantity are mandatory (last argument = true).
    strike_ = XMLUtils::getChildValueAsDouble(dataNode, "Strike", true);
    quantity_ = XMLUtils::getChildValueAsDouble(dataNode, "Quantity", true);

    // Optional flags.
    isFuturePrice_ = boost::none;
    if (XMLNode* flagNode = XMLUtils::getChildNode(dataNode, "IsFuturePrice"))
        isFuturePrice_ = parseBool(XMLUtils::getNodeValue(flagNode));

    futureExpiryDate_ = Date();
    if (XMLNode* expiryNode = XMLUtils::getChildNode(dataNode, "FutureExpiryDate"))
        futureExpiryDate_ = parseDate(XMLUtils::getNodeValue(expiryNode));
}
// Serialise the trade to XML; the inverse of fromXML.
XMLNode* CommodityAsianOption::toXML(XMLDocument& doc) {
    XMLNode* node = Trade::toXML(doc);

    XMLNode* dataNode = doc.allocNode("CommodityAsianOptionData");
    XMLUtils::appendNode(node, dataNode);

    XMLUtils::appendNode(dataNode, option_.toXML(doc));
    XMLUtils::appendNode(dataNode, asianData_.toXML(doc));

    // The schedule serialises under a generic node name; rename it so the
    // round-trip matches the "ObservationDates" tag read in fromXML.
    XMLNode* obsNode = observationDates_.toXML(doc);
    XMLUtils::setNodeName(doc, obsNode, "ObservationDates");
    XMLUtils::appendNode(dataNode, obsNode);

    XMLUtils::appendNode(dataNode, commodityUnderlying_.toXML(doc));
    XMLUtils::addChild(doc, dataNode, "Currency", currency_);
    XMLUtils::addChild(doc, dataNode, "Strike", strike_);
    XMLUtils::addChild(doc, dataNode, "Quantity", quantity_);

    // Optional fields are only written when set.
    if (isFuturePrice_)
        XMLUtils::addChild(doc, dataNode, "IsFuturePrice", *isFuturePrice_);
    if (futureExpiryDate_ != Date())
        XMLUtils::addChild(doc, dataNode, "FutureExpiryDate", to_string(futureExpiryDate_));

    return node;
}
} // namespace data
} // namespace ore
|
{"hexsha": "8534b6659a76ee66daec0d6407ee76914e3b6e0c", "size": 5946, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "OREData/ored/portfolio/commodityasianoption.cpp", "max_stars_repo_name": "mrslezak/Engine", "max_stars_repo_head_hexsha": "c46ff278a2c5f4162db91a7ab500a0bb8cef7657", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 335.0, "max_stars_repo_stars_event_min_datetime": "2016-10-07T16:31:10.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-02T07:12:03.000Z", "max_issues_repo_path": "OREData/ored/portfolio/commodityasianoption.cpp", "max_issues_repo_name": "mrslezak/Engine", "max_issues_repo_head_hexsha": "c46ff278a2c5f4162db91a7ab500a0bb8cef7657", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 59.0, "max_issues_repo_issues_event_min_datetime": "2016-10-31T04:20:24.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-03T16:39:57.000Z", "max_forks_repo_path": "OREData/ored/portfolio/commodityasianoption.cpp", "max_forks_repo_name": "mrslezak/Engine", "max_forks_repo_head_hexsha": "c46ff278a2c5f4162db91a7ab500a0bb8cef7657", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 180.0, "max_forks_repo_forks_event_min_datetime": "2016-10-08T14:23:50.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-28T10:43:05.000Z", "avg_line_length": 44.0444444444, "max_line_length": 161, "alphanum_fraction": 0.7082071981, "num_tokens": 1408}
|
import cv2
import sys
from ..utils import model_utils
from ..utils.mtcnntf.utils import detect_face
import tensorflow as tf
import numpy as np
import os
os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'
ssd_model = None
pnet, rnet, onet = None, None, None
dlib_hog_model = None
dlib_68_landmark = None
dlib_cnn_model = None
def ssd_detect(frame, align=False):
    '''
    Detect faces with the OpenCV SSD detector.

    input: rgb image (H x W x 3)
    output: array of [x1, y1, x2, y2] boxes in the ORIGINAL image
    coordinates (the image is letter-boxed to a square internally and the
    padding offset is subtracted from the returned boxes)
    '''
    global ssd_model
    (h, w) = frame.shape[:2]
    # Pad the shorter side so the image is square before the 300x300 resize,
    # which keeps the aspect ratio of faces intact.
    t1, t2 = 0, 0
    if w > h:
        padded = np.zeros((w, w, 3))
        t1 = (w - h) // 2
        padded[t1:t1 + h, :, :] = frame
        frame = padded
    else:
        padded = np.zeros((h, h, 3))
        t2 = (h - w) // 2
        padded[:, t2:t2 + w, :] = frame
        frame = padded
    (h, w) = frame.shape[:2]  # now square: h == w
    if ssd_model is None:
        ssd_model = model_utils.load_ssd_model()
    frame = frame.astype(np.uint8)
    imageBlob = cv2.dnn.blobFromImage(frame, 1.0, (300, 300), [104, 117, 123], False, False)
    ssd_model.setInput(imageBlob)
    detections = ssd_model.forward()
    boxes = []
    for i in range(0, detections.shape[2]):
        confidence = detections[0, 0, i, 2]
        if confidence > 0.7:
            box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
            # Clamp to the image: x-coordinates against the width and
            # y-coordinates against the height.  (The previous code swapped
            # h and w here; it only worked because the padded frame is
            # square.)  The unused face crop was removed.
            box = [max(box[0], 0), max(box[1], 0), min(box[2], w), min(box[3], h)]
            # Shift back into the un-padded coordinate system.
            boxes.append((box - np.array([t2, t1, t2, t1])).astype("int"))
    # NOTE(review): dlib_align receives the PADDED frame but boxes in
    # un-padded coordinates — confirm this mismatch is intended.
    return dlib_align(frame, boxes) if align is True else np.array(boxes)
def mtcnn_detect(frame, align=False):
    '''
    Detect faces with the MTCNN cascade (PNet/RNet/ONet).

    input: rgb image (H x W x 3)
    output: (boxes, results) where boxes is a list of [x1, y1, x2, y2] and
    results additionally carries per-face confidence and facial keypoints
    '''
    global pnet, rnet, onet
    if pnet is None or rnet is None or onet is None:
        pnet, rnet, onet = model_utils.load_mtcnn_model()
    total_boxes, points = detect_face(frame, 20, pnet, rnet, onet, [0.6, 0.7, 0.7], 0.709)
    (h, w) = frame.shape[:2]
    results = []
    boxes = []
    for bbox, keypoints in zip(total_boxes, points.T):
        # bbox is [x1, y1, x2, y2, score].  Clamp x against the width and y
        # against the height — the previous code clamped x2 to h and y2 to
        # w, which truncated boxes on non-square frames.
        result = {
            'box': [int(max(bbox[0], 0)), int(max(bbox[1], 0)),
                    int(min(bbox[2], w)), int(min(bbox[3], h))],
            'confidence': bbox[-1],
            'keypoints': {
                'left_eye': (int(keypoints[0]), int(keypoints[5])),
                'right_eye': (int(keypoints[1]), int(keypoints[6])),
                'nose': (int(keypoints[2]), int(keypoints[7])),
                'mouth_left': (int(keypoints[3]), int(keypoints[8])),
                'mouth_right': (int(keypoints[4]), int(keypoints[9])),
            }
        }
        results.append(result)
        boxes.append(result['box'])
    return boxes, results
def dlib_hog_detect(frame, align=False):
    '''
    Detect faces with the dlib HOG detector (runs on grayscale).

    input: rgb image
    output: list of [x1, y1, x2, y2] boxes
    '''
    global dlib_hog_model
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    if dlib_hog_model is None:
        dlib_hog_model = model_utils.load_dlib_hog()
    detections = dlib_hog_model(gray, 1)
    # Each dlib detection exposes its corners via left/top/right/bottom.
    return [[d.left(), d.top(), d.right(), d.bottom()] for d in detections]
def dlib_cnn_detect(frame, align=False):
    '''
    Detect faces with the dlib CNN detector.

    input: rgb image
    output: list of [x1, y1, x2, y2] boxes
    '''
    global dlib_cnn_model
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    if dlib_cnn_model is None:
        # NOTE(review): this loads the HOG model via load_dlib_hog() even
        # though the function advertises the CNN detector — likely a
        # copy-paste slip from dlib_hog_detect; confirm whether model_utils
        # exposes a CNN loader that should be used here instead.
        dlib_cnn_model = model_utils.load_dlib_hog()
    faces = dlib_cnn_model(rgb, 1)
    boxes = []
    # Each dlib detection exposes its corners via left/top/right/bottom.
    for result in faces:
        x = result.left()
        y = result.top()
        x1 = result.right()
        y1 = result.bottom()
        boxes.append([x,y,x1,y1])
    return boxes
def dlib_align(frame, boxes):
    """Refine face boxes to the tight bounds of the 68 dlib landmarks.

    input: rgb image and a list of [x1, y1, x2, y2] boxes
    output: list of [x1, y1, x2, y2] boxes spanning the landmark extent
    """
    # Bug fix: without this `global` declaration the assignment below made
    # dlib_68_landmark function-local, so the first read raised
    # UnboundLocalError whenever the model had not been loaded yet.
    global dlib_68_landmark
    if dlib_68_landmark is None:
        dlib_68_landmark = model_utils.load_dlib_68_landmark()
    # NOTE(review): `dlib` is not imported anywhere in this module, and
    # dlib.rectangle(left, top, right, bottom) is given css[2] (x2) as left
    # and css[0] (x1) as right — confirm the intended argument order.
    bb = [dlib.rectangle(css[2], css[1], css[0], css[3]) for css in boxes]
    landmarks = [dlib_68_landmark(frame, box) for box in bb]
    refined = []
    for landmark in landmarks:
        xs = [p.x for p in landmark.parts()]
        ys = [p.y for p in landmark.parts()]
        refined.append([min(xs), min(ys), max(xs), max(ys)])
    return refined
def test(id_cam=0, func='ssd', align=False, show=True):
    """Run a webcam loop with the chosen detector and draw the boxes.

    func is one of 'ssd', 'mtcnn', 'dlib'; press 'q' to quit.
    The capture device and preview window are now released on exit
    (the previous version leaked both).
    """
    assert func in ['ssd', 'mtcnn', 'dlib']
    cap = cv2.VideoCapture(id_cam)
    try:
        ret, _ = cap.read()
        while ret == True:
            ret, frame = cap.read()
            frame2 = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
            boxes = []
            if func == 'ssd':
                boxes = ssd_detect(frame2, align=align)
            elif func == 'mtcnn':
                boxes, _ = mtcnn_detect(frame2, align=align)
            else:  # 'dlib' — guaranteed by the assert above
                boxes = dlib_cnn_detect(frame2, align=align)
            if show:
                if len(boxes) > 0:
                    for box in boxes:
                        cv2.rectangle(frame, (box[0], box[1]), (box[2], box[3]), (0, 255, 0), thickness=2)
                cv2.imshow('Show', frame)
                k = cv2.waitKey(1)
                if k == ord('q'):
                    break
    finally:
        # Release the camera and close the preview window even on error.
        cap.release()
        cv2.destroyAllWindows()
# Manual smoke test: run this module directly to try the MTCNN detector.
if __name__ == "__main__":
    test(show=True, func='mtcnn', align=False)
|
{"hexsha": "03f4cb36ca00d0b916cbe5cb5ccd371bcc602561", "size": 5629, "ext": "py", "lang": "Python", "max_stars_repo_path": "duyai/cv/face/detection.py", "max_stars_repo_name": "DuyNguyen-ai/duyai", "max_stars_repo_head_hexsha": "8c9ef23942f6e2965e487d57b67331c8c66b935a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "duyai/cv/face/detection.py", "max_issues_repo_name": "DuyNguyen-ai/duyai", "max_issues_repo_head_hexsha": "8c9ef23942f6e2965e487d57b67331c8c66b935a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "duyai/cv/face/detection.py", "max_forks_repo_name": "DuyNguyen-ai/duyai", "max_forks_repo_head_hexsha": "8c9ef23942f6e2965e487d57b67331c8c66b935a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.7595628415, "max_line_length": 110, "alphanum_fraction": 0.555338426, "include": true, "reason": "import numpy", "num_tokens": 1684}
|
# -*- coding: utf-8 -*-
"""
Title: Multimedia Data Formats
Date: 01.06.2018
Description:
"""
import cv2
import numpy as np
from scipy.spatial import distance
class Detect:
    """Copy-move forgery detection: keypoints whose descriptors closely
    match other keypoints in the SAME image are clustered spatially; two
    sufficiently large clusters indicate a copied region and its source."""
    def __init__(self):
        # Stateless helper class; nothing to initialise.
        return None
    def detect_all(self, keypoint_descriptors):
        """detect_all(keypoint_descriptors):
        Runs the copy-move detection for each feature type present in
        ``keypoint_descriptors`` (ORB, BRISK, SIFT, SURF) with per-method
        ratio (Tr) and cluster-distance (Tc) thresholds, using the Hamming
        norm for the binary descriptors (ORB, BRISK) and L2 for the float
        descriptors (SIFT, SURF).
        Returns a dict mapping method name -> bool detection result.
        """
        results = {'ORB': self.detect(keypoint_descriptors['ORB'],Tr=0.7,Tc=12, norm=cv2.NORM_HAMMING),
                   'BRISK': self.detect(keypoint_descriptors['BRISK'],Tr=0.55,Tc=3, norm=cv2.NORM_HAMMING),
                   'SIFT': self.detect(keypoint_descriptors['SIFT'],Tr=0.4,Tc=10, norm=cv2.NORM_L2),
                   'SURF': self.detect(keypoint_descriptors['SURF'],Tr=0.4,Tc=11, norm=cv2.NORM_L2)}
        return results
    def detect(self, keypoint_descriptors, Tr=0.5, Tc=10, norm=cv2.NORM_L2):
        """ detect(keypoint_descriptors, Tr=0.5, Tc=10, norm=cv2.NORM_L2)
        Detects a copy-move attack from one image's keypoints/descriptors.
        Tr is the match-ratio threshold, Tc the centroid-distance threshold
        for the agglomerative clustering, norm the descriptor norm.
        Returns True when at least two clusters of more than 3 keypoints
        emerge, else False.
        """
        kps = keypoint_descriptors['keypoints']
        descs = keypoint_descriptors['descriptors']
        # instantiation of the BruteForceMatcher class
        bf = cv2.BFMatcher(norm)
        # the knnMatch returns for each descriptor out of the queryDescriptors the k nearest (with respect to the used norm)
        # descriptors out of the trainDescriptors
        # (k=3 because self-matching: entry 0 is the keypoint itself.)
        kmatch = bf.knnMatch(queryDescriptors=descs, trainDescriptors=descs, k=3)
        # create the clusterlist, one entry represents one cluster and should contain the mean x and y coordinate
        # of the cluster members, the number keypoints in this cluster and a placeholder for the later distance calculation.
        cluster = list()
        # ratio test, if the ratio of the distance from the nearest over the second nearest descriptor is less than the threshold Tr
        # cluster the query keypoint with the nearest train keypoint.
        for i in np.arange(len(kmatch)):
            # entries overwritten with the (0, 0, 0) sentinel below are skipped
            if kmatch[i][0] != 0:
                if kmatch[i][1].distance == 0 or (kmatch[i][1].distance / kmatch[i][2].distance) < Tr:
                    # for the agglomerative hierarchical clustering we compare the distances between the centroid
                    # (the mean x and y coordinate of all keypoints of the cluster) from two cluster.
                    # therefore add the mean x and y coordinates and the number of cluster members to the cluster list
                    x = (kps[kmatch[i][0].trainIdx].pt[0] + kps[kmatch[i][1].trainIdx].pt[0]) / 2
                    y = (kps[kmatch[i][0].trainIdx].pt[1] + kps[kmatch[i][1].trainIdx].pt[1]) / 2
                    cluster.append(([x, y], 2, 0))
                    # to avoid duplicate cluster set the match result for the already added descriptor to zero
                    kmatch[kmatch[i][1].trainIdx] = (0, 0, 0)
        # agglomerative hierarchical clustering, merge the two nearest (distance between the centroids) if the distacne
        # is less than the threshold Tc
        # to store the actual minimum distance and the index of the according two cluster
        mindist = (0, 0, 0)
        # store the number of cluster with more than 3 Keypoints
        clustercount = 0
        # do the clustering until the minimum distance is greater than the threshold or more than 2 cluster has more than 3 Keypoints
        while mindist[0] < Tc and clustercount < 2:
            mindist = (Tc + 1, 0, 0)
            # exhaustive O(n^2) scan for the closest pair of centroids
            for i in np.arange(len(cluster)):
                for j in np.arange(i):
                    tempdist = distance.euclidean(cluster[i][0], cluster[j][0])
                    if tempdist < mindist[0]:
                        mindist = (tempdist, i, j)
            if mindist[0] < Tc:
                # merge the pair: pop both and append a weighted centroid
                # (mindist[1] > mindist[2], so popping index 1 first keeps
                # index 2 valid)
                tempi = cluster.pop(mindist[1])
                tempj = cluster.pop(mindist[2])
                new_k = tempi[1] + tempj[1]
                new_x = (tempi[1] * tempi[0][0] + tempj[0][0] * tempj[1]) / new_k
                new_y = (tempi[1] * tempi[0][1] + tempj[0][1] * tempj[1]) / new_k
                cluster.append(([new_x, new_y], new_k, 0))
                # to avoid to increase the clustercount for cluster which already had more than 3 Keypoints
                if tempi[1] <= 3 and tempj[1] <= 3:
                    clustercount = clustercount + 1
        if clustercount >= 2:
            # Copy move attack detected
            return True
        else:
            # No detection
            return False
|
{"hexsha": "63099394edd7daf52e6ab81c228c0b9cf63f648a", "size": 4576, "ext": "py", "lang": "Python", "max_stars_repo_path": "detect.py", "max_stars_repo_name": "Steinthor/MediaDataProject", "max_stars_repo_head_hexsha": "5137b96da7dce11499ae729ac6986c88ad3169e9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2018-03-19T15:43:42.000Z", "max_stars_repo_stars_event_max_datetime": "2019-11-03T10:19:45.000Z", "max_issues_repo_path": "detect.py", "max_issues_repo_name": "Steinthor/MediaDataProject", "max_issues_repo_head_hexsha": "5137b96da7dce11499ae729ac6986c88ad3169e9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "detect.py", "max_forks_repo_name": "Steinthor/MediaDataProject", "max_forks_repo_head_hexsha": "5137b96da7dce11499ae729ac6986c88ad3169e9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 49.2043010753, "max_line_length": 133, "alphanum_fraction": 0.5963723776, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1170}
|
[STATEMENT]
lemma dlex_pp_zero_min: "dlex_pp 0 s"
for s t::"(_, _::add_linorder_min) pp"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. dlex_pp 0 s
[PROOF STEP]
by (transfer, fact dlex_pm_zero_min)
|
{"llama_tokens": 99, "file": "Polynomials_PP_Type", "length": 1}
|
From Ordinal Require Import sflib Basics.
From Ordinal Require Export Ordinal Arithmetic.
Require Import Coq.Classes.RelationClasses Coq.Classes.Morphisms.
Set Implicit Arguments.
Set Primitive Projections.
Module Hessenberg.
Section ADD.
(* Hessenberg (natural) addition on ordinals.  The worker _add is defined
   by well-founded recursion on the pair (o0, o1): the sum is the union of
   all sums obtained by replacing either component with one of its strictly
   smaller predecessors. *)
Program Let _add: Ord.t * Ord.t -> Ord.t :=
Fix (double_rel_well_founded Ord.lt_well_founded Ord.lt_well_founded) (fun _ => Ord.t)
(fun o0o1 add =>
match o0o1 with
| (o0, o1) =>
Ord.union
(match o1 with
| @Ord.build X1 os1 => Ord.build (fun x1 => @add (o0, os1 x1) _)
end)
(match o0 with
| @Ord.build X0 os0 => Ord.build (fun x0 => @add (os0 x0, o1) _)
end)
end).
(* Recursive call descends in the right component of the pair. *)
Next Obligation.
right. econs. reflexivity.
Qed.
(* Recursive call descends in the left component of the pair. *)
Next Obligation.
left. econs. reflexivity.
Qed.
(* Curried public wrapper around the uncurried worker. *)
Definition add (o0 o1: Ord.t): Ord.t :=
_add (o0, o1).
(* Unfolding equation for the worker, up to Ord.eq: Fix only computes
   propositionally, so the defining equation is recovered via Fix_equiv. *)
Let _add_red (o0o1: Ord.t * Ord.t)
:
Ord.eq
(_add o0o1)
(match o0o1 with
| (o0, o1) =>
Ord.union
(match o1 with
| @Ord.build X1 os1 => Ord.build (fun x1 => _add (o0, os1 x1))
end)
(match o0 with
| @Ord.build X0 os0 => Ord.build (fun x0 => _add (os0 x0, o1))
end)
end).
Proof.
unfold _add. rewrite Fix_equiv; ss.
{ destruct o0o1 as [[] []]. ss. reflexivity. }
{ i. ss. destruct x as [[] []].
eapply Ord.eq_union.
{ split.
{ econs. i. exists a0. eapply H. }
{ econs. i. exists a0. eapply H. }
}
{ split.
{ econs. i. exists a0. eapply H. }
{ econs. i. exists a0. eapply H. }
}
}
Qed.
(* Public unfolding lemma, stated on builds of both arguments.  This is the
   rewriting workhorse for all the lemmas below. *)
Lemma add_red X0 (os0: X0 -> Ord.t) X1 (os1: X1 -> Ord.t)
:
Ord.eq
(add (@Ord.build X0 os0) (@Ord.build X1 os1))
(Ord.union
(Ord.build (fun x1 => add (@Ord.build X0 os0) (os1 x1)))
(Ord.build (fun x0 => add (os0 x0) (@Ord.build X1 os1)))).
Proof.
unfold add. apply _add_red.
Qed.
(* Prevent accidental unfolding of add; rewrite with add_red instead. *)
Global Opaque add.
(* add o0 o1 is bounded by any o that strictly dominates every sum with one
   component replaced by a smaller ordinal. *)
Lemma add_supremum o0 o1 o
(LE0: forall o2 (LT: Ord.lt o2 o0), Ord.lt (add o2 o1) o)
(LE1: forall o2 (LT: Ord.lt o2 o1), Ord.lt (add o0 o2) o)
:
Ord.le (add o0 o1) o.
Proof.
destruct o0, o1. rewrite add_red.
apply Ord.union_spec.
{ eapply Ord.build_supremum. i. eapply LE1. eapply Ord.build_upperbound. }
{ eapply Ord.build_supremum. i. eapply LE0. eapply Ord.build_upperbound. }
Qed.
(* Monotonicity (<=) in the left argument, by simultaneous well-founded
   induction on both ordinals. *)
Lemma le_add_l o0 o1 o2 (LE: Ord.le o0 o1)
:
Ord.le (add o0 o2) (add o1 o2).
Proof.
revert o0 LE. pattern o1, o2. revert o1 o2.
eapply (double_well_founded_induction Ord.lt_well_founded Ord.lt_well_founded).
i. dup LE. inv LE. destruct b1.
rewrite add_red. rewrite add_red.
eapply Ord.le_union.
{ econs. i. exists a0. eapply IHR; auto. econs. reflexivity. }
{ econs. i. specialize (LE1 a0). des.
exists a1. eapply IHL; auto. econs. reflexivity. }
Qed.
(* Monotonicity (<=) in the right argument. *)
Lemma le_add_r o0 o1 o2 (LE: Ord.le o1 o2)
:
Ord.le (add o0 o1) (add o0 o2).
Proof.
revert o1 LE. pattern o0. pattern o2. revert o0 o2.
eapply (double_well_founded_induction Ord.lt_well_founded Ord.lt_well_founded).
i. dup LE. inv LE. destruct a1.
rewrite add_red. rewrite add_red.
eapply Ord.le_union.
{ econs. i. specialize (LE1 a0). des.
exists a1. eapply IHR; auto. econs. reflexivity. }
{ econs. i. exists a0. eapply IHL; auto. econs. reflexivity. }
Qed.
(* Congruence of add with respect to Ord.eq, in each argument. *)
Lemma eq_add_l o0 o1 o2 (EQ: Ord.eq o0 o1)
:
Ord.eq (add o0 o2) (add o1 o2).
Proof.
econs.
{ eapply le_add_l. eapply EQ. }
{ eapply le_add_l. eapply EQ. }
Qed.
Lemma eq_add_r o0 o1 o2 (EQ: Ord.eq o1 o2)
:
Ord.eq (add o0 o1) (add o0 o2).
Proof.
econs.
{ eapply le_add_r. eapply EQ. }
{ eapply le_add_r. eapply EQ. }
Qed.
(* Setoid-rewriting support: add respects Ord.eq and Ord.le pointwise. *)
Global Program Instance add_eq_proper: Proper (Ord.eq ==> Ord.eq ==> Ord.eq) (add).
Next Obligation.
ii.
etransitivity.
- eapply eq_add_l; eauto.
- eapply eq_add_r; eauto.
Qed.
Global Program Instance add_le_proper: Proper (Ord.le ==> Ord.le ==> Ord.le) (add).
Next Obligation.
ii.
etransitivity.
- eapply le_add_l; eauto.
- eapply le_add_r; eauto.
Qed.
(* Strict monotonicity in the left argument. *)
Lemma lt_add_l o0 o1 o2 (LT: Ord.lt o0 o1)
  :
    Ord.lt (add o0 o2) (add o1 o2).
Proof.
  inv LT. destruct o2. rewrite add_red. eapply Ord.lt_le_lt.
  2:{ eapply Ord.union_r. }
  { eapply Ord.le_lt_lt.
    { rewrite LE. reflexivity. }
    { econs. reflexivity. }
  }
Qed.
(* Strict monotonicity in the right argument. *)
Lemma lt_add_r o0 o1 o2 (LT: Ord.lt o1 o2)
  :
    Ord.lt (add o0 o1) (add o0 o2).
Proof.
  inv LT. destruct o0. rewrite add_red. eapply Ord.lt_le_lt.
  2:{ eapply Ord.union_l. }
  { eapply Ord.le_lt_lt.
    { rewrite LE. reflexivity. }
    { econs. reflexivity. }
  }
Qed.
(* Same statement as [add_supremum], phrased with the hypotheses named as in
   the corresponding [OrdArith] lemmas. *)
Lemma add_spec o0 o1 o2
      (SUP0: forall o (LT: Ord.lt o o0), Ord.lt (add o o1) o2)
      (SUP1: forall o (LT: Ord.lt o o1), Ord.lt (add o0 o) o2)
  :
    Ord.le (add o0 o1) o2.
Proof.
  destruct o0, o1. rewrite add_red. eapply Ord.union_spec.
  { eapply Ord.build_supremum. i. eapply SUP1. econs. reflexivity. }
  { eapply Ord.build_supremum. i. eapply SUP0. econs. reflexivity. }
Qed.
(* Hessenberg addition is commutative (unlike ordinary ordinal addition). *)
Lemma add_comm o0 o1
  :
    Ord.eq (add o0 o1) (add o1 o0).
Proof.
  revert o0 o1.
  cut (forall o0 o1, Ord.le (add o0 o1) (add o1 o0)).
  { i. split; eauto. }
  i. pattern o0, o1. revert o0 o1.
  eapply (double_well_founded_induction Ord.lt_well_founded Ord.lt_well_founded).
  i. destruct a1, b1. rewrite add_red. rewrite add_red.
  rewrite Ord.union_comm. eapply Ord.le_union.
  { econs. i. exists a0. eapply IHL. econs. reflexivity. }
  { econs. i. exists a0. eapply IHR. econs. reflexivity. }
Qed.
(* Associativity of Hessenberg addition.  The induction is on [o0] paired with
   the product [(o1, o2)], so the IH is available both for smaller [o1]
   ([IH1]) and smaller [o2] ([IH2]). *)
Lemma add_assoc o0 o1 o2
  :
    Ord.eq (add (add o0 o1) o2) (add o0 (add o1 o2)).
Proof.
  revert o0 o1 o2.
  cut (forall o0 o1o2, (fun o0 o1o2 => (add (add o0 (fst o1o2)) (snd o1o2) == add o0 (add (fst o1o2) (snd o1o2)))%ord) o0 o1o2).
  { i. eapply (H o0 (o1, o2)). }
  eapply (double_well_founded_induction Ord.lt_well_founded (double_rel_well_founded Ord.lt_well_founded Ord.lt_well_founded)).
  intros o0 [o1 o2] IH0 IH12. ss.
  assert (IH1: forall o (LT: (o < o1)%ord), (add (add o0 o) o2 == add o0 (add o o2))%ord).
  { i. eapply (IH12 (o, o2)). left. auto. }
  assert (IH2: forall o (LT: (o < o2)%ord), (add (add o0 o1) o == add o0 (add o1 o))%ord).
  { i. eapply (IH12 (o1, o)). right. auto. }
  clear IH12. destruct o0, o1, o2.
  rewrite add_red. rewrite add_red.
  rewrite Ord.union_build. rewrite Ord.union_build.
  rewrite add_red. rewrite add_red.
  rewrite Ord.union_build. rewrite Ord.union_build. split.
  { econs.
    { i. destruct a0 as [|[]].
      { exists (inl (inl a)). ss. rewrite <- IH2.
        { rewrite add_red. rewrite Ord.union_build. reflexivity. }
        { econs. reflexivity. }
      }
      { exists (inl (inr a)). ss.
        eapply IH1. econs. reflexivity. }
      { exists (inr a). ss. rewrite IH0.
        { rewrite add_red. rewrite Ord.union_build. reflexivity. }
        { econs. reflexivity. }
      }
    }
  }
  { econs.
    { i. destruct a0 as [[]|].
      { exists (inl a). ss. rewrite <- IH2.
        { rewrite add_red. rewrite Ord.union_build. reflexivity. }
        { econs. reflexivity. }
      }
      { exists (inr (inl a)). ss.
        eapply IH1. econs. reflexivity. }
      { exists (inr (inr a)). ss. rewrite IH0.
        { rewrite add_red. rewrite Ord.union_build. reflexivity. }
        { econs. reflexivity. }
      }
    }
  }
Qed.
(* Each summand is below the Hessenberg sum: left summand. *)
Lemma add_base_l o0 o1
  :
    Ord.le o0 (add o0 o1).
Proof.
  revert o1. pattern o0. revert o0.
  eapply (well_founded_induction Ord.lt_well_founded).
  i. destruct x. eapply Ord.build_supremum. i.
  eapply Ord.le_lt_lt.
  { eapply H. econs. reflexivity. }
  { eapply lt_add_l. econs. reflexivity. }
Qed.
(* ... right summand, by commutativity. *)
Lemma add_base_r o0 o1
  :
    Ord.le o1 (add o0 o1).
Proof.
  rewrite add_comm. eapply add_base_l.
Qed.
(* Hessenberg addition dominates ordinary ordinal addition. *)
Lemma arith_add_larger o0 o1
  :
    Ord.le (OrdArith.add o0 o1) (add o0 o1).
Proof.
  revert o0. pattern o1. revert o1.
  eapply (well_founded_induction Ord.lt_well_founded).
  i. destruct x. rewrite OrdArith.add_build. eapply Ord.union_spec.
  { eapply add_base_l. }
  { eapply Ord.join_supremum. i. eapply Ord.S_supremum.
    eapply Ord.le_lt_lt.
    { eapply H. econs. reflexivity. }
    { eapply lt_add_r. econs. reflexivity. }
  }
Qed.
(* Zero is a right unit for Hessenberg addition. *)
Lemma add_O_r o
  :
    Ord.eq (add o Ord.O) o.
Proof.
  pattern o. revert o. eapply (well_founded_induction Ord.lt_well_founded).
  intros o IH. split.
  { eapply add_spec.
    { i. rewrite IH; auto. }
    { i. exfalso. eapply Ord.lt_StrictOrder. eapply Ord.lt_le_lt.
      { eapply LT. }
      { eapply Ord.O_bot. }
    }
  }
  { eapply add_base_l. }
Qed.
(* Zero is a left unit, by commutativity. *)
Lemma add_O_l o
  :
    Ord.eq (add Ord.O o) o.
Proof.
  rewrite add_comm. eapply add_O_r.
Qed.
(* Successor commutes with Hessenberg addition on the right. *)
Lemma add_S_r o0 o1
  :
    Ord.eq (add o0 (Ord.S o1)) (Ord.S (add o0 o1)).
Proof.
  revert o1. pattern o0. revert o0.
  eapply (well_founded_induction Ord.lt_well_founded). i.
  split.
  { eapply add_spec.
    { i. rewrite H; auto. eapply Ord.lt_S. eapply lt_add_l; auto. }
    { i. eapply Ord.le_lt_lt; [|eapply Ord.S_lt].
      eapply Ord.S_supremum in LT. eapply le_add_r.
      eapply Ord.le_S_rev; auto.
    }
  }
  { eapply Ord.S_supremum. eapply lt_add_r. eapply Ord.S_lt. }
Qed.
(* ... and on the left, by commutativity. *)
Lemma add_S_l o0 o1
  :
    Ord.eq (add (Ord.S o0) o1) (Ord.S (add o0 o1)).
Proof.
  rewrite add_comm. rewrite add_S_r. rewrite add_comm. reflexivity.
Qed.
(* With a finite (nat) right summand, Hessenberg and ordinary ordinal
   addition coincide. *)
Lemma arith_add_from_nat o (n: nat)
  :
    Ord.eq (add o n) (OrdArith.add o n).
Proof.
  revert o. induction n; i.
  { rewrite Ord.from_nat_O.
    rewrite add_O_r. rewrite OrdArith.add_O_r. reflexivity.
  }
  { rewrite Ord.from_nat_S.
    rewrite add_S_r. rewrite OrdArith.add_S. rewrite IHn. reflexivity.
  }
Qed.
(* Hessenberg addition embeds nat addition. *)
Lemma add_from_nat n0 n1:
  Ord.eq (n0 + n1) (add (Ord.from_nat n0) (Ord.from_nat n1)).
Proof.
  induction n1; ss.
  { rewrite PeanoNat.Nat.add_0_r.
    symmetry. eapply add_O_r. }
  { rewrite PeanoNat.Nat.add_succ_r.
    rewrite Ord.from_nat_S.
    rewrite Ord.from_nat_S.
    rewrite add_S_r.
    eapply Ord.eq_S. auto.
  }
Qed.
(* Adding a non-zero right summand strictly increases. *)
Lemma add_lt_l o0 o1 (LT: Ord.lt Ord.O o1): Ord.lt o0 (add o0 o1).
Proof.
  eapply Ord.lt_le_lt.
  2:{ eapply arith_add_larger. }
  eapply OrdArith.add_lt_l. auto.
Qed.
(* Symmetric version for a non-zero left summand. *)
Lemma add_lt_r o0 o1 (LT: Ord.lt Ord.O o0): Ord.lt o1 (add o0 o1).
Proof.
  rewrite add_comm. apply add_lt_l; auto.
Qed.
End ADD.
End Hessenberg.
Global Opaque Hessenberg.add.
(* Notation for Hessenberg (natural) addition. *)
Infix "⊕" := Hessenberg.add (at level 80, right associativity) : ord_scope.
(* following: *)
(* https://arxiv.org/pdf/1501.05747.pdf *)
(* (INTERMEDIATE ARITHMETIC OPERATIONS ON ORDINAL NUMBERS - HARRY ALTMAN) *)
(* Jacobsthal multiplication: iterated Hessenberg addition, defined by
   ordinal recursion [Ord.orec] on the right argument with base [O] and
   step [Hessenberg.add o0]. *)
Module Jacobsthal.
Section MULT.
  Definition mult (o0: Ord.t): forall (o1: Ord.t), Ord.t := Ord.orec Ord.O (Hessenberg.add o0).

  (* Side conditions (inflationarity and monotonicity of the step function)
     required by the [Ord.orec_*] lemmas used below. *)
  Let _mult_gen_le := Hessenberg.add_base_r.
  Let _mult_gen_mon := Hessenberg.le_add_r.

  (* Jacobsthal multiplication dominates ordinary ordinal multiplication. *)
  Lemma arith_mult_larger o0 o1
    :
      Ord.le (OrdArith.mult o0 o1) (mult o0 o1).
  Proof.
    Local Transparent OrdArith.mult.
    eapply Ord.orec_mon.
    { reflexivity. }
    { i. rewrite LE. rewrite Hessenberg.add_comm.
      eapply Hessenberg.arith_add_larger.
    }
  Qed.
  (* Unfolding lemmas, obtained from the generic [Ord.orec_*] laws. *)
  Lemma mult_O_r o: Ord.eq (mult o Ord.O) Ord.O.
  Proof.
    eapply (@Ord.orec_O Ord.O (Hessenberg.add o)); auto.
  Qed.
  Lemma mult_S o0 o1: Ord.eq (mult o0 (Ord.S o1)) (Hessenberg.add o0 (mult o0 o1)).
  Proof.
    eapply (@Ord.orec_S Ord.O (Hessenberg.add o0)); auto.
  Qed.
  Lemma mult_join o A (os: A -> Ord.t):
    Ord.eq (mult o (Ord.join os)) (Ord.join (fun a => mult o (os a))).
  Proof.
    transitivity (Ord.union Ord.O (Ord.join (fun a => mult o (os a)))).
    { eapply (@Ord.orec_join Ord.O (Hessenberg.add _)); eauto. }
    { eapply Ord.union_max. eapply Ord.O_bot. }
  Qed.
  Lemma mult_build o A (os: A -> Ord.t)
    :
      Ord.eq (mult o (Ord.build os)) (Ord.join (fun a => Hessenberg.add o (mult o (os a)))).
  Proof.
    transitivity (Ord.union Ord.O (Ord.join (fun a => Hessenberg.add o (mult o (os a))))).
    { eapply (@Ord.orec_build Ord.O (Hessenberg.add _)); eauto. }
    { eapply Ord.union_max. eapply Ord.O_bot. }
  Qed.
  Lemma mult_union o0 o1 o2
    :
      Ord.eq (mult o0 (Ord.union o1 o2)) (Ord.union (mult o0 o1) (mult o0 o2)).
  Proof.
    eapply Ord.orec_union; auto.
  Qed.
  (* Monotonicity and congruence in the right argument. *)
  Lemma le_mult_r o0 o1 o2 (LE: Ord.le o1 o2)
    :
      Ord.le (mult o0 o1) (mult o0 o2).
  Proof.
    eapply Ord.le_orec; auto.
  Qed.
  Lemma eq_mult_r o0 o1 o2 (EQ: Ord.eq o1 o2)
    :
      Ord.eq (mult o0 o1) (mult o0 o2).
  Proof.
    split.
    - eapply le_mult_r; eauto. eapply EQ.
    - eapply le_mult_r; eauto. eapply EQ.
  Qed.
  (* Monotonicity and congruence in the left argument. *)
  Lemma le_mult_l o0 o1 o2 (LE: Ord.le o0 o1)
    :
      Ord.le (mult o0 o2) (mult o1 o2).
  Proof.
    eapply (@Ord.orec_mon Ord.O (Hessenberg.add o0) Ord.O (Hessenberg.add o1)); auto.
    { reflexivity. }
    { i. transitivity (Hessenberg.add o0 o4).
      { eapply Hessenberg.le_add_r; auto. }
      { eapply Hessenberg.le_add_l; auto. }
    }
  Qed.
  Lemma eq_mult_l o0 o1 o2 (EQ: Ord.eq o0 o1)
    :
      Ord.eq (mult o0 o2) (mult o1 o2).
  Proof.
    split.
    - eapply le_mult_l; eauto. eapply EQ.
    - eapply le_mult_l; eauto. eapply EQ.
  Qed.
  (* Strict monotonicity in the right argument, for a positive left factor. *)
  Lemma lt_mult_r o0 o1 o2 (LT: Ord.lt o1 o2) (POS: Ord.lt Ord.O o0)
    :
      Ord.lt (mult o0 o1) (mult o0 o2).
  Proof.
    eapply Ord.S_supremum in LT.
    eapply Ord.lt_le_lt.
    2: { eapply le_mult_r. eapply LT. }
    eapply Ord.lt_eq_lt.
    { eapply mult_S. }
    eapply Hessenberg.add_lt_r. auto.
  Qed.
  (* Zero annihilates on the left. *)
  Lemma mult_O_l o: Ord.eq (mult Ord.O o) Ord.O.
  Proof.
    induction o. etransitivity.
    { eapply mult_build. }
    { split.
      - eapply Ord.join_supremum. i.
        transitivity (mult Ord.O (os a)); auto.
        { eapply Hessenberg.add_O_l. }
        { eapply H. }
      - eapply Ord.O_bot. }
  Qed.
  (* One is a right and left unit. *)
  Lemma mult_1_r o: Ord.eq (mult o (Ord.S Ord.O)) o.
  Proof.
    etransitivity.
    { eapply mult_S. }
    etransitivity.
    { eapply Hessenberg.eq_add_r. eapply mult_O_r. }
    eapply Hessenberg.add_O_r.
  Qed.
  Lemma mult_1_l o: Ord.eq (mult (Ord.S Ord.O) o) o.
  Proof.
    transitivity (Ord.orec Ord.O Ord.S o).
    2: { symmetry. eapply Ord.orec_of_S. }
    split.
    { eapply Ord.orec_mon.
      { reflexivity. }
      { i. etransitivity.
        { eapply Hessenberg.add_S_l. }
        { apply Ord.le_S. transitivity o0; auto.
          eapply Hessenberg.add_O_l.
        }
      }
    }
    { eapply Ord.orec_mon.
      { reflexivity. }
      { i. etransitivity.
        { apply Ord.le_S. eapply LE. }
        transitivity (Ord.S (Hessenberg.add Ord.O o1)); auto.
        { apply Ord.le_S. eapply Hessenberg.add_O_l. }
        { eapply Hessenberg.add_S_l. }
      }
    }
  Qed.
  (* Setoid-rewriting support. *)
  Global Program Instance mult_eq_proper: Proper (Ord.eq ==> Ord.eq ==> Ord.eq) (mult).
  Next Obligation.
    ii.
    etransitivity.
    - eapply eq_mult_l; eauto.
    - eapply eq_mult_r; eauto.
  Qed.
  (* NOTE(review): misnamed — this is the [Ord.le] properness instance for
     [mult], not [add]; consider renaming to [mult_le_proper]. *)
  Global Program Instance add_le_proper: Proper (Ord.le ==> Ord.le ==> Ord.le) (mult).
  Next Obligation.
    ii.
    etransitivity.
    - eapply le_mult_l; eauto.
    - eapply le_mult_r; eauto.
  Qed.
  (* Jacobsthal multiplication embeds nat multiplication. *)
  Lemma mult_from_nat n0 n1:
    Ord.eq (n0 * n1) (mult (Ord.from_nat n0) (Ord.from_nat n1)).
  Proof.
    induction n1; ss.
    { rewrite PeanoNat.Nat.mul_0_r.
      symmetry. eapply mult_O_r. }
    { rewrite PeanoNat.Nat.mul_succ_r.
      rewrite Ord.from_nat_S.
      rewrite mult_S.
      rewrite Hessenberg.add_from_nat.
      rewrite Hessenberg.add_comm. rewrite IHn1. reflexivity.
    }
  Qed.
End MULT.
End Jacobsthal.
Global Opaque Jacobsthal.mult.
(* Notation for Jacobsthal (intermediate) multiplication. *)
Infix "×" := Jacobsthal.mult (at level 80, right associativity) : ord_scope.
|
{"author": "minkiminki", "repo": "Ordinal", "sha": "225f2f2b18ec8d65a637d964839528eeeb1829ce", "save_path": "github-repos/coq/minkiminki-Ordinal", "path": "github-repos/coq/minkiminki-Ordinal/Ordinal-225f2f2b18ec8d65a637d964839528eeeb1829ce/src/Hessenberg.v"}
|
// __BEGIN_LICENSE__
// Copyright (C) 2006-2011 United States Government as represented by
// the Administrator of the National Aeronautics and Space Administration.
// All Rights Reserved.
// __END_LICENSE__
#include <vw/Mosaic/GigapanQuadTreeConfig.h>
#include <boost/bind.hpp>
#include <boost/filesystem/path.hpp>
#include <boost/filesystem/operations.hpp>
#include <boost/filesystem/fstream.hpp>
#include <boost/filesystem/convenience.hpp>
namespace fs = boost::filesystem;
namespace vw {
namespace mosaic {
  // Private implementation data for GigapanQuadTreeConfig; its member
  // functions are handed to the QuadTreeGenerator as callbacks via
  // boost::bind (see GigapanQuadTreeConfig::configure).
  struct GigapanQuadTreeConfigData {
    // Geographic extent of the full mosaic in (lon, lat).  A zero-area box
    // means "unset" and disables the custom branch function.
    BBox2 m_longlat_bbox;

    std::vector<std::pair<std::string,vw::BBox2i> > branch_func( QuadTreeGenerator const&, std::string const& name, BBox2i const& region ) const;
    void metadata_func( QuadTreeGenerator const&, QuadTreeGenerator::TileInfo const& info ) const;
  public:
    GigapanQuadTreeConfigData() : m_longlat_bbox(0, 0, 0, 0) {}
  };
  // Construct a config with unset long/lat bounds; the custom branch
  // function stays disabled until set_longlat_bbox() is called.
  GigapanQuadTreeConfig::GigapanQuadTreeConfig()
    : m_data( new GigapanQuadTreeConfigData() )
  {}

  // Tile path naming: delegate to the generator's standard tiered layout.
  std::string GigapanQuadTreeConfig::image_path( QuadTreeGenerator const& qtree, std::string const& name ) {
    return QuadTreeGenerator::tiered_image_path()(qtree, name);
  }
  // Install the Gigapan-specific callbacks on the generator.  The custom
  // branch function is only installed when a non-degenerate long/lat box
  // has been supplied (it needs the box for its latitude computations).
  void GigapanQuadTreeConfig::configure( QuadTreeGenerator& qtree ) const {
    qtree.set_image_path_func( &image_path );
    qtree.set_cull_images( true );
    qtree.set_metadata_func( boost::bind(&GigapanQuadTreeConfigData::metadata_func,m_data,_1,_2) );

    if (m_data->m_longlat_bbox.width() != 0 || m_data->m_longlat_bbox.height() != 0) {
      qtree.set_branch_func( boost::bind(&GigapanQuadTreeConfigData::branch_func,m_data,_1,_2,_3) );
    }
  }

  // Record the geographic extent of the mosaic; must be called before
  // configure() for the latitude-aware branching to take effect.
  void GigapanQuadTreeConfig::set_longlat_bbox( BBox2 const& bbox ) {
    m_data->m_longlat_bbox = bbox;
  }
  // Decide how a tile region is subdivided.  Normally a region splits into
  // the four quadrant children "0".."3"; near the poles, where a degree of
  // longitude shrinks by 1/cos(lat), the two top (or bottom) quadrants are
  // merged into a single half-height child "4" (or "5") to keep tile aspect
  // ratios reasonable.  Returns an empty list for leaf regions.
  std::vector<std::pair<std::string,vw::BBox2i> > GigapanQuadTreeConfigData::branch_func( QuadTreeGenerator const& qtree, std::string const& name, BBox2i const& region ) const {
    std::vector<std::pair<std::string,vw::BBox2i> > children;

    if( region.height() > qtree.get_tile_size() ) {
      Vector2i dims = qtree.get_dimensions();
      // Aspect ratio of a child tile in degrees-per-pixel terms.
      // NOTE(review): assumes m_longlat_bbox is in degrees with y = latitude
      // and that pixel rows run from north (min y) to south (max y) — the
      // max().y()/min().y() usage below depends on this; confirm.
      double aspect_ratio = 2 * (region.width()/region.height()) * ( (m_longlat_bbox.width()/dims.x()) / (m_longlat_bbox.height()/dims.y()) );

      double bottom_lat = m_longlat_bbox.max().y() - region.max().y()*m_longlat_bbox.height() / dims.y();
      double top_lat    = m_longlat_bbox.max().y() - region.min().y()*m_longlat_bbox.height() / dims.y();
      // Merge when the whole region sits in one hemisphere and the
      // longitude-compression factor at its equator-most edge exceeds the
      // tile aspect ratio.
      bool top_merge    = ( bottom_lat > 0 ) && ( ( 1.0 / cos(M_PI/180 * bottom_lat) ) > aspect_ratio );
      bool bottom_merge = ( top_lat    < 0 ) && ( ( 1.0 / cos(M_PI/180 * top_lat   ) ) > aspect_ratio );

      if( top_merge ) {
        // Single half-height child covering the top half of the region.
        children.push_back( std::make_pair( name + "4", BBox2i( region.min(), region.max() - Vector2i(0,region.height()/2) ) ) );
      }
      else {
        // Standard top-left ("0") and top-right ("1") quadrants.
        children.push_back( std::make_pair( name + "0", BBox2i( (region + region.min()) / 2 ) ) );
        children.push_back( std::make_pair( name + "1", BBox2i( (region + Vector2i(region.max().x(),region.min().y())) / 2 ) ) );
      }
      if( bottom_merge ) {
        // Single half-height child covering the bottom half of the region.
        children.push_back( std::make_pair( name + "5", BBox2i( region.min() + Vector2i(0,region.height()/2), region.max() ) ) );
      }
      else {
        // Standard bottom-left ("2") and bottom-right ("3") quadrants.
        children.push_back( std::make_pair( name + "2", BBox2i( (region + Vector2i(region.min().x(),region.max().y())) / 2 ) ) );
        children.push_back( std::make_pair( name + "3", BBox2i( (region + region.max()) / 2 ) ) );
      }
    }

    return children;
  }
  // Per-tile metadata callback.  Only acts on the root tile (empty name):
  // writes a small JSON file next to the root image describing the mosaic
  // dimensions and tree depth, as consumed by the Gigapan viewer.
  void GigapanQuadTreeConfigData::metadata_func( QuadTreeGenerator const& qtree, QuadTreeGenerator::TileInfo const& info ) const {
    bool root_node = ( info.name.size() == 0 );

    if ( root_node) {
      std::ostringstream json;
      // NOTE(review): fs::native and the free change_extension() are
      // Boost.Filesystem v2 APIs removed in v3 — this will not build
      // against modern Boost; confirm the required Boost version.
      fs::path file_path( info.filepath, fs::native );
      fs::path json_path = change_extension( file_path, ".json" );

      json << "{" << std::endl
           << " \"width\": " << qtree.get_dimensions()[0] << "," << std::endl
           << " \"height\": " << qtree.get_dimensions()[1] << "," << std::endl
           << " \"nlevels\": " << qtree.get_tree_levels() << std::endl
           << "}" << std::endl;

      fs::ofstream jsonfs(json_path);
      jsonfs << json.str();
    }
  }
// TODO: Is this actually the right function for Gigapan?
cartography::GeoReference GigapanQuadTreeConfig::output_georef(uint32 xresolution, uint32 yresolution) {
if (yresolution == 0)
yresolution = xresolution;
VW_ASSERT(xresolution == yresolution, LogicErr() << "TMS requires square pixels");
cartography::GeoReference r;
r.set_pixel_interpretation(cartography::GeoReference::PixelAsArea);
// Note: the global TMS pixel space extends from +270 to -90
// latitude, so that the lower-left hand corner is tile-
// aligned, since TMS uses an origin in the lower left.
Matrix3x3 transform;
transform(0,0) = 360.0 / xresolution;
transform(0,2) = -180;
transform(1,1) = -360.0 / yresolution;
transform(1,2) = 270;
transform(2,2) = 1;
r.set_transform(transform);
return r;
}
} // namespace mosaic
} // namespace vw
|
{"hexsha": "66fc1ae8e1e8793148115791883df0745f64e44b", "size": 5128, "ext": "cc", "lang": "C++", "max_stars_repo_path": "src/vw/Mosaic/GigapanQuadTreeConfig.cc", "max_stars_repo_name": "digimatronics/ComputerVision", "max_stars_repo_head_hexsha": "2af5da17dfd277f0cb3f19a97e3d49ba19cc9d24", "max_stars_repo_licenses": ["NASA-1.3"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2021-05-16T23:57:32.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-16T23:57:32.000Z", "max_issues_repo_path": "src/vw/Mosaic/GigapanQuadTreeConfig.cc", "max_issues_repo_name": "rkrishnasanka/visionworkbench", "max_issues_repo_head_hexsha": "2af5da17dfd277f0cb3f19a97e3d49ba19cc9d24", "max_issues_repo_licenses": ["NASA-1.3"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/vw/Mosaic/GigapanQuadTreeConfig.cc", "max_forks_repo_name": "rkrishnasanka/visionworkbench", "max_forks_repo_head_hexsha": "2af5da17dfd277f0cb3f19a97e3d49ba19cc9d24", "max_forks_repo_licenses": ["NASA-1.3"], "max_forks_count": 2.0, "max_forks_repo_forks_event_min_datetime": "2017-03-18T04:06:32.000Z", "max_forks_repo_forks_event_max_datetime": "2019-01-17T10:34:39.000Z", "avg_line_length": 39.7519379845, "max_line_length": 177, "alphanum_fraction": 0.6560062402, "num_tokens": 1446}
|
# This file is a part of BAT.jl, licensed under the MIT License (MIT).
"""
NamedTupleTransform <: VariateTransform
*BAT-internal, not part of stable public API.*
"""
struct NamedTupleTransform{
names,
ST <: VariateSpace,
SF <: VariateSpace,
N,
TT <: NTuple{N,VariateTransform{<:VariateForm}},
AT <: NTuple{N,ValueShapes.ValueAccessor},
} <: VariateTransform{ValueShapes.NamedTupleVariate{names},ST,SF}
_internal_transforms::NamedTuple{names,TT}
_internal_shape::NamedTupleShape{names,AT}
end
# Extract the accessor-tuple type parameter `AT` from a `NamedTupleShape`.
function _nt_acc_nt_type(::NamedTupleShape{names,AT}) where {names,AT}
    return AT
end
# Build a `NamedTupleTransform` from a named tuple of component transforms.
# The target/source spaces are the products of the component spaces and the
# variate shape is the `NamedTupleShape` of the component shapes.
function NamedTupleTransform(transforms::NamedTuple{names,<:NTuple{N,VariateTransform}}) where {names,N}
    trg = product_varspace(map(target_space, values(transforms))...)
    src = product_varspace(map(source_space, values(transforms))...)
    shape = NamedTupleShape(map(varshape, transforms))
    ST = typeof(trg)
    SF = typeof(src)
    TT = typeof(transforms)
    AT = _nt_acc_nt_type(shape)
    # Bug fix: the fourth struct type parameter is the tuple length `N`;
    # the previous code passed a stray, undefined `T`, which made every
    # call to this constructor throw an UndefVarError.
    NamedTupleTransform{names,ST,SF,N,TT,AT}(transforms, shape)
end
# Keyword convenience constructor: `NamedTupleTransform(a = t1, b = t2, ...)`.
@inline NamedTupleTransform(;named_dists...) = NamedTupleTransform(values(named_dists))

# Internal field accessors — use these rather than `getfield` directly.
@inline _transforms(d::NamedTupleTransform) = getfield(d, :_internal_transforms)
@inline _shape(d::NamedTupleTransform) = getfield(d, :_internal_shape)

# Key/value iteration delegates to the component transforms.
@inline Base.keys(d::NamedTupleTransform) = keys(_transforms(d))
@inline Base.values(d::NamedTupleTransform) = values(_transforms(d))
# Forward property access to the component transforms, while still exposing
# the two internal fields by name.
@inline function Base.getproperty(d::NamedTupleTransform, s::Symbol)
    # Need to include internal fields of NamedTupleShape to make Zygote happy:
    if s == :_internal_transforms
        getfield(d, :_internal_transforms)
    elseif s == :_internal_shape
        getfield(d, :_internal_shape)
    else
        getproperty(_transforms(d), s)
    end
end
# Property names of the component transforms; with `private = true` the two
# internal fields are appended as well.
@inline function Base.propertynames(d::NamedTupleTransform, private::Bool = false)
    public_names = propertynames(_transforms(d))
    private || return public_names
    return (public_names..., :_internal_transforms, :_internal_shape)
end
# map/merge mirror the corresponding NamedTuple operations on the components.
@inline Base.map(f, dist::NamedTupleTransform) = map(f, _transforms(dist))

Base.merge(a::NamedTuple, dist::NamedTupleTransform{names}) where {names} = merge(a, _transforms(dist))
Base.merge(a::NamedTupleTransform) = a
Base.merge(a::NamedTupleTransform, b::NamedTupleTransform, cs::NamedTupleTransform...) = merge(NamedTupleTransform(;a..., b...), cs...)

ValueShapes.varshape(trafo::NamedTupleTransform) = _shape(trafo)

# NOTE(review): `trafo.target_space` goes through `getproperty` and so looks
# up `:target_space` on the component NamedTuple, which has no such field —
# this likely errors; implementation is marked unfinished below, confirm intent.
target_space(trafo::NamedTupleTransform) = trafo.target_space
# NOTE(review): `MixedSpace` is not defined in this file — presumably
# provided elsewhere in BAT; verify.
source_space(trafo::NamedTupleTransform) = MixedSpace
# trafo_fwd(trafo::VariateTransformChain, v::Any) = ...
# trafo_inv(trafo::VariateTransformChain, v::Any) = ...
# ToDo: Finish implementation
|
{"hexsha": "27cdefa79db64cfb4d54a568a0e125bf6a5baa86", "size": 2736, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/transforms/named_tuple_transform.jl", "max_stars_repo_name": "Cornelius-G/BAT.jl", "max_stars_repo_head_hexsha": "1bb577c8d976066c1f52070984d86020728f599c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-01-09T06:50:18.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-15T17:09:42.000Z", "max_issues_repo_path": "src/transforms/named_tuple_transform.jl", "max_issues_repo_name": "Cornelius-G/BAT.jl", "max_issues_repo_head_hexsha": "1bb577c8d976066c1f52070984d86020728f599c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/transforms/named_tuple_transform.jl", "max_forks_repo_name": "Cornelius-G/BAT.jl", "max_forks_repo_head_hexsha": "1bb577c8d976066c1f52070984d86020728f599c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.0659340659, "max_line_length": 135, "alphanum_fraction": 0.7295321637, "num_tokens": 714}
|
# Loading the required modules
import numpy as np
from scipy.spatial.distance import cdist
# Defining our function
def kmeans(x, k, no_of_iterations):
    """Cluster the rows of ``x`` into ``k`` clusters with Lloyd's algorithm.

    Parameters
    ----------
    x : numpy.ndarray
        Data matrix of shape (n_samples, n_features).
    k : int
        Number of clusters; must be <= n_samples.
    no_of_iterations : int
        Number of assign/update iterations to run.

    Returns
    -------
    numpy.ndarray
        Cluster index in ``0..k-1`` for each row of ``x``.

    Notes
    -----
    Initial centroids are drawn uniformly at random without replacement
    from the rows of ``x``; seed ``numpy.random`` for reproducibility.
    """
    # Step 1: pick k distinct data points as the initial centroids.
    idx = np.random.choice(len(x), k, replace=False)
    centroids = x[idx, :]

    # Steps 2-3: assign every point to its nearest centroid (Euclidean).
    distances = cdist(x, centroids, 'euclidean')
    points = distances.argmin(axis=1)

    # Step 4: alternate centroid update and re-assignment.
    for _ in range(no_of_iterations):
        new_centroids = []
        for cluster in range(k):
            members = x[points == cluster]
            if len(members):
                new_centroids.append(members.mean(axis=0))
            else:
                # A cluster may lose all its points; keep its previous
                # centroid instead of producing a NaN mean.
                new_centroids.append(centroids[cluster])
        centroids = np.vstack(new_centroids)

        distances = cdist(x, centroids, 'euclidean')
        points = distances.argmin(axis=1)

    return points
|
{"hexsha": "fc6fd9168bc401840eccb166dcba4b5da51e7163", "size": 1179, "ext": "py", "lang": "Python", "max_stars_repo_path": "kmean.py", "max_stars_repo_name": "gitgeoman/Astar_algorigtm", "max_stars_repo_head_hexsha": "93eaf7b27a42a392f1d5b1f5f928b26f77c08696", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "kmean.py", "max_issues_repo_name": "gitgeoman/Astar_algorigtm", "max_issues_repo_head_hexsha": "93eaf7b27a42a392f1d5b1f5f928b26f77c08696", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "kmean.py", "max_forks_repo_name": "gitgeoman/Astar_algorigtm", "max_forks_repo_head_hexsha": "93eaf7b27a42a392f1d5b1f5f928b26f77c08696", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.75, "max_line_length": 80, "alphanum_fraction": 0.6658184902, "include": true, "reason": "import numpy,from scipy", "num_tokens": 300}
|
# -*- coding: utf-8 -*-
"""Provides an ability to serialise Stone Soup objects into and from YAML.
Stone Soup utilises YAML_ for serialisation. The :doc:`stonesoup.base`
feature of components is exploited in order to store the data of the
components and data types.
This module functions as a plug-in for ruamel.yaml_, specified by
:code:`typ='stonesoup'`, but for convenience it is recommended to
use :class:`~.stonesoup.serialise.YAML` which defaults with the plug-in
enabled.
It is also possible to extend the serialisation for other types with
Stone Soup, via `stonesoup.serialise.yaml` entry point, typically
expected to be used with :mod:`stonesoup.plugins`. The entry point
should point to a function which expects a single argument, a
:class:`ruamel.yaml.YAML` instance.
For example:
.. code-block:: python
setup(
...
entry_points={
'stonesoup.plugins': 'my_plugin = my_package',
'stonesoup.serialise.yaml': 'my_plugin = my_package:yaml_init_func}
...
)
.. _YAML: http://yaml.org/
.. _ruamel.yaml: https://yaml.readthedocs.io/
"""
import datetime
import warnings
from io import StringIO
from collections import OrderedDict, deque
from functools import lru_cache
from pathlib import Path
from importlib import import_module
import numpy as np
import pkg_resources
import ruamel.yaml
from ruamel.yaml.constructor import ConstructorError
from .base import Base, Property
from .types.angle import Angle
from .types.array import Matrix, StateVector
from .types.numeric import Probability
from .sensor.sensor import Sensor
__all__ = ['YAML']
typ = 'stonesoup'
def init_typ(yaml):
    """Register Stone Soup's (de)serialisation handlers on *yaml*.

    This is the plug-in initialisation hook invoked by ruamel.yaml when a
    :class:`ruamel.yaml.YAML` instance is created with ``typ='stonesoup'``.
    Third-party extensions registered on the ``stonesoup.serialise.yaml``
    entry point are loaded first, then handlers for NumPy, datetime, path,
    deque and the Stone Soup types are added.
    """
    # Load additional custom serialisation
    for entry_point in pkg_resources.iter_entry_points('stonesoup.serialise.yaml'):
        try:
            entry_point.load()(yaml)
        except (ImportError, ModuleNotFoundError) as e:
            # A broken plug-in should not stop YAML support from loading.
            warnings.warn(f'Failed to load module. {e}')
    # NumPy
    yaml.representer.add_multi_representer(np.ndarray, ndarray_to_yaml)
    yaml.constructor.add_constructor("!numpy.ndarray", ndarray_from_yaml)
    # NumPy scalar types serialise via the plain Python int/float representers.
    yaml.representer.add_multi_representer(np.integer, yaml.representer.yaml_representers[int])
    yaml.representer.add_multi_representer(np.floating, yaml.representer.yaml_representers[float])
    # Datetime
    yaml.representer.add_representer(datetime.timedelta, timedelta_to_yaml)
    yaml.constructor.add_constructor("!datetime.timedelta", timedelta_from_yaml)
    # Path
    yaml.representer.add_multi_representer(Path, path_to_yaml)
    yaml.constructor.add_constructor("!pathlib.Path", path_from_yaml)
    # deque
    yaml.representer.add_representer(deque, deque_to_yaml)
    yaml.constructor.add_constructor("!collections.deque", deque_from_yaml)
    # Probability
    yaml.representer.add_representer(Probability, probability_to_yaml)
    yaml.constructor.add_constructor(yaml_tag(Probability), probability_from_yaml)
    # Angle
    yaml.representer.add_multi_representer(Angle, angle_to_yaml)
    yaml.constructor.add_multi_constructor('!stonesoup.types.angle.', angle_from_yaml)
    # Array
    yaml.representer.add_multi_representer(Matrix, ndarray_to_yaml)
    yaml.constructor.add_multi_constructor('!stonesoup.types.array.', array_from_yaml)
    # Declarative classes
    yaml.representer.add_multi_representer(Base, declarative_to_yaml)
    yaml.constructor.add_multi_constructor('!stonesoup.', declarative_from_yaml)
class YAML(ruamel.yaml.YAML):
    """Class for YAML serialisation in Stone Soup.

    A thin wrapper around :class:`ruamel.yaml.YAML` which always enables
    the ``'stonesoup'`` plug-in (see :func:`init_typ`) in addition to any
    types requested by the caller (default round-trip, ``'rt'``).
    """

    def __init__(self, **kwargs):
        typ = kwargs.pop('typ', ['rt'])
        if isinstance(typ, str):
            typ = [typ]
        else:
            # Copy so a caller-supplied list isn't mutated by the append.
            typ = list(typ)
        if 'stonesoup' not in typ:
            typ.append('stonesoup')
        # Same copy-don't-mutate treatment for caller-supplied plug-ins.
        plug_ins = list(kwargs.get('plug_ins') or [])
        if 'stonesoup.serialise' not in plug_ins:
            plug_ins.append('stonesoup.serialise')
        kwargs['plug_ins'] = plug_ins
        super().__init__(typ=typ, **kwargs)
        self.representer.default_flow_style = False
        self.representer.sort_base_mapping_type_on_output = False

    def dumps(self, data, *args, **kwargs):
        """Serialise *data* and return the YAML document as a string."""
        stream = StringIO()
        self.dump(data, stream, *args, **kwargs)
        return stream.getvalue()
def yaml_tag(class_):
    """Return the YAML tag for a class.

    The tag is the class's fully qualified name — module plus qualified
    class name — prefixed with ``!``."""
    return "!{}.{}".format(class_.__module__, class_.__qualname__)
def declarative_to_yaml(representer, node):
    """Convert declarative class instances to YAML.

    Store as an ordered mapping of declared properties, skipping any which
    are still at their default value."""
    node_properties = OrderedDict(type(node).properties)
    # Special case of a sensor with a default platform
    if isinstance(node, Sensor) and node._has_internal_controller:
        # Also persist the internally-controlled position/orientation so the
        # sensor can be reconstructed without its platform.
        node_properties['position'] = Property(StateVector)
        node_properties['orientation'] = Property(StateVector)
    return representer.represent_omap(
        yaml_tag(type(node)),
        OrderedDict((name, getattr(node, name))
                    for name, property_ in node_properties.items()
                    if getattr(node, name) is not property_.default))
def declarative_from_yaml(constructor, tag_suffix, node):
    """Convert YAML to declarative class instances.

    The tag suffix (everything after ``!stonesoup.``) identifies the class;
    the node's ordered mapping supplies the keyword arguments.  Raises
    :class:`ruamel.yaml.constructor.ConstructorError` if the class cannot
    be imported or instantiation fails."""
    try:
        class_ = get_class(f'!stonesoup.{tag_suffix}')
    except ImportError:
        raise ConstructorError(
            "while constructing a Stone Soup component", node.start_mark,
            f"unable to import component 'stonesoup.{tag_suffix}'", node.start_mark)
    # Must have deep construct here to ensure mutable sub-objects are fully created.
    constructor.deep_construct = True
    properties = [
        data
        for data in constructor.construct_yaml_omap(node)][0]
    try:
        return class_(**properties)
    except Exception as e:
        raise ConstructorError("while constructing Stone Soup component",
                               node.start_mark, str(e), node.start_mark)
@lru_cache(None)
def get_class(tag):
    """Resolve a YAML tag to the class it names.

    Looks first among the registered ``Base`` subclasses; if none match,
    falls back to importing the module named in the tag.  Raises
    :class:`ImportError` if the class cannot be found.  Results are cached.
    """
    classes = [
        subclass
        for subclass in Base.subclasses
        if yaml_tag(subclass) == tag]
    if len(classes) > 1:
        # Ambiguous tag: proceed with the first match but warn the user.
        warnings.warn(
            f"Multiple possible classes found for YAML tag {tag!r}", UserWarning)
    elif not classes:
        module_name, class_name = tag.lstrip('!').rsplit(".", 1)
        module = import_module(module_name)
        classes = [getattr(module, class_name, None)]
    if classes[0] is None:
        raise ImportError(f"Unable to find {tag!r}")
    return classes[0]
def probability_to_yaml(representer, node):
    """Convert :class:`~.Probability` to YAML, stored as its string form."""
    return representer.represent_scalar(yaml_tag(type(node)), str(node))


def probability_from_yaml(constructor, node):
    """Convert YAML to :class:`~.Probability`.

    Accepts either a plain float string or the ``exp(x)`` form produced for
    log-stored probabilities."""
    string = constructor.construct_scalar(node)
    if string.startswith('exp('):
        return Probability(float(string[4:-1]), log_value=True)
    else:
        return Probability(float(string))
def angle_to_yaml(representer, node):
    """Convert :class:`~.Angle` subclasses to YAML, stored as the value string."""
    return representer.represent_scalar(yaml_tag(type(node)), str(node))


def angle_from_yaml(constructor, tag_suffix, node):
    """Convert YAML to the :class:`~.Angle` subclass named by *tag_suffix*."""
    class_ = get_class(f'!stonesoup.types.angle.{tag_suffix}')
    return class_(float(constructor.construct_scalar(node)))
def ndarray_to_yaml(representer, node):
    """Convert numpy.ndarray (and subclasses) to YAML, stored as nested lists."""
    # If using "round trip" type, change flow style to make more readable
    if node.ndim > 1 and 'rt' in representer.dumper.typ:
        # Represent each row as an inline (flow-style) sequence.
        array = [representer.dumper.seq(row) for row in node.tolist()]
        [seq.fa.set_flow_style() for seq in array]
    else:
        array = node.tolist()
    return representer.represent_sequence(yaml_tag(type(node)), array)
def ndarray_from_yaml(constructor, node):
    """Convert YAML to numpy.ndarray."""
    # Deep construction ensures nested row sequences are fully built first.
    return np.array(constructor.construct_sequence(node, deep=True))


def array_from_yaml(constructor, tag_suffix, node):
    """Convert YAML to the stonesoup array type named by *tag_suffix*
    (e.g. :class:`~.Matrix` or :class:`~.StateVector`)."""
    class_ = get_class(f'!stonesoup.types.array.{tag_suffix}')
    return class_(constructor.construct_sequence(node, deep=True))
def timedelta_to_yaml(representer, node):
    """Convert datetime.timedelta to YAML.

    Value is total number of seconds."""
    return representer.represent_scalar("!datetime.timedelta", str(node.total_seconds()))


def timedelta_from_yaml(constructor, node):
    """Convert YAML to datetime.timedelta.

    Value should be total number of seconds."""
    return datetime.timedelta(seconds=float(constructor.construct_scalar(node)))
def path_to_yaml(representer, node):
    """Convert pathlib.Path to YAML.

    Value is the string form of the path."""
    return representer.represent_scalar("!pathlib.Path", str(node))


def path_from_yaml(constructor, node):
    """Convert YAML to pathlib.Path.

    Value should be the string form of the path."""
    return Path(constructor.construct_scalar(node))
def deque_to_yaml(representer, node):
    """Convert collections.deque to YAML.

    Stored as a two-element sequence: the items and ``maxlen``."""
    return representer.represent_sequence("!collections.deque", (list(node), node.maxlen))


def deque_from_yaml(constructor, node):
    """Convert YAML to collections.deque."""
    iterable, maxlen = constructor.construct_sequence(node, deep=True)
    return deque(iterable, maxlen)
|
{"hexsha": "d9ff647e5222be23f4b20e59b74e00dc7c81cbe9", "size": 9143, "ext": "py", "lang": "Python", "max_stars_repo_path": "stonesoup/serialise.py", "max_stars_repo_name": "io8ex/Stone-Soup", "max_stars_repo_head_hexsha": "071abc8f6004296ab35094db04c7ec410103c419", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 157, "max_stars_repo_stars_event_min_datetime": "2019-04-14T20:43:11.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T08:30:33.000Z", "max_issues_repo_path": "stonesoup/serialise.py", "max_issues_repo_name": "io8ex/Stone-Soup", "max_issues_repo_head_hexsha": "071abc8f6004296ab35094db04c7ec410103c419", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 364, "max_issues_repo_issues_event_min_datetime": "2019-04-18T15:54:49.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T09:50:02.000Z", "max_forks_repo_path": "stonesoup/serialise.py", "max_forks_repo_name": "io8ex/Stone-Soup", "max_forks_repo_head_hexsha": "071abc8f6004296ab35094db04c7ec410103c419", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 86, "max_forks_repo_forks_event_min_datetime": "2019-04-20T02:01:18.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-28T01:03:11.000Z", "avg_line_length": 34.3721804511, "max_line_length": 98, "alphanum_fraction": 0.7138794706, "include": true, "reason": "import numpy", "num_tokens": 2001}
|
#!/usr/bin/python3
# -*- coding: UTF-8 -*-
import numpy as np
import cv2
from tensorflow.python.platform import test
from tensorflow.python.framework import test_util
from custom_helper_op import DepthProjectLayer
import tensorflow as tf
from scipy import interpolate
class DepthProjectTest(test_util.parameterized.TestCase):
  """Checks DepthProjectLayer against a NumPy/SciPy reference implementation."""

  @test_util.parameterized.parameters(
      {'batch_size': 3, 'out_shape': [5, 6]},
      {'batch_size': 4, 'out_shape': [7, 8]},
      {'batch_size': 1, 'out_shape': [7, 8]},
      {'batch_size': 3, 'out_shape': [100, 200]},
  )
  def testIndexInitializer(self, batch_size=5, out_shape = [3, 4], out_channel=5):
    layer = DepthProjectLayer()
    height, width = out_shape[0], out_shape[1]
    # Random feature map, per-pixel depth, and a 3x4 projection per batch item.
    feature = np.random.random([batch_size, height, width, out_channel]).astype(np.float32)
    depth = np.random.random([batch_size, height, width]).astype(np.float32)
    project = np.random.random([batch_size, 3, 4]).astype(np.float32)
    # Reference projected pixel coordinates, computed point by point:
    # scale the homogeneous pixel by its depth, project, then dehomogenise.
    expected_index = np.empty([batch_size, height, width, 2], dtype=np.float32)
    for b in range(batch_size):
      for row in range(height):
        for col in range(width):
          pix = np.array([col, row, 1])*depth[b, row, col]
          homo = np.array([pix[0], pix[1], pix[2], 1])
          proj = project[b].dot(homo)
          expected_index[b, row, col, 0] = proj[0]/proj[2]
          expected_index[b, row, col, 1] = proj[1]/proj[2]
    # Reference feature values: bilinear sampling via scipy's interp2d,
    # zero fill outside the source grid.
    expected_feature = np.empty([batch_size, height, width, out_channel], dtype=np.float32)
    for b in range(batch_size):
      for c in range(out_channel):
        f = interpolate.interp2d(list(range(width)), list(range(height)), feature[b, :, :, c], fill_value=0)
        for row in range(height):
          for col in range(width):
            expected_feature[b, row, col, c] = f(expected_index[b, row, col, 0], expected_index[b, row, col, 1])
    predicted = layer((feature, depth, project))
    # Border pixels are excluded from the comparison.
    np.testing.assert_allclose(expected_feature[:, 1:-1, 1:-1, :], predicted.numpy()[:, 1:-1, 1:-1, :], rtol=1e-5)
if __name__ == '__main__':
    test.main()  # run the TensorFlow test runner over this module's cases
|
{"hexsha": "d3855aa6f62d028eeb237ed93e2733b724101908", "size": 2131, "ext": "py", "lang": "Python", "max_stars_repo_path": "custom_helper_op/python/layers/depth_projection_layer_test.py", "max_stars_repo_name": "zhuimeng999/custom_helper_op", "max_stars_repo_head_hexsha": "439c01a9112160ab0a1589454393139d213dcc63", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "custom_helper_op/python/layers/depth_projection_layer_test.py", "max_issues_repo_name": "zhuimeng999/custom_helper_op", "max_issues_repo_head_hexsha": "439c01a9112160ab0a1589454393139d213dcc63", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "custom_helper_op/python/layers/depth_projection_layer_test.py", "max_forks_repo_name": "zhuimeng999/custom_helper_op", "max_forks_repo_head_hexsha": "439c01a9112160ab0a1589454393139d213dcc63", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.3958333333, "max_line_length": 127, "alphanum_fraction": 0.6602534022, "include": true, "reason": "import numpy,from scipy", "num_tokens": 627}
|
"""
apply_axis!(g, a)
Internal function to apply an `Axis` object `a` in a GLE context.
"""
function apply_axis!(g::GLE, a::Axis, parent_font::String)
parent_font = ifelse(isdef(a.textstyle.font), a.textstyle.font, parent_font)
apply_ticks!(g, a.ticks, a.prefix, parent_font)
if isdef(a.title)
apply_title!(g, a.title, a.prefix, parent_font)
end
# XXX subticks disabled for now
"\n\t$(a.prefix)subticks off" |> g
#
"\n\t$(a.prefix)axis" |> g
a.off && ("off" |> g; return nothing)
a.log && "log" |> g
isdef(a.base) && "base $(a.base)" |> g
isdef(a.lwidth) && "lwidth $(a.lwidth)" |> g
isdef(a.min) && "min $(a.min)" |> g
isdef(a.max) && "max $(a.max)" |> g
a.ticks.grid && "grid" |> g
apply_textstyle!(g, a.textstyle, parent_font)
return nothing
end
"""
apply_axes!(g, a, figid)
Internal function to apply an `Axes2D` object `a` in a GLE context.
The `figid` is useful to keep track of the figure the axes belong to
which is required in the `apply_drawings` subroutine that is called.
"""
function apply_axes!(g::GLE, a::Axes2D, figid::String, axidx::Int)
a.off && return nothing
isdef(a.origin) && "\namove $(a.origin[1]) $(a.origin[2])" |> g
if a.scale != ""
scale = ifelse(isdef(a.origin), "fullsize", "scale $(a.scale)")
end
# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
"\nbegin graph\n\t$scale" |> g
# graph >> math mode (crossing axis)
a.math && "\n\tmath" |> g
# -- size of the axes, see also layout
isdef(a.size) && "\n\tsize $(a.size[1]) $(a.size[2])" |> g
# graph >> apply axis (ticks, ...), passing the figure font as parent font (see issue #76)
parent_font = Figure(figid; _noreset=true).textstyle.font
for axis in (a.xaxis, a.x2axis, a.yaxis, a.y2axis)
apply_axis!(g, axis, parent_font)
end
# graph >> apply axes title, passing the figure font as parent font
isdef(a.title) && apply_title!(g, a.title, "", parent_font)
# graph >> apply drawings
apply_drawings!(g, a.drawings, figid, axidx)
"\nend graph" |> g
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
# apply legend and other floating objects
isdef(a.legend) && apply_legend!(g, a.legend, parent_font, figid)
isempty(a.objects) || apply_objects!(g, a.objects, figid)
return nothing
end
"""
    apply_axes!(g, a, figid, axidx)

Internal function to apply an `Axes3D` object `a` in a GLE context.
The axes are written as a named GLE `object` containing a `surface` block,
then drawn centered on the axes' origin (if given) or on the page center.
"""
function apply_axes!(g::GLE, a::Axes3D, figid::String, axidx::Int)
    # TODO
    # -- title
    # -- perspective
    a.off && return nothing
    # Name the enclosing GLE object after the axes' hash so it is unique.
    axid = "a3d_$(hash(a))"
    #
    # begin object ax3d_hash
    #  begin surface
    #    size x y
    #    cube xlen 10 ylen 10 zlen 10 lstyle 9 color blue
    #    xaxis ...
    #    xtitle ...
    #    yaxis ...
    #    ytitle ...
    #    zaxis ...
    #    ztitle ...
    #    (surface)
    #  end surface
    #  (objects)
    # end object
    #
    # amove (appropriate location)
    # draw ax3d_hash.cc
    #
    "\nbegin object $axid" |> g
    "\n\tbegin surface" |> g
    "\n\t\tsize $(a.size[1]) $(a.size[2])" |> g
    # ------------------------------------------
    # CUBE
    "\n\t\tcube" |> g
    a.nocube && "off" |> g
    "xlen $(a.cubedims[1]) ylen $(a.cubedims[2]) zlen $(a.cubedims[3])" |> g
    a.nocube || apply_linestyle!(g, a.linestyle)
    # TODO should become apply_axis3 or something
    # dticks = one fifth of the axis span, rounded to one decimal place.
    # NOTE(review): rounds to 0.0 for spans < 0.25 — confirm GLE copes.
    δx = round((a.xaxis.max - a.xaxis.min)/5, digits=1)
    "\n\t\txaxis min $(a.xaxis.min) max $(a.xaxis.max) dticks $δx" |> g
    δy = round((a.yaxis.max - a.yaxis.min)/5, digits=1)
    "\n\t\tyaxis min $(a.yaxis.min) max $(a.yaxis.max) dticks $δy" |> g
    δz = round((a.zaxis.max - a.zaxis.min)/5, digits=1)
    "\n\t\tzaxis min $(a.zaxis.min) max $(a.zaxis.max) dticks $δz" |> g
    # ROTATION
    # BUG FIX: the guard previously tested `a.rotate` while both branches read
    # `a.rotation`; test the field actually used (TODO: confirm against the
    # Axes3D struct definition).
    if isdef(a.rotation)
        "\n\t\trotate $(a.rotation[1]) $(a.rotation[2]) 0" |> g
    else
        # default viewing angles
        "\n\t\trotate 65 20 0" |> g
    end
    # XXX AXIS
    # parent_font = Figure(figid; _noreset=true).textstyle.font
    # for axis in (a.xaxis, a.yaxis, a.zaxis)
    #     apply_axis!(g, axis, parent_font)
    # end
    # SURFACE
    if isempty(a.drawings) || all(d->!isa(d, Surface), a.drawings)
        # NOTE if there is no surface, we MUST add dummy data otherwise ghostscript crashes.
        fd = joinpath(GP_ENV["TMP_PATH"], "$(figid)_dummy.z")
        write(fd, "! nx 2 ny 2 xmin 1 xmax 2 ymin 1 ymax 2\n1 2\n2 2\n")
        "\n\t\tdata \"$fd\"" |> g
        "\n\t\ttop off" |> g
        "\n\t\tunderneath off" |> g
    end
    # Surfaces go inside the `surface` block; other drawings come after it.
    surfs = [i for i ∈ 1:length(a.drawings) if a.drawings[i] isa Surface]
    apply_drawings!(g, a.drawings[surfs], figid, axidx)
    # -----------------------------------------
    "\n\tend surface" |> g
    apply_drawings!(g, a.drawings[setdiff(1:length(a.drawings), surfs)], figid, axidx)
    # OBJECTS
    apply_objects!(g, a.objects, figid)
    "\nend object" |> g
    if isdef(a.origin)
        # move to center of container
        cx = a.origin[1] + a.size[1]/2
        cy = a.origin[2] + a.size[2]/2
        "\namove $cx $cy" |> g
    else
        # move to center of page
        "\namove pagewidth()/2 pageheight()/2" |> g
    end
    # draw the overall container centered
    "\ndraw $axid.cc" |> g
    # Return nothing for consistency with the Axes2D method above.
    return nothing
end
|
{"hexsha": "f2e4a5d587626bdca79cbde5c572346e55886632", "size": 5299, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/apply_gle/ax.jl", "max_stars_repo_name": "tlienart/GPlot.jl", "max_stars_repo_head_hexsha": "c9dc537329b237b45dfb38442a89ad1868fdd0db", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2018-11-17T21:39:07.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-26T11:03:04.000Z", "max_issues_repo_path": "src/apply_gle/ax.jl", "max_issues_repo_name": "tlienart/GPlot.jl", "max_issues_repo_head_hexsha": "c9dc537329b237b45dfb38442a89ad1868fdd0db", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 161, "max_issues_repo_issues_event_min_datetime": "2019-01-14T05:36:15.000Z", "max_issues_repo_issues_event_max_datetime": "2020-08-09T09:32:32.000Z", "max_forks_repo_path": "src/apply_gle/ax.jl", "max_forks_repo_name": "tlienart/GPlot.jl", "max_forks_repo_head_hexsha": "c9dc537329b237b45dfb38442a89ad1868fdd0db", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-06-12T13:46:07.000Z", "max_forks_repo_forks_event_max_datetime": "2019-06-12T13:46:07.000Z", "avg_line_length": 32.9130434783, "max_line_length": 94, "alphanum_fraction": 0.5457633516, "num_tokens": 1685}
|
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 14 19:02:25 2021
@author: Administrator
"""
import os
import pandas as pd
import numpy as np
# import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns; sns.set()
from matplotlib import pyplot as pl
from MyClass_python import base_function as bf
#--figure 1
# NOTE(review): Windows-style relative path — run from the project root on Windows.
base_folder = r'.\0429\figure1'
"""
Loss function + CNN architecture
"""
# One entry per repeat run; each entry stacks one score per (arch, loss, run) leaf.
final_acc_list = []
final_fscore_list = []
final_kappa_list = []
nRepeat = 5
for idx in range(nRepeat):
    repeat_name = 'repeat_' + str(idx)
    # print(repeat_name)
    acc_list = []
    fscore_list = []
    kappa_list = []
    method_list = []
    # Directory layout: base/<cnn_arch>/repeat_<i>/<cnn_loss>/<run>/...
    cnn_arch_list = os.listdir(base_folder)
    for cnn_arch in cnn_arch_list:
        print(cnn_arch)
        cnn_arch_folder = os.path.join(base_folder, cnn_arch, repeat_name)
        cnn_loss_list = os.listdir(cnn_arch_folder)
        # cnn_loss_list = cnn_loss_list[0:3] + cnn_loss_list[4:]
        # print(cnn_loss_list)
        for cnn_loss in cnn_loss_list:
            cnn_loss_path = os.path.join(cnn_arch_folder, cnn_loss)
            tmp_list = os.listdir(cnn_loss_path)
            for tmp_name in tmp_list:
                final_path = os.path.join(cnn_loss_path, tmp_name)
                # print(final_path)
                # Rows 0-2 of the CSV are read as accuracy, F1 score, kappa.
                acf_data = pd.read_csv(os.path.join(final_path, 'accuracy_fscore_kappa.csv'), header=None)
                acf_value = acf_data.values
                acc_list.append(acf_value[0])
                fscore_list.append(acf_value[1])
                kappa_list.append(acf_value[2])
                method_list.append([cnn_arch, cnn_loss])
    # Stack this repeat's scores into column arrays, one row per method.
    acc_array = np.vstack(acc_list)
    fscore_array = np.vstack(fscore_list)
    kappa_array = np.vstack(kappa_list)
    final_acc_list.append(acc_array)
    final_fscore_list.append(fscore_array)
    final_kappa_list.append(kappa_array)
#############################################################
# -- Mean and std across repeat runs (axis 1 = repeats after hstack).
_acc_all = np.hstack(final_acc_list)
_fscore_all = np.hstack(final_fscore_list)
_kappa_all = np.hstack(final_kappa_list)
acc_mean, acc_std = np.mean(_acc_all, axis=1), np.std(_acc_all, axis=1)
fscore_mean, fscore_std = np.mean(_fscore_all, axis=1), np.std(_fscore_all, axis=1)
kappa_mean, kappa_std = np.mean(_kappa_all, axis=1), np.std(_kappa_all, axis=1)
#############################################################
result_name = 'aus_fscore'
if result_name == 'aus_acc':
_mean_mat = acc_mean.reshape(3,4)
_std_mat = acc_std.reshape(3,4)
ylabel_name = 'Accuracy'
elif result_name == 'aus_fscore':
_mean_mat = fscore_mean.reshape(3,4)
_std_mat = fscore_std.reshape(3,4)
ylabel_name = 'F1 score'
elif result_name == 'aus_kappa':
_mean_mat = kappa_mean.reshape(3,4)
_std_mat = kappa_std.reshape(3,4)
ylabel_name = 'Kappa'
else:
print('error')
#-------#
n_cnn, n_loss = _mean_mat.shape
x = np.arange(0,n_loss)
# One (marker/line spec, edge colour, fill colour, label) per architecture row.
_series = [
    ('D-', '#CC4F1B', '#FF9848', '1D-2D-CNN'),
    ('s-', '#1B2ACC', '#089FFF', '1D-CNN'),
    ('o-', '#3F7F4C', '#7EFF99', '2D-CNN'),
]
for _row, (_fmt, _edge, _face, _label) in enumerate(_series):
    _mean = _mean_mat[_row, :]
    _std = _std_mat[_row, :]
    pl.plot(x, _mean, _fmt, color=_edge, label=_label, linewidth=2, markersize=8)
    # Shaded band = one standard deviation around the mean.
    pl.fill_between(x, _mean - _std, _mean + _std,
        alpha=0.5, edgecolor=_edge, facecolor=_face)
loss_label = ['Focal loss', 'CE loss', 'Hybrid loss', 'WCE loss']
plt.xticks(x, loss_label, rotation=0)
plt.legend()
font2 = {'family' : 'Times New Roman',
'weight' : 'normal',
'size' : 15,
}
plt.xlabel('Loss Function', font2)
plt.ylabel(ylabel_name, font2)
plt.ylim(0.8, 0.9)
# plt.title('Masked and NaN data')
# plt.show()
# save_folder = './plot/'
# bf.make_sure_path_exists(save_folder)
# plt.savefig(save_folder + result_name)
|
{"hexsha": "3013b277eb9a8613aa437ba99ea2e14a1eb4e985", "size": 4249, "ext": "py", "lang": "Python", "max_stars_repo_path": "Project_FrogLossFunctionCNN_Aus/Aus_plot_step2_Loss_repeat.py", "max_stars_repo_name": "Frog-Analysis/Project_FrogLossFunctionCNN", "max_stars_repo_head_hexsha": "c2a1d440d5eb45577f5e3b28b3d29ab42eb606df", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Project_FrogLossFunctionCNN_Aus/Aus_plot_step2_Loss_repeat.py", "max_issues_repo_name": "Frog-Analysis/Project_FrogLossFunctionCNN", "max_issues_repo_head_hexsha": "c2a1d440d5eb45577f5e3b28b3d29ab42eb606df", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Project_FrogLossFunctionCNN_Aus/Aus_plot_step2_Loss_repeat.py", "max_forks_repo_name": "Frog-Analysis/Project_FrogLossFunctionCNN", "max_forks_repo_head_hexsha": "c2a1d440d5eb45577f5e3b28b3d29ab42eb606df", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.9047619048, "max_line_length": 114, "alphanum_fraction": 0.6286184985, "include": true, "reason": "import numpy", "num_tokens": 1191}
|
#include "FeaturesComputer.hpp"
#include <itkImageRegionIteratorWithIndex.h>
#include <boost/program_options.hpp>
#include <iostream>
#include <string>
namespace po = boost::program_options;
typedef itk::ImageRegionIteratorWithIndex< OutputImageType > OutputImageIterator;
/**
 * FeaturesComputer that outputs, for every voxel of the input image, its
 * (x, y[, z]) index coordinates, optionally normalized by the image size
 * along each axis.
 *
 * Options:
 *   --dimension,-d  number of coordinate components per voxel: 2 or 3 (default 3).
 *   --normalize,-n  divide each coordinate by the image size on that axis.
 */
class CoordinatesComputer : public FeaturesComputer
{
private:
	boost::program_options::options_description options;
	unsigned int dimension;
	bool normalization;

public:
	CoordinatesComputer():
		options("CoordinatesComputer")
	{
		options.add_options()
			("dimension,d",
			 po::value< unsigned int >(&this->dimension)->default_value(3),
			 "Dimensions of the coordinates space: 2 or (default) 3")
			("normalize,n",
			 "Enables normalization (default: disabled)")
			;
	}

	// Print this computer's command-line options to `os`.
	virtual void print_usage(std::ostream &os)
	{
		os << this->options;
	}

	// Build a vector image whose pixel at index (x, y[, z]) holds those
	// coordinates; throws boost::program_options::validation_error for an
	// unsupported --dimension value.
	virtual OutputImageType::Pointer compute( InputImageType::Pointer input_image, std::vector< std::string > params )
	{
		po::variables_map vm;
		po::store(po::command_line_parser(params).options(this->options).run(), vm);
		vm.notify();

		// Only 2-D or 3-D coordinate spaces are meaningful.
		if((this->dimension != 2) && (this->dimension != 3))
		{
			boost::program_options::validation_error err =
				po::validation_error(
					po::validation_error::invalid_option_value,
					boost::lexical_cast< std::string >(this->dimension),
					"dimension");
			throw err;
		}

		this->normalization = vm.count("normalize") > 0;

		OutputImageType::Pointer output_image = OutputImageType::New();
		output_image->SetRegions( input_image->GetLargestPossibleRegion() );
		output_image->SetVectorLength(this->dimension);
		output_image->Allocate();

		const typename OutputImageType::RegionType image_region = output_image->GetLargestPossibleRegion();
		typename OutputImageType::RegionType::SizeType image_size = image_region.GetSize();
		// NOTE(review): normalization divides by the full axis size, so
		// coordinates span [0, 1); the commented-out size-1 adjustment below
		// would make them span [0, 1] — confirm which convention is intended.
		/*
		for(itk::SizeValueType i = 0; i < OutputImageType::RegionType::SizeType::Dimension; ++i)
		{
			if(image_size[i] > 1)
				image_size[i] -= 1;
		}
		*/

		OutputImageIterator ito(output_image, image_region);
		OutputImageType::PixelType out_pix;
		OutputImageType::IndexType coords;
		out_pix.SetSize(this->dimension);

		ito.GoToBegin();
		while( !ito.IsAtEnd() )
		{
			coords = ito.GetIndex();
			out_pix[0] = coords[0];
			out_pix[1] = coords[1];
			if(this->dimension == 3)
				out_pix[2] = coords[2];
			if(this->normalization) {
				out_pix[0] /= image_size[0];
				out_pix[1] /= image_size[1];
				if(this->dimension == 3)
					out_pix[2] /= image_size[2];
			}
			// BUG FIX: removed leftover debug `std::cout << out_pix` that
			// printed every voxel's coordinates to stdout.
			ito.Set(out_pix);
			++ito;
		}

		return output_image;
	}
};
extern "C" FeaturesComputer* create() {
return new CoordinatesComputer;
}
|
{"hexsha": "a96af74005c5a304cdd5eb26ee213afdc2b5ee8a", "size": 2672, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "CoordinatesComputer.cpp", "max_stars_repo_name": "Sigill/ImageFeaturesComputer", "max_stars_repo_head_hexsha": "3e1058d7e97413d0a3e928bdc802535e85a73a59", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "CoordinatesComputer.cpp", "max_issues_repo_name": "Sigill/ImageFeaturesComputer", "max_issues_repo_head_hexsha": "3e1058d7e97413d0a3e928bdc802535e85a73a59", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "CoordinatesComputer.cpp", "max_forks_repo_name": "Sigill/ImageFeaturesComputer", "max_forks_repo_head_hexsha": "3e1058d7e97413d0a3e928bdc802535e85a73a59", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.2909090909, "max_line_length": 115, "alphanum_fraction": 0.6991017964, "num_tokens": 710}
|
#!/usr/bin/python
"""
Plot sigma(fNL) as a fn. of z, for different fiducial fNL
"""
import numpy as np
import pylab as P
from rfwrapper import rf
# Load sigma(fNL) and z bins
fNL = [0., 1., 10.]  # fiducial f_NL values, one per saved file set
sigmas = []; sigmas_bz = []
sigma_fNL = []; sigma_fNL_bz = []
sigma_bias = []
for j in range(len(fNL)):
    zc = np.load("Xnongauss-fnl-zc-%d.npy"%j)
    sigmas.append(np.load("tmp/Xnongauss-fnl-%d.npy" % j))
    sigmas_bz.append(np.load("tmp/nongauss-fnl-zbias-%d.npy" % j))
    # The last zc.size entries hold sigma(fNL) per redshift bin;
    # entries 1..zc.size of the zbias file hold the bias uncertainties.
    sigma_fNL.append(sigmas[j][-zc.size:])
    sigma_fNL_bz.append(sigmas_bz[j][-zc.size:])
    sigma_bias.append(sigmas_bz[j][1:zc.size+1])
col = ['r', 'b', 'y']  # one line colour per fiducial f_NL
name = ["f_NL = " + str(int(_fNL)) for _fNL in fNL]
# Get bias as fn. of z
# NOTE(review): `zc` here is the z-bin array from the LAST file loaded above —
# this assumes all file sets share the same binning; confirm.
cosmo = rf.experiments.cosmo
cosmo['bHI0'] = 0.702
bias = rf.bias_HI(zc, cosmo)
P.subplot(111)
for i in range(len(fNL)):
    #P.errorbar(zc, fNL[i]*np.ones(zc.size), yerr=sigma_fNL[i], marker='.', color=col[i], lw=1.5, label=name[i])
    P.plot(zc, sigma_fNL[i], lw=1.5, color=col[i], marker='.', label=name[i])
    #P.plot(zc, sigma_fNL_bz[i], lw=1.5, color=col[i], marker='.', ls='dashed')
#P.plot(zc, bias - 1., 'k-')
#P.axhline(0., ls='dotted', color='k')
#P.xlim((np.min(zs), np.max(zs)))
P.ylim((0., 150.))
P.xlim((0., 3.6))
# Display options
P.legend(loc='upper right', prop={'size':'x-large'})
# NOTE(review): '\s' below is not a valid string escape; a raw string
# (r"$\sigma...$") would avoid future DeprecationWarnings.
P.ylabel("$\sigma_{fNL}(z)$", fontdict={'fontsize':'22'})
P.xlabel("$z$", fontdict={'fontsize':'20'})
fontsize = 18.
# Enlarge tick labels on both axes.
for tick in P.gca().yaxis.get_major_ticks():
    tick.label1.set_fontsize(fontsize)
for tick in P.gca().xaxis.get_major_ticks():
    tick.label1.set_fontsize(fontsize)
"""
P.subplot(212)
for i in range(len(fNL)):
    P.plot(zc, sigma_bias[i])
"""
P.tight_layout()
P.show()
|
{"hexsha": "6285bf02703d68f94bdbb6815561d95b1e169b4e", "size": 1708, "ext": "py", "lang": "Python", "max_stars_repo_path": "plotting/plot_pub_nongaussianity_fn_fnl.py", "max_stars_repo_name": "sjforeman/RadioFisher", "max_stars_repo_head_hexsha": "fe25f969de9a700c5697168ba9e0d2645c55ed81", "max_stars_repo_licenses": ["AFL-3.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-12-05T11:28:47.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-09T02:42:21.000Z", "max_issues_repo_path": "plotting/plot_pub_nongaussianity_fn_fnl.py", "max_issues_repo_name": "sjforeman/RadioFisher", "max_issues_repo_head_hexsha": "fe25f969de9a700c5697168ba9e0d2645c55ed81", "max_issues_repo_licenses": ["AFL-3.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "plotting/plot_pub_nongaussianity_fn_fnl.py", "max_forks_repo_name": "sjforeman/RadioFisher", "max_forks_repo_head_hexsha": "fe25f969de9a700c5697168ba9e0d2645c55ed81", "max_forks_repo_licenses": ["AFL-3.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-07-09T02:42:23.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-30T06:37:47.000Z", "avg_line_length": 27.1111111111, "max_line_length": 112, "alphanum_fraction": 0.6317330211, "include": true, "reason": "import numpy", "num_tokens": 602}
|
Load LFindLoad.
Load LFindLoad.
From adtind Require Import goal82.
From lfind Require Import LFind.
Require Import Arith.
Require Import Extraction.
Extract Inductive nat => nat [ "(O)" "S" ].
Extract Inductive list => list [ "Nil" "Cons" ].
Extraction "/home/yousef/lemmafinder/benchmark/_lfind_clam_lf_goal82_rev_append_49_append_nil/goal82_lfind_orig.ml" adtind.goal82.append adtind.goal82.rev.
Success.
|
{"author": "yalhessi", "repo": "lemmaranker", "sha": "53bc2ad63ad7faba0d7fc9af4e1e34216173574a", "save_path": "github-repos/coq/yalhessi-lemmaranker", "path": "github-repos/coq/yalhessi-lemmaranker/lemmaranker-53bc2ad63ad7faba0d7fc9af4e1e34216173574a/benchmark/clam/_lfind_clam_lf_goal82_rev_append_49_append_nil/lfind_ml_generator.v"}
|
/*
microsoft-oms-auditd-plugin
Copyright (c) Microsoft Corporation
All rights reserved.
MIT License
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the ""Software""), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#define BOOST_TEST_DYN_LINK
#define BOOST_TEST_MODULE "EventProcessorTests"
#include <boost/test/unit_test.hpp>
#include "PriorityQueue.h"
#include "Logger.h"
#include "TempDir.h"
#include "TestEventData.h"
#include "RawEventProcessor.h"
#include "RawEventAccumulator.h"
#include "StringUtils.h"
#include "EventPrioritizer.h"
#include "InputBuffer.h"
#include <fstream>
#include <stdexcept>
#include <iostream>
extern "C" {
#include <sys/types.h>
#include <unistd.h>
#include <stdlib.h>
};
// Write `text` to the file at `path`. Any stream error (open, write or
// close) raises an std::ios_base::failure exception.
void write_file(const std::string& path, const std::string& text) {
    std::ofstream stream;
    // Exceptions must be armed before open() so open failures throw too.
    stream.exceptions(std::ofstream::failbit|std::ofstream::badbit|std::ofstream::eofbit);
    stream.open(path);
    stream << text;
    stream.close();
}
// IEventBuilderAllocator that feeds every committed event buffer straight
// into a RawEventProcessor instead of queueing it (test harness shortcut).
class RawEventQueue: public IEventBuilderAllocator {
public:
    explicit RawEventQueue(std::shared_ptr<RawEventProcessor> proc): _buffer(), _size(0), _proc(std::move(proc)) {}

    // Hand out a buffer of at least `size` bytes; the internal vector only
    // ever grows, so repeated allocations reuse the same storage.
    bool Allocate(void** data, size_t size) override {
        if (_size != size) {
            _size = size;
        }
        if (_buffer.size() < _size) {
            _buffer.resize(_size);
        }
        *data = _buffer.data();
        return true;
    }

    // Forward the pending buffer to the processor and reset the pending size.
    // Returns -1 (event dropped) when it exceeds InputBuffer::MAX_DATA_SIZE,
    // 1 otherwise — NOTE(review): return-code meanings inferred from usage
    // here; confirm against the IEventBuilderAllocator contract.
    int Commit() override {
        if (_size > InputBuffer::MAX_DATA_SIZE) {
            return -1;
        }
        _proc->ProcessData(_buffer.data(), _size);
        _size = 0;
        return 1;
    }

    // Discard the pending buffer without processing it.
    bool Rollback() override {
        _size = 0;
        return true;
    }
private:
    std::vector<uint8_t> _buffer;  // reusable backing storage for Allocate()
    size_t _size;                  // size of the event currently being built
    std::shared_ptr<RawEventProcessor> _proc;
};
namespace {
// True when the two possibly-null C strings are equal (both null counts as equal).
bool cstr_equal(const char* x, const char* y) {
    if (x == nullptr || y == nullptr) {
        return x == y;
    }
    return strcmp(x, y) == 0;
}

// Render a possibly-null C string for inclusion in a diagnostic message.
std::string cstr_show(const char* s) {
    return s == nullptr ? std::string("null") : std::string(s);
}
} // namespace

/*
 * Compare the expected event `e` with the actual event `a` and throw a
 * std::runtime_error describing the FIRST mismatch found: header fields,
 * then per-record type/name/text, then per-field name/raw/interp/type.
 * `idx` only labels the event in the error message. Returns normally when
 * the events are identical.
 *
 * Refactored from ~10 hand-expanded copies of the nullable-C-string compare
 * pattern into shared helpers; also replaces the nonstandard `uint` cast
 * with `unsigned int` and fixes the "Unxpected" typo in the diagnostics.
 */
void diff_event(int idx, const Event& e, const Event& a) {
    // Header-level scalar comparison.
    auto check_scalar = [idx](auto expected, auto actual, const char* what) {
        if (expected != actual) {
            std::stringstream msg;
            msg << "Event[" << idx << "] " << what << " Mismatch: expected "
                << expected << ", got " << actual;
            throw std::runtime_error(msg.str());
        }
    };
    check_scalar(e.Seconds(), a.Seconds(), "Seconds");
    check_scalar(e.Milliseconds(), a.Milliseconds(), "Milliseconds");
    check_scalar(e.Serial(), a.Serial(), "Serial");
    check_scalar(e.Flags(), a.Flags(), "Flags");
    check_scalar(e.Pid(), a.Pid(), "Pid");
    check_scalar(e.NumRecords(), a.NumRecords(), "NumRecords");

    // Nullable C-string comparison with a fully-composed label.
    auto check_cstr = [](const std::string& label, const char* expected, const char* actual) {
        if (!cstr_equal(expected, actual)) {
            std::stringstream msg;
            msg << label << " Mismatch: expected " << cstr_show(expected)
                << ", got " << cstr_show(actual);
            throw std::runtime_error(msg.str());
        }
    };

    for (int r = 0; r < e.NumRecords(); ++r) {
        auto er = e.RecordAt(r);
        auto ar = a.RecordAt(r);
        std::stringstream prefix_ss;
        prefix_ss << "Event[" << idx << "].Record[" << r << "]";
        const std::string prefix = prefix_ss.str();

        if (er.RecordType() != ar.RecordType()) {
            std::stringstream msg;
            msg << prefix << " RecordType Mismatch: expected " << er.RecordType()
                << ", got " << ar.RecordType();
            throw std::runtime_error(msg.str());
        }
        check_cstr(prefix + " RecordTypeName", er.RecordTypeNamePtr(), ar.RecordTypeNamePtr());
        check_cstr(prefix + " RecordText", er.RecordTextPtr(), ar.RecordTextPtr());

        if (er.NumFields() != ar.NumFields()) {
            // Report the count mismatch plus the symmetric difference of names.
            std::stringstream msg;
            msg << prefix << " NumFields Mismatch: expected " << er.NumFields()
                << ", got " << ar.NumFields() << "\n";
            std::unordered_set<std::string> expected_names;
            std::unordered_set<std::string> actual_names;
            for (auto f : er) {
                expected_names.emplace(f.FieldNamePtr(), f.FieldNameSize());
            }
            for (auto f : ar) {
                actual_names.emplace(f.FieldNamePtr(), f.FieldNameSize());
            }
            for (auto& name : expected_names) {
                if (actual_names.count(name) == 0) {
                    msg << "    Expected Field Name Not Found: " << name << "\n";
                }
            }
            for (auto& name : actual_names) {
                if (expected_names.count(name) == 0) {
                    msg << "    Unexpected Field Name Found: " << name << "\n";
                }
            }
            throw std::runtime_error(msg.str());
        }

        for (int f = 0; f < er.NumFields(); ++f) {
            auto ef = er.FieldAt(f);
            auto af = ar.FieldAt(f);
            std::stringstream fprefix_ss;
            fprefix_ss << prefix << ".Field[" << f << "]";
            const std::string fprefix = fprefix_ss.str();

            check_cstr(fprefix + " FieldName", ef.FieldNamePtr(), af.FieldNamePtr());
            check_cstr(fprefix + " RawValue", ef.RawValuePtr(), af.RawValuePtr());
            {
                // InterpValue diagnostics include the field name for context.
                std::stringstream named;
                named << fprefix << " (Name=" << ef.FieldName() << ") InterpValue";
                check_cstr(named.str(), ef.InterpValuePtr(), af.InterpValuePtr());
            }
            if (ef.FieldType() != af.FieldType()) {
                std::stringstream msg;
                msg << fprefix << " (Name=" << ef.FieldName()
                    << ") FieldType Mismatch: expected "
                    << static_cast<unsigned int>(ef.FieldType())
                    << ", got " << static_cast<unsigned int>(af.FieldType());
                throw std::runtime_error(msg.str());
            }
        }
    }
}
// Feed the canned raw audit records through the accumulator + processor and
// require the produced events to match the hand-written expected events.
BOOST_AUTO_TEST_CASE( basic_test ) {
    TempDir dir("/tmp/EventProcessorTests");
    // Minimal passwd/group files so UserDB can resolve uid/gid names.
    write_file(dir.Path() + "/passwd", passwd_file_text);
    write_file(dir.Path() + "/group", group_file_text);
    auto user_db = std::make_shared<UserDB>(dir.Path());
    user_db->update();
    // Ownership of the raw queue pointers passes to the shared_ptrs below.
    auto expected_queue = new TestEventQueue();
    auto actual_queue = new TestEventQueue();
    auto metrics_queue = new TestEventQueue();
    auto prioritizer = DefaultPrioritizer::Create(0);
    auto expected_allocator = std::shared_ptr<IEventBuilderAllocator>(expected_queue);
    auto actual_allocator = std::shared_ptr<IEventBuilderAllocator>(actual_queue);
    auto metrics_allocator = std::shared_ptr<IEventBuilderAllocator>(metrics_queue);
    auto expected_builder = std::make_shared<EventBuilder>(expected_allocator, prioritizer);
    auto actual_builder = std::make_shared<EventBuilder>(actual_allocator, prioritizer);
    auto metrics_builder = std::make_shared<EventBuilder>(metrics_allocator, prioritizer);
    auto proc_filter = std::make_shared<ProcFilter>(user_db);
    std::shared_ptr<FiltersEngine> filtersEngine; // Intentionally left unassigned
    std::shared_ptr<ProcessTree> processTree; // Intentionally left unassigned
    auto metrics = std::make_shared<Metrics>("test", metrics_builder);
    // Install one redaction rule so the cmdline redaction path is exercised.
    auto cmdline_redactor = std::make_shared<CmdlineRedactor>();
    auto test_rule = std::make_shared<const CmdlineRedactionRule>(test_redaction_rule_filename, test_redaction_rule_name, test_redaction_rule_regex, '*');
    cmdline_redactor->AddRule(test_rule);
    auto raw_proc = std::make_shared<RawEventProcessor>(actual_builder, user_db, cmdline_redactor, processTree, filtersEngine, metrics);
    auto actual_raw_queue = new RawEventQueue(raw_proc);
    auto actual_raw_allocator = std::shared_ptr<IEventBuilderAllocator>(actual_raw_queue);
    auto actual_raw_builder = std::make_shared<EventBuilder>(actual_raw_allocator, prioritizer);
    // Write the expected events straight into the expected queue.
    for (auto e : test_events) {
        e.Write(expected_builder);
    }
    RawEventAccumulator accumulator(actual_raw_builder, metrics);
    // Parse each raw text fixture line-by-line into records; flush where the
    // companion fixture array says an event boundary occurs.
    for (int i = 0; i < raw_test_events.size(); i++) {
        auto raw_event = raw_test_events[i];
        auto do_flush = raw_events_do_flush[i];
        std::string event_txt = raw_event;
        auto lines = split(event_txt, '\n');
        for (auto& line: lines) {
            std::unique_ptr<RawEventRecord> record = std::make_unique<RawEventRecord>();
            std::memcpy(record->Data(), line.c_str(), line.size());
            if (record->Parse(RecordType::UNKNOWN, line.size())) {
                accumulator.AddRecord(std::move(record));
            } else {
                Logger::Warn("Received unparsable event data: %s", line.c_str());
            }
        }
        if (do_flush) {
            accumulator.Flush(0);
        }
    }
    // Same number of events, and each pair must match field-for-field.
    BOOST_REQUIRE_EQUAL(expected_queue->GetEventCount(), actual_queue->GetEventCount());
    for (size_t idx = 0; idx < expected_queue->GetEventCount(); ++idx) {
        diff_event(idx, expected_queue->GetEvent(idx), actual_queue->GetEvent(idx));
    }
}
// Verifies that an oversized raw event is reduced to a single emitted event
// whose serialized size does not exceed InputBuffer::MAX_DATA_SIZE.
BOOST_AUTO_TEST_CASE( oversized_event_test ) {
    // Fixture user/group database backed by files in a scratch directory.
    TempDir dir("/tmp/EventProcessorTests");
    write_file(dir.Path() + "/passwd", passwd_file_text);
    write_file(dir.Path() + "/group", group_file_text);
    auto user_db = std::make_shared<UserDB>(dir.Path());
    user_db->update();
    auto actual_queue = new TestEventQueue();
    auto metrics_queue = new TestEventQueue();
    auto prioritizer = DefaultPrioritizer::Create(0);
    auto actual_allocator = std::shared_ptr<IEventBuilderAllocator>(actual_queue);
    auto metrics_allocator = std::shared_ptr<IEventBuilderAllocator>(metrics_queue);
    auto actual_builder = std::make_shared<EventBuilder>(actual_allocator, prioritizer);
    auto metrics_builder = std::make_shared<EventBuilder>(metrics_allocator, prioritizer);
    auto proc_filter = std::make_shared<ProcFilter>(user_db);
    // Unlike basic_test, this test uses real (non-null) FiltersEngine and
    // ProcessTree instances.
    auto filtersEngine = std::make_shared<FiltersEngine>();
    auto processTree = std::make_shared<ProcessTree>(user_db, filtersEngine);
    auto metrics = std::make_shared<Metrics>("test", metrics_builder);
    auto cmdline_redactor = std::make_shared<CmdlineRedactor>();
    auto raw_proc = std::make_shared<RawEventProcessor>(actual_builder, user_db, cmdline_redactor, processTree, filtersEngine, metrics);
    auto actual_raw_queue = new RawEventQueue(raw_proc);
    auto actual_raw_allocator = std::shared_ptr<IEventBuilderAllocator>(actual_raw_queue);
    auto actual_raw_builder = std::make_shared<EventBuilder>(actual_raw_allocator, prioritizer);
    RawEventAccumulator accumulator(actual_raw_builder, metrics);
    // Feed the oversized fixture line-by-line into the accumulator.
    auto lines = split(oversized_event_text, '\n');
    for (auto& line: lines) {
        std::unique_ptr<RawEventRecord> record = std::make_unique<RawEventRecord>();
        std::memcpy(record->Data(), line.c_str(), line.size());
        if (record->Parse(RecordType::UNKNOWN, line.size())) {
            accumulator.AddRecord(std::move(record));
        } else {
            Logger::Warn("Received unparsable event data: %s", line.c_str());
        }
    }
    accumulator.Flush(0);
    // Exactly one event must be produced, and it must fit the size cap.
    BOOST_REQUIRE_EQUAL(actual_queue->GetEventCount(), 1);
    Event e = actual_queue->GetEvent(0);
    BOOST_REQUIRE_LE(e.Size(), InputBuffer::MAX_DATA_SIZE);
}
|
{"hexsha": "f5147b9419e3274527b2ec0d0e1033facfef732c", "size": 15575, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "EventProcessorTests.cpp", "max_stars_repo_name": "kovdan01/OMS-Auditd-Plugin", "max_stars_repo_head_hexsha": "529db434129f43f5763a405eb3357bedad757968", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 13.0, "max_stars_repo_stars_event_min_datetime": "2019-07-01T04:58:51.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-28T13:38:02.000Z", "max_issues_repo_path": "EventProcessorTests.cpp", "max_issues_repo_name": "kovdan01/OMS-Auditd-Plugin", "max_issues_repo_head_hexsha": "529db434129f43f5763a405eb3357bedad757968", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 16.0, "max_issues_repo_issues_event_min_datetime": "2019-05-08T00:40:43.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-27T23:11:26.000Z", "max_forks_repo_path": "EventProcessorTests.cpp", "max_forks_repo_name": "kovdan01/OMS-Auditd-Plugin", "max_forks_repo_head_hexsha": "529db434129f43f5763a405eb3357bedad757968", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 9.0, "max_forks_repo_forks_event_min_datetime": "2019-12-19T00:06:53.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-03T09:45:20.000Z", "avg_line_length": 43.6274509804, "max_line_length": 464, "alphanum_fraction": 0.5858747994, "num_tokens": 3622}
|
# -*- coding: utf-8 -*-
"""
@date: 2021/7/20 下午10:23
@file: prune_mobilenet_v2.py
@author: zj
@description:
"""
import numpy as np
import torch
import torch.nn as nn
from torchvision.models.mobilenetv2 import ConvBNActivation, ConvBNReLU, InvertedResidual
from slim.prune.layers import create_conv2d, create_batchnorm2d, create_linear
from slim.prune.misc import set_module_list, round_to_multiple_of
from .misc import computer_bn_threshold
def prune_conv_bn(old_conv2d, old_batchnorm2d, bn_threshold,
in_channels=3, in_idx=None, minimum_channels=8, divisor=8, is_dw=False):
"""
For deep-wise convolution, the number of groups is the same as the number of input channels,
and the number of output channels can be divided by the number of groups
"""
assert isinstance(old_conv2d, nn.Conv2d)
assert isinstance(old_batchnorm2d, nn.BatchNorm2d)
weight_copy = old_batchnorm2d.weight.data.abs().clone()
# If the number of BN channels is less than or equal to minimum_channels, pruning is not performed
if len(weight_copy) <= minimum_channels:
out_idx = np.arange(minimum_channels)
else:
mask = weight_copy.gt(bn_threshold).float()
# get pruning mask
out_idx = np.squeeze(np.argwhere(np.asarray(mask.cpu().numpy())))
if out_idx.size == 1:
out_idx = np.resize(out_idx, (1,))
old_prune_len = len(out_idx)
# For the deep-wise convolution of mobilenet-v2, the number of input and output channels is consistent
if is_dw:
new_prune_len = in_channels
else:
# If the feature length after pruning is less than minimum_channels or not multiples of divisor,
# then round up to multiples of divisor
new_prune_len = round_to_multiple_of(old_prune_len, divisor)
if new_prune_len > old_prune_len:
mask = weight_copy.le(bn_threshold).float()
tmp_idx = np.squeeze(np.argwhere(np.asarray(mask.cpu().numpy())))
if tmp_idx.size == 1:
tmp_idx = np.resize(tmp_idx, (1,))
res_idx = np.random.choice(tmp_idx, new_prune_len - old_prune_len, replace=False)
out_idx = np.array(sorted(np.concatenate((out_idx, res_idx))))
elif new_prune_len < old_prune_len:
out_idx = np.random.choice(out_idx, new_prune_len, replace=False)
# get output channels
out_channels = len(out_idx)
# new Conv2d/BatchNorm2d
new_conv2d = create_conv2d(old_conv2d, in_channels, out_channels, old_groups=in_channels if is_dw else None)
new_batchnorm2d = create_batchnorm2d(old_batchnorm2d, out_channels)
new_conv2d.weight.data = old_conv2d.weight.data[out_idx.tolist(), :, :, :].clone()
if new_conv2d.bias is not None:
new_conv2d.bias.data = old_conv2d.bias.data[out_idx.tolist()].clone()
if in_idx is not None and not is_dw:
new_conv2d.weight.data = new_conv2d.weight.data[:, in_idx.tolist(), :, :].clone()
new_batchnorm2d.weight.data = old_batchnorm2d.weight.data[out_idx.tolist()].clone()
new_batchnorm2d.bias.data = old_batchnorm2d.bias.data[out_idx.tolist()].clone()
new_batchnorm2d.running_mean = old_batchnorm2d.running_mean[out_idx.tolist()].clone()
new_batchnorm2d.running_var = old_batchnorm2d.running_var[out_idx.tolist()].clone()
return new_conv2d, new_batchnorm2d, out_channels, out_idx
def computer_mask(old_batchnorm2d, bn_threshold, out_channels):
    """Compute the pruning index mask for a BatchNorm2d layer.

    Keeps the channels whose |gamma| exceeds ``bn_threshold``, then pads or
    trims the index set by random sampling so that exactly ``out_channels``
    indices are returned.
    """
    assert isinstance(old_batchnorm2d, nn.BatchNorm2d)
    gamma = old_batchnorm2d.weight.data.abs().clone()

    def _indices(selector):
        # Indices of the channels picked out by the boolean selector tensor.
        idx = np.squeeze(np.argwhere(np.asarray(selector.float().cpu().numpy())))
        return np.resize(idx, (1,)) if idx.size == 1 else idx

    out_idx = _indices(gamma.gt(bn_threshold))
    kept = len(out_idx)
    if out_channels > kept:
        # Too few survivors: top up with randomly chosen pruned channels.
        extra = np.random.choice(_indices(gamma.le(bn_threshold)),
                                 out_channels - kept, replace=False)
        out_idx = np.array(sorted(np.concatenate((out_idx, extra))))
    elif out_channels < kept:
        # Too many survivors: randomly drop down to the requested width.
        out_idx = np.random.choice(out_idx, out_channels, replace=False)
    return out_idx
def prune_conv_bn_activation(module_list, in_channels, in_idx, bn_threshold, minimum_channels=8, divisor=8):
    """Prune a [Conv2d, BatchNorm2d, activation] triple.

    Returns the replacement module list (with a fresh ReLU6), the pruned
    output channel count, and the surviving channel indices.
    """
    assert len(module_list) == 3
    conv, bn = module_list[0], module_list[1]
    new_conv2d, new_batchnorm2d, out_channels, out_idx = prune_conv_bn(
        conv, bn, bn_threshold,
        in_channels=in_channels,
        in_idx=in_idx,
        minimum_channels=minimum_channels,
        divisor=divisor)
    # Sanity-check the pruned module shapes before handing them back.
    assert new_conv2d.weight.shape[:2] == torch.Size(
        [new_conv2d.out_channels, new_conv2d.in_channels]), '0, {} {}'.format(new_conv2d, new_conv2d.weight.shape)
    assert new_batchnorm2d.weight.shape == torch.Size((new_batchnorm2d.num_features,))
    return [new_conv2d, new_batchnorm2d, nn.ReLU6(inplace=True)], out_channels, out_idx
def prune_inverted_residual(module_list, in_channels, in_idx, bn_threshold, minimum_channels=8, divisor=8,
                            use_res_connect=False):
    """Prune a flattened MobileNetV2 InvertedResidual block.

    ``module_list`` holds the block's layers flattened in order: 5 entries for
    blocks without an expansion conv (dw-conv/BN/act + project conv/BN), or 8
    entries with one (expand conv/BN/act first). When ``use_res_connect`` is
    True, the projection output width is forced back to the block's input
    width so the residual addition still type-checks.

    Returns (new_module_list, out_channels, out_idx).
    """
    assert len(module_list) in [5, 8]
    # Remember the block's input width: the residual path requires the output
    # width to match it.
    src_in_channels = in_channels
    new_module_list = list()
    idx = 0
    # first ConvBNActivation: the expansion conv (8-layer form) or the
    # depth-wise conv (5-layer form).
    new_conv2d, new_batchnorm2d, out_channels, out_idx = prune_conv_bn(module_list[idx],
                                                                       module_list[idx + 1],
                                                                       bn_threshold,
                                                                       in_channels=in_channels,
                                                                       in_idx=in_idx,
                                                                       minimum_channels=minimum_channels,
                                                                       divisor=divisor,
                                                                       is_dw=len(module_list) == 5
                                                                       )
    if len(module_list) == 5:
        # Depth-wise conv: weight shape is (out_channels, 1, kH, kW).
        assert new_conv2d.weight.shape[:2] == torch.Size(
            [new_conv2d.out_channels, 1]), '0, {} {}'.format(new_conv2d, new_conv2d.weight.shape)
    else:
        assert new_conv2d.weight.shape[:2] == torch.Size(
            [new_conv2d.out_channels, new_conv2d.in_channels]), '0, {} {}'.format(new_conv2d, new_conv2d.weight.shape)
    assert new_batchnorm2d.weight.shape == torch.Size((new_batchnorm2d.num_features,))
    new_module_list.append(new_conv2d)
    new_module_list.append(new_batchnorm2d)
    new_module_list.append(nn.ReLU6(inplace=True))
    idx += 3
    # The next layer consumes this layer's surviving channels.
    in_channels = out_channels
    in_idx = out_idx
    if len(module_list) == 8:
        # second ConvBNActivation: the depth-wise conv of the expanded block.
        new_conv2d, new_batchnorm2d, out_channels, out_idx = prune_conv_bn(module_list[idx],
                                                                           module_list[idx + 1],
                                                                           bn_threshold,
                                                                           in_channels=in_channels,
                                                                           in_idx=in_idx,
                                                                           minimum_channels=minimum_channels,
                                                                           divisor=divisor,
                                                                           is_dw=True
                                                                           )
        assert new_conv2d.weight.shape[:2] == torch.Size(
            [new_conv2d.out_channels, 1]), '1, {} {}'.format(new_conv2d, new_conv2d.weight.shape)
        assert new_batchnorm2d.weight.shape == torch.Size((new_batchnorm2d.num_features,))
        new_module_list.append(new_conv2d)
        new_module_list.append(new_batchnorm2d)
        new_module_list.append(nn.ReLU6(inplace=True))
        idx += 3
        in_channels = out_channels
        in_idx = out_idx
    if use_res_connect:
        # identity map: the projection conv must emit exactly the block's
        # original input width so the skip connection can be added.
        out_channels = src_in_channels
        old_conv2d = module_list[idx]
        old_batchnorm2d = module_list[idx + 1]
        out_idx = computer_mask(old_batchnorm2d, bn_threshold, out_channels)
        # new Conv2d/BatchNorm2d built by hand (prune_conv_bn would round the
        # width, which is not allowed here).
        new_conv2d = create_conv2d(old_conv2d, in_channels, out_channels)
        new_batchnorm2d = create_batchnorm2d(old_batchnorm2d, out_channels)
        new_conv2d.weight.data = old_conv2d.weight.data[out_idx.tolist(), :, :, :].clone()
        if new_conv2d.bias is not None:
            new_conv2d.bias.data = old_conv2d.bias.data[out_idx.tolist()].clone()
        if in_idx is not None:
            new_conv2d.weight.data = new_conv2d.weight.data[:, in_idx.tolist(), :, :].clone()
        new_batchnorm2d.weight.data = old_batchnorm2d.weight.data[out_idx.tolist()].clone()
        new_batchnorm2d.bias.data = old_batchnorm2d.bias.data[out_idx.tolist()].clone()
        new_batchnorm2d.running_mean = old_batchnorm2d.running_mean[out_idx.tolist()].clone()
        new_batchnorm2d.running_var = old_batchnorm2d.running_var[out_idx.tolist()].clone()
        assert new_conv2d.weight.shape[:2] == torch.Size(
            [new_conv2d.out_channels, new_conv2d.in_channels]), '2, {} {}'.format(new_conv2d, new_conv2d.weight.shape)
        assert new_batchnorm2d.weight.shape == torch.Size((new_batchnorm2d.num_features,))
        new_module_list.append(new_conv2d)
        new_module_list.append(new_batchnorm2d)
    else:
        # No residual: the projection conv can be pruned freely.
        new_conv2d, new_batchnorm2d, out_channels, out_idx = prune_conv_bn(module_list[idx],
                                                                           module_list[idx + 1],
                                                                           bn_threshold,
                                                                           in_channels=in_channels,
                                                                           in_idx=in_idx,
                                                                           minimum_channels=minimum_channels,
                                                                           divisor=divisor,
                                                                           is_dw=False
                                                                           )
        assert new_conv2d.weight.shape[:2] == torch.Size(
            [new_conv2d.out_channels, new_conv2d.in_channels]), '3, {} {}'.format(new_conv2d, new_conv2d.weight.shape)
        assert new_batchnorm2d.weight.shape == torch.Size((new_batchnorm2d.num_features,))
        new_module_list.append(new_conv2d)
        new_module_list.append(new_batchnorm2d)
    return new_module_list, out_channels, out_idx
def prune_classifier(module_list, in_channels, in_idx):
    """Prune the classifier head's Linear layer input features.

    ``module_list`` is [dropout, linear]; only the linear layer changes —
    its input features are narrowed to the channels that survived pruning
    of the last feature stage.
    """
    head = module_list[0]
    old_fc = module_list[1]
    assert isinstance(old_fc, nn.Linear)
    new_fc, in_channels = create_linear(old_fc, in_channels)
    # Keep only the columns (input features) selected upstream.
    new_fc.weight.data = old_fc.weight.data[:, in_idx].clone()
    if new_fc.bias is not None:
        new_fc.bias.data = old_fc.bias.data.clone()
    return [head, new_fc]
def prune(model, percent, minimum_channels=8, divisor=8):
    """Prune a MobileNetV2-style model by global BN-gamma magnitude.

    Computes a global threshold from ``percent``, walks the feature stages
    (ConvBNActivation and InvertedResidual blocks) pruning each in place via
    set_module_list, then narrows the classifier to match. Returns the
    achieved pruning ratio and the threshold used.
    """
    total, threshold = computer_bn_threshold(model, percent)
    # NOTE(review): assumes the wrapper's first child is the actual
    # MobileNetV2 module (with .features / .classifier) — TODO confirm.
    model = list(model.children())[0]
    # print(model)
    in_channels = 3
    in_idx = None
    # Process one by one
    features_name = 'features'
    features_stage = model.features
    assert isinstance(features_stage, nn.Sequential)
    for block_name, block in features_stage.named_children():
        if isinstance(block, ConvBNActivation) or isinstance(block, ConvBNReLU):
            # Collect the block's layers with their fully-qualified names so
            # set_module_list can splice in the pruned replacements.
            layer_name_list = list()
            layer_list = list()
            for layer_name, layer in block.named_children():
                layer_name_list.append(f'{features_name}.{block_name}.{layer_name}')
                layer_list.append(layer)
            new_module_list, in_channels, in_idx = prune_conv_bn_activation(layer_list,
                                                                            in_channels,
                                                                            in_idx,
                                                                            threshold,
                                                                            minimum_channels,
                                                                            divisor)
            assert len(new_module_list) == len(layer_list) == len(layer_name_list)
            set_module_list(model, layer_name_list, layer_list, new_module_list)
        elif isinstance(block, InvertedResidual):
            # print(block_name)
            # InvertedResidual nests its layers under .conv; flatten them
            # (expanding inner ConvBNActivation children) before pruning.
            sub_block = block.conv
            layer_name_list = list()
            layer_list = list()
            for layer_name, layer in sub_block.named_children():
                if isinstance(layer, ConvBNActivation):
                    for sub_layer_name, sub_layer in layer.named_children():
                        layer_name_list.append(f'{features_name}.{block_name}.conv.{layer_name}.{sub_layer_name}')
                        layer_list.append(sub_layer)
                else:
                    layer_name_list.append(f'{features_name}.{block_name}.conv.{layer_name}')
                    layer_list.append(layer)
            new_module_list, in_channels, in_idx = prune_inverted_residual(layer_list,
                                                                           in_channels,
                                                                           in_idx,
                                                                           threshold,
                                                                           minimum_channels,
                                                                           divisor,
                                                                           block.use_res_connect
                                                                           )
            assert len(new_module_list) == len(layer_list) == len(layer_name_list)
            set_module_list(model, layer_name_list, layer_list, new_module_list)
    # Finally, process classifier
    classifier_name_list = list()
    classifier_module_list = list()
    for name, children in list(model.classifier.named_children()):
        classifier_name_list.append(f'classifier.{name}')
        classifier_module_list.append(children)
    new_module_list = prune_classifier(classifier_module_list, in_channels, in_idx)
    assert len(new_module_list) == len(classifier_module_list) == len(classifier_name_list)
    set_module_list(model, classifier_name_list, classifier_module_list, new_module_list)
    # Re-measure the remaining BN weight mass to report the pruning ratio.
    new_total, _ = computer_bn_threshold(model, percent)
    return 1 - (1.0 * new_total / total), threshold
|
{"hexsha": "0575af71198a2d4cd1727af7eda389873fae9eca", "size": 16030, "ext": "py", "lang": "Python", "max_stars_repo_path": "slim/prune/prune_mobilenet_v2.py", "max_stars_repo_name": "ZJCV/NetworkSlimming", "max_stars_repo_head_hexsha": "1d3d355e538ad8c2d29ec388a57b397aaf387d8c", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2021-09-27T03:00:11.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-25T13:45:56.000Z", "max_issues_repo_path": "slim/prune/prune_mobilenet_v2.py", "max_issues_repo_name": "ZJCV/NetworkSlimming", "max_issues_repo_head_hexsha": "1d3d355e538ad8c2d29ec388a57b397aaf387d8c", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-07-28T02:38:39.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-30T11:17:48.000Z", "max_forks_repo_path": "slim/prune/prune_mobilenet_v2.py", "max_forks_repo_name": "ZJCV/NetworkSlimming", "max_forks_repo_head_hexsha": "1d3d355e538ad8c2d29ec388a57b397aaf387d8c", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 51.050955414, "max_line_length": 118, "alphanum_fraction": 0.5676232065, "include": true, "reason": "import numpy", "num_tokens": 3151}
|
# Minimal Dash.jl example: a text input whose value is echoed below it.
using Dash, DashHtmlComponents, DashCoreComponents

app = dash()

# Layout: one pre-filled text input and an (initially empty) output div.
app.layout = html_div() do
    dcc_input(id = "input-3", value = "initial value", type = "text"),
    html_div(id="output-1")
end

# Callback: re-render the output div whenever the input's value changes.
callback!(app, Output("output-1", "children"), Input("input-3", "value")) do input_value
    "You've entered $(input_value)"
end

# Serve on all interfaces with hot-reload/debug enabled.
run_server(app, "0.0.0.0", debug=true)
|
{"hexsha": "2cd2f0fb883570d25227feffc12e58f022f73739", "size": 366, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "dash_docs/chapters/basic_callbacks/examples/simple-callback.jl", "max_stars_repo_name": "joelostblom/dash-docs", "max_stars_repo_head_hexsha": "7be5aed7795f61ac32375ce33a18046b8f2f5254", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 379, "max_stars_repo_stars_event_min_datetime": "2017-06-21T14:35:52.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-20T01:47:14.000Z", "max_issues_repo_path": "dash_docs/chapters/basic_callbacks/examples/simple-callback.jl", "max_issues_repo_name": "joelostblom/dash-docs", "max_issues_repo_head_hexsha": "7be5aed7795f61ac32375ce33a18046b8f2f5254", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 746, "max_issues_repo_issues_event_min_datetime": "2017-06-21T19:58:17.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-23T14:51:24.000Z", "max_forks_repo_path": "dash_docs/chapters/basic_callbacks/examples/simple-callback.jl", "max_forks_repo_name": "joelostblom/dash-docs", "max_forks_repo_head_hexsha": "7be5aed7795f61ac32375ce33a18046b8f2f5254", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 201, "max_forks_repo_forks_event_min_datetime": "2017-06-21T21:53:19.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-17T13:23:55.000Z", "avg_line_length": 24.4, "max_line_length": 88, "alphanum_fraction": 0.6721311475, "num_tokens": 108}
|
import argparse, os, csv, sys, errno
import networkx as nx
"""
This is functionally equivalent to the mkdir -p [fname] bash command
"""
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else: raise
def read_csv(file_path):
    """Parse a CSV file into a list of dicts keyed by the header row.

    The first line is split on commas to form the header; every remaining
    line is parsed with csv.reader (double-quote quoting, initial spaces
    skipped) and zipped against the header names.
    """
    with open(file_path, "r") as fp:
        raw_lines = fp.read().strip().split("\n")
    header = raw_lines[0].split(",")
    rows = csv.reader(raw_lines[1:], quotechar='"', delimiter=',',
                      quoting=csv.QUOTE_ALL, skipinitialspace=True)
    records = []
    for row in rows:
        records.append({name: row[i] for i, name in enumerate(header)})
    return records
def main():
    """Read per-run regulatory-graph CSVs from --data and write per-testcase
    node/edge CSV files (igraph-importable) into --dump."""
    # Setup the commandline argument parser
    parser = argparse.ArgumentParser(description="Data aggregation script.")
    parser.add_argument("--data", type=str, help="Where should we pull graph data?")
    parser.add_argument("--dump", type=str, help="Where to dump this?", default=".")
    parser.add_argument("--igraphs", action="store_true", help="Should we output igraph files?")
    # Extract arguments from commandline
    args = parser.parse_args()
    data_dir = args.data
    dump_dir = args.dump
    dump_igraphs = args.igraphs
    print(dump_igraphs)
    if not os.path.isdir(data_dir):
        print("Unable to locate all data.")
        exit(-1)
    mkdir_p(dump_dir)
    # find all reg graphs
    graphs = [fname for fname in os.listdir(data_dir) if "reg-graph" in fname]
    for graph_file in graphs:
        print("Processing", graph_file)
        # Run id is encoded in the file name: ...run-id-<ID>.csv
        run_id = graph_file.strip(".csv").split("run-id-")[-1]
        data_path = os.path.join(data_dir, graph_file)
        lines = read_csv(data_path)
        # Bucket the CSV rows by their testcase id.
        testcase_ids = {line["testcase_id"] for line in lines}
        lines_by_testcase_id = {testcase_id:[] for testcase_id in testcase_ids}
        for line in lines:
            testcase_id = line["testcase_id"]
            lines_by_testcase_id[testcase_id].append(line)
        for testcase_id in lines_by_testcase_id:
            nodes = {} # nodes are indexed off of module ids
            promoted_edges = {} # edges are indexed off of frozen tuples
            repressed_edges = {}
            # nodes have the following properties:
            # - id, times_active, times_repressed, times_promoted
            # edges have the following properties:
            # - to, from, type (promote, repress), delta
            def GetModIDs(mod_list_str):
                # Parse a "[1,2,3]"-style string into unique int module ids.
                ids = list({int(mod) for mod in mod_list_str.strip("[]").split(",") if mod != ''})
                return ids
            def GenNewNodeDict(node_id = None):
                return {"id": node_id,
                        "times_active": 0,
                        "times_repressed": 0,
                        "times_promoted": 0}
            def GenNewEdgeDict():
                return {"to": None, "from": None, "type": None, "reg_delta": 0}
            for info in lines_by_testcase_id[testcase_id]:
                active_modules = GetModIDs(info["active_modules"])
                promoted_modules = GetModIDs(info["promoted"])
                repressed_modules = GetModIDs(info["repressed"])
                reg_deltas = list(map(float, info["reg_deltas"].strip("[]").split(",")))
                # match_deltas = list(map(float, info["match_deltas"].strip("[]").split(",")))
                # Update nodes: count how often each module appears in each role.
                for mod_id in active_modules:
                    if mod_id not in nodes:
                        nodes[mod_id] = GenNewNodeDict(mod_id)
                    nodes[mod_id]["times_active"] += 1
                for mod_id in repressed_modules:
                    if mod_id not in nodes:
                        nodes[mod_id] = GenNewNodeDict(mod_id)
                    nodes[mod_id]["times_repressed"] += 1
                for mod_id in promoted_modules:
                    if mod_id not in nodes:
                        nodes[mod_id] = GenNewNodeDict(mod_id)
                    nodes[mod_id]["times_promoted"] += 1
                # Update edges: every active module gets an edge to every
                # repressed/promoted module observed in the same row.
                for active_id in active_modules:
                    # update all repressed edges
                    for repressed_id in repressed_modules:
                        edge = (active_id, repressed_id)
                        if edge not in repressed_edges:
                            repressed_edges[edge] = GenNewEdgeDict()
                            repressed_edges[edge]["to"] = repressed_id
                            repressed_edges[edge]["from"] = active_id
                            repressed_edges[edge]["type"] = "repress"
                        # Accumulate the regulatory delta of the target module.
                        edge_delta = reg_deltas[int(repressed_id)]
                        repressed_edges[edge]["reg_delta"] += edge_delta
                    # update all promoted edges
                    for promoted_id in promoted_modules:
                        edge = (active_id, promoted_id)
                        if edge not in promoted_edges:
                            promoted_edges[edge] = GenNewEdgeDict()
                            promoted_edges[edge]["to"] = promoted_id
                            promoted_edges[edge]["from"] = active_id
                            promoted_edges[edge]["type"] = "promote"
                        edge_delta = reg_deltas[int(promoted_id)]
                        promoted_edges[edge]["reg_delta"] += edge_delta
            # dump igraph: one nodes file and one edges file per testcase.
            nodes_fields = ["id", "times_active", "times_repressed","times_promoted"]
            edges_fields = ["from", "to", "type", "reg_delta"]
            nodes_content = [",".join(nodes_fields)] + [",".join([str(nodes[node][field]) for field in nodes_fields]) for node in nodes]
            edges_content = [",".join(edges_fields)]
            edges_content += [",".join([str(promoted_edges[edge][field]) for field in edges_fields]) for edge in promoted_edges]
            edges_content += [",".join([str(repressed_edges[edge][field]) for field in edges_fields]) for edge in repressed_edges]
            nodes_out_path = os.path.join(dump_dir, f"reg_graph_id-{run_id}_test-{testcase_id}_nodes.csv")
            edges_out_path = os.path.join(dump_dir, f"reg_graph_id-{run_id}_test-{testcase_id}_edges.csv")
            with open(nodes_out_path, "w") as fp:
                fp.write("\n".join(nodes_content))
            with open(edges_out_path, "w") as fp:
                fp.write("\n".join(edges_content))

if __name__ == "__main__":
    main()
|
{"hexsha": "57e18a84335ef291318206d15647d590ab1e612d", "size": 6568, "ext": "py", "lang": "Python", "max_stars_repo_path": "experiments/2020-11-28-bool-calc-prefix/analysis/genIGraphsByTestID.py", "max_stars_repo_name": "mmore500/Tag-based-Genetic-Regulation-for-LinearGP", "max_stars_repo_head_hexsha": "eda84198123cce32d8282d6920bf80b48d74c248", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-09-17T13:17:07.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-17T13:17:07.000Z", "max_issues_repo_path": "experiments/2020-11-28-bool-calc-prefix/analysis/genIGraphsByTestID.py", "max_issues_repo_name": "mmore500/Tag-based-Genetic-Regulation-for-LinearGP", "max_issues_repo_head_hexsha": "eda84198123cce32d8282d6920bf80b48d74c248", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-11-09T16:52:34.000Z", "max_issues_repo_issues_event_max_datetime": "2020-12-11T02:32:40.000Z", "max_forks_repo_path": "experiments/2020-11-28-bool-calc-prefix/analysis/genIGraphsByTestID.py", "max_forks_repo_name": "mmore500/Tag-based-Genetic-Regulation-for-LinearGP", "max_forks_repo_head_hexsha": "eda84198123cce32d8282d6920bf80b48d74c248", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-08-11T00:08:01.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-12T18:14:45.000Z", "avg_line_length": 46.5815602837, "max_line_length": 164, "alphanum_fraction": 0.5669914738, "include": true, "reason": "import networkx", "num_tokens": 1451}
|
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2014-2014.
//
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
// See http://www.boost.org/libs/container for documentation.
//
//////////////////////////////////////////////////////////////////////////////
#ifndef BOOST_CONTAINER_DETAIL_ALGORITHM_HPP
#define BOOST_CONTAINER_DETAIL_ALGORITHM_HPP
#ifndef BOOST_CONFIG_HPP
# include <boost/config.hpp>
#endif
#if defined(BOOST_HAS_PRAGMA_ONCE)
# pragma once
#endif
#include <boost/intrusive/detail/algorithm.hpp>
namespace boost {
namespace container {
using boost::intrusive::algo_equal;
using boost::intrusive::algo_lexicographical_compare;
//! Function object adaptor that fixes the *first* argument of a binary
//! functor `Func` to a stored value: calling it with x yields op(value, x).
template<class Func>
class binder1st
{
   public:
   typedef typename Func::second_argument_type  argument_type;
   typedef typename Func::result_type           result_type;

   //! Stores a copy of `func` and of the value bound as its first argument.
   binder1st(const Func& func, const typename Func::first_argument_type& arg)
      : m_op(func), m_bound(arg)
   {}

   result_type operator()(const argument_type& arg) const
   {  return m_op(m_bound, arg);  }

   result_type operator()(argument_type& arg) const
   {  return m_op(m_bound, arg);  }

   private:
   Func m_op;
   typename Func::first_argument_type m_bound;
};
//! Convenience factory: returns a binder1st fixing `arg` as the first
//! argument of `func` (mirrors the classic std::bind1st).
template<class Func, class T>
inline binder1st<Func> bind1st(const Func& func, const T& arg)
{
   typedef boost::container::binder1st<Func> binder_t;
   return binder_t(func, arg);
}
//! Function object adaptor that fixes the *second* argument of a binary
//! functor `Func` to a stored value: calling it with x yields op(x, value).
template<class Func>
class binder2nd
{
   public:
   typedef typename Func::first_argument_type  argument_type;
   typedef typename Func::result_type          result_type;

   //! Stores a copy of `func` and of the value bound as its second argument.
   binder2nd(const Func& func, const typename Func::second_argument_type& arg)
      : m_op(func), m_bound(arg)
   {}

   result_type operator()(const argument_type& arg) const
   {  return m_op(arg, m_bound);  }

   result_type operator()(argument_type& arg) const
   {  return m_op(arg, m_bound);  }

   private:
   Func m_op;
   typename Func::second_argument_type m_bound;
};
//! Convenience factory: returns a binder2nd fixing `arg` as the second
//! argument of `func` (mirrors the classic std::bind2nd).
template<class Func, class T>
inline binder2nd<Func> bind2nd(const Func& func, const T& arg)
{
   typedef boost::container::binder2nd<Func> binder_t;
   return binder_t(func, arg);
}
//! Wraps a unary predicate `Func` and yields the logical negation of its
//! result.
template<class Func>
class unary_negate
{
   public:
   typedef typename Func::argument_type argument_type;
   typedef typename Func::result_type   result_type;

   explicit unary_negate(const Func& func)
      : m_pred(func)
   {}

   //! True exactly when the wrapped predicate returns false.
   bool operator()(const typename Func::argument_type& arg) const
   {  return !m_pred(arg);  }

   private:
   Func m_pred;
};
//! Convenience factory for unary_negate (mirrors the classic std::not1).
template<class Func> inline
unary_negate<Func> not1(const Func& func)
{
   unary_negate<Func> negated(func);
   return negated;
}
//! Returns the first iterator in [first, last) whose element satisfies the
//! unary predicate p, or last if no element does (mirrors std::find_if).
template<class InputIt, class UnaryPredicate>
InputIt find_if(InputIt first, InputIt last, UnaryPredicate p)
{
   while (first != last && !p(*first))
      ++first;
   return first;
}
// Finds the LAST occurrence of the sequence [first2, last2) inside
// [first1, last1), comparing elements with the binary predicate p.
// Returns an iterator to the start of the last match, or last1 if the
// needle never occurs (same contract as std::find_end, C++11 semantics).
template<class ForwardIt1, class ForwardIt2, class BinaryPredicate>
ForwardIt1 find_end (ForwardIt1 first1, ForwardIt1 last1
                    ,ForwardIt2 first2, ForwardIt2 last2
                    ,BinaryPredicate p)
{
   if (first2==last2)
      return last1;  // specified in C++11
   ForwardIt1 ret = last1;
   while (first1!=last1)
   {
      // Try to match the whole needle anchored at first1.
      ForwardIt1 it1 = first1;
      ForwardIt2 it2 = first2;
      while ( p(*it1, *it2) ) {
         ++it1; ++it2;
         if (it2==last2) {
            // Full match: remember it and keep scanning for a later one.
            ret=first1;
            break;
         }
         if (it1==last1)
            // Haystack exhausted mid-match: no later match is possible.
            return ret;
      }
      ++first1;
   }
   return ret;
}
//! Returns the first position in [first1, last1) whose element matches
//! (under p) ANY element of [first2, last2); returns last1 when no element
//! matches (mirrors std::find_first_of).
template<class InputIt, class ForwardIt, class BinaryPredicate>
InputIt find_first_of(InputIt first1, InputIt last1, ForwardIt first2, ForwardIt last2, BinaryPredicate p)
{
   while (first1 != last1) {
      for (ForwardIt candidate = first2; candidate != last2; ++candidate) {
         if (p(*first1, *candidate))
            return first1;
      }
      ++first1;
   }
   return last1;
}
// Finds the FIRST occurrence of [first2, last2) inside [first1, last1),
// comparing elements with p. Returns an iterator to the start of the match,
// or last1 if there is none. An empty needle matches immediately at first1
// (same contract as std::search).
template<class ForwardIt1, class ForwardIt2, class BinaryPredicate>
ForwardIt1 search(ForwardIt1 first1, ForwardIt1 last1,
                  ForwardIt2 first2, ForwardIt2 last2, BinaryPredicate p)
{
   for (; ; ++first1) {
      // Attempt a full-needle match anchored at first1.
      ForwardIt1 it = first1;
      for (ForwardIt2 it2 = first2; ; ++it, ++it2) {
         if (it2 == last2) {
            return first1;   // whole needle matched
         }
         if (it == last1) {
            return last1;    // haystack exhausted: no match anywhere
         }
         if (!p(*it, *it2)) {
            break;           // mismatch: retry anchored one position later
         }
      }
   }
}
} //namespace container {
} //namespace boost {
#endif //#ifndef BOOST_CONTAINER_DETAIL_ALGORITHM_HPP
|
{"hexsha": "ce5582bcc57c8bd2e083fa8c62b670b7b22da92a", "size": 4525, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "ReactNativeFrontend/ios/Pods/boost/boost/container/detail/algorithm.hpp", "max_stars_repo_name": "Harshitha91/Tmdb-react-native-node", "max_stars_repo_head_hexsha": "e06e3f25a7ee6946ef07a1f524fdf62e48424293", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2728.0, "max_stars_repo_stars_event_min_datetime": "2015-01-01T10:06:45.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T18:12:58.000Z", "max_issues_repo_path": "ReactNativeFrontend/ios/Pods/boost/boost/container/detail/algorithm.hpp", "max_issues_repo_name": "Harshitha91/Tmdb-react-native-node", "max_issues_repo_head_hexsha": "e06e3f25a7ee6946ef07a1f524fdf62e48424293", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1192.0, "max_issues_repo_issues_event_min_datetime": "2015-01-01T06:03:19.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T09:14:36.000Z", "max_forks_repo_path": "ReactNativeFrontend/ios/Pods/boost/boost/container/detail/algorithm.hpp", "max_forks_repo_name": "Harshitha91/Tmdb-react-native-node", "max_forks_repo_head_hexsha": "e06e3f25a7ee6946ef07a1f524fdf62e48424293", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 334.0, "max_forks_repo_forks_event_min_datetime": "2015-01-08T20:47:03.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-18T07:07:01.000Z", "avg_line_length": 24.3279569892, "max_line_length": 106, "alphanum_fraction": 0.635359116, "num_tokens": 1143}
|
/*****************************************************************************
* slicing.cpp Blitz++ Array slicing & subarrays example
*****************************************************************************/
#include <blitz/array.h>
BZ_USING_NAMESPACE(blitz)
// Demonstrates Blitz++ Range-based slicing: writing constants, a matrix,
// and scalars into rectangular sub-blocks of a 6x6 array.
int main()
{
    Array<int,2> A(6,6), B(3,3);

    // Set the upper left quadrant of A to 5
    A(Range(0,2), Range(0,2)) = 5;

    // Set the upper right quadrant of A to an identity matrix
    // (comma-separated initializer fills B in row-major order).
    B = 1, 0, 0,
        0, 1, 0,
        0, 0, 1;
    A(Range(0,2), Range(3,5)) = B;

    // Set the fourth row to 1
#ifdef BZ_HAVE_PARTIAL_ORDERING
    A(3, Range::all()) = 1;
#else
    // Fallback for compilers without partial ordering of member templates:
    // use a one-element Range instead of a scalar row index.
    cout << "Warning: your compiler does not support partial ordering of"
         << endl << "member templates; using kludge." << endl;
    A(Range(3,3), Range::all()) = 1;
#endif

    // Set the last two rows to 0 (toEnd extends the Range to the array edge)
    A(Range(4, toEnd), Range::all()) = 0;

    // Set the bottom right element to 8
    A(5,5) = 8;

    cout << "A = " << A << endl;

    return 0;
}
|
{"hexsha": "aa83a8eebf58b74fc494cc9abae8da483b844d48", "size": 1025, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "depspawn-blitz-0.10/examples/slicing.cpp", "max_stars_repo_name": "fraguela/depspawn", "max_stars_repo_head_hexsha": "b5760f4c0d38a1b245ee5274e2ccc5c5fe2d3d45", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8.0, "max_stars_repo_stars_event_min_datetime": "2017-04-12T11:05:40.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T11:10:27.000Z", "max_issues_repo_path": "ibtk/third_party/blitz-0.10/examples/slicing.cpp", "max_issues_repo_name": "MSV-Project/IBAMR", "max_issues_repo_head_hexsha": "3cf614c31bb3c94e2620f165ba967cba719c45ea", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ibtk/third_party/blitz-0.10/examples/slicing.cpp", "max_forks_repo_name": "MSV-Project/IBAMR", "max_forks_repo_head_hexsha": "3cf614c31bb3c94e2620f165ba967cba719c45ea", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.8372093023, "max_line_length": 79, "alphanum_fraction": 0.4712195122, "num_tokens": 303}
|
# Python script to run DaCapo benchmarks and collect performance statistics
# Usage: python3 runDaCapo.py
# Note: dacapo-9.12-MR1-bach.jar msut be present in current directory
# The script can be customized as follows:
# 1. Specify which benchmarks to run ==> change "benchmark" list below
# 2. Specify the number iterations for each benchmark (to warm up the JVM) ==> change "benchmarkOpts" below
# 3. Specify the number of runs for each benchmark ==> change "numIter" below
# 4. Specify JDK options ==> change "jvmOption" list below
# 5. Specify JDK to use ==> change "jdks" list below
import re # for regular expressions
import sys # for accessing parameters and exit
import shlex, subprocess
import logging
import numpy as np
numIter = 10 # number of iterations to use for each benchmark in each configuration
#level=logging.DEBUG,
logging.basicConfig(level=logging.INFO,
format='%(asctime)s :: %(levelname)s :: (%(threadName)-6s) :: %(message)s',
)
benchmarkOpts = "--iterations 20 -s default" # not all benchmarks can use size large. Better to use "default"
# List of benchmarks to run
# avrora batik eclipse fop h2 jython luindex lusearch lusearch-fix pmd sunflow tomcat tradebeans tradesoap xalan
benchmarks = [
#"avrora",
#"batik", Not working at all with OpenJDK
#"eclipse", # only working with Java8
#"fop",
#"h2",
#"jython",
#"luindex",
#"lusearch-fix",
#"pmd",
#"sunflow",
#"tomcat", # does not work at all
#"tradebeans", # does not work at all
#"tradesoap", # does not work at all
"xalan"
]
# List of JVM options to try
jvmOptions = [
#"-Xmx1G",
#"-XX:-EnableHCR -Xmx1G",
"-XX:-OSRSafePoint -Xmx1G",
#"-Xaggressive -Xmx1G",
#"-Xms1G -Xmx1G",
#"-Xtune:throughput -Xmx1G",
#"-Xtune:throughput -Xmx1G -Xjit:acceptHugeMethods",
#"-Xtune:throughput -Xmx1G -Xjit:inlineVeryLargeCompiledMethods",
#"-Xtune:throughput -Xmx1G -Xjit:bigCalleeFreqCutoffAtHot=0",
#"-Xtune:throughput -Xmx1G -Xjit:bigCalleeThresholdForColdCallsAtHot=600",
]
jdks = [
#"/home/mpirvu/FullJava11/openj9-openjdk-jdk11/build/linux-x86_64-normal-server-release/images/jdk",
#"/home/mpirvu/FullVM/openj9-openjdk-jdk8/build/linux-x86_64-normal-server-release/images/j2re-image",
"/home/mpirvu/sdks/OpenJ9-JDK8-x86-64_linux-20220126-021513",
]
'''
Returns the execution time in milliseconds as a float
or Nan if the experiment fails
'''
def runBenchmarkOnce(benchmarkName, jvm, jvmOpts):
cmd = f"{jvm}/bin/java {jvmOpts} -jar dacapo-9.12-MR1-bach.jar {benchmarkOpts} {benchmarkName}"
logging.info("Starting: {cmd}".format(cmd=cmd))
output = subprocess.check_output(shlex.split(cmd), universal_newlines=True, stderr=subprocess.STDOUT)
# Parse the output and look for "PASSED in nnnn msec ====
lines = output.splitlines()
pattern = re.compile('^===== DaCapo .+ PASSED in (\d+) msec ====')
for line in lines:
m = pattern.match(line)
if m:
print(line)
execTime = float(m.group(1))
print(execTime)
return execTime
return np.nan
#print(output)
def tdistribution(degreesOfFreedom):
table = [6.314, 2.92, 2.353, 2.132, 2.015, 1.943, 1.895, 1.860, 1.833, 1.812, 1.796, 1.782, 1.771, 1.761, 1.753, 1.746, 1.740, 1.734, 1.729, 1.725]
if degreesOfFreedom < 1:
return -1.0
if degreesOfFreedom <= 20:
return table[degreesOfFreedom-1]
if degreesOfFreedom < 30:
return 1.697
if degreesOfFreedom < 40:
return 1.684
if degreesOfFreedom < 50:
return 1.676
if degreesOfFreedom < 60:
return 1.671
if degreesOfFreedom < 70:
return 1.667
if degreesOfFreedom < 80:
return 1.664
if degreesOfFreedom < 90:
return 1.662
if degreesOfFreedom < 100:
return 1.660
return 1.65
#import scipy.stats as st
#def computeCI95(a):
# results = st.t.interval(0.95, len(a)-1, loc=0, scale=st.sem(a))
# return 100.0 * results[1] / st.tmean(a)
# multi-dimensional array of results
results = np.full((len(benchmarks), len(jdks), len(jvmOptions), numIter), fill_value=np.nan, dtype=np.float)
for bench in range(len(benchmarks)):
for jdk in range(len(jdks)):
for opt in range(len(jvmOptions)):
for i in range(numIter):
execTime = runBenchmarkOnce(benchmarks[bench], jdks[jdk], jvmOptions[opt])
results[bench, jdk, opt, i] = execTime
# Stats ignoring Nan which are due to failed experiments
mean = np.nanmean(results, axis=3)
std = np.nanstd(results, axis=3)
min = np.nanmin(results, axis=3)
max = np.nanmax(results, axis=3)
# Count valid experiments excluding Nan values
numValidExperiments = np.count_nonzero(~np.isnan(results), axis=3)
print(numValidExperiments)
# Create my function that will apply "tdistribution" to all elements in an ndarray
tdist_vec = np.vectorize(tdistribution)
# Compute 95% confidence intervals as percentages of the mean value
ci95 = tdist_vec(numValidExperiments-1) * std / np.sqrt(numValidExperiments) / mean *100.0
# np.percentile(s1, [25, 50, 75], interpolation='midpoint')
# Count how many non-NaN values are in the array
# np.count_nonzero(~np.isnan(data))
for bench in range(len(benchmarks)):
for jdk in range(len(jdks)):
for opt in range(len(jvmOptions)):
print("Bench =", benchmarks[bench], "JDK =", jdks[jdk], "Opt =", jvmOptions[opt])
print("mean = {m:5.0f} \tCI95 = {ci:4.2}% \tStdDev = {s:3.1f} \tMin = {mi:5.0f} \tMax = {ma:5.0f} \tNum = {n:2d}".
format(m=mean[bench, jdk, opt], ci=ci95[bench, jdk, opt], s=std[bench, jdk, opt], mi= min[bench, jdk, opt], ma=max[bench, jdk, opt], n=numValidExperiments[bench, jdk, opt]))
|
{"hexsha": "4aaa84c620ef8d8234d2b4cb966c0fbc84199c25", "size": 5809, "ext": "py", "lang": "Python", "max_stars_repo_path": "runDaCapo.py", "max_stars_repo_name": "mpirvu/Utils", "max_stars_repo_head_hexsha": "60884803de9d499ef79527c5a6386aabc876c85e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "runDaCapo.py", "max_issues_repo_name": "mpirvu/Utils", "max_issues_repo_head_hexsha": "60884803de9d499ef79527c5a6386aabc876c85e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "runDaCapo.py", "max_forks_repo_name": "mpirvu/Utils", "max_forks_repo_head_hexsha": "60884803de9d499ef79527c5a6386aabc876c85e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.9673202614, "max_line_length": 189, "alphanum_fraction": 0.6663797556, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1787}
|
SUBROUTINE icsd_t1_7(d_a,k_a_offset,d_b,k_b_offset,d_c,k_c_offset,ctx,ex)
IMPLICIT NONE
INTEGER :: POSTPROCESSMARKER1
INTEGER :: ga_max_dim
parameter(ga_max_dim = 7)
INTEGER :: ga_nnodes, ga_nodeid, ga_read_inc
INTEGER :: ga_pgroup_nnodes, ga_pgroup_nodeid
INTEGER :: nga_pgroup_nnodes, nga_pgroup_nodeid
INTEGER :: ga_spd_invert, ga_solve, ga_llt_solve
INTEGER :: ga_inquire_memory, ga_memory_avail
INTEGER :: nga_inquire_memory, nga_memory_avail
LOGICAL :: ga_create, ga_destroy, ga_locate, ga_create_irreg
LOGICAL :: nga_destroy
LOGICAL :: ga_locate_region
LOGICAL :: ga_compare_distr, ga_duplicate, ga_uses_ma
LOGICAL :: nga_compare_distr, nga_duplicate, nga_uses_ma
LOGICAL :: ga_memory_limited, nga_memory_limited
LOGICAL :: ga_create_mutexes
LOGICAL :: nga_create_mutexes
LOGICAL :: ga_destroy_mutexes
LOGICAL :: nga_destroy_mutexes
LOGICAL :: ga_valid_handle, nga_valid_handle
LOGICAL :: ga_verify_handle, nga_verify_handle
LOGICAL :: ga_update2_ghosts
LOGICAL :: ga_update3_ghosts
LOGICAL :: ga_update4_ghosts
LOGICAL :: ga_update5_ghosts
LOGICAL :: ga_update6_ghosts
LOGICAL :: ga_update7_ghosts
LOGICAL :: ga_set_update4_info
LOGICAL :: ga_set_update5_info
LOGICAL :: nga_update_ghost_dir
LOGICAL :: ga_has_ghosts, nga_has_ghosts
INTEGER :: ga_create_handle
INTEGER :: nga_create_handle
LOGICAL :: ga_allocate
INTEGER :: ga_pgroup_create, nga_pgroup_create
INTEGER :: ga_pgroup_split, nga_pgroup_split
INTEGER :: ga_pgroup_split_irreg, nga_pgroup_split_irreg
DOUBLE PRECISION :: ga_ddot, ga_ddot_patch
COMPLEX :: ga_zdot, ga_zdot_patch
COMPLEX :: nga_zdot_patch
COMPLEX :: ga_cdot, ga_cdot_patch
COMPLEX :: nga_cdot_patch
DOUBLE PRECISION :: nga_ddot_patch, ga_wtime
INTEGER :: ga_idot
INTEGER :: nga_idot_patch
INTEGER :: ga_pgroup_get_default, ga_pgroup_get_mirror
INTEGER :: nga_pgroup_get_default, nga_pgroup_get_mirror
INTEGER :: ga_pgroup_get_world, nga_pgroup_get_world
INTEGER :: ga_ndim, nga_ndim
REAL :: ga_sdot, ga_sdot_patch, nga_sdot_patch
INTEGER :: ga_is_mirrored, nga_is_mirrored
INTEGER :: ga_nbtest, nga_nbtest
INTEGER :: nga_read_inc
INTEGER :: ga_cluster_nprocs, ga_cluster_nodeid, ga_cluster_nnodes
INTEGER :: ga_cluster_procid, ga_cluster_proc_nodeid
INTEGER :: nga_locate_num_blocks
INTEGER :: ga_total_blocks, nga_total_blocks
LOGICAL :: ga_uses_proc_grid, nga_uses_proc_grid
LOGICAL :: nga_create, nga_locate, nga_create_irreg
LOGICAL :: nga_locate_region
LOGICAL :: nga_create_ghosts_irreg, nga_create_ghosts
LOGICAL :: nga_create_config, nga_create_irreg_config
LOGICAL :: nga_create_ghosts_irreg_config, nga_create_ghosts_config
LOGICAL :: ga_get_debug, nga_get_debug
INTEGER :: ga_get_dimension, nga_get_dimension
INTEGER :: ga_get_pgroup, nga_get_pgroup
INTEGER :: ga_get_pgroup_size, nga_get_pgroup_size
LOGICAL :: ga_pgroup_destroy, nga_pgroup_destroy
INTEGER :: ga_pgroup_absolute_id, nga_pgroup_absolute_id
INTEGER :: nga_register_type, nga_deregister_type
LOGICAL :: nga_check_notify
external :: ga_create, ga_destroy, ga_ddot, ga_locate
external :: nga_destroy
external :: ga_locate_region
external :: ga_nnodes, ga_nodeid, ga_read_inc, ga_create_irreg
external :: ga_pgroup_nnodes, ga_pgroup_nodeid
external :: nga_pgroup_nnodes, nga_pgroup_nodeid
external :: ga_ddot_patch, ga_compare_distr, ga_duplicate
external :: nga_compare_distr, nga_duplicate
external :: ga_inquire_memory, ga_uses_ma, ga_memory_limited
external :: nga_inquire_memory, nga_uses_ma, nga_memory_limited
external :: ga_memory_avail
external :: ga_zdot, ga_zdot_patch
external :: ga_cdot, ga_cdot_patch
external :: ga_create_mutexes
external :: nga_create_mutexes
external :: ga_destroy_mutexes
external :: nga_destroy_mutexes
external :: ga_valid_handle, nga_valid_handle
external :: ga_verify_handle, nga_verify_handle
external :: ga_update2_ghosts
external :: ga_update3_ghosts
external :: ga_update4_ghosts
external :: ga_update5_ghosts
external :: ga_update6_ghosts
external :: ga_update7_ghosts
external :: ga_set_update4_info
external :: ga_set_update5_info
external :: nga_update_ghost_dir
external :: ga_create_handle
external :: nga_create_handle
external :: ga_allocate
external :: ga_pgroup_create, nga_pgroup_create
external :: ga_pgroup_split, nga_pgroup_split
external :: ga_pgroup_split_irreg, nga_pgroup_split_irreg
external :: ga_has_ghosts, nga_has_ghosts
external :: ga_pgroup_get_default, ga_pgroup_get_mirror
external :: nga_pgroup_get_default, nga_pgroup_get_mirror
external :: ga_pgroup_get_world, nga_pgroup_get_world
external :: ga_ndim, nga_ndim
external :: ga_spd_invert, ga_solve, ga_llt_solve
external :: nga_read_inc, nga_create, nga_locate, nga_create_irreg
external :: nga_locate_region
external :: nga_create_ghosts_irreg, nga_create_ghosts
external :: nga_create_config, nga_create_irreg_config
external :: nga_create_ghosts_irreg_config, nga_create_ghosts_config
external :: nga_ddot_patch, nga_zdot_patch, nga_cdot_patch
external :: nga_idot_patch, ga_idot
external :: ga_sdot, ga_sdot_patch, nga_sdot_patch
external :: ga_cluster_nprocs, ga_cluster_nodeid, ga_cluster_nnodes
external :: ga_cluster_procid, ga_cluster_proc_nodeid
external :: ga_is_mirrored
external :: nga_locate_num_blocks
external :: ga_total_blocks
external :: ga_uses_proc_grid, nga_uses_proc_grid
external :: ga_get_debug, nga_get_debug
external :: ga_get_pgroup, nga_get_pgroup
external :: ga_get_pgroup_size, nga_get_pgroup_size
external :: ga_pgroup_destroy, nga_pgroup_destroy
external :: ga_wtime
external :: ga_nbtest, nga_nbtest
external :: ga_pgroup_absolute_id, nga_pgroup_absolute_id
external :: nga_register_type, nga_deregister_type
external :: nga_get_field, nga_nbget_field
external :: nga_put_field, nga_nbput_field
external :: nga_check_notify
INTEGER :: MT_BYTE
INTEGER :: MT_INT
INTEGER :: MT_LOG
INTEGER :: MT_REAL
INTEGER :: MT_DBL
INTEGER :: MT_SCPL
INTEGER :: MT_DCPL
INTEGER :: MT_F_FIRST
INTEGER :: MT_F_LAST
parameter(MT_BYTE = (1000 + 9))
parameter(MT_INT = (1000 + 10))
parameter(MT_LOG = (1000 + 11))
parameter(MT_REAL = (1000 + 12))
parameter(MT_DBL = (1000 + 13))
parameter(MT_SCPL = (1000 + 14))
parameter(MT_DCPL = (1000 + 15))
parameter(MT_F_FIRST = MT_BYTE)
parameter(MT_F_LAST = MT_DCPL)
LOGICAL :: MA_alloc_get
LOGICAL :: MA_allocate_heap
LOGICAL :: MA_chop_stack
LOGICAL :: MA_free_heap
LOGICAL :: MA_free_heap_piece
LOGICAL :: MA_get_index
LOGICAL :: MA_get_next_memhandle
LOGICAL :: MA_get_numalign
LOGICAL :: MA_init
LOGICAL :: MA_initialized
LOGICAL :: MA_init_memhandle_iterator
INTEGER :: MA_inquire_avail
INTEGER :: MA_inquire_heap
INTEGER :: MA_inquire_heap_check_stack
INTEGER :: MA_inquire_heap_no_partition
INTEGER :: MA_inquire_stack
INTEGER :: MA_inquire_stack_check_heap
INTEGER :: MA_inquire_stack_no_partition
LOGICAL :: MA_pop_stack
LOGICAL :: MA_push_get
LOGICAL :: MA_push_stack
LOGICAL :: MA_set_auto_verify
LOGICAL :: MA_set_error_print
LOGICAL :: MA_set_hard_fail
LOGICAL :: MA_set_numalign
INTEGER :: MA_sizeof
INTEGER :: MA_sizeof_overhead
LOGICAL :: MA_verify_allocator_stuff
external :: MA_alloc_get
external :: MA_allocate_heap
external :: MA_chop_stack
external :: MA_free_heap
external :: MA_free_heap_piece
external :: MA_get_index
external :: MA_get_next_memhandle
external :: MA_get_numalign
external :: MA_init
external :: MA_initialized
external :: MA_init_memhandle_iterator
external :: MA_inquire_avail
external :: MA_inquire_heap
external :: MA_inquire_heap_check_stack
external :: MA_inquire_heap_no_partition
external :: MA_inquire_stack
external :: MA_inquire_stack_check_heap
external :: MA_inquire_stack_no_partition
external :: MA_pop_stack
external :: MA_print_stats
external :: MA_push_get
external :: MA_push_stack
external :: MA_set_auto_verify
external :: MA_set_error_print
external :: MA_set_hard_fail
external :: MA_set_numalign
external :: MA_sizeof
external :: MA_sizeof_overhead
external :: MA_summarize_allocated_blocks
external :: MA_trace
external :: MA_verify_allocator_stuff
CHARACTER(len=1), DIMENSION(2) :: byte_mb
INTEGER, DIMENSION(2) :: int_mb
LOGICAL, DIMENSION(2) :: log_mb
REAL, DIMENSION(2) :: real_mb
DOUBLE PRECISION, DIMENSION(2) :: dbl_mb
COMPLEX, DIMENSION(2) :: scpl_mb
COMPLEX, DIMENSION(2) :: dcpl_mb
LOGICAL :: sym_shell, sym_shell_pair, sym_atom, sym_atom_pair
LOGICAL :: sym_char_table, sym_abelian_group
LOGICAL :: sym_atom_quartet
INTEGER :: sym_center_map, sym_number_ops
external :: sym_shell, sym_atom
external :: sym_center_map, sym_number_ops
external :: sym_shell_pair, sym_atom_pair
external :: sym_atom_quartet, sym_char_table
external :: sym_abelian_group
INTEGER :: UERR, UNKNOWN_ERR, MEM_ERR, RTDB_ERR, INPUT_ERR, CAPMIS_ERR
INTEGER :: BASIS_ERR, GEOM_ERR, GA_ERR, MA_ERR, INT_ERR, DISK_ERR
INTEGER :: CALC_ERR, FMM_ERR, STACK_ERR, HEAP_ERR
parameter(UERR = 0,UNKNOWN_ERR = 0,MEM_ERR = 10,RTDB_ERR = 20,INPUT_ERR = 30)
parameter(CAPMIS_ERR = 40,BASIS_ERR = 50,GEOM_ERR = 60,GA_ERR = 70)
parameter(MA_ERR = 80,INT_ERR = 90,DISK_ERR = 100,CALC_ERR = 110)
parameter(FMM_ERR = 120,STACK_ERR = 11,HEAP_ERR = 12)
INTEGER :: l_spin, k_spin
INTEGER :: l_sym, k_sym
INTEGER :: l_range, k_range
INTEGER :: noa, nob, nva, nvb
INTEGER :: noab, nvab
INTEGER :: irrep_e
parameter(irrep_e = 0)
INTEGER :: irrep_e2
parameter(irrep_e2 = 0)
INTEGER :: irrep_f
parameter(irrep_f = 0)
INTEGER :: irrep_v
parameter(irrep_v = 0)
INTEGER :: irrep_t
parameter(irrep_t = 0)
INTEGER :: irrep_t1
parameter(irrep_t1 = 0)
INTEGER :: irrep_t2
parameter(irrep_t2 = 0)
INTEGER :: irrep_t3
parameter(irrep_t3 = 0)
INTEGER :: irrep_x
INTEGER :: irrep_y
INTEGER :: irrep_d
INTEGER :: irrep_o
INTEGER :: irrep_a
INTEGER :: irrep_b
INTEGER :: irrep_c
INTEGER :: irrep_tr
INTEGER :: irrep_yr
INTEGER :: irrep_oa
INTEGER :: irrep_ob
INTEGER :: irrep_oc
INTEGER :: irrep_od
INTEGER :: irrep_tra
INTEGER :: irrep_trb
INTEGER :: irrep_trc
INTEGER :: irrep_trd
INTEGER :: idiv2e
LOGICAL :: restricted
LOGICAL :: intorb
LOGICAL :: read_int, write_int
LOGICAL :: read_ta, write_ta
LOGICAL :: read_xa, write_xa
LOGICAL :: read_in3, write_in3
INTEGER :: nproc_read_tensor, nproc_write_tensor
COMMON / tceinteger / noa,nob,nva,nvb,noab,nvab,l_spin,k_spin,l_sym,k_sym,l_range,k_range,irrep_x,irrep_y,irrep_d,irrep_o,irrep_tr,irrep_yr,irrep_a,irrep_b,irrep_c,irrep_oa,irrep_ob,irrep_oc,irrep_od,irrep_tra,irrep_trb,irrep_trc,irrep_trd,nproc_read_tensor,nproc_write_tensor,idiv2e
COMMON / tcelogical / restricted,intorb,read_int,write_int,read_ta,write_ta,read_xa,write_xa,read_in3,write_in3
INTEGER :: print_none, print_low, print_medium, print_high, print_debug, print_default, print_never
parameter(print_none = 0,print_low = 10,print_medium = 20,print_high = 30,print_debug = 100,print_never = 1000000)
parameter(print_default = print_medium)
LOGICAL :: util_print
DOUBLE PRECISION :: ddot
DOUBLE PRECISION :: util_cpusec
DOUBLE PRECISION :: util_wallsec
DOUBLE PRECISION :: util_random
INTEGER :: util_batch_job_time_remaining
INTEGER :: util_time_remaining
LOGICAL :: util_test_time_remaining
LOGICAL :: util_nwchemrc_get
external :: util_print
external :: ddot
external :: util_cpusec
external :: util_wallsec
external :: util_random
external :: util_batch_job_time_remaining
external :: util_time_remaining
external :: util_test_time_remaining
external :: util_nwchemrc_get
LOGICAL :: util_xyz_seek
external :: util_xyz_seek
LOGICAL :: util_xyz_nframes
external :: util_xyz_nframes
LOGICAL :: util_get_io_unit
external :: util_get_io_unit
INTEGER :: nw_max_path_len
parameter(nw_max_path_len = 255)
INTEGER :: POSTPROCESSMARKER2
INTEGER :: d_a
INTEGER :: k_a_offset
INTEGER :: d_b
INTEGER :: k_b_offset
INTEGER :: d_c
INTEGER :: k_c_offset
INTEGER :: ctx, ex
external :: nxt_ctx_create, nxt_ctx_destroy, nxt_ctx_next
! -------------------------
INTEGER :: next
INTEGER :: nprocs
INTEGER :: count
INTEGER :: p2b
INTEGER :: h1b
INTEGER :: dimc
INTEGER :: l_c_sort
INTEGER :: k_c_sort
INTEGER :: p3b
DOUBLE PRECISION :: all_t1, all_t2
INTEGER :: p4b
INTEGER :: h5b
INTEGER :: p3b_1
INTEGER :: p4b_1
INTEGER :: h1b_1
INTEGER :: h5b_1
INTEGER :: p2b_2
INTEGER :: h5b_2
INTEGER :: p3b_2
INTEGER :: p4b_2
INTEGER :: dim_common
INTEGER :: dima_sort
INTEGER :: dima
INTEGER :: dimb_sort
INTEGER :: dimb
INTEGER :: l_a_sort
INTEGER :: k_a_sort
INTEGER :: l_a
INTEGER :: k_a
INTEGER :: l_b_sort
INTEGER :: k_b_sort
INTEGER :: l_b
INTEGER :: k_b
INTEGER, DIMENSION(2) :: nsuperp
INTEGER :: isuperp
INTEGER :: l_c
INTEGER :: k_c
DOUBLE PRECISION :: FACTORIAL
external :: FACTORIAL
nprocs = ga_nnodes()
count = 0
CALL nxt_ctx_next(ctx,ex,next)
! ----------------
DO p2b = noab + 1, noab + nvab
DO h1b = 1, noab
! %CCSD version=expanded
IF (next .EQ. count) THEN
all_t1 = util_wallsec()
IF ((.NOT.restricted) .OR. (int_mb(k_spin + p2b - 1) + int_mb(k_spin + h1b - 1) .NE. 4)) THEN
IF (int_mb(k_spin + p2b - 1) .EQ. int_mb(k_spin + h1b - 1)) THEN
IF (ieor(int_mb(k_sym + p2b - 1),int_mb(k_sym + h1b - 1)) .EQ. ieor(irrep_v,irrep_t)) THEN
dimc = int_mb(k_range + p2b - 1) * int_mb(k_range + h1b - 1)
IF (.NOT.MA_push_get(MT_DBL,dimc,'noname',l_c_sort,k_c_sort)) CALL ERRQUIT('icsd_t1_7',0,MA_ERR)
CALL DFILL(dimc,0.0d0,dbl_mb(k_c_sort),1)
DO p3b = noab + 1, noab + nvab
DO p4b = p3b, noab + nvab
DO h5b = 1, noab
IF (int_mb(k_spin + p3b - 1) + int_mb(k_spin + p4b - 1) .EQ. int_mb(k_spin + h1b - 1) + int_mb(k_spin + h5b - 1)) THEN
IF (ieor(int_mb(k_sym + p3b - 1),ieor(int_mb(k_sym + p4b - 1),ieor(int_mb(k_sym + h1b - 1),int_mb(k_sym + h5b - 1)))) .EQ. irrep_t) THEN
CALL TCE_RESTRICTED_4(p3b,p4b,h1b,h5b,p3b_1,p4b_1,h1b_1,h5b_1)
CALL TCE_RESTRICTED_4(p2b,h5b,p3b,p4b,p2b_2,h5b_2,p3b_2,p4b_2)
dim_common = int_mb(k_range + p3b - 1) * int_mb(k_range + p4b - 1) * int_mb(k_range + h5b - 1)
dima_sort = int_mb(k_range + h1b - 1)
dima = dim_common * dima_sort
dimb_sort = int_mb(k_range + p2b - 1)
dimb = dim_common * dimb_sort
IF ((dima > 0) .AND. (dimb > 0)) THEN
IF (.NOT.MA_push_get(MT_DBL,dima,'noname',l_a_sort,k_a_sort)) CALL ERRQUIT('icsd_t1_7',1,MA_ERR)
IF (.NOT.MA_push_get(MT_DBL,dima,'noname',l_a,k_a)) CALL ERRQUIT('icsd_t1_7',2,MA_ERR)
IF ((h5b < h1b)) THEN
CALL GET_HASH_BX(ex,d_a,dbl_mb(k_a),dima,int_mb(k_a_offset),(h1b_1 - 1 + noab * (h5b_1 - 1 + noab * (p4b_1 - noab - 1 + nvab * (p3b_1 - noab - 1)))))
CALL SO_4X(ex,dbl_mb(k_a),dbl_mb(k_a_sort),int_mb(k_range + p3b - 1),int_mb(k_range + p4b - 1),int_mb(k_range + h5b - 1),int_mb(k_range + h1b - 1),4,3,2,1,-1.0d0)
END IF
IF ((h1b <= h5b)) THEN
CALL GET_HASH_BX(ex,d_a,dbl_mb(k_a),dima,int_mb(k_a_offset),(h5b_1 - 1 + noab * (h1b_1 - 1 + noab * (p4b_1 - noab - 1 + nvab * (p3b_1 - noab - 1)))))
CALL SO_4X(ex,dbl_mb(k_a),dbl_mb(k_a_sort),int_mb(k_range + p3b - 1),int_mb(k_range + p4b - 1),int_mb(k_range + h1b - 1),int_mb(k_range + h5b - 1),3,4,2,1,1.0d0)
END IF
IF (.NOT.MA_pop_stack(l_a)) CALL ERRQUIT('icsd_t1_7',3,MA_ERR)
IF (.NOT.MA_push_get(MT_DBL,dimb,'noname',l_b_sort,k_b_sort)) CALL ERRQUIT('icsd_t1_7',4,MA_ERR)
IF (.NOT.MA_push_get(MT_DBL,dimb,'noname',l_b,k_b)) CALL ERRQUIT('icsd_t1_7',5,MA_ERR)
IF ((h5b <= p2b)) THEN
IF (.NOT.intorb) THEN
CALL GET_HASH_BX(ex,d_b,dbl_mb(k_b),dimb,int_mb(k_b_offset),(p4b_2 - 1 + (noab + nvab) * (p3b_2 - 1 + (noab + nvab) * (p2b_2 - 1 + (noab + nvab) * (h5b_2 - 1)))))
ELSE
CALL GET_HASH_BX_I(ex,d_b,dbl_mb(k_b),dimb,int_mb(k_b_offset),(p4b_2 - 1 + (noab + nvab) * (p3b_2 - 1 + (noab + nvab) * (p2b_2 - 1 + (noab + nvab) * (h5b_2 - 1)))),p4b_2,p3b_2,p2b_2,h5b_2)
END IF
CALL SO_4X(ex,dbl_mb(k_b),dbl_mb(k_b_sort),int_mb(k_range + h5b - 1),int_mb(k_range + p2b - 1),int_mb(k_range + p3b - 1),int_mb(k_range + p4b - 1),2,1,4,3,1.0d0)
END IF
IF (.NOT.MA_pop_stack(l_b)) CALL ERRQUIT('icsd_t1_7',6,MA_ERR)
nsuperp(1) = 1
nsuperp(2) = 1
isuperp = 1
IF (p3b .EQ. p4b) THEN
nsuperp(isuperp) = nsuperp(isuperp) + 1
ELSE
isuperp = isuperp + 1
END IF
CALL DM(ex,'T','N',dima_sort,dimb_sort,dim_common,2.0d0 / FACTORIAL(nsuperp(1)) / FACTORIAL(nsuperp(2)),dbl_mb(k_a_sort),dim_common,dbl_mb(k_b_sort),dim_common,1.0d0,dbl_mb(k_c_sort),dima_sort)
IF (.NOT.MA_pop_stack(l_b_sort)) CALL ERRQUIT('icsd_t1_7',7,MA_ERR)
IF (.NOT.MA_pop_stack(l_a_sort)) CALL ERRQUIT('icsd_t1_7',8,MA_ERR)
END IF
END IF
END IF
END DO
END DO
END DO
IF (.NOT.MA_push_get(MT_DBL,dimc,'noname',l_c,k_c)) CALL ERRQUIT('icsd_t1_7',9,MA_ERR)
CALL SO_2X(ex,dbl_mb(k_c_sort),dbl_mb(k_c),int_mb(k_range + p2b - 1),int_mb(k_range + h1b - 1),1,2,-1.0d0 / 2.0d0)
CALL ADD_HASH_BX(ex,d_c,dbl_mb(k_c),dimc,int_mb(k_c_offset),(h1b - 1 + noab * (p2b - noab - 1)))
IF (.NOT.MA_pop_stack(l_c)) CALL ERRQUIT('icsd_t1_7',10,MA_ERR)
IF (.NOT.MA_pop_stack(l_c_sort)) CALL ERRQUIT('icsd_t1_7',11,MA_ERR)
all_t2 = util_wallsec()
CALL record_time(ex,all_t2 - all_t1,7)
END IF
END IF
END IF
CALL nxt_ctx_next(ctx,ex,next)
END IF
count = count + 1
END DO
END DO
RETURN
END SUBROUTINE
|
{"hexsha": "e256218f0b8bcec114201b2ef4c08221ac717997", "size": 19322, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "projects/minitermite/regression/fortran/expanded.f90", "max_stars_repo_name": "haiwangcat/ROSE", "max_stars_repo_head_hexsha": "75bf4106a5febe40269ea0361a024b7811668d45", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2015-03-17T13:52:21.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-12T05:32:47.000Z", "max_issues_repo_path": "projects/minitermite/regression/fortran/expanded.f90", "max_issues_repo_name": "haiwangcat/ROSE", "max_issues_repo_head_hexsha": "75bf4106a5febe40269ea0361a024b7811668d45", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "projects/minitermite/regression/fortran/expanded.f90", "max_forks_repo_name": "haiwangcat/ROSE", "max_forks_repo_head_hexsha": "75bf4106a5febe40269ea0361a024b7811668d45", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.0334075724, "max_line_length": 289, "alphanum_fraction": 0.6696511748, "num_tokens": 5977}
|
#!/usr/bin/python3
'''
Function and data integration tools
'''
try:
import numpy as np
except ModuleNotFoundError:
print("\"numpy\" not found. This module requires numpy")
try:
import matplotlib.pyplot as plt
except ModuleNotFoundError:
print("\"matplotlib\" not found. This module requires matplotlib")
from time import time
from . import Interpolation
from .Core import *
from .Limits import limit
from .Core import MethodError
#######################################################################################################################################
#######################################################################################################################################
#######################################################################################################################################
class Methods:
def __init__(self):
pass
def Riemann(x,y,upper=False):
'''
Riemann sums calculator over the points (x,y)
'''
shapes_comparation(x,y)
s=0
if upper:
for i in range(len(x)-1):
s+=(x[i+1]-x[i])*y[i+1]
else:
for i in range(len(x)-1):
s+=(x[i+1]-x[i])*y[i]
return s
def Trapezoid(x,y):
'''
Trapezoid-rule sums over (x,y)
'''
shapes_comparation(x,y)
s=0
for i in range(len(x)-1):
s+=(y[i+1]+y[i])*(x[i+1]-x[i])/2
return s
def Simpson(x,y):
'''
Simpson rule. Newton-Cotes formula for parabolas.
'''
shapes_comparation(x,y)
if np.std(np.array(x[1:])-np.array(x[:-1]))>10**(-7):
raise ValueError("The Data must be evenly spaced.")
s=0
if len(x)&2==1:
for i in range(len(x)):
if i%2==0:
s+=(x[i+2]-x[i])/6 * (y[i]+4*y[i+1]+y[i+2])
else:
for i in range(len(x)-2):
if i%2==0:
s+=(x[i+2]-x[i])/6 * (y[i]+4*y[i+1]+y[i+2])
s+=(y[-1]+y[-2])*(x[-2]-x[-2])/2
return s
#######################################################################################################################################
def DataIntegrate(x,y,method="Simpson",interpolation=1,points_for_interpolation=10):
'''
Main function to integrate data.
'''
methods=["RiemannUpper","RiemannLower","Trapezoid","Simpson"]
xx=x;yy=y
if not (method in methods):
raise MethodError("This method is not aviable. Methods aviable are: %s" % methods)
if interpolation>1:
xx,yy=Interpolation.Interpolate_Data(x,y,interpolation,points_for_interpolation)
if method==methods[0]:
# RiemannUpper
return Methods.Riemann(xx,yy,upper=True)
elif method==methods[1]:
# RiemannUpper
return Methods.Riemann(xx,yy,upper=False)
elif method==methods[2]:
# Trapezoid
return Methods.Trapezoid(xx,yy)
elif method==methods[3]:
# Simpson
return Methods.Simpson(xx,yy)
def NIntegrate(f,a,b,n=300,method="Simpson",temperated=False,round_to=10):
'''
Main function to integrate functions with real limits.
'''
methods=["RiemannUpper","RiemannLower","Trapezoid","Simpson","MonteCarlo"]
if not (method in methods):
raise MethodError("This method is not aviable. Methods aviable are: %s" % methods)
vf=np.vectorize(f)
if method==methods[4]:
# MonteCarlo
in_d=0
out_d=0
h=max(vf(np.linspace(a,b,300)))
d=min(vf(np.linspace(a,b,300)))
p=0
for i in range(n):
p=[np.random.uniform(a,b),np.random.uniform(d,h)]
if p[1]<=f(p[0]):
in_d+=1
else:
out_d+=1
return round((in_d/(out_d+in_d))*(abs(h-d))*(b-a),round_to)
xspace=np.linspace(a,b,n,dtype=np.float128)
if temperated:
image=generate_temperated(f,a,b)
else:
image=vf(xspace)
if method==methods[0]:
# RiemannUpper
return round(Methods.Riemann(xspace,image,upper=True),round_to)
elif method==methods[1]:
# RiemannUpper
return round(Methods.Riemann(xspace,image,upper=False),round_to)
elif method==methods[2]:
# Trapezoid
return round(Methods.Trapezoid(xspace,image),round_to)
elif method==methods[3]:
# Simpson
return round(Methods.Simpson(xspace,image),round_to)
def InfinityIntegrate(f,a,positive=True,method="Trapezoid",resolution=3.5,points1=10**6,method1="Simpson"):
'''
An attempt to integrate functions from a to infinity. Seems to work very well but very slow.
'''
g=np.vectorize(lambda t: f(1/t)/(t**2))
h=np.vectorize(lambda t: f(-1/t)/(t**2))
if positive:
if a>=1:
xspace=real_interval(omega=resolution,end=1/a)
return DataIntegrate(xspace,g(xspace),method)
elif a<1:
xspace=real_interval(omega=resolution,end=1)
return NIntegrate(f,a,1,points1,method1)+DataIntegrate(xspace,g(xspace),method)
def InfinityIntegrate2(f,a,positive=True,method="Trapezoid",points=10**3,points1=10**5,method1="Trapezoid"):
g=np.vectorize(lambda t: f(1/t)/(t**2))
if a>=1:
xspace=np.linspace(np.finfo(np.longfloat).eps,1/a,points,dtype=np.float128)
return DataIntegrate(xspace,g(xspace),method)
elif a<1:
xspace=np.linspace(np.finfo(np.longfloat).eps,1,points,dtype=np.float128)
return NIntegrate(f,a,1,points1,method1)+DataIntegrate(xspace,g(xspace),method)
|
{"hexsha": "1490583d27b8405095f62a1f436ec1dbc79ac1de", "size": 5611, "ext": "py", "lang": "Python", "max_stars_repo_path": "Integration.py", "max_stars_repo_name": "santiagohenao/santiagohenao_tools", "max_stars_repo_head_hexsha": "81a772114b482b83639c835b0a2096f11ca45075", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Integration.py", "max_issues_repo_name": "santiagohenao/santiagohenao_tools", "max_issues_repo_head_hexsha": "81a772114b482b83639c835b0a2096f11ca45075", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Integration.py", "max_forks_repo_name": "santiagohenao/santiagohenao_tools", "max_forks_repo_head_hexsha": "81a772114b482b83639c835b0a2096f11ca45075", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.8012048193, "max_line_length": 135, "alphanum_fraction": 0.5334165033, "include": true, "reason": "import numpy", "num_tokens": 1446}
|
#include "PyInlineReductions.h"
#include <boost/python.hpp>
#include <string>
#include "Halide.h"
#include "PyExpr.h"
namespace h = Halide;
namespace p = boost::python;
// Inline sum reduction over the implicit domain of `e`; the explicit name
// argument lets Python callers label the reduction for debugging.
h::Expr sum0(h::Expr e, const std::string name) {
    h::Expr reduced = h::sum(e, name);
    return reduced;
}
// Inline sum reduction over an explicit reduction domain `r`.
h::Expr sum1(h::RDom r, h::Expr e, const std::string name) {
    h::Expr reduced = h::sum(r, e, name);
    return reduced;
}
// Inline product reduction over the implicit domain of `e`.
h::Expr product0(h::Expr e, const std::string name) {
    h::Expr reduced = h::product(e, name);
    return reduced;
}
// Inline product reduction over an explicit reduction domain `r`.
h::Expr product1(h::RDom r, h::Expr e, const std::string name) {
    h::Expr reduced = h::product(r, e, name);
    return reduced;
}
// Inline maximum reduction over the implicit domain of `e`.
h::Expr maximum0(h::Expr e, const std::string name) {
    h::Expr reduced = h::maximum(e, name);
    return reduced;
}
// Inline maximum reduction over an explicit reduction domain `r`.
h::Expr maximum1(h::RDom r, h::Expr e, const std::string name) {
    h::Expr reduced = h::maximum(r, e, name);
    return reduced;
}
// Inline minimum reduction over the implicit domain of `e`.
h::Expr minimum0(h::Expr e, const std::string name) {
    h::Expr reduced = h::minimum(e, name);
    return reduced;
}
// Inline minimum reduction over an explicit reduction domain `r`.
h::Expr minimum1(h::RDom r, h::Expr e, const std::string name) {
    h::Expr reduced = h::minimum(r, e, name);
    return reduced;
}
// Inline argmin over the implicit domain of `e`; the Halide Tuple result is
// flattened into a Python tuple of Exprs.
p::object argmin0(h::Expr e, const std::string name) {
    const std::vector<h::Expr> exprs = h::argmin(e, name).as_vector();
    return expr_vector_to_python_tuple(exprs);
}
// Inline argmin over an explicit reduction domain `r`, returned as a Python
// tuple of Exprs.
p::object argmin1(h::RDom r, h::Expr e, const std::string name) {
    const std::vector<h::Expr> exprs = h::argmin(r, e, name).as_vector();
    return expr_vector_to_python_tuple(exprs);
}
// Inline argmax over the implicit domain of `e`, returned as a Python tuple.
// BUG FIX: this wrapper previously forwarded to h::argmin, so the Python-side
// `argmax(e, name)` overload (without an RDom) silently computed an argmin.
p::object argmax0(h::Expr e, const std::string name) {
    return expr_vector_to_python_tuple(h::argmax(e, name).as_vector());
}
// Inline argmax over an explicit reduction domain `r`, returned as a Python
// tuple of Exprs.
p::object argmax1(h::RDom r, h::Expr e, const std::string name) {
    const std::vector<h::Expr> exprs = h::argmax(r, e, name).as_vector();
    return expr_vector_to_python_tuple(exprs);
}
// Registers the Python-visible inline-reduction free functions.  Each name is
// bound twice: once without and once with an explicit RDom first argument;
// Boost.Python resolves between the two overloads at call time, and the
// registration order within each name is therefore deliberate.
void define_inline_reductions() {
    // Defines some inline reductions: sum, product, minimum, maximum.
    p::def("sum", &sum0, (p::arg("e"), p::arg("name") = "sum"),
           "An inline reduction.");
    p::def("sum", &sum1, (p::arg("r"), p::arg("e"), p::arg("name") = "sum"),
           "An inline reduction.");
    p::def("product", &product0, (p::arg("e"), p::arg("name") = "product"),
           "An inline reduction.");
    p::def("product", &product1, (p::arg("r"), p::arg("e"), p::arg("name") = "product"),
           "An inline reduction.");
    p::def("maximum", &maximum0, (p::arg("e"), p::arg("name") = "maximum"),
           "An inline reduction.");
    p::def("maximum", &maximum1, (p::arg("r"), p::arg("e"), p::arg("name") = "maximum"),
           "An inline reduction.");
    p::def("minimum", &minimum0, (p::arg("e"), p::arg("name") = "minimum"),
           "An inline reduction.");
    p::def("minimum", &minimum1, (p::arg("r"), p::arg("e"), p::arg("name") = "minimum"),
           "An inline reduction.");
    // argmin/argmax wrappers return Python tuples rather than h::Expr.
    p::def("argmin", &argmin0, (p::arg("e"), p::arg("name") = "argmin"),
           "An inline reduction.");
    p::def("argmin", &argmin1, (p::arg("r"), p::arg("e"), p::arg("name") = "argmin"),
           "An inline reduction.");
    p::def("argmax", &argmax0, (p::arg("e"), p::arg("name") = "argmax"),
           "An inline reduction.");
    p::def("argmax", &argmax1, (p::arg("r"), p::arg("e"), p::arg("name") = "argmax"),
           "An inline reduction.");
}
|
{"hexsha": "0772c731eabd90cc95b051dc43f3487053e894ea", "size": 2974, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "python_bindings/src/PyInlineReductions.cpp", "max_stars_repo_name": "jrayzero/Halide", "max_stars_repo_head_hexsha": "174bfe850d225e5e3644cd14fb4ea06b50173ae3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2019-11-19T15:57:58.000Z", "max_stars_repo_stars_event_max_datetime": "2019-11-19T15:57:58.000Z", "max_issues_repo_path": "python_bindings/src/PyInlineReductions.cpp", "max_issues_repo_name": "jrayzero/Halide", "max_issues_repo_head_hexsha": "174bfe850d225e5e3644cd14fb4ea06b50173ae3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python_bindings/src/PyInlineReductions.cpp", "max_forks_repo_name": "jrayzero/Halide", "max_forks_repo_head_hexsha": "174bfe850d225e5e3644cd14fb4ea06b50173ae3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2.0, "max_forks_repo_forks_event_min_datetime": "2019-11-25T13:27:17.000Z", "max_forks_repo_forks_event_max_datetime": "2020-03-20T15:29:14.000Z", "avg_line_length": 31.3052631579, "max_line_length": 88, "alphanum_fraction": 0.5756556826, "num_tokens": 935}
|
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 30 19:38:41 2019
@author: Yadnyesh
"""
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 30 18:24:14 2019
@author: Yadnyesh
"""
# Import libraries
import numpy as np
import os
from PIL import Image
import cv2 as cv
import matplotlib.pyplot as plt
# Set global values
ROWS = 224  # target image height (pixels) used by the loaders/augmenters below
COLS = 224  # target image width (pixels)
newsize = 105  # NOTE(review): not referenced anywhere in the visible code -- confirm before removing
#-----------Functions start here-----------#
def read_from_folder(filename):
    """Load every image in directory `filename`, sorted by file name.

    Each image is resized to (COLS, ROWS) and scaled to [0, 1].  Grayscale
    files are replicated across all three channels.  Returns a float64 array
    of shape (num_files, ROWS, COLS, 3).
    """
    names = sorted(os.listdir(filename))
    count = len(names)
    images = np.zeros(shape=(count, ROWS, COLS, 3))
    for idx in range(count):
        print(idx)
        path = os.path.join(filename, names[idx])  # full path to read image
        print(path)
        img = Image.open(path)
        img = img.resize((COLS, ROWS), Image.ANTIALIAS)
        pixels = np.array(img, dtype=np.float64)
        # Detect a grayscale image by its missing channel axis.
        if len(pixels.shape) == 2:
            # Normalize once and copy the result into every channel.
            gray = pixels / 255.0
            images[idx, :, :, 0] = gray
            images[idx, :, :, 1] = gray
            images[idx, :, :, 2] = gray
            continue
        # Color image: scale each channel into [0, 1] in place.
        pixels[:, :, 0] = pixels[:, :, 0] / 255.0
        pixels[:, :, 1] = pixels[:, :, 1] / 255.0
        pixels[:, :, 2] = pixels[:, :, 2] / 255.0
        images[idx, :, :, :] = pixels
    return images
def read_nonbullying(filename, number):
    """Load `number` images sampled (with replacement) from directory `filename`.

    Returns a float64 array of shape (number, ROWS, COLS, 3) scaled to [0, 1];
    grayscale files are replicated across all three channels.
    """
    entries = sorted(os.listdir(filename))
    picks = np.random.randint(0, len(entries), size=(number))
    images = np.zeros(shape=(len(picks), ROWS, COLS, 3))
    for idx in range(len(picks)):
        path = os.path.join(filename, entries[picks[idx]])
        # Read image
        img = Image.open(path)
        img = img.resize((COLS, ROWS))
        pixels = np.array(img, dtype=np.float64)
        if len(pixels.shape) == 2:
            # Grayscale: normalize once and copy into every channel.
            gray = pixels / 255.0
            images[idx, :, :, 0] = gray
            images[idx, :, :, 1] = gray
            images[idx, :, :, 2] = gray
            continue
        # Color image: scale each channel into [0, 1] in place.
        pixels[:, :, 0] = pixels[:, :, 0] / 255.0
        pixels[:, :, 1] = pixels[:, :, 1] / 255.0
        pixels[:, :, 2] = pixels[:, :, 2] / 255.0
        images[idx, :, :, :] = pixels
    return images
def flip_images(dataset):
    """Horizontally mirror every image in a batch.

    Args:
        dataset: array of shape (N, H, W, C).

    Returns:
        float64 array of the same shape with each image flipped left-right.

    Generalized from the original, which hard-coded the module globals
    ROWS/COLS (224x224) and looped np.fliplr per channel; flipping axis 2
    (the column axis) mirrors every image and channel in a single call.
    """
    return np.flip(dataset, axis=2).astype(np.float64)
def add_noise(dataset, mean, std_dev):
    """Add one shared Gaussian noise field to every image in the batch.

    Args:
        dataset: array of shape (N, H, W, C).
        mean, std_dev: parameters of the normal distribution.

    Returns:
        dataset + noise, where a single (H, W, C) noise array is broadcast
        across all N images (matching the original behavior).

    Fixes: removes a dead np.zeros pre-allocation and derives the noise shape
    from the input instead of the hard-coded ROWS/COLS module globals.
    """
    noise = np.random.normal(mean, std_dev, dataset.shape[1:])
    return dataset + noise
def jitter(dataset, brightness):
    """Randomly jitter pixel brightness by a multiplicative factor.

    Args:
        dataset: array of shape (N, H, W, C).
        brightness: maximum relative change; each pixel is scaled by a factor
            drawn uniformly from [1 - brightness, 1 + brightness].

    Returns:
        dataset * factors, one shared (H, W, C) factor field broadcast across
        all N images (matching the original behavior).

    Fixes: removes a dead np.zeros pre-allocation and derives the factor shape
    from the input instead of the hard-coded ROWS/COLS module globals.
    """
    shape = dataset.shape[1:]
    bright = np.ones(shape) + np.random.uniform(-brightness, brightness, shape)
    return dataset * bright
def get_next_batch(index, dataset, classes, batch_size):
    """Slice the (features, labels) mini-batch starting at `index`."""
    stop = index + batch_size
    X_batch = np.array(dataset[index:stop, :, :, :], dtype=np.float32)
    Y_batch = np.array(classes[index:stop], dtype=np.int32)
    return (X_batch, Y_batch)
def augment_labels(labels, previous, L):
labels[previous: previous + L[0]] = 1 # Do not use l10 -> total number of stanford images
labels[previous + L[0]: previous + L[1]] = 2
labels[previous + L[1]: previous + L[2]] = 3
labels[previous + L[2]: previous + L[3]] = 4
labels[previous + L[3]: previous + L[4]] = 5
labels[previous + L[4]: previous + L[5]] = 6
labels[previous + L[5]: previous + L[6]] = 7
labels[previous + L[6]: previous + L[7]] = 8
labels[previous + L[7]: previous + L[8]] = 9
labels[previous + L[8]: previous + L[9]] = 0
def min_max_normalize(dataset, min_value, max_value):
    """Rescale each channel of each image independently to [min_value, max_value].

    Args:
        dataset: array of shape (N, H, W, C).
        min_value, max_value: target range endpoints.

    Returns:
        float64 array of the same shape.

    Fixes: removes a dead `img` pre-allocation and derives the output shape
    from the input instead of the hard-coded ROWS/COLS/3 module globals.
    A constant channel still divides by zero, exactly as the original did.
    """
    images = np.zeros(dataset.shape)
    for i in range(dataset.shape[0]):
        for c in range(dataset.shape[-1]):
            img = dataset[i, :, :, c]
            lo = np.amin(img)
            hi = np.amax(img)
            img_std = (img - lo) / (hi - lo)
            images[i, :, :, c] = img_std * (max_value - min_value) + min_value
    return images
def standard_scaler(dataset):
    """Standardize each channel of each image to zero mean and unit variance.

    Args:
        dataset: array of shape (N, H, W, C).

    Returns:
        float64 array of the same shape.

    Fixes: removes a dead `img` pre-allocation and derives the output shape
    from the input instead of the hard-coded ROWS/COLS/3 module globals.
    A constant channel still divides by zero, exactly as the original did.
    """
    images = np.zeros(dataset.shape)
    for i in range(dataset.shape[0]):
        for c in range(dataset.shape[-1]):
            img = dataset[i, :, :, c]
            images[i, :, :, c] = (img - np.mean(img)) / np.std(img)
    return images
def translate(dataset, shift_cols, shift_rows):
    """Translate every image by (shift_cols, shift_rows) pixels via cv.warpAffine.

    Args:
        dataset: array of shape (N, H, W, C).
        shift_cols, shift_rows: translation along x and y respectively.

    Returns:
        float64 array of the same shape (uncovered regions are zero-filled
        by warpAffine's default border handling).

    Fixes: the translation matrix is loop-invariant and is now built once
    instead of N*C times, and shapes come from the input rather than the
    hard-coded ROWS/COLS module globals.
    """
    rows, cols = dataset.shape[1], dataset.shape[2]
    images = np.zeros(dataset.shape)
    # Same 2x3 affine matrix for every image/channel: build it once.
    M = np.float32([[1, 0, shift_cols], [0, 1, shift_rows]])
    for i in range(dataset.shape[0]):
        for c in range(dataset.shape[-1]):
            images[i, :, :, c] = cv.warpAffine(dataset[i, :, :, c], M, (cols, rows))
    return images
def rotate_90(dataset):
    """Rotate every image 90 degrees about its center via cv.warpAffine.

    Args:
        dataset: array of shape (N, H, W, C).

    Returns:
        float64 array of the same shape.

    Fixes: the rotation matrix is loop-invariant and is now built once
    instead of N*C times, and shapes come from the input rather than the
    hard-coded ROWS/COLS module globals.
    """
    rows, cols = dataset.shape[1], dataset.shape[2]
    images = np.zeros(dataset.shape)
    # Same rotation matrix for every image/channel: build it once.
    M = cv.getRotationMatrix2D(((cols - 1)/2.0, (rows - 1)/2.0), 90, 1)
    for i in range(dataset.shape[0]):
        for c in range(dataset.shape[-1]):
            images[i, :, :, c] = cv.warpAffine(dataset[i, :, :, c], M, (cols, rows))
    return images
def affine_transform(dataset):
    """Apply a fixed affine warp (defined by three point pairs) to every image.

    Args:
        dataset: array of shape (N, H, W, C).

    Returns:
        float64 array of the same shape.

    Fixes: the point pairs and the affine matrix they define are constants,
    so they are now computed once instead of once per image/channel, and the
    output size comes from the input rather than the ROWS/COLS globals.
    """
    rows, cols = dataset.shape[1], dataset.shape[2]
    images = np.zeros(dataset.shape)
    # 3 points in original image -> 3 points in destination image (x, y).
    pts1 = np.float32([[10,10],[200,10],[10,200]])
    pts2 = np.float32([[10,100],[200,10],[100,200]])
    M = cv.getAffineTransform(pts1, pts2)
    for i in range(dataset.shape[0]):
        for c in range(dataset.shape[-1]):
            images[i, :, :, c] = cv.warpAffine(dataset[i, :, :, c], M, (cols, rows))
    return images
def perspective_transform(dataset):
    """Stretch a fixed source quadrilateral to fill each image (perspective warp).

    Args:
        dataset: array of shape (N, H, W, C).

    Returns:
        float64 array of the same shape.

    Fixes: the point sets and transform matrix are loop-invariant and are now
    computed once.  Destination points and the warpPerspective dsize are given
    in the OpenCV (x, y) = (width, height) convention; the original passed
    (ROWS, COLS), which was indistinguishable only because ROWS == COLS.
    """
    rows, cols = dataset.shape[1], dataset.shape[2]
    images = np.zeros(dataset.shape)
    # 4 source points of the perspective transform, as (x, y).
    pts1 = np.float32([[30,30],[150,30],[30,190],[190,190]])
    # Destination: the four image corners, (width, height) order.
    pts2 = np.float32([[0,0],[cols,0],[0,rows],[cols,rows]])
    M = cv.getPerspectiveTransform(pts1, pts2)
    for i in range(dataset.shape[0]):
        for c in range(dataset.shape[-1]):
            images[i, :, :, c] = cv.warpPerspective(dataset[i, :, :, c], M, (cols, rows))
    return images
def median_blur(dataset, region):
    """Apply an OpenCV median filter of aperture `region` to every channel.

    Args:
        dataset: array of shape (N, ROWS, COLS, 3).
        region: odd aperture size passed to cv.medianBlur.

    Returns:
        float64 array of shape (N, ROWS, COLS, 3).
    """
    count = dataset.shape[0]
    images = np.zeros(shape=(count, ROWS, COLS, 3))
    for idx in range(count):
        for ch in range(3):
            images[idx, :, :, ch] = cv.medianBlur(dataset[idx, :, :, ch], region)
    return images
def avg_smooth(dataset, region):
    """Apply a region x region box (averaging) filter to every channel.

    Args:
        dataset: array of shape (N, ROWS, COLS, 3).
        region: side length of the averaging kernel passed to cv.blur.

    Returns:
        float64 array of shape (N, ROWS, COLS, 3).
    """
    count = dataset.shape[0]
    images = np.zeros(shape=(count, ROWS, COLS, 3))
    for idx in range(count):
        for ch in range(3):
            images[idx, :, :, ch] = cv.blur(dataset[idx, :, :, ch], (region, region))
    return images
# ---- Demo: load one folder of images and preview several augmentations ----
# Raw string: the path contains sequences like "\C" that are invalid escape
# sequences in a normal string literal (deprecated; a future SyntaxError).
# The string value is unchanged.
filename = r'F:\Clemson University\ECE 8810_Deep Learning\Project\gossiping\Folder_1'
a1 = sorted(os.listdir(filename))
l1 = len(a1)
show_img = 6  # index of the image previewed in every figure
# NOTE: the original pre-allocated D and chained
# "D_trx = D_rot = D_aff = D_per = D_avg = np.zeros(...)", which bound every
# name to the SAME array; all of them were reassigned before use, so the dead
# (and aliased) pre-allocations are removed.
D = read_from_folder(filename)
plt.figure(1)
plt.imshow(D[show_img, :, :, :])
D_trx = translate(D, 25, 25)
plt.figure(2)
plt.imshow(D_trx[show_img, :, :, :])
D_rot = rotate_90(D)
D_rot = add_noise(D_rot, 0, 0.25)
D_rot = avg_smooth(D_rot, 5)
plt.figure(3)
plt.imshow(D_rot[show_img, :, :, :])
D_aff = affine_transform(D)
plt.figure(4)
plt.imshow(D_aff[show_img, :, :, :])
D_per = perspective_transform(D)
plt.figure(5)
plt.imshow(D_per[show_img, :, :, :])
D_avg = add_noise(D, 0, 0.05)
D_avg = avg_smooth(D_avg, 5)
plt.figure(6)
plt.imshow(D_avg[show_img, :, :, :])
|
{"hexsha": "86c5973035833661ee5d057ea65b19db4d72d29f", "size": 9065, "ext": "py", "lang": "Python", "max_stars_repo_path": "added_data_aug.py", "max_stars_repo_name": "Yashgh7076/CPSC-8810-Project", "max_stars_repo_head_hexsha": "81a32072e4888f6bbdaf4a74fd5ab9ef386b299b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "added_data_aug.py", "max_issues_repo_name": "Yashgh7076/CPSC-8810-Project", "max_issues_repo_head_hexsha": "81a32072e4888f6bbdaf4a74fd5ab9ef386b299b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "added_data_aug.py", "max_forks_repo_name": "Yashgh7076/CPSC-8810-Project", "max_forks_repo_head_hexsha": "81a32072e4888f6bbdaf4a74fd5ab9ef386b299b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.3867069486, "max_line_length": 116, "alphanum_fraction": 0.5011583012, "include": true, "reason": "import numpy", "num_tokens": 2651}
|
import sys
import random
import json
import re
import numpy as np
# remove non-ascii text in a string
def remove_nonascii(text):
    """Drop every character whose code point is >= 128."""
    return str(''.join(ch for ch in text if ord(ch) < 128))
# keep characters in a string iff they are in the alphabet
# For training
def filter_with_alphabet(text, alphabet):
    """Return `text` with every character not in `alphabet` removed."""
    kept = [c for c in text if c in alphabet]
    return ''.join(kept)
# sanitizes a line read from raw sources
# For general purposes
def sanitize_line(line):
    """Collapse whitespace runs, strip non-ASCII characters, and lower-case."""
    collapsed = " ".join(line.split())
    return remove_nonascii(collapsed).lower()
# core function that will return the parsed sentence as a list of grams
def tokenize(Dict, sentence, gram_length, token_weight):
    """Greedily split `sentence` into n-grams sampled by dictionary frequency.

    At each position the next gram length is drawn with probability
    proportional to Dict[gram] * token_weight[length] over the candidate
    grams of length 1..gram_length that appear in `Dict`.

    Args:
        Dict: mapping from gram string to a frequency count.
        sentence: a string (split on single spaces) or an already-split list.
        gram_length: maximum n-gram length to consider.
        token_weight: per-length multipliers, indexed 1..gram_length.

    Returns:
        list of gram strings covering the input tokens in order.

    Fixes vs. original: (1) when no candidate gram is in `Dict` the original
    divided by sum(mass) == 0, producing NaNs that crash np.random.choice --
    we now fall back to emitting the single token as-is; (2) the bare
    try/except around .split is replaced with an explicit type check;
    (3) an empty list input no longer raises IndexError.
    """
    if isinstance(sentence, str):
        text = sentence.split(" ")
    else:
        text = sentence  # already a list of tokens
    # A leading empty token comes from sentences that start with a space.
    if text and text[0] == "":
        text = text[1:]
    result = []
    N = len(text)
    it = 0
    while it < N:
        mass = np.zeros(gram_length)
        for i in range(1, gram_length + 1):
            if it + i <= N:
                gram = " ".join(text[it:it + i])
                if gram in Dict:
                    mass[i - 1] = Dict.get(gram) * token_weight[i] * 1.0
        total = mass.sum()
        if total > 0:
            mass /= total
            tmp_len = np.random.choice(gram_length, p=mass) + 1  # gram length for this step
        else:
            # No candidate gram is known: emit the bare token instead of
            # dividing by zero (the original crashed here).
            tmp_len = 1
        result.append(" ".join(text[it:it + tmp_len]))
        it += tmp_len
    return result
|
{"hexsha": "a26b288c13d86550ccc818b87d419bc2e8168e8b", "size": 1241, "ext": "py", "lang": "Python", "max_stars_repo_path": "helper.py", "max_stars_repo_name": "pearfish16/ngram_sentiment", "max_stars_repo_head_hexsha": "1f20f30b5eaaffdc791802b8ce8ce6e177404344", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "helper.py", "max_issues_repo_name": "pearfish16/ngram_sentiment", "max_issues_repo_head_hexsha": "1f20f30b5eaaffdc791802b8ce8ce6e177404344", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "helper.py", "max_forks_repo_name": "pearfish16/ngram_sentiment", "max_forks_repo_head_hexsha": "1f20f30b5eaaffdc791802b8ce8ce6e177404344", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.5777777778, "max_line_length": 82, "alphanum_fraction": 0.6752618856, "include": true, "reason": "import numpy", "num_tokens": 360}
|
[STATEMENT]
theorem from_dtree_fin_list_dir: "fin_list_directed_tree (root t) (from_dtree dt dh t)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. fin_list_directed_tree (dtree.root t) (from_dtree dt dh t)
[PROOF STEP]
unfolding fin_list_directed_tree_def fin_list_directed_tree_axioms_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. finite_directed_tree (from_dtree dt dh t) (dtree.root t) \<and> (\<forall>u v. u \<in> verts (from_dtree dt dh t) \<longrightarrow> v \<in> verts (from_dtree dt dh t) \<longrightarrow> u \<noteq> v \<longrightarrow> set u \<inter> set v = {}) \<and> (\<forall>v. v \<in> verts (from_dtree dt dh t) \<longrightarrow> v \<noteq> [])
[PROOF STEP]
by (auto simp: from_dtree_fin_directed empty_notin_wf_dlverts[OF wf_lverts]
intro: wf_lverts dverts_same_if_set_wf)
|
{"llama_tokens": 336, "file": "Query_Optimization_List_Dtree", "length": 2}
|
import tensorflow as tf
from tensorflow.contrib import rnn
import numpy as np
from gensim.models.keyedvectors import KeyedVectors
class Model(object):
    """Sequence-to-sequence text-summarization model (TF1 graph mode).

    Encoder: stacked bidirectional LSTM over embedded article tokens,
    time-major.  Decoder: one LSTM with Bahdanau attention -- teacher-forced
    (TrainingHelper) when training, BeamSearchDecoder when ``forward_only``.

    Training mode (``forward_only=False``) exposes ``self.loss`` and
    ``self.update``; inference mode exposes ``self.prediction``.
    """
    def __init__(self, article_max_len, summary_max_len, embedding_dim, hidden_dim, layers_num,
                 learning_rate, beam_width, keep_prob, vocabulary_size, batch_size, word2vec_embeddings,
                 forward_only, using_word2vec_embeddings=True):
        self.vocabulary_size = vocabulary_size # len(int2word_dict)
        # print(vocabulary_size)
        self.embedding_dim = embedding_dim
        self.num_hidden = hidden_dim
        self.num_layers = layers_num
        self.learning_rate = learning_rate
        self.beam_width = beam_width
        self.batch_size = batch_size
        # NOTE(review): duplicate assignment -- vocabulary_size was already
        # stored a few lines above; harmless but redundant.
        self.vocabulary_size = vocabulary_size
        # Disable dropout (keep everything) at inference time.
        if not forward_only:
            self.keep_prob = keep_prob
        else:
            self.keep_prob = 1.0
        # NOTE(review): self.keep_prob is never passed to the DropoutWrapper
        # calls below, so they run with their default keep probabilities --
        # confirm whether dropout was actually meant to be active in training.
        self.cell = tf.nn.rnn_cell.BasicLSTMCell
        # with tf.device(device):
        # Output projection to vocabulary logits, shared by both decode paths.
        with tf.variable_scope("decoder/projection", reuse=tf.AUTO_REUSE):
            self.projection_layer = tf.layers.Dense(self.vocabulary_size, use_bias=False)
        # Graph inputs.  Note that self.batch_size (set from the constructor
        # argument above) is immediately replaced by this scalar placeholder.
        self.batch_size = tf.placeholder(tf.int32, (), name="batch_size")
        self.X = tf.placeholder(tf.int32, [None, article_max_len])
        self.X_len = tf.placeholder(tf.int32, [None])
        self.decoder_input = tf.placeholder(tf.int32, [None, summary_max_len])
        self.decoder_len = tf.placeholder(tf.int32, [None])
        self.decoder_target = tf.placeholder(tf.int32, [None, summary_max_len])
        self.global_step = tf.Variable(0, trainable=False)
        with tf.name_scope("embedding"), tf.variable_scope("embedding", reuse=tf.AUTO_REUSE):
            # with tf.variable_scope('embedding'):
            # Initialize embeddings from pretrained word2vec when training
            # (and requested); otherwise start from a random uniform matrix.
            if not forward_only and using_word2vec_embeddings:
                init_embeddings = tf.constant(word2vec_embeddings, dtype=tf.float32)
            else:
                init_embeddings = tf.random_uniform([self.vocabulary_size, self.embedding_dim], -1.0, 1.0)
            self.embeddings = tf.get_variable("embeddings", initializer=init_embeddings)
            # Transpose to time-major (time, batch, dim) for the RNN calls below.
            self.encoder_emb_inp = tf.transpose(tf.nn.embedding_lookup(self.embeddings, self.X), perm=[1, 0, 2])
            self.decoder_emb_inp = tf.transpose(tf.nn.embedding_lookup(self.embeddings, self.decoder_input),
                                                perm=[1, 0, 2])
        with tf.name_scope("encoder"), tf.variable_scope("encoder", reuse=tf.AUTO_REUSE):
            # One forward and one backward LSTM per layer, stacked bidirectionally.
            fw_cells = [self.cell(self.num_hidden) for _ in range(self.num_layers)]
            bw_cells = [self.cell(self.num_hidden) for _ in range(self.num_layers)]
            fw_cells = [rnn.DropoutWrapper(cell) for cell in fw_cells]
            bw_cells = [rnn.DropoutWrapper(cell) for cell in bw_cells]
            encoder_outputs, encoder_state_fw, encoder_state_bw = tf.contrib.rnn.stack_bidirectional_dynamic_rnn(
                fw_cells, bw_cells, self.encoder_emb_inp,
                sequence_length=self.X_len, time_major=True, dtype=tf.float32)
            self.encoder_output = tf.concat(encoder_outputs, 2)
            # Decoder initial state: concatenation of the first layer's forward
            # and backward final states (hence num_hidden * 2 in the decoder).
            encoder_state_c = tf.concat((encoder_state_fw[0].c, encoder_state_bw[0].c), 1)
            encoder_state_h = tf.concat((encoder_state_fw[0].h, encoder_state_bw[0].h), 1)
            self.encoder_state = rnn.LSTMStateTuple(c=encoder_state_c, h=encoder_state_h)
        with tf.name_scope("decoder"), tf.variable_scope("decoder", reuse=tf.AUTO_REUSE) as decoder_scope:
            decoder_cell = self.cell(self.num_hidden * 2)
            if not forward_only:
                # ---- Training path: Bahdanau attention + teacher forcing ----
                attention_states = tf.transpose(self.encoder_output, [1, 0, 2])
                attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(
                    self.num_hidden * 2, attention_states, memory_sequence_length=self.X_len, normalize=True)
                decoder_cell = tf.contrib.seq2seq.AttentionWrapper(decoder_cell, attention_mechanism,
                                                                   attention_layer_size=self.num_hidden * 2)
                initial_state = decoder_cell.zero_state(dtype=tf.float32, batch_size=self.batch_size)
                initial_state = initial_state.clone(cell_state=self.encoder_state)
                helper = tf.contrib.seq2seq.TrainingHelper(self.decoder_emb_inp, self.decoder_len, time_major=True)
                decoder = tf.contrib.seq2seq.BasicDecoder(decoder_cell, helper, initial_state)
                outputs, _, _ = tf.contrib.seq2seq.dynamic_decode(decoder, output_time_major=True,
                                                                  scope=decoder_scope)
                self.decoder_output = outputs.rnn_output
                # Project to vocabulary logits, return to batch-major, then
                # zero-pad the time axis out to summary_max_len so the loss
                # below can apply a fixed-size sequence mask.
                self.logits = tf.transpose(
                    self.projection_layer(self.decoder_output), perm=[1, 0, 2])
                self.logits_reshape = tf.concat(
                    [self.logits,
                     tf.zeros([self.batch_size, summary_max_len - tf.shape(self.logits)[1], self.vocabulary_size])],
                    axis=1)
            else:
                # ---- Inference path: tile memory/state for beam search ----
                tiled_encoder_output = tf.contrib.seq2seq.tile_batch(
                    tf.transpose(self.encoder_output, perm=[1, 0, 2]), multiplier=self.beam_width)
                tiled_encoder_final_state = tf.contrib.seq2seq.tile_batch(self.encoder_state,
                                                                          multiplier=self.beam_width)
                tiled_seq_len = tf.contrib.seq2seq.tile_batch(self.X_len, multiplier=self.beam_width)
                attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(
                    self.num_hidden * 2, tiled_encoder_output, memory_sequence_length=tiled_seq_len, normalize=True)
                decoder_cell = tf.contrib.seq2seq.AttentionWrapper(decoder_cell, attention_mechanism,
                                                                   attention_layer_size=self.num_hidden * 2)
                initial_state = decoder_cell.zero_state(dtype=tf.float32,
                                                        batch_size=self.batch_size * self.beam_width)
                initial_state = initial_state.clone(cell_state=tiled_encoder_final_state)
                # Token id 2 is used as the start token and id 3 as the end
                # token (see get_init_embedding, which randomizes rows 2 and 3).
                decoder = tf.contrib.seq2seq.BeamSearchDecoder(
                    cell=decoder_cell,
                    embedding=self.embeddings,
                    start_tokens=tf.fill([self.batch_size], tf.constant(2)),
                    end_token=tf.constant(3),
                    initial_state=initial_state,
                    beam_width=self.beam_width,
                    output_layer=self.projection_layer
                )
                outputs, _, _ = tf.contrib.seq2seq.dynamic_decode(
                    decoder, output_time_major=True, maximum_iterations=summary_max_len, scope=decoder_scope)
                self.prediction = tf.transpose(outputs.predicted_ids, perm=[1, 2, 0])
        with tf.name_scope("loss"):
            if not forward_only:
                # Masked cross-entropy over the padded logits; gradients are
                # clipped to a global norm of 5.0 before the Adam update.
                crossent = tf.nn.sparse_softmax_cross_entropy_with_logits(
                    logits=self.logits_reshape, labels=self.decoder_target)
                weights = tf.sequence_mask(self.decoder_len, summary_max_len, dtype=tf.float32)
                self.loss = tf.reduce_sum(crossent * weights / tf.to_float(self.batch_size))
                params = tf.trainable_variables()
                gradients = tf.gradients(self.loss, params)
                clipped_gradients, _ = tf.clip_by_global_norm(gradients, 5.0)
                optimizer = tf.train.AdamOptimizer(self.learning_rate)
                self.update = optimizer.apply_gradients(zip(clipped_gradients, params),
                                                        global_step=self.global_step)

    def encoding_layer(self, rnn_inputs, rnn_size, num_layers, keep_prob, source_vocab_size, encoding_embedding_size):
        """Alternative unidirectional embedding + stacked-LSTM encoder.

        NOTE(review): not referenced anywhere else in this class; appears to
        be an unused alternative to the bidirectional encoder in __init__ --
        confirm before relying on it.
        """
        # :return: tuple (RNN output, RNN state)
        embed = tf.contrib.layers.embed_sequence(rnn_inputs,
                                                 vocab_size=source_vocab_size,
                                                 embed_dim=encoding_embedding_size)
        stacked_cells = tf.contrib.rnn.MultiRNNCell(
            [tf.contrib.rnn.DropoutWrapper(tf.contrib.rnn.LSTMCell(rnn_size), keep_prob) for _ in range(num_layers)])
        outputs, state = tf.nn.dynamic_rnn(stacked_cells, embed, dtype=tf.float32)
        return outputs, state

    @staticmethod
    def get_init_embedding(int2word_dict, embedding_dim, word2vec_file):
        """Build the initial embedding matrix from a word2vec file.

        Rows follow the sorted order of ``int2word_dict``.  Words missing from
        the word2vec vocabulary get a zero vector; rows 2 and 3 (per the
        comment below, <s> and </s>) are replaced with random normal vectors.
        """
        word_vectors = KeyedVectors.load_word2vec_format(word2vec_file)
        word_vec_list = list()
        for _, word in sorted(int2word_dict.items()):
            try:
                # Strip any "_suffix" tail; lower-case NER placeholder tags so
                # they can match word2vec vocabulary entries.
                word = word.split(sep="_")[0]
                if word in ['LOCATION', 'PERSON', 'ORGANIZATION']:
                    word = word.lower()
                word_vec = word_vectors.word_vec(word)
            except KeyError:
                # Out-of-vocabulary: fall back to a zero vector.
                word_vec = np.zeros([embedding_dim], dtype=np.float32)
            word_vec_list.append(word_vec)
        # random vector for <s> and </s>
        word_vec_list[2] = np.random.normal(0, 1, embedding_dim)
        word_vec_list[3] = np.random.normal(0, 1, embedding_dim)
        # print(word_vec_list)
        return np.array(word_vec_list)
|
{"hexsha": "33035f88d835eb00149ee3bce26752e537111aee", "size": 9328, "ext": "py", "lang": "Python", "max_stars_repo_path": "model.py", "max_stars_repo_name": "KrisG04/encoder-decoder-text-summarizer", "max_stars_repo_head_hexsha": "db2680a89bc4d6a0651cf734445a376da73d4ffb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "model.py", "max_issues_repo_name": "KrisG04/encoder-decoder-text-summarizer", "max_issues_repo_head_hexsha": "db2680a89bc4d6a0651cf734445a376da73d4ffb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "model.py", "max_forks_repo_name": "KrisG04/encoder-decoder-text-summarizer", "max_forks_repo_head_hexsha": "db2680a89bc4d6a0651cf734445a376da73d4ffb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 61.7748344371, "max_line_length": 118, "alphanum_fraction": 0.6224271012, "include": true, "reason": "import numpy", "num_tokens": 1875}
|
from ocdata.vasp import run_vasp, write_vasp_input_files
from ocdata.adsorbates import Adsorbate
from ocdata.bulk_obj import Bulk
from ocdata.surfaces import Surface
from ocdata.combined import Combined
import argparse
import logging
import math
import numpy as np
import os
import pickle
import time
class StructureSampler():
    '''
    A class that creates adsorbate/bulk/surface objects and
    writes vasp input files for one of the following options:
    - one random adsorbate/bulk/surface/config, based on a specified random seed
    - one specified adsorbate, n specified bulks, and all possible surfaces and configs
    - one specified adsorbate, n specified bulks, one specified surface, and all possible configs
    The output directory structure will look like the following:
    - For sampling a random structure, the directories will be `random{seed}/surface` and
      `random{seed}/adslab` for the surface alone and the adsorbate+surface, respectively.
    - For enumerating all structures, the directories will be `{adsorbate}_{bulk}_{surface}/surface`
      and `{adsorbate}_{bulk}_{surface}/adslab{config}`, where everything in braces are the
      respective indices.
    Attributes
    ----------
    args : argparse.Namespace
        contains all command line args
    logger : logging.RootLogger
        logging class to print info
    adsorbate : Adsorbate
        the selected adsorbate object
    all_bulks : list
        list of `Bulk` objects
    bulk_indices_list : list
        list of specified bulk indices (ints) that we want to select
    Public methods
    --------------
    run()
        selects the appropriate materials and writes to files
    '''
    def __init__(self, args):
        '''
        Set up args from argparse, random seed, and logging.
        '''
        self.args = args
        self.logger = logging.getLogger()
        logging.basicConfig(format='[%(asctime)s] %(levelname)s: %(message)s',
                            datefmt='%H:%M:%S')
        self.logger.setLevel(logging.INFO if self.args.verbose else logging.WARNING)
        if self.args.enumerate_all_structures:
            self.bulk_indices_list = [int(ind) for ind in args.bulk_indices.split(',')]
            self.logger.info(f'Enumerating all surfaces/configs for adsorbate {self.args.adsorbate_index} and bulks {self.bulk_indices_list}')
        else:
            self.logger.info('Sampling one random structure')
        # Seeds numpy for the random-sampling mode; in enumerate mode seed is
        # None, which re-seeds from OS entropy (harmless, nothing is sampled).
        np.random.seed(self.args.seed)
    def run(self):
        '''
        Runs the entire job: generates adsorbate/bulk/surface objects and writes to files.
        '''
        start = time.time()
        if self.args.enumerate_all_structures:
            self.adsorbate = Adsorbate(self.args.adsorbate_db, self.args.adsorbate_index)
        self._load_bulks()
        self._load_and_write_surfaces()
        end = time.time()
        self.logger.info(f'Done! ({round(end - start, 2)}s)')
    def _load_bulks(self):
        '''
        Loads bulk structures (one random or a list of specified ones)
        and stores them in self.all_bulks
        '''
        self.all_bulks = []
        with open(self.args.bulk_db, 'rb') as f:
            bulk_db_lookup = pickle.load(f)
        if self.args.enumerate_all_structures:
            for ind in self.bulk_indices_list:
                self.all_bulks.append(Bulk(bulk_db_lookup, self.args.precomputed_structures, ind))
        else:
            # Random mode: Bulk picks its own index when none is given.
            self.all_bulks.append(Bulk(bulk_db_lookup, self.args.precomputed_structures))
    def _load_and_write_surfaces(self):
        '''
        Loops through all bulks and chooses one random or all possible surfaces;
        writes info for that surface and combined surface+adsorbate
        '''
        for bulk_ind, bulk in enumerate(self.all_bulks):
            possible_surfaces = bulk.get_possible_surfaces()
            if self.args.enumerate_all_structures:
                if self.args.surface_index is not None:
                    assert 0 <= self.args.surface_index < len(possible_surfaces), 'Invalid surface index provided'
                    self.logger.info(f'Loading only surface {self.args.surface_index} for bulk {self.bulk_indices_list[bulk_ind]}')
                    included_surface_indices = [self.args.surface_index]
                else:
                    self.logger.info(f'Enumerating all {len(possible_surfaces)} surfaces for bulk {self.bulk_indices_list[bulk_ind]}')
                    included_surface_indices = range(len(possible_surfaces))
                for cur_surface_ind in included_surface_indices:
                    surface_info = possible_surfaces[cur_surface_ind]
                    surface = Surface(bulk, surface_info, cur_surface_ind, len(possible_surfaces))
                    self._combine_and_write(surface, self.bulk_indices_list[bulk_ind], cur_surface_ind)
            else:
                # Random mode: pick one surface, and a fresh random adsorbate.
                surface_info_index = np.random.choice(len(possible_surfaces))
                surface = Surface(bulk, possible_surfaces[surface_info_index], surface_info_index, len(possible_surfaces))
                self.adsorbate = Adsorbate(self.args.adsorbate_db)
                self._combine_and_write(surface)
    def _combine_and_write(self, surface, cur_bulk_index=None, cur_surface_index=None):
        '''
        Add the adsorbate onto a given surface in a Combined object.
        Writes output files for the surface itself and the combined surface+adsorbate
        Args:
            surface: a Surface object to combine with self.adsorbate
            cur_bulk_index: current bulk index from self.bulk_indices_list
            cur_surface_index: current surface index if enumerating all
        '''
        if self.args.enumerate_all_structures:
            output_name_template = f'{self.args.adsorbate_index}_{cur_bulk_index}_{cur_surface_index}'
        else:
            output_name_template = f'random{self.args.seed}'
        self._write_surface(surface, output_name_template)
        combined = Combined(self.adsorbate, surface, self.args.enumerate_all_structures)
        self._write_adsorbed_surface(combined, output_name_template)
    def _write_surface(self, surface, output_name_template):
        '''
        Write VASP input files and metadata for the surface alone.
        Args:
            surface: the Surface object to write info for
            output_name_template: parent directory name for output files
        '''
        bulk_dict = surface.get_bulk_dict()
        bulk_dir = os.path.join(self.args.output_dir, output_name_template, 'surface')
        write_vasp_input_files(bulk_dict['bulk_atomsobject'], bulk_dir)
        self._write_metadata_pkl(bulk_dict, os.path.join(bulk_dir, 'metadata.pkl'))
        self.logger.info(f"wrote surface ({bulk_dict['bulk_samplingstr']}) to {bulk_dir}")
    def _write_adsorbed_surface(self, combined, output_name_template):
        '''
        Write VASP input files and metadata for the adsorbate placed on surface.
        Args:
            combined: the Combined object to write info for, containing any number of adslabs
            output_name_template: parent directory name for output files
        '''
        self.logger.info(f'Writing {combined.num_configs} adslab configs')
        for config_ind in range(combined.num_configs):
            if self.args.enumerate_all_structures:
                adsorbed_bulk_dir = os.path.join(self.args.output_dir, output_name_template, f'adslab{config_ind}')
            else:
                adsorbed_bulk_dir = os.path.join(self.args.output_dir, output_name_template, 'adslab')
            adsorbed_bulk_dict = combined.get_adsorbed_bulk_dict(config_ind)
            write_vasp_input_files(adsorbed_bulk_dict['adsorbed_bulk_atomsobject'], adsorbed_bulk_dir)
            self._write_metadata_pkl(adsorbed_bulk_dict, os.path.join(adsorbed_bulk_dir, 'metadata.pkl'))
            if config_ind == 0:
                self.logger.info(f"wrote adsorbed surface ({adsorbed_bulk_dict['adsorbed_bulk_samplingstr']}) to {adsorbed_bulk_dir}")
    def _write_metadata_pkl(self, dict_to_write, path):
        '''
        Writes a dict as a metadata pickle
        Args:
            dict_to_write: dict containing all info to dump as file
            path: full output file path (callers already append 'metadata.pkl')
        '''
        # Fix: removed an unused `file_path = os.path.join(path, 'metadata.pkl')`
        # leftover -- `path` is already the complete file path.
        with open(path, 'wb') as f:
            pickle.dump(dict_to_write, f)
def parse_args():
    """Build the CLI parser, parse sys.argv, and validate argument combinations.

    Returns:
        argparse.Namespace holding the parsed arguments.
    """
    parser = argparse.ArgumentParser(description='Sample adsorbate and bulk surface(s)')
    add = parser.add_argument
    add('--seed', type=int, default=None, help='Random seed for sampling')
    # input and output
    add('--bulk_db', type=str, required=True, help='Underlying db for bulks')
    add('--adsorbate_db', type=str, required=True, help='Underlying db for adsorbates')
    add('--output_dir', type=str, required=True, help='Root directory for outputs')
    # for optimized (automatically try to use optimized if this is provided)
    add('--precomputed_structures', type=str, default=None, help='Root directory of precomputed structures')
    # args for enumerating all combinations:
    add('--enumerate_all_structures', action='store_true', default=False,
        help='Find all possible structures given a specific adsorbate and a list of bulks')
    add('--adsorbate_index', type=int, default=None, help='Adsorbate index (int)')
    add('--bulk_indices', type=str, default=None, help='Comma separated list of bulk indices')
    add('--surface_index', type=int, default=None, help='Optional surface index (int)')
    add('--verbose', action='store_true', default=False, help='Log detailed info')

    args = parser.parse_args()
    # Enumeration mode needs explicit adsorbate/bulks; sampling mode needs a seed.
    if args.enumerate_all_structures:
        if args.adsorbate_index is None or args.bulk_indices is None:
            parser.error('Enumerating all structures requires specified adsorbate and bulks')
    elif args.seed is None:
        parser.error('Seed is required when sampling one random structure')
    return args
# Entry point: parse the CLI arguments and run the sampling job.
if __name__ == '__main__':
    args = parse_args()
    job = StructureSampler(args)
    job.run()
|
{"hexsha": "f061782062de2d88c6e498ee3486a1c1a62e8d38", "size": 10284, "ext": "py", "lang": "Python", "max_stars_repo_path": "sample_structure.py", "max_stars_repo_name": "cesmix-mit/Open-Catalyst-Dataset", "max_stars_repo_head_hexsha": "ffd73a9bf5c5ec26efd15e3cf66c1c0b376886d5", "max_stars_repo_licenses": ["CC-BY-4.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "sample_structure.py", "max_issues_repo_name": "cesmix-mit/Open-Catalyst-Dataset", "max_issues_repo_head_hexsha": "ffd73a9bf5c5ec26efd15e3cf66c1c0b376886d5", "max_issues_repo_licenses": ["CC-BY-4.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sample_structure.py", "max_forks_repo_name": "cesmix-mit/Open-Catalyst-Dataset", "max_forks_repo_head_hexsha": "ffd73a9bf5c5ec26efd15e3cf66c1c0b376886d5", "max_forks_repo_licenses": ["CC-BY-4.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.3039647577, "max_line_length": 142, "alphanum_fraction": 0.6811551925, "include": true, "reason": "import numpy", "num_tokens": 2226}
|
      SUBROUTINE TSTIVF (WKID, INCSIZ,INCSET, EXCSIZ,EXCSET)

C  TSTIVF tests that the appropriate primitives are invisible for
C  a given inclusion and exclusion set used as the invisibility
C  filter.
C
C  Input parameters:
C    WKID          : workstation identifier
C    INCSIZ,INCSET : inclusion set
C    EXCSIZ,EXCSET : exclusion set

      INTEGER WKID, INCSIZ,INCSET(*), EXCSIZ,EXCSET(*)

C  TRUSIZ,TRULIS receive the expected list of invisible primitive
C  types computed from the inclusion/exclusion sets by ELGPRM.
      INTEGER TRUSIZ,TRULIS(8)

C  <set invisibility filter>:
      CALL PSIVFT (WKID, INCSIZ,INCSET, EXCSIZ,EXCSET)

C  Compute expected subset of invisible primitives from incl,excl
      CALL ELGPRM (INCSIZ,INCSET, EXCSIZ,EXCSET, TRUSIZ,TRULIS)

C  Ask the operator to confirm the expected set against the display.
      CALL DLSTPF ('INVISIBILITY FILTER: Which primitives are ' //
     1             'invisible (n if none)?', TRUSIZ, TRULIS, 'S')

      END
|
{"hexsha": "f5de0215fcf68d7a4cd1eda6eb5e71e7fab32c7d", "size": 789, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "third_party/Phigs/PVT/PVT_fort/V2LIB/tstivf.f", "max_stars_repo_name": "n1ckfg/Telidon", "max_stars_repo_head_hexsha": "f4e2c693ec7d67245974b73a602d5d40df6a6d69", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 18, "max_stars_repo_stars_event_min_datetime": "2017-07-08T02:34:02.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-08T03:42:48.000Z", "max_issues_repo_path": "third_party/Phigs/PVT/PVT_fort/V2LIB/tstivf.f", "max_issues_repo_name": "n1ckfg/Telidon", "max_issues_repo_head_hexsha": "f4e2c693ec7d67245974b73a602d5d40df6a6d69", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "third_party/Phigs/PVT/PVT_fort/V2LIB/tstivf.f", "max_forks_repo_name": "n1ckfg/Telidon", "max_forks_repo_head_hexsha": "f4e2c693ec7d67245974b73a602d5d40df6a6d69", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2018-02-03T04:44:00.000Z", "max_forks_repo_forks_event_max_datetime": "2020-08-05T15:31:18.000Z", "avg_line_length": 32.875, "max_line_length": 66, "alphanum_fraction": 0.690747782, "num_tokens": 254}
|
//Copyright 2019 CrazyOverdose
#ifndef INCLUDE_ANALYSIS_HPP_
#define INCLUDE_ANALYSIS_HPP_
#include <algorithm>
#include <cstdlib>
#include <iostream>
#include <string>
#include <unordered_map>
#include <vector>

#include <boost/filesystem.hpp>
// Calendar date; filled by analysis::parcer from the YYYYMMDD part of a
// report file name and printed as day.month.year by operator<<.
struct Date {
    int day;
    int month;
    int year;
};
// Metadata extracted from a single report file name.
struct information {
    std::string title;   // file name
    std:: string broker; // folder (broker directory name)
    std:: string type;   // file-name prefix before the first '_'
    int account;         // account number
    Date data;           // date
};
// Print a Date as "day.month.year".
std::ostream& operator<< (std::ostream &out, const Date &date)
{
    return out << date.day << "." << date.month << "." << date.year;
}
// Strict-weak "earlier than" ordering on file dates: compares year, then
// month, then day (used with std::max_element to find the latest report).
bool lastdate(information file1, information file2)
{
    if (file1.data.year != file2.data.year)
        return file1.data.year < file2.data.year;
    if (file1.data.month != file2.data.month)
        return file1.data.month < file2.data.month;
    return file1.data.day < file2.data.day;
}
class analysis
{
protected:
boost::filesystem :: path path_to_ftp;
std::vector <information> informations;
std::unordered_map<int, std::vector<information>> accounts_groups;
public:
explicit analysis(boost::filesystem :: path path)
{
this->path_to_ftp = path;
}
void work(boost::filesystem :: path path) {
for (boost::filesystem::directory_entry dir_iter :
boost::filesystem::directory_iterator{ path })
{
if (boost::filesystem::is_regular_file(dir_iter))
file(dir_iter);
if (boost::filesystem::is_directory(dir_iter))
work(dir_iter); }
}
void file(boost::filesystem :: path path)
{
try {
information new_file = parcer(path.filename().string());
informations.push_back(new_file);
accounts_groups[new_file.account].push_back(new_file);
}
catch (const std::logic_error&){}
}
void print_name_files()
{
std::cout << "Task one " << std::endl;
for (size_t i = 0; i < informations.size(); ++i)
{
std::cout << informations[i].broker << " ";
std::cout << informations[i].title << std::endl;
}
}
void print_information()
{
std::cout << "Task two " << std::endl;
for (const auto& i : accounts_groups )
{
std::cout << "broker: " << i.second[0].broker << "| ";
std::cout << "account: " << i.first << "| ";
std::cout << "files: " << i.second.size() << "| ";
std::cout << "lastdate: ";
std::cout << std::max_element(i.second.begin(), i.second.end(), lastdate)->data;
std::cout << " ";
std::cout << std::endl;
}
}
information parcer(std::string file)
{
information new_file;
new_file.title = file;
new_file.type = file.substr(0, file.find('_'));
file = file.substr(file.find('_') + 1);
new_file.account = std::stoi(file.substr(0, file.find('_')));
file = file.substr(file.find('_') + 1);
new_file.data.year = std::stoi(file.substr(0, 4));
new_file.data.month = std::stoi(file.substr(4, 2));
new_file.data.day = std::stoi(file.substr(6, 2));
file = file.substr(8);
if (file[0] != '.' || file.substr(0, 4) == ".old")
throw std::logic_error("");
if (file.substr(1).find('.') != std::string::npos)
throw std::logic_error("");
return new_file;
}
};
#endif // INCLUDE_ANALYSIS_HPP_
|
{"hexsha": "6cc08351d8363737a0780d9c5df9b00d1d1de980", "size": 3602, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/analysis.hpp", "max_stars_repo_name": "CrazyOverdose/lab-04-boost-filesystem", "max_stars_repo_head_hexsha": "b0ce1ab53ae3e9fe2f8a7e60bf56d0555bb7d42a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "include/analysis.hpp", "max_issues_repo_name": "CrazyOverdose/lab-04-boost-filesystem", "max_issues_repo_head_hexsha": "b0ce1ab53ae3e9fe2f8a7e60bf56d0555bb7d42a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "include/analysis.hpp", "max_forks_repo_name": "CrazyOverdose/lab-04-boost-filesystem", "max_forks_repo_head_hexsha": "b0ce1ab53ae3e9fe2f8a7e60bf56d0555bb7d42a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.3378378378, "max_line_length": 80, "alphanum_fraction": 0.5577456968, "num_tokens": 892}
|
# -*- coding: utf-8 -*-
# @Time : 2021/2/15 23:03
# @Author : Zeqi@@
# @FileName: Prediction_Decoder.py
# @Software: PyCharm
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import MaxPooling2D
def nms(heat, kernel=3):
    # Heat-map "non-maximum suppression": a point survives only if it equals
    # the maximum of its kernel x kernel neighbourhood; all other points are
    # zeroed out.  SAME padding keeps the spatial shape unchanged.
    hmax = MaxPooling2D((kernel, kernel), strides=1, padding='SAME')(heat)
    heat = tf.where(tf.equal(hmax, heat), heat, tf.zeros_like(heat))
    return heat
def topk(hm, max_objects=100):
    """Select the top `max_objects` peaks from the class heat map.

    Returns (scores, flattened spatial indices, class ids, x grid coords,
    y grid coords), each of shape (b, max_objects).
    """
    #-------------------------------------------------------------------------#
    #   Example: predicting COCO from a 512x512x3 image gives
    #   h = w = 128, num_classes = 80, i.e. heat map hm -> b, 128, 128, 80.
    #   Apply non-maximum suppression via 3x3 max pooling so that only the
    #   highest-scoring feature point survives in each local region.
    #-------------------------------------------------------------------------#
    # print(hm, np.shape(hm), np.min(hm), np.max(hm))
    hm = nms(hm)
    b, h, w, c = tf.shape(hm)[0], tf.shape(hm)[1], tf.shape(hm)[2], tf.shape(hm)[3]
    #-------------------------------------------#
    #   Flatten all scores to (b, h * w * c),
    #   e.g. (b, 128 * 128 * 80) in the example above.
    #-------------------------------------------#
    hm = tf.reshape(hm, (b, -1))
    #-----------------------------#
    #   (b, k), (b, k)
    #-----------------------------#
    scores, indices = tf.math.top_k(hm, k=max_objects, sorted=True)
    #--------------------------------------#
    #   Recover the class id, grid coordinates and
    #   flattened spatial index from each flat index.
    #--------------------------------------#
    class_ids = indices % c
    xs = indices // c % w
    ys = indices // c // w
    indices = ys * w + xs
    return scores, indices, class_ids, xs, ys
def decode(hm, wh, reg, max_objects=100, num_classes=20):
    """Decode CenterNet head outputs into boxes.

    Returns `detections` of shape (b, max_objects, 6):
    [x1, y1, x2, y2, score, class_id] in heat-map grid coordinates.

    NOTE(review): ``num_classes`` is unused in this function — kept for
    interface compatibility; confirm whether callers rely on it.
    """
    # -----------------------------------------------------#
    #   hm          b, 128, 128, num_classes   (class heat map)
    #   wh          b, 128, 128, 2             (box width/height)
    #   reg         b, 128, 128, 2             (center offset)
    #   scores      b, max_objects
    #   indices     b, max_objects
    #   class_ids   b, max_objects
    #   xs          b, max_objects
    #   ys          b, max_objects
    # -----------------------------------------------------#
    scores, indices, class_ids, xs, ys = topk(hm, max_objects=max_objects)
    b = tf.shape(hm)[0]
    # -----------------------------------------------------#
    #   Flatten spatial dims:
    #   wh          b, 128 * 128, 2
    #   reg         b, 128 * 128, 2
    # -----------------------------------------------------#
    reg = tf.reshape(reg, [b, -1, 2])
    wh = tf.reshape(wh, [b, -1, 2])
    length = tf.shape(wh)[1]
    # -----------------------------------------------------#
    #   Build flat (batch, position) indices for gathering:
    #   batch_idx   b, max_objects
    # -----------------------------------------------------#
    batch_idx = tf.expand_dims(tf.range(0, b), 1)
    batch_idx = tf.tile(batch_idx, (1, max_objects))
    full_indices = tf.reshape(batch_idx, [-1]) * tf.cast(length, tf.int32) + tf.reshape(indices, [-1])
    # -----------------------------------------------------#
    #   Gather the offset and size parameters of the top_k boxes.
    # -----------------------------------------------------#
    topk_reg = tf.gather(tf.reshape(reg, [-1, 2]), full_indices)
    topk_reg = tf.reshape(topk_reg, [b, -1, 2])
    topk_wh = tf.gather(tf.reshape(wh, [-1, 2]), full_indices)
    topk_wh = tf.reshape(topk_wh, [b, -1, 2])
    # -----------------------------------------------------#
    #   Apply the regressed offsets to get the adjusted box centers:
    #   topk_cx     b,k,1
    #   topk_cy     b,k,1
    # -----------------------------------------------------#
    topk_cx = tf.cast(tf.expand_dims(xs, axis=-1), tf.float32) + topk_reg[..., 0:1]
    topk_cy = tf.cast(tf.expand_dims(ys, axis=-1), tf.float32) + topk_reg[..., 1:2]
    # -----------------------------------------------------#
    #   Compute the box corners from center and size:
    #   topk_x1     b,k,1   top-left x
    #   topk_y1     b,k,1   top-left y
    #   topk_x2     b,k,1   bottom-right x
    #   topk_y2     b,k,1   bottom-right y
    # -----------------------------------------------------#
    topk_x1, topk_y1 = topk_cx - topk_wh[..., 0:1] / 2, topk_cy - topk_wh[..., 1:2] / 2
    topk_x2, topk_y2 = topk_cx + topk_wh[..., 0:1] / 2, topk_cy + topk_wh[..., 1:2] / 2
    # -----------------------------------------------------#
    #   scores      b,k,1   box confidence
    #   class_ids   b,k,1   box class
    # -----------------------------------------------------#
    scores = tf.expand_dims(scores, axis=-1)
    class_ids = tf.cast(tf.expand_dims(class_ids, axis=-1), tf.float32)
    # -----------------------------------------------------#
    #   detections: stack of all box parameters —
    #   first four are coordinates, last two are score and class.
    # -----------------------------------------------------#
    detections = tf.concat([topk_x1, topk_y1, topk_x2, topk_y2, scores, class_ids], axis=-1)
    return detections
|
{"hexsha": "815f0cf687b46cab475a14a9637050f7f19943ac", "size": 4634, "ext": "py", "lang": "Python", "max_stars_repo_path": "Utils/Prediction_Decoder.py", "max_stars_repo_name": "monchhichizzq/CenterNet", "max_stars_repo_head_hexsha": "718bfbfa1940a8b068ab359aaca6737c3c173ad0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Utils/Prediction_Decoder.py", "max_issues_repo_name": "monchhichizzq/CenterNet", "max_issues_repo_head_hexsha": "718bfbfa1940a8b068ab359aaca6737c3c173ad0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Utils/Prediction_Decoder.py", "max_forks_repo_name": "monchhichizzq/CenterNet", "max_forks_repo_head_hexsha": "718bfbfa1940a8b068ab359aaca6737c3c173ad0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.6068376068, "max_line_length": 102, "alphanum_fraction": 0.4013810962, "include": true, "reason": "import numpy", "num_tokens": 1515}
|
import numpy
from generate import *
def generate():
    """Build the DeinterleaveBlock test spec: 2- and 3-channel deinterleave
    vectors for Float32 and ComplexFloat32 inputs."""
    vectors = []
    for rand, name in ((random_float32, "Float32"), (random_complex64, "ComplexFloat32")):
        x = rand(256)
        vectors.append(TestVector([2], [x], [x[0::2], x[1::2]], f"Deinterleave 2 channels, {name} input"))
        vectors.append(TestVector([3], [x], [x[0::3], x[1::3], x[2::3]], f"Deinterleave 3 channels, {name} input"))
    return BlockSpec("DeinterleaveBlock", vectors, 1e-6)
|
{"hexsha": "dfda1843cb9153d682bebb653b1bdcbc603b6ebe", "size": 633, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/blocks/signal/deinterleave_spec.py", "max_stars_repo_name": "telent/luaradio", "max_stars_repo_head_hexsha": "c1cb47325e4eb2886915f810fff5324571aeb59d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 559, "max_stars_repo_stars_event_min_datetime": "2016-07-02T19:07:39.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-28T15:02:21.000Z", "max_issues_repo_path": "tests/blocks/signal/deinterleave_spec.py", "max_issues_repo_name": "telent/luaradio", "max_issues_repo_head_hexsha": "c1cb47325e4eb2886915f810fff5324571aeb59d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 68, "max_issues_repo_issues_event_min_datetime": "2016-07-03T05:35:47.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T21:24:07.000Z", "max_forks_repo_path": "tests/blocks/signal/deinterleave_spec.py", "max_forks_repo_name": "telent/luaradio", "max_forks_repo_head_hexsha": "c1cb47325e4eb2886915f810fff5324571aeb59d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 64, "max_forks_repo_forks_event_min_datetime": "2016-07-02T23:59:10.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-02T18:11:07.000Z", "avg_line_length": 37.2352941176, "max_line_length": 118, "alphanum_fraction": 0.6366508689, "include": true, "reason": "import numpy", "num_tokens": 208}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
"""Test desiutil.depend.
"""
import unittest
import sys
from collections import OrderedDict
from ..depend import (setdep, getdep, hasdep, iterdep, Dependencies,
add_dependencies)
from .. import __version__ as desiutil_version
try:
from astropy.io import fits
test_fits_header = True
except ImportError:
test_fits_header = False
class TestDepend(unittest.TestCase):
    """Test desiutil.depend
    """

    @classmethod
    def setUpClass(cls):
        pass

    @classmethod
    def tearDownClass(cls):
        pass

    def test_setdep(self):
        """Test function that sets dependency keywords.
        """
        hdr = dict()
        setdep(hdr, 'blat', '1.2.3')
        self.assertEqual(hdr['DEPNAM00'], 'blat')
        self.assertEqual(hdr['DEPVER00'], '1.2.3')
        setdep(hdr, 'foo', '2.3.4')
        self.assertEqual(hdr['DEPNAM01'], 'foo')
        self.assertEqual(hdr['DEPVER01'], '2.3.4')
        # Re-setting an existing name updates it in place.
        setdep(hdr, 'blat', '3.4.5')
        self.assertEqual(hdr['DEPNAM00'], 'blat')
        self.assertEqual(hdr['DEPVER00'], '3.4.5')
        setdep(hdr, 'bar', '7.8.9')
        setdep(hdr, 'baz', '9.8.7')
        setdep(hdr, 'foo', '4.3.2')
        self.assertEqual(hdr['DEPNAM01'], 'foo')
        self.assertEqual(hdr['DEPVER01'], '4.3.2')
        # Only 100 DEPNAMnn/DEPVERnn slots exist (00-99); the 101st raises.
        hdr = dict()
        with self.assertRaises(IndexError):
            for i in range(101):
                setdep(hdr, 'test{0:03d}'.format(i), "v{0:d}.0.1".format(i))

    def test_getdep(self):
        """Test function that gets dependency values.
        """
        hdr = dict()
        for i in range(10):
            setdep(hdr, 'test{0:03d}'.format(i), "v{0:d}.0.1".format(i))
        self.assertEqual(hdr['DEPNAM00'], 'test000')
        self.assertEqual(hdr['DEPVER00'], 'v0.0.1')
        self.assertEqual(getdep(hdr, 'test005'), 'v5.0.1')
        # Headers whose dependency index starts at 01 must also work.
        hdr = dict()
        for i in range(10):
            hdr["DEPNAM{0:02d}".format(i+1)] = "test{0:03d}".format(i+1)
            hdr["DEPVER{0:02d}".format(i+1)] = "v{0:d}.0.1".format(i+1)
        self.assertEqual(getdep(hdr, 'test005'), 'v5.0.1')
        with self.assertRaises(KeyError):
            getdep(hdr, 'test100')

    def test_hasdep(self):
        """Test function that checks for the existence of a dependency.
        """
        hdr = dict()
        for i in range(10):
            hdr["DEPNAM{0:02d}".format(i+1)] = "test{0:03d}".format(i+1)
            hdr["DEPVER{0:02d}".format(i+1)] = "v{0:d}.0.1".format(i+1)
        self.assertTrue(hasdep(hdr, 'test001'))
        self.assertTrue(hasdep(hdr, 'test010'))
        self.assertFalse(hasdep(hdr, 'test020'))

    @unittest.skipUnless(test_fits_header, 'requires astropy.io.fits')
    def test_fits_header(self):
        """Test dependency functions with an actual FITS header.
        """
        hdr = fits.Header()
        setdep(hdr, 'blat', '1.2.3')
        self.assertEqual(getdep(hdr, 'blat'), '1.2.3')
        self.assertTrue(hasdep(hdr, 'blat'))
        self.assertFalse(hasdep(hdr, 'zoom'))
        setdep(hdr, 'blat', '1.5')
        self.assertEqual(getdep(hdr, 'blat'), '1.5')
        self.assertTrue(hasdep(hdr, 'blat'))
        with self.assertRaises(KeyError):
            getdep(hdr, 'foo')

    def test_update(self):
        """Test updates of dependencies.
        """
        hdr = dict()
        setdep(hdr, 'blat', '1.0')
        self.assertEqual(getdep(hdr, 'blat'), '1.0')
        setdep(hdr, 'blat', '2.0')
        self.assertEqual(getdep(hdr, 'blat'), '2.0')
        # Updating an existing name must not consume a new slot.
        self.assertNotIn('DEPNAM01', hdr)
        setdep(hdr, 'foo', '3.0')
        self.assertEqual(hdr['DEPNAM01'], 'foo')
        self.assertEqual(hdr['DEPVER01'], '3.0')

    def test_iter(self):
        """Test iteration methods.
        """
        hdr = dict()
        for i in range(100):
            hdr["DEPNAM{0:02d}".format(i)] = "test{0:03d}".format(i)
            hdr["DEPVER{0:02d}".format(i)] = "v{0:d}.0.1".format(i)
        y = Dependencies(hdr)
        for name in y:
            self.assertEqual(y[name], getdep(hdr, name))
        for name, version in y.items():
            self.assertEqual(version, getdep(hdr, name))
        for name, version in iterdep(hdr):
            self.assertEqual(version, getdep(hdr, name))
        #
        # Test dependency index starting from one.
        #
        hdr = dict()
        for j in range(1, 20):
            # Fixed: these lines previously used the stale loop variable
            # ``i`` from the loop above, so only one key was ever written
            # and this case tested nothing.
            hdr["DEPNAM{0:02d}".format(j)] = "test{0:03d}".format(j)
            hdr["DEPVER{0:02d}".format(j)] = "v{0:d}.0.1".format(j)
        y = Dependencies(hdr)
        for name in y:
            self.assertEqual(y[name], getdep(hdr, name))
        for name, version in y.items():
            self.assertEqual(version, getdep(hdr, name))
        for name, version in iterdep(hdr):
            self.assertEqual(version, getdep(hdr, name))

    def test_class(self):
        """Test the Dependencies object.
        """
        d = Dependencies()
        self.assertTrue(isinstance(d.header, OrderedDict))
        hdr = dict()
        x = Dependencies(hdr)
        x['blat'] = '1.2.3'
        x['foo'] = '0.1'
        self.assertEqual(x['blat'], hdr['DEPVER00'])
        self.assertEqual(x['foo'], hdr['DEPVER01'])
        for name, version in x.items():
            self.assertEqual(version, x[name])
        for name in x:
            self.assertEqual(x[name], getdep(hdr, name))

    def test_add_dependencies(self):
        """Test add_dependencies function.
        """
        hdr = OrderedDict()
        add_dependencies(hdr, long_python=True)
        self.assertEqual(getdep(hdr, 'python'),
                         sys.version.replace('\n', ' '))
        hdr = OrderedDict()
        add_dependencies(hdr)
        self.assertEqual(getdep(hdr, 'python'),
                         ".".join(map(str, sys.version_info[0:3])))
        self.assertEqual(getdep(hdr, 'desiutil'), desiutil_version)
        import numpy
        add_dependencies(hdr)
        self.assertEqual(getdep(hdr, 'numpy'), numpy.__version__)
        # ok, but no action: unknown module names are silently skipped.
        add_dependencies(hdr, ['blatbar', 'quatlarm'])
        self.assertFalse(hasdep(hdr, 'blatbar'))
        self.assertFalse(hasdep(hdr, 'quatlarm'))
        # Modules with no .__version__ are recorded as 'unknown...'.
        add_dependencies(hdr, ['os.path', 'unittest', 'sys'])
        self.assertTrue(hasdep(hdr, 'os.path'))
        self.assertTrue(getdep(hdr, 'os.path').startswith('unknown'))
        self.assertTrue(hasdep(hdr, 'unittest'))
        self.assertTrue(getdep(hdr, 'unittest').startswith('unknown'))
        self.assertTrue(hasdep(hdr, 'sys'))
        self.assertTrue(getdep(hdr, 'sys').startswith('unknown'))
def test_suite():
    """Allows testing of only this module with the command::

        python setup.py test -m <modulename>
    """
    loader = unittest.defaultTestLoader
    return loader.loadTestsFromName(__name__)
|
{"hexsha": "86693ddb94efa4c74fb8da28b1740893d5c7b6c2", "size": 6905, "ext": "py", "lang": "Python", "max_stars_repo_path": "py/desiutil/test/test_depend.py", "max_stars_repo_name": "sdss/lvmutil", "max_stars_repo_head_hexsha": "1938f6e1d7f4074a90a55570a316886850c5c6af", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "py/desiutil/test/test_depend.py", "max_issues_repo_name": "sdss/lvmutil", "max_issues_repo_head_hexsha": "1938f6e1d7f4074a90a55570a316886850c5c6af", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "py/desiutil/test/test_depend.py", "max_forks_repo_name": "sdss/lvmutil", "max_forks_repo_head_hexsha": "1938f6e1d7f4074a90a55570a316886850c5c6af", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.0507614213, "max_line_length": 76, "alphanum_fraction": 0.5656770456, "include": true, "reason": "import numpy,from astropy", "num_tokens": 1816}
|
import numpy as np
import pandas as pd
def add_features(x: pd.DataFrame):
    """Add derived financial-ratio columns to ``x`` in place.

    For each reporting year (0 = latest, -1 = previous) this derives OKVED
    activity-code prefixes and a set of balance-sheet ratios from raw
    statement-line columns named ``year_{y}_{line}``.  Infinities from zero
    denominators are clipped to +/-1000000 and NaNs are replaced by 0.

    NOTE(review): assumes ``year_0_*``/``year_-1_*`` columns and — for the
    turnover ratios — ``year_-1_*``/``year_-2_*`` columns for lines
    1210/1230/1520 exist; confirm against the caller's schema.
    """
    # Derived ratio columns that receive the inf/NaN cleanup at the end of
    # each year's loop iteration.
    method_cols = ['financialDebt', 'CreditLeverage', 'FinancialIndependence', 'DebtBurden',
                   'CoverageDebtWithAccumulatedProfit',
                   'ReturnAssetsNetProfit', 'ReturnAssetsOperatingProfit', 'OperatingMargin', 'NetProfitMargin',
                   'LiabilityCoverageOperatingProfit', 'OperatingProfitFinancialDebtRatio', 'FinancialDebtRevenueRatio',
                   'CurrentLiquidity', 'QuickLiquidity', 'InstantLiquidity', 'LevelOfOperatingAssets', 'turnoverDebtorDebt',
                   'turnoverReserves', 'turnoverCreditDebt', 'FinancialCycle', 'AssetTurnover']
    for year in [0, -1]:
        # OKVED prefixes: "NN.NN" and the leading "NN" group.
        x[f'year_{year}_okved2'] = x[f'year_{year}_okved'].str.extract(r'(^[0-9]+.[0-9]+)').fillna('__null__')
        x[f'year_{year}_okved1'] = x[f'year_{year}_okved'].str.extract(r'(^[0-9]+)').fillna('__null__')
        # NOTE(review): financialDebt sums lines 1500 + 1400 + 1250 — verify
        # the intended formula (1250 is normally the cash line).
        x[f'year_{year}_financialDebt'] = x[[f'year_{year}_1500', f'year_{year}_1400', f'year_{year}_1250']].sum(axis=1)
        financial_debt = x[f'year_{year}_financialDebt']
        # Leverage / structure ratios.
        x[f'year_{year}_CreditLeverage'] = x[f'year_{year}_1300'] / x[f'year_{year}_1500']
        x[f'year_{year}_FinancialIndependence'] = x[f'year_{year}_1300'] / x[f'year_{year}_1600']
        x[f'year_{year}_DebtBurden'] = financial_debt / x[f'year_{year}_1600']
        x[f'year_{year}_CoverageDebtWithAccumulatedProfit'] = x[f'year_{year}_1300'] / financial_debt
        # Profitability ratios.
        x[f'year_{year}_ReturnAssetsNetProfit'] = x[f'year_{year}_2400'] / x[f'year_{year}_1600']
        x[f'year_{year}_ReturnAssetsOperatingProfit'] = x[f'year_{year}_2200'] / x[f'year_{year}_1600']
        x[f'year_{year}_OperatingMargin'] = x[f'year_{year}_2200'] / x[[f'year_{year}_2110', f'year_{year}_financialDebt']].max(axis=1)
        x[f'year_{year}_NetProfitMargin'] = x[f'year_{year}_2400'] / x[[f'year_{year}_2110', f'year_{year}_financialDebt']].max(axis=1)
        x[f'year_{year}_LiabilityCoverageOperatingProfit'] = x[f'year_{year}_2200'] / x[[f'year_{year}_1400', f'year_{year}_1500']].sum(axis=1)
        x[f'year_{year}_OperatingProfitFinancialDebtRatio'] = x[f'year_{year}_2200'] / financial_debt
        x[f'year_{year}_FinancialDebtRevenueRatio'] = financial_debt / x[f'year_{year}_2110']
        # Liquidity ratios.
        x[f'year_{year}_CurrentLiquidity'] = x[f'year_{year}_1200'] / x[f'year_{year}_1500']
        x[f'year_{year}_QuickLiquidity'] = (x[f'year_{year}_1200'] - x[f'year_{year}_1210']) / x[f'year_{year}_1500']
        x[f'year_{year}_InstantLiquidity'] = x[f'year_{year}_1250'] / x[f'year_{year}_1500']
        x[f'year_{year}_LevelOfOperatingAssets'] = (x[f'year_{year}_1210'] + x[f'year_{year}_1230'] - x[f'year_{year}_1520']) / x[f'year_{year}_2110']
        # Turnover ratios in days, averaged over this year and the previous
        # one (hence the year - 1 columns).
        x[f'year_{year}_turnoverDebtorDebt'] = 365 * (x[f'year_{year}_1230'] + x[f'year_{year - 1}_1230']) / (2 * x[f'year_{year}_2110'])
        x[f'year_{year}_turnoverReserves'] = 365 * (x[f'year_{year}_1210'] + x[f'year_{year - 1}_1210']) / (2 * x[f'year_{year}_2110'])
        x[f'year_{year}_turnoverCreditDebt'] = 365 * (x[f'year_{year}_1520'] + x[f'year_{year - 1}_1520']) / (2 * x[f'year_{year}_2110'])
        x[f'year_{year}_FinancialCycle'] = x[f'year_{year}_turnoverDebtorDebt'] + x[f'year_{year}_turnoverReserves'] - x[f'year_{year}_turnoverCreditDebt']
        x[f'year_{year}_AssetTurnover'] = x[f'year_{year}_2110'] / x[f'year_{year}_1600']
        # Clean up division artifacts: clip infinities, zero out NaNs.
        for col in method_cols:
            x[f'year_{year}_{col}'].replace(np.inf, 1000000, inplace=True)
            x[f'year_{year}_{col}'].replace(-np.inf, -1000000, inplace=True)
            x[f'year_{year}_{col}'].replace(np.nan, 0, inplace=True)
|
{"hexsha": "9c7d92e79cb4d9eee8546158b2f2af4691da8fed", "size": 3650, "ext": "py", "lang": "Python", "max_stars_repo_path": "PythonBackend/feature_generation.py", "max_stars_repo_name": "goo-goo-goo-joob/CreditRisks", "max_stars_repo_head_hexsha": "c874941f3787a0c73063883a019a61672e7bef2f", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-09-19T12:32:45.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-19T12:32:45.000Z", "max_issues_repo_path": "PythonBackend/feature_generation.py", "max_issues_repo_name": "goo-goo-goo-joob/CreditRisks", "max_issues_repo_head_hexsha": "c874941f3787a0c73063883a019a61672e7bef2f", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "PythonBackend/feature_generation.py", "max_forks_repo_name": "goo-goo-goo-joob/CreditRisks", "max_forks_repo_head_hexsha": "c874941f3787a0c73063883a019a61672e7bef2f", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 84.8837209302, "max_line_length": 155, "alphanum_fraction": 0.6567123288, "include": true, "reason": "import numpy", "num_tokens": 1231}
|
# -*- coding: utf-8 -*-
# cheby_checker/cheby_checker/obs_pos.py
"""
--------------------------------------------------------------
Parse an obscode and figure out where the observer is at a given time.
Jan 2020
Matt Payne & Mike Alexandersen
*WRITE MORE STUFF*
--------------------------------------------------------------
"""
# Import third-party packages
# --------------------------------------------------------------
import numpy as np
# Import local modules
# --------------------------------------------------------------
from . import MPC_library as mpc
class ObsPos:
    """Class containing all the functionality for figuring out where
    the observer is. """

    def __init__(self):
        # MPC Observatory list:
        self.obsCodes = mpc.Observatory()

    def get_heliocentric_equatorial_xyz(self, jd_utc, obsCode=None,
                                        verbose=False):
        """
        Get the heliocentric EQUATORIAL vector coordinates of the
        observatory at the time jd_utc.
        """
        code = self.check_obsCode(obsCode, verbose)
        return self.obsCodes.getObservatoryPosition(code, jd_utc,
                                                    old=False)  # * u.au

    def get_heliocentric_ecliptic_xyz(self, jd_utc, obsCode=None, verbose=False):
        """
        Get the heliocentric ECLIPTIC vector coordinates of the
        observatory at the time jd_utc.
        """
        code = self.check_obsCode(obsCode, verbose)
        equatorial_xyz = self.get_heliocentric_equatorial_xyz(jd_utc, code)
        return self.equatorial_to_ecliptic(equatorial_xyz)

    def equatorial_to_ecliptic(self, input_xyz, backwards=False):
        """
        Convert a cartesian vector from mean equatorial to mean ecliptic.
        backwards=True converts backwards, from ecliptic to equatorial.

        input:
            input_xyz - np.array length 3
            backwards - boolean

        output:
            output_xyz - np.array length 3
        """
        sign = -1 if backwards else +1
        rotation = mpc.rotate_matrix(-mpc.Constants.ecl * sign)
        return np.dot(rotation, input_xyz.reshape(-1, 1)).flatten()

    def check_obsCode(self, obsCode=None, verbose=False):
        """
        Check whether a valid Observatory Code has been supplied.
        If None, use 500 (Geocentre).
        """
        if obsCode is None:
            return '500'
        # Observations with no ObsCode also fall back to the geocenter.
        if obsCode in ['XXX', '']:
            if verbose:
                print('Bad ObsCode. Will use geocenter.')
            return '500'
        if isinstance(obsCode, int):
            obsCode = str(obsCode)
        if len(obsCode) != 3:
            raise NotImplementedError(
                f"Bad Observatory Code!\n Observatory Code given: {obsCode}, must be a three digit number!" \
                "\nFor four digit Observatory Codes, please bug M. Alexandersen or M. Payne.")
        return obsCode
|
{"hexsha": "975bf77517f03c67185b9a3f7cad841a0d4f3d5a", "size": 3001, "ext": "py", "lang": "Python", "max_stars_repo_path": "cheby_checker/obs_pos.py", "max_stars_repo_name": "Smithsonian/cheby_checker", "max_stars_repo_head_hexsha": "ce1542e4b1b3303ac08ea823be1eaec06322fd48", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-03-05T15:20:30.000Z", "max_stars_repo_stars_event_max_datetime": "2020-03-05T15:20:30.000Z", "max_issues_repo_path": "cheby_checker/obs_pos.py", "max_issues_repo_name": "Smithsonian/cheby_checker", "max_issues_repo_head_hexsha": "ce1542e4b1b3303ac08ea823be1eaec06322fd48", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cheby_checker/obs_pos.py", "max_forks_repo_name": "Smithsonian/cheby_checker", "max_forks_repo_head_hexsha": "ce1542e4b1b3303ac08ea823be1eaec06322fd48", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-02-04T15:26:08.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-04T18:23:13.000Z", "avg_line_length": 34.8953488372, "max_line_length": 109, "alphanum_fraction": 0.5701432856, "include": true, "reason": "import numpy", "num_tokens": 633}
|
#!/usr/bin/env python
import math
import csv
import numpy as np
from floripy.mathutils import xform as tr
from floripy.mathutils.linalg import unitized
from .miura_sheet_trajectory import MiuraSheetTrajectory
def get_phi_theta(v):
    '''
    v: (3,) ndarray
    Returns phi and theta in degrees.
    phi: Angle measured from the y-axis of the projection on the xy-plane,
         -180 <= phi <= 180 (range of math.atan2).
    theta: Polar angle measured from the z-axis, 0 <= theta <= 180.
           (math.acos returns values in [0, pi]; the original comment's
           claim of -pi/2 <= theta <= pi/2 did not match the code.)
    '''
    # unitized() is a project helper; presumably returns v scaled to
    # unit length -- required for acos(vec[2]) to be well defined.
    vec = unitized(v)
    phi = math.atan2(vec[0], vec[1])
    theta = math.acos(vec[2])
    phi_deg = math.degrees(phi)
    theta_deg = math.degrees(theta)
    return phi_deg, theta_deg
def basic_data(fn_traj, fn_model, fn_data):
    """
    Read a Miura-sheet trajectory and write per-frame geometric data
    (angles, spans, orientation, centre of mass, direction vectors)
    to a CSV file.

    fn_traj  : trajectory file name
    fn_model : model file name
    fn_data  : output CSV file name
    """
    mt = MiuraSheetTrajectory(fn_traj, fn_model)
    num_frames = len(mt)
    print('Number of frames: ', num_frames)

    field_names = ['time', 'beta',
            'chord', 'chord_normalized',
            'span', 'span_normalized',
            'aspect_ratio', 'aspect_ratio_normalized',
            'theta_director', 'theta_codirector', 'theta_bidirector',
            'phi_director', 'phi_codirector', 'phi_bidirector',
            'roll', 'yaw', 'pitch',
            'comx', 'comy', 'comz',
            'directorx', 'directory', 'directorz',
            'codirectorx', 'codirectory', 'codirectorz',
            'bidirectorx', 'bidirectory', 'bidirectorz']

    with open(fn_data, 'w') as fh_data:
        writer = csv.DictWriter(fh_data, field_names)
        writer.writeheader()
        for frame in range(num_frames):
            # Progress indicator every 100 frames.
            if frame % 100 == 0:
                print('Frame: ', frame)
            frame_time, ms = mt.get_frame(frame)

            director = ms.director
            codirector = ms.codirector
            bidirector = ms.bidirector

            # Theta: angle measured from the z-axis.
            # Phi: angle measured from the y-axis of the projection
            # on the xy-plane.
            phi_director, theta_director = get_phi_theta(director)
            phi_codirector, theta_codirector = get_phi_theta(codirector)
            phi_bidirector, theta_bidirector = get_phi_theta(bidirector)

            # Body-frame Euler angles of the sheet orientation, in degrees.
            euler_body = np.rad2deg(tr.quat_to_euler(ms.orientation, seq='XYZ', world=False))
            roll, yaw, pitch = tuple(euler_body)
            comx, comy, comz = tuple(ms.com)

            row = {
                'time': frame_time,
                'beta': math.degrees(ms.beta),
                'chord': ms.chord,
                'chord_normalized': ms.chord/ms.max_chord,
                'span': ms.span,
                'span_normalized': ms.span/ms.max_span,
                'aspect_ratio': ms.aspect_ratio,
                'aspect_ratio_normalized': ms.aspect_ratio/ms.max_aspect_ratio,
                'theta_director': theta_director,
                'theta_codirector': theta_codirector,
                'theta_bidirector': theta_bidirector,
                'phi_director': phi_director,
                'phi_codirector': phi_codirector,
                'phi_bidirector': phi_bidirector,
                'roll': roll,
                'yaw': yaw,
                'pitch': pitch,
                'comx': comx,
                'comy': comy,
                'comz': comz,
                'directorx': director[0],
                'directory': director[1],
                'directorz': director[2],
                'codirectorx': codirector[0],
                'codirectory': codirector[1],
                'codirectorz': codirector[2],
                'bidirectorx': bidirector[0],
                'bidirectory': bidirector[1],
                'bidirectorz': bidirector[2],
            }
            writer.writerow(row)
    mt.close()
|
{"hexsha": "08834223fcaad2875e4def020be85919bb2fb7e5", "size": 3755, "ext": "py", "lang": "Python", "max_stars_repo_path": "models/miura_sheet/analyze_traj.py", "max_stars_repo_name": "saridut/FloriPy", "max_stars_repo_head_hexsha": "0117d358b9c2362ea32ecf9ec719fdaed87d3e14", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "models/miura_sheet/analyze_traj.py", "max_issues_repo_name": "saridut/FloriPy", "max_issues_repo_head_hexsha": "0117d358b9c2362ea32ecf9ec719fdaed87d3e14", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "models/miura_sheet/analyze_traj.py", "max_forks_repo_name": "saridut/FloriPy", "max_forks_repo_head_hexsha": "0117d358b9c2362ea32ecf9ec719fdaed87d3e14", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.7113402062, "max_line_length": 86, "alphanum_fraction": 0.5805592543, "include": true, "reason": "import numpy", "num_tokens": 918}
|
(* Sections 2.1 through 2.3 *)
theory AToyList
imports Datatype
begin

(* A toy polymorphic list datatype: Nil is written [] and Cons is the
   right-associative infix operator # with priority 65. *)
datatype 'a list = Nil ("[]") | Cons 'a "'a list" (infixr "#" 65)

(* append one list to another; infix @ with priority 65 *)
primrec app :: "'a list \<Rightarrow> 'a list \<Rightarrow> 'a list" (infixr "@" 65) where
"([] @ ys) = ys" |
"(x # xs) @ ys = x # xs @ ys"

(* reverse a list by appending each head as a singleton at the end *)
primrec rev :: "'a list \<Rightarrow> 'a list" where
"rev [] = []" |
"rev (x # xs) = rev xs @ x # []"

(* [] is a right identity of append (left identity holds by definition);
   proved by structural induction on xs. *)
lemma app_Nil2 [simp]: "xs @ [] = xs"
apply (induct_tac xs)
apply auto
done

(* Append is associative. *)
lemma app_assoc [simp]: "(xs @ yz) @ zs = xs @ (yz @ zs)"
apply (induct_tac xs)
apply auto
done

(* rev distributes over append, swapping the order of the arguments. *)
lemma rev_app [simp]: "rev (xs @ ys) = rev ys @ rev xs"
apply (induct_tac xs)
apply auto
done

(* Main result: rev is an involution; relies on the simp lemmas above. *)
theorem rev_rev [simp]: "rev (rev xs) = xs"
apply (induct_tac xs)
apply auto
done

end
|
{"author": "spl", "repo": "isabelle-tutorial", "sha": "56ee8d748d6d639ea7238e5fbb9edce4330637f2", "save_path": "github-repos/isabelle/spl-isabelle-tutorial", "path": "github-repos/isabelle/spl-isabelle-tutorial/isabelle-tutorial-56ee8d748d6d639ea7238e5fbb9edce4330637f2/AToyList.thy"}
|
[STATEMENT]
lemma fps_Lcm:
assumes "A \<noteq> {}" "0 \<notin> A" "bdd_above (subdegree`A)"
shows "Lcm A = fps_X ^ (SUP f\<in>A. subdegree f)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Lcm A = fps_X ^ Sup (subdegree ` A)
[PROOF STEP]
proof (rule sym, rule LcmI)
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. \<And>a. a \<in> A \<Longrightarrow> a dvd fps_X ^ Sup (subdegree ` A)
2. \<And>c. (\<And>a. a \<in> A \<Longrightarrow> a dvd c) \<Longrightarrow> fps_X ^ Sup (subdegree ` A) dvd c
3. normalize (fps_X ^ Sup (subdegree ` A)) = fps_X ^ Sup (subdegree ` A)
[PROOF STEP]
fix f
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. \<And>a. a \<in> A \<Longrightarrow> a dvd fps_X ^ Sup (subdegree ` A)
2. \<And>c. (\<And>a. a \<in> A \<Longrightarrow> a dvd c) \<Longrightarrow> fps_X ^ Sup (subdegree ` A) dvd c
3. normalize (fps_X ^ Sup (subdegree ` A)) = fps_X ^ Sup (subdegree ` A)
[PROOF STEP]
assume "f \<in> A"
[PROOF STATE]
proof (state)
this:
f \<in> A
goal (3 subgoals):
1. \<And>a. a \<in> A \<Longrightarrow> a dvd fps_X ^ Sup (subdegree ` A)
2. \<And>c. (\<And>a. a \<in> A \<Longrightarrow> a dvd c) \<Longrightarrow> fps_X ^ Sup (subdegree ` A) dvd c
3. normalize (fps_X ^ Sup (subdegree ` A)) = fps_X ^ Sup (subdegree ` A)
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
f \<in> A
goal (3 subgoals):
1. \<And>a. a \<in> A \<Longrightarrow> a dvd fps_X ^ Sup (subdegree ` A)
2. \<And>c. (\<And>a. a \<in> A \<Longrightarrow> a dvd c) \<Longrightarrow> fps_X ^ Sup (subdegree ` A) dvd c
3. normalize (fps_X ^ Sup (subdegree ` A)) = fps_X ^ Sup (subdegree ` A)
[PROOF STEP]
from assms(3)
[PROOF STATE]
proof (chain)
picking this:
bdd_above (subdegree ` A)
[PROOF STEP]
have "bdd_above (subdegree ` A)"
[PROOF STATE]
proof (prove)
using this:
bdd_above (subdegree ` A)
goal (1 subgoal):
1. bdd_above (subdegree ` A)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
bdd_above (subdegree ` A)
goal (3 subgoals):
1. \<And>a. a \<in> A \<Longrightarrow> a dvd fps_X ^ Sup (subdegree ` A)
2. \<And>c. (\<And>a. a \<in> A \<Longrightarrow> a dvd c) \<Longrightarrow> fps_X ^ Sup (subdegree ` A) dvd c
3. normalize (fps_X ^ Sup (subdegree ` A)) = fps_X ^ Sup (subdegree ` A)
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
f \<in> A
bdd_above (subdegree ` A)
[PROOF STEP]
show "f dvd fps_X ^ (SUP f\<in>A. subdegree f)"
[PROOF STATE]
proof (prove)
using this:
f \<in> A
bdd_above (subdegree ` A)
goal (1 subgoal):
1. f dvd fps_X ^ Sup (subdegree ` A)
[PROOF STEP]
using assms(2)
[PROOF STATE]
proof (prove)
using this:
f \<in> A
bdd_above (subdegree ` A)
0 \<notin> A
goal (1 subgoal):
1. f dvd fps_X ^ Sup (subdegree ` A)
[PROOF STEP]
by (cases "f = 0") (auto simp: fps_dvd_iff intro!: cSUP_upper)
[PROOF STATE]
proof (state)
this:
f dvd fps_X ^ Sup (subdegree ` A)
goal (2 subgoals):
1. \<And>c. (\<And>a. a \<in> A \<Longrightarrow> a dvd c) \<Longrightarrow> fps_X ^ Sup (subdegree ` A) dvd c
2. normalize (fps_X ^ Sup (subdegree ` A)) = fps_X ^ Sup (subdegree ` A)
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<And>c. (\<And>a. a \<in> A \<Longrightarrow> a dvd c) \<Longrightarrow> fps_X ^ Sup (subdegree ` A) dvd c
2. normalize (fps_X ^ Sup (subdegree ` A)) = fps_X ^ Sup (subdegree ` A)
[PROOF STEP]
fix d
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<And>c. (\<And>a. a \<in> A \<Longrightarrow> a dvd c) \<Longrightarrow> fps_X ^ Sup (subdegree ` A) dvd c
2. normalize (fps_X ^ Sup (subdegree ` A)) = fps_X ^ Sup (subdegree ` A)
[PROOF STEP]
assume d: "\<And>f. f \<in> A \<Longrightarrow> f dvd d"
[PROOF STATE]
proof (state)
this:
?f \<in> A \<Longrightarrow> ?f dvd d
goal (2 subgoals):
1. \<And>c. (\<And>a. a \<in> A \<Longrightarrow> a dvd c) \<Longrightarrow> fps_X ^ Sup (subdegree ` A) dvd c
2. normalize (fps_X ^ Sup (subdegree ` A)) = fps_X ^ Sup (subdegree ` A)
[PROOF STEP]
from assms
[PROOF STATE]
proof (chain)
picking this:
A \<noteq> {}
0 \<notin> A
bdd_above (subdegree ` A)
[PROOF STEP]
obtain f where f: "f \<in> A" "f \<noteq> 0"
[PROOF STATE]
proof (prove)
using this:
A \<noteq> {}
0 \<notin> A
bdd_above (subdegree ` A)
goal (1 subgoal):
1. (\<And>f. \<lbrakk>f \<in> A; f \<noteq> 0\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
f \<in> A
f \<noteq> 0
goal (2 subgoals):
1. \<And>c. (\<And>a. a \<in> A \<Longrightarrow> a dvd c) \<Longrightarrow> fps_X ^ Sup (subdegree ` A) dvd c
2. normalize (fps_X ^ Sup (subdegree ` A)) = fps_X ^ Sup (subdegree ` A)
[PROOF STEP]
show "fps_X ^ (SUP f\<in>A. subdegree f) dvd d"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. fps_X ^ Sup (subdegree ` A) dvd d
[PROOF STEP]
proof (cases "d = 0")
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. d = 0 \<Longrightarrow> fps_X ^ Sup (subdegree ` A) dvd d
2. d \<noteq> 0 \<Longrightarrow> fps_X ^ Sup (subdegree ` A) dvd d
[PROOF STEP]
assume "d \<noteq> 0"
[PROOF STATE]
proof (state)
this:
d \<noteq> 0
goal (2 subgoals):
1. d = 0 \<Longrightarrow> fps_X ^ Sup (subdegree ` A) dvd d
2. d \<noteq> 0 \<Longrightarrow> fps_X ^ Sup (subdegree ` A) dvd d
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
d \<noteq> 0
goal (2 subgoals):
1. d = 0 \<Longrightarrow> fps_X ^ Sup (subdegree ` A) dvd d
2. d \<noteq> 0 \<Longrightarrow> fps_X ^ Sup (subdegree ` A) dvd d
[PROOF STEP]
from d
[PROOF STATE]
proof (chain)
picking this:
?f \<in> A \<Longrightarrow> ?f dvd d
[PROOF STEP]
have "\<And>f. f \<in> A \<Longrightarrow> f \<noteq> 0 \<Longrightarrow> f dvd d"
[PROOF STATE]
proof (prove)
using this:
?f \<in> A \<Longrightarrow> ?f dvd d
goal (1 subgoal):
1. \<And>f. \<lbrakk>f \<in> A; f \<noteq> 0\<rbrakk> \<Longrightarrow> f dvd d
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
\<lbrakk>?f \<in> A; ?f \<noteq> 0\<rbrakk> \<Longrightarrow> ?f dvd d
goal (2 subgoals):
1. d = 0 \<Longrightarrow> fps_X ^ Sup (subdegree ` A) dvd d
2. d \<noteq> 0 \<Longrightarrow> fps_X ^ Sup (subdegree ` A) dvd d
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
d \<noteq> 0
\<lbrakk>?f \<in> A; ?f \<noteq> 0\<rbrakk> \<Longrightarrow> ?f dvd d
[PROOF STEP]
have "subdegree d \<ge> (SUP f\<in>A. subdegree f)"
[PROOF STATE]
proof (prove)
using this:
d \<noteq> 0
\<lbrakk>?f \<in> A; ?f \<noteq> 0\<rbrakk> \<Longrightarrow> ?f dvd d
goal (1 subgoal):
1. Sup (subdegree ` A) \<le> subdegree d
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
d \<noteq> 0
\<lbrakk>?f \<in> A; ?f \<noteq> 0\<rbrakk> \<Longrightarrow> ?f dvd d
A \<noteq> {}
0 \<notin> A
bdd_above (subdegree ` A)
goal (1 subgoal):
1. Sup (subdegree ` A) \<le> subdegree d
[PROOF STEP]
by (intro cSUP_least) (auto simp: fps_dvd_iff)
[PROOF STATE]
proof (state)
this:
Sup (subdegree ` A) \<le> subdegree d
goal (2 subgoals):
1. d = 0 \<Longrightarrow> fps_X ^ Sup (subdegree ` A) dvd d
2. d \<noteq> 0 \<Longrightarrow> fps_X ^ Sup (subdegree ` A) dvd d
[PROOF STEP]
with \<open>d \<noteq> 0\<close>
[PROOF STATE]
proof (chain)
picking this:
d \<noteq> 0
Sup (subdegree ` A) \<le> subdegree d
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
d \<noteq> 0
Sup (subdegree ` A) \<le> subdegree d
goal (1 subgoal):
1. fps_X ^ Sup (subdegree ` A) dvd d
[PROOF STEP]
by (simp add: fps_dvd_iff)
[PROOF STATE]
proof (state)
this:
fps_X ^ Sup (subdegree ` A) dvd d
goal (1 subgoal):
1. d = 0 \<Longrightarrow> fps_X ^ Sup (subdegree ` A) dvd d
[PROOF STEP]
qed simp_all
[PROOF STATE]
proof (state)
this:
fps_X ^ Sup (subdegree ` A) dvd d
goal (1 subgoal):
1. normalize (fps_X ^ Sup (subdegree ` A)) = fps_X ^ Sup (subdegree ` A)
[PROOF STEP]
qed simp_all
|
{"llama_tokens": 3506, "file": null, "length": 33}
|
/* Copyright © 2017 Apple Inc. All rights reserved.
*
* Use of this source code is governed by a BSD-3-clause license that can
* be found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
*/
#ifndef TURI_SERIALIZE_UNORDERED_MAP_HPP
#define TURI_SERIALIZE_UNORDERED_MAP_HPP
#include <boost/unordered_map.hpp>
#include <core/storage/serialization/iarchive.hpp>
#include <core/storage/serialization/oarchive.hpp>
#include <core/storage/serialization/iterator.hpp>
namespace turi {
namespace archive_detail {
/** Serializes a boost::unordered_map<T, U>.
 *  Writes the map as a length-prefixed sequence of (key, value) pairs
 *  via serialize_iterator, the format read back by deserialize_impl. */
template <typename OutArcType, typename T, typename U>
struct serialize_impl<OutArcType, boost::unordered_map<T,U>, false > {
  // oarc: destination archive; vec: the map to write out.
  static void exec(OutArcType& oarc,
                   const boost::unordered_map<T,U>& vec){
    serialize_iterator(oarc,
                       vec.begin(), vec.end(), vec.size());
  }
};
/** Deserializes a boost::unordered_map<T, U> previously written as a
 *  length-prefixed sequence of (key, value) pairs. Any existing
 *  contents of the destination map are discarded first. */
template <typename InArcType, typename T, typename U>
struct deserialize_impl<InArcType, boost::unordered_map<T,U>, false > {
  static void exec(InArcType& iarc, boost::unordered_map<T,U>& vec){
    vec.clear();
    // Read the element count written by the serializer.
    size_t num_entries = 0;
    iarc >> num_entries;
    // Read each (key, value) pair and insert it into the map.
    for (size_t i = 0; i < num_entries; ++i){
      std::pair<T, U> entry;
      iarc >> entry;
      vec[entry.first] = entry.second;
    }
  }
};
} // archive_detail
} // turicreate
#if defined(__cplusplus) && __cplusplus >= 201103L
#include <unordered_map>
namespace turi {
namespace archive_detail {
/** Serializes a std::unordered_map<T, U> (C++11 path).
 *  Same wire format as the boost::unordered_map specialization above:
 *  a length prefix followed by each (key, value) pair. */
template <typename OutArcType, typename T, typename U>
struct serialize_impl<OutArcType, std::unordered_map<T,U>, false > {
  // oarc: destination archive; vec: the map to write out.
  static void exec(OutArcType& oarc,
                   const std::unordered_map<T,U>& vec){
    serialize_iterator(oarc,
                       vec.begin(), vec.end(), vec.size());
  }
};
/** Deserializes a std::unordered_map<T, U> (C++11 path) from the
 *  length-prefixed (key, value) sequence produced by serialize_impl.
 *  The destination map is cleared before any elements are inserted. */
template <typename InArcType, typename T, typename U>
struct deserialize_impl<InArcType, std::unordered_map<T,U>, false > {
  static void exec(InArcType& iarc, std::unordered_map<T,U>& vec){
    vec.clear();
    // Element count stored ahead of the pair stream.
    size_t count = 0;
    iarc >> count;
    // Pull each pair from the archive and store it under its key.
    for (size_t k = 0; k < count; ++k){
      std::pair<T, U> kv;
      iarc >> kv;
      vec[kv.first] = kv.second;
    }
  }
};
} // archive_detail
} // turicreate
#endif
#endif
|
{"hexsha": "cb26278575a258fb32e9fafdf978edb24b38dff5", "size": 2490, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/core/storage/serialization/unordered_map.hpp", "max_stars_repo_name": "Bpowers4/turicreate", "max_stars_repo_head_hexsha": "73dad213cc1c4f74337b905baea2b3a1e5a0266c", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 11356.0, "max_stars_repo_stars_event_min_datetime": "2017-12-08T19:42:32.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T16:55:25.000Z", "max_issues_repo_path": "src/core/storage/serialization/unordered_map.hpp", "max_issues_repo_name": "Bpowers4/turicreate", "max_issues_repo_head_hexsha": "73dad213cc1c4f74337b905baea2b3a1e5a0266c", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 2402.0, "max_issues_repo_issues_event_min_datetime": "2017-12-08T22:31:01.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-28T19:25:52.000Z", "max_forks_repo_path": "src/core/storage/serialization/unordered_map.hpp", "max_forks_repo_name": "Bpowers4/turicreate", "max_forks_repo_head_hexsha": "73dad213cc1c4f74337b905baea2b3a1e5a0266c", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1343.0, "max_forks_repo_forks_event_min_datetime": "2017-12-08T19:47:19.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-26T11:31:36.000Z", "avg_line_length": 27.6666666667, "max_line_length": 86, "alphanum_fraction": 0.6570281124, "num_tokens": 640}
|
import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
from sources.pdesolver.finite_differences_method.FiniteDifferencesSolver_V2 import GridConfiguration, \
ConstantGridValueProvider, FiniteDifferencesMethod4
from sources.pdesolver.finite_differences_method.boundaryconditions import RectangularBoundaryCondition
from sources.pdesolver.finite_differences_method.geometry import Geometry
from sources.experiments.charges_generators import make_single_charge, make_double_charge, make_n_fold_charge
from sources.pdesolver.finite_differences_method.rectangle import Rectangle
from keras import layers, optimizers, losses
from keras import models
import keras.backend as K
def calc_charge_weight_matrix_orig(geometry, charges):
    """
    Build a distance map for the FIRST charge over a doubled grid:
    each cell holds the Euclidean distance from that cell to the charge.

    NOTE(review): both offsets use geometry.numX/2 (the y-offset does not
    use numY) -- preserved here; verify against a non-square grid.
    """
    matrix = np.zeros(shape=(len(geometry.Y)*2, len(geometry.X)*2))
    first = charges.chargesList[0]
    cx = first[0] + geometry.numX/2
    cy = first[1] + geometry.numX/2
    for r in range(geometry.numY*2):
        for c in range(geometry.numX*2):
            matrix[r, c] = np.sqrt((cx - c)**2 + (cy - r)**2)
    return matrix
def calc_charge_weight_matrix(geometry, charges, index=0):
    """
    Build an inverse-distance weight map 1 / (1.2 + dist) for the charge
    at position `index` over a doubled grid.  The 1.2 offset keeps the
    value finite at the charge location itself.

    NOTE(review): as in the _orig variant, geometry.numX/2 is used for
    both axis offsets -- preserved; confirm for non-square grids.
    """
    matrix = np.zeros(shape=(len(geometry.Y)*2, len(geometry.X)*2))
    charge = charges.chargesList[index]
    cx = charge[0] + geometry.numX/2
    cy = charge[1] + geometry.numX/2
    for r in range(geometry.numY*2):
        for c in range(geometry.numX*2):
            matrix[r, c] = 1./(1.2 + np.sqrt((cx - c)**2 + (cy - r)**2))
    return matrix
def calc_charge_weight_matrix_multi(geometry, charges):
    """
    Superpose inverse-distance weight maps 1 / (1.2 + dist) for ALL
    charges over a doubled grid.  Negative charges (charge[2] < 0)
    contribute positively, non-negative ones negatively.

    The original had a dead `multiplier = 1` initialization and a dead
    trailing `multiplier *= -1`: multiplier was unconditionally
    reassigned from the charge sign at the top of every iteration, so
    both statements had no effect and are removed here.
    """
    matrix = np.zeros(shape=(len(geometry.Y)*2, len(geometry.X)*2))
    for charge in charges.chargesList:
        x = charge[0] + geometry.numX/2
        y = charge[1] + geometry.numY/2
        # Sign of the contribution is decided solely by the charge sign.
        sign = 1.0 if charge[2] < 0 else -1.0
        for row in range(0, geometry.numY*2):
            for col in range(0, geometry.numX*2):
                matrix[row, col] += sign/(1.2 + np.sqrt((x-col)**2 + (y-row)**2))
    return matrix
def plotChargesWeightMatrix(matrix, geometry=None):
    """
    Surface-plot a charge weight matrix over a grid.

    matrix   : 2D array of weights to plot.
    geometry : optional Geometry providing the X/Y mesh; when None the
               module-level global `g` is used, matching the original
               behavior (the original always read the global, which
               silently broke if `g` did not match `matrix`'s grid).
    """
    if geometry is None:
        geometry = g  # backward-compatible fallback to the global grid
    plt.figure()
    ax = plt.axes(projection='3d')
    ax.plot_surface(geometry.X, geometry.Y, matrix, cmap=cm.coolwarm,
                    linewidth=0, antialiased=False)
    plt.show()
def plotMatrixList(g, matrices):
    """
    Plot a list of matrices as side-by-side 3D surfaces over grid g.

    g        : geometry providing the X/Y mesh for every surface.
    matrices : list of 2D arrays; NOTE(review): must contain at least
               two entries -- the z-limit of the second-to-last subplot
               is rescaled against the maximum of the last matrix.
    """
    fig = plt.figure()
    axes = []
    for i,m in enumerate(matrices):
        # One 3D subplot per matrix, laid out in a single row.
        ax = fig.add_subplot(1, len(matrices), i+1, projection='3d')
        axes.append(ax)
        ax.plot_surface(g.X, g.Y, m, cmap=cm.coolwarm,
                    linewidth=0, antialiased=False)
    # scale second last element to last element
    maxZ = np.max(matrices[-1])
    axes[-2].set_zlim(0.0, maxZ*1.1)
    plt.show()
def create_finite_differences_configuration():
    """
    Build the classic 5-point Laplacian stencil: unit weights on the
    four nearest neighbours and -4 at the centre.
    """
    gridConfig = GridConfiguration()
    # Four nearest-neighbour offsets, each with weight 1.
    for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1)):
        gridConfig.add(ConstantGridValueProvider(1.0), dx, dy)
    # Centre weight of -4 completes the discrete Laplacian.
    gridConfig.add(ConstantGridValueProvider(-4.0), 0, 0)
    return gridConfig
def solvePDE(geometry, charges):
    """
    Solve the finite-difference system for the given geometry and
    charge configuration and return the resulting value grid.

    Removed from the original: an `index` counter and a start/duration
    timing pair that were computed but never used or printed, and the
    confusing re-aliasing of `geometry` as `g`.
    """
    boundaryCondition = RectangularBoundaryCondition(geometry)
    gridConfig = create_finite_differences_configuration()
    fdm = FiniteDifferencesMethod4(geometry, boundaryCondition, gridConfig, charges)
    fdm.solve()
    return fdm.values
def saveModel(model, filename):
    """
    Persist a Keras model: the architecture as JSON to
    ``filename + '.json'`` and the weights to ``filename + '.h5'``.

    model    : object exposing to_json() and save_weights(path).
    filename : path prefix without extension.
    """
    model_json = model.to_json()
    # The with-statement closes the file on exit; the explicit
    # json_file.close() inside the block in the original was redundant.
    with open(filename + '.json', "w") as json_file:
        json_file.write(model_json)
    model.save_weights(filename + '.h5')
def learn(input, target):
    """
    Train a small all-convolutional network mapping charge-weight
    images to PDE solution grids.

    input  : ndarray, shape (count, width, height, channels).
    target : ndarray, shape (count, out_width, out_height, 1).

    The samples are split 60/20/20 into train/validation/test.
    Returns (history, test_input, test_predicted, test_result).

    Cleaned up from the original: a duplicated
    `from keras.optimizers import SGD`, an unused `test_count` local,
    two unused local closures (`coeff`/`my_loss`), and many
    commented-out architecture experiments were removed.  The active
    architecture, optimizer, loss and training schedule are unchanged.
    """
    count = len(input)
    train_count = int(count * 0.6)
    validation_count = int(count * 0.2)

    train_input = input[:train_count]
    train_result = target[:train_count]
    validation_input = input[train_count:train_count + validation_count]
    validation_result = target[train_count:train_count + validation_count]
    test_input = input[train_count + validation_count:]
    test_result = target[train_count + validation_count:]

    channelCount = input.shape[-1]
    height = input.shape[-2]
    width = input.shape[-3]

    # All-convolutional model: a large 31x31 receptive field followed
    # by 3x3/1x1 mixing layers down to a single output channel.
    model = models.Sequential()
    model.add(layers.Conv2D(16, (31, 31), activation='relu', input_shape=(width, height, channelCount)))
    model.add(layers.Conv2D(128, (3, 3), activation='relu'))
    model.add(layers.Conv2D(64, (1, 1), activation='relu'))
    model.add(layers.Conv2D(1, (1, 1), activation='relu'))
    model.summary()

    from keras.optimizers import SGD
    sgd = SGD()  # default hyper-parameters (lr, momentum, decay)
    lossFn = losses.mean_squared_logarithmic_error
    model.compile(optimizer=sgd, loss=lossFn,
                  metrics=['mse'])

    epochs = 10
    history = model.fit(x=train_input, y=train_result, epochs=epochs,
                        batch_size=1,
                        validation_data=(validation_input, validation_result)
                        )
    test_predicted = model.predict(test_input)
    return history, test_input, test_predicted, test_result
def plotHistory(history):
    """
    Plot training/validation accuracy (top) and loss (bottom) curves
    from a Keras fit history object.
    """
    hist = history.history
    acc, val_acc = hist['acc'], hist['val_acc']
    loss, val_loss = hist['loss'], hist['val_loss']
    epochs = range(1, len(acc) + 1)

    plt.figure(1)

    # Top panel: accuracy.
    plt.subplot(211)
    plt.plot(epochs, acc, 'bo', label='Training acc')
    plt.plot(epochs, val_acc, 'b', label='Validation acc')
    plt.title('Training and validation accuracy')
    plt.legend()

    # Bottom panel: loss.
    plt.subplot(212)
    plt.plot(epochs, loss, 'bo', label='Training loss')
    plt.plot(epochs, val_loss, 'b', label='Validation loss')
    plt.title('Training and validation loss')
    plt.legend()

    plt.show()
if __name__ == '__main__':
    # Grid setup: a 32x32 unit-spaced rectangle.
    delta = 1.0
    gridWidth = 32.0
    gridHeight = 32.0
    rect = Rectangle(0, 0, gridWidth, gridHeight)
    g = Geometry(rect, delta)
    # Number of charges per sample; one input channel per charge.
    chargeCount = 3
    channelChargeCount = int(chargeCount)
    # Number of training samples to generate.
    count = 400
    # Keep charges at least 4 units away from each grid edge.
    x_areaWithoutCharge = 4.0
    y_areaWithoutCharge = 4.0
    x_positions = np.linspace(x_areaWithoutCharge, gridWidth-x_areaWithoutCharge, gridWidth-2*x_areaWithoutCharge)
    y_positions = np.linspace(y_areaWithoutCharge, gridHeight-y_areaWithoutCharge, gridHeight-2*y_areaWithoutCharge)
    # Random normalized charge coordinates, count*chargeCount of each.
    x = np.random.choice(x_positions, size=count*chargeCount)/gridWidth
    y = np.random.choice(y_positions, size=count*chargeCount)/gridHeight
    # Inputs live on a doubled grid; targets on the original grid.
    charges = np.zeros(shape=(count,g.numX*2,g.numY*2, channelChargeCount))
    results = np.zeros(shape=(count,g.numX,g.numY))
    for i in range(0, count):
        #charge = make_single_charge(g, x[i], y[i], -10)
        charge = make_n_fold_charge(g, x, y, i, chargeCount, -10, variateSign=False)
        # One weight-matrix channel per charge.
        charges_weight_matrix = []
        for channel in range(0, chargeCount):
            charges_weight_matrix.append(calc_charge_weight_matrix(g, charge, channel))
        #charges_weight_matrix.append(calc_charge_weight_matrix_multi(g, charge))
        charges_stacked = np.stack(charges_weight_matrix, axis=-1)
        charges[i] = charges_stacked
        # Numeric reference solution for this charge configuration.
        result_matrix = solvePDE(g, charge)
        results[i] = result_matrix
    print(charges.shape)
    print(results.shape)
    # Reshape to (samples, H, W, channels) as expected by learn().
    c = charges.reshape((count,int(gridWidth*2),int(gridHeight*2),channelChargeCount))
    r = results.reshape((count,int(gridWidth),int(gridHeight),1))
    #c1 = np.min(c)
    #r1 = np.min(r)
    #c -= c1
    #r -= r1
    #c1 = np.min(c)
    #r1 = np.min(r)
    # Normalize inputs and targets to [.., 1] by their global maxima.
    c = c / np.max(c)
    r = r / np.max(r)
    ma = np.max(c)
    mi = np.min(c)
    history, test_input, test_predicted, test_result = learn(c,r)
    #plotHistory(history)
    #c1 = c[:, 16:48,16:48, :]
    # Central crop of the doubled-grid input, matching the target grid.
    ct = test_input[:, int(gridWidth/2):int(gridWidth/2*3), int(gridWidth/2):int(gridWidth/2*3), :]
    #c1 *= -1
    #m = np.min(c1, axis=(1,2))[0]
    #c1 -= m
    #plotMatrixList(g, [r[5,:,:,0], c1[5,:,:,0]])
    # Inspect one test sample: prediction error vs. numeric solution.
    idx = 5
    #print(x[i], y[i], (int)(len(g.X) * x[idx]),(int)(len(g.Y) * y[idx]))
    #test_predicted
    #max_numeric_solution = np.max(test_result[idx,:,:,0])
    #max_prediced_solution = np.max(test_predicted[idx,:,:,0])
    #scale_factor = max_numeric_solution / max_prediced_solution
    #test_predicted[idx, :, :, 0] *= scale_factor
    #plotMatrixList(g, [r[idx,:,:,0], c1[idx,:,:,0]])
    #plotMatrixList(g, [ct[idx,:,:,0], test_predicted[idx,:,:,0], test_result[idx,:,:,0]])
    #idx+=1
    diff = test_predicted[idx,:,:,0] - test_result[idx,:,:,0]
    plotMatrixList(g, [diff, test_predicted[idx,:,:,0], test_result[idx,:,:,0]])
|
{"hexsha": "e1396e76d12a4e63a37841e8d4d310fcfb5a6249", "size": 13655, "ext": "py", "lang": "Python", "max_stars_repo_path": "sources/experiments/calc_charge_matrix.py", "max_stars_repo_name": "JohannOberleitner/pdesolver", "max_stars_repo_head_hexsha": "f01f83bde44e9f5aae424a4daa13219f986c5884", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "sources/experiments/calc_charge_matrix.py", "max_issues_repo_name": "JohannOberleitner/pdesolver", "max_issues_repo_head_hexsha": "f01f83bde44e9f5aae424a4daa13219f986c5884", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sources/experiments/calc_charge_matrix.py", "max_forks_repo_name": "JohannOberleitner/pdesolver", "max_forks_repo_head_hexsha": "f01f83bde44e9f5aae424a4daa13219f986c5884", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.9342105263, "max_line_length": 116, "alphanum_fraction": 0.6484071769, "include": true, "reason": "import numpy", "num_tokens": 3957}
|
!*******************************************************************
!                                                                  *
      SUBROUTINE NINE(J1,J2,J3,L1,L2,L3,K1,K2,K3,I,INN,AA)
!
!                                                                  *
!     THIS PACKAGE DETERMINES THE VALUES OF 9j COEFFICIENT         *
!                                                                  *
!     | J1/2  J2/2  J3/2 |                                         *
!     | L1/2  L2/2  L3/2 |                                         *
!     | K1/2  K2/2  K3/2 |                                         *
!                                                                  *
!     All angular-momentum arguments are twice their physical      *
!     value, so half-integer momenta are represented exactly.      *
!                                                                  *
!     I   = 1 : only test the six triangle conditions; AA is not   *
!               modified.  INN = 1 if the symbol may be nonzero,   *
!               INN = 0 if some triangle condition fails.          *
!     I /= 1 : compute the value of the 9j symbol into AA and      *
!               set INN = 1.                                       *
!                                                                  *
!     Written by G. Gaigalas,                                      *
!     Vilnius,  Lithuania                             March 1995   *
!     Transform to fortran 90/95 by G. Gaigalas       December 2012*
!     The last modification made by G. Gaigalas       October 2017 *
!                                                                  *
!*******************************************************************
!
!-----------------------------------------------
!   M o d u l e s
!-----------------------------------------------
      USE vast_kind_param, ONLY: DOUBLE
      USE CONS_C, ONLY: ZERO
!-----------------------------------------------
!   I n t e r f a c e   B l o c k s
!-----------------------------------------------
      USE ittk_I
      USE nine0_I
      USE sixj_I
      IMPLICIT NONE
!-----------------------------------------------
!   D u m m y   A r g u m e n t s
!-----------------------------------------------
      INTEGER :: J1, J2, J3, L1, L2, L3, K1, K2, K3
      INTEGER, INTENT(IN) :: I
      INTEGER, INTENT(OUT) :: INN
      REAL(DOUBLE) :: AA
!-----------------------------------------------
!   L o c a l   V a r i a b l e s
!-----------------------------------------------
      INTEGER :: N1, N2, N3, N4, N5, N6, MAX_, MIN_, IX
      REAL(DOUBLE) :: S1, S2, S3, X
!-----------------------------------------------
      IF (I == 1) THEN
!   Selection-rule mode: check the triangle condition (via ITTK) for
!   every row and every column of the 9j symbol.
         INN = 0
         IF (ITTK(J1,J2,J3) == 0) RETURN
         IF (ITTK(L1,L2,L3) == 0) RETURN
         IF (ITTK(K1,K2,K3) == 0) RETURN
         IF (ITTK(J1,L1,K1) == 0) RETURN
         IF (ITTK(J2,L2,K2) == 0) RETURN
         IF (ITTK(J3,L3,K3) == 0) RETURN
         INN = 1
         RETURN
      ENDIF
      IF (J1*J2*J3*L1*L2*L3*K1*K2*K3 == 0) THEN
!   At least one momentum is zero: delegate to NINE0, the simplified
!   routine for a 9j symbol containing a zero argument.
         INN = 1
         CALL NINE0 (J1, J2, J3, L1, L2, L3, K1, K2, K3, AA)
      ELSE
!   General case: expand the 9j symbol as a single sum over an
!   intermediate momentum IX of a product of three 6j symbols.  The
!   summation bounds come from the triangle inequalities; the step of
!   2 reflects the doubled-integer representation.
!   NOTE(review): the pairs (N1,N6), (N2,N4) and (N3,N5) below are
!   identical expressions, so three of the six bounds are redundant
!   but harmless.
         N1 = IABS(J1 - K3)
         N2 = IABS(L3 - J2)
         N3 = IABS(L1 - K2)
         N4 = IABS(J2 - L3)
         N5 = IABS(K2 - L1)
         N6 = IABS(J1 - K3)
         MAX_ = MAX0(N1,N2,N3,N4,N5,N6)
         N1 = J1 + K3
         N2 = L3 + J2
         N3 = J2 + L3
         N4 = K2 + L1
         N5 = J1 + K3
         N6 = L1 + K2
         MIN_ = MIN0(N1,N2,N3,N4,N5,N6)
         INN = 1
         AA = ZERO
         DO IX = MAX_, MIN_, 2
            CALL SIXJ (J1, J2, J3, L3, K3, IX, 0, S1)
            CALL SIXJ (L1, L2, L3, J2, IX, K2, 0, S2)
            CALL SIXJ (K1, K2, K3, IX, J1, L1, 0, S3)
!   Weight (IX + 1) is (2x + 1) in the doubled representation; the
!   sign alternates with IX — presumably the (-1)^(2x) phase of the
!   standard 6j-expansion of the 9j symbol (confirm against the
!   reference formula).
            X = S1*S2*S3*DBLE(IX + 1)
            IF (MOD(IX,2) /= 0) X = -X
            AA = X + AA
         END DO
      ENDIF
      RETURN
      END SUBROUTINE NINE
|
{"hexsha": "0061219f3a7a1f0af4bb6a28f13bd5ea8ffc83c9", "size": 3293, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/lib/librang90/nine.f90", "max_stars_repo_name": "sylas/grasp-continuum", "max_stars_repo_head_hexsha": "f5e2fb18bb2bca4f715072190bf455fba889320f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 33, "max_stars_repo_stars_event_min_datetime": "2019-03-10T04:00:32.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-08T22:01:15.000Z", "max_issues_repo_path": "src/lib/librang90/nine.f90", "max_issues_repo_name": "sylas/grasp-continuum", "max_issues_repo_head_hexsha": "f5e2fb18bb2bca4f715072190bf455fba889320f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 65, "max_issues_repo_issues_event_min_datetime": "2019-03-07T17:56:32.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-16T16:45:24.000Z", "max_forks_repo_path": "src/lib/librang90/nine.f90", "max_forks_repo_name": "sylas/grasp-continuum", "max_forks_repo_head_hexsha": "f5e2fb18bb2bca4f715072190bf455fba889320f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 15, "max_forks_repo_forks_event_min_datetime": "2019-03-10T04:00:43.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-23T02:06:40.000Z", "avg_line_length": 38.2906976744, "max_line_length": 68, "alphanum_fraction": 0.3006377164, "num_tokens": 1006}
|
\documentclass{article}
\usepackage[T1]{fontenc}
\usepackage[utf8]{inputenc}
\usepackage{newtxtext,newtxmath}
\usepackage{microtype}
\usepackage{amsmath,amsfonts,amssymb}
\usepackage{microtype}
\usepackage{hyperref}
\usepackage{bookmark}
\begin{document}
I, William C. Dawn, originally prepared this document for configuring my Lenovo ThinkPad x220 experiment(s).
Information is also provided relating to ``\verb|dawnsvr|'' which was some sort of HP monstrosity.
This is a living document.
I update it as I learn more information.
At any point run \verb|lsblk| to see the status of block drives on the system.
\section{Make Arch Bootable USB}
\begin{enumerate}
\item Download latest Arch Linux iso\\
https://www.archlinux.org/download/
\item Do \emph{NOT} mount the drive. It is also a good idea to format the drive to FAT/VFAT.
\item Use \verb|dd| (``Data Dump'' or ``Disk Destroyer'') to make the arch live USB.
If \verb|/dev/sdb| is the usb drive:
\begin{verbatim}
$ sudo dd if=/path/to/archlinux.iso of=/dev/sdb status="progress"
\end{verbatim}
\end{enumerate}
\section{Boot from Live USB.}
\begin{itemize}
\item Ethernet will need to be connected at boot.
\item Go ahead and boot from the Live USB. Use F12 on the Lenovo ThinkPad x220.
\item It's a good idea to check ping to make sure you have internet because you'll need it.
\item Hook up an ethernet cable.
\begin{verbatim}
# ping archlinux.org
\end{verbatim}
\item If the ping fails, run \verb|dhcpcd| to get an ip address.
\end{itemize}
\section{BIOS Notes.}
\begin{itemize}
\item Much of the same applies for a BIOS only system (like the old HP desktop machine I use called \verb|dawnsvr|).
\item The differences are best summarized on the Arch Linux wiki: ``A BIOS boot partition is only required when using GRUB for BIOS booting from a GPT disk.
The partition has nothing to do with \verb|/boot| and it must not be formatted with a filesystem or mounted.''
\item The following are exceptions:
\begin{itemize}
\item Do \textbf{NOT} format \verb|/dev/sda1|. The BIOS partition must contain no filesystem.
\item Do \textbf{NOT} mount \verb|/dev/sda1|.
\item When it comes to install a boot loader.
\begin{verbatim}
# grub-install /dev/sda
# grub-mkconfig -o /boot/grub/grub.cfg
\end{verbatim}
\item And that should do it...
\end{itemize}
\end{itemize}
\section{Make Drive Partitions.}
\begin{itemize}
\item We will assume three partitions.
Mostly just boot \& root.
\begin{enumerate}
\item \verb|/boot| (boot for efi)
\item \verb|SWAP| to allow for system suspend.
\begin{itemize}
\item \verb|SWAP| partition must be $1.5\times$ RAM (e.g.~12GB).
\item This is necessary to allow all RAM to write to disk for suspend.
\end{itemize}
\item \verb|/| (root)
\end{enumerate}
\item This also assumes that we're installing on a bare-metal device with UEFI hardware.
This is based on the Lenovo ThinkPad x220 that I bought on eBay.
\item Use \verb|gdisk| to accomplish this. Run:
\begin{verbatim}
# gdisk /dev/sda
\end{verbatim}
If \verb|/dev/sda| is the main hard drive disk.
\item If the drive has partitions on it, it is first necessary to delete existing partitions.
This can be done within \verb|gdisk|.
\item It may be a good idea to write a new partition table by beginning with the \verb|o| option in \verb|gdisk|.
\item When you run \verb|gdisk| you'll be led through a series of prompts.
Hypothetically, these are some recommended partition sizes and drive types (specified within \verb|gdisk|).
\begin{enumerate}
\item \verb|/boot|, 1 GB, EFI System
\item \verb|SWAP|, 12 GB, Linux Swap
\item \verb|/|, remainder, Linux System
\end{enumerate}
\end{itemize}
\section{Format the Partitions.}
\begin{itemize}
\item Drives are formatted to the default linux file system \verb|ext4|.
Except for the UEFI \verb|/boot| partition which will be formatted in \verb|FAT|.
\item Don't sweat file systems too much.
It's a waste of time.
\item If you do want to waste time, some people seem to think \verb|btrfs| is better.
It is certainly newer, but it is also less stable.
It does not do journaling so it decreases the number of write-to-disk operations.
\item \verb|SWAP| partition must be established specially.
\item Format each of the partitions as follows:
\begin{enumerate}
\item \verb|/boot| \textbf{NOTE} The efi partition must be FAT.
\begin{verbatim}
# mkfs.fat -F32 /dev/sda1
\end{verbatim}
\item \verb|SWAP|
\begin{verbatim}
# mkswap /dev/sda2
# swapon /dev/sda2
\end{verbatim}
\item \verb|/|
\begin{verbatim}
# mkfs.ext4 /dev/sda3
\end{verbatim}
\end{enumerate}
\end{itemize}
\section{Mount the Partitions.}
\begin{itemize}
\item Now we have made and formatted the partitions.
We're going to mount them at \verb|/mnt| because that's what people do.
\item Intermediately, I have to pause to make directories where I can subsequently mount drives.
\begin{enumerate}
\item \verb|/|
\end{enumerate}
\begin{verbatim}
# mount /dev/sda3 /mnt
\end{verbatim}
\begin{enumerate}
\item \verb|/boot|
\end{enumerate}
\begin{verbatim}
# mkdir /mnt/boot
# mount /dev/sda1 /mnt/boot
\end{verbatim}
\end{itemize}
\section{Install Arch Linux.}
\begin{itemize}
\item This is done using the \verb|pacstrap| command.
\item Install \verb|base|, \verb|base-devel|, and \verb|vim| packages because we'll probably need them all anyway.
We also need \verb|intel-ucode| or else we can't boot.
\begin{verbatim}
# pacstrap /mnt base base-devel vim intel-ucode linux linux-firmware networkmanager
\end{verbatim}
\end{itemize}
\section{Make fstab (File System TABle)}
\begin{itemize}
\item We want the \verb|-U| option to use UUIDs.
\begin{verbatim}
# genfstab -U /mnt > /mnt/etc/fstab
\end{verbatim}
\item You need to check after the command runs to make sure it did what you thought it was going to do.
\begin{verbatim}
# vim /mnt/etc/fstab
\end{verbatim}
\item Use set \verb|noatime| for root (\verb|/|) and home (\verb|/home|).
\verb|noatime| decreases the number of times data is written to a disk and the SSD can benefit.
It prevents the last time a file was opened from being recorded.
An example from the arch wiki is provided.
\end{itemize}
\begin{verbatim}
# <device> <dir> <type> <options> <dump> <fsck>
/dev/sda1 /boot vfat defaults 0 1
/dev/sda2 none swap defaults 0 0
/dev/sda3 / ext4 noatime 0 1
\end{verbatim}
\section{Change root.}
\begin{itemize}
\item This is when we (mostly) have an operating system.
\begin{verbatim}
# arch-chroot /mnt
\end{verbatim}
\item Now everything is on the main computer/hard drive.
\end{itemize}
\section{Download and Configure Network Manager.}
\begin{itemize}
\item Use \verb|networkmanager| for network management and WiFi.
It will work with eduroam.
\item Connman may seem like a good idea but it's not as robust.
\item The commandline command is \verb|nmcli|.
There is also a text-based interface via \verb|nmtui|.
\item Download the package.
\begin{verbatim}
# pacman -S networkmanager
\end{verbatim}
\item Start with systemd. Note the capitalization.
\begin{verbatim}
# systemctl enable NetworkManager
\end{verbatim}
\end{itemize}
\section{Set TRIM settings for SSD.}
\begin{itemize}
\item For a SSD, TRIM support can significantly improve timing by keeping the disk in a state that is always ready to be written.
\item This is done periodically, once per week, (\emph{not} continuously) by a systemd task.
\item Simply execute this to kick the whole thing off.
\end{itemize}
\begin{verbatim}
# systemctl enable fstrim.timer
\end{verbatim}
\section{Install a Boot Loader/Manager.}
\begin{itemize}
\item Traditionally, a bootloader would be used that would start the OS.
Now a days, OSes can typically start themselves and just need to be told to do so via a bootmanager.
\item \verb|systemd-boot| is the most lightweight bootmanager on the market it seems.
\item If this doesn't work, use \verb|grub|.
\item Command should be installed in the Arch ``base'' package group.
\begin{verbatim}
# bootctl --path=/boot install
\end{verbatim}
\item It seems like you need to generate your own config files at \verb|/boot/loader/loader.conf| and \verb|/boot/loader/entries/arch.conf|.
\item The loader config at \verb|/boot/loader/loader.conf| should contain
\begin{verbatim}
default arch
timeout 0
editor 0
\end{verbatim}
\item The default profile at \verb|/boot/loader/entries/arch.conf| (corresponding to default name) should contain
\begin{verbatim}
title Arch Linux
linux /vmlinuz-linux
initrd /intel-ucode.img
initrd /initramfs-linux.img
options root=PARTUUID=###### rw
\end{verbatim}
\item Generate the PARTUUID with the command
\begin{verbatim}
# blkid -s PARTUUID -o value /dev/sda3
\end{verbatim}
To dump this straight into vim, use \verb|:read !<shell>|.
A hook must be placed to allow pacman to automatically update systemd-boot.
Otherwise a special command is required (and I'd forget).
A file at \verb|/etc/pacman.d/hooks/100-systemd-boot.hook| should contain
\begin{verbatim}
[Trigger]
Type = Package
Operation = Upgrade
Target = systemd
[Action]
Description = Updating systemd-boot
When = PostTransaction
Exec = /usr/bin/bootctl update
\end{verbatim}
\end{itemize}
\subsection{Set Root Password.}
\begin{itemize}
\item Use the \verb|passwd| command.
\begin{verbatim}
# passwd
\end{verbatim}
\end{itemize}
\section{Set Locale Information.}
\begin{itemize}
\item Set information necessary for timezone and language and character sets.
\item Set the time zone. Use tab complete to make a symlink.
\begin{verbatim}
# ln -sf /usr/share/zoneinfo/America/CITY /etc/localtime
# hwclock --systohc
\end{verbatim}
\item Editing the file \verb|/etc/locale.gen|. Uncomment
\verb|en_US.UTF-8 UTF-8| and \verb|en_US ISO-8859-1|.
\item Run the command to generate the locale file.
\begin{verbatim}
# locale-gen
\end{verbatim}
\item Set the \verb|LANG| variable in \verb|/etc/locale.conf|.
\begin{verbatim}
LANG=en_US.UTF-8
\end{verbatim}
\end{itemize}
\section{Set Hostname.}
\begin{itemize}
\item Edit the value of the file in \verb|/etc/hostname|.
\item For now, I'm thinking \verb|archpad|.
\end{itemize}
\section{Unmount and Reboot.}
\begin{itemize}
\item Everything (should be) (is) done.
\item Exit the new install back onto the Live USB.
\begin{verbatim}
# exit
\end{verbatim}
\item Unmount all of the drives.
\begin{verbatim}
# swapoff /dev/sda2
# umount -R /mnt
\end{verbatim}
\item Reboot.
\begin{verbatim}
# reboot
\end{verbatim}
\item You should get dropped back in to the tty.
\end{itemize}
\section{Post-Install Configuration.}
\subsection{Connect to WiFi.}
\begin{itemize}
\item This seemed flaky\ldots But, it seems you just have to start this.
\begin{verbatim}
# nmcli device wifi connect NETGEAR35 password MYPASSWORD
\end{verbatim}
\item \textbf{\emph{NOTE}} the password will be stored in plain text.
\item This didn't work to automatically connect the first time I tried it.
The only way I found to fix it was to disable and delete the connection named \verb|NETGEAR35| and then run the above command a second time.
\end{itemize}
\subsection{Make a user.}
\begin{itemize}
\item You'll want a user that's not root.
Just a good idea.
\item Think of the \verb|wheel| group as the administrator group.
\begin{verbatim}
# useradd -m -g wheel wcdawn
# passwd wcdawn
\end{verbatim}
\item Edit the sudoers file.
Users of the wheel group should be allowed to run any command without a password.
Uncomment the line
\begin{verbatim}
%wheel ALL=(ALL) NOPASSWD: ALL
\end{verbatim}
\end{itemize}
\subsection{Hibernate/Suspend.}
\begin{itemize}
\item It looks like systemd should do this automatically.
\item If it doesn't, look in the config file \verb|/etc/systemd/logind.conf| and grep the line \verb|HandleLidSwitch|.
\item You'll probably need to uncomment the lines relating to \verb|HandleLidSwitch|.
\end{itemize}
\subsection{Setup Graphical Environment.}
\begin{itemize}
\item Install the graphics and TrackPoint drivers for the ThinkPad x220.
\begin{verbatim}
# pacman -S xf86-video-intel xf86-input-libinput
\end{verbatim}
\item Install the X.org packages using pacman.
\begin{verbatim}
# pacman -S xorg-server xorg-xinit
\end{verbatim}
\item Install i3-gaps.
\begin{verbatim}
# pacman -S i3-gaps compton feh i3blocks dmenu ttf-ibm-plex
\end{verbatim}
\item Configuration of Window Managers are placed in \verb|~/.xinitrc|.
\item Setup xinit config.
In \verb|~/.xinitrc| add the line \verb|exec i3|.
\item Type \verb|xinit| into the tty.
\item Next time you login, use \verb|startx|.
Probably add \verb|startx| to bashrc.
\begin{verbatim}
if [[ "$(tty)" = "/dev/tty1" ]]
then
pgrep i3 || startx
fi
\end{verbatim}
\item At this point, it's probably a good idea to try to clone my dotfiles repo.
\item You'll want to use \verb|~/scripts/install_st.sh| to install and configure suckless terminal.
\end{itemize}
\subsection{Setup Audio.}
You'll want to use the \verb|Alsa| package to manage audio.
\begin{verbatim}
$ sudo pacman -S alsa-utils
\end{verbatim}
Then, run the manager program to set the audio levels.
\begin{verbatim}
$ alsamixer
\end{verbatim}
Type \verb|M| to unmute the master channel and then use the arrow keys to set the volume.
Type \verb|Esc| to exit.
You can then use
\begin{verbatim}
$ speaker-test -c2
\end{verbatim}
to perform a two channel audio test.
\subsection{Setup \LaTeX.}
\begin{itemize}
\item You gotta have it\ldots{} I'm working on my thesis right now.
\item Don't use mupdf.
Use poppler instead.
I had weird color rendering problems and segfaults with mupdf.
\begin{verbatim}
# pacman -S texlive-most zathura biber zathura-pdf-poppler ghostscript
\end{verbatim}
\end{itemize}
\subsection{Numeric/Computational Packages}
\begin{itemize}
\item TODO need to build some sort of list of packages.
\item Install gfortran (it's not bundled with the default gcc) and lapack.
\begin{verbatim}
# pacman -S gcc-fortran lapack
\end{verbatim}
\end{itemize}
\subsection{Install a web browser}
\begin{itemize}
\item For now, I've selected Firefox.
\begin{verbatim}
# pacman -S firefox
\end{verbatim}
\end{itemize}
\subsection{Installing a package from the Arch User Repository}
\begin{itemize}
\item We will install \verb|yay| which will manage and update subsequent AUR packages.
\item Begin by making a directory for downloading the package.
\begin{verbatim}
$ mkdir ~/aur
$ cd ~/aur
\end{verbatim}
\item Download the repository.
\begin{verbatim}
$ git clone https://aur.archlinux.org/yay.git
$ cd ./yay
\end{verbatim}
\item Build the package.
This uses the \verb|PKGBUILD| file which is (semi) readable.
\begin{verbatim}
$ makepkg -si
\end{verbatim}
\item \verb|yay| is now installed.
Subsequently, use \verb|yay -S PACKAGE| or \verb|yay -Syu| just like pacman to install and update AUR packages respectively.
\item While you're at it, \verb|yay -S ttf-ms-fonts| for Times New Roman.
\end{itemize}
\subsection{Python Packages}
\begin{itemize}
\item Here's what I've got so far:
\begin{verbatim}
# pacman -S python python-scipy python-numpy python-matplotlib python-sympy
\end{verbatim}
\end{itemize}
\subsection{Install a Printer}
\begin{itemize}
\item Printer is managed by CUPS. Package \verb|cups-pdf| allows for print to pdf.
\item The Epson WF-3520 printer driver is in the repository \verb|epson-inkjet-printer-escpr|.
\begin{verbatim}
$ sudo pacman -S cups cups-pdf epson-inkjet-printer-escpr
\end{verbatim}
\item Using systemd, enable and start CUPS.
\begin{verbatim}
# systemctl enable org.cups.cupsd.service
# systemctl start org.cups.cupsd.service
\end{verbatim}
\item Use the CUPS web/html interface to install the Epson WF-3520 and cups-pdf printers.
Point a web browser to \verb|http://localhost:631| (port 631 on the localhost).
\item When I did this, it automatically found the Epson printer on the network.
Follow the prompts.
I had to select the WF-3620 driver instead.
It seems to work alright.
\item When installing the cups-pdf printer, select ``Generic'' when prompted to select a make/brand.
\item For cups-pdf, by default the pdf files are saved to \verb|/var/spool/cups-pdf/wcdawn|.
You can change this by editing \verb|/etc/cups/cups-pdf.conf| and I set it to dump to my home directory.
I can rename and move from there.
\end{itemize}
\subsubsection{What to do when a printer isn't working.}
\begin{itemize}
\item List all of the printers on the computer.
\begin{verbatim}
$ lpstat -t
\end{verbatim}
\item Try disabling and re-enabling the printer.
For a device named \verb|HomePrinter|.
\begin{verbatim}
# cupsdisable HomePrinter
# cupsenable HomePrinter
\end{verbatim}
\item If everything is wrong, try deleting the printer.
\begin{verbatim}
# lpadmin -x HomePrinter
\end{verbatim}
\end{itemize}
\subsection{Install a scanner.}
\begin{itemize}
\item This uses the \verb|sane| package.
SANE stands for Scanner Access Now Easy.
\item It will also be useful to have \verb|ghostscript| installed but this is also required for \verb|epstopdf| in \LaTeX.
\begin{verbatim}
# pacman -S sane ghostscript
\end{verbatim}
\item Now, \verb|scanimage -L| will list all available scanners.
Mine (Epson) was found automatically.
\item Use \verb|scanimage --help --device="DEVICENAME"| to list all of the options for the device.
\item For this purpose, I have written a script to scan multiple page pdfs using fairly standard options.
\end{itemize}
\subsection{Configuring pacman.}
\begin{itemize}
\item Pacman is the arch linux package manager.
It's configuration file is located at \verb|/etc/pacman.conf|.
\item In \verb|/etc/pacman.conf| uncomment the \verb|Color| line to enable pretty text formatting.
\item Optionally, add a line with \verb|ILoveCandy| to turn the loading bar into a pacman eating dots.
\end{itemize}
\subsubsection{Useful Pacman Commands.}
\begin{itemize}
\item \verb|pacman -S NAME| to install a package named NAME.
\item \verb|pacman -Ss NAME| search the remote repositories for a package named NAME.
The argument also accepts regular expressions.
\item \verb|pacman -Qn| to list installed packages.
\verb|pacman -Qm| to list packages installed from the AUR.
\begin{itemize}
\item Supply the \verb|-q| option to not output version numbers.
\item Supply the \verb|-e| option (explicit) to only list packages explicitly downloaded.
This is useful for outputing to a text file and installing with a script.
\end{itemize}
\item \verb|pacman -Qdt| lists truly orphaned packages.
\item \verb|pacman -Rns NAME| to remove a package named NAME and all of its dependencies and all of its system config files.
\end{itemize}
\section{Things I Know but haven't really figured out.}
\begin{itemize}
\item Tools:
\begin{itemize}
\item \verb|nnn| (terminal file browser)
\item \verb|chrony| (for cron jobs / time automation)
\begin{itemize}
\item Luke Smith has a video about cron jobs.
\item One of his cron jobs automatically downloads package updates which seems super useful.
\end{itemize}
\end{itemize}
\item There is no \verb|ifconfig| support natively. Use \verb|ip addr| instead.
\end{itemize}
\end{document}
|
{"hexsha": "17f0c921de67549c40f564fe8e3980714c2eecf3", "size": 19901, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "tex_projects/arch_install/arch_install.tex", "max_stars_repo_name": "wcdawn/dotfiles", "max_stars_repo_head_hexsha": "d069b53b7f19b53767df9e8d67b38b1d6fbf4e28", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-01-30T01:34:21.000Z", "max_stars_repo_stars_event_max_datetime": "2019-01-30T01:34:21.000Z", "max_issues_repo_path": "tex_projects/arch_install/arch_install.tex", "max_issues_repo_name": "wcdawn/dotfiles", "max_issues_repo_head_hexsha": "d069b53b7f19b53767df9e8d67b38b1d6fbf4e28", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tex_projects/arch_install/arch_install.tex", "max_forks_repo_name": "wcdawn/dotfiles", "max_forks_repo_head_hexsha": "d069b53b7f19b53767df9e8d67b38b1d6fbf4e28", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.3713298791, "max_line_length": 158, "alphanum_fraction": 0.7192100899, "num_tokens": 5625}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
The module provides several custom descriptor classes for attribute
validation of region classes.
"""
import abc
from astropy.coordinates import SkyCoord
from astropy.units import Quantity
import numpy as np
from .pixcoord import PixCoord
__all__ = []
class RegionAttr(abc.ABC):
    """
    Abstract data-descriptor used to validate region attributes.

    Subclasses implement ``_validate``, which raises if a candidate
    value is unacceptable; accepted values are stored in the owning
    instance's ``__dict__`` under ``name``.
    """

    def __init__(self, name):
        # Key under which the value is kept in the instance __dict__.
        self.name = name

    def __get__(self, instance, owner):
        # Class-level access yields the descriptor object itself.
        return self if instance is None else instance.__dict__[self.name]

    def __set__(self, instance, value):
        # Validate before storing so invalid state is never recorded.
        self._validate(value)
        instance.__dict__[self.name] = value

    def __delete__(self, instance):
        instance.__dict__.pop(self.name)

    @abc.abstractmethod
    def _validate(self, value):
        """
        Validate the attribute value.

        An exception is raised if the value is invalid.
        """
        pass
class ScalarPix(RegionAttr):
    """
    Descriptor class for `~regions.PixelRegion`, which takes a scalar
    `~regions.PixCoord` object.
    """

    def _validate(self, value):
        # Accept only a scalar PixCoord; anything else is rejected.
        if isinstance(value, PixCoord) and value.isscalar:
            return
        raise ValueError(f'The {self.name} must be a scalar PixCoord object')
class OneDPix(RegionAttr):
    """
    Descriptor class for `~regions.PixelRegion`, which takes a
    one-dimensional `regions.PixCoord` object.
    """

    def _validate(self, value):
        # A valid value is a non-scalar PixCoord whose x array is 1D.
        is_1d = (isinstance(value, PixCoord) and not value.isscalar
                 and value.x.ndim == 1)
        if not is_1d:
            raise ValueError(f'The {self.name} must be a 1D PixCoord object')
class ScalarLength(RegionAttr):
    """
    Descriptor class for `~regions.PixelRegion`, which takes a scalar
    python/numpy number.
    """

    def _validate(self, value):
        # np.isscalar accepts plain Python and numpy scalar numbers.
        if np.isscalar(value):
            return
        raise ValueError(
            f'The {self.name} must be a scalar numpy/python number')
class ScalarSky(RegionAttr):
    """
    Descriptor class for `~regions.SkyRegion`, which takes a scalar
    `~astropy.coordinates.SkyCoord` object.
    """

    def _validate(self, value):
        # Accept only a scalar SkyCoord; anything else is rejected.
        if isinstance(value, SkyCoord) and value.isscalar:
            return
        raise ValueError(f'The {self.name} must be a scalar SkyCoord object')
class OneDSky(RegionAttr):
    """
    Descriptor class for `~regions.SkyRegion`, which takes a
    one-dimensional `~astropy.coordinates.SkyCoord` object.
    """

    def _validate(self, value):
        # Accept only a 1D SkyCoord array; anything else is rejected.
        if isinstance(value, SkyCoord) and value.ndim == 1:
            return
        raise ValueError(f'The {self.name} must be a 1D SkyCoord object')
class QuantityLength(RegionAttr):
    """
    Descriptor class for `~regions.SkyRegion`, which takes a scalar
    `~astropy.units.Quantity` object.
    """

    def _validate(self, value):
        # Accept only a scalar Quantity; anything else is rejected.
        if isinstance(value, Quantity) and value.isscalar:
            return
        raise ValueError(f'The {self.name} must be a scalar astropy '
                         'Quantity object')
class RegionType(RegionAttr):
    """
    Descriptor class for compound pixel and sky regions.

    The value must be an instance of ``regionclass``.
    """

    def __init__(self, name, regionclass):
        # Storage key plus the concrete region class to accept.
        self.name = name
        self.regionclass = regionclass

    def _validate(self, value):
        # Reject anything that is not an instance of the expected class.
        if isinstance(value, self.regionclass):
            return
        raise ValueError(f'The {self.name} must be a '
                         f'{self.regionclass.__name__} object')
|
{"hexsha": "9dd9d498d6adca8e8f5b83c90b3044b4a20affdd", "size": 3572, "ext": "py", "lang": "Python", "max_stars_repo_path": "regions/core/attributes.py", "max_stars_repo_name": "dhomeier/regions", "max_stars_repo_head_hexsha": "5055128abda57c3b463f51ede0a6ac0ef5a0c698", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 46, "max_stars_repo_stars_event_min_datetime": "2015-05-26T20:59:24.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T04:57:50.000Z", "max_issues_repo_path": "regions/core/attributes.py", "max_issues_repo_name": "dhomeier/regions", "max_issues_repo_head_hexsha": "5055128abda57c3b463f51ede0a6ac0ef5a0c698", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 338, "max_issues_repo_issues_event_min_datetime": "2015-05-15T20:33:35.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T21:43:43.000Z", "max_forks_repo_path": "regions/core/attributes.py", "max_forks_repo_name": "dhomeier/regions", "max_forks_repo_head_hexsha": "5055128abda57c3b463f51ede0a6ac0ef5a0c698", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 49, "max_forks_repo_forks_event_min_datetime": "2016-03-21T22:12:31.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-29T21:50:25.000Z", "avg_line_length": 27.0606060606, "max_line_length": 77, "alphanum_fraction": 0.6318589026, "include": true, "reason": "import numpy,from astropy", "num_tokens": 809}
|
% This is samplepaper.tex, a sample chapter demonstrating the
% LLNCS macro package for Springer Computer Science proceedings;
% Version 2.20 of 2017/10/04
%
\documentclass[runningheads]{llncs}
%
\usepackage{fixltx2e}
\usepackage[american]{babel}
\usepackage[utf8]{inputenc}
\usepackage{csquotes}
\usepackage{graphicx}
\usepackage{xcolor}
\usepackage{hyperref}
\usepackage{url}
\usepackage[T1]{fontenc}
\usepackage{lmodern}
\usepackage{microtype}
\usepackage{eurosym}
\usepackage{biblatex}
\addbibresource{bibliography.bib}
\addbibresource{rpackages.bib}
% Used for displaying a sample figure. If possible, figure files should
% be included in EPS format.
%
% If you use the hyperref package, please uncomment the following line
% to display URLs in blue roman font according to Springer's eBook style:
%\renewcommand\UrlFont{\color{blue}\rmfamily}
\hypersetup{breaklinks=true,
bookmarks=true,
pdfauthor={},
pdftitle={Influencing Factors on Social Network Evolution},
colorlinks=true,
citecolor=blue,
urlcolor=blue,
linkcolor=magenta,
pdfborder={0 0 0}}
\urlstyle{same}
\usepackage{longtable,booktabs}
\newcommand{\eg}{e.\,g.,\ }
\newcommand{\ie}{i.\,e.,\ }
%\setkeys{Gin}{width=\maxwidth,height=\maxheight,keepaspectratio}
\IfFileExists{upquote.sty}{\usepackage{upquote}}{}
% use microtype if available
\IfFileExists{microtype.sty}{%
\usepackage{microtype}
\UseMicrotypeSet[protrusion]{basicmath} % disable protrusion for tt fonts
}{}
\providecommand{\tightlist}{%
\setlength{\itemsep}{0pt}\setlength{\parskip}{0pt}}
\begin{document}
%
\title{Influencing Factors on Social Network Evolution}
%
%\titlerunning{Abbreviated paper title}
% If the paper title is too long for the running head, you can set
% an abbreviated paper title here
%
\input{anonauthors.tex}
%
\maketitle % typeset the header of the contribution
%
\begin{abstract}
The abstract should briefly summarize the contents of the paper in
150--250 words.
\keywords{First keyword \and Second keyword \and Another keyword.}
\end{abstract}
%
%
%
\hypertarget{introduction}{%
\section{Introduction}\label{introduction}}
In the past decade the evolution of the internet and social media
platforms raised new forms of social networks that changed our
interpersonal communication and the methods of information procurement
considerably. It has become very easy to connect to existing friends
online, look for new friends and exchange information with them for
example using platforms like Facebook or Twitter. As the formation of an
individual's opinion is based on all available resources it is important
to understand how the information received in online social networks is
embedded into the process of opinion formation and how people behave in
such networks.
\hypertarget{theory}{%
\section{Theory}\label{theory}}
Existing research states that the vast availability of similar-minded
people in online social networks leads us to enclose ourselves in
so-called echo chambers and to disconnect from people who are too
different from us. This leads us to reinforce our opinion solely through
finding others that think similarly. If this reinforcement continued,
people would quickly be separated into different camps and would no
longer be able to reach agreement with each other. As most people prefer
to make compromises, it is interesting to take a closer look at the
thresholds that let them keep in touch with people with whom they
disagree, and at how varying these thresholds changes the overall
picture. Therefore,
we created an agent-based model that allows us to simulate the desired
behaviors and to compare the resulting network structures.
To reproduce an online social network in a simulation we must rely on a
network generator that is comparable to a real network. As social media
platforms such as Facebook and Twitter are close to a scale-free network
in their overall network structure and follow a power-law distribution,
the Barabasi-Albert network generator is most suitable for generating a
realistic network topology. Within a Barabasi-Albert network there
exists a small number of very well-connected hubs, while most nodes have
only a few connections to others. The Barabasi-Albert generator provides
several parameters like the initial network size, the number of new
nodes that are added to an initial network and the number of edges which
are created by the joining nodes to existing nodes using preferential
attachment. This allows us to get the simulation close to a real social
network structure.
\hypertarget{method}{%
\section{Method}\label{method}}
We chose the programming language Julia to conduct our research. With
the LightGraphs package, this language provides performant network
simulation and the required network generators for our agent-based
model. It is also possible to implement batch runs that are based on the
same random seed so that the network evolution following different
parameters can be analyzed subsequently.
In our research, we focused on the variation of limited parameters for
answering our research questions:
\begin{itemize}
\tightlist
\item
Size of the network: How do network and opinion dynamics interplay
with the size of a social network?
\item
Adding friends: What is the difference between randomly making friends
in the network and choosing only from the friends of existing friends?
\item
Removing friends: How does the threshold for accepting opinion
differences interfere with the overall opinion and network dynamics?
The distribution of opinions throughout the agents was not varied, but
uniformly distributed, because their variation would have blurred the
effect of the examined parameters on the network evolution.
\end{itemize}
To analyze the effect of our parameters, we chose different approaches
of social network analysis and evaluated the resulting networks and
their nodes regarding their degree, centrality, communality, diameter,
and clustering coefficient.
\hypertarget{results}{%
\section{Results}\label{results}}
Our simulation study comprised in total 18 simulation runs that show the
impact and relationships of the varied factors on the network evolution
and opinion dynamics.
\hypertarget{first-section}{%
\section{First Section}\label{first-section}}
\hypertarget{a-subsection-sample}{%
\subsection{A Subsection Sample}\label{a-subsection-sample}}
Please note that the first paragraph of a section or subsection is not
indented. The first paragraph that follows a table, figure, equation
etc. does not need an indent, either.
Subsequent paragraphs, however, are indented.
\hypertarget{sample-heading-third-level}{%
\subsubsection{Sample Heading (Third
Level)}\label{sample-heading-third-level}}
Only two levels of headings should be numbered. Lower level headings
remain unnumbered; they are formatted as run-in headings.
\hypertarget{sample-heading-fourth-level}{%
\paragraph{Sample Heading (Fourth
Level)}\label{sample-heading-fourth-level}}
The contribution should contain no more than four levels of headings.
Table~\ref{tab1} gives a summary of all heading levels.
Another nice feature is the shortcuts \eg and \ie
\hypertarget{references}{%
\section{References}\label{references}}
You can cite any paper in parenthesis as following
\autocite{valdez2017priming} or inline saying that
\textcite{valdez2017priming} found something. Multiple citations are
possible as well~\autocite{valdez2017priming,valdez2019users}.
You can refer to other sections by kebab-casing to
section~\ref{a-subsection-sample}. You can easily cite an r-package
directly in the text by using the \texttt{cite\_pkg} function from the
package \texttt{rmdtemplates}~\autocite{R-rmdtemplates}.
\hypertarget{environments}{%
\section{Environments}\label{environments}}
The environments \enquote{definition}, \enquote{lemma},
\enquote{proposition}, \enquote{corollary}, \enquote{remark}, and
\enquote{example} are defined in the LLNCS document class as well.
\hypertarget{theorems}{%
\subsection{Theorems}\label{theorems}}
\begin{theorem}
This is a sample theorem. The run-in heading is set in bold, while
the following text appears in italics. Definitions, lemmas,
propositions, and corollaries are styled the same way.
\end{theorem}
\hypertarget{equations}{%
\subsection{Equations}\label{equations}}
\begin{equation}
x + y = z
\end{equation}
\hypertarget{tables}{%
\subsection{Tables}\label{tables}}
You can get the non breaking space in RStudio by pressing ALT+SPACE. You
can refer to tables by using Table~\ref{tab:table_1}.
\begin{longtable}[]{@{}rrrrl@{}}
\caption{Test\label{tab:table_1}}\tabularnewline
\toprule
Sepal.Length & Sepal.Width & Petal.Length & Petal.Width &
Species\tabularnewline
\midrule
\endfirsthead
\toprule
Sepal.Length & Sepal.Width & Petal.Length & Petal.Width &
Species\tabularnewline
\midrule
\endhead
5.1 & 3.5 & 1.4 & 0.2 & setosa\tabularnewline
4.9 & 3.0 & 1.4 & 0.2 & setosa\tabularnewline
4.7 & 3.2 & 1.3 & 0.2 & setosa\tabularnewline
4.6 & 3.1 & 1.5 & 0.2 & setosa\tabularnewline
5.0 & 3.6 & 1.4 & 0.2 & setosa\tabularnewline
5.4 & 3.9 & 1.7 & 0.4 & setosa\tabularnewline
\bottomrule
\end{longtable}
\hypertarget{inline-latex-tables}{%
\subsubsection{Inline Latex Tables}\label{inline-latex-tables}}
You can directly add latex tables.
\begin{table}
\caption{Table captions should be placed above the
tables.}\label{tab1}
\begin{tabular}{|l|l|l|}
\hline
Heading level & Example & Font size and style\\
\hline
Title (centered) & {\Large\bfseries Lecture Notes} & 14 point, bold\\
1st-level heading & {\large\bfseries 1 Introduction} & 12 point, bold\\
2nd-level heading & {\bfseries 2.1 Printing Area} & 10 point, bold\\
3rd-level heading & {\bfseries Run-in Heading in Bold.} Text follows & 10 point, bold\\
4th-level heading & {\itshape Lowest Level Heading.} Text follows & 10 point, italic\\
\hline
\end{tabular}
\end{table}
\hypertarget{figures}{%
\subsection{Figures}\label{figures}}
You can refer to figures by using Figure~\ref{fig:fig1}.
\begin{figure}
\includegraphics[width=1\linewidth]{NetworkEvolution_files/figure-latex/fig1-1} \caption{This is the text caption under the figure}\label{fig:fig1}
\end{figure}
\hypertarget{acknowledgements}{%
\section*{Acknowledgements}\label{acknowledgements}}
\addcontentsline{toc}{section}{Acknowledgements}
We would like to thank xyz. We would further like to thank the authors
of the packages we have used. We used the following packages to create
this document: \texttt{knitr}~\autocite{R-knitr},
\texttt{tidyverse}~\autocite{R-tidyverse},
\texttt{rmdformats}~\autocite{R-rmdformats},
\texttt{kableExtra}~\autocite{R-kableExtra},
\texttt{scales}~\autocite{R-scales}, \texttt{psych}~\autocite{R-psych},
\texttt{rmdtemplates}~\autocite{R-rmdtemplates}.
%
% ---- Bibliography ----
%
% BibTeX users should specify bibliography style 'splncs04'.
% References will then be sorted and formatted in the correct style.
%
%\bibliographystyle{splncs04}
%\bibliography{bibliography,rpackages}
\printbibliography
\end{document}
|
{"hexsha": "4492d9c6f3d2f205e70b0f472a89c097f0b55407", "size": 11109, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "NetworkEvolution/NetworkEvolution.tex", "max_stars_repo_name": "Pat93h/hcii2020patrick", "max_stars_repo_head_hexsha": "21a91eda359f19682f6159840769f73ea4e3f5e2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "NetworkEvolution/NetworkEvolution.tex", "max_issues_repo_name": "Pat93h/hcii2020patrick", "max_issues_repo_head_hexsha": "21a91eda359f19682f6159840769f73ea4e3f5e2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "NetworkEvolution/NetworkEvolution.tex", "max_forks_repo_name": "Pat93h/hcii2020patrick", "max_forks_repo_head_hexsha": "21a91eda359f19682f6159840769f73ea4e3f5e2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.835483871, "max_line_length": 147, "alphanum_fraction": 0.7792780628, "num_tokens": 2870}
|
from __future__ import division
import time
import numpy as np
from torch import nn, optim
import torch.nn.functional as F
import torch
import itertools
from dim_red.support_func import loss_permutation, loss_top_1_in_lat_top_k, normalize_numpy,\
get_nearestneighbors, sanitize, forward_pass, Normalize, stopping_time,\
repeat, pairwise_NNs_inner, get_nearestneighbors_partly, save_transformed_data, ifelse, validation_function,\
save_net_as_matrix
from dim_red.data import write_fvecs, write_ivecs
net_style = "triplet"
def uniform_loss(x, t=2):
    """Uniformity regularizer over a batch of embeddings.

    Computes ``log(mean(exp(-t * d_ij**2)))`` over all pairwise Euclidean
    distances ``d_ij`` of the rows of ``x``; lower values correspond to
    points spread more uniformly apart.

    Parameters
    ----------
    x : torch.Tensor of shape (n, d), the embedded points.
    t : temperature scaling the squared distances (default 2).
    """
    squared_dists = torch.pdist(x, p=2).pow(2)
    return torch.exp(-t * squared_dists).mean().log()
def triplet_optimize(xt, xv, gt_nn, xq, net, args, lambda_uniform, k_pos, k_neg, val_k, margin, dfl, valid):
    """Train ``net`` with a triplet loss plus a uniformity regularizer.

    Parameters
    ----------
    xt : numpy array of training points.
    xv : numpy array of validation points.
    gt_nn : precomputed nearest-neighbor index array for ``xt``
        (row i holds neighbor ids of point i); positives are sampled
        from its first ``k_pos`` columns.
    xq : numpy array of query points (used for validation / search tests).
    net : the torch network being trained.
    args : run configuration (epochs, lr_schedule, batch_size, device,
        momentum, val_freq, val_freq_search, save_optimal, database, dout).
    lambda_uniform : weight of the uniformity term in the total loss.
    k_pos : positives are drawn uniformly from the top-k_pos neighbors.
    k_neg : negative = the k_neg-th neighbor in the current embedding.
    val_k : k passed to the validation function.
    margin : triplet-loss margin.
    dfl : dataset first letter forwarded to the C search-test binding.
    valid : suffix marking a validation split ("" for a full run).

    Returns
    -------
    list of per-epoch log dicts (losses, timings, validation metrics).
    """
    # One learning rate per epoch: the comma-separated schedule is repeated
    # to cover args.epochs exactly.
    lr_schedule = [float(x.rstrip().lstrip()) for x in args.lr_schedule.split(",")]
    assert args.epochs % len(lr_schedule) == 0
    lr_schedule = repeat(lr_schedule, args.epochs // len(lr_schedule))
    print("Lr schedule", lr_schedule)
    N = gt_nn.shape[0]
    acc = []  # search-accuracy history across val_freq_search checkpoints
    xt_var = torch.from_numpy(xt).to(args.device)
    # Single-character split marker consumed by the C search tests.
    valid_char = ""
    if len(valid) > 0:
        valid_char = "v"
    # prepare optimizer
    optimizer = optim.SGD(net.parameters(), lr_schedule[0], momentum=args.momentum)
    pdist = nn.PairwiseDistance(2)
    all_logs = []
    for epoch in range(args.epochs):
        # Update learning rate
        args.lr = lr_schedule[epoch]
        for param_group in optimizer.param_groups:
            param_group['lr'] = args.lr
        t0 = time.time()
        # Sample positives for triplet: for each anchor, pick a random rank
        # among its k_pos ground-truth neighbors.
        rank_pos = np.random.choice(k_pos, size=N)
        positive_idx = gt_nn[np.arange(N), rank_pos]
        # Sample negatives for triplet: embed the whole training set with the
        # current net and take each point's k_neg-th neighbor in latent space
        # (a "hard" negative that is currently too close).
        net.eval()
        print(" Forward pass")
        xl_net = forward_pass(net, xt, 1024)
        print(" Distances")
        I = get_nearestneighbors(xl_net, xl_net, k_neg, args.device, needs_exact=False)
        negative_idx = I[:, -1]
        # training pass
        print(" Train")
        net.train()
        avg_triplet, avg_uniform, avg_loss = 0, 0, 0
        # offending counts triplets with a strictly positive hinge loss.
        offending = idx_batch = 0
        # process dataset in a random order
        perm = np.random.permutation(N)
        t1 = time.time()
        for i0 in range(0, N, args.batch_size):
            i1 = min(i0 + args.batch_size, N)
            n = i1 - i0
            data_idx = perm[i0:i1]
            # anchor, positives, negatives
            ins = xt_var[data_idx]
            pos = xt_var[positive_idx[data_idx]]
            neg = xt_var[negative_idx[data_idx]]
            # do the forward pass (+ record gradients)
            ins, pos, neg = net(ins), net(pos), net(neg)
            # triplet loss: hinge on d(anchor, pos) - d(anchor, neg) + margin
            per_point_loss = pdist(ins, pos) - pdist(ins, neg) + margin
            per_point_loss = F.relu(per_point_loss)
            loss_triplet = per_point_loss.mean()
            offending += torch.sum(per_point_loss.data > 0).item()
            # uniform regularizer on the anchor embeddings only
            loss_uniform = uniform_loss(ins)
            # combined loss
            loss = loss_triplet + lambda_uniform * loss_uniform
            # collect some stats
            avg_triplet += loss_triplet.data.item()
            avg_uniform += loss_uniform.data.item()
            avg_loss += loss.data.item()
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            idx_batch += 1
        avg_triplet /= idx_batch
        avg_uniform /= idx_batch
        avg_loss /= idx_batch
        logs = {
            'epoch': epoch,
            'loss_triplet': avg_triplet,
            'loss_uniform': avg_uniform,
            'loss': avg_loss,
            'offending': offending,
            'lr': args.lr
        }
        all_logs.append(logs)
        t2 = time.time()
        # Periodic validation (and always on the last epoch).
        if (epoch + 1) % args.val_freq == 0 or epoch == args.epochs - 1:
            logs_val = validation_function(net, xt, xv, xq, args, val_k)
            logs.update(logs_val)
            net.train()
        t3 = time.time()
        print('epoch %d, times: [hn %.2f s epoch %.2f s val %.2f s]'
              ' lr = %f'
              ' loss = %g = %g + lam * %g, offending %d' % (
                  epoch, t1 - t0, t2 - t1, t3 - t2,
                  args.lr,
                  avg_loss, avg_triplet, avg_uniform, offending
              ))
        logs['times'] = (t1 - t0, t2 - t1, t3 - t2)
        # Periodic graph-based search evaluation via the C extension; writes
        # intermediate embeddings/graphs to fixed paths on disk.
        if args.val_freq_search > 0 and ((epoch + 1) % args.val_freq_search == 0 or epoch == args.epochs - 1):
            import wrap.c_support as c_support
            net.eval()
            dim = xt.shape[1]
            yt = forward_pass(net, xt, 1024)
            knn_low_path = "/mnt/data/shekhale/models/nns_graphs/" + args.database + "/knn_1k_" + net_style + valid + ".ivecs"
            get_nearestneighbors_partly(yt, yt, 1000, args.device, bs=3 * 10 ** 5, needs_exact=True, path=knn_low_path)
            save_transformed_data(xt, net, args.database + "/" + args.database + "_base_" + net_style + valid + ".fvecs",
                                  args.device)
            save_transformed_data(xv, net, args.database + "/" + args.database + "_query_" + net_style + valid + ".fvecs",
                                  args.device)
            acc_cur = c_support.get_graphs_and_search_tests("t", dfl, dim, args.dout, xq.shape[0], valid_char, xt.shape[0], False)
            acc_cur = round(acc_cur, 5)
            # Optionally checkpoint the net whenever it beats all previous
            # search accuracies.
            if args.save_optimal > 0:
                if len(acc) > 0 and acc_cur > max(acc):
                    net_path = "/mnt/data/shekhale/models/nns_graphs/" + str(args.database) + "/" + \
                               str(args.database) + "_net_triplet_optimal.pth"
                    torch.save(net.state_dict(), net_path)
            acc.append(acc_cur)
            net.train()
            print("Acc list ", acc)
            logs['acc'] = acc
            # Early stopping once accuracy improvements fall below 0.002.
            if stopping_time(acc, 0.002):
                return all_logs
    return all_logs
def train_triplet(xb, xt, xv, xq, args, results_file_name):
    """Grid-search driver: build a 3-layer MLP embedder and train it with
    :func:`triplet_optimize` for each hyper-parameter combination.

    Parameters
    ----------
    xb : full base set (embedded and saved after training).
    xt : training points.
    xv : validation points.
    xq : query points.
    args : run configuration (dout, dint, lambda_uniform, device, full,
        save, save_knn_1k, print_results, database, epochs, method, ...).
    results_file_name : text file that per-run summaries are appended to.

    Side effects: writes embeddings, kNN graphs, ground-truth files and
    model checkpoints under hard-coded /mnt/data/shekhale/... paths.
    """
    # ifelse keeps a user-supplied value, otherwise falls back to the default
    # sweep; the sweeps are then narrowed per dataset below.
    lambdas = ifelse(args.lambda_uniform, list(np.logspace(-2, 0, 3)))
    dints = ifelse(args.dint, [512])
    ranks_pos = [5]
    ranks_neg = [10]
    dataset_first_letter = args.database[0]
    if args.database == "sift":
        ranks_pos = [5]
        ranks_neg = [10]
        lambdas = [0.01]
    elif args.database == "glove":
        ranks_pos = [5]
        ranks_neg = [40]
        lambdas = [0.01]
        # "w" tells the C search tests to treat glove as the word dataset.
        dataset_first_letter = "w"
    learning_params = list(itertools.product(lambdas, dints, ranks_pos, ranks_neg))
    print(learning_params)
    for lambda_uniform, dint, k_pos, k_neg in learning_params:
        margin = 0
        print(lambda_uniform, dint, k_pos, k_neg)
        dim = xt.shape[1]
        dout = args.dout
        print("computing training ground truth")
        # Exact k_pos nearest neighbors in the ORIGINAL space; used to draw
        # triplet positives during training.
        xt_gt = get_nearestneighbors(xt, xt, k_pos, device=args.device, needs_exact=True)
        print("build network")
        # dim -> dint -> dint -> dout MLP with L2-normalized output.
        net = nn.Sequential(
            nn.Linear(in_features=dim, out_features=dint, bias=True),
            nn.BatchNorm1d(dint),
            nn.ReLU(),
            nn.Linear(in_features=dint, out_features=dint, bias=True),
            nn.BatchNorm1d(dint),
            nn.ReLU(),
            nn.Linear(in_features=dint, out_features=dout, bias=True),
            Normalize()
        )
        net.to(args.device)
        val_k = 2 * args.dout
        models_path = "/mnt/data/shekhale/models/nns_graphs/" + args.database + "/" + args.database
        data_path = "/mnt/data/shekhale/data/" + args.database + "/" + args.database
        valid = ""
        # args.full != 1 means a validation split: persist the split and its
        # exact ground truth so the search tests can use them.
        if args.full != 1:
            valid = "_valid"
            base_path = data_path + "_base_valid.fvecs"
            write_fvecs(base_path, xt)
            query_path = data_path + "_query_valid.fvecs"
            write_fvecs(query_path, xv)
            gt_path = data_path + "_groundtruth_valid.ivecs"
            get_nearestneighbors_partly(xv, xt, 100, args.device, bs=3 * 10 ** 5, needs_exact=True, path=gt_path)
        all_logs = triplet_optimize(xt, xv, xt_gt, xq, net, args, lambda_uniform, k_pos, k_neg, val_k, margin,
                                    dataset_first_letter, valid)
        # Append a human-readable summary of this run to the results file.
        if args.print_results > 0:
            with open(results_file_name, "a") as rfile:
                rfile.write("\n")
                rfile.write(
                    "Triplet, DATABASE %s, xt_size = %d, batch_size = %d, lat_dim = %d\n" %
                    (args.database, xt.shape[0], args.batch_size, args.dout))
                rfile.write(
                    "k = %d, lam_u = %.7f, r_pos = %d, r_neg = %d , dint = %d, margin = %.5f,"
                    " net_state_dict_size = %d \n" %
                    (val_k, lambda_uniform, k_pos, k_neg, dint, margin, len(list(net.state_dict().keys()))))
                log = all_logs[-1]
                rfile.write(
                    "last perm = %.4f, train_top1_k = %.3f, valid_top1_k = %.3f, query_top1_k = %.3f,"
                    " query_top1_2k = %.3f \n" %
                    (log['perm'], log['train_top1_k'], log['valid_top1_k'], log['query_top1_k'],
                     log['query_top1_2k']))
                rfile.write(
                    "last logs: epochs %d, loss_uniform = %.6f, loss_triplet = %.6f, loss = %.6f, offending = %d,"
                    " times %f %f %f \n" %
                    (log['epoch'] + 1, log['loss_uniform'], log['loss_triplet'], log['loss'], log['offending'],
                     log['times'][0], log['times'][1], log['times'][2]))
                if args.val_freq_search > 0:
                    rfile.write("Acc list: ")
                    rfile.write(' '.join([str(e) for e in log['acc']]))
                    rfile.write("\n")
                rfile.write("------------------------------------------------------ \n")
        # Embed the full base and query sets with the trained net.
        yb = forward_pass(net, xb, 1024)
        yq = forward_pass(net, xq, 1024)
        if args.save > 0:
            if args.save_knn_1k > 0:
                knn_low_path = models_path + "_knn_1k_" + net_style + ".ivecs"
                get_nearestneighbors_partly(yb, yb, 1000, args.device, bs=3*10**5, needs_exact=True, path=knn_low_path)
            gt_low_path = "/mnt/data/shekhale/data/" + args.database + "/"\
                          + args.database + "_groundtruth_" + net_style + ".ivecs"
            get_nearestneighbors_partly(yq, yb, 100, args.device, bs=3*10**5, needs_exact=True, path=gt_low_path)
            save_transformed_data(xb, net, args.database + "/" + args.database + "_base_" + net_style + ".fvecs",
                                  args.device)
            save_transformed_data(xq, net, args.database + "/" + args.database + "_query_" + net_style + ".fvecs",
                                  args.device)
        # ------------------------- SAVING PART --------------------------------------------------------------------
        # Encode the hyper-parameters of this run into the checkpoint names.
        params_string = str(dout) + "_l_" + str(int(-np.log10(lambda_uniform))) + "_1m_" + str(k_pos) + "_" + \
                        str(k_neg) + "_w_" + str(dint) + "_e_" + str(args.epochs)
        net_path = models_path + "_net_" + params_string + ".pth"
        net_script_path = models_path + "_net_" + params_string + "_scr.pth"
        if args.save > 0:
            torch.save(net.state_dict(), net_path)
            net_script = torch.jit.script(net)
            net_script.save(net_script_path)
            save_net_as_matrix(net, models_path + "_net_as_matrix_" + args.method + "_optimal")
|
{"hexsha": "7be519d1c5a4b343914df04f9688657d3f25a2ef", "size": 11432, "ext": "py", "lang": "Python", "max_stars_repo_path": "dim_red/triplet.py", "max_stars_repo_name": "symphony233/gbnns_dim_red", "max_stars_repo_head_hexsha": "2403411600a60ad4365aba3d78a81da144a456b7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "dim_red/triplet.py", "max_issues_repo_name": "symphony233/gbnns_dim_red", "max_issues_repo_head_hexsha": "2403411600a60ad4365aba3d78a81da144a456b7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "dim_red/triplet.py", "max_forks_repo_name": "symphony233/gbnns_dim_red", "max_forks_repo_head_hexsha": "2403411600a60ad4365aba3d78a81da144a456b7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.2852233677, "max_line_length": 130, "alphanum_fraction": 0.5499475157, "include": true, "reason": "import numpy", "num_tokens": 2903}
|
# Training/evaluation script: fit a neural network transition function to
# simulated plasticity data and plot point-to-point vs sequence predictions.
# Relies on definitions from CommonFuncs.jl (generate_data, Session,
# compute_sequence_loss, init, BFGS!, point2point_test, sequence_test, and
# presumably the network `nn` itself — TODO confirm in CommonFuncs.jl).

# Choose model and dimension of x array and y array, and nn output
model_type, kx, ky, ky_nn = "Plasticity", 1, 2, 1
#model_type, kx, ky, ky_nn = "Plasticity", 1, 2, 2
#model_type, kx, ky, ky_nn = "PlasticityLawBased", 1, 1, 1

# Network architecture variant; "ae" is the alternative (autoencoder?) style.
nn_type = "piecewise2"
#nn_type = "ae"

include("CommonFuncs.jl")

# m set of data, each has n time steps(including initial points)
m, n = 4, 201
xs_set, ys_set = generate_data(model_type, m, n)

sess = Session()
Random.seed!(2333)  # fixed seed for reproducible training
loss = compute_sequence_loss(xs_set, ys_set, nn)
init(sess)
BFGS!(sess, loss)  # optimize the loss with BFGS

# point2point test: predict each step from the true previous state.
ys_pred_set = point2point_test(xs_set, ys_set, sess)
colors = ["blue", "green" , "red", "cyan", "magenta", "yellow", "black"]
close("all")
for i = 1:m
    plot(xs_set[i], ys_set[i][:,1], color=colors[i])           # ground truth
    plot(xs_set[i], ys_pred_set[i][:,1], color=colors[i], ".-")  # prediction
end
savefig("S_Train_P2P_Test_NN$(nn_type)_Prob$(model_type)_ky_nn$(ky_nn).png")

# sequence test: roll the model forward from the initial state only.
ys_pred_set = sequence_test(xs_set, sess)
colors = ["blue", "green" , "red", "cyan", "magenta", "yellow", "black"]
close("all")
for i = 1:m
    plot(xs_set[i], ys_set[i][:,1], color=colors[i])           # ground truth
    plot(xs_set[i], ys_pred_set[i][:,1], color=colors[i], ".-")  # prediction
end
savefig("S_Train_S_Test_NN$(nn_type)_Prob$(model_type)_ky_nn$(ky_nn).png")
|
{"hexsha": "9551188ef4728f5c95fc6409659c242e29c9ba7c", "size": 1274, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "research/TransitionFunction/NN_Train.jl", "max_stars_repo_name": "PallHaraldsson/NNFEM.jl", "max_stars_repo_head_hexsha": "33deee97a5897a6f4df6581bd14e853805a2af7e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 19, "max_stars_repo_stars_event_min_datetime": "2020-05-05T02:06:40.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-21T06:53:09.000Z", "max_issues_repo_path": "research/TransitionFunction/NN_Train.jl", "max_issues_repo_name": "PallHaraldsson/NNFEM.jl", "max_issues_repo_head_hexsha": "33deee97a5897a6f4df6581bd14e853805a2af7e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2020-04-01T23:20:04.000Z", "max_issues_repo_issues_event_max_datetime": "2020-12-23T21:59:58.000Z", "max_forks_repo_path": "research/TransitionFunction/NN_Train.jl", "max_forks_repo_name": "PallHaraldsson/NNFEM.jl", "max_forks_repo_head_hexsha": "33deee97a5897a6f4df6581bd14e853805a2af7e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 13, "max_forks_repo_forks_event_min_datetime": "2020-04-01T11:11:06.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-06T10:50:18.000Z", "avg_line_length": 25.48, "max_line_length": 76, "alphanum_fraction": 0.6679748823, "num_tokens": 446}
|
from __future__ import annotations
from typing import TYPE_CHECKING
import numpy as np
from ...utils.geometry import (
clamp_point_to_bounding_box,
point_in_bounding_box,
)
if TYPE_CHECKING:
from ...utils.events import Event
from .image import Image
def move_plane_along_normal(layer: Image, event: Event):
    """Move a layer's slicing plane along its normal vector on Shift+drag.

    This is a generator-style mouse callback: it runs up to the first
    ``yield`` on press, resumes once per mouse-move event, and finishes
    when the drag ends.
    """
    # early exit clauses: only act on Shift-drags over a visible,
    # interactive layer in a 3D view
    if (
        'Shift' not in event.modifiers
        or layer.visible is False
        or layer.interactive is False
        or len(event.dims_displayed) < 3
    ):
        return
    # Store mouse position at start of drag (world coordinates)
    initial_position_world = np.asarray(event.position)
    initial_view_direction_world = np.asarray(event.view_direction)
    # Convert click position and view ray to displayed-data coordinates
    initial_position_data = layer._world_to_displayed_data(
        initial_position_world, event.dims_displayed
    )
    initial_view_direction_data = layer._world_to_displayed_data_ray(
        initial_view_direction_world, event.dims_displayed
    )
    # Calculate intersection of click with plane through data in data coordinates
    intersection = layer.plane.intersect_with_line(
        line_position=initial_position_data,
        line_direction=initial_view_direction_data,
    )
    # Check if click was on plane and if not, exit early.
    if not point_in_bounding_box(
        intersection, layer.extent.data[:, event.dims_displayed]
    ):
        return
    layer.plane.position = intersection
    # Store original plane position and disable interactivity during plane drag
    original_plane_position = np.copy(layer.plane.position)
    layer.interactive = False
    yield  # wait for the first mouse-move event
    while event.type == 'mouse_move':
        # Project mouse drag onto plane normal to get a signed distance
        drag_distance = layer.projected_distance_from_mouse_drag(
            start_position=initial_position_world,
            end_position=np.asarray(event.position),
            view_direction=np.asarray(event.view_direction),
            vector=layer.plane.normal,
            dims_displayed=event.dims_displayed,
        )
        # Calculate updated plane position, always relative to the position
        # at the start of the drag to avoid accumulating error
        updated_position = original_plane_position + (
            drag_distance * np.array(layer.plane.normal)
        )
        # Keep the plane inside the displayed bounding box of the data
        clamped_plane_position = clamp_point_to_bounding_box(
            updated_position, layer._display_bounding_box(event.dims_displayed)
        )
        layer.plane.position = clamped_plane_position
        yield  # wait for the next mouse-move (or release) event
    # Re-enable volume_layer interactivity after the drag
    layer.interactive = True
def set_plane_position(layer: Image, event: Event):
    """Double-click callback: move the layer's slicing plane to the point
    under the cursor.

    Does nothing unless the layer is visible and interactive, the view is
    3D, and the click ray actually hits the plane inside the data extent.
    """
    # Guard clauses: ignore events we should not act on.
    if layer.visible is False or layer.interactive is False:
        return
    if len(event.dims_displayed) < 3:
        return

    displayed = event.dims_displayed
    click_position = np.asarray(event.position)[displayed]
    click_direction = np.asarray(event.view_direction)[displayed]

    # Intersect the click ray with the current plane, in data coordinates.
    intersection = layer.plane.intersect_with_line(
        line_position=click_position,
        line_direction=click_direction,
    )

    # Only accept clicks that land on the plane within the data extent.
    hit_inside_data = point_in_bounding_box(
        intersection, layer.extent.data[:, displayed]
    )
    if hit_inside_data:
        layer.plane.position = intersection
|
{"hexsha": "1e757b5fae6b763bd1a960390eabc0d753bbe225", "size": 3353, "ext": "py", "lang": "Python", "max_stars_repo_path": "napari/layers/image/_image_mouse_bindings.py", "max_stars_repo_name": "chili-chiu/napari", "max_stars_repo_head_hexsha": "eb6e672975ce105ac0125f71da3d0970d17cefb9", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2018-07-03T17:35:46.000Z", "max_stars_repo_stars_event_max_datetime": "2018-11-07T15:48:58.000Z", "max_issues_repo_path": "napari/layers/image/_image_mouse_bindings.py", "max_issues_repo_name": "chili-chiu/napari", "max_issues_repo_head_hexsha": "eb6e672975ce105ac0125f71da3d0970d17cefb9", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 120, "max_issues_repo_issues_event_min_datetime": "2018-09-04T22:05:13.000Z", "max_issues_repo_issues_event_max_datetime": "2019-03-02T01:13:57.000Z", "max_forks_repo_path": "napari/layers/image/_image_mouse_bindings.py", "max_forks_repo_name": "chili-chiu/napari", "max_forks_repo_head_hexsha": "eb6e672975ce105ac0125f71da3d0970d17cefb9", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2018-09-04T21:48:26.000Z", "max_forks_repo_forks_event_max_datetime": "2019-01-29T04:48:30.000Z", "avg_line_length": 31.0462962963, "max_line_length": 81, "alphanum_fraction": 0.7014613779, "include": true, "reason": "import numpy", "num_tokens": 696}
|
/**
 * ex_gnuplot.h
 *
 * utility functions for plotting with gnuplot (v 4.6)
 *
 * - minimizes boilerplate needed to use
 * - popen -> process piping available, less temp files
 * - idea based on myexamples/gnuplot (syntax) and gnuplot_i by N. Devillard (pipes), except a bit simpler
 *
 * Aaro Salosensaari 2016
 *
 * Original gnuplot_i was written by N. Devillard 1998 (version 2.10 2003) and is in public domain.
 */

#ifndef EX_GNUPLOT
#define EX_GNUPLOT

#include <stdarg.h>
#include <stdio.h>

#include <gsl/gsl_vector.h>

#include "ex_util.h"

/* Upper bound on temporary data files per gnuplot session. */
#define GP_MAX_TMP_FILES 32

/*
 * Handle for one running gnuplot process and its associated state.
 * Obtain with gnuplot_init(), release with gnuplot_close().
 */
typedef struct {
    /* Pipe to gnuplot process */
    FILE *gnucmd ;
    /* Number of currently active plots */
    int nplots ;
    /* Pointer to table of names of temporary files */
    char *tmp_filename_tbl[GP_MAX_TMP_FILES] ;
    /* Number of temporary files */
    int ntmp ;
} gnuplot_ctrl ;

/**
 * Public interface function declarations
 */

/* Spawn a gnuplot process and return a fresh session handle. */
gnuplot_ctrl *gnuplot_init(void);

/* Close the pipe, remove temporary files and free the handle. */
void gnuplot_close(gnuplot_ctrl *handle);

/*
 * gnuplot_cmd
 *
 * This sends a string to an active gnuplot session, to be executed.
 * There is strictly no way to know if the command has been
 * successfully executed or not.
 * The command syntax is the same as printf.
 */
void gnuplot_cmd(gnuplot_ctrl *handle, char const *cmd, ...);

/*
 * ex_plot_xf can plot a variable number n of x, f(x) plots.
 *
 * n: number of plots
 * xlow:
 * xhigh:
 * xstep: (self-evident)
 * f1: function to plot
 * title1: title
 * style1: gnuplot style spef
 * (repeat fi, titlei, stylei until i=n)
 */
void ex_plot_xf(gnuplot_ctrl *handle, const int n, double xlow, double xhigh, double xstep, db_fx f1, const char *title1, const char *style1, ...);

/*
 * ex_plot_xys can plot a variable number n of x, y plots.
 *
 * n: number of plots
 * xdata: x points
 * ydata: y points
 * title1: title
 * style1: gnuplot style spef
 * (repeat xdatai, ydatai, titlei, stylei until i=n)
 */
void ex_plot_xys(gnuplot_ctrl *handle, const int n, gsl_vector *xdata1, gsl_vector *ydata1, const char *title1, const char *style1, ...);

/* Plot a single x,y curve from two GSL vectors. */
void gnuplot_xy(gnuplot_ctrl *handle, gsl_vector *xdata, gsl_vector *ydata, const char *title1, const char *style1);

#endif /* ifndef EX_GNUPLOT */
|
{"hexsha": "53374b58aa4e13606c7ec735ff962844b18d9143", "size": 2232, "ext": "h", "lang": "C", "max_stars_repo_path": "ex_gnuplot.h", "max_stars_repo_name": "aa-m-sa/ex_gnuplot_util", "max_stars_repo_head_hexsha": "17319213c85f65850c7b017bb4f3030d978ba936", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2017-10-15T03:41:21.000Z", "max_stars_repo_stars_event_max_datetime": "2017-10-15T03:41:21.000Z", "max_issues_repo_path": "ex_gnuplot.h", "max_issues_repo_name": "aa-m-sa/ex_gnuplot_util", "max_issues_repo_head_hexsha": "17319213c85f65850c7b017bb4f3030d978ba936", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ex_gnuplot.h", "max_forks_repo_name": "aa-m-sa/ex_gnuplot_util", "max_forks_repo_head_hexsha": "17319213c85f65850c7b017bb4f3030d978ba936", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.0, "max_line_length": 147, "alphanum_fraction": 0.6975806452, "num_tokens": 663}
|
"""
    AbstractOperator

An abstract type for linear operators on Banach spaces. Concrete subtypes
(e.g. `KernelOperator`) apply themselves to functions via call syntax and
the `*` operator.
"""
abstract type AbstractOperator end
"""
    KernelOperator{K,S} <: AbstractOperator

A type for representing kernel integral operators over Hilbert spaces.
Applying it to a function `f` yields ``x -> ∫₀¹ k(x, u) f(u) du``
(see `eval_`). `kernel` is the binary kernel function `k`; `solver` is the
quadrature algorithm used to evaluate the integral.
"""
struct KernelOperator{K,S} <: AbstractOperator
    kernel::K  # binary kernel function k(x, u)
    solver::S  # quadrature algorithm passed to `solve`
end

# Convenience constructor: default to HCubature-based quadrature.
KernelOperator(k) = KernelOperator(k, HCubatureJL())
"""
    eval_(ko::KernelOperator, f; solver=ko.solver)

Apply the kernel operator `ko` to the function `f`, returning the function
``x -> ∫₀¹ k(x, u) f(u) du``. The integral is evaluated lazily at each `x`
with the given quadrature `solver`.
"""
function eval_(ko::KernelOperator, f; solver=ko.solver)
    kern = ko.kernel
    # Out-of-place integrand in the (u, p) convention expected by the
    # quadrature problem; the evaluation point x is passed as parameter p.
    integrand(u, p) = kern(p, u) * f(u)
    return function (x)
        problem = QuadratureProblem{false}(integrand, 0, 1, x)
        return solve(problem, solver).u
    end
end
# Applying a `KernelOperator` with call syntax delegates to `eval_`.
(ko::KernelOperator)(x) = eval_(ko, x)

# Operator notation: `ko * f` applies the operator to `f`.
Base.:*(ko::KernelOperator, x) = ko(x)
# Reverse-mode AD rule for applying a `KernelOperator` to `x`.
# Returns the primal result together with a pullback mapping a cotangent
# `δy` of the output to cotangents of the inputs.
function ChainRulesCore.rrule(ko::KernelOperator, x)
    y = ko(x)
    function ko_pullback(δy)
        # The operator itself is treated as non-differentiable.
        δko = NoTangent()
        # NOTE(review): assumes `kernel_transpose_integral` (defined
        # elsewhere in the package) integrates the transposed kernel
        # against δy — confirm its contract there.
        δx = t -> kernel_transpose_integral(ko.kernel, δy, t, ko.solver)
        return δko, δx
    end
    return y, ko_pullback
end
"""
    FourierKernel{T<:Real}

An object representing a Fourier kernel function ``k(x,y) = \\sin(ω π x y + θ)``.
"""
struct FourierKernel{T<:Real}
    ω::T  # frequency scale
    θ::T  # phase offset
end

# Generalization: promote mixed-type arguments (e.g. an Int frequency with a
# Float64 phase) so they satisfy the single type parameter `T`. Same-typed
# arguments still dispatch to the implicit `(ω::T, θ::T)` constructor, so
# existing calls are unaffected.
FourierKernel(ω::Real, θ::Real) = FourierKernel(promote(ω, θ)...)

# Keyword constructor: ω = b - a and θ = a, i.e. k(x,y) = sin((b-a)πxy + a).
FourierKernel(; a = 0.0, b = 1.0) = FourierKernel(b - a, a)

# Evaluate the kernel at (x, y).
(ff::FourierKernel)(x, y) = sin(ff.ω * π * x * y + ff.θ)
"""
    PolynomialKernel{M<:AbstractMatrix}

A polynomial kernel ``k(x,y) = \\sum_{i,j} m[i,j]\\, x^{i-1} y^{j-1}`` whose
coefficients are given by the matrix `m`.
"""
struct PolynomialKernel{M<:AbstractMatrix}
    m::M  # coefficient matrix; m[i, j] multiplies x^(i-1) * y^(j-1)
end

# Evaluate the kernel at (x, y), returning a scalar.
function (pk::PolynomialKernel)(x, y)
    (nrow, ncol) = size(pk.m)
    X = [x^(k - 1) for k in 1:nrow]  # (1, x, x², …)
    Y = [y^(k - 1) for k in 1:ncol]  # (1, y, y², …)
    # Bug fix: the original computed `X * pk.m * Y`, which multiplies a
    # column Vector by a Matrix and throws for nrow > 1. Transposing X
    # yields the intended bilinear form (1×nrow)·(nrow×ncol)·(ncol×1),
    # i.e. a scalar.
    return transpose(X) * pk.m * Y
end
|
{"hexsha": "b63ae7818668e3da77223279c3770182be7102de", "size": 1557, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/kernel_operators/kernel_operators.jl", "max_stars_repo_name": "csimal/SpectralLearning.jl", "max_stars_repo_head_hexsha": "4999657700a0d84dfff470a52ddb3e3b37a44aae", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/kernel_operators/kernel_operators.jl", "max_issues_repo_name": "csimal/SpectralLearning.jl", "max_issues_repo_head_hexsha": "4999657700a0d84dfff470a52ddb3e3b37a44aae", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/kernel_operators/kernel_operators.jl", "max_forks_repo_name": "csimal/SpectralLearning.jl", "max_forks_repo_head_hexsha": "4999657700a0d84dfff470a52ddb3e3b37a44aae", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 20.4868421053, "max_line_length": 79, "alphanum_fraction": 0.6403339756, "num_tokens": 484}
|
using Keldysh, Test
@testset "contour" begin
    # FullContour: forward and backward real-time branches plus the
    # imaginary-time branch.
    let c = FullContour(tmax=2.0, β=5.0)
        @test nbranches(c) == 3
        b = map(x -> x.domain, c.branches)
        @test b == (forward_branch, backward_branch, imaginary_branch)
        # `twist` rotates the branch order cyclically.
        c = twist(c)
        b = map(x -> x.domain, c.branches)
        @test b == (backward_branch, imaginary_branch, forward_branch)
        # Branch lookup by domain must still work after twisting.
        for b in (forward_branch, backward_branch, imaginary_branch)
            @test c[b].domain == b
        end
        # Membership tests: all three branch domains belong to the contour.
        @test forward_branch ∈ c
        @test backward_branch ∈ c
        @test imaginary_branch ∈ c
    end
    # KeldyshContour: real-time branches only.
    let c = KeldyshContour(tmax=2.0)
        @test nbranches(c) == 2
        b = map(x -> x.domain, c.branches)
        @test b == (forward_branch, backward_branch)
        c = twist(c)
        b = map(x -> x.domain, c.branches)
        @test b == (backward_branch, forward_branch)
        for b in (forward_branch, backward_branch)
            @test c[b].domain == b
        end
        @test forward_branch ∈ c
        @test backward_branch ∈ c
        @test imaginary_branch ∉ c
    end
    # ImaginaryContour: the imaginary-time branch alone.
    let c = ImaginaryContour(β=5.0)
        @test nbranches(c) == 1
        b = map(x -> x.domain, c.branches)
        @test b == (imaginary_branch,)
        for b in (imaginary_branch,)
            @test c[b].domain == b
        end
        @test forward_branch ∉ c
        @test backward_branch ∉ c
        @test imaginary_branch ∈ c
    end
end
|
{"hexsha": "0e1dfb74a889c6f7ca3c151a4dd622c9fad1585f", "size": 1295, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/contour.jl", "max_stars_repo_name": "kleinhenz/Keldysh.jl", "max_stars_repo_head_hexsha": "997cd6e06bff4d580ee7fc03dba7e218983caae4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 23, "max_stars_repo_stars_event_min_datetime": "2019-07-07T20:08:05.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-10T20:58:08.000Z", "max_issues_repo_path": "test/contour.jl", "max_issues_repo_name": "kleinhenz/Keldysh.jl", "max_issues_repo_head_hexsha": "997cd6e06bff4d580ee7fc03dba7e218983caae4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2019-10-23T20:06:55.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-24T15:42:43.000Z", "max_forks_repo_path": "test/contour.jl", "max_forks_repo_name": "kleinhenz/Keldysh.jl", "max_forks_repo_head_hexsha": "997cd6e06bff4d580ee7fc03dba7e218983caae4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-03-19T13:27:39.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-18T18:50:28.000Z", "avg_line_length": 22.7192982456, "max_line_length": 66, "alphanum_fraction": 0.6231660232, "num_tokens": 402}
|
function [paramsOut] = globalTrendGUI(hObject, callbackdata, inputData)%#ok<INUSL>
% globalTrendGUI  Dialog for editing global trend parameters.
%
% Reads the current parameter values from the main figure's UserData,
% shows an inputgui dialog, validates the edited values, and writes the
% result back. Loops until the values validate or the user cancels.
%
% Parameters:
%   hObject      - (unused) handle of the calling UI object
%   callbackdata - (unused) callback event data
%   inputData    - structure with .name (main figure name) and
%                  .userData.globalTrend (default parameter structure)
%
% Output:
%   paramsOut    - edited global trend parameter structure (empty when
%                  the user cancels the dialog)
theTitle ='Global trend parameters';
defaultStruct = inputData.userData.globalTrend;
while(true)
    % Fetch the current parameter values stored on the main figure.
    mainFigure = findobj('Type', 'Figure', '-and', 'Name', inputData.name);
    userdata = get(mainFigure, 'UserData');
    if isempty(userdata) || ~isfield(userdata, 'globalTrend')
        paramsOut = struct();
    else
        paramsOut = userdata.globalTrend;
    end
    [defaultStruct, errors] = checkStructureDefaults(paramsOut, defaultStruct);
    if ~isempty(errors)
        warning('globalTrendGUI:bad parameters', getMessageString(errors)); %#ok<CTPCT>
    end
    % Per-field text color: black ('k') by default, switched to red by
    % highlightErrors below when a field fails validation.
    fNamesDefault = fieldnames(defaultStruct);
    for k = 1:length(fNamesDefault)
        textColorStruct.(fNamesDefault{k}) = 'k';
    end
    % Map the logical doLocal value onto the checkbox state (1/0).
    if defaultStruct.doLocal.value
        checkValue = 1;
    else
        checkValue = 0;
    end
    %% Set up the uilist and create the GUI
    closeOpenWindows(theTitle);
    geometry = {[1,4], 1, [3,1,3,1],[3,1,4]};
    geomvert = [];
    uilist={{'style', 'text', 'string', 'Global trend channels', ...
        'TooltipString', defaultStruct.globalTrendChannels.description}...
        {'style', 'edit', 'string', ...
        num2str(defaultStruct.globalTrendChannels.value), ...
        'tag', 'globalTrendChannels', 'ForegroundColor', ...
        textColorStruct.globalTrendChannels}...
        {'style', 'text', 'string', ''}...
        {'style', 'text', 'string', 'Do local', 'TooltipString', ...
        defaultStruct.doLocal.description}...
        {'style', 'checkbox', 'Value', checkValue, ...
        'tag', 'doLocal', 'ForegroundColor', textColorStruct.doLocal}...
        {'style', 'text', 'string', 'Local cutoff', ...
        'TooltipString', defaultStruct.localCutoff.description}...
        {'style', 'edit', 'string', ...
        num2str(defaultStruct.localCutoff.value), ...
        'tag', 'localCutoff', 'ForegroundColor', textColorStruct.localCutoff}...
        {'style', 'text', 'string', 'Local step size', ...
        'TooltipString', defaultStruct.localStepSize.description}...
        {'style', 'edit', 'string', ...
        num2str(defaultStruct.localStepSize.value), ...
        'tag', 'localStepSize', 'ForegroundColor', ...
        textColorStruct.localStepSize}...
        {'style', 'text', 'string', ''}};
    [~, ~, ~, paramsOut] = inputgui('geometry', geometry, ...
        'geomvert', geomvert, 'uilist', uilist, 'title', theTitle, ...
        'helpcom', 'pophelp(''pop_prepPipeline'')');
    % An empty result means the user cancelled the dialog.
    if(isempty(paramsOut))
        break;
    end
    % Convert the edited strings back to their declared types.
    [paramsOut, typeErrors, fNamesErrors] = ...
        changeType(paramsOut, defaultStruct);
    %paramsOut.detrendType = typeMenuString(paramsOut.detrendType);
    % Persist the (possibly invalid) values so they reappear on reopen.
    mainFigure = findobj('Type', 'Figure', '-and', 'Name', inputData.name);
    userdata = get(mainFigure, 'UserData');
    userdata.globalTrend = paramsOut;
    set(mainFigure, 'UserData', userdata);
    if isempty(typeErrors)
        break;
    end
    % Highlight the offending fields and restart the dialog.
    textColorStruct = highlightErrors(fNamesErrors, ...
        fNamesDefault, textColorStruct);
    displayErrors(typeErrors); % Displays the errors and restarts GUI
end
end
|
{"author": "VisLab", "repo": "EEG-Clean-Tools", "sha": "9ac9ea0c21d44b57f9e9f93b62ca727c7b75c73e", "save_path": "github-repos/MATLAB/VisLab-EEG-Clean-Tools", "path": "github-repos/MATLAB/VisLab-EEG-Clean-Tools/EEG-Clean-Tools-9ac9ea0c21d44b57f9e9f93b62ca727c7b75c73e/PrepPipeline/interface/globalTrendGUI.m"}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# write a correct test!
import unittest
import pygimli as pg
import numpy as np
class TestSparseMatrix(unittest.TestCase):
    """Tests for pygimli sparse-matrix conversions, operators and helpers."""

    def test_Convert(self):
        """Round-trip conversions: SparseMapMatrix <-> SparseMatrix <-> scipy."""
        col_ids = range(10)
        row_ids = range(10)
        values = np.ones(10)

        # SparseMapMatrix built from plain python arrays (10x10 identity).
        map_mat = pg.matrix.SparseMapMatrix(col_ids, row_ids, values)
        # SparseMap -> CRS (compressed row storage).
        crs_mat = pg.matrix.SparseMatrix(map_mat)
        # CRS -> SparseMap.
        map_back = pg.matrix.SparseMapMatrix(crs_mat)

        # Every representation should still act like the identity matrix.
        np.testing.assert_equal(map_back.getVal(1, 1), 1.0)
        np.testing.assert_equal(sum(crs_mat * np.ones(crs_mat.cols())),
                                crs_mat.rows())
        np.testing.assert_equal(sum(map_back * np.ones(map_back.cols())),
                                map_back.rows())

        # Shape bookkeeping must survive a map -> CRS -> map round trip.
        map_a = pg.matrix.SparseMapMatrix(r=3, c=15)
        crs = pg.matrix.SparseMatrix(map_a)
        map_b = pg.matrix.SparseMapMatrix(crs)
        vec3 = pg.Vector(3)
        vec15 = pg.Vector(15)
        np.testing.assert_equal((map_a * vec15).size(), 3)
        np.testing.assert_equal((map_a.transMult(vec3)).size(), 15)
        np.testing.assert_equal((crs * vec15).size(), 3)
        np.testing.assert_equal((crs.transMult(vec3)).size(), 15)
        np.testing.assert_equal(map_a.cols(), map_b.cols())
        np.testing.assert_equal(crs.cols(), map_a.cols())
        np.testing.assert_equal(crs.rows(), map_a.rows())
        np.testing.assert_equal(map_a.rows(), map_b.rows())

        # SparseMatrix -> numpy/scipy conversions.
        mm = pg.matrix.SparseMapMatrix(r=4, c=5)
        check_rows = [0, 0, 1, 2, 3]
        check_cols = [0, 1, 2, 3, 4]
        check_vals = np.array([1.0, 3, np.pi, 1e-12, -1.12345e13])
        for row, col, val in zip(check_rows, check_cols, check_vals):
            mm.addVal(row, col, val)
        # pg.solver.showSparseMatrix(mm, full=True)

        check_csr_rows = [0, 1, 2, 3, 4]
        check_csr_colPtr = [0, 2, 3, 4, 5]
        # CSC variants (currently unused, kept for reference).
        check_csc_cols = [0, 0, 1, 2, 3]
        check_csc_rowptr = [0, 1, 2, 3, 4, 5]

        r1, c1, v1 = pg.utils.sparseMatrix2Array(mm)
        np.testing.assert_allclose(r1, check_csr_rows)
        np.testing.assert_allclose(c1, check_csr_colPtr)
        np.testing.assert_allclose(v1, check_vals)

        # Conversion to scipy csr must agree whether starting from a CRS
        # matrix or from the map matrix directly.
        sci_csr = pg.utils.sparseMatrix2csr(pg.matrix.SparseMatrix(mm))
        np.testing.assert_equal(sci_csr.indices, check_csr_rows)
        np.testing.assert_equal(sci_csr.indptr, check_csr_colPtr)

        sci_csr = pg.utils.sparseMatrix2csr(mm)
        np.testing.assert_equal(sci_csr.indices, check_csr_rows)
        np.testing.assert_equal(sci_csr.indptr, check_csr_colPtr)

        # Coordinate-style (non-CRS) export must reproduce the inputs.
        r2, c2, v2 = pg.utils.sparseMatrix2Array(pg.matrix.SparseMatrix(mm),
                                                 getInCRS=False)
        np.testing.assert_allclose(r2, check_rows)
        np.testing.assert_allclose(c2, check_cols)
        np.testing.assert_allclose(v2, check_vals)

        # In-place addition of two map matrices must not raise.
        add_lhs = pg.matrix.SparseMapMatrix(col_ids, row_ids, values)
        add_rhs = pg.matrix.SparseMapMatrix(col_ids, row_ids, values)
        add_lhs += add_rhs

        # csr conversion: CRS source and map source must give identical data.
        sci_a = pg.utils.sparseMatrix2csr(pg.matrix.SparseMatrix(mm))
        sci_b = pg.utils.sparseMatrix2csr(mm)
        np.testing.assert_equal(len(sci_a.data), mm.size())
        np.testing.assert_equal(sci_a.data, sci_b.data)
        np.testing.assert_equal(sci_a.indices, sci_b.indices)
        np.testing.assert_equal(sci_a.indptr, sci_b.indptr)

        # Same agreement for coo conversion.
        sci_a = pg.utils.sparseMatrix2coo(pg.matrix.SparseMatrix(mm))
        sci_b = pg.utils.sparseMatrix2coo(mm)
        np.testing.assert_equal(len(sci_a.data), mm.size())
        np.testing.assert_equal(sci_a.data, sci_b.data)
        np.testing.assert_equal(sci_a.row, sci_b.row)
        np.testing.assert_equal(sci_a.col, sci_b.col)

        # toSparseMatrix must rebuild an equal matrix from a scipy csr.
        sci_csr = pg.utils.sparseMatrix2csr(pg.matrix.SparseMatrix(mm))
        np.testing.assert_equal(pg.utils.toSparseMatrix(sci_csr) == mm, True)

    def test_Access(self):
        # addVal(0, 1, 1.2) should round-trip to [0], [1], [1.2] after
        # conversion -- not yet implemented.
        pass

    def test_Operators(self):
        """Arithmetic operators on sparse matrices must compose."""
        col_ids = range(10)
        row_ids = range(10)
        values = np.ones(10)
        map_mat = pg.matrix.SparseMapMatrix(col_ids, row_ids, values)
        crs_mat = pg.matrix.SparseMatrix(map_mat)
        _ = crs_mat + crs_mat * 0.1 * 0.3

    def test_ComplexMatrix(self):
        """Solve a complex-valued stiffness system and cross-check solvers."""
        verbose = False
        grid = pg.createGrid(3, 3)
        # print(grid)
        # Complex cell coefficients a = 1 + 1j on every cell.
        alpha = pg.math.toComplex(np.ones(grid.cellCount()),
                                  np.ones(grid.cellCount()) * 1.0)
        A = pg.solver.createStiffnessMatrix(grid, a=alpha)
        pg.solver.solver.applyDirichlet(A, None, [0], [0.0])
        # pg.solver.showSparseMatrix(A)
        # pg.solver.assembleDirichletBC(A, [[grid.boundary(0), 0.0]])
        rhs = pg.math.toComplex(np.ones(A.rows()), np.ones(A.rows()) * 0.0)

        # Native solver: the solution must satisfy A x = b.
        sol = pg.solver.linSolve(A, rhs, verbose=verbose, solver='pg')
        np.testing.assert_allclose(A.mult(sol), rhs, rtol=1e-10)

        # scipy backend must agree with the native solver.
        sol_scipy = pg.solver.linSolve(A, rhs, verbose=verbose, solver='scipy')
        np.testing.assert_allclose(sol_scipy, sol, rtol=1e-10)

        # Solving the squeezed (real-block) system must match as well.
        sol_squeezed = pg.solver.linSolve(pg.utils.squeezeComplex(A),
                                          pg.utils.squeezeComplex(rhs),
                                          verbose=verbose, solver='pg')
        np.testing.assert_allclose(pg.utils.toComplex(sol_squeezed), sol,
                                   rtol=1e-10)

    def test_BlockMatrix(self):
        """BlockMatrix entries accumulate when added at the same position."""
        entry = pg.SparseMapMatrix(2, 2)
        entry.setVal(0, 0, 1.0)
        block = pg.BlockMatrix()
        block.add(entry, 0, 0)
        np.testing.assert_allclose(block.row(0), [1.0, 0.0], rtol=1e-10)
        # Adding again at (0, 0) doubles the entry.
        block.add(entry, 0, 0)
        np.testing.assert_allclose(block.row(0), [2.0, 0.0], rtol=1e-10)
        flat = block.sparseMapMatrix()
        np.testing.assert_allclose(flat.row(0), [2.0, 0.0], rtol=1e-10)
        # Adding outside the current shape grows the block matrix.
        block.add(entry, 10, 10)
        print(block)

    def test_Misc(self):
        """Row/column access and cleanRow/cleanCol on a filled map matrix."""
        D = pg.SparseMapMatrix(3, 4)
        for i in range(D.rows()):
            for j in range(D.cols()):
                D.setVal(i, j, 1.0)
        np.testing.assert_allclose(D.col(2), pg.Vector(D.rows(), 1.0))
        np.testing.assert_allclose(D.row(2), pg.Vector(D.cols(), 1.0))
        D.cleanRow(1)
        np.testing.assert_allclose(D.col(2), [1.0, 0.0, 1.0])
        D.cleanCol(1)
        np.testing.assert_allclose(D.row(2), [1.0, 0.0, 1.0, 1.0])
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
|
{"hexsha": "868ef11c4d8a4c4702be3aeb6845834a169da208", "size": 6394, "ext": "py", "lang": "Python", "max_stars_repo_path": "pygimli/testing/test_SparseMatrix.py", "max_stars_repo_name": "JuliusHen/gimli", "max_stars_repo_head_hexsha": "a5c5779261acfe5a53015c9ee6f7c9ed2dd6c57f", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 224, "max_stars_repo_stars_event_min_datetime": "2015-02-20T21:36:24.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T07:27:43.000Z", "max_issues_repo_path": "pygimli/testing/test_SparseMatrix.py", "max_issues_repo_name": "JuliusHen/gimli", "max_issues_repo_head_hexsha": "a5c5779261acfe5a53015c9ee6f7c9ed2dd6c57f", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 341, "max_issues_repo_issues_event_min_datetime": "2015-05-21T14:39:51.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T01:54:07.000Z", "max_forks_repo_path": "pygimli/testing/test_SparseMatrix.py", "max_forks_repo_name": "JuliusHen/gimli", "max_forks_repo_head_hexsha": "a5c5779261acfe5a53015c9ee6f7c9ed2dd6c57f", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 107, "max_forks_repo_forks_event_min_datetime": "2015-01-24T14:40:21.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-25T12:12:13.000Z", "avg_line_length": 34.192513369, "max_line_length": 83, "alphanum_fraction": 0.5991554582, "include": true, "reason": "import numpy", "num_tokens": 1874}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.