module Types.IND where
open import Data.Nat
open import Data.Fin hiding (_+_)
open import Data.Product
open import Function
open import Relation.Binary.PropositionalEquality hiding (Extensionality)
open import Types.Direction
open import Auxiliary.Extensionality
open import Auxiliary.RewriteLemmas
private
variable
m n : ℕ
----------------------------------------------------------------------
-- session type inductively with explicit rec
data Polarity : Set where
POS NEG : Polarity
mutual
data Type n : Set where
TUnit TInt : Type n
TPair : (T₁ : Type n) (T₂ : Type n) → Type n
TChan : (S : SType n) → Type n
data SType n : Set where
gdd : (G : GType n) → SType n
rec : (G : GType (suc n) ) → SType n
var : (p : Polarity) → (x : Fin n) → SType n
data GType n : Set where
transmit : (d : Dir) (T : Type n) (S : SType n) → GType n
choice : (d : Dir) (m : ℕ) (alt : Fin m → SType n) → GType n
end : GType n
TType = Type
-- weakening
weakenS : (n : ℕ) → SType m → SType (m + n)
weakenG : (n : ℕ) → GType m → GType (m + n)
weakenT : (n : ℕ) → TType m → TType (m + n)
weakenS n (gdd gst) = gdd (weakenG n gst)
weakenS n (rec gst) = rec (weakenG n gst)
weakenS n (var p x) = var p (inject+ n x)
weakenG n (transmit d t s) = transmit d (weakenT n t) (weakenS n s)
weakenG n (choice d m alt) = choice d m (weakenS n ∘ alt)
weakenG n end = end
weakenT n TUnit = TUnit
weakenT n TInt = TInt
weakenT n (TPair ty ty₁) = TPair (weakenT n ty) (weakenT n ty₁)
weakenT n (TChan x) = TChan (weakenS n x)
weaken1 : SType m → SType (suc m)
weaken1{m} stm with weakenS 1 stm
... | r rewrite n+1=suc-n {m} = r
module CheckWeaken where
s0 : SType 0
s0 = rec (transmit SND TUnit (var POS zero))
s1 : SType 1
s1 = rec (transmit SND TUnit (var POS zero))
s2 : SType 2
s2 = rec (transmit SND TUnit (var POS zero))
check-weakenS1 : weakenS 1 s0 ≡ s1
check-weakenS1 = cong rec (cong (transmit SND TUnit) refl)
check-weakenS2 : weakenS 2 s0 ≡ s2
check-weakenS2 = cong rec (cong (transmit SND TUnit) refl)
weaken1'N : Fin (suc n) → Fin n → Fin (suc n)
weaken1'N zero x = suc x
weaken1'N (suc i) zero = zero
weaken1'N (suc i) (suc x) = suc (weaken1'N i x)
weaken1'S : Fin (suc n) → SType n → SType (suc n)
weaken1'G : Fin (suc n) → GType n → GType (suc n)
weaken1'T : Fin (suc n) → TType n → TType (suc n)
weaken1'S i (gdd gst) = gdd (weaken1'G i gst)
weaken1'S i (rec gst) = rec (weaken1'G (suc i) gst)
weaken1'S i (var p x) = var p (weaken1'N i x)
weaken1'G i (transmit d t s) = transmit d (weaken1'T i t) (weaken1'S i s)
weaken1'G i (choice d m alt) = choice d m (weaken1'S i ∘ alt)
weaken1'G i end = end
weaken1'T i TUnit = TUnit
weaken1'T i TInt = TInt
weaken1'T i (TPair t₁ t₂) = TPair (weaken1'T i t₁) (weaken1'T i t₂)
weaken1'T i (TChan x) = TChan (weaken1'S i x)
weaken1S : SType n → SType (suc n)
weaken1G : GType n → GType (suc n)
weaken1T : Type n → Type (suc n)
weaken1S = weaken1'S zero
weaken1G = weaken1'G zero
weaken1T = weaken1'T zero
module CheckWeaken1' where
sxy : ∀ n → Fin (suc n) → SType n
sxy n x = rec (transmit SND TUnit (var POS x))
s00 : SType 0
s00 = sxy 0 zero
s10 : SType 1
s10 = sxy 1 zero
s11 : SType 1
s11 = sxy 1 (suc zero)
s22 : SType 2
s22 = sxy 2 (suc (suc zero))
check-weaken-s01 : weaken1'S zero s00 ≡ s10
check-weaken-s01 = refl
check-weaken-s1-s2 : weaken1'S zero s11 ≡ s22
check-weaken-s1-s2 = refl
check-weaken-s21 : weaken1'S (suc zero) (sxy 2 (suc zero)) ≡ sxy 3 (suc zero)
check-weaken-s21 = refl
--------------------------------------------------------------------
dual-pol : Polarity → Polarity
dual-pol POS = NEG
dual-pol NEG = POS
dual-pol-inv : ∀ p → dual-pol (dual-pol p) ≡ p
dual-pol-inv POS = refl
dual-pol-inv NEG = refl
swap-polS : (i : Fin (suc n)) → SType (suc n) → SType (suc n)
swap-polG : (i : Fin (suc n)) → GType (suc n) → GType (suc n)
swap-polT : (i : Fin (suc n)) → Type (suc n) → Type (suc n)
swap-polG i (transmit d t st) = transmit d (swap-polT i t) (swap-polS i st)
swap-polG i (choice d m alt) = choice d m (swap-polS i ∘ alt)
swap-polG i end = end
swap-polS i (gdd gst) = gdd (swap-polG i gst)
swap-polS i (rec st) = rec (swap-polG (suc i) st)
swap-polS zero (var p zero) = var (dual-pol p) zero
swap-polS (suc i) (var p zero) = var p zero
swap-polS zero (var p (suc x)) = var p (suc x)
swap-polS {suc n} (suc i) (var p (suc x)) = weaken1S (swap-polS i (var p x))
swap-polT i TUnit = TUnit
swap-polT i TInt = TInt
swap-polT i (TPair t₁ t₂) = TPair (swap-polT i t₁) (swap-polT i t₂)
swap-polT i (TChan x) = TChan (swap-polS i x)
--------------------------------------------------------------------
weak-weakN : (i : Fin (suc n)) (j : Fin (suc n)) (le : Data.Fin._≤_ j i) (x : Fin n)
→ weaken1'N (suc i) (weaken1'N j x) ≡ weaken1'N (inject₁ j) (weaken1'N i x)
weak-weakG : (i : Fin (suc n)) (j : Fin (suc n)) (le : Data.Fin._≤_ j i) (g : GType n)
→ weaken1'G (suc i) (weaken1'G j g) ≡ weaken1'G (inject₁ j) (weaken1'G i g)
weak-weakS : (i : Fin (suc n)) (j : Fin (suc n)) (le : Data.Fin._≤_ j i) (s : SType n)
→ weaken1'S (suc i) (weaken1'S j s) ≡ weaken1'S (inject₁ j) (weaken1'S i s)
weak-weakT : (i : Fin (suc n)) (j : Fin (suc n)) (le : Data.Fin._≤_ j i) (t : Type n)
→ weaken1'T (suc i) (weaken1'T j t) ≡ weaken1'T (inject₁ j) (weaken1'T i t)
weak-weakN zero zero le x = refl
weak-weakN (suc i) zero le x = refl
weak-weakN (suc i) (suc j) (s≤s le) zero = refl
weak-weakN{suc n} (suc i) (suc j) (s≤s le) (suc x) = cong suc (weak-weakN i j le x)
weak-weakG i j le (transmit d t s) = cong₂ (transmit d) (weak-weakT i j le t) (weak-weakS i j le s)
weak-weakG i j le (choice d m alt) = cong (choice d m) (ext (weak-weakS i j le ∘ alt))
weak-weakG i j le end = refl
weak-weakS i j le (gdd gst) = cong gdd (weak-weakG i j le gst)
weak-weakS i j le (rec gst) = cong rec (weak-weakG (suc i) (suc j) (s≤s le) gst)
weak-weakS i j le (var p x) = cong (var p) (weak-weakN i j le x)
weak-weakT i j le TUnit = refl
weak-weakT i j le TInt = refl
weak-weakT i j le (TPair t t₁) = cong₂ TPair (weak-weakT i j le t) (weak-weakT i j le t₁)
weak-weakT i j le (TChan s) = cong TChan (weak-weakS i j le s)
weaken1-weakenN : (m : ℕ) (j : Fin (suc n)) (x : Fin n)
→ inject+ m (weaken1'N j x) ≡ weaken1'N (inject+ m j) (inject+ m x)
weaken1-weakenN m zero zero = refl
weaken1-weakenN m zero (suc x) = refl
weaken1-weakenN m (suc j) zero = refl
weaken1-weakenN m (suc j) (suc x) = cong suc (weaken1-weakenN m j x)
weaken1-weakenS : (m : ℕ) (j : Fin (suc n)) (s : SType n)
→ weakenS m (weaken1'S j s) ≡ weaken1'S (inject+ m j) (weakenS m s)
weaken1-weakenG : (m : ℕ) (j : Fin (suc n)) (g : GType n)
→ weakenG m (weaken1'G j g) ≡ weaken1'G (inject+ m j) (weakenG m g)
weaken1-weakenT : (m : ℕ) (j : Fin (suc n)) (t : Type n)
→ weakenT m (weaken1'T j t) ≡ weaken1'T (inject+ m j) (weakenT m t)
weaken1-weakenS m j (gdd gst) = cong gdd (weaken1-weakenG m j gst)
weaken1-weakenS m j (rec gst) = cong rec (weaken1-weakenG m (suc j) gst)
weaken1-weakenS m zero (var p zero) = refl
weaken1-weakenS m zero (var p (suc x)) = refl
weaken1-weakenS {suc n} m (suc j) (var p zero) = refl
weaken1-weakenS {suc n} m (suc j) (var p (suc x)) = cong (var p) (cong suc (weaken1-weakenN m j x))
weaken1-weakenG m j (transmit d t s) = cong₂ (transmit d) (weaken1-weakenT m j t) (weaken1-weakenS m j s)
weaken1-weakenG m j (choice d m₁ alt) = cong (choice d m₁) (ext (weaken1-weakenS m j ∘ alt))
weaken1-weakenG m j end = refl
weaken1-weakenT m j TUnit = refl
weaken1-weakenT m j TInt = refl
weaken1-weakenT m j (TPair t t₁) = cong₂ TPair (weaken1-weakenT m j t) (weaken1-weakenT m j t₁)
weaken1-weakenT m j (TChan x) = cong TChan (weaken1-weakenS m j x)
--------------------------------------------------------------------
-- weakening of later index
{-# TERMINATING #-}
swap-weaken1'G : (i : Fin (suc (suc n))) (j : Fin′ i) (gst : GType (suc n)) →
swap-polG (inject j) (weaken1'G i gst) ≡ weaken1'G i (swap-polG (inject! j) gst)
swap-weaken1'S : (i : Fin (suc (suc n))) (j : Fin′ i) (sst : SType (suc n)) →
swap-polS (inject j) (weaken1'S i sst) ≡ weaken1'S i (swap-polS (inject! j) sst)
swap-weaken1'T : (i : Fin (suc (suc n))) (j : Fin′ i) (t : Type (suc n)) →
swap-polT (inject j) (weaken1'T i t) ≡ weaken1'T i (swap-polT (inject! j) t)
swap-weaken1'G i j (transmit d t s) = cong₂ (transmit d) (swap-weaken1'T i j t) (swap-weaken1'S i j s)
swap-weaken1'G i j (choice d m alt) = cong (choice d m) (ext (swap-weaken1'S i j ∘ alt))
swap-weaken1'G i j end = refl
swap-weaken1'S i j (gdd gst) = cong gdd (swap-weaken1'G i j gst)
swap-weaken1'S i j (rec gst) = cong rec (swap-weaken1'G (suc i) (suc j) gst)
swap-weaken1'S zero () (var p x)
swap-weaken1'S (suc i) zero (var p zero) = refl
swap-weaken1'S (suc i) zero (var p (suc x)) = refl
swap-weaken1'S (suc i) (suc j) (var p zero) = refl
swap-weaken1'S{suc n} (suc i) (suc j) (var p (suc x)) rewrite (weak-weakS i zero z≤n (swap-polS (inject! j) (var p x))) =
let sws = swap-weaken1'S{n} i j (var p x) in cong (weaken1'S zero) sws
swap-weaken1'T i j TUnit = refl
swap-weaken1'T i j TInt = refl
swap-weaken1'T i j (TPair t₁ t₂) = cong₂ TPair (swap-weaken1'T i j t₁) (swap-weaken1'T i j t₂)
swap-weaken1'T i j (TChan s) = cong TChan (swap-weaken1'S i j s)
--------------------------------------------------------------------
-- weakening of earlier index
{-# TERMINATING #-}
swap-weaken1'S< : (i : Fin (suc n)) (j : Fin (suc n)) (le : Data.Fin._≤_ j i) → (s : SType (suc n)) →
swap-polS (suc i) (weaken1'S (inject₁ j) s) ≡ weaken1'S (inject₁ j) (swap-polS i s)
swap-weaken1'G< : (i : Fin (suc n)) (j : Fin (suc n)) (le : Data.Fin._≤_ j i) → (g : GType (suc n)) →
swap-polG (suc i) (weaken1'G (inject₁ j) g) ≡ weaken1'G (inject₁ j) (swap-polG i g)
swap-weaken1'T< : (i : Fin (suc n)) (j : Fin (suc n)) (le : Data.Fin._≤_ j i) → (t : Type (suc n)) →
swap-polT (suc i) (weaken1'T (inject₁ j) t) ≡ weaken1'T (inject₁ j) (swap-polT i t)
swap-weaken1'S< i j le (gdd gst) = cong gdd (swap-weaken1'G< i j le gst)
swap-weaken1'S< i j le (rec gst) = cong rec (swap-weaken1'G< (suc i) (suc j) (s≤s le) gst)
swap-weaken1'S< zero zero le (var p x) = refl
swap-weaken1'S< (suc i) zero le (var p x) = refl
swap-weaken1'S<{suc n} (suc i) (suc j) le (var p zero) = refl
swap-weaken1'S<{suc n} (suc i) (suc j) (s≤s le) (var p (suc x)) rewrite (weak-weakS (inject₁ j) zero z≤n (swap-polS i (var p x))) =
let sws = swap-weaken1'S<{n} i j le (var p x) in cong (weaken1'S zero) sws
swap-weaken1'G< i j le (transmit d t s) = cong₂ (transmit d) (swap-weaken1'T< i j le t) (swap-weaken1'S< i j le s)
swap-weaken1'G< i j le (choice d m alt) = cong (choice d m) (ext ((swap-weaken1'S< i j le) ∘ alt))
swap-weaken1'G< i j le end = refl
swap-weaken1'T< i j le TUnit = refl
swap-weaken1'T< i j le TInt = refl
swap-weaken1'T< i j le (TPair t t₁) = cong₂ (TPair) (swap-weaken1'T< i j le t) (swap-weaken1'T< i j le t₁)
swap-weaken1'T< i j le (TChan s) = cong (TChan) (swap-weaken1'S< i j le s)
swap-weakenS : (i : Fin (suc n)) → (s : SType (suc n)) →
swap-polS (suc i) (weaken1S s) ≡ weaken1S (swap-polS i s)
swap-weakenG : (i : Fin (suc n)) → (g : GType (suc n)) →
swap-polG (suc i) (weaken1G g) ≡ weaken1G (swap-polG i g)
swap-weakenT : (i : Fin (suc n)) → (t : Type (suc n)) →
swap-polT (suc i) (weaken1T t) ≡ weaken1T (swap-polT i t)
swap-weakenS i s = swap-weaken1'S< i zero z≤n s
swap-weakenG i g = swap-weaken1'G< i zero z≤n g
swap-weakenT i t = swap-weaken1'T< i zero z≤n t
--------------------------------------------------------------------
-- swapping of general weakening
{-# TERMINATING #-}
swap-weakenG' : (m : ℕ) (j : Fin (suc n)) (gst : GType (suc n))
→ swap-polG (inject+ m j) (weakenG m gst) ≡ weakenG m (swap-polG j gst)
swap-weakenS' : (m : ℕ) (j : Fin (suc n)) (s : SType (suc n))
→ swap-polS (inject+ m j) (weakenS m s) ≡ weakenS m (swap-polS j s)
swap-weakenT' : (m : ℕ) (j : Fin (suc n)) (t : Type (suc n))
→ swap-polT (inject+ m j) (weakenT m t) ≡ weakenT m (swap-polT j t)
swap-weakenG' m j (transmit d t s) = cong₂ (transmit d) (swap-weakenT' m j t) (swap-weakenS' m j s)
swap-weakenG' m j (choice d m₁ alt) = cong (choice d m₁) (ext (swap-weakenS' m j ∘ alt))
swap-weakenG' m j end = refl
swap-weakenS' m j (gdd gst) = cong gdd (swap-weakenG' m j gst)
swap-weakenS' m j (rec gst) = cong rec (swap-weakenG' m (suc j) gst)
swap-weakenS' m zero (var p zero) = refl
swap-weakenS' m (suc j) (var p zero) = refl
swap-weakenS' m zero (var p (suc zero)) = refl
swap-weakenS' m zero (var p (suc (suc x))) = refl
swap-weakenS' {suc n} m (suc j) (var p (suc x)) rewrite (weaken1-weakenS m zero (swap-polS j (var p x))) =
let rst = swap-weakenS'{n} m j (var p x) in cong weaken1S rst
swap-weakenT' m j TUnit = refl
swap-weakenT' m j TInt = refl
swap-weakenT' m j (TPair t t₁) = cong₂ TPair (swap-weakenT' m j t) (swap-weakenT' m j t₁)
swap-weakenT' m j (TChan x) = cong TChan (swap-weakenS' m j x)
--------------------------------------------------------------------
{-# TERMINATING #-}
swap-pol-invS : (i : Fin (suc n)) → (st : SType (suc n)) →
swap-polS i (swap-polS i st) ≡ st
swap-pol-invG : (i : Fin (suc n)) → (st : GType (suc n)) →
swap-polG i (swap-polG i st) ≡ st
swap-pol-invT : (i : Fin (suc n)) → (ty : Type (suc n)) →
swap-polT i (swap-polT i ty) ≡ ty
swap-pol-invS i (gdd gst) = cong gdd (swap-pol-invG i gst)
swap-pol-invS i (rec gst) = cong rec (swap-pol-invG (suc i) gst)
swap-pol-invS zero (var p zero) rewrite dual-pol-inv p = refl
swap-pol-invS (suc i) (var p zero) = refl
swap-pol-invS zero (var p (suc x)) rewrite dual-pol-inv p = refl
swap-pol-invS {suc n} (suc i) (var p (suc x))
rewrite swap-weakenS i (swap-polS i (var p x)) | swap-pol-invS i (var p x) = refl
-- extensionality needed
swap-pol-invG i (transmit d t s) = cong₂ (transmit d) (swap-pol-invT i t) (swap-pol-invS i s)
swap-pol-invG i (choice d m alt) = cong (choice d m) (ext R)
where R : ∀ x → swap-polS i (swap-polS i (alt x)) ≡ alt x
R x rewrite swap-pol-invS i (alt x) = refl
swap-pol-invG i end = refl
swap-pol-invT i TUnit = refl
swap-pol-invT i TInt = refl
swap-pol-invT i (TPair ty ty₁) = cong₂ TPair (swap-pol-invT i ty) (swap-pol-invT i ty₁)
swap-pol-invT i (TChan x) = cong TChan (swap-pol-invS i x)
--------------------------------------------------------------------
-- LM duality
dualS : SType n → SType n
dualG : GType n → GType n
dualG (transmit d t st) = transmit (dual-dir d) t (dualS st)
dualG (choice d m alt) = choice (dual-dir d) m (dualS ∘ alt)
dualG end = end
dualS (gdd gst) = gdd (dualG gst)
dualS (rec gst) = rec (swap-polG zero (dualG gst))
dualS (var p x) = var (dual-pol p) x
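-- A small sanity check for duality, in the style of CheckWeaken above.
-- This module is a sketch that was not part of the original file; it assumes
-- Types.Direction provides constructors SND and RCV with dual-dir SND ≡ RCV.
module CheckDual where
  s : SType 0
  s = rec (transmit SND TUnit (var POS zero))
  -- dualising flips the transmission direction, while the polarity swap at
  -- the binder restores the polarity of the bound recursion variable
  check-dual : dualS s ≡ rec (transmit RCV TUnit (var POS zero))
  check-dual = refl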
--------------------------------------------------------------------
dual-weakenS : (i : Fin (suc n)) (s : SType n) → dualS (weaken1'S i s) ≡ weaken1'S i (dualS s)
dual-weakenG : (i : Fin (suc n)) (g : GType n) → dualG (weaken1'G i g) ≡ weaken1'G i (dualG g)
dual-weakenS i (gdd gst) = cong gdd (dual-weakenG i gst)
dual-weakenS i (rec gst) rewrite (sym (swap-weaken1'G (suc i) zero (dualG gst))) = cong rec (cong (swap-polG zero) (dual-weakenG (suc i) gst))
dual-weakenS i (var p x) = refl
dual-weakenG i (transmit d t s) = cong₂ (transmit (dual-dir d)) refl (dual-weakenS i s)
dual-weakenG i (choice d m alt) = cong (choice (dual-dir d) m) (ext (dual-weakenS i ∘ alt))
dual-weakenG i end = refl
dual-weakenS' : (m : ℕ) (s : SType n) → dualS (weakenS m s) ≡ weakenS m (dualS s)
dual-weakenG' : (m : ℕ) (g : GType n) → dualG (weakenG m g) ≡ weakenG m (dualG g)
dual-weakenS' n (gdd gst) = cong gdd (dual-weakenG' n gst)
dual-weakenS' n (rec gst) rewrite (sym (swap-weakenG' n zero (dualG gst))) = cong rec (cong (swap-polG zero) (dual-weakenG' n gst))
dual-weakenS' n (var p x) = refl
dual-weakenG' n (transmit d t s) = cong₂ (transmit (dual-dir d)) refl (dual-weakenS' n s)
dual-weakenG' n (choice d m alt) = cong (choice (dual-dir d) m) (ext (dual-weakenS' n ∘ alt))
dual-weakenG' n end = refl
--------------------------------------------------------------------
aux : (i : Fin n) (x : Fin n) (p' : Polarity) →
var p' (suc (suc x)) ≡ weaken1S (var p' (suc x))
aux i x p = refl
var-suc : (i : Fin n) (x : Fin n) (p : Polarity) →
∃ λ p' → swap-polS (suc i) (var p (suc x)) ≡ var p' (suc x)
var-suc zero zero p = dual-pol p , refl
var-suc (suc i) zero p = p , refl
var-suc zero (suc x) p = p , refl
var-suc (suc i) (suc x) p
with var-suc i x p
... | p' , snd
rewrite sym (aux i x p') = p' , cong weaken1S snd
--------------------------------------------------------------------
{-# TERMINATING #-}
swap-swapG : (gst : GType (suc n)) → (i : Fin (suc n)) (j : Fin′ i) →
swap-polG i (swap-polG (inject j) gst) ≡ swap-polG (inject j) (swap-polG i gst)
swap-swapT : (t : Type (suc n)) → (i : Fin (suc n)) (j : Fin′ i) →
swap-polT i (swap-polT (inject j) t) ≡ swap-polT (inject j) (swap-polT i t)
swap-swapS : (st : SType (suc n)) → (i : Fin (suc n)) (j : Fin′ i) →
swap-polS i (swap-polS (inject j) st) ≡ swap-polS (inject j) (swap-polS i st)
swap-swapG (transmit d t s) i j = cong₂ (transmit d) (swap-swapT t i j) (swap-swapS s i j)
swap-swapG (choice d m alt) i j = cong (choice d m) (ext (λ x → swap-swapS (alt x) i j))
swap-swapG end i j = refl
swap-swapT TUnit i j = refl
swap-swapT TInt i j = refl
swap-swapT (TPair t t₁) i j = cong₂ TPair (swap-swapT t i j) (swap-swapT t₁ i j)
swap-swapT (TChan x) i j = cong TChan (swap-swapS x i j)
swap-swapS (gdd gst) i j = cong gdd (swap-swapG gst i j)
swap-swapS (rec gst) i j = cong rec (swap-swapG gst (suc i) (suc j))
swap-swapS (var p zero) zero ()
swap-swapS (var p zero) (suc i) zero = refl
swap-swapS (var p zero) (suc i) (suc j) = refl
swap-swapS (var p (suc x)) zero ()
swap-swapS (var p (suc x)) (suc i) zero
with var-suc i x p
... | p' , snd rewrite snd = refl
swap-swapS {suc n} (var p (suc x)) (suc i) (suc j)
rewrite swap-weakenS i (swap-polS (inject j) (var p x))
with swap-swapS (var p x) i j
... | pxij rewrite swap-weakenS (inject j) (swap-polS i (var p x))
= cong weaken1S pxij
{-# TERMINATING #-}
swap-pol-dualG : (i : Fin (suc n)) (gst : GType (suc n)) →
swap-polG i (dualG gst) ≡ dualG (swap-polG i gst)
swap-pol-dualS : (i : Fin (suc n)) (st : SType (suc n)) →
swap-polS i (dualS st) ≡ dualS (swap-polS i st)
swap-pol-dualG i (transmit d t s) = cong (transmit _ (swap-polT i t)) (swap-pol-dualS i s)
swap-pol-dualG i (choice d m alt) = cong (choice _ _) (ext (swap-pol-dualS i ∘ alt))
swap-pol-dualG i end = refl
swap-pol-dualS i (gdd gst) = cong gdd (swap-pol-dualG i gst)
swap-pol-dualS i (rec gst) rewrite sym (swap-pol-dualG (suc i) gst) =
cong rec (swap-swapG (dualG gst) (suc i) zero)
swap-pol-dualS zero (var p zero) = refl
swap-pol-dualS (suc i) (var p zero) = refl
swap-pol-dualS zero (var p (suc x)) = refl
swap-pol-dualS {suc n} (suc i) (var p (suc x))
rewrite (dual-weakenS zero (swap-polS i (var p x))) = cong weaken1S (swap-pol-dualS i (var p x))
--------------------------------------------------------------------
dual-invS : (st : SType n) → st ≡ dualS (dualS st)
dual-invG : (gst : GType n) → gst ≡ dualG (dualG gst)
dual-invS (gdd gst) = cong gdd (dual-invG gst)
dual-invS (rec gst) rewrite sym (swap-pol-dualG zero (dualG gst)) | swap-pol-invG zero (dualG (dualG gst)) = cong rec (dual-invG gst)
dual-invS (var p x) rewrite dual-pol-inv p = refl
dual-invG (transmit d t s) rewrite dual-dir-inv d = cong₂ (transmit d) refl (dual-invS s)
dual-invG (choice d m alt) rewrite dual-dir-inv d = cong (choice d m) (ext R)
where R : (x : Fin m) → alt x ≡ dualS (dualS (alt x))
R x rewrite sym (dual-invS (alt x)) = refl
dual-invG end = refl
dual-if : Polarity → SType n → SType n
dual-if POS s = s
dual-if NEG s = dualS s
dual-if-dual : (p : Polarity) (ist : SType 0) → dual-if p ist ≡ dual-if (dual-pol p) (dualS ist)
dual-if-dual POS ist = (dual-invS ist)
dual-if-dual NEG ist = refl
--------------------------------------------------------------------
-- substitution
st-substS : SType (suc n) → Fin (suc n) → SType 0 → SType n
st-substG : GType (suc n) → Fin (suc n) → SType 0 → GType n
st-substT : Type (suc n) → Fin (suc n) → SType 0 → Type n
st-substS (gdd gst) i st0 = gdd (st-substG gst i st0)
st-substS (rec gst) i st0 = rec (st-substG gst (suc i) st0)
st-substS {n} (var p zero) zero st0 = weakenS n (dual-if p st0)
st-substS {suc n} (var p zero) (suc i) st0 = var p zero
st-substS {suc n} (var p (suc x)) zero st0 = var p x
st-substS {suc n} (var p (suc x)) (suc i) st0 = weaken1S (st-substS (var p x) i st0)
st-substG (transmit d t s) i st0 = transmit d (st-substT t i st0) (st-substS s i st0)
st-substG (choice d m alt) i st0 = choice d m (λ j → st-substS (alt j) i st0)
st-substG end i st0 = end
st-substT TUnit i st0 = TUnit
st-substT TInt i st0 = TInt
st-substT (TPair ty ty₁) i st0 = TPair (st-substT ty i st0) (st-substT ty₁ i st0)
st-substT (TChan st) i st0 = TChan (st-substS st i st0)
--------------------------------------------------------------------
trivial-subst-var : (p : Polarity) (x : Fin n) (ist₁ ist₂ : SType 0)
→ st-substS (var p (suc x)) zero ist₁ ≡ st-substS (var p (suc x)) zero ist₂
trivial-subst-var p zero ist1 ist2 = refl
trivial-subst-var p (suc x) ist1 ist2 = refl
trivial-subst-var' : (p : Polarity) (i : Fin n) (ist₁ ist₂ : SType 0)
→ st-substS (var p zero) (suc i) ist₁ ≡ st-substS (var p zero) (suc i) ist₂
trivial-subst-var' p zero ist1 ist2 = refl
trivial-subst-var' p (suc x) ist1 ist2 = refl
--------------------------------------------------------------------
-- equivalence
variable
t t₁ t₂ t₁' t₂' : Type n
s s₁ s₂ : SType n
g g₁ g₂ : GType n
unfold : SType 0 → GType 0
unfold (gdd gst) = gst
unfold (rec gst) = st-substG gst zero (rec gst)
-- type equivalence
data EquivT (R : SType n → SType n → Set) : Type n → Type n → Set where
eq-unit : EquivT R TUnit TUnit
eq-int : EquivT R TInt TInt
eq-pair : EquivT R t₁ t₁' → EquivT R t₂ t₂' → EquivT R (TPair t₁ t₂) (TPair t₁' t₂')
eq-chan : R s₁ s₂ → EquivT R (TChan s₁) (TChan s₂)
-- session type equivalence
data EquivG (R : SType n → SType n → Set) : GType n → GType n → Set where
eq-transmit : (d : Dir) → EquivT R t₁ t₂ → R s₁ s₂ → EquivG R (transmit d t₁ s₁) (transmit d t₂ s₂)
eq-choice : ∀ {alt alt'} → (d : Dir) → ((i : Fin m) → R (alt i) (alt' i)) → EquivG R (choice d m alt) (choice d m alt')
eq-end : EquivG R end end
record Equiv (s₁ s₂ : SType 0) : Set where
coinductive
field force : EquivG Equiv (unfold s₁) (unfold s₂)
open Equiv
_≈_ = Equiv
_≈'_ = EquivG Equiv
_≈ᵗ_ = EquivT Equiv
-- reflexive
≈-refl : s ≈ s
≈'-refl : g ≈' g
≈ᵗ-refl : t ≈ᵗ t
force (≈-refl {s}) = ≈'-refl
≈'-refl {transmit d t s} = eq-transmit d ≈ᵗ-refl ≈-refl
≈'-refl {choice d m alt} = eq-choice d (λ i → ≈-refl)
≈'-refl {end} = eq-end
≈ᵗ-refl {TUnit} = eq-unit
≈ᵗ-refl {TInt} = eq-int
≈ᵗ-refl {TPair t t₁} = eq-pair ≈ᵗ-refl ≈ᵗ-refl
≈ᵗ-refl {TChan x} = eq-chan ≈-refl
-- symmetric
≈-symm : s₁ ≈ s₂ → s₂ ≈ s₁
≈'-symm : g₁ ≈' g₂ → g₂ ≈' g₁
≈ᵗ-symm : t₁ ≈ᵗ t₂ → t₂ ≈ᵗ t₁
force (≈-symm s₁≈s₂) = ≈'-symm (force s₁≈s₂)
≈'-symm (eq-transmit d x x₁) = eq-transmit d (≈ᵗ-symm x) (≈-symm x₁)
≈'-symm (eq-choice d x) = eq-choice d (≈-symm ∘ x)
≈'-symm eq-end = eq-end
≈ᵗ-symm eq-unit = eq-unit
≈ᵗ-symm eq-int = eq-int
≈ᵗ-symm (eq-pair t₁≈ᵗt₂ t₁≈ᵗt₃) = eq-pair (≈ᵗ-symm t₁≈ᵗt₂) (≈ᵗ-symm t₁≈ᵗt₃)
≈ᵗ-symm (eq-chan x) = eq-chan (≈-symm x)
module Unmarshal
# package code goes here
# Helper function: print `str` indented by `verboseLvl` tab stops
function prettyPrint(verboseLvl, str)
    tabs = "\t" ^ verboseLvl
    println("$(tabs)$(str)")
end
export unmarshal # returns a reconstructed variable from a JSON parsed string
using Requires
using JSON
import Missings: Missing, missing
import Nullables: Nullable
function __init__()
@require LazyJSON="fc18253b-5e1b-504c-a4a2-9ece4944c004" include("lazyjson.jl")
end
unmarshal(::Type{Any}, x::String, verbose :: Bool = false, verboseLvl :: Int = 0) = x
unmarshal(::Type{Any}, x, verbose :: Bool = false, verboseLvl :: Int = 0) = x
function unmarshal(::Type{Any}, xs::Union{Vector{E}, AbstractArray}, verbose :: Bool = false, verboseLvl :: Int = 0) where E
if verbose
prettyPrint(verboseLvl, "Any{$(eltype(xs))}")
prettyPrint(verboseLvl, "List")
verboseLvl += 1
end
[(unmarshal(eltype(xs), x, verbose, verboseLvl) for x in xs)...]
end
function unmarshal(DT :: Type{String}, parsedJson :: String, verbose :: Bool = false, verboseLvl :: Int = 0)
if verbose
prettyPrint(verboseLvl, "$(DT) (String)")
verboseLvl += 1
end
parsedJson
end
function unmarshal(::Type{Vector{E}}, parsedJson::Union{Vector, AbstractArray}, verbose :: Bool = false, verboseLvl :: Int = 0) where E
if verbose
prettyPrint(verboseLvl, "Vector{$E}")
verboseLvl += 1
end
[(unmarshal(E, field, verbose, verboseLvl) for field in parsedJson)...]
end
unmarshal(::Type{Array{E}}, xs::Union{Vector, AbstractArray}, verbose :: Bool = false, verboseLvl :: Int = 0) where E = unmarshal(Vector{E}, xs, verbose, verboseLvl)
function unmarshal(::Type{Array{E, N}}, parsedJson::Union{Vector, AbstractArray}, verbose :: Bool = false, verboseLvl :: Int = 0) where {E, N}
if verbose
prettyPrint(verboseLvl, "Array{$E, $N}")
verboseLvl += 1
end
cat((unmarshal(Array{E,N-1}, x, verbose, verboseLvl) for x in parsedJson)..., dims=N)
end
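# Usage sketch for the N-dimensional case above (JSON.jl serializes a Matrix
# column-major, as an array of columns, which `cat(...; dims=N)` reassembles):
#
#     julia> unmarshal(Array{Int, 2}, JSON.parse(JSON.json([1 2; 3 4]))) == [1 2; 3 4]
#     true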
unmarshal(::Type{Array{E, N} where E}, xs::Union{Vector, AbstractArray}, verbose :: Bool = false, verboseLvl :: Int = 0) where N = unmarshal(Array{Any, N}, xs, verbose, verboseLvl)
unmarshal(::Type{Array}, xs::Union{Vector,AbstractArray}, verbose :: Bool = false, verboseLvl :: Int = 0) = unmarshal(Vector{Any}, xs, verbose, verboseLvl)
"""
unmarshal(T, dict[, verbose[, verboselvl]])
Reconstructs an object of Type T using the dictionary output of a `JSON.parse` or `LazyJSON.parse`.
Set verbose `true` to get debug information about how the data hierarchy is unmarshalled. This might be useful to track down parsing errors and/or mismatches between the JSON object and the Type definition.
# Examples
```jldoctest
julia> using JSON

julia> var = randn(Float64, 5); # Should work for most other variations of types you can think of

julia> unmarshal(typeof(var), JSON.parse(JSON.json(var))) == var
true
```
or, using LazyJSON:
```jldoctest
julia> using LazyJSON

julia> var = randn(Float64, 5);

julia> unmarshal(typeof(var), LazyJSON.parse(JSON.json(var))) == var
true
```
"""
function unmarshal(DT :: Type, parsedJson :: AbstractDict, verbose :: Bool = false, verboseLvl :: Int = 0)
if verbose
prettyPrint(verboseLvl, "$(DT) AbstractDict")
verboseLvl += 1
end
if !isconcretetype(DT)
throw(ArgumentError("Cannot unmarshal a non-leaf type $(DT) without a custom specialization"))
end
tup = ()
for iter in fieldnames(DT)
DTNext = fieldtype(DT,iter)
if verbose
prettyPrint(verboseLvl-1, "\\--> $(iter) <: $(DTNext) ")
end
if !haskey(parsedJson, string(iter)) || parsedJson[string(iter)] === nothing
# check whether DTNext is compatible with any scheme for missing values
val = if DTNext <: Nullable
DTNext()
elseif Missing <: DTNext
missing
elseif Nothing <: DTNext
Nothing()
elseif !haskey(parsedJson, string(iter))
throw(ArgumentError("Key $(string(iter)) is missing from the structure $DT, and field is neither Nullable nor Missings nor Nothing-compatible"))
else
throw(ArgumentError("Key $(string(iter)) is null, but the field is neither Nullable nor Missings nor Nothing-compatible"))
end
else
val = unmarshal( DTNext, parsedJson[string(iter)], verbose, verboseLvl)
end
tup = (tup..., val)
end
DT(tup...)
end
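# Usage sketch for the generic method above; the `Point` struct is a
# hypothetical example, not part of this package:
#
#     struct Point
#         x :: Float64
#         y :: Float64
#     end
#
#     julia> unmarshal(Point, JSON.parse("""{"x": 1.0, "y": 2.0}"""))
#     Point(1.0, 2.0)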
function unmarshal(DT :: Type{Pair{TF, TS}}, parsedJson :: AbstractDict, verbose :: Bool = false, verboseLvl :: Int = 0) where {TF, TS}
if verbose
prettyPrint(verboseLvl, "Pair $(DT) AbstractDict")
verboseLvl += 1
end
if (length(keys(parsedJson)) > 1)
@warn "Expected a single pair, but found a multi-entry dictionary, just using the first key: $(collect(keys(parsedJson))[1])"
end
    firstVal = collect(keys(parsedJson))[1]
    secondVal = parsedJson[firstVal]
# @show firstVal, secondVal
if !isa(firstVal, TF)
try
firstVal = TF(firstVal)
catch ex
firstVal = unmarshal(TF, JSON.parse(firstVal), verbose, verboseLvl)
end
end
if !isa(secondVal, TS)
try
secondVal = TS(secondVal)
catch ex
throw(ArgumentError("Error trying to convert value $(secondVal) of type $(typeof(secondVal)) to a $(TS), please provide a conversion"))
end
end
# @show firstVal, secondVal
(firstVal => secondVal)
end
function unmarshal(::Type{T}, parsedJson :: Union{Vector, AbstractArray}, verbose :: Bool = false, verboseLvl :: Int = 0) where {T <: Tuple}
if verbose
prettyPrint(verboseLvl, "Tuple: $T")
verboseLvl += 1
end
len = length(parsedJson) # fallback value
try
# If we have concrete lengths, rather use data type and ignore length of JSON
len = fieldcount(T)
catch ex
# If we do not have concrete lengths, use length of JSON
end
((unmarshal(fieldtype(T, i), parsedJson[i], verbose, verboseLvl) for i in 1:len)...,)
end
function _unmarshal(::Type{T}, key :: Symbol, parsedJson :: AbstractDict, verbose :: Bool = false, verboseLvl :: Int = 0) where T
if verbose
prettyPrint(verboseLvl - 1, "\\--> $(key) ")
end
unmarshal(T, parsedJson[string(key)], verbose, verboseLvl)
end
function unmarshal(::Type{NamedTuple{NS, TS}}, parsedJson :: AbstractDict, verbose :: Bool = false, verboseLvl :: Int = 0) where {NS, TS}
if verbose
prettyPrint(verboseLvl, "NamedTuple{$NS,$TS}")
verboseLvl += 1
end
NamedTuple{NS}(_unmarshal(T, key, parsedJson, verbose, verboseLvl) for (i, (T, key)) in enumerate(zip(TS.parameters, NS)))
end
unmarshal(::Type{NamedTuple{NS}}, parsedJson :: AbstractDict, verbose :: Bool = false, verboseLvl :: Int = 0) where NS = unmarshal(NamedTuple{NS, NTuple{length(NS), Any}}, parsedJson, verbose, verboseLvl)
unmarshal(::Type{NamedTuple}, parsedJson :: AbstractDict, verbose :: Bool = false, verboseLvl :: Int = 0) = unmarshal(NamedTuple{(Symbol.(keys(parsedJson))...,)}, parsedJson, verbose, verboseLvl)
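# Usage sketch for the NamedTuple methods above (field types default to Any
# when only the names are given):
#
#     julia> unmarshal(NamedTuple{(:a, :b)}, JSON.parse("""{"a": 1, "b": "x"}"""))
#     (a = 1, b = "x")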
function unmarshal(DT :: Type{T}, parsedJson :: AbstractDict, verbose :: Bool = false, verboseLvl :: Int = 0) where T <: Dict
if verbose
prettyPrint(verboseLvl, "$(DT) Dict ")
verboseLvl += 1
end
val = DT()
for iter in keys(parsedJson)
if verbose
prettyPrint(verboseLvl - 1, "\\--> $(iter) $(valtype(DT))")
end
tmp = unmarshal(valtype(DT), parsedJson[iter], verbose, verboseLvl)
if keytype(DT) <: AbstractString
val[iter] = tmp
else
try
                val[unmarshal(keytype(DT), JSON.parse(iter), verbose, verboseLvl)] = tmp # Use JSON.parse and unmarshal to cast from the type of iter to keytype(DT)
catch ex
val[keytype(DT)(iter)] = tmp # Try direct casting, which will hopefully generate a readable enough exception error if it fails
end
end
end
val
end
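# Usage sketch for the Dict method above; non-String keys are recovered by
# round-tripping each key through JSON.parse:
#
#     julia> unmarshal(Dict{Int, String}, JSON.parse("""{"1": "one"}"""))
#     Dict{Int64, String} with 1 entry:
#       1 => "one"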
unmarshal(::Type{T}, x::Number, verbose :: Bool = false, verboseLvl :: Int = 0) where T <: Number = T(x)
unmarshal(::Type{Nullable{T}}, x, verbose :: Bool = false, verboseLvl :: Int = 0) where T = Nullable(unmarshal(T, x))
unmarshal(::Type{Nullable{T}}, x::Nothing, verbose :: Bool = false, verboseLvl :: Int = 0) where T = Nullable{T}()
unmarshal(::Type{Union{T,Missing}}, x, verbose :: Bool = false, verboseLvl :: Int = 0) where T = unmarshal(T, x, verbose, verboseLvl)
unmarshal(::Type{Union{T,Nothing}}, x::Nothing, verbose :: Bool = false, verboseLvl :: Int = 0) where T = nothing
unmarshal(::Type{Union{T,Nothing}}, x::T, verbose :: Bool = false, verboseLvl :: Int = 0) where T = unmarshal(T, x, verbose, verboseLvl)
function unmarshal(T::Type, x, verbose :: Bool = false, verboseLvl :: Int = 0)
try
T(x)
catch
throw(ArgumentError("no unmarshal function defined to convert $(typeof(x)) to $(T); consider providing a specialization"))
end
end
end # module
import bpy
import bmesh
from mathutils import Vector
import numpy as np
# Blender import system clutter
import sys
from pathlib import Path
UTILS_PATH = Path.home() / "Documents/python_workspace/data-science-learning"
sys.path.append(str(UTILS_PATH))
import utils.blender_utils
import importlib
importlib.reload(utils.blender_utils)
from utils.blender_utils import init_greasy_pencil
class Automaton_1D:
def __init__(self, n: int, states: int = 2):
"""
1D Automaton
:param n: number of cells
"""
self.n = n
self.space = np.zeros(n, dtype=np.uint8)
self.space[n // 2] = 1
        # alternative start states: e.g. a fixed pattern or np.random.choice(2, n) for a random start
def update(self, rule: dict):
"""
Update automaton state
"""
tmp_space = self.space.copy()
for i in range(self.n):
neighbours = self.get_neighbours(i)
tmp_space[i] = rule["".join([str(s) for s in neighbours])]
self.space = tmp_space
def get_neighbours(self, i: int):
if i == 0:
return np.insert(self.space[:2], 0, self.space[-1])
elif i == self.n - 1:
return np.insert(self.space[-2:], 2, self.space[0])
else:
return self.space[max(0, i - 1):i + 2]
def draw_cell(pos: tuple, gp_frame):
x, y, z = pos
gp_stroke = gp_frame.strokes.new()
gp_stroke.line_width = 500
gp_stroke.points.add(count=2)
gp_stroke.points[0].co = (x, 0, y)
gp_stroke.points[1].co = (x + 0.5, 0, y)
def animate_automata(rule):
automaton_size = 100
automaton = Automaton_1D(automaton_size)
nb_frames = 100
bpy.context.scene.frame_end = nb_frames
gp_layer = init_greasy_pencil()
gp_frame = gp_layer.frames.new(0)
#bpy.context.active_gpencil_brush.size = 100
#bpy.context.active_gpencil_brush.strength = 1.
# bpy.data.brushes["Draw Pencil"].size = 500
for frame in range(1, nb_frames+1):
#gp_frame = gp_layer.frames.new(frame)
gp_frame = gp_layer.frames.copy(gp_frame)
for i, cell in enumerate(automaton.space):
if cell:
draw_cell((i, frame, 0), gp_frame)
automaton.update(rule)
rule_0 = {'111': 1, '110': 1, '101': 1, '100': 1, '011': 1, '010': 1, '001': 1, '000': 0}
rule_sierpinski = {'111': 0, '110': 1, '101': 0, '100': 1, '011': 1, '010': 0, '001': 1, '000': 0}
rule_x = {'111': 0, '110': 0, '101': 0, '100': 1, '011': 1, '010': 1, '001': 1, '000': 0}
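# Hypothetical helper (not in the original script): build a rule dictionary
# from its Wolfram rule number. Neighbourhood '111' maps to bit 7, '110' to
# bit 6, ..., '000' to bit 0, so wolfram_rule(90) reproduces rule_sierpinski.
def wolfram_rule(number: int) -> dict:
    return {format(v, '03b'): (number >> v) & 1 for v in range(8)}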
animate_automata(rule_0)
\section{Introduction}
The involvement of the brain and spinal cord in motor control has been recognized since the earliest known clinical records of head and spinal injuries, dating back to ancient Egypt \citep{Louis1994,VanMiddendorp2010}. However, the mechanism used by the nervous system to generate movement was not fully appreciated until Galvani first reported his famous experiments on \textit{animal electricity} \citep{Galvani1791}. By isolating the sciatic nerve and gastrocnemius muscle in the frog, Galvani clearly demonstrated in a series of stimulation experiments that an electrical process, contained entirely within the biology of the frog's leg, was responsible for the spontaneous generation of muscle contractions. This would lead to the discovery and physiological characterization of the nerve impulse, the action potential, that travels across the nerve to initiate muscle movement \citep{DuBois-Reymond1843,Bernstein1868,Schuetze1983}. The success of these seminal experiments immediately raised a fundamental question regarding nerve conduction: if spontaneous muscle contraction is generated by nerve impulses transmitted throughout the nervous system, how is this transmission coordinated in order to generate the complex patterns of muscle activity observed in natural behaviour?
\subsection{Discovery of the motor cortex}
In search of answers to this question, researchers next turned to the brain, the seat of anatomical convergence of the nervous system. Following Galvani's footsteps, several attempts were made to stimulate the cerebral cortex electrically, but with little success \citep{Gross2007}. It was not until the 1870s that the first indications of a direct involvement of the cortex in the production of movement came to light, around the time when Hughlings Jackson undertook his studies on epileptic convulsions \citep{Jackson1870}. He observed that in some patients the fits would start with a deliberate spasm on one side of the body, and that different body parts would become systematically affected one after the other. He connected the orderly march of these spasms to the existence of localized lesions in the \emph{post-mortem} brain of his patients and hypothesized that the origin of these fits was uncontrolled excitation caused by local changes in cortical grey matter \citep{Jackson1870}. In that same year, Fritsch and Hitzig published their famous study demonstrating that it is possible to elicit movements by direct stimulation of the cortex in dogs \citep{Fritsch1870}. Furthermore, stimulation of different parts of the cortex produced movement in different parts of the body \citep{Fritsch1870}. It appeared that the causal mechanism for epileptic convulsions predicted by Hughlings Jackson had been found, and with it a possible explanation for how the intact brain might control movement. The cerebral cortex was already considered at the time to be the seat of reasoning and sensation, so if activity over this so-called \emph{motor cortex} was able to exert direct control over the musculature of the body, then it might, in the normal brain, be the area that connects volition to muscles \citep{Fritsch1870}.
\subsection{The Goltz-Ferrier debates}
David Ferrier, a Scottish neurologist deeply impressed by the ideas of Hughlings Jackson and by the positive results of Fritsch and Hitzig's experiments, proceeded to reproduce and expand on their observations with comprehensive stimulation studies showing how activity in the motor cortex was sufficient to produce a large variety of movements across a wide range of mammalian species \citep{Ferrier1873}. Meanwhile, other researchers across Europe, such as Goltz and Christiani, were facing a dilemma: in many of the so-called ``lower mammals'' massive lesions of the cerebral cortex failed to produce any visible long-term impairments in the motor behaviour of the animals \citep{James1885,Goltz1888}. These two lines of inquiry first clashed at the seventh International Medical Congress held in London in August 1881, where Goltz of Strassburg and Ferrier of London presented their results in a series of debates on the localization of function in the cerebral cortex \citep{Phillips1984,Tyler2000}.
Goltz assumed a clear anti-localizationist position. He advanced that it was impossible to produce a complete paresis of any muscle, or complete dysfunction of any perception, by destruction of any part of the cerebral cortex, and that he found mostly deficits of general intelligence in his dogs \citep{Tyler2000}. Following Goltz's presentation, Ferrier emphasized the danger of generalizing from the dog to animals of other orders (e.g. man and monkey). He then proceeded to exhibit his own lesion results by means of antiseptic surgery in the monkey, describing how a circumscribed unilateral lesion of the motor cortex produced complete contralateral paralysis of the leg. He also produced a striking series of microscopic sections of Wallerian degeneration \citep{Waller1850} of the ``motor path'' from the cortex to the contralateral spinal cord, the crossed descending projections forming the pyramidal corticospinal tract \citep{Tyler2000}.
The debates concluded with the public demonstration of live specimens: a dog with large lesions to the parietal and posterior lobes from Goltz; and from Ferrier, a hemiplegic monkey with a unilateral lesion to the motor cortex of the contralateral side. As predicted, Goltz's dog showed a clear ability to locomote and avoid obstacles and to make use of its other basic senses, while displaying peculiar deficits of intelligence such as failing to respond with fear to the cracking of a whip or ignoring tobacco smoke blown in its face. On the other hand, Ferrier's monkey appeared severely hemiplegic, in a condition similar to human stroke patients. After the demonstrations, the animals were killed and their brains removed. Preliminary observations revealed that the lesions in Goltz's dog were less extensive than expected, particularly on the left hemisphere. Ferrier's lesions on the other hand were precisely circumscribed to the contralateral motor cortex. These demonstrations secured the triumph of Ferrier, who went on to firmly establish the localizationist approach to neurology and the idea of a somatotopic arrangement over the motor cortex.
The Goltz-Ferrier debates had far-reaching implications throughout the entire research community of the time, and the basic dilemma that was presented has sparked controversy and confusion for over a hundred years since \citep{Phillips1984,Lashley1924,DeBarenne1933,Tyler2000,Gross2007}. In the meantime, views of motor cortex have evolved to suggest it plays a role in ``understanding'' the movements of others \citep{Rizzolatti2004}, imagining one's own movements \citep{Porro1996}, or in learning new movements \citep{Kawai2015}, but where are we today regarding its role in directly controlling movement?
\subsection{Stimulating motor cortex causes movement; motor cortex is active during movement}
Motor cortex is still broadly defined as the region of the cerebral hemispheres from which movements can be evoked by low-current stimulation, following Fritsch and Hitzig's original experiments in 1870 \citep{Fritsch1870}. Stimulating different parts of the motor cortex elicits movement in different parts of the body, and systematic stimulation surveys have revealed a topographical representation of the entire skeletal musculature across the cortical surface \citep{Leyton1917, Penfield1937, Neafsey1986}. Electrophysiological recordings in motor cortex have routinely found correlations between neural activity and many different movement parameters, such as muscle force \citep{Evarts1968}, movement direction \citep{Georgopoulos1986}, speed \citep{Schwartz1993}, or even anisotropic limb mechanics \citep{Scott2001} at the level of both single neurons \citep{Evarts1968,Churchland2007} and populations \citep{Georgopoulos1986,Churchland2012}. Determining what exactly this activity in motor cortex controls \citep{Todorov2000} has been further complicated by studies using long stimulation durations in which continuous stimulation at a single location in motor cortex evokes complex, multi-muscle movements \citep{Graziano2002,Aflalo2006}. However, as a whole, these observations all support the long standing view that activity in motor cortex is involved in the direct control of movement.
\subsection{Motor cortex lesions produce different deficits in different species}
What types of movement require motor cortex? In humans, a motor cortical lesion is devastating. Permanent injury to the frontal lobes of the brain by stroke or mechanical means is often followed by weakness or paralysis of the limbs in the side of the body opposite to the lesion \citep{Louis1994}. Although the paretic symptoms have a tendency to recover partially, especially with training and rehabilitation, permanent movement deficits and loss of muscle control in the affected limbs is the common prognosis; movement is permanently and obviously impaired \citep{Laplane1977,Kwakkel2003}. In non-human primates, similar gross movement deficits are observed after lesions, albeit transiently \citep{Leyton1917,Travis1955}. The longest lasting effect of a motor cortical lesion is the decreased motility of distal forelimbs, especially the control of individual finger movements required for precision skills \citep{Leyton1917,Darling2011}. But equally impressive is the extent to which other movements fully recover, including the ability to sit, stand, walk, climb and even reach to grasp, as long as precise finger movements are not required \citep{Leyton1917,Darling2011,Zaaimi2012}. In non-primate mammals, the \emph{absence} of lasting deficits following motor cortical lesion is even more striking. Careful studies of skilled reaching in rats have revealed an impairment in paw grasping behaviours \citep{Whishaw1991,Alaverdashvili2008a}, comparable to the long lasting deficits seen in primates, but this is a limited impairment when compared to the range of movements that are preserved \citep{Whishaw1991,Kawai2015}. In fact, even after complete decortication, rats, cats and dogs retain a shocking amount of their movement repertoire \citep{Goltz1888,Bjursten1976,Terry1989}. If we are to accept the simple hypothesis that motor cortex is the structure responsible for ``voluntary movement production'', then why is there such a blatant difference in the severity of deficits caused by motor cortical lesions in humans versus other mammals? With over a century of stimulation and electrophysiology studies clearly suggesting that motor cortex is involved in many types of movement, in all mammalian species, how can these divergent results be reconciled?
\subsection{The role of the corticospinal tract}
It must have felt uncanny to those early researchers to find that surface stimulation of the cortex produces discrete muscle responses, in a way so similar to what Galvani did with the frog's leg. Indeed, Sherrington himself conveys the feeling clearly in the opening of his seminal lecture on the motor cortex \cite[p.271]{Sherrington1906}, confessing ``that although it is not surprising that such territorial subdivision of function should exist in the cerebral cortex, it is surprising that by our relatively imperfect artifices for stimulation we should be able to obtain clear evidence thereof.''
Of course, it did not go unnoticed that this fact might be due to the massive projection from cortex to the spinal cord, which had been fully traced by Ludwig Türck only twenty years before Fritsch and Hitzig's experiment \citep{Nathan1955}. This corticospinal tract was found to originate in the anterior regions of the cerebral cortex and terminate directly in the lateral columns of the spinal cord after decussating (i.e. crossing over) at the level of the brainstem's \emph{medulla oblongata}. The existence of this corticospinal pathway presented compelling anatomical evidence of the means by which the motor cortex might be able to exert a direct influence on movement by electrical conduction of nerve impulses, but the role of this connection remained elusive.
\subsection{The effects of lesions in the corticospinal tract}
In the wake of the Goltz-Ferrier debates, investigations of the role of the direct corticospinal descending pathway were conducted in multiple animal species. Sherrington himself began his work by tracing spinal cord degeneration over long periods of time (up to 11 months) following cortical lesions in Goltz's dogs \citep{Langley1884,Sherrington1885}. He confirmed that many of the properties of the corticospinal tract in the primate held for the dog, and furthermore became one of the first to observe the presence of a degenerated ``re-crossed'' pyramidal tract that travels down the cord ipsilateral to the side of the lesion \citep{Sherrington1885}. These fibers would later come to be called the ipsilateral, ventral corticospinal tract, and have since been found and described in most mammalian species as forming roughly 10\% of the entire corticospinal projections \citep{Kuypers1981,Brosamle2000,Lacroix2004}. However, he also had the chance during this time to observe first hand the negative effects of corticospinal degeneration following lesion, which had been previously reported by Goltz and others in a variety of non-primate specimens. In his own words:
\blockquote[{\protect\cite[p.189]{Sherrington1885}}]{That the pyramidal tracts are in the dog requisite for volitional~impulses to reach limbs and body seems negatived by the fact that the animal can run, leap, turn to either side, use neck and jaws, \&c. with ease and success after nearly, if not wholly, complete degeneration of these tracts on both sides. Further, after complete degeneration of one pyramid, there is in the dog no obvious difference between the movements of the right and left sides.}
Interestingly, he does note that \enquote{defect of motion is observable only as a clumsiness in execution of fine movements} \citep{Sherrington1885}. These observations once again stood out in stark contrast with lesion experiments reported by Ferrier in the monkey, where cauterization of specific motor cortical areas produced complete and persistent paralysis of the corresponding body parts \citep{Ferrier1884}.
Years later, Sherrington would come back to the motor cortex with a new set of studies on stimulation and ablation of the precentral region \citep{Grunbaum1903,GrahamBrown1913,Leyton1917}. In these studies together with Gr\"unbaum, Sherrington targeted motor cortical lesions to the excitable area of the arm or the leg and tracked the recovery of the animals over time. Following the initial paresis and loss of muscle control they observed dramatic recovery of most skilled motor acts, such as peeling open a banana or climbing cages \citep{Leyton1917}. In order to test whether the recovery process was due to cortical reorganization, they systematically stimulated the areas adjacent to the lesion as well as the motor cortex of the opposite hemisphere, but failed to evoke movements in the affected limb \citep{Leyton1917}, as would be expected if commands were traveling down the corticospinal tract in spared regions. Furthermore, subsequent ablation of those areas failed to produce any new impairments in the recovered limb, leaving Sherrington and his colleagues at a loss to find the locus of recovery \citep{Leyton1917}.
Confused by these results, which they thought had ``caused concern to students of cerebral physiology'', Glees and Cole introduced a set of more quantitative behavioural assays in the hope of tracking in detail the recovery of motor control \citep{Glees1950,Cole1952}. They studied the behaviour of monkeys solving various puzzle boxes following successive circumscribed lesions to the thumb, index and arm areas of the motor cortex. As Sherrington reported, there was a quick recovery after an initial period of paralysis and loss of motor control. However, even though the monkeys fully recovered their ability to skillfully open the puzzle box, some subtle movement deficits and paresis in the control of fine movements of the digits were reported to persist \citep{Glees1950}. When stimulating motor cortical areas surrounding the circumscribed lesions, they were able to evoke movements in the impacted digits and reinstate the paretic symptoms after further ablation \citep{Glees1950}. This suggested the hypothesis that surrounding areas of the motor cortex could undergo reorganization following the lesion. However, an important difference to emphasize between these experiments and those of Sherrington is the fact that only relatively circumscribed motor cortical regions were removed in each surgery, whereas in the original Sherrington study the entire elbow, wrist, index, thumb and remaining digit motor areas were excised at once \citep{Leyton1917}, most likely causing degeneration of the entire corticospinal pathway for the affected limb. The presence of an intact corticospinal tract, excitability of movements to low-current stimulation and transient paretic symptoms following ablation thus seem to go hand in hand.
In the hope of clarifying exactly which movements are controlled by cortex, other studies focused on lesions restricted to the corticospinal tract, using both unilateral and bilateral section at the level of the medullary pyramids \citep{Tower1940,Lawrence1968,Lawrence1968a}. The goal was to isolate the effects of all the individual descending pathways to the spinal cord and resolve once and for all the question of whether the corticospinal tract of the motor cortex was the source of all ``voluntary'' movements. Sarah Tower was the first to describe in detail the results of unilateral and bilateral pyramidotomy in primates, with and without lesion of the motor cortex \citep{Tower1940}. She summarized the condition as ``hypotonic paresis'', characterized by a loss of skeletal muscle tone and depression of the vasomotor system, along with general weakening of the reflexes involving the affected limb segments. Although all discrete usage of the hand and digits was eliminated, she did emphasize the clear presence of voluntary movements in the various purposeful compensations produced by the animals to deal with the affliction. Tower attributed these compensations to the preserved capacities of brainstem circuits.
A more definitive study to dissociate the effects of direct corticospinal and indirect brainstem descending pathways was conducted by Lawrence and Kuypers, and presented in their now classical publications \citep{Lawrence1968,Lawrence1968a}. Using the Kl\"uver board, a task where monkeys have to pick morsels of food from differently sized round holes, they observed that while normal monkeys routinely pick up the food by pinching individual bits with their fingers, monkeys with bilateral corticospinal lesions were mostly unable to perform this precise pincer movement, and instead employed coarser compensatory clasping strategies to retrieve the food \citep{Lawrence1968}. In addition, lesioned monkeys were consistently reported to be somewhat slower and less agile than normal animals. However, most of their overall movement repertoire was surprisingly preserved. Their final conclusions fit remarkably well with the initial observations of Sherrington in the dog, suggesting that the corticospinal pathways superimpose speed and agility on subcortical mechanisms, and provide the capacity for fractionation of movements such as independent finger movements \citep{Lawrence1968}. These observations recapitulate the effects of motor cortical lesions reported by Sherrington, but remain at odds with the primary role assigned to motor cortex, and the direct corticospinal tract, with the control of all voluntary movements.
\subsection{There are anatomical differences in corticospinal projections between primates and other mammals}
In primates, the conspicuous effects of motor cortical lesion can also be induced by sectioning the corticospinal tract, the direct monosynaptic projection that connects motor cortex, and other cortical regions, to the spinal cord \citep{Tower1940,Lawrence1968}. In monkeys, and similarly in humans, this pathway has been found to directly terminate on spinal motor neurons responsible for the control of distal muscles \citep{Leyton1917,Bernhard1954} and is also thought to support the low-current movement responses evoked by electrical stimulation of the cortex, as evidenced by the increased difficulty in obtaining a stimulation response following section at the level of the medulla \citep{Woolsey1972}.
However, the corticospinal tract is by no means the only pathway from cortex to movement (Figure \ref{fig:descendingTaxa}). Motor cortex targets many other brain regions that can themselves generate movement. In fact, this specialized connection from telencephalon to spinal cord appeared only recently in vertebrate evolution \citep{TenDonkelaar2009}, and was further elaborated to include a direct connection from cortex to motor neurons only in some primate species and other highly manipulative mammals such as raccoons \citep{Heffner1983}. In all other mammals, including cats and rats, the termination pattern of the corticospinal tract largely avoids the motor neuron pools in ventral spinal cord and concentrates instead on intermediate zone interneurons and dorsal sensory neurons \citep{Kuypers1981,Yang2003}. Why then is there such a large dependency on this tract for human motor control? One possibility is that the rubrospinal tract---a descending pathway originating in the brainstem and terminating in the intermediate zone---has degenerated in humans compared to other primates and mammals \citep{Nathan1955,Nathan1982}; in non-human species, this tract is thought to compensate for the loss of the corticospinal tract \citep{Lawrence1968a,Zaaimi2012}.
It thus seems likely that most mammals rely on ``indirect'' pathways to convey cortical motor commands to muscles. These differences in anatomy might explain the lack of conspicuous, lasting movement deficits following motor cortical lesion in non-primates, but they leave open a significant question: what is the motor cortex actually controlling in all these other mammals?
\subsection{What is the role of motor cortex in non-primate mammals?}
In the rat, a large portion of cortex is considered ``motor'' based on anatomical \citep{Donoghue1982}, stimulation \citep{Donoghue1982,Neafsey1986} and electrophysiological evidence \citep{Hyland1998}. However, the most consistently observed long-term motor control deficit following motor cortical lesion has been an impairment in supination of the wrist and individuation of digits during grasping, which in turn impairs reaching for food pellets through a narrow vertical slit \citep{Whishaw1991,Alaverdashvili2008a}. Despite the fact that activity in rodent motor cortex has been correlated with movements in every part of the body (not just distal limbs) \citep{Hill2011,Erlich2011}, it would appear we are led to conclude that this large high-level motor structure, with dense efferent projections to motor areas in the spinal cord \citep{Kuypers1981}, basal ganglia \citep{Turner2000,Wu2009}, thalamus \citep{Lee2008}, cerebellum \citep{Baker2001} and brainstem \citep{Jarratt1999}, as well as to most primary sensory areas \citep{Petreanu2012,Schneider2014}, evolved simply to facilitate more precise wrist rotations and grasping gestures. Maybe we are missing something. Might there be other problems in movement control that motor cortex is solving, but that we may be overlooking with our current assays?
\subsection{A role in modulating the movements generated by lower motor centres}
The idea that the descending cortical pathways superimpose speed and precision on an existing baseline of behaviour has been suggested by lesion work in the primate \citep{Lawrence1968a}, but has been investigated much more thoroughly in the context of studies on the neural control of locomotion in cats. These studies have suggested that the corticospinal tract can play a role in the \emph{adjustment} of ongoing movements, modulating the activity and sensory feedback in spinal circuits in order to adapt a lower movement controller to challenging conditions.
It has been known for more than a century that completely decerebrate cats are capable of sustaining the locomotor rhythms necessary for walking on a flat treadmill utilizing only spinal circuits \citep{GrahamBrown1911}. In addition, there is a general capacity for spinal circuits to modulate network activity with incoming sensory input in order to coordinate and switch between different responses, even during specific phases of movement \citep{Forssberg1975}. Brainstem and midbrain circuits are sufficient to initiate the activity of these spinal central pattern generators \citep{Grillner1973}, so what exactly is the contribution of motor cortex to the control of locomotion? Single-unit recordings of pyramidal tract neurons (PTNs) from cats walking on a treadmill have shown that a large proportion of these neurons are locked to the step cycle \citep{Armstrong1984a}. However, we know from the decerebrate studies that this activity is not necessary for the basic locomotor pattern. What then is its role?
Lesions of the lateral descending pathways (containing corticospinal and rubrospinal projections) produce a long-term impairment in the ability of cats to step over obstacles \citep{Drew2002}. Recordings of PTNs during locomotion show increased activity during these visually guided modifications to the basic step cycle \citep{Drew1996}. These observations suggest that motor cortex neurons are necessary for precise stepping and adjustment of ongoing locomotion to changing conditions. However, long-term effects seem to require complete lesion of \emph{both} the corticospinal and rubrospinal tracts \citep{Drew2002}. Even in these animals, the voluntary act of stepping over an obstacle does not disappear entirely, and moreover, they can adapt to changes in the height of the obstacles \citep{Drew2002}. Although they never regain the ability to gracefully clear an obstacle, these animals still adjust their stepping height when faced with a higher obstacle in a way that would have allowed them to comfortably clear the lower obstacle \citep{Drew2002}. Furthermore, deficits caused by lesions restricted to the pyramidal tract seem to disappear over time \citep{Liddell1944}, and are most clearly visible only the first time an animal encounters a new obstacle \citep{Liddell1944}.
The view that motor cortex in non-primate mammals is principally responsible for adjusting ongoing movement patterns generated by lower brain structures is appealing. What is this modulation good for? What does it allow an animal to achieve? How can we assay its necessity?
\subsection{Towards a new teleology; new experiments required}
It should now be clear that the involvement of motor cortex in the direct control of all ``voluntary movement'' is human-specific. There is a role for motor cortex across mammals in the control of precise movements of the extremities, especially those requiring individual movements of the fingers, but these effects are subtle in non-primate mammals. Furthermore, what would be a devastating impairment for humans may not be so severe for mammals that do not depend on precision finger movements for survival. Therefore, generalizing this specific role of motor cortex from humans to all other mammals would be misleading. We could be missing another, more primordial role for this structure that predominates in other mammals, and by doing so, we may also be missing an important role in humans.
The proposal that motor cortex induces modifications of ongoing movement synergies, prompted by the electrophysiological studies of cat locomotion, definitely points to a role consistent with the results of various lesion studies. However, in assays used thus far, the ability to modify ongoing movement generally recovers after a motor cortical lesion. What are the environmental situations in which motor cortical modulation is most useful?
Cortex has long been proposed to be the structure responsible for integrating a representation of the world and improving the predictive power of this representation with experience \citep{Barlow1985,Doya1999}. If motor cortex is the means by which these representations can gain influence over the body, however subtle and ``modulatory'', can we find situations (i.e. tasks) in which this cortical control is required?
The necessity of cortex for various behavioural tasks has been actively investigated in experimental psychology for over a century, including the foundational work of Karl Lashley and his students \citep{Lashley1921a,Lashley1950a}. In the rat, large cortical lesions were found to produce little to no impairment in movement control, and even deficits in learning and decision making abilities were difficult to demonstrate consistently over repeated trials. However, Lashley did notice some evidence that cortical control may be involved in postural adaptations to unexpected perturbations \citep{Lashley1921a}. These studies once again seem to recapitulate the two most consistent observations found across the entire motor cortical lesion literature in non-primate mammals since Hitzig \citep{Fritsch1870}, Goltz \citep{Goltz1888}, Sherrington \citep{Sherrington1885} and others \citep{Oakley1979,Terry1989}. One, direct voluntary control over movement is most definitely not abolished through lesion; and two, certain aspects of some movements are definitely impaired, but only under certain challenging situations. The latter are often reported only anecdotally. It was this collection of intriguing observations in animals with motor cortical lesions that prompted us to expand the scope of standard laboratory tasks to include a broader range of motor control challenges that brains encounter in their natural environments.
\section{Experiment Introduction}
In the natural world, an animal must be able to adapt locomotion to any surface, not only in anticipation of upcoming terrain, but also in response to the unexpected perturbations that often occur during movement. This allows animals to move robustly through the world, even when navigating a changing environment. Testing the ability of the motor system to generate a robust response to an unexpected change can be difficult as it requires introducing a perturbation without cueing the animal about the altered state of the world. Marple-Horvat and colleagues built a circular ladder assay for cats that was specifically designed to record from motor cortex during such conditions \citep{Marple-Horvat1993}. One of the modifications they introduced was to make one of the rungs of the ladder fall unexpectedly under the weight of the animal. When they recorded from motor cortical neurons during the rung drop, they noticed a marked increase in activity, well above the recorded baseline from normal stepping, as the animal recovered from the fall and resumed walking. However, whether this increased activity of motor cortex was necessary for the recovery response has never been assayed.
\begin{featurebox}
\caption{Some cautionary remarks on lesion techniques}
The original methods used to induce a permanent lesion to the motor cortex were very crude, often involving gross mechanical insult to the neural tissue by using surgical knife cuts or ablation by water-jet, aspiration, and thermo- or electrocoagulation. These methods are still widely used in lesion studies for their simplicity and bluntness, but have the disadvantage of making it hard to limit the lesion to a single area because of possible damage to subcortical areas or the destruction of fibers of passage. These limitations made it more difficult to interpret the effects of cortical lesions, and eventually led to the development of new techniques designed to work around such problems. Chemical injections of neurotoxic compounds such as ibotenic acid or kainic acid aim to increase selectivity of the lesion by limiting damage to neural cell bodies in the target area while leaving the fibers of passage intact \citep{Schwarcz1979}. Photothrombosis \citep{Watson1985} or devascularization by pial stripping \citep{Meyer1971} aim to reproduce the effects of clinical stroke while avoiding extension of the lesion to subcortical areas as much as possible.
The early studies of Broca localizing the function of articulate language to a specific region in the cerebral hemispheres \citep{Broca1861} established a long tradition of correlating the location of surgical brain injury with detailed analysis of any subsequent behavioural deficits. This method is not without its difficulties. The problems of plasticity and diaschisis will forever complicate conclusions based on injury and manipulation of nervous tissue \citep{Lashley1933}. Many recent methods for reversible chemical or optogenetic inactivation of the cortex have been proposed to improve the statistical power of behavioural assessments \citep{DeFeudis1980,Dong2010,Guo2015}. Unfortunately, given that the cortex maintains a tight balance of excitation and inhibition during normal functioning and is also densely interconnected with the rest of the brain, such transient manipulations are prone to cause multiple downstream effects that can confound inferences about behavioural relevance \citep{Otchy2015}. In this respect, they are similar to stimulation experiments in that they are very useful in determining that two areas are connected in a circuit, but not necessarily what the connection means. Of course, permanent lesions themselves can induce plasticity changes in the function of downstream and upstream circuits. The expectation, however, is that such changes represent a homeostatically stable state of the system, allowing simultaneous investigation of the limits of recovery, as well as the kinds of problems for which a fully intact structure is definitely required.
\end{featurebox}
|
{"hexsha": "9d035b9056673885615256497a810b57ce6973ca", "size": 34398, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "src/sections/mc-intro.tex", "max_stars_repo_name": "kampff-lab/shuttling-paper", "max_stars_repo_head_hexsha": "00ca13d59b45123cbc77483e0dd93e9b94dbe552", "max_stars_repo_licenses": ["CC-BY-4.0", "MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/sections/mc-intro.tex", "max_issues_repo_name": "kampff-lab/shuttling-paper", "max_issues_repo_head_hexsha": "00ca13d59b45123cbc77483e0dd93e9b94dbe552", "max_issues_repo_licenses": ["CC-BY-4.0", "MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/sections/mc-intro.tex", "max_forks_repo_name": "kampff-lab/shuttling-paper", "max_forks_repo_head_hexsha": "00ca13d59b45123cbc77483e0dd93e9b94dbe552", "max_forks_repo_licenses": ["CC-BY-4.0", "MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 373.8913043478, "max_line_length": 2269, "alphanum_fraction": 0.8255421827, "num_tokens": 7161}
|
#!/usr/bin/env python
"""
Class Timer which provides a context for timing blocks of code.
See Also: pisa.utils.profile module, which contains decorators for timing
functions and methods.
"""
from __future__ import absolute_import, division
from time import sleep, time
import numpy as np
from pisa.utils.format import timediff
from pisa.utils.log import logging, set_verbosity
__all__ = ['Timer', 'test_Timer']
__license__ = '''Copyright (c) 2014-2017, The IceCube Collaboration
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.'''
# TODO: add unit tests!
class Timer(object):
"""Simple timer context (i.e. designed to be used via `with` sematics).
Parameters
----------
    label : str or None
        Optional label identifying the timed code block.
    verbose : bool
        If True, log the elapsed time when the context exits.
fmt_args : None or Mapping
Passed to `timediff` via **fmt_args as optional format parameters.
See that function for details of valid arguments
"""
def __init__(self, label=None, verbose=False, fmt_args=None):
self.label = label
self.verbose = verbose
self.fmt_args = fmt_args if fmt_args is not None else {}
self.start = np.nan
self.end = np.nan
self.secs = np.nan
self.msecs = np.nan
def __enter__(self):
self.start = time()
return self
def __exit__(self, *args):
self.end = time()
self.secs = self.end - self.start
self.msecs = self.secs * 1000
if self.verbose:
formatted = timediff(dt_sec=self.secs, **self.fmt_args)
logging.info('Elapsed time: ' + formatted)
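    # Example usage (a minimal sketch; `do_work` is a hypothetical workload):
    #
    #     with Timer(label='my-block', verbose=True) as t:
    #         do_work()
    #     print(t.secs, t.msecs)  # elapsed seconds / milliseconds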
def test_Timer():
"""Unit tests for Timer class"""
with Timer(verbose=True):
sleep(0.1)
logging.info('<< PASS : test_Timer >>')
if __name__ == '__main__':
set_verbosity(3)
test_Timer()
|
{"hexsha": "1ff48a51591cc5dff0fc5665917a8aadd7603a95", "size": 2248, "ext": "py", "lang": "Python", "max_stars_repo_path": "pisa/utils/timer.py", "max_stars_repo_name": "wym109/pisa", "max_stars_repo_head_hexsha": "696803320f577d241651df900726b76a770d072a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pisa/utils/timer.py", "max_issues_repo_name": "wym109/pisa", "max_issues_repo_head_hexsha": "696803320f577d241651df900726b76a770d072a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2020-05-03T15:46:07.000Z", "max_issues_repo_issues_event_max_datetime": "2020-05-03T17:57:17.000Z", "max_forks_repo_path": "pisa/utils/timer.py", "max_forks_repo_name": "wym109/pisa", "max_forks_repo_head_hexsha": "696803320f577d241651df900726b76a770d072a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-05-15T13:48:48.000Z", "max_forks_repo_forks_event_max_datetime": "2019-05-15T13:48:48.000Z", "avg_line_length": 26.7619047619, "max_line_length": 75, "alphanum_fraction": 0.6757117438, "include": true, "reason": "import numpy", "num_tokens": 523}
|
(*
* Copyright 2019, NTU
*
* This software may be distributed and modified according to the terms of
* the BSD 2-Clause license. Note that NO WARRANTY is provided.
* See "LICENSE_BSD2.txt" for details.
*
* Author: Albert Rizaldi, NTU Singapore
*)
theory NAND_Hoare_Typed
imports VHDL_Hoare_Typed NAND_Femto
begin
subsection \<open>Proving @{term "nand3"}: NAND with transport delay \<close>
abbreviation "bval_of_wline tw sig n \<equiv> bval_of (wline_of tw sig n)"
abbreviation "lof_wline tw sig n \<equiv> lval_of (wline_of tw sig n)"
locale scalar_type_nand3 =
fixes \<Gamma> :: "sig tyenv"
assumes "\<Gamma> A = Bty" and "\<Gamma> B = Bty" and "\<Gamma> C = Bty"
begin
text \<open>Invariant for NAND: at all times @{term "i"}, the signal @{term "C :: sig"} at @{term "i"}
should be the NAND value of @{term "A :: sig"} and @{term "B :: sig"} at time @{term "i - 1"}.\<close>
definition nand_inv :: "sig assn2" where
"nand_inv \<equiv> (\<lambda>tw. bval_of_wline tw C (fst tw) \<longleftrightarrow> \<not> (bval_of_wline tw A (fst tw - 1) \<and> bval_of_wline tw B (fst tw - 1)))"
definition nand_inv2 :: "sig assn2" where
"nand_inv2 \<equiv> (\<lambda>tw. disjnt {A, B} (event_of tw) \<longrightarrow> (\<forall>i > fst tw. bval_of_wline tw C i \<longleftrightarrow> bval_of_wline tw C (fst tw)))"
lemma nand_inv_next_time:
fixes tw
defines "v \<equiv> eval_world_raw2 tw (Bnand (Bsig A) (Bsig B))"
defines "tw' \<equiv> tw[C, 1 :=\<^sub>2 v]"
assumes "wityping \<Gamma> (snd tw)"
shows "nand_inv (fst tw' + 1, snd tw')"
proof -
have bexpA: "bexp_wt \<Gamma> (Bsig A) Bty" and bexpB: "bexp_wt \<Gamma> (Bsig B) Bty"
using scalar_type_nand3_axioms unfolding scalar_type_nand3_def by (metis bexp_wt.intros(3))+
have "bval_of_wline tw' C (fst tw + 1) \<longleftrightarrow> bval_of v"
unfolding tw'_def worldline_upd2_def worldline_upd_def by auto
also have "... \<longleftrightarrow> \<not> (bval_of_wline tw A (fst tw) \<and> bval_of_wline tw B (fst tw))"
using eval_world_raw_bv[OF bexpA `wityping \<Gamma> (snd tw)`] eval_world_raw_bv[OF bexpB `wityping \<Gamma> (snd tw)`]
unfolding v_def by (auto split:val.split)(metis val.distinct(1))+
finally show ?thesis
unfolding nand_inv_def
by (metis (no_types, lifting) add_diff_cancel_right' comp_apply fst_conv less_add_one snd_conv
tw'_def worldline_upd2_before_dly worldline_upd2_def)
qed
lemma nand_inv2_next_time:
fixes tw
defines "v \<equiv> eval_world_raw2 tw (Bnand (Bsig A) (Bsig B))"
defines "tw' \<equiv> tw[C, 1 :=\<^sub>2 v]"
shows "nand_inv2 (fst tw' + 1, snd tw')"
using assms unfolding nand_inv2_def tw'_def worldline_upd2_def worldline_upd_def by auto
lemma pre_nand_conc_hoare':
"\<And>tw. nand_inv tw \<and> nand_inv2 tw \<and> disjnt {A, B} (event_of tw) \<Longrightarrow> nand_inv (fst tw + 1, snd tw)"
proof -
fix tw
assume "nand_inv tw \<and> nand_inv2 tw \<and> disjnt {A, B} (event_of tw)"
hence "nand_inv tw" and "nand_inv2 tw" and "disjnt {A, B} (event_of tw)"
by auto
have "bval_of_wline tw C (fst tw + 1) \<longleftrightarrow> bval_of_wline tw C (fst tw)"
using `nand_inv2 tw` `disjnt {A, B} (event_of tw)` unfolding nand_inv2_def
by (simp add: next_time_world_at_least)
also have "... \<longleftrightarrow> \<not> (bval_of_wline tw A (fst tw - 1) \<and> bval_of_wline tw B (fst tw - 1))"
using `nand_inv tw` unfolding nand_inv_def by auto
also have "... \<longleftrightarrow> \<not> (bval_of_wline tw A (fst tw) \<and> bval_of_wline tw B (fst tw))"
using `disjnt {A, B} (event_of tw)` unfolding event_of_alt_def
by (smt diff_0_eq_0 disjnt_insert1 mem_Collect_eq)
finally show "nand_inv (fst tw + 1, snd tw)"
unfolding nand_inv_def by auto
qed
lemma nand_conc_hoare2:
"\<And>tw. nand_inv2 tw \<and> disjnt {A, B} (event_of tw) \<Longrightarrow> nand_inv2 (fst tw + 1, snd tw)"
unfolding nand_inv2_def by auto
lemma conc_stmt_wf_nand3:
"conc_stmt_wf nand3"
unfolding nand3_def conc_stmt_wf_def by auto
lemma nonneg_delay_conc_nand3:
"nonneg_delay_conc nand3"
unfolding nand3_def by auto
lemma nonneg_delay_conc_nand3':
"nonneg_delay_conc ( process {A, B} : Bassign_trans C (Bnand (Bsig A) (Bsig B)) 1)"
using nonneg_delay_conc_nand3 unfolding nand3_def by auto
lemma conc_wt_nand3:
"conc_wt \<Gamma> nand3"
unfolding nand3_def by (metis bexp_wt.intros(3) bexp_wt.intros(9) conc_wt.intros(1)
scalar_type_nand3_axioms scalar_type_nand3_def seq_wt.intros(4))
lemma conc_wt_nand3':
"conc_wt \<Gamma> ( process {A, B} : Bassign_trans C (Bnand (Bsig A) (Bsig B)) 1)"
using conc_wt_nand3 unfolding nand3_def by auto
lemma nand_conc_sim2':
"\<Gamma> \<turnstile>\<^sub>s \<lbrace>\<lambda>tw. nand_inv tw \<and> nand_inv2 tw\<rbrace> nand3 \<lbrace>\<lambda>tw. nand_inv tw \<and> nand_inv2 tw\<rbrace>"
apply (rule While_Suc)
apply (rule Conseq'[where P="wp3_conc \<Gamma> nand3 (\<lambda>tw. nand_inv (fst tw + 1, snd tw) \<and>
nand_inv2 (fst tw + 1, snd tw))", rotated])
apply (rule wp3_conc_is_pre, rule conc_stmt_wf_nand3, rule nonneg_delay_conc_nand3, rule conc_wt_nand3, simp)
unfolding nand3_def wp3_conc_single'[OF conc_wt_nand3' nonneg_delay_conc_nand3'] wp3_fun.simps
using nand_conc_hoare2 nand_inv2_next_time nand_inv_next_time pre_nand_conc_hoare' by presburger
text \<open>Initialisation preserves the invariant\<close>
lemma seq_wt_nand3':
"seq_wt \<Gamma> (Bassign_trans C (Bnand (Bsig A) (Bsig B)) 1)"
using conc_wt_nand3' by auto
lemma nonneg_delay_nand3:
" nonneg_delay (Bassign_trans C (Bnand (Bsig A) (Bsig B)) 1)"
using nonneg_delay_conc_nand3' by auto
lemma init_sat_nand_inv_comb:
"init_sim2_hoare_wt \<Gamma> (\<lambda>tw. fst tw = 0) nand3 (\<lambda>tw. nand_inv tw \<and> nand_inv2 tw)"
unfolding nand3_def
apply (rule AssignI_suc, rule SingleI)
apply (rule Conseq3[where Q="\<lambda>tw. nand_inv (fst tw + 1, snd tw) \<and> nand_inv2 (fst tw + 1, snd tw)", rotated])
apply (rule wp3_fun_is_pre[OF seq_wt_nand3' nonneg_delay_nand3], simp)
unfolding wp3_fun.simps using nand_inv_next_time nand_inv2_next_time by blast
lemma nand_correctness:
assumes "sim_fin2 w (i + 1) nand3 tw'" and "wityping \<Gamma> w"
shows "bval_of_wline tw' C (i + 1) \<longleftrightarrow> \<not> (bval_of_wline tw' A i \<and> bval_of_wline tw' B i)"
using grand_correctness[OF assms conc_stmt_wf_nand3 conc_wt_nand3 nonneg_delay_conc_nand3 nand_conc_sim2' init_sat_nand_inv_comb]
unfolding nand_inv_def by (metis (no_types, lifting) add_diff_cancel_right' assms(1) sim_fin2.cases world_maxtime_lt_fst_tres)
end
|
{"author": "rizaldialbert", "repo": "vhdl-semantics", "sha": "352f89c9ccdfe830c054757dfd86caeadbd67159", "save_path": "github-repos/isabelle/rizaldialbert-vhdl-semantics", "path": "github-repos/isabelle/rizaldialbert-vhdl-semantics/vhdl-semantics-352f89c9ccdfe830c054757dfd86caeadbd67159/NAND_Hoare_Typed.thy"}
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
from kfilter.simplify import *
from sympy import *
dt = Symbol('dt')
x0, v0 = symbols('x0, v0')
Q00,Q11 = symbols('Q_x, Q_v')
# x = x0 + v0*dt + 1/2*a*dt**2
# v = v0 + a*dt
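# The matrices below encode the standard linear state-space form assumed by
# a Kalman filter:
#   x_k = A*x_{k-1} + B*u_k   (process model, u = acceleration input)
#   z_k = H*x_k               (measurement model, z = measured position)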
x = Matrix([[x0],[v0]]) # state vector
A = Matrix([[1, dt], [0, 1]]) # state transition matrix
B = Matrix([[1/2.*dt**2], [dt]]) # control input matrix
H = Matrix([[1, 0]]) # observation model
Q = Matrix([[Q00, 0], [0, Q11]])*dt # process noise covariance
u,z = symbols('a,x_m')
# print the model matrices
print(x)
print(A)
print(B)
print(u)
print(H)
simplify = Simplify(A,x,B,u,z,H)
simplify.setQ(Q)
simplify.compute() # compute the simplifications
simplify.printq() # print the simplified filter
|
{"hexsha": "5564cbce9f822e342a909accfa60aef3b9e3cbe8", "size": 738, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/ex2.py", "max_stars_repo_name": "clnrp/kfilter", "max_stars_repo_head_hexsha": "5f9e67397f84eccbdd0b14867763b9315c2cd7c8", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/ex2.py", "max_issues_repo_name": "clnrp/kfilter", "max_issues_repo_head_hexsha": "5f9e67397f84eccbdd0b14867763b9315c2cd7c8", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/ex2.py", "max_forks_repo_name": "clnrp/kfilter", "max_forks_repo_head_hexsha": "5f9e67397f84eccbdd0b14867763b9315c2cd7c8", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.0625, "max_line_length": 70, "alphanum_fraction": 0.6341463415, "include": true, "reason": "from sympy", "num_tokens": 258}
|
#include <boost/mpl/aux_/preprocessed/mwcw/unpack_args.hpp>
|
{"hexsha": "3fbbc483c1f8f3fe161aeb00c11ec57597e1a76d", "size": 60, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/boost_mpl_aux__preprocessed_mwcw_unpack_args.hpp", "max_stars_repo_name": "miathedev/BoostForArduino", "max_stars_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 10.0, "max_stars_repo_stars_event_min_datetime": "2018-03-17T00:58:42.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-06T02:48:49.000Z", "max_issues_repo_path": "src/boost_mpl_aux__preprocessed_mwcw_unpack_args.hpp", "max_issues_repo_name": "miathedev/BoostForArduino", "max_issues_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2021-03-26T15:17:35.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-20T23:55:08.000Z", "max_forks_repo_path": "src/boost_mpl_aux__preprocessed_mwcw_unpack_args.hpp", "max_forks_repo_name": "miathedev/BoostForArduino", "max_forks_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 4.0, "max_forks_repo_forks_event_min_datetime": "2019-05-28T21:06:37.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-06T03:06:52.000Z", "avg_line_length": 30.0, "max_line_length": 59, "alphanum_fraction": 0.8166666667, "num_tokens": 18}
|
clear
rm a.out -f
g++ a.cpp SFMT.c -O3 -fopenmp
#gcc -O3 -finline-functions -fomit-frame-pointer -DNDEBUG -fno-strict-aliasing --param max-inline-insns-single=1800 -Wmissing-prototypes -Wall -std=c99 --param inline-unit-growth=500 --param large-function-growth=900 -DSFMT_MEXP=19937 \
#a.c SFMT.c
export OMP_NUM_THREADS=5
time ./a.out
|
{"hexsha": "2c7c47e18c78adfd2c01558c8ea4fb7fa32b97a1", "size": 339, "ext": "r", "lang": "R", "max_stars_repo_path": "Trim/src/sfmt/.r", "max_stars_repo_name": "kkhuang81/AdaptiveSM", "max_stars_repo_head_hexsha": "e960c9f397171014c6d979adfb155ddbeeb11f82", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 23, "max_stars_repo_stars_event_min_datetime": "2018-03-09T03:03:21.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-13T12:12:44.000Z", "max_issues_repo_path": "Trim/src/sfmt/.r", "max_issues_repo_name": "kkhuang81/AdaptiveSM", "max_issues_repo_head_hexsha": "e960c9f397171014c6d979adfb155ddbeeb11f82", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Trim/src/sfmt/.r", "max_forks_repo_name": "kkhuang81/AdaptiveSM", "max_forks_repo_head_hexsha": "e960c9f397171014c6d979adfb155ddbeeb11f82", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2019-03-26T14:17:38.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-14T06:13:37.000Z", "avg_line_length": 33.9, "max_line_length": 237, "alphanum_fraction": 0.7463126844, "num_tokens": 118}
|
import numpy as np
from nengo.utils.progress import ProgressTracker
class GenericSimulator(object):
def __init__(self, dt=0.001, progress_bar=True):
self.dt = dt
self.progress_bar = progress_bar
self.n_steps = 0
self.data = {}
def run(self, time_in_seconds, progress_bar=None):
steps = int(np.round(float(time_in_seconds) / self.dt))
self.run_steps(steps, progress_bar=progress_bar)
def run_steps(self, steps, progress_bar=None):
if progress_bar is None:
progress_bar = self.progress_bar
with ProgressTracker(steps, progress_bar, "Simulating") as progress:
for i in range(steps):
self.step()
progress.step()
def step(self):
self.n_steps += 1
def trange(self, dt=None):
dt = self.dt if dt is None else dt
n_steps = int(self.n_steps * (self.dt / dt))
return dt * np.arange(1, n_steps + 1)
def __enter__(self):
        # return the simulator so "with GenericSimulator() as sim" bindings work
        return self
def close(self):
pass
def __exit__(self, exc_type, exc_value, traceback):
self.close()
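# Example usage (a minimal sketch; subclasses are expected to override
# `step()` to do real work, so this only advances the step counter):
#
#     sim = GenericSimulator(dt=0.001)
#     sim.run(0.01, progress_bar=False)
#     sim.n_steps       # -> 10
#     sim.trange()[:3]  # -> array([0.001, 0.002, 0.003])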
|
{"hexsha": "1b5195f66824ea9eab0ea7b8e1610ab7050cffd0", "size": 1116, "ext": "py", "lang": "Python", "max_stars_repo_path": "nengo_normal_form/generic.py", "max_stars_repo_name": "tcstewar/nengo_normal_form", "max_stars_repo_head_hexsha": "37ca02b20c4cc143a7bf9c27912ead36d23a04d7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "nengo_normal_form/generic.py", "max_issues_repo_name": "tcstewar/nengo_normal_form", "max_issues_repo_head_hexsha": "37ca02b20c4cc143a7bf9c27912ead36d23a04d7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "nengo_normal_form/generic.py", "max_forks_repo_name": "tcstewar/nengo_normal_form", "max_forks_repo_head_hexsha": "37ca02b20c4cc143a7bf9c27912ead36d23a04d7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.6153846154, "max_line_length": 76, "alphanum_fraction": 0.6137992832, "include": true, "reason": "import numpy", "num_tokens": 263}
|
[STATEMENT]
lemma knowledge_equiv_eq_NS: "
evs \<in> ns_public \<Longrightarrow>
knows A evs \<union> {Key (priEK B), Key (priSK B), Key (shrK B)} =
knows B evs \<union> {Key (priEK A), Key (priSK A), Key (shrK A)}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. evs \<in> ns_public \<Longrightarrow> knows A evs \<union> {Key (priEK B), Key (priSK B), Key (shrK B)} = knows B evs \<union> {Key (priEK A), Key (priSK A), Key (shrK A)}
[PROOF STEP]
apply (force simp only: knowledge_eval NS_no_Notes)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
|
{"llama_tokens": 251, "file": "Inductive_Confidentiality_GeneralAttacker_Knowledge", "length": 2}
|
import os
import numpy as np
import pandas as pd
import scipy.sparse as sparse
from lib5c.util.system import check_outdir
from lib5c.util.statistics import adjust_pvalues
from hic3defdr.util.printing import eprint
from hic3defdr.util.clusters import load_clusters
from hic3defdr.util.simulation import simulate
from hic3defdr.util.evaluation import make_y_true, evaluate
from hic3defdr.util.progress import tqdm_maybe as tqdm
from hic3defdr.util.parallelization import parallel_apply
class SimulatingHiC3DeFDR(object):
"""
    Mixin class containing simulation and evaluation functions for HiC3DeFDR.
"""
def simulate(self, cond, chrom=None, beta=0.5, p_diff=0.4, skip_bias=False,
loop_pattern=None, outdir='sim', n_threads=-1, verbose=True):
"""
Simulates raw contact matrices based on previously fitted scaled means
and dispersions in a specific condition.
Can only be run after ``estimate_dispersions()`` has been run.
Parameters
----------
cond : str
Name of the condition to base the simulation on.
chrom : str, optional
Name of the chromosome to simulate. Pass None to simulate all
chromosomes in series.
beta : float
The effect size of the loop perturbations to use when simulating.
Perturbed loops will be strengthened or weakened by this fraction of
their original strength.
p_diff : float or list of float
Pass a single float to specify the probability that a loop will be
perturbed across the simulated conditions. Pass four floats to
specify the probabilities of all four specific perturbations: up in
A, down in A, up in B, down in B. The remaining loops will be
constitutive.
skip_bias : bool
Pass True to set all bias factors and size factors to 1,
effectively simulating "unbiased" raw data.
loop_pattern : str, optional
File path pattern to sparse JSON formatted cluster files
representing loop cluster locations for the simulation. Should
contain at least one '<chrom>' which will be replaced with the
chromosome name when loading data for specific chromosomes. Pass
None to use ``self.loop_patterns[cond]``.
outdir : str
Path to a directory to store the simulated data to.
n_threads : int
The number of threads (technically GIL-avoiding child processes) to
use to process multiple chromosomes in parallel. Pass -1 to use as
many threads as there are CPUs. Pass 0 to process the chromosomes
serially.
verbose : bool
Pass False to silence reporting of progress to stderr.
"""
if chrom is None:
if n_threads:
parallel_apply(
self.simulate,
[{'cond': cond, 'chrom': c, 'beta': beta, 'p_diff': p_diff,
'skip_bias': skip_bias, 'loop_pattern': loop_pattern,
'outdir': outdir, 'verbose': False}
for c in self.chroms],
n_threads=n_threads
)
else:
for chrom in self.chroms:
self.simulate(cond, chrom=chrom, beta=beta, p_diff=p_diff,
loop_pattern=loop_pattern, outdir=outdir)
return
eprint('simulating data for chrom %s' % chrom)
# resolve loop_pattern
if loop_pattern is None:
loop_pattern = self.loop_patterns[cond]
# load everything
bias = self.load_bias(chrom)[:, self.design[cond]]
size_factors = self.load_data('size_factors', chrom)
if len(size_factors.shape) == 2:
size_factors = size_factors[:, self.design[cond]]
else:
size_factors = size_factors[self.design[cond]]
row = self.load_data('row', chrom)
col = self.load_data('col', chrom)
scaled = self.load_data('scaled', chrom)[:, self.design[cond]]
disp_fn = self.load_disp_fn(cond)
clusters = load_clusters(loop_pattern.replace('<chrom>', chrom))
# compute pixel-wise mean of normalized data
mean = np.mean(scaled, axis=1)
# book keeping
check_outdir('%s/' % outdir)
n_sim_per_cond = size_factors.shape[-1]
repnames = sum((['%s%i' % (c, i+1) for i in range(n_sim_per_cond)]
for c in ['A', 'B']), [])
# write design to disk if not present
design_file = '%s/design.csv' % outdir
if not os.path.isfile(design_file):
pd.DataFrame(
{'A': [1]*n_sim_per_cond + [0]*n_sim_per_cond,
'B': [0]*n_sim_per_cond + [1]*n_sim_per_cond},
dtype=bool,
index=repnames
).to_csv(design_file)
# rewrite size_factor matrix in terms of distance
if len(size_factors.shape) == 2:
eprint(' converting size factors', skip=not verbose)
dist = col - row
n_dists = dist.max() + 1
new_size_factors = np.zeros((n_dists, size_factors.shape[1]))
for d in tqdm(range(n_dists)):
idx = np.argmax(dist == d)
new_size_factors[d, :] = size_factors[idx, :]
size_factors = new_size_factors
# get rid of bias
if skip_bias:
bias = np.ones_like(bias)
size_factors = np.ones_like(size_factors)
# tile bias and size_factors
bias = np.tile(bias, 2)
size_factors = np.tile(size_factors, 2)
# simulate and save
classes, sim_iter = simulate(
row, col, mean, disp_fn, bias, size_factors, clusters, beta=beta,
p_diff=p_diff, trend='dist', verbose=verbose)
np.savetxt('%s/labels_%s.txt' % (outdir, chrom), classes, fmt='%s')
for rep, csr in zip(repnames, sim_iter):
sparse.save_npz('%s/%s_%s_raw.npz' % (outdir, rep, chrom), csr)
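    # Hypothetical usage (the condition name 'ES' is illustrative, not from
    # this package's docs):
    #   obj.simulate('ES', beta=0.5, p_diff=0.4, outdir='sim')
    # writes one '<rep>_<chrom>_raw.npz' per simulated replicate plus
    # 'labels_<chrom>.txt' with the true perturbation classes under 'sim/'.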
def evaluate(self, cluster_pattern, label_pattern, min_dist=None,
max_dist=None, rerun_bh=False, outfile=None):
"""
Evaluates the results of this analysis, comparing it to true labels.
Parameters
----------
cluster_pattern : str
File path pattern to sparse JSON formatted cluster files
representing loop cluster locations. Should contain at least one
'<chrom>' which will be replaced with the chromosome name when
loading data for specific chromosomes. Pass a condition name to use
``self.loop_patterns[cluster_pattern]`` instead.
label_pattern : str
File path pattern to true label files for each chromosome. Should
contain at least one '<chrom>' which will be replaced with the
chromosome name when loading data for specific chromosomes. Files
should be loadable with ``np.loadtxt(..., dtype='U7')`` to yield a
vector of true labels parallel to the clusters pointed to by
``cluster_pattern``.
min_dist, max_dist : int, optional
Specify minimum and maximum distances to evaluate performance
within, respectively. Pass None to leave one or both ends unbounded.
rerun_bh : bool
If ``min_dist`` and/or ``max_dist`` are used to constrain the
distances, pass True to re-run BH-FDR on the subset of p-values at
the selected distances. Pass False to use the original dataset-wide
q-values. Does nothing if ``min_dist`` and ``max_dist`` are both
None.
outfile : str, optional
Name of a file to save the evaluation results to inside this
object's ``outdir``. Default is 'eval.npz' if ``min_dist`` and
``max_dist`` are both None, otherwise it is
'eval_<min_dist>_<max_dist>.npz'.
"""
# resolve outfile
if outfile is None:
if min_dist is None and max_dist is None:
outfile = 'eval.npz'
else:
outfile = 'eval_%s_%s.npz' % (min_dist, max_dist)
# resolve case where a condition name was passed to cluster_pattern
if cluster_pattern in self.loop_patterns.keys():
cluster_pattern = self.loop_patterns[cluster_pattern]
# make y_true and pvalues/qvalues (if necessary) one chrom at a time
y_true = []
pvalues = []
qvalues = []
for chrom in self.chroms:
# load data
disp_idx = self.load_data('disp_idx', chrom)
loop_idx = self.load_data('loop_idx', chrom)
row = self.load_data('row', chrom, idx=(disp_idx, loop_idx))
col = self.load_data('col', chrom, idx=(disp_idx, loop_idx))
clusters = load_clusters(cluster_pattern.replace('<chrom>', chrom))
labels = np.loadtxt(label_pattern.replace('<chrom>', chrom),
dtype='U7')
# construct dist_idx
dist = col - row
dist_idx = np.ones(len(dist), dtype=bool)
if min_dist is not None:
dist_idx[dist < min_dist] = False
if max_dist is not None:
dist_idx[dist > max_dist] = False
# append to y_true and pvalues/qvalues (if necessary)
y_true.append(make_y_true(
row[dist_idx], col[dist_idx], clusters, labels))
if min_dist is not None or max_dist is not None:
if rerun_bh:
pvalues.append(self.load_data('pvalues', chrom,
idx=(loop_idx, dist_idx)))
else:
qvalues.append(self.load_data('qvalues', chrom,
idx=dist_idx))
# concatenate y_true and make or load qvalues
y_true = np.concatenate(y_true)
if pvalues:
qvalues = adjust_pvalues(np.concatenate(pvalues))
elif qvalues:
qvalues = np.concatenate(qvalues)
else:
qvalues, _ = self.load_data('qvalues', 'all')
# evaluate and save to disk
fdr, fpr, tpr, thresh = evaluate(y_true, qvalues)
# save to disk
np.savez('%s/%s' % (self.outdir, outfile),
**{'fdr': fdr, 'fpr': fpr, 'tpr': tpr, 'thresh': thresh})
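    # Hypothetical usage, continuing the sketch above: score the analysis
    # against the simulated ground truth written by simulate():
    #   obj.evaluate('ES', 'sim/labels_<chrom>.txt')
    # saves the fdr/fpr/tpr/thresh arrays to '<outdir>/eval.npz'.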
|
{"hexsha": "c3906544e33c805471d93075c473bc643090ebb1", "size": 10545, "ext": "py", "lang": "Python", "max_stars_repo_path": "hic3defdr/analysis/simulation.py", "max_stars_repo_name": "thomasgilgenast/hic3defdr", "max_stars_repo_head_hexsha": "7498ac468ccc21fa530d584944c1b12c73926755", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "hic3defdr/analysis/simulation.py", "max_issues_repo_name": "thomasgilgenast/hic3defdr", "max_issues_repo_head_hexsha": "7498ac468ccc21fa530d584944c1b12c73926755", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-05-02T08:55:20.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-21T16:16:40.000Z", "max_forks_repo_path": "hic3defdr/analysis/simulation.py", "max_forks_repo_name": "thomasgilgenast/hic3defdr", "max_forks_repo_head_hexsha": "7498ac468ccc21fa530d584944c1b12c73926755", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.9375, "max_line_length": 80, "alphanum_fraction": 0.5920341394, "include": true, "reason": "import numpy,import scipy", "num_tokens": 2312}
|
\name{circos.raster}
\alias{circos.raster}
\title{
Add raster images
}
\description{
Add raster images
}
\usage{
circos.raster(image, x, y, width, height,
facing = c("inside", "outside", "reverse.clockwise", "clockwise",
"downward", "bending.inside", "bending.outside"),
niceFacing = FALSE, sector.index = get.cell.meta.data("sector.index"),
track.index = get.cell.meta.data("track.index"),
scaling = 1)
}
\arguments{
\item{image}{a \code{raster} object, or an object that can be converted by \code{\link[grDevices]{as.raster}}}
\item{x}{position of the center of the raster image, measured in the data coordinate in the cell}
\item{y}{position of the center of the raster image, measured in the data coordinate in the cell}
\item{width}{width of the raster image. When \code{facing} is one of "inside", "outside", "clockwise" and "reverse.clockwise", the image should have absolute size where the value of \code{width} should be specified like \code{20mm}, \code{1cm} or \code{0.5inche}. When \code{facing} is one of \code{bending.inside} and \code{bending.outside}, the value of \code{width} is measured in the data coordinate in the cell.}
\item{height}{height of the raster image. Same format as \code{width}. If the value of \code{height} is omitted, the default height is calculated by taking the aspect ratio of the original image. But when \code{facing} is one of \code{bending.inside} and \code{bending.outside}, \code{height} is mandatory to set.}
\item{facing}{facing of the raster image}
\item{niceFacing}{whether to adjust the facing so that the image is easy to view. Please refer to vignette for different settings}
\item{sector.index}{index for the sector}
\item{track.index}{index for the track}
\item{scaling}{scaling factor to resize the raster image.}
}
\author{
Zuguang Gu <z.gu@dkfz.de>
}
\examples{
require(png)
image = system.file("extdata", "Rlogo.png", package = "circlize")
image = as.raster(readPNG(image))
circos.initialize(letters[1:8], xlim = c(0, 1))
circos.track(ylim = c(0, 1), panel.fun = function(x, y) {
circos.raster(image, CELL_META$xcenter, CELL_META$ycenter, width = "2cm",
facing = "inside", niceFacing = TRUE)
})
circos.clear()
\dontrun{
# NOTE: following takes quite a long time to run
load(system.file("extdata", "doodle.RData", package = "circlize"))
circos.par("cell.padding" = c(0, 0, 0, 0))
circos.initialize(letters[1:16], xlim = c(0, 1))
circos.track(ylim = c(0, 1), panel.fun = function(x, y) {
img = img_list[[CELL_META$sector.numeric.index]]
circos.raster(img, CELL_META$xcenter, CELL_META$ycenter, width = 1,
height = 1, facing = "bending.inside")
}, track.height = 0.25, bg.border = NA)
circos.track(ylim = c(0, 1), panel.fun = function(x, y) {
img = img_list[[CELL_META$sector.numeric.index + 16]]
circos.raster(img, CELL_META$xcenter, CELL_META$ycenter, width = 1,
height = 1, facing = "bending.inside")
}, track.height = 0.25, bg.border = NA)
circos.clear()
}
}
|
{"hexsha": "2cbbafd78c74bc18c333eb4f394fb4ea6e3e1f74", "size": 2955, "ext": "rd", "lang": "R", "max_stars_repo_path": "man/circos.raster.rd", "max_stars_repo_name": "calpan/circlize", "max_stars_repo_head_hexsha": "33f8f23663768367188e50e93d3f9b2b57edd0e7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-09-16T12:30:42.000Z", "max_stars_repo_stars_event_max_datetime": "2019-09-16T12:30:47.000Z", "max_issues_repo_path": "man/circos.raster.rd", "max_issues_repo_name": "Nexller/circlize", "max_issues_repo_head_hexsha": "71df6b5316680dcee4d39d3ac7c224fcfc32439b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-08-16T14:55:12.000Z", "max_issues_repo_issues_event_max_datetime": "2019-08-16T14:55:12.000Z", "max_forks_repo_path": "man/circos.raster.rd", "max_forks_repo_name": "Nexller/circlize", "max_forks_repo_head_hexsha": "71df6b5316680dcee4d39d3ac7c224fcfc32439b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 46.9047619048, "max_line_length": 419, "alphanum_fraction": 0.7008460237, "num_tokens": 892}
|
[STATEMENT]
lemma "(pi * (real u * 2) = pi * (real (xa v) * - 2))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. pi * (real u * 2) = pi * (real (xa v) * - 2)
[PROOF STEP]
apply simp
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. pi * real u = - (pi * real (xa v))
[PROOF STEP]
oops
|
{"llama_tokens": 138, "file": null, "length": 2}
|
export _At_mul_B,
_At_ldiv_B,
DEFAULT_COND_TOL,
hasfullrowrank,
issquare,
isinvertible,
cross_product,
nonzero_columns,
extend,
projection_matrix,
remove_zero_columns
# default tolerance for matrix condition number (see 'isinvertible')
const DEFAULT_COND_TOL = 1e6
# matrix-matrix multiplication
@inline _At_mul_B(A, B) = transpose(A) * B
# matrix-matrix division
@inline _At_ldiv_B(A, B) = transpose(A) \ B
# rank of sparse submatrix (see #1497)
LinearAlgebra.rank(M::SubArray{N, 2, <:SparseMatrixCSC}) where {N} = rank(sparse(M))
"""
issquare(M::AbstractMatrix)
Check whether a matrix is square.
### Input
- `M` -- matrix
### Output
`true` iff the matrix is square.
"""
function issquare(M::AbstractMatrix)
m, n = size(M)
return m == n
end
"""
hasfullrowrank(M::AbstractMatrix)
Check whether a matrix has full row rank.
### Input
- `M` -- matrix
### Output
`true` iff the matrix has full row rank.
"""
function hasfullrowrank(M::AbstractMatrix)
return rank(M) == size(M, 1)
end
"""
isinvertible(M::Matrix; [cond_tol]::Number=DEFAULT_COND_TOL)
A sufficient check of a matrix being invertible (or nonsingular).
### Input
- `M` -- matrix
- `cond_tol` -- (optional, default: `DEFAULT_COND_TOL`) tolerance of matrix
condition
### Output
If the result is `true`, `M` is invertible.
If the result is `false`, the matrix is non-square or this function could not
conclude.
### Algorithm
We check whether the matrix is square and whether the
[matrix condition number](https://en.wikipedia.org/wiki/Condition_number#Matrices)
`cond(M)` is below some prescribed tolerance.
"""
function isinvertible(M::Matrix; cond_tol::Number=DEFAULT_COND_TOL)
return issquare(M) && cond(M) < cond_tol
end
# cond is not available for sparse matrices; see JuliaLang#6485 and related issues
function isinvertible(M::AbstractSparseMatrix;
cond_tol::Number=DEFAULT_COND_TOL)
return issquare(M) && isinvertible(Matrix(M), cond_tol=cond_tol)
end
function isinvertible(M::Diagonal; cond_tol=nothing)
return !any(iszero, diag(M))
end
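# Quick sanity check (illustrative, not from the original test suite):
# isinvertible([2.0 1.0; 1.0 1.0])  # true: square and well conditioned
# isinvertible(ones(2, 2))          # false: singular, condition number is Inf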
"""
cross_product(M::AbstractMatrix{N}) where {N<:Real}
Compute the high-dimensional cross product of ``n-1`` ``n``-dimensional vectors.
### Input
- `M` -- ``n × n - 1``-dimensional matrix
### Output
A vector.
### Algorithm
The cross product is defined as follows:
```math
\\left[ \\dots, (-1)^{i+1} \\det(M^{[i]}), \\dots \\right]^T
```
where ``M^{[i]}`` is defined as ``M`` with the ``i``-th row removed.
See *Althoff, Stursberg, Buss: Computing Reachable Sets of Hybrid Systems Using
a Combination of Zonotopes and Polytopes. 2009.*
"""
function cross_product(M::AbstractMatrix{N}) where {N<:Real}
n = size(M, 1)
@assert 1 < n == size(M, 2) + 1 "the matrix must be n x (n-1) dimensional"
v = Vector{N}(undef, n)
    for i in 1:n
        Mi = view(M, 1:n .!= i, :)  # remove i-th row
        # cofactor-style alternating sign: (-1)^(i+1)
        v[i] = isodd(i) ? det(Mi) : -det(Mi)
    end
return v
end
# det cannot handle sparse matrices in some cases
cross_product(M::AbstractSparseMatrix) = cross_product(Matrix(M))
cross_product(M::SubArray{N, 2, <:AbstractSparseMatrix}) where {N} = cross_product(Matrix(M))
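# Quick sanity check (illustrative, not from the original test suite): for
# n = 3 this reduces to the usual cross product, e.g. e1 × e2 = e3:
# cross_product([1.0 0.0; 0.0 1.0; 0.0 0.0])  # -> [0.0, 0.0, 1.0]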
"""
nonzero_columns(A::AbstractMatrix)
Return all columns that have at least one non-zero entry.
### Input
- `A` -- matrix
### Output
A vector of indices.
"""
function nonzero_columns(A::AbstractMatrix)
n = size(A, 2)
nzcol = Vector{Int}()
sizehint!(nzcol, n)
for j in 1:n
if !iszero(view(A, :, j))
push!(nzcol, j)
end
end
return nzcol
end
function nonzero_columns(A::SparseMatrixCSC)
dropzeros!(A)
return collect(j for j in 1:A.n if A.colptr[j] < A.colptr[j+1])
end
"""
extend(M::AbstractMatrix; check_rank=true)
Return an invertible extension of `M` whose first `n` columns span the column
space of `M`, assuming that `size(M) = (m, n)`, `m > n` and the rank of `M` is `n`.
### Input
- `M` -- rectangular `m × n` matrix with `m > n` and full rank (i.e. its
rank is `n`)
- `check_rank` -- (optional, default: `true`) if `true`, check the rank assumption,
otherwise do not perform this check
### Output
The tuple `(Mext, inv_Mext)`, where `Mext` is a square `m × m` invertible matrix
that extends `M`, i.e. in the sense that `Mext = [M | Q2]`, and the rank of `Mext`
is `m`. Here, `inv_Mext` is the inverse of `Mext`.
### Algorithm
First we compute the QR decomposition of `M` to extract a suitable subspace of
column vectors (`Q2`) that are orthogonal to the column span of `M`. Then we observe
that the inverse of the extended matrix `Mext = [M | Q2]` is `[R⁻¹Qᵀ; Q2ᵀ]`.
"""
function extend(M::AbstractMatrix; check_rank=true)
m, n = size(M)
m <= n && throw(ArgumentError("this function requires that the number " *
"of rows is greater than the number of columns, but they are of size $m and " *
"$n respectively"))
if check_rank
r = rank(M)
r != n && throw(ArgumentError("the rank of the given matrix is " *
"$r, but this function assumes that it is $n"))
end
# compute QR decomposition of M
Q, R = qr(M)
# Q2 spans the null space of M
Q2 = Q[:, (n + 1):end]
# extend M by appending the columns orthogonal to the column span of M
Mext = hcat(M, Q2)
# since the inverse is easy to compute, return it
inv_Mext = vcat(inv(R) * Q', Q2')
return Mext, inv_Mext
end
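# Quick sanity check (illustrative, not from the original test suite):
# M = reshape([2.0, 0.0, 0.0], 3, 1)
# Mext, inv_Mext = extend(M)
# Mext * inv_Mext ≈ I   # holds up to floating-point error (I from LinearAlgebra)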
"""
projection_matrix(block::AbstractVector{Int}, n::Int, [N]::Type{<:Number}=Float64)
Return the projection matrix associated to the given block of variables.
### Input
- `block` -- integer vector with the variables of interest
- `n` -- integer representing the ambient dimension
- `N` -- (optional, default: `Float64`) number type
### Output
A sparse matrix that corresponds to the projection onto the variables in `block`.
### Examples
```jldoctest projection_matrix
julia> using LazySets: projection_matrix
julia> projection_matrix([1, 3], 4)
2×4 SparseArrays.SparseMatrixCSC{Float64, Int64} with 2 stored entries:
1.0 ⋅ ⋅ ⋅
⋅ ⋅ 1.0 ⋅
julia> Matrix(ans)
2×4 Matrix{Float64}:
1.0 0.0 0.0 0.0
0.0 0.0 1.0 0.0
```
"""
function projection_matrix(block::AbstractVector{Int}, n::Int, N::Type{<:Number}=Float64)
m = length(block)
return sparse(1:m, block, ones(N, m), m, n)
end
# fallback: represent the projection matrix as a sparse array
function projection_matrix(block::AbstractVector{Int}, n::Int, VN::Type{<:AbstractVector{N}}) where {N}
return projection_matrix(block, n, N)
end
function load_projection_matrix_static()
return quote
# represent the projection matrix with a static array
function projection_matrix(block::AbstractVector{Int}, n::Int, VN::Type{<:SVector{L, N}}) where {L, N}
mat = projection_matrix(block, n, N)
m = size(mat, 1)
return SMatrix{m, n}(mat)
end
end # quote
end # end load_projection_matrix_static
"""
remove_zero_columns(A::AbstractMatrix)
Return a matrix with all columns containing only zero entries removed.
### Input
- `A` -- matrix
### Output
The original matrix `A` if it contains no zero columns or otherwise a new matrix
where those columns have been removed.
"""
function remove_zero_columns(A::AbstractMatrix)
nzcol = nonzero_columns(A)
if length(nzcol) == size(A, 2)
return A
else
return A[:, nzcol]
end
end
|
{"hexsha": "a87ec84979dc189416068deff194ba40edffa29e", "size": 7674, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Arrays/matrix_operations.jl", "max_stars_repo_name": "nablabits/LazySets.jl", "max_stars_repo_head_hexsha": "e839322ae970e5b61271b709f8a865184b32c8e5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 194, "max_stars_repo_stars_event_min_datetime": "2017-11-01T20:07:45.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-28T22:06:30.000Z", "max_issues_repo_path": "src/Arrays/matrix_operations.jl", "max_issues_repo_name": "nablabits/LazySets.jl", "max_issues_repo_head_hexsha": "e839322ae970e5b61271b709f8a865184b32c8e5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1986, "max_issues_repo_issues_event_min_datetime": "2017-10-23T18:46:28.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-29T06:13:40.000Z", "max_forks_repo_path": "src/Arrays/matrix_operations.jl", "max_forks_repo_name": "nablabits/LazySets.jl", "max_forks_repo_head_hexsha": "e839322ae970e5b61271b709f8a865184b32c8e5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 33, "max_forks_repo_forks_event_min_datetime": "2017-11-08T17:10:32.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-22T07:33:59.000Z", "avg_line_length": 25.2434210526, "max_line_length": 106, "alphanum_fraction": 0.6548084441, "num_tokens": 2191}
|
import sys
# import libraries
import pandas as pd
import numpy as np
import seaborn as sns
import sqlite3
from sqlalchemy import create_engine
import matplotlib.pyplot as plt
# import statements
import nltk
nltk.download('punkt')
nltk.download('stopwords')
nltk.download('wordnet')
import re
from nltk.corpus import stopwords
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.tokenize import word_tokenize
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer, TfidfVectorizer
from sklearn.multioutput import MultiOutputClassifier
from sklearn import metrics
from sklearn.model_selection import GridSearchCV
#from sklearn.linear_model import SGDClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.preprocessing import FunctionTransformer
from sklearn.decomposition import TruncatedSVD
from scipy.sparse import random as sparse_random
from tqdm import tqdm
import pickle
class TextLengthExtractor(BaseEstimator, TransformerMixin):
    """
    Transformer that extracts the length of each string in an array of strings.
    """
    def fit(self, X, y=None):
        """
        Fit the transformer; it is stateless, so this is a no-op.
        Parameters: X is an array of strings
        Return: self, the fitted transformer
        """
        return self
    def transform(self, text):
        """
        Calculate the length of each string in an array of strings.
        Parameters: text is a 1D array of strings
        Return: lengths, a column vector of shape (n, 1) with the length of each string
        """
        lens = []
        for x in text:
            lens.append(len(x))
        lengths = np.asarray(lens).reshape(-1, 1)
        return lengths
def display_results(y_test, y_pred):
    """
    Function to display the scores of the multiclass-multioutput-multilabel
    classifier: f1 score, precision and recall for each message category
    Parameters: y_test is the matrix with the testing labels
                y_pred is the matrix with the predicted labels
    """
    y_test2 = np.array(y_test)
    y_pred2 = np.array(y_pred)
    columnsTest = y_test.shape[1]
    columnsPred = y_pred.shape[1]
    # Obtain the precision, recall, and F1 metrics for each category in y_pred
    if columnsTest == columnsPred:
        for i in range(columnsTest):
            print("Category of message= ", y_test.columns[i])
            print(metrics.classification_report(y_test2[:, i], y_pred2[:, i]))
    else:
        print("The number of columns in y_test and y_pred differs.")
    return
def load_data(database_filepath):
    """
    Function to load the data from a database with the messages already cleaned
    Parameters: database_filepath is the file path of the database
    Return: X with the messages, Y with the category labels, and the category names
    """
    # load data from database
    database_name = "sqlite:///" + database_filepath
    print(database_name)
    engine = create_engine(database_name)
    df = pd.read_sql_table('DisasterResponse', engine)
    # Extract messages
    X = df[['id', 'message', 'original', 'genre']]
    columns_categories = df.columns.drop(['id', 'message', 'original', 'genre'])
    print(columns_categories)
    # Extract categories columns
    Y = df[columns_categories]
    return X, Y, columns_categories
def tokenize(text):
    """
    This function obtains the tokens from a string.
    Parameters: text is the string from which the tokens are obtained
    Return: clean_tokens, the list of clean tokens
    """
    # Normalize text and remove punctuation
    text = re.sub(r"[^a-zA-Z0-9]", " ", text.lower())
    stop_words = stopwords.words("english")
    lemmatizer = WordNetLemmatizer()
    # Tokenize text
    tokens = word_tokenize(text)
    # Remove stop words and lemmatize
    clean_tokens = []
    for tok in tokens:
        if tok in stop_words:
            continue
        clean_tok = lemmatizer.lemmatize(tok).lower().strip()
        clean_tokens.append(clean_tok)
    return clean_tokens
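# Illustrative example (hedged; exact output depends on the NLTK data):
#   tokenize("Water needed in Jacmel!")  ->  ['water', 'needed', 'jacmel']
# after normalization, stop-word removal ("in") and lemmatization.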
def build_model(X_train):
    """
    This function builds a model with a grid search over hyper-parameters for
    the ExtraTrees and RandomForest classifiers
    Parameters: X_train is the training data containing the messages
    Return: cv, the created model
    """
    textlen = TextLengthExtractor()
pipeline = Pipeline([
('features', FeatureUnion([
('nlp_pipeline', Pipeline([ ('vect', CountVectorizer(tokenizer=tokenize)),
('tfidf', TfidfTransformer()) ]) ),
('textlen', textlen ),
        ]) # End of the (name, transformer) pairs for FeatureUnion
),
('estimator', MultiOutputClassifier(ExtraTreesClassifier(random_state=0, bootstrap=True, max_depth=3))),
#('estimator', MultiOutputClassifier(RandomForestClassifier())),
])
#pipeline.fit(X_train, y_train)
parameters = [
{
            'features__nlp_pipeline__vect__ngram_range': [(1, 2)],  # Use unigrams and bigrams
            'features__nlp_pipeline__tfidf__norm': ['l2'],  # Only l2 normalization is searched here
'estimator':[MultiOutputClassifier(ExtraTreesClassifier(random_state=0, bootstrap=True, max_depth=3))],
'estimator__estimator__n_estimators': [10],
},
{
            'features__nlp_pipeline__vect__ngram_range': [(1, 2)],  # Use unigrams and bigrams
            'features__nlp_pipeline__tfidf__norm': ['l2'],  # Only l2 normalization is searched here
'estimator':[MultiOutputClassifier(RandomForestClassifier())],
'estimator__estimator__n_estimators': [8],
}
]
cv = GridSearchCV(estimator=pipeline, param_grid=parameters,refit=True,verbose=2,n_jobs=-1)
return cv
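# Illustrative usage sketch: the returned GridSearchCV behaves like a regular
# estimator, so the grid search runs inside the usual fit call and, because
# refit=True, the best pipeline is kept for prediction:
#   model = build_model(X_train)
#   model.fit(X_train, Y_train)
#   y_pred = model.predict(X_test)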
def evaluate_model(model, X_test, Y_test, category_names):
"""
This function evaluates a model by predicting with the test data and then displaying the precision, recall,
and the f1-score for each category of the messages. The best hyperparameters found for the best model are also displayed.
    Parameters: model is the trained model
                X_test is the test data (features)
                Y_test is the test labels
                category_names is the list of category names; a message can belong to more than one
Return: None
"""
# Print out the best hyperparameters results
print("Best parameter (CV score=%0.3f):" % model.best_score_)
print(model.best_params_)
# Make the prediction for the testing data
y_pred = model.predict(X_test)
# Display results, reporting the f1 score, precision and recall for each
# output category of the dataset
display_results(Y_test, y_pred)
def save_model(model, model_filepath):
"""
This function saves the model into a pickle file.
Parameters: model is the trained model
model_filepath is the name of the pickle file where the model is saved into.
Return: None
"""
# Exporting the model to a file
pickle.dump(model, open(model_filepath,'wb'))
def main():
"""
This program upload data from a database, train a model (ExtraTrees and K-nearest classifiers) with the best hyper-parameters, test the model and then save it into a pickle file.
Parameters: None
Return: None
"""
if len(sys.argv) == 3:
database_filepath, model_filepath = sys.argv[1:]
print('Loading data...\n DATABASE: {}'.format(database_filepath))
X, Y, category_names = load_data(database_filepath)
X_train, X_test, Y_train, Y_test = train_test_split(X.message, Y, test_size=0.2)
print('Building model...')
model = build_model(X_train)
print('Training model...')
model.fit(X_train, Y_train)
print('Evaluating model...')
evaluate_model(model, X_test, Y_test, category_names)
print('Saving model...\n MODEL: {}'.format(model_filepath))
save_model(model, model_filepath)
print('Trained model saved!')
else:
print('Please provide the filepath of the disaster messages database '\
'as the first argument and the filepath of the pickle file to '\
'save the model to as the second argument. \n\nExample: python '\
'train_classifier.py ../data/DisasterResponse.db classifier.pkl')
if __name__ == '__main__':
main()
|
{"hexsha": "6f345a39f77ec571e665837daf25ee97a282a190", "size": 9358, "ext": "py", "lang": "Python", "max_stars_repo_path": "models/train_classifier.py", "max_stars_repo_name": "angmx/DisasterResponseProject", "max_stars_repo_head_hexsha": "c2c29fb588570972d81622be79b66d0c41977861", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "models/train_classifier.py", "max_issues_repo_name": "angmx/DisasterResponseProject", "max_issues_repo_head_hexsha": "c2c29fb588570972d81622be79b66d0c41977861", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "models/train_classifier.py", "max_forks_repo_name": "angmx/DisasterResponseProject", "max_forks_repo_head_hexsha": "c2c29fb588570972d81622be79b66d0c41977861", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.2783882784, "max_line_length": 182, "alphanum_fraction": 0.6559093823, "include": true, "reason": "import numpy,from scipy", "num_tokens": 2032}
|
MODULE param_db
! Module where global PARAMETERS of Program are defined
IMPLICIT NONE
!--- Constants for character length
INTEGER(kind=4),PARAMETER:: midn = 6 ,& !Maximum length for identifier
milb = 6 ,& !Maximum length for internal labels
mchr = 6 ,& !Maximum length for ???
miln = 132,& !Maximum length for lines of data input
mlin = 131,& !Maximum length for general lines
mttl = 99,& !Maximum length for problem title
mset = 15,& !Maximum length for sets comments
mvar = 15,& !Maximum length for variable name and components
mich = 11 !Maximum length of digits for integers
INTEGER(kind=4),PARAMETER:: mlen = 128,& !Maximum length for input parameters and file names
mprg = 8, & !Maximum length for program label
mnam = 30 !Maximum length for data label names
INTEGER(kind=4),PARAMETER:: mstl = 128, & !Maximum length for long character variables
msts = 30 !Maximum length for medium character variables
!--- Constants to limit usage
!MS$if acad > 0
!--- for academic version
! INTEGER(kind=4),PARAMETER:: max_npoin_2d = 400, & !Maximum number of nodes for 2D problems
! max_npoin_3d = 1000, & !Maximum number of nodes for 3D problems
! max_memo = 524000 !Maximum value for memo in beginp
!MS$else
!--- full version
INTEGER(kind=4),PARAMETER:: max_npoin_2d = 100000, & !Maximum number of nodes for 2D problems
max_npoin_3d = 1000000, & !Maximum number of nodes for 3D problems
max_memo = 13000000 !Maximum value for memo in beginp
!MS$endif
END MODULE param_db
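! Illustrative usage sketch (not part of this module): a consumer sizes its
! character variables with these constants, e.g.
!   USE param_db, ONLY : mlen, mttl
!   CHARACTER(len=mlen) :: input_file
!   CHARACTER(len=mttl) :: title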
|
{"hexsha": "d98c160c5fea7ab55fe01ce48b2a25860337b503", "size": 2037, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/mainp/param_db.f90", "max_stars_repo_name": "jerebenitez/IFE-simpact-openfoam", "max_stars_repo_head_hexsha": "2dbcbf3195b22fca1c80ad0da6b3822b6cad5cdf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/mainp/param_db.f90", "max_issues_repo_name": "jerebenitez/IFE-simpact-openfoam", "max_issues_repo_head_hexsha": "2dbcbf3195b22fca1c80ad0da6b3822b6cad5cdf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/mainp/param_db.f90", "max_forks_repo_name": "jerebenitez/IFE-simpact-openfoam", "max_forks_repo_head_hexsha": "2dbcbf3195b22fca1c80ad0da6b3822b6cad5cdf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 56.5833333333, "max_line_length": 99, "alphanum_fraction": 0.5405007364, "num_tokens": 472}
|
import os
from torch.utils.data import DataLoader
from continuum.datasets import CIFAR10, InMemoryDataset
from continuum.datasets import MNIST
import torchvision
from continuum.scenarios import TransformationIncremental
import pytest
import numpy as np
from continuum.transforms.bg_swap import BackgroundSwap
DATA_PATH = os.environ.get("CONTINUUM_DATA_PATH")
# Uncomment for debugging via image output
# import matplotlib.pyplot as plt
def test_bg_swap_fast():
"""
Fast test for background swap.
"""
bg_x = np.ones(shape=[2, 5, 5, 3]) * -1
bg_y = np.random.rand(2)
fg = np.random.normal(loc=.5, scale=.1, size=[5, 5])
bg = InMemoryDataset(bg_x, bg_y)
bg_swap = BackgroundSwap(bg, input_dim=(5, 5), normalize_bg=None)
spliced_1_channel = bg_swap(fg)[:, :, 0]
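    # Pixels taken from the background (value -1) should be exactly those
    # where the foreground intensity fell at or below the .5 threshold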
assert np.array_equal((spliced_1_channel <= -1), (fg <= .5))
@pytest.mark.slow
def test_background_swap_numpy():
"""
Test background swap on a single ndarray input.
"""
mnist = MNIST(DATA_PATH, download=True, train=True)
cifar = CIFAR10(DATA_PATH, download=True, train=True)
bg_swap = BackgroundSwap(cifar, input_dim=(28, 28))
im = mnist.get_data()[0][0]
im = bg_swap(im)
# Uncomment for debugging
# plt.imshow(im, interpolation='nearest')
# plt.show()
@pytest.mark.slow
def test_background_swap_torch():
"""
Test background swap on a single tensor input.
"""
cifar = CIFAR10(DATA_PATH, download=True, train=True)
mnist = torchvision.datasets.MNIST(DATA_PATH, train=True, download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.ToTensor()
]))
bg_swap = BackgroundSwap(cifar, input_dim=(28, 28))
im = mnist[0][0]
im = bg_swap(im)
# Uncomment for debugging
# plt.imshow(im.permute(1, 2, 0), interpolation='nearest')
# plt.show()
@pytest.mark.slow
def test_background_transformation():
"""
Example code using TransformationIncremental to create a setting with 3 tasks.
"""
cifar = CIFAR10(DATA_PATH, train=True)
mnist = MNIST(DATA_PATH, download=False, train=True)
nb_task = 3
list_trsf = []
for i in range(nb_task):
list_trsf.append([torchvision.transforms.ToTensor(), BackgroundSwap(cifar, bg_label=i, input_dim=(28, 28)),
torchvision.transforms.ToPILImage()])
scenario = TransformationIncremental(mnist, base_transformations=[torchvision.transforms.ToTensor()],
incremental_transformations=list_trsf)
folder = "tests/samples/background_trsf/"
if not os.path.exists(folder):
os.makedirs(folder)
for task_id, task_data in enumerate(scenario):
task_data.plot(path=folder, title=f"background_{task_id}.jpg", nb_samples=100, shape=[28, 28, 3])
loader = DataLoader(task_data)
_, _, _ = next(iter(loader))
|
{"hexsha": "f7000bc963cc817a5a5dca6aba86f5ea6dde667e", "size": 3008, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_background_swap.py", "max_stars_repo_name": "pclucas14/continuum", "max_stars_repo_head_hexsha": "3b9b0fc3c2f21dcaeafbccfa29987cefe55f37a0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2020-04-15T14:31:42.000Z", "max_stars_repo_stars_event_max_datetime": "2020-04-24T17:07:34.000Z", "max_issues_repo_path": "tests/test_background_swap.py", "max_issues_repo_name": "pclucas14/continuum", "max_issues_repo_head_hexsha": "3b9b0fc3c2f21dcaeafbccfa29987cefe55f37a0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 18, "max_issues_repo_issues_event_min_datetime": "2020-04-15T14:57:27.000Z", "max_issues_repo_issues_event_max_datetime": "2020-05-02T14:05:36.000Z", "max_forks_repo_path": "tests/test_background_swap.py", "max_forks_repo_name": "arthurdouillard/continual_loader", "max_forks_repo_head_hexsha": "09034db1371e9646ca660fd4d4df73e61bf77067", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-04-15T15:50:28.000Z", "max_forks_repo_forks_event_max_datetime": "2020-04-15T15:50:28.000Z", "avg_line_length": 31.0103092784, "max_line_length": 115, "alphanum_fraction": 0.6572473404, "include": true, "reason": "import numpy", "num_tokens": 705}
|
import json
import os
import sys
import tensorflow as tf
from keras import backend as K
from keras import optimizers, utils
from keras.callbacks import CSVLogger
from keras.engine import Model
from keras.layers import Dropout, Flatten, Dense
from keras.layers import GlobalAveragePooling2D, GlobalMaxPooling2D
from src.GrayscaleModels.densenet_gray import DenseNet169
from src.train.GradientCheckpointing import memory_saving_gradients
WDIR = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.join(WDIR, "GradientCheckpointing"))
sys.path.insert(0, os.path.join(WDIR, "../GrayscaleModels"))
K.__dict__["gradients"] = memory_saving_gradients.gradients_memory
import pandas as pd
import numpy as np
import scipy.misc
from scipy.ndimage.interpolation import rotate
from scipy.ndimage.filters import gaussian_filter
from skimage import exposure
from sklearn.metrics import roc_auc_score, cohen_kappa_score, accuracy_score, f1_score
def get_model(base_model,
layer,
lr=1e-3,
input_shape=(224, 224, 1),
classes=2,
activation="softmax",
dropout=None,
pooling="avg",
weights=None,
pretrained="imagenet"):
base = base_model(input_shape=input_shape,
include_top=False,
weights=pretrained,
channels="gray")
if pooling == "avg":
x = GlobalAveragePooling2D()(base.output)
elif pooling == "max":
x = GlobalMaxPooling2D()(base.output)
elif pooling is None:
x = Flatten()(base.output)
if dropout is not None:
x = Dropout(dropout)(x)
x = Dense(classes, activation=activation)(x)
model = Model(inputs=base.input, outputs=x)
if weights is not None:
model.load_weights(weights)
for l in model.layers[:layer]:
l.trainable = False
model.compile(loss="binary_crossentropy", metrics=["accuracy"],
optimizer=optimizers.Adam(lr))
return model
##########
## DATA ##
##########
# == PREPROCESSING == #
def preprocess_input(x, model):
x = x.astype("float32")
if model in ("inception", "xception", "mobilenet"):
x /= 255.
x -= 0.5
x *= 2.
if model in ("densenet"):
x /= 255.
if x.shape[-1] == 3:
x[..., 0] -= 0.485
x[..., 1] -= 0.456
x[..., 2] -= 0.406
x[..., 0] /= 0.229
x[..., 1] /= 0.224
x[..., 2] /= 0.225
elif x.shape[-1] == 1:
x[..., 0] -= 0.449
x[..., 0] /= 0.226
elif model in ("resnet", "vgg"):
if x.shape[-1] == 3:
x[..., 0] -= 103.939
x[..., 1] -= 116.779
x[..., 2] -= 123.680
elif x.shape[-1] == 1:
x[..., 0] -= 115.799
return x
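# Worked example (illustrative): for the grayscale "densenet" branch a pixel
# value of 128 maps to 128/255 ≈ 0.502, then (0.502 - 0.449) / 0.226 ≈ 0.235.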
def apply_clahe(img):
img = img / 255.
img = exposure.equalize_adapthist(img)
img = img * 255.
return img
# == AUGMENTATION == #
def crop_center(img, cropx, cropy):
y, x = img.shape[:2]
startx = x // 2 - (cropx // 2)
starty = y // 2 - (cropy // 2)
return img[starty:starty + cropy, startx:startx + cropx, :]
def data_augmentation(image):
# Input should be ONE image with shape: (L, W, CH)
options = ["gaussian_smooth", "rotate", "zoom", "adjust_gamma"]
    # Each augmentation option is chosen with equal probability
which_option = np.random.choice(options)
if which_option == "gaussian_smooth":
sigma = np.random.uniform(0.2, 1.0)
image = gaussian_filter(image, sigma)
elif which_option == "zoom":
# Assumes image is square
min_crop = int(image.shape[0] * 0.85)
max_crop = int(image.shape[0] * 0.95)
crop_size = np.random.randint(min_crop, max_crop)
crop = crop_center(image, crop_size, crop_size)
if crop.shape[-1] == 1: crop = crop[:, :, 0]
image = scipy.misc.imresize(crop, image.shape)
elif which_option == "rotate":
angle = np.random.uniform(-15, 15)
image = rotate(image, angle, reshape=False)
elif which_option == "adjust_gamma":
image = image / 255.
image = exposure.adjust_gamma(image, np.random.uniform(0.75, 1.25))
image = image * 255.
if len(image.shape) == 2: image = np.expand_dims(image, axis=2)
return image
# == I/O == #
def load_sample(train_images, num_train_samples, z):
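    # Sample num_train_samples image paths; the list z tracks images already
    # used so the whole training set is cycled through before any repeats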
if len(train_images) < num_train_samples:
train_sample_images = np.random.choice(train_images, num_train_samples, replace=True)
train_sample_array = np.asarray([np.load(arr) for arr in train_sample_images])
return train_sample_array, []
train_sample_images = list(set(train_images) - set(z))
if len(train_sample_images) < num_train_samples:
sample_diff = num_train_samples - len(train_sample_images)
not_sampled = list(set(train_images) - set(train_sample_images))
train_sample_images.extend(np.random.choice(not_sampled, sample_diff, replace=False))
z = []
else:
train_sample_images = np.random.choice(train_sample_images, num_train_samples, replace=False)
z.extend(train_sample_images)
train_sample_array = np.asarray([np.load(arr) for arr in train_sample_images])
return train_sample_array, z
def load_sample_and_labels(df, train_images, num_train_samples, z):
    # Like load_sample, but also returns the labels; z tracks images already
    # used so the training set is cycled through before images repeat
    if len(train_images) < num_train_samples:
        # Fewer images than requested: sample with replacement, reset tracker
        z = []
        train_sample_images = list(np.random.choice(train_images, num_train_samples, replace=True))
    else:
        train_sample_images = list(set(train_images) - set(z))
        if len(train_sample_images) < num_train_samples:
            # Not enough unseen images left: top up with seen ones, new cycle
            sample_diff = num_train_samples - len(train_sample_images)
            not_sampled = list(set(train_images) - set(train_sample_images))
            train_sample_images.extend(np.random.choice(not_sampled, sample_diff, replace=False))
            z = []
        else:
            train_sample_images = np.random.choice(train_sample_images, num_train_samples, replace=False)
            z.extend(train_sample_images)
train_sample_ids = [_.split("/")[-1].split(".")[0] for _ in train_sample_images]
train_sample_df = df[(df.patientId.isin(train_sample_ids))]
train_sample_df.index = train_sample_df.patientId
train_sample_df = train_sample_df.reindex(train_sample_ids)
train_sample_labels = np.asarray(train_sample_df["label"])
train_sample_array = np.asarray([np.load(arr) for arr in train_sample_images])
return train_sample_array, train_sample_labels, z
def TTA(img, model, model_name, seed=88, niter=0):
np.random.seed(seed)
original_img = img.copy()
inverted_img = np.invert(img.copy())
hflipped_img = np.fliplr(img.copy())
original_img_array = np.empty((niter + 1, img.shape[0], img.shape[1], img.shape[2]))
inverted_img_array = original_img_array.copy()
hflipped_img_array = original_img_array.copy()
original_img_array[0] = original_img
inverted_img_array[0] = inverted_img
hflipped_img_array[0] = hflipped_img
for each_iter in range(niter):
original_img_array[each_iter + 1] = data_augmentation(original_img)
inverted_img_array[each_iter + 1] = data_augmentation(inverted_img)
hflipped_img_array[each_iter + 1] = data_augmentation(hflipped_img)
tmp_array = np.vstack((original_img_array, inverted_img_array, hflipped_img_array))
tmp_array = preprocess_input(tmp_array, model_name)
if int(model.get_output_at(-1).get_shape()[1]) == 1:
prediction = np.mean(model.predict(tmp_array)[:, 0])
else:
prediction = np.mean(model.predict(tmp_array)[:, -1])
return prediction
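# Illustrative note: with niter=0, TTA averages predictions over 3 images
# (original, inverted, horizontally flipped); niter=k augments each of the
# three k times, i.e. 3 * (k + 1) forward passes per input image.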
############
# VALIDATE #
############
def reduce_learning_rate_or_not(metric_list, direction="max", patience=2):
# **NOTE: metric_list should have CURRENT metric as last element
if len(metric_list) < patience + 1:
return False
else:
if direction == "max":
if metric_list[-1] <= metric_list[(-1 - patience)]:
return True
else:
return False
elif direction == "min":
if metric_list[-1] >= metric_list[(-1 - patience)]:
return True
else:
return False
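# Illustrative example: with direction="max" and patience=2,
#   reduce_learning_rate_or_not([0.70, 0.72, 0.71, 0.72], "max", 2) -> True
# because the latest value does not exceed the value from 2 validations ago.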
def competitionMetric(y_true, y_pred):
y_true = np.asarray(y_true)
y_pred = np.asarray(y_pred)
TP = np.sum((y_true == 1) & (y_pred == 1))
FP = np.sum((y_true == 0) & (y_pred == 1))
FN = np.sum((y_true == 1) & (y_pred == 0))
return float(TP) / (float(FP) + float(FN) + float(TP))
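# Worked example (illustrative): y_true=[1, 1, 0, 0], y_pred=[1, 0, 1, 0]
# gives TP=1, FP=1, FN=1, so the metric is 1 / (1 + 1 + 1) = 1/3.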
def calculate_metrics(val_results_dict, y_pred, y_val, suffix=""):
tmp_kappa_list = []
tmp_accur_list = []
tmp_f1_list = []
tmp_cm_list = []
y_val = utils.to_categorical(y_val)[:, -1]
for each_threshold in np.linspace(0.1, 0.9, 17):
tmp_pred = [1 if _ >= each_threshold else 0 for _ in y_pred]
tmp_kappa_list.append(cohen_kappa_score(tmp_pred, y_val))
tmp_accur_list.append(accuracy_score(tmp_pred, y_val))
tmp_f1_list.append(f1_score(tmp_pred, y_val))
tmp_cm_list.append(competitionMetric(tmp_pred, y_val))
auroc = round(roc_auc_score(y_val, y_pred), 3)
kappa = round(np.max(tmp_kappa_list), 3)
accur = round(np.max(tmp_accur_list), 3)
cm = round(np.max(tmp_cm_list), 3)
f1 = round(np.max(tmp_f1_list), 3)
val_results_dict["auc{}".format(suffix)].append(auroc)
val_results_dict["kap{}".format(suffix)].append(kappa)
val_results_dict["acc{}".format(suffix)].append(accur)
val_results_dict["f1{}".format(suffix)].append(f1)
val_results_dict["cm{}".format(suffix)].append(cm)
kappa_threshold = np.linspace(0.1, 0.9, 17)[tmp_kappa_list.index(np.max(tmp_kappa_list))]
accur_threshold = np.linspace(0.1, 0.9, 17)[tmp_accur_list.index(np.max(tmp_accur_list))]
f1_threshold = np.linspace(0.1, 0.9, 17)[tmp_f1_list.index(np.max(tmp_f1_list))]
cm_threshold = np.linspace(0.1, 0.9, 17)[tmp_cm_list.index(np.max(tmp_cm_list))]
val_results_dict["threshold_kap{}".format(suffix)].append(round(kappa_threshold, 2))
val_results_dict["threshold_acc{}".format(suffix)].append(round(accur_threshold, 2))
val_results_dict["threshold_f1{}".format(suffix)].append(round(f1_threshold, 2))
val_results_dict["threshold_cm{}".format(suffix)].append(round(cm_threshold, 2))
return val_results_dict
def validate(val_results_dict, model_name,
model, y_val, X_val, valid_ids, valid_views,
save_weights_path, val_results_path,
subepoch,
batch_size):
y_pred = np.asarray([TTA(img, model, model_name) for img in X_val])
val_results_dict = calculate_metrics(val_results_dict, y_pred, y_val)
val_results_dict = calculate_metrics(val_results_dict, y_pred[valid_views == "AP"], y_val[valid_views == "AP"],
"_AP")
val_results_dict = calculate_metrics(val_results_dict, y_pred[valid_views == "PA"], y_val[valid_views == "PA"],
"_PA")
val_results_dict["subepoch"].append(subepoch)
out_df = pd.DataFrame(val_results_dict)
out_df.to_csv(os.path.join(val_results_path, "results.csv"), index=False)
predictions_df = pd.DataFrame({"patientId": valid_ids, "y_pred": y_pred})
predictions_df.to_csv(os.path.join(val_results_path, "predictions.csv"), index=False)
model.save_weights(os.path.join(save_weights_path, "weights_subepoch_{}.h5".format(str(subepoch).zfill(3))))
return val_results_dict
def load_and_validate(val_results_dict,
model, model_name,
clahe,
valid_df, data_dir,
save_weights_path, val_results_path,
subepoch,
batch_size):
# Memory requirements may prevent all validation data from being
# loaded at once
# NOTE: data is NOT preprocessed
print(">>VALIDATING<<\n")
X_val = np.asarray([np.load(os.path.join(data_dir, "{}.npy".format(_))) for _ in valid_df.patientId])
if clahe:
X_val = np.asarray([apply_clahe(_) for _ in X_val])
X_val = np.expand_dims(X_val, axis=-1)
# X_val = preprocess_input(X_val, model_name)
valid_ids = np.asarray(list(valid_df["patientId"]))
y_val = np.asarray(list(valid_df["label"]))
valid_views = np.asarray(list(valid_df["view"]))
val_results_dict = validate(val_results_dict, model_name,
model, y_val, X_val, valid_ids, valid_views,
save_weights_path,
val_results_path,
subepoch, batch_size)
return val_results_dict
def train(df, fold,
model, model_name,
subepochs, batch_size, base_lr, augment_p,
save_weights_path, save_logs_path, val_results_path,
data_dir,
mode="weighted_loss",
clahe=False,
lr_schedule=None,
load_validation_data=True,
validate_every_nth_epoch=5,
resume=0,
num_train_samples=16000):
# lr_schedule : list of 3 integers OR list of 1 string and 2 integer
# - index 0: subepoch for first annealing
# - index 1: subepoch interval for annealing after first annealing
# - index 2: annealing_factor
# OR
# - index 0: "ReduceLROnPlateau"
# - index 1: annealing_factor
# - index 2: patience
if lr_schedule is None:
lr_schedule = [20, 10, 2]
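    # Illustrative schedules (hedged): halve the rate at subepoch 20 and every
    # 10 subepochs thereafter:
    #   lr_schedule = [20, 10, 2]
    # or reduce on plateau with annealing factor 2 and patience 3:
    #   lr_schedule = ["ReduceLROnPlateau", 2, 3]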
global train_images, pos_train_images, z_pos, z_neg, neg_train_images, X_train, y_train, class_weight_dict, X_val
if not os.path.exists(save_weights_path):
os.makedirs(save_weights_path)
if not os.path.exists(save_logs_path):
os.makedirs(save_logs_path)
if not os.path.exists(val_results_path):
os.makedirs(val_results_path)
train_df = df[(df.fold != fold)]
valid_df = df[(df.fold == fold)]
# Load the validation data if specified
if load_validation_data:
print("Loading validation data ...")
X_val = np.asarray([np.load(os.path.join(data_dir, "{}.npy".format(_))) for _ in valid_df.patientId])
if clahe:
X_val = np.asarray([apply_clahe(_) for _ in X_val])
X_val = np.expand_dims(X_val, axis=-1)
# X_val = preprocess_input(X_val, model_name)
print("DONE !")
valid_ids = np.asarray(list(valid_df["patientId"]))
y_val = np.asarray(list(valid_df["label"]))
valid_views = np.asarray(list(valid_df["view"]))
if mode == "weighted_loss":
train_images = [os.path.join(data_dir, "{}.npy".format(_)) for _ in train_df.patientId]
z = []
elif mode == "sample_equally":
pos_train_df = train_df[train_df["label"] == 1]
neg_train_df = train_df[train_df["label"] == 0]
pos_train_images = [os.path.join(data_dir, "{}.npy".format(_)) for _ in pos_train_df.patientId]
neg_train_images = [os.path.join(data_dir, "{}.npy".format(_)) for _ in neg_train_df.patientId]
z_pos = []
z_neg = []
val_results_dict = {"auc": [],
"kap": [],
"acc": [],
"f1": [],
"cm": [],
"threshold_kap": [],
"threshold_acc": [],
"threshold_f1": [],
"threshold_cm": [],
"subepoch": [],
"auc_AP": [],
"kap_AP": [],
"acc_AP": [],
"f1_AP": [],
"cm_AP": [],
"threshold_kap_AP": [],
"threshold_acc_AP": [],
"threshold_f1_AP": [],
"threshold_cm_AP": [],
"auc_PA": [],
"kap_PA": [],
"acc_PA": [],
"f1_PA": [],
"cm_PA": [],
"threshold_kap_PA": [],
"threshold_acc_PA": [],
"threshold_f1_PA": [],
"threshold_cm_PA": []}
lr_annealing_counter = 0
for each_subepoch in range(resume, subepochs):
suffix = str(each_subepoch).zfill(3)
logs_path = os.path.join(save_logs_path, "log_subepoch_{}.csv".format(suffix))
csvlogger = CSVLogger(logs_path)
print("Loading training sample ...")
if mode == "weighted_loss":
X_train, y_train, z = load_sample_and_labels(train_df, train_images, num_train_samples, z)
class_weight_dict = {}
class_freq_list = []
y_train = utils.to_categorical(y_train)
for each_class in range(y_train.shape[1]):
class_freq_list.append(np.sum(y_train[:, each_class]) / float(y_train.shape[0]))
for each_class in range(y_train.shape[1]):
class_weight_dict[each_class] = np.max(class_freq_list) / class_freq_list[each_class]
elif mode == "sample_equally":
            X_pos_train, z_pos = load_sample(pos_train_images, num_train_samples // 2, z_pos)
            X_neg_train, z_neg = load_sample(neg_train_images, num_train_samples // 2, z_neg)
X_train = np.vstack((X_pos_train, X_neg_train))
y_train = np.concatenate((np.repeat(1, len(X_pos_train)),
np.repeat(0, len(X_neg_train))))
del X_pos_train, X_neg_train
if clahe:
X_train = np.asarray([apply_clahe(_) for _ in X_train])
X_train = np.expand_dims(X_train, axis=-1)
print("Augmenting training data ...")
for index, each_image in enumerate(X_train):
sys.stdout.write("{}/{} ...\r".format(index + 1, len(X_train)))
sys.stdout.flush()
if np.random.binomial(1, 0.5):
each_image = np.invert(each_image)
if np.random.binomial(1, 0.5):
each_image = np.fliplr(each_image)
if np.random.binomial(1, augment_p):
X_train[index] = data_augmentation(each_image)
X_train = preprocess_input(X_train, model_name)
print("DONE !")
if mode == "weighted_loss":
model.fit(X_train, y_train,
batch_size=batch_size, epochs=1,
shuffle=True, callbacks=[csvlogger],
class_weight=class_weight_dict)
elif mode == "sample_equally":
model.fit(X_train, y_train,
batch_size=batch_size, epochs=1,
shuffle=True, callbacks=[csvlogger])
##### VALIDATE #####
if (each_subepoch + 1) % validate_every_nth_epoch == 0:
if load_validation_data:
val_results_dict = validate(val_results_dict, model_name,
model, y_val, X_val, valid_ids, valid_views,
save_weights_path, val_results_path,
each_subepoch,
batch_size)
else:
val_results_dict = load_and_validate(val_results_dict,
model, model_name,
clahe,
valid_df, data_dir,
save_weights_path, val_results_path,
each_subepoch,
batch_size)
##### LEARNING RATE SCHEDULE #####
if lr_schedule[0] != "ReduceLROnPlateau":
if (each_subepoch + 1) >= lr_schedule[0] and (each_subepoch + 1) % lr_schedule[1] == 0:
lr_annealing_counter += 1.
# Step-wise learning rate annealing schedule
new_lr = base_lr / (lr_schedule[2] ** lr_annealing_counter)
K.set_value(model.optimizer.lr, new_lr)
else:
if (each_subepoch + 1) % validate_every_nth_epoch == 0:
if reduce_learning_rate_or_not(val_results_dict["acc"], "max", lr_schedule[2]):
lr_annealing_counter += 1.
new_lr = base_lr / (lr_schedule[1] ** lr_annealing_counter)
K.set_value(model.optimizer.lr, new_lr)
if __name__ == "__main__":
# Specify GPU
os.environ["CUDA_VISIBLE_DEVICES"] = '1'
with open(os.path.join(WDIR, "../../SETTINGS.json")) as f:
SETTINGS_JSON = json.load(f)
df = pd.read_csv(os.path.join(WDIR, "../..", SETTINGS_JSON["TRAIN_INFO_DIR"], "stratified_folds_df.csv"))
df["label"] = [1 if _ == "Lung Opacity" else 0 for _ in df["class"]]
"""
#####################
# InceptionResNetV2 #
#####################
fold = 0
input_size = 256
fold_save_dir = os.path.join(WDIR, "../../models/classifiers/snapshots/binary/InceptionResNetV2/fold{}".format(fold))
model = get_model(InceptionResNetV2, 0, 5e-5, dropout=None, input_shape=(input_size, input_size, 1),
pretrained=os.path.join(WDIR, "../../models/pretrained/InceptionResNetV2_NIH15_Px256.h5"))
model_name = "inception"
train(df, fold, model, model_name, 15, 16, 5e-5, 0.5,
os.path.join(fold_save_dir, "l0/weights/"),
os.path.join(fold_save_dir, "l0/logs/"),
os.path.join(fold_save_dir, "l0/val-results/"),
os.path.join(WDIR, "../../", SETTINGS_JSON["TRAIN_IMAGES_CLEAN_DIR"], "resized/i{}/".format(input_size)),
mode="weighted_loss",
lr_schedule=[6, 3, 2.],
validate_every_nth_epoch=2,
load_validation_data=False,
num_train_samples=8000)
#####################
# InceptionResNetV2 #
#####################
fold = 1
input_size = 320
fold_save_dir = os.path.join(WDIR, "../../models/classifiers/snapshots/binary/InceptionResNetV2/fold{}".format(fold))
model = get_model(InceptionResNetV2, 0, 5e-5, dropout=None, input_shape=(input_size, input_size, 1),
pretrained=os.path.join(WDIR, "../../models/pretrained/InceptionResNetV2_NIH15_Px256.h5"))
model_name = "inception"
train(df, fold, model, model_name, 20, 16, 5e-5, 0.5,
os.path.join(fold_save_dir, "l0/weights/"),
os.path.join(fold_save_dir, "l0/logs/"),
os.path.join(fold_save_dir, "l0/val-results/"),
os.path.join(WDIR, "../../", SETTINGS_JSON["TRAIN_IMAGES_CLEAN_DIR"], "resized/i{}/".format(input_size)),
mode="weighted_loss",
lr_schedule=[6, 3, 2.],
validate_every_nth_epoch=2,
load_validation_data=False,
num_train_samples=8000)
############
# Xception #
############
fold = 2
input_size = 384
fold_save_dir = os.path.join(WDIR, "../../models/classifiers/snapshots/binary/Xception/fold{}".format(fold))
model = get_model(Xception, 0, 5e-5, dropout=None, input_shape=(input_size, input_size, 1),
pretrained=os.path.join(WDIR, "../../models/pretrained/Xception_NIH15_Px320.h5"))
model_name = "xception"
train(df, fold, model, model_name, 20, 16, 5e-5, 0.5,
os.path.join(fold_save_dir, "l0/weights/"),
os.path.join(fold_save_dir, "l0/logs/"),
os.path.join(fold_save_dir, "l0/val-results/"),
os.path.join(WDIR, "../../", SETTINGS_JSON["TRAIN_IMAGES_CLEAN_DIR"], "resized/i{}/".format(input_size)),
mode="weighted_loss",
lr_schedule=[6, 3, 2.],
validate_every_nth_epoch=2,
load_validation_data=False,
num_train_samples=8000)
############
# Xception #
############
fold = 3
input_size = 448
fold_save_dir = os.path.join(WDIR, "../../models/classifiers/snapshots/binary/Xception/fold{}".format(fold))
model = get_model(Xception, 0, 5e-5, dropout=None, input_shape=(input_size, input_size, 1),
pretrained=os.path.join(WDIR, "../../models/pretrained/Xception_NIH15_Px320.h5"))
model_name = "xception"
train(df, fold, model, model_name, 20, 8, 5e-5, 0.5,
os.path.join(fold_save_dir, "l0/weights/"),
os.path.join(fold_save_dir, "l0/logs/"),
os.path.join(fold_save_dir, "l0/val-results/"),
os.path.join(WDIR, "../../", SETTINGS_JSON["TRAIN_IMAGES_CLEAN_DIR"], "resized/i{}/".format(input_size)),
mode="weighted_loss",
lr_schedule=[6, 3, 2.],
validate_every_nth_epoch=2,
load_validation_data=False,
num_train_samples=8000)
###############
# DenseNet169 #
###############
fold = 4
input_size = 512
fold_save_dir = os.path.join(WDIR, "../../models/classifiers/snapshots/binary/DenseNet169/fold{}".format(fold))
model = get_model(DenseNet169, 0, 5e-5, dropout=None, input_shape=(input_size, input_size, 1),
pretrained=os.path.join(WDIR, "../../models/pretrained/DenseNet169_NIH15_Px448.h5"))
model_name = "densenet"
train(df, fold, model, model_name, 20, 8, 5e-5, 0.5,
os.path.join(fold_save_dir, "l0/weights/"),
os.path.join(fold_save_dir, "l0/logs/"),
os.path.join(fold_save_dir, "l0/val-results/"),
os.path.join(WDIR, "../../", SETTINGS_JSON["TRAIN_IMAGES_CLEAN_DIR"], "resized/i{}/".format(input_size)),
mode="weighted_loss",
lr_schedule=[6, 3, 2.],
validate_every_nth_epoch=2,
load_validation_data=False,
num_train_samples=8000)
df = pd.read_csv(os.path.join(WDIR, "../..", SETTINGS_JSON["TRAIN_INFO_DIR"], "stage_1_stratified_folds_df.csv"))
df["label"] = None
df["label"][df["class"] == "Normal"] = 0
df["label"][df["class"] == "No Lung Opacity / Not Normal"] = 1
df["label"][df["class"] == "Lung Opacity"] = 2
#####################
# InceptionResNetV2 #
#####################
fold = 5
input_size = 256
fold_save_dir = os.path.join(WDIR,
"../../models/classifiers/snapshots/multiclass/InceptionResNetV2/fold{}".format(fold))
model = get_model(InceptionResNetV2, 0, 5e-5, classes=3, activation="softmax", dropout=None,
input_shape=(input_size, input_size, 1),
pretrained=os.path.join(WDIR, "../../models/pretrained/InceptionResNetV2_NIH15_Px256.h5"))
model_name = "inception"
train(df, fold, model, model_name, 20, 16, 5e-5, 0.5,
os.path.join(fold_save_dir, "l0/weights/"),
os.path.join(fold_save_dir, "l0/logs/"),
os.path.join(fold_save_dir, "l0/val-results/"),
os.path.join(WDIR, "../../", SETTINGS_JSON["TRAIN_IMAGES_CLEAN_DIR"], "resized/i{}/".format(input_size)),
mode="weighted_loss",
lr_schedule=[6, 3, 2.],
validate_every_nth_epoch=2,
load_validation_data=False,
num_train_samples=8000)
#####################
# InceptionResNetV2 #
#####################
fold = 6
input_size = 320
fold_save_dir = os.path.join(WDIR,
"../../models/classifiers/snapshots/multiclass/InceptionResNetV2/fold{}".format(fold))
model = get_model(InceptionResNetV2, 0, 5e-5, classes=3, activation="softmax", dropout=None,
input_shape=(input_size, input_size, 1),
pretrained=os.path.join(WDIR, "../../models/pretrained/InceptionResNetV2_NIH15_Px256.h5"))
model_name = "inception"
train(df, fold, model, model_name, 20, 16, 5e-5, 0.5,
os.path.join(fold_save_dir, "l0/weights/"),
os.path.join(fold_save_dir, "l0/logs/"),
os.path.join(fold_save_dir, "l0/val-results/"),
os.path.join(WDIR, "../../", SETTINGS_JSON["TRAIN_IMAGES_CLEAN_DIR"], "resized/i{}/".format(input_size)),
mode="weighted_loss",
lr_schedule=[6, 3, 2.],
validate_every_nth_epoch=2,
load_validation_data=False,
num_train_samples=8000)
############
# Xception #
############
fold = 7
input_size = 384
fold_save_dir = os.path.join(WDIR, "../../models/classifiers/snapshots/multiclass/Xception/fold{}".format(fold))
model = get_model(Xception, 0, 5e-5, classes=3, activation="softmax", dropout=None,
input_shape=(input_size, input_size, 1),
pretrained=os.path.join(WDIR, "../../models/pretrained/Xception_NIH15_Px320.h5"))
model_name = "xception"
train(df, fold, model, model_name, 20, 16, 5e-5, 0.5,
os.path.join(fold_save_dir, "l0/weights/"),
os.path.join(fold_save_dir, "l0/logs/"),
os.path.join(fold_save_dir, "l0/val-results/"),
os.path.join(WDIR, "../../", SETTINGS_JSON["TRAIN_IMAGES_CLEAN_DIR"], "resized/i{}/".format(input_size)),
mode="weighted_loss",
lr_schedule=[6, 3, 2.],
validate_every_nth_epoch=2,
load_validation_data=False,
num_train_samples=8000)
############
# Xception #
############
fold = 8
input_size = 448
fold_save_dir = os.path.join(WDIR, "../../models/classifiers/snapshots/multiclass/Xception/fold{}".format(fold))
model = get_model(Xception, 0, 5e-5, classes=3, activation="softmax", dropout=None,
input_shape=(input_size, input_size, 1),
pretrained=os.path.join(WDIR, "../../models/pretrained/Xception_NIH15_Px320.h5"))
model_name = "xception"
train(df, fold, model, model_name, 20, 8, 5e-5, 0.5,
os.path.join(fold_save_dir, "l0/weights/"),
os.path.join(fold_save_dir, "l0/logs/"),
os.path.join(fold_save_dir, "l0/val-results/"),
os.path.join(WDIR, "../../", SETTINGS_JSON["TRAIN_IMAGES_CLEAN_DIR"], "resized/i{}/".format(input_size)),
mode="weighted_loss",
lr_schedule=[6, 3, 2.],
validate_every_nth_epoch=2,
load_validation_data=False,
num_train_samples=8000)
"""
###############
# DenseNet169 #
###############
model_name = "densenet_512"
base_model = DenseNet169
input_size = 512
subepochs = 20
fold = 9
batch_size = 8
base_lr = 5e-5
augment_p = 0.5
fold_save_dir = os.path.join(WDIR,
"../../models/classifiers/snapshots/multiclass/DenseNet169_512/fold{}".format(fold))
save_weights_path = os.path.join(fold_save_dir, "l0/weights_512/")
save_logs_path = os.path.join(fold_save_dir, "l0/logs_512/")
val_results_path = os.path.join(fold_save_dir, "l0/val-results_512/")
data_dir = os.path.join(WDIR, "../../", SETTINGS_JSON["TRAIN_IMAGES_CLEAN_DIR"], "resized/i{}/".format(input_size))
mode = "weighted_loss"
lr_schedule = [6, 3, 2.]
load_validation_data = False
validate_every_nth_epoch = 2
num_train_samples = 8000
layer = 0
classes = 3
activation = "softmax"
    dropout = 0.2
    input_shape = (input_size, input_size, 1)
pretrained = os.path.join(WDIR, "../../models/pretrained/DenseNet169_NIH15_Px448.h5")
print("model_name: " + model_name + \
"base_model: " + "DenseNet169" + \
"subepochs: " + str(subepochs) + \
"fold: " + str(fold) + \
"batch_size: " + str(batch_size) + \
"base_lr: " + str(base_lr) + \
"augment_p: " + str(augment_p) + \
"fold_save_dir: " + fold_save_dir + \
"save_weights_path: " + save_weights_path + \
"save_logs_path: " + save_logs_path + \
"val_results_path: " + val_results_path + \
"data_dir: " + data_dir + \
"mode: " + mode + \
"lr_schedule: " + str(lr_schedule) + \
"load_validation_data: " + str(load_validation_data) + \
"validate_every_nth_epoch: " + str(validate_every_nth_epoch) + \
"num_train_samples: " + str(num_train_samples) + \
"layer: " + str(layer) + \
"classes: " + str(classes) + \
"activation: " + activation + \
"dropout: " + str(dropout) + \
"input_shape: " + str(input_shape) + \
"pretrained: " + pretrained + \
"model_name: " + model_name)
strategy = tf.distribute.MirroredStrategy()
print('Number of devices: {}'.format(strategy.num_replicas_in_sync))
with strategy.scope():
# get and compile the model under the scope and begin the distributed training , play with the batch size
model = get_model(base_model=base_model, layer=layer, lr=base_lr, classes=classes, activation=activation,
dropout=dropout,
input_shape=input_shape,
pretrained=pretrained)
train(df=df, fold=fold,
model=model, model_name=model_name,
subepochs=subepochs, batch_size=batch_size, base_lr=base_lr, augment_p=augment_p,
save_weights_path=save_weights_path,
save_logs_path=save_logs_path,
val_results_path=val_results_path,
data_dir=data_dir,
mode=mode,
lr_schedule=lr_schedule,
load_validation_data=load_validation_data,
validate_every_nth_epoch=validate_every_nth_epoch,
num_train_samples=num_train_samples)
###############
# DenseNet169 #
###############
model_name = "densenet_1024"
base_model = DenseNet169
input_size = 1024
subepochs = 20
fold = 10
batch_size = 8
base_lr = 5e-5
augment_p = 0.5
fold_save_dir = os.path.join(WDIR,
"../../models/classifiers/snapshots/multiclass/DenseNet169_1024/fold{}".format(fold))
save_weights_path = os.path.join(fold_save_dir, "l0/weights_1024/")
save_logs_path = os.path.join(fold_save_dir, "l0/logs_1024/")
val_results_path = os.path.join(fold_save_dir, "l0/val-results_1024/")
data_dir = os.path.join(WDIR, "../../", SETTINGS_JSON["TRAIN_IMAGES_CLEAN_DIR"], "resized/i{}/".format(input_size))
mode = "weighted_loss"
lr_schedule = [6, 3, 2.]
load_validation_data = False
validate_every_nth_epoch = 2
num_train_samples = 8000
layer = 0
classes = 3
activation = "softmax"
    dropout = 0.2
    input_shape = (input_size, input_size, 1)
pretrained = os.path.join(WDIR, "../../models/pretrained/DenseNet169_NIH15_Px448.h5")
print("model_name: " + model_name + \
"base_model: " + "DenseNet169" + \
"subepochs: " + str(subepochs) + \
"fold: " + str(fold) + \
"batch_size: " + str(batch_size) + \
"base_lr: " + str(base_lr) + \
"augment_p: " + str(augment_p) + \
"fold_save_dir: " + fold_save_dir + \
"save_weights_path: " + save_weights_path + \
"save_logs_path: " + save_logs_path + \
"val_results_path: " + val_results_path + \
"data_dir: " + data_dir + \
"mode: " + mode + \
"lr_schedule: " + str(lr_schedule) + \
"load_validation_data: " + str(load_validation_data) + \
"validate_every_nth_epoch: " + str(validate_every_nth_epoch) + \
"num_train_samples: " + str(num_train_samples) + \
"layer: " + str(layer) + \
"classes: " + str(classes) + \
"activation: " + activation + \
"dropout: " + str(dropout) + \
"input_shape: " + str(input_shape) + \
"pretrained: " + pretrained + \
"model_name: " + model_name)
strategy = tf.distribute.MirroredStrategy()
print('Number of devices: {}'.format(strategy.num_replicas_in_sync))
with strategy.scope():
# get and compile the model under the scope and begin the distributed training , play with the batch size
model = get_model(base_model=base_model, layer=layer, lr=base_lr, classes=classes, activation=activation,
dropout=dropout,
input_shape=input_shape,
pretrained=pretrained)
train(df=df, fold=fold,
model=model, model_name=model_name,
subepochs=subepochs, batch_size=batch_size, base_lr=base_lr, augment_p=augment_p,
save_weights_path=save_weights_path,
save_logs_path=save_logs_path,
val_results_path=val_results_path,
data_dir=data_dir,
mode=mode,
lr_schedule=lr_schedule,
load_validation_data=load_validation_data,
validate_every_nth_epoch=validate_every_nth_epoch,
num_train_samples=num_train_samples)
|
{"hexsha": "3928dc4b6e9cf63c1016ce407fd029075ce5875a", "size": 36845, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/train/TrainClassifierEnsemble.py", "max_stars_repo_name": "RamsteinWR/PneumoniaRSNA1", "max_stars_repo_head_hexsha": "08bdba51292307a78ef711c6be4a63faea240ddf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/train/TrainClassifierEnsemble.py", "max_issues_repo_name": "RamsteinWR/PneumoniaRSNA1", "max_issues_repo_head_hexsha": "08bdba51292307a78ef711c6be4a63faea240ddf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/train/TrainClassifierEnsemble.py", "max_forks_repo_name": "RamsteinWR/PneumoniaRSNA1", "max_forks_repo_head_hexsha": "08bdba51292307a78ef711c6be4a63faea240ddf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.3915662651, "max_line_length": 121, "alphanum_fraction": 0.595168951, "include": true, "reason": "import numpy,import scipy,from scipy", "num_tokens": 9239}
|
from .parameters import Params
from .datasets import Datasets, FdDatasets
from .model import Model
from collections import OrderedDict
from ..detail.utilities import unique, lighten_color
from .detail.derivative_manipulation import numerical_jacobian
from .detail.utilities import print_styled, optimal_plot_layout
from .profile_likelihood import ProfileLikelihood1D
import numpy as np
import scipy.optimize as optim
import matplotlib.pyplot as plt
def front(x):
return next(iter(x))
class Fit:
"""Object which is used for fitting. It is a collection of models and their data. Once data is loaded, a fit object
contains parameters, which can be fitted by invoking fit.
Parameters
----------
*models
Variable number of `pylake.fitting.Model`.
Examples
--------
::
from lumicks import pylake
dna_model = pylake.inverted_odijk("DNA")
fit = pylake.FdFit(dna_model)
data = fit.add_data("Dataset 1", force, distance)
fit["DNA/Lp"].lower_bound = 35 # Set lower bound for DNA Lp
fit["DNA/Lp"].upper_bound = 80 # Set upper bound for DNA Lp
fit.fit()
fit.plot("Dataset 1", "k--") # Plot the fitted model
"""
def __init__(self, *models):
self.models = {id(m): m for m in models}
self.datasets = {id(m): self._dataset(m) for m in models}
self._params = Params()
self._invalidate_build()
def _dataset(self, model):
return Datasets(model, self)
def update_params(self, other):
"""Sets parameters if they are found in the target fit.
Parameters
----------
other : Fit or Params
"""
if isinstance(other, Params):
self.params.update_params(other)
elif isinstance(other, self.__class__):
self.params.update_params(other.params)
else:
raise RuntimeError("Did not pass compatible argument to update_params")
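    # Illustrative sketch (hypothetical fits fit_a and fit_b):
    #   fit_b.update_params(fit_a)         # copy matching parameters from a Fit
    #   fit_b.update_params(fit_a.params)  # or from a Params collection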
def __getitem__(self, item):
if isinstance(item, Model):
return self.datasets[id(item)]
elif len(self.datasets) == 1 and item in front(self.datasets.values()).names:
return self.params[front(self.datasets.values()).data[item]]
else:
return self.params[item]
@property
def data(self):
if len(self.datasets) > 1:
raise RuntimeError(
"This Fit is comprised of multiple models. Please access data for a particular model by "
"invoking fit[model].data[dataset_name]."
)
return front(self.datasets.values()).data
def _add_data(self, name, x, y, params={}):
if len(self.datasets) > 1:
raise RuntimeError(
"This Fit is comprised of multiple models. Please add data to a particular model by "
"invoking fit[model].add_data(...)"
)
return front(self.datasets.values())._add_data(name, x, y, params)
@property
def has_jacobian(self):
"""Returns true if it is possible to evaluate the Jacobian of the fit."""
has_jacobian = True
for model in self.models.values():
has_jacobian = has_jacobian and model.has_jacobian
return has_jacobian
@property
def n_residuals(self):
"""Number of data points."""
self._rebuild()
count = 0
for data in self.datasets.values():
count += data.n_residuals
return count
@property
def n_params(self):
"""Number of parameters in the Fit"""
self._rebuild()
return len(self._params)
@property
def params(self):
"""Fit parameters. See also `pylake.fitting.Params`"""
self._rebuild()
return self._params
@property
def dirty(self):
"""Validate that all the Datasets that we are about the fit were actually linked."""
dirty = not self._built
for data in self.datasets.values():
dirty = dirty or not data.built
return dirty
def _rebuild(self):
"""Checks whether the model state is up to date. Any user facing methods should ideally check whether the model
needs to be rebuilt."""
if self.dirty:
self._build_fit()
def _invalidate_build(self):
self._built = False
def _build_fit(self):
"""This function generates the global parameter list from the parameters of the individual sub models.
It also generates unique conditions from the data specification."""
all_parameter_names = [
p for data_set in self.datasets.values() for p in data_set._transformed_params
]
all_defaults = [d for data_set in self.datasets.values() for d in data_set._defaults]
unique_parameter_names = unique(all_parameter_names)
parameter_lookup = OrderedDict(
zip(unique_parameter_names, np.arange(len(unique_parameter_names)))
)
for data in self.datasets.values():
data._link_data(parameter_lookup)
defaults = [all_defaults[all_parameter_names.index(l)] for l in unique_parameter_names]
self._params._set_params(unique_parameter_names, defaults)
self._built = True
def _prepare_fit(self):
"""Checks whether the model is ready for fitting and returns the current parameter values, which parameters are
fitted and the parameter bounds."""
self._rebuild()
assert self.n_residuals > 0, "This model has no data associated with it."
assert self.n_params > 0, "This model has no parameters. There is nothing to fit."
return (
self.params.values,
self.params.fitted,
self.params.lower_bounds,
self.params.upper_bounds,
)
def _fit(self, parameter_vector, lb, ub, fitted, show_fit=False, **kwargs):
"""Fit the model
Parameters
----------
parameter_vector : array_like
List of parameters
lb : array_like
list of lower parameter bounds
        ub : array_like
            list of upper parameter bounds
show_fit : bool
show fitting (slow!)
fitted : array_like
list of which parameters are fitted
"""
if show_fit:
fig = plt.gcf()
def residual(params):
parameter_vector[fitted] = params
if show_fit:
parameter_names = self.params.keys()
for name, value in zip(parameter_names, parameter_vector):
self.params[name] = value
plt.figure(fig.number)
for model in self.models.values():
self[model].plot()
fig.canvas.draw()
fig.canvas.flush_events()
fig.clf()
return self._calculate_residual(parameter_vector)
def jacobian(params):
parameter_vector[fitted] = params
return self._calculate_jacobian(parameter_vector)[:, fitted]
result = optim.least_squares(
residual,
parameter_vector[fitted],
jac=jacobian if self.has_jacobian else "2-point",
bounds=(lb[fitted], ub[fitted]),
method="trf",
ftol=1e-8,
xtol=1e-8,
gtol=1e-8,
**kwargs,
)
parameter_vector[fitted] = result.x
return parameter_vector
def fit(self, show_fit=False, **kwargs):
"""Fit the model
Parameters
----------
show_fit : bool
Show the fitting procedure as it is progressing.
"""
parameter_vector, fitted, lb, ub = self._prepare_fit()
out_of_bounds = np.logical_or(
parameter_vector[fitted] < lb[fitted], parameter_vector[fitted] > ub[fitted]
)
if np.any(out_of_bounds):
raise ValueError(
f"Initial parameters {self.params.keys()[fitted][out_of_bounds]} are outside the "
f"parameter bounds. Please set value, lower_bound and upper_bound for these parameters"
f"to consistent values."
)
parameter_vector = self._fit(parameter_vector, lb, ub, fitted, show_fit=show_fit, **kwargs)
parameter_names = self.params.keys()
for name, value in zip(parameter_names, parameter_vector):
self.params[name] = value
if self.has_jacobian:
for name, value in zip(parameter_names, np.diag(self.cov)):
self.params[name].stderr = np.sqrt(value)
return self
def profile_likelihood(
self,
parameter_name,
min_step=1e-4,
max_step=1.0,
num_steps=100,
step_factor=2.0,
min_chi2_step=0.05,
max_chi2_step=0.25,
termination_significance=0.99,
confidence_level=0.95,
verbose=False,
):
"""Calculate a profile likelihood. This method traces an optimal path through parameter space in order to
estimate parameter confidence intervals. It iteratively performs a step for the profiled parameter, then fixes
that parameter and re-optimizes all the other parameters.
Parameters
----------
parameter_name: str
Which parameter to evaluate a profile likelihood for.
min_step: float
Minimum step size. This is multiplied by the current parameter value to come to a minimum step size used
in the step-size estimation procedure (default: 1e-4).
max_step: float
Maximum step size (default: 1.0).
num_steps: integer
Number of steps to take (default: 100).
step_factor: float
Which factor to change the step-size by when step-size is too large or too small (default: 2).
min_chi2_step: float
Minimal desired step in terms of chi squared change prior to re-optimization. When the step results in a fit
change smaller than this threshold, the step-size will be increased.
        max_chi2_step: float
            Maximal desired step in terms of chi squared change prior to re-optimization. When the step results in a fit
            change bigger than this threshold, the step-size will be reduced.
termination_significance: float
Significance level for terminating the parameter scan. When the fit quality exceeds the
termination_significance confidence level, it stops scanning.
confidence_level: float
Significance level for the chi squared test.
verbose: bool
Controls the verbosity of the output.
"""
if parameter_name not in self.params:
raise KeyError(f"Parameter {parameter_name} not present in fitting object.")
if self.params[parameter_name].fixed:
raise RuntimeError(f"Parameter {parameter_name} is fixed in the fitting object.")
assert max_step > min_step
assert max_chi2_step > min_chi2_step
profile = ProfileLikelihood1D(
parameter_name,
min_step,
max_step,
step_factor,
min_chi2_step,
max_chi2_step,
termination_significance,
confidence_level,
1,
)
def trial(parameters=[]):
return -2.0 * self.log_likelihood(parameters, self.sigma)
profile._extend_profile(trial, self._fit, self.params, num_steps, True, verbose)
profile._extend_profile(trial, self._fit, self.params, num_steps, False, verbose)
self.params[parameter_name].profile = profile
return profile
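    # Illustrative sketch (hedged; assumes a parameter named "DNA/Lp" exists):
    #   fit.fit()
    #   profile = fit.profile_likelihood("DNA/Lp")
    #   fit["DNA/Lp"].profile  # the profile is also stored on the parameter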
def _calculate_residual(self, parameter_values=[]):
self._rebuild()
if len(parameter_values) == 0:
parameter_values = self.params.values
residual_idx = 0
residual = np.zeros(self.n_residuals)
for model in self.models.values():
current_residual = model._calculate_residual(self.datasets[id(model)], parameter_values)
current_n = len(current_residual)
residual[residual_idx : residual_idx + current_n] = current_residual
residual_idx += current_n
return residual
def _calculate_jacobian(self, parameter_values=[]):
self._rebuild()
if len(parameter_values) == 0:
parameter_values = self.params.values
residual_idx = 0
jacobian = np.zeros((self.n_residuals, len(parameter_values)))
for model in self.models.values():
current_jacobian = model._calculate_jacobian(self.datasets[id(model)], parameter_values)
current_n = current_jacobian.shape[0]
jacobian[residual_idx : residual_idx + current_n, :] = current_jacobian
residual_idx += current_n
return jacobian
def verify_jacobian(self, params, plot=0, verbose=True, dx=1e-6, **kwargs):
self._rebuild()
if len(params) != len(self._params):
raise ValueError(
"Parameter vector has invalid length. "
f"Expected: {len(self._params)}, got: {len(params)}."
)
jacobian = self._calculate_jacobian(params).transpose()
jacobian_fd = numerical_jacobian(self._calculate_residual, params, dx)
if plot:
n_x, n_y = optimal_plot_layout(len(self.params))
for i_parameter, parameter in enumerate(self.params):
plt.subplot(n_x, n_y, i_parameter + 1)
plt.plot(np.transpose(jacobian[i_parameter, :]), linewidth=2)
plt.plot(np.transpose(jacobian_fd[i_parameter, :]), "--", linewidth=1)
plt.title(parameter)
plt.legend(["Analytic", "FD"])
is_close = np.allclose(jacobian, jacobian_fd, **kwargs)
if not is_close:
parameter_names = list(self.params.keys())
if verbose:
                maxima = np.max(np.abs(jacobian - jacobian_fd), axis=1)
for i, v in enumerate(maxima):
if np.allclose(jacobian[i, :], jacobian_fd[i, :]):
print(f"Parameter {parameter_names[i]}({i}): {v}")
else:
print_styled("warning", f"Parameter {parameter_names[i]}({i}): {v}")
return is_close
def plot(
self,
data=None,
fmt="",
independent=None,
legend=True,
plot_data=True,
overrides=None,
**kwargs,
):
"""Plot model and data
Parameters
----------
data : str
Name of the data set to plot (optional, omission plots all for that model).
fmt : str
Format string, forwarded to :func:`matplotlib.pyplot.plot`.
independent : array_like
Array with values for the independent variable (used when plotting the model).
legend : bool
Show legend (default: True).
plot_data : bool
Show data (default: True).
overrides : dict
Parameter / value pairs which override parameter values in the current fit. Should be a dict of
{str: float} that provides values for parameters which should be set to particular values in the plot
(default: None);
``**kwargs``
Forwarded to :func:`matplotlib.pyplot.plot`.
Examples
--------
::
from lumicks import pylake
model = pylake.inverted_odijk("DNA")
fit = pylake.FdFit(model)
fit.add_data("Control", force, distance)
fit.fit()
# Basic plotting of one data set over a custom range can be done by just invoking plot.
fit.plot("Control", 'k--', np.arange(2.0, 5.0, 0.01))
# Have a quick look at what a stiffness of 5 would do to the fit.
fit.plot("Control", overrides={"DNA/St": 5})
            # When dealing with multiple models in one fit, one has to select the model first when plotting.
            model1 = pylake.odijk("DNA")
            model2 = pylake.odijk("DNA") + pylake.odijk("protein")
            fit = pylake.FdFit(model1, model2)
            fit[model1].add_data("Control", force1, distance1)
            fit[model2].add_data("Control", force2, distance2)
            fit.fit()
            fit[model1].plot("Control")  # Plots data set Control for model 1
            fit[model2].plot("Control")  # Plots data set Control for model 2
"""
assert len(self.models) == 1, "Please select a model to plot using fit[model].plot(...)."
self._plot(
front(self.models.values()),
data,
fmt,
overrides,
independent,
legend,
plot_data,
**kwargs,
)
def _plot(self, model, data, fmt, overrides, independent, legend, plot_data, **kwargs):
self._rebuild()
params, _ = self._override_params(overrides)
dataset = self.datasets[id(model)]
def plot(fit_data):
x_values = fit_data.x if independent is None else independent
model_lines = model.plot(
params[fit_data],
x_values,
fmt,
**kwargs,
zorder=1,
label=fit_data.name + " (model)",
)
if plot_data:
color = model_lines[0].get_color()
fit_data.plot(
".",
**kwargs,
color=lighten_color(color, -0.3),
zorder=0,
label=fit_data.name + " (data)",
)
if data:
assert data in dataset.data, f"Error: Did not find dataset with name {data}"
plot(dataset.data[data])
else:
for data in dataset.data.values():
plot(data)
if legend:
plt.legend()
def _override_params(self, overrides=None):
from copy import deepcopy
params = self.params
params = deepcopy(params)
if overrides:
for key, value in overrides.items():
if key in params:
params[key] = value
else:
raise KeyError(f"Parameter {key} is not a parameter used in the fit")
return params, overrides
@property
def sigma(self):
"""Error variance of the data points."""
        # TODO: Ideally, this will eventually depend on the exact error model used. For now, we use the a-posteriori
# noise standard deviation estimate based on the residual.
res = self._calculate_residual()
est_sd = np.sqrt((res * res).sum() / (len(res) - np.sum(self.params.fitted)))
# This is toy data. Computing a variance estimate here makes no sense.
if est_sd == 0:
return np.ones(len(res))
return est_sd * np.ones(len(res))
def log_likelihood(self, params=[], sigma=None):
"""The model residual is given by chi squared = -2 log(L)"""
self._rebuild()
res = self._calculate_residual(params)
sigma = sigma if np.any(sigma) else self.sigma
return (
-(self.n_residuals / 2.0) * np.log(2.0 * np.pi)
- np.sum(np.log(sigma))
- sum((res / sigma) ** 2) / 2.0
)
@property
def aic(self):
"""Calculates the Akaike Information Criterion:
AIC = 2 k - 2 ln(L)
        where k refers to the number of parameters, n to the number of observations (or data points) and L to the
        maximized value of the likelihood function.
The emphasis of this criterion is future prediction. It does not lead to consistent model selection and is more
prone to over-fitting than the Bayesian Information Criterion.
References:
Cavanaugh, J.E., 1997. Unifying the derivations for the Akaike and corrected Akaike information criteria.
Statistics & Probability Letters, 33(2), pp.201-208.
"""
self._rebuild()
k = sum(self.params.fitted)
LL = self.log_likelihood()
return 2.0 * k - 2.0 * LL
@property
def aicc(self):
"""Calculates the Corrected Akaike Information Criterion:
.. math::
AICc = AIC + \\frac{2 k^2 + 2 k}{n - k - 1}
        where k refers to the number of parameters, n to the number of observations (or data points) and L to the
        maximized value of the likelihood function.
The emphasis of this criterion is future prediction. Compared to the AIC it should be less prone to overfitting
for smaller sample sizes. Analogously to the AIC, it does not lead to a consistent model selection procedure.
References:
Cavanaugh, J.E., 1997. Unifying the derivations for the Akaike and corrected Akaike information criteria.
Statistics & Probability Letters, 33(2), pp.201-208.
"""
aic = self.aic
k = sum(self.params.fitted)
return aic + (2.0 * k * k + 2.0 * k) / (self.n_residuals - k - 1.0)
@property
def bic(self):
"""Calculates the Bayesian Information Criterion:
BIC = k ln(n) - 2 ln(L)
        where k refers to the number of parameters, n to the number of observations (or data points) and L to the
        maximized value of the likelihood function.
The emphasis of the BIC is put on parsimonious models. As such it is less prone to over-fitting. Selection via
BIC leads to a consistent model selection procedure, meaning that as the number of data points tends to
infinity, BIC will select the true model assuming the true model is in the set of selected models.
"""
k = sum(self.params.fitted)
return k * np.log(self.n_residuals) - 2.0 * self.log_likelihood()
@property
def cov(self):
"""Returns the inverse of the approximate Hessian. This approximation is valid when the model fits well (small
residuals) and there is sufficient data to assume we're in the asymptotic regime.
It makes use of the Gauss-Newton approximation of the Hessian, which uses only the first order sensitivity
information. This is valid for linear problems and problems near the optimum (assuming the model fits).
References:
Press, W.H., Teukolsky, S.A., Vetterling, W.T. and Flannery, B.P., 1988. Numerical recipes in C.
Maiwald, T., Hass, H., Steiert, B., Vanlier, J., Engesser, R., Raue, A., Kipkeew, F., Bock, H.H.,
Kaschek, D., Kreutz, C. and Timmer, J., 2016. Driving the model to its limit: profile likelihood
based model reduction. PloS one, 11(9).
"""
# Note that this approximation is only valid if the noise on each data set is the same.
if self.has_jacobian:
J = self._calculate_jacobian()
J = J / np.transpose(np.tile(self.sigma, (J.shape[1], 1)))
return np.linalg.pinv(np.transpose(J).dot(J))
else:
raise NotImplementedError(
"In order to calculate a covariance matrix, a model Jacobian has to be specified"
"for the model."
)
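    # Derivation sketch for the expression above: with the weighted Jacobian
    # J_w = J / sigma, the Gauss-Newton approximation of the chi-squared Hessian
    # yields the parameter covariance estimate cov ~ (J_w^T J_w)^(-1), which is
    # exactly what the pinv call computes.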
def _repr_html_(self):
out_string = "<h4>Fit</h4>\n"
for model in self.models.values():
datasets = "".join(f"{self.datasets[id(model)]._repr_html_()}<br>\n")
out_string += f"<h5>Model: {model.name}</h5>\n"
eqn = model.get_formatted_equation_string(tex=True)
if eqn:
out_string += f"<h5> Equation:</h5>${eqn}$<br>\n"
out_string += f"<h5> Data:</h5>\n{datasets}<br>"
return out_string + f"<h5> Fitted parameters:</h5>\n{self.params._repr_html_()}"
def __repr__(self):
return (
f"lumicks.pylake.{self.__class__.__name__}"
f"(models={{{', '.join([x.name for x in self.models.values()])}}}, "
f"N={self.n_residuals})"
)
def __str__(self):
indent = 2
out_string = "Fit\n"
for model in self.models.values():
datasets = (" " * indent + "- " + self.datasets[id(model)].__str__()).splitlines(True)
datasets = (" " * (2 * indent)).join(datasets)
out_string += f"{' ' * indent}- Model: {model.name}\n"
eqn = model.get_formatted_equation_string(tex=False)
if eqn:
out_string += f"{' ' * indent}- Equation:\n {eqn}\n\n{datasets}"
return out_string + (
f"\n{' ' * indent}- Fitted parameters:\n"
f"{(' ' * (2 * indent))}"
f"{(' ' * (2 * indent)).join(self.params.__str__().splitlines(True))}"
)
class FdFit(Fit):
"""Object which is used for fitting. It is a collection of models and their data. Once data is loaded, a fit object
contains parameters, which can be fitted by invoking fit.
Examples
--------
::
from lumicks import pylake
dna_model = pylake.inverted_odijk("DNA")
fit = pylake.FdFit(dna_model)
data = fit.add_data("Dataset 1", force, distance)
fit["DNA/Lp"].lower_bound = 35 # Set lower bound for DNA Lp
fit["DNA/Lp"].upper_bound = 80 # Set upper bound for DNA Lp
fit.fit()
fit.plot("Dataset 1", "k--") # Plot the fitted model"""
def add_data(self, name, f, d, params={}):
"""Adds a data set to this fit.
Parameters
----------
name : str
Name of this data set.
f : array_like
An array_like containing force data.
d : array_like
An array_like containing distance data.
params : dict of {str : str or int}
List of parameter transformations. These can be used to convert one parameter in the model, to a new
parameter name or constant for this specific data set (for more information, see the examples).
Examples
--------
::
dna_model = pylake.inverted_odijk("DNA") # Use an inverted Odijk eWLC model.
fit = pylake.FdFit(dna_model)
fit.add_data("Data1", force1, distance1) # Load the first data set like that
fit.add_data("Data2", force2, distance2, params={"DNA/Lc": "DNA/Lc_RecA"}) # Different DNA/Lc
"""
if front(self.models.values()).independent == "f":
return self._add_data(name, f, d, params)
else:
return self._add_data(name, d, f, params)
def _dataset(self, model):
return FdDatasets(model, self)
|
{"hexsha": "b331dfbb3196cba18b9dae5e392bb6590dbce66d", "size": 26815, "ext": "py", "lang": "Python", "max_stars_repo_path": "lumicks/pylake/fitting/fit.py", "max_stars_repo_name": "lumicks/pylake", "max_stars_repo_head_hexsha": "b5875d156d6416793a371198f3f2590fca2be4cd", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2019-02-18T07:56:39.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-19T01:14:48.000Z", "max_issues_repo_path": "lumicks/pylake/fitting/fit.py", "max_issues_repo_name": "lumicks/pylake", "max_issues_repo_head_hexsha": "b5875d156d6416793a371198f3f2590fca2be4cd", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 42, "max_issues_repo_issues_event_min_datetime": "2018-11-30T14:40:35.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-29T11:43:45.000Z", "max_forks_repo_path": "lumicks/pylake/fitting/fit.py", "max_forks_repo_name": "lumicks/pylake", "max_forks_repo_head_hexsha": "b5875d156d6416793a371198f3f2590fca2be4cd", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2019-01-09T13:45:53.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-06T14:06:52.000Z", "avg_line_length": 36.8337912088, "max_line_length": 120, "alphanum_fraction": 0.5934365094, "include": true, "reason": "import numpy,import scipy", "num_tokens": 5922}
|
[STATEMENT]
lemma INF_commute:
assumes "\<forall>x\<in>U\<^sub>2. \<forall>y\<in>U\<^sub>3. f x y \<in> U" and "B \<subseteq> U\<^sub>3" and "A \<subseteq> U\<^sub>2"
shows
"\<Sqinter>\<^sub>o\<^sub>w ((\<lambda>x. \<Sqinter>\<^sub>o\<^sub>w (f x ` B)) ` A) = \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>j. \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>i. f i j) ` A)) ` B)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>x. \<Sqinter>\<^sub>o\<^sub>w (f x ` B)) ` A) = \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>j. \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>i. f i j) ` A)) ` B)
[PROOF STEP]
proof(cases \<open>U\<^sub>2 = {}\<close>)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. U\<^sub>2 = {} \<Longrightarrow> \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>x. \<Sqinter>\<^sub>o\<^sub>w (f x ` B)) ` A) = \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>j. \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>i. f i j) ` A)) ` B)
2. U\<^sub>2 \<noteq> {} \<Longrightarrow> \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>x. \<Sqinter>\<^sub>o\<^sub>w (f x ` B)) ` A) = \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>j. \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>i. f i j) ` A)) ` B)
[PROOF STEP]
case True
[PROOF STATE]
proof (state)
this:
U\<^sub>2 = {}
goal (2 subgoals):
1. U\<^sub>2 = {} \<Longrightarrow> \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>x. \<Sqinter>\<^sub>o\<^sub>w (f x ` B)) ` A) = \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>j. \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>i. f i j) ` A)) ` B)
2. U\<^sub>2 \<noteq> {} \<Longrightarrow> \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>x. \<Sqinter>\<^sub>o\<^sub>w (f x ` B)) ` A) = \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>j. \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>i. f i j) ` A)) ` B)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
U\<^sub>2 = {}
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
U\<^sub>2 = {}
goal (1 subgoal):
1. \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>x. \<Sqinter>\<^sub>o\<^sub>w (f x ` B)) ` A) = \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>j. \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>i. f i j) ` A)) ` B)
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
U\<^sub>2 = {}
\<forall>x\<in>U\<^sub>2. \<forall>y\<in>U\<^sub>3. f x y \<in> U
B \<subseteq> U\<^sub>3
A \<subseteq> U\<^sub>2
goal (1 subgoal):
1. \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>x. \<Sqinter>\<^sub>o\<^sub>w (f x ` B)) ` A) = \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>j. \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>i. f i j) ` A)) ` B)
[PROOF STEP]
by (simp add: inf_top.sl_neut.neutral_map Inf_top_conv(2))
[PROOF STATE]
proof (state)
this:
\<Sqinter>\<^sub>o\<^sub>w ((\<lambda>x. \<Sqinter>\<^sub>o\<^sub>w (f x ` B)) ` A) = \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>j. \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>i. f i j) ` A)) ` B)
goal (1 subgoal):
1. U\<^sub>2 \<noteq> {} \<Longrightarrow> \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>x. \<Sqinter>\<^sub>o\<^sub>w (f x ` B)) ` A) = \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>j. \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>i. f i j) ` A)) ` B)
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. U\<^sub>2 \<noteq> {} \<Longrightarrow> \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>x. \<Sqinter>\<^sub>o\<^sub>w (f x ` B)) ` A) = \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>j. \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>i. f i j) ` A)) ` B)
[PROOF STEP]
case ne_U\<^sub>2: False
[PROOF STATE]
proof (state)
this:
U\<^sub>2 \<noteq> {}
goal (1 subgoal):
1. U\<^sub>2 \<noteq> {} \<Longrightarrow> \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>x. \<Sqinter>\<^sub>o\<^sub>w (f x ` B)) ` A) = \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>j. \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>i. f i j) ` A)) ` B)
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>x. \<Sqinter>\<^sub>o\<^sub>w (f x ` B)) ` A) = \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>j. \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>i. f i j) ` A)) ` B)
[PROOF STEP]
proof(cases \<open>U\<^sub>3 = {}\<close>)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. U\<^sub>3 = {} \<Longrightarrow> \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>x. \<Sqinter>\<^sub>o\<^sub>w (f x ` B)) ` A) = \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>j. \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>i. f i j) ` A)) ` B)
2. U\<^sub>3 \<noteq> {} \<Longrightarrow> \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>x. \<Sqinter>\<^sub>o\<^sub>w (f x ` B)) ` A) = \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>j. \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>i. f i j) ` A)) ` B)
[PROOF STEP]
case True
[PROOF STATE]
proof (state)
this:
U\<^sub>3 = {}
goal (2 subgoals):
1. U\<^sub>3 = {} \<Longrightarrow> \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>x. \<Sqinter>\<^sub>o\<^sub>w (f x ` B)) ` A) = \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>j. \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>i. f i j) ` A)) ` B)
2. U\<^sub>3 \<noteq> {} \<Longrightarrow> \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>x. \<Sqinter>\<^sub>o\<^sub>w (f x ` B)) ` A) = \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>j. \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>i. f i j) ` A)) ` B)
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>x. \<Sqinter>\<^sub>o\<^sub>w (f x ` B)) ` A) = \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>j. \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>i. f i j) ` A)) ` B)
[PROOF STEP]
proof-
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>x. \<Sqinter>\<^sub>o\<^sub>w (f x ` B)) ` A) = \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>j. \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>i. f i j) ` A)) ` B)
[PROOF STEP]
from assms(2)
[PROOF STATE]
proof (chain)
picking this:
B \<subseteq> U\<^sub>3
[PROOF STEP]
have "B = {}"
[PROOF STATE]
proof (prove)
using this:
B \<subseteq> U\<^sub>3
goal (1 subgoal):
1. B = {}
[PROOF STEP]
unfolding True
[PROOF STATE]
proof (prove)
using this:
B \<subseteq> {}
goal (1 subgoal):
1. B = {}
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
B = {}
goal (1 subgoal):
1. \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>x. \<Sqinter>\<^sub>o\<^sub>w (f x ` B)) ` A) = \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>j. \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>i. f i j) ` A)) ` B)
[PROOF STEP]
from assms(1)
[PROOF STATE]
proof (chain)
picking this:
\<forall>x\<in>U\<^sub>2. \<forall>y\<in>U\<^sub>3. f x y \<in> U
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
\<forall>x\<in>U\<^sub>2. \<forall>y\<in>U\<^sub>3. f x y \<in> U
goal (1 subgoal):
1. \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>x. \<Sqinter>\<^sub>o\<^sub>w (f x ` B)) ` A) = \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>j. \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>i. f i j) ` A)) ` B)
[PROOF STEP]
unfolding \<open>B = {}\<close>
[PROOF STATE]
proof (prove)
using this:
\<forall>x\<in>U\<^sub>2. \<forall>y\<in>U\<^sub>3. f x y \<in> U
goal (1 subgoal):
1. \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>x. \<Sqinter>\<^sub>o\<^sub>w (f x ` {})) ` A) = \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>j. \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>i. f i j) ` A)) ` {})
[PROOF STEP]
by (force intro: INF_top)
[PROOF STATE]
proof (state)
this:
\<Sqinter>\<^sub>o\<^sub>w ((\<lambda>x. \<Sqinter>\<^sub>o\<^sub>w (f x ` B)) ` A) = \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>j. \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>i. f i j) ` A)) ` B)
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
\<Sqinter>\<^sub>o\<^sub>w ((\<lambda>x. \<Sqinter>\<^sub>o\<^sub>w (f x ` B)) ` A) = \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>j. \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>i. f i j) ` A)) ` B)
goal (1 subgoal):
1. U\<^sub>3 \<noteq> {} \<Longrightarrow> \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>x. \<Sqinter>\<^sub>o\<^sub>w (f x ` B)) ` A) = \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>j. \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>i. f i j) ` A)) ` B)
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. U\<^sub>3 \<noteq> {} \<Longrightarrow> \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>x. \<Sqinter>\<^sub>o\<^sub>w (f x ` B)) ` A) = \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>j. \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>i. f i j) ` A)) ` B)
[PROOF STEP]
case False
[PROOF STATE]
proof (state)
this:
U\<^sub>3 \<noteq> {}
goal (1 subgoal):
1. U\<^sub>3 \<noteq> {} \<Longrightarrow> \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>x. \<Sqinter>\<^sub>o\<^sub>w (f x ` B)) ` A) = \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>j. \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>i. f i j) ` A)) ` B)
[PROOF STEP]
from ne_U\<^sub>2 False assms
[PROOF STATE]
proof (chain)
picking this:
U\<^sub>2 \<noteq> {}
U\<^sub>3 \<noteq> {}
\<forall>x\<in>U\<^sub>2. \<forall>y\<in>U\<^sub>3. f x y \<in> U
B \<subseteq> U\<^sub>3
A \<subseteq> U\<^sub>2
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
U\<^sub>2 \<noteq> {}
U\<^sub>3 \<noteq> {}
\<forall>x\<in>U\<^sub>2. \<forall>y\<in>U\<^sub>3. f x y \<in> U
B \<subseteq> U\<^sub>3
A \<subseteq> U\<^sub>2
goal (1 subgoal):
1. \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>x. \<Sqinter>\<^sub>o\<^sub>w (f x ` B)) ` A) = \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>j. \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>i. f i j) ` A)) ` B)
[PROOF STEP]
by (rule ne_INF_commute)
[PROOF STATE]
proof (state)
this:
\<Sqinter>\<^sub>o\<^sub>w ((\<lambda>x. \<Sqinter>\<^sub>o\<^sub>w (f x ` B)) ` A) = \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>j. \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>i. f i j) ` A)) ` B)
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
\<Sqinter>\<^sub>o\<^sub>w ((\<lambda>x. \<Sqinter>\<^sub>o\<^sub>w (f x ` B)) ` A) = \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>j. \<Sqinter>\<^sub>o\<^sub>w ((\<lambda>i. f i j) ` A)) ` B)
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 4506, "file": "Types_To_Sets_Extension_Examples_SML_Relativization_Lattices_SML_Complete_Lattices", "length": 29}
|
# A complete use case
In this section we present a complete use case, based on the meaning classification dataset introduced in [Lorenz et al. (2021)](https://arxiv.org/abs/2102.12846) QNLP paper. The goal is to classify simple sentences (such as "skillful programmer creates software" and "chef prepares delicious meal") into two categories, food or IT. The dataset consists of 130 sentences created using a simple context-free grammar.
We will use a [SpiderAnsatz](../lambeq.rst#lambeq.tensor.SpiderAnsatz) to split large tensors into chains of smaller ones. For differentiation we will use JAX, and we will apply simple gradient-descent optimisation to train the tensors.
## Preparation
We start with a few essential imports.
```python
import warnings
warnings.filterwarnings('ignore') # Ignore warnings
from discopy.tensor import Tensor
from jax import numpy as np
import numpy
np.random = numpy.random
Tensor.np = np
np.random.seed(123458) # Fix the seed
```
<div class="alert alert-info">
**Note**
Note the `Tensor.np = np` assignment in the above code. This is required to let `discopy` know that from now on we use JAX's version of `numpy`.
</div>
Let's read the datasets:
```python
# Read data
def read_data(fname):
with open(fname, 'r') as f:
lines = f.readlines()
data, targets = [], []
for ln in lines:
t = int(ln[0])
data.append(ln[1:].strip())
targets.append(np.array([t, not(t)], dtype=np.float32))
return data, np.array(targets)
train_data, train_targets = read_data('datasets/mc_train_data.txt')
test_data, test_targets = read_data('datasets/mc_test_data.txt')
```
The first few lines of the train dataset:
```python
train_data[:10]
```
['skillful man prepares sauce',
'skillful man bakes dinner',
'woman cooks tasty meal',
'man prepares meal',
'skillful woman debugs program',
'woman prepares tasty meal',
'person runs program',
'person runs useful application',
'woman prepares sauce',
'woman prepares dinner']
Targets are represented as 2-dimensional arrays:
```python
train_targets
```
DeviceArray([[1., 0.],
[1., 0.],
[1., 0.],
...,
[0., 1.],
[1., 0.],
[0., 1.]], dtype=float32)
## Creating and parameterising diagrams
The first step is to convert sentences into string diagrams:
```python
# Parse sentences to diagrams
from lambeq.ccg2discocat import DepCCGParser
parser = DepCCGParser()
train_diagrams = parser.sentences2diagrams(train_data)
test_diagrams = parser.sentences2diagrams(test_data)
train_diagrams[0].draw(figsize=(8,4), fontsize=13)
```
The produced diagrams need to be parameterised by a specific ansatz. For this experiment we will use a [SpiderAnsatz](../lambeq.rst#lambeq.tensor.SpiderAnsatz).
```python
# Create ansatz and convert to tensor diagrams
from lambeq.tensor import SpiderAnsatz
from lambeq.core.types import AtomicType
from discopy import Dim
N = AtomicType.NOUN
S = AtomicType.SENTENCE
# Create an ansatz by assigning 2 dimensions to both
# noun and sentence spaces
ansatz = SpiderAnsatz({N: Dim(2), S: Dim(2)})
train_circuits = [ansatz(d) for d in train_diagrams]
test_circuits = [ansatz(d) for d in test_diagrams]
all_circuits = train_circuits + test_circuits
all_circuits[0].draw(figsize=(8,4), fontsize=13)
```
## Creating a vocabulary
We are now ready to create a vocabulary.
```python
# Create vocabulary
from sympy import default_sort_key
vocab = sorted(
{sym for circ in all_circuits for sym in circ.free_symbols},
key=default_sort_key
)
tensors = [np.random.rand(w.size) for w in vocab]
tensors[0]
```
array([0.35743395, 0.45764418])
## Defining a loss function
This is a binary classification task, so we will use binary cross entropy as the loss.
```python
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def loss(tensors):
# Lambdify
np_circuits = [c.lambdify(*vocab)(*tensors) for c in train_circuits]
# Compute predictions
predictions = sigmoid(np.array([c.eval().array for c in np_circuits]))
# binary cross-entropy loss
cost = -np.sum(train_targets * np.log2(predictions)) / len(train_targets)
return cost
```
The loss function follows the steps below:
1. The symbols in the train diagrams are replaced with concrete ``numpy`` arrays.
2. The resulting tensor networks are evaluated and produce results.
3. Based on the predictions, an average loss is computed for the specific iteration.
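In symbols, the cost computed above is the (base-2) cross entropy averaged over the training set:

$$\mathrm{cost} = -\frac{1}{N}\sum_{i=1}^{N}\sum_{c} t_{ic}\,\log_2 p_{ic}$$

where $t_{ic}$ are the one-hot targets and $p_{ic}$ the sigmoid outputs of the evaluated circuits.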
We use JAX in order to get a gradient function on the loss, and "just-in-time" compile it to improve speed:
```python
from jax import jit, grad
training_loss = jit(loss)
gradient = jit(grad(loss))
```
## Training loop
We are now ready to start training. The following loop computes gradients and uses them to update the tensors associated with the symbols.
```python
training_losses = []
epochs = 90
for i in range(epochs):
gr = gradient(tensors)
for k in range(len(tensors)):
tensors[k] = tensors[k] - gr[k] * 1.0
training_losses.append(float(training_loss(tensors)))
if (i + 1) % 10 == 0:
print(f"Epoch {i + 1} - loss {training_losses[-1]}")
```
Epoch 10 - loss 0.07233709841966629
Epoch 20 - loss 0.015333528630435467
Epoch 30 - loss 0.00786149874329567
Epoch 40 - loss 0.00515687046572566
Epoch 50 - loss 0.0037753921933472157
Epoch 60 - loss 0.0029438300989568233
Epoch 70 - loss 0.002392344642430544
Epoch 80 - loss 0.0020021884702146053
Epoch 90 - loss 0.001713048666715622
## Testing
Finally, we use the trained model on the test dataset:
```python
# Testing
np_test_circuits = [c.lambdify(*vocab)(*tensors) for c in test_circuits]
test_predictions = sigmoid(np.array([c.eval().array for c in np_test_circuits]))
hits = 0
for i in range(len(np_test_circuits)):
target = test_targets[i]
pred = test_predictions[i]
if np.argmax(target) == np.argmax(pred):
hits += 1
print("Accuracy on test set:", hits / len(np_test_circuits))
```
Accuracy on test set: 0.9
## Working with quantum circuits
The process when working with quantum circuits is very similar, with two important differences:
1. The parameterisable part of the circuit is an array of parameters, as described in Section [Circuit Symbols](training-symbols.ipynb#Circuit-symbols), instead of tensors associated to words.
2. If optimisation takes place on quantum hardware, standard automatic differentiation cannot be used. An alternative is to use a gradient-approximation technique, such as [Simultaneous Perturbation Stochastic Approximation](https://en.wikipedia.org/wiki/Simultaneous_perturbation_stochastic_approximation) (SPSA), sketched below.
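To make the second point concrete, here is a minimal, framework-independent sketch of the SPSA gradient estimate; the `loss` callable and `params` array are hypothetical placeholders, not lambeq API:

```python
import numpy as np

def spsa_gradient(loss, params, c=0.1, rng=None):
    """One SPSA estimate of the gradient of `loss` at `params`."""
    rng = rng if rng is not None else np.random.default_rng()
    # Rademacher perturbation: every entry is -1 or +1.
    delta = rng.choice([-1.0, 1.0], size=params.shape)
    # Two loss evaluations, independent of the number of parameters.
    diff = loss(params + c * delta) - loss(params - c * delta)
    # For +/-1 perturbations, 1/delta_i equals delta_i.
    return (diff / (2.0 * c)) * delta
```

Each estimate costs only two loss evaluations however many parameters there are, which is what makes SPSA attractive when every evaluation means running circuits on hardware.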
Complete examples in training quantum circuits can be found in the following notebooks:
- [Quantum pipeline with JAX](../examples/quantum_pipeline_jax.ipynb)
- [Quantum pipeline with tket](../examples/quantum_pipeline_tket.ipynb)
**See also:**
- [Classical pipeline with PyTorch](../examples/classical_pipeline.ipynb)
|
{"hexsha": "f54e8c0e8e3674fd6144a462dfa25fe2df6f7f72", "size": 35474, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "docs/tutorials/training-usecase.ipynb", "max_stars_repo_name": "Thommy257/lambeq-pub", "max_stars_repo_head_hexsha": "502752346610a50fd26feb27ca6c7f5ceab5eff5", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-11-24T10:26:36.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-24T10:26:36.000Z", "max_issues_repo_path": "docs/tutorials/training-usecase.ipynb", "max_issues_repo_name": "Thommy257/lambeq-pub", "max_issues_repo_head_hexsha": "502752346610a50fd26feb27ca6c7f5ceab5eff5", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "docs/tutorials/training-usecase.ipynb", "max_forks_repo_name": "Thommy257/lambeq-pub", "max_forks_repo_head_hexsha": "502752346610a50fd26feb27ca6c7f5ceab5eff5", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 81.7373271889, "max_line_length": 13572, "alphanum_fraction": 0.8262389356, "converted": true, "num_tokens": 1845}
|
! Module to define simple error/exit codes
! and output messages.
!
MODULE Message_Handler
! Module use statements
USE File_Utility, ONLY: Get_Lun
! Disable all implicit typing
IMPLICIT NONE
! Visibilities
PRIVATE
! Module parameters
PUBLIC :: SUCCESS
PUBLIC :: INFORMATION
PUBLIC :: WARNING
PUBLIC :: FAILURE
PUBLIC :: UNDEFINED
! Module procedures
PUBLIC :: Program_Message
PUBLIC :: Display_Message
PUBLIC :: Open_Message_Log
! Integer values that define the error or exit state.
! Note: These values are totally arbitrary.
INTEGER, PARAMETER :: SUCCESS = 0
INTEGER, PARAMETER :: INFORMATION = 1
INTEGER, PARAMETER :: WARNING = 2
INTEGER, PARAMETER :: FAILURE = 3
INTEGER, PARAMETER :: UNDEFINED = 4
! Character descriptors of the error states
INTEGER, PARAMETER :: MAX_N_STATES = 4
CHARACTER(*), PARAMETER, DIMENSION( 0:MAX_N_STATES ) :: &
STATE_DESCRIPTOR = (/ 'SUCCESS ', &
'INFORMATION', &
'WARNING ', &
'FAILURE ', &
'UNDEFINED ' /)
CONTAINS
! Subroutine to output a program header consisting of
! the program name, description, and its revision
!
SUBROUTINE Program_Message( Name, Description, Revision )
! Arguments
CHARACTER(*), INTENT(IN) :: Name
CHARACTER(*), INTENT(IN) :: Description
CHARACTER(*), INTENT(IN) :: Revision
! Local parameters
CHARACTER(*), PARAMETER :: PROGRAM_HEADER = &
'**********************************************************'
CHARACTER(*), PARAMETER :: SPACE = ' '
! Local variables
INTEGER :: pn_pos
CHARACTER(80) :: pn_fmt
INTEGER :: phLen
INTEGER :: dLen
INTEGER :: i, i1, i2
    ! Determine the format for outputting the name
pn_pos = ( LEN(PROGRAM_HEADER) / 2 ) - ( LEN_TRIM(ADJUSTL(Name)) / 2 )
pn_pos = MAX( pn_pos, 0 ) + 5
WRITE( pn_fmt, '( "( ",i2,"x, a, / )" )' ) pn_pos
! Write the program header and program name
WRITE(*,'(/5x, a )' ) PROGRAM_HEADER
WRITE(*,FMT=TRIM(pn_fmt)) TRIM(ADJUSTL(Name))
! Write the program description splitting lines at spaces
phLen = LEN(PROGRAM_HEADER)-1
dLen = LEN_TRIM(Description)
i1=1
i2=phLen
DO
IF ( dLen > phLen ) THEN
IF ( Description(i2:i2) /= SPACE .AND. i2 /= dLen) THEN
! Search for a space character
i = INDEX( Description(i1:i2), SPACE, BACK=.TRUE. )
IF ( i > 0 ) THEN
! Found one. Update end-of-line
i2 = i1 + i - 1
ELSE
! No space. Output rest of description
i2 = dLen
END IF
END IF
ELSE
i2 = dLen
END IF
WRITE(*,'(6x, a )' ) Description(i1:i2)
i1 = i2+1
i2 = MIN(i1+phLen-1,dLen)
IF ( i1 > dLen ) EXIT
END DO
! Write the program revision and end header
WRITE(*,'(/6x, a )' ) TRIM(Revision)
WRITE(*,'(5x, a, / )' ) PROGRAM_HEADER
END SUBROUTINE Program_Message
! Subroutine to display messages.
!
! This routine calls itself if the optional argument Message_Log
! is passed and an error occurs opening the output log file.
!
RECURSIVE SUBROUTINE Display_Message(Routine_Name, &
Message, &
Error_State, &
Message_Log )
! Arguments
CHARACTER(*), INTENT(IN) :: Routine_Name
CHARACTER(*), INTENT(IN) :: Message
INTEGER, INTENT(IN) :: Error_State
CHARACTER(*), INTENT(IN), OPTIONAL :: Message_Log
! Local parameters
CHARACTER(*), PARAMETER :: THIS_ROUTINE_NAME = 'Display_Message'
CHARACTER(*), PARAMETER :: FMT_STRING = '( 1x, a, "(", a, ") : ", a )'
! Local variables
INTEGER :: Error_State_To_Use
LOGICAL :: Log_To_StdOut
INTEGER :: File_ID
INTEGER :: Error_Status
! Check the input error state
Error_State_To_Use = Error_State
IF ( Error_State < 0 .OR. Error_State > MAX_N_STATES ) THEN
Error_State_To_Use = UNDEFINED
END IF
! Set the message log. Default is output to stdout
Log_To_StdOut = .TRUE.
IF ( PRESENT( Message_Log ) ) THEN
Log_To_StdOut = .FALSE.
Error_Status = Open_Message_Log( TRIM( Message_Log ), File_ID )
IF ( Error_Status /= 0 ) THEN
CALL Display_Message( THIS_ROUTINE_NAME, &
'Error opening message log file', &
FAILURE )
Log_To_StdOut = .TRUE.
END IF
END IF
! Output the message
IF ( Log_To_StdOut ) THEN
WRITE( *, FMT = FMT_STRING ) &
TRIM( Routine_Name ), &
TRIM( STATE_DESCRIPTOR( Error_State_To_Use ) ), &
TRIM( Message )
ELSE
WRITE( File_ID, FMT = FMT_STRING ) &
TRIM( Routine_Name ), &
TRIM( STATE_DESCRIPTOR( Error_State_To_Use ) ), &
TRIM( Message )
CLOSE( File_ID )
END IF
END SUBROUTINE Display_Message
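  ! A minimal usage sketch (hedged; routine name, message text, and log file
  ! name are illustrative only):
  !
  !   CALL Display_Message( 'Read_Config', 'Config file not found', FAILURE )
  !   CALL Display_Message( 'Read_Config', 'Using defaults', WARNING, &
  !                         Message_Log = 'run.log' )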
! Function to open the message log file.
!
! SIDE EFFECTS:
! The file is opened for SEQUENTIAL, FORMATTED access with
! UNKNOWN status, position of APPEND, and action of READWRITE.
!
  ! Hopefully all of these options will not cause an existing file
  ! to be inadvertently overwritten.
!
FUNCTION Open_Message_Log(Message_Log, File_ID) RESULT(Error_Status)
! Arguments
CHARACTER(*), INTENT(IN) :: Message_Log
INTEGER, INTENT(OUT) :: File_ID
! Function result
INTEGER :: Error_Status
! Local variables
INTEGER :: Lun
INTEGER :: IO_Status
! Set successful return status
Error_Status = SUCCESS
! Get a file unit number
Lun = Get_Lun()
IF ( Lun < 0 ) THEN
Error_Status = FAILURE
RETURN
END IF
! Open the file
OPEN( Lun, FILE = TRIM( Message_Log ), &
ACCESS = 'SEQUENTIAL', &
FORM = 'FORMATTED', &
STATUS = 'UNKNOWN', &
POSITION = 'APPEND', &
ACTION = 'READWRITE', &
IOSTAT = IO_Status )
IF ( IO_Status /= 0 ) THEN
Error_Status = FAILURE
RETURN
END IF
! Return the file ID
File_ID = Lun
END FUNCTION Open_Message_Log
END MODULE Message_Handler
|
{"hexsha": "47bdbc638224ddf3f3bdda06f9d6cedbb5eaeb87", "size": 6437, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "var/external/crtm_2.3.0/libsrc/Message_Handler.f90", "max_stars_repo_name": "matzegoebel/WRF-fluxavg", "max_stars_repo_head_hexsha": "686ae53053bf7cb55d6f078916d0de50f819fc62", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2018-08-27T12:49:19.000Z", "max_stars_repo_stars_event_max_datetime": "2019-05-16T14:22:54.000Z", "max_issues_repo_path": "var/external/crtm_2.3.0/libsrc/Message_Handler.f90", "max_issues_repo_name": "teb-model/wrf-teb", "max_issues_repo_head_hexsha": "60882e61a2a3d91f1c94cb5b542f46ffaebfad71", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 25, "max_issues_repo_issues_event_min_datetime": "2018-09-18T16:44:30.000Z", "max_issues_repo_issues_event_max_datetime": "2019-07-07T10:59:59.000Z", "max_forks_repo_path": "NCEP_crtm/Message_Handler.f90", "max_forks_repo_name": "GEOS-ESM/NCEP_Shared", "max_forks_repo_head_hexsha": "6f8b2103d3ce8f1b829bff88b71ca4482b4a56a4", "max_forks_repo_licenses": ["NASA-1.3", "ECL-2.0", "Apache-2.0"], "max_forks_count": 12, "max_forks_repo_forks_event_min_datetime": "2020-10-29T17:54:31.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-29T08:42:45.000Z", "avg_line_length": 29.8009259259, "max_line_length": 74, "alphanum_fraction": 0.5744912226, "num_tokens": 1669}
|
import gym
from gym import spaces
from gym.utils import seeding
import pandas as pd
import numpy as np
from enum import Enum
import matplotlib.pyplot as plt
import csv
import gym_anytrading.datasets.b3 as b3
class TradingEnv(gym.Env):
def __init__(self):
self.n_stocks = 10
self.W = 2
self.count = 0
self.count_episodes = -1
self.max_steps = 5
#self.action = [1/(self.n_stocks+1)]*(self.n_stocks+1)
self.state = None
csv_filename = '../../../gym_anytrading/datasets/data/B3_COTAHIST.csv'
#csv_filename = 'gym_anytrading/datasets/data/B3_COTAHIST.csv'
self.df = pd.read_csv(csv_filename, parse_dates=True, index_col='Date')
#print(self.df.head())
## spaces
self.action_space = spaces.Box(low=0, high=1.0, shape=(self.n_stocks+1,), dtype=np.float32)
self.observation_space = spaces.Box(low=0.0, high=10.0, shape=((self.W+1)*(self.n_stocks+1), ), dtype=np.float32)
self.beta = 1
def seed(self, seed=None):
pass
def reset(self):
self.count = 0
self.count_episodes += 1
return self.receive_state().flatten()
#self._done = False
#self._current_tick = self._start_tick
#self._last_trade_tick = self._current_tick - 1
#self._position = Positions.Short
#self._position_history = (self.window_size * [None]) + [self._position]
#self._total_reward = 0.
#self._total_profit = 1. # unit
#self._first_rendering = True
#self.history = {}
#return self._get_observation()
#pass
    def normalizeAction(self, action):
        # Normalize the raw action so that the portfolio weights sum to one.
        action = np.array(action)
        return list(action / action.sum())
def receive_state(self):
state = []
#print("AQUI.......")
for j in range(self.W, -1, -1):
            start_point = self.n_stocks*self.W + self.count_episodes*self.max_steps*self.n_stocks + (self.count-j)*self.n_stocks
            # Slice out the current window of n_stocks rows for this time step.
            df_new = self.df.iloc[start_point:start_point + self.n_stocks]
df_new = df_new.iloc[:,[1,4]]
#print(self.count, df_new)
obs = [1]
for i in range(self.n_stocks):
#print(line)
obs.append(df_new.iloc[i, 1]/df_new.iloc[i, 0])
#print(obs)
state.append(np.array(obs))
#print(np.array(state))
return np.array(state)
#start_point = self.count_episodes*self.max_steps*self.n_stocks + self.count*self.n_stocks
#df_new = self.df.iloc[start_point:start_point+10]
#df_new = df_new.iloc[:,[1,4]]
#print(self.count, df_new)
#obs = [1]
#for i in range(self.n_stocks):
# #print(line)
# obs.append(df_new.iloc[i, 1]/df_new.iloc[i, 0])
#print(obs)
#state.append(obs)
#self.holdings = self.holdings -
#new_action = normalizeAction(action)
def calculate_reward(self, action):
#self.state = self.observation_space.sample()
#print(self.state)
reward = self.beta*np.dot(self.state[-1], action)
done = False
if(self.count>=self.max_steps):
done = True
#print("REWARD ", reward)
return reward, done
#valueOfHolding = data["Close"]
#self.portifolio = valueOfHolding*self.holdings
def step(self, action):
action = self.normalizeAction(action)
self.state = self.receive_state()
#print(state)
self.count +=1
reward, done = self.calculate_reward(action)
#self.history.insert(0, [self.count, state, reward])
#if(len(self.history)>3):
# self.history.pop(3)
#print(self.history[0][1])
#self._done = False
#self._current_tick += 1
#if self._current_tick == self._end_tick:
# self._done = True
#step_reward = self._calculate_reward(action)
#self._total_reward += step_reward
#self._update_profit(action)
#trade = False
#if ((action == Actions.Buy.value and self._position == Positions.Short) or
# (action == Actions.Sell.value and self._position == Positions.Long)):
# trade = True
#if trade:
# self._position = self._position.opposite()
# self._last_trade_tick = self._current_tick
#self._position_history.append(self._position)
#observation = self._get_observation()
#info = dict(
# total_reward = self._total_reward,
# total_profit = self._total_profit,
# position = self._position.value
#)
#self._update_history(info)
        return self.state.flatten(), reward, done, {}
def readData(self):
ficheiro = open('gym_anytrading/datasets/data/STOCKS_AMBEV.csv', 'r')
reader = csv.DictReader(ficheiro, delimiter = ',')
#print(reader)
#for linha in reader:
# print (linha["Close"])
return reader
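# A minimal driver sketch (hedged: assumes the CSV paths above resolve and that
# enough rows exist for the configured number of episodes):
#
#     env = TradingEnv()
#     obs = env.reset()
#     done = False
#     while not done:
#         obs, reward, done, info = env.step(env.action_space.sample())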
|
{"hexsha": "02dfe05ca154c107fbf21c455a24cfca7cd6d1c0", "size": 5178, "ext": "py", "lang": "Python", "max_stars_repo_path": "gym_anytrading/envs/trading_env.py", "max_stars_repo_name": "tsb4/dayTradingEnv", "max_stars_repo_head_hexsha": "16d1970a41c8933970152f1f41e504340d48cb08", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "gym_anytrading/envs/trading_env.py", "max_issues_repo_name": "tsb4/dayTradingEnv", "max_issues_repo_head_hexsha": "16d1970a41c8933970152f1f41e504340d48cb08", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "gym_anytrading/envs/trading_env.py", "max_forks_repo_name": "tsb4/dayTradingEnv", "max_forks_repo_head_hexsha": "16d1970a41c8933970152f1f41e504340d48cb08", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.5660377358, "max_line_length": 127, "alphanum_fraction": 0.5905755118, "include": true, "reason": "import numpy", "num_tokens": 1302}
|
'''Functions for estimating an adjustment to the posterior prediction
over subtypes when making predictions online.
Author: Peter Schulam
'''
import numpy as np
from scipy.optimize import minimize
from sklearn.linear_model import LogisticRegressionCV
from sklearn.cross_validation import KFold
from mypy.models import softmax
def train_adjustment(P, Q, QQaux, n_dev=4, interp_groups=None):
n = P.shape[0]
Pmap = map_encode(P)
XXaux = [Qi[:, 1:] for Qi in QQaux]
QQdev = [np.zeros_like(Q) for _ in XXaux]
dev_folds = KFold(n, n_dev, shuffle=True, random_state=0)
for i, (train, test) in enumerate(dev_folds):
for j, Xj in enumerate(XXaux):
cv = KFold(train.size, 10, shuffle=True, random_state=0)
clf = LogisticRegressionCV(Cs=20, cv=cv, penalty='l2', solver='lbfgs', multi_class='multinomial')
clf.fit(Xj[train], np.argmax(P, axis=1)[train])
            QQdev[j][test] = clf.predict_proba(Xj[test])
for i, _ in enumerate(QQdev):
QQdev[i] = map_encode(QQdev[i])
if interp_groups is None:
interp_groups = np.zeros(Pmap.shape[0])
groups = np.unique(interp_groups)
weights = []
for g in groups:
ix = interp_groups == g
w = interpolate(Pmap[ix], [Q[ix]] + [Qi[ix] for Qi in QQdev])
weights.append(w)
aux_clf = []
for i, Xi in enumerate(XXaux):
cv = KFold(n, 10, shuffle=True, random_state=0)
clf = LogisticRegressionCV(Cs=20, cv=cv, penalty='l2', solver='lbfgs', multi_class='multinomial')
clf.fit(Xi, np.argmax(P, axis=1))
aux_clf.append(clf)
return aux_clf, weights
def apply_adjustment(Q, QQaux, aux_clf, weights, interp_groups=None):
n = Q.shape[0]
XXaux = [Qi[:, 1:] for Qi in QQaux]
QQmap = [clf.predict_proba(Xi) for clf, Xi in zip(aux_clf, XXaux)]
QQmap = [map_encode(Qi) for Qi in QQmap]
if interp_groups is None:
interp_groups = np.zeros(n)
Qhat = np.zeros_like(Q)
groups = np.unique(interp_groups)
    for gi, g in enumerate(groups):
        ix = interp_groups == g
        w = weights[gi]  # weights were appended in the order of np.unique(interp_groups)
Qhat[ix] = interp_mixture(w, [Q[ix]] + [Qi[ix] for Qi in QQmap])
return Qhat
def fit_multinomial(P, X):
k = P.shape[1]
d = X.shape[1]
W0 = np.zeros((k, d))
def f(w):
W = w.reshape(W0.shape)
y = [softmax.regression_ll(x, y, W) for x, y in zip(X, P)]
return -sum(y)
def g(w):
W = w.reshape(W0.shape)
y = [softmax.regression_ll_grad(x, y, W) for x, y in zip(X, P)]
y = -sum(y)
return y.ravel()
s = minimize(f, W0.ravel(), jac=g, method='BFGS')
if not s.success:
raise RuntimeError('Multinomial fit optimization failed.')
W = s.x.reshape(W0.shape)
return W
def predict_multinomial(W, X):
P = np.array([softmax.regression_proba(x, W) for x in X])
return P
def interpolate(P, QQ, seed=1):
'''Estimate interpolation weights of distributions in QQ to minimize
cross-entropy under P.
Author: Peter Schulam
'''
rnd = np.random.RandomState(seed)
M = len(QQ)
v = rnd.normal(size=M)
v[0] = 0.0
obj = lambda x: interp_perplexity(P, QQ, x)
jac = lambda x: interp_perplexity_jac(P, QQ, x)
sol = minimize(obj, v, jac=jac, method='BFGS')
if not sol.success:
raise RuntimeError('Interpolation optimization failed.')
w = softmax.softmax_func(sol.x)
return w
def interp_perplexity(P, QQ, v):
'''Compute cross-entropy of interpolated distributions under P.
Author: Peter Schulam
'''
w = softmax.softmax_func(v)
Q = interp_mixture(w, QQ)
return - np.sum(P * np.log(Q)) / P.shape[0]
def interp_perplexity_jac(P, QQ, v):
'''Compute the jacobian of the cross-entropy with respect to `v`.
Author: Peter Schulam
'''
M = v.size
w = softmax.softmax_func(v)
Q = interp_mixture(w, QQ)
dp_dw = np.zeros(M)
for m in range(M):
dp_dw[m] = np.sum(P * QQ[m] / Q)
dw_dv = -softmax.softmax_grad(v)
return np.dot(dp_dw, dw_dv) / P.shape[0]
def interp_mixture(w, QQ):
'''Compute the interpolated distribution.
Author: Peter Schulam
'''
Q = np.zeros_like(QQ[0])
for wi, Qi in zip(w, QQ):
Q += wi * Qi
return Q
def map_encode(P, eps=1e-2):
'''Compute a new distribution in each row that is dominated by the MAP
in the original.
Author: Peter Schulam
'''
cx = np.argmax(P, axis=1)
rx = list(range(P.shape[0]))
Q = eps * np.ones_like(P)
Q[rx, cx] = 1
Q /= Q.sum(axis=1)[:, np.newaxis]
return Q
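# A minimal sketch of the mixture utilities above, with synthetic inputs chosen
# only to illustrate the shapes (hypothetical values, not study data):
#
#     P = np.array([[0.7, 0.3], [0.2, 0.8]])
#     Q = map_encode(P)                          # rows become near-one-hot at the MAP
#     mixed = interp_mixture([0.5, 0.5], [P, Q])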
|
{"hexsha": "302e3e3eb0268b8ccf8a0c17dc3d00dbb0d024cc", "size": 4717, "ext": "py", "lang": "Python", "max_stars_repo_path": "2015/07/online_old.py", "max_stars_repo_name": "pschulam/Notebook", "max_stars_repo_head_hexsha": "3404ce01a4ebdf23216ff01512a8f84b4f7758aa", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "2015/07/online_old.py", "max_issues_repo_name": "pschulam/Notebook", "max_issues_repo_head_hexsha": "3404ce01a4ebdf23216ff01512a8f84b4f7758aa", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "2015/07/online_old.py", "max_forks_repo_name": "pschulam/Notebook", "max_forks_repo_head_hexsha": "3404ce01a4ebdf23216ff01512a8f84b4f7758aa", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.4972972973, "max_line_length": 109, "alphanum_fraction": 0.606741573, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1357}
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
An extension to the standard STScI data model for MIRI readnoise data, based
on the base MIRI data model.
:Reference:
The STScI jwst.datamodels documentation. See
https://jwst-pipeline.readthedocs.io/en/latest/jwst/datamodels/index.html
:History:
16 Jul 2014: Created
29 Aug 2014: Included new reference file keywords (REFTYPE, AUTHOR, PEDIGREE)
25 Sep 2014: Updated the reference flags. insert_value_column function
used to convert between 3 column and 4 column flag tables.
TYPE and REFTYPE are no longer identical.
30 Sep 2014: Superflous flags commented out.
17 Oct 2014: Updated MiriGainModel which is now based on MiriDataModel, and
no longer has an ERR nor DQ extension.
20 May 2015: Corrected mistake in which data array was ignored. The model
now inherits from the HasData base class.
19 Aug 2015: Duplicated parts of schema now reference STScI model.
10 Dec 2015: TYPE and REFTYPE strings rationalised.
15 Jun 2017: meta.reffile schema level removed to match changes in the
JWST build 7.1 data models release. meta.reffile.type also
changed to meta.reftype. TYPE keyword replaced by DATAMODL.
12 Jul 2017: Replaced "clobber" parameter with "overwrite".
30 Jan 2019: self.meta.model_type now set to the name of the STScI data
model this model is designed to match (skipped if there isn't
a corresponding model defined in ancestry.py).
05 Mar 2020: Added get_truncated_noise function, to remove the high noise
levels defined around bad pixels.
26 Mar 2020: Ensure the model_type remains as originally defined when saving
to a file.
@author: Vincent Geers (DIAS), Steven Beard (UKATC)
"""
import numpy as np
# Import the MIRI data model.
from miri.datamodels.ancestry import get_my_model_type
from miri.datamodels.dqflags import insert_value_column
from miri.datamodels.miri_model_base import MiriDataModel
from miri.datamodels.operations import HasData
# List all classes and global functions here.
__all__ = ['MiriReadnoiseModel']
class MiriReadnoiseModel(MiriDataModel, HasData):
"""
A data model for MIRI readnoise data.
:Parameters:
init: shape tuple, file path, file object, pyfits.HDUList, numpy array
An optional initializer for the data model, which can have one
of the following forms:
* None: A default data model with no shape. (If a data array is
provided in the mask parameter, the shape is derived from the
array.)
* Shape tuple: Initialize with empty data of the given shape.
* File path: Initialize from the given file.
* Readable file object: Initialize from the given file object.
* pyfits.HDUList: Initialize from the given pyfits.HDUList.
data: numpy array (optional)
        An array containing the readnoise data.
If a data parameter is provided, its contents overwrite the
data initialized by the init parameter.
\*\*kwargs:
All other keyword arguments are passed to the DataModel initialiser.
See the jwst.datamodels documentation for the meaning of these keywords.
"""
schema_url = "miri_readnoise.schema"
def __init__(self, init=None, data=None, **kwargs):
"""
Initialises the MiriReadnoiseModel class.
Parameters: See class doc string.
"""
super(MiriReadnoiseModel, self).__init__(init=init, **kwargs)
        # Reference data type is readnoise.
        self.meta.reftype = 'READNOISE'
# Initialise the model type
self._init_data_type()
# This is a reference data model.
self._reference_model()
# Update the data array if it has been specifically provided.
HasData.__init__(self, data)
def _init_data_type(self):
# Initialise the data model type
model_type = get_my_model_type( self.__class__.__name__ )
self.meta.model_type = model_type
def on_save(self, path):
super(MiriReadnoiseModel, self).on_save(path)
# Re-initialise data type on save
self._init_data_type()
def get_truncated_noise(self, nsigma=4.0):
"""
Returns a noise array with extreme values truncated.
:Parameters:
nsigma: float (optional)
Values greater than nsigma from the mean noise are clipped.
By default nsigma = 4.0.
"""
# MIRI-703. Some readnoise CDPs contain arbitrary values of 1000.0 DN
# in bad pixel regions.
first_mean = np.mean( self.data )
first_std = np.std( self.data )
first_clip = first_mean + nsigma * first_std
#print("First mean=", first_mean, "std=", first_std, "clip level=", first_clip)
clipped_data = np.clip(self.data, 0.0, first_clip)
second_mean = np.mean( clipped_data )
second_std = np.std( clipped_data )
second_clip = second_mean + nsigma * second_std
#print("Second mean=", second_mean, "std=", second_std, "clip level=", second_clip)
clipped_data = np.clip(clipped_data, 0.0, second_clip)
return clipped_data
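    # A minimal usage sketch (hedged; the CDP file name below is illustrative only):
    #
    #     model = MiriReadnoiseModel("MIRI_READNOISE_CDP.fits")
    #     quiet_noise = model.get_truncated_noise(nsigma=3.0)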
#
# A minimal test is run when this file is run as a main program.
# For a more substantial test see miri/datamodels/tests.
#
if __name__ == '__main__':
print("Testing the MiriReadnoiseModel module.")
PLOTTING = False
SAVE_FILES = False
data3x3 = np.array([[1.0,1.2,1.1],[1.3,1.2,1.0],[1.1,0.8,0.9]])
print("Readnoise data with data + err + dq:")
with MiriReadnoiseModel(data=data3x3 ) as testdata:
print(testdata)
clipped = testdata.get_truncated_noise()
if PLOTTING:
testdata.plot(description="testdata")
if SAVE_FILES:
testdata.save("test_readnoise_model1.fits", overwrite=True)
del testdata
print("Test finished.")
|
{"hexsha": "60a752ed6b592a204a484cb87efbc2c027a8aa28", "size": 6038, "ext": "py", "lang": "Python", "max_stars_repo_path": "miri/datamodels/miri_readnoise_model.py", "max_stars_repo_name": "eslavich/MiriTE", "max_stars_repo_head_hexsha": "05e25e1222e854fef5a72011f6618fa8fb5eaaff", "max_stars_repo_licenses": ["CNRI-Python"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "miri/datamodels/miri_readnoise_model.py", "max_issues_repo_name": "eslavich/MiriTE", "max_issues_repo_head_hexsha": "05e25e1222e854fef5a72011f6618fa8fb5eaaff", "max_issues_repo_licenses": ["CNRI-Python"], "max_issues_count": 24, "max_issues_repo_issues_event_min_datetime": "2019-08-09T15:03:20.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-04T10:04:48.000Z", "max_forks_repo_path": "miri/datamodels/miri_readnoise_model.py", "max_forks_repo_name": "eslavich/MiriTE", "max_forks_repo_head_hexsha": "05e25e1222e854fef5a72011f6618fa8fb5eaaff", "max_forks_repo_licenses": ["CNRI-Python"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2019-06-16T15:03:23.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-02T19:51:52.000Z", "avg_line_length": 36.3734939759, "max_line_length": 91, "alphanum_fraction": 0.6695925803, "include": true, "reason": "import numpy", "num_tokens": 1468}
|
# coding:utf-8
import sys
import traceback
import talib
import numpy as np
import pandas as pd
from pandas import Series
from .base import *
from .talib_series import LINEARREG_SLOPE as SLOPE
def udf_cross(A, B):
if isinstance(A, float):
A1 = A0 = A
ls = len(B)
B1 = B.iloc[ls -2]
B0 = B.iloc[ls -1]
elif isinstance(B, float):
B1 = B0 = B
ls = len(A)
A1 = A.iloc[ls -2]
A0 = A.iloc[ls -1]
else:
return SINGLE_CROSS(A,B)
if A1 < B1 and A0 > B0:
return True
return False
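# A minimal sketch of udf_cross against a constant threshold (hedged; synthetic
# values for illustration only):
#
#     dlx = pd.Series([0.4, 0.6])
#     udf_cross(dlx, 0.5)  # True: the series crossed above 0.5 on the last bar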
def udf_ref_pct(A, B, N = 1):
if len(A) < N + 1:
return False
C = A / REF(A, N)
lc = len(C)
return C[lc - 1] > B
def udf_dapan_risk_df(data_df, N1=6, N2=12):
dsize = len(data_df)
if dsize <= N2:
return (False, None)
return udf_dapan_risk(data_df.close, data_df.high, data_df.low, N1, N2)
def udf_dapan_risk(C,H,L, N1=6, N2=12):
dsize = len(C)
if dsize <= N2:
return (False, None)
# C = data_df.close
# H = data_df.high
# L = data_df.low
s0=3.5
s5=3.3
b3=1.3
b5=0.5
# VAR2 = talib.MIN(L, N1)
# VAR3 = talib.MAX(H, N2)
VAR2 = LLV(L, N1)
VAR3 = HHV(H, N2)
DLX=talib.EMA((C-VAR2)/(VAR3-VAR2)*4,4)
flg = 0
if udf_cross(DLX, b5):
buy50 = 1
else:
buy50 = 0
flg = flg + buy50
if udf_cross(DLX, b3):
buy30 = 1
else:
buy30 = 0
flg = flg + buy30
if udf_cross(DLX, s5):
sell5 = 1
else:
sell5 = 0
flg = flg + sell5
if udf_cross(DLX, s0):
sell0 = 1
else:
sell0 = 0
flg = flg + sell0
    return (flg > 0, {'buy50':buy50, 'buy30':buy30, 'sell5':sell5, 'sell0':sell0})
def udf_index_risk(data, N1=6, N2=12):
qc=3.5
jb=3.3
j3=1.3
j5=0.5
L=data.low
H=data.high
C=data.close
VAR2=LLV(L, N1)
VAR3=HHV(H, N2)
DLX=EMA(((C-VAR2)/(VAR3-VAR2))*4, 4)
sj5=CROSS(DLX,j5)
sj3=CROSS(DLX,j3)
sjb=CROSS(DLX,jb)
sqc=CROSS(DLX,qc)
dict_rt = {'BUY50':sj5, 'ADD30':sj3, 'SELL50':sjb, 'SELL0':sqc}
return pd.DataFrame(dict_rt)
def udf_base_check_df(data_df, N1=70, N2=144, N3=250):
return udf_base_check(data_df.close, data_df.vol, N1, N2, N3)
def udf_base_check(C, V, N1=70, N2=144, N3=250):
len_d = len(C)
N = MAX(MAX(N1,N2),N3)
if len_d < N:
return (False, None)
len_d -= 1
# C = data_df.close
# V = data_df.vol
cn1 = C > MA(C, N1)
cn2 = C > MA(C, N2)
cn3 = C > MA(C, N3)
return (cn1[len_d] or cn2[len_d] or cn3[len_d], {str(N1):cn1[len_d], str(N2):cn2[len_d], str(N3):cn3[len_d]})
def udf_hangqing_start_df(data_df, snum=13, lnum=144):#, sd=20, ld=250):
    return udf_hangqing_start(data_df.close, snum, lnum)
def udf_hangqing_start(C, snum=13, lnum=144):#, sd=20, ld=250):
if len(C) < lnum:
return False
# C = data_df.close
# A1 = (C-MA(C,lnum))/MA(C,lnum)*100
# N1 = BARSLAST(CROSS(C,MA(C,lnum)), 1)
# N2 = BARSLAST(CROSS(MA(C,lnum),C), 1)
# B1 = IF(N1<N2,N1+1,0)
# C1 = HHV(A1,B1)
# D1 = (C-REF(C,B1))/REF(C,B1)*100
N3 = BARSLAST(CROSS(C,MA(C,snum)))
N4 = BARSLAST(CROSS(MA(C,snum),C))
AA = IF(N3<N4,N3+1,0)
BB = (C-REF(C,AA))/REF(C,AA)*100
# IFAND(udf_cross(BB, 10.0), C/REF(C,1) > 1.05, True, False)
return udf_cross(BB, 10.0) and udf_ref_pct(C, 1.05)
def udf_niu_check_df(data_df, n1 = 36, n2 = 30, n3 = 25):
return udf_niu_check(data_df.close, data_df.high, data_df.low, data_df.vol, data_df.amount, n1, n2, n3)
def udf_niu_check(C,H,L,VOL,AMOUNT, n1 = 36, n2 = 30, n3 = 25):
    if len(C) < n1:  # not enough history for the longest lookback window
        return False
# L = data_df.low
# H = data_df.high
# C=data_df.close
# VOL=data_df.volume
VARR24=LLV(L,36)
VARR25=HHV(H,30)
VARR26=EMA((C-VARR24)/(VARR25-VARR24)*4,4)*25
VARB27=(((C-LLV(L,9))/(HHV(H,9)-LLV(L,9))*100)/2+22)*1
VARB28=(((C -(((EMA(AMOUNT*100,13) /EMA(VOL,13)) / 100))) / (((EMA(AMOUNT*100,13) /EMA(VOL,13)) / 100))) * 100)
# JD=((VARB28 < (0)) AND ((C-LLV(L,9))/(HHV(H,9)-LLV(L,9))*100)<VARB27-2 AND VARR26<10
JD1 = (VARB28 < (0))
JD2 = ((C-LLV(L,9))/(HHV(H,9)-LLV(L,9))*100)<VARB27-2
JD3 = VARR26<10
JD4=IFAND(JD1,JD2,JD1,False)
JD=IFAND(JD4,JD3,JD4,False)
CD=IF(JD,20,0)
AAA=REF(CD,1)>0
# BBB=CD=0
# DR=AAA AND BBB
# return JD OR DR
RTN=IFOR(JD, AAA, True, False)
lrtn = len(RTN)
return RTN[lrtn - 1] or RTN[lrtn - 2] or RTN[lrtn - 3]
def udf_top_df(data_df):
return udf_top(data_df.close, data_df.high, data_df.low)
def udf_top(C,H,L):
    # {Stock selection: gap-up open on the day after a limit-up}
# REF(C,1)/REF(C,2)>1.098 AND REF(C,1)=REF(H,1) AND L>REF(H,1);
len_d = len(C)
if len_d < 2:
return False
# C=data_df.close
# H=data_df.high
# L=data_df.low
A1=REF(C,1)/REF(C,2)>1.098
A2=REF(C,1)==REF(H,1)
A3=L>REF(H,1)
A4 = IFAND(IFAND(A1,A2,True,False),A3,True,False)
return A4[len_d - 1]
def udf_top_last(C, PCT = 9.8, M = 5, N=30):
len_d = len(C) - 1
if len_d < N:
return False
rtn = False
if M <= 0:
M = 5
for i in range(0, M):
A = (REF(C,i) - REF(C,i+1)) * 100 /REF(C,i+1) > PCT
if A[len(A)-1]:
rtn = True
break
return rtn
def udf_macd_zq(C,O,H,L):
rtn = {'flg':False}
len_d = len(C) - 1
#macd zengqiang
DIFF=EMA(C,12)-EMA(C,26)
DEA=EMA(DIFF,9)
#MACD=2*(DIFF-DEA), COLORSTICK
MACD=2*(DIFF-DEA)
#DI_JIN=CROSS(DIFF,DEA) AND DIFF<-0.1
DI_JIN=IFAND(CROSS(DIFF,DEA), DIFF<-0.1, True, False)
rtn['DI_JIN'] = cv = DI_JIN[len_d]
if cv:
rtn['flg'] = True
#DRAWICON(DI_JIN,0.2,3)
#DRAWTEXT(DI_JIN,0.2,'DI_JIN'),COLORBLACK
JC=BARSLAST(DEA>=0)
#JCCOUNT=COUNT(CROSS(DIFF,DEA),JC)
JCCOUNT=COUNT(CROSS(DIFF,DEA),JC[len(JC)-1])
#JC2=COUNT(JCCOUNT=2,21)=1
JC2=COUNT(JCCOUNT==2,21)==1
#ER_JIN=CROSS(DIFF,DEA) AND DEA<0 AND JC2
ER_JIN=IFAND(IFAND(CROSS(DIFF,DEA), DEA<0, True, False), JC2,True,False)
rtn['ER_JIN'] = cv = ER_JIN[len_d]
if cv:
rtn['flg'] = True
#DRAWICON(ER_JIN,0.4,7)
#DRAWTEXT(ER_JIN,0.4,'ER_JIN'),COLORYELL
A1=BARSLAST(REF(CROSS(DIFF,DEA),1))
#DI_BEI_LI=REF(C,A1+1)>C AND DIFF>REF(DIFF,A1+1) AND CROSS(DIFF,DEA),NODRAW
DI_BEI_LI=IFAND(IFAND(REF(C,A1+1)>C, DIFF>REF(DIFF,A1+1),True,False), CROSS(DIFF,DEA), True,False)
rtn['DI_BEI_LI'] = cv = DI_BEI_LI[len_d]
if cv:
rtn['flg'] = True
#DRAWICON(DI_BEI_LI,0,1)
#DRAWTEXT(DI_BEI_LI,0,'DI_BEI_LI'),COLORBLUE
A2=BARSLAST(REF(CROSS(DEA,DIFF),1))
#DING_BEI_LI=REF(C,A2+1)<C AND REF(DIFF,A2+1)>DIFF AND CROSS(DEA,DIFF)
DING_BEI_LI=IFAND(IFAND(REF(C,A2+1)<C , REF(DIFF,A2+1)>DIFF, True,False) , CROSS(DEA,DIFF), True, False)
rtn['DING_BEI_LI'] = cv = DING_BEI_LI[len_d]
if cv:
rtn['flg'] = True
#DRAWTEXT(DING_BEI_LI,DEA,'DING_BEI_LI'),COLORBLUE
#DRAWICON(DING_BEI_LI,DEA,2)
#SAN_JIN=DI_JIN AND ER_JIN AND DI_BEI_LI,NODRAW
SAN_JIN=IFAND(IFAND(DI_JIN , ER_JIN , True,False), DI_BEI_LI,True, False)
rtn['SAN_JIN'] = cv = SAN_JIN[len_d]
if cv:
rtn['flg'] = True
#DRAWICON(SAN_JIN,0.6,16)
#DRAWTEXT(SAN_JIN,0.6,'SAN_JIN'),COLORRED,LINETHICK2
### #HONG_MIANJI=SUM(MACD,BARSLAST(MACD<0))*(MACD>0),COLOR0000FF,NODRAW
### HONG_MIANJI=SUMS(MACD,BARSLAST(MACD<0))*MACD(MACD>0))
### #LV_MIANJI=SUM(MACD,BARSLAST(MACD>0))*(MACD<0),COLORFFFF00,NODRAW
### LV_MIANJI=SUMS(MACD,BARSLAST(MACD>0)) #*(MACD<0)
### AA=REF(LV_MIANJI,1)*100
### BB=REF(HONG_MIANJI,1)*100
### #DRAWNUMBER(CROSS(0,MACD),HHV(REF(MACD,1),5)+0.03,ABS(BB)),COLORRED
### #DRAWNUMBER(CROSS(MACD,0),LLV(REF(MACD,1),5)-0.03,ABS(AA)),COLORBLUE
JC3 = DEA-DIFF
#LVZHU_MIANJI=IF(MACD<0,SUM(MACD,BARSLAST(JC3<0)),0)
LVZHU_MIANJI=SUMS(MACD,BARSLAST(JC3<0))
#HONGZHU_MIANJI=IF(MACD>0,SUM(MACD,BARSLAST(JC3>0)),0)
HONGZHU_MIANJI=SUMS(MACD,BARSLAST(JC3>0))
BEN_CI_LLV=LLV(L,BARSLAST(JC3<0))
BEN_CI_HHV=HHV(H,BARSLAST(JC3>0))
X1=IF (MACD<0,BARSLAST(CROSS(DIFF,DEA)),0)
### QIAN_CI_LVZHU_MIANJI=REF(LVZHU_MIANJI,X1+1)
QIAN_CI_LLV=REF(BEN_CI_LLV,X1+1)
#Y1=IF((LVZHU_MIANJI<0 AND ABS(LVZHU_MIANJI) AND BEN_CI_LLV<QIAN_CI_LLV ),1,0)
Y1=IFAND( IFAND(LVZHU_MIANJI<0 , ABS(LVZHU_MIANJI),True,False), BEN_CI_LLV<QIAN_CI_LLV , 1, 0)
### RS1= MACD<0 AND REF(MACD,1)<0 AND C<QIAN_CI_LLV AND ABS(LVZHU_MIANJI)
DI_BEI_CI=IF(CROSS(DIFF,DEA),REF(Y1,1),0) #,NODRAW
X2=IF (MACD>0,BARSLAST(CROSS(DEA,DIFF)),0)
QIAN_CI_HONGZHU_MIANJI=REF(HONGZHU_MIANJI,X2+1)
QIAN_CI_HHV=REF(BEN_CI_HHV,X2+1)
#Y2=IF((HONGZHU_MIANJI>0 AND HONGZHU_MIANJI<QIAN_CI_HONGZHU_MIANJI AND BEN_CI_HHV>QIAN_CI_HHV ),1,0)
Y2=IFAND(IFAND(HONGZHU_MIANJI>0 , HONGZHU_MIANJI<QIAN_CI_HONGZHU_MIANJI, True, False) , BEN_CI_HHV>QIAN_CI_HHV ,1,0)
### RS2= MACD>0 AND REF(MACD,1)>0 AND C>QIAN_CI_HHV AND ABS(HONGZHU_MIANJI)
DING_BEI_CI=IF(CROSS(DEA,DIFF),REF(Y2,1),0) #,NODRAW
rtn['DI_BEI_CI'] = cv = DI_BEI_CI[len_d]
if cv > 0:
rtn['flg'] = True
rtn['DING_BEI_CI'] = cv = DING_BEI_CI[len_d]
if cv > 0:
rtn['flg'] = True
#DRAWICON(DI_BEI_CI,DEA,1)
#DRAWTEXT(DI_BEI_CI,DEA,'DI_BEI_CI'),COLORRED
#DRAWICON(DING_BEI_CI,DIFF,2)
#DRAWTEXT(DING_BEI_CI,DIFF,'DING_BEI_CI'),COLORBLUE
return rtn
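# For reference, udf_macd_zq returns a dict keyed by the MACD signals evaluated
# on the last bar: DI_JIN, ER_JIN, DI_BEI_LI, DING_BEI_LI, SAN_JIN, DI_BEI_CI
# and DING_BEI_CI, with 'flg' set to True as soon as any of them fires.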
def udf_yao_check_df(data):
return udf_yao_check(data.close, data.open, data.high, data.low, data.vol)
def udf_yao_check(C,OPEN,HIGH,LOW,VOL):
    ## "Monster stock" (yaogu) screening formula
VAR0 = (3 * (SMA(((C - LLV(LOW,21)) / (HHV(HIGH,34) - LLV(LOW,21))) * 100,5,1))) - (2 * (SMA(SMA(((C - LLV(LOW,21)) / (HHV(HIGH,13) - LLV(LOW,8))) * 100,5,1),3,1)))
VAR1 = 10
VAR2 = MA(C,5)
VAR3 = MA(C,10)
VAR4 = VAR2 > VAR3
VAR5 = ((OPEN + HIGH) + LOW) / 3
VAR6 = EMA(VAR5,4)
VAR7 = C * VOL
VAR8 = EMA(((((EMA(VAR7,3) / EMA(VOL,3)) + (EMA(VAR7,6) / EMA(VOL,6))) + (EMA(VAR7,12) / EMA(VOL,12))) + (EMA(VAR7,24) / EMA(VOL,24))) / 4,13)
VAR9 = VAR6 > VAR8
# FLCS = CROSS(VAR0,VAR1) AND VAR4,COLORMAGENTA
result=pd.DataFrame({"cross":CROSS(VAR0, VAR1), "var4":VAR4, "var9": VAR9})
# FLCS = CROSS(VAR0,VAR1) AND VAR4,COLORMAGENTA
result['FLCS'] = result.apply(lambda x : x['cross'] > 0 and x['var4'], axis=1)
# FLTP = (CROSS(VAR0,VAR1) AND VAR4) AND VAR9,COLORRED,LINETHICK2
result['FLTP'] = result.apply(lambda x : x['cross'] > 0 and x['var4'] and x['var9'], axis=1)
return result
def udf_ctlsb_check(C,N1=2,N2=21,N3=20,N4=42):
BL=EMA(C,N1)
SL=EMA(SLOPE(C,N2)*N3 + C, N4)
BF=SINGLE_CROSS(BL,SL)
SF=SINGLE_CROSS(SL,BL)
return {'buy':BF,'sell':SF}
# 买线 (buy line): EMA(C,2),COLOR0000AA;
# 卖线 (sell line): EMA(SLOPE(C,21)*20+C,42),POINTDOT,COLOR0000CC,LINETHICK3;
# BUY:=CROSS(买线,卖线);    i.e. the buy line crossing above the sell line
# SEL:=CROSS(卖线,买线);    i.e. the sell line crossing above the buy line
# DRAWTEXT(BUY,LOW*0.99,'B'),COLORF00FF0,LINETHICK5;
# DRAWTEXT(SEL,HIGH*1.01,'S'),COLORWHITE,LINETHICK5;
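# Hedged usage sketch: given a close-price series C (e.g. df.close),
#   sig = udf_ctlsb_check(C)
# returns {'buy': ..., 'sell': ...}, the buy-line/sell-line cross signals as
# computed by SINGLE_CROSS from the accompanying talib_series module.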
|
{"hexsha": "080e4566e264f52642e55eeab1ea93a25a56d266", "size": 10352, "ext": "py", "lang": "Python", "max_stars_repo_path": "easyquant/indicator/udf_formula.py", "max_stars_repo_name": "dizzy21c/easyqtrs", "max_stars_repo_head_hexsha": "4704674d2175d40afdc306afd8a002a486c83220", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-12-30T13:43:13.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-23T13:39:54.000Z", "max_issues_repo_path": "easyquant/indicator/udf_formula.py", "max_issues_repo_name": "dizzy21c/easyqtrs", "max_issues_repo_head_hexsha": "4704674d2175d40afdc306afd8a002a486c83220", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "easyquant/indicator/udf_formula.py", "max_forks_repo_name": "dizzy21c/easyqtrs", "max_forks_repo_head_hexsha": "4704674d2175d40afdc306afd8a002a486c83220", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2021-10-22T01:44:47.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-05T05:49:20.000Z", "avg_line_length": 29.6618911175, "max_line_length": 166, "alphanum_fraction": 0.6309891808, "include": true, "reason": "import numpy", "num_tokens": 4806}
|
import tensorflow as tf
import numpy as np
from sklearn import datasets
import math
import sys
neurons = int(sys.argv[1])
val_checks = int(sys.argv[2])
num_epochs = int(sys.argv[3])
iris = datasets.load_iris()
x = iris.data
y = iris.target
perm = np.random.permutation(150)
x = x[perm,:]
y = y[perm]
y_shaped = np.zeros((150, 3))
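# One-hot encode the three iris classes into a (150, 3) target matrix.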
for i in range(150):
if y[i] == 0:
y_shaped[i,0] = 1
elif y[i] == 1:
y_shaped[i,1] = 1
else:
y_shaped[i,2] = 1
x_train = x[:100,:]
y_train = y_shaped[:100]
x_test = x[100:125,:]
y_test = y_shaped[100:125]
x_val = x[125:150,:]
y_val = y_shaped[125:150]
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64)
val_dataset = tf.data.Dataset.from_tensor_slices((x_val, y_val))
val_dataset = val_dataset.batch(64)
earlystop_callback = tf.keras.callbacks.EarlyStopping(
monitor='val_loss', min_delta=0, patience=val_checks
)
model = tf.keras.models.Sequential([
tf.keras.layers.Dense(neurons, activation='tanh'),
tf.keras.layers.Dense(3, activation='softmax')
])
model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy','AUC'])
history = model.fit(train_dataset, epochs=num_epochs, validation_data=val_dataset, validation_freq=1, callbacks=[earlystop_callback])
result = model.evaluate(x_test, y_test)
print("FINAL_RESULTS")
print(str(result[0]))
print(str(result[1]))
print(str(result[2]))
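# Example invocation (a hedged sketch; assumes this script is saved as model.py):
#   python model.py 10 5 100
# i.e. 10 hidden neurons, an early-stopping patience of 5 validation checks,
# and at most 100 training epochs.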
|
{"hexsha": "e0b99d28cd33a4bb223103741bd26820176e1ed5", "size": 1586, "ext": "py", "lang": "Python", "max_stars_repo_path": "resources/python/model.py", "max_stars_repo_name": "SvelaT/laravel-app", "max_stars_repo_head_hexsha": "d82b367ef2a9d2f2acc40087ac3a47c51edf4174", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "resources/python/model.py", "max_issues_repo_name": "SvelaT/laravel-app", "max_issues_repo_head_hexsha": "d82b367ef2a9d2f2acc40087ac3a47c51edf4174", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "resources/python/model.py", "max_forks_repo_name": "SvelaT/laravel-app", "max_forks_repo_head_hexsha": "d82b367ef2a9d2f2acc40087ac3a47c51edf4174", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.671641791, "max_line_length": 133, "alphanum_fraction": 0.6670870113, "include": true, "reason": "import numpy", "num_tokens": 427}
|
"""
Support recovery on MEG data
============================
This example compares several methods that recover the support in the MEG/EEG
source localization problem with statistical guarantees. Here we work
with two datasets that study three different tasks (visual, audio, somato).
We reproduce the real data experiment of Chevalier et al. (2020) [1]_,
which shows the benefit of (ensemble) clustered inference such as
(ensemble of) clustered desparsified Multi-Task Lasso ((e)cd-MTLasso)
over standard approach such as sLORETA. Specifically, it retrieves
the support using a natural threshold (not computed a posteriori)
of the estimated parameter. The estimated support enjoys statistical
guarantees.
References
----------
.. [1] Chevalier, J. A., Gramfort, A., Salmon, J., & Thirion, B. (2020).
Statistical control for spatio-temporal MEG/EEG source imaging with
desparsified multi-task Lasso. In NeurIPS 2020-34h Conference on
Neural Information Processing Systems.
"""
import os
import numpy as np
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import mne
from scipy.sparse.csgraph import connected_components
from mne.datasets import sample, somato
from mne.inverse_sparse.mxne_inverse import _prepare_gain, _make_sparse_stc
from mne.minimum_norm import make_inverse_operator, apply_inverse
from sklearn.cluster import FeatureAgglomeration
from sklearn.metrics.pairwise import pairwise_distances
from hidimstat.clustered_inference import clustered_inference
from hidimstat.ensemble_clustered_inference import \
ensemble_clustered_inference
from hidimstat.stat_tools import zscore_from_pval
##############################################################################
# Specific preprocessing functions
# --------------------------------
# The functions below are used to load or preprocess the data or to put
# the solution in a convenient format. If you are reading this example
# for the first time, you should skip this section.
#
# The following function loads the data from the sample dataset.
def _load_sample(cond):
'''Load data from the sample dataset'''
# Get data paths
subject = 'sample'
data_path = sample.data_path()
fwd_fname_suffix = 'MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
fwd_fname = os.path.join(data_path, fwd_fname_suffix)
ave_fname = os.path.join(data_path, 'MEG/sample/sample_audvis-ave.fif')
cov_fname_suffix = 'MEG/sample/sample_audvis-shrunk-cov.fif'
    cov_fname = os.path.join(data_path, cov_fname_suffix)
subjects_dir = os.path.join(data_path, 'subjects')
if cond == 'audio':
condition = 'Left Auditory'
elif cond == 'visual':
condition = 'Left visual'
# Read noise covariance matrix
noise_cov = mne.read_cov(cov_fname)
# Read forward matrix
forward = mne.read_forward_solution(fwd_fname)
# Handling average file
evoked = mne.read_evokeds(ave_fname, condition=condition,
baseline=(None, 0))
evoked = evoked.pick_types('grad')
# Selecting relevant time window
evoked.plot()
t_min, t_max = 0.05, 0.1
t_step = 0.01
pca = False
return (subject, subjects_dir, noise_cov, forward, evoked,
t_min, t_max, t_step, pca)
##############################################################################
# The next function loads the data from the somato dataset.
def _load_somato(cond):
'''Load data from the somato dataset'''
# Get data paths
data_path = somato.data_path()
subject = '01'
subjects_dir = data_path + '/derivatives/freesurfer/subjects'
raw_fname = os.path.join(data_path, f'sub-{subject}', 'meg',
f'sub-{subject}_task-{cond}_meg.fif')
fwd_fname = os.path.join(data_path, 'derivatives', f'sub-{subject}',
f'sub-{subject}_task-{cond}-fwd.fif')
# Read evoked
raw = mne.io.read_raw_fif(raw_fname)
events = mne.find_events(raw, stim_channel='STI 014')
reject = dict(grad=4000e-13, eog=350e-6)
picks = mne.pick_types(raw.info, meg=True, eeg=True, eog=True)
event_id, tmin, tmax = 1, -.2, .25
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
reject=reject, preload=True)
evoked = epochs.average()
evoked = evoked.pick_types('grad')
# evoked.plot()
# Compute noise covariance matrix
noise_cov = mne.compute_covariance(epochs, rank='info', tmax=0.)
# Read forward matrix
forward = mne.read_forward_solution(fwd_fname)
# Selecting relevant time window: focusing on early signal
t_min, t_max = 0.03, 0.05
t_step = 1.0 / 300
# We must reduce the whitener since data were preprocessed for removal
# of environmental noise with maxwell filter leading to an effective
# number of 64 samples.
pca = True
return (subject, subjects_dir, noise_cov, forward, evoked,
t_min, t_max, t_step, pca)
##############################################################################
# The function below preprocesses the raw M/EEG data; notably, it computes the
# whitened MEG/EEG measurements and prepares the gain matrix.
def preprocess_meg_eeg_data(evoked, forward, noise_cov, loose=0., depth=0.,
pca=False):
"""Preprocess MEG or EEG data to produce the whitened MEG/EEG measurements
(target) and the preprocessed gain matrix (design matrix). This function
is mainly wrapping the `_prepare_gain` MNE function.
Parameters
----------
evoked : instance of mne.Evoked
The evoked data.
forward : instance of Forward
The forward solution.
noise_cov : instance of Covariance
The noise covariance.
loose : float in [0, 1] or 'auto'
Value that weights the source variances of the dipole components
that are parallel (tangential) to the cortical surface. If loose
is 0 then the solution is computed with fixed orientation.
If loose is 1, it corresponds to free orientations.
The default value ('auto') is set to 0.2 for surface-oriented source
space and set to 1.0 for volumic or discrete source space.
See for details:
https://mne.tools/stable/auto_tutorials/inverse/35_dipole_orientations.html?highlight=loose
depth : None or float in [0, 1]
Depth weighting coefficients. If None, no depth weighting is performed.
pca : bool, optional (default=False)
If True, whitener is reduced.
If False, whitener is not reduced (square matrix).
Returns
-------
G : array, shape (n_channels, n_dipoles)
The preprocessed gain matrix. If pca=True then n_channels is
effectively equal to the rank of the data.
M : array, shape (n_channels, n_times)
The whitened MEG/EEG measurements. If pca=True then n_channels is
effectively equal to the rank of the data.
forward : instance of Forward
The preprocessed forward solution.
"""
all_ch_names = evoked.ch_names
    # Handle depth weighting and whitening (no weights are used here)
forward, G, gain_info, whitener, _, _ = \
_prepare_gain(forward, evoked.info, noise_cov, pca=pca, depth=depth,
loose=loose, weights=None, weights_min=None, rank=None)
# Select channels of interest
sel = [all_ch_names.index(name) for name in gain_info['ch_names']]
M = evoked.data[sel]
M = np.dot(whitener, M)
return G, M, forward
##############################################################################
# The next function translates the solution in a readable format for the
# MNE plotting functions that require a Source Time Course (STC) object.
def _compute_stc(zscore_active_set, active_set, evoked, forward):
"""Wrapper of `_make_sparse_stc`"""
X = np.atleast_2d(zscore_active_set)
if X.shape[1] > 1 and X.shape[0] == 1:
X = X.T
stc = _make_sparse_stc(X, active_set, forward, tmin=evoked.times[0],
tstep=1. / evoked.info['sfreq'])
return stc
##############################################################################
# The function below will be used to modify the connectivity matrix
# to avoid multiple warnings when we run the clustering algorithm.
def _fix_connectivity(X, connectivity, affinity):
"""Complete the connectivity matrix if necessary"""
# Convert connectivity matrix into LIL format
connectivity = connectivity.tolil()
# Compute the number of nodes
n_connected_components, labels = connected_components(connectivity)
if n_connected_components > 1:
for i in range(n_connected_components):
idx_i = np.where(labels == i)[0]
Xi = X[idx_i]
for j in range(i):
idx_j = np.where(labels == j)[0]
Xj = X[idx_j]
D = pairwise_distances(Xi, Xj, metric=affinity)
ii, jj = np.where(D == np.min(D))
ii = ii[0]
jj = jj[0]
connectivity[idx_i[ii], idx_j[jj]] = True
connectivity[idx_j[jj], idx_i[ii]] = True
return connectivity, n_connected_components
##############################################################################
# Downloading data
# ----------------
#
# After choosing a task, we run the function that loads the data to get
# the corresponding evoked, forward and noise covariance matrices.
# Choose the experiment (task)
list_cond = ['audio', 'visual', 'somato']
cond = list_cond[2]
print(f"Let's process the condition: {cond}")
# Load the data
if cond in ['audio', 'visual']:
sub, subs_dir, noise_cov, forward, evoked, t_min, t_max, t_step, pca = \
_load_sample(cond)
elif cond == 'somato':
sub, subs_dir, noise_cov, forward, evoked, t_min, t_max, t_step, pca = \
_load_somato(cond)
##############################################################################
# Preparing data for clustered inference
# --------------------------------------
#
# For clustered inference we need the targets ``Y``, the design matrix ``X``
# and the ``connectivity`` matrix, which is a sparse adjacency matrix.
# Collecting features' connectivity
connectivity = mne.source_estimate.spatial_src_adjacency(forward['src'])
# Cropping evoked according to the relevant time window
evoked.crop(tmin=t_min, tmax=t_max)
# Choosing frequency and number of clusters used for compression.
# Reducing the frequency to 100Hz to make inference faster
step = int(t_step * evoked.info['sfreq'])
evoked.decimate(step)
t_min = evoked.times[0]
t_step = 1. / evoked.info['sfreq']
# Preprocessing MEG data
X, Y, forward = preprocess_meg_eeg_data(evoked, forward, noise_cov, pca=pca)
##############################################################################
# Running clustered inference
# ---------------------------
#
# For MEG data ``n_clusters = 1000`` is generally a good default choice.
# Taking ``n_clusters > 2000`` might lead to underpowered inference.
# Taking ``n_clusters < 500`` might compress the data too much, yielding a
# compressed problem that is not close enough to the original one.
n_clusters = 1000
# Setting theoretical FWER target
fwer_target = 0.1
# Computing the threshold, accounting for the Bonferroni correction
correction_clust_inf = 1. / n_clusters
zscore_threshold = zscore_from_pval((fwer_target / 2) * correction_clust_inf)
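# Sanity check of the arithmetic (a hedged aside, not part of the pipeline):
# with fwer_target = 0.1 and n_clusters = 1000 the corrected level is
# 0.05 / 1000 = 5e-5, which corresponds to a z-score threshold of roughly 3.9.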
# Initializing FeatureAgglomeration object used for the clustering step
connectivity_fixed, _ = \
_fix_connectivity(X.T, connectivity, affinity="euclidean")
ward = FeatureAgglomeration(n_clusters=n_clusters, connectivity=connectivity_fixed)
# Making the inference with the clustered inference algorithm
inference_method = 'desparsified-group-lasso'
beta_hat, pval, pval_corr, one_minus_pval, one_minus_pval_corr = \
clustered_inference(X, Y, ward, n_clusters, method=inference_method)
# Extracting active set (support)
active_set = np.logical_or(pval_corr < fwer_target / 2,
one_minus_pval_corr < fwer_target / 2)
active_set_full = np.copy(active_set)
active_set_full[:] = True
# Translating p-vals into z-scores for nicer visualization
zscore = zscore_from_pval(pval, one_minus_pval)
zscore_active_set = zscore[active_set]
##############################################################################
# Visualization
# -------------
# Now, let us plot the thresholded statistical maps derived from the
# clustered inference algorithm, referred to as cd-MTLasso.
# Let's put the solution into the format supported by the plotting functions
stc = _compute_stc(zscore_active_set, active_set, evoked, forward)
# Plotting parameters
if cond == 'audio':
hemi = 'lh'
view = 'lateral'
elif cond == 'visual':
hemi = 'rh'
view = 'medial'
elif cond == 'somato':
hemi = 'rh'
view = 'lateral'
# Plotting clustered inference solution
mne.viz.set_3d_backend("pyvista")
if active_set.sum() != 0:
max_stc = np.max(np.abs(stc.data))
clim = dict(pos_lims=(3, zscore_threshold, max_stc), kind='value')
brain = stc.plot(subject=sub, hemi=hemi, clim=clim, subjects_dir=subs_dir,
views=view, time_viewer=False)
brain.add_text(0.05, 0.9, f'{cond} - cd-MTLasso', 'title',
font_size=20)
# Hack for nice figures on HiDimStat website
save_fig = False
plot_saved_fig = True
if save_fig:
brain.save_image(f'figures/meg_{cond}_cd-MTLasso.png')
if plot_saved_fig:
brain.close()
img = mpimg.imread(f'figures/meg_{cond}_cd-MTLasso.png')
plt.imshow(img)
plt.axis('off')
plt.show()
interactive_plot = False
if interactive_plot:
brain = \
stc.plot(subject=sub, hemi='both', subjects_dir=subs_dir, clim=clim)
##############################################################################
# Comparison with sLORETA
# ------------------------
# Now, we compare the results derived from cd-MTLasso with the solution
# obtained from one of the most standard approaches: sLORETA.
# Running sLORETA with standard hyper-parameter
lambda2 = 1. / 9
inv = make_inverse_operator(evoked.info, forward, noise_cov, loose=0.,
depth=0., fixed=True)
stc_full = apply_inverse(evoked, inv, lambda2=lambda2, method='sLORETA')
stc_full = stc_full.mean()
# Computing the threshold, accounting for the Bonferroni correction
n_features = stc_full.data.size
correction = 1. / n_features
zscore_threshold_no_clust = zscore_from_pval((fwer_target / 2) * correction)
# Computing estimated support by sLORETA
active_set = np.abs(stc_full.data) > zscore_threshold_no_clust
active_set = active_set.flatten()
# Putting the solution into the format supported by the plotting functions
sLORETA_solution = np.atleast_2d(stc_full.data[active_set]).flatten()
stc = _make_sparse_stc(sLORETA_solution, active_set, forward, stc_full.tmin,
tstep=stc_full.tstep)
# Plotting sLORETA solution
if active_set.sum() != 0:
max_stc = np.max(np.abs(stc.data))
clim = dict(pos_lims=(3, zscore_threshold_no_clust, max_stc), kind='value')
brain = stc.plot(subject=sub, hemi=hemi, clim=clim, subjects_dir=subs_dir,
views=view, time_viewer=False)
brain.add_text(0.05, 0.9, f'{cond} - sLORETA', 'title', font_size=20)
# Hack for nice figures on HiDimStat website
if save_fig:
brain.save_image(f'figures/meg_{cond}_sLORETA.png')
if plot_saved_fig:
brain.close()
img = mpimg.imread(f'figures/meg_{cond}_sLORETA.png')
plt.imshow(img)
plt.axis('off')
plt.show()
##############################################################################
# Analysis of the results
# -----------------------
# While the clustered inference solution always highlights the expected
# cortex (audio, visual or somato-sensory) with a universal predetermined
# threshold, the solution derived from the sLORETA method does not enjoy
# the same property. For the audio task the method is conservative, and
# for the somato task it makes false discoveries (hence it appears
# anti-conservative).
##############################################################################
# Running ensemble clustered inference
# ------------------------------------
#
# To go further it is possible to run the ensemble clustered inference
# algorithm. It might take several minutes on a standard device with
# ``n_jobs=1`` (around 10 min). Just set
# ``run_ensemble_clustered_inference=True`` below.
run_ensemble_clustered_inference = False
if run_ensemble_clustered_inference:
# Making the inference with the ensembled clustered inference algorithm
beta_hat, pval, pval_corr, one_minus_pval, one_minus_pval_corr = \
ensemble_clustered_inference(X, Y, ward, n_clusters,
inference_method=inference_method)
# Extracting active set (support)
active_set = np.logical_or(pval_corr < fwer_target / 2,
one_minus_pval_corr < fwer_target / 2)
active_set_full = np.copy(active_set)
active_set_full[:] = True
# Translating p-vals into z-scores for nicer visualization
zscore = zscore_from_pval(pval, one_minus_pval)
zscore_active_set = zscore[active_set]
# Putting the solution into the format supported by the plotting functions
stc = _compute_stc(zscore_active_set, active_set, evoked, forward)
# Plotting ensemble clustered inference solution
if active_set.sum() != 0:
max_stc = np.max(np.abs(stc._data))
clim = dict(pos_lims=(3, zscore_threshold, max_stc), kind='value')
brain = stc.plot(subject=sub, hemi=hemi, clim=clim,
subjects_dir=subs_dir, views=view,
time_viewer=False)
brain.add_text(0.05, 0.9, f'{cond} - ecd-MTLasso',
'title', font_size=20)
# Hack for nice figures on HiDimStat website
if save_fig:
brain.save_image(f'figures/meg_{cond}_ecd-MTLasso.png')
if plot_saved_fig:
brain.close()
img = mpimg.imread(f'figures/meg_{cond}_ecd-MTLasso.png')
plt.imshow(img)
plt.axis('off')
plt.show()
if interactive_plot:
brain = stc.plot(subject=sub, hemi='both',
subjects_dir=subs_dir, clim=clim)
|
{"hexsha": "fb955c42cb75fce437df668183295e8a0200a20d", "size": 18473, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/plot_meg_data_example.py", "max_stars_repo_name": "ja-che/hidimstat", "max_stars_repo_head_hexsha": "67189c41279613312688de519d7ea635c7da84ae", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 16, "max_stars_repo_stars_event_min_datetime": "2020-04-29T11:45:04.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-25T13:43:27.000Z", "max_issues_repo_path": "examples/plot_meg_data_example.py", "max_issues_repo_name": "ja-che/hidimstat", "max_issues_repo_head_hexsha": "67189c41279613312688de519d7ea635c7da84ae", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 19, "max_issues_repo_issues_event_min_datetime": "2020-04-29T13:22:13.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-15T20:32:34.000Z", "max_forks_repo_path": "examples/plot_meg_data_example.py", "max_forks_repo_name": "ja-che/hidimstat", "max_forks_repo_head_hexsha": "67189c41279613312688de519d7ea635c7da84ae", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-05-05T20:47:53.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-25T09:55:58.000Z", "avg_line_length": 37.09437751, "max_line_length": 99, "alphanum_fraction": 0.6515454988, "include": true, "reason": "import numpy,from scipy", "num_tokens": 4419}
|
# DAY 2 PROBLEM 2 ADVENT OF CODE
# Get the final position of the sub starting from (0,0)
commands = """forward 2
down 4
down 1
down 4
forward 3
down 6
down 5
forward 3
forward 8
down 2
down 3
up 8
down 5
up 7
down 7
forward 5
up 2
down 6
forward 7
forward 1
forward 2
forward 7
up 7
forward 6
down 3
down 1
up 9
down 2
up 1
down 1
up 6
forward 6
down 7
forward 6
up 1
down 6
forward 2
up 7
forward 4
forward 8
forward 7
down 7
forward 8
down 1
down 6
down 7
forward 4
down 3
up 7
down 5
down 9
up 8
up 4
down 2
down 3
up 7
forward 6
forward 6
forward 8
forward 2
up 5
down 8
down 3
down 3
down 4
down 9
down 6
up 6
forward 4
down 6
forward 3
forward 3
down 4
down 8
down 2
up 5
up 5
forward 3
forward 5
down 7
forward 6
forward 9
forward 8
forward 2
down 3
down 3
down 7
down 1
down 1
down 1
down 2
down 8
down 6
forward 6
up 1
up 6
down 7
down 1
forward 1
up 2
up 8
up 8
forward 2
down 1
down 8
down 7
down 1
forward 1
down 9
up 3
down 3
forward 2
down 3
up 6
down 2
forward 7
down 9
down 6
down 1
forward 6
down 4
down 1
down 3
forward 3
down 5
forward 9
down 5
down 7
up 8
forward 8
forward 8
down 6
down 1
forward 8
down 4
up 4
up 4
up 2
forward 2
forward 2
down 1
up 8
down 1
down 7
forward 5
down 9
down 2
up 3
down 1
down 5
forward 6
down 7
up 3
forward 7
down 4
down 3
forward 4
up 8
down 4
forward 4
forward 2
forward 5
down 5
up 2
forward 4
down 4
forward 6
down 4
forward 1
down 5
forward 2
forward 2
down 8
forward 4
forward 7
down 3
up 3
forward 2
forward 6
forward 8
down 2
forward 4
down 2
up 9
down 9
down 2
forward 5
up 4
forward 2
down 2
down 3
forward 1
down 2
forward 8
forward 8
down 4
forward 6
down 3
down 3
down 5
forward 8
forward 4
forward 1
up 4
up 2
forward 8
down 8
forward 2
forward 6
up 1
up 5
forward 2
forward 4
forward 7
forward 8
forward 2
down 3
down 1
down 9
down 6
up 5
up 6
forward 6
down 3
down 2
down 1
forward 5
forward 2
forward 7
down 8
down 7
forward 7
up 8
forward 7
down 1
up 4
forward 9
forward 4
forward 1
down 3
down 9
down 7
forward 1
down 3
forward 3
down 4
down 7
forward 4
up 6
down 8
up 1
forward 6
forward 1
down 7
down 8
up 9
up 4
down 3
down 7
forward 8
up 2
up 6
forward 8
down 1
up 4
up 4
forward 8
down 2
down 4
down 3
forward 5
down 8
forward 1
down 2
forward 9
forward 3
up 6
down 6
forward 6
forward 4
down 6
down 3
down 3
forward 6
down 5
up 4
down 9
down 3
down 6
up 9
forward 6
down 2
forward 7
up 8
down 3
down 7
down 9
forward 6
down 1
forward 2
down 1
down 3
down 3
forward 5
forward 2
up 5
forward 4
up 7
down 9
forward 7
forward 3
down 6
forward 1
down 1
up 8
down 9
up 3
down 7
up 9
forward 7
down 7
down 9
forward 9
forward 7
up 9
down 7
down 2
down 7
up 2
down 3
down 9
down 6
forward 7
forward 8
forward 8
forward 6
forward 9
forward 4
down 4
down 5
down 7
forward 6
forward 2
forward 4
forward 9
down 4
forward 6
down 7
up 1
down 7
forward 9
forward 7
down 4
down 3
up 6
forward 8
forward 7
down 8
forward 4
up 6
up 4
forward 9
forward 4
forward 4
forward 7
down 1
up 6
forward 8
forward 3
up 6
forward 4
down 1
up 2
forward 1
down 5
forward 5
up 4
down 6
down 3
up 8
forward 9
down 2
forward 4
forward 8
down 9
forward 5
forward 2
down 9
down 8
forward 8
down 7
up 6
forward 1
up 9
up 3
forward 9
down 6
forward 9
down 3
down 3
forward 7
forward 5
down 8
down 9
down 3
down 6
up 8
down 9
forward 8
down 7
down 5
down 1
up 4
down 9
forward 6
forward 9
up 6
up 4
forward 3
forward 2
forward 1
down 1
down 2
forward 8
up 6
forward 5
up 4
down 1
forward 5
down 3
down 6
up 7
forward 2
forward 6
forward 7
forward 4
down 5
down 4
forward 4
down 6
up 2
up 2
forward 7
forward 3
down 8
down 1
down 8
forward 7
forward 7
up 5
forward 4
up 8
down 9
down 4
down 4
forward 5
down 1
forward 2
down 6
up 4
down 8
down 1
down 9
down 5
up 5
forward 4
down 2
down 8
down 4
forward 4
forward 5
down 8
up 9
forward 7
forward 6
down 8
down 3
up 7
down 7
forward 2
forward 5
forward 7
down 9
up 1
down 6
down 2
forward 6
forward 3
forward 3
up 9
forward 4
down 5
down 7
forward 8
forward 6
forward 5
down 9
down 5
down 1
down 7
forward 9
forward 8
down 2
down 4
down 1
up 5
up 5
forward 5
down 3
down 1
forward 8
up 9
up 3
down 3
up 3
up 5
forward 8
down 3
up 3
down 9
up 6
up 8
forward 5
up 2
down 6
forward 3
down 2
down 4
forward 9
forward 6
forward 3
up 5
down 9
down 7
forward 9
forward 7
forward 5
up 5
up 1
down 6
forward 4
forward 4
down 7
down 1
up 3
forward 6
forward 4
down 1
forward 5
forward 3
forward 1
forward 3
up 3
up 9
down 7
down 4
forward 8
down 8
down 3
up 2
down 8
forward 5
down 7
forward 6
down 9
up 5
forward 4
down 2
forward 6
down 8
down 7
forward 8
forward 5
down 2
forward 7
forward 5
forward 7
down 8
forward 5
down 8
down 6
down 7
down 9
forward 9
down 6
forward 8
up 6
up 1
down 5
forward 1
forward 7
up 2
up 5
up 6
down 5
down 5
forward 7
down 9
down 2
forward 9
forward 3
down 5
up 2
up 8
forward 5
forward 8
up 1
forward 3
forward 1
up 4
forward 1
down 9
down 6
forward 1
down 4
down 4
forward 9
down 3
up 6
down 3
forward 6
forward 6
down 3
forward 6
down 3
down 1
forward 3
down 7
up 9
forward 1
down 7
down 2
up 8
down 1
down 9
down 1
down 4
down 6
down 3
down 7
down 2
down 9
down 2
forward 4
up 3
down 4
up 4
down 1
forward 5
forward 7
down 7
forward 9
forward 6
down 8
forward 6
forward 7
up 3
down 3
up 6
forward 7
up 4
forward 4
down 1
up 8
forward 7
down 2
up 6
forward 1
forward 3
up 9
up 8
up 5
forward 7
up 5
down 6
forward 7
forward 7
down 4
down 3
forward 2
down 8
up 9
up 6
forward 7
forward 5
down 9
down 2
up 5
down 3
down 3
up 5
down 8
forward 7
down 4
down 2
up 9
down 5
down 8
down 5
down 6
forward 9
down 3
down 8
forward 3
down 1
down 9
forward 1
down 3
up 9
up 3
forward 8
up 2
down 4
up 5
up 4
down 9
down 5
up 3
forward 2
down 8
forward 8
forward 7
up 4
down 9
down 6
up 1
forward 9
up 8
forward 4
up 3
down 4
up 2
up 7
down 2
forward 3
down 8
down 9
up 7
up 8
forward 3
forward 1
forward 7
forward 5
forward 9
forward 2
up 1
down 1
up 4
forward 1
up 9
forward 7
forward 2
down 6
down 5
forward 9
forward 4
down 6
down 6
up 8
down 3
up 8
down 3
forward 2
down 1
down 1
forward 5
down 1
forward 9
up 8
forward 2
down 5
up 8
up 8
forward 8
forward 8
forward 3
forward 2
forward 8
forward 9
forward 8
forward 6
forward 4
up 7
forward 9
forward 8
up 7
forward 6
forward 9
forward 8
down 7
forward 9
down 4
down 1
up 1
up 9
forward 2
down 6
down 2
down 8
down 6
up 8
forward 7
up 9
forward 5
forward 4
forward 8
up 4
forward 4
up 6
forward 7
forward 1
up 8
down 6
forward 7
forward 3
forward 2
down 4
forward 4
down 7
down 6
down 2
up 3
up 5
down 7
down 9
up 8
down 1
up 1
down 8
up 8
forward 8
down 6
down 1
down 6
forward 3
down 9
down 5
up 3
down 1
down 1
forward 4
down 4
up 3
forward 8
up 4
down 3
down 5
down 3
forward 6
forward 3
down 2
forward 9
forward 3
forward 2
down 2
forward 6
down 1
down 1
forward 5
forward 4
forward 6
down 7
forward 7
forward 3
forward 1
up 3
down 6
forward 1
up 9
forward 9
forward 5
forward 3
forward 3
down 3
up 8
forward 5
up 6
forward 2
down 7
forward 2
forward 8
forward 8
forward 3
up 9
down 5
down 3
forward 7
up 9
forward 4
down 1
down 3
down 5
down 2
forward 9
up 6
down 3
down 7
down 3
up 6
forward 3
down 4
forward 2
down 8
down 2
forward 7
down 2
down 9
forward 1
down 1
down 9
down 6
forward 5
down 1
up 1
forward 5
forward 4
forward 9
down 3
forward 3
forward 5
down 9
forward 9
down 8
down 2
forward 1
up 1
down 5
forward 2
up 9
forward 9
forward 7
forward 9
forward 3
down 7
forward 2
down 4
up 3
down 7
down 6
forward 2
down 2
forward 8
up 9
down 1
forward 7
down 8
forward 3
down 2
down 5
down 5
down 3
forward 1
up 9
up 9
down 8
down 6
up 7
forward 7
down 4
forward 6
down 9
up 5
up 6
forward 4
forward 1
forward 1
down 7
down 8
down 2
down 4
down 3
up 8
down 3
forward 3
forward 8
up 3
down 2
forward 4
down 3
forward 5
up 1
down 9
down 1
down 4
forward 3
forward 6
forward 7
forward 2
forward 9
forward 1
forward 6
forward 7
forward 2
up 1
down 6
down 1
forward 6
down 6
down 5
forward 1"""
"""
parse_commands(comms::AbstractString) ::Vector{Vector{AbstractString}}
Get the list of commands and their parameters.
"""
function parse_commands(comms::AbstractString) ::Vector{Vector{AbstractString}}
return [split(x, " ") for x in split(comms, "\n")]
end
"""
execute_commands(comms::Vector{Vector{AbstractString}}) ::Vector{Integer}
Get the final position of the sub.
"""
function execute_commands(comms::Vector{Vector{AbstractString}}) ::Vector{Integer}
# Add third item for aim.
position::Vector{Integer} = [0, 0, 0]
for (command, param) in comms
param = parse(Int32, param)
if (command == "forward")
position[1] += param
position[2] += (position[3] * param)
# up and down now change aim.
elseif (command == "up")
position[3] -= param
elseif (command == "down")
position[3] += param
end
end
return position[1:2]
end
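# Pipe the raw input through the parser and the executor, then multiply the
# final horizontal position by the final depth to obtain the puzzle answer.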
commands |> parse_commands |> execute_commands |> prod
|
{"hexsha": "fafb5573762afa90a99a2cf2b66f44a0195880a1", "size": 8876, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "2021/day2/dive_2.jl", "max_stars_repo_name": "CrosleyZack/advent_of_code", "max_stars_repo_head_hexsha": "5dee29c845b88027d1c4b17900e398fe9b3c1e44", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "2021/day2/dive_2.jl", "max_issues_repo_name": "CrosleyZack/advent_of_code", "max_issues_repo_head_hexsha": "5dee29c845b88027d1c4b17900e398fe9b3c1e44", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "2021/day2/dive_2.jl", "max_forks_repo_name": "CrosleyZack/advent_of_code", "max_forks_repo_head_hexsha": "5dee29c845b88027d1c4b17900e398fe9b3c1e44", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 8.5593056895, "max_line_length": 82, "alphanum_fraction": 0.7283686345, "num_tokens": 4287}
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Simple class to run post processing of Triton Inference outputs."""
import os
import numpy as np
from tao_triton.python.postprocessing.postprocessor import Postprocessor
class MultitaskClassificationPostprocessor(Postprocessor):
"""Class to run post processing of Triton Tensors."""
def __init__(self, batch_size, frames, output_path, data_format):
"""Initialize a post processor class for a multitaskclassification model.
Args:
batch_size (int): Number of images in the batch.
frames (list): List of images.
output_path (str): Unix path to the output rendered images and labels.
data_format (str): Order of the input model dimensions.
"channels_first": CHW order.
"channels_last": HWC order.
"""
super().__init__(batch_size, frames, output_path, data_format)
self.output_name_0 = "base_color/Softmax"
self.output_name_1 = "category/Softmax"
self.output_name_2 = "season/Softmax"
self.task_name = ["base_color", "category", "season"]
self.class_mapping = {"base_color": {"0": "Black", "1": "Blue", "2": "Brown", "3": "Green", \
"4": "Grey", "5": "Navy Blue", "6": "Pink", "7": "Purple", "8": "Red", \
"9": "Silver", "10": "White"},
"category": {"0": "Bags", "1": "Bottomwear", "2": "Eyewear", "3": "Fragrance", \
"4": "Innerwear", "5": "Jewellery", "6": "Sandal", "7": "Shoes", "8": "Topwear", \
"9": "Watches"},
"season": {"0": "Fall", "1": "Spring", "2": "Summer", "3": "Winter"}}
def apply(self, output_tensors, this_id, render=True, batching=True):
"""Apply the post processor to the outputs to the classification outputs."""
output_array_0 = output_tensors.as_numpy(self.output_name_0)
output_array_1 = output_tensors.as_numpy(self.output_name_1)
output_array_2 = output_tensors.as_numpy(self.output_name_2)
output_array = [output_array_0 , output_array_1 , output_array_2]
if not os.path.exists(self.output_path):
os.makedirs(self.output_path)
output_file = os.path.join(self.output_path, "results.txt")
for image_idx in range(self.batch_size):
current_idx = (int(this_id) - 1) * self.batch_size + image_idx
if current_idx >= len(self.frames):
break
current_frame = self.frames[current_idx]
img_in_path = current_frame._image_path
filename = os.path.basename(current_frame._image_path)
print("filename is {}".format(filename))
with open(output_file, "a") as j:
j.write("\n{}:\n".format(filename))
for idx, task in enumerate(self.task_name):
pred = output_array[idx].reshape(-1)
print("Task {}:".format(task))
#print("Predictions: {}".format(pred))
class_name = self.class_mapping[task][str(np.argmax(pred))]
print("Class name = {}".format(class_name))
print('********')
with open(output_file, "a") as j:
j.write("{}: {}\n".format(task,class_name))
|
{"hexsha": "4c39dd45bd94f2646af7527fd33afa7ba43f8645", "size": 4485, "ext": "py", "lang": "Python", "max_stars_repo_path": "tao_triton/python/postprocessing/multitask_classification_postprocessor.py", "max_stars_repo_name": "thtang-nv/tao-toolkit-triton-apps", "max_stars_repo_head_hexsha": "de72ae4fe96986db620b542feed917f4430ac768", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tao_triton/python/postprocessing/multitask_classification_postprocessor.py", "max_issues_repo_name": "thtang-nv/tao-toolkit-triton-apps", "max_issues_repo_head_hexsha": "de72ae4fe96986db620b542feed917f4430ac768", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tao_triton/python/postprocessing/multitask_classification_postprocessor.py", "max_forks_repo_name": "thtang-nv/tao-toolkit-triton-apps", "max_forks_repo_head_hexsha": "de72ae4fe96986db620b542feed917f4430ac768", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 49.8333333333, "max_line_length": 112, "alphanum_fraction": 0.6234113712, "include": true, "reason": "import numpy", "num_tokens": 1007}
|
[STATEMENT]
lemma norm_sq_mtx_def3: "\<parallel>A\<parallel> = (SUP x. (\<parallel>A *\<^sub>V x\<parallel>) / (\<parallel>x\<parallel>))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<parallel>A\<parallel> = (SUP x. \<parallel>A *\<^sub>V x\<parallel> / \<parallel>x\<parallel>)
[PROOF STEP]
unfolding norm_sq_mtx_def onorm_def sq_mtx_vec_mult_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (SUP x. \<parallel>to_vec A *v x\<parallel> / \<parallel>x\<parallel>) = (SUP x. \<parallel>map_fun to_vec id (*v) A x\<parallel> / \<parallel>x\<parallel>)
[PROOF STEP]
by simp
|
{"llama_tokens": 237, "file": "Matrices_for_ODEs_SQ_MTX", "length": 2}
|
#ifndef GLOBAL_TO_LOCAL_H
#define GLOBAL_TO_LOCAL_H
#include <ros/ros.h>
#include <uav_ros_lib/topic_handler.hpp>
#include <Eigen/Dense>
#include <mavros_msgs/HomePosition.h>
namespace tf_util {
/**
* @brief This class is used to transform global (Lat, Lon, Alt) to local (East, North,
 * Up) coordinates and the other way around. It sets up a subscriber to
* mavros/global_position/home and transforms coordinates with respect to the home
* position found on that topic.
*
*/
class GlobalToLocal
{
public:
/**
* @brief Construct a new Global To Local object. Sets up a subscriber to the
* mavros/global_position/home.
*
* @param nh A ROS node handle
*/
explicit GlobalToLocal(ros::NodeHandle &nh);
/**
* @brief Transforms local ENU position to Global Lat,Lon,Alt with respect to the
* obtained home position from mavros/global_position/home.
*
* @param t_enuX ENU position x component
* @param t_enuY ENU position y component
 * @param t_enuZ ENU position z component
* @return Eigen::Vector3d Global position (Lat, Lon, Alt)
*/
Eigen::Vector3d toGlobal(double t_enuX, double t_enuY, double t_enuZ);
  /**
   * @brief Transforms global (Lat, Lon, Alt) coordinates to a local ENU position
   * with respect to the obtained home position from mavros/global_position/home.
   *
   * @param lat Global latitude
   * @param lon Global longitude
   * @param alt Global altitude
   * @param altitudeRelative If true, alt is interpreted as relative to the home
   * altitude
   * @return Eigen::Vector3d Local position (East, North, Up)
   */
Eigen::Vector3d
toLocal(double lat, double lon, double alt, bool altitudeRelative = false);
private:
ros_util::TopicHandler<mavros_msgs::HomePosition> m_homeHandler;
};
}// namespace tf_util
#endif
|
{"hexsha": "b4b89b45581903d08898724f894bf15740d9cf61", "size": 1505, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/uav_ros_lib/global_to_local.hpp", "max_stars_repo_name": "lmark1/uav_ros_lib", "max_stars_repo_head_hexsha": "be75f43a498dc54a0f696fdaa5490ebcaecb87c4", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2022-03-22T14:28:46.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-22T14:28:46.000Z", "max_issues_repo_path": "include/uav_ros_lib/global_to_local.hpp", "max_issues_repo_name": "larics/uav_ros_lib", "max_issues_repo_head_hexsha": "be75f43a498dc54a0f696fdaa5490ebcaecb87c4", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2021-01-25T11:46:22.000Z", "max_issues_repo_issues_event_max_datetime": "2021-01-25T11:46:22.000Z", "max_forks_repo_path": "include/uav_ros_lib/global_to_local.hpp", "max_forks_repo_name": "larics/uav_ros_lib", "max_forks_repo_head_hexsha": "be75f43a498dc54a0f696fdaa5490ebcaecb87c4", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2021-05-24T12:45:20.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-24T12:45:20.000Z", "avg_line_length": 26.875, "max_line_length": 87, "alphanum_fraction": 0.707641196, "num_tokens": 405}
|
#!/bin/env python
""" Tool demonstrating how to convert from Python Dictionaries to XML"""
from xmltools import *
try :
from cxmltools import * # Use CXMLtools if possible
except :
pass
import sys
if __name__ == "__main__" :
input_stdin = False
output_stdout = False
if (len(sys.argv)==1) :
# use as filter: stdin and stdout
input_stdin = True
output_stdout = True
elif (len(sys.argv)==2) :
input_stdin = False
output_stdout = True
elif (len(sys.argv)!=3) :
print >> sys.stderr, "usage:" , sys.argv[0] , "[[input.pythondict] output.xml]"
print >> sys.stderr, " With no options, this reads stdin and output to stdout"
sys.exit(1)
if (input_stdin) :
f = sys.stdin
else :
f = file(sys.argv[1], 'r')
arr_dis = ARRAYDISPOSITION_AS_NUMERIC_WRAPPER
if arr_dis == ARRAYDISPOSITION_AS_NUMPY :
# NUMPY needs dtype, float64, etc. for the eval (below) to work
from numpy import *
v = eval(f.read())
xml_options = XML_DUMP_PRETTY | XML_STRICT_HDR | XML_DUMP_STRINGS_BEST_GUESS # Guess as to whether we need quotes
if (output_stdout) :
WriteToXMLStream(v, sys.stdout, "root", xml_options, arr_dis)
else :
WriteToXMLFile(v, sys.argv[2], "root", xml_options, arr_dis)
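# Example (a hedged sketch; in filter mode the tool reads a Python dict literal
# from stdin and writes XML to stdout):
#   echo "{'a': 1, 'b': [1.0, 2.0]}" | python dict2xml.py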
|
{"hexsha": "343396d15a57cf347e3c6f6d90849d2f09d003ab", "size": 1366, "ext": "py", "lang": "Python", "max_stars_repo_path": "deps/PicklingTools170Release/Python/dict2xml.py", "max_stars_repo_name": "dcanelhas/sdf_tracker-LS", "max_stars_repo_head_hexsha": "2685ce41fc1c8ae12d270c5e2b88afc987af9f45", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2017-07-06T12:38:39.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-10T08:06:18.000Z", "max_issues_repo_path": "deps/PicklingTools170Release/Xm/ptools170/python/dict2xml.py", "max_issues_repo_name": "dcanelhas/sdf_tracker-LS", "max_issues_repo_head_hexsha": "2685ce41fc1c8ae12d270c5e2b88afc987af9f45", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2016-01-29T22:57:15.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-29T19:07:09.000Z", "max_forks_repo_path": "Xm/ptools161/python/dict2xml.py", "max_forks_repo_name": "RichIsMyName/PicklingToolsRepo", "max_forks_repo_head_hexsha": "a53f64263bc82cef2f50fa02db90fb643c7e0fe0", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2017-07-24T01:50:42.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-10T08:06:20.000Z", "avg_line_length": 29.0638297872, "max_line_length": 119, "alphanum_fraction": 0.6185944363, "include": true, "reason": "from numpy", "num_tokens": 363}
|
[STATEMENT]
lemma iteratei_postfixed_correct :
assumes invar: "invar_trie (t :: ('key, 'val) trie)"
shows "set_iterator ((iteratei_postfixed ks0 t)::('key list \<times> 'val, '\<sigma>) set_iterator)
((\<lambda>ksv. (rev (fst ksv) @ ks0, (snd ksv))) ` (map_to_set (lookup_trie t)))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. set_iterator (iteratei_postfixed ks0 t) ((\<lambda>ksv. (rev (fst ksv) @ ks0, snd ksv)) ` map_to_set (lookup_trie t))
[PROOF STEP]
using invar
[PROOF STATE]
proof (prove)
using this:
invar_trie t
goal (1 subgoal):
1. set_iterator (iteratei_postfixed ks0 t) ((\<lambda>ksv. (rev (fst ksv) @ ks0, snd ksv)) ` map_to_set (lookup_trie t))
[PROOF STEP]
proof (induct t arbitrary: ks0)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>vo kvs ks0. \<lbrakk>\<And>k t ks0. \<lbrakk>(k, t) \<in> set kvs; invar_trie t\<rbrakk> \<Longrightarrow> set_iterator (iteratei_postfixed ks0 t) ((\<lambda>ksv. (rev (fst ksv) @ ks0, snd ksv)) ` map_to_set (lookup_trie t)); invar_trie (Trie vo kvs)\<rbrakk> \<Longrightarrow> set_iterator (iteratei_postfixed ks0 (Trie vo kvs)) ((\<lambda>ksv. (rev (fst ksv) @ ks0, snd ksv)) ` map_to_set (lookup_trie (Trie vo kvs)))
[PROOF STEP]
case (Trie vo kvs)
[PROOF STATE]
proof (state)
this:
\<lbrakk>(?k, ?t) \<in> set kvs; invar_trie ?t\<rbrakk> \<Longrightarrow> set_iterator (iteratei_postfixed ?ks0.0 ?t) ((\<lambda>ksv. (rev (fst ksv) @ ?ks0.0, snd ksv)) ` map_to_set (lookup_trie ?t))
invar_trie (Trie vo kvs)
goal (1 subgoal):
1. \<And>vo kvs ks0. \<lbrakk>\<And>k t ks0. \<lbrakk>(k, t) \<in> set kvs; invar_trie t\<rbrakk> \<Longrightarrow> set_iterator (iteratei_postfixed ks0 t) ((\<lambda>ksv. (rev (fst ksv) @ ks0, snd ksv)) ` map_to_set (lookup_trie t)); invar_trie (Trie vo kvs)\<rbrakk> \<Longrightarrow> set_iterator (iteratei_postfixed ks0 (Trie vo kvs)) ((\<lambda>ksv. (rev (fst ksv) @ ks0, snd ksv)) ` map_to_set (lookup_trie (Trie vo kvs)))
[PROOF STEP]
note ind_hyp = Trie(1)
[PROOF STATE]
proof (state)
this:
\<lbrakk>(?k, ?t) \<in> set kvs; invar_trie ?t\<rbrakk> \<Longrightarrow> set_iterator (iteratei_postfixed ?ks0.0 ?t) ((\<lambda>ksv. (rev (fst ksv) @ ?ks0.0, snd ksv)) ` map_to_set (lookup_trie ?t))
goal (1 subgoal):
1. \<And>vo kvs ks0. \<lbrakk>\<And>k t ks0. \<lbrakk>(k, t) \<in> set kvs; invar_trie t\<rbrakk> \<Longrightarrow> set_iterator (iteratei_postfixed ks0 t) ((\<lambda>ksv. (rev (fst ksv) @ ks0, snd ksv)) ` map_to_set (lookup_trie t)); invar_trie (Trie vo kvs)\<rbrakk> \<Longrightarrow> set_iterator (iteratei_postfixed ks0 (Trie vo kvs)) ((\<lambda>ksv. (rev (fst ksv) @ ks0, snd ksv)) ` map_to_set (lookup_trie (Trie vo kvs)))
[PROOF STEP]
note invar = Trie(2)
[PROOF STATE]
proof (state)
this:
invar_trie (Trie vo kvs)
goal (1 subgoal):
1. \<And>vo kvs ks0. \<lbrakk>\<And>k t ks0. \<lbrakk>(k, t) \<in> set kvs; invar_trie t\<rbrakk> \<Longrightarrow> set_iterator (iteratei_postfixed ks0 t) ((\<lambda>ksv. (rev (fst ksv) @ ks0, snd ksv)) ` map_to_set (lookup_trie t)); invar_trie (Trie vo kvs)\<rbrakk> \<Longrightarrow> set_iterator (iteratei_postfixed ks0 (Trie vo kvs)) ((\<lambda>ksv. (rev (fst ksv) @ ks0, snd ksv)) ` map_to_set (lookup_trie (Trie vo kvs)))
[PROOF STEP]
from invar
[PROOF STATE]
proof (chain)
picking this:
invar_trie (Trie vo kvs)
[PROOF STEP]
have dist_fst_kvs : "distinct (map fst kvs)"
and dist_kvs: "distinct kvs"
and invar_child: "\<And>k t. (k, t) \<in> set kvs \<Longrightarrow> invar_trie t"
[PROOF STATE]
proof (prove)
using this:
invar_trie (Trie vo kvs)
goal (1 subgoal):
1. distinct (map fst kvs) &&& distinct kvs &&& (\<And>k t. (k, t) \<in> set kvs \<Longrightarrow> invar_trie t)
[PROOF STEP]
by (simp_all add: Ball_def distinct_map)
\<comment> \<open>root iterator\<close>
[PROOF STATE]
proof (state)
this:
distinct (map fst kvs)
distinct kvs
(?k, ?t) \<in> set kvs \<Longrightarrow> invar_trie ?t
goal (1 subgoal):
1. \<And>vo kvs ks0. \<lbrakk>\<And>k t ks0. \<lbrakk>(k, t) \<in> set kvs; invar_trie t\<rbrakk> \<Longrightarrow> set_iterator (iteratei_postfixed ks0 t) ((\<lambda>ksv. (rev (fst ksv) @ ks0, snd ksv)) ` map_to_set (lookup_trie t)); invar_trie (Trie vo kvs)\<rbrakk> \<Longrightarrow> set_iterator (iteratei_postfixed ks0 (Trie vo kvs)) ((\<lambda>ksv. (rev (fst ksv) @ ks0, snd ksv)) ` map_to_set (lookup_trie (Trie vo kvs)))
[PROOF STEP]
define it_vo :: "('key list \<times> 'val, '\<sigma>) set_iterator"
where "it_vo =
(case vo of None \<Rightarrow> set_iterator_emp
| Some v \<Rightarrow> set_iterator_sng (ks0, v))"
[PROOF STATE]
proof (state)
this:
it_vo = (case vo of None \<Rightarrow> set_iterator_emp | Some v \<Rightarrow> set_iterator_sng (ks0, v))
goal (1 subgoal):
1. \<And>vo kvs ks0. \<lbrakk>\<And>k t ks0. \<lbrakk>(k, t) \<in> set kvs; invar_trie t\<rbrakk> \<Longrightarrow> set_iterator (iteratei_postfixed ks0 t) ((\<lambda>ksv. (rev (fst ksv) @ ks0, snd ksv)) ` map_to_set (lookup_trie t)); invar_trie (Trie vo kvs)\<rbrakk> \<Longrightarrow> set_iterator (iteratei_postfixed ks0 (Trie vo kvs)) ((\<lambda>ksv. (rev (fst ksv) @ ks0, snd ksv)) ` map_to_set (lookup_trie (Trie vo kvs)))
[PROOF STEP]
define vo_S where "vo_S = (case vo of None \<Rightarrow> {} | Some v \<Rightarrow> {(ks0, v)})"
[PROOF STATE]
proof (state)
this:
vo_S = (case vo of None \<Rightarrow> {} | Some v \<Rightarrow> {(ks0, v)})
goal (1 subgoal):
1. \<And>vo kvs ks0. \<lbrakk>\<And>k t ks0. \<lbrakk>(k, t) \<in> set kvs; invar_trie t\<rbrakk> \<Longrightarrow> set_iterator (iteratei_postfixed ks0 t) ((\<lambda>ksv. (rev (fst ksv) @ ks0, snd ksv)) ` map_to_set (lookup_trie t)); invar_trie (Trie vo kvs)\<rbrakk> \<Longrightarrow> set_iterator (iteratei_postfixed ks0 (Trie vo kvs)) ((\<lambda>ksv. (rev (fst ksv) @ ks0, snd ksv)) ` map_to_set (lookup_trie (Trie vo kvs)))
[PROOF STEP]
have it_vo_OK: "set_iterator it_vo vo_S"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. set_iterator it_vo vo_S
[PROOF STEP]
unfolding it_vo_def vo_S_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. set_iterator (case vo of None \<Rightarrow> set_iterator_emp | Some v \<Rightarrow> set_iterator_sng (ks0, v)) (case vo of None \<Rightarrow> {} | Some v \<Rightarrow> {(ks0, v)})
[PROOF STEP]
by (simp split: option.split
add: set_iterator_emp_correct set_iterator_sng_correct)
\<comment> \<open>children iterator\<close>
[PROOF STATE]
proof (state)
this:
set_iterator it_vo vo_S
goal (1 subgoal):
1. \<And>vo kvs ks0. \<lbrakk>\<And>k t ks0. \<lbrakk>(k, t) \<in> set kvs; invar_trie t\<rbrakk> \<Longrightarrow> set_iterator (iteratei_postfixed ks0 t) ((\<lambda>ksv. (rev (fst ksv) @ ks0, snd ksv)) ` map_to_set (lookup_trie t)); invar_trie (Trie vo kvs)\<rbrakk> \<Longrightarrow> set_iterator (iteratei_postfixed ks0 (Trie vo kvs)) ((\<lambda>ksv. (rev (fst ksv) @ ks0, snd ksv)) ` map_to_set (lookup_trie (Trie vo kvs)))
[PROOF STEP]
define it_prod :: "(('key \<times> ('key, 'val) trie) \<times> 'key list \<times> 'val, '\<sigma>) set_iterator"
where "it_prod = set_iterator_product (foldli kvs) (\<lambda>(k, y). iteratei_postfixed (k # ks0) y)"
[PROOF STATE]
proof (state)
this:
it_prod = set_iterator_product (foldli kvs) (\<lambda>(k, y). iteratei_postfixed (k # ks0) y)
goal (1 subgoal):
1. \<And>vo kvs ks0. \<lbrakk>\<And>k t ks0. \<lbrakk>(k, t) \<in> set kvs; invar_trie t\<rbrakk> \<Longrightarrow> set_iterator (iteratei_postfixed ks0 t) ((\<lambda>ksv. (rev (fst ksv) @ ks0, snd ksv)) ` map_to_set (lookup_trie t)); invar_trie (Trie vo kvs)\<rbrakk> \<Longrightarrow> set_iterator (iteratei_postfixed ks0 (Trie vo kvs)) ((\<lambda>ksv. (rev (fst ksv) @ ks0, snd ksv)) ` map_to_set (lookup_trie (Trie vo kvs)))
[PROOF STEP]
define it_prod_S where "it_prod_S = (SIGMA kt:set kvs.
(\<lambda>ksv. (rev (fst ksv) @ ((fst kt) # ks0), snd ksv)) `
map_to_set (lookup_trie (snd kt)))"
[PROOF STATE]
proof (state)
this:
it_prod_S = (SIGMA kt:set kvs. (\<lambda>ksv. (rev (fst ksv) @ fst kt # ks0, snd ksv)) ` map_to_set (lookup_trie (snd kt)))
goal (1 subgoal):
1. \<And>vo kvs ks0. \<lbrakk>\<And>k t ks0. \<lbrakk>(k, t) \<in> set kvs; invar_trie t\<rbrakk> \<Longrightarrow> set_iterator (iteratei_postfixed ks0 t) ((\<lambda>ksv. (rev (fst ksv) @ ks0, snd ksv)) ` map_to_set (lookup_trie t)); invar_trie (Trie vo kvs)\<rbrakk> \<Longrightarrow> set_iterator (iteratei_postfixed ks0 (Trie vo kvs)) ((\<lambda>ksv. (rev (fst ksv) @ ks0, snd ksv)) ` map_to_set (lookup_trie (Trie vo kvs)))
[PROOF STEP]
have it_prod_OK: "set_iterator it_prod it_prod_S"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. set_iterator it_prod it_prod_S
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. set_iterator it_prod it_prod_S
[PROOF STEP]
from set_iterator_foldli_correct[OF dist_kvs]
[PROOF STATE]
proof (chain)
picking this:
set_iterator (foldli kvs) (set kvs)
[PROOF STEP]
have it_foldli: "set_iterator (foldli kvs) (set kvs)"
[PROOF STATE]
proof (prove)
using this:
set_iterator (foldli kvs) (set kvs)
goal (1 subgoal):
1. set_iterator (foldli kvs) (set kvs)
[PROOF STEP]
.
[PROOF STATE]
proof (state)
this:
set_iterator (foldli kvs) (set kvs)
goal (1 subgoal):
1. set_iterator it_prod it_prod_S
[PROOF STEP]
{
[PROOF STATE]
proof (state)
this:
set_iterator (foldli kvs) (set kvs)
goal (1 subgoal):
1. set_iterator it_prod it_prod_S
[PROOF STEP]
fix kt
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. set_iterator it_prod it_prod_S
[PROOF STEP]
assume kt_in: "kt \<in> set kvs"
[PROOF STATE]
proof (state)
this:
kt \<in> set kvs
goal (1 subgoal):
1. set_iterator it_prod it_prod_S
[PROOF STEP]
hence k_t_in: "(fst kt, snd kt) \<in> set kvs"
[PROOF STATE]
proof (prove)
using this:
kt \<in> set kvs
goal (1 subgoal):
1. (fst kt, snd kt) \<in> set kvs
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
(fst kt, snd kt) \<in> set kvs
goal (1 subgoal):
1. set_iterator it_prod it_prod_S
[PROOF STEP]
note ind_hyp [OF k_t_in, OF invar_child[OF k_t_in], of "fst kt # ks0"]
[PROOF STATE]
proof (state)
this:
set_iterator (iteratei_postfixed (fst kt # ks0) (snd kt)) ((\<lambda>ksv. (rev (fst ksv) @ fst kt # ks0, snd ksv)) ` map_to_set (lookup_trie (snd kt)))
goal (1 subgoal):
1. set_iterator it_prod it_prod_S
[PROOF STEP]
}
[PROOF STATE]
proof (state)
this:
?kt3 \<in> set kvs \<Longrightarrow> set_iterator (iteratei_postfixed (fst ?kt3 # ks0) (snd ?kt3)) ((\<lambda>ksv. (rev (fst ksv) @ fst ?kt3 # ks0, snd ksv)) ` map_to_set (lookup_trie (snd ?kt3)))
goal (1 subgoal):
1. set_iterator it_prod it_prod_S
[PROOF STEP]
note it_child = this
[PROOF STATE]
proof (state)
this:
?kt3 \<in> set kvs \<Longrightarrow> set_iterator (iteratei_postfixed (fst ?kt3 # ks0) (snd ?kt3)) ((\<lambda>ksv. (rev (fst ksv) @ fst ?kt3 # ks0, snd ksv)) ` map_to_set (lookup_trie (snd ?kt3)))
goal (1 subgoal):
1. set_iterator it_prod it_prod_S
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. set_iterator it_prod it_prod_S
[PROOF STEP]
unfolding it_prod_def it_prod_S_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. set_iterator (set_iterator_product (foldli kvs) (\<lambda>(k, y). iteratei_postfixed (k # ks0) y)) (SIGMA kt:set kvs. (\<lambda>ksv. (rev (fst ksv) @ fst kt # ks0, snd ksv)) ` map_to_set (lookup_trie (snd kt)))
[PROOF STEP]
apply (rule set_iterator_product_correct [OF it_foldli])
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>a. a \<in> set kvs \<Longrightarrow> set_iterator (case a of (k, x) \<Rightarrow> iteratei_postfixed (k # ks0) x) ((\<lambda>ksv. (rev (fst ksv) @ fst a # ks0, snd ksv)) ` map_to_set (lookup_trie (snd a)))
[PROOF STEP]
apply (insert it_child)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>a. \<lbrakk>a \<in> set kvs; \<And>kt. kt \<in> set kvs \<Longrightarrow> set_iterator (iteratei_postfixed (fst kt # ks0) (snd kt)) ((\<lambda>ksv. (rev (fst ksv) @ fst kt # ks0, snd ksv)) ` map_to_set (lookup_trie (snd kt)))\<rbrakk> \<Longrightarrow> set_iterator (case a of (k, x) \<Rightarrow> iteratei_postfixed (k # ks0) x) ((\<lambda>ksv. (rev (fst ksv) @ fst a # ks0, snd ksv)) ` map_to_set (lookup_trie (snd a)))
[PROOF STEP]
apply (simp add: case_prod_beta)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
[PROOF STATE]
proof (state)
this:
set_iterator it_prod it_prod_S
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
set_iterator it_prod it_prod_S
goal (1 subgoal):
1. \<And>vo kvs ks0. \<lbrakk>\<And>k t ks0. \<lbrakk>(k, t) \<in> set kvs; invar_trie t\<rbrakk> \<Longrightarrow> set_iterator (iteratei_postfixed ks0 t) ((\<lambda>ksv. (rev (fst ksv) @ ks0, snd ksv)) ` map_to_set (lookup_trie t)); invar_trie (Trie vo kvs)\<rbrakk> \<Longrightarrow> set_iterator (iteratei_postfixed ks0 (Trie vo kvs)) ((\<lambda>ksv. (rev (fst ksv) @ ks0, snd ksv)) ` map_to_set (lookup_trie (Trie vo kvs)))
[PROOF STEP]
have it_image_OK : "set_iterator (set_iterator_image snd it_prod) (snd ` it_prod_S)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. set_iterator (set_iterator_image snd it_prod) (snd ` it_prod_S)
[PROOF STEP]
proof (rule set_iterator_image_correct[OF it_prod_OK])
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. inj_on snd it_prod_S
2. snd ` it_prod_S = snd ` it_prod_S
[PROOF STEP]
from dist_fst_kvs
[PROOF STATE]
proof (chain)
picking this:
distinct (map fst kvs)
[PROOF STEP]
have "\<And>k v1 v2. (k, v1) \<in> set kvs \<Longrightarrow> (k, v2) \<in> set kvs \<Longrightarrow> v1 = v2"
[PROOF STATE]
proof (prove)
using this:
distinct (map fst kvs)
goal (1 subgoal):
1. \<And>k v1 v2. \<lbrakk>(k, v1) \<in> set kvs; (k, v2) \<in> set kvs\<rbrakk> \<Longrightarrow> v1 = v2
[PROOF STEP]
by (induct kvs) (auto simp add: image_iff)
[PROOF STATE]
proof (state)
this:
\<lbrakk>(?k, ?v1.0) \<in> set kvs; (?k, ?v2.0) \<in> set kvs\<rbrakk> \<Longrightarrow> ?v1.0 = ?v2.0
goal (2 subgoals):
1. inj_on snd it_prod_S
2. snd ` it_prod_S = snd ` it_prod_S
[PROOF STEP]
thus "inj_on snd it_prod_S"
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>(?k, ?v1.0) \<in> set kvs; (?k, ?v2.0) \<in> set kvs\<rbrakk> \<Longrightarrow> ?v1.0 = ?v2.0
goal (1 subgoal):
1. inj_on snd it_prod_S
[PROOF STEP]
unfolding inj_on_def it_prod_S_def
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>(?k, ?v1.0) \<in> set kvs; (?k, ?v2.0) \<in> set kvs\<rbrakk> \<Longrightarrow> ?v1.0 = ?v2.0
goal (1 subgoal):
1. \<forall>x\<in>SIGMA kt:set kvs. (\<lambda>ksv. (rev (fst ksv) @ fst kt # ks0, snd ksv)) ` map_to_set (lookup_trie (snd kt)). \<forall>y\<in>SIGMA kt:set kvs. (\<lambda>ksv. (rev (fst ksv) @ fst kt # ks0, snd ksv)) ` map_to_set (lookup_trie (snd kt)). snd x = snd y \<longrightarrow> x = y
[PROOF STEP]
apply (simp add: image_iff Ball_def map_to_set_def)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>k v1 v2. \<lbrakk>(k, v1) \<in> set kvs; (k, v2) \<in> set kvs\<rbrakk> \<Longrightarrow> v1 = v2) \<Longrightarrow> \<forall>a b aa ba. (a, b) \<in> set kvs \<and> (\<exists>ab. lookup_trie b ab = Some ba \<and> aa = rev ab @ a # ks0) \<longrightarrow> (\<forall>ab bb. (ab, bb) \<in> set kvs \<and> (\<exists>a. lookup_trie bb a = Some ba \<and> aa = rev a @ ab # ks0) \<longrightarrow> a = ab \<and> b = bb)
[PROOF STEP]
apply auto
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
[PROOF STATE]
proof (state)
this:
inj_on snd it_prod_S
goal (1 subgoal):
1. snd ` it_prod_S = snd ` it_prod_S
[PROOF STEP]
qed auto
\<comment> \<open>overall iterator\<close>
[PROOF STATE]
proof (state)
this:
set_iterator (set_iterator_image snd it_prod) (snd ` it_prod_S)
goal (1 subgoal):
1. \<And>vo kvs ks0. \<lbrakk>\<And>k t ks0. \<lbrakk>(k, t) \<in> set kvs; invar_trie t\<rbrakk> \<Longrightarrow> set_iterator (iteratei_postfixed ks0 t) ((\<lambda>ksv. (rev (fst ksv) @ ks0, snd ksv)) ` map_to_set (lookup_trie t)); invar_trie (Trie vo kvs)\<rbrakk> \<Longrightarrow> set_iterator (iteratei_postfixed ks0 (Trie vo kvs)) ((\<lambda>ksv. (rev (fst ksv) @ ks0, snd ksv)) ` map_to_set (lookup_trie (Trie vo kvs)))
[PROOF STEP]
have it_all_OK: "set_iterator
((iteratei_postfixed ks0 (Trie vo kvs)):: ('key list \<times> 'val, '\<sigma>) set_iterator)
(vo_S \<union> snd ` it_prod_S)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. set_iterator (iteratei_postfixed ks0 (Trie vo kvs)) (vo_S \<union> snd ` it_prod_S)
[PROOF STEP]
unfolding iteratei_postfixed_alt_def
it_vo_def[symmetric]
it_prod_def[symmetric]
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. set_iterator (set_iterator_union it_vo (set_iterator_image snd it_prod)) (vo_S \<union> snd ` it_prod_S)
[PROOF STEP]
proof (rule set_iterator_union_correct [OF it_vo_OK it_image_OK])
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. vo_S \<inter> snd ` it_prod_S = {}
[PROOF STEP]
show "vo_S \<inter> snd ` it_prod_S = {}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. vo_S \<inter> snd ` it_prod_S = {}
[PROOF STEP]
unfolding vo_S_def it_prod_S_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (case vo of None \<Rightarrow> {} | Some v \<Rightarrow> {(ks0, v)}) \<inter> snd ` (SIGMA kt:set kvs. (\<lambda>ksv. (rev (fst ksv) @ fst kt # ks0, snd ksv)) ` map_to_set (lookup_trie (snd kt))) = {}
[PROOF STEP]
by (simp split: option.split add: set_eq_iff image_iff)
[PROOF STATE]
proof (state)
this:
vo_S \<inter> snd ` it_prod_S = {}
goal:
No subgoals!
[PROOF STEP]
qed
\<comment> \<open>rewrite result set\<close>
[PROOF STATE]
proof (state)
this:
set_iterator (iteratei_postfixed ks0 (Trie vo kvs)) (vo_S \<union> snd ` it_prod_S)
goal (1 subgoal):
1. \<And>vo kvs ks0. \<lbrakk>\<And>k t ks0. \<lbrakk>(k, t) \<in> set kvs; invar_trie t\<rbrakk> \<Longrightarrow> set_iterator (iteratei_postfixed ks0 t) ((\<lambda>ksv. (rev (fst ksv) @ ks0, snd ksv)) ` map_to_set (lookup_trie t)); invar_trie (Trie vo kvs)\<rbrakk> \<Longrightarrow> set_iterator (iteratei_postfixed ks0 (Trie vo kvs)) ((\<lambda>ksv. (rev (fst ksv) @ ks0, snd ksv)) ` map_to_set (lookup_trie (Trie vo kvs)))
[PROOF STEP]
have it_set_rewr: "((\<lambda>ksv. (rev (fst ksv) @ ks0, snd ksv)) `
map_to_set (lookup_trie (Trie vo kvs))) = (vo_S \<union> snd ` it_prod_S)"
(is "?ls = ?rs")
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lambda>ksv. (rev (fst ksv) @ ks0, snd ksv)) ` map_to_set (lookup_trie (Trie vo kvs)) = vo_S \<union> snd ` it_prod_S
[PROOF STEP]
apply (simp add: map_to_set_def lookup_eq_Some_iff[OF invar]
set_eq_iff image_iff vo_S_def it_prod_S_def Ball_def Bex_def)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>a b. (\<exists>aa. (aa = [] \<and> vo = Some b \<or> (\<exists>k t ks'. aa = k # ks' \<and> (k, t) \<in> set kvs \<and> lookup_trie t ks' = Some b)) \<and> a = rev aa @ ks0) = ((a, b) \<in> (case vo of None \<Rightarrow> {} | Some v \<Rightarrow> {(ks0, v)}) \<or> (\<exists>aa ba. (aa, ba) \<in> set kvs \<and> (\<exists>ab. lookup_trie ba ab = Some b \<and> a = rev ab @ aa # ks0)))
[PROOF STEP]
apply (simp split: option.split del: ex_simps add: ex_simps[symmetric])
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>x2. vo = Some x2 \<longrightarrow> (\<forall>a b. (\<exists>aa k t ks'. (aa = [] \<and> x2 = b \<or> aa = k # ks' \<and> (k, t) \<in> set kvs \<and> lookup_trie t ks' = Some b) \<and> a = rev aa @ ks0) = (\<exists>aa ba ab. a = ks0 \<and> b = x2 \<or> (aa, ba) \<in> set kvs \<and> lookup_trie ba ab = Some b \<and> a = rev ab @ aa # ks0))
[PROOF STEP]
apply (intro allI impI iffI)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>x2 a b. \<lbrakk>vo = Some x2; \<exists>aa k t ks'. (aa = [] \<and> x2 = b \<or> aa = k # ks' \<and> (k, t) \<in> set kvs \<and> lookup_trie t ks' = Some b) \<and> a = rev aa @ ks0\<rbrakk> \<Longrightarrow> \<exists>aa ba ab. a = ks0 \<and> b = x2 \<or> (aa, ba) \<in> set kvs \<and> lookup_trie ba ab = Some b \<and> a = rev ab @ aa # ks0
2. \<And>x2 a b. \<lbrakk>vo = Some x2; \<exists>aa ba ab. a = ks0 \<and> b = x2 \<or> (aa, ba) \<in> set kvs \<and> lookup_trie ba ab = Some b \<and> a = rev ab @ aa # ks0\<rbrakk> \<Longrightarrow> \<exists>aa k t ks'. (aa = [] \<and> x2 = b \<or> aa = k # ks' \<and> (k, t) \<in> set kvs \<and> lookup_trie t ks' = Some b) \<and> a = rev aa @ ks0
[PROOF STEP]
apply auto[]
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>x2 a b. \<lbrakk>vo = Some x2; \<exists>aa ba ab. a = ks0 \<and> b = x2 \<or> (aa, ba) \<in> set kvs \<and> lookup_trie ba ab = Some b \<and> a = rev ab @ aa # ks0\<rbrakk> \<Longrightarrow> \<exists>aa k t ks'. (aa = [] \<and> x2 = b \<or> aa = k # ks' \<and> (k, t) \<in> set kvs \<and> lookup_trie t ks' = Some b) \<and> a = rev aa @ ks0
[PROOF STEP]
apply (metis append_Cons append_Nil append_assoc rev.simps)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
\<comment> \<open>done\<close>
[PROOF STATE]
proof (state)
this:
(\<lambda>ksv. (rev (fst ksv) @ ks0, snd ksv)) ` map_to_set (lookup_trie (Trie vo kvs)) = vo_S \<union> snd ` it_prod_S
goal (1 subgoal):
1. \<And>vo kvs ks0. \<lbrakk>\<And>k t ks0. \<lbrakk>(k, t) \<in> set kvs; invar_trie t\<rbrakk> \<Longrightarrow> set_iterator (iteratei_postfixed ks0 t) ((\<lambda>ksv. (rev (fst ksv) @ ks0, snd ksv)) ` map_to_set (lookup_trie t)); invar_trie (Trie vo kvs)\<rbrakk> \<Longrightarrow> set_iterator (iteratei_postfixed ks0 (Trie vo kvs)) ((\<lambda>ksv. (rev (fst ksv) @ ks0, snd ksv)) ` map_to_set (lookup_trie (Trie vo kvs)))
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. set_iterator (iteratei_postfixed ks0 (Trie vo kvs)) ((\<lambda>ksv. (rev (fst ksv) @ ks0, snd ksv)) ` map_to_set (lookup_trie (Trie vo kvs)))
[PROOF STEP]
unfolding it_set_rewr
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. set_iterator (iteratei_postfixed ks0 (Trie vo kvs)) (vo_S \<union> snd ` it_prod_S)
[PROOF STEP]
using it_all_OK
[PROOF STATE]
proof (prove)
using this:
set_iterator (iteratei_postfixed ks0 (Trie vo kvs)) (vo_S \<union> snd ` it_prod_S)
goal (1 subgoal):
1. set_iterator (iteratei_postfixed ks0 (Trie vo kvs)) (vo_S \<union> snd ` it_prod_S)
[PROOF STEP]
by fast
[PROOF STATE]
proof (state)
this:
set_iterator (iteratei_postfixed ks0 (Trie vo kvs)) ((\<lambda>ksv. (rev (fst ksv) @ ks0, snd ksv)) ` map_to_set (lookup_trie (Trie vo kvs)))
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 9981, "file": "Collections_ICF_impl_Trie_Impl", "length": 65}
|
import os
import alignfaces as af
import numpy as np
# plotting results in nice figures
from skimage.util import montage
import matplotlib.pyplot as plt
def slim_fig(ax):
ax.set_axis_off()
    plt.subplots_adjust(top=1, bottom=0, right=1, left=0,
                        hspace=0, wspace=0)
    plt.margins(0, 0)
ax.xaxis.set_major_locator(plt.NullLocator())
ax.yaxis.set_major_locator(plt.NullLocator())
return None
# ----------------------------------------------------------------------
# Align all faces of Angela Merkel
# Path for faces directory
merkel_folder = "faces-of-merkel"
my_project_path = os.path.dirname(os.path.abspath(__file__))
my_faces_path = my_project_path + os.path.sep + merkel_folder + os.path.sep
# Analyze all image files whose filenames have these properties ...
file_prefix = "merkel"
file_postfix = "jpg"
# Estimate landmarks.
af.get_landmarks(my_faces_path, file_prefix, file_postfix, start_fresh=True)
# Now align the faces via generalized Procrustes analysis.
aligned_path = af.align_procrustes(my_faces_path, file_prefix, file_postfix)
# Estimate landmarks of aligned faces.
af.get_landmarks(aligned_path, file_prefix, file_postfix)
# ----------------------------------------------------------------------
# Simple average
simple_average = af.get_mean_image(aligned_path, max_inner_face_contrast=True)
# ----------------------------------------------------------------------
# Warp each face to mean of landmarks
original_images, warped_to_mean = af.warp_to_mean_landmarks(aligned_path,
file_prefix=file_prefix,
file_postfix=file_postfix)
# Mean of warped faces
enhanced_average = warped_to_mean.mean(axis=0)
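# Hedged note (an assumption about the alignfaces API, not verified here):
# warped_to_mean is expected to have shape (n_faces, height, width), so the
# axis=0 mean collapses the face dimension into a single average image.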
# ----------------------------------------------------------------------
# Figures to show results
# Make directory of results
# results_dir = "results"
# if not os.path.isdir(results_dir):
# os.mkdir(results_dir)
# Display results
fig, ax = plt.subplots(subplot_kw={'aspect': 'equal'})
im_montage = montage(original_images, rescale_intensity=True,
grid_shape=(2, 5))
ax.imshow(im_montage, cmap=plt.cm.gray, interpolation='nearest')
slim_fig(ax)
plt.savefig('A_the_many_faces_of_merkel.png', bbox_inches='tight', pad_inches=0)
fig, ax = plt.subplots(subplot_kw={'aspect': 'equal'})
ax.imshow(simple_average, cmap=plt.cm.gray)
slim_fig(ax)
plt.savefig('B_simple_average.png', bbox_inches='tight', pad_inches=0)
fig, ax = plt.subplots(subplot_kw={'aspect': 'equal'})
im_montage = montage(warped_to_mean, rescale_intensity=True, grid_shape=(2, 5))
ax.imshow(im_montage, cmap=plt.cm.gray, interpolation='nearest')
slim_fig(ax)
plt.savefig('C_the_many_warped_faces_of_merkel.png', bbox_inches='tight', pad_inches=0)
fig, ax = plt.subplots(subplot_kw={'aspect': 'equal'})
ax.imshow(enhanced_average, cmap=plt.cm.gray)
slim_fig(ax)
plt.savefig('D_enhanced_average.png', bbox_inches='tight', pad_inches=0)
# Compare the different average images
# from PIL import Image, ImageDraw, ImageFont
#
# im1 = Image.open('B_simple_average.png')
# im2 = Image.open('D_enhanced_average.png')
# d = ImageDraw.Draw(im1)
# d.text((10, 20), "simple", fill=(0, 0, 0))
# d = ImageDraw.Draw(im2)
# d.text((10, 20), "enhanced", fill=(0, 0, 0))
# dst = Image.new("L", (im1.width*2, im2.height))
# dst.paste(im1, (0, 0))
# dst.paste(im2, (im1.width, 0))
# dst.show()
#
# # Try different way
# im1 = np.array(im1.convert("L"))
# im2 = np.array(im2.convert("L"))
the_aperture = af.place_aperture(aligned_path, aligned_path, no_save=True)
inner_map = (the_aperture * 255) > 16
nim1 = af.contrast_stretch(simple_average, inner_locs=inner_map, type="mean_127")
nim2 = af.contrast_stretch(enhanced_average, inner_locs=inner_map, type="mean_127")
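# Hedged note: place_aperture is assumed to return a [0, 1] alpha mask, so
# scaling by 255 and thresholding at 16 keeps only pixels safely inside the
# face region for the contrast stretches above.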
fig, ax = plt.subplots(subplot_kw={'aspect': 'equal'})
plt.imshow(np.c_[nim1, nim2], cmap="gray")
slim_fig(ax)
ax.text(28, 39, "simple", fontsize=36, color="k")
ax.text(280 + 28, 39, "enhanced", fontsize=36, color="k")
plt.savefig('comparison_average_types.png', bbox_inches='tight', pad_inches=0)
# END
# ----------------------------------------------------------------------
|
{"hexsha": "81ceb70ba1bee562beb7c8b31f7e90e76eb42a48", "size": 4256, "ext": "py", "lang": "Python", "max_stars_repo_path": "demos/demo_3_averaging/run_demo.py", "max_stars_repo_name": "SourCherries/auto-face-align", "max_stars_repo_head_hexsha": "365bd01c22da6f3a44190261786fcc585687ea50", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2021-11-11T04:36:14.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-08T00:20:54.000Z", "max_issues_repo_path": "demos/demo_3_averaging/run_demo.py", "max_issues_repo_name": "SourCherries/auto-face-align", "max_issues_repo_head_hexsha": "365bd01c22da6f3a44190261786fcc585687ea50", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "demos/demo_3_averaging/run_demo.py", "max_forks_repo_name": "SourCherries/auto-face-align", "max_forks_repo_head_hexsha": "365bd01c22da6f3a44190261786fcc585687ea50", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.7647058824, "max_line_length": 91, "alphanum_fraction": 0.6527255639, "include": true, "reason": "import numpy", "num_tokens": 1068}
|
# Written by Mansur Yeşilbursa
import numpy as np
import pickle
def ml_sentence_splitter(text, model=None):
    '''
    Args:
        text: string to split into sentences
        model: optional pre-loaded classifier; loaded from disk when None
    Returns:
        sentences: list of sentences in the string
    '''
    if model is None:
        model_dir = '../models/sentence_splitting/'
        with open(model_dir + 'model_liblinear.pkl', 'rb') as f:
            model = pickle.load(f)
eos_markers = ':.!?'
quote_count = 0
beg_pos = 0
end_pos = 0
sentences = []
    for i, ch in enumerate(text):
quote_count += int(ch == '\"')
if ch in eos_markers and i < len(text) - 1:
            x = [quote_count % 2, int(text[i+1] in eos_markers) - 0.5,
                 int(text[i+1] == '\"') - 0.5,
                 int(text[i+1].isdigit()) - 0.5]
            # dtype=float instead of np.float, which was removed in NumPy 1.24
            x = np.reshape(np.asarray(x, dtype=float), (1, -1))
y = model.predict(x)
if y == 1: # end of sentence
end_pos = i + 1
sentences.append(text[beg_pos:end_pos])
beg_pos = end_pos
elif i == len(text) - 1: # finish the sentence nevertheless
sentences.append(text[beg_pos:])
return sentences
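# Feature layout per candidate end-of-sentence character (this is what the
# liblinear model above is assumed to consume, read off from the code itself):
#   x = [quote parity (0/1),
#        next char is an EOS marker (-0.5 / 0.5),
#        next char is a double quote (-0.5 / 0.5),
#        next char is a digit (-0.5 / 0.5)]
# e.g. in 'Saat 3.15 idi.' the '.' inside '3.15' gets a digit feature of 0.5,
# which lets the model reject it as a sentence boundary.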
if __name__ == '__main__':
model_dir = '../models/sentence_splitting/'
with open(model_dir + 'model_liblinear.pkl', 'rb') as f:
model = pickle.load(f)
with open('../../../42bin_haber/news/siyaset/1.txt', 'r', encoding='utf8') as f:
text = f.read()
sentences = ml_sentence_splitter(text, model)
print(sentences[0])
print(sentences[1])
|
{"hexsha": "9d3d5f59d840bb3b0f51c3de97332d6bc2f815bd", "size": 1563, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/ml_sentence_splitter.py", "max_stars_repo_name": "garsontrier/turkish-nlp-preprocessor", "max_stars_repo_head_hexsha": "88180c21fe22b7d88e3d6bff82afbf1be7cadd75", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-02-04T13:25:19.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-04T13:25:19.000Z", "max_issues_repo_path": "src/ml_sentence_splitter.py", "max_issues_repo_name": "garsontrier/turkish-nlp-preprocessor", "max_issues_repo_head_hexsha": "88180c21fe22b7d88e3d6bff82afbf1be7cadd75", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/ml_sentence_splitter.py", "max_forks_repo_name": "garsontrier/turkish-nlp-preprocessor", "max_forks_repo_head_hexsha": "88180c21fe22b7d88e3d6bff82afbf1be7cadd75", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.5625, "max_line_length": 101, "alphanum_fraction": 0.547024952, "include": true, "reason": "import numpy", "num_tokens": 418}
|
import math
from typing import Any, Mapping
from scipy.spatial.transform import Slerp
from scipy.spatial.transform import Rotation
from src.robots.motors import MotorCommand
from src.robots.robot import Robot
import lp_python_interface
def lerp(a: float, b: float, t: float) -> float:
return (a + t * (b - a))
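# Example: lerp(0.0, 10.0, 0.25) == 2.5; t is expected to lie in [0, 1].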
def interpGRFArray(array_1: lp_python_interface.GRFArray,
array_2: lp_python_interface.GRFArray,
t_interp: float) -> lp_python_interface.GRFArray:
    interp_array = lp_python_interface.GRFArray()
    # Interp grf array element-wise over the per-foot GRF vectors
    for i in range(array_1.vectors.size()):
interp_array.vectors[i].x = lerp(array_1.vectors[i].x,
array_2.vectors[i].x, t_interp)
interp_array.vectors[i].y = lerp(array_1.vectors[i].y,
array_2.vectors[i].y, t_interp)
interp_array.vectors[i].z = lerp(array_1.vectors[i].z,
array_2.vectors[i].z, t_interp)
return interp_array
def interpRobotState(state_1: lp_python_interface.RobotState,
state_2: lp_python_interface.RobotState,
t_interp: float) -> lp_python_interface.RobotState:
    # Interp each element; t_interp is the normalized fraction elapsed within
    # the current plan segment.
interp_state = lp_python_interface.RobotState()
interp_state.body = interpOdometry(state_1.body, state_2.body, t_interp)
interp_state.joints = interpJointState(state_1.joints, state_2.joints,
t_interp)
    interp_state.feet = interpMultiFootState(state_1.feet, state_2.feet,
                                             t_interp)
return interp_state
def interpOdometry(state_1: lp_python_interface.BodyState,
state_2: lp_python_interface.BodyState,
t_interp: float) -> lp_python_interface.BodyState:
interp_state = lp_python_interface.BodyState()
# Interp body position
interp_state.pose.position.x = lerp(state_1.pose.position.x,
state_2.pose.position.x, t_interp)
interp_state.pose.position.y = lerp(state_1.pose.position.y,
state_2.pose.position.y, t_interp)
interp_state.pose.position.z = lerp(state_1.pose.position.z,
state_2.pose.position.z, t_interp)
# Interp body orientation with slerp
# tf2::Quaternion q_1, q_2, q_interp
# tf2::convert(state_1.pose.orientation, q_1)
# tf2::convert(state_2.pose.orientation, q_2)
# q_interp = q_1.slerp(q_2, t_interp)
# interp_state.pose.orientation = tf2::toMsg(q_interp)
q_1 = [
state_1.pose.orientation.x, state_1.pose.orientation.y,
state_1.pose.orientation.z, state_1.pose.orientation.w
]
q_2 = [
state_2.pose.orientation.x, state_2.pose.orientation.y,
state_2.pose.orientation.z, state_2.pose.orientation.w
]
    # scipy's Slerp takes a single Rotation holding both keyframes; keyframe
    # times [0, 1] let t_interp in [0, 1] pick between the two rotations.
    key_rots = Rotation.from_quat([q_1, q_2])
    slerp = Slerp([0.0, 1.0], key_rots)
    r_interp = slerp(t_interp)
    q_interp = r_interp.as_quat()
interp_state.pose.orientation.x = q_interp[0]
interp_state.pose.orientation.y = q_interp[1]
interp_state.pose.orientation.z = q_interp[2]
interp_state.pose.orientation.w = q_interp[3]
# Interp twist
interp_state.twist.linear.x = lerp(state_1.twist.linear.x,
state_2.twist.linear.x, t_interp)
interp_state.twist.linear.y = lerp(state_1.twist.linear.y,
state_2.twist.linear.y, t_interp)
interp_state.twist.linear.z = lerp(state_1.twist.linear.z,
state_2.twist.linear.z, t_interp)
interp_state.twist.angular.x = lerp(state_1.twist.angular.x,
state_2.twist.angular.x, t_interp)
interp_state.twist.angular.y = lerp(state_1.twist.angular.y,
state_2.twist.angular.y, t_interp)
interp_state.twist.angular.z = lerp(state_1.twist.angular.z,
state_2.twist.angular.z, t_interp)
return interp_state
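# Minimal standalone sketch of the orientation interpolation above (scipy
# only; quaternions are in [x, y, z, w] order, as scipy expects):
#
#   from scipy.spatial.transform import Rotation, Slerp
#   key_rots = Rotation.from_quat([[0, 0, 0, 1],                   # identity
#                                  [0, 0, 0.7071068, 0.7071068]])  # 90 deg about z
#   slerp = Slerp([0.0, 1.0], key_rots)
#   q_half = slerp(0.5).as_quat()  # ~45 deg about z: [0, 0, 0.3827, 0.9239]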
def interpMultiFootState(
state_1: lp_python_interface.MultiFootState,
state_2: lp_python_interface.MultiFootState,
t_interp: float) -> lp_python_interface.MultiFootState:
interp_state = lp_python_interface.MultiFootState()
# Interp foot state
interp_state.feet.resize(state_1.feet.size())
for i in range(interp_state.feet.size()):
interp_state.feet[i].position.x = lerp(state_1.feet[i].position.x,
state_2.feet[i].position.x,
t_interp)
interp_state.feet[i].position.y = lerp(state_1.feet[i].position.y,
state_2.feet[i].position.y,
t_interp)
interp_state.feet[i].position.z = lerp(state_1.feet[i].position.z,
state_2.feet[i].position.z,
t_interp)
# Interp foot velocity
interp_state.feet[i].velocity.x = lerp(state_1.feet[i].velocity.x,
state_2.feet[i].velocity.x,
t_interp)
interp_state.feet[i].velocity.y = lerp(state_1.feet[i].velocity.y,
state_2.feet[i].velocity.y,
t_interp)
interp_state.feet[i].velocity.z = lerp(state_1.feet[i].velocity.z,
state_2.feet[i].velocity.z,
t_interp)
# Interp foot acceleration
interp_state.feet[i].acceleration.x = lerp(
state_1.feet[i].acceleration.x, state_2.feet[i].acceleration.x,
t_interp)
interp_state.feet[i].acceleration.y = lerp(
state_1.feet[i].acceleration.y, state_2.feet[i].acceleration.y,
t_interp)
interp_state.feet[i].acceleration.z = lerp(
state_1.feet[i].acceleration.z, state_2.feet[i].acceleration.z,
t_interp)
# Set contact state to the first state
interp_state.feet[i].contact = state_1.feet[i].contact
return interp_state
def interpJointState(joint_1: lp_python_interface.JointState,
joint_2: lp_python_interface.JointState,
t_interp: float) -> lp_python_interface.JointState:
# Interp joints
interp_joint = lp_python_interface.JointState()
interp_joint.name.resize(joint_1.position.size())
interp_joint.position.resize(joint_1.position.size())
interp_joint.velocity.resize(joint_1.position.size())
interp_joint.effort.resize(joint_1.position.size())
for i in range(joint_1.position.size()):
interp_joint.name[i] = joint_1.name[i]
interp_joint.position[i] = lerp(joint_1.position[i],
joint_2.position[i], t_interp)
interp_joint.velocity[i] = lerp(joint_1.velocity[i],
joint_2.velocity[i], t_interp)
interp_joint.effort[i] = lerp(joint_1.effort[i], joint_2.effort[i],
t_interp)
return interp_joint
class LegController:
def __init__(self, robot: Robot,
robot_state: lp_python_interface.RobotState = None,
leg_array: lp_python_interface.MultiFootPlanContinuous = None,
grf_array: lp_python_interface.GRFArray = None,
interval: float = 0.03):
self._robot = robot
self._robot_state = robot_state
self._leg_array = leg_array
self._grf_array = grf_array
self._interval = interval
def reset(self, current_time: float) -> None:
return
@property
def robot_state(self):
return self._robot_state
@robot_state.setter
def robot_state(self, robot_state: lp_python_interface.RobotState) -> None:
self._robot_state = robot_state
@property
def leg_array(self):
return self._leg_array
@leg_array.setter
def leg_array(
self,
leg_array: lp_python_interface.MultiFootPlanContinuous) -> None:
self._leg_array = leg_array
@property
def grf_array(self):
return self._grf_array
@grf_array.setter
def grf_array(self, grf_array: lp_python_interface.GRFArray) -> None:
self._grf_array = grf_array
def receive_local_plan(self,
last_local_plan: lp_python_interface.RobotPlan):
self._last_local_plan = last_local_plan # actually robotplan is enough? MultiFootPlanDiscrete and MultiFootPlanContinuous not used
    def get_action(self, t_elapsed: float) -> Mapping[Any, Any]:
        # Interpolation flow: after each MPC solve we start a new clock and
        # use the simulation time within that clock's timeframe to
        # interpolate the GRFs and foot states. We must be careful about when
        # the MPC runs and the delay it causes, so the MPC is only triggered
        # after a full step finishes (all four feet on the ground). That also
        # means we cannot use wall time to track the global plan; instead we
        # keep a record of which plan node is tracked next.
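        # Worked example of the indexing below, assuming interval = 0.03 s:
        #   t_elapsed = 0.10 -> t_segment = floor(0.10 / 0.03) = 3
        #                       t_interp  = (0.10 % 0.03) / 0.03 ~= 0.33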
        t_segment = math.floor(t_elapsed / self._interval)
        # Normalize the remainder to a [0, 1] fraction for the lerp/slerp
        # calls below.
        t_interp = (t_elapsed % self._interval) / self._interval
        target_state = interpRobotState(
            self._last_local_plan.states[t_segment],
            self._last_local_plan.states[t_segment + 1], t_interp)
        target_grf = interpGRFArray(self._last_local_plan.grf[t_segment],
                                    self._last_local_plan.grf[t_segment + 1],
                                    t_interp)
foot_traj = target_state.feet
grf = target_grf
all_joint_inputs = {}
action = {}
        for joint in range(target_state.joints.position.size()):
            all_joint_inputs[joint] = (target_state.joints.position[joint],
                                       target_state.joints.velocity[joint], 0)
for leg, _ in enumerate(self._robot._foot_link_ids):
            if foot_traj.feet[leg].contact:  # stance leg
motor_torques = self._robot.map_contact_force_to_joint_torques(
leg, [
target_grf.vectors[leg].x, target_grf.vectors[leg].y,
target_grf.vectors[leg].z
])
for joint, torque in motor_torques.items():
to_list = list(all_joint_inputs[joint])
to_list[2] = torque
all_joint_inputs[joint] = to_list
# else:
# foot_position = foot_traj.feet[leg].position
# joint_ids, joint_angles = (
# self._robot.get_motor_angles_from_foot_position(
# leg, foot_position))
kps = self._robot.motor_group.kps
kds = self._robot.motor_group.kds
for joint, joint_input in all_joint_inputs.items():
action[joint] = MotorCommand(desired_position=joint_input[0],
kp=kps[joint],
desired_velocity=joint_input[1],
kd=kds[joint],
desired_extra_torque=joint_input[2])
return action
|
{"hexsha": "17ba5eda36ce013d4299cacb3439c8fb5ecaa400", "size": 11967, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/controller/leg_controller.py", "max_stars_repo_name": "jrenaf/fast_and_efficient", "max_stars_repo_head_hexsha": "708b2e79fba19330190046f29383d298a95c3abd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/controller/leg_controller.py", "max_issues_repo_name": "jrenaf/fast_and_efficient", "max_issues_repo_head_hexsha": "708b2e79fba19330190046f29383d298a95c3abd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/controller/leg_controller.py", "max_forks_repo_name": "jrenaf/fast_and_efficient", "max_forks_repo_head_hexsha": "708b2e79fba19330190046f29383d298a95c3abd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.3222222222, "max_line_length": 139, "alphanum_fraction": 0.5934653631, "include": true, "reason": "import numpy,from scipy", "num_tokens": 2689}
|
import numpy as np
import copy
import scipy.signal as ss
class Node(object):
def __init__(self, grid):
"""
:param grid: np.array nrow*ncolumn
author: weiwei
date: 20190828, 20200104
"""
self.grid = copy.deepcopy(grid)
self._nrow, self._ncolumn = self.grid.shape
self.ngrids = self._nrow*self._ncolumn
self.parent = None
self.gs = 0
@property
def nrow(self):
return self._nrow
@property
def ncolumn(self):
return self._ncolumn
def __getitem__(self, x):
return self.grid[x]
def __eq__(self, anothernode):
"""
determine if two nodes are the same
:return:
author: weiwei
date: 20190828
"""
return np.array_equal(self.grid, anothernode.grid)
def __repr__(self):
"""
overload the printed results
:return:
author: weiwei
date: 20191003
"""
outstring = "["
for i in range(self._nrow):
if i == 0:
outstring += "["
else:
outstring += " ["
for j in range(self._ncolumn):
outstring = outstring+str(self.grid[i][j])+","
outstring = outstring[:-1] + "]"
outstring += ",\n"
outstring = outstring[:-2] + "]]"
return outstring
class TubePuzzle(object):
def __init__(self, elearray):
"""
        :param elearray: nrow*ncolumn int array, tube id starts from 1, maximum 4
author: weiwei
date: 20191003
"""
self._nrow = elearray.shape[0]
self._ncolumn = elearray.shape[1]
self.elearray = np.zeros((self._nrow, self._ncolumn), dtype="int")
self.openlist = []
self.closelist = []
self._setValues(elearray)
self.goalpattern = np.array([[1,1,1,1,0,0,2,2,2,2],
[1,1,1,1,0,0,2,2,2,2],
[1,1,1,1,0,0,2,2,2,2],
[1,1,1,0,0,0,0,2,2,2],
[1,1,1,0,0,0,0,2,2,2]])
def _setValues(self, elearray):
"""
change the elements of the puzzle using elearray
:param elearray: 2d array
:return:
author: weiwei
date: 20190828, 20200104osaka
"""
if elearray.shape != (self._nrow, self._ncolumn):
print("Wrong number of elements in elelist!")
raise Exception("Number of elements error!")
self.elearray = elearray
def _hs(self, node):
"""
heuristics
:return:
author: weiwei
date: 20200104
"""
return np.sum((self.goalpattern!=1)*(node.grid==1)+(self.goalpattern!=2)*(node.grid==2))
def isdone(self, node):
"""
:return:
author: weiwei
date: 20190828
"""
if np.any((self.goalpattern != 1)*(node.grid==1)) or np.any((self.goalpattern != 2)*(node.grid==2)):
return False
return True
    def fcost(self, node):
        # f = g + h for A*; the heuristic alone is returned as well so the
        # open list can break ties on f by the smaller heuristic.
        hs = self._hs(node)
        gs = node.gs
        return hs + gs, hs, gs
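    # Example: a node with 2 misplaced tubes (hs == 2) reached after 3 moves
    # (gs == 3) sorts with f == 5; ties on f fall back to the smaller hs.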
def getMovableFillablePair(self, node):
"""
get a list of movable and fillable pairs
:param node see Node
:return: [[(i,j), (k,l)], ...]
author: weiwei
date: 20191003osaka, 20200104osaka
"""
# filtering
mask_ulbr = np.array([[1,0,0],[0,0,0],[0,0,1]])
mask_urbl = np.array([[0,0,1],[0,0,0],[1,0,0]])
# mask_ulbr2 = np.array([[1,0,0],[1,0,0],[0,1,1]])
# mask_urbl2 = np.array([[0,1,1],[1,0,0],[1,0,0]])
# mask_ulbr2_flp = np.array([[1,1,0],[0,0,1],[0,0,1]])
# mask_urbl2_flp = np.array([[0,0,1],[0,0,1],[1,1,0]])
mask_ucbc = np.array([[0,1,0],[0,0,0],[0,1,0]])
mask_crcl = np.array([[0,0,0],[1,0,1],[0,0,0]])
cg_ulbr = ss.correlate2d(node.grid, mask_ulbr)[1:-1,1:-1]
cg_urbl = ss.correlate2d(node.grid, mask_urbl)[1:-1,1:-1]
# cg_ulbr2_flp = ss.correlate2d(node.grid, mask_ulbr2_flp)[1:-1,1:-1]
# cg_urbl2_flp = ss.correlate2d(node.grid, mask_urbl2_flp)[1:-1,1:-1]
# cg_ulbr2_flp = ss.correlate2d(node.grid, mask_ulbr2_flp)[1:-1,1:-1]
# cg_urbl2_flp = ss.correlate2d(node.grid, mask_urbl2_flp)[1:-1,1:-1]
cg_ucbc = ss.correlate2d(node.grid, mask_ucbc)[1:-1,1:-1]
cg_crcl = ss.correlate2d(node.grid, mask_crcl)[1:-1,1:-1]
# cf = ((cg_ulbr==0)+(cg_urbl==0)+(cg_ulbr_flp==0)+(cg_urbl_flp==0)+(cg_ucbc==0)+(cg_crcl==0))*(node.grid==0)
cf = ((cg_ulbr==0)+(cg_urbl==0)+(cg_ucbc==0)+(cg_crcl==0))*(node.grid==0)
# fillable 1
fillable_type1 = np.asarray(np.where((self.goalpattern==1)*cf)).T
# fillable 2
fillable_type2 = np.asarray(np.where((self.goalpattern==2)*cf)).T
cg_ulbr[node.grid==0]=-1
cg_urbl[node.grid==0]=-1
# cg_ulbr_flp[node.grid==0]=-1
# cg_urbl_flp[node.grid==0]=-1
cg_ucbc[node.grid==0]=-1
cg_crcl[node.grid==0]=-1
# cg = (cg_ulbr==0)+(cg_urbl==0)+(cg_ulbr_flp==0)+(cg_urbl_flp==0)+(cg_ucbc==0)+(cg_crcl==0)
cg = (cg_ulbr==0)+(cg_urbl==0)+(cg_ucbc==0)+(cg_crcl==0)
# movable 1
movable_type1 = np.asarray(np.where(cg*(node.grid==1))).T
# movable 2
movable_type2 = np.asarray(np.where(cg*(node.grid==2))).T
movable_expanded_type1 = np.repeat(movable_type1, len(fillable_type1), axis=0)
movable_expanded_type2 = np.repeat(movable_type2, len(fillable_type2), axis=0)
if len(movable_expanded_type1)==0:
movableeles = movable_expanded_type2
elif len(movable_expanded_type2)==0:
movableeles = movable_expanded_type1
else:
movableeles = np.concatenate((movable_expanded_type1, movable_expanded_type2), axis=0)
fillable_expanded_type1 = np.tile(fillable_type1, (len(movable_type1),1))
fillable_expanded_type2 = np.tile(fillable_type2, (len(movable_type2),1))
if len(fillable_expanded_type1)==0:
fillableeles = fillable_expanded_type2
elif len(fillable_expanded_type2)==0:
fillableeles = fillable_expanded_type1
else:
fillableeles = np.concatenate((fillable_expanded_type1, fillable_expanded_type2), axis=0)
return movableeles, fillableeles
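    # Sketch of the mask logic above: correlating the grid with, e.g.,
    # mask_crcl sums the left and right neighbours of every cell; a zero
    # result means both sides are empty, so a straight horizontal approach is
    # clear. A cell counts as fillable (if empty) or movable (if occupied)
    # when any one of the four opposite-neighbour masks comes up clear.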
def _reorderopenlist(self):
self.openlist.sort(key=lambda x: (self.fcost(x)[0], self.fcost(x)[1]))
    def astarSearch(self):
"""
build a graph considering the movable and fillable ids
:return:
author: weiwei
date: 20191003
"""
startnode = Node(self.elearray)
self.openlist = [startnode]
while True:
# if len(self.openlist)>=2:
# for eachnode in self.openlist:
# print(eachnode)
# print(eachnode.fcost())
# print("\n")
self._reorderopenlist()
print(self.openlist[0])
print(self.fcost(self.openlist[0]))
print("\n")
self.closelist.append(self.openlist.pop(0))
# movableids = self.getMovableIds(self.closelist[-1])
# fillableids = self.getFillableIds(self.closelist[-1])
# if len(movableids) == 0 or len(fillableids) == 0:
# print("No path found!")
# return []
# for mid in movableids:
# for fid in fillableids:
movableeles, fillableeles = self.getMovableFillablePair(self.closelist[-1])
if movableeles.shape[0] == 0:
print("No path found!")
return []
for i in range(movableeles.shape[0]):
mi, mj = movableeles[i]
fi, fj = fillableeles[i]
tmpelearray = copy.deepcopy(self.closelist[-1])
tmpelearray.parent = self.closelist[-1]
tmpelearray.gs = self.closelist[-1].gs+1
tmpelearray[fi][fj] = tmpelearray[mi][mj]
tmpelearray[mi][mj] = 0
# check if path is found
if self.isdone(tmpelearray):
path = [tmpelearray]
parent = tmpelearray.parent
while parent is not None:
path.append(parent)
parent = parent.parent
print("Path found!")
# for eachnode in path:
# print(eachnode)
return path[::-1]
# check if in openlist
flaginopenlist = False
for eachnode in self.openlist:
if eachnode == tmpelearray:
flaginopenlist = True
if self.fcost(eachnode)[0] <= self.fcost(tmpelearray)[0]:
pass
# no need to update position
else:
eachnode.parent = tmpelearray.parent
# self._reorderopenlist()
# continue
break
if flaginopenlist:
continue
else:
# not in openlist append and sort openlist
self.openlist.append(tmpelearray)
if __name__=="__main__":
# down x, right y
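    # Three sample boards follow; only the last assignment is solved, the
    # first two are kept as alternative test cases.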
elearray = np.array([[1,0,0,0,1,0,0,0,0,0],
[0,0,0,0,0,0,0,2,0,2],
[0,0,0,0,0,0,0,0,2,0],
[1,0,0,0,0,0,0,0,2,2],
[1,0,0,0,0,0,0,2,0,2]])
elearray = np.array([[0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0],
[2,2,0,2,1,0,0,0,0,0],
[1,1,0,1,2,0,0,0,0,2],
[0,2,0,0,0,0,0,0,0,2]])
elearray = np.array([[0,0,0,0,0,0,0,0,0,0],
[0,0,0,2,2,2,2,0,0,0],
[0,0,2,1,1,1,0,0,0,0],
[0,0,2,1,2,2,0,0,0,0],
[0,0,0,0,2,0,0,0,0,0]])
tp = TubePuzzle(elearray)
# tp.getMovableIds(Node(elearray))
# print(Node(elearray).fcost())
# print(tp.fcost(Node(elearray)))
    path = tp.astarSearch()
    # for node in path:
    #     print(node)
|
{"hexsha": "7ffa769731987463134bd280ea5bb4c4c7fceff7", "size": 10567, "ext": "py", "lang": "Python", "max_stars_repo_path": "0000_huri/tubepuzzlefast.py", "max_stars_repo_name": "liang324/wrs", "max_stars_repo_head_hexsha": "46eadec355c61a9c7bac1fa0f3cf419b2aac19aa", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-08-07T04:51:07.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-07T04:51:07.000Z", "max_issues_repo_path": "0000_huri/tubepuzzlefast.py", "max_issues_repo_name": "liang324/wrs", "max_issues_repo_head_hexsha": "46eadec355c61a9c7bac1fa0f3cf419b2aac19aa", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "0000_huri/tubepuzzlefast.py", "max_forks_repo_name": "liang324/wrs", "max_forks_repo_head_hexsha": "46eadec355c61a9c7bac1fa0f3cf419b2aac19aa", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.3084415584, "max_line_length": 117, "alphanum_fraction": 0.5077126905, "include": true, "reason": "import numpy,import scipy", "num_tokens": 3096}
|
"""OpenCV Camera class for lens correction with Charuco calibration."""
from .Camera import Camera
from pathlib import Path
import numpy as np
from threading import Event, Thread
import time
import subprocess
import os
import sys
try:
import cv2
from cv2 import aruco
except ImportError:
raise ImportError('ERROR opencv-contrib-python must be installed!')
# TODO: implement height transform correction
# https://github.com/O-C-R/maproom-robots/tree/master/skycam
# TODO: AR example
# https://github.com/avmeer/ComputerVisionAugmentedReality
# Averaging
# ○ ArUco tags are hard to pick out perfectly each time
# ○ Position of the marker is noisy and subsequently the models would shake
# ○ Averaging the last three position matrices helped to stabilize the models.
def input_float(prompt=''):
"""Ask for a human float input.
Args:
prompt (string): Text to prompt as input.
"""
# try:
# return raw_input(prompt)
# except NameError:
# return input(prompt)
while True:
try:
float_input = float(input(prompt))
except ValueError:
print('Please enter a float.\n')
continue
else:
break
return float_input
class CameraCorrected(Camera):
"""CameraCorrected class used to setup and use a camera with lens correction.
Attributes:
        aruco_dict_num (int): ChAruco dictionary number used for calibration.
board (CharucoBoard): ChAruco board object used for calibration.
cap (VideoCapture): OpenCV VideoCapture element.
cam_id (string): Camera or V4L id (ex: /dev/video0 /dev/v4l_by_id/...).
        charuco_square_length (float): black square length on the printed board.
        charuco_marker_size (float): Aruco marker length on the print.
        focus (float): Camera focus value for cameras which support focusing.
height (int): Camera frame height in pixels.
width (int): Camera frame width in pixels.
camera_matrix (OpenCV matrix): OpenCV camera correction matrix.
dist_coeffs (OpenCV matrix): OpenCV distance correction coefficients.
corners (list): List of detected corners positions as a buffer.
ids (list): List of detected corners ids as a buffer.
board_post (PostureBuffer): Buffer to filter the posture of the board.
settings (list): List of OpenCV VideoCapture (v4l) settings.
thread_ready (Event): Thread is ready Event.
thread (threading.Thread): VideoCapture reading thread.
t0 (time.time): Time counter buffer.
"""
def __init__(self, cam_id, aruco_dict_num, focus=None, vertical_flip=None,
settings=None):
"""Initialize the CameraCorrected object variables.
Args:
cam_id (string): Camera or V4L id.
            aruco_dict_num (int): ChAruco dictionary number used for calibration.
            vertical_flip (bool): Trigger vertical frame flipping.
            focus (float): Camera focus value for cameras which support focusing.
            settings (list): list of tuples with specific camera settings.
"""
Camera.__init__(self, cam_id, vertical_flip, settings)
self.focus = focus
# Corners points and identifiers buffers
self.aruco_dict_num = aruco_dict_num
self.corners = None
self.ids = None
# Moving/Rolling average posture filtering
# TODO: Low pass filtering on translation and rotation
self.board_post = PostureBuffer()
# Parameter files folder
if not Path('./data').exists():
os.makedirs('./data')
def initialize(self):
"""Set up camera and launch the calibration routine."""
self._setup()
# Camera correction
self.calibrate_camera_correction()
# Start the VideoCapture read() thread
self.stop = False
self.start_camera_thread()
self.thread_ready.wait()
# Quick test
self.test_camera()
print('Corrected camera %s initialization done!\n' % self.cam_id)
def calibrate_camera_correction(self):
"""Calibrate the camera lens correction."""
# Hints:
# https://github.com/opencv/opencv/blob/master/samples/python/calibrate.py
# https://longervision.github.io/2017/03/16/OpenCV/opencv-internal-calibration-chessboard/
# http://www.peterklemperer.com/blog/2017/10/29/opencv-charuco-camera-calibration/
# http://www.morethantechnical.com/2017/11/17/projector-camera-calibration-the-easy-way/
# https://mecaruco2.readthedocs.io/en/latest/notebooks_rst/Aruco/sandbox/ludovic/aruco_calibration_rotation.html
defaultConfig_path = Path('./data/defaultConfig.xml')
if defaultConfig_path.exists():
print(' Found defaultConfig.xml.\nCAUTION: be sure settings in d'
'efaultConfig.xml match the current hardware configuration.')
default_config = cv2.FileStorage(
str(defaultConfig_path), cv2.FILE_STORAGE_READ)
self.aruco_dict_num = int(
default_config.getNode('charuco_dict').real())
self.charuco_square_length = default_config.getNode(
'charuco_square_lenght').real() # ARGH, spelling mistake!
self.charuco_marker_size = default_config.getNode(
'charuco_marker_size').real()
self.width = int(default_config.getNode(
'camera_resolution').at(0).real())
self.height = int(default_config.getNode(
'camera_resolution').at(1).real())
default_config.release()
else:
self.write_defaultConfig()
aruco_dict = cv2.aruco.Dictionary_get(self.aruco_dict_num)
# Create specific camera calibration if no one already exists
# using the opencv_interactive-calibration program.
cameraParameters_path = Path(
'./data/cameraParameters_%s.xml' % self.cam_id)
if not cameraParameters_path.exists():
print('\nStarting the camera id%s lens calibration.' % self.cam_id)
self.cap.release() # Release VideoCapture before CLI usage
subprocess.call(
['opencv_interactive-calibration', '-d=0.25', '-h=7', '-w=5',
'-sz=%f' % self.charuco_square_length, '--t=charuco',
'-pf=' + str(defaultConfig_path),
'-ci=' + str(self.cam_id),
'-of=' + str(cameraParameters_path),
'-flip=' + str(self.vertical_flip).lower()])
if sys.platform == "linux" or platform == "linux2":
self.cap = cv2.VideoCapture(self.cam_path, cv2.CAP_V4L2)
else:
self.cap = cv2.VideoCapture(self.cam_id)
self.set_camera_settings() # Re-set camera settings
# Load the camera calibration file.
if cameraParameters_path.exists():
print(' Found cameraParameters_%s.xml' % self.cam_id)
calibration_file = cv2.FileStorage(
str(cameraParameters_path), cv2.FILE_STORAGE_READ)
self.camera_matrix = calibration_file.getNode('cameraMatrix').mat()
self.dist_coeffs = calibration_file.getNode('dist_coeffs').mat()
self.width = int(calibration_file.getNode(
'cameraResolution').at(0).real())
self.height = int(calibration_file.getNode(
'cameraResolution').at(1).real())
if calibration_file.getNode('focus').isReal(): # If focus val
self.focus = float(calibration_file.getNode('focus').real())
self.set_focus(self.focus * 50)
# Specific Fish-Eye parameters
# self.r = calibrationParams.getNode("R").mat()
# self.new_camera_matrix = calibrationParams.getNode(
# "newCameraMatrix").mat()
calibration_file.release()
else:
raise ValueError(
"cameraParameters_%s.xml not found!\n\t"
"Please finish the calibration and press 's' to save to file."
% self.cam_id)
self.board = cv2.aruco.CharucoBoard_create(
5, 7, self.charuco_square_length, self.charuco_marker_size,
aruco_dict)
print('Camera %s calibration correction done!' % self.cam_id)
def detect_markers(self):
"""Detect ChAruco markers.
Returns:
frame (OpenCV Mat): A frame read from the VideoCapture method.
corners (Numpy array): list of corners 2D coordinates.
ids (Numpy array): list of detected marker identifiers.
"""
parameters = cv2.aruco.DetectorParameters_create()
frame = self.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
corners, ids, rej = cv2.aruco.detectMarkers(
gray, self.board.dictionary, parameters=parameters)
corners, ids, rej, recov = cv2.aruco.refineDetectedMarkers(
gray, self.board, corners, ids, rej,
cameraMatrix=self.camera_matrix, distCoeffs=self.dist_coeffs)
return frame, corners, ids
def estimate_board_posture(self, frame=None, corners=None, ids=None):
"""Estimate ChAruco board posture.
Arguments:
frame (OpenCV Mat): A frame read from the VideoCapture method.
corners (Numpy array): list of corners 2D coordinates.
ids (Numpy array): list of detected marker identifiers.
Return:
frame (OpenCV Mat): Frame with the board posture drawn
"""
# If we do not already have detect markers:
if frame is None:
frame, corners, ids = self.detect_markers()
if ids is None: # No detected marker
frame = self.draw_text(frame, 'No ChAruco marker detected !')
# time.sleep(0.1) # Sleep to give the time to move the panel
else: # if there is at least one marker detected
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# Draw axis for the global board
retval, cha_corns, cha_ids = cv2.aruco.interpolateCornersCharuco(
corners, ids, gray, self.board,
cameraMatrix=self.camera_matrix, distCoeffs=self.dist_coeffs)
if retval:
frame_with_board = cv2.aruco.drawDetectedCornersCharuco(
frame, cha_corns, cha_ids, (0, 255, 0))
# Posture estimation of the global ChAruco board
retval, rvecs, tvecs = cv2.aruco.estimatePoseCharucoBoard(
cha_corns, cha_ids, self.board,
self.camera_matrix, self.dist_coeffs)
if retval is True:
rvecs, tvecs = self.board_post.update(rvecs, tvecs)
frame = cv2.aruco.drawAxis(
frame_with_board, self.camera_matrix, self.dist_coeffs,
rvecs, tvecs, 4 * self.charuco_square_length)
else:
frame = self.draw_text(
frame, 'Not enough Charuco markers detected.')
else:
frame = self.draw_text(
frame, 'Not enough resolution. Board is too far.')
return frame
def estimate_markers_posture(self, frame=None, corners=None, ids=None):
"""Estimate ChAruco markers posture.
Arguments:
frame (OpenCV Mat): A frame read from the VideoCapture method.
corners (Numpy array): list of corners 2D coordinates.
ids (Numpy array): list of detected marker identifiers.
Return:
frame (OpenCV Mat): Frame with all detected markers posture drawn.
"""
# If we do not already have detect markers:
if frame is None:
frame, corners, ids = self.detect_markers()
if ids is None: # No detected marker
frame = self.draw_text(frame, 'No ChAruco marker detected !')
# time.sleep(0.1) # Sleep to give the time to move the panel
else: # if there is at least one marker detected
# Draw each detected marker
frame = cv2.aruco.drawDetectedMarkers(frame, corners, ids)
rvecs, tvecs, _objPoints = cv2.aruco.estimatePoseSingleMarkers(
corners, self.charuco_square_length,
self.camera_matrix, self.dist_coeffs)
# Draw axis for each marker
for rvec, tvec in zip(rvecs, tvecs):
frame = cv2.aruco.drawAxis(
frame, self.camera_matrix, self.dist_coeffs,
rvec, tvec, self.charuco_square_length)
return frame
def estimate_board_and_markers_posture(self):
"""Estimate posture of ChAruco markers and posture of global board.
Return:
frame (OpenCV Mat): Frame with the board and markers postures.
"""
frame, corners, ids = self.detect_markers()
frame = self.estimate_markers_posture(frame, corners, ids)
frame = self.estimate_board_posture(frame, corners, ids)
return frame
# def py_charuco_camera_calibration(self):
# """TODO: camera calibration with Python."""
# parameters = cv2.aruco.DetectorParameters_create()
# corners_list = []
# ids_list = []
# print('Move the charuco board in front of the', self.cam_id)
# while len(corners_list) < 50:
# frame = self.read()
# gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# corners, ids, rej = cv2.aruco.detectMarkers(
# gray, dictionary=aruco_dict, parameters=parameters)
# corners, ids, rej, recovered = cv2.aruco.refineDetectedMarkers(
# gray, cv2.aruco, corners, ids, rej,
# cameraMatrix=self.camera_matrix, distCoeffs=self.dist_coef)
# if corners is None or len(corners) == 0:
# print('No ChAruco corner detected!')
# continue
# ret, corners, ids = cv2.aruco.interpolateCornersCharuco(
# corners, ids, gray, cb)
# corners_list.append(corners)
# ids_list.append(ids)
# time.sleep(0.1) # Sleep to give the time to move the panel
# print('Enough frames for %s calibration!' % self.cam_id)
# # Calibrate camera
# ret, K, dist_coef, rvecs, tvecs = cv2.aruco.calibrateCameraCharuco(
# corners_list, ids_list, cv2.aruco, (w, h), K,
# dist_coef, flags=cv2.CALIB_USE_INTRINSIC_GUESS)
# print('camera calib mat after\n%s' % K)
# print('camera dist_coef %s' % dist_coef.T)
# print('calibration reproj err %s' % ret)
# distCoeffsInit = np.zeros((5, 1))
# flags = (cv2.CALIB_USE_INTRINSIC_GUESS + cv2.CALIB_RATIONAL_MODEL + cv2.CALIB_FIX_ASPECT_RATIO) # noqa
# # flags = (cv2.CALIB_RATIONAL_MODEL)
# (ret, camera_matrix, distortion_coefficients0,
# rotation_vectors, translation_vectors,
# stdDeviationsIntrinsics, stdDeviationsExtrinsics,
# perViewErrors) = cv2.aruco.calibrateCameraCharucoExtended(
# charucoCorners=allCorners, charucoIds=allIds, board=board,
# imageSize=imsize, cameraMatrix=cameraMatrixInit,
# distCoeffs=distCoeffsInit, flags=flags, criteria=(
# cv2.TERM_CRITERIA_EPS & cv2.TERM_CRITERIA_COUNT, 10000, 1e-9))
def read_undistort(self):
"""Read an undistored camera frame."""
return cv2.undistort(
src=self.read(), cameraMatrix=self.camera_matrix,
distCoeffs=self.dist_coeffs)
def save_focus(self):
"""Save the camera focus value to the cameraParameters.xml file."""
if self.focus:
cameraParameters_path = Path(
'./data/cameraParameters_%s.xml' % self.cam_id)
self.write_append_to_FileStorage(
str(cameraParameters_path),
string='<focus>%f</focus>\n' % self.focus)
def set_focus(self, focus):
"""Set camera focus."""
self.cap.set(28, focus * 0.02) # CV_CAP_PROP_FOCUS
# min: 0.0 (infinity), max: 1.0 (1cm), increment:0.02 for C525 & C920
self.focus = self.cap.get(28)
        print('Camera %s | Focus set: %f' % (self.cam_id, self.focus))
def show_focus_window(self):
"""Show a window with a focus slider."""
cv2.namedWindow('Focus', cv2.WINDOW_FREERATIO)
cv2.resizeWindow('Focus', 600, 30)
        focus = self.focus
        cv2.createTrackbar('Camera %s focus' % self.cam_id, 'Focus', 0, 20,
                           self.set_focus)
        if focus:
            cv2.setTrackbarPos('Camera %s focus' % self.cam_id, 'Focus',
                               int(focus * 50))
def write_append_to_FileStorage(self, str_path, string):
"""Append a string to a .xml file opened with cv2.FileStorage.
Args:
str_path (str): the file path to append.
string (str): the string to append.
"""
f = open(str_path, 'r+')
ln = f.readline()
while ln != '</opencv_storage>\n':
ln = f.readline()
f.seek(f.tell() - 18)
f.write(string)
f.write('</opencv_storage>\n')
f.close()
def write_defaultConfig(self):
"""Write defaultConfig.xml with the ChAruco specific parameters."""
print('\n')
self.charuco_square_length = input_float(
'Enter the black square length in cm: ')
self.charuco_marker_size = input_float(
'Enter the Aruco marker length in cm: ')
defaultConfig_path = Path('./data/defaultConfig.xml')
file = cv2.FileStorage(
str(defaultConfig_path), cv2.FILE_STORAGE_WRITE)
file.write('charuco_dict', self.aruco_dict_num)
file.write('charuco_square_lenght', self.charuco_square_length)
# ARGH, spelling mistake in the opencv_interactive-calibration app..
# https://github.com/opencv/opencv/blob/master/apps/interactive-calibration/parametersController.cpp#L40
file.write('charuco_marker_size', self.charuco_marker_size)
file.write('max_frames_num', 40)
file.write('min_frames_num', 20)
# To write a right <camera_resolution> element we need to update
# OpenCV to add std::vect<int> support, see my fork and discussion:
# https://github.com/a1rb4Ck/opencv/commit/58a9adf0dd8ed5a7f1f712e99bf0f7b1340f39a8
# http://answers.opencv.org/question/199743/write-sequence-of-int-with-filestorage-in-python/
#
# Working code with the fork:
# file.write('camera_resolution', (
# [self.width, self.height]))
#
# <camera_resolution> is an Seq of Integers. In C++ it is written by <<
# Python bindings must be added to support seq of int as std::vect<int>
file.release()
# Without updating OpenCV, we seek to append <camera_resolution>
self.write_append_to_FileStorage(
str(defaultConfig_path),
string='<camera_resolution>\n %d %d</camera_resolution>\n' % (
self.width, self.height))
class PostureBuffer(object):
"""PostureBuffer class used to setup and use camera with lens correction.
Attributes:
window_length (int): Moving average window size (number of frame).
avg_max_std (float): Maximum moving average standard deviation.
buff_tvecs (Numpy array): Buffer of rotation vecs moving avg filter.
buff_rvecs (Numpy array): Buffer of translation vecs moving avg filter.
"""
def __init__(self, window_length=4, avg_max_std=0.1):
"""Initialize PostureBuffer class.
Args:
            window_length (int): Moving average window size (number of frames).
avg_max_std (float): Maximum moving average standard deviation.
"""
self.window_length = window_length
self.avg_max_std = avg_max_std
self.buff_tvecs = None # TODO: pre-allocate array of window_length
self.buff_rvecs = None
def update(self, rvecs, tvecs):
"""Update the moving average posture buffer and do the filtering.
Arguments:
rvecs (Numpy array): Posture rotation vectors (3x1).
tvecs (Numpy array): Posture translation vectors (3x1).
Returns:
rvecs (Numpy array): Filtered (averaged) posture rotation vectors.
tvecs (Numpy array): Filtered (avg) posture translation vectors.
"""
# Notes:
# https://github.com/avmeer/ComputerVisionAugmentedReality
# ○ ArUco tags are hard to pick out perfectly each time.
# ○ Position of the marker is noisy and the models would shake.
# ○ Averaging the last THREE position matrices helped to stabilize.
# Appending rvec and tvec postures to buffer
if self.buff_rvecs is None:
self.buff_rvecs = rvecs
else:
self.buff_rvecs = np.append(self.buff_rvecs, rvecs, axis=1)
if self.buff_tvecs is None:
self.buff_tvecs = tvecs
else:
self.buff_tvecs = np.append(self.buff_tvecs, tvecs, axis=1)
if self.buff_rvecs.shape[1] > self.window_length:
self.buff_rvecs = np.delete(self.buff_rvecs, 0, 1)
if self.buff_tvecs.shape[1] > self.window_length:
self.buff_tvecs = np.delete(self.buff_tvecs, 0, 1)
        # TODO: optimize delete without copying? np.delete always copies,
        # since numpy arrays cannot be resized in place.
        # Standard deviation filtering, in case the board had too big a displacement.
stdm = self.avg_max_std # Moving/Rolling average filter max std
rvecs_std = np.std(self.buff_rvecs, axis=1)
if rvecs_std[0] > stdm or rvecs_std[1] > stdm or rvecs_std[2] > stdm:
self.buff_rvecs = rvecs
else:
rvecs = np.mean(self.buff_rvecs, axis=1)
tvecs_std = np.std(self.buff_tvecs, axis=1)
if tvecs_std[0] > stdm or tvecs_std[1] > stdm or tvecs_std[2] > stdm:
self.buff_tvecs = tvecs
else:
tvecs = np.mean(self.buff_tvecs, axis=1)
return rvecs, tvecs
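if __name__ == '__main__':
    # Minimal usage sketch for PostureBuffer (illustrative only: the random
    # vectors below stand in for rvecs/tvecs from a real pose estimator).
    buf = PostureBuffer(window_length=4, avg_max_std=0.1)
    for _ in range(10):
        rvec = np.random.randn(3, 1) * 0.01  # fake rotation vector
        tvec = np.random.randn(3, 1) * 0.01  # fake translation vector
        rvec_f, tvec_f = buf.update(rvec, tvec)
    print('Filtered rvec:\n%s' % rvec_f)
    print('Filtered tvec:\n%s' % tvec_f)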
|
{"hexsha": "24dd74c29eb7efac9aec5b69f8c9360548d553d9", "size": 22318, "ext": "py", "lang": "Python", "max_stars_repo_path": "camera_fusion/CameraCorrected.py", "max_stars_repo_name": "a1rb4Ck/camera_fusion", "max_stars_repo_head_hexsha": "2c62ec2e9b26c88a5bc8180b3af63d39609b1c21", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 25, "max_stars_repo_stars_event_min_datetime": "2018-10-16T11:51:34.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-10T13:14:31.000Z", "max_issues_repo_path": "camera_fusion/CameraCorrected.py", "max_issues_repo_name": "a1rb4Ck/camera_fusion", "max_issues_repo_head_hexsha": "2c62ec2e9b26c88a5bc8180b3af63d39609b1c21", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-05-19T11:21:36.000Z", "max_issues_repo_issues_event_max_datetime": "2019-05-19T11:21:36.000Z", "max_forks_repo_path": "camera_fusion/CameraCorrected.py", "max_forks_repo_name": "a1rb4Ck/camera_fusion", "max_forks_repo_head_hexsha": "2c62ec2e9b26c88a5bc8180b3af63d39609b1c21", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2019-03-19T01:26:53.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-30T13:49:42.000Z", "avg_line_length": 43.16827853, "max_line_length": 120, "alphanum_fraction": 0.621113003, "include": true, "reason": "import numpy", "num_tokens": 5161}
|
# tests for tf_util
import numpy as np
import tensorflow as tf
from stable_baselines.common.tf_util import function, initialize, single_threaded_session, is_image
def test_function():
"""
test the function function in tf_util
"""
with tf.Graph().as_default():
x_ph = tf.placeholder(tf.int32, (), name="x")
y_ph = tf.placeholder(tf.int32, (), name="y")
z_ph = 3 * x_ph + 2 * y_ph
linear_fn = function([x_ph, y_ph], z_ph, givens={y_ph: 0})
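        # givens supplies a default for y_ph, so linear_fn(2) == 3 * 2 + 2 * 0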
with single_threaded_session():
initialize()
assert linear_fn(2) == 6
assert linear_fn(2, 2) == 10
def test_multikwargs():
"""
test the function function in tf_util
"""
with tf.Graph().as_default():
x_ph = tf.placeholder(tf.int32, (), name="x")
with tf.variable_scope("other"):
x2_ph = tf.placeholder(tf.int32, (), name="x")
z_ph = 3 * x_ph + 2 * x2_ph
linear_fn = function([x_ph, x2_ph], z_ph, givens={x2_ph: 0})
with single_threaded_session():
initialize()
assert linear_fn(2) == 6
assert linear_fn(2, 2) == 10
def test_image_detection():
rgb = (32, 64, 3)
gray = (43, 23, 1)
rgbd = (12, 32, 4)
invalid_1 = (32, 12)
invalid_2 = (12, 32, 6)
# TF checks
for shape in (rgb, gray, rgbd):
assert is_image(tf.placeholder(tf.uint8, shape=shape))
for shape in (invalid_1, invalid_2):
assert not is_image(tf.placeholder(tf.uint8, shape=shape))
# Numpy checks
for shape in (rgb, gray, rgbd):
assert is_image(np.ones(shape))
for shape in (invalid_1, invalid_2):
assert not is_image(np.ones(shape))
|
{"hexsha": "d71374da03ba1f88fb12b683b4348d5b7f556744", "size": 1713, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_tf_util.py", "max_stars_repo_name": "TreeKid/stable-baselines", "max_stars_repo_head_hexsha": "129c1958160b95962b887c312cd2273aed35df60", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3681, "max_stars_repo_stars_event_min_datetime": "2018-07-02T16:07:58.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T12:29:00.000Z", "max_issues_repo_path": "tests/test_tf_util.py", "max_issues_repo_name": "TreeKid/stable-baselines", "max_issues_repo_head_hexsha": "129c1958160b95962b887c312cd2273aed35df60", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1088, "max_issues_repo_issues_event_min_datetime": "2018-07-09T11:36:45.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T23:50:35.000Z", "max_forks_repo_path": "tests/test_tf_util.py", "max_forks_repo_name": "TreeKid/stable-baselines", "max_forks_repo_head_hexsha": "129c1958160b95962b887c312cd2273aed35df60", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 910, "max_forks_repo_forks_event_min_datetime": "2018-07-23T12:16:47.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-28T09:39:06.000Z", "avg_line_length": 27.6290322581, "max_line_length": 99, "alphanum_fraction": 0.5925277291, "include": true, "reason": "import numpy", "num_tokens": 487}
|
REBOL []
do %rugby.r
code: {add1: func [ n [number!]][n + 1]}
do get-rugby-service http://localhost:8002
extend-env [add1] code
|
{"hexsha": "6cb265a376424c35b217165cbc3d8a7c32b49c4e", "size": 129, "ext": "r", "lang": "R", "max_stars_repo_path": "xtest.r", "max_stars_repo_name": "mbk/rugby", "max_stars_repo_head_hexsha": "4c9507aae30b606e702349be8a3e6b9298e75ac7", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2016-05-08T08:57:41.000Z", "max_stars_repo_stars_event_max_datetime": "2016-05-08T08:57:41.000Z", "max_issues_repo_path": "xtest.r", "max_issues_repo_name": "mbk/rugby", "max_issues_repo_head_hexsha": "4c9507aae30b606e702349be8a3e6b9298e75ac7", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "xtest.r", "max_forks_repo_name": "mbk/rugby", "max_forks_repo_head_hexsha": "4c9507aae30b606e702349be8a3e6b9298e75ac7", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 18.4285714286, "max_line_length": 42, "alphanum_fraction": 0.6589147287, "num_tokens": 49}
|
// (C) Copyright John Maddock 2006.
// Use, modification and distribution are subject to the
// Boost Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef BOOST_MATH_TOOLS_POLYNOMIAL_HPP
#define BOOST_MATH_TOOLS_POLYNOMIAL_HPP
#ifdef _MSC_VER
#pragma once
#endif
#include <boost/assert.hpp>
#include <boost/math/tools/rational.hpp>
#include <boost/math/tools/real_cast.hpp>
#include <boost/math/special_functions/binomial.hpp>
#include <vector>
#include <ostream>
#include <algorithm>
namespace boost{ namespace math{ namespace tools{
template <class T>
T chebyshev_coefficient(unsigned n, unsigned m)
{
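   //
   // Returns the coefficient of x^m in the Chebyshev polynomial T_n(x);
   // it is zero when m > n or when n and m have opposite parity.
   //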
BOOST_MATH_STD_USING
if(m > n)
return 0;
if((n & 1) != (m & 1))
return 0;
if(n == 0)
return 1;
T result = T(n) / 2;
unsigned r = n - m;
r /= 2;
BOOST_ASSERT(n - 2 * r == m);
if(r & 1)
result = -result;
result /= n - r;
result *= boost::math::binomial_coefficient<T>(n - r, r);
result *= ldexp(1.0f, m);
return result;
}
template <class Seq>
Seq polynomial_to_chebyshev(const Seq& s)
{
// Converts a Polynomial into Chebyshev form:
typedef typename Seq::value_type value_type;
typedef typename Seq::difference_type difference_type;
Seq result(s);
difference_type order = s.size() - 1;
difference_type even_order = order & 1 ? order - 1 : order;
difference_type odd_order = order & 1 ? order : order - 1;
for(difference_type i = even_order; i >= 0; i -= 2)
{
value_type val = s[i];
for(difference_type k = even_order; k > i; k -= 2)
{
val -= result[k] * chebyshev_coefficient<value_type>(static_cast<unsigned>(k), static_cast<unsigned>(i));
}
val /= chebyshev_coefficient<value_type>(static_cast<unsigned>(i), static_cast<unsigned>(i));
result[i] = val;
}
result[0] *= 2;
for(difference_type i = odd_order; i >= 0; i -= 2)
{
value_type val = s[i];
for(difference_type k = odd_order; k > i; k -= 2)
{
val -= result[k] * chebyshev_coefficient<value_type>(static_cast<unsigned>(k), static_cast<unsigned>(i));
}
val /= chebyshev_coefficient<value_type>(static_cast<unsigned>(i), static_cast<unsigned>(i));
result[i] = val;
}
return result;
}
template <class Seq, class T>
T evaluate_chebyshev(const Seq& a, const T& x)
{
// Clenshaw's formula:
typedef typename Seq::difference_type difference_type;
T yk2 = 0;
T yk1 = 0;
T yk = 0;
for(difference_type i = a.size() - 1; i >= 1; --i)
{
yk2 = yk1;
yk1 = yk;
yk = 2 * x * yk1 - yk2 + a[i];
}
return a[0] / 2 + yk * x - yk1;
}
template <class T>
class polynomial
{
public:
// typedefs:
typedef typename std::vector<T>::value_type value_type;
typedef typename std::vector<T>::size_type size_type;
// construct:
polynomial(){}
template <class U>
polynomial(const U* data, unsigned order)
: m_data(data, data + order + 1)
{
}
template <class U>
polynomial(const U& point)
{
m_data.push_back(point);
}
// copy:
polynomial(const polynomial& p)
: m_data(p.m_data) { }
template <class U>
polynomial(const polynomial<U>& p)
{
for(unsigned i = 0; i < p.size(); ++i)
{
m_data.push_back(boost::math::tools::real_cast<T>(p[i]));
}
}
// access:
size_type size()const { return m_data.size(); }
size_type degree()const { return m_data.size() - 1; }
value_type& operator[](size_type i)
{
return m_data[i];
}
const value_type& operator[](size_type i)const
{
return m_data[i];
}
T evaluate(T z)const
{
      return boost::math::tools::evaluate_polynomial(&m_data[0], z, m_data.size());
}
std::vector<T> chebyshev()const
{
return polynomial_to_chebyshev(m_data);
}
// operators:
template <class U>
polynomial& operator +=(const U& value)
{
if(m_data.size() == 0)
m_data.push_back(value);
else
{
m_data[0] += value;
}
return *this;
}
template <class U>
polynomial& operator -=(const U& value)
{
if(m_data.size() == 0)
m_data.push_back(-value);
else
{
m_data[0] -= value;
}
return *this;
}
template <class U>
polynomial& operator *=(const U& value)
{
for(size_type i = 0; i < m_data.size(); ++i)
m_data[i] *= value;
return *this;
}
template <class U>
polynomial& operator +=(const polynomial<U>& value)
{
size_type s1 = (std::min)(m_data.size(), value.size());
for(size_type i = 0; i < s1; ++i)
m_data[i] += value[i];
for(size_type i = s1; i < value.size(); ++i)
m_data.push_back(value[i]);
return *this;
}
template <class U>
polynomial& operator -=(const polynomial<U>& value)
{
size_type s1 = (std::min)(m_data.size(), value.size());
for(size_type i = 0; i < s1; ++i)
m_data[i] -= value[i];
for(size_type i = s1; i < value.size(); ++i)
m_data.push_back(-value[i]);
return *this;
}
template <class U>
polynomial& operator *=(const polynomial<U>& value)
{
// TODO: FIXME: use O(N log(N)) algorithm!!!
BOOST_ASSERT(value.size());
polynomial base(*this);
*this *= value[0];
for(size_type i = 1; i < value.size(); ++i)
{
polynomial t(base);
t *= value[i];
size_type s = size() - i;
for(size_type j = 0; j < s; ++j)
{
m_data[i+j] += t[j];
}
for(size_type j = s; j < t.size(); ++j)
m_data.push_back(t[j]);
}
return *this;
}
private:
std::vector<T> m_data;
};
template <class T>
inline polynomial<T> operator + (const polynomial<T>& a, const polynomial<T>& b)
{
polynomial<T> result(a);
result += b;
return result;
}
template <class T>
inline polynomial<T> operator - (const polynomial<T>& a, const polynomial<T>& b)
{
polynomial<T> result(a);
result -= b;
return result;
}
template <class T>
inline polynomial<T> operator * (const polynomial<T>& a, const polynomial<T>& b)
{
polynomial<T> result(a);
result *= b;
return result;
}
template <class T, class U>
inline polynomial<T> operator + (const polynomial<T>& a, const U& b)
{
polynomial<T> result(a);
result += b;
return result;
}
template <class T, class U>
inline polynomial<T> operator - (const polynomial<T>& a, const U& b)
{
polynomial<T> result(a);
result -= b;
return result;
}
template <class T, class U>
inline polynomial<T> operator * (const polynomial<T>& a, const U& b)
{
polynomial<T> result(a);
result *= b;
return result;
}
template <class U, class T>
inline polynomial<T> operator + (const U& a, const polynomial<T>& b)
{
polynomial<T> result(b);
result += a;
return result;
}
template <class U, class T>
inline polynomial<T> operator - (const U& a, const polynomial<T>& b)
{
polynomial<T> result(a);
result -= b;
return result;
}
template <class U, class T>
inline polynomial<T> operator * (const U& a, const polynomial<T>& b)
{
polynomial<T> result(b);
result *= a;
return result;
}
template <class charT, class traits, class T>
inline std::basic_ostream<charT, traits>& operator << (std::basic_ostream<charT, traits>& os, const polynomial<T>& poly)
{
os << "{ ";
for(unsigned i = 0; i < poly.size(); ++i)
{
if(i) os << ", ";
os << poly[i];
}
os << " }";
return os;
}
} // namespace tools
} // namespace math
} // namespace boost
#endif // BOOST_MATH_TOOLS_POLYNOMIAL_HPP
|
{"hexsha": "8225736b9e5838303d7bcfccdd76d9094ff3e022", "size": 8032, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "ReactAndroid/build/third-party-ndk/boost/boost_1_57_0/boost/math/tools/polynomial.hpp", "max_stars_repo_name": "kimwoongkyu/react-native-0-36-1-woogie", "max_stars_repo_head_hexsha": "4fb2d44945a6305ae3ca87be3872f9432d16f1fb", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 460.0, "max_stars_repo_stars_event_min_datetime": "2016-01-13T12:49:34.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-20T04:10:40.000Z", "max_issues_repo_path": "ReactAndroid/build/third-party-ndk/boost/boost_1_57_0/boost/math/tools/polynomial.hpp", "max_issues_repo_name": "kimwoongkyu/react-native-0-36-1-woogie", "max_issues_repo_head_hexsha": "4fb2d44945a6305ae3ca87be3872f9432d16f1fb", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 197.0, "max_issues_repo_issues_event_min_datetime": "2017-07-06T16:53:59.000Z", "max_issues_repo_issues_event_max_datetime": "2019-05-31T17:57:51.000Z", "max_forks_repo_path": "ReactAndroid/build/third-party-ndk/boost/boost_1_57_0/boost/math/tools/polynomial.hpp", "max_forks_repo_name": "kimwoongkyu/react-native-0-36-1-woogie", "max_forks_repo_head_hexsha": "4fb2d44945a6305ae3ca87be3872f9432d16f1fb", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 148.0, "max_forks_repo_forks_event_min_datetime": "2016-01-17T03:16:43.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-17T12:20:36.000Z", "avg_line_length": 24.7901234568, "max_line_length": 121, "alphanum_fraction": 0.5734561753, "num_tokens": 2192}
|
[STATEMENT]
lemma dim_solution_set_not_zero_imp_infinite_solutions_no_homogeneous:
fixes A::"'a::{field, semiring_char_0}^'n::{mod_type}^'rows::{mod_type}"
assumes dim_not_0: "vec.dim (solution_set A 0) > 0"
and con: "consistent A b"
shows "infinite (solution_set A b)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. infinite (solution_set A b)
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. infinite (solution_set A b)
[PROOF STEP]
have "solution_set A 0 \<noteq> {0}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. solution_set A 0 \<noteq> {0}
[PROOF STEP]
using vec.dim_zero_subspace_eq[of "solution_set A 0"] dim_not_0
[PROOF STATE]
proof (prove)
using this:
vec.subspace (solution_set A 0) \<Longrightarrow> (vec.dim (solution_set A 0) = 0) = (solution_set A 0 = {0})
0 < vec.dim (solution_set A 0)
goal (1 subgoal):
1. solution_set A 0 \<noteq> {0}
[PROOF STEP]
by (metis less_numeral_extra(3) vec.dim_zero_eq')
[PROOF STATE]
proof (state)
this:
solution_set A 0 \<noteq> {0}
goal (1 subgoal):
1. infinite (solution_set A b)
[PROOF STEP]
from this
[PROOF STATE]
proof (chain)
picking this:
solution_set A 0 \<noteq> {0}
[PROOF STEP]
obtain x where x: "x \<in> solution_set A 0" and x_not_0: "x \<noteq> 0"
[PROOF STATE]
proof (prove)
using this:
solution_set A 0 \<noteq> {0}
goal (1 subgoal):
1. (\<And>x. \<lbrakk>x \<in> solution_set A 0; x \<noteq> 0\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using vec.subspace_0[OF homogeneous_solution_set_subspace, of A]
[PROOF STATE]
proof (prove)
using this:
solution_set A 0 \<noteq> {0}
0 \<in> solution_set A 0
goal (1 subgoal):
1. (\<And>x. \<lbrakk>x \<in> solution_set A 0; x \<noteq> 0\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
x \<in> solution_set A 0
x \<noteq> 0
goal (1 subgoal):
1. infinite (solution_set A b)
[PROOF STEP]
obtain y where y: "is_solution y A b"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>y. is_solution y A b \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using con
[PROOF STATE]
proof (prove)
using this:
consistent A b
goal (1 subgoal):
1. (\<And>y. is_solution y A b \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
unfolding consistent_def
[PROOF STATE]
proof (prove)
using this:
\<exists>x. is_solution x A b
goal (1 subgoal):
1. (\<And>y. is_solution y A b \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
is_solution y A b
goal (1 subgoal):
1. infinite (solution_set A b)
[PROOF STEP]
define f where "f = (\<lambda>n::nat. y + (of_nat n) *s x)"
[PROOF STATE]
proof (state)
this:
f = (\<lambda>n. y + of_nat n *s x)
goal (1 subgoal):
1. infinite (solution_set A b)
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. infinite (solution_set A b)
[PROOF STEP]
proof (unfold infinite_iff_countable_subset, rule exI[of _ f], rule conjI)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. inj f
2. range f \<subseteq> solution_set A b
[PROOF STEP]
show "inj f"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. inj f
[PROOF STEP]
unfolding inj_on_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>x\<in>UNIV. \<forall>y\<in>UNIV. f x = f y \<longrightarrow> x = y
[PROOF STEP]
unfolding f_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>xa\<in>UNIV. \<forall>ya\<in>UNIV. y + of_nat xa *s x = y + of_nat ya *s x \<longrightarrow> xa = ya
[PROOF STEP]
using x_not_0
[PROOF STATE]
proof (prove)
using this:
x \<noteq> 0
goal (1 subgoal):
1. \<forall>xa\<in>UNIV. \<forall>ya\<in>UNIV. y + of_nat xa *s x = y + of_nat ya *s x \<longrightarrow> xa = ya
[PROOF STEP]
by (auto simp: vec_eq_iff)
[PROOF STATE]
proof (state)
this:
inj f
goal (1 subgoal):
1. range f \<subseteq> solution_set A b
[PROOF STEP]
show "range f \<subseteq> solution_set A b"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. range f \<subseteq> solution_set A b
[PROOF STEP]
unfolding solution_set_rel[OF y]
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. range f \<subseteq> {y} + solution_set A 0
[PROOF STEP]
using homogeneous_solution_set_subspace
[PROOF STATE]
proof (prove)
using this:
vec.subspace (solution_set ?A 0)
goal (1 subgoal):
1. range f \<subseteq> {y} + solution_set A 0
[PROOF STEP]
using x
[PROOF STATE]
proof (prove)
using this:
vec.subspace (solution_set ?A 0)
x \<in> solution_set A 0
goal (1 subgoal):
1. range f \<subseteq> {y} + solution_set A 0
[PROOF STEP]
unfolding vec.subspace_def image_def f_def
[PROOF STATE]
proof (prove)
using this:
0 \<in> solution_set ?A 0 \<and> (\<forall>x\<in>solution_set ?A 0. \<forall>y\<in>solution_set ?A 0. x + y \<in> solution_set ?A 0) \<and> (\<forall>c. \<forall>x\<in>solution_set ?A 0. c *s x \<in> solution_set ?A 0)
x \<in> solution_set A 0
goal (1 subgoal):
1. {ya. \<exists>xa\<in>UNIV. ya = y + of_nat xa *s x} \<subseteq> {y} + solution_set A 0
[PROOF STEP]
by fast
[PROOF STATE]
proof (state)
this:
range f \<subseteq> solution_set A b
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
infinite (solution_set A b)
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 2270, "file": "Gauss_Jordan_System_Of_Equations", "length": 28}
|
[STATEMENT]
lemma get_update_eq [simp]:
"get (update a i v h) a = (get h a) [i := v]"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. get (update a i v h) a = (get h a)[i := v]
[PROOF STEP]
by (simp add: update_def)
|
{"llama_tokens": 98, "file": null, "length": 1}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# gauss2d.py
"""
Class for generating and fitting 2D Gaussian peaks
Supports both least squares and MLE fitting of Gaussian peaks
parameterized by a single width, widths along each axis, or widths
along arbitrary axes. Fitting can be done with manually specified
guesses, or initial guesses will be estimated from the data.
Supports parameter passing through dicts and tuples.
Copyright (c) 2016, David Hoffman
"""
import logging
# need to be able to deal with warnings
import warnings
# numpy for numerical
import numpy as np
from dphtools.utils.lm import curve_fit
# need basic curve fitting
from scipy.optimize import OptimizeWarning
# need measure to take image moments
from skimage.measure import moments
# need to detrend data before estimating parameters
from .utils import detrend, find_real_root_near_zero
# Eventually we'll want to abstract the useful, abstract bits of this
# class to a parent class called peak that will allow for multiple types
# of fits
# rho = cos(theta)
logger = logging.getLogger(__name__)
class Gauss2D(object):
"""
A class that encapsulates experimental data that is best modeled by a 2D
gaussian peak. It can estimate model parameters and perform a fit to the
data. Best fit parameters are stored in a dictionary that can be accessed
by helper functions.
Right now the class assumes that `data` has constant spacing
"""
def __init__(self, data, **kwargs):
"""
Holds experimental equi-spaced 2D-data best represented by a Gaussian
Parameters
----------
data : array_like
An array holding the experimental data, for now data is assumed to
have equal spacing
Returns
-------
out : object
A Gauss2D object holding the specified data. All other internal
            variables are initialized to `None`
"""
# Note that we are only passing a reference to the original data here
# so DO NOT modify this field
self._data = data
# set all internal fields to point to NONE
self._guess_params = None
self._popt = None
self._pcov = None
self.errmsg = None
self.ier = None
self.noise = None
self.residuals = None
super().__init__(**kwargs)
########################
# PROPERTY DEFINITIONS #
########################
@property
def data(self):
"""
Internal data
"""
# This attribute should be read-only, which means that it should return
# a copy of the data not a pointer.
return self._data.copy()
@property
def opt_params(self):
"""
Optimized parameters from the fit
"""
# This attribute should be read-only, which means that it should return
# a copy of the data not a pointer.
return self._popt.copy()
@property
def pcov(self):
"""
Covariance matrix of model parameters from the fit
"""
# This attribute should be read-only, which means that it should return
# a copy of the data not a pointer.
return self._pcov.copy()
@property
def error(self):
"""Gives whether there's an error or not."""
if self.ier in [1, 2, 3, 4]:
return False
else:
return True
@property
def guess_params(self):
"""Guessed parameters"""
return self._guess_params.copy()
#############################
# STATIC METHOD DEFINITIONS #
#############################
@classmethod
def gauss2D(cls, xdata_tuple, amp, mu0, mu1, sigma0, sigma1, rho, offset):
"""
A model function for a bivariate normal distribution (not normalized)
see http://mathworld.wolfram.com/BivariateNormalDistribution.html for
details
Parameters
----------
xdata_tuple : tuple of array_like objects
First element is x0 and second is x1, each usually from np.meshgrid
x0 and x1 must have the same shape
amp : float
Amplitude
mu0 : float
center x position
mu1 : float
center y position
sigma0 : float
x width
sigma1 : float
y width
rho : float
correlation between x and y (defines the angle the distributions
major axes make with the coordinate system)
offset : float
offset
Returns
-------
g : array_like
A matrix of values that represent a 2D Gaussian peak. `g` will have
the same dimensions as `x0` and `x1`
Note: Area = 2*amp*np.pi*sigma_x*sigma_y*np.sqrt(1-rho**2)
"""
(x0, x1) = xdata_tuple
if x0.shape != x1.shape:
# All functions assume that data is 2D
raise RuntimeError("Grid is mishapen")
        if not abs(rho) < 1:
            # log the offending value before coercing it
            coerced_rho = np.sign(rho) * 0.9999
            logger.warning(
                "rho cannot be greater than 1 or less than -1. Here rho is {}.".format(rho)
                + "\nCoercing to {}".format(coerced_rho)
            )
            rho = coerced_rho
z = (
((x0 - mu0) / sigma0) ** 2
- 2 * rho * (x0 - mu0) * (x1 - mu1) / (sigma0 * sigma1)
+ ((x1 - mu1) / sigma1) ** 2
)
g = offset + amp * np.exp(-z / (2 * (1 - rho ** 2)))
return g
@classmethod
def gauss2D_norot(cls, xdata_tuple, amp, x0, y0, sigma_x, sigma_y, offset):
"""A special case of gauss2D with rho = 0"""
# return the general form with a rho of 0
return cls.gauss2D(xdata_tuple, amp, x0, y0, sigma_x, sigma_y, 0.0, offset)
@classmethod
def gauss2D_sym(cls, xdata_tuple, amp, x0, y0, sigma_x, offset):
"""A special case of gauss2D_norot with sigma_x = sigma_y"""
# return the no rotation form with same sigmas
return cls.gauss2D_norot(xdata_tuple, amp, x0, y0, sigma_x, sigma_x, offset)
@classmethod
def model(cls, xdata_tuple, *args):
"""
Chooses the correct model function to use based on the number of
arguments passed to it
Parameters
----------
xdata_tuple : tuple of ndarrays (xx, yy)
The independent data
Returns
-------
        modeldata : ndarray
            The model evaluated at the points in `xdata_tuple`
Other Parameters
----------------
*args : model parameters
"""
num_args = len(args)
if num_args == 5:
return cls.gauss2D_sym(xdata_tuple, *args)
elif num_args == 6:
return cls.gauss2D_norot(xdata_tuple, *args)
elif num_args == 7:
return cls.gauss2D(xdata_tuple, *args)
else:
raise ValueError("len(args) = {}, number out of range!".format(num_args))
@classmethod
def gauss2D_jac(cls, params, xdata):
"""Jacobian for full model"""
x0 = xdata[0].ravel()
x1 = xdata[1].ravel()
amp, mu0, mu1, sigma0, sigma1, rho, offset = params
# calculate the main value, minus offset
# (derivative of constant is zero)
value = cls.gauss2D(xdata, *params).ravel() - offset
dydamp = value / amp
dydmu0 = (
value
* ((2 * (x0 - mu0)) / sigma0 ** 2 - (2 * rho * (x1 - mu1)) / (sigma0 * sigma1))
/ (2 * (1 - rho ** 2))
)
dydmu1 = (
value
* ((2 * (x1 - mu1)) / sigma1 ** 2 - (2 * rho * (x0 - mu0)) / (sigma0 * sigma1))
/ (2 * (1 - rho ** 2))
)
dydsigma0 = (
value
* (
((x0 - mu0) ** 2 / sigma0 ** 3)
- ((2 * rho * (x0 - mu0) * (x1 - mu1)) / (sigma0 ** 2 * sigma1))
)
/ (2 * (1 - rho ** 2))
)
dydsigma1 = (
value
* (
((x1 - mu1) ** 2 / sigma1 ** 3)
- ((2 * rho * (x0 - mu0) * (x1 - mu1)) / (sigma1 ** 2 * sigma0))
)
/ (2 * (1 - rho ** 2))
)
dydrho = value * (
((x0 - mu0) * (x1 - mu1)) / ((1 - rho ** 2) * sigma0 * sigma1)
+ (
rho
* (
-((x0 - mu0) ** 2 / sigma0 ** 2)
+ (2 * rho * (x0 - mu0) * (x1 - mu1)) / (sigma0 * sigma1)
- (x1 - mu1) ** 2 / sigma1 ** 2
)
)
/ ((1 - rho ** 2) ** 2)
)
# now return
return np.vstack(
(dydamp, dydmu0, dydmu1, dydsigma0, dydsigma1, dydrho, np.ones_like(value))
).T
@classmethod
def gauss2D_norot_jac(cls, params, xdata):
"""Jacobian for no rotation model"""
x = xdata[0].ravel()
y = xdata[1].ravel()
amp, x0, y0, sigma_x, sigma_y, offset = params
value = cls.gauss2D_norot(xdata, *params).ravel() - offset
dydamp = value / amp
dydx0 = value * (x - x0) / sigma_x ** 2
dydsigmax = value * (x - x0) ** 2 / sigma_x ** 3
dydy0 = value * (y - y0) / sigma_y ** 2
dydsigmay = value * (y - y0) ** 2 / sigma_y ** 3
return np.vstack((dydamp, dydx0, dydy0, dydsigmax, dydsigmay, np.ones_like(value))).T
# the below works, but speed up only for above
# new_params = np.insert(params, 5, 0)
# return np.delete(cls.gauss2D_jac(new_params, xdata), 5, axis=0)
@classmethod
def gauss2D_sym_jac(cls, params, xdata):
"""Jacobian for symmetric model"""
x = xdata[0].ravel()
y = xdata[1].ravel()
amp, x0, y0, sigma_x, offset = params
value = cls.gauss2D_sym(xdata, *params).ravel() - offset
dydamp = value / amp
dydx0 = value * (x - x0) / sigma_x ** 2
dydsigmax = value * (x - x0) ** 2 / sigma_x ** 3
dydy0 = value * (y - y0) / sigma_x ** 2
return np.vstack((dydamp, dydx0, dydy0, dydsigmax, np.ones_like(value))).T
# new_params = np.insert(params, 4, 0)
# new_params = np.insert(new_params, 4, params[3])
# return np.delete(cls.gauss2D_jac(new_params, xdata), (4, 5), axis=0)
@classmethod
def model_jac(cls, xdata_tuple, *params):
"""Chooses the correct model jacobian function to use based on the
number of arguments passed to it
Parameters
----------
xdata_tuple : tuple of ndarrays (xx, yy)
The independent data
Returns
-------
        modeldata : ndarray
            The model jacobian evaluated at the points in `xdata_tuple`
Other Parameters
----------------
*args : model parameters
"""
num_args = len(params)
if num_args == 5:
return cls.gauss2D_sym_jac(params, xdata_tuple)
elif num_args == 6:
return cls.gauss2D_norot_jac(params, xdata_tuple)
elif num_args == 7:
return cls.gauss2D_jac(params, xdata_tuple)
else:
raise RuntimeError("len(params) = {}, number out of range!".format(num_args))
@classmethod
def gen_model(cls, data, *args):
"""
A helper method to generate a fit if needed, useful for generating
residuals
Parameters
----------
*args : tuple
passed directly to `model`
Returns
-------
out : ndarray
Fit generated by the model.
"""
# generate data grid
yy, xx = np.indices(data.shape)
xdata_tuple = (xx, yy)
# return model
return cls.model(xdata_tuple, *args)
@property
def fit_model(self):
"""
Generate the model from this instance, if the fit hasn't been performed
yet an error will be raised
"""
return self.gen_model(self._data, *self._popt)
def area(self, **kwargs):
"""
A function for calculating the area of the model peak
Area = 2*amp*np.pi*sigma_x*sigma_y*np.sqrt(1-rho**2)
Parameters
----------
kwargs : dictionary
key word arguments to pass to `optimize_params`, only used if
            `opt_params` has not been calculated yet.
Returns
-------
Area of the peak based on fit parameters.
"""
# this is for convenience so that the area can
# be returned quickly, i.e. a = Gauss2D(data).area()
if self._popt is None:
self.optimize_params(**kwargs)
# extract the optimal parameters
opt_params = self.opt_params
num_params = len(opt_params)
# depending on the specified model the area is calculated
if num_params == 7:
return abs(
2
* np.pi
* opt_params[0]
* opt_params[3]
* opt_params[4]
* np.sqrt(1 - opt_params[5] ** 2)
)
elif num_params == 6:
return abs(2 * np.pi * opt_params[0] * opt_params[3] * opt_params[4])
else:
return abs(2 * np.pi * opt_params[0] * opt_params[3] ** 2)
def optimize_params(
self,
guess_params=None,
modeltype="norot",
quiet=False,
bounds=None,
checkparams=True,
detrenddata=False,
fittype="ls",
):
"""
A function that will optimize the parameters for a 2D Gaussian model
using either a least squares or maximum likelihood method
Parameters
----------
guess_params : numeric sequence, or dict (optional)
The initial guesses for the model parameters. The number of
parameters determines the modeltype (see notes). If no
guesses are provided they will be estimated from the data.
The estimation is only valid for positive data
modeltype : {'sym', 'norot', 'full'}, default 'norot'
Determines the model to guess parameters for
fittype : {'ls', 'mle'}, default 'ls'
Specifies if a least squares fit ('ls') or maximum likelihood
estimation ('mle') should be performed
quiet : bool
Determines the verbosity of the output
        bounds : 2-tuple of array_like, default (-np.inf, np.inf)
            See `scipy.optimize.curve_fit` for details; if modeltype is
            'full' then the bounds for $\rho$ are automatically set to
            (-1, 1) while the rest are left as is
checkparams : bool
Checks the parameters for validity after the fit, maybe replaced
in the future by more intelligent default bounding
detrenddata : bool
Determines if the data should be detrended before parameter
estimation, may be removed in the future.
Returns
-------
opt_params : ndarray
The optimized parameters from the fit. If the fit wasn't
successful a series of np.nan's will be returned.
Notes
-----
This function will call scipy.optimize to optimize the parameters of
        the model function.
        MLE assumes a Poisson noise model, while LS assumes a Gaussian noise model.
"""
# Test if we've been provided guess parameters
# Need to test if the variable is good or not.
if guess_params is None:
# if not we generate them
guess_params = self.estimate_params(detrenddata=detrenddata)
if modeltype.lower() == "sym":
guess_params = np.delete(guess_params, (4, 5))
elif modeltype.lower() == "norot":
guess_params = np.delete(guess_params, 5)
elif modeltype.lower() == "full":
pass
else:
raise RuntimeError("modeltype is not one of: 'sym', 'norot', 'full'")
# handle the case where the user passes a dictionary of values.
if isinstance(guess_params, dict):
guess_params = self.dict_to_params(guess_params)
self._guess_params = guess_params
# pull the data attribute for use
data = self._data
# We need to generate the x an y coordinates for the fit
# remember that image data generally has the higher dimension first
# as do most python objects
yy, xx = np.indices(data.shape)
# define our function for fitting
def model_ravel(*args):
return self.model(*args).ravel()
# TODO: We also need a function to clear nan values from data and the
# associated xx and yy points.
# Here we fit the data but we catch any errors and instead set the
# optimized parameters to nan.
# full_output is an undocumented key word of `curve_fit` if set to true
# it returns the same output as leastsq's would, if False, as it is by
# default it returns only popt and pcov.
# we need to set the bounds if rho is available
if bounds is None:
# TODO: we can make better defaults, keep sigma_x/sigma_y positive,
# make sure amp is positive, etc...
# set to default for all params
if len(guess_params) == 7:
# make sure rho is restricted
ub = np.array((np.inf,) * 5 + (1, np.inf))
bounds = (-1 * ub, ub)
else:
bounds = (-np.inf, np.inf)
with warnings.catch_warnings():
# we'll catch this error later and alert the user with a printout
warnings.simplefilter("ignore", OptimizeWarning)
if fittype.lower() == "mle":
meth = "mle"
elif fittype.lower() == "ls":
# default to scipy
meth = None
else:
raise RuntimeError("fittype is not one of: 'ls', 'mle'")
try:
popt, pcov, infodict, errmsg, ier = curve_fit(
model_ravel,
(xx, yy),
data.ravel(),
p0=guess_params,
bounds=bounds,
full_output=True,
jac=self.model_jac,
method=meth,
)
except RuntimeError as e:
# print(e)
# now we need to re-parse the error message to set all the
# flags pull the message
self.errmsg = e.args[0].replace("Optimal parameters not found: ", "")
# run through possibilities for failure
errors = {
0: "Improper",
5: "maxfev",
6: "ftol",
7: "xtol",
8: "gtol",
"unknown": "Unknown",
}
# set the error flag correctly
for k, v in errors.items():
if v in self.errmsg:
self.ier = k
except ValueError as e:
# This except is for bounds checking gone awry
self.errmsg = str(e)
self.ier = -1
else:
# if we save the infodict as well then we'll start using a lot
# of memory
# self.infodict = infodict
self.errmsg = errmsg
self.ier = ier
if checkparams:
self._check_params(popt)
# check to see if the covariance is bunk
if not np.isfinite(pcov).all():
self.errmsg = """
Covariance of the parameters could not be estimated
"""
self.ier = 9
# save parameters for later use
# if the error flag is good, proceed
if self.ier in [1, 2, 3, 4]:
# make sure sigmas are positive
# if popt.size > 5:
# popt[3:5] = abs(popt[3:5])
# else:
# popt[3] = abs(popt[3])
self._popt = popt
self._pcov = pcov
else:
if not quiet:
logger.warning("Fitting error: " + self.errmsg)
self._popt = guess_params * np.nan
self._pcov = np.zeros((len(guess_params), len(guess_params))) * np.nan
if not self.error:
# if no fitting error calc residuals and noise
self.residuals = self.data - self.fit_model
self.noise = self.residuals.std()
else:
# if there is an error set the noise to nan
self.noise = np.nan
return self.opt_params
def _check_params(self, popt):
"""
A method that checks if optimized parameters are valid
and sets the fit flag
"""
data = self.data
# check to see if the gaussian is bigger than its fitting window by a
# large amount, generally the user is advised to enlarge the fitting
# window or disregard the results of the fit.
sigma_msg = "Sigma larger than ROI"
max_s = max(data.shape)
if len(popt) < 6:
if abs(popt[3]) > max_s:
self.errmsg = sigma_msg
self.ier = 10
else:
if abs(popt[3]) > max_s or abs(popt[4]) > max_s:
self.errmsg = sigma_msg
self.ier = 10
# check to see if the amplitude makes sense
# it must be greater than 0 but it can't be too much larger than the
# entire range of data values
if not (0 < popt[0] < (data.max() - data.min()) * 5):
self.errmsg = "Amplitude unphysical, amp = {:.3f}," " data range = {:.3f}"
# cast to float to avoid memmap problems
            self.errmsg = self.errmsg.format(popt[0], float(data.max() - data.min()))
self.ier = 11
def estimate_params(self, detrenddata=False):
"""
        Estimate the parameters that best model the data using its moments
Parameters
----------
detrenddata : bool
a keyword that determines whether data should be detrended first.
Detrending takes *much* longer than not. Probably only useful for
large fields of view.
Returns
-------
params : array_like
params[0] = amp
params[1] = x0
params[2] = y0
params[3] = sigma_x
params[4] = sigma_y
params[5] = rho
params[6] = offset
Notes
-----
Bias is removed from data using detrend in the util module.
"""
# initialize the parameter array
params = np.zeros(7)
# iterate at most 10 times
for i in range(10):
# detrend data
if detrenddata:
                # only try to remove a plane; anything more should be done
                # before the data is passed in at object instantiation.
data, bg = detrend(self._data.copy(), degree=1)
offset = bg.mean()
amp = data.max()
else:
data = self._data.astype(float)
offset = data.min()
amp = data.max() - offset
# calculate the moments up to second order
M = moments(data, 2)
# calculate model parameters from the moments
            # https://en.wikipedia.org/wiki/Image_moment#Central_moments
xbar = M[1, 0] / M[0, 0]
ybar = M[0, 1] / M[0, 0]
xvar = M[2, 0] / M[0, 0] - xbar ** 2
yvar = M[0, 2] / M[0, 0] - ybar ** 2
covar = M[1, 1] / M[0, 0] - xbar * ybar
# place the model parameters in the return array
params[:3] = amp, xbar, ybar
params[3] = np.sqrt(np.abs(xvar))
params[4] = np.sqrt(np.abs(yvar))
params[5] = covar / np.sqrt(np.abs(xvar * yvar))
params[6] = offset
if abs(params[5]) < 1 or not detrenddata:
# if the rho is valid or we're not detrending data,
# break the loop.
break
# save estimate for later use
self._guess_params = params
# return parameters to the caller as a `copy`, we don't want them to
# change the internal state
return params.copy()
@classmethod
def _params_dict(cls, params):
"""
Helper function to return a version of params in dictionary form to
make the user interface a little more friendly
Examples
--------
>>> Gauss2D._params_dict((1, 2, 3, 4, 5, 6, 7)) == {
... 'amp': 1,
... 'x0': 2,
... 'y0': 3,
... 'sigma_x': 4,
... 'sigma_y': 5,
... 'rho': 6,
... 'offset': 7}
True
"""
keys = ["amp", "x0", "y0", "sigma_x", "sigma_y", "rho", "offset"]
num_params = len(params)
# adjust the dictionary size
if num_params < 7:
keys.remove("rho")
if num_params < 6:
keys.remove("sigma_y")
return {k: p for k, p in zip(keys, params)}
def params_errors_dict(self):
"""Return a dictionary of errors"""
keys = ["amp_e", "x0_e", "y0_e", "sigma_x_e", "sigma_y_e", "rho_e", "offset_e"]
# pull the variances of the parameters from the covariance matrix
# take the sqrt to get the errors
with np.errstate(invalid="ignore"):
params = np.sqrt(np.diag(self.pcov))
num_params = len(params)
# adjust the dictionary size
if num_params < 7:
keys.remove("rho_e")
if num_params < 6:
keys.remove("sigma_y_e")
return {k: p for k, p in zip(keys, params)}
@classmethod
def dict_to_params(cls, d):
"""
Helper function to return a version of params in dictionary form
to make the user interface a little more friendly
>>> Gauss2D.dict_to_params({
... 'amp': 1,
... 'x0': 2,
... 'y0': 3,
... 'sigma_x': 4,
... 'sigma_y': 5,
... 'rho': 6,
... 'offset': 7})
array([1, 2, 3, 4, 5, 6, 7])
"""
keys = ["amp", "x0", "y0", "sigma_x", "sigma_y", "rho", "offset"]
values = []
for k in keys:
try:
values.append(d[k])
except KeyError:
pass
return np.array(values)
def opt_params_dict(self):
return self._params_dict(self.opt_params)
def all_params_dict(self):
"""Return the parameters and there estimated errors all in one dictionary
the errors will have the same key plus a '_e'"""
params_dict = self.opt_params_dict()
params_dict.update(self.params_errors_dict())
return params_dict
def guess_params_dict(self):
"""
>>> import numpy as np
>>> myg = Gauss2D(np.random.randn(10, 10))
>>> myg.guess_params = np.array([1, 2, 3, 4, 5, 6, 7])
>>> myg.guess_params_dict() == {
... 'amp': 1,
... 'x0': 2,
... 'y0': 3,
... 'sigma_x': 4,
... 'sigma_y': 5,
... 'rho': 6,
... 'offset': 7}
True
"""
return self._params_dict(self.guess_params)
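# Illustrative aside (not part of the original API): a standalone sketch of
# the moment-based initial guess that Gauss2D.estimate_params obtains from
# skimage.measure.moments, redone here with plain numpy for clarity.
def _moment_estimates(img):
    """Return (xbar, ybar, sigma_x, sigma_y, rho) from raw image moments."""
    yy, xx = np.indices(img.shape)
    m00 = img.sum()
    xbar = (img * xx).sum() / m00  # centroid from the first moments
    ybar = (img * yy).sum() / m00
    xvar = (img * (xx - xbar) ** 2).sum() / m00  # central second moments
    yvar = (img * (yy - ybar) ** 2).sum() / m00
    covar = (img * (xx - xbar) * (yy - ybar)).sum() / m00
    rho = covar / np.sqrt(xvar * yvar)  # correlation coefficient
    return xbar, ybar, np.sqrt(xvar), np.sqrt(yvar), rho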
class Gauss2Dz(Gauss2D):
"""
A class that encapsulates experimental data that is best modeled by a 2D
gaussian peak. It can estimate model parameters and perform a fit to the
data. Best fit parameters are stored in a dictionary that can be accessed
by helper functions.
Right now the class assumes that `data` has constant spacing
"""
def __init__(self, data, poly_coefs_df):
"""
Holds experimental equi-spaced 2D-data best represented by a Gaussian
Parameters
----------
data : array_like
An array holding the experimental data, for now data is assumed to
have equal spacing
poly_coefs_df : pd.DataFrame
A data frame holding the coefficients of polynomials
Returns
-------
out : object
            A Gauss2Dz object holding the specified data. All other internal
            variables are initialized to `None`
"""
# Note that we are only passing a reference to the original data here
# so DO NOT modify this field
super().__init__(data)
# set up polynomial functions for relating z to sigmax and y
self.sigma_x_poly = np.poly1d(poly_coefs_df.sigma_x)
self.sigma_y_poly = np.poly1d(poly_coefs_df.sigma_y)
# we need their derivatives too for the jacobian
self.sigma_x_polyd = self.sigma_x_poly.deriv()
self.sigma_y_polyd = self.sigma_y_poly.deriv()
@property
def fit_model(self):
yy, xx = np.indices(self.data.shape)
xdata_tuple = (xx, yy)
# return model
return self.model(xdata_tuple, *self._popt)
def model(self, xdata_tuple, amp, x0, y0, z0, offset):
"""
        Model a 2D Gaussian whose widths are set by z0 through the
        sigma_x/sigma_y calibration polynomials
Parameters
----------
xdata_tuple : tuple of ndarrays (xx, yy)
The independent data
Returns
-------
        modeldata : ndarray
            The model evaluated at the points in `xdata_tuple`
Other Parameters
----------------
*args : model parameters
"""
args = amp, x0, y0, self.sigma_x_poly(z0), self.sigma_y_poly(z0), offset
return self.gauss2D_norot(xdata_tuple, *args)
def model_jac(self, xdata_tuple, *params):
"""Chooses the correct model jacobian function to use based on the
number of arguments passed to it
Parameters
----------
xdata_tuple : tuple of ndarrays (xx, yy)
The independent data
Returns
-------
        modeldata : ndarray
            The model jacobian evaluated at the points in `xdata_tuple`
Other Parameters
----------------
*args : model parameters
"""
x = xdata_tuple[0].ravel()
y = xdata_tuple[1].ravel()
amp, x0, y0, z0, offset = params
sigma_x, sigma_y = self.sigma_x_poly(z0), self.sigma_y_poly(z0)
sigma_xd, sigma_yd = self.sigma_x_polyd(z0), self.sigma_y_polyd(z0)
value = self.model(xdata_tuple, *params).ravel() - offset
dydamp = value / amp
dydx0 = value * (x - x0) / sigma_x ** 2
dydsigmax = value * (x - x0) ** 2 / sigma_x ** 3
dydy0 = value * (y - y0) / sigma_y ** 2
dydsigmay = value * (y - y0) ** 2 / sigma_y ** 3
dydz0 = dydsigmax * sigma_xd + dydsigmay * sigma_yd
return np.vstack((dydamp, dydx0, dydy0, dydz0, np.ones_like(value))).T
# the below works, but speed up only for above
# new_params = np.insert(params, 5, 0)
# return np.delete(cls.gauss2D_jac(new_params, xdata), 5, axis=0)
def area(self, **kwargs):
raise NotImplementedError
def optimize_params(
self,
guess_params=None,
modeltype="norot",
quiet=False,
bounds=None,
checkparams=True,
detrenddata=False,
fittype="ls",
):
# Test if we've been provided guess parameters
# Need to test if the variable is good or not.
if guess_params is None:
# if not we generate them
guess_params = self.estimate_params(detrenddata=detrenddata)
# handle the case where the user passes a dictionary of values.
if isinstance(guess_params, dict):
guess_params = self.dict_to_params(guess_params)
return super().optimize_params(
guess_params=guess_params,
quiet=quiet,
bounds=bounds,
checkparams=checkparams,
detrenddata=detrenddata,
fittype=fittype,
)
optimize_params.__doc__ = Gauss2D.optimize_params.__doc__
def _check_params(self, popt):
"""
A method that checks if optimized parameters are valid
and sets the fit flag
"""
data = self.data
# check to see if the amplitude makes sense
# it must be greater than 0 but it can't be too much larger than the
# entire range of data values
if not (0 < popt[0] < (data.max() - data.min()) * 5):
self.errmsg = "Amplitude unphysical, amp = {:.3f}," " data range = {:.3f}"
# cast to float to avoid memmap problems
            self.errmsg = self.errmsg.format(popt[0], float(data.max() - data.min()))
self.ier = 11
def estimate_params(self, detrenddata=False):
"""
        Estimate the parameters that best model the data using its moments
Parameters
----------
detrenddata : bool
a keyword that determines whether data should be detrended first.
Detrending takes *much* longer than not. Probably only useful for
large fields of view.
Returns
-------
params : array_like
params[0] = amp
params[1] = x0
params[2] = y0
params[3] = z0
params[4] = offset
Notes
-----
Bias is removed from data using detrend in the util module.
"""
gauss2d_params = super().estimate_params(detrenddata)
amp, x0, y0, sigma_x, sigma_y, rho, offset = gauss2d_params
# find z estimates based on sigmas
zx = find_real_root_near_zero(self.sigma_x_poly - sigma_x)
zy = find_real_root_near_zero(self.sigma_y_poly - sigma_y)
possible_z = np.array((zx, zy))
# remove nans
possible_z = possible_z[np.isfinite(possible_z)]
# choose the estimate closest to zero.
if len(possible_z):
z0 = possible_z[np.abs(possible_z).argmin()]
else:
z0 = 0
# save estimate for later use
params = self._guess_params = np.array([amp, x0, y0, z0, offset])
# return parameters to the caller as a `copy`, we don't want them to
# change the internal state
return params.copy()
def gen_model(self, *args):
"""
A helper method to generate a fit if needed, useful for generating
residuals
Parameters
----------
*args : tuple
passed directly to `model`
Returns
-------
out : ndarray
Fit generated by the model.
"""
# generate data grid
yy, xx = np.indices(self.data.shape)
xdata_tuple = (xx, yy)
# return model
return self.model(xdata_tuple, *args)
@classmethod
def _params_dict(cls, params):
"""
Helper function to return a version of params in dictionary form to
make the user interface a little more friendly
Examples
--------
        >>> Gauss2Dz._params_dict((1, 2, 3, 4, 5)) == {
        ... 'amp': 1,
        ... 'x0': 2,
        ... 'y0': 3,
        ... 'z0': 4,
        ... 'offset': 5}
        True
"""
keys = ["amp", "x0", "y0", "z0", "offset"]
return {k: p for k, p in zip(keys, params)}
def params_errors_dict(self):
"""Return a dictionary of errors"""
keys = ["amp_e", "x0_e", "y0_e", "z0_e", "offset_e"]
# pull the variances of the parameters from the covariance matrix
# take the sqrt to get the errors
with np.errstate(invalid="ignore"):
params = np.sqrt(np.diag(self.pcov))
return {k: p for k, p in zip(keys, params)}
@classmethod
def dict_to_params(cls, d):
"""
Helper function to return a version of params in dictionary form
to make the user interface a little more friendly
        >>> Gauss2Dz.dict_to_params({
        ... 'amp': 1,
        ... 'x0': 2,
        ... 'y0': 3,
        ... 'z0': 4,
        ... 'offset': 5})
        array([1, 2, 3, 4, 5])
"""
keys = ["amp", "x0", "y0", "z0", "offset"]
values = []
for k in keys:
try:
values.append(d[k])
except KeyError:
pass
return np.array(values)
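# Illustrative demo, a minimal sketch assuming only numpy: generate a noisy
# symmetric Gaussian, estimate parameters from moments, then fit. The
# synthetic data and `_demo` name are not part of the original module.
def _demo():
    """Make data, add noise, estimate and fit, then print the results."""
    yy, xx = np.indices((64, 64))
    truth = (10.0, 32.0, 28.0, 4.0, 1.0)  # amp, x0, y0, sigma_x, offset
    data = Gauss2D.gauss2D_sym((xx, yy), *truth)
    data = data + np.random.randn(*data.shape) * 0.2  # add gaussian noise
    g = Gauss2D(data)
    g.optimize_params(modeltype="sym")
    print("guess:", g.guess_params_dict())
    print("fit  :", g.opt_params_dict())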
if __name__ == "__main__":
    # TODO: plot data, guess, fit and residuals
    _demo()
|
{"hexsha": "30bbb83c9edf3039bfdc37510ad437e42ef27c8a", "size": 36593, "ext": "py", "lang": "Python", "max_stars_repo_path": "peaks/gauss2d.py", "max_stars_repo_name": "david-hoffman/peaks", "max_stars_repo_head_hexsha": "b31a13fcb93005ed01e5295389f91491bafc71cd", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2019-10-15T00:04:47.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-29T15:13:34.000Z", "max_issues_repo_path": "peaks/gauss2d.py", "max_issues_repo_name": "david-hoffman/peaks", "max_issues_repo_head_hexsha": "b31a13fcb93005ed01e5295389f91491bafc71cd", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-02-24T05:21:29.000Z", "max_issues_repo_issues_event_max_datetime": "2021-02-24T05:21:49.000Z", "max_forks_repo_path": "peaks/gauss2d.py", "max_forks_repo_name": "david-hoffman/peaks", "max_forks_repo_head_hexsha": "b31a13fcb93005ed01e5295389f91491bafc71cd", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-07-01T14:15:16.000Z", "max_forks_repo_forks_event_max_datetime": "2019-10-15T00:04:53.000Z", "avg_line_length": 32.643175736, "max_line_length": 93, "alphanum_fraction": 0.5351296696, "include": true, "reason": "import numpy,from scipy", "num_tokens": 9048}
|
import numpy as np
from libsvmdata.datasets import fetch_libsvm
from celer import LogisticRegression
# from sklearn.linear_model import LogisticRegression
from sparse_ho.models import SparseLogreg
from sparse_ho.criterion import LogisticMulticlass
from sparse_ho import ImplicitForward
from sparse_ho.optimizers import GradientDescent
from sparse_ho.ho import grad_search, hyperopt_wrapper
from sparse_ho.utils import Monitor
from sparse_ho.datasets.utils_datasets import (
alpha_max_multiclass, clean_dataset, get_splits)
# load data
n_samples = 1_000
n_features = 1_000
# n_samples = 1_100
# n_features = 3_200
# X, y = fetch_libsvm('sensit')
# X, y = fetch_libsvm('usps')
X, y = fetch_libsvm('rcv1_multiclass')
# X, y = fetch_libsvm('sector_scale')
# X, y = fetch_libsvm('sector')
# X, y = fetch_libsvm('smallNORB')
# X, y = fetch_libsvm('mnist')
# clean data and subsample
X, y = clean_dataset(X, y, n_samples, n_features)
idx_train, idx_val, idx_test = get_splits(X, y)
n_samples, n_features = X.shape
algo = ImplicitForward(n_iter_jac=1000)
estimator = LogisticRegression(
C=1, fit_intercept=False, warm_start=True, max_iter=2000, verbose=False)
model = SparseLogreg(estimator=estimator)
logit_multiclass = LogisticMulticlass(
idx_train, idx_val, algo, idx_test=idx_test)
alpha_max, n_classes = alpha_max_multiclass(X, y)
tol = 1e-5
n_alphas = 10
p_alphas = np.geomspace(1, 0.001, n_alphas)
p_alphas = np.tile(p_alphas, (n_classes, 1))
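# p_alphas has shape (n_classes, n_alphas): one geometric grid of
# regularization fractions (from 1 down to 1e-3 of alpha_max) per class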
print("###################### GRID SEARCH ###################")
monitor_grid = Monitor()
for i in range(n_alphas):
log_alpha_i = np.log(alpha_max * p_alphas[:, i])
logit_multiclass.get_val(
model, X, y, log_alpha_i, None, monitor_grid, tol)
# Deliberate early stop: only run the grid search above; remove this
# line to also run the grad search and hyperopt blocks below.
raise SystemExit
print("###################### GRAD SEARCH LS ###################")
n_outer = 100
model = SparseLogreg(estimator=estimator)
logit_multiclass = LogisticMulticlass(idx_train, idx_val, algo, idx_test=idx_test)
monitor = Monitor()
log_alpha0 = np.ones(n_classes) * np.log(0.1 * alpha_max)
idx_min = np.argmin(np.array(monitor_grid.objs))
log_alpha0 = monitor_grid.log_alphas[idx_min]
optimizer = GradientDescent(
n_outer=n_outer, step_size=None, p_grad_norm=0.1, tol=tol)
grad_search(
algo, logit_multiclass, model, optimizer, X, y, log_alpha0, monitor)
print("###################### USE HYPEROPT ###################")
log_alpha_max = np.log(alpha_max)
log_alpha_min = np.log(alpha_max / 10_000)
monitor_hyp = Monitor()
hyperopt_wrapper(
algo, logit_multiclass, model, X, y, log_alpha_min, log_alpha_max,
monitor_hyp, tol=tol, size_space=n_classes, max_evals=10)
|
{"hexsha": "01effe22c116fec5b3631b30a6556f6f5fea913e", "size": 2599, "ext": "py", "lang": "Python", "max_stars_repo_path": "expes/multiclass/plot_multiclass.py", "max_stars_repo_name": "LeoIV/sparse-ho", "max_stars_repo_head_hexsha": "f0a5792766a7f0c03bba28cddb983621174cb4ea", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 27, "max_stars_repo_stars_event_min_datetime": "2020-04-06T13:03:43.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-28T11:00:13.000Z", "max_issues_repo_path": "expes/multiclass/plot_multiclass.py", "max_issues_repo_name": "LeoIV/sparse-ho", "max_issues_repo_head_hexsha": "f0a5792766a7f0c03bba28cddb983621174cb4ea", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 120, "max_issues_repo_issues_event_min_datetime": "2020-04-05T08:10:14.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-30T09:10:28.000Z", "max_forks_repo_path": "expes/multiclass/plot_multiclass.py", "max_forks_repo_name": "LeoIV/sparse-ho", "max_forks_repo_head_hexsha": "f0a5792766a7f0c03bba28cddb983621174cb4ea", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2020-04-05T08:06:58.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-09T10:04:10.000Z", "avg_line_length": 30.5764705882, "max_line_length": 76, "alphanum_fraction": 0.7272027703, "include": true, "reason": "import numpy", "num_tokens": 732}
|
import os
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import logging
import csv
import pickle
from tensorflow.python import debug as tf_debug
logging.basicConfig(level=logging.INFO)
DIR_DATASET = "../dataset"
FILENAME_PKL = "train.pkl"
PATH_IMG_PKL = os.path.join(DIR_DATASET, FILENAME_PKL)
PATH_LABELS = os.path.join(DIR_DATASET, "label.txt")
PATH_SEGM_LABELS = os.path.join(DIR_DATASET, "segm_labels.csv")
# Load dataset images and labels
labels = []
if os.path.exists(PATH_SEGM_LABELS):
with open(PATH_SEGM_LABELS, "r") as csvfile:
csv_reader = csv.reader(csvfile)
for line in csv_reader:
train_label = np.array(line, np.uint8)
labels.append(train_label)
logging.debug("label:{:}".format(train_label))
logging.debug("label.shape:{:}".format(train_label.shape))
with open(PATH_IMG_PKL, "rb") as f:
imgs = pickle.load(f)
logging.info(len(imgs))
split_rate = 0.7
split = int(len(imgs) * split_rate)
train_datas, train_labels = imgs[:split], labels[:split]
test_datas, test_labels = imgs[split:], labels[split:]
# logging.info('train_data.shape:{:}'.format(train_datas.shape))
# logging.info('train_label.shape:{:}'.format(train_labels.shape))
# logging.info('test_data.shape:{:}'.format(test_datas.shape))
# logging.info('test_label.shape:{:}'.format(test_labels.shape))
# for i in range(5):
# plt.imshow(np.squeeze(train_datas[i+5]), cmap='gray')
# plt.title(train_labels[i+5])
# plt.show()
n_classes = 11
epochs = 150
batch_size = 128
DIR_CKPT = "../ckpt"
x = tf.placeholder(tf.float32, [None, None, None, 1], name='x')
y = tf.placeholder(tf.uint8, [1], name="y")
def residual_sepconv_block(x, filters, name):
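    """Pre-activation residual block: two ReLU + 3x3 separable convs, with a
    1x1 conv projection on the identity path so the channel counts match."""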
with tf.variable_scope(name):
relu1 = tf.nn.relu(x)
sepconv1 = tf.layers.separable_conv2d(relu1, filters, [3, 3],
strides=1,
padding='same',
name="sepconv1")
relu2 = tf.nn.relu(sepconv1)
sepconv2 = tf.layers.separable_conv2d(relu2, filters, [3, 3],
strides=1,
padding='same',
name="sepconv2")
identity_mapping = tf.layers.conv2d(x, filters, [1, 1],
kernel_initializer=tf.contrib.layers.xavier_initializer_conv2d(uniform=False),
bias_initializer=tf.contrib.layers.xavier_initializer(uniform=False),
name="identity_mapping")
return identity_mapping + sepconv2
with tf.variable_scope('conv'):
conv1 = tf.layers.conv2d(inputs=x,
filters=32,
kernel_size=[3, 3],
strides=1,
padding='same',
activation=tf.nn.relu,
name='conv1')
conv2 = tf.layers.conv2d(inputs=conv1,
filters=64,
kernel_size=[3, 3],
strides=1,
padding='same',
activation=tf.nn.relu,
name='conv2')
# resconv1 = residual_sepconv_block(conv2, 8, "residual_1")
# resconv2 = residual_sepconv_block(resconv1, 16, "residual_2")
with tf.variable_scope('fc'):
# conv3 = tf.reshape(resconv2, [-1, int(np.prod(resconv2.shape[1:]))])
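    # Global average pooling over the spatial axes replaces flattening, which
    # keeps the dense head compatible with the variable-sized `x` placeholder.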
average_pooling = tf.reduce_mean(conv2, axis=[1, 2])
fc = tf.layers.dense(inputs=average_pooling,
units=32,
kernel_initializer=tf.contrib.layers.xavier_initializer_conv2d(uniform=False),
bias_initializer=tf.contrib.layers.xavier_initializer(uniform=False),
activation=tf.nn.relu)
with tf.variable_scope('logits'):
# logit shape (batch, n_classes)
logits = tf.layers.dense(fc, n_classes)
logits = tf.identity(logits, name="logits")
with tf.name_scope('xent'):
y_one_hot = tf.one_hot(y, n_classes)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_one_hot))
optimizer = tf.train.AdamOptimizer().minimize(cost)
pred = tf.equal(tf.argmax(logits, 1), tf.argmax(y_one_hot, 1))
accuracy_char = tf.reduce_mean(tf.cast(pred, tf.float32), name='accuracy_char')
logging.info('model build!')
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
# sess = tf_debug.LocalCLIDebugWrapperSession(sess)
# sess.add_tensor_filter("has_inf_or_nan", tf_debug.has_inf_or_nan)
init = tf.global_variables_initializer()
sess.run(init)
i, loss_accum, train_acc_accum = 0, 0, 0
for epoch in range(epochs):
for train_data, train_label in zip(train_datas, train_labels):
i += 1
sess.run(optimizer, feed_dict={x: train_data, y: train_label})
            loss, train_acc = sess.run([cost, accuracy_char], feed_dict={x: train_data, y: train_label})
            loss_accum += loss
train_acc_accum += train_acc
logging.info("epoch:{:} i:{:} training loss:{:<5} accuracy_char:{:<5}"
.format(epoch, i, loss_accum / i, train_acc_accum / i))
# Test
i, loss_accum, test_acc_accum = 0, 0, 0
for test_data, test_label in zip(test_datas, test_labels):
i += 1
        # Evaluation only: do not run the optimizer on held-out test data.
        loss, test_acc = sess.run([cost, accuracy_char], feed_dict={x: test_data, y: test_label})
        loss_accum += loss
test_acc_accum += test_acc
logging.info("i:{:} test loss:{:<5} test_accuracy_char:{:<5}"
.format(i, loss_accum / i, test_acc_accum / i))
PATH_CKPT = os.path.join(DIR_CKPT, "model.ckpt")
saver = tf.train.Saver()
saver.save(sess, PATH_CKPT, global_step=epochs)
logging.info("Model saved in:{:}".format(DIR_CKPT))
|
{"hexsha": "ff991666eb966e2fbab7e2d73771a4557f0ecff1", "size": 6177, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/train.py", "max_stars_repo_name": "JaveyWang/5-char-real-number-recognition", "max_stars_repo_head_hexsha": "dca6cb748d466a774b4e5a1daa4a31af7e04a550", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/train.py", "max_issues_repo_name": "JaveyWang/5-char-real-number-recognition", "max_issues_repo_head_hexsha": "dca6cb748d466a774b4e5a1daa4a31af7e04a550", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/train.py", "max_forks_repo_name": "JaveyWang/5-char-real-number-recognition", "max_forks_repo_head_hexsha": "dca6cb748d466a774b4e5a1daa4a31af7e04a550", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.6381578947, "max_line_length": 122, "alphanum_fraction": 0.5998057309, "include": true, "reason": "import numpy", "num_tokens": 1437}
|
[STATEMENT]
lemma length_coeffs_degree':
"length (coeffs p) = (if p = 0 then 0 else Suc (degree p))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. length (coeffs p) = (if p = 0 then 0 else Suc (degree p))
[PROOF STEP]
by (cases "p = 0") (auto simp: length_coeffs_degree)
|
{"llama_tokens": 124, "file": "Factor_Algebraic_Polynomial_Roots_via_IA", "length": 1}
|
#Kaplan-Meier Estimator
import numpy as np
import numpy.linalg as la
import matplotlib.pyplot as plt
from scipy import stats
from statsmodels.iolib.table import SimpleTable
class KaplanMeier(object):
"""
KaplanMeier(...)
KaplanMeier(data, endog, exog=None, censoring=None)
Create an object of class KaplanMeier for estimating
Kaplan-Meier survival curves.
Parameters
----------
data: array_like
An array, with observations in each row, and
variables in the columns
endog: index (starting at zero) of the column
containing the endogenous variable (time)
exog: index of the column containing the exogenous
    variable (must be categorical). If exog = None, this
is equivalent to a single survival curve
censoring: index of the column containing an indicator
of whether an observation is an event, or a censored
observation, with 0 for censored, and 1 for an event
Attributes
-----------
censorings: List of censorings associated with each unique
time, at each value of exog
events: List of the number of events at each unique time
for each value of exog
    results: List of arrays containing estimates of the value
    of the survival function and its standard error
at each unique time, for each value of exog
ts: List of unique times for each value of exog
Methods
-------
    fit: Calculate the Kaplan-Meier estimates of the survival
function and its standard error at each time, for each
value of exog
    plot: Plot the survival curves using matplotlib.pyplot
summary: Display the results of fit in a table. Gives results
for all (including censored) times
test_diff: Test for difference between survival curves
Examples
--------
>>> import statsmodels.api as sm
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> from statsmodels.sandbox.survival2 import KaplanMeier
>>> dta = sm.datasets.strikes.load()
>>> dta = dta.values()[-1]
>>> dta[range(5),:]
array([[ 7.00000000e+00, 1.13800000e-02],
[ 9.00000000e+00, 1.13800000e-02],
[ 1.30000000e+01, 1.13800000e-02],
[ 1.40000000e+01, 1.13800000e-02],
[ 2.60000000e+01, 1.13800000e-02]])
>>> km = KaplanMeier(dta,0)
>>> km.fit()
>>> km.plot()
Doing
>>> km.summary()
will display a table of the estimated survival and standard errors
for each time. The first few lines are
Kaplan-Meier Curve
=====================================
Time Survival Std. Err
-------------------------------------
1.0 0.983870967742 0.0159984306572
2.0 0.91935483871 0.0345807888235
3.0 0.854838709677 0.0447374942184
4.0 0.838709677419 0.0467104592871
5.0 0.822580645161 0.0485169952543
Doing
>>> plt.show()
will plot the survival curve
    Multiple survival curves:
>>> km2 = KaplanMeier(dta,0,exog=1)
>>> km2.fit()
km2 will estimate a survival curve for each value of industrial
production, the column of dta with index one (1).
With censoring:
>>> censoring = np.ones_like(dta[:,0])
>>> censoring[dta[:,0] > 80] = 0
>>> dta = np.c_[dta,censoring]
>>> dta[range(5),:]
array([[ 7.00000000e+00, 1.13800000e-02, 1.00000000e+00],
[ 9.00000000e+00, 1.13800000e-02, 1.00000000e+00],
[ 1.30000000e+01, 1.13800000e-02, 1.00000000e+00],
[ 1.40000000e+01, 1.13800000e-02, 1.00000000e+00],
[ 2.60000000e+01, 1.13800000e-02, 1.00000000e+00]])
>>> km3 = KaplanMeier(dta,0,exog=1,censoring=2)
>>> km3.fit()
Test for difference of survival curves
>>> log_rank = km3.test_diff([0.0645,-0.03957])
The zeroth element of log_rank is the chi-square test statistic
for the difference between the survival curves for exog = 0.0645
and exog = -0.03957, the index one element is the degrees of freedom for
the test, and the index two element is the p-value for the test
    Groups with string names
>>> groups = np.ones_like(dta[:,1])
>>> groups = groups.astype('S4')
>>> groups[dta[:,1] > 0] = 'high'
>>> groups[dta[:,1] <= 0] = 'low'
>>> dta = dta.astype('S4')
>>> dta[:,1] = groups
>>> dta[range(5),:]
array([['7.0', 'high', '1.0'],
['9.0', 'high', '1.0'],
['13.0', 'high', '1.0'],
['14.0', 'high', '1.0'],
['26.0', 'high', '1.0']],
dtype='|S4')
>>> km4 = KaplanMeier(dta,0,exog=1,censoring=2)
>>> km4.fit()
"""
def __init__(self, data, endog, exog=None, censoring=None):
self.exog = exog
self.censoring = censoring
cols = [endog]
self.endog = 0
if exog != None:
cols.append(exog)
self.exog = 1
if censoring != None:
cols.append(censoring)
if exog != None:
self.censoring = 2
else:
self.censoring = 1
data = data[:,cols]
if data.dtype == float or data.dtype == int:
self.data = data[~np.isnan(data).any(1)]
else:
t = (data[:,self.endog]).astype(float)
if exog != None:
evec = data[:,self.exog]
evec = evec[~np.isnan(t)]
if censoring != None:
cvec = (data[:,self.censoring]).astype(float)
cvec = cvec[~np.isnan(t)]
t = t[~np.isnan(t)]
if censoring != None:
t = t[~np.isnan(cvec)]
if exog != None:
evec = evec[~np.isnan(cvec)]
cvec = cvec[~np.isnan(cvec)]
cols = [t]
if exog != None:
cols.append(evec)
if censoring != None:
cols.append(cvec)
data = (np.array(cols)).transpose()
self.data = data
def fit(self):
"""
Calculate the Kaplan-Meier estimator of the survival function
"""
self.results = []
self.ts = []
self.censorings = []
self.event = []
if self.exog == None:
self.fitting_proc(self.data)
else:
groups = np.unique(self.data[:,self.exog])
self.groups = groups
for g in groups:
group = self.data[self.data[:,self.exog] == g]
self.fitting_proc(group)
def plot(self):
"""
Plot the estimated survival curves. After using this method
do
plt.show()
to display the plot
"""
plt.figure()
if self.exog == None:
self.plotting_proc(0)
else:
for g in range(len(self.groups)):
self.plotting_proc(g)
plt.ylim(ymax=1.05)
plt.ylabel('Survival')
plt.xlabel('Time')
def summary(self):
"""
Print a set of tables containing the estimates of the survival
function, and its standard errors
"""
if self.exog == None:
self.summary_proc(0)
else:
for g in range(len(self.groups)):
self.summary_proc(g)
def fitting_proc(self, group):
"""
For internal use
"""
t = ((group[:,self.endog]).astype(float)).astype(int)
if self.censoring == None:
events = np.bincount(t)
t = np.unique(t)
            events = events[list(t)]
events = events.astype(float)
eventsSum = np.cumsum(events)
eventsSum = np.r_[0,eventsSum]
n = len(group) - eventsSum[:-1]
else:
censoring = ((group[:,self.censoring]).astype(float)).astype(int)
reverseCensoring = -1*(censoring - 1)
events = np.bincount(t,censoring)
censored = np.bincount(t,reverseCensoring)
t = np.unique(t)
            censored = censored[list(t)]
censored = censored.astype(float)
censoredSum = np.cumsum(censored)
censoredSum = np.r_[0,censoredSum]
            events = events[list(t)]
events = events.astype(float)
eventsSum = np.cumsum(events)
eventsSum = np.r_[0,eventsSum]
n = len(group) - eventsSum[:-1] - censoredSum[:-1]
(self.censorings).append(censored)
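        # Product-limit estimate S(t) = prod(1 - d_i/n_i); the variance below
        # is Greenwood's formula S(t)^2 * sum(d_i / (n_i * (n_i - d_i))).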
survival = np.cumprod(1-events/n)
var = ((survival*survival) *
np.cumsum(events/(n*(n-events))))
se = np.sqrt(var)
(self.results).append(np.array([survival,se]))
(self.ts).append(t)
(self.event).append(events)
def plotting_proc(self, g):
"""
For internal use
"""
survival = self.results[g][0]
t = self.ts[g]
e = (self.event)[g]
if self.censoring != None:
c = self.censorings[g]
csurvival = survival[c != 0]
ct = t[c != 0]
if len(ct) != 0:
plt.vlines(ct,csurvival+0.02,csurvival-0.02)
x = np.repeat(t[e != 0], 2)
y = np.repeat(survival[e != 0], 2)
if self.ts[g][-1] in t[e != 0]:
x = np.r_[0,x]
y = np.r_[1,1,y[:-1]]
else:
x = np.r_[0,x,self.ts[g][-1]]
y = np.r_[1,1,y]
plt.plot(x,y)
def summary_proc(self, g):
"""
For internal use
"""
if self.exog != None:
myTitle = ('exog = ' + str(self.groups[g]) + '\n')
else:
myTitle = "Kaplan-Meier Curve"
table = np.transpose(self.results[g])
table = np.c_[np.transpose(self.ts[g]),table]
table = SimpleTable(table, headers=['Time','Survival','Std. Err'],
title = myTitle)
print(table)
def test_diff(self, groups, rho=None, weight=None):
"""
        test_diff(groups, rho=None, weight=None)
Test for difference between survival curves
Parameters
----------
groups: A list of the values for exog to test for difference.
tests the null hypothesis that the survival curves for all
values of exog in groups are equal
        rho: compute the test statistic with weight S(t)^rho, where
        S(t) is the pooled estimate for the Kaplan-Meier survival function.
        If rho = 0, this is the logrank test; if rho = 1, this is the
        Peto and Peto modification to the Gehan-Wilcoxon test.
        weight: User specified function that accepts as its sole argument
an array of times, and returns an array of weights for each time
to be used in the test
Returns
-------
An array whose zeroth element is the chi-square test statistic for
the global null hypothesis, that all survival curves are equal,
the index one element is degrees of freedom for the test, and the
index two element is the p-value for the test.
Examples
--------
>>> import statsmodels.api as sm
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> from statsmodels.sandbox.survival2 import KaplanMeier
>>> dta = sm.datasets.strikes.load()
>>> dta = dta.values()[-1]
>>> censoring = np.ones_like(dta[:,0])
>>> censoring[dta[:,0] > 80] = 0
>>> dta = np.c_[dta,censoring]
>>> km = KaplanMeier(dta,0,exog=1,censoring=2)
>>> km.fit()
Test for difference of survival curves
        >>> log_rank = km.test_diff([0.0645,-0.03957])
The zeroth element of log_rank is the chi-square test statistic
for the difference between the survival curves using the log rank test
for exog = 0.0645 and exog = -0.03957, the index one element
is the degrees of freedom for the test, and the index two element
is the p-value for the test
>>> wilcoxon = km.test_diff([0.0645,-0.03957], rho=1)
wilcoxon is the equivalent information as log_rank, but for the
Peto and Peto modification to the Gehan-Wilcoxon test.
User specified weight functions
        >>> log_rank = km.test_diff([0.0645,-0.03957], weight=np.ones_like)
This is equivalent to the log rank test
More than two groups
>>> log_rank = km.test_diff([0.0645,-0.03957,0.01138])
The test can be performed with arbitrarily many groups, so long as
they are all in the column exog
"""
groups = np.asarray(groups)
if self.exog == None:
raise ValueError("Need an exogenous variable for logrank test")
elif (np.in1d(groups,self.groups)).all():
data = self.data[np.in1d(self.data[:,self.exog],groups)]
t = ((data[:,self.endog]).astype(float)).astype(int)
tind = np.unique(t)
NK = []
N = []
D = []
Z = []
if rho != None and weight != None:
raise ValueError("Must use either rho or weights, not both")
elif rho != None:
s = KaplanMeier(data,self.endog,censoring=self.censoring)
s.fit()
s = (s.results[0][0]) ** (rho)
s = np.r_[1,s[:-1]]
elif weight != None:
s = weight(tind)
else:
s = np.ones_like(tind)
if self.censoring == None:
for g in groups:
dk = np.bincount((t[data[:,self.exog] == g]))
d = np.bincount(t)
if np.max(tind) != len(dk):
dif = np.max(tind) - len(dk) + 1
dk = np.r_[dk,[0]*dif]
                    dk = dk[list(tind)]
                    d = d[list(tind)]
dk = dk.astype(float)
d = d.astype(float)
dkSum = np.cumsum(dk)
dSum = np.cumsum(d)
dkSum = np.r_[0,dkSum]
dSum = np.r_[0,dSum]
nk = len(data[data[:,self.exog] == g]) - dkSum[:-1]
n = len(data) - dSum[:-1]
d = d[n>1]
dk = dk[n>1]
nk = nk[n>1]
n = n[n>1]
s = s[n>1]
ek = (nk * d)/(n)
Z.append(np.sum(s * (dk - ek)))
NK.append(nk)
N.append(n)
D.append(d)
else:
for g in groups:
censoring = ((data[:,self.censoring]).astype(float)).astype(int)
reverseCensoring = -1*(censoring - 1)
censored = np.bincount(t,reverseCensoring)
ck = np.bincount((t[data[:,self.exog] == g]),
reverseCensoring[data[:,self.exog] == g])
dk = np.bincount((t[data[:,self.exog] == g]),
censoring[data[:,self.exog] == g])
d = np.bincount(t,censoring)
if np.max(tind) != len(dk):
dif = np.max(tind) - len(dk) + 1
dk = np.r_[dk,[0]*dif]
ck = np.r_[ck,[0]*dif]
                    dk = dk[list(tind)]
                    ck = ck[list(tind)]
                    d = d[list(tind)]
dk = dk.astype(float)
d = d.astype(float)
ck = ck.astype(float)
dkSum = np.cumsum(dk)
dSum = np.cumsum(d)
ck = np.cumsum(ck)
ck = np.r_[0,ck]
dkSum = np.r_[0,dkSum]
dSum = np.r_[0,dSum]
                    censored = censored[list(tind)]
censored = censored.astype(float)
censoredSum = np.cumsum(censored)
censoredSum = np.r_[0,censoredSum]
nk = (len(data[data[:,self.exog] == g]) - dkSum[:-1]
- ck[:-1])
n = len(data) - dSum[:-1] - censoredSum[:-1]
d = d[n>1]
dk = dk[n>1]
nk = nk[n>1]
n = n[n>1]
s = s[n>1]
ek = (nk * d)/(n)
Z.append(np.sum(s * (dk - ek)))
NK.append(nk)
N.append(n)
D.append(d)
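            # Weighted log-rank machinery: Z[k] holds the weighted observed-
            # minus-expected event count for group k, sigma is the
            # hypergeometric covariance of those counts, and the statistic
            # Z' pinv(sigma) Z is compared against a chi-square distribution
            # with len(groups) - 1 degrees of freedom.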
Z = np.array(Z)
N = np.array(N)
D = np.array(D)
NK = np.array(NK)
sigma = -1 * np.dot((NK/N) * ((N - D)/(N - 1)) * D
* np.array([(s ** 2)]*len(D))
,np.transpose(NK/N))
np.fill_diagonal(sigma, np.diagonal(np.dot((NK/N)
* ((N - D)/(N - 1)) * D
* np.array([(s ** 2)]*len(D))
,np.transpose(1 - (NK/N)))))
chisq = np.dot(np.transpose(Z),np.dot(la.pinv(sigma), Z))
df = len(groups) - 1
return np.array([chisq, df, stats.chi2.sf(chisq,df)])
else:
raise ValueError("groups must be in column exog")
|
{"hexsha": "bbba55503b0f916091ad183467e54bd6d8f8e2d4", "size": 17924, "ext": "py", "lang": "Python", "max_stars_repo_path": "statsmodels/sandbox/survival2.py", "max_stars_repo_name": "yarikoptic/statsmodels", "max_stars_repo_head_hexsha": "f990cb1a1ef0c9883c9394444e6f9d027efabec6", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2016-05-18T11:46:33.000Z", "max_stars_repo_stars_event_max_datetime": "2018-12-23T04:52:27.000Z", "max_issues_repo_path": "statsmodels/sandbox/survival2.py", "max_issues_repo_name": "yarikoptic/statsmodels", "max_issues_repo_head_hexsha": "f990cb1a1ef0c9883c9394444e6f9d027efabec6", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-09-11T14:30:32.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-11T14:30:32.000Z", "max_forks_repo_path": "statsmodels/sandbox/survival2.py", "max_forks_repo_name": "yarikoptic/statsmodels", "max_forks_repo_head_hexsha": "f990cb1a1ef0c9883c9394444e6f9d027efabec6", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2015-04-01T08:26:54.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-14T14:34:10.000Z", "avg_line_length": 35.848, "max_line_length": 84, "alphanum_fraction": 0.4868332961, "include": true, "reason": "import numpy,from scipy,import statsmodels,from statsmodels", "num_tokens": 4504}
|
\documentclass[a4paper]{article}
%\documentclass[a4paper]{scrartcl}
\usepackage{url}
\usepackage{amsfonts}
\usepackage{amsmath}
\usepackage{amssymb}
\usepackage{subcaption}
\usepackage{float}
\usepackage{comment}
\usepackage{graphicx}
\usepackage{xcolor}
\renewcommand{\i}[1]{\textit{#1}}
\newcommand\blue[1]{\textcolor{blue}{#1}}
\author{kwxm}
\date{December 2018}
\title{Some benchmarks for transformations of the CEK machine}
\begin{document}
\maketitle
\section*{Introduction}
\noindent This document contains some timing results for various
versions of the CEK machine. If you look at the description of the
CEK machine you can see that the \texttt{return} phase is essentially
calling a sort of concretised continuation, so I wondered if things
would be any faster with real continuations (short answer: no).
\section*{Experiments}
\noindent I used the following abstract machines.
\begin{itemize}
\item Three versions of the CEK machine from commit \texttt{96fb387}
(pretty much the earliest version), but with the bounds check for
sized integers in \texttt{Constant/Make.hs} updated to use the
\texttt{bit} function rather than calculating $2^n$ for large $n$
(this was slowing things down quite substantially and has been fixed
in more recent versions).
\begin{itemize}
\item The original CEK machine
\item The CEK machine with a refunctionalisation transformation
applied so that it uses explicit continuations rather than
the frames and \texttt{return} operation in the contextual version
of the CEK machine.
\item The refunctionalised version with an ``un-CPS'' transformation
applied. This essentially turns the machine into a simple recursive
evaluator providing a direct implementation of a standard structural
operational semantics.
\end{itemize}
\item The current CEK machine at commit \texttt{c9a8ae24}. This has been
significantly modified from the earlier version, using monads and
including the new infrastructure for ``dynamic'' built-in functions.
\end{itemize}
\noindent Olivier Danvy and a number of collaborators have done a lot
of work on transformations of abstract machines (see ``A Functional
Correspondence between Evaluators and Abstract Machines'', for
example), and the transformations here are instances of the kind of
thing they've studied.
\\
\subsection*{Inputs}
The programs were run with the inputs shown in
Figure~\ref{fig:benchmark-inputs}. These are the same (hand-written)
programs and inputs as were used for evaluation of the lazy machine in
a previous document, and the same statistics were used (collected using
\texttt{/usr/bin/time -f "\%U \%S \%M"} on Linux). The programs are all
recursive programs using the $Z$ combinator:
\begin{itemize}
\item \texttt{Loop}: loop $n$ times.
\item \texttt{Tri}: calculate $n + (n-1) + \ldots + 2 + 1$
\item \texttt{Fac}: calculate $n(n-1)\cdots2\cdot1$ (requires very large integers)
\item \texttt{Fib}: Naive recursive Fibonacci
\end{itemize}
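For reference (this definition is standard and not taken from the benchmark
sources themselves), the strict fixed-point combinator in question is
\[
Z = \lambda f.\,(\lambda x.\, f\,(\lambda v.\, x\,x\,v))\,(\lambda x.\, f\,(\lambda v.\, x\,x\,v)),
\]
which satisfies $Z\,f\,v = f\,(Z\,f)\,v$ and, unlike the $Y$ combinator, does
not diverge under strict evaluation.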
The programs were
run once only for each input; ideally we'd run them several times
each and take the average, but this would be a lengthy process and
the results below don't suggest that we'd gain much from a more detailed
test.
\begin{figure}[H]
\centering
\begin{tabular}{|l|r|r|r|r|}
\hline
Program & Minimum input & Step & Maximum input & Integer size (bytes) \\
\hline
\texttt{Loop} & 0 & 20,000 & 1,000,000 & 4\\
\texttt{Tri} & 0 & 50,000 & 2,000,000 & 8\\
\texttt{Fac} & 0 & 5,000 & 100,000 & 190,000\\
\texttt{Fib} & 1 & 1 & 31 & 4 \\
\hline
\end{tabular}
\caption{Programs and inputs}\label{fig:benchmark-inputs}
\end{figure}
\newpage
\section*{Results}
\begin{figure}[H]
\centering \includegraphics[width=0.8\linewidth]{loop-times.pdf}
\includegraphics[width=0.8\linewidth]{loop-mem.pdf}
\caption{Loop}\label{fig:loop-graphs}
\end{figure}
\newpage
\begin{figure}[H]
\centering
\includegraphics[width=0.8\linewidth]{tri-times.pdf}
\includegraphics[width=0.8\linewidth]{tri-mem.pdf}
\caption{Triangular numbers}\label{fig:tri-graphs}
\end{figure}
\newpage
\begin{figure}[H]
\centering
\includegraphics[width=0.8\linewidth]{fac-times.pdf}
\includegraphics[width=0.8\linewidth]{fac-mem.pdf}
\caption{Factorial}\label{fig:fac-graphs}
\end{figure}
\newpage
\begin{figure}[H]
\centering
\includegraphics[width=0.8\linewidth]{fib-times.pdf}
\includegraphics[width=0.8\linewidth]{fib-mem.pdf}
\caption{Fibonacci}\label{fig:fib-graphs}
\end{figure}
\newpage
\section*{Conclusions}
The results are pretty inconclusive: the variations on the original
CEK machine don't seem to make a lot of difference, possibly because
GHC will be transforming things behind the scenes anyway.
It's notable that the current version of the CEK machine is quite a
bit slower than the original one in some cases. This is presumably
because it's quite a bit more complicated now, and also partly because
there's at least one problem (to do with renaming variables in
booleans) which we've identified but which I don't think has been
fixed in the master branch yet.
It's also the case that the memory usage of the current version is
significantly lower than the old version in some cases: I have no idea
why this is. We should do some detailed profiling on complicated
examples.
\end{document}
|
{"hexsha": "5adf7456586cbdeda7327ed4e02ee9501796b76a", "size": 5273, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "notes/fomega/cek-cps-experiments/tex/results.tex", "max_stars_repo_name": "AriFordsham/plutus", "max_stars_repo_head_hexsha": "f7d34336cd3d65f62b0da084a16f741dc9156413", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1299, "max_stars_repo_stars_event_min_datetime": "2018-10-02T13:41:39.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-28T01:10:02.000Z", "max_issues_repo_path": "notes/fomega/cek-cps-experiments/tex/results.tex", "max_issues_repo_name": "AriFordsham/plutus", "max_issues_repo_head_hexsha": "f7d34336cd3d65f62b0da084a16f741dc9156413", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 2493, "max_issues_repo_issues_event_min_datetime": "2018-09-28T19:28:17.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T15:31:31.000Z", "max_forks_repo_path": "notes/fomega/cek-cps-experiments/tex/results.tex", "max_forks_repo_name": "AriFordsham/plutus", "max_forks_repo_head_hexsha": "f7d34336cd3d65f62b0da084a16f741dc9156413", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 399, "max_forks_repo_forks_event_min_datetime": "2018-10-05T09:36:10.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T11:18:25.000Z", "avg_line_length": 32.95625, "max_line_length": 82, "alphanum_fraction": 0.765029395, "num_tokens": 1470}
|
// Boost sorting_algo library float_sort_test.cpp file ---------------------------//
// Copyright Steven Ross 2009. Use, modification and
// distribution is subject to the Boost Software License, Version
// 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// See http://www.boost.org for updates, documentation, and revision history.
#include <boost/algorithm/sorting/spread_sort.hpp>
// Include unit test framework
#include <boost/test/included/test_exec_monitor.hpp>
#include <boost/test/test_tools.hpp>
#include <vector>
using namespace std;
using namespace boost;
//Casting to an integer before bitshifting
struct rightshift {
int operator()(const float &x, const unsigned offset) const { return float_mem_cast<float, int>(x) >> offset; }
};
struct rightshift_64 {
boost::int64_t operator()(const double &x, const boost::uint64_t offset) const { return float_mem_cast<double, boost::int64_t>(x) >> offset; }
};
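//rand() only guarantees 15 random bits, so rand_32 splices two calls together
//(and randomly sets bit 15) to cover the full 32-bit range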
boost::int32_t
rand_32(bool sign = true) {
boost::int32_t result = rand() | (rand()<< 16);
if(rand() % 2)
result |= 1 << 15;
//Adding the sign bit
if(sign && (rand() % 2))
result *= -1;
return result;
}
void float_test()
{
// Prepare inputs
vector<float> base_vec;
unsigned count = 100000;
//Generating semirandom numbers; no they're not large, but they'll work for basic testing
for(unsigned u = 0; u < count; ++u) {
float val = float(rand_32());
//As std::sort gives arbitrary results for NaNs and 0.0 vs. -0.0, treat all those as just 0.0 for testing
if(!(val < 0.0) && !(0.0 < val))
base_vec.push_back(0.0);
else
base_vec.push_back(val);
}
vector<float> sorted_vec = base_vec;
vector<float> test_vec = base_vec;
std::sort(sorted_vec.begin(), sorted_vec.end());
//Testing spread_sort version
test_vec = base_vec;
spread_sort(test_vec.begin(), test_vec.end());
BOOST_CHECK(test_vec == sorted_vec);
//One functor
test_vec = base_vec;
float_sort(test_vec.begin(), test_vec.end(), rightshift());
BOOST_CHECK(test_vec == sorted_vec);
//Both functors
test_vec = base_vec;
float_sort(test_vec.begin(), test_vec.end(), rightshift(), less<float>());
BOOST_CHECK(test_vec == sorted_vec);
//Testing for doubles
vector<double> long_base_vec;
for(unsigned u = 0; u < base_vec.size(); ++u) {
double val = double
((((boost::int64_t)rand_32()) << ((8 * sizeof(int)) -1)) + rand_32(false));
//As std::sort gives arbitrary results for NaNs and 0.0 vs. -0.0,
//treat all those as just 0.0 for testing
if(!(val < 0.0) && !(0.0 < val))
long_base_vec.push_back(0.0);
else
long_base_vec.push_back(val);
}
vector<double> long_sorted_vec = long_base_vec;
vector<double> long_test_vec = long_base_vec;
float_sort(long_test_vec.begin(), long_test_vec.end());
std::sort(long_sorted_vec.begin(), long_sorted_vec.end());
BOOST_CHECK(long_test_vec == long_sorted_vec);
//One functor
long_test_vec = long_base_vec;
float_sort(long_test_vec.begin(), long_test_vec.end(), rightshift_64());
BOOST_CHECK(long_test_vec == long_sorted_vec);
//Both functors
long_test_vec = long_base_vec;
float_sort(long_test_vec.begin(), long_test_vec.end(), rightshift_64(), less<double>());
BOOST_CHECK(long_test_vec == long_sorted_vec);
}
// test main
int test_main( int, char*[] )
{
float_test();
return 0;
}
|
{"hexsha": "105322e2e7065fb19c471a4d5328a02a01f696dd", "size": 3435, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "libs/algorithm/sorting/test/float_sort_test.cpp", "max_stars_repo_name": "spreadsort/algorithm_sorting", "max_stars_repo_head_hexsha": "bc425fcfe8c883f3f6c8a4068bb55d4b7da2d1da", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2020-06-21T20:19:03.000Z", "max_stars_repo_stars_event_max_datetime": "2020-06-21T20:19:03.000Z", "max_issues_repo_path": "libs/algorithm/sorting/test/float_sort_test.cpp", "max_issues_repo_name": "spreadsort/algorithm_sorting", "max_issues_repo_head_hexsha": "bc425fcfe8c883f3f6c8a4068bb55d4b7da2d1da", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "libs/algorithm/sorting/test/float_sort_test.cpp", "max_forks_repo_name": "spreadsort/algorithm_sorting", "max_forks_repo_head_hexsha": "bc425fcfe8c883f3f6c8a4068bb55d4b7da2d1da", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.4056603774, "max_line_length": 144, "alphanum_fraction": 0.6823871907, "num_tokens": 931}
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions for resegmentation.
Resegmentation is local segmentation targeted to specific points in an already
segmented volume. The results of resegmentation can be compared to the original
segments in order to perform object agglomeration.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hashlib
import logging
import os
import numpy as np
from scipy import ndimage
from scipy.special import expit
from tensorflow import gfile
from . import storage
from .inference_utils import timer_counter
from ..utils import bounding_box
def get_starting_location(dists, exclusion_radius):
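  """Picks the voxel with the maximal distance-transform value as the next
  seed, clearing an exclusion zone around it so later seeds are spread out."""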
z, y, x = np.unravel_index(np.argmax(dists), tuple(dists.shape))
# Mark area around the new point as 'excluded' by clearing the distance
# map around it.
er = exclusion_radius
dists[max(z - er.z, 0):z + er.z + 1,
max(y - er.y, 0):y + er.y + 1,
max(x - er.x, 0):x + er.x + 1] = 0
return z, y, x
def get_target_path(request, point_num):
"""Computes the output path for a specific point.
Args:
request: ResegmentationRequest proto
point_num: index of the point of interest within the proto
  Returns:
    path to the output file where resegmentation results will be saved, or
    None if the output already exists
  """
# Prepare the output directory.
output_dir = request.output_directory
id_a = request.points[point_num].id_a
id_b = request.points[point_num].id_b
if request.subdir_digits > 1:
m = hashlib.md5()
m.update(str(id_a))
m.update(str(id_b))
output_dir = os.path.join(output_dir, m.hexdigest()[:request.subdir_digits])
gfile.MakeDirs(output_dir)
# Terminate early if the output already exists.
dp = request.points[point_num].point
target_path = os.path.join(output_dir, '%d-%d_at_%d_%d_%d.npz' % (
id_a, id_b, dp.x, dp.y, dp.z))
if gfile.Exists(target_path):
logging.info('Output already exists: %s', target_path)
return
return target_path
def get_canvas(point, radius, runner):
"""Creates an FFN Canvas.
Args:
point: decision point as (z, y, x)
radius: radius around decision point as (z, y, x)
runner: inference Runner object
Returns:
inference Canvas object
"""
origin = np.array(point)
radius = np.array(radius)
corner = origin - radius
subvol_size = radius * 2 + 1
end = subvol_size + corner
if (np.any(corner < 0) or
runner.init_seg_volstore.size.z <= end[0] or
runner.init_seg_volstore.size.y <= end[1] or
runner.init_seg_volstore.size.x <= end[2]):
logging.error('Not enough context for: %d, %d, %d; corner: %r; end: %r',
point[2], point[1], point[0], corner, end)
return None, None
return runner.make_canvas(corner, subvol_size, keep_history=True)
def process_point(request, runner, point_num):
"""Runs resegmentation for a specific point.
Args:
request: ResegmentationRequest proto
runner: inference Runner object
point_num: index of the point of interest within the proto
"""
with timer_counter(runner.counters, 'resegmentation'):
target_path = get_target_path(request, point_num)
if target_path is None:
return
curr = request.points[point_num]
point = curr.point
point = point.z, point.y, point.x
radius = (request.radius.z, request.radius.y, request.radius.x)
canvas, alignment = get_canvas(point, radius, runner)
if canvas is None:
logging.warning('Could not get a canvas object.')
return
def unalign_prob(prob):
return alignment.align_and_crop(
canvas.corner_zyx,
prob,
alignment.corner,
alignment.size,
forward=False)
is_shift = (canvas.restrictor is not None and
np.any(canvas.restrictor.shift_mask))
is_endpoint = not curr.HasField('id_b')
seg_a = canvas.segmentation == canvas.local_id(curr.id_a)
size_a = np.sum(seg_a)
if is_endpoint:
size_b = -1
todo = [seg_a]
else:
seg_b = canvas.segmentation == canvas.local_id(curr.id_b)
size_b = np.sum(seg_b)
todo = [seg_a, seg_b]
if size_a == 0 or size_b == 0:
logging.warning('Segments (%d, %d) local ids (%d, %d) not found in input '
'at %r. Current values are: %r.',
curr.id_a, curr.id_b, canvas.local_id(curr.id_a),
canvas.local_id(curr.id_b), point,
np.unique(canvas.segmentation))
canvas._deregister_client() # pylint:disable=protected-access
return
if is_endpoint:
canvas.seg_prob[:] = 0.0
canvas.segmentation[:] = 0
else:
# Clear the two segments in question, but keep everything else as
# context.
canvas.segmentation[seg_a] = 0
canvas.segmentation[seg_b] = 0
canvas.seg_prob[seg_a] = 0.0
canvas.seg_prob[seg_b] = 0.0
transformed_point = alignment.transform(np.array([point]).T)
tz, ty, tx = transformed_point[:, 0]
oz, oy, ox = canvas.corner_zyx
tz -= oz
ty -= oy
tx -= ox
# First index enumerates the original segments. Second index,
# when present, enumerates segmentation attempts.
raw_probs = []
probs = []
deletes = []
histories = []
start_points = [[], []]
if request.HasField('analysis_radius'):
ar = request.analysis_radius
analysis_box = bounding_box.BoundingBox(
start=(radius[2] - ar.x,
radius[1] - ar.y,
radius[0] - ar.z),
size=(2 * ar.x + 1, 2 * ar.y + 1, 2 * ar.z + 1))
else:
analysis_box = bounding_box.BoundingBox(
(0, 0, 0), canvas.image.shape[::-1])
options = request.inference.inference_options
for i, seg in enumerate(todo):
logging.info('processing object %d', i)
with timer_counter(canvas.counters, 'edt'):
ps = runner.init_seg_volstore.info.pixelsize
dists = ndimage.distance_transform_edt(seg, sampling=(ps.z, ps.y, ps.x))
# Do not seed where not enough context is available.
dists[:canvas.margin[0], :, :] = 0
dists[:, :canvas.margin[1], :] = 0
dists[:, :, :canvas.margin[2]] = 0
dists[-canvas.margin[0]:, :, :] = 0
dists[:, -canvas.margin[1]:, :] = 0
dists[:, :, -canvas.margin[2]:] = 0
canvas.log_info('EDT computation done')
# Optionally exclude a region around the decision point from seeding.
if request.HasField('init_exclusion_radius'):
ier = request.init_exclusion_radius
dists[tz - ier.z:tz + ier.z + 1,
ty - ier.y:ty + ier.y + 1,
tx - ier.x:tx + ier.x + 1] = 0
seg_prob = None
recovered = False
for _ in range(request.max_retry_iters):
z0, y0, x0 = get_starting_location(dists, request.exclusion_radius)
if not seg[z0, y0, x0]:
continue
canvas.log_info('.. starting segmentation at (xyz): %d %d %d',
x0, y0, z0)
canvas.segment_at((z0, y0, x0))
seg_prob = expit(canvas.seed)
start_points[i].append((x0, y0, z0))
# Check if we recovered an acceptable fraction of the initial segment
# in which the seed was located.
recovered = True
crop_seg = seg[analysis_box.to_slice()]
crop_prob = seg_prob[analysis_box.to_slice()]
start_size = np.sum(crop_seg)
segmented_voxels = np.sum((crop_prob >= options.segment_threshold) &
crop_seg)
if request.segment_recovery_fraction > 0:
if segmented_voxels / start_size >= request.segment_recovery_fraction:
break
elif segmented_voxels >= options.min_segment_size:
break
recovered = False
# Store resegmentation results.
if seg_prob is not None:
qprob = storage.quantize_probability(seg_prob)
raw_probs.append(qprob)
probs.append(unalign_prob(qprob))
deletes.append(np.array(canvas.history_deleted))
histories.append(np.array(canvas.history))
if request.terminate_early:
if not recovered:
break
if (request.segment_recovery_fraction > 0 and i == 0 and
len(todo) > 1):
seg2 = todo[1]
crop_seg = seg2[analysis_box.to_slice()]
size2 = np.sum(crop_seg)
segmented_voxels2 = np.sum(
(crop_prob >= options.segment_threshold) & crop_seg)
if segmented_voxels2 / size2 < request.segment_recovery_fraction:
break
canvas.log_info('saving results to %s', target_path)
with storage.atomic_file(target_path) as fd:
np.savez_compressed(fd,
probs=np.array(probs),
raw_probs=np.array(raw_probs),
deletes=np.array(deletes),
histories=np.array(histories),
start_points=start_points,
request=request.SerializeToString(),
counters=canvas.counters.dumps(),
corner_zyx=canvas.corner_zyx,
is_shift=is_shift)
canvas.log_info('.. save complete')
# Cannot `del canvas` here in Python 2 -- deleting an object referenced
# in a nested scope is a syntax error.
canvas._deregister_client() # pylint:disable=protected-access
def process(request, runner):
num_points = len(request.points)
for i in range(num_points):
logging.info('processing %d/%d', i, num_points)
process_point(request, runner, i)
|
{"hexsha": "4a47cf9bf9062b89c76554720a337296e9ea2703", "size": 10240, "ext": "py", "lang": "Python", "max_stars_repo_path": "ffn/inference/resegmentation.py", "max_stars_repo_name": "necrodancer/ffn", "max_stars_repo_head_hexsha": "43552bbc2585ca350d8495454e7580c47806e637", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ffn/inference/resegmentation.py", "max_issues_repo_name": "necrodancer/ffn", "max_issues_repo_head_hexsha": "43552bbc2585ca350d8495454e7580c47806e637", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ffn/inference/resegmentation.py", "max_forks_repo_name": "necrodancer/ffn", "max_forks_repo_head_hexsha": "43552bbc2585ca350d8495454e7580c47806e637", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.4640522876, "max_line_length": 80, "alphanum_fraction": 0.6334960938, "include": true, "reason": "import numpy,from scipy", "num_tokens": 2536}
|
import numpy as np
import qiskit.quantum_info as qi
def concurrence_single(dm):
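    # Concurrence of one two-qubit state; `dm` is expected to already be a
    # qiskit.quantum_info state object (see the commented-out conversion below).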
# dm = qi.DensityMatrix(dm)
con = qi.concurrence(dm)
return con
def concurrence(dm_tensor):
con_list = list(map(concurrence_single, dm_tensor))
con_tensor = np.array(con_list)
return con_tensor
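
# Minimal usage sketch (illustrative only; assumes qiskit with
# qiskit.quantum_info.random_density_matrix is available):
# dms = [qi.random_density_matrix(4) for _ in range(3)]
# print(concurrence(dms))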
|
{"hexsha": "9931eecbfc2b91a2e1180514033ded05dc3f5ea4", "size": 299, "ext": "py", "lang": "Python", "max_stars_repo_path": "Toy-model/CP_werner_with_MA/utils/Concurrence_Measure.py", "max_stars_repo_name": "slohani-ai/data-centric-in-qis", "max_stars_repo_head_hexsha": "bbc545454f7d98a28a4fc83f2f6b14de253fcb6c", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Toy-model/CP_werner_with_MA/utils/Concurrence_Measure.py", "max_issues_repo_name": "slohani-ai/data-centric-in-qis", "max_issues_repo_head_hexsha": "bbc545454f7d98a28a4fc83f2f6b14de253fcb6c", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Toy-model/CP_werner_with_MA/utils/Concurrence_Measure.py", "max_forks_repo_name": "slohani-ai/data-centric-in-qis", "max_forks_repo_head_hexsha": "bbc545454f7d98a28a4fc83f2f6b14de253fcb6c", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.9166666667, "max_line_length": 55, "alphanum_fraction": 0.72909699, "include": true, "reason": "import numpy", "num_tokens": 77}
|
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import matplotlib.patches as mpatches
import numpy as np
def histogram(df, df_column, binwidth=10, including=10,
ultima_medalha=3, ultima_mencao=10, ax=None):
"""Gera histograma com os dados de notas, com linhas destacando
a zona de medalhistas e a zona de menções honrosas"""
bins = np.arange(including, 101, binwidth)
xmax = np.floor(df[df_column].max() + binwidth / 2)
n = ax.hist(df[df_column], bins, align='mid', facecolor='g',
alpha=0.6, edgecolor='k', label='')
ax.set_xticks(np.arange(0, 101, 5))
ax.set_xlim(including, xmax + binwidth / 2)
ax.set_ylim(0, n[0].max())
ax.set_yticks(np.arange(0, n[0].max() + 1, 1))
ax.set_axisbelow(True)
ax.grid(b=True, which='major', linestyle=':', linewidth=1.0)
ax.tick_params(which='both', labelsize=14)
ax.axvline(x=df[df_column].median(), color='blue', zorder=2, label='Mediana ({0:0})'.format(
df[df_column].median()), linewidth=2, linestyle='--')
ax.axvline(x=df[df_column].mean(), color='orange', zorder=2, label='Média ({0:0.1f})'.format(
df[df_column].mean()), linewidth=2, linestyle='--')
ax.axvline(x=df.loc[ultima_mencao, df_column], color='red', zorder=2, label='Menções honrosas ({0:0})'.format(
df.loc[ultima_mencao, df_column]), linewidth=2, linestyle='--')
ax.axvline(x=df.loc[ultima_medalha, df_column], color='purple', zorder=2, label='Medalhas ({0:0})'.format(
df.loc[ultima_medalha, df_column]), linewidth=2, linestyle='--')
ax.set_xlabel('Notas', size=15)
ax.set_ylabel('Frequência', size=15)
lines, labels = ax.get_legend_handles_labels()
ax.legend(lines, labels, fontsize=12,
loc='upper left', bbox_to_anchor=(0.85, 1))
return
def boxplot(df, df_column, ax=None):
"""Gera boxplot com os dados de notas"""
flierprops = dict(markerfacecolor='c', marker='o', markersize=10)
meanlineprops = dict(linestyle='--', linewidth=2, color='orange')
medianprops = dict(linestyle='--', linewidth=2, color='blue')
ax.boxplot(df[df_column], vert=False, meanline=True, showmeans=True,
notch=False, labels=[''], flierprops=flierprops,
meanprops=meanlineprops, medianprops=medianprops, widths=0.95,
patch_artist=True, boxprops=dict(facecolor='c', alpha=0.5))
ax.yaxis.set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.grid(b=True, which='major', linestyle=':', linewidth=1.0)
# TODO: integrate the boxplot legend with the histogram for when the boxplot has outliers
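# make_autopct builds a closure over the plotted values so each wedge label can
# show both the percentage and the absolute count recovered from it.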
def make_autopct(values):
def my_autopct(pct):
total = sum(values)
val = int(round(pct * total / 100.0))
return '{p:.2f}% ({v:d})'.format(p=pct, v=val)
return my_autopct
def plot_pizza(values, labels, title, ax=None):
"""Padrão para gráficos em pizza
:type title: string
:param title: Título do gráfico
:type labels: pandas dataframe column
:param labels: labels para o gráfico
:type values: pandas dataframe column
"""
distance = 0.675
angle = 90
fontsize = 14
color = 'white'
patches, texts, autotexts = ax.pie(values, shadow=True,
startangle=angle,
autopct=make_autopct(values),
pctdistance=distance)
[i.set_fontsize(fontsize) for i in texts]
[i.set_fontsize(fontsize - 1) for i in autotexts]
[i.set_color(color) for i in autotexts]
[i.set_weight('bold') for i in autotexts]
ax.axis("equal")
ax.set_title(title, size=fontsize + 4)
ax.legend(patches, labels, fontsize=fontsize, bbox_to_anchor=(0.75, 1),
loc='upper left')
|
{"hexsha": "bfee71b38b0d7562b527a897d553df6fc15da678", "size": 3935, "ext": "py", "lang": "Python", "max_stars_repo_path": "resultados/plots.py", "max_stars_repo_name": "chicolucio/estatisticas-oiq-2019", "max_stars_repo_head_hexsha": "017da73f3bef6a6f40d7dd1214621cad7c14be2e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "resultados/plots.py", "max_issues_repo_name": "chicolucio/estatisticas-oiq-2019", "max_issues_repo_head_hexsha": "017da73f3bef6a6f40d7dd1214621cad7c14be2e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "resultados/plots.py", "max_forks_repo_name": "chicolucio/estatisticas-oiq-2019", "max_forks_repo_head_hexsha": "017da73f3bef6a6f40d7dd1214621cad7c14be2e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.9603960396, "max_line_length": 114, "alphanum_fraction": 0.6386277001, "include": true, "reason": "import numpy", "num_tokens": 1127}
|
import os.path
from os import path
from time import sleep
import time, random
import numpy as np
from absl import app, flags, logging
import cv2
import matplotlib.pyplot as plt
import tensorflow as tf
from yolov3_tf2.models import (
YoloV3, YoloV3Tiny
)
from yolov3_tf2.dataset import transform_images
from yolov3_tf2.utils import draw_outputs, convert_boxes
from deep_sort import iou_matching
from deep_sort import preprocessing
from deep_sort import nn_matching
from deep_sort.detection import Detection
from deep_sort.tracker import Tracker
from tools import generate_detections as gdet
def calc_speed_bbox_tlwh(bbox):
return np.array([bbox[0][0], bbox[0][1], (bbox[1][0] - bbox[0][0]), (bbox[1][1] - bbox[0][1])]).reshape(1, 4)
class object_tracker:
def calc_speed(self, i_total_frames):
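        # frames -> seconds via the reference fps, then m/s -> mph using
        # 3600 s/h and ~1600 m per mile (assumes ref_distance is in metres).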
l_total_time = i_total_frames / (self.ref_fps * 1.0)
rtn_speed = (self.ref_distance * 1.0 / l_total_time) * 3600 / 1600
return round(rtn_speed)
def __init__(self):
# Initiate deep sort parameter
self.max_distance = 0.5
self.distance_measure_type = 'cosine'
self.nn_budget = None
self.nms_max_overlap = 0.78
self.model_filename = 'model_data/mars-small128.pb'
# Initiate YOLO parameter
self.yolo_label_loc = './data/labels/coco.names'
self.yolo_weight_loc = './weights/yolov3.tf'
self.yolo_class_nbr = 80
self.crop_x_from = 170
self.crop_x_to = 1480
self.crop_y_from = 620
self.crop_y_to = 2040
self.speed_detect_bbox_top = ((950, 780), (1350, 860))
self.speed_detect_bbox_bottom = ((150, 1280), (1185, 1390))
self.max_frames_missing = 8
self.skip_frame = 2
self.ref_distance = 53
self.ref_fps = 60
self.detection_classes_str = 'truck,train,bus,car'
self.yolo_predict_time = 0
self.deep_sort_predict_time = 0
self.cv_draw_time = 0
self.feature_engineering_time = 0
self.embedding_time = 0
self.file_io_time = 0
self.code_segment_start = time.time()
self.detection_classes = self.detection_classes_str.split(',')
self.encoder = gdet.create_box_encoder(self.model_filename, batch_size=1)
self.metric = nn_matching.NearestNeighborDistanceMetric(self.distance_measure_type, self.max_distance,
self.nn_budget)
self.tracker = Tracker(self.metric)
self.physical_devices = tf.config.experimental.list_physical_devices('GPU')
if len(self.physical_devices) > 0:
tf.config.experimental.set_memory_growth(self.physical_devices[0], True)
        self.speed_detect_bbox_top_tlwh = calc_speed_bbox_tlwh(self.speed_detect_bbox_top)
        self.speed_detect_bbox_bottom_tlwh = calc_speed_bbox_tlwh(self.speed_detect_bbox_bottom)
self.video_loc = './data/video/h1.mp4'
self.vid = cv2.VideoCapture(self.video_loc)
self.fl_base = '/home/lilun/dockers/data/out/labels/h1_'
self.fl_ctr = 1
self.fl_wait_ctr = 0
self.cur_frame = 0
self.count = 0
def object_track_test(self, img, converted_boxes, scores, names):
self.cur_frame = self.cur_frame + 1
print('cur_frame is '+str(self.cur_frame))
cv2.imshow('output', img)
def object_track(self, img, converted_boxes, scores, names):
self.cur_frame = self.cur_frame + 1
if not ((self.cur_frame < 60) or (self.cur_frame % self.skip_frame == 0)):
features = self.encoder(img, converted_boxes)
zip_val = zip(converted_boxes, scores, names, features)
object_detected = [(bbox, score, class_name, feature) for bbox, score, class_name, feature in zip_val if
class_name in self.detection_classes]
detections = [Detection(bbox, score, class_name, feature) for bbox, score, class_name, feature in
object_detected]
cmap = plt.get_cmap('tab20b')
colors = [cmap(i)[:3] for i in np.linspace(0, 1, 20)]
# run non-maxima suppresion
boxs = np.array([d.tlwh for d in detections])
scores = np.array([d.confidence for d in detections])
classes = np.array([d.class_name for d in detections])
indices = preprocessing.non_max_suppression(boxs, classes, self.nms_max_overlap, scores)
detections = [detections[i] for i in indices]
# Call the tracker
self.tracker.predict()
self.tracker.update(detections)
code_segment_start = time.time()
for track in self.tracker.tracks:
if not track.is_confirmed() or track.time_since_update > self.max_frames_missing:
continue
bbox = track.to_tlbr()
class_name = track.get_class()
color = colors[int(track.track_id) % len(colors)]
color = [i * 255 for i in color]
if track.bbox_cross_at_frame != -1:
if track.class_name == 'truck' and track.track_id == 6:
print("Truck detected at " + str(self.cur_frame))
cv2.rectangle(img, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), color, 2)
cv2.rectangle(img, (int(bbox[0]), int(bbox[1] - 30)),
(int(bbox[0]) + (len(class_name) + len(str(track.track_id))) * 17, int(bbox[1])),
color,
-1)
track_bbox = track.to_tlwh()
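                # Zone-crossing speed estimate: remember the frame at which the
                # track first overlaps the top box, then convert the elapsed
                # frame count into a speed once it reaches the bottom box.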
iou_bbox_top_score = iou_matching.iou(track_bbox, self.speed_detect_bbox_top_tlwh)
iou_bbox_bottom_score = iou_matching.iou(track_bbox, self.speed_detect_bbox_bottom_tlwh)
if iou_bbox_top_score > 0 and iou_bbox_bottom_score <= 0:
if track.bbox_cross_at_frame == -1:
track.bbox_cross_at_frame = self.cur_frame
tag_str = class_name + "-" + str(track.track_id) + " (entering speed detect zone) "
elif iou_bbox_bottom_score > 0:
if track.estimated_speed == -1:
                        track.estimated_speed = self.calc_speed(
                            self.cur_frame - track.bbox_cross_at_frame)
tag_str = class_name + "-" + str(track.track_id) \
+ " Speed: " + str(track.estimated_speed) \
+ " mph (exiting speed detect zone)"
else:
tag_str = class_name + "-" + str(track.track_id)
if track.bbox_cross_at_frame != -1:
cv2.rectangle(img, (int(bbox[0]), int(bbox[1] - 30)),
(int(bbox[0]) + (len(class_name) + len(str(track.track_id))) * 17, int(bbox[1])),
color,
-1)
cv2.putText(img, tag_str, (int(bbox[0]), int(bbox[1] - 10)), 0, 0.75, (255, 255, 255), 2)
cv2.rectangle(img, self.speed_detect_bbox_top[0], self.speed_detect_bbox_top[1], (200.0, 0, 0), 2)
cv2.rectangle(img, self.speed_detect_bbox_bottom[0], self.speed_detect_bbox_bottom[1], (200.0, 0, 0), 2)
cv2.imshow('output', img)
|
{"hexsha": "6c8ce19ce2c8f70eeadf17b77833b7d7d12859bf", "size": 7564, "ext": "py", "lang": "Python", "max_stars_repo_path": "deep_sort/object_track.py", "max_stars_repo_name": "lilun-cheng/vehicle_highway_tracking", "max_stars_repo_head_hexsha": "99c9981c2d9f998d90df070d271b7b88f6dc0a35", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-03-27T06:24:08.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-27T06:24:08.000Z", "max_issues_repo_path": "deep_sort/object_track.py", "max_issues_repo_name": "lilun-cheng/vehicle_highway_tracking", "max_issues_repo_head_hexsha": "99c9981c2d9f998d90df070d271b7b88f6dc0a35", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "deep_sort/object_track.py", "max_forks_repo_name": "lilun-cheng/vehicle_highway_tracking", "max_forks_repo_head_hexsha": "99c9981c2d9f998d90df070d271b7b88f6dc0a35", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 50.0927152318, "max_line_length": 116, "alphanum_fraction": 0.5921470122, "include": true, "reason": "import numpy", "num_tokens": 1785}
|
\par
\section{Driver programs for the {\tt DFrontMtx} object}
\label{section:DFrontMtx:drivers}
\par
%=======================================================================
\begin{enumerate}
%-----------------------------------------------------------------------
\item
\begin{verbatim}
testGrid msglvl msgFile n1 n2 n3 maxzeros maxsize seed type
symmetryflag sparsityflag pivotingflag tau droptol
lockflag nrhs
\end{verbatim}
This driver program tests the serial {\tt FrontMtx\_factor()}
and {\tt FrontMtx\_solve()} methods for the linear system $AX = B$.
Use the script file {\tt do\_grid} for testing.
\par
\begin{itemize}
\item
The {\tt msglvl} parameter determines the amount of output.
Use {\tt msglvl = 1} for just timing output.
\item
The {\tt msgFile} parameter determines the message file --- if {\tt
msgFile} is {\tt stdout}, then the message file is {\it stdout},
otherwise a file is opened with {\it append} status to receive any
output data.
\item
{\tt n1} is the number of points in the first grid direction.
\item
{\tt n2} is the number of points in the second grid direction.
\item
{\tt n3} is the number of points in the third grid direction.
\item
{\tt maxzeros} is used to merge small fronts together into larger
fronts.
Look at the {\tt ETree} object for
the {\tt ETree\_mergeFronts\{One,All,Any\}()} methods.
\item
{\tt maxsize} is used to split large fronts into smaller
fronts.
See the {\tt ETree\_splitFronts()} method.
\item
The {\tt seed} parameter is a random number seed.
\item
The {\tt type} parameter specifies a real or complex linear system.
\begin{itemize}
\item
{\tt type = 1 (SPOOLES\_REAL)} for real,
\item
{\tt type = 2 (SPOOLES\_COMPLEX)} for complex.
\end{itemize}
\item
The {\tt symmetryflag} parameter specifies the symmetry of the matrix.
\begin{itemize}
\item
{\tt type = 0 (SPOOLES\_SYMMETRIC)} for $A$ real or complex symmetric,
\item
{\tt type = 1 (SPOOLES\_HERMITIAN)} for $A$ complex Hermitian,
\item
{\tt type = 2 (SPOOLES\_NONSYMMETRIC)}
\end{itemize}
for $A$ real or complex nonsymmetric.
\item
The {\tt sparsityflag} parameter signals a direct or approximate
factorization.
\begin{itemize}
\item
{\tt sparsityflag = 0 (FRONTMTX\_DENSE\_FRONTS)} implies a direct
factorization, the fronts will be stored as dense submatrices.
\item
{\tt sparsityflag = 1 (FRONTMTX\_SPARSE\_FRONTS)} implies an
approximate factorization.
The fronts will be stored as sparse submatrices, where
the entries in the triangular factors will be
subjected to a drop tolerance test --- if the magnitude of an entry
is {\tt droptol} or larger, it will be stored, otherwise it will be
dropped.
\end{itemize}
\item
The {\tt pivotingflag} parameter signals whether pivoting for
stability will be enabled or not.
\begin{itemize}
\item
If {\tt pivotingflag = 0 (SPOOLES\_NO\_PIVOTING)},
no pivoting will be done.
\item
If {\tt pivotingflag = 1 (SPOOLES\_PIVOTING)},
pivoting will be done to ensure that all
entries in $U$ and $L$ have magnitude less than {\tt tau}.
\end{itemize}
\item
The {\tt tau} parameter is an upper bound on the magnitude of the
entries in $L$ and $U$ when pivoting is enabled.
\item
The {\tt droptol} parameter is a lower bound on the magnitude of the
entries in $L$ and $U$ when the approximate factorization is enabled.
\item
When {\tt lockflag} is zero, the mutual exclusion lock for the
factor matrix is not enabled.
When {\tt lockflag} is not zero, the mutual exclusion lock is set.
This capability is here to test the overhead for the locks for a
serial factorization.
\item
The {\tt nrhs} parameter is the number of right hand sides to solve
as one block.
\end{itemize}
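\par
An illustrative invocation (the argument values here are examples only,
not taken from the {\tt do\_grid} script) for a direct factorization
with pivoting of a real symmetric matrix from a
$15 \times 15 \times 15$ grid is
\begin{verbatim}
testGrid 1 stdout 15 15 15 1000 64 10101 1 0 0 1 100. 0.0 0 4
\end{verbatim}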
%-----------------------------------------------------------------------
\item
\begin{verbatim}
testQRgrid msglvl msgFile n1 n2 n3 seed nrhs type
\end{verbatim}
This driver program tests the serial {\tt FrontMtx\_QR\_factor()}
and {\tt FrontMtx\_QR\_solve()} methods for the least squares problem
$ \min_X \| F - A X \|_F$.
An illustrative invocation is shown after the parameter list below.
\par
\begin{itemize}
\item
The {\tt msglvl} parameter determines the amount of output.
Use {\tt msglvl = 1} for just timing output.
\item
The {\tt msgFile} parameter determines the message file --- if {\tt
msgFile} is {\tt stdout}, then the message file is {\it stdout},
otherwise a file is opened with {\it append} status to receive any
output data.
\item
{\tt n1} is the number of points in the first grid direction.
\item
{\tt n2} is the number of points in the second grid direction.
\item
{\tt n3} is the number of points in the third grid direction.
\item
The {\tt seed} parameter is a random number seed.
\item
The {\tt nrhs} parameter is the number of right hand sides to solve
as one block.
\item
The {\tt type} parameter specifies a real or complex linear system.
\begin{itemize}
\item
{\tt type = 1 (SPOOLES\_REAL)} for real,
\item
{\tt type = 2 (SPOOLES\_COMPLEX)} for complex.
\end{itemize}
\end{itemize}
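\par
An illustrative invocation (the argument values here are examples only)
for a real least squares problem on an $11 \times 11 \times 11$ grid
with four right hand sides is
\begin{verbatim}
testQRgrid 1 stdout 11 11 11 10101 4 1
\end{verbatim}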
%-----------------------------------------------------------------------
\end{enumerate}
|
{"hexsha": "bc351c7014311e2a4dbf52a05b0708a0573bce7e", "size": 4975, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "ccx_prool/SPOOLES.2.2/FrontMtx/doc/drivers.tex", "max_stars_repo_name": "alleindrach/calculix-desktop", "max_stars_repo_head_hexsha": "2cb2c434b536eb668ff88bdf82538d22f4f0f711", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ccx_prool/SPOOLES.2.2/FrontMtx/doc/drivers.tex", "max_issues_repo_name": "alleindrach/calculix-desktop", "max_issues_repo_head_hexsha": "2cb2c434b536eb668ff88bdf82538d22f4f0f711", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2017-09-21T17:03:55.000Z", "max_issues_repo_issues_event_max_datetime": "2018-01-25T16:08:31.000Z", "max_forks_repo_path": "ccx_prool/SPOOLES.2.2/FrontMtx/doc/drivers.tex", "max_forks_repo_name": "alleindrach/calculix-desktop", "max_forks_repo_head_hexsha": "2cb2c434b536eb668ff88bdf82538d22f4f0f711", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-08-29T18:41:28.000Z", "max_forks_repo_forks_event_max_datetime": "2019-08-29T18:41:28.000Z", "avg_line_length": 33.843537415, "max_line_length": 72, "alphanum_fraction": 0.7003015075, "num_tokens": 1388}
|
MODULE ps_local_fftw_module
use rgrid_module, only: Ngrid
use ggrid_module, only: NGgrid, MGL, MG_0,MG_1, LLG, allgatherv_ggrid &
,construct_ggrid, destruct_ggrid
use fftw_module, only: ML1_c, ML2_c, N_ML3_c, ML3_c0 &
,zwork3_ptr0, zwork3_ptr1, plan_backward, z3_to_d1_fftw
use,intrinsic :: iso_c_binding
implicit none
PRIVATE
PUBLIC :: construct_ps_local_fftw
CONTAINS
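  !--------------------------------------------------------------------
  ! Build the local pseudopotential Vion on the real-space grid:
  ! (1) accumulate vqlg*SGK over this process's range of G vectors,
  ! (2) gather the full G-space data, (3) place it on the 3D FFT mesh,
  ! (4) backward-FFT with FFTW and convert the result to the 1D array Vion.
  !--------------------------------------------------------------------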
SUBROUTINE construct_ps_local_fftw( vqlg, SGK, Vion )
implicit none
real(8),intent(IN) :: vqlg(:,:)
complex(8),intent(IN) :: SGK(:,:)
real(8),intent(OUT) :: Vion(:)
#ifdef _FFTW_
integer :: i,i1,i2,i3,j1,j2,j3,ik,j,MG
integer :: ML1,ML2,ML3,ML,Nelement
complex(8),allocatable :: zwork3(:,:,:),vg(:)
include "fftw3-mpi.f03"
call write_border( 0, " construct_ps_local_fftw(start)" )
MG = NGgrid(0)
ML = Ngrid(0)
ML1 = Ngrid(1)
ML2 = Ngrid(2)
ML3 = Ngrid(3)
Nelement = size( SGK, 2 )
allocate( vg(MG) ) ; vg=(0.0d0,0.0d0)
do ik=1,Nelement
do i=MG_0,MG_1
j=MGL(i)
vg(i)=vg(i)+vqlg(j,ik)*SGK(i,ik)
end do
end do
call allgatherv_Ggrid(vg)
call construct_Ggrid(2)
allocate( zwork3(0:ML1-1,0:ML2-1,0:ML3-1) )
zwork3(:,:,:)=(0.0d0,0.0d0)
do i=1,NGgrid(0)
zwork3(LLG(1,i),LLG(2,i),LLG(3,i))=vg(i)
end do
call destruct_Ggrid
deallocate( vg )
do i3=1,N_ML3_c
do i2=1,ML2_c
do i1=1,ML1_c
j1=i1-1
j2=i2-1
j3=i3-1+ML3_c0
zwork3_ptr0(i1,i2,i3) = zwork3(j1,j2,j3)
end do
end do
end do
call fftw_mpi_execute_dft( plan_backward, zwork3_ptr0, zwork3_ptr1 )
call z3_to_d1_fftw( zwork3_ptr1, Vion )
deallocate( zwork3 )
call write_border( 0, " construct_ps_local_fftw(end)" )
#else
Vion=0.0d0
#endif
END SUBROUTINE construct_ps_local_fftw
END MODULE ps_local_fftw_module
|
{"hexsha": "2e105b70a0694f044d98ffc0f19e7f44bbe8737d", "size": 1935, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/ps_local_fftw_module.f90", "max_stars_repo_name": "j-iwata/RSDFT", "max_stars_repo_head_hexsha": "2a961b2c8339a49de9bd09f55e7d6a45b6159d2e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2016-10-31T02:11:39.000Z", "max_stars_repo_stars_event_max_datetime": "2021-10-04T17:45:30.000Z", "max_issues_repo_path": "src/ps_local_fftw_module.f90", "max_issues_repo_name": "j-iwata/RSDFT", "max_issues_repo_head_hexsha": "2a961b2c8339a49de9bd09f55e7d6a45b6159d2e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/ps_local_fftw_module.f90", "max_forks_repo_name": "j-iwata/RSDFT", "max_forks_repo_head_hexsha": "2a961b2c8339a49de9bd09f55e7d6a45b6159d2e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2016-10-31T02:11:41.000Z", "max_forks_repo_forks_event_max_datetime": "2020-04-18T14:26:38.000Z", "avg_line_length": 22.2413793103, "max_line_length": 73, "alphanum_fraction": 0.6149870801, "num_tokens": 750}
|
"""Random policies.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tf_agents.policies import random_tf_policy
from tf_agents.specs import tensor_spec
from tf_agents.trajectories import policy_step
from policies import tf_policy
class RandomPolicy(random_tf_policy.RandomTFPolicy):
"""Sample random antipodal grasps."""
def __init__(self,
time_step_spec,
action_spec,
config=None):
super(RandomPolicy, self).__init__(
time_step_spec,
action_spec)
class RandomPrimitivePolicy(tf_policy.TFPolicy):
"""Random policy using modes"""
def __init__(self,
time_step_spec,
action_spec,
config=None):
self._debug = config.DEBUG
self._primitives = np.array(config.PRIMITIVES, dtype=np.float32)
self._num_modes = self._primitives.shape[0]
self._noise = np.array(config.NOISE, dtype=np.float32)
self._use_uniform_action = config.USE_UNIFORM_ACTION
super(RandomPrimitivePolicy, self).__init__(
time_step_spec=time_step_spec,
action_spec=action_spec,
config=config,)
def _action(self, time_step, policy_state, seed):
if self._use_uniform_action:
use_mode = tf.random.categorical(
[[1., 1.]],
1,
dtype=tf.int64)
use_mode = tf.squeeze(use_mode, 0)
else:
use_mode = tf.cast([1], tf.int64)
mode = tf.random.uniform(
[1],
minval=0,
maxval=self._num_modes,
dtype=tf.int64)
# Sample action by mode.
noise = tf.random.uniform(
self.action_spec()['action'].shape,
minval=-1.0,
maxval=1.0,
dtype=tf.float32) * self._noise
mode_action = tf.gather(
self._primitives,
mode)
mode_action += tf.expand_dims(noise, 0)
# TODO(kuanfang): Assume actions are between [-1, 1].
mode_action = tf.clip_by_value(mode_action, -1, 1)
# Sample action uniformly from the action space.
uniform_action = tensor_spec.sample_spec_nest(
self.action_spec()['action'], seed=seed, outer_dims=[1])
# Choose the action between the two.
action = tf.where(tf.cast(use_mode, tf.bool),
mode_action,
uniform_action)
# Debug
if self._debug:
print_op = tf.print(
'use_mode: ', use_mode, '\n',
'mode: ', mode, '\n',
'noise: ', noise, '\n',
'mode_action: ', mode_action, '\n',
'uniform_action: ', uniform_action, '\n',
'action: ', action, '\n',
)
with tf.control_dependencies([print_op]):
action = tf.identity(action)
action = {
'action': action,
'use_mode': use_mode,
'mode': mode,
}
return policy_step.PolicyStep(action, policy_state)
def _distribution(self, time_step, policy_state):
raise NotImplementedError('Distributions are not implemented yet.')
|
{"hexsha": "92e04289f60703e9b083a2674202d72d57289f30", "size": 3370, "ext": "py", "lang": "Python", "max_stars_repo_path": "policies/random_policy.py", "max_stars_repo_name": "StanfordVL/cavin", "max_stars_repo_head_hexsha": "581f70fefb3a869db739d8539f3b74759ab71777", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 17, "max_stars_repo_stars_event_min_datetime": "2020-04-11T22:31:40.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-16T09:29:16.000Z", "max_issues_repo_path": "policies/random_policy.py", "max_issues_repo_name": "StanfordVL/cavin", "max_issues_repo_head_hexsha": "581f70fefb3a869db739d8539f3b74759ab71777", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "policies/random_policy.py", "max_forks_repo_name": "StanfordVL/cavin", "max_forks_repo_head_hexsha": "581f70fefb3a869db739d8539f3b74759ab71777", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2020-09-28T02:39:31.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-16T13:44:29.000Z", "avg_line_length": 30.6363636364, "max_line_length": 75, "alphanum_fraction": 0.5724035608, "include": true, "reason": "import numpy", "num_tokens": 718}
|
Name: Karahan Mete
Long-time Davis resident.
Educator
Business Management Consultant
R&D Agriculture: Introduced Black tea farming in California
Support sustainable agriculture.
Nondenominational Reverend
Practice Sufism
Social Justice Activist.
Concerned and seeking to promote human welfare
Born and raised in Turkey
Lived and studied in Germany and the United States
|
{"hexsha": "83b612ce8a742e27868d91794b7864530739dffe", "size": 363, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "lab/davisWiki/Karahan_Mete.f", "max_stars_repo_name": "voflo/Search", "max_stars_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab/davisWiki/Karahan_Mete.f", "max_issues_repo_name": "voflo/Search", "max_issues_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab/davisWiki/Karahan_Mete.f", "max_forks_repo_name": "voflo/Search", "max_forks_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.2, "max_line_length": 59, "alphanum_fraction": 0.8457300275, "num_tokens": 76}
|
[STATEMENT]
lemma "add_tvarsT T acc = acc \<union> tvsT T"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. add_tvarsT T acc = acc \<union> tvsT T
[PROOF STEP]
unfolding add_tvarsT_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. fold_atyps (case_typ (\<lambda>literal list. id) (\<lambda>idn s. insert (idn, s))) T acc = acc \<union> tvsT T
[PROOF STEP]
proof (induction T arbitrary: acc)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<And>x1 x2 acc. (\<And>x2a acc. x2a \<in> set x2 \<Longrightarrow> fold_atyps (case_typ (\<lambda>literal list. id) (\<lambda>idn s. insert (idn, s))) x2a acc = acc \<union> tvsT x2a) \<Longrightarrow> fold_atyps (case_typ (\<lambda>literal list. id) (\<lambda>idn s. insert (idn, s))) (Ty x1 x2) acc = acc \<union> tvsT (Ty x1 x2)
2. \<And>x1 x2 acc. fold_atyps (case_typ (\<lambda>literal list. id) (\<lambda>idn s. insert (idn, s))) (Tv x1 x2) acc = acc \<union> tvsT (Tv x1 x2)
[PROOF STEP]
case (Ty n Ts)
[PROOF STATE]
proof (state)
this:
?x2a \<in> set Ts \<Longrightarrow> fold_atyps (\<lambda>a. case a of Ty literal list \<Rightarrow> id | Tv idn s \<Rightarrow> insert (idn, s)) ?x2a ?acc = ?acc \<union> tvsT ?x2a
goal (2 subgoals):
1. \<And>x1 x2 acc. (\<And>x2a acc. x2a \<in> set x2 \<Longrightarrow> fold_atyps (case_typ (\<lambda>literal list. id) (\<lambda>idn s. insert (idn, s))) x2a acc = acc \<union> tvsT x2a) \<Longrightarrow> fold_atyps (case_typ (\<lambda>literal list. id) (\<lambda>idn s. insert (idn, s))) (Ty x1 x2) acc = acc \<union> tvsT (Ty x1 x2)
2. \<And>x1 x2 acc. fold_atyps (case_typ (\<lambda>literal list. id) (\<lambda>idn s. insert (idn, s))) (Tv x1 x2) acc = acc \<union> tvsT (Tv x1 x2)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
?x2a \<in> set Ts \<Longrightarrow> fold_atyps (\<lambda>a. case a of Ty literal list \<Rightarrow> id | Tv idn s \<Rightarrow> insert (idn, s)) ?x2a ?acc = ?acc \<union> tvsT ?x2a
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
?x2a \<in> set Ts \<Longrightarrow> fold_atyps (\<lambda>a. case a of Ty literal list \<Rightarrow> id | Tv idn s \<Rightarrow> insert (idn, s)) ?x2a ?acc = ?acc \<union> tvsT ?x2a
goal (1 subgoal):
1. fold_atyps (\<lambda>a. case a of Ty literal list \<Rightarrow> id | Tv idn s \<Rightarrow> insert (idn, s)) (Ty n Ts) acc = acc \<union> tvsT (Ty n Ts)
[PROOF STEP]
by (induction Ts arbitrary: acc) auto
[PROOF STATE]
proof (state)
this:
fold_atyps (\<lambda>a. case a of Ty literal list \<Rightarrow> id | Tv idn s \<Rightarrow> insert (idn, s)) (Ty n Ts) acc = acc \<union> tvsT (Ty n Ts)
goal (1 subgoal):
1. \<And>x1 x2 acc. fold_atyps (case_typ (\<lambda>literal list. id) (\<lambda>idn s. insert (idn, s))) (Tv x1 x2) acc = acc \<union> tvsT (Tv x1 x2)
[PROOF STEP]
qed auto
|
{"llama_tokens": 1151, "file": "Metalogic_ProofChecker_Term", "length": 7}
|
import os, sys
import csv
from os.path import isfile, join
from collections import Counter
import matplotlib.pyplot as plt
import streamlit as st
import pandas as pd
import numpy as np
import altair as alt
import sklearn
import numpy
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.datasets import make_blobs
from sklearn.preprocessing import StandardScaler
import statistics
class SubGroups():
def __init__(self, dataPath=""):
self.dataPath = dataPath
#hasAllFileNamesInAnArray
self.fileNames = []
#currentFileData Array
self.currentFileDataArray = []
#endResultArray
self.endResultArray = []
def getFileNames(self):
""" returns list of all files in the path"""
dirs = os.listdir(self.dataPath)
        # for every file in the directory
        for file in dirs:
            self.fileNames.append(file)
            # read the file
            self.readFiles(file)
#print(self.fileNames)
def readFiles(self, file):
"""Opens every file and read it. It is Main extraction method"""
path = self.dataPath
backslash = "\\"
filename = self.dataPath+backslash+file
print("Das ist die zu extrahierende File" + file)
fp=open(filename)
resultArray = []
for line in fp:
res=line.split('|')
resultArray.append(res)
        # current file data
self.currentFileDataArray = resultArray
self.extractAge(file)
#self.extractGender(file)
def extractAge(self, file):
""" extract important Glucose """
allAge = []
allGender = []
gender = 1000
for i in range(1, len(self.currentFileDataArray)):
if(self.currentFileDataArray[i][34] != "NaN"):
allAge.append(float(self.currentFileDataArray[i][34]))
            # record the gender value (0 or 1)
if(self.currentFileDataArray[i][35] == "0"):
allGender.append(0)
if(self.currentFileDataArray[i][35] == "1"):
allGender.append(1)
allAge = list(dict.fromkeys(allAge))
allGender = list(dict.fromkeys(allGender))
if(len(allGender)== 1 and allGender[0]==0):
gender=0
if(len(allGender)== 1 and allGender[0]==1):
gender=1
for i in range(0, len(allAge)):
if(gender != 1000):
self.endResultArray.append([allAge[i], gender])
#Class 1: A1
A1 = SubGroups("D:\\Uni\\MasterSimo\\WiSe2122\\Data Challenges\\Datensatz\\training_setA\\training")
#A1.makeGenderDiagram()
#A1.makeAgeGenderDiagram()
A2 = SubGroups("D:\\Uni\\MasterSimo\\WiSe2122\\Data Challenges\\Datensatz\\training_setB\\training_setB")
A1.getFileNames()
#A1.readFiles("p000001.psv")
#totalEndResult = A1.endResultArray
#print("Das ist total Endresult")
#print(totalEndResult)
#for z, (directory, file_head) in enumerate(directorys):
# for i, filename in enumerate(tqdm(os.listdir(path + directory))):
# df_temp = pd.read_csv(path + directory + filename, skiprows=0, sep='|')
# patient_gender = df_temp["Gender"][1]
# if df_temp["Age"][1] >= 40:
# dfs.append(df_temp)
A2.getFileNames()
totalEndResult = A1.endResultArray + A2.endResultArray
#print(endResult)
print("Len totalEndResult")
print(len(totalEndResult))
#print("Das ist average Array:")
#print(A1.extract41average())
#print("Das ist Länge der Liste: "+str( A1.endResultArray ))
#print("Das ist totale Länge der Liste: "+ str(len( A1.endResultArray )))
#Load Data
xWerte = []
yWerte = []
for i in range(0, len(totalEndResult)):
xWerte.append(totalEndResult[i][0])
yWerte.append(totalEndResult[i][1])
#print(xWerte)
#print(yWerte)
#X, _ = make_blobs(n_samples= len(A1.endResultArray), centers=41, random_state=0)
_ = plt.plot(xWerte, yWerte, marker = '.', linewidth=0)
_ = plt.title('Gender-Age')
_ = plt.xlabel('Gender')
_ = plt.ylabel('Age values from Gender')
plt.show()
#FROM HERE ON the code is experimental
####YOUTUBE START
#Prepare data for Model
dbscan_data = A1.endResultArray
#convert to numpy array
dbscan_data = np.array(dbscan_data, dtype=np.float32)
#convert to df
indexDF = []
for i in range(len(dbscan_data)):
indexDF.append(str(i))
dbscan_data = pd.DataFrame(data=dbscan_data,
index =indexDF,
columns=["xWerte", "yWerte"])
#print(dbscan_data)
#Normalize Data
dbscan_data_scaler = StandardScaler().fit(dbscan_data)
dbscan_data = dbscan_data_scaler.transform(dbscan_data)
#model = DBSCAN(eps=0.1, min_samples=1, metric='euclidean').\
# fit(dbscan_data)
model = DBSCAN(eps=0.05, min_samples=50).fit(dbscan_data)
core_samples_mask = np.zeros_like(model.labels_, dtype=bool)
core_samples_mask[model.core_sample_indices_] = True
labels = model.labels_
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
n_noise_ = list(labels).count(-1)
# Black removed and is used for noise instead.
unique_labels = set(labels)
colors = [plt.cm.Spectral(each) for each in np.linspace(0, 1, len(unique_labels))]
for k, col in zip(unique_labels, colors):
if k == -1:
# Black used for noise.
col = [0, 0, 0, 1]
class_member_mask = labels == k
xy = dbscan_data[class_member_mask & core_samples_mask]
plt.plot(
xy[:, 0],
xy[:, 1],
"o",
markerfacecolor=tuple(col),
markeredgecolor="k",
markersize=14,
)
xy = dbscan_data[class_member_mask & ~core_samples_mask]
plt.plot(
xy[:, 0],
xy[:, 1],
"o",
markerfacecolor=tuple(col),
markeredgecolor="k",
markersize=6,
)
plt.title("Estimated number of clusters: %d" % n_clusters_)
plt.show()
|
{"hexsha": "0980e9359446296b60c4fc76c73be29175075ccf", "size": 5955, "ext": "py", "lang": "Python", "max_stars_repo_path": "otherCodeTaskSnippets/AgeGenderOPTICS.py", "max_stars_repo_name": "s2812135/Data_Challenges_WiSe2122", "max_stars_repo_head_hexsha": "a55372f444e7344af4e2e1f04e4244fb8cefeefe", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "otherCodeTaskSnippets/AgeGenderOPTICS.py", "max_issues_repo_name": "s2812135/Data_Challenges_WiSe2122", "max_issues_repo_head_hexsha": "a55372f444e7344af4e2e1f04e4244fb8cefeefe", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "otherCodeTaskSnippets/AgeGenderOPTICS.py", "max_forks_repo_name": "s2812135/Data_Challenges_WiSe2122", "max_forks_repo_head_hexsha": "a55372f444e7344af4e2e1f04e4244fb8cefeefe", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.0120967742, "max_line_length": 105, "alphanum_fraction": 0.6408060453, "include": true, "reason": "import numpy", "num_tokens": 1553}
|
from sklearn import base as skbase
import numpy as np
#https://www.kaggle.com/c/ashrae-energy-prediction/discussion/113784#latest-656376
class DatetimeConvertCyclical(skbase.BaseEstimator, skbase.TransformerMixin):
def __init__(self):
self.time_periods = {'second': 24 * 60 * 60,
'minute': 24 * 60,
'hour': 24,
'day': 30,
'dayofweek': 7,
'month': 12}
def fit(self, X, y=None):
return self
def transform(self, X):
for period, value in self.time_periods.items():
X[period] = getattr(X['timestamp'].dt, period)
X['sin_' + period] = np.sin(2 * np.pi * X[period] / value)
X['cos_' + period] = np.cos(2 * np.pi * X[period] / value)
X.drop(str(period), axis=1, inplace=True)
return X
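
# Illustrative usage (hypothetical data, not part of the original module):
#
#     import pandas as pd
#     df = pd.DataFrame({'timestamp': pd.date_range('2019-01-01', periods=4, freq='H')})
#     df = DatetimeConvertCyclical().fit_transform(df)
#     # 'timestamp' is kept; sin_*/cos_* columns are added for each period above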
|
{"hexsha": "189af03652f0c7065c863635cf916ea2100e2baf", "size": 912, "ext": "py", "lang": "Python", "max_stars_repo_path": "python-package/LiteMORT/LiteMORT_time.py", "max_stars_repo_name": "closest-git/LiteMORT", "max_stars_repo_head_hexsha": "4c04277f2c5c7500e00ce4e3d26c2641ea85377e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 50, "max_stars_repo_stars_event_min_datetime": "2019-03-19T11:46:18.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-25T15:44:37.000Z", "max_issues_repo_path": "python-package/LiteMORT/LiteMORT_time.py", "max_issues_repo_name": "closest-git/LiteMORT", "max_issues_repo_head_hexsha": "4c04277f2c5c7500e00ce4e3d26c2641ea85377e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2019-09-24T07:47:32.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-23T23:53:09.000Z", "max_forks_repo_path": "python-package/LiteMORT/LiteMORT_time.py", "max_forks_repo_name": "closest-git/LiteMORT", "max_forks_repo_head_hexsha": "4c04277f2c5c7500e00ce4e3d26c2641ea85377e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2019-09-11T17:06:21.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-25T15:44:38.000Z", "avg_line_length": 33.7777777778, "max_line_length": 82, "alphanum_fraction": 0.5164473684, "include": true, "reason": "import numpy", "num_tokens": 234}
|
"""Utility functions used by the notebooks
"""
import json
from pathlib import Path
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from IPython.display import HTML
pd.options.display.max_columns = 48
pd.options.display.max_rows = 48
def cal_phrate_alex(d, stream, phrates=None, recompute=False):
"""Compute peak photon rate in bursts for all streams (1-spot ALEX version)
"""
if phrates is None:
phrates = {}
phrates_fname = Path('results/%s_phrate_%s.csv' % (mlabel_alex, stream))
phrates_fnameB = Path('results/%s_phrate_%sB.csv' % (mlabel_alex, stream))
if phrates_fname.is_file() and not recompute:
with open(phrates_fname) as f:
phrates[str(stream)] = json.load(f)
phr = pd.read_csv(phrates_fnameB, index_col=0)
phr.columns = [int(c) for c in phr.columns]
phr.columns.name = 'spot'
phrates[str(stream)+'B'] = phr
else:
try:
d.calc_max_rate(m=10, ph_sel=stream, compact=True)
except ValueError:
d.calc_max_rate(m=10, ph_sel=stream, compact=False)
phrates[str(stream)+'B'] = make_df_bursts_alex(d.max_rate)
phrates[str(stream)] = {
'num_bursts': int(d.num_bursts),
'num_nans': int(np.isnan(d.max_rate[0]).sum())}
phrates[str(stream)]['num_valid'] = int(d.num_bursts - phrates[str(stream)]['num_nans'])
phrates[str(stream)]['valid_fraction'] = float(100 * phrates[str(stream)]['num_valid'] / d.num_bursts)
phrates_fname.parent.mkdir(parents=True, exist_ok=True)
with open(phrates_fname, 'wt') as f:
json.dump(phrates[str(stream)], f)
phrates[str(stream)+'B'].to_csv(phrates_fnameB)
print(' Valid fraction (mean of all ch): %.1f %%' %
np.mean(phrates[str(stream)]['valid_fraction']))
return phrates
def make_df_bursts_alex(list_of_columns):
"""Create dataframe for burst data for 1-spot ALEX"""
ncols = len(list_of_columns)
nrows = max(len(x) for x in list_of_columns)
columns = np.arange(ncols)
df = pd.DataFrame(columns=columns, index=np.arange(nrows), dtype=float)
df.columns.name = 'spot'
for col, col_data in zip(columns, list_of_columns):
df.iloc[:len(col_data), col] = col_data
return df
def make_df_bursts(list_of_columns):
"""Create 48-column dataframe for 48-spot PAX data"""
ncols = 48
assert len(list_of_columns) == ncols
nrows = max(len(x) for x in list_of_columns)
columns = np.arange(ncols)
df = pd.DataFrame(columns=columns, index=np.arange(nrows), dtype=float)
df.columns.name = 'spot'
for col, col_data in zip(columns, list_of_columns):
df.iloc[:len(col_data), col] = col_data
return df
def make_df_spots(list_of_tuples=None):
"""Create 48-rows dataframe for 48-spot PAX data"""
nrows = 48
df = pd.DataFrame(index=np.arange(nrows))
if list_of_tuples is None:
list_of_tuples = []
for col, col_data in list_of_tuples:
df[col] = col_data
return df
def cal_phrate(d, stream, phrates=None, recompute=False):
Path('results').mkdir(exist_ok=True)
if phrates is None:
phrates = {}
phrates_fname = Path('results/%s_phrate_%s.csv' % (mlabel, stream))
phrates_fnameB = Path('results/%s_phrate_%sB.csv' % (mlabel, stream))
if phrates_fname.is_file() and not recompute:
phrates[str(stream)] = pd.read_csv(phrates_fname, index_col=0)
phrates[str(stream)].index.name = 'spot'
phr = pd.read_csv(phrates_fnameB, index_col=0)
phr.columns = [int(c) for c in phr.columns]
phr.columns.name = 'spot'
phrates[str(stream)+'B'] = phr
else:
try:
d.calc_max_rate(m=10, ph_sel=stream, compact=True)
except ValueError:
d.calc_max_rate(m=10, ph_sel=stream, compact=False)
phrates[str(stream)+'B'] = make_df_bursts(d.max_rate)
phrates[str(stream)] = (make_df_spots()
.assign(**{'num_bursts': d.num_bursts})
.assign(**{'num_nans': [np.isnan(x).sum() for x in d.max_rate]})
.assign(**{'num_valid': lambda x: x.num_bursts - x.num_nans})
.assign(**{'valid_fraction': lambda x: 100 * x.num_valid / x.num_bursts})
)
phrates[str(stream)].to_csv(phrates_fname)
phrates[str(stream)+'B'].to_csv(phrates_fnameB)
print(' Valid fraction (mean of all ch): %.1f %%' %
phrates[str(stream)].valid_fraction.mean())
return phrates
def info_html(d):
"""Display measurement info in the notebook"""
Dex, Aex = d.setup['excitation_input_powers']*1e3
s = """
<h3>File: {fname}</h3>
<blockquote><p class="lead">{descr}</p></blockquote>
<ul>
<li><span style='display: inline-block; width: 150px;'>Acquisition duration:</span> {time:.1f} s </li>
<li><span style='display: inline-block; width: 150px;'>Laser power:</span> <b>{Dex:.0f}mW</b> @ 532nm
<b>{Aex:.0f}mW</b> @ 628nm </li>
<li><span style='display: inline-block; width: 150px;'>ALEX period [offset]: </span> {period} ({period_us:.1f} μs) [{offset}] </li></ul>
""".format(fname=d.fname, time=float(d.acquisition_duration), Dex=Dex, Aex=Aex,
period=d.alex_period, period_us=d.alex_period*d.clk_p*1e6, offset=d.offset,
descr=d.description.decode())
return HTML(s)
def save_name(name, folder='.', label=None, nospaces=False):
"""Compute file name for saving a figure"""
if label is None:
label = mlabel
sname = '%s/%s_%s' % (folder, label, name)
if nospaces:
sname = sname.replace(' ', '_')
return sname
def savefig(name, nospaces=True, label=None, **kwargs):
"""Save a figure prepending the measurement label and other options"""
if not save_figures:
return
savefigpath = Path(savefigdir)
savefigpath.mkdir(exist_ok=True)
kwargs_ = dict(dpi=100, bbox_inches='tight')
#frameon=True, facecolor='white', transparent=False)
kwargs_.update(kwargs)
fname = save_name(name, savefigdir, nospaces=nospaces, label=label)
plt.savefig(fname, **kwargs_)
print('Saved: %s.png' % fname, flush=True)
if highres:
kwargs_['dpi'] = 300
name = name[:-4] if name.lower().endswith('.png') else name
fname = save_name(name + '_highres', savefigdir, nospaces=nospaces, label=label)
print('Saved hires: %s.png' % fname, flush=True)
plt.savefig(fname, **kwargs_)
|
{"hexsha": "cb08969a461738f57c15109c8e903f132edbe0bd", "size": 6689, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils.py", "max_stars_repo_name": "tritemio/48-spot-smFRET-PAX-analysis", "max_stars_repo_head_hexsha": "63c61dc93c9a605796b883ab44ed33ed5f3761d6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2017-09-15T18:43:52.000Z", "max_stars_repo_stars_event_max_datetime": "2017-09-15T18:43:52.000Z", "max_issues_repo_path": "utils.py", "max_issues_repo_name": "tritemio/48-spot-smFRET-PAX-analysis", "max_issues_repo_head_hexsha": "63c61dc93c9a605796b883ab44ed33ed5f3761d6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "utils.py", "max_forks_repo_name": "tritemio/48-spot-smFRET-PAX-analysis", "max_forks_repo_head_hexsha": "63c61dc93c9a605796b883ab44ed33ed5f3761d6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.036809816, "max_line_length": 141, "alphanum_fraction": 0.620720586, "include": true, "reason": "import numpy", "num_tokens": 1809}
|
"""
This module helps construct sums of non-hermitian operators for measuring
specific sets of amplitudes. The general procedure for measuring individual
amplitudes was outlined in
Nature Communications 7, Article number: 10439 (2016)
doi:10.1038/ncomms10439
Each amplitude c_{j} is associated with an operator |a><j| where |a> is an
arbitrary computation basis vector. These are referred to as column operators.
(Cj)
Each column operator has an expectation value
<Cj> = <psi|a>c_{j}
which is proportional to the complex state vector coefficient, except for
an extra factor <psi|a> that does not depend on 'j'. Therefore, we can select
a single |a> and form C_{a} = |a><a|, for which <C_{a}> = |<psi|a>|^{2}.
What this means is that we can figure out |<psi|a>|^{2} and thus |<psi|a>|,
which gives us the normalization constant we want.
"""
from functools import reduce
import numpy as np
from pyquil.paulis import sX, sY, sZ, sI, PauliSum
from grove.measurements.estimation import estimate_locally_commuting_operator
def _single_projector_generator(ket_op, bra_op, index):
"""
Generate the pauli sum terms corresponding to |ket_op><brak_op|
:param ket_op: single qubit computational basis state
:param bra_op: single qubit computational basis state
:param index: qubit index to assign to the projector
:return: pauli sum of single qubit projection operator
:rtype: PauliSum
"""
if not isinstance(ket_op, int):
raise TypeError("ket_op needs to be an integer")
if not isinstance(bra_op, int):
raise TypeError("ket_op needs to be an integer")
if ket_op not in [0, 1] or bra_op not in [0, 1]:
raise ValueError("bra and ket op needs to be either 0 or 1")
if ket_op == 0 and bra_op == 0:
return 0.5 * (sZ(index) + sI(index))
elif ket_op == 0 and bra_op == 1:
return 0.5 * (sX(index) + 1j * sY(index))
elif ket_op == 1 and bra_op == 0:
return 0.5 * (sX(index) - 1j * sY(index))
else:
return 0.5 * (sI(index) - sZ(index))
def projector_generator(ket, bra):
"""
Generate a Pauli Sum that corresponds to the projection operator |ket><bra|
note: ket and bra are numerically ordered such that ket = [msd, ..., lsd]
where msd == most significant digit and lsd = least significant digit.
:param List ket: string of zeros and ones corresponding to a computational
basis state.
:param List bra: string of zeros and ones corresponding to a computational
basis state.
:return: projector as a pauli sum
    :rtype: PauliSum
"""
projectors = []
for index, (ket_one_qubit, bra_one_qubit) in enumerate(zip(ket[::-1], bra[::-1])):
projectors.append(_single_projector_generator(ket_one_qubit,
bra_one_qubit, index))
return reduce(lambda x, y: x * y, projectors)
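
# Illustrative example (not part of the original module): the column operator
# |00><01| on two qubits, built as a PauliSum.
#
#     op = projector_generator([0, 0], [0, 1])
#     # op == 0.25 * (sZ(1) + sI(1)) * (sX(0) + 1j * sY(0)),
#     # so <psi| op |psi> = <psi|00><01|psi>.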
def measure_wf_coefficients(prep_program, coeff_list, reference_state,
quantum_resource, variance_bound=1.0E-6):
"""
Measure a set of coefficients with a phase relative to the reference_state
:param prep_program: pyQuil program to prepare the state
:param coeff_list: list of integers labeling amplitudes to measure
:param reference_state: Integer of the computational basis state to use as
a reference
:param quantum_resource: An instance of a quantum abstract machine
:param variance_bound: Default 1.0E-6. variance of the monte carlo
estimator for the non-hermitian operator
:return: returns a list of reference_state amplitude + coeff_list amplitudes
"""
num_qubits = len(prep_program.get_qubits())
normalizer_ops = projector_generator(reference_state, reference_state)
c0_coeff, _, _ = estimate_locally_commuting_operator(
prep_program, normalizer_ops, variance_bound=variance_bound,
quantum_resource=quantum_resource)
c0_coeff = np.sqrt(c0_coeff)
amplitudes = []
for ii in coeff_list:
if ii == reference_state:
amplitudes.append(c0_coeff)
else:
bra = list(map(int, np.binary_repr(ii, width=num_qubits)))
c_ii_op = projector_generator(reference_state, bra)
result = estimate_locally_commuting_operator(
prep_program, c_ii_op, variance_bound=variance_bound,
quantum_resource=quantum_resource)
amplitudes.append(result[0] / c0_coeff)
return amplitudes
def measure_pure_state(prep_program, reference_state, quantum_resource,
variance_bound=1.0E-6):
"""
Measure the coefficients of the pure state
:param prep_program: pyQuil program to prepare the state
:param reference_state: Integer of the computational basis state to use as
a reference
:param quantum_resource: An instance of a quantum abstract machine
:param variance_bound: Default 1.0E-6. variance of the monte carlo
estimator for the non-hermitian operator
:return: an estimate of the wavefunction as a numpy.ndarray
"""
num_qubits = len(prep_program.get_qubits())
amplitudes_to_measure = list(range(2 ** num_qubits))
amplitudes = measure_wf_coefficients(prep_program, amplitudes_to_measure,
reference_state,
quantum_resource,
variance_bound=variance_bound)
wavefunction = np.asarray(amplitudes)
return wavefunction.reshape((-1, 1))
|
{"hexsha": "94f6a771c24cc7357dcd7c86486c13f4ea818790", "size": 5623, "ext": "py", "lang": "Python", "max_stars_repo_path": "grove/measurements/amplitude_measurement.py", "max_stars_repo_name": "mkeshita/grove", "max_stars_repo_head_hexsha": "dc6bf6ec63e8c435fe52b1e00f707d5ce4cdb9b3", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 229, "max_stars_repo_stars_event_min_datetime": "2017-01-10T03:11:54.000Z", "max_stars_repo_stars_event_max_datetime": "2018-11-26T10:57:49.000Z", "max_issues_repo_path": "grove/measurements/amplitude_measurement.py", "max_issues_repo_name": "mkeshita/grove", "max_issues_repo_head_hexsha": "dc6bf6ec63e8c435fe52b1e00f707d5ce4cdb9b3", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 123, "max_issues_repo_issues_event_min_datetime": "2017-01-10T21:06:51.000Z", "max_issues_repo_issues_event_max_datetime": "2018-11-27T19:38:22.000Z", "max_forks_repo_path": "grove/measurements/amplitude_measurement.py", "max_forks_repo_name": "mkeshita/grove", "max_forks_repo_head_hexsha": "dc6bf6ec63e8c435fe52b1e00f707d5ce4cdb9b3", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 95, "max_forks_repo_forks_event_min_datetime": "2017-01-10T03:03:45.000Z", "max_forks_repo_forks_event_max_datetime": "2018-11-28T00:42:28.000Z", "avg_line_length": 41.6518518519, "max_line_length": 86, "alphanum_fraction": 0.6701049262, "include": true, "reason": "import numpy", "num_tokens": 1355}
|
# Plots a self-similarity matrix as a figure.
import argparse
import matplotlib.pyplot as plt
import numpy
import mir3.data.self_similarity_matrix as ssm
if __name__ == "__main__":
parser = argparse.ArgumentParser()
    parser.add_argument('infile', help="""self-similarity matrix file""")
parser.add_argument('outfile', help="""target image file""")
parser.add_argument('-t','--minimum_time',
default=0, help="""Minimum time (s) to plot""")
parser.add_argument('-T','--maximum_time',
default=None, help="""Maximum time (s) to plot""")
    parser.add_argument('-W', '--width', type=int,
                        default=400, help="""Plot width (px)""")
    parser.add_argument('-H', '--height', type=int,
                        default=400, help="""Plot height (px)""")
args = parser.parse_args()
s = ssm.SelfSimilarityMatrix()
s = s.load(open(args.infile))
maxT = s.data.shape[1]
maxTs = maxT / s.metadata.sampling_configuration.ofs
out = s.data
out = out/numpy.max(out)
out = 1 - out
im=plt.imshow(out, aspect='auto', origin='lower', cmap=plt.cm.gray,
extent=[0, maxTs, 0, maxTs])
plt.xlabel('Time (s)')
plt.ylabel('Time (s)')
fig = plt.gcf()
size = (args.width, args.height)
width_inches = size[0]/80.0
height_inches = size[1]/80.0
fig.set_size_inches((width_inches,height_inches))
plt.savefig(args.outfile,bbox_inches='tight')
|
{"hexsha": "5ddfbbfd3f736f37d6ac4b9000072bd0bc797fb3", "size": 1408, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/pyplot/plot-selfsimilarity.py", "max_stars_repo_name": "pymir3/pymir3", "max_stars_repo_head_hexsha": "c1bcca66a5ef1ff0ebd6373e3820e72dee6b0b70", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2015-08-03T12:41:11.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-18T07:55:23.000Z", "max_issues_repo_path": "scripts/pyplot/plot-selfsimilarity.py", "max_issues_repo_name": "pymir3/pymir3", "max_issues_repo_head_hexsha": "c1bcca66a5ef1ff0ebd6373e3820e72dee6b0b70", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2015-05-27T18:47:20.000Z", "max_issues_repo_issues_event_max_datetime": "2015-05-27T18:47:20.000Z", "max_forks_repo_path": "scripts/pyplot/plot-selfsimilarity.py", "max_forks_repo_name": "pymir3/pymir3", "max_forks_repo_head_hexsha": "c1bcca66a5ef1ff0ebd6373e3820e72dee6b0b70", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2016-03-18T03:30:02.000Z", "max_forks_repo_forks_event_max_datetime": "2018-07-05T02:29:16.000Z", "avg_line_length": 29.9574468085, "max_line_length": 71, "alphanum_fraction": 0.6392045455, "include": true, "reason": "import numpy", "num_tokens": 371}
|
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers.convolutional import Convolution1D
from keras.layers.convolutional import MaxPooling1D
from keras.layers.embeddings import Embedding
from keras.preprocessing import sequence
from keras.preprocessing.text import Tokenizer
from keras.utils import np_utils
from keras.models import load_model
from os.path import dirname, join
import sys
import time
import statistics
def load_data(data_file_name, h5File=False):
"""
Loads data from module_path/data/data_file_name.
Parameters
----------
data_file_name : string
name of csv file to be loaded from module_path/data/
data_file_name.
h5File : boolean, optional, default = False
if True opens hdf5 file
Returns
-------
data : Pandas DataFrame
"""
module_path = dirname(__file__)
if h5File:
data = load_model(join(module_path, 'data', data_file_name))
else:
with open(join(module_path, 'data', data_file_name), 'rb') as csv_file:
data = pd.read_csv(csv_file, encoding='latin1')
return data
def data_setup(top_words=1000, max_words=150):
"""
    preprocesses the twitter climate data: converts the output labels to
    a one-hot encoding and performs word integerization and padding
Parameters
----------
top_words : int
        defaults to 1000. Number of words to integerize
        based on the top occurring words
    max_words : int
        defaults to 150. Number of words to include per
        tweet (i.e. the feature vector has length 150)
Returns
-------
X : array
input array (features)
Y : array
output array (target)
"""
data = load_data("tweet_global_warming.csv")
print("Full dataset: {}".format(data.shape[0]))
data['existence'].fillna(value='ambiguous',
inplace=True)
data['existence'].replace(('Y', 'N'), ('Yes', 'No'),
inplace=True)
data = data.dropna() # now drop NA values
print("dataset without NaN: {}".format(data.shape[0]))
X = data.iloc[:, 0]
Y = data.iloc[:, 1]
print("Number of unique words: {}".format(len(np.unique(np.hstack(X)))))
# one hot encoding = dummy vars from categorical var
# Create a one-hot encoded binary matrix
# N, Y, Ambig
# 1, 0, 0
# 0, 1, 0
# 0, 0, 1
# encode class as integers
encoder = LabelEncoder()
encoder.fit(Y)
encoded_Y = encoder.transform(Y)
# convert integers to one hot encoded
Y = np_utils.to_categorical(encoded_Y)
# convert X to ints (y is already done)
token = Tokenizer(num_words=top_words,
filters='!"#$%&()*+,-./:;<=>?@[\]^_`{|}~', lower=True,
split=' ', char_level=False, oov_token=None)
token.fit_on_texts(texts=X)
X = token.texts_to_sequences(texts=X)
X = sequence.pad_sequences(X, maxlen=max_words)
return X, Y
def baseline_model(top_words=1000, max_words=150, filters=32):
"""
    builds the baseline 1-D convolutional model: word embedding,
    convolution, max pooling, and dense layers
    Parameters
    ----------
    top_words : int
        defaults to 1000. Number of words to integerize
        based on the top occurring words
    max_words : int
        defaults to 150. Number of words to include per
        tweet (i.e. the feature vector has length 150)
    filters : int
        defaults to 32. Embedding dimension and number of
        convolution filters
Returns
-------
model : Keras model object
"""
model = Sequential()
model.add(Embedding(top_words + 1, filters,
input_length=max_words))
model.add(Convolution1D(filters=filters, kernel_size=3, padding='same',
activation='relu'))
model.add(MaxPooling1D(pool_size=2))
model.add(Flatten())
model.add(Dense(250, activation='relu'))
model.add(Dense(3, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam',
metrics=['accuracy'])
return model
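
# Illustrative usage (epoch and batch values are arbitrary, not from the
# original module):
#
#     X, Y = data_setup(top_words=1000, max_words=150)
#     model = baseline_model(top_words=1000, max_words=150)
#     model.fit(X, Y, epochs=2, batch_size=128)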
class Benchmark:
"""
benchmark method used by the unittests
"""
@staticmethod
def run(function):
timings = []
stdout = sys.stdout
for i in range(5):
sys.stdout = None
startTime = time.time()
function()
seconds = time.time() - startTime
sys.stdout = stdout
timings.append(seconds)
mean = statistics.mean(timings)
print("{} {:3.2f} {:3.2f}".format(
1 + i, mean,
statistics.stdev(timings, mean) if i > 1 else 0))
|
{"hexsha": "3d1d52b30beec205d711fe78972f366744eadf8d", "size": 4746, "ext": "py", "lang": "Python", "max_stars_repo_path": "ECS_demo/core.py", "max_stars_repo_name": "reconjohn/dev", "max_stars_repo_head_hexsha": "aa2248338d9f3b54c345baf1dcd9531592586925", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2018-05-16T17:34:31.000Z", "max_stars_repo_stars_event_max_datetime": "2019-07-17T18:16:18.000Z", "max_issues_repo_path": "ecsdemo/core.py", "max_issues_repo_name": "wesleyktatum/ECS", "max_issues_repo_head_hexsha": "24291437ccc57a668a6d6b34d4d8cc3f69def14e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2018-05-14T22:37:04.000Z", "max_issues_repo_issues_event_max_datetime": "2018-05-14T22:37:04.000Z", "max_forks_repo_path": "ecsdemo/core.py", "max_forks_repo_name": "dacb/ecs", "max_forks_repo_head_hexsha": "24291437ccc57a668a6d6b34d4d8cc3f69def14e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2018-05-19T00:19:11.000Z", "max_forks_repo_forks_event_max_datetime": "2019-07-17T18:31:09.000Z", "avg_line_length": 30.8181818182, "max_line_length": 79, "alphanum_fraction": 0.6245259166, "include": true, "reason": "import numpy", "num_tokens": 1120}
|
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
import argparse
import warnings
import astropy.units as u
from astropy.table import Table
from astropy.modeling.fitting import LevMarLSQFitter
from G21 import G21, G21_drude_asym
def clean_pnames(pnames):
"""
    function to strip the trailing _0/_1 suffix from parameter names
    introduced by making a CompoundModel
if pnames[0][-1] in ["0", "1"]:
clean_pnames = [cpname[:-2] for cpname in pnames]
return clean_pnames
else:
return pnames
if __name__ == "__main__":
# commandline parser
parser = argparse.ArgumentParser()
parser.add_argument(
"--symfit", help="include symmetric drude fit", action="store_true"
)
parser.add_argument("--notitle", help="no title on plot", action="store_true")
parser.add_argument("--png", help="save figure as a png file", action="store_true")
parser.add_argument("--pdf", help="save figure as a pdf file", action="store_true")
parser.add_argument("--path", help="path for the extinction curves")
args = parser.parse_args()
a = Table.read("litdata/alam_ak_apj_v2.dat", format="ascii.basic", data_start=1)
wave = a["lambda"].data * u.micron
y = a["alk"].data
sindxs = np.argsort(np.abs(wave - 0.55 * u.micron))
y /= y[sindxs[0]]
ofile = "fits/HD20_OB12.fits"
# remove units as fitting routines often cannot take numbers with units
x = wave.to(1.0 / u.micron, equivalencies=u.spectral()).value
g21_init = G21()
g21_asym_init = G21_drude_asym()
# fit the extinction only using data between 1 and 40 micron
gvals = (1.0 < 1.0 / x) & (1.0 / x < 40.0)
fit = LevMarLSQFitter()
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=UserWarning)
g21_fit = fit(g21_init, x[gvals], y[gvals])
g21_asym_fit = fit(g21_asym_init, x[gvals], y[gvals])
# save the extinction curve and fit
best_params = (clean_pnames(g21_asym_fit.param_names), g21_asym_fit.parameters)
print(best_params)
# setup the plot
fontsize = 18
# fontsize = 10
font = {"size": fontsize}
matplotlib.rc("font", **font)
matplotlib.rc("lines", linewidth=1.5)
matplotlib.rc("axes", linewidth=2)
matplotlib.rc("xtick.major", width=2)
matplotlib.rc("xtick.minor", width=2)
matplotlib.rc("ytick.major", width=2)
matplotlib.rc("ytick.minor", width=2)
fig, ax = plt.subplots(
nrows=2, figsize=(12, 8), sharex=True, gridspec_kw={"height_ratios": [5, 1]}
)
ax[0].plot(wave, y, "k-")
# obsext.plot(ax[0], color="k")
g21_fit_y = g21_fit(wave[gvals])
g21_asym_fit_y = g21_asym_fit(wave[gvals])
ax[0].set_ylabel(r"$A(\lambda)/A(V)$", fontsize=1.3 * fontsize)
ax[1].set_xlabel(r"$\lambda$ [$\mu m$]", fontsize=1.3 * fontsize)
if args.symfit:
ax[0].plot(wave[gvals], g21_fit_y, "g-", label="Sym Best Fit", alpha=0.7)
ax[0].plot(wave[gvals], g21_asym_fit_y, "b-", label="Best Fit")
mmy = np.array([min(g21_fit_y), max(g21_fit_y)])
mmd = 0.1 * (mmy[1] - mmy[0])
ax[0].set_ylim(mmy + np.array([-1.0, 1.0]) * mmd)
ax[0].set_xlim(1.0, 40.0)
ax[0].set_xscale("log")
if not args.notitle:
ax[0].set_title(ofile)
g21_comps = g21_asym_fit.copy()
g21_comps.sil1_amp = 0.0
ax[0].plot(wave[gvals], g21_comps(wave[gvals]), "k--", alpha=0.5)
g21_comps = g21_asym_fit.copy()
g21_comps.sil2_amp = 0.0
ax[0].plot(wave[gvals], g21_comps(wave[gvals]), "k--", alpha=0.5)
g21_comps = g21_asym_fit.copy()
g21_comps.sil1_amp = 0.0
g21_comps.sil2_amp = 0.0
ax[0].plot(wave[gvals], g21_comps(wave[gvals]), "k--", alpha=0.5)
ax[0].legend(loc="best")
# residuals
ax[1].plot(wave[gvals], np.zeros((len(wave[gvals]))), "k--")
if args.symfit:
ax[1].plot(wave[gvals], y[gvals] - g21_fit_y, "g-", alpha=0.7)
ax[1].plot(
wave[gvals], y[gvals] - g21_asym_fit(wave[gvals]), "b-",
)
ax[1].set_ylim(np.array([-1.0, 1.0]) * mmd)
plt.tight_layout()
# plot or save to a file
outname = ofile.replace(".fits", "")
if args.png:
fig.savefig(outname + ".png")
elif args.pdf:
fig.savefig(outname + ".pdf")
else:
plt.show()
|
{"hexsha": "532e691ca3d154c5e3dcf83aba77da16c3c9582c", "size": 4283, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils/fit_mir_ext_powerlaw_ob12.py", "max_stars_repo_name": "karllark/spitzer_mir_ext", "max_stars_repo_head_hexsha": "72b5af2610761b1056f966299c48f37714d3486c", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "utils/fit_mir_ext_powerlaw_ob12.py", "max_issues_repo_name": "karllark/spitzer_mir_ext", "max_issues_repo_head_hexsha": "72b5af2610761b1056f966299c48f37714d3486c", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2018-01-02T15:27:38.000Z", "max_issues_repo_issues_event_max_datetime": "2020-12-08T18:47:07.000Z", "max_forks_repo_path": "utils/fit_mir_ext_powerlaw_ob12.py", "max_forks_repo_name": "karllark/spitzer_mir_ext", "max_forks_repo_head_hexsha": "72b5af2610761b1056f966299c48f37714d3486c", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-02-10T22:34:23.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-10T22:34:23.000Z", "avg_line_length": 30.3758865248, "max_line_length": 87, "alphanum_fraction": 0.6289983656, "include": true, "reason": "import numpy,import astropy,from astropy", "num_tokens": 1388}
|
from .. import ParallelSampler
import numpy as np
from . import metropolis
from cosmosis.runtime.analytics import Analytics
import os
#We need a global pipeline
#object for MPI to work properly
pipeline=None
METROPOLIS_INI_SECTION = "metropolis"
def posterior(p):
return pipeline.run_results(p)
class MetropolisSampler(ParallelSampler):
parallel_output = True
sampler_outputs = [("prior", float),("post", float)]
understands_fast_subspaces = True
supports_resume = True
def config(self):
global pipeline
pipeline = self.pipeline
self.samples = self.read_ini("samples", int, default=20000)
random_start = self.read_ini("random_start", bool, default=False)
use_cobaya = self.read_ini("cobaya", bool, default=False)
self.Rconverge = self.read_ini("Rconverge", float, -1.0)
self.drag = self.read_ini("drag", int, 0)
self.oversampling = self.read_ini("oversampling", int, 5)
tuning_frequency = self.read_ini("tuning_frequency", int, -1)
tuning_grace = self.read_ini("tuning_grace", int, 5000)
self.tuning_end = self.read_ini("tuning_end", int, 100000)
self.save_during_tuning = self.read_ini("save_during_tuning", bool, False)
self.n = self.read_ini("nsteps", int, default=100)
self.exponential_probability = self.read_ini("exponential_probability", float, default=0.333)
self.split = None #work out later
if self.Rconverge==-1.0:
self.Rconverge=None
self.interrupted = False
self.num_samples = 0
self.ndim = len(self.pipeline.varied_params)
self.num_samples_post_tuning = 0
self.last_accept_count = 0
#Any other options go here
        # if we are not tuning then there is no tuning phase
if tuning_frequency == -1:
self.tuning_end = 0
if (self.drag > 0) and not self.pipeline.do_fast_slow:
print("You asked for dragging, but the pipeline does not have fast/slow enabled"
", so no draggng will be done."
)
if (self.pipeline.do_fast_slow) and not (self.pipeline.first_fast_module):
raise ValueError("To use fast/slow splitting with metropolis please "
"manually define first_fast_module in the pipeline "
"section.")
#start values from prior
start = self.define_parameters(random_start)
print("MCMC starting point:")
for param, x in zip(self.pipeline.varied_params, start):
print(" ", param, x)
#Covariance matrix
covmat = self.load_covariance_matrix()
#Sampler object itself.
quiet = self.pipeline.quiet
if use_cobaya:
print("Using the Cobaya proposal")
print(f"Will tune every {tuning_frequency} samples, from samples "
f"{tuning_grace} to {self.tuning_end}.")
self.sampler = metropolis.MCMC(start, posterior, covmat,
quiet=quiet,
tuning_frequency=tuning_frequency, # Will be multiplied by the oversampling
tuning_grace=tuning_grace, # within the sampler if needed
tuning_end=self.tuning_end,
exponential_probability=self.exponential_probability,
use_cobaya=use_cobaya,
n_drag = self.drag,
)
self.analytics = Analytics(self.pipeline.varied_params, self.pool)
self.fast_slow_done = False
def worker(self):
while not self.is_converged():
self.execute()
if self.output:
self.output.flush()
def resume(self):
resume_info = self.read_resume_info()
if resume_info is None:
return
self.sampler, self.num_samples, self.num_samples_post_tuning = resume_info
# Fast slow is already configured on the sampler.
self.fast_slow_done = True
# If we started main sampling (as opposed to tuning phase)
# then we will have some existing chain, but this is not always the case
try:
data = np.genfromtxt(self.output._filename, invalid_raise=False)[:, :self.ndim]
self.analytics.add_traces(data)
except IndexError:
data = None
if self.num_samples >= self.samples:
print("You told me to resume the chain - it has already completed (with {} samples), so sampling will end.".format(len(data)))
print("Increase the 'samples' parameter to keep going.")
elif self.is_converged():
print("The resumed chain was already converged. You can change the converged testing parameters to extend it.")
elif data is None:
print("Continuing metropolis from existing chain - you were in the tuning phase, which will continue")
else:
print("Continuing metropolis from existing chain - have {} samples already".format(len(data)))
def execute(self):
#Run the MCMC sampler.
if self.pipeline.do_fast_slow and not self.fast_slow_done:
self.fast_slow_done = True
self.sampler.set_fast_slow(
self.pipeline.fast_param_indices,
self.pipeline.slow_param_indices,
self.oversampling
)
try:
samples = self.sampler.sample(self.n)
except KeyboardInterrupt:
self.interrupted=True
return
self.num_samples += self.n
self.num_samples_post_tuning = self.num_samples - self.tuning_end
overall_rate = (self.sampler.accepted * 1.0) / self.sampler.iterations
recent_accepted = self.sampler.accepted - self.last_accept_count
recent_rate = recent_accepted / self.n
print("Overall accepted {} / {} samples ({:.1%})" .format(
self.sampler.accepted, self.sampler.iterations, overall_rate))
print("Last {0} accepted {1} / {0} samples ({2:.1%})\n" .format(
self.n, recent_accepted, recent_rate))
self.last_accept_count = self.sampler.accepted
# Regardless of save settings we never use tuning samples
# for analytics
if self.num_samples_post_tuning > 0:
traces = np.array([r.vector for r in samples[-self.num_samples_post_tuning:]])
self.analytics.add_traces(traces)
if (self.num_samples_post_tuning > 0) or self.save_during_tuning:
for i, result in enumerate(samples):
self.output.parameters(result.vector, result.extra, result.prior, result.post)
if self.num_samples_post_tuning <= 0:
print("Tuning ends at {} samples\n".format(self.tuning_end))
self.write_resume_info([self.sampler, self.num_samples, self.num_samples_post_tuning])
def is_converged(self):
# user has pressed Ctrl-C
if self.interrupted:
return True
if self.num_samples >= self.samples:
print("Full number of samples generated; sampling complete")
return True
elif (self.num_samples > 0 and
self.pool is not None and
self.Rconverge is not None and
self.num_samples_post_tuning > 0):
R = self.analytics.gelman_rubin(quiet=False)
R1 = abs(R - 1)
return np.all(R1 <= self.Rconverge)
else:
return False
def load_covariance_matrix(self):
covmat_filename = self.read_ini("covmat", str, "").strip()
if covmat_filename == "" and self.distribution_hints.has_cov():
covmat = self.distribution_hints.get_cov()
print("Using covariance from previous sampler")
elif covmat_filename == "":
print("Using default covariance 1% of param widths")
covmat = np.array([p.width()/100.0 for p in self.pipeline.varied_params])
elif not os.path.exists(covmat_filename):
raise ValueError(
"Covariance matrix %s not found" % covmat_filename)
else:
print("Loading covariance from {}".format(covmat_filename))
covmat = np.loadtxt(covmat_filename)
if covmat.ndim == 0:
covmat = covmat.reshape((1, 1))
elif covmat.ndim == 1:
covmat = np.diag(covmat ** 2)
nparams = len(self.pipeline.varied_params)
if covmat.shape != (nparams, nparams):
raise ValueError("The covariance matrix was shape (%d x %d), "
"but there are %d varied parameters." %
(covmat.shape[0], covmat.shape[1], nparams))
return covmat
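
    # Summary of the covariance inputs handled by load_covariance_matrix above:
    #   - no file, hints present -> covariance from a previous sampler
    #   - no file, no hints      -> diagonal, sigma = 1% of each parameter width
    #   - scalar in file         -> promoted to a 1x1 matrix
    #   - 1-D vector in file     -> per-parameter sigmas, squared onto the diagonal
    #   - 2-D matrix in file     -> used as-is (must be nparams x nparams)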
def define_parameters(self, random_start):
if random_start:
return self.pipeline.randomized_start()
else:
return self.pipeline.start_vector()
|
{"hexsha": "957514dd22e3d1915bee016f9f786f49752a2c4f", "size": 8875, "ext": "py", "lang": "Python", "max_stars_repo_path": "cosmosis/samplers/metropolis/metropolis_sampler.py", "max_stars_repo_name": "annis/cosmosis", "max_stars_repo_head_hexsha": "55efc1bc2260ca39298c584ae809fa2a8e72a38e", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-06-18T14:11:59.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-23T19:19:36.000Z", "max_issues_repo_path": "cosmosis/samplers/metropolis/metropolis_sampler.py", "max_issues_repo_name": "annis/cosmosis", "max_issues_repo_head_hexsha": "55efc1bc2260ca39298c584ae809fa2a8e72a38e", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-11-02T12:44:24.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T15:09:48.000Z", "max_forks_repo_path": "cosmosis/samplers/metropolis/metropolis_sampler.py", "max_forks_repo_name": "annis/cosmosis", "max_forks_repo_head_hexsha": "55efc1bc2260ca39298c584ae809fa2a8e72a38e", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2022-03-25T21:26:27.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-29T06:37:46.000Z", "avg_line_length": 38.5869565217, "max_line_length": 138, "alphanum_fraction": 0.6233239437, "include": true, "reason": "import numpy", "num_tokens": 1962}
|
import jax
import jax.numpy as jnp
import pytreearray as pta
def default_norm(res, t):
    # RMS norm: plain arrays are handled directly; otherwise squared
    # magnitudes are summed over the tree leaves and averaged (res.size
    # assumes a pytree-array-like object, cf. the pytreearray import).
if isinstance(res, jnp.ndarray):
return jnp.sqrt(jnp.mean(jnp.abs(res) ** 2))
else:
return jnp.sqrt(
jax.tree_util.tree_reduce(
lambda x, y: x + y,
jax.tree_map(lambda x: jnp.sum(jnp.abs(x) ** 2), res),
)
/ res.size
)
def calculate_error(ut, u0, u1, a, p, internalnorm, t):
res = calculate_residuals(ut, u0, u1, a, p, internalnorm, t)
return internalnorm(res, t)
"""
calculate_residuals(ũ, u₀, u₁, α, ρ, internalnorm, t)
Calculate element-wise residuals
```math
\\frac{ũ}{α+\\max{|u₀|,|u₁|}*ρ}
```
"""
def calculate_residuals(ut, u0, u1, a, p, internalnorm, t):
return ut / (a + jnp.maximum(internalnorm(u0, t), internalnorm(u1, t)) * p)
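# --- Hedged usage sketch (editor's addition): error of one explicit Euler
# step for u' = -u with dt = 0.1, measured against the exact solution.
# The tolerances a (absolute) and p (relative) are illustrative values only.
if __name__ == "__main__":
    u0 = jnp.array([1.0, 2.0])
    u1 = u0 * (1.0 - 0.1)                # one Euler step
    ut = u1 - u0 * jnp.exp(-0.1)         # residual vs. the exact solution
    err = calculate_error(ut, u0, u1, 1e-6, 1e-3, default_norm, 0.1)
    print(err)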
|
{"hexsha": "62e31712d45ca31b3d4be7a8847e9cd0a726b7bc", "size": 851, "ext": "py", "lang": "Python", "max_stars_repo_path": "ode4jax/_src/base/residuals.py", "max_stars_repo_name": "PhilipVinc/netket_dynamics", "max_stars_repo_head_hexsha": "6e8009098c279271cb0f289ba9e85c039bb284e4", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-10-02T20:29:44.000Z", "max_stars_repo_stars_event_max_datetime": "2021-10-02T20:38:28.000Z", "max_issues_repo_path": "ode4jax/_src/base/residuals.py", "max_issues_repo_name": "PhilipVinc/netket_dynamics", "max_issues_repo_head_hexsha": "6e8009098c279271cb0f289ba9e85c039bb284e4", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 11, "max_issues_repo_issues_event_min_datetime": "2021-10-01T09:15:06.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-21T09:19:23.000Z", "max_forks_repo_path": "ode4jax/_src/base/residuals.py", "max_forks_repo_name": "PhilipVinc/netket_dynamics", "max_forks_repo_head_hexsha": "6e8009098c279271cb0f289ba9e85c039bb284e4", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.0, "max_line_length": 79, "alphanum_fraction": 0.5863689777, "include": true, "reason": "import jax", "num_tokens": 272}
|
#!/usr/bin/env python3
import json
import numpy
import os.path
import statistics
import sys
import operator
import functools
import numbers
import math
from collections import defaultdict, Counter
def main():
paths = sorted(sys.argv[1:], key=get_time)
games = list(collect_data(paths))
stats = get_stats(games)
print_stats(stats)
show_stats(stats)
def print_stats(stats):
row('metric', 'value', '%')
for metric in ('games', 'draws', 'zero_draws', 'unique_seeds'):
row(metric, stats[metric], stats[metric] / stats['games'] * 100)
print()
players = stats['players']
row('metric', *players, *['%s %%' % v for v in players], '%s / %s' % (players[0], players[1]), '%s / %s' % (players[1], players[0]), 'total')
for metric, values in stats.items():
print_metric(metric, values, players)
def print_metric(metric, values, players):
if isinstance(values, dict):
if values:
if isinstance(tuple(values.values())[0], dict):
for submetric, subvalues in values.items():
print_metric('%s %s' % (metric, submetric), subvalues, players)
elif isinstance(tuple(values.values())[0], numbers.Number):
print_counter(metric, values, players)
def print_counter(metric, values, players):
row_values = [values[v] for v in players]
total = sum(row_values)
fractions = [safe_div(v, total) * 100 for v in row_values]
ratios = [safe_div(values[players[0]], values[players[1]]), safe_div(values[players[1]], values[players[0]])]
row(metric, *row_values, *fractions, *ratios, total)
def safe_div(a, b):
return a / b if b else float('inf')
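# --- Hedged note (editor's addition): safe_div maps division by zero to +inf
# so that win/loss ratios stay defined before both players have scored; the
# inf values are filtered back out in show_ratio_plot via math.isinf, e.g.
#   safe_div(3, 2) -> 1.5    safe_div(3, 0) -> inf    safe_div(0, 2) -> 0.0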
def show_stats(stats):
import matplotlib.pyplot as pyplot
show_metric_plot(pyplot, stats, 'scores_dynamic')
show_metric_plot(pyplot, stats, 'scores_dynamic_cumsum')
show_metric_plot(pyplot, stats, 'places_dynamic_cumsum')
show_metric_plot(pyplot, stats, 'wins_dynamic_cumsum')
show_metric_plot(pyplot, stats, 'losses_dynamic_cumsum')
show_ratio_plots(pyplot, stats, 'scores_dynamic_cumsum')
show_ratio_plots(pyplot, stats, 'places_dynamic_cumsum')
show_ratio_plots(pyplot, stats, 'wins_dynamic_cumsum')
show_ratio_plots(pyplot, stats, 'losses_dynamic_cumsum')
show_scores_distribution_plot(pyplot, stats)
show_positions_distribution_plot(pyplot, stats)
show_seeds_distribution_plot(pyplot, stats)
pyplot.show()
def show_ratio_plots(pyplot, stats, metric):
players = stats['players']
show_ratio_plot(
pyplot,
name='%s %s / %s' % (metric, players[0], players[1]),
values=stats[metric][players[0]] / stats[metric][players[1]],
)
show_ratio_plot(
pyplot,
name='%s %s / %s' % (metric, players[1], players[0]),
values=stats[metric][players[1]] / stats[metric][players[0]],
)
def show_ratio_plot(pyplot, name, values):
fig, ax = pyplot.subplots()
fig.canvas.set_window_title(name)
ax.set_title(name)
ax.plot(numpy.arange(0, len(values), 1), values, label=name)
filtered = [v for v in values[len(values) // 2:] if not math.isinf(v)]
if filtered:
filtered = numpy.array(filtered)
min_v = min(filtered)
        ax.plot([len(values) // 2, len(values) - 1], [min_v, min_v], '-.', label='last half min %s' % min_v)
max_v = max(filtered)
ax.plot([len(values) // 2, len(values) - 1], [max_v, max_v], '-.', label='last half max %s' % max_v)
mean = statistics.mean(filtered)
ax.plot([len(values) // 2, len(values) - 1], [mean, mean], '--', label='last half mean %s' % mean)
ax.grid(True)
ax.legend()
def show_metric_plot(pyplot, stats, metric):
fig, ax = pyplot.subplots()
fig.canvas.set_window_title(metric)
ax.set_title(metric)
for player, values in stats[metric].items():
ax.plot(numpy.arange(0, len(values), 1), values, label=player)
total = functools.reduce(operator.add, stats[metric].values())
ax.plot(numpy.arange(0, len(total), 1), total, label='total')
ax.grid(True)
ax.legend()
def show_plot(pyplot, name, values):
fig, ax = pyplot.subplots()
fig.canvas.set_window_title(name)
ax.set_title(name)
ax.plot(numpy.arange(0, len(values), 1), values, label=name)
ax.grid(True)
ax.legend()
def show_scores_distribution_plot(pyplot, stats):
players = stats['players']
fig, ax = pyplot.subplots()
fig.canvas.set_window_title('scores_dynamic')
ax.set_title('scores_dynamic')
bins = numpy.linspace(0, max(stats['max_score'][v] for v in players) + 1, 50)
for player, values in stats['scores_dynamic'].items():
ax.hist(values, bins=bins, label=player, alpha=0.5)
ax.set_xticks(bins)
ax.grid(True)
ax.legend()
def show_positions_distribution_plot(pyplot, stats):
players = stats['players']
fig, ax = pyplot.subplots()
fig.canvas.set_window_title('positions_dynamic')
ax.set_title('positions_dynamic')
bins = [0, 1, 2]
for player, values in stats['positions_dynamic'].items():
ax.hist(values, bins=bins, label=player, alpha=0.5)
ax.set_xticks(bins)
ax.grid(True)
ax.legend()
def show_seeds_distribution_plot(pyplot, stats):
fig, ax = pyplot.subplots()
fig.canvas.set_window_title('seeds')
ax.set_title('seeds')
bins = numpy.linspace(0, 2**64, 32)
    ax.hist(stats['seeds'], bins=bins)
ax.set_xticks(bins)
ax.grid(True)
def get_stats(games):
draws = 0
zero_draws = 0
players = set()
wins = Counter()
losses = Counter()
places = defaultdict(Counter)
crashes = Counter()
positions = defaultdict(Counter)
places_positions = defaultdict(lambda: defaultdict(Counter))
seeds = set()
scores = defaultdict(list)
places_dynamic = defaultdict(list)
positions_dynamic = defaultdict(list)
wins_dynamic = defaultdict(list)
losses_dynamic = defaultdict(list)
for number, game in enumerate(games):
game_scores = numpy.array(sorted(v['score'] for v in game['results'].values()))
unique_game_scores = numpy.array(sorted(frozenset(v['score'] for v in game['results'].values()), reverse=True))
if len(unique_game_scores) == 1:
draws += 1
if unique_game_scores[0] == 0:
zero_draws += 1
max_score = max(unique_game_scores)
min_score = min(unique_game_scores)
if 1 == sum(1 for v in game_scores if v == max_score):
winner = next(k for k, v in game['results'].items() if v['score'] == max_score)
wins[winner] += 1
wins_dynamic[winner].append(1)
if 1 == sum(1 for v in game_scores if v == min_score):
loser = next(k for k, v in game['results'].items() if v['score'] == min_score)
losses[loser] += 1
losses_dynamic[loser].append(1)
for place, score in enumerate(unique_game_scores):
for k, v in game['results'].items():
if v['score'] == score:
places[place + 1][k] += 1
places_dynamic[k].append(place + 1)
places_positions[place + 1][v['position']][k] += 1
for k, v in game['results'].items():
players.add(k)
if v['crashed']:
crashes[k] += 1
scores[k].append(v['score'])
positions[v['position']][k] += 1
positions_dynamic[k].append(v['position'])
if len(wins_dynamic[k]) < number + 1:
wins_dynamic[k].append(0)
if len(losses_dynamic[k]) < number + 1:
losses_dynamic[k].append(0)
seeds.add(game['seed'])
for k in scores.keys():
scores[k] = numpy.array(scores[k])
places_dynamic[k] = numpy.array(places_dynamic[k])
return dict(
games=len(games),
draws=draws,
zero_draws=zero_draws,
unique_seeds=len(seeds),
players=sorted(players),
wins=wins,
losses=losses,
places=places,
crashes=crashes,
positions=positions,
places_positions=places_positions,
total_score={k: sum(v) for k, v in scores.items()},
median_score={k: statistics.median(v) for k, v in scores.items()},
mean_score={k: statistics.mean(v) for k, v in scores.items()},
stdev_score={k: statistics.stdev(v) for k, v in scores.items()},
min_score={k: min(v) for k, v in scores.items()},
max_score={k: max(v) for k, v in scores.items()},
q95_score={k: numpy.quantile(v, 0.95) for k, v in scores.items()},
scores_dynamic=scores,
scores_dynamic_cumsum=cumsums(scores),
places_dynamic=places_dynamic,
places_dynamic_cumsum=cumsums(places_dynamic),
wins_dynamic=wins_dynamic,
wins_dynamic_cumsum=cumsums(wins_dynamic),
losses_dynamic=losses_dynamic,
losses_dynamic_cumsum=cumsums(losses_dynamic),
positions_dynamic=positions_dynamic,
seeds=numpy.array(sorted(seeds)),
)
def cumsums(values):
return {k: numpy.cumsum(v) for k, v in values.items()}
def get_time(path):
return int(os.path.basename(path).split('.')[-1])
def row(*args):
print(('{:>25}' * len(args)).format(*args))
def collect_data(paths):
for path in paths:
config_path = os.path.join(path, 'config.json')
if not os.path.exists(config_path):
continue
config_content = read_file(config_path)
if not config_content:
continue
players = parse_config(config_content)
result_path = os.path.join(path, 'result.json')
if not os.path.exists(result_path):
continue
result_content = read_file(result_path)
if not result_content:
continue
yield parse_result(result_content, players)
def read_file(path):
with open(path) as f:
return f.read()
def parse_config(content):
data = json.loads(content)
return {get_player_name(v): n for n, v in enumerate(data['players'])}
def get_player_name(data):
if isinstance(data, str):
return data
if isinstance(data, dict):
if 'Empty' in data:
return 'Empty'
if 'Tcp' in data:
return 'Tcp_%s' % str(data['Tcp']['port'])[:3]
raise RuntimeError('Invalid player data: %s' % data)
def parse_result(content, players):
data = json.loads(content)
results = {name: get_record(data, index) for name, index in players.items()}
return dict(results=results, seed=data['seed'])
def get_record(data, index):
return dict(
crashed=data['players'][index]['crashed'],
score=data['results'][index],
position=index,
)
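# --- Hedged sketch (editor's addition): the result.json layout assumed by
# parse_result/get_record above, with field names inferred from the accessors:
def _parse_result_example():
    content = json.dumps({
        "seed": 42,
        "players": [{"crashed": False}, {"crashed": True}],
        "results": [100, 80],
    })
    game = parse_result(content, {"Tcp_318": 0, "Empty": 1})
    assert game["seed"] == 42
    assert game["results"]["Tcp_318"] == {"crashed": False, "score": 100, "position": 0}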
if __name__ == '__main__':
main()
|
{"hexsha": "84bdce1ea747e3344f7b3650458b8fd009188d95", "size": 10854, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/results_stats.py", "max_stars_repo_name": "elsid/CodeSide", "max_stars_repo_head_hexsha": "2c08f73114cd1e4d29cde61b342d1ef4e052e5cc", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scripts/results_stats.py", "max_issues_repo_name": "elsid/CodeSide", "max_issues_repo_head_hexsha": "2c08f73114cd1e4d29cde61b342d1ef4e052e5cc", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/results_stats.py", "max_forks_repo_name": "elsid/CodeSide", "max_forks_repo_head_hexsha": "2c08f73114cd1e4d29cde61b342d1ef4e052e5cc", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.91875, "max_line_length": 145, "alphanum_fraction": 0.6258522204, "include": true, "reason": "import numpy", "num_tokens": 2697}
|
[STATEMENT]
lemma sup_left_zero[simp]:
"top \<squnion> -x = top"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. top \<squnion> - x = top
[PROOF STEP]
by (metis complement_bot sub_commutative sup_right_zero)
|
{"llama_tokens": 89, "file": "Subset_Boolean_Algebras_Subset_Boolean_Algebras", "length": 1}
|
#include <boost/property_tree/ptree.hpp>
#include <iostream>
using boost::property_tree::ptree;
int main()
{
    ptree pt;
    // '.' is the default path separator, so this creates the nested nodes
    // "C:" -> "Windows" -> "System" and stores "20 files" at the leaf.
    pt.put("C:.Windows.System", "20 files");
    ptree &c = pt.get_child("C:");
    ptree &windows = c.get_child("Windows");
    ptree &system = windows.get_child("System");
    std::cout << system.get_value<std::string>() << '\n';  // prints: 20 files
}
|
{"hexsha": "7178b6be4a2fd49bc9a185d54493bdd255cc60d9", "size": 347, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "Example/propertytree_01/main.cpp", "max_stars_repo_name": "KwangjoJeong/Boost", "max_stars_repo_head_hexsha": "29c4e2422feded66a689e3aef73086c5cf95b6fe", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Example/propertytree_01/main.cpp", "max_issues_repo_name": "KwangjoJeong/Boost", "max_issues_repo_head_hexsha": "29c4e2422feded66a689e3aef73086c5cf95b6fe", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Example/propertytree_01/main.cpp", "max_forks_repo_name": "KwangjoJeong/Boost", "max_forks_repo_head_hexsha": "29c4e2422feded66a689e3aef73086c5cf95b6fe", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.1333333333, "max_line_length": 55, "alphanum_fraction": 0.6570605187, "num_tokens": 99}
|
"""TRUNAJOD ttr tests."""
import string
from collections import namedtuple
import numpy as np
import pytest
from TRUNAJOD import ttr
Token = namedtuple("Token", ["lemma_", "pos_"])
@pytest.fixture
def test_doc():
"""Fixture to use a doc for tests."""
doc = [
Token(lemma_="hola", pos_="hola"),
Token(lemma_="hola", pos_="hola"),
Token(lemma_="chao", pos_="chao"),
Token(lemma_="hola", pos_="hola"),
Token(lemma_="perro", pos_="perro"),
Token(lemma_="hola", pos_="hola"),
]
yield doc
def test_type_token_ratio():
"""Test type_token_ratio func."""
assert (
ttr.type_token_ratio(["hola", "hola", "chao", "hola", "perro", "hola"])
== 0.5
)
def test_one_side_lexical_diversity_mtld():
"""Test one_side_lexical_diversity_mtld."""
assert (
ttr.one_side_lexical_diversity_mtld(
["hola", "hola", "chao", "hola", "perro", "hola"], ttr_segment=1
)
== 3
)
def test_lexical_diversity_mtld(test_doc):
"""Test lexical_diversity_mtld."""
assert ttr.lexical_diversity_mtld(test_doc, ttr_segment=1) == 3
def test_yule_k(test_doc):
"""Test yule_k."""
n = len(test_doc)
rs = {
1: 2,
4: 1,
}
    # Yule's K = 1e4 * (sum_r r^2 * V(r) - N) / N^2
    expected_k = 1e4 * (sum(r ** 2 * vr for r, vr in rs.items()) - n) / n ** 2
assert ttr.yule_k(test_doc) == expected_k
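# --- Hedged worked example (editor's addition): the fixture has N = 6 tokens
# with frequency spectrum V(1) = 2 (chao, perro) and V(4) = 1 (hola), so
# K = 1e4 * (1*2 + 16*1 - 6) / 36 = 1e4 * 12 / 36 ≈ 3333.33.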
def test_d_estimate():
"""Test d_estimate."""
text = (
"El espermatozoide y el ovocito son células son muy diferentes entre "
"sí, y poseen propiedades estructurales que van adquiriendo mediante "
"el proceso de gametogénesis. La gametogénesis masculina se denomina "
"espermatogénesis y la femenina, ovogénesis. Ocurre al interior de "
"los testículos, en unas estructuras llamadas túbulos seminíferos Se "
"inicia en la pubertad y, en condiciones normales, se mantiene "
"durante toda la vida de los hombres. A continuación, revisaremos sus "
"etapas. Durante el desarrollo embrionario, las células germinales "
"primordiales se multiplican, dando lugar a espermatogonias. Años más "
"tarde, en la pubertad, algunas espermatogonias proliferan, aumentan "
"de tamaño y se diferencian en espermatocitos primarios o "
"espermatocitos I. Luego, los espermatocitos I pasan por un proceso "
"que consta de dos divisiones celulares. La primera división, origina "
"los espermatocitos secundarios o espermatocitos II; estas células "
"experimentan la segunda división formando las espermátidas. Cada una "
"de ellas tiene la mitad del material genético de la especie. "
"Finalmente, las espermátidas experimentan cambios morfológicos que "
"darán origen a los espermatozoides."
)
doc = []
for token in text.translate(
str.maketrans("", "", string.punctuation)
).split():
        doc.append(Token(lemma_=token, pos_=token))
np.random.seed(0)
assert ttr.d_estimate(doc) == 119.4468681409897
def test_word_variation_index():
"""Test WVI."""
doc = [
Token(lemma_="hola", pos_="hola"),
Token(lemma_="hola", pos_="hola"),
Token(lemma_="chao", pos_="chao"),
]
expected = np.log(3) / np.log(2 - np.log(2) / np.log(3))
assert ttr.word_variation_index(doc) == expected
|
{"hexsha": "68b7ed9945a1b25acdffd6837ee0418f2b5e830b", "size": 3409, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/ttr_test.py", "max_stars_repo_name": "dpalmasan/TRUNAJOD2.0", "max_stars_repo_head_hexsha": "b718cacf2ec6bf1e868b7cb2c2b89bd4d08f37cc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 18, "max_stars_repo_stars_event_min_datetime": "2020-05-05T21:29:00.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-08T15:28:12.000Z", "max_issues_repo_path": "tests/ttr_test.py", "max_issues_repo_name": "dpalmasan/TRUNAJOD2.0", "max_issues_repo_head_hexsha": "b718cacf2ec6bf1e868b7cb2c2b89bd4d08f37cc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 50, "max_issues_repo_issues_event_min_datetime": "2020-04-11T01:13:31.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-03T15:38:46.000Z", "max_forks_repo_path": "tests/ttr_test.py", "max_forks_repo_name": "dpalmasan/TRUNAJOD2.0", "max_forks_repo_head_hexsha": "b718cacf2ec6bf1e868b7cb2c2b89bd4d08f37cc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 12, "max_forks_repo_forks_event_min_datetime": "2021-03-27T13:52:14.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-17T11:09:49.000Z", "avg_line_length": 32.7788461538, "max_line_length": 79, "alphanum_fraction": 0.6400704019, "include": true, "reason": "import numpy", "num_tokens": 996}
|
import Arena
from MCTS import MCTS
from go.Game import Game
from go.GoPlayers import *
from go.pytorch.NNet import NNetWrapper as NNet
import numpy as np
from utils import *
"""
use thisss script to play any two agents against each other, or play manually with
any agent.
"""
args = dotdict({
'size': 9, #board size
'numMCTSSims': 200, # Number of games moves for MCTS to simulate.
'arenaCompare': 2, # Number of games to play during arena play to determine if new net will be accepted.
'cpuct': 1.1,
'arenaNumMCTSSims': 20, # simulations for arena
    'instinctArena': False, # if set to True, reset Arena's MCTS tree each time
'balancedGame': True, # if balanced, black should win over 6 scores
'resignThreshold': -0.9, # No Use. Resign Only in self-play Training
'maxLevel': 7,
'levelBased': True,
'maxLeaves': 4,
})
args2 = dotdict({
'size': 9, #board size
'numMCTSSims': 200, # Number of games moves for MCTS to simulate.
'arenaCompare': 2, # Number of games to play during arena play to determine if new net will be accepted.
'cpuct': 1.1,
'arenaNumMCTSSims': 20, # simulations for arena
    'instinctArena': False, # if set to True, reset Arena's MCTS tree each time
'balancedGame': True, # if balanced, black should win over 6 scores
'resignThreshold': -0.9, # No Use. Resign Only in self-play Training
'maxLevel': 7,
'levelBased': True,
'maxLeaves': 10,
})
human_vs_cpu = True
g = Game(args)
# all players
rp = RandomPlayer(g).play
#gp = GreedyGoPlayer(g).play
hp = HumanGoPlayer(g).play
# nnet players
n1 = NNet(g)
n1.load_checkpoint('./temp/','9*9oct08th.tar')
mcts1 = MCTS(g, n1, args)
n1p = lambda x: np.argmax(mcts1.getActionProb(x, arena=1, temp=0, ew=-1,instinctPlay=args.instinctArena, levelBased=args.levelBased)[0])
# nnet players
n2 = NNet(g)
n2.load_checkpoint('./temp/','9*9aug16th.tar')
mcts2 = MCTS(g, n2, args2)
n2p = lambda x: np.argmax(mcts2.getActionProb(x, arena=1, temp=0,instinctPlay=args.instinctArena, levelBased=args.levelBased)[0])
player2 = hp
arena = Arena.Arena(n1p, player2, g, display=Game.display)
x, y, z, xb = arena.playGames(2, verbose=True)
print("94 win: ", x)
print("710 win: ", y)
print("Draw: ", z)
print("Bot Win with Black: ", xb )
|
{"hexsha": "b7bbfd666fe9a020e9d906bbf97885af30d37c2c", "size": 2362, "ext": "py", "lang": "Python", "max_stars_repo_path": "pit.py", "max_stars_repo_name": "jiz322/GoAgent", "max_stars_repo_head_hexsha": "d8a082348b7c0ce0e5cd83d449ad82bf4cc84c56", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pit.py", "max_issues_repo_name": "jiz322/GoAgent", "max_issues_repo_head_hexsha": "d8a082348b7c0ce0e5cd83d449ad82bf4cc84c56", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pit.py", "max_forks_repo_name": "jiz322/GoAgent", "max_forks_repo_head_hexsha": "d8a082348b7c0ce0e5cd83d449ad82bf4cc84c56", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.4933333333, "max_line_length": 136, "alphanum_fraction": 0.6604572396, "include": true, "reason": "import numpy", "num_tokens": 722}
|
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout
from keras.layers import LSTM, TimeDistributed
import random as rnd
import numpy as np
def genModel( nChars, nHidden, numLayers = 1, dropout = 0.5, recurrent_dropout = 0.5 ):
"""Generates the RNN model with nChars characters and numLayers hidden units with
dimension nHidden."""
model = Sequential()
model.add( LSTM( nHidden, input_shape = (None, nChars), return_sequences = True,
dropout = dropout, recurrent_dropout = recurrent_dropout ) )
for _ in range( numLayers - 1 ):
model.add( LSTM( nHidden, return_sequences = True,
dropout = dropout, recurrent_dropout = recurrent_dropout ) )
model.add( TimeDistributed( Dense(nChars) ) )
model.add( Activation('softmax') )
model.compile( loss = "categorical_crossentropy", optimizer = "adam" )
return model
def genCodedText( model, nChars, phraseLen = 282, rndLevel = 1.0 ):
"""Generates a phrase of length phraseLen. Starts from a random character.
rndLevel allows one to tune how deterministic the sampling will be."""
x = np.zeros( (1, phraseLen, nChars + 3) )
x[0, 0, :][nChars] = 1 #make first character the start character.
xi = nChars
phrase = [ nChars ]
for i in range(phraseLen):
x[0, i, :][xi] = 1
probDist = model.predict(x[:, :i+1, :])[0, i]
if ( rnd.random() < rndLevel ):
xi = np.random.choice( range(nChars + 3), p = probDist.ravel())
else:
xi = np.argmax( probDist )
phrase.append( xi )
return phrase
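# --- Hedged usage sketch (editor's addition): genCodedText indexes nChars + 3
# symbol slots (the raw alphabet plus, presumably, start/end/padding markers),
# so the model must be built over nChars + 3 inputs. Sizes are illustrative.
if __name__ == "__main__":
    nChars = 40                                   # toy alphabet size
    model = genModel(nChars + 3, nHidden=64, numLayers=2)
    phrase = genCodedText(model, nChars, phraseLen=20, rndLevel=0.9)
    print(phrase)  # integer codes, beginning with the start marker nChars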
|
{"hexsha": "e951dc979213a017eb432c538cc086273035740f", "size": 1661, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/rnnModel.py", "max_stars_repo_name": "m0baxter/twitterBot", "max_stars_repo_head_hexsha": "446b0b76d80a5d2666e69013d9aaf13cb60cc65f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-03-05T19:04:21.000Z", "max_stars_repo_stars_event_max_datetime": "2018-03-05T19:04:21.000Z", "max_issues_repo_path": "src/rnnModel.py", "max_issues_repo_name": "m0baxter/twitterBot", "max_issues_repo_head_hexsha": "446b0b76d80a5d2666e69013d9aaf13cb60cc65f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/rnnModel.py", "max_forks_repo_name": "m0baxter/twitterBot", "max_forks_repo_head_hexsha": "446b0b76d80a5d2666e69013d9aaf13cb60cc65f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.7592592593, "max_line_length": 87, "alphanum_fraction": 0.6369656833, "include": true, "reason": "import numpy", "num_tokens": 421}
|
module IdentityVectorsTests
using Test
using Gridap.Arrays
l = 10
a = IdentityVector(l)
b = collect(1:l)
test_array(a,b)
c = rand(l)
d = lazy_map(Reindex(c),a)
@test d === c
end # module
|
{"hexsha": "3e67241e3e6b95d1b70d77777007f166b9853074", "size": 191, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/ArraysTests/IdentityVectorsTests.jl", "max_stars_repo_name": "aerappa/Gridap.jl", "max_stars_repo_head_hexsha": "1fb3dc9abf8c47685637901bd14a74e4355a9492", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 390, "max_stars_repo_stars_event_min_datetime": "2019-05-16T17:38:02.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T01:59:17.000Z", "max_issues_repo_path": "test/ArraysTests/IdentityVectorsTests.jl", "max_issues_repo_name": "aerappa/Gridap.jl", "max_issues_repo_head_hexsha": "1fb3dc9abf8c47685637901bd14a74e4355a9492", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 576, "max_issues_repo_issues_event_min_datetime": "2019-05-16T20:50:22.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T08:21:35.000Z", "max_forks_repo_path": "test/ArraysTests/IdentityVectorsTests.jl", "max_forks_repo_name": "aerappa/Gridap.jl", "max_forks_repo_head_hexsha": "1fb3dc9abf8c47685637901bd14a74e4355a9492", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 61, "max_forks_repo_forks_event_min_datetime": "2019-12-30T23:35:37.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-28T02:56:24.000Z", "avg_line_length": 11.9375, "max_line_length": 27, "alphanum_fraction": 0.6910994764, "num_tokens": 61}
|
from __future__ import absolute_import, division, print_function, unicode_literals
import fragment
import numpy as np
import os
import tensorflow as tf
import utils
OUTPUT_DIR = "data/processed"
def preprocess_fragment(fragment):
np_data = fragment.np_data
ds = np_data.shape
tf_input = np_data.reshape(1, ds[0], ds[1], 2)
    #tf_input = tf_input.astype('float32')
    return tf_input
def postprocess_fragment(tf_output):
np_data = tf_output.numpy()
ds = np_data.shape
np_data = np_data.reshape(ds[1], ds[2], 2)
return np_data
def prepare_tensorflow_datasets():
frags = list(fragment.from_directory())
## Getting and stacking the data
data = np.stack([preprocess_fragment(f)
for f
in frags],
0)
ds = data.shape
data = data.reshape(ds[0], ds[2], ds[3], ds[4])
## Getting the labels
labels = [f.song for f in frags]
unique_labels = list(set(labels))
num_labels = np.array([unique_labels.index(label)
for label
in labels])
## shuffle the data
p = np.random.permutation(len(num_labels))
num_labels = num_labels[p]
data = data[p,:,:]
    # 30% of data is for testing.
    train_count = int(len(labels) * 7 / 10)
    train_examples = data[:train_count]
    train_labels = num_labels[:train_count]
    test_examples = data[train_count:]
    test_labels = num_labels[train_count:]
train_dataset = tf.data.Dataset.from_tensor_slices(
(train_examples, train_labels))
test_dataset = tf.data.Dataset.from_tensor_slices(
(test_examples, test_labels))
return train_dataset, test_dataset, max(num_labels) + 1
def shuffle(train_dataset, test_dataset):
BATCH_SIZE = 32
SHUFFLE_BUFFER_SIZE = 50
train_dataset = train_dataset.shuffle(
SHUFFLE_BUFFER_SIZE).batch(BATCH_SIZE)
test_dataset = test_dataset.batch(BATCH_SIZE)
return train_dataset, test_dataset
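# --- Hedged usage sketch (editor's addition), assuming fragment.from_directory()
# yields fragments with .np_data (freq x time x 2) and .song attributes:
if __name__ == "__main__":
    train_ds, test_ds, n_labels = prepare_tensorflow_datasets()
    train_ds, test_ds = shuffle(train_ds, test_ds)
    print("number of classes:", int(n_labels))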
|
{"hexsha": "bf9b5d1c35fc7f82bcd55b6d44a60ba623a68bb7", "size": 2031, "ext": "py", "lang": "Python", "max_stars_repo_path": "load_data.py", "max_stars_repo_name": "sbenthall/deeptune", "max_stars_repo_head_hexsha": "ac74b09367f951df17c986a4890242bee9b2fcb7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-01-07T14:42:41.000Z", "max_stars_repo_stars_event_max_datetime": "2020-04-14T14:10:13.000Z", "max_issues_repo_path": "load_data.py", "max_issues_repo_name": "sbenthall/deeptune", "max_issues_repo_head_hexsha": "ac74b09367f951df17c986a4890242bee9b2fcb7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 21, "max_issues_repo_issues_event_min_datetime": "2018-06-17T15:00:19.000Z", "max_issues_repo_issues_event_max_datetime": "2019-11-16T20:04:00.000Z", "max_forks_repo_path": "load_data.py", "max_forks_repo_name": "sbenthall/deeptune", "max_forks_repo_head_hexsha": "ac74b09367f951df17c986a4890242bee9b2fcb7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.0384615385, "max_line_length": 82, "alphanum_fraction": 0.658788774, "include": true, "reason": "import numpy", "num_tokens": 484}
|
# -*- coding: utf-8 -*-
"""
1-D model
Module Name : Graph
Graph module for 2-D data
Fanghe @ gatech MoSE 3229
Version:
+ python => 3.5
+ Anaconda recommended
USTC-AEMOL
Gatech-Apollo
"""
import numpy as np
import matplotlib.pyplot as plt
def graph_output(data, time_step, fig_type = "contour"):
"""
This function is mean to initialize vars and give them IC
Args:
---------------
data : data for plot ,2-d Only
Return:
---------------
Nan
"""
assert(data.shape),"NO DATA"
if fig_type == "contour":
plt.style.context('Solarize_Light2')
plt.contourf(data)
plt.xlabel('Time')
plt.ylabel('level')
plt.colorbar()
plt.xlim((0,time_step - 1))
plt.ylim((20,79))
#plt.yticks(np.linspace(0,80,80),np.linspace(0,40000,80).astype(int))
        plt.title('Time Series of Altitude')
plt.show()
if fig_type == "pixel":
plt.imshow(data ,interpolation='nearest')
plt.gca().invert_yaxis()
        plt.title('Time Series of Altitude')
plt.xlim((0,time_step - 1))
plt.ylim((20,79))
plt.show()
return 0
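# --- Hedged usage sketch (editor's addition): plot 50 time steps of random
# data over 80 vertical levels in both supported styles.
if __name__ == "__main__":
    demo = np.random.rand(80, 50)        # (level, time) grid
    graph_output(demo, time_step=50, fig_type="contour")
    graph_output(demo, time_step=50, fig_type="pixel")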
|
{"hexsha": "c723ca9d42534fdb0c71fbaa0ab2e65391fba7ef", "size": 1160, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/plot_model.py", "max_stars_repo_name": "zfh1997/P1dD_model", "max_stars_repo_head_hexsha": "506da52a1c2251aee69e8d14cb354d4a6398e9df", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/plot_model.py", "max_issues_repo_name": "zfh1997/P1dD_model", "max_issues_repo_head_hexsha": "506da52a1c2251aee69e8d14cb354d4a6398e9df", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/plot_model.py", "max_forks_repo_name": "zfh1997/P1dD_model", "max_forks_repo_head_hexsha": "506da52a1c2251aee69e8d14cb354d4a6398e9df", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.4814814815, "max_line_length": 77, "alphanum_fraction": 0.5724137931, "include": true, "reason": "import numpy", "num_tokens": 311}
|
import argparse
import json
import os
from pprint import pprint
from collections import Counter
import torch
import numpy as np
from sklearn.metrics import (
precision_recall_fscore_support,
confusion_matrix,
accuracy_score,
)
from .data import SlotFeatures
from .wikievents import WikiEventsArgumentDataset
from .mnli import NLISlotClassifierWithMappingHead
from a2t.slot_classification.utils import (
apply_threshold,
find_optimal_threshold,
apply_individual_threshold,
find_optimal_individual_threshold,
)
CLASSIFIERS = {"mnli-mapping": NLISlotClassifierWithMappingHead}
parser = argparse.ArgumentParser(
prog="run_evaluation",
description="Run a evaluation for each configuration.",
)
parser.add_argument(
"--config",
type=str,
dest="config",
default="experiments/slot_classification/config.json",
help="Configuration file for the experiment.",
)
args = parser.parse_args()
with open(args.config, "rt") as f:
config = json.load(f)
for configuration in config:
# Create the output folder
os.makedirs(f"experiments/{configuration['name']}", exist_ok=True)
# Generate the label mappings
label2id = {label: i for i, label in enumerate(configuration["labels"])}
n_labels = len(configuration["labels"])
# Load the datasets
dev_dataset = WikiEventsArgumentDataset(
configuration["dev_file"],
create_negatives=True,
max_sentence_distance=configuration.get("max_sentence_distance", None),
mark_trigger=configuration.get("mark_trigger", False),
)
dev_labels = np.array([label2id[inst.role] for inst in dev_dataset])
if "test_file" in configuration:
test_dataset = WikiEventsArgumentDataset(
configuration["test_file"],
create_negatives=True,
max_sentence_distance=configuration.get("max_sentence_distance", None),
mark_trigger=configuration.get("mark_trigger", False),
)
test_labels = np.array([label2id[inst.role] for inst in test_dataset])
results = {}
for pretrained_model in configuration["nli_models"]:
results[pretrained_model] = {}
os.makedirs(
f"experiments/{configuration['name']}/{pretrained_model}",
exist_ok=True,
)
classifier = CLASSIFIERS[configuration["classification_model"]](
pretrained_model=pretrained_model,
negative_threshold=0.0,
**configuration,
)
# Dev
output = classifier(
dev_dataset.instances,
batch_size=configuration["batch_size"],
multiclass=True,
)
output[dev_labels == label2id["OOR"], 0] = 1.0
# Save the output
os.makedirs(
f"experiments/{configuration['name']}/{pretrained_model}/dev",
exist_ok=True,
)
np.save(
f"experiments/{configuration['name']}/{pretrained_model}/dev/output.npy",
output,
)
np.save(
f"experiments/{configuration['name']}/{pretrained_model}/dev/labels.npy",
dev_labels,
)
positive_mask = np.logical_and(dev_labels > 0, dev_labels < label2id["OOR"])
positive_acc = accuracy_score(dev_labels[positive_mask] - 1, output[positive_mask, 1:].argmax(-1))
# Individual threshold
optimal_indv_threshold, _ = find_optimal_individual_threshold(dev_labels, output, n_labels=n_labels)
output_ = apply_individual_threshold(output, optimal_indv_threshold)
with open(
f"experiments/{configuration['name']}/{pretrained_model}/dev/predictions.indv.jsonl",
"wt",
) as f:
for inst in dev_dataset.to_dict([configuration["labels"][o] for o in output_]):
f.write(f"{json.dumps(inst)}\n")
pre_indv, rec_indv, f1_indv, _ = precision_recall_fscore_support(
dev_labels,
output_,
average="micro",
labels=list(range(1, n_labels)),
)
# Global threshold
optimal_global_threshold, _ = find_optimal_threshold(dev_labels, output)
output_ = apply_threshold(output, threshold=optimal_global_threshold)
with open(
f"experiments/{configuration['name']}/{pretrained_model}/dev/predictions.global.jsonl",
"wt",
) as f:
for inst in dev_dataset.to_dict([configuration["labels"][o] for o in output_]):
f.write(f"{json.dumps(inst)}\n")
pre_global, rec_global, f1_global, _ = precision_recall_fscore_support(
dev_labels,
output_,
average="micro",
labels=list(range(1, n_labels)),
)
results[pretrained_model]["global_threshold"] = optimal_global_threshold
results[pretrained_model]["dev"] = {
"pos_accuracy": positive_acc,
"precision_global": pre_global,
"recall_global": rec_global,
"f1-score_global": f1_global,
"precision_indv": pre_indv,
"recall_indv": rec_indv,
"f1-score_indv": f1_indv,
"OOR%": 100 - 100 * positive_mask.sum() / (dev_labels > 0).sum(),
}
with open(f"experiments/{configuration['name']}/results.json", "wt") as f:
json.dump(results, f, indent=4)
if "test_file" in configuration:
# Test
output = classifier(
test_dataset.instances,
batch_size=configuration["batch_size"],
multiclass=True,
)
output[test_labels == label2id["OOR"], 0] = 1.0
# Save the output
os.makedirs(
f"experiments/{configuration['name']}/{pretrained_model}/test",
exist_ok=True,
)
np.save(
f"experiments/{configuration['name']}/{pretrained_model}/test/output.npy",
output,
)
np.save(
f"experiments/{configuration['name']}/{pretrained_model}/test/labels.npy",
test_labels,
)
positive_mask = np.logical_and(test_labels > 0, test_labels < label2id["OOR"])
positive_acc = accuracy_score(
test_labels[positive_mask] - 1,
output[positive_mask, 1:].argmax(-1),
)
# Individual threshold
output_ = apply_individual_threshold(output, optimal_indv_threshold)
with open(
f"experiments/{configuration['name']}/{pretrained_model}/test/predictions.indv.jsonl",
"wt",
) as f:
for inst in test_dataset.to_dict([configuration["labels"][o] for o in output_]):
f.write(f"{json.dumps(inst)}\n")
pre_indv, rec_indv, f1_indv, _ = precision_recall_fscore_support(
test_labels,
output_,
average="micro",
labels=list(range(1, n_labels)),
)
# Global threshold
output_ = apply_threshold(output, threshold=optimal_global_threshold)
with open(
f"experiments/{configuration['name']}/{pretrained_model}/test/predictions.global.jsonl",
"wt",
) as f:
for inst in test_dataset.to_dict([configuration["labels"][o] for o in output_]):
f.write(f"{json.dumps(inst)}\n")
            pre_global, rec_global, f1_global, _ = precision_recall_fscore_support(
test_labels,
output_,
average="micro",
labels=list(range(1, n_labels)),
)
results[pretrained_model]["test"] = {
"pos_accuracy": positive_acc,
"precision_global": pre_global,
"recall_global": rec_global,
"f1-score_global": f1_global,
"precision_indv": pre_indv,
"recall_indv": rec_indv,
"f1-score_indv": f1_indv,
"OOR%": 100 - 100 * positive_mask.sum() / (test_labels > 0).sum(),
}
with open(f"experiments/{configuration['name']}/results.json", "wt") as f:
json.dump(results, f, indent=4)
classifier.clear_gpu_memory()
del classifier
torch.cuda.empty_cache()
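# --- Hedged note (editor's addition): the two thresholding strategies above
# differ only in granularity. apply_threshold discards predictions below one
# global cut-off tuned on dev (find_optimal_threshold), while
# apply_individual_threshold tunes and applies a separate cut-off per label
# (find_optimal_individual_threshold); both are then reused unchanged on test.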
|
{"hexsha": "55a8808f416767aa9be9fecb25ef62925cd91d75", "size": 8435, "ext": "py", "lang": "Python", "max_stars_repo_path": "a2t/slot_classification/run_evaluation.py", "max_stars_repo_name": "techthiyanes/Ask2Transformers", "max_stars_repo_head_hexsha": "7ea530eb39db3514f0d9147b783fcd14a184c599", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 63, "max_stars_repo_stars_event_min_datetime": "2020-07-02T11:03:01.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-16T14:38:54.000Z", "max_issues_repo_path": "a2t/slot_classification/run_evaluation.py", "max_issues_repo_name": "barana91/Ask2Transformers", "max_issues_repo_head_hexsha": "c89307241a2d8be9ac2896c5e66e7fbc947237bb", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2021-03-17T18:29:25.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-23T06:19:28.000Z", "max_forks_repo_path": "a2t/slot_classification/run_evaluation.py", "max_forks_repo_name": "barana91/Ask2Transformers", "max_forks_repo_head_hexsha": "c89307241a2d8be9ac2896c5e66e7fbc947237bb", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2021-09-02T20:21:10.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-18T13:35:17.000Z", "avg_line_length": 35.0, "max_line_length": 108, "alphanum_fraction": 0.5956135151, "include": true, "reason": "import numpy", "num_tokens": 1808}
|
import pickle
import os
import numpy as np
from gala import imio, agglo, features, classify
fman = features.default.snemi3d()
def train(index):
out_fn = 'training-data-%i.h5' % index
if os.path.exists(out_fn):
data, labels = classify.load_training_data_from_disk(out_fn,
names=['data', 'labels'])
else:
ws_tr = imio.read_image_stack('watershed-%i.lzf.h5' % index)
pr_tr = imio.read_image_stack('probabilities-%i.lzf.h5' % index) / 255
gt_tr = imio.read_image_stack('ground-truth-%i.lzf.h5' % index)
g = agglo.Rag(ws_tr, pr_tr,
feature_manager=fman)
data, labels = g.learn_agglomerate(gt_tr, fman, min_num_epochs=4)[0][:2]
classify.save_training_data_to_disk([data, labels],
fn='training-data-%i.h5' % index,
names=['data', 'labels'])
print('total training data:', data.shape)
print('size in MB:', data.size * data.itemsize / 1e6)
rf = classify.DefaultRandomForest()
rf.fit(data, labels[:, 0])
policy = agglo.classifier_probability(fman, rf)
return policy
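# --- Hedged note (editor's addition): train() caches the (data, labels) pairs
# from learn_agglomerate in training-data-<i>.h5, so reruns skip the expensive
# RAG construction and agglomeration and only refit the random forest.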
def test(index, policy):
ws = imio.read_image_stack('watershed-%i.lzf.h5' % index)
pr = imio.read_image_stack('probabilities-%i.lzf.h5' % index) / 255
g = agglo.Rag(ws, pr, merge_priority_function=policy,
feature_manager=fman)
g.agglomerate(np.inf)
return g.tree
if __name__ == '__main__':
trees = {}
for training_index in range(4):
print('training %i' % training_index)
policy = train(training_index)
for testing_index in range(4):
if testing_index == training_index:
continue
print('testing %i' % testing_index)
tree = test(testing_index, policy)
trees[(training_index, testing_index)] = tree
with open('results-%i-%i.pickle' % (training_index, testing_index),
'wb') as fout:
pickle.dump(tree, fout, protocol=-1)
with open('results.pickle', 'wb') as fout:
pickle.dump(trees, fout, protocol=-1)
|
{"hexsha": "2a709f51c95cc669dd78cdec425df1f397b2e9ec", "size": 2192, "ext": "py", "lang": "Python", "max_stars_repo_path": "crossval4x.py", "max_stars_repo_name": "jni/gala-scripts", "max_stars_repo_head_hexsha": "fc0ee0d418496f0ec8da01e8bd8e2d12024accaa", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "crossval4x.py", "max_issues_repo_name": "jni/gala-scripts", "max_issues_repo_head_hexsha": "fc0ee0d418496f0ec8da01e8bd8e2d12024accaa", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "crossval4x.py", "max_forks_repo_name": "jni/gala-scripts", "max_forks_repo_head_hexsha": "fc0ee0d418496f0ec8da01e8bd8e2d12024accaa", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.4561403509, "max_line_length": 80, "alphanum_fraction": 0.5935218978, "include": true, "reason": "import numpy", "num_tokens": 541}
|
(*
The well-ordering theorem. The proof mostly follows Section III.2 of
Bourbaki.
*)
theory WellOrder
imports Interval Wfrec
begin
section \<open>Operation of adjoining a greatest element to an order\<close>
(* Abbreviated to ++ in this theory only *)
definition adjoin_greatest :: "[i, i] \<Rightarrow> i" (infix "++" 55) where [rewrite]:
"R ++ a = Order(carrier(R)\<union>{a}, \<lambda>x y. y = a \<or> x \<le>\<^sub>R y)"
setup {* register_wellform_data ("R ++ a", ["a \<notin> carrier(R)"]) *}
setup {* add_prfstep_check_req ("R ++ a", "a \<notin> carrier(R)") *}
lemma adjoin_greatest_type [typing]:
"order(R) \<Longrightarrow> a \<notin> carrier(R) \<Longrightarrow> R ++ a \<in> raworder_space(carrier(R)\<union>{a})" by auto2
lemma adjoin_greatest_is_order:
"order(R) \<Longrightarrow> a \<notin> carrier(R) \<Longrightarrow> order(R ++ a)" by auto2
setup {* add_forward_prfstep_cond @{thm adjoin_greatest_is_order} [with_term "?R ++ ?a"] *}
lemma adjoin_greatest_eval1:
"x \<in> carrier(R)\<union>{a} \<Longrightarrow> S = R ++ a \<Longrightarrow> x \<le>\<^sub>S a" by auto2
setup {* add_forward_prfstep_cond @{thm adjoin_greatest_eval1} [with_term "?R ++ ?a"] *}
lemma adjoin_greatest_eval2:
"order(R) \<Longrightarrow> a \<notin> carrier(R) \<Longrightarrow> S = R ++ a \<Longrightarrow> x \<in>. R \<Longrightarrow> \<not>a \<le>\<^sub>S x" by auto2
setup {* add_forward_prfstep_cond @{thm adjoin_greatest_eval2} [with_term "?R ++ ?a"] *}
lemma adjoin_greatest_eval3 [rewrite]:
"a \<notin> carrier(R) \<Longrightarrow> S = R ++ a \<Longrightarrow> x \<in>. R \<Longrightarrow> y \<in>. R \<Longrightarrow> x \<le>\<^sub>S y \<longleftrightarrow> x \<le>\<^sub>R y" by auto2
setup {* del_prfstep_thm @{thm adjoin_greatest_def} *}
lemma adjoin_greatest_restrict [rewrite]:
"ord_form(R) \<Longrightarrow> order(R) \<Longrightarrow> a \<notin> carrier(R) \<Longrightarrow> suborder(R ++ a,carrier(R)) = R" by auto2
lemma adjoin_greatest_prop:
"order(R) \<Longrightarrow> a \<notin> carrier(R) \<Longrightarrow>
has_greatest(R ++ a,carrier(R)\<union>{a}) \<and> greatest(R ++ a,carrier(R)\<union>{a}) = a" by auto2
lemma adjoin_greatest_unique [backward]:
"order(R) \<Longrightarrow> ord_form(S) \<Longrightarrow> order(S) \<Longrightarrow> a \<notin> carrier(R) \<Longrightarrow> carrier(S) = carrier(R) \<union> {a} \<Longrightarrow>
\<forall>x\<in>.S. x \<le>\<^sub>S a \<Longrightarrow> suborder(S,carrier(R)) = R \<Longrightarrow> S = R ++ a" by auto2
lemma linorder_adjoin:
"linorder(R) \<Longrightarrow> a \<notin> carrier(R) \<Longrightarrow> linorder(R ++ a)" by auto2
setup {* add_forward_prfstep_cond @{thm linorder_adjoin} [with_term "?R ++ ?a"] *}
lemma adjoin_greatest_less_interval [rewrite]:
"linorder(M) \<Longrightarrow> a \<notin> carrier(M) \<Longrightarrow> less_interval(M ++ a,a) = carrier(M)" by auto2
lemma adjoin_greatest_less_interval2 [rewrite]:
"linorder(M) \<Longrightarrow> a \<notin> carrier(M) \<Longrightarrow> x \<in>. M \<Longrightarrow> less_interval(M ++ a,x) = less_interval(M,x)" by auto2
section \<open>Well-ordered sets\<close> (* Bourbaki III.2.1 *)
(* Definition of well_order *)
definition well_order :: "i \<Rightarrow> o" where [rewrite]:
"well_order(R) \<longleftrightarrow> linorder(R) \<and> wf(R)"
(* With the linorder condition, wf is equivalent to the condition that each
   nonempty subset has a least element. We use the latter condition here. *)
lemma well_orderD1 [forward]:
"well_order(R) \<Longrightarrow> linorder(R)"
"well_order(R) \<Longrightarrow> wf(R)" by auto2+
lemma well_orderD2 [forward, backward]:
"well_order(R) \<Longrightarrow> X \<subseteq> carrier(R) \<Longrightarrow> X \<noteq> \<emptyset> \<Longrightarrow> has_least(R,X)"
@proof
@obtain "x\<in>X" where "ord_minimal(R,X,x)"
@have "has_least(R,X) \<and> least(R,X) = x"
@qed
lemma well_orderI' [forward]:
"wf(R) \<Longrightarrow> linorder(R) \<Longrightarrow> well_order(R)" by auto2
lemma well_orderI [backward]:
"linorder(R) \<and> (\<forall>X. X \<subseteq> carrier(R) \<longrightarrow> X \<noteq> \<emptyset> \<longrightarrow> has_least(R,X)) \<Longrightarrow> well_order(R)"
@proof
@have "\<forall>X. X \<subseteq> carrier(R) \<longrightarrow> X \<noteq> \<emptyset> \<longrightarrow> (\<exists>x\<in>X. ord_minimal(R,X,x))" @with
@have "ord_minimal(R,X,least(R,X))"
@end
@qed
setup {* del_prfstep_thm @{thm well_order_def} *}
lemma wellorder_iso [forward]:
"well_order(R) \<Longrightarrow> ord_isomorphic(R,S) \<Longrightarrow> well_order(S)"
@proof
@obtain "f \<in> R \<cong>\<^sub>O S"
@have (@rule) "\<forall>y\<in>.S. \<exists>x\<in>.R. f`x = y"
@have "\<forall>X. X \<subseteq> carrier(S) \<longrightarrow> X \<noteq> \<emptyset> \<longrightarrow> has_least(S,X)" @with
@let "U = f -`` X"
@have "has_least(R,U)"
@have "has_least(S,X) \<and> least(S,X) = f ` least(R,U)"
@end
@qed
lemma well_order_suborder:
"well_order(R) \<Longrightarrow> linorder(A) \<Longrightarrow> is_suborder(A,R) \<Longrightarrow> well_order(A)"
@proof
@have "\<forall>X. X \<subseteq> carrier(A) \<longrightarrow> X \<noteq> \<emptyset> \<longrightarrow> has_least(A,X)" @with
@have "has_least(A,X) \<and> least(A,X) = least(R,X)"
@end
@qed
lemma well_order_adjoin [resolve]:
"well_order(R) \<Longrightarrow> a \<notin> carrier(R) \<Longrightarrow> well_order(R ++ a)"
@proof
@have "\<forall>X. X \<subseteq> carrier(R)\<union>{a} \<longrightarrow> X \<noteq> \<emptyset> \<longrightarrow> has_least(R ++ a,X)" @with
@contradiction
@have "has_least(R,X\<midarrow>{a})"
@have "has_least(R ++ a, X\<midarrow>{a}) \<and> least(R ++ a, X\<midarrow>{a}) = least(R, X\<midarrow>{a})"
@end
@qed
(* Segments. Correspond to less_intervals for well_order. The main result of
   this portion is that if R is well-ordered, the set of segments of R is
   well-ordered and ord_isomorphic to R with a greatest element adjoined. *)
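(* Illustration (editor's addition): for the natural numbers under the usual
   order, the segments are the initial intervals {0,...,n-1} together with the
   whole carrier; ordered by inclusion they form a chain isomorphic to the
   naturals with one greatest element adjoined. *)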
definition is_segment :: "[i, i] \<Rightarrow> o" where [rewrite]:
"is_segment(R,S) \<longleftrightarrow> (S \<subseteq> carrier(R) \<and> (\<forall>x\<in>S. \<forall>y\<in>.R. y \<le>\<^sub>R x \<longrightarrow> y \<in> S))"
lemma is_segmentD [forward]:
"is_segment(R,S) \<Longrightarrow> S \<subseteq> carrier(R)"
"is_segment(R,S) \<Longrightarrow> x \<in> S \<Longrightarrow> \<forall>y\<in>.R. y \<le>\<^sub>R x \<longrightarrow> y \<in> S" by auto2+
setup {* del_prfstep_thm_eqforward @{thm is_segment_def} *}
lemma segment_is_interval [backward2]:
"well_order(R) \<Longrightarrow> is_segment(R,S) \<Longrightarrow> S \<noteq> carrier(R) \<Longrightarrow> \<exists>a\<in>.R. S = less_interval(R,a)"
@proof
@have "has_least(R,carrier(R)\<midarrow>S)"
@have "least(R,carrier(R)\<midarrow>S) \<in>. R"
@have "carrier(R)\<midarrow>S = ge_interval(R,least(R,carrier(R)\<midarrow>S))"
@qed
lemma interval_is_segment [resolve]:
"order(R) \<Longrightarrow> is_segment(R,less_interval(R,a))" by auto2
definition segments :: "i \<Rightarrow> i" where [rewrite]:
"segments(R) = {S \<in> Pow(carrier(R)). is_segment(R,S)}"
definition pt_to_segment_fun :: "i \<Rightarrow> i" where [rewrite]:
"pt_to_segment_fun(R) = Mor(R,subset_order(segments(R)),\<lambda>a. less_interval(R,a))"
lemma pt_to_segment_fun_is_fun [typing]:
"well_order(R) \<Longrightarrow> pt_to_segment_fun(R) \<in> R \<rightharpoonup> subset_order(segments(R))" by auto2
lemma pt_to_segment_eval [rewrite]:
"well_order(R) \<Longrightarrow> a \<in> source(pt_to_segment_fun(R)) \<Longrightarrow>
pt_to_segment_fun(R) ` a = less_interval(R,a)" by auto2
setup {* del_prfstep_thm @{thm pt_to_segment_fun_def} *}
lemma pt_to_segment_fun_inj [forward]:
"well_order(R) \<Longrightarrow> injective(pt_to_segment_fun(R))" by auto2
lemma pt_to_segment_fun_incr [forward]:
"well_order(R) \<Longrightarrow> incr(pt_to_segment_fun(R))" by auto2
setup {* add_forward_prfstep_cond @{thm pt_to_segment_fun_incr} [with_term "pt_to_segment_fun(?R)"] *}
lemma pt_to_segment_fun_image [rewrite]:
"well_order(R) \<Longrightarrow> image(pt_to_segment_fun(R)) = segments(R) \<midarrow> {carrier(R)}"
@proof
@have "\<forall>S\<in>segments(R) \<midarrow> {carrier(R)}. S \<in> image(pt_to_segment_fun(R))" @with
@obtain "a\<in>.R" where "S = less_interval(R,a)"
@have "S = pt_to_segment_fun(R) ` a" @end
@qed
lemma pt_to_segment_fun_iso:
"well_order(R) \<Longrightarrow> S = subset_order(segments(R)\<midarrow>{carrier(R)}) \<Longrightarrow> well_order(S)"
@proof @have "mor_restrict_image_ord(pt_to_segment_fun(R)) \<in> R \<cong>\<^sub>O S" @qed
setup {* add_forward_prfstep_cond @{thm pt_to_segment_fun_iso} [with_term "?S"] *}
lemma segments_order [rewrite_back]:
"well_order(R) \<Longrightarrow> subset_order(segments(R)) = subset_order(segments(R)\<midarrow>{carrier(R)}) ++ carrier(R)" by auto2
lemma segments_wellorder:
"well_order(R) \<Longrightarrow> well_order(subset_order(segments(R)))"
@proof
@have "subset_order(segments(R)) = subset_order(segments(R)\<midarrow>{carrier(R)}) ++ carrier(R)"
@qed
(* Families of well-ordered sets *)
definition is_segment_rel :: "[i, i] \<Rightarrow> o" where [rewrite]:
"is_segment_rel(R,S) \<longleftrightarrow> (is_suborder(R,S) \<and> is_segment(S,carrier(R)))"
definition well_order_family :: "i \<Rightarrow> o" where [rewrite]:
"well_order_family(X) \<longleftrightarrow> ((\<forall>R\<in>X. well_order(R)) \<and>
(\<forall>R\<in>X. \<forall>S\<in>X. is_segment_rel(R,S) \<or> is_segment_rel(S,R)))"
lemma well_order_familyD [forward]:
"well_order_family(X) \<Longrightarrow> R \<in> X \<Longrightarrow> well_order(R)"
"well_order_family(X) \<Longrightarrow> \<not>is_segment_rel(R,S) \<Longrightarrow> R \<in> X \<Longrightarrow> S \<in> X \<Longrightarrow> is_segment_rel(S,R)" by auto2+
setup {* del_prfstep_thm_eqforward @{thm well_order_family_def} *}
lemma well_order_familyD' [backward]:
"well_order_family(X) \<Longrightarrow> R \<in> X \<Longrightarrow> y \<in> union_src(X) \<Longrightarrow> \<exists>S\<in>X. y \<in>. S \<and> is_segment_rel(R,S)"
by auto2
lemma well_order_family_directed [forward]:
"well_order_family(X) \<Longrightarrow> directed_rels(X)"
@proof
@have "\<forall>R\<in>X. \<forall>S\<in>X. \<exists>T\<in>X. is_suborder(R,T) \<and> is_suborder(S,T)" @with
@case "is_segment_rel(R,S)" @end
@qed
lemma is_segment_union [backward]:
"well_order_family(X) \<Longrightarrow> R \<in> X \<Longrightarrow> U = union_rel(X) \<Longrightarrow> is_segment_rel(R,U)"
@proof
@have "\<forall>x\<in>.R. \<forall>y\<in>union_src(X). y \<le>\<^sub>U x \<longrightarrow> y \<in>. R" @with
@obtain "S\<in>X" where "y \<in>. S" "is_segment_rel(R,S)" @end
@qed
lemma well_order_family_union_prop [forward]:
"well_order_family(X) \<Longrightarrow> well_order(union_rel(X))"
@proof
@have "\<forall>H. H \<subseteq> union_src(X) \<longrightarrow> H \<noteq> \<emptyset> \<longrightarrow> has_least(union_rel(X),H)" @with
@obtain "R\<in>X" where "H \<inter> carrier(R) \<noteq> \<emptyset>" @with
@obtain "x \<in> H" @obtain "S\<in>X" where "x \<in>. S"
@end
@have "has_least(R, H \<inter> carrier(R))"
@let "m = least(R, H \<inter> carrier(R))"
@have "has_least(union_rel(X),H) \<and> least(union_rel(X),H) = m" @with
@have "\<forall>x\<in>H. ge(x,union_rel(X),m)" @with
@case "x \<in>. R" @with @have "x \<in> H \<inter> carrier(R)" @end
@obtain "S\<in>X" where "x \<in>. S" "is_segment_rel(R,S)"
@end
@end
@end
@qed
lemma well_order_family_segments [rewrite]:
"well_order_family(X) \<Longrightarrow> x \<in>. R \<Longrightarrow> R \<in> X \<Longrightarrow> less_interval(R,x) = less_interval(union_rel(X),x)"
@proof@have "is_segment_rel(R,union_rel(X))" @qed
lemma well_order_family_segments2:
"well_order_family(X) \<Longrightarrow> is_segment(union_rel(X),S) \<Longrightarrow> S \<noteq> union_src(X) \<Longrightarrow> \<exists>R\<in>X. is_segment(R,S)"
@proof
@obtain "x\<in>union_src(X)" where "S = less_interval(union_rel(X),x)"
@obtain "R\<in>X" where "x \<in>. R"
@have "less_interval(R,x) = less_interval(union_rel(X),x)"
@qed
section \<open>Zermelo's Theorem\<close> (* Bourbaki III.2.3 *)
(* Set of relations on subsets of E. *)
definition suborder_space :: "i \<Rightarrow> i" where [rewrite]:
"suborder_space(E) = (\<Union>X\<in>Pow(E). raworder_space(X))"
lemma suborder_space_iff [rewrite]:
"R \<in> suborder_space(E) \<longleftrightarrow> (ord_form(R) \<and> raworder(R) \<and> carrier(R) \<subseteq> E)" by auto2
setup {* del_prfstep_thm @{thm suborder_space_def} *}
(* Given a set E, a collection S of subsets of E, and a map p from S to E such
   that p(X) \<notin> X for all X \<in> S, define the collection of compatible
   well-orderings on subsets of E. *)
definition compat_wellorders :: "[i, i, i] \<Rightarrow> i" where [rewrite]:
"compat_wellorders(E,S,p) = {R\<in>suborder_space(E).
well_order(R) \<and> (\<forall>x\<in>.R. less_interval(R,x) \<in> S \<and> p`(less_interval(R,x)) = x)}"
lemma compat_wellordersD:
"R \<in> compat_wellorders(E,S,p) \<Longrightarrow> well_order(R) \<and> carrier(R) \<subseteq> E"
"x \<in>. R \<Longrightarrow> R \<in> compat_wellorders(E,S,p) \<Longrightarrow> less_interval(R,x) \<in> S"
"x \<in>. R \<Longrightarrow> R \<in> compat_wellorders(E,S,p) \<Longrightarrow> p`(less_interval(R,x)) = x" by auto2+
setup {* fold add_forward_prfstep @{thms compat_wellordersD(1-2)} *}
setup {* add_rewrite_rule @{thm compat_wellordersD(3)} *}
lemma compat_wellordersI [backward]:
"ord_form(R) \<Longrightarrow> well_order(R) \<Longrightarrow> carrier(R) \<subseteq> E \<Longrightarrow>
\<forall>x\<in>.R. less_interval(R,x) \<in> S \<and> p`(less_interval(R,x)) = x \<Longrightarrow>
R \<in> compat_wellorders(E,S,p)" by auto2
setup {* del_prfstep_thm @{thm compat_wellorders_def} *}
lemma less_interval_rel_is_segment:
"well_order(R) \<Longrightarrow> is_segment_rel(less_interval_rel(R,x),R)" by auto2
setup {* add_forward_prfstep_cond @{thm less_interval_rel_is_segment}
[with_term "less_interval_rel(?R,?x)"] *}
definition compat_wellorder_segs :: "[i, i, i, i, i] \<Rightarrow> i" where [rewrite]:
"compat_wellorder_segs(E,S,p,R1,R2) =
{x\<in>carrier(R1)\<inter>carrier(R2). less_interval_rel(R1,x) = less_interval_rel(R2,x)}"
setup {* register_wellform_data ("compat_wellorder_segs(E,S,p,R1,R2)",
["R1 \<in> compat_wellorders(E,S,p)", "R2 \<in> compat_wellorders(E,S,p)"]) *}
(* Basic properties of compat_wellorder_segs *)
lemma compat_wellorder_segs_basic:
"R1 \<in> compat_wellorders(E,S,p) \<Longrightarrow> R2 \<in> compat_wellorders(E,S,p) \<Longrightarrow>
is_segment(R1,compat_wellorder_segs(E,S,p,R1,R2)) \<and>
is_segment(R2,compat_wellorder_segs(E,S,p,R1,R2)) \<and>
suborder(R1,compat_wellorder_segs(E,S,p,R1,R2)) = suborder(R2,compat_wellorder_segs(E,S,p,R1,R2))" by auto2
setup {* add_forward_prfstep_cond @{thm compat_wellorder_segs_basic}
[with_term "compat_wellorder_segs(?E,?S,?p,?R1.0,?R2.0)"] *}
(* Condition for Zermelo's theorem *)
definition compat_wellorder_cond :: "[i, i, i] \<Rightarrow> o" where [rewrite]:
"compat_wellorder_cond(E,S,p) \<longleftrightarrow> (S \<subseteq> Pow(E) \<and> p \<in> S \<rightarrow> E \<and> (\<forall>X\<in>S. p`X \<notin> X))"
lemma compat_wellorder_prop [forward]:
"R1 \<in> compat_wellorders(E,S,p) \<Longrightarrow> R2 \<in> compat_wellorders(E,S,p) \<Longrightarrow>
compat_wellorder_cond(E,S,p) \<Longrightarrow> is_segment_rel(R1,R2) \<or> is_segment_rel(R2,R1)"
@proof
@let "V = compat_wellorder_segs(E,S,p,R1,R2)"
@have (@rule) "V = carrier(R1) \<or> V = carrier(R2)" @with
@contradiction
@obtain "x\<in>.R1" where "V = less_interval(R1,x)"
@obtain "y\<in>.R2" where "V = less_interval(R2,y)" @have "x = y" @end
@case "V = carrier(R1)"
@qed
lemma compat_wellorders_family [forward]:
"compat_wellorder_cond(E,S,p) \<Longrightarrow> well_order_family(compat_wellorders(E,S,p))" by auto2
setup {* del_prfstep_thm @{thm compat_wellorder_prop} *}
definition compat_wellorder :: "[i, i, i] \<Rightarrow> i" where [rewrite]:
"compat_wellorder(E,S,p) = union_rel(compat_wellorders(E,S,p))"
lemma compat_wellorders_step [backward2]:
"compat_wellorder_cond(E,S,p) \<Longrightarrow> carrier(M) \<in> S \<Longrightarrow> M \<in> compat_wellorders(E,S,p) \<Longrightarrow>
M' = M ++ p`carrier(M) \<Longrightarrow> M' \<in> compat_wellorders(E,S,p)"
@proof
@have "well_order(M')"
@have "\<forall>x\<in>.M'. less_interval(M',x) \<in> S \<and> p`(less_interval(M',x)) = x" @with
@case "x = p`carrier(M)" @have "x \<in>. M"
@have "less_interval(M,x) = less_interval(M',x)" @end
@qed
lemma compat_wellorders_rel_not_in [forward]:
"compat_wellorder_cond(E,S,p) \<Longrightarrow> M = compat_wellorder(E,S,p) \<Longrightarrow> carrier(M) \<notin> S"
@proof
@contradiction
@let "a = p`carrier(M)"
@let "M' = compat_wellorder(E,S,p) ++ a"
@have "M' \<in> compat_wellorders(E,S,p)"
@qed
(* The final result *)
lemma compat_wellorders_cond_prop [resolve]:
"compat_wellorder_cond(E,S,p) \<Longrightarrow>
\<exists>\<Gamma>. ord_form(\<Gamma>) \<and> well_order(\<Gamma>) \<and> (\<forall>x\<in>.\<Gamma>. less_interval(\<Gamma>,x)\<in>S \<and> p`less_interval(\<Gamma>,x) = x) \<and>
carrier(\<Gamma>) \<subseteq> E \<and> carrier(\<Gamma>) \<notin> S" by auto2
lemma compat_wellorders_cond_prop' [resolve]:
"compat_wellorder_cond(E,S,p) \<Longrightarrow>
\<exists>\<Gamma>. ord_form(\<Gamma>) \<and> well_order(\<Gamma>) \<and> carrier(\<Gamma>) \<subseteq> E \<and> carrier(\<Gamma>) \<notin> S" by auto2
setup {* del_prfstep_thm @{thm compat_wellorders_rel_not_in} *}
(* Wellordering theorem *)
lemma wellorder_theorem [resolve]:
"\<exists>R\<in>raworder_space(E). well_order(R)"
@proof
@let "S = Pow(E)\<midarrow>{E}"
@let "p = Fun(S,E, \<lambda>X. SOME x\<in>E. x \<notin> X)"
@have "compat_wellorder_cond(E,S,p)"
@obtain \<Gamma> where "ord_form(\<Gamma>)" "well_order(\<Gamma>)" "carrier(\<Gamma>) \<subseteq> E" "carrier(\<Gamma>) \<notin> S"
@have "\<Gamma> \<in> raworder_space(E)"
@qed
no_notation adjoin_greatest (infix "++" 55)
section \<open>Zorn's lemma\<close> (* Bourbaki III.2.4 *)
definition inductive_order :: "i \<Rightarrow> o" where [rewrite]:
"inductive_order(R) \<longleftrightarrow> (order(R) \<and>
(\<forall>X. X \<subseteq> carrier(R) \<longrightarrow> linorder(suborder(R,X)) \<longrightarrow> upper_bound(R,X) \<noteq> \<emptyset>))"
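(* Illustration: if F \<subseteq> Pow(E) is closed under unions of chains, then
   subset_order(F) is inductive, since \<Union>X is an upper bound of every
   chain X. This is exactly the pattern used in zorn_subsets below. *)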
lemma inductive_orderE1 [forward]: "inductive_order(R) \<Longrightarrow> order(R)" by auto2
lemma inductive_orderE2 [backward]:
"inductive_order(R) \<Longrightarrow> X \<subseteq> carrier(R) \<Longrightarrow> linorder(suborder(R,X)) \<Longrightarrow> upper_bound(R,X) \<noteq> \<emptyset>" by auto2
setup {* del_prfstep_thm_eqforward @{thm inductive_order_def} *}
lemma zorn_aux [resolve]:
"order(R) \<Longrightarrow> \<forall>X. X \<subseteq> carrier(R) \<longrightarrow> well_order(suborder(R,X)) \<longrightarrow> upper_bound(R,X) \<noteq> \<emptyset> \<Longrightarrow>
\<exists>x. maximal(R,x)"
@proof
@let "E = carrier(R)"
@let "S = {X\<in>Pow(carrier(R)). upper_bound(R,X) \<midarrow> X \<noteq> \<emptyset>}"
@let "p = Fun(S, E, \<lambda>X. SOME x\<in>upper_bound(R,X). x \<notin> X)"
@have "p \<in> S \<rightarrow> E"
@have (@rule) "\<forall>X\<in>S. p`X \<in> upper_bound(R,X)"
@have "compat_wellorder_cond(carrier(R),S,p)"
@obtain \<Gamma> where "ord_form(\<Gamma>)" "well_order(\<Gamma>)" "\<forall>x\<in>.\<Gamma>. less_interval(\<Gamma>,x)\<in>S \<and> p`less_interval(\<Gamma>,x) = x"
"carrier(\<Gamma>) \<subseteq> E" "carrier(\<Gamma>) \<notin> S"
@let "M = carrier(\<Gamma>)"
@have "\<Gamma> = suborder(R,M)" @with
@have "\<forall>x\<in>M. \<forall>y\<in>M. x <\<^sub>\<Gamma> y \<longrightarrow> less(suborder(R,M),x,y)" @with
@have "p`less_interval(\<Gamma>,y) = y" @end @end
@obtain x where "x \<in> upper_bound(R,M)" @have "maximal(R,x)"
@qed
lemma zorn [resolve]:
"inductive_order(R) \<Longrightarrow> \<exists>x. maximal(R,x)"
@proof
@have "\<forall>X. X \<subseteq> carrier(R) \<longrightarrow> well_order(suborder(R,X)) \<longrightarrow> upper_bound(R,X) \<noteq> \<emptyset>"
@qed
lemma inductive_ge_interval:
"inductive_order(R) \<Longrightarrow> a \<in>. R \<Longrightarrow> S = suborder(R,ge_interval(R,a)) \<Longrightarrow> inductive_order(S)"
@proof
@have "\<forall>X. X \<subseteq> carrier(S) \<longrightarrow> linorder(suborder(S,X)) \<longrightarrow> upper_bound(S,X) \<noteq> \<emptyset>" @with
@case "X = \<emptyset>" @with @have "a \<in> upper_bound(S,X)" @end
@have "suborder(R,X) = suborder(S,X)"
@obtain x where "x \<in> upper_bound(R,X)"
@obtain "y \<in> X" @have "x \<ge>\<^sub>R y"
@have "x \<in> upper_bound(S,X)"
@end
@qed
setup {* add_forward_prfstep_cond @{thm inductive_ge_interval} [with_term "?S"] *}
lemma zorn_ge_elt: "inductive_order(R) \<Longrightarrow> a \<in>. R \<Longrightarrow> \<exists>x. x \<ge>\<^sub>R a \<and> maximal(R,x)"
@proof
@have "inductive_order(suborder(R,ge_interval(R,a)))"
@obtain x where "maximal(suborder(R,ge_interval(R,a)),x)"
@have "maximal(R,x)"
@qed
lemma zorn_subsets:
"F \<subseteq> Pow(E) \<Longrightarrow> R = subset_order(F) \<Longrightarrow>
\<forall>X. X \<subseteq> F \<longrightarrow> linorder(subset_order(X)) \<longrightarrow> \<Union>X \<in> F \<Longrightarrow> \<exists>x. maximal(R,x)"
@proof
@have "inductive_order(R)" @with
@have "\<forall>X. X \<subseteq> carrier(R) \<longrightarrow> linorder(suborder(R,X)) \<longrightarrow> upper_bound(R,X) \<noteq> \<emptyset>" @with
@have "\<Union>X \<in> upper_bound(R,X)"
@end
@end
@qed
end
|
{"author": "bzhan", "repo": "auto2", "sha": "2e83c30b095f2ed9fa5257f79570eb354ed6e6a7", "save_path": "github-repos/isabelle/bzhan-auto2", "path": "github-repos/isabelle/bzhan-auto2/auto2-2e83c30b095f2ed9fa5257f79570eb354ed6e6a7/FOL/WellOrder.thy"}
|
# Gets parameters from a fit, puts them in a dataframe, prints them pretty.
from StringIO import StringIO
import pandas as pd
import numpy as np
import prettytable
def get_params(fit_params, u_param=None):
    # Round the fit parameters (and uncertainties, if given) to six decimals.
    ffit_params = np.around(np.asarray(fit_params), decimals=6).tolist()
    columns = ['parameter_%d' % (i + 1) for i in range(len(ffit_params))]
    if u_param is not None:
        # Include uncertainties in fit parameters
        fu_param = np.around(np.asarray(u_param), decimals=6).tolist()
        index = ['fit_parameters', 'u_parameter']
        df = pd.DataFrame(columns=columns, index=index)
        df.loc[index[0]] = ffit_params
        df.loc[index[1]] = fu_param
    else:
        # No uncertainties in fit parameters
        index = ['fit_parameters']
        df = pd.DataFrame(columns=columns, index=index)
        df.loc[index[0]] = ffit_params
    # Route the dataframe through CSV so prettytable can align the columns.
    output = StringIO()
    df.to_csv(output)
    output.seek(0)
    pt = prettytable.from_csv(output)
    print pt
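if __name__ == '__main__':
    # Minimal usage sketch (Python 2; the parameter values below are made up
    # purely for illustration).
    get_params([1.234567891, 0.5], u_param=[0.012345678, 0.004])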
|
{"hexsha": "5f7442ef97fde7fb4185bc8d40641e0e851de9dc", "size": 1538, "ext": "py", "lang": "Python", "max_stars_repo_path": "userlib/analysislib/paco_analysis/fit_table.py", "max_stars_repo_name": "specialforcea/labscript_suite", "max_stars_repo_head_hexsha": "a4ad5255207cced671990fff94647b1625aa0049", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "userlib/analysislib/paco_analysis/fit_table.py", "max_issues_repo_name": "specialforcea/labscript_suite", "max_issues_repo_head_hexsha": "a4ad5255207cced671990fff94647b1625aa0049", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "userlib/analysislib/paco_analysis/fit_table.py", "max_forks_repo_name": "specialforcea/labscript_suite", "max_forks_repo_head_hexsha": "a4ad5255207cced671990fff94647b1625aa0049", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.0416666667, "max_line_length": 75, "alphanum_fraction": 0.611183355, "include": true, "reason": "import numpy", "num_tokens": 364}
|
//------------------------------------------------------------------------------
/*
This file is part of cbcd: https://github.com/cbc/cbcd
Copyright (c) 2012, 2013 cbc Labs Inc.
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================
#include <BeastConfig.h>
#include <cbc/app/paths/Credit.h>
#include <cbc/app/paths/Flow.h>
#include <cbc/app/paths/impl/AmountSpec.h>
#include <cbc/app/paths/impl/StrandFlow.h>
#include <cbc/app/paths/impl/Steps.h>
#include <cbc/basics/Log.h>
#include <cbc/protocol/IOUAmount.h>
#include <cbc/protocol/XRPAmount.h>
#include <boost/container/flat_set.hpp>
#include <numeric>
#include <sstream>
namespace cbc {
template<class FlowResult>
static
auto finishFlow (PaymentSandbox& sb,
Issue const& srcIssue, Issue const& dstIssue,
FlowResult&& f)
{
path::cbcCalc::Output result;
if (f.ter == tesSUCCESS)
f.sandbox->apply (sb);
else
result.removableOffers = std::move (f.removableOffers);
result.setResult (f.ter);
result.actualAmountIn = toSTAmount (f.in, srcIssue);
result.actualAmountOut = toSTAmount (f.out, dstIssue);
return result;
}
path::cbcCalc::Output
flow (
PaymentSandbox& sb,
STAmount const& deliver,
AccountID const& src,
AccountID const& dst,
STPathSet const& paths,
bool defaultPaths,
bool partialPayment,
bool ownerPaysTransferFee,
bool offerCrossing,
boost::optional<Quality> const& limitQuality,
boost::optional<STAmount> const& sendMax,
beast::Journal j,
path::detail::FlowDebugInfo* flowDebugInfo)
{
Issue const srcIssue = [&] {
if (sendMax)
return sendMax->issue ();
if (!isXRP (deliver.issue ().currency))
return Issue (deliver.issue ().currency, src);
return xrpIssue ();
}();
Issue const dstIssue = deliver.issue ();
boost::optional<Issue> sendMaxIssue;
if (sendMax)
sendMaxIssue = sendMax->issue ();
// convert the paths to a collection of strands. Each strand is the collection
// of account->account steps and book steps that may be used in this payment.
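// (Illustration only, not part of this function: for a cross-currency payment
// such as USD/alice -> EUR/bob, a single strand could consist of a direct step
// out of alice's account, a USD->EUR order-book step, and a direct step into
// bob's account; `flow` then draws liquidity from the candidate strands,
// preferring better-quality ones.)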
auto sr = toStrands (sb, src, dst, dstIssue, limitQuality, sendMaxIssue,
paths, defaultPaths, ownerPaysTransferFee, offerCrossing, j);
if (sr.first != tesSUCCESS)
{
path::cbcCalc::Output result;
result.setResult (sr.first);
return result;
}
auto& strands = sr.second;
if (j.trace())
{
j.trace() << "\nsrc: " << src << "\ndst: " << dst
<< "\nsrcIssue: " << srcIssue << "\ndstIssue: " << dstIssue;
j.trace() << "\nNumStrands: " << strands.size ();
for (auto const& curStrand : strands)
{
j.trace() << "NumSteps: " << curStrand.size ();
for (auto const& step : curStrand)
{
j.trace() << '\n' << *step << '\n';
}
}
}
const bool srcIsXRP = isXRP (srcIssue.currency);
const bool dstIsXRP = isXRP (dstIssue.currency);
auto const asDeliver = toAmountSpec (deliver);
// The src account may send either xrp or iou. The dst account may receive
// either xrp or iou. Since XRP and IOU amounts are represented by different
// types, use templates to tell `flow` about the amount types.
if (srcIsXRP && dstIsXRP)
{
return finishFlow (sb, srcIssue, dstIssue,
flow<XRPAmount, XRPAmount> (
sb, strands, asDeliver.xrp, partialPayment, offerCrossing,
limitQuality, sendMax, j, flowDebugInfo));
}
if (srcIsXRP && !dstIsXRP)
{
return finishFlow (sb, srcIssue, dstIssue,
flow<XRPAmount, IOUAmount> (
sb, strands, asDeliver.iou, partialPayment, offerCrossing,
limitQuality, sendMax, j, flowDebugInfo));
}
if (!srcIsXRP && dstIsXRP)
{
return finishFlow (sb, srcIssue, dstIssue,
flow<IOUAmount, XRPAmount> (
sb, strands, asDeliver.xrp, partialPayment, offerCrossing,
limitQuality, sendMax, j, flowDebugInfo));
}
assert (!srcIsXRP && !dstIsXRP);
return finishFlow (sb, srcIssue, dstIssue,
flow<IOUAmount, IOUAmount> (
sb, strands, asDeliver.iou, partialPayment, offerCrossing,
limitQuality, sendMax, j, flowDebugInfo));
}
} // cbc
|
{"hexsha": "9cf53d0e985ee61b7f65cea92e9ef0b2b55621a0", "size": 5160, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/cbc/app/paths/Flow.cpp", "max_stars_repo_name": "sergeym610/ripple_fork", "max_stars_repo_head_hexsha": "a2c3dbf8cd61adf02d32a32ea6c2c21e547a1367", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/cbc/app/paths/Flow.cpp", "max_issues_repo_name": "sergeym610/ripple_fork", "max_issues_repo_head_hexsha": "a2c3dbf8cd61adf02d32a32ea6c2c21e547a1367", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/cbc/app/paths/Flow.cpp", "max_forks_repo_name": "sergeym610/ripple_fork", "max_forks_repo_head_hexsha": "a2c3dbf8cd61adf02d32a32ea6c2c21e547a1367", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.0769230769, "max_line_length": 82, "alphanum_fraction": 0.6187984496, "num_tokens": 1306}
|
import OpenMORe.model_order_reduction as model_order_reduction
from OpenMORe.utilities import *
import matplotlib.pyplot as plt
import numpy as np
import os
#######################################################################################
# In this example it's shown how to use the Varimax rotation to increase the quality
# of the lower-dimensional manifold
#######################################################################################
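# Background note (a sketch of the criterion, stated independently of this
# script): varimax searches over orthogonal rotations of the loading matrix L
# and maximizes
#     sum_j [ mean_i(L_ij**4) - (mean_i(L_ij**2))**2 ],
# i.e. the summed variance of the squared loadings, which concentrates each
# variable's weight on few components and makes the factors easier to interpret.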
# Dictionary to load the input matrix, found in .csv format
file_options = {
"path_to_file" : os.path.abspath(os.path.join(__file__ ,"../../../data/reactive_flow/")),
"input_file_name" : "turbo2D.csv",
}
# Dictionary with the instructions for the Varimax rotation (found in PCA class):
settings ={
#centering and scaling options
"center" : True,
"centering_method" : "mean",
"scale" : True,
"scaling_method" : "auto",
#set the final dimensionality
"number_of_eigenvectors" : 7,
#enable to plot the cumulative explained variance
"enable_plot_variance" : True,
#set the number of the variable whose reconstruction must be plotted
"variable_to_plot" : 0,
}
# Load the input matrix:
X = readCSV(file_options["path_to_file"], file_options["input_file_name"])
#perform the dimensionality reduction via Principal Component Analysis,
#and return the eigenvectors of the reduced manifold
model = model_order_reduction.PCA(X, settings)
PCs, _ = model.fit()
#plot the original PC
model.plot_PCs()
#apply the varimax rotation algorithm from the utilities module
rotated = varimax_rotation(X, PCs)
#plot the rotated PC
fig = plt.figure()
axes = fig.add_axes([0.15,0.15,0.7,0.7], frameon=True)
x = np.linspace(1, X.shape[1], X.shape[1])
axes.bar(x, rotated[:,0])
axes.set_xlabel('Variables [-]')
axes.set_ylabel('Weights on the rotated PC number: {} [-]'.format(0))
plt.show()
|
{"hexsha": "94bf606b4769dce0cf6a55b2875b0b810ce2c844", "size": 1992, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/others_general/varimaxRotation.py", "max_stars_repo_name": "gdalessi/clustering", "max_stars_repo_head_hexsha": "79988ee565c9d1b00bbcd3c1dbd9a69d9c1c80f1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2020-11-05T15:19:16.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-24T17:17:36.000Z", "max_issues_repo_path": "examples/others_general/varimaxRotation.py", "max_issues_repo_name": "gdalessi/clustering", "max_issues_repo_head_hexsha": "79988ee565c9d1b00bbcd3c1dbd9a69d9c1c80f1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/others_general/varimaxRotation.py", "max_forks_repo_name": "gdalessi/clustering", "max_forks_repo_head_hexsha": "79988ee565c9d1b00bbcd3c1dbd9a69d9c1c80f1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-11-05T15:19:21.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-19T12:19:33.000Z", "avg_line_length": 33.7627118644, "max_line_length": 106, "alphanum_fraction": 0.6270080321, "include": true, "reason": "import numpy", "num_tokens": 446}
|
import unittest
import os
import numpy as np
from gym.spaces.box import Box
from pathlib import Path
# BARK imports
from bark.runtime.commons.parameters import ParameterServer
# BARK-ML imports
from bark_ml.library_wrappers.lib_tf2rl.runners.gail_runner import GAILRunner
from bark_ml.library_wrappers.lib_tf2rl.agents.gail_agent import BehaviorGAILAgent
# TF2RL imports:
from tf2rl.experiments.utils import restore_latest_n_traj
class PyTrainingBARKTests(unittest.TestCase):
"""This test aims to test whether the agent learns or not.
The aim is to learn that the expert action is always the multiplicative
of the observation by 2. The expert trajectories are generated accordingly.
The environment also gives back a reward 1 if the action is nearly the double
of the observation and -1 otherways. (The reward of the environment during training does not
matter actually, but it is easier to see this way, wheteher the agent learns or not.)
The agent is trained for a while, and the test passes if the accuracy of the trained agent on
some test observations is significantly better than it was before training.
"""
def setUp(self):
"""
Setup
"""
self.params = ParameterServer(
filename=os.path.join(
os.path.dirname(__file__),
"gail_data/params/gail_params_test_env.json"))
local_params = self.params["ML"]["GAILRunner"]["tf2rl"]
# creating the dirs for logging if they are not present already:
for key in ['logdir', 'model_dir', 'expert_path_dir']:
local_params[key] = os.path.join(Path.home(), local_params[key])
if not os.path.exists(local_params[key]):
os.makedirs(local_params[key])
# create environment
self.env = test_env()
        # Dummy expert trajectories: actions are exactly twice the
        # observations, and consecutive samples are paired as (obs, next_obs).
random_obses = np.random.uniform(
low=-1, high=1, size=(1001, 2)).astype(np.float32)
self.expert_trajs = {
'obses': random_obses[:-1, :],
'next_obses': random_obses[1:, :],
'acts': random_obses[:-1, :] * 2
}
# create agent and runner:
self.agent = BehaviorGAILAgent(
environment=self.env,
params=self.params
)
self.runner = GAILRunner(
environment=self.env,
agent=self.agent,
params=self.params,
expert_trajs=self.expert_trajs)
def test_training(self):
"""
tests the Train() method of the GAILRunner class with the test environment.
"""
test_obses = np.random.uniform(
low=-1, high=1, size=(100, 2)).astype(np.float32)
# evaluation before training:
actions = self.runner._agent.Act(test_obses)
accuracy_before = np.sum(
np.isclose(actions / test_obses, 2., rtol=5e-2, atol=1e-1)) / (
test_obses.shape[0] * test_obses.shape[1])
self.runner.Train()
# evaluation after training:
actions = self.runner._agent.Act(test_obses)
accuracy_after = np.sum(
np.isclose(actions / test_obses, 2., rtol=5e-2, atol=1e-1)) / (
test_obses.shape[0] * test_obses.shape[1])
# assert that the performance after training is much better:
if accuracy_before == 0:
self.assertGreater(accuracy_after, 0.4)
else:
self.assertGreater(accuracy_after, accuracy_before * 10)
class test_env():
"""simple environment to check whether the gail agent learns or not.
The environment gives back the reward 1 if:
action ~= obs * 2
The reward is -1 in every other case.
Every episode consists of 1 step.
"""
def __init__(self):
"""initializes the test environment."""
self.action_space = Box(low=np.array([-2, -2]), high=np.array([2, 2]))
self.observation_space = Box(low=np.array([-1, -1]), high=np.array([1, 1]))
self.reset()
    def step(self, action):
        """step function of the environment"""
        # the episode always ends after one step; reward is 1 if the action
        # is approximately twice the observation, -1 otherwise
        if np.allclose(2., action / self.obs, rtol=5e-2, atol=1e-1):
            reward = 1
        else:
            reward = -1
        return self.obs, reward, True, None
def reset(self):
"""resets the agent"""
self.obs = np.random.uniform(low=-1, high=1, size=(2,)).astype(np.float32)
return self.obs
if __name__ == '__main__':
unittest.main()
|
{"hexsha": "2e6dbe889433bcfc959f8d5b75f2d2a299acc053", "size": 4228, "ext": "py", "lang": "Python", "max_stars_repo_path": "bark_ml/tests/py_library_tf2rl_tests/py_gail_training_tests.py", "max_stars_repo_name": "GAIL-4-BARK/bark-ml", "max_stars_repo_head_hexsha": "c61c897842c2184ee842428e451bae3be2cd7242", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "bark_ml/tests/py_library_tf2rl_tests/py_gail_training_tests.py", "max_issues_repo_name": "GAIL-4-BARK/bark-ml", "max_issues_repo_head_hexsha": "c61c897842c2184ee842428e451bae3be2cd7242", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 24, "max_issues_repo_issues_event_min_datetime": "2020-05-05T13:53:17.000Z", "max_issues_repo_issues_event_max_datetime": "2020-08-13T15:58:51.000Z", "max_forks_repo_path": "bark_ml/tests/py_library_tf2rl_tests/py_gail_training_tests.py", "max_forks_repo_name": "Brucknem/bark-ml", "max_forks_repo_head_hexsha": "c61c897842c2184ee842428e451bae3be2cd7242", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.0303030303, "max_line_length": 96, "alphanum_fraction": 0.6752601703, "include": true, "reason": "import numpy", "num_tokens": 1076}
|
# libraries
const libeng = Ref{Ptr{Cvoid}}()
const libmx = Ref{Ptr{Cvoid}}()
const libmat = Ref{Ptr{Cvoid}}()
# matlab engine functions
const eng_open = Ref{Ptr{Cvoid}}()
const eng_close = Ref{Ptr{Cvoid}}()
const eng_set_visible = Ref{Ptr{Cvoid}}()
const eng_get_visible = Ref{Ptr{Cvoid}}()
const eng_output_buffer = Ref{Ptr{Cvoid}}()
const eng_eval_string = Ref{Ptr{Cvoid}}()
const eng_put_variable = Ref{Ptr{Cvoid}}()
const eng_get_variable = Ref{Ptr{Cvoid}}()
# mxarray functions
const mx_destroy_array = Ref{Ptr{Cvoid}}()
const mx_duplicate_array = Ref{Ptr{Cvoid}}()
# functions to access mxarray
const mx_free = Ref{Ptr{Cvoid}}()
const mx_get_classid = Ref{Ptr{Cvoid}}()
const mx_get_m = Ref{Ptr{Cvoid}}()
const mx_get_n = Ref{Ptr{Cvoid}}()
const mx_get_nelems = Ref{Ptr{Cvoid}}()
const mx_get_ndims = Ref{Ptr{Cvoid}}()
const mx_get_elemsize = Ref{Ptr{Cvoid}}()
const mx_get_data = Ref{Ptr{Cvoid}}()
const mx_get_dims = Ref{Ptr{Cvoid}}()
const mx_get_nfields = Ref{Ptr{Cvoid}}()
const mx_get_pr = Ref{Ptr{Cvoid}}()
const mx_get_pi = Ref{Ptr{Cvoid}}()
const mx_get_ir = Ref{Ptr{Cvoid}}()
const mx_get_jc = Ref{Ptr{Cvoid}}()
const mx_is_double = Ref{Ptr{Cvoid}}()
const mx_is_single = Ref{Ptr{Cvoid}}()
const mx_is_int64 = Ref{Ptr{Cvoid}}()
const mx_is_uint64 = Ref{Ptr{Cvoid}}()
const mx_is_int32 = Ref{Ptr{Cvoid}}()
const mx_is_uint32 = Ref{Ptr{Cvoid}}()
const mx_is_int16 = Ref{Ptr{Cvoid}}()
const mx_is_uint16 = Ref{Ptr{Cvoid}}()
const mx_is_int8 = Ref{Ptr{Cvoid}}()
const mx_is_uint8 = Ref{Ptr{Cvoid}}()
const mx_is_char = Ref{Ptr{Cvoid}}()
const mx_is_numeric = Ref{Ptr{Cvoid}}()
const mx_is_logical = Ref{Ptr{Cvoid}}()
const mx_is_complex = Ref{Ptr{Cvoid}}()
const mx_is_sparse = Ref{Ptr{Cvoid}}()
const mx_is_empty = Ref{Ptr{Cvoid}}()
const mx_is_struct = Ref{Ptr{Cvoid}}()
const mx_is_cell = Ref{Ptr{Cvoid}}()
# functions to create & delete MATLAB arrays
const mx_create_numeric_matrix = Ref{Ptr{Cvoid}}()
const mx_create_numeric_array = Ref{Ptr{Cvoid}}()
const mx_create_double_scalar = Ref{Ptr{Cvoid}}()
const mx_create_logical_scalar = Ref{Ptr{Cvoid}}()
const mx_create_sparse = Ref{Ptr{Cvoid}}()
const mx_create_sparse_logical = Ref{Ptr{Cvoid}}()
const mx_create_string = Ref{Ptr{Cvoid}}()
const mx_create_char_array = Ref{Ptr{Cvoid}}()
const mx_create_cell_array = Ref{Ptr{Cvoid}}()
const mx_create_struct_matrix = Ref{Ptr{Cvoid}}()
const mx_create_struct_array = Ref{Ptr{Cvoid}}()
const mx_get_cell = Ref{Ptr{Cvoid}}()
const mx_set_cell = Ref{Ptr{Cvoid}}()
const mx_get_field = Ref{Ptr{Cvoid}}()
const mx_set_field = Ref{Ptr{Cvoid}}()
const mx_get_field_bynum = Ref{Ptr{Cvoid}}()
const mx_get_fieldname = Ref{Ptr{Cvoid}}()
const mx_get_string = Ref{Ptr{Cvoid}}()
# load I/O mat functions
const mat_open = Ref{Ptr{Cvoid}}()
const mat_close = Ref{Ptr{Cvoid}}()
const mat_get_variable = Ref{Ptr{Cvoid}}()
const mat_put_variable = Ref{Ptr{Cvoid}}()
const mat_get_dir = Ref{Ptr{Cvoid}}()
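# A minimal sketch (not part of this file) of how such handles are typically
# filled in at runtime, assuming the MATLAB engine libraries can be located by
# the loader; the package's actual initialization logic may differ:
#
#   using Libdl
#   libeng[]          = Libdl.dlopen("libeng", Libdl.RTLD_GLOBAL)
#   eng_open[]        = Libdl.dlsym(libeng[], :engOpen)
#   eng_eval_string[] = Libdl.dlsym(libeng[], :engEvalString)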
|
{"hexsha": "63ef6f4036916dc67ec3bc9c01de16de245f1334", "size": 3187, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/init.jl", "max_stars_repo_name": "blegat/MATLAB.jl", "max_stars_repo_head_hexsha": "9c67743e609997060fe1aeab7d9137b7c1dceb67", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 163, "max_stars_repo_stars_event_min_datetime": "2016-09-02T22:18:24.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-24T08:23:12.000Z", "max_issues_repo_path": "src/init.jl", "max_issues_repo_name": "blegat/MATLAB.jl", "max_issues_repo_head_hexsha": "9c67743e609997060fe1aeab7d9137b7c1dceb67", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 138, "max_issues_repo_issues_event_min_datetime": "2016-08-27T04:25:23.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-14T14:57:44.000Z", "max_forks_repo_path": "src/init.jl", "max_forks_repo_name": "blegat/MATLAB.jl", "max_forks_repo_head_hexsha": "9c67743e609997060fe1aeab7d9137b7c1dceb67", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 38, "max_forks_repo_forks_event_min_datetime": "2016-10-29T04:01:52.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T15:04:45.000Z", "avg_line_length": 32.8556701031, "max_line_length": 50, "alphanum_fraction": 0.6645748353, "num_tokens": 996}
|
//------------------------------------------------------------------
// **MEDYAN** - Simulation Package for the Mechanochemical
// Dynamics of Active Networks, v4.0
//
// Copyright (2015-2018) Papoian Lab, University of Maryland
//
// ALL RIGHTS RESERVED
//
// See the MEDYAN web page for more information:
// http://www.medyan.org
//------------------------------------------------------------------
#include <cmath>
#include <algorithm>
#include "Output.h"
#include "SubSystem.h"
#include "OutputStruct.hpp"
#include "CompartmentGrid.h"
#include "Bead.h"
#include "BranchingPoint.h"
#include "Bubble.h"
#include "Cylinder.h"
#include "Filament.h"
#include "Linker.h"
#include "Structure/SurfaceMesh/Membrane.hpp"
#include "MotorGhost.h"
#include "Structure/SurfaceMesh/Vertex.hpp"
#include "Boundary.h"
#include "Compartment.h"
#include "GController.h"
#include "Compartment.h"
#include "SysParams.h"
#include "MathFunctions.h"
#include "CController.h"
#include "ChemSimImpl.h"
#include "CylinderVolumeFF.h"
#include "CylinderExclVolume.h"
#include "CylinderVolumeInteractions.h"
#include <Eigen/Core>
#include "MotorGhostInteractions.h"
#include "CCylinder.h"
#include "ChemNRMImpl.h"
using namespace mathfunc;
void BasicSnapshot::print(int snapshot) {
_outputFile.precision(10);
OutputStructSnapshot snapshots(snapshot);
snapshots.outputFromSystem(_outputFile);
//DEPRECATED AS OF 9/8/16
// //collect diffusing motors
// for(auto md: _subSystem->getCompartmentGrid()->getDiffusingMotors()) {
//
// int ID = get<0>(md);
// int type = get<1>(md);
//
// auto firstPoint = get<2>(md);
// auto secondPoint = get<3>(md);
//
// _outputFile << "MOTOR " << ID << " " << type << " " << 0 << endl;
//
// //print coordinates
// _outputFile<<firstPoint[0]<<" "<<firstPoint[1]<<" "<<firstPoint[2] << " ";
// _outputFile<<secondPoint[0]<<" "<<secondPoint[1]<<" "<<secondPoint[2];
//
// _outputFile << endl;
// }
_outputFile <<endl;
}
void BirthTimes::print(int snapshot) {
_outputFile.precision(10);
// print first line (snapshot number, time, number of filaments,
// linkers, motors, branchers, bubbles)
_outputFile << snapshot << " " << tau() << " " <<
Filament::numFilaments() << " " <<
Linker::numLinkers() << " " <<
MotorGhost::numMotorGhosts() << " " <<
BranchingPoint::numBranchingPoints() << " " <<
Bubble::numBubbles() << endl;
for(auto &filament : Filament::getFilaments()) {
//print first line (Filament ID, type, length, left_delta, right_delta)
_outputFile << "FILAMENT " << filament->getId() << " " <<
filament->getType() << " " <<
filament->getCylinderVector().size() + 1 << " " <<
filament->getDeltaMinusEnd() << " " << filament->getDeltaPlusEnd() << endl;
//print birth times
for (auto cylinder : filament->getCylinderVector()){
auto b = cylinder->getFirstBead();
_outputFile<< b->getBirthTime() << " ";
}
//last bead
_outputFile<< filament->getCylinderVector().back()
->getSecondBead()->getBirthTime();
_outputFile << endl;
}
for(auto &linker : Linker::getLinkers()) {
//print first line
_outputFile << "LINKER " << linker->getId()<< " " <<
linker->getType() << endl;
//print birth times
_outputFile << linker->getBirthTime() << " " <<
linker->getBirthTime() << endl;
}
for(auto &motor : MotorGhost::getMotorGhosts()) {
//print first line
//also contains a Bound(1) or unbound(0) qualifier
_outputFile << "MOTOR " << motor->getId() << " " << motor->getType() << " " << 1 << endl;
//print birth times
_outputFile << motor->getBirthTime() << " " <<
motor->getBirthTime() << endl;
}
//DEPRECATED AS OF 9/8/16
//
// //collect diffusing motors
// for(auto md: _subSystem->getCompartmentGrid()->getDiffusingMotors()) {
//
// int ID = get<0>(md);
// int type = get<1>(md);
//
// auto firstPoint = get<2>(md);
// auto secondPoint = get<3>(md);
//
// _outputFile << "MOTOR " << ID << " " << type << " " << 0 << endl;
//
// //print coordinates
// //print birth times
// _outputFile << 0 << " " << 0 << endl;
// }
for(auto &branch : BranchingPoint::getBranchingPoints()) {
//print first line
_outputFile << "BRANCHER " << branch->getId() << " " <<
branch->getType() << endl;
//print birth times
_outputFile << branch->getBirthTime() << endl;
}
for(auto &bubble : Bubble::getBubbles()) {
//print first line
_outputFile << "BUBBLE " << bubble->getId() << " " <<
bubble->getType() << endl;
//print birth times
_outputFile << bubble->getBirthTime() << endl;
}
_outputFile <<endl;
}
void Forces::print(int snapshot) {
_outputFile.precision(10);
// print first line (snapshot number, time, number of filaments,
    // linkers, motors, branchers, bubbles, membranes)
_outputFile << snapshot << " " << tau() << " " <<
Filament::numFilaments() << " " <<
Linker::numLinkers() << " " <<
MotorGhost::numMotorGhosts() << " " <<
BranchingPoint::numBranchingPoints() << " " <<
Bubble::numBubbles() << " " <<
Membrane::numMembranes() << endl;
for(auto &filament : Filament::getFilaments()) {
        //print first line (Filament ID, type, length, left_delta, right_delta)
_outputFile << "FILAMENT " << filament->getId() << " " <<
filament->getType() << " " <<
filament->getCylinderVector().size() + 1 << " " <<
filament->getDeltaMinusEnd() << " " << filament->getDeltaPlusEnd() << endl;
//print force
for (auto cylinder : filament->getCylinderVector()){
floatingpoint forceMag= cylinder->getFirstBead()->FDotF();
forceMag = sqrt(forceMag);
_outputFile<<forceMag << " ";
}
//print last bead force
floatingpoint forceMag = filament->getCylinderVector().back()->
getSecondBead()->FDotF();
forceMag = sqrt(forceMag);
_outputFile<<forceMag;
_outputFile << endl;
}
for(auto &linker : Linker::getLinkers()) {
//print first line
_outputFile << "LINKER " << linker->getId()<< " " <<
linker->getType() << endl;
//print stretch force
_outputFile << linker->getMLinker()->stretchForce << " " <<
linker->getMLinker()->stretchForce << endl;
}
for(auto &motor : MotorGhost::getMotorGhosts()) {
//print first line
//also contains a Bound(1) or unbound(0) qualifier
_outputFile << "MOTOR " << motor->getId() << " " << motor->getType() << " " << 1 << endl;
//print stretch force
_outputFile << motor->getMMotorGhost()->stretchForce << " " <<
motor->getMMotorGhost()->stretchForce << endl;
}
//DEPRECATED AS OF 9/8/16
// //collect diffusing motors
// for(auto md: _subSystem->getCompartmentGrid()->getDiffusingMotors()) {
//
// int ID = get<0>(md);
// int type = get<1>(md);
//
// auto firstPoint = get<2>(md);
// auto secondPoint = get<3>(md);
//
// _outputFile << "MOTOR " << ID << " " << type << " " << 0 << endl;
//
// //print coordinates
// //print birth times
// _outputFile << 0 << " " << 0 << endl;
// }
for(auto &branch : BranchingPoint::getBranchingPoints()) {
//print first line
_outputFile << "BRANCHER " << branch->getId() << " " <<
branch->getType() << endl;
//Nothing for branchers
_outputFile << 0.0 << endl;
}
for(auto &bubble : Bubble::getBubbles()) {
//print first line
_outputFile << "BUBBLE " << bubble->getId() << " " <<
bubble->getType() << endl;
//Nothing for bubbles
_outputFile << 0.0 << endl;
}
for(auto &membrane : Membrane::getMembranes()) {
//print first line (Membrane ID, type)
_outputFile << "Membrane " << membrane->getId() << " " <<
membrane->getType() << endl;
//print force
for(const auto& v : membrane->getMesh().getVertices()) {
const double forceMag = mathfunc::magnitude(v.attr.vertex->force);
_outputFile << forceMag << " ";
}
_outputFile << endl;
}
_outputFile <<endl;
}
void Tensions::print(int snapshot) {
_outputFile.precision(10);
// print first line (snapshot number, time, number of filaments,
    // linkers, motors, branchers, bubbles)
_outputFile << snapshot << " " << tau() << " " <<
Filament::numFilaments() << " " <<
Linker::numLinkers() << " " <<
MotorGhost::numMotorGhosts() << " " <<
BranchingPoint::numBranchingPoints() << " " <<
        Bubble::numBubbles() << endl;
for(auto &filament : Filament::getFilaments()) {
//print first line (Filament ID, type, length, left_delta, right_delta)
_outputFile << "FILAMENT " << filament->getId() << " " <<
filament->getType() << " " <<
filament->getCylinderVector().size() + 1 << " " <<
filament->getDeltaMinusEnd() << " " << filament->getDeltaPlusEnd() << endl;
        //print tension on each cylinder: stretching constant times extension
for (auto cylinder : filament->getCylinderVector()){
floatingpoint k = cylinder->getMCylinder()->getStretchingConst();
floatingpoint deltaL = cylinder->getMCylinder()->getLength() -
cylinder->getMCylinder()->getEqLength();
_outputFile<< k * deltaL << " ";
}
        //print tension on the last cylinder
Cylinder* cylinder = filament->getCylinderVector().back();
floatingpoint k = cylinder->getMCylinder()->getStretchingConst();
floatingpoint deltaL = cylinder->getMCylinder()->getLength() -
cylinder->getMCylinder()->getEqLength();
_outputFile<< k * deltaL;
_outputFile << endl;
}
for(auto &linker : Linker::getLinkers()) {
//print first line
_outputFile << "LINKER " << linker->getId()<< " " <<
linker->getType() << endl;
//print
_outputFile << linker->getMLinker()->stretchForce << " " <<
linker->getMLinker()->stretchForce << endl;
}
for(auto &motor : MotorGhost::getMotorGhosts()) {
//print first line
//also contains a Bound(1) or unbound(0) qualifier
_outputFile << "MOTOR " << motor->getId() << " " << motor->getType() << " " << 1 << endl;
//print
_outputFile << motor->getMMotorGhost()->stretchForce << " " <<
motor->getMMotorGhost()->stretchForce << endl;
}
//DEPRECATED AS OF 9/8/16
// //collect diffusing motors
// for(auto md: _subSystem->getCompartmentGrid()->getDiffusingMotors()) {
//
// int ID = get<0>(md);
// int type = get<1>(md);
//
// auto firstPoint = get<2>(md);
// auto secondPoint = get<3>(md);
//
// _outputFile << "MOTOR " << ID << " " << type << " " << 0 << endl;
//
// //print coordinates
// //print birth times
// _outputFile << 0 << " " << 0 << endl;
// }
for(auto &branch : BranchingPoint::getBranchingPoints()) {
//print first line
_outputFile << "BRANCHER " << branch->getId() << " " <<
branch->getType() << endl;
//Nothing for branchers
_outputFile << 0.0 << endl;
}
for(auto &bubble : Bubble::getBubbles()) {
//print first line
_outputFile << "BUBBLE " << bubble->getId() << " " <<
bubble->getType() << endl;
//Nothing for bubbles
_outputFile << 0.0 << endl;
}
_outputFile <<endl;
}
void WallTensions::print(int snapshot) {
_outputFile.precision(10);
// print first line (snapshot number, time, number of filaments,
    // linkers, motors, branchers, bubbles)
_outputFile << snapshot << " " << tau() << " " <<
Filament::numFilaments() << " " <<
Linker::numLinkers() << " " <<
MotorGhost::numMotorGhosts() << " " <<
BranchingPoint::numBranchingPoints() << " " <<
        Bubble::numBubbles() << endl;
for(auto &filament : Filament::getFilaments()) {
//print first line (Filament ID, type, length, left_delta, right_delta)
_outputFile << "FILAMENT " << filament->getId() << " " <<
filament->getType() << " " <<
filament->getCylinderVector().size() + 1 << " " <<
filament->getDeltaMinusEnd() << " " << filament->getDeltaPlusEnd() << endl;
        //print wall tension: pin spring force projected onto the boundary normal
for (auto cylinder : filament->getCylinderVector()){
floatingpoint k = SysParams::Mechanics().pinK;
Bead* b = cylinder->getFirstBead();
if(b->isPinned()) {
auto norm = _subSystem->getBoundary()->normal(b->pinnedPosition);
auto dirL = twoPointDirection(b->pinnedPosition, b->vcoordinate());
floatingpoint deltaL = twoPointDistance(b->vcoordinate(), b->pinnedPosition);
_outputFile<< k * deltaL * dotProduct(norm, dirL) << " ";
}
else
_outputFile << 0.0 << " ";
}
        //print wall tension for the last bead
Cylinder* cylinder = filament->getCylinderVector().back();
floatingpoint k = SysParams::Mechanics().pinK;
Bead* b = cylinder->getSecondBead();
if(b->isPinned()) {
auto norm = _subSystem->getBoundary()->normal(b->pinnedPosition);
auto dirL = twoPointDirection(b->pinnedPosition, b->vcoordinate());
floatingpoint deltaL = twoPointDistance(b->vcoordinate(), b->pinnedPosition);
_outputFile<< k * deltaL * dotProduct(norm, dirL) << " ";
}
else
_outputFile << 0.0 << " ";
_outputFile << endl;
}
for(auto &linker : Linker::getLinkers()) {
//print first line
_outputFile << "LINKER " << linker->getId()<< " " <<
linker->getType() << endl;
_outputFile << 0.0 << " " << 0.0 << endl;
}
for(auto &motor : MotorGhost::getMotorGhosts()) {
//print first line
_outputFile << "MOTOR " << motor->getId() << " " <<
motor->getType() << endl;
_outputFile << 0.0 << " " << 0.0 << endl;
}
for(auto &branch : BranchingPoint::getBranchingPoints()) {
//print first line
_outputFile << "BRANCHER " << branch->getId() << " " <<
branch->getType() << endl;
//Nothing for branchers
_outputFile << 0.0 << endl;
}
for(auto &bubble : Bubble::getBubbles()) {
//print first line
_outputFile << "BUBBLE " << bubble->getId() << " " <<
bubble->getType() << endl;
//Nothing for bubbles
_outputFile << 0.0 << endl;
}
_outputFile <<endl;
}
void Types::print(int snapshot) {
_outputFile.precision(10);
// print first line (snapshot number, time, number of filaments,
    // linkers, motors, branchers, bubbles)
_outputFile << snapshot << " " << tau() << " " <<
Filament::numFilaments() << " " <<
Linker::numLinkers() << " " <<
MotorGhost::numMotorGhosts() << " " <<
BranchingPoint::numBranchingPoints() << " " <<
Bubble::numBubbles() << endl;
for(auto &filament : Filament::getFilaments()) {
//print first line (Filament ID, type, length, left_delta, right_delta)
_outputFile << "FILAMENT " << filament->getId() << " " <<
filament->getType() << " " <<
filament->getCylinderVector().size() + 1 << " " <<
filament->getDeltaMinusEnd() << " " << filament->getDeltaPlusEnd() << endl;
        //print the type of each cylinder
for (auto cylinder : filament->getCylinderVector()){
_outputFile<< cylinder->getType() << " ";
}
        //print the last cylinder's type
Cylinder* cylinder = filament->getCylinderVector().back();
_outputFile<< cylinder->getType();
_outputFile << endl;
}
for(auto &linker : Linker::getLinkers()) {
//print first line
_outputFile << "LINKER " << linker->getId()<< " " <<
linker->getType() << endl;
_outputFile << linker->getType() << " " <<
linker->getType() << endl;
}
for(auto &motor : MotorGhost::getMotorGhosts()) {
//print first line
//also contains a Bound(1) or unbound(0) qualifier
_outputFile << "MOTOR " << motor->getId() << " " << motor->getType() << " " << 1 << endl;
_outputFile << motor->getType() << " " <<
motor->getType() << endl;
}
//DEPRECATED AS OF 9/8/16
// //collect diffusing motors
// for(auto md: _subSystem->getCompartmentGrid()->getDiffusingMotors()) {
//
// int ID = get<0>(md);
// int type = get<1>(md);
//
// auto firstPoint = get<2>(md);
// auto secondPoint = get<3>(md);
//
// _outputFile << "MOTOR " << ID << " " << type << " " << 0 << endl;
//
// //print coordinates
// //print birth times
// _outputFile << type << " " << type << endl;
// }
for(auto &branch : BranchingPoint::getBranchingPoints()) {
//print first line
_outputFile << "BRANCHER " << branch->getId() << " " <<
branch->getType() << endl;
//Nothing for branchers
_outputFile << branch->getType() << endl;
}
for(auto &bubble : Bubble::getBubbles()) {
//print first line
_outputFile << "BUBBLE " << bubble->getId() << " " <<
bubble->getType() << endl;
//Nothing for bubbles
_outputFile << bubble->getType() << endl;
}
_outputFile <<endl;
}
void Chemistry::print(int snapshot) {
// print first line (snapshot number, time)
_outputFile << snapshot << " " << tau() << endl;
// all diffusing and bulk species
for(auto sd : _chemData.speciesDiffusing) {
string name = get<0>(sd);
auto copyNum = _grid->countDiffusingSpecies(name);
_outputFile << name << ":DIFFUSING " << copyNum << endl;
}
for(auto sb : _chemData.speciesBulk) {
string name = get<0>(sb);
auto copyNum = _grid->countBulkSpecies(name);
_outputFile << name << ":BULK " << copyNum << endl;
}
for(int filType = 0; filType < SysParams::Chemistry().numFilaments; filType++) {
for(auto sf : _chemData.speciesFilament[filType]) {
auto copyNum = Filament::countSpecies(filType, sf);
_outputFile << sf << ":FILAMENT " << copyNum << endl;
}
for(auto sp : _chemData.speciesPlusEnd[filType]) {
auto copyNum = Filament::countSpecies(filType, sp);
_outputFile << sp << ":PLUSEND " << copyNum << endl;
}
for(auto sm : _chemData.speciesMinusEnd[filType]) {
auto copyNum = Filament::countSpecies(filType, sm);
_outputFile << sm << ":MINUSEND " << copyNum << endl;
}
for(auto sl : _chemData.speciesLinker[filType]) {
auto copyNum = Linker::countSpecies(sl);
_outputFile << sl << ":LINKER " << copyNum << endl;
}
for(auto sm : _chemData.speciesMotor[filType]) {
auto copyNum = MotorGhost::countSpecies(sm);
_outputFile << sm << ":MOTOR " << copyNum << endl;
}
for(auto sb : _chemData.speciesBrancher[filType]) {
auto copyNum = BranchingPoint::countSpecies(sb);
_outputFile << sb << ":BRANCHER " << copyNum << endl;
}
}
_outputFile <<endl;
}
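// (Illustration of the resulting format, using a hypothetical species name:
//  each block starts with "<snapshot> <tau>" and is followed by lines such as
//  "Actin:DIFFUSING 12345", one per species and qualifier.)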
void MotorLifetimes::print(int snapshot) {
_outputFile.precision(3);
// print first line (snapshot number, time)
_outputFile << snapshot << " " << tau() << " " << endl;
MotorGhost::getLifetimes()->print(_outputFile);
_outputFile << endl << endl;
//clear list
MotorGhost::getLifetimes()->clearValues();
}
void MotorWalkLengths::print(int snapshot) {
_outputFile.precision(3);
// print first line (snapshot number, time)
_outputFile << snapshot << " " << tau() << " " << endl;
MotorGhost::getWalkLengths()->print(_outputFile);
_outputFile << endl << endl;
//clear list
MotorGhost::getWalkLengths()->clearValues();
}
void LinkerLifetimes::print(int snapshot) {
_outputFile.precision(3);
// print first line (step number, time)
_outputFile << snapshot << " " << tau() << " " << endl;
Linker::getLifetimes()->print(_outputFile);
_outputFile << endl << endl;
//clear list
Linker::getLifetimes()->clearValues();
}
void FilamentTurnoverTimes::print(int snapshot) {
_outputFile.precision(3);
// print first line (step number, time)
_outputFile << snapshot << " " << tau() << " " << endl;
Filament::getTurnoverTimes()->print(_outputFile);
_outputFile << endl << endl;
}
void Dissipation::print(int snapshot) {
_outputFile.precision(16);
// print first line (snapshot number, time)
_outputFile << snapshot << " " << tau() << endl;
vector<floatingpoint> energies;
energies = _cs->getEnergy();
_outputFile << energies[0] << " " << energies[1] << " "<< energies[2]<<" "<<energies[3]<<" "<<energies[4];
_outputFile <<endl;
}
void HRCD::print(int snapshot) {
_outputFile.precision(16);
DissipationTracker* dt = _cs->getDT();
vector<tuple<string, floatingpoint>> hrcdvec = dt->getHRCDVec();
// print first line (snapshot number, time)
_outputFile << snapshot << " " << tau() << endl;
for(auto &i : hrcdvec){
_outputFile<<get<0>(i)<<" ";
}
_outputFile<<endl;
for(auto &i : hrcdvec){
_outputFile<<get<1>(i)<<" ";
}
_outputFile<<endl<<endl;
}
void HRMD::print(int snapshot) {
_outputFile.precision(16);
DissipationTracker* dt = _cs->getDT();
// print first line (snapshot number, time)
vector<tuple<string, floatingpoint>> cumHRMDMechEnergy = dt->getCumHRMDMechEnergy();
vector<tuple<string, floatingpoint>> cumHRMDMechDiss = dt->getCumHRMDMechDiss();
_outputFile << snapshot << " " << tau() << endl;
// write row of names
for(auto i = 0; i < cumHRMDMechEnergy.size(); i++){
_outputFile << get<0>(cumHRMDMechEnergy[i]) << " ";
}
_outputFile<<endl;
// write row of mech energy
for(auto i = 0; i < cumHRMDMechEnergy.size(); i++){
_outputFile << get<1>(cumHRMDMechEnergy[i]) << " ";
}
_outputFile<<endl;
// write row of mech diss, assuming names are same
for(auto i = 0; i < cumHRMDMechEnergy.size(); i++){
for(auto j = 0; j < cumHRMDMechEnergy.size(); j++){
if(get<0>(cumHRMDMechDiss[j]) == get<0>(cumHRMDMechEnergy[i])){
_outputFile << get<1>(cumHRMDMechDiss[j]) << " ";
}
}
}
_outputFile<<endl;
#ifdef PRINTENERGYBEFOREANDAFTER
//Print mech energies before and after minimization
// write row of mech energy
auto HRMDMechEnergyMat = dt->getHRMDmat();
for(auto i = 0; i < HRMDMechEnergyMat.size(); i++){
for(auto j = 0; j < HRMDMechEnergyMat[i].size(); j++) {
_outputFile << get<1>(HRMDMechEnergyMat[i][j]) << " ";
}
_outputFile<<endl;
}
_outputFile<<endl;
#endif
dt->clearHRMDMats();
_outputFile<<endl<<endl;
}
void PlusEnd::print(int snapshot) {
_outputFile.precision(10);
// print first line (snapshot number, time, number of filaments,
    // linkers, motors, branchers, bubbles)
_outputFile << snapshot << " " << tau() << " " <<
Filament::numFilaments() << " " <<
Linker::numLinkers() << " " <<
MotorGhost::numMotorGhosts() << " " <<
BranchingPoint::numBranchingPoints() << " " <<
        Bubble::numBubbles() << endl;
for(auto &filament : Filament::getFilaments()) {
//print first line (Filament ID, type, length, left_delta, right_delta)
_outputFile <<"FILAMENT " << filament->getId() << " " <<
filament->getType() << " " <<
filament->getCylinderVector().size() + 1 << " " <<
filament->getDeltaMinusEnd() << " " << filament->getDeltaPlusEnd() << endl;
//print plus end
auto x = filament->getCylinderVector().back()->getSecondBead()->vcoordinate();
_outputFile<<x[0]<<" "<<x[1]<<" "<<x[2]<<" \n";
for (int i=0; i<filament->getCylinderVector().back()->getCCylinder()->getSize(); i++) {
int out=filament->getCylinderVector().back()->getCCylinder()->getCMonomer(i)->activeSpeciesPlusEnd();
if(out !=-1) {_outputFile << "PLUSEND: " << out << endl;}
}
//print minus end
x = filament->getCylinderVector().front()->getFirstBead()->vcoordinate();
_outputFile<<x[0]<<" "<<x[1]<<" "<<x[2]<<" \n";
for (int i=0; i<filament->getCylinderVector().front()->getCCylinder()->getSize(); i++) {
int out=filament->getCylinderVector().front()->getCCylinder()->getCMonomer(i)->activeSpeciesMinusEnd();
if(out !=-1) {_outputFile << "MINUSEND: " << out << endl;}
}
}
_outputFile << endl;
}
void CMGraph::print(int snapshot) {
// print first line (snapshot number, time)
_outputFile << snapshot << " " << tau() << endl;
    //key stores the two filament IDs of a pair packed into one 64-bit value.
    //value stores count of linkers, motors and branchers connecting two filament IDs.
    map<uint64_t, array<int, 3>> filpaircounter;
    int shiftbybits = sizeof(uint32_t) * 8;
//Get filament pairs involved in each linker
for(auto &linker : Linker::getLinkers()) {
uint32_t fid1 = linker->getFirstCylinder()->getFilID();
uint32_t fid2 = linker->getSecondCylinder()->getFilID();
shiftbybits = sizeof(fid1)*8;
uint64_t tempkey;
if(fid1<fid2) {
tempkey = fid1;
tempkey = tempkey << shiftbybits;
tempkey = tempkey|fid2;
}
else {
tempkey = fid2;
tempkey = tempkey << shiftbybits;
tempkey = tempkey|fid1;
}
filpaircounter[tempkey][0] = filpaircounter[tempkey][0]+1;
}
//Get filament pairs involved in each motor
for(auto &motor : MotorGhost::getMotorGhosts()) {
uint32_t fid1 = motor->getFirstCylinder()->getFilID();
uint32_t fid2 = motor->getSecondCylinder()->getFilID();
shiftbybits = sizeof(fid1)*8;
uint64_t tempkey;
if(fid1<fid2) {
tempkey = fid1;
tempkey = tempkey << shiftbybits;
tempkey = tempkey|fid2;
}
else {
tempkey = fid2;
tempkey = tempkey << shiftbybits;
tempkey = tempkey|fid1;
}
filpaircounter[tempkey][1] = filpaircounter[tempkey][1]+1;
}
//Get mother and daughter filament pairs involved in each brancher
for(auto &brancher : BranchingPoint::getBranchingPoints()) {
uint32_t fid1 = brancher->getFirstCylinder()->getFilID();
uint32_t fid2 = brancher->getSecondCylinder()->getFilID();
shiftbybits = sizeof(fid1)*8;
uint64_t tempkey;
if(fid1<fid2) {
tempkey = fid1;
tempkey = tempkey << shiftbybits;
tempkey = tempkey|fid2;
}
else {
tempkey = fid2;
tempkey = tempkey << shiftbybits;
tempkey = tempkey|fid1;
}
filpaircounter[tempkey][2] = filpaircounter[tempkey][2]+1;
}
uint64_t mask = (uint64_t(1) << 32) - 1;
for(auto const& i: filpaircounter){
uint64_t tempkey = i.first;
auto tempvalue = i.second;
uint64_t fID1 = tempkey >> shiftbybits;
uint64_t fID2 = mask & tempkey;
_outputFile<<fID1<<" "<<fID2<<" "<<
tempvalue[0] <<" "<< tempvalue[1] << " " << tempvalue[2]<< " ";
}
_outputFile<<endl<<endl;
}
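// A minimal sketch (illustration only, not part of MEDYAN) of the key packing
// used above: the smaller 32-bit filament ID occupies the high 32 bits of the
// 64-bit key, so each unordered pair of IDs maps to exactly one map key.
//
//   uint64_t packFilPair(uint32_t a, uint32_t b) {
//       if(a > b) std::swap(a, b);
//       return (uint64_t(a) << 32) | uint64_t(b);
//   }
//   // unpacking: fID1 = key >> 32;  fID2 = key & 0xffffffffu;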
void TMGraph::print(int snapshot) {
//_outputFile.precision(10);
// print first line (snapshot number, time)
_outputFile << snapshot << " " << tau() << endl;
vector<tuple<vector<int>,floatingpoint>> filIDVec;
for(auto &linker : Linker::getLinkers()) {
int fid1 = linker->getFirstCylinder()->getFilID();
int fid2 = linker->getSecondCylinder()->getFilID();
vector<int> pair;
pair.push_back(fid1);
pair.push_back(fid2);
floatingpoint tension = abs(linker->getMLinker()->stretchForce);
sort(pair.begin(),pair.end());
filIDVec.push_back(make_tuple(pair,tension));
}
for(auto &motor : MotorGhost::getMotorGhosts()) {
int fid1 = motor->getFirstCylinder()->getFilID();
int fid2 = motor->getSecondCylinder()->getFilID();
vector<int> pair;
pair.push_back(fid1);
pair.push_back(fid2);
floatingpoint tension = abs(motor->getMMotorGhost()->stretchForce);
sort(pair.begin(),pair.end());
filIDVec.push_back(make_tuple(pair,tension));
}
vector<vector<int>> uniqueFilIDVec;
vector<tuple<vector<int>,floatingpoint>> uniqueFilIDVecSum;
for(auto j : filIDVec){
vector<int> i = get<0>(j);
if(find(uniqueFilIDVec.begin(), uniqueFilIDVec.end(), i) != uniqueFilIDVec.end()) {
int ind = find(uniqueFilIDVec.begin(), uniqueFilIDVec.end(), i) - uniqueFilIDVec.begin();
get<1>(uniqueFilIDVecSum.at(ind)) += get<1>(j);
} else {
vector<int> pbVec;
pbVec.push_back(i[0]);
pbVec.push_back(i[1]);
//pbVec.push_back(get<1>(j));
uniqueFilIDVecSum.push_back(make_tuple(pbVec,get<1>(j)));
uniqueFilIDVec.push_back(i);
}
}
for(auto i: uniqueFilIDVecSum){
_outputFile<< get<0>(i)[0] <<" "<< get<0>(i)[1] << " " << get<1>(i) << " ";
}
_outputFile<<endl<<endl;
}
void ReactionOut::print(int snapshot) {
_outputFile.precision(10);
// print first line (snapshot number, time, number of filaments,
    // linkers, motors, branchers, bubbles)
_outputFile << snapshot << " " << tau() << " " <<
Filament::numFilaments() << " " <<
Linker::numLinkers() << " " <<
MotorGhost::numMotorGhosts() << " " <<
BranchingPoint::numBranchingPoints() << " " <<
        Bubble::numBubbles() << endl;
for(auto &filament : Filament::getFilaments()) {
int numMonomer = 2; // 2 for plus/minus end
for (auto c : filament->getCylinderVector()) {
for (int i=0; i < c->getCCylinder()->getSize(); i++) {
auto FilamentMonomer = c->getCCylinder()-> getCMonomer(i)->activeSpeciesFilament();
if(FilamentMonomer != -1) {numMonomer ++;}
}
}
//print first line (Filament ID, type, length, left_delta, right_delta)
_outputFile <<"FILAMENT " << filament->getId() << " " <<
filament->getType() << " " <<
filament->getCylinderVector().size() + 1 << " " <<
filament->getDeltaMinusEnd() << " " << filament->getDeltaPlusEnd() << "\n"<<
filament->getDeltaMinusEnd() << " " << filament->getDeltaPlusEnd() << " " <<
filament->getPolyMinusEnd() << " " << filament->getPolyPlusEnd() << " " <<
filament->getDepolyMinusEnd() << " " << filament->getDepolyPlusEnd() << " " <<
filament->getNucleation() << " " << numMonomer << endl;
_outputFile << "SEVERING " << filament->getSevering() << endl;
if (filament->getNewID().size() == 0) {
_outputFile << "-1";
}
else {
for (int i = 0; i < filament->getNewID().size(); ++i) {
_outputFile << filament->getNewID()[i] << " ";
}
}
_outputFile << endl;
}
_outputFile << endl;
}
void BRForces::print(int snapshot) {
_outputFile.precision(10);
// print first line (snapshot number, time, number of filaments,
    // linkers, motors, branchers, bubbles)
_outputFile << snapshot << " " << tau() << " " <<
Filament::numFilaments() << " " <<
Linker::numLinkers() << " " <<
MotorGhost::numMotorGhosts() << " " <<
BranchingPoint::numBranchingPoints() << " " <<
Bubble::numBubbles() << endl;
for(auto &filament : Filament::getFilaments()) {
        //print first line (Filament ID, type, length, left_delta, right_delta)
_outputFile << "FILAMENT " << filament->getId() << " " <<
filament->getType() << " " <<
filament->getCylinderVector().size() + 1 << " " <<
filament->getDeltaMinusEnd() << " " << filament->getDeltaPlusEnd() << endl;
//print force
for (auto cylinder : filament->getCylinderVector()){
floatingpoint forceMag= cylinder->getFirstBead()->brFDotbrF();
forceMag = sqrt(forceMag);
_outputFile<<forceMag << " ";
}
//print last bead force
floatingpoint forceMag = filament->getCylinderVector().back()->
getSecondBead()->brFDotbrF();
forceMag = sqrt(forceMag);
_outputFile<<forceMag;
_outputFile << endl;
}
for(auto &linker : Linker::getLinkers()) {
//print first line
_outputFile << "LINKER " << linker->getId()<< " " <<
linker->getType() << endl;
//print stretch force
_outputFile << linker->getMLinker()->stretchForce << " " <<
linker->getMLinker()->stretchForce << endl;
}
for(auto &motor : MotorGhost::getMotorGhosts()) {
//print first line
//also contains a Bound(1) or unbound(0) qualifier
_outputFile << "MOTOR " << motor->getId() << " " << motor->getType() << " " << 1 << endl;
//print stretch force
_outputFile << motor->getMMotorGhost()->stretchForce << " " <<
motor->getMMotorGhost()->stretchForce << endl;
}
}
void Concentrations::print(int snapshot) {
_outputFile << snapshot << " " << tau() << endl;
for(auto c : _subSystem->getCompartmentGrid()->getCompartments()) {
if(c->isActivated()) {
_outputFile << "COMPARTMENT: " << c->coordinates()[0] << " "
<< c->coordinates()[1] << " " << c->coordinates()[2] << endl;
for(auto sd : _chemData.speciesDiffusing) {
string name = get<0>(sd);
auto s = c->findSpeciesByName(name);
auto copyNum = s->getN();
_outputFile << name << ":DIFFUSING " << copyNum << endl;
}
}
}
_outputFile << endl;
}
void MotorWalkingEvents::print(int snapshot) {
DissipationTracker* dt = _cs->getDT();
vector<tuple<int, floatingpoint, floatingpoint, floatingpoint, floatingpoint,
floatingpoint>> motorData = dt->getMotorWalkingData();
    for(const auto& line : motorData){
        //ID coordx coordy coordz birthtime walktime
        _outputFile << get<0>(line) << " " << get<1>(line) << " " << get<2>(line) << " "
                    << get<3>(line) << " " << get<4>(line) << " " << get<5>(line) << endl;
    }
dt->clearMotorData();
}
void LinkerUnbindingEvents::print(int snapshot) {
DissipationTracker* dt = _cs->getDT();
vector<tuple<int, floatingpoint, floatingpoint, floatingpoint, floatingpoint,
floatingpoint>>
linkerUnbindingData = dt->getLinkerUnbindingData();
    for(const auto& line : linkerUnbindingData){
        //ID coordx coordy coordz birthtime unbindingtime
        _outputFile << get<0>(line) << " " << get<1>(line) << " " << get<2>(line) << " "
                    << get<3>(line) << " " << get<4>(line) << " " << get<5>(line) << endl;
    }
}
dt->clearLinkerUnbindingData();
}
void MotorUnbindingEvents::print(int snapshot) {
DissipationTracker* dt = _cs->getDT();
vector<tuple<int, floatingpoint, floatingpoint, floatingpoint, floatingpoint,
floatingpoint>>
motorUnbindingData = dt->getMotorUnbindingData();
    for(const auto& line : motorUnbindingData){
        //ID coordx coordy coordz birthtime unbindingtime
        _outputFile << get<0>(line) << " " << get<1>(line) << " " << get<2>(line) << " "
                    << get<3>(line) << " " << get<4>(line) << " " << get<5>(line) << endl;
    }
}
dt->clearMotorUnbindingData();
}
void LinkerBindingEvents::print(int snapshot) {
DissipationTracker* dt = _cs->getDT();
vector<tuple<int, floatingpoint, floatingpoint, floatingpoint, floatingpoint>>
linkerBindingData = dt->getLinkerBindingData();
    for(const auto& line : linkerBindingData){
        //ID coordx coordy coordz birthtime
        _outputFile << get<0>(line) << " " << get<1>(line) << " " << get<2>(line) << " "
                    << get<3>(line) << " " << get<4>(line) << endl;
    }
}
dt->clearLinkerBindingData();
}
void ForcesOutput::print(int snapshot) {
// snapshot serial
_outputFile << snapshot << ' ' << tau() << '\n';
LOG(ERROR) << "Force output is currently not usable.";
throw std::runtime_error("Forces output not available.");
// force field forces
for(auto ff : ffm_->_forceFields) {
const auto& fb = ff->getForceBuffer();
_outputFile
<< ff->getName() << '\n'
<< fb.size() << " ";
for(const auto& x : fb) _outputFile << x << ' ';
_outputFile << '\n';
}
_outputFile << endl;
}
void IndicesOutput::print(int snapshot) {
// snapshot serial
_outputFile << snapshot << ' ' << tau() << '\n';
LOG(ERROR) << "Index output is currently not available.";
throw std::runtime_error("Index output not available.");
// Filaments
for(auto f : Filament::getFilaments()) {
_outputFile << "FILAMENT "
<< f->getId() << ' '
<< f->getType() << ' '
<< f->getCylinderVector().size() + 1 << '\n';
for(auto c : f->getCylinderVector())
_outputFile << c->getFirstBead()->getStableIndex() << ' ';
_outputFile << f->getCylinderVector().back()->getSecondBead()->getStableIndex()
<< '\n';
}
// Membranes
for(auto m : Membrane::getMembranes()) {
const auto& mesh = m->getMesh();
_outputFile << "MEMBRANE "
<< m->getId() << ' '
<< m->getType() << ' '
<< mesh.numVertices() << ' '
<< mesh.numTriangles() << '\n';
for(const auto& v : mesh.getVertices()) {
_outputFile << v.attr.vertex->getIndex() << ' ';
}
_outputFile << '\n';
}
_outputFile << std::endl;
}
void Datadump::print(int snapshot) {
_outputFile.close();
_outputFile.open(_outputFileName, std::ofstream::trunc);
_outputFile.precision(15);
if(!_outputFile.is_open()) {
cout << "There was an error opening file " << _outputFileName
<< " for output. Exiting." << endl;
exit(EXIT_FAILURE);
}
//Rearrange bead and cylinder data to create a continuous array.
Bead::rearrange();
Cylinder::updateAllData();
Cylinder::rearrange();
_outputFile << snapshot << " " << tau() << endl;
_outputFile <<"NFIL NCYL NBEAD NLINK NMOTOR NBRANCH NBUBBLE"<<endl;
_outputFile << Filament::numFilaments() << " " <<
Cylinder::numCylinders()<<" "<<
Bead::numBeads()<<" "<<
Linker::numLinkers() << " " <<
MotorGhost::numMotorGhosts() << " " <<
BranchingPoint::numBranchingPoints() << " " <<
Bubble::numBubbles() << endl;
//Bead data
_outputFile <<"BEAD DATA: BEADIDX(STABLE) FID FPOS COORDX COORDY COORDZ FORCEAUXX "
"FORCEAUXY FORCEAUXZ"<<endl;
for(auto b:Bead::getBeads()){
auto bidx = b->getStableIndex();
Filament* f = static_cast<Filament*>(b->getParent());
_outputFile <<bidx<<" "<<f->getId()<<" "<<b->getPosition()<<" "
<< b->coord[0] << ' ' << b->coord[1] << ' ' << b->coord[2] << ' '
<< b->force[0] << ' ' << b->force[1] << ' ' << b->force[2] << endl;
}
_outputFile <<endl;
/*for(int bidx = 0; bidx<Bead::rawNumStableElements(); bidx++){
_outputFile <<bidx<<" "<<beadData.coords.data()[3*bidx]<<" "<<beadData.coords.data()[3*bidx + 1]<<" "
<<beadData.coords.data()[3*bidx + 2]<<" "<<beadData.forcesAux.data()[3*bidx]<<" "<<beadData
.forcesAux.data()[3*bidx + 1]<<" "<<beadData.forcesAux.data()[3*bidx + 2]<<endl;
}
_outputFile <<endl;*/
//Cylinder data
_outputFile <<"CYLINDER DATA: CYLIDX(STABLE) FID FTYPE FPOS B1_IDX B2_IDX "
"MINUSENDSTATUS PLUSENDSTATUS MINUSENDTYPE "
"PLUSENDTYPE MINUSENDMONOMER PLUSENDMONOMER TOTALMONOMERS EQLEN"<<endl;
const auto& cylinderInfoData = Cylinder::getDbData().value;
for(int cidx = 0; cidx < Cylinder::rawNumStableElements(); cidx++){
Cylinder* cyl = cylinderInfoData[cidx].chemCylinder->getCylinder();
CCylinder* ccyl = cylinderInfoData[cidx].chemCylinder;
short filamentType = cyl->getType();
int numMonomers = SysParams::Geometry().cylinderNumMon[filamentType];
short minusendmonomer = 0;
short plusendmonomer = numMonomers-1;
short minusendtype = -1;
short plusendtype = -1;
short foundstatus = 0; //0 none found, 1 found one end, 2 found both ends
bool minusendstatus = true;
bool plusendstatus = true;
for(int midx = 0; midx<numMonomers; midx++){
if(foundstatus ==2)
break;
short m = ccyl->getCMonomer(midx)->activeSpeciesMinusEnd();
short p = ccyl->getCMonomer(midx)->activeSpeciesPlusEnd();
if(m != -1) {
foundstatus++;
minusendtype = m;
minusendmonomer = midx;
}
if(p != -1) {
plusendtype = p;
foundstatus++;
plusendmonomer = midx;
}
}
if(minusendtype == -1){
minusendstatus = false;
}
if(plusendtype == -1){
plusendstatus = false;
}
/*Cidx minus-end plus-end num-monomers*/
_outputFile <<cidx<<" "<<cylinderInfoData[cidx].filamentId<<" "
<<filamentType<<" "<<cylinderInfoData[cidx].positionOnFilament<<" "
<<cyl->getFirstBead()->getStableIndex()<<" "
<<cyl->getSecondBead()->getStableIndex()<<" "
<<minusendstatus<<" "<<plusendstatus<<" "<<minusendtype<<" "
<<plusendtype<<" "<<minusendmonomer<<" "<<plusendmonomer<<" "
<<(plusendmonomer-minusendmonomer)+1<<" "
<<cyl->getMCylinder()->getEqLength()<<endl;
}
_outputFile <<endl;
//Filament Data
_outputFile <<"FILAMENT DATA: FILID FTYPE CYLIDvec"<<endl;
for(auto fil : Filament::getFilaments()){
_outputFile <<fil->getId()<<" "<<fil->getType()<<" ";
for(auto cyl :fil->getCylinderVector()){
_outputFile << cyl->getStableIndex()<<" ";
}
_outputFile << endl;
}
_outputFile <<endl;
//Linker Data
_outputFile <<"LINKER DATA: LINKERID LINKERTYPE CYL1_IDX CYL2_IDX POS1 POS2 "
"EQLEN DIFFUSINGSPECIESNAME"<<endl;
for(auto l :Linker::getLinkers()){
Cylinder* cyl1 = l->getFirstCylinder();
Cylinder* cyl2 = l->getSecondCylinder();
float pos1 = l->getFirstPosition()*SysParams::Geometry()
.cylinderNumMon[cyl1->getType()];
float pos2 = l->getSecondPosition()*SysParams::Geometry()
.cylinderNumMon[cyl2->getType()];
_outputFile <<l->getId()<<" "<<l->getType()<<" "<<cyl1->getStableIndex()<<" "
<<cyl2->getStableIndex()<<" "<<pos1<<" "<<pos2<<" "
<<l->getMLinker()->getEqLength()<<" "<<l->getCLinker()
->getDiffusingSpecies()->getName()<<endl;
}
_outputFile <<endl;
//MOTOR Data
_outputFile <<"MOTOR DATA: MOTORID MOTORTYPE CYL1_IDX CYL2_IDX POS1 POS2 EQLEN "
"DIFFUSINGSPECIESNAME NUMHEADS NUMBOUNDHEADS"<<endl;
int counter = 0;
auto individualenergiesvec = MotorGhostInteractions::individualenergies;
auto tpdistvec = MotorGhostInteractions::tpdistvec;
auto eqlvec = MotorGhostInteractions::eqlvec;
auto kstrvec = MotorGhostInteractions::kstrvec;
for(auto l :MotorGhost::getMotorGhosts()){
Cylinder* cyl1 = l->getFirstCylinder();
Cylinder* cyl2 = l->getSecondCylinder();
float pos1 = l->getFirstPosition()*SysParams::Geometry()
.cylinderNumMon[cyl1->getType()];
float pos2 = l->getSecondPosition()*SysParams::Geometry()
.cylinderNumMon[cyl2->getType()];
_outputFile <<l->getId()<<" "<<l->getType()<<" "<<cyl1->getStableIndex()<<" "
<<cyl2->getStableIndex()<<" "<<pos1<<" "<<pos2<<" "
<<l->getMMotorGhost()->getEqLength()<<" "<<l->getCMotorGhost()
->getDiffusingSpecies()->getName()<<" "<<l->getNumHeads()<<" "
<<l->getnumBoundHeads()<<endl;
counter++;
}
_outputFile <<endl;
//Brancher Data
_outputFile <<"BRANCHING DATA: BRANCHID BRANCHTYPE CYL1_IDX CYL2_IDX POS1 EQLEN "
"DIFFUSINGBRNACHSPECIESNAME DIFFUSINGACTINSPECIESNAME"<<endl;
for(auto l :BranchingPoint::getBranchingPoints()){
Cylinder* cyl1 = l->getFirstCylinder();
Cylinder* cyl2 = l->getSecondCylinder();
float pos1 = l->getPosition()*SysParams::Geometry()
.cylinderNumMon[cyl1->getType()];
_outputFile <<l->getId()<<" "<<l->getType()<<" "<<cyl1->getStableIndex()<<" "
<<cyl2->getStableIndex()<<" "<<pos1<<" "
<<l->getMBranchingPoint()->getEqLength()<<" "<<l->getCBranchingPoint()
->getDiffusingBranchSpecies()->getName()<<" "<<l->getdiffusingactinspeciesname()<<endl;
}
_outputFile <<endl;
//Compartment Data
_outputFile <<"COMPARTMENT DATA: CMPID DIFFUSINGSPECIES "
"COPYNUM"<<endl;
for(auto cmp:_subSystem->getCompartmentGrid()->getCompartments()){
_outputFile <<cmp->getId()<<" ";
for(auto sd : _chemData.speciesDiffusing) {
string name = get<0>(sd);
auto s = cmp->findSpeciesByName(name);
auto copyNum = s->getN();
_outputFile <<name<<" "<<copyNum<<" ";
}
_outputFile <<endl;
}
_outputFile <<endl;
//BulkSpecies
_outputFile <<"BULKSPECIES: BULKSPECIES COPYNUM"<<endl;
auto cmp = _subSystem->getCompartmentGrid()->getCompartments()[0];
for(auto sb : _chemData.speciesBulk) {
string name = get<0>(sb);
auto s = cmp->findSpeciesByName(name);
auto copyNum = s->getN();
_outputFile <<name<<" "<<copyNum<<" ";
}
_outputFile <<endl;
//TALLY
_outputFile<<"TALLY OF SPECIES: SPECIESNAME COPYNUM"<<endl;
// all diffusing and bulk species
for(auto sd : _chemData.speciesDiffusing) {
string name = get<0>(sd);
auto copyNum = _subSystem->getCompartmentGrid()->countDiffusingSpecies(name);
_outputFile << name << ":DIFFUSING " << copyNum << endl;
}
for(auto sb : _chemData.speciesBulk) {
string name = get<0>(sb);
auto copyNum = _subSystem->getCompartmentGrid()->countBulkSpecies(name);
_outputFile << name << ":BULK " << copyNum << endl;
}
for(int filType = 0; filType < SysParams::Chemistry().numFilaments; filType++) {
for(auto sf : _chemData.speciesFilament[filType]) {
auto copyNum = Filament::countSpecies(filType, sf);
_outputFile << sf << ":FILAMENT " << copyNum << endl;
}
for(auto sp : _chemData.speciesPlusEnd[filType]) {
auto copyNum = Filament::countSpecies(filType, sp);
_outputFile << sp << ":PLUSEND " << copyNum << endl;
}
for(auto sm : _chemData.speciesMinusEnd[filType]) {
auto copyNum = Filament::countSpecies(filType, sm);
_outputFile << sm << ":MINUSEND " << copyNum << endl;
}
for(auto sl : _chemData.speciesLinker[filType]) {
auto copyNum = Linker::countSpecies(sl);
_outputFile << sl << ":LINKER " << copyNum << endl;
}
for(auto sm : _chemData.speciesMotor[filType]) {
auto copyNum = MotorGhost::countSpecies(sm);
_outputFile << sm << ":MOTOR " << copyNum << endl;
}
for(auto sb : _chemData.speciesBrancher[filType]) {
auto copyNum = BranchingPoint::countSpecies(sb);
_outputFile << sb << ":BRANCHER " << copyNum << endl;
}
}
_outputFile <<endl;
_outputFile <<"ENERGYDATA "<< endl;
auto minresult = _subSystem->prevMinResult.energiesAfter;
for(auto eachenergy: minresult.individual){
_outputFile <<eachenergy.name<<" "<<eachenergy.energy*kT<<endl;
}
_outputFile <<endl;
_outputFile <<"MINUSENDPOLYMERIZATIONREACTIONS "<< endl;
for(auto fil:Filament::getFilaments()){
auto cyl = fil->getCylinderVector().front(); //get Minus Ends
for(auto &it:cyl->getCCylinder()->getInternalReactions()){
if(it->getReactionType() ==ReactionType::POLYMERIZATIONMINUSEND &&
!(it->isPassivated()) && it->computePropensity() > 0){
_outputFile<<"Fil "<<cyl->getFilID()<<" Cyl "<<cyl->getStableIndex()
<<" RATEMULFACTORS ";
for(auto fac:it->_ratemulfactors)
_outputFile<<fac<<" ";
_outputFile<<endl;
                auto coord = cyl->getCompartment()->coordinates();
                _outputFile.precision(10);
                _outputFile << "RNodeNRM: ptr=" << static_cast<RNodeNRM*>(it->getRnode())
                << ", tau=" << static_cast<RNodeNRM*>(it->getRnode())->getTau() <<
                ", a=" << static_cast<RNodeNRM*>(it->getRnode())->getPropensity()
                <<" in Compartment "<<coord[0]<<" "<<coord[1]<<" "<<coord[2]<<
                ", points to Reaction:\n";
//Print the reaction
it->printToStream(_outputFile);
}
}
}
}
void HessianMatrix::print(int snapshot){
_outputFile.precision(10);
    const auto& hVec = _ffm->hessianVector;
    const auto& tauVector = _ffm->tauVector;
// Outputs a sparse representation of the Hessian matrix, where only elements with appreciable size (>0.00001) are
// output along with their indices. Currently this outputs for each minimization, however to reduce the file size this could be changed.
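    // Illustrative layout of one emitted record (values are placeholders,
    // derived from the statements below, not real data):
    //   <tau> <total_DOF> <num_elements>
    //   <i> <j> <H_ij>      one line per retained element with |H_ij| > 1e-5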
if(counter % SysParams::Mechanics().hessSkip == 0){
int k = 0;
vector<vector<floatingpoint > > hMat = hVec[k];
int total_DOF = hMat.size();
vector<tuple<int, int, floatingpoint>> elements;
for(auto i = 0; i < total_DOF; i++){
for(auto j = 0; j < total_DOF; j++){
if(std::abs(hMat[i][j]) > 0.00001){
elements.push_back(std::make_tuple(i,j,hMat[i][j]));
}
}
}
_outputFile << tauVector[k] << " "<< total_DOF<< " " << elements.size()<<endl;
        for(const auto& element : elements){
            _outputFile << get<0>(element) << " " << get<1>(element) << " " << get<2>(element) << endl;
        }
        _outputFile << endl;
        // Clear the stored matrices here to reduce the amount of memory needed.
        _ffm->clearHessian(0);
    }
    counter += 1;
}
void HessianSpectra::print(int snapshot){
_outputFile.precision(10);
    const auto& evaluesVector = _ffm->evaluesVector;
    const auto& IPRIVector = _ffm->IPRIVector;
    const auto& IPRIIVector = _ffm->IPRIIVector;
    const auto& tauVector = _ffm->tauVector;
// Outputs the eigenvalues obtained from each Hessian matrix
for(auto k = 0; k < evaluesVector.size(); k++){
_outputFile <<tauVector[k] << " "<< evaluesVector[k].size()<< endl;
for(auto i = 0; i< evaluesVector[k].size(); i++){
_outputFile<<evaluesVector[k].real()[i]<< " "<<IPRIVector[k].real()[i]<< " "<<IPRIIVector[k].real()[i]<<endl;
}
_outputFile<<endl;
    }
// This clears the vectors storing the matrices to reduce the amount of memory needed.
_ffm->clearHessian(1);
if(!SysParams::Mechanics().hessMatrixPrintBool){
_ffm->clearHessian(0);
    }
}
void Projections::print(int snapshot){
_outputFile.precision(10);
    const auto& tauVector = _ffm->tauVector;
    const auto& projectionsVector = _ffm->projectionsVector;
    // Outputs the eigenmode projections obtained from each Hessian matrix
for(auto k = 0; k < projectionsVector.size(); k++){
_outputFile <<tauVector[k] << " "<< projectionsVector[k].size()<< endl;
for(auto i = 0; i< projectionsVector[k].size(); i++){
_outputFile<<projectionsVector[k].real()[i]<< endl;
}
_outputFile<<endl;
    }
// This clears the vectors storing the matrices to reduce the amount of memory needed.
_ffm->clearHessian(2);
}
void CylinderEnergies::print(int snapshot){
CylinderVolumeFF* cvFF = dynamic_cast<CylinderVolumeFF*>(_ffm->_forceFields.at(4));
vector<tuple<floatingpoint, int, vector<tuple<floatingpoint*,floatingpoint*,floatingpoint*,floatingpoint*, floatingpoint>>>> cylEnergies = cvFF->_cylinderVolInteractionVector.at(0)->getCylEnergies();
// need to change so it doesn't include every energy calculation, only before and after
for(auto i = 0; i < cylEnergies.size(); i ++){
tuple<floatingpoint, int, vector<tuple<floatingpoint*,floatingpoint*,floatingpoint*,floatingpoint*, floatingpoint>>> tempVec = cylEnergies[i];
_outputFile<< get<0>(tempVec) << " "<< get<1>(tempVec) <<endl;
vector<tuple<floatingpoint*,floatingpoint*,floatingpoint*,floatingpoint*, floatingpoint>> dataVec = get<2>(tempVec);
for(auto j = 0; j < dataVec.size(); j++){
floatingpoint* cyl1 = get<0>(dataVec[j]);
floatingpoint* cyl2 = get<1>(dataVec[j]);
floatingpoint* cyl3 = get<2>(dataVec[j]);
floatingpoint* cyl4 = get<3>(dataVec[j]);
floatingpoint energy = get<4>(dataVec[j]);
floatingpoint meanx = (*cyl1 + *cyl2 + *cyl3 + *cyl4) / 4;
floatingpoint meany = (*(cyl1+1) + *(cyl2+1) + *(cyl3+1) + *(cyl4+1)) / 4;
floatingpoint meanz = (*(cyl1+2) + *(cyl2+2) + *(cyl3+2) + *(cyl4+2)) / 4;
_outputFile<<meanx<< " "<<meany<< " "<<meanz<< " "<<energy<<endl;
/*_outputFile<<*(cyl1)<< " "<<*(cyl1 + 1)<< " "<<*(cyl1 + 2)<< " "
<<*(cyl2)<< " "<<*(cyl2 + 1)<< " "<<*(cyl2 + 2)<< " "
<<*(cyl3)<< " "<<*(cyl3 + 1)<< " "<<*(cyl3 + 2)<< " "
<<*(cyl4)<< " "<<*(cyl4 + 1)<< " "<<*(cyl4 + 2)<< " "<< energy<<endl;*/
}
}
if(cylEnergies.size()>0){
_outputFile<<endl;}
cvFF->_cylinderVolInteractionVector.at(0)->clearCylEnergies();
}
void RockingSnapshot::savePositions(){
for(auto &filament : Filament::getFilaments()) {
//print coordinates
for (auto cylinder : filament->getCylinderVector()){
savedPositions.push_back(cylinder->getFirstBead()->coordinate()[0]);
savedPositions.push_back(cylinder->getFirstBead()->coordinate()[1]);
savedPositions.push_back(cylinder->getFirstBead()->coordinate()[2]);
}
savedPositions.push_back(filament->getCylinderVector().back()->getSecondBead()->coordinate()[0]);
savedPositions.push_back(filament->getCylinderVector().back()->getSecondBead()->coordinate()[1]);
savedPositions.push_back(filament->getCylinderVector().back()->getSecondBead()->coordinate()[2]);
}
}
void RockingSnapshot::resetPositions(){
for(auto &filament : Filament::getFilaments()) {
//print coordinates
for (auto cylinder : filament->getCylinderVector()){
cylinder->getFirstBead()->coordinate()[0] = savedPositions.front();
savedPositions.pop_front();
cylinder->getFirstBead()->coordinate()[1] = savedPositions.front();
savedPositions.pop_front();
cylinder->getFirstBead()->coordinate()[2] = savedPositions.front();
savedPositions.pop_front();
}
filament->getCylinderVector().back()->getSecondBead()->coordinate()[0] = savedPositions.front();
savedPositions.pop_front();
filament->getCylinderVector().back()->getSecondBead()->coordinate()[1] = savedPositions.front();
savedPositions.pop_front();
filament->getCylinderVector().back()->getSecondBead()->coordinate()[2] = savedPositions.front();
savedPositions.pop_front();
}
}
void RockingSnapshot::print(int snapshot) {
_outputFile.precision(10);
int numT = 100;
float omega = 3.14159;
float delT = 2*3.14159 / numT;
float A = 15;
    Eigen::VectorXd keeperEigenVector = _ffm->evectors.col(k).real();
for(auto t = 0; t < numT ; t++){
float alpha = A * sin(omega * t * delT);
// print first line (snapshot number, time, number of filaments,
// linkers, motors, branchers, bubbles)
_outputFile << snapshot << " " << tau() << " " <<
Filament::numFilaments() << " " <<
Linker::numLinkers() << " " <<
MotorGhost::numMotorGhosts() << " " <<
BranchingPoint::numBranchingPoints() << " " <<
Bubble::numBubbles() << endl;
for(auto &filament : Filament::getFilaments()) {
//print first line (Filament ID, type, length, left_delta, right_delta)
_outputFile << "FILAMENT " << filament->getId() << " " <<
filament->getType() << " " <<
filament->getCylinderVector().size() + 1 << " " <<
filament->getDeltaMinusEnd() << " " << filament->getDeltaPlusEnd() << endl;
//print coordinates
for (auto cylinder : filament->getCylinderVector()){
// TODO: need to rewrite this part if bead coordinates are not all independent variables.
int idx = cylinder->getFirstBead()->getIndex() * 3;
floatingpoint delx1 = alpha*keeperEigenVector(idx);
floatingpoint delx2 = alpha*keeperEigenVector(idx+1);
floatingpoint delx3 = alpha*keeperEigenVector(idx+2);
cylinder->getFirstBead()->coordinate()[0]+=delx1;
cylinder->getFirstBead()->coordinate()[1]+=delx2;
cylinder->getFirstBead()->coordinate()[2]+=delx3;
auto x = cylinder->getFirstBead()->coordinate();
_outputFile<<x[0] <<" "<<x[1] <<" "<<x[2] <<" ";
}
            //print last bead coordinate
// TODO: need to rewrite this part if bead coordinates are not all independent variables.
int idx = filament->getCylinderVector().back()->getSecondBead()->getIndex() * 3;
floatingpoint delx1 = alpha*keeperEigenVector(idx);
floatingpoint delx2 = alpha*keeperEigenVector(idx+1);
floatingpoint delx3 = alpha*keeperEigenVector(idx+2);
filament->getCylinderVector().back()->getSecondBead()->coordinate()[0]+=delx1;
filament->getCylinderVector().back()->getSecondBead()->coordinate()[1]+=delx2;
filament->getCylinderVector().back()->getSecondBead()->coordinate()[2]+=delx3;
auto x = filament->getCylinderVector().back()->getSecondBead()->coordinate();
//_outputFile<<x[0] + delx1 <<" "<<x[1] + delx2 <<" "<<x[2] + delx3 <<" ";
_outputFile<<x[0] <<" "<<x[1] <<" "<<x[2] <<" ";
_outputFile << endl;
}
for(auto &linker : Linker::getLinkers()) {
//print first line
_outputFile << "LINKER " << linker->getId()<< " " <<
linker->getType() << endl;
//print coordinates
auto x =
midPointCoordinate(linker->getFirstCylinder()->getFirstBead()->vcoordinate(),
linker->getFirstCylinder()->getSecondBead()->vcoordinate(),
linker->getFirstPosition());
_outputFile<<x[0]<<" "<<x[1]<<" "<<x[2] << " ";
x = midPointCoordinate(linker->getSecondCylinder()->getFirstBead()->vcoordinate(),
linker->getSecondCylinder()->getSecondBead()->vcoordinate(),
linker->getSecondPosition());
_outputFile<<x[0]<<" "<<x[1]<<" "<<x[2];
_outputFile << endl;
}
for(auto &motor : MotorGhost::getMotorGhosts()) {
//print first line
//also contains a Bound(1) or unbound(0) qualifier
_outputFile << "MOTOR " << motor->getId() << " " << motor->getType() << " " << 1 << endl;
//print coordinates
auto x =
midPointCoordinate(motor->getFirstCylinder()->getFirstBead()->vcoordinate(),
motor->getFirstCylinder()->getSecondBead()->vcoordinate(),
motor->getFirstPosition());
_outputFile<<x[0]<<" "<<x[1]<<" "<<x[2] << " ";
x = midPointCoordinate(motor->getSecondCylinder()->getFirstBead()->vcoordinate(),
motor->getSecondCylinder()->getSecondBead()->vcoordinate(),
motor->getSecondPosition());
_outputFile<<x[0]<<" "<<x[1]<<" "<<x[2];
_outputFile << endl;
}
for(auto &branch : BranchingPoint::getBranchingPoints()) {
//print first line
_outputFile << "BRANCHER " << branch->getId() << " " <<
branch->getType() << endl;
//print coordinates
auto x = branch->coordinate;
_outputFile<<x[0]<<" "<<x[1]<<" "<<x[2] << endl;
}
for(auto &bubble : Bubble::getBubbles()) {
//print first line
_outputFile << "BUBBLE " << bubble->getId() << " " <<
bubble->getType() << endl;
//print coordinates
auto x = bubble->coordinate;
_outputFile<<x[0]<<" "<<x[1]<<" "<<x[2] << endl;
}
_outputFile <<endl;
    }
}
|
{"hexsha": "1552f6d427b30c8d9925889bd09fc1e2cfdb1c8a", "size": 64266, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/Output.cpp", "max_stars_repo_name": "allen-cell-animated/medyan", "max_stars_repo_head_hexsha": "0b5ef64fb338c3961673361e5632980617937ee6", "max_stars_repo_licenses": ["BSD-4-Clause-UC"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/Output.cpp", "max_issues_repo_name": "allen-cell-animated/medyan", "max_issues_repo_head_hexsha": "0b5ef64fb338c3961673361e5632980617937ee6", "max_issues_repo_licenses": ["BSD-4-Clause-UC"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Output.cpp", "max_forks_repo_name": "allen-cell-animated/medyan", "max_forks_repo_head_hexsha": "0b5ef64fb338c3961673361e5632980617937ee6", "max_forks_repo_licenses": ["BSD-4-Clause-UC"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.5330467491, "max_line_length": 203, "alphanum_fraction": 0.5512557184, "num_tokens": 16738}
|
#pragma once
#include <boost/filesystem.hpp>
#include "../kernel/string/string.h"
///////////////////////////////////////////////////////////////////////////////
/// Content addressable storage
///////////////////////////////////////////////////////////////////////////////
namespace ork::file {
//////////////////////////////////////////////////////////////////////////////
inline std::string generateContentTempPath(uint64_t key, std::string ext) {
using namespace boost::filesystem;
  // getenv returns nullptr when OBT_STAGE is unset; guard the std::string
  // construction, which would otherwise be undefined behavior.
  const char* stage = getenv("OBT_STAGE");
  std::string temp_dir = stage ? stage : "";
temp_dir += "/tempdir";
if (false == exists(temp_dir)) {
printf("Making temp_dir folder<%s>\n", temp_dir.c_str());
create_directory(temp_dir);
}
auto temp_path = temp_dir + "/" + FormatString("%zx.%s", key, ext.c_str());
return temp_path;
}
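// Usage sketch (illustrative, not part of the original header): map a content
// hash to a staged temp file; assumes OBT_STAGE is set by the build environment.
//   auto path = ork::file::generateContentTempPath(0xdeadbeefULL, "png");
//   // -> "$OBT_STAGE/tempdir/deadbeef.png"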
} // namespace ork::file
|
{"hexsha": "7d8c71c40bcff30674a5db6fd9fce2c4d64626f6", "size": 832, "ext": "inl", "lang": "C++", "max_stars_repo_path": "ork.core/inc/ork/file/cas.inl", "max_stars_repo_name": "tweakoz/orkid", "max_stars_repo_head_hexsha": "e3f78dfb3375853fd512a9d0828b009075a18345", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 25.0, "max_stars_repo_stars_event_min_datetime": "2015-02-21T04:21:21.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-20T05:19:27.000Z", "max_issues_repo_path": "ork.core/inc/ork/file/cas.inl", "max_issues_repo_name": "tweakoz/orkid", "max_issues_repo_head_hexsha": "e3f78dfb3375853fd512a9d0828b009075a18345", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 113.0, "max_issues_repo_issues_event_min_datetime": "2019-08-23T04:52:14.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-13T04:04:11.000Z", "max_forks_repo_path": "ork.core/inc/ork/file/cas.inl", "max_forks_repo_name": "tweakoz/orkid", "max_forks_repo_head_hexsha": "e3f78dfb3375853fd512a9d0828b009075a18345", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 4.0, "max_forks_repo_forks_event_min_datetime": "2017-02-20T18:17:55.000Z", "max_forks_repo_forks_event_max_datetime": "2020-06-28T03:47:55.000Z", "avg_line_length": 34.6666666667, "max_line_length": 79, "alphanum_fraction": 0.4939903846, "num_tokens": 158}
|
abstract type AbstractBoolDomain <: AbstractDomain end
"""
struct BoolDomain <: AbstractDomain
Boolean domain, uses a IntDomain in it. (true is 1 and false is 0)
"""
struct BoolDomain <: AbstractBoolDomain
inner::IntDomain
function BoolDomain(trailer::Trailer)
return new(IntDomain(trailer, 2, -1))
end
end
"""
reset_domain!(dom::BoolDomain)
Used in `reset_model!`.
"""
reset_domain!(dom::BoolDomain) = reset_domain!(dom.inner)
function Base.show(io::IO, dom::BoolDomain)
print(io, "[", join(dom, " "), "]")
end
function Base.show(io::IO, ::MIME"text/plain", dom::BoolDomain)
print(io, typeof(dom), ": [", join(dom, " "), "]")
end
"""
isempty(dom::BoolDomain)
Return `true` iff `dom` is an empty set. Done in constant time.
"""
Base.isempty(dom::BoolDomain) = Base.isempty(dom.inner)
"""
length(dom::BoolDomain)
Return the size of `dom`. Done in constant time.
"""
Base.length(dom::BoolDomain) = Base.length(dom.inner)
"""
    Base.in(value::Bool, dom::BoolDomain)
Check if a boolean value is in the domain. Done in constant time.
"""
function Base.in(value::Bool, dom::BoolDomain)
intValue = convert(Int, value)
return Base.in(intValue, dom.inner)
end
"""
    remove!(dom::BoolDomain, value::Bool)
Remove `value` from `dom`. Done in constant time.
"""
function remove!(dom::BoolDomain, value::Bool)
if !(value in dom)
return Bool[]
end
intValue = convert(Int, value)
remove!(dom.inner, intValue)
return [value]
end
"""
removeAll!(dom::BoolDomain)
Remove every value from `dom`. Return the removed values. Done in constant time.
"""
removeAll!(dom::BoolDomain) = convert.(Bool, removeAll!(dom.inner))
"""
    assign!(dom::BoolDomain, value::Bool)
Remove everything from the domain but `value`. Return the pruned values.
Done in *constant* time.
"""
function assign!(dom::BoolDomain, value::Bool)
@assert value in dom
return convert.(Bool, assign!(dom.inner, convert(Int, value)))
end
"""
Base.iterate(dom::BoolDomain, state=1)
Iterate over the domain in an efficient way. The order may not be consistent.
WARNING: Do **NOT** update the domain you are iterating on.
"""
function Base.iterate(dom::BoolDomain, state=1)
returned = iterate(dom.inner, state)
if isnothing(returned)
return nothing
end
value, newState = returned
return convert(Bool, value), newState
end
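# Usage sketch (illustrative, not part of the original file); assumes a
# SeaPearl `Trailer` is available, as elsewhere in the package:
#   trailer = Trailer()
#   dom = BoolDomain(trailer)
#   true in dom           # -> true
#   remove!(dom, false)   # prunes `false`, returns [false]
#   length(dom)           # -> 1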
|
{"hexsha": "bcef5df773a7f0322d15b686162609329fbc7f27", "size": 2559, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/CP/variables/BoolDomain.jl", "max_stars_repo_name": "pitmonticone/SeaPearl.jl", "max_stars_repo_head_hexsha": "0c0ca5ec5cce81515acd202ea2d87c985c0c3fea", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 44, "max_stars_repo_stars_event_min_datetime": "2021-04-20T16:29:52.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T07:17:03.000Z", "max_issues_repo_path": "src/CP/variables/BoolDomain.jl", "max_issues_repo_name": "pitmonticone/SeaPearl.jl", "max_issues_repo_head_hexsha": "0c0ca5ec5cce81515acd202ea2d87c985c0c3fea", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 65, "max_issues_repo_issues_event_min_datetime": "2021-04-23T17:20:56.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-22T23:42:24.000Z", "max_forks_repo_path": "src/CP/variables/BoolDomain.jl", "max_forks_repo_name": "pitmonticone/SeaPearl.jl", "max_forks_repo_head_hexsha": "0c0ca5ec5cce81515acd202ea2d87c985c0c3fea", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2021-05-10T23:32:49.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-15T02:44:34.000Z", "avg_line_length": 23.4770642202, "max_line_length": 100, "alphanum_fraction": 0.6486908949, "num_tokens": 622}
|
# various analytic mass profiles: Hernquist, NFW, Plummer, Isothermal, Miyamoto-Nagai (for disks)
import numpy as np
import astropy.units as u
from astropy import constants
from .cosmo_tools import *
G = constants.G.to(u.kpc * u.km**2. / u.Msun/ u.s**2.)
class NFW:
def __init__(self, Mvir, r, cvir):
"""
Inputs: Mvir (solar mass)
r: radius (kpc)
c: r_vir/r_scale (dimensionless)
"""
self.Mvir = Mvir
self.r = r
self.cvir = cvir
self.rs = r_vir(0.3, 0.7, Mvir)/cvir
self.x = r/self.rs
def density(self):
rhos = self.Mvir/ (4*np.pi*f(self.cvir)*self.rs**3)
return rhos/(self.x*(1+self.x)**2)
def mass(self):
return self.Mvir*f(self.x)/f(self.cvir)
def potential(self):
phi = -G*self.Mvir/f(self.cvir) *np.log(1+self.x)/self.r
return phi
def v_esc(self):
phi = self.potential()
return np.sqrt(-2*phi)
def v_rot(self):
m = self.mass()
return np.sqrt(G*m/self.r)
    def acc(self, position, i):
        # component of the gravitational acceleration along coordinate i
        x, y, z = position
        rr = np.sqrt(x**2. + y**2. + z**2.)
        # enclosed mass evaluated at rr (the original referenced the
        # nonexistent attribute self.rr)
        return -G*self.Mvir*f(rr/self.rs)*i/(f(self.cvir)*rr**3.)
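# --- Illustrative usage sketch (not part of the original module) ---
# Circular velocity of a Milky-Way-like halo at the solar radius; parameter
# values are assumptions, and r_vir and f come from cosmo_tools above.
def _demo_nfw_vrot():
    halo = NFW(Mvir=1.0e12, r=8.2, cvir=10.)  # Msun, kpc, dimensionless
    return halo.v_rot()                       # km/s at r = 8.2 kpc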
class Isothermal:
def __init__(self, r, vc):
"""
Inputs: r: radius (kpc)
vc: circular velocity at a given position (i.e. solar circle) [km/s]
"""
self.r = r
self.vc = vc
def potential(self):
return - self.vc**2. * np.log(self.r)
def density(self):
return (self.vc)**2./ (4.*np.pi*G*self.r**2.)
def mass(self):
return (self.vc)**2.*self.r/G
def v_esc(self):
phi = self.potential()
return np.sqrt(-2.*phi)
    def acc(self, position, i):
        vc = self.vc
        x, y, z = position
        rr = np.sqrt(x**2 + y**2 + z**2)
        # inward (negative) acceleration component, consistent with the
        # other profiles
        acc = -i * vc**2 / rr**2
        return acc
class MN:
def __init__(self, Mdisk, a, b, r, z):
"""
Inputs: Mass of disk (solar mass)
a: disk scale length (kpc)
b: disk scale height (kpc)
r: radius (kpc)
z: galactocentric height (kpc)
"""
self.Mdisk = Mdisk
self.a = a
self.b = b
self.z = z
self.B = np.sqrt(self.z**2 + self.b**2)
self.r = r
    def potential(self):
        Mdisk = self.Mdisk
        r = self.r
        a = self.a
        B = self.B
        # Miyamoto-Nagai potential: -G M / sqrt(r^2 + (a + B)^2), B = sqrt(z^2 + b^2)
        return -G*Mdisk / np.sqrt(r**2 + (a + B)**2)
# def mass(self):
# b = self.b
# a = self.a
# r = self.r
# z = self.z
# Mdisk = self.Mdisk
# K = a + np.sqrt(z**2 + b**2)
# num = r**2.
# den = (r**2 + K**2)**(1.5)
# t1 = num/den
# num = z * K
# den = np.sqrt(z**2 + b**2) * (K**2 + r**2)**(1.5)
# t2 = num/den
# return Mdisk * ((r *t1) + (t2 * z))
def v_rot(self):
# taken from Bullock 05 paper
b = self.b
a = self.a
r = self.r
z = self.z
Mdisk = self.Mdisk
K = a + np.sqrt(z**2 + b**2)
num = G * Mdisk * r**2
den = (r**2 + K**2)**(1.5)
t1 = num/den
num = G * Mdisk * z**2 * K
den = np.sqrt(z**2 + b**2) * (K**2 + r**2)**(1.5)
t2 = num/den
return np.sqrt(t1+t2)
def density(self):
Mdisk = self.Mdisk
r = self.r
a = self.a
B = self.B
b = self.b
k = b**2 * Mdisk/ (4*np.pi)
num = a*r**2 + ((a+3*B)*(a+B)**2)
den = (r**2 + (a+B)**2)**(2.5) * B**3
return k * num / den
def v_esc(self):
phi = self.potential()
return np.sqrt(-2*phi)
    def acc(self, position, i):
        # i is the axis label ('x', 'y' or 'z'); the matching component is
        # taken from position
        bdisk = self.b
        adisk = self.a
        rdisk = self.r
        Mdisk = self.Mdisk
        x, y, z = position
        B = np.sqrt(z**2. + bdisk**2.)
        if i == 'x' or i == 'y':
            comp = x if i == 'x' else y
            acc = -G*Mdisk*comp/((rdisk**2.) + (adisk + B)**2.)**(1.5)
        elif i == 'z':
            acc = -G*Mdisk*z*(adisk + B)/(((adisk + B)**2. + rdisk**2.)**(1.5) * B)
        return acc
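# --- Illustrative usage sketch (not part of the original module) ---
# Disk contribution to the midplane rotation speed at the solar radius,
# using Milky-Way-like Miyamoto-Nagai parameters (assumed values).
def _demo_mn_vrot():
    disk = MN(Mdisk=6.5e10, a=3.5, b=0.53, r=8.2, z=0.)  # Msun, kpc
    return disk.v_rot()                                  # km/s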
class Hernquist:
def __init__(self, Mvir, r, a):
"""
Inputs: Mvir: total mass (solar mass)
a: Hernquist length scale (kpc)
r: radius (kpc)
"""
self.Mvir = Mvir
self.a = a
self.r = r
def density(self):
M = self.Mvir
r = self.r
a = self.a
return M*a / (2.*np.pi*r*(r+a)**3.)
def potential(self):
M = self.Mvir
a = self.a
r = self.r
return -G*M /(r+a)
def mass(self):
M = self.Mvir
r = self.r
a = self.a
return M*r**2. / (r+a)**2.
def v_esc(self):
phi = self.potential()
return np.sqrt(-2.*phi)
def v_rot(self):
M = self.mass()
r = self.r
return np.sqrt(G*M/r)
    def acc(self, position, i):
        M = self.Mvir
        a = self.a
        x, y, z = position
        rr = np.sqrt(x**2 + y**2 + z**2.)
        # force follows from the potential -G M / (r + a)
        return -G*M*i/((rr + a)**2. * rr)
class Plummer:
def __init__(self, Mtot, r, a):
"""
Inputs: Mtot: total mass (solar mass)
a: Plummer length scale (kpc)
r: radius (kpc)
"""
self.Mtot = Mtot
self.a = a
self.r = r
def density(self):
M = self.Mtot
a = self.a
r = self.r
return 3*M/(4*np.pi*a**3) * (1+(r/a)**2)**(-2.5)
def potential(self):
M = self.Mtot
a = self.a
r = self.r
return - G*M/ np.sqrt(r**2 + a**2)
def v_esc(self):
r = self.r
phi = self.potential()
return np.sqrt(-2*phi)
def mass(self):
M = self.Mtot
a = self.a
r = self.r
mass_enc = M*r**3/ (r**2 + a**2)**(1.5)
return mass_enc
def v_rot(self):
r = self.r
M = self.mass()
return np.sqrt(G*M/r)
def acc(self, position, i):
M = self.Mtot
a = self.a
x,y,z = position
rr = np.sqrt(x**2 + y**2 + z**2)
return - (G * M * i)/(rr**2 + a**2)**(1.5)
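# --- Illustrative sketch (not part of the original module) ---
# Total circular velocity from independent halo + disk components, added in
# quadrature (v_c^2 = sum of G M_i(<r) / r); parameter values are assumptions.
def _demo_total_vrot(r=8.2):
    halo = NFW(Mvir=1.0e12, r=r, cvir=10.)
    disk = MN(Mdisk=6.5e10, a=3.5, b=0.53, r=r, z=0.)
    return np.sqrt(halo.v_rot()**2 + disk.v_rot()**2)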
|
{"hexsha": "558f9222afd8e7f80c149473abdf4b62988f80b9", "size": 6369, "ext": "py", "lang": "Python", "max_stars_repo_path": "jellyfish/profiles.py", "max_stars_repo_name": "ekta1224/jellyfish", "max_stars_repo_head_hexsha": "3271019434448b5916dcc920d640b81375b74c05", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "jellyfish/profiles.py", "max_issues_repo_name": "ekta1224/jellyfish", "max_issues_repo_head_hexsha": "3271019434448b5916dcc920d640b81375b74c05", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 17, "max_issues_repo_issues_event_min_datetime": "2017-11-09T22:12:27.000Z", "max_issues_repo_issues_event_max_datetime": "2018-03-21T16:32:50.000Z", "max_forks_repo_path": "jellyfish/profiles.py", "max_forks_repo_name": "ekta1224/jellyfish", "max_forks_repo_head_hexsha": "3271019434448b5916dcc920d640b81375b74c05", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.6860465116, "max_line_length": 156, "alphanum_fraction": 0.4479510127, "include": true, "reason": "import numpy,import astropy,from astropy", "num_tokens": 2080}
|
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
"""
This module contains miscellaneous utility functions for use in IDAES models.
"""
from enum import Enum
from pyomo.common.deprecation import deprecated, relocated_module_attribute
import pyomo.environ as pyo
from pyomo.common.config import ConfigBlock
import idaes.logger as idaeslog
from idaes.core.util.tags import svg_tag as svg_tag_new
_log = idaeslog.getLogger(__name__)
relocated_module_attribute(
"get_solver", "idaes.core.solvers.get_solver", version="2.0.0.alpha0"
)
relocated_module_attribute(
"VarLikeExpression",
"idaes.core.base.var_like_expression.VarLikeExpression",
version="2.0.0.alpha0",
)
# Author: Andrew Lee
def add_object_reference(self, local_name, remote_object):
"""
Method to create a reference in the local model to a remote Pyomo object.
This method should only be used where Pyomo Reference objects are not
suitable (such as for referencing scalar Pyomo objects where the None
index is undesirable).
Args:
local_name : name to use for local reference (str)
remote_object : object to make a reference to
Returns:
None
"""
try:
object.__setattr__(self, local_name, remote_object)
except AttributeError:
raise AttributeError(
"{} failed to construct reference to {} - remote "
"object does not exist.".format(self.name, remote_object)
)
# Author: Jaffer Ghouse
def extract_data(data_dict):
"""
General method that returns a rule to extract data from a python
dictionary. This method allows the param block to have a database for
a parameter but extract a subset of this data to initialize a Pyomo
param object.
"""
def _rule_initialize(m, *args):
if len(args) > 1:
return data_dict[args]
else:
return data_dict[args[0]]
return _rule_initialize
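# Illustrative usage sketch (names are hypothetical, not from IDAES): the
# returned rule indexes the dictionary with the Pyomo index arguments.
#
#   mw_data = {"H2O": 18.015e-3, "CO2": 44.010e-3}
#   blk.mw = pyo.Param(blk.component_list, initialize=extract_data(mw_data))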
@deprecated(
"idaes.core.util.misc.TagReference will be removed in a future version",
version=1.12,
)
def TagReference(s, description=""):
"""
Create a Pyomo reference with an added description string attribute to
describe the reference. The intended use for these references is to create
a time-indexed reference to variables in a model corresponding to plant
    measurement tags.
    Args:
        s: Pyomo time slice of a variable or expression
        description (str): A description of the measurement
Returns:
A Pyomo Reference object with an added doc attribute
"""
r = pyo.Reference(s)
r.description = description
return r
def copy_port_values(destination=None, source=None, arc=None, direction="forward"):
"""
Moved to idaes.core.util.initialization.propagate_state.
Leaving redirection function here for deprecation warning.
"""
_log.warning(
"DEPRECATED: copy_port_values has been deprecated. "
"The same functionality can be found in "
"idaes.core.util.initialization.propagate_state."
)
from idaes.core.util.initialization import propagate_state
propagate_state(
destination=destination, source=source, arc=arc, direction=direction
)
@deprecated(
"idaes.core.util.misc.svg_tag has moved to idaes.core.util.tags.svg_tag",
version=1.12,
)
def svg_tag(*args, **kwargs):
"""
Moved to idaes.core.util.tags.svg_tag
Leaving redirection function here for deprecation warning.
"""
return svg_tag_new(*args, **kwargs)
def set_param_from_config(b, param, config=None, index=None):
"""
Utility method to set parameter value from a config block. This allows for
converting units if required. This method directly sets the value of the
parameter.
This method supports three forms for defining the parameter value:
1. a 2-tuple of the form (value, units) where units are the units that
the value are defined in
2. a float where the float is assumed to be the value of the parameter
value in the base units of the property package
3. a Python Class which has a get_parameter_value method which will
return a 2-tuple of (value, units) based on a lookup of the parameter name
    Args:
        b - block on which parameter and config block are defined
        param - name of parameter as str. Used to find param and config arg;
            units for any conversion are taken from the Param object itself
        config - (optional) config block to get parameter data from. If
            unset, assumes b.config.
        index - (optional) used for pure component properties where a single
            property may have multiple parameters associated with it.
Returns:
None
"""
if config is None:
try:
config = b.config
except AttributeError:
raise AttributeError(
"{} - set_param_from_config method was not provided with a "
"config argument, but no default Config block exists. Please "
"specify the Config block to use via the config argument.".format(
b.name
)
)
# Check that config is an instance of a Config Block
if not isinstance(config, ConfigBlock):
raise TypeError(
"{} - set_param_from_config - config argument provided is not an "
"instance of a Config Block.".format(b.name)
)
if index is None:
try:
param_obj = getattr(b, param)
except AttributeError:
raise AttributeError(
"{} - set_param_from_config method was provided with param "
"argument {}, but no attribute of that name exists.".format(
b.name, param
)
)
try:
p_data = config.parameter_data[param]
except (KeyError, AttributeError):
raise KeyError(
"{} - set_param_from_config method was provided with param "
"argument {}, but the config block does not contain a "
"value for this parameter.".format(b.name, param)
)
else:
try:
param_obj = getattr(b, param + "_" + index)
except AttributeError:
raise AttributeError(
"{} - set_param_from_config method was provided with param and"
" index arguments {} {}, but no attribute with that "
"combination ({}_{}) exists.".format(b.name, param, index, param, index)
)
try:
p_data = config.parameter_data[param][index]
except (KeyError, AttributeError):
raise KeyError(
"{} - set_param_from_config method was provided with param and"
" index arguments {} {}, but the config block does not contain"
" a value for this parameter and index.".format(b.name, param, index)
)
units = param_obj.get_units()
# Check to see if p_data is callable, and if so, try to call the
# get_parameter_value method to get 2-tuple
if hasattr(p_data, "get_parameter_value"):
p_data = p_data.get_parameter_value(b.local_name, param)
if isinstance(p_data, tuple):
# 11 Dec 2020 - There is currently a bug in Pyomo where trying to
# convert the units of a unitless quantity results in a TypeError.
# To avoid this, we check here for cases where both the parameter and
# user provided value are unitless and bypass unit conversion.
if (units is None or units is pyo.units.dimensionless) and (
p_data[1] is None or p_data[1] is pyo.units.dimensionless
):
param_obj.value = p_data[0]
else:
param_obj.value = pyo.units.convert_value(
p_data[0], from_units=p_data[1], to_units=units
)
else:
_log.debug(
"{} no units provided for parameter {} - assuming default "
"units".format(b.name, param)
)
param_obj.value = p_data
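# Illustrative forms accepted for config.parameter_data above (names are
# hypothetical, not from a real property package):
#
#   "parameter_data": {
#       "mw": (18.015e-3, pyo.units.kg / pyo.units.mol),  # 1. (value, units)
#       "omega": 0.344,                                   # 2. value in base units
#       "cp_mol_ig_comp_coeff": some_parameter_library,   # 3. object exposing
#   }                                                     #    get_parameter_value()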
class StrEnum(str, Enum):
"""
Multiple inheritance string-Enum for representing Enums with string values
"""
def __str__(self):
return str(self.value)
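# Illustrative sketch (not part of the original module): members of a StrEnum
# behave as their string values.
#
#   class Phase(StrEnum):
#       LIQ = "Liq"
#       VAP = "Vap"
#
#   str(Phase.LIQ) == "Liq"   # True
#   Phase.VAP == "Vap"        # True, via str inheritance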
|
{"hexsha": "fe04f8bcc7df98ea6bc088666d6ad4f6bcc0960b", "size": 8985, "ext": "py", "lang": "Python", "max_stars_repo_path": "idaes/core/util/misc.py", "max_stars_repo_name": "OOAmusat/idaes-pse", "max_stars_repo_head_hexsha": "ae7d3bb8e372bc32822dcdcb75e9fd96b78da539", "max_stars_repo_licenses": ["RSA-MD"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "idaes/core/util/misc.py", "max_issues_repo_name": "OOAmusat/idaes-pse", "max_issues_repo_head_hexsha": "ae7d3bb8e372bc32822dcdcb75e9fd96b78da539", "max_issues_repo_licenses": ["RSA-MD"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "idaes/core/util/misc.py", "max_forks_repo_name": "OOAmusat/idaes-pse", "max_forks_repo_head_hexsha": "ae7d3bb8e372bc32822dcdcb75e9fd96b78da539", "max_forks_repo_licenses": ["RSA-MD"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.374015748, "max_line_length": 88, "alphanum_fraction": 0.6440734558, "include": true, "reason": "import pyomo,from pyomo", "num_tokens": 1870}
|
import numpy as np
import scipy.spatial as spatial
def bilinear_interpolate(img, coords):
""" Interpolates over every image channel
http://en.wikipedia.org/wiki/Bilinear_interpolation
:param img: max 3 channel image
:param coords: 2 x _m_ array. 1st row = xcoords, 2nd row = ycoords
:returns: array of interpolated pixels with same shape as coords
"""
int_coords = np.int32(coords)
x0, y0 = int_coords
dx, dy = coords - int_coords
    # 4 neighbouring pixels
q11 = img[y0, x0]
q21 = img[y0, x0+1]
q12 = img[y0+1, x0]
q22 = img[y0+1, x0+1]
btm = q21.T * dx + q11.T * (1 - dx)
top = q22.T * dx + q12.T * (1 - dx)
inter_pixel = top * dy + btm * (1 - dy)
return inter_pixel.T
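# --- Illustrative sketch (not part of the original module) ---
# On a 2x2 single-channel ramp, the point midway between the four pixels
# interpolates to their mean.
def _demo_bilinear():
    img = np.array([[[0.], [10.]],
                    [[20.], [30.]]])           # shape (2, 2, 1)
    coords = np.array([[0.5], [0.5]])          # row 0: x, row 1: y
    return bilinear_interpolate(img, coords)   # -> array([[15.]])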
def grid_coordinates(points):
""" x,y grid coordinates within the ROI of supplied points
:param points: points to generate grid coordinates
:returns: array of (x, y) coordinates
"""
xmin = np.min(points[:, 0])
xmax = np.max(points[:, 0]) + 1
ymin = np.min(points[:, 1])
ymax = np.max(points[:, 1]) + 1
return np.asarray([(x, y) for y in range(ymin, ymax)
for x in range(xmin, xmax)], np.uint32)
def process_warp(src_img, result_img, tri_affines, dst_points, delaunay):
"""
Warp each triangle from the src_image only within the
ROI of the destination image (points in dst_points).
"""
roi_coords = grid_coordinates(dst_points)
# indices to vertices. -1 if pixel is not in any triangle
roi_tri_indices = delaunay.find_simplex(roi_coords)
for simplex_index in range(len(delaunay.simplices)):
coords = roi_coords[roi_tri_indices == simplex_index]
num_coords = len(coords)
out_coords = np.dot(tri_affines[simplex_index],
np.vstack((coords.T, np.ones(num_coords))))
x, y = coords.T
result_img[y, x] = bilinear_interpolate(src_img, out_coords)
return None
def triangular_affine_matrices(vertices, src_points, dest_points):
"""
Calculate the affine transformation matrix for each
triangle (x,y) vertex from dest_points to src_points
:param vertices: array of triplet indices to corners of triangle
:param src_points: array of [x, y] points to landmarks for source image
:param dest_points: array of [x, y] points to landmarks for destination image
:returns: 2 x 3 affine matrix transformation for a triangle
"""
ones = [1, 1, 1]
for tri_indices in vertices:
src_tri = np.vstack((src_points[tri_indices, :].T, ones))
dst_tri = np.vstack((dest_points[tri_indices, :].T, ones))
mat = np.dot(src_tri, np.linalg.inv(dst_tri))[:2, :]
yield mat
def warp_image(src_img, src_points, dest_points, dest_shape, dtype=np.uint8):
# Resultant image will not have an alpha channel
num_chans = 3
src_img = src_img[:, :, :3]
rows, cols = dest_shape[:2]
result_img = np.zeros((rows, cols, num_chans), dtype)
delaunay = spatial.Delaunay(dest_points)
tri_affines = np.asarray(list(triangular_affine_matrices(
delaunay.simplices, src_points, dest_points)))
process_warp(src_img, result_img, tri_affines, dest_points, delaunay)
return result_img
def test_local():
from functools import partial
import cv2
import scipy.misc
import locator
import aligner
from matplotlib import pyplot as plt
# Load source image
face_points_func = partial(locator.face_points, '../data')
base_path = '../females/Screenshot 2015-03-04 17.11.12.png'
src_path = '../females/BlDmB5QCYAAY8iw.jpg'
src_img = cv2.imread(src_path)
# Define control points for warps
src_points = face_points_func(src_path)
base_img = cv2.imread(base_path)
base_points = face_points_func(base_path)
size = (600, 500)
src_img, src_points = aligner.resize_align(src_img, src_points, size)
base_img, base_points = aligner.resize_align(base_img, base_points, size)
result_points = locator.weighted_average_points(src_points, base_points, 0.2)
# Perform transform
dst_img1 = warp_image(src_img, src_points, result_points, size)
dst_img2 = warp_image(base_img, base_points, result_points, size)
import blender
ave = blender.weighted_average(dst_img1, dst_img2, 0.6)
mask = blender.mask_from_points(size, result_points)
blended_img = blender.poisson_blend(dst_img1, dst_img2, mask)
plt.subplot(2, 2, 1)
plt.imshow(ave)
plt.subplot(2, 2, 2)
plt.imshow(dst_img1)
plt.subplot(2, 2, 3)
plt.imshow(dst_img2)
plt.subplot(2, 2, 4)
plt.imshow(blended_img)
plt.show()
if __name__ == "__main__":
test_local()
|
{"hexsha": "50b1a3151683891fe29f85b69660417956b904a6", "size": 4473, "ext": "py", "lang": "Python", "max_stars_repo_path": "face_morpher/facemorpher/warper.py", "max_stars_repo_name": "ivan-uskov/faces", "max_stars_repo_head_hexsha": "59a27c305888e8e000cb1549f8b06216449b1f05", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-09-16T00:02:48.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-16T00:02:48.000Z", "max_issues_repo_path": "facemorpher/warper.py", "max_issues_repo_name": "ImpactCrater/AutoFaceMorpher", "max_issues_repo_head_hexsha": "9955a02d9cec309ca0db8c2454f9466f7e1633c5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "facemorpher/warper.py", "max_forks_repo_name": "ImpactCrater/AutoFaceMorpher", "max_forks_repo_head_hexsha": "9955a02d9cec309ca0db8c2454f9466f7e1633c5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.95, "max_line_length": 79, "alphanum_fraction": 0.7091437514, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1292}
|
function marginal = marginal_nodes(engine, nodes, t, add_ev)
% MARGINAL_NODES Compute the marginal on the specified query nodes (hmm)
% marginal = marginal_nodes(engine, nodes, t, add_ev)
%
% 'nodes' must be a single node.
% t is the time slice.
if nargin < 3, t = 1; end
if nargin < 4, add_ev = 0; end
assert(length(nodes)==1)
ss = engine.slice_size;
i = nodes(1);
bigT = engine.one_slice_marginal(:,t);
dom = i + (t-1)*ss;
ns = engine.eff_node_sizes(:);
bigdom = 1:ss;
marginal.T = marg_table(bigT, bigdom + (t-1)*ss, ns(bigdom), dom, engine.maximize);
marginal.domain = dom;
marginal.mu = [];
marginal.Sigma = [];
if add_ev
marginal = add_ev_to_dmarginal(marginal, engine.evidence, engine.node_sizes);
end
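% Usage sketch (illustrative, not part of the original file): after evidence
% has been entered into the engine, e.g.
%   engine = enter_evidence(engine, evidence);
%   m = marginal_nodes(engine, 1, 3);  % node 1, time slice 3
%   m.T                                % marginal table for that node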
|
{"author": "bayesnet", "repo": "bnt", "sha": "bebba5f437b4e1e29169f0f3669df59fb5392e62", "save_path": "github-repos/MATLAB/bayesnet-bnt", "path": "github-repos/MATLAB/bayesnet-bnt/bnt-bebba5f437b4e1e29169f0f3669df59fb5392e62/BNT/inference/dynamic/@hmm_inf_engine/marginal_nodes.m"}
|
from __future__ import print_function
from functools import reduce
import re
import numpy as np
from keras.preprocessing.sequence import pad_sequences
def tokenize(sent):
'''Return the tokens of a sentence including punctuation.
>>> tokenize('Bob dropped the apple. Where is the apple?')
['Bob', 'dropped', 'the', 'apple', '.', 'Where', 'is', 'the', 'apple', '?']
'''
    return [x.strip() for x in re.split(r'(\W+)', sent) if x.strip()]
def parse_stories(lines, only_supporting=False):
'''Parse stories provided in the bAbi tasks format
If only_supporting is true, only the sentences that support the answer are kept.
'''
data = []
story = []
for line in lines:
line = line.decode('utf-8').strip()
nid, line = line.split(' ', 1)
nid = int(nid)
if nid == 1:
story = []
if '\t' in line:
q, a, supporting = line.split('\t')
q = tokenize(q)
substory = None
if only_supporting:
# Only select the related substory
supporting = map(int, supporting.split())
substory = [story[i - 1] for i in supporting]
else:
# Provide all the substories
substory = [x for x in story if x]
data.append((substory, q, a))
story.append('')
else:
sent = tokenize(line)
story.append(sent)
return data
def get_stories(f, only_supporting=False, max_length=None):
'''Given a file name, read the file, retrieve the stories, and then convert the sentences into a single story.
If max_length is supplied, any stories longer than max_length tokens will be discarded.
'''
data = parse_stories(f.readlines(), only_supporting=only_supporting)
flatten = lambda data: reduce(lambda x, y: x + y, data)
data = [(flatten(story), q, answer) for story, q, answer in data if not max_length or len(flatten(story)) < max_length]
return data
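# --- Illustrative sketch (not part of the original module) ---
# bAbI statements are "<id> <sentence>"; question lines append the answer and
# the ids of the supporting facts, separated by tabs.
def _demo_parse():
    raw = [b"1 Mary moved to the bathroom.",
           b"2 John went to the hallway.",
           b"3 Where is Mary?\tbathroom\t1"]
    return parse_stories(raw)
    # -> [([tokens of sent 1, tokens of sent 2],
    #      ['Where', 'is', 'Mary', '?'], 'bathroom')]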
def vectorize_stories(data, word_idx, story_maxlen, query_maxlen):
X = []
Xq = []
Y = []
for story, query, answer in data:
x = [word_idx[w] for w in story]
xq = [word_idx[w] for w in query]
y = np.zeros(len(word_idx) + 1) # let's not forget that index 0 is reserved
y[word_idx[answer]] = 1
X.append(x)
Xq.append(xq)
Y.append(y)
return pad_sequences(X, maxlen=story_maxlen), pad_sequences(Xq, maxlen=query_maxlen), np.array(Y)
QFILE = {1: 'tasks_1-20_v1-2/en-10k/qa1_single-supporting-fact_train.txt',
2: 'tasks_1-20_v1-2/en-10k/qa2_two-supporting-facts_train.txt',
3: 'tasks_1-20_v1-2/en-10k/qa3_three-supporting-facts_train.txt',
4: 'tasks_1-20_v1-2/en-10k/qa4_two-arg-relations_train.txt',
5: 'tasks_1-20_v1-2/en-10k/qa5_three-arg-relations_train.txt',
6: 'tasks_1-20_v1-2/en-10k/qa6_yes-no-questions_train.txt',
7: 'tasks_1-20_v1-2/en-10k/qa7_counting_train.txt',
8: 'tasks_1-20_v1-2/en-10k/qa8_lists-sets_train.txt',
9: 'tasks_1-20_v1-2/en-10k/qa9_simple-negation_train.txt',
10: 'tasks_1-20_v1-2/en-10k/qa10_indefinite-knowledge_train.txt',
11: 'tasks_1-20_v1-2/en-10k/qa11_basic-coreference_train.txt',
12: 'tasks_1-20_v1-2/en-10k/qa12_conjunction_train.txt',
13: 'tasks_1-20_v1-2/en-10k/qa13_compound-coreference_train.txt',
14: 'tasks_1-20_v1-2/en-10k/qa14_time-reasoning_train.txt',
15: 'tasks_1-20_v1-2/en-10k/qa15_basic-deduction_train.txt',
16: 'tasks_1-20_v1-2/en-10k/qa16_basic-induction_train.txt',
17: 'tasks_1-20_v1-2/en-10k/qa17_positional-reasoning_train.txt',
18: 'tasks_1-20_v1-2/en-10k/qa18_size-reasoning_train.txt',
19: 'tasks_1-20_v1-2/en-10k/qa19_path-finding_train.txt',
20: 'tasks_1-20_v1-2/en-10k/qa20_agents-motivations_train.txt'}
|
{"hexsha": "caec9aec834d34c2ad236bf5dbd9f557a2747098", "size": 3934, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/babi_sitter.py", "max_stars_repo_name": "jayanthkoushik/eve", "max_stars_repo_head_hexsha": "25d290361c21941f77bb9dd8150048132863184a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 81, "max_stars_repo_stars_event_min_datetime": "2016-11-07T13:59:54.000Z", "max_stars_repo_stars_event_max_datetime": "2018-04-20T20:02:34.000Z", "max_issues_repo_path": "src/babi_sitter.py", "max_issues_repo_name": "jayanthkoushik/eve", "max_issues_repo_head_hexsha": "25d290361c21941f77bb9dd8150048132863184a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2016-11-08T05:22:42.000Z", "max_issues_repo_issues_event_max_datetime": "2016-11-10T07:23:37.000Z", "max_forks_repo_path": "src/babi_sitter.py", "max_forks_repo_name": "jayanthkoushik/eve", "max_forks_repo_head_hexsha": "25d290361c21941f77bb9dd8150048132863184a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 20, "max_forks_repo_forks_event_min_datetime": "2016-11-05T15:24:34.000Z", "max_forks_repo_forks_event_max_datetime": "2018-12-18T13:44:14.000Z", "avg_line_length": 42.3010752688, "max_line_length": 123, "alphanum_fraction": 0.6321809863, "include": true, "reason": "import numpy", "num_tokens": 1187}
|
#!/usr/bin/env python3
import numpy as np
#import scipy.interpolate as spi
#from scipy.interpolate import griddata
from scipy.interpolate import NearestNDInterpolator
from scipy.interpolate import LinearNDInterpolator
import matplotlib.pyplot as plt
## if using plot_pcolor function as-is:
#params = {'text.latex.preamble': [r'\usepackage{newtxtext,newtxmath,siunitx}']}
#plt.rcParams.update(params)
import sys
sys.path.append('../..')
from srlife import receiver
if __name__ == "__main__":
"""
Units: stress in MPa, strain in mm/mm, time in hours, temperature in K
"""
## Setup the base receiver model:
period = 10 # Loading cycle period, hours
days = 1 # Number of cycles represented in the problem
panel_stiffness = "disconnect" # Panels are disconnected from one another
model = receiver.Receiver(period, days, panel_stiffness)
## Tube geometry:
ro_tube = 60/2. # mm
wt_tube = 1.2 # mm
## Tube discretization:
nr = 13
nt = 61
nz = 146
## Solar Central Receiver (scr) geometry:
height = 14500.0 # mm
width = 13500.0 # diameter of receiver in mm
r_scr = width / 2. # radius of receiver
c_scr = 2 * np.pi * r_scr # scr circumference on which tubes are placed
n_tubes = 12 # one tube per panel
## Load receiver spring equinox noon conditions (Daggett, CA):
## -> saved in a "DELSOL3-like" flattened cylindrical shape, with:
## -> [i, j] index-notation the same as numpy.meshgrid(..., indexing='ij')
## -> i is azimuth on receiver aperture counter-clockwise from south
## -> j is height up panel/tubes from bottom
pa = np.genfromtxt('azimuth.csv', delimiter=',')
pz = np.genfromtxt('height.csv', delimiter=',')*1e3 # convert m to mm
## Bulk sodium fluid temperature from lumped-parameter modelling:
fluid_temp = np.genfromtxt('fluid_temp.csv', delimiter=',')
# ## Incident flux map from Solstice:
# inc_flux = np.genfromtxt('inc_flux.csv', delimiter=',')*1e-6 # W/m^2 to W/mm^2
## Absorbed (net) flux at tube OD from lumped-parameter modelling:
net_flux = np.genfromtxt('net_flux.csv', delimiter=',')*1e-6 # W/m^2 to W/mm^2
## create copy of (surface) coordinates and move boundaries to limits of problem:
pa_interp = pa.copy()
pa_interp[0,:] = 0; pa_interp[-1,:] = 2*np.pi
pz_interp = pz.copy()
pz_interp[:,0] = 0; pz_interp[:,-1] = height
## Create mesh for interpolating flux and fluid temperatures at tube centroids:
a_tmp = np.linspace(0, 2*np.pi, n_tubes + 1)
a_tubes = (a_tmp[:-1] + a_tmp[1:]) / 2. # tubes around receiver circumference
# z_tmp = np.linspace(0, height, nz+1)
# z_tubes = (z_tmp[:-1] + z_tmp[1:]) / 2. # flux/temp values also at surfaces
z_tubes = np.linspace(0,height,nz)
ma, mz = np.meshgrid(a_tubes, z_tubes, indexing='ij')
## Sample bulk fluid temperatures at nearest panel/tube temperature:
fluid_temp_interp = NearestNDInterpolator(
list(zip(pa.ravel(), pz.ravel())),
fluid_temp.ravel()
)
## interpolate tube flux linearly between (surface) values:
flux_interp = LinearNDInterpolator(
list(zip(pa_interp.ravel(), pz_interp.ravel())),
net_flux.ravel()
)
# Periodic function used to set daily flux cycle (10 hours)
ramp = lambda t: np.interp(
t % period,
[0., 0.2, 1., 2., 3., 4., 5., 6., 7., 8., 9., 9.8, 10.],
[0.00, 0.71, 0.87, 0.95, 0.97, 0.99, 1.00,
0.99, 0.97, 0.95, 0.87, 0.71, 0.00]
)
# Periodic function used to set switch operation (10 hours)
onoff = lambda t: np.interp(
t % period,
[0., 0.2, 9.8, 10.],
[0., 1., 1., 0.]
)
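## Optional sanity check of the daily profiles (a sketch, left commented out):
# t_chk = np.linspace(0, period, 201)
# plt.plot(t_chk, ramp(t_chk), label='flux ramp')
# plt.plot(t_chk, onoff(t_chk), label='on/off switch')
# plt.legend(); plt.xlabel('time, h'); plt.show()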
## Time steps considered (days are equivalent to number of cycles)
times = np.zeros(1)
for i in range(days):
# startup
times = np.append(
times,
period*i + np.linspace(0, 0.2, 11)[1:]
)
# hold (linear)
times = np.append(
times,
period*i + np.linspace(0.2, 9.8, 25)[1:]
)
# # hold (logarithmic relaxation)
# times = np.append(
# times,
# period*i + np.logspace(np.log10(0.2), np.log10(9.8), 10)[1:]
# )
# shutdown
times = np.append(
times,
period*i + np.linspace(9.8, 10, 11)[1:]
)
## Tube circumferential flux component (cosine distribution):
cos_theta = lambda theta: np.maximum(0,np.cos(theta))
## Flux with time and location on receiver
flux_time = lambda t, theta, a, z: ramp(t) * cos_theta(theta) * flux_interp(a, z)
## ID fluid temperature histories for each tube
T_ref = 293.15
fluid_temp_time = lambda t, a, z: T_ref + \
(onoff(t) * (fluid_temp_interp(a, z)-T_ref))
## ID pressure history
p_max = 1.5 # MPa
pressure = lambda t: p_max * onoff(t)
## A mesh over the times and height (for the fluid temperatures)
time_h, z_h = np.meshgrid(
times, z_tubes, indexing='ij'
)
## A surface mesh over the outer surface (for the flux)
time_s, theta_s, z_s = np.meshgrid(
times, np.linspace(0,2*np.pi,nt+1)[:nt],
np.linspace(0,height,nz), indexing = 'ij'
)
## Add tube0 (hottest) to its own panel:
tube0 = receiver.Tube(ro_tube, wt_tube, height, nr, nt, nz, T0 = T_ref)
tube0.set_times(times)
tube0.set_bc(
receiver.ConvectiveBC(
ro_tube-wt_tube, height, nz, times, fluid_temp_time(time_h, a_tubes[0], z_h)
), "inner"
)
tube0.set_bc(
receiver.HeatFluxBC(
ro_tube, height, nt, nz, times,
flux_time(time_s, theta_s, a_tubes[0], z_s)
), "outer"
)
tube0.set_pressure_bc(receiver.PressureBC(times, pressure(times)))
## Setup the panel0 and add tube0 to it:
tube_stiffness = "rigid"
panel0 = receiver.Panel(tube_stiffness)
panel0.add_tube(tube0, 'tube0')
## Add panel to model and save to an HDF5 file
model.add_panel(panel0, 'panel0')
model.save("model.hdf5")
|
{"hexsha": "fcac1dbae184266b47daf1a9cb982409c3a42ca4", "size": 5720, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/gen3L-sodium/setup_tube0.py", "max_stars_repo_name": "willietheboy/srlife-dev", "max_stars_repo_head_hexsha": "d4c2d28b40d2ee1bf64c7555a913b0a49adffe0f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/gen3L-sodium/setup_tube0.py", "max_issues_repo_name": "willietheboy/srlife-dev", "max_issues_repo_head_hexsha": "d4c2d28b40d2ee1bf64c7555a913b0a49adffe0f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/gen3L-sodium/setup_tube0.py", "max_forks_repo_name": "willietheboy/srlife-dev", "max_forks_repo_head_hexsha": "d4c2d28b40d2ee1bf64c7555a913b0a49adffe0f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.063583815, "max_line_length": 83, "alphanum_fraction": 0.656993007, "include": true, "reason": "import numpy,import scipy,from scipy", "num_tokens": 1785}
|
import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
caminho_base='/home/prbpedro/Development/repositories/github/bootcamp_artificial_intelligence/src/input/'
dados_completos=pd.read_csv(caminho_base + 'airline-passengers.csv')
print(dados_completos.head())
dados_completos.info()
dados_completos['datetime']=pd.to_datetime(dados_completos['Month'])
print(dados_completos.head())
dados_completos.info()
# check the data type
print(type(dados_completos))
# check for outliers
plt.figure(figsize=(10, 10))
g = sns.boxplot(dados_completos['Passengers'])  # plot with the seaborn library
g.set_title('Box plot of passenger boardings')
# plot the variation in the number of passengers over the period
plt.figure(figsize=(20, 10))
g = sns.lineplot(x=dados_completos.index, y=dados_completos['Passengers'])
g.set_title('Time series of passenger boardings')
g.set_xlabel('Index')
g.set_ylabel('Number of airline passengers')
# decompose the time series
# to find the trend, seasonality and noise components of the model
# library responsible for the time series decomposition
from statsmodels.tsa.seasonal import seasonal_decompose
# change the index to be temporal
df_serie_temporal = dados_completos.set_index('datetime')
# check the existing columns
print(df_serie_temporal.columns)
# drop the 'Month' column
df_serie_temporal.drop('Month', axis=1, inplace=True)
# check the new dataset
print(df_serie_temporal.head())
# build the time series decomposition model
# apply the additive decomposition model
decomposicao_aditiva = seasonal_decompose(df_serie_temporal, model='additive', extrapolate_trend='freq')
# plot the decomposition
from pylab import rcParams
rcParams['figure.figsize'] = 18, 8
fig = decomposicao_aditiva.plot()
plt.show()
# test the stationarity of the time series
# import the ADF test
from statsmodels.tsa.stattools import adfuller
# apply the ADF test
resultado_ADF = adfuller(df_serie_temporal.Passengers.values, autolag='AIC')
# for the ADF test the null hypothesis is that the time series has a unit root
# (i.e. the series is non-stationary)
# with a p-value greater than 0.05 the null hypothesis is not rejected:
# p > 0.05 -> non-stationary
# p < 0.05 -> stationary
print('ADF p-value:', resultado_ADF[1])
# remove the trend
detrended = decomposicao_aditiva.resid + decomposicao_aditiva.seasonal
plt.plot(detrended)
plt.show()
# remove the seasonality
deseasonalized = decomposicao_aditiva.trend + decomposicao_aditiva.resid
plt.plot(deseasonalized)
plt.show()
# autocorrelation analysis of the data
# import the library for the autocorrelation plot
from statsmodels.graphics.tsaplots import plot_acf
# compute the autocorrelation of the data
plot_acf(df_serie_temporal, lags=50)
# shows a significant correlation at 14 lags
plt.show()
# transforming the series into a stationary one
# apply the first difference (the "shift" is the discrete-time analogue of the derivative)
df_serie_temporal['Passengers_diff'] = df_serie_temporal['Passengers'] - df_serie_temporal['Passengers'].shift(1)
# drop the NaN introduced by the shift before plotting (assigning a dropna()
# result back to the column would realign on the index and keep the NaN)
df_serie_temporal['Passengers_diff'].dropna().plot()
plt.show()
# check whether the differenced series is now stationary
X_diff = df_serie_temporal['Passengers_diff'].dropna().values
resultado_primeira_diff = adfuller(X_diff)
# the p-value is practically 0.05, so strictly the null hypothesis is not
# rejected, but we will treat the series as stationary
print('p-value: %f' % resultado_primeira_diff[1])
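# If the first difference were still non-stationary, a second difference could
# be checked the same way (a sketch, left commented out):
# X_diff2 = pd.Series(X_diff).diff().dropna().values
# print('p-value (2nd diff): %f' % adfuller(X_diff2)[1])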
# libraries used to build the ticket-sales forecasting models
import numpy
import matplotlib.pyplot as plt
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense  # fully-connected layer
from tensorflow.keras.layers import LSTM  # recurrent LSTM layer
from sklearn.preprocessing import MinMaxScaler
# take the series back to its original (undifferenced) values
serie_passageiros = df_serie_temporal['Passengers'].values
# normalise the data, needed for the algorithms to behave more predictably
# create the object that normalises the data using the minimum and maximum values
scaler = MinMaxScaler(feature_range=(0, 1))
# apply the scaling
dataset = scaler.fit_transform(serie_passageiros.reshape(-1, 1))
print(dataset[0:20])
# split the dataset into training and test sets
train_size = int(len(dataset) * 0.67)  # size of the training portion
test_size = len(dataset) - train_size
train, test = dataset[0:train_size, :], dataset[train_size:len(dataset), :]
print(len(train), len(test))  # sizes of the training and test sets
# build the matrices needed as model input
def create_dataset(dataset, look_back=1):
dataX, dataY = [], []
for i in range(len(dataset)-look_back-1):
a = dataset[i:(i+look_back), 0]
dataX.append(a)
dataY.append(dataset[i + look_back, 0])
return numpy.array(dataX), numpy.array(dataY)
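# A quick illustration of what create_dataset produces (a sketch, left
# commented out): with look_back=3, each row of dataX holds 3 consecutive
# values and dataY holds the value that follows them.
# ex_X, ex_y = create_dataset(numpy.arange(10).reshape(-1, 1), look_back=3)
# ex_X.shape -> (6, 3); ex_y.shape -> (6,)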
# reshape the data into the format expected by the network
look_back = 14  # the 14 previous steps are used to predict the next value
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
trainX = numpy.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))
testX = numpy.reshape(testX, (testX.shape[0], 1, testX.shape[1]))
trainX.shape
# build the model using a recurrent network with LSTM units
model = Sequential()
model.add(LSTM(4, input_shape=(1, look_back)))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
# show the network architecture
model.summary()
# train the forecasting model
model.fit(trainX, trainY, epochs=100, batch_size=1, verbose=2)
# make the predictions
trainPredict = model.predict(trainX)
testPredict = model.predict(testX)
# put the data back on the original scale
trainPredict = scaler.inverse_transform(trainPredict)
trainY = scaler.inverse_transform([trainY])
testPredict = scaler.inverse_transform(testPredict)
testY = scaler.inverse_transform([testY])
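# Optional evaluation sketch (an addition; assumes scikit-learn is available):
# report RMSE on the original scale for both splits.
from math import sqrt
from sklearn.metrics import mean_squared_error
print('Train RMSE: %.2f' % sqrt(mean_squared_error(trainY[0], trainPredict[:, 0])))
print('Test RMSE: %.2f' % sqrt(mean_squared_error(testY[0], testPredict[:, 0])))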
# shift the train predictions for plotting
trainPredictPlot = numpy.empty_like(dataset)
trainPredictPlot[:, :] = numpy.nan
trainPredictPlot[look_back:len(trainPredict)+look_back, :] = trainPredict
# shift the test predictions for plotting
testPredictPlot = numpy.empty_like(dataset)
testPredictPlot[:, :] = numpy.nan
testPredictPlot[len(trainPredict)+(look_back*2)+1:len(dataset)-1, :] = testPredict
# plot the predictions against the real data
plt.plot(scaler.inverse_transform(dataset), label='Dataset')
plt.plot(trainPredictPlot, label='Training')
plt.plot(testPredictPlot, label='Prediction')
plt.xlabel("Time")
plt.ylabel("Number of Tickets Sold")
plt.legend()
plt.show()
|
{"hexsha": "39c0a2affa46406897897c0485059e82505e07a4", "size": 6887, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/modulo2/airline_passengers_analysis.py", "max_stars_repo_name": "prbpedro/bootcamp_machine_learning", "max_stars_repo_head_hexsha": "1713e121cd333c8e80ef05aac0365e886ed9dab1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/modulo2/airline_passengers_analysis.py", "max_issues_repo_name": "prbpedro/bootcamp_machine_learning", "max_issues_repo_head_hexsha": "1713e121cd333c8e80ef05aac0365e886ed9dab1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2020-11-13T17:46:14.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-12T00:31:46.000Z", "max_forks_repo_path": "src/modulo2/airline_passengers_analysis.py", "max_forks_repo_name": "prbpedro/bootcamp_machine_learning", "max_forks_repo_head_hexsha": "1713e121cd333c8e80ef05aac0365e886ed9dab1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.5, "max_line_length": 115, "alphanum_fraction": 0.7903296065, "include": true, "reason": "import numpy,from statsmodels", "num_tokens": 1884}
|
\section{Mundane Objects}\label{sec:mundaneObjects}
\rowcolors{2}{lightgray}{white}
\begin{longtable}{l | r | r}
Name & Size & Price (In Gold)\\ \hline
Bedroll & M & 5\\
Blanket & M & 3\\
Chain, \passus{2} & S & 20\\
Crowbar & S & 10\\
Fire Steel & T & 10\\
Grappling Hook & S & 50\\
Lantern & S & 100\\
Lock, Very Easy (DV 15) & T & 25\\
Lock, Easy (DV 20) & T & 50\\
Lock, Medium (DV 25) & T & 100\\
Lock, Hard (DV 30) & T & 500\\
Lock, Very Hard (DV 35) & T & 1000\\
Lockpick Set & T & 25\\
Lockpick Set, Good & T & 100\\
Lockpick Set, Great & T & 500\\
Lockpick Set, Masterful & T & 1000\\
Paper, per Page & T & 2\\
Rope, \passus{10} & M & 50\\
Runestone & S & 100\\
Shackles & T & 100\\
Small Mirror & T & 100\\
Spellbook & S & 100\\
Tent & M/XL & 25\\
\end{longtable}
A \textbf{Backpack} can hold up to the equivalent of 1 Large Item.\\
A \textbf{Bedroll} has enough room for one person and can be folded and attached to a backpack without actually removing space from it.\\
A \textbf{Blanket} keeps up to one person warm.
In addition to a bedroll, it can be used to brave cold temperatures.\\
A \textbf{Chain} is made of interlocking iron rings and can be broken only by making a DV 35 Strength check.\\
A \textbf{Crowbar} awards +5 on checks to open crates, break down doors and windows etc.\\
\textbf{Fire Steel} is used in combination with flint (included) to create sparks.
Creating a fire in this way doesn't require any checks, but still takes 5 minutes.\\
A \textbf{Grappling Hook} can be used in combination with a rope in order to scale featureless surfaces, as long as the user can hook it to something.\\
A \textbf{Lantern} sheds light in a \passus{6} Cone, and uses 1 vial of lantern oil per hour.\\
\textbf{Locks} can be attached to doors, chests, chains, etc.
Each lock comes with a key when bought, and remaking a key costs half as much as the lock itself.
The DV in brackets denotes how difficult it is to pick the lock.\\
\textbf{Lockpick Sets} contain multiple sets of lockpicks, as well as pryers and short pieces of metal wire.
Higher quality lockpicks increase your chances of picking locks with them.
While normal lockpick sets don't give any bonuses, good lockpick sets add +2, great lockpick sets add +4 and masterful lockpick sets add +6 to checks involving picking locks with them.
Lockpick sets have a 1 in 10 chance of breaking if you roll a natural 1 on a check to pick a lock.\\
\textbf{Paper} can be written on.
One piece of paper is considered to be one sixteenth of a square passus in size, i.e. a square of \fin{25}.\\
A \textbf{Prayer Book} holds up to 100 prayers, and requires magical ink to write something in it.
A character changing the prayers that they are using has to do so from their prayer book.\\
A \textbf{Rope} comes in different strengths and can be used for various things.\\
A \textbf{Runestone} is a fist-sized stone with a magical Rune engraved in it.
It is used to cause the effects of~\nameref{ch:runes}.\\
\textbf{Shackles} can be used to bind a person.
They come with one key, and making another key for a pair of shackles requires 20 gold.\\
A \textbf{Small Mirror}, made of steel and glass, can be used to reflect things.
It has a handle that can be used to attach it to a stick with a notch.\\
A \textbf{Spellbook} holds up to 100 spell ranks, and requires magical ink to write something in it.
A character casting a spell has to have that spell in their spellbook and recite it out of it, or have it remembered.
A character can spend one hour to remember a spell out of a spellbook.
A character can remember as many spells as they have Intellect.\\
Writing a known spell into a spellbook takes one hour per Rank.\\
A \textbf{Tent} is made of skins and fabric and can hold up to 4 people, and can be hooked into the earth in order to withstand winds.\\
A \textbf{Waterskin} provides enough room for \pugnus{5} of fluid.
Of water, that is the amount a person needs to drink per day to survive.
When empty, it is considered to be T.
While filled with water, the waterskin is considered to be S.\\
|
{"hexsha": "378ba956246ebed1409a7f8608e37622642fb69d", "size": 4097, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "items/items/mundane.tex", "max_stars_repo_name": "NTrixner/RaggedLandsPenAndPaper", "max_stars_repo_head_hexsha": "73781f7cd7035b927a35199af56f9da2ad2c2e95", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2018-03-13T09:33:31.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-03T09:32:08.000Z", "max_issues_repo_path": "items/items/mundane.tex", "max_issues_repo_name": "NTrixner/RaggedLandsPenAndPaper", "max_issues_repo_head_hexsha": "73781f7cd7035b927a35199af56f9da2ad2c2e95", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 155, "max_issues_repo_issues_event_min_datetime": "2018-03-18T13:19:57.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-03T13:49:05.000Z", "max_forks_repo_path": "items/items/mundane.tex", "max_forks_repo_name": "NTrixner/RaggedLandsPenAndPaper", "max_forks_repo_head_hexsha": "73781f7cd7035b927a35199af56f9da2ad2c2e95", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 48.2, "max_line_length": 184, "alphanum_fraction": 0.727605565, "num_tokens": 1213}
|
import flywheel
import logging
import warnings
import argparse
import os
import pandas as pd
import numpy as np
from fw_heudiconv.cli import tabulate
from fw_heudiconv.cli.export import get_nested
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('flaudit')
def get_sessions(client, project_label, subject_labels=None, session_labels=None):
"""Query the flywheel client for a project name
This function uses the flywheel API to find the first match of a project
name. The name must be exact so make sure to type it as is on the
website/GUI.
Parameters
---------
client
The flywheel Client class object.
project_label
The name of the project to search for.
subject_labels
List of subject IDs
session_labels
List of session IDs
Returns
---------
sessions
A list of session objects
"""
logger.info("Querying Flywheel server...")
with warnings.catch_warnings():
warnings.simplefilter("ignore")
project_obj = client.projects.find_first(
'label="{}"'.format(project_label))
assert project_obj, "Project not found! Maybe check spelling...?"
logger.debug('Found project: %s (%s)',
project_obj['label'], project_obj.id)
sessions = client.get_project_sessions(project_obj.id)
# filters
if subject_labels:
sessions = [s for s in sessions if s.subject['label']
in subject_labels]
if session_labels:
sessions = [s for s in sessions if s.label in session_labels]
sessions = [client.get(s.id) for s in sessions]
return sessions
def gather_jobs(sessions_list, verbose):
'''
Creates a dataframe summarising the gear jobs that have run on the list of sessions
'''
logger.info("Collecting gear run information...")
df = pd.DataFrame()
for sess in sessions_list:
if len(sess.analyses) < 1:
# basic_row = {
# 'job_id': None,
# 'subject': sess.subject.label,
# 'session': sess.label,
# 'gear_name': None,
# 'gear_version': None,
# 'run_label': None,
# 'run_datetime': None,
# 'run_runtime_mins': None,
# 'run_status': None
# }
#
# final = pd.DataFrame(basic_row, index=[0])
#
# df = pd.concat([df, final])
continue
else:
for al in sess.analyses:
# for each analysis, get the basic runtime information
basic_row = {
'job_id': al.id,
'subject': sess.subject.label,
'session': sess.label,
'gear_name': al.gear_info['name'],
'gear_version': al.gear_info['version'],
'run_label': al.label,
'run_datetime': al.job['created'],
'run_runtime_ms': al.job.profile['elapsed_time_ms'],
'run_status': al.job.state
}
# create a pandas row
final = pd.DataFrame(basic_row, index=[0])
if verbose:
# also collect the config and arrange as a long table
config = al.job.config['config']
if config:
for k, v in config.items():
if v == "":
config[k] = np.nan
inputs = al.job.inputs
if inputs:
vals = list(inputs.values())[0]
vals['Inputs_Option'] = list(inputs.keys())[0]
inputs = vals
else:
inputs = {
'type': np.nan,
'id': np.nan,
'name': np.nan,
'Inputs_Option': np.nan
}
config_cols = pd.DataFrame(list(config.items()), columns=[
'Config_Option', 'Config_Value'])
inputs_cols = pd.DataFrame(inputs, index=[0])
inputs_cols.rename(columns={
'type': 'Inputs_Attached_To', 'id': 'Inputs_ID', 'name': 'Inputs_Name'}, inplace=True)
final = pd.concat(
[final, inputs_cols, config_cols], axis=1)
df = pd.concat([df, final])
df.loc[:, ~df.columns.str.contains(
"Config")] = df.loc[:, ~df.columns.str.contains("Config")].ffill()
return df
def get_bids_from_acq(client, id):
acq = client.get(id)
sess_lab = client.get(acq['parents']['session']).label
niftis = [x for x in acq.files if x.type in ['nifti', 'bval', 'bvec']]
if not niftis:
return sess_lab, []  # keep the (session, rows) tuple shape so callers can unpack safely
else:
rows = []
for nii in niftis:
rows.append(get_nested(nii, 'info', 'BIDS'))
return sess_lab, rows
def gather_bids_for_seqs(client, df):
bids_df = pd.DataFrame(
# columns = [
#'series_id', 'Task', 'Run', 'error_message', 'Ce', 'Filename',
#'Filename', 'ignore', 'Acq', 'valid', 'template',
#'Rec', 'Path', 'Folder', 'Echo', 'Modality', 'Dir', 'Mod'
# ]
)
df2 = df.copy()
for index, row in df.iterrows():
# print(row['series_id'])
sess_lab, bids = get_bids_from_acq(client, row['series_id'])
bids = [x for x in bids if x is not None and x != 'NA']
if bids:
bids_df_temp = pd.DataFrame(bids)
bids_df_temp['series_id'] = row['series_id']
bids_df_temp['session_id'] = sess_lab
bids_df = pd.concat([bids_df, bids_df_temp], sort=False)
return bids_df
def get_session_label(client, col):
result = []
for i in col:
obj = client.get(i).parents.session
result.append(client.get(obj).label)
return result
def gather_seqInfo(client, project_label, subject_labels=None, session_labels=None, dry_run=False, unique=False):
'''
Runs fw-heudiconv tabulate to gather sequence information for the project.
Inputs:
client, project_label, and optional subject/session label filters
'''
df = tabulate.tabulate_bids(client, project_label, subject_labels=subject_labels,
session_labels=session_labels, dry_run=dry_run, unique=unique)
#df_out = gather_bids_for_seqs(client, df)
df['session_id'] = get_session_label(client, df.series_id)
return df
def pull_attachments_from_object(obj):
attachments = obj.files
data = {
'Name': [],
'Type': [],
'MIMEType': [],
'Size_kb': []
}
for f in attachments:
data['Name'].append(f['name'])
data['Type'].append(f['type'])
data['MIMEType'].append(f['mimetype'])
data['Size_kb'].append(f['size'])
return pd.DataFrame(data)
def gather_attachments(client, project_label, project_level=True, subject_level=True, session_level=True, acquisition_level=True, include_images=False):
'''Loop over Flywheel data and consolidate all objects' attachments
Parameters
---------
client
The flywheel Client class object.
project_label
The name of the project to search for.
*_level
boolean; search attachments at this level
include_images
boolean; include nifti/dicom image files in the search
Returns
---------
attachments
A pandas DataFrame of attachment metadata
'''
assert any([project_level, subject_level, session_level,
acquisition_level]), "No attachment levels requested."
with warnings.catch_warnings():
warnings.simplefilter("ignore")
project_obj = client.projects.find_first(
'label="{}"'.format(project_label))
assert project_obj, "Project not found! Maybe check spelling...?"
logger.debug('Found project: %s (%s)',
project_obj['label'], project_obj.id)
attachments = pd.DataFrame()
if project_level:
df = pull_attachments_from_object(project_obj)
df['Origin_Level'] = "Project"
df['Origin_Label'] = project_obj.label
df['Origin_ID'] = project_obj.id
attachments = pd.concat([attachments, df])
if subject_level:
subjects = [client.get(x.id) for x in project_obj.subjects()]
assert subjects, "No subjects found!"
for sub in subjects:
df = pull_attachments_from_object(sub)
df['Origin_Level'] = "Subject"
df['Origin_Label'] = sub.label
df['Origin_ID'] = sub.id
attachments = pd.concat([attachments, df])
if session_level:
sessions = [client.get(x.id)
for x in client.get_project_sessions(project_obj.id)]
assert sessions, "No sessions found!"
for sess in sessions:
df = pull_attachments_from_object(sess)
df['Origin_Level'] = "Session"
df['Origin_Label'] = sess.label
df['Origin_ID'] = sess.id
attachments = pd.concat([attachments, df])
if acquisition_level:
sessions = client.get_project_sessions(project_obj.id)
assert sessions, "No sessions found!"
for sess in sessions:
acquisitions = sess.acquisitions()
for acq in acquisitions:
df = pull_attachments_from_object(acq)
df['Origin_Level'] = "Acquisition"
df['Origin_Label'] = acq.label
df['Origin_ID'] = acq.id
if not include_images:
df = df[(df['Type'] != 'nifti') & (df['Type'] != 'dicom')]
attachments = pd.concat([attachments, df])
return attachments
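# Example usage (a sketch; assumes valid Flywheel credentials and project name):
# fw = flywheel.Client()
# attachments_df = gather_attachments(fw, "MyProject", acquisition_level=False)
# print(attachments_df.head())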
def get_parser():
parser = argparse.ArgumentParser(
description="FLAUDIT: Flywheel Audit")
parser.add_argument(
"--project",
help="The project in Flywheel",
required=True
)
parser.add_argument(
"--subject",
help="The subject(s)",
nargs="+",
default=None,
type=str
)
parser.add_argument(
"--session",
help="The session(s)",
nargs="+",
default=None,
type=str
)
parser.add_argument(
"--destination",
help="Path to destination directory",
default=".",
type=str
)
parser.add_argument(
"--api-key",
help="API Key",
action='store',
default=None
)
parser.add_argument(
"--verbose",
help="Print ongoing messages of progress",
action='store_true',
default=False
)
return parser
def main():
#logger.info("{:=^70}\n".format(": fw-heudiconv exporter starting up :"))
parser = get_parser()
args = parser.parse_args()
with warnings.catch_warnings():
warnings.simplefilter("ignore")
if args.api_key:
fw = flywheel.Client(args.api_key)
else:
fw = flywheel.Client()
assert fw, "Your Flywheel CLI credentials aren't set!"
destination = args.destination
if not os.path.exists(destination):
logger.info("Creating destination directory...")
os.makedirs(args.destination)
logger.info("Gathering sessions...")
sessions = get_sessions(client=fw,
project_label=args.project,
session_labels=args.session,
subject_labels=args.subject)
logger.info("Gathering sequences...")
seqinfo = gather_seqInfo(client=fw,
project_label=args.project,
session_labels=args.session,
subject_labels=args.subject
)
logger.info("Gathering BIDS data...")
bids = gather_bids_for_seqs(client=fw, df=seqinfo)
logger.info("Gathering jobs...")
jobs = gather_jobs(sessions_list=sessions, verbose=True)
logger.info("Gathering attachments...")
attachments = gather_attachments(client=fw, project_label=args.project)
logger.info("Writing output data...")
seqinfo.to_csv("{}/seqinfo.csv".format(args.destination), index=False)
bids.to_csv("{}/bids.csv".format(args.destination), index=False)
jobs.to_csv("{}/jobs.csv".format(args.destination), index=False)
attachments.to_csv(
"{}/attachments.csv".format(args.destination), index=False)
logger.info("Done!")
#logger.info("{:=^70}".format(": Exiting fw-heudiconv exporter :"))
if __name__ == '__main__':
main()
|
{"hexsha": "e18fd38b59912cf93c62549fb046557df67c4acf", "size": 12818, "ext": "py", "lang": "Python", "max_stars_repo_path": "flaudit/cli/gather_data.py", "max_stars_repo_name": "PennBBL/flaudit", "max_stars_repo_head_hexsha": "dd022fc93367fc850d05a0e2e901ded246efd724", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "flaudit/cli/gather_data.py", "max_issues_repo_name": "PennBBL/flaudit", "max_issues_repo_head_hexsha": "dd022fc93367fc850d05a0e2e901ded246efd724", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "flaudit/cli/gather_data.py", "max_forks_repo_name": "PennBBL/flaudit", "max_forks_repo_head_hexsha": "dd022fc93367fc850d05a0e2e901ded246efd724", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.6027713626, "max_line_length": 152, "alphanum_fraction": 0.5577313153, "include": true, "reason": "import numpy", "num_tokens": 2697}
|
"""
Learning with networks that can process sequential data.
"""
from sklearn.base import ClassifierMixin, RegressorMixin, BaseEstimator, TransformerMixin
from sklearn.preprocessing import LabelEncoder, FunctionTransformer
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from abc import abstractmethod
import tempfile
import numpy as np
# Keras preprocessing - making it picklable
# The function is run only when keras is necessary
def make_keras_picklable():
import keras.models
cls = keras.models.Model
if hasattr(cls, "is_now_picklable"):
return
cls.is_now_picklable = True
def __getstate__(self):
model_str = ""
with tempfile.NamedTemporaryFile(suffix='.hdf5', delete=True) as fd:
keras.models.save_model(self, fd.name, overwrite=True)
model_str = fd.read()
d = { 'model_str': model_str }
return d
def __setstate__(self, state):
with tempfile.NamedTemporaryFile(suffix='.hdf5', delete=True) as fd:
fd.write(state['model_str'])
fd.flush()
model = keras.models.load_model(fd.name)
self.__dict__ = model.__dict__
cls.__getstate__ = __getstate__
cls.__setstate__ = __setstate__
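# A minimal pickling round-trip sketch (assumes keras is installed and `model`
# is any compiled keras Model):
# import pickle
# make_keras_picklable()
# restored = pickle.loads(pickle.dumps(model))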
class KerasNNBase(BaseEstimator):
"""
Recurrent neural networks using keras as a backend.
Parameters
----------
n_neurons : [int, default=32]
Width of the neural network.
lr : [float, default=1e-4]
Learning rate used in the Adam optimizer for the network.
beta1 : [float, default=0.9]
beta_1 parameter of the Adam optimization algorithm.
beta2 : [float, default=0.99]
beta_2 parameter of the Adam optimization algorithm.
"""
def __init__(self, n_neurons=32, n_layers=1, lr=1e-4, beta1=0.9, beta2=0.99,
batch_size=128, max_iter=128, max_patience=1e10, val_fraction=0.2):
self.n_neurons = n_neurons
self.n_layers = n_layers
self.lr = lr
self.beta1 = beta1
self.beta2 = beta2
self.batch_size = batch_size
self.max_iter = max_iter
self.val_fraction = val_fraction
self.max_patience = max_patience
self.model = None  # future keras model, set in _make_model
def fit(self, X, y):
"""
Fit RNN model.
Parameters
----------
X : array of array of sequences [n_samples, seq_length, n_features]
y : numpy array of shape [n_samples]
Target classes. Can be string, int etc.
Returns
-------
self : returns an instance of self.
"""
from keras.optimizers import Adam
from copy import deepcopy
make_keras_picklable()
optimizer = Adam(
lr=self.lr,
beta_1=self.beta1,
beta_2=self.beta2
)
self._make_model(X, y, optimizer)
y = self.encoder.transform(y)
# split data into training and validation parts
#X_train, X_val, y_train, y_val = train_test_split(X, y, train_size=(1.0 - self.val_fraction))
X_train = X
y_train = y
best_loss_ = 100000000000.0
patience = self.max_patience
max_iter = self.max_iter
best_model_ = [np.copy(w) for w in self.model.get_weights()]
while patience > 0 and max_iter > 0:
max_iter -= 1
val_loss = self.model.fit(X_train,y_train, epochs=1, batch_size=self.batch_size, verbose=0)
val_loss = val_loss.history['loss'][-1]
#val_loss = self.model.evaluate(X_val, y_val, verbose=0)
if np.isnan(val_loss) or np.isinf(val_loss):
break
best_model_ = [np.copy(w) for w in self.model.get_weights()]
"""
if val_loss < best_loss_:
best_loss_ = val_loss
patience = self.max_patience
else:
patience -= 1
"""
self.model.set_weights(best_model_)
return self
def _predict(self, X):
raise NotImplementedError("Abstract method not implemented!")
def predict(self, X):
return self.encoder.inverse_transform(self._predict(X))
class KerasClassifierBase(KerasNNBase, ClassifierMixin):
@abstractmethod
def create_architecture(self, X, n_classes):
"""
Generates the architecture of nn to be trained.
"""
def _make_model(self, X, y, optimizer):
import keras.models
from keras.layers import Input, Dense, Conv1D, Flatten
from keras.layers import Activation
from keras.optimizers import Adam
n_classes = len(np.unique(y))
self.encoder = LabelEncoder()
self.encoder.fit(y)
y = self.encoder.transform(y)
try:
model = self.create_architecture(X, n_classes)
except BaseException as ex:
ip = Input(shape=X[0].shape)
x = ip
x = Flatten()(x)
x = Dense(n_classes, activation='tanh')(x)
x = Activation('sigmoid')(x)
print('Infeasible!')
print(ex)
model = keras.models.Model(inputs=ip, outputs=x)
model.compile(
optimizer=optimizer,
loss='sparse_categorical_crossentropy'
)
self.model = model
def predict_proba(self, X):
make_keras_picklable()
return self.model.predict(X)
def _predict(self, X):
yp = self.predict_proba(X)
return np.argmax(yp, axis=1)
class RNNClassifier(KerasClassifierBase):
def create_architecture(self, X, n_classes):
import keras.models
from keras.layers import Input, Dense, GRU, Flatten
from keras.layers.advanced_activations import LeakyReLU
ip = Input(shape=X[0].shape)
x = ip
for i in range(self.n_layers):
x = GRU(self.n_neurons, return_sequences=True)(x)
x = Flatten()(x)
x = Dense(n_classes, activation='softmax')(x)
return keras.models.Model(inputs=ip, outputs=x)
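# Example usage (a sketch with synthetic data; not part of the library):
# import numpy as np
# X = np.random.randn(64, 10, 3).astype('float32')  # 64 sequences, 10 steps, 3 features
# y = np.random.choice(['a', 'b'], size=64)
# clf = RNNClassifier(n_neurons=8, max_iter=4)
# clf.fit(X, y); clf.predict(X[:5])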
class CNN1DClassifier(KerasClassifierBase):
def __init__(self, conv_sz=3, stride=1, n_neurons=32, n_layers=1, lr=1e-4, beta1=0.9, beta2=0.99,
batch_size=128, max_iter=128, max_patience=32, val_fraction=0.2):
super(CNN1DClassifier, self).__init__(
n_neurons=n_neurons, n_layers=n_layers, lr=lr, beta1=beta1, beta2=beta2,
batch_size=batch_size, max_iter=max_iter, max_patience=max_patience, val_fraction=val_fraction
)
self.conv_sz = conv_sz
self.stride = stride
def create_architecture(self, X, n_classes):
import keras.models
from keras.layers import Input, Dense, Conv1D, Flatten
from keras.layers.advanced_activations import LeakyReLU
ip = Input(shape=X[0].shape)
x = ip
for i in range(self.n_layers):
x = Conv1D(filters=self.n_neurons, kernel_size=self.conv_sz,
strides=self.stride, padding='same')(x)
x = LeakyReLU(0.05)(x)
x = Flatten()(x)
x = Dense(n_classes, activation='softmax')(x)
return keras.models.Model(inputs=ip, outputs=x)
class DNNClassifier(KerasClassifierBase):
def create_architecture(self, X, n_classes):
import keras.models
from keras.layers import Input, Dense, Flatten
from keras.layers.advanced_activations import LeakyReLU
ip = Input(shape=X[0].shape)
x = ip
x = Flatten()(x)
for i in range(self.n_layers):
x = Dense(self.n_neurons)(x)
x = LeakyReLU(0.05)(x)
x = Dense(n_classes, activation='softmax')(x)
model = keras.models.Model(inputs=ip, outputs=x)
return model
class KerasRegressorBase(KerasNNBase, RegressorMixin):
@abstractmethod
def create_architecture(self, X):
"""
Creates architecture of regressor.
"""
def _make_model(self, X, y, optimizer):
import keras.models
from keras.layers import Input, Dense, GRU
from keras.optimizers import Adam
self.encoder = FunctionTransformer(func=lambda x: x, inverse_func=lambda x: x)
try:
model = self.create_architecture(X)
except BaseException as ex:
ip = Input(shape=X[0].shape)
x = ip
x = Dense(1)(x)
model = keras.models.Model(inputs=ip, outputs=x)
model.compile(
optimizer=optimizer,
loss='mean_squared_error'
)
self.model = model
def _predict(self, X):
return self.model.predict(X)
class RNNRegressor(KerasRegressorBase):
def create_architecture(self, X):
import keras.models
from keras.layers import Input, Dense, GRU
from keras.layers.advanced_activations import LeakyReLU
ip = Input(shape=X[0].shape)
x = ip
for i in range(self.n_layers):
x = GRU(self.n_neurons)(x)
x = LeakyReLU(0.05)(x)
x = Dense(1)(x)
model = keras.models.Model(inputs=ip, outputs=x)
return model
class CNN1DRegressor(KerasRegressorBase):
def __init__(self, conv_sz, stride, *args, **kwargs):
super(CNN1DRegressor, self).__init__(
*args, **kwargs
)
self.conv_sz = conv_sz
self.stride = stride
def create_architecture(self, X):
import keras.models
from keras.layers import Input, Dense, Conv1D, Flatten
from keras.layers.advanced_activations import LeakyReLU
ip = Input(shape=X[0].shape)
x = ip
for i in range(self.n_layers):
x = Conv1D(self.n_neurons, self.conv_sz, self.stride, padding='same')(x)
x = LeakyReLU(0.05)(x)
x = Flatten()(x)
x = Dense(1)(x)
model = keras.models.Model(inputs=ip, outputs=x)
return model
class DNNRegressor(KerasRegressorBase):
def create_architecture(self, X):
import keras.models
from keras.layers import Input, Dense
from keras.layers.advanced_activations import LeakyReLU
ip = Input(shape=X[0].shape)
x = ip
for i in range(self.n_layers):
x = Dense(self.n_neurons)(x)
x = LeakyReLU(0.05)(x)
x = Dense(1)(x)
model = keras.models.Model(inputs=ip, outputs=x)
return model
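# Example regression usage (a sketch with synthetic data; not part of the library):
# import numpy as np
# X = np.random.randn(64, 10, 3).astype('float32')
# y = X.sum(axis=(1, 2))  # a simple synthetic target
# reg = DNNRegressor(n_neurons=16, n_layers=2, max_iter=8)
# reg.fit(X, y); y_hat = reg.predict(X)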
|
{"hexsha": "d80859d9b44fcd1b985abe8569f0068b3afb1be0", "size": 10485, "ext": "py", "lang": "Python", "max_stars_repo_path": "noxer/rnn.py", "max_stars_repo_name": "noxer-org/noxer", "max_stars_repo_head_hexsha": "45fa20ac7452c4b9c8ab5ea3f93ab47f41ad29cd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2017-10-19T02:14:56.000Z", "max_stars_repo_stars_event_max_datetime": "2017-10-19T02:14:56.000Z", "max_issues_repo_path": "noxer/rnn.py", "max_issues_repo_name": "noxer-org/noxer", "max_issues_repo_head_hexsha": "45fa20ac7452c4b9c8ab5ea3f93ab47f41ad29cd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2017-10-02T16:12:36.000Z", "max_issues_repo_issues_event_max_datetime": "2017-10-02T16:12:36.000Z", "max_forks_repo_path": "noxer/rnn.py", "max_forks_repo_name": "noxer-org/noxer", "max_forks_repo_head_hexsha": "45fa20ac7452c4b9c8ab5ea3f93ab47f41ad29cd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2017-10-02T12:49:08.000Z", "max_forks_repo_forks_event_max_datetime": "2017-10-19T02:14:58.000Z", "avg_line_length": 31.4864864865, "max_line_length": 106, "alphanum_fraction": 0.6121125417, "include": true, "reason": "import numpy", "num_tokens": 2482}
|