/-
Copyright (c) 2017 Mario Carneiro. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Mario Carneiro, Floris van Doorn, Violeta Hernández Palacios
! This file was ported from Lean 3 source module set_theory.cardinal.cofinality
! leanprover-community/mathlib commit bb168510ef455e9280a152e7f31673cabd3d7496
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathlib.SetTheory.Cardinal.Ordinal
import Mathlib.SetTheory.Ordinal.FixedPoint
/-!
# Cofinality
This file contains the definition of the cofinality of an ordinal number and of regular cardinals.
## Main Definitions
* `Ordinal.cof o` is the cofinality of the ordinal `o`.
If `o` is the order type of the relation `<` on `α`, then `o.cof` is the smallest cardinality of a
subset `s` of `α` that is *cofinal* in `α`, i.e. `∀ x : α, ∃ y ∈ s, ¬ y < x`.
* `Cardinal.IsLimit c` means that `c` is a (weak) limit cardinal: `c ≠ 0 ∧ ∀ x < c, succ x < c`.
* `Cardinal.IsStrongLimit c` means that `c` is a strong limit cardinal:
`c ≠ 0 ∧ ∀ x < c, 2 ^ x < c`.
* `Cardinal.IsRegular c` means that `c` is a regular cardinal: `ℵ₀ ≤ c ∧ c.ord.cof = c`.
* `Cardinal.IsInaccessible c` means that `c` is strongly inaccessible:
`ℵ₀ < c ∧ IsRegular c ∧ IsStrongLimit c`.
## Main Statements
* `Ordinal.infinite_pigeonhole_card`: the infinite pigeonhole principle
* `Cardinal.lt_power_cof`: A consequence of König's theorem stating that `c < c ^ c.ord.cof` for
`c ≥ ℵ₀`
* `Cardinal.univ_inaccessible`: The type of ordinals in `Type u` forms an inaccessible cardinal
(in `Type v` with `v > u`). This shows (externally) that in `Type u` there are at least `u`
inaccessible cardinals.
## Implementation Notes
* The cofinality is defined for ordinals.
If `c` is a cardinal number, its cofinality is `c.ord.cof`.
## Tags
cofinality, regular cardinals, limit cardinals, inaccessible cardinals,
infinite pigeonhole principle
-/
noncomputable section
open Function Cardinal Set Order
open Classical Cardinal Ordinal
universe u v w
variable {α : Type _} {r : α → α → Prop}
/-! ### Cofinality of orders -/
namespace Order
/-- Cofinality of a reflexive order `≼`. This is the smallest cardinality
of a subset `S : Set α` such that `∀ a, ∃ b ∈ S, a ≼ b`. -/
def cof (r : α → α → Prop) : Cardinal :=
infₛ { c | ∃ S : Set α, (∀ a, ∃ b ∈ S, r a b) ∧ (#S) = c }
#align order.cof Order.cof
/-- The set in the definition of `Order.cof` is nonempty. -/
theorem cof_nonempty (r : α → α → Prop) [IsRefl α r] :
{ c | ∃ S : Set α, (∀ a, ∃ b ∈ S, r a b) ∧ (#S) = c }.Nonempty :=
⟨_, Set.univ, fun a => ⟨a, ⟨⟩, refl _⟩, rfl⟩
#align order.cof_nonempty Order.cof_nonempty
theorem cof_le (r : α → α → Prop) {S : Set α} (h : ∀ a, ∃ b ∈ S, r a b) : cof r ≤ (#S) :=
cinfₛ_le' ⟨S, h, rfl⟩
#align order.cof_le Order.cof_le
theorem le_cof {r : α → α → Prop} [IsRefl α r] (c : Cardinal) :
c ≤ cof r ↔ ∀ {S : Set α}, (∀ a, ∃ b ∈ S, r a b) → c ≤ (#S) := by
rw [cof, le_cinfₛ_iff'' (cof_nonempty r)]
use fun H S h => H _ ⟨S, h, rfl⟩
rintro H d ⟨S, h, rfl⟩
exact H h
#align order.le_cof Order.le_cof
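-- Sanity-check example (a sketch; `Set.mem_univ` and `le_rfl` are assumed from the
-- ambient library): for the reflexive order `≤` on `ℕ`, the whole universe is a
-- cofinal set, so it bounds `Order.cof` from above.
example : Order.cof ((· ≤ ·) : ℕ → ℕ → Prop) ≤ (#(Set.univ : Set ℕ)) :=
  Order.cof_le _ fun a => ⟨a, Set.mem_univ a, le_rfl⟩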
end Order
theorem RelIso.cof_le_lift {α : Type u} {β : Type v} {r : α → α → Prop} {s} [IsRefl β s]
(f : r ≃r s) : Cardinal.lift.{max u v} (Order.cof r) ≤ Cardinal.lift.{max u v} (Order.cof s) :=
by
rw [Order.cof, Order.cof, lift_infₛ, lift_infₛ,
le_cinfₛ_iff'' (nonempty_image_iff.2 (Order.cof_nonempty s))]
rintro - ⟨-, ⟨u, H, rfl⟩, rfl⟩
apply cinfₛ_le'
refine'
⟨_, ⟨f.symm '' u, fun a => _, rfl⟩,
lift_mk_eq.{u, v, max u v}.2 ⟨(f.symm.toEquiv.image u).symm⟩⟩
rcases H (f a) with ⟨b, hb, hb'⟩
refine' ⟨f.symm b, mem_image_of_mem _ hb, f.map_rel_iff.1 _⟩
rwa [RelIso.apply_symm_apply]
#align rel_iso.cof_le_lift RelIso.cof_le_lift
theorem RelIso.cof_eq_lift {α : Type u} {β : Type v} {r s} [IsRefl α r] [IsRefl β s] (f : r ≃r s) :
Cardinal.lift.{max u v} (Order.cof r) = Cardinal.lift.{max u v} (Order.cof s) :=
(RelIso.cof_le_lift f).antisymm (RelIso.cof_le_lift f.symm)
#align rel_iso.cof_eq_lift RelIso.cof_eq_lift
theorem RelIso.cof_le {α β : Type u} {r : α → α → Prop} {s} [IsRefl β s] (f : r ≃r s) :
Order.cof r ≤ Order.cof s :=
lift_le.1 (RelIso.cof_le_lift f)
#align rel_iso.cof_le RelIso.cof_le
theorem RelIso.cof_eq {α β : Type u} {r s} [IsRefl α r] [IsRefl β s] (f : r ≃r s) :
Order.cof r = Order.cof s :=
lift_inj.1 (RelIso.cof_eq_lift f)
#align rel_iso.cof_eq RelIso.cof_eq
/-- Cofinality of a strict order `≺`. This is the smallest cardinality of a set `S : Set α` such
that `∀ a, ∃ b ∈ S, ¬ b ≺ a`. -/
def StrictOrder.cof (r : α → α → Prop) : Cardinal :=
Order.cof (swap rᶜ)
#align strict_order.cof StrictOrder.cof
/-- The set in the definition of `Order.StrictOrder.cof` is nonempty. -/
theorem StrictOrder.cof_nonempty (r : α → α → Prop) [IsIrrefl α r] :
{ c | ∃ S : Set α, Unbounded r S ∧ (#S) = c }.Nonempty :=
@Order.cof_nonempty α _ (IsRefl.swap (rᶜ))
#align strict_order.cof_nonempty StrictOrder.cof_nonempty
/-! ### Cofinality of ordinals -/
namespace Ordinal
/-- Cofinality of an ordinal. This is the smallest cardinality of a
subset `S` of the ordinal which is unbounded, in the sense
`∀ a, ∃ b ∈ S, a ≤ b`. It is defined for all ordinals, but
`cof 0 = 0` and `cof (succ o) = 1`, so it is only really
interesting on limit ordinals (when it is an infinite cardinal). -/
def cof (o : Ordinal.{u}) : Cardinal.{u} :=
o.liftOn (fun a => StrictOrder.cof a.r)
(by
rintro ⟨α, r, wo₁⟩ ⟨β, s, wo₂⟩ ⟨⟨f, hf⟩⟩
haveI := wo₁; haveI := wo₂
dsimp only
apply @RelIso.cof_eq _ _ _ _ ?_ ?_
· constructor
exact @fun a b => not_iff_not.2 hf
· dsimp only [swap]
exact ⟨fun _ => irrefl _⟩
· dsimp only [swap]
exact ⟨fun _ => irrefl _⟩)
#align ordinal.cof Ordinal.cof
theorem cof_type (r : α → α → Prop) [IsWellOrder α r] : (type r).cof = StrictOrder.cof r :=
rfl
#align ordinal.cof_type Ordinal.cof_type
theorem le_cof_type [IsWellOrder α r] {c} : c ≤ cof (type r) ↔ ∀ S, Unbounded r S → c ≤ (#S) :=
(le_cinfₛ_iff'' (StrictOrder.cof_nonempty r)).trans
⟨fun H S h => H _ ⟨S, h, rfl⟩, by
rintro H d ⟨S, h, rfl⟩
exact H _ h⟩
#align ordinal.le_cof_type Ordinal.le_cof_type
theorem cof_type_le [IsWellOrder α r] {S : Set α} (h : Unbounded r S) : cof (type r) ≤ (#S) :=
le_cof_type.1 le_rfl S h
#align ordinal.cof_type_le Ordinal.cof_type_le
theorem lt_cof_type [IsWellOrder α r] {S : Set α} : (#S) < cof (type r) → Bounded r S := by
simpa using not_imp_not.2 cof_type_le
#align ordinal.lt_cof_type Ordinal.lt_cof_type
theorem cof_eq (r : α → α → Prop) [IsWellOrder α r] : ∃ S, Unbounded r S ∧ (#S) = cof (type r) :=
cinfₛ_mem (StrictOrder.cof_nonempty r)
#align ordinal.cof_eq Ordinal.cof_eq
theorem ord_cof_eq (r : α → α → Prop) [IsWellOrder α r] :
∃ S, Unbounded r S ∧ type (Subrel r S) = (cof (type r)).ord := by
let ⟨S, hS, e⟩ := cof_eq r
let ⟨s, _, e'⟩ := Cardinal.ord_eq S
let T : Set α := { a | ∃ aS : a ∈ S, ∀ b : S, s b ⟨_, aS⟩ → r b a }
suffices : Unbounded r T
· refine' ⟨T, this, le_antisymm _ (Cardinal.ord_le.2 <| cof_type_le this)⟩
rw [← e, e']
refine'
(RelEmbedding.ofMonotone
(fun a : T =>
(⟨a,
let ⟨aS, _⟩ := a.2
aS⟩ :
S))
fun a b h => _).ordinal_type_le
rcases a with ⟨a, aS, ha⟩
rcases b with ⟨b, bS, hb⟩
change s ⟨a, _⟩ ⟨b, _⟩
refine' ((trichotomous_of s _ _).resolve_left fun hn => _).resolve_left _
· exact asymm h (ha _ hn)
· intro e
injection e with e
subst b
exact irrefl _ h
· intro a
have : { b : S | ¬r b a }.Nonempty :=
let ⟨b, bS, ba⟩ := hS a
⟨⟨b, bS⟩, ba⟩
let b := (IsWellFounded.wf : WellFounded s).min _ this
have ba : ¬r b a := IsWellFounded.wf.min_mem _ this
refine' ⟨b, ⟨b.2, fun c => not_imp_not.1 fun h => _⟩, ba⟩
rw [show ∀ b : S, (⟨b, b.2⟩ : S) = b by intro b; cases b; rfl]
exact IsWellFounded.wf.not_lt_min _ this (IsOrderConnected.neg_trans h ba)
#align ordinal.ord_cof_eq Ordinal.ord_cof_eq
/-! ### Cofinality of suprema and least strict upper bounds -/
private theorem card_mem_cof {o} : ∃ (ι : _)(f : ι → Ordinal), lsub.{u, u} f = o ∧ (#ι) = o.card :=
⟨_, _, lsub_typein o, mk_ordinal_out o⟩
/-- The set in the `lsub` characterization of `cof` is nonempty. -/
theorem cof_lsub_def_nonempty (o) :
{ a : Cardinal | ∃ (ι : _)(f : ι → Ordinal), lsub.{u, u} f = o ∧ (#ι) = a }.Nonempty :=
⟨_, card_mem_cof⟩
#align ordinal.cof_lsub_def_nonempty Ordinal.cof_lsub_def_nonempty
theorem cof_eq_infₛ_lsub (o : Ordinal.{u}) :
cof o = infₛ { a : Cardinal | ∃ (ι : Type u)(f : ι → Ordinal), lsub.{u, u} f = o ∧ (#ι) = a } :=
by
refine' le_antisymm (le_cinfₛ (cof_lsub_def_nonempty o) _) (cinfₛ_le' _)
· rintro a ⟨ι, f, hf, rfl⟩
rw [← type_lt o]
refine'
(cof_type_le fun a => _).trans
(@mk_le_of_injective _ _
(fun s : typein ((· < ·) : o.out.α → o.out.α → Prop) ⁻¹' Set.range f =>
Classical.choose s.prop)
fun s t hst => by
let H := congr_arg f hst
rwa [Classical.choose_spec s.prop, Classical.choose_spec t.prop, typein_inj,
Subtype.coe_inj] at H)
have := typein_lt_self a
simp_rw [← hf, lt_lsub_iff] at this
cases' this with i hi
refine' ⟨enum (· < ·) (f i) _, _, _⟩
· rw [type_lt, ← hf]
apply lt_lsub
· rw [mem_preimage, typein_enum]
exact mem_range_self i
· rwa [← typein_le_typein, typein_enum]
· rcases cof_eq (· < · : (Quotient.out o).α → (Quotient.out o).α → Prop) with ⟨S, hS, hS'⟩
let f : S → Ordinal := fun s => typein LT.lt s.val
refine'
⟨S, f, le_antisymm (lsub_le fun i => typein_lt_self i) (le_of_forall_lt fun a ha => _), by
rwa [type_lt o] at hS'⟩
rw [← type_lt o] at ha
rcases hS (enum (· < ·) a ha) with ⟨b, hb, hb'⟩
rw [← typein_le_typein, typein_enum] at hb'
exact hb'.trans_lt (lt_lsub.{u, u} f ⟨b, hb⟩)
#align ordinal.cof_eq_Inf_lsub Ordinal.cof_eq_infₛ_lsub
@[simp]
theorem lift_cof (o) : Cardinal.lift.{u, v} (cof o) = cof (Ordinal.lift.{u, v} o) := by
refine' inductionOn o _
intro α r _
apply le_antisymm
· refine' le_cof_type.2 fun S H => _
have : Cardinal.lift.{u, v} (#(ULift.up ⁻¹' S)) ≤ (#(S : Type (max u v))) := by
rw [← Cardinal.lift_umax.{v, u}, ← Cardinal.lift_id'.{v, u} (#S)]
refine mk_preimage_of_injective_lift.{v, max u v} ULift.up S (ULift.up_injective.{u, v})
refine' (Cardinal.lift_le.2 <| cof_type_le _).trans this
exact fun a =>
let ⟨⟨b⟩, bs, br⟩ := H ⟨a⟩
⟨b, bs, br⟩
· rcases cof_eq r with ⟨S, H, e'⟩
have : (#ULift.down.{u, v} ⁻¹' S) ≤ Cardinal.lift.{u, v} (#S) :=
⟨⟨fun ⟨⟨x⟩, h⟩ => ⟨⟨x, h⟩⟩, fun ⟨⟨x⟩, h₁⟩ ⟨⟨y⟩, h₂⟩ e => by
simp at e; congr⟩⟩
rw [e'] at this
refine' (cof_type_le _).trans this
exact fun ⟨a⟩ =>
let ⟨b, bs, br⟩ := H a
⟨⟨b⟩, bs, br⟩
#align ordinal.lift_cof Ordinal.lift_cof
theorem cof_le_card (o) : cof o ≤ card o := by
rw [cof_eq_infₛ_lsub]
exact cinfₛ_le' card_mem_cof
#align ordinal.cof_le_card Ordinal.cof_le_card
theorem cof_ord_le (c : Cardinal) : c.ord.cof ≤ c := by simpa using cof_le_card c.ord
#align ordinal.cof_ord_le Ordinal.cof_ord_le
theorem ord_cof_le (o : Ordinal.{u}) : o.cof.ord ≤ o :=
(ord_le_ord.2 (cof_le_card o)).trans (ord_card_le o)
#align ordinal.ord_cof_le Ordinal.ord_cof_le
theorem exists_lsub_cof (o : Ordinal) :
∃ (ι : _)(f : ι → Ordinal), lsub.{u, u} f = o ∧ (#ι) = cof o := by
rw [cof_eq_infₛ_lsub]
exact cinfₛ_mem (cof_lsub_def_nonempty o)
#align ordinal.exists_lsub_cof Ordinal.exists_lsub_cof
theorem cof_lsub_le {ι} (f : ι → Ordinal) : cof (lsub.{u, u} f) ≤ (#ι) := by
rw [cof_eq_infₛ_lsub]
exact cinfₛ_le' ⟨ι, f, rfl, rfl⟩
#align ordinal.cof_lsub_le Ordinal.cof_lsub_le
theorem cof_lsub_le_lift {ι} (f : ι → Ordinal) :
cof (lsub.{u, v} f) ≤ Cardinal.lift.{v, u} (#ι) := by
rw [← mk_uLift.{u, v}]
convert cof_lsub_le.{max u v} fun i : ULift.{v, u} ι => f i.down
exact
lsub_eq_of_range_eq.{u, max u v, max u v}
(Set.ext fun x => ⟨fun ⟨i, hi⟩ => ⟨ULift.up.{v, u} i, hi⟩, fun ⟨i, hi⟩ => ⟨_, hi⟩⟩)
#align ordinal.cof_lsub_le_lift Ordinal.cof_lsub_le_lift
theorem le_cof_iff_lsub {o : Ordinal} {a : Cardinal} :
a ≤ cof o ↔ ∀ {ι} (f : ι → Ordinal), lsub.{u, u} f = o → a ≤ (#ι) := by
rw [cof_eq_infₛ_lsub]
exact
(le_cinfₛ_iff'' (cof_lsub_def_nonempty o)).trans
⟨fun H ι f hf => H _ ⟨ι, f, hf, rfl⟩, fun H b ⟨ι, f, hf, hb⟩ =>
by
rw [← hb]
exact H _ hf⟩
#align ordinal.le_cof_iff_lsub Ordinal.le_cof_iff_lsub
theorem lsub_lt_ord_lift {ι} {f : ι → Ordinal} {c : Ordinal}
(hι : Cardinal.lift.{v, u} (#ι) < c.cof)
(hf : ∀ i, f i < c) : lsub.{u, v} f < c :=
lt_of_le_of_ne (lsub_le.{v, u} hf) fun h => by
subst h
exact (cof_lsub_le_lift.{u, v} f).not_lt hι
#align ordinal.lsub_lt_ord_lift Ordinal.lsub_lt_ord_lift
theorem lsub_lt_ord {ι} {f : ι → Ordinal} {c : Ordinal} (hι : (#ι) < c.cof) :
(∀ i, f i < c) → lsub.{u, u} f < c :=
lsub_lt_ord_lift (by rwa [(#ι).lift_id])
#align ordinal.lsub_lt_ord Ordinal.lsub_lt_ord
theorem cof_sup_le_lift {ι} {f : ι → Ordinal} (H : ∀ i, f i < sup.{u, v} f) :
cof (sup.{u, v} f) ≤ Cardinal.lift.{v, u} (#ι) := by
rw [← sup_eq_lsub_iff_lt_sup.{u, v}] at H
rw [H]
exact cof_lsub_le_lift f
#align ordinal.cof_sup_le_lift Ordinal.cof_sup_le_lift
theorem cof_sup_le {ι} {f : ι → Ordinal} (H : ∀ i, f i < sup.{u, u} f) :
cof (sup.{u, u} f) ≤ (#ι) := by
rw [← (#ι).lift_id]
exact cof_sup_le_lift H
#align ordinal.cof_sup_le Ordinal.cof_sup_le
theorem sup_lt_ord_lift {ι} {f : ι → Ordinal} {c : Ordinal} (hι : Cardinal.lift.{v, u} (#ι) < c.cof)
(hf : ∀ i, f i < c) : sup.{u, v} f < c :=
(sup_le_lsub.{u, v} f).trans_lt (lsub_lt_ord_lift hι hf)
#align ordinal.sup_lt_ord_lift Ordinal.sup_lt_ord_lift
theorem sup_lt_ord {ι} {f : ι → Ordinal} {c : Ordinal} (hι : (#ι) < c.cof) :
(∀ i, f i < c) → sup.{u, u} f < c :=
sup_lt_ord_lift (by rwa [(#ι).lift_id])
#align ordinal.sup_lt_ord Ordinal.sup_lt_ord
theorem supᵢ_lt_lift {ι} {f : ι → Cardinal} {c : Cardinal}
(hι : Cardinal.lift.{v, u} (#ι) < c.ord.cof)
(hf : ∀ i, f i < c) : supᵢ.{max u v + 1, u + 1} f < c := by
rw [← ord_lt_ord, supᵢ_ord (Cardinal.bddAbove_range.{u, v} _)]
refine' sup_lt_ord_lift hι fun i => _
rw [ord_lt_ord]
apply hf
#align ordinal.supr_lt_lift Ordinal.supᵢ_lt_lift
theorem supᵢ_lt {ι} {f : ι → Cardinal} {c : Cardinal} (hι : (#ι) < c.ord.cof) :
(∀ i, f i < c) → supᵢ f < c :=
supᵢ_lt_lift (by rwa [(#ι).lift_id])
#align ordinal.supr_lt Ordinal.supᵢ_lt
theorem nfpFamily_lt_ord_lift {ι} {f : ι → Ordinal → Ordinal} {c} (hc : ℵ₀ < cof c)
(hc' : Cardinal.lift.{v, u} (#ι) < cof c) (hf : ∀ (i), ∀ b < c, f i b < c) {a} (ha : a < c) :
nfpFamily.{u, v} f a < c := by
refine' sup_lt_ord_lift ((Cardinal.lift_le.2 (mk_list_le_max ι)).trans_lt _) fun l => _
· rw [lift_max]
apply max_lt _ hc'
rwa [Cardinal.lift_aleph0]
· induction' l with i l H
· exact ha
· exact hf _ _ H
#align ordinal.nfp_family_lt_ord_lift Ordinal.nfpFamily_lt_ord_lift
theorem nfpFamily_lt_ord {ι} {f : ι → Ordinal → Ordinal} {c} (hc : ℵ₀ < cof c) (hc' : (#ι) < cof c)
(hf : ∀ (i), ∀ b < c, f i b < c) {a} : a < c → nfpFamily.{u, u} f a < c :=
nfpFamily_lt_ord_lift hc (by rwa [(#ι).lift_id]) hf
#align ordinal.nfp_family_lt_ord Ordinal.nfpFamily_lt_ord
theorem nfpBFamily_lt_ord_lift {o : Ordinal} {f : ∀ a < o, Ordinal → Ordinal} {c} (hc : ℵ₀ < cof c)
(hc' : Cardinal.lift.{v, u} o.card < cof c) (hf : ∀ (i hi), ∀ b < c, f i hi b < c) {a} :
a < c → nfpBFamily.{u, v} o f a < c :=
nfpFamily_lt_ord_lift hc (by rwa [mk_ordinal_out]) fun i => hf _ _
#align ordinal.nfp_bfamily_lt_ord_lift Ordinal.nfpBFamily_lt_ord_lift
theorem nfpBFamily_lt_ord {o : Ordinal} {f : ∀ a < o, Ordinal → Ordinal} {c} (hc : ℵ₀ < cof c)
(hc' : o.card < cof c) (hf : ∀ (i hi), ∀ b < c, f i hi b < c) {a} :
a < c → nfpBFamily.{u, u} o f a < c :=
nfpBFamily_lt_ord_lift hc (by rwa [o.card.lift_id]) hf
#align ordinal.nfp_bfamily_lt_ord Ordinal.nfpBFamily_lt_ord
theorem nfp_lt_ord {f : Ordinal → Ordinal} {c} (hc : ℵ₀ < cof c) (hf : ∀ i < c, f i < c) {a} :
a < c → nfp f a < c :=
nfpFamily_lt_ord_lift hc (by simpa using Cardinal.one_lt_aleph0.trans hc) fun _ => hf
#align ordinal.nfp_lt_ord Ordinal.nfp_lt_ord
theorem exists_blsub_cof (o : Ordinal) : ∃ f : ∀ a < (cof o).ord, Ordinal, blsub.{u, u} _ f = o :=
by
rcases exists_lsub_cof o with ⟨ι, f, hf, hι⟩
rcases Cardinal.ord_eq ι with ⟨r, hr, hι'⟩
rw [← @blsub_eq_lsub' ι r hr] at hf
rw [← hι, hι']
exact ⟨_, hf⟩
#align ordinal.exists_blsub_cof Ordinal.exists_blsub_cof
theorem le_cof_iff_blsub {b : Ordinal} {a : Cardinal} :
a ≤ cof b ↔ ∀ {o} (f : ∀ a < o, Ordinal), blsub.{u, u} o f = b → a ≤ o.card :=
le_cof_iff_lsub.trans
⟨fun H o f hf => by simpa using H _ hf, fun H ι f hf =>
by
rcases Cardinal.ord_eq ι with ⟨r, hr, hι'⟩
rw [← @blsub_eq_lsub' ι r hr] at hf
simpa using H _ hf⟩
#align ordinal.le_cof_iff_blsub Ordinal.le_cof_iff_blsub
theorem cof_blsub_le_lift {o} (f : ∀ a < o, Ordinal) :
cof (blsub.{u, v} o f) ≤ Cardinal.lift.{v, u} o.card := by
rw [← mk_ordinal_out o]
exact cof_lsub_le_lift _
#align ordinal.cof_blsub_le_lift Ordinal.cof_blsub_le_lift
theorem cof_blsub_le {o} (f : ∀ a < o, Ordinal) : cof (blsub.{u, u} o f) ≤ o.card := by
rw [← o.card.lift_id]
exact cof_blsub_le_lift f
#align ordinal.cof_blsub_le Ordinal.cof_blsub_le
theorem blsub_lt_ord_lift {o : Ordinal.{u}} {f : ∀ a < o, Ordinal} {c : Ordinal}
(ho : Cardinal.lift.{v, u} o.card < c.cof) (hf : ∀ i hi, f i hi < c) : blsub.{u, v} o f < c :=
lt_of_le_of_ne (blsub_le hf) fun h =>
ho.not_le (by simpa [← supᵢ_ord, hf, h] using cof_blsub_le_lift.{u, v} f)
#align ordinal.blsub_lt_ord_lift Ordinal.blsub_lt_ord_lift
theorem blsub_lt_ord {o : Ordinal} {f : ∀ a < o, Ordinal} {c : Ordinal} (ho : o.card < c.cof)
(hf : ∀ i hi, f i hi < c) : blsub.{u, u} o f < c :=
blsub_lt_ord_lift (by rwa [o.card.lift_id]) hf
#align ordinal.blsub_lt_ord Ordinal.blsub_lt_ord
theorem cof_bsup_le_lift {o : Ordinal} {f : ∀ a < o, Ordinal} (H : ∀ i h, f i h < bsup.{u, v} o f) :
cof (bsup.{u, v} o f) ≤ Cardinal.lift.{v, u} o.card := by
rw [← bsup_eq_blsub_iff_lt_bsup.{u, v}] at H
rw [H]
exact cof_blsub_le_lift.{u, v} f
#align ordinal.cof_bsup_le_lift Ordinal.cof_bsup_le_lift
theorem cof_bsup_le {o : Ordinal} {f : ∀ a < o, Ordinal} :
(∀ i h, f i h < bsup.{u, u} o f) → cof (bsup.{u, u} o f) ≤ o.card := by
rw [← o.card.lift_id]
exact cof_bsup_le_lift
#align ordinal.cof_bsup_le Ordinal.cof_bsup_le
theorem bsup_lt_ord_lift {o : Ordinal} {f : ∀ a < o, Ordinal} {c : Ordinal}
(ho : Cardinal.lift.{v, u} o.card < c.cof) (hf : ∀ i hi, f i hi < c) : bsup.{u, v} o f < c :=
(bsup_le_blsub f).trans_lt (blsub_lt_ord_lift ho hf)
#align ordinal.bsup_lt_ord_lift Ordinal.bsup_lt_ord_lift
theorem bsup_lt_ord {o : Ordinal} {f : ∀ a < o, Ordinal} {c : Ordinal} (ho : o.card < c.cof) :
(∀ i hi, f i hi < c) → bsup.{u, u} o f < c :=
bsup_lt_ord_lift (by rwa [o.card.lift_id])
#align ordinal.bsup_lt_ord Ordinal.bsup_lt_ord
/-! ### Basic results -/
@[simp]
theorem cof_zero : cof 0 = 0 :=
(cof_le_card 0).antisymm (Cardinal.zero_le _)
#align ordinal.cof_zero Ordinal.cof_zero
@[simp]
theorem cof_eq_zero {o} : cof o = 0 ↔ o = 0 :=
⟨inductionOn o fun α r _ z =>
let ⟨S, hl, e⟩ := cof_eq r
type_eq_zero_iff_isEmpty.2 <|
⟨fun a =>
let ⟨b, h, _⟩ := hl a
(mk_eq_zero_iff.1 (e.trans z)).elim' ⟨_, h⟩⟩,
fun e => by simp [e]⟩
#align ordinal.cof_eq_zero Ordinal.cof_eq_zero
theorem cof_ne_zero {o} : cof o ≠ 0 ↔ o ≠ 0 :=
cof_eq_zero.not
#align ordinal.cof_ne_zero Ordinal.cof_ne_zero
@[simp]
theorem cof_succ (o) : cof (succ o) = 1 := by
apply le_antisymm
· refine' inductionOn o fun α r _ => _
change cof (type _) ≤ _
rw [← (_ : (#_) = 1)]
apply cof_type_le
· refine' fun a => ⟨Sum.inr PUnit.unit, Set.mem_singleton _, _⟩
rcases a with (a | ⟨⟨⟨⟩⟩⟩) <;> simp [EmptyRelation]
· rw [Cardinal.mk_fintype, Set.card_singleton]
simp
· rw [← Cardinal.succ_zero, succ_le_iff]
simpa [lt_iff_le_and_ne, Cardinal.zero_le] using fun h =>
succ_ne_zero o (cof_eq_zero.1 (Eq.symm h))
#align ordinal.cof_succ Ordinal.cof_succ
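-- Sanity-check example: every successor ordinal has cofinality `1`, e.g. `succ 0`.
example : cof (succ 0) = 1 :=
  cof_succ 0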
@[simp]
theorem cof_eq_one_iff_is_succ {o} : cof.{u} o = 1 ↔ ∃ a, o = succ a :=
⟨inductionOn o fun α r _ z => by
skip
rcases cof_eq r with ⟨S, hl, e⟩; rw [z] at e
cases' mk_ne_zero_iff.1 (by rw [e]; exact one_ne_zero) with a
refine'
⟨typein r a,
Eq.symm <|
Quotient.sound
⟨RelIso.ofSurjective (RelEmbedding.ofMonotone _ fun x y => _) fun x => _⟩⟩
· apply Sum.rec <;> [exact Subtype.val, exact fun _ => a]
· rcases x with (x | ⟨⟨⟨⟩⟩⟩) <;> rcases y with (y | ⟨⟨⟨⟩⟩⟩) <;>
simp [Subrel, Order.Preimage, EmptyRelation]
exact x.2
· suffices : r x a ∨ ∃ _ : PUnit.{u}, ↑a = x
. convert this
dsimp [RelEmbedding.ofMonotone]; simp
rcases trichotomous_of r x a with (h | h | h)
· exact Or.inl h
· exact Or.inr ⟨PUnit.unit, h.symm⟩
· rcases hl x with ⟨a', aS, hn⟩
rw [(_ : ↑a = a')] at h
· exact absurd h hn
refine' congr_arg Subtype.val (_ : a = ⟨a', aS⟩)
haveI := le_one_iff_subsingleton.1 (le_of_eq e)
apply Subsingleton.elim,
fun ⟨a, e⟩ => by simp [e]⟩
#align ordinal.cof_eq_one_iff_is_succ Ordinal.cof_eq_one_iff_is_succ
/-- A fundamental sequence for `a` is an increasing sequence of length `o = cof a` that converges at
`a`. We provide `o` explicitly in order to avoid type rewrites. -/
def IsFundamentalSequence (a o : Ordinal.{u}) (f : ∀ b < o, Ordinal.{u}) : Prop :=
o ≤ a.cof.ord ∧ (∀ {i j} (hi hj), i < j → f i hi < f j hj) ∧ blsub.{u, u} o f = a
#align ordinal.is_fundamental_sequence Ordinal.IsFundamentalSequence
namespace IsFundamentalSequence
variable {a o : Ordinal.{u}} {f : ∀ b < o, Ordinal.{u}}
protected theorem cof_eq (hf : IsFundamentalSequence a o f) : a.cof.ord = o :=
hf.1.antisymm' <| by
rw [← hf.2.2]
exact (ord_le_ord.2 (cof_blsub_le f)).trans (ord_card_le o)
#align ordinal.is_fundamental_sequence.cof_eq Ordinal.IsFundamentalSequence.cof_eq
protected theorem strict_mono (hf : IsFundamentalSequence a o f) {i j} :
∀ hi hj, i < j → f i hi < f j hj :=
hf.2.1
#align ordinal.is_fundamental_sequence.strict_mono Ordinal.IsFundamentalSequence.strict_mono
theorem blsub_eq (hf : IsFundamentalSequence a o f) : blsub.{u, u} o f = a :=
hf.2.2
#align ordinal.is_fundamental_sequence.blsub_eq Ordinal.IsFundamentalSequence.blsub_eq
theorem ord_cof (hf : IsFundamentalSequence a o f) :
IsFundamentalSequence a a.cof.ord fun i hi => f i (hi.trans_le (by rw [hf.cof_eq])) := by
have H := hf.cof_eq
subst H
exact hf
#align ordinal.is_fundamental_sequence.ord_cof Ordinal.IsFundamentalSequence.ord_cof
theorem id_of_le_cof (h : o ≤ o.cof.ord) : IsFundamentalSequence o o fun a _ => a :=
⟨h, @fun _ _ _ _ => id, blsub_id o⟩
#align ordinal.is_fundamental_sequence.id_of_le_cof Ordinal.IsFundamentalSequence.id_of_le_cof
protected theorem zero {f : ∀ b < (0 : Ordinal), Ordinal} : IsFundamentalSequence 0 0 f :=
⟨by rw [cof_zero, ord_zero], @fun i j hi => (Ordinal.not_lt_zero i hi).elim, blsub_zero f⟩
#align ordinal.is_fundamental_sequence.zero Ordinal.IsFundamentalSequence.zero
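-- Sanity-check example: the empty sequence (of length `0`) is a fundamental
-- sequence for `0`.
example : IsFundamentalSequence 0 0 fun b _ => b :=
  IsFundamentalSequence.zero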
protected theorem succ : IsFundamentalSequence (succ o) 1 fun _ _ => o := by
refine' ⟨_, @fun i j hi hj h => _, blsub_const Ordinal.one_ne_zero o⟩
· rw [cof_succ, ord_one]
· rw [lt_one_iff_zero] at hi hj
rw [hi, hj] at h
exact h.false.elim
#align ordinal.is_fundamental_sequence.succ Ordinal.IsFundamentalSequence.succ
protected theorem monotone (hf : IsFundamentalSequence a o f) {i j : Ordinal} (hi : i < o)
(hj : j < o) (hij : i ≤ j) : f i hi ≤ f j hj := by
rcases lt_or_eq_of_le hij with (hij | rfl)
· exact (hf.2.1 hi hj hij).le
· rfl
#align ordinal.is_fundamental_sequence.monotone Ordinal.IsFundamentalSequence.monotone
theorem trans {a o o' : Ordinal.{u}} {f : ∀ b < o, Ordinal.{u}} (hf : IsFundamentalSequence a o f)
{g : ∀ b < o', Ordinal.{u}} (hg : IsFundamentalSequence o o' g) :
IsFundamentalSequence a o' fun i hi =>
f (g i hi) (by rw [← hg.2.2]; apply lt_blsub) := by
refine' ⟨_, @fun i j _ _ h => hf.2.1 _ _ (hg.2.1 _ _ h), _⟩
· rw [hf.cof_eq]
exact hg.1.trans (ord_cof_le o)
· rw [@blsub_comp.{u, u, u} o _ f (@IsFundamentalSequence.monotone _ _ f hf)]
exact hf.2.2
exact hg.2.2
#align ordinal.is_fundamental_sequence.trans Ordinal.IsFundamentalSequence.trans
end IsFundamentalSequence
/-- Every ordinal has a fundamental sequence. -/
theorem exists_fundamental_sequence (a : Ordinal.{u}) : ∃ f, IsFundamentalSequence a a.cof.ord f :=
by
suffices h : ∃ o f, IsFundamentalSequence a o f
· rcases h with ⟨o, f, hf⟩
exact ⟨_, hf.ord_cof⟩
rcases exists_lsub_cof a with ⟨ι, f, hf, hι⟩
rcases ord_eq ι with ⟨r, wo, hr⟩
haveI := wo
let r' := Subrel r { i | ∀ j, r j i → f j < f i }
let hrr' : r' ↪r r := Subrel.relEmbedding _ _
haveI := hrr'.isWellOrder
refine'
⟨_, _, hrr'.ordinal_type_le.trans _, @fun i j _ h _ => (enum r' j h).prop _ _,
le_antisymm (blsub_le fun i hi => lsub_le_iff.1 hf.le _) _⟩
· rw [← hι, hr]
· change r (hrr'.1 _) (hrr'.1 _)
rwa [hrr'.2, @enum_lt_enum _ r']
· rw [← hf, lsub_le_iff]
intro i
suffices h : ∃ i' hi', f i ≤ bfamilyOfFamily' r' (fun i => f i) i' hi'
· rcases h with ⟨i', hi', hfg⟩
exact hfg.trans_lt (lt_blsub _ _ _)
by_cases h : ∀ j, r j i → f j < f i
· refine' ⟨typein r' ⟨i, h⟩, typein_lt_type _ _, _⟩
rw [bfamilyOfFamily'_typein]
· push_neg at h
cases' wo.wf.min_mem _ h with hji hij
refine' ⟨typein r' ⟨_, fun k hkj => lt_of_lt_of_le _ hij⟩, typein_lt_type _ _, _⟩
· by_contra' H
exact (wo.wf.not_lt_min _ h ⟨IsTrans.trans _ _ _ hkj hji, H⟩) hkj
· rwa [bfamilyOfFamily'_typein]
#align ordinal.exists_fundamental_sequence Ordinal.exists_fundamental_sequence
@[simp]
theorem cof_cof (a : Ordinal.{u}) : cof (cof a).ord = cof a := by
cases' exists_fundamental_sequence a with f hf
cases' exists_fundamental_sequence a.cof.ord with g hg
exact ord_injective (hf.trans hg).cof_eq.symm
#align ordinal.cof_cof Ordinal.cof_cof
protected theorem IsNormal.isFundamentalSequence {f : Ordinal.{u} → Ordinal.{u}} (hf : IsNormal f)
{a o} (ha : IsLimit a) {g} (hg : IsFundamentalSequence a o g) :
IsFundamentalSequence (f a) o fun b hb => f (g b hb) := by
refine' ⟨_, @fun i j _ _ h => hf.strictMono (hg.2.1 _ _ h), _⟩
· rcases exists_lsub_cof (f a) with ⟨ι, f', hf', hι⟩
rw [← hg.cof_eq, ord_le_ord, ← hι]
suffices (lsub.{u, u} fun i => infₛ { b : Ordinal | f' i ≤ f b }) = a
by
rw [← this]
apply cof_lsub_le
have H : ∀ i, ∃ b < a, f' i ≤ f b := fun i => by
have := lt_lsub.{u, u} f' i
rw [hf', ← IsNormal.blsub_eq.{u, u} hf ha, lt_blsub_iff] at this
simpa using this
refine' (lsub_le fun i => _).antisymm (le_of_forall_lt fun b hb => _)
· rcases H i with ⟨b, hb, hb'⟩
exact lt_of_le_of_lt (cinfₛ_le' hb') hb
· have := hf.strictMono hb
rw [← hf', lt_lsub_iff] at this
cases' this with i hi
rcases H i with ⟨b, _, hb⟩
exact
((le_cinfₛ_iff'' ⟨b, by exact hb⟩).2 fun c hc =>
hf.strictMono.le_iff_le.1 (hi.trans hc)).trans_lt (lt_lsub _ i)
· rw [@blsub_comp.{u, u, u} a _ (fun b _ => f b) (@fun i j _ _ h => hf.strictMono.monotone h) g
hg.2.2]
exact IsNormal.blsub_eq.{u, u} hf ha
#align ordinal.is_normal.is_fundamental_sequence Ordinal.IsNormal.isFundamentalSequence
theorem IsNormal.cof_eq {f} (hf : IsNormal f) {a} (ha : IsLimit a) : cof (f a) = cof a :=
let ⟨_, hg⟩ := exists_fundamental_sequence a
ord_injective (hf.isFundamentalSequence ha hg).cof_eq
#align ordinal.is_normal.cof_eq Ordinal.IsNormal.cof_eq
theorem IsNormal.cof_le {f} (hf : IsNormal f) (a) : cof a ≤ cof (f a) := by
rcases zero_or_succ_or_limit a with (rfl | ⟨b, rfl⟩ | ha)
· rw [cof_zero]
exact zero_le _
· rw [cof_succ, Cardinal.one_le_iff_ne_zero, cof_ne_zero, ← Ordinal.pos_iff_ne_zero]
exact (Ordinal.zero_le (f b)).trans_lt (hf.1 b)
· rw [hf.cof_eq ha]
#align ordinal.is_normal.cof_le Ordinal.IsNormal.cof_le
@[simp]
theorem cof_add (a b : Ordinal) : b ≠ 0 → cof (a + b) = cof b := fun h => by
rcases zero_or_succ_or_limit b with (rfl | ⟨c, rfl⟩ | hb)
· contradiction
· rw [add_succ, cof_succ, cof_succ]
· exact (add_isNormal a).cof_eq hb
#align ordinal.cof_add Ordinal.cof_add
theorem aleph0_le_cof {o} : ℵ₀ ≤ cof o ↔ IsLimit o := by
rcases zero_or_succ_or_limit o with (rfl | ⟨o, rfl⟩ | l)
· simp [not_zero_isLimit, Cardinal.aleph0_ne_zero]
· simp [not_succ_isLimit, Cardinal.one_lt_aleph0]
· simp [l]
refine' le_of_not_lt fun h => _
cases' Cardinal.lt_aleph0.1 h with n e
have := cof_cof o
rw [e, ord_nat] at this
cases n
· simp at e
simp [e, not_zero_isLimit] at l
· rw [nat_cast_succ, cof_succ] at this
rw [← this, cof_eq_one_iff_is_succ] at e
rcases e with ⟨a, rfl⟩
exact not_succ_isLimit _ l
#align ordinal.aleph_0_le_cof Ordinal.aleph0_le_cof
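-- Sanity-check example: limit ordinals are exactly the ordinals of infinite
-- cofinality.
example {o : Ordinal} (h : o.IsLimit) : ℵ₀ ≤ cof o :=
  aleph0_le_cof.2 h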
@[simp]
theorem aleph'_cof {o : Ordinal} (ho : o.IsLimit) : (aleph' o).ord.cof = o.cof :=
aleph'_isNormal.cof_eq ho
#align ordinal.aleph'_cof Ordinal.aleph'_cof
@[simp]
theorem aleph_cof {o : Ordinal} (ho : o.IsLimit) : (aleph o).ord.cof = o.cof :=
aleph_isNormal.cof_eq ho
#align ordinal.aleph_cof Ordinal.aleph_cof
@[simp]
theorem cof_omega : cof ω = ℵ₀ :=
(aleph0_le_cof.2 omega_isLimit).antisymm' <|
by
rw [← card_omega]
apply cof_le_card
#align ordinal.cof_omega Ordinal.cof_omega
theorem cof_eq' (r : α → α → Prop) [IsWellOrder α r] (h : IsLimit (type r)) :
∃ S : Set α, (∀ a, ∃ b ∈ S, r a b) ∧ (#S) = cof (type r) :=
let ⟨S, H, e⟩ := cof_eq r
⟨S, fun a =>
let a' := enum r _ (h.2 _ (typein_lt_type r a))
let ⟨b, h, ab⟩ := H a'
⟨b, h,
(IsOrderConnected.conn a b a' <|
(typein_lt_typein r).1
(by
rw [typein_enum]
exact lt_succ (typein _ _))).resolve_right
ab⟩,
e⟩
#align ordinal.cof_eq' Ordinal.cof_eq'
@[simp]
theorem cof_univ : cof univ.{u, v} = Cardinal.univ.{u, v} :=
le_antisymm (cof_le_card _)
(by
refine' le_of_forall_lt fun c h => _
rcases lt_univ'.1 h with ⟨c, rfl⟩
rcases @cof_eq Ordinal.{u} (· < ·) _ with ⟨S, H, Se⟩
rw [univ, ← lift_cof, ← Cardinal.lift_lift.{u, u + 1, v}, Cardinal.lift_lt, ← Se]
refine' lt_of_not_ge fun h => _
cases' Cardinal.lift_down h with a e
refine' Quotient.inductionOn a (fun α e => _) e
cases' Quotient.exact e with f
have f := Equiv.ulift.symm.trans f
let g a := (f a).1
let o := succ (sup.{u, u} g)
rcases H o with ⟨b, h, l⟩
refine' l (lt_succ_iff.2 _)
rw [← show g (f.symm ⟨b, h⟩) = b by simp]
apply le_sup)
#align ordinal.cof_univ Ordinal.cof_univ
/-! ### Infinite pigeonhole principle -/
/-- If the union of `s` is unbounded and `s` is smaller than the cofinality,
then `s` has an unbounded member -/
theorem unbounded_of_unbounded_unionₛ (r : α → α → Prop) [wo : IsWellOrder α r] {s : Set (Set α)}
(h₁ : Unbounded r <| ⋃₀ s) (h₂ : (#s) < StrictOrder.cof r) : ∃ x ∈ s, Unbounded r x := by
by_contra' h
simp_rw [not_unbounded_iff] at h
let f : s → α := fun x : s => wo.wf.sup x (h x.1 x.2)
refine' h₂.not_le (le_trans (cinfₛ_le' ⟨range f, fun x => _, rfl⟩) mk_range_le)
rcases h₁ x with ⟨y, ⟨c, hc, hy⟩, hxy⟩
exact ⟨f ⟨c, hc⟩, mem_range_self _, fun hxz => hxy (Trans.trans (wo.wf.lt_sup _ hy) hxz)⟩
#align ordinal.unbounded_of_unbounded_sUnion Ordinal.unbounded_of_unbounded_unionₛ
/-- If the union of `s` is unbounded and `s` is smaller than the cofinality,
then `s` has an unbounded member -/
theorem unbounded_of_unbounded_unionᵢ {α β : Type u} (r : α → α → Prop) [wo : IsWellOrder α r]
(s : β → Set α) (h₁ : Unbounded r <| ⋃ x, s x) (h₂ : (#β) < StrictOrder.cof r) :
∃ x : β, Unbounded r (s x) := by
rw [← unionₛ_range] at h₁
rcases unbounded_of_unbounded_unionₛ r h₁ (mk_range_le.trans_lt h₂) with ⟨_, ⟨x, rfl⟩, u⟩
exact ⟨x, u⟩
#align ordinal.unbounded_of_unbounded_Union Ordinal.unbounded_of_unbounded_unionᵢ
/-- The infinite pigeonhole principle -/
theorem infinite_pigeonhole {β α : Type u} (f : β → α) (h₁ : ℵ₀ ≤ (#β)) (h₂ : (#α) < (#β).ord.cof) :
∃ a : α, (#f ⁻¹' {a}) = (#β) := by
have : ∃ a, (#β) ≤ (#f ⁻¹' {a}) := by
by_contra' h
apply mk_univ.not_lt
rw [← preimage_univ, ← unionᵢ_of_singleton, preimage_unionᵢ]
exact
mk_unionᵢ_le_sum_mk.trans_lt
((sum_le_supᵢ _).trans_lt <| mul_lt_of_lt h₁ (h₂.trans_le <| cof_ord_le _) (supᵢ_lt h₂ h))
cases' this with x h
refine' ⟨x, h.antisymm' _⟩
rw [le_mk_iff_exists_set]
exact ⟨_, rfl⟩
#align ordinal.infinite_pigeonhole Ordinal.infinite_pigeonhole
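-- Illustration (a sketch; `mk_nat`, `ord_aleph0`, `mk_fin` and `nat_lt_aleph0` are
-- assumed from the imported files): a coloring of `ℕ` by three colors has a color
-- class of full cardinality `#ℕ`.
example (f : ℕ → Fin 3) : ∃ a, (#(f ⁻¹' {a})) = (#ℕ) :=
  infinite_pigeonhole f mk_nat.ge
    (by rw [mk_nat, ord_aleph0, cof_omega]; exact (mk_fin 3).trans_lt (nat_lt_aleph0 3))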
/-- Pigeonhole principle for a cardinality below the cardinality of the domain -/
theorem infinite_pigeonhole_card {β α : Type u} (f : β → α) (θ : Cardinal) (hθ : θ ≤ (#β))
(h₁ : ℵ₀ ≤ θ) (h₂ : (#α) < θ.ord.cof) : ∃ a : α, θ ≤ (#f ⁻¹' {a}) := by
rcases le_mk_iff_exists_set.1 hθ with ⟨s, rfl⟩
cases' infinite_pigeonhole (f ∘ Subtype.val : s → α) h₁ h₂ with a ha
use a; rw [← ha, @preimage_comp _ _ _ Subtype.val f]
exact mk_preimage_of_injective _ _ Subtype.val_injective
#align ordinal.infinite_pigeonhole_card Ordinal.infinite_pigeonhole_card
theorem infinite_pigeonhole_set {β α : Type u} {s : Set β} (f : s → α) (θ : Cardinal)
(hθ : θ ≤ (#s)) (h₁ : ℵ₀ ≤ θ) (h₂ : (#α) < θ.ord.cof) :
∃ (a : α)(t : Set β)(h : t ⊆ s), θ ≤ (#t) ∧ ∀ ⦃x⦄ (hx : x ∈ t), f ⟨x, h hx⟩ = a := by
cases' infinite_pigeonhole_card f θ hθ h₁ h₂ with a ha
refine' ⟨a, { x | ∃ h, f ⟨x, h⟩ = a }, _, _, _⟩
· rintro x ⟨hx, _⟩
exact hx
· refine'
ha.trans
(ge_of_eq <|
Quotient.sound ⟨Equiv.trans _ (Equiv.subtypeSubtypeEquivSubtypeExists _ _).symm⟩)
simp only [coe_eq_subtype, mem_singleton_iff, mem_preimage, mem_setOf_eq]
rfl
rintro x ⟨_, hx'⟩; exact hx'
#align ordinal.infinite_pigeonhole_set Ordinal.infinite_pigeonhole_set
end Ordinal
/-! ### Regular and inaccessible cardinals -/
namespace Cardinal
open Ordinal
--Porting note: commented out, doesn't seem necessary
-- mathport name: cardinal.pow
--local infixr:0 "^" => @HPow.hPow Cardinal Cardinal Cardinal instHPow
/-- A cardinal is a limit if it is neither zero nor a successor
cardinal. Note that `ℵ₀` is a limit cardinal by this definition. -/
def IsLimit (c : Cardinal) : Prop :=
c ≠ 0 ∧ ∀ x < c, succ x < c
#align cardinal.is_limit Cardinal.IsLimit
theorem IsLimit.ne_zero {c} (h : IsLimit c) : c ≠ 0 :=
h.1
#align cardinal.is_limit.ne_zero Cardinal.IsLimit.ne_zero
theorem IsLimit.succ_lt {x c} (h : IsLimit c) : x < c → succ x < c :=
h.2 x
#align cardinal.is_limit.succ_lt Cardinal.IsLimit.succ_lt
theorem IsLimit.aleph0_le {c} (h : IsLimit c) : ℵ₀ ≤ c := by
by_contra' h'
rcases lt_aleph0.1 h' with ⟨_ | n, rfl⟩
· exact h.1.irrefl
· simpa using h.2 n
#align cardinal.is_limit.aleph_0_le Cardinal.IsLimit.aleph0_le
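-- Sanity-check example: `1` is not a limit cardinal, since `succ 0 = 1`.
example : ¬IsLimit 1 := fun h => (h.2 0 zero_lt_one).ne succ_zero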
/-- A cardinal is a strong limit if it is not zero and it is
closed under powersets. Note that `ℵ₀` is a strong limit by this definition. -/
def IsStrongLimit (c : Cardinal) : Prop :=
c ≠ 0 ∧ ∀ x < c, (2^x) < c
#align cardinal.is_strong_limit Cardinal.IsStrongLimit
theorem IsStrongLimit.ne_zero {c} (h : IsStrongLimit c) : c ≠ 0 :=
h.1
#align cardinal.is_strong_limit.ne_zero Cardinal.IsStrongLimit.ne_zero
theorem IsStrongLimit.two_power_lt {x c} (h : IsStrongLimit c) : x < c → (2^x) < c :=
h.2 x
#align cardinal.is_strong_limit.two_power_lt Cardinal.IsStrongLimit.two_power_lt
theorem isStrongLimit_aleph0 : IsStrongLimit ℵ₀ :=
⟨aleph0_ne_zero, fun x hx => by
rcases lt_aleph0.1 hx with ⟨n, rfl⟩
exact_mod_cast nat_lt_aleph0 (2 ^ n)⟩
#align cardinal.is_strong_limit_aleph_0 Cardinal.isStrongLimit_aleph0
theorem IsStrongLimit.isLimit {c} (H : IsStrongLimit c) : IsLimit c :=
⟨H.1, fun x h => (succ_le_of_lt <| cantor x).trans_lt (H.2 _ h)⟩
#align cardinal.is_strong_limit.is_limit Cardinal.IsStrongLimit.isLimit
theorem isLimit_aleph0 : IsLimit ℵ₀ :=
isStrongLimit_aleph0.isLimit
#align cardinal.is_limit_aleph_0 Cardinal.isLimit_aleph0
theorem isStrongLimit_beth {o : Ordinal} (H : ∀ a < o, succ a < o) : IsStrongLimit (beth o) := by
rcases eq_or_ne o 0 with (rfl | h)
· rw [beth_zero]
exact isStrongLimit_aleph0
· refine' ⟨beth_ne_zero o, fun a ha => _⟩
rw [beth_limit ⟨h, H⟩] at ha
rcases exists_lt_of_lt_csupᵢ' ha with ⟨⟨i, hi⟩, ha⟩
have := power_le_power_left two_ne_zero ha.le
rw [← beth_succ] at this
exact this.trans_lt (beth_lt.2 (H i hi))
#align cardinal.is_strong_limit_beth Cardinal.isStrongLimit_beth
theorem mk_subset_mk_lt_cof {α : Type _} (h : ∀ x < #α, (2^x) < (#α)) :
(#{ s : Set α // (#s) < cof (#α).ord }) = (#α) := by
rcases eq_or_ne (#α) 0 with (ha | ha)
· rw [ha]
simp [fun s => (Cardinal.zero_le s).not_lt]
have h' : IsStrongLimit (#α) := ⟨ha, h⟩
rcases ord_eq α with ⟨r, wo, hr⟩
haveI := wo
apply le_antisymm
· conv_rhs => rw [← mk_bounded_subset h hr]
apply mk_le_mk_of_subset
intro s hs
rw [hr] at hs
exact lt_cof_type hs
· refine' @mk_le_of_injective α _ (fun x => Subtype.mk {x} _) _
· rw [mk_singleton]
exact one_lt_aleph0.trans_le (aleph0_le_cof.2 (ord_isLimit h'.isLimit.aleph0_le))
· intro a b hab
simpa [singleton_eq_singleton_iff] using hab
#align cardinal.mk_subset_mk_lt_cof Cardinal.mk_subset_mk_lt_cof
/-- A cardinal is regular if it is infinite and it equals its own cofinality. -/
def IsRegular (c : Cardinal) : Prop :=
ℵ₀ ≤ c ∧ c ≤ c.ord.cof
#align cardinal.is_regular Cardinal.IsRegular
theorem IsRegular.aleph0_le {c : Cardinal} (H : c.IsRegular) : ℵ₀ ≤ c :=
H.1
#align cardinal.is_regular.aleph_0_le Cardinal.IsRegular.aleph0_le
theorem IsRegular.cof_eq {c : Cardinal} (H : c.IsRegular) : c.ord.cof = c :=
(cof_ord_le c).antisymm H.2
#align cardinal.is_regular.cof_eq Cardinal.IsRegular.cof_eq
theorem IsRegular.pos {c : Cardinal} (H : c.IsRegular) : 0 < c :=
aleph0_pos.trans_le H.1
#align cardinal.is_regular.pos Cardinal.IsRegular.pos
theorem IsRegular.ord_pos {c : Cardinal} (H : c.IsRegular) : 0 < c.ord := by
rw [Cardinal.lt_ord]
exact H.pos
#align cardinal.is_regular.ord_pos Cardinal.IsRegular.ord_pos
theorem isRegular_cof {o : Ordinal} (h : o.IsLimit) : IsRegular o.cof :=
⟨aleph0_le_cof.2 h, (cof_cof o).ge⟩
#align cardinal.is_regular_cof Cardinal.isRegular_cof
theorem isRegular_aleph0 : IsRegular ℵ₀ :=
⟨le_rfl, by simp⟩
#align cardinal.is_regular_aleph_0 Cardinal.isRegular_aleph0
theorem isRegular_succ {c : Cardinal.{u}} (h : ℵ₀ ≤ c) : IsRegular (succ c) :=
⟨h.trans (le_succ c),
succ_le_of_lt
(by
cases' Quotient.exists_rep (@succ Cardinal _ _ c) with α αe; simp at αe
rcases ord_eq α with ⟨r, wo, re⟩; skip
have := ord_isLimit (h.trans (le_succ _))
rw [← αe, re] at this⊢
rcases cof_eq' r this with ⟨S, H, Se⟩
rw [← Se]
apply lt_imp_lt_of_le_imp_le fun h => mul_le_mul_right' h c
rw [mul_eq_self h, ← succ_le_iff, ← αe, ← sum_const']
refine' le_trans _ (sum_le_sum (fun (x : S) => card (typein r (x : α))) _ fun i => _)
· simp only [← card_typein, ← mk_sigma]
exact
⟨Embedding.ofSurjective (fun x => x.2.1) fun a =>
let ⟨b, h, ab⟩ := H a
⟨⟨⟨_, h⟩, _, ab⟩, rfl⟩⟩
· rw [← lt_succ_iff, ← lt_ord, ← αe, re]
apply typein_lt_type)⟩
#align cardinal.is_regular_succ Cardinal.isRegular_succ
theorem isRegular_aleph_one : IsRegular (aleph 1) := by
rw [← succ_aleph0]
exact isRegular_succ le_rfl
#align cardinal.is_regular_aleph_one Cardinal.isRegular_aleph_one
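-- Sanity-check example: regularity of `ℵ₁` pins down the cofinality of its
-- initial ordinal.
example : (aleph 1).ord.cof = aleph 1 :=
  isRegular_aleph_one.cof_eq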
theorem isRegular_aleph'_succ {o : Ordinal} (h : ω ≤ o) : IsRegular (aleph' (succ o)) := by
rw [aleph'_succ]
exact isRegular_succ (aleph0_le_aleph'.2 h)
#align cardinal.is_regular_aleph'_succ Cardinal.isRegular_aleph'_succ
theorem isRegular_aleph_succ (o : Ordinal) : IsRegular (aleph (succ o)) := by
rw [aleph_succ]
exact isRegular_succ (aleph0_le_aleph o)
#align cardinal.is_regular_aleph_succ Cardinal.isRegular_aleph_succ
/-- A function whose codomain's cardinality is infinite but strictly smaller than its domain's
has a fiber with cardinality strictly greater than that of the codomain.
-/
theorem infinite_pigeonhole_card_lt {β α : Type u} (f : β → α) (w : (#α) < (#β)) (w' : ℵ₀ ≤ (#α)) :
∃ a : α, (#α) < (#f ⁻¹' {a}) := by
simp_rw [← succ_le_iff]
exact
Ordinal.infinite_pigeonhole_card f (succ (#α)) (succ_le_of_lt w) (w'.trans (lt_succ _).le)
((lt_succ _).trans_le (isRegular_succ w').2.ge)
#align cardinal.infinite_pigeonhole_card_lt Cardinal.infinite_pigeonhole_card_lt
/-- A function whose codomain's cardinality is infinite but strictly smaller than its domain's
has an infinite fiber.
-/
theorem exists_infinite_fiber {β α : Type _} (f : β → α) (w : (#α) < (#β)) (w' : Infinite α) :
∃ a : α, Infinite (f ⁻¹' {a}) := by
simp_rw [Cardinal.infinite_iff] at w'⊢
cases' infinite_pigeonhole_card_lt f w w' with a ha
exact ⟨a, w'.trans ha.le⟩
#align cardinal.exists_infinite_fiber Cardinal.exists_infinite_fiber
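-- Illustration (a sketch; `mk_set` and `cantor` are assumed from the imported
-- files): by Cantor's theorem, a map `Set α → α` on an infinite type must have an
-- infinite fiber.
example {α : Type} [Infinite α] (f : Set α → α) : ∃ a : α, Infinite (f ⁻¹' {a}) :=
  exists_infinite_fiber f (by rw [mk_set]; exact cantor _) ‹Infinite α›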
/-- If an infinite type `β` can be expressed as a union of finite sets,
then the cardinality of the collection of those finite sets
must be at least the cardinality of `β`.
-/
theorem le_range_of_union_finset_eq_top {α β : Type _} [Infinite β] (f : α → Finset β)
(w : (⋃ a, (f a : Set β)) = ⊤) : (#β) ≤ (#range f) := by
have k : _root_.Infinite (range f) := by
rw [infinite_coe_iff]
apply mt (union_finset_finite_of_range_finite f)
rw [w]
exact infinite_univ
by_contra h
simp only [not_le] at h
let u : ∀ b, ∃ a, b ∈ f a := fun b => by simpa using (w.ge : _) (Set.mem_univ b)
let u' : β → range f := fun b => ⟨f (u b).choose, by simp⟩
have v' : ∀ a, u' ⁻¹' {⟨f a, by simp⟩} ≤ f a :=
by
rintro a p m
simp at m
rw [← m]
apply fun b => (u b).choose_spec
obtain ⟨⟨-, ⟨a, rfl⟩⟩, p⟩ := exists_infinite_fiber u' h k
exact (@Infinite.of_injective _ _ p (inclusion (v' a)) (inclusion_injective _)).false
#align cardinal.le_range_of_union_finset_eq_top Cardinal.le_range_of_union_finset_eq_top
theorem lsub_lt_ord_lift_of_isRegular {ι} {f : ι → Ordinal} {c} (hc : IsRegular c)
(hι : Cardinal.lift.{v, u} (#ι) < c) : (∀ i, f i < c.ord) → Ordinal.lsub.{u, v} f < c.ord :=
lsub_lt_ord_lift (by rwa [hc.cof_eq])
#align cardinal.lsub_lt_ord_lift_of_is_regular Cardinal.lsub_lt_ord_lift_of_isRegular
theorem lsub_lt_ord_of_isRegular {ι} {f : ι → Ordinal} {c} (hc : IsRegular c) (hι : (#ι) < c) :
(∀ i, f i < c.ord) → Ordinal.lsub f < c.ord :=
lsub_lt_ord (by rwa [hc.cof_eq])
#align cardinal.lsub_lt_ord_of_is_regular Cardinal.lsub_lt_ord_of_isRegular
theorem sup_lt_ord_lift_of_isRegular {ι} {f : ι → Ordinal} {c} (hc : IsRegular c)
(hι : Cardinal.lift.{v, u} (#ι) < c) : (∀ i, f i < c.ord) → Ordinal.sup.{u, v} f < c.ord :=
sup_lt_ord_lift (by rwa [hc.cof_eq])
#align cardinal.sup_lt_ord_lift_of_is_regular Cardinal.sup_lt_ord_lift_of_isRegular
theorem sup_lt_ord_of_isRegular {ι} {f : ι → Ordinal} {c} (hc : IsRegular c) (hι : (#ι) < c) :
(∀ i, f i < c.ord) → Ordinal.sup f < c.ord :=
sup_lt_ord (by rwa [hc.cof_eq])
#align cardinal.sup_lt_ord_of_is_regular Cardinal.sup_lt_ord_of_isRegular
theorem blsub_lt_ord_lift_of_isRegular {o : Ordinal} {f : ∀ a < o, Ordinal} {c} (hc : IsRegular c)
(ho : Cardinal.lift.{v, u} o.card < c) :
(∀ i hi, f i hi < c.ord) → Ordinal.blsub.{u, v} o f < c.ord :=
blsub_lt_ord_lift (by rwa [hc.cof_eq])
#align cardinal.blsub_lt_ord_lift_of_is_regular Cardinal.blsub_lt_ord_lift_of_isRegular
theorem blsub_lt_ord_of_isRegular {o : Ordinal} {f : ∀ a < o, Ordinal} {c} (hc : IsRegular c)
(ho : o.card < c) : (∀ i hi, f i hi < c.ord) → Ordinal.blsub o f < c.ord :=
blsub_lt_ord (by rwa [hc.cof_eq])
#align cardinal.blsub_lt_ord_of_is_regular Cardinal.blsub_lt_ord_of_isRegular
theorem bsup_lt_ord_lift_of_isRegular {o : Ordinal} {f : ∀ a < o, Ordinal} {c} (hc : IsRegular c)
(hι : Cardinal.lift.{v, u} o.card < c) :
(∀ i hi, f i hi < c.ord) → Ordinal.bsup.{u, v} o f < c.ord :=
bsup_lt_ord_lift (by rwa [hc.cof_eq])
#align cardinal.bsup_lt_ord_lift_of_is_regular Cardinal.bsup_lt_ord_lift_of_isRegular
theorem bsup_lt_ord_of_isRegular {o : Ordinal} {f : ∀ a < o, Ordinal} {c} (hc : IsRegular c)
(hι : o.card < c) : (∀ i hi, f i hi < c.ord) → Ordinal.bsup o f < c.ord :=
bsup_lt_ord (by rwa [hc.cof_eq])
#align cardinal.bsup_lt_ord_of_is_regular Cardinal.bsup_lt_ord_of_isRegular
theorem supᵢ_lt_lift_of_isRegular {ι} {f : ι → Cardinal} {c} (hc : IsRegular c)
(hι : Cardinal.lift.{v, u} (#ι) < c) : (∀ i, f i < c) → supᵢ.{max u v + 1, u + 1} f < c :=
supᵢ_lt_lift.{u, v} (by rwa [hc.cof_eq])
#align cardinal.supr_lt_lift_of_is_regular Cardinal.supᵢ_lt_lift_of_isRegular
theorem supᵢ_lt_of_isRegular {ι} {f : ι → Cardinal} {c} (hc : IsRegular c) (hι : (#ι) < c) :
(∀ i, f i < c) → supᵢ f < c :=
supᵢ_lt (by rwa [hc.cof_eq])
#align cardinal.supr_lt_of_is_regular Cardinal.supᵢ_lt_of_isRegular
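-- Illustration (a sketch; `mk_fin` and `nat_lt_aleph0` are assumed from the
-- imported files): a supremum of finitely many cardinals below `ℵ₀` stays below
-- `ℵ₀`, by regularity of `ℵ₀`.
example {f : Fin 3 → Cardinal} (hf : ∀ i, f i < ℵ₀) : supᵢ f < ℵ₀ :=
  supᵢ_lt_of_isRegular isRegular_aleph0 ((mk_fin 3).trans_lt (nat_lt_aleph0 3)) hf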
theorem sum_lt_lift_of_isRegular {ι : Type u} {f : ι → Cardinal} {c : Cardinal} (hc : IsRegular c)
(hι : Cardinal.lift.{v, u} (#ι) < c) (hf : ∀ i, f i < c) : sum f < c :=
(sum_le_supᵢ_lift _).trans_lt <| mul_lt_of_lt hc.1 hι (supᵢ_lt_lift_of_isRegular hc hι hf)
#align cardinal.sum_lt_lift_of_is_regular Cardinal.sum_lt_lift_of_isRegular
theorem sum_lt_of_isRegular {ι : Type u} {f : ι → Cardinal} {c : Cardinal} (hc : IsRegular c)
(hι : (#ι) < c) : (∀ i, f i < c) → sum f < c :=
sum_lt_lift_of_isRegular.{u, u} hc (by rwa [lift_id])
#align cardinal.sum_lt_of_is_regular Cardinal.sum_lt_of_isRegular
theorem nfpFamily_lt_ord_lift_of_isRegular {ι} {f : ι → Ordinal → Ordinal} {c} (hc : IsRegular c)
(hι : Cardinal.lift.{v, u} (#ι) < c) (hc' : c ≠ ℵ₀) (hf : ∀ (i), ∀ b < c.ord, f i b < c.ord) {a}
(ha : a < c.ord) : nfpFamily.{u, v} f a < c.ord := by
apply nfpFamily_lt_ord_lift.{u, v} _ _ hf ha <;> rw [hc.cof_eq]
exact lt_of_le_of_ne hc.1 hc'.symm
exact hι
#align cardinal.nfp_family_lt_ord_lift_of_is_regular Cardinal.nfpFamily_lt_ord_lift_of_isRegular
theorem nfpFamily_lt_ord_of_isRegular {ι} {f : ι → Ordinal → Ordinal} {c} (hc : IsRegular c)
(hι : (#ι) < c) (hc' : c ≠ ℵ₀) {a} (hf : ∀ (i), ∀ b < c.ord, f i b < c.ord) :
a < c.ord → nfpFamily.{u, u} f a < c.ord :=
nfpFamily_lt_ord_lift_of_isRegular hc (by rwa [lift_id]) hc' hf
#align cardinal.nfp_family_lt_ord_of_is_regular Cardinal.nfpFamily_lt_ord_of_isRegular
theorem nfpBFamily_lt_ord_lift_of_isRegular {o : Ordinal} {f : ∀ a < o, Ordinal → Ordinal} {c}
(hc : IsRegular c) (ho : Cardinal.lift.{v, u} o.card < c) (hc' : c ≠ ℵ₀)
(hf : ∀ (i hi), ∀ b < c.ord, f i hi b < c.ord) {a} :
a < c.ord → nfpBFamily.{u, v} o f a < c.ord :=
nfpFamily_lt_ord_lift_of_isRegular hc (by rwa [mk_ordinal_out]) hc' fun i => hf _ _
#align cardinal.nfp_bfamily_lt_ord_lift_of_is_regular Cardinal.nfpBFamily_lt_ord_lift_of_isRegular
theorem nfpBFamily_lt_ord_of_isRegular {o : Ordinal} {f : ∀ a < o, Ordinal → Ordinal} {c}
(hc : IsRegular c) (ho : o.card < c) (hc' : c ≠ ℵ₀)
(hf : ∀ (i hi), ∀ b < c.ord, f i hi b < c.ord) {a} :
a < c.ord → nfpBFamily.{u, u} o f a < c.ord :=
nfpBFamily_lt_ord_lift_of_isRegular hc (by rwa [lift_id]) hc' hf
#align cardinal.nfp_bfamily_lt_ord_of_is_regular Cardinal.nfpBFamily_lt_ord_of_isRegular
theorem nfp_lt_ord_of_isRegular {f : Ordinal → Ordinal} {c} (hc : IsRegular c) (hc' : c ≠ ℵ₀)
(hf : ∀ i < c.ord, f i < c.ord) {a} : a < c.ord → nfp f a < c.ord :=
nfp_lt_ord
(by
rw [hc.cof_eq]
exact lt_of_le_of_ne hc.1 hc'.symm)
hf
#align cardinal.nfp_lt_ord_of_is_regular Cardinal.nfp_lt_ord_of_isRegular
theorem derivFamily_lt_ord_lift {ι} {f : ι → Ordinal → Ordinal} {c} (hc : IsRegular c)
(hι : Cardinal.lift.{v, u} (#ι) < c) (hc' : c ≠ ℵ₀)
(hf : ∀ (i), ∀ b < c.ord, f i b < c.ord) {a} :
a < c.ord → derivFamily.{u, v} f a < c.ord := by
have hω : ℵ₀ < c.ord.cof := by
rw [hc.cof_eq]
exact lt_of_le_of_ne hc.1 hc'.symm
apply a.limitRecOn
· rw [derivFamily_zero]
exact nfpFamily_lt_ord_lift hω (by rwa [hc.cof_eq]) hf
· intro b hb hb'
rw [derivFamily_succ]
exact
nfpFamily_lt_ord_lift hω (by rwa [hc.cof_eq]) hf
((ord_isLimit hc.1).2 _ (hb ((lt_succ b).trans hb')))
· intro b hb H hb'
rw [derivFamily_limit f hb]
exact
bsup_lt_ord_of_isRegular.{u, v} hc (ord_lt_ord.1 ((ord_card_le b).trans_lt hb')) fun o' ho' =>
H o' ho' (ho'.trans hb')
#align cardinal.deriv_family_lt_ord_lift Cardinal.derivFamily_lt_ord_lift
theorem derivFamily_lt_ord {ι} {f : ι → Ordinal → Ordinal} {c} (hc : IsRegular c) (hι : (#ι) < c)
(hc' : c ≠ ℵ₀) (hf : ∀ (i), ∀ b < c.ord, f i b < c.ord) {a} :
a < c.ord → derivFamily.{u, u} f a < c.ord :=
derivFamily_lt_ord_lift hc (by rwa [lift_id]) hc' hf
#align cardinal.deriv_family_lt_ord Cardinal.derivFamily_lt_ord
theorem derivBFamily_lt_ord_lift {o : Ordinal} {f : ∀ a < o, Ordinal → Ordinal} {c}
(hc : IsRegular c) (hι : Cardinal.lift.{v, u} o.card < c) (hc' : c ≠ ℵ₀)
(hf : ∀ (i hi), ∀ b < c.ord, f i hi b < c.ord) {a} :
a < c.ord → derivBFamily.{u, v} o f a < c.ord :=
derivFamily_lt_ord_lift hc (by rwa [mk_ordinal_out]) hc' fun i => hf _ _
#align cardinal.deriv_bfamily_lt_ord_lift Cardinal.derivBFamily_lt_ord_lift
theorem derivBFamily_lt_ord {o : Ordinal} {f : ∀ a < o, Ordinal → Ordinal} {c} (hc : IsRegular c)
(hι : o.card < c) (hc' : c ≠ ℵ₀) (hf : ∀ (i hi), ∀ b < c.ord, f i hi b < c.ord) {a} :
a < c.ord → derivBFamily.{u, u} o f a < c.ord :=
derivBFamily_lt_ord_lift hc (by rwa [lift_id]) hc' hf
#align cardinal.deriv_bfamily_lt_ord Cardinal.derivBFamily_lt_ord
theorem deriv_lt_ord {f : Ordinal.{u} → Ordinal} {c} (hc : IsRegular c) (hc' : c ≠ ℵ₀)
(hf : ∀ i < c.ord, f i < c.ord) {a} : a < c.ord → deriv f a < c.ord :=
derivFamily_lt_ord_lift hc
(by simpa using Cardinal.one_lt_aleph0.trans (lt_of_le_of_ne hc.1 hc'.symm)) hc' fun _ => hf
#align cardinal.deriv_lt_ord Cardinal.deriv_lt_ord
/-- A cardinal is inaccessible if it is an uncountable regular strong limit cardinal. -/
def IsInaccessible (c : Cardinal) :=
ℵ₀ < c ∧ IsRegular c ∧ IsStrongLimit c
#align cardinal.is_inaccessible Cardinal.IsInaccessible
theorem IsInaccessible.mk {c} (h₁ : ℵ₀ < c) (h₂ : c ≤ c.ord.cof) (h₃ : ∀ x < c, (2^x) < c) :
IsInaccessible c :=
⟨h₁, ⟨h₁.le, h₂⟩, (aleph0_pos.trans h₁).ne', h₃⟩
#align cardinal.is_inaccessible.mk Cardinal.IsInaccessible.mk
-- Lean's foundations prove the existence of ℵ₀ many inaccessible cardinals
theorem univ_inaccessible : IsInaccessible univ.{u, v} :=
IsInaccessible.mk (by simpa using lift_lt_univ' ℵ₀) (by simp) fun c h =>
by
rcases lt_univ'.1 h with ⟨c, rfl⟩
rw [← lift_two_power.{u, max (u + 1) v}]
apply lift_lt_univ'
#align cardinal.univ_inaccessible Cardinal.univ_inaccessible
theorem lt_power_cof {c : Cardinal.{u}} : ℵ₀ ≤ c → c < (c^cof c.ord) :=
Quotient.inductionOn c fun α h => by
rcases ord_eq α with ⟨r, wo, re⟩; skip
have := ord_isLimit h
rw [mk'_def, re] at this⊢
rcases cof_eq' r this with ⟨S, H, Se⟩
have := sum_lt_prod (fun a : S => #{ x // r x a }) (fun _ => #α) fun i => ?_
· simp only [Cardinal.prod_const, Cardinal.lift_id, ← Se, ← mk_sigma, power_def] at this ⊢
refine' lt_of_le_of_lt _ this
refine' ⟨Embedding.ofSurjective _ _⟩
· exact fun x => x.2.1
· exact fun a =>
let ⟨b, h, ab⟩ := H a
⟨⟨⟨_, h⟩, _, ab⟩, rfl⟩
· have := typein_lt_type r i
rwa [← re, lt_ord] at this
#align cardinal.lt_power_cof Cardinal.lt_power_cof
theorem lt_cof_power {a b : Cardinal} (ha : ℵ₀ ≤ a) (b1 : 1 < b) : a < cof (b^a).ord := by
have b0 : b ≠ 0 := (zero_lt_one.trans b1).ne'
apply lt_imp_lt_of_le_imp_le (power_le_power_left <| power_ne_zero a b0)
rw [← power_mul, mul_eq_self ha]
exact lt_power_cof (ha.trans <| (cantor' _ b1).le)
#align cardinal.lt_cof_power Cardinal.lt_cof_power
end Cardinal
{"author": "leanprover-community", "repo": "mathlib4", "sha": "b9a0a30342ca06e9817e22dbe46e75fc7f435500", "save_path": "github-repos/lean/leanprover-community-mathlib4", "path": "github-repos/lean/leanprover-community-mathlib4/mathlib4-b9a0a30342ca06e9817e22dbe46e75fc7f435500/Mathlib/SetTheory/Cardinal/Cofinality.lean"}
#include "algorithms/synthesis/syrec_synthesis.hpp"
#include "core/syrec/expression.hpp"
#include "core/syrec/program.hpp"
#include "core/syrec/variable.hpp"
#include "core/utils/timer.hpp"
#include <boost/dynamic_bitset.hpp>
#include <algorithm>
#include <cmath>
#include <functional>
#include <memory>
#include <numeric>
namespace syrec {
struct annotater {
explicit annotater(circuit& circ, const std::stack<statement::ptr>& stmts):
_circ(circ),
_stmts(stmts) {}
// Operator needs this signature to work
void operator()(gate& g) const {
if (!_stmts.empty()) {
_circ.annotate(g, "lno", std::to_string(_stmts.top()->line_number));
}
}
private:
circuit& _circ;
const std::stack<statement::ptr>& _stmts;
};
// Helper Functions for the synthesis methods
standard_syrec_synthesizer::standard_syrec_synthesizer(circuit& circ, const program& prog [[maybe_unused]]):
_circ(circ) {
free_const_lines_map.insert(std::make_pair(false, std::vector<unsigned>()));
free_const_lines_map.insert(std::make_pair(true, std::vector<unsigned>()));
// create the root
cct_man.current = add_vertex(cct_man.tree);
cct_man.root = cct_man.current;
// create a leaf
cct_man.current = add_vertex(cct_man.tree);
get(boost::vertex_name, cct_man.tree)[cct_man.current].circ = std::make_shared<circuit>();
get(boost::vertex_name, cct_man.tree)[cct_man.current].circ->gate_added.connect(annotater(*get(boost::vertex_name, cct_man.tree)[cct_man.current].circ, _stmts));
add_edge(cct_man.root, cct_man.current, cct_man.tree);
}
void standard_syrec_synthesizer::set_main_module(const module::ptr& main_module) {
assert(modules.empty());
modules.push(main_module);
}
bool standard_syrec_synthesizer::on_module(const module::ptr& main) {
for (const auto& stat: main->statements) {
if (!full_statement(stat)) {
if (!on_statement(stat)) {
return false;
}
}
}
return assemble_circuit(cct_man.root);
}
/// checking the entire statement
bool standard_syrec_synthesizer::full_statement(const statement::ptr& statement) {
bool okay = false;
if (auto* stat = dynamic_cast<assign_statement*>(statement.get())) {
okay = full_statement(*stat);
} else {
return false;
}
return okay;
}
bool standard_syrec_synthesizer::full_statement(const assign_statement& statement) {
std::vector<unsigned> d, dd, stat_lhs, comp, ddd; // `comp` stays empty: it is the "no operand" sentinel used below
std::vector<unsigned> lines;
get_variables(statement.lhs, stat_lhs);
op_rhs_lhs_expression(statement.rhs, d);
if (op_vec.empty()) {
return false;
}
flow(statement.rhs, ddd);
/// Only when the rhs input signals are repeated (since the results are stored in the rhs)
if (check_repeats()) {
flow(statement.rhs, dd);
if (exp_op_vector.size() == 1) {
if (exp_op_vector.at(0) == 1 or exp_op_vector.at(0) == 2) {
/// cancel out the signals
exp_op_vector.clear();
assign_op_vector.clear();
exp_lhs_vector.clear();
exp_rhs_vector.clear();
op_vec.clear();
} else {
if (statement.op == 1) {
expression_single_op(1, exp_lhs_vector.at(0), stat_lhs);
expression_single_op(1, exp_rhs_vector.at(0), stat_lhs);
exp_op_vector.clear();
assign_op_vector.clear();
exp_lhs_vector.clear();
exp_rhs_vector.clear();
op_vec.clear();
} else {
expression_single_op(statement.op, exp_lhs_vector.at(0), stat_lhs);
expression_single_op(exp_op_vector.at(0), exp_rhs_vector.at(0), stat_lhs);
exp_op_vector.clear();
assign_op_vector.clear();
exp_lhs_vector.clear();
exp_rhs_vector.clear();
op_vec.clear();
}
}
} else {
if (exp_lhs_vector.at(0) == exp_rhs_vector.at(0)) {
if (exp_op_vector.at(0) == 1 or exp_op_vector.at(0) == 2) {
/// cancel out the signals
} else { // exp_op_vector.at(0) is neither 1 nor 2 here
expression_single_op(statement.op, exp_lhs_vector.at(0), stat_lhs);
expression_single_op(exp_op_vector.at(0), exp_rhs_vector.at(0), stat_lhs);
}
} else {
solver(stat_lhs, statement.op, exp_lhs_vector.at(0), exp_op_vector.at(0), exp_rhs_vector.at(0));
}
unsigned j = 0;
unsigned z;
std::vector<unsigned> stat_assign_op;
if ((exp_op_vector.size() % 2) == 0) {
z = exp_op_vector.size() / 2;
} else {
z = (exp_op_vector.size() - 1) / 2;
}
for (unsigned k = 0; k < z; k++) { // `k < z` avoids unsigned wrap-around when z == 0
stat_assign_op.push_back(assign_op_vector.at(k));
}
/// Assignment operations
std::reverse(stat_assign_op.begin(), stat_assign_op.end());
/// If the reversible assignment operator is "-", the assignment operations must be negated appropriately
if (statement.op == 1) {
for (unsigned int& i: stat_assign_op) {
if (i == 0) {
i = 1;
} else if (i == 1) {
i = 0;
} else {
continue;
}
}
}
for (unsigned i = 1; i < exp_op_vector.size(); i++) { // `i < size()` avoids unsigned wrap-around on an empty vector
/// when both rhs and lhs exist
if ((exp_lhs_vector.at(i) != comp) && (exp_rhs_vector.at(i) != comp)) {
if (exp_lhs_vector.at(i) == exp_rhs_vector.at(i)) {
if (exp_op_vector.at(i) == 1 or exp_op_vector.at(i) == 2) {
/// cancel out the signals
j = j + 1;
} else { // exp_op_vector.at(i) is neither 1 nor 2 here
if (stat_assign_op.at(j) == 1) {
expression_single_op(1, exp_lhs_vector.at(i), stat_lhs);
expression_single_op(1, exp_rhs_vector.at(i), stat_lhs);
j = j + 1;
} else {
expression_single_op(stat_assign_op.at(j), exp_lhs_vector.at(i), stat_lhs);
expression_single_op(exp_op_vector.at(i), exp_rhs_vector.at(i), stat_lhs);
j = j + 1;
}
}
} else {
solver(stat_lhs, stat_assign_op.at(j), exp_lhs_vector.at(i), exp_op_vector.at(i), exp_rhs_vector.at(i));
j = j + 1;
}
}
/// when only rhs exists
else if ((exp_lhs_vector.at(i) == comp) && (exp_rhs_vector.at(i) != comp)) {
exp_evaluate(lines, stat_assign_op.at(j), exp_rhs_vector.at(i), stat_lhs);
j = j + 1;
}
/// when only lhs exists
else if ((exp_lhs_vector.at(i) != comp) && (exp_rhs_vector.at(i) == comp)) {
exp_evaluate(lines, stat_assign_op.at(j), exp_lhs_vector.at(i), stat_lhs);
j = j + 1;
} else if ((exp_lhs_vector.at(i) == comp) && (exp_rhs_vector.at(i) == comp)) {
/// both sides empty: nothing to synthesize for this entry
}
}
exp_op_vector.clear();
assign_op_vector.clear();
exp_lhs_vector.clear();
exp_rhs_vector.clear();
op_vec.clear();
}
} else {
exp_op_vector.clear();
assign_op_vector.clear();
exp_lhs_vector.clear();
exp_rhs_vector.clear();
op_vec.clear();
return false;
}
exp_op_vector.clear();
assign_op_vector.clear();
exp_lhs_vector.clear();
exp_rhs_vector.clear();
op_vec.clear();
return true;
}
bool standard_syrec_synthesizer::flow(const expression::ptr& expression, std::vector<unsigned>& v) {
if (auto* binary = dynamic_cast<binary_expression*>(expression.get())) {
return flow(*binary, v);
} else if (auto* var = dynamic_cast<variable_expression*>(expression.get())) {
return flow(*var, v);
} else {
return false;
}
}
bool standard_syrec_synthesizer::flow(const variable_expression& expression, std::vector<unsigned>& v) {
get_variables(expression.var, v);
return true;
}
/// generating LHS and RHS (can be whole expressions as well)
bool standard_syrec_synthesizer::flow(const binary_expression& expression, std::vector<unsigned>& v [[maybe_unused]]) {
std::vector<unsigned> lhs, rhs, comp;
assign_op_vector.push_back(expression.op);
if (!flow(expression.lhs, lhs) || !flow(expression.rhs, rhs)) {
return false;
}
exp_lhs_vector.push_back(lhs);
exp_rhs_vector.push_back(rhs);
exp_op_vector.push_back(expression.op);
return true;
}
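    /// Applies stat_lhs (stat_op)= (exp_lhs exp_op exp_rhs). When both operators match, the
    /// operation is distributed over the operands (for '-' the sign of the second operand flips);
    /// otherwise the sub-expression is evaluated on helper lines first and inverted again afterwards.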
bool standard_syrec_synthesizer::solver(const std::vector<unsigned>& stat_lhs, unsigned stat_op, const std::vector<unsigned>& exp_lhs, unsigned exp_op, const std::vector<unsigned>& exp_rhs) {
std::vector<unsigned> lines;
if (stat_op == exp_op) {
if (exp_op == 1) {
expression_single_op(1, exp_lhs, stat_lhs);
expression_single_op(0, exp_rhs, stat_lhs);
} else {
expression_single_op(stat_op, exp_lhs, stat_lhs);
expression_single_op(stat_op, exp_rhs, stat_lhs);
}
} else {
sub_flag = true;
exp_evaluate(lines, exp_op, exp_lhs, exp_rhs);
sub_flag = false;
exp_evaluate(lines, stat_op, lines, stat_lhs);
sub_flag = true;
if (exp_op < 3) {
expression_op_inverse(exp_op, exp_lhs, exp_rhs);
}
}
sub_flag = false;
return true;
}
/// If the input signals are repeated (i.e., rhs input signals are repeated)
bool standard_syrec_synthesizer::check_repeats() {
std::vector check_lhs_vec(exp_lhs_vector.cbegin(), exp_lhs_vector.cend());
std::vector check_rhs_vec(exp_rhs_vector.cbegin(), exp_rhs_vector.cend());
        for (unsigned k = 0; k < check_lhs_vec.size();) {
            if (check_lhs_vec.at(k).empty()) {
                check_lhs_vec.erase(check_lhs_vec.begin() + k); // do not advance: the next element has shifted into slot k
            } else {
                ++k;
            }
        }
        for (unsigned k = 0; k < check_rhs_vec.size();) {
            if (check_rhs_vec.at(k).empty()) {
                check_rhs_vec.erase(check_rhs_vec.begin() + k); // do not advance: the next element has shifted into slot k
            } else {
                ++k;
            }
        }
for (int i = 0; i < int(check_rhs_vec.size()); i++) {
for (int j = 0; j < int(check_rhs_vec.size()); j++) {
if (j != i) {
if (check_rhs_vec.at(i) == check_rhs_vec.at(j)) {
exp_op_vector.clear();
exp_lhs_vector.clear();
exp_rhs_vector.clear();
return true;
}
}
}
}
for (auto& i: check_lhs_vec) {
for (auto& j: check_rhs_vec) {
if (i == j) {
exp_op_vector.clear();
exp_lhs_vector.clear();
exp_rhs_vector.clear();
return true;
}
}
}
exp_op_vector.clear();
exp_lhs_vector.clear();
exp_rhs_vector.clear();
return false;
}
/// generating LHS and RHS (not whole expressions, just the corresponding variables)
bool standard_syrec_synthesizer::op_rhs_lhs_expression(const expression::ptr& expression, std::vector<unsigned>& v) {
if (auto* binary = dynamic_cast<binary_expression*>(expression.get())) {
return op_rhs_lhs_expression(*binary, v);
} else if (auto* var = dynamic_cast<variable_expression*>(expression.get())) {
return op_rhs_lhs_expression(*var, v);
} else {
return false;
}
}
bool standard_syrec_synthesizer::op_rhs_lhs_expression(const variable_expression& expression, std::vector<unsigned>& v) {
get_variables(expression.var, v);
return true;
}
bool standard_syrec_synthesizer::op_rhs_lhs_expression(const binary_expression& expression, std::vector<unsigned>& v) {
std::vector<unsigned> lhs, rhs;
if (!op_rhs_lhs_expression(expression.lhs, lhs) || !op_rhs_lhs_expression(expression.rhs, rhs)) {
return false;
}
v = rhs;
op_vec.push_back(expression.op);
return true;
}
    /// When the input signals are not repeated
bool standard_syrec_synthesizer::on_statement(const statement::ptr& statement) {
_stmts.push(statement);
bool okay = false;
if (auto* swap_stat = dynamic_cast<swap_statement*>(statement.get())) {
okay = on_statement(*swap_stat);
} else if (auto* unary_stat = dynamic_cast<unary_statement*>(statement.get())) {
okay = on_statement(*unary_stat);
} else if (auto* assign_stat = dynamic_cast<assign_statement*>(statement.get())) {
okay = on_statement(*assign_stat);
} else if (auto* if_stat = dynamic_cast<if_statement*>(statement.get())) {
okay = on_statement(*if_stat);
} else if (auto* for_stat = dynamic_cast<for_statement*>(statement.get())) {
okay = on_statement(*for_stat);
} else if (auto* call_stat = dynamic_cast<call_statement*>(statement.get())) {
okay = on_statement(*call_stat);
} else if (auto* uncall_stat = dynamic_cast<uncall_statement*>(statement.get())) {
okay = on_statement(*uncall_stat);
} else if (auto* skip_stat = dynamic_cast<skip_statement*>(statement.get())) {
okay = on_statement(*skip_stat);
} else {
return false;
}
_stmts.pop();
return okay;
}
bool standard_syrec_synthesizer::on_statement(const swap_statement& statement) {
std::vector<unsigned> lhs, rhs;
get_variables(statement.lhs, lhs);
get_variables(statement.rhs, rhs);
assert(lhs.size() == rhs.size());
swap(lhs, rhs);
return true;
}
bool standard_syrec_synthesizer::on_statement(const unary_statement& statement) {
// load variable
std::vector<unsigned> var;
get_variables(statement.var, var);
switch (statement.op) {
case unary_statement::invert:
bitwise_negation(var);
break;
case unary_statement::increment:
increment(var);
break;
case unary_statement::decrement:
decrement(var);
break;
default:
return false;
}
return true;
}
    /// Handles assignment statements that do not include repeated input signals
bool standard_syrec_synthesizer::on_statement(const assign_statement& statement) {
std::vector<unsigned> lhs, rhs, d;
get_variables(statement.lhs, lhs);
op_rhs_lhs_expression(statement.rhs, d);
on_expression(statement.rhs, rhs, lhs, statement.op);
op_vec.clear();
bool status = false;
switch (statement.op) {
case assign_statement::add: {
if (!exp_opp.empty() && exp_opp.top() == statement.op) {
status = increase_new(lhs, exp_lhss.top());
status = increase_new(lhs, exp_rhss.top());
exp_opp.pop();
exp_lhss.pop();
exp_rhss.pop();
} else {
status = increase_new(lhs, rhs);
}
while (!exp_opp.empty()) {
expression_op_inverse(exp_opp.top(), exp_lhss.top(), exp_rhss.top());
sub_flag = false;
exp_opp.pop();
exp_lhss.pop();
exp_rhss.pop();
}
} break;
case assign_statement::subtract: {
if (!exp_opp.empty() && exp_opp.top() == statement.op) {
status = decrease_new(lhs, exp_lhss.top());
status = increase_new(lhs, exp_rhss.top());
exp_opp.pop();
exp_lhss.pop();
exp_rhss.pop();
} else {
status = decrease_new(lhs, rhs);
}
while (!exp_opp.empty()) {
expression_op_inverse(exp_opp.top(), exp_lhss.top(), exp_rhss.top());
sub_flag = false;
exp_opp.pop();
exp_lhss.pop();
exp_rhss.pop();
}
} break;
case assign_statement::exor: {
if (!exp_opp.empty() && exp_opp.top() == statement.op) {
status = bitwise_cnot(lhs, exp_lhss.top());
status = bitwise_cnot(lhs, exp_rhss.top());
exp_opp.pop();
exp_lhss.pop();
exp_rhss.pop();
} else {
status = bitwise_cnot(lhs, rhs);
}
while (!exp_opp.empty()) {
expression_op_inverse(exp_opp.top(), exp_lhss.top(), exp_rhss.top());
sub_flag = false;
exp_opp.pop();
exp_lhss.pop();
exp_rhss.pop();
}
} break;
default:
return false;
}
return status;
}
bool standard_syrec_synthesizer::on_statement(const if_statement& statement) {
// calculate expression
std::vector<unsigned> expression_result, lhs_stat;
unsigned op = 0u;
on_expression(statement.condition, expression_result, lhs_stat, op);
assert(expression_result.size() == 1u);
        // use the expression result as the helper line
unsigned helper_line = expression_result.front();
// activate this line
add_active_control(helper_line);
for (const auto& stat: statement.then_statements) {
if (!full_statement(stat)) {
if (!on_statement(stat)) {
return false;
}
}
}
// toggle helper line
remove_active_control(helper_line);
(*(get(boost::vertex_name, cct_man.tree)[cct_man.current].circ)).append_not(helper_line);
add_active_control(helper_line);
for (const auto& stat: statement.else_statements) {
if (!full_statement(stat)) {
if (!on_statement(stat)) {
return false;
}
}
}
        // deactivate helper line
remove_active_control(helper_line);
(*(get(boost::vertex_name, cct_man.tree)[cct_man.current].circ)).append_not(helper_line);
return true;
}
bool standard_syrec_synthesizer::on_statement(const for_statement& statement) {
const auto [nfrom, nto] = statement.range;
const unsigned from = nfrom ? nfrom->evaluate(loop_map) : 1u; // default value is 1u
const unsigned to = nto->evaluate(loop_map);
const unsigned step = statement.step ? statement.step->evaluate(loop_map) : 1u; // default step is +1
const std::string& loop_variable = statement.loop_variable;
if (from <= to) {
for (unsigned i = from; i <= to; i += step) {
// adjust loop variable if necessary
if (!loop_variable.empty()) {
loop_map[loop_variable] = i;
}
for (const auto& stat: statement.statements) {
if (!full_statement(stat)) {
if (!on_statement(stat)) {
return false;
}
}
}
}
        } else { // from > to: iterate downwards
for (int i = (int)from; i >= (int)to; i -= (int)step) {
// adjust loop variable if necessary
if (!loop_variable.empty()) {
loop_map[loop_variable] = i;
}
for (const auto& stat: statement.statements) {
if (!full_statement(stat)) {
if (!on_statement(stat)) {
return false;
}
}
}
}
}
// clear loop variable if necessary
if (!loop_variable.empty()) {
assert(loop_map.erase(loop_variable) == 1u);
}
return true;
}
bool standard_syrec_synthesizer::on_statement(const call_statement& statement) {
        // 1. Adjust the referenced module's parameters to the call arguments
for (unsigned i = 0u; i < statement.parameters.size(); ++i) {
const std::string& parameter = statement.parameters.at(i);
const auto& module_parameter = statement.target->parameters.at(i);
module_parameter->set_reference(modules.top()->find_parameter_or_variable(parameter));
}
// 2. Create new lines for the module's variables
add_variables(_circ, statement.target->variables);
modules.push(statement.target);
for (const auto& stat: statement.target->statements) {
if (!full_statement(stat)) {
if (!on_statement(stat)) {
return false;
}
}
}
modules.pop();
return true;
}
bool standard_syrec_synthesizer::on_statement(const uncall_statement& statement) {
        // 1. Adjust the referenced module's parameters to the call arguments
for (unsigned i = 0u; i < statement.parameters.size(); ++i) {
const std::string& parameter = statement.parameters.at(i);
const auto& module_parameter = statement.target->parameters.at(i);
module_parameter->set_reference(modules.top()->find_parameter_or_variable(parameter));
}
// 2. Create new lines for the module's variables
add_variables(_circ, statement.target->variables);
modules.push(statement.target);
const auto statements = statement.target->statements;
for (auto it = statements.rbegin(); it != statements.rend(); ++it) {
const auto reverse_statement = (*it)->reverse();
if (!full_statement(reverse_statement)) {
if (!on_statement(reverse_statement)) {
return false;
}
}
}
modules.pop();
return true;
}
bool standard_syrec_synthesizer::on_statement(const skip_statement& statement [[maybe_unused]]) {
return true;
}
bool standard_syrec_synthesizer::on_expression(const expression::ptr& expression, std::vector<unsigned>& lines, std::vector<unsigned>& lhs_stat, unsigned op) {
if (auto* numeric = dynamic_cast<numeric_expression*>(expression.get())) {
return on_expression(*numeric, lines);
} else if (auto* variable = dynamic_cast<variable_expression*>(expression.get())) {
return on_expression(*variable, lines);
} else if (auto* binary = dynamic_cast<binary_expression*>(expression.get())) {
return on_expression(*binary, lines, lhs_stat, op);
} else if (auto* shift = dynamic_cast<shift_expression*>(expression.get())) {
return on_expression(*shift, lines, lhs_stat, op);
} else {
return false;
}
}
bool standard_syrec_synthesizer::on_expression(const numeric_expression& expression, std::vector<unsigned>& lines) {
get_constant_lines(expression.bitwidth(), expression.value->evaluate(loop_map), lines);
return true;
}
bool standard_syrec_synthesizer::on_expression(const variable_expression& expression, std::vector<unsigned>& lines) {
get_variables(expression.var, lines);
return true;
}
    /// Handles binary expressions inside assignment statements that do not include repeated input signals
bool standard_syrec_synthesizer::on_expression(const binary_expression& expression, std::vector<unsigned>& lines, std::vector<unsigned>& lhs_stat, unsigned op) {
std::vector<unsigned> lhs, rhs;
if (!on_expression(expression.lhs, lhs, lhs_stat, op) || !on_expression(expression.rhs, rhs, lhs_stat, op)) {
return false;
}
exp_lhss.push(lhs);
exp_rhss.push(rhs);
exp_opp.push(expression.op);
if (exp_opp.size() == op_vec.size()) {
if (exp_opp.top() == op) {
return true;
}
}
switch (expression.op) {
case binary_expression::add: // +
increase_new(rhs, lhs);
lines = rhs;
break;
case binary_expression::subtract: // -
decrease_new_assign(rhs, lhs);
lines = rhs;
break;
case binary_expression::exor: // ^
bitwise_cnot(rhs, lhs); // duplicate lhs
lines = rhs;
break;
case binary_expression::multiply: // *
get_constant_lines(expression.bitwidth(), 0u, lines);
multiplication(lines, lhs, rhs);
break;
case binary_expression::divide: // /
get_constant_lines(expression.bitwidth(), 0u, lines);
division(lines, lhs, rhs);
break;
            case binary_expression::modulo: { // %
get_constant_lines(expression.bitwidth(), 0u, lines);
std::vector<unsigned> quot;
get_constant_lines(expression.bitwidth(), 0u, quot);
bitwise_cnot(lines, lhs); // duplicate lhs
modulo(quot, lines, rhs);
} break;
case binary_expression::logical_and: // &&
lines.emplace_back(get_constant_line(false));
conjunction(lines.at(0), lhs.at(0), rhs.at(0));
break;
case binary_expression::logical_or: // ||
lines.emplace_back(get_constant_line(false));
disjunction(lines.at(0), lhs.at(0), rhs.at(0));
break;
case binary_expression::bitwise_and: // &
get_constant_lines(expression.bitwidth(), 0u, lines);
bitwise_and(lines, lhs, rhs);
break;
case binary_expression::bitwise_or: // |
get_constant_lines(expression.bitwidth(), 0u, lines);
bitwise_or(lines, lhs, rhs);
break;
case binary_expression::less_than: // <
lines.emplace_back(get_constant_line(false));
less_than(lines.at(0), lhs, rhs);
break;
case binary_expression::greater_than: // >
lines.emplace_back(get_constant_line(false));
greater_than(lines.at(0), lhs, rhs);
break;
case binary_expression::equals: // =
lines.emplace_back(get_constant_line(false));
equals(lines.at(0), lhs, rhs);
break;
case binary_expression::not_equals: // !=
lines.emplace_back(get_constant_line(false));
not_equals(lines.at(0), lhs, rhs);
break;
case binary_expression::less_equals: // <=
lines.emplace_back(get_constant_line(false));
less_equals(lines.at(0), lhs, rhs);
break;
case binary_expression::greater_equals: // >=
lines.emplace_back(get_constant_line(false));
greater_equals(lines.at(0), lhs, rhs);
break;
default:
return false;
}
return true;
}
    /// Used when the rhs input signals are repeated (to solve the statements individually)
bool standard_syrec_synthesizer::exp_evaluate(std::vector<unsigned>& lines, unsigned op, const std::vector<unsigned>& lhs, const std::vector<unsigned>& rhs) {
switch (op) {
case binary_expression::add: // +
increase_new(rhs, lhs);
lines = rhs;
break;
case binary_expression::subtract: // -
if (sub_flag) {
decrease_new_assign(rhs, lhs);
lines = rhs;
} else {
decrease_new(rhs, lhs);
lines = rhs;
}
break;
case binary_expression::exor: // ^
bitwise_cnot(rhs, lhs); // duplicate lhs
lines = rhs;
break;
default:
return false;
}
return true;
}
bool standard_syrec_synthesizer::on_expression(const shift_expression& expression, std::vector<unsigned>& lines, std::vector<unsigned>& lhs_stat, unsigned op) {
std::vector<unsigned> lhs;
if (!on_expression(expression.lhs, lhs, lhs_stat, op)) {
return false;
}
unsigned rhs = expression.rhs->evaluate(loop_map);
switch (expression.op) {
case shift_expression::left: // <<
get_constant_lines(expression.bitwidth(), 0u, lines);
left_shift(lines, lhs, rhs);
break;
            case shift_expression::right: // >>
get_constant_lines(expression.bitwidth(), 0u, lines);
right_shift(lines, lhs, rhs);
break;
default:
return false;
}
return true;
}
//**********************************************************************
//***** Unary Operations *****
//**********************************************************************
bool standard_syrec_synthesizer::bitwise_negation(const std::vector<unsigned>& dest) {
for (unsigned idx: dest) {
(*(get(boost::vertex_name, cct_man.tree)[cct_man.current].circ)).append_not(idx);
}
return true;
}
bool standard_syrec_synthesizer::decrement(const std::vector<unsigned>& dest) {
for (unsigned int i: dest) {
(*(get(boost::vertex_name, cct_man.tree)[cct_man.current].circ)).append_not(i);
add_active_control(i);
}
for (unsigned int i: dest) {
remove_active_control(i);
}
return true;
}
bool standard_syrec_synthesizer::increment(const std::vector<unsigned>& dest) {
for (unsigned int i: dest) {
add_active_control(i);
}
for (int i = int(dest.size()) - 1; i >= 0; --i) {
remove_active_control(dest.at(i));
(*(get(boost::vertex_name, cct_man.tree)[cct_man.current].circ)).append_not(dest.at(i));
}
return true;
}
//**********************************************************************
//***** Binary Operations *****
//**********************************************************************
bool standard_syrec_synthesizer::bitwise_and(const std::vector<unsigned>& dest, const std::vector<unsigned>& src1, const std::vector<unsigned>& src2) {
bool ok = true;
for (unsigned i = 0u; i < dest.size(); ++i) {
ok &= conjunction(dest.at(i), src1.at(i), src2.at(i));
}
return ok;
}
bool standard_syrec_synthesizer::bitwise_cnot(const std::vector<unsigned>& dest, const std::vector<unsigned>& src) {
for (unsigned i = 0u; i < src.size(); ++i) {
(*(get(boost::vertex_name, cct_man.tree)[cct_man.current].circ)).append_cnot(src.at(i), dest.at(i));
}
return true;
}
bool standard_syrec_synthesizer::bitwise_or(const std::vector<unsigned>& dest, const std::vector<unsigned>& src1, const std::vector<unsigned>& src2) {
bool ok = true;
for (unsigned i = 0u; i < dest.size(); ++i) {
ok &= disjunction(dest.at(i), src1.at(i), src2.at(i));
}
return ok;
}
bool standard_syrec_synthesizer::conjunction(unsigned dest, unsigned src1, unsigned src2) {
(*(get(boost::vertex_name, cct_man.tree)[cct_man.current].circ)).append_toffoli(src1, src2, dest);
return true;
}
bool standard_syrec_synthesizer::decrease_with_carry(const std::vector<unsigned>& dest, const std::vector<unsigned>& src, unsigned carry) {
for (unsigned i = 0u; i < src.size(); ++i) {
(*(get(boost::vertex_name, cct_man.tree)[cct_man.current].circ)).append_not(dest.at(i));
}
increase_with_carry(dest, src, carry);
for (unsigned i = 0u; i < src.size(); ++i) {
(*(get(boost::vertex_name, cct_man.tree)[cct_man.current].circ)).append_not(dest.at(i));
}
return true;
}
bool standard_syrec_synthesizer::disjunction(unsigned dest, unsigned src1, unsigned src2) {
(*(get(boost::vertex_name, cct_man.tree)[cct_man.current].circ)).append_cnot(src1, dest);
(*(get(boost::vertex_name, cct_man.tree)[cct_man.current].circ)).append_cnot(src2, dest);
(*(get(boost::vertex_name, cct_man.tree)[cct_man.current].circ)).append_toffoli(src1, src2, dest);
return true;
}
bool standard_syrec_synthesizer::division(const std::vector<unsigned>& dest, const std::vector<unsigned>& src1, const std::vector<unsigned>& src2) {
if (!modulo(dest, src1, src2)) return false;
std::vector<unsigned> sum;
std::vector<unsigned> partial;
for (unsigned i = 1u; i < src1.size(); ++i) {
(*(get(boost::vertex_name, cct_man.tree)[cct_man.current].circ)).append_not(src2.at(i));
}
for (unsigned i = 1u; i < src1.size(); ++i) {
add_active_control(src2.at(i));
}
for (int i = int(src1.size()) - 1; i >= 0; --i) {
partial.push_back(src2.at(src1.size() - 1u - i));
sum.insert(sum.begin(), src1.at(i));
add_active_control(dest.at(i));
increase_new(sum, partial);
remove_active_control(dest.at(i));
if (i > 0) {
for (unsigned j = (src1.size() - i); j < src1.size(); ++j) {
remove_active_control(src2.at(j));
}
(*(get(boost::vertex_name, cct_man.tree)[cct_man.current].circ)).append_not(src2.at(src1.size() - i));
for (unsigned j = (src1.size() + 1u - i); j < src1.size(); ++j) {
add_active_control(src2.at(j));
}
}
}
return true;
}
bool standard_syrec_synthesizer::equals(unsigned dest, const std::vector<unsigned>& src1, const std::vector<unsigned>& src2) {
for (unsigned i = 0u; i < src1.size(); ++i) {
(*(get(boost::vertex_name, cct_man.tree)[cct_man.current].circ)).append_cnot(src2.at(i), src1.at(i));
(*(get(boost::vertex_name, cct_man.tree)[cct_man.current].circ)).append_not(src1.at(i));
}
gate::line_container controls(src1.begin(), src1.end());
(*(get(boost::vertex_name, cct_man.tree)[cct_man.current].circ)).append_multi_control_toffoli(controls, dest);
for (unsigned i = 0u; i < src1.size(); ++i) {
(*(get(boost::vertex_name, cct_man.tree)[cct_man.current].circ)).append_cnot(src2.at(i), src1.at(i));
(*(get(boost::vertex_name, cct_man.tree)[cct_man.current].circ)).append_not(src1.at(i));
}
return true;
}
bool standard_syrec_synthesizer::greater_equals(unsigned dest, const std::vector<unsigned>& src1, const std::vector<unsigned>& src2) {
if (!greater_than(dest, src2, src1)) return false;
(*(get(boost::vertex_name, cct_man.tree)[cct_man.current].circ)).append_not(dest);
return true;
}
bool standard_syrec_synthesizer::greater_than(unsigned dest, const std::vector<unsigned>& src1, const std::vector<unsigned>& src2) {
return less_than(dest, src2, src1);
}
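    /// In-place addition rhs += lhs, realised as a CNOT/Toffoli ripple-carry adder without ancilla
    /// lines; both operands are expected to have the same bitwidth.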
bool standard_syrec_synthesizer::increase_new(const std::vector<unsigned>& rhs, const std::vector<unsigned>& lhs) {
unsigned bitwidth = rhs.size();
if (bitwidth == 1) {
(*(get(boost::vertex_name, cct_man.tree)[cct_man.current].circ)).append_cnot(lhs.at(0), rhs.at(0));
} else {
for (unsigned i = 1; i <= bitwidth - 1; ++i) {
(*(get(boost::vertex_name, cct_man.tree)[cct_man.current].circ)).append_cnot(lhs.at(i), rhs.at(i));
}
for (unsigned i = bitwidth - 2; i >= 1; --i) {
(*(get(boost::vertex_name, cct_man.tree)[cct_man.current].circ)).append_cnot(lhs.at(i), lhs.at(i + 1));
}
for (unsigned i = 0; i <= bitwidth - 2; ++i) {
(*(get(boost::vertex_name, cct_man.tree)[cct_man.current].circ)).append_toffoli(rhs.at(i), lhs.at(i), lhs.at(i + 1));
}
(*(get(boost::vertex_name, cct_man.tree)[cct_man.current].circ)).append_cnot(lhs.at(bitwidth - 1), rhs.at(bitwidth - 1));
for (unsigned i = bitwidth - 2; i >= 1; --i) {
(*(get(boost::vertex_name, cct_man.tree)[cct_man.current].circ)).append_toffoli(lhs.at(i), rhs.at(i), lhs.at(i + 1));
(*(get(boost::vertex_name, cct_man.tree)[cct_man.current].circ)).append_cnot(lhs.at(i), rhs.at(i));
}
(*(get(boost::vertex_name, cct_man.tree)[cct_man.current].circ)).append_toffoli(lhs.at(0), rhs.at(0), lhs.at(1));
(*(get(boost::vertex_name, cct_man.tree)[cct_man.current].circ)).append_cnot(lhs.at(0), rhs.at(0));
for (unsigned i = 1; i <= bitwidth - 2; ++i) {
(*(get(boost::vertex_name, cct_man.tree)[cct_man.current].circ)).append_cnot(lhs.at(i), lhs.at(i + 1));
}
for (unsigned i = 1; i <= bitwidth - 1; ++i) {
(*(get(boost::vertex_name, cct_man.tree)[cct_man.current].circ)).append_cnot(lhs.at(i), rhs.at(i));
}
}
return true;
}
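    /// In-place subtraction rhs -= lhs, obtained by conjugating increase_new with bitwise NOTs on
    /// rhs (two's-complement identity: a - b = ~(~a + b)).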
bool standard_syrec_synthesizer::decrease_new(const std::vector<unsigned>& rhs, const std::vector<unsigned>& lhs) {
for (unsigned int rh: rhs) {
(*(get(boost::vertex_name, cct_man.tree)[cct_man.current].circ)).append_not(rh);
}
increase_new(rhs, lhs);
for (unsigned int rh: rhs) {
(*(get(boost::vertex_name, cct_man.tree)[cct_man.current].circ)).append_not(rh);
}
return true;
}
bool standard_syrec_synthesizer::decrease_new_assign(const std::vector<unsigned>& rhs, const std::vector<unsigned>& lhs) {
for (unsigned int lh: lhs) {
(*(get(boost::vertex_name, cct_man.tree)[cct_man.current].circ)).append_not(lh);
}
increase_new(rhs, lhs);
for (unsigned int lh: lhs) {
(*(get(boost::vertex_name, cct_man.tree)[cct_man.current].circ)).append_not(lh);
}
for (unsigned i = 0u; i < lhs.size(); ++i) {
(*(get(boost::vertex_name, cct_man.tree)[cct_man.current].circ)).append_not(rhs.at(i));
}
return true;
}
bool standard_syrec_synthesizer::expression_op_inverse(unsigned op, const std::vector<unsigned>& exp_lhs, const std::vector<unsigned>& exp_rhs) {
switch (op) {
case binary_expression::add: // +
decrease_new(exp_rhs, exp_lhs);
break;
case binary_expression::subtract: // -
decrease_new_assign(exp_rhs, exp_lhs);
break;
case binary_expression::exor: // ^
bitwise_cnot(exp_rhs, exp_lhs);
break;
default:
return false;
}
return true;
}
bool standard_syrec_synthesizer::expression_single_op(unsigned op, const std::vector<unsigned>& exp_lhs, const std::vector<unsigned>& exp_rhs) {
switch (op) {
case binary_expression::add: // +
increase_new(exp_rhs, exp_lhs);
break;
case binary_expression::subtract: // -
if (sub_flag) {
decrease_new_assign(exp_rhs, exp_lhs);
} else {
decrease_new(exp_rhs, exp_lhs);
}
break;
case binary_expression::exor: // ^
bitwise_cnot(exp_rhs, exp_lhs);
break;
default:
return false;
}
return true;
}
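    /// Ripple-carry addition dest += src that additionally XORs the outgoing carry onto the given
    /// carry line.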
bool standard_syrec_synthesizer::increase_with_carry(const std::vector<unsigned>& dest, const std::vector<unsigned>& src, unsigned carry) {
unsigned bitwidth = src.size();
if (bitwidth == 0) return true;
for (unsigned i = 1u; i < bitwidth; ++i) {
(*(get(boost::vertex_name, cct_man.tree)[cct_man.current].circ)).append_cnot(src.at(i), dest.at(i));
}
if (bitwidth > 1) {
(*(get(boost::vertex_name, cct_man.tree)[cct_man.current].circ)).append_cnot(src.at(bitwidth - 1), carry);
}
for (int i = (int)bitwidth - 2; i > 0; --i) {
(*(get(boost::vertex_name, cct_man.tree)[cct_man.current].circ)).append_cnot(src.at(i), src.at(i + 1));
}
for (unsigned i = 0u; i < bitwidth - 1; ++i) {
(*(get(boost::vertex_name, cct_man.tree)[cct_man.current].circ)).append_toffoli(src.at(i), dest.at(i), src.at(i + 1));
}
(*(get(boost::vertex_name, cct_man.tree)[cct_man.current].circ)).append_toffoli(src.at(bitwidth - 1), dest.at(bitwidth - 1), carry);
for (int i = (int)bitwidth - 1; i > 0; --i) {
(*(get(boost::vertex_name, cct_man.tree)[cct_man.current].circ)).append_cnot(src.at(i), dest.at(i));
(*(get(boost::vertex_name, cct_man.tree)[cct_man.current].circ)).append_toffoli(dest.at(i - 1), src.at(i - 1), src.at(i));
}
for (unsigned i = 1u; i < bitwidth - 1u; ++i) {
(*(get(boost::vertex_name, cct_man.tree)[cct_man.current].circ)).append_cnot(src.at(i), src.at(i + 1));
}
for (unsigned i = 0u; i < bitwidth; ++i) {
(*(get(boost::vertex_name, cct_man.tree)[cct_man.current].circ)).append_cnot(src.at(i), dest.at(i));
}
return true;
}
bool standard_syrec_synthesizer::less_equals(unsigned dest, const std::vector<unsigned>& src1, const std::vector<unsigned>& src2) {
if (!less_than(dest, src2, src1)) return false;
(*(get(boost::vertex_name, cct_man.tree)[cct_man.current].circ)).append_not(dest);
return true;
}
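    /// dest ^= (src1 < src2): the comparison bit is the borrow of src1 - src2; the subsequent
    /// addition restores src1 to its original value.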
bool standard_syrec_synthesizer::less_than(unsigned dest, const std::vector<unsigned>& src1, const std::vector<unsigned>& src2) {
return (decrease_with_carry(src1, src2, dest) && increase_new(src1, src2));
}
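    /// Remainder computation in restoring-division style: shifted copies of src2 are conditionally
    /// subtracted from the partial sums of src1, with dest recording the per-step comparison bits.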
bool standard_syrec_synthesizer::modulo(const std::vector<unsigned>& dest, const std::vector<unsigned>& src1, const std::vector<unsigned>& src2) {
std::vector<unsigned> sum;
std::vector<unsigned> partial;
for (unsigned i = 1u; i < src1.size(); ++i) {
(*(get(boost::vertex_name, cct_man.tree)[cct_man.current].circ)).append_not(src2.at(i));
}
for (unsigned i = 1u; i < src1.size(); ++i) {
add_active_control(src2.at(i));
}
for (int i = int(src1.size()) - 1; i >= 0; --i) {
partial.push_back(src2.at(src1.size() - 1u - i));
sum.insert(sum.begin(), src1.at(i));
decrease_with_carry(sum, partial, dest.at(i));
add_active_control(dest.at(i));
increase_new(sum, partial);
remove_active_control(dest.at(i));
(*(get(boost::vertex_name, cct_man.tree)[cct_man.current].circ)).append_not(dest.at(i));
if (i > 0) {
for (unsigned j = (src1.size() - i); j < src1.size(); ++j) {
remove_active_control(src2.at(j));
}
(*(get(boost::vertex_name, cct_man.tree)[cct_man.current].circ)).append_not(src2.at(src1.size() - i));
for (unsigned j = (src1.size() + 1u - i); j < src1.size(); ++j) {
add_active_control(src2.at(j));
}
}
}
return true;
}
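    /// Shift-and-add multiplication: for every bit of src1, a correspondingly shifted copy of src2
    /// is conditionally added into dest.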
bool standard_syrec_synthesizer::multiplication(const std::vector<unsigned>& dest, const std::vector<unsigned>& src1, const std::vector<unsigned>& src2) {
if ((src1.empty()) || (dest.empty())) return true;
std::vector<unsigned> sum = dest;
std::vector<unsigned> partial = src2;
bool ok = true;
add_active_control(src1.at(0));
ok = ok && bitwise_cnot(sum, partial);
remove_active_control(src1.at(0));
for (unsigned i = 1; i < dest.size(); ++i) {
sum.erase(sum.begin());
partial.pop_back();
add_active_control(src1.at(i));
ok = ok && increase_new(sum, partial);
remove_active_control(src1.at(i));
}
return ok;
}
bool standard_syrec_synthesizer::not_equals(unsigned dest, const std::vector<unsigned>& src1, const std::vector<unsigned>& src2) {
if (!equals(dest, src1, src2)) return false;
(*(get(boost::vertex_name, cct_man.tree)[cct_man.current].circ)).append_not(dest);
return true;
}
void standard_syrec_synthesizer::swap(const std::vector<unsigned>& dest1, const std::vector<unsigned>& dest2) {
for (unsigned i = 0u; i < dest1.size(); ++i) {
(*(get(boost::vertex_name, cct_man.tree)[cct_man.current].circ)).append_fredkin(dest1.at(i), dest2.at(i));
}
}
//**********************************************************************
//***** Shift Operations *****
//**********************************************************************
void standard_syrec_synthesizer::left_shift(const std::vector<unsigned>& dest, const std::vector<unsigned>& src1, unsigned src2) {
for (unsigned i = 0u; (i + src2) < dest.size(); ++i) {
(*(get(boost::vertex_name, cct_man.tree)[cct_man.current].circ)).append_cnot(src1.at(i), dest.at(i + src2));
}
}
void standard_syrec_synthesizer::right_shift(const std::vector<unsigned>& dest, const std::vector<unsigned>& src1, unsigned src2) {
for (unsigned i = src2; i < dest.size(); ++i) {
(*(get(boost::vertex_name, cct_man.tree)[cct_man.current].circ)).append_cnot(src1.at(i), dest.at(i - src2));
}
}
//**********************************************************************
//***** Efficient Controls *****
//**********************************************************************
void standard_syrec_synthesizer::add_active_control(unsigned control) {
        // current leaf completed, go back to the parent
cct_man.current = source(*(in_edges(cct_man.current, cct_man.tree).first), cct_man.tree);
        // create a child for the new control
cct_node child = add_vertex(cct_man.tree);
get(boost::vertex_name, cct_man.tree)[child].control = control;
get(boost::vertex_name, cct_man.tree)[child].controls = get(boost::vertex_name, cct_man.tree)[cct_man.current].controls;
get(boost::vertex_name, cct_man.tree)[child].controls.insert(control);
// get( boost::vertex_name, cct_man.tree )[child].circ = std::shared_ptr<circuit>( new circuit() );
add_edge(cct_man.current, child, cct_man.tree);
cct_man.current = child;
        // create a new leaf
cct_node leaf = add_vertex(cct_man.tree);
get(boost::vertex_name, cct_man.tree)[leaf].controls = get(boost::vertex_name, cct_man.tree)[cct_man.current].controls;
get(boost::vertex_name, cct_man.tree)[leaf].circ = std::make_shared<circuit>();
get(boost::vertex_name, cct_man.tree)[leaf].circ->gate_added.connect(annotater(*get(boost::vertex_name, cct_man.tree)[leaf].circ, _stmts));
add_edge(cct_man.current, leaf, cct_man.tree);
cct_man.current = leaf;
}
void standard_syrec_synthesizer::remove_active_control(unsigned control [[maybe_unused]]) {
        // current leaf completed, go back to the parent
cct_man.current = source(*(in_edges(cct_man.current, cct_man.tree).first), cct_man.tree);
        // current node completed, go back to the parent
cct_man.current = source(*(in_edges(cct_man.current, cct_man.tree).first), cct_man.tree);
        // create a new leaf
cct_node leaf = add_vertex(cct_man.tree);
get(boost::vertex_name, cct_man.tree)[leaf].controls = get(boost::vertex_name, cct_man.tree)[cct_man.current].controls;
get(boost::vertex_name, cct_man.tree)[leaf].circ = std::make_shared<circuit>();
get(boost::vertex_name, cct_man.tree)[leaf].circ->gate_added.connect(annotater(*get(boost::vertex_name, cct_man.tree)[leaf].circ, _stmts));
add_edge(cct_man.current, leaf, cct_man.tree);
cct_man.current = leaf;
}
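    /// Recursively walks the control tree; every leaf circuit is copied into the output circuit
    /// under the control lines accumulated along its path.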
bool standard_syrec_synthesizer::assemble_circuit(const cct_node& current) {
// leaf
if (out_edges(current, cct_man.tree).first == out_edges(current, cct_man.tree).second /*get( boost::vertex_name, cct_man.tree )[current].circ.get()->num_gates() > 0u*/) {
_circ.insert_circuit(_circ.num_gates(), *(get(boost::vertex_name, cct_man.tree)[current].circ), get(boost::vertex_name, cct_man.tree)[current].controls);
return true;
}
// assemble optimized circuits of successors
for (auto edge_it = out_edges(current, cct_man.tree).first; edge_it != out_edges(current, cct_man.tree).second; ++edge_it) {
if (!assemble_circuit(target(*edge_it, cct_man.tree))) {
return false;
}
}
return true;
}
void standard_syrec_synthesizer::get_variables(const variable_access::ptr& var, std::vector<unsigned>& lines) {
unsigned offset = _var_lines[var->get_var()];
if (!var->indexes.empty()) {
// check if it is all numeric_expressions
unsigned n = var->get_var()->dimensions.size(); // dimensions
if ((unsigned)std::count_if(var->indexes.cbegin(), var->indexes.cend(), [&](const auto& p) { return dynamic_cast<numeric_expression*>(p.get()); }) == n) {
for (unsigned i = 0u; i < n; ++i) {
offset += dynamic_cast<numeric_expression*>(var->indexes.at(i).get())->value->evaluate(loop_map) *
std::accumulate(var->get_var()->dimensions.begin() + i + 1u, var->get_var()->dimensions.end(), 1u, std::multiplies<>()) *
var->get_var()->bitwidth;
}
}
}
if (var->range) {
auto [nfirst, nsecond] = *var->range;
unsigned first = nfirst->evaluate(loop_map);
unsigned second = nsecond->evaluate(loop_map);
if (first < second) {
for (unsigned i = first; i <= second; ++i) {
lines.emplace_back(offset + i);
}
} else {
for (int i = (int)first; i >= (int)second; --i) {
lines.emplace_back(offset + i);
}
}
} else {
for (unsigned i = 0u; i < var->get_var()->bitwidth; ++i) {
lines.emplace_back(offset + i);
}
}
}
/**
* Function to access array variables
*
* The array variable that corresponds to the given indexes is exchanged (via swap operations) with some given helper lines
*
* \param offset is the first line number associated to the array
* \param dimensions is the dimensions of the array
* \param indexes is the indexes of the array
* \param bitwidth is the bitwidth of the variables within the array
     * \param lines is the destination where the lines of the array variable are stored
*/
unsigned standard_syrec_synthesizer::get_constant_line(bool value) {
unsigned const_line = 0u;
if (!free_const_lines_map[value].empty()) {
const_line = free_const_lines_map[value].back();
free_const_lines_map[value].pop_back();
} else if (!free_const_lines_map[!value].empty()) {
const_line = free_const_lines_map[!value].back();
free_const_lines_map[!value].pop_back();
(*(get(boost::vertex_name, cct_man.tree)[cct_man.current].circ)).append_not(const_line);
} else {
const_line = _circ.add_line((std::string("const_") + std::to_string(value)), "garbage", value, true);
}
return const_line;
}
void standard_syrec_synthesizer::get_constant_lines(unsigned bitwidth, unsigned value, std::vector<unsigned>& lines) {
boost::dynamic_bitset<> number(bitwidth, value);
for (unsigned i = 0u; i < bitwidth; ++i) {
lines.emplace_back(get_constant_line(number.test(i)));
}
}
void standard_syrec_synthesizer::add_variable(circuit& circ, const std::vector<unsigned>& dimensions, const variable::ptr& var,
constant _constant, bool _garbage, const std::string& arraystr) {
if (dimensions.empty()) {
for (unsigned i = 0u; i < var->bitwidth; ++i) {
std::string name = var->name + arraystr + "." + std::to_string(i);
circ.add_line(name, name, _constant, _garbage);
}
} else {
unsigned len = dimensions.front();
std::vector<unsigned> new_dimensions(dimensions.begin() + 1u, dimensions.end());
for (unsigned i = 0u; i < len; ++i) {
add_variable(circ, new_dimensions, var, _constant, _garbage, arraystr + "[" + std::to_string(i) + "]");
}
}
}
void standard_syrec_synthesizer::add_variables(circuit& circ, const variable::vec& variables) {
for (const auto& var: variables) {
// entry in var lines map
_var_lines.insert(std::make_pair(var, circ.get_lines()));
// types of constant and garbage
constant _constant = (var->type == variable::out || var->type == variable::wire) ? constant(false) : constant();
bool _garbage = (var->type == variable::in || var->type == variable::wire);
add_variable(circ, var->dimensions, var, _constant, _garbage, std::string());
}
}
bool syrec_synthesis(circuit& circ, const program& program, const properties::ptr& settings, const properties::ptr& statistics) {
// Settings parsing
auto variable_name_format = get<std::string>(settings, "variable_name_format", "%1$s%3$s.%2$d");
auto main_module = get<std::string>(settings, "main_module", std::string());
auto statement_synthesizer = standard_syrec_synthesizer(circ, program);
// get the main module
module::ptr main;
if (!main_module.empty()) {
main = program.find_module(main_module);
if (!main) {
std::cerr << "Program has no module: " << main_module << std::endl;
return false;
}
} else {
main = program.find_module("main");
if (!main) {
main = program.modules().front();
}
}
// declare as top module
statement_synthesizer.set_main_module(main);
// create lines for global variables
statement_synthesizer.add_variables(circ, main->parameters);
statement_synthesizer.add_variables(circ, main->variables);
// Run-time measuring
timer<properties_timer> t;
if (statistics) {
properties_timer rt(statistics);
t.start(rt);
}
// synthesize the statements
bool synth_okay = statement_synthesizer.on_module(main);
if (statistics) {
t.stop();
}
return synth_okay;
}
} // namespace syrec
|
{"hexsha": "c17aebffe3f6bca249775885cff4b6d780623abc", "size": 58253, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/algorithms/synthesis/syrec_synthesis.cpp", "max_stars_repo_name": "SmaranTum/syrec", "max_stars_repo_head_hexsha": "7fd0eece4c3376e52c1f5f17add71a49e5826dac", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2022-03-19T21:51:58.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-19T21:51:58.000Z", "max_issues_repo_path": "src/algorithms/synthesis/syrec_synthesis.cpp", "max_issues_repo_name": "SmaranTum/syrec", "max_issues_repo_head_hexsha": "7fd0eece4c3376e52c1f5f17add71a49e5826dac", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 9.0, "max_issues_repo_issues_event_min_datetime": "2022-02-28T17:03:50.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-25T16:02:39.000Z", "max_forks_repo_path": "src/algorithms/synthesis/syrec_synthesis.cpp", "max_forks_repo_name": "cda-tum/syrec", "max_forks_repo_head_hexsha": "43dcdd997edd02c80304f53f66118f9dd0fc5f68", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2.0, "max_forks_repo_forks_event_min_datetime": "2022-02-02T10:35:09.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-02T15:52:24.000Z", "avg_line_length": 41.2264685067, "max_line_length": 195, "alphanum_fraction": 0.542409833, "num_tokens": 12928}
|
from sklearn import metrics, preprocessing
import numpy as np
import csv
import pandas as pd
# we assume data is distributed normally
def processing_data(data):
# Normalization
scaler = preprocessing.StandardScaler()
data = data.astype(str).astype(int)
X = data.get('X')
Y = data.get('Y')
    # preprocess: stack the two columns and normalize them together
xdata = np.column_stack((X, Y))
xdata = np.nan_to_num(xdata)
xdata = scaler.fit_transform(xdata)
state = xdata[0:1, :]
return state, xdata
def csv_to_dataframe(file):
relative_path = "./"
full_relative_path = relative_path + str(file)
columns = []
data = []
with open(full_relative_path, 'rt', encoding='utf8') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
for index, row in enumerate(csv_reader):
if index == 0:
columns = row
else:
data.append(row)
df = pd.DataFrame(data, columns=columns)
return df
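# Minimal usage sketch (the file name below is hypothetical):
# df = csv_to_dataframe("data.csv")  # CSV is expected to provide 'X' and 'Y' columns
# state, xdata = processing_data(df)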
|
{"hexsha": "8c47c4a58b741e5feaf290ea9a4603533af24c32", "size": 994, "ext": "py", "lang": "Python", "max_stars_repo_path": "preprocessing.py", "max_stars_repo_name": "wruoting/Assignment3-ML", "max_stars_repo_head_hexsha": "d3766ffbfc8ddd050f2979cd47665af4d441d78d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "preprocessing.py", "max_issues_repo_name": "wruoting/Assignment3-ML", "max_issues_repo_head_hexsha": "d3766ffbfc8ddd050f2979cd47665af4d441d78d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "preprocessing.py", "max_forks_repo_name": "wruoting/Assignment3-ML", "max_forks_repo_head_hexsha": "d3766ffbfc8ddd050f2979cd47665af4d441d78d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.1162790698, "max_line_length": 69, "alphanum_fraction": 0.629778672, "include": true, "reason": "import numpy", "num_tokens": 238}
|
import itertools as it
from typing import Tuple, List
import numpy as np
from .base_model import BaseModel
from pyffm.util import logistic
class FMModel(BaseModel):
def __init__(
self, num_latent, num_features, reg_lambda, use_linear=False, **kwargs
):
super().__init__(
num_features=num_features,
reg_lambda=reg_lambda,
use_linear=use_linear,
sigmoid=kwargs.get("sigmoid", False),
regression=kwargs.get("regression", False),
)
self.latent_w = (
np.random.rand(num_features, num_latent) * 1 / np.sqrt(num_latent)
)
self.grads = np.ones((num_features, num_latent))
@property
def kappa(self):
return self._kappa
@kappa.setter
def kappa(self, value):
if isinstance(value, int):
self._kappa = value
else:
x, y = value
self._kappa = np.divide(-y, (1 + np.exp(y * self._phi(x))))
def calc_subgrad(self, x_1) -> float:
return self._subgrad(self.kappa, *x_1)
def _subgrad(self, kappa, f1, x1):
return self.reg_lambda * self.latent_w[f1] + kappa * x1 * (1 / np.sqrt(2))
def calc_lin_subgrads(self, x_1):
return self._lin_subgrad(self.kappa, x_1[0], x_1[1])
def _lin_subgrad(self, kappa, f1, x1):
return self.reg_lambda * self.lin_terms[f1] + kappa * x1 * (1 / np.sqrt(2))
def predict(self, x):
return logistic(self._phi(x))
def _phi(self, x: List[Tuple[int, float]]):
"""
Sum over bias and linear terms + sum of products of latent vectors
TODO - implement the O(nk) implementation: Σ((Σ w_j'x_j') - w_j x_j) · w_j x_j
"""
phi = 0
if self.use_linear:
phi += self.bias
for feat in [val[0] for val in x]:
phi += (1 / np.sqrt(2)) * self.lin_terms[feat]
for ((feat1, val1), (feat2, val2)) in it.combinations(x, r=2):
phi += np.dot(self.latent_w[feat1], self.latent_w[feat2]) * val1 * val2
return phi
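    def _phi_fast(self, x: List[Tuple[int, float]]):
        """
        Sketch of the O(nk) pairwise-interaction identity mentioned in the TODO of
        _phi above (an added illustration, not part of the original model API):
        sum_{i<j} <v_i, v_j> x_i x_j
            = 0.5 * sum_f [ (sum_i v_{i,f} x_i)^2 - sum_i (v_{i,f} x_i)^2 ]
        """
        feats = np.array([feat for feat, _ in x])
        vals = np.array([val for _, val in x])
        vx = self.latent_w[feats] * vals[:, None]  # shape (n, k)
        phi = 0.5 * float(np.sum(np.sum(vx, axis=0) ** 2 - np.sum(vx**2, axis=0)))
        if self.use_linear:
            # same bias/linear contribution as in _phi
            phi += self.bias + (1 / np.sqrt(2)) * float(np.sum(self.lin_terms[feats]))
        return phi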
def logloss(self, x, y):
return np.log(1 + np.exp(-y * self._phi(x)))
|
{"hexsha": "0bcc07339f53002a66fc43939a66d6aa7c1b1f1c", "size": 2165, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyffm/engine/model/fm_model.py", "max_stars_repo_name": "mascaroa/pyffm", "max_stars_repo_head_hexsha": "2445ed2c048347ebbfc76d39990065eb76a8d784", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2020-12-22T02:59:37.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-28T20:54:40.000Z", "max_issues_repo_path": "pyffm/engine/model/fm_model.py", "max_issues_repo_name": "mascaroa/pyffm", "max_issues_repo_head_hexsha": "2445ed2c048347ebbfc76d39990065eb76a8d784", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-04-05T01:56:13.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-10T02:40:31.000Z", "max_forks_repo_path": "pyffm/engine/model/fm_model.py", "max_forks_repo_name": "mascaroa/pyffm", "max_forks_repo_head_hexsha": "2445ed2c048347ebbfc76d39990065eb76a8d784", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.8382352941, "max_line_length": 87, "alphanum_fraction": 0.5810623557, "include": true, "reason": "import numpy", "num_tokens": 604}
|
import pandas as pd
import gdal
import numpy as np
import os
import rasterio
import tqdm
class TrainingData:
"""Prepares training datasets using a raster stack, species occurrences and a set of band means and standard
deviations.
:param self: a class instance of TrainingData
:param oh: an Occurrence object: holds occurrence files and tables
:param gh: a GIS object: holds path and file names required for computation of gis data
:param verbose: a boolean: prints a progress bar if True, silent if False
:return: Object. Used to create a series of .csv files (one for each species detected by the Occurrences object)
containing the input data to the trainer, executed by calling class method create_training_df on TrainingData
object.
"""
def __init__(self, oh, gh, verbose):
self.oh = oh
self.gh = gh
self.verbose = verbose
def prep_training_df(self, src, inras, spec):
"""Loads array from raster stack, locations from species occurrences and band statistics.
:param self: a class instance of TrainingData
:param src: rasterio source object for raster stack.
:param inras: gdal source object for raster stack.
:param spec: string containing the species name for which the data will be loaded.
:return: Tuple. Containing:
string 'spec' that contains the species name for which the files are loaded and returned;
list 'ppa' contains the status for each loaded occurrence (0 for absence, 1 for presence) for the specified
species;
list 'long' and 'lati' contain the longitude and latitude for each occurrence from a specified species;
list 'row' and 'col' contain the values from the previous 'long' and 'lati' columns converted from WGS84 to
image coordinates;
matrix 'myarray' is an multi-dimensional representation of the raster stack;
table 'mean_std' is an table containing the mean and standard deviation for each of the scaled raster layers
"""
data = pd.read_csv(self.gh.spec_ppa + '/%s_ppa_dataframe.csv' % spec)
spec = spec.replace(" ", "_")
len_pd = np.arange(len(data))
# dictionary keys are used to query table files that are generated by the package.
long = data["dLon"]
lati = data["dLat"]
ppa = data["present/pseudo_absent"]
lon = long.values
lat = lati.values
row = []
col = []
for i in len_pd:
row_n, col_n = src.index(lon[i], lat[i])
row.append(row_n)
col.append(col_n)
myarray = inras.ReadAsArray()
mean_std = pd.read_csv(self.gh.gis + '/env_bio_mean_std.txt', sep="\t")
mean_std = mean_std.to_numpy()
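        # as used in create_training_df below: column 1 holds the band mean, column 2 the band std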
return spec, ppa, long, lati, row, col, myarray, mean_std
def create_training_df(self):
"""Create training dataset by extracting all environmental variables for each occurrence location for a set of
species.
:param self: a class instance of TrainingData
:return: None. Does not return value or object, instead writes the computed training dataset to file for each
species detected by the Occurrence object (oh).
"""
src = rasterio.open(self.gh.stack + '/stacked_env_variables.tif')
inRas = gdal.Open(self.gh.stack + '/stacked_env_variables.tif')
for i in tqdm.tqdm(self.oh.name, desc='Creating training data' + (28 * ' '), leave=True) if self.verbose else self.oh.name:
spec, ppa, long, lati, row, col, myarray, mean_std = self.prep_training_df(src, inRas, i)
X = []
for j in range(0, self.gh.length):
band = myarray[j]
x = []
for i in range(0, len(row)):
value = band[row[i], col[i]]
                    if value < -1000:
                        value = np.nan  # nodata sentinel
                    elif j < self.gh.scaled_len:
                        # scaled bands are standardised with the stored mean and std
                        value = (value - mean_std.item((j, 1))) / mean_std.item((j, 2))
                    x.append(value)
X.append(x)
X = np.array([np.array(xi) for xi in X])
df = pd.DataFrame(X)
df = df.T
            # dictionary keys are written to file and not used to query user data.
df["dLat"] = lati
df["dLon"] = long
df["present/pseudo_absent"] = ppa
df["row_n"] = row
df.rename(columns=dict(zip(df.columns[0:self.gh.length], self.gh.names)), inplace=True)
df = df.dropna(axis=0, how='any')
input_data = df
if not os.path.isdir(self.gh.spec_ppa_env):
os.makedirs(self.gh.spec_ppa_env, exist_ok=True)
input_data.to_csv(self.gh.spec_ppa_env + '/%s_env_dataframe.csv' % spec)
|
{"hexsha": "907ad7b0fb8e28b6060d5a5655b9d52b344d7059", "size": 5135, "ext": "py", "lang": "Python", "max_stars_repo_path": "sdmdl/sdmdl/data_prep/training_data.py", "max_stars_repo_name": "naturalis/trait-geo-diverse-angiosperms", "max_stars_repo_head_hexsha": "034ce4e807f7a83d31bf7b46435275ba91cfcb00", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2019-11-06T05:43:01.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-16T11:47:26.000Z", "max_issues_repo_path": "sdmdl/sdmdl/data_prep/training_data.py", "max_issues_repo_name": "naturalis/sdmdl", "max_issues_repo_head_hexsha": "034ce4e807f7a83d31bf7b46435275ba91cfcb00", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 249, "max_issues_repo_issues_event_min_datetime": "2019-08-21T10:14:25.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-24T02:30:58.000Z", "max_forks_repo_path": "sdmdl/sdmdl/data_prep/training_data.py", "max_forks_repo_name": "naturalis/sdmdl", "max_forks_repo_head_hexsha": "034ce4e807f7a83d31bf7b46435275ba91cfcb00", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2019-08-21T11:07:30.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-20T02:19:18.000Z", "avg_line_length": 43.1512605042, "max_line_length": 131, "alphanum_fraction": 0.5966893866, "include": true, "reason": "import numpy", "num_tokens": 1179}
|
from classifiers import SVM, KRL
import argparse
from utils import get_sequence, get_mismatch, load_data
from Kernels import Kernel, get_gram_cross
import numpy as np
import csv
from tqdm import tqdm
def get_gram_matrix(x_tr, x_te, k, n_mismatch, n_kernel):
dict_sequences = get_sequence(x_tr, k=k)
embeddings_train = get_mismatch(
dict_sequences,
x_tr,
k=k,
n_mismatch=n_mismatch,
)
embeddings_test = get_mismatch(
dict_sequences,
x_te,
k=k,
n_mismatch=n_mismatch,
)
ker = Kernel(kernel="spectrum")
gram_matrix = ker.compute_gram_matrix(embeddings_train)
gram_test = get_gram_cross(embeddings_train, embeddings_test)
return gram_matrix, gram_test
def predict_first_set(
gram_train, gram_test, y_label, scale=25000, max_iter=1, lambd=0.00001
):
gram_train, gram_test = gram_train[0], gram_test[0]
krl = KRL(gram_m=gram_train / scale, max_iter=max_iter, lambd=lambd)
krl.fit(np.array(y_label))
y_predict_test = np.sign(krl.predict(gram_test / scale))
return y_predict_test
def predict_second_set(
gram_train, gram_test, y_label, scale=25000, max_iter=1, lambd=0.00001
):
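    # Summing positive semi-definite Gram matrices yields another valid kernel;
    # it corresponds to concatenating the three underlying feature maps.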
gram_train = gram_train[0] + gram_train[1] + gram_train[2]
gram_test = gram_test[0] + gram_test[1] + gram_test[2]
krl = KRL(gram_m=gram_train / scale, max_iter=max_iter, lambd=lambd)
krl.fit(np.array(y_label))
y_predict_test = np.sign(krl.predict(gram_test / scale))
return y_predict_test
def predict_third_set(
gram_train, gram_test, y_label, scale=20000, max_iter=1, lambd=0.00001
):
gram_train = gram_train[0] + gram_train[1] + gram_train[2]
gram_test = gram_test[0] + gram_test[1] + gram_test[2]
krl = KRL(gram_m=gram_train / scale, max_iter=max_iter, lambd=lambd)
krl.fit(np.array(y_label))
y_pred_krl = krl.predict(gram_test / scale)
clf = SVM(gram_m=gram_train)
clf.fit(np.array(y_label))
y_pred_svm = clf.predict(gram_test)
y_pred = np.sign(y_pred_svm + y_pred_krl)
return y_pred
def main(filename):
"""
Main function for generating submissions.
"""
y_pred_all = []
X_train, y_train_all, X_test = load_data()
for n in range(3):
print(
"############## working on dataset {} ###################".format(
str(n + 1)
)
)
        # map the {0,1} labels of this dataset slice to {-1,+1}
        y_train = 2 * np.array(y_train_all[2000 * n : 2000 * (n + 1)]) - 1
k, n_mismatch = 13, 3
if n != 0:
print("Compute gram matrix for first kernel")
gram_train_13_3, gram_test_13_3 = get_gram_matrix(
X_train[2000 * n : 2000 * (n + 1)],
X_test[1000 * n : 1000 * (n + 1)],
k=k,
n_mismatch=n_mismatch,
n_kernel=n + 1,
)
k, n_mismatch = 12, 2
if n != 0:
print("Compute gram matrix for second kernel ")
gram_train_12_2, gram_test_12_2 = get_gram_matrix(
X_train[2000 * n : 2000 * (n + 1)],
X_test[1000 * n : 1000 * (n + 1)],
k=k,
n_mismatch=n_mismatch,
n_kernel=n + 1,
)
print("Compute gram matrix for third kernel ")
k, n_mismatch = 13, 2
gram_train_13_2, gram_test_13_2 = get_gram_matrix(
X_train[2000 * n : 2000 * (n + 1)],
X_test[1000 * n : 1000 * (n + 1)],
k=k,
n_mismatch=n_mismatch,
n_kernel=n + 1,
)
print("Training and generating prediction")
if n == 0:
train_grams = [gram_train_13_2]
test_grams = [gram_test_13_2]
y_pred = predict_first_set(train_grams, test_grams, y_train)
elif n == 1:
train_grams = [gram_train_13_2, gram_train_12_2, gram_train_13_3]
test_grams = [gram_test_13_2, gram_test_12_2, gram_test_13_3]
y_pred = predict_second_set(train_grams, test_grams, y_train)
else:
train_grams = [gram_train_13_2, gram_train_12_2, gram_train_13_3]
test_grams = [gram_test_13_2, gram_test_12_2, gram_test_13_3]
y_pred = predict_third_set(train_grams, test_grams, y_train)
y_pred = (y_pred + 1) / 2
y_pred_all += list(y_pred)
print("Saving prediction in CSV file")
with open(filename, "w") as csvfile:
fieldnames = ["Id", "Bound"]
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for i in tqdm(range(0, len(y_pred_all))):
writer.writerow({"Id": i, "Bound": int(y_pred_all[i])})
print("You can find results on " + filename)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="KMML code ")
parser.add_argument(
"--output_file",
type=str,
metavar="f",
default="kmml_preds.csv",
help="csv output filename",
)
args = parser.parse_args()
main(args.output_file)
|
{"hexsha": "751d7057c93547c33fe96a7dcf1a23f0eb25b010", "size": 5062, "ext": "py", "lang": "Python", "max_stars_repo_path": "main.py", "max_stars_repo_name": "HamzaG737/Kmml-challenge-code", "max_stars_repo_head_hexsha": "c7ae3e26a7e02e4951758c683f29c23c2ab9b3e4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "main.py", "max_issues_repo_name": "HamzaG737/Kmml-challenge-code", "max_issues_repo_head_hexsha": "c7ae3e26a7e02e4951758c683f29c23c2ab9b3e4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "main.py", "max_forks_repo_name": "HamzaG737/Kmml-challenge-code", "max_forks_repo_head_hexsha": "c7ae3e26a7e02e4951758c683f29c23c2ab9b3e4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.2469135802, "max_line_length": 78, "alphanum_fraction": 0.6048992493, "include": true, "reason": "import numpy", "num_tokens": 1434}
|
\chapter{Development and Results}
%Once the state of the art has been studied and the main lines of work on energy reduction are known, an analysis of all the techniques will be made.
\input{Chapter21}
\clearpage
\input{Chapter22}
\input{Chapter23}
%%\input{Chapter33}
\input{Chapter24}
\clearpage
\input{Chapter25}
\clearpage
\input{Chapter26}
|
{"hexsha": "83fb6237335c1f545cc18a783eaff5d995321dc8", "size": 344, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "Chapter2.tex", "max_stars_repo_name": "alvarolop/tfg_latex", "max_stars_repo_head_hexsha": "9b5d4e89183ba775aae81c60da5ea170ffd22f79", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Chapter2.tex", "max_issues_repo_name": "alvarolop/tfg_latex", "max_issues_repo_head_hexsha": "9b5d4e89183ba775aae81c60da5ea170ffd22f79", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Chapter2.tex", "max_forks_repo_name": "alvarolop/tfg_latex", "max_forks_repo_head_hexsha": "9b5d4e89183ba775aae81c60da5ea170ffd22f79", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.9333333333, "max_line_length": 146, "alphanum_fraction": 0.7819767442, "num_tokens": 93}
|
import torch
import cv2
import numpy as np
from operator import itemgetter
# import some common detectron2 utilities
from detectron2 import model_zoo
from detectron2.data import MetadataCatalog
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from tools.plot_tools import plt_plot
COLORS = { # in HSV FORMAT
'green': ([56, 50, 50], [100, 255, 255], [72, 200, 153]), # NIGERIA
'referee': ([0, 0, 0], [255, 35, 65], [120, 0, 0]), # REFEREE
'white': ([0, 0, 190], [255, 26, 255], [255, 0, 255]) # USA
}
IOU_TH = 0.2
PAD = 15
def hsv2bgr(color_hsv):
color_bgr = np.array(cv2.cvtColor(np.uint8([[color_hsv]]), cv2.COLOR_HSV2BGR)).ravel()
color_bgr = (int(color_bgr[0]), int(color_bgr[1]), int(color_bgr[2]))
return color_bgr
class FeetDetector:
def __init__(self, players):
# Image segmentation model from DETECTRON2
cfg_seg = get_cfg()
cfg_seg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
cfg_seg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7 # set threshold for this model
cfg_seg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
self.predictor_seg = DefaultPredictor(cfg_seg)
self.bbs = []
self.players = players
self.cfg = cfg_seg
@staticmethod
def count_non_black(image):
        # Vectorized equivalent of looping over every flattened channel value:
        # count the entries above the near-black threshold.
        return int(np.count_nonzero(image > 0.0001))
@staticmethod
def bb_intersection_over_union(boxA, boxB):
# sources: https://www.pyimagesearch.com/2016/11/07/intersection-over-union-iou-for-object-detection/
# determine the (x, y)-coordinates of the intersection rectangle
xA = max(boxA[0], boxB[0]) # horizontal tl
yA = max(boxA[1], boxB[1]) # vertical tl
xB = min(boxA[2], boxB[2]) # horizontal br
yB = min(boxA[3], boxB[3]) # vertical br
# compute the area of intersection rectangle
interArea = max(0, xB - xA + 1) * max(0, yB - yA + 1)
# compute the area of both the prediction and ground-truth
# rectangles
boxAArea = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1)
boxBArea = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1)
# compute the intersection over union by taking the intersection
# area and dividing it by the sum of prediction + ground-truth
        # areas minus the intersection area
iou = interArea / float(boxAArea + boxBArea - interArea)
# return the intersection over union value
return iou
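    # Worked example (hypothetical boxes): boxA = (0, 0, 10, 10) and
    # boxB = (5, 5, 15, 15) overlap in a 6 x 6 region (the +1 terms treat
    # the bounds as inclusive pixel indices), so interArea = 36, each box
    # area is 11 * 11 = 121, and iou = 36 / (121 + 121 - 36) ~= 0.175.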
    def get_players_pos(self, M, M1, frame, timestamp, map_2d):
        """
        Segment people in `frame`, assign each detection to a team by jersey
        color, project the feet through the homographies M and M1 onto the 2D
        court map, and update the tracked players via IoU matching.
        """
        warped_kpts = []
outputs_seg = self.predictor_seg(frame)
indices = outputs_seg["instances"].pred_classes.cpu().numpy()
predicted_masks = outputs_seg["instances"].pred_masks.cpu().numpy()
ppl = []
kernel = np.array([[0, 1, 0],
[1, 1, 1],
[0, 1, 0]], np.uint8)
for i, entry in enumerate(indices): # picking only class 0 (people)
if entry == 0:
ppl.append(
np.array(cv2.erode(np.array(predicted_masks[i], dtype=np.uint8), kernel, iterations=4), dtype=bool))
'''v = Visualizer(frame[:, :, ::-1], MetadataCatalog.get(self.cfg.DATASETS.TRAIN[0]), scale=1.2)
out = v.draw_instance_predictions(outputs_seg["instances"].to("cpu"))
plt_plot(out.get_image()[:, :, ::-1])'''
        # np.argwhere(p) gives one (N, 2) array of (row, col) indices of the
        # True pixels per person; a plain list is used because each person
        # covers a different number of pixels.
        indexes_ppl = [np.argwhere(p) for p in ppl]
# calculate estimated position of players in the 2D map
for keypoint, p in zip(indexes_ppl, ppl):
top = min(keypoint[:, 0])
bottom = max(keypoint[:, 0])
left = min(keypoint[:, 1])
right = max(keypoint[:, 1])
bbox_person = (top - PAD, left - PAD, bottom + PAD, right + PAD)
tmp_tensor = p.reshape((p.shape[0], p.shape[1], 1))
crop_img = np.where(tmp_tensor, frame, 0)
crop_img = crop_img[top:(bottom - int(0.3 * (bottom - top))), left:right]
if len(crop_img) > 0:
crop_img = cv2.cvtColor(crop_img, cv2.COLOR_BGR2HSV)
best_mask = [0, ''] # (num_non_black, color)
for color in COLORS.keys():
                    mask = cv2.inRange(crop_img, np.array(COLORS[color][0]),
                                       np.array(COLORS[color][1]))
output = cv2.bitwise_and(crop_img, crop_img, mask=mask)
# plt_plot(np.hstack((cv2.cvtColor(crop_img, cv2.COLOR_HSV2BGR),
# cv2.cvtColor(output, cv2.COLOR_HSV2BGR))))
non_blacks = FeetDetector.count_non_black(output)
if best_mask[0] < non_blacks:
best_mask[0] = non_blacks
best_mask[1] = color
head = int(np.argmin(keypoint[:, 0]))
foot = int(np.argmax(keypoint[:, 0]))
kpt = np.array([keypoint[head, 1], keypoint[foot, 0], 1]) # perspective space
homo = M1 @ (M @ kpt.reshape((3, -1)))
homo = np.int32(homo / homo[-1]).ravel()
# homo = [vertical pos, horizontal pos]
# homo has the position of player in the 2D map
if best_mask[1] != '':
color = hsv2bgr(COLORS[best_mask[1]][2])
warped_kpts.append((homo, color, best_mask[1], bbox_person)) # appending also the color
cv2.circle(frame, (keypoint[head, 1], keypoint[foot, 0]), 2, color, 5)
for kpt in warped_kpts:
(homo, color, color_key, bbox) = kpt
# updates if possible the player position and bbox
iou_scores = [] # (current_iou, player)
for player in self.players:
if (player.team == color_key) and (player.previous_bb is not None) and \
(0 <= homo[0] < map_2d.shape[1]) and (0 <= homo[1] < map_2d.shape[0]):
iou_current = self.bb_intersection_over_union(bbox, player.previous_bb)
if iou_current >= IOU_TH:
iou_scores.append((iou_current, player))
if len(iou_scores) > 0:
# only update player
max_iou = max(iou_scores, key=itemgetter(0))
max_iou[1].previous_bb = bbox
max_iou[1].positions[timestamp] = (homo[0], homo[1])
else:
for player in self.players:
if (player.team == color_key) and (player.previous_bb is None):
player.previous_bb = bbox
player.positions[timestamp] = (homo[0], homo[1])
break
for player in self.players:
if len(player.positions) > 0:
if (timestamp - max(player.positions.keys())) >= 7:
player.positions = {}
player.previous_bb = None
player.has_ball = False
map_2d_text = map_2d.copy()
for p in self.players:
if p.team != 'referee':
try:
cv2.circle(map_2d, (p.positions[timestamp]), 10, p.color, 7)
cv2.circle(map_2d, (p.positions[timestamp]), 13, (0, 0, 0), 3)
cv2.circle(map_2d_text, (p.positions[timestamp]), 25, p.color, -1)
cv2.circle(map_2d_text, (p.positions[timestamp]), 27, (0, 0, 0), 5)
text_size, _ = cv2.getTextSize(str(p.ID), cv2.FONT_HERSHEY_SIMPLEX, 1.5, 3)
text_origin = (p.positions[timestamp][0] - text_size[0] // 2,
p.positions[timestamp][1] + text_size[1] // 2)
cv2.putText(map_2d_text, str(p.ID), text_origin,
cv2.FONT_HERSHEY_SIMPLEX, 1.5,
(0, 0, 0), 3, cv2.LINE_AA)
except KeyError:
pass
return frame, map_2d, map_2d_text
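# Hypothetical usage sketch (the homographies M and M1, the `players` list,
# and the 2D court image come from the rest of the tracking pipeline):
#   detector = FeetDetector(players)
#   frame, map_2d, map_2d_text = detector.get_players_pos(
#       M, M1, frame, timestamp, court_img)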
if __name__ == '__main__':
print(torch.__version__, torch.cuda.is_available())
|
{"hexsha": "2e1cb9c70ea3cff2a4622af93c99a42e7198ddbd", "size": 8475, "ext": "py", "lang": "Python", "max_stars_repo_path": "player_detection.py", "max_stars_repo_name": "Basket-Analytics/BasketTracking", "max_stars_repo_head_hexsha": "5921dc7a7abd74ab6e1d2c0a78642cc53e4e5ad6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2021-05-08T22:28:14.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-17T03:10:36.000Z", "max_issues_repo_path": "player_detection.py", "max_issues_repo_name": "CrisSherban/BasketTracking", "max_issues_repo_head_hexsha": "5921dc7a7abd74ab6e1d2c0a78642cc53e4e5ad6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-03-08T06:34:11.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-08T06:34:11.000Z", "max_forks_repo_path": "player_detection.py", "max_forks_repo_name": "CrisSherban/BasketTracking", "max_forks_repo_head_hexsha": "5921dc7a7abd74ab6e1d2c0a78642cc53e4e5ad6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-05-15T11:58:41.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-15T11:58:41.000Z", "avg_line_length": 43.9119170984, "max_line_length": 120, "alphanum_fraction": 0.5548082596, "include": true, "reason": "import numpy", "num_tokens": 2261}
|
# 2019-11-19 10:28:31(JST)
import sys
import numpy as np
# import collections
# import math
# from string import ascii_lowercase, ascii_uppercase, digits
# from bisect import bisect_left as bi_l, bisect_right as bi_r
# import itertools
# from functools import reduce
# import operator as op
# import re
# import heapq
# import array
# from scipy.misc import comb # (default: exact=False)
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import floyd_warshall
def main():
n, m, l = map(int, sys.stdin.readline().split())
ABCQST = np.array(sys.stdin.read().split(), np.int64)
ABC = ABCQST[:m * 3]
A, B, C = ABC[::3], ABC[1::3], ABC[2::3]
    ST = ABCQST[m * 3 + 1:]  # ABCQST[m * 3] is Q, the number of queries
S, T = ST[::2], ST[1::2]
    # Stage 1: all-pairs shortest road distances.
    dist = csr_matrix((C, (A, B)), (n + 1, n + 1))
    min_dist = floyd_warshall(dist, directed=False)
    # Stage 2: a "refuel graph" in which two towns are joined with weight 1
    # iff they are reachable on a single tank of fuel (distance <= l).
    filling_times = np.full((n + 1, n + 1), np.inf)
    np.fill_diagonal(filling_times, 0)
    filling_times[min_dist <= l] = 1
    min_filling_times = floyd_warshall(filling_times, directed=False)
    min_filling_times[min_filling_times == np.inf] = 0
    # Subtract 1 at the end: the answer is the number of tanks used minus
    # one, so unreachable pairs (0 here) become -1 as the problem requires.
    min_filling_times = min_filling_times.astype(int)
res = min_filling_times[S, T] - 1
print('\n'.join(res.astype(str)))
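# Worked example (hypothetical input): n = 3, l = 5, roads 1-2 and 2-3 of
# length 3. min_dist[1, 3] = 6 > l, so town 3 needs two tanks from town 1:
# filling_times[1, 2] = filling_times[2, 3] = 1 gives
# min_filling_times[1, 3] = 2, and the answer for query (1, 3) is
# 2 - 1 = 1 refuel.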
if __name__ == "__main__":
main()
|
{"hexsha": "54899451608d3a0f1a298387e581ae22a4f92634", "size": 1305, "ext": "py", "lang": "Python", "max_stars_repo_path": "jp.atcoder/abc143/abc143_e/8522563.py", "max_stars_repo_name": "kagemeka/atcoder-submissions", "max_stars_repo_head_hexsha": "91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-02-09T03:06:25.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-09T03:06:25.000Z", "max_issues_repo_path": "jp.atcoder/abc143/abc143_e/8522563.py", "max_issues_repo_name": "kagemeka/atcoder-submissions", "max_issues_repo_head_hexsha": "91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-02-05T22:53:18.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-09T01:29:30.000Z", "max_forks_repo_path": "jp.atcoder/abc143/abc143_e/8522563.py", "max_forks_repo_name": "kagemeka/atcoder-submissions", "max_forks_repo_head_hexsha": "91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.1875, "max_line_length": 70, "alphanum_fraction": 0.645210728, "include": true, "reason": "import numpy,from scipy", "num_tokens": 397}
|
""" Test the optimizer classes """
import pytest
import numpy as np
from josim_tools.optimize import NumpyVectorArray
def test_numpy_vector_array():
""" Test NumpyVectorArray class """
array_a = np.array([1, 2, 3, 4])
array_b = np.array([5, 6, 7, 8])
array_c = np.array([9, 10, 11, 12])
array_d = np.array([13, 14, 15, 16])
vector_a = np.array([array_a])
vector_b = np.array([array_a, array_b])
vector_c = np.array([array_a, array_b, array_c])
vector_d = np.array([array_a, array_b, array_c, array_d])
array_size = 4
numpy_vector_array = NumpyVectorArray(array_size, default_allocation=0)
assert numpy_vector_array.capacity() == 0
assert numpy_vector_array.size() == 0
assert numpy_vector_array.array_size() == 4
numpy_vector_array.append_list([array_a])
assert numpy_vector_array.capacity() == 1
assert numpy_vector_array.size() == 1
assert numpy_vector_array.array_size() == 4
assert np.all(vector_a == numpy_vector_array.view())
numpy_vector_array.append_list([array_b])
assert numpy_vector_array.capacity() == 2
assert numpy_vector_array.size() == 2
assert numpy_vector_array.array_size() == 4
assert np.all(vector_b == numpy_vector_array.view())
numpy_vector_array.append(array_c)
assert numpy_vector_array.capacity() == 4
assert numpy_vector_array.size() == 3
assert numpy_vector_array.array_size() == 4
assert np.all(vector_c == numpy_vector_array.view())
numpy_vector_array.append(array_d)
assert numpy_vector_array.capacity() == 4
assert numpy_vector_array.size() == 4
assert numpy_vector_array.array_size() == 4
assert np.all(vector_d == numpy_vector_array.view())
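# Note: the capacity progression asserted above (0 -> 1 -> 2 -> 4) suggests
# the container grows geometrically (doubling) once full, which keeps the
# amortized cost of append() constant.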
|
{"hexsha": "2742d3223d9bd4c5c6b80adaa3b63f923ac5ed34", "size": 1727, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_optimizer.py", "max_stars_repo_name": "JoeyDelp/josim-tools", "max_stars_repo_head_hexsha": "e6b9eb3e6b45bea53dd1b355121ee4b09867eb07", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-06-22T14:00:22.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-24T16:32:49.000Z", "max_issues_repo_path": "tests/test_optimizer.py", "max_issues_repo_name": "JoeyDelp/josim-tools", "max_issues_repo_head_hexsha": "e6b9eb3e6b45bea53dd1b355121ee4b09867eb07", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_optimizer.py", "max_forks_repo_name": "JoeyDelp/josim-tools", "max_forks_repo_head_hexsha": "e6b9eb3e6b45bea53dd1b355121ee4b09867eb07", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-08-03T10:49:14.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-24T12:54:10.000Z", "avg_line_length": 30.8392857143, "max_line_length": 75, "alphanum_fraction": 0.7017950203, "include": true, "reason": "import numpy", "num_tokens": 448}
|
"""
    smoothspec(zi, nas)

Smooth the field `zi(ns, ns)` with a spectral method at scale `ns/nas`.
Takes missing values (NaN) into account; NaN entries of `zi` are zeroed
in place.
"""
function smoothspec(zi,nas)
@compat iinan=findall(isnan.(zi))
@compat iinotnan=findall(.~isnan.(zi))
zi[iinan] .= 0.0
nss=size(zi);
ns=nss[1];
sdim=div(ns,nas); # the smoothing radius is one large scale pixel wide (setting the diameter to 1 pixel is wrong)
kx=[collect(0:ns/2)' collect(-ns/2+1:-1)']';
kx=kx*ones(1,ns);
kx=kx.^2+permutedims(kx,[2 1]).^2;
zif=fft(zi);
zif[kx[:] .> (nas/2).^2] .= 0.0;
zif[kx[:].==(nas/2).^2]=real(zif[kx[:].==(nas/2).^2]*2.0);
zif=real(ifft(zif));
return zif
end
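# Hypothetical usage sketch: smooth a 256x256 field down to an effective
# large-scale resolution of 32 pixels (fft/ifft are assumed to be available,
# e.g. via FFTW on recent Julia versions):
#   zs = smoothspec(copy(z), 32)   # copy(): NaN entries of the input are zeroed in place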
|
{"hexsha": "70efbf491fae25157eda4348f8997250083ef5a8", "size": 759, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/rf/smoothspec.jl", "max_stars_repo_name": "UnofficialJuliaMirror/RainFARM.jl-e9a4e08f-a0a3-5224-a821-6d0231c12d6b", "max_stars_repo_head_hexsha": "740f4edff721692e13168b132503aa62d5fea574", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/rf/smoothspec.jl", "max_issues_repo_name": "UnofficialJuliaMirror/RainFARM.jl-e9a4e08f-a0a3-5224-a821-6d0231c12d6b", "max_issues_repo_head_hexsha": "740f4edff721692e13168b132503aa62d5fea574", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/rf/smoothspec.jl", "max_forks_repo_name": "UnofficialJuliaMirror/RainFARM.jl-e9a4e08f-a0a3-5224-a821-6d0231c12d6b", "max_forks_repo_head_hexsha": "740f4edff721692e13168b132503aa62d5fea574", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.1071428571, "max_line_length": 121, "alphanum_fraction": 0.5362318841, "num_tokens": 259}
|
import numpy as np
def affine(r, c, x0, dxx, dyx, y0, dxy, dyy):
"""
    Return the affine transform of (row, column) to (x, y) position.
    If the coefficients come from a GDAL GeoTIFF geotransform (for example),
    the returned coordinates are the pixel corners, not the centers.
    If you want the center of the pixel, use affine_center instead.
"""
x = x0 + c * dxx + r * dyx
y = y0 + c * dxy + r * dyy
return x, y
def affine_center(r, c, x0, dxx, dyx, y0, dxy, dyy):
return affine(r + 0.5, c + 0.5, x0, dxx, dyx, y0, dxy, dyy)
def inv_affine(x, y, x0, dxx, dyx, y0, dxy, dyy):
if dyx == 0 and dxy == 0:
c = np.array(np.floor((x - x0) / dxx), dtype=np.int32)
r = np.array(np.floor((y - y0) / dyy), dtype=np.int32)
else:
# @todo support skew projection
raise ValueError("non-North up affine transforms are not supported yet")
return r, c
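# Worked example with a hypothetical north-up geotransform (10 m pixels,
# origin at (500000, 4200000), y decreasing southwards):
#   affine(0, 0, 500000, 10, 0, 4200000, 0, -10)         -> (500000, 4200000)
#   affine_center(0, 0, 500000, 10, 0, 4200000, 0, -10)  -> (500005.0, 4199995.0)
#   inv_affine(500005.0, 4199995.0, 500000, 10, 0, 4200000, 0, -10)
#                                                        -> (0, 0) as int32 arrays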
class Grid(object):
''' Class to compute array indices from position and vice versa.
    In the HSTB module, currently used with ESRI grids (ArcExt) and VR BAGs (SurveyOutline).
The attributes x_edges and y_edges contain the coordinates for each cell.
The attributes grid_val and grid_count are numpy arrays of the appropriate size IFF allocate is True in the constructor.
    Be aware that changing the cell_size or min/max x/y locations will reallocate the grid_val and grid_count arrays (deleting all existing data)
'''
def __init__(self, lower_left_xy, nxy, cell_size, buffer_dist=0, allocate=True):
""" Creates an object describing a rectangular grid.
Parameters
----------
lower_left_xy
Tuple for lower left corner (x, y) position
nxy
Tuple of number of x cells and y cells
cell_size
Is either a single number for a square grid or a 2-tuple for x_size, ysize
buffer_dist
Amount to expand gridded area in all directions, +/- X and Y
allocate
Whether to create a numpy array to hold data in. Setting allocate=False implies you will just use the coordinate math.
"""
self.x_edges = self.y_edges = None # this will be replaced once the min/max x/y are set
self.grid_val = self.grid_count = None # this will be replaced IF allocate is True
self.allocate = allocate
try:
self.cell_size_x, self.cell_size_y = cell_size
except TypeError:
self.cell_size_x = cell_size
self.cell_size_y = cell_size
# Identify spatial characteristics of input grid raster
        # Expand the gridded area by buffer_dist on every side
        self.minx = lower_left_xy[0] - buffer_dist
        self.maxx = lower_left_xy[0] + nxy[0] * self.cell_size_x + buffer_dist
        self.miny = lower_left_xy[1] - buffer_dist
        self.maxy = lower_left_xy[1] + nxy[1] * self.cell_size_y + buffer_dist
@property
def geotransform(self):
return (self.minx, self.cell_size_x, 0, self.miny, 0, self.cell_size_y)
@property
def cell_size_x(self):
""" Cell size in X direction.
Computed from Min/Max(X/Y)."""
return self._cell_size_y
@cell_size_x.setter
def cell_size_x(self, value):
        self._cell_size_x = value
self.reset_bounds()
@property
def cell_size_y(self):
"""Cell zise in Y direction.
Computed from Min/Max(X/Y)."""
return self._cell_size_y
@cell_size_y.setter
def cell_size_y(self, value):
self._cell_size_y = value
self.reset_bounds()
@property
def minx(self):
"""Minimum position of grid in X.
        Min/Max(X/Y) determine the grid edges."""
return self._minx
@minx.setter
def minx(self, value):
self._minx = value
self.reset_bounds()
@property
def miny(self):
"""Minimum position of grid in Y.
        Min/Max(X/Y) determine the grid edges."""
return self._miny
@miny.setter
def miny(self, value):
self._miny = value
self.reset_bounds()
@property
def maxx(self):
"""Maximum position of grid in X.
        Min/Max(X/Y) determine the grid edges."""
return self._maxx
@maxx.setter
def maxx(self, value):
self._maxx = value
self.reset_bounds()
@property
def maxy(self):
"""Maximum position of grid in Y"""
return self._maxy
@maxy.setter
def maxy(self, value):
self._maxy = value
self.reset_bounds()
@property
def orig_x(self):
return self.minx
@property
def orig_y(self):
return self.miny
@property
def origin(self) -> tuple:
"""Origin of grid (minx, miny) as a tuple"""
return self.orig_x, self.orig_y
def reset_bounds(self):
try:
self.x_edges = np.arange(self.orig_x, self.maxx + self.cell_size_x, self.cell_size_x)
self.y_edges = np.arange(self.orig_y, self.maxy + self.cell_size_y, self.cell_size_y)
if self.allocate:
self.grid_val = self.zeros()
self.grid_val.fill(np.nan)
self.grid_count = self.zeros()
except AttributeError:
pass # may not be set up yet
# Define extents of Groundings Histogram based on extents of grid raster
# Note: If Must keep origin at same grid cell node, set orig_x and orig_y as min coords of grid (bottom left)
@property
def orig(self) -> np.array:
"""Origin of grid as a numpy array (minx, miny)"""
return np.array([self.orig_x, self.orig_y]) # Min x and y coordinate of grid raster; used to generate raster of Groundings
@property
def numx(self):
"""The number of cells in the X direction"""
return len(self.x_edges)-1
@property
def numy(self):
"""The number of cells in the Y direction"""
return len(self.y_edges)-1
@property
def numcol(self):
"""The number of columns of data (cells in X direction)"""
return self.numx
@property
def numrow(self):
"""The number of rows of data (cells in Y direction)"""
return self.numy
def row_col_from_xy(self, x, y):
"""Returns the (Row,Col) based on the x,y position supplied.
Really you probably want to use array_indices_from_xy as (row,col) equates to (Y, X)"""
return self.row_index(y), self.col_index(x)
def row_index(self, y):
"""Get the Row based on the Y position"""
return (y - self.orig_y) / self.cell_size_y
def col_index(self, x):
"""Get the Col based on the X position"""
return (x - self.orig_x) / self.cell_size_x
def array_indices_from_xy(self, inputarray):
"""Pass in a numpy array of XY values and returns the row,column indices as a numpy array"""
output = np.array((inputarray - np.array(self.origin)) / np.array((self.cell_size_x, self.cell_size_y)),
                          dtype=np.int32)  # translating polygon envelope indices to grid indices
return output
def zeros(self, dtype=np.float64):
"""Get a zero numpy array with the size matching this object's rows and columns (x cells and y cells counts)"""
return np.zeros([self.numx, self.numy], dtype=dtype)
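# Hypothetical usage sketch: a 100 x 80 cell grid of 10 m cells anchored at
# (500000, 4200000), then mapping a position to fractional grid indices:
#   grid = Grid((500000.0, 4200000.0), (100, 80), 10.0, allocate=False)
#   r, c = grid.row_col_from_xy(500123.0, 4200456.0)   # -> (45.6, 12.3)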
|
{"hexsha": "e80867d7c98bd047ac5367772a3be7550ba6f2cf", "size": 7370, "ext": "py", "lang": "Python", "max_stars_repo_path": "HSTB/shared/gridded_coords.py", "max_stars_repo_name": "noaa-ocs-hydrography/shared", "max_stars_repo_head_hexsha": "d2004e803c708dffa43d09d3ffea4e4045811b28", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "HSTB/shared/gridded_coords.py", "max_issues_repo_name": "noaa-ocs-hydrography/shared", "max_issues_repo_head_hexsha": "d2004e803c708dffa43d09d3ffea4e4045811b28", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "HSTB/shared/gridded_coords.py", "max_forks_repo_name": "noaa-ocs-hydrography/shared", "max_forks_repo_head_hexsha": "d2004e803c708dffa43d09d3ffea4e4045811b28", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.0952380952, "max_line_length": 146, "alphanum_fraction": 0.6245590231, "include": true, "reason": "import numpy", "num_tokens": 1873}
|
#! /usr/bin/env python
############################# BEGIN FRONTMATTER ################################
# #
# TEA - calculates Thermochemical Equilibrium Abundances of chemical species #
# #
# TEA is part of the PhD dissertation work of Dr. Jasmina #
# Blecic, who developed it with coding assistance from #
# undergraduate M. Oliver Bowman and under the advice of #
# Prof. Joseph Harrington at the University of Central Florida, #
# Orlando, Florida, USA. #
# #
# Copyright (C) 2014-2016 University of Central Florida #
# #
# This program is reproducible-research software: you can #
# redistribute it and/or modify it under the terms of the #
# Reproducible Research Software License as published by #
# Prof. Joseph Harrington at the University of Central Florida, #
# either version 0.3 of the License, or (at your option) any later #
# version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# Reproducible Research Software License for more details. #
# #
# You should have received a copy of the Reproducible Research #
# Software License along with this program. If not, see #
# <http://planets.ucf.edu/resources/reproducible/>. The license's #
# preamble explains the situation, concepts, and reasons surrounding #
# reproducible research, and answers some common questions. #
# #
# This project was started with the support of the NASA Earth and #
# Space Science Fellowship Program, grant NNX12AL83H, held by #
# Jasmina Blecic, Principal Investigator Joseph Harrington, and the #
# NASA Science Mission Directorate Planetary Atmospheres Program, #
# grant NNX12AI69G. #
# #
# See the file ACKNOWLEDGING in the top-level TEA directory for #
# instructions on how to acknowledge TEA in publications. #
# #
# We welcome your feedback, but do not guarantee support. #
# Many questions are answered in the TEA forums: #
# #
# https://physics.ucf.edu/mailman/listinfo/tea-user #
# https://physics.ucf.edu/mailman/listinfo/tea-devel #
# #
# Visit our Github site: #
# #
# https://github.com/dzesmin/TEA/ #
# #
# Reach us directly at: #
# #
# Jasmina Blecic <jasmina@physics.ucf.edu> #
# Joseph Harrington <jh@physics.ucf.edu> #
# #
############################## END FRONTMATTER #################################
import numpy as np
import sys
import ntpath
import os
import shutil
import time
import multiprocessing as mp
import ctypes
import warnings
import six
import readconf as rc
import iterate as it
import format as form
import makeheader as mh
import readatm as ra
import balance as bal
location_TEA = os.path.realpath(os.path.dirname(__file__) + "/..") + "/"
# =============================================================================
# This program runs TEA over a pre-atm file that contains multiple T-P points.
# It prints on screen the current T-P line from the pre-atm file and the
# iteration number at which the set precision (tolerance error) is reached;
# if the maximum number of iterations is hit, it informs the user that the
# minimization is done.
# Example:
# Layer 100:
# 5
# The solution has converged to the given tolerance error.
#
# The program is executed with in-shell inputs:
# runatm.py <MULTITP_INPUT_FILE_PATH> <DIRECTORY_NAME>
# Example: ../TEA/tea/runatm.py ../TEA/tea/doc/examples/multiTP/atm_inputs/multiTP_Example.atm example_multiTP
# =============================================================================
def worker(pressure, temp, b, free_energy, heat, stoich_arr, guess,
maxiter, verb, times, xtol, savefiles, start, end, abn, n):
"""
Multiprocessing thermochemical-equilibrium calculation.
"""
# Switch off verbosity if using more than one CPU
#if ncpu > 1 and n != 0:
if ncpu > 1:
verb, times = 0, False
save_info = None
for q in np.arange(start, end):
if verb >= 1:
print('\nLayer {:d}:'.format(q+1))
g_RT = mh.calc_gRT(free_energy, heat, temp[q])
if savefiles:
save_info = location_out, desc, speclist, temp[q]
hfolder = location_out + desc + "/headers/"
mh.write_header(hfolder, desc, temp[q], pressure[q], speclist,
atom_name, stoich_arr, b[q], g_RT)
# Execute main TEA loop for the current line, run iterate.py
y, x, delta, y_bar, x_bar, delta_bar = it.iterate(pressure[q],
stoich_arr, b[q], g_RT, maxiter, verb, times, guess, xtol, save_info)
guess = x, x_bar
abn[q] = x/x_bar
tstart = time.time()
# Read configuration-file parameters:
TEApars, PREATpars = rc.readcfg()
maxiter, savefiles, verb, times, abun_file, location_out, xtol, ncpu = TEApars
# Print license
if verb>=1:
print("\n\
================= Thermal Equilibrium Abundances (TEA) =================\n\
A program to calculate species abundances under thermochemical equilibrium.\n\
\n\
Copyright (C) 2014-2016 University of Central Florida.\n\
\n\
This program is reproducible-research software. See the Reproducible\n\
Research Software License that accompanies the code, or visit:\n\
http://planets.ucf.edu/resources/reproducible\n\
Questions? Feedback? Search our mailing list archives or post a comment:\n\
https://physics.ucf.edu/mailman/listinfo/tea-user\n\
\n\
Direct contact: \n\
Jasmina Blecic <jasmina@physics.ucf.edu> \n\
========================================================================\n")
# Correct directory names
if location_out[-1] != '/':
location_out += '/'
# Retrieve pre-atm file
infile = sys.argv[1:][0]
# Retrieve current output directory name given by user
desc = sys.argv[1:][1]
# Check if config file exists in the working directory
TEA_config = 'TEA.cfg'
try:
f = open(TEA_config)
except IOError:
print("\nConfig file is missing. Place TEA.cfg in the working directory.\n")
# If input file does not exist break
try:
f = open(infile)
except:
raise IOError ("\nPre-atmospheric file does not exist.\n")
# Set up locations of necessary scripts and directories of files
thermo_dir = location_TEA + "lib/gdata"
if verb==2 or savefiles==True:
inputs_dir = location_out + desc + "/inputs/"
out_dir = location_out + desc + "/results/"
if os.path.exists(out_dir):
six.moves.input(" Output directory " + str(location_out + desc) +
"/\n already exists.\n"
" Press enter to continue and overwrite existing files,\n"
" or quit and choose another output name.\n")
# Create directories
if not os.path.exists(out_dir):
os.makedirs(out_dir)
if not os.path.exists(inputs_dir):
os.makedirs(inputs_dir)
# Inform user if TEA.cfg file already exists in inputs/ directory
if os.path.isfile(inputs_dir + TEA_config):
print(" " + str(TEA_config) + " overwritten in inputs/ directory.")
# Copy TEA.cfg file to current inputs directory
shutil.copy2(TEA_config, inputs_dir + TEA_config)
# Inform user if abundances file already exists in inputs/ directory
head, abun_filename = ntpath.split(abun_file)
if os.path.isfile(inputs_dir + abun_filename):
print(" " + str(abun_filename) + " overwritten in inputs/ directory.")
# Copy abundances file to inputs/ directory
shutil.copy2(abun_file, inputs_dir + abun_filename)
# Inform user if pre-atm file already exists in inputs/ directory
head, preatm_filename = ntpath.split(infile)
if os.path.isfile(inputs_dir + preatm_filename):
print(" pre-atm file " + str(preatm_filename) +
" overwritten in inputs/ directory.")
else:
# Copy pre-atm file to inputs/ directory
shutil.copy2(infile, inputs_dir + preatm_filename)
# Read pre-atm file
n_runs, speclist, pres_arr, temp_arr, atom_arr, atom_name, end_head = \
ra.readatm(infile)
# Number of output species:
nspec = np.size(speclist)
# Correct species list for only species found in thermo_dir
gdata_files = os.listdir(thermo_dir)
good_spec = []
for i in np.arange(nspec):
spec_file = speclist[i] + '.txt'
if spec_file in gdata_files:
good_spec = np.append(good_spec, speclist[i])
else:
print('Species ' + speclist[i] + ' does not exist in /' \
+ thermo_dir.split("/")[-1] + ' ! IGNORED THIS SPECIES.')
# Update list of valid species
speclist = np.copy(good_spec)
# =================== Start writing final atm file ===================
# Open final atm file for writing, keep open to add new lines
# If running in multiprocessor mode with verbosity zero, suppress savefiles
fout_name = desc + '.tea'
if verb==2 or savefiles==True:
fout_name = out_dir + desc + '.tea'
fout = open(fout_name, 'w+')
# Write a header file
fout.write(
"# This is a final TEA output file with calculated abundances (mixing "
"fractions) for all listed species."
"\n# Units: pressure (bar), temperature (K), abundance (unitless).\n\n")
fout.write('#SPECIES\n')
# Write corrected species list into pre-atm file and continue
for i in np.arange(nspec):
fout.write(speclist[i] + ' ')
fout.write("\n\n")
fout.write("#TEADATA\n")
# Write data header from the pre-atm file into each column of atm file
fout.write('#Pressure'.ljust(11) + ' ')
fout.write('Temp'.ljust(8) + ' ')
for i in np.arange(nspec):
fout.write(speclist[i].ljust(10)+' ')
fout.write('\n')
# Times / speed check for pre-loop runtime
if times:
tnew = time.time()
elapsed = tnew - tstart
print("\npre-loop: " + str(elapsed))
# Suppress the warning that ctypeslib would otherwise throw
with warnings.catch_warnings():
warnings.simplefilter("ignore")
# Allocate abundances matrix for all species and all T-Ps
sm_abn = mp.Array(ctypes.c_double, n_runs*nspec)
abn = np.ctypeslib.as_array(sm_abn.get_obj()).reshape((n_runs, nspec))
# Bound ncpu to the machine capacity
ncpu = np.clip(ncpu, 1, mp.cpu_count())
chunksize = int(n_runs/float(ncpu)+1)
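# For example (hypothetical sizes): n_runs = 100 layers on ncpu = 3 CPUs gives
# chunksize = 34, so the workers below get layers [0, 34), [34, 68), [68, 100).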
# Load gdata
free_energy, heat = mh.read_gdata(speclist, thermo_dir)
stoich_arr, elem_arr = mh.read_stoich(speclist)
temp_arr = np.array(temp_arr, np.double)
pres_arr = np.array(pres_arr, np.double)
atom_arr = np.array(atom_arr, np.double)
# Use only elements with non-null stoichiometric values
eidx = np.in1d(atom_name, elem_arr)
atom_arr = atom_arr[:,eidx]
atom_name = atom_name[eidx]
# Sort stoich_arr according to atom_name
sidx = np.zeros(len(atom_name), int)
for i in np.arange(len(atom_name)):
sidx[i] = np.where(elem_arr == atom_name[i])[0][0]
stoich_arr = stoich_arr[:,sidx]
# Time / speed testing for balance.py
if times:
ini = time.time()
# Initial abundances guess
guess = bal.balance(stoich_arr, atom_arr[0], verb)
# Retrieve balance runtime
if times:
fin = time.time()
elapsed = fin - ini
print("balance.py: " + str(elapsed))
# ============== Execute TEA for each T-P ==============
# Loop over all lines in pre-atm file and execute TEA loop
processes = []
for n in np.arange(ncpu):
start = n * chunksize
end = np.amin(((n+1) * chunksize, n_runs))
proc = mp.Process(target=worker, args=(pres_arr, temp_arr, atom_arr,
free_energy, heat, stoich_arr, guess, maxiter, verb, times,
xtol, savefiles, start, end, abn, n))
processes.append(proc)
proc.start()
# Make sure all processes finish their work
for n in np.arange(ncpu):
processes[n].join()
# Write layers output
for q in np.arange(n_runs):
fout.write("{:.4e} {:7.2f} ".format(pres_arr[q], temp_arr[q]))
for i in np.arange(nspec):
fout.write('{:1.4e} '.format(abn[q,i]))
fout.write('\n')
# Close atm file
fout.close()
# Print on-screen
if verb >= 1:
print("\n Species abundances calculated.\n Created TEA atmospheric file.")
# Time / speed testing
if verb >= 1:
tend = time.time()
elapsed = tend - tstart
print("Overall run time: " + str(elapsed) + " seconds")
|
{"hexsha": "00dbe4fc65d369635f2edfe00b67a4bf96f96cf9", "size": 14125, "ext": "py", "lang": "Python", "max_stars_repo_path": "run/runatm.py", "max_stars_repo_name": "SiddhantDeshmukh/TEA", "max_stars_repo_head_hexsha": "beaa882b7084d380a38a6bf5f219b0ee848afb9e", "max_stars_repo_licenses": ["BSD-4-Clause-UC"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "run/runatm.py", "max_issues_repo_name": "SiddhantDeshmukh/TEA", "max_issues_repo_head_hexsha": "beaa882b7084d380a38a6bf5f219b0ee848afb9e", "max_issues_repo_licenses": ["BSD-4-Clause-UC"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "run/runatm.py", "max_forks_repo_name": "SiddhantDeshmukh/TEA", "max_forks_repo_head_hexsha": "beaa882b7084d380a38a6bf5f219b0ee848afb9e", "max_forks_repo_licenses": ["BSD-4-Clause-UC"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.5890804598, "max_line_length": 110, "alphanum_fraction": 0.5650973451, "include": true, "reason": "import numpy", "num_tokens": 3164}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
a module for basic VO Registry interactions.
A VO registry is a database of VO resources--data collections and
services--that are available for VO applications. Typically, it is
aware of the resources from all over the world. A registry can find
relevant data collections and services through search
queries--typically, subject-based. The registry responds with a list
of records describing matching resources. With a record in hand, the
application can use the information in the record to access the
resource directly. Most often, the resource is a data service that
can be queried for individual datasets of interest.
This module provides basic, low-level access to the VAO Registry at
STScI using (proprietary) VOTable-based services. In most cases,
the Registry task, with its higher-level features (e.g. result caching
and resource aliases), can be a more convenient interface. The more
basic interface provided here allows developers to code their own
interaction models.
"""
from __future__ import print_function, division
from ..dal import query as dalq
from ..dal import sia, ssa, sla, scs
try:                                      # Python 3
    from urllib.parse import quote_plus
    from urllib.request import urlopen
except ImportError:                       # Python 2
    from urllib import quote_plus, urlopen
import re
import numpy.ma as _ma
__all__ = [ "search", "RegistryService", "RegistryQuery",
"RegistryResults", "SimpleResource" ]
def search(keywords=None, servicetype=None, waveband=None, sqlpred=None):
"""
execute a simple query to the VAO registry.
Parameters
----------
keywords : str or list of str
keyword terms to match to registry records.
Use this parameter to find resources related to a
particular topic.
servicetype : str
the service type to restrict results to.
Allowed values include,
'catalog' (synonyms: 'table', 'scs', 'conesearch', 'ConeSearch'),
'image' (synonyms: 'sia', 'SimpleImageAccess'),
'spectrum' (synonyms: 'ssa', 'ssap', 'SimpleSpectralAccess'),
'line' (synonyms: 'sla', 'slap', 'SimpleLineAccess')
'database' (synonyms: 'tap','TableAccess').
waveband : str
the name of a desired waveband; resources returned
       will be restricted to those that indicate having
data in that waveband. Allowed, case-insensitive
values include 'Radio', 'Millimeter', 'Infrared'
(synonym: 'IR'), 'Optical', 'UV', 'EUV', 'X-ray'
(synonym: 'Xray').
sqlpred : str
an SQL WHERE predicate (without the leading "WHERE")
       that further constrains the search against supported
keywords.
Returns
-------
RegistryResults
a container holding a table of matching resource (e.g. services)
See Also
--------
RegistryResults
"""
reg = RegistryService()
return reg.search(keywords, servicetype, waveband, sqlpred)
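# Hypothetical usage (requires network access to the STScI registry service):
#
#   results = search(keywords=["quasar"], servicetype="image",
#                    waveband="Optical")
#   for rec in results:
#       print(rec.shortname, rec.accessurl)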
class RegistryService(dalq.DALService):
"""
a class for submitting searches to the VAO registry.
"""
STSCI_REGISTRY_BASEURL = "http://vao.stsci.edu/directory/NVORegInt.asmx/"
def __init__(self, baseurl=None, resmeta=None, version="1.0"):
"""
connect to an STScI registry at the given URL
Parameters
----------
baseurl : str
the base URL for submitting search queries to the
service. If None, it will default to the STScI
public registry
resmeta : str
an optional dictionary of properties about the service
"""
if not baseurl: baseurl = self.STSCI_REGISTRY_BASEURL
if not baseurl.endswith("/"): baseurl += "/"
super(RegistryService, self).__init__(baseurl, "vaoreg",
version, resmeta)
def search(self, keywords=None, servicetype=None,
waveband=None, orkw=False, sqlpred=None):
"""
execute a simple registry search of the specified
keywords.
Parameters
----------
keywords : str or list of str
keyword terms to match to registry records.
Use this parameter to find resources related to a
particular topic.
servicetype : str
the service type to restrict results to.
Allowed values include,
'catalog' (synonyms: 'table', 'scs', 'conesearch', 'ConeSearch'),
'image' (synonyms: 'sia', 'SimpleImageAccess'),
'spectrum' (synonyms: 'ssa', 'ssap', 'SimpleSpectralAccess'),
'line' (synonyms: 'sla', 'slap', 'SimpleLineAccess')
'database' (synonyms: 'tap','TableAccess').
waveband : str
the name of a desired waveband; resources returned
           will be restricted to those that indicate having
data in that waveband. Allowed, case-insensitive
values include 'Radio', 'Millimeter', 'Infrared'
(synonym: 'IR'), 'Optical', 'UV', 'EUV', 'X-ray'
(synonym: 'Xray').
orkw : bool
If true, the keywords will be OR-ed together,
and returned records will match at least one of
           the keywords. If false (default), the keywords will
           be AND-ed, requiring the returned records to
match all of the keywords.
sqlpred : str
an SQL WHERE predicate (without the leading "WHERE")
           that further constrains the search against supported
keywords.
Returns
-------
RegistryResults
a container holding a table of matching resource (e.g. services)
See Also
--------
RegistryResults
"""
srch = self.create_query(keywords, servicetype, waveband, orkw, sqlpred)
# print(srch.getqueryurl())
return srch.execute()
def resolve(self, ivoid):
"""
Resolve the identifier against the registry, returning a
resource record.
Parameters
----------
ivoid : str
the IVOA Identifier of the resource
"""
srch = self.create_query()
srch.addpredicate("identifier='{0}'".format(ivoid))
res = srch.execute()
return res.getrecord(0)
def create_query(self, keywords=None, servicetype=None,
waveband=None, orkw=False, sqlpred=None):
"""
create a RegistryQuery object that can be refined or saved
before submitting.
Parameters
----------
keywords : str
a string giving a single term or a python list
of terms to match to registry records.
servicetype : str
the service type to restrict results to;
allowed values include,
'catalog' (synonyms: 'table', 'scs', 'conesearch', 'ConeSearch'),
'image' (synonyms: 'sia', 'SimpleImageAccess'),
'spectrum' (synonyms: 'ssa', 'ssap', 'SimpleSpectralAccess'),
'line' (synonyms: 'sla', 'slap', 'SimpleLineAccess')
'database' (synonyms: 'tap','TableAccess').
waveband : str
the name of a desired waveband; resources returned
           will be restricted to those that indicate having
data in that waveband. Allowed, case-insensitive
values include 'Radio', 'Millimeter', 'Infrared'
(synonym: 'IR'), 'Optical', 'UV', 'EUV', 'X-ray'
(synonym: 'Xray').
orkw : bool
If true, the keywords will be OR-ed together,
and returned records will match at least one of
           the keywords. If false (default), the keywords will
           be AND-ed, requiring the returned records to
match all of the keywords.
sqlpred : str
an SQL WHERE predicate (without the leading "WHERE")
           that further constrains the search against supported
keywords.
See Also
--------
RegistryQuery
"""
srch = RegistryQuery(self._baseurl)
if sqlpred:
srch.addpredicate(sqlpred)
if waveband:
srch.waveband = waveband
if servicetype:
srch.servicetype = servicetype
if keywords:
srch.addkeywords(keywords)
if isinstance(orkw, bool):
srch.or_keywords(orkw)
elif orkw is not None:
raise ValueError("create_query: orkw parameter not a bool: " +
str(orkw))
return srch
class RegistryQuery(dalq.DALQuery):
"""
a representation of a registry query that can be built up over
successive method calls and then executed. An instance is normally
obtained via a call to RegistrySearch.create_query()
"""
SERVICE_NAME = "VOTCapBandPredOpt"
# SERVICE_NAME = "VOTCapability"
RESULTSET_TYPE_ARG = "VOTStyleOption=2"
ALLOWED_WAVEBANDS = "Radio Millimeter Infrared Optical UV".split() + \
"EUV X-ray Gamma-ray".split()
WAVEBAND_SYN = { "ir": "Infrared",
"IR": "Infrared",
"uv": "UV",
"euv": "EUV",
"xray": "X-ray" }
ALLOWED_CAPS = { "table": "ConeSearch",
"catalog": "ConeSearch",
"scs": "ConeSearch",
"conesearch": "ConeSearch",
"image": "SimpleImageAccess",
"sia": "SimpleImageAccess",
"spectra": "SimpleSpectralAccess",
"spectrum": "SimpleSpectralAccess",
"ssa": "SimpleSpectralAccess",
"ssap": "SimpleSpectralAccess",
"line": "SimpleLineAccess",
"sla": "SimpleLineAccess",
"slap": "SimpleLineAccess",
"tap": "TableAccess",
"database": "TableAccess",
"tableAccess": "TableAccess",
"simpleImageAccess": "SimpleImageAccess",
"simpleLineAccess": "SimpleLineAccess",
"simpleSpectralAccess": "SimpleSpectralAccess" }
def __init__(self, baseurl=None, orKeywords=False, version="1.0"):
"""
create the query instance
Parameters
----------
baseurl : str
the base URL for the VAO registry. If None, it will
be set to the public VAO registry at STScI.
orKeywords : bool
if True, keyword constraints will by default be
OR-ed together; that is, a resource that matches
any of the keywords will be returned. If FALSE,
the keywords will be AND-ed, thus requiring a
resource to match all the keywords.
"""
if not baseurl: baseurl = RegistryService.STSCI_REGISTRY_BASEURL
super(RegistryQuery, self).__init__(baseurl, "vaoreg", version)
self._kw = [] # list of individual keyword phrases
self._preds = [] # list of SQL predicates
self._svctype = None
self._band = None
self._orKw = orKeywords
self._doSort = True
self._dalonly = False
@property
def keywords(self):
"""
return the current set of keyword constraints
To update, use addkeywords(), removekeywords(), or clearkeywords().
"""
return list(self._kw)
def addkeywords(self, keywords):
"""
add keywords that should be added to this query. Keywords
are searched against key fields in the registry record. A
keyword can in fact be a phrase--a sequence of words; in this
case the sequence of words must appear verbatim in the record
for that record to be matched.
Parameters
----------
keywords : str or list of str
either a single keyword phrase (as a string)
or a list of keyword phrases to add to the
query.
"""
if isinstance(keywords, str):
keywords = [keywords]
self._kw.extend(keywords)
def removekeywords(self, keywords):
"""
remove the given keyword or keywords from the query. A
keyword can in fact be a phrase--a sequence of words; in this
        case, the whole phrase will be removed.
Parameters
----------
keywords : str or list of str
either a single keyword phrase (as a string)
or a list of keyword phrases to remove from
the query.
"""
if isinstance(keywords, str):
keywords = [keywords]
for kw in keywords:
self._kw.remove(kw)
def clearkeywords(self):
"""
remove all keywords that have been added to this query.
"""
self._kw = []
def or_keywords(self, ored):
"""
set whether keywords are OR-ed or AND-ed together. When
the keywords are OR-ed, returned records will match at
least one of the keywords. When they are AND-ed, the
records will match all of the keywords provided.
Parameters
----------
ored : bool
true, if the keywords should be OR-ed; false,
if they should be AND-ed.
"""
if not isinstance(ored, bool):
raise ValueError("RegistryQuery.or_keyword: value not a bool")
self._orKw = ored
def will_or_keywords(self):
"""
Return true if the keywords will be OR-ed.
"""
return self._orKw
@property
def servicetype(self):
"""
the type of service that query results will be restricted to.
"""
return self._svctype
@servicetype.setter
def servicetype(self, val):
        if not val:
            raise ValueError("missing serviceType value")
        if len(val) < 2:
            raise ValueError("unrecognized serviceType value: " + val)
        # uncapitalize
        if val[0].upper() == val[0]:
            val = val[0].lower() + val[1:]
        if val not in self.ALLOWED_CAPS:
            raise ValueError("unrecognized servicetype value: " + val)
self._svctype = val
@servicetype.deleter
def servicetype(self):
self._svctype = None
@property
def waveband(self):
"""
the waveband to restrict the query by. The query results will
        include only those resources that indicate they have data from this
waveband. Allowed values include "Radio", "Millimeter", "Infrared"
(synonym: "IR"), "Optical", "UV", "EUV", "X-ray" (synonym: "Xray");
when setting, the value is case-insensitive.
"""
return self._band
@waveband.setter
def waveband(self, band):
if band is None:
self._band = None
return
if not isinstance(band, str):
raise ValueError("band should be a string; got: " + str(type(band)))
if not band:
raise ValueError("missing waveband value");
if len(band) < 2:
raise ValueError("unrecognized waveband: " + band);
_band = band
if self.WAVEBAND_SYN.has_key(band):
_band = self.WAVEBAND_SYN[band]
else:
# capitalize
_band = _band[0].upper() + _band[1:]
if _band not in self.ALLOWED_WAVEBANDS:
raise ValueError("unrecognized waveband: " + band)
self._band = _band
@waveband.deleter
def waveband(self):
self._band = None
@property
def predicates(self):
"""
the (read-only) list of predicate constraints that will
be applied to the query. These will be AND-ed with all other
constraints (including previously added predicates); that is,
this constraint must be satisfied in addition to the other
constraints to match a particular resource record.
To update, use addpredicate(), removepredicate(), or clearpredicate().
"""
return list(self._preds)
def addpredicate(self, pred):
"""
add an SQL search predicate to the query. This predicate should
        be of a form supported by the STScI VOTable search services. This
predicate will be AND-ed with all other constraints (including
previously added predicates); that is, this constraint must be
satisfied in addition to the other constraints to match a
particular resource record.
"""
self._preds.append(pred)
def removepredicate(self, pred):
"""
        remove the given predicate from the current set of predicate
constraints.
"""
self._preds.remove(pred)
def clearpredicates(self):
"""
remove all previously added predicates.
"""
self._preds = []
def execute_votable(self):
"""
submit the query and return the results as an AstroPy votable instance
Raises
------
DALServiceError
for errors connecting to or communicating with the service
DALFormatError
for errors parsing the VOTable response
DALQueryError
for errors in the input query syntax
"""
out = dalq.DALQuery.execute_votable(self)
res = dalq.DALResults(out)
tbl = res.votable
        # We note that the server-side implementation of the service will
        # include all of the capability records of resources that have
        # capabilities of the given type. Consequently, the results include
        # capabilities that are not of the requested type.
# filter out service types that don't match
if self.servicetype:
cap = self._toCapConst(self.servicetype).encode('utf-8')
tbl.array = \
_ma.array(tbl.array.data[tbl.array.data['capabilityClass']==cap],
mask=tbl.array.mask[tbl.array.data['capabilityClass']==cap])
tbl._nrows = tbl.array.shape[0]
return out
def execute(self):
"""
submit the query and return the results as a RegistryResults
instance.
Raises
------
RegistryServiceError
for errors connecting to or communicating with the service
RegistryQueryError
if the service responds with an error, including a query syntax
error. A syntax error should only occur if the query contains
non-sensical predicates.
"""
return RegistryResults(self.execute_votable(), self.getqueryurl())
def execute_stream(self):
"""
submit the query and return the raw VOTable XML as a file stream
Raises
------
DALServiceError
for errors connecting to or communicating with the service
DALQueryError
for errors in the input query syntax
"""
try:
url = self.getqueryurl()
out = urlopen(url)
if dalq._is_python3:
contenttype = out.info().get_content_type()
else:
contenttype = out.info().gettype()
if contenttype == "text/plain":
# Error message returned
self._raiseServiceError(out.read())
elif contenttype != "text/xml":
# Unexpected response
raise dalq.DALFormatError("Wrong response format: " +
contenttype)
return out
except IOError as ex:
raise dalq.DALServiceError.from_except(ex, url)
def _raiseServiceError(self, response):
invalidmessage = "System.InvalidOperationException: "
outmsg = re.sub(r'\n.*', '', response).strip()
if response.startswith(invalidmessage):
raise dalq.DALQueryError(outmsg[len(invalidmessage):])
raise dalq.DALServiceError(outmsg)
def getqueryurl(self, lax=False):
"""
return the GET URL that will submit the query and return the
results as a VOTable
"""
url = "{0}{1}?{2}".format(self._baseurl, self.SERVICE_NAME,
self.RESULTSET_TYPE_ARG)
# this adds arbitrary parameters
# if len(self.paramnames()) > 0:
# url += "&" + \
# "&".join(map(lambda p: "{0}={1}".format(p,self._paramtostr(self._param[p])),
# self._param.keys()))
if self._band:
url += "&waveband={0}".format(self._band)
else:
url += "&waveband="
if self._svctype:
url += "&capability={0}".format(self._toCapConst(self.servicetype))
else:
url += "&capability="
preds = list(self._preds)
if (self.keywords):
preds.append(self.keywords_to_predicate(self.keywords, self._orKw))
if (preds):
url += "&predicate={0}".format(
quote_plus(" AND ".join(map(lambda p: "({0})".format(p),
preds))))
else:
url += "&predicate="
return url
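    # For example, a query restricted to waveband "Optical" and servicetype
    # "image" with no keywords or extra predicates yields (with the default
    # base URL configured above):
    #   http://vao.stsci.edu/directory/NVORegInt.asmx/VOTCapBandPredOpt?VOTStyleOption=2&waveband=Optical&capability=SimpleImageAccess&predicate=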
def _toCapConst(self, stype):
return self.ALLOWED_CAPS[stype]
def keywords_to_predicate(self, keywords, ored=True):
"""
return the given keywords as a predicate that can be added to
the current query. This function can be overridden to change
how keyword searches are implemented.
Parameters
----------
*keywords* a python list of the keywords
*ored* if True, the keywords should be ORed together;
otherwise, they should be ANDed
"""
textcols = ["title", "shortName", "identifier",
"[content/subject]", "[curation/publisher]",
"[content/description]" ]
conjunction = (ored and ") OR (") or ") AND ("
const = []
for kw in keywords:
keyconst = []
for col in textcols:
keyconst.append("{0} LIKE '%{1}%'".format(col, kw))
const.append(" OR ".join(keyconst))
return "("+conjunction.join(const)+")"
class RegistryResults(dalq.DALResults):
"""
an iterable set of results from a registry query. Each record is
returned as SimpleResource instance
"""
_strarraycols = ["waveband", "subject", "type", "contentLevel"]
def __init__(self, votable, url=None, version="1.0"):
"""
initialize the results. This constructor is not typically called
by directly applications; rather an instance is obtained from calling
a SIAQuery's execute().
"""
super(RegistryResults, self).__init__(votable, url, "vaoreg", version)
def getrecord(self, index):
"""
return all the attributes of a resource record with the given index
as SimpleResource instance (a dictionary-like object).
Parameters
----------
index : int
the zero-based index of the record
"""
return SimpleResource(self, index)
def getvalue(self, name, index):
"""
return the value of a record attribute--a value from a column and row.
This implementation is aware of how lists of strings are encoded
and will return a python list of strings accordingly.
Parameters
----------
name : str
the name of the attribute (column)
index : int
the zero-based index of the record
Raises
------
IndexError
if index is negative or equal or larger than the
number of rows in the result table.
KeyError
if name is not a recognized column name
"""
out = super(RegistryResults, self).getvalue(name, index)
if name in self._strarraycols:
out = split_str_array_cell(out)
return out
@property
def size(self):
"""
the number of records returned in this result (read-only)
"""
return self.votable.nrows
class SimpleResource(dalq.Record):
"""
a dictionary for the resource metadata returned in one record of a
registry query.
A SimpleResource acts as a dictionary, so in general, all attributes can
be accessed by name via the [] operator, and the attribute names can
by returned via the keys() function. For convenience, it also stores
key values as properties; these include:
Properties
----------
title : bytes
the title of the resource
shortname : bytes
the resource's short name
ivoid : bytes
the IVOA identifier for the resource (identifier will also work)
accessurl : str
when the resource is a service, the service's access URL.
"""
def __init__(self, results, index):
super(SimpleResource, self).__init__(results, index)
def __getitem__(self, key):
"""
return a resource metadatum value with a name given by key. This
version will split encoded string array values into tuples.
"""
out = super(SimpleResource, self).__getitem__(key)
if key in RegistryResults._strarraycols:
out = split_str_array_cell(out)
return out
@property
def title(self):
"""
the title of the resource
"""
return self.get("title")
@property
def shortname(self):
"""
the short name for the resource
"""
return self.get("shortName")
@property
def description(self):
"""
the textual description of the resource.
See Also
--------
SimpleResource.describe
"""
return self.get("description")
@property
def tags(self):
"""
a user-friendly label for the resource
"""
return self.get("tags")
@property
def ivoid(self):
"""
the IVOA identifier for the resource. In this interface, this
ID may be appended by a #-delimited suffix to point to a particular
capability.
"""
return self.get("identifier")
@property
def identifier(self):
"""
the IVOA identifier for the resource. In this interface, this
ID may be appended by a #-delimited suffix to point to a particular
capability.
"""
return self.get("identifier")
@property
def publisher(self):
"""
the name of the organization responsible for providing this resource.
"""
return self.get("publisher")
@property
def waveband(self):
"""
a list of names of the wavebands that the resource provides data for
"""
return self.get("waveband")
@property
def subject(self):
"""
a list of the subject keywords that describe this resource
"""
return self.get("subject")
@property
def type(self):
"""
a list of the resource types that characterize this resource.
"""
return self.get("type")
@property
def contentlevel(self):
"""
a list of content level labels that describe the intended audience
for this resource.
"""
return self.get("contentLevel")
@property
def capability(self):
"""
the name of the IVOA service capability. This will typically set to
the value of the capability/@xsi:type attribute in the VOResource
record (without the namespace prefix).
"""
return self.get("capabilityClass")
@property
def standardid(self):
"""
the IVOA identifier of the standard that this resource capability
supports.
"""
return self.get("capabilityStandardID")
@property
def accessurl(self):
"""
the URL that can be used to access the service resource. If the
resource is not a service, this will typically be blank.
Note that this will always be returned as a native string--i.e. as
        unicode for Python 3 and as a byte-string for Python 2--making it ready
to use as a URL with urllib functions.
"""
return self._get_to_str("accessURL")
def to_service(self):
"""
return an appropriate DALService subclass for this resource that
can be used to search the resource. Return None if the resource is
not a recognized DAL service. Currently, only Conesearch, SIA, SSA,
and SLA services are supported.
"""
        return _createService(self, True)
def search(self, *args, **keys):
"""
assuming this resource refers to a searchable service, execute a
search against the resource. This is equivalent to:
self.to_service().search(*args, **keys)
The arguments provided should be those that the underlying DAL
service type expects. See the documentation for the
appropriate service type:
============ =========================================
Service type Use the argument syntax for
============ =========================================
catalog :py:meth:`pyvo.dal.scs.SCSService.search`
image :py:meth:`pyvo.dal.sia.SIAService.search`
spectrum :py:meth:`pyvo.dal.ssa.SSAService.search`
line :py:meth:`pyvo.dal.sla.SLAService.search`
database *not yet supported*
============ =========================================
Raises
------
RuntimeError
if the resource does not describe a searchable service.
"""
service = _createService(self, False)
if not service:
raise RuntimeError("resource, {0}, is not a searchable service".format(self.shortname))
return service.search(*args, **keys)
def describe(self, verbose=False, width=78, file=None):
"""
Print a summary description of this resource.
Parameters
----------
verbose : bool
If false (default), only user-oriented information is
printed; if true, additional information will be printed
as well.
width : int
Format the description with given character-width.
file : writable file-like object
If provided, write information to this output stream.
Otherwise, it is written to standard out.
"""
restype = "Generic Resource"
if self.get("interfaceClass"):
# it's a service of some kind
restype = "Custom Service"
stdid = self.get("capabilityStandardID")
if stdid:
if stdid.startswith("ivo://ivoa.net/std/ConeSearch"):
restype = "Catalog Cone-search Service"
elif stdid.startswith("ivo://ivoa.net/std/SIA"):
restype = "Image Data Service"
elif stdid.startswith("ivo://ivoa.net/std/SSA"):
restype = "Spectrum Data Service"
elif stdid.startswith("ivo://ivoa.net/std/SLA"):
restype = "Spectral Line Database Service"
elif stdid.startswith("ivo://ivoa.net/std/Registry"):
restype = "Registry Service"
if "Harvest" in self.get("capabilityClass"):
restype = "Registry Harvest Service"
elif "Search" in self.get("capabilityClass"):
restype = "Registry Search Service"
elif self.get("interfaceClass") == "WebBrowser":
restype = "Web-page Based Service"
print(restype, file=file)
print(dalq.para_format_desc(self.title), file=file)
print("Short Name: " + self.shortname, file=file)
print("Publisher: " + dalq.para_format_desc(self.publisher), file=file)
print("IVOA Identifier: " + self.identifier, file=file)
if self.accessurl:
print("Base URL: " + self.accessurl, file=file)
if self.description:
print(file=file)
print(dalq.para_format_desc(self.description), file=file)
print(file=file)
if self.get("subjects"):
val = self.get("subjects")
if not hasattr(val, "__getitem__"):
val = [val]
val = (str(v) for v in val)
print(dalq.para_format_desc("Subjects: " + ", ".join(val)),
file=file)
if self.get("waveband"):
val = self.get("waveband")
if not hasattr(val, "__getitem__"):
val = [val]
val = (str(v) for v in val)
print(dalq.para_format_desc("Waveband Coverage: " + ", ".join(val)),
file=file)
if verbose:
if self.get("capabilityStandardID"):
print("StandardID: " + self["capabilityStandardID"], file=file)
if self.get("referenceURL"):
print("More info: " + self["referenceURL"], file=file)
_standardIDs = {
"ivo://ivoa.net/std/ConeSearch": scs.SCSService,
"ivo://ivoa.net/std/SIA": sia.SIAService,
"ivo://ivoa.net/std/SSA": ssa.SSAService,
"ivo://ivoa.net/std/SLAP": sla.SLAService,
}
def _createService(resource, savemeta=False):
if not resource.accessurl:
return None
meta = None
if savemeta:
meta = resource
serviceCls = _standardIDs.get(resource.standardid)
try:
    if serviceCls:
        return serviceCls(resource.accessurl, meta)
except Exception:
    # service construction failed; treat the resource as not searchable
    return None
def split_str_array_cell(val, delim=None):
"""
split an encoded string array value into a tuple. The VAO registry's
search service encodes string array values by delimiting the elements
with pound signs ('#'). These delimiters also mark the start and end
of the encoded value. This function converts the encoded value
into a split tuple.
Parameters
----------
val : str
the original string value to split
delim : str
the delimiter that separates the values; defaults to '#'
"""
if not val: return val
if delim is None:
    # choose a default delimiter that matches the value's type so that
    # both byte strings and text strings split cleanly (replaces the
    # old eval-based Python 2/3 workaround)
    delim = b'#' if isinstance(val, bytes) else '#'
if val[0:1] == delim: val = val[1:]
if val[-1:] == delim: val = val[:-1]
return tuple(val.split(delim))
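# Example (illustrative): the VAO registry encodes string arrays as
# '#'-delimited values, so
#
#     split_str_array_cell("#radio#optical#")  ->  ('radio', 'optical')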
|
{"hexsha": "fb50620deb48ec23560b5aeaca15777bc86e9dde", "size": 35115, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyvo/registry/vao.py", "max_stars_repo_name": "bsipocz/pyvo", "max_stars_repo_head_hexsha": "290159d3e7218f6f8d9edee3145cfd2bee190130", "max_stars_repo_licenses": ["BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2016-07-12T22:27:36.000Z", "max_stars_repo_stars_event_max_datetime": "2016-07-12T22:27:36.000Z", "max_issues_repo_path": "pyvo/registry/vao.py", "max_issues_repo_name": "bsipocz/pyvo", "max_issues_repo_head_hexsha": "290159d3e7218f6f8d9edee3145cfd2bee190130", "max_issues_repo_licenses": ["BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pyvo/registry/vao.py", "max_forks_repo_name": "bsipocz/pyvo", "max_forks_repo_head_hexsha": "290159d3e7218f6f8d9edee3145cfd2bee190130", "max_forks_repo_licenses": ["BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.0449101796, "max_line_length": 99, "alphanum_fraction": 0.5761070767, "include": true, "reason": "import numpy", "num_tokens": 7681}
|
import os
import argparse
import ast
import gc
import logging
import math
import sys
import time
import numpy as np
import pandas as pd
import hparameters
import utility
import json
def main(m_params, start_counter=0):
param_dictionary = {} #Format is key:value = htune_number: dict containing variable params
# defining hyperparameter ranges
if m_params['model_name'] == "HCGRU" :
# Rectified Adam Parameters
lrs_max_min = [ (1e-3, 1e-4), (1e-4,1e-5)] # maximum and minimum learning rate
b1s = [0.75, 0.9] # beta 1
b2s = [0.9, 0.99] # beta 2
inp_dropouts = [0.1, 0.225, 0.35] #input dropout
rec_dropouts = [0.1, 0.225, 0.35] #recurrent dropout
clip_norms = [12.5]
counter = 0
os.makedirs('hypertune',exist_ok=True)
f_training = open(f"hypertune/{m_params['model_name']}_train.txt","w")
# f_training2 = open("hypertune/hypertune_train2.txt","w")
f_testings = [ open(f"hypertune/{m_params['model_name']}_test_{idx+1}.txt","w") for idx in range(3) ]
for lr in lrs_max_min:
for b1 in b1s:
for b2 in b2s:
for inpd in inp_dropouts:
for recd in rec_dropouts:
new_record = { 'lr_max':lr[0], 'lr_min':lr[1], 'b1':b1, 'b2':b2, 'inp_dropout':inpd, 'rec_dropout':recd }
param_dictionary.update( {counter:new_record} )
print(f"\n\n Training model v{counter}")
train_cmd = train_cmd_maker( m_params['model_name'], lr, b1, b2, inpd, recd, counter )
f_training.write(f'{train_cmd} && ')
print(f" Testing model v{counter}")
test_cmd = test_cmd_maker( m_params['model_name'], inpd, recd, counter )
#f_testing.write(f'{train_cmd} && ')
f_testings[int(counter%3)].write(f'{test_cmd} && ')
counter = counter + 1
f_training.close()
[ f.close() for f in f_testings ]
elif m_params['model_name'] == "TRUNET":
# Rectified Adam Parameters
lrs_max_min = [ (1e-3, 1e-4),(1e-4, 1e-5) ] # maximum and minimum learning rate
b1s = [0.9] # beta 1
b2s = [0.9, 0.99] # beta 2
dropouts = [0.15, 0.35 ]
inp_dropouts = [0.15, 0.35] #input dropout
rec_dropouts = [0.15, 0.35] #recurrent dropout
clip_norms = [4.5, 5.5]
counter = start_counter
os.makedirs('hypertune',exist_ok=True)
f_training = open(f"hypertune/{m_params['model_name']}_train.txt","a")
f_testings = [ open(f"hypertune/{m_params['model_name']}_test_{idx+1}.txt","a") for idx in range(3) ]
for lr in lrs_max_min:
for b1 in b1s:
for b2 in b2s:
for inpd in inp_dropouts:
for recd in rec_dropouts:
for clip_norm in clip_norms:
for dropout in dropouts:
new_record = { 'lr_max':lr[0], 'lr_min':lr[1], 'b2':b2, 'inp_dropout':inpd, 'rec_dropout':recd,
'clip_norm':clip_norm, 'dropout':dropout }
param_dictionary.update( {counter:new_record} )
print(f"\n\n Training model v{counter}")
train_cmd = train_cmd_maker( m_params['model_name'], lr, b1, b2, inpd, recd, counter, clip_norm=clip_norm, do=dropout )
#f_training.write(f'{train_cmd} && ')
print(f" Testing model v{counter}")
test_cmd = test_cmd_maker( m_params['model_name'], inpd, recd, counter, dropout )
#f_testings[int(counter%3)].write(f'{test_cmd} && ')
counter = counter + 1
f_training.close()
[ f.close() for f in f_testings ]
save_param_dict(m_params['model_name'], param_dictionary)
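# Note: the nested loops above enumerate the full Cartesian product of the
# hyperparameter lists. An equivalent, flatter sketch (illustrative only,
# shown for the HCGRU branch) using the standard library:
#
#     import itertools
#     for lr, b1, b2, inpd, recd in itertools.product(
#             lrs_max_min, b1s, b2s, inp_dropouts, rec_dropouts):
#         ...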
def train_cmd_maker( mn ,lr_min_max, b1, b2, inp_drop, rec_drop, counter,gpu=None,clip_norm=6.5, do=0.2):
cmd = [
f"CUDA_VISIBLE_DEVICES=0",
# f"CUDA_VISIBLE_DEVICES={gpu}",
"python3", "train.py","-mn",f"{mn}",
"-ctsm", "1999_2009_2014", "-mts",
f"\"{{'htuning':True, 'htune_version':{counter},'stochastic':False,'stochastic_f_pass':1,'clip_norm':{clip_norm},'discrete_continuous':True,'var_model_type':'mc_dropout','do':{do},'ido':{inp_drop},'rdo':{rec_drop}, 'b1':{b1}, 'b2':{b2}, 'lr_max':{lr_min_max[0]}, 'lr_min':{lr_min_max[1]}, 'location':['Cardiff','London','Glasgow','Birmingham','Lancaster','Manchester','Liverpool','Bradford','Edinburgh','Leeds'] }}\"",
"-dd", "/media/Data3/akanni/Rain_Data_Mar20", "-bs", "16"]
cmd2 = ' '.join(cmd)
return cmd2
def test_cmd_maker( mn,inp_drop, rec_drop, counter, do=0.2):
cmd = [
f"CUDA_VISIBLE_DEVICES={int(counter%3)+1}",
"python3", "predict.py", "-mn", f"{mn}", "-ctsm", "1999_2009_2014", "-ctsm_test", "2014_2019-07-04", "-mts",
f"\"{{'htuning':True,'htune_version':{counter},'stochastic':True,'stochastic_f_pass':5,'distr_type':'Normal','discrete_continuous':True,'var_model_type':'mc_dropout', 'do':{do},'ido':{inp_drop},'rdo':{rec_drop}, 'location':['Cardiff','London','Glasgow','Birmingham','Lancaster','Manchester','Liverpool','Bradford','Edinburgh','Leeds'],'location_test':['Cardiff','London','Glasgow','Birmingham','Lancaster','Manchester','Liverpool','Bradford','Edinburgh','Leeds']}}\"",
"-ts", f"\"{{'region_pred':True}}\"", "-dd", "/media/Data3/akanni/Rain_Data_Mar20", "-bs", f"{65}" ]
cmd2 = ' '.join(cmd)
return cmd2
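# Example (illustrative): train_cmd_maker("HCGRU", (1e-3, 1e-4), 0.9, 0.99,
# 0.1, 0.1, 0) returns a single shell command string of the form
# "CUDA_VISIBLE_DEVICES=0 python3 train.py -mn HCGRU -ctsm 1999_2009_2014 ..."
# ready to be chained with '&&' into the hypertune scripts.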
def save_param_dict(mn, param_dict ):
path_ = os.path.join('./hypertune',f'{mn}_param_dict.json')
json.dump( param_dict, open(path_,"w") )
if __name__ == "__main__":
s_dir = utility.get_script_directory(sys.argv[0])
args_dict = utility.parse_arguments(s_dir)
main( args_dict )
#python3 hypertuning.py -mn "HCGRU" -mts "{}" -ctsm ""
#python3 hypertuning.py -mn "TRUNET" -mts "{}" -ctsm ""
|
{"hexsha": "550085e3212a0656c80c84c10e89d8d9cbb196e7", "size": 6683, "ext": "py", "lang": "Python", "max_stars_repo_path": "hypertuning.py", "max_stars_repo_name": "Akanni96/TRUNET", "max_stars_repo_head_hexsha": "12dff08f2361848e13b0952540e2198db386eab8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2020-08-29T22:41:18.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-28T03:21:33.000Z", "max_issues_repo_path": "hypertuning.py", "max_issues_repo_name": "Akanni96/TRUNET", "max_issues_repo_head_hexsha": "12dff08f2361848e13b0952540e2198db386eab8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2020-08-30T10:12:11.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-08T02:54:32.000Z", "max_forks_repo_path": "hypertuning.py", "max_forks_repo_name": "Akanni96/TRUNET", "max_forks_repo_head_hexsha": "12dff08f2361848e13b0952540e2198db386eab8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-02-25T12:23:32.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-17T09:37:05.000Z", "avg_line_length": 46.0896551724, "max_line_length": 472, "alphanum_fraction": 0.5300014963, "include": true, "reason": "import numpy", "num_tokens": 1811}
|
!-----------------------------------------------------------------------
! Fortran2003 interface to CUDA Library for Skeleton 2D Electrostatic
! GPU-MPI PIC Code */
! written by Viktor K. Decyk, UCLA
module gpuppush2_c
use iso_c_binding
implicit none
!
interface
subroutine cgpuppgppush2l(g_ppart,g_fxy,g_kpic,noff,nyp,qbm,dt,&
&g_ek,nx,ny,mx,my,idimp,nppmx,nxv,nypmx,mx1,mxyp1,ipbc) &
&bind(C,name='cgpuppgppush2l')
use iso_c_binding
implicit none
integer(c_int), value :: nx, ny, mx, my, idimp, nppmx, nxv
integer(c_int), value :: nypmx, mx1, mxyp1, ipbc
integer(c_int), value :: noff, nyp
real(c_float), value :: qbm, dt
type (c_ptr), value :: g_ppart, g_fxy, g_kpic, g_ek
end subroutine
end interface
!
interface
subroutine cgpu2ppgppost2l(g_ppart,g_q,g_kpic,noff,qm,idimp, &
&nppmx,mx,my,nxv,nypmx,mx1,mxyp1) bind(C,name='cgpu2ppgppost2l')
use iso_c_binding
implicit none
integer(c_int), value :: idimp, nppmx, mx, my, nxv, nypmx, mx1
integer(c_int), value :: mxyp1
integer(c_int), value :: noff
real(c_float), value :: qm
type (c_ptr), value :: g_ppart, g_q, g_kpic
end subroutine
end interface
!
interface
subroutine cgpuppcaguard2xl(g_qc,g_scs,g_q,nyp,nx,nxe,nypmx, &
&nxvh,kypd) bind(C,name='cgpuppcaguard2xl')
use iso_c_binding
implicit none
integer(c_int), value :: nx, nxe, nypmx, nxvh, kypd
integer(c_int), value :: nyp
type (c_ptr), value :: g_qc, g_scs, g_q
end subroutine
end interface
!
interface
subroutine cgpuppcaguard2yl(g_qc,g_scr,nx,nxvh,kypd) &
& bind(C,name='cgpuppcaguard2yl')
use iso_c_binding
implicit none
integer(c_int), value :: nx, nxvh, kypd
type (c_ptr), value :: g_qc, g_scr
end subroutine
end interface
!
interface
subroutine cgpuppccguard2xl(g_fxyc,g_scs,g_fxy,nyp,nx,nxe,nypmx&
&,nxvh,kypd) bind(C,name='cgpuppccguard2xl')
use iso_c_binding
implicit none
integer(c_int), value :: nx, nxe, nypmx, nxvh, kypd
integer(c_int), value :: nyp
type (c_ptr), value :: g_fxyc, g_scs, g_fxy
end subroutine
end interface
!
interface
subroutine cgpuppccguard2yl(g_fxy,g_scr,nyp,nx,nxe,nxvh,nypmx) &
&bind(C,name='cgpuppccguard2yl')
use iso_c_binding
implicit none
integer(c_int), value :: nx, nxe, nxvh, nypmx
integer(c_int), value :: nyp
type (c_ptr), value :: g_fxy, g_scr
end subroutine
end interface
!
interface
subroutine cgpupppord2la(g_ppart,g_ppbuff,g_sbufl,g_sbufr, &
&g_kpic,g_ncl,g_ihole,g_ncll,g_nclr,noff,nyp,idimp,nppmx,nx,ny,mx, &
&my,mx1,myp1,npbmx,ntmax,nbmax,g_irc) bind(C,name='cgpupppord2la')
use iso_c_binding
implicit none
integer(c_int), value :: idimp, nppmx, nx, ny, mx, my, mx1
integer(c_int), value ::myp1, npbmx, ntmax, nbmax
integer(c_int), value:: noff, nyp
type (c_ptr), value :: g_ppart, g_ppbuff, g_sbufl, g_sbufr
type (c_ptr), value :: g_kpic, g_ncl, g_ihole, g_ncll, g_nclr
type (c_ptr), value :: g_irc
end subroutine
end interface
!
interface
subroutine cgpupppord2lb(g_ppart,g_ppbuff,g_rbufl,g_rbufr, &
&g_kpic,g_ncl,g_ihole,g_mcll,g_mclr,idimp,nppmx,mx1,myp1,npbmx, &
&ntmax,nbmax,g_irc) bind(C,name='cgpupppord2lb')
use iso_c_binding
implicit none
integer(c_int), value :: idimp, nppmx, mx1, myp1, npbmx, ntmax
integer(c_int), value :: nbmax
type (c_ptr), value :: g_ppart, g_ppbuff, g_rbufl, g_rbufr
type (c_ptr), value :: g_kpic, g_ncl, g_ihole, g_mcll, g_mclr
type (c_ptr), value :: g_irc
end subroutine
end interface
!
interface
subroutine cgpuppois22t(g_qt, g_fxyt,g_ffct,g_we,nx,ny,kstrt, &
&nyv,kxp1,nyhd) bind(C,name='cgpuppois22t')
use iso_c_binding
implicit none
integer(c_int), value :: nx, ny, kstrt, nyv, kxp1, nyhd
type (c_ptr), value :: g_qt, g_fxyt, g_ffct, g_we
end subroutine
end interface
!
interface
subroutine cgpuwppfft2rcsx(g_f,g_bsm,isign,g_mixup,g_sct,indx, &
&indy,kstrt,nvp,kxp1,kyp,nxhd,kypd,nxhyd,nxyhd) &
&bind(C,name='cgpuwppfft2rcsx')
use iso_c_binding
implicit none
integer(c_int), value :: isign, indx, indy, kstrt, nvp, kxp1
integer(c_int), value :: kyp, nxhd, kypd, nxhyd, nxyhd
type (c_ptr), value :: g_f, g_bsm, g_mixup, g_sct
end subroutine
end interface
!
interface
subroutine cgpuwppfft2rcsy(g_g,g_brm,isign,g_mixup,g_sct,indx, &
&indy,kstrt,nvp,kxp1,kyp,nyd,nxhyd,nxyhd) &
&bind(C,name='cgpuwppfft2rcsy')
use iso_c_binding
implicit none
integer(c_int), value :: isign, indx, indy, kstrt, nvp, kxp1
integer(c_int), value :: kyp, nyd, nxhyd, nxyhd
type (c_ptr), value :: g_g, g_brm, g_mixup, g_sct
end subroutine
end interface
!
interface
subroutine cgpuwppfft2rcsxn(g_fn,g_bsm,isign,g_mixup,g_sct,indx&
&,indy,ndim,kstrt,nvp,kxp1,kyp,nxhd,kypd,nxhyd,nxyhd) &
&bind(C,name='cgpuwppfft2rcsxn')
use iso_c_binding
implicit none
integer(c_int), value :: isign, indx, indy, ndim, kstrt, nvp
integer(c_int), value :: kxp1, kyp, nxhd, kypd, nxhyd, nxyhd
type (c_ptr), value :: g_fn, g_bsm, g_mixup, g_sct
end subroutine
end interface
!
interface
subroutine cgpuwppfft2rcsyn(g_gn,g_brm,isign,g_mixup,g_sct,indx&
&,indy,ndim,kstrt,nvp,kxp1,kyp,nyd,nxhyd,nxyhd) &
&bind(C,name='cgpuwppfft2rcsyn')
use iso_c_binding
implicit none
integer(c_int), value :: isign, indx, indy, ndim, kstrt, nvp
integer(c_int), value :: kxp1, kyp, nyd, nxhyd, nxyhd
type (c_ptr), value :: g_gn, g_brm, g_mixup, g_sct
end subroutine
end interface
!
interface
subroutine cgpuppltpose(g_f,g_g,nx,ny,kxp,kyp,kstrt,nxv,nyv) &
&bind(C,name='cgpuppltpose')
use iso_c_binding
implicit none
integer(c_int), value:: nx, ny, kxp, kyp, kstrt, nxv, nyv
type (c_ptr), value :: g_f, g_g
end subroutine
end interface
!
interface
subroutine cgpuppltposen(g_fn,g_gn,nx,ny,kxp,kyp,kstrt,ndim,nxv&
&,nyv) bind(C,name='cgpuppltposen')
use iso_c_binding
implicit none
integer(c_int), value :: nx, ny, kxp, kyp, kstrt, ndim, nxv
integer(c_int), value :: nyv
type (c_ptr), value :: g_fn, g_gn
end subroutine
end interface
!
interface
subroutine cgpusum2(g_a,g_sa,nx) bind(C,name='cgpusum2')
use iso_c_binding
implicit none
integer(c_int), value :: nx
type (c_ptr), value :: g_a, g_sa
end subroutine
end interface
!
end module
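! Illustrative call from Fortran (a sketch: g_a, g_sa are hypothetical
! c_ptr device handles obtained from the library's GPU allocation/copy
! routines, and nx is the number of elements to reduce):
!    call cgpusum2(g_a, g_sa, nx)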
|
{"hexsha": "f70b12ba7faccbfd056d536ea3d13894a8a15106", "size": 7370, "ext": "f03", "lang": "FORTRAN", "max_stars_repo_path": "gpu/gpuppic2/gpuppush2_c.f03", "max_stars_repo_name": "gcasabona/cuda", "max_stars_repo_head_hexsha": "064cfa02398e2402c113d45153d7ba36ae930f7e", "max_stars_repo_licenses": ["W3C"], "max_stars_count": 51, "max_stars_repo_stars_event_min_datetime": "2017-03-22T04:06:03.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-18T22:48:51.000Z", "max_issues_repo_path": "gpu/gpuppic2/gpuppush2_c.f03", "max_issues_repo_name": "gcasabona/cuda", "max_issues_repo_head_hexsha": "064cfa02398e2402c113d45153d7ba36ae930f7e", "max_issues_repo_licenses": ["W3C"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "gpu/gpuppic2/gpuppush2_c.f03", "max_forks_repo_name": "gcasabona/cuda", "max_forks_repo_head_hexsha": "064cfa02398e2402c113d45153d7ba36ae930f7e", "max_forks_repo_licenses": ["W3C"], "max_forks_count": 25, "max_forks_repo_forks_event_min_datetime": "2017-02-22T05:21:32.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-02T14:53:19.000Z", "avg_line_length": 37.0351758794, "max_line_length": 73, "alphanum_fraction": 0.5940298507, "num_tokens": 2403}
|
#!/usr/bin/env python
# coding: utf-8
# # Fit an H2O GBM on the Lending Club data
# ### Imports
# In[1]:
import pandas as pd
import numpy as np
import random, time, os, pickle
import matplotlib.pyplot as plt
from feature_engine import categorical_encoders as ce
from feature_engine import discretisers as dsc
from feature_engine import missing_data_imputers as mdi
from feature_engine import feature_selection as fs
from sklearn.pipeline import Pipeline as pipe
import h2o
from h2o.estimators import H2OXGBoostEstimator, H2OGradientBoostingEstimator
from h2o.grid.grid_search import H2OGridSearch
import warnings
warnings.filterwarnings('ignore')
pd.set_option('display.max_columns', None)
get_ipython().run_line_magic('matplotlib', 'inline')
# ### Read data into pandas and fix date columns
# In[2]:
df = pd.read_csv("../data/lending_club_loan_two.csv")
# In[3]:
## Fix date columns:
dt_cols = ['issue_d','earliest_cr_line']
# "%b-%Y" matches values like "Jan-2015"; 'null'/'NULL'/'' are mapped to NaN first
df.loc[:,dt_cols] = (
    df.loc[:,dt_cols]
    .applymap(lambda x: np.nan if x in ['null','NULL',''] else x)
    .apply(lambda x: pd.to_datetime(x, format="%b-%Y"))
)
# ### Split into train and test
# In[4]:
p_trn = 0.8
n = df.shape[0]
df = df.sample(frac=1,random_state=1).reset_index(drop=True)
df_train = df.iloc[:int(n*p_trn),:].copy()
df_test = df.iloc[int(n*p_trn):,:].copy()
# In[5]:
df_train.head(5)
# ## Verify data types
# ### Make sure all columns are of the correct data type
# In[6]:
dtype_dict = df_train.dtypes.to_dict()
# In[7]:
numeric_cols = [k for k,v in dtype_dict.items() if pd.api.types.is_numeric_dtype(v)]
numeric_cols.sort()
print("Numeric columns: " + ", ".join(numeric_cols))
# In[8]:
object_cols = [k for k,v in dtype_dict.items() if pd.api.types.is_object_dtype(v)]
object_cols.sort()
print("Object columns: " + ", ".join(object_cols))
# In[9]:
datetime_cols = [k for k,v in dtype_dict.items() if pd.api.types.is_datetime64_any_dtype(v)]
datetime_cols.sort()
print("Datetime columns: " + ", ".join(datetime_cols))
# In[10]:
other_cols = [c for c in df.columns if c not in numeric_cols + object_cols + datetime_cols]
print("Columns not accounted for: " + ", ".join(other_cols))
# ## Apply NULL/null/NA/NaN consistently
#
# Different data sources may result in different formats for null/missing values. It's typically a good idea to apply a consistent format. I'll do this by replacing 'NULL', 'null' and '' in character columns with `np.nan`.
# In[11]:
df_train.loc[:,object_cols] = (
    df_train.loc[:,object_cols]
    .applymap(lambda x: np.nan if str(x).lower() in ['null',''] else x)
)
# In[12]:
target = 'loan_status'
object_cols_x_target = [c for c in object_cols if c != target]
cols_to_drop = datetime_cols + ['address']
p = pipe([
# Add missing indicators for numeric features
("num_nan_ind",mdi.AddMissingIndicator(variables=numeric_cols)),
# Add missing level for categorical features
("fill_cat_nas",mdi.CategoricalVariableImputer(
fill_value = "_MISSING_", variables=object_cols_x_target)),
# Bin rare levels of categorical variables
("rare_cats",ce.RareLabelCategoricalEncoder(
tol = 0.02, replace_with = "_RARE_", variables=object_cols_x_target)),
# Impute missing numeric variables with median
("rmmean",mdi.MeanMedianImputer(imputation_method = 'median',variables=numeric_cols)),
# Drop dates and address columns
("drop_date",fs.DropFeatures(features_to_drop=cols_to_drop))])
# In[13]:
df_train_prepped = p.fit_transform(df_train)
# In[14]:
df_train_prepped.head()
# In[15]:
h2o.init()
# In[16]:
h2o_train = h2o.H2OFrame(df_train_prepped)
h2o_train['loan_status'] = h2o_train['loan_status'].asfactor()
# In[17]:
predictors = df_train_prepped.columns.to_list()
predictors.remove('loan_status')
# In[18]:
# Set up gbm parameter grid and fit in h2o
hyper_params = {
'ntrees': [100,200],
'max_depth': [2,4,6],
'learn_rate': [0.05, 0.1]
}
gbm = H2OGradientBoostingEstimator(
distribution='bernoulli',
seed = 1)
h2o_train1, h2o_train2 = h2o_train.split_frame(
ratios=[0.8],
seed = 1)
gbm_grid = H2OGridSearch(
    model = gbm,
    hyper_params = hyper_params
)
gbm_grid.train(
    training_frame = h2o_train1,
    x = predictors,
    y = 'loan_status',
    validation_frame = h2o_train2)
# In[19]:
grid_results = gbm_grid.get_grid(
    sort_by="logloss")
# In[20]:
best_params = grid_results.models[0].actual_params
best_params = dict((k,best_params[k]) for k in ('ntrees','max_depth','learn_rate'))
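# best_params now holds something like
# {'ntrees': 200, 'max_depth': 6, 'learn_rate': 0.1} (illustrative values;
# the actual winner is whichever combination minimizes validation logloss)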
# In[21]:
best_params
# In[22]:
df_test_prepped = p.transform(df_test)
h2o_test = h2o.H2OFrame(df_test_prepped)
h2o_test['loan_status'] = h2o_test['loan_status'].asfactor()
# In[23]:
gbm = H2OGradientBoostingEstimator(
distribution='bernoulli',
model_id = 'final_gbm',
**best_params,
seed = 1)
# In[24]:
gbm.train(
training_frame = h2o_train,
x = predictors,
y = 'loan_status',
validation_frame = h2o_test)
# In[25]:
#path = os.getcwd() + '/lendingclub-app/src/main/resources/'
#if not os.path.exists(path):
# os.makedirs(path)
#gbm.download_mojo(path=path, get_genmodel_jar=False)
# In[26]:
gbm.predict(h2o_test[0:10,:])
# In[29]:
df_test.head(10).to_pickle(path = 'test_cases.pkl')
# In[31]:
with open('pipeline.pkl','wb') as f:
pickle.dump(p,f)
# In[30]:
h2o.shutdown()
|
{"hexsha": "2d3c0ed13fc069468328d3e76feb48d2d76de567", "size": 5379, "ext": "py", "lang": "Python", "max_stars_repo_path": "_build/jupyter_execute/H2O_with_LendingClub.py", "max_stars_repo_name": "Strabes/h2o-prod", "max_stars_repo_head_hexsha": "2bfd4c87302c2ca3219b0bc313f13c9e787d84ad", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "_build/jupyter_execute/H2O_with_LendingClub.py", "max_issues_repo_name": "Strabes/h2o-prod", "max_issues_repo_head_hexsha": "2bfd4c87302c2ca3219b0bc313f13c9e787d84ad", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "_build/jupyter_execute/H2O_with_LendingClub.py", "max_forks_repo_name": "Strabes/h2o-prod", "max_forks_repo_head_hexsha": "2bfd4c87302c2ca3219b0bc313f13c9e787d84ad", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 18.4212328767, "max_line_length": 221, "alphanum_fraction": 0.696969697, "include": true, "reason": "import numpy", "num_tokens": 1512}
|
"""
This module contains a function to plot the cross correlations computed with
stack_ccorr_tremor and the autocorrelations computed with
stack_acorr_tremor sorted by different criteria
"""
import obspy
from obspy.signal.cross_correlation import correlate
import matplotlib.pyplot as plt
import numpy as np
import pickle
from stacking import linstack, powstack, PWstack
def plot_stack_sort(arrayName, x0, y0, type_stack, w, cc_stack, type_sort, \
Tmax, amp, n1, n2, ncor, tmin, tmax, RMSmin, RMSmax):
"""
This function plots the cross correlations computed with
stack_ccorr_tremor and the autocorrelations computed with
stack_acorr_tremor sorted by different criteria
Input:
type arrayName = string
arrayName = Name of seismic array
type x0 = float
x0 = Distance of the center of the cell from the array (east)
type y0 = float
y0 = Distance of the center of the cell from the array (north)
type type_stack = string
type_stack = Type of stack ('lin', 'pow', 'PWS')
type w = float
w = Power of the stack (for 'pow' and 'PWS')
type cc_stack = string
cc_stack = Type of stack ('lin', 'pow', 'PWS') over tremor windows
type type_sort = string
type_sort = Criterion used to sort the cross correlations ('ccmaxEW',
'ccmaxNS', 'cc0EW', 'cc0NS', 'timedelayEW', 'timedelayNS', 'rmsEW'
or 'rmsNS')
type Tmax = float
Tmax = Maximum time lag for cross correlation plot
type amp = float
amp = Amplification factor of cross correlation for plotting
type n1 = integer
n1 = Index of first tremor to be plotted
type n2 = integer
n2 = Index of last tremor to be plotted
type ncor = integer
ncor = Number of points for the cross correlation with the stack
type tmin = float
tmin = Minimum time lag for comparing cross correlation with the stack
type tmax = float
tmax = Maximum time lag for comparing cross correlation with the stack
type RMSmin = float
RMSmin = Minimum time lag to compute the RMS
type RMSmax = float
RMSmax = Maximum time lag to compute the RMS
"""
# Read file containing data from stack_ccorr_tremor
filename = 'cc/{}/{}_{:03d}_{:03d}/{}_{:03d}_{:03d}_{}.pkl'.format( \
arrayName, arrayName, int(x0), int(y0), arrayName, int(x0), int(y0), \
type_stack)
data = pickle.load(open(filename, 'rb'))
EW_UD = data[6]
NS_UD = data[7]
# Read file containing data from stack_acorr_tremor
filename = 'ac/{}/{}_{:03d}_{:03d}/{}_{:03d}_{:03d}_{}.pkl'.format( \
arrayName, arrayName, int(x0), int(y0), arrayName, int(x0), int(y0), \
type_stack)
data = pickle.load(open(filename, 'rb'))
EW = data[6]
NS = data[7]
UD = data[8]
# Stack over all tremor windows
if (cc_stack == 'lin'):
EWstack = linstack([EW_UD], normalize=False)[0]
NSstack = linstack([NS_UD], normalize=False)[0]
elif (cc_stack == 'pow'):
EWstack = powstack([EW_UD], w, normalize=False)[0]
NSstack = powstack([NS_UD], w, normalize=False)[0]
elif (cc_stack == 'PWS'):
EWstack = PWstack([EW_UD], w, normalize=False)[0]
NSstack = PWstack([NS_UD], w, normalize=False)[0]
else:
raise ValueError( \
'Type of stack must be lin, pow, or PWS')
# Initialize indicators of cross correlation fit
nt = len(EW_UD)
ccmaxEW = np.zeros(nt)
cc0EW = np.zeros(nt)
timedelayEW = np.zeros(nt)
rmsEW = np.zeros(nt)
ccmaxNS = np.zeros(nt)
cc0NS = np.zeros(nt)
timedelayNS = np.zeros(nt)
rmsNS = np.zeros(nt)
# Windows of the cross correlation to look at
i0 = int((len(EWstack) - 1) / 2)
ibegin = i0 + int(tmin / EWstack.stats.delta)
iend = i0 + int(tmax / EWstack.stats.delta) + 1
rmsb = i0 + int(RMSmin / EWstack.stats.delta)
rmse = i0 + int(RMSmax / EWstack.stats.delta) + 1
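# i0 is the index of zero lag: a lag of t seconds maps to sample index
# i0 + t / delta in the stacked trace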
for i in range(0, nt):
rmsEW[i] = np.max(np.abs(EW_UD[i][ibegin : iend])) / \
np.sqrt(np.mean(np.square(EW_UD[i][rmsb:rmse])))
rmsNS[i] = np.max(np.abs(NS_UD[i][ibegin : iend])) / \
np.sqrt(np.mean(np.square(NS_UD[i][rmsb:rmse])))
# Cross correlate cc for EW with stack
cc_EW = correlate(EW_UD[i][ibegin : iend], EWstack[ibegin : iend], \
ncor)
ccmaxEW[i] = np.max(cc_EW)
cc0EW[i] = cc_EW[ncor]
timedelayEW[i] = (np.argmax(cc_EW) - ncor) * EWstack.stats.delta
# Cross correlate cc for NS with stack
cc_NS = correlate(NS_UD[i][ibegin : iend], NSstack[ibegin : iend], \
ncor)
ccmaxNS[i] = np.max(cc_NS)
cc0NS[i] = cc_NS[ncor]
timedelayNS[i] = (np.argmax(cc_NS) - ncor) * NSstack.stats.delta
# Sort cross correlations
if (type_sort == 'ccmaxEW'):
order = np.argsort(ccmaxEW)
elif (type_sort == 'ccmaxNS'):
order = np.argsort(ccmaxNS)
elif (type_sort == 'cc0EW'):
order = np.argsort(cc0EW)
elif (type_sort == 'cc0NS'):
order = np.argsort(cc0NS)
elif (type_sort == 'timedelayEW'):
order = np.flip(np.argsort(np.abs(timedelayEW)), axis=0)
elif (type_sort == 'timedelayNS'):
order = np.flip(np.argsort(np.abs(timedelayNS)), axis=0)
elif (type_sort == 'rmsEW'):
order = np.argsort(rmsEW)
elif (type_sort == 'rmsNS'):
order = np.argsort(rmsNS)
else:
raise ValueError( \
'Type of ranking must be ccmaxEW, ccmaxNS, cc0EW, cc0NS, ' + \
'timedelayEW, timedelayNS, rmsEW or rmsNS')
# Plot cross correlations
plt.figure(1, figsize=(20, 15))
ax1 = plt.subplot(121)
for i in range(n1, n2):
index = order[nt - i - 1]
dt = EW_UD[index].stats.delta
ncor = int((EW_UD[index].stats.npts - 1) / 2)
t = dt * np.arange(- ncor, ncor + 1)
plt.plot(t, (2.0 * i + 1) - 2 * n1 + amp * EW_UD[index].data, 'k-')
plt.xlim(0, Tmax)
plt.ylim(0.0, 2.0 * (n2 - n1))
plt.title('East / Vertical component', fontsize=24)
plt.xlabel('Lag time (s)', fontsize=24)
plt.ylabel('Cross correlation', fontsize=24)
ax1.set_yticklabels([])
ax1.tick_params(labelsize=20)
ax2 = plt.subplot(122)
for i in range(n1, n2):
index = order[nt - i - 1]
dt = NS_UD[index].stats.delta
ncor = int((NS_UD[index].stats.npts - 1) / 2)
t = dt * np.arange(- ncor, ncor + 1)
plt.plot(t, (2.0 * i + 1) - 2 * n1 + amp * NS_UD[index].data, 'k-')
plt.xlim(0, Tmax)
plt.ylim(0.0, 2.0 * (n2 - n1))
plt.title('North / Vertical component', fontsize=24)
plt.xlabel('Lag time (s)', fontsize=24)
plt.ylabel('Cross correlation', fontsize=24)
ax2.set_yticklabels([])
ax2.tick_params(labelsize=20)
plt.suptitle('{} at {} km, {} km ({} - {}) sorted by {}'.format( \
arrayName, x0, y0, type_stack, cc_stack, type_sort), fontsize=24)
plt.savefig('cc/{}/{}_{:03d}_{:03d}/{}_{:03d}_{:03d}_{}_{}_sort_{}.eps'. \
format(arrayName, arrayName, int(x0), int(y0), arrayName, int(x0), \
int(y0), type_stack, cc_stack, type_sort), format='eps')
ax1.clear()
ax2.clear()
plt.close(1)
# Plot autocorrelations
plt.figure(2, figsize=(30, 15))
ax1 = plt.subplot(131)
for i in range(n1, n2):
index = order[nt - i - 1]
dt = EW[index].stats.delta
ncor = int((EW[index].stats.npts - 1) / 2)
t = dt * np.arange(- ncor, ncor + 1)
plt.plot(t, (2.0 * i + 1) - 2 * n1 + amp * EW[index].data, 'k-')
plt.xlim(0, Tmax)
plt.ylim(0.0, 2.0 * (n2 - n1))
plt.title('East component', fontsize=24)
plt.xlabel('Lag time (s)', fontsize=24)
plt.ylabel('Autocorrelation', fontsize=24)
ax1.set_yticklabels([])
ax1.tick_params(labelsize=20)
ax2 = plt.subplot(132)
for i in range(n1, n2):
index = order[nt - i - 1]
dt = NS[index].stats.delta
ncor = int((NS[index].stats.npts - 1) / 2)
t = dt * np.arange(- ncor, ncor + 1)
plt.plot(t, (2.0 * i + 1) - 2 * n1 + amp * NS[index].data, 'k-')
plt.xlim(0, Tmax)
plt.ylim(0.0, 2.0 * (n2 - n1))
plt.title('North component', fontsize=24)
plt.xlabel('Lag time (s)', fontsize=24)
plt.ylabel('Autocorrelation', fontsize=24)
ax2.set_yticklabels([])
ax2.tick_params(labelsize=20)
ax3 = plt.subplot(133)
for i in range(n1, n2):
index = order[nt - i - 1]
dt = UD[index].stats.delta
ncor = int((UD[index].stats.npts - 1) / 2)
t = dt * np.arange(- ncor, ncor + 1)
plt.plot(t, (2.0 * i + 1) - 2 * n1 + amp * UD[index].data, 'k-')
plt.xlim(0, Tmax)
plt.ylim(0.0, 2.0 * (n2 - n1))
plt.title('Vertical component', fontsize=24)
plt.xlabel('Lag time (s)', fontsize=24)
plt.ylabel('Autocorrelation', fontsize=24)
ax3.set_yticklabels([])
ax3.tick_params(labelsize=20)
plt.suptitle('{} at {} km, {} km ({} - {}) sorted by {}'.format( \
arrayName, x0, y0, type_stack, cc_stack, type_sort), fontsize=24)
plt.savefig('ac/{}/{}_{:03d}_{:03d}/{}_{:03d}_{:03d}_{}_{}_sort_{}.eps'. \
format(arrayName, arrayName, int(x0), int(y0), arrayName, int(x0), \
int(y0), type_stack, cc_stack, type_sort), format='eps')
ax1.clear()
ax2.clear()
ax3.clear()
plt.close(2)
if __name__ == '__main__':
# Set the parameters
arrayName = 'BS'
x0 = 0.0
y0 = 0.0
w = 2.0
Tmax = 15.0
n1 = 0
n2 = 82
ncor = 40
tmin = 4.0
tmax = 6.0
RMSmin = 12.0
RMSmax = 14.0
# Loop over all combinations of tremor-window stack and cross-correlation
# stack; the plot amplification factor depends on the stack type
stack_configs = [ \
    ('lin', 'lin', 10.0), ('lin', 'pow', 10.0), ('lin', 'PWS', 10.0), \
    ('pow', 'lin', 2.0), ('pow', 'pow', 2.0), ('pow', 'PWS', 2.0), \
    ('PWS', 'lin', 50.0), ('PWS', 'pow', 50.0), ('PWS', 'PWS', 50.0)]
type_sorts = ['ccmaxEW', 'ccmaxNS', 'cc0EW', 'cc0NS', 'timedelayEW', \
    'timedelayNS', 'rmsEW', 'rmsNS']
for (type_stack, cc_stack, amp) in stack_configs:
    for type_sort in type_sorts:
        plot_stack_sort(arrayName, x0, y0, type_stack, w, cc_stack, \
            type_sort, Tmax, amp, n1, n2, ncor, tmin, tmax, RMSmin, RMSmax)
|
{"hexsha": "b53bb362d4c66208336225967578237471093a47", "size": 20259, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/plot_stack_sort.py", "max_stars_repo_name": "ArianeDucellier/timelags", "max_stars_repo_head_hexsha": "383c5702ad25405555d934c984ac8245722f8596", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-01-18T00:29:23.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-18T00:29:23.000Z", "max_issues_repo_path": "src/plot_stack_sort.py", "max_issues_repo_name": "ArianeDucellier/timelags", "max_issues_repo_head_hexsha": "383c5702ad25405555d934c984ac8245722f8596", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/plot_stack_sort.py", "max_forks_repo_name": "ArianeDucellier/timelags", "max_forks_repo_head_hexsha": "383c5702ad25405555d934c984ac8245722f8596", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 46.7875288684, "max_line_length": 78, "alphanum_fraction": 0.6140974382, "include": true, "reason": "import numpy", "num_tokens": 7078}
|
from manimlib.imports import *
import numpy as np
from manim_reveal import SlideScene
# import manimtda
from manimtda.linalg import *
class LLCombine(SlideScene):
CONFIG={
"camera_config":{"background_color":"#F0F1EB"},
"video_slides_dir":"../video_slides"
}
def construct(self):
title = TexMobject(r"L L = L", color=BLACK).shift(2*UP)
self.play(
Write(title),
)
self.slide_break()
L1 = Lmat().shift(1.125*RIGHT)
L2 = Lmat().shift(1.125*LEFT)
self.play(
ShowCreation(L1),
ShowCreation(L2)
)
self.slide_break()
self.play(
ApplyMethod(L1.shift, 1.125*LEFT),
ApplyMethod(L2.shift, 1.125*RIGHT)
)
|
{"hexsha": "b7553cf781954b615d1ec3aa3de6cd26dd9a979e", "size": 774, "ext": "py", "lang": "Python", "max_stars_repo_path": "animations/LL_combine.py", "max_stars_repo_name": "bnels/cse21", "max_stars_repo_head_hexsha": "daad575743bfa7c025c507b3a18a3dff445ca385", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-04-08T17:00:32.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-08T17:00:32.000Z", "max_issues_repo_path": "animations/LL_combine.py", "max_issues_repo_name": "bnels/cse21", "max_issues_repo_head_hexsha": "daad575743bfa7c025c507b3a18a3dff445ca385", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "animations/LL_combine.py", "max_forks_repo_name": "bnels/cse21", "max_forks_repo_head_hexsha": "daad575743bfa7c025c507b3a18a3dff445ca385", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.4545454545, "max_line_length": 63, "alphanum_fraction": 0.5684754522, "include": true, "reason": "import numpy", "num_tokens": 205}
|
'''
Copyright 2016 Jihun Hamm
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License
'''
import scipy.io
import numpy as np
from filterAlg_NN import NN1
from learningAlg import mlogreg
import minimaxFilter
##%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
mat = scipy.io.loadmat('genki.mat')
#nsubjs = np.asscalar(mat['nsubjs'])
K1 = np.asscalar(mat['K1'])
K2 = np.asscalar(mat['K2'])
D = np.asscalar(mat['D'])
Ntrain = np.asscalar(mat['Ntrain'])
Ntest = np.asscalar(mat['Ntest'])
N = Ntrain + Ntest
y1_train = mat['y1_train']-1
y1_test = mat['y1_test']-1
y1 = np.hstack((y1_train,y1_test))
del y1_train, y1_test
y2_train = mat['y2_train']-1
y2_test = mat['y2_test']-1
y2 = np.hstack((y2_train,y2_test))
del y2_train, y2_test
Xtrain = mat['Xtrain']
Xtest = mat['Xtest']
X = np.hstack((Xtrain,Xtest))
del Xtrain, Xtest
ind_train_dom1 = [[range(Ntrain)]]
ind_test_dom1 = [[range(Ntrain,Ntrain+Ntest)]]
##%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
ntrials = 1
ds = [10]#[10,20,50,100]
lambda0 = 1E-6
lambda1 = 1E-6
lambda2 = 1E-6
maxiter_minimax = 100
maxiter_final = 50
rho = 10.
##%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
##%% Minimax + NN1
rates1_minimax2 = np.nan*np.ones((len(ds),ntrials))
rates2_minimax2 = np.nan*np.ones((len(ds),ntrials))
W0_minimax2 = [[[] for i in range(ntrials)] for j in range(len(ds))]
for trial in range(ntrials):
for j in range(len(ds)):
d = ds[j]
nhs = [20,d] #
hparams0 = {'D':D, 'nhs':nhs, 'activation':'sigmoid', 'l':lambda0}
hparams1 = {'K':K1, 'l':lambda1, 'd':d}
hparams2 = {'K':K2,'l':lambda2, 'd':d}
if False:
W0 = NN1.init(hparams0)
else:
print('Pre-training by autoencoder')
W0 = NN1.initByAutoencoder(X[:,ind_train_dom1[trial][0]].squeeze(),hparams0)
W1 = mlogreg.init(hparams1)
W2 = mlogreg.init(hparams2)
for iter in range(maxiter_minimax):
if True:#iter==maxiter_minimax-1:
G_train = NN1.g(W0,X[:,ind_train_dom1[trial][0]].squeeze(),hparams0)
#% Full training
tW1,f1 = mlogreg.train(G_train,y1[:,ind_train_dom1[trial][0]].squeeze(),hparams1,None,maxiter_final)
tW2,f2 = mlogreg.train(G_train,y2[:,ind_train_dom1[trial][0]].squeeze(),hparams2,None,maxiter_final)
#% Testing error
G_test = NN1.g(W0,X[:,ind_test_dom1[trial][0]].squeeze(),hparams0)
rate1,_ = mlogreg.accuracy(tW1,G_test,y1[:,ind_test_dom1[trial][0]].squeeze())
rate2,_ = mlogreg.accuracy(tW2,G_test,y2[:,ind_test_dom1[trial][0]].squeeze())
print('minimax (NN): rho=%f, d=%d, trial=%d, rate1=%f, rate2=%f\n' % \
    (rho,d,trial,rate1,rate2))
rates1_minimax2[j,trial] = rate1
rates2_minimax2[j,trial] = rate2
W0_minimax2[j][trial] = W0
W0,W1,W2 = minimaxFilter.run(W0,W1,W2,rho,'alt',1,\
X[:,ind_train_dom1[trial][0]].squeeze(), \
y1[:,ind_train_dom1[trial][0]].squeeze(),\
y2[:,ind_train_dom1[trial][0]].squeeze(),\
NN1,mlogreg,mlogreg,\
hparams0,hparams1,hparams2)
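# Each outer iteration above first evaluates the current filter (training
# both classifiers on the filtered features, then measuring test accuracy)
# and then performs one alternating minimax update of the filter W0 and the
# two classifiers W1 (target task) and W2 (private task).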
'''
>>> rates1_minimax2
array([[ 0.89 ],
[ 0.88 ],
[ 0.895],
[ 0.885]])
>>> rates2_minimax2
array([[ 0.6 ],
[ 0.565],
[ 0.64 ],
[ 0.64 ]])
'''
np.savez('test_NN_genki.npz',\
W0_minimax2=[W0_minimax2], \
rates1_minimax2=[rates1_minimax2],\
rates2_minimax2=[rates2_minimax2]\
)
|
{"hexsha": "52d0dff0c86a12650225e88f285c7b6939bf8ebc", "size": 4255, "ext": "py", "lang": "Python", "max_stars_repo_path": "test/test_NN_genki.py", "max_stars_repo_name": "jihunhamm/MinimaxFilter", "max_stars_repo_head_hexsha": "fa9ee7aa126cbf651c4c9cbf076e4ba848fcfc46", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2017-05-25T20:14:26.000Z", "max_stars_repo_stars_event_max_datetime": "2019-07-08T12:20:17.000Z", "max_issues_repo_path": "test/test_NN_genki.py", "max_issues_repo_name": "jihunhamm/MinimaxFilter", "max_issues_repo_head_hexsha": "fa9ee7aa126cbf651c4c9cbf076e4ba848fcfc46", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/test_NN_genki.py", "max_forks_repo_name": "jihunhamm/MinimaxFilter", "max_forks_repo_head_hexsha": "fa9ee7aa126cbf651c4c9cbf076e4ba848fcfc46", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2016-12-29T16:57:20.000Z", "max_forks_repo_forks_event_max_datetime": "2020-01-19T00:59:23.000Z", "avg_line_length": 30.1773049645, "max_line_length": 116, "alphanum_fraction": 0.5635722679, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1262}
|
import pandas as pd
import numpy as np
from sklearn.metrics.pairwise import nan_euclidean_distances
from statsmodels.distributions.empirical_distribution import ECDF
from matplotlib import pyplot as plt
import json
SAMPLES_FNAME = './data/samples_pathlens_100000.csv'
SIMILARITY_MATRIX_FNAME = './data/similarity_matrix_distances_pathlens_100k.csv'
RIPE_RIS_PEERS_FNAME = 'list_of_RIPE_RIS_peers.json'
PATHLEN_DISTANCES_SUM_FNAME = './data/pathlen_distances_sum.json'
PATHLEN_DISTANCES_AVG_FNAME = './data/pathlen_distances_avg.json'
PATHLEN_DISTANCES_MIN_FNAME = './data/pathlen_distances_min.json'
PATHLEN_DISTANCES_MAX_FNAME = './data/pathlen_distances_max.json'
PATHLEN_DISTANCES_SUM_5MAX_FNAME = './data/pathlen_distances_sum_5max.json'
PATHLEN_DISTANCES_SUM_10MAX_FNAME = './data/pathlen_distances_sum_10max.json'
PATHLEN_DISTANCES_SUM_5MIN_FNAME = './data/pathlen_distances_sum_5min.json'
PATHLEN_DISTANCES_SUM_10MIN_FNAME = './data/pathlen_distances_sum_10min.json'
THRESHOLD_MIN_SAMPLES = 200
# distance normalized by the number of features, so that monitors observed
# over different numbers of samples remain comparable
normalized_nan_euclidean_distances = lambda x : nan_euclidean_distances(x)/x.shape[1]
# variant that scales relative to a fixed maximum feature count
adapted_normalized_nan_euclidean_distances = lambda x,max_size : nan_euclidean_distances(x)/ max_size *np.sqrt(max_size/x.shape[1])
print('Reading csv file with pathlens...')
df = pd.read_csv(SAMPLES_FNAME, delimiter=',')
print('Basic data handling ...')
df = df.replace(-1,np.nan)
df = df.dropna(axis=0,how='all')
df = df.dropna(axis=1,how='all')
# df1 = df.iloc[0:3,0:3]
# # df1 = df1.to_numpy().transpose()
# df2 = df.iloc[0:2,0:3]
# # df2 = df2.to_numpy().transpose()
# print(df1)
# print(df1.shape)
print('Calculating transpose ...')
a = df.to_numpy().transpose()
# avg_distance = [1]
# max_distance = [1]
# previous_distance_matrix = np.ones((a.shape[0],a.shape[0]))
# for i in range(1,a.shape[1]):
# distance_matrix = normalized_nan_euclidean_distances(a[:,0:i])
# diff = np.abs(distance_matrix-previous_distance_matrix)
# avg_distance.append(np.nanmean(diff))
# max_distance.append(np.nanmax(diff))
# previous_distance_matrix = distance_matrix.copy()
# np.fill_diagonal(distance_matrix,np.nan)
# # print(distance_matrix)
# perfect_sim = np.where(distance_matrix==0)
# print(len(perfect_sim[0]))
# plt.plot(range(a.shape[1]),max_distance,range(a.shape[1]),avg_distance)
# plt.yscale('log')
# plt.show()
print('Calculating Euclidean distances...')
distance_matrix = normalized_nan_euclidean_distances(a)
np.fill_diagonal(distance_matrix,np.nan)
print('Creating the distance matrix...')
df[~df.isna()] = 1
df[df.isna()] = 0
u = df.to_numpy()
common_samples = np.matmul(u.transpose(),u)
distance_matrix[common_samples<THRESHOLD_MIN_SAMPLES] = np.nan
print('Saving the distance matrix...')
pd.DataFrame(distance_matrix, columns=df.columns).to_csv(SIMILARITY_MATRIX_FNAME, index=False)
print('Calculating distance metrics...')
with open(RIPE_RIS_PEERS_FNAME, 'r') as f:
ripe_ris_peers = json.load(f)
sum_distances = np.nansum(distance_matrix, axis=0)
sum_distances[np.isnan(distance_matrix).all(axis=0)] = np.nan
avg_distances = np.nanmean(distance_matrix, axis=0)
min_distances = np.nanmin(distance_matrix, axis=0)
max_distances = np.nanmax(distance_matrix, axis=0)
total_distance = {df.columns[i]: sum_distances[i] for i in range(sum_distances.shape[0])}
avg_distance = {df.columns[i]: avg_distances[i] for i in range(avg_distances.shape[0])}
min_distance = {df.columns[i]: min_distances[i] for i in range(min_distances.shape[0])}
max_distance = {df.columns[i]: max_distances[i] for i in range(max_distances.shape[0])}
# nansum that returns nan (rather than 0) when every input is nan
nan_nansum = lambda x: np.nan if np.isnan(x).all() else np.nansum(x)
# '5max'/'10max' appear to denote the most similar monitors (the 5 or 10
# smallest distances, i.e. maximum similarity); '5min'/'10min' the least
# similar (largest distances)
sum_5max_distances = {df.columns[i]:nan_nansum(np.sort(distance_matrix[i,:])[0:5]) for i in range(sum_distances.shape[0])}
sum_10max_distances = {df.columns[i]:nan_nansum(np.sort(distance_matrix[i,:])[0:10]) for i in range(sum_distances.shape[0])}
sum_5min_distances = {df.columns[i]:nan_nansum(np.sort(distance_matrix[i,~np.isnan(distance_matrix[i,:])])[::-1][0:5]) for i in range(sum_distances.shape[0])}
sum_10min_distances = {df.columns[i]:nan_nansum(np.sort(distance_matrix[i,~np.isnan(distance_matrix[i,:])])[::-1][0:10]) for i in range(sum_distances.shape[0])}
with open(PATHLEN_DISTANCES_SUM_FNAME,'w') as f:
json.dump(total_distance, f)
with open(PATHLEN_DISTANCES_AVG_FNAME,'w') as f:
json.dump(avg_distance, f)
with open(PATHLEN_DISTANCES_MIN_FNAME,'w') as f:
json.dump(min_distance, f)
with open(PATHLEN_DISTANCES_MAX_FNAME,'w') as f:
json.dump(max_distance, f)
with open(PATHLEN_DISTANCES_SUM_5MAX_FNAME,'w') as f:
json.dump(sum_5max_distances, f)
with open(PATHLEN_DISTANCES_SUM_10MAX_FNAME,'w') as f:
json.dump(sum_10max_distances, f)
with open(PATHLEN_DISTANCES_SUM_5MIN_FNAME,'w') as f:
json.dump(sum_5min_distances, f)
with open(PATHLEN_DISTANCES_SUM_10MIN_FNAME,'w') as f:
json.dump(sum_10min_distances, f)
# list_of_perfect_sims = []
# for i in range(len(perfect_sim[0])):
# ind1 = perfect_sim[0][i]
# ind2 = perfect_sim[1][i]
# if ind1 < ind2:
# peer_ip1 = df.columns[ind1]
# peer_ip2 = df.columns[ind2]
# peer_asn1 = ripe_ris_peers[peer_ip1]
# peer_asn2 = ripe_ris_peers[peer_ip2]
# if (peer_asn1 != peer_asn2) and (sum(df.iloc[:,ind1].notna() & df.iloc[:,ind2].notna())>10):
# print([ind1, ind2, peer_ip1, peer_ip2, peer_asn1, peer_asn2, sum(df.iloc[:,ind1].notna() & df.iloc[:,ind2].notna())])
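# Sanity-check sketch (uses the SIMILARITY_MATRIX_FNAME constant defined
# earlier in this script): reload the saved matrix and confirm it is square
# and symmetric (NaNs compare equal here).
# check = pd.read_csv(SIMILARITY_MATRIX_FNAME).to_numpy()
# assert check.shape[0] == check.shape[1]
# assert np.allclose(check, check.T, equal_nan=True)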
|
{"hexsha": "15a4937b744b381456bed3143e7c4122d36eff85", "size": 5772, "ext": "py", "lang": "Python", "max_stars_repo_path": "TEMP_pavlos/calculate_similarity_from_pathlens.py", "max_stars_repo_name": "cgeorgitsis/ai4netmon", "max_stars_repo_head_hexsha": "36c4c1695fd980705d3e3f76385cda14baf7f397", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "TEMP_pavlos/calculate_similarity_from_pathlens.py", "max_issues_repo_name": "cgeorgitsis/ai4netmon", "max_issues_repo_head_hexsha": "36c4c1695fd980705d3e3f76385cda14baf7f397", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "TEMP_pavlos/calculate_similarity_from_pathlens.py", "max_forks_repo_name": "cgeorgitsis/ai4netmon", "max_forks_repo_head_hexsha": "36c4c1695fd980705d3e3f76385cda14baf7f397", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.2285714286, "max_line_length": 160, "alphanum_fraction": 0.7524255024, "include": true, "reason": "import numpy,from statsmodels", "num_tokens": 1648}
|
function qq = ppdiff(pp,j)
%PPDIFF Differentiate piecewise polynomial.
% QQ = PPDIFF(PP,J) returns the J-th derivative of a piecewise
% polynomial PP. PP must be in the form evaluated by PPVAL. QQ is a
% piecewise polynomial in the same form. The default value for J is 1.
%
% Example:
% x = linspace(-pi,pi,9);
% y = sin(x);
% pp = spline(x,y);
% qq = ppdiff(pp);
% xx = linspace(-pi,pi,201);
% plot(xx,cos(xx),'b',xx,ppval(qq,xx),'r')
%
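% Example (second derivative; continuing the example above, the result
% should approximate -sin):
%   qq2 = ppdiff(pp,2);
%   plot(xx,-sin(xx),'b',xx,ppval(qq2,xx),'r')
%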
% See also PPVAL, SPLINE, SPLINEFIT, PPINT
% Author: Jonas Lundgren <splinefit@gmail.com> 2009
if nargin < 1, help ppdiff, return, end
if nargin < 2, j = 1; end
% Check diff order
if ~isreal(j) || mod(j,1) || j < 0
msgid = 'PPDIFF:DiffOrder';
message = 'Order of derivative must be a non-negative integer!';
error(msgid,message)
end
% Get coefficients
coefs = pp.coefs;
[m n] = size(coefs);
if j == 0
% Do nothing
elseif j < n
% Derivative of order J: scale column k by the falling factorial
% (n-k)(n-k-1)...(n-k-j+1), the factor produced by differentiating
% x^(n-k) j times.
D = [n-j:-1:1; ones(j-1,n-j)];
D = cumsum(D,1);
D = prod(D,1);
coefs = coefs(:,1:n-j);
for k = 1:n-j
coefs(:,k) = D(k)*coefs(:,k);
end
else
% Derivative kills PP
coefs = zeros(m,1);
end
% Set output
qq = pp;
qq.coefs = coefs;
qq.order = size(coefs,2);
|
{"author": "Sable", "repo": "mcbench-benchmarks", "sha": "ba13b2f0296ef49491b95e3f984c7c41fccdb6d8", "save_path": "github-repos/MATLAB/Sable-mcbench-benchmarks", "path": "github-repos/MATLAB/Sable-mcbench-benchmarks/mcbench-benchmarks-ba13b2f0296ef49491b95e3f984c7c41fccdb6d8/13812-splinefit/ppdiff.m"}
|
import numpy as np
from console_progressbar import ProgressBar
from aspire.aspire.common.config import AspireConfig
from aspire.aspire.common.exceptions import ErrorTooBig, WrongInput, DimensionsIncompatible
from aspire.aspire.common.logger import logger
from aspire.aspire.utils.data_utils import load_stack_from_file
from aspire.aspire.utils.helpers import accepts
@accepts(np.ndarray, np.ndarray, int, float)
def compare_stacks(stack1, stack2, verbose=None, max_error=None):
""" Calculate the difference between two projection-stacks.
Return the relative error between them.
:param stack1: first stack to compare
:param stack2: second stack to compare
:param verbose: level of verbosity
verbose=0 silent
verbose=1 show progress bar
                   verbose=2     print progress every 100 images
verbose=3 print message for each processed image
:param max_error: when given, raise an exception if difference between stacks is too big
        :return: the mean per-image relative error between the two stacks
"""
if max_error is not None:
try:
max_error = np.longdouble(max_error)
except (TypeError, ValueError):
raise WrongInput("max_error must be either a float or an integer!")
if verbose is None:
verbose = AspireConfig.verbosity
# check the dimensions of the stack are compatible
if stack1.shape != stack2.shape:
raise DimensionsIncompatible("Can't compare stacks of different sizes!"
f" {stack1.shape} != {stack2.shape}")
num_of_images = stack1.shape[0]
if num_of_images == 0:
logger.warning('stacks are empty!')
if verbose == 1:
pb = ProgressBar(total=100, prefix='comparing:', suffix='completed', decimals=0, length=100,
fill='%')
relative_err = 0
accumulated_err = 0
for i in range(num_of_images):
err = np.linalg.norm(stack1[i] - stack2[i])/np.linalg.norm(stack1[i])
accumulated_err += err
relative_err = accumulated_err / (i+1)
# if we already reached a relatively big error, we can stop here
# we can't ask "if max_error" as max_error is so small and treated as 0 (False)
if max_error is not None and relative_err > max_error:
raise ErrorTooBig('Stacks comparison failed! error is too big: {}'.format(relative_err))
if verbose == 0:
continue
elif verbose == 1:
pb.print_progress_bar((i + 1) / num_of_images * 100)
elif verbose == 2 and (i+1) % 100 == 0:
logger.info(f'Finished comparing {i+1}/{num_of_images} projections. '
f'Relative error so far: {relative_err}')
elif verbose == 3:
logger.info(f'Difference between projections ({i+1}) <> ({i+1}): {err}')
if verbose == 2:
logger.info(f'Finished comparing {num_of_images}/{num_of_images} projections. '
f'Relative error: {relative_err}')
return relative_err
def compare_stack_files(file1, file2, verbose=None, max_error=None):
""" Wrapper for func compare_stacks. """
stack1 = load_stack_from_file(file1)
stack2 = load_stack_from_file(file2)
return compare_stacks(stack1, stack2, verbose=verbose, max_error=max_error)
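# Minimal usage sketch (hypothetical file names; any stack format supported
# by load_stack_from_file should work):
#
#   err = compare_stack_files('stack_a.mrc', 'stack_b.mrc',
#                             verbose=1, max_error=1e-6)
#   print(f'mean relative error: {err}')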
|
{"hexsha": "b9910b2bd919d4095b61e44f0e3f405db380af50", "size": 3390, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/aspire/aspire/utils/compare_stacks.py", "max_stars_repo_name": "janden/ASPIRE-Python", "max_stars_repo_head_hexsha": "5bcf831881fd0e42630c3b99671c5ed08de260ea", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/aspire/aspire/utils/compare_stacks.py", "max_issues_repo_name": "janden/ASPIRE-Python", "max_issues_repo_head_hexsha": "5bcf831881fd0e42630c3b99671c5ed08de260ea", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/aspire/aspire/utils/compare_stacks.py", "max_forks_repo_name": "janden/ASPIRE-Python", "max_forks_repo_head_hexsha": "5bcf831881fd0e42630c3b99671c5ed08de260ea", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.0898876404, "max_line_length": 100, "alphanum_fraction": 0.6539823009, "include": true, "reason": "import numpy", "num_tokens": 770}
|
# coding: utf-8
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import numpy as np
class DataAugmenter(object):
def __init__(self):
super().__init__()
def augment(self, X, y, sample_weight=None):
raise NotImplementedError
def __call__(self, X, y, sample_weight=None):
        return self.augment(X, y, sample_weight=sample_weight)
def sample_z(self, size):
raise NotImplementedError
class NormalDataAugmenter(DataAugmenter):
def __init__(self, skewing_function, width=1, center=0, n_augment=2):
super().__init__()
self.skewing_function = skewing_function
self.width = width
self.center = center
self.n_augment = n_augment
def augment(self, X, y, sample_weight=None):
z_list = [self.sample_z( size=X.shape[0] ) for _ in range(self.n_augment)]
X_aug = np.concatenate( [X, ] + [ self.skewing_function(X, z) for z in z_list ], axis=0)
y_aug = np.concatenate( [y, ] + [y for _ in range(self.n_augment) ], axis=0)
z_aug = np.concatenate( [np.zeros(X.shape[0]) + self.center, ] + z_list)
w_aug = None
if sample_weight is not None:
w_aug = np.concatenate( [sample_weight, ] + [sample_weight for _ in range(self.n_augment) ], axis=0)
return X_aug, y_aug, w_aug, z_aug
def sample_z(self, size):
z = np.random.normal( loc=self.center, scale=self.width, size=size )
return z
class NormalDataPerturbator(object):
def __init__(self, skewing_function, width=1, center=0):
super().__init__()
self.skewing_function = skewing_function
self.width = width
self.center = center
def perturb(self, X):
z = self.sample_z(size=X.shape[0])
X = self.skewing_function(X, z)
return X, z
def __call__(self, X, y, sample_weight=None):
return self.perturb(X)
def sample_z(self, size):
z = np.random.normal( loc=self.center, scale=self.width, size=size )
return z
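# Minimal usage sketch (the additive skewing function is a hypothetical
# example; any callable of the form f(X, z) -> X works):
#
#   skew = lambda X, z: X + z[:, np.newaxis]
#   augmenter = NormalDataAugmenter(skew, width=0.5, n_augment=2)
#   X_aug, y_aug, w_aug, z_aug = augmenter.augment(X, y)
#   # X_aug holds the original X followed by n_augment skewed copies.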
|
{"hexsha": "109f933c432f8b8d8092e740ead9385201981651", "size": 2110, "ext": "py", "lang": "Python", "max_stars_repo_path": "old_models/data_augment.py", "max_stars_repo_name": "victor-estrade/SystGradDescent", "max_stars_repo_head_hexsha": "822e7094290301ec47a99433381a8d6406798aff", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-03-20T09:05:02.000Z", "max_stars_repo_stars_event_max_datetime": "2019-03-20T15:23:44.000Z", "max_issues_repo_path": "old_models/data_augment.py", "max_issues_repo_name": "victor-estrade/SystGradDescent", "max_issues_repo_head_hexsha": "822e7094290301ec47a99433381a8d6406798aff", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "old_models/data_augment.py", "max_forks_repo_name": "victor-estrade/SystGradDescent", "max_forks_repo_head_hexsha": "822e7094290301ec47a99433381a8d6406798aff", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.4615384615, "max_line_length": 112, "alphanum_fraction": 0.6511848341, "include": true, "reason": "import numpy", "num_tokens": 547}
|
# Create the functions necessary for our analysis
# import necessary libraries
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# create max_min function to get details of maximum and minimum of the given column
def max_min(df, col):
'''
args: df: dataframe
col: column name
return a dataframe with two columns
'''
# get indices of max and min values
max_idx = df[col].idxmax()
min_idx = df[col].idxmin()
# get the rows where max and min values are present
max_row = pd.DataFrame(df.loc[max_idx])
min_row = pd.DataFrame(df.loc[min_idx])
max_row.columns = max_row.loc['original_title']
min_row.columns = min_row.loc['original_title']
return pd.concat([max_row, min_row], axis=1)
# Create function to make sns plot with top 10 values of the given column
def sns_plot(df, col, title, xlabel, color):
'''
args: df: dataframe
col: column name,
title,
xlabel,
color
return: A seaborn plot
'''
info = pd.DataFrame(df.sort_values(by=col, ascending=False))
x = list(info[col])[:10]
y = list(info.original_title)[:10]
plot_ax = sns.pointplot(x=x, y=y, color=color)
plot_ax.set_title(title, fontsize=16)
plot_ax.set_xlabel(xlabel, fontsize=14)
sns.set(rc={'figure.figsize':(10,5)})
sns.set_style('whitegrid')
plt.setp(plot_ax.collections, alpha=0.8)
plt.setp(plot_ax.lines, alpha=0.6)
# create function to create line plot to see the trend between two variables.
def cmp_line(df, var1, var2, xticks, title, xlabel, ylabel, color):
'''
args:
df: dataframe
var1: column name to create a groupby object
var2: column name to compare with the first column
xticks: list of xticks
title: title of the plot
xlabel: label of X axis of the plot
ylabel: label of Y axis of the plot
color: color of the line
return: A line plot
'''
df.groupby(var1)[var2].mean().plot(color=color, xticks=xticks)
sns.set(rc={'figure.figsize':(10,5)})
sns.set_style('whitegrid')
plt.title(title, fontsize=16)
plt.xlabel(xlabel, fontsize=14)
plt.ylabel(ylabel, fontsize=14)
# Create function to divide the numerical columns into some levels and make a bar plot to analyse the relationship between two variables.
def cmp_bar(df, var1, var2, levels, title, xlabel, ylabel):
'''
args:
df: dataframe
var1: column name whose values have to be leveled and create a new column with that
var2: column name to compare with the newly created column
title: title of the plot
xlabel: label of X axis of the plot
ylabel: label of Y axis of the plot
return: A bar plot
'''
df[var1+'_level'] = pd.cut(df[var1], levels)
df.groupby(var1+'_level')[var2].mean().plot(kind='bar', alpha=.9)
plt.title(title, fontsize=16, fontweight='bold')
plt.xlabel(xlabel, fontsize=14)
plt.ylabel(ylabel, fontsize=14)
# Create function to make scatter plot to analyse the trend between two variables.
def cmp_scatter(df, var1, var2, title, xlabel, ylabel, color):
'''
args:
df: dataframe
var1: column name whose values will be on X-axis
var2: column name whose values will be on Y-axis
title: title of the plot
xlabel: label of X axis of the plot
ylabel: label of Y axis of the plot
color: color of the points
return: A scatter plot
'''
plot_ax = sns.regplot(x=df[var1], y=df[var2], color=color)
sns.set(rc={'figure.figsize':(10,5)})
sns.set_style('whitegrid')
plt.title(title, fontsize=16)
plt.xlabel(xlabel, fontsize=14)
plt.ylabel(ylabel, fontsize=14)
plt.setp(plot_ax.collections, alpha=.7)
print('Correlation Coefficient is: ', df.corr().loc[var1, var2].round(10))
# Here create a function to count individual values in a column where values are combined and separated by '|' in the
# cells of the dataframe. This function takes a column name as argument and returns a pandas Series.
def count_category(df, col):
'''
args:
df: dataframe
col: column name whose values has to be counted
    return: A pandas Series with the counts of all unique values
'''
total_values = df[col].str.cat(sep='|')
values_list = pd.Series(total_values.split(sep='|'))
return values_list.value_counts(ascending=False)
# create a function to count the values using count_category() and make bar plot with that count
def category_plot(df, col, limit, plot_kind, text, title, xlabel, ylabel):
'''
args: df: dataframe
          col: column name whose values have to be counted
          limit: Number of unique values to be in plot. -1 if all
          plot_kind: kind of the plot, vertical or horizontal
          text: format string used to print the number of unique values
title: title of the plot
xlabel: label of X axis of the plot
ylabel: label of Y axis of the plot
return: A bar plot
'''
total_count = count_category(df, col)
print(text.format(total_count.shape[0]), '\n') # print number of unique values
print(total_count.iloc[:3]) # Print top 3 counts
total_count.iloc[:limit].plot(kind=plot_kind, fontsize=13, figsize=(14,7), alpha=.9)
sns.set_style('whitegrid')
plt.title(title, fontsize=16)
plt.xlabel(xlabel, fontsize=14)
plt.ylabel(ylabel, fontsize=14)
# create a function to visualize categorical variable over year
def category_over_yr(df, title, xlabel, ylabel):
'''
args: df: Dataframe used to create visualization
title: title of the plot
xlabel: xlabel of the plot
ylabel: ylabel of the plot
return: A bar diagram
'''
df.plot(kind='bar', figsize=(15,7), fontsize=14);
# set style and label the plot
sns.set_style('whitegrid');
plt.title(title, fontsize=16, fontweight='bold');
plt.xlabel(xlabel, fontsize=14);
plt.ylabel(ylabel, fontsize=14);
# create a 'standerize' function which takes a dataframe as an argument and returns that dataframe with standardized values.
def standerize(df):
'''
args: df: Dataframe
    return: Dataframe with standardized values (zero mean, unit variance per column)
'''
return (df - df.mean()) / df.std()
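# Minimal usage sketch (assumes a TMDB-style dataframe with an
# 'original_title' column, as the functions above expect; the csv name is
# hypothetical):
#
#   df = pd.read_csv('tmdb_movies.csv')
#   max_min(df, 'revenue')                    # rows with the extreme values
#   sns_plot(df, 'revenue', 'Top 10 by revenue', 'Revenue', 'green')
#   count_category(df, 'genres')              # counts of '|'-separated values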
|
{"hexsha": "268c9eb3310f6129e0cf6e3718e2a88d4f7b4519", "size": 6403, "ext": "py", "lang": "Python", "max_stars_repo_path": "pkg/analysis_fn.py", "max_stars_repo_name": "codeslash21/TMDB_data_analysis", "max_stars_repo_head_hexsha": "116bbd4ab7d431653e39c1188ce9e437133d4396", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pkg/analysis_fn.py", "max_issues_repo_name": "codeslash21/TMDB_data_analysis", "max_issues_repo_head_hexsha": "116bbd4ab7d431653e39c1188ce9e437133d4396", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pkg/analysis_fn.py", "max_forks_repo_name": "codeslash21/TMDB_data_analysis", "max_forks_repo_head_hexsha": "116bbd4ab7d431653e39c1188ce9e437133d4396", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.5025380711, "max_line_length": 131, "alphanum_fraction": 0.6509448696, "include": true, "reason": "import numpy", "num_tokens": 1593}
|
import random
import numpy as np
from affordable.affordable import Affordable
class AbstractGame(Affordable):
def __init__(self, ctx, name):
super().__init__(ctx, name)
self.ctx = ctx
self.affordables = []
self.actions_list = []
self.states_list = []
self.policy = None
self.ctx['game'] = self
self.ctx['embedded'] = self.embedded()
self.step_handlers = []
for sub in self.all_affordables():
self.add_affordable(sub)
def all_affordables(self):
return ()
def add_affordable(self, affordable):
for sub in affordable.subaffordables():
self.affordables.append(sub)
self.affordables.append(affordable)
self.ctx['game'].add_step_handler(affordable)
affordable.add_change_handler(self.ctx['game'])
for sub in affordable.subaffordables():
self.ctx['game'].add_step_handler(sub)
sub.add_change_handler(self.ctx['outer'])
import itertools, collections
fields = [a.name() for a in self.affordables]
namedtupleClass = collections.namedtuple('Action', fields)
self.actions_list = [namedtupleClass._make(actions)
for actions in itertools.product(*[a.available_actions() for a in self.affordables])]
globals()[namedtupleClass.__name__] = namedtupleClass
holders = self.affordables
fields = [a.name() for a in holders]
namedtupleClass = collections.namedtuple('State', fields)
self.states_list = [namedtupleClass._make(states)
for states in itertools.product(*[h.available_states() for h in holders])]
globals()[namedtupleClass.__name__] = namedtupleClass
def add_step_handler(self, handler):
if handler is not self and handler not in self.step_handlers:
self.step_handlers.append(handler)
def fire_step_event(self, **pwargs):
for h in self.step_handlers:
h.on_stepped(self, **pwargs)
def action_space(self):
return self.actions_list
def state_space(self):
return self.states_list
def action(self):
import collections
fields = [a.name() for a in self.affordables]
namedtupleClass = collections.namedtuple('Action', fields)
a = namedtupleClass._make([a.action() for a in self.affordables])
globals()[namedtupleClass.__name__] = namedtupleClass
return a
def state(self):
import collections
holders = self.affordables
fields = [a.name() for a in holders]
namedtupleClass = collections.namedtuple('State', fields)
s = namedtupleClass._make([a.state() for a in holders])
globals()[namedtupleClass.__name__] = namedtupleClass
return s
def embedded(self):
return np.zeros((16, 16))
def apply_effect(self):
pass
def act(self, observation, reward, done):
self.apply_effect()
        if self.policy is None:
            # random.sample returns a list; pick a single action instead.
            action = random.choice(self.action_space())
        else:
            action = self.policy(observation, reward, done)
        for a in self.affordables:
            a.act(action)
        return action
def reward(self):
return 0.0
def reset(self):
for a in self.affordables:
a.reset()
def exit_condition(self):
return False
def force_condition(self):
return random.random() < 0.005
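# Minimal subclassing sketch (hypothetical): a concrete game supplies its
# affordables and can override reward/exit_condition as needed.
#
#   class MyGame(AbstractGame):
#       def __init__(self, ctx):
#           super().__init__(ctx, 'my_game')
#       def all_affordables(self):
#           return ()  # return the game's Affordable instances here
#       def reward(self):
#           return 0.0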
|
{"hexsha": "7602d009524c0f63925af85ca2bbbe7e0d9dc2a6", "size": 3621, "ext": "py", "lang": "Python", "max_stars_repo_path": "affordable/game.py", "max_stars_repo_name": "mountain/affordable", "max_stars_repo_head_hexsha": "31834c866ddce255d4b9a1ca28973e3eae2bf939", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "affordable/game.py", "max_issues_repo_name": "mountain/affordable", "max_issues_repo_head_hexsha": "31834c866ddce255d4b9a1ca28973e3eae2bf939", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "affordable/game.py", "max_forks_repo_name": "mountain/affordable", "max_forks_repo_head_hexsha": "31834c866ddce255d4b9a1ca28973e3eae2bf939", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.9256198347, "max_line_length": 114, "alphanum_fraction": 0.6164043082, "include": true, "reason": "import numpy", "num_tokens": 788}
|
from supreme._build import CExtension
import os.path
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration, get_numpy_include_dirs
config = Configuration('fast', parent_package, top_path)
c_files = [f for f in os.listdir(config.local_path) if f.endswith('.c')]
config.ext_modules.append(CExtension('libfast_',
c_files,
path=config.local_path))
config.add_data_dir('tests')
return config
|
{"hexsha": "9f3f14eb5cbf9761d3f5ebe21773f5ed6b66ee57", "size": 549, "ext": "py", "lang": "Python", "max_stars_repo_path": "supreme/lib/fast/setup.py", "max_stars_repo_name": "KirillDZR/supreme", "max_stars_repo_head_hexsha": "c296722599363bd0cbcce6877bd9de9b066cb74b", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 95, "max_stars_repo_stars_event_min_datetime": "2015-01-17T09:48:20.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-07T16:02:38.000Z", "max_issues_repo_path": "supreme/lib/fast/setup.py", "max_issues_repo_name": "KirillDZR/supreme", "max_issues_repo_head_hexsha": "c296722599363bd0cbcce6877bd9de9b066cb74b", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2015-10-23T15:13:34.000Z", "max_issues_repo_issues_event_max_datetime": "2019-09-23T22:47:10.000Z", "max_forks_repo_path": "supreme/lib/fast/setup.py", "max_forks_repo_name": "KirillDZR/supreme", "max_forks_repo_head_hexsha": "c296722599363bd0cbcce6877bd9de9b066cb74b", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 34, "max_forks_repo_forks_event_min_datetime": "2015-02-22T20:54:40.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-27T13:39:32.000Z", "avg_line_length": 32.2941176471, "max_line_length": 79, "alphanum_fraction": 0.6448087432, "include": true, "reason": "from numpy", "num_tokens": 104}
|
import tactic.choose
/- choice -/
example (h : ∀n m : ℕ, n < m → ∃i j, m = n + i ∨ m + j = n) : true :=
begin
choose i j h using h,
guard_hyp i : ∀n m : ℕ, n < m → ℕ,
guard_hyp j : ∀n m : ℕ, n < m → ℕ,
guard_hyp h : ∀ (n m : ℕ) (h : n < m), m = n + i n m h ∨ m + j n m h = n,
trivial
end
example (h : ∀n m : ℕ, n < m → ∃i j, m = n + i ∨ m + j = n) : true :=
begin
choose! i j h using h,
guard_hyp i : ℕ → ℕ → ℕ,
guard_hyp j : ℕ → ℕ → ℕ,
guard_hyp h : ∀ (n m : ℕ), n < m → m = n + i n m ∨ m + j n m = n,
trivial
end
example (h : ∀n m : ℕ, ∃i, ∀n:ℕ, ∃j, m = n + i ∨ m + j = n) : true :=
begin
choose i j h using h,
guard_hyp i : ℕ → ℕ → ℕ,
guard_hyp j : ℕ → ℕ → ℕ → ℕ,
guard_hyp h : ∀ (n m k : ℕ), m = k + i n m ∨ m + j n m k = k,
trivial
end
-- Test `simp only [exists_prop]` gets applied after choosing.
-- Because of this simp, we need a non-rfl goal
example (h : ∀ n, ∃ k ≥ 0, n = k) : ∀ x : ℕ, 1 = 1 :=
begin
choose u hu using h,
guard_hyp hu : ∀ n, u n ≥ 0 ∧ n = u n,
intro, refl
end
-- test choose with conjunction
example (h : ∀ i : ℕ, ∃ j, i < j ∧ j < i+i) : true :=
begin
choose f h h' using h,
guard_hyp f : ℕ → ℕ,
guard_hyp h : ∀ (i : ℕ), i < f i,
guard_hyp h' : ∀ (i : ℕ), f i < i + i,
trivial,
end
-- test choose with nonempty instances
universe u
example {α : Type u} (p : α → Prop) (h : ∀ i : α, p i → ∃ j : α × α, p j.1) : true :=
begin
choose! f h using h,
guard_hyp f : α → α × α,
guard_hyp h : ∀ (i : α), p i → p (f i).1,
trivial,
end
|
{"author": "leanprover-community", "repo": "mathlib", "sha": "5e526d18cea33550268dcbbddcb822d5cde40654", "save_path": "github-repos/lean/leanprover-community-mathlib", "path": "github-repos/lean/leanprover-community-mathlib/mathlib-5e526d18cea33550268dcbbddcb822d5cde40654/test/choose.lean"}
|
#
# Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import xir
import vart
import numpy as np
import hot_patch_xmodel
def md5(np_array):
hash_md5 = hashlib.md5()
hash_md5.update(np_array)
return hash_md5.hexdigest()
g = xir.Graph.deserialize('/workspace/yolov4-tiny.xmodel')
the_root = g.get_root_subgraph()
the_root.get_name()
hot_patch_xmodel.hot_patch(the_root)
graph_runner = vart.RunnerExt.create_runner(the_root, "run")
inputs = graph_runner.get_inputs()
outputs = graph_runner.get_outputs()
with open('/scratch/models/cache/golden/74/32192dbe8b0cacdf99c2112732324b',
'rb') as f:
f.readinto(inputs[0])
print(md5(inputs[0]))
job = graph_runner.execute_async(inputs, outputs)
graph_runner.wait(job)
print(md5(outputs[0]))
print(md5(outputs[1]))
|
{"hexsha": "3d62cc08553d7dc6791a0dccd00089b93adaa40f", "size": 1320, "ext": "py", "lang": "Python", "max_stars_repo_path": "tools/Vitis-AI-Library/graph_runner/test/run-graph-task.py", "max_stars_repo_name": "hito0512/Vitis-AI", "max_stars_repo_head_hexsha": "996459fb96cb077ed2f7e789d515893b1cccbc95", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 848, "max_stars_repo_stars_event_min_datetime": "2019-12-03T00:16:17.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T22:53:17.000Z", "max_issues_repo_path": "tools/Vitis-AI-Library/graph_runner/test/run-graph-task.py", "max_issues_repo_name": "wangyifan778/Vitis-AI", "max_issues_repo_head_hexsha": "f61061eef7550d98bf02a171604c9a9f283a7c47", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 656, "max_issues_repo_issues_event_min_datetime": "2019-12-03T00:48:46.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T18:41:54.000Z", "max_forks_repo_path": "tools/Vitis-AI-Library/graph_runner/test/run-graph-task.py", "max_forks_repo_name": "wangyifan778/Vitis-AI", "max_forks_repo_head_hexsha": "f61061eef7550d98bf02a171604c9a9f283a7c47", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 506, "max_forks_repo_forks_event_min_datetime": "2019-12-03T00:46:26.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T10:34:56.000Z", "avg_line_length": 30.0, "max_line_length": 75, "alphanum_fraction": 0.7598484848, "include": true, "reason": "import numpy", "num_tokens": 332}
|
#pragma once
#include <boost/multi_index/hashed_index.hpp>
#include <boost/multi_index/member.hpp>
#include <boost/multi_index/ordered_index.hpp>
#include <boost/multi_index/random_access_index.hpp>
#include <boost/multi_index_container.hpp>
#include <chrono>
#include <memory>
#include <cga/lib/blocks.hpp>
#include <cga/node/voting.hpp>
#include <cga/secure/common.hpp>
#include <unordered_set>
namespace cga
{
class node;
class transaction;
class rolled_hash
{
public:
std::chrono::steady_clock::time_point time;
cga::block_hash hash;
};
/**
* Processing blocks is a potentially long IO operation.
* This class isolates block insertion from other operations like servicing network operations
*/
class block_processor
{
public:
block_processor (cga::node &);
~block_processor ();
void stop ();
void flush ();
bool full ();
void add (cga::unchecked_info const &);
void add (std::shared_ptr<cga::block>, uint64_t = 0);
void force (std::shared_ptr<cga::block>);
bool should_log (bool);
bool have_blocks ();
void process_blocks ();
cga::process_return process_one (cga::transaction const &, cga::unchecked_info);
cga::process_return process_one (cga::transaction const &, std::shared_ptr<cga::block>);
cga::vote_generator generator;
	// Delay required for average network propagation before requesting confirmation
static std::chrono::milliseconds constexpr confirmation_request_delay{ 1500 };
private:
void queue_unchecked (cga::transaction const &, cga::block_hash const &);
void verify_state_blocks (cga::transaction const & transaction_a, std::unique_lock<std::mutex> &, size_t = std::numeric_limits<size_t>::max ());
void process_batch (std::unique_lock<std::mutex> &);
void process_live (cga::block_hash const &, std::shared_ptr<cga::block>);
bool stopped;
bool active;
std::chrono::steady_clock::time_point next_log;
std::deque<cga::unchecked_info> state_blocks;
std::deque<cga::unchecked_info> blocks;
std::unordered_set<cga::block_hash> blocks_hashes;
std::deque<std::shared_ptr<cga::block>> forced;
boost::multi_index_container<
cga::rolled_hash,
boost::multi_index::indexed_by<
boost::multi_index::ordered_non_unique<boost::multi_index::member<cga::rolled_hash, std::chrono::steady_clock::time_point, &cga::rolled_hash::time>>,
boost::multi_index::hashed_unique<boost::multi_index::member<cga::rolled_hash, cga::block_hash, &cga::rolled_hash::hash>>>>
rolled_back;
static size_t const rolled_back_max = 1024;
std::condition_variable condition;
cga::node & node;
std::mutex mutex;
friend std::unique_ptr<seq_con_info_component> collect_seq_con_info (block_processor & block_processor, const std::string & name);
};
}
|
{"hexsha": "6d18d1a1adbdb1a11e9b226c7bf939a03a9b77b3", "size": 2673, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "cga/node/blockprocessor.hpp", "max_stars_repo_name": "cgacurrency/cga-node", "max_stars_repo_head_hexsha": "ad21cd224bed5de4bd0cec8c2aa42d35aea842a5", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "cga/node/blockprocessor.hpp", "max_issues_repo_name": "cgacurrency/cga-node", "max_issues_repo_head_hexsha": "ad21cd224bed5de4bd0cec8c2aa42d35aea842a5", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cga/node/blockprocessor.hpp", "max_forks_repo_name": "cgacurrency/cga-node", "max_forks_repo_head_hexsha": "ad21cd224bed5de4bd0cec8c2aa42d35aea842a5", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.1710526316, "max_line_length": 150, "alphanum_fraction": 0.7643097643, "num_tokens": 636}
|
from scipy.special import loggamma
import matplotlib.pyplot as plt
import numpy as np
from math import log
import collections
# This function is given, nothing to do here.
def simulate_data(num_samples, tails_proba):
"""Simulate a sequence of i.i.d. coin flips.
Tails are denoted as 1 and heads are denoted as 0.
Parameters
----------
num_samples : int
Number of samples to generate.
tails_proba : float in range (0, 1)
Probability of observing tails.
Returns
-------
samples : array, shape (num_samples)
Outcomes of simulated coin flips. Tails is 1 and heads is 0.
"""
return np.random.choice([0, 1], size=(num_samples), p=[
1 - tails_proba, tails_proba])
def compute_log_likelihood(theta, samples):
"""Compute log p(D | theta) for the given values of theta.
Parameters
----------
theta : array, shape (num_points)
Values of theta for which it's necessary to evaluate the log-likelihood.
samples : array, shape (num_samples)
Outcomes of simulated coin flips. Tails is 1 and heads is 0.
Returns
-------
log_likelihood : array, shape (num_points)
Values of log-likelihood for each value in theta.
"""
    res = np.zeros(len(theta))  # 1-D, matching the documented shape
    count = collections.Counter(samples)
    for i, t in enumerate(theta):
        tail = log(t)
        head = log(1 - t)
        res[i] = tail * count[1] + head * count[0]
    return res
def compute_log_prior(theta, a, b):
"""Compute log p(theta | a, b) for the given values of theta.
Parameters
----------
theta : array, shape (num_points)
Values of theta for which it's necessary to evaluate the log-prior.
a, b: float
Parameters of the prior Beta distribution.
Returns
-------
log_prior : array, shape (num_points)
Values of log-prior for each value in theta.
"""
    res = np.zeros(len(theta))  # 1-D, matching the documented shape
    for i, t in enumerate(theta):
        res[i] = loggamma(a + b) - loggamma(a) - loggamma(b) + \
            (a - 1) * log(t) + (b - 1) * log(1 - t)
    return res
def compute_log_posterior(theta, samples, a, b):
"""Compute log p(theta | D, a, b) for the given values of theta.
Parameters
----------
theta : array, shape (num_points)
Values of theta for which it's necessary to evaluate the log-prior.
samples : array, shape (num_samples)
Outcomes of simulated coin flips. Tails is 1 and heads is 0.
a, b: float
Parameters of the prior Beta distribution.
Returns
-------
log_posterior : array, shape (num_points)
Values of log-posterior for each value in theta.
"""
count = collections.Counter(samples)
tail = count[1]
head = count[0]
    res = np.zeros(len(theta))  # 1-D, matching the documented shape
    for i, t in enumerate(theta):
        res[i] = loggamma(tail + a + head + b) - loggamma(tail + a) - loggamma(
            head + b) + (tail + a - 1) * log(t) + (head + b - 1) * log(1 - t)
    return res
def compute_theta_mle(samples):
"""Compute theta_MLE for the given data.
Parameters
----------
samples : array, shape (num_samples)
Outcomes of simulated coin flips. Tails is 1 and heads is 0.
Returns
-------
theta_mle : float
Maximum likelihood estimate of theta.
"""
count = collections.Counter(samples)
return count[1] / len(samples)
def compute_theta_map(samples, a, b):
"""Compute theta_MAP for the given data.
Parameters
----------
samples : array, shape (num_samples)
Outcomes of simulated coin flips. Tails is 1 and heads is 0.
a, b: float
Parameters of the prior Beta distribution.
Returns
-------
theta_mle : float
Maximum a posteriori estimate of theta.
"""
count = collections.Counter(samples)
tail = count[1]
head = count[0]
return (tail + a - 1) / (tail + a + head + b - 2)
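# Worked example: with 12 tails, 8 heads and a Beta(3, 5) prior,
# theta_MAP = (12 + 3 - 1) / (12 + 3 + 8 + 5 - 2) = 14 / 26 ≈ 0.538.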
if __name__ == '__main__':
num_samples = 20
tails_proba = 0.7
samples = simulate_data(num_samples, tails_proba)
a, b = 3, 5
print(samples)
plt.figure(figsize=[12, 8])
x = np.linspace(1e-5, 1 - 1e-5, 1000)
# Plot the prior distribution
log_prior = compute_log_prior(x, a, b)
prior = np.exp(log_prior)
plt.plot(x, prior, label='prior')
# Plot the likelihood
log_likelihood = compute_log_likelihood(x, samples)
likelihood = np.exp(log_likelihood)
int_likelihood = np.mean(likelihood)
# We rescale the likelihood - otherwise it would be impossible to see in
# the plot
rescaled_likelihood = likelihood / int_likelihood
plt.plot(x, rescaled_likelihood, label='scaled likelihood', color='purple')
# Plot the posterior distribution
log_posterior = compute_log_posterior(x, samples, a, b)
posterior = np.exp(log_posterior)
plt.plot(x, posterior, label='posterior')
# Visualize theta_mle
theta_mle = compute_theta_mle(samples)
ymax = np.exp(
compute_log_likelihood(
np.array(
[theta_mle]),
samples)) / int_likelihood
plt.vlines(
x=theta_mle,
ymin=0.00,
ymax=ymax,
linestyle='dashed',
color='purple',
label=r'$\theta_{MLE}$')
# Visualize theta_map
theta_map = compute_theta_map(samples, a, b)
ymax = np.exp(compute_log_posterior(np.array([theta_map]), samples, a, b))
plt.vlines(
x=theta_map,
ymin=0.00,
ymax=ymax,
linestyle='dashed',
color='orange',
label=r'$\theta_{MAP}$')
plt.xlabel(r'$\theta$', fontsize='xx-large')
plt.legend(fontsize='xx-large')
plt.show()
|
{"hexsha": "f4a160d18a707641390965dbbfcb01de809eeb0c", "size": 5773, "ext": "py", "lang": "Python", "max_stars_repo_path": "exercise03/probabilistic_inference.py", "max_stars_repo_name": "Rylie-W/ML33_21WS", "max_stars_repo_head_hexsha": "6c489953ba227bee7b534106c4ab9d02c79910e4", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "exercise03/probabilistic_inference.py", "max_issues_repo_name": "Rylie-W/ML33_21WS", "max_issues_repo_head_hexsha": "6c489953ba227bee7b534106c4ab9d02c79910e4", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "exercise03/probabilistic_inference.py", "max_forks_repo_name": "Rylie-W/ML33_21WS", "max_forks_repo_head_hexsha": "6c489953ba227bee7b534106c4ab9d02c79910e4", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.5792079208, "max_line_length": 82, "alphanum_fraction": 0.6138922571, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1480}
|
set_bigfloat_precision(128)
temp = BigFloat(0)
f(x) = exp(-x) - sin(x)
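# Steffensen's method: a derivative-free fixed-point iteration for f(x) = 0
# with quadratic convergence near a simple root,
#   x_{n+1} = x_n - f(x_n)^2 / (f(x_n + f(x_n)) - f(x_n))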
steffensen(c) = c - (f(c)*f(c))/( f(c+f(c)) - f(c) )
temp2 = BigFloat(100)
witch = big"0.588532743981861077432452045702903688531271516109030533319914299511672553307351427738524061576027409562153528176982466770293849745782742957500713135"
for i = 1:10
temp2 = temp
temp = steffensen(temp)
@printf("%.4e\t%.4e\n", temp , abs(temp - witch) )
end
|
{"hexsha": "f26a1374c245a8e3a41a55c2d6e14af32851f71c", "size": 423, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "Analiza_Numeryczna_M/02Lista/zad8.jl", "max_stars_repo_name": "Magikis/Uniwersity", "max_stars_repo_head_hexsha": "06964ef31d721af85740df1dce3f966006ab9f78", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2017-11-30T08:45:48.000Z", "max_stars_repo_stars_event_max_datetime": "2018-04-26T14:15:45.000Z", "max_issues_repo_path": "Analiza_Numeryczna_M/02Lista/zad8.jl", "max_issues_repo_name": "Magikis/Uniwersity", "max_issues_repo_head_hexsha": "06964ef31d721af85740df1dce3f966006ab9f78", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Analiza_Numeryczna_M/02Lista/zad8.jl", "max_forks_repo_name": "Magikis/Uniwersity", "max_forks_repo_head_hexsha": "06964ef31d721af85740df1dce3f966006ab9f78", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2017-10-16T09:42:59.000Z", "max_forks_repo_forks_event_max_datetime": "2018-01-27T19:48:45.000Z", "avg_line_length": 23.5, "max_line_length": 162, "alphanum_fraction": 0.7257683215, "num_tokens": 158}
|
function a = circulant ( m, n, x )
%*****************************************************************************80
%
%% CIRCULANT returns the CIRCULANT matrix.
%
% Formula:
%
% K = 1 + mod ( J-I, N )
% A(I,J) = X(K)
%
% Example:
%
% M = 4, N = 4, X = ( 1, 2, 3, 4 )
%
% 1 2 3 4
% 4 1 2 3
% 3 4 1 2
% 2 3 4 1
%
% M = 4, N = 5, X = ( 1, 2, 3, 4, 5 )
%
% 1 2 3 4 5
% 5 1 2 3 4
% 4 5 1 2 3
% 3 4 5 1 2
%
% M = 5, N = 4, X = ( 1, 2, 3, 4 )
%
% 1 2 3 4
% 5 1 2 3
% 4 5 1 2
% 3 4 5 1
% 1 2 3 4
%
% Discussion:
%
% Westlake lists the following "special" circulants:
%
% B2, X = ( T^2, 1, 2, ..., T, T+1, T, T-1, ..., 1 ),
% with T = ( N - 2 ) / 2;
%
% B3, X = ( N+1, 1, 1, ..., 1 );
%
% B5, X = ( 1, 2, 3, ..., N ).
%
% Rectangular Properties:
%
% The product of two circulant matrices is a circulant matrix.
%
% The transpose of a circulant matrix is a circulant matrix.
%
% A circulant matrix C, whose first row is (c1, c2, ..., cn), can be
% written as a polynomial in the upshift matrix U:
%
% C = c1 * I + c2 * U + c3 * U**2 + ... + cn * U**n-1.
%
% A is a circulant: each row is shifted once to get the next row.
%
% Square Properties:
%
% A is generally not symmetric: A' /= A.
%
% A is Toeplitz: constant along diagonals.
%
% A is persymmetric: A(I,J) = A(N+1-J,N+1-I).
%
% A commutes with any other circulant matrix.
%
% A is normal.
%
% The transpose of A is also a circulant matrix.
%
% The inverse of A is also a circulant matrix.
%
% The Fourier matrix is the eigenvector matrix for every circulant matrix.
%
% Because the Fourier matrix F diagonalizes A, the inverse (or
% pseudoinverse, if any LAMBDA is zero) can be written
%
% inverse ( A ) = (F*) * 1/LAMBDA * F
%
% A is symmetric if, for all I, X(I+1) = X(N-I+1).
%
% If R is an N-th root of unity, that is, R is a complex number such
% that R**N = 1, then
%
% Y = X(1) + X(2)*R + X(3)*R**2 + ... + X(N)*R**(N-1)
%
% is an eigenvalue of A, with eigenvector
%
% ( 1, R, R**2, ..., R**(N-1) )
%
% and left eigenvector
%
% ( R**(N-1), R**(N-2), ..., R**2, R, 1 ).
%
% Although there are exactly N distinct roots of unity, the circulant
% may have repeated eigenvalues, because of the behavior of the polynomial.
% However, the matrix is guaranteed to have N linearly independent
% eigenvectors.
%
% Licensing:
%
% This code is distributed under the GNU LGPL license.
%
% Modified:
%
% 01 October 2007
%
% Author:
%
% John Burkardt
%
% Reference:
%
% Philip Davis,
% Circulant Matrices,
% John Wiley, 1979, QA188.D37.
%
% Robert Gregory, David Karney,
% A Collection of Matrices for Testing Computational Algorithms,
% Wiley, New York, 1969, page 22,
% LC: QA263.G68.
%
% Joan Westlake,
% Test Matrix A24,
% A Handbook of Numerical Matrix Inversion and Solution of Linear Equations,
% John Wiley, 1968.
%
% Parameters:
%
% Input, integer M, N, the number of rows and columns of A.
%
% Input, real X(N), the values in the first row of A.
%
% Output, real A(M,N), the matrix.
%
  a = zeros ( m, n );

  for i = 1 : m
for j = 1 : n
k = 1 + i4_modp ( j-i, n );
a(i,j) = x(k);
end
end
return
end
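% Usage sketch (assumes i4_modp is on the MATLAB path, as above):
%
%   a = circulant ( 4, 4, [ 1, 2, 3, 4 ] );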
|
{"author": "johannesgerer", "repo": "jburkardt-m", "sha": "1726deb4a34dd08a49c26359d44ef47253f006c1", "save_path": "github-repos/MATLAB/johannesgerer-jburkardt-m", "path": "github-repos/MATLAB/johannesgerer-jburkardt-m/jburkardt-m-1726deb4a34dd08a49c26359d44ef47253f006c1/test_mat/circulant.m"}
|
#' Get header info for a document.
#'
#' @export
#' @template all
#' @template return
#' @param dbname (character) Database name. Required.
#' @param docid (character) Document ID. Required.
#' @examples \dontrun{
#' (x <- Cushion$new())
#'
#' # create a database
#' if ("sofadb" %in% db_list(x)) {
#' invisible(db_delete(x, dbname="sofadb"))
#' }
#' db_create(x, dbname='sofadb')
#'
#' # create a document
#' doc1 <- '{"name": "drink", "beer": "IPA", "score": 5}'
#' doc_create(x, dbname="sofadb", doc1, docid="abeer")
#'
#' # run doc_head
#' doc_head(x, dbname="sofadb", docid="abeer")
#' doc_head(x, dbname="sofadb", docid="abeer", as='json')
#' }
doc_head <- function(cushion, dbname, docid, as = 'list', ...) {
check_cushion(cushion)
out <- HEAD(
file.path(cushion$make_url(), dbname, docid),
cushion$get_headers(),
...)
stop_status(out)
  if (as == 'list') out$all_headers else jsonlite::toJSON(out$all_headers)
}
|
{"hexsha": "4e464143cfd67400b4decaa24e93200bc648e3f8", "size": 939, "ext": "r", "lang": "R", "max_stars_repo_path": "R/doc_head.r", "max_stars_repo_name": "FTwex/sofa-cloudant", "max_stars_repo_head_hexsha": "097577be2446865e17d41bcb015141eed19c7139", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "R/doc_head.r", "max_issues_repo_name": "FTwex/sofa-cloudant", "max_issues_repo_head_hexsha": "097577be2446865e17d41bcb015141eed19c7139", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "R/doc_head.r", "max_forks_repo_name": "FTwex/sofa-cloudant", "max_forks_repo_head_hexsha": "097577be2446865e17d41bcb015141eed19c7139", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.6176470588, "max_line_length": 73, "alphanum_fraction": 0.6261980831, "num_tokens": 297}
|
import numpy as np
import os
import yaml
import pandas as pd
import plotly.graph_objects as go
from keras.preprocessing.image import NumpyArrayIterator
from plotly.subplots import make_subplots
def report_dataframes(report_path):
    '''Collect the per-run report yaml files found under report_path and
    assemble them into hyperparameter, history and accuracy-ranking
    dataframes.
    '''
    # Keep only run directories: skip hidden entries and csv files.
    categories = [f for f in os.listdir(report_path)
                  if not (f.startswith('.') or f.endswith('.csv'))]
import_dict = {}
for i in range(len(categories)):
yaml_path = report_path+categories[i]+'/report/'
yaml_file_name = [file for file in os.listdir(yaml_path)
if file != 'run_tform_config.yaml' and not
(file.startswith('.') or file.endswith('.csv'))]
with open(yaml_path+yaml_file_name[0], 'r') as file:
import_dict[categories[i]] = yaml.load(
file, Loader=yaml.FullLoader)
column_names = ['report_name', 'layers', 'kernel_size',
'activation_function', 'optimizer', 'pooling',
'test_accuracy']
history_names = ['report_name', 'epochs', 'train_loss', 'val_loss',
'test_loss', 'train_accuracy', 'val_accuracy',
'test_accuracy']
data_dict = {}
rank_dict = {}
history_dict = {}
index = 0
############################################
# Temporary solution, needs to be updated
optimize = 'adam'
pool = 'max'
############################################
for keys in import_dict.items():
n = 0
for keys_1, values_1 in keys[1].items():
if 'activation' in keys_1:
n += 1
a_function = values_1
if 'kernel_size' in keys_1:
k_size = values_1
if 'optimizer' in keys_1:
optimize = values_1
if 'pooling' in keys_1:
pool = values_1
if 'test_accuracy' in keys_1:
accuracy = values_1
data_dict[index] = [keys[0], n, k_size, a_function,
optimize, pool, np.round(accuracy, 3)]
rank_dict[index] = [keys[0], np.round(accuracy, 3)]
try:
# For tuner
history_dict[index] = [keys[0], list(range(1,
len(keys[1]['loss'])+1)),
keys[1]['loss'], keys[1]['val_loss'],
keys[1]['test_loss'],
keys[1]['accuracy'],
keys[1]['val_accuracy'],
keys[1]['test_accuracy']]
except KeyError:
# For pre-defined CNN
history_dict[index] = [keys[0], list(range(1,
len(keys[1]['loss'])+1)),
keys[1]['loss'],
keys[1]['test_loss'],
keys[1]['accuracy'],
keys[1]['test_accuracy']]
history_names = ['report_name', 'epochs', 'train_loss',
'test_loss', 'train_accuracy',
'test_accuracy']
index += 1
hyperparam_df = pd.DataFrame.from_dict(data_dict, orient='index',
columns=column_names)
history_df = pd.DataFrame.from_dict(history_dict, orient='index',
columns=history_names)
tform_rank_df = pd.DataFrame.from_dict(
rank_dict, orient='index', columns=[column_names[0], column_names[-1]])
return hyperparam_df, history_df, tform_rank_df
def report_plots(hyperparam_df, history_df):
    '''Build the CNN history-comparison figure and the parallel-categories
    figure from the dataframes produced by report_dataframes.
    '''
# Generate plot for comparing the CNN histories
fig1 = make_subplots(subplot_titles=('CNN Loss', 'CNN Accuracy'),
horizontal_spacing=0.15, rows=1, cols=2)
# Assigned title to the overall figure, the subplots and their axes
fig1.update_layout(dict(font=dict(size=12)))
fig1.update_layout(title_text="CNN Performance History Comparison",
height=600)
fig1.update_xaxes(title_text='Epochs', row=1, col=1)
fig1.update_xaxes(title_text='Epochs', row=1, col=2)
fig1.update_yaxes(title_text='Loss', row=1, col=1)
fig1.update_yaxes(title_text='Accuracy', row=1, col=2)
# Define the color palette to use
color = ['#a50026', '#d73027', '#f46d43', '#fdae61', '#fee090', '#ffffbf',
'#e0f3f8', '#abd9e9', '#74add1', '#4575b4', '#313695',
'#8e0152', '#c51b7d', '#de77ae', '#f1b6da', '#fde0ef', '#f7f7f7',
'#e6f5d0', ' #b8e186', '#7fbc41', '#4d9221', '#276419']
    # Let's plot the training and validation loss and accuracy
for i in range(len(history_df)):
fig1.add_scatter(
x=history_df['epochs'][i], y=history_df['train_loss'][i],
mode='lines', legendgroup=history_df['report_name'][i],
name=history_df['report_name'][i],
marker=dict(size=8, color=color[i], colorscale='Electric'),
row=1, col=1)
fig1.add_scatter(
x=history_df['epochs'][i], y=history_df['train_accuracy'][i],
mode='lines', legendgroup=history_df['report_name'][i],
name=history_df['report_name'][i],
marker=dict(size=8, color=color[i], colorscale='Electric'),
row=1, col=2, showlegend=False)
fig1.add_scatter(
x=history_df['epochs'][i], y=history_df['val_loss'][i],
mode='markers', legendgroup=history_df['report_name'][i],
name=history_df['report_name'][i],
marker=dict(size=8, color=color[i], colorscale='Electric'),
row=1, col=1, showlegend=False)
fig1.add_scatter(
x=history_df['epochs'][i], y=history_df['val_accuracy'][i],
mode='markers', legendgroup=history_df['report_name'][i],
name=history_df['report_name'][i],
marker=dict(size=8, color=color[i], colorscale='Electric'),
row=1, col=2, showlegend=False)
fig1.update_layout(plot_bgcolor='lightslategray')
# Generate Parallel Coordinates plot
fig2 = go.Figure(data=go.Parcats(
line=dict(color=['#a50026', '#d73027', '#f46d43', '#fdae61', '#fee090',
'#ffffbf', '#e0f3f8', '#abd9e9', '#74add1',
'#4575b4', '#313695', '#8e0152', '#c51b7d',
'#de77ae', '#f1b6da', '#fde0ef', '#f7f7f7',
'#e6f5d0', ' #b8e186', '#7fbc41', '#4d9221',
'#276419'], colorscale='Electric'),
dimensions=list(
[dict(label='Report Name', values=hyperparam_df['report_name']),
dict(label='Num layers', values=hyperparam_df['layers'],
categoryorder='category descending'),
dict(label='Kernel_Size', values=hyperparam_df['kernel_size'],
categoryorder='category descending'),
dict(label='Activation',
values=hyperparam_df['activation_function']),
dict(label='Optimizer', values=hyperparam_df['optimizer']),
dict(label='Pooling', values=hyperparam_df['pooling'].values),
dict(label='Accuracy',
values=np.round(hyperparam_df['test_accuracy'], 3),
categoryorder='category descending'), ])))
fig2.update_layout(dict(font=dict(size=12)),
title='Parallel Coordinate Plot Comparison',
plot_bgcolor='lightblue',
paper_bgcolor='white')
return fig1, fig2
def summary_report_plots(report_path):
'''The function that plots the parallel coordinates between report name,
layers, optimizer, activation function, kernel size, pooling and accuracy.
Parameters
----------
report_path: str
string representing the location of parent report directory
Returns:
--------
plotly.graph
'''
hyperparam_df, history_df, tform_rank_df = report_dataframes(report_path)
fig1, fig2 = report_plots(hyperparam_df, history_df)
return fig1, fig2
def summary_report_tables(report_path):
    '''The function that returns tables with the summary of the transformations
    used and their performance
Parameters
----------
report_path: str
string representing the location of parent report directory
Returns
-------
summary_df : pandas Dataframe
Table containing information of the transformations run,
which data series they were applied to and their plot format
tform_rank_df : pandas Dataframe
        Table containing information of the run name and its
overall performance
'''
hyperparam_df, history_df, tform_rank_df = report_dataframes(report_path)
summary_df = summary_dataframe(report_path)
# tform_rank_df.style.format(
# "{:.3}", subset=["test_accuracy"])
# .style.apply(highlight_max(
# subset=["test_accuracy"], color='gold'))
return summary_df, tform_rank_df
def summary_dataframe(report_path):
    '''Assemble a multi-indexed dataframe describing, for every run, the
    transformations applied to each data series and their plot format.
    '''
    # Keep only run directories: skip hidden entries and csv files.
    categories = [f for f in os.listdir(report_path)
                  if not (f.startswith('.') or f.endswith('.csv'))]
run_tform = {}
for i in range(len(categories)):
yaml_path = report_path+categories[i]+'/report/'
yaml_file_name = [file for file in os.listdir(yaml_path)
if file == 'run_tform_config.yaml']
# print(yaml_file_name)
with open(yaml_path+yaml_file_name[0], 'r') as file:
run_tform[categories[i]] = yaml.load(file,
Loader=yaml.FullLoader)
run_name = []
series = []
transforms = []
columns = []
plot_code = []
for keys in run_tform.items():
n = 0
for keys_1, values_1 in keys[1].items():
if 'tform_' in keys_1:
series.append('series_'+str(n+1))
transforms.append(values_1[2])
columns.append(values_1[1])
plot_code.append(values_1[0])
run_name.append(keys[1]['run_name'])
n += 1
summary_table = np.array([transforms, columns, plot_code]).transpose()
multi_index = [run_name, series]
summary_df = pd.DataFrame(summary_table, index=multi_index,
columns=['transform', 'column', 'plot_code'])
return summary_df
def model_analysis(model, test_set, test_set_list=None):
''' The function that provides analysis of a trained model for
its predicted output and actual output
Parameters
----------
model: keras.model
a keras instance of the trained model to use for the prediction
test_set_list: list
list representing the file names, images and labels
for test set.
test_set: keras.ImageDataGenerator iterator
the interator instance created using the
keras.ImageDataGenerator. This can either be a
NumpyArrayIterator or a DirectoryIterator
Returns
-------
result: pandas.DataFrame
dataframe having filenames, actual labels,
predicted labels and probability for decision
'''
predictions = model.predict(test_set)
predicted_class_indices = np.argmax(predictions, axis=1)
probabilities = []
for n in range(len(predictions)):
probabilities.append(np.round(predictions[n], 3))
labels_dict = {}
    if isinstance(test_set, NumpyArrayIterator):
labels = []
for i in range(len(test_set_list)):
labels.append(test_set_list[i][:][2])
for i, label in enumerate(np.unique(labels)):
for j in range(len(labels)):
if labels[j] == label:
labels_dict.update({i: label})
filenames = [n[0][:][:] for n in test_set_list]
else:
labels_indices = (test_set.class_indices)
labels_dict = dict((v, k) for k, v in labels_indices.items())
filenames = test_set.filenames
labels = []
for i in range(len(filenames)):
for key, value in labels_indices.items():
if key in filenames[i]:
labels.append(key)
predicted_labels = [labels_dict[k] for k in predicted_class_indices]
result = pd.DataFrame.from_dict({"Filenames": filenames,
"Actual_Labels": labels,
"Predicted_Labels": predicted_labels,
"Probabilities": probabilities})
return result
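# Minimal usage sketch (hypothetical names; when test_set is a keras
# DirectoryIterator the test_set_list argument can be omitted):
#
#   result = model_analysis(trained_model, test_set)
#   print(result.head())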
|
{"hexsha": "2b7e21b76dc65487ce651258046192a9796bcc68", "size": 12791, "ext": "py", "lang": "Python", "max_stars_repo_path": "hardy/data_reporting/reporting.py", "max_stars_repo_name": "EISy-as-Py/hardy", "max_stars_repo_head_hexsha": "de873b7fe6e2c010d9eaa5e0b30550c8850e0f71", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 15, "max_stars_repo_stars_event_min_datetime": "2020-04-11T00:49:41.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-28T05:03:44.000Z", "max_issues_repo_path": "hardy/data_reporting/reporting.py", "max_issues_repo_name": "EISy-as-Py/hardy", "max_issues_repo_head_hexsha": "de873b7fe6e2c010d9eaa5e0b30550c8850e0f71", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 15, "max_issues_repo_issues_event_min_datetime": "2020-06-17T17:33:01.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-11T17:34:41.000Z", "max_forks_repo_path": "hardy/data_reporting/reporting.py", "max_forks_repo_name": "EISy-as-Py/hardy", "max_forks_repo_head_hexsha": "de873b7fe6e2c010d9eaa5e0b30550c8850e0f71", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2020-08-28T18:46:17.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-16T22:29:06.000Z", "avg_line_length": 38.9969512195, "max_line_length": 79, "alphanum_fraction": 0.5582049879, "include": true, "reason": "import numpy", "num_tokens": 2839}
|
runlengthencoding <- function(x)
{
splitx <- unlist(strsplit(x, ""))
rlex <- rle(splitx)
paste(with(rlex, as.vector(rbind(lengths, values))), collapse="")
}
input <- "WWWWWWWWWWWWBWWWWWWWWWWWWBBBWWWWWWWWWWWWWWWWWWWWWWWWBWWWWWWWWWWWWWW"
runlengthencoding(input)
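# Expected output: "12W1B12W3B24W1B14W"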
|
{"hexsha": "abef9fbd8891e2430c2d02a442b6c6c02b43a90a", "size": 275, "ext": "r", "lang": "R", "max_stars_repo_path": "Task/Run-length-encoding/R/run-length-encoding-1.r", "max_stars_repo_name": "LaudateCorpus1/RosettaCodeData", "max_stars_repo_head_hexsha": "9ad63ea473a958506c041077f1d810c0c7c8c18d", "max_stars_repo_licenses": ["Info-ZIP"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-11-09T22:08:38.000Z", "max_stars_repo_stars_event_max_datetime": "2018-11-09T22:08:38.000Z", "max_issues_repo_path": "Task/Run-length-encoding/R/run-length-encoding-1.r", "max_issues_repo_name": "seanwallawalla-forks/RosettaCodeData", "max_issues_repo_head_hexsha": "9ad63ea473a958506c041077f1d810c0c7c8c18d", "max_issues_repo_licenses": ["Info-ZIP"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Task/Run-length-encoding/R/run-length-encoding-1.r", "max_forks_repo_name": "seanwallawalla-forks/RosettaCodeData", "max_forks_repo_head_hexsha": "9ad63ea473a958506c041077f1d810c0c7c8c18d", "max_forks_repo_licenses": ["Info-ZIP"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2018-11-09T22:08:40.000Z", "max_forks_repo_forks_event_max_datetime": "2018-11-09T22:08:40.000Z", "avg_line_length": 27.5, "max_line_length": 78, "alphanum_fraction": 0.7454545455, "num_tokens": 90}
|
*
* -----------------------------------------------------------------
* N U M T E R F
* -----------------------------------------------------------------
*
* Written by G. Gaigalas, *
* Vilnius, Lithuania December 1993 *
*
FUNCTION NUMTERF(I2N,I2S,I2L,N,I2Q)
IMPLICIT DOUBLEPRECISION (A-H,O-Z)
COMMON /MT67/ M76(238)
NUMTERF=0
ISL=(I2S*100)+I2L
DO 1 IA =1,119
IF((N/2)*2.EQ.N) THEN
J=119+IA
ELSE
J=IA
ENDIF
LASTE=M76(J)
LS=JTHN(LASTE,1,10000)
IF(ISL.EQ.LS)THEN
NR=JTHN(LASTE,4,100)
IF(I2N.EQ.NR)GO TO 2
ENDIF
1 CONTINUE
STOP
2 NUMTERF=J
I2Q=JTHN(LASTE,3,100)
C WRITE(*,5) I2Q,NUMTERF
C 5 FORMAT(2X,'I2Q=',I6,' J=',I6)
RETURN
END
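The table lookup above leans on the helper JTHN to unpack fields from the packed integers stored in M76. Judging only from the call sites (ISL = 2S*100 + 2L is compared against JTHN(LASTE,1,10000), and a base-100 field holds the term number), JTHN(word, n, base) plausibly extracts the n-th base-`base` field of `word`. A hedged Python sketch of that assumed convention, which is inferred and not taken from the library:

def extract_field(word, n, base):
    # Assumed JTHN-like semantics: skip (n - 1) fields of width `base`,
    # then return the next field.
    return (word // base ** (n - 1)) % base

# e.g. the (2S, 2L) pair packed as ISL = 2S*100 + 2L would sit in the
# lowest base-10000 field: extract_field(laste, 1, 10000)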
|
{"hexsha": "033fcdbdbabcc16cb3c8bbf39b82c87532b0610b", "size": 889, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "src/lib/libang/numterf.f", "max_stars_repo_name": "mansour2014/ATSP2K_plus", "max_stars_repo_head_hexsha": "30842b9f086d1e497aeb778e2a352d1e8e520ec3", "max_stars_repo_licenses": ["BSD-4-Clause-UC"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-07-21T14:03:39.000Z", "max_stars_repo_stars_event_max_datetime": "2019-07-21T14:03:39.000Z", "max_issues_repo_path": "src/lib/libang/numterf.f", "max_issues_repo_name": "mzmansour/ATSP2K_plus", "max_issues_repo_head_hexsha": "30842b9f086d1e497aeb778e2a352d1e8e520ec3", "max_issues_repo_licenses": ["BSD-4-Clause-UC"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/lib/libang/numterf.f", "max_forks_repo_name": "mzmansour/ATSP2K_plus", "max_forks_repo_head_hexsha": "30842b9f086d1e497aeb778e2a352d1e8e520ec3", "max_forks_repo_licenses": ["BSD-4-Clause-UC"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.1470588235, "max_line_length": 71, "alphanum_fraction": 0.3802024747, "num_tokens": 295}
|
@testset "FBM" begin
rng = MersenneTwister(42)
h = 0.3
k = FBMKernel(; h=h)
v1 = rand(rng, 3)
v2 = rand(rng, 3)
@test k(v1, v2) ≈
(
sqeuclidean(v1, zero(v1))^h + sqeuclidean(v2, zero(v2))^h -
sqeuclidean(v1 - v2, zero(v1 - v2))^h
) / 2 atol = 1e-5
@test repr(k) == "Fractional Brownian Motion Kernel (h = $(h))"
test_interface(k)
test_ADs(FBMKernel; ADs=[:ReverseDiff, :Zygote])
    # Tests failing for kernelmatrix(k, x) with ForwardDiff
test_params(k, ([h],))
end
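The closed form exercised by the @test above is the fractional Brownian motion covariance; a minimal NumPy sketch of the same expression (mirroring the test, not the package internals):

import numpy as np

def fbm_kernel(x, y, h=0.3):
    # k(x, y) = (|x|^(2h) + |y|^(2h) - |x - y|^(2h)) / 2, written with
    # squared Euclidean norms raised to the power h, as in the test.
    sq = lambda v: float(np.dot(v, v))
    return (sq(x) ** h + sq(y) ** h - sq(x - y) ** h) / 2.0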
|
{"hexsha": "3fe58dd0a6c82de86b63c852d7a8a53b330f1ce1", "size": 617, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/basekernels/fbm.jl", "max_stars_repo_name": "kaandocal/KernelFunctions.jl", "max_stars_repo_head_hexsha": "231909fb6271695926e126899ac797f5628a137b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/basekernels/fbm.jl", "max_issues_repo_name": "kaandocal/KernelFunctions.jl", "max_issues_repo_head_hexsha": "231909fb6271695926e126899ac797f5628a137b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/basekernels/fbm.jl", "max_forks_repo_name": "kaandocal/KernelFunctions.jl", "max_forks_repo_head_hexsha": "231909fb6271695926e126899ac797f5628a137b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.85, "max_line_length": 71, "alphanum_fraction": 0.5753646677, "num_tokens": 225}
|
import numpy as np
class WeakClassifier:
""" weak classifier - threshold on the features
Args:
X (numpy.array): data array of flattened images
(row:observations, col:features) (float).
y (numpy.array): Labels array of shape (num observations, )
"""
def __init__(self, X, y, weights, thresh=0, feat=0, sign=1):
self.Xtrain = np.float32(X)
self.ytrain = np.float32(y)
self.idx_0 = self.ytrain == -1
self.idx_1 = self.ytrain == 1
self.threshold = thresh
self.feature = feat
self.sign = sign
self.weights = weights
def train(self):
# save the threshold that leads to best prediction
tmp_signs = []
tmp_thresholds = []
for f in range(self.Xtrain.shape[1]):
m0 = self.Xtrain[self.idx_0, f].mean()
m1 = self.Xtrain[self.idx_1, f].mean()
tmp_signs.append(1 if m0 < m1 else -1)
tmp_thresholds.append((m0+m1)/2.0)
        tmp_errors = []
        for f in range(self.Xtrain.shape[1]):
            tmp_result = self.weights * (
                tmp_signs[f] * ((self.Xtrain[:, f] > tmp_thresholds[f]) * 2 - 1)
                != self.ytrain
            )
            tmp_errors.append(np.sum(tmp_result))
feat = tmp_errors.index(min(tmp_errors))
self.feature = feat
self.threshold = tmp_thresholds[feat]
self.sign = tmp_signs[feat]
# -- print self.feature, self.threshold
def predict(self, x):
return self.sign * ((x[self.feature] > self.threshold) * 2 - 1)
class VJ_Classifier:
"""Weak classifier for Viola Jones procedure
Args:
X (numpy.array): Feature scores for each image. Rows: number of images
Columns: number of features.
y (numpy.array): Labels array of shape (num images, )
weights (numpy.array): observations weights array of shape (num observations, )
Attributes:
Xtrain (numpy.array): Feature scores, one for each image.
ytrain (numpy.array): Labels, one per image.
weights (float): Observations weights
threshold (float): Integral image score minimum value.
feat (int): index of the feature that leads to minimum classification error.
polarity (float): Feature's sign value. Defaults to 1.
error (float): minimized error (epsilon)
"""
def __init__(self, X, y, weights, thresh=0, feat=0, polarity=1):
self.Xtrain = np.float32(X)
self.ytrain = np.float32(y)
self.weights = weights
self.threshold = thresh
self.feature = feat
self.polarity = polarity
self.error = 0
def train(self):
"""Trains a weak classifier that uses Haar-like feature scores.
This process finds the feature that minimizes the error as shown in
the Viola-Jones paper.
Once found, the following attributes are updated:
- feature: The column id in X.
- threshold: Threshold (theta) used.
- polarity: Sign used (another way to find the parity shown in the
paper).
- error: lowest error (epsilon).
"""
signs = [1] * self.Xtrain.shape[1]
thresholds = [0] * self.Xtrain.shape[1]
errors = [100] * self.Xtrain.shape[1]
for f in range(self.Xtrain.shape[1]):
tmp_thresholds = self.Xtrain[:,f].copy()
tmp_thresholds = np.unique(tmp_thresholds)
tmp_thresholds.sort()
tmp_thresholds = [(tmp_thresholds[i]+tmp_thresholds[i+1])/2 for i in
range(len(tmp_thresholds)-1)]
            min_e = float("inf")
for theta in tmp_thresholds:
for s in [1,-1]:
tmp_r = self.weights * ( s*((self.Xtrain[:,f]<theta)*2-1) != self.ytrain )
tmp_e = sum(tmp_r)
if tmp_e < min_e:
thresholds[f] = theta
signs[f] = s
errors[f] = tmp_e
min_e = tmp_e
feat = errors.index(min(errors))
self.feature = feat
self.threshold = thresholds[feat]
self.polarity = signs[feat]
self.error = errors[feat]
def predict(self, x):
"""Returns a predicted label.
Inequality shown in the Viola Jones paper for h_j(x).
Args:
x (numpy.array): Scores obtained from Haar Features, one for each
feature.
Returns:
float: predicted label
"""
return self.polarity * ((x[self.feature] < self.threshold) * 2 - 1)
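# A small, self-contained sketch of driving VJ_Classifier; the toy data and
# seed below are illustrative only, not from the assignment:
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    X = rng.rand(20, 3)                     # 20 observations, 3 feature scores
    y = np.where(X[:, 1] > 0.5, 1.0, -1.0)  # labels in {-1, +1}
    w = np.ones(20) / 20.0                  # uniform observation weights
    clf = VJ_Classifier(X, y, w)
    clf.train()
    print(clf.feature, clf.threshold, clf.polarity, clf.error)
    print(clf.predict(X[0]))                # predicted label for one row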
|
{"hexsha": "7291c8a020701bf056d51523a3fc3365d5d3f9e9", "size": 4650, "ext": "py", "lang": "Python", "max_stars_repo_path": "assignments/ps06/helper_classes.py", "max_stars_repo_name": "jperuggia/ComputerVision", "max_stars_repo_head_hexsha": "6a36cf96dec40fe4cd5584fbc2d8e384a74a66cf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "assignments/ps06/helper_classes.py", "max_issues_repo_name": "jperuggia/ComputerVision", "max_issues_repo_head_hexsha": "6a36cf96dec40fe4cd5584fbc2d8e384a74a66cf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "assignments/ps06/helper_classes.py", "max_forks_repo_name": "jperuggia/ComputerVision", "max_forks_repo_head_hexsha": "6a36cf96dec40fe4cd5584fbc2d8e384a74a66cf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-11-02T08:36:01.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-05T19:08:53.000Z", "avg_line_length": 36.0465116279, "max_line_length": 110, "alphanum_fraction": 0.5683870968, "include": true, "reason": "import numpy", "num_tokens": 1086}
|
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
!! !!
!! GNU General Public License !!
!! !!
!! This file is part of the Flexible Modeling System (FMS). !!
!! !!
!! FMS is free software; you can redistribute it and/or modify !!
!! it and are expected to follow the terms of the GNU General Public !!
!! License as published by the Free Software Foundation. !!
!! !!
!! FMS is distributed in the hope that it will be useful, !!
!! but WITHOUT ANY WARRANTY; without even the implied warranty of !!
!! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the !!
!! GNU General Public License for more details. !!
!! !!
!! You should have received a copy of the GNU General Public License !!
!! along with FMS; if not, write to: !!
!! Free Software Foundation, Inc. !!
!! 59 Temple Place, Suite 330 !!
!! Boston, MA 02111-1307 USA !!
!! or see: !!
!! http://www.gnu.org/licenses/gpl.txt !!
!! !!
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
module bgrid_vert_mod
!-----------------------------------------------------------------------
!
! allocates storage and initializes vertical grid constants
!
! contains several interfaces for compute pressure and height
!
!-----------------------------------------------------------------------
use types_mod, only : r8
use constants_mod, only: GRAV, RDGAS, RVGAS
use fms_mod, only: error_mesg, FATAL
implicit none
private
!-----------------------------------------------------------------------
! public defined data type (vert_grid_type)
! -----------------------------------------
! nlev = number of vertical levels (integer)
! nplev = number of pure pressure levels at the top of the model,
! will equal zero when not using hybrid coordinate (integer)
! deta = vertical eta thickness/depth of model layers
! aeta = eta values at model levels (full levels)
! eta = eta values at model layer interfaces (half levels)
! dpeta = vertical pressure thickness/depth of model layers
! apeta = pressure values at model levels (full levels)
! peta = pressure values at model layer interfaces (half levels)
! dfl = reference values of geopotental height at half levels
!
! psmin = minimum allowable surface pressure to avoid negative
! mass in a model layer (important for hybrid coord)
! hybrid = logical flag (true for hybrid coordinate)
! pzero = logical flag (true for pres = 0 at top of model)
!
! pref,tref,gamma = reference values for computing dfl
!
!
! public interfaces
! -----------------
! vert_grid_init - initializes the vert_grid_type
! compute_height_half - computes the geopotential height (in meters)
! at half model levels
! compute_pres_depth - computes the pressure weight (mass) of a
! model layer
! compute_pres_full - computes the pressure at full model levels
! compute_pres_half - computes the pressure at half model levels
!
!-----------------------------------------------------------------------
!------- interfaces -------
public vert_grid_init, compute_geop_height, compute_height, &
compute_pres_depth, compute_pres_full, compute_pres_half, &
compute_pres_weights, compute_pressures, compute_height_bottom
!------- public defined data type -------
public vert_grid_type
type vert_grid_type
integer :: nlev, nplev
real(r8), pointer, dimension(:) :: deta, aeta, eta, dfl, &
dpeta, apeta, peta, wta, wtb
real(r8) :: pref, tref, gamma, psmin
logical :: hybrid, pzero
end type vert_grid_type
!-----------------------------------------------------------------------
real(r8), parameter :: d608 = (RVGAS-RDGAS)/RDGAS
real(r8), parameter :: ginv = 1./GRAV
!------ parameters for eta coordinate reference surface heights --------
real(r8), parameter :: pref = 101325., tref = 288., gamma = 0.0065
!------ parameters for performance timing of code sections -----
integer, parameter :: timlev = 9
logical :: do_clock_init = .true.
integer, dimension(7) :: id
character(len=16), dimension(7) :: &
names = (/ 'comp_pres_depth ', &
'comp_pres_full ', &
'comp_pressures ', &
'comp_pres_half ', &
'comp_pres_wghts ', &
'comp_geop_hght ', &
'comp_height_btm ' /)
!-----------------------------------------------------------------------
contains
!#######################################################################
function vert_grid_init (eta, peta, verbose) result (Vgrid)
!-----------------------------------------------------------------------
!
! all arguments are input only
! ----------------------------
!
!   eta     = eta values at model layer interfaces (half levels), size nlev+1
!   peta    = optional reference pressures at layer interfaces (hybrid coord)
!   verbose = optional verbosity level for diagnostic printout
!
real(r8), intent (in) :: eta(:)
real(r8), intent (in), optional :: peta(:)
integer, intent (in), optional :: verbose
type(vert_grid_type) :: Vgrid
!-----------------------------------------------------------------------
integer :: k, kx, lverbose
real(r8) :: rgog
real(r8), dimension(size(eta)) :: phalf, lphalf, pres
real(r8), dimension(size(eta)-1) :: lpfull
!-----------------------------------------------------------------------
lverbose = 0; if (present(verbose)) lverbose = verbose
!--------------derived vertical constants-------------------------------
kx = size(eta) - 1
allocate (Vgrid% deta(kx), Vgrid% aeta(kx), Vgrid% eta(kx+1), &
Vgrid%dpeta(kx), Vgrid%apeta(kx), Vgrid%peta(kx+1), &
Vgrid%wta (kx), Vgrid%wtb (kx), &
Vgrid% dfl(kx+1))
Vgrid % nlev = kx
!--------- set-up eta values and hybrid pressure levels ----------
!--------- note: eta(1) and eta(kx+1) have set values -----
!--------- also note: peta(kx+1) = 0.0 -----
Vgrid % eta(1) = 0.0
Vgrid % eta(kx+1) = 1.0
Vgrid % eta(2:kx) = eta(2:kx)
Vgrid % peta = 0.0
if (present(peta)) Vgrid % peta(1:kx) = peta(1:kx)
do k = 1, kx
Vgrid % deta(k) = Vgrid % eta(k+1) - Vgrid % eta(k)
Vgrid % aeta(k) = (Vgrid % eta(k+1) + Vgrid % eta(k)) * 0.5
Vgrid % dpeta(k) = Vgrid % peta(k+1) - Vgrid % peta(k)
Vgrid % apeta(k) = 0.0
Vgrid % wta (k) = 0.0
Vgrid % wtb (k) = 0.0
enddo
!----------- is this a hybrid coordinate ??? -----
Vgrid % hybrid = .false.
do k = 1, kx+1
if ( Vgrid % peta(k) > 0.0 ) then
Vgrid % hybrid = .true.
exit
endif
enddo
!----------- find lowest pure pressure level --------------
Vgrid % nplev = 0
do k = 1, kx
if ( Vgrid % deta(k) > 0.0 ) exit
Vgrid % nplev = k
enddo
! ---- need average pressure in these layers ----
Vgrid % pzero = .true.
if ( Vgrid % nplev >= 1 ) then
phalf(:) = Vgrid%peta(:) + Vgrid%eta(:)*pref
if ( phalf(1) <= epsilon(phalf) ) then
lphalf(1) = 0.0
lphalf(2:) = log(phalf(2:))
else
lphalf(:) = log(phalf(:))
Vgrid % pzero = .false.
endif
do k = 1, kx
lpfull(k) = (phalf(k+1)*lphalf(k+1) - phalf(k)*lphalf(k)) &
/ (phalf(k+1)-phalf(k)) - 1.0
Vgrid % apeta(k) = exp(lpfull(k))
Vgrid % wtb (k) = lphalf(k+1) - lpfull(k)
Vgrid % wta (k) = lpfull(k) - lphalf(k)
enddo
if (Vgrid % pzero) Vgrid % wta(1) =Vgrid % wtb(1)
endif
!----------- find the minimum allowable surface pressure ------
Vgrid % psmin = 0.0
do k = 1, kx
if ( Vgrid % deta(k) > 0.0 ) Vgrid % psmin = &
max ( Vgrid % psmin, -Vgrid % dpeta(k)/Vgrid % deta(k) )
enddo
!--- optional output of coordinate values ----
if (lverbose > 1) then
print *, ' eta=',Vgrid % eta
print *, 'deta=',Vgrid % deta
print *, 'aeta=',Vgrid % aeta
print *, ' peta=',Vgrid % peta
print *, 'dpeta=',Vgrid % dpeta
print *, 'apeta=',Vgrid % apeta
print *, 'nplev=',Vgrid % nplev
print *, 'minimum allowable surface pressure = ', Vgrid % psmin
endif
!---------- set-up eta coordinate geopotential heights -------------
rgog = RDGAS*gamma/GRAV
do k=1,kx
pres(k) = Vgrid%peta(k) + Vgrid%eta(k)*pref
Vgrid % dfl(k) = GRAV*tref*(1.0-(pres(k)/pref)**rgog)/gamma
enddo
Vgrid % dfl(kx+1) = 0.0
Vgrid % pref = pref
Vgrid % tref = tref
Vgrid % gamma = gamma
! initialize code sections for performance timing
!if (do_clock_init) then
! do k = 1, size(id)
! id(k) = mpp_clock_init ('BGRID: vert ('//trim(names(k))//')', timlev, flags=MPP_CLOCK_SYNC)
! enddo
! do_clock_init = .false.
!endif
!-----------------------------------------------------------------------
end function vert_grid_init
!#######################################################################
subroutine compute_pres_depth (Vgrid, pssl, pdepth)
type(vert_grid_type), intent(in) :: Vgrid
real(r8) , intent(in) :: pssl(:,:)
real(r8) , intent(out) :: pdepth(:,:,:)
integer :: k, kp, ke
!-----------------------------------------------------------------------
! compute the pressure weight (depth) for model layers
!-----------------------------------------------------------------------
!call mpp_clock_begin (id(1))
kp = Vgrid % nplev
ke = Vgrid % nlev
if (size(pdepth,3) /= ke) call error_mesg ( &
'compute_pres_depth in bgrid_vert_mod', &
'incorrect dimension 3 for pdepth', FATAL)
! --- check for zero/negative depth layers ---
if (Vgrid % hybrid) then
if (minval(pssl) <= Vgrid % psmin) call error_mesg &
('compute_pres_depth in bgrid_vert_mod', &
'pressure depth <= 0.0', FATAL)
endif
! --- compute depth ---
do k = 1, kp
pdepth(:,:,k) = Vgrid % dpeta(k)
enddo
do k = kp+1, ke
pdepth(:,:,k) = Vgrid % dpeta(k) + Vgrid % deta(k) * pssl(:,:)
enddo
!call mpp_clock_end (id(1))
end subroutine compute_pres_depth
!#######################################################################
subroutine compute_pres_full (Vgrid, pssl, pfull, phalf, dpde)
type(vert_grid_type), intent(in) :: Vgrid
real(r8) , intent(in) :: pssl(:,:)
real(r8) , intent(out) :: pfull(:,:,:)
real(r8), optional , intent(in) :: phalf(:,:,:), dpde(:,:,:)
real(r8), dimension(size(pfull,1),size(pfull,2),size(pfull,3)+1) :: ph
real(r8), dimension(size(pfull,1),size(pfull,2),size(pfull,3)) :: dp
integer :: k, kp, ke
!-----------------------------------------------------------------------
! compute the pressure at full model levels
!-----------------------------------------------------------------------
!call mpp_clock_begin (id(2))
kp = Vgrid % nplev
ke = Vgrid % nlev
if (size(pfull,3) /= ke) call error_mesg ( &
'compute_pres_full in bgrid_vert_mod', &
'incorrect dimension 3 for pfull', FATAL)
!--- set or compute optional arguments ---
if (present(phalf)) then
ph = phalf
else
call compute_pres_half (Vgrid, pssl, ph)
endif
if (present(dpde)) then
dp = dpde
else
call compute_pres_depth (Vgrid, pssl, dp)
endif
!--- compute p*logp at half levels ---
if ( Vgrid % pzero ) then
ph(:,:,1) = 0.0
ph(:,:,2:ke+1) = ph(:,:,2:ke+1) * log(ph(:,:,2:ke+1))
else
ph(:,:,:) = ph(:,:,:) * log(ph(:,:,:))
endif
!--- compute pressure at full levels ---
do k = 1, kp
pfull(:,:,k) = Vgrid % apeta(k)
enddo
do k = kp+1, ke
pfull(:,:,k) = exp( (ph(:,:,k+1)-ph(:,:,k))/dp(:,:,k) - 1.0 )
enddo
!call mpp_clock_end (id(2))
end subroutine compute_pres_full
!#######################################################################
subroutine compute_pressures (Vgrid, pssl, phalf, pfull, dpde, wta, wtb)
type(vert_grid_type), intent(in) :: Vgrid
real(r8) , intent(in) :: pssl(:,:)
real(r8) , intent(out) :: phalf(:,:,:), pfull(:,:,:)
real(r8), optional , intent(out) :: dpde(:,:,:)
real(r8), optional , intent(out) :: wta(:,:,:), wtb(:,:,:)
real(r8), dimension(size(pfull,1),size(pfull,2),size(pfull,3)+1) :: ph,lph
real(r8), dimension(size(pfull,1),size(pfull,2),size(pfull,3)) :: dp,lpf
integer :: k, kp, ke
!-----------------------------------------------------------------------
! compute the pressure at full model levels
!-----------------------------------------------------------------------
kp = Vgrid % nplev
ke = Vgrid % nlev
if (size(pfull,3) /= ke) call error_mesg ( &
'compute_pressures in bgrid_vert_mod', &
'incorrect dimension 3 for pfull', FATAL)
!--- set or compute optional arguments ---
call compute_pres_half (Vgrid, pssl, ph)
phalf = ph
call compute_pres_depth (Vgrid, pssl, dp)
if (present(dpde)) dpde = dp
! do not include time for previous calls
!call mpp_clock_begin (id(3))
!--- compute p*logp at half levels ---
if ( Vgrid % pzero ) then
lph(:,:,1) = 0.0
ph(:,:,1) = 0.0
lph(:,:,2:ke+1) = log(ph(:,:,2:ke+1))
ph(:,:,2:ke+1) = ph(:,:,2:ke+1) * lph(:,:,2:ke+1)
else
lph(:,:,:) = log(ph(:,:,:))
ph(:,:,:) = ph(:,:,:) * lph(:,:,:)
endif
!--- compute pressure at full levels ---
do k = 1, kp
pfull(:,:,k) = Vgrid % apeta(k)
enddo
! if (present(wta) .or. present(wtb)) then
! do k = 1, kp
! lpf(:,:,k) = log(pfull(:,:,k))
! enddo
! endif
do k = kp+1, ke
lpf(:,:,k) = (ph(:,:,k+1)-ph(:,:,k))/dp(:,:,k) - 1.0
pfull(:,:,k) = exp( lpf(:,:,k) )
enddo
!--- compute weights at full levels ---
if (present(wtb)) then
do k = 1, kp
wtb(:,:,k) = Vgrid % wtb(k)
enddo
do k = kp+1, size(wtb,3)
wtb(:,:,k) = lph(:,:,k+1) - lpf(:,:,k)
enddo
endif
if (present(wta)) then
do k = 1, kp
wta(:,:,k) = Vgrid % wta(k)
enddo
do k = kp+1, size(wta,3)
wta(:,:,k) = lpf(:,:,k) - lph(:,:,k)
enddo
    if (Vgrid % pzero .and. kp == 0) wta(:,:,1) = wtb(:,:,1) ! note: relies on wtb being requested whenever wta is
endif
!call mpp_clock_end (id(3))
end subroutine compute_pressures
!#######################################################################
subroutine compute_pres_half (Vgrid, pssl, phalf)
type(vert_grid_type), intent(in) :: Vgrid
real(r8) , intent(in) :: pssl(:,:)
real(r8) , intent(out) :: phalf(:,:,:)
integer :: k, kp, ke
!-----------------------------------------------------------------------
! compute the pressure at the interface between model layers
! (half model levels)
!-----------------------------------------------------------------------
!call mpp_clock_begin (id(4))
kp = Vgrid % nplev + 1
ke = Vgrid % nlev + 1
if (size(phalf,3) /= ke) call error_mesg ( &
'compute_pres_half in bgrid_vert_mod', &
'incorrect dimension 3 for phalf', FATAL)
do k = 1, kp
phalf(:,:,k) = Vgrid % peta(k)
enddo
do k = kp+1, ke
phalf(:,:,k) = Vgrid % peta(k) + Vgrid % eta(k) * pssl(:,:)
enddo
!call mpp_clock_end (id(4))
end subroutine compute_pres_half
!#######################################################################
subroutine compute_pres_weights ( Vgrid, phalf, pfull, wta, wtb )
type(vert_grid_type), intent(in) :: Vgrid
real(r8), intent(in) :: phalf(:,:,:), pfull(:,:,:)
real(r8), intent(out) :: wta(:,:,:), wtb(:,:,:)
real(r8), dimension(size(phalf,1),size(phalf,2),size(phalf,3)) :: logph
real(r8), dimension(size(pfull,1),size(pfull,2),size(pfull,3)) :: logpf
integer :: k, kp, kx, ks
!call mpp_clock_begin (id(5))
kp = Vgrid % nplev
kx = size(pfull,3)
if (Vgrid%pzero) then
ks = max(2,kp+1)
else
ks = kp+1
endif
logph(:,:,ks :kx+1) = log(phalf(:,:,ks :kx+1))
logpf(:,:,kp+1:kx ) = log(pfull(:,:,kp+1:kx ))
do k = 1, kp
wtb(:,:,k) = Vgrid % wtb(k)
enddo
do k = kp+1, kx
wtb(:,:,k) = logph(:,:,k+1) - logpf(:,:,k)
enddo
if (Vgrid%pzero .and. kp == 0) wta(:,:,1) = wtb(:,:,1)
do k = 1, kp
wta(:,:,k) = Vgrid % wta(k)
enddo
do k = ks, kx
wta(:,:,k) = logpf(:,:,k) - logph(:,:,k)
enddo
!call mpp_clock_end (id(5))
end subroutine compute_pres_weights
!#######################################################################
subroutine compute_geop_height (Vgrid, fssl, vtemp, wta, wtb, &
zfull, zhalf, mask)
type(vert_grid_type), intent(in) :: Vgrid
real(r8), intent(in), dimension(:,:) :: fssl
real(r8), intent(in), dimension(:,:,:) :: vtemp, wta, wtb
real(r8), intent(out) :: zfull(:,:,:)
real(r8), intent(out), optional :: zhalf(:,:,:)
real(r8), intent(in), optional :: mask(:,:,:)
integer :: k, klev
real(r8), dimension(size(vtemp,1),size(vtemp,2)) :: zb, zt, rt
!-----------------------------------------------------------------------
! compute the height (in meters) at the interface between
! model layers (half model levels)
!-----------------------------------------------------------------------
!call mpp_clock_begin (id(6))
klev = Vgrid % nlev
if (size(zfull,3) /= klev) call error_mesg ( &
'compute_geop_height in bgrid_vert_mod', &
'incorrect dimension 3 for zfull', FATAL)
if (present(zhalf)) then
if (size(zhalf,3) /= klev+1) call error_mesg ( &
'compute_geop_height in bgrid_vert_mod', &
'incorrect dimension 3 for zhalf', FATAL)
endif
zb(:,:) = fssl(:,:)
if (present(zhalf)) zhalf(:,:,klev+1) = zb(:,:)
!------- vertical integration loop (bottom to top) ----------
do k = klev, 1, -1
rt(:,:) = RDGAS * vtemp(:,:,k)
zt(:,:) = zb(:,:) + rt(:,:) * (wta(:,:,k)+wtb(:,:,k))
zfull(:,:,k) = zb(:,:) + rt(:,:) * wtb(:,:,k)
if (present(mask)) then
where (mask(:,:,k) < 0.5)
zt(:,:) = Vgrid % dfl(k)
zfull(:,:,k) = 0.5*(Vgrid % dfl(k)+Vgrid % dfl(k+1))
endwhere
endif
zb(:,:) = zt(:,:)
if (present(zhalf)) zhalf(:,:,k) = zb(:,:)
enddo
!call mpp_clock_end (id(6))
end subroutine compute_geop_height
!#######################################################################
subroutine compute_height (Vgrid, fssl, temp, sphum, pfull, phalf, &
zfull, zhalf, mask)
type(vert_grid_type), intent(in) :: Vgrid
real(r8), intent(in), dimension(:,:) :: fssl
real(r8), intent(in), dimension(:,:,:) :: temp, sphum, pfull, phalf
real(r8), intent(out), dimension(:,:,:) :: zfull, zhalf
real(r8), intent(in), optional :: mask(:,:,:)
real(r8), dimension(size(temp,1),size(temp,2),size(temp,3)) :: &
wta, wtb, vtemp
!-----------------------------------------------------------------------
! compute the height (in meters) at the interface between
! model layers (half model levels)
! assumes that specific humidity will be used to compute
! the virtual temperature
!-----------------------------------------------------------------------
call compute_pres_weights ( Vgrid, phalf, pfull, wta, wtb )
vtemp = temp * (1.0+d608*sphum)
if (present(mask)) then
call compute_geop_height ( Vgrid, fssl, vtemp, wta, wtb, &
zfull, zhalf, mask )
else
call compute_geop_height ( Vgrid, fssl, vtemp, wta, wtb, &
zfull, zhalf )
endif
zfull = zfull * ginv
zhalf = zhalf * ginv
end subroutine compute_height
!#######################################################################
subroutine compute_height_bottom ( Vgrid, pssl, tbot, qbot, &
zbot, pbot, kbot )
type(vert_grid_type), intent(in) :: Vgrid
real(r8), intent(in), dimension(:,:) :: pssl, tbot, qbot
real(r8), intent(out), dimension(:,:) :: zbot, pbot
integer, intent(in), optional :: kbot(:,:)
real(r8), dimension(size(pssl,1),size(pssl,2)) :: rt, dp, phb, pht, &
lphb, lpht, lpf
integer :: i, j, kb
! ----- pressure at top and bottom interface of bottom level -----
!call mpp_clock_begin (id(7))
if (present(kbot)) then
do j = 1, size(pssl,2)
do i = 1, size(pssl,1)
kb = kbot(i,j)
pht(i,j) = Vgrid%peta(kb ) + Vgrid%eta(kb )*pssl(i,j)
phb(i,j) = Vgrid%peta(kb+1) + Vgrid%eta(kb+1)*pssl(i,j)
enddo
enddo
else
kb = Vgrid%nlev
pht(:,:) = Vgrid%peta(kb+1) + Vgrid%eta(kb+1)*pssl(:,:)
phb(:,:) = Vgrid%peta(kb ) + Vgrid%eta(kb )*pssl(:,:)
endif
rt = ginv*RDGAS * (tbot * (1.+d608*qbot))
dp = phb - pht
lphb = log(phb)
lpht = log(pht)
lpf = (phb*lphb-pht*lpht)/dp -1
zbot = rt * (lphb-lpf)
pbot = exp(lpf)
!call mpp_clock_end (id(7))
end subroutine compute_height_bottom
!#######################################################################
end module bgrid_vert_mod
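Outside of Fortran, the two pressure formulas at the heart of compute_pres_half and compute_pres_full are easy to state; a minimal NumPy sketch for a single column (values are illustrative, and a nonzero top pressure is assumed, whereas the module special-cases p = 0 via `pzero`):

import numpy as np

def column_pressures(peta, eta, pssl):
    # Half-level (interface) pressures of the hybrid coordinate:
    # phalf_k = peta_k + eta_k * pssl.
    phalf = peta + eta * pssl
    dp = np.diff(phalf)                    # layer pressure thickness
    # Full-level pressure from the p*ln(p) construction used above:
    # pfull_k = exp((p_{k+1} ln p_{k+1} - p_k ln p_k) / dp_k - 1).
    plogp = phalf * np.log(phalf)
    pfull = np.exp(np.diff(plogp) / dp - 1.0)
    return phalf, pfull

phalf, pfull = column_pressures(np.zeros(4),
                                np.array([0.1, 0.4, 0.7, 1.0]), 101325.0)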
|
{"hexsha": "7cc38fdf7069b1879822c3c5e6226bdba3d1dff0", "size": 22598, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "models/bgrid_solo/fms_src/atmos_bgrid/tools/bgrid_vert.f90", "max_stars_repo_name": "hkershaw-brown/feature-preprocess", "max_stars_repo_head_hexsha": "fe2bd77b38c63fa0566c83ebc4d2fac1623aef66", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 65, "max_stars_repo_stars_event_min_datetime": "2019-10-16T13:31:06.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-14T11:52:58.000Z", "max_issues_repo_path": "models/bgrid_solo/fms_src/atmos_bgrid/tools/bgrid_vert.f90", "max_issues_repo_name": "hkershaw-brown/feature-preprocess", "max_issues_repo_head_hexsha": "fe2bd77b38c63fa0566c83ebc4d2fac1623aef66", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 283, "max_issues_repo_issues_event_min_datetime": "2019-09-23T15:48:34.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T21:44:41.000Z", "max_forks_repo_path": "models/bgrid_solo/fms_src/atmos_bgrid/tools/bgrid_vert.f90", "max_forks_repo_name": "hkershaw-brown/feature-preprocess", "max_forks_repo_head_hexsha": "fe2bd77b38c63fa0566c83ebc4d2fac1623aef66", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 67, "max_forks_repo_forks_event_min_datetime": "2019-09-19T22:13:24.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-20T15:58:26.000Z", "avg_line_length": 32.5619596542, "max_line_length": 98, "alphanum_fraction": 0.4591556775, "num_tokens": 6243}
|
#=
Provides calc_model_rv(theta, time)
Computes the velocity of the star due to the perturbations of multiple planets, as the linear superposition of the Keplerian orbit induced by each planet, i.e., neglecting mutual planet-planet interactions
=#
include("kepler_eqn.jl") # Code to solve Kepler's equation provides calc_ecc_anom(mean_anom, ecc; tol = 1.e-8)
# Calculate Keplerian velocity of star due to one planet (with parameters displaced by offset)
function calc_rv_pal_one_planet(theta::Array{T,1}, time::Float64; plid::Integer = 1, tol::Real = 1.e-8) where {T}
#= P = extract_period(theta,offset=offset)
K = extract_amplitude(theta,offset=offset)
h = extract_ecosw(theta,offset=offset) # TODO: check h & k def)
k = extract_esinw(theta,offset=offset)
M0 = extract_mean_anomaly_at_t0(theta,offset=offset) =#
(P,K,h,k,M0) = extract_PKhkM(theta,plid=plid)
ecc = sqrt(h*h+k*k)
  w = atan(k, h)
n = 2pi/P
#M = mod2pi(time*n-M0)
M = time*n-M0
lambda = w+M
#E = ecc_anom_bessel_series_approx(M,ecc) # WARNING: This would calling approximate version
#E = ecc_anom_itterative_laguerre(M,ecc,tol=tol) # WARNING: hardwired particular algorithm
E = calc_ecc_anom(M,ecc,tol=tol)
c = cos(lambda+ecc*sin(E))
s = sin(lambda+ecc*sin(E))
if ecc >= 1.0
println("# ERROR in calc_rv_pal_one_planet: ecc>=1.0: ",theta)
end
@assert(0.0<=ecc<1.0)
j = sqrt((1.0-ecc)*(1.0+ecc))
#p, q = (ecc == 0.0) ? fill((0.0, 0.0), length(time)) : (ecc.*sin(E), ecc.*cos(E))
p, q = (ecc == 0.0) ? (zero(T), zero(T)) : (ecc*sin(E), ecc*cos(E))
a = K/(n/sqrt((1.0-ecc)*(1.0+ecc)))
zdot = a*n/(1.0-q)*( cos(lambda+p)-k*q/(1.0+j) )
end
# Calculate Keplerian velocity of star due to num_pl planets
function calc_rv_pal_multi_planet(theta::Array{T,1}, time::Float64; tol::Real = 1.e-8) where {T}
zdot = zero(T)
for plid in 1:num_planets(theta)
zdot += calc_rv_pal_one_planet(theta,time,plid=plid,tol=tol)
end
return zdot
end
# Assumes model parameters = [ Period_1, K_1, k_1, h_1, M0_1, Period_2, K_2, k_2, h_2, M0_2, ... C ]
function calc_model_rv(theta::Array{T,1}, t::Float64; obsid::Integer = 1, tol::Real = 1.e-8) where {T}
Offset = extract_rvoffset(theta, obsid=obsid)
calc_rv_pal_multi_planet(theta, t, tol=tol) + Offset
end
export isvalid, calc_model_rv
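The same chain of computations can be sketched outside Julia; the Newton iteration below is only a stand-in for calc_ecc_anom from kepler_eqn.jl, which is not shown here:

import numpy as np

def ecc_anom(M, ecc, tol=1e-8):
    # Newton iteration for Kepler's equation E - ecc*sin(E) = M.
    E = M if ecc < 0.8 else np.pi
    for _ in range(50):
        dE = (E - ecc * np.sin(E) - M) / (1.0 - ecc * np.cos(E))
        E -= dE
        if abs(dE) < tol:
            break
    return E

def rv_one_planet(P, K, h, k, M0, t):
    # Same chain as calc_rv_pal_one_planet: (P, K, h, k, M0) -> zdot.
    ecc = np.hypot(h, k)
    w = np.arctan2(k, h)
    n = 2.0 * np.pi / P
    lam = w + (t * n - M0)
    E = ecc_anom(t * n - M0, ecc)
    j = np.sqrt((1.0 - ecc) * (1.0 + ecc))
    p, q = ecc * np.sin(E), ecc * np.cos(E)
    a = K / (n / j)
    return a * n / (1.0 - q) * (np.cos(lam + p) - k * q / (1.0 + j))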
|
{"hexsha": "94565050f8750f6e9b7f58a649aaa044ea8f09d2", "size": 2359, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "examples/radial_velocity/src/rv_model_keplerian.jl", "max_stars_repo_name": "scidom/PGUManifoldMC.jl", "max_stars_repo_head_hexsha": "766cf983b122678d47524c566ebd6fffa7f804d8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-08-08T12:59:00.000Z", "max_stars_repo_stars_event_max_datetime": "2018-08-08T12:59:00.000Z", "max_issues_repo_path": "examples/radial_velocity/analysis/src/rv_model_keplerian.jl", "max_issues_repo_name": "eford/PGUManifoldMC.jl", "max_issues_repo_head_hexsha": "0a75d899f580a79282eb742f17a207b14963ea79", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2017-04-13T03:04:56.000Z", "max_issues_repo_issues_event_max_datetime": "2017-04-13T03:05:48.000Z", "max_forks_repo_path": "examples/radial_velocity/analysis/src/rv_model_keplerian.jl", "max_forks_repo_name": "eford/PGUManifoldMC.jl", "max_forks_repo_head_hexsha": "0a75d899f580a79282eb742f17a207b14963ea79", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2018-08-21T18:03:01.000Z", "max_forks_repo_forks_event_max_datetime": "2018-08-21T18:03:01.000Z", "avg_line_length": 42.125, "max_line_length": 209, "alphanum_fraction": 0.6655362442, "num_tokens": 820}
|
[STATEMENT]
lemma convex_rel_interior:
fixes S :: "'n::euclidean_space set"
assumes "convex S"
shows "convex (rel_interior S)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. convex (rel_interior S)
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. convex (rel_interior S)
[PROOF STEP]
{
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. convex (rel_interior S)
[PROOF STEP]
fix x y and u :: real
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. convex (rel_interior S)
[PROOF STEP]
assume assm: "x \<in> rel_interior S" "y \<in> rel_interior S" "0 \<le> u" "u \<le> 1"
[PROOF STATE]
proof (state)
this:
x \<in> rel_interior S
y \<in> rel_interior S
0 \<le> u
u \<le> 1
goal (1 subgoal):
1. convex (rel_interior S)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
x \<in> rel_interior S
y \<in> rel_interior S
0 \<le> u
u \<le> 1
[PROOF STEP]
have "x \<in> S"
[PROOF STATE]
proof (prove)
using this:
x \<in> rel_interior S
y \<in> rel_interior S
0 \<le> u
u \<le> 1
goal (1 subgoal):
1. x \<in> S
[PROOF STEP]
using rel_interior_subset
[PROOF STATE]
proof (prove)
using this:
x \<in> rel_interior S
y \<in> rel_interior S
0 \<le> u
u \<le> 1
rel_interior ?S \<subseteq> ?S
goal (1 subgoal):
1. x \<in> S
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
x \<in> S
goal (1 subgoal):
1. convex (rel_interior S)
[PROOF STEP]
have "x - u *\<^sub>R (x-y) \<in> rel_interior S"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. x - u *\<^sub>R (x - y) \<in> rel_interior S
[PROOF STEP]
proof (cases "0 = u")
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. 0 = u \<Longrightarrow> x - u *\<^sub>R (x - y) \<in> rel_interior S
2. 0 \<noteq> u \<Longrightarrow> x - u *\<^sub>R (x - y) \<in> rel_interior S
[PROOF STEP]
case False
[PROOF STATE]
proof (state)
this:
0 \<noteq> u
goal (2 subgoals):
1. 0 = u \<Longrightarrow> x - u *\<^sub>R (x - y) \<in> rel_interior S
2. 0 \<noteq> u \<Longrightarrow> x - u *\<^sub>R (x - y) \<in> rel_interior S
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
0 \<noteq> u
[PROOF STEP]
have "0 < u"
[PROOF STATE]
proof (prove)
using this:
0 \<noteq> u
goal (1 subgoal):
1. 0 < u
[PROOF STEP]
using assm
[PROOF STATE]
proof (prove)
using this:
0 \<noteq> u
x \<in> rel_interior S
y \<in> rel_interior S
0 \<le> u
u \<le> 1
goal (1 subgoal):
1. 0 < u
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
0 < u
goal (2 subgoals):
1. 0 = u \<Longrightarrow> x - u *\<^sub>R (x - y) \<in> rel_interior S
2. 0 \<noteq> u \<Longrightarrow> x - u *\<^sub>R (x - y) \<in> rel_interior S
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
0 < u
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
0 < u
goal (1 subgoal):
1. x - u *\<^sub>R (x - y) \<in> rel_interior S
[PROOF STEP]
using assm rel_interior_convex_shrink[of S y x u] assms \<open>x \<in> S\<close>
[PROOF STATE]
proof (prove)
using this:
0 < u
x \<in> rel_interior S
y \<in> rel_interior S
0 \<le> u
u \<le> 1
\<lbrakk>convex S; y \<in> rel_interior S; x \<in> S; 0 < u; u \<le> 1\<rbrakk> \<Longrightarrow> x - u *\<^sub>R (x - y) \<in> rel_interior S
convex S
x \<in> S
goal (1 subgoal):
1. x - u *\<^sub>R (x - y) \<in> rel_interior S
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
x - u *\<^sub>R (x - y) \<in> rel_interior S
goal (1 subgoal):
1. 0 = u \<Longrightarrow> x - u *\<^sub>R (x - y) \<in> rel_interior S
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. 0 = u \<Longrightarrow> x - u *\<^sub>R (x - y) \<in> rel_interior S
[PROOF STEP]
case True
[PROOF STATE]
proof (state)
this:
0 = u
goal (1 subgoal):
1. 0 = u \<Longrightarrow> x - u *\<^sub>R (x - y) \<in> rel_interior S
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
0 = u
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
0 = u
goal (1 subgoal):
1. x - u *\<^sub>R (x - y) \<in> rel_interior S
[PROOF STEP]
using assm
[PROOF STATE]
proof (prove)
using this:
0 = u
x \<in> rel_interior S
y \<in> rel_interior S
0 \<le> u
u \<le> 1
goal (1 subgoal):
1. x - u *\<^sub>R (x - y) \<in> rel_interior S
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
x - u *\<^sub>R (x - y) \<in> rel_interior S
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
x - u *\<^sub>R (x - y) \<in> rel_interior S
goal (1 subgoal):
1. convex (rel_interior S)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
x - u *\<^sub>R (x - y) \<in> rel_interior S
[PROOF STEP]
have "(1 - u) *\<^sub>R x + u *\<^sub>R y \<in> rel_interior S"
[PROOF STATE]
proof (prove)
using this:
x - u *\<^sub>R (x - y) \<in> rel_interior S
goal (1 subgoal):
1. (1 - u) *\<^sub>R x + u *\<^sub>R y \<in> rel_interior S
[PROOF STEP]
by (simp add: algebra_simps)
[PROOF STATE]
proof (state)
this:
(1 - u) *\<^sub>R x + u *\<^sub>R y \<in> rel_interior S
goal (1 subgoal):
1. convex (rel_interior S)
[PROOF STEP]
}
[PROOF STATE]
proof (state)
this:
\<lbrakk>?x2 \<in> rel_interior S; ?y2 \<in> rel_interior S; 0 \<le> ?u2; ?u2 \<le> 1\<rbrakk> \<Longrightarrow> (1 - ?u2) *\<^sub>R ?x2 + ?u2 *\<^sub>R ?y2 \<in> rel_interior S
goal (1 subgoal):
1. convex (rel_interior S)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<lbrakk>?x2 \<in> rel_interior S; ?y2 \<in> rel_interior S; 0 \<le> ?u2; ?u2 \<le> 1\<rbrakk> \<Longrightarrow> (1 - ?u2) *\<^sub>R ?x2 + ?u2 *\<^sub>R ?y2 \<in> rel_interior S
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>?x2 \<in> rel_interior S; ?y2 \<in> rel_interior S; 0 \<le> ?u2; ?u2 \<le> 1\<rbrakk> \<Longrightarrow> (1 - ?u2) *\<^sub>R ?x2 + ?u2 *\<^sub>R ?y2 \<in> rel_interior S
goal (1 subgoal):
1. convex (rel_interior S)
[PROOF STEP]
unfolding convex_alt
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>?x2 \<in> rel_interior S; ?y2 \<in> rel_interior S; 0 \<le> ?u2; ?u2 \<le> 1\<rbrakk> \<Longrightarrow> (1 - ?u2) *\<^sub>R ?x2 + ?u2 *\<^sub>R ?y2 \<in> rel_interior S
goal (1 subgoal):
1. \<forall>x\<in>rel_interior S. \<forall>y\<in>rel_interior S. \<forall>u. 0 \<le> u \<and> u \<le> 1 \<longrightarrow> (1 - u) *\<^sub>R x + u *\<^sub>R y \<in> rel_interior S
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
convex (rel_interior S)
goal:
No subgoals!
[PROOF STEP]
qed
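In conventional notation, the step discharged by `simp add: algebra_simps` above is just the rearrangement of a convex combination,

\[
x - u\,(x - y) = (1 - u)\,x + u\,y, \qquad u \in [0,1],
\]

so membership of the shrunken point in rel_interior S is exactly membership of the convex combination, which is what unfolding `convex_alt` requires.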
|
{"llama_tokens": 2940, "file": null, "length": 35}
|
function gpu_energy!(
pos::AbstractArray,
forces::AbstractArray,
N::Integer,
L::Real,
rc::Real,
a::Real,
b::Real,
λ::Integer,
temp::Real,
full_ener::AbstractArray,
vir::AbstractArray
)
total_energy = 0.0f0
virial = 0.0f0
force = 0.0f0
ener = 0.0f0
index = (blockIdx().x - 1) * blockDim().x + threadIdx().x
stride = blockDim().x * gridDim().x
@inbounds for i = index:stride:N
# Reset variables
total_energy = 0.0f0
virial = 0.0f0
for j = 1:N
if i == j
continue
end
xij = pos[1, i] - pos[1, j]
yij = pos[2, i] - pos[2, j]
zij = pos[3, i] - pos[3, j]
# Periodic boundaries
xij -= L * round(xij / L)
yij -= L * round(yij / L)
zij -= L * round(zij / L)
# Compute distance
Δpos = xij * xij + yij * yij + zij * zij
Δpos = CUDA.sqrt(Δpos)
if Δpos < rc
if Δpos < b
# * Energy computation
ener = CUDA.pow(1.0f0 / Δpos, λ) - CUDA.pow(1.0f0 / Δpos, λ - 1.0f0)
ener *= a / temp
ener += 1.0f0 / temp
# * Force computation
force = λ * CUDA.pow(1.0f0 / Δpos, λ + 1.0f0)
force -= (λ - 1.0f0) * CUDA.pow(1.0f0 / Δpos, λ)
force *= a / temp
else
force = 0.0f0
ener = 0.0f0
end
# * Update the energy
total_energy += ener
# * Compute forces
fx = force * xij / Δpos
fy = force * yij / Δpos
fz = force * zij / Δpos
                # * Update the forces. Note: different GPU threads handle
                # * different `i`, but every thread writes to forces[:, j];
                # * without atomic updates these writes can race.
forces[1, i] += fx
forces[2, i] += fy
forces[3, i] += fz
forces[1, j] -= fx
forces[2, j] -= fy
forces[3, j] -= fz
# * Compute the virial, avoid double counting
virial += 0.5f0 * (fx * xij + fy * yij + fz * zij)
end
end
# * Save the values in their respective arrays
full_ener[i] = total_energy
vir[i] = virial
end
return nothing
end
function gpu_ermak!(
positions::AbstractArray,
forces::AbstractArray,
N::Integer,
L::Real,
τ::Real,
rnd_matrix::AbstractArray,
pbc::Bool
)
index = (blockIdx().x - 1) * blockDim().x + threadIdx().x
stride = blockDim().x * gridDim().x
for j = index:stride:N
positions[1, j] += (forces[1, j] * τ) + rnd_matrix[1, j]
positions[2, j] += (forces[2, j] * τ) + rnd_matrix[2, j]
positions[3, j] += (forces[3, j] * τ) + rnd_matrix[3, j]
if pbc
positions[1, j] -= L * round(positions[1, j] / L)
positions[2, j] -= L * round(positions[2, j] / L)
positions[3, j] -= L * round(positions[3, j] / L)
end
end
return nothing
end
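The pair law embedded in gpu_energy! can be stated independently of the GPU details; a plain Python sketch of the energy and force magnitude for one pair separation r, with the same a, b, λ and temp parameters (conventions copied from the kernel; the r >= b branch contributes zero, so the rc cutoff is folded in):

def pair_energy_force(r, a, b, lam, temp):
    # Pseudo hard-sphere pair law from gpu_energy!: zero beyond r = b.
    if r >= b:
        return 0.0, 0.0
    inv = 1.0 / r
    ener = (a / temp) * (inv ** lam - inv ** (lam - 1)) + 1.0 / temp
    force = (a / temp) * (lam * inv ** (lam + 1) - (lam - 1) * inv ** lam)
    return ener, force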
|
{"hexsha": "63337d88aa2083728284477cecea3a4e5955be4c", "size": 3134, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/kernels.jl", "max_stars_repo_name": "edwinb-ai/Cubed.jl", "max_stars_repo_head_hexsha": "c93becfd252fc11e1a3ece0c054966fd2e6010ca", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-10-14T07:12:51.000Z", "max_stars_repo_stars_event_max_datetime": "2020-10-14T07:12:51.000Z", "max_issues_repo_path": "src/kernels.jl", "max_issues_repo_name": "edwinb-ai/Cubed.jl", "max_issues_repo_head_hexsha": "c93becfd252fc11e1a3ece0c054966fd2e6010ca", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/kernels.jl", "max_forks_repo_name": "edwinb-ai/Cubed.jl", "max_forks_repo_head_hexsha": "c93becfd252fc11e1a3ece0c054966fd2e6010ca", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.3361344538, "max_line_length": 88, "alphanum_fraction": 0.4365028717, "num_tokens": 945}
|
import pandas as pd
import numpy as np
import argparse
import rdkit
from rdkit.Chem import AllChem
from rdkit import Chem, DataStructs
from joblib import Parallel, delayed
from tqdm import tqdm
rdkit.RDLogger.DisableLog('rdApp.*')
from dglt.contrib.moses.moses.utils import valid_smiles
parser = argparse.ArgumentParser("Compute the similarity")
parser.add_argument('--test_load',
type=str,
help='The test smiles file to be compared')
parser.add_argument('--recon_load',
type=str, default=None, required=True,
help='The generated smiles file')
parser.add_argument('--ref_load',
required=True, type=str, nargs='+', metavar='FILE',
help='The reference smiles files for generated smiles file. '
'It usually includes the train smiles and valid smiles')
parser.add_argument('--result_save',
type=str, default=None,
help='The path to save the results. '
'Default is recon filename+_sorted.csv')
parser.add_argument('--gen_ref',
type=str, default=None, nargs='?', const='-', metavar='FILE',
help='The smiles file that generated from. '
'If no FILE is provided, the last ref_load will be used.')
parser.add_argument('--n_workers',
type=int, default=-1,
help='Number of workers')
opts = parser.parse_args()
if opts.test_load is not None:
smiles_data = pd.read_csv(opts.test_load, usecols=['SMILES']).values
else:
smiles_data = None
recon_data = pd.read_csv(opts.recon_load)
if 'Similarity_Train' in recon_data:
train_res = recon_data['Similarity_Train'].values.flatten()
recon_data.drop(columns='Similarity_Train', inplace=True)
else:
train_res = None
recon_data = recon_data.values
if opts.gen_ref is None:
recon_data = np.array([list(set(recon_data.flatten().tolist()))])
ref_data = []
for f in opts.ref_load:
ref_data.append(pd.read_csv(f, usecols=['SMILES']).values)
train_data = np.vstack(ref_data)
if opts.gen_ref is None:
valid_data = None
elif opts.gen_ref == '-':
valid_data = pd.read_csv(opts.ref_load[-1], usecols=['SMILES']).values
else:
valid_data = pd.read_csv(opts.gen_ref, usecols=['SMILES']).values
save_path = opts.result_save
if save_path is None:
    # note: str.strip(".csv") strips characters, not the suffix
    save_path = opts.recon_load.rsplit(".csv", 1)[0] + "_sorted.csv"
def get_test_train(sm1):
r = []
r_id = []
fp1 = AllChem.GetMorganFingerprint(Chem.MolFromSmiles(sm1), 2)
    for i, row in enumerate(recon_data):
        for sm2 in row:
if not isinstance(sm2, float) and Chem.MolFromSmiles(sm2) is not None:
fp2 = AllChem.GetMorganFingerprint(Chem.MolFromSmiles(sm2), 2)
r.append(DataStructs.TanimotoSimilarity(fp1, fp2))
r_id.append(i)
else:
r.append(0.0)
r_id.append(0)
return r, r_id
if smiles_data is not None:
res = Parallel(n_jobs=opts.n_workers)(delayed(get_test_train)(_) for _ in tqdm(
smiles_data[:, 0],
desc='Computing similarity for test data'
))
res, res_id = list(zip(*res))
else:
res, res_id = None, None
def get_train_sim(sm1):
if not isinstance(sm1, float) and Chem.MolFromSmiles(sm1) is not None:
fp1 = AllChem.GetMorganFingerprint(Chem.MolFromSmiles(sm1), 2)
sim_train = [DataStructs.TanimotoSimilarity(
fp1,
AllChem.GetMorganFingerprint(
Chem.MolFromSmiles(_),
2
)) if Chem.MolFromSmiles(_) is not None else 0.0 for _ in train_data[:, 0]]
return np.max(sim_train)
return 0.0
if train_res is None:
    train_data = np.array([[_] for _ in train_data[:, 0] if valid_smiles(_) is not None])
train_res = Parallel(n_jobs=opts.n_workers)(delayed(get_train_sim)(_) for _ in tqdm(
recon_data.flatten(),
desc='Computing similarity for train data'
))
train_res_arr = np.array(train_res)
if res is not None:
res_arr = np.array(res)
res_id_arr = np.array(res_id)
sim = np.max(res_arr, axis=0).flatten()
sim_id = np.argmax(res_arr, axis=0)
sim_test = smiles_data[sim_id].flatten()
res_arr_ex = res_arr.copy()
res_arr_ex[:, train_res_arr == 1] = 0
sim_test_valid = np.max(res_arr_ex, axis=1).flatten()
sim_test_valid_id = np.argmax(res_arr_ex, axis=1)
thres = [_ / 10.0 for _ in range(10)]
print("|" + '|'.join(map(str,thres)) + "|")
print("|" + '|'.join(map(lambda s:str(np.sum(sim > s)), thres)) + "|")
print("|" + '|'.join(map(lambda s:str(np.sum((sim > s) & (train_res_arr != 1))), thres)) + "|")
print("object_value=%0.5f" % (np.max(sim[train_res_arr != 1])))
results = []
results_cols = []
if res is not None:
results.append(sim_test)
results_cols.append('Test_SMILES')
results.append(recon_data.flatten())
results_cols.append('Recon_SMILES')
if res is not None:
results.append(sim)
results_cols.append('Similarity')
results.append(train_res)
results_cols.append('Similarity_Train')
if valid_data is not None and res is not None:
sim_valid = valid_data[res_id_arr[sim_id,range(sim_id.shape[0])]].flatten()
results.append(sim_valid)
results_cols.append('Orig_SMILES')
output_data = pd.DataFrame(np.stack(results).T, columns=results_cols)
output_data.sort_values('Similarity', ascending=False)\
.to_csv(save_path)
results = []
results_cols = []
if res is not None:
results.append(smiles_data.flatten())
results_cols.append('Test_SMILES')
recon_data = recon_data.flatten()
sim_test_smiles = recon_data[sim_test_valid_id]
results.append(sim_test_smiles)
results_cols.append('Recon_SMILES')
results.append(sim_test_valid)
results_cols.append('Similarity')
sim_train = train_res_arr[sim_test_valid_id]
results.append(sim_train)
results_cols.append('Similarity_Train')
output_data = pd.DataFrame(np.stack(results).T, columns=results_cols)
output_data.sort_values('Similarity', ascending=False)\
    .to_csv(save_path.rsplit('.csv', 1)[0] + '_test.csv')
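# For reference, the core metric used throughout this script, Tanimoto
# similarity over radius-2 Morgan fingerprints, in isolation (the SMILES
# strings here are illustrative):
#
#     fp = lambda s: AllChem.GetMorganFingerprint(Chem.MolFromSmiles(s), 2)
#     DataStructs.TanimotoSimilarity(fp("CCO"), fp("CCN"))  # float in [0, 1]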
|
{"hexsha": "9bc800048e5abe79ce6e934e47c54ac2813fb76c", "size": 6184, "ext": "py", "lang": "Python", "max_stars_repo_path": "dglt/contrib/moses/scripts/similarity.py", "max_stars_repo_name": "uta-smile/CD-MVGNN", "max_stars_repo_head_hexsha": "b48f4cd14befed298980a83edb417ab6809f0af6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2022-02-06T09:13:51.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-19T15:03:35.000Z", "max_issues_repo_path": "dglt/contrib/moses/scripts/similarity.py", "max_issues_repo_name": "uta-smile/CD-MVGNN", "max_issues_repo_head_hexsha": "b48f4cd14befed298980a83edb417ab6809f0af6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-02-14T23:16:27.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-14T23:16:27.000Z", "max_forks_repo_path": "dglt/contrib/moses/scripts/similarity.py", "max_forks_repo_name": "uta-smile/CD-MVGNN", "max_forks_repo_head_hexsha": "b48f4cd14befed298980a83edb417ab6809f0af6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.1637426901, "max_line_length": 99, "alphanum_fraction": 0.6581500647, "include": true, "reason": "import numpy", "num_tokens": 1549}
|
# Copyright 2021 The ParallelAccel Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import linear_algebra
from linear_algebra.study import ParamResolver
import jax.numpy as jnp
import jax.lax as lax
import numpy as np
import sympy
import graph_helper_tool as tn
from asic_la.sharded_probability_function import invert_permutation
def build_random_acyclic_graph(
Nparams,
Nexponents,
depth,
N,
two_param_building_blocks=False,
subdomain=None,
seed=10,
):
"""
Build a random acyclic_graph on `N` discretes of depth `depth`
variabled on `Nparams` symbols and `Nexponents` floating
point numbers.
Args:
Nparams: The number of sympy parameters in the acyclic_graph.
Nexponents: The number of non-parametric exponents to be used
to exponentiate building_blocks.
depth: Graph depth.
N: number of discretes
        two_param_building_blocks: If `True` only use building_blocks that can be parametrized
by two parameters.
subdomain: The discrete domain on which the building_blocks should act.
seed: The seed for the random initialization of the acyclic_graph.
Same seeds produce the same acyclic_graph.
Returns:
linear_algebra.Graph: The acyclic_graph
List[linear_algebra.LinearSpace]: The discretes.
linear_algebra.ParamResolver: The parameter resolver.
"""
def f1(symbol):
return symbol / sympy.pi
def f2(symbol):
return symbol * sympy.pi
def f3(symbol):
return sympy.pi * symbol
def f4(symbol):
return symbol
funs = [f1, f2, f3, f4]
np.random.seed(seed)
names = [f"param_{n}" for n in range(Nparams)]
symbols = [sympy.Symbol(name) for name in names]
exponents = symbols + [np.random.rand(1)[0] * 10 for _ in range(Nexponents)]
resolver = ParamResolver(
{name: np.random.rand(1)[0] * 10 for name in names}
)
building_blocks = [
linear_algebra.flip_x_axis_angle,
linear_algebra.flip_x_axis_angle_square,
linear_algebra.flip_y_axis_angle,
linear_algebra.flip_y_axis_angle_square,
linear_algebra.flip_z_axis_angle,
linear_algebra.flip_z_axis_angle_square,
linear_algebra.flip_pi_over_4_axis_angle,
linear_algebra.cond_rotate_z,
linear_algebra.cond_rotate_x,
linear_algebra.cond_x_angle,
linear_algebra.swap_angle,
linear_algebra.imaginary_swap_angle,
linear_algebra.x_axis_two_angles,
linear_algebra.imaginary_swap_two_angles,
linear_algebra.rotate_on_xy_plane,
linear_algebra.EmptyBuildingBlock,
linear_algebra.flip_x_axis,
linear_algebra.flip_z_axis,
linear_algebra.flip_y_axis,
linear_algebra.flip_pi_over_4_axis,
linear_algebra.rotate_x_axis,
linear_algebra.rotate_y_axis,
linear_algebra.rotate_z_axis,
]
nq = [1, 2, 1, 2, 1, 2, 1, 2, 2, 2, 2, 2, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1]
num_discretes = dict(zip(building_blocks, nq))
num_params = dict(zip(building_blocks, [1] * 12 + [2, 2, 2, 0, 1, 1, 1, 1, 1, 1, 1]))
if subdomain is not None:
r = np.array(list(set(subdomain))).astype(np.int64)
else:
r = np.arange(N)
discretes = linear_algebra.LinearSpace.range(N)
acyclic_graph = linear_algebra.Graph()
d = 0
while d < depth:
building_block = np.random.choice(building_blocks, 1)[0]
numq = num_discretes[building_block]
nparams = num_params[building_block]
if two_param_building_blocks:
if nparams < 2:
continue
d += 1
if Nparams > 0:
fs = np.random.choice(funs, nparams)
else:
fs = [lambda x: x] * nparams
ps = np.random.choice(r, numq, replace=False)
symbs = np.random.choice(exponents, nparams, replace=True)
if building_block is linear_algebra.rotate_on_xy_plane:
g = building_block(theta=fs[0](symbs[0]), phi=fs[1](symbs[1]))
acyclic_graph += [g(discretes[ps[0]], discretes[ps[1]])]
elif building_block is linear_algebra.imaginary_swap_two_angles:
g = building_block(phase_exponent=fs[0](symbs[0]), exponent=fs[1](symbs[1]))
acyclic_graph += [g(discretes[ps[0]], discretes[ps[1]])]
elif building_block is linear_algebra.x_axis_two_angles:
g = building_block(phase_exponent=fs[0](symbs[0]), exponent=fs[1](symbs[1]))
acyclic_graph += [g(discretes[ps[0]])]
        elif building_block in (
            linear_algebra.flip_x_axis,
            linear_algebra.flip_y_axis,
            linear_algebra.flip_z_axis,
            linear_algebra.flip_pi_over_4_axis,
        ):
acyclic_graph += [building_block(discretes[ps[0]]) ** fs[0](symbs[0])]
        elif building_block in (
            linear_algebra.rotate_x_axis,
            linear_algebra.rotate_y_axis,
            linear_algebra.rotate_z_axis,
        ):
g = building_block(fs[0](symbs[0]))
acyclic_graph += [g(discretes[ps[0]])]
else:
if nparams == 0:
g = building_block(2)
acyclic_graph += [g(discretes[ps[0]], discretes[ps[1]])]
else:
g = building_block(exponent=fs[0](symbs[0]))
if numq == 1:
acyclic_graph += [g(discretes[ps[0]])]
elif numq == 2:
g = building_block(exponent=fs[0](symbs[0]))
acyclic_graph += [g(discretes[ps[0]], discretes[ps[1]])]
return acyclic_graph, discretes, resolver
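# A minimal sketch of calling the builder above; all argument values are
# illustrative:
#
#     acyclic_graph, discretes, resolver = build_random_acyclic_graph(
#         Nparams=3, Nexponents=2, depth=10, N=4, seed=0)
#     # `resolver` assigns random values to the 3 sympy symbols, so
#     # linear_algebra.resolve_parameters(acyclic_graph, resolver) is concrete.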
def full_matrix(building_block, inds, N):
"""
Extend `building_block` acting on discretes indices `inds`
to an `N`-discrete building_block in natural discrete ordering (small
to large).
"""
if len(inds) == 1:
return np.kron(
np.kron(np.eye(2 ** (inds[0])), building_block),
np.eye(2 ** (N - 1 - inds[0])),
)
if len(inds) == 2:
indsort = np.argsort(inds)
inds = np.asarray(inds)[indsort]
perm = list(indsort) + list(2 + indsort)
G = tn.Node(building_block.reshape(2, 2, 2, 2).transpose(perm))
Ids = [tn.Node(np.eye(2)) for n in range(N - 2)]
order = []
for n in range(inds[0]):
order.append(Ids[n][0])
order.append(G[0])
for n in range(inds[0] + 1, inds[1]):
order.append(Ids[n - 1][0])
order.append(G[1])
for n in range(inds[1] + 1, N):
order.append(Ids[n - 2][0])
for n in range(inds[0]):
order.append(Ids[n][1])
order.append(G[2])
for n in range(inds[0] + 1, inds[1]):
order.append(Ids[n - 1][1])
order.append(G[3])
for n in range(inds[1] + 1, N):
order.append(Ids[n - 2][1])
if len(Ids) > 1:
I = tn.outer_product(Ids[0], Ids[1])
for i in Ids[2:]:
I = tn.outer_product(I, i)
final = tn.outer_product(I, G)
else:
final = G
return final.reorder_edges(order).tensor.reshape((2 ** N, 2 ** N))
    raise ValueError("only 1- and 2-discrete building_blocks are supported")
def get_full_matrix(acyclic_graph, discretes):
"""
Get the full unitary matrix of a linear_algebra.Graph `acyclic_graph`
acting on linear_algebra-discretes `discretes`.
"""
N = len(discretes)
mat = np.eye(2 ** N)
for op in acyclic_graph.all_operations():
inds = [discretes.index(discrete) for discrete in op.discretes]
building_block = linear_algebra.unitary(op)
mat = full_matrix(building_block, inds, N) @ mat
return mat
def dot(state, state_labels, matrix, matrix_labels):
axes = [state_labels.index(l) for l in matrix_labels]
shape = (2,) * (2 * len(axes))
result = np.tensordot(
state,
matrix.reshape(shape),
(axes, tuple(range(len(axes), 2 * len(axes)))),
)
new_labels = (
tuple([l for l in state_labels if l not in matrix_labels])
+ matrix_labels
)
return result, new_labels
def apply_supermatrices(state, state_labels, supermatrices, supermatrix_labels):
"""
Contract `supermatrices` with `state` along the labels given by
`state_labels` and `supermatrix_labels`.
Args:
state: A (2,)*num_discrete shaped array.
state_labels: A tuple of unique ints labelling each tensor legs
(i.e. the discrete labels for each tensor leg)
        supermatrices: A sequence of matrix-shaped supermatrices (i.e. 128 by 128).
supermatrix_labels: The labels of the discretes on which each building_block acts.
Returns:
np.ndarray: The result of applying the building_blocks to `state`. The returned
state is permuted into the ordering given by `state_labels`.
"""
labels = state_labels
for matrix, matrix_labels in zip(supermatrices, supermatrix_labels):
state, labels = dot(state, labels, matrix, matrix_labels)
final_perm = [labels.index(l) for l in state_labels]
return state.transpose(final_perm)
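# Usage sketch (ours): two single-discrete flips applied in sequence; the
# result is permuted back into the input label order by `apply_supermatrices`.
def _example_apply_supermatrices():
    state = np.zeros((2, 2))
    state[0, 0] = 1.0  # the |00> state
    X = np.array([[0.0, 1.0], [1.0, 0.0]])
    out = apply_supermatrices(state, (0, 1), [X, X], [(0,), (1,)])
    assert out[1, 1] == 1.0  # |00> -> |11>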
def get_full_matrix_from_supermatrix(supermatrix, contracted_labels):
"""
Returns the full unitary matrix of a single `supermatrix`
that acts on all discretes in the acyclic_graph (i.e. `axes` and
`perm` need to be permutations of np.arange(large_block.ndim//2))
"""
N = len(contracted_labels)
invperm = invert_permutation(contracted_labels)
perm = np.append(invperm, np.array(invperm) + N)
return (
np.reshape(supermatrix, (2,) * len(perm))
.transpose(perm)
.reshape((2 ** N, 2 ** N))
)
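# `invert_permutation` is used above but defined elsewhere in these test
# utilities; a minimal sketch of the assumed behaviour (np.argsort inverts a
# permutation, i.e. inv[perm[i]] == i), guarded so an existing definition wins.
if "invert_permutation" not in globals():
    def invert_permutation(perm):
        return list(np.argsort(perm))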
def get_full_matrices_from_supergradient(supergradient, contracted_labels):
"""
    Returns the gradients, in matrix form, of a single `supergradient`
    dict (i.e. one large_block with possibly multiple gradients)
    that acts on all discretes in the acyclic_graph.
"""
N = len(contracted_labels)
invperm = invert_permutation(contracted_labels)
perm = np.append(invperm, np.array(invperm) + N)
return {
s: g.reshape((2,) * 2 * N).transpose(perm).reshape(2 ** N, 2 ** N)
for s, g in supergradient.items()
}
def finite_diff_gradients(acyclic_graph, resolver, epsilon=1e-8):
resolved_acyclic_graph = linear_algebra.resolve_parameters(acyclic_graph, resolver)
G0 = linear_algebra.unitary(resolved_acyclic_graph)
gradients = {}
for k in linear_algebra.parameter_symbols(acyclic_graph):
tempresolver = {}
for k2, v2 in resolver.param_dict.items():
if k2 == k.name:
tempresolver[k2] = v2 + epsilon
else:
tempresolver[k2] = v2
shifted_resolved_acyclic_graph = linear_algebra.resolve_parameters(
acyclic_graph, tempresolver
)
G1 = linear_algebra.unitary(shifted_resolved_acyclic_graph)
gradients[k] = (G1 - G0) / epsilon
return gradients
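# The forward-difference scheme above is generic; a self-contained sketch
# (ours, independent of linear_algebra) on a one-parameter rotation matrix,
# checked against the analytic derivative.
def _example_finite_diff(theta=0.3, epsilon=1e-8):
    def rotation(t):
        return np.array([[np.cos(t), -np.sin(t)], [np.sin(t), np.cos(t)]])
    numeric = (rotation(theta + epsilon) - rotation(theta)) / epsilon
    analytic = np.array(
        [[-np.sin(theta), -np.cos(theta)], [np.cos(theta), -np.sin(theta)]]
    )
    assert np.allclose(numeric, analytic, atol=1e-6)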
def compute_gradients(
state,
supermatrices,
supergradients,
super_oplabels,
observables,
observables_labels,
num_discretes,
):
"""
Compute the gradients of a symplectic acyclic_graph for the cost function
<psi|sum_n H_n |psi>, with H_n the element at `observables[n]`, acting on
discretes `observables_labels[n]`.
Args:
state: a random numpy ndarray of shape (2,)* num_discretes.
supermatrices (list[np.ndarray]): list of supermatrices
supergradients (list[dict]): list of dict of gradient matrices
            of each supermatrix. Each dict maps sympy.Symbol to np.ndarray.
super_oplabels (list[tuple[int]]): the discrete labels of each large_block.
observables (list[np.ndarray]): a list of observables (in tensor format).
observables_labels (list[tuple[int]]): the discrete labels for each element
in `observables`
num_discretes (int): the number of discretes
"""
obs_and_labels = list(zip(observables, observables_labels))
state_labels = tuple(range(num_discretes))
state = apply_supermatrices(
state, state_labels, supermatrices, super_oplabels
)
psi = np.zeros(state.shape, state.dtype)
for ob, ob_labels in obs_and_labels:
inds = [state_labels.index(l) for l in ob_labels]
cont_state_labels = list(range(-1, -len(state_labels) - 1, -1))
cont_ob_labels = []
for n, i in enumerate(inds):
cont_ob_labels.append(cont_state_labels[i])
cont_state_labels[i] = ob_labels[n] + 1
shape = (2,) * (2 * len(ob_labels))
psi += tn.ncon(
[state, ob.reshape(shape)],
[
tuple(cont_state_labels),
tuple([o + 1 for o in ob_labels]) + tuple(cont_ob_labels),
],
)
reversed_super_oplabels = list(reversed(super_oplabels))
reversed_supergradients = list(reversed(supergradients))
accumulated_gradients = {}
psi = psi.conj()
for n, building_block in enumerate(reversed(supermatrices)):
building_block_labels = reversed_super_oplabels[n]
state, tmp_labels = dot(state, state_labels, building_block.T.conj(), building_block_labels)
for k, grad in reversed_supergradients[n].items():
tmp, _ = dot(psi, state_labels, grad.T, building_block_labels)
if k in accumulated_gradients:
accumulated_gradients[k] += np.dot(tmp.ravel(), state.ravel())
else:
accumulated_gradients[k] = np.dot(tmp.ravel(), state.ravel())
psi, state_labels = dot(psi, state_labels, building_block.T, building_block_labels)
assert (
tmp_labels == state_labels
), "two identical building_block applications produced different label-ordering"
# bring state back into natural discrete ordering (i.e. small to large)
perm = [state_labels.index(i) for i in range(num_discretes)]
return accumulated_gradients, state.transpose(perm)
def generate_raw_pbaxistring(discretes, string_length, replace=False):
"""
Get a pbaxistring of length `string_length` acting on `discretes`
"""
pbaxis = [linear_algebra.flip_x_axis, linear_algebra.flip_y_axis, linear_algebra.flip_z_axis]
rawstring = np.random.choice(pbaxis, string_length)
acting_discretes = np.random.choice(discretes, string_length, replace=replace)
return np.random.rand(1), rawstring, acting_discretes
def generate_pbaxisum(num_strings, discretes, string_length):
pbaxistrings = []
for _ in range(num_strings):
coeff, pbaxistring, prob_basis_axis_discretes = generate_raw_pbaxistring(
discretes, string_length, replace=False
)
pbaxistrings.append(
linear_algebra.ProbBasisAxisString(
coeff, [p(q) for p, q in zip(pbaxistring, prob_basis_axis_discretes)]
)
)
return sum(pbaxistrings)
def to_array(arr):
return np.array(arr.real) + 1j * np.array(arr.imag)
def _mantissa_eps(mantissa_bits):
return 0.5 * (2 ** (1 - mantissa_bits))
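# Sanity sketch (ours): `_mantissa_eps` reproduces the machine epsilon of a
# format with the given number of explicit mantissa bits, e.g. 23 for float32
# and 7 for bfloat16 -- which motivates the hard-coded constants in `eps`.
def _example_mantissa_eps():
    assert _mantissa_eps(23) == np.finfo(np.float32).eps  # 2 ** -23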
def eps(precision, dtype=jnp.float32):
dtype_eps = jnp.finfo(dtype).eps
if dtype in (jnp.float64, jnp.complex128):
return _mantissa_eps(49)
if dtype in (jnp.float32, jnp.complex64):
if precision == lax.Precision.DEFAULT:
return jnp.finfo(jnp.bfloat16).eps
if precision == lax.Precision.HIGH:
return _mantissa_eps(18) # TODO: Check this
if precision == lax.Precision.HIGHEST:
return jnp.finfo(jnp.float32).eps
raise ValueError(f"Invalid precision {precision}.")
return dtype_eps
|
{"hexsha": "1458352817a1a39022d8f3155fb1eeb0f22114fc", "size": 16340, "ext": "py", "lang": "Python", "max_stars_repo_path": "parallel_accel/Simulator/asic_la/testutils.py", "max_stars_repo_name": "google/parallel_accel", "max_stars_repo_head_hexsha": "b58fda1c3a22f2aaa9a97337d602cd72c49ee8be", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-12-19T21:17:02.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-19T21:17:02.000Z", "max_issues_repo_path": "parallel_accel/Simulator/asic_la/testutils.py", "max_issues_repo_name": "google/parallel_accel", "max_issues_repo_head_hexsha": "b58fda1c3a22f2aaa9a97337d602cd72c49ee8be", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "parallel_accel/Simulator/asic_la/testutils.py", "max_forks_repo_name": "google/parallel_accel", "max_forks_repo_head_hexsha": "b58fda1c3a22f2aaa9a97337d602cd72c49ee8be", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.2669789227, "max_line_length": 208, "alphanum_fraction": 0.6469400245, "include": true, "reason": "import numpy,import sympy,import jax", "num_tokens": 4156}
|
''' Incremental-Classifier Learning
Authors : Khurram Javed, Muhammad Talha Paracha
Maintainer : Khurram Javed
Lab : TUKL-SEECS R&D Lab
Email : 14besekjaved@seecs.edu.pk '''
import logging
import numpy as np
import torch
import torch.nn.functional as F
from torch.autograd import Variable
from torchnet.meter import confusionmeter
from tqdm import tqdm
logger = logging.getLogger('iCARL')
class EvaluatorFactory():
'''
This class is used to get different versions of evaluators
'''
def __init__(self):
pass
@staticmethod
def get_evaluator(testType="rmse", cuda=True):
if testType == "rmse":
return DocumentMseEvaluator(cuda)
class DocumentMseEvaluator():
'''
    Evaluator class for RMSE-based regression evaluation
'''
def __init__(self, cuda):
self.cuda = cuda
def evaluate(self, model, iterator):
model.eval()
lossAvg = None
with torch.no_grad():
for img, target in tqdm(iterator):
if self.cuda:
img, target = img.cuda(), target.cuda()
response = model(Variable(img))
# print (response[0])
# print (target[0])
loss = F.mse_loss(response, Variable(target.float()))
loss = torch.sqrt(loss)
if lossAvg is None:
lossAvg = loss
else:
lossAvg += loss
# logger.debug("Cur loss %s", str(loss))
lossAvg /= len(iterator)
logger.info("Avg Val Loss %s", str((lossAvg).cpu().data.numpy()))
|
{"hexsha": "1afa0403fcbc6cdf7fac4d73fa110fecc2a9b9e1", "size": 1623, "ext": "py", "lang": "Python", "max_stars_repo_path": "trainer/evaluator.py", "max_stars_repo_name": "Khurramjaved96/Dicta", "max_stars_repo_head_hexsha": "416638a3d1ad851b00394e55a7574ec978080d51", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 60, "max_stars_repo_stars_event_min_datetime": "2019-05-29T17:09:15.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T15:35:57.000Z", "max_issues_repo_path": "trainer/evaluator.py", "max_issues_repo_name": "Khurramjaved96/Dicta", "max_issues_repo_head_hexsha": "416638a3d1ad851b00394e55a7574ec978080d51", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2018-06-08T14:32:34.000Z", "max_issues_repo_issues_event_max_datetime": "2019-05-20T05:34:39.000Z", "max_forks_repo_path": "trainer/evaluator.py", "max_forks_repo_name": "Khurramjaved96/Dicta", "max_forks_repo_head_hexsha": "416638a3d1ad851b00394e55a7574ec978080d51", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 28, "max_forks_repo_forks_event_min_datetime": "2019-06-10T04:07:24.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-12T19:21:49.000Z", "avg_line_length": 25.359375, "max_line_length": 73, "alphanum_fraction": 0.584103512, "include": true, "reason": "import numpy", "num_tokens": 362}
|
'''
Aggregate experiment data
- Experiment: AvidaGP L9
SUMMARY FILES
- experiment
- config + summary(evaluation) + systematics + summary(world summary)
- world
- task
'''
import argparse, os, sys, errno, csv, json
from scipy.stats import entropy
run_identifier = "RUN_" # String that identifies a run directory
default_trait_cov_thresh = 100
trait_order = ["echo","nand","not","or_not","and","or","and_not","nor","xor","equ"]
systematics_fields = {
"num_taxa",
"total_orgs",
"ave_depth",
"num_roots",
"mrca_depth",
"diversity",
"mean_genotype_pairwise_distance",
"min_genotype_pairwise_distance",
"max_genotype_pairwise_distance",
"variance_genotype_pairwise_distance",
"genotype_current_phylogenetic_diversity"
}
world_eval_fields = {
"epoch",
"aggregate_scores",
"scores",
"selected",
"num_unique_selected"
}
def read_csv(file_path):
content = None
with open(file_path, "r") as fp:
content = fp.read().strip().split("\n")
header = content[0].split(",")
content = content[1:]
lines = [{header[i]: l[i] for i in range(len(header))} for l in csv.reader(content, quotechar='"', delimiter=',', quoting=csv.QUOTE_ALL, skipinitialspace=True)]
return lines
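# Usage sketch (ours): `read_csv` returns one dict per row, keyed by the
# header, with all values kept as strings -- hence the str(epoch) comparisons
# used throughout `main` below. The file name here is hypothetical.
def _example_read_csv(path="example.csv"):
    with open(path, "w") as fp:
        fp.write("epoch,score\n0,1.5\n1,2.0\n")
    rows = read_csv(path)
    assert rows[0] == {"epoch": "0", "score": "1.5"}
    os.remove(path)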
def main():
parser = argparse.ArgumentParser(description="Run submission script.")
parser.add_argument("--data_dir", type=str, help="Where is the base output directory for each run?")
parser.add_argument("--dump", type=str, help="Where to dump this?", default=".")
parser.add_argument("--epoch", type=int, help="Epoch to pull data for?")
parser.add_argument("--epoch_range", type=int, help="The range (in epochs) to collect time series data?", nargs=2)
parser.add_argument("--trait_cov_thresh", type=float, help="Threshold for trait score to count toward trait coverage", default=default_trait_cov_thresh)
# Parse user arguments
args = parser.parse_args()
data_dir = args.data_dir
dump_dir = args.dump
epoch = args.epoch
epoch_range = args.epoch_range
trait_cov_thresh = args.trait_cov_thresh
# Does data directory exist, if not exit.
if not os.path.exists(data_dir):
print("Unable to find data directory.")
exit(-1)
# Create directory to dump output
os.makedirs(name=dump_dir, exist_ok=True)
# Aggregate run directories
run_dirs = [run_dir for run_dir in os.listdir(data_dir) if run_identifier in run_dir]
print(f"Found {len(run_dirs)} run directories.")
# TODO - create time series file(s)
# The summary file contains one line per run
experiment_summary_header = None
experiment_summary_content_lines = []
# For each run directory,
# - get configuration, TODO
skipped_runs = []
for run_dir in run_dirs:
run_path = os.path.join(data_dir, run_dir)
experiment_summary_info = {} # Hold summary information about this run.
print(f"Processing: {run_path}")
# Add generic info
experiment_summary_info["trait_cov_thresh"] = trait_cov_thresh
#######################################################################
# Extract run configuration (from output/run_config.csv)
#######################################################################
run_config_path = os.path.join(run_path, "output", "run_config.csv")
run_config_data = read_csv(run_config_path)
# Add each experiment-wide parameter to summary info
for line in run_config_data:
if line["source"] == "experiment":
experiment_summary_info[line["parameter"]] = line["value"]
# Accumulate world-level parameter names
world_params = {line["parameter"]:set() for line in run_config_data if "world_" in line["source"]}
# Collect the distinct values for each world-level parameter
for line in run_config_data:
param = line["parameter"]
if param in world_params:
world_params[param].add(line["value"])
# Only add world-level parameters to summary info if value is shared across all worlds
for param in world_params:
if len(world_params[param]) == 1:
experiment_summary_info[param] = list(world_params[param])[0]
# Grab a few useful parameter values
num_pops = int(experiment_summary_info["NUM_POPS"])
max_pop_size = int(experiment_summary_info["world_size"])
max_update = int(experiment_summary_info["UPDATES_PER_EPOCH"])
#######################################################################
# Extract run systematics
#######################################################################
run_sys_path = os.path.join(run_path, "output", "systematics.csv")
run_sys_data = read_csv(run_sys_path)
# Extract target epoch systematics data
targ_epoch_run_sys_data = [line for line in run_sys_data if line["epoch"]==str(epoch)]
if len(targ_epoch_run_sys_data) != 1:
print(" Failed to find target epoch, or too many target lines.", targ_epoch_run_sys_data)
skipped_runs.append(run_dir)
continue
targ_epoch_run_sys_data = targ_epoch_run_sys_data[0]
# Add systematics fields to experiment summary
for field in systematics_fields:
experiment_summary_info[field] = targ_epoch_run_sys_data[field]
# TODO - time series data!
# Clear out data list
run_sys_data = None
#######################################################################
# Extract run world evaluation data
#######################################################################
run_world_eval_path = os.path.join(run_path, "output", "world_evaluation.csv")
run_world_eval_data = read_csv(run_world_eval_path)
# Extract target epoch evaluation data
targ_epoch_run_world_eval_data = [line for line in run_world_eval_data if line["epoch"]==str(epoch)]
if len(targ_epoch_run_world_eval_data) != 1:
print(" Failed to find target epoch, or too many target lines.", targ_epoch_run_world_eval_data)
skipped_runs.append(run_dir)
continue
targ_epoch_run_world_eval_data = targ_epoch_run_world_eval_data[0]
# Add world evaluation fields to experiment summary
for field in world_eval_fields:
experiment_summary_info[field] = targ_epoch_run_world_eval_data[field]
# Compute secondary experiment summary fields
aggregate_scores = json.loads(targ_epoch_run_world_eval_data["aggregate_scores"])
scores = json.loads(targ_epoch_run_world_eval_data["scores"])
# Based on target epoch
# max_aggregate_score
max_aggregate_score = max(aggregate_scores)
experiment_summary_info["max_aggregate_score"] = max_aggregate_score
# total_trait_coverage
trait_coverage = {j for world_scores in scores for j in range(len(world_scores)) if world_scores[j] >= trait_cov_thresh}
total_trait_coverage = len(trait_coverage)
experiment_summary_info["total_trait_coverage"] = total_trait_coverage
# max_trait_coverage
by_world_trait_coverage = [len([j for j in range(len(world_scores)) if world_scores[j] >= trait_cov_thresh]) for world_scores in scores]
max_trait_coverage = max(by_world_trait_coverage)
experiment_summary_info["max_trait_coverage"] = max_trait_coverage
# Based on whole run (<= epoch)
unique_selected = []
entropy_selected = []
for line in run_world_eval_data:
if int(line["epoch"]) > epoch: continue
num_unique_selected = int(line["num_unique_selected"])
unique_selected.append(num_unique_selected)
selected = json.loads(line["selected"])
selected_dist = [0 for _ in range(num_pops)]
for id in selected:
selected_dist[int(id)] += 1
entropy_selected.append(entropy(selected_dist, base=2))
avg_unique_selected = sum(unique_selected) / len(unique_selected)
avg_entropy_selected = sum(entropy_selected) / len(entropy_selected)
experiment_summary_info["avg_unique_selected"] = avg_unique_selected
experiment_summary_info["avg_entropy_selected"] = avg_entropy_selected # NOTE - not sure if this is a good way of getting at this
# Clear out data list
run_world_eval_data = None
#######################################################################
# Extract run world summary data
#######################################################################
run_world_summary_path = os.path.join(run_path, "output", "world_summary.csv")
run_world_summary_data = read_csv(run_world_summary_path)
# Extract target epoch summary data
targ_epoch_run_world_summary_data = [line for line in run_world_summary_data if line["epoch"]==str(epoch) and line["world_update"]==str(max_update)]
if len(targ_epoch_run_world_summary_data) != num_pops:
print(" Failed to find target epoch, or too many target lines.", targ_epoch_run_world_summary_data)
skipped_runs.append(run_dir)
continue
# Experiment summary info
# average num orgs
num_orgs = [int(line["num_orgs"]) for line in targ_epoch_run_world_summary_data]
avg_num_orgs = sum(num_orgs) / len(num_orgs)
experiment_summary_info["avg_num_orgs"] = avg_num_orgs
# average gen
gens = [float(line["avg_generation"]) for line in targ_epoch_run_world_summary_data]
avg_gens = sum(gens) / len(gens)
experiment_summary_info["avg_gens"] = avg_gens
# Clear out data list
run_world_summary_data = None
#######################################################################
# Add experiment summary info to experiment summary file content
#######################################################################
experiment_summary_fields = list(experiment_summary_info.keys())
experiment_summary_fields.sort()
        if experiment_summary_header is None:
experiment_summary_header = experiment_summary_fields
elif experiment_summary_header != experiment_summary_fields:
print("Header mismatch!")
exit(-1)
# If field contains commas, surround in quotation marks
for field in experiment_summary_fields:
if "," in str(experiment_summary_info[field]):
experiment_summary_info[field] = f'"{str(experiment_summary_info[field])}"'
experiment_summary_line = [str(experiment_summary_info[field]) for field in experiment_summary_fields]
experiment_summary_content_lines.append(",".join(experiment_summary_line))
# --> END RUNS FOR LOOP <--
# write out aggregate data
with open(os.path.join(dump_dir, "experiment_summary.csv"), "w") as fp:
out_content = ",".join(experiment_summary_header) + "\n" + "\n".join(experiment_summary_content_lines)
fp.write(out_content)
print(f"Skipped ({len(skipped_runs)}):")
for run_dir in skipped_runs:
print(f" - {run_dir}")
if __name__ == "__main__":
main()
|
{"hexsha": "cb4312613a70116f6dbfe098b1a625bc3afc49cd", "size": 11343, "ext": "py", "lang": "Python", "max_stars_repo_path": "experiments/2021-09-22-selection/analysis/aggregate.py", "max_stars_repo_name": "amlalejini/directed-digital-evolution", "max_stars_repo_head_hexsha": "eb19ea9182e2e9b203513433e90bd73d4768a5a1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-09-10T15:32:52.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-10T21:30:25.000Z", "max_issues_repo_path": "experiments/2021-09-22-selection/analysis/aggregate.py", "max_issues_repo_name": "amlalejini/directed-digital-evolution", "max_issues_repo_head_hexsha": "eb19ea9182e2e9b203513433e90bd73d4768a5a1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "experiments/2021-09-22-selection/analysis/aggregate.py", "max_forks_repo_name": "amlalejini/directed-digital-evolution", "max_forks_repo_head_hexsha": "eb19ea9182e2e9b203513433e90bd73d4768a5a1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.7379032258, "max_line_length": 164, "alphanum_fraction": 0.636604073, "include": true, "reason": "from scipy", "num_tokens": 2328}
|
#define BOOST_TEST_DYN_LINK
#define BOOST_TEST_MODULE Regression
#include <stdio.h>
#include <stdlib.h>
#include <string>
#include <boost/test/included/unit_test.hpp>
#include <xolotl/perf/EventCounter.h>
using namespace std;
using namespace xolotl::perf;
/**
* This suite is responsible for testing the EventCounter.
*/
BOOST_AUTO_TEST_SUITE(EventCounter_testSuite)
BOOST_AUTO_TEST_CASE(checkInitialValue)
{
auto tester = EventCounter();
BOOST_TEST_MESSAGE("\n"
<< "EventCounter Message: \n"
<< "tester.getValue() " << tester.getValue() << "\n");
// Require that the value of this EventCounter is 0
BOOST_REQUIRE_EQUAL(0U, tester.getValue());
}
BOOST_AUTO_TEST_CASE(checkCounting)
{
auto tester = EventCounter();
for (int i = 0; i < 3; i++) {
// increment the EventCounter
tester.increment();
}
BOOST_TEST_MESSAGE("\n"
<< "EventCounter Message: \n"
<< "tester.getValue() = " << tester.getValue() << "\n");
// Require that the value of this EventCounter is 3
BOOST_REQUIRE_EQUAL(3U, tester.getValue());
}
BOOST_AUTO_TEST_SUITE_END()
|
{"hexsha": "1d327c50c1839e16ad67b595c1a27c5f5b9a9cb8", "size": 1071, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "test/perf/EventCounterTester.cpp", "max_stars_repo_name": "ORNL-Fusion/xolotl", "max_stars_repo_head_hexsha": "993434bea0d3bca439a733a12af78034c911690c", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 13.0, "max_stars_repo_stars_event_min_datetime": "2018-06-13T18:08:57.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-30T02:39:01.000Z", "max_issues_repo_path": "test/perf/EventCounterTester.cpp", "max_issues_repo_name": "ORNL-Fusion/xolotl", "max_issues_repo_head_hexsha": "993434bea0d3bca439a733a12af78034c911690c", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 97.0, "max_issues_repo_issues_event_min_datetime": "2018-02-14T15:24:08.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-29T16:29:48.000Z", "max_forks_repo_path": "test/perf/EventCounterTester.cpp", "max_forks_repo_name": "ORNL-Fusion/xolotl", "max_forks_repo_head_hexsha": "993434bea0d3bca439a733a12af78034c911690c", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 18.0, "max_forks_repo_forks_event_min_datetime": "2018-02-13T20:36:03.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-04T14:54:16.000Z", "avg_line_length": 21.0, "max_line_length": 58, "alphanum_fraction": 0.7189542484, "num_tokens": 269}
|
#!/usr/bin/env python
import os
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Model
from scipy.misc import imread
from sklearn.cluster import KMeans
from sklearn.decomposition.pca import PCA
from tqdm import tqdm
from utils import (kernel_classifier_distance_and_std_from_activations,
load_image_names)
class PerceptualScores:
EXTRACTOR_NAMES = ["MobileNetV2", "ResNet50", "VGG16", "VGG19"]
def __init__(self, config):
# pylint: disable=no-else-raise
self._config = config
self._real_activations = None
if self._config.extractor_name == "MobileNetV2":
from tensorflow.keras.applications.mobilenet_v2 import MobileNetV2
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
model = MobileNetV2(include_top=False, weights="imagenet", alpha=1.4)
self._preprocess = preprocess_input
raise NotImplementedError("Need to update blocks...")
elif self._config.extractor_name == "ResNet50":
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.applications.resnet50 import preprocess_input
model = ResNet50(include_top=False, weights="imagenet")
self._preprocess = preprocess_input
raise NotImplementedError("Need to update blocks...")
elif self._config.extractor_name == "VGG16":
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.applications.vgg16 import preprocess_input
model = VGG16(include_top=False, weights="imagenet")
self._preprocess = preprocess_input
raise NotImplementedError("Need to update blocks...")
elif self._config.extractor_name == "VGG19":
from tensorflow.keras.applications.vgg19 import VGG19
from tensorflow.keras.applications.vgg19 import preprocess_input
model = VGG19(include_top=False, weights="imagenet")
self._extractor = Model(inputs=model.input, outputs=
[model.get_layer("block{}_pool".format(i)).output for i in range(1, 6)])
self._preprocess = preprocess_input
else:
raise ValueError("Unknown feature extractor '{}'".format(self._config.extractor_name))
self._pca = None
self._high_dimensional_kmeans = None
self._low_dimensional_kmeans = None
def _get_activations_from_images(self, all_image_names):
activations = []
data = tf.data.Dataset.from_tensor_slices(all_image_names).batch(self._config.batch_size)
tf.logging.info("Computing activations for {} images".format(len(all_image_names)))
for image_names in tqdm(data, total=len(all_image_names) // self._config.batch_size + 1):
images = [imread(image_name.numpy().decode("utf-8"), mode="RGB") for image_name in image_names]
batch = tf.cast(tf.stack(images), dtype=tf.float32)
activations.append([tf.reduce_mean(features, axis=[1, 2]) for features in self._extractor(self._preprocess(batch))])
return [tf.concat([act[i] for act in activations], axis=0) for i in range(len(activations[0]))]
def _get_activations_from_generator(self, generator, data_set):
activations = []
tf.logging.debug("Computing activations for newly-generated samples")
for batch in data_set:
samples = tf.cast(tf.cast((generator(batch)+1) * 127.5, dtype=tf.int32), dtype=tf.float32) # denormalize to normal RGB
activations.append([tf.reduce_mean(features, axis=[1, 2]) for features in self._extractor(self._preprocess(samples))])
return [tf.concat([act[i] for act in activations], axis=0) for i in range(len(activations[0]))]
def initialize(self, override_data_dir=None):
assert self._real_activations is None
data_dir = override_data_dir if override_data_dir else \
(self._config.target_data_dir if self._config.target_data_dir else self._config.data_dir)
activations_file = os.path.join("data", data_dir, "activations_{}.npz".format(self._config.extractor_name))
if os.path.exists(activations_file):
tf.logging.info("Loading activations from {}".format(activations_file))
with np.load(activations_file) as activations:
self._real_activations = [tf.convert_to_tensor(activations[f]) for f in sorted(activations.files)]
else:
tf.logging.warning("Computing activations for real images in '{}'".format(data_dir))
self._real_activations = self._get_activations_from_images(load_image_names(data_dir))
tf.logging.info("Saving activations to {}".format(activations_file))
np.savez(activations_file, **{"block_{}".format(i): act.numpy() for i, act in enumerate(self._real_activations)})
tf.logging.debug("Fitting PCA")
self._pca = PCA(n_components=2)
low_dimensional_real_activations = self._pca.fit_transform(self._real_activations[-1])
tf.logging.debug("Explained variance: {} ({:.5f})".format(
self._pca.explained_variance_ratio_, np.sum(self._pca.explained_variance_ratio_)))
high_dimensional_clusters = 7
tf.logging.debug("Clustering high-dimensional activations with {} clusters".format(high_dimensional_clusters))
self._high_dimensional_kmeans = KMeans(n_clusters=high_dimensional_clusters)
self._high_dimensional_kmeans.fit(self._real_activations[-1])
tf.logging.debug("Inertia: {:.1f}".format(self._high_dimensional_kmeans.inertia_))
low_dimensional_clusters = 4
tf.logging.debug("Clustering low-dimensional activations with {} clusters".format(low_dimensional_clusters))
self._low_dimensional_kmeans = KMeans(n_clusters=low_dimensional_clusters)
self._low_dimensional_kmeans.fit(low_dimensional_real_activations)
tf.logging.debug("Inertia: {:.1f}".format(self._low_dimensional_kmeans.inertia_))
def _compute_scores_from_activations(self, generated_activations):
fid = tf.contrib.gan.eval.frechet_classifier_distance_from_activations(self._real_activations[-1], generated_activations[-1])
mmd, _ = kernel_classifier_distance_and_std_from_activations(self._real_activations[-1], generated_activations[-1])
low_level_fids = [
tf.contrib.gan.eval.frechet_classifier_distance_from_activations(self._real_activations[i], generated_activations[i]) \
for i in range(len(self._real_activations)-1)]
combined_fid = tf.contrib.gan.eval.frechet_classifier_distance_from_activations(
tf.concat(self._real_activations, axis=-1), tf.concat(generated_activations, axis=-1))
# high_dimensional_cluster_distances = tf.reduce_min(self._high_dimensional_kmeans.transform(generated_activations), axis=-1)
# low_dimensional_cluster_distances = tf.reduce_min(self._low_dimensional_kmeans.transform(self._pca.transform(generated_activations)), axis=-1)
# mean_std = lambda d: (tf.reduce_mean(d), tf.convert_to_tensor(np.std(d)))
# return fid, k_mmd, mean_std(high_dimensional_cluster_distances), mean_std(low_dimensional_cluster_distances)
return fid, mmd, -self._high_dimensional_kmeans.score(generated_activations[-1]), \
-self._low_dimensional_kmeans.score(self._pca.transform(generated_activations[-1])), low_level_fids, combined_fid
def compute_scores_from_samples(self):
assert os.path.exists(self._config.samples_dir)
all_image_names = [os.path.join(self._config.samples_dir, sample) for sample in \
sorted(os.listdir(self._config.samples_dir)) if sample.endswith(".png")]
activations_file = os.path.join(self._config.samples_dir, "activations_{}.npz".format(self._config.extractor_name))
if os.path.exists(activations_file):
tf.logging.info("Loading activations from {}".format(activations_file))
      npz = np.load(activations_file)
      generated_activations = [tf.convert_to_tensor(npz[f]) for f in sorted(npz.files)]
else:
tf.logging.warning("Computing activations for generated images in '{}'".format(self._config.samples_dir))
generated_activations = self._get_activations_from_images(all_image_names)
tf.logging.info("Saving activations to {}".format(activations_file))
      np.savez(activations_file, **{"block_{}".format(i): act.numpy() for i, act in enumerate(generated_activations)})
tf.logging.info("Computing scores")
return self._compute_scores_from_activations(generated_activations)
def compute_scores_from_generator(self, generator, data_set):
generated_activations = self._get_activations_from_generator(generator, data_set)
tf.logging.debug("Computing scores")
return self._compute_scores_from_activations(generated_activations)
|
{"hexsha": "f74a1f06253c573af8f4b81d28f7a232ac6ab9ae", "size": 8429, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/perceptual_scores.py", "max_stars_repo_name": "furgerf/GAN-for-dermatologic-imaging", "max_stars_repo_head_hexsha": "e90b06c46c7693e984a4c5b067e18460113cd23b", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/perceptual_scores.py", "max_issues_repo_name": "furgerf/GAN-for-dermatologic-imaging", "max_issues_repo_head_hexsha": "e90b06c46c7693e984a4c5b067e18460113cd23b", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2020-09-26T01:22:00.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-22T18:00:52.000Z", "max_forks_repo_path": "src/perceptual_scores.py", "max_forks_repo_name": "furgerf/GAN-for-dermatologic-imaging", "max_forks_repo_head_hexsha": "e90b06c46c7693e984a4c5b067e18460113cd23b", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 56.9527027027, "max_line_length": 148, "alphanum_fraction": 0.7582156839, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1953}
|
*----------------------------------------------------------------------*
subroutine transpose_contr(contr,op_info,multi)
*----------------------------------------------------------------------*
* transpose a contraction
*----------------------------------------------------------------------*
implicit none
include 'opdim.h'
include 'stdunit.h'
include 'def_contraction.h'
include 'mdef_operator_info.h'
include 'ifc_operators.h'
integer, parameter ::
& ntest = 00
type(contraction) ::
& contr
type(operator_info) ::
& op_info
logical, intent(in), optional ::
& multi
integer ::
& nvtx, narc, nxarc, ivtx, jvtx, ihelp, isuper, idx,
& idxst, idxnd, njoined, njoined_res, iarc, ifac
logical ::
& skip, reo, use_multi, scal
type(operator), pointer ::
& op
type(cntr_vtx) ::
& vhelp
type(cntr_vtx), pointer ::
& vertex(:)
type(cntr_arc), pointer ::
& arc(:), xarc(:)
integer, pointer ::
& svertex(:), occ_vtx(:,:,:), vtx_reo(:), occ(:,:,:)
logical, pointer ::
& fix_vtx(:)
integer, external ::
& iblk_occ
logical, external ::
& occ_is_diag_blk
if (ntest.ge.100) then
call write_title(lulog,wst_dbg_subr,'transpose_contr')
write(lulog,*) 'on input:'
call prt_contr2(lulog,contr,op_info)
end if
if (present(multi)) then
use_multi = multi
else
use_multi = .false.
endif
nvtx = contr%nvtx
vertex => contr%vertex
svertex => contr%svertex
narc = contr%narc
arc => contr%arc
nxarc = contr%nxarc
xarc => contr%xarc
      ! A new logical 'scal' is used so that the dagger is not
      ! toggled while transposing the formula for the operator
      ! corresponding to CI coefficients
if(use_multi) then
scal = op_info%op_arr(contr%idx_res)%op%n_occ_cls.gt.1
else
scal = .true.
end if
! set transposition info for result
if (scal) contr%dagger = .not.contr%dagger
! process vertices
! reverse sequence
do ivtx = 1, nvtx/2
vhelp = vertex(ivtx)
vertex(ivtx) = vertex(nvtx-ivtx+1)
vertex(nvtx-ivtx+1) = vhelp
end do
do ivtx = 1, nvtx/2
ihelp = svertex(ivtx)
svertex(ivtx) = svertex(nvtx-ivtx+1)
svertex(nvtx-ivtx+1) = ihelp
end do
! update transposition info or block of operator
do ivtx = 1, nvtx
c dbg
c print *,'ivtx',ivtx
c dbg
! handle super-vertex only once
isuper = svertex(ivtx)
skip = .false.
do jvtx = 1, ivtx-1 ! previously visited?
skip = skip.or.(isuper.eq.svertex(jvtx))
end do
if (skip) cycle
if (vertex(ivtx)%dagger) then
      ! if the vertex was transposed, we know that iblk
      ! refers to the un-transposed operator, so we merely
      ! change the dagger label
vertex(ivtx)%dagger = .false.
do jvtx = ivtx+1, nvtx
if (svertex(jvtx).ne.isuper) cycle
vertex(jvtx)%dagger = .false.
end do
else
! find out whether the transposed block exists:
op => op_info%op_arr(vertex(ivtx)%idx_op)%op
njoined = op%njoined
idxnd = vertex(ivtx)%iblk_op
idxst = idxnd-njoined+1
c idxst = vertex(ivtx)%iblk_op
c idxnd = idxst+njoined-1
occ => op%ihpvca_occ(1:ngastp,1:2,idxst:idxnd)
c dbg
c print *,'idxst, idxnd: ',idxst,idxnd
c call wrt_occ_n(6,occ,op%njoined)
c dbg
! ... but only if we have off-diagonal blocks
! or if we know that the operator is hermitian
if (.not.occ_is_diag_blk(occ,njoined).or.
& abs(op%hermitian).eq.1)
& then
idx = iblk_occ(occ,.true.,op,
& op%blk_version((idxst-1)/njoined+1))
! we have to change the overall sign, if the
! operator is anti-hermitian:
if (op%hermitian.eq.-1)
& contr%fac = -contr%fac
else
idx = -1
end if
c dbg
c print *,'idx = ',idx,occ_is_diag_blk(occ,njoined),op%hermitian
c dbg
if (idx.gt.0) then
idx = (idx-1)*njoined+1
do jvtx = ivtx, nvtx
if (svertex(jvtx).ne.isuper) cycle
vertex(jvtx)%iblk_op = idx
idx = idx+1
end do
else
do jvtx = ivtx, nvtx
if (svertex(jvtx).ne.isuper) cycle
vertex(jvtx)%dagger = .true.
end do
end if
end if
end do
call update_svtx4contr(contr)
! update arcs:
do iarc = 1, narc
ihelp = nvtx-arc(iarc)%link(1)+1
arc(iarc)%link(1) = nvtx-arc(iarc)%link(2)+1
arc(iarc)%link(2) = ihelp
! no adjoint here !
arc(iarc)%occ_cnt = arc(iarc)%occ_cnt
c arc(iarc)%occ_cnt = iocc_dagger(arc(iarc)%occ_cnt)
end do
if (nxarc.gt.0) then
op => op_info%op_arr(contr%idx_res)%op
njoined_res = op%njoined
end if
! update xarcs:
do iarc = 1, nxarc
xarc(iarc)%link(1) = nvtx-xarc(iarc)%link(1)+1
xarc(iarc)%link(2) = njoined_res-xarc(iarc)%link(2)+1
xarc(iarc)%occ_cnt = iocc_dagger(xarc(iarc)%occ_cnt)
end do
c dbg
c call prt_contr2(lulog,contr,op_info)
c dbg
allocate(occ_vtx(ngastp,2,nvtx),vtx_reo(nvtx),fix_vtx(nvtx))
fix_vtx = .true. ! not important
call occvtx4contr(1,occ_vtx,contr,op_info)
call topo_contr(ifac,reo,vtx_reo,contr,occ_vtx,fix_vtx)
call canon_contr(contr,reo,vtx_reo)
deallocate(occ_vtx,vtx_reo,fix_vtx)
if (ntest.ge.100) then
write(lulog,*) 'on output:'
call prt_contr2(lulog,contr,op_info)
end if
return
end
|
{"hexsha": "b32a66c02e331559da43ecbe21f893baf461a5b9", "size": 6073, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "formula/transpose_contr.f", "max_stars_repo_name": "ak-ustutt/GeCCo-public", "max_stars_repo_head_hexsha": "8d43a6c9323aeba7eb54625b95553bfd4b2418c6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "formula/transpose_contr.f", "max_issues_repo_name": "ak-ustutt/GeCCo-public", "max_issues_repo_head_hexsha": "8d43a6c9323aeba7eb54625b95553bfd4b2418c6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "formula/transpose_contr.f", "max_forks_repo_name": "ak-ustutt/GeCCo-public", "max_forks_repo_head_hexsha": "8d43a6c9323aeba7eb54625b95553bfd4b2418c6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.2139303483, "max_line_length": 73, "alphanum_fraction": 0.5277457599, "num_tokens": 1794}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 29 16:02:24 2021
@author: jacobfaibussowitsch
"""
import os
import pickle
from collections import namedtuple
import contextlib
import meshio
import pytest
import numpy as np
import scipy.sparse as scp
import pyhesive  # needed by commonPartitionSetup below
cur_dir = os.path.basename(os.getcwd())
if cur_dir == "test":
test_root_dir = os.getcwd()
elif cur_dir == "pyhesive":
parent_dir = os.path.basename(os.path.dirname(os.getcwd()))
if parent_dir == "pyhesive":
# we are in pyhesive/pyhesive, i.e. the package dir
test_root_dir = os.path.join(os.getcwd(),"test")
elif "pyhesive" in os.listdir():
# we are in root pyhesive dir
test_root_dir = os.path.join(os.getcwd(),"pyhesive","test")
else:
raise RuntimeError("Cannot determine location")
else:
raise RuntimeError("Cannot determine location")
test_root_dir = os.path.realpath(os.path.expanduser(test_root_dir))
pyhesiveRootDir = os.path.dirname(test_root_dir)
data_dir = os.path.join(test_root_dir,"data")
mesh_dir = os.path.join(data_dir,"meshes")
bin_dir = os.path.join(data_dir,"bin")
data_set_named_tuple = namedtuple("DataSet",["mesh","adjacency","closure","partitionData"])
data_set_named_tuple.__new__.__defaults__ = (None,)*len(data_set_named_tuple._fields)
class DataSet(data_set_named_tuple):
__slots__ = ()
@contextlib.contextmanager
def no_except():
yield
def store_obj(filename,obj):
with open(filename,"wb") as fd:
# protocol 4 since python3.4
pickle.dump(obj,fd)
def load_obj(filename):
with open(filename, "rb") as f:
return pickle.load(f)
def assert_scipy_all_close(A,B,rtol=1e-7,atol=1e-8):
def _scipy_all_close_lil():
assert A.shape == B.shape
for i in range(A.get_shape()[0]):
rowA = A.getrowview(i).toarray()
rowB = B.getrowview(i).toarray()
np.testing.assert_allclose(rowA,rowB,rtol=rtol,atol=atol)
return
def _scipy_all_close_sparse():
# If you want to check matrix shapes as well
np.testing.assert_allclose(A.shape,B.shape,rtol=rtol,atol=atol)
r1,c1 = A.nonzero()
r2,c2 = B.nonzero()
lidx1 = np.ravel_multi_index((r1,c1),A.shape)
lidx2 = np.ravel_multi_index((r2,c2),B.shape)
sidx1 = lidx1.argsort()
sidx2 = lidx2.argsort()
np.testing.assert_allclose(lidx1[sidx1],lidx2[sidx2],rtol=rtol,atol=atol)
v1,v2 = A.data,B.data
V1,V2 = v1[sidx1],v2[sidx2]
np.testing.assert_allclose(V1,V2,rtol=rtol,atol=atol)
return
assert type(A) == type(B)
if isinstance(A,scp.lil_matrix):
_scipy_all_close_lil()
else:
_scipy_all_close_sparse()
return
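# Usage sketch (ours): both the LIL path and the generic sparse path accept a
# pair of same-typed scipy matrices and raise on any mismatch.
def _example_assert_scipy_all_close():
  A = scp.lil_matrix(np.eye(3))
  B = scp.lil_matrix(np.eye(3))
  assert_scipy_all_close(A, B)  # passes; a perturbed B would raise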
def assertNumpyArrayEqual(self,first,second,msg=None):
assert type(first) == np.ndarray,msg
assert type(first) == type(second),msg
try:
np.testing.assert_array_almost_equal_nulp(first,second)
except AssertionError:
additionalMsg = "Locations: %s" % (np.argwhere(first!=second))
if msg is not None:
msg = "\n".join([msg,additionalMsg])
else:
msg = additionalMsg
pytest.fail(msg)
def trygetattr(obj,attr):
if hasattr(obj,attr):
return getattr(obj,attr)
return
def commonPartitionSetup(meshFileList,testFileList,partitionList,replaceFunc,testFunc):
for meshFile,testFile,partList in zip(meshFileList,testFileList,partitionList):
with pyhesive.Mesh.fromFile(meshFile) as pyh:
if replaceFiles:
replaceFunc(pyh,testFile,partList)
continue
      testDict = load_obj(testFile)
for part,testPart in zip(partList,testDict):
assert part == testPart
pyh.PartitionMesh(part)
testFunc(pyh,testDict,part)
|
{"hexsha": "c9e98eecff1a8746e0070f3fcc5dc70e4e945977", "size": 3613, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyhesive/test/common.py", "max_stars_repo_name": "Johnson-Research-Group/Pyhesive", "max_stars_repo_head_hexsha": "327f204b445a0db251088a29c0c3706593833d3a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pyhesive/test/common.py", "max_issues_repo_name": "Johnson-Research-Group/Pyhesive", "max_issues_repo_head_hexsha": "327f204b445a0db251088a29c0c3706593833d3a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pyhesive/test/common.py", "max_forks_repo_name": "Johnson-Research-Group/Pyhesive", "max_forks_repo_head_hexsha": "327f204b445a0db251088a29c0c3706593833d3a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.6147540984, "max_line_length": 91, "alphanum_fraction": 0.704400775, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1016}
|
\section{Buffer abstraction}
As we have seen, some of the statements available to the programmer make use of a write buffer. Since the same program may exhibit different behaviours under different memory models, we face the challenge of rewriting the program in such a way that its behaviour is conclusively defined, independent of the memory model chosen. We will do this by completely eliminating any buffer access, abstracting the buffers as sets of local variables. In order to be able to assume the absence of buffer operations, we will need to eliminate fence and flush statements. As for remote stores and loads, we can replace them with code that simulates their buffer-related behaviours using non-WMM statements, retaining the use of store and load statements to represent assignment statements involving global variables without the involvement of the buffer. Since this intermediary code will never be run, but will instead be further abstracted in the predicate abstraction stage, we may safely ignore any regular WMM behaviour such statements might have, as we will handle store and load statements like local assignments for predicate abstraction purposes. The reason we refrain from using local assignment statements instead of store or load statements in our abstracted program is our desire to preserve syntactic correctness even in our intermediary program, and to highlight the fact that, at certain points, load statements will actually perform a remote read, and that flush statements may at some point actually perform remote writes.\\
\subsection{Buffer size analysis}
For the purpose of determining the maximum buffer space needed by any global variable, we count the number of store statements in the control flow of the program within each process $i$. In case there are less store statements on a variable than the user-specified maximum buffer size $K$, we will only allocate the necessary number of buffer variables, which is the minimum of $K$ and the number of stores on that variable in the process $i$. We will refer to this number as $s_{x,i}$ for any global variable \lstinline$x$ when employing PSO, and $s_i$ for TSO.\\
In detail, we first take the AST representing the program, and cascade through it from the root to the leaves. Whenever we encounter a program point, we draw one control flow edge from that program point to any other program point that might directly follow in a possible sequential execution order. As shown in Figure 1, keywords such as \lstinline$else$ and \lstinline$endif$ are not program points, yet \lstinline$nop$ or unlabelled statements are.\\
Once we have the control flow graph, we start at the first program point in every process $i$ with a visitor having $s_i$ or $s_{x, i}$ for all global variables \lstinline$x$ set to $0$, and traverse the entire graph until we encounter a dead end, or an already visited node (which is the case for cyclic control flow graphs). Every time we visit a program point, we take the maxima of the visitor's $s_{x, i}$ or $s_i$, depending on the current store order, and the current node's corresponding values, and store them in both the node and the visitor. If the visited node is a store, we increment the corresponding values by 1 beforehand. Figure 2. shows the snapshot of the state of the program with a visitor just having visited the third node from the top.\\
Note that all buffer size entries are initialized to $0$. The visitor propagates the accumulated values to every subsequent node visited. If a visitor encounters a node with more than one outgoing edge, it spawns a copy of itself for every additional edge. These copies carry the same values as their predecessors, and they have the same record of visited nodes, i.e. if a visitor comes upon a node that had already been visited by any one of its ancestors, it will also regard that node as visited. Conversely, if a visitor encounters a node which had been visited by at least one other visitor, but none of its direct ancestors, it will not regard that node as visited, and it will continue down the control flow graph as usual.\\
If a visitor encounters a node it regards as visited, it still increments the corresponding value as needed. At this point, if there is any value stored in the node that is smaller than its counterpart stored by the visitor, an arbitrary increase of that value is detected and that value is set to TOP (which is regarded as infinitely large for comparison purposes). The node communicates this change in the direction of the control flow, and every node situated downstream of the current node also gets its corresponding value set to TOP. At this point, the visitor stops. Figure 3. shows the snapshot of the state of the program with a visitor just having revisited the third node from the top, Figure 4. shows the aftermath of the TOP values cascading along the control flow edges, and finally, Figure 5. shows the state after the last visitor reaches the bottommost node, having been spawned in the fourth node from the top.\\
Note that while the order in which the paths of multiple visitors are evaluated is irrelevant, the fact that visitors affect each other by editing node values creates a race condition on nodes that are potentially visited by more than one visitor. Thus, the buffer size analysis is not parallelizable within a single process. However, different processes may be examined in parallel.
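As an illustrative example (ours, following the rules above): a straight-line process $i$ containing exactly two store statements on \lstinline$x$ yields $s_{x,i} = \min(K, 2)$, whereas placing either store inside a loop lets some visitor revisit that store node with an incremented count; the arbitrary increase is then detected, the corresponding value is set to TOP, and effectively $\min(K, \text{TOP}) = K$ buffer variables are allocated.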
\begin{figure}[h]
\caption{Control flow example}
\centering
\includegraphics[width=5cm]{png/controlflow_01.png}
\end{figure}
\pagebreak
\begin{figure}[h]
\caption{Visitor reached 3rd node}
\centering
\includegraphics[width=12cm]{png/controlflow_02.png}
\end{figure}
\begin{figure}[h]
\caption{Visitor reached 3rd node again}
\centering
\includegraphics[width=12cm]{png/controlflow_03.png}
\end{figure}
\pagebreak
\begin{figure}[h]
\caption{TOP values cascaded}
\centering
\includegraphics[width=12cm]{png/controlflow_04.png}
\end{figure}
\begin{figure}[h]
\caption{Visitor reached last node}
\centering
\includegraphics[width=12cm]{png/controlflow_05.png}
\end{figure}
\FloatBarrier
\pagebreak
\subsection{PSO abstraction}
Using PSO, every global variable has a separate buffer per process of the size $s_{x,i}$. Therefore, for each process $i$, for each global variable \lstinline$x$, there are $s_{x,i}$ buffer variables of the form \lstinline$x_j_i$, where $j \in [1, s_{x,i}]$. Since flushes are non-deterministic, we cannot keep track of occupied buffer spaces. Therefore, we declare 2 additional auxiliary variables for each global variable in each process: \lstinline$x_cnt_i$, which is initialised to 0 and otherwise has the value of the last buffer index to which a value of \lstinline$x$ was written, and \lstinline$x_fst_i$, which is analogous to \lstinline$x_cnt_i$ with the difference that it points to the first such index. Figure 6. illustrates the buffer allocation abstraction we apply for PSO programs.\\
\begin{figure}[h]
\caption{PSO buffer abstraction approach}
\centering
\includegraphics[width=13cm]{png/pso_buffer.png}
\end{figure}
\pagebreak
\subsection{TSO abstraction}
Using TSO, every process has a separate buffer of the size $K$, which all global variables share. The buffer holds all values in an order-preserving manner: A value $v_{1}$ that is stored in the buffer before another value $v_{2}$ will also be written to memory before $v_{2}$. Therefore, the store order of values must be preserved in the program.\\
An economical way to do this is to keep track of the last element inserted into the buffer, and store the next value in the following slot. Since we cannot know the actual run-time allocation of the buffer, we will only be able to analyse the maximum buffer size of the entire process (which we express by $s_{i}$), but not of the individual variables. For each buffer slot, we will declare two variables of the form \lstinline$buf_j_i$ and \lstinline$own_j_i$, where $j \in [1, s_{i}]$.\\
We will also statically build an allocator table, which will pair every global variable to a unique numeric value which will allow us to identify the values stored in the buffer as belonging to its owner variable at run-time. We will use the notation $\rho(v)$ to denote the numeric allocator key of the variable \lstinline$v$. Thus, for every buffer slot with the number $j \in [1, s_{i}]$ in every process $i$, we will have a variable \lstinline$buf_j_i$ (which we will call a buffer variable) containing the value currently held by that buffer slot, and a variable \lstinline$own_j_i$ (which we will call a buffer allocator) containing the allocator key of the variable whose value is stored there. If the allocator is set to $0$, the buffer is marked to be free, and if it is set to $-1$, it is marked as invalidated by a flush. Figure 7. illustrates the buffer allocation abstraction we apply for TSO programs.\\
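To illustrate (an example of ours): with two global variables \lstinline$x$ and \lstinline$y$ and the allocator table $\rho(x) = 1$, $\rho(y) = 2$, a buffer slot $j$ whose allocator \lstinline$own_j_i$ equals $2$ currently holds a pending write to \lstinline$y$ in \lstinline$buf_j_i$; a slot whose allocator equals $0$ is free, and one whose allocator equals $-1$ has been invalidated by a flush.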
\begin{figure}[h]
\caption{TSO buffer abstraction approach}
\centering
\includegraphics[width=13cm]{png/tso_buffer_02.png}
\end{figure}
\FloatBarrier
\subsection{Replacement rules}
After we compute all $s_{x,i}$ or $s_{i}$ for each global variable \lstinline$x$ and for each process $i$, we have enough data to statically perform all necessary replacements in our program to allow predicate abstraction. We will define different replacement rules for each store order.
\subsubsection{Replacing store statements}
Assuming \lstinline$x$ is a global variable, \lstinline$r$ is an integer expression, and we encounter \lstinline$store x = r$ in a process $i$, we will replace this statement with the code blocks listed below. Note that the variable symbol is appended as a comment to every corresponding if-else statement in the case of TSO. This is done in order to allow a fast context analysis on buffer variable assignments within these if-else blocks, which we will use later on.
\begin{multicols}{2}
PSO approach:
\begin{lstlisting}[frame=single, mathescape]
if (x_cnt_$i$ = $s_{x,i}$)
abort("overflow");
endif;
if (x_fst_$i$ = 0)
x_fst_$i$ = 1;
x_1_$i$ = r;
endif;
x_cnt_$i$ = x_cnt_$i$ + 1;
if (x_cnt_$i$ = 2)
x_2_$i$ = r;
endif;
$...$
if (x_cnt_$i$ = $j$)
x_$j$_$i$ = r;
endif;
$...$
if (x_cnt_$i$ = $s_{x,i}$)
x_$s_{x,i}$_$i$ = r;
endif;
\end{lstlisting}
\columnbreak
TSO approach:
\begin{lstlisting}[frame=single, mathescape]
if (own_1_$i$ = 0) // x
buf_1_$i$ = r;
own_1_$i$ = $\rho($x$)$;
goto $SEL$;
endif;
if (own_2_$i$ = 0) // x
buf_2_$i$ = r;
own_2_$i$ = $\rho($x$)$;
goto $SEL$;
endif;
$...$
if (own_$j$_$i$ = 0) // x
buf_$j$_$i$ = r;
own_$j$_$i$ = $\rho($x$)$;
goto $SEL$;
endif;
$...$
if (own_$s_{i}$_$i$ = 0) // x
buf_$s_{i}$_$i$ = r;
own_$s_{i}$_$i$ = $\rho($x$)$;
goto $SEL$;
endif;
abort("overflow");
$SEL$: nop;
\end{lstlisting}
\end{multicols}
\pagebreak
\subsubsection{Replacing load statements}
Assuming \lstinline$x$ is a global variable, \lstinline$l$ is a local variable, and we encounter \lstinline$load l = x$ in a process $i$, we will replace this statement with:
\begin{multicols}{2}
PSO approach:
\begin{lstlisting}[frame=single, mathescape]
if (x_cnt_$i$ = 0)
load l = x;
endif;
if (x_cnt_$i$ = $s_{x,i}$)
l = x_$s_{x,i}$_$i$;
endif;
$...$
if (x_cnt_$i$ = $j$)
l = x_$j$_$i$;
endif;
$...$
if (x_cnt_$i$ = 2)
l = x_2_$i$;
endif;
if (x_cnt_$i$ = 1)
l = x_1_$i$;
endif;
\end{lstlisting}
\columnbreak
TSO approach:
\begin{lstlisting}[frame=single, mathescape]
if (own_$s_{i}$_$i$ = $\rho($x$)$) // x
l = buf_$s_{i}$_$i$;
goto $LEL$;
endif;
$...$
if (own_$j$_$i$ = $\rho($x$)$) // x
l = buf_$j$_$i$;
goto $LEL$;
endif;
$...$
if (own_2_$i$ = $\rho($x$)$) // x
l = buf_2_$i$;
goto $LEL$;
endif;
if (own_1_$i$ = $\rho($x$)$) // x
l = buf_1_$i$;
goto $LEL$;
endif;
load l = x;
$LEL$: nop;
\end{lstlisting}
\end{multicols}
\subsubsection{Replacing fence statements}
Assuming we encounter \lstinline$fence;$ in a process $i$, we will replace this statement with the following block for every global variable \lstinline$x$:
\begin{multicols}{2}
PSO approach:
\begin{lstlisting}[frame=single, mathescape]
assume (x_cnt_$i$ = 0);
assume (x_fst_$i$ = 0);
\end{lstlisting}
\columnbreak
TSO approach:
\begin{lstlisting}[frame=single, mathescape]
assume (own_1_$i$ < 1);
assume (own_2_$i$ < 1);
$...$
assume (own_$j$_$i$ < 1);
$...$
assume (own_$s_{i}$_$i$ < 1);
own_1_$i$ = 0;
own_2_$i$ = 0;
$...$
own_$j$_$i$ = 0;
$...$
own_$s_{i}$_$i$ = 0;
\end{lstlisting}
\end{multicols}
\subsubsection{Replacing flush statements}
Let $\beta$ be the number of global variables in $P$ and $\rho^{-1}$ be the inverse function of $\rho$, i.e. it returns the variable symbol belonging to the input number. Assuming we encounter \lstinline$flush;$ in a process $i$, we will replace this statement with:
\begin{multicols}{2}
TSO approach:
\begin{lstlisting}[frame=single, mathescape]
if (*)
goto $FEL$;
endif;
if (own_1_$i$ > 0)
$[flush\:1]$
endif;
if (*)
goto $FEL$;
endif;
if (own_2_$i$ > 0)
$[flush\:2]$
endif;
$...$
if (*)
goto $FEL$;
endif;
if (own_$j$_$i$ > 0)
$[flush\:j]$
endif;
$...$
if (*)
goto $FEL$;
endif;
if (own_$s_{i}$_$i$ > 0)
$[flush\:s_{i}]$
endif;
$FEL$: nop;
\end{lstlisting}
\columnbreak
The placeholder $[flush\:\alpha]$ describes the following code block for a number $\alpha$:
\begin{lstlisting}[frame=single, mathescape]
if (own_$\alpha$_$i$ = 1) // $\rho^{-1}(1)$
store $\rho^{-1}(1)$ = buf_$\alpha$_$i$;
goto $SubFEL$;
endif;
if (own_$\alpha$_$i$ = 2) // $\rho^{-1}(2)$
store $\rho^{-1}(2)$ = buf_$\alpha$_$i$;
goto $SubFEL$;
endif;
$...$
if (own_$\alpha$_$i$ = $j$) // $\rho^{-1}(j)$
store $\rho^{-1}(j)$ = buf_$\alpha$_$i$;
goto $SubFEL$;
endif;
$...$
if (own_$\alpha$_$i$ = $\beta$) // $\rho^{-1}(\beta)$
store $\rho^{-1}(\beta)$ = buf_$\alpha$_$i$;
goto $SubFEL$;
endif;
$SubFEL$: own_$\alpha$_$i$ = -1;
\end{lstlisting}
\end{multicols}
\pagebreak
PSO approach:
\begin{lstlisting}[frame=single, mathescape]
p: if (*)
$[global\:variable\:flushes]$
goto p;
endif;
\end{lstlisting}
The placeholder $[global\:variable\:flushes]$ describes the following code block for each global variable \lstinline$x$:
\begin{lstlisting}[frame=single, mathescape]
if (x_cnt_$i$ > 0)
  if (*)
    if (x_fst_$i$ > 1)
      if (x_fst_$i$ > 2)
        $...$
        if (x_fst_$i$ > $j$)
          $...$
          if (x_fst_$i$ > $s_{x,i} - 1$)
            store x = x_$s_{x,i}$_$i$;
          else
            store x = x_$(s_{x,i} - 1)$_$i$;
          endif;
          $...$
        else
          store x = x_$j$_$i$;
        endif;
        $...$
      else
        store x = x_2_$i$;
      endif;
    else
      store x = x_1_$i$;
    endif;
    x_fst_$i$ = x_fst_$i$ + 1;
  endif;
endif;
\end{lstlisting}
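As a small sanity check, instantiating this block for buffer size $s_{x,i} = 2$ (a value chosen purely for illustration) collapses the nested chain to:
\begin{lstlisting}[frame=single, mathescape]
if (x_cnt_$i$ > 0)
  if (*)
    if (x_fst_$i$ > 1)
      store x = x_2_$i$;
    else
      store x = x_1_$i$;
    endif;
    x_fst_$i$ = x_fst_$i$ + 1;
  endif;
endif;
\end{lstlisting}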
|
{"hexsha": "bf8dc6685b4b42d1f999e494906236a7d7ef5562", "size": 14827, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "docs/writeup/ba.tex", "max_stars_repo_name": "hetmeter/awmm", "max_stars_repo_head_hexsha": "8d65b1246898b27db1ac5a6542465f71e27b1603", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "docs/writeup/ba.tex", "max_issues_repo_name": "hetmeter/awmm", "max_issues_repo_head_hexsha": "8d65b1246898b27db1ac5a6542465f71e27b1603", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "docs/writeup/ba.tex", "max_forks_repo_name": "hetmeter/awmm", "max_forks_repo_head_hexsha": "8d65b1246898b27db1ac5a6542465f71e27b1603", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.7506702413, "max_line_length": 1593, "alphanum_fraction": 0.7182842112, "num_tokens": 4220}
|
! Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
!
! Licensed under the Apache License, Version 2.0 (the "License");
! you may not use this file except in compliance with the License.
! You may obtain a copy of the License at
!
! http://www.apache.org/licenses/LICENSE-2.0
!
! Unless required by applicable law or agreed to in writing, software
! distributed under the License is distributed on an "AS IS" BASIS,
! WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
! See the License for the specific language governing permissions and
! limitations under the License.
module mMmM
common n
contains
subroutine mM1() ! mangled linker name
n = n + 1
end subroutine
subroutine mM2() bind(C)
n = n + 1
end subroutine
subroutine mM3() bind(C,name="") ! mangled linker name
n = n + 1
end subroutine
subroutine mM4() bind(C,name="mM4")
n = n + 1
end subroutine
end module mMmM
subroutine sS1() ! mangled linker name
common n
n = n + 1
end subroutine
subroutine sS2() bind(C)
common n
n = n + 1
end subroutine
subroutine sS3() bind(C,name="") ! mangled linker name
common n
n = n + 1
end subroutine
subroutine sS4() bind(C,name="sS4")
common n
n = n + 1
end subroutine
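! Note on the cases above (informal summary, not tied to any one compiler):
! BIND(C) with no NAME= gives the lower-case procedure name as the binding
! label (e.g. "mm2"); NAME="" means no binding label, so the processor falls
! back to its usual Fortran mangling; NAME="mM4" is used verbatim. Routines
! without BIND(C) always get the mangled Fortran linker name.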
! --------------------
use mMmM
interface
subroutine sS2() bind(C)
end subroutine
subroutine sS3() bind(C,name="")
end subroutine
subroutine sS4() bind(C,name="sS4")
end subroutine
subroutine cC() bind(C)
end subroutine
end interface
n = 0
call mM1
call mM2
call mM3
call mM4
call sS1
call sS2
call sS3
call sS4
call cC
if (n .eq. 12) print*, 'PASS'
if (n .ne. 12) print*, 'FAIL: expected 12 calls; made', n
end
|
{"hexsha": "fa3e011dcdda1abc62c1bd0391446adf73f184b4", "size": 1699, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "test/f90_correct/src/bi08.f90", "max_stars_repo_name": "kammerdienerb/flang", "max_stars_repo_head_hexsha": "8cc4a02b94713750f09fe6b756d33daced0b4a74", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-12-11T17:43:58.000Z", "max_stars_repo_stars_event_max_datetime": "2019-12-11T17:43:58.000Z", "max_issues_repo_path": "test/f90_correct/src/bi08.f90", "max_issues_repo_name": "kammerdienerb/flang", "max_issues_repo_head_hexsha": "8cc4a02b94713750f09fe6b756d33daced0b4a74", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 58, "max_issues_repo_issues_event_min_datetime": "2017-10-25T17:21:13.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-22T13:35:09.000Z", "max_forks_repo_path": "test/f90_correct/src/bi08.f90", "max_forks_repo_name": "kammerdienerb/flang", "max_forks_repo_head_hexsha": "8cc4a02b94713750f09fe6b756d33daced0b4a74", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2019-12-21T06:35:35.000Z", "max_forks_repo_forks_event_max_datetime": "2020-06-07T23:18:58.000Z", "avg_line_length": 18.4673913043, "max_line_length": 74, "alphanum_fraction": 0.6845203061, "num_tokens": 515}
|
import argparse
import ast
import glob
import importlib
import os
import time
import warnings
from pathlib import Path
from typing import Any
import networkx as nx
from joblib import Parallel, delayed
warnings.filterwarnings('ignore', category=UserWarning)
warnings.filterwarnings('ignore', category=RuntimeWarning)
os.environ["OMP_NUM_THREADS"] = "2" # export OMP_NUM_THREADS=4
os.environ["OPENBLAS_NUM_THREADS"] = "2" # export OPENBLAS_NUM_THREADS=4
os.environ["MKL_NUM_THREADS"] = "2" # export MKL_NUM_THREADS=6
os.environ["VECLIB_MAXIMUM_THREADS"] = "2" # export VECLIB_MAXIMUM_THREADS=4
os.environ["NUMEXPR_NUM_THREADS"] = "2" # export NUMEXPR_NUM_THREADS=6
from src.graph_io import GraphReader, SyntheticGraph
from src.infinity_mirror import InfinityMirror
from src.utils import timer, ColorPrint as CP, get_imt_output_directory
def parse_args():
model_names = {'ErdosRenyi', 'ChungLu', 'BTER', 'CNRG', 'HRG', 'Kronecker', 'UniformRandom', 'GCN_AE',
'GCN_VAE', 'Linear_AE', 'Linear_VAE', 'Deep_GCN_AE', 'Deep_GCN_VAE', 'SBM', 'GraphForge',
'NetGAN', 'GraphRNN', '_BTER', 'BUGGE'}
selections = {'fast', }
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter) # formatter class shows defaults in help
# using choices we can control the inputs. metavar='' prevents printing the choices in the help preventing clutter
parser.add_argument('-i', '--input', help='Input graph', metavar='', nargs='+', required=True)
parser.add_argument('-m', '--model', help='Model to use', metavar='', choices=model_names, nargs=1, required=True)
parser.add_argument('-n', '--gens', help='#generations', nargs=1, metavar='', type=int, required=True)
parser.add_argument('-s', '--sel', help='Selection policy', choices=selections, nargs=1, default='fast')
parser.add_argument('-o', '--outdir', help='Name of the output directory', nargs=1, default='output', metavar='')
parser.add_argument('-p', '--pickle', help='Use pickle?', action='store_true')
parser.add_argument('-g', '--num_graphs', help='#graphs/generation', default=[10], nargs=1, metavar='', type=int)
parser.add_argument('-c', '--cores', help='#cores to use', default=[1], nargs=1, metavar='', type=int)
parser.add_argument('-t', '--trials', help='#trials', nargs=1, metavar='', type=int, required=True)
parser.add_argument('-r', '--rewire', help='edge rewire prob', nargs=1, default=[0], metavar='', type=float)
parser.add_argument('-f', '--finish', help='try to finish an incomplete file', nargs=1, type=str, default='')
parser.add_argument('-z', '--features_bool', help='get back the learned model features', action='store_true')
parser.add_argument('-l', '--take_lcc', help='whether or not to take only the largest connected component', action='store_true')
return parser.parse_args()
def process_args(args) -> Any:
"""
    Validates the parsed command-line arguments and builds the run configuration
    :param args: namespace returned by parse_args()
    :return: tuple (selection, graph, model class, #generations, pickle flag,
        #graphs per generation, rewire probability, finish path, features flag)
"""
    possible_extensions = {'.g', '.gml', '.txt', '.mat'}
graph_names = {fname[: fname.find(ext)].split('/')[-1]
for ext in possible_extensions
for fname in glob.glob(f'./input/*{ext}')}
graph_names.update(set(SyntheticGraph.implemented_methods)) # add the synthetic graph generators
model_name = args.model[0]
if args.finish != '':
finish_path = args.finish[0]
else:
finish_path = None
# check input
if len(args.input) > 1:
kind = args.input[0] # kind of synthetic graph
assert kind in SyntheticGraph.implemented_methods, f'{kind} not implemented in SyntheticGraph class'
r = float(args.rewire[0])
kwd_args = {'r': r}
for param, val in zip(SyntheticGraph.implemented_methods[kind], args.input[1:]):
kwd_args[param] = ast.literal_eval(val)
g = SyntheticGraph(kind, **kwd_args).g
else:
g = GraphReader(filename=args.input[0], take_lcc=args.take_lcc).graph
r = 0
if finish_path is not None:
finish_name = finish_path.split('/')[-3]
finish_model = finish_path.split('/')[-2]
assert finish_name == g.name, f'invalid name {finish_name}, expected {g.name}'
assert finish_model == model_name, f'invalid name {finish_model}, expect {model_name}'
if model_name in ('GCN_AE', 'GCN_VAE', 'Linear_AE', 'Linear_VAE', 'Deep_GCN_AE', 'Deep_GCN_VAE'):
model_name = 'GraphAutoEncoder' # one class for all autoencoder business
    module = importlib.import_module('src.graph_models')
model_obj = getattr(module, model_name)
return args.sel[0], g, model_obj, int(args.gens[0]), args.pickle, int(args.num_graphs[0]), r, finish_path, args.features_bool
def make_dirs(output_dir: str, gname: str, model: str) -> None:
"""
Makes input and output directories if they do not exist already
:return:
"""
output_dir = Path(output_dir)
for dirname in ('pickles', f'pickles/{gname}', f'pickles/{gname}/{model}', 'features',
f'features/{gname}', f'features/{gname}/{model}'):
dir_ = output_dir / dirname
if not dir_.exists():
CP.print_blue(f'Making dir {dir_!r}')
os.makedirs(dir_, exist_ok=True)
return
def run_infinity_mirror(args, trial) -> None:
"""
Creates and runs infinity mirror
:return:
"""
selection, g, model, num_gens, use_pickle, num_graphs, rewire, finish, features_bool = process_args(args)
# process args returns the Class and not an object
empty_g = nx.empty_graph(1)
empty_g.name = 'empty' # create an empty graph as a placeholder
if args.model[0] in ('GCN_AE', 'GCN_VAE', 'Linear_AE', 'Linear_VAE', 'Deep_GCN_AE', 'Deep_GCN_VAE'):
model_obj = model(
input_graph=empty_g,
trial=trial,
kind=args.model[0])
else:
model_obj = model(
input_graph=empty_g,
trial=trial) # this is a roundabout way to ensure the name of GraphModel object is correct
imt_output_dir = get_imt_output_directory()
make_dirs(output_dir=imt_output_dir, gname=g.name, model=model_obj.model_name)
assert selection == 'fast', 'invalid selection'
num_graphs = 1 # only 1 graph per generation
inf = InfinityMirror(initial_graph=g, num_generations=num_gens, model_obj=model_obj,
num_graphs=num_graphs, trial=trial, r=rewire, dataset=g.name, model=args.model[0], finish=finish, features_bool=features_bool)
tic = time.perf_counter()
inf.run(use_pickle=use_pickle)
toc = time.perf_counter()
inf.write_timing_stats(round(toc - tic, 3))
print(trial, inf)
return
@timer
def main() -> None:
args = parse_args()
num_jobs, num_trials = int(args.cores[0]), int(args.trials[0])
CP.print_green(f'Running infinity mirror on {num_jobs} cores for {num_trials} trials')
# print(args)
# exit(1)
Parallel(n_jobs=num_jobs, backend='multiprocessing')(
delayed(run_infinity_mirror)(trial=i + 1, args=args)
for i in range(num_trials)
)
return
if __name__ == '__main__':
main()
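# Example invocation (hypothetical input name and parameter values, shown for
# illustration only; the flags are those defined in parse_args above):
#   python main.py -i eucore -m BTER -n 20 -t 5 -c 2 -g 10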
|
{"hexsha": "baef553cea8dd919cb4485a35d16a5ea643083db", "size": 7197, "ext": "py", "lang": "Python", "max_stars_repo_path": "main.py", "max_stars_repo_name": "Abdumaleek/infinity-mirror", "max_stars_repo_head_hexsha": "b493c5602d9e4bcf374b748e9b80e7c85be54a88", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2020-03-13T02:54:03.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-18T02:33:12.000Z", "max_issues_repo_path": "main.py", "max_issues_repo_name": "Abdumaleek/infinity-mirror", "max_issues_repo_head_hexsha": "b493c5602d9e4bcf374b748e9b80e7c85be54a88", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-11-10T19:47:00.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T01:24:59.000Z", "max_forks_repo_path": "main.py", "max_forks_repo_name": "Abdumaleek/infinity-mirror", "max_forks_repo_head_hexsha": "b493c5602d9e4bcf374b748e9b80e7c85be54a88", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-05-24T21:54:44.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-24T21:54:44.000Z", "avg_line_length": 39.3278688525, "max_line_length": 151, "alphanum_fraction": 0.6663887731, "include": true, "reason": "import networkx", "num_tokens": 1857}
|
function try_parse(s)
if all(isnumeric, s)
parse(Int, s)
else
s
end
end
struct BinaryOp
op
in_1
in_2
out
BinaryOp(op, in_1::AbstractString, in_2::AbstractString, out) = new(op, try_parse(in_1), try_parse(in_2), out)
end
struct UnaryOp
op
in_
out
UnaryOp(op, in_::AbstractString, out) = new(op, try_parse(in_), out)
end
function parse_line(l)
m_simple = match(r"^(?<op>NOT)? ?(?<in_>\S+) -> (?<out>\w+)$", l)
m_double = match(r"^(?<in_1>\S+) (?<op>AND|OR|RSHIFT|LSHIFT) (?<in_2>\S+) -> (?<out>\w+)$", l)
if !isnothing(m_simple)
UnaryOp(m_simple[:op], m_simple[:in_], m_simple[:out])
elseif !isnothing(m_double)
        BinaryOp(m_double[:op], m_double[:in_1], m_double[:in_2], m_double[:out])
else
@error "Did not parse $l"
end
end
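# Illustrative input lines this parser accepts (Advent of Code day 7 wire format;
# each matches one of the two regexes above):
#   123 -> x
#   x AND y -> d
#   p LSHIFT 2 -> q
#   NOT e -> f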
data = readlines() .|> parse_line
function apply(assgn::UnaryOp, mem)
in_ = assgn.in_
if in_ isa AbstractString
in_ = mem[in_]
end
if !isnothing(assgn.op)
~in_
else
in_
end
end
function apply(assgn::BinaryOp, mem)
in_1 = assgn.in_1
if in_1 isa AbstractString
in_1 = mem[in_1]
end
in_2 = assgn.in_2
if in_2 isa AbstractString
in_2 = mem[in_2]
end
if assgn.op == "AND"
in_1 & in_2
elseif assgn.op == "OR"
in_1 | in_2
elseif assgn.op == "RSHIFT"
in_1 >> in_2
elseif assgn.op == "LSHIFT"
in_1 << in_2
else
@error "No operation in $assgn"
end
end
function compute(assgn::UnaryOp, circuit, mem)
out_key = assgn.out
if haskey(mem, out_key)
return mem[out_key]
end
in_ = assgn.in_
if in_ isa AbstractString && !haskey(mem, in_)
compute(circuit[in_], circuit, mem)
end
out = apply(assgn, mem)
mem[out_key] = out
return out
end
function compute(assgn::BinaryOp, circuit, mem)
out_key = assgn.out
if haskey(mem, out_key)
return mem[out_key]
end
in_1 = assgn.in_1
if in_1 isa AbstractString && !haskey(mem, in_1)
compute(circuit[in_1], circuit, mem)
end
in_2 = assgn.in_2
if in_2 isa AbstractString && !haskey(mem, in_2)
compute(circuit[in_2], circuit, mem)
end
out = apply(assgn, mem)
mem[out_key] = out
return out
end
function solve_1()
g_struct = Dict{String, Union{UnaryOp, BinaryOp}}()
g_val = Dict{String, Int}()
for line in data
g_struct[line.out] = line
end
compute(g_struct["a"], g_struct, g_val)
end
solve_1() |> println
|
{"hexsha": "808cbeaa4ece3fb828f5493101d23a8b31bc737e", "size": 2584, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "2015/7.jl", "max_stars_repo_name": "pLOPeGG/JuliaOfCode", "max_stars_repo_head_hexsha": "894bca6427174b84ed385b8a008b79d15d63bd7b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "2015/7.jl", "max_issues_repo_name": "pLOPeGG/JuliaOfCode", "max_issues_repo_head_hexsha": "894bca6427174b84ed385b8a008b79d15d63bd7b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "2015/7.jl", "max_forks_repo_name": "pLOPeGG/JuliaOfCode", "max_forks_repo_head_hexsha": "894bca6427174b84ed385b8a008b79d15d63bd7b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.0081300813, "max_line_length": 114, "alphanum_fraction": 0.592879257, "num_tokens": 862}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""tests for load.py"""
import tempfile
import unittest
import warnings
import numpy as np
# has to specify the exact file to avoid nosetests error on full tests
from sknetwork.data.load import load_netset, load_konect, clear_data_home, save, load
from sknetwork.data.toy_graphs import house, star_wars
from sknetwork.utils.timeout import TimeOut
class TestLoader(unittest.TestCase):
def test_netset(self):
tmp_data_dir = tempfile.gettempdir() + '/stub'
clear_data_home(tmp_data_dir)
try:
graph = load_netset('stub', tmp_data_dir)
except: # pragma: no cover
warnings.warn('Could not reach Telecom Graphs. Corresponding test has not been performed.', RuntimeWarning)
return
n = 2
self.assertEqual(graph.adjacency.shape, (n, n))
self.assertEqual(len(graph.names), n)
clear_data_home(tmp_data_dir)
def test_invalid_netset(self):
tmp_data_dir = tempfile.gettempdir() + '/stub'
clear_data_home(tmp_data_dir)
try:
with self.assertRaises(ValueError):
load_netset('junk', tmp_data_dir)
except: # pragma: no cover
warnings.warn('Could not reach Telecom Graphs. Corresponding test has not been performed.', RuntimeWarning)
return
load_netset()
def test_konect(self):
tmp_data_dir = tempfile.gettempdir() + '/moreno_crime'
clear_data_home(tmp_data_dir)
try:
with TimeOut(2):
data = load_konect('moreno_crime', tmp_data_dir)
except (TimeoutError, RuntimeError): # pragma: no cover
warnings.warn('Could not reach Konect. Corresponding test has not been performed.', RuntimeWarning)
return
self.assertEqual(data.biadjacency.shape[0], 829)
self.assertEqual(data.name.shape[0], 829)
data = load_konect('moreno_crime', tmp_data_dir)
self.assertEqual(data.biadjacency.shape[0], 829)
self.assertEqual(data.name.shape[0], 829)
clear_data_home(tmp_data_dir)
tmp_data_dir = tempfile.gettempdir() + '/ego-facebook'
try:
with TimeOut(2):
data = load_konect('ego-facebook', tmp_data_dir)
except (TimeoutError, RuntimeError): # pragma: no cover
warnings.warn('Could not reach Konect. Corresponding test has not been performed.', RuntimeWarning)
return
self.assertEqual(data.adjacency.shape[0], 2888)
data = load_konect('ego-facebook', tmp_data_dir)
self.assertEqual(data.adjacency.shape[0], 2888)
clear_data_home(tmp_data_dir)
def test_invalid_konect(self):
tmp_data_dir = tempfile.gettempdir() + '/stub'
clear_data_home(tmp_data_dir)
try:
with TimeOut(4):
with self.assertRaises(ValueError):
load_konect('junk', tmp_data_dir)
with self.assertRaises(ValueError):
load_konect('', tmp_data_dir)
except (TimeoutError, RuntimeError): # pragma: no cover
warnings.warn('Could not reach Konect. Corresponding test has not been performed.', RuntimeWarning)
return
def test_save_load(self):
data = house()
tmp_data_dir = tempfile.gettempdir() + '/stub'
save(tmp_data_dir + '/house', data)
loaded_data = load(tmp_data_dir + '/house')
self.assertTrue(np.allclose(data.data, loaded_data.adjacency.data))
data = star_wars()
save(tmp_data_dir + '/star_wars', data)
loaded_data = load(tmp_data_dir + '/star_wars')
self.assertTrue(np.allclose(data.data, loaded_data.biadjacency.data))
data = star_wars(metadata=True)
save(tmp_data_dir + '/star_wars', data)
loaded_data = load(tmp_data_dir + '/star_wars')
self.assertTrue(np.allclose(data.biadjacency.data, loaded_data.biadjacency.data))
self.assertEqual(data.names_col[0], loaded_data.names_col[0])
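# One standard way to run just this module (assuming the package is importable):
#   python -m unittest sknetwork.data.tests.test_load -v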
|
{"hexsha": "6744c577423b5a330a5ea78478bcc87af2dde21f", "size": 4079, "ext": "py", "lang": "Python", "max_stars_repo_path": "sknetwork/data/tests/test_load.py", "max_stars_repo_name": "altana-tech/scikit-network", "max_stars_repo_head_hexsha": "dedc9d3e694c7106e4709aae22dffb5142c15859", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 457, "max_stars_repo_stars_event_min_datetime": "2018-07-24T12:42:14.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T08:30:39.000Z", "max_issues_repo_path": "sknetwork/data/tests/test_load.py", "max_issues_repo_name": "altana-tech/scikit-network", "max_issues_repo_head_hexsha": "dedc9d3e694c7106e4709aae22dffb5142c15859", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 281, "max_issues_repo_issues_event_min_datetime": "2018-07-13T05:01:19.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T14:13:43.000Z", "max_forks_repo_path": "sknetwork/data/tests/test_load.py", "max_forks_repo_name": "altana-tech/scikit-network", "max_forks_repo_head_hexsha": "dedc9d3e694c7106e4709aae22dffb5142c15859", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 58, "max_forks_repo_forks_event_min_datetime": "2019-04-22T09:04:32.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T12:43:08.000Z", "avg_line_length": 39.2211538462, "max_line_length": 119, "alphanum_fraction": 0.6474626134, "include": true, "reason": "import numpy", "num_tokens": 918}
|
//---------------------------------------------------------------------------//
// Copyright (c) 2018-2020 Mikhail Komarov <nemo@nil.foundation>
//
// Distributed under the Boost Software License, Version 1.0
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt
//---------------------------------------------------------------------------//
#ifndef CRYPTO3_BLOCK_CIPHERS_DETAIL_SHACAL1_POLICY_HPP
#define CRYPTO3_BLOCK_CIPHERS_DETAIL_SHACAL1_POLICY_HPP
#include <boost/crypto3/block/detail/shacal/shacal_policy.hpp>
namespace boost {
namespace crypto3 {
namespace block {
namespace detail {
typedef shacal_policy shacal1_policy;
} // namespace detail
} // namespace block
} // namespace crypto3
} // namespace boost
#endif // CRYPTO3_BLOCK_CIPHERS_DETAIL_SHACAL1_POLICY_HPP
|
{"hexsha": "a9bea83898cce86069ffa458dbf1c1231618b503", "size": 911, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/boost/crypto3/block/detail/shacal/shacal1_policy.hpp", "max_stars_repo_name": "NilFoundation/boost-crypto", "max_stars_repo_head_hexsha": "a3e599b780bbbbc063b7c8da0e498125769e08be", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 4.0, "max_stars_repo_stars_event_min_datetime": "2020-09-02T06:19:14.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-07T04:55:03.000Z", "max_issues_repo_path": "include/boost/crypto3/block/detail/shacal/shacal1_policy.hpp", "max_issues_repo_name": "NilFoundation/boost-crypto", "max_issues_repo_head_hexsha": "a3e599b780bbbbc063b7c8da0e498125769e08be", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 3.0, "max_issues_repo_issues_event_min_datetime": "2020-04-06T21:49:03.000Z", "max_issues_repo_issues_event_max_datetime": "2020-09-18T04:54:51.000Z", "max_forks_repo_path": "include/boost/crypto3/block/detail/shacal/shacal1_policy.hpp", "max_forks_repo_name": "NilFoundation/boost-crypto", "max_forks_repo_head_hexsha": "a3e599b780bbbbc063b7c8da0e498125769e08be", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2022-02-13T21:14:37.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-13T21:14:37.000Z", "avg_line_length": 33.7407407407, "max_line_length": 79, "alphanum_fraction": 0.5806805708, "num_tokens": 188}
|
'''Perimeter monitoring for MD simulations using MDAnalysis'''
import glob
import MDAnalysis
from MDAnalysis.analysis import distances
import numpy as np
# Please change the workdir here..
workdir = 'REPLACEME'
top = glob.glob("{0}/**/{1}".format(workdir.rstrip("/"), '*-in-noh2o.pdb'), recursive=True)[0]
traj = glob.glob("{0}/**/{1}".format(workdir.rstrip("/"), "*.dcd"), recursive=True)[0]
outputpath = "{0}/{1}".format(workdir.rstrip("/"), "perimeter.csv")
# For manual mode..
#top = '/mdspace/mstahnke/2007_2YDV_Na_MD/2YDV_Na_MD-in-noh2o.pdb'
#traj = '/mdspace/mstahnke/2007_2YDV_Na_MD/2YDV_Na_MD_trj/allframes-noh2o.dcd'
#outputpath = '/mdspace/mstahnke/2007_2YDV_Na_MD/perimeter.csv'
u = MDAnalysis.Universe(top, traj) # Creating MDAnalysis universe aka loading in MD data
print('Created MDUniverse successfully.')
# creating atom selections for later use
s1 = u.select_atoms("resid 246 and resname TRP and name CA")
s2 = u.select_atoms("resid 91 and resname SER and name CA")
s3 = u.select_atoms("resid 52 and resname ASP and name CA")
s4 = u.select_atoms("resid 280 and resname ASN and name CA")
lig = u.select_atoms("resid 400")
sodium = u.select_atoms("resid 2402")
# getting distances
distpairs = [(s1, s2), (s2, s3), (s3, s4), (s4, s1)]
step = -1
perilist = []
print('Starting perimeter calculations.')
for ts in u.trajectory:
step += 1
peri = 0.0
    for a, b in distpairs:
# distance array cropped to the distance between selections rounded to two decimal digits
dist = round(MDAnalysis.analysis.distances.dist(a, b)[2, 0], 2)
peri += dist
perilist.append([step, u.trajectory.time, peri])
perilist = np.array(perilist)
print('Finished perimeter calculations and writing out file to \"{}\"'.format(outputpath))
# writing out
if outputpath is not None:
np.savetxt(outputpath, perilist, delimiter=',') # writing to file
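# The CSV rows are (step, time, perimeter); one illustrative way to read it back:
#   import numpy as np
#   steps, times, perims = np.loadtxt(outputpath, delimiter=',').T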
|
{"hexsha": "7c5ee3e45405f6b55c51f2eb6061f733e3056e9f", "size": 1977, "ext": "py", "lang": "Python", "max_stars_repo_path": "perimetermonitor.py", "max_stars_repo_name": "dmachalz/mdanalysis", "max_stars_repo_head_hexsha": "b23fb34cfc78d2d67de510c375c8cb691f39dd5b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "perimetermonitor.py", "max_issues_repo_name": "dmachalz/mdanalysis", "max_issues_repo_head_hexsha": "b23fb34cfc78d2d67de510c375c8cb691f39dd5b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "perimetermonitor.py", "max_forks_repo_name": "dmachalz/mdanalysis", "max_forks_repo_head_hexsha": "b23fb34cfc78d2d67de510c375c8cb691f39dd5b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.6111111111, "max_line_length": 94, "alphanum_fraction": 0.7071320182, "include": true, "reason": "import numpy", "num_tokens": 591}
|
#include "Collisions.h"
#include "Box.h"
#include "Points.h"
#define EIGEN_DONT_ALIGN_STATICALLY
#include <Eigen/Dense>
using namespace std;
using namespace Eigen;
void CD(const Mesh& mesh, const shared_ptr<Obstacles> obs, std::vector<std::shared_ptr<btc::Collision> > &cls)
{
MatrixXd verts2(3, mesh.nodes.size());
MatrixXi faces2(3, mesh.faces.size());
//VectorXi EoLs(1, mesh.nodes.size());
VectorXi EoLs;
	EoLs.setZero(mesh.nodes.size()); // size and zero-fill: resize() alone leaves entries uninitialized
for (int i = 0; i < mesh.nodes.size(); i++) {
verts2.col(i) = Vector3d(mesh.nodes[i]->x[0], mesh.nodes[i]->x[1], mesh.nodes[i]->x[2]);
if (mesh.nodes[i]->EoL) EoLs(i) = 1;
}
for (int i = 0; i < mesh.faces.size(); i++) {
faces2.col(i) = Vector3i(mesh.faces[i]->v[0]->node->index, mesh.faces[i]->v[1]->node->index, mesh.faces[i]->v[2]->node->index);
}
// Compute these first so they form the base of our collision list
btc::pointTriCollision(cls, obs->cdthreshold, obs->points->pxyz, obs->points->norms, verts2, faces2, true);
int c = cls.size();
for (int b = 0; b < obs->num_boxes; b++) {
vector<shared_ptr<btc::Collision> > clst;
btc::boxTriCollision(clst, obs->cdthreshold, obs->boxes[b]->dim, obs->boxes[b]->E1, verts2, faces2, EoLs, false);
cls.insert(cls.end(), clst.begin(), clst.end());
// We need to augment the indices of the box geometry by the object number
// TODO:: Internally?
		for (; c < cls.size(); c++) {
if (cls[c]->count1 == 1 && cls[c]->count2 == 3) {
//cls[c]->verts1(0) = obs->points->num_points + (obs->boxes[b]->num_points * b) + cls[c]->verts1(0);
cls[c]->verts1(0) = obs->points->num_points + (b* obs->boxes[b]->num_points) + (b* obs->boxes[b]->num_edges) + cls[c]->verts1(0);
}
for (int e = 0; e < cls[c]->edge1.size(); e++) {
//cls[c]->edge1[e] = obs->points->num_points + (obs->boxes[b]->num_edges * b) + cls[c]->edge1[e];
//cls[c]->edge1[e] = (obs->boxes[b]->num_edges * b) + cls[c]->edge1[e];
cls[c]->edge1[e] = obs->points->num_points + (b* obs->boxes[b]->num_points) + (b* obs->boxes[b]->num_edges) + (obs->boxes[b]->num_points + cls[c]->edge1[e]);
}
}
}
}
void CD2(const Mesh& mesh, const shared_ptr<Obstacles> obs, std::vector<std::shared_ptr<btc::Collision> > &cls)
{
MatrixXd verts2(3, mesh.nodes.size());
MatrixXi faces2(3, mesh.faces.size());
VectorXi EoLs;
	EoLs.setZero(mesh.nodes.size()); // size and zero-fill: resize() alone leaves entries uninitialized
for (int i = 0; i < mesh.nodes.size(); i++) {
verts2.col(i) = Vector3d(mesh.nodes[i]->x[0], mesh.nodes[i]->x[1], mesh.nodes[i]->x[2]);
if (mesh.nodes[i]->EoL) EoLs(i) = 1;
}
for (int i = 0; i < mesh.faces.size(); i++) {
faces2.col(i) = Vector3i(mesh.faces[i]->v[0]->node->index, mesh.faces[i]->v[1]->node->index, mesh.faces[i]->v[2]->node->index);
}
// Compute these first so they form the base of our collision list
btc::pointTriCollision(cls, obs->cdthreshold, obs->points->pxyz, obs->points->norms, verts2, faces2, false);
for (int b = 0; b < obs->num_boxes; b++) {
vector<shared_ptr<btc::Collision> > clst;
btc::boxTriCollision(clst, obs->cdthreshold, obs->boxes[b]->dim, obs->boxes[b]->E1, verts2, faces2, EoLs, false);
cls.insert(cls.end(), clst.begin(), clst.end());
}
}
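// Note: CD() additionally remaps box vertex/edge indices into the global index
// space in which point geometry comes first (the augmentation loop above),
// while CD2() simply concatenates the per-box collision lists without remapping.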
|
{"hexsha": "e4e16ffbf9b65a14add4aa46e8c47a4156591c0f", "size": 3152, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/Collisions.cpp", "max_stars_repo_name": "sueda/eol-cloth", "max_stars_repo_head_hexsha": "cc8f24eef81283c541b859c05dd8ceed7813271f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 85.0, "max_stars_repo_stars_event_min_datetime": "2018-05-17T04:00:05.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-12T03:46:30.000Z", "max_issues_repo_path": "src/Collisions.cpp", "max_issues_repo_name": "sueda/eol-cloth", "max_issues_repo_head_hexsha": "cc8f24eef81283c541b859c05dd8ceed7813271f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2019-05-19T09:24:08.000Z", "max_issues_repo_issues_event_max_datetime": "2020-03-20T15:03:18.000Z", "max_forks_repo_path": "src/Collisions.cpp", "max_forks_repo_name": "sueda/eol-cloth", "max_forks_repo_head_hexsha": "cc8f24eef81283c541b859c05dd8ceed7813271f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 18.0, "max_forks_repo_forks_event_min_datetime": "2018-05-17T03:56:05.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-07T07:41:50.000Z", "avg_line_length": 40.4102564103, "max_line_length": 161, "alphanum_fraction": 0.6284898477, "num_tokens": 1079}
|
#!/usr/bin/env python
# Copyright (c) 2009, South African Astronomical Observatory (SAAO) #
# All rights reserved. #
"""
SPECSENS calculates the calibration curve given an observation, a standard star,
and the extinction curve for the site. The task assumes a 1-D spectrum that
has already been extracted from the original observations.
Author Version Date
-----------------------------------------------
S. M. Crawford (SAAO) 1.0 21 Mar 2011
TODO
----
LIMITATIONS
-----------
"""
# Ensure python 2.5 compatibility
from __future__ import with_statement
import os
import sys
import time
import numpy as np
import pyfits
from matplotlib.pyplot import *
from pyraf import iraf
import saltstat
import saltsafekey as saltkey
import saltsafeio as saltio
from saltsafelog import logging
import spectools as st
from spectools import SALTSpecError
from PySpectrograph.Spectra import Spectrum
from saltfit import interfit
from pylab import *
debug = True
# -----------------------------------------------------------
# core routine
def specsens(specfile, outfile, stdfile, extfile, airmass=None, exptime=None,
stdzp=3.68e-20, function='polynomial', order=3, thresh=3, niter=5,
fitter='gaussian', clobber=True, logfile='salt.log', verbose=True):
with logging(logfile, debug) as log:
# read in the specfile and create a spectrum object
obs_spectra = st.readspectrum(specfile.strip(), error=True, ftype='ascii')
# smooth the observed spectrum
# read in the std file and convert from magnitudes to fnu
# then convert it to fwave (ergs/s/cm2/A)
std_spectra = st.readspectrum(stdfile.strip(), error=False, ftype='ascii')
std_spectra.flux = Spectrum.magtoflux(std_spectra.flux, stdzp)
std_spectra.flux = Spectrum.fnutofwave(
std_spectra.wavelength, std_spectra.flux)
# Get the typical bandpass of the standard star,
std_bandpass = np.diff(std_spectra.wavelength).mean()
# Smooth the observed spectrum to that bandpass
obs_spectra.flux = st.boxcar_smooth(obs_spectra, std_bandpass)
# read in the extinction file (leave in magnitudes)
ext_spectra = st.readspectrum(extfile.strip(), error=False, ftype='ascii')
# determine the airmass if not specified
if saltio.checkfornone(airmass) is None:
message = 'Airmass was not supplied'
raise SALTSpecError(message)
# determine the exptime if not specified
if saltio.checkfornone(exptime) is None:
message = 'Exposure Time was not supplied'
raise SALTSpecError(message)
# calculate the calibrated spectra
log.message('Calculating the calibration curve for %s' % specfile)
cal_spectra = sensfunc(
obs_spectra, std_spectra, ext_spectra, airmass, exptime)
# plot(cal_spectra.wavelength, cal_spectra.flux * std_spectra.flux)
# fit the spectra--first take a first cut of the spectra
# using the median absolute deviation to throw away bad points
cmed = np.median(cal_spectra.flux)
cmad = saltstat.mad(cal_spectra.flux)
mask = (abs(cal_spectra.flux - cmed) < thresh * cmad)
mask = np.logical_and(mask, (cal_spectra.flux > 0))
# now fit the data
# Fit using a gaussian process.
        if fitter == 'gaussian':
            from sklearn.gaussian_process import GaussianProcess
            # Instantiate a Gaussian process model
dy = obs_spectra.var[mask] ** 0.5
dy /= obs_spectra.flux[mask] / cal_spectra.flux[mask]
y = cal_spectra.flux[mask]
gp = GaussianProcess(corr='squared_exponential', theta0=1e-2,
thetaL=1e-4, thetaU=0.1, nugget=(dy / y) ** 2.0)
X = np.atleast_2d(cal_spectra.wavelength[mask]).T
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
x = np.atleast_2d(cal_spectra.wavelength).T
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred = gp.predict(x)
cal_spectra.flux = y_pred
        else:
            fit = interfit(cal_spectra.wavelength[mask], cal_spectra.flux[mask],
                           function=function, order=order, thresh=thresh, niter=niter)
            fit.interfit()
            cal_spectra.flux = fit(cal_spectra.wavelength)
# write the spectra out
st.writespectrum(cal_spectra, outfile, ftype='ascii')
def sensfunc(obs_spectra, std_spectra, ext_spectra, airmass, exptime):
"""Given an observe spectra, calculate the calibration curve for the
spectra. All data is interpolated to the binning of the obs_spectra.
The calibrated spectra is then calculated from
C = F_obs/ F_std / 10**(-0.4*A*E)/T/dW
where F_obs is the observed flux from the source, F_std is the
standard spectra, A is the airmass, E is the
extinction in mags, T is the exposure time and dW is the bandpass
Parameters
-----------
obs_spectra--spectrum of the observed star (counts/A)
std_spectra--know spectrum of the standard star (ergs/s/cm2/A)
ext_spectra--spectrum of the extinction curve (in mags)
airmass--airmass of the observations
exptime--exposure time of the observations
function
"""
# re-interpt the std_spectra over the same wavelength
std_spectra.interp(obs_spectra.wavelength)
# re-interp the ext_spetra over the same wavelength
ext_spectra.interp(obs_spectra.wavelength)
# create the calibration spectra
cal_spectra = Spectrum.Spectrum(
obs_spectra.wavelength, obs_spectra.flux.copy(), stype='continuum')
# set up the bandpass
bandpass = np.diff(obs_spectra.wavelength).mean()
# correct for extinction
cal_spectra.flux = cal_spectra.flux / \
10 ** (-0.4 * airmass * ext_spectra.flux)
# correct for the exposure time and calculation the sensitivity curve
cal_spectra.flux = cal_spectra.flux / exptime / bandpass / std_spectra.flux
return cal_spectra
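# Worked example of the formula above (illustrative numbers only): with
# F_obs = 1e5 counts/A, F_std = 1e-13 ergs/s/cm2/A, A = 1.2, E = 0.12 mag,
# T = 600 s and dW = 2 A:
#   C = 1e5 / 10**(-0.4 * 1.2 * 0.12) / 600 / 2 / 1e-13 ~ 9.5e14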
# main code
parfile = iraf.osfn("saltspec$specsens.par")
t = iraf.IrafTaskFactory(
taskname="specsens", value=parfile, function=specsens, pkgname='saltspec')
|
{"hexsha": "497cd8ead6b7e020dfcb3c2d86a18526fe891282", "size": 6366, "ext": "py", "lang": "Python", "max_stars_repo_path": "saltspec/specsens.py", "max_stars_repo_name": "Richard-Tarbell/pysalt", "max_stars_repo_head_hexsha": "2815d5533c7e60b7042f2bc3cf46cecdd38fc609", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2015-02-22T08:35:04.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-12T11:32:34.000Z", "max_issues_repo_path": "saltspec/specsens.py", "max_issues_repo_name": "Richard-Tarbell/pysalt", "max_issues_repo_head_hexsha": "2815d5533c7e60b7042f2bc3cf46cecdd38fc609", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 17, "max_issues_repo_issues_event_min_datetime": "2015-02-24T18:40:26.000Z", "max_issues_repo_issues_event_max_datetime": "2018-06-05T12:52:59.000Z", "max_forks_repo_path": "saltspec/specsens.py", "max_forks_repo_name": "Richard-Tarbell/pysalt", "max_forks_repo_head_hexsha": "2815d5533c7e60b7042f2bc3cf46cecdd38fc609", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 12, "max_forks_repo_forks_event_min_datetime": "2015-03-20T14:46:19.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-12T18:30:01.000Z", "avg_line_length": 36.7976878613, "max_line_length": 138, "alphanum_fraction": 0.6591266101, "include": true, "reason": "import numpy", "num_tokens": 1645}
|
[STATEMENT]
lemma a_star_refl:
shows "M \<longrightarrow>\<^sub>a* M"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. M \<longrightarrow>\<^sub>a* M
[PROOF STEP]
by blast
|
{"llama_tokens": 72, "file": null, "length": 1}
|
/*
* Copyright (c) 2019 Opticks Team. All Rights Reserved.
*
* This file is part of Opticks
* (see https://bitbucket.org/simoncblyth/opticks).
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <iostream>
#include <iomanip>
#include <cstring>
#ifdef USE_BOOST_LOG
// http://stackoverflow.com/questions/24302123/how-can-i-use-boost-log-across-dll-boundaries
// http://www.boost.org/doc/libs/1_61_0/libs/log/doc/html/log/installation/config.html
/*
If your application consists of more than one module (e.g. an exe and one or
several dll's) that use Boost.Log, the library must be built as a shared
object.
*/
// headers here so the macro can work elsewhere easily
#include <boost/log/core.hpp>
#include <boost/log/trivial.hpp>
#include <boost/log/expressions.hpp>
#include "boost/log/utility/setup.hpp"
#define LOG BOOST_LOG_TRIVIAL
// trace/debug/info/warning/error/fatal
// textual workaround to get across the dll divide
// using static methods of Blog
#define BLOG(argc, argv) \
{ \
boost::log::add_common_attributes(); \
boost::log::register_simple_formatter_factory< boost::log::trivial::severity_level, char >("Severity"); \
boost::log::add_console_log( \
std::cerr, \
boost::log::keywords::format = "[%TimeStamp%]:%Severity%: %Message%", \
boost::log::keywords::auto_flush = true \
); \
boost::log::core::get()->set_filter \
( \
boost::log::trivial::severity >= BLog::FilterLevel((argc), (argv)) \
); \
}
#else
#include <plog/Log.h>
//
// such dirty tricks need to come in as the last header
// before code you control
//
// translate from boost log levels to plog
//using plog::fatal ;
//using plog::error ;
//using plog::warning ;
//using plog::info ;
//using plog::debug ;
//using plog::verbose ;
// defines are dangerous
//#define trace plog::verbose
#endif
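// Illustrative usage when USE_BOOST_LOG is defined (hypothetical snippet, not
// the only supported configuration):
//   int main(int argc, char** argv)
//   {
//       BLOG(argc, argv);               // configure console sink + severity filter
//       LOG(info) << "logging ready";   // LOG aliases BOOST_LOG_TRIVIAL here
//   }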
#include "BRAP_API_EXPORT.hh"
#include "BRAP_HEAD.hh"
class BRAP_API BLog {
public:
static void Initialize(void* whatever, int level);
static int SeverityLevel(const char* ll);
public:
BLog(int argc, char** argv);
void setDir( const char* dir);
int getLevel();
virtual ~BLog();
private:
void init();
void parse(int argc, char** argv);
void setName( const char* logname);
void setLevel( const char* loglevel);
void initialize( void* whatever );
void addFileLog();
private:
int m_argc ;
char** m_argv ;
int m_loglevel ;
const char* m_logname ;
const char* m_logdir ;
bool m_nogeocache ;
bool m_pause ;
bool m_exitpause ;
bool m_addfile ;
};
|
{"hexsha": "94f67d3070287ee18b97447ceac64ac0c84542d5", "size": 3312, "ext": "hh", "lang": "C++", "max_stars_repo_path": "boostrap/BLogDeprecated.hh", "max_stars_repo_name": "hanswenzel/opticks", "max_stars_repo_head_hexsha": "b75b5929b6cf36a5eedeffb3031af2920f75f9f0", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 11.0, "max_stars_repo_stars_event_min_datetime": "2020-07-05T02:39:32.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-20T18:52:44.000Z", "max_issues_repo_path": "boostrap/BLogDeprecated.hh", "max_issues_repo_name": "hanswenzel/opticks", "max_issues_repo_head_hexsha": "b75b5929b6cf36a5eedeffb3031af2920f75f9f0", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "boostrap/BLogDeprecated.hh", "max_forks_repo_name": "hanswenzel/opticks", "max_forks_repo_head_hexsha": "b75b5929b6cf36a5eedeffb3031af2920f75f9f0", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 4.0, "max_forks_repo_forks_event_min_datetime": "2020-09-03T20:36:32.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-19T07:42:21.000Z", "avg_line_length": 28.0677966102, "max_line_length": 110, "alphanum_fraction": 0.6479468599, "num_tokens": 811}
|
/******************************************************************************
Triangle class.
Copyright (c) 2010 - 2012
Alexander Rukletsov <rukletsov@gmail.com>
Dzmitry Hlindzich <dzmitry.hlindzich@ziti.uni-heidelberg.de>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
*******************************************************************************/
#ifndef TRIANGLE_HPP_507AFC96_F3F4_40FF_827C_66F388AEDAD2_
#define TRIANGLE_HPP_507AFC96_F3F4_40FF_827C_66F388AEDAD2_
#include <stdexcept>
#include <boost/array.hpp>
#include <boost/operators.hpp>
namespace bo {
template <typename PointType>
class Triangle: boost::equality_comparable1< Triangle<PointType> >
{
public:
Triangle() { }
Triangle(const PointType& A, const PointType& B, const PointType& C);
PointType A() const;
PointType B() const;
PointType C() const;
// Assignment and access operators. Range-check is done by boost::array via
// debug-only assertions. Use at() method for safer but less efficient version
// with exceptions.
const PointType& operator[](std::size_t index) const;
PointType& operator[](std::size_t index);
    // Assignment and access methods. Throw an exception in case of a bad index.
    // A safer, but less efficient, alternative to operator[].
const PointType& at(std::size_t index) const;
PointType& at(std::size_t index);
    // In general this won't work for floats. This is because not every real number can
    // be represented by float/double/long double and therefore theoretically equal
    // numbers can differ, i.e. f^{-1}(f(x)) can differ from x. For more information
    // on this topic see
    // http://www.cygnus-software.com/papers/comparingfloats/comparingfloats.htm
bool operator==(const Triangle<PointType>& other) const;
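    // Illustrative caveat (hypothetical values): both triangles below hold
    // mathematically equal coordinates, yet compare unequal because
    // 0.1 + 0.2 != 0.3 in IEEE 754 double arithmetic:
    //   Triangle<double> t1(0.1 + 0.2, 0.0, 0.0);
    //   Triangle<double> t2(0.3, 0.0, 0.0);
    //   assert(!(t1 == t2));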
protected:
// Provides a range check for a given index. Throws a std::out_of_range exception
// in case of bad index.
    static void check_range(std::size_t index);
protected:
boost::array<PointType, 3> vertices_;
};
template <typename PointType>
Triangle<PointType>::Triangle(const PointType& A, const PointType& B, const PointType& C)
{
vertices_[0] = A;
vertices_[1] = B;
vertices_[2] = C;
}
template <typename PointType> inline
PointType Triangle<PointType>::A() const
{
return vertices_[0];
}
template <typename PointType> inline
PointType Triangle<PointType>::B() const
{
return vertices_[1];
}
template <typename PointType> inline
PointType Triangle<PointType>::C() const
{
return vertices_[2];
}
template <typename PointType> inline
const PointType& Triangle<PointType>::operator[](std::size_t index) const
{
return vertices_[index];
}
template <typename PointType> inline
PointType& Triangle<PointType>::operator[](std::size_t index)
{
return vertices_[index];
}
template <typename PointType> inline
const PointType& Triangle<PointType>::at(std::size_t index) const
{
check_range(index);
return vertices_[index];
}
template <typename PointType> inline
PointType& Triangle<PointType>::at(std::size_t index)
{
check_range(index);
return vertices_[index];
}
template <typename PointType>
bool Triangle<PointType>::operator==(const Triangle<PointType>& other) const
{
bool equal = !((vertices_[0] != other.vertices_[0]) ||
(vertices_[1] != other.vertices_[1]) ||
(vertices_[2] != other.vertices_[2]));
return equal;
}
template <typename PointType>
void Triangle<PointType>::check_range(std::size_t index)
{
if (index >= 3)
throw std::out_of_range("Triangle has only 3 vertices.");
}
} // namespace bo
#endif // TRIANGLE_HPP_507AFC96_F3F4_40FF_827C_66F388AEDAD2_
|
{"hexsha": "39d87ec40412b6caa144238577500d713da96a1c", "size": 4949, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "Bo/core/triangle.hpp", "max_stars_repo_name": "rukletsov/bo", "max_stars_repo_head_hexsha": "bfece9e8f910b0c8f522733854405bf0a801b0e8", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 3.0, "max_stars_repo_stars_event_min_datetime": "2016-09-14T03:30:30.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-16T10:53:32.000Z", "max_issues_repo_path": "Bo/core/triangle.hpp", "max_issues_repo_name": "rukletsov/bo", "max_issues_repo_head_hexsha": "bfece9e8f910b0c8f522733854405bf0a801b0e8", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Bo/core/triangle.hpp", "max_forks_repo_name": "rukletsov/bo", "max_forks_repo_head_hexsha": "bfece9e8f910b0c8f522733854405bf0a801b0e8", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.1363636364, "max_line_length": 89, "alphanum_fraction": 0.7078197616, "num_tokens": 1172}
|
[STATEMENT]
lemma table_classes_SXcpt [simp]:
"table_of Classes (SXcpt xn)
= Some \<lparr>access=Public,cfields=[],methods=SXcpt_mdecls,
init=Skip,
super=if xn = Throwable then Object else SXcpt Throwable,
superIfs=[]\<rparr>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. table_of Classes (SXcpt xn) = Some \<lparr>access = Public, cfields = [], methods = SXcpt_mdecls, init = Skip, super = if xn = Throwable then Object else SXcpt Throwable, superIfs = []\<rparr>
[PROOF STEP]
apply (unfold table_classes_defs)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. table_of ([(Base, BaseCl), (Ext, ExtCl), (Main, MainCl)] @ [(Object, \<lparr>access = Public, cfields = [], methods = Object_mdecls, init = Skip, super = undefined, superIfs = []\<rparr>), (SXcpt Throwable, \<lparr>access = Public, cfields = [], methods = SXcpt_mdecls, init = Skip, super = if Throwable = Throwable then Object else SXcpt Throwable, superIfs = []\<rparr>), (SXcpt NullPointer, \<lparr>access = Public, cfields = [], methods = SXcpt_mdecls, init = Skip, super = if NullPointer = Throwable then Object else SXcpt Throwable, superIfs = []\<rparr>), (SXcpt OutOfMemory, \<lparr>access = Public, cfields = [], methods = SXcpt_mdecls, init = Skip, super = if OutOfMemory = Throwable then Object else SXcpt Throwable, superIfs = []\<rparr>), (SXcpt ClassCast, \<lparr>access = Public, cfields = [], methods = SXcpt_mdecls, init = Skip, super = if ClassCast = Throwable then Object else SXcpt Throwable, superIfs = []\<rparr>), (SXcpt NegArrSize, \<lparr>access = Public, cfields = [], methods = SXcpt_mdecls, init = Skip, super = if NegArrSize = Throwable then Object else SXcpt Throwable, superIfs = []\<rparr>), (SXcpt IndOutBound, \<lparr>access = Public, cfields = [], methods = SXcpt_mdecls, init = Skip, super = if IndOutBound = Throwable then Object else SXcpt Throwable, superIfs = []\<rparr>), (SXcpt ArrStore, \<lparr>access = Public, cfields = [], methods = SXcpt_mdecls, init = Skip, super = if ArrStore = Throwable then Object else SXcpt Throwable, superIfs = []\<rparr>)]) (SXcpt xn) = Some \<lparr>access = Public, cfields = [], methods = SXcpt_mdecls, init = Skip, super = if xn = Throwable then Object else SXcpt Throwable, superIfs = []\<rparr>
[PROOF STEP]
apply (induct_tac xn)
[PROOF STATE]
proof (prove)
goal (7 subgoals):
1. table_of ([(Base, BaseCl), (Ext, ExtCl), (Main, MainCl)] @ [(Object, \<lparr>access = Public, cfields = [], methods = Object_mdecls, init = Skip, super = undefined, superIfs = []\<rparr>), (SXcpt Throwable, \<lparr>access = Public, cfields = [], methods = SXcpt_mdecls, init = Skip, super = if Throwable = Throwable then Object else SXcpt Throwable, superIfs = []\<rparr>), (SXcpt NullPointer, \<lparr>access = Public, cfields = [], methods = SXcpt_mdecls, init = Skip, super = if NullPointer = Throwable then Object else SXcpt Throwable, superIfs = []\<rparr>), (SXcpt OutOfMemory, \<lparr>access = Public, cfields = [], methods = SXcpt_mdecls, init = Skip, super = if OutOfMemory = Throwable then Object else SXcpt Throwable, superIfs = []\<rparr>), (SXcpt ClassCast, \<lparr>access = Public, cfields = [], methods = SXcpt_mdecls, init = Skip, super = if ClassCast = Throwable then Object else SXcpt Throwable, superIfs = []\<rparr>), (SXcpt NegArrSize, \<lparr>access = Public, cfields = [], methods = SXcpt_mdecls, init = Skip, super = if NegArrSize = Throwable then Object else SXcpt Throwable, superIfs = []\<rparr>), (SXcpt IndOutBound, \<lparr>access = Public, cfields = [], methods = SXcpt_mdecls, init = Skip, super = if IndOutBound = Throwable then Object else SXcpt Throwable, superIfs = []\<rparr>), (SXcpt ArrStore, \<lparr>access = Public, cfields = [], methods = SXcpt_mdecls, init = Skip, super = if ArrStore = Throwable then Object else SXcpt Throwable, superIfs = []\<rparr>)]) (SXcpt Throwable) = Some \<lparr>access = Public, cfields = [], methods = SXcpt_mdecls, init = Skip, super = if Throwable = Throwable then Object else SXcpt Throwable, superIfs = []\<rparr>
2. table_of ([(Base, BaseCl), (Ext, ExtCl), (Main, MainCl)] @ [(Object, \<lparr>access = Public, cfields = [], methods = Object_mdecls, init = Skip, super = undefined, superIfs = []\<rparr>), (SXcpt Throwable, \<lparr>access = Public, cfields = [], methods = SXcpt_mdecls, init = Skip, super = if Throwable = Throwable then Object else SXcpt Throwable, superIfs = []\<rparr>), (SXcpt NullPointer, \<lparr>access = Public, cfields = [], methods = SXcpt_mdecls, init = Skip, super = if NullPointer = Throwable then Object else SXcpt Throwable, superIfs = []\<rparr>), (SXcpt OutOfMemory, \<lparr>access = Public, cfields = [], methods = SXcpt_mdecls, init = Skip, super = if OutOfMemory = Throwable then Object else SXcpt Throwable, superIfs = []\<rparr>), (SXcpt ClassCast, \<lparr>access = Public, cfields = [], methods = SXcpt_mdecls, init = Skip, super = if ClassCast = Throwable then Object else SXcpt Throwable, superIfs = []\<rparr>), (SXcpt NegArrSize, \<lparr>access = Public, cfields = [], methods = SXcpt_mdecls, init = Skip, super = if NegArrSize = Throwable then Object else SXcpt Throwable, superIfs = []\<rparr>), (SXcpt IndOutBound, \<lparr>access = Public, cfields = [], methods = SXcpt_mdecls, init = Skip, super = if IndOutBound = Throwable then Object else SXcpt Throwable, superIfs = []\<rparr>), (SXcpt ArrStore, \<lparr>access = Public, cfields = [], methods = SXcpt_mdecls, init = Skip, super = if ArrStore = Throwable then Object else SXcpt Throwable, superIfs = []\<rparr>)]) (SXcpt NullPointer) = Some \<lparr>access = Public, cfields = [], methods = SXcpt_mdecls, init = Skip, super = if NullPointer = Throwable then Object else SXcpt Throwable, superIfs = []\<rparr>
3. table_of ([(Base, BaseCl), (Ext, ExtCl), (Main, MainCl)] @ [(Object, \<lparr>access = Public, cfields = [], methods = Object_mdecls, init = Skip, super = undefined, superIfs = []\<rparr>), (SXcpt Throwable, \<lparr>access = Public, cfields = [], methods = SXcpt_mdecls, init = Skip, super = if Throwable = Throwable then Object else SXcpt Throwable, superIfs = []\<rparr>), (SXcpt NullPointer, \<lparr>access = Public, cfields = [], methods = SXcpt_mdecls, init = Skip, super = if NullPointer = Throwable then Object else SXcpt Throwable, superIfs = []\<rparr>), (SXcpt OutOfMemory, \<lparr>access = Public, cfields = [], methods = SXcpt_mdecls, init = Skip, super = if OutOfMemory = Throwable then Object else SXcpt Throwable, superIfs = []\<rparr>), (SXcpt ClassCast, \<lparr>access = Public, cfields = [], methods = SXcpt_mdecls, init = Skip, super = if ClassCast = Throwable then Object else SXcpt Throwable, superIfs = []\<rparr>), (SXcpt NegArrSize, \<lparr>access = Public, cfields = [], methods = SXcpt_mdecls, init = Skip, super = if NegArrSize = Throwable then Object else SXcpt Throwable, superIfs = []\<rparr>), (SXcpt IndOutBound, \<lparr>access = Public, cfields = [], methods = SXcpt_mdecls, init = Skip, super = if IndOutBound = Throwable then Object else SXcpt Throwable, superIfs = []\<rparr>), (SXcpt ArrStore, \<lparr>access = Public, cfields = [], methods = SXcpt_mdecls, init = Skip, super = if ArrStore = Throwable then Object else SXcpt Throwable, superIfs = []\<rparr>)]) (SXcpt OutOfMemory) = Some \<lparr>access = Public, cfields = [], methods = SXcpt_mdecls, init = Skip, super = if OutOfMemory = Throwable then Object else SXcpt Throwable, superIfs = []\<rparr>
4. table_of ([(Base, BaseCl), (Ext, ExtCl), (Main, MainCl)] @ [(Object, \<lparr>access = Public, cfields = [], methods = Object_mdecls, init = Skip, super = undefined, superIfs = []\<rparr>), (SXcpt Throwable, \<lparr>access = Public, cfields = [], methods = SXcpt_mdecls, init = Skip, super = if Throwable = Throwable then Object else SXcpt Throwable, superIfs = []\<rparr>), (SXcpt NullPointer, \<lparr>access = Public, cfields = [], methods = SXcpt_mdecls, init = Skip, super = if NullPointer = Throwable then Object else SXcpt Throwable, superIfs = []\<rparr>), (SXcpt OutOfMemory, \<lparr>access = Public, cfields = [], methods = SXcpt_mdecls, init = Skip, super = if OutOfMemory = Throwable then Object else SXcpt Throwable, superIfs = []\<rparr>), (SXcpt ClassCast, \<lparr>access = Public, cfields = [], methods = SXcpt_mdecls, init = Skip, super = if ClassCast = Throwable then Object else SXcpt Throwable, superIfs = []\<rparr>), (SXcpt NegArrSize, \<lparr>access = Public, cfields = [], methods = SXcpt_mdecls, init = Skip, super = if NegArrSize = Throwable then Object else SXcpt Throwable, superIfs = []\<rparr>), (SXcpt IndOutBound, \<lparr>access = Public, cfields = [], methods = SXcpt_mdecls, init = Skip, super = if IndOutBound = Throwable then Object else SXcpt Throwable, superIfs = []\<rparr>), (SXcpt ArrStore, \<lparr>access = Public, cfields = [], methods = SXcpt_mdecls, init = Skip, super = if ArrStore = Throwable then Object else SXcpt Throwable, superIfs = []\<rparr>)]) (SXcpt ClassCast) = Some \<lparr>access = Public, cfields = [], methods = SXcpt_mdecls, init = Skip, super = if ClassCast = Throwable then Object else SXcpt Throwable, superIfs = []\<rparr>
5. table_of ([(Base, BaseCl), (Ext, ExtCl), (Main, MainCl)] @ [(Object, \<lparr>access = Public, cfields = [], methods = Object_mdecls, init = Skip, super = undefined, superIfs = []\<rparr>), (SXcpt Throwable, \<lparr>access = Public, cfields = [], methods = SXcpt_mdecls, init = Skip, super = if Throwable = Throwable then Object else SXcpt Throwable, superIfs = []\<rparr>), (SXcpt NullPointer, \<lparr>access = Public, cfields = [], methods = SXcpt_mdecls, init = Skip, super = if NullPointer = Throwable then Object else SXcpt Throwable, superIfs = []\<rparr>), (SXcpt OutOfMemory, \<lparr>access = Public, cfields = [], methods = SXcpt_mdecls, init = Skip, super = if OutOfMemory = Throwable then Object else SXcpt Throwable, superIfs = []\<rparr>), (SXcpt ClassCast, \<lparr>access = Public, cfields = [], methods = SXcpt_mdecls, init = Skip, super = if ClassCast = Throwable then Object else SXcpt Throwable, superIfs = []\<rparr>), (SXcpt NegArrSize, \<lparr>access = Public, cfields = [], methods = SXcpt_mdecls, init = Skip, super = if NegArrSize = Throwable then Object else SXcpt Throwable, superIfs = []\<rparr>), (SXcpt IndOutBound, \<lparr>access = Public, cfields = [], methods = SXcpt_mdecls, init = Skip, super = if IndOutBound = Throwable then Object else SXcpt Throwable, superIfs = []\<rparr>), (SXcpt ArrStore, \<lparr>access = Public, cfields = [], methods = SXcpt_mdecls, init = Skip, super = if ArrStore = Throwable then Object else SXcpt Throwable, superIfs = []\<rparr>)]) (SXcpt NegArrSize) = Some \<lparr>access = Public, cfields = [], methods = SXcpt_mdecls, init = Skip, super = if NegArrSize = Throwable then Object else SXcpt Throwable, superIfs = []\<rparr>
6. table_of ([(Base, BaseCl), (Ext, ExtCl), (Main, MainCl)] @ [(Object, \<lparr>access = Public, cfields = [], methods = Object_mdecls, init = Skip, super = undefined, superIfs = []\<rparr>), (SXcpt Throwable, \<lparr>access = Public, cfields = [], methods = SXcpt_mdecls, init = Skip, super = if Throwable = Throwable then Object else SXcpt Throwable, superIfs = []\<rparr>), (SXcpt NullPointer, \<lparr>access = Public, cfields = [], methods = SXcpt_mdecls, init = Skip, super = if NullPointer = Throwable then Object else SXcpt Throwable, superIfs = []\<rparr>), (SXcpt OutOfMemory, \<lparr>access = Public, cfields = [], methods = SXcpt_mdecls, init = Skip, super = if OutOfMemory = Throwable then Object else SXcpt Throwable, superIfs = []\<rparr>), (SXcpt ClassCast, \<lparr>access = Public, cfields = [], methods = SXcpt_mdecls, init = Skip, super = if ClassCast = Throwable then Object else SXcpt Throwable, superIfs = []\<rparr>), (SXcpt NegArrSize, \<lparr>access = Public, cfields = [], methods = SXcpt_mdecls, init = Skip, super = if NegArrSize = Throwable then Object else SXcpt Throwable, superIfs = []\<rparr>), (SXcpt IndOutBound, \<lparr>access = Public, cfields = [], methods = SXcpt_mdecls, init = Skip, super = if IndOutBound = Throwable then Object else SXcpt Throwable, superIfs = []\<rparr>), (SXcpt ArrStore, \<lparr>access = Public, cfields = [], methods = SXcpt_mdecls, init = Skip, super = if ArrStore = Throwable then Object else SXcpt Throwable, superIfs = []\<rparr>)]) (SXcpt IndOutBound) = Some \<lparr>access = Public, cfields = [], methods = SXcpt_mdecls, init = Skip, super = if IndOutBound = Throwable then Object else SXcpt Throwable, superIfs = []\<rparr>
7. table_of ([(Base, BaseCl), (Ext, ExtCl), (Main, MainCl)] @ [(Object, \<lparr>access = Public, cfields = [], methods = Object_mdecls, init = Skip, super = undefined, superIfs = []\<rparr>), (SXcpt Throwable, \<lparr>access = Public, cfields = [], methods = SXcpt_mdecls, init = Skip, super = if Throwable = Throwable then Object else SXcpt Throwable, superIfs = []\<rparr>), (SXcpt NullPointer, \<lparr>access = Public, cfields = [], methods = SXcpt_mdecls, init = Skip, super = if NullPointer = Throwable then Object else SXcpt Throwable, superIfs = []\<rparr>), (SXcpt OutOfMemory, \<lparr>access = Public, cfields = [], methods = SXcpt_mdecls, init = Skip, super = if OutOfMemory = Throwable then Object else SXcpt Throwable, superIfs = []\<rparr>), (SXcpt ClassCast, \<lparr>access = Public, cfields = [], methods = SXcpt_mdecls, init = Skip, super = if ClassCast = Throwable then Object else SXcpt Throwable, superIfs = []\<rparr>), (SXcpt NegArrSize, \<lparr>access = Public, cfields = [], methods = SXcpt_mdecls, init = Skip, super = if NegArrSize = Throwable then Object else SXcpt Throwable, superIfs = []\<rparr>), (SXcpt IndOutBound, \<lparr>access = Public, cfields = [], methods = SXcpt_mdecls, init = Skip, super = if IndOutBound = Throwable then Object else SXcpt Throwable, superIfs = []\<rparr>), (SXcpt ArrStore, \<lparr>access = Public, cfields = [], methods = SXcpt_mdecls, init = Skip, super = if ArrStore = Throwable then Object else SXcpt Throwable, superIfs = []\<rparr>)]) (SXcpt ArrStore) = Some \<lparr>access = Public, cfields = [], methods = SXcpt_mdecls, init = Skip, super = if ArrStore = Throwable then Object else SXcpt Throwable, superIfs = []\<rparr>
[PROOF STEP]
apply (simp add: Object_def SXcpt_def)+
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
|
{"llama_tokens": 5337, "file": null, "length": 4}
|
import Assignment1Support
import EvaluationsStub
import BagOfWords
import AddNoise
import collections
import operator
import numpy as np
### UPDATE this path for your environment
kDataPath = "..\\Data\\SMSSpamCollection"
(xRaw, yRaw) = Assignment1Support.LoadRawData(kDataPath)
(xTrainRawOriginal, yTrainRawOriginal, xTestRawOriginal, yTestRawOriginal) = Assignment1Support.TrainTestSplit(xRaw, yRaw)
(xTrainRaw, yTrainRaw) = AddNoise.MakeProblemHarder(xTrainRawOriginal, yTrainRawOriginal)
(xTestRaw, yTestRaw) = AddNoise.MakeProblemHarder(xTestRawOriginal, yTestRawOriginal)
#(xTrainRaw, yTrainRaw, xTestRaw, yTestRaw) = Assignment1Support.TrainTestSplit(xRaw, yRaw)
(xTrain, xTest) = Assignment1Support.FeaturizeExtenstion(xTrainRaw, yTrainRaw, xTestRaw, numFrequentWords=40, numMutualInformationWords=100, includeHandCraftedFeatures=True)
yTrain = yTrainRaw
yTest = yTestRaw
############################
import RandomForestsModel
rfmodel = RandomForestsModel.RandomForestsModel()
print("### BEST SMS model")
import LogisticRegressionModel_NumPy
logmodel = LogisticRegressionModel_NumPy.LogisticRegressionModel_NumPy()
#CROSS VALIDATION
#numFolds = 5
#rfavgAccr = 0
#logavgAccr = 0
#for i in range(numFolds):
# (foldTrainX, foldTrainY) = Assignment1Support.GetAllDataExceptFold(xTrain, yTrain, i, numFolds)
# (foldValidationX, foldValidationY) = Assignment1Support.GetDataInFold(xTrain, yTrain, i, numFolds)
# # do feature engineering/selection on foldTrainX, foldTrainY
# xTrain_np = np.asarray(foldTrainX)
# yTrain_np = np.asarray(foldTrainY)
# xTest_np = np.asarray(foldValidationX)
# yTest_np = np.asarray(foldValidationY)
# rfmodel.fit(xTrain_np, yTrain_np, numTrees=2, minSplit=100, useBagging=False, featureRestriction=0, seed=300)
# rfyPredicted = rfmodel.predictThres(xTest_np, 0.5)
# logmodel.fit(xTrain_np, yTrain_np, iterations=30000, step=0.01)
# logyPredicted = logmodel.predict(xTest_np, 0.5)
# rfavgAccr += EvaluationsStub.Accuracy(yTest_np, rfyPredicted)
# logavgAccr += EvaluationsStub.Accuracy(yTest_np, logyPredicted)
#print("RF CV Accuracy: %f" % (rfavgAccr / 5.0))
#print("LOG CV Accuracy: %f" % (logavgAccr / 5.0))
xTrain_np = np.asarray(xTrain)
yTrain_np = np.asarray(yTrain)
xTest_np = np.asarray(xTest)
yTest_np = np.asarray(yTest)
rfmodel.fit(xTrain_np, yTrain_np, numTrees=2, minSplit=100, useBagging=False, featureRestriction=0, seed=200)
logmodel.fit(xTrain_np, yTrain_np, iterations=30000, step=0.01)
#rfyPredicted = rfmodel.predictThres(xTest_np, 0.5)
#logyPredicted = logmodel.predict(xTest_np, 0.5)
#yPredictFinal = []
#for i in range(len(rfyPredicted)):
# yPredictFinal.append(1 if rfyPredicted[i] == 1 and logyPredicted[i] == 1 else 0 )
#print(EvaluationsStub.ExecuteAll(yTest, rfyPredicted))
#print(EvaluationsStub.ExecuteAll(yTest, logyPredicted))
#print(EvaluationsStub.ExecuteAll(yTest, yPredictFinal))
for j in range(101):
    thresholdval = j * 0.01  # sweep thresholds 0.00 .. 1.00 without accumulating float error
rfyPredicted = rfmodel.predictThres(xTest_np, thresholdval)
logyPredicted = logmodel.predict(xTest_np, thresholdval)
yPredictFinal = []
for i in range(len(rfyPredicted)):
yPredictFinal.append(1 if rfyPredicted[i] == 1 and logyPredicted[i] == 1 else 0 )
print(thresholdval, EvaluationsStub.FalsePositiveRate(yTest_np, rfyPredicted), EvaluationsStub.FalseNegativeRate(yTest_np, rfyPredicted))
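    # Editorial note (sketch): yPredictFinal, the AND-combined ensemble built above,
    # is never reported; to sweep the ensemble's operating points as well, one
    # possibility is:
    #   print(thresholdval,
    #         EvaluationsStub.FalsePositiveRate(yTest_np, yPredictFinal),
    #         EvaluationsStub.FalseNegativeRate(yTest_np, yPredictFinal))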
|
{"hexsha": "18a76ab9d13b5ec0bc6493ade9a4429c899145ec", "size": 3422, "ext": "py", "lang": "Python", "max_stars_repo_path": "Code/StartingPoint2.py", "max_stars_repo_name": "isibord/DecisionTree", "max_stars_repo_head_hexsha": "8e3336e9c7ec60ebfcecd02a1b7d7ae5dd6f7054", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-02-13T11:24:15.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-13T11:24:15.000Z", "max_issues_repo_path": "Code/StartingPoint2.py", "max_issues_repo_name": "isibord/DecisionTree", "max_issues_repo_head_hexsha": "8e3336e9c7ec60ebfcecd02a1b7d7ae5dd6f7054", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Code/StartingPoint2.py", "max_forks_repo_name": "isibord/DecisionTree", "max_forks_repo_head_hexsha": "8e3336e9c7ec60ebfcecd02a1b7d7ae5dd6f7054", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.5656565657, "max_line_length": 173, "alphanum_fraction": 0.7647574518, "include": true, "reason": "import numpy", "num_tokens": 1026}
|
# /*
# * @Author: dorming
# * @Date: 2021-01-14 15:21:38
# * @Last Modified by: dorming
# * @Last Modified time: 2021-01-14 15:21:38
# */
import numpy as np
class B(object):
def __init__(self, *args, **kwargs):
self.a = 1
self.b = 2
print(self)
print("init", args, kwargs)
def __new__(cls, *args, **kwargs):
print("new ", args, kwargs)
test = super(B, cls).__new__(cls)
print(test)
return test
class Counter:
def __init__(self, func):
self.func = func
self.count = 0
def __call__(self, *args, **kwargs):
self.count += 1
return self.func(*args, **kwargs)
@Counter
def foo():
pass
for i in range(10):
foo()
print(foo.count) # 10
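# Editorial sketch: the class-based Counter above works, but it replaces the
# function with an instance and drops the wrapped function's metadata. A
# closure-based variant (stdlib functools only) preserves __name__/__doc__:
import functools

def counter(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        wrapper.count += 1
        return func(*args, **kwargs)
    wrapper.count = 0
    return wrapper

@counter
def bar():
    pass

for _ in range(3):
    bar()
print(bar.count)  # 3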
|
{"hexsha": "a32b73223a9cac93d880eb5bad6d13778b902427", "size": 774, "ext": "py", "lang": "Python", "max_stars_repo_path": "lib/augmentation/test_case.py", "max_stars_repo_name": "zongdaoming/CMT", "max_stars_repo_head_hexsha": "fc3773bb6c6b1ab091688addfffca3fb1e382ae4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-05-10T20:12:23.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-24T18:01:13.000Z", "max_issues_repo_path": "lib/augmentation/test_case.py", "max_issues_repo_name": "zongdaoming/CMT", "max_issues_repo_head_hexsha": "fc3773bb6c6b1ab091688addfffca3fb1e382ae4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lib/augmentation/test_case.py", "max_forks_repo_name": "zongdaoming/CMT", "max_forks_repo_head_hexsha": "fc3773bb6c6b1ab091688addfffca3fb1e382ae4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 20.3684210526, "max_line_length": 46, "alphanum_fraction": 0.5387596899, "include": true, "reason": "import numpy", "num_tokens": 243}
|
"""Wigner thermal conductivity base class."""
# Copyright (C) 2022 Michele Simoncelli
# All rights reserved.
#
# This file is part of phono3py.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the phonopy project nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import textwrap
import numpy as np
from phonopy.phonon.degeneracy import degenerate_sets
from phonopy.units import EV, Angstrom, Hbar, THz
from phono3py.conductivity.base import HeatCapacityMixIn
from phono3py.phonon.grid import get_grid_points_by_rotations
from phono3py.phonon.velocity_operator import VelocityOperator
class ConductivityWignerMixIn(HeatCapacityMixIn):
"""Thermal conductivity mix-in for velocity operator.
This mix-in is included in `ConductivityWignerRTA` and `ConductivityWignerLBTE`.
"""
@property
def kappa_TOT_RTA(self):
"""Return kappa."""
return self._kappa_TOT_RTA
@property
def kappa_P_RTA(self):
"""Return kappa."""
return self._kappa_P_RTA
@property
def kappa_C(self):
"""Return kappa."""
return self._kappa_C
@property
def mode_kappa_P_RTA(self):
"""Return mode_kappa."""
return self._mode_kappa_P_RTA
@property
def mode_kappa_C(self):
"""Return mode_kappa."""
return self._mode_kappa_C
@property
def velocity_operator(self):
"""Return velocity operator at grid points.
        Grid points are those at which mode kappa is calculated.
"""
return self._gv_operator
@property
def gv_by_gv_operator(self):
"""Return gv_by_gv operator at grid points where mode kappa are calculated."""
return self._gv_operator_sum2
def _init_velocity(self, gv_delta_q):
self._velocity_obj = VelocityOperator(
self._pp.dynamical_matrix,
q_length=gv_delta_q,
symmetry=self._pp.primitive_symmetry,
frequency_factor_to_THz=self._pp.frequency_factor_to_THz,
)
def _set_velocities(self, i_gp, i_data):
self._set_gv_operator(i_gp, i_data)
self._set_gv_by_gv_operator(i_gp, i_data)
def _set_gv_operator(self, i_irgp, i_data):
"""Set velocity operator."""
irgp = self._grid_points[i_irgp]
self._velocity_obj.run([self._get_qpoint_from_gp_index(irgp)])
gv_operator = self._velocity_obj.velocity_operators[0, :, :, :]
self._gv_operator[i_data] = gv_operator[self._pp.band_indices, :, :]
#
gv = np.einsum("iij->ij", gv_operator).real
deg_sets = degenerate_sets(self._frequencies[irgp])
# group velocities in the degenerate subspace are obtained diagonalizing the
# velocity operator in the subspace of degeneracy.
for id_dir in range(3):
pos = 0
for deg in deg_sets:
if len(deg) > 1:
matrix_deg = gv_operator[
pos : pos + len(deg), pos : pos + len(deg), id_dir
]
eigvals_deg = np.linalg.eigvalsh(matrix_deg)
gv[pos : pos + len(deg), id_dir] = eigvals_deg
pos += len(deg)
#
self._gv[i_data] = gv[self._pp.band_indices, :]
def _set_gv_by_gv_operator(self, i_irgp, i_data):
"""Outer product of group velocities.
(v x v) [num_k*, num_freqs, 3, 3]
"""
gv_by_gv_operator_tensor, order_kstar = self._get_gv_by_gv_operator(
i_irgp, i_data
)
# gv_by_gv_tensor, order_kstar = self._get_gv_by_gv(i_irgp, i_data)
self._num_sampling_grid_points += order_kstar
# Sum all vxv at k*
for j, vxv in enumerate(([0, 0], [1, 1], [2, 2], [1, 2], [0, 2], [0, 1])):
# self._gv_sum2[i_data, :, j] = gv_by_gv_tensor[:, vxv[0], vxv[1]]
# here it is storing the 6 independent components of the v^i x v^j tensor
# i_data is the q-point index
# j indexes the 6 independent component of the symmetric tensor v^i x v^j
self._gv_operator_sum2[i_data, :, :, j] = gv_by_gv_operator_tensor[
:, :, vxv[0], vxv[1]
]
# self._gv_sum2[i_data, :, j] = gv_by_gv_tensor[:, vxv[0], vxv[1]]
def _get_gv_by_gv_operator(self, i_irgp, i_data):
if self._is_kappa_star:
rotation_map = get_grid_points_by_rotations(
self._grid_points[i_irgp], self._pp.bz_grid
)
else:
rotation_map = get_grid_points_by_rotations(
self._grid_points[i_irgp],
self._pp.bz_grid,
reciprocal_rotations=self._point_operations,
)
gv_operator = self._gv_operator[i_data]
nat3 = len(self._pp.primitive) * 3
nbands = np.shape(gv_operator)[0]
gv_by_gv_operator = np.zeros((nbands, nat3, 3, 3), dtype=self._complex_dtype)
for r in self._rotations_cartesian:
# can be optimized
gvs_rot_operator = np.zeros((nbands, nat3, 3), dtype=self._complex_dtype)
for s in range(0, nbands):
for s_p in range(0, nat3):
for i in range(0, 3):
for j in range(0, 3):
gvs_rot_operator[s, s_p, i] += (
gv_operator[s, s_p, j] * r.T[j, i]
)
#
for s in range(0, nbands):
for s_p in range(0, nat3):
for i in range(0, 3):
for j in range(0, 3):
gv_by_gv_operator[s, s_p, i, j] += gvs_rot_operator[
s, s_p, i
] * np.conj(gvs_rot_operator[s, s_p, j])
# note np.conj(gvs_rot_operator[s,s_p,j]) =
# gvs_rot_operator[s_p,s,j] since Vel op. is hermitian
order_kstar = len(np.unique(rotation_map))
gv_by_gv_operator /= len(rotation_map) // len(np.unique(rotation_map))
if self._grid_weights is not None:
if order_kstar != self._grid_weights[i_irgp]:
if self._log_level:
text = (
"Number of elements in k* is unequal "
"to number of equivalent grid-points. "
"This means that the mesh sampling grids break "
"symmetry. Please check carefully "
"the convergence over grid point densities."
)
msg = textwrap.fill(
text, initial_indent=" ", subsequent_indent=" ", width=70
)
print("*" * 30 + "Warning" + "*" * 30)
print(msg)
print("*" * 67)
return gv_by_gv_operator, order_kstar
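# Editorial sketch (not part of the phono3py API): the quadruple loops in
# _get_gv_by_gv_operator above are a rotation followed by an outer product, and
# assuming standard NumPy broadcasting they can be collapsed into two einsum
# calls per rotation. Normalization by the k-star multiplicity is left to the
# caller, as in the method above.
def _gv_by_gv_operator_einsum_sketch(gv_operator, rotations_cartesian, complex_dtype):
    """Vectorized equivalent of the explicit rotation/outer-product loops."""
    nbands, nat3, _ = gv_operator.shape
    gv_by_gv = np.zeros((nbands, nat3, 3, 3), dtype=complex_dtype)
    for r in rotations_cartesian:
        # v'[s, s', i] = sum_j v[s, s', j] * R^T[j, i]
        gvs_rot = np.einsum("spj,ji->spi", gv_operator, r.T)
        # accumulate v'[s, s', i] * conj(v'[s, s', j])
        gv_by_gv += np.einsum("spi,spj->spij", gvs_rot, np.conj(gvs_rot))
    return gv_by_gv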
def get_conversion_factor_WTE(volume):
"""Return conversion factor of thermal conductivity."""
return (
(THz * Angstrom) ** 2 # ----> group velocity
        * EV  # ----> specific heat is in eV/K
* Hbar # ----> transform lorentzian_div_hbar from eV^-1 to s
/ (volume * Angstrom**3)
) # ----> unit cell volume
|
{"hexsha": "a6d754d02afaaec9e56e7b1525835ec99305736d", "size": 8628, "ext": "py", "lang": "Python", "max_stars_repo_path": "phono3py/conductivity/wigner.py", "max_stars_repo_name": "MSimoncelli/phono3py", "max_stars_repo_head_hexsha": "b28b45a025c279833e9269e5d91330c75d3f6ae0", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 38, "max_stars_repo_stars_event_min_datetime": "2016-04-27T04:43:25.000Z", "max_stars_repo_stars_event_max_datetime": "2020-05-01T07:46:56.000Z", "max_issues_repo_path": "phono3py/conductivity/wigner.py", "max_issues_repo_name": "MSimoncelli/phono3py", "max_issues_repo_head_hexsha": "b28b45a025c279833e9269e5d91330c75d3f6ae0", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 36, "max_issues_repo_issues_event_min_datetime": "2016-12-22T12:42:54.000Z", "max_issues_repo_issues_event_max_datetime": "2020-05-02T07:31:53.000Z", "max_forks_repo_path": "phono3py/conductivity/wigner.py", "max_forks_repo_name": "MSimoncelli/phono3py", "max_forks_repo_head_hexsha": "b28b45a025c279833e9269e5d91330c75d3f6ae0", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 30, "max_forks_repo_forks_event_min_datetime": "2016-02-11T13:33:56.000Z", "max_forks_repo_forks_event_max_datetime": "2020-05-01T21:36:50.000Z", "avg_line_length": 38.8648648649, "max_line_length": 86, "alphanum_fraction": 0.6048910524, "include": true, "reason": "import numpy", "num_tokens": 2080}
|
import numpy as np
from bokeh.document import Document
from bokeh.models import ColumnDataSource, Range1d, Plot, LinearAxis, Grid
from bokeh.models.glyphs import ImageURL
from bokeh.plotting import show
url = "http://bokeh.pydata.org/en/latest/_static/images/logo.png"
N = 5
source = ColumnDataSource(dict(
url = [url]*N,
x1 = np.linspace( 0, 150, N),
y1 = np.linspace( 0, 150, N),
w1 = np.linspace( 10, 50, N),
h1 = np.linspace( 10, 50, N),
x2 = np.linspace(-50, 150, N),
y2 = np.linspace( 0, 200, N),
))
xdr = Range1d(start=-100, end=200)
ydr = Range1d(start=-100, end=200)
plot = Plot(
title=None, x_range=xdr, y_range=ydr, plot_width=300, plot_height=300,
h_symmetry=False, v_symmetry=False, min_border=0, toolbar_location=None)
image1 = ImageURL(url="url", x="x1", y="y1", w="w1", h="h1", anchor="center")
plot.add_glyph(source, image1)
image2 = ImageURL(url="url", x="x2", y="y2", w=20, h=20, anchor="top_left")
plot.add_glyph(source, image2)
image3 = ImageURL(url=dict(value=url), x=200, y=-100, anchor="bottom_right")
plot.add_glyph(source, image3)
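# The three glyphs above cover the three ways glyph properties can be given:
# image1 takes url/x/y/w/h from ColumnDataSource columns, image2 mixes columns
# with fixed w/h values, and image3 uses only literal values (the url wrapped
# in dict(value=...) to mark it as a value rather than a field name).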
xaxis = LinearAxis()
plot.add_layout(xaxis, 'below')
yaxis = LinearAxis()
plot.add_layout(yaxis, 'left')
plot.add_layout(Grid(dimension=0, ticker=xaxis.ticker))
plot.add_layout(Grid(dimension=1, ticker=yaxis.ticker))
doc = Document()
doc.add_root(plot)
show(plot)
|
{"hexsha": "136a741c2b79528c4f9addc48108ff5d0c142206", "size": 1381, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/glyphs/ImageURL.py", "max_stars_repo_name": "andreagrant/bokehDev", "max_stars_repo_head_hexsha": "a684afee183496c54d4f187a890707cf6b5ec2a5", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-04-03T13:05:55.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-03T13:05:55.000Z", "max_issues_repo_path": "tests/glyphs/ImageURL.py", "max_issues_repo_name": "andreagrant/bokehDev", "max_issues_repo_head_hexsha": "a684afee183496c54d4f187a890707cf6b5ec2a5", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/glyphs/ImageURL.py", "max_forks_repo_name": "andreagrant/bokehDev", "max_forks_repo_head_hexsha": "a684afee183496c54d4f187a890707cf6b5ec2a5", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.62, "max_line_length": 77, "alphanum_fraction": 0.6886314265, "include": true, "reason": "import numpy", "num_tokens": 448}
|
export saveRecoParams, loadRecoParams, defaultRecoParams, defaultOnlineRecoParams
function defaultRecoParams()
params = Dict{Symbol,Any}()
params[:lambd] = 1e-2
params[:iterations] = 4
params[:SNRThresh] = 2.0
params[:minFreq] = 80e3
params[:maxFreq] = 1.25e6
params[:sortBySNR] = false
params[:nAverages] = 1
params[:repetitionTime] = 0.0
params[:denoiseWeight] = 0
params[:loadas32bit] = true
params[:loadasreal] = false
params[:sparseTrafo] = nothing
params[:redFactor] = 0.0
params[:solver] = "kaczmarz"
params[:emptyMeasPath] = nothing
params[:frames] = 1
params[:spectralCleaning] = true
params[:recChannels] = [1,2,3]
params[:reconstructor] = get(ENV,"USER","default")
params[:firstFrameBG] = 1
params[:lastFrameBG] = 1
return params
end
function defaultRecoParamsOld()
params = Dict{Symbol,Any}()
params[:lambd] = "1e-2"
params[:iterations] = "4"
params[:SNRThresh] = "2"
params[:minFreq] = "80e3"
params[:maxFreq] = "1.25e6"
params[:sortBySNR] = "false"
params[:nAverages] = "1"
params[:repetitionTime] = "0.0"
params[:denoiseWeight] = "0"
params[:loadas32bit] = "true"
params[:loadasreal] = "false"
params[:maxload] = "100"
params[:sparseTrafo] = "nothing"
params[:redFactor] = "0.0"
params[:solver] = "kaczmarz"
params[:bEmpty] = "nothing"
return params
end
function defaultOnlineRecoParams()
params = defaultRecoParams()
params[:iterations] = 1
params[:SNRThresh] = 5
return params
end
function saveRecoParams(filename::AbstractString, params)
ini = Inifile()
for (key,value) in params
set(ini, string(key), string(value) )
end
open(filename,"w") do fd
write(fd, ini)
end
end
to_bool(s::AbstractString) = lowercase(s) == "true"
to_bool(b::Bool) = b
function loadRecoParams(filename::AbstractString)
ini = Inifile()
if isfile(filename)
read(ini, filename)
end
params = defaultRecoParamsOld()
for key in [:lambd, :SNRThresh, :minFreq, :maxFreq, :repetitionTime, :denoiseWeight, :redFactor]
params[key] = parse(Float64,get(ini,"","$key", params[key]))
end
for key in [:iterations, :nAverages, :maxload]
params[key] = parse(Int,get(ini,"","$key", params[key]))
end
for key in [:sortBySNR, :loadas32bit, :loadasreal]
params[key] = to_bool(get(ini,"","$key", params[key]))
end
sparseTrafo = get(ini,"","sparseTrafo", params[:sparseTrafo])
params[:sparseTrafo] = (sparseTrafo == "nothing") ? nothing : sparseTrafo
params[:solver] = get(ini,"","solver", params[:solver])
params[:bEmpty] = get(ini,"bEmpty")
if params[:bEmpty] == :notfound
params[:bEmpty] = nothing
end
params[:SFPath] = get(ini,"SFPath")
if params[:SFPath] == :notfound
params[:SFPath] = nothing
else
params[:SFPath] = String[strip(path) for path in split(params[:SFPath],",")]
end
params[:SFPathFreq] = get(ini,"SFPathFreq")
if params[:SFPathFreq] == :notfound
params[:SFPathFreq] = nothing
else
params[:SFPathFreq] = String[strip(path) for path in split(params[:SFPathFreq],",")]
end
recChanStr = get(ini,"recChannels")
if recChanStr != :notfound
params[:recChannels] = [parse(Int,chan) for chan in split(recChanStr,",")]
end
return params
end
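# Editorial usage sketch ("reco.ini" is a hypothetical path): round-trip the
# default parameters through an ini file.
#
#   params = defaultRecoParams()
#   saveRecoParams("reco.ini", params)
#   loaded = loadRecoParams("reco.ini")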
|
{"hexsha": "461410cfba4c2050efdbae382b30d1d8ffdcd41a", "size": 3258, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/RecoParameters.jl", "max_stars_repo_name": "UnofficialJuliaMirrorSnapshots/MPIReco.jl-e4246700-6248-511e-8146-a1d1f47669d2", "max_stars_repo_head_hexsha": "1c7ef4519acb88fd9ba0089515ca8b2f78b05fb4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2019-11-29T16:33:25.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-26T06:30:47.000Z", "max_issues_repo_path": "src/RecoParameters.jl", "max_issues_repo_name": "UnofficialJuliaMirrorSnapshots/MPIReco.jl-e4246700-6248-511e-8146-a1d1f47669d2", "max_issues_repo_head_hexsha": "1c7ef4519acb88fd9ba0089515ca8b2f78b05fb4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 11, "max_issues_repo_issues_event_min_datetime": "2018-10-04T09:15:59.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-19T09:30:17.000Z", "max_forks_repo_path": "src/RecoParameters.jl", "max_forks_repo_name": "UnofficialJuliaMirrorSnapshots/MPIReco.jl-e4246700-6248-511e-8146-a1d1f47669d2", "max_forks_repo_head_hexsha": "1c7ef4519acb88fd9ba0089515ca8b2f78b05fb4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2019-03-12T14:08:50.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-08T05:36:02.000Z", "avg_line_length": 26.2741935484, "max_line_length": 98, "alphanum_fraction": 0.6706568447, "num_tokens": 1094}
|
#pragma once
#include <cstring>
#include <limits>
#include <map>
#include <memory>
#include <stdexcept>
#include <vector>
#include <Eigen/Geometry>
#if VOXELIZED_GEOMETRY_TOOLS__SUPPORTED_ROS_VERSION == 2
#include <sensor_msgs/msg/point_cloud2.hpp>
#elif VOXELIZED_GEOMETRY_TOOLS__SUPPORTED_ROS_VERSION == 1
#include <sensor_msgs/PointCloud2.h>
#else
#error "Undefined or unknown VOXELIZED_GEOMETRY_TOOLS__SUPPORTED_ROS_VERSION"
#endif
#include <voxelized_geometry_tools/pointcloud_voxelization_interface.hpp>
namespace voxelized_geometry_tools
{
namespace pointcloud_voxelization
{
#if VOXELIZED_GEOMETRY_TOOLS__SUPPORTED_ROS_VERSION == 2
using PointCloud2 = sensor_msgs::msg::PointCloud2;
using PointCloud2ConstSharedPtr = std::shared_ptr<const PointCloud2>;
#elif VOXELIZED_GEOMETRY_TOOLS__SUPPORTED_ROS_VERSION == 1
using PointCloud2 = sensor_msgs::PointCloud2;
using PointCloud2ConstSharedPtr = sensor_msgs::PointCloud2ConstPtr;
#endif
class PointCloud2Wrapper : public PointCloudWrapper
{
public:
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
double MaxRange() const override { return max_range_; }
  int64_t Size() const override
  {
    // Cast each dimension before multiplying so the uint32 product cannot wrap.
    return static_cast<int64_t>(cloud_ptr_->width)
        * static_cast<int64_t>(cloud_ptr_->height);
  }
const Eigen::Isometry3d& GetPointCloudOriginTransform() const override
{
return origin_transform_;
}
void SetPointCloudOriginTransform(
const Eigen::Isometry3d& origin_transform) override
{
origin_transform_ = origin_transform;
}
protected:
PointCloud2Wrapper(
const PointCloud2* const cloud_ptr,
const Eigen::Isometry3d& origin_transform, const double max_range);
private:
void CopyPointLocationIntoDoublePtrImpl(
const int64_t point_index, double* destination) const override
{
const Eigen::Vector4d point =
GetPointLocationVector4f(point_index).cast<double>();
std::memcpy(destination, point.data(), sizeof(double) * 3);
}
void CopyPointLocationIntoFloatPtrImpl(
const int64_t point_index, float* destination) const override
{
const size_t starting_offset = GetStartingOffsetForPointXYZ(point_index);
std::memcpy(destination, &(cloud_ptr_->data.at(starting_offset)),
sizeof(float) * 3);
}
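  // Note (assumption): the memcpy above presumes x/y/z are stored as three
  // consecutive FLOAT32 fields starting at xyz_offset_from_point_start_; the
  // protected constructor is expected to validate this field layout.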
size_t GetStartingOffsetForPointXYZ(const int64_t point_index) const
{
const size_t starting_offset =
(static_cast<size_t>(point_index)
* static_cast<size_t>(cloud_ptr_->point_step))
+ xyz_offset_from_point_start_;
return starting_offset;
}
const PointCloud2* const cloud_ptr_ = nullptr;
size_t xyz_offset_from_point_start_ = 0;
Eigen::Isometry3d origin_transform_ = Eigen::Isometry3d::Identity();
double max_range_ = std::numeric_limits<double>::infinity();
};
class NonOwningPointCloud2Wrapper : public PointCloud2Wrapper
{
public:
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
NonOwningPointCloud2Wrapper(
const PointCloud2* const cloud_ptr,
const Eigen::Isometry3d& origin_transform,
const double max_range = std::numeric_limits<double>::infinity())
: PointCloud2Wrapper(cloud_ptr, origin_transform, max_range) {}
};
class OwningPointCloud2Wrapper : public PointCloud2Wrapper
{
public:
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
OwningPointCloud2Wrapper(
const PointCloud2ConstSharedPtr& cloud_ptr,
const Eigen::Isometry3d& origin_transform,
const double max_range = std::numeric_limits<double>::infinity())
: PointCloud2Wrapper(cloud_ptr.get(), origin_transform, max_range),
owned_cloud_ptr_(cloud_ptr) {}
private:
PointCloud2ConstSharedPtr owned_cloud_ptr_;
};
} // namespace pointcloud_voxelization
} // namespace voxelized_geometry_tools
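// Editorial usage sketch (hypothetical caller; assumes `cloud` is a well-formed
// PointCloud2 with x/y/z stored as consecutive FLOAT32 fields):
//
//   using voxelized_geometry_tools::pointcloud_voxelization::NonOwningPointCloud2Wrapper;
//   PointCloud2 cloud = MakeCloudSomehow();  // hypothetical helper
//   const Eigen::Isometry3d origin = Eigen::Isometry3d::Identity();
//   NonOwningPointCloud2Wrapper wrapper(&cloud, origin, /*max_range=*/5.0);
//   const int64_t num_points = wrapper.Size();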
|
{"hexsha": "7df1d04a9592b63eff9cb7ed039cc621b8d43d4c", "size": 3659, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/voxelized_geometry_tools/pointcloud_voxelization_ros_interface.hpp", "max_stars_repo_name": "calderpg/voxelized_geometry_tools", "max_stars_repo_head_hexsha": "cc36bfd426e984e451e5b844f89be8596b905774", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 29.0, "max_stars_repo_stars_event_min_datetime": "2018-10-15T19:05:07.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-28T05:29:13.000Z", "max_issues_repo_path": "include/voxelized_geometry_tools/pointcloud_voxelization_ros_interface.hpp", "max_issues_repo_name": "calderpg/voxelized_geometry_tools", "max_issues_repo_head_hexsha": "cc36bfd426e984e451e5b844f89be8596b905774", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 6.0, "max_issues_repo_issues_event_min_datetime": "2018-11-29T23:49:57.000Z", "max_issues_repo_issues_event_max_datetime": "2021-10-20T13:16:19.000Z", "max_forks_repo_path": "include/voxelized_geometry_tools/pointcloud_voxelization_ros_interface.hpp", "max_forks_repo_name": "calderpg/voxelized_geometry_tools", "max_forks_repo_head_hexsha": "cc36bfd426e984e451e5b844f89be8596b905774", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 13.0, "max_forks_repo_forks_event_min_datetime": "2018-10-17T23:58:35.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T06:26:46.000Z", "avg_line_length": 29.9918032787, "max_line_length": 77, "alphanum_fraction": 0.7728887674, "num_tokens": 884}
|
library(ggplot2)
args <- commandArgs(trailingOnly=TRUE)
if (length(args) < 15) {
  print(args)
  stop("Usage: evaluator_bars_per_group_strategies_on_demand.r common.r input.csv output.pdf granularity groupName evaluator1 unit1 quantity1 indication1 evaluator2 unit2 quantity2 indication2 strategy1 strategy2")
}
common <- args[1]
inFile <- args[2]
outPdf <- args[3]
# granularity <- args[4]
groupName <- args[5]
e1 <- args[6]
u1 <- args[7]
q1 <- args[8]
i1 <- args[9]
e2 <- args[10]
u2 <- args[11]
q2 <- args[12]
i2 <- args[13]
s1 <- args[14]
s2 <- args[15]
source(common)
res = read.csv(inFile, header=TRUE)
res$origin <- paste(res$file, res$focus, res$strategy)
# Make the output numeric
res$output <- suppressWarnings(as.numeric(as.character(res$output)))
# Replace NaN with '0'
res$output <- replace(res$output, is.na(res$output), 0)
# Select the right data (after the conversion, so res1/res2 carry numeric output)
res1 <- res[res$evaluator == e1,]
res2 <- res[res$evaluator == e2,]
dat <- merge(res1, res2, by = "origin")
dat <- dat[!is.na(dat$output.x),]
dat <- dat[!is.na(dat$output.y),]
dat$strategy <- dat$strategy.x
if (length(dat$origin) != 0) {
startPdf(outPdf)
ggplot(dat, aes(output.x, output.y, fill = strategy)) +
geom_bar(stat="identity", position = "dodge") +
scale_fill_brewer(palette = "Set1") +
labs(x = paste(e1, "(", u1, q1, ")")) + labs(y = paste(e2, "(", u2, q2, ")")) +
theme(axis.title.y=element_text(angle = 0)) +
theme(legend.position="bottom")
} else {
invalidDataPdf(outPdf)
}
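# Editorial usage sketch (file names are hypothetical); the script expects 15
# positional arguments, the last two being the strategy labels read into s1/s2:
#   Rscript evaluator_bars_per_group_strategies_on_demand.r \
#     common.r input.csv output.pdf granularity groupName \
#     evaluator1 unit1 quantity1 indication1 \
#     evaluator2 unit2 quantity2 indication2 \
#     strategy1 strategy2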
|
{"hexsha": "49ece405c4e4d0f5b44c384054d1e0576e3c56ba", "size": 1468, "ext": "r", "lang": "R", "max_stars_repo_path": "easyspec-evaluate/rscripts/evaluator_bars_per_group_strategies_on_demand.r", "max_stars_repo_name": "NorfairKing/easyspec", "max_stars_repo_head_hexsha": "b038b45a375cc0bed2b00c255b508bc06419c986", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 29, "max_stars_repo_stars_event_min_datetime": "2017-07-06T08:41:57.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-23T19:27:30.000Z", "max_issues_repo_path": "easyspec-evaluate/rscripts/evaluator_bars_per_group_strategies_on_demand.r", "max_issues_repo_name": "NorfairKing/easyspec", "max_issues_repo_head_hexsha": "b038b45a375cc0bed2b00c255b508bc06419c986", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2017-09-10T19:07:51.000Z", "max_issues_repo_issues_event_max_datetime": "2017-09-17T10:58:41.000Z", "max_forks_repo_path": "easyspec-evaluate/rscripts/evaluator_bars_per_group_strategies_on_demand.r", "max_forks_repo_name": "NorfairKing/easyspec", "max_forks_repo_head_hexsha": "b038b45a375cc0bed2b00c255b508bc06419c986", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2017-09-09T19:42:50.000Z", "max_forks_repo_forks_event_max_datetime": "2018-05-30T10:03:55.000Z", "avg_line_length": 23.6774193548, "max_line_length": 184, "alphanum_fraction": 0.6566757493, "num_tokens": 460}
|
#!/usr/bin/env python3
import sys
from os.path import expanduser
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
from mpl_toolkits.mplot3d import Axes3D
# Look for modules in top level of AstroLib
sys.path.insert(0, expanduser("~/AstroLib/python"))
from orbits.kepler_uv import kep_uv, OutputKepUV
"""
kepler_uv_plot.py - demo to show kepler_uv results
"""
mu = 1
rv0 = np.array([1, 0, 0, 0, 1, 0])
tof = np.linspace(0, 5, num=500)
n = tof.size
x = np.zeros(n)
y = np.zeros(n)
z = np.zeros(n)
for i in range(0,n):
output = kep_uv(rv0, tof[i], mu)
x[i] = output.rv[0]
y[i] = output.rv[1]
z[i] = output.rv[2]
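# Each pass propagates the initial state rv0 forward by tof[i] with the
# universal-variable Kepler solver; only the position components are kept.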
# Plot trajectory
fig1 = plt.figure()
ax11 = fig1.add_subplot(111, projection='3d', \
xlabel='X Position (LU)', \
ylabel='Y Position (LU)', \
zlabel='Z Position (LU)')
traj = ax11.plot(x, y, z, label='Trajectory')
traj_start = ax11.scatter(x[0], y[0], z[0], \
marker='o', color='k', label='Start')
traj_end = ax11.scatter(x[n-1], y[n-1], z[n-1], \
marker='x', color='r', label='End')
l = max(np.max(abs(x)),np.max(abs(y)),np.max(abs(z)))*1.1
ax11.set_xlim(-l, l)
ax11.set_ylim(-l, l)
ax11.set_zlim(-l, l)
ax11.legend(loc=2)
# Plot convergence for full tof calculation
fig2 = plt.figure()
ax21 = fig2.add_subplot(211, ylabel='x')
i_total = output.diagnostic['i']
x_store = output.diagnostic['x_store']
x_line = ax21.plot(np.arange(0,i_total+1), x_store, '.-')
ax21.set_xlim(0,i_total)
ax21.xaxis.set_major_locator(MaxNLocator(integer=True))
ax21.grid(True)
ax22 = fig2.add_subplot(212, xlabel='iteration', ylabel='f(x) = TOF - t')
deltat_store = output.diagnostic['deltat_store']
fx_line = ax22.plot(np.arange(0,i_total), deltat_store, 'r.-')
ax22.set_xlim(0,i_total)
ax22.xaxis.set_major_locator(MaxNLocator(integer=True))
ax22.grid(True)
plt.show()
|
{"hexsha": "c4fa4d7b58ea2bf344b5cc6b16464eb10b3b274a", "size": 1886, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/demos/kepler_uv_plot.py", "max_stars_repo_name": "yookiwooki/AstroLib", "max_stars_repo_head_hexsha": "4598be425e837ea6b216d4f0d09e789aa54d9368", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "python/demos/kepler_uv_plot.py", "max_issues_repo_name": "yookiwooki/AstroLib", "max_issues_repo_head_hexsha": "4598be425e837ea6b216d4f0d09e789aa54d9368", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python/demos/kepler_uv_plot.py", "max_forks_repo_name": "yookiwooki/AstroLib", "max_forks_repo_head_hexsha": "4598be425e837ea6b216d4f0d09e789aa54d9368", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.1466666667, "max_line_length": 73, "alphanum_fraction": 0.6749734889, "include": true, "reason": "import numpy", "num_tokens": 612}
|
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from numpy.testing import assert_almost_equal
from mmpose.models import build_loss
from mmpose.models.utils.geometry import batch_rodrigues
def test_mesh_loss():
"""test mesh loss."""
loss_cfg = dict(
type='MeshLoss',
joints_2d_loss_weight=1,
joints_3d_loss_weight=1,
vertex_loss_weight=1,
smpl_pose_loss_weight=1,
smpl_beta_loss_weight=1,
img_res=256,
focal_length=5000)
loss = build_loss(loss_cfg)
smpl_pose = torch.zeros([1, 72], dtype=torch.float32)
smpl_rotmat = batch_rodrigues(smpl_pose.view(-1, 3)).view(-1, 24, 3, 3)
smpl_beta = torch.zeros([1, 10], dtype=torch.float32)
camera = torch.tensor([[1, 0, 0]], dtype=torch.float32)
vertices = torch.rand([1, 6890, 3], dtype=torch.float32)
joints_3d = torch.ones([1, 24, 3], dtype=torch.float32)
joints_2d = loss.project_points(joints_3d, camera) + (256 - 1) / 2
fake_pred = {}
fake_pred['pose'] = smpl_rotmat
fake_pred['beta'] = smpl_beta
fake_pred['camera'] = camera
fake_pred['vertices'] = vertices
fake_pred['joints_3d'] = joints_3d
fake_gt = {}
fake_gt['pose'] = smpl_pose
fake_gt['beta'] = smpl_beta
fake_gt['vertices'] = vertices
fake_gt['has_smpl'] = torch.ones(1, dtype=torch.float32)
fake_gt['joints_3d'] = joints_3d
fake_gt['joints_3d_visible'] = torch.ones([1, 24, 1], dtype=torch.float32)
fake_gt['joints_2d'] = joints_2d
fake_gt['joints_2d_visible'] = torch.ones([1, 24, 1], dtype=torch.float32)
losses = loss(fake_pred, fake_gt)
assert torch.allclose(losses['vertex_loss'], torch.tensor(0.))
assert torch.allclose(losses['smpl_pose_loss'], torch.tensor(0.))
assert torch.allclose(losses['smpl_beta_loss'], torch.tensor(0.))
assert torch.allclose(losses['joints_3d_loss'], torch.tensor(0.))
assert torch.allclose(losses['joints_2d_loss'], torch.tensor(0.))
fake_pred = {}
fake_pred['pose'] = smpl_rotmat + 1
fake_pred['beta'] = smpl_beta + 1
fake_pred['camera'] = camera
fake_pred['vertices'] = vertices + 1
fake_pred['joints_3d'] = joints_3d.clone()
joints_3d_t = joints_3d.clone()
joints_3d_t[:, 0] = joints_3d_t[:, 0] + 1
fake_gt = {}
fake_gt['pose'] = smpl_pose
fake_gt['beta'] = smpl_beta
fake_gt['vertices'] = vertices
fake_gt['has_smpl'] = torch.ones(1, dtype=torch.float32)
fake_gt['joints_3d'] = joints_3d_t
fake_gt['joints_3d_visible'] = torch.ones([1, 24, 1], dtype=torch.float32)
fake_gt['joints_2d'] = joints_2d + (256 - 1) / 2
fake_gt['joints_2d_visible'] = torch.ones([1, 24, 1], dtype=torch.float32)
losses = loss(fake_pred, fake_gt)
assert torch.allclose(losses['vertex_loss'], torch.tensor(1.))
assert torch.allclose(losses['smpl_pose_loss'], torch.tensor(1.))
assert torch.allclose(losses['smpl_beta_loss'], torch.tensor(1.))
assert torch.allclose(losses['joints_3d_loss'], torch.tensor(0.5 / 24))
assert torch.allclose(losses['joints_2d_loss'], torch.tensor(0.5))
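    # Editorial note (assumes a smooth-L1 joint loss): a unit per-coordinate error
    # contributes 0.5 per element, so perturbing one of 24 joints gives 0.5/24 for
    # the 3D term, and a one-unit shift of the normalized 2D joints gives 0.5.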
def test_gan_loss():
"""test gan loss."""
with pytest.raises(NotImplementedError):
loss_cfg = dict(
type='GANLoss',
gan_type='test',
real_label_val=1.0,
fake_label_val=0.0,
loss_weight=1)
_ = build_loss(loss_cfg)
input_1 = torch.ones(1, 1)
input_2 = torch.ones(1, 3, 6, 6) * 2
# vanilla
loss_cfg = dict(
type='GANLoss',
gan_type='vanilla',
real_label_val=1.0,
fake_label_val=0.0,
loss_weight=2.0)
gan_loss = build_loss(loss_cfg)
loss = gan_loss(input_1, True, is_disc=False)
assert_almost_equal(loss.item(), 0.6265233)
loss = gan_loss(input_1, False, is_disc=False)
assert_almost_equal(loss.item(), 2.6265232)
loss = gan_loss(input_1, True, is_disc=True)
assert_almost_equal(loss.item(), 0.3132616)
loss = gan_loss(input_1, False, is_disc=True)
assert_almost_equal(loss.item(), 1.3132616)
# lsgan
loss_cfg = dict(
type='GANLoss',
gan_type='lsgan',
real_label_val=1.0,
fake_label_val=0.0,
loss_weight=2.0)
gan_loss = build_loss(loss_cfg)
loss = gan_loss(input_2, True, is_disc=False)
assert_almost_equal(loss.item(), 2.0)
loss = gan_loss(input_2, False, is_disc=False)
assert_almost_equal(loss.item(), 8.0)
loss = gan_loss(input_2, True, is_disc=True)
assert_almost_equal(loss.item(), 1.0)
loss = gan_loss(input_2, False, is_disc=True)
assert_almost_equal(loss.item(), 4.0)
# wgan
loss_cfg = dict(
type='GANLoss',
gan_type='wgan',
real_label_val=1.0,
fake_label_val=0.0,
loss_weight=2.0)
gan_loss = build_loss(loss_cfg)
loss = gan_loss(input_2, True, is_disc=False)
assert_almost_equal(loss.item(), -4.0)
loss = gan_loss(input_2, False, is_disc=False)
assert_almost_equal(loss.item(), 4)
loss = gan_loss(input_2, True, is_disc=True)
assert_almost_equal(loss.item(), -2.0)
loss = gan_loss(input_2, False, is_disc=True)
assert_almost_equal(loss.item(), 2.0)
# hinge
loss_cfg = dict(
type='GANLoss',
gan_type='hinge',
real_label_val=1.0,
fake_label_val=0.0,
loss_weight=2.0)
gan_loss = build_loss(loss_cfg)
loss = gan_loss(input_2, True, is_disc=False)
assert_almost_equal(loss.item(), -4.0)
loss = gan_loss(input_2, False, is_disc=False)
assert_almost_equal(loss.item(), -4.0)
loss = gan_loss(input_2, True, is_disc=True)
assert_almost_equal(loss.item(), 0.0)
loss = gan_loss(input_2, False, is_disc=True)
assert_almost_equal(loss.item(), 3.0)
|
{"hexsha": "98907675d26bfe65790edfc2bde7b8179aee4ad8", "size": 5793, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_losses/test_mesh_losses.py", "max_stars_repo_name": "nightfuryyy/mmpose", "max_stars_repo_head_hexsha": "910d9e31dd9d46e3329be1b7567e6309d70ab64c", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1775, "max_stars_repo_stars_event_min_datetime": "2020-07-10T01:20:01.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T16:31:50.000Z", "max_issues_repo_path": "tests/test_losses/test_mesh_losses.py", "max_issues_repo_name": "KHB1698/mmpose", "max_issues_repo_head_hexsha": "93c3a742c540dfb4ca515ad545cef705a07d90b4", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1021, "max_issues_repo_issues_event_min_datetime": "2020-07-11T11:40:24.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T14:32:26.000Z", "max_forks_repo_path": "tests/test_losses/test_mesh_losses.py", "max_forks_repo_name": "KHB1698/mmpose", "max_forks_repo_head_hexsha": "93c3a742c540dfb4ca515ad545cef705a07d90b4", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 477, "max_forks_repo_forks_event_min_datetime": "2020-07-11T11:27:51.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T09:42:25.000Z", "avg_line_length": 35.3231707317, "max_line_length": 78, "alphanum_fraction": 0.6556188503, "include": true, "reason": "from numpy", "num_tokens": 1767}
|
import os
from os.path import join as pjoin
import numpy as np
import pandas as pd
import scipy.stats
import dask
from cesium import featurize
from cesium.tests.fixtures import (sample_values, sample_ts_files,
sample_featureset)
import numpy.testing as npt
import pytest
DATA_PATH = pjoin(os.path.dirname(__file__), "data")
FEATURES_CSV_PATH = pjoin(DATA_PATH, "test_features_with_targets.csv")
def test_featurize_files_function(tmpdir):
"""Test featurize function for on-disk time series"""
with sample_ts_files(size=4, labels=['A', 'B']) as ts_paths:
fset, labels = featurize.featurize_ts_files(ts_paths,
features_to_use=["std_err"],
scheduler=dask.get)
assert "std_err" in fset
assert fset.shape == (4, 1)
npt.assert_array_equal(labels, ['A', 'B', 'A', 'B'])
def test_featurize_time_series_single():
"""Test featurize wrapper function for single time series"""
t, m, e = sample_values()
features_to_use = ['amplitude', 'std_err']
meta_features = {'meta1': 0.5}
fset = featurize.featurize_time_series(t, m, e, features_to_use,
meta_features, scheduler=dask.get)
assert fset['amplitude'].values.dtype == np.float64
def test_featurize_time_series_single_multichannel():
"""Test featurize wrapper function for single multichannel time series"""
n_channels = 3
t, m, e = sample_values(channels=n_channels)
features_to_use = ['amplitude', 'std_err']
meta_features = {'meta1': 0.5}
fset = featurize.featurize_time_series(t, m, e, features_to_use,
meta_features, scheduler=dask.get)
assert ('amplitude', 0) in fset.columns
assert 'meta1' in fset.columns
def test_featurize_time_series_multiple():
"""Test featurize wrapper function for multiple time series"""
n_series = 5
list_of_series = [sample_values() for i in range(n_series)]
times, values, errors = [list(x) for x in zip(*list_of_series)]
features_to_use = ['amplitude', 'std_err']
meta_features = [{'meta1': 0.5}] * n_series
fset = featurize.featurize_time_series(times, values, errors,
features_to_use,
meta_features, scheduler=dask.get)
npt.assert_array_equal(sorted(fset.columns.get_level_values('feature')),
['amplitude', 'meta1', 'std_err'])
def test_featurize_time_series_multiple_multichannel():
"""Test featurize wrapper function for multiple multichannel time series"""
n_series = 5
n_channels = 3
list_of_series = [sample_values(channels=n_channels)
for i in range(n_series)]
times, values, errors = [list(x) for x in zip(*list_of_series)]
features_to_use = ['amplitude', 'std_err']
meta_features = {'meta1': 0.5}
fset = featurize.featurize_time_series(times, values, errors,
features_to_use,
meta_features, scheduler=dask.get)
assert ('amplitude', 0) in fset.columns
assert 'meta1' in fset.columns
def test_featurize_time_series_uneven_multichannel():
"""Test featurize wrapper function for uneven-length multichannel data"""
n_channels = 3
t, m, e = sample_values(channels=n_channels)
t = [[t, t[0:-5], t[0:-10]]]
m = [[m[0], m[1][0:-5], m[2][0:-10]]]
e = [[e[0], e[1][0:-5], e[2][0:-10]]]
features_to_use = ['amplitude', 'std_err']
meta_features = {'meta1': 0.5}
fset = featurize.featurize_time_series(t, m, e, features_to_use,
meta_features, scheduler=dask.get)
assert ('amplitude', 0) in fset.columns
assert 'meta1' in fset.columns
def test_featurize_time_series_custom_functions():
"""Test featurize wrapper function for time series w/ custom functions"""
n_channels = 3
t, m, e = sample_values(channels=n_channels)
features_to_use = ['amplitude', 'std_err', 'test_f']
meta_features = {'meta1': 0.5}
custom_functions = {'test_f': lambda t, m, e: np.pi}
fset = featurize.featurize_time_series(t, m, e, features_to_use,
meta_features,
custom_functions=custom_functions,
scheduler=dask.get)
npt.assert_array_equal(fset['test_f', 0], np.pi)
assert ('amplitude', 0) in fset.columns
assert 'meta1' in fset.columns
def test_featurize_time_series_custom_dask_graph():
"""Test featurize wrapper function for time series w/ custom dask graph"""
n_channels = 3
t, m, e = sample_values(channels=n_channels)
features_to_use = ['amplitude', 'std_err', 'test_f', 'test_meta']
meta_features = {'meta1': 0.5}
custom_functions = {'test_f': (lambda x: x.min() - x.max(), 'amplitude'),
'test_meta': (lambda x: 2. * x, 'meta1')}
fset = featurize.featurize_time_series(t, m, e, features_to_use,
meta_features,
custom_functions=custom_functions,
scheduler=dask.get)
assert ('amplitude', 0) in fset.columns
assert ('test_f', 0) in fset.columns
assert ('test_meta', 0) in fset.columns
def test_featurize_time_series_default_times():
"""Test featurize wrapper function for time series w/ missing times"""
n_channels = 3
_, m, e = sample_values(channels=n_channels)
features_to_use = ['amplitude', 'std_err']
meta_features = {}
fset = featurize.featurize_time_series(None, m, e, features_to_use,
meta_features, scheduler=dask.get)
m = [[m[0], m[1][0:-5], m[2][0:-10]]]
e = [[e[0], e[1][0:-5], e[2][0:-10]]]
fset = featurize.featurize_time_series(None, m, e, features_to_use,
meta_features, scheduler=dask.get)
m = m[0][0]
e = e[0][0]
fset = featurize.featurize_time_series(None, m, e, features_to_use,
meta_features, scheduler=dask.get)
assert ('amplitude', 0) in fset.columns
def test_featurize_time_series_default_errors():
"""Test featurize wrapper function for time series w/ missing errors"""
n_channels = 3
t, m, _ = sample_values(channels=n_channels)
features_to_use = ['amplitude', 'std_err']
meta_features = {}
fset = featurize.featurize_time_series(t, m, None, features_to_use,
meta_features, scheduler=dask.get)
t = [[t, t[0:-5], t[0:-10]]]
m = [[m[0], m[1][0:-5], m[2][0:-10]]]
fset = featurize.featurize_time_series(t, m, None, features_to_use,
meta_features, scheduler=dask.get)
t = t[0][0]
m = m[0][0]
fset = featurize.featurize_time_series(t, m, None, features_to_use,
meta_features, scheduler=dask.get)
assert ('amplitude', 0) in fset.columns
def test_featurize_time_series_pandas_metafeatures():
"""Test featurize function for metafeatures passed as Series/DataFrames."""
t, m, e = sample_values()
features_to_use = ['amplitude', 'std_err']
meta_features = pd.Series({'meta1': 0.5})
fset = featurize.featurize_time_series(t, m, e, features_to_use,
meta_features, scheduler=dask.get)
npt.assert_allclose(fset['meta1'], 0.5)
n_series = 5
list_of_series = [sample_values() for i in range(n_series)]
times, values, errors = [list(x) for x in zip(*list_of_series)]
features_to_use = ['amplitude', 'std_err']
meta_features = pd.DataFrame({'meta1': [0.5] * n_series,
'meta2': [0.8] * n_series})
fset = featurize.featurize_time_series(times, values, errors,
features_to_use,
meta_features, scheduler=dask.get)
npt.assert_allclose(fset['meta1'], 0.5)
npt.assert_allclose(fset['meta2'], 0.8)
def test_impute():
"""Test imputation of missing Featureset values."""
fset, labels = sample_featureset(5, 1, ['amplitude'], ['class1', 'class2'],
names=['a', 'b', 'c', 'd', 'e'],
meta_features=['meta1'])
imputed = featurize.impute_featureset(fset)
npt.assert_allclose(fset.amplitude.values, imputed.amplitude.values)
assert isinstance(imputed, pd.DataFrame)
fset.amplitude.values[0] = np.inf
fset.amplitude.values[1] = np.nan
amp_values = fset.amplitude.values[2:]
other_values = fset.values.T.ravel()[2:]
imputed = featurize.impute_featureset(fset, strategy='constant',
value=None)
npt.assert_allclose(-2 * np.nanmax(np.abs(other_values)),
imputed.amplitude.values[0:2])
imputed = featurize.impute_featureset(fset, strategy='constant',
value=-1e4)
npt.assert_allclose(-1e4, imputed.amplitude.values[0:2])
imputed = featurize.impute_featureset(fset, strategy='mean')
npt.assert_allclose(np.mean(amp_values), imputed.amplitude.values[0:2])
npt.assert_allclose(amp_values, imputed.amplitude.values[2:])
imputed = featurize.impute_featureset(fset, strategy='median')
npt.assert_allclose(np.median(amp_values), imputed.amplitude.values[0:2])
npt.assert_allclose(amp_values, imputed.amplitude.values[2:])
imputed = featurize.impute_featureset(fset, strategy='most_frequent')
npt.assert_allclose(scipy.stats.mode(amp_values).mode.item(),
imputed.amplitude.values[0:2])
npt.assert_allclose(amp_values, imputed.amplitude.values[2:])
featurize.impute_featureset(fset, strategy='constant', value=-1e4,
inplace=True)
npt.assert_allclose(-1e4, fset.amplitude.values[0:2])
with pytest.raises(NotImplementedError):
featurize.impute_featureset(fset, strategy='blah')
def test_roundtrip_featureset(tmpdir):
fset_path = os.path.join(str(tmpdir), 'test.npz')
for n_channels in [1, 3]:
for labels in [['class1', 'class2'], []]:
fset, labels = sample_featureset(3, n_channels, ['amplitude'],
labels, names=['a', 'b', 'c'],
meta_features=['meta1'])
pred_probs = pd.DataFrame(np.random.random((len(fset), 2)),
index=fset.index.values,
columns=['class1', 'class2'])
featurize.save_featureset(fset, fset_path, labels=labels,
pred_probs=pred_probs)
fset_loaded, data_loaded = featurize.load_featureset(fset_path)
npt.assert_allclose(fset.values, fset_loaded.values)
npt.assert_array_equal(fset.index, fset_loaded.index)
npt.assert_array_equal(fset.columns, fset_loaded.columns)
assert isinstance(fset_loaded, pd.DataFrame)
npt.assert_array_equal(labels, data_loaded['labels'])
npt.assert_allclose(pred_probs, data_loaded['pred_probs'])
npt.assert_array_equal(pred_probs.columns,
data_loaded['pred_probs'].columns)
def test_ignore_exceptions():
import cesium.features.graphs
def raise_exc(x):
raise ValueError()
old_value = cesium.features.graphs.dask_feature_graph['mean']
try:
cesium.features.graphs.dask_feature_graph['mean'] = (raise_exc, 't')
t, m, e = sample_values()
features_to_use = ['mean']
with pytest.raises(ValueError):
fset = featurize.featurize_time_series(t, m, e, features_to_use,
scheduler=dask.get,
raise_exceptions=True)
fset = featurize.featurize_time_series(t, m, e, features_to_use,
scheduler=dask.get,
raise_exceptions=False)
assert np.isnan(fset.values).all()
finally:
cesium.features.graphs.dask_feature_graph['mean'] = old_value
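# Editorial note: test_ignore_exceptions patches the shared dask feature graph in
# place, so restoring the original entry in the finally block is what keeps the
# other tests in this module isolated.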
|
{"hexsha": "c90b4a53bfe359f73d49c6c5e81119bccac0fb54", "size": 12579, "ext": "py", "lang": "Python", "max_stars_repo_path": "cesium/tests/test_featurize.py", "max_stars_repo_name": "acrellin/cesium", "max_stars_repo_head_hexsha": "9d33edc0f9b3a79c68070826c0f390896abe294d", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 603, "max_stars_repo_stars_event_min_datetime": "2016-04-15T00:11:07.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-18T09:10:39.000Z", "max_issues_repo_path": "cesium/tests/test_featurize.py", "max_issues_repo_name": "acrellin/cesium", "max_issues_repo_head_hexsha": "9d33edc0f9b3a79c68070826c0f390896abe294d", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 146, "max_issues_repo_issues_event_min_datetime": "2016-03-17T19:58:24.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-05T20:36:03.000Z", "max_forks_repo_path": "cesium/tests/test_featurize.py", "max_forks_repo_name": "acrellin/cesium", "max_forks_repo_head_hexsha": "9d33edc0f9b3a79c68070826c0f390896abe294d", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 84, "max_forks_repo_forks_event_min_datetime": "2016-04-13T23:30:58.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-18T07:34:09.000Z", "avg_line_length": 44.2922535211, "max_line_length": 80, "alphanum_fraction": 0.597901264, "include": true, "reason": "import numpy,import scipy", "num_tokens": 3063}
|
"""
An [`AbstractConstraintSet`](@ref) that stores the constraint values as well as Lagrange
multiplier and penalty terms for each constraint.
The cost associated with the constraint terms in the augmented Lagrangian can be evaluated using
cost!(J::Vector, ::ALConstraintSet)
which adds the cost at each time step to the vector `J` of length `N`.
The cost expansion for these terms is evaluated along the trajectory `Z` using
cost_expansion!(E::Objective, conSet::ALConstraintSet, Z)
which also adds the expansion terms to the terms in `E`.
The penalty and multiplier terms can be updated using
penalty_update!(::ALConstraintSet)
dual_update!(::ALConstraintSet)
The current set of active constraints (with tolerance `tol`) can be re-calculated using
update_active_set!(::ALConstraintSet, ::Val{tol})
The maximum penalty can be queried using `max_penalty(::ALConstraintSet)`, and the
penalties and/or multipliers can be reset using
reset!(::ALConstraintSet)
reset_penalties!(::ALConstraintSet)
reset_duals!(::ALConstraintSet)
# Constructor
ALConstraintSet(::ConstraintList, ::AbstractModel)
ALConstraintSet(::Problem)
"""
struct ALConstraintSet{T} <: AbstractConstraintSet
convals::Vector{ConVal}
errvals::Vector{ConVal}
λ::Vector{<:Vector}
μ::Vector{<:Vector}
active::Vector{<:Vector}
c_max::Vector{T}
μ_max::Vector{T}
μ_maxes::Vector{Vector{T}}
params::Vector{ConstraintParams{T}}
p::Vector{Int}
end
function ALConstraintSet(cons::ConstraintList, model::AbstractModel)
n,m = cons.n, cons.m
n̄ = RobotDynamics.state_diff_size(model)
ncon = length(cons)
useG = model isa LieGroupModel
errvals = map(1:ncon) do i
C,c = gen_convals(n̄, m, cons[i], cons.inds[i])
ConVal(n̄, m, cons[i], cons.inds[i], C, c, useG)
end
convals = map(errvals) do errval
ConVal(n, m, errval)
end
errvals = convert(Vector{ConVal}, errvals)
convals = convert(Vector{ConVal}, convals)
λ = map(1:ncon) do i
p = length(cons[i])
[@SVector zeros(p) for i in cons.inds[i]]
end
μ = map(1:ncon) do i
p = length(cons[i])
[@SVector ones(p) for i in cons.inds[i]]
end
a = map(1:ncon) do i
p = length(cons[i])
[@SVector ones(Bool,p) for i in cons.inds[i]]
end
c_max = zeros(ncon)
μ_max = zeros(ncon)
μ_maxes = [zeros(length(ind)) for ind in cons.inds]
params = [ConstraintParams() for con in cons.constraints]
ALConstraintSet(convals, errvals, λ, μ, a, c_max, μ_max, μ_maxes, params, copy(cons.p))
end
@inline ALConstraintSet(prob::Problem) = ALConstraintSet(prob.constraints, prob.model)
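# --------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): one outer iteration of
# the augmented-Lagrangian cycle documented above, assuming `prob::Problem`, a
# cost vector `J` of length N, and already-evaluated constraint values.
#
#   conSet = ALConstraintSet(prob)
#   cost!(J, conSet)                       # add λ'c + ½c'Iμc terms to J
#   dual_update!(conSet)                   # λ ← clamp(λ + μ .* c, λ_min, λ_max)
#   penalty_update!(conSet)                # μ ← clamp(ϕ .* μ, 0, μ_max)
#   update_active_set!(conSet, Val(1e-3))  # refresh the inequality active set
#   max_penalty(conSet)                    # monitor penalty growth
# --------------------------------------------------------------------------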
# Iteration
Base.iterate(conSet::AbstractConstraintSet) =
isempty(get_convals(conSet)) ? nothing : (get_convals(conSet)[1].con,1)
Base.iterate(conSet::AbstractConstraintSet, state::Int) =
state >= length(conSet) ? nothing : (get_convals(conSet)[state+1].con, state+1)
@inline Base.length(conSet::AbstractConstraintSet) = length(get_convals(conSet))
Base.IteratorSize(::AbstractConstraintSet) = Base.HasLength()
Base.IteratorEltype(::AbstractConstraintSet) = Base.HasEltype()
Base.eltype(::AbstractConstraintSet) = AbstractConstraint
"""
link_constraints!(set1, set2)
Link any common constraints between `set1` and `set2` by setting elements in `set1` to point
to elements in `set2`
"""
function link_constraints!(set1::ALConstraintSet, set2::ALConstraintSet)
# Find common constraints
links = Tuple{Int,Int}[]
for (i,con1) in enumerate(set1)
for (j,con2) in enumerate(set2)
if con1 === con2
push!(links, (i,j))
end
end
end
# Link values
for (i,j) in links
set1.convals[i] = set2.convals[j]
set1.errvals[i] = set2.errvals[j]
set1.active[i] = set2.active[j]
set1.λ[i] = set2.λ[j]
set1.μ[i] = set2.μ[j]
end
return links
end
@inline get_convals(conSet::ALConstraintSet) = conSet.convals
@inline get_errvals(conSet::ALConstraintSet) = conSet.errvals
# Augmented Lagrangian Updates
function dual_update!(conSet::ALConstraintSet)
for i in eachindex(conSet.λ)
dual_update!(conSet.convals[i], conSet.λ[i], conSet.μ[i], conSet.params[i])
end
end
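# Multiplier update for a single constraint: λ ← clamp(λ + μ .* c, λ_min, λ_max),
# where λ_min = 0 for inequality constraints so the duals stay nonnegative.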
function dual_update!(conval::ConVal, λ::Vector{<:SVector}, μ::Vector{<:SVector}, params::ConstraintParams)
c = conval.vals
λ_max = params.λ_max
λ_min = sense(conval.con) == Equality() ? -λ_max : zero(λ_max)
for i in eachindex(conval.inds)
λ[i] = clamp.(λ[i] + μ[i] .* c[i], λ_min, λ_max)
end
end
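# Penalty schedule: each outer iteration scales the penalties geometrically,
# μ ← clamp(ϕ .* μ, 0, μ_max), capping growth at μ_max.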
function penalty_update!(conSet::ALConstraintSet)
for i in eachindex(conSet.μ)
penalty_update!(conSet.μ[i], conSet.params[i])
end
end
function penalty_update!(μ::Vector{<:SVector}, params::ConstraintParams)
ϕ = params.ϕ
μ_max = params.μ_max
for i in eachindex(μ)
μ[i] = clamp.(ϕ * μ[i], 0.0, μ_max)
end
end
# Active Set
function update_active_set!(conSet::ALConstraintSet, val::Val{tol}=Val(0.0)) where tol
for i in eachindex(conSet.active)
update_active_set!(conSet.active[i], conSet.λ[i], conSet.convals[i], val)
end
end
function update_active_set!(a::Vector{<:StaticVector}, λ::Vector{<:StaticVector},
conval::ConVal, ::Val{tol}) where tol
if sense(conval.con) == Inequality()
for i in eachindex(a)
a[i] = @. (conval.vals[i] >= -tol) | (λ[i] > zero(tol))
end
end
end
# Cost
function cost!(J::Vector{<:Real}, conSet::ALConstraintSet)
for i in eachindex(conSet.convals)
cost!(J, conSet.convals[i], conSet.λ[i], conSet.μ[i], conSet.active[i])
end
end
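# Augmented-Lagrangian cost at knot point k: J[k] += λ'c + ½ c'Iμ c, where
# Iμ = Diagonal(a .* μ) zeroes the penalty on inactive inequality constraints.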
function cost!(J::Vector{<:Real}, conval::ConVal, λ::Vector{<:StaticVector},
μ::Vector{<:StaticVector}, a::Vector{<:StaticVector})
for (i,k) in enumerate(conval.inds)
c = SVector(conval.vals[i])
Iμ = Diagonal(SVector(μ[i] .* a[i]))
J[k] += λ[i]'c .+ 0.5*c'Iμ*c
end
end
function cost_expansion!(E::Objective, conSet::ALConstraintSet, Z::AbstractTrajectory,
init::Bool=false, rezero::Bool=false)
for i in eachindex(conSet.errvals)
cost_expansion!(E, conSet.convals[i], conSet.λ[i], conSet.μ[i], conSet.active[i])
end
end
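# The @generated function below specializes the quadratic cost expansion on the
# constraint type: StateConstraints touch only (Q, q), ControlConstraints only
# (R, r), and StageConstraints fill every block, including the coupling term H.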
@generated function cost_expansion!(E::QuadraticObjective{n,m}, conval::ConVal{C}, λ, μ, a) where {n,m,C}
if C <: StateConstraint
expansion = quote
cx = ∇c
E[k].Q .+= cx'Iμ*cx
E[k].q .+= cx'g
end
elseif C <: ControlConstraint
expansion = quote
cu = ∇c
E[k].R .+= cu'Iμ*cu
E[k].r .+= cu'g
end
elseif C<: StageConstraint
ix = SVector{n}(1:n)
iu = SVector{m}(n .+ (1:m))
expansion = quote
cx = ∇c[:,$ix]
cu = ∇c[:,$iu]
E[k].Q .+= cx'Iμ*cx
E[k].q .+= cx'g
E[k].H .+= cu'Iμ*cx
E[k].R .+= cu'Iμ*cu
E[k].r .+= cu'g
end
else
throw(ArgumentError("cost expansion not supported for CoupledConstraints"))
end
quote
for (i,k) in enumerate(conval.inds)
∇c = SMatrix(conval.jac[i])
c = conval.vals[i]
Iμ = Diagonal(a[i] .* μ[i])
g = Iμ*c .+ λ[i]
$expansion
end
end
end
"""
max_penalty(conSet::ALConstraintSet)
Calculate the maximum constrained penalty across all constraints.
"""
function max_penalty(conSet::ALConstraintSet)
max_penalty!(conSet)
maximum(conSet.μ_max)
end
function max_penalty!(conSet::ALConstraintSet{T}) where T
conSet.c_max .*= 0
for i in eachindex(conSet.μ)
maxes = conSet.μ_maxes[i]::Vector{T}
max_penalty!(maxes, conSet.μ[i])
conSet.μ_max[i] = maximum(maxes)
end
end
function max_penalty!(μ_max::Vector{<:Real}, μ::Vector{<:StaticVector})
for i in eachindex(μ)
μ_max[i] = maximum(μ[i])
end
return nothing
end
# Reset
function reset!(conSet::ALConstraintSet)
reset_duals!(conSet)
reset_penalties!(conSet)
end
function reset_duals!(conSet::ALConstraintSet)
function _reset!(V::Vector{<:SVector})
for i in eachindex(V)
V[i] = zero(V[i])
end
end
for i in eachindex(conSet.λ)
_reset!(conSet.λ[i])
end
end
function reset_penalties!(conSet::ALConstraintSet)
function _reset!(V::Vector{<:SVector}, params::ConstraintParams)
for i in eachindex(V)
V[i] = zero(V[i]) .+ params.μ0
end
end
for i in eachindex(conSet.μ)
# reset!(conSet.μ[i], conSet.params[i].μ0)
# μ0 = conSet.params[i].μ0
_reset!(conSet.μ[i], conSet.params[i])
end
end
|
{"hexsha": "643e71fd928205f5b9cba629865d00afc500d9b1", "size": 8125, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/ALconset.jl", "max_stars_repo_name": "serenity4/TrajectoryOptimization.jl", "max_stars_repo_head_hexsha": "5584984cd472d5ceb6634c032b8b572e57754084", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/ALconset.jl", "max_issues_repo_name": "serenity4/TrajectoryOptimization.jl", "max_issues_repo_head_hexsha": "5584984cd472d5ceb6634c032b8b572e57754084", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/ALconset.jl", "max_forks_repo_name": "serenity4/TrajectoryOptimization.jl", "max_forks_repo_head_hexsha": "5584984cd472d5ceb6634c032b8b572e57754084", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.7303754266, "max_line_length": 107, "alphanum_fraction": 0.6792615385, "num_tokens": 2578}
|
import aoc_utils
import itertools
import functools
import operator
import networkx
import math
from collections import *
from copy import deepcopy
import random
import re
lines = aoc_utils.readlines()
def isvalid(string):
stack = []
for x in string:
if x == "{" or x == "[" or x == "(" or x == "<":
stack.append(x)
else:
if x == ")":
x = "("
if x == "]":
x = "["
if x == ">":
x = "<"
if x == "}":
x = "{"
if x == stack[-1]:
                stack.pop()
            else:
if x == "(":
return 3
if x == "[":
return 57
if x == "{":
return 1197
if x == "<":
return 25137
return 0
def getstack(string):
stack = []
for x in string:
if x == "{" or x == "[" or x == "(" or x == "<":
stack.append(x)
else:
if x == ")":
x = "("
if x == "]":
x = "["
if x == ">":
x = "<"
if x == "}":
x = "{"
if x == stack[-1]:
                stack.pop()
else:
if x == "(":
return 3
if x == "[":
return 57
if x == "{":
return 1197
if x == "<":
return 25137
return stack
lines = list(filter(lambda line: isvalid(line) == 0, lines))
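# Part 2: complete each incomplete line from its leftover stack and take the
# median completion score (point values 1/2/3/4 per bracket type).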
scores = []
for line in lines:
score = 0
for character in reversed(getstack(line)):
score *= 5
if character == "(":
score += 1
if character == "[":
score += 2
if character == "{":
score += 3
if character == "<":
score += 4
scores.append(score)
print(sorted(scores)[len(scores) // 2])
|
{"hexsha": "be90ea942b501a20e842e4a783579fe27c5ac204", "size": 2100, "ext": "py", "lang": "Python", "max_stars_repo_path": "Python/Python21/10.py", "max_stars_repo_name": "sapieninja/AdventOfCode", "max_stars_repo_head_hexsha": "8190c11e3eb2e4292a0cf66a6ef9261dee880f2e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Python/Python21/10.py", "max_issues_repo_name": "sapieninja/AdventOfCode", "max_issues_repo_head_hexsha": "8190c11e3eb2e4292a0cf66a6ef9261dee880f2e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-03-30T12:31:38.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-30T12:31:38.000Z", "max_forks_repo_path": "Python/Python21/10.py", "max_forks_repo_name": "sapieninja/AdventOfCode", "max_forks_repo_head_hexsha": "8190c11e3eb2e4292a0cf66a6ef9261dee880f2e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.595505618, "max_line_length": 60, "alphanum_fraction": 0.3514285714, "include": true, "reason": "import networkx", "num_tokens": 474}
|
from scipy import stats
from skimage import img_as_ubyte
from skimage.feature import local_binary_pattern
from skimage.io import imread
import glob
import keras_NN
import numpy as np
import os
import pandas as pd
import time
# Define the global variables related to the dataset
DATASET_PATH = "./input"
TRAINING_FOLDER_NAME = "train"
TESTING_FOLDER_NAME = "test"
TRAINING_FILE_NAME = "train.csv"
TESTING_FILE_NAME = "test.csv"
IMAGE_EXTENSION = ".jpg"
FEATURE_EXTENSION = "_LBP.csv"
def load_image_path_list():
training_image_path_rule = os.path.join(DATASET_PATH, TRAINING_FOLDER_NAME,
"*" + IMAGE_EXTENSION)
testing_image_path_rule = os.path.join(DATASET_PATH, TESTING_FOLDER_NAME,
"*" + IMAGE_EXTENSION)
training_image_path_list = glob.glob(training_image_path_rule)
testing_image_path_list = glob.glob(testing_image_path_rule)
return (training_image_path_list, testing_image_path_list)
def retrieve_LBP_feature_histogram(image_path):
try:
# Read feature directly from file
image_feature_path = image_path + FEATURE_EXTENSION
if os.path.isfile(image_feature_path):
LBP_feature_histogram = np.genfromtxt(image_feature_path,
delimiter=",")
return LBP_feature_histogram
# Define LBP parameters
radius = 5
n_points = 8
bins_num = pow(2, n_points)
LBP_value_range = (0, pow(2, n_points) - 1)
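        # With the default LBP variant and n_points=8, the codes fall in
        # [0, 255], so the normalized histogram below has 256 bins.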
# Retrieve feature
assert os.path.isfile(image_path)
image_content_in_gray = imread(image_path, as_grey=True)
image_content_in_gray = img_as_ubyte(image_content_in_gray)
LBP_feature = local_binary_pattern(image_content_in_gray, n_points,
radius)
LBP_feature_histogram, _ = np.histogram(LBP_feature,
bins=bins_num,
range=LBP_value_range,
density=True)
# Save feature to file
assert LBP_feature_histogram is not None
np.savetxt(image_feature_path, LBP_feature_histogram, delimiter=",")
return LBP_feature_histogram
    except Exception:
print("Unable to retrieve LBP feature histogram in %s." %
(os.path.basename(image_path)))
return None
def load_features(image_path_list):
feature_dict = {}
for image_path in image_path_list:
LBP_feature_histogram = retrieve_LBP_feature_histogram(image_path)
feature_dict[os.path.basename(image_path)] = LBP_feature_histogram
return feature_dict
def load_csv_files():
training_file_path = os.path.join(DATASET_PATH, TRAINING_FILE_NAME)
testing_file_path = os.path.join(DATASET_PATH, TESTING_FILE_NAME)
training_file_content = pd.read_csv(training_file_path,
skiprows=0).as_matrix()
training_names = training_file_content[:, 0]
training_labels = training_file_content[:, 1]
training_labels = training_labels.astype(np.uint32)
testing_file_content = pd.read_csv(testing_file_path,
skiprows=0).as_matrix()
testing_names = testing_file_content[:, 0]
return (training_names, training_labels, testing_names)
def get_attributes(feature_dict, names):
feature_list = []
for name in names:
feature_list.append(feature_dict[name])
return np.array(feature_list)
def run():
# Load image paths in the dataset
training_image_path_list, testing_image_path_list = load_image_path_list()
# Load features
training_image_feature_dict = load_features(training_image_path_list)
testing_image_feature_dict = load_features(testing_image_path_list)
# Load training labels
training_names, training_labels, testing_names = load_csv_files()
# Convert data to suitable form for training/testing phase
X_train = get_attributes(training_image_feature_dict, training_names)
Y_train = training_labels
X_test = get_attributes(testing_image_feature_dict, testing_names)
# Generate prediction list
prediction_list = []
for trial_index in range(11):
print("Working on trial NO.{:d}".format(trial_index + 1))
current_prediction = keras_NN.generate_prediction(
X_train, Y_train, X_test)
prediction_list.append(current_prediction)
# Generate ensemble prediction
ensemble_prediction, _ = stats.mode(prediction_list)
ensemble_prediction = np.squeeze(ensemble_prediction)
# Create submission file
submission_file_name = "Aurora_" + str(int(time.time())) + ".csv"
file_content = pd.DataFrame({
"Id": testing_names,
"Prediction": ensemble_prediction
})
file_content.to_csv(submission_file_name, index=False, header=True)
print("All done!")
if __name__ == "__main__":
run()
|
{"hexsha": "29b114c4f6cf5afbf01bd345eed3faaad886e6f0", "size": 5031, "ext": "py", "lang": "Python", "max_stars_repo_path": "Copper Analysis/solution.py", "max_stars_repo_name": "nixingyang/Kaggle-Face-Verification", "max_stars_repo_head_hexsha": "b5f9908f4c23dc78b3e6b647c7add8f2b0d84663", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Copper Analysis/solution.py", "max_issues_repo_name": "nixingyang/Kaggle-Face-Verification", "max_issues_repo_head_hexsha": "b5f9908f4c23dc78b3e6b647c7add8f2b0d84663", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Copper Analysis/solution.py", "max_forks_repo_name": "nixingyang/Kaggle-Face-Verification", "max_forks_repo_head_hexsha": "b5f9908f4c23dc78b3e6b647c7add8f2b0d84663", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2016-09-05T03:13:32.000Z", "max_forks_repo_forks_event_max_datetime": "2018-11-29T07:55:23.000Z", "avg_line_length": 34.6965517241, "max_line_length": 79, "alphanum_fraction": 0.6779964222, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1022}
|
program tarefaa
print *, "Digite o valor inteiro de N:"
read (*,*) N
   ! Number of "walkers" M
M = 1000
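   ! Each walker draws one uniform sample per power; the sample mean estimates
   ! <x^i>, whose exact value is 1/(i+1) for x uniform on [0,1).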
   ! loop over the exponent i
do i = 1, N
      ! reset the sum
soma = 0e0
      ! loop over the walkers
do j = 1, M
         ! add each walker's sample to the sum
soma = soma + rand()**i
end do
      ! write the mean to the terminal
write (*,'(A,I0,A,1F5.3)') "<x^",i,">: ", soma/M
end do
end program tarefaa
|
{"hexsha": "2c438594106da63b7e4beac62887861bf9268241", "size": 500, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "projeto-2/tarefa-a/tarefa-a-10407962.f90", "max_stars_repo_name": "ArexPrestes/introducao-fisica-computacional", "max_stars_repo_head_hexsha": "bf6e7a0134c11ddbaf9125c42eb0982250f970d9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "projeto-2/tarefa-a/tarefa-a-10407962.f90", "max_issues_repo_name": "ArexPrestes/introducao-fisica-computacional", "max_issues_repo_head_hexsha": "bf6e7a0134c11ddbaf9125c42eb0982250f970d9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "projeto-2/tarefa-a/tarefa-a-10407962.f90", "max_forks_repo_name": "ArexPrestes/introducao-fisica-computacional", "max_forks_repo_head_hexsha": "bf6e7a0134c11ddbaf9125c42eb0982250f970d9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 20.8333333333, "max_line_length": 56, "alphanum_fraction": 0.464, "num_tokens": 169}
|
"""
If you use this code, please cite one of the SynthSeg papers:
https://github.com/BBillot/SynthSeg/blob/master/bibtex.bib
Copyright 2020 Benjamin Billot
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in
compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is
distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied. See the License for the specific language governing permissions and limitations under the
License.
"""
# python imports
import os
import csv
import numpy as np
from keras.models import Model
# project imports
from SynthSeg import evaluate
# third-party imports
from ext.lab2im import utils
from ext.lab2im import layers
from ext.lab2im import edit_volumes
from ext.neuron import models as nrn_models
def predict(path_images,
path_segmentations,
path_model,
segmentation_labels,
n_neutral_labels=None,
path_posteriors=None,
path_resampled=None,
path_volumes=None,
segmentation_label_names=None,
padding=None,
cropping=None,
target_res=1.,
flip=True,
topology_classes=None,
sigma_smoothing=0.5,
keep_biggest_component=True,
conv_size=3,
n_levels=5,
nb_conv_per_level=2,
unet_feat_count=24,
feat_multiplier=2,
activation='elu',
gt_folder=None,
evaluation_labels=None,
mask_folder=None,
list_incorrect_labels=None,
list_correct_labels=None,
compute_distances=False,
recompute=True,
verbose=False):
"""
This function uses trained models to segment images.
It is crucial that the inputs match the architecture parameters of the trained model.
:param path_images: path of the images to segment. Can be the path to a directory or the path to a single image.
    :param path_segmentations: path where segmentations will be written.
Should be a dir, if path_images is a dir, and a file if path_images is a file.
    :param path_model: path to the trained model.
:param segmentation_labels: List of labels for which to compute Dice scores. It should be the same list as the
segmentation_labels used in training.
:param n_neutral_labels: (optional) if the label maps contain some right/left specific labels and if test-time
flipping is applied (see parameter 'flip'), please provide the number of non-sided labels (including background).
It should be the same value as for training. Default is None.
    :param path_posteriors: (optional) path where posteriors will be written.
Should be a dir, if path_images is a dir, and a file if path_images is a file.
    :param path_resampled: (optional) path where images resampled to 1mm isotropic will be written.
We emphasise that images are resampled as soon as the resolution in one of the axes is not in the range [0.9; 1.1].
Should be a dir, if path_images is a dir, and a file if path_images is a file. Default is None, where resampled
images are not saved.
    :param path_volumes: (optional) path of a csv file where the soft volumes of all segmented regions will be written.
The rows of the csv file correspond to subjects, and the columns correspond to segmentation labels.
The soft volume of a structure corresponds to the sum of its predicted probability map.
    :param segmentation_label_names: (optional) List of names corresponding to the segmentation labels.
Only used when path_volumes is provided. Must be of the same size as segmentation_labels. Can be given as a
list, a numpy array of strings, or the path to such a numpy array. Default is None.
:param padding: (optional) pad the images to the specified shape before predicting the segmentation maps.
Can be an int, a sequence or a 1d numpy array.
:param cropping: (optional) crop the images to the specified shape before predicting the segmentation maps.
If padding and cropping are specified, images are padded before being cropped.
Can be an int, a sequence or a 1d numpy array.
:param target_res: (optional) target resolution at which the network operates (and thus resolution of the output
    segmentations). This must match the resolution of the training data! target_res is used to automatically resample
the images with resolutions outside [target_res-0.05, target_res+0.05].
Can be a sequence, a 1d numpy array. Set to None to disable the automatic resampling. Default is 1mm.
:param flip: (optional) whether to perform test-time augmentation, where the input image is segmented along with
a right/left flipped version on it. If set to True (default), be careful because this requires more memory.
:param topology_classes: List of classes corresponding to all segmentation labels, in order to group them into
classes, for each of which we will operate a smooth version of biggest connected component.
Can be a sequence, a 1d numpy array, or the path to a numpy 1d array in the same order as segmentation_labels.
Default is None, where no topological analysis is performed.
:param sigma_smoothing: (optional) If not None, the posteriors are smoothed with a gaussian kernel of the specified
standard deviation.
:param keep_biggest_component: (optional) whether to only keep the biggest component in the predicted segmentation.
This is applied independently of topology_classes, and it is applied to the whole segmentation
:param conv_size: (optional) size of unet's convolution masks. Default is 3.
:param n_levels: (optional) number of levels for unet. Default is 5.
:param nb_conv_per_level: (optional) number of convolution layers per level. Default is 2.
:param unet_feat_count: (optional) number of features for the first layer of the unet. Default is 24.
:param feat_multiplier: (optional) multiplicative factor for the number of feature for each new level. Default is 2.
:param activation: (optional) activation function. Can be 'elu', 'relu'.
:param gt_folder: (optional) path of the ground truth label maps corresponding to the input images. Should be a dir,
if path_images is a dir, or a file if path_images is a file.
    Providing a gt_folder will trigger a Dice evaluation, where scores will be written along with the path_segmentations.
Specifically, the scores are contained in a numpy array, where labels are in rows, and subjects in columns.
:param evaluation_labels: (optional) if gt_folder is True you can evaluate the Dice scores on a subset of the
segmentation labels, by providing another label list here. Can be a sequence, a 1d numpy array, or the path to a
numpy 1d array. Default is np.unique(segmentation_labels).
:param mask_folder: (optional) path of masks that will be used to mask out some parts of the obtained segmentations
during the evaluation. Default is None, where nothing is masked.
:param list_incorrect_labels: (optional) this option enables to replace some label values in the obtained
segmentations by other label values. Can be a list, a 1d numpy array, or the path to such an array.
:param list_correct_labels: (optional) list of values to correct the labels specified in list_incorrect_labels.
Correct values must have the same order as their corresponding value in list_incorrect_labels.
:param compute_distances: (optional) whether to add Hausdorff and mean surface distance evaluations to the default
    Dice evaluation. Default is False.
:param recompute: (optional) whether to recompute segmentations that were already computed. This also applies to
Dice scores, if gt_folder is not None. Default is True.
:param verbose: (optional) whether to print out info about the remaining number of cases.
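    Example (a hedged sketch only; all paths and the label file below are hypothetical):
        predict(path_images='./data/images',
                path_segmentations='./data/segmentations',
                path_model='./models/synthseg_model.h5',
                segmentation_labels='./labels/segmentation_labels.npy',
                path_volumes='./data/volumes.csv')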
"""
# prepare input/output filepaths
path_images, path_segmentations, path_posteriors, path_resampled, path_volumes, compute = \
prepare_output_files(path_images, path_segmentations, path_posteriors, path_resampled, path_volumes, recompute)
# get label list
segmentation_labels, _ = utils.get_list_labels(label_list=segmentation_labels)
n_labels = len(segmentation_labels)
    # get unique label values, and build correspondence table between contralateral structures if necessary
if (n_neutral_labels is not None) & flip:
n_sided_labels = int((n_labels - n_neutral_labels) / 2)
lr_corresp = np.stack([segmentation_labels[n_neutral_labels:n_neutral_labels + n_sided_labels],
segmentation_labels[n_neutral_labels + n_sided_labels:]])
segmentation_labels, indices = np.unique(segmentation_labels, return_index=True)
lr_corresp_unique, lr_corresp_indices = np.unique(lr_corresp[0, :], return_index=True)
lr_corresp_unique = np.stack([lr_corresp_unique, lr_corresp[1, lr_corresp_indices]])
lr_corresp_unique = lr_corresp_unique[:, 1:] if not np.all(lr_corresp_unique[:, 0]) else lr_corresp_unique
lr_indices = np.zeros_like(lr_corresp_unique)
for i in range(lr_corresp_unique.shape[0]):
for j, lab in enumerate(lr_corresp_unique[i]):
lr_indices[i, j] = np.where(segmentation_labels == lab)[0]
else:
segmentation_labels, indices = np.unique(segmentation_labels, return_index=True)
lr_indices = None
# prepare topology classes
if topology_classes is not None:
topology_classes = utils.load_array_if_path(topology_classes, load_as_numpy=True)[indices]
# prepare volume file if needed
if path_volumes is not None:
if segmentation_label_names is not None:
segmentation_label_names = utils.load_array_if_path(segmentation_label_names)[indices]
csv_header = [[''] + segmentation_label_names[1:].tolist()]
csv_header += [[''] + [str(lab) for lab in segmentation_labels[1:]]]
else:
csv_header = [['subjects'] + [str(lab) for lab in segmentation_labels[1:]]]
with open(path_volumes, 'w') as csvFile:
writer = csv.writer(csvFile)
writer.writerows(csv_header)
# build network
_, _, n_dims, n_channels, _, _ = utils.get_volume_info(path_images[0])
model_input_shape = [None] * n_dims + [n_channels]
net = build_model(path_model, model_input_shape, n_levels, len(segmentation_labels), conv_size,
nb_conv_per_level, unet_feat_count, feat_multiplier, activation, sigma_smoothing)
# perform segmentation
loop_info = utils.LoopInfo(len(path_images), 10, 'predicting', True)
for idx, (path_image, path_segmentation, path_posterior, path_resample, tmp_compute) in \
enumerate(zip(path_images, path_segmentations, path_posteriors, path_resampled, compute)):
# compute segmentation only if needed
if tmp_compute:
if verbose:
loop_info.update(idx)
# preprocessing
image, aff, h, im_res, _, _, shape, pad_shape, crop_idx, im_flipped = \
preprocess_image(path_image, n_levels, target_res, cropping, padding, flip, path_resample)
# prediction
prediction_patch = net.predict(image)
prediction_patch_flip = net.predict(im_flipped) if flip else None
# postprocessing
seg, posteriors = postprocess(prediction_patch, pad_shape, shape, crop_idx, n_dims, segmentation_labels,
lr_indices, keep_biggest_component, aff,
topology_classes=topology_classes, post_patch_flip=prediction_patch_flip)
# write results to disk
if path_segmentation is not None:
utils.save_volume(seg, aff, h, path_segmentation, dtype='int32')
if path_posterior is not None:
if n_channels > 1:
posteriors = utils.add_axis(posteriors, axis=[0, -1])
utils.save_volume(posteriors, aff, h, path_posterior, dtype='float32')
else:
if path_volumes is not None:
posteriors, _, _, _, _, _, im_res = utils.get_volume_info(path_posterior, True, aff_ref=np.eye(4))
else:
posteriors = im_res = None
# compute volumes
if path_volumes is not None:
volumes = np.sum(posteriors[..., 1:], axis=tuple(range(0, len(posteriors.shape) - 1)))
volumes = np.around(volumes * np.prod(im_res), 3)
row = [os.path.basename(path_image).replace('.nii.gz', '')] + [str(vol) for vol in volumes]
with open(path_volumes, 'a') as csvFile:
writer = csv.writer(csvFile)
writer.writerow(row)
# evaluate
if gt_folder is not None:
        # find the folder where segmentations are saved (the evaluation folder), and get the labels on which to evaluate
eval_folder = os.path.dirname(path_segmentations[0])
if evaluation_labels is None:
evaluation_labels = segmentation_labels
# set path of result arrays for surface distance if necessary
if compute_distances:
path_hausdorff = os.path.join(eval_folder, 'hausdorff.npy')
path_hausdorff_99 = os.path.join(eval_folder, 'hausdorff_99.npy')
path_hausdorff_95 = os.path.join(eval_folder, 'hausdorff_95.npy')
path_mean_distance = os.path.join(eval_folder, 'mean_distance.npy')
else:
path_hausdorff = path_hausdorff_99 = path_hausdorff_95 = path_mean_distance = None
# compute evaluation metrics
evaluate.evaluation(gt_folder,
eval_folder,
evaluation_labels,
mask_dir=mask_folder,
path_dice=os.path.join(eval_folder, 'dice.npy'),
path_hausdorff=path_hausdorff,
path_hausdorff_99=path_hausdorff_99,
path_hausdorff_95=path_hausdorff_95,
path_mean_distance=path_mean_distance,
list_incorrect_labels=list_incorrect_labels,
list_correct_labels=list_correct_labels,
recompute=recompute,
verbose=verbose)
def prepare_output_files(path_images, out_seg, out_posteriors, out_resampled, out_volumes, recompute):
    '''
    Prepare output file paths for segmentations, posteriors, resampled images and volumes.
    Returns per-image lists of paths, plus a list of booleans flagging which cases still
    need to be computed.
    '''
# check inputs
assert path_images is not None, 'please specify an input file/folder (--i)'
assert out_seg is not None, 'please specify an output file/folder (--o)'
# convert path to absolute paths
path_images = os.path.abspath(path_images)
basename = os.path.basename(path_images)
out_seg = os.path.abspath(out_seg) if (out_seg is not None) else out_seg
out_posteriors = os.path.abspath(out_posteriors) if (out_posteriors is not None) else out_posteriors
out_resampled = os.path.abspath(out_resampled) if (out_resampled is not None) else out_resampled
out_volumes = os.path.abspath(out_volumes) if (out_volumes is not None) else out_volumes
# path_images is a folder
if ('.nii.gz' not in basename) & ('.nii' not in basename) & ('.mgz' not in basename) & ('.npz' not in basename):
if os.path.isfile(path_images):
raise Exception('Extension not supported for %s, only use: nii.gz, .nii, .mgz, or .npz' % path_images)
path_images = utils.list_images_in_folder(path_images)
if (out_seg[-7:] == '.nii.gz') | (out_seg[-4:] == '.nii') | (out_seg[-4:] == '.mgz') | (out_seg[-4:] == '.npz'):
raise Exception('Output folders cannot have extensions: .nii.gz, .nii, .mgz, or .npz, had %s' % out_seg)
utils.mkdir(out_seg)
out_seg = [os.path.join(out_seg, os.path.basename(image)).replace('.nii', '_synthseg.nii') for image in
path_images]
out_seg = [seg_path.replace('.mgz', '_synthseg.mgz') for seg_path in out_seg]
out_seg = [seg_path.replace('.npz', '_synthseg.npz') for seg_path in out_seg]
recompute_seg = [not os.path.isfile(path_seg) for path_seg in out_seg]
if out_posteriors is not None:
if (out_posteriors[-7:] == '.nii.gz') | (out_posteriors[-4:] == '.nii') | \
(out_posteriors[-4:] == '.mgz') | (out_posteriors[-4:] == '.npz'):
raise Exception('Output folders cannot have extensions: '
'.nii.gz, .nii, .mgz, or .npz, had %s' % out_posteriors)
utils.mkdir(out_posteriors)
out_posteriors = [os.path.join(out_posteriors, os.path.basename(image)).replace('.nii',
'_posteriors.nii') for image in path_images]
out_posteriors = [posteriors_path.replace('.mgz', '_posteriors.mgz') for posteriors_path in out_posteriors]
out_posteriors = [posteriors_path.replace('.npz', '_posteriors.npz') for posteriors_path in out_posteriors]
recompute_post = [not os.path.isfile(path_post) for path_post in out_posteriors]
else:
out_posteriors = [out_posteriors] * len(path_images)
recompute_post = [out_volumes is not None] * len(path_images)
if out_resampled is not None:
if (out_resampled[-7:] == '.nii.gz') | (out_resampled[-4:] == '.nii') | \
(out_resampled[-4:] == '.mgz') | (out_resampled[-4:] == '.npz'):
raise Exception('Output folders cannot have extensions: '
'.nii.gz, .nii, .mgz, or .npz, had %s' % out_resampled)
utils.mkdir(out_resampled)
out_resampled = [os.path.join(out_resampled, os.path.basename(image)).replace('.nii',
'_resampled.nii') for image in path_images]
out_resampled = [resampled_path.replace('.mgz', '_resampled.mgz') for resampled_path in out_resampled]
out_resampled = [resampled_path.replace('.npz', '_resampled.npz') for resampled_path in out_resampled]
recompute_resampled = [not os.path.isfile(path_post) for path_post in out_resampled]
else:
out_resampled = [out_resampled] * len(path_images)
recompute_resampled = [out_volumes is not None] * len(path_images)
# path_images is an image
else:
assert os.path.isfile(path_images), "file does not exist: %s \n" \
"please make sure the path and the extension are correct" % path_images
path_images = [path_images]
if ('.nii.gz' not in out_seg) & ('.nii' not in out_seg) & ('.mgz' not in out_seg) & ('.npz' not in out_seg):
utils.mkdir(out_seg)
filename = os.path.basename(path_images[0]).replace('.nii', '_synthseg.nii')
filename = filename.replace('.mgz', '_synthseg.mgz')
filename = filename.replace('.npz', '_synthseg.npz')
out_seg = os.path.join(out_seg, filename)
else:
utils.mkdir(os.path.dirname(out_seg))
out_seg = [out_seg]
recompute_seg = [not os.path.isfile(out_seg[0])]
if out_posteriors is not None:
if ('.nii.gz' not in out_posteriors) & ('.nii' not in out_posteriors) &\
('.mgz' not in out_posteriors) & ('.npz' not in out_posteriors):
utils.mkdir(out_posteriors)
filename = os.path.basename(path_images[0]).replace('.nii', '_posteriors.nii')
filename = filename.replace('.mgz', '_posteriors.mgz')
filename = filename.replace('.npz', '_posteriors.npz')
out_posteriors = os.path.join(out_posteriors, filename)
else:
utils.mkdir(os.path.dirname(out_posteriors))
recompute_post = [not os.path.isfile(out_posteriors[0])]
else:
recompute_post = [out_volumes is not None]
out_posteriors = [out_posteriors]
if out_resampled is not None:
if ('.nii.gz' not in out_resampled) & ('.nii' not in out_resampled) &\
('.mgz' not in out_resampled) & ('.npz' not in out_resampled):
utils.mkdir(out_resampled)
filename = os.path.basename(path_images[0]).replace('.nii', '_resampled.nii')
filename = filename.replace('.mgz', '_resampled.mgz')
filename = filename.replace('.npz', '_resampled.npz')
out_resampled = os.path.join(out_resampled, filename)
else:
utils.mkdir(os.path.dirname(out_resampled))
recompute_resampled = [not os.path.isfile(out_resampled[0])]
else:
recompute_resampled = [out_volumes is not None]
out_resampled = [out_resampled]
recompute_list = [recompute | re_seg | re_post | re_res
for (re_seg, re_post, re_res) in zip(recompute_seg, recompute_post, recompute_resampled)]
if out_volumes is not None:
if out_volumes[-4:] != '.csv':
print('Path for volume outputs provided without csv extension. Adding csv extension.')
out_volumes += '.csv'
utils.mkdir(os.path.dirname(out_volumes))
return path_images, out_seg, out_posteriors, out_resampled, out_volumes, recompute_list
def preprocess_image(im_path, n_levels, target_res, crop=None, padding=None, flip=False, path_resample=None):
# read image and corresponding info
im, _, aff, n_dims, n_channels, header, im_res = utils.get_volume_info(im_path, True)
# resample image if necessary
if target_res is not None:
target_res = np.squeeze(utils.reformat_to_n_channels_array(target_res, n_dims))
if np.any((im_res > target_res + 0.05) | (im_res < target_res - 0.05)):
im_res = target_res
im, aff = edit_volumes.resample_volume(im, aff, im_res)
if path_resample is not None:
utils.save_volume(im, aff, header, path_resample)
# align image
im = edit_volumes.align_volume_to_ref(im, aff, aff_ref=np.eye(4), n_dims=n_dims)
shape = list(im.shape)
# pad image if specified
if padding:
im = edit_volumes.pad_volume(im, padding_shape=padding)
pad_shape = im.shape[:n_dims]
else:
pad_shape = shape
# check that patch_shape or im_shape are divisible by 2**n_levels
if crop is not None:
crop = utils.reformat_to_list(crop, length=n_dims, dtype='int')
if not all([pad_shape[i] >= crop[i] for i in range(len(pad_shape))]):
crop = [min(pad_shape[i], crop[i]) for i in range(n_dims)]
if not all([size % (2**n_levels) == 0 for size in crop]):
crop = [utils.find_closest_number_divisible_by_m(size, 2 ** n_levels) for size in crop]
else:
if not all([size % (2**n_levels) == 0 for size in pad_shape]):
crop = [utils.find_closest_number_divisible_by_m(size, 2 ** n_levels) for size in pad_shape]
# crop image if necessary
if crop is not None:
im, crop_idx = edit_volumes.crop_volume(im, cropping_shape=crop, return_crop_idx=True)
else:
crop_idx = None
# normalise image
if n_channels == 1:
im = edit_volumes.rescale_volume(im, new_min=0., new_max=1., min_percentile=0.5, max_percentile=99.5)
else:
for i in range(im.shape[-1]):
im[..., i] = edit_volumes.rescale_volume(im[..., i], new_min=0., new_max=1.,
min_percentile=0.5, max_percentile=99.5)
# flip image along right/left axis
if flip & (n_dims > 2):
im_flipped = edit_volumes.flip_volume(im, direction='rl', aff=np.eye(4))
im_flipped = utils.add_axis(im_flipped) if n_channels > 1 else utils.add_axis(im_flipped, axis=[0, -1])
else:
im_flipped = None
# add batch and channel axes
im = utils.add_axis(im) if n_channels > 1 else utils.add_axis(im, axis=[0, -1])
return im, aff, header, im_res, n_channels, n_dims, shape, pad_shape, crop_idx, im_flipped
def build_model(model_file, input_shape, n_levels, n_lab, conv_size, nb_conv_per_level, unet_feat_count,
feat_multiplier, activation, sigma_smoothing):
assert os.path.isfile(model_file), "The provided model path does not exist."
# build UNet
net = nrn_models.unet(nb_features=unet_feat_count,
input_shape=input_shape,
nb_levels=n_levels,
conv_size=conv_size,
nb_labels=n_lab,
feat_mult=feat_multiplier,
activation=activation,
nb_conv_per_level=nb_conv_per_level,
batch_norm=-1)
net.load_weights(model_file, by_name=True)
# smooth posteriors if specified
if sigma_smoothing > 0:
last_tensor = net.output
last_tensor._keras_shape = tuple(last_tensor.get_shape().as_list())
last_tensor = layers.GaussianBlur(sigma=sigma_smoothing)(last_tensor)
net = Model(inputs=net.inputs, outputs=last_tensor)
return net
def postprocess(post_patch, pad_shape, im_shape, crop, n_dims, segmentation_labels, lr_indices,
                keep_biggest_component, aff, topology_classes=None, post_patch_flip=None):
# get posteriors and segmentation
post_patch = np.squeeze(post_patch)
if post_patch_flip is not None:
post_patch_flip = edit_volumes.flip_volume(np.squeeze(post_patch_flip), direction='rl', aff=np.eye(4))
if lr_indices is not None:
post_patch_flip[..., lr_indices.flatten()] = post_patch_flip[..., lr_indices[::-1].flatten()]
post_patch = 0.5 * (post_patch + post_patch_flip)
# keep biggest connected component (use it with smoothing!)
if keep_biggest_component:
tmp_post_patch = post_patch[..., 1:]
post_patch_mask = np.sum(tmp_post_patch, axis=-1) > 0.25
post_patch_mask = edit_volumes.get_largest_connected_component(post_patch_mask)
post_patch_mask = np.stack([post_patch_mask]*tmp_post_patch.shape[-1], axis=-1)
tmp_post_patch = edit_volumes.mask_volume(tmp_post_patch, mask=post_patch_mask)
post_patch[..., 1:] = tmp_post_patch
# reset posteriors to zero outside the largest connected component of each topological class
if topology_classes is not None:
post_patch_mask = post_patch > 0.25
for topology_class in np.unique(topology_classes)[1:]:
tmp_topology_indices = np.where(topology_classes == topology_class)[0]
tmp_mask = np.any(post_patch_mask[..., tmp_topology_indices], axis=-1)
tmp_mask = edit_volumes.get_largest_connected_component(tmp_mask)
for idx in tmp_topology_indices:
post_patch[..., idx] *= tmp_mask
# renormalise posteriors and get hard segmentation
if (post_patch_flip is not None) | keep_biggest_component | (topology_classes is not None):
post_patch /= np.sum(post_patch, axis=-1)[..., np.newaxis]
seg_patch = post_patch.argmax(-1)
# paste patches back to matrix of original image size
if crop is not None:
seg = np.zeros(shape=pad_shape, dtype='int32')
posteriors = np.zeros(shape=[*pad_shape, segmentation_labels.shape[0]])
posteriors[..., 0] = np.ones(pad_shape) # place background around patch
if n_dims == 2:
seg[crop[0]:crop[2], crop[1]:crop[3]] = seg_patch
posteriors[crop[0]:crop[2], crop[1]:crop[3], :] = post_patch
elif n_dims == 3:
seg[crop[0]:crop[3], crop[1]:crop[4], crop[2]:crop[5]] = seg_patch
posteriors[crop[0]:crop[3], crop[1]:crop[4], crop[2]:crop[5], :] = post_patch
else:
seg = seg_patch
posteriors = post_patch
seg = segmentation_labels[seg.astype('int')].astype('int')
if im_shape != pad_shape:
bounds = [int((p-i)/2) for (p, i) in zip(pad_shape, im_shape)]
bounds += [p + i for (p, i) in zip(bounds, im_shape)]
seg = edit_volumes.crop_volume_with_idx(seg, bounds)
posteriors = edit_volumes.crop_volume_with_idx(posteriors, bounds, n_dims=n_dims)
# align prediction back to first orientation
if n_dims > 2:
seg = edit_volumes.align_volume_to_ref(seg, aff=np.eye(4), aff_ref=aff, n_dims=n_dims, return_aff=False)
posteriors = edit_volumes.align_volume_to_ref(posteriors, aff=np.eye(4), aff_ref=aff, n_dims=n_dims)
return seg, posteriors
|
{"hexsha": "bc8adc822e5b8227d6952e674d42df447e4fd832", "size": 28973, "ext": "py", "lang": "Python", "max_stars_repo_path": "SynthSeg/predict.py", "max_stars_repo_name": "a-parida12/SynthSeg", "max_stars_repo_head_hexsha": "fc37820826f13e39603e96e532bdbdd409b51774", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-11-23T11:57:53.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-08T10:22:13.000Z", "max_issues_repo_path": "SynthSeg/predict.py", "max_issues_repo_name": "a-parida12/SynthSeg", "max_issues_repo_head_hexsha": "fc37820826f13e39603e96e532bdbdd409b51774", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "SynthSeg/predict.py", "max_forks_repo_name": "a-parida12/SynthSeg", "max_forks_repo_head_hexsha": "fc37820826f13e39603e96e532bdbdd409b51774", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-11-09T17:09:55.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-09T17:09:55.000Z", "avg_line_length": 54.8731060606, "max_line_length": 120, "alphanum_fraction": 0.6635488213, "include": true, "reason": "import numpy", "num_tokens": 6830}
|
\documentclass[11 pt]{scrartcl}
\usepackage[header, margin, koma]{tyler}
\newcommand{\hwtitle}{Discussion 4B Recap}
\pagestyle{fancy}
\fancyhf{}
\fancyhead[l]{\hwtitle{}}
\fancyhead[r]{Tyler Zhu}
\cfoot{\thepage}
\begin{document}
\title{\Large \hwtitle{}}
\author{\large Tyler Zhu}
\date{\large\today}
\maketitle
\section{Polynomials}
\itemnum
\ii There are two ways to uniquely determine a degree $d$ polynomial:
\itemnum
\ii using $d+1$ coefficients
\ii using $d+1$ points
\itemend
\ii A nonzero degree $d$ polynomial has at most $d$ roots
\ii Working over $\text{GF}(p)$ (the Galois Field of size $p$) is equivalent to working modulo $p$ (i.e. all coefficients and variables are interpreted this way).
\ii Polynomial Division Algorithm: given polynomials $p(x), q(x)$, we can write $p(x) = q(x)h(x) + r(x)$, where $\deg r < \deg q$ (compare to integer division algorithm).
\ii \# of polynomials of degree $\leq d$ over GF$(p)$ passing through $d+1-k$ points is $p^k$
\itemend
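For instance, over $\text{GF}(5)$ there are $5^3$ polynomials of degree $\leq 2$ (three free coefficients); fixing the value at a single point ($d+1-k = 1$, so $k = 2$) leaves $5^2 = 25$ of them, matching the $p^k$ count above.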
\section{Secret Sharing}
\itemnum
\ii We want to share a secret that needs consensus of at least $k$ people to reveal.
\ii Scheme is to create a degree $k-1$ polynomial with $P(0)$ = secret and give everyone a different pair $(i, P(i))$
\ii One variation is if different types of people must agree in different numbers (e.g. a secretary general plus 55 people, or just 164 people). Simply give some people more shares, i.e. a weighting scheme.
\ii Another variation is if different committees need to agree to reveal the secret. Create polynomials for each committee and distribute shares to their members. Only when all members agree will their committee secret be revealed; all committee secrets are needed to reveal the overall secret.
\ii Can't use the first scheme in the second case as different people in different committees can then collude.
\itemend
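As a small worked example: to share the secret $s = 3$ over $\text{GF}(5)$ with threshold $k = 3$, pick the random degree-$2$ polynomial $P(x) = 3 + 2x + x^2 \pmod 5$, so $P(0) = 3$. The shares are $(1, P(1)) = (1,1)$, $(2, P(2)) = (2,1)$, $(3, P(3)) = (3,3)$, $(4, P(4)) = (4,2)$; any $3$ of them determine $P$ by Lagrange interpolation and hence recover $P(0) = 3$, while any $2$ are consistent with all $5$ possible secrets.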
\end{document}
|
{"hexsha": "21dcab41f768a32c3057ff0a1958bbcbb4a66b61", "size": 1925, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "CS70/recap4b/recap4b.tex", "max_stars_repo_name": "cbugwadia32/course-notes", "max_stars_repo_head_hexsha": "cc269a2606bab22a5c9b8f1af23f360fa291c583", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2021-07-20T19:22:41.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-07T01:19:16.000Z", "max_issues_repo_path": "CS70/recap4b/recap4b.tex", "max_issues_repo_name": "cbugwadia32/course-notes", "max_issues_repo_head_hexsha": "cc269a2606bab22a5c9b8f1af23f360fa291c583", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "CS70/recap4b/recap4b.tex", "max_forks_repo_name": "cbugwadia32/course-notes", "max_forks_repo_head_hexsha": "cc269a2606bab22a5c9b8f1af23f360fa291c583", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-10-13T08:41:17.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-07T17:21:17.000Z", "avg_line_length": 45.8333333333, "max_line_length": 295, "alphanum_fraction": 0.7163636364, "num_tokens": 529}
|
import numpy as np
from scipy import signal
import matplotlib.pyplot as plt
from scipy.signal import butter, lfilter
#from control import matlab
def decimate(data,fs_befor,fs_after):
from scipy.signal import decimate
if fs_after<=8:
data_ = decimate(data,int(fs_befor/8),ftype='iir')
data_ = decimate(data_,int(8/fs_after),ftype='iir')
else:
data_ = decimate(data,int(fs_befor/fs_after),ftype='iir')
return data_
def butter_bandpass_filter(data, lowcut, highcut, fs, order=4):
nyq = 0.5 * fs
low = lowcut/nyq
high = highcut/nyq
b, a = butter(order, [low, high], btype='band')
y = lfilter(b, a, data)
return y
def bandpass(data, lowcut, highcut, fs, order=4):
nyq = 0.5 * fs
if highcut==None:
b, a = butter(order, lowcut/nyq, btype='low')
elif lowcut==None:
b, a = butter(order, highcut/nyq, btype='high')
else:
b, a = butter(order, [lowcut/nyq, highcut/nyq], btype='band')
y = lfilter(b, a, data)
return y,b,a
def filt_iirpeak(dic,fs,f0,Q,plot=False):
w0 = f0/(fs/2)
num, den = signal.iirpeak(w0, Q)
data = { key:signal.lfilter(num,den,dic[key]) for key in dic.keys()}
if plot == True:
w, h = signal.freqz(num, den,worN=10000)
freq = w*fs/(2*np.pi)
fig, ax = plt.subplots(2, 1, figsize=(8, 6))
ax[0].semilogx(freq, 20*np.log10(abs(h)), color='blue')
ax[0].set_title("Frequency Response")
ax[0].set_ylabel("Amplitude (dB)", color='blue')
#ax[0].set_xlim([0, 100])
#ax[0].set_ylim([-50, 10])
ax[0].grid()
ax[1].semilogx(freq, np.unwrap(np.angle(h))*180/np.pi, color='green')
ax[1].set_ylabel("Angle (degrees)", color='green')
ax[1].set_xlabel("Frequency (Hz)")
#ax[1].set_xlim([0, 100])
#ax[1].set_yticks([-90, -60, -30, 0, 30, 60, 90])
#ax[1].set_ylim([-90, 90])
ax[1].grid()
plt.savefig('hoge.png')
plt.close()
return data
def filt_butterBandPass(dic,fs,lowcut,highcut,order,plot=False):
nyq = 0.5 * fs
low = lowcut / nyq
high = highcut / nyq
num, den = butter(order, [low, high], btype='band')
data = { key:signal.lfilter(num,den,dic[key]) for key in dic.keys()}
if plot == True:
w, h = signal.freqz(num, den,worN=1000)
freq = w*fs/(2*np.pi)
fig, ax = plt.subplots(2, 1, figsize=(8, 6))
ax[0].semilogx(freq, 20*np.log10(abs(h)), color='blue')
ax[0].set_title("Frequency Response")
ax[0].set_ylabel("Amplitude (dB)", color='blue')
#ax[0].set_xlim([0, 100])
#ax[0].set_ylim([-50, 10])
ax[0].grid()
ax[1].semilogx(freq, np.unwrap(np.angle(h))*180/np.pi, color='green')
ax[1].set_ylabel("Angle (degrees)", color='green')
ax[1].set_xlabel("Frequency (Hz)")
#ax[1].set_xlim([0, 100])
#ax[1].set_yticks([-90, -60, -30, 0, 30, 60, 90])
#ax[1].set_ylim([-90, 90])
ax[1].grid()
plt.savefig('hoge.png')
plt.close()
return data
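# Hedged usage sketch (synthetic signal, not part of the original module):
# band-pass a noisy 10 Hz tone sampled at 1 kHz with the helper above.
if __name__ == '__main__':
    fs = 1000.0
    t = np.arange(0, 1, 1 / fs)
    x = np.sin(2 * np.pi * 10 * t) + 0.5 * np.random.randn(t.size)
    y = butter_bandpass_filter(x, lowcut=5.0, highcut=15.0, fs=fs)
    print(np.std(x), np.std(y))  # the filtered signal should have less variance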
|
{"hexsha": "a795afb74e37c52933d63e6be1aca47668ca406c", "size": 3101, "ext": "py", "lang": "Python", "max_stars_repo_path": "miyopy/signal/filt.py", "max_stars_repo_name": "MiyoKouseki/miyopy", "max_stars_repo_head_hexsha": "0f2da1a99f656259b556a9aac892483b44d17112", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-11-10T03:24:09.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-10T03:24:09.000Z", "max_issues_repo_path": "miyopy/signal/filt.py", "max_issues_repo_name": "MiyoKouseki/miyopy", "max_issues_repo_head_hexsha": "0f2da1a99f656259b556a9aac892483b44d17112", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2019-09-02T22:42:20.000Z", "max_issues_repo_issues_event_max_datetime": "2020-04-27T06:28:35.000Z", "max_forks_repo_path": "miyopy/signal/filt.py", "max_forks_repo_name": "MiyoKouseki/miyopy", "max_forks_repo_head_hexsha": "0f2da1a99f656259b556a9aac892483b44d17112", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-03-26T09:33:16.000Z", "max_forks_repo_forks_event_max_datetime": "2019-03-26T09:33:16.000Z", "avg_line_length": 34.8426966292, "max_line_length": 81, "alphanum_fraction": 0.5701386649, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1034}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 13 10:06:46 2019
@author: mikael
"""
import struct
import math as m
import numpy as np
def to_float80(x):
sign = 0
exponent = 0
if (x < 0):
sign = 0x8000
x = -x
mantisse_h = 0
mantisse_l = 0
mantisse = 0
if (x != 0):
(mantisse, exponent) = m.frexp(x)
if ((exponent > 16384) | (mantisse >= 1)):
exponent = 0x7fff
mantisse = 0
else:
exponent += 16382
if (exponent < 0):
mantisse = m.ldexp(mantisse, exponent)
exponent = 0
sign_exponent = sign | exponent
mantisse = m.ldexp(mantisse, 32)
round_mantisse = m.floor(mantisse)
mantisse_h = round_mantisse
mantisse = m.ldexp(mantisse - round_mantisse, 32)
round_mantisse = m.floor(mantisse)
mantisse_l = round_mantisse
y = struct.pack('>HLL', sign_exponent, mantisse_h, mantisse_l)
return y
class AIFFWriter:
@property
def channels(self):
return self._channels
@property
def sample_rate(self):
return self._sample_rate
@property
def bits_per_sample(self):
return self._bits_per_sample
@property
def frames(self):
return self._frames
@property
def data(self):
return self._data
def __init__(self, filename, data, bits_per_sample = 16, sample_rate = 44100.0):
# data can be a list of channels, or a numpy array of shape (channels, number of samples)
data = np.array(data)
if len(data.shape) == 1: # Vector
data = np.reshape(data, (1, data.shape[0]))
data = data.T
bits_per_sample = int(bits_per_sample)
data_type = 2**(np.ceil(np.log2(bits_per_sample/8)) + 3)
data_type = int(data_type)
padding = data_type - bits_per_sample
self._writer = open(filename, "wb")
self._channels = 1 if len(data.shape) == 1 else data.shape[1]
self._frames = data.shape[0]
self._sample_rate = sample_rate
self._bits_per_sample = bits_per_sample
# Cast
data = np.floor(data).astype('int%d' % data_type)
self._data = data.flatten() * (2**padding)
self._write()
def _write(self):
bits_per_sample = self.bits_per_sample
samples = self.frames * self.channels
data = self.data
data_type = 2**(np.ceil(np.log2(bits_per_sample/8)) + 3)
data_type = int(data_type)
# FORM chunk is 12 bytes, COMM chunk is 26 bytes, SSND chunk is data size in bytes + 16
ssnd_size = np.ceil(samples * data_type/8) + 16
ssnd_size = int(ssnd_size)
form_size = 12
comm_size = 26
file_size = form_size + comm_size + ssnd_size
if data_type == 8:
fmt = '>%db'
elif data_type == 16:
fmt = '>%dh'
elif data_type == 32:
fmt = '>%di'
else:
fmt = '>%dq'
with self._writer as file:
# Write FORM chunk
file.write('FORM'.encode('ascii'))
file.write(struct.pack('>I', file_size - 8))
file.write('AIFF'.encode('ascii'))
# Write COMM chunk
file.write('COMM'.encode('ascii'))
file.write(struct.pack('>I', comm_size - 8))
file.write(struct.pack('>H', self.channels))
file.write(struct.pack('>I', self.frames))
file.write(struct.pack('>H', self.bits_per_sample))
file.write(to_float80(self.sample_rate))
# Write SSND chunk
file.write('SSND'.encode('ascii'))
file.write(struct.pack('>I', ssnd_size - 8))
file.write(struct.pack('>I', 0)) # Offset
file.write(struct.pack('>I', 0)) # Block size
file.write(struct.pack(fmt % samples, *data)) # Data
file.close()
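# Hedged usage sketch (output filename is hypothetical, not from the original
# module): write one second of a 440 Hz sine tone at 16-bit / 44.1 kHz.
if __name__ == '__main__':
    t = np.arange(44100) / 44100.0
    tone = np.sin(2 * np.pi * 440.0 * t) * (2 ** 15 - 1)  # scale to int16 range
    AIFFWriter('tone.aiff', tone, bits_per_sample=16, sample_rate=44100.0)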
|
{"hexsha": "e27cfd31e4b793e938f192855bc80c8494f22891", "size": 4017, "ext": "py", "lang": "Python", "max_stars_repo_path": "build/lib/audiofeel/io/aiff/writer.py", "max_stars_repo_name": "mrAgan/audiofeel-player", "max_stars_repo_head_hexsha": "39a1ea63808754a61590a72373b04a53a2331fbb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-09-15T10:45:03.000Z", "max_stars_repo_stars_event_max_datetime": "2019-09-15T10:45:03.000Z", "max_issues_repo_path": "build/lib/audiofeel/io/aiff/writer.py", "max_issues_repo_name": "mrAgan/audiofeel-player", "max_issues_repo_head_hexsha": "39a1ea63808754a61590a72373b04a53a2331fbb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "build/lib/audiofeel/io/aiff/writer.py", "max_forks_repo_name": "mrAgan/audiofeel-player", "max_forks_repo_head_hexsha": "39a1ea63808754a61590a72373b04a53a2331fbb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.1086956522, "max_line_length": 97, "alphanum_fraction": 0.5548917102, "include": true, "reason": "import numpy", "num_tokens": 1059}
|
#!/usr/bin/env python
import rospy
from sensor_msgs.msg import Image
import numpy as np
import sys
sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')
import cv2
sys.path.append('/opt/ros/kinetic/lib/python2.7/dist-packages')
from std_msgs.msg import String
from cv_bridge import CvBridge, CvBridgeError
from os import listdir
from os.path import isfile, join
from sensor_msgs.msg import PointCloud2
import std_msgs.msg
import sensor_msgs.point_cloud2 as pcl2
# basedir='/home/vm/catkin_build/src/sensor_fusion/Dataset/2011_09_26/2011_09_26_drive_0009_sync/image_02/data'
basedir='/home/vm/catkin_mrinal/src/darknet_ros/darknet_ros/Dataset/2011_09_26/2011_09_26_drive_0009_sync/image_02/data_'
if __name__ == '__main__':
    '''Sample code to publish camera images with python'''
rospy.init_node('image_publish')
    image_pub = rospy.Publisher("/camera_images", Image, queue_size=10)
# loop_rate=rospy.Rate(10)
# rospy.loginfo("Initializing sample pcl2 publisher node...")
#give time to roscore to make the connections
# rospy.sleep(1.)
bridge = CvBridge()
for f in sorted(listdir(basedir)) :
#for i in range (1000):
f_path=basedir+'/'+f
# print(f_path)
cv_image=cv2.imread(f_path)
print(cv_image.shape)
try:
img_msg=bridge.cv2_to_imgmsg(cv_image,"bgr8")
except CvBridgeError as e:
print('here\n')
img_msg.header.stamp=rospy.Time.now()
img_msg.header.frame_id='velodyne'
image_pub.publish(img_msg)
print('Image is published!!!!')
# loop_rate.sleep()
# rospy.sleep(0.01)
|
{"hexsha": "44c7b385d308c607992de0178ce33c2dd21de11a", "size": 1704, "ext": "py", "lang": "Python", "max_stars_repo_path": "darknet_ros/scripts/publishKittiImage.py", "max_stars_repo_name": "mrinalsenapati04/darknet_ros", "max_stars_repo_head_hexsha": "238c5dce25af9c607e73a59aa588c73343b99739", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "darknet_ros/scripts/publishKittiImage.py", "max_issues_repo_name": "mrinalsenapati04/darknet_ros", "max_issues_repo_head_hexsha": "238c5dce25af9c607e73a59aa588c73343b99739", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "darknet_ros/scripts/publishKittiImage.py", "max_forks_repo_name": "mrinalsenapati04/darknet_ros", "max_forks_repo_head_hexsha": "238c5dce25af9c607e73a59aa588c73343b99739", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.8813559322, "max_line_length": 121, "alphanum_fraction": 0.7018779343, "include": true, "reason": "import numpy", "num_tokens": 454}
|
# -*- coding: utf-8 -*-
import os
import time
import numpy as np
import pandas as pd
import scanpy as sc
import scipy.sparse as ssp
from cospar.tmap import _tmap_core as tmap_core
from cospar.tmap import _utils as tmap_util
from .. import help_functions as hf
from .. import logging as logg
from .. import settings
from .. import tool as tl
# v1 version, allows to set later time point
def infer_Tmap_from_multitime_clones(
adata_orig,
clonal_time_points=None,
later_time_point=None,
smooth_array=[15, 10, 5],
CoSpar_KNN=20,
sparsity_threshold=0.1,
intraclone_threshold=0.05,
normalization_mode=1,
extend_Tmap_space=False,
save_subset=True,
use_full_Smatrix=True,
trunca_threshold=[0.001, 0.01],
compute_new=False,
max_iter_N=5,
epsilon_converge=0.05,
):
"""
Infer transition map for clonal data with multiple time points.
    It prepares the adata object for cells at the targeted time points by
:func:`cospar.tmap._utils.select_time_points`, generates the similarity matrix
via :func:`cospar.tmap._utils.generate_similarity_matrix`, and iteratively calls
the core function :func:`.refine_Tmap_through_cospar` to update
the transition map.
* If `later_time_point=None`, the inferred map allows transitions
between neighboring time points. For example, if
clonal_time_points=['day1','day2','day3'], then it computes transitions
for pairs (day1, day2) and (day2, day3), but not (day1, day3).
* If `later_time_point` is specified, the function produces a map
between earlier time points and this later time point. For example, if
      `later_time_point='day3'`, the map allows transitions for pairs (day1, day3)
and (day2, day3), but not (day1,day2).
Parameters
------------
adata_orig: :class:`~anndata.AnnData` object
        Should be prepared from our anndata initialization.
clonal_time_points: `list` of `str`, optional (default: all time points)
List of time points to be included for analysis.
We assume that each selected time point has clonal measurements.
    later_time_point: `str`, optional (default: None)
        If specified, the function will produce a map T between the early
        time points among `clonal_time_points` and the `later_time_point`.
        If not specified, it produces a map T between neighboring clonal time points.
smooth_array: `list`, optional (default: [15,10,5])
List of smooth rounds at initial runs of iteration.
Suppose that it has a length N. For iteration n<N, the n-th entry of
smooth_array determines the kernel exponent to build the S matrix at the n-th
iteration. When n>N, we use the last entry of smooth_array to compute
        the S matrix. We recommend starting with a larger smoothing depth and
        gradually reducing it, as inspired by simulated annealing. Data with higher
        clonal dispersion should start with a higher smoothing depth. The final depth
        should depend on the manifold itself: fewer cells yield a smaller KNN graph,
        so a smaller final depth should be used. We recommend using multiples of 5
        for computational efficiency, e.g.,
        smooth_array=[20, 15, 10, 5] or [20, 15, 10].
max_iter_N: `int`, optional (default: 5)
The maximum iterations used to compute the transition map, regardless of epsilon_converge.
epsilon_converge: `float`, optional (default: 0.05)
The convergence threshold for the change of map correlations between consecutive iterations.
        This convergence test is activated only after CoSpar has iterated at least 3 times.
CoSpar_KNN: `int`, optional (default: 20)
The number of neighbors for KNN graph used for computing the
similarity matrix.
trunca_threshold: `list`, optional (default: [0.001,0.01])
Threshold to reset entries of a matrix to zero. The first entry is for
Similarity matrix; the second entry is for the Tmap.
This is only for computational and storage efficiency.
sparsity_threshold: `float`, optional (default: 0.1)
The relative threshold to remove noises in the updated transition map,
in the range [0,1].
intraclone_threshold: `float`, optional (default: 0.05)
The threshold to remove noises in the demultiplexed (un-smoothed) map,
in the range [0,1]
normalization_mode: `int`, optional (default: 1)
Normalization method. Choice: [0,1].
0, single-cell normalization; 1, Clone normalization. The clonal
normalization suppresses the contribution of large
clones, and is much more robust.
    extend_Tmap_space: `bool`, optional (default: `False`)
If true, the initial states for Tmap will include all states at initial time points,
and the later states for Tmap will include all states at later time points.
Otherwise, the initial and later state space of the Tmap will be
restricted to cells with multi-time clonal information
alone. The latter case speeds up the computation, which is recommended.
        This option is ignored when `later_time_point` is not None.
save_subset: `bool`, optional (default: True)
If true, save only Smatrix at smooth round [5,10,15,...];
Otherwise, save Smatrix at each round.
use_full_Smatrix: `bool`, optional (default: True)
If true, extract the relevant Smatrix from the full Smatrix defined by all cells.
This tends to be more accurate. The package is optimized around this choice.
    compute_new: `bool`, optional (default: False)
If True, compute Smatrix from scratch, whether it was
computed and saved before or not. This is activated only when
`use_full_Smatrix=False`.
Returns
-------
adata: :class:`~anndata.AnnData` object
Store results at adata.uns['transition_map']
and adata.uns['intraclone_transition_map']. This adata is different
from the input adata_orig due to subsampling cells.
"""
t0 = time.time()
hf.check_available_clonal_info(adata_orig)
clonal_time_points_0 = np.array(adata_orig.uns["clonal_time_points"])
if len(clonal_time_points_0) < 2:
raise ValueError("There are no multi-time clones. Abort the inference.")
if clonal_time_points is None:
clonal_time_points = clonal_time_points_0
if type(later_time_point) == list:
later_time_point = later_time_point[0]
if later_time_point is not None:
clonal_time_points = list(clonal_time_points) + [later_time_point]
clonal_time_points = list(set(clonal_time_points))
hf.check_input_parameters(
adata_orig,
later_time_point=later_time_point,
clonal_time_points=clonal_time_points,
smooth_array=smooth_array,
save_subset=save_subset,
)
# order the clonal time points
time_ordering = adata_orig.uns["time_ordering"]
sel_idx_temp = np.in1d(time_ordering, clonal_time_points)
clonal_time_points = time_ordering[sel_idx_temp]
logg.info("------Compute the full Similarity matrix if necessary------")
data_path = settings.data_path
if (
use_full_Smatrix
): # prepare the similarity matrix with all state info, all subsequent similarity will be down-sampled from this one.
temp_str = "0" + str(trunca_threshold[0])[2:]
round_of_smooth = np.max(smooth_array)
data_des = adata_orig.uns["data_des"][0]
similarity_file_name = os.path.join(
data_path,
f"{data_des}_Similarity_matrix_with_all_cell_states_kNN{CoSpar_KNN}_Truncate{temp_str}",
)
if not (
os.path.exists(similarity_file_name + f"_SM{round_of_smooth}.npz")
and (not compute_new)
):
similarity_matrix_full = tmap_util.generate_similarity_matrix(
adata_orig,
similarity_file_name,
round_of_smooth=round_of_smooth,
neighbor_N=CoSpar_KNN,
truncation_threshold=trunca_threshold[0],
save_subset=save_subset,
compute_new_Smatrix=compute_new,
)
# compute transition map between neighboring time points
if later_time_point is None:
logg.info("----Infer transition map between neighboring time points-----")
logg.info("Step 1: Select time points")
adata = tmap_util.select_time_points(
adata_orig,
time_point=clonal_time_points,
extend_Tmap_space=extend_Tmap_space,
)
logg.info("Step 2: Optimize the transition map recursively")
tmap_core.infer_Tmap_from_multitime_clones_private(
adata,
smooth_array=smooth_array,
neighbor_N=CoSpar_KNN,
sparsity_threshold=sparsity_threshold,
intraclone_threshold=intraclone_threshold,
normalization_mode=normalization_mode,
save_subset=save_subset,
use_full_Smatrix=use_full_Smatrix,
trunca_threshold=trunca_threshold,
compute_new_Smatrix=compute_new,
max_iter_N=max_iter_N,
epsilon_converge=epsilon_converge,
)
if "Smatrix" in adata.uns.keys():
adata.uns.pop("Smatrix")
logg.info(f"-----------Total used time: {time.time()-t0} s ------------")
return adata
else:
# compute transition map between initial time points and the later time point
sel_id = np.nonzero(np.in1d(clonal_time_points, later_time_point))[0][0]
initial_time_points = clonal_time_points[:sel_id]
time_info_orig = np.array(adata_orig.obs["time_info"])
sp_idx = np.zeros(adata_orig.shape[0], dtype=bool)
all_time_points = list(initial_time_points) + [later_time_point]
label = "t"
for xx in all_time_points:
id_array = np.nonzero(time_info_orig == xx)[0]
sp_idx[id_array] = True
label = label + "*" + str(xx)
adata = adata_orig[sp_idx]
data_des_orig = adata_orig.uns["data_des"][0]
data_des_0 = adata_orig.uns["data_des"][-1]
data_des = (
data_des_0
+ f"_MultiTimeClone_Later_FullSpace{int(extend_Tmap_space)}_{label}"
)
adata.uns["data_des"] = [data_des_orig, data_des]
time_info = np.array(adata.obs["time_info"])
time_index_t2 = time_info == later_time_point
time_index_t1 = ~time_index_t2
#### used for similarity matrix generation
Tmap_cell_id_t1 = np.nonzero(time_index_t1)[0]
Tmap_cell_id_t2 = np.nonzero(time_index_t2)[0]
adata.uns["Tmap_cell_id_t1"] = Tmap_cell_id_t1
adata.uns["Tmap_cell_id_t2"] = Tmap_cell_id_t2
adata.uns["clonal_cell_id_t1"] = Tmap_cell_id_t1
adata.uns["clonal_cell_id_t2"] = Tmap_cell_id_t2
adata.uns["sp_idx"] = sp_idx
data_path = settings.data_path
transition_map = np.zeros((len(Tmap_cell_id_t1), len(Tmap_cell_id_t2)))
intraclone_transition_map = np.zeros(
(len(Tmap_cell_id_t1), len(Tmap_cell_id_t2))
)
logg.info(
"------Infer transition map between initial time points and the later time one------"
)
for yy in initial_time_points:
logg.info(f"--------Current initial time point: {yy}--------")
logg.info("Step 1: Select time points")
adata_temp = tmap_util.select_time_points(
adata_orig, time_point=[yy, later_time_point], extend_Tmap_space=True
) # for this to work, we need to set extend_Tmap_space=True, otherwise for different initial time points, the later Tmap_cell_id_t2 may be different
logg.info("Step 2: Optimize the transition map recursively")
tmap_core.infer_Tmap_from_multitime_clones_private(
adata_temp,
smooth_array=smooth_array,
neighbor_N=CoSpar_KNN,
sparsity_threshold=sparsity_threshold,
intraclone_threshold=intraclone_threshold,
normalization_mode=normalization_mode,
save_subset=save_subset,
use_full_Smatrix=use_full_Smatrix,
trunca_threshold=trunca_threshold,
compute_new_Smatrix=compute_new,
max_iter_N=max_iter_N,
epsilon_converge=epsilon_converge,
)
temp_id_t1 = np.nonzero(time_info == yy)[0]
sp_id_t1 = hf.converting_id_from_fullSpace_to_subSpace(
temp_id_t1, Tmap_cell_id_t1
)[0]
transition_map[sp_id_t1, :] = adata_temp.uns["transition_map"].A
intraclone_transition_map[sp_id_t1, :] = adata_temp.uns[
"intraclone_transition_map"
].A
if "Smatrix" in adata_temp.uns.keys():
adata_temp.uns.pop("Smatrix")
adata.uns["transition_map"] = ssp.csr_matrix(transition_map)
adata.uns["intraclone_transition_map"] = ssp.csr_matrix(
intraclone_transition_map
)
logg.info(f"-----------Total used time: {time.time()-t0} s ------------")
return adata
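# --- Added illustrative sketch; not part of the original module. ---
# A minimal example of how one might call infer_Tmap_from_multitime_clones.
# The h5ad file name and the time-point labels are hypothetical placeholders;
# a real call needs an adata object prepared by the cospar preprocessing
# pipeline, with clonal information and a 'time_info' column in adata.obs.
def _example_infer_Tmap_from_multitime_clones():
    adata_orig = sc.read_h5ad("clonal_data.h5ad")  # hypothetical input file
    adata = infer_Tmap_from_multitime_clones(
        adata_orig,
        clonal_time_points=["day1", "day2", "day3"],  # hypothetical labels
        later_time_point=None,  # map between neighboring time points
        smooth_array=[15, 10, 5],
    )
    # sparse transition map between cells at t1 and cells at t2
    return adata.uns["transition_map"]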
def infer_intraclone_Tmap(adata, intraclone_threshold=0.05, normalization_mode=1):
"""
Infer intra-clone transition map.
Parameters
----------
adata: :class:`~anndata.AnnData` object
Should be prepared by :func:`cospar.tmap._utils.select_time_points`
intraclone_threshold: `float`, optional (default: 0.05)
The threshold to remove noises in the demultiplexed (un-smoothed) map,
in the range [0,1]
normalization_mode: `int`, optional (default: 1)
Normalization method. Choice: [0,1].
0, single-cell normalization; 1, Clone normalization. The clonal
normalization suppresses the contribution of large
clones, and is much more robust.
Returns
-------
None. Update/generate adata.uns['intraclone_transition_map']
"""
########## extract data
if "transition_map" not in adata.uns.keys():
logg.error(
"Please run ---- CS.tmap.infer_Tmap_from_multitime_clones ---- first"
)
else:
clone_annot = adata.obsm["X_clone"]
multiTime_cell_id_t1 = [adata.uns["Tmap_cell_id_t1"]]
multiTime_cell_id_t2 = [adata.uns["Tmap_cell_id_t2"]]
proportion = adata.uns["proportion"]
transition_map = adata.uns["transition_map"]
X_clone = clone_annot.copy()
if not ssp.issparse(X_clone):
X_clone = ssp.csr_matrix(X_clone)
demultiplexed_map = tmap_core.refine_Tmap_through_cospar_noSmooth(
multiTime_cell_id_t1,
multiTime_cell_id_t2,
proportion,
transition_map,
X_clone,
sparsity_threshold=intraclone_threshold,
normalization_mode=normalization_mode,
)
adata.uns["intraclone_transition_map"] = ssp.csr_matrix(demultiplexed_map)
def infer_Tmap_from_one_time_clones(
adata_orig,
initial_time_points=None,
later_time_point=None,
initialize_method="OT",
OT_epsilon=0.02,
OT_dis_KNN=5,
OT_cost="SPD",
HighVar_gene_pctl=85,
padding_X_clone=False,
normalization_mode=1,
sparsity_threshold=0.2,
CoSpar_KNN=20,
use_full_Smatrix=True,
smooth_array=[15, 10, 5],
trunca_threshold=[0.001, 0.01],
compute_new=False,
max_iter_N=[1, 5],
epsilon_converge=[0.05, 0.05],
use_fixed_clonesize_t1=False,
sort_clone=1,
save_subset=True,
use_existing_KNN_graph=False,
):
"""
    Infer transition map from clones with a single time point.
We jointly infer a transition map and the initial clonal observation
through iteration. The inferred map is between each of the initial
time points ['day_1','day_2',...,] and the time point with clonal
observation. We initialize the transition map by either the OT
method or HighVar method.
**Summary**
* Parameters relevant for cell state selection: initial_time_points,
later_time_point.
* Initialization methods:
        * 'OT': optimal transport based method. Key parameters: `OT_epsilon, OT_dis_KNN`.
See :func:`.infer_Tmap_from_optimal_transport`.
* 'HighVar': a customized approach, assuming that cells similar in gene
expression across time points share clonal origin. Key parameter: `HighVar_gene_pctl`.
See :func:`.infer_Tmap_from_HighVar`.
* Key parameters relevant for joint optimization itself (which relies on coherent sparse optimization):
`smooth_array, CoSpar_KNN, sparsity_threshold`. See :func:`.refine_Tmap_through_joint_optimization`.
Parameters
----------
adata_orig: :class:`~anndata.AnnData` object
It is assumed to be preprocessed and has multiple time points.
    initial_time_points: `list`, optional (default: all time points)
List of initial time points to be included for the transition map.
Like ['day_1','day_2']. Entries consistent with adata.obs['time_info'].
    later_time_point: `str`, optional (default: the last time point)
The time point with clonal observation. Its value should be
consistent with adata.obs['time_info'].
initialize_method: `str`, optional (default 'OT')
Method to initialize the transition map from state information.
Choice: {'OT', 'HighVar'}.
OT_epsilon: `float`, optional (default: 0.02)
The entropic regularization, >0. A larger value increases
uncertainty of the transition. Relevant when `initialize_method='OT'`.
OT_dis_KNN: `int`, optional (default: 5)
Number of nearest neighbors to construct the KNN graph for
computing the shortest path distance. Relevant when `initialize_method='OT'`.
OT_cost: `str`, optional (default: `SPD`), options {'GED','SPD'}
The cost metric. We provide gene expression distance (GED), and also
shortest path distance (SPD). GED is much faster, but SPD is more accurate.
However, cospar is robust to the initialization.
HighVar_gene_pctl: `int`, optional (default: 85)
Percentile threshold to select highly variable genes to construct pseudo-clones.
A higher value selects more variable genes. Range: [0,100].
Relevant when `initialize_method='HighVar'`.
    padding_X_clone: `bool`, optional (default: False)
        If true, select cells at the `later_time_point` that lack any clonal label, and
        generate a unique clonal label for each of them. This adds artificial clonal data,
        but it makes the best use of the state information, especially when there
        are very few clonal barcodes in the data.
normalization_mode: `int`, optional (default: 1)
Normalization method. Choice: [0,1].
0, single-cell normalization; 1, Clone normalization. The clonal
normalization suppresses the contribution of large
clones, and is much more robust.
smooth_array: `list`, optional (default: [15,10,5])
List of smooth rounds at initial runs of iteration.
Suppose that it has a length N. For iteration n<N, the n-th entry of
smooth_array determines the kernel exponent to build the S matrix at the n-th
iteration. When n>N, we use the last entry of smooth_array to compute
        the S matrix. We recommend starting with a larger smoothing depth and
        gradually reducing it, as inspired by simulated annealing. Data with higher
        clonal dispersion should start with a higher smoothing depth. The final depth
        should depend on the manifold itself: fewer cells yield a smaller KNN graph,
        so a smaller final depth should be used. We recommend using multiples of 5
        for computational efficiency, e.g.,
        smooth_array=[20, 15, 10, 5] or [20, 15, 10].
max_iter_N: `list`, optional (default: [1,5])
A list for maximum iterations for the Joint optimization and CoSpar core function, respectively.
epsilon_converge: `list`, optional (default: [0.05,0.05])
A list of convergence threshold for the Joint optimization and CoSpar core function, respectively.
The convergence threshold is for the change of map correlations between consecutive iterations.
        For the CoSpar core function, this convergence test is activated only after CoSpar has iterated at least 3 times.
CoSpar_KNN: `int`, optional (default: 20)
The number of neighbors for KNN graph used for computing the similarity matrix.
trunca_threshold: `list`, optional (default: [0.001,0.01])
Threshold to reset entries of a matrix to zero. The first entry is for
Similarity matrix; the second entry is for the Tmap.
This is only for computational and storage efficiency.
    sparsity_threshold: `float`, optional (default: 0.2)
The relative threshold to remove noises in the updated transition map,
in the range [0,1].
save_subset: `bool`, optional (default: True)
If true, save only Smatrix at smooth round [5,10,15,...];
Otherwise, save Smatrix at each round.
use_full_Smatrix: `bool`, optional (default: True)
If true, extract the relevant Smatrix from the full Smatrix defined by all cells.
This tends to be more accurate. The package is optimized around this choice.
use_fixed_clonesize_t1: `bool`, optional (default: False)
        If true, fix the number of initial states to be the same for all clones.
sort_clone: `int`, optional (default: 1)
The order to infer initial states for each clone: {1,-1,others}.
1, sort clones by size from small to large;
-1, sort clones by size from large to small;
others, do not sort.
compute_new: `bool`, optional (default: False)
If True, compute everything (ShortestPathDis, OT_map, etc.) from scratch,
whether it was computed and saved before or not. Regarding the Smatrix, it is
recomputed only when `use_full_Smatrix=False`.
use_existing_KNN_graph: `bool`, optional (default: False)
        If true and adata.obsp['connectivities'] exists, use the existing KNN graph
        to compute the shortest-path distance. Relevant if initialize_method='OT'.
This overrides all other relevant parameters for building shortest-path distance.
Returns
-------
adata: :class:`~anndata.AnnData` object
Update adata.obsm['X_clone'] and adata.uns['transition_map'],
as well as adata.uns['OT_transition_map'] or
adata.uns['HighVar_transition_map'], depending on the initialization.
adata_orig.obsm['X_clone'] remains the same.
"""
t0 = time.time()
hf.check_available_clonal_info(adata_orig)
clonal_time_points_0 = np.array(adata_orig.uns["clonal_time_points"])
time_ordering = adata_orig.uns["time_ordering"]
if len(clonal_time_points_0) == 0:
raise ValueError(
"No clonal time points available for this dataset. Please run cs.tmap.infer_Tmap_from_state_info_alone."
)
if later_time_point is None:
sel_idx_temp = np.in1d(time_ordering, clonal_time_points_0)
later_time_point = time_ordering[sel_idx_temp][-1]
if type(later_time_point) == list:
later_time_point = later_time_point[0]
# use the last clonal later time point
if initial_time_points is None:
sel_id_temp = np.nonzero(np.in1d(time_ordering, [later_time_point]))[0][0]
initial_time_points = time_ordering[:sel_id_temp]
sel_idx_temp = np.in1d(time_ordering, initial_time_points)
initial_time_points = list(time_ordering[sel_idx_temp])
if later_time_point in initial_time_points:
logg.warn(f"remove {later_time_point} from initial_time_points")
initial_time_points.remove(later_time_point)
hf.check_input_parameters(
adata_orig,
later_time_point=later_time_point,
initial_time_points=initial_time_points,
smooth_array=smooth_array,
save_subset=save_subset,
)
if initialize_method not in ["OT", "HighVar"]:
logg.warn(
"initialize_method not among ['OT','HighVar']. Use initialize_method='OT'"
)
initialize_method = "OT"
if OT_cost not in ["GED", "SPD"]:
logg.warn("OT_cost not among ['GED','SPD']. Use OT_cost='SPD'")
OT_cost = "SPD"
sp_idx = np.zeros(adata_orig.shape[0], dtype=bool)
time_info_orig = np.array(adata_orig.obs["time_info"])
all_time_points = list(initial_time_points) + [later_time_point]
label = "t"
for xx in all_time_points:
id_array = np.nonzero(time_info_orig == xx)[0]
sp_idx[id_array] = True
label = label + "*" + str(xx)
adata = adata_orig[sp_idx]
clone_annot_orig = adata_orig.obsm["X_clone"].copy()
data_des_orig = adata_orig.uns["data_des"][0]
data_des_0 = adata_orig.uns["data_des"][-1]
data_des = data_des_0 + f"_OneTimeClone_{label}"
adata.uns["data_des"] = [data_des_orig, data_des]
time_info = np.array(adata.obs["time_info"])
time_index_t2 = time_info == later_time_point
time_index_t1 = ~time_index_t2
## set cells without a clone ID to have a unique clone ID
if padding_X_clone:
logg.info("Generate a unique clonal label for each clonally unlabeled cell.")
time_index_t2_orig = time_info_orig == later_time_point
zero_clone_idx = clone_annot_orig[time_index_t2_orig].sum(1).A.flatten() == 0
clone_annot_t2_padding = np.diag(np.ones(np.sum(zero_clone_idx)))
non_zero_clones_idx = (
clone_annot_orig[time_index_t2_orig].sum(0).A.flatten() > 0
)
M0 = np.sum(non_zero_clones_idx)
M1 = clone_annot_t2_padding.shape[1]
clone_annot_new = np.zeros((clone_annot_orig.shape[0], M0 + M1))
clone_annot_new[:, :M0] = clone_annot_orig[:, non_zero_clones_idx].A
sp_id_t2 = np.nonzero(time_index_t2_orig)[0]
clone_annot_new[sp_id_t2[zero_clone_idx], M0:] = clone_annot_t2_padding
else:
clone_annot_new = clone_annot_orig
# remove clones without a cell at t2
valid_clone_id = np.nonzero(
clone_annot_new[time_info_orig == later_time_point].sum(0).A.flatten() > 0
)[0]
X_clone_temp = clone_annot_new[:, valid_clone_id]
adata_orig.obsm["X_clone"] = ssp.csr_matrix(X_clone_temp)
#### used for similarity matrix generation
Tmap_cell_id_t1 = np.nonzero(time_index_t1)[0]
Tmap_cell_id_t2 = np.nonzero(time_index_t2)[0]
adata.uns["Tmap_cell_id_t1"] = Tmap_cell_id_t1
adata.uns["Tmap_cell_id_t2"] = Tmap_cell_id_t2
adata.uns["clonal_cell_id_t1"] = Tmap_cell_id_t1
adata.uns["clonal_cell_id_t2"] = Tmap_cell_id_t2
adata.uns["sp_idx"] = sp_idx
data_path = settings.data_path
transition_map = np.zeros((len(Tmap_cell_id_t1), len(Tmap_cell_id_t2)))
ini_transition_map = np.zeros((len(Tmap_cell_id_t1), len(Tmap_cell_id_t2)))
X_clone_updated = adata_orig.obsm["X_clone"][
sp_idx
].A # this does not work well if there are empty clones to begin with
logg.info(
"--------Infer transition map between initial time points and the later time one-------"
)
for yy in initial_time_points:
logg.info(f"--------Current initial time point: {yy}--------")
adata_temp = infer_Tmap_from_one_time_clones_twoTime(
adata_orig,
selected_two_time_points=[yy, later_time_point],
initialize_method=initialize_method,
OT_epsilon=OT_epsilon,
OT_dis_KNN=OT_dis_KNN,
OT_cost=OT_cost,
HighVar_gene_pctl=HighVar_gene_pctl,
normalization_mode=normalization_mode,
sparsity_threshold=sparsity_threshold,
CoSpar_KNN=CoSpar_KNN,
use_full_Smatrix=use_full_Smatrix,
smooth_array=smooth_array,
trunca_threshold=trunca_threshold,
compute_new=compute_new,
use_fixed_clonesize_t1=use_fixed_clonesize_t1,
sort_clone=sort_clone,
save_subset=save_subset,
use_existing_KNN_graph=use_existing_KNN_graph,
max_iter_N=max_iter_N,
epsilon_converge=epsilon_converge,
)
temp_id_t1 = np.nonzero(time_info == yy)[0]
sp_id_t1 = hf.converting_id_from_fullSpace_to_subSpace(
temp_id_t1, Tmap_cell_id_t1
)[0]
transition_map_temp = adata_temp.uns["transition_map"].A
transition_map[sp_id_t1, :] = transition_map_temp
if initialize_method == "OT":
transition_map_ini_temp = adata_temp.uns["OT_transition_map"]
else:
transition_map_ini_temp = adata_temp.uns["HighVar_transition_map"]
ini_transition_map[sp_id_t1, :] = transition_map_ini_temp.A
# Update clonal prediction. This does not work well if there are empty clones to begin with
time_info_idx = np.array(adata_temp.obs["time_info"]) == yy
X_clone_updated[temp_id_t1, :] = adata_temp.obsm["X_clone"][time_info_idx].A
adata.uns["transition_map"] = ssp.csr_matrix(transition_map)
adata.obsm["X_clone"] = ssp.csr_matrix(X_clone_updated)
if initialize_method == "OT":
adata.uns["OT_transition_map"] = ssp.csr_matrix(ini_transition_map)
else:
adata.uns["HighVar_transition_map"] = ssp.csr_matrix(ini_transition_map)
adata_orig.obsm["X_clone"] = clone_annot_orig # reset to the original clonal matrix
logg.info(f"-----------Total used time: {time.time()-t0} s ------------")
return adata
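# --- Added illustrative sketch; not part of the original module. ---
# One-time-clone inference with the OT initialization. The time-point labels
# are hypothetical; `later_time_point` must be the time point carrying the
# clonal barcodes.
def _example_infer_Tmap_from_one_time_clones(adata_orig):
    adata = infer_Tmap_from_one_time_clones(
        adata_orig,
        initial_time_points=["day1", "day2"],  # hypothetical labels
        later_time_point="day3",  # hypothetical clonal time point
        initialize_method="OT",
        OT_epsilon=0.02,
    )
    # the OT-initialized map is stored alongside the jointly refined map
    return adata.uns["transition_map"], adata.uns["OT_transition_map"]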
# updated version: v1, we initialize the X_clone as isolated cells
def infer_Tmap_from_state_info_alone(
adata_orig,
initial_time_points=None,
later_time_point=None,
initialize_method="OT",
OT_epsilon=0.02,
OT_dis_KNN=5,
OT_cost="SPD",
HighVar_gene_pctl=85,
normalization_mode=1,
sparsity_threshold=0.2,
CoSpar_KNN=20,
use_full_Smatrix=True,
smooth_array=[15, 10, 5],
trunca_threshold=[0.001, 0.01],
compute_new=False,
max_iter_N=[1, 5],
epsilon_converge=[0.05, 0.05],
use_fixed_clonesize_t1=False,
sort_clone=1,
save_subset=True,
use_existing_KNN_graph=False,
):
"""
Infer transition map from state information alone.
    After initializing the clonal matrix such that each cell has a unique barcode,
it runs :func:`.infer_Tmap_from_one_time_clones` to infer the transition map.
Returns
-------
    adata will include both the inferred transition map and the updated X_clone matrix.
    The input, adata_orig, will keep its original X_clone matrix.
"""
##--------------- check input parameters
if "data_des" not in adata_orig.uns.keys():
adata_orig.uns["data_des"] = ["cospar"]
logg.info(
"Step I: Generate pseudo clones where each cell has a unique barcode-----"
)
if type(later_time_point) == list:
later_time_point = later_time_point[0]
hf.update_time_ordering(adata_orig, mode="auto")
time_ordering = adata_orig.uns["time_ordering"]
# use the last time point
if later_time_point is None:
later_time_point = time_ordering[-1]
if initial_time_points is None:
# use the time points preceding the last one.
sel_id_temp = np.nonzero(np.in1d(time_ordering, [later_time_point]))[0][0]
initial_time_points = time_ordering[:sel_id_temp]
else:
# re-order time points. This also gets rid of invalid time points
sel_idx_temp = np.in1d(time_ordering, initial_time_points)
if np.sum(sel_idx_temp) > 0:
initial_time_points = time_ordering[sel_idx_temp]
else:
raise ValueError(
f"The 'initial_time_points' are not valid. Please select from {time_ordering}"
)
    ##--------------- use the artificial clonal matrix
X_clone_0 = adata_orig.obsm["X_clone"].copy()
# adata_orig.obsm['X_clone_old']=adata_orig.obsm['X_clone'].copy()
X_clone = np.diag(np.ones(adata_orig.shape[0]))
adata_orig.obsm["X_clone"] = ssp.csr_matrix(X_clone)
logg.info("Step II: Perform joint optimization-----")
adata = infer_Tmap_from_one_time_clones(
adata_orig,
initial_time_points=initial_time_points,
later_time_point=later_time_point,
initialize_method=initialize_method,
OT_epsilon=OT_epsilon,
OT_dis_KNN=OT_dis_KNN,
OT_cost=OT_cost,
HighVar_gene_pctl=HighVar_gene_pctl,
normalization_mode=normalization_mode,
sparsity_threshold=sparsity_threshold,
CoSpar_KNN=CoSpar_KNN,
use_full_Smatrix=use_full_Smatrix,
smooth_array=smooth_array,
trunca_threshold=trunca_threshold,
compute_new=compute_new,
max_iter_N=max_iter_N,
epsilon_converge=epsilon_converge,
use_fixed_clonesize_t1=use_fixed_clonesize_t1,
sort_clone=sort_clone,
save_subset=save_subset,
use_existing_KNN_graph=use_existing_KNN_graph,
)
# only restore the original X_clone information to adata_orig.
# adata will carry the new structure
adata_orig.obsm["X_clone"] = X_clone_0
    all_time_points = list(initial_time_points) + [later_time_point]
    label = "t"
    for xx in all_time_points:
        label = label + "*" + str(xx)
data_des_orig = adata_orig.uns["data_des"][0]
data_des_0 = adata_orig.uns["data_des"][-1]
data_des = data_des_0 + f"_StateInfo_{label}"
adata.uns["data_des"] = [data_des_orig, data_des]
return adata
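# --- Added illustrative sketch; not part of the original module. ---
# State-information-only inference: no clonal data is required, since each
# cell is assigned its own pseudo-clone internally. The label and percentile
# below are hypothetical choices.
def _example_infer_Tmap_from_state_info_alone(adata_orig):
    adata = infer_Tmap_from_state_info_alone(
        adata_orig,
        later_time_point="day3",  # hypothetical label
        initialize_method="HighVar",
        HighVar_gene_pctl=85,
    )
    return adata.uns["transition_map"]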
def infer_Tmap_from_one_time_clones_twoTime(
adata_orig,
selected_two_time_points=["1", "2"],
initialize_method="OT",
OT_epsilon=0.02,
OT_dis_KNN=5,
OT_cost="SPD",
HighVar_gene_pctl=80,
normalization_mode=1,
sparsity_threshold=0.2,
CoSpar_KNN=20,
use_full_Smatrix=True,
smooth_array=[15, 10, 5],
max_iter_N=[1, 5],
epsilon_converge=[0.05, 0.05],
trunca_threshold=[0.001, 0.01],
compute_new=True,
use_fixed_clonesize_t1=False,
sort_clone=1,
save_subset=True,
joint_optimization=True,
use_existing_KNN_graph=False,
):
"""
    Infer transition map from clones with a single time point.
It is the same as :func:`.infer_Tmap_from_one_time_clones`, except that
it assumes that the input adata_orig has only two time points.
joint_optimization: `bool`, optional (default: True).
"""
time_info_orig = np.array(adata_orig.obs["time_info"])
sort_time_point = np.sort(list(set(time_info_orig)))
N_valid_time = np.sum(np.in1d(sort_time_point, selected_two_time_points))
if N_valid_time != 2:
logg.error(f"Must select only two time points among the list {sort_time_point}")
        # The second time point in this list (not necessarily the later time point) is assumed to have clonal data.
else:
####################################
logg.info("Step 0: Pre-processing and sub-sampling cells-------")
# select cells from the two time points, and sub-sampling, create the new adata object with these cell states
sp_idx = (time_info_orig == selected_two_time_points[0]) | (
time_info_orig == selected_two_time_points[1]
)
adata = adata_orig[sp_idx]
data_des_0 = adata_orig.uns["data_des"][-1]
data_des_orig = adata_orig.uns["data_des"][0]
data_des = (
data_des_0
+ f"_t*{selected_two_time_points[0]}*{selected_two_time_points[1]}"
)
adata.uns["data_des"] = [data_des_orig, data_des]
time_info = np.array(adata.obs["time_info"])
time_index_t1 = time_info == selected_two_time_points[0]
time_index_t2 = time_info == selected_two_time_points[1]
#### used for similarity matrix generation
Tmap_cell_id_t1 = np.nonzero(time_index_t1)[0]
Tmap_cell_id_t2 = np.nonzero(time_index_t2)[0]
adata.uns["Tmap_cell_id_t1"] = Tmap_cell_id_t1
adata.uns["Tmap_cell_id_t2"] = Tmap_cell_id_t2
adata.uns["clonal_cell_id_t1"] = Tmap_cell_id_t1
adata.uns["clonal_cell_id_t2"] = Tmap_cell_id_t2
adata.uns["sp_idx"] = sp_idx
data_path = settings.data_path
###############################
# prepare the similarity matrix with all state info, all subsequent similarity will be down-sampled from this one.
if use_full_Smatrix and (joint_optimization or (initialize_method != "OT")):
temp_str = "0" + str(trunca_threshold[0])[2:]
round_of_smooth = np.max(smooth_array)
data_des = adata_orig.uns["data_des"][0]
similarity_file_name = os.path.join(
data_path,
f"{data_des}_Similarity_matrix_with_all_cell_states_kNN{CoSpar_KNN}_Truncate{temp_str}",
)
if not (
os.path.exists(similarity_file_name + f"_SM{round_of_smooth}.npz")
and (not compute_new)
):
similarity_matrix_full = tmap_util.generate_similarity_matrix(
adata_orig,
similarity_file_name,
round_of_smooth=round_of_smooth,
neighbor_N=CoSpar_KNN,
truncation_threshold=trunca_threshold[0],
save_subset=save_subset,
compute_new_Smatrix=compute_new,
)
if initialize_method == "OT":
# logg.info("----------------")
logg.info("Step 1: Use OT method for initialization-------")
tmap_core.infer_Tmap_from_optimal_transport(
adata,
OT_epsilon=OT_epsilon,
OT_cost=OT_cost,
OT_dis_KNN=OT_dis_KNN,
compute_new=compute_new,
use_existing_KNN_graph=use_existing_KNN_graph,
)
OT_transition_map = adata.uns["OT_transition_map"]
initialized_map = OT_transition_map
else:
# logg.info("----------------")
logg.info("Step 1: Use the HighVar method for initialization-------")
t = time.time()
tmap_core.infer_Tmap_from_HighVar(
adata,
min_counts=3,
min_cells=3,
min_gene_vscore_pctl=HighVar_gene_pctl,
sparsity_threshold=sparsity_threshold,
neighbor_N=CoSpar_KNN,
normalization_mode=normalization_mode,
use_full_Smatrix=use_full_Smatrix,
smooth_array=smooth_array,
trunca_threshold=trunca_threshold,
compute_new_Smatrix=compute_new,
max_iter_N=max_iter_N[1],
epsilon_converge=epsilon_converge[1],
)
HighVar_transition_map = adata.uns["HighVar_transition_map"]
initialized_map = HighVar_transition_map
logg.info(
f"Finishing initialization using HighVar, used time {time.time()-t}"
)
if joint_optimization:
########### Jointly optimize the transition map and the initial clonal states
if selected_two_time_points[1] in adata_orig.uns["clonal_time_points"]:
# logg.info("----------------")
logg.info(
"Step 2: Jointly optimize the transition map and the initial clonal states-------"
)
t = time.time()
tmap_core.refine_Tmap_through_joint_optimization(
adata,
initialized_map,
normalization_mode=normalization_mode,
sparsity_threshold=sparsity_threshold,
CoSpar_KNN=CoSpar_KNN,
use_full_Smatrix=use_full_Smatrix,
smooth_array=smooth_array,
max_iter_N=max_iter_N,
epsilon_converge=epsilon_converge,
trunca_threshold=trunca_threshold,
compute_new=compute_new,
use_fixed_clonesize_t1=use_fixed_clonesize_t1,
sort_clone=sort_clone,
save_subset=save_subset,
)
logg.info(f"Finishing Joint Optimization, used time {time.time()-t}")
else:
logg.warn(
"No clonal information available. Skip the joint optimization of clone and scRNAseq data"
)
if "Smatrix" in adata.uns.keys():
adata.uns.pop("Smatrix")
return adata
def infer_Tmap_from_clonal_info_alone_private(
adata_orig, method="naive", clonal_time_points=None, selected_fates=None
):
"""
Compute transition map using only the lineage information.
Here, we compute the transition map between neighboring time points.
We simply average transitions across all clones (or selected clones when method='Weinreb'),
assuming that the intra-clone transition is uniform within the same clone.
Parameters
----------
adata_orig: :class:`~anndata.AnnData` object
method: `str`, optional (default: 'naive')
Method used to compute the transition map. Choice: {'naive',
'weinreb'}. For the naive method, we simply average transitions
across all clones, assuming that the intra-clone transitions are
uniform within the same clone. For the 'weinreb' method, we first
find uni-potent clones, then compute the transition map by simply
averaging across all clonal transitions as the naive method.
selected_fates: `list`, optional (default: all selected)
List of targeted fate clusters to define uni-potent clones for the
weinreb method, which are used to compute the transition map.
clonal_time_points: `list` of `str`, optional (default: all time points)
List of time points to be included for analysis.
We assume that each selected time point has clonal measurements.
Returns
-------
adata: :class:`~anndata.AnnData` object
The transition map is stored at adata.uns['clonal_transition_map']
"""
adata_1 = tmap_util.select_time_points(
adata_orig, time_point=clonal_time_points, extend_Tmap_space=True
)
if method not in ["naive", "weinreb"]:
logg.warn("method not in ['naive','weinreb']; set to be 'weinreb'")
method = "weinreb"
cell_id_t2_all = adata_1.uns["Tmap_cell_id_t2"]
cell_id_t1_all = adata_1.uns["Tmap_cell_id_t1"]
T_map = np.zeros((len(cell_id_t1_all), len(cell_id_t2_all)))
clone_annot = adata_1.obsm["X_clone"]
N_points = len(adata_1.uns["multiTime_cell_id_t1"])
for k in range(N_points):
cell_id_t1_temp = adata_1.uns["multiTime_cell_id_t1"][k]
cell_id_t2_temp = adata_1.uns["multiTime_cell_id_t2"][k]
if method == "naive":
logg.info("Use all clones (naive method)")
T_map_temp = clone_annot[cell_id_t1_temp] * clone_annot[cell_id_t2_temp].T
else:
logg.info("Use only uni-potent clones (weinreb et al., 2020)")
state_annote = np.array(adata_1.obs["state_info"])
            if selected_fates is None:
selected_fates = list(set(state_annote))
potential_vector_clone, fate_entropy_clone = tl.compute_state_potential(
clone_annot[cell_id_t2_temp].T,
state_annote[cell_id_t2_temp],
selected_fates,
fate_count=True,
)
sel_unipotent_clone_id = np.array(
list(set(np.nonzero(fate_entropy_clone == 1)[0]))
)
clone_annot_unipotent = clone_annot[:, sel_unipotent_clone_id]
T_map_temp = (
clone_annot_unipotent[cell_id_t1_temp]
* clone_annot_unipotent[cell_id_t2_temp].T
)
logg.info(
f"Used uni-potent clone fraction {len(sel_unipotent_clone_id)/clone_annot.shape[1]}"
)
idx_t1 = np.nonzero(np.in1d(cell_id_t1_all, cell_id_t1_temp))[0]
idx_t2 = np.nonzero(np.in1d(cell_id_t2_all, cell_id_t2_temp))[0]
idx_t1_temp = np.nonzero(np.in1d(cell_id_t1_temp, cell_id_t1_all))[0]
idx_t2_temp = np.nonzero(np.in1d(cell_id_t2_temp, cell_id_t2_all))[0]
T_map[idx_t1[:, np.newaxis], idx_t2] = T_map_temp[idx_t1_temp][:, idx_t2_temp].A
T_map = T_map.astype(int)
adata_1.uns["clonal_transition_map"] = ssp.csr_matrix(T_map)
return adata_1
# the v2 version; it has the same format as infer_Tmap_from_multitime_clones.
# We return a new adata object that will throw away existing annotations in uns.
def infer_Tmap_from_clonal_info_alone(
adata_orig,
method="naive",
clonal_time_points=None,
later_time_point=None,
selected_fates=None,
):
"""
Compute transition map using only the lineage information.
As in :func:`.infer_Tmap_from_multitime_clones`, we provide two modes of inference:
* If `later_time_point=None`, the inferred map allows transitions
between neighboring time points. For example, if
clonal_time_points=['day1','day2','day3'], then it computes transitions
for pairs (day1, day2) and (day2, day3), but not (day1, day3).
* If `later_time_point` is specified, the function produces a map
between earlier time points and this later time point. For example, if
      `later_time_point='day3'`, the map allows transitions for pairs (day1, day3)
and (day2, day3), but not (day1,day2).
Parameters
----------
adata_orig: :class:`~anndata.AnnData` object
method: `str`, optional (default: 'naive')
Method used to compute the transition map. Choice: {'naive',
'weinreb'}. For the naive method, we simply average transitions
across all clones, assuming that the intra-clone transitions are
uniform within the same clone. For the 'weinreb' method, we first
find uni-potent clones, then compute the transition map by simply
averaging across all clonal transitions as the naive method.
selected_fates: `list`, optional (default: all selected)
List of targeted fate clusters to define uni-potent clones for the
weinreb method, which are used to compute the transition map.
clonal_time_points: `list` of `str`, optional (default: all time points)
List of time points to be included for analysis.
We assume that each selected time point has clonal measurements.
later_time_points: `list`, optional (default: None)
If specified, the function will produce a map T between these early
time points among `clonal_time_points` and the `later_time_point`.
If not specified, it produces a map T between neighboring time points.
Returns
-------
adata: :class:`~anndata.AnnData` object
The transition map is stored at adata.uns['clonal_transition_map']
"""
hf.check_available_clonal_info(adata_orig)
clonal_time_points_0 = np.array(adata_orig.uns["clonal_time_points"])
if len(clonal_time_points_0) < 2:
raise ValueError("There are no multi-time clones. Abort the inference.")
if clonal_time_points is None:
clonal_time_points = clonal_time_points_0
if type(later_time_point) == list:
later_time_point = later_time_point[0]
if later_time_point is not None:
clonal_time_points = list(clonal_time_points) + [later_time_point]
clonal_time_points = list(set(clonal_time_points))
hf.check_input_parameters(
adata_orig,
later_time_point=later_time_point,
clonal_time_points=clonal_time_points,
)
# order the clonal time points
time_ordering = adata_orig.uns["time_ordering"]
sel_idx_temp = np.in1d(time_ordering, clonal_time_points)
clonal_time_points = time_ordering[sel_idx_temp]
if later_time_point is None:
logg.info("Infer transition map between neighboring time points.")
adata = infer_Tmap_from_clonal_info_alone_private(
adata_orig,
method=method,
clonal_time_points=clonal_time_points,
selected_fates=selected_fates,
)
return adata
else:
logg.info(
f"Infer transition map between initial time points and the later time point."
)
# compute transition map between initial time points and the later time point
sel_id = np.nonzero(np.in1d(clonal_time_points, later_time_point))[0][0]
initial_time_points = clonal_time_points[:sel_id]
time_info_orig = np.array(adata_orig.obs["time_info"])
sp_idx = np.zeros(adata_orig.shape[0], dtype=bool)
all_time_points = list(initial_time_points) + [later_time_point]
label = "t"
for xx in all_time_points:
id_array = np.nonzero(time_info_orig == xx)[0]
sp_idx[id_array] = True
label = label + "*" + str(xx)
adata = adata_orig[sp_idx]
data_des_orig = adata_orig.uns["data_des"][0]
data_des_0 = adata_orig.uns["data_des"][-1]
data_des = data_des_0 + f"_ClonalMap_Later_{label}"
adata_orig.uns["data_des"] = [data_des_orig, data_des]
time_info = np.array(adata_orig.obs["time_info"])
time_index_t2 = time_info == later_time_point
time_index_t1 = ~time_index_t2
#### used for similarity matrix generation
Tmap_cell_id_t1 = np.nonzero(time_index_t1)[0]
Tmap_cell_id_t2 = np.nonzero(time_index_t2)[0]
adata.uns["Tmap_cell_id_t1"] = Tmap_cell_id_t1
adata.uns["Tmap_cell_id_t2"] = Tmap_cell_id_t2
adata.uns["clonal_cell_id_t1"] = Tmap_cell_id_t1
adata.uns["clonal_cell_id_t2"] = Tmap_cell_id_t2
adata.uns["sp_idx"] = sp_idx
data_path = settings.data_path
transition_map = np.zeros((len(Tmap_cell_id_t1), len(Tmap_cell_id_t2)))
# logg.info("------Infer transition map between initial time points and the later time one-------")
for yy in initial_time_points:
logg.info(f"--------Current initial time point: {yy}--------")
# by default, we extend the state space to all cells at the given time point.
adata_temp = infer_Tmap_from_clonal_info_alone_private(
adata_orig,
method=method,
clonal_time_points=[yy, later_time_point],
selected_fates=selected_fates,
)
temp_id_t1 = np.nonzero(time_info == yy)[0]
sp_id_t1 = hf.converting_id_from_fullSpace_to_subSpace(
temp_id_t1, Tmap_cell_id_t1
)[0]
# by default, we extend the state space to all cells at the given time point.
# so we only need to care about t1.
transition_map[sp_id_t1, :] = adata_temp.uns["clonal_transition_map"].A
adata.uns["clonal_transition_map"] = ssp.csr_matrix(transition_map)
return adata
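# --- Added illustrative sketch; not part of the original module. ---
# Clonal-information-only inference, showing the two modes described in the
# docstring above. Time-point labels are hypothetical placeholders.
def _example_infer_Tmap_from_clonal_info_alone(adata_orig):
    # transitions between neighboring time points: (day1, day2), (day2, day3)
    adata_a = infer_Tmap_from_clonal_info_alone(
        adata_orig, method="naive", clonal_time_points=["day1", "day2", "day3"]
    )
    # transitions into a fixed later time point: (day1, day3), (day2, day3)
    adata_b = infer_Tmap_from_clonal_info_alone(
        adata_orig, method="weinreb", later_time_point="day3"
    )
    return adata_a.uns["clonal_transition_map"], adata_b.uns["clonal_transition_map"]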
|
{"hexsha": "7bedaa6d09c058a4cfa7c7c81903434b417d19a2", "size": 51743, "ext": "py", "lang": "Python", "max_stars_repo_path": "cospar/tmap/map_reconstruction.py", "max_stars_repo_name": "AllonKleinLab/cospar", "max_stars_repo_head_hexsha": "6d2028717a048db7ad79b0cdb6f25910b6901eec", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 19, "max_stars_repo_stars_event_min_datetime": "2020-12-31T22:59:32.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T20:10:04.000Z", "max_issues_repo_path": "cospar/tmap/map_reconstruction.py", "max_issues_repo_name": "AllonKleinLab/cospar", "max_issues_repo_head_hexsha": "6d2028717a048db7ad79b0cdb6f25910b6901eec", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 13, "max_issues_repo_issues_event_min_datetime": "2021-04-06T16:33:55.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T21:32:53.000Z", "max_forks_repo_path": "cospar/tmap/map_reconstruction.py", "max_forks_repo_name": "AllonKleinLab/cospar", "max_forks_repo_head_hexsha": "6d2028717a048db7ad79b0cdb6f25910b6901eec", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-05-20T22:38:27.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-26T12:09:31.000Z", "avg_line_length": 42.412295082, "max_line_length": 161, "alphanum_fraction": 0.6644377017, "include": true, "reason": "import numpy,import scipy", "num_tokens": 12688}
|
SUBROUTINE HOPEN ( iret )
C************************************************************************
C* HOPEN - TIFF *
C* *
C* This subroutine opens a plot file for the device. *
C* *
C* HOPEN ( IRET ) *
C* *
C* Output parameters: *
C* IRET INTEGER Return code *
C** *
C* Log: *
C* S. Jacobs/NCEP 12/98 *
C************************************************************************
C------------------------------------------------------------------------
CALL TSOPEN ( iret )
C*
RETURN
END
|
{"hexsha": "fb3defe0a89c9b05ba301b3302e8835f2fed2320", "size": 563, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "gempak/source/driver/active/tiff/hopen.f", "max_stars_repo_name": "oxelson/gempak", "max_stars_repo_head_hexsha": "e7c477814d7084c87d3313c94e192d13d8341fa1", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 42, "max_stars_repo_stars_event_min_datetime": "2015-06-03T15:26:21.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-28T22:36:03.000Z", "max_issues_repo_path": "gempak/source/driver/active/tiff/hopen.f", "max_issues_repo_name": "oxelson/gempak", "max_issues_repo_head_hexsha": "e7c477814d7084c87d3313c94e192d13d8341fa1", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 60, "max_issues_repo_issues_event_min_datetime": "2015-05-11T21:36:08.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-29T16:22:42.000Z", "max_forks_repo_path": "gempak/source/driver/active/tiff/hopen.f", "max_forks_repo_name": "oxelson/gempak", "max_forks_repo_head_hexsha": "e7c477814d7084c87d3313c94e192d13d8341fa1", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 27, "max_forks_repo_forks_event_min_datetime": "2016-06-06T21:55:14.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-18T18:23:28.000Z", "avg_line_length": 28.15, "max_line_length": 73, "alphanum_fraction": 0.3019538188, "num_tokens": 135}
|
"""This is the Bokeh charts interface. It gives you a high level API to build
complex plots in a simple way.
This is the Line class, which lets you build your Line charts by just
passing the arguments to the Chart class and calling the proper functions.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENCE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from six import string_types
import numpy as np
from ._chartobject import ChartObject
from ..models import ColumnDataSource, Range1d, DataRange1d
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class Line(ChartObject):
"""This is the Line class and it is in charge of plotting
Line charts in an easy and intuitive way.
Essentially, we provide a way to ingest the data, make the proper
calculations and push the references into a source object.
We additionally make calculations for the ranges.
And finally add the needed lines taking the references from the source.
"""
def __init__(self, values, index=None,
title=None, xlabel=None, ylabel=None, legend=False,
xscale="linear", yscale="linear", width=800, height=600,
tools=True, filename=False, server=False, notebook=False,
facet=False, xgrid=True, ygrid=True):
"""
Args:
values (iterable): iterable 2d representing the data series
values matrix.
index (str|1d iterable, optional): can be used to specify a
common custom index for all data series as follows:
- As a 1d iterable of any sort that will be used as
series common index
- As a string that corresponds to the key of the
mapping to be used as index (and not as data
                        series) if line.values is a mapping (like a dict,
an OrderedDict or a pandas DataFrame)
title (str, optional): the title of your chart. Defaults
to None.
xlabel (str, optional): the x-axis label of your chart.
Defaults to None.
ylabel (str, optional): the y-axis label of your chart.
Defaults to None.
legend (str, optional): the legend of your chart. The legend
                content is inferred from the incoming input. It can be
                ``top_left``, ``top_right``, ``bottom_left`` or
                ``bottom_right``. ``top_right`` is used if you simply
                pass True. Defaults to None.
xscale (str, optional): the x-axis type scale of your chart.
It can be ``linear``, ``datetime`` or ``categorical``.
                Defaults to ``linear``.
yscale (str, optional): the y-axis type scale of your chart.
It can be ``linear``, ``datetime`` or ``categorical``.
Defaults to ``linear``.
width (int, optional): the width of your chart in pixels.
Defaults to 800.
            height (int, optional): the height of your chart in pixels.
Defaults to 600.
tools (bool, optional): to enable or disable the tools in
your chart. Defaults to True
filename (str or bool, optional): the name of the file where
your chart. will be written. If you pass True to this
argument, it will use ``untitled`` as a filename.
Defaults to False.
server (str or bool, optional): the name of your chart in
the server. If you pass True to this argument, it will
use ``untitled`` as the name in the server.
Defaults to False.
notebook (bool, optional): whether to output to IPython notebook
(default: False)
            facet (bool, optional): generate multiple lines on multiple
                separate charts for each series if True. Defaults to
                False.
xgrid (bool, optional): whether to display x grid lines
(default: True)
ygrid (bool, optional): whether to display y grid lines
(default: True)
Attributes:
source (obj): datasource object for your plot,
initialized as a dummy None.
            xdr (obj): x-associated datarange object for your plot,
                initialized as a dummy None.
            ydr (obj): y-associated datarange object for your plot,
                initialized as a dummy None.
groups (list): to be filled with the incoming groups of data.
Useful for legend construction.
data (dict): to be filled with the incoming data and be passed
to the ColumnDataSource in each chart inherited class.
                Needed for the set_and_get method.
            attr (list): to be filled with the new attributes created after
                loading the data dict.
                Needed for the set_and_get method.
"""
self.values = values
self.source = None
self.xdr = None
self.ydr = None
        # list to save all the groups available in the incoming input
self.groups = []
self.data = dict()
self.attr = []
self.index = index
super(Line, self).__init__(
title, xlabel, ylabel, legend, xscale, yscale, width, height,
tools, filename, server, notebook, facet, xgrid, ygrid
)
def get_data(self):
"""Calculate the chart properties accordingly from line.values.
Then build a dict containing references to all the points to be
used by the line glyph inside the ``draw`` method.
"""
self.data = dict()
# list to save all the attributes we are going to create
self.attr = []
xs = self.values_index
self.set_and_get("x", "", np.array(xs))
for col in self.values.keys():
if isinstance(self.index, string_types) and col == self.index:
continue
# save every new group we find
self.groups.append(col)
values = [self.values[col][x] for x in xs]
self.set_and_get("y_", col, values)
def get_source(self):
"""
Push the Line data into the ColumnDataSource and calculate the
proper ranges.
"""
self.source = ColumnDataSource(self.data)
self.xdr = DataRange1d(sources=[self.source.columns("x")])
y_names = self.attr[1:]
endy = max(max(self.data[i]) for i in y_names)
starty = min(min(self.data[i]) for i in y_names)
self.ydr = Range1d(
start=starty - 0.1 * (endy - starty),
end=endy + 0.1 * (endy - starty)
)
def draw(self):
"""Use the line glyphs to connect the xy points in the Line.
Takes reference points from the data loaded at the ColumnDataSource.
"""
colors = self._set_colors(self.attr)
for i, duplet in enumerate(self.attr[1:], start=1):
self.chart.make_line(self.source, 'x', duplet, colors[i - 1])
if i < len(self.attr[1:]):
self.create_plot_if_facet()
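# --- Added illustrative sketch; not part of the original file. ---
# A minimal way one might build a Line chart with the class above, assuming
# the classic bokeh.charts workflow in which ChartObject exposes show().
# The data values and the output filename are hypothetical.
def _example_line_chart():
    xyvalues = dict(python=[2, 3, 7, 5, 26], pypy=[12, 33, 47, 15, 126])
    line = Line(xyvalues, title="Interpreters", xlabel="step", ylabel="time",
                legend="top_left", filename="lines.html")
    line.show()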
|
{"hexsha": "92a953032bff298260ec529bd6b4bc5d47b2f360", "size": 7800, "ext": "py", "lang": "Python", "max_stars_repo_path": "bokeh/charts/line.py", "max_stars_repo_name": "brian15co/bokeh", "max_stars_repo_head_hexsha": "6cecb7211277b9d838039d0eb15e50a10f9ac3d1", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-09-01T12:36:06.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-17T10:48:36.000Z", "max_issues_repo_path": "bokeh/charts/line.py", "max_issues_repo_name": "brian15co/bokeh", "max_issues_repo_head_hexsha": "6cecb7211277b9d838039d0eb15e50a10f9ac3d1", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "bokeh/charts/line.py", "max_forks_repo_name": "brian15co/bokeh", "max_forks_repo_head_hexsha": "6cecb7211277b9d838039d0eb15e50a10f9ac3d1", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.5754189944, "max_line_length": 78, "alphanum_fraction": 0.5516666667, "include": true, "reason": "import numpy", "num_tokens": 1564}
|
[STATEMENT]
lemma [iff]: "P,E,h \<turnstile> e\<^sub>1;;e\<^sub>2 :' T\<^sub>2 = (\<exists>T\<^sub>1. P,E,h \<turnstile> e\<^sub>1:' T\<^sub>1 \<and> P,E,h \<turnstile> e\<^sub>2:' T\<^sub>2)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (P,E,h \<turnstile> e\<^sub>1;; e\<^sub>2 :' T\<^sub>2) = (\<exists>T\<^sub>1. P,E,h \<turnstile> e\<^sub>1 :' T\<^sub>1 \<and> P,E,h \<turnstile> e\<^sub>2 :' T\<^sub>2)
[PROOF STEP]
apply(rule iffI)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. P,E,h \<turnstile> e\<^sub>1;; e\<^sub>2 :' T\<^sub>2 \<Longrightarrow> \<exists>T\<^sub>1. P,E,h \<turnstile> e\<^sub>1 :' T\<^sub>1 \<and> P,E,h \<turnstile> e\<^sub>2 :' T\<^sub>2
2. \<exists>T\<^sub>1. P,E,h \<turnstile> e\<^sub>1 :' T\<^sub>1 \<and> P,E,h \<turnstile> e\<^sub>2 :' T\<^sub>2 \<Longrightarrow> P,E,h \<turnstile> e\<^sub>1;; e\<^sub>2 :' T\<^sub>2
[PROOF STEP]
apply (auto elim: WTrt'.cases intro!:WTrt'_WTrts'.intros)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
|
{"llama_tokens": 489, "file": "CoreC++_Progress", "length": 3}
|
import cv2
import imutils
import numpy as np
cam = cv2.VideoCapture(0)
cv2.namedWindow("test")
img_counter = 0
ret, frame = cam.read()
if not ret:
    raise RuntimeError('Failed to read a frame from the camera')
# cv2.imshow("test", frame)
k = cv2.waitKey(1)
# hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
# show image
blueLower = (98, 109, 20)
blueUpper = (112, 255, 255)
frame = imutils.resize(frame, width=600)
frame = cv2.bilateralFilter(frame,9,75,75)
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
# cv2.imshow('hsv', hsv)
# threshold in HSV space (blueLower/blueUpper are HSV bounds), then erode and
# dilate to remove small blobs left in the mask
mask = cv2.inRange(hsv, blueLower, blueUpper)
mask = cv2.erode(mask, None, iterations=2)
mask = cv2.dilate(mask, None, iterations=2)
cnts = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours = imutils.grab_contours(cnts)  # handles OpenCV 3.x/4.x return styles
for c in contours:
# get the bounding rect
x, y, w, h = cv2.boundingRect(c)
# draw a green rectangle to visualize the bounding rect
cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
# get the min area rect
rect = cv2.minAreaRect(c)
box = cv2.boxPoints(rect)
# convert all coordinates floating point values to int
box = np.int0(box)
# draw a red 'nghien' rectangle
cv2.drawContours(frame, [box], 0, (0, 0, 255))
    print('Blue box detected')
print(len(contours))
# # SPACE pressed
# img_name = "opencv_frame_{}.png".format(img_counter)
# # cv2.imwrite(img_name, frame)
# print("{} written!".format(img_name))
# img_counter += 1
cam.release()
cv2.destroyAllWindows()
|
{"hexsha": "ea7363e724e238956bd7dd5383b990cce8a7f77a", "size": 1451, "ext": "py", "lang": "Python", "max_stars_repo_path": "test.py", "max_stars_repo_name": "akilawickey/irobot", "max_stars_repo_head_hexsha": "cec600889a7244ed047b25e7b88d4c846a1681a6", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test.py", "max_issues_repo_name": "akilawickey/irobot", "max_issues_repo_head_hexsha": "cec600889a7244ed047b25e7b88d4c846a1681a6", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test.py", "max_forks_repo_name": "akilawickey/irobot", "max_forks_repo_head_hexsha": "cec600889a7244ed047b25e7b88d4c846a1681a6", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.1833333333, "max_line_length": 61, "alphanum_fraction": 0.7008959338, "include": true, "reason": "import numpy", "num_tokens": 452}
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0.html
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from predictor_dl_model.trainer.client_rest import predict
import numpy as np
from datetime import datetime
from datetime import timedelta
def calculate_MAPE_ERROR(testing_values:list, predicted_values:list) -> float:
MAPE_ERROR = np.average(np.divide(np.abs(np.subtract(testing_values, predicted_values)), predicted_values))
return MAPE_ERROR
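# Quick sanity check of the helper above (hypothetical numbers, not from the
# test below): calculate_MAPE_ERROR([100, 200], [110, 180])
#   = mean(|100 - 110| / 110, |200 - 180| / 180) = mean(0.0909, 0.1111) ≈ 0.101
# Note that the denominator is the predicted value, not the actual value.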
def test_dlpredictor_api():
TENSORFLOW_SERVICEAPI_URL = "http://10.193.217.105:8501/v1/models/bob:predict"
model_stats = {
"model": {
"name": "s32",
"version": 1,
"duration": 90,
"train_window": 60,
"predict_window": 10, # should be <= FORWARD_OFFSET in seq2seq model
"FORWARD_OFFSET": 11
},
"stats": {# Mean and variance ?
"g_g_m": [0.32095959595959594, 0.4668649491714752],
"g_g_f": [0.3654040404040404, 0.4815635452904544],
"g_g_x": [0.31363636363636366, 0.46398999646418304],
"a_1": [0.198989898989899, 0.3992572317838901],
"a_2": [0.2474747474747475, 0.4315630593164027],
"a_3": [0.295959595959596, 0.45649211860504146],
"a_4": [0.25757575757575757, 0.43731748751040456],
"t_3G": [0.3565656565656566, 0.4790051176675845],
"t_4G": [0.29772727272727273, 0.45727819223458205],
"t_5G": [0.3457070707070707, 0.4756182644159981],
"si_1": [0.37424242424242427, 0.4839470491115894],
"si_2": [0.4042929292929293, 0.49077533664980666],
"si_3": [0.22146464646464648, 0.4152500106648333],
"price_cat_0": [0.0, 1.0],
"price_cat_1": [0.3333333333333333, 0.4714243623012701],
"price_cat_2": [0.3333333333333333, 0.47142436230126994],
"price_cat_3": [0.3333333333333333, 0.47142436230126994],
"holiday_stats": [0.044444444444444446, 0.20723493215097805],
"page_popularity": [3.9093487, 0.7969047]
}
}
duration = model_stats['model']['duration']
starting_day = datetime.strptime('2018-01-01', '%Y-%m-%d')
days = [datetime.strftime(starting_day + timedelta(days=i), '%Y-%m-%d') for i in range(duration)]
x = [7.609366416931152, 4.418840408325195, 4.787491798400879, 4.9972124099731445, 4.584967613220215,
4.394449234008789, 5.998936653137207, 6.375024795532227, 4.8903489112854, 4.477336883544922,
4.983606815338135, 4.787491798400879, 4.304065227508545, 6.040254592895508, 7.587817192077637,
5.176149845123291, 4.477336883544922, 4.8903489112854, 4.934473991394043, 4.875197410583496,
5.849324703216553, 6.278521537780762, 4.8978400230407715, 5.2257466316223145, 4.875197410583496,
5.24174690246582, 4.7004804611206055, 6.115891933441162, 6.514712810516357, 4.744932174682617,
4.905274868011475, 4.955827236175537, 5.036952495574951, 4.770684719085693, 6.079933166503906,
6.388561248779297, 5.0434250831604, 5.105945587158203, 5.1704840660095215, 4.682131290435791,
5.135798454284668, 6.0450053215026855, 6.398594856262207, 4.72738790512085, 4.007333278656006,
4.543294906616211, 5.023880481719971, 4.762174129486084, 6.03308629989624, 7.585280895233154,
4.8978400230407715, 4.465908050537109, 4.653960227966309, 4.394449234008789, 4.934473991394043,
5.828945636749268, 6.548219203948975, 4.969813346862793, 4.9904327392578125, 4.595119953155518,
4.787491798400879, 4.564348220825195, 5.746203422546387, 6.513230323791504, 4.976733684539795,
4.510859489440918, 5.003946304321289, 4.430816650390625, 3.828641414642334, 5.902633190155029,
6.473890781402588, 4.779123306274414, 4.8903489112854, 4.905274868011475, 5.075173854827881,
5.135798454284668, 6.073044300079346, 6.7405195236206055, 5.111987590789795, 4.691348075866699,
4.465908050537109, 5.075173854827881, 4.770684719085693, 6.154858112335205, 6.546785354614258,
4.7004804611206055, 4.174387454986572, 5.068904399871826, 4.543294906616211, 5.817111015319824]
full_record = np.round(np.expm1(x))
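    # The x values above are presumably log1p-scaled counts (note the ~4-7 range);
    # np.expm1 inverts that transform, and rounding recovers integer-valued records.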
hours = [i for i in range(0,24)]
price_categories = [i for i in range(0, 4)]
records_hour_price_category_list = []
for hour in hours:
for price_category in price_categories:
records_hour_price_category_list.append((full_record, hour, price_category))
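    # 24 hours x 4 price categories -> 96 (record, hour, price_category) tuples,
    # so the service is expected to return one prediction series per combination.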
response = predict(TENSORFLOW_SERVICEAPI_URL, model_stats=model_stats, day_list=days,
uckey='magazinelock,1,3G,g_f,2,pt,1004,icc,2,11',
age='2', si='1', network='3G', gender='g_f',
media='', ip_location='', records_hour_price_list=records_hour_price_category_list)
print('Returned the response after calling ' + TENSORFLOW_SERVICEAPI_URL)
print(response)
predicted_values_list = response[0]
prediction_window = model_stats['model']['predict_window']
forward_offset = model_stats['model']['FORWARD_OFFSET']
final_start = duration - forward_offset # 90 - 11 = 79
final_end = final_start + prediction_window
testing_values = full_record[final_start:final_end]
mape_errors = []
for i in range(len(predicted_values_list)):
prediction_hour_price_category = predicted_values_list[i]
print('\nPrediction: hour ' + str(i // 4) + ', price category ' + str(i % 4 + 1))
predicted_values = prediction_hour_price_category
mape_error = round(calculate_MAPE_ERROR(testing_values, predicted_values), 4)
mape_errors.append(mape_error)
print("testing values: " + str(testing_values) + "; predicted values: " + str(predicted_values))
print('MAPE error value based on this prediction: ' + str(mape_error)+ ' (' + str(mape_error * 100) + '%)')
print('\n\nThe MAPE errors: ' + str(mape_errors))
print('The average MAPE error: ' + str(round(sum(mape_errors) / len(mape_errors), 4)))
if __name__ == "__main__":
test_dlpredictor_api()
|
{"hexsha": "ef79140edc8e0fad1c78b2452a8f0cdf0386d872", "size": 6730, "ext": "py", "lang": "Python", "max_stars_repo_path": "Model/predictor-dl-model/tests/trainer/api_test/client_rest_api_test.py", "max_stars_repo_name": "rangaswamymr/incubator-bluemarlin", "max_stars_repo_head_hexsha": "6cb60b2a41edc6509377f9eacb7660d199a9485b", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 21, "max_stars_repo_stars_event_min_datetime": "2019-10-08T16:23:44.000Z", "max_stars_repo_stars_event_max_datetime": "2020-04-08T23:14:36.000Z", "max_issues_repo_path": "Model/predictor-dl-model/tests/trainer/api_test/client_rest_api_test.py", "max_issues_repo_name": "rangaswamymr/incubator-bluemarlin", "max_issues_repo_head_hexsha": "6cb60b2a41edc6509377f9eacb7660d199a9485b", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 162, "max_issues_repo_issues_event_min_datetime": "2019-10-26T05:30:04.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T12:44:41.000Z", "max_forks_repo_path": "Model/predictor-dl-model/tests/trainer/api_test/client_rest_api_test.py", "max_forks_repo_name": "rangaswamymr/incubator-bluemarlin", "max_forks_repo_head_hexsha": "6cb60b2a41edc6509377f9eacb7660d199a9485b", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 33, "max_forks_repo_forks_event_min_datetime": "2019-10-09T01:31:12.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-29T08:00:36.000Z", "avg_line_length": 57.0338983051, "max_line_length": 115, "alphanum_fraction": 0.7046062407, "include": true, "reason": "import numpy", "num_tokens": 2252}
|
'''
Show all different interpolation methods for imshow
'''
import matplotlib.pyplot as plt
import numpy as np
# from the docs:
# If interpolation is None, default to rc image.interpolation. See also
# the filternorm and filterrad parameters. If interpolation is 'none', then
# no interpolation is performed on the Agg, ps and pdf backends. Other
# backends will fall back to 'nearest'.
#
# http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.imshow
methods = [None, 'none', 'nearest', 'bilinear', 'bicubic', 'spline16',
'spline36', 'hanning', 'hamming', 'hermite', 'kaiser', 'quadric',
'catrom', 'gaussian', 'bessel', 'mitchell', 'sinc', 'lanczos']
np.random.seed(0)
grid = np.random.rand(4, 4)
fig, axes = plt.subplots(3, 6, figsize=(12, 6),
subplot_kw={'xticks': [], 'yticks': []})
fig.subplots_adjust(hspace=0.3, wspace=0.05)
for ax, interp_method in zip(axes.flat, methods):
ax.imshow(grid, interpolation=interp_method, cmap='viridis')
ax.set_title(interp_method)
plt.show()
|
{"hexsha": "4977368b49ae3b1f297f48713f9c211432eca868", "size": 1052, "ext": "py", "lang": "Python", "max_stars_repo_path": "matplotlib_examples/examples_src/images_contours_and_fields/interpolation_methods.py", "max_stars_repo_name": "xzlmark/webspider", "max_stars_repo_head_hexsha": "133c620c65aa45abea1718b0dada09618c2115bf", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-04-09T02:35:26.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-27T17:00:21.000Z", "max_issues_repo_path": "matplotlib_examples/examples_src/images_contours_and_fields/interpolation_methods.py", "max_issues_repo_name": "colorworlds/webspider", "max_issues_repo_head_hexsha": "133c620c65aa45abea1718b0dada09618c2115bf", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "matplotlib_examples/examples_src/images_contours_and_fields/interpolation_methods.py", "max_forks_repo_name": "colorworlds/webspider", "max_forks_repo_head_hexsha": "133c620c65aa45abea1718b0dada09618c2115bf", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-04-09T02:35:08.000Z", "max_forks_repo_forks_event_max_datetime": "2020-04-09T02:35:08.000Z", "avg_line_length": 30.9411764706, "max_line_length": 76, "alphanum_fraction": 0.6815589354, "include": true, "reason": "import numpy", "num_tokens": 285}
|
# coding:utf-8
"""
create Wangmeng Song
July 4,2017
overwrite by Wangmeng Song
July 17,2017
修改固定上车时间
July 20,2017
"""
import shapefile as sf
from shapely.geometry import Polygon, Point, LinearRing
import os
import datetime
import numpy as np
import inspect
import copy
import json
import requests
PICKTIME = 3
DIFDURATION = 5
SAMEDURATION = 4
BMAPAIRPORTCOORDINATE = [30.574590, 103.955020]  # Chengdu airport
TIANFUSQUIRE = [30.604043, 104.074086]  # Tianfu Square
filedir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) + "/" + "area"
filename = ['region1.dbf', 'region2.dbf', 'region3.dbf', 'region4.dbf', 'region5.dbf', 'region6.dbf']
# Test environment
# TimeTableInfoURL = 'https://prerelease.jichangzhuanxian.com/api/ShiftTime/GetShiftTimeByTakeOffTime'
# Production environment
TimeTableInfoURL = 'https://mgr.jichangzhuanxian.com/api/ShiftTime/GetShiftTimeByTakeOffTime'
class TIMEANDAREA:
def __init__(self, pickuptime, area):
self.pickuptime = pickuptime
self.area = area
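# Note: the code below never instantiates TIMEANDAREA; getpickuptime assigns to
# the class attributes directly and returns the class object itself as a record,
# so concurrent calls would overwrite each other's results.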
class RECOMDTIME:
def getorderareanum(self, lng, lat):
point = Point(lng, lat)
# getdatetime = resdict['date']+resdict['startTime']
# print type(getdatetime), getdatetime
# dateTime = datetime.datetime.strptime(getdatetime, "%Y-%m-%d%H:%M")
# print type(datetime), dateTime
# filename = self.getTxtNmae()
# print "filename", filename
for i in range(len(filename)):
tmpfilename = filedir + "/" + filename[i]
# print "tmpfilename:", tmpfilename
polys = sf.Reader(tmpfilename)
polygon = polys.shapes()
shpfilePoints = []
for shape in polygon:
shpfilePoints = shape.points
polygon = Polygon(shpfilePoints)
if polygon.contains(point):
# getonthncartime = dateTime + datetime.timedelta(minutes=i*10)
areanum = i
# print "上车时间:", getonthncartime
return areanum
else:
continue
# print "no data"
def getOrderLocVec(self, orderLocList):
orderLocVec = np.zeros([len(orderLocList), 2], dtype=float)
for i in range(len(orderLocList)):
orderLocVec[i][0] = orderLocList[i][0]
orderLocVec[i][1] = orderLocList[i][1]
return orderLocVec
def getpickuptime(self, resdict):
# pktimeandarea = []
point = Point(resdict['bdlng'], resdict['bdlat'])
getdatetime = resdict['date']+resdict['startTime']
# print type(getdatetime), getdatetime
dateTime = datetime.datetime.strptime(getdatetime, "%Y-%m-%d%H:%M")
# print type(datetime), dateTime
# filename = self.getTxtNmae()
# print "filename", filename
for i in range(len(filename)):
tmpfilename = filedir + "/" + filename[i]
# print "tmpfilename:", tmpfilename
polys = sf.Reader(tmpfilename)
polygon = polys.shapes()
shpfilePoints = []
for shape in polygon:
shpfilePoints = shape.points
polygon = Polygon(shpfilePoints)
if polygon.contains(point):
getonthncartime = dateTime + datetime.timedelta(minutes=i*10)
TIMEANDAREA.pickuptime = getonthncartime
TIMEANDAREA.area = i
# print "上车时间:", getonthncartime
# pktimeandarea.append(getonthncartime)
# pktimeandarea.append(i)
break
else:
continue
# print "no data"
return TIMEANDAREA
def getonthecardata(self, resdict):
from sendtoairport import auxfn
neworderinfo = copy.copy(resdict)
orderdisVec = []
for order in neworderinfo:
orderdisVec.append((order['bdlat'], order['bdlng']))
orderVec = self.getOrderLocVec(orderdisVec)
disVec = auxfn.calcDistVec(TIANFUSQUIRE, orderVec)
distSort = sorted(range(len(disVec)), key=lambda k: disVec[k], reverse=True)
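        # Orders are visited farthest-first from TIANFUSQUIRE; pickup times then
        # accumulate below as a fixed boarding time plus a per-zone travel penalty.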
        if len(distSort) == 1:  # case: the vehicle holds a single order
            if 'pickupTime' not in neworderinfo[distSort[0]].keys() or neworderinfo[distSort[0]]['pickupTime'] is None or neworderinfo[distSort[0]]['pickupTime'] == u"":
timeandare = self.getpickuptime(neworderinfo[0])
# unixpickuptime = int(time.mktime((timeandare.pickuptime).timetuple()))
neworderinfo[0]['pickupTime'] = str(timeandare.pickuptime)
else:
strpickuptime = neworderinfo[distSort[0]]['pickupTime']
pickuptime = datetime.datetime.strptime(strpickuptime, "%Y-%m-%dT%H:%M:%S")
neworderinfo[0]['pickupTime'] = str(pickuptime)
        else:  # case: the vehicle holds multiple orders
firstpktimeandarea = self.getpickuptime(neworderinfo[distSort[0]])
currentarea = firstpktimeandarea.area
            if 'pickupTime' not in neworderinfo[distSort[0]].keys() or neworderinfo[distSort[0]]['pickupTime'] is None or neworderinfo[distSort[0]]['pickupTime'] == u"":
starttime = firstpktimeandarea.pickuptime
pickuptime = starttime
neworderinfo[distSort[0]]['pickupTime'] = str(pickuptime)
else:
strpickuptime = neworderinfo[distSort[0]]['pickupTime']
pickuptime = datetime.datetime.strptime(strpickuptime, "%Y-%m-%dT%H:%M:%S")
neworderinfo[distSort[0]]['pickupTime'] = str(pickuptime)
for i in range(1, len(distSort)):
nextpktimeandarea = self.getpickuptime(neworderinfo[distSort[i]])
nextarea = nextpktimeandarea.area
                if 'pickupTime' not in neworderinfo[distSort[i]].keys() or neworderinfo[distSort[i]]['pickupTime'] is None or neworderinfo[distSort[i]]['pickupTime'] == u"":
areacount = abs(nextarea - currentarea)
if areacount == 0:
pickuptime += datetime.timedelta(minutes=PICKTIME) + datetime.timedelta(minutes=SAMEDURATION)
else:
pickuptime += datetime.timedelta(minutes=PICKTIME) + datetime.timedelta(minutes=DIFDURATION * areacount)
neworderinfo[distSort[i]]['pickupTime'] = str(pickuptime)
else:
strpickuptime = neworderinfo[distSort[i]]['pickupTime']
pickuptime = datetime.datetime.strptime(strpickuptime, "%Y-%m-%dT%H:%M:%S")
neworderinfo[distSort[i]]['pickupTime'] = str(pickuptime)
currentarea = nextarea
jsondatar = json.dumps(neworderinfo, ensure_ascii=False, separators=(',', ':')).encode('utf-8')
return jsondatar
def firstGetTime(self, order):
info = copy.copy(order)
orderlng = info['bdlng']
orderlat = info['bdlat']
# get the area number
areanum = self.getorderareanum(orderlng, orderlat)
        # 1. Get the flight departure time
planetakeofftime = info['takeofftime']
        # 2. Get the flight segment (time table code)
res = requests.post(TimeTableInfoURL, json={"Time": planetakeofftime})
dataInfo = res.json()["Data"][0]
timetable = dataInfo['TimeCode']
        # 3. Get the pickup start time for that segment
starttime = dataInfo['PickupStartTime']
getdatetime = info['date'] + starttime
dateTime = datetime.datetime.strptime(getdatetime, "%Y-%m-%d%H:%M")
getOnTheCarTime = dateTime + datetime.timedelta(minutes=areanum*10)
# info['pickupTime'] = int(time.mktime(getOnTheCarTime.timetuple()))
info['TimeTable'] = timetable
info['pickupTime'] = str(getOnTheCarTime)
# print "转换信息",info
jsondatar = json.dumps(info, ensure_ascii=False, separators=(',', ':')).encode('utf-8')
# print "jsondata", jsondatar
return jsondatar
# return txtname
def gettheordertime(self):
polys = sf.Reader("shapefiles/test/wholeArea.shp")
polygon = polys.shapes()
shpfilePoints = []
for shape in polygon:
shpfilePoints = shape.points
polygon = Polygon(shpfilePoints)
# #[锦里,九眼桥,锦江宾馆, 仁恒置地, 桐梓林, 骡马寺]
        # currentPoint = [30.662447, 104.072469] # Tianfu Square
# points = [[30.650817,104.056385], [30.645582,104.095192], [30.654087,104.072528],
# [30.658646,104.072563], [30.621274,104.073749], [30.672531,104.071962]]
        # destination = [30.599595,104.040745] # junction point
        point = Point(104.072469, 30.662447) # Tianfu Square
# point = Point(104.040745,30.599595) # keyPoint
# point = Point(104.042779,30.620844)
# point in polygon test
if polygon.contains(point):
print 'inside'
else:
print 'OUT'
polExt = LinearRing(polygon.exterior.coords)
d = polExt.project(point)
p = polExt.interpolate(d)
closest_point_coords = list(p.coords)[0]
print list(p.coords)
print d
|
{"hexsha": "515d928a2d7a60fae4cf461a0c9ed241dbb67af4", "size": 9004, "ext": "py", "lang": "Python", "max_stars_repo_path": "recomTimeOnTheBus/recommendtime.py", "max_stars_repo_name": "hellodu-dev/team_schedule", "max_stars_repo_head_hexsha": "6239a6798f337f2cf5e88277d175143519045d86", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2019-04-21T09:41:57.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-15T02:07:18.000Z", "max_issues_repo_path": "recomTimeOnTheBus/recommendtime.py", "max_issues_repo_name": "hellodu-dev/team_schedule", "max_issues_repo_head_hexsha": "6239a6798f337f2cf5e88277d175143519045d86", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "recomTimeOnTheBus/recommendtime.py", "max_forks_repo_name": "hellodu-dev/team_schedule", "max_forks_repo_head_hexsha": "6239a6798f337f2cf5e88277d175143519045d86", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2017-09-08T02:45:34.000Z", "max_forks_repo_forks_event_max_datetime": "2017-09-08T02:45:34.000Z", "avg_line_length": 43.4975845411, "max_line_length": 173, "alphanum_fraction": 0.6018436251, "include": true, "reason": "import numpy", "num_tokens": 2392}
|
import numpy as np
class Hessian():
def __init__(self, f):
self.value = f.Real.Real.Real
fd1 = f.Dual[0].Real.Real[0].Real.Real
fd2 = f.Dual[1].Real.Real[1].Real.Real
self.firstDer = np.array([fd1,fd2])
hxx = f.Dual[0].Dual[0].Real[0].Real
hyy = f.Dual[0].Real.Real[1].Dual[0].Real[1].Real
hxy = f.Dual[0].Dual[0].Real[1].Real
hyx = hxy
self.hessian = np.array([[hxx,hxy],[hyx,hyy]])
def __str__(self):
return "{} val\n\n{} der\n\n{} hess".format(self.value,self.firstDer,self.hessian)
|
{"hexsha": "5fe2965ddd1626c451aeeb254c3cc019f2d7c230", "size": 514, "ext": "py", "lang": "Python", "max_stars_repo_path": "ADPYNE/Hessian.py", "max_stars_repo_name": "PYNE-AD/cs207-FinalProject", "max_stars_repo_head_hexsha": "7b146da3ebb4747ce213bf0537af3c385689ecc1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2019-10-26T22:24:33.000Z", "max_stars_repo_stars_event_max_datetime": "2019-10-26T22:24:46.000Z", "max_issues_repo_path": "ADPYNE/Hessian.py", "max_issues_repo_name": "PYNE-AD/cs207-FinalProject", "max_issues_repo_head_hexsha": "7b146da3ebb4747ce213bf0537af3c385689ecc1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 11, "max_issues_repo_issues_event_min_datetime": "2019-11-20T02:06:24.000Z", "max_issues_repo_issues_event_max_datetime": "2019-12-10T15:48:31.000Z", "max_forks_repo_path": "ADPYNE/Hessian.py", "max_forks_repo_name": "PYNE-AD/cs207-FinalProject", "max_forks_repo_head_hexsha": "7b146da3ebb4747ce213bf0537af3c385689ecc1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.0526315789, "max_line_length": 84, "alphanum_fraction": 0.6381322957, "include": true, "reason": "import numpy", "num_tokens": 199}
|
import time
import numpy as np
import h5py
import matplotlib.pyplot as plt
import scipy
import argparse
from PIL import Image
from scipy import ndimage
from dnn_utils import load_data, initialize_parameters_deep, L_model_forward, \
compute_cost, L_model_backward, update_parameters, predict, print_mislabeled_images
def L_layer_model(X, Y, layers_dims, learning_rate=0.0075, num_iterations=3000, print_cost=False):  # lr was 0.009
"""
Implements a L-layer neural network: [LINEAR->RELU]*(L-1)->LINEAR->SIGMOID.
Arguments:
    X -- input data, numpy array of shape (num_px * num_px * 3, number of examples)
    Y -- true "label" vector (containing 1 if cat, 0 if non-cat), of shape (1, number of examples)
layers_dims -- list containing the input size and each layer size, of length (number of layers + 1).
learning_rate -- learning rate of the gradient descent update rule
num_iterations -- number of iterations of the optimization loop
print_cost -- if True, it prints the cost every 100 steps
Returns:
parameters -- parameters learnt by the model. They can then be used to predict.
costs -- list of cost at each iteration
"""
np.random.seed(1)
costs = [] # keep track of cost
    # Parameters initialization.
    parameters = initialize_parameters_deep(layers_dims)
# Loop (gradient descent)
print("Training model with {} iterations and learning_rate {}".format(num_iterations, learning_rate))
for i in range(0, num_iterations):
# Forward propagation: [LINEAR -> RELU]*(L-1) -> LINEAR -> SIGMOID.
        AL, caches = L_model_forward(X, parameters)
# Compute cost.
        cost = compute_cost(AL, Y)
# Backward propagation.
        grads = L_model_backward(AL, Y, caches)
# Update parameters.
        parameters = update_parameters(parameters, grads, learning_rate)
        # Print and record the cost every 100 iterations
        if print_cost and i % 100 == 0:
            print("Cost after iteration %i: %f" % (i, cost))
            costs.append(cost)
return parameters, costs
def plot_cost(costs, learning_rate):
# plot the cost
plt.plot(np.squeeze(costs))
plt.ylabel('cost')
plt.xlabel('iterations (per tens)')
plt.title("Learning rate =" + str(learning_rate))
plt.show()
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='Train an L-layer deep neural network on cat/non-cat images',
    )
parser.add_argument('--training_dataset', action="store", dest="training_dataset",
default="datasets/train_catvnoncat.h5")
parser.add_argument('--test_dataset', action="store", dest="test_dataset",
default="datasets/test_catvnoncat.h5")
parser.add_argument('--save_dir', action="store", dest="save_dir")
parser.add_argument('--learning_rate', action="store", dest="learning_rate",
type=float, default=0.01)
parser.add_argument('--inner_layers', action="store", dest="inner_layers",
type=int, nargs='*', default=[20, 7, 5])
parser.add_argument('--iterations', action="store", dest="num_iterations",
type=int, default=500)
args = parser.parse_args()
training_dataset = args.training_dataset
test_dataset = args.test_dataset
learning_rate = args.learning_rate
num_iterations = args.num_iterations
layers_dims = [12288] + args.inner_layers + [1]
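    # 12288 = 64 * 64 * 3 flattened RGB pixels (the catvnoncat images are 64x64);
    # the final layer of size 1 feeds the sigmoid cat/non-cat decision.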
plt.rcParams['figure.figsize'] = (5.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
np.random.seed(1)
print("Initializing deep learning model with architecture: ",layers_dims)
# Loading data
train_x_orig, train_y, test_x_orig, test_y, classes = load_data(training_dataset, test_dataset)
#test_x_orig, test_y, classes = load_data(test_dataset)
# Reshape the training and test examples
train_x_flatten = train_x_orig.reshape(train_x_orig.shape[0], -1).T # The "-1" makes reshape flatten the remaining dimensions
test_x_flatten = test_x_orig.reshape(test_x_orig.shape[0], -1).T
# Standardize data to have feature values between 0 and 1.
train_x = train_x_flatten/255.
test_x = test_x_flatten/255.
    # Shape of the network
#layers_dims = [12288, 20, 7, 5, 1]
# Train the model
parameters, costs = L_layer_model(train_x, train_y, layers_dims,
learning_rate=learning_rate, num_iterations = num_iterations, print_cost = True)
print("Model training completed")
#Predict
pred_train, train_acc = predict(train_x, train_y, parameters)
print("Training Accuracy: ", str(train_acc))
pred_test, test_acc = predict(test_x, test_y, parameters)
print("Test Accuracy: ", str(test_acc))
#print_mislabeled_images(classes, test_x, test_y, pred_test)
# save the model
checkpoint = {'layer_dims': layers_dims,
'learning_rate': learning_rate ,
'num_iterations': num_iterations,
'classes': classes,
'parameters': parameters}
if args.save_dir:
saved_checkpoint = args.save_dir+"/checkpoint"
else:
saved_checkpoint = "checkpoint"
#with open(saved_checkpoint, 'w') as fp:
np.save(saved_checkpoint, checkpoint)
#json.dump(checkpoint, fp)
#pickle.dump(checkpoint, fp, protocol=pickle.HIGHEST_PROTOCOL)
print("Model saved at {}.npy".format(saved_checkpoint))
# Plot the costs
print("Plotting the Learning Curve")
plot_cost(costs, learning_rate)
"""
# Print Results
#print_mislabeled_images(classes, test_x, test_y, pred_test)
"""
|
{"hexsha": "8b0bea1c556bdf9bccc14874a6f68a2bab6da3de", "size": 6092, "ext": "py", "lang": "Python", "max_stars_repo_path": "train.py", "max_stars_repo_name": "santamm/DeepNet", "max_stars_repo_head_hexsha": "fd05804200eb1bd62fb3a80a793b22794e4ec7d2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "train.py", "max_issues_repo_name": "santamm/DeepNet", "max_issues_repo_head_hexsha": "fd05804200eb1bd62fb3a80a793b22794e4ec7d2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "train.py", "max_forks_repo_name": "santamm/DeepNet", "max_forks_repo_head_hexsha": "fd05804200eb1bd62fb3a80a793b22794e4ec7d2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.9212121212, "max_line_length": 131, "alphanum_fraction": 0.6644780039, "include": true, "reason": "import numpy,import scipy,from scipy", "num_tokens": 1477}
|
% -*- root: ../gvoysey-thesis.tex -*-
\chapter{Introduction}
\label{chapter:Introduction}
\thispagestyle{myheadings}
\section{Motivation}
The variability of overall performance between putatively normal-hearing listeners, particularly in supra-threshold tasks performed in complex acoustic environments such as the cocktail party problem, has been recognized in the literature for many years \citep{Cherry1953Some}. Until recently, this variability was largely attributed to a broadly defined ``Central Processing Disorder'' in the absence of clinical Noise-Induced Hearing Loss (NIHL). The performance of the auditory periphery has been thought to be sufficiently characterized by pure tone audiometry (PTA), as well as Distortion-Product Otoacoustic Emissions (DPOAEs) and Auditory Brainstem Responses (ABR) for more detailed assessment of individual areas of the peripheral auditory system.
\section{Implication of the Auditory Periphery in Cochlear Synaptopathy}
Recently, selective deafferentiation of low spontaneous rate (low SR) fibers of the AN in the auditory periphery that do not affect audiometric thresholds have been convincingly demonstrated in mouse \citep{Kujawa2009Adding}, gerbil \citep{Furman2013NoiseInduced}, and recently, chinchilla (Liberman, unpublished); a growing body of psychophysical evidence suggests that a similar pathology occurs in humans \citep{Bharadwaj2015Individual}. In mice, synaptic damage at the hair cell in the Organ of Corti has been observed both in response to noise with intensities sufficient to induce a temporary threshold shift (TTS), which does not permanently affect Compound Action Potential (CAP) thresholds or hair cell viability, and due to age alone in quiet \citep{Sergeyenko2013AgeRelated,Fernandez2015Aging}. This phenomenon has been variously described as ``cochlear synaptopathy'' (CS) \citep{Bharadwaj2014Cochlear}, ``auditory neuropathy'', or ``Hidden Hearing Loss'' (HHL).
It is now thought that selective low-SR loss may be a hallmark of HHL \citep{Furman2013NoiseInduced,Bharadwaj2014Cochlear,Bharadwaj2015Individual,Schaette2011Tinnitus}. Consequently, it has been implicated in performance degradation in cocktail party scenarios in normal-hearing listeners \citep{Bharadwaj2015Individual,Bharadwaj2014Cochlear}. Unlike NIHL, no objective and noninvasive measure of HHL in humans has been established. While work is ongoing in cadaveric studies, the relationship between low-SR damage and HHL in humans has relied on inference from a combination of ABR, DPOAE, and psychometric measures, and no direct measure has yet been demonstrated that specifically implicates low-SR fiber loss as a sufficient causative factor for HHL.
\section{Human Psychophysical Tests Suggest a Diagnostic Measure}
Towards this goal of defining an objective measure of fiber loss,~\cite{Mehraei2015Individual,Mehraei2016Auditory} have performed a series of experiments that relate psychophysical performance in a tone-in-notched-noise ITD detection task to measured latency changes in ABR Wave V as a function of signal-to-noise ratio. They hypothesized that the loss of low-SR/high-threshold AN fibers would contribute to a faster recovery time of the compound action potential of the AN. In a perceptual task, this translates to higher thresholds and faster threshold recovery. In a group of 28 normal-hearing-threshold (NHT) subjects, comparison of ABR data and psychoacoustic performance demonstrates a relationship consistent with an impairment in low-SR population response \citep{Mehraei2016Auditory}.
\section{Computational Models of the Periphery Are Not Predictive}
While psychophysical experiments have supported the hypothesis of the importance of low-SR fibers, modeling the response of the auditory periphery, brainstem, and midbrain to the stimuli used in experiments has so far failed to produce results that are consistent with experimental findings \citep{Mehraei2016Auditory}.
Many disparate models of different stages of the subcortical auditory system have been developed and are in common use. Among these are models of the middle ear, the auditory nerve, and the auditory processing areas of the brainstem and the midbrain. Any given computational model of a particular area may be optimized by its authors for particular objectives that may not be shared by other models of the same area; further, the inputs required for more than one model of a given area may be dissimilar, and the outputs may also vary considerably. Numerous projects exist to address some of these difficulties. Among these projects are EarLab (\url{http://earlab.bu.edu}), the Auditory Modeling Toolbox (\url{http://amtoolbox.sourceforge.net}), and the Cochlea modeling environment (\url{https://github.com/mrkrd/cochlea}) \citep{Rudnicki2014Cochlea}.
To date, no project fully addresses the modeling concerns that arise during the study of the role of the precortical auditory system in HHL. Further, no project currently addresses the need to easily compare the performance and behavior of individual models as they are used to study the same problems.
\section{An Improved Modeling Approach} % (fold)
\label{sec:an_improved_modeling_approach}
This work sought to extend the modeling of the peripheral and central auditory system performed by~\cite{Mehraei2015Auditory} by creating a modeling framework that allows the direct comparison of the relative effects of leading acoustic models, with two novel modeling features also incorporated.
A framework for the design of arbitrarily complex future modeling experiments that automatically incorporates permutations of model choice and model parameters was also developed to provide a modeling comparison tool to the research community.
|
{"hexsha": "55c3c0e1554ce8aee1ad6b442900eb37c6d178a8", "size": 5748, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "text/1_Intro/intro.tex", "max_stars_repo_name": "gvoysey/thesis", "max_stars_repo_head_hexsha": "766ed365f55ada08c3b6f548a6f857f9d3e49b91", "max_stars_repo_licenses": ["CC-BY-4.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2016-07-10T17:40:15.000Z", "max_stars_repo_stars_event_max_datetime": "2017-03-10T05:37:38.000Z", "max_issues_repo_path": "text/1_Intro/intro.tex", "max_issues_repo_name": "gvoysey/thesis", "max_issues_repo_head_hexsha": "766ed365f55ada08c3b6f548a6f857f9d3e49b91", "max_issues_repo_licenses": ["CC-BY-4.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2016-08-14T04:18:16.000Z", "max_issues_repo_issues_event_max_datetime": "2016-08-14T04:18:16.000Z", "max_forks_repo_path": "text/1_Intro/intro.tex", "max_forks_repo_name": "gvoysey/thesis", "max_forks_repo_head_hexsha": "766ed365f55ada08c3b6f548a6f857f9d3e49b91", "max_forks_repo_licenses": ["CC-BY-4.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 205.2857142857, "max_line_length": 977, "alphanum_fraction": 0.82289492, "num_tokens": 1257}
|
[STATEMENT]
lemma ack_3: "ack (Suc (Suc (Suc 0))) j = 2 ^ (j+3) - 3"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ack (Suc (Suc (Suc 0))) j = 2 ^ (j + 3) - 3
[PROOF STEP]
proof (induct j)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. ack (Suc (Suc (Suc 0))) 0 = 2 ^ (0 + 3) - 3
2. \<And>j. ack (Suc (Suc (Suc 0))) j = 2 ^ (j + 3) - 3 \<Longrightarrow> ack (Suc (Suc (Suc 0))) (Suc j) = 2 ^ (Suc j + 3) - 3
[PROOF STEP]
case 0
[PROOF STATE]
proof (state)
this:
goal (2 subgoals):
1. ack (Suc (Suc (Suc 0))) 0 = 2 ^ (0 + 3) - 3
2. \<And>j. ack (Suc (Suc (Suc 0))) j = 2 ^ (j + 3) - 3 \<Longrightarrow> ack (Suc (Suc (Suc 0))) (Suc j) = 2 ^ (Suc j + 3) - 3
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ack (Suc (Suc (Suc 0))) 0 = 2 ^ (0 + 3) - 3
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
ack (Suc (Suc (Suc 0))) 0 = 2 ^ (0 + 3) - 3
goal (1 subgoal):
1. \<And>j. ack (Suc (Suc (Suc 0))) j = 2 ^ (j + 3) - 3 \<Longrightarrow> ack (Suc (Suc (Suc 0))) (Suc j) = 2 ^ (Suc j + 3) - 3
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>j. ack (Suc (Suc (Suc 0))) j = 2 ^ (j + 3) - 3 \<Longrightarrow> ack (Suc (Suc (Suc 0))) (Suc j) = 2 ^ (Suc j + 3) - 3
[PROOF STEP]
case (Suc j)
[PROOF STATE]
proof (state)
this:
ack (Suc (Suc (Suc 0))) j = 2 ^ (j + 3) - 3
goal (1 subgoal):
1. \<And>j. ack (Suc (Suc (Suc 0))) j = 2 ^ (j + 3) - 3 \<Longrightarrow> ack (Suc (Suc (Suc 0))) (Suc j) = 2 ^ (Suc j + 3) - 3
[PROOF STEP]
with less_le_trans
[PROOF STATE]
proof (chain)
picking this:
\<lbrakk>?x < ?y; ?y \<le> ?z\<rbrakk> \<Longrightarrow> ?x < ?z
ack (Suc (Suc (Suc 0))) j = 2 ^ (j + 3) - 3
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>?x < ?y; ?y \<le> ?z\<rbrakk> \<Longrightarrow> ?x < ?z
ack (Suc (Suc (Suc 0))) j = 2 ^ (j + 3) - 3
goal (1 subgoal):
1. ack (Suc (Suc (Suc 0))) (Suc j) = 2 ^ (Suc j + 3) - 3
[PROOF STEP]
by (fastforce simp add: power_add algebra_simps)
[PROOF STATE]
proof (state)
this:
ack (Suc (Suc (Suc 0))) (Suc j) = 2 ^ (Suc j + 3) - 3
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 1170, "file": "Ackermanns_not_PR_Primrec", "length": 11}
|
using AstroUtils
using Test, SafeTestsets
@time begin
@time @safetestset "cartToKep tests..." begin include("cartToKepTests.jl") end
end
|
{"hexsha": "ec18fce86bdd2198ad485268b4f86f4def58db58", "size": 137, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/runtests.jl", "max_stars_repo_name": "GrantHecht/AstroUtils.jl", "max_stars_repo_head_hexsha": "40227897cc6030fa6ab505b805c7f72d1322f2b0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/runtests.jl", "max_issues_repo_name": "GrantHecht/AstroUtils.jl", "max_issues_repo_head_hexsha": "40227897cc6030fa6ab505b805c7f72d1322f2b0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/runtests.jl", "max_forks_repo_name": "GrantHecht/AstroUtils.jl", "max_forks_repo_head_hexsha": "40227897cc6030fa6ab505b805c7f72d1322f2b0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.8333333333, "max_line_length": 78, "alphanum_fraction": 0.7883211679, "num_tokens": 43}
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 9 10:38:44 2020
@author: jsalm
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
# from sklearn import svm, datasets
from skimage.feature import peak_local_max
from skimage.morphology import watershed
from scipy.ndimage import convolve,distance_transform_edt,label, find_objects
from sklearn.svm import SVC
from sklearn.model_selection import cross_val_score, train_test_split, learning_curve, GridSearchCV
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler, MinMaxScaler, RobustScaler, LabelEncoder
from sklearn.metrics import confusion_matrix,auc
from sklearn.decomposition import PCA
from skimage.feature import hog
from matplotlib.colors import ListedColormap
from matplotlib.patches import Patch, Rectangle
import cv2
import os
import time
import csv
import Filters
import DataManager
import ML_interface_SVM_V3
# import xlwings as xw
from IPython import get_ipython
plt.rcParams['figure.dpi'] = 100
plt.rcParams['figure.figsize'] = (10,10)
get_ipython().run_line_magic('matplotlib','qt5')
dirname = os.path.dirname(__file__)
save_bin = os.path.join(dirname,"save-bin")
global data,labels
def generate_train_sert_ID(boolim,image):
if type(boolim[0,0]) != np.bool_:
raise TypeError("args need to be type bool and tuple respectively")
'end if'
count = 0
data = np.zeros((2,boolim.shape[0]*boolim.shape[1]))
point_data = np.zeros((2,boolim.shape[0]*boolim.shape[1]))
#generate list of points
for i,row in enumerate(boolim):
for j,col in enumerate(row):
if col == True:
data[0,count] = image[i,j]
data[1,count] = 1
point_data[0,count] = i
point_data[1,count] = j
count+=1
else:
data[0,count] = image[i,j]
data[1,count] = 0
point_data[0,count] = i
point_data[1,count] = j
count+=1
'end if'
'end for'
'end for'
return data,point_data
'end def'
def generate_test_sert_ID(boolim,image):
if type(boolim[0,0]) != np.bool_:
raise TypeError("args need to be type bool and tuple respectively")
'end if'
count = 0
t_data = np.sum(boolim)
data = np.zeros((2,t_data))
point_data = np.zeros((2,t_data))
for i,row in enumerate(boolim):
for j,col in enumerate(row):
if col == True:
data[0,count] = image[i,j]
data[1,count] = 0
point_data[0,count] = i
point_data[1,count] = j
count+=1
return data,point_data
'end def'
def get_coef(generator):
weights = []
for clf in generator:
weights.append(clf.coef_)
'end for'
return weights
'end def'
def make_meshgrid(x, y, h=.02):
"""Create a mesh of points to plot in
Parameters
----------
x: data to base x-axis meshgrid on
y: data to base y-axis meshgrid on
h: stepsize for meshgrid, optional
Returns
-------
xx, yy : ndarray
"""
x_min, x_max = x.min() - 1, x.max() + 1
y_min, y_max = y.min() - 1, y.max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
return xx, yy
def plot_contours(ax, clf, xx, yy, **params):
"""Plot the decision boundaries for a classifier.
Parameters
----------
ax: matplotlib axes object
clf: a classifier
xx: meshgrid ndarray
yy: meshgrid ndarray
params: dictionary of params to pass to contourf, optional
"""
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
out = ax.contourf(xx, yy, Z, **params)
return out
def gen_point_vector(image):
point_data = np.zeros((image.shape[0]*image.shape[1],2))
count = 0
for i in range(0,image.shape[0]):
for j in range(0,image.shape[1]):
point_data[count,:] = [i,j]
count += 1
'end for'
'end for'
return point_data
'end def'
def img_to_data(image,mask,keep_all = True,*kwargs):
"""
Parameters
----------
    image : ndarray
        input image.
    mask : ndarray of bool
        label mask, same shape as image.
    *kwargs : additional feature images (float32, same shape as image)
    Returns
    -------
    array of data of shape [image.shape[0]*image.shape[1], number_of_parameters + image_data]; represents
    all the parameters to be entered into the SVM image analysis
"""
#initialize with original image data
img_d = image.ravel()
img_d = img_d.reshape(img_d.shape[0],1)
con_data = img_d
param_c = 0
for data in kwargs:
new_d = data.ravel()
new_d = new_d.reshape(new_d.shape[0],1)
con_data = np.concatenate((con_data,new_d),axis = 1)
param_c += 1
'end for'
nonzero = np.sum(mask)
mask_r = mask.ravel()
mask_r = mask_r.reshape(mask_r.shape[0],1)
point_data = gen_point_vector(image)
if keep_all:
data = con_data
bool_set = mask_r.astype(int)
else:
masked = np.multiply(con_data,mask_r)
masked_new = np.zeros((nonzero,con_data.shape[1]))
point_new = np.zeros((nonzero,2))
bool_set = np.zeros((nonzero,con_data.shape[1]))
count = 0
for i,x in enumerate(masked):
if x.any() != 0:
masked_new[count,:] = x
bool_set[count,:] = mask_r[i,:]
point_new[count,:] = point_data[i,:]
count += 1
'end if'
'end for'
data = masked_new
bool_set = bool_set.astype(int)
point_data = point_new
return data,bool_set,point_data
'end def'
def data_to_img(mask,predictions,positions):
newim = np.zeros((mask.shape[0],mask.shape[1]))
count = 0
for i,row in enumerate(mask):
for j,col in enumerate(row):
if col == True:
newim[i,j] = predictions[count]
count += 1
'end if'
'end for'
'end for'
return newim
def get_nonzeros(image,val_vector,mask,tru_type = True):
mask = mask.ravel()
mask = mask.reshape(mask.shape[0],1)
masklen = np.sum(mask.astype(int))
mask_new = np.zeros((masklen,mask.shape[1]))
points_new = np.zeros((masklen,2))
points = gen_point_vector(image)
vals_new = np.zeros((masklen,val_vector.shape[1]))
count = 0
for i,x in enumerate(mask.astype(int)):
if x != 0:
vals_new[count,:] = val_vector[i,:]
points_new[count,:] = points[i,:]
if tru_type:
# vals_new[count,-1] = 1
mask_new[count,0] = 1
else:
# vals_new[count,-1] = 0
mask_new[count,0] = 0
count += 1
return vals_new,mask_new.astype(int),points_new
# @optunity.cross_validated(x=data,y=labels,num_folds=5,regenerate_folds=True)
# def svm_rbf_tuned_auroc(x_train, y_train, x_test, y_test, logC, logGamma):
# model = SVC(C=10**logC,gamma=10**logGamma).fit(x_train,y_train)
# decision_values = model.decision_function(x_test)
# auc = optunity.metrics.roc_auc(y_test, decision_values)
# return auc
# 'end def'
def filter_pipeline(image,ff_width,wiener_size,med_size,multiplier_a=1,multiplier_d=1):
"""
Parameters
----------
    image : ndarray
        single-channel input image.
    ff_width : int
        width of the Fourier high-pass filter.
    wiener_size : tuple
        kernel size for the Wiener denoising filter.
    med_size : int
        kernel size for the median filter.
    multiplier_a, multiplier_d : float, optional
        Gaussian shape parameters for the adaptive-threshold convolution.
Returns
-------
    median_im : ndarray
        the denoised, median-filtered image.
    feature_list : list of ndarray
        [threshed, di_im, t_im, gauim, direction_features].
"""
direction_features = np.array([])
#Normalize image
norm_im = Filters.normalize_img(image)
#Fourier Filter for removing low frequency components
ffimhi_new = Filters.Hi_pass_filter(norm_im,ff_width)
#denoising
denoised_im = Filters.wiener(ffimhi_new,wiener_size,None)
#Median Filter
#Add running window median filter (10x10 to 30x30) and rectify the signal using max(0,val)
median_im = Filters.median_filt(denoised_im,med_size)
# median_im = ((denoised_im-median_im)>0)*median_im
#adaptive thresholding
#lowering the 3rd variable "multiplier_d" tightens border of predictions.
threshed = convolve(Filters.adaptive_threshold(denoised_im,200,256,False),Filters._d3gaussian(5,multiplier_a,multiplier_d))
#various convolution filters to pull out relative directions
    diagonals = np.array([[1,0,0,0,1],
[0,1,0,1,0],
[0,0,1,0,0],
[0,1,0,1,0],
[1,0,0,0,1]])
t_cross = np.array([[0,0,1,0,0],
[0,0,1,0,0],
[1,1,1,1,1],
[0,0,1,0,0],
[0,0,1,0,0]])
    di_im = convolve(denoised_im, diagonals)
t_im = convolve(denoised_im,t_cross)
#Gaussian Image
gauim = convolve(denoised_im,Filters._d3gaussian(5,1,1))
#Differential image
direction_features = (Filters.diffmat(denoised_im,np.arange(0,2*np.pi,2*np.pi/8),dim=(5,2)))
return median_im, [threshed,di_im,t_im,gauim,direction_features]
def im_watershed(image,train = True, boolim = np.array([]),a=3,d=2):
"""
image : np.array(float32)
DESCRIPTION :
train : boolean
        DESCRIPTION : if train == False, pass an empty boolim (np.array([]))
    Segments the image using a watershed method, with distance_transform_edt as the
    discriminator. Returns lists of image segments.
"""
im_list = []
bool_list = []
gau_im = convolve(Filters.normalize_img(image),Filters._d3gaussian(16,a,d))
mn = np.mean(gau_im)
segments = gau_im > mn
D = distance_transform_edt(segments)
localMax = peak_local_max(D, indices=False, min_distance=20,
labels=segments)
markers = label(localMax,structure=np.ones((3,3)))[0]
water_im = watershed(-D,markers,mask=segments)
f = find_objects(water_im)
for seg in f:
im_list.append(image[seg])
if train:
bool_list.append(boolim[seg])
return im_list,bool_list,f
def pad_segs(im_list,bool_list,f,train = True,fill_val = 0):
"""
im_list
train : boolean
        DESCRIPTION : if train == False, pass an empty bool_list
    f : list of slices from scipy.ndimage.find_objects
    fill_val : int or float, optional
        value used for padding (e.g. 0 or np.nan)
"""
yval = []
xval = []
count = 0
for seg in f:
yval.append(abs(seg[0].stop-seg[0].start))
xval.append(abs(seg[1].stop-seg[1].start))
maxy = np.max(yval)
maxx = np.max(xval)
for seg in f:
dify = maxy - abs(seg[0].stop-seg[0].start)
difx = maxx - abs(seg[1].stop-seg[1].start)
if dify != 0 or difx != 0:
im_list[count] = np.pad(im_list[count],((0,dify),(0,difx)),'constant',constant_values=fill_val)
if train:
bool_list[count] = np.pad(bool_list[count],((0,dify),(0,difx)),'constant',constant_values=fill_val)
count += 1
return im_list, bool_list, f
def feature_extract(image, ff_width, wiener_size, med_size,train = True,boolim = np.array([])):
hog_features = []
median_im,feature_list = filter_pipeline(image,ff_width,wiener_size,med_size)
#segment image using watershed and pad images for resizing
im_list, bool_list, f = im_watershed(median_im,train,boolim)
paded_im_seg,paded_bool_seg,_ = pad_segs(im_list,bool_list,f,train)
#generate hog features
for seg in paded_im_seg:
normalized = Filters.normalize_img(seg)
hog_features.append(hog(normalized, visualize = True, block_norm='L2-Hys', pixels_per_cell=(4,4)))
'end for'
return im_list, bool_list, f, paded_im_seg, paded_bool_seg, hog_features
def get_hogs(hog_features):
hog = []
for i,val in enumerate(hog_features):
hog.append(val[0])
'end for'
return hog
def create_data(X,train=True,y = []):
y_train = []
X_in = []
for i in range(0,len(X)):
try:
X_in.append(X[i].ravel())
except AttributeError:
X_in = get_hogs(X)
break
X_train = np.vstack(X_in)
if train:
for i in y:
y_train.append(True in i)
y_train = np.array(y_train).astype(int)
return X_train, y_train
return X_train
def gen_mask(image):
mask = image > 0
return np.ma.masked_where(~mask, mask)
def overlay_predictions(image,boolim,preds,y_test,ind_test,f,**kwargs):
"""
Parameters
----------
image : np.array(float64)
        image being analyzed.
    boolim : np.array(bool)
        label data that was used to train the algorithm.
    preds : array-like
        predicted labels for the test segments.
    y_test : array-like
        true labels for the test segments.
    ind_test : array-like
        indices of the test segments within f.
    f : list
        slices from scipy.ndimage.find_objects locating each segment.
    **kwargs :
        forwarded to plt.imshow for the background image.
    Returns
    -------
    int
        0 on completion.
"""
nH= image.shape[0]
nW= image.shape[1]
pred_im = np.zeros((nH,nW)).astype(np.float32)
# true_im = np.zeros((nH,nW)).astype(np.float32)
plt.figure("Overlayed Predictions for Test Domain",figsize = (nH/100,nW/100))
plt.imshow(image, **kwargs)
legend_ele = [Rectangle((0, 0), 1, 1, fc="w", fill=False, edgecolor='none', linewidth=0,label = "label: (actual,predict)"),
Patch(facecolor = "red",label = "segmented"),
Patch(facecolor = "orange",label = "training data")]
# plt.set_size_inches(nH/100,nW/100)
for ind in range(0,len(ind_test)):
i = ind_test[ind]
y1 = f[i][0].start
y2 = f[i][0].stop
x1 = f[i][1].start
x2 = f[i][1].stop
pred_im[y1:y2,x1:x2] = np.ones((y2-y1,x2-x1))
s = "({0},{1})".format(y_test[ind],preds[ind])
plt.text(x1, y1-5, s, fontsize = 10, bbox=dict(fill=False, edgecolor='none', linewidth=2))
plt.legend(handles = legend_ele, loc = 'lower right')
plt.imshow(gen_mask(pred_im), alpha=0.3, cmap=ListedColormap(['red']))
plt.imshow(gen_mask(boolim), alpha=0.5, cmap=ListedColormap(['orange']))
plt.savefig(os.path.join(save_bin,'overlayed_predictions.tif'),dpi=200,bbox_inches='tight')
return 0
def write_auc(fpr,tpr):
with open(os.path.join(dirname,'save-bin\\svm_auc_roc.csv'),'w',newline='') as csvfile:
spamwriter = csv.writer(csvfile,delimiter=' ',
quotechar='|',quoting=csv.QUOTE_MINIMAL)
for i in range(len(fpr)):
spamwriter.writerow([fpr[i],tpr[i]])
return 0
def read_auc():
fpr = []
tpr = []
with open(os.path.join(dirname,'save-bin\\svm_auc_roc.csv'),'r',newline='') as csvfile:
spamreader = csv.reader(csvfile,delimiter=' ',
quotechar='|')
for row in spamreader:
fpr.append(float(row[0]))
tpr.append(float(row[1]))
fpr = np.array(fpr)
tpr = np.array(tpr)
roc_auc = auc(fpr, tpr)
return fpr,tpr,roc_auc
import random
def random_ind(a,b,N):
ints = []
for i in range(0,N):
        ints.append(random.randint(a, b))
return ints
### Testing ###
"""
if __name__ == '__main__':
### PARAMS ###
channel = 2
ff_width = 121
wiener_size = (5,5)
med_size = 5
###
dirname = os.path.dirname(__file__)
foldername = os.path.join(dirname,"images-5HT")
dirn = dirname
foldern = os.path.join(dirn,foldername)
im_dir = DataManager.DataMang(foldern)
im_list = [i for i in range(0,im_dir.dir_len)]
hog_features = []
for gen in im_dir.open_dir(im_list):
#load image and its information
image,nW,nH,chan,name = gen
#only want the red channel (fyi: cv2 is BGR (0,1,2 respectively) while most image processing considers
#the notation RGB (0,1,2 respectively))
image = image[:,:,channel]
#Import train data (if training your model)
train_bool = ML_interface_SVM_V3.import_train_data(name,(nW,nH),'train_71420')
#extract features from image using method(SVM.filter_pipeline) then watershed data useing thresholding algorithm (work to be done here...) to segment image.
#Additionally, extract filtered image data and hog_Features from segmented image. (will also segment train image if training model)
im_segs, bool_segs, domains, paded_im_seg, paded_bool_seg, hog_features = feature_extract(image, ff_width, wiener_size, med_size,True,train_bool)
#im_segs, _, domains, paded_im_seg, _, hog_features = feature_extract(image, ff_width, wiener_size, med_size,False)
#choose which data you want to merge together to train SVM. Been using my own filter, but could also use hog_features.
        X_train,y_train = create_data(im_segs, True, bool_segs)
        #X_train = create_data(im_segs, False)
"""
|
{"hexsha": "edb814adb76721168a69f55d93523cff2d35e530", "size": 17654, "ext": "py", "lang": "Python", "max_stars_repo_path": "SVM.py", "max_stars_repo_name": "eduluca/Generalized-Sklearn-ML-Pipeline", "max_stars_repo_head_hexsha": "75a3be16ca229ffe7712266cb9c1c50469ccd25d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "SVM.py", "max_issues_repo_name": "eduluca/Generalized-Sklearn-ML-Pipeline", "max_issues_repo_head_hexsha": "75a3be16ca229ffe7712266cb9c1c50469ccd25d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "SVM.py", "max_forks_repo_name": "eduluca/Generalized-Sklearn-ML-Pipeline", "max_forks_repo_head_hexsha": "75a3be16ca229ffe7712266cb9c1c50469ccd25d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.6321626617, "max_line_length": 165, "alphanum_fraction": 0.5905177297, "include": true, "reason": "import numpy,from scipy", "num_tokens": 4626}
|
// version 02: first impl to sort large files.
// sort and merge
// 30% faster than sort(1) for all 1GB 5GB 10GB files
#include <boost/noncopyable.hpp>
#include <boost/ptr_container/ptr_vector.hpp>
#include <datetime/Timestamp.h>
#include <algorithm>
#include <string>
#include <ext/vstring.h>
#include <vector>
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <sys/resource.h>
// typedef std::string string;
typedef __gnu_cxx::__sso_string string;
using muduo::Timestamp;
#define TMP_DIR "/tmp/"
class InputFile : boost::noncopyable
{
public:
InputFile(const char* filename)
: file_(fopen(filename, "rb"))
{
assert(file_);
setbuffer(file_, buffer_, sizeof buffer_);
}
~InputFile()
{
fclose(file_);
}
bool readLine(string* line)
{
char buf[256];
if (fgets_unlocked(buf, sizeof buf, file_))
{
line->assign(buf);
return true;
}
else
{
return false;
}
}
int read(char* buf, int size)
{
return fread_unlocked(buf, 1, size, file_);
}
private:
FILE* file_;
char buffer_[64*1024];
};
const int kRecordSize = 100;
const int kKeySize = 10;
class OutputFile : boost::noncopyable
{
public:
OutputFile(const char* filename)
: file_(fopen(filename, "wb"))
{
assert(file_);
setbuffer(file_, buffer_, sizeof buffer_);
}
~OutputFile()
{
fclose(file_);
}
void writeLine(const string& line)
{
if (line.empty())
{
fwrite_unlocked("\n", 1, 1, file_);
}
else if (line[line.size() - 1] == '\n')
{
fwrite_unlocked(line.c_str(), 1, line.size(), file_);
}
else
{
fwrite_unlocked(line.c_str(), 1, line.size(), file_);
fwrite_unlocked("\n", 1, 1, file_);
}
}
void writeRecord(char (&record)[kRecordSize])
{
fwrite_unlocked(record, 1, kRecordSize, file_);
}
private:
FILE* file_;
char buffer_[64*1024];
};
const bool kUseReadLine = false;
const int kBatchRecords = 10000000;
void readInput(InputFile& in, std::vector<string>* data)
{
int64_t totalSize = 0;
data->clear();
data->reserve(kBatchRecords);
for (int i = 0; i < kBatchRecords; ++i)
{
char buf[kRecordSize];
if (int n = in.read(buf, sizeof buf))
{
assert (n == kRecordSize);
totalSize += n;
data->push_back(string(buf, n));
}
else
{
break;
}
}
}
struct Key
{
char key[kKeySize];
int index;
Key(const string& record, int idx)
: index(idx)
{
memcpy(key, record.data(), sizeof key);
}
bool operator<(const Key& rhs) const
{
return memcmp(key, rhs.key, sizeof key) < 0;
}
};
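// Sorting small fixed-size Key objects (the first 10 key bytes plus an index)
// instead of full 100-byte records keeps the sort's working set compact; the
// records themselves are written out once, in key order, via the stored index.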
void sort(const std::vector<string>& data, std::vector<Key>* keys)
{
Timestamp start = Timestamp::now();
keys->reserve(data.size());
for (size_t i = 0; i < data.size(); ++i)
{
keys->push_back(Key(data[i], i));
}
// printf("make keys %f\n", data.size(), timeDifference(Timestamp::now(), start));
std::sort(keys->begin(), keys->end());
}
int sortSplit(const char* filename)
{
std::vector<string> data;
// read
InputFile in(filename);
int batch = 0;
while(true)
{
Timestamp startThis = Timestamp::now();
readInput(in, &data);
Timestamp readDone = Timestamp::now();
printf("%zd\nread %f\n", data.size(), timeDifference(readDone, startThis));
if (data.empty())
{
break;
}
std::vector<Key> keys;
sort(data, &keys);
Timestamp sortDone = Timestamp::now();
printf("sort %f\n", timeDifference(sortDone, readDone));
// output
{
char output[256];
snprintf(output, sizeof output, TMP_DIR "tmp%d", batch++);
OutputFile out(output);
for (std::vector<Key>::iterator it = keys.begin();
it != keys.end();
++it)
{
out.writeLine(data[it->index]);
}
}
Timestamp writeDone = Timestamp::now();
printf("write %f\n", timeDifference(writeDone, sortDone));
}
return batch;
}
struct Record
{
char data[kRecordSize];
InputFile* input;
Record(InputFile* in)
: input(in)
{
}
bool next()
{
return input->read(data, sizeof data) == kRecordSize;
}
bool operator<(const Record& rhs) const
{
// make_heap to build min-heap, for merging
return memcmp(data, rhs.data, kKeySize) > 0;
}
};
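// std::make_heap/push_heap/pop_heap maintain a max-heap with respect to
// operator<, so the inverted comparison above yields a min-heap: pop_heap
// always surfaces the record with the smallest key, which is exactly what
// the k-way merge below needs.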
void merge(const int batch)
{
printf("merge %d files\n", batch);
boost::ptr_vector<InputFile> inputs;
std::vector<Record> keys;
for (int i = 0; i < batch; ++i)
{
char filename[128];
snprintf(filename, sizeof filename, TMP_DIR "tmp%d", i);
inputs.push_back(new InputFile(filename));
Record rec(&inputs.back());
if (rec.next())
{
keys.push_back(rec);
}
}
OutputFile out("output");
std::make_heap(keys.begin(), keys.end());
while (!keys.empty())
{
std::pop_heap(keys.begin(), keys.end());
out.writeRecord(keys.back().data);
if (keys.back().next())
{
std::push_heap(keys.begin(), keys.end());
}
else
{
keys.pop_back();
}
}
}
int main(int argc, char* argv[])
{
  if (argc < 2)
  {
    fprintf(stderr, "Usage: %s input_file\n", argv[0]);
    return 1;
  }
  bool kKeepIntermediateFiles = false;
  {
    // set max virtual memory to 3GB.
    const size_t kOneGB = 1024*1024*1024;
    rlimit rl = { 3 * kOneGB, 3 * kOneGB };
    setrlimit(RLIMIT_AS, &rl);
  }
Timestamp start = Timestamp::now();
// sort
int batch = sortSplit(argv[1]);
Timestamp sortDone = Timestamp::now();
printf("sortSplit %f\n", timeDifference(sortDone, start));
if (batch == 1)
{
unlink("output");
rename(TMP_DIR "tmp0", "output");
}
else
{
// merge
merge(batch);
Timestamp mergeDone = Timestamp::now();
printf("mergeSplit %f\n", timeDifference(mergeDone, sortDone));
}
if (!kKeepIntermediateFiles)
{
for (int i = 0; i < batch; ++i)
{
char tmp[256];
snprintf(tmp, sizeof tmp, TMP_DIR "tmp%d", i);
unlink(tmp);
}
}
printf("total %f\n", timeDifference(Timestamp::now(), start));
}
|
{"hexsha": "48c00ec8332121a433df0ce80873d6e174729f13", "size": 5956, "ext": "cc", "lang": "C++", "max_stars_repo_path": "esort/sort02.cc", "max_stars_repo_name": "ririripley/recipes", "max_stars_repo_head_hexsha": "04267c68a7424326b4aa8dd14b1a879b59ab887c", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1418.0, "max_stars_repo_stars_event_min_datetime": "2015-01-07T09:40:09.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T08:37:02.000Z", "max_issues_repo_path": "esort/sort02.cc", "max_issues_repo_name": "ririripley/recipes", "max_issues_repo_head_hexsha": "04267c68a7424326b4aa8dd14b1a879b59ab887c", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 44.0, "max_issues_repo_issues_event_min_datetime": "2018-12-14T02:35:47.000Z", "max_issues_repo_issues_event_max_datetime": "2021-02-06T09:12:10.000Z", "max_forks_repo_path": "KM/03code/cpp/chenshuo/recipes/esort/sort02.cc", "max_forks_repo_name": "wangcy6/weekly", "max_forks_repo_head_hexsha": "f249bed5cf5a2b14d798ac33086cea0c1efe432e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 854.0, "max_forks_repo_forks_event_min_datetime": "2015-01-03T11:56:10.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T08:50:28.000Z", "avg_line_length": 18.9079365079, "max_line_length": 84, "alphanum_fraction": 0.6015782404, "num_tokens": 1678}
|
theory Ex2_1
imports Main
begin
datatype 'a tree = Leaf 'a | Branch 'a "'a tree" "'a tree"
primrec preOrder :: "'a tree \<Rightarrow> 'a list" where
"preOrder (Leaf val) = [val]"|
"preOrder (Branch val lft rgt) = val # preOrder lft @ preOrder rgt"
primrec postOrder :: "'a tree \<Rightarrow> 'a list" where
"postOrder (Leaf val) = [val]"|
"postOrder (Branch val lft rgt) = postOrder lft @ postOrder rgt @ [val]"
primrec inOrder :: "'a tree \<Rightarrow> 'a list" where
"inOrder (Leaf val) = [val]"|
"inOrder (Branch val lft rgt) = inOrder lft @ val # inOrder rgt"
primrec mirror :: "'a tree \<Rightarrow> 'a tree" where
"mirror (Leaf val) = (Leaf val)"|
"mirror (Branch val lft rgt) = Branch val (mirror rgt) (mirror lft) "
lemma "inOrder (mirror t) = rev (inOrder t)"
proof (induct t)
  case (Leaf x)
  then show ?case by simp
next
  case (Branch x1a t1 t2)
  then show ?case by simp
qed
primrec root :: "'a tree \<Rightarrow> 'a" where
"root (Leaf val) = val"|
"root (Branch val _ _) = val"
primrec leftmost :: "'a tree \<Rightarrow> 'a" where
"leftmost (Leaf val) = val"|
"leftmost (Branch _ lft _) = leftmost lft"
primrec rightmost :: "'a tree \<Rightarrow> 'a" where
"rightmost (Leaf val) = val"|
"rightmost (Branch _ _ rgt) = rightmost rgt"
|
{"author": "SvenWille", "repo": "ExerciseSolutions", "sha": "1a71e30f3369d34c4691a4d010257b8c8afc566c", "save_path": "github-repos/isabelle/SvenWille-ExerciseSolutions", "path": "github-repos/isabelle/SvenWille-ExerciseSolutions/ExerciseSolutions-1a71e30f3369d34c4691a4d010257b8c8afc566c/src/isabelle/Trees and other inductive data types/Ex2_1.thy"}
|
from sklearn.cluster import KMeans
import numpy as np
from classes import *
from treelib import *
from math import *
def hi_kmeans(_first_node, _des_database_list, _b, _depth, _n_documents):
    descriptors = []  # collect the 128-dimensional descriptor vectors in a list
    for i in range(len(_des_database_list)):
        descriptors.append(_des_database_list[i].vector)
    descriptors_new = np.array(descriptors)  # convert the list to a NumPy array for KMeans
    # cluster (and then recurse) only when there are more vectors than branches
    if len(descriptors) > _b:
        kmeans = KMeans(n_clusters=_b, random_state=0).fit(descriptors_new)  # k-means clustering
        kmeans_labels = kmeans.labels_  # cluster label of each descriptor
        clusters = [[] for i in range(_b)]
        centroids = kmeans.cluster_centers_  # centroid of each cluster
        # assign each descriptor, tagged with its document id, to its cluster
        for a in range(len(kmeans_labels)):  # one label per descriptor
            clusters[kmeans_labels[a]].append(
                keypoint_with_id(_des_database_list[a].vector, _des_database_list[a].id))
        # compute the tf-idf weight table for each node
tfidf_scores = [[] for i in range(_b)]
tmp_list_id = [[] for i in range(_b)]
tf = [[] for i in range(_b)]
for i in range(_b):
for j in range(len(clusters[i])):
tmp_list_id[i].append(clusters[i][j].id)
            for k in range(_n_documents):
                try:
                    tf[i].append(tmp_list_id[i].count(k) / len(clusters[i]))
                except ZeroDivisionError:
                    tf[i].append(0)
            try:
                idf = log2(_n_documents / np.count_nonzero(tf[i]))  # compute idf
            except ZeroDivisionError:
                idf = 0
            tfidf_scores[i] = (np.array(tf[i]) * idf).tolist()
# build tree with recursive method
        if _depth > 0:  # recurse only while depth remains
            _depth -= 1
            for m in range(_b):
                _child = Tree(clusters[m], centroids[m], tfidf_scores[m])  # child node
                _first_node.addChild(_child)  # attach the child to its parent
                hi_kmeans(_child, clusters[m], _b, _depth, _n_documents)  # cluster each child recursively
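# A minimal standalone sketch of the per-node tf-idf weighting computed in
# hi_kmeans above (the function name is illustrative, not part of the pipeline):
# tf is a document's share of a node's descriptors; idf = log2(N / n_nonzero)
# down-weights nodes that almost every document passes through.
def tfidf_sketch(doc_ids, n_documents):
    if not doc_ids:
        return [0.0] * n_documents
    tf = [doc_ids.count(k) / len(doc_ids) for k in range(n_documents)]
    nonzero = sum(1 for t in tf if t > 0)
    idf = log2(n_documents / nonzero) if nonzero else 0.0
    return [t * idf for t in tf]
# e.g. tfidf_sketch([0, 0, 1], 3) == [2/3 * log2(1.5), 1/3 * log2(1.5), 0.0]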
|
{"hexsha": "a4fd3fb7cd26ea108ded992ea91fd6d22ae01c2c", "size": 2697, "ext": "py", "lang": "Python", "max_stars_repo_path": "hi_k_means.py", "max_stars_repo_name": "favia96/Visual-Search-System", "max_stars_repo_head_hexsha": "06b3188062aabb4602ca4f2546897a19fc987a4a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "hi_k_means.py", "max_issues_repo_name": "favia96/Visual-Search-System", "max_issues_repo_head_hexsha": "06b3188062aabb4602ca4f2546897a19fc987a4a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "hi_k_means.py", "max_forks_repo_name": "favia96/Visual-Search-System", "max_forks_repo_head_hexsha": "06b3188062aabb4602ca4f2546897a19fc987a4a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.2131147541, "max_line_length": 138, "alphanum_fraction": 0.624768261, "include": true, "reason": "import numpy", "num_tokens": 628}
|