| text | meta |
|---|---|
import geompy

if geompy.USE_PURE_SYMPY:
    from symengine import Expr, Eq, sympify, nan
else:
    from sympy import Expr, Eq, sympify, nan

from sympy.simplify import sqrtdenest
from sympy import simplify
from functools import lru_cache
from typing import Union

Expression = Union[Expr, str, int, float]  # Anything that is sympify-able


@lru_cache(maxsize=None)
def is_nan(element: Expression):
    element = sympify(element)
    return isinstance(element, type(nan))


@lru_cache(maxsize=None)
def symengine_equality(a: Expr, b: Expr):
    return Eq(a, b).simplify()


@lru_cache(maxsize=None)
def optimized_simplify(expr: Expr) -> Expr:
    # return sqrtdenest(expr)
    # return expr.expand()
    # return simplify(sqrtdenest(expr))
    # return sqrtdenest(expr).expand()
    return sympify(sqrtdenest(expr)).simplify()
    # return expr.expand()


@lru_cache(maxsize=None)
def full_simplify(expr: Expr) -> Expr:
    return simplify(optimized_simplify(expr))
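

# --- Illustrative usage (not part of the original module) ---
# A minimal sketch, assuming the sympy backend is active; `sqrt` is
# imported here only for the demo.
if __name__ == "__main__":
    from sympy import sqrt
    nested = sympify(sqrt(5 + 2 * sqrt(6)))
    # sqrtdenest rewrites the nested radical to sqrt(2) + sqrt(3).
    print(optimized_simplify(nested))
    print(is_nan("nan"))  # True: sympify("nan") yields sympy's NaN
    print(is_nan(1))      # False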
|
{"hexsha": "165b67405a906d1fa7552eaf6bd13685c3a1cb37", "size": 968, "ext": "py", "lang": "Python", "max_stars_repo_path": "geompy/cas/symengine_utils.py", "max_stars_repo_name": "qthequartermasterman/geometry", "max_stars_repo_head_hexsha": "eb1619002aa36585338e59d04aa9c9d2e69a7a02", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "geompy/cas/symengine_utils.py", "max_issues_repo_name": "qthequartermasterman/geometry", "max_issues_repo_head_hexsha": "eb1619002aa36585338e59d04aa9c9d2e69a7a02", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 11, "max_issues_repo_issues_event_min_datetime": "2021-05-07T15:45:14.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-09T19:22:10.000Z", "max_forks_repo_path": "geompy/cas/symengine_utils.py", "max_forks_repo_name": "qthequartermasterman/geometry", "max_forks_repo_head_hexsha": "eb1619002aa36585338e59d04aa9c9d2e69a7a02", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.6097560976, "max_line_length": 74, "alphanum_fraction": 0.7314049587, "include": true, "reason": "from sympy", "num_tokens": 262}
|
# Fix ambiguities on julia 0.4
*(a::ResElem{fmpz}, b::fmpz) = parent(a)(data(a) * b)
*(a::fmpz, b::ResElem{fmpz}) = b*a
+(a::ResElem{fmpz}, b::fmpz) = parent(a)(data(a) + b)
+(a::fmpz, b::ResElem{fmpz}) = b + a
-(a::ResElem{fmpz}, b::fmpz) = parent(a)(data(a) - b)
-(a::fmpz, b::ResElem{fmpz}) = parent(b)(a - data(b))
function ==(a::ResElem{fmpz}, b::fmpz)
   z = base_ring(a)(b)
   return data(a) == mod(z, modulus(a))
end
==(a::fmpz, b::ResElem{fmpz}) = b == a
#
*(::fmpz, ::PolyElem{fmpz}) = nothing
*(::PolyElem{fmpz}, ::fmpz) = nothing
+(::fmpz, ::PolyElem{fmpz}) = nothing
+(::PolyElem{fmpz}, ::fmpz) = nothing
-(::fmpz, ::PolyElem{fmpz}) = nothing
-(::PolyElem{fmpz}, ::fmpz) = nothing
==(::fmpz, ::PolyElem{fmpz}) = nothing
==(::PolyElem{fmpz}, ::fmpz) = nothing
divexact(::PolyElem{fmpz}, ::fmpz) = nothing
evaluate(::PolyElem{fmpz}, ::fmpz) = nothing
#
*(::fmpz, ::SeriesElem{fmpz}) = nothing
*(::SeriesElem{fmpz}, ::fmpz) = nothing
+(::fmpz, ::SeriesElem{fmpz}) = nothing
+(::SeriesElem{fmpz}, ::fmpz) = nothing
-(::fmpz, ::SeriesElem{fmpz}) = nothing
-(::SeriesElem{fmpz}, ::fmpz) = nothing
==(::fmpz, ::SeriesElem{fmpz}) = nothing
==(::SeriesElem{fmpz}, ::fmpz) = nothing
*(::fmpz, ::RelSeriesElem{fmpz}) = nothing
*(::RelSeriesElem{fmpz}, ::fmpz) = nothing
+(::fmpz, ::RelSeriesElem{fmpz}) = nothing
+(::RelSeriesElem{fmpz}, ::fmpz) = nothing
-(::fmpz, ::RelSeriesElem{fmpz}) = nothing
-(::RelSeriesElem{fmpz}, ::fmpz) = nothing
==(::fmpz, ::RelSeriesElem{fmpz}) = nothing
==(::RelSeriesElem{fmpz}, ::fmpz) = nothing
*(::fmpz, ::AbsSeriesElem{fmpz}) = nothing
*(::AbsSeriesElem{fmpz}, ::fmpz) = nothing
+(::fmpz, ::AbsSeriesElem{fmpz}) = nothing
+(::AbsSeriesElem{fmpz}, ::fmpz) = nothing
-(::fmpz, ::AbsSeriesElem{fmpz}) = nothing
-(::AbsSeriesElem{fmpz}, ::fmpz) = nothing
==(::fmpz, ::AbsSeriesElem{fmpz}) = nothing
==(::AbsSeriesElem{fmpz}, ::fmpz) = nothing
*(::fmpz, ::MatElem{fmpz}) = nothing
*(::MatElem{fmpz}, ::fmpz) = nothing
+(::fmpz, ::MatElem{fmpz}) = nothing
+(::MatElem{fmpz}, ::fmpz) = nothing
-(::fmpz, ::MatElem{fmpz}) = nothing
-(::MatElem{fmpz}, ::fmpz) = nothing
==(::MatElem{fmpz}, ::fmpz) = nothing
divexact(::MatElem{fmpz}, ::fmpz) = nothing
#
setindex_t!(a::nmod_mat, b::GenRes{fmpz}, i::Int, j::Int) = setindex_t!(a, data(b), i, j)
*(::FracElem{fmpz}, ::fmpz) = nothing
*(::fmpz, ::FracElem{fmpz}) = nothing
+(::FracElem{fmpz}, ::fmpz) = nothing
+(::fmpz, ::FracElem{fmpz}) = nothing
-(::FracElem{fmpz}, ::fmpz) = nothing
-(::fmpz, ::FracElem{fmpz}) = nothing
==(::FracElem{fmpz}, ::fmpz) = nothing
==(::fmpz, ::FracElem{fmpz}) = nothing
divexact(::FracElem{fmpz}, ::fmpz) = nothing
divexact(::fmpz, ::FracElem{fmpz}) = nothing
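
# --- Illustrative sketch (not part of the original file) ---
# Why definitions like the ones above resolve ambiguities: when two
# methods such as f(::A, ::Any) and f(::Any, ::B) both apply to a call
# f(a::A, b::B), Julia cannot pick a winner; defining a method on the
# exact pair (A, B) breaks the tie. A minimal, self-contained analogue
# with hypothetical types:
abstract type Elem end
struct IntLike <: Elem; v::Int; end
struct PolyLike <: Elem; cs::Vector{Int}; end
combine(a::IntLike, b::Elem) = "int-elem"
combine(a::Elem, b::PolyLike) = "elem-poly"
# Without the next definition, combine(IntLike(1), PolyLike([1])) is
# ambiguous; with it, dispatch is unique:
combine(a::IntLike, b::PolyLike) = "int-poly"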
|
{"hexsha": "16f7186e49301e2a38087e47254f71da660b6bbc", "size": 2734, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "sources/src/ambiguities.jl", "max_stars_repo_name": "JeffreySarnoff/Nemo_v07_src.jl", "max_stars_repo_head_hexsha": "a8440b014212d6619dd5dd4ebfde74669f3c42e7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-02-10T02:13:07.000Z", "max_stars_repo_stars_event_max_datetime": "2019-02-10T02:13:07.000Z", "max_issues_repo_path": "sources/src/ambiguities.jl", "max_issues_repo_name": "JeffreySarnoff/Nemo_v07_src.jl", "max_issues_repo_head_hexsha": "a8440b014212d6619dd5dd4ebfde74669f3c42e7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sources/src/ambiguities.jl", "max_forks_repo_name": "JeffreySarnoff/Nemo_v07_src.jl", "max_forks_repo_head_hexsha": "a8440b014212d6619dd5dd4ebfde74669f3c42e7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 20.4029850746, "max_line_length": 88, "alphanum_fraction": 0.6005852231, "num_tokens": 1054}
|
[GOAL]
p : ℕ
inst✝⁴ : Fact (Nat.Prime p)
k : Type u_1
inst✝³ : CommRing k
inst✝² : IsDomain k
inst✝¹ : CharP k p
inst✝ : PerfectRing k p
m : ℤ
x : StandardOneDimIsocrystal p k m
⊢ ↑Φ(p, k) x = ↑p ^ m • ↑φ(p, k) x
[PROOFSTEP]
erw [smul_eq_mul]
[GOAL]
p : ℕ
inst✝⁴ : Fact (Nat.Prime p)
k : Type u_1
inst✝³ : CommRing k
inst✝² : IsDomain k
inst✝¹ : CharP k p
inst✝ : PerfectRing k p
m : ℤ
x : StandardOneDimIsocrystal p k m
⊢ ↑Φ(p, k) x =
↑(IsFractionRing.lift (_ : Function.Injective ↑(algebraMap (WittVector p k) ((fun x => K(p, k)) x)))) (↑p ^ m) *
↑φ(p, k) x
[PROOFSTEP]
simp only [map_zpow₀, map_natCast]
[GOAL]
p : ℕ
inst✝⁴ : Fact (Nat.Prime p)
k : Type u_1
inst✝³ : CommRing k
inst✝² : IsDomain k
inst✝¹ : CharP k p
inst✝ : PerfectRing k p
m : ℤ
x : StandardOneDimIsocrystal p k m
⊢ ↑Φ(p, k) x = ↑p ^ m * ↑φ(p, k) x
[PROOFSTEP]
rfl
[GOAL]
p : ℕ
inst✝⁶ : Fact (Nat.Prime p)
k✝ : Type u_1
inst✝⁵ : CommRing k✝
k : Type u_2
inst✝⁴ : Field k
inst✝³ : IsAlgClosed k
inst✝² : CharP k p
V : Type u_3
inst✝¹ : AddCommGroup V
inst✝ : Isocrystal p k V
h_dim : finrank K(p, k) V = 1
⊢ ∃ m, Nonempty (StandardOneDimIsocrystal p k m ≃ᶠⁱ[p, k] V)
[PROOFSTEP]
haveI : Nontrivial V := FiniteDimensional.nontrivial_of_finrank_eq_succ h_dim
[GOAL]
p : ℕ
inst✝⁶ : Fact (Nat.Prime p)
k✝ : Type u_1
inst✝⁵ : CommRing k✝
k : Type u_2
inst✝⁴ : Field k
inst✝³ : IsAlgClosed k
inst✝² : CharP k p
V : Type u_3
inst✝¹ : AddCommGroup V
inst✝ : Isocrystal p k V
h_dim : finrank K(p, k) V = 1
this : Nontrivial V
⊢ ∃ m, Nonempty (StandardOneDimIsocrystal p k m ≃ᶠⁱ[p, k] V)
[PROOFSTEP]
obtain ⟨x, hx⟩ : ∃ x : V, x ≠ 0 := exists_ne 0
[GOAL]
case intro
p : ℕ
inst✝⁶ : Fact (Nat.Prime p)
k✝ : Type u_1
inst✝⁵ : CommRing k✝
k : Type u_2
inst✝⁴ : Field k
inst✝³ : IsAlgClosed k
inst✝² : CharP k p
V : Type u_3
inst✝¹ : AddCommGroup V
inst✝ : Isocrystal p k V
h_dim : finrank K(p, k) V = 1
this : Nontrivial V
x : V
hx : x ≠ 0
⊢ ∃ m, Nonempty (StandardOneDimIsocrystal p k m ≃ᶠⁱ[p, k] V)
[PROOFSTEP]
have : Φ(p, k) x ≠ 0 := by simpa only [map_zero] using Φ(p, k).injective.ne hx
[GOAL]
p : ℕ
inst✝⁶ : Fact (Nat.Prime p)
k✝ : Type u_1
inst✝⁵ : CommRing k✝
k : Type u_2
inst✝⁴ : Field k
inst✝³ : IsAlgClosed k
inst✝² : CharP k p
V : Type u_3
inst✝¹ : AddCommGroup V
inst✝ : Isocrystal p k V
h_dim : finrank K(p, k) V = 1
this : Nontrivial V
x : V
hx : x ≠ 0
⊢ ↑Φ(p, k) x ≠ 0
[PROOFSTEP]
simpa only [map_zero] using Φ(p, k).injective.ne hx
[GOAL]
case intro
p : ℕ
inst✝⁶ : Fact (Nat.Prime p)
k✝ : Type u_1
inst✝⁵ : CommRing k✝
k : Type u_2
inst✝⁴ : Field k
inst✝³ : IsAlgClosed k
inst✝² : CharP k p
V : Type u_3
inst✝¹ : AddCommGroup V
inst✝ : Isocrystal p k V
h_dim : finrank K(p, k) V = 1
this✝ : Nontrivial V
x : V
hx : x ≠ 0
this : ↑Φ(p, k) x ≠ 0
⊢ ∃ m, Nonempty (StandardOneDimIsocrystal p k m ≃ᶠⁱ[p, k] V)
[PROOFSTEP]
obtain ⟨a, ha, hax⟩ : ∃ a : K(p, k), a ≠ 0 ∧ Φ(p, k) x = a • x :=
by
rw [finrank_eq_one_iff_of_nonzero' x hx] at h_dim
obtain ⟨a, ha⟩ := h_dim (Φ(p, k) x)
refine' ⟨a, _, ha.symm⟩
intro ha'
apply this
simp only [← ha, ha', zero_smul]
[GOAL]
p : ℕ
inst✝⁶ : Fact (Nat.Prime p)
k✝ : Type u_1
inst✝⁵ : CommRing k✝
k : Type u_2
inst✝⁴ : Field k
inst✝³ : IsAlgClosed k
inst✝² : CharP k p
V : Type u_3
inst✝¹ : AddCommGroup V
inst✝ : Isocrystal p k V
h_dim : finrank K(p, k) V = 1
this✝ : Nontrivial V
x : V
hx : x ≠ 0
this : ↑Φ(p, k) x ≠ 0
⊢ ∃ a, a ≠ 0 ∧ ↑Φ(p, k) x = a • x
[PROOFSTEP]
rw [finrank_eq_one_iff_of_nonzero' x hx] at h_dim
[GOAL]
p : ℕ
inst✝⁶ : Fact (Nat.Prime p)
k✝ : Type u_1
inst✝⁵ : CommRing k✝
k : Type u_2
inst✝⁴ : Field k
inst✝³ : IsAlgClosed k
inst✝² : CharP k p
V : Type u_3
inst✝¹ : AddCommGroup V
inst✝ : Isocrystal p k V
this✝ : Nontrivial V
x : V
h_dim : ∀ (w : V), ∃ c, c • x = w
hx : x ≠ 0
this : ↑Φ(p, k) x ≠ 0
⊢ ∃ a, a ≠ 0 ∧ ↑Φ(p, k) x = a • x
[PROOFSTEP]
obtain ⟨a, ha⟩ := h_dim (Φ(p, k) x)
[GOAL]
case intro
p : ℕ
inst✝⁶ : Fact (Nat.Prime p)
k✝ : Type u_1
inst✝⁵ : CommRing k✝
k : Type u_2
inst✝⁴ : Field k
inst✝³ : IsAlgClosed k
inst✝² : CharP k p
V : Type u_3
inst✝¹ : AddCommGroup V
inst✝ : Isocrystal p k V
this✝ : Nontrivial V
x : V
h_dim : ∀ (w : V), ∃ c, c • x = w
hx : x ≠ 0
this : ↑Φ(p, k) x ≠ 0
a : K(p, k)
ha : a • x = ↑Φ(p, k) x
⊢ ∃ a, a ≠ 0 ∧ ↑Φ(p, k) x = a • x
[PROOFSTEP]
refine' ⟨a, _, ha.symm⟩
[GOAL]
case intro
p : ℕ
inst✝⁶ : Fact (Nat.Prime p)
k✝ : Type u_1
inst✝⁵ : CommRing k✝
k : Type u_2
inst✝⁴ : Field k
inst✝³ : IsAlgClosed k
inst✝² : CharP k p
V : Type u_3
inst✝¹ : AddCommGroup V
inst✝ : Isocrystal p k V
this✝ : Nontrivial V
x : V
h_dim : ∀ (w : V), ∃ c, c • x = w
hx : x ≠ 0
this : ↑Φ(p, k) x ≠ 0
a : K(p, k)
ha : a • x = ↑Φ(p, k) x
⊢ a ≠ 0
[PROOFSTEP]
intro ha'
[GOAL]
case intro
p : ℕ
inst✝⁶ : Fact (Nat.Prime p)
k✝ : Type u_1
inst✝⁵ : CommRing k✝
k : Type u_2
inst✝⁴ : Field k
inst✝³ : IsAlgClosed k
inst✝² : CharP k p
V : Type u_3
inst✝¹ : AddCommGroup V
inst✝ : Isocrystal p k V
this✝ : Nontrivial V
x : V
h_dim : ∀ (w : V), ∃ c, c • x = w
hx : x ≠ 0
this : ↑Φ(p, k) x ≠ 0
a : K(p, k)
ha : a • x = ↑Φ(p, k) x
ha' : a = 0
⊢ False
[PROOFSTEP]
apply this
[GOAL]
case intro
p : ℕ
inst✝⁶ : Fact (Nat.Prime p)
k✝ : Type u_1
inst✝⁵ : CommRing k✝
k : Type u_2
inst✝⁴ : Field k
inst✝³ : IsAlgClosed k
inst✝² : CharP k p
V : Type u_3
inst✝¹ : AddCommGroup V
inst✝ : Isocrystal p k V
this✝ : Nontrivial V
x : V
h_dim : ∀ (w : V), ∃ c, c • x = w
hx : x ≠ 0
this : ↑Φ(p, k) x ≠ 0
a : K(p, k)
ha : a • x = ↑Φ(p, k) x
ha' : a = 0
⊢ ↑Φ(p, k) x = 0
[PROOFSTEP]
simp only [← ha, ha', zero_smul]
[GOAL]
case intro.intro.intro
p : ℕ
inst✝⁶ : Fact (Nat.Prime p)
k✝ : Type u_1
inst✝⁵ : CommRing k✝
k : Type u_2
inst✝⁴ : Field k
inst✝³ : IsAlgClosed k
inst✝² : CharP k p
V : Type u_3
inst✝¹ : AddCommGroup V
inst✝ : Isocrystal p k V
h_dim : finrank K(p, k) V = 1
this✝ : Nontrivial V
x : V
hx : x ≠ 0
this : ↑Φ(p, k) x ≠ 0
a : K(p, k)
ha : a ≠ 0
hax : ↑Φ(p, k) x = a • x
⊢ ∃ m, Nonempty (StandardOneDimIsocrystal p k m ≃ᶠⁱ[p, k] V)
[PROOFSTEP]
obtain ⟨b, hb, m, hmb⟩ := WittVector.exists_frobenius_solution_fractionRing p ha
[GOAL]
case intro.intro.intro.intro.intro.intro
p : ℕ
inst✝⁶ : Fact (Nat.Prime p)
k✝ : Type u_1
inst✝⁵ : CommRing k✝
k : Type u_2
inst✝⁴ : Field k
inst✝³ : IsAlgClosed k
inst✝² : CharP k p
V : Type u_3
inst✝¹ : AddCommGroup V
inst✝ : Isocrystal p k V
h_dim : finrank K(p, k) V = 1
this✝ : Nontrivial V
x : V
hx : x ≠ 0
this : ↑Φ(p, k) x ≠ 0
a : K(p, k)
ha : a ≠ 0
hax : ↑Φ(p, k) x = a • x
b : K(p, k)
hb : b ≠ 0
m : ℤ
hmb : ↑(IsFractionRing.fieldEquivOfRingEquiv (frobeniusEquiv p k)) b * a = ↑p ^ m * b
⊢ ∃ m, Nonempty (StandardOneDimIsocrystal p k m ≃ᶠⁱ[p, k] V)
[PROOFSTEP]
replace hmb : φ(p, k) b * a = (p : K(p, k)) ^ m * b := by convert hmb
[GOAL]
p : ℕ
inst✝⁶ : Fact (Nat.Prime p)
k✝ : Type u_1
inst✝⁵ : CommRing k✝
k : Type u_2
inst✝⁴ : Field k
inst✝³ : IsAlgClosed k
inst✝² : CharP k p
V : Type u_3
inst✝¹ : AddCommGroup V
inst✝ : Isocrystal p k V
h_dim : finrank K(p, k) V = 1
this✝ : Nontrivial V
x : V
hx : x ≠ 0
this : ↑Φ(p, k) x ≠ 0
a : K(p, k)
ha : a ≠ 0
hax : ↑Φ(p, k) x = a • x
b : K(p, k)
hb : b ≠ 0
m : ℤ
hmb : ↑(IsFractionRing.fieldEquivOfRingEquiv (frobeniusEquiv p k)) b * a = ↑p ^ m * b
⊢ ↑φ(p, k) b * a = ↑p ^ m * b
[PROOFSTEP]
convert hmb
[GOAL]
case intro.intro.intro.intro.intro.intro
p : ℕ
inst✝⁶ : Fact (Nat.Prime p)
k✝ : Type u_1
inst✝⁵ : CommRing k✝
k : Type u_2
inst✝⁴ : Field k
inst✝³ : IsAlgClosed k
inst✝² : CharP k p
V : Type u_3
inst✝¹ : AddCommGroup V
inst✝ : Isocrystal p k V
h_dim : finrank K(p, k) V = 1
this✝ : Nontrivial V
x : V
hx : x ≠ 0
this : ↑Φ(p, k) x ≠ 0
a : K(p, k)
ha : a ≠ 0
hax : ↑Φ(p, k) x = a • x
b : K(p, k)
hb : b ≠ 0
m : ℤ
hmb : ↑φ(p, k) b * a = ↑p ^ m * b
⊢ ∃ m, Nonempty (StandardOneDimIsocrystal p k m ≃ᶠⁱ[p, k] V)
[PROOFSTEP]
use m
[GOAL]
case h
p : ℕ
inst✝⁶ : Fact (Nat.Prime p)
k✝ : Type u_1
inst✝⁵ : CommRing k✝
k : Type u_2
inst✝⁴ : Field k
inst✝³ : IsAlgClosed k
inst✝² : CharP k p
V : Type u_3
inst✝¹ : AddCommGroup V
inst✝ : Isocrystal p k V
h_dim : finrank K(p, k) V = 1
this✝ : Nontrivial V
x : V
hx : x ≠ 0
this : ↑Φ(p, k) x ≠ 0
a : K(p, k)
ha : a ≠ 0
hax : ↑Φ(p, k) x = a • x
b : K(p, k)
hb : b ≠ 0
m : ℤ
hmb : ↑φ(p, k) b * a = ↑p ^ m * b
⊢ Nonempty (StandardOneDimIsocrystal p k m ≃ᶠⁱ[p, k] V)
[PROOFSTEP]
let F₀ : StandardOneDimIsocrystal p k m →ₗ[K(p, k)] V := LinearMap.toSpanSingleton K(p, k) V x
[GOAL]
case h
p : ℕ
inst✝⁶ : Fact (Nat.Prime p)
k✝ : Type u_1
inst✝⁵ : CommRing k✝
k : Type u_2
inst✝⁴ : Field k
inst✝³ : IsAlgClosed k
inst✝² : CharP k p
V : Type u_3
inst✝¹ : AddCommGroup V
inst✝ : Isocrystal p k V
h_dim : finrank K(p, k) V = 1
this✝ : Nontrivial V
x : V
hx : x ≠ 0
this : ↑Φ(p, k) x ≠ 0
a : K(p, k)
ha : a ≠ 0
hax : ↑Φ(p, k) x = a • x
b : K(p, k)
hb : b ≠ 0
m : ℤ
hmb : ↑φ(p, k) b * a = ↑p ^ m * b
F₀ : StandardOneDimIsocrystal p k m →ₗ[K(p, k)] V := LinearMap.toSpanSingleton K(p, k) V x
⊢ Nonempty (StandardOneDimIsocrystal p k m ≃ᶠⁱ[p, k] V)
[PROOFSTEP]
let F : StandardOneDimIsocrystal p k m ≃ₗ[K(p, k)] V :=
by
refine' LinearEquiv.ofBijective F₀ ⟨_, _⟩
· rw [← LinearMap.ker_eq_bot]
exact LinearMap.ker_toSpanSingleton K(p, k) V hx
· rw [← LinearMap.range_eq_top]
rw [← (finrank_eq_one_iff_of_nonzero x hx).mp h_dim]
rw [LinearMap.span_singleton_eq_range]
-- Porting note: `refine'` below gets confused when this is inlined.
[GOAL]
p : ℕ
inst✝⁶ : Fact (Nat.Prime p)
k✝ : Type u_1
inst✝⁵ : CommRing k✝
k : Type u_2
inst✝⁴ : Field k
inst✝³ : IsAlgClosed k
inst✝² : CharP k p
V : Type u_3
inst✝¹ : AddCommGroup V
inst✝ : Isocrystal p k V
h_dim : finrank K(p, k) V = 1
this✝ : Nontrivial V
x : V
hx : x ≠ 0
this : ↑Φ(p, k) x ≠ 0
a : K(p, k)
ha : a ≠ 0
hax : ↑Φ(p, k) x = a • x
b : K(p, k)
hb : b ≠ 0
m : ℤ
hmb : ↑φ(p, k) b * a = ↑p ^ m * b
F₀ : StandardOneDimIsocrystal p k m →ₗ[K(p, k)] V := LinearMap.toSpanSingleton K(p, k) V x
⊢ StandardOneDimIsocrystal p k m ≃ₗ[K(p, k)] V
[PROOFSTEP]
refine' LinearEquiv.ofBijective F₀ ⟨_, _⟩
[GOAL]
case refine'_1
p : ℕ
inst✝⁶ : Fact (Nat.Prime p)
k✝ : Type u_1
inst✝⁵ : CommRing k✝
k : Type u_2
inst✝⁴ : Field k
inst✝³ : IsAlgClosed k
inst✝² : CharP k p
V : Type u_3
inst✝¹ : AddCommGroup V
inst✝ : Isocrystal p k V
h_dim : finrank K(p, k) V = 1
this✝ : Nontrivial V
x : V
hx : x ≠ 0
this : ↑Φ(p, k) x ≠ 0
a : K(p, k)
ha : a ≠ 0
hax : ↑Φ(p, k) x = a • x
b : K(p, k)
hb : b ≠ 0
m : ℤ
hmb : ↑φ(p, k) b * a = ↑p ^ m * b
F₀ : StandardOneDimIsocrystal p k m →ₗ[K(p, k)] V := LinearMap.toSpanSingleton K(p, k) V x
⊢ Function.Injective ↑F₀
[PROOFSTEP]
rw [← LinearMap.ker_eq_bot]
[GOAL]
case refine'_1
p : ℕ
inst✝⁶ : Fact (Nat.Prime p)
k✝ : Type u_1
inst✝⁵ : CommRing k✝
k : Type u_2
inst✝⁴ : Field k
inst✝³ : IsAlgClosed k
inst✝² : CharP k p
V : Type u_3
inst✝¹ : AddCommGroup V
inst✝ : Isocrystal p k V
h_dim : finrank K(p, k) V = 1
this✝ : Nontrivial V
x : V
hx : x ≠ 0
this : ↑Φ(p, k) x ≠ 0
a : K(p, k)
ha : a ≠ 0
hax : ↑Φ(p, k) x = a • x
b : K(p, k)
hb : b ≠ 0
m : ℤ
hmb : ↑φ(p, k) b * a = ↑p ^ m * b
F₀ : StandardOneDimIsocrystal p k m →ₗ[K(p, k)] V := LinearMap.toSpanSingleton K(p, k) V x
⊢ LinearMap.ker F₀ = ⊥
[PROOFSTEP]
exact LinearMap.ker_toSpanSingleton K(p, k) V hx
[GOAL]
case refine'_2
p : ℕ
inst✝⁶ : Fact (Nat.Prime p)
k✝ : Type u_1
inst✝⁵ : CommRing k✝
k : Type u_2
inst✝⁴ : Field k
inst✝³ : IsAlgClosed k
inst✝² : CharP k p
V : Type u_3
inst✝¹ : AddCommGroup V
inst✝ : Isocrystal p k V
h_dim : finrank K(p, k) V = 1
this✝ : Nontrivial V
x : V
hx : x ≠ 0
this : ↑Φ(p, k) x ≠ 0
a : K(p, k)
ha : a ≠ 0
hax : ↑Φ(p, k) x = a • x
b : K(p, k)
hb : b ≠ 0
m : ℤ
hmb : ↑φ(p, k) b * a = ↑p ^ m * b
F₀ : StandardOneDimIsocrystal p k m →ₗ[K(p, k)] V := LinearMap.toSpanSingleton K(p, k) V x
⊢ Function.Surjective ↑F₀
[PROOFSTEP]
rw [← LinearMap.range_eq_top]
[GOAL]
case refine'_2
p : ℕ
inst✝⁶ : Fact (Nat.Prime p)
k✝ : Type u_1
inst✝⁵ : CommRing k✝
k : Type u_2
inst✝⁴ : Field k
inst✝³ : IsAlgClosed k
inst✝² : CharP k p
V : Type u_3
inst✝¹ : AddCommGroup V
inst✝ : Isocrystal p k V
h_dim : finrank K(p, k) V = 1
this✝ : Nontrivial V
x : V
hx : x ≠ 0
this : ↑Φ(p, k) x ≠ 0
a : K(p, k)
ha : a ≠ 0
hax : ↑Φ(p, k) x = a • x
b : K(p, k)
hb : b ≠ 0
m : ℤ
hmb : ↑φ(p, k) b * a = ↑p ^ m * b
F₀ : StandardOneDimIsocrystal p k m →ₗ[K(p, k)] V := LinearMap.toSpanSingleton K(p, k) V x
⊢ LinearMap.range F₀ = ⊤
[PROOFSTEP]
rw [← (finrank_eq_one_iff_of_nonzero x hx).mp h_dim]
[GOAL]
case refine'_2
p : ℕ
inst✝⁶ : Fact (Nat.Prime p)
k✝ : Type u_1
inst✝⁵ : CommRing k✝
k : Type u_2
inst✝⁴ : Field k
inst✝³ : IsAlgClosed k
inst✝² : CharP k p
V : Type u_3
inst✝¹ : AddCommGroup V
inst✝ : Isocrystal p k V
h_dim : finrank K(p, k) V = 1
this✝ : Nontrivial V
x : V
hx : x ≠ 0
this : ↑Φ(p, k) x ≠ 0
a : K(p, k)
ha : a ≠ 0
hax : ↑Φ(p, k) x = a • x
b : K(p, k)
hb : b ≠ 0
m : ℤ
hmb : ↑φ(p, k) b * a = ↑p ^ m * b
F₀ : StandardOneDimIsocrystal p k m →ₗ[K(p, k)] V := LinearMap.toSpanSingleton K(p, k) V x
⊢ LinearMap.range F₀ = Submodule.span K(p, k) {x}
[PROOFSTEP]
rw [LinearMap.span_singleton_eq_range]
-- Porting note: `refine'` below gets confused when this is inlined.
[GOAL]
case h
p : ℕ
inst✝⁶ : Fact (Nat.Prime p)
k✝ : Type u_1
inst✝⁵ : CommRing k✝
k : Type u_2
inst✝⁴ : Field k
inst✝³ : IsAlgClosed k
inst✝² : CharP k p
V : Type u_3
inst✝¹ : AddCommGroup V
inst✝ : Isocrystal p k V
h_dim : finrank K(p, k) V = 1
this✝ : Nontrivial V
x : V
hx : x ≠ 0
this : ↑Φ(p, k) x ≠ 0
a : K(p, k)
ha : a ≠ 0
hax : ↑Φ(p, k) x = a • x
b : K(p, k)
hb : b ≠ 0
m : ℤ
hmb : ↑φ(p, k) b * a = ↑p ^ m * b
F₀ : StandardOneDimIsocrystal p k m →ₗ[K(p, k)] V := LinearMap.toSpanSingleton K(p, k) V x
F : StandardOneDimIsocrystal p k m ≃ₗ[K(p, k)] V :=
LinearEquiv.ofBijective F₀ (_ : Function.Injective ↑F₀ ∧ Function.Surjective ↑F₀)
⊢ Nonempty (StandardOneDimIsocrystal p k m ≃ᶠⁱ[p, k] V)
[PROOFSTEP]
let E := (LinearEquiv.smulOfNeZero K(p, k) _ _ hb).trans F
[GOAL]
case h
p : ℕ
inst✝⁶ : Fact (Nat.Prime p)
k✝ : Type u_1
inst✝⁵ : CommRing k✝
k : Type u_2
inst✝⁴ : Field k
inst✝³ : IsAlgClosed k
inst✝² : CharP k p
V : Type u_3
inst✝¹ : AddCommGroup V
inst✝ : Isocrystal p k V
h_dim : finrank K(p, k) V = 1
this✝ : Nontrivial V
x : V
hx : x ≠ 0
this : ↑Φ(p, k) x ≠ 0
a : K(p, k)
ha : a ≠ 0
hax : ↑Φ(p, k) x = a • x
b : K(p, k)
hb : b ≠ 0
m : ℤ
hmb : ↑φ(p, k) b * a = ↑p ^ m * b
F₀ : StandardOneDimIsocrystal p k m →ₗ[K(p, k)] V := LinearMap.toSpanSingleton K(p, k) V x
F : StandardOneDimIsocrystal p k m ≃ₗ[K(p, k)] V :=
LinearEquiv.ofBijective F₀ (_ : Function.Injective ↑F₀ ∧ Function.Surjective ↑F₀)
E : StandardOneDimIsocrystal p k m ≃ₗ[K(p, k)] V :=
LinearEquiv.trans (LinearEquiv.smulOfNeZero K(p, k) (StandardOneDimIsocrystal p k m) b hb) F
⊢ Nonempty (StandardOneDimIsocrystal p k m ≃ᶠⁱ[p, k] V)
[PROOFSTEP]
refine' ⟨⟨E, _⟩⟩
[GOAL]
case h
p : ℕ
inst✝⁶ : Fact (Nat.Prime p)
k✝ : Type u_1
inst✝⁵ : CommRing k✝
k : Type u_2
inst✝⁴ : Field k
inst✝³ : IsAlgClosed k
inst✝² : CharP k p
V : Type u_3
inst✝¹ : AddCommGroup V
inst✝ : Isocrystal p k V
h_dim : finrank K(p, k) V = 1
this✝ : Nontrivial V
x : V
hx : x ≠ 0
this : ↑Φ(p, k) x ≠ 0
a : K(p, k)
ha : a ≠ 0
hax : ↑Φ(p, k) x = a • x
b : K(p, k)
hb : b ≠ 0
m : ℤ
hmb : ↑φ(p, k) b * a = ↑p ^ m * b
F₀ : StandardOneDimIsocrystal p k m →ₗ[K(p, k)] V := LinearMap.toSpanSingleton K(p, k) V x
F : StandardOneDimIsocrystal p k m ≃ₗ[K(p, k)] V :=
LinearEquiv.ofBijective F₀ (_ : Function.Injective ↑F₀ ∧ Function.Surjective ↑F₀)
E : StandardOneDimIsocrystal p k m ≃ₗ[K(p, k)] V :=
LinearEquiv.trans (LinearEquiv.smulOfNeZero K(p, k) (StandardOneDimIsocrystal p k m) b hb) F
⊢ ∀ (x : StandardOneDimIsocrystal p k m), ↑Φ(p, k) (↑E x) = ↑E (↑Φ(p, k) x)
[PROOFSTEP]
simp only
[GOAL]
case h
p : ℕ
inst✝⁶ : Fact (Nat.Prime p)
k✝ : Type u_1
inst✝⁵ : CommRing k✝
k : Type u_2
inst✝⁴ : Field k
inst✝³ : IsAlgClosed k
inst✝² : CharP k p
V : Type u_3
inst✝¹ : AddCommGroup V
inst✝ : Isocrystal p k V
h_dim : finrank K(p, k) V = 1
this✝ : Nontrivial V
x : V
hx : x ≠ 0
this : ↑Φ(p, k) x ≠ 0
a : K(p, k)
ha : a ≠ 0
hax : ↑Φ(p, k) x = a • x
b : K(p, k)
hb : b ≠ 0
m : ℤ
hmb : ↑φ(p, k) b * a = ↑p ^ m * b
F₀ : StandardOneDimIsocrystal p k m →ₗ[K(p, k)] V := LinearMap.toSpanSingleton K(p, k) V x
F : StandardOneDimIsocrystal p k m ≃ₗ[K(p, k)] V :=
LinearEquiv.ofBijective F₀ (_ : Function.Injective ↑F₀ ∧ Function.Surjective ↑F₀)
E : StandardOneDimIsocrystal p k m ≃ₗ[K(p, k)] V :=
LinearEquiv.trans (LinearEquiv.smulOfNeZero K(p, k) (StandardOneDimIsocrystal p k m) b hb) F
⊢ ∀ (x_1 : StandardOneDimIsocrystal p k m),
↑Φ(p, k)
(↑(LinearEquiv.trans (LinearEquiv.smulOfNeZero K(p, k) (StandardOneDimIsocrystal p k m) b hb)
(LinearEquiv.ofBijective (LinearMap.toSpanSingleton K(p, k) V x)
(_ : Function.Injective ↑F₀ ∧ Function.Surjective ↑F₀)))
x_1) =
↑(LinearEquiv.trans (LinearEquiv.smulOfNeZero K(p, k) (StandardOneDimIsocrystal p k m) b hb)
(LinearEquiv.ofBijective (LinearMap.toSpanSingleton K(p, k) V x)
(_ : Function.Injective ↑F₀ ∧ Function.Surjective ↑F₀)))
(↑Φ(p, k) x_1)
[PROOFSTEP]
intro c
[GOAL]
case h
p : ℕ
inst✝⁶ : Fact (Nat.Prime p)
k✝ : Type u_1
inst✝⁵ : CommRing k✝
k : Type u_2
inst✝⁴ : Field k
inst✝³ : IsAlgClosed k
inst✝² : CharP k p
V : Type u_3
inst✝¹ : AddCommGroup V
inst✝ : Isocrystal p k V
h_dim : finrank K(p, k) V = 1
this✝ : Nontrivial V
x : V
hx : x ≠ 0
this : ↑Φ(p, k) x ≠ 0
a : K(p, k)
ha : a ≠ 0
hax : ↑Φ(p, k) x = a • x
b : K(p, k)
hb : b ≠ 0
m : ℤ
hmb : ↑φ(p, k) b * a = ↑p ^ m * b
F₀ : StandardOneDimIsocrystal p k m →ₗ[K(p, k)] V := LinearMap.toSpanSingleton K(p, k) V x
F : StandardOneDimIsocrystal p k m ≃ₗ[K(p, k)] V :=
LinearEquiv.ofBijective F₀ (_ : Function.Injective ↑F₀ ∧ Function.Surjective ↑F₀)
E : StandardOneDimIsocrystal p k m ≃ₗ[K(p, k)] V :=
LinearEquiv.trans (LinearEquiv.smulOfNeZero K(p, k) (StandardOneDimIsocrystal p k m) b hb) F
c : StandardOneDimIsocrystal p k m
⊢ ↑Φ(p, k)
(↑(LinearEquiv.trans (LinearEquiv.smulOfNeZero K(p, k) (StandardOneDimIsocrystal p k m) b hb)
(LinearEquiv.ofBijective (LinearMap.toSpanSingleton K(p, k) V x)
(_ : Function.Injective ↑F₀ ∧ Function.Surjective ↑F₀)))
c) =
↑(LinearEquiv.trans (LinearEquiv.smulOfNeZero K(p, k) (StandardOneDimIsocrystal p k m) b hb)
(LinearEquiv.ofBijective (LinearMap.toSpanSingleton K(p, k) V x)
(_ : Function.Injective ↑F₀ ∧ Function.Surjective ↑F₀)))
(↑Φ(p, k) c)
[PROOFSTEP]
rw [LinearEquiv.trans_apply, LinearEquiv.trans_apply, LinearEquiv.smulOfNeZero_apply, LinearEquiv.smulOfNeZero_apply,
LinearEquiv.map_smul, LinearEquiv.map_smul]
-- Porting note: was
-- simp only [hax, LinearEquiv.ofBijective_apply, LinearMap.toSpanSingleton_apply,
-- LinearEquiv.map_smulₛₗ, StandardOneDimIsocrystal.frobenius_apply, Algebra.id.smul_eq_mul]
[GOAL]
case h
p : ℕ
inst✝⁶ : Fact (Nat.Prime p)
k✝ : Type u_1
inst✝⁵ : CommRing k✝
k : Type u_2
inst✝⁴ : Field k
inst✝³ : IsAlgClosed k
inst✝² : CharP k p
V : Type u_3
inst✝¹ : AddCommGroup V
inst✝ : Isocrystal p k V
h_dim : finrank K(p, k) V = 1
this✝ : Nontrivial V
x : V
hx : x ≠ 0
this : ↑Φ(p, k) x ≠ 0
a : K(p, k)
ha : a ≠ 0
hax : ↑Φ(p, k) x = a • x
b : K(p, k)
hb : b ≠ 0
m : ℤ
hmb : ↑φ(p, k) b * a = ↑p ^ m * b
F₀ : StandardOneDimIsocrystal p k m →ₗ[K(p, k)] V := LinearMap.toSpanSingleton K(p, k) V x
F : StandardOneDimIsocrystal p k m ≃ₗ[K(p, k)] V :=
LinearEquiv.ofBijective F₀ (_ : Function.Injective ↑F₀ ∧ Function.Surjective ↑F₀)
E : StandardOneDimIsocrystal p k m ≃ₗ[K(p, k)] V :=
LinearEquiv.trans (LinearEquiv.smulOfNeZero K(p, k) (StandardOneDimIsocrystal p k m) b hb) F
c : StandardOneDimIsocrystal p k m
⊢ ↑Φ(p, k)
(b •
↑(LinearEquiv.ofBijective (LinearMap.toSpanSingleton K(p, k) V x)
(_ : Function.Injective ↑F₀ ∧ Function.Surjective ↑F₀))
c) =
b •
↑(LinearEquiv.ofBijective (LinearMap.toSpanSingleton K(p, k) V x)
(_ : Function.Injective ↑F₀ ∧ Function.Surjective ↑F₀))
(↑Φ(p, k) c)
[PROOFSTEP]
rw [LinearEquiv.ofBijective_apply, LinearEquiv.ofBijective_apply]
[GOAL]
case h
p : ℕ
inst✝⁶ : Fact (Nat.Prime p)
k✝ : Type u_1
inst✝⁵ : CommRing k✝
k : Type u_2
inst✝⁴ : Field k
inst✝³ : IsAlgClosed k
inst✝² : CharP k p
V : Type u_3
inst✝¹ : AddCommGroup V
inst✝ : Isocrystal p k V
h_dim : finrank K(p, k) V = 1
this✝ : Nontrivial V
x : V
hx : x ≠ 0
this : ↑Φ(p, k) x ≠ 0
a : K(p, k)
ha : a ≠ 0
hax : ↑Φ(p, k) x = a • x
b : K(p, k)
hb : b ≠ 0
m : ℤ
hmb : ↑φ(p, k) b * a = ↑p ^ m * b
F₀ : StandardOneDimIsocrystal p k m →ₗ[K(p, k)] V := LinearMap.toSpanSingleton K(p, k) V x
F : StandardOneDimIsocrystal p k m ≃ₗ[K(p, k)] V :=
LinearEquiv.ofBijective F₀ (_ : Function.Injective ↑F₀ ∧ Function.Surjective ↑F₀)
E : StandardOneDimIsocrystal p k m ≃ₗ[K(p, k)] V :=
LinearEquiv.trans (LinearEquiv.smulOfNeZero K(p, k) (StandardOneDimIsocrystal p k m) b hb) F
c : StandardOneDimIsocrystal p k m
⊢ ↑Φ(p, k) (b • ↑(LinearMap.toSpanSingleton K(p, k) V x) c) = b • ↑(LinearMap.toSpanSingleton K(p, k) V x) (↑Φ(p, k) c)
[PROOFSTEP]
erw [LinearMap.toSpanSingleton_apply K(p, k) V x c, LinearMap.toSpanSingleton_apply K(p, k) V x]
[GOAL]
case h
p : ℕ
inst✝⁶ : Fact (Nat.Prime p)
k✝ : Type u_1
inst✝⁵ : CommRing k✝
k : Type u_2
inst✝⁴ : Field k
inst✝³ : IsAlgClosed k
inst✝² : CharP k p
V : Type u_3
inst✝¹ : AddCommGroup V
inst✝ : Isocrystal p k V
h_dim : finrank K(p, k) V = 1
this✝ : Nontrivial V
x : V
hx : x ≠ 0
this : ↑Φ(p, k) x ≠ 0
a : K(p, k)
ha : a ≠ 0
hax : ↑Φ(p, k) x = a • x
b : K(p, k)
hb : b ≠ 0
m : ℤ
hmb : ↑φ(p, k) b * a = ↑p ^ m * b
F₀ : StandardOneDimIsocrystal p k m →ₗ[K(p, k)] V := LinearMap.toSpanSingleton K(p, k) V x
F : StandardOneDimIsocrystal p k m ≃ₗ[K(p, k)] V :=
LinearEquiv.ofBijective F₀ (_ : Function.Injective ↑F₀ ∧ Function.Surjective ↑F₀)
E : StandardOneDimIsocrystal p k m ≃ₗ[K(p, k)] V :=
LinearEquiv.trans (LinearEquiv.smulOfNeZero K(p, k) (StandardOneDimIsocrystal p k m) b hb) F
c : StandardOneDimIsocrystal p k m
⊢ ↑Φ(p, k) (b • c • x) = b • ↑Φ(p, k) c • x
[PROOFSTEP]
simp only [hax, LinearEquiv.ofBijective_apply, LinearMap.toSpanSingleton_apply, LinearEquiv.map_smulₛₗ,
StandardOneDimIsocrystal.frobenius_apply, Algebra.id.smul_eq_mul]
[GOAL]
case h
p : ℕ
inst✝⁶ : Fact (Nat.Prime p)
k✝ : Type u_1
inst✝⁵ : CommRing k✝
k : Type u_2
inst✝⁴ : Field k
inst✝³ : IsAlgClosed k
inst✝² : CharP k p
V : Type u_3
inst✝¹ : AddCommGroup V
inst✝ : Isocrystal p k V
h_dim : finrank K(p, k) V = 1
this✝ : Nontrivial V
x : V
hx : x ≠ 0
this : ↑Φ(p, k) x ≠ 0
a : K(p, k)
ha : a ≠ 0
hax : ↑Φ(p, k) x = a • x
b : K(p, k)
hb : b ≠ 0
m : ℤ
hmb : ↑φ(p, k) b * a = ↑p ^ m * b
F₀ : StandardOneDimIsocrystal p k m →ₗ[K(p, k)] V := LinearMap.toSpanSingleton K(p, k) V x
F : StandardOneDimIsocrystal p k m ≃ₗ[K(p, k)] V :=
LinearEquiv.ofBijective F₀ (_ : Function.Injective ↑F₀ ∧ Function.Surjective ↑F₀)
E : StandardOneDimIsocrystal p k m ≃ₗ[K(p, k)] V :=
LinearEquiv.trans (LinearEquiv.smulOfNeZero K(p, k) (StandardOneDimIsocrystal p k m) b hb) F
c : StandardOneDimIsocrystal p k m
⊢ ↑φ(p, k) b • ↑φ(p, k) c • a • x = b • (↑p ^ m • ↑φ(p, k) c) • x
[PROOFSTEP]
simp only [← mul_smul]
[GOAL]
case h
p : ℕ
inst✝⁶ : Fact (Nat.Prime p)
k✝ : Type u_1
inst✝⁵ : CommRing k✝
k : Type u_2
inst✝⁴ : Field k
inst✝³ : IsAlgClosed k
inst✝² : CharP k p
V : Type u_3
inst✝¹ : AddCommGroup V
inst✝ : Isocrystal p k V
h_dim : finrank K(p, k) V = 1
this✝ : Nontrivial V
x : V
hx : x ≠ 0
this : ↑Φ(p, k) x ≠ 0
a : K(p, k)
ha : a ≠ 0
hax : ↑Φ(p, k) x = a • x
b : K(p, k)
hb : b ≠ 0
m : ℤ
hmb : ↑φ(p, k) b * a = ↑p ^ m * b
F₀ : StandardOneDimIsocrystal p k m →ₗ[K(p, k)] V := LinearMap.toSpanSingleton K(p, k) V x
F : StandardOneDimIsocrystal p k m ≃ₗ[K(p, k)] V :=
LinearEquiv.ofBijective F₀ (_ : Function.Injective ↑F₀ ∧ Function.Surjective ↑F₀)
E : StandardOneDimIsocrystal p k m ≃ₗ[K(p, k)] V :=
LinearEquiv.trans (LinearEquiv.smulOfNeZero K(p, k) (StandardOneDimIsocrystal p k m) b hb) F
c : StandardOneDimIsocrystal p k m
⊢ (↑φ(p, k) b * (↑φ(p, k) c * a)) • x = (b * ↑p ^ m • ↑φ(p, k) c) • x
[PROOFSTEP]
congr 1
-- Porting note: added the next two lines
[GOAL]
case h.e_a
p : ℕ
inst✝⁶ : Fact (Nat.Prime p)
k✝ : Type u_1
inst✝⁵ : CommRing k✝
k : Type u_2
inst✝⁴ : Field k
inst✝³ : IsAlgClosed k
inst✝² : CharP k p
V : Type u_3
inst✝¹ : AddCommGroup V
inst✝ : Isocrystal p k V
h_dim : finrank K(p, k) V = 1
this✝ : Nontrivial V
x : V
hx : x ≠ 0
this : ↑Φ(p, k) x ≠ 0
a : K(p, k)
ha : a ≠ 0
hax : ↑Φ(p, k) x = a • x
b : K(p, k)
hb : b ≠ 0
m : ℤ
hmb : ↑φ(p, k) b * a = ↑p ^ m * b
F₀ : StandardOneDimIsocrystal p k m →ₗ[K(p, k)] V := LinearMap.toSpanSingleton K(p, k) V x
F : StandardOneDimIsocrystal p k m ≃ₗ[K(p, k)] V :=
LinearEquiv.ofBijective F₀ (_ : Function.Injective ↑F₀ ∧ Function.Surjective ↑F₀)
E : StandardOneDimIsocrystal p k m ≃ₗ[K(p, k)] V :=
LinearEquiv.trans (LinearEquiv.smulOfNeZero K(p, k) (StandardOneDimIsocrystal p k m) b hb) F
c : StandardOneDimIsocrystal p k m
⊢ ↑φ(p, k) b * (↑φ(p, k) c * a) = b * ↑p ^ m • ↑φ(p, k) c
[PROOFSTEP]
erw [smul_eq_mul]
[GOAL]
case h.e_a
p : ℕ
inst✝⁶ : Fact (Nat.Prime p)
k✝ : Type u_1
inst✝⁵ : CommRing k✝
k : Type u_2
inst✝⁴ : Field k
inst✝³ : IsAlgClosed k
inst✝² : CharP k p
V : Type u_3
inst✝¹ : AddCommGroup V
inst✝ : Isocrystal p k V
h_dim : finrank K(p, k) V = 1
this✝ : Nontrivial V
x : V
hx : x ≠ 0
this : ↑Φ(p, k) x ≠ 0
a : K(p, k)
ha : a ≠ 0
hax : ↑Φ(p, k) x = a • x
b : K(p, k)
hb : b ≠ 0
m : ℤ
hmb : ↑φ(p, k) b * a = ↑p ^ m * b
F₀ : StandardOneDimIsocrystal p k m →ₗ[K(p, k)] V := LinearMap.toSpanSingleton K(p, k) V x
F : StandardOneDimIsocrystal p k m ≃ₗ[K(p, k)] V :=
LinearEquiv.ofBijective F₀ (_ : Function.Injective ↑F₀ ∧ Function.Surjective ↑F₀)
E : StandardOneDimIsocrystal p k m ≃ₗ[K(p, k)] V :=
LinearEquiv.trans (LinearEquiv.smulOfNeZero K(p, k) (StandardOneDimIsocrystal p k m) b hb) F
c : StandardOneDimIsocrystal p k m
⊢ ↑φ(p, k) b * (↑φ(p, k) c * a) =
b *
(↑(IsFractionRing.lift (_ : Function.Injective ↑(algebraMap (WittVector p k) ((fun x => K(p, k)) c)))) (↑p ^ m) *
↑φ(p, k) c)
[PROOFSTEP]
simp only [map_zpow₀, map_natCast]
[GOAL]
case h.e_a
p : ℕ
inst✝⁶ : Fact (Nat.Prime p)
k✝ : Type u_1
inst✝⁵ : CommRing k✝
k : Type u_2
inst✝⁴ : Field k
inst✝³ : IsAlgClosed k
inst✝² : CharP k p
V : Type u_3
inst✝¹ : AddCommGroup V
inst✝ : Isocrystal p k V
h_dim : finrank K(p, k) V = 1
this✝ : Nontrivial V
x : V
hx : x ≠ 0
this : ↑Φ(p, k) x ≠ 0
a : K(p, k)
ha : a ≠ 0
hax : ↑Φ(p, k) x = a • x
b : K(p, k)
hb : b ≠ 0
m : ℤ
hmb : ↑φ(p, k) b * a = ↑p ^ m * b
F₀ : StandardOneDimIsocrystal p k m →ₗ[K(p, k)] V := LinearMap.toSpanSingleton K(p, k) V x
F : StandardOneDimIsocrystal p k m ≃ₗ[K(p, k)] V :=
LinearEquiv.ofBijective F₀ (_ : Function.Injective ↑F₀ ∧ Function.Surjective ↑F₀)
E : StandardOneDimIsocrystal p k m ≃ₗ[K(p, k)] V :=
LinearEquiv.trans (LinearEquiv.smulOfNeZero K(p, k) (StandardOneDimIsocrystal p k m) b hb) F
c : StandardOneDimIsocrystal p k m
⊢ ↑φ(p, k) b * (↑φ(p, k) c * a) = b * (↑p ^ m * ↑φ(p, k) c)
[PROOFSTEP]
linear_combination φ(p, k) c * hmb
|
{"mathlib_filename": "Mathlib.RingTheory.WittVector.Isocrystal", "llama_tokens": 14360}
|
using TransformVariables, Parameters, Statistics, StatsFuns, Optim
using NLSolversBase

function makeLoss(model)
    t = getTransform(model)
    fpre = @eval $(logdensity(model))
    f(par, data) = Base.invokelatest(fpre, par, data)
    loss(x, data) = -f(t(x), data)
    (loss=loss, t=t)
end

export getMAP

function getMAP(m::Model; kwargs...)
    @unpack loss, t = makeLoss(m)
    d = dimension(t)
    kwargs = Dict(kwargs)
    # @show kwargs
    data = get(kwargs, :data, NamedTuple{}())
    init = get(kwargs, :init, t(zeros(d)))
    f(x) = loss(x, data)
    opt = optimize(f, inverse(t)(init), method=LBFGS())
    t(Optim.minimizer(opt))
end
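
# --- Hypothetical usage sketch (not part of the original file) ---
# Assumes a Soss model `m` and observed data; all names below are
# illustrative, not taken from this file.
#
#     m = @model begin
#         μ ~ Normal(0, 5)
#         x ~ Normal(μ, 1) |> iid(10)
#     end
#
#     θ̂ = getMAP(m; data = (x = randn(10),))
#
# `getMAP` maps the parameters to unconstrained space via the transform
# `t`, minimizes the negative log-density with LBFGS, and transforms the
# optimum back to the parameter space.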
|
{"hexsha": "aa7138e3dff5c3fc49054411b63bcbbf0480ca78", "size": 661, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/optim.jl", "max_stars_repo_name": "devmotion/Soss.jl", "max_stars_repo_head_hexsha": "0440210c71bdb6c69bf7b1930043e29bd19da3a4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 388, "max_stars_repo_stars_event_min_datetime": "2018-01-02T23:08:12.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T00:36:40.000Z", "max_issues_repo_path": "src/optim.jl", "max_issues_repo_name": "devmotion/Soss.jl", "max_issues_repo_head_hexsha": "0440210c71bdb6c69bf7b1930043e29bd19da3a4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 228, "max_issues_repo_issues_event_min_datetime": "2018-01-07T03:26:35.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-25T22:15:32.000Z", "max_forks_repo_path": "src/optim.jl", "max_forks_repo_name": "devmotion/Soss.jl", "max_forks_repo_head_hexsha": "0440210c71bdb6c69bf7b1930043e29bd19da3a4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 41, "max_forks_repo_forks_event_min_datetime": "2018-09-30T00:29:42.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-07T09:42:59.000Z", "avg_line_length": 20.65625, "max_line_length": 66, "alphanum_fraction": 0.6263237519, "num_tokens": 197}
|
import inspect
import warnings
import re
from typing import Union

import tensorflow as tf
import numpy as np

ArrayLike = Union[np.ndarray, tf.Tensor]
TfTensor = tf.Tensor
FreeRV = ArrayLike


def stabilize(K, shift=None):
    r"""Add a diagonal shift to a covariance matrix."""
    K = tf.convert_to_tensor(K)
    diag = tf.linalg.diag_part(K)
    if shift is None:
        shift = 1e-6 if K.dtype == tf.float64 else 1e-4
    return tf.linalg.set_diag(K, diag + shift)


def _inherit_docs(frommeth):
    r"""Decorate a method or class to inherit docs from `frommeth`."""

    def inherit(tometh):
        methdocs = frommeth.__doc__
        if methdocs is None:
            raise ValueError("No docs to inherit!")
        tometh.__doc__ = methdocs
        return tometh

    return inherit


def _build_docs(meth_or_cls):
    r"""Decorate a method or class to build its doc strings."""
    pattern = re.compile(r"%\(.*\)")
    modname = inspect.getmodule(meth_or_cls)
    docs = meth_or_cls.__doc__
    while pattern.search(docs) is not None:
        docname = pattern.search(docs).group(0)[2:-1]
        try:
            docstr = getattr(modname, docname)
        except AttributeError:
            warnings.warn(
                f"While documenting {meth_or_cls.__name__}, attribute {docname} not found.",
                SyntaxWarning,
            )
            # FIXME: This should continue execution by skipping
            # the docs not found. Instead, currently, it just stops
            # execution!
            break
        docs = pattern.sub(docstr, docs, count=1)
    meth_or_cls.__doc__ = docs
    return meth_or_cls
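

# --- Illustrative usage of `stabilize` (not part of the original module) ---
# A minimal sketch: a rank-deficient kernel matrix has no Cholesky factor,
# but the small diagonal shift added by `stabilize` makes it positive
# definite, so the factorization succeeds.
if __name__ == "__main__":
    K = tf.constant([[1.0, 1.0], [1.0, 1.0]], dtype=tf.float64)
    L = tf.linalg.cholesky(stabilize(K))  # raises without the shift
    print(L)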
|
{"hexsha": "5f616ae0ec26f5eea46f5a627dd246afcfa42336", "size": 1635, "ext": "py", "lang": "Python", "max_stars_repo_path": "pymc4/gp/util.py", "max_stars_repo_name": "mailology/pymc4", "max_stars_repo_head_hexsha": "42642efd89e71787e4d1af6ab8b6ca241b861f35", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 789, "max_stars_repo_stars_event_min_datetime": "2018-05-04T15:25:22.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-10T14:11:22.000Z", "max_issues_repo_path": "pymc4/gp/util.py", "max_issues_repo_name": "jdehning/pymc4", "max_issues_repo_head_hexsha": "6b4e7af81083af271704aff9c74300161d783b23", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 283, "max_issues_repo_issues_event_min_datetime": "2018-05-29T19:23:09.000Z", "max_issues_repo_issues_event_max_datetime": "2021-10-04T09:05:57.000Z", "max_forks_repo_path": "pymc4/gp/util.py", "max_forks_repo_name": "jdehning/pymc4", "max_forks_repo_head_hexsha": "6b4e7af81083af271704aff9c74300161d783b23", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 134, "max_forks_repo_forks_event_min_datetime": "2018-05-30T23:53:46.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-30T11:09:31.000Z", "avg_line_length": 28.1896551724, "max_line_length": 92, "alphanum_fraction": 0.6360856269, "include": true, "reason": "import numpy", "num_tokens": 396}
|
% BEGIN LICENSE BLOCK
% Version: CMPL 1.1
%
% The contents of this file are subject to the Cisco-style Mozilla Public
% License Version 1.1 (the "License"); you may not use this file except
% in compliance with the License. You may obtain a copy of the License
% at www.eclipse-clp.org/license.
%
% Software distributed under the License is distributed on an "AS IS"
% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
% the License for the specific language governing rights and limitations
% under the License.
%
% The Original Code is The ECLiPSe Constraint Logic Programming System.
% The Initial Developer of the Original Code is Cisco Systems, Inc.
% Portions created by the Initial Developer are
% Copyright (C) 2006 Cisco Systems, Inc. All Rights Reserved.
%
% Contributor(s):
%
% END LICENSE BLOCK
\chapter{Introduction}
%HEVEA\cutdef[1]{section}
This manual documents the major \eclipse\ libraries.
They are enabling tools for the development and delivery
of planning and scheduling applications.
Since this is an area of active research and new developments,
these libraries are subject to technical improvements, addition
of new features and redesign as part of our ongoing work.
In this section we shall briefly summarize the constraint solvers that
are available as \eclipse\ libraries.
No examples are given here -- each solver has its own documentation
with examples for the interested reader.
\section{Suspended Goals: {\em suspend}}
The constraint solvers of \eclipse\ are all implemented using suspended
goals.
In fact the simplest implementation of any constraint is to suspend it
until all its variables are sufficiently instantiated, and then test it.
The library {\em suspend} contains versions of
the mathematical constraints \verb0>=0, \verb0>0,
\verb0=:=0, \verb0=\=0, \verb0=<0, \verb0<0
which behave like this\footnote{
Note that the global flag {\em coroutine} has a similar effect:
it causes the arithmetic comparisons as well as many other
built-in predicates to delay until they are sufficiently instantiated}.
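To give the flavour of this mechanism, here is an illustrative sketch
only (in Python, not \eclipse\ code): a suspended test records its free
variables and is re-checked each time it is woken, succeeding or failing
only once all of them are known.
\begin{verbatim}
class Suspended:
    def __init__(self, test, varnames):
        self.test, self.varnames = test, varnames
    def wake(self, env):
        # Only run the test once every variable is instantiated.
        if all(v in env for v in self.varnames):
            return self.test(env)
        return None  # still suspended

c = Suspended(lambda e: e['X'] >= e['Y'], ['X', 'Y'])
assert c.wake({'X': 3}) is None     # delays: Y is still unknown
assert c.wake({'X': 3, 'Y': 2})     # both known: the test succeeds
\end{verbatim}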
\section{Finite Domains: {\em ic}}
\subsection{{\em Integer Domain}}
The standard constraint solver offered by most constraint programming
systems is the {\em finite domain} solver, which applies constraint
propagation techniques developed in the AI community
\cite{VanHentenryck}.
\eclipse\ supports finite domain constraints via the {\em ic}
library\footnote{There is also an older implementation, the {\em fd} library,
whose use is deprecated}.
This library implements finite domains of integers, and the usual
functions and constraints on variables over these domains.
\subsection{Symbolic Domain: {\em ic\_symbolic}}
In addition to integer domains, \eclipse\ offers finite domains of
ordered non-numeric values, for example $\{red, green, blue\}$.
These are implemented by the {\em ic\_symbolic} library.
Whilst there is a standard set of constraints supported by the
{\em ic} library in \eclipse\ and in
most constraint programming systems, many more finite domain
constraints have been introduced which have uses in specific
applications and do not belong in a generic constraint programming
library.
The behaviour of these constraints is to prune the finite domains of
their variables, in just the same way as the standard
constraints.
Therefore \eclipse\ offers several further libraries which implement more
constraints using the {\em ic} library.
\subsection{Global Constraints: {\em ic\_global}}
One such library is {\em ic\_global}.
It supports a variety of constraints, each of which takes as an argument
a list of finite domain variables, of unspecified length.
Such constraints are called ``global'' constraints \cite{beldiceanu}.
Examples of such constraints, available from the {\em ic\_global} library
are
\verb0alldifferent/10, \verb0maxlist/20, \verb0occurrences/30 and
\verb0sorted/20.
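As an illustration of the pruning behaviour of such a constraint, the
following sketch (in Python, not \eclipse\ code) shows the simplest form
of \verb0alldifferent/10 propagation: whenever a variable's domain shrinks
to a single value, that value is removed from the other domains.
\begin{verbatim}
def prune_alldifferent(domains):
    changed = True
    while changed:
        changed = False
        for i, d in enumerate(domains):
            if len(d) == 1:
                v = next(iter(d))
                for j, e in enumerate(domains):
                    if j != i and v in e:
                        e.discard(v)
                        changed = True
    return domains

print(prune_alldifferent([{1}, {1, 2}, {1, 2, 3}]))
# -> [{1}, {2}, {3}]
\end{verbatim}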
\subsection{Scheduling Constraints}
There are several \eclipse\ libraries implementing global constraints for
scheduling applications. The constraints have the same semantics,
but different propagation. The constraints take a list
of tasks (start times, durations and resource needs), and a maximum
resource level. They reduce the finite domains of the task start times
by reasoning on resource bottlenecks \cite{lepape}. Three \eclipse\ libraries
implementing scheduling constraints are
{\em cumulative}, {\em edge\_finder} and {\em edge\_finder3}.
\section{Sets}
\eclipse\ offers constraint solving over the domain of finite sets of
integers. The {\em ic\_sets} library works together with the {\em ic} library
to reason about sets and set cardinality \cite{gervet}\footnote{
There is also an older implementation, the {\em conjunto} library, which
is generally less efficient, but implements sets of symbolic elements as
well as integer sets}.
\section{Intervals}
Besides finite domains, \eclipse\ also offers continuous domains in the
form of numeric intervals.
These are also implemented by the {\em ic} library, which is an integration
of an
integer finite domain solver and interval reasoning over continuous
intervals\footnote{
The {\em ic} library replaces the old {\em ria} interval solver, and
covers most of the functionality of the finite domain solver {\em fd}}.
It solves equations and inequations between
general arithmetic expressions over continuous or integral variables.
The expressions can include non-linear functions such as $\sin$, built-in
constants such as $\pi$. Piecewise linear unary functions are also available.
In addition to constraints, {\em ic} offers search techniques
({\em splitting} \cite{VanHentenryck:95} and {\em squashing}
\cite{lhomme96boosting})
for solving problems involving continuous numeric variables.
\section{User-Defined Constraints}
\subsection{Generalised Propagation: {\em propia}}
The predicate {\em infers} takes as one argument
any user-defined predicate, and as a second argument a form of
propagation to be applied to that predicate.
This functionality enables the user to turn any predicate into a
constraint \cite{LeProvost93b}. The forms of propagation include finite
domains and intervals.
\subsection{Constraint Handling Rules}
The user can also specify predicates using rules with guards
\cite{Fruehwirth}.
They delay until the guard is entailed or disentailed, and then
execute or terminate accordingly.
This functionality enables the user to implement constraints in a way
that is clearer than directly using the underlying {\em suspend}
library.
\section{Repair}
The {\em repair} library allows a {\em tentative} value to be
associated with any variable \cite{cp99wkshoptalk}.
This tentative value may violate constraints on the variable, in which
case the constraint is recorded in a list of violated constraints.
The repair library also supports propagation {\em invariants}
\cite{Localizer}.
Using invariants, if a variable's tentative
value is changed, the consequences of this change can be propagated to
any variables whose tentative values depend on the changed one.
The use of tentative values in search is illustrated in the \eclipse\
``Tutorial on Search Methods''.
\section{Linear Constraints}
There are two libraries supporting linear constraint solving. The
first {\em eplex} provides an interface to external linear
programming packages.
It offers flexibility and scalability, but may
require a license for the external software.
The second {\em clpqr} can support infinite precision, but is less
efficient and scalable and offers fewer facilities.
\subsection{External Linear Solvers: {\em eplex}}
{\em eplex} supports a tight integration \cite{Bockmayr} between
external linear solvers (CPLEX \cite{ILOG} and XPRESS \cite{Dash})
and \eclipse.
Constraints as well as variables can appear in both the external
linear solver and other \eclipse\ solvers.
Variable bounds are automatically passed from the \eclipse\ {\em range}
solver to the external solver.
Optimal solutions and other solutions can be returned to \eclipse\ as
required.
Search can be carried out either in \eclipse\ or in the external solver.
\subsection{{\em clpqr}}
The {\em clpqr} library offers two implementations of the Simplex
method for solving linear constraints \cite{Holzbauer}.
One version uses rationals and
is exact. The other version uses floats.
This library employs public domain software, and can be used for small
problems (with less than 100 variables).
\subsection{Piecewise Linear: {\em eplex\_relax}}
This library handles any user-defined piecewise linear function as a
constraint closely integrated with {\em eplex}. It offers better
pruning than the standard handling of piecewise linear constraints
in the external solvers \cite{Ajili}.
%\section{Combining Linear and Finite Domain Propagation}
%\subsection{{\em fdplex}}
%A simple way to achieve maximum propagation is to send all numeric
%constraints both to {\em fd} and to {\em eplex} \cite{RWH99}.
%This requirement is automatically supported by the {\em fdplex}
%library.
\subsection{Probing for Scheduling}
For scheduling applications where the cost is dependent on each start
time, a combination of solvers can be very powerful.
For example, we can use finite domain
propagation to reason on
resources and linear constraint solving to reason on cost \cite{HaniProbe}.
The {\em probing\_for\_scheduling} library supports such a combination,
via a similar user interface to the {\em cumulative} constraint mentioned
above.
\section{Other Libraries}
The solvers described above are just a few of the many libraries
available in ECLiPSe and listed in the \eclipse\ library directory.
Libraries are not only for constraint solvers -- for example, the
{\em \eclipse\ SQL Database Interface} library provides an interface to
external Database Management Systems, allowing users to add and retrieve data
from the database within an \eclipse\ program.
Any \eclipse\ user who has implemented a constraint solver is welcome to
send the code to the \eclipse\ team so that it can be added to
the available libraries.
Comments and suggestions on the existing libraries are also welcome!
%HEVEA\cutend
|
{"hexsha": "23999fd39f9d00660800d812b63b5a59ae8be0a4", "size": 10167, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "usr/eclipseclp/documents/libman/introduction.tex", "max_stars_repo_name": "lambdaxymox/barrelfish", "max_stars_repo_head_hexsha": "06a9f54721a8d96874a8939d8973178a562c342f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 111, "max_stars_repo_stars_event_min_datetime": "2015-02-03T02:57:27.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-01T23:57:09.000Z", "max_issues_repo_path": "usr/eclipseclp/documents/libman/introduction.tex", "max_issues_repo_name": "lambdaxymox/barrelfish", "max_issues_repo_head_hexsha": "06a9f54721a8d96874a8939d8973178a562c342f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 12, "max_issues_repo_issues_event_min_datetime": "2016-03-22T14:44:32.000Z", "max_issues_repo_issues_event_max_datetime": "2020-03-18T13:30:29.000Z", "max_forks_repo_path": "usr/eclipseclp/documents/libman/introduction.tex", "max_forks_repo_name": "lambdaxymox/barrelfish", "max_forks_repo_head_hexsha": "06a9f54721a8d96874a8939d8973178a562c342f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 55, "max_forks_repo_forks_event_min_datetime": "2015-02-03T05:28:12.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T05:00:03.000Z", "avg_line_length": 44.012987013, "max_line_length": 79, "alphanum_fraction": 0.7933510377, "num_tokens": 2276}
|
import numpy as np
from numpy.testing import assert_allclose
from cyvlfeat.quickshift.quickshift import quickshift
from cyvlfeat.test_util import lena

img = lena().astype(np.float32)


def test_quickshift_medoid_maps():
    i = img.copy()
    maps, gaps, estimate = quickshift(i, kernel_size=2, max_dist=10, medoid=True)
    assert maps.shape == (512, 512)
    assert_allclose(maps[0:5, 0], [514., 1026., 1026., 1538., 1538.],
                    rtol=1e-3)


def test_quickshift_medoid_gaps():
    i = img.copy()
    maps, gaps, estimate = quickshift(i, kernel_size=2, max_dist=10, medoid=True)
    assert gaps.shape == (512, 512)
    assert_allclose(gaps[0:5, 0], [228071.506, 290406.801, 323886.572, 323339.597,
                                   293392.239], rtol=1e-3)


def test_quickshift_medoid_estimate():
    i = img.copy()
    maps, gaps, estimate = quickshift(i, kernel_size=2, max_dist=10, medoid=True)
    assert estimate.shape == (512, 512)
    assert_allclose(estimate[0:5, 0], [8.699, 11.0754, 12.350, 12.322, 11.190],
                    rtol=1e-3)


def test_quickshift_quick_maps():
    i = img.copy()
    maps, gaps, estimate = quickshift(i, kernel_size=2, max_dist=10)
    assert maps.shape == (512, 512)
    assert_allclose(maps[0:5, 0], [2., 514., 1026., 1025., 1537.],
                    rtol=1e-3)


def test_quickshift_quick_gaps():
    i = img.copy()
    maps, gaps, estimate = quickshift(i, kernel_size=2, max_dist=10)
    assert gaps.shape == (512, 512)
    assert_allclose(gaps[0:6, 3], [1., 1., 1., 1.4142, 1., 2.2360],
                    rtol=1e-3)


def test_quickshift_quick_estimate():
    i = img.copy()
    maps, gaps, estimate = quickshift(i, kernel_size=2, max_dist=10)
    assert estimate.shape == (512, 512)
    assert_allclose(estimate[0:5, 0], [8.699, 11.0754, 12.350, 12.322, 11.190],
                    rtol=1e-3)
{"hexsha": "0f9c37296f2bb4e88f6aa5c6645a128b5179da40", "size": 1865, "ext": "py", "lang": "Python", "max_stars_repo_path": "cyvlfeat/quickshift/tests/quickshift_test.py", "max_stars_repo_name": "simmimourya1/cyvlfeat", "max_stars_repo_head_hexsha": "efc83dca40d335658620c49017ca0c814ad77132", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 20, "max_stars_repo_stars_event_min_datetime": "2016-06-17T01:17:12.000Z", "max_stars_repo_stars_event_max_datetime": "2018-02-20T17:38:21.000Z", "max_issues_repo_path": "cyvlfeat/quickshift/tests/quickshift_test.py", "max_issues_repo_name": "simmimourya/cyvlfeat", "max_issues_repo_head_hexsha": "efc83dca40d335658620c49017ca0c814ad77132", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2016-06-29T21:07:51.000Z", "max_issues_repo_issues_event_max_datetime": "2018-06-11T19:22:30.000Z", "max_forks_repo_path": "cyvlfeat/quickshift/tests/quickshift_test.py", "max_forks_repo_name": "simmimourya/cyvlfeat", "max_forks_repo_head_hexsha": "efc83dca40d335658620c49017ca0c814ad77132", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2018-02-20T10:01:59.000Z", "max_forks_repo_forks_event_max_datetime": "2018-02-20T12:19:37.000Z", "avg_line_length": 33.3035714286, "max_line_length": 82, "alphanum_fraction": 0.6273458445, "include": true, "reason": "import numpy,from numpy", "num_tokens": 615}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import json
import os

import torch.utils.data as data


class PANO(data.Dataset):
    default_resolution = (512, 768)
    num_classes = 1  # 1 or 32

    def __init__(self, opt, split):
        super(PANO, self).__init__()
        self.split = split  # split = test or train
        self.opt = opt
        self.data_dir = os.path.join(opt.data_dir, split)
        self.img_file_names = []
        for f in os.listdir(self.data_dir):
            if f[-3:] != 'txt' and 'thum' not in f:
                self.img_file_names.append(f)
        self.num_samples = len(self.img_file_names)
        self.max_objs = 32

    def __len__(self):
        return self.num_samples
{"hexsha": "0f980b06551ee38c1c47fb58a904a4b82b8c6766", "size": 795, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/lib/datasets/dataset/pano.py", "max_stars_repo_name": "jscsmk/CenterNet", "max_stars_repo_head_hexsha": "d7c643bba2b373c15abfa3d25ffd5304a313fa49", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/lib/datasets/dataset/pano.py", "max_issues_repo_name": "jscsmk/CenterNet", "max_issues_repo_head_hexsha": "d7c643bba2b373c15abfa3d25ffd5304a313fa49", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/lib/datasets/dataset/pano.py", "max_forks_repo_name": "jscsmk/CenterNet", "max_forks_repo_head_hexsha": "d7c643bba2b373c15abfa3d25ffd5304a313fa49", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.6451612903, "max_line_length": 57, "alphanum_fraction": 0.6465408805, "include": true, "reason": "import numpy", "num_tokens": 198}
|
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np
import plotly.express as px
import plotly.graph_objects as go

from covid_tools.calc import fill_missing_date, fill_missing_date_groups

sns.set()


def ts_plot_setup(dpi=100, x_rotation=60):
    fig, ax = plt.subplots(dpi=dpi)
    ax.tick_params('x', labelrotation=x_rotation)
    return fig, ax


def basic_ts_plot(df, dep_var_col, date_col='date', plot_type='scatter'):
    fig, ax = ts_plot_setup()
    if plot_type == 'scatter':
        ax.scatter(df[date_col], df[dep_var_col].astype('float64'))
    elif plot_type == 'line':
        ax.plot(date_col, dep_var_col, data=df)
    ax.set_xlim(df[date_col].min()-pd.Timedelta(1, 'day'),
                df[date_col].max()+pd.Timedelta(1, 'day'))
    ax.set_ylabel(dep_var_col)
    return fig, ax


def convert_to_np_nan(df):
    return df.copy().replace({pd.NA: np.nan})


def daily_and_avg_static(df, date_col, daily_change_col, rolling_avg_col, ax):
    ax.tick_params('x', labelrotation=90)
    sns.scatterplot(x=date_col, y=daily_change_col, data=df, ax=ax)
    sns.lineplot(x=date_col, y=rolling_avg_col, data=df, ax=ax,
                 label=rolling_avg_col)
    ax.legend()
    return ax


def comparative_static(df, date_col, dep_var_col, group_col, ax):
    ax.tick_params('x', labelrotation=90)
    sns.lineplot(x=date_col, y=dep_var_col, hue=group_col, data=df, ax=ax)
    return ax


def daily_and_avg_interactive(df, date_col, daily_change_col, rolling_avg_col):
    df = convert_to_np_nan(fill_missing_date(df, date_col))
    fig = go.Figure()
    fig.add_scatter(x=df[date_col], y=df[daily_change_col], mode='markers',
                    name=daily_change_col)
    fig.add_scatter(x=df[date_col], y=df[rolling_avg_col], mode='lines',
                    line_color=px.colors.qualitative.Plotly[0],
                    name=rolling_avg_col)
    return fig


def comparative_interactive(df, date_col, dep_var_raw_col, dep_var_norm_col,
                            group_col):
    df = convert_to_np_nan(fill_missing_date_groups(df, date_col, group_col))
    return px.line(df, x=date_col, y=dep_var_norm_col, color=group_col,
                   hover_data=[dep_var_raw_col])
{"hexsha": "5521f68239bdbcb7902cb5c9e845fda04bd58de4", "size": 2233, "ext": "py", "lang": "Python", "max_stars_repo_path": "plot.py", "max_stars_repo_name": "amhirsch/covid_tools", "max_stars_repo_head_hexsha": "0073e8db0dc13ebcde3d86b78b53e982e34ecd5b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "plot.py", "max_issues_repo_name": "amhirsch/covid_tools", "max_issues_repo_head_hexsha": "0073e8db0dc13ebcde3d86b78b53e982e34ecd5b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "plot.py", "max_forks_repo_name": "amhirsch/covid_tools", "max_forks_repo_head_hexsha": "0073e8db0dc13ebcde3d86b78b53e982e34ecd5b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.890625, "max_line_length": 79, "alphanum_fraction": 0.6918943126, "include": true, "reason": "import numpy", "num_tokens": 579}
|
/-
Copyright (c) 2018 Robert Y. Lewis. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Robert Y. Lewis, Chris Hughes
-/
import algebra.associated
import algebra.big_operators.basic
import ring_theory.valuation.basic
/-!
# Multiplicity of a divisor
For a commutative monoid, this file introduces the notion of multiplicity of a divisor and proves
several basic results on it.
## Main definitions
* `multiplicity a b`: for two elements `a` and `b` of a commutative monoid returns the largest
number `n` such that `a ^ n ∣ b` or infinity, written `⊤`, if `a ^ n ∣ b` for all natural numbers
`n`.
* `multiplicity.finite a b`: a predicate denoting that the multiplicity of `a` in `b` is finite.
-/
variables {α : Type*}
open nat part
open_locale big_operators
/-- `multiplicity a b` returns the largest natural number `n` such that
`a ^ n ∣ b`, as an `enat` (a natural number extended with infinity). If `∀ n, a ^ n ∣ b`,
then it returns `⊤`. -/
def multiplicity [comm_monoid α] [decidable_rel ((∣) : α → α → Prop)] (a b : α) : enat :=
enat.find $ λ n, ¬a ^ (n + 1) ∣ b
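/- Illustration (informal, not part of the original file): `multiplicity 2 (12 : ℕ) = 2`,
since `2 ^ 2 ∣ 12` but `¬ 2 ^ 3 ∣ 12`; and `multiplicity 2 (0 : ℕ) = ⊤`,
since `2 ^ n ∣ 0` for every `n`. -/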
namespace multiplicity
section comm_monoid
variables [comm_monoid α]
/-- `multiplicity.finite a b` indicates that the multiplicity of `a` in `b` is finite. -/
@[reducible] def finite (a b : α) : Prop := ∃ n : ℕ, ¬a ^ (n + 1) ∣ b
lemma finite_iff_dom [decidable_rel ((∣) : α → α → Prop)] {a b : α} :
finite a b ↔ (multiplicity a b).dom := iff.rfl
lemma finite_def {a b : α} : finite a b ↔ ∃ n : ℕ, ¬a ^ (n + 1) ∣ b := iff.rfl
@[norm_cast]
theorem int.coe_nat_multiplicity (a b : ℕ) :
multiplicity (a : ℤ) (b : ℤ) = multiplicity a b :=
begin
apply part.ext',
{ repeat { rw [← finite_iff_dom, finite_def] },
norm_cast },
{ intros h1 h2,
apply _root_.le_antisymm; { apply nat.find_mono, norm_cast, simp } }
end
lemma not_finite_iff_forall {a b : α} : (¬ finite a b) ↔ ∀ n : ℕ, a ^ n ∣ b :=
⟨λ h n, nat.cases_on n (by { rw pow_zero, exact one_dvd _ }) (by simpa [finite, not_not] using h),
by simp [finite, multiplicity, not_not]; tauto⟩
lemma not_unit_of_finite {a b : α} (h : finite a b) : ¬is_unit a :=
let ⟨n, hn⟩ := h in mt (is_unit_iff_forall_dvd.1 ∘ is_unit.pow (n + 1)) $
λ h, hn (h b)
lemma finite_of_finite_mul_left {a b c : α} : finite a (b * c) → finite a c :=
λ ⟨n, hn⟩, ⟨n, λ h, hn (h.trans (by simp [mul_pow]))⟩
lemma finite_of_finite_mul_right {a b c : α} : finite a (b * c) → finite a b :=
by rw mul_comm; exact finite_of_finite_mul_left
variable [decidable_rel ((∣) : α → α → Prop)]
lemma pow_dvd_of_le_multiplicity {a b : α} {k : ℕ} : (k : enat) ≤ multiplicity a b → a ^ k ∣ b :=
by { rw ← enat.some_eq_coe, exact
nat.cases_on k (λ _, by { rw pow_zero, exact one_dvd _ })
(λ k ⟨h₁, h₂⟩, by_contradiction (λ hk, (nat.find_min _ (lt_of_succ_le (h₂ ⟨k, hk⟩)) hk))) }
lemma pow_multiplicity_dvd {a b : α} (h : finite a b) : a ^ get (multiplicity a b) h ∣ b :=
pow_dvd_of_le_multiplicity (by rw enat.coe_get)
lemma is_greatest {a b : α} {m : ℕ} (hm : multiplicity a b < m) : ¬a ^ m ∣ b :=
λ h, by rw [enat.lt_coe_iff] at hm; exact nat.find_spec hm.fst ((pow_dvd_pow _ hm.snd).trans h)
lemma is_greatest' {a b : α} {m : ℕ} (h : finite a b) (hm : get (multiplicity a b) h < m) :
¬a ^ m ∣ b :=
is_greatest (by rwa [← enat.coe_lt_coe, enat.coe_get] at hm)
lemma le_multiplicity_of_pow_dvd {a b : α} {k : ℕ}
  (hk : a ^ k ∣ b) : (k : enat) ≤ multiplicity a b :=
le_of_not_gt $ λ hk', is_greatest hk' hk
lemma unique {a b : α} {k : ℕ} (hk : a ^ k ∣ b) (hsucc : ¬a ^ (k + 1) ∣ b) :
(k : enat) = multiplicity a b :=
le_antisymm (le_of_not_gt (λ hk', is_greatest hk' hk)) $
have finite a b, from ⟨k, hsucc⟩,
by { rw [enat.le_coe_iff], exact ⟨this, nat.find_min' _ hsucc⟩ }
lemma unique' {a b : α} {k : ℕ} (hk : a ^ k ∣ b) (hsucc : ¬ a ^ (k + 1) ∣ b) :
k = get (multiplicity a b) ⟨k, hsucc⟩ :=
by rw [← enat.coe_inj, enat.coe_get, unique hk hsucc]
lemma pow_dvd_iff_le_multiplicity {a b : α}
{k : ℕ} : a ^ k ∣ b ↔ (k : enat) ≤ multiplicity a b :=
⟨le_multiplicity_of_pow_dvd, pow_dvd_of_le_multiplicity⟩
lemma multiplicity_lt_iff_neg_dvd {a b : α} {k : ℕ} :
multiplicity a b < (k : enat) ↔ ¬ a ^ k ∣ b :=
by { rw [pow_dvd_iff_le_multiplicity, not_le] }
lemma eq_coe_iff {a b : α} {n : ℕ} :
multiplicity a b = (n : enat) ↔ a ^ n ∣ b ∧ ¬a ^ (n + 1) ∣ b :=
begin
rw [← enat.some_eq_coe],
exact ⟨λ h, let ⟨h₁, h₂⟩ := eq_some_iff.1 h in
h₂ ▸ ⟨pow_multiplicity_dvd _, is_greatest
(by { rw [enat.lt_coe_iff], exact ⟨h₁, lt_succ_self _⟩ })⟩,
λ h, eq_some_iff.2 ⟨⟨n, h.2⟩, eq.symm $ unique' h.1 h.2⟩⟩
end
lemma eq_top_iff {a b : α} :
multiplicity a b = ⊤ ↔ ∀ n : ℕ, a ^ n ∣ b :=
(enat.find_eq_top_iff _).trans $
by { simp only [not_not],
exact ⟨λ h n, nat.cases_on n (by { rw pow_zero, exact one_dvd _}) (λ n, h _), λ h n, h _⟩ }
@[simp] lemma is_unit_left {a : α} (b : α) (ha : is_unit a) : multiplicity a b = ⊤ :=
eq_top_iff.2 (λ _, is_unit_iff_forall_dvd.1 (ha.pow _) _)
lemma is_unit_right {a b : α} (ha : ¬is_unit a) (hb : is_unit b) :
multiplicity a b = 0 :=
eq_coe_iff.2 ⟨show a ^ 0 ∣ b, by simp only [pow_zero, one_dvd],
by { rw pow_one, exact λ h, mt (is_unit_of_dvd_unit h) ha hb }⟩
@[simp] lemma one_left (b : α) : multiplicity 1 b = ⊤ := is_unit_left b is_unit_one
lemma one_right {a : α} (ha : ¬is_unit a) : multiplicity a 1 = 0 := is_unit_right ha is_unit_one
@[simp] lemma get_one_right {a : α} (ha : finite a 1) : get (multiplicity a 1) ha = 0 :=
begin
rw [enat.get_eq_iff_eq_coe, eq_coe_iff, pow_zero],
simpa [is_unit_iff_dvd_one.symm] using not_unit_of_finite ha,
end
@[simp] lemma unit_left (a : α) (u : units α) : multiplicity (u : α) a = ⊤ :=
is_unit_left a u.is_unit
lemma unit_right {a : α} (ha : ¬is_unit a) (u : units α) : multiplicity a u = 0 :=
is_unit_right ha u.is_unit
lemma multiplicity_eq_zero_of_not_dvd {a b : α} (ha : ¬a ∣ b) : multiplicity a b = 0 :=
by { rw [← nat.cast_zero, eq_coe_iff], simpa }
lemma eq_top_iff_not_finite {a b : α} : multiplicity a b = ⊤ ↔ ¬ finite a b :=
part.eq_none_iff'
lemma ne_top_iff_finite {a b : α} : multiplicity a b ≠ ⊤ ↔ finite a b :=
by rw [ne.def, eq_top_iff_not_finite, not_not]
lemma lt_top_iff_finite {a b : α} : multiplicity a b < ⊤ ↔ finite a b :=
by rw [lt_top_iff_ne_top, ne_top_iff_finite]
open_locale classical
lemma multiplicity_le_multiplicity_iff {a b c d : α} : multiplicity a b ≤ multiplicity c d ↔
(∀ n : ℕ, a ^ n ∣ b → c ^ n ∣ d) :=
⟨λ h n hab, (pow_dvd_of_le_multiplicity (le_trans (le_multiplicity_of_pow_dvd hab) h)),
λ h, if hab : finite a b
then by rw [← enat.coe_get (finite_iff_dom.1 hab)];
exact le_multiplicity_of_pow_dvd (h _ (pow_multiplicity_dvd _))
else
have ∀ n : ℕ, c ^ n ∣ d, from λ n, h n (not_finite_iff_forall.1 hab _),
by rw [eq_top_iff_not_finite.2 hab, eq_top_iff_not_finite.2
(not_finite_iff_forall.2 this)]⟩
lemma multiplicity_le_multiplicity_of_dvd_left {a b c : α} (hdvd : a ∣ b) :
multiplicity b c ≤ multiplicity a c :=
multiplicity_le_multiplicity_iff.2 $ λ n h, (pow_dvd_pow_of_dvd hdvd n).trans h
lemma eq_of_associated_left {a b c : α} (h : associated a b) :
multiplicity b c = multiplicity a c :=
le_antisymm (multiplicity_le_multiplicity_of_dvd_left h.dvd)
(multiplicity_le_multiplicity_of_dvd_left h.symm.dvd)
lemma multiplicity_le_multiplicity_of_dvd_right {a b c : α} (h : b ∣ c) :
multiplicity a b ≤ multiplicity a c :=
multiplicity_le_multiplicity_iff.2 $ λ n hb, hb.trans h
lemma eq_of_associated_right {a b c : α} (h : associated b c) :
multiplicity a b = multiplicity a c :=
le_antisymm (multiplicity_le_multiplicity_of_dvd_right h.dvd)
(multiplicity_le_multiplicity_of_dvd_right h.symm.dvd)
lemma dvd_of_multiplicity_pos {a b : α} (h : (0 : enat) < multiplicity a b) : a ∣ b :=
begin
rw ← pow_one a,
apply pow_dvd_of_le_multiplicity,
simpa only [nat.cast_one, enat.pos_iff_one_le] using h
end
lemma dvd_iff_multiplicity_pos {a b : α} : (0 : enat) < multiplicity a b ↔ a ∣ b :=
⟨dvd_of_multiplicity_pos,
λ hdvd, lt_of_le_of_ne (zero_le _) (λ heq, is_greatest
(show multiplicity a b < ↑1,
by simpa only [heq, nat.cast_zero] using enat.coe_lt_coe.mpr zero_lt_one)
(by rwa pow_one a))⟩
lemma finite_nat_iff {a b : ℕ} : finite a b ↔ (a ≠ 1 ∧ 0 < b) :=
begin
rw [← not_iff_not, not_finite_iff_forall, not_and_distrib, ne.def,
not_not, not_lt, nat.le_zero_iff],
exact ⟨λ h, or_iff_not_imp_right.2 (λ hb,
have ha : a ≠ 0, from λ ha, by simpa [ha] using h 1,
by_contradiction (λ ha1 : a ≠ 1,
have ha_gt_one : 1 < a, from
lt_of_not_ge (λ ha', by { clear h, revert ha ha1, dec_trivial! }),
not_lt_of_ge (le_of_dvd (nat.pos_of_ne_zero hb) (h b))
(lt_pow_self ha_gt_one b))),
λ h, by cases h; simp *⟩
end
end comm_monoid
section comm_monoid_with_zero
variable [comm_monoid_with_zero α]
lemma ne_zero_of_finite {a b : α} (h : finite a b) : b ≠ 0 :=
let ⟨n, hn⟩ := h in λ hb, by simpa [hb] using hn
variable [decidable_rel ((∣) : α → α → Prop)]
@[simp] protected lemma zero (a : α) : multiplicity a 0 = ⊤ :=
part.eq_none_iff.2 (λ n ⟨⟨k, hk⟩, _⟩, hk (dvd_zero _))
@[simp] lemma multiplicity_zero_eq_zero_of_ne_zero (a : α) (ha : a ≠ 0) : multiplicity 0 a = 0 :=
begin
apply multiplicity.multiplicity_eq_zero_of_not_dvd,
rwa zero_dvd_iff,
end
end comm_monoid_with_zero
section comm_semiring
variables [comm_semiring α] [decidable_rel ((∣) : α → α → Prop)]
lemma min_le_multiplicity_add {p a b : α} :
min (multiplicity p a) (multiplicity p b) ≤ multiplicity p (a + b) :=
(le_total (multiplicity p a) (multiplicity p b)).elim
(λ h, by rw [min_eq_left h, multiplicity_le_multiplicity_iff];
exact λ n hn, dvd_add hn (multiplicity_le_multiplicity_iff.1 h n hn))
(λ h, by rw [min_eq_right h, multiplicity_le_multiplicity_iff];
exact λ n hn, dvd_add (multiplicity_le_multiplicity_iff.1 h n hn) hn)
end comm_semiring
section comm_ring
variables [comm_ring α] [decidable_rel ((∣) : α → α → Prop)]
open_locale classical
@[simp] protected lemma neg (a b : α) : multiplicity a (-b) = multiplicity a b :=
part.ext' (by simp only [multiplicity, enat.find, dvd_neg])
(λ h₁ h₂, enat.coe_inj.1 (by rw [enat.coe_get]; exact
eq.symm (unique ((dvd_neg _ _).2 (pow_multiplicity_dvd _))
(mt (dvd_neg _ _).1 (is_greatest' _ (lt_succ_self _))))))
lemma multiplicity_add_of_gt {p a b : α} (h : multiplicity p b < multiplicity p a) :
multiplicity p (a + b) = multiplicity p b :=
begin
apply le_antisymm,
{ apply enat.le_of_lt_add_one,
cases enat.ne_top_iff.mp (enat.ne_top_of_lt h) with k hk,
rw [hk], rw_mod_cast [multiplicity_lt_iff_neg_dvd], intro h_dvd,
rw [← dvd_add_iff_right] at h_dvd,
apply multiplicity.is_greatest _ h_dvd, rw [hk], apply_mod_cast nat.lt_succ_self,
rw [pow_dvd_iff_le_multiplicity, nat.cast_add, ← hk, nat.cast_one],
exact enat.add_one_le_of_lt h },
{ convert min_le_multiplicity_add, rw [min_eq_right (le_of_lt h)] }
end
lemma multiplicity_sub_of_gt {p a b : α} (h : multiplicity p b < multiplicity p a) :
multiplicity p (a - b) = multiplicity p b :=
by { rw [sub_eq_add_neg, multiplicity_add_of_gt]; rwa [multiplicity.neg] }
lemma multiplicity_add_eq_min {p a b : α} (h : multiplicity p a ≠ multiplicity p b) :
multiplicity p (a + b) = min (multiplicity p a) (multiplicity p b) :=
begin
rcases lt_trichotomy (multiplicity p a) (multiplicity p b) with hab|hab|hab,
{ rw [add_comm, multiplicity_add_of_gt hab, min_eq_left], exact le_of_lt hab },
{ contradiction },
{ rw [multiplicity_add_of_gt hab, min_eq_right], exact le_of_lt hab},
end
end comm_ring
section comm_cancel_monoid_with_zero
variables [comm_cancel_monoid_with_zero α]
lemma finite_mul_aux {p : α} (hp : prime p) : ∀ {n m : ℕ} {a b : α},
¬p ^ (n + 1) ∣ a → ¬p ^ (m + 1) ∣ b → ¬p ^ (n + m + 1) ∣ a * b
| n m := λ a b ha hb ⟨s, hs⟩,
have p ∣ a * b, from ⟨p ^ (n + m) * s,
by simp [hs, pow_add, mul_comm, mul_assoc, mul_left_comm]⟩,
(hp.2.2 a b this).elim
(λ ⟨x, hx⟩, have hn0 : 0 < n,
from nat.pos_of_ne_zero (λ hn0, by clear _fun_match _fun_match; simpa [hx, hn0] using ha),
have wf : (n - 1) < n, from tsub_lt_self hn0 dec_trivial,
have hpx : ¬ p ^ (n - 1 + 1) ∣ x,
from λ ⟨y, hy⟩, ha (hx.symm ▸ ⟨y, mul_right_cancel₀ hp.1
$ by rw [tsub_add_cancel_of_le (succ_le_of_lt hn0)] at hy;
simp [hy, pow_add, mul_comm, mul_assoc, mul_left_comm]⟩),
have 1 ≤ n + m, from le_trans hn0 (nat.le_add_right n m),
finite_mul_aux hpx hb ⟨s, mul_right_cancel₀ hp.1 begin
rw [tsub_add_eq_add_tsub (succ_le_of_lt hn0), tsub_add_cancel_of_le this],
clear _fun_match _fun_match finite_mul_aux,
simp [*, mul_comm, mul_assoc, mul_left_comm, pow_add] at *
end⟩)
(λ ⟨x, hx⟩, have hm0 : 0 < m,
from nat.pos_of_ne_zero (λ hm0, by clear _fun_match _fun_match; simpa [hx, hm0] using hb),
have wf : (m - 1) < m, from tsub_lt_self hm0 dec_trivial,
have hpx : ¬ p ^ (m - 1 + 1) ∣ x,
from λ ⟨y, hy⟩, hb (hx.symm ▸ ⟨y, mul_right_cancel₀ hp.1
$ by rw [tsub_add_cancel_of_le (succ_le_of_lt hm0)] at hy;
simp [hy, pow_add, mul_comm, mul_assoc, mul_left_comm]⟩),
finite_mul_aux ha hpx ⟨s, mul_right_cancel₀ hp.1 begin
rw [add_assoc, tsub_add_cancel_of_le (succ_le_of_lt hm0)],
clear _fun_match _fun_match finite_mul_aux,
simp [*, mul_comm, mul_assoc, mul_left_comm, pow_add] at *
end⟩)
lemma finite_mul {p a b : α} (hp : prime p) : finite p a → finite p b → finite p (a * b) :=
λ ⟨n, hn⟩ ⟨m, hm⟩, ⟨n + m, finite_mul_aux hp hn hm⟩
lemma finite_mul_iff {p a b : α} (hp : prime p) : finite p (a * b) ↔ finite p a ∧ finite p b :=
⟨λ h, ⟨finite_of_finite_mul_right h, finite_of_finite_mul_left h⟩,
λ h, finite_mul hp h.1 h.2⟩
lemma finite_pow {p a : α} (hp : prime p) : Π {k : ℕ} (ha : finite p a), finite p (a ^ k)
| 0 ha := ⟨0, by simp [mt is_unit_iff_dvd_one.2 hp.2.1]⟩
| (k+1) ha := by rw [pow_succ]; exact finite_mul hp ha (finite_pow ha)
variable [decidable_rel ((∣) : α → α → Prop)]
@[simp] lemma multiplicity_self {a : α} (ha : ¬is_unit a) (ha0 : a ≠ 0) :
multiplicity a a = 1 :=
by { rw ← nat.cast_one, exact
eq_coe_iff.2 ⟨by simp, λ ⟨b, hb⟩, ha (is_unit_iff_dvd_one.2
⟨b, mul_left_cancel₀ ha0 $ by { clear _fun_match,
simpa [pow_succ, mul_assoc] using hb }⟩)⟩ }
@[simp] lemma get_multiplicity_self {a : α} (ha : finite a a) :
get (multiplicity a a) ha = 1 :=
enat.get_eq_iff_eq_coe.2 (eq_coe_iff.2
⟨by simp, λ ⟨b, hb⟩,
by rw [← mul_one a, pow_add, pow_one, mul_assoc, mul_assoc,
mul_right_inj' (ne_zero_of_finite ha)] at hb;
exact mt is_unit_iff_dvd_one.2 (not_unit_of_finite ha)
⟨b, by clear _fun_match; simp * at *⟩⟩)
protected lemma mul' {p a b : α} (hp : prime p)
(h : (multiplicity p (a * b)).dom) :
get (multiplicity p (a * b)) h =
get (multiplicity p a) ((finite_mul_iff hp).1 h).1 +
get (multiplicity p b) ((finite_mul_iff hp).1 h).2 :=
have hdiva : p ^ get (multiplicity p a) ((finite_mul_iff hp).1 h).1 ∣ a,
from pow_multiplicity_dvd _,
have hdivb : p ^ get (multiplicity p b) ((finite_mul_iff hp).1 h).2 ∣ b,
from pow_multiplicity_dvd _,
have hpoweq : p ^ (get (multiplicity p a) ((finite_mul_iff hp).1 h).1 +
get (multiplicity p b) ((finite_mul_iff hp).1 h).2) =
p ^ get (multiplicity p a) ((finite_mul_iff hp).1 h).1 *
p ^ get (multiplicity p b) ((finite_mul_iff hp).1 h).2,
by simp [pow_add],
have hdiv : p ^ (get (multiplicity p a) ((finite_mul_iff hp).1 h).1 +
get (multiplicity p b) ((finite_mul_iff hp).1 h).2) ∣ a * b,
by rw [hpoweq]; apply mul_dvd_mul; assumption,
have hsucc : ¬p ^ ((get (multiplicity p a) ((finite_mul_iff hp).1 h).1 +
get (multiplicity p b) ((finite_mul_iff hp).1 h).2) + 1) ∣ a * b,
from λ h, by exact
not_or (is_greatest' _ (lt_succ_self _)) (is_greatest' _ (lt_succ_self _))
(_root_.succ_dvd_or_succ_dvd_of_succ_sum_dvd_mul hp hdiva hdivb h),
by rw [← enat.coe_inj, enat.coe_get, eq_coe_iff];
exact ⟨hdiv, hsucc⟩
open_locale classical
protected lemma mul {p a b : α} (hp : prime p) :
multiplicity p (a * b) = multiplicity p a + multiplicity p b :=
if h : finite p a ∧ finite p b then
by rw [← enat.coe_get (finite_iff_dom.1 h.1), ← enat.coe_get (finite_iff_dom.1 h.2),
← enat.coe_get (finite_iff_dom.1 (finite_mul hp h.1 h.2)),
← nat.cast_add, enat.coe_inj, multiplicity.mul' hp]; refl
else begin
rw [eq_top_iff_not_finite.2 (mt (finite_mul_iff hp).1 h)],
cases not_and_distrib.1 h with h h;
simp [eq_top_iff_not_finite.2 h]
end
lemma finset.prod {β : Type*} {p : α} (hp : prime p) (s : finset β) (f : β → α) :
multiplicity p (∏ x in s, f x) = ∑ x in s, multiplicity p (f x) :=
begin
classical,
induction s using finset.induction with a s has ih h,
{ simp only [finset.sum_empty, finset.prod_empty],
convert one_right hp.not_unit },
{ simp [has, ← ih],
convert multiplicity.mul hp }
end
protected lemma pow' {p a : α} (hp : prime p) (ha : finite p a) : ∀ {k : ℕ},
get (multiplicity p (a ^ k)) (finite_pow hp ha) = k * get (multiplicity p a) ha
| 0 := by simp [one_right hp.not_unit]
| (k+1) := have multiplicity p (a ^ (k + 1)) = multiplicity p (a * a ^ k), by rw pow_succ,
by rw [get_eq_get_of_eq _ _ this, multiplicity.mul' hp, pow', add_mul, one_mul, add_comm]
lemma pow {p a : α} (hp : prime p) : ∀ {k : ℕ},
multiplicity p (a ^ k) = k • (multiplicity p a)
| 0 := by simp [one_right hp.not_unit]
| (succ k) := by simp [pow_succ, succ_nsmul, pow, multiplicity.mul hp]
lemma multiplicity_pow_self {p : α} (h0 : p ≠ 0) (hu : ¬ is_unit p) (n : ℕ) :
multiplicity p (p ^ n) = n :=
by { rw [eq_coe_iff], use dvd_rfl, rw [pow_dvd_pow_iff h0 hu], apply nat.not_succ_le_self }
lemma multiplicity_pow_self_of_prime {p : α} (hp : prime p) (n : ℕ) :
multiplicity p (p ^ n) = n :=
multiplicity_pow_self hp.ne_zero hp.not_unit n
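/- Illustration (informal, not part of the original file): for the prime `3`,
`multiplicity 3 (3 ^ 4 : ℕ) = 4`, i.e. the lemma recovers the exponent of a
prime power exactly. -/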
end comm_cancel_monoid_with_zero
section valuation
variables {R : Type*} [comm_ring R] [is_domain R] {p : R}
[decidable_rel (has_dvd.dvd : R → R → Prop)]
/-- `multiplicity` of a prime in an integral domain as an additive valuation to `enat`. -/
noncomputable def add_valuation (hp : prime p) : add_valuation R enat :=
add_valuation.of (multiplicity p) (multiplicity.zero _) (one_right hp.not_unit)
(λ _ _, min_le_multiplicity_add) (λ a b, multiplicity.mul hp)
@[simp]
lemma add_valuation_apply {hp : prime p} {r : R} : add_valuation hp r = multiplicity p r := rfl
end valuation
end multiplicity
section nat
open multiplicity
lemma multiplicity_eq_zero_of_coprime {p a b : ℕ} (hp : p ≠ 1)
(hle : multiplicity p a ≤ multiplicity p b)
(hab : nat.coprime a b) : multiplicity p a = 0 :=
begin
rw [multiplicity_le_multiplicity_iff] at hle,
rw [← nonpos_iff_eq_zero, ← not_lt, enat.pos_iff_one_le, ← nat.cast_one,
← pow_dvd_iff_le_multiplicity],
assume h,
have := nat.dvd_gcd h (hle _ h),
rw [coprime.gcd_eq_one hab, nat.dvd_one, pow_one] at this,
exact hp this
end
end nat
|
{"author": "jjaassoonn", "repo": "projective_space", "sha": "11fe19fe9d7991a272e7a40be4b6ad9b0c10c7ce", "save_path": "github-repos/lean/jjaassoonn-projective_space", "path": "github-repos/lean/jjaassoonn-projective_space/projective_space-11fe19fe9d7991a272e7a40be4b6ad9b0c10c7ce/src/ring_theory/multiplicity.lean"}
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Distributed under terms of the MIT license.
import argparse
import datetime
import cvxpy as cp
import numpy as np
import math
from sklearn.metrics import f1_score
from numpy.linalg import norm, eigh
from sklearn.model_selection import train_test_split
import time
from tqdm import tqdm
import json
import os
from itertools import combinations
from data_gen import *
import warnings
warnings.filterwarnings("ignore")
# define a small constant for normalization
eps = 1e-6
class mix_curv_svm:
def __init__(self, mix_component, embed_data):
self.X_train = embed_data['X_train']
self.X_test = embed_data['X_test']
self.y_train = embed_data['y_train']
self.y_test = embed_data['y_test']
self.curv_value = embed_data['curv_value']
self.train_size = self.y_train.size
self.test_size = self.y_test.size
# other parameters
self.alpha_e = 1
self.alpha_s = 1
self.alpha_h = 1
self.r = 0.01
# store each component in order
prod_space_component = mix_component.split(',')
self.space_type = []
self.space_dim = []
for comp in prod_space_component:
self.space_type.append(comp[0])
if comp.startswith('e'):
self.space_dim.append(int(comp[1]))
else:
self.space_dim.append(int(comp[1]) + 1)
# Construct train and test matrices
self.G_train = np.zeros((self.train_size, self.train_size))
self.G_train_list = []
self.G_test = np.zeros((self.train_size, self.test_size))
self.G_test_list = []
start_dim = 0
for comp_idx in range(len(self.space_type)):
train_matrix = self.X_train[:, start_dim: start_dim + self.space_dim[comp_idx]]
test_matrix = self.X_test[:, start_dim: start_dim + self.space_dim[comp_idx]]
if self.space_type[comp_idx] == 'e':
Ge_train = np.matmul(train_matrix, train_matrix.T)
self.G_train += Ge_train
self.G_train_list.append(Ge_train)
Ge_test = np.matmul(train_matrix, test_matrix.T)
self.G_test += Ge_test
self.G_test_list.append(Ge_test)
elif self.space_type[comp_idx] == 'h':
R = max(np.sqrt(np.max(np.matmul(train_matrix, train_matrix.T))),
np.sqrt(np.max(np.matmul(train_matrix, test_matrix.T)))) + eps
Gh_train = np.matmul(train_matrix, train_matrix.T)
Gh_train = np.arcsin(Gh_train / (R ** 2))
self.G_train += Gh_train
self.G_train_list.append(Gh_train)
Gh_test = np.matmul(train_matrix, test_matrix.T)
Gh_test = np.arcsin(Gh_test / (R ** 2))
self.G_test += Gh_test
self.G_test_list.append(Gh_test)
elif self.space_type[comp_idx] == 's':
Cs = self.curv_value[comp_idx]
Gs_train = np.matmul(train_matrix, train_matrix.T)
Gs_train = np.arcsin((Cs * Gs_train) / (abs(Cs * Gs_train).max() + eps))
self.G_train += Gs_train
self.G_train_list.append(Gs_train)
Gs_test = np.matmul(train_matrix, test_matrix.T)
Gs_test = np.arcsin((Cs * Gs_test) / (abs(Cs * Gs_test).max() + eps))
self.G_test += Gs_test
self.G_test_list.append(Gs_test)
start_dim += self.space_dim[comp_idx]
def process_data(self, solver_type='SCS'):
Y = np.diagflat(self.y_train)
zeta = cp.Variable(self.train_size)
beta = cp.Variable(self.train_size)
epsilon = cp.Variable(1)
conds = [epsilon >= 0, zeta >= 0, Y @ (self.G_train @ beta + cp.sum(beta)) >= epsilon - zeta]
for comp_idx in range(len(self.space_type)):
if self.space_type[comp_idx] == 'e':
conds.append(cp.quad_form(beta, self.G_train_list[comp_idx]) <= self.alpha_e ** 2)
elif self.space_type[comp_idx] == 's':
conds.append(cp.quad_form(beta, self.G_train_list[comp_idx]) <= math.pi / 2)
prob = cp.Problem(cp.Minimize(-epsilon + cp.sum(zeta)), conds)
prob.solve(solver=solver_type)
beta = beta.value
epsilon = epsilon.value
zeta = zeta.value
# assume all alpha's are 1
y_pred = np.sign(np.matmul(beta.T, self.G_test) + np.sum(beta))
y_pred = y_pred.reshape((self.test_size,)).astype(int)
score = f1_score(self.y_test, y_pred, average='macro')
print(f'Mix curv SVM F1 score: {score}')
return score
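def _demo_mix_curv_kernels():
    # Illustrative sketch (not in the original script): build the mixed-curvature
    # Gram matrices from synthetic embeddings. The random non-negative data and
    # curvature values below are made up, so no classification is attempted.
    rng = np.random.RandomState(0)
    dim = 2 + 3 + 3  # 'e2' -> 2 dims, 'h2' and 's2' -> 2 + 1 dims each (see __init__)
    embed_data = {
        'X_train': rng.rand(20, dim),
        'X_test': rng.rand(5, dim),
        'y_train': rng.choice([-1, 1], size=20),
        'y_test': rng.choice([-1, 1], size=5),
        'curv_value': [0.0, -1.0, 1.0],  # one entry per component; only 's' uses it
    }
    svm = mix_curv_svm('e2,h2,s2', embed_data)
    print(svm.G_train.shape, svm.G_test.shape)  # -> (20, 20) (20, 5)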
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="SVM algorithm in product space form.")
parser.add_argument("--data_path1", type=str, default=None, help="Where data is located.")
parser.add_argument("--data_path2", type=str, default=None, help="Where data is located.")
parser.add_argument("--data_path3", type=str, default=None, help="Where data is located.")
parser.add_argument("--data_path4", type=str, default=None, help="Where data is located.")
parser.add_argument("--data_path_num", type=int, default=1, help="How many data path to include.")
parser.add_argument("--data_name", type=str, default="Lymphoma", help="Which dataset to test on.")
parser.add_argument("--prod_space", type=str, default="e2,h2,s2", help="Product space form.")
parser.add_argument("--test_size", type=float, default=0.2, help="Percent of test set size.")
parser.add_argument("--trails", type=int, default=10, help="Number of trails want to repeat.")
parser.add_argument("--save_path", type=str, default="results", help="Where to save results.")
parser.add_argument("--transform", type=bool, default=False, help="Where to perform inverse projection.")
args = parser.parse_args()
start = time.time()
cifar_flag = False
if args.data_name == "Lymphoma":
labels_chosen_lst = [[0, 1]]
elif args.data_name == "Blood_cell_landmark":
labels_chosen_lst = list(combinations([i for i in range(10)], 2))
# for debug only
# rnd_idx = [0, 5, 10, 15, 20, 25, 30, 35, 40]
# labels_chosen_lst = [labels_chosen_lst[i] for i in rnd_idx]
elif args.data_name == "cifar100":
cifar_flag = True
labels_chosen_lst = []
for i in range(30):
np.random.seed(i)
labels_chosen_lst.append(list(np.random.permutation(100)[0:2]))
else:
# used for debugging purpose
labels_chosen_lst = [[0, 1]]
label_trails = len(labels_chosen_lst)
acc = np.zeros((label_trails, args.trails))
# path to different files
data_path = [args.data_path1, args.data_path2, args.data_path3, args.data_path4]
data_path = data_path[0: args.data_path_num]
print(data_path)
# curvature of each file
prod_space = []
for file_name in data_path:
if cifar_flag:
prod_space.append(file_name.split('-')[2])
else:
prod_space.append(file_name.split('-')[3])
joint_prod_space = ','.join(prod_space)
assert args.prod_space == joint_prod_space
valid_acc = []
valid_trails = []
invalid_trails = []
for i in range(label_trails):
for j in range(args.trails):
embed_data = mix_data_generation(data_path, prod_space, 2, list(labels_chosen_lst[i]), svm_flag=True, cifar_flag=cifar_flag, seed=j, transform=args.transform)
mix_svm = mix_curv_svm(args.prod_space, embed_data)
# print(f'=========={i},{j}==========')
            try:
                acc[i, j] = mix_svm.process_data(solver_type='ECOS')
                valid_acc.append(acc[i, j])
                valid_trails.append((i, j))
            except Exception:
                try:
                    acc[i, j] = mix_svm.process_data(solver_type='SCS')
                    valid_acc.append(acc[i, j])
                    valid_trails.append((i, j))
                except Exception:
                    invalid_trails.append((i, j))
print(f'=========={args.prod_space}==========')
    print('Valid trials:', len(valid_acc))
print(mean_confidence_interval(np.array(valid_acc)))
print('Time used:', time.time() - start)
print('======================================')
if not os.path.exists(args.save_path):
os.makedirs(args.save_path)
cur_time = datetime.datetime.utcnow().isoformat()
    np.savez(f'{args.save_path}/{args.data_name}_{args.prod_space}_svm_f1_scores_{cur_time}.npz', acc=acc, valid_trails=valid_trails)
|
{"hexsha": "70261a8416ac5360de5d4b8b76ce02fe0b5fc487", "size": 8871, "ext": "py", "lang": "Python", "max_stars_repo_path": "product_space_svm.py", "max_stars_repo_name": "thupchnsky/product-space-linear-classifiers", "max_stars_repo_head_hexsha": "511a46be6061551ef6e279030505a1f0972de58b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "product_space_svm.py", "max_issues_repo_name": "thupchnsky/product-space-linear-classifiers", "max_issues_repo_head_hexsha": "511a46be6061551ef6e279030505a1f0972de58b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "product_space_svm.py", "max_forks_repo_name": "thupchnsky/product-space-linear-classifiers", "max_forks_repo_head_hexsha": "511a46be6061551ef6e279030505a1f0972de58b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.803030303, "max_line_length": 171, "alphanum_fraction": 0.5984669147, "include": true, "reason": "import numpy,from numpy,import cvxpy", "num_tokens": 2101}
|
# coding: utf-8
## @package pawpyseed.core.symmetry
# Utilities related to symmetry of the crystal structure,
# namely finding symmetrically identical k-points and the
# space group operators that map between them.
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.core.operations import SymmOp
import numpy as np
# PAWpyError is raised in get_kpt_mapping below; assumed to live in
# pawpyseed.core.utils, where the package defines its error type.
from pawpyseed.core.utils import PAWpyError
def get_symmops(structure, symprec):
"""
Helper function to get the symmetry operations of the structure
in the reciprocal lattice fractional coordinates.
Args:
structure (pymatgen.core.structure.Structure)
symprec (number): symmetry precision for pymatgen SpacegroupAnalyzer
"""
sga = SpacegroupAnalyzer(structure, symprec * max(structure.lattice.abc))
symmops = sga.get_symmetry_operations(cartesian = True)
lattice = structure.lattice.matrix
invlattice = structure.lattice.inv_matrix
newops = []
for op in symmops:
newrot = np.dot(lattice, op.rotation_matrix)
newrot = np.dot(newrot, invlattice)
newtrans = np.dot(op.translation_vector, invlattice)
newops.append(SymmOp.from_rotation_and_translation(
newrot, newtrans))
return newops
def get_nosym_kpoints(kpts, structure, init_kpts=None, symprec=1e-4,
	gen_trsym=True, fil_trsym=True):
	"""
	Starting with a set of k-points (kpts), finds all of the k-points
	that are symmetrically identical to a k-point in kpts by symmetry
	transformations of a crystal (structure).
	Args:
		kpts (np.ndarray shape=(n,3)): input k-points in fractional coordinates
		structure (pymatgen.core.structure.Structure)
		init_kpts (list or None): k-points already known, which generated
			k-points must not duplicate
		symprec (number): symmetry precision for pymatgen SpacegroupAnalyzer
		gen_trsym (bool): whether to also apply time-reversal symmetry (k -> -k)
		fil_trsym (bool): whether to keep only one k-point of each
			time-reversal pair (filters out the kz < 0 half space)
	"""
	allkpts = [] if init_kpts is None else [kpt for kpt in init_kpts]
orig_kptnums = []
op_nums = []
symmops = get_symmops(structure, symprec)
trs = []
for i, op in enumerate(symmops):
for k, kpt in enumerate(kpts):
newkpt = np.dot(op.rotation_matrix, kpt)
newkpt -= np.around(newkpt)
newkpt[ abs(newkpt + 0.5) < 1e-5 ] = 0.5
#if ((newkpt > 0.5+1e-6) + (newkpt < -0.5+1e-6)).any():
# continue
if fil_trsym:
if newkpt[2] < -1e-6 or \
(abs(newkpt[2]) < 1e-6 and newkpt[1] < -1e-6) or \
(abs(newkpt[2]) < 1e-6 and abs(newkpt[1]) < 1e-6 and newkpt[0] < -1e-6):
continue
unique = True
for nkpt in allkpts:
diff = (newkpt - nkpt) % 1
oppdiff = 1 - diff
tst = (np.abs(diff) < 1e-4) + (np.abs(oppdiff) < 1e-4)
if ( tst.all() ):
unique = False
break
if unique:
allkpts.append(newkpt)
orig_kptnums.append(k)
op_nums.append(i)
trs.append(0)
if gen_trsym:
for i, op in enumerate(symmops):
for k, kpt in enumerate(kpts):
newkpt = np.dot(op.rotation_matrix, kpt) * -1
newkpt -= np.around(newkpt)
newkpt[ abs(newkpt + 0.5) < 1e-5 ] = 0.5
#if ((newkpt > 0.5+1e-6) + (newkpt < -0.5+1e-6)).any():
# continue
if fil_trsym:
if newkpt[2] < -1e-10 or \
(abs(newkpt[2]) < 1e-6 and newkpt[1] < -1e-6) or \
(abs(newkpt[2]) < 1e-6 and abs(newkpt[1]) < 1e-6 and newkpt[0] < -1e-6):
continue
unique = True
for nkpt in allkpts:
diff = (newkpt - nkpt) % 1
oppdiff = 1 - diff
tst = (np.abs(diff) < 1e-4) + (np.abs(oppdiff) < 1e-4)
if ( tst.all() ):
unique = False
break
if unique:
allkpts.append(newkpt)
orig_kptnums.append(k)
op_nums.append(i)
trs.append(1)
return np.array(allkpts), orig_kptnums, op_nums, symmops, trs
def get_kpt_mapping(allkpts, kpts, structure, symprec=1e-4, gen_trsym = True):
symmops = get_symmops(structure, symprec)
orig_kptnums = []
op_nums = []
trs = []
for nkpt in allkpts:
match = False
for i, op in enumerate(symmops):
for k, kpt in enumerate(kpts):
newkpt = np.dot(op.rotation_matrix, kpt)
#if ((newkpt > 0.5+1e-6) + (newkpt < -0.5+1e-6)).any():
# continue
diff = (newkpt - nkpt) % 1
oppdiff = 1 - diff
tst = (np.abs(diff) < 1e-4) + (np.abs(oppdiff) < 1e-4)
if tst.all():
match = True
orig_kptnums.append(k)
op_nums.append(i)
trs.append(0)
break
if match:
break
if match:
continue
for i, op in enumerate(symmops):
for k, kpt in enumerate(kpts):
newkpt = np.dot(op.rotation_matrix, kpt) * -1
#if ((newkpt > 0.5+1e-6) + (newkpt < -0.5+1e-6)).any():
# continue
diff = (newkpt - nkpt) % 1
oppdiff = 1 - diff
tst = (np.abs(diff) < 1e-4) + (np.abs(oppdiff) < 1e-4)
if tst.all():
match = True
orig_kptnums.append(k)
op_nums.append(i)
trs.append(1)
break
if match:
break
if not match:
raise PAWpyError("Could not find kpoint mapping to %s" % str(nkpt))
return orig_kptnums, op_nums, symmops, trs
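if __name__ == "__main__":
	# Illustrative usage sketch (not part of the original module). The cubic Si
	# structure below is an assumption for demonstration; any pymatgen Structure
	# works. (On older pymatgen versions the import path may differ.)
	from pymatgen.core import Structure, Lattice
	struct = Structure(Lattice.cubic(5.43), ["Si"], [[0.0, 0.0, 0.0]])
	kpts = np.array([[0.0, 0.0, 0.0], [0.25, 0.25, 0.25]])
	allkpts, orig_kptnums, op_nums, symmops, trs = get_nosym_kpoints(kpts, struct)
	print("%d symmetry operations, %d expanded k-points"
		% (len(symmops), len(allkpts)))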
|
{"hexsha": "4080d16c3954b838661c1814e20329602a2f0c4d", "size": 4491, "ext": "py", "lang": "Python", "max_stars_repo_path": "pawpyseed/core/symmetry.py", "max_stars_repo_name": "akashkumarsingh612/pawpyseed", "max_stars_repo_head_hexsha": "6f5aa0b8ca8c28a0221e5256afeb939c3344560b", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pawpyseed/core/symmetry.py", "max_issues_repo_name": "akashkumarsingh612/pawpyseed", "max_issues_repo_head_hexsha": "6f5aa0b8ca8c28a0221e5256afeb939c3344560b", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pawpyseed/core/symmetry.py", "max_forks_repo_name": "akashkumarsingh612/pawpyseed", "max_forks_repo_head_hexsha": "6f5aa0b8ca8c28a0221e5256afeb939c3344560b", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.3445945946, "max_line_length": 78, "alphanum_fraction": 0.6441772434, "include": true, "reason": "import numpy", "num_tokens": 1604}
|
import string
import sys
import numpy as np
import io
from hashlib import md5
if sys.version_info < (3,):
maketrans = string.maketrans
else:
maketrans = str.maketrans
def np2csv(arr):
    """Serialize a numpy array to a CSV string."""
    csv = io.BytesIO()
    np.savetxt(csv, arr, delimiter=',', fmt='%g')
    return csv.getvalue().decode().rstrip()
def vectorize_sequences(sequences, vocabulary_length):
    """One-hot encode lists of word indexes into a (num_sequences, vocabulary_length) matrix."""
    results = np.zeros((len(sequences), vocabulary_length))
    for i, sequence in enumerate(sequences):
        results[i, sequence] = 1.
    return results
def one_hot_encode(messages, vocabulary_length):
data = []
for msg in messages:
temp = one_hot(msg, vocabulary_length)
data.append(temp)
return data
def text_to_word_sequence(text,
filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n',
lower=True, split=" "):
"""Converts a text to a sequence of words (or tokens).
# Arguments
text: Input text (string).
filters: list (or concatenation) of characters to filter out, such as
punctuation. Default: `!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n`,
includes basic punctuation, tabs, and newlines.
lower: boolean. Whether to convert the input to lowercase.
split: str. Separator for word splitting.
# Returns
A list of words (or tokens).
"""
if lower:
text = text.lower()
if sys.version_info < (3,):
if isinstance(text, unicode):
translate_map = dict((ord(c), unicode(split)) for c in filters)
text = text.translate(translate_map)
elif len(split) == 1:
translate_map = maketrans(filters, split * len(filters))
text = text.translate(translate_map)
else:
for c in filters:
text = text.replace(c, split)
else:
translate_dict = dict((c, split) for c in filters)
translate_map = maketrans(translate_dict)
text = text.translate(translate_map)
seq = text.split(split)
return [i for i in seq if i]
def one_hot(text, n,
filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n',
lower=True,
split=' '):
"""One-hot encodes a text into a list of word indexes of size n.
This is a wrapper to the `hashing_trick` function using `hash` as the
hashing function; unicity of word to index mapping non-guaranteed.
# Arguments
text: Input text (string).
n: int. Size of vocabulary.
filters: list (or concatenation) of characters to filter out, such as
punctuation. Default: `!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n`,
includes basic punctuation, tabs, and newlines.
lower: boolean. Whether to set the text to lowercase.
split: str. Separator for word splitting.
# Returns
List of integers in [1, n]. Each integer encodes a word
(unicity non-guaranteed).
"""
return hashing_trick(text, n,
hash_function='md5',
filters=filters,
lower=lower,
split=split)
def hashing_trick(text, n,
hash_function=None,
filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n',
lower=True,
split=' '):
"""Converts a text to a sequence of indexes in a fixed-size hashing space.
# Arguments
text: Input text (string).
n: Dimension of the hashing space.
hash_function: defaults to python `hash` function, can be 'md5' or
any function that takes in input a string and returns a int.
Note that 'hash' is not a stable hashing function, so
it is not consistent across different runs, while 'md5'
is a stable hashing function.
filters: list (or concatenation) of characters to filter out, such as
punctuation. Default: `!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n`,
includes basic punctuation, tabs, and newlines.
lower: boolean. Whether to set the text to lowercase.
split: str. Separator for word splitting.
# Returns
A list of integer word indices (unicity non-guaranteed).
`0` is a reserved index that won't be assigned to any word.
Two or more words may be assigned to the same index, due to possible
collisions by the hashing function.
The [probability](
https://en.wikipedia.org/wiki/Birthday_problem#Probability_table)
of a collision is in relation to the dimension of the hashing space and
the number of distinct objects.
"""
if hash_function is None:
hash_function = hash
elif hash_function == 'md5':
hash_function = lambda w: int(md5(w.encode()).hexdigest(), 16)
seq = text_to_word_sequence(text,
filters=filters,
lower=lower,
split=split)
return [int(hash_function(w) % (n - 1) + 1) for w in seq]
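if __name__ == '__main__':
    # Quick illustration (not part of the original module): hash two example
    # messages into a 50-word vocabulary and one-hot encode the index lists.
    msgs = ["Free entry in a weekly competition!", "See you at lunch?"]
    encoded = one_hot_encode(msgs, 50)
    print(encoded)                                  # lists of indices in [1, 49]
    print(vectorize_sequences(encoded, 50).shape)   # -> (2, 50)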
|
{"hexsha": "29412a466b9f1b07caede6c1b7f6fb5934d51a3d", "size": 5037, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/sms_spam_classifier_utilities.py", "max_stars_repo_name": "parampopat/cloud-project", "max_stars_repo_head_hexsha": "31e74cbf3f06b2be981be98d1cd3c832b8ca3a8c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/sms_spam_classifier_utilities.py", "max_issues_repo_name": "parampopat/cloud-project", "max_issues_repo_head_hexsha": "31e74cbf3f06b2be981be98d1cd3c832b8ca3a8c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/sms_spam_classifier_utilities.py", "max_forks_repo_name": "parampopat/cloud-project", "max_forks_repo_head_hexsha": "31e74cbf3f06b2be981be98d1cd3c832b8ca3a8c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.1590909091, "max_line_length": 78, "alphanum_fraction": 0.5773277745, "include": true, "reason": "import numpy", "num_tokens": 1197}
|
import numpy as np
from ineqpy import inequality
def test_gini_2d():
x = np.array([[57], [63], [81], [79], [88], [57], [42], [3], [77], [89]])
w = np.array([[2], [5], [2], [9], [5], [7], [4], [5], [9], [9]])
obtained = inequality.gini(income=x, weights=w)
expected = 0.2134389018024818
assert obtained==expected
def test_gini_1d():
x = np.array([57, 63, 81, 79, 88, 57, 42, 3, 77, 89])
w = np.array([2, 5, 2, 9, 5, 7, 4, 5, 9, 9])
obtained = inequality.gini(income=x, weights=w)
expected = 0.2134389018024818
assert obtained==expected
def test_gini_1d_0_w():
x = np.array([2, 2])
w = np.array([1000000, 1])
obtained = inequality.gini(income=x, weights=w)
expected = 0
assert obtained==expected
def test_gini_1d_0_series():
x = np.array([2, 2])
# w = np.array([1000000, 1])
obtained = inequality.gini(income=x)
expected = 0
assert obtained==expected
def test_gini_1d_1_series():
x = np.array([0, 1])
# w = np.array([1000000, 1])
obtained = inequality.gini(income=x)
expected = 1
assert obtained==expected
def test_gini_1d_1_w():
x = np.array([0, 1])
w = np.array([1, 1])
obtained = inequality.gini(income=x, weights=w)
expected = 1
assert obtained==expected
def test_atkinson_2d():
x = np.array([[57], [63], [81], [79], [88], [57], [42], [3], [77], [89]])
w = np.array([[2], [5], [2], [9], [5], [7], [4], [5], [9], [9]])
obtained = inequality.atkinson(income=x, weights=w)
expected = 0.06537929778911322
assert obtained==expected
def test_atkinson_1d():
x = np.array([57, 63, 81, 79, 88, 57, 42, 3, 77, 89])
w = np.array([2, 5, 2, 9, 5, 7, 4, 5, 9, 9])
obtained = inequality.atkinson(income=x, weights=w)
expected = 0.06537929778911322
assert obtained==expected
def test_atkinson_1d_1_w():
x = np.array([1, 1])
w = np.array([1, 1])
obtained = inequality.atkinson(income=x, weights=w)
expected = 0
assert obtained==expected
def test_theil_1d_1_w():
# TODO check this
x = np.array([1, 1])
w = np.array([1, 1])
obtained = inequality.theil(income=x, weights=w)
expected = 0
assert obtained==expected
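def test_gini_1d_constant_income():
    # Added illustrative case (mirrors test_gini_1d_0_series above): any
    # constant income distribution should have a Gini index of exactly 0.
    x = np.array([5, 5, 5, 5])
    obtained = inequality.gini(income=x)
    expected = 0
    assert obtained == expected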
|
{"hexsha": "468ca82afa5772d30ec1c4ef155f167f9e1a1359", "size": 2225, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_inequality.py", "max_stars_repo_name": "Grasia/IneqPy", "max_stars_repo_head_hexsha": "b6e06dcf60d1b08db03f9e055bf03510baa221c6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test_inequality.py", "max_issues_repo_name": "Grasia/IneqPy", "max_issues_repo_head_hexsha": "b6e06dcf60d1b08db03f9e055bf03510baa221c6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_inequality.py", "max_forks_repo_name": "Grasia/IneqPy", "max_forks_repo_head_hexsha": "b6e06dcf60d1b08db03f9e055bf03510baa221c6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.4880952381, "max_line_length": 77, "alphanum_fraction": 0.5896629213, "include": true, "reason": "import numpy", "num_tokens": 809}
|
from typing import Any, List, Dict, Tuple, Optional, DefaultDict, Union
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from torch import cuda, nn, save, unsqueeze, sigmoid, stack, sum, no_grad
from transformers import BertConfig, AdamW, get_linear_schedule_with_warmup, logging
from tqdm import tqdm, trange
import os
import json
import numpy as np
from datasets import load_metric
logger = logging.get_logger()
logger.setLevel(logging.INFO)
class ParaphraserTrainer(object):
def __init__(
self,
args: List[Any],
model: Any,
tokenizer : Any,
train_dataset: Optional[TensorDataset] = None,
dev_dataset: Optional[TensorDataset] = None,
) -> None:
self.args, self.model_args, self.data_args = args
self.train_dataset = train_dataset
self.dev_dataset = dev_dataset
if self.model_args.data_parallel:
self.model = nn.DataParallel(model)
else:
self.model = model
# GPU or CPU
self.device = ("cuda" if cuda.is_available() and not self.args.no_cuda else "cpu")
self.model.to(self.device)
self.tokenizer = tokenizer
def train(self):
train_sampler = RandomSampler(self.train_dataset)
train_dataloader = DataLoader(self.train_dataset, sampler=train_sampler, batch_size=self.args.train_batch_size)
t_total = len(train_dataloader) // self.args.gradient_accumulation_steps * self.args.num_train_epochs
writer = SummaryWriter()
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
'weight_decay': self.args.weight_decay},
{'params': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=self.args.learning_rate, eps=self.args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=t_total)
# Train!
logger.info("***** Running training *****")
logger.info(f"Num examples = {len(self.train_dataset)}")
logger.info(f"Num Epochs = {self.args.num_train_epochs}")
logger.info(f"Total train batch size = {self.args.train_batch_size}")
logger.info(f"Gradient Accumulation steps = {self.args.gradient_accumulation_steps}")
logger.info(f"Total optimization steps = {t_total}")
logger.info(f"Logging steps = {self.args.logging_steps}")
logger.info(f"Save steps = {self.args.save_steps}")
global_step = 0
tr_loss = 0.0
best_model_epoch = 0
best_model_step = 0
epoch_count = -1
dev_score_history, dev_step_history = [], []
self.model.zero_grad()
train_iterator = trange(int(self.args.num_train_epochs), desc="Epoch")
for _ in train_iterator:
epoch_count+=1
epoch_iterator = tqdm(train_dataloader, desc="Iteration")
for step, batch in enumerate(epoch_iterator):
self.model.train()
batch = tuple(t.to(self.device) for t in batch) # GPU or CPU
inputs = self.load_inputs_from_batch(batch)
outputs = self.model(**inputs)
loss = outputs.loss
if self.args.gradient_accumulation_steps > 1:
loss = loss / self.args.gradient_accumulation_steps
if self.model_args.data_parallel:
loss = sum(loss)
loss.backward()
tr_loss += loss.item()
writer.add_scalar('Train/avg_loss', tr_loss / (global_step+1), global_step)
writer.add_scalar('Train/loss', loss, global_step)
epoch_iterator.set_description("step {}/{} loss={:.2f}".format(
step,
global_step,
tr_loss / (global_step+1)
))
if (step + 1) % self.args.gradient_accumulation_steps == 0:
nn.utils.clip_grad_norm_(self.model.parameters(), self.args.max_grad_norm)
optimizer.step()
scheduler.step() # Update learning rate schedule
self.model.zero_grad()
global_step += 1
if self.args.logging_steps > 0 and global_step % self.args.logging_steps == 0:
results = self.evaluate()
result_to_save = {'model':self.model_args.model_name_or_path,
'global_step':global_step }
for k,v in results.items():
result_to_save[k] = v
# save model
dev_score = result_to_save["bleu"]
dev_loss = result_to_save["dev_loss"]
writer.add_scalar('Dev/score', dev_score, global_step)
writer.add_scalar('Dev/loss', dev_loss, global_step)
if global_step == self.args.logging_steps or dev_score > max(dev_score_history):
self.save_model()
best_model_epoch = epoch_count
best_model_step = global_step
logger.info(f"New best model saved at step {global_step}, epoch {epoch_count}: score = {dev_score}")
else:
logger.info(f"Best model still at step {best_model_step}, epoch {best_model_epoch}")
dev_score_history += [dev_score]
dev_step_history += [global_step]
result_to_save['best_score_mean'] = max(dev_score_history)
result_to_save['best_global_step'] = best_model_step
result_to_save['best_global_epoch'] = best_model_epoch
# save log
filename = f'logs/logs_train_{self.model_args.model_nick}_{self.args.meta_task}.jsonl'
if not os.path.exists(os.path.dirname(filename)):
os.makedirs(os.path.dirname(filename))
with open(filename,'a') as f:
f.writelines(json.dumps(result_to_save) + '\n')
return global_step, tr_loss / global_step
def evaluate(self):
dev_sampler = RandomSampler(self.dev_dataset)
dev_dataloader = DataLoader(self.dev_dataset, sampler=dev_sampler, batch_size=self.args.eval_batch_size)
# Eval!
logger.info("***** Running Evaluation *****")
logger.info(f"Num examples = {len(self.dev_dataset)}")
logger.info(f"Total eval batch size = {self.args.eval_batch_size}")
global_step = 0
e_loss = 0.0
self.model.eval()
predicted = []
labels = []
epoch_iterator = tqdm(dev_dataloader, desc="Iteration")
with no_grad():
for step, batch in enumerate(epoch_iterator):
batch = tuple(t.to(self.device) for t in batch) # GPU or CPU
inputs = self.load_inputs_from_batch(batch)
outputs = self.model(**inputs)
if self.args.meta_task == 'paraphrase':
if self.model_args.data_parallel:
generated_outputs = self.model.module.generate(input_ids = inputs['input_ids'], attention_mask = inputs['attention_mask'], num_return_sequences=1, num_beams = 5)
else:
generated_outputs = self.model.generate(input_ids = inputs['input_ids'], attention_mask = inputs['attention_mask'], num_return_sequences=1, num_beams = 5)
elif self.args.meta_task == 'transfer':
if self.model_args.data_parallel:
generated_outputs = self.model.module.generate(input_ids = inputs['input_ids'], attention_mask = inputs['attention_mask'], num_return_sequences=1, num_beams = 5)
else:
generated_outputs = self.model.generate(input_ids = inputs['input_ids'], attention_mask = inputs['attention_mask'],num_return_sequences=1, num_beams = 5)
predicted += self.tokenizer.batch_decode(generated_outputs.detach().cpu().numpy(), skip_special_tokens=True)
labels += self.tokenizer.batch_decode(inputs['labels'].detach().cpu().numpy(), skip_special_tokens=True)
loss = outputs.loss
if self.model_args.data_parallel:
loss = sum(loss)
e_loss += loss.item()
epoch_iterator.set_description("step {}/{} loss={:.2f}".format(
step,
global_step,
e_loss / (global_step+1)
))
global_step += 1
results = self.compute_stats(predicted, labels)
results['dev_loss'] = e_loss / global_step
return results
def postprocess_text(self, preds, labels):
preds = [pred.strip() for pred in preds]
labels = [[label.strip()] for label in labels]
return preds, labels
def compute_stats(self, preds, labels):
metric = load_metric("sacrebleu")
preds, labels = self.postprocess_text(preds, labels)
result = metric.compute(predictions=preds, references=labels)
result = {"bleu": result["score"]}
        # preds are already decoded strings here, so re-tokenize to approximate
        # the generated length (comparing strings to pad_token_id is meaningless)
        prediction_lens = [len(self.tokenizer(pred).input_ids) for pred in preds]
result["gen_len"] = np.mean(prediction_lens)
result = {k: round(v, 4) for k, v in result.items()}
return result
def save_model(self):
# Save model checkpoint (Overwrite)
output_dir = self.args.output_dir
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = self.model.module if hasattr(self.model, 'module') else self.model
model_to_save.save_pretrained(output_dir)
# Save training arguments together with the trained model
save(self.args, os.path.join(output_dir, 'training_args.bin'))
logger.info(f"Saving model checkpoint to {output_dir}")
def load_inputs_from_batch(self, batch):
# inputs = {'input_ids': batch[0],
# 'attention_mask': batch[1],
# 'label_ids': batch[3]}
# [batch x length x task]
inputs = {'input_ids': batch[0],
'attention_mask': batch[1],
'labels': batch[2],
}
return inputs
def predict_for_sentence(self, sentence):
self.model.eval()
tokenized_sentence = self.tokenizer(sentence, padding='max_length', return_tensors='pt', truncation = True)
input_ids = tokenized_sentence['input_ids'].to(self.device)
attention_mask = tokenized_sentence['attention_mask'].to(self.device)
        prediction = self.model.generate(input_ids=input_ids, attention_mask=attention_mask)
        prediction = self.tokenizer.decode(prediction.detach().cpu().numpy()[0], skip_special_tokens=True).strip()
return prediction
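# Usage sketch (hypothetical names; the real argument objects and TensorDatasets
# come from the project's setup code, which is not part of this file):
#
#   from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
#   tokenizer = AutoTokenizer.from_pretrained("t5-small")
#   model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
#   trainer = ParaphraserTrainer((training_args, model_args, data_args),
#                                model, tokenizer, train_dataset, dev_dataset)
#   trainer.train()      # logs to TensorBoard, keeps the best checkpoint by BLEU
#   trainer.evaluate()   # returns {'bleu': ..., 'gen_len': ..., 'dev_loss': ...}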
|
{"hexsha": "f067e34af63d2565fea89d1623a3dc917ac12279", "size": 11617, "ext": "py", "lang": "Python", "max_stars_repo_path": "ml/paraphrase/paraphraser_trainer.py", "max_stars_repo_name": "DavidThe4sian/marvin", "max_stars_repo_head_hexsha": "1dddd8cdb8be55d3b553df1b2cf89e8b6b0538b6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2021-05-14T02:23:52.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-28T10:33:18.000Z", "max_issues_repo_path": "ml/paraphrase/paraphraser_trainer.py", "max_issues_repo_name": "DavidThe4sian/marvin", "max_issues_repo_head_hexsha": "1dddd8cdb8be55d3b553df1b2cf89e8b6b0538b6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ml/paraphrase/paraphraser_trainer.py", "max_forks_repo_name": "DavidThe4sian/marvin", "max_forks_repo_head_hexsha": "1dddd8cdb8be55d3b553df1b2cf89e8b6b0538b6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-05-31T06:38:59.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-29T02:44:22.000Z", "avg_line_length": 46.8427419355, "max_line_length": 185, "alphanum_fraction": 0.5901695791, "include": true, "reason": "import numpy", "num_tokens": 2311}
|
#!/usr/bin/python
from sympy import isprime
## part 1
def xor_stack_with_palprimes(stack, palprimes):
result = []
    if len(stack) != len(palprimes):
        raise Exception("Args must have the same length. Length of stack={}, length of palprimes={}.".format(len(stack), len(palprimes)))
for i in range(len(stack)):
a = stack[i]
b = palprimes[i]
#print(a, "^", b)
result.append(chr(a ^ b))
return "".join(result)
def part_1():
stack = [17488, 16758, 16599, 16285, 16094, 15505, 15417, 14832, 14450, 13893, 13926, 13437, 12833, 12741, 12533, 11504, 11342, 10503, 10550, 10319, 975, 1007, 892, 893, 660, 743, 267, 344, 264, 339, 208, 216, 242, 172, 74, 49, 119, 113, 119, 106]
stack = stack[::-1]
palprimes = [2, 3, 5, 7, 11, 101, 131, 151, 181, 191, 313, 353, 373, 383, 727, 757, 787, 797, 919, 929, 10301, 10501, 10601, 11311, 11411, 12421, 12721, 12821, 13331, 13831, 13931, 14341, 14741, 15451, 15551, 16061, 16361, 16561, 16661, 17471]
return xor_stack_with_palprimes(stack, palprimes)
## part 2
def part_2():
stack_2 = [98426, 97850, 97604, 97280, 96815, 96443, 96354, 95934, 94865, 94952, 94669, 94440, 93969, 93766]
stack_2 = stack_2[::-1]
palprimes_99 = [93739, 94049, 94349, 94649, 94849, 94949, 95959, 96269, 96469, 96769, 97379, 97579, 97879, 98389]
return xor_stack_with_palprimes(stack_2, palprimes_99)
## part 3
def generate_odd_length_palindrome(start, end, ascending=True):
""" generate palindrome strings of odd length with the given prefix range
start = starting prefix bound
end = ending prefix bound
ascending = whether to increase or decrease when going from start to end.
returns an iterator
examples:
>>> for i in generate_odd_length_palindrome(15, 20):
print(i)
151
181
191
>>> for i in generate_odd_length_palindrome(20, 15, ascending=False):
print(i)
191
181
151
"""
full_range = range(start, end)
if not ascending:
full_range = reversed(range(end, start + 1))
for i in full_range:
s = str(i)
inv_s = s[::-1]
result = int(s + inv_s[1:])
if isprime(result):
yield result
def find_next_palindromic_prime(n):
"""
Find the next prime palindrome for the number n
Example:
>>> find_next_palindromic_prime(192)
313
>>> find_next_palindromic_prime(102)
131
"""
# if n < 11, no palindromes, pick a prime in the list
if (n <= 101):
for i in [2, 3, 5, 7, 11, 101]:
if i > n:
return i
n_str = str(n)
n_len = (len(n_str) // 2) + 1
start = int(n_str[0:n_len])
end = n * 2
for i in generate_odd_length_palindrome(start, end):
if i > n:
return i
def find_prev_palindromic_prime(n):
"""
Find the previous prime palindrome for the number n
Example:
>>> find_prev_palindromic_prime(919)
797
>>> find_prev_palindromic_prime(102)
101
"""
# if n < 11, no palindromes, pick a prime in the list
if (n <= 101):
for i in [101, 11, 7, 5, 3, 2]:
if i < n:
return i
n_str = str(n)
n_len = (len(n_str) // 2 ) + 1
start = int(n_str[0:n_len])
end = 0
for i in generate_odd_length_palindrome(start, end, ascending=False):
if i < n:
return i
def generate_sequence(start, max_terms, ascending=True):
    """Collect max_terms consecutive palindromic primes, walking up (ascending) or down from start."""
seq = []
current_n = start
if ascending:
while len(seq) < max_terms:
res = find_next_palindromic_prime(current_n)
seq.append(res)
current_n = res
else:
while len(seq) < max_terms:
res = find_prev_palindromic_prime(current_n)
seq.append(res)
current_n = res
return seq
def compute_primes_for_stack_3():
stack_3 = [101141058, 101060206, 101030055, 100998966, 100887990, 100767085, 100707036, 100656111, 100404094, 100160922, 100131019, 100111100, 100059926, 100049982, 100030045, 9989997, 9981858, 9980815, 9978842, 9965794, 9957564, 9938304, 9935427, 9932289, 9931494, 9927388, 9926376, 9923213, 9921394, 9919154, 9918082, 9916239]
stack_3 = stack_3[::-1]
palindromic_prime_765 = find_prev_palindromic_prime(stack_3[0])
palindromic_primes_765 = generate_sequence(palindromic_prime_765, len(stack_3) - 1)
palindromic_primes_765.insert(0, palindromic_prime_765)
return palindromic_primes_765
def part_3():
stack_3 = [101141058, 101060206, 101030055, 100998966, 100887990, 100767085, 100707036, 100656111, 100404094, 100160922, 100131019, 100111100, 100059926, 100049982, 100030045, 9989997, 9981858, 9980815, 9978842, 9965794, 9957564, 9938304, 9935427, 9932289, 9931494, 9927388, 9926376, 9923213, 9921394, 9919154, 9918082, 9916239]
stack_3 = stack_3[::-1]
palprimes_765 = compute_primes_for_stack_3()
# palprimes_765 = [9916199, 9918199, 9919199, 9921299, 9923299, 9926299, 9927299, 9931399, 9932399, 9935399, 9938399, 9957599, 9965699, 9978799, 9980899, 9981899, 9989899, 100030001, 100050001, 100060001, 100111001, 100131001, 100161001, 100404001, 100656001, 100707001, 100767001, 100888001, 100999001, 101030101, 101060101, 101141101]
return xor_stack_with_palprimes(stack_3, palprimes_765)
if __name__ == "__main__":
print("Full URL: {}{}{}".format(part_1(), part_2(), part_3()))
|
{"hexsha": "310a665505659452518113824d1329b1f4218c69", "size": 5485, "ext": "py", "lang": "Python", "max_stars_repo_path": "posts/190727-googlectf-beginners-friendspacebookplusallaccessredpremium.com/solve.py", "max_stars_repo_name": "shiny-labs/shiny-labs.github.io", "max_stars_repo_head_hexsha": "c5a788e19e1260ccedfbcdab0d3cebfe45df45d4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-05-28T10:53:51.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-28T10:53:51.000Z", "max_issues_repo_path": "posts/2019/190727-googlectf-beginners-friendspacebookplusallaccessredpremium.com/solve.py", "max_issues_repo_name": "shiny-labs/shiny-labs.github.io", "max_issues_repo_head_hexsha": "c5a788e19e1260ccedfbcdab0d3cebfe45df45d4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "posts/2019/190727-googlectf-beginners-friendspacebookplusallaccessredpremium.com/solve.py", "max_forks_repo_name": "shiny-labs/shiny-labs.github.io", "max_forks_repo_head_hexsha": "c5a788e19e1260ccedfbcdab0d3cebfe45df45d4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-06-18T00:26:00.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-18T00:26:00.000Z", "avg_line_length": 31.7052023121, "max_line_length": 340, "alphanum_fraction": 0.6370100273, "include": true, "reason": "from sympy", "num_tokens": 1952}
|
SUBROUTINE DT_DCHANT
IMPLICIT NONE
DOUBLE PRECISION hv , hwt , ONE , TINY10 , ZERO
INTEGER i , ik1 , ik2 , j
SAVE
INCLUDE 'inc/dtflka'
PARAMETER (TINY10=1.0D-10,ONE=1.0D0,ZERO=0.0D0)
C HADRIN: decay channel information
INCLUDE 'inc/hndech'
C particle properties (BAMJET index convention)
INCLUDE 'inc/dtpart'
DIMENSION hwt(IDMAX9)
C convert the decay-channel weights WT from absolute values into cumulative sums
DO j = 1 , IDMAX9
hwt(j) = ZERO
END DO
C DO 999 KKK=1,210
C WRITE(LOUT,'(A8,F5.2,2E10.3,2I4,2I10)')
C & ANAME(KKK),AAM(KKK),GA(KKK),TAU(KKK),IICH(KKK),IIBAR(KKK),
C & K1(KKK),K2(KKK)
C 999 CONTINUE
C STOP
DO i = 1 , 210
ik1 = K1(i)
ik2 = K2(i)
hv = ZERO
DO j = ik1 , ik2
hv = hv + WT(j)
hwt(j) = hv
C*sr 13.1.95
IF ( LPRi.GT.4 .AND. hwt(j).GT.1.0001 ) WRITE (LOUt,99010)
& hwt(j) , j , i , ik1
99010 FORMAT (2X,' ERROR IN HWT =',1F10.5,' J,I,K1=',3I5)
END DO
END DO
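C Example (illustrative numbers): channel weights 0.5,0.3,0.2 become
C the running sums 0.5,0.8,1.0, so a uniform random number in (0,1)
C can select a decay channel by the first sum it does not exceed.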
DO j = 1 , IDMAX9
WT(j) = hwt(j)
END DO
END SUBROUTINE
|
{"hexsha": "a85ed4ddeb30d919974546d9ca4d6be7a00470b3", "size": 1195, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "src/dpmjet/DT_DCHANT.f", "max_stars_repo_name": "pzhristov/DPMJET", "max_stars_repo_head_hexsha": "946e001290ca5ece608d7e5d1bfc7311cda7ebaa", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-06-15T01:59:00.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-01T08:39:13.000Z", "max_issues_repo_path": "src/dpmjet/DT_DCHANT.f", "max_issues_repo_name": "pzhristov/DPMJET", "max_issues_repo_head_hexsha": "946e001290ca5ece608d7e5d1bfc7311cda7ebaa", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-03-15T09:53:05.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-20T20:52:28.000Z", "max_forks_repo_path": "src/dpmjet/DT_DCHANT.f", "max_forks_repo_name": "pzhristov/DPMJET", "max_forks_repo_head_hexsha": "946e001290ca5ece608d7e5d1bfc7311cda7ebaa", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2020-07-05T02:44:05.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-20T20:49:05.000Z", "avg_line_length": 24.387755102, "max_line_length": 71, "alphanum_fraction": 0.5205020921, "num_tokens": 455}
|
/**
Copyright (c) 2016, Aumann Florian, Borella Jocelyn, Hutmacher Robin, Karrenbauer Oliver, Meißner Pascal, Schleicher Ralf, Stöckle Patrick, Trautmann Jeremias
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#pragma once
// Global Inclusion
#include <boost/shared_ptr.hpp>
#include <vector>
#include <map>
// ROS Main Inclusion
#include <ros/ros.h>
// ROS-wide Inclusion
#include <asr_msgs/AsrViewport.h>
// Local Inclusion
#include "asr_world_model/EmptyViewportList.h"
#include "asr_world_model/GetViewportList.h"
#include "asr_world_model/PushViewport.h"
#include "asr_world_model/FilterViewportDependingOnAlreadyVisitedViewports.h"
#include "world_model/model/settings.hpp"
#include "world_model/helper/debug_helper.hpp"
#include "world_model/helper/pose_helper.hpp"
namespace world_model
{
typedef std::vector<asr_msgs::AsrViewport> ViewportList;
typedef boost::shared_ptr<ViewportList> ViewportListPtr;
std::ostream& operator<<(std::ostream &strm, const ViewportList &viewport_list);
std::ostream& operator<<(std::ostream &strm, const ViewportListPtr &viewport_list_ptr);
/*!
 * \brief ViewPortHandler provides services for adding next-best-view viewports to a list, retrieving them, and filtering viewports against already visited ones.
*/
class ViewPortHandler
{
public:
/* ----------------- Public members ------------------ */
// Wrapped Constants
// PushViewport
static const inline std::string GetPushViewportServiceName()
{ return "push_viewport"; }
// EmptyViewportList
static const inline std::string GetEmptyViewportListServiceName()
{ return "empty_viewport_list"; }
// GetViewportList
static const inline std::string GetGetViewportListServiceName()
{ return "get_viewport_list"; }
// FilterViewportDependingOnAlreadyVisitedViewports
static const inline std::string GetFilterViewportDependingOnAlreadyVisitedViewportsName()
{ return "filter_viewport_depending_on_already_visited_viewports"; }
private:
/* ----------------- Private members ------------------ */
// Vars
std::size_t number_of_all_viewports;
ViewportListPtr viewport_list_ptr_;
DebugHelperPtr debug_helper_ptr_;
PoseHelperPtr pose_helper_ptr_;
SettingsPtr settings_ptr_;
void filterObjectTypesOfViewport(asr_msgs::AsrViewport &viewport_to_filter, const asr_msgs::AsrViewport &filter_viewport);
public:
/* ----------------- Public functions ------------------ */
/*!
* \brief Creates a new instance of the ViewPortHandler
*/
ViewPortHandler(SettingsPtr settings_ptr);
/*!
* \brief Removes the whole next best view viewports from list if request.object_type is
* set to "all" in the other case just the viewports associated with the given object type.
* \param request the associated request object of the service call
* \param response the associated response object of the service
* \return the success of the service call.
*/
bool processEmptyViewportListServiceCall(asr_world_model::EmptyViewportList::Request &request,
asr_world_model::EmptyViewportList::Response &response);
/*!
* \brief Returns the whole list of next best view viewports if request.object_type is
* set to "all" else just the subset which matches the object type given in request.object_type.
* \param request the associated request object of the service call
* \param response the associated response object of the service
* \return the success of the service call.
*/
bool processGetViewportListServiceCall(asr_world_model::GetViewportList::Request &request,
asr_world_model::GetViewportList::Response &response);
/*!
* \brief Pushes a next best view viewport to a list.
* \param request the associated request object of the service call
* \param response the associated response object of the service call
* \return the success of the service call.
*/
bool processPushViewportServiceCall(asr_world_model::PushViewport::Request &request,
asr_world_model::PushViewport::Response &response);
/*!
* \brief Filter the objects of the viewport depending on already visited viewports.
* \param request the associated request object of the service call
* \param response the associated response object of the service call
* \return the success of the service call.
*/
bool processFilterViewportDependingOnAlreadyVisitedViewportsVisited(asr_world_model::FilterViewportDependingOnAlreadyVisitedViewports::Request &request,
asr_world_model::FilterViewportDependingOnAlreadyVisitedViewports::Response &response);
};
}
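// Illustrative registration sketch (assumed node setup; not part of this header):
//   world_model::ViewPortHandler handler(settings_ptr);
//   ros::NodeHandle nh;
//   ros::ServiceServer srv = nh.advertiseService(
//       world_model::ViewPortHandler::GetPushViewportServiceName(),
//       &world_model::ViewPortHandler::processPushViewportServiceCall, &handler);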
|
{"hexsha": "b85cd2568dfe42b8aab48a5c24dd647e713ddbe3", "size": 6294, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/world_model/view_port_handler.hpp", "max_stars_repo_name": "asr-ros/asr_world_model", "max_stars_repo_head_hexsha": "ff4ab2e8f83212be0f3aeb0224ad892d4966befd", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2019-10-29T13:37:29.000Z", "max_stars_repo_stars_event_max_datetime": "2019-10-29T13:37:29.000Z", "max_issues_repo_path": "include/world_model/view_port_handler.hpp", "max_issues_repo_name": "asr-ros/asr_world_model", "max_issues_repo_head_hexsha": "ff4ab2e8f83212be0f3aeb0224ad892d4966befd", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "include/world_model/view_port_handler.hpp", "max_forks_repo_name": "asr-ros/asr_world_model", "max_forks_repo_head_hexsha": "ff4ab2e8f83212be0f3aeb0224ad892d4966befd", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.9571428571, "max_line_length": 755, "alphanum_fraction": 0.7403876708, "num_tokens": 1302}
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This code is based on https://github.com/heartInsert/randaugment
# reference: https://arxiv.org/abs/1909.13719
from PIL import Image, ImageEnhance, ImageOps
import numpy as np
import random
class RandAugment(object):
def __init__(self, num_layers=2, magnitude=5, fillcolor=(128, 128, 128)):
self.num_layers = num_layers
self.magnitude = magnitude
self.max_level = 10
abso_level = self.magnitude / self.max_level
self.level_map = {
"shearX": 0.3 * abso_level,
"shearY": 0.3 * abso_level,
"translateX": 150.0 / 331 * abso_level,
"translateY": 150.0 / 331 * abso_level,
"rotate": 30 * abso_level,
"color": 0.9 * abso_level,
"posterize": int(4.0 * abso_level),
"solarize": 256.0 * abso_level,
"contrast": 0.9 * abso_level,
"sharpness": 0.9 * abso_level,
"brightness": 0.9 * abso_level,
"autocontrast": 0,
"equalize": 0,
"invert": 0
}
# from https://stackoverflow.com/questions/5252170/
# specify-image-filling-color-when-rotating-in-python-with-pil-and-setting-expand
def rotate_with_fill(img, magnitude):
rot = img.convert("RGBA").rotate(magnitude)
return Image.composite(rot,
Image.new("RGBA", rot.size, (128, ) * 4),
rot).convert(img.mode)
rnd_ch_op = random.choice
self.func = {
"shearX": lambda img, magnitude: img.transform(
img.size,
Image.AFFINE,
(1, magnitude * rnd_ch_op([-1, 1]), 0, 0, 1, 0),
Image.BICUBIC,
fillcolor=fillcolor),
"shearY": lambda img, magnitude: img.transform(
img.size,
Image.AFFINE,
(1, 0, 0, magnitude * rnd_ch_op([-1, 1]), 1, 0),
Image.BICUBIC,
fillcolor=fillcolor),
"translateX": lambda img, magnitude: img.transform(
img.size,
Image.AFFINE,
(1, 0, magnitude * img.size[0] * rnd_ch_op([-1, 1]), 0, 1, 0),
fillcolor=fillcolor),
"translateY": lambda img, magnitude: img.transform(
img.size,
Image.AFFINE,
(1, 0, 0, 0, 1, magnitude * img.size[1] * rnd_ch_op([-1, 1])),
fillcolor=fillcolor),
"rotate": lambda img, magnitude: rotate_with_fill(img, magnitude),
"color": lambda img, magnitude: ImageEnhance.Color(img).enhance(
1 + magnitude * rnd_ch_op([-1, 1])),
"posterize": lambda img, magnitude:
ImageOps.posterize(img, magnitude),
"solarize": lambda img, magnitude:
ImageOps.solarize(img, magnitude),
"contrast": lambda img, magnitude:
ImageEnhance.Contrast(img).enhance(
1 + magnitude * rnd_ch_op([-1, 1])),
"sharpness": lambda img, magnitude:
ImageEnhance.Sharpness(img).enhance(
1 + magnitude * rnd_ch_op([-1, 1])),
"brightness": lambda img, magnitude:
ImageEnhance.Brightness(img).enhance(
1 + magnitude * rnd_ch_op([-1, 1])),
"autocontrast": lambda img, magnitude:
ImageOps.autocontrast(img),
"equalize": lambda img, magnitude: ImageOps.equalize(img),
"invert": lambda img, magnitude: ImageOps.invert(img)
}
def __call__(self, img):
available_op_names = list(self.level_map.keys())
for layer_num in range(self.num_layers):
op_name = np.random.choice(available_op_names)
img = self.func[op_name](img, self.level_map[op_name])
return img
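# Illustrative usage (assumes a PIL image; the file name is hypothetical):
#   aug = RandAugment(num_layers=2, magnitude=9)
#   img = Image.open("sample.jpg").convert("RGB")
#   img = aug(img)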
|
{"hexsha": "98df62d6b1154453702880f59c5d3079ff815d0b", "size": 4529, "ext": "py", "lang": "Python", "max_stars_repo_path": "ppcls/data/preprocess/ops/randaugment.py", "max_stars_repo_name": "PaddlePaddle/PaddleImgClass", "max_stars_repo_head_hexsha": "f5265a1f2ab7aa113ae5245223f0528e3239a5e7", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2020-03-30T04:32:01.000Z", "max_stars_repo_stars_event_max_datetime": "2020-03-30T07:51:00.000Z", "max_issues_repo_path": "ppcls/data/preprocess/ops/randaugment.py", "max_issues_repo_name": "PaddlePaddle/PaddleClassification", "max_issues_repo_head_hexsha": "51c1bdb27af15441995bf9840f7020cca9b7d9a8", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ppcls/data/preprocess/ops/randaugment.py", "max_forks_repo_name": "PaddlePaddle/PaddleClassification", "max_forks_repo_head_hexsha": "51c1bdb27af15441995bf9840f7020cca9b7d9a8", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-03-30T07:51:33.000Z", "max_forks_repo_forks_event_max_datetime": "2020-03-30T07:51:33.000Z", "avg_line_length": 41.9351851852, "max_line_length": 89, "alphanum_fraction": 0.5634797969, "include": true, "reason": "import numpy", "num_tokens": 1116}
|
import os
import pandas as pd
import numpy as np
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine
from flask import Flask, jsonify, render_template
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
# A database URI must be configured before binding SQLAlchemy; the sqlite
# path below is an assumed local default.
app.config["SQLALCHEMY_DATABASE_URI"] = os.environ.get("DATABASE_URL", "sqlite:///db/samples.sqlite")
db = SQLAlchemy(app)
Base = automap_base()
Base.prepare(db.engine, reflect=True)
Samples = Base.classes.samples
@app.route("/")
def index():
"""Return to the homepage."""
return render_template("index.html")
@app.route("/names")
def names():
"""Returns a list of names."""
stmt = db.session.query(Samples).statement
df = pd.read_sql_query(stmt, db.session.bind)
return jsonify(list(df.columns)[2:])
@app.route("/samples/<sample>")
def samples(sample):
"""Return `otu_ids`, `otu_labels`, and `sample_values`."""
stmt = db.session.query(Samples).statement
df = pd.read_sql_query(stmt, db.session.bind)
sample_data = df.loc[df[sample] > 1, ["otu_id", "otu_label", sample]]
data = {
"otu_ids": sample_data.otu_id.values.tolist(),
"sample_values": sample_data[sample].values.tolist(),
"otu_labels": sample_data.otu_label.tolist(),
}
return jsonify(data)
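# Illustrative response shape for GET /samples/<sample>, per the dict above:
#   {"otu_ids": [...], "sample_values": [...], "otu_labels": [...]}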
if __name__ == "__main__":
app.run()
|
{"hexsha": "6548a99babb722ad70bf267a4a185c430185556d", "size": 1297, "ext": "py", "lang": "Python", "max_stars_repo_path": "app.py", "max_stars_repo_name": "BradleyHigdon/plotly", "max_stars_repo_head_hexsha": "c845c52a38cc53d922b7d1f9e53920c46337faab", "max_stars_repo_licenses": ["ADSL"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "app.py", "max_issues_repo_name": "BradleyHigdon/plotly", "max_issues_repo_head_hexsha": "c845c52a38cc53d922b7d1f9e53920c46337faab", "max_issues_repo_licenses": ["ADSL"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "app.py", "max_forks_repo_name": "BradleyHigdon/plotly", "max_forks_repo_head_hexsha": "c845c52a38cc53d922b7d1f9e53920c46337faab", "max_forks_repo_licenses": ["ADSL"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.0185185185, "max_line_length": 73, "alphanum_fraction": 0.699306091, "include": true, "reason": "import numpy", "num_tokens": 304}
|
20100712 17:40:52   Hi, I'm Evan. What's with the eNigma? Are you dressed in spandex, or are you four years old? C'mon, you're acting like a totally antisocial, petty and juvenile child by introducing yourself as that. Do you introduce yourself to people you're meeting for a movie as Mr.eNigma? If so, how do you pronounce the capital N? Seriously, you seem cool, you write decent reviews, so why are you so aggressive and antagonistic toward your neighbors? Users/JabberWokky Evan JabberWokky Edwards
20100813 06:11:30   Why so serious?? Users/Mr.eNigma
20100813 09:50:33   Wrong supervillain. Users/JoePomidor
|
{"hexsha": "600594ce48fc9f2e76202f79e5a02e6a5b94d8d9", "size": 723, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "lab/davisWiki/Mr.eNigma.f", "max_stars_repo_name": "voflo/Search", "max_stars_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab/davisWiki/Mr.eNigma.f", "max_issues_repo_name": "voflo/Search", "max_issues_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab/davisWiki/Mr.eNigma.f", "max_forks_repo_name": "voflo/Search", "max_forks_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 72.3, "max_line_length": 507, "alphanum_fraction": 0.785615491, "num_tokens": 197}
|
import numpy as np
import matplotlib.pyplot as plt
import qiskit.pulse.library as pulse_lib
from scipy.optimize import curve_fit
from scipy.signal import find_peaks
from qiskit import IBMQ
from qiskit import pulse
from qiskit.compiler import assemble
from qiskit.tools.monitor import job_monitor
IBMQ.enable_account("a1e179011c9095c208f4168f632df8f6a85c77a56663a083106ff6b51f7e0946d743f8ab54b118a3249afe67a7dea34d3d57918a73d06733ae0fc75aa6964c04")
provider = IBMQ.get_provider(hub='ibm-q', group='open', project='main')
backends = provider.backends()
NUM_SHOTS = 1024
SIGMA = 75e-9
TRUNC = 8
MAXFREQS = 75
MAXDRIVE = 0.1
SAMPLE = 16
SCALE = 1e-14
class Calibration:
def __init__(self,backend='ibmq_armonk'):
self.backend = provider.get_backend(backend)
self.backend_config = self.backend.configuration()
assert self.backend_config.open_pulse, "Backend doesn't support Pulse"
self.dt = self.backend_config.dt
self.backend_defaults = self.backend.defaults()
###################################
## Schedules ######################
###################################
def frequency01_schedules(self,span_range=1/200,qbit=0):
schedules = []
self.f01_default = self.backend_defaults.qubit_freq_est[qbit]
span = self.f01_default*span_range
frequencies = np.linspace(-span, span, MAXFREQS)
# Define the drive pulse
drive_sigma = SAMPLE * round(SIGMA/ SAMPLE/ self.dt) # The width of the gaussian in units of dt
drive_samples = TRUNC * drive_sigma # The truncating parameter in units of dt
drive_power = MAXDRIVE
gauss = pulse_lib.gaussian(duration=drive_samples,
sigma=drive_sigma,
amp=drive_power,
name='ground_sweep_drive_pulse')
for freq in frequencies:
with pulse.build(backend=self.backend) as spec:
with pulse.align_sequential():
pulse.shift_frequency(freq, pulse.DriveChannel(qbit))
pulse.play(gauss, pulse.DriveChannel(qbit))
pulse.measure(qbit)
schedules.append(spec)
return frequencies,schedules
def frequency12_schedules(self,span_range=1/200,qbit=0,span=[-0.36e9, -0.32e9]):
schedules = []
frequencies = np.linspace(span[0],span[1], MAXFREQS)
# Define the drive pulse
gauss = self.pulse_rx01()
for freq in frequencies:
with pulse.build(backend=self.backend) as spec:
with pulse.align_sequential():
pulse.shift_frequency(self.df01_calib, pulse.DriveChannel(qbit))
pulse.play(gauss, pulse.DriveChannel(0))
pulse.shift_frequency(freq, pulse.DriveChannel(qbit))
pulse.play(gauss, pulse.DriveChannel(qbit))
pulse.measure(qbit)
schedules.append(spec)
return frequencies,schedules
def amplitude01_schedules(self,max_amp=1.0,qbit=0):
schedules = []
drive_powers = np.linspace(0, max_amp, MAXFREQS)
# Define the drive pulse
drive_sigma = SAMPLE * round(SIGMA/ SAMPLE/ self.dt) # The width of the gaussian in units of dt
drive_samples = TRUNC * drive_sigma # The truncating parameter in units of dt
for drive_power in drive_powers:
gauss = pulse_lib.gaussian(duration=drive_samples,sigma=drive_sigma,amp=drive_power)
with pulse.build(backend=self.backend) as spec:
with pulse.align_sequential():
pulse.shift_frequency(self.df01_calib, pulse.DriveChannel(qbit))
pulse.play(gauss, pulse.DriveChannel(qbit))
pulse.measure(qbit)
schedules.append(spec)
return drive_powers,schedules
def amplitude12_schedules(self,max_amp=1.0,qbit=0):
schedules = []
drive_powers = np.linspace(0, max_amp, MAXFREQS)
drive_sigma = SAMPLE * round(SIGMA/ SAMPLE/ self.dt)
drive_samples = TRUNC * drive_sigma
for drive_power in drive_powers:
gauss = pulse_lib.gaussian(duration=drive_samples,sigma=drive_sigma,amp=drive_power)
with pulse.build(backend=self.backend) as spec:
with pulse.align_sequential():
pulse.shift_frequency(self.df01_calib, pulse.DriveChannel(qbit))
pulse.play(self.pulse_rx01(), pulse.DriveChannel(qbit))
pulse.shift_frequency(self.df12_calib, pulse.DriveChannel(qbit))
pulse.play(gauss, pulse.DriveChannel(qbit))
pulse.measure(qbit)
schedules.append(spec)
return drive_powers,schedules
def amplitude_y01_schedules(self,max_amp=1.0,qbit=0):
schedules = []
drive_powers = np.linspace(0, max_amp, MAXFREQS)
# Define the drive pulse
drive_sigma = SAMPLE * round(SIGMA/ SAMPLE/ self.dt)
drive_samples = TRUNC * drive_sigma
# `cal` was undefined in this method and no `gaussian_pulse` helper exists;
# build the sqrt(Y) pulse directly from the calibrated 0->1 amplitude instead.
drive_power = self.a01_calib*1j
yrot_pulse = pulse_lib.gaussian(duration=drive_samples, sigma=drive_sigma, amp=drive_power/2)
for drive_power in drive_powers:
gauss = pulse_lib.gaussian(duration=drive_samples,sigma=drive_sigma,amp=drive_power)
with pulse.build(backend=self.backend) as spec:
with pulse.align_sequential():
pulse.shift_frequency(self.df01_calib, pulse.DriveChannel(qbit))
pulse.play(yrot_pulse, pulse.DriveChannel(qbit)) #sqrt(y)
pulse.play(self.pulse_rx01(), pulse.DriveChannel(qbit)) #x
pulse.play(yrot_pulse, pulse.DriveChannel(qbit)) #sqrt(y)
pulse.measure(qbit)
schedules.append(spec)
return drive_powers,schedules
def amplitude_h12_schedules(self, max_amp=1.0,qbit=0):
schedules = []
drive_powers = np.linspace(0, max_amp, MAXFREQS)
drive_sigma = SAMPLE * round(SIGMA/ SAMPLE/ self.dt)
drive_samples = TRUNC * drive_sigma
for drive_power in drive_powers:
gauss = pulse_lib.gaussian(duration=drive_samples,sigma=drive_sigma,amp=drive_power)
with pulse.build(backend=self.backend) as spec:
with pulse.align_sequential():
pulse.shift_frequency(self.df01_calib, pulse.DriveChannel(qbit))
pulse.play(self.pulse_rx01(), pulse.DriveChannel(qbit))
pulse.shift_frequency(self.df12_calib, pulse.DriveChannel(qbit))
pulse.play(self.pulse_rx12(np.pi/2), pulse.DriveChannel(qbit))
pulse.measure(qbit)
schedules.append(spec)
return drive_powers,schedules
def amplitude_h02_schedules(self, max_amp=1.0,qbit=0):
schedules = []
drive_powers = np.linspace(0, max_amp, MAXFREQS)
drive_sigma = SAMPLE * round(SIGMA/ SAMPLE/ self.dt)
drive_samples = TRUNC * drive_sigma
theta1 = 2*np.arccos(1/np.sqrt(3))  # arccos (not cos): rotation angle giving |0> amplitude 1/sqrt(3)
theta2 = np.pi/2
for drive_power in drive_powers:
gauss = pulse_lib.gaussian(duration=drive_samples,sigma=drive_sigma,amp=drive_power)
with pulse.build(backend=self.backend) as spec:
with pulse.align_sequential():
pulse.shift_frequency(self.df01_calib, pulse.DriveChannel(qbit))
pulse.play(self.pulse_rx01(theta = np.pi/2), pulse.DriveChannel(qbit))
pulse.shift_frequency(self.df12_calib, pulse.DriveChannel(qbit))
pulse.play(self.pulse_rx12(), pulse.DriveChannel(qbit))
pulse.measure(qbit)
schedules.append(spec)
return drive_powers,schedules
def amplitude_equal_superposition_schedules(self, max_amp=1.0,qbit=0):
schedules = []
drive_powers = np.linspace(0, max_amp, MAXFREQS)
# Define the drive pulse
drive_sigma = SAMPLE * round(SIGMA/ SAMPLE/ self.dt) # The width of the gaussian in units of dt
drive_samples = TRUNC * drive_sigma # The truncating parameter in units of dt
theta1 = 2*np.arccos(1/np.sqrt(3))  # arccos (not cos): rotation angle giving |0> amplitude 1/sqrt(3)
theta2 = np.pi/2
for drive_power in drive_powers:
gauss = pulse_lib.gaussian(duration=drive_samples,sigma=drive_sigma,amp=drive_power)
with pulse.build(backend=self.backend) as spec:
with pulse.align_sequential():
pulse.shift_frequency(self.df01_calib, pulse.DriveChannel(qbit))
pulse.play(self.pulse_rx01(theta=theta1), pulse.DriveChannel(qbit))
pulse.shift_frequency(self.df12_calib, pulse.DriveChannel(qbit))
pulse.play(self.pulse_rx12(theta=theta2), pulse.DriveChannel(qbit))
pulse.measure(qbit)
schedules.append(spec)
return drive_powers,schedules
def run_schedule(self,schedules):
job = self.backend.run(schedules, meas_level=1)
job_monitor(job)
results = job.result()
result_data = [results.get_memory(i)[0]*SCALE for i in range(len(results.results))]
return result_data
def measurement_schedule(self,num_exp=25,qbit=0):
with pulse.build(backend=self.backend) as st0:
with pulse.align_sequential():
pulse.measure(qbit)
with pulse.build(backend=self.backend) as st1:
with pulse.align_sequential():
pulse.shift_frequency(self.df01_calib, pulse.DriveChannel(qbit))
pulse.play(self.pulse_rx01(), pulse.DriveChannel(qbit))
pulse.measure(qbit)
with pulse.build(backend=self.backend) as st2:
with pulse.align_sequential():
pulse.shift_frequency(self.df01_calib, pulse.DriveChannel(qbit))
pulse.play(self.pulse_rx01(), pulse.DriveChannel(qbit))
pulse.shift_frequency(self.df12_calib, pulse.DriveChannel(qbit))
pulse.play(self.pulse_rx12(), pulse.DriveChannel(qbit))
pulse.measure(qbit)
schedules = [st0,st1,st2]
states = [0,1,2]
return states,schedules
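# The three schedules above prepare |0>, |1> and |2> respectively; running
# them yields reference IQ clouds for state discrimination (see
# fit_measurements below).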
###################################
## Fittings #######################
###################################
def fit_results_f01(self,result_data,frequencies):
params = match_sinc2(np.abs(result_data),frequencies)
self.df01_calib = params[0]
def fit_results_f12(self,result_data,frequencies):
params = match_sinc2(np.abs(result_data),frequencies)
self.df12_calib = params[0]
def fit_results_a01(self,result_data,drive_powers):
X = np.angle(result_data)
params = match_sine(X,drive_powers)
self.a01_calib = params[0]
def fit_results_a12(self,result_data,drive_powers):
X = np.angle(result_data)
params = match_sine(X,drive_powers)
self.a12_calib = params[0]
def fit_results_a_y01(self,result_data,drive_powers):
Y = np.angle(result_data)
params = match_sine(Y,drive_powers)
self.y01_calib = params[0]
def fit_results_a_h12(self, result_data, drive_powers):
H = np.angle(result_data)
params = match_sine(H, drive_powers)
self.h12_calib = params[0]
def fit_results_a_h02(self, result_data, drive_powers):
H = np.angle(result_data)
params = match_sine(H, drive_powers)
self.h02_calib = params[0]
def fit_results_a_eq_sup(self, result_data, drive_powers):
ES = np.angle(result_data)
params = match_sine(ES, drive_powers)
self.eq_sup_calib = params[0]
###################################
## Pulses #########################
###################################
def pulse_rx01(self,theta=np.pi):
# Define the drive pulse
drive_sigma = SAMPLE * round(SIGMA/ SAMPLE/ self.dt) # The width of the gaussian in units of dt
drive_samples = TRUNC * drive_sigma # The truncating parameter in units of dt
theta_wrap = theta - 2*np.pi*np.floor(theta/(2*np.pi))
drive_power = self.a01_calib*theta_wrap/np.pi
gauss = pulse_lib.gaussian(duration=drive_samples,sigma=drive_sigma,amp=drive_power)
return gauss
def pulse_rx12(self,theta=np.pi):
# Define the drive pulse
drive_sigma = SAMPLE * round(SIGMA/ SAMPLE/ self.dt) # The width of the gaussian in units of dt
drive_samples = TRUNC * drive_sigma # The truncating parameter in units of dt
theta_wrap = theta - 2*np.pi*np.floor(theta/(2*np.pi))
drive_power = self.a12_calib*theta_wrap/np.pi
gauss = pulse_lib.gaussian(duration=drive_samples,sigma=drive_sigma,amp=drive_power)
return gauss
def pulse_y01(self,theta=np.pi):
# Define the drive pulse
drive_sigma = SAMPLE * round(SIGMA/ SAMPLE/ self.dt) # The width of the gaussian in units of dt
drive_samples = TRUNC * drive_sigma # The truncating parameter in units of dt
theta_wrap = theta - 2*np.pi*np.floor(theta/(2*np.pi))
drive_power = self.y01_calib*theta_wrap/np.pi
gauss = pulse_lib.gaussian(duration=drive_samples,sigma=drive_sigma,amp=drive_power)
return gauss
###################################
## Measurements ###################
###################################
def classify_results(self,result_data):
# D dims distance to center [real class, shots , center class]
D = np.abs(np.expand_dims(self.state_centers,axis=(0,1))-np.expand_dims(result_data,axis=2))
result_state = np.argmin(D,axis=2)
return result_state
def fit_measurements(self,result_data,boolPlot = True):
num_shots = result_data.shape[1]
num_states = result_data.shape[0]
#rough etimation of centers thanks to median
self.state_centers = np.median(np.real(result_data),axis=1)+ 1j * np.median(np.imag(result_data),axis=1)
#distance to the centers, to eliminate outliers and plot cumulative distribution
dist = np.abs(np.expand_dims(self.state_centers,axis=1)-result_data)
dist_sort=np.sort(dist,axis=1)
#update centers with only inliers
self.state_centers = [np.mean(result_data[i,dist[i,:]<5]) for i in range(num_states)]
#for display purposes only
radius = [np.mean(dist[i,dist[i,:]<10]) for i in range(num_states)]
if boolPlot == True:
plt.figure(figsize=(10,5))
plt.subplot(1,2,1)
colors = ['b','r','g']
for s in range(num_states):
plt.scatter(np.real(result_data[s,:]), np.imag(result_data[s,:]),s=1,c = colors[s],alpha=0.5)
plt.scatter(np.real(self.state_centers[s]),np.imag(self.state_centers[s]),s=30,c = 'k')
t = np.linspace(0,2*np.pi,100)
circle = np.exp(1j*t)*radius[s]+self.state_centers[s]
plt.plot(np.real(circle),np.imag(circle),'k')
plt.axis("equal")
plt.title("measurements per class")
plt.xlabel("real")
plt.ylabel("imag")
plt.subplot(1,2,2)
for s in range(num_states):
plt.plot(dist_sort[s,:],np.arange(dist.shape[1]),colors[s],label = f"state {s}")
plt.title("cumulative distribution of distance wrt centers per class")
plt.legend()
result_state = self.classify_results(result_data)
confusion_matrix=np.zeros((num_states,num_states)) #rows are true classes, columns are derived classes
for i in range(num_states):
for j in range(num_states):
confusion_matrix[i,j]=np.sum(result_state[i,:]==j)
self.confusion_matrix = confusion_matrix/num_shots
def match_sine(X,ps,boolPlot=True):
N=len(X)
#init parameter
C = np.mean(X)
B = (np.max(X)-np.min(X))/2
#init frequency
Y = np.fft.fft(X - C)
Y = np.abs(Y[:int(N/2)])
imax = np.argmax(Y)
y0 = Y[imax]
yl = Y[imax-1]
yr = Y[imax+1]
a = (yl+yr)/2 - y0
b = (yr-yl)/2
c = y0
x = -b/(2*a)
dp = ps[1]-ps[0]
p_opt = dp*(N-1)/(imax + x)/2
#optim
init_param = [p_opt, 0, -B, C]
function = lambda f, fc, A, B, C: A*np.sin(np.pi*f/fc)+B*np.cos(np.pi*f/fc)+C
params, conv = curve_fit(function, ps, X, init_param)
X_model = function(ps, *params)
if boolPlot == True:
plt.plot(ps,X)
plt.plot(ps,function(ps,*init_param))
plt.plot(ps,X_model)
return params
def match_sinc2(X,fs,boolPlot=True):
#initial parameters
B = np.min(X)
fc = fs[np.argmax(X)]
A = np.max(X) - B
X_norm = (X - B)/A
x_mean = np.mean(X_norm)
df = x_mean*(np.max(fs)-np.min(fs))/np.pi/3
#fit
function = lambda f, fc, df, A, B: A*np.sinc((f-fc)/df)**2+B
params, conv = curve_fit(function, fs, X, [fc, df, A, B])
X_model = function(fs, *params)
# (params, X_model) =fit_function(fs,X,function,[fc, df, A, B])
if boolPlot == True:
plt.scatter(fs,X,c='k')
plt.plot(fs,X_model,c='b')
return params
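# Illustrative 0->1 calibration flow using the pieces above (assumes IBMQ
# access is available; method and attribute names as defined in this file):
#   cal = Calibration()
#   freqs, scheds = cal.frequency01_schedules()
#   data = cal.run_schedule(scheds)
#   cal.fit_results_f01(data, freqs)    # sets cal.df01_calib
#   powers, scheds = cal.amplitude01_schedules()
#   data = cal.run_schedule(scheds)
#   cal.fit_results_a01(data, powers)   # sets cal.a01_calib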
|
{"hexsha": "feed37f052f8bf4f8da075a2707e67d0f2430f60", "size": 17784, "ext": "py", "lang": "Python", "max_stars_repo_path": "CosmiQ/ibmq/gate_constructor.py", "max_stars_repo_name": "stared/Hackathon2021", "max_stars_repo_head_hexsha": "69e2ba4345b311e62d09d02f6953b25614229e12", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 18, "max_stars_repo_stars_event_min_datetime": "2021-07-26T13:45:16.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-26T09:15:23.000Z", "max_issues_repo_path": "CosmiQ/ibmq/gate_constructor.py", "max_issues_repo_name": "stared/Hackathon2021", "max_issues_repo_head_hexsha": "69e2ba4345b311e62d09d02f6953b25614229e12", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-07-26T19:33:30.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-28T08:32:20.000Z", "max_forks_repo_path": "CosmiQ/ibmq/gate_constructor.py", "max_forks_repo_name": "stared/Hackathon2021", "max_forks_repo_head_hexsha": "69e2ba4345b311e62d09d02f6953b25614229e12", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 35, "max_forks_repo_forks_event_min_datetime": "2021-07-26T13:10:40.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T05:23:48.000Z", "avg_line_length": 41.1666666667, "max_line_length": 151, "alphanum_fraction": 0.59682861, "include": true, "reason": "import numpy,from scipy", "num_tokens": 4188}
|
from __future__ import absolute_import, division, print_function, unicode_literals
from keras import layers, models
from keras.models import Sequential
import numpy as np
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.utils import to_categorical
import random
from keras import optimizers
from keras.layers import SimpleRNN, Dense
from keras.layers import Bidirectional
import os
import sys
def make_label(text):
with open("label.txt", "w") as f:
f.write(text)
def load_data(dirname):
if dirname[-1]!='/':
dirname=dirname+'/'
listfile=os.listdir(dirname)
X = []
Y = []
XT = []
YT = []
for file in listfile:
if "_" in file:
continue
wordname = file
textlist = os.listdir(dirname + wordname)
k = 0
for text in textlist:
if "DS_" in text:
continue
textname = dirname + wordname + "/" + text
numbers = []
with open(textname, mode = 'r') as t:
numbers = [float(num) for num in t.read().split()]
for i in range(len(numbers), 25200):
numbers.extend([0.000])
row = 0
landmark_frame = []
for i in range(0, 70):
landmark_frame.extend(numbers[row:row+84])
row += 84
landmark_frame = np.array(landmark_frame)
landmark_frame = list(landmark_frame.reshape(-1,84))
if (k % 9 == 1):
XT.append(np.array(landmark_frame))
YT.append(wordname)
else:
X.append(np.array(landmark_frame))
Y.append(wordname)
k+=1
X = np.array(X)
Y = np.array(Y)
XT = np.array(XT)
YT = np.array(YT)
tmp = [[x,y] for x, y in zip(X, Y)]
random.shuffle(tmp)
tmp_test = [[xt,yt] for xt, yt in zip(XT, YT)]
random.shuffle(tmp_test)
X = [n[0] for n in tmp]
Y = [n[1] for n in tmp]
XT = [n[0] for n in tmp_test]
YT = [n[1] for n in tmp_test]
k = set(Y)
ks = sorted(k)
text = ""
for i in ks:
text = text + i + " "
make_label(text)
s = Tokenizer()
s.fit_on_texts([text])
encoded = s.texts_to_sequences([Y])[0]
encoded_test = s.texts_to_sequences([YT])[0]
encoded_updated = [x-1 for x in encoded]
encoded_test_updated = [x-1 for x in encoded_test]
one_hot = to_categorical(encoded_updated)
one_hot_test = to_categorical(encoded_test_updated)
(x_train, y_train) = X, one_hot
(x_test, y_test) = XT,one_hot_test
print(y_test.shape)
x_train = np.array(x_train)
y_train = np.array(y_train)
x_test = np.array(x_test)
y_test = np.array(y_test)
return x_train, y_train, x_test, y_test
def build_model(label):
model = Sequential()
model.add(layers.LSTM(64, return_sequences=True,
input_shape=(70, 84)))
model.add(layers.LSTM(64, return_sequences=True))
model.add(layers.LSTM(64, return_sequences=True))
model.add(layers.LSTM(64, return_sequences=True))
model.add(layers.LSTM(64, return_sequences=True))
model.add(layers.LSTM(64, return_sequences=True))
model.add(layers.LSTM(64))
model.add(layers.Dense(label, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
return model
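# Illustrative training sketch (the "data/" directory name is an assumption;
# load_data expects one subdirectory of landmark text files per word):
#   x_train, y_train, x_test, y_test = load_data("data")
#   model = build_model(y_train.shape[1])
#   model.fit(x_train, y_train, epochs=50, validation_data=(x_test, y_test))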
|
{"hexsha": "64c57f031a7d5aef3af1b8abcab7b0dcac194bca", "size": 3546, "ext": "py", "lang": "Python", "max_stars_repo_path": "sign-prediction/train_utils.py", "max_stars_repo_name": "kurshakuz/krsl-recogniton", "max_stars_repo_head_hexsha": "2ffecd1a1bdaae1d9cee68a7d74ae8246719157e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-11-23T18:15:28.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-29T15:51:10.000Z", "max_issues_repo_path": "sign-prediction/train_utils.py", "max_issues_repo_name": "kurshakuz/krsl-recogniton", "max_issues_repo_head_hexsha": "2ffecd1a1bdaae1d9cee68a7d74ae8246719157e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-06-03T03:34:46.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-03T03:34:46.000Z", "max_forks_repo_path": "sign-prediction/train_utils.py", "max_forks_repo_name": "kurshakuz/krsl-recogniton", "max_forks_repo_head_hexsha": "2ffecd1a1bdaae1d9cee68a7d74ae8246719157e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-02-09T21:03:05.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-17T03:43:48.000Z", "avg_line_length": 30.3076923077, "max_line_length": 82, "alphanum_fraction": 0.5882684715, "include": true, "reason": "import numpy", "num_tokens": 887}
|
! .................................................
! ____ _ _ ____ _____ _
! | _ \| | |_| | _ \| ___| |_|
! | |_) | |___ _ | |_) | |___ _
! | _ /| _ | | | | _ /|___ | | |
! | | | | | | | | | | ___| | | |
! |_| |_| |_| |_| |_| |_____| |_|
! .................................................
! PhiPsi: a general-purpose computational
! mechanics program written in Fortran.
! Website: http://phipsi.top
! Author: Shi Fang from Huaiyin Institute of
! Technology, HuaiAn, JiangSu, China
! Contact me: shifang@hyit.edu.cn
! ------------------------------------------------
! Please cite the following papers:
! (1)Shi F, Wang X L, Liu C, Liu H, Wu H A. An
! XFEM-based method with reduction technique
! for modeling hydraulic fracture propagation
! in formations containing frictional natural
! fractures. Engineering Fracture Mechanics,
! 2017, 173: 64-90.
! (2)Shi F, Wang X L, Liu C, Liu H, Wu H A. A
! coupled extended finite element approach
! for modeling hydraulic fracturing in
! consideration of proppant. Journal of
! Natural Gas Science and Engineering, 2016,
! 33: 885-897.
! (3)Shi F, Wang X L, Liu C, Liu H, Wu H A. An
! XFEM-based numerical model to calculate
! conductivity of propped fracture considering
! proppant transport, embedment and crushing.
! Journal of Petroleum Science and Engineering,
! 2018, 167: 615-626..
SUBROUTINE Assemble_Stiffness_Matrix_FEM(isub,globalK)
c Assemble the stiffness matrix.
use Global_Model
use Global_Filename
use Global_Common
use Global_Material
implicit none
integer,intent(in)::isub
double precision globalK(Total_Freedom,Total_Freedom)
integer i_E
double precision c_thick,c_D(3,3)
double precision c_X_NODES(4),c_Y_NODES(4)
integer c_NN(4)
double precision kesi(4),yita(4),weight(4)
double precision localK(8,8)
integer local(8),i_row,i_col,nIndex
print *,' Assembling the global stiffness matrix......'
globalK(1:Total_Freedom,1:Total_Freedom) = 0.0D0
call Cal_Gauss_Points_QUAD(4,
& kesi,
& yita,
& weight)
nIndex = 0
do i_E = 1,Num_Elem
c_thick = thick(Elem_Mat(i_E))
c_D = D(Elem_Mat(i_E),:,:)
c_NN = G_NN(i_E,:)
c_X_NODES = G_X_NODES(i_E,:)
c_Y_NODES = G_Y_NODES(i_E,:)
!Global DOF indices of the element's four nodes (two DOFs per node)
local=[c_NN(1)*2-1,c_NN(1)*2,c_NN(2)*2-1,c_NN(2)*2,
& c_NN(3)*2-1,c_NN(3)*2,c_NN(4)*2-1,c_NN(4)*2]
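!Example: for element nodes (3,7,8,4) this yields the global DOFs
!(5,6,13,14,15,16,7,8), i.e. DOFs 2n-1 and 2n for each node n.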
!Get the element stiffness matrix of the current element
call Cal_Ele_Stiffness_Matrix_N4(c_X_NODES,c_Y_NODES,
& c_thick,c_D,kesi,yita,weight,
& localK)
do i_row = 1,8
do i_col = 1,8
globalK(local(i_row),local(i_col)) =
& globalK(local(i_row),local(i_col)) +
& localK(i_row,i_col)
end do
end do
end do
RETURN
END SUBROUTINE Assemble_Stiffness_Matrix_FEM
|
{"hexsha": "56c1e8d0f991e45102f414913bbf8ca2a7e42aab", "size": 3846, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "Assemble_Stiffness_Matrix_FEM.f", "max_stars_repo_name": "PhiPsi-Software/PhiPsi_XFEM_Fortran_Codes_Early_Version", "max_stars_repo_head_hexsha": "1eae63a8f0dc968b9d5220397879645a83e5d083", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2019-04-15T11:56:45.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-14T11:49:59.000Z", "max_issues_repo_path": "Assemble_Stiffness_Matrix_FEM.f", "max_issues_repo_name": "PhiPsi-Software/PhiPsi_XFEM_Fortran_Codes_Early_Version", "max_issues_repo_head_hexsha": "1eae63a8f0dc968b9d5220397879645a83e5d083", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Assemble_Stiffness_Matrix_FEM.f", "max_forks_repo_name": "PhiPsi-Software/PhiPsi_XFEM_Fortran_Codes_Early_Version", "max_forks_repo_head_hexsha": "1eae63a8f0dc968b9d5220397879645a83e5d083", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2019-01-20T00:55:09.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-19T00:30:28.000Z", "avg_line_length": 43.2134831461, "max_line_length": 70, "alphanum_fraction": 0.4622984919, "num_tokens": 985}
|
import time
import argparse
from datetime import datetime
import logging
import numpy as np
import os
import torch
import torch.nn.functional as F
import torch.multiprocessing as mp
from models import NavCnnModel, NavCnnRnnModel, NavCnnRnnMultModel, NavPlannerControllerModel
from data import EqaDataLoader
from metrics import NavMetric
from models import MaskedNLLCriterion
from models import get_state, ensure_shared_grads
from data import load_vocab
from torch.autograd import Variable
from tqdm import tqdm
torch.backends.cudnn.enabled = False
################################################################################################
#make models trained in pytorch 4 compatible with earlier pytorch versions
import torch._utils
try:
torch._utils._rebuild_tensor_v2
except AttributeError:
def _rebuild_tensor_v2(storage, storage_offset, size, stride, requires_grad, backward_hooks):
tensor = torch._utils._rebuild_tensor(storage, storage_offset, size, stride)
tensor.requires_grad = requires_grad
tensor._backward_hooks = backward_hooks
return tensor
torch._utils._rebuild_tensor_v2 = _rebuild_tensor_v2
################################################################################################
def eval(rank, args, shared_model):
torch.cuda.set_device(args.gpus.index(args.gpus[rank % len(args.gpus)]))
if args.model_type == 'cnn':
model_kwargs = {}
model = NavCnnModel(**model_kwargs)
elif args.model_type == 'cnn+q':
model_kwargs = {
'question_input': True,
'question_vocab': load_vocab(args.vocab_json)
}
model = NavCnnModel(**model_kwargs)
elif args.model_type == 'lstm':
model_kwargs = {}
model = NavCnnRnnModel(**model_kwargs)
elif args.model_type == 'lstm+q':
model_kwargs = {
'question_input': True,
'question_vocab': load_vocab(args.vocab_json)
}
model = NavCnnRnnModel(**model_kwargs)
elif args.model_type == 'lstm-mult+q':
model_kwargs = {
'question_input': True,
'question_vocab': load_vocab(args.vocab_json)
}
model = NavCnnRnnMultModel(**model_kwargs)
elif args.model_type == 'pacman':
model_kwargs = {'question_vocab': load_vocab(args.vocab_json)}
model = NavPlannerControllerModel(**model_kwargs)
else:
exit()
eval_loader_kwargs = {
'questions_h5': getattr(args, args.eval_split + '_h5'),
'data_json': args.data_json,
'vocab': args.vocab_json,
'target_obj_conn_map_dir': args.target_obj_conn_map_dir,
'map_resolution': args.map_resolution,
'batch_size': 1,
'input_type': args.model_type,
'num_frames': 5,
'split': args.eval_split,
'max_threads_per_gpu': args.max_threads_per_gpu,
'gpu_id': args.gpus[rank % len(args.gpus)],
'to_cache': False,
'overfit': args.overfit,
'max_controller_actions': args.max_controller_actions,
}
eval_loader = EqaDataLoader(**eval_loader_kwargs)
print('eval_loader has %d samples' % len(eval_loader.dataset))
logging.info("EVAL: eval_loader has {} samples".format(len(eval_loader.dataset)))
args.output_log_path = os.path.join(args.log_dir,
'eval_' + str(rank) + '.json')
t, epoch, best_eval_acc = 0, 0, 0.0
max_epochs = args.max_epochs
if args.mode == 'eval':
max_epochs = 1
while epoch < int(max_epochs):
invalids = []
model.load_state_dict(shared_model.state_dict())
model.eval()
# that's a lot of numbers
metrics = NavMetric(
info={'split': args.eval_split,
'thread': rank},
metric_names=[
'd_0_10', 'd_0_30', 'd_0_50', 'd_T_10', 'd_T_30', 'd_T_50',
'd_D_10', 'd_D_30', 'd_D_50', 'd_min_10', 'd_min_30',
'd_min_50', 'r_T_10', 'r_T_30', 'r_T_50', 'r_e_10', 'r_e_30',
'r_e_50', 'stop_10', 'stop_30', 'stop_50', 'ep_len_10',
'ep_len_30', 'ep_len_50'
],
log_json=args.output_log_path)
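# Metric naming used throughout: for each spawn distance i in {10, 30, 50},
# d_0_i is the initial distance to target, d_T_i the terminal distance,
# d_D_i the distance reduction (d_0 - d_T), d_min_i the minimum distance
# reached, r_T_i / r_e_i whether the agent terminates in / ever enters the
# target room, stop_i whether it emitted <stop>, and ep_len_i the episode
# length.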
if 'cnn' in args.model_type:
done = False
while done == False:
for batch in tqdm(eval_loader):
model.load_state_dict(shared_model.state_dict())
model.cuda()
idx, questions, _, img_feats, actions_in, actions_out, action_length = batch
metrics_slug = {}
# evaluate at multiple initializations
for i in [10, 30, 50]:
t += 1
if action_length[0] + 1 - i - 5 < 0:
invalids.append(idx[0])
continue
ep_inds = [
x for x in range(action_length[0] + 1 - i - 5,
action_length[0] + 1 - i)
]
sub_img_feats = torch.index_select(
img_feats, 1, torch.LongTensor(ep_inds))
init_pos = eval_loader.dataset.episode_pos_queue[
ep_inds[-1]]
h3d = eval_loader.dataset.episode_house
h3d.env.reset(
x=init_pos[0], y=init_pos[2], yaw=init_pos[3])
init_dist_to_target = h3d.get_dist_to_target(
h3d.env.cam.pos)
if init_dist_to_target < 0: # unreachable
invalids.append(idx[0])
continue
sub_img_feats_var = Variable(sub_img_feats.cuda())
if '+q' in args.model_type:
questions_var = Variable(questions.cuda())
# sample actions till max steps or <stop>
# max no. of actions = 100
episode_length = 0
episode_done = True
dists_to_target, pos_queue, actions = [
init_dist_to_target
], [init_pos], []
for step in range(args.max_episode_length):
episode_length += 1
if '+q' in args.model_type:
scores = model(sub_img_feats_var,
questions_var)
else:
scores = model(sub_img_feats_var)
prob = F.softmax(scores, dim=1)
action = int(prob.max(1)[1].data.cpu().numpy()[0])
actions.append(action)
img, _, episode_done = h3d.step(action)
episode_done = episode_done or episode_length >= args.max_episode_length
img = torch.from_numpy(img.transpose(
2, 0, 1)).float() / 255.0
img_feat_var = eval_loader.dataset.cnn(
Variable(img.view(1, 3, 224, 224)
.cuda())).view(1, 1, 3200)
sub_img_feats_var = torch.cat(
[sub_img_feats_var, img_feat_var], dim=1)
sub_img_feats_var = sub_img_feats_var[:, -5:, :]
dists_to_target.append(
h3d.get_dist_to_target(h3d.env.cam.pos))
pos_queue.append([
h3d.env.cam.pos.x, h3d.env.cam.pos.y,
h3d.env.cam.pos.z, h3d.env.cam.yaw
])
if episode_done == True:
break
# compute stats
metrics_slug['d_0_' + str(i)] = dists_to_target[0]
metrics_slug['d_T_' + str(i)] = dists_to_target[-1]
metrics_slug['d_D_' + str(
i)] = dists_to_target[0] - dists_to_target[-1]
metrics_slug['d_min_' + str(i)] = np.array(
dists_to_target).min()
metrics_slug['ep_len_' + str(i)] = episode_length
if action == 3:
metrics_slug['stop_' + str(i)] = 1
else:
metrics_slug['stop_' + str(i)] = 0
inside_room = []
for p in pos_queue:
inside_room.append(
h3d.is_inside_room(
p, eval_loader.dataset.target_room))
if inside_room[-1] == True:
metrics_slug['r_T_' + str(i)] = 1
else:
metrics_slug['r_T_' + str(i)] = 0
if any([x == True for x in inside_room]) == True:
metrics_slug['r_e_' + str(i)] = 1
else:
metrics_slug['r_e_' + str(i)] = 0
# collate and update metrics
metrics_list = []
for i in metrics.metric_names:
if i not in metrics_slug:
metrics_list.append(metrics.metrics[
metrics.metric_names.index(i)][0])
else:
metrics_list.append(metrics_slug[i])
# update metrics
metrics.update(metrics_list)
print(metrics.get_stat_string(mode=0))
print('invalids', len(invalids))
logging.info("EVAL: metrics: {}".format(metrics.get_stat_string(mode=0)))
logging.info("EVAL: invalids: {}".format(len(invalids)))
# del h3d
eval_loader.dataset._load_envs()
if len(eval_loader.dataset.pruned_env_set) == 0:
done = True
elif 'lstm' in args.model_type:
done = False
while done == False:
if args.overfit:
metrics = NavMetric(
info={'split': args.eval_split,
'thread': rank},
metric_names=[
'd_0_10', 'd_0_30', 'd_0_50', 'd_T_10', 'd_T_30', 'd_T_50',
'd_D_10', 'd_D_30', 'd_D_50', 'd_min_10', 'd_min_30',
'd_min_50', 'r_T_10', 'r_T_30', 'r_T_50', 'r_e_10', 'r_e_30',
'r_e_50', 'stop_10', 'stop_30', 'stop_50', 'ep_len_10',
'ep_len_30', 'ep_len_50'
],
log_json=args.output_log_path)
for batch in tqdm(eval_loader):
model.load_state_dict(shared_model.state_dict())
model.cuda()
idx, questions, answer, _, actions_in, actions_out, action_lengths, _ = batch
question_var = Variable(questions.cuda())
metrics_slug = {}
# evaluate at multiple initializations
for i in [10, 30, 50]:
t += 1
if action_lengths[0] - 1 - i < 0:
invalids.append([idx[0], i])
continue
h3d = eval_loader.dataset.episode_house
# forward through lstm till spawn
if len(eval_loader.dataset.episode_pos_queue[:-i]
) > 0:
images = eval_loader.dataset.get_frames(
h3d,
eval_loader.dataset.episode_pos_queue[:-i],
preprocess=True)
raw_img_feats = eval_loader.dataset.cnn(
Variable(torch.FloatTensor(images).cuda()))
actions_in_pruned = actions_in[:, :
action_lengths[0] -
i]
actions_in_var = Variable(actions_in_pruned.cuda())
action_lengths_pruned = action_lengths.clone(
).fill_(action_lengths[0] - i)
img_feats_var = raw_img_feats.view(1, -1, 3200)
if '+q' in args.model_type:
scores, hidden = model(
img_feats_var, question_var,
actions_in_var,
action_lengths_pruned.cpu().numpy())
else:
scores, hidden = model(
img_feats_var, False, actions_in_var,
action_lengths_pruned.cpu().numpy())
try:
init_pos = eval_loader.dataset.episode_pos_queue[
-i]
except:
invalids.append([idx[0], i])
continue
action_in = torch.LongTensor(1, 1).fill_(
actions_in[0,
action_lengths[0] - i]).cuda()
else:
init_pos = eval_loader.dataset.episode_pos_queue[
-i]
hidden = model.nav_rnn.init_hidden(1)
action_in = torch.LongTensor(1, 1).fill_(0).cuda()
h3d.env.reset(
x=init_pos[0], y=init_pos[2], yaw=init_pos[3])
init_dist_to_target = h3d.get_dist_to_target(
h3d.env.cam.pos)
if init_dist_to_target < 0: # unreachable
invalids.append([idx[0], i])
continue
img = h3d.env.render()
img = torch.from_numpy(img.transpose(
2, 0, 1)).float() / 255.0
img_feat_var = eval_loader.dataset.cnn(
Variable(img.view(1, 3, 224, 224).cuda())).view(
1, 1, 3200)
episode_length = 0
episode_done = True
dists_to_target, pos_queue, actions = [
init_dist_to_target
], [init_pos], []
actual_pos_queue = [(h3d.env.cam.pos.x, h3d.env.cam.pos.z, h3d.env.cam.yaw)]
for step in range(args.max_episode_length):
episode_length += 1
if '+q' in args.model_type:
scores, hidden = model(
img_feat_var,
question_var,
Variable(action_in),
False,
hidden=hidden,
step=True)
else:
scores, hidden = model(
img_feat_var,
False,
Variable(action_in),
False,
hidden=hidden,
step=True)
prob = F.softmax(scores, dim=1)
action = int(prob.max(1)[1].data.cpu().numpy()[0])
actions.append(action)
img, _, episode_done = h3d.step(action)
episode_done = episode_done or episode_length >= args.max_episode_length
img = torch.from_numpy(img.transpose(
2, 0, 1)).float() / 255.0
img_feat_var = eval_loader.dataset.cnn(
Variable(img.view(1, 3, 224, 224)
.cuda())).view(1, 1, 3200)
action_in = torch.LongTensor(
1, 1).fill_(action + 1).cuda()
dists_to_target.append(
h3d.get_dist_to_target(h3d.env.cam.pos))
pos_queue.append([
h3d.env.cam.pos.x, h3d.env.cam.pos.y,
h3d.env.cam.pos.z, h3d.env.cam.yaw
])
if episode_done == True:
break
actual_pos_queue.append([
h3d.env.cam.pos.x, h3d.env.cam.pos.z, h3d.env.cam.yaw])
# compute stats
metrics_slug['d_0_' + str(i)] = dists_to_target[0]
metrics_slug['d_T_' + str(i)] = dists_to_target[-1]
metrics_slug['d_D_' + str(
i)] = dists_to_target[0] - dists_to_target[-1]
metrics_slug['d_min_' + str(i)] = np.array(
dists_to_target).min()
metrics_slug['ep_len_' + str(i)] = episode_length
if action == 3:
metrics_slug['stop_' + str(i)] = 1
else:
metrics_slug['stop_' + str(i)] = 0
inside_room = []
for p in pos_queue:
inside_room.append(
h3d.is_inside_room(
p, eval_loader.dataset.target_room))
if inside_room[-1] == True:
metrics_slug['r_T_' + str(i)] = 1
else:
metrics_slug['r_T_' + str(i)] = 0
if any([x == True for x in inside_room]) == True:
metrics_slug['r_e_' + str(i)] = 1
else:
metrics_slug['r_e_' + str(i)] = 0
# collate and update metrics
metrics_list = []
for i in metrics.metric_names:
if i not in metrics_slug:
metrics_list.append(metrics.metrics[
metrics.metric_names.index(i)][0])
else:
metrics_list.append(metrics_slug[i])
# update metrics
metrics.update(metrics_list)
print(metrics.get_stat_string(mode=0))
print('invalids', len(invalids))
logging.info("EVAL: init_steps: {} metrics: {}".format(i, metrics.get_stat_string(mode=0)))
logging.info("EVAL: init_steps: {} invalids: {}".format(i, len(invalids)))
# del h3d
eval_loader.dataset._load_envs()
print("eval_loader pruned_env_set len: {}".format(len(eval_loader.dataset.pruned_env_set)))
logging.info("eval_loader pruned_env_set len: {}".format(len(eval_loader.dataset.pruned_env_set)))
assert len(eval_loader.dataset.pruned_env_set) > 0
if len(eval_loader.dataset.pruned_env_set) == 0:
done = True
elif 'pacman' in args.model_type:
done = False
while done == False:
if args.overfit:
metrics = NavMetric(
info={'split': args.eval_split,
'thread': rank},
metric_names=[
'd_0_10', 'd_0_30', 'd_0_50', 'd_T_10', 'd_T_30', 'd_T_50',
'd_D_10', 'd_D_30', 'd_D_50', 'd_min_10', 'd_min_30',
'd_min_50', 'r_T_10', 'r_T_30', 'r_T_50', 'r_e_10', 'r_e_30',
'r_e_50', 'stop_10', 'stop_30', 'stop_50', 'ep_len_10',
'ep_len_30', 'ep_len_50'
],
log_json=args.output_log_path)
for batch in tqdm(eval_loader):
model.load_state_dict(shared_model.state_dict())
model.cuda()
idx, question, answer, actions, action_length = batch
metrics_slug = {}
h3d = eval_loader.dataset.episode_house
# evaluate at multiple initializations
for i in [10, 30, 50]:
t += 1
if i > action_length[0]:
invalids.append([idx[0], i])
continue
question_var = Variable(question.cuda())
controller_step = False
planner_hidden = model.planner_nav_rnn.init_hidden(1)
# get hierarchical action history
(
planner_actions_in, planner_img_feats,
controller_step, controller_action_in,
controller_img_feats, init_pos,
controller_action_counter
) = eval_loader.dataset.get_hierarchical_features_till_spawn(
actions[0, :action_length[0] + 1].numpy(), i, args.max_controller_actions
)
planner_actions_in_var = Variable(
planner_actions_in.cuda())
planner_img_feats_var = Variable(
planner_img_feats.cuda())
# forward planner till spawn to update hidden state
for step in range(planner_actions_in.size(0)):
planner_scores, planner_hidden = model.planner_step(
question_var, planner_img_feats_var[step]
.unsqueeze(0).unsqueeze(0),
planner_actions_in_var[step].view(1, 1),
planner_hidden
)
h3d.env.reset(
x=init_pos[0], y=init_pos[2], yaw=init_pos[3])
init_dist_to_target = h3d.get_dist_to_target(
h3d.env.cam.pos)
if init_dist_to_target < 0: # unreachable
invalids.append([idx[0], i])
continue
dists_to_target, pos_queue, pred_actions = [
init_dist_to_target
], [init_pos], []
planner_actions, controller_actions = [], []
episode_length = 0
if args.max_controller_actions > 1:
controller_action_counter = controller_action_counter % args.max_controller_actions
controller_action_counter = max(controller_action_counter - 1, 0)
else:
controller_action_counter = 0
first_step = True
first_step_is_controller = controller_step
planner_step = True
action = int(controller_action_in)
for step in range(args.max_episode_length):
if not first_step:
img = torch.from_numpy(img.transpose(
2, 0, 1)).float() / 255.0
img_feat_var = eval_loader.dataset.cnn(
Variable(img.view(1, 3, 224,
224).cuda())).view(
1, 1, 3200)
else:
img_feat_var = Variable(controller_img_feats.cuda()).view(1, 1, 3200)
if not first_step or first_step_is_controller:
# query controller to continue or not
controller_action_in = Variable(
torch.LongTensor(1, 1).fill_(action).cuda())
controller_scores = model.controller_step(
img_feat_var, controller_action_in,
planner_hidden[0])
prob = F.softmax(controller_scores, dim=1)
controller_action = int(
prob.max(1)[1].data.cpu().numpy()[0])
if controller_action == 1 and controller_action_counter < args.max_controller_actions - 1:
controller_action_counter += 1
planner_step = False
else:
controller_action_counter = 0
planner_step = True
controller_action = 0
controller_actions.append(controller_action)
first_step = False
if planner_step:
if not first_step:
action_in = torch.LongTensor(
1, 1).fill_(action + 1).cuda()
planner_scores, planner_hidden = model.planner_step(
question_var, img_feat_var,
Variable(action_in), planner_hidden)
prob = F.softmax(planner_scores, dim=1)
action = int(
prob.max(1)[1].data.cpu().numpy()[0])
planner_actions.append(action)
episode_done = action == 3 or episode_length >= args.max_episode_length
episode_length += 1
dists_to_target.append(
h3d.get_dist_to_target(h3d.env.cam.pos))
pos_queue.append([
h3d.env.cam.pos.x, h3d.env.cam.pos.y,
h3d.env.cam.pos.z, h3d.env.cam.yaw
])
if episode_done:
break
img, _, _ = h3d.step(action)
first_step = False
# compute stats
metrics_slug['d_0_' + str(i)] = dists_to_target[0]
metrics_slug['d_T_' + str(i)] = dists_to_target[-1]
metrics_slug['d_D_' + str(
i)] = dists_to_target[0] - dists_to_target[-1]
metrics_slug['d_min_' + str(i)] = np.array(
dists_to_target).min()
metrics_slug['ep_len_' + str(i)] = episode_length
if action == 3:
metrics_slug['stop_' + str(i)] = 1
else:
metrics_slug['stop_' + str(i)] = 0
inside_room = []
for p in pos_queue:
inside_room.append(
h3d.is_inside_room(
p, eval_loader.dataset.target_room))
                if inside_room[-1]:
metrics_slug['r_T_' + str(i)] = 1
else:
metrics_slug['r_T_' + str(i)] = 0
                if any(inside_room):
metrics_slug['r_e_' + str(i)] = 1
else:
metrics_slug['r_e_' + str(i)] = 0
# collate and update metrics
metrics_list = []
        for metric_name in metrics.metric_names:
            if metric_name not in metrics_slug:
                metrics_list.append(metrics.metrics[
                    metrics.metric_names.index(metric_name)][0])
            else:
                metrics_list.append(metrics_slug[metric_name])
# update metrics
metrics.update(metrics_list)
try:
print(metrics.get_stat_string(mode=0))
logging.info("EVAL: metrics: {}".format(metrics.get_stat_string(mode=0)))
        except Exception:
pass
print('epoch', epoch)
print('invalids', len(invalids))
logging.info("EVAL: epoch {}".format(epoch))
logging.info("EVAL: invalids {}".format(invalids))
# del h3d
eval_loader.dataset._load_envs()
if len(eval_loader.dataset.pruned_env_set) == 0:
done = True
epoch += 1
# checkpoint if best val loss
if metrics.metrics[8][0] > best_eval_acc: # d_D_50
best_eval_acc = metrics.metrics[8][0]
        if epoch % args.eval_every == 0 and args.log:
metrics.dump_log()
model_state = get_state(model)
aad = dict(args.__dict__)
ad = {}
for i in aad:
if i[0] != '_':
ad[i] = aad[i]
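        # (Added note) attributes starting with '_' are skipped, presumably so
        # that only public arguments end up in the saved checkpoint.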
checkpoint = {'args': ad, 'state': model_state, 'epoch': epoch}
checkpoint_path = '%s/epoch_%d_d_D_50_%.04f.pt' % (
args.checkpoint_dir, epoch, best_eval_acc)
print('Saving checkpoint to %s' % checkpoint_path)
logging.info("EVAL: Saving checkpoint to {}".format(checkpoint_path))
torch.save(checkpoint, checkpoint_path)
print('[best_eval_d_D_50:%.04f]' % best_eval_acc)
logging.info("EVAL: [best_eval_d_D_50:{:.04f}]".format(best_eval_acc))
eval_loader.dataset._load_envs(start_idx=0, in_order=True)
def train(rank, args, shared_model):
torch.cuda.set_device(args.gpus.index(args.gpus[rank % len(args.gpus)]))
if args.model_type == 'cnn':
model_kwargs = {}
model = NavCnnModel(**model_kwargs)
elif args.model_type == 'cnn+q':
model_kwargs = {
'question_input': True,
'question_vocab': load_vocab(args.vocab_json)
}
model = NavCnnModel(**model_kwargs)
elif args.model_type == 'lstm':
model_kwargs = {}
model = NavCnnRnnModel(**model_kwargs)
elif args.model_type == 'lstm-mult+q':
model_kwargs = {
'question_input': True,
'question_vocab': load_vocab(args.vocab_json)
}
model = NavCnnRnnMultModel(**model_kwargs)
elif args.model_type == 'lstm+q':
model_kwargs = {
'question_input': True,
'question_vocab': load_vocab(args.vocab_json)
}
model = NavCnnRnnModel(**model_kwargs)
elif args.model_type == 'pacman':
model_kwargs = {'question_vocab': load_vocab(args.vocab_json)}
model = NavPlannerControllerModel(**model_kwargs)
else:
exit()
lossFn = torch.nn.CrossEntropyLoss().cuda()
optim = torch.optim.Adamax(
filter(lambda p: p.requires_grad, shared_model.parameters()),
lr=args.learning_rate)
train_loader_kwargs = {
'questions_h5': args.train_h5,
'data_json': args.data_json,
'vocab': args.vocab_json,
'batch_size': args.batch_size,
'input_type': args.model_type,
'num_frames': 5,
'map_resolution': args.map_resolution,
'split': 'train',
'max_threads_per_gpu': args.max_threads_per_gpu,
'gpu_id': args.gpus[rank % len(args.gpus)],
'to_cache': args.cache,
'overfit': args.overfit,
'max_controller_actions': args.max_controller_actions,
'max_actions': args.max_actions
}
args.output_log_path = os.path.join(args.log_dir,
'train_' + str(rank) + '.json')
if 'pacman' in args.model_type:
metrics = NavMetric(
info={'split': 'train',
'thread': rank},
metric_names=['planner_loss', 'controller_loss'],
log_json=args.output_log_path)
else:
metrics = NavMetric(
info={'split': 'train',
'thread': rank},
metric_names=['loss'],
log_json=args.output_log_path)
train_loader = EqaDataLoader(**train_loader_kwargs)
print('train_loader has %d samples' % len(train_loader.dataset))
logging.info('TRAIN: train loader has {} samples'.format(len(train_loader.dataset)))
t, epoch = 0, 0
while epoch < int(args.max_epochs):
if 'cnn' in args.model_type:
done = False
all_envs_loaded = train_loader.dataset._check_if_all_envs_loaded()
            while not done:
for batch in train_loader:
t += 1
model.load_state_dict(shared_model.state_dict())
model.train()
model.cuda()
idx, questions, _, img_feats, _, actions_out, _ = batch
img_feats_var = Variable(img_feats.cuda())
if '+q' in args.model_type:
questions_var = Variable(questions.cuda())
actions_out_var = Variable(actions_out.cuda())
if '+q' in args.model_type:
scores = model(img_feats_var, questions_var)
else:
scores = model(img_feats_var)
loss = lossFn(scores, actions_out_var)
# zero grad
optim.zero_grad()
# update metrics
metrics.update([loss.data[0]])
# backprop and update
loss.backward()
ensure_shared_grads(model.cpu(), shared_model)
optim.step()
if t % args.print_every == 0:
print(metrics.get_stat_string())
logging.info("TRAIN: metrics: {}".format(metrics.get_stat_string()))
                        if args.log:
metrics.dump_log()
print('[CHECK][Cache:%d][Total:%d]' %
(len(train_loader.dataset.img_data_cache),
len(train_loader.dataset.env_list)))
logging.info('TRAIN: [CHECK][Cache:{}][Total:{}]'.format(
len(train_loader.dataset.img_data_cache), len(train_loader.dataset.env_list)))
                if not all_envs_loaded:
train_loader.dataset._load_envs(in_order=True)
if len(train_loader.dataset.pruned_env_set) == 0:
done = True
                        if not args.cache:
train_loader.dataset._load_envs(
start_idx=0, in_order=True)
else:
done = True
elif 'lstm' in args.model_type:
lossFn = MaskedNLLCriterion().cuda()
done = False
all_envs_loaded = train_loader.dataset._check_if_all_envs_loaded()
total_times = []
            while not done:
start_time = time.time()
for batch in train_loader:
t += 1
model.load_state_dict(shared_model.state_dict())
model.train()
model.cuda()
idx, questions, _, img_feats, actions_in, actions_out, action_lengths, masks = batch
img_feats_var = Variable(img_feats.cuda())
if '+q' in args.model_type:
questions_var = Variable(questions.cuda())
actions_in_var = Variable(actions_in.cuda())
actions_out_var = Variable(actions_out.cuda())
action_lengths = action_lengths.cuda()
masks_var = Variable(masks.cuda())
action_lengths, perm_idx = action_lengths.sort(
0, descending=True)
img_feats_var = img_feats_var[perm_idx]
if '+q' in args.model_type:
questions_var = questions_var[perm_idx]
actions_in_var = actions_in_var[perm_idx]
actions_out_var = actions_out_var[perm_idx]
masks_var = masks_var[perm_idx]
if '+q' in args.model_type:
scores, hidden = model(img_feats_var, questions_var,
actions_in_var,
action_lengths.cpu().numpy())
else:
scores, hidden = model(img_feats_var, False,
actions_in_var,
action_lengths.cpu().numpy())
#block out masks
if args.curriculum:
curriculum_length = (epoch+1)*5
for i, action_length in enumerate(action_lengths):
if action_length - curriculum_length > 0:
masks_var[i, :action_length-curriculum_length] = 0
logprob = F.log_softmax(scores, dim=1)
loss = lossFn(
logprob, actions_out_var[:, :action_lengths.max()]
.contiguous().view(-1, 1),
masks_var[:, :action_lengths.max()].contiguous().view(
-1, 1))
# zero grad
optim.zero_grad()
# update metrics
metrics.update([loss.data[0]])
logging.info("TRAIN LSTM loss: {:.6f}".format(loss.data[0]))
# backprop and update
loss.backward()
ensure_shared_grads(model.cpu(), shared_model)
optim.step()
if t % args.print_every == 0:
print(metrics.get_stat_string())
logging.info("TRAIN: metrics: {}".format(metrics.get_stat_string()))
                        if args.log:
metrics.dump_log()
print('[CHECK][Cache:%d][Total:%d]' %
(len(train_loader.dataset.img_data_cache),
len(train_loader.dataset.env_list)))
logging.info('TRAIN: [CHECK][Cache:{}][Total:{}]'.format(
len(train_loader.dataset.img_data_cache), len(train_loader.dataset.env_list)))
                if not all_envs_loaded:
train_loader.dataset._load_envs(in_order=True)
if len(train_loader.dataset.pruned_env_set) == 0:
done = True
                        if not args.cache:
train_loader.dataset._load_envs(
start_idx=0, in_order=True)
else:
done = True
elif 'pacman' in args.model_type:
planner_lossFn = MaskedNLLCriterion().cuda()
controller_lossFn = MaskedNLLCriterion().cuda()
done = False
all_envs_loaded = train_loader.dataset._check_if_all_envs_loaded()
            while not done:
for batch in train_loader:
t += 1
model.load_state_dict(shared_model.state_dict())
model.train()
model.cuda()
idx, questions, _, planner_img_feats, planner_actions_in, \
planner_actions_out, planner_action_lengths, planner_masks, \
controller_img_feats, controller_actions_in, planner_hidden_idx, \
controller_outs, controller_action_lengths, controller_masks = batch
questions_var = Variable(questions.cuda())
planner_img_feats_var = Variable(planner_img_feats.cuda())
planner_actions_in_var = Variable(
planner_actions_in.cuda())
planner_actions_out_var = Variable(
planner_actions_out.cuda())
planner_action_lengths = planner_action_lengths.cuda()
planner_masks_var = Variable(planner_masks.cuda())
controller_img_feats_var = Variable(
controller_img_feats.cuda())
controller_actions_in_var = Variable(
controller_actions_in.cuda())
planner_hidden_idx_var = Variable(
planner_hidden_idx.cuda())
controller_outs_var = Variable(controller_outs.cuda())
controller_action_lengths = controller_action_lengths.cuda(
)
controller_masks_var = Variable(controller_masks.cuda())
planner_action_lengths, perm_idx = planner_action_lengths.sort(
0, descending=True)
questions_var = questions_var[perm_idx]
planner_img_feats_var = planner_img_feats_var[perm_idx]
planner_actions_in_var = planner_actions_in_var[perm_idx]
planner_actions_out_var = planner_actions_out_var[perm_idx]
planner_masks_var = planner_masks_var[perm_idx]
controller_img_feats_var = controller_img_feats_var[
perm_idx]
controller_actions_in_var = controller_actions_in_var[
perm_idx]
controller_outs_var = controller_outs_var[perm_idx]
planner_hidden_idx_var = planner_hidden_idx_var[perm_idx]
controller_action_lengths = controller_action_lengths[
perm_idx]
controller_masks_var = controller_masks_var[perm_idx]
planner_scores, controller_scores, planner_hidden = model(
questions_var, planner_img_feats_var,
planner_actions_in_var,
planner_action_lengths.cpu().numpy(),
planner_hidden_idx_var, controller_img_feats_var,
controller_actions_in_var, controller_action_lengths)
planner_logprob = F.log_softmax(planner_scores, dim=1)
controller_logprob = F.log_softmax(
controller_scores, dim=1)
planner_loss = planner_lossFn(
planner_logprob,
planner_actions_out_var[:, :planner_action_lengths.max(
)].contiguous().view(-1, 1),
planner_masks_var[:, :planner_action_lengths.max()]
.contiguous().view(-1, 1))
controller_loss = controller_lossFn(
controller_logprob,
controller_outs_var[:, :controller_action_lengths.max(
)].contiguous().view(-1, 1),
controller_masks_var[:, :controller_action_lengths.max(
)].contiguous().view(-1, 1))
# zero grad
optim.zero_grad()
# update metrics
metrics.update(
[planner_loss.data[0], controller_loss.data[0]])
logging.info("TRAINING PACMAN planner-loss: {:.6f} controller-loss: {:.6f}".format(
planner_loss.data[0], controller_loss.data[0]))
# backprop and update
if args.max_controller_actions == 1:
                        planner_loss.backward()
else:
(planner_loss + controller_loss).backward()
ensure_shared_grads(model.cpu(), shared_model)
optim.step()
if t % args.print_every == 0:
print(metrics.get_stat_string())
logging.info("TRAIN: metrics: {}".format(metrics.get_stat_string()))
                        if args.log:
metrics.dump_log()
print('[CHECK][Cache:%d][Total:%d]' %
(len(train_loader.dataset.img_data_cache),
len(train_loader.dataset.env_list)))
logging.info('TRAIN: [CHECK][Cache:{}][Total:{}]'.format(
len(train_loader.dataset.img_data_cache), len(train_loader.dataset.env_list)))
                if not all_envs_loaded:
train_loader.dataset._load_envs(in_order=True)
if len(train_loader.dataset.pruned_env_set) == 0:
done = True
                        if not args.cache:
train_loader.dataset._load_envs(
start_idx=0, in_order=True)
else:
done = True
epoch += 1
if epoch % args.save_every == 0:
model_state = get_state(model)
optimizer_state = optim.state_dict()
aad = dict(args.__dict__)
ad = {}
for i in aad:
if i[0] != '_':
ad[i] = aad[i]
checkpoint = {'args': ad,
'state': model_state,
'epoch': epoch,
'optimizer': optimizer_state}
checkpoint_path = '%s/epoch_%d_thread_%d.pt' % (
args.checkpoint_dir, epoch, rank)
print('Saving checkpoint to %s' % checkpoint_path)
logging.info("TRAIN: Saving checkpoint to {}".format(checkpoint_path))
torch.save(checkpoint, checkpoint_path)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# data params
parser.add_argument('-train_h5', default='data/train.h5')
parser.add_argument('-val_h5', default='data/val.h5')
parser.add_argument('-test_h5', default='data/test.h5')
parser.add_argument('-data_json', default='data/data.json')
parser.add_argument('-vocab_json', default='data/vocab.json')
parser.add_argument(
'-target_obj_conn_map_dir',
default='data/target-obj-conn-maps/500')
parser.add_argument('-map_resolution', default=500, type=int)
parser.add_argument(
'-mode',
default='train+eval',
type=str,
choices=['train', 'eval', 'train+eval'])
parser.add_argument('-eval_split', default='val', type=str)
# model details
parser.add_argument(
'-model_type',
default='cnn',
choices=['cnn', 'cnn+q', 'lstm', 'lstm+q', 'lstm-mult+q', 'pacman'])
parser.add_argument('-max_episode_length', default=100, type=int)
parser.add_argument('-curriculum', default=0, type=int)
# optim params
parser.add_argument('-batch_size', default=20, type=int)
parser.add_argument('-learning_rate', default=1e-3, type=float)
parser.add_argument('-max_epochs', default=1000, type=int)
parser.add_argument('-overfit', default=False, action='store_true')
# bookkeeping
parser.add_argument('-print_every', default=5, type=int)
parser.add_argument('-eval_every', default=1, type=int)
parser.add_argument('-save_every', default=1000, type=int) #optional if you would like to save specific epochs as opposed to relying on the eval thread
parser.add_argument('-identifier', default='cnn')
parser.add_argument('-num_processes', default=1, type=int)
parser.add_argument('-max_threads_per_gpu', default=10, type=int)
# checkpointing
parser.add_argument('-checkpoint_path', default=False)
parser.add_argument('-checkpoint_dir', default='checkpoints/nav/')
parser.add_argument('-log_dir', default='logs/nav/')
parser.add_argument('-log', default=False, action='store_true')
parser.add_argument('-cache', default=False, action='store_true')
parser.add_argument('-max_controller_actions', type=int, default=5)
parser.add_argument('-max_actions', type=int)
args = parser.parse_args()
args.time_id = time.strftime("%m_%d_%H:%M")
#MAX_CONTROLLER_ACTIONS = args.max_controller_actions
if not os.path.isdir(args.log_dir):
os.makedirs(args.log_dir)
if args.curriculum:
assert 'lstm' in args.model_type #TODO: Finish implementing curriculum for other model types
logging.basicConfig(filename=os.path.join(args.log_dir, "run_{}.log".format(
str(datetime.now()).replace(' ', '_'))),
level=logging.INFO,
format='%(asctime)-15s %(message)s')
try:
args.gpus = os.environ['CUDA_VISIBLE_DEVICES'].split(',')
args.gpus = [int(x) for x in args.gpus]
except KeyError:
print("CPU not supported")
logging.info("CPU not supported")
exit()
    if args.checkpoint_path:
print('Loading checkpoint from %s' % args.checkpoint_path)
logging.info("Loading checkpoint from {}".format(args.checkpoint_path))
args_to_keep = ['model_type']
checkpoint = torch.load(args.checkpoint_path, map_location={
'cuda:0': 'cpu'
})
for i in args.__dict__:
if i not in args_to_keep:
checkpoint['args'][i] = args.__dict__[i]
args = type('new_dict', (object, ), checkpoint['args'])
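        # (Added note) this builds an ad-hoc class whose attributes are the merged
        # checkpoint/command-line arguments, so args.<name> lookups keep working.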
args.checkpoint_dir = os.path.join(args.checkpoint_dir,
args.time_id + '_' + args.identifier)
args.log_dir = os.path.join(args.log_dir,
args.time_id + '_' + args.identifier)
# if set to overfit; set eval_split to train
    if args.overfit:
args.eval_split = 'train'
print(args.__dict__)
logging.info(args.__dict__)
    if not os.path.exists(args.checkpoint_dir):
        os.makedirs(args.checkpoint_dir)
    if not os.path.exists(args.log_dir):
        os.makedirs(args.log_dir)
if args.model_type == 'cnn':
model_kwargs = {}
shared_model = NavCnnModel(**model_kwargs)
elif args.model_type == 'cnn+q':
model_kwargs = {
'question_input': True,
'question_vocab': load_vocab(args.vocab_json)
}
shared_model = NavCnnModel(**model_kwargs)
elif args.model_type == 'lstm':
model_kwargs = {}
shared_model = NavCnnRnnModel(**model_kwargs)
elif args.model_type == 'lstm+q':
model_kwargs = {
'question_input': True,
'question_vocab': load_vocab(args.vocab_json)
}
shared_model = NavCnnRnnModel(**model_kwargs)
elif args.model_type == 'pacman':
model_kwargs = {'question_vocab': load_vocab(args.vocab_json)}
shared_model = NavPlannerControllerModel(**model_kwargs)
else:
exit()
shared_model.share_memory()
    if args.checkpoint_path:
print('Loading params from checkpoint: %s' % args.checkpoint_path)
logging.info("Loading params from checkpoint: {}".format(args.checkpoint_path))
shared_model.load_state_dict(checkpoint['state'])
if args.mode == 'eval':
eval(0, args, shared_model)
elif args.mode == 'train':
if args.num_processes > 1:
processes = []
for rank in range(0, args.num_processes):
p = mp.Process(target=train, args=(rank, args, shared_model))
p.start()
processes.append(p)
for p in processes:
p.join()
else:
train(0, args, shared_model)
else:
processes = []
# Start the eval thread
p = mp.Process(target=eval, args=(0, args, shared_model))
p.start()
processes.append(p)
# Start the training thread(s)
for rank in range(1, args.num_processes + 1):
# for rank in range(0, args.num_processes):
p = mp.Process(target=train, args=(rank, args, shared_model))
p.start()
processes.append(p)
for p in processes:
p.join()
|
{"hexsha": "206bdceed901242fca25b737a0e8e945f5ce902c", "size": 54328, "ext": "py", "lang": "Python", "max_stars_repo_path": "training/train_nav.py", "max_stars_repo_name": "catalina17/EmbodiedQA", "max_stars_repo_head_hexsha": "492c2e907697691899e7fe2102b0b859059d4efd", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 289, "max_stars_repo_stars_event_min_datetime": "2018-06-14T22:51:20.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-09T19:48:37.000Z", "max_issues_repo_path": "training/train_nav.py", "max_issues_repo_name": "catalina17/EmbodiedQA", "max_issues_repo_head_hexsha": "492c2e907697691899e7fe2102b0b859059d4efd", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 27, "max_issues_repo_issues_event_min_datetime": "2018-06-26T07:57:51.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-11T23:22:02.000Z", "max_forks_repo_path": "training/train_nav.py", "max_forks_repo_name": "catalina17/EmbodiedQA", "max_forks_repo_head_hexsha": "492c2e907697691899e7fe2102b0b859059d4efd", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 66, "max_forks_repo_forks_event_min_datetime": "2018-06-14T23:34:32.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-25T11:16:09.000Z", "avg_line_length": 40.8481203008, "max_line_length": 155, "alphanum_fraction": 0.4754454425, "include": true, "reason": "import numpy", "num_tokens": 10220}
|
import numpy as np
import pandas as pd
from . import ops
def make_chromarms(
chromsizes,
midpoints,
cols_chroms=("chrom", "length"),
cols_mids=("chrom", "mids"),
suffixes=("_p", "_q"),
):
"""
Split chromosomes into chromosome arms
Parameters
----------
chromsizes : pandas.Dataframe or pandas.Series
If pandas.Series, a map from chromosomes to lengths in bp.
If pandas.Dataframe, a dataframe with columns 'chrom' and 'length'.
midpoints : pandas.Dataframe or dict-like
Mapping of chromosomes to midpoint locations.
suffixes : tuple, optional
Suffixes to name chromosome arms. Defaults to p and q.
Returns
-------
df_chromarms
4-column BED-like DataFrame (chrom, start, end, name).
Arm names are chromosome names + suffix.
        Any chromosome not included in ``midpoints`` will be omitted.
"""
ck1, sk1 = cols_chroms
ck2, sk2 = cols_mids
if isinstance(chromsizes, pd.Series):
df_chroms = (
pd.DataFrame(chromsizes).reset_index().rename(columns={"index": ck1})
)
elif isinstance(chromsizes, pd.DataFrame):
df_chroms = chromsizes.copy()
else:
raise ValueError("unknown input type for chromsizes")
    if isinstance(midpoints, dict):
        df_mids = pd.DataFrame.from_dict(midpoints, orient="index", columns=[sk2])
        df_mids.reset_index(inplace=True)
        df_mids.rename(columns={"index": ck2}, inplace=True)
    elif isinstance(midpoints, pd.DataFrame):
        df_mids = midpoints.copy()
    else:
        raise ValueError("unknown input type for midpoints")
ops._verify_columns(df_mids, [ck2, sk2])
ops._verify_columns(df_chroms, [ck1, sk1])
df_chroms["start"] = 0
df_chroms["end"] = df_chroms[sk1].values
df_chromarms = ops.split(
df_chroms,
df_mids,
add_names=True,
cols=(ck1, "start", "end"),
cols_points=(ck2, sk2),
suffixes=suffixes,
)
df_chromarms["name"].replace("[\:\[].*?[\)\_]", "", regex=True, inplace=True)
df_chromarms.drop(columns=["index_2", "length"], inplace=True)
return df_chromarms
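# Illustrative usage sketch (added; not part of the original module, values made up):
#
#   chromsizes = pd.Series({"chr1": 100})
#   arms = make_chromarms(chromsizes, {"chr1": 40})
#   # expected: two arms, 'chr1_p' spanning [0, 40) and 'chr1_q' spanning [40, 100)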
def binnify(chromsizes, binsize, rel_ids=False):
"""
Divide a genome into evenly sized bins.
Parameters
----------
chromsizes : Series
pandas Series indexed by chromosome name with chromosome lengths in bp.
binsize : int
size of bins in bp
Returns
-------
bintable : pandas.DataFrame with columns: 'chrom', 'start', 'end'.
"""
    if not isinstance(binsize, int):
raise ValueError("binsize must be int")
def _each(chrom):
clen = chromsizes[chrom]
n_bins = int(np.ceil(clen / binsize))
binedges = np.arange(0, (n_bins + 1)) * binsize
binedges[-1] = clen
return pd.DataFrame(
{"chrom": [chrom] * n_bins, "start": binedges[:-1], "end": binedges[1:]},
columns=["chrom", "start", "end"],
)
bintable = pd.concat(map(_each, chromsizes.keys()), axis=0, ignore_index=True)
if rel_ids:
bintable["rel_id"] = bintable.groupby("chrom").cumcount()
# if as_cat:
# bintable['chrom'] = pd.Categorical(
# bintable['chrom'],
# categories=list(chromsizes.keys()),
# ordered=True)
return bintable
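# Illustrative usage sketch (added; not part of the original module, values made up):
#
#   bins = binnify(pd.Series({"chr1": 10, "chr2": 7}), binsize=4)
#   # chr1 -> [0, 4), [4, 8), [8, 10); chr2 -> [0, 4), [4, 7)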
def digest(fasta_records, enzyme):
"""
Divide a genome into restriction fragments.
Parameters
----------
fasta_records : OrderedDict
Dictionary of chromosome names to sequence records.
Created by: bioframe.load_fasta('/path/to/fasta.fa')
enzyme: str
Name of restriction enzyme.
Returns
-------
Dataframe with columns: 'chrom', 'start', 'end'.
"""
import Bio.Restriction as biorst
import Bio.Seq as bioseq
# http://biopython.org/DIST/docs/cookbook/Restriction.html#mozTocId447698
chroms = fasta_records.keys()
try:
cut_finder = getattr(biorst, enzyme).search
except AttributeError:
raise ValueError("Unknown enzyme name: {}".format(enzyme))
def _each(chrom):
seq = bioseq.Seq(str(fasta_records[chrom][:]))
cuts = np.r_[0, np.array(cut_finder(seq)) + 1, len(seq)].astype(int)
n_frags = len(cuts) - 1
frags = pd.DataFrame(
{"chrom": [chrom] * n_frags, "start": cuts[:-1], "end": cuts[1:]},
columns=["chrom", "start", "end"],
)
return frags
return pd.concat(map(_each, chroms), axis=0, ignore_index=True)
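# Illustrative usage sketch (added; requires Biopython and a fasta file, whose
# path here is hypothetical):
#
#   fasta_records = bioframe.load_fasta('/path/to/fasta.fa')
#   frags = digest(fasta_records, 'HindIII')  # enzyme names come from Bio.Restriction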
def frac_mapped(df, fasta_records, return_input=True):
"""
Calculate the fraction of mapped base-pairs for each interval in a dataframe.
Parameters
----------
df : pandas.DataFrame
        A set of genomic intervals stored as a DataFrame.
fasta_records : OrderedDict
Dictionary of chromosome names to sequence records.
Created by: bioframe.load_fasta('/path/to/fasta.fa')
return_input: bool
if False, only return Series named frac_mapped.
Returns
-------
df_mapped : pd.DataFrame
Original dataframe with new column 'frac_mapped' appended.
"""
if not set(df["chrom"].values).issubset(set(fasta_records.keys())):
        raise ValueError(
"chrom from intervals not in fasta_records: double-check genome agreement"
)
def _each(bin):
s = str(fasta_records[bin.chrom][bin.start : bin.end])
nbases = len(s)
n = s.count("N")
n += s.count("n")
        return (nbases - n) / nbases if nbases > 0 else np.nan
if return_input:
return pd.concat(
            [df, df.apply(_each, axis=1).rename("frac_mapped")],
axis="columns",
)
else:
        return df.apply(_each, axis=1).rename("frac_mapped")
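# Illustrative usage sketch (added; 'bins' and 'fasta_records' are assumed to come
# from binnify() and bioframe.load_fasta() above):
#
#   bins = frac_mapped(bins, fasta_records)
#   # appends a 'frac_mapped' column; an interval whose sequence is half 'N'
#   # bases gets frac_mapped == 0.5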
def frac_gc(df, fasta_records, mapped_only=True, return_input=True):
"""
Calculate the fraction of GC basepairs for each interval in a dataframe.
Parameters
----------
df : pandas.DataFrame
        A set of genomic intervals stored as a DataFrame.
fasta_records : OrderedDict
Dictionary of chromosome names to sequence records.
Created by: bioframe.load_fasta('/path/to/fasta.fa')
mapped_only: bool
if True, ignore 'N' in the fasta_records for calculation.
if True and there are no mapped base-pairs in an interval, return np.nan.
return_input: bool
        if False, only return Series named 'GC'.
Returns
-------
    df_gc : pd.DataFrame
        Original dataframe with new column 'GC' appended.
"""
if not set(df["chrom"].values).issubset(set(fasta_records.keys())):
        raise ValueError(
"chrom from intervals not in fasta_records: double-check genome agreement"
)
def _each(chrom_group):
chrom = chrom_group.name
seq = fasta_records[chrom]
gc = []
for _, bin in chrom_group.iterrows():
s = str(seq[bin.start : bin.end])
g = s.count("G")
g += s.count("g")
c = s.count("C")
c += s.count("c")
nbases = len(s)
if mapped_only:
n = s.count("N")
n += s.count("n")
nbases -= n
gc.append((g + c) / nbases if nbases > 0 else np.nan)
return gc
out = df.groupby("chrom", sort=False).apply(_each)
if return_input:
return pd.concat(
[df, pd.Series(data=np.concatenate(out), index=df.index).rename("GC")],
axis="columns",
)
else:
return pd.Series(data=np.concatenate(out), index=df.index).rename("GC")
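# Illustrative usage sketch (added; same assumptions as the frac_mapped example):
#
#   bins = frac_gc(bins, fasta_records, mapped_only=True)
#   # appends a 'GC' column; e.g. the sequence 'ACGT' gives GC == 0.5, and an
#   # all-'N' interval gives np.nan when mapped_only=True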
def frac_gene_coverage(df, mrna_genome):
"""
    Calculate the number and fraction of overlaps with genes for a set of intervals stored in a dataframe.
Parameters
----------
df : pd.DataFrame
Set of genomic intervals stored as a dataframe.
mrna_genome: str
Name of genome.
Returns
-------
df_gene_coverage : pd.DataFrame
"""
raise NotImplementedError("implementation currently broken!")
# if isinstance(mrna, six.string_types):
# from .io.resources import UCSCClient
# mrna = (
# UCSCClient(mrna_genome)
# .fetch_mrna()
# .rename(columns={"tName": "chrom", "tStart": "start", "tEnd": "end"})
# )
# df_gene_coverage = ops.coverage(df, mrna)
# df_gene_coverage = ops.count_overlaps(df_gene_coverage, mrna)
# return df_gene_coverage
|
{"hexsha": "a61dddbf01ca2b002b91d211fcd2c9297d7b3248", "size": 8510, "ext": "py", "lang": "Python", "max_stars_repo_path": "bioframe/genomeops.py", "max_stars_repo_name": "itsameerkat/bioframe", "max_stars_repo_head_hexsha": "22675199efd1ee80304ed31a108505d84341d5ab", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "bioframe/genomeops.py", "max_issues_repo_name": "itsameerkat/bioframe", "max_issues_repo_head_hexsha": "22675199efd1ee80304ed31a108505d84341d5ab", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "bioframe/genomeops.py", "max_forks_repo_name": "itsameerkat/bioframe", "max_forks_repo_head_hexsha": "22675199efd1ee80304ed31a108505d84341d5ab", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.75, "max_line_length": 100, "alphanum_fraction": 0.6029377203, "include": true, "reason": "import numpy", "num_tokens": 2080}
|
#include "command.h"
#include "kits_cmd.h"
#include "genarchive.h"
#include "mergeruns.h"
#include "agglog.h"
#include "logcat.h"
#include "verifylog.h"
#include "truncatelog.h"
#include "propstats.h"
#include "logpagestats.h"
#include "loganalysis.h"
#include "dbscan.h"
#include "addbackup.h"
#include "xctlatency.h"
#include "tracerestore.h"
#include "archstats.h"
#include "logrecinfo.h"
#include "nodbgen.h"
#include <boost/foreach.hpp>
/*
* Adapted from
* http://stackoverflow.com/questions/582331/is-there-a-way-to-instantiate-objects-from-a-string-holding-their-class-name
*/
Command::ConstructorMap Command::constructorMap;
template<typename T> Command* createCommand()
{
return new T;
}
#define REGISTER_COMMAND(str, cmd) \
{ \
Command::constructorMap[str] = &createCommand<cmd>; \
}
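/*
 * Illustrative note (added, not in the original source): once init() below has
 * run, a command object can be default-constructed from its name alone, e.g.
 *
 *     Command* cmd = Command::constructorMap["logcat"]();   // creates a LogCat
 *
 * which is exactly the lookup that Command::parse() performs.
 */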
void Command::init()
{
/*
* COMMANDS MUST BE REGISTERED HERE AND ONLY HERE
*/
REGISTER_COMMAND("logcat", LogCat);
//REGISTER_COMMAND("logreplay", LogReplay);
REGISTER_COMMAND("genarchive", GenArchive);
REGISTER_COMMAND("mergeruns", MergeRuns);
REGISTER_COMMAND("verifylog", VerifyLog);
REGISTER_COMMAND("truncatelog", TruncateLog);
REGISTER_COMMAND("dbscan", DBScan);
REGISTER_COMMAND("nodbgen", NoDBGen);
REGISTER_COMMAND("addbackup", AddBackup);
REGISTER_COMMAND("xctlatency", XctLatency);
REGISTER_COMMAND("agglog", AggLog);
REGISTER_COMMAND("logpagestats", LogPageStats);
REGISTER_COMMAND("loganalysis", LogAnalysis);
REGISTER_COMMAND("kits", KitsCommand);
REGISTER_COMMAND("propstats", PropStats);
REGISTER_COMMAND("tracerestore", RestoreTrace);
REGISTER_COMMAND("logrecinfo", LogrecInfo);
REGISTER_COMMAND("archstats", ArchStats);
}
void Command::setupCommonOptions()
{
options.add_options()
("help,h", "Displays help information regarding a specific command")
("config,c", po::value<string>()->implicit_value("zapps.conf"),
"Specify path to a config file");
}
void Command::showCommands()
{
cerr << "Usage: zapps <command> [options] "
<< endl << "Commands:" << endl;
ConstructorMap::iterator it;
for (it = constructorMap.begin(); it != constructorMap.end(); it++) {
// Options common to all commands
Command* cmd = (it->second)();
cmd->setupCommonOptions();
cmd->setupOptions();
cerr << it->first << endl << cmd->options << endl << endl;
}
}
Command* Command::parse(int argc, char ** argv)
{
if (argc >= 2) {
string cmdStr = argv[1];
std::transform(cmdStr.begin(), cmdStr.end(), cmdStr.begin(), ::tolower);
if (constructorMap.find(cmdStr) != constructorMap.end()) {
Command* cmd = constructorMap[cmdStr]();
cmd->setupCommonOptions();
cmd->setCommandString(cmdStr);
cmd->setupOptions();
po::variables_map vm;
po::store(po::parse_command_line(argc,argv,cmd->getOptions()), vm);
if (vm.count("config") > 0) {
string pathToFile = vm["config"].as<string>();
std::ifstream file;
file.open(pathToFile.c_str());
po::store(po::parse_config_file(file,cmd->getOptions(), true), vm);
}
if (vm.count("help") > 0) {
cmd->helpOption();
return NULL;
}
po::notify(vm);
cmd->setOptionValues(vm);
return cmd;
}
}
showCommands();
return NULL;
}
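/*
 * Illustrative usage sketch (added, not in the original source): a typical
 * entry point would combine init() and parse() roughly as follows; what is
 * done with the returned command afterwards is assumed, since that interface
 * is declared elsewhere.
 *
 *     Command::init();
 *     Command* cmd = Command::parse(argc, argv);
 *     if (cmd == NULL) { return 1; }   // help was shown or command unknown
 */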
void Command::setupSMOptions(po::options_description& options)
{
boost::program_options::options_description smoptions("Storage Manager Options");
smoptions.add_options()
("db-config-design", po::value<string>()->default_value("normal"),
"")
("physical-hacks-enable", po::value<int>()->default_value(0),
"Enables physical hacks, such as padding of records")
("db-worker-sli", po::value<bool>()->default_value(0),
"Speculative Lock inheritance")
("db-loaders", po::value<int>()->default_value(10),
"Specifies the number of threads that are used to load the db")
("db-worker-queueloops", po::value<int>()->default_value(10),
"?")
("db-cl-batchsz", po::value<int>()->default_value(10),
"Specify the batchsize of a client executing transactions")
("db-cl-thinktime", po::value<int>()->default_value(0),
"Specify a 'thinktime' for a client")
("records-to-access", po::value<uint>()->default_value(0),
"Used in the benchmarks for the secondary indexes")
("activation_delay", po::value<uint>()->default_value(0),
"")
("db-workers", po::value<uint>()->default_value(1),
"Specify the number of workers executing transactions")
("dir-trace", po::value<string>()->default_value("RAT"),
"")
/** System related options **/
("sys-maxcpucount", po::value<uint>()->default_value(0),
"Maximum CPU Count of a system")
("sys-activecpucount", po::value<uint>()->default_value(0),
"Active CPU Count of a system")
/**SM Options**/
("sm_logdir", po::value<string>()->default_value("log"),
"Path to log directory")
("sm_dbfile", po::value<string>()->default_value("db"),
"Path to the file on which to store database pages")
("sm_format", po::value<bool>()->default_value(false),
"Format SM by emptying logdir and truncating DB file")
("sm_truncate_log", po::value<bool>()->default_value(false)
->implicit_value(true),
"Whether to truncate log partitions at SM shutdown")
("sm_truncate_archive", po::value<bool>()->default_value(false)
->implicit_value(true),
"Whether to truncate log archive runs at SM shutdown")
("sm_log_partition_size", po::value<int>()->default_value(1024),
"Size of a log partition in MB")
("sm_log_max_partitions", po::value<int>()->default_value(0),
"Maximum number of partitions maintained in log directory")
("sm_log_delete_old_partitions", po::value<bool>()->default_value(true),
"Whether to delete old log partitions as cleaner and chkpt make progress")
("sm_group_commit_size", po::value<int>(),
"Size in bytes of group commit window (higher -> larger log writes)")
("sm_group_commit_timeout", po::value<int>(),
"Max time to wait (in ms) to fill up group commit window")
("sm_log_benchmark_start", po::value<bool>()->default_value(false),
"Whether to generate benchmark_start log record on SM constructor")
("sm_page_img_compression", po::value<int>()->default_value(0),
"Enables page-image compression for every N log bytes (N=0 turns off)")
("sm_bufpoolsize", po::value<int>()->default_value(1024),
"Size of buffer pool in MB")
("sm_fakeiodelay-enable", po::value<int>()->default_value(0),
"Enables a artificial delay whenever there is a I/O operation")
("sm_fakeiodelay", po::value<uint>()->default_value(0),
"Specify the imposed delay in usec")
("sm_errlog", po::value<string>()->default_value("shoremt.err.log"),
"Path to the error log of the storage manager")
("sm_chkpt_interval", po::value<int>(),
"Interval for checkpoint flushes")
("sm_chkpt_log_based", po::value<bool>(),
"Take checkpoints decoupled from buffer and transaction manager, using log scans")
("sm_chkpt_use_log_archive", po::value<bool>(),
"Checkpoints use archived LSN to compute min_rec_lsn")
("sm_chkpt_print_propstats", po::value<bool>(),
"Print min recl lsn and dirty page coutn for every chkpt taken")
("sm_chkpt_only_root_pages", po::value<bool>(),
"Checkpoints only record dirty root pages and SPR takes care of rest")
("sm_log_fetch_buf_partitions", po::value<uint>()->default_value(0),
"Number of partitions to buffer in memory for recovery")
("sm_log_page_flushers", po::value<uint>()->default_value(1),
"Number of log page flushers")
("sm_preventive_chkpt", po::value<uint>()->default_value(1),
"Disable/enable preventive checkpoints (0 to disable, 1 to enable)")
("sm_logbuf_seg_count", po::value<int>(),
"Log Buffer Segment Count")
("sm_logbuf_flush_trigger", po::value<int>(),
"?")
("sm_logbuf_block_size", po::value<int>(),
"Log Buffer Block isze")
("sm_logbuf_part_size", po::value<int>(),
"Log Buffer part size")
("sm_carray_slots", po::value<int>(),
"")
("sm_vol_cluster_stores", po::value<bool>(),
"Cluster pages of the same store into extents")
("sm_vol_log_reads", po::value<bool>(),
"Generate log records for every page read")
("sm_vol_log_writes", po::value<bool>(),
"Generate log records for every page write")
("sm_vol_readonly", po::value<bool>(),
"Volume will be opened in read-only mode and all writes from buffer pool \
will be ignored (uses write elision and single-page recovery)")
("sm_log_o_direct", po::value<bool>(),
"Whether to open log file with O_DIRECT")
("sm_arch_o_direct", po::value<bool>(),
"Whether to open log archive files with O_DIRECT")
("sm_vol_o_direct", po::value<bool>(),
"Whether to open volume (i.e., db file) with O_DIRECT")
("sm_no_db", po::value<bool>()->implicit_value(true)->default_value(false),
"No-database mode, a.k.a. log-structured mode, a.k.a. extreme write elision: \
DB file is written and all fetched pages are rebuilt \
using single-page recovery from scratch")
("sm_batch_segment_size", po::value<int>(),
"Size of segments to use during batch restore warmup")
("sm_restart_instant", po::value<bool>(),
"Enable instant restart")
("sm_restart_log_based_redo", po::value<bool>(),
"Perform non-instant restart with log-based redo instead of page-based")
("sm_restart_prioritize_archive", po::value<bool>(),
"When performing single-page recovery, fetch as much as possible from \
log archive and minimize random reads in the recovery log")
("sm_rawlock_gc_interval_ms", po::value<int>(),
"Garbage Collection Interval in ms")
("sm_rawlock_lockpool_segsize", po::value<int>(),
"Segment size Lockpool")
("sm_rawlock_xctpool_segsize", po::value<int>(),
"Segment size Transaction Pool")
("sm_rawlock_gc_generation_count", po::value<int>(),
"Garbage collection generation count")
("sm_rawlock_gc_init_generation_count", po::value<int>(),
"Garbage collection initial generation count")
("sm_rawlock_lockpool_initseg", po::value<int>(),
"Lock pool init segment")
("sm_rawlock_xctpool_segsize", po::value<int>(),
"Transaction Pool Segment Size")
("sm_rawlock_gc_free_segment_count", po::value<int>(),
"Garbage Collection Free Segment Count")
("sm_rawlock_gc_max_segment_count", po::value<int>(),
"Garbage Collection Maximum Segment Count")
("sm_locktablesize", po::value<int>(),
"Lock table size")
("sm_rawlock_xctpool_initseg", po::value<int>(),
"Transaction Pool Initialization Segment")
("sm_bf_warmup_hit_ratio", po::value<int>(),
"Hit ratio to be achieved until system is considered warmed up (int from 0 to 100)")
("sm_bf_warmup_min_fixes", po::value<int>(),
"Only consider warmup hit ratio once this minimum number of fixes has been performed")
("sm_cleaner_decoupled", po::value<bool>(),
"Enable/Disable decoupled cleaner")
("sm_cleaner_interval", po::value<int>(),
"Cleaner sleep interval in ms")
("sm_cleaner_workspace_size", po::value<int>(),
"Size of cleaner write buffer")
("sm_cleaner_num_candidates", po::value<int>(),
"Number of candidate frames considered by each cleaner round")
("sm_cleaner_policy", po::value<string>(),
"Policy used by cleaner to select candidates")
("sm_cleaner_min_write_size", po::value<int>(),
"Page cleaner only writes clusters of pages with this minimum size")
("sm_cleaner_min_write_ignore_freq", po::value<int>(),
"Ignore min_write_size every N rounds of cleaning")
("sm_cleaner_async_candidate_collection", po::value<bool>(),
"Collect candidate frames to be cleaned in an asynchronous thread")
("sm_evict_policy", po::value<string>(),
"Policy to use in eviction (a.k.a. page replacement)")
("sm_evict_dirty_pages", po::value<bool>(),
"Do not skip dirty pages when performing eviction and write them out if necessary")
("sm_evict_random", po::value<bool>(),
"Pick eviction victim at random, instead of going round-robin over frames")
("sm_evict_use_clock", po::value<bool>(),
"Maintain clock bits on buffer frames and only evict if clock bit is zero")
("sm_async_eviction", po::value<bool>(),
"Perform eviction in a dedicated thread, while fixing threads wait")
("sm_eviction_interval", po::value<int>(),
"Interval for async eviction thread (in msec)")
("sm_wakeup_cleaner_attempts", po::value<int>(),
"How many failed eviction attempts until cleaner is woken up (0 = never)")
("sm_clean_only_attempts", po::value<int>(),
"How many failed eviction attempts until dity frames are picked as victims (0 = never)")
("sm_log_page_evictions", po::value<bool>(),
"Generate evict_page log records for every page evicted from the buffer pool")
("sm_log_page_fetches", po::value<bool>(),
"Generate fetch_page log records for every page fetched (and recovered) into the buffer pool")
("sm_archiver_workspace_size", po::value<int>(),
"Workspace size archiver")
// CS TODO: archiver currently only works with 1MB blocks
// ("sm_archiver_block_size", po::value<int>()->default_value(1024*1024),
// "Archiver Block size")
("sm_archiver_bucket_size", po::value<int>(),
"Archiver bucket size")
("sm_archiver_merging", po::value<bool>(),
"Whether to turn on asynchronous merging with log archiver")
("sm_archiver_fanin", po::value<int>(),
"Log archiver merge fan-in")
("sm_archiver_replication_factor", po::value<int>(),
"Replication factor maintained by the log archive \
run recycler (0 = never delete a run)")
("sm_archiving_blocksize", po::value<int>(),
"Archiving block size")
("sm_reformat_log", po::value<bool>(),
"Enable/Disable reformat log")
("sm_logging", po::value<bool>()->default_value(true),
"Enable/Disable logging")
("sm_decoupled_cleaner", po::value<bool>(),
"Use log-based propagation to clean pages")
("sm_shutdown_clean", po::value<bool>(),
"Force buffer before shutting down SM")
("sm_archiving", po::value<bool>(),
"Enable/Disable archiving")
("sm_async_merging", po::value<bool>(),
"Enable/Disable Asynchronous merging")
("sm_statistics", po::value<bool>(),
"Enable/Disable display of statistics")
("sm_ticker_enable", po::value<bool>(),
"Enable/Disable ticker (currently always enabled)")
("sm_ticker_msec", po::value<int>(),
"Ticker interval in millisec")
("sm_ticker_print_tput", po::value<bool>(),
"Print transaction throughput on every tick to a file tput.txt")
("sm_prefetch", po::value<bool>(),
"Enable/Disable prefetching")
("sm_backup_prefetcher_segments", po::value<int>(),
"Segment size restore")
("sm_restore_segsize", po::value<int>(),
"Segment size restore")
("sm_restore_prefetcher_window", po::value<int>(),
"Segment size restore")
("sm_restore_instant", po::value<bool>(),
"Enable/Disable instant restore")
("sm_restore_reuse_buffer", po::value<bool>(),
"Enable/Disable reusage of buffer")
("sm_restore_multiple_segments", po::value<int>(),
"Number of segments to attempt restore at once")
("sm_restore_min_read_size", po::value<int>(),
"Attempt to read at least this many bytes when scanning log archive")
("sm_restore_max_read_size", po::value<int>(),
"Attempt to read at most this many bytes when scanning log archive")
("sm_restore_preemptive", po::value<bool>(),
"Use preemptive scheduling during restore")
("sm_restore_sched_singlepass", po::value<bool>(),
"Use single-pass scheduling in restore")
("sm_restore_threads", po::value<int>(),
"Number of restore threads to use")
("sm_restore_sched_ondemand", po::value<bool>(),
"Support on-demand restore")
("sm_restore_sched_random", po::value<bool>(),
"Use random page order in restore scheduler")
("sm_bufferpool_swizzle", po::value<bool>(),
"Enable/Disable bufferpool swizzle")
("sm_write_elision", po::value<bool>(),
"Enable/Disable write elision in buffer pool")
("sm_archiver_eager", po::value<bool>(),
"Enable/Disable eager archiving")
("sm_archiver_read_whole_blocks", po::value<bool>(),
"Enable/Disable reading whole blocks in the archiver")
("sm_archiver_slow_log_grace_period", po::value<int>(),
"Enable/Disable slow log grace period")
("sm_errlog_level", po::value<string>(),
"Specify a errorlog level. Options:")
//TODO Stefan Find levels and insert them
("sm_log_impl", po::value<string>(),
"Choose log implementation. Options")
//TODO Stefan Find Implementations
("sm_backup_dir", po::value<string>(),
"Path to a backup directory")
("sm_bufferpool_replacement_policy", po::value<string>(),
"Replacement Policy")
("sm_archdir", po::value<string>()->default_value("archive"),
"Path to archive directory");
options.add(smoptions);
}
void Command::helpOption()
{
cerr << "Usage: zapps Command:" << commandString << " [options] "
<< endl << options << endl;
}
size_t LogScannerCommand::BLOCK_SIZE = 1024 * 1024;
BaseScanner* LogScannerCommand::getScanner(
bitset<logrec_t::t_max_logrec>* filter)
{
BaseScanner* s;
if (isArchive) {
if (merge) s = new MergeScanner(optionValues);
else s = new LogArchiveScanner(optionValues);
}
else {
s = new BlockScanner(optionValues, filter);
}
if (!filename.empty()) {
if (!isArchive) { s->setRestrictFile(logdir + "/" + filename); }
else { s->setRestrictFile(filename); }
}
return s;
}
void LogScannerCommand::setupOptions()
{
setupSMOptions(options);
po::options_description logscanner("Log Scanner Options");
logscanner.add_options()
("logdir,l", po::value<string>(&logdir)->required(),
"Directory containing log to be scanned")
("file,f", po::value<string>(&filename)->default_value(""),
"Scan only a specific file inside the given directory")
("archive,a", po::value<bool>(&isArchive)->default_value(false)
->implicit_value(true),
"Scan log archive files isntead of normal recovery log")
("merge,m", po::value<bool>(&merge)->default_value(false)
->implicit_value(true),
"Merge archiver input so that global sort order is produced")
("limit,n", po::value<size_t>(&limit)->default_value(0),
"Number of log records to scan")
("level", po::value<int>(&level)->default_value(-1),
"Level of log archive to scan (-1 for all)")
("pid", po::value<PageID>(&scan_pid)->default_value(0),
"PageID on which to begin scan (archive only)")
;
options.add(logscanner);
}
void Command::setSMOptions(sm_options& sm_opt, const po::variables_map& values)
{
BOOST_FOREACH(const po::variables_map::value_type& pair, values)
{
const std::string& key = pair.first;
try {
sm_opt.set_int_option(key, values[key].as<int>());
}
catch(boost::bad_any_cast const& e) {
try {
cerr << "Set option " << key << " to " << values[key].as<bool>() << endl;
sm_opt.set_bool_option(key, values[key].as<bool>());
}
catch(boost::bad_any_cast const& e) {
try {
sm_opt.set_string_option(key, values[key].as<string>());
}
catch(boost::bad_any_cast const& e) {
try {
sm_opt.set_int_option(key, values[key].as<uint>());
}
catch(boost::bad_any_cast const& e) {
cerr << "Could not process option " << key
<< " .. skippking." << endl;
continue;
}
}
}
}
    }
}
|
{"hexsha": "b0624f08a08b55155a8c0f64ff9dc67fdf6ce06e", "size": 20898, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/cmd/base/command.cpp", "max_stars_repo_name": "caetanosauer/zero", "max_stars_repo_head_hexsha": "2cf0c0e4420c8b87795300aeb71a729451d4f4c9", "max_stars_repo_licenses": ["Spencer-94"], "max_stars_count": 27.0, "max_stars_repo_stars_event_min_datetime": "2015-04-21T08:52:37.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-18T03:38:58.000Z", "max_issues_repo_path": "src/cmd/base/command.cpp", "max_issues_repo_name": "caetanosauer/zero", "max_issues_repo_head_hexsha": "2cf0c0e4420c8b87795300aeb71a729451d4f4c9", "max_issues_repo_licenses": ["Spencer-94"], "max_issues_count": 24.0, "max_issues_repo_issues_event_min_datetime": "2015-07-04T10:45:41.000Z", "max_issues_repo_issues_event_max_datetime": "2018-05-03T08:52:36.000Z", "max_forks_repo_path": "src/cmd/base/command.cpp", "max_forks_repo_name": "caetanosauer/zero", "max_forks_repo_head_hexsha": "2cf0c0e4420c8b87795300aeb71a729451d4f4c9", "max_forks_repo_licenses": ["Spencer-94"], "max_forks_count": 15.0, "max_forks_repo_forks_event_min_datetime": "2015-03-31T09:57:10.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-09T13:44:58.000Z", "avg_line_length": 43.9957894737, "max_line_length": 121, "alphanum_fraction": 0.6351804, "num_tokens": 4831}
|
"""This scripts generates 3 examples where we regress HR real scans from multi-modal LR scans. Specifically we regress
HR T1 scans from LR T1 and T2 scans. We assume here that HR label maps are available with corresponding T1 scans. Thus
this script produces pairs of real HR T1 scans along with aligned HR synthetic scans (input channels) simulating T1 and
T2 scans acquired at LR.
If you use this code, please cite the SynthSR paper in:
https://github.com/BBillot/SynthSR/blob/master/bibtex.bib
Copyright 2020 Benjamin Billot
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in
compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is
distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied. See the License for the specific language governing permissions and limitations under the
License.
"""
import os
import time
import numpy as np
from ext.lab2im import utils
from SynthSR.brain_generator import BrainGenerator
# folder containing label maps to generate images from
labels_folder = '../../data/labels'
# folder containing corresponding images, which will be used as the regression target
images_folder = '../../data/images/'
# result parameters
n_examples = 3 # number of generated examples
result_dir = '../../data/generated_images/5-SR-synthesis_real' # folder where they will be saved
# general parameters
# We now generate 2 synthetic channels, which will both be used as input. Note that input_channels only contains True
# values, since we use real scans as the regression target. Bear in mind that input_channels only refers to synthetic
# channels (it never includes the real regression target).
input_channels = [True, True]
output_channel = None # the regression targets are not synthetic, but real
target_res = None # produce data at the resolution of the label maps
output_shape = 128 # randomly crop to 128^3
# label values of structure to generate from
generation_labels = '../../data/labels_classes_priors/generation_labels.npy'
# classes associating similar structures to the same Gaussian distribution
generation_classes = '../../data/labels_classes_priors/generation_classes.npy'
# Hyperparameters governing the GMM priors for the synthetic T1 and T2 scans. Note that T1s will be the first
# synthetic channel (as we provide t1 hyperparameters first).
prior_means_t1_lr = np.load('../../data/labels_classes_priors/prior_means_t1_lr.npy')
prior_means_t2 = np.load('../../data/labels_classes_priors/prior_means_t2.npy')
prior_means = np.concatenate([prior_means_t1_lr, prior_means_t2], axis=0)
prior_stds_t1_lr = np.load('../../data/labels_classes_priors/prior_stds_t1_lr.npy')
prior_stds_t2 = np.load('../../data/labels_classes_priors/prior_stds_t2.npy')
prior_stds = np.concatenate([prior_stds_t1_lr, prior_stds_t2], axis=0)
# augmentation parameters
flipping = True
scaling_bounds = 0.1
rotation_bounds = 8
shearing_bounds = 0.01
translation_bounds = False
nonlin_std = 2.
bias_field_std = 0.2
# blurring/downsampling parameters
# We assume here that the T1 and T2 LR scans were not acquired at the same resolution/slice thickness. We provide the
# corresponding resolutions in the same order as for the hyperparameters. In this example we simulate:
# 3mm coronal T1 with 3mm thickness, and 4.5mm sagittal T2 with 3mm thickness.
data_res = np.array([[1., 1., 3.], [1., 4.5, 1.]]) # slice spacing
thickness = np.array([[1., 1., 3.], [1., 3., 1.]]) # slice thickness
downsample = True # downsample to simulated LR
build_reliability_maps = True # add reliability map to input channels
# In this example we introduce small variations in the blurring kernel, such that the downstream network is robust to
# small changes in acquisition resolution. We provide it here with this coefficient, where the blurring simulates a
# resolution sampled in the uniform distribution U(data_res/blur_range; data_res*blur_range). Therefore blur_range must
# be equal to 1 (no change), or greater than 1.
blur_range = 1.15
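# (Added numeric illustration) with blur_range = 1.15, a channel whose nominal resolution is 3mm is blurred as if its
# resolution had been drawn from U(3/1.15, 3*1.15), i.e. roughly U(2.6mm, 3.45mm).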
# Here we have two input channels, and we want to model registration problems between the two. This may be due to head
# movement between the two acquisitions, or the fact that the two scans were not acquired in the same coordinate space
# (e.g. orthogonal T1, and T2 acquired along the hippocampal axis). This registration error will be simulated with
# respect to the first input channel.
simulate_registration_error = True
########################################################################################################
# instantiate BrainGenerator object
brain_generator = BrainGenerator(labels_dir=labels_folder,
images_dir=images_folder,
generation_labels=generation_labels,
input_channels=input_channels,
output_channel=output_channel,
target_res=target_res,
output_shape=output_shape,
generation_classes=generation_classes,
prior_means=prior_means,
prior_stds=prior_stds,
flipping=flipping,
scaling_bounds=scaling_bounds,
rotation_bounds=rotation_bounds,
shearing_bounds=shearing_bounds,
translation_bounds=translation_bounds,
simulate_registration_error=simulate_registration_error,
nonlin_std=nonlin_std,
bias_field_std=bias_field_std,
data_res=data_res,
thickness=thickness,
downsample=downsample,
blur_range=blur_range,
build_reliability_maps=build_reliability_maps)
# create result dir
utils.mkdir(result_dir)
for n in range(n_examples):
# generate !
start = time.time()
input_channels, regression_target = brain_generator.generate_brain()
end = time.time()
print('generation {0:d} took {1:.01f}s'.format(n + 1, end - start))
# save output image and label map
utils.save_volume(np.squeeze(input_channels[..., 0]), brain_generator.aff, brain_generator.header,
os.path.join(result_dir, 't1_input_%s.nii.gz' % (n + 1)))
utils.save_volume(np.squeeze(input_channels[..., 1]), brain_generator.aff, brain_generator.header,
os.path.join(result_dir, 'reliability_map_t1_input_%s.nii.gz' % (n + 1)))
utils.save_volume(np.squeeze(input_channels[..., 2]), brain_generator.aff, brain_generator.header,
os.path.join(result_dir, 't2_input_%s.nii.gz' % (n + 1)))
utils.save_volume(np.squeeze(input_channels[..., 3]), brain_generator.aff, brain_generator.header,
os.path.join(result_dir, 'reliability_map_t2_input_%s.nii.gz' % (n + 1)))
utils.save_volume(np.squeeze(regression_target), brain_generator.aff, brain_generator.header,
os.path.join(result_dir, 't1_target_%s.nii.gz' % (n + 1)))
|
{"hexsha": "5a083e899e7af222d090904c26d8623e9b324c79", "size": 7459, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/tutorials/5-SR-synthesis_multimodal_real.py", "max_stars_repo_name": "hanzhiwangchn/SynthSR", "max_stars_repo_head_hexsha": "35c2483f1da272855bbeea3e76140845106b623d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scripts/tutorials/5-SR-synthesis_multimodal_real.py", "max_issues_repo_name": "hanzhiwangchn/SynthSR", "max_issues_repo_head_hexsha": "35c2483f1da272855bbeea3e76140845106b623d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/tutorials/5-SR-synthesis_multimodal_real.py", "max_forks_repo_name": "hanzhiwangchn/SynthSR", "max_forks_repo_head_hexsha": "35c2483f1da272855bbeea3e76140845106b623d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 53.6618705036, "max_line_length": 119, "alphanum_fraction": 0.6861509586, "include": true, "reason": "import numpy", "num_tokens": 1598}
|
import logging
from rlberry.agents.agent import AgentWithSimplePolicy
import numpy as np
import gym.spaces as spaces
from rlberry.agents.dynprog.utils import backward_induction
from rlberry.agents.dynprog.utils import backward_induction_in_place
from rlberry.agents.kernel_based.common import map_to_representative
logger = logging.getLogger(__name__)
class RSUCBVIAgent(AgentWithSimplePolicy):
"""
Value iteration with exploration bonuses for continuous-state environments,
    using an online discretization strategy.
The strategy:
- Build (online) a set of representative states
    - Estimate transitions and rewards on the finite set of representative states
and actions.
Criterion: finite-horizon with discount factor gamma.
If the discount is not 1, only the Q function at h=0 is used.
The recommended policy after all the episodes is computed without
exploration bonuses.
Parameters
----------
env : Model
Online model with continuous (Box) state space and discrete actions
gamma : double
Discount factor in [0, 1]. If gamma is 1.0, the problem is set to
be finite-horizon.
horizon : int
Horizon of the objective function. If None and gamma<1, set to
1/(1-gamma).
lp_metric: int
The metric on the state space is the one induced by the p-norm,
where p = lp_metric. Default = 2, for the Euclidean metric.
scaling: numpy.ndarray
Must have the same size as state array, used to scale the states
before computing the metric.
If None, set to:
- (env.observation_space.high - env.observation_space.low) if high
and low are bounded
- np.ones(env.observation_space.shape[0]) if high or low are
unbounded
min_dist: double
Minimum distance between two representative states
max_repr: int
Maximum number of representative states.
If None, it is set to (sqrt(d)/min_dist)**d, where d
is the dimension of the state space
bonus_scale_factor : double
Constant by which to multiply the exploration bonus, controls
the level of exploration.
bonus_type : string
Type of exploration bonus. Currently, only "simplified_bernstein"
is implemented. If `reward_free` is true, this parameter is ignored
and the algorithm uses 1/n bonuses.
reward_free : bool
If true, ignores rewards and uses only 1/n bonuses.
References
----------
.. [1] Azar, Mohammad Gheshlaghi, Ian Osband, and Rémi Munos.
"Minimax regret bounds for reinforcement learning."
Proceedings of the 34th ICML, 2017.
.. [2] Strehl, Alexander L., and Michael L. Littman.
"An analysis of model-based interval estimation for Markov decision
processes."
Journal of Computer and System Sciences 74.8 (2008): 1309-1331.
.. [3] Kveton, Branislav, and Georgios Theocharous.
"Kernel-Based Reinforcement Learning on Representative States."
AAAI, 2012.
.. [4] Domingues, O. D., Ménard, P., Pirotta, M., Kaufmann, E., & Valko, M.(2020).
A kernel-based approach to non-stationary reinforcement learning in metric
spaces.
arXiv preprint arXiv:2007.05078.
"""
name = "RSUCBVI"
def __init__(
self,
env,
gamma=0.99,
horizon=100,
lp_metric=2,
scaling=None,
min_dist=0.1,
max_repr=1000,
bonus_scale_factor=1.0,
bonus_type="simplified_bernstein",
reward_free=False,
**kwargs
):
# init base class
AgentWithSimplePolicy.__init__(self, env, **kwargs)
self.gamma = gamma
self.horizon = horizon
self.lp_metric = lp_metric
self.min_dist = min_dist
self.bonus_scale_factor = bonus_scale_factor
self.bonus_type = bonus_type
self.reward_free = reward_free
# check environment
assert isinstance(self.env.observation_space, spaces.Box)
assert isinstance(self.env.action_space, spaces.Discrete)
# other checks
assert gamma >= 0 and gamma <= 1.0
if self.horizon is None:
assert gamma < 1.0, "If no horizon is given, gamma must be smaller than 1."
self.horizon = int(np.ceil(1.0 / (1.0 - gamma)))
# state dimension
self.state_dim = self.env.observation_space.shape[0]
# compute scaling, if it is None
if scaling is None:
# if high and low are bounded
if (self.env.observation_space.high == np.inf).sum() == 0 and (
self.env.observation_space.low == -np.inf
).sum() == 0:
scaling = (
self.env.observation_space.high - self.env.observation_space.low
)
# if high or low are unbounded
else:
scaling = np.ones(self.state_dim)
else:
assert scaling.ndim == 1
assert scaling.shape[0] == self.state_dim
self.scaling = scaling
# maximum value
r_range = self.env.reward_range[1] - self.env.reward_range[0]
if r_range == np.inf or r_range == 0.0:
logger.warning(
"{}: Reward range is zero or infinity. ".format(self.name)
+ "Setting it to 1."
)
r_range = 1.0
if self.gamma == 1.0:
self.v_max = r_range * horizon
else:
self.v_max = (
r_range
* (1.0 - np.power(self.gamma, self.horizon))
/ (1.0 - self.gamma)
)
# number of representative states and number of actions
if max_repr is None:
max_repr = int(
np.ceil(
(1.0 * np.sqrt(self.state_dim) / self.min_dist) ** self.state_dim
)
)
self.max_repr = max_repr
# current number of representative states
self.M = None
self.A = self.env.action_space.n
# declaring variables
self.episode = None # current episode
self.representative_states = None # coordinates of all repr states
self.N_sa = None # visits to (s, a)
self.N_sas = None # visits to (s, a, s')
self.S_sa = None # sum of rewards at (s, a)
self.B_sa = None # bonus at (s, a)
self.Q = None # Q function
self.V = None # V function
self.Q_policy = None # Q function for recommended policy
# initialize
self.reset()
def reset(self, **kwargs):
self.M = 0
self.representative_states = np.zeros((self.max_repr, self.state_dim))
self.N_sa = np.zeros((self.max_repr, self.A))
self.N_sas = np.zeros((self.max_repr, self.A, self.max_repr))
self.S_sa = np.zeros((self.max_repr, self.A))
self.B_sa = self.v_max * np.ones((self.max_repr, self.A))
self.R_hat = np.zeros((self.max_repr, self.A))
self.P_hat = np.zeros((self.max_repr, self.A, self.max_repr))
self.V = np.zeros((self.horizon, self.max_repr))
self.Q = np.zeros((self.horizon, self.max_repr, self.A))
self.Q_policy = None
self.episode = 0
def policy(self, observation):
state = observation
assert self.Q_policy is not None
repr_state = self._map_to_repr(state, False)
return self.Q_policy[0, repr_state, :].argmax()
def fit(self, budget: int, **kwargs):
del kwargs
n_episodes_to_run = budget
count = 0
while count < n_episodes_to_run:
self._run_episode()
count += 1
# compute Q function for the recommended policy
self.Q_policy, _ = backward_induction(
self.R_hat[: self.M, :],
self.P_hat[: self.M, :, : self.M],
self.horizon,
self.gamma,
)
def _map_to_repr(self, state, accept_new_repr=True):
repr_state = map_to_representative(
state,
self.lp_metric,
self.representative_states,
self.M,
self.min_dist,
self.scaling,
accept_new_repr,
)
# check if new representative state
if repr_state == self.M:
self.M += 1
return repr_state
def _update(self, state, action, next_state, reward):
repr_state = self._map_to_repr(state)
repr_next_state = self._map_to_repr(next_state)
self.N_sa[repr_state, action] += 1
self.N_sas[repr_state, action, repr_next_state] += 1
self.S_sa[repr_state, action] += reward
self.R_hat[repr_state, action] = (
self.S_sa[repr_state, action] / self.N_sa[repr_state, action]
)
self.P_hat[repr_state, action, :] = (
self.N_sas[repr_state, action, :] / self.N_sa[repr_state, action]
)
self.B_sa[repr_state, action] = self._compute_bonus(
self.N_sa[repr_state, action]
)
def _compute_bonus(self, n):
# reward-free
if self.reward_free:
bonus = 1.0 / n
return bonus
# not reward-free
if self.bonus_type == "simplified_bernstein":
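            # Simplified Bernstein-style bonus: a Hoeffding-like sqrt(1/n)
            # term plus a v_max / n correction, clipped at v_max below.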
bonus = self.bonus_scale_factor * np.sqrt(1.0 / n) + self.v_max / n
bonus = min(bonus, self.v_max)
return bonus
else:
raise NotImplementedError(
"Error: bonus type {} not implemented".format(self.bonus_type)
)
def _get_action(self, state, hh=0):
assert self.Q is not None
repr_state = self._map_to_repr(state, False)
return self.Q[hh, repr_state, :].argmax()
def _run_episode(self):
# interact for H steps
episode_rewards = 0
state = self.env.reset()
for hh in range(self.horizon):
action = self._get_action(state, hh)
next_state, reward, done, _ = self.env.step(action)
episode_rewards += reward # used for logging only
if self.reward_free:
reward = 0.0 # set to zero before update if reward_free
self._update(state, action, next_state, reward)
state = next_state
if done:
break
# run backward induction
backward_induction_in_place(
self.Q[:, : self.M, :],
self.V[:, : self.M],
self.R_hat[: self.M, :] + self.B_sa[: self.M, :],
self.P_hat[: self.M, :, : self.M],
self.horizon,
self.gamma,
self.v_max,
)
self.episode += 1
#
if self.writer is not None:
self.writer.add_scalar("episode_rewards", episode_rewards, self.episode)
self.writer.add_scalar("representative states", self.M, self.episode)
# return sum of rewards collected in the episode
return episode_rewards
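# Example usage (a minimal sketch; assumes a continuous-state rlberry
# environment such as rlberry.envs.classic_control.MountainCar is available):
#     from rlberry.envs.classic_control import MountainCar
#     agent = RSUCBVIAgent(MountainCar(), gamma=0.99, horizon=30)
#     agent.fit(budget=10)                      # run 10 exploration episodes
#     action = agent.policy(agent.env.reset())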
|
{"hexsha": "53b5a0fcf250efcac42449e630e22f079e1f8e6c", "size": 10979, "ext": "py", "lang": "Python", "max_stars_repo_path": "rlberry/agents/kernel_based/rs_ucbvi.py", "max_stars_repo_name": "akrouriad/rlberry", "max_stars_repo_head_hexsha": "dde4e2cbafca05fdef1df07646bb6368059eeadf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "rlberry/agents/kernel_based/rs_ucbvi.py", "max_issues_repo_name": "akrouriad/rlberry", "max_issues_repo_head_hexsha": "dde4e2cbafca05fdef1df07646bb6368059eeadf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "rlberry/agents/kernel_based/rs_ucbvi.py", "max_forks_repo_name": "akrouriad/rlberry", "max_forks_repo_head_hexsha": "dde4e2cbafca05fdef1df07646bb6368059eeadf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.309375, "max_line_length": 87, "alphanum_fraction": 0.595318335, "include": true, "reason": "import numpy", "num_tokens": 2633}
|
"""
Created by Constantin Philippenko, 17th January 2022.
"""
import cmath
import matplotlib
import numpy as np
from matplotlib import pyplot as plt
from tqdm import tqdm
from src.CompressionModel import SQuantization, RandomSparsification, Sketching
from src.SyntheticDataset import SyntheticDataset
from src.TheoreticalCov import get_theoretical_cov
from src.Utilities import create_folder_if_not_existing
matplotlib.rcParams.update({
"pgf.texsystem": "pdflatex",
'font.family': 'serif',
'text.usetex': True,
'pgf.rcfonts': False,
'text.latex.preamble': r'\usepackage{amsfonts}'
})
SIZE_DATASET = 10**5
DIM = 100
POWER_COV = 4
R_SIGMA = 0
USE_ORTHO_MATRIX = True
def prepare_sparsification(x, p):
    # Rademacher preconditioning: flip the sign of each coordinate with probability 1/2.
    rademacher = np.random.binomial(1, 0.5, size=len(x))
    rademacher[rademacher == 0] = -1
    return x * rademacher
def compute_diag(dataset, compressor):
X = dataset.X_complete
X_compressed = X.copy()
for i in tqdm(range(SIZE_DATASET)):
X_compressed[i] = compressor.compress(X[i])
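    # empirical second-moment matrix of the compressed data: C(X)^T C(X) / n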
cov_matrix = X_compressed.T.dot(X_compressed) / SIZE_DATASET
if USE_ORTHO_MATRIX:
cov_matrix = dataset.ortho_matrix.T.dot(cov_matrix).dot(dataset.ortho_matrix)
diag = np.diag(cov_matrix)
return diag, cov_matrix
def compute_diag_matrices(dataset: SyntheticDataset, dim: int, labels):
dataset.generate_constants(dim, size_dataset=SIZE_DATASET, power_cov=POWER_COV, r_sigma=R_SIGMA,
use_ortho_matrix=USE_ORTHO_MATRIX)
dataset.define_compressors()
dataset.generate_X()
no_compressor = SQuantization(0, dim=dim)
my_compressors = [no_compressor, dataset.quantizator, dataset.sparsificator, dataset.rand_sketcher,
dataset.rand1, dataset.all_or_nothinger]
all_diagonals = []
for compressor in my_compressors:
diag, cov_matrix = compute_diag(dataset, compressor)
all_diagonals.append(diag)
return all_diagonals, labels, dataset.string_for_hash()
def compute_theoretical_diag(dataset: SyntheticDataset, labels):
### No compression
all_covariance = [get_theoretical_cov(dataset, "No compression"),
get_theoretical_cov(dataset, "Qtzd"),
get_theoretical_cov(dataset, "Sparsification"),
get_theoretical_cov(dataset, "Sketching"),
get_theoretical_cov(dataset, "Rand1"),
get_theoretical_cov(dataset, "AllOrNothing")]
if USE_ORTHO_MATRIX:
for i in range(len(all_covariance)):
all_covariance[i] = dataset.ortho_matrix.T.dot(all_covariance[i]).dot(dataset.ortho_matrix)
all_diagonals = [np.diag(cov) for cov in all_covariance]
return all_diagonals, labels
if __name__ == '__main__':
labels = ["no compr.", "quantiz.", "rdk", "gauss. proj.", "rand1", "all or noth."]
dataset = SyntheticDataset()
all_diagonals, labels, hash_dataset = compute_diag_matrices(dataset, dim=DIM, labels=labels)
all_theoretical_diagonals, theoretical_labels = compute_theoretical_diag(dataset, labels=labels)
fig, axes = plt.subplots(1, 2, figsize=(10, 6))
for (diagonal, label) in zip(all_diagonals, labels):
axes[0].plot(np.log10(np.arange(1, DIM + 1)), np.log10(diagonal), label=label, lw = 2)
for (diagonal, label) in zip(all_theoretical_diagonals, theoretical_labels):
axes[1].plot(np.log10(np.arange(1, DIM + 1)), np.log10(diagonal), label=label, lw = 2, linestyle="--")
for ax in axes:
ax.tick_params(axis='both', labelsize=15)
ax.legend(loc='best', fontsize=15)
ax.set_xlabel(r"$\log(i), \forall i \in \{1, ..., d\}$", fontsize=15)
axes[0].title.set_text('Empirical eigenvalues')
axes[1].title.set_text('Theoretical eigenvalues')
axes[0].set_ylabel(r"$\log(Diag(\frac{\mathcal C (X)^T.\mathcal C (X)}{n})_i)$", fontsize=15)
plt.legend(loc='best', fontsize=15)
folder = "pictures/epsilon_eigenvalues/"
create_folder_if_not_existing(folder)
plt.savefig("{0}/{1}.eps".format(folder, hash_dataset), format='eps')
plt.show()
|
{"hexsha": "5700639e884924af5a85a37647ec59d457fc1021", "size": 4115, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/PlotEigenValues.py", "max_stars_repo_name": "philipco/structured_noise", "max_stars_repo_head_hexsha": "e7716abc5f90b2b5c3dccbb4c7e67708dab32a88", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/PlotEigenValues.py", "max_issues_repo_name": "philipco/structured_noise", "max_issues_repo_head_hexsha": "e7716abc5f90b2b5c3dccbb4c7e67708dab32a88", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/PlotEigenValues.py", "max_forks_repo_name": "philipco/structured_noise", "max_forks_repo_head_hexsha": "e7716abc5f90b2b5c3dccbb4c7e67708dab32a88", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.0082644628, "max_line_length": 110, "alphanum_fraction": 0.6913730255, "include": true, "reason": "import numpy", "num_tokens": 1076}
|
from .distribution import Distribution
from .functions import choose, mascheroni
from scipy.integrate import quad
from math import exp, gamma
from typing import Union
from dataclasses import dataclass
@dataclass
class Binomial(Distribution):
n: Union[int, float]
p: float
    def __post_init__(self):
        self.discrete = isinstance(self.n, int)
    def pmf(self, k):
        if 0 <= k <= self.n:
            return (
                choose(self.n, k, exact=self.discrete)
                * self.p**k * (1-self.p)**(self.n-k)
            )
        return 0
    def cdf_continuous(self, k):
        # P(X <= k) written as a regularized incomplete beta function:
        # (n-k) * C(n, k) * integral_0^(1-p) of t^(n-k-1) * (1-t)^k dt
        integral, _ = quad(
            lambda t: t**(self.n-k-1) * (1-t)**k,
            0, 1-self.p
        )
        return integral * (self.n - k) * choose(self.n, k, exact=False)
@dataclass
class ChiSquared(Distribution):
k: int
    def __post_init__(self):
        self.discrete = isinstance(self.k, int)
    def pmf(self, x):
        # chi-squared density: x^(k/2 - 1) * e^(-x/2) / (2^(k/2) * Gamma(k/2))
        if x <= 0:
            return 0
        return (
            x**(self.k/2 - 1) * exp(-x/2)
            / (2**(self.k/2) * gamma(self.k/2))
        )
    def cdf_continuous(self, x):
        # P(X <= x) by numerical integration of the density
        integral, _ = quad(self.pmf, 0, x)
        return integral
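# Quick sanity check (a minimal sketch; assumes `choose` computes the usual
# binomial coefficient):
#     Binomial(n=10, p=0.5).pmf(5)    # ~0.2461 = C(10, 5) / 2**10
#     ChiSquared(k=2).pmf(1.0)        # ~0.3033 = exp(-1/2) / 2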
|
{"hexsha": "746d10fab24facd7460c4fb410b52a0a0761f059", "size": 1409, "ext": "py", "lang": "Python", "max_stars_repo_path": "noether/statistics/distcatalogue.py", "max_stars_repo_name": "yunruse/Noether", "max_stars_repo_head_hexsha": "7421a14000488819e52c1aeffa45833c27b8e3ef", "max_stars_repo_licenses": ["CC-BY-4.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-01-27T07:44:29.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-23T22:30:43.000Z", "max_issues_repo_path": "noether/statistics/distcatalogue.py", "max_issues_repo_name": "yunruse/Noether", "max_issues_repo_head_hexsha": "7421a14000488819e52c1aeffa45833c27b8e3ef", "max_issues_repo_licenses": ["CC-BY-4.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "noether/statistics/distcatalogue.py", "max_forks_repo_name": "yunruse/Noether", "max_forks_repo_head_hexsha": "7421a14000488819e52c1aeffa45833c27b8e3ef", "max_forks_repo_licenses": ["CC-BY-4.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.6181818182, "max_line_length": 59, "alphanum_fraction": 0.5095812633, "include": true, "reason": "from scipy", "num_tokens": 378}
|
#!/usr/bin/env python3.6
# -*- coding: utf-8 -*-
import linecache as lc
import numpy as np
import os
from sagar.io.vasp import read_vasp
import subprocess
class ExtractValue():
def __init__(self,data_folder='./',atomic_num=3):
self.data_folder = data_folder
self.atomic_num = atomic_num
def get_energy(self):
file_osz = os.path.join(self.data_folder,'OSZICAR')
return float(subprocess.run(['tail','-1',file_osz],stdout=subprocess.PIPE).stdout.decode('utf-8').split()[4])
def get_fermi(self):
# read from scf calculation
file_outcar = os.path.join(self.data_folder,'OUTCAR')
grep_res = subprocess.Popen(['grep','E-fermi',file_outcar],stdout=subprocess.PIPE)
return float(subprocess.check_output(['tail','-1'],stdin=grep_res.stdout).decode('utf-8').split()[2])
def get_Ne_defect_free(self):
        # get the number of valence electrons in the defect-free system
file_pos = os.path.join(self.data_folder, 'POSCAR')
file_pot = os.path.join(self.data_folder, 'POTCAR')
ele_num_atom = [float(i) for i in subprocess.run(['grep','ZVAL',file_pot],stdout=subprocess.PIPE).stdout.decode('utf-8').split()[5::9]]
atom_num = [float(i) for i in lc.getlines(file_pos)[6].split()]
return int(np.dot(atom_num,ele_num_atom))
def get_Ne_defect(self):
        # get the total number of valence electrons from OUTCAR
file_outcar = os.path.join(self.data_folder,'OUTCAR')
grep_res = subprocess.run(['grep','NELECT',file_outcar],stdout=subprocess.PIPE)
return int(float(grep_res.stdout.decode('utf-8').split()[2]))
def get_image(self):
file_image = os.path.join(self.data_folder,'OUTCAR')
return float(subprocess.run(['grep','Ewald',file_image],stdout=subprocess.PIPE).stdout.decode('utf-8').split('\n')[0].split()[-1])
def get_cpu_time(self):
file_outcar = os.path.join(self.data_folder,'OUTCAR')
with open(file_outcar) as f:
lines = f.readlines()
cpu_line = [line for line in lines if 'CPU' in line]
return float(cpu_line[0].split()[-1])
def get_gap(self,vbm_occupancy=0.7,cbm_occupancy=0.3):
file_eig = os.path.join(self.data_folder,'EIGENVAL')
line6 = np.genfromtxt(file_eig,skip_header=5,max_rows=1)
kpt_num, eig_num = int(line6[1]), int(line6[2])
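        # EIGENVAL lists, for each k-point, one row per band: the band index
        # followed by eigenvalue/occupancy columns (doubled for spin-polarised runs)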
if len(lc.getlines(file_eig)[8].split()) == 3:
isspin = False
elif len(lc.getlines(file_eig)[8].split()) == 5:
isspin = True
if not isspin:
all_eigval = np.zeros((eig_num, kpt_num*2))
for ii in range(kpt_num):
all_eigval[:,2*ii:2*ii+2] = np.genfromtxt(file_eig,
skip_header=8+eig_num*int(ii)+2*ii,
max_rows=eig_num,usecols=(1,2))
else:
all_eigval = np.zeros((eig_num, kpt_num*4))
for ii in range(kpt_num):
all_eigval[:,4*ii:4*ii+4] = np.genfromtxt(file_eig,
skip_header=8+eig_num*int(ii)+2*ii,
max_rows=eig_num,usecols=(1,2,3,4))
if not isspin:
elec_num = np.mean(all_eigval[:,1::2],axis=1)
idx1 = np.where(elec_num > vbm_occupancy)
idx2 = np.where(elec_num < cbm_occupancy)
if idx1[0][-1] - idx2[0][0] == -1:
vbm = np.max(all_eigval[idx1[0][-1],::2])
cbm = np.min(all_eigval[idx2[0][0],::2])
gap = cbm - vbm if cbm > vbm else 0
else:
                print('The gap of this system cannot be obtained by this program;',
                      'please carefully check the EIGENVAL file yourself')
return 0
return (vbm, cbm, gap)
else:
all_eigval_up = all_eigval[:,0::2]
all_eigval_down = all_eigval[:,1::2]
elec_num_up = np.mean(all_eigval_up[:,1::2],axis=1)
idx1 = np.where(elec_num_up > vbm_occupancy)
idx2 = np.where(elec_num_up < cbm_occupancy)
if idx1[0][-1] - idx2[0][0] == -1:
vbm_up = np.max(all_eigval_up[idx1[0][-1],::2])
cbm_up = np.min(all_eigval_up[idx2[0][0],::2])
gap_up = cbm_up - vbm_up if cbm_up > vbm_up else 0
else:
                print('The gap of this system cannot be obtained by this program;',
                      'please carefully check the EIGENVAL file yourself')
return 0
elec_num_down = np.mean(all_eigval_down[:,1::2],axis=1)
idx1 = np.where(elec_num_down > vbm_occupancy)
idx2 = np.where(elec_num_down < cbm_occupancy)
if idx1[0][-1] - idx2[0][0] == -1:
vbm_down = np.max(all_eigval_down[idx1[0][-1],::2])
cbm_down = np.min(all_eigval_down[idx2[0][0],::2])
gap_down = cbm_down - vbm_down if cbm_down > vbm_down else 0
else:
                print('The gap of this system cannot be obtained by this program;',
                      'please carefully check the EIGENVAL file yourself')
return 0
return (vbm_up, cbm_up, gap_up), (vbm_down, cbm_down, gap_down)
def get_ele_sta(no_defect_outcar,number):
number = int(number)
tmp_match_line = _get_line(no_defect_outcar,rematch='electrostatic')
rows = number // 5
col = number - rows * 5 - 1
if col == -1:
rows -= 1
col = 4
tmp_line = lc.getlines(no_defect_outcar)[tmp_match_line[0]+rows+3].split()
return float(tmp_line[2*col+1])
def _get_line(file_tmp,rematch=None):
grep_res = subprocess.Popen(['grep', rematch, file_tmp,'-n'],stdout=subprocess.PIPE)
return [int(ii) - 1 for ii in subprocess.check_output(['cut','-d',':','-f','1'],stdin=grep_res.stdout).decode('utf-8').split()]
def read_incar(incar):
import re
res = {}
with open(incar,'r') as f:
lines = f.readlines()
for line in lines:
if line.strip() == '':
continue
line = re.sub(r"\s+","",line,flags=re.UNICODE).split('=')
res[line[0]] = line[1]
return res
def read_doscar(wd):
c = read_vasp(os.path.join(wd,'POSCAR'))
line6 = np.genfromtxt(os.path.join(wd,'DOSCAR'),skip_header=5,max_rows=1)
n_dos = int(line6[2])
sum_dos = np.genfromtxt(os.path.join(wd,'DOSCAR'),skip_header=6,max_rows=n_dos)
np.savetxt(os.path.join(wd,'sum_dos.txt'),sum_dos,fmt="%.5f")
incar = read_incar(os.path.join(wd,'INCAR'))
if 'LORBIT' not in incar:
return
if int(incar['LORBIT']) == 11:
for ii in range(c.atoms.shape[0]):
p_dos = np.genfromtxt(os.path.join(wd,'DOSCAR'),skip_header=6+(1+n_dos)*(ii+1),max_rows=n_dos)
np.savetxt(os.path.join(wd,'p_dos'+str(ii)+'.txt'),p_dos,fmt="%.5f")
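# Example usage (a minimal sketch; './scf' is a hypothetical folder holding
# OSZICAR/OUTCAR/EIGENVAL/POSCAR/POTCAR from a single VASP run):
#     ev = ExtractValue(data_folder='./scf')
#     print(ev.get_energy(), ev.get_fermi())
#     print(ev.get_gap())   # (vbm, cbm, gap), or two tuples when spin-polarised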
|
{"hexsha": "666b5d482ef163a35bc5fb3fe1fd700ece118fd7", "size": 6816, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyvaspflow/io/vasp_out.py", "max_stars_repo_name": "Zhiwei-Lu/pyvaspflow", "max_stars_repo_head_hexsha": "b80eab3e8bfc52aed6a2459dd32655f1075d9058", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2019-06-03T11:41:35.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-04T07:45:42.000Z", "max_issues_repo_path": "pyvaspflow/io/vasp_out.py", "max_issues_repo_name": "Zhiwei-Lu/pyvaspflow", "max_issues_repo_head_hexsha": "b80eab3e8bfc52aed6a2459dd32655f1075d9058", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2019-03-12T10:51:15.000Z", "max_issues_repo_issues_event_max_datetime": "2019-03-14T02:18:18.000Z", "max_forks_repo_path": "pyvaspflow/io/vasp_out.py", "max_forks_repo_name": "Zhiwei-Lu/pyvaspflow", "max_forks_repo_head_hexsha": "b80eab3e8bfc52aed6a2459dd32655f1075d9058", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2019-06-03T03:20:20.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-06T11:48:37.000Z", "avg_line_length": 43.9741935484, "max_line_length": 144, "alphanum_fraction": 0.5950704225, "include": true, "reason": "import numpy", "num_tokens": 1999}
|
theory FixRestr
imports HOLCF
begin
find_consts name:funpow
term Nat.funpow
definition chainFrom :: "('a => 'a) => ('a :: cpo) => bool"
where "chainFrom F x = ((\<forall>n. (F^^n) x \<sqsubseteq> F ((F^^n) x)) \<and> (F (\<Squnion> i. ((F^^i) x)) = (\<Squnion> i. F ((F^^i) x))))"
lemma chainFrom_chain [simp]: "chainFrom F x \<Longrightarrow> chain (\<lambda>i. (F^^i) x)"
by (rule chainI, auto simp add: chainFrom_def)
lemma iterate_stays_above: "chainFrom F x \<Longrightarrow> x \<sqsubseteq> (F^^n) x"
apply (drule chainFrom_chain)
apply (rule nat_induct)
apply (auto simp add: chain_def)
by (metis rev_below_trans)
lemma lub_chainFrom_arg: "chainFrom F x \<Longrightarrow> F (\<Squnion> i. ((F^^i) x)) = (\<Squnion> i. F ((F^^i) x))"
by (simp add: chainFrom_def)
definition
"fixR" :: "'a \<Rightarrow> ('a::cpo => 'a) \<Rightarrow> 'a" where
"fixR x F = (if chainFrom F x then (\<Squnion>i. (F^^i) x) else x)"
lemma iterate_below_fix: "chainFrom F x \<Longrightarrow> (F^^n) x \<sqsubseteq> fixR x F"
unfolding fixR_def
apply (subst if_P, assumption)
using chainFrom_chain
by (rule is_ub_thelub)
lemma fix_eq: "chainFrom F x \<Longrightarrow> fixR x F = F (fixR x F)"
apply (simp add: fixR_def)
apply (subst lub_range_shift [of _ 1, symmetric])
apply (erule chainFrom_chain)
thm contlub_cfun_arg
apply (subst lub_chainFrom_arg, assumption)
apply (drule chainFrom_chain)
apply (simp add: chain_def)
done
lemma fixR_ind: "\<lbrakk> adm P; P x; chainFrom F x; \<And>i. \<lbrakk>x \<sqsubseteq> (F^^i) x ; P ((F^^i) x)\<rbrakk> \<Longrightarrow> P (F ((F^^i) x)) \<rbrakk> \<Longrightarrow> P (fixR x F)"
unfolding fixR_def
apply (subst if_P, assumption)
apply (erule admD)
apply (erule chainFrom_chain)
apply (rule nat_induct)
apply (simp_all add: iterate_stays_above)
done
lemma fixR_ind2:
assumes adm: "adm P"
assumes above: "chainFrom F x"
assumes 0: "P x" and 1: "P (F x)"
assumes step: "!!y. \<lbrakk>x \<sqsubseteq> y ; P y; P (F y)\<rbrakk> \<Longrightarrow> P (F (F y))"
shows "P (fixR x F)"
unfolding fixR_def
apply (subst if_P, fact)
apply (rule admD [OF adm chainFrom_chain[OF above]])
apply (rule nat_less_induct)
apply (case_tac n)
apply (simp add: 0)
apply (case_tac nat)
apply (simp add: 1)
apply (frule_tac x=nat in spec)
apply (simp add: step iterate_stays_above[OF above])
done
lemma parallel_fixR_ind:
assumes adm: "adm (\<lambda>x. P (fst x) (snd x))"
assumes aboveF: "chainFrom F x1"
assumes aboveG: "chainFrom G x2"
assumes base: "P x1 x2"
assumes step: "!!y z. \<lbrakk> x1 \<sqsubseteq> y ; x2 \<sqsubseteq> z; P y z \<rbrakk> \<Longrightarrow> P (F y) (G z)"
shows "P (fixR x1 F) (fixR x2 G)"
proof -
from adm have adm': "adm (split P)"
unfolding split_def .
have "!!i. P ((F^^i) x1) ((G^^i) x2)"
by (induct_tac i, simp add: base, simp add: step iterate_stays_above[OF aboveF] iterate_stays_above[OF aboveG])
hence "!!i. split P ((F^^i) x1, (G^^i) x2)"
by simp
hence "split P (\<Squnion>i. ((F^^i) x1, (G^^i) x2))"
apply - apply (rule admD [OF adm']) by(auto intro: ch2ch_Pair simp add: chainFrom_chain[OF aboveF] chainFrom_chain[OF aboveG])
hence "split P (\<Squnion>i. ((F^^i) x1), \<Squnion>i. (G^^i) x2)"
by (simp add: lub_Pair chainFrom_chain[OF aboveF] chainFrom_chain[OF aboveG])
hence "P (\<Squnion>i. (F^^i) x1) (\<Squnion>i. (G^^i) x2)"
by simp
thus "P (fixR x1 F) (fixR x2 G)"
using aboveF aboveG
by (simp add: fixR_def)
qed
(*
lemma fix1_cont2cont[simp,cont2cont]:"\<lbrakk> cont F ; cont G ; \<And> y. G y \<sqsubseteq> (F y) \<cdot> (G y) \<rbrakk> \<Longrightarrow> cont (\<lambda>y. fix1 (G y) (F y))"
unfolding fix1_def by auto
*)
lemma[simp]: "(fixR x (\<lambda> _. x)) = x"
by (rule fixR_ind, auto)
(*
lemma fix_least_below: "x \<sqsubseteq> F \<cdot> x \<Longrightarrow> x \<sqsubseteq> y \<Longrightarrow> F\<cdot>y \<sqsubseteq> y \<Longrightarrow> fix1 x F \<sqsubseteq> y"
apply (simp add: fix1_def)
apply (rule lub_below)
apply (erule chain_iterate_from)
apply (induct_tac i)
apply simp
apply simp
apply (erule rev_below_trans) back
apply (erule monofun_cfun_arg)
done
lemmas start_below_fix1[simp] = iterate_below_fix[where n = 0, simplified]
lemma fix1_alt_start:
assumes "x \<sqsubseteq> y" and "y \<sqsubseteq> F \<cdot> x"
shows "fix1 x F = fix1 y F"
proof(rule below_antisym)
have "x \<sqsubseteq> F \<cdot> x" using assms by (metis below.r_trans)
have "y \<sqsubseteq> F \<cdot> y" using assms by (metis monofun_cfun_arg rev_below_trans)
show "fix1 x F \<sqsubseteq> fix1 y F"
by (rule parallel_fix1_ind[OF _ `x \<sqsubseteq> F \<cdot> x` `y \<sqsubseteq> F \<cdot> y`], auto intro: monofun_cfun_arg assms(1))
show "fix1 y F \<sqsubseteq> fix1 x F"
apply (rule fix_least_below[OF `y \<sqsubseteq> F \<cdot> y`])
apply (subst fix_eq[OF `x \<sqsubseteq> F\<cdot>x`])
apply (rule below_trans[OF `y \<sqsubseteq> F \<cdot> x`])
apply (rule monofun_cfun_arg)
apply (rule start_below_fix1[OF `x \<sqsubseteq> F\<cdot>x`])
apply (subst fix_eq[OF `x \<sqsubseteq> F\<cdot>x`, symmetric])
apply (rule below_refl)
done
qed
*)
end
|
{"author": "Josh-Tilles", "repo": "AFP", "sha": "f4bf1d502bde2a3469d482b62c531f1c3af3e881", "save_path": "github-repos/isabelle/Josh-Tilles-AFP", "path": "github-repos/isabelle/Josh-Tilles-AFP/AFP-f4bf1d502bde2a3469d482b62c531f1c3af3e881/thys/Launchbury/FixRestr.thy"}
|
[STATEMENT]
lemma partial_get_put: "\<rho> \<in> \<S> \<Longrightarrow> put \<sigma> (get \<rho>) = \<rho>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<rho> \<in> \<S> \<Longrightarrow> put \<sigma> (get \<rho>) = \<rho>
[PROOF STEP]
by (metis put_det weak_get_put)
|
{"llama_tokens": 108, "file": "Optics_Lens_Laws", "length": 1}
|
//
// Copyright (c) 2016-2017 Vinnie Falco (vinnie dot falco at gmail dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
// Official repository: https://github.com/boostorg/beast
//
#ifndef BEAST_MULTI_BUFFER_HPP
#define BEAST_MULTI_BUFFER_HPP
#include <beast/core/detail/config.hpp>
#include <beast/core/detail/allocator.hpp>
#include <beast/core/detail/empty_base_optimization.hpp>
#include <asio/buffer.hpp>
#include <boost/intrusive/list.hpp>
#include <iterator>
#include <limits>
#include <memory>
#include <type_traits>
namespace beast {
/** A @b DynamicBuffer that uses multiple buffers internally.
The implementation uses a sequence of one or more character arrays
of varying sizes. Additional character array objects are appended to
the sequence to accommodate changes in the size of the character
sequence.
@note Meets the requirements of @b DynamicBuffer.
@tparam Allocator The allocator to use for managing memory.
*/
template<class Allocator>
class basic_multi_buffer
#if ! BEAST_DOXYGEN
: private detail::empty_base_optimization<
typename detail::allocator_traits<Allocator>::
template rebind_alloc<char>>
#endif
{
using base_alloc_type = typename
detail::allocator_traits<Allocator>::
template rebind_alloc<char>;
// Storage for the list of buffers representing the input
// and output sequences. The allocation for each element
// contains `element` followed by raw storage bytes.
class element;
using alloc_traits = detail::allocator_traits<base_alloc_type>;
using list_type = typename boost::intrusive::make_list<element,
boost::intrusive::constant_time_size<true>>::type;
using iter = typename list_type::iterator;
using const_iter = typename list_type::const_iterator;
using size_type = typename alloc_traits::size_type;
using const_buffer = asio::const_buffer;
using mutable_buffer = asio::mutable_buffer;
static_assert(std::is_base_of<std::bidirectional_iterator_tag,
typename std::iterator_traits<iter>::iterator_category>::value,
"BidirectionalIterator requirements not met");
static_assert(std::is_base_of<std::bidirectional_iterator_tag,
typename std::iterator_traits<const_iter>::iterator_category>::value,
"BidirectionalIterator requirements not met");
std::size_t max_ =
(std::numeric_limits<std::size_t>::max)();
list_type list_; // list of allocated buffers
iter out_; // element that contains out_pos_
size_type in_size_ = 0; // size of the input sequence
size_type in_pos_ = 0; // input offset in list_.front()
size_type out_pos_ = 0; // output offset in *out_
size_type out_end_ = 0; // output end offset in list_.back()
public:
/// The type of allocator used.
using allocator_type = Allocator;
#if BEAST_DOXYGEN
/// The type used to represent the input sequence as a list of buffers.
using const_buffers_type = implementation_defined;
/// The type used to represent the output sequence as a list of buffers.
using mutable_buffers_type = implementation_defined;
#else
class const_buffers_type;
class mutable_buffers_type;
#endif
/// Destructor
~basic_multi_buffer();
/** Constructor
Upon construction, capacity will be zero.
*/
basic_multi_buffer();
/** Constructor.
@param limit The setting for @ref max_size.
*/
explicit
basic_multi_buffer(std::size_t limit);
/** Constructor.
@param alloc The allocator to use.
*/
explicit
basic_multi_buffer(Allocator const& alloc);
/** Constructor.
@param limit The setting for @ref max_size.
@param alloc The allocator to use.
*/
basic_multi_buffer(
std::size_t limit, Allocator const& alloc);
/** Move constructor
After the move, `*this` will have an empty output sequence.
@param other The object to move from. After the move,
The object's state will be as if constructed using
its current allocator and limit.
*/
basic_multi_buffer(basic_multi_buffer&& other);
/** Move constructor
After the move, `*this` will have an empty output sequence.
@param other The object to move from. After the move,
        the object's state will be as if constructed using
its current allocator and limit.
@param alloc The allocator to use.
*/
basic_multi_buffer(basic_multi_buffer&& other,
Allocator const& alloc);
/** Copy constructor.
@param other The object to copy from.
*/
basic_multi_buffer(basic_multi_buffer const& other);
/** Copy constructor
@param other The object to copy from.
@param alloc The allocator to use.
*/
basic_multi_buffer(basic_multi_buffer const& other,
Allocator const& alloc);
/** Copy constructor.
@param other The object to copy from.
*/
template<class OtherAlloc>
basic_multi_buffer(basic_multi_buffer<
OtherAlloc> const& other);
/** Copy constructor.
@param other The object to copy from.
@param alloc The allocator to use.
*/
template<class OtherAlloc>
basic_multi_buffer(basic_multi_buffer<
OtherAlloc> const& other, allocator_type const& alloc);
/** Move assignment
After the move, `*this` will have an empty output sequence.
@param other The object to move from. After the move,
        the object's state will be as if constructed using
its current allocator and limit.
*/
basic_multi_buffer&
operator=(basic_multi_buffer&& other);
/** Copy assignment
After the copy, `*this` will have an empty output sequence.
@param other The object to copy from.
*/
basic_multi_buffer& operator=(basic_multi_buffer const& other);
/** Copy assignment
After the copy, `*this` will have an empty output sequence.
@param other The object to copy from.
*/
template<class OtherAlloc>
basic_multi_buffer& operator=(
basic_multi_buffer<OtherAlloc> const& other);
/// Returns a copy of the associated allocator.
allocator_type
get_allocator() const
{
return this->member();
}
/// Returns the size of the input sequence.
size_type
size() const
{
return in_size_;
}
/// Returns the permitted maximum sum of the sizes of the input and output sequence.
size_type
max_size() const
{
return max_;
}
/// Returns the maximum sum of the sizes of the input sequence and output sequence the buffer can hold without requiring reallocation.
std::size_t
capacity() const;
/** Get a list of buffers that represents the input sequence.
@note These buffers remain valid across subsequent calls to `prepare`.
*/
const_buffers_type
data() const;
/** Get a list of buffers that represents the output sequence, with the given size.
@note Buffers representing the input sequence acquired prior to
this call remain valid.
*/
mutable_buffers_type
prepare(size_type n);
/** Move bytes from the output sequence to the input sequence.
@note Buffers representing the input sequence acquired prior to
this call remain valid.
*/
void
commit(size_type n);
/// Remove bytes from the input sequence.
void
consume(size_type n);
template<class Alloc>
friend
void
swap(
basic_multi_buffer<Alloc>& lhs,
basic_multi_buffer<Alloc>& rhs);
private:
template<class OtherAlloc>
friend class basic_multi_buffer;
void
delete_list();
void
reset();
template<class DynamicBuffer>
void
copy_from(DynamicBuffer const& other);
void
move_assign(basic_multi_buffer& other, std::false_type);
void
move_assign(basic_multi_buffer& other, std::true_type);
void
copy_assign(basic_multi_buffer const& other, std::false_type);
void
copy_assign(basic_multi_buffer const& other, std::true_type);
void
swap(basic_multi_buffer&);
void
swap(basic_multi_buffer&, std::true_type);
void
swap(basic_multi_buffer&, std::false_type);
void
debug_check() const;
};
/// A typical multi buffer
using multi_buffer = basic_multi_buffer<std::allocator<char>>;
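// Example (a minimal sketch of the DynamicBuffer cycle; assumes the usual
// asio::buffer / asio::buffer_copy helpers are available):
//
//     multi_buffer b;
//     auto out = b.prepare(12);                       // reserve output space
//     auto n = asio::buffer_copy(out, asio::buffer("hello world!", 12));
//     b.commit(n);                                    // make it readable
//     // ... inspect b.data() ...
//     b.consume(n);                                   // discard consumed bytes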
} // beast
#include <beast/core/impl/multi_buffer.ipp>
#endif
|
{"hexsha": "44e19c7ea12d5fca165f4c205c1b7ccaf0569c62", "size": 8651, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/beast/core/multi_buffer.hpp", "max_stars_repo_name": "mandreyel/beast-asio-standalone", "max_stars_repo_head_hexsha": "627c633b0262a04b6dc71c7b946df63674274f0e", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 10.0, "max_stars_repo_stars_event_min_datetime": "2018-11-04T09:46:21.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-18T19:35:29.000Z", "max_issues_repo_path": "include/beast/core/multi_buffer.hpp", "max_issues_repo_name": "mandreyel/beast-asio-standalone", "max_issues_repo_head_hexsha": "627c633b0262a04b6dc71c7b946df63674274f0e", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2019-05-03T16:23:13.000Z", "max_issues_repo_issues_event_max_datetime": "2019-05-03T16:23:13.000Z", "max_forks_repo_path": "include/beast/core/multi_buffer.hpp", "max_forks_repo_name": "mandreyel/beast-asio-standalone", "max_forks_repo_head_hexsha": "627c633b0262a04b6dc71c7b946df63674274f0e", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 2.0, "max_forks_repo_forks_event_min_datetime": "2019-12-05T16:05:59.000Z", "max_forks_repo_forks_event_max_datetime": "2020-08-18T09:55:25.000Z", "avg_line_length": 26.9501557632, "max_line_length": 138, "alphanum_fraction": 0.6831580164, "num_tokens": 1886}
|
################################################################################
# Fundamentos de la Ciencia de Datos - 78106 - R-PL6 #
# Grupo 4 - P6 #
# Authors: #
# - David Emanuel Craciunescu #
# - Laura Pérez Medeiro #
# #
################################################################################
# Imports
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import offsetbox
from sklearn import manifold, datasets, decomposition
################################################################################
# Data Plotting #
################################################################################
# Load digits.
digits = datasets.load_digits(n_class=10)
X = digits.data
y = digits.target
# Plotting function.
def plot_MNIST(X, title=None):
# Create ranges
x_min = np.min(X, 0)
x_max = np.max(X, 0)
# Scale values to fit.
X = (X - x_min) / (x_max - x_min)
plt.figure(figsize= (10,10))
ax = plt.subplot(111)
for i in range(X.shape[0]):
plt.text(
X[i, 0],
X[i, 1],
str(digits.target[i]),
color=plt.cm.Set1(y[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
## Only print thumbnail with matplotlib > 1.0
if hasattr(offsetbox, 'AnnotationBbox'):
# Simply something big.
shown_images = np.array([[1., 1.]])
for i in range(digits.data.shape[0]):
dist = np.sum((X[i] - shown_images) ** 2, 1)
# Filter points that are too close.
if np.min(dist) < 5e-3:
continue
            shown_images = np.r_[shown_images, [X[i]]]
imagebox = offsetbox.AnnotationBbox(
offsetbox.OffsetImage(
digits.images[i],
cmap = plt.cm.gray_r),
X[i])
ax.add_artist(imagebox)
plt.xticks([]), plt.yticks([])
if title is not None:
plt.title(title)
################################################################################
# Data Visualization #
################################################################################
print("Computing")
X_tsne = manifold.TSNE(n_components=2, init='pca').fit_transform(X)
plot_MNIST(X_tsne, "t-SNE embedding of the digits")
plt.show()
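# For comparison (a minimal sketch using the already-imported decomposition
# module), a plain linear projection of the same digits:
#     X_pca = decomposition.TruncatedSVD(n_components=2).fit_transform(X)
#     plot_MNIST(X_pca, "PCA projection of the digits")
#     plt.show()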
|
{"hexsha": "33cc1ffea557509f663fa67539d4f8564d9a27e3", "size": 2791, "ext": "py", "lang": "Python", "max_stars_repo_path": "PL6/src/bidimensional.py", "max_stars_repo_name": "craciunescu/DataScience", "max_stars_repo_head_hexsha": "e246994974d817f48d6861162f2804ed4c9539ff", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-10-22T15:58:11.000Z", "max_stars_repo_stars_event_max_datetime": "2020-10-22T15:58:11.000Z", "max_issues_repo_path": "PL6/src/bidimensional.py", "max_issues_repo_name": "craciunescu/DataScience", "max_issues_repo_head_hexsha": "e246994974d817f48d6861162f2804ed4c9539ff", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-06-02T00:46:53.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-02T00:46:53.000Z", "max_forks_repo_path": "PL6/src/bidimensional.py", "max_forks_repo_name": "craciunescu/datascience", "max_forks_repo_head_hexsha": "e246994974d817f48d6861162f2804ed4c9539ff", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.7820512821, "max_line_length": 81, "alphanum_fraction": 0.3729845933, "include": true, "reason": "import numpy", "num_tokens": 513}
|
# Copyright (c) 2015-2020 by the parties listed in the AUTHORS file.
# All rights reserved. Use of this source code is governed by
# a BSD-style license that can be found in the LICENSE file.
import sys
import types
import copy
import numbers
from collections.abc import MutableMapping, Sequence, Mapping
import numpy as np
from pshmem.utils import mpi_data_type
from .mpi import MPI, comm_equal
from .instrument import Telescope
from .dist import distribute_samples
from .intervals import IntervalList
from .utils import (
Logger,
name_UID,
)
from .timing import function_timer
from .observation_data import (
DetectorData,
DetDataManager,
SharedDataManager,
IntervalsManager,
)
from .observation_view import DetDataView, SharedView, View, ViewManager, ViewInterface
from .observation_dist import (
DistDetSamp,
redistribute_data,
)
default_values = None
def set_default_values(values=None):
"""Update default values for common Observation objects.
Args:
names (dict): The dictionary specifying any name overrides.
Returns:
None
"""
global default_values
defaults = {
# names
"times": "times",
"shared_flags": "flags",
"det_data": "signal",
"det_flags": "flags",
"hwp_angle": "hwp_angle",
"azimuth": "azimuth",
"elevation": "elevation",
"boresight_azel": "boresight_azel",
"boresight_radec": "boresight_radec",
"position": "position",
"velocity": "velocity",
"pixels": "pixels",
"weights": "weights",
"quats": "quats",
"quats_azel": "quats_azel",
#
# flag masks
#
"shared_mask_invalid": 1,
"shared_mask_unstable_scanrate": 2,
"shared_mask_irregular": 4,
"det_mask_invalid": 1,
"det_mask_sso": 1 + 2,
#
# ground-specific flag masks
#
"turnaround": 1 + 2, # remove invalid bit to map turnarounds
"scan_leftright": 8,
"scan_rightleft": 16,
"sun_up": 32,
"sun_close": 64,
"elnod": 1 + 2 + 4,
}
if values is not None:
defaults.update(values)
default_values = types.SimpleNamespace(**defaults)
if default_values is None:
set_default_values()
class Observation(MutableMapping):
"""Class representing the data for one observation.
An Observation stores information about data distribution across one or more MPI
processes and is a container for four types of objects:
* Local detector data (unique to each process).
* Shared data that has one common copy for every node spanned by the
observation.
* Intervals defining spans of data with some common characteristic.
* Other arbitrary small metadata.
Small metadata can be stored directly in the Observation using normal square
bracket "[]" access to elements (an Observation is a dictionary). Groups of
detector data (e.g. "signal", "flags", etc) can be accessed in the separate
detector data dictionary (the "detdata" attribute). Shared data can be similarly
stored in the "shared" attribute. Lists of intervals are accessed in the
"intervals" attribute and data views can use any interval list to access subsets
of detector and shared data.
The detector data within an Observation is distributed among the processes in an
MPI communicator. The processes in the communicator are arranged in a rectangular
grid, with each process storing some number of detectors for a piece of time
covered by the observation. The most common configuration (and the default) is to
make this grid the size of the communicator in the "detector direction" and a size
of one in the "sample direction":
MPI det1 sample(0), sample(1), sample(2), ...., sample(N-1)
rank 0 det2 sample(0), sample(1), sample(2), ...., sample(N-1)
--------------------------------------------------------------------------
MPI det3 sample(0), sample(1), sample(2), ...., sample(N-1)
rank 1 det4 sample(0), sample(1), sample(2), ...., sample(N-1)
So each process has a subset of detectors for the whole span of the observation
time. You can override this shape by setting the process_rows to something
else. For example, process_rows=1 would result in:
MPI rank 0 | MPI rank 1
|
det1 sample(0), sample(1), ..., | ...., sample(N-1)
det2 sample(0), sample(1), ..., | ...., sample(N-1)
det3 sample(0), sample(1), ..., | ...., sample(N-1)
det4 sample(0), sample(1), ..., | ...., sample(N-1)
Args:
comm (toast.Comm): The toast communicator containing information about the
process group.
telescope (Telescope): An instance of a Telescope object.
n_samples (int): The total number of samples for this observation.
name (str): (Optional) The observation name.
uid (int): (Optional) The Unique ID for this observation. If not specified,
the UID will be computed from a hash of the name.
detector_sets (list): (Optional) List of lists containing detector names.
            These discrete detector sets are used to distribute detectors; a detector
set will always be within a single row of the process grid. If None,
every detector is a set of one.
sample_sets (list): (Optional) List of lists of chunk sizes (integer numbers of
samples). These discrete sample sets are used to distribute sample data.
A sample set will always be within a single column of the process grid. If
None, any distribution break in the sample direction will happen at an
arbitrary place. The sum of all chunks must equal the total number of
samples.
process_rows (int): (Optional) The size of the rectangular process grid
in the detector direction. This number must evenly divide into the size of
comm. If not specified, defaults to the size of the communicator.
"""
view = ViewInterface()
@function_timer
def __init__(
self,
comm,
telescope,
n_samples,
name=None,
uid=None,
detector_sets=None,
sample_sets=None,
process_rows=None,
):
log = Logger.get()
self._telescope = telescope
self._name = name
self._uid = uid
if self._uid is None and self._name is not None:
self._uid = name_UID(self._name)
self.dist = DistDetSamp(
n_samples,
self._telescope.focalplane.detectors,
sample_sets,
detector_sets,
comm,
process_rows,
)
# The internal metadata dictionary
self._internal = dict()
# Set up the data managers
self.detdata = DetDataManager(self.dist)
self.shared = SharedDataManager(self.dist)
self.intervals = IntervalsManager(self.dist, n_samples)
# Fully clear the observation
def clear(self):
self.view.clear()
self.intervals.clear()
self.detdata.clear()
self.shared.clear()
self._internal.clear()
# General properties
@property
def telescope(self):
"""
(Telescope): The Telescope instance for this observation.
"""
return self._telescope
@property
def name(self):
"""
(str): The name of the observation.
"""
return self._name
@property
def uid(self):
"""
(int): The Unique ID for this observation.
"""
return self._uid
@property
def comm(self):
"""
(toast.Comm): The overall communicator.
"""
return self.dist.comm
# The MPI communicator along the current row of the process grid
@property
def comm_row(self):
"""
(mpi4py.MPI.Comm): The communicator for processes in the same row (or None).
"""
return self.dist.comm_row
@property
def comm_row_size(self):
"""
(int): The number of processes in the row communicator.
"""
return self.dist.comm_row_size
@property
def comm_row_rank(self):
"""
(int): The rank of this process in the row communicator.
"""
return self.dist.comm_row_rank
# The MPI communicator along the current column of the process grid
@property
def comm_col(self):
"""
(mpi4py.MPI.Comm): The communicator for processes in the same column (or None).
"""
return self.dist.comm_col
@property
def comm_col_size(self):
"""
(int): The number of processes in the column communicator.
"""
return self.dist.comm_col_size
@property
def comm_col_rank(self):
"""
(int): The rank of this process in the column communicator.
"""
return self.dist.comm_col_rank
# Detector distribution
@property
def all_detectors(self):
"""
(list): All detectors stored in this observation.
"""
return self.dist.detectors
@property
def local_detectors(self):
"""
(list): The detectors assigned to this process.
"""
return self.dist.dets[self.dist.comm.group_rank]
def select_local_detectors(self, selection=None):
"""
(list): The detectors assigned to this process, optionally pruned.
"""
if selection is None:
return self.local_detectors
else:
dets = list()
sel_set = set(selection)
for det in self.local_detectors:
if det in sel_set:
dets.append(det)
return dets
# Detector set distribution
@property
def all_detector_sets(self):
"""
(list): The total list of detector sets for this observation.
"""
return self.dist.detector_sets
@property
def local_detector_sets(self):
"""
(list): The detector sets assigned to this process (or None).
"""
if self.dist.detector_sets is None:
return None
else:
ds = list()
for d in range(self.dist.det_sets[self.dist.comm.group_rank].n_elem):
off = self.dist.det_sets[self.dist.comm.group_rank].offset
ds.append(self.dist.detector_sets[off + d])
return ds
# Sample distribution
@property
def n_all_samples(self):
"""(int): the total number of samples in this observation."""
return self.dist.samples
@property
def local_index_offset(self):
"""
The first sample on this process, relative to the observation start.
"""
return self.dist.samps[self.dist.comm.group_rank].offset
@property
def n_local_samples(self):
"""
The number of local samples on this process.
"""
return self.dist.samps[self.dist.comm.group_rank].n_elem
# Sample set distribution
@property
def all_sample_sets(self):
"""
(list): The input full list of sample sets used in data distribution
"""
return self.dist.sample_sets
@property
def local_sample_sets(self):
"""
(list): The sample sets assigned to this process (or None).
"""
if self.dist.sample_sets is None:
return None
else:
ss = list()
for s in range(self.dist.samp_sets[self.dist.comm.group_rank].n_elem):
off = self.dist.samp_sets[self.dist.comm.group_rank].offset
ss.append(self.dist.sample_sets[off + s])
return ss
# Mapping methods
def __getitem__(self, key):
return self._internal[key]
def __delitem__(self, key):
del self._internal[key]
def __setitem__(self, key, value):
self._internal[key] = value
def __iter__(self):
return iter(self._internal)
def __len__(self):
return len(self._internal)
def __del__(self):
if hasattr(self, "detdata"):
self.detdata.clear()
if hasattr(self, "shared"):
self.shared.clear()
def __repr__(self):
val = "<Observation"
val += f"\n name = '{self.name}'"
val += f"\n uid = '{self.uid}'"
if self.comm.comm_group is None:
val += " group has a single process (no MPI)"
else:
val += f" group has {self.comm.group_size} processes"
val += f"\n telescope = {self._telescope.__repr__()}"
for k, v in self._internal.items():
val += f"\n {k} = {v}"
val += f"\n {self.n_all_samples} total samples ({self.n_local_samples} local)"
val += f"\n shared: {self.shared}"
val += f"\n detdata: {self.detdata}"
val += f"\n intervals: {self.intervals}"
val += "\n>"
return val
def __eq__(self, other):
# Note that testing for equality is quite expensive, since it means testing all
# metadata and also all detector, shared, and interval data. This is mainly
# used for unit tests.
log = Logger.get()
fail = 0
if self.name != other.name:
fail = 1
log.verbose(f"Obs names {self.name} != {other.name}")
if self.uid != other.uid:
fail = 1
log.verbose(f"Obs uid {self.uid} != {other.uid}")
if self.telescope != other.telescope:
fail = 1
log.verbose("Obs telescopes not equal")
if self.dist != other.dist:
fail = 1
log.verbose("Obs distributions not equal")
if self._internal.keys() != other._internal.keys():
fail = 1
log.verbose("Obs metadata keys not equal")
for k, v in self._internal.items():
if v != other._internal[k]:
feq = True
try:
feq = np.allclose(v, other._internal[k])
except Exception:
# Not floating point data
feq = False
if not feq:
fail = 1
log.verbose(f"Obs metadata[{k}]: {v} != {other[k]}")
break
if self.shared != other.shared:
fail = 1
log.verbose("Obs shared data not equal")
if self.detdata != other.detdata:
fail = 1
log.verbose("Obs detdata not equal")
if self.intervals != other.intervals:
fail = 1
log.verbose("Obs intervals not equal")
if self.comm.comm_group is not None:
fail = self.comm.comm_group.allreduce(fail, op=MPI.SUM)
return fail == 0
def __ne__(self, other):
return not self.__eq__(other)
def duplicate(
self, times=None, meta=None, shared=None, detdata=None, intervals=None
):
"""Return a copy of the observation and all its data.
The times field should be the name of the shared field containing timestamps.
This is used when copying interval lists to the new observation so that these
objects reference the timestamps within this observation (rather than the old
one). If this is not specified and some intervals exist, then an exception is
raised.
The meta, shared, detdata, and intervals list specifies which of those objects
to copy to the new observation. If these are None, then all objects are
duplicated.
Args:
times (str): The name of the timestamps shared field.
meta (list): List of metadata objects to copy, or None.
shared (list): List of shared objects to copy, or None.
detdata (list): List of detdata objects to copy, or None.
intervals (list): List of intervals objects to copy, or None.
Returns:
(Observation): The new copy of the observation.
"""
log = Logger.get()
if times is None and len(self.intervals) > 0:
msg = "You must specify the times field when duplicating observations "
msg += "that have some intervals defined."
log.error(msg)
raise RuntimeError(msg)
new_obs = Observation(
self.dist.comm,
self.telescope,
self.n_all_samples,
name=self.name,
uid=self.uid,
detector_sets=self.all_detector_sets,
sample_sets=self.all_sample_sets,
process_rows=self.dist.process_rows,
)
for k, v in self._internal.items():
if meta is None or k in meta:
new_obs[k] = copy.deepcopy(v)
for name, data in self.detdata.items():
if detdata is None or name in detdata:
new_obs.detdata[name] = data
for name, data in self.shared.items():
if shared is None or name in shared:
# Create the object on the corresponding communicator in the new obs
new_obs.shared.assign_mpishared(name, data, self.shared.comm_type(name))
for name, data in self.intervals.items():
if intervals is None or name in intervals:
timespans = [(x.start, x.stop) for x in data]
new_obs.intervals[name] = IntervalList(
new_obs.shared[times], timespans=timespans
)
return new_obs
def memory_use(self):
"""Estimate the memory used by shared and detector data.
This sums the memory used by the shared and detdata attributes and returns the
total on all processes. This function is blocking on the observation
communicator.
Returns:
(int): The number of bytes of memory used by timestream data.
"""
# Get local memory from detector data
local_mem = self.detdata.memory_use()
# If there are many intervals, this could take up non-trivial space. Add them
# to the local total
for iname, it in self.intervals.items():
if len(it) > 0:
local_mem += len(it) * (
sys.getsizeof(it[0]._start)
+ sys.getsizeof(it[0]._stop)
+ sys.getsizeof(it[0]._first)
+ sys.getsizeof(it[0]._last)
)
# Sum the aggregate local memory
total = None
if self.comm.comm_group is None:
total = local_mem
else:
total = self.comm.comm_group.allreduce(local_mem, op=MPI.SUM)
# The total shared memory use is already returned on every process by this
# next function.
total += self.shared.memory_use()
return total
# Redistribution
@function_timer
def redistribute(
self,
process_rows,
times=None,
override_sample_sets=False,
override_detector_sets=False,
):
"""Take the currently allocated observation and redistribute in place.
This changes the data distribution within the observation. After
re-assigning all detectors and samples, the currently allocated shared data
objects and detector data objects are redistributed using the observation
communicator.
Args:
process_rows (int): The size of the new process grid in the detector
direction. This number must evenly divide into the size of the
observation communicator.
times (str): The shared data field representing the timestamps. This
is used to recompute the intervals after redistribution.
override_sample_sets (False, None or list): If not False, override
existing sample set boundaries in the redistributed data.
override_detector_sets (False, None or list): If not False, override
existing detector set boundaries in the redistributed data.
Returns:
None
"""
log = Logger.get()
if process_rows == self.dist.process_rows:
# Nothing to do!
return
        if override_sample_sets is False:
            sample_sets = self.dist.sample_sets
        else:
            sample_sets = override_sample_sets
        if override_detector_sets is False:
            detector_sets = self.dist.detector_sets
        else:
            detector_sets = override_detector_sets
# Create the new distribution
new_dist = DistDetSamp(
self.dist.samples,
self._telescope.focalplane.detectors,
sample_sets,
detector_sets,
self.dist.comm,
process_rows,
)
# Do the actual redistribution
new_shr_manager, new_det_manager, new_intervals_manager = redistribute_data(
self.dist, new_dist, self.shared, self.detdata, self.intervals, times=times
)
# Replace our distribution and data managers with the new ones.
del self.dist
self.dist = new_dist
self.shared.clear()
del self.shared
self.shared = new_shr_manager
self.detdata.clear()
del self.detdata
self.detdata = new_det_manager
self.intervals.clear()
del self.intervals
self.intervals = new_intervals_manager
# Accelerator use
def accel_create(self, names):
"""Create a set of data objects on the device.
This takes a dictionary with the same format as those used by the Operator
provides() and requires() methods.
Args:
names (dict): Dictionary of lists.
Returns:
None
"""
for key in names["detdata"]:
self.detdata.accel_create(key)
for key in names["shared"]:
self.shared.accel_create(key)
for key in names["intervals"]:
self.intervals.accel_create(key)
def accel_update_device(self, names):
"""Copy data objects to the device.
This takes a dictionary with the same format as those used by the Operator
provides() and requires() methods.
Args:
names (dict): Dictionary of lists.
Returns:
None
"""
for key in names["detdata"]:
self.detdata.accel_update_device(key)
for key in names["shared"]:
self.shared.accel_update_device(key)
for key in names["intervals"]:
self.intervals.accel_update_device(key)
def accel_update_host(self, names):
"""Copy data objects from the device.
This takes a dictionary with the same format as those used by the Operator
provides() and requires() methods.
Args:
names (dict): Dictionary of lists.
Returns:
None
"""
for key in names["detdata"]:
self.detdata.accel_update_host(key)
for key in names["shared"]:
self.shared.accel_update_host(key)
for key in names["intervals"]:
self.intervals.accel_update_host(key)
def accel_clear(self):
self.detdata.accel_clear()
self.shared.accel_clear()
self.intervals.accel_clear()
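# A minimal usage sketch (illustrative only): duplicate an observation and
# change its process grid. Assumes `obs` is an existing Observation whose
# timestamps live in a shared field named "times"; both names are hypothetical.
#
#   new_obs = obs.duplicate(times="times")
#   new_obs.redistribute(1, times="times")  # one process row: all detectors on each process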
|
{"hexsha": "162a34997eef3cdfc946f244e3fada6e05d6d74c", "size": 23690, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/types/observation/__init__.py", "max_stars_repo_name": "nestordemeure/differentoast", "max_stars_repo_head_hexsha": "4f03f4d8732d18398930e4520b0807f299e11be3", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/types/observation/__init__.py", "max_issues_repo_name": "nestordemeure/differentoast", "max_issues_repo_head_hexsha": "4f03f4d8732d18398930e4520b0807f299e11be3", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/types/observation/__init__.py", "max_forks_repo_name": "nestordemeure/differentoast", "max_forks_repo_head_hexsha": "4f03f4d8732d18398930e4520b0807f299e11be3", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.4520547945, "max_line_length": 88, "alphanum_fraction": 0.5910088645, "include": true, "reason": "import numpy", "num_tokens": 5101}
|
'''
Created on December 2019.
@author: Soroosh Tayebi Arasteh <soroosh.arasteh@fau.de>
https://github.com/tayebiarasteh/
'''
import numpy as np
from Layers import *
from Optimization import *
import copy
class NeuralNetwork:
'''
    The Neural Network defines the whole architecture by containing all its layers from the input
    to the loss. The network manages training and testing: it calls all forward
    methods, passing the data from the beginning to the end, and then runs the
    optimization by calling all backward passes afterwards.
'''
def __init__(self, optimizer, weights_initializer, bias_initializer):
        '''
        loss: a list which will contain the loss value for each iteration after calling train().
        layers: a list which holds the architecture.
        data_layer: a member that provides input data and labels upon calling forward() on it.
        loss_layer: a member referring to the special layer providing the loss and the prediction.
        '''
self.optimizer = optimizer
self.loss = []
self.layers = []
        self.data_layer = None
        self.loss_layer = None
self.weights_initializer = weights_initializer
self.bias_initializer = bias_initializer
self._phase = Base.Phase.train
def forward(self):
self.input_tensor, self.label_tensor = self.data_layer.forward()
r_loss = 0
for layer in self.layers:
layer.phase = Base.Phase.train
self.input_tensor = layer.forward(self.input_tensor)
if hasattr(layer, 'optimizer'):
if layer.optimizer:
if layer.optimizer.regularizer:
r_loss += layer.optimizer.regularizer.norm(layer.weights)
loss = self.loss_layer.forward(self.input_tensor, self.label_tensor)
self.loss.append(loss + r_loss)
return loss + r_loss
def backward(self):
error_tensor = self.loss_layer.backward(self.label_tensor)
for layer in reversed(self.layers):
error_tensor = layer.backward(error_tensor)
def append_trainable_layer(self, layer):
layer.optimizer = copy.deepcopy(self.optimizer)
layer.initialize(self.weights_initializer, self.bias_initializer)
self.layers.append(layer)
def train(self, iterations):
self.phase = Base.Phase.train
for i in range(iterations):
loss = self.forward()
if (i+1)%200 == 0:
print("training iteration", str(i+1) + ":", 'loss =', loss)
self.backward()
def test(self, input_tensor):
'''
propagates the input tensor through the network
and returns the prediction of the last layer.
'''
self.phase = Base.Phase.test
for layer in self.layers:
layer.phase = Base.Phase.test
input_tensor = layer.forward(input_tensor)
return input_tensor
@property
def phase(self):
return self._phase
@phase.setter
def phase(self, value):
self._phase = value
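# A minimal usage sketch (illustrative only; the layer, loss, optimizer and
# initializer names below are hypothetical placeholders for classes from the
# surrounding framework):
#
#   net = NeuralNetwork(Optimizers.Sgd(1e-3),
#                       Initializers.UniformRandom(),
#                       Initializers.Constant(0.1))
#   net.data_layer = mnist_data_layer          # must provide forward() -> (input, labels)
#   net.loss_layer = Loss.CrossEntropyLoss()
#   net.append_trainable_layer(FullyConnected(784, 10))
#   net.train(1000)
#   predictions = net.test(test_inputs)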
|
{"hexsha": "002faa0c6b874e21c080b62d5119562a0d86d47c", "size": 3144, "ext": "py", "lang": "Python", "max_stars_repo_path": "NeuralNetwork.py", "max_stars_repo_name": "starasteh/DeepLearning_from_scratch", "max_stars_repo_head_hexsha": "6ed4685e4da57ad5ea51edf84010f2cc9725a2ba", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2020-09-13T17:03:26.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-16T14:03:59.000Z", "max_issues_repo_path": "NeuralNetwork.py", "max_issues_repo_name": "starasteh/DeepLearning_from_scratch", "max_issues_repo_head_hexsha": "6ed4685e4da57ad5ea51edf84010f2cc9725a2ba", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "NeuralNetwork.py", "max_forks_repo_name": "starasteh/DeepLearning_from_scratch", "max_forks_repo_head_hexsha": "6ed4685e4da57ad5ea51edf84010f2cc9725a2ba", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-03-17T02:20:42.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-24T16:28:45.000Z", "avg_line_length": 32.75, "max_line_length": 120, "alphanum_fraction": 0.6450381679, "include": true, "reason": "import numpy", "num_tokens": 636}
|
# https://en.wikipedia.org/wiki/Bradley%E2%80%93Terry_model
import numpy as np
import torch
import torch.nn.functional as F
from torch.autograd import grad
from scipy.optimize import minimize
def lossgrad(scores, outcomes):
# prior: player #0 is at 0 elo
scores[0] = 0.0
# gamma = 10^(elo/400)
# convert to natural base
scores = scores * np.log(10) / 400
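    # Bradley-Terry: P(i beats j) = gamma_i / (gamma_i + gamma_j). With
    # s = ln(gamma), that probability is softmax([s_i, s_j])[0], so the
    # negative log-likelihood accumulated below is a pairwise cross-entropy.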
scores = torch.tensor(scores, requires_grad=True)
loss = torch.tensor(0.0, dtype=torch.float64)
# likelihoods
for i, j in outcomes:
pair = torch.cat([scores[i, None], scores[j, None]])
logprob = F.log_softmax(pair, 0)
o = outcomes[i, j]
loss += int(o[0]) * -logprob[0]
loss += int(o[2]) * -logprob[1]
assert o[1] == 0, 'draws not supported'
g, = grad(loss, (scores,))
l, g = loss.item(), g.numpy()
# keep player #0 at 0 elo
g[0] = 0.0
#print(l, (g**2).sum() ** .5)
return l, g
class RankingError(Exception):
pass
def compute_ranking(num_players, outcomes):
"""Compute Elo ranking from match outcomes.
Outcomes are represented as a dict, where each pair of player ids
    (p1, p2) maps to a triplet of (p1_wins, draws, p1_losses).
:param outcomes: match results
:returns: array of Elo scores
"""
res = minimize(lossgrad, np.zeros(num_players), args=(outcomes,),
method='L-BFGS-B', jac=True)
if not res.success:
raise RankingError('did not converge')
#print('loss', res.fun)
return res.x
if __name__ == '__main__':
'''
0 1000
1 10000
2 50000
3 70000
4 80000
5 141000
'''
# (p1, p2): (p1 wins, draws, p1 losses)
outcomes = {
(5, 3): (74, 0, 26),
(5, 5): (42, 0, 58),
(4, 3): (66, 0, 34),
(5, 0): (100, 0, 0),
(5, 1): (100, 0, 0),
(4, 2): (74, 0, 26),
}
scores = compute_ranking(6, outcomes)
print(scores)
|
{"hexsha": "7bd0e61e7c1412c9b9a19202dba7f0c0d398c2b2", "size": 1949, "ext": "py", "lang": "Python", "max_stars_repo_path": "azalea/ranking.py", "max_stars_repo_name": "jseppanen/azalea", "max_stars_repo_head_hexsha": "aea0e6f76f2d76e2c527163c88268c5a6e7dfd8b", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 24, "max_stars_repo_stars_event_min_datetime": "2018-08-06T07:31:48.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-13T07:49:43.000Z", "max_issues_repo_path": "azalea/ranking.py", "max_issues_repo_name": "jseppanen/azalea", "max_issues_repo_head_hexsha": "aea0e6f76f2d76e2c527163c88268c5a6e7dfd8b", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2019-04-20T14:08:57.000Z", "max_issues_repo_issues_event_max_datetime": "2020-03-30T08:27:17.000Z", "max_forks_repo_path": "azalea/ranking.py", "max_forks_repo_name": "jseppanen/azalea", "max_forks_repo_head_hexsha": "aea0e6f76f2d76e2c527163c88268c5a6e7dfd8b", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2019-04-20T14:11:27.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-17T10:45:19.000Z", "avg_line_length": 23.4819277108, "max_line_length": 69, "alphanum_fraction": 0.572601334, "include": true, "reason": "import numpy,from scipy", "num_tokens": 632}
|
! { dg-do run }
! Short test program with a CASE statement that uses a range.
!
program select_4
integer i
do i = 1, 34, 4
select case(i)
case (:5)
if (i /= 1 .and. i /= 5) STOP 1
case (13:21)
if (i /= 13 .and. i /= 17 .and. i /= 21) STOP 2
case (29:)
if (i /= 29 .and. i /= 33) STOP 3
case default
if (i /= 9 .and. i /= 25) STOP 4
end select
end do
end program select_4
|
{"hexsha": "90792a2f4337cf5274aea32d4366c2f82678b860", "size": 436, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "validation_tests/llvm/f18/gfortran.dg/select_3.f90", "max_stars_repo_name": "brugger1/testsuite", "max_stars_repo_head_hexsha": "9b504db668cdeaf7c561f15b76c95d05bfdd1517", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2019-02-12T18:20:29.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-09T19:46:19.000Z", "max_issues_repo_path": "validation_tests/llvm/f18/gfortran.dg/select_3.f90", "max_issues_repo_name": "brugger1/testsuite", "max_issues_repo_head_hexsha": "9b504db668cdeaf7c561f15b76c95d05bfdd1517", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 24, "max_issues_repo_issues_event_min_datetime": "2020-08-31T22:05:07.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-21T18:30:03.000Z", "max_forks_repo_path": "validation_tests/llvm/f18/gfortran.dg/select_3.f90", "max_forks_repo_name": "brugger1/testsuite", "max_forks_repo_head_hexsha": "9b504db668cdeaf7c561f15b76c95d05bfdd1517", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 19, "max_forks_repo_forks_event_min_datetime": "2020-08-31T21:59:10.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-23T22:06:46.000Z", "avg_line_length": 22.9473684211, "max_line_length": 62, "alphanum_fraction": 0.5160550459, "num_tokens": 166}
|
[STATEMENT]
lemma fmrestrict_set_insert_notin:
\<open>xa \<notin> fset (fmdom N) \<Longrightarrow>
fmrestrict_set (insert xa l1) N = fmrestrict_set l1 N\<close>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. xa \<notin> fset (fmdom N) \<Longrightarrow> fmrestrict_set (insert xa l1) N = fmrestrict_set l1 N
[PROOF STEP]
by (rule fmap_ext_fmdom)
(auto simp: fset_fmdom_fmrestrict_set fmember.rep_eq notin_fset)
|
{"llama_tokens": 178, "file": "PAC_Checker_Finite_Map_Multiset", "length": 1}
|
import matplotlib.pyplot as plt
import numpy as np
from individual import Individual
from main import *
from pynput.keyboard import Key, Controller
from random import sample, random, randrange
from operator import attrgetter
from kmeans import KMeans
from statistics import mean
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
"""
TODO:
Check that random sample returns unique elements
Check why there are a lot of the same elements being added
"""
def det_testers(individuals):
    # Return the pair of distinct individuals with the smallest similarity score.
    min_sim = 100
    species1 = None
    species2 = None
    for ind1 in individuals:
        for ind2 in individuals:
            if ind1 != ind2:
                sim = ind1.similarity(ind2)
                if sim < min_sim:
                    min_sim = sim
                    species1 = ind1
                    species2 = ind2
    return species1, species2
def det_closest(cacti, pteras, dino):
    # Return the distance to, and the object of, the nearest upcoming obstacle.
    min_dist = 1000
    min_obj = None
    for c in cacti:
        distance = c.rect.left - dino.rect.right
        if distance < min_dist:
            min_dist = distance
            min_obj = c
    for p in pteras:
        distance = p.rect.left - dino.rect.right
        if distance < min_dist:
            min_dist = distance
            min_obj = p
    return min_dist, min_obj
def act_on_scenario(species, cacti, pteras, dino, scenario, duck_counter):
reaction = species[scenario]
closest, closest_obj = det_closest(cacti, pteras, dino)
if closest_obj and closest_obj.processed:
return
keyboard = Controller()
if closest <= abs(reaction):
closest_obj.processed = True
# if reaction > 0:
keyboard.press(Key.space)
keyboard.release(Key.space)
# else:
# if duck_counter < 0:
# keyboard.release(Key.down)
# else:
# keyboard.press(Key.down)
# keyboard.release(Key.down)
def calc_offset(dino, container):
offset = 0
# Ignore those past us already
for c in container:
if c.rect.right < dino.rect.left:
offset += 1
return offset
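# Scenario encoding used by select_scenario below: the index is determined by
# the counts of upcoming cacti and pteras,
#   1 cactus -> 0/1/2 for 0/1/2 pteras
#   2 cacti  -> 3/4/5
#   0 cacti  -> 6/7/8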
def select_scenario(cacti, pteras, dino):
offset_cactus = calc_offset(dino, cacti)
cacti_amt = len(cacti) - offset_cactus
offset_ptera = calc_offset(dino, pteras)
ptera_amt = len(pteras) - offset_ptera
if cacti_amt == 1:
if ptera_amt == 0:
return 0
elif ptera_amt == 1:
return 1
elif ptera_amt == 2:
return 2
else:
print("We had 1 cactus and more than 2 Birds")
exit(-1)
elif cacti_amt == 2:
if ptera_amt == 0:
return 3
elif ptera_amt == 1:
return 4
elif ptera_amt == 2:
return 5
else:
print("We had 2 cacti and more than 2 Birds")
exit(-1)
elif cacti_amt == 0:
if ptera_amt == 0:
return 6
elif ptera_amt == 1:
return 7
elif ptera_amt == 2:
return 8
else:
print("We had no cacti and more than 2 Birds")
exit(-1)
else:
print("We had more than 2 cacti")
exit(-1)
def cause_of_death(dino, species):
if dino.isJumping and dino.movement[1] <= 0:
species.jumped_too_early = False
else:
species.jumped_too_early = True
def run_game(species):
global high_score
gamespeed = 4
startMenu = False
gameOver = False
gameQuit = False
playerDino = Dino(44, 47)
new_ground = Ground(-1 * gamespeed)
scb = Scoreboard()
highsc = Scoreboard(width * 0.78)
frame_counter = 0
duck_counter = 0
cacti = pygame.sprite.Group()
pteras = pygame.sprite.Group()
clouds = pygame.sprite.Group()
last_obstacle = pygame.sprite.Group()
Cactus.containers = cacti
Ptera.containers = pteras
Cloud.containers = clouds
temp_images, temp_rect = load_sprite_sheet('numbers.png', 12, 1, 11, int(11 * 6 / 5), -1)
HI_image = pygame.Surface((22, int(11 * 6 / 5)))
HI_rect = HI_image.get_rect()
HI_image.fill(background_col)
HI_image.blit(temp_images[10], temp_rect)
temp_rect.left += temp_rect.width
HI_image.blit(temp_images[11], temp_rect)
HI_rect.top = height * 0.1
HI_rect.left = width * 0.73
last_scenario = 0
while not gameQuit:
while startMenu:
pass
while not gameOver:
if pygame.display.get_surface() is None:
print("Couldn't load display surface")
gameQuit = True
gameOver = True
else:
scenario = select_scenario(cacti, pteras, playerDino)
if scenario != last_scenario:
last_scenario = scenario
# if duck_counter > 0:
# duck_counter -= 1
# playerDino.isDucking = True
# else:
# playerDino.isDucking = False
if not (playerDino.isJumping and playerDino.isDead):
act_on_scenario(species.strategy, cacti, pteras, playerDino, scenario, duck_counter)
for event in pygame.event.get():
if event.type == pygame.QUIT:
gameQuit = True
gameOver = True
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_SPACE:
if not playerDino.isDucking:
if playerDino.rect.bottom == int(0.98 * height):
playerDino.isJumping = True
if pygame.mixer.get_init() is not None:
jump_sound.play()
playerDino.movement[1] = -1 * playerDino.jumpSpeed
# if event.key == pygame.K_DOWN:
# if not (playerDino.isJumping and playerDino.isDead):
# playerDino.isDucking = True
# duck_counter = 50
# if event.type == pygame.KEYUP:
# if event.key == pygame.K_DOWN and duck_counter <= 0:
# playerDino.isDucking = False
if not move(cacti, playerDino, gamespeed):
gameQuit = True
species.death_scenario = scenario
cause_of_death(playerDino, species)
if not move(pteras, playerDino, gamespeed):
gameQuit = True
species.death_scenario = scenario
cause_of_death(playerDino, species)
add_cactus(last_obstacle, gamespeed, cacti)
add_ptera(last_obstacle, gamespeed, pteras, frame_counter)
                if len(clouds) < 5 and randrange(0, 300) == 10:
                    Cloud(width, randrange(height // 5, height // 2))
playerDino.update()
cacti.update()
pteras.update()
clouds.update()
new_ground.update()
scb.update(playerDino.score)
highsc.update(high_score)
# draw updated
if pygame.display.get_surface() is not None:
screen.fill(background_col)
new_ground.draw()
clouds.draw(screen)
scb.draw()
if high_score != 0:
highsc.draw()
screen.blit(HI_image, HI_rect)
cacti.draw(screen)
pteras.draw(screen)
playerDino.draw()
pygame.display.update()
clock.tick(FPS)
if playerDino.isDead:
gameOver = True
if playerDino.score > high_score:
high_score = playerDino.score
if frame_counter % 700 == 699:
new_ground.speed -= 1
gamespeed += 1
frame_counter = (frame_counter + 1)
if gameQuit:
break
return playerDino.score
def main():
population = 100
k = 10
individuals = [None] * population
X = []
for spec in range(population):
individuals[spec] = Individual()
# individuals[spec].fitness = run_game(individuals[spec])
X.append(individuals[spec].strategy)
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)
pca = PCA(n_components=2)
pca_x = pca.fit_transform(X_scaled)
pca_pop = [Individual(arr=arr) for arr in pca_x]
# initial running of individuals
centroids, labels, closest = KMeans(pca_pop, k).run()
# centroids, labels, closest = KMeans(individuals, k).run()
centroids_list = [l.strategy for l in centroids]
revert = scaler.inverse_transform(pca.inverse_transform(centroids_list))
centroid_individuals = [Individual(arr=arr) for arr in revert]
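    # Surrogate evaluation: only the k centroid individuals actually play the
    # game; every other individual inherits an approximate fitness from its
    # cluster's centroid via Individual.fitness_approx.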
for centroid in centroid_individuals:
centroid.fitness = run_game(centroid) - 19
for i,centroid in enumerate(labels.keys()):
for individual in labels.get(centroid):
index = np.where(pca_x == individual.strategy)[0][0]
individuals[index].fitness = individuals[index].fitness_approx(centroid_individuals[i])
fittest = max(individuals, key=attrgetter('fitness'))
avg_fitness = []
fittest_score = []
generations = 0
while generations < 10:
print("Expected fittest %s: %f" % (fittest, fittest.fitness))
fittest_score.append(fittest.fitness)
print("average fitness is %f" % (mean([ind.fitness for ind in individuals])))
avg_fitness.append(mean([ind.fitness for ind in individuals]))
generations += 1
print("generation %d" % generations)
new_population = []
print("population size %d" %len(individuals))
while len(new_population) < population:
operator = random()
if operator < .8 :
ran_sample = sample(individuals, 5)
first_parent = max(ran_sample, key=attrgetter('fitness'))
ran_sample = sample(individuals, 5)
second_parent = max(ran_sample, key=attrgetter('fitness'))
first_child, second_child = first_parent.crossover(second_parent)
new_population.append(first_child)
new_population.append(second_child)
else:
ran_sample = sample(individuals, 5)
parent = max(ran_sample, key=attrgetter('fitness'))
mutated = parent.mutate()
new_population.append(mutated)
individuals = new_population
# for ind in individuals:
# ind.fitness = run_game(ind)
new_fittest = max(individuals, key=attrgetter('fitness'))
if new_fittest.fitness > fittest.fitness:
fittest = new_fittest
X = [arr.strategy for arr in individuals]
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)
pca = PCA(n_components=2)
pca_x = pca.fit_transform(X_scaled)
pca_pop = [Individual(arr=arr) for arr in pca_x]
# initial running of individuals
centroids, labels, closest = KMeans(pca_pop, k).run()
centroids_list = [l.strategy for l in centroids]
revert = scaler.inverse_transform(pca.inverse_transform(centroids_list))
centroid_individuals = [Individual(arr=arr) for arr in revert]
for centroid in centroid_individuals:
centroid.fitness = run_game(centroid) - 19
for i,centroid in enumerate(labels.keys()):
for individual in labels.get(centroid):
index = np.where(pca_x == individual.strategy)[0][0]
individuals[index].fitness = individuals[index].fitness_approx(centroid_individuals[i])
print("running fittest")
score = run_game(fittest)
print("fittest had a score of %d" % score)
pygame.quit()
quit()
filename = "CPopulation_" + str(population) + "_generations_" + str(generations)
x = [i for i in range(generations)]
plt.plot(x, avg_fitness, 'x--')
plt.xlabel("generations")
plt.ylabel("fitness")
plt.title("average fitness over %d generations" %generations)
plt.savefig("figs/" + filename + "avg_fitness.png")
plt.figure()
plt.plot(x, avg_fitness, 'x--')
plt.plot(x, fittest_score, '+--')
plt.xlabel("generations")
plt.ylabel("fitness")
plt.title("average fitness and fittest score over %d generations" %generations)
plt.savefig("figs/" + filename + "fitness_and_fittest.png")
plt.show()
if __name__ == "__main__":
main()
|
{"hexsha": "48f4885b55bff544ad2bc0bde3951d3e70f5e2e3", "size": 12723, "ext": "py", "lang": "Python", "max_stars_repo_path": "genetic.py", "max_stars_repo_name": "zanussbaum/Chrome-T-Rex-Rush-master", "max_stars_repo_head_hexsha": "d1b9fce102238be7e20c403fcdca7b671be482e6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "genetic.py", "max_issues_repo_name": "zanussbaum/Chrome-T-Rex-Rush-master", "max_issues_repo_head_hexsha": "d1b9fce102238be7e20c403fcdca7b671be482e6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "genetic.py", "max_forks_repo_name": "zanussbaum/Chrome-T-Rex-Rush-master", "max_forks_repo_head_hexsha": "d1b9fce102238be7e20c403fcdca7b671be482e6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.3937007874, "max_line_length": 104, "alphanum_fraction": 0.5751002122, "include": true, "reason": "import numpy", "num_tokens": 2967}
|
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 05 15:21:19 2017
@author: Administrator
"""
from PIL import Image
import os
import numpy as np
def mergeReport(files,img_name):
baseimg=Image.open(files[0])
sz = baseimg.size
basemat=np.atleast_2d(baseimg)
for file in files[1:]:
im=Image.open(file)
        #resize to the base width, preserving the aspect ratio
        sz2 = im.size
        if sz2[0]!=sz[0]:
            print ("tar image width is:%d, ori image width is:%d" %(sz2[0],sz[0]))
            im=im.resize((sz[0],round(sz[0] / sz2[0] * sz2[1])),Image.ANTIALIAS)
mat=np.atleast_2d(im)
basemat=np.append(basemat,mat,axis=1)
report_img=Image.fromarray(basemat)
report_img.save(img_name)
path1 = "E:/Desktop/neutral_test1/neutral_test/" #文件夹目录
for root,dirs,files1 in os.walk(path1): #得到文件夹下的所有文件名称
print files1
print "============================"
print files1
print "============================"
# path2 = "/home/xbsj/Desktop/1/"
# filesname2=[]
# for root,dirs,files2 in os.walk(path2):
# print files2
# files2.sort(key= lambda files2:int(files2[11:-4]))
# #files2.sort()
# print "============================"
# print files2
outpath = "E:/Desktop/neutral_test1/1/"
num_file = len(files1)
m = 0
for i in range(num_file//13): # iterate over the folder in groups of 13 files
    files=[]
    tempfile=path1+files1[i*13]
    files.append(tempfile)
    n=1
    m = m+1
    for j in range(4):
        for k in range(3):
            img1=path1+files1[n+i*13]
            print(n+i*13)
            n = n+1
            files.append(img1)
            if len(files)==4:
                tempfile = files[3]
                tempfile = tempfile.split('_')
                exptype=''.join(tempfile[-2])
                img_name = outpath + "%04d_" %m +exptype+".jpg"
                mergeReport(files,img_name)
                tempfile = files[0]
                files=[]
                files.append(tempfile)
print("Finished!")
|
{"hexsha": "d727eceff25ba0739998d7710be6c09289cf2120", "size": 1978, "ext": "py", "lang": "Python", "max_stars_repo_path": "tools/montage.py", "max_stars_repo_name": "zhouxiaowei1120/practice", "max_stars_repo_head_hexsha": "95dd7ffa65f34a867578bea2f80404677cc5f5e5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2017-12-17T15:49:37.000Z", "max_stars_repo_stars_event_max_datetime": "2017-12-17T15:49:37.000Z", "max_issues_repo_path": "tools/montage.py", "max_issues_repo_name": "zhouxiaowei1120/practice", "max_issues_repo_head_hexsha": "95dd7ffa65f34a867578bea2f80404677cc5f5e5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tools/montage.py", "max_forks_repo_name": "zhouxiaowei1120/practice", "max_forks_repo_head_hexsha": "95dd7ffa65f34a867578bea2f80404677cc5f5e5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.4722222222, "max_line_length": 83, "alphanum_fraction": 0.5343781598, "include": true, "reason": "import numpy", "num_tokens": 567}
|
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
url = '../Resources/Photos/cats.jpg'
img = cv.imread(url)
cv.imshow('Cat', img)
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
cv.imshow('gray', gray)
# BGR HSV
hsv = cv.cvtColor(img, cv.COLOR_BGR2HSV)
cv.imshow('HSV', hsv)
# BGR to L*A*B
lab = cv.cvtColor(img, cv.COLOR_BGR2Lab)
cv.imshow('LAB', lab)
# BGR to RGB
RGB = cv.cvtColor(img, cv.COLOR_BGR2RGB)
cv.imshow('RGB', RGB)
# matplotlib displays images in RGB order
plt.imshow(RGB)
plt.show()
cv.waitKey(0)
|
{"hexsha": "505158dd7bd256d3100353f9222d85804440882f", "size": 502, "ext": "py", "lang": "Python", "max_stars_repo_path": "MS2-Advanced/spaces.py", "max_stars_repo_name": "PNightOwlY/opencv-course", "max_stars_repo_head_hexsha": "71f59327a9c2226144c16aaa42157d32bd392cca", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "MS2-Advanced/spaces.py", "max_issues_repo_name": "PNightOwlY/opencv-course", "max_issues_repo_head_hexsha": "71f59327a9c2226144c16aaa42157d32bd392cca", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "MS2-Advanced/spaces.py", "max_forks_repo_name": "PNightOwlY/opencv-course", "max_forks_repo_head_hexsha": "71f59327a9c2226144c16aaa42157d32bd392cca", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 17.9285714286, "max_line_length": 42, "alphanum_fraction": 0.703187251, "include": true, "reason": "import numpy", "num_tokens": 152}
|
import tensorflow as tf
from baselines.common.process_manager import ProcessManager
import numpy as np
import yaml
import zmq
import os
from tqdm import tqdm
from rl_msg_pb2 import *
try:
from mpi4py import MPI
except ImportError:
MPI = None
class ProcessRunner(object):
"""
We use this object to make a mini batch of experiences
__init__:
- Initialize the runner
run():
- Make a mini batch
"""
def __init__(self, env, model, n_env, n_steps, gamma, lam, password,
verbose=0, **network_kwargs):
self.env = env
# assume env spec looks like DummyNAME-v0
self.env_name = env.unwrapped.spec.id.split('-')[0][5:]
self.model = model
self.ob_space = env.observation_space
self.ac_space = env.action_space
self.lam = lam
self.gamma = gamma
self.n_env = n_env
self.n_steps = n_steps
self.verbose = verbose
self.n_layer = network_kwargs['num_layers']
self.n_hidden = network_kwargs['num_hidden']
act_fn = network_kwargs['activation']
        if act_fn is None:
            self.act_fn = NeuralNetworkParam.NONE
        elif act_fn == tf.tanh:
            self.act_fn = NeuralNetworkParam.Tanh
        elif act_fn == tf.nn.relu:
            self.act_fn = NeuralNetworkParam.ReLU
        else:
            raise ValueError("Unsupported activation function: %s" % act_fn)
self.mpi_rank = MPI.COMM_WORLD.Get_rank()
self.parameter_setting()
self.process_manager_list = { str(env_id): ProcessManager(self.ip_control_pc,
self.username, password,
self.execute_cmd+str(env_id), self.exit_cmd+str(env_id) + "'",
self.verbose) for env_id in range(self.n_env) }
if self.verbose >= 1:
print("[[Process Manager created]]")
self.context_list = { str(env_id): zmq.Context.instance() for env_id in range(self.n_env)}
self.data_socket_list = { str(env_idx):None for env_idx in range(self.n_env) }
self.policy_valfn_socket_list = { str(env_idx):None for env_idx in range(self.n_env) }
if self.verbose >= 1:
print("[[Context created]]")
def create_zmq_sockets(self, env_idx):
self.data_socket_list[str(env_idx)] = self.context_list[str(env_idx)].socket(zmq.SUB)
self.data_socket_list[str(env_idx)].setsockopt_string(zmq.SUBSCRIBE, "")
self.data_socket_list[str(env_idx)].connect(self.ip_sub_pub_list[str(env_idx)])
self.policy_valfn_socket_list[str(env_idx)] = self.context_list[str(env_idx)].socket(zmq.REQ)
self.policy_valfn_socket_list[str(env_idx)].connect(self.ip_req_rep_list[str(env_idx)])
if self.verbose >= 1:
print("[[Socket created for %d th Env]]" % env_idx)
def parameter_setting(self):
cfg_path = os.getcwd() + '/Config/' + self.env_name + '/TEST/RL_WALKING_TEST.yaml'
with open(cfg_path) as f:
config = yaml.safe_load(f)
ip_sub_pub_first = config['test_configuration']['protocol']['ip_sub_pub_prefix']
ip_req_rep_first = config['test_configuration']['protocol']['ip_req_rep_prefix']
self.username = config['test_configuration']['protocol']['username']
self.ip_control_pc = config['test_configuration']['protocol']['ip_control_pc']
self.execute_cmd = config['test_configuration']['protocol']['execute_cmd']
self.execute_cmd += ' ' + str(self.mpi_rank) + ' '
self.exit_cmd = config['test_configuration']['protocol']['exit_cmd']
self.exit_cmd += ' ' + str(self.mpi_rank) + ' '
self.ip_sub_pub_list = { str(env_id): ip_sub_pub_first + str(self.mpi_rank) + str(env_id) for env_id in range(self.n_env)}
self.ip_req_rep_list = { str(env_id): ip_req_rep_first + str(self.mpi_rank) + str(env_id) for env_id in range(self.n_env)}
def run_experiment(self, env_idx, policy_param, valfn_param):
        assert (len(policy_param) - 1) // 2 == self.n_layer + 1
        assert len(valfn_param) // 2 == self.n_layer + 1
self.process_manager_list[str(env_idx)].execute_process()
self.pair_and_sync(self.policy_valfn_socket_list[str(env_idx)],
self.data_socket_list[str(env_idx)])
# ==================================================================
# send policy
# ==================================================================
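        # Packing convention (as used by the protobuf schema here): each layer
        # carries num_input, num_output, row-major weights, a bias vector and
        # an activation id; the output layer uses NONE, and the Gaussian
        # policy's per-action log-std vector is appended after the layers.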
pb_policy_param = NeuralNetworkParam()
for l_idx in range(self.n_layer + 1):
weight = policy_param[2*l_idx]
bias = policy_param[2*l_idx+1]
layer = pb_policy_param.layers.add()
layer.num_input = weight.shape[0]
layer.num_output = weight.shape[1]
for w_row in range(weight.shape[0]):
for w_col in range(weight.shape[1]):
layer.weight.append(weight[w_row, w_col])
for b_idx in range(bias.shape[0]):
layer.bias.append(bias[b_idx])
if l_idx == self.n_layer:
layer.act_fn = NeuralNetworkParam.NONE
else:
layer.act_fn = self.act_fn
for action_idx in range(policy_param[-1].shape[-1]):
pb_policy_param.logstd.append((policy_param[-1])[0, action_idx])
pb_policy_param_serialized = pb_policy_param.SerializeToString()
self.policy_valfn_socket_list[str(env_idx)].send(pb_policy_param_serialized)
self.policy_valfn_socket_list[str(env_idx)].recv()
if self.verbose >= 1:
print("[[Policy is set for %d th Env]]" % env_idx)
# ==================================================================
# send value function
# ==================================================================
pb_valfn_param = NeuralNetworkParam()
for l_idx in range(self.n_layer + 1):
weight = valfn_param[2*l_idx]
bias = valfn_param[2*l_idx+1]
layer = pb_valfn_param.layers.add()
layer.num_input = weight.shape[0]
layer.num_output = weight.shape[1]
for w_row in range(weight.shape[0]):
for w_col in range(weight.shape[1]):
layer.weight.append(weight[w_row, w_col])
for b_idx in range(bias.shape[0]):
layer.bias.append(bias[b_idx])
if l_idx == self.n_layer:
layer.act_fn = NeuralNetworkParam.NONE
else:
layer.act_fn = self.act_fn
pb_valfn_param_serialized = pb_valfn_param.SerializeToString()
self.policy_valfn_socket_list[str(env_idx)].send(pb_valfn_param_serialized)
self.policy_valfn_socket_list[str(env_idx)].recv()
if self.verbose >= 1:
print("[[Value function is set for %d th Env]]" % env_idx)
    def pair_and_sync(self, req_rep_socket, sub_pub_socket):
        """Handshake with the experiment process: poll the SUB socket until it
        starts publishing, acknowledging over REQ/REP so both sides start in sync."""
while True:
try:
zmq_msg = sub_pub_socket.recv(zmq.DONTWAIT)
except zmq.ZMQError as e:
if e.errno == zmq.EAGAIN:
req_rep_socket.send(b"nope")
req_rep_socket.recv()
else:
raise
else:
req_rep_socket.send(b"world")
req_rep_socket.recv()
                break
if self.verbose >= 1 :
print("[[Sockets are all paired and synced]]")
def run(self, policy_param, valfn_param):
counts = np.zeros(shape=(self.n_steps, self.n_env), dtype=int)
mb_obs = np.zeros(shape=(self.n_steps, self.n_env, self.ob_space.shape[0]), dtype=np.float32)
mb_rewards = np.zeros(shape=(self.n_steps, self.n_env), dtype=np.float32)
mb_actions = np.zeros(shape=(self.n_steps, self.n_env, self.ac_space.shape[0]), dtype=np.float32)
actions_mean = np.zeros(shape=(self.n_steps, self.n_env, self.ac_space.shape[0]), dtype=np.float32)
mb_values = np.zeros(shape=(self.n_steps, self.n_env), dtype=np.float32)
mb_dones = np.zeros(shape=(self.n_steps, self.n_env), dtype=bool)
mb_neglogpacs = np.zeros(shape=(self.n_steps, self.n_env), dtype=np.float32)
mb_states = None
last_values = np.zeros(shape=(self.n_env))
epinfos=[]
dataset_total_rew=0
cur_ep_ret = np.zeros(shape=(self.n_env), dtype=np.float32)
b_first = np.ones(shape=(self.n_env), dtype=bool)
for env_idx in range(self.n_env):
self.create_zmq_sockets(env_idx)
self.run_experiment(env_idx, policy_param, valfn_param)
for step_idx in tqdm(range(self.n_steps), ncols=80, desc="[Trajectory Roll Out]"):
for env_idx in range(self.n_env):
pb_data = Data()
while(True):
zmq_msg = self.data_socket_list[str(env_idx)].recv()
if not (zmq_msg == b'hello'):
pb_data.ParseFromString(zmq_msg)
if pb_data.ListFields() == []:
assert(False)
else:
break
counts[step_idx, env_idx] = pb_data.count
if b_first[env_idx]:
assert(pb_data.count == 0)
b_first[env_idx] = False
mb_obs[step_idx, env_idx] = pb_data.observation
mb_rewards[step_idx, env_idx] = pb_data.reward
mb_actions[step_idx, env_idx] = pb_data.action
actions_mean[step_idx, env_idx] = pb_data.action_mean
mb_values[step_idx, env_idx] = pb_data.value
mb_dones[step_idx, env_idx] = pb_data.done
mb_neglogpacs[step_idx, env_idx] = pb_data.neglogp
cur_ep_ret[env_idx] += pb_data.reward
dataset_total_rew += pb_data.reward
if pb_data.done:
epinfos.append({'r':cur_ep_ret[env_idx],
'l':pb_data.count})
cur_ep_ret[env_idx] = 0
self.process_manager_list[str(env_idx)].quit_process()
self.create_zmq_sockets(env_idx)
self.run_experiment(env_idx, policy_param, valfn_param)
b_first[env_idx] = True
last_values = np.multiply(mb_values[-1], mb_dones[-1])
for env_idx in range(self.n_env):
self.process_manager_list[str(env_idx)].quit_process()
# __import__('ipdb').set_trace()
# discount/bootstrap off value fn
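        # Generalized Advantage Estimation (GAE), computed backwards in time:
        #   delta_t = r_t + gamma * V(s_{t+1}) * (1 - done_{t+1}) - V(s_t)
        #   A_t     = delta_t + gamma * lam * (1 - done_{t+1}) * A_{t+1}
        # so returns = advantages + values recovers the lambda-return targets.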
mb_returns = np.zeros_like(mb_rewards)
mb_advs = np.zeros_like(mb_rewards)
lastgaelam = 0
for t in reversed(range(self.n_steps)):
if t == self.n_steps - 1:
nextnonterminal = 1.0 - mb_dones[-1]
nextvalues = last_values
else:
nextnonterminal = 1.0 - mb_dones[t+1]
nextvalues = mb_values[t+1]
delta = mb_rewards[t] + self.gamma * nextvalues * nextnonterminal - mb_values[t]
mb_advs[t] = lastgaelam = delta + self.gamma * self.lam * nextnonterminal * lastgaelam
mb_returns = mb_advs + mb_values
return (*map(sf01, (mb_obs, mb_rewards, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs, actions_mean)),
mb_states, epinfos, dataset_total_rew)
def sf01(arr):
"""
swap and then flatten axes 0 and 1
"""
s = arr.shape
return arr.swapaxes(0, 1).reshape(s[0] * s[1], *s[2:])
|
{"hexsha": "50c8ee43a74442774f3a2f8ca0200f51f6ab5c29", "size": 11687, "ext": "py", "lang": "Python", "max_stars_repo_path": "baselines/ppo2/process_runner.py", "max_stars_repo_name": "junhyeokahn/baselines", "max_stars_repo_head_hexsha": "4d07893edd02a5a71d1c5026b6fef9dbbe0822b5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "baselines/ppo2/process_runner.py", "max_issues_repo_name": "junhyeokahn/baselines", "max_issues_repo_head_hexsha": "4d07893edd02a5a71d1c5026b6fef9dbbe0822b5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "baselines/ppo2/process_runner.py", "max_forks_repo_name": "junhyeokahn/baselines", "max_forks_repo_head_hexsha": "4d07893edd02a5a71d1c5026b6fef9dbbe0822b5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 46.9357429719, "max_line_length": 134, "alphanum_fraction": 0.5790194233, "include": true, "reason": "import numpy", "num_tokens": 2619}
|
function shortcutUtils = GetShortcutUtils()
%GETSHORTCUTUTILS Gets an instance of ShortcutUtils.
%
% SHORTCUTUTILS = GETSHORTCUTUTILS() gets an instance of ShortcutUtils.
%
% Examples:
%
% shortcutUtils = GetShortcutUtils();
% methods(shortcutUtils)
% methodsview(shortcutUtils)
% $Author: rcotton $ $Date: 2010/08/23 14:35:09 $ $Revision: 1.1 $
% Copyright: Health and Safety Laboratory 2010
CheckForJVM();
shortcutUtils = com.mathworks.mlwidgets.shortcuts.ShortcutUtils;
end
|
{"author": "Sable", "repo": "mcbench-benchmarks", "sha": "ba13b2f0296ef49491b95e3f984c7c41fccdb6d8", "save_path": "github-repos/MATLAB/Sable-mcbench-benchmarks", "path": "github-repos/MATLAB/Sable-mcbench-benchmarks/mcbench-benchmarks-ba13b2f0296ef49491b95e3f984c7c41fccdb6d8/27567-shortcut-tools/shortcut_tools/GetShortcutUtils.m"}
|
import pandas as pd
import os
import csv
from nltk.sentiment.vader import SentimentIntensityAnalyzer
import re
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
matplotlib.style.use('ggplot')
mainMessageCorpus = pd.read_csv("fullText_Clean.csv",header=0, \
delimiter=",", skip_blank_lines = True)
mainMessageCorpus.dropna(how="all",inplace=False)
mainMessageCorpus = mainMessageCorpus[pd.notnull(mainMessageCorpus["Text Body"])]
#Using the vaderSentiment Analysis
analyzer = SentimentIntensityAnalyzer()
polarityList = []
for body in mainMessageCorpus["Text Body"]:
if body:
vs = analyzer.polarity_scores(body)
        polarityList.append(vs)
dateList = []
nameList = []
bodyList = []
#Remove empty lines row by row for Date
for date in mainMessageCorpus["Date"]:
if date:
dateList.append(date)
#Remove empty lines row by row for Name
for name in mainMessageCorpus["Name"]:
if name:
nameList.append(name)
#Remove empty lines row by row for TextBody
for text in mainMessageCorpus["Text Body"]:
if text:
bodyList.append(text)
dateDf = pd.DataFrame(dateList,columns=["Date"])
nameDf = pd.DataFrame(nameList,columns=["Name"])
textBodyDf = pd.DataFrame(bodyList,columns=["Text Body"])
# Build the polarity DataFrame directly from the score dicts instead of
# parsing their string representations.
splitDf = pd.DataFrame(polarityList)
splitDf = splitDf.rename(columns={"neg": "Neg", "neu": "Neu", "pos": "Pos", "compound": "Compound"})
combinedDf = pd.concat([dateDf, nameDf, textBodyDf, splitDf],axis=1)
nameFreqCount = combinedDf['Name'].value_counts().to_dict()
nameFreqDf = pd.DataFrame(list(nameFreqCount.items()),columns=['Name','Frequency'])
plt.pie(
nameFreqDf['Frequency'],
labels=nameFreqDf['Name'],
shadow=False,
colors=None,
)
plt.axis('equal')
plt.tight_layout()
plt.show(block = True)
|
{"hexsha": "d3da5e5d0fa08152664d1e5e1ddf9fc316bf320e", "size": 2215, "ext": "py", "lang": "Python", "max_stars_repo_path": "MessageAnalysis.py", "max_stars_repo_name": "Beefstyles/FacebookMessageSentimentAnalysis", "max_stars_repo_head_hexsha": "c11959c54e09d1c804390b4b23aab0a2ac3f681b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-08-31T03:29:48.000Z", "max_stars_repo_stars_event_max_datetime": "2018-08-31T03:29:48.000Z", "max_issues_repo_path": "MessageAnalysis.py", "max_issues_repo_name": "Beefstyles/FacebookMessageSentimentAnalysis", "max_issues_repo_head_hexsha": "c11959c54e09d1c804390b4b23aab0a2ac3f681b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "MessageAnalysis.py", "max_forks_repo_name": "Beefstyles/FacebookMessageSentimentAnalysis", "max_forks_repo_head_hexsha": "c11959c54e09d1c804390b4b23aab0a2ac3f681b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2018-03-22T20:35:12.000Z", "max_forks_repo_forks_event_max_datetime": "2018-03-22T20:35:12.000Z", "avg_line_length": 27.3456790123, "max_line_length": 83, "alphanum_fraction": 0.6948081264, "include": true, "reason": "import numpy", "num_tokens": 590}
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import glob
from plotly import graph_objects as go
import ppscore as pps
import seaborn as sns
# Data import
files = sorted(glob.glob("01_data/raw/*"))
# pokemon_index = pd.read_csv(files[0], sep="|")
# pokemon = pd.read_csv(files[1], sep="|")
battle = pd.read_csv(files[2], sep="|")
# submission = pd.read_csv(files[3], sep="|")
# weakness = pd.read_csv(files[4], sep="|")
# Battle
# Target variable
plt.boxplot(battle.BattleResult)
plt.show()
plt.hist(battle.BattleResult, bins=30)
plt.show()
# Create heatmap - pokemon vs pokemon
target = "BattleResult"
grouping_var = ["Name_1", "Name_2"]
mean_rslts = battle.groupby(grouping_var, as_index=False)[target].mean()
battle_pokemons = mean_rslts.Name_1.unique()
add = pd.DataFrame(data=dict(Name_1=battle_pokemons,
Name_2=battle_pokemons,
BattleResult=np.nan))
mean_rslts = pd.concat([mean_rslts, add], axis=0)
mean_rslts.sort_values(["Name_1", "Name_2"], inplace=True)
rslts = mean_rslts.BattleResult.values.copy()
n_pokemon = len(battle_pokemons)
rslts.resize([n_pokemon, n_pokemon])
# Normalization
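# Min-max rescale to [0, 1]: x' = (x - min) / (max - min). Since the mean
# battle results here span negative to positive, adding |min| and dividing by
# |min| + |max| is the same thing.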
rslts_min, rslts_max = np.abs(np.nanmin(rslts)), np.abs(np.nanmax(rslts))
rslts_norm = (rslts + rslts_min) / (rslts_min + rslts_max)
rslts_norm = np.nan_to_num(rslts_norm, nan=0)
# Plot
fig = go.Figure(data=go.Heatmap(
z=rslts_norm,
x=mean_rslts.Name_1.unique(),
y=mean_rslts.Name_2.unique(),
colorscale='Viridis'))
fig.update_layout(
title='Mean Battle Results',
xaxis_nticks=50)
fig.show()
# Predictive Power Score
pps_matrix = pps.matrix(battle, sample=5000)
fig = go.Figure(data=go.Heatmap(
z=pps_matrix.values,
x=pps_matrix.index,
y=pps_matrix.columns,
colorscale='Viridis'))
fig.update_layout(
title='Predictive Power Score',
xaxis_nticks=50)
fig.show()
|
{"hexsha": "34e74c270736e288e4c4f12c23e1718c7315d4e6", "size": 1887, "ext": "py", "lang": "Python", "max_stars_repo_path": "02_code/exploration/eda.py", "max_stars_repo_name": "franperic/pokehackathon", "max_stars_repo_head_hexsha": "c91977d630914d738de995ddddd1791b5b7b23b9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "02_code/exploration/eda.py", "max_issues_repo_name": "franperic/pokehackathon", "max_issues_repo_head_hexsha": "c91977d630914d738de995ddddd1791b5b7b23b9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-05-21T14:34:49.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T01:51:21.000Z", "max_forks_repo_path": "02_code/exploration/eda.py", "max_forks_repo_name": "franperic/pokehackathon", "max_forks_repo_head_hexsha": "c91977d630914d738de995ddddd1791b5b7b23b9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.5774647887, "max_line_length": 72, "alphanum_fraction": 0.6910439852, "include": true, "reason": "import numpy", "num_tokens": 550}
|
import numpy as np
import frozen_lake
newletter = b"1"
if __name__ == '__main__':
pos = 14
print(pos // 5, pos % 5)
|
{"hexsha": "f0464f79438b3b2d3e10522125e42fb7b97729d1", "size": 125, "ext": "py", "lang": "Python", "max_stars_repo_path": "Lux_Project_Env/test.py", "max_stars_repo_name": "WittyTheMighty/LUX_AI_Project", "max_stars_repo_head_hexsha": "39e302798ed6cdb98b098fd2d2bb02b3d5eda762", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Lux_Project_Env/test.py", "max_issues_repo_name": "WittyTheMighty/LUX_AI_Project", "max_issues_repo_head_hexsha": "39e302798ed6cdb98b098fd2d2bb02b3d5eda762", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Lux_Project_Env/test.py", "max_forks_repo_name": "WittyTheMighty/LUX_AI_Project", "max_forks_repo_head_hexsha": "39e302798ed6cdb98b098fd2d2bb02b3d5eda762", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 15.625, "max_line_length": 28, "alphanum_fraction": 0.632, "include": true, "reason": "import numpy", "num_tokens": 43}
|
import numpy as np
import arcpy
import netCDF4
from netCDF4 import Dataset
##read the netCDF file and print metadata
test_file = "C:\\Users\\Lance\\Documents\\GitHub\\dnppy\\undeployed\\CDRs\\PERSIANN-CDR_v01r01_19890523_c20140523.nc"
fh = Dataset(test_file,'r')
print(fh) #this prints all of the metadata info
variables =[] #variables hold the actual data like precipitation, lat values, lon values
dimensions = [] #hold A netCDF dimension used to define the x/y, or lat/lon, coordinates of variables will be placed.
#Below directly calls the attribute metadata info from the Ordered Dict of the Dataset Class.
fh.cdr_variable #calls the cdr variable
fh.geospatial_lat_min
fh.geospatial_lat_max
fh.geospatial_lon_min
fh.geospatial_lon_max
fh.geospatial_lat_units
fh.geospatial_lat_resolution
fh.geospatial_lon_units
fh.geospatial_lon_resolution
fh.spatial_resolution
var_array = fh.variables[fh.cdr_variable][:] #creates a numpy array of variable, if masked it will be masked array.
v_obj = fh.variables[fh.cdr_variable]
v_obj.dimensions #tells you how the variable is stored.
lat_dim_name = v_obj.dimensions[2]
lon_dim_name = v_obj.dimensions[1]
time_dim_name = v_obj.dimensions[0]
var_array.shape #This returns the length of each dimension for the specified variable in the arrary
lat_dim_size = var_array.shape[2]
lon_dim_size = var_array.shape[1]
time_dim_size = var_array.shape[0]
#In this section I'm looking through the lat/lon variables to find how values are indexed
first_lat = fh.variables[lat_dim_name][0]
last_lat = fh.variables[lat_dim_name][lat_dim_size - 1]
first_lon = fh.variables[lon_dim_name][0]
last_lon = fh.variables[lon_dim_name][lon_dim_size - 1]
#precip_arr.shape = (480,1440) #reshapes the array to match the lat/long earth, lon = columns and lat = rows
"""
Subsetting a box should be easy. As long as I know the index values for the corners,
I should be able to slice the variable directly,
i.e. find the lats/lons of the data that are within the user provided lats/lons.
lat_int = fh.geospatial_lat_resolution
lon_int = fh.geospatial_lon_resolution
fh.variables[fh.cdr_variable][0, lon1i:lon2i, lat1i:lat2i]
#pull out specific chunks based on time, lon, lat indexed values.
#User input needs to match geospatial_lat_units and geospatial_lon_units and be divisable by resolution
I need to calculated my index values for lat and lon, which will be based on user input.
Steps:
1) Check to see if user lat/lon units match the data's lat/lon units, if so convert
2) Convert units if necessary
3) Find out where the user coordinates fall on the grid of lat/lons which set at resolution intervals
lat_arr[0] <- 59.875
lat_arr[479] <- -59.875
lon_arr[0] <- 0.125
lon_arr[1439] <- 359.875
"""
#User defined bounding box, format (lat,lon)
TL = [42.258,273.501]
TR = [42.258,275.503]
BR = [37.261,275.503]
BL = [37.261,273.501]
# Assume user defined bounding box will be decimal degree
# 1) Check to see if user lat/lon units match data's lat/lon
# I don't know that CDRs will all have same lat/long units. But makes sense that they would.
if -180 < fh.geospatial_lon_min < 0:
    units = 'Decimal Degree (-180 to 180)'
if fh.geospatial_lon_max > 180:
    units = 'Decimal Degree (0 to 360)'  # matches the 0-360 longitudes in the user box above
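# Illustrative sketch only: turn the user box into array indices, assuming the
# resolution attributes are numeric and the grid is regular, with latitude
# decreasing along its axis (59.875 down to -59.875) and longitude increasing
# (0.125 up to 359.875). The hypothetical names lat1i/lat2i/lon1i/lon2i are
# the indices used in the subset below.
lat_res = fh.geospatial_lat_resolution
lon_res = fh.geospatial_lon_resolution
lat1i = int(round((first_lat - TL[0]) / lat_res))
lat2i = int(round((first_lat - BL[0]) / lat_res))
lon1i = int(round((TL[1] - first_lon) / lon_res))
lon2i = int(round((TR[1] - first_lon) / lon_res))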
#Subset
subset_array = v_obj[0,lon1i:lon2i,lat1i:lat2i]
#Save numpy array as raster or tif
raster = arcpy.NumPyArrayToRaster(subset_array)
raster.save(output_workplace + name)  # output_workplace and name are placeholders to be defined
|
{"hexsha": "d5da33c606aac680016b062334f40a04857c49ec", "size": 3380, "ext": "py", "lang": "Python", "max_stars_repo_path": "undeployed/subjects/CDRs/NetCDF4_Numpy.py", "max_stars_repo_name": "NASA-DEVELOP/dnppy", "max_stars_repo_head_hexsha": "8f7ef6f0653f5a4ea730ee557c72a2c89c06ce0b", "max_stars_repo_licenses": ["NASA-1.3"], "max_stars_count": 65, "max_stars_repo_stars_event_min_datetime": "2015-09-10T12:59:56.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-27T22:09:03.000Z", "max_issues_repo_path": "undeployed/subjects/CDRs/NetCDF4_Numpy.py", "max_issues_repo_name": "snowzm/dnppy", "max_issues_repo_head_hexsha": "8f7ef6f0653f5a4ea730ee557c72a2c89c06ce0b", "max_issues_repo_licenses": ["NASA-1.3"], "max_issues_count": 40, "max_issues_repo_issues_event_min_datetime": "2015-04-08T19:23:30.000Z", "max_issues_repo_issues_event_max_datetime": "2015-08-04T15:53:11.000Z", "max_forks_repo_path": "undeployed/subjects/CDRs/NetCDF4_Numpy.py", "max_forks_repo_name": "snowzm/dnppy", "max_forks_repo_head_hexsha": "8f7ef6f0653f5a4ea730ee557c72a2c89c06ce0b", "max_forks_repo_licenses": ["NASA-1.3"], "max_forks_count": 45, "max_forks_repo_forks_event_min_datetime": "2015-08-14T19:09:38.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-15T18:53:16.000Z", "avg_line_length": 40.2380952381, "max_line_length": 117, "alphanum_fraction": 0.7798816568, "include": true, "reason": "import numpy", "num_tokens": 943}
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import SGD, lr_scheduler
from sklearn.metrics.cluster import normalized_mutual_info_score as nmi_score
from sklearn.metrics import adjusted_rand_score as ari_score
from sklearn.cluster import KMeans, DBSCAN
from utils.util import BCE, PairEnum, cluster_acc, Identity, AverageMeter, seed_torch, BCE_softlabels
from utils import ramps
from models.resnet import ResNet, BasicBlock
from data.cifarloader import CIFAR10Loader, CIFAR10LoaderMix, CIFAR100Loader, CIFAR100LoaderMix
from data.svhnloader import SVHNLoader, SVHNLoaderMix
from tqdm import tqdm
import numpy as np
import os
from models.NCL import NCLMemory
from utils.spacing import CentroidManager
def train(model, train_loader, unlabeled_eval_loader, args):
print ('Start Neighborhood Contrastive Learning:')
optimizer = SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=args.step_size, gamma=args.gamma)
criterion1 = nn.CrossEntropyLoss()
criterion2 = BCE()
mse = nn.MSELoss()
spacing_loss_start_epoch = 5
enable_spacing_loss = False
enable_NCL_loss = True
n_clusters = 10
beta = 5
cm = CentroidManager(512, n_clusters)
for epoch in range(args.epochs):
if epoch == spacing_loss_start_epoch:
# Extract features
model.eval()
all_features = []
for (data, _), _, _ in train_loader:
data = data.to(device)
_, feat, _, _ = model(data, 'feat_logit')
all_features.append(feat.detach().cpu().numpy())
all_features = np.vstack(all_features)
# Initialize
cm.init_clusters(all_features)
enable_spacing_loss = True
print('Initialized spacing loss.')
loss_record = AverageMeter()
model.train()
exp_lr_scheduler.step()
w = args.rampup_coefficient * ramps.sigmoid_rampup(epoch, args.rampup_length)
for batch_idx, ((x, x_bar), label, idx) in enumerate(tqdm(train_loader)):
x, x_bar, label = x.to(device), x_bar.to(device), label.to(device)
idx = idx.to(device)
mask_lb = label < args.num_labeled_classes
feat, feat_q, output1, output2 = model(x, 'feat_logit')
feat_bar, feat_k, output1_bar, output2_bar = model(x_bar, 'feat_logit')
prob1, prob1_bar, prob2, prob2_bar = F.softmax(output1, dim=1), F.softmax(output1_bar, dim=1), F.softmax(
output2, dim=1), F.softmax(output2_bar, dim=1)
rank_feat = (feat[~mask_lb]).detach()
if args.bce_type == 'cos':
# default: cosine similarity with threshold
feat_row, feat_col = PairEnum(F.normalize(rank_feat, dim=1))
tmp_distance_ori = torch.bmm(feat_row.view(feat_row.size(0), 1, -1), feat_col.view(feat_row.size(0), -1, 1))
tmp_distance_ori = tmp_distance_ori.squeeze()
target_ulb = torch.zeros_like(tmp_distance_ori).float() - 1
target_ulb[tmp_distance_ori > args.costhre] = 1
elif args.bce_type == 'RK':
                # top-k rank statistics
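                # Two unlabeled samples form a pseudo-positive pair when the
                # index sets of their top-k activations coincide (rank_diff == 0),
                # and a pseudo-negative pair otherwise.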
rank_idx = torch.argsort(rank_feat, dim=1, descending=True)
rank_idx1, rank_idx2 = PairEnum(rank_idx)
rank_idx1, rank_idx2 = rank_idx1[:, :args.topk], rank_idx2[:, :args.topk]
rank_idx1, _ = torch.sort(rank_idx1, dim=1)
rank_idx2, _ = torch.sort(rank_idx2, dim=1)
rank_diff = rank_idx1 - rank_idx2
rank_diff = torch.sum(torch.abs(rank_diff), dim=1)
target_ulb = torch.ones_like(rank_diff).float().to(device)
target_ulb[rank_diff > 0] = -1
prob1_ulb, _ = PairEnum(prob2[~mask_lb])
_, prob2_ulb = PairEnum(prob2_bar[~mask_lb])
# basic loss
loss_ce = criterion1(output1[mask_lb], label[mask_lb])
loss_bce = criterion2(prob1_ulb, prob2_ulb, target_ulb)
consistency_loss = F.mse_loss(prob1, prob1_bar) + F.mse_loss(prob2, prob2_bar)
loss = loss_ce + loss_bce + w * consistency_loss
# Spacing loss
if enable_spacing_loss:
spacing_loss = torch.tensor(0.).to(device)
features = feat_q.detach().cpu().numpy()
                # Re-assign each feature to its nearest centroid
cluster_ids = cm.update_assingment(features)
# Update centroids
elem_count = np.bincount(cluster_ids, minlength=n_clusters)
for k in range(n_clusters):
if elem_count[k] == 0:
continue
cm.update_cluster(features[cluster_ids == k], k)
# Compute loss
batch_size = feat_q.size()[0]
centroids = torch.FloatTensor(cm.centroids).to(device)
for i in range(batch_size):
# diff = feat_q[i] - centroids[cluster_ids[i]]
# distance = torch.matmul(diff.view(1, -1), diff.view(-1, 1))
# spacing_loss += 0.5 * beta * torch.squeeze(distance)
spacing_loss += 0.5 * beta * mse(feat_q[i], centroids[cluster_ids[i]])
loss += spacing_loss
if enable_NCL_loss:
# NCL loss for unlabeled data
loss_ncl_ulb = ncl_ulb(feat_q[~mask_lb], feat_k[~mask_lb], label[~mask_lb], epoch, False, ncl_la.memory.clone().detach())
# NCL loss for labeled data
loss_ncl_la = ncl_la(feat_q[mask_lb], feat_k[mask_lb], label[mask_lb], epoch, True)
if epoch > 0:
loss += loss_ncl_ulb * args.w_ncl_ulb + loss_ncl_la * args.w_ncl_la
else:
loss += loss_ncl_la * args.w_ncl_la
# ===================backward=====================
loss_record.update(loss.item(), x.size(0))
optimizer.zero_grad()
loss.backward()
optimizer.step()
args.head = 'head2'
print('Train Epoch: {} Avg Loss: {:.4f}'.format(epoch, loss_record.avg))
print('test on unlabeled classes')
test(model, unlabeled_eval_loader, args)
def train_old(model, train_loader, unlabeled_eval_loader, args):
print ('Start Neighborhood Contrastive Learning:')
optimizer = SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=args.step_size, gamma=args.gamma)
criterion1 = nn.CrossEntropyLoss()
criterion2 = BCE()
for epoch in range(args.epochs):
loss_record = AverageMeter()
model.train()
exp_lr_scheduler.step()
w = args.rampup_coefficient * ramps.sigmoid_rampup(epoch, args.rampup_length)
for batch_idx, ((x, x_bar), label, idx) in enumerate(tqdm(train_loader)):
x, x_bar, label = x.to(device), x_bar.to(device), label.to(device)
idx = idx.to(device)
mask_lb = label < args.num_labeled_classes
feat, feat_q, output1, output2 = model(x, 'feat_logit')
feat_bar, feat_k, output1_bar, output2_bar = model(x_bar, 'feat_logit')
prob1, prob1_bar, prob2, prob2_bar = F.softmax(output1, dim=1), F.softmax(output1_bar, dim=1), F.softmax(
output2, dim=1), F.softmax(output2_bar, dim=1)
rank_feat = (feat[~mask_lb]).detach()
if args.bce_type == 'cos':
# default: cosine similarity with threshold
feat_row, feat_col = PairEnum(F.normalize(rank_feat, dim=1))
tmp_distance_ori = torch.bmm(feat_row.view(feat_row.size(0), 1, -1), feat_col.view(feat_row.size(0), -1, 1))
tmp_distance_ori = tmp_distance_ori.squeeze()
target_ulb = torch.zeros_like(tmp_distance_ori).float() - 1
target_ulb[tmp_distance_ori > args.costhre] = 1
elif args.bce_type == 'RK':
                # top-k rank statistics
rank_idx = torch.argsort(rank_feat, dim=1, descending=True)
rank_idx1, rank_idx2 = PairEnum(rank_idx)
rank_idx1, rank_idx2 = rank_idx1[:, :args.topk], rank_idx2[:, :args.topk]
rank_idx1, _ = torch.sort(rank_idx1, dim=1)
rank_idx2, _ = torch.sort(rank_idx2, dim=1)
rank_diff = rank_idx1 - rank_idx2
rank_diff = torch.sum(torch.abs(rank_diff), dim=1)
target_ulb = torch.ones_like(rank_diff).float().to(device)
target_ulb[rank_diff > 0] = -1
prob1_ulb, _ = PairEnum(prob2[~mask_lb])
_, prob2_ulb = PairEnum(prob2_bar[~mask_lb])
# basic loss
loss_ce = criterion1(output1[mask_lb], label[mask_lb])
loss_bce = criterion2(prob1_ulb, prob2_ulb, target_ulb)
consistency_loss = F.mse_loss(prob1, prob1_bar) + F.mse_loss(prob2, prob2_bar)
loss = loss_ce + loss_bce + w * consistency_loss
# # NCL loss for unlabeled data
# loss_ncl_ulb = ncl_ulb(feat_q[~mask_lb], feat_k[~mask_lb], label[~mask_lb], epoch, False, ncl_la.memory.clone().detach())
# # NCL loss for labeled data
# loss_ncl_la = ncl_la(feat_q[mask_lb], feat_k[mask_lb], label[mask_lb], epoch, True)
# if epoch > 0:
# loss += loss_ncl_ulb * args.w_ncl_ulb + loss_ncl_la * args.w_ncl_la
# else:
# loss += loss_ncl_la * args.w_ncl_la
# ===================backward=====================
loss_record.update(loss.item(), x.size(0))
optimizer.zero_grad()
loss.backward()
optimizer.step()
args.head = 'head2'
print('Train Epoch: {} Avg Loss: {:.4f}'.format(epoch, loss_record.avg))
print('test on unlabeled classes')
test(model, unlabeled_eval_loader, args)
def test(model, test_loader, args):
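    # Number of clusters for the k-means run over features; assumed here to
    # match args.num_unlabeled_classes (5 by default).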
n_classes = 5
model.eval()
preds = np.array([])
targets = np.array([])
features = []
for batch_idx, (x, label, _) in enumerate(tqdm(test_loader)):
x, label = x.to(device), label.to(device)
feat, feat_q, output1, output2 = model(x, 'feat_logit')
if args.head == 'head1':
output = output1
else:
output = output2
_, pred = output.max(1)
targets = np.append(targets, label.cpu().numpy())
preds = np.append(preds, pred.cpu().numpy())
features.extend(feat_q.detach().cpu().numpy())
predictions = KMeans(n_clusters=n_classes, n_init=20).fit_predict(np.array(features))
acc, nmi, ari = cluster_acc(targets.astype(int), preds.astype(int)), nmi_score(targets, preds), ari_score(targets, preds)
acc_f, nmi_f, ari_f = cluster_acc(targets.astype(int), predictions.astype(int)), nmi_score(targets, predictions), ari_score(targets, predictions)
print('From logits \t: Test acc {:.4f}, nmi {:.4f}, ari {:.4f}'.format(acc, nmi, ari))
print('From features\t: Test acc {:.4f}, nmi {:.4f}, ari {:.4f}'.format(acc_f, nmi_f, ari_f))
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(
description='cluster',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--lr', type=float, default=0.1)
parser.add_argument('--gamma', type=float, default=0.1)
parser.add_argument('--momentum', type=float, default=0.9)
parser.add_argument('--weight_decay', type=float, default=1e-4)
parser.add_argument('--epochs', default=200, type=int)
parser.add_argument('--rampup_length', default=150, type=int)
parser.add_argument('--rampup_coefficient', type=float, default=50)
parser.add_argument('--step_size', default=170, type=int)
parser.add_argument('--batch_size', default=128, type=int)
parser.add_argument('--num_unlabeled_classes', default=5, type=int)
parser.add_argument('--num_labeled_classes', default=5, type=int)
parser.add_argument('--dataset_root', type=str, default='./data/datasets/CIFAR/')
parser.add_argument('--exp_root', type=str, default='./data/experiments/')
parser.add_argument('--warmup_model_dir', type=str, default='./data/experiments/pretrained/auto_novel/resnet_rotnet_cifar10.pth')
parser.add_argument('--topk', default=5, type=int)
parser.add_argument('--model_name', type=str, default='resnet')
parser.add_argument('--dataset_name', type=str, default='cifar10', help='options: cifar10, cifar100')
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--mode', type=str, default='train')
parser.add_argument('--bce_type', type=str, default='cos')
parser.add_argument('--hard_negative_start', default=1000, type=int)
parser.add_argument('--knn', default=-1, type=int)
parser.add_argument('--w_ncl_la', type=float, default=0.1)
parser.add_argument('--w_ncl_ulb', type=float, default=1.0)
parser.add_argument('--costhre', type=float, default=0.95)
parser.add_argument('--m_size', default=2000, type=int)
parser.add_argument('--m_t', type=float, default=0.05)
parser.add_argument('--w_pos', type=float, default=0.2)
parser.add_argument('--hard_iter', type=int, default=5)
parser.add_argument('--num_hard', type=int, default=400)
args = parser.parse_args()
args.cuda = torch.cuda.is_available()
device = torch.device("cuda" if args.cuda else "cpu")
torch.backends.cudnn.benchmark = True
seed_torch(args.seed)
runner_name = os.path.basename(__file__).split(".")[0]
model_dir = os.path.join(args.exp_root, runner_name)
if not os.path.exists(model_dir):
os.makedirs(model_dir)
args.model_dir = model_dir+'/'+'{}.pth'.format(args.model_name)
model = ResNet(BasicBlock, [2,2,2,2], args.num_labeled_classes, args.num_unlabeled_classes).to(device)
num_classes = args.num_labeled_classes + args.num_unlabeled_classes
def copy_param(model, pretrain_dir):
pre_dict = torch.load(pretrain_dir)
new = list(pre_dict.items())
dict_len = len(pre_dict.items())
model_kvpair = model.state_dict()
count = 0
for count in range(dict_len):
layer_name, weights = new[count]
if 'contrastive_head' not in layer_name and 'shortcut' not in layer_name:
if 'backbone' in layer_name:
model_kvpair[layer_name[9:]] = weights
# else:
# model_kvpair[layer_name] = weights
print (layer_name[9:])
else:
continue
model.load_state_dict(model_kvpair)
return model
if args.mode == 'train':
state_dict = torch.load(args.warmup_model_dir)
model.load_state_dict(state_dict, strict=False)
for name, param in model.named_parameters():
if 'head' not in name and 'layer4' not in name:
param.requires_grad = False
if args.dataset_name == 'cifar10':
mix_train_loader = CIFAR10LoaderMix(root=args.dataset_root, batch_size=args.batch_size, split='train', aug='twice', shuffle=True, labeled_list=range(args.num_labeled_classes), unlabeled_list=range(args.num_labeled_classes, num_classes))
unlabeled_eval_loader = CIFAR10Loader(root=args.dataset_root, batch_size=args.batch_size, split='train', aug=None, shuffle=False, target_list = range(args.num_labeled_classes, num_classes))
unlabeled_eval_loader_test = CIFAR10Loader(root=args.dataset_root, batch_size=args.batch_size, split='test', aug=None, shuffle=False, target_list = range(args.num_labeled_classes, num_classes))
labeled_eval_loader = CIFAR10Loader(root=args.dataset_root, batch_size=args.batch_size, split='test', aug=None, shuffle=False, target_list = range(args.num_labeled_classes))
elif args.dataset_name == 'cifar100':
mix_train_loader = CIFAR100LoaderMix(root=args.dataset_root, batch_size=args.batch_size, split='train', aug='twice', shuffle=True, labeled_list=range(args.num_labeled_classes), unlabeled_list=range(args.num_labeled_classes, num_classes))
unlabeled_eval_loader = CIFAR100Loader(root=args.dataset_root, batch_size=args.batch_size, split='train', aug=None, shuffle=False, target_list = range(args.num_labeled_classes, num_classes))
unlabeled_eval_loader_test = CIFAR100Loader(root=args.dataset_root, batch_size=args.batch_size, split='test', aug=None, shuffle=False, target_list = range(args.num_labeled_classes, num_classes))
labeled_eval_loader = CIFAR100Loader(root=args.dataset_root, batch_size=args.batch_size, split='test', aug=None, shuffle=False, target_list = range(args.num_labeled_classes))
ncl_ulb = NCLMemory(512, args.m_size, args.m_t, args.num_unlabeled_classes, args.knn, args.w_pos, args.hard_iter, args.num_hard, args.hard_negative_start).to(device)
ncl_la = NCLMemory(512, args.m_size, args.m_t, args.num_labeled_classes, args.knn, args.w_pos, args.hard_iter, args.num_hard, args.hard_negative_start).to(device)
if args.mode == 'train':
train(model, mix_train_loader, unlabeled_eval_loader, args)
torch.save(model.state_dict(), args.model_dir)
print("model saved to {}.".format(args.model_dir))
else:
print("model loaded from {}.".format(args.model_dir))
model.load_state_dict(torch.load(args.model_dir))
print('Evaluating on Head1')
args.head = 'head1'
print('test on labeled classes (test split)')
test(model, labeled_eval_loader, args)
print('Evaluating on Head2')
args.head = 'head2'
print('test on unlabeled classes (train split)')
test(model, unlabeled_eval_loader, args)
print('test on unlabeled classes (test split)')
test(model, unlabeled_eval_loader_test, args)
|
{"hexsha": "3308d28ed67c7ff99ff5ea19521d8667848fc118", "size": 18061, "ext": "py", "lang": "Python", "max_stars_repo_path": "ncl_cifar.py", "max_stars_repo_name": "JosephKJ/NCL", "max_stars_repo_head_hexsha": "e40bcbb6caf0f02764f46c1abc1e9597b6c96103", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ncl_cifar.py", "max_issues_repo_name": "JosephKJ/NCL", "max_issues_repo_head_hexsha": "e40bcbb6caf0f02764f46c1abc1e9597b6c96103", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ncl_cifar.py", "max_forks_repo_name": "JosephKJ/NCL", "max_forks_repo_head_hexsha": "e40bcbb6caf0f02764f46c1abc1e9597b6c96103", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 50.3091922006, "max_line_length": 245, "alphanum_fraction": 0.6416588229, "include": true, "reason": "import numpy", "num_tokens": 4393}
|
\documentclass[a4paper, 12pt]{article}
\usepackage[english]{babel}
\usepackage[utf8]{inputenc}
\usepackage [autostyle, english = american]{csquotes}
\MakeOuterQuote{"}
\usepackage{url}
\usepackage{import}
\usepackage{tabularx}
\usepackage{booktabs}
\usepackage{amsmath}
\usepackage{amsfonts}
\usepackage{graphicx}
\usepackage[margin=1.25in]{geometry}
\usepackage{caption}
\usepackage{multirow}
\usepackage[table]{xcolor}
\usepackage{rotating}
\usepackage{mathtools}
\usepackage[multiple]{footmisc}
\usepackage{xr}
\usepackage{breakcites}
\usepackage{matlab-prettifier}
\usepackage[]{mcode}
\usepackage{listings}
\usepackage{color}
\usepackage{hyperref}
\usepackage{authblk}
\title {Profile Ranking Adaptive Choice-Based Conjoint Analysis: A Simple Standby to Utility-Based Analysis for Small Survey Populations}
\author[1]{}
% \author[1]{Skyler Laney\thanks{skyler.laney@my.wheaton.edu}}
% \author[1]{Leo O'Malley\thanks{leo.omalley@my.wheaton.edu}}
% \author[1]{Cathy Shi\thanks{cathy.shi@my.wheaton.edu}}
% \author[1]{Danilo Diedrichs\thanks{danilo.diedrichs@wheaton.edu}}
% \author[1]{Nate Schatz\thanks{Nate.Schatz@my.wheaton.edu}
% \affil[1]{Department of Mathematics, Wheaton College}
\date{}
\definecolor{mygreen}{RGB}{28,172,0} % color values Red, Green, Blue
\definecolor{mylilas}{RGB}{170,55,241}
\usepackage{gensymb}
\usepackage{makeidx}
\makeindex
\pagestyle{empty}
\usepackage{endnotes}
\usepackage{lineno}
%\linenumbers
\begin{document}
%%%%For color MATLAB Scripts
\lstset
{ %Formatting for code in appendix
language=Matlab,
basicstyle=\scriptsize,
numbers=left,
stepnumber=1,
showstringspaces=false,
tabsize=1,
breaklines=true,
breakatwhitespace=false,
}
\maketitle
\hrulefill
\externaldocument{targettable}
\externaldocument{SimpleEquityModel}
\vspace{.7in}
\begin{abstract}
Serving as a standby to support Adaptive Choice-Based Conjoint (ACBC) surveys of very small populations, a new methodology called profile ranking based ACBC (PR-ACBC) is introduced as a simple way to work around partworth utilities with possibly large variances. Without requiring any knowledge of partworth utilities, PR-ACBC begins with a simple computation using choice task data to obtain both individual and sample mean profile rankings and their derived profile attribute level (PAL) rankings. Maximum likelihood estimation of profile rankings for both a known population size $N$ (via a multivariate hypergeometric distribution) and an unknown $N$ (via Lagrange multiplier optimization) provides point estimates of population profile and PAL rankings. The sample PAL rankings are also especially useful for multidimensional scaling (MDS), which offers a two-dimensional visual representation of similarities/dissimilarities in respondents. Finally, PR-ACBC methodology is applied to a recent survey administered to a small population of disaster relief organizations. PAL rankings are compared with partworth utilities with respect to accuracy of choice task predictions and ranking of attribute importances.
\end{abstract}
%% THIS IS FOR A SHORT SCRIPT
% \begin{table}[!htpb]
% \begin{tabular}{|l|}\toprule
% {\bf MATLABScript.m}\\\hline
% \parbox[b]{5.75in}{\lstinputlisting[style=Matlab-editor]{MATLABScript.m}}\\\hline\hline
% \bottomrule
% \end{tabular}
% \end{table}
%% THIS IS FOR A LONG SCRIPT WHICH MUST BE SPLIT INTO
%% SHORTER BLOCK OF CODE YOU CAN SPECIFY THE
%% RANGE OF LINE NUMBERS DISPLAYED AND NUMBER
%% OF THE FIRST LINE
% \begin{table}[!htpb]
% \centering
% \begin{tabular}{|l|}\hline
% MATLABScript.m. (p1 of 1)\\\hline
% \parbox[b]{5.8in}{\lstinputlisting[style=Matlab-editor,firstline=20, lastline=32, firstnumber=20]{MATLABScript.m}}\\\hline
% \end{tabular}
% \end{table}
\vspace{1in}
\section{Introduction}
Adaptive Choice-Based Conjoint (ACBC) analysis surveys are a widely-utilized, well-developed, and highly effective type of conjoint analysis (Orme and Chrzan, 2017). In light of the fact that ACBC is not designed for use with very small populations ($N\le 100$), we introduce Profile-Ranking (PR-) ACBC as a new "value-added" method which applies ranking theory (Alvo and Yu 2014) to data collected during the choice-task stage to obtain profile rankings and their associated profile attribute level (PAL) rankings. PR-ACBC does not require any knowledge of partworth utilities, and thus serves as a simple type of validation of population inferences drawn from utility-based analysis. Unlike Maximum Difference (MaxDiff) scaling, which is complementary to but not directly integrable into ACBC (Sawtooth 2013), PR is easily integrated into ACBC choice task tournaments based on the Method of Paired Comparisons (MPC) (Thurstone 1927).
A small population size $N$ for which PR-ACBC is designed may be categorized based on a rule of thumb for utility-based conjoint analysis (Orme 2014). In particular, letting $n$ be the number of survey respondents, $t$ the number of choice tasks, $a$ the number of alternatives per set, and $c$ the maximum number of levels in any attribute, we consider cases where $n < N \ll \frac{c}{at} \cdot 10^3$.
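For instance, with $c=3$ levels per attribute, $a=2$ alternatives per choice task, and $t=15$ tournament tasks (illustrative values of our own choosing, not fixed by the methodology), the bound is $\frac{c}{at}\cdot 10^3=100$, so the populations of interest satisfy $n<N\ll 100$.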
Following the first application of ACBC to disaster relief (Gralla et al. 2014), we used Sawtooth's Lighthouse platform to structure our choice-task stage as a single-elimination tournament beginning with 16 profiles close to the respondent's \#1 profile revealed in the ``Build Your Own'' (BYO) stage. The Lighthouse platform offers a sophisticated hierarchical-Bayesian Markov-Chain Monte-Carlo (HB MCMC) simulation (Rossi et al. 2005) to estimate partworth utilities and their variances. Profile rankings and their derived profile attribute level (PAL) rankings, on the other hand, can easily be computed without knowledge of partworth utilities, and so serve as a simple validity check of utility-based profile choice predictions and rankings of attribute importances for small populations. PAL rankings are also useful in multi-dimensional scaling (MDS) (Alvo and Yu 2014), which gives a 2-dimensional visual representation showing similarities in respondents and their rankings of attribute levels.
In Section 2, we introduce basic PR-ACBC methodology by means of a very simple generic (``toy'') survey with only 4 profiles constructed from 2 attributes each with 2 levels. We begin with a fundamental observation that the exact sample profile rankings and their derived PAL rankings directly obtainable from choice tournament data cannot be obtained by multiple linear regression of attribute levels (part-worth utilities). We therefore develop PR-ACBC using only survey tournament data, without requiring any knowledge of partworth utilities. Maximum likelihood estimate (MLE) population rankings are obtainable for known population sizes by a discrete multivariate hypergeometric distribution (Oberhofer and Kaufman 1987), and for unknown population sizes by multivariable calculus optimization using Lagrange multipliers (Stewart 2016). Population ranking intervals which must contain the unknown population profile rankings are easily computed from sample profile rankings. Similarly, the PAL intervals which must contain the actual population attribute level rankings are easily obtained from sample data. We then show how sample PALs are useful for multidimensional scaling (MDS), which offers a 2-dimensional geometric representation of similarities in respondents and PAL rankings; this is useful for comparing and contrasting sample respondent subgroups and clustering of attributes. In Section 3 we illustrate PR methodology using a recent ACBC survey deployed to international disaster relief organizations with a headquarters in the U.S. or Canada. This small survey population provided the original context motivating this methodological study, and is a sequel to a novel application of ACBC in disaster-response research (Gralla et al. 2014). Finally, in Section 4, we suggest a couple of major directions for further research in PR-ACBC, which is currently still in its initial stage of development.
\section{PR-ACBC Methodology}
In this section we introduce PR-ACBC methodology using a simple ``toy'' survey.
\subsection{Simple Example}
Consider a generic ACBC survey with just 2 attributes each having 2 levels. We designate the 4 possible profiles $A=11, B=10, C=01, D=00$, where $X=x_1x_2$ designates that profile $X$ has level $x_1$ for the first attribute and level $x_2$ for the second attribute. Suppose we have obtained, by anonymous survey, choice tournament results for $n=4$ respondents from a population of size $N>4$. A sample tournament outcome is shown in Figure \ref{SimpleTourn}.
\begin{figure}[!htpb]
\centering
\includegraphics[width=1.75in, height=1.5in]{SimpleTourn.png}
\caption{A respondent whose tournament data ranks profile A=11 first, C=01 second, B=10 third, and D=00 fourth.}
\label{SimpleTourn}
\end{figure}
{\flushleft This} respondent profile ranking is denoted ACBD, meaning profile A=11 is ranked 1 (tournament winner), profile C=01 is ranked 2 (runner-up), profile B=10 third, and D=00 fourth. The rationale for the latter two rankings is that, assuming transitivity of match outcomes, the highest B could have been ranked had all profiles been paired is 2, while D could only have been ranked as high as 3.
In our toy survey with just 4 possible profiles, there are already $4!=24$ possible profile rankings. In the next section we consider an actual survey of disaster relief organizations (DROs) with 4 attributes containing 3 levels each, administered to a population of roughly $N=50$ faith-based disaster relief organizations. This survey has 81 profiles and $81!\approx 5.8\times 10^{120}$ possible profile rankings. For a sample size of $n=13$, and a choice task stage which provides each respondent's ranking of 16 profiles, a meaningful ranking of all the profiles is evidently impossible. As a result, our approach to ranking profiles, one more akin to using partworth utilities to determine profile choices, is to rank individual attribute levels rather than profiles as a whole.
Our toy survey has just four profile attribute levels, which we denote $(x_1,x_2)$ where $x_1$ is the attribute number (1 or 2) and $x_2$ the level number (0 or 1). Note that each level appears in exactly two profiles. For each attribute level, we compute a respondent's profile attribute level (PAL) ranking as the average of the two profile rankings in which the level appears. For example, since $(1,0)$ appears in profiles $C$ and $D$, if a respondent's profile ranking is ACBD, then the $(1,0)$ PAL ranking is $(2+4)/2=3$ (see Table \ref{PAL1}).
\begin{table}[!htpb]
\centering
\begin{tabular}{c|cccc}
Profile&\multicolumn{4}{c}{PAL Ranking}\\
Ranking&(1,1)&(1,0)&(2,1)&(2,0)\\\hline
ABCD& 1.5&3.5&2&3\\
ACBD& 2&3&1.5&3.5\\
BADC&1.5&3.5&3&2 \\
CBAD& 2.5&2.5&2&3 \\\hline
$m$&1.875&3.125&2.125&2.875\\
$s$ &.4787&.4787&.6292&.6292\\
\end{tabular}
\caption{Toy survey sample PAL rankings ($n=4$, $m$=mean, $s$=standard deviation, profiles A=11,B=10,C=01, and D=00.)}
\label{PAL1}
\end{table}
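To make the computation concrete, the following short Python sketch (an illustration only; it is not part of the survey pipeline, which uses the MATLAB scripts of Section 3) derives the four PAL rankings of the toy survey from a single respondent's profile ranking:
\begin{lstlisting}[language=Python]
# Toy survey: each profile is a two-bit string x1x2
# (level of attribute 1, level of attribute 2).
PROFILES = {'A': '11', 'B': '10', 'C': '01', 'D': '00'}

def pal_rankings(profile_ranking):
    # profile_ranking, e.g. 'ACBD', lists profiles from rank 1 to rank 4.
    # Each PAL ranking is the average rank of the two profiles containing it.
    rank = {p: k + 1 for k, p in enumerate(profile_ranking)}
    pal = {}
    for attr in (0, 1):
        for level in ('1', '0'):
            ranks = [rank[p] for p, bits in PROFILES.items()
                     if bits[attr] == level]
            pal[(attr + 1, int(level))] = sum(ranks) / len(ranks)
    return pal

print(pal_rankings('ACBD'))
# {(1, 1): 2.0, (1, 0): 3.0, (2, 1): 1.5, (2, 0): 3.5}
\end{lstlisting}
The output reproduces the ACBD row of Table \ref{PAL1}.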
In our DRO survey, there are a total of 12 PALs $(x_1,x_2)$ with $x_1\in\{1,2,3,4\}$ and $x_2\in\{1,2,3\}$. Each PAL occurs in 27 of the 81 profiles and has roughly a 99.85\% chance of appearing in a single-elimination round-of-16 tournament (and hence of receiving a ranking between 1 and 16). PALs not appearing in a tournament are assigned a ranking of 24.5 (the average of rankings 17 through 32, which would be assigned to the first-round losers had one earlier round been included in the tournament). In short, PAL rankings are far more meaningful than profile rankings.
The main questions we will develop in the sequel are:
\begin{itemize}
\item POPULATION INFERENCES: \emph{What can we infer from sample PAL rankings about the population PAL rankings?}; and
\item APPLICATION TO REAL SURVEYS: \emph{How well do PAL rankings predict choice tasks and attribute importances in the DRO survey?}
\end{itemize}
\subsection{A Fundamental Observation}
In this section we show that least squares multiple linear regression (LSMR) will not in general give exact sample profile rankings and their corresponding PAL rankings. In other words, part-worth utilities can only approximate sample PAL rankings.
Least squares multiple linear regression (LSMR) can be used to predict sample PAL rankings, as we will now explain using our toy survey. Let $U$ denote the level of attribute 1, $V$ the level of attribute 2, and $Y$ the ranking of a profile with levels $U$ and $V$. Table \ref{Tab7} gives the dataset $\{(U_i,V_i,Y_i)\}$ ($i = 1,\ldots,16$) where
\begin{eqnarray*}
U_i&=& 1 \textup{ if attribute 1 has level 1, and 0 if it has level 2}\\
V_i&=& 1 \textup{ if attribute 2 has level 1, and 0 if it has level 2}\\
Y_i&=& \textup{ Respondent's ranking of a profile with $U=U_i$, $V=V_i$.}
\end{eqnarray*}
\begin{table}[!htpb]
\centering
\small
\begin{tabular}{cc|ccccc} \hline
$U$ & $V$ & Respondent 1& Respondent 2& Respondent 3& Respondent 4\\ \hline
1 &1&$Y_1$&$Y_2$&$Y_3$&$Y_4$\\
1 &0&$Y_5$&$Y_6$&$Y_7$&$Y_8$ \\
0 &1&$Y_9$&$Y_{10}$&$Y_{11}$&$Y_{12}$ \\
0 &0&$Y_{13}$&$Y_{14}$&$Y_{15}$&$Y_{16}$ \\\hline
\end{tabular}
\caption{{\small Sample profile rankings by respondent ($n=4$).}}
\label{Tab7}
\end{table}
This dataset has certain properties:
\begin{itemize}
\item
Each column consists of a respondent's profile rankings and so contains the numbers 1,2,3 and 4.
\item
Table \ref{Tab7} can also be represented in the form of Table \ref{Tab8}, by which we see that $$\sum U_i = \sum V_i = \sum U_i^2 = \sum V_i^2= 8, $$ and $$ \sum U_iV_i = 4,$$ where the symbol $\sum $ represents $\displaystyle \sum_{i=1}^{16}$.
\end{itemize}
\begin{table}[!htpb]
\centering
\small
\begin{tabular}{cc|c}
$U$ & $V$ & Rank\\ \hline
1& 1& $Y_1$\\
1& 1& $Y_2$\\
1& 1& $Y_3$\\
1& 1& $Y_4$\\
1& 0& $Y_5$\\
1& 0& $Y_6$\\
1& 0& $Y_7$\\
1& 0& $Y_8$\\
0& 1& $Y_9$\\
0& 1& $Y_{10}$\\
0& 1& $Y_{11}$\\
0& 1& $Y_{12}$\\
0& 0& $Y_{13}$\\
0& 0& $Y_{14}$\\
0& 0& $Y_{15}$\\
0& 0& $Y_{16}$\\\hline
\end{tabular}
\caption{{\small Dataset's ranking structure with respondents combined.}}
\label{Tab8}
\end{table}
Using least squares multiple linear regression (LSMR) on the dataset in Table \ref{Tab8}, we estimate each sample profile ranking $Y_i$ as $\hat{Y}_i$:
$$
\hat{Y}_i=c_0 + c_1 U_i + c_2 V_i,
$$
{\flushleft where} the regression coefficients $c_0,c_1,c_2$ are determined by minimizing the sum of squared residuals (SSR):
$$
SSR = \sum_{i=1}^{16}(Y_i-\hat{Y}_i)^2
=\sum_{i=1}^{16}(Y_i-(c_0 + c_1 U_i + c_2 V_i))^2.
$$
To minimize the SSR, we set the partial derivatives with respect to $c_0,c_1$ and $c_2$, equal to zero:
$$\frac{\partial SSR}{\partial c_0} = \frac{\partial SSR}{\partial c_1} = \frac{\partial SSR}{\partial c_2} = 0.$$
This yields the linear system:
$$\begin{cases}
nc_0 + c_1\sum U_i + c_2\sum V_i = \sum Y_i\\
c_0\sum U_i + c_1\sum U_i^2 + c_2\sum U_iV_i = \sum U_iY_i\\
c_0\sum V_i + c_1\sum U_iV_i + c_2\sum V_i^2 = \sum V_iY_i
\end{cases},$$
which is equivalent to the matrix equation:
\[
\begin{bmatrix}
n& \sum U_i& \sum V_i \\
\sum U_i&\sum U_i^2&\sum U_iV_i\\
\sum V_i& \sum U_iV_i& \sum V_i^2\\
\end{bmatrix}
%
\begin{bmatrix}
c_0 \\
c_1\\
c_2
\end{bmatrix}
=
\begin{bmatrix}
\sum Y_i\\
\sum U_iY_i\\
\sum V_iY_i
\end{bmatrix}
.
\]
{\flushleft Simplifying} the sums and using Cramer's Rule we obtain the regression coefficients $c_0,c_1$ and $c_2$:
$$
c_0 =
\frac{
\begin{vmatrix}
\sum Y_i & 8 & 8\\
\sum U_iY_i& 8 & 4\\
\sum V_iY_i & 4 & 8
\end{vmatrix}
}{256} = \frac{
\begin{vmatrix}
\sum Y_i & 2 & 2\\
\sum U_iY_i& 2 & 1\\
\sum V_iY_i & 1 & 2
\end{vmatrix}
}{16},
$$
$$
c_1 =
\frac{
\begin{vmatrix}
16 & \sum Y_i & 8 \\
8 & \sum U_iY_i& 4\\
8 & \sum V_iY_i & 8
\end{vmatrix}
}{256} = \frac{
\begin{vmatrix}
4 & \sum Y_i & 2 \\
2 & \sum U_iY_i& 1\\
2 & \sum V_iY_i & 2
\end{vmatrix}
}{16}, \textup{ and}
$$
$$
c_2 =
\frac{
\begin{vmatrix}
16 & 8 & \sum Y_i\\
8 & 8 & \sum U_iY_i \\
8 & 4 & \sum V_iY_i
\end{vmatrix}
}{256} = \frac{
\begin{vmatrix}
4 & 2 & \sum Y_i\\
2 & 2 & \sum U_iY_i \\
2 & 1 & \sum V_iY_i
\end{vmatrix}
}{16}.
$$
The LSMR predicted profile rankings are given by:
$$\hat{Y}_{A} = c_0 + c_1 + c_2 =
\frac{2\sum U_iY_i + 2\sum V_iY_i - \sum Y_i}{16},$$
$$\hat{Y}_{B} = c_0 + c_1 =
\frac{2\sum U_iY_i - 2\sum V_iY_i + \sum Y_i}{16},$$
$$\hat{Y}_{C} = c_0 + c_2 =
\frac{-2\sum U_iY_i + 2\sum V_iY_i + \sum Y_i}{16}, \textup{ and}$$
$$\hat{Y}_{D} = c_0 =
\frac{-2\sum U_iY_i - 2\sum V_iY_i +3 \sum Y_i}{16}.$$
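(As a check against the sample reported later in Table \ref{Tab9}, where $\sum Y_i=40$ and $\sum U_iY_i=\sum V_iY_i=15$, the first formula gives $\hat{Y}_{A}=(2\cdot 15+2\cdot 15-40)/16=1.25$, matching the predicted rank shown there.)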
The corresponding actual sample profile rankings obtained by averaging the respondent rankings are:
$$\bar{Y}_{A} = \frac{Y_1+Y_2+Y_3+Y_4}{4},$$
$$\bar{Y}_{B} = \frac{Y_5+Y_6+Y_7+Y_8}{4},$$
$$\bar{Y}_{C} = \frac{Y_9+Y_{10}+Y_{11}+Y_{12}}{4}, \textup{ and}$$
$$\bar{Y}_{D} = \frac{Y_{13}+Y_{14}+Y_{15}+Y_{16}}{4}.$$
Let $\bar{f}(X)$ denote the average sample ranking of PAL $X$. For example, $\bar{f}(U=1)=\frac{\bar{Y}_A+\bar{Y}_B}{2}=\frac{\sum_{i=1}^8 Y_i}{8}$. On the other hand, the LSMR prediction for this PAL's sample ranking is $\hat{f}(X)=(\hat{Y}_A+\hat{Y}_B)/2=c_0+c_1+\frac{c_2}{2}.$ The relationship between the sample PAL rankings and LSMR predicted PAL rankings is shown in Table \ref{LSMR}.
\begin{table}[!htpb]
\centering
\scriptsize
\begin{tabular}{c|c|c}
PAL & $\bar{f}(X)$ & $ \hat{f}(X)$ \\ \hline
& & \\
U=1 & $\frac{\bar{Y}_A+\bar{Y}_B}{2}$ &$c_0+c_1+\frac{c_2}{2}$\\
& & \\
U=0 & $ \frac{\bar{Y}_C+\bar{Y}_D}{2}$ &$c_0+\frac{c_2}{2}$ \\
& & \\
V=1 &$\frac{\bar{Y}_A+\bar{Y}_C}{2}$ &$c_0+c_2+\frac{c_1}{2}$ \\
& & \\
V=0 & $\frac{\bar{Y}_B+\bar{Y}_D}{2}$ &$c_0+\frac{c_1}{2}$ \\
& & \\\hline
\end{tabular}
\caption{{\small Predicted PAL rankings using LSMR coefficients.}}
\label{LSMR}
\end{table}
Table \ref{Tab9} shows that the average sample profile rankings and LSMR predicted profile rankings are different for each of the four profiles. In this case, the regression coefficients are $c_0=3.75$, $c_1=-1.25$, $c_2=-1.25$.
\begin{table}[!htpb]
\centering
\scriptsize
\begin{tabular}{cc|cccc|c|c|c}
\multicolumn{2}{c}{} &\multicolumn{4}{c}{Respondents}\\\hline
$U$ & $V$ & R 1& R 2& R 3& R 4 &Sample Rank&Predicted Sample Rank& $|$Residual Error$|$\\ \hline
1 &1&1&1&3&1&1.5&3.75-1.25(1)-1.25(1)=1.25&.25\\
1 &0&2&3&1&3&2.25&3.75-1.25(1)-1.25(0)=2.5&.25 \\
0 &1&3&2&2&2&2.25 &3.75-1.25(0)-1.25(1)=2.5&.25 \\
0 &0&4&4&4&4&4 &3.75-1.25(0)-1.25(0)=3.75&.25\\\hline
\end{tabular}
\caption{{\small Sample profile rankings and LSMR predictions for $n=4$ respondents.}}
\label{Tab9}
\end{table}
Geometrically, if the four points representing the sample profile rankings are co-planar, there is no error; otherwise, the LSMR predicted profile rankings will have a residual error (Figure \ref{sec4fig}). Such a geometric interpretation is not possible for surveys involving more than 2 attributes, in which case standard LSMR residual analysis indicates the error in sample profile rankings using regression coefficients.
\begin{figure}[!htpb]
\centering
\includegraphics[width=2in,height=2in]{sec4fig.png}
\caption{{\small The LSMR predicted sample profile rankings $\hat{Y}=3.75-1.25U-1.25V$ (square vertices) have residual errors, as the actual sample profile rankings (dots) do not belong to the plane of regression. }}
\label{sec4fig}
\end{figure}
\newpage
LSMR will not in general predict exact PAL rankings, as the latter are computed directly from the sample profile rankings (Table \ref{LSMR2}).
\begin{table}[!htpb]
\centering
\scriptsize
\begin{tabular}{c|cccc|c|c|c}
&\multicolumn{4}{c|}{Respondent PAL Ranking}&&$c_0=3.75,c_1=-1.25,c_2=-1.25$&\\\hline
PAL & R 1& R 2& R 3& R 4 &Sample PAL Rank&Predicted Sample Rank&$|error|$ \\ \hline
$U=1$ &1.5&2&2.5&2&2&$c_0+c_1+\frac{c_2}{2}=1.875$&.125\\
$U=0$&3.5&3&2.5&3&3&$c_0+\frac{c_2}{2}=3.125$&.125 \\
$V=1$&2&1.5&1.5&1.5&1.625 &$c_0+c_2+\frac{c_1}{2}=1.875$&.25 \\
$V=0$&3&3.5&3.5&3.5&3.375 &$c_0+\frac{c_1}{2}=3.125$&.25\\\hline
\end{tabular}
\caption{{\small Actual vs. LSMR predicted PAL rankings for a sample of $n=4$ respondents.}}
\label{LSMR2}
\end{table}
\subsection{Maximum Likelihood Estimation of PAL-ranking Intervals}
We will proceed to develop PR-ACBC methodology without assuming any knowledge of partworth utilities. Since sample PAL rankings are easily computed from sample profile rankings, we discuss how to obtain the population profile rankings most likely to have given rise to the observed sample profile rankings.
\subsubsection{Known Population Size}
Let us assume our population has size $N=7$, and that our sample was a random selection of $n=4$ out of these 7. The number $N=7$ is for simplicity of illustrating the relevant computations only, and could be any number $N>n=4$. We will now use a multivariate hypergeometric distribution to obtain the maximum likelihood estimate (MLE) population profile rankings, meaning the population which is most likely to have yielded the observed sample profile rankings.
Suppose in our sample with $n=4$, three different profile rankings $O_1,O_2,O_3$ are observed, with ranking $O_2$ occurring twice in the sample. We create what we shall call a \emph{factor table}, whose $k$ largest factors $f_{ij}=1+(n_i/j)$ are used to determine the MLE population (see Table \ref{FT}).
\begin{table}[!htpb]
\scriptsize
\centering
\begin{tabular}{r|ccc|c}\hline
\multicolumn{5}{c}{Choose the $k=3$ largest factors $f_{ij}$ for a population size $N=n+k=n+3$.}\\
+3& $f_{13}=4/3$ & $f_{23}=5/3$&$f_{33}=4/3$&\\
+2& $f_{12}=3/2$ & $f_{22}=2$ &$f_{32}=3/2 $&\\
+1& $f_{11}=2$ &$f_{21}=3$&$f_{31}=2 $ &\\\hline
Number observed in sample:&$n_1=1$& $n_2=2$ & $n_3=1$ & Sample size: $n=4$\\\hline
Ranking:&1=$O_1$&2=$O_2$&3=$O_3$
\end{tabular}
\caption{Probability factor table. The $k=3$ largest factors $f_{ij}=1+(n_i/j)$ are used to determine the MLE population with size $N=n+k=4+3=7$. }
\label{FT}
\end{table}
{\flushleft Let} $N_j$ denote the number of rankings $O_j$ in the population. The number of factors $a_j$ chosen from column $j$ in the factor table indicates that a population with $N_j= n_j+a_j$ is an MLE. In our case, the latter is not unique. One choice of 3 largest factors is $f_{11}=2, f_{21}=3, f_{22}=2$, so that $a_1=1$, $a_2=2$ and $a_3=0$. A population $Y$ in which $N_1=n_1+a_1=2$, $N_2=n_2+a_2=4$, and $N_3=n_3+a_3=1$ is an MLE. Another choice of 3 largest factors is $f_{11}=2, f_{21}=3, f_{31}=2$, so that $a_1=1$, $a_2=1$, and $a_3=1$. Population $Z$ in which $N_1=n_1+a_1=2$, $N_2=n_2+a_2=3$, and $N_3=n_3+a_3=2$ is also an MLE. We can verify this is so by computing the respective probabilities $p_Y$ and $p_Z$ that our observed sample arises from the respective populations $Y$ and $Z$:
\begin{equation}
p_Y=\frac{C(2,1)C(4,2)C(1,1)}{C(7,4)}=\frac{f_{11} \cdot f_{21}f_{22}}{C(7,4)}
\end{equation}
{\flushleft and }
\begin{equation}
p_Z=\frac{C(2,1)C(3,2)C(2,1)}{C(7,4)}=\frac{f_{11}\cdot f_{21}\cdot f_{31}}{C(7,4)}.
\end{equation}
{\flushleft This} procedure generalizes to any number of rankings $m$ which appear in a sample of size $n$ (Oberhofer and Kaufman 1987). Let $n=\sum_{j=1}^m n_j$, where $n_j$ is the number of rankings $O_j$ appearing in the sample. Form the probability factor table with $f_{ij}=1+\frac{n_i}{j}$ ($i=1,\ldots,m$, $j=1,\ldots,r$, and $N=n+r$). Choose the $r$ largest factors in the latter table and let $a_j$ be the number of factors chosen in column $j$. Then a population with $N_j=n_j+a_j$ ($j=1,\ldots,m$) is an MLE.
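As a computational illustration (not part of the original survey pipeline), the following Python sketch implements the factor-table procedure for an arbitrary sample and population size:
\begin{lstlisting}[language=Python]
import heapq

def mle_population(counts, N):
    # counts[i] = number of times ranking O_i occurs in the sample.
    # Returns MLE population counts N_i for a population of size N.
    # Ties among equal factors may admit several maximizers; this
    # function returns one of them.
    n = sum(counts)
    r = N - n
    # All factors f_ij = 1 + n_i/j for j = 1..r, tagged by column i.
    factors = [(1 + counts[i] / j, i)
               for i in range(len(counts)) for j in range(1, r + 1)]
    a = [0] * len(counts)
    for _, i in heapq.nlargest(r, factors):
        a[i] += 1          # a_i = number of factors chosen in column i
    return [c + x for c, x in zip(counts, a)]

print(mle_population([1, 2, 1], N=7))
# [1, 4, 2] -- one maximizer; ties also admit [2, 4, 1] and [2, 3, 2]
\end{lstlisting}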
\subsubsection{Unknown Population Size}
Let us assume now that, rather than having a specified size, the population size is an unknown value $N$. In this case, we wish to determine the probability $p_i$ that a member of the population has ranking $O_i$. As before, we assume that our sample is drawn randomly from the population. The probability $p$ that the observed sample consists of one $O_1$, two $O_3$'s, and one $O_9$ is
\begin{equation}
p=f(p_1,p_3,p_9)=\frac{4!}{1!2!1!}[p_1p_3^2p_9],
\end{equation}
\label{eq:1}
{\flushleft where} $g(p_1,p_3,p_9)=p_1+p_3+p_9=1$.
The values of $p_1^*,p_3^*,$ and $p_9^*$ which maximize $H(p_1,p_3,p_9)=\ln (f(p_1,p_3,p_9))$ (and hence also maximizes $p=f(p_1,p_3,p_9)$) are obtained using Lagrange multipliers:
\begin{eqnarray*}
\nabla H(p_1^*,p_3^*,p_9^*) & = & \lambda \nabla g
(p_1^*,p_3^*,p_9^*),
\end{eqnarray*}
{\flushleft and therefore}
\begin{eqnarray*}
\frac{1}{p_1^*} & = & \lambda\\
\frac{2}{p_3^*} & = & \lambda\\
\frac{1}{p_9^*} & = & \lambda.
\end{eqnarray*}
{\flushleft (The scalar quantity $\lambda$ is called a Lagrange multiplier.) Using} $p_1^*+p_3^*+p_9^*=1$ gives $\frac{1}{\lambda} + \frac{2}{\lambda}+\frac{1}{\lambda}=1$ and so $ \lambda = 4$. Hence, the values $p_1^*=\frac{1}{4}$, $p_3^*=\frac{1}{2}$, $p_9^*=\frac{1}{4}$ maximize the probability of the observed sample outcomes.
In general, let $n_k$ be the number of sample outcomes $O_k$ ($k=1,2,...,K$) and let $p_k$ be the probability that a respondent in the population has outcome $O_k$ $(k= 1, 2, ..., K)$. The likelihood function $f(p_1, p_2, ..., p_K)$ giving the probability of observing the sample values $n_1, ..., n_{K}$ is given by
\begin{equation}
f(p_1, ...., p_K)= \frac{n!}{n_1!n_2!\cdot\cdot\cdot n_K!} \prod_{k=1}^K p_k^{n_k},
\end{equation}
\label{eq:4}
{\flushleft with} $\sum_{k=1}^{K}n_k=n$ and $\sum_{k=1}^{K}p_k=1$.
We seek to find the values $p_1^*, ..., p_{K}^*$ which maximize the likelihood function $f$, or equivalently, the log-likelihood function
\begin{equation}
H(p_1, ..., p_K)=\ln f = \ln(n!) - \sum_{k=1}^{K} \ln(n_k!) +\sum_{k=1}^{K} n_k\ln(p_k),
\end{equation}
\label{eq:5}
{\flushleft subject} to the constraint $g(p_1, ..., p_{K})=p_1+p_2+...+p_K=1$. Properties of gradients imply that the optimal values $p_i^*$ must satisfy
\begin{equation}
\nabla H(p_1^*, ..., p_K^*) = \lambda \nabla g(p_1^*, ..., p_{K}^*).
\end{equation}
\label{eq:6}
{\flushleft It} follows that for $k=1, ..., K$,
\begin{equation}
\frac{n_k}{p_k^*}=\lambda.
\end{equation}
\label{eq:7}
{\flushleft Hence,} $n=\sum_{k=1}^{K} n_k = \lambda \sum_{k=1}^{K} p_k^* = \lambda$, and so the probabilities $p_k^* = \frac{n_k}{n}$ give the maximum likelihood of the observed sample outcomes $n_k$ ($k=1, 2, ..., K$). For any sample of size $n$ and number $n_k$ of observed outcomes $O_k$ ($k=1, 2, ...K$), the maximum likelihood probabilities $p_k^*=\frac{n_k}{n}$
indicate that for a population of size $N$, the expected number $N_k$ of outcomes $O_k$ is given by $E(N_k)=p_k N.$ A maximum-likelihood population could be simulated by augmenting the observed $n$ sample outcomes, where the probability of outcome $O_k$ at each draw is given by $p_k$. For a large number of such randomly constructed populations of size $N$, for each $k$ the average number of population outcomes $O_k$ is approximately $p_k N$.
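A brief numerical sketch of this augmentation scheme (illustrative only):
\begin{lstlisting}[language=Python]
import numpy as np

rng = np.random.default_rng(0)
n_k = np.array([1, 2, 1])       # observed counts of outcomes O_1, O_2, O_3
n, N = n_k.sum(), 50            # sample size and population size
p = n_k / n                     # MLE probabilities p_k* = n_k / n

# Complete the sample to a population of size N by drawing the remaining
# N - n members from the MLE distribution; averaging over many simulated
# populations recovers E(N_k) = p_k * N.
sims = np.array([n_k + rng.multinomial(N - n, p) for _ in range(10000)])
print(sims.mean(axis=0))        # close to p * N = [12.5, 25. , 12.5]
\end{lstlisting}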
\subsection{Profile Ranking Intervals}
Maximum likelihood provides point estimates of the population profile rankings and their corresponding PAL rankings. It is also easy to construct intervals guaranteed to include the actual population profile and attribute level rankings.
First, suppose a profile X in a sample of size $n$ has mean profile ranking $\rho_n(X)$. It is easy to construct an interval which contains the mean ranking $\rho_N(X)$ for any population size $N>n$:
\begin{equation}
1+\frac{n}{N}(\rho_n(X)-1)\le \rho_N(X) \le 4-\frac{n}{N}(4-\rho_n(X))
\label{eq6}
\end{equation}
{\flushleft This} interval containing $\rho_N(X)$ is obtained by either (i) assigning the rank 1 to $X$ for all $N-n$ members of the population not in the sample (lower bound for $\rho_N(X)$); or (ii) assigning the rank 4 to $X$ for all $N-n$ non-sample population members (upper bound for $\rho_N(X)$). Let $\lambda=\frac{N-n}{N}$ be the fraction of the population not included in the sample. It is easy to show that
\begin{equation}
\rho_n(X)-\lambda(\rho_n(X)-1)\le \rho_N(X) \le \rho_n(X) + \lambda(4-\rho_n(X)).
\label{PRI}
\end{equation}
{\flushleft We} call (\ref{PRI}) a \emph{profile ranking interval} (PRI) for profile $X$. The length of this interval is
$3\lambda$, where the 3 arises algebraically as the difference between the extreme rankings 1 and 4.
\begin{figure}[!htpb]
\centering
\includegraphics[width=6.5in, height=1.75in]{Confidence_Interval.png}
\caption{Given a profile $X$ and its sample ranking $\rho_n(X)$, the length of the population profile ranking interval is determined by $\lambda=\frac{N-n}{N}$, the proportion of the population who have not taken the survey.}
\label{CIfig}
\end{figure}
The profile ranking interval can be used to quantify sample bias. Assume that, out of a total population of $N=8$, two respondents have refused to take the survey, four have completed it, and the other two have not yet replied. In this case, the length of the PRI is $\frac{3(8-4)}{8}=3/2$. If one of the non-respondents is convinced to participate, the interval length is reduced to $\frac{3(8-5)}{8}=\frac{9}{8}$, and if both non-respondents participate, then the interval is further reduced to $\frac{3(8-6)}{8}=\frac{3}{4}$. In other words, the two non-respondents cause the PRI to be twice as long, an important consideration in seeking to elicit survey responses.
In a similar way to constructing profile ranking intervals, if PAL $\chi$ has sample ranking mean $r_n(\chi)$, it is easy to form a \emph{PAL ranking interval} for any population size $N>n$. For our toy survey,
\begin{equation}
1+\frac{n}{N}(r_n(\chi)-1)\le r_N(\chi) \le 4-\frac{n}{N}(4-r_n(\chi)),
\end{equation}
{\flushleft which} is equivalent to
\begin{equation}
r_n(\chi)-\lambda(r_n(\chi)-1)\le r_N(\chi) \le r_n(\chi) + \lambda(4-r_n(\chi)).
\label{ALRI}
\end{equation}
{\flushleft with} $\lambda=\frac{N-n}{N}$. Note that (\ref{PRI}) and (\ref{ALRI}) have the same form, so the length of both intervals is $3\lambda$.
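For example, with $n=4$, $N=8$ (so $\lambda=\tfrac{1}{2}$) and a sample PAL ranking $r_n(\chi)=1.875$, the PAL ranking interval is $[1.875-\tfrac{1}{2}(0.875),\ 1.875+\tfrac{1}{2}(2.125)]=[1.4375,\ 2.9375]$, of length $3\lambda=1.5$.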
\subsection{Multidimensional Scaling}
One further type of analysis of PAL ranking data is a 2-dimensional geometric representation known as multidimensional scaling (MDS) (Alvo and Yu 2014).
Fundamental to MDS is use of a distance measure $d(\mu,\nu)$ in which the more similar (resp. dissimilar) are a pair of rankings $\mu$ and $\nu$, the smaller (resp. larger) is their distance. A variety of distance measures have been used for MDS, where it is conventional to number the objects being ranked $1,2,...,t$ and represent a ranking as a permutation, $\mu:S\rightarrow S$ where $S=\{1,2,...,t\}$ and $\mu(i)$ is the rank of object $i$. For example, Hamming distance (from coding theory) is defined as
\begin{equation}
d_H{(\mu,\nu)}=t-\sum_{i=1}^t I(\mu(i)=\nu(i)).
\end{equation}
{\flushleft The} indicator function $I(\cdot)$ equals 1 if the statement inside the parentheses is true and 0 otherwise. The Hamming distance counts the number of positions where the permutations differ.
Another example is the Spearman distance, which is akin to the usual squared Euclidean distance:
\begin{equation}
d_S{(\mu,\nu)}=\frac{1}{2}\sum_{i=1}^t(\mu(i)-\nu(i))^2.
\end{equation}
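For example, with $t=4$, the rankings $\mu=(1,2,3,4)$ and $\nu=(2,1,3,4)$ give $d_H(\mu,\nu)=2$ and $d_S(\mu,\nu)=\tfrac{1}{2}\left((1-2)^2+(2-1)^2\right)=1$.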
{\flushleft Note} that the Hamming distance satisfies the three required metric properties:
\begin{itemize}
\item NON-NEGATIVITY: $d_H(\mu,\nu)\ge 0$ for all $\mu$, $\nu$, with equality holding if and only if $\mu=\nu$;
\item SYMMETRY: $d_H(\mu,\nu)=d_H(\nu,\mu)$; and
\item TRIANGLE INEQUALITY: $d_H(\mu,\nu)+d_H(\nu,\sigma)\ge d_H(\mu,\sigma)$.
\end{itemize}
{\flushleft The Spearman distance}, however, only satisfies the first two metric properties (Alvo and Yu 2014).
In applying MDS to PR-ACBC, we let $(i,j)$ denote level $j$ of attribute $i$, and $f(i,j)$ the average rank of level $(i,j)$ in an individual respondent's profile ranking. For example, for the profile ranking BACD, $f(1,1)=(1+2)/2=1.5$ since B=10 is ranked first and A=11 is ranked second; $f(2,0)=(1+4)/2=2.5$ since B=10 is ranked first and $D=00$ is ranked fourth.
For our toy survey, each respondent $X$ will have four average profile level rankings $f_X(1,1), f_X(1,0), f_X(2,1), f_X(2,0)$. Given 2 respondents $X$ and $Y$, we consider the squared Euclidean distance between their PAL rankings defined as
\begin{equation}
d_S(X,Y)=\sum_{i,j} [f_X(i,j)-f_Y(i,j)]^2.
\label{Spea}
\end{equation}
{\flushleft This} distance measure can be used for an ACBC survey with any number of attributes and levels.
Once a distance measure is defined, a 2-dimensional MDS is such that rankings are represented by points in an $xy$ Cartesian coordinate system, and the Euclidean distances between these points reflect the relative distances between rankings.
Note that in the resulting MDS (Figure \ref{MDStoy}), respondents R2 and R4 coincide since they have the same ranking (ACBD).
\begin{figure}[!htpb]
\centering
\includegraphics[width=6.5in, height=1.75in]{MDS1.png}
\caption{Example of an MDS using the distance function defined in (\ref{Spea}). In this case the sample profile rankings are ABCD, ACBD (twice), and BCAD.}
\label{MDStoy}
\end{figure}
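A two-dimensional embedding from the distance (\ref{Spea}) can be computed with standard MDS software; the following Python sketch (illustrative only, assuming scikit-learn is available) reproduces the toy example:
\begin{lstlisting}[language=Python]
import numpy as np
from sklearn.manifold import MDS

# PAL rankings f_X(i,j) for respondents R1..R4 (sample ABCD, ACBD,
# BCAD, ACBD); columns are levels (1,1), (1,0), (2,1), (2,0).
F = np.array([[1.5, 3.5, 2.0, 3.0],
              [2.0, 3.0, 1.5, 3.5],
              [2.0, 3.0, 2.5, 2.5],
              [2.0, 3.0, 1.5, 3.5]])

# Squared Euclidean distances between the respondents' PAL rankings.
D = ((F[:, None, :] - F[None, :, :]) ** 2).sum(axis=2)

xy = MDS(n_components=2, dissimilarity='precomputed',
         random_state=0).fit_transform(D)
print(xy)   # R2 and R4 have zero dissimilarity, so they share a point
\end{lstlisting}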
\section{ACBC Survey of Humanitarian Disaster Relief Organizations}
PR-based ACBC methodology is designed to analyze small populations and has many possible applications. One application involves a study of a small population ($N=61$) of international disaster relief organizations headquartered in the U.S. or Canada as shown in Table \ref{ORGS}.
\begin{table}[!htpb]
\scriptsize
\centering
\begin{tabular}{c|c}\hline
\multicolumn{2}{c}{Survey Population: International Disaster Relief Organizations Headquartered in the US/Canada}\\\hline
Faith-based (Christian)($N_1=49$) & \multicolumn{1}{m{2.75in}}{Adventist Community Services, Adventist Development and Relief Agency, AMG international, Baptist World Aid, Billy Graham Evangelistic Association (rapid response team), Blessings international, Brethren Disaster Ministries, Catholic Charities USA, Catholic Medical Mission Board, Church World Service, Christian Disaster Response,
Christian Aid Ministries, Convoy of Hope, Cooperative Baptist Fellowship, Covenant World Relief,
Episcopal relief and Development, Food for the Hungry, Food for the Poor, Habitat for Humanity, Hopeforce International,
International Aid, Lutheran Disaster Response, Lutheran World Relief, MAP international, MedAir,
Medical Teams International, Mennonite Central Committee, Mission Aviation Fellowship, Missionary expeditors,
The National Baptist Convention - Office of Disaster Management, Nazarene Compassionate Ministries, Operation Blessing International,
Operation Safe, Presbyterian Church in America - Mission to North America, Presbyterian Disaster Assistance,
Reach Global, S. Baptist N. Amer. Mission Board, Salvation Army World Service Organization, Samaritan's Purse,
Service International, SOS Children --not disaster relief, Tearfund USA, United Church of Christ Disaster Ministries,
United Methodist Committee on Relief (UMCOR), Water Mission, World Concern, World Relief, World Renew, World Vision }\\\hline
Non faith-based ($N_2=12$)& \multicolumn{1}{m{2.75in}}{ All Hands and Hearts, Americares, Direct Relief, Headwaters Relief Organization, Heart to Heart, Mercy Corps, Partnership with Native Americans, Save the Children, ShelterBox USA, SBP USA, Team Rubicon.}\\\hline
\end{tabular}
\caption{Small populations comprising an ACBC disaster relief organization (DRO) survey.}
\label{ORGS}
\end{table}
In disaster relief, effectiveness of a response may depend on the quality of collaboration between organizations with a broad diversity of religious and ideological perspectives. For effective coordination of relief, it is important that humanitarian organizations understand the unique traits and characteristics that shape their disaster response decisions. Through comparison of these factors, it is possible to design optimal partnerships and joint endeavors between organizations that may fulfill distinct, yet complementary, humanitarian roles. Our research focuses on a few key attributes affecting our population group's decision whether or not to respond to an international humanitarian disaster.
To this end, we designed an ACBC survey that creates disaster profiles; the attributes and levels for this survey are displayed in Figure \ref{AL}. Different disaster scenarios are paired off in the choice task (single-elimination tournament) stage, beginning with 16 profiles close to the Build-Your-Own (BYO), or ideal, scenario. The tournament data is the basis for profile ranking. The survey was deployed and tournament data collected using Sawtooth's Lighthouse platform.
\begin{figure}[!htpb]
\centering
\includegraphics[width=5.75in, height=7in]{AttributeLevels.png}
\caption{{\small An ACBC survey with 4 attributes consisting of 3 levels each. }}
\label{AL}
\end{figure}
\subsection{Survey Data}
As shown in Figure \ref{AL}, our humanitarian survey consists of four attributes, each with three levels. Thus, the number of possible profiles is $3^4=81$. These are identified by four-digit numbers $X=x_1x_2x_3x_4$, where profile $X$ has level $x_1$ for the first attribute, level $x_2$ for the second attribute, level $x_3$ for the third attribute, and level $x_4$ for the fourth attribute. In the tournament stage, there are four rounds, in which sixteen profiles face off against each other in head-to-head match-ups, much like the FIFA World Cup Round of 16. The competing profiles are selected from the 81 possible profiles based on the respondent's BYO preferences. We assign a ranking of \#1 to the tournament winner, \#2 to the runner-up, \#3 to the semifinalist who lost to \#1, \#4 to the semifinalist who lost to \#2, \#5 to the quarterfinalist who lost to \#1, and so on. Profiles that do not appear in the tournament are ranked \#24.5 (the average of \#17 through \#32).
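One natural reading of this rule ranks losers round by round, from the final backwards, ordered by the final rank of the profile that eliminated them. A hedged Python sketch of that reading follows (illustrative only; the survey data were in fact processed with the MATLAB scripts in Tables \ref{MAT1} and \ref{MAT12}):
\begin{lstlisting}[language=Python]
def tournament_ranks(rounds, winner):
    # rounds[0] is the round of 16, rounds[-1] the final; each round is
    # a list of (winner, loser) pairs.  Returns {profile: rank}.
    rank = {winner: 1}
    next_rank = 2
    for matches in reversed(rounds):
        # Within a round, a loser is ranked by the rank already assigned
        # to its conqueror (loser to #1 before loser to #2, and so on).
        for w, l in sorted(matches, key=lambda m: rank[m[0]]):
            rank[l] = next_rank
            next_rank += 1
    return rank
\end{lstlisting}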
An Excel file keeps track of each responding organization's tournament and the PAL rankings for each survey.
\begin{figure}[!htpb]
\centering
\includegraphics[width=2.75in, height=2in]{PAL2.png}
\includegraphics[width=2.75in, height=2in]{PAL1.png}
\caption{{\small Respondent 1's tournament (left) and profile rankings for organizations 1 and 2 (right). The latter are processed by the MATLAB script in Table \ref{MAT1} to obtain the PAL rankings. }}
\label{PALfig}
\end{figure}
\begin{table}[!htbp]
\flushleft
\begin{tabular}{|l|}\toprule
\parbox[b]{6.25in}{\lstinputlisting[style=Matlab-editor]{PALRanking.m}}\\\bottomrule
\end{tabular}
\caption{Main MATLAB script to compute PAL rankings.}
\label{MAT1}
\end{table}
\begin{table}[!htpb]
\flushleft
\begin{tabular}{|l|}\toprule
\parbox[b]{6.25in}{\lstinputlisting[style=Matlab-editor]{respondent.m}}\\\bottomrule
\end{tabular}
\caption{Respondent Class used by the main script in Table \ref{MAT1}}
\label{MAT12}
\end{table}
\newpage
\subsection{Survey Analysis}
\begin{table}[!htpb]
\centering
\scriptsize
\begin{tabular}{c|ccc|ccc|ccc|ccc}
\multicolumn{1}{c}{} &\multicolumn{3}{c}{Funding}&\multicolumn{3}{c}{Scale}&\multicolumn{3}{c}{Need}&\multicolumn{3}{c}{Community Access Partner}\\
\multicolumn{1}{c}{} &$\ge 75\%$&$\sim 50\%$&$<25 \%$& ISAE3&ISAE2& ISAE1/&clear&optional&unknown&none&local&outside\\
\multicolumn{1}{c}{} & & & & & & undeclared& & & & & & \\
ORG & L11&L12&L13&L21&L22&L23&L31&L32&L33&L41&L42&L43\\\hline
FBO1 &20.2&{\bf 18.6}*&22.7&20.8&{\bf 18.2}*&22.5& 21.2&{\bf 17.9}*&22.4&21.5&{\bf 18.4}*&21.6\\
FBO2 &21.2&21.5&{\bf 18.8}*&21.3&21.4&{\bf 18.8}*& {\bf 17.8}*&21.5&22.2&22.1&{\bf 17.1}*&22.3\\
FBO3 &21.1&{\bf 18.1}*&22.5&20.3&{\bf 18.5}*&22.8& 20.8&{\bf 18.7}*&22.1&21.8&21.9&{\bf 17.9}*\\
FBO4 &19.8&
{\bf 17.2}*&19.6&19.8&{\bf 17.6}*&19.3&19.4&{\bf 17.3}*&
20.0&20.8&{\bf 15.0}*&20.8
\\
FBO5 &19.0&19.2&{\bf 16.8}*&18.9&{\bf 16.8}*&19.6&
{\bf 16.5}*&19.6&19.2&19.1&{\bf 17.1}*&19.0\\
FBO6 &18.0& 17.9&{\bf 15.8}*&17.6&{\bf 15.9}*&
18.2&{\bf 15.3}*&18.2&18.2&18.8&{\bf 14}*&18.\\
FBO7 &19.3&{\bf 17.4}*&20.0&19.5&{\bf 17.4}*&
19.7&18.9&{\bf 17.8}*&19.9&20.8&{\bf 15.4}*&20.4\\
FBO8 &19.2&{\bf 17.3}*&20.1&19.6&{\bf 17.4}*&
19.6&{\bf 16.6}*&19.8&20.3&20.2&{\bf 15.8}*&20.6\\
FBO9 & 20.9&21.0&{\bf 19.6}*&21.3&21.6&
{\bf 18.7}*&20.6&{\bf 18.8}*&22.2&22.8&{\bf 16.3}*&22.4\\
FBO10&20.6&{\bf 18.4}*&22.4&20.9&{\bf 18.8}*&
21.9&{\bf 18}*&21.4&22.1&22.6&{\bf 16.8}*&22.1\\
FBO11&19.9&19.7&{\bf 17}*&19.0&{\bf 17.5}*&20.1&
18.9&{\bf 17.2}*&20.6&20.5&{\bf 15.4}*&20.8\\
FBO12&18.8&19.2&{\bf 16.2}*&18.7&{\bf 16.4}*&
19.1&17.9&{\bf 16.7}*&19.5&19.4&{\bf 15.4}*&19.4\\
FBO13&20.9&21.8&{\bf 18.8}*&21.7&{\bf 18.1}*&21.6&
{\bf 18}*&21.9&21.6&22.4&{\bf 17.3}*&21.8\\\hline
mean&19.93&{\bf 19.04}&19.26&19.96&{\bf 18.12}&20.15&{\bf 18.45}&18.98&20.79&21.00&{\bf 16.62}&20.6\\\hline
partworth&{\bf 14.94}&4.08&-19.03&{\bf 17.84}&16.93&-34.77&{\bf 53.87}&-1.24&-52.62&-66.2&{\bf 97.84}&-31.64\\\hline\hline
NGO1 &20.8&21.5&{\bf 19.2}*&21.7& {\bf 18.7}*&21.0& {\bf 17.3}*&21.6&22.6&22.4&{\bf 18.2}*&21.0\\
NGO2 &20.3&22 &{\bf 20.0}*&{\bf 18.4}*& 21.1&22.8& {\bf 18.8}*&20.9&22.6&21.3&{\bf 19.4}*&21.7\\
NGO3 &{\bf 17.54}&20.80&21.07&{\bf 18.91}*& 19.56&20.94& {\bf 16.67}&21.70&21.04&19.70*&{\bf 18.54}&21.17\\
NGO4 &{\bf 19.6}&20.3*&21.4&{\bf 18.0}*& 21.3&22.2& 21.2&{\bf 19.2}*&21.1&22.1&{\bf 17.9}*&21.6\\
NGO5&16.5&{\bf 15.1}*&17.8&16.9&{\bf 15.3}*&
17.3&{\bf 14.2}*&17.5&17.8&17.3&{\bf 16.7}*&15.4\\
NGO6 &20.6&21.5&{\bf 19.4}*&21.2&21.4&{\bf 18.8}*& {\bf 16.6}*&22.3&22.7&{\bf 18.4}*&21.6&21.4\\\hline
mean&{\bf 19.27}&20.129&19.82&{\bf 19.2}&19.56&20.52&{\bf 17.45}&20.54&21.3&20.22&{\bf 18.72}&20.36\\\hline
partworth& {\bf 31.8}&-4.77&-27.03&{\bf 29.26}&.48&-29.74&{\bf 86.13}&-1.41&-84.72&-50.36&{\bf 44.72}&5.63\\\hline
\end{tabular}
\caption{FBO/NGO individual PAL rankings ($n=13$) and sample means compared with average zero-centered part-worth utilities. Top-ranked level in {\bf bold} (* denotes the BYO level).}
\label{PAL}
\end{table}
\subsubsection{PAL ranking intervals}
\subsubsection{MDS}
\begin{figure}[!htpb]
\centering
\includegraphics[width=5.5in, height=3.75in]{RScript.pdf}
\caption{R-script used for the MDS shown in Figure \ref{MDSout1}}
\label{RScript}
\end{figure}
\begin{figure}[!htpb]
\centering
\includegraphics[width=6.75in, height=5in]{Combined.pdf}
\caption{Partworth utilities and profile level rankings }
\label{Combined}
\end{figure}
\begin{figure}[!htpb]
\centering
\includegraphics[width=4.5in, height=3.75in]{FNMDS.png}
\caption{Multidimensional scaling showing similarity of sample respondents and attribute levels.}
\label{MDSout1}
\end{figure}
\newpage
\subsection{Validation of Part-worth Utilities}
\subsubsection{Attribute Importances}
This section shows the inadequacy of partworth utilities for determining attribute importance.
\subsubsection{Choice task prediction accuracy}
\section{Further Work}
Still in its infancy, PR-ACBC methodology has several major areas open to further research, including analysis of different ranking systems for various types of choice tournaments, determination of the population and sample sizes for which PR-ACBC is most suitable, and application of PR-ACBC to other small population studies.
\section{Conclusion}
To complement the well-established partworth-utility-based conjoint analysis methodology for survey data, where the target populations are typically large, we have introduced a simple, intuitive approach to a small population's profile and level rankings based on choice-task sample data. A toy survey is used to introduce the basic concepts, and an actual survey administered to a small number of disaster relief organizations is used as an example of PR-ACBC methodology.
\subsection*{Project Team}
Ming-Hsuan Chuang,
Daniel Daum, Michaela Flitsch, Erica Gralla, Jarrod Goentzel, Timotius Kartawijaya, Zoe Kallus, Courtney Linscott, Sara Magnuson, Mark Nussbaum, Zach Oslund, Matthew Rueger, Nick Varberg, Mike Veatch, Joyce Yan
\section*{References}
\begin{list}{}{\itemindent=-2em}
\small
\item Alvo, M., and Yu, P.L.H. 2014. \emph{Statistical Methods for Ranking Data}. Springer.
\item Gralla, E., Goentzel, J., and Fine, G. 2014. Assessing trade-offs among multiple objectives for humanitarian aid delivery using expert preferences.
\emph{Production and Operations Management}, 23(6), 978--989.
\item Oberhofer, W. and Kaufman, H. 1987. Maximum Likelihood Estimation of a Multivariate Hypergeometric Distribution. \emph{Sankhya: The Indian Journal of Statistics, Series B (1960-2002)}, Indian Statistical Institute, 49(2), 188-191.
\item Orme, B.K. 2014. \emph{Getting Started with Conjoint Analysis.} Sawtooth Software.
\item Orme, B.K., and Chrzan, K. 2017. \emph{Becoming an Expert in Conjoint Analysis: Choice Modeling for Pros.} Sawtooth Software.
\item Rao, V. R. 2014. \emph{Applied Conjoint Analysis}. Springer.
\item Rossi, P., Allenby, G. and McCulloch, R. 2005. \emph{Bayesian Statistics and Marketing.} John Wiley \& Sons, Ltd.
\item Sawtooth Software, 2013. The MaxDiff System Technical Paper. https://www.sawtoothsoftware.com/download/techpap/maxdifftech.pdf Accessed 11/27/2018.
\item Stewart, J. 2016. \emph{Calculus, Early Transcendentals (8E)}. Cengage Learning.
\item Thurstone, L. L. 1927. A Law of Comparative Judgment, \emph{Psychological Review}, 34, 273-286.
\end{list}
\end{document}
|
{"hexsha": "d4c77275cec8db8d1552cff51b86dc53d33719c9", "size": 45790, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "mainNov27.tex", "max_stars_repo_name": "pisihara/ProfileRanking", "max_stars_repo_head_hexsha": "b8cda3d21bca0417d0786802e85f02a1faccc344", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "mainNov27.tex", "max_issues_repo_name": "pisihara/ProfileRanking", "max_issues_repo_head_hexsha": "b8cda3d21bca0417d0786802e85f02a1faccc344", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mainNov27.tex", "max_forks_repo_name": "pisihara/ProfileRanking", "max_forks_repo_head_hexsha": "b8cda3d21bca0417d0786802e85f02a1faccc344", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 56.3916256158, "max_line_length": 1927, "alphanum_fraction": 0.716641188, "num_tokens": 15436}
|
// Copyright Tom Westerhout 2017.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#include "testing.hpp"
#include <boost/static_views/raw_view.hpp>
#include <boost/static_views/view_concept.hpp>
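// This test exercises boost::static_views::raw_view over mutable,
// cv-qualified and constexpr arrays: the View concept, the nothrow and
// trivially-copyable type traits, extent()/size(), element access via
// operator[] and unsafe_at(), and the out_of_bound checks.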
auto test_construction()
{
int as[] = {1, 2, 3, 4, 5};
int const volatile bs[] = {1, 2, 3};
static constexpr int cs[] = {1, 2, 3, 4};
constexpr int ds[] = {1, 2, 3, 4};
auto as_view = boost::static_views::raw_view(as);
using as_view_t = decltype(as_view);
BOOST_TEST_EQ(boost::static_views::View<as_view_t>, true);
BOOST_TEST_TRAIT_TRUE((std::is_nothrow_copy_constructible<as_view_t>));
BOOST_TEST_TRAIT_TRUE((std::is_nothrow_move_constructible<as_view_t>));
BOOST_TEST_TRAIT_TRUE((std::is_nothrow_copy_assignable<as_view_t>));
BOOST_TEST_TRAIT_TRUE((std::is_nothrow_move_assignable<as_view_t>));
BOOST_TEST_TRAIT_TRUE((std::is_nothrow_destructible<as_view_t>));
BOOST_TEST_TRAIT_TRUE((std::is_trivially_copyable<as_view_t>));
// BOOST_TEST_TRAIT_TRUE((std::is_trivial<as_view_t>));
BOOST_TEST_EQ(as_view_t::extent(), 5);
BOOST_TEST_EQ(as_view.size(), 5);
BOOST_TEST_EQ(as_view[0], 1);
BOOST_TEST_EQ(as_view[4], 5);
BOOST_TEST_THROWS(as_view[5], boost::static_views::out_of_bound);
BOOST_TEST_THROWS(as_view[-1], boost::static_views::out_of_bound);
BOOST_TEST_EQ(as_view.unsafe_at(2), 3);
as_view[3] = -8;
BOOST_TEST_EQ(as_view.unsafe_at(3), -8);
auto bs_view = boost::static_views::raw_view(bs);
using bs_view_t = decltype(bs_view);
BOOST_TEST_EQ(boost::static_views::View<bs_view_t>, true);
BOOST_TEST_TRAIT_TRUE((std::is_nothrow_copy_constructible<bs_view_t>));
BOOST_TEST_TRAIT_TRUE((std::is_nothrow_move_constructible<bs_view_t>));
BOOST_TEST_TRAIT_TRUE((std::is_nothrow_copy_assignable<bs_view_t>));
BOOST_TEST_TRAIT_TRUE((std::is_nothrow_move_assignable<bs_view_t>));
BOOST_TEST_TRAIT_TRUE((std::is_nothrow_destructible<bs_view_t>));
BOOST_TEST_TRAIT_TRUE((std::is_trivially_copyable<bs_view_t>));
// BOOST_TEST_TRAIT_TRUE((std::is_trivial<bs_view_t>));
BOOST_TEST_EQ(bs_view_t::extent(), 3);
BOOST_TEST_EQ(bs_view.size(), 3);
BOOST_TEST_EQ(bs_view[0], 1);
BOOST_TEST_EQ(bs_view[2], 3);
BOOST_TEST_THROWS(BOOST_STATIC_VIEWS_UNUSED auto b1 = bs_view[3],
boost::static_views::out_of_bound);
BOOST_TEST_THROWS(BOOST_STATIC_VIEWS_UNUSED auto b2 = bs_view[-1],
boost::static_views::out_of_bound);
BOOST_TEST_EQ(bs_view.unsafe_at(1), 2);
BOOST_TEST_TRAIT_TRUE(
(std::is_same<bs_view_t::reference, int const volatile&>));
constexpr auto cs_view = boost::static_views::raw_view(cs);
using cs_view_t = std::remove_const_t<decltype(cs_view)>;
BOOST_TEST_EQ(boost::static_views::View<cs_view_t>, true);
BOOST_TEST_TRAIT_TRUE((std::is_nothrow_copy_constructible<cs_view_t>));
BOOST_TEST_TRAIT_TRUE((std::is_nothrow_move_constructible<cs_view_t>));
BOOST_TEST_TRAIT_TRUE((std::is_nothrow_copy_assignable<cs_view_t>));
BOOST_TEST_TRAIT_TRUE((std::is_nothrow_move_assignable<cs_view_t>));
BOOST_TEST_TRAIT_TRUE((std::is_nothrow_destructible<cs_view_t>));
BOOST_TEST_TRAIT_TRUE((std::is_trivially_copyable<cs_view_t>));
// BOOST_TEST_TRAIT_TRUE((std::is_trivial<cs_view_t>));
BOOST_TEST_EQ(cs_view_t::extent(), 4);
STATIC_ASSERT(cs_view.size() == 4, "size() is broken.");
BOOST_TEST_EQ(cs_view[0], 1);
STATIC_ASSERT(cs_view[2] == 3, "operator[] is broken.");
STATIC_ASSERT(cs_view.unsafe_at(1) == 2, "unsafe_at() is broken.");
BOOST_TEST_TRAIT_TRUE((std::is_same<cs_view_t::reference, int const&>));
// No constexpr here, because ds has no "address" during
// compilation.
BOOST_STATIC_VIEWS_UNUSED auto const ds_view =
boost::static_views::raw_view(ds);
}
int main(void)
{
test_construction();
return boost::report_errors();
}
|
{"hexsha": "8befe984fb0f4058920b5aaebee478eb2381c22b", "size": 4069, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "test/correctness/raw_view_pass.cpp", "max_stars_repo_name": "BoostGSoC17/static-map", "max_stars_repo_head_hexsha": "32537a69dbf693697577816ee06450fc4ec2a6fb", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 10.0, "max_stars_repo_stars_event_min_datetime": "2017-11-03T17:59:37.000Z", "max_stars_repo_stars_event_max_datetime": "2019-11-20T04:29:29.000Z", "max_issues_repo_path": "test/correctness/raw_view_pass.cpp", "max_issues_repo_name": "BoostGSoC17/static-map", "max_issues_repo_head_hexsha": "32537a69dbf693697577816ee06450fc4ec2a6fb", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 4.0, "max_issues_repo_issues_event_min_datetime": "2018-07-06T21:32:21.000Z", "max_issues_repo_issues_event_max_datetime": "2018-08-18T14:13:47.000Z", "max_forks_repo_path": "test/correctness/raw_view_pass.cpp", "max_forks_repo_name": "BoostGSoC17/static-map", "max_forks_repo_head_hexsha": "32537a69dbf693697577816ee06450fc4ec2a6fb", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 7.0, "max_forks_repo_forks_event_min_datetime": "2017-07-20T21:56:40.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-14T19:11:41.000Z", "avg_line_length": 46.7701149425, "max_line_length": 76, "alphanum_fraction": 0.7193413615, "num_tokens": 1169}
|
from gym import spaces
import numpy as np
import random
from itertools import groupby
from itertools import product
class TicTacToe():
    def __init__(self):
        """Initialise the board"""
        # initialise state as an array (could equally be an array or matrix)
        self.state = [np.nan for _ in range(9)]  # the board position
        # all possible numbers that may be placed on the board
        self.all_possible_numbers = [i for i in range(1, len(self.state) + 1)]
        self.winning_checks = [(0, 1, 2), (3, 4, 5), (6, 7, 8),  ## all rows
                               (0, 3, 6), (1, 4, 7), (2, 5, 8),  ## all columns
                               (0, 4, 8), (2, 4, 6)]             ## two diagonals
        self.reset()

    def is_winning(self, curr_state):
        """Takes state as an input and returns whether any row, column or diagonal has the winning sum.
        Example: Input state- [1, 2, 3, 4, nan, nan, nan, nan, nan]
        Output = False"""
        ## for each possible winning line, check that all three cells are
        ## filled and that they sum to 15 (the original guard tested the
        ## indices for nan rather than the cell values)
        for (i, j, k) in self.winning_checks:
            values = [curr_state[i], curr_state[j], curr_state[k]]
            if not any(np.isnan(values)) and sum(values) == 15:
                return True
        return False

    def is_terminal(self, curr_state):
        # Terminal state could be a winning state or a fully filled board
        if self.is_winning(curr_state):
            return True, 'Win'
        elif len(self.allowed_positions(curr_state)) == 0:
            return True, 'Tie'
        else:
            return False, 'Resume'

    def allowed_positions(self, curr_state):
        """Takes state as an input and returns all indexes that are blank"""
        return [i for i, val in enumerate(curr_state) if np.isnan(val)]

    def allowed_values(self, curr_state):
        """Takes the current state as input and returns all possible (unused) values that can be placed on the board"""
        used_values = [val for val in curr_state if not np.isnan(val)]
        agent_values = [val for val in self.all_possible_numbers if val not in used_values and val % 2 != 0]
        env_values = [val for val in self.all_possible_numbers if val not in used_values and val % 2 == 0]
        return (agent_values, env_values)

    def action_space(self, curr_state):
        """Takes the current state as input and returns all possible actions, i.e. all combinations of allowed positions and allowed values"""
        agent_actions = list(product(self.allowed_positions(curr_state), self.allowed_values(curr_state)[0]))
        env_actions = list(product(self.allowed_positions(curr_state), self.allowed_values(curr_state)[1]))
        return (agent_actions, env_actions)

    def state_transition(self, curr_state, curr_action):
        """Takes current state and action and returns the board position just after the agent's move.
        Example: Input state- [1, 2, 3, 4, nan, nan, nan, nan, nan], action- [7, 9] or [position, value]
        Output = [1, 2, 3, 4, nan, nan, nan, 9, nan]
        """
        out_state = curr_state.copy()
        out_state[curr_action[0]] = curr_action[1]  ## place the chosen number
        return out_state

    def step(self, curr_state, curr_action):
        """Takes current state and action and returns the next state, reward and whether the state is terminal.
        Hint: First, check the board position after the agent's move, whether the game is won/lost/tied.
        Then incorporate the environment's move and again check the board status.
        Example: Input state- [1, 2, 3, 4, nan, nan, nan, nan, nan], action- [7, 9] or [position, value]
        Output = ([1, 2, 3, 4, nan, nan, nan, 9, nan], -1, False)"""
        ## get the output state after the agent's move
        out_state = self.state_transition(curr_state, curr_action)
        ## check whether it is terminal
        terminal_bool, terminal_value = self.is_terminal(out_state)
        if terminal_bool:
            if terminal_value == "Win":    ## agent wins: reward 10
                return (out_state, 10, terminal_bool)
            elif terminal_value == "Tie":  ## tie: reward 0
                return (out_state, 0, terminal_bool)
        else:
            reward = -1
            ## play the environment's (random) move
            agent_actions, env_actions = self.action_space(out_state)
            env_random_action = random.choice(env_actions)
            out_state = self.state_transition(out_state, env_random_action)
            terminal_bool, terminal_value = self.is_terminal(out_state)
            # check for an environment win (or a tie on the last cell)
            if terminal_value == 'Win':
                reward = -10
            elif terminal_value == 'Tie':
                reward = 0
            return out_state, reward, terminal_bool

    def reset(self):
        """Reset the board to the empty starting position and return it"""
        self.state = [np.nan for _ in range(9)]
        return self.state
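
# Minimal usage sketch (not part of the original file; assumes the
# TicTacToe class above and the `random` import at the top).
if __name__ == "__main__":
    env = TicTacToe()
    state = env.reset()                         # empty board of nan's
    agent_actions, _ = env.action_space(state)  # (position, odd value) pairs
    action = random.choice(agent_actions)
    next_state, reward, done = env.step(state, action)
    print(next_state, reward, done)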
|
{"hexsha": "e15af54ad79308ec4bc2b4d78ca449ae95decd00", "size": 4815, "ext": "py", "lang": "Python", "max_stars_repo_path": "TCGame_Env.py", "max_stars_repo_name": "sameerg07/TicTacToe-RL", "max_stars_repo_head_hexsha": "cfad2719c043e90a25e344866ab5c505ffb83ec6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "TCGame_Env.py", "max_issues_repo_name": "sameerg07/TicTacToe-RL", "max_issues_repo_head_hexsha": "cfad2719c043e90a25e344866ab5c505ffb83ec6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "TCGame_Env.py", "max_forks_repo_name": "sameerg07/TicTacToe-RL", "max_forks_repo_head_hexsha": "cfad2719c043e90a25e344866ab5c505ffb83ec6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.5086206897, "max_line_length": 155, "alphanum_fraction": 0.6184839045, "include": true, "reason": "import numpy", "num_tokens": 1158}
|
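# NOTE (assumption, not part of the original file): this source expects to be
# `include`d into a module where ForwardDiff (`Dual`, `partials`),
# DiffEqDiffTools and LinearAlgebra are already in scope.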
struct DeivVecTag end
# J(f(x))*v
function auto_jacvec!(du, f, x, v,
cache1 = ForwardDiff.Dual{DeivVecTag}.(x, v),
cache2 = ForwardDiff.Dual{DeivVecTag}.(x, v))
cache1 .= Dual{DeivVecTag}.(x, v)
f(cache2,cache1)
du .= partials.(cache2, 1)
end
function auto_jacvec(f, x, v)
partials.(f(Dual{DeivVecTag}.(x, v)), 1)
end
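# num_jacvec(!) approximates J(f(x))*v with a forward difference,
# (f(x + ϵv) - f(x)) / ϵ, where ϵ ≈ sqrt(eps) scaled by the magnitude of x.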
function num_jacvec!(du,f,x,v,cache1 = similar(v),
cache2 = similar(v);
compute_f0 = true)
compute_f0 && (f(cache1,x))
T = eltype(x)
# Should it be min? max? mean?
ϵ = sqrt(eps(real(T))) * max(one(real(T)), abs(norm(x)))
@. x += ϵ*v
f(cache2,x)
@. x -= ϵ*v
@. du = (cache2 - cache1)/ϵ
end
function num_jacvec(f,x,v,f0=nothing)
    _f0 = f0 === nothing ? f(x) : f0
    T = eltype(x)
    # Should it be min? max? mean?
    ϵ = sqrt(eps(real(T))) * max(one(real(T)), abs(minimum(x)))
    (f(x.+ϵ.*v) .- _f0)./ϵ  # reuse the precomputed f(x) rather than calling f twice
end
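# num_hesvec(!) and numauto_hesvec(!) compute H(f(x))*v as a central
# difference of the gradient, (∇f(x + ϵv) - ∇f(x - ϵv)) / (2ϵ); `numauto`
# obtains the gradient with ForwardDiff, `num` with finite differences.
# autonum_hesvec(!) instead pushes Dual numbers through a finite-difference
# gradient.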
function num_hesvec!(du,f,x,v,
cache1 = similar(v),
cache2 = similar(v),
cache3 = similar(v))
cache = DiffEqDiffTools.GradientCache(v[1],cache1,Val{:central})
g = let f=f,cache=cache
(dx,x) -> DiffEqDiffTools.finite_difference_gradient!(dx,f,x,cache)
end
T = eltype(x)
# Should it be min? max? mean?
ϵ = sqrt(eps(real(T))) * max(one(real(T)), abs(norm(x)))
@. x += ϵ*v
g(cache2,x)
@. x -= 2ϵ*v
g(cache3,x)
@. du = (cache2 - cache3)/(2ϵ)
end
function num_hesvec(f,x,v)
g = (x) -> DiffEqDiffTools.finite_difference_gradient(f,x)
T = eltype(x)
# Should it be min? max? mean?
ϵ = sqrt(eps(real(T))) * max(one(real(T)), abs(norm(x)))
x += ϵ*v
gxp = g(x)
x -= 2ϵ*v
gxm = g(x)
(gxp - gxm)/(2ϵ)
end
function numauto_hesvec!(du,f,x,v,
cache = ForwardDiff.GradientConfig(f,v),
cache1 = similar(v),
cache2 = similar(v))
    g = let f=f, cache=cache
        (dx,x) -> ForwardDiff.gradient!(dx,f,x,cache)
    end
T = eltype(x)
# Should it be min? max? mean?
ϵ = sqrt(eps(real(T))) * max(one(real(T)), abs(norm(x)))
@. x += ϵ*v
g(cache1,x)
@. x -= 2ϵ*v
g(cache2,x)
@. du = (cache1 - cache2)/(2ϵ)
end
function numauto_hesvec(f,x,v)
g = (x) -> ForwardDiff.gradient(f,x)
T = eltype(x)
# Should it be min? max? mean?
ϵ = sqrt(eps(real(T))) * max(one(real(T)), abs(norm(x)))
x += ϵ*v
gxp = g(x)
x -= 2ϵ*v
gxm = g(x)
(gxp - gxm)/(2ϵ)
end
function autonum_hesvec!(du,f,x,v,
cache1 = similar(v),
cache2 = ForwardDiff.Dual{DeivVecTag}.(x, v),
cache3 = ForwardDiff.Dual{DeivVecTag}.(x, v))
cache = DiffEqDiffTools.GradientCache(v[1],cache1,Val{:central})
g = (dx,x) -> DiffEqDiffTools.finite_difference_gradient!(dx,f,x,cache)
cache2 .= Dual{DeivVecTag}.(x, v)
g(cache3,cache2)
du .= partials.(cache3, 1)
end
function autonum_hesvec(f,x,v)
g = (x) -> DiffEqDiffTools.finite_difference_gradient(f,x)
partials.(g(Dual{DeivVecTag}.(x, v)), 1)
end
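# The *_hesvecgrad variants take the gradient g directly (in-place g(dx, x)
# for the ! versions, out-of-place g(x) otherwise) instead of deriving it
# from the scalar function f.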
function num_hesvecgrad!(du,g,x,v,
cache2 = similar(v),
cache3 = similar(v))
T = eltype(x)
# Should it be min? max? mean?
ϵ = sqrt(eps(real(T))) * max(one(real(T)), abs(norm(x)))
@. x += ϵ*v
g(cache2,x)
@. x -= 2ϵ*v
g(cache3,x)
@. du = (cache2 - cache3)/(2ϵ)
end
function num_hesvecgrad(g,x,v)
T = eltype(x)
# Should it be min? max? mean?
ϵ = sqrt(eps(real(T))) * max(one(real(T)), abs(norm(x)))
x += ϵ*v
gxp = g(x)
x -= 2ϵ*v
gxm = g(x)
(gxp - gxm)/(2ϵ)
end
function auto_hesvecgrad!(du,g,x,v,
cache2 = ForwardDiff.Dual{DeivVecTag}.(x, v),
cache3 = ForwardDiff.Dual{DeivVecTag}.(x, v))
cache2 .= Dual{DeivVecTag}.(x, v)
g(cache3,cache2)
du .= partials.(cache3, 1)
end
function auto_hesvecgrad(g,x,v)
partials.(g(Dual{DeivVecTag}.(x, v)), 1)
end
### Operator Forms
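# JacVec/HesVec/HesVecGrad wrap f (or g) as lazy linear operators: `L * v`
# and `mul!(du, L, v)` compute the matrix-vector product without ever
# materialising the Jacobian or Hessian.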
struct JacVec{F,T1,T2,uType}
f::F
cache1::T1
cache2::T2
u::uType
autodiff::Bool
end
function JacVec(f,u::AbstractArray;autodiff=true)
if autodiff
cache1 = ForwardDiff.Dual{DeivVecTag}.(u, u)
cache2 = ForwardDiff.Dual{DeivVecTag}.(u, u)
else
cache1 = similar(u)
cache2 = similar(u)
end
JacVec(f,cache1,cache2,u,autodiff)
end
Base.size(L::JacVec) = (length(L.cache1),length(L.cache1))
Base.size(L::JacVec,i::Int) = length(L.cache1)
Base.:*(L::JacVec,x::AbstractVector) = L.autodiff ? auto_jacvec(_u->L.f(_u),L.u,x) : num_jacvec(_u->L.f(_u),L.u,x)
function LinearAlgebra.mul!(du::AbstractVector,L::JacVec,v::AbstractVector)
if L.autodiff
auto_jacvec!(du,(_du,_u)->L.f(_du,_u),L.u,v,L.cache1,L.cache2)
else
num_jacvec!(du,(_du,_u)->L.f(_du,_u),L.u,v,L.cache1,L.cache2)
end
end
struct HesVec{F,T1,T2,uType}
f::F
cache1::T1
cache2::T2
cache3::T2
u::uType
autodiff::Bool
end
function HesVec(f,u::AbstractArray;autodiff=true)
if autodiff
cache1 = ForwardDiff.GradientConfig(f,u)
cache2 = similar(u)
cache3 = similar(u)
else
cache1 = similar(u)
cache2 = similar(u)
cache3 = similar(u)
end
HesVec(f,cache1,cache2,cache3,u,autodiff)
end
Base.size(L::HesVec) = (length(L.cache2),length(L.cache2))
Base.size(L::HesVec,i::Int) = length(L.cache2)
Base.:*(L::HesVec,x::AbstractVector) = L.autodiff ? numauto_hesvec(L.f,L.u,x) : num_hesvec(L.f,L.u,x)
function LinearAlgebra.mul!(du::AbstractVector,L::HesVec,v::AbstractVector)
if L.autodiff
numauto_hesvec!(du,L.f,L.u,v,L.cache1,L.cache2,L.cache3)
else
num_hesvec!(du,L.f,L.u,v,L.cache1,L.cache2,L.cache3)
end
end
struct HesVecGrad{G,T1,T2,uType}
g::G
cache1::T1
cache2::T2
u::uType
autodiff::Bool
end
function HesVecGrad(g,u::AbstractArray;autodiff=false)
if autodiff
cache1 = ForwardDiff.Dual{DeivVecTag}.(u, u)
cache2 = ForwardDiff.Dual{DeivVecTag}.(u, u)
else
cache1 = similar(u)
cache2 = similar(u)
end
HesVecGrad(g,cache1,cache2,u,autodiff)
end
Base.size(L::HesVecGrad) = (length(L.cache2),length(L.cache2))
Base.size(L::HesVecGrad,i::Int) = length(L.cache2)
Base.:*(L::HesVecGrad,x::AbstractVector) = L.autodiff ? auto_hesvecgrad(L.g,L.u,x) : num_hesvecgrad(L.g,L.u,x)
function LinearAlgebra.mul!(du::AbstractVector,L::HesVecGrad,v::AbstractVector)
if L.autodiff
auto_hesvecgrad!(du,L.g,L.u,v,L.cache1,L.cache2)
else
num_hesvecgrad!(du,L.g,L.u,v,L.cache1,L.cache2)
end
end
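# Minimal usage sketch (illustrative only, not part of the original file;
# assumes the module context described at the top):
#
#     f!(du, u) = (du .= u .^ 2)
#     u  = rand(4); v = rand(4); du = similar(u)
#     L  = JacVec(f!, u)       # lazy J(f)(u); in-place f! is what mul! expects
#     mul!(du, L, v)           # du ≈ 2 .* u .* v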
|
{"hexsha": "ea3815447503e5bd88534335a73047025f864027", "size": 6746, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/differentiation/jaches_products.jl", "max_stars_repo_name": "devmotion/SparseDiffTools.jl", "max_stars_repo_head_hexsha": "7dc407ad10b0a0e82627ea7d9dfcd2490fdf45a3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2019-05-09T19:18:39.000Z", "max_stars_repo_stars_event_max_datetime": "2019-05-27T13:36:11.000Z", "max_issues_repo_path": "src/differentiation/jaches_products.jl", "max_issues_repo_name": "devmotion/SparseDiffTools.jl", "max_issues_repo_head_hexsha": "7dc407ad10b0a0e82627ea7d9dfcd2490fdf45a3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2019-05-09T19:30:35.000Z", "max_issues_repo_issues_event_max_datetime": "2019-05-24T15:08:37.000Z", "max_forks_repo_path": "src/differentiation/jaches_products.jl", "max_forks_repo_name": "devmotion/SparseDiffTools.jl", "max_forks_repo_head_hexsha": "7dc407ad10b0a0e82627ea7d9dfcd2490fdf45a3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-05-27T14:15:16.000Z", "max_forks_repo_forks_event_max_datetime": "2019-06-24T16:52:22.000Z", "avg_line_length": 27.6475409836, "max_line_length": 114, "alphanum_fraction": 0.5772309517, "num_tokens": 2357}
|
[STATEMENT]
lemma check_addition_l_check_add:
assumes \<open>(A, B) \<in> fmap_polys_rel\<close> and \<open>(r, r') \<in> sorted_poly_rel O mset_poly_rel\<close>
\<open>(p, p') \<in> Id\<close> \<open>(q, q') \<in> Id\<close> \<open>(i, i') \<in> nat_rel\<close>
\<open>(\<V>', \<V>) \<in> \<langle>var_rel\<rangle>set_rel\<close>
shows
\<open>check_addition_l spec A \<V>' p q i r \<le> \<Down> {(st, b). (\<not>is_cfailed st \<longleftrightarrow> b) \<and>
(is_cfound st \<longrightarrow> spec = r)} (check_add B \<V> p' q' i' r')\<close>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. check_addition_l spec A \<V>' p q i r \<le> \<Down> {(st, b). (\<not> is_cfailed st) = b \<and> (is_cfound st \<longrightarrow> spec = r)} (check_add B \<V> p' q' i' r')
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. check_addition_l spec A \<V>' p q i r \<le> \<Down> {(st, b). (\<not> is_cfailed st) = b \<and> (is_cfound st \<longrightarrow> spec = r)} (check_add B \<V> p' q' i' r')
[PROOF STEP]
have [refine]:
\<open>add_poly_l (p, q) \<le> \<Down> (sorted_poly_rel O mset_poly_rel) (add_poly_spec p' q')\<close>
if \<open>(p, p') \<in> sorted_poly_rel O mset_poly_rel\<close>
\<open>(q, q') \<in> sorted_poly_rel O mset_poly_rel\<close>
for p p' q q'
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. add_poly_l (p, q) \<le> \<Down> (sorted_poly_rel O mset_poly_rel) (add_poly_spec p' q')
[PROOF STEP]
using that
[PROOF STATE]
proof (prove)
using this:
(p, p') \<in> sorted_poly_rel O mset_poly_rel
(q, q') \<in> sorted_poly_rel O mset_poly_rel
goal (1 subgoal):
1. add_poly_l (p, q) \<le> \<Down> (sorted_poly_rel O mset_poly_rel) (add_poly_spec p' q')
[PROOF STEP]
by (auto intro!: add_poly_l_add_poly_p'[THEN order_trans] ref_two_step'
add_poly_p'_add_poly_spec
simp flip: conc_fun_chain)
[PROOF STATE]
proof (state)
this:
\<lbrakk>(?p3, ?p'3) \<in> sorted_poly_rel O mset_poly_rel; (?q3, ?q'3) \<in> sorted_poly_rel O mset_poly_rel\<rbrakk> \<Longrightarrow> add_poly_l (?p3, ?q3) \<le> \<Down> (sorted_poly_rel O mset_poly_rel) (add_poly_spec ?p'3 ?q'3)
goal (1 subgoal):
1. check_addition_l spec A \<V>' p q i r \<le> \<Down> {(st, b). (\<not> is_cfailed st) = b \<and> (is_cfound st \<longrightarrow> spec = r)} (check_add B \<V> p' q' i' r')
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. check_addition_l spec A \<V>' p q i r \<le> \<Down> {(st, b). (\<not> is_cfailed st) = b \<and> (is_cfound st \<longrightarrow> spec = r)} (check_add B \<V> p' q' i' r')
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
(A, B) \<in> \<langle>nat_rel, sorted_poly_rel O mset_poly_rel\<rangle>fmap_rel
(r, r') \<in> sorted_poly_rel O mset_poly_rel
(p, p') \<in> nat_rel
(q, q') \<in> nat_rel
(i, i') \<in> nat_rel
(\<V>', \<V>) \<in> \<langle>var_rel\<rangle>set_rel
goal (1 subgoal):
1. check_addition_l spec A \<V>' p q i r \<le> \<Down> {(st, b). (\<not> is_cfailed st) = b \<and> (is_cfound st \<longrightarrow> spec = r)} (check_add B \<V> p' q' i' r')
[PROOF STEP]
unfolding check_addition_l_def
check_not_equal_dom_err_def
[PROOF STATE]
proof (prove)
using this:
(A, B) \<in> \<langle>nat_rel, sorted_poly_rel O mset_poly_rel\<rangle>fmap_rel
(r, r') \<in> sorted_poly_rel O mset_poly_rel
(p, p') \<in> nat_rel
(q, q') \<in> nat_rel
(i, i') \<in> nat_rel
(\<V>', \<V>) \<in> \<langle>var_rel\<rangle>set_rel
goal (1 subgoal):
1. (let b = p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>' in if \<not> b then RETURN (error_msg i ((if p \<notin># dom_m A then error_msg_notin_dom p else []) @ (if q \<notin># dom_m A then error_msg_notin_dom p else []) @ (if i \<in># dom_m A then error_msg_reused_dom p else []))) else ASSERT (p \<in># dom_m A) \<bind> (\<lambda>_. let p = the (fmlookup A p) in ASSERT (q \<in># dom_m A) \<bind> (\<lambda>_. let q = the (fmlookup A q) in add_poly_l (p, q) \<bind> (\<lambda>pq. weak_equality_l pq r \<bind> (\<lambda>b. weak_equality_l r spec \<bind> (\<lambda>b'. if b then if b' then RETURN CFOUND else RETURN CSUCCESS else SPEC (\<lambda>_. True) \<bind> (\<lambda>c. RETURN (error_msg i c)))))))) \<le> \<Down> {(st, b). (\<not> is_cfailed st) = b \<and> (is_cfound st \<longrightarrow> spec = r)} (check_add B \<V> p' q' i' r')
[PROOF STEP]
apply -
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>(A, B) \<in> \<langle>nat_rel, sorted_poly_rel O mset_poly_rel\<rangle>fmap_rel; (r, r') \<in> sorted_poly_rel O mset_poly_rel; (p, p') \<in> nat_rel; (q, q') \<in> nat_rel; (i, i') \<in> nat_rel; (\<V>', \<V>) \<in> \<langle>var_rel\<rangle>set_rel\<rbrakk> \<Longrightarrow> (let b = p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>' in if \<not> b then RETURN (error_msg i ((if p \<notin># dom_m A then error_msg_notin_dom p else []) @ (if q \<notin># dom_m A then error_msg_notin_dom p else []) @ (if i \<in># dom_m A then error_msg_reused_dom p else []))) else ASSERT (p \<in># dom_m A) \<bind> (\<lambda>_. let p = the (fmlookup A p) in ASSERT (q \<in># dom_m A) \<bind> (\<lambda>_. let q = the (fmlookup A q) in add_poly_l (p, q) \<bind> (\<lambda>pq. weak_equality_l pq r \<bind> (\<lambda>b. weak_equality_l r spec \<bind> (\<lambda>b'. if b then if b' then RETURN CFOUND else RETURN CSUCCESS else SPEC (\<lambda>_. True) \<bind> (\<lambda>c. RETURN (error_msg i c)))))))) \<le> \<Down> {(st, b). (\<not> is_cfailed st) = b \<and> (is_cfound st \<longrightarrow> spec = r)} (check_add B \<V> p' q' i' r')
[PROOF STEP]
apply (rule order_trans)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<lbrakk>(A, B) \<in> \<langle>nat_rel, sorted_poly_rel O mset_poly_rel\<rangle>fmap_rel; (r, r') \<in> sorted_poly_rel O mset_poly_rel; (p, p') \<in> nat_rel; (q, q') \<in> nat_rel; (i, i') \<in> nat_rel; (\<V>', \<V>) \<in> \<langle>var_rel\<rangle>set_rel\<rbrakk> \<Longrightarrow> (let b = p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>' in if \<not> b then RETURN (error_msg i ((if p \<notin># dom_m A then error_msg_notin_dom p else []) @ (if q \<notin># dom_m A then error_msg_notin_dom p else []) @ (if i \<in># dom_m A then error_msg_reused_dom p else []))) else ASSERT (p \<in># dom_m A) \<bind> (\<lambda>_. let p = the (fmlookup A p) in ASSERT (q \<in># dom_m A) \<bind> (\<lambda>_. let q = the (fmlookup A q) in add_poly_l (p, q) \<bind> (\<lambda>pq. weak_equality_l pq r \<bind> (\<lambda>b. weak_equality_l r spec \<bind> (\<lambda>b'. if b then if b' then RETURN CFOUND else RETURN CSUCCESS else SPEC (\<lambda>_. True) \<bind> (\<lambda>c. RETURN (error_msg i c)))))))) \<le> ?y6
2. \<lbrakk>(A, B) \<in> \<langle>nat_rel, sorted_poly_rel O mset_poly_rel\<rangle>fmap_rel; (r, r') \<in> sorted_poly_rel O mset_poly_rel; (p, p') \<in> nat_rel; (q, q') \<in> nat_rel; (i, i') \<in> nat_rel; (\<V>', \<V>) \<in> \<langle>var_rel\<rangle>set_rel\<rbrakk> \<Longrightarrow> ?y6 \<le> \<Down> {(st, b). (\<not> is_cfailed st) = b \<and> (is_cfound st \<longrightarrow> spec = r)} (check_add B \<V> p' q' i' r')
[PROOF STEP]
defer
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<lbrakk>(A, B) \<in> \<langle>nat_rel, sorted_poly_rel O mset_poly_rel\<rangle>fmap_rel; (r, r') \<in> sorted_poly_rel O mset_poly_rel; (p, p') \<in> nat_rel; (q, q') \<in> nat_rel; (i, i') \<in> nat_rel; (\<V>', \<V>) \<in> \<langle>var_rel\<rangle>set_rel\<rbrakk> \<Longrightarrow> ?y6 \<le> \<Down> {(st, b). (\<not> is_cfailed st) = b \<and> (is_cfound st \<longrightarrow> spec = r)} (check_add B \<V> p' q' i' r')
2. \<lbrakk>(A, B) \<in> \<langle>nat_rel, sorted_poly_rel O mset_poly_rel\<rangle>fmap_rel; (r, r') \<in> sorted_poly_rel O mset_poly_rel; (p, p') \<in> nat_rel; (q, q') \<in> nat_rel; (i, i') \<in> nat_rel; (\<V>', \<V>) \<in> \<langle>var_rel\<rangle>set_rel\<rbrakk> \<Longrightarrow> (let b = p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>' in if \<not> b then RETURN (error_msg i ((if p \<notin># dom_m A then error_msg_notin_dom p else []) @ (if q \<notin># dom_m A then error_msg_notin_dom p else []) @ (if i \<in># dom_m A then error_msg_reused_dom p else []))) else ASSERT (p \<in># dom_m A) \<bind> (\<lambda>_. let p = the (fmlookup A p) in ASSERT (q \<in># dom_m A) \<bind> (\<lambda>_. let q = the (fmlookup A q) in add_poly_l (p, q) \<bind> (\<lambda>pq. weak_equality_l pq r \<bind> (\<lambda>b. weak_equality_l r spec \<bind> (\<lambda>b'. if b then if b' then RETURN CFOUND else RETURN CSUCCESS else SPEC (\<lambda>_. True) \<bind> (\<lambda>c. RETURN (error_msg i c)))))))) \<le> ?y6
[PROOF STEP]
apply (rule ref_two_step')
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<lbrakk>(A, B) \<in> \<langle>nat_rel, sorted_poly_rel O mset_poly_rel\<rangle>fmap_rel; (r, r') \<in> sorted_poly_rel O mset_poly_rel; (p, p') \<in> nat_rel; (q, q') \<in> nat_rel; (i, i') \<in> nat_rel; (\<V>', \<V>) \<in> \<langle>var_rel\<rangle>set_rel\<rbrakk> \<Longrightarrow> ?A9 \<le> check_add B \<V> p' q' i' r'
2. \<lbrakk>(A, B) \<in> \<langle>nat_rel, sorted_poly_rel O mset_poly_rel\<rangle>fmap_rel; (r, r') \<in> sorted_poly_rel O mset_poly_rel; (p, p') \<in> nat_rel; (q, q') \<in> nat_rel; (i, i') \<in> nat_rel; (\<V>', \<V>) \<in> \<langle>var_rel\<rangle>set_rel\<rbrakk> \<Longrightarrow> (let b = p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>' in if \<not> b then RETURN (error_msg i ((if p \<notin># dom_m A then error_msg_notin_dom p else []) @ (if q \<notin># dom_m A then error_msg_notin_dom p else []) @ (if i \<in># dom_m A then error_msg_reused_dom p else []))) else ASSERT (p \<in># dom_m A) \<bind> (\<lambda>_. let p = the (fmlookup A p) in ASSERT (q \<in># dom_m A) \<bind> (\<lambda>_. let q = the (fmlookup A q) in add_poly_l (p, q) \<bind> (\<lambda>pq. weak_equality_l pq r \<bind> (\<lambda>b. weak_equality_l r spec \<bind> (\<lambda>b'. if b then if b' then RETURN CFOUND else RETURN CSUCCESS else SPEC (\<lambda>_. True) \<bind> (\<lambda>c. RETURN (error_msg i c)))))))) \<le> \<Down> {(st, b). (\<not> is_cfailed st) = b \<and> (is_cfound st \<longrightarrow> spec = r)} ?A9
[PROOF STEP]
apply (rule check_add_alt_def)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>(A, B) \<in> \<langle>nat_rel, sorted_poly_rel O mset_poly_rel\<rangle>fmap_rel; (r, r') \<in> sorted_poly_rel O mset_poly_rel; (p, p') \<in> nat_rel; (q, q') \<in> nat_rel; (i, i') \<in> nat_rel; (\<V>', \<V>) \<in> \<langle>var_rel\<rangle>set_rel\<rbrakk> \<Longrightarrow> (let b = p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>' in if \<not> b then RETURN (error_msg i ((if p \<notin># dom_m A then error_msg_notin_dom p else []) @ (if q \<notin># dom_m A then error_msg_notin_dom p else []) @ (if i \<in># dom_m A then error_msg_reused_dom p else []))) else ASSERT (p \<in># dom_m A) \<bind> (\<lambda>_. let p = the (fmlookup A p) in ASSERT (q \<in># dom_m A) \<bind> (\<lambda>_. let q = the (fmlookup A q) in add_poly_l (p, q) \<bind> (\<lambda>pq. weak_equality_l pq r \<bind> (\<lambda>b. weak_equality_l r spec \<bind> (\<lambda>b'. if b then if b' then RETURN CFOUND else RETURN CSUCCESS else SPEC (\<lambda>_. True) \<bind> (\<lambda>c. RETURN (error_msg i c)))))))) \<le> \<Down> {(st, b). (\<not> is_cfailed st) = b \<and> (is_cfound st \<longrightarrow> spec = r)} (SPEC (\<lambda>b. b \<longrightarrow> p' \<in># dom_m B \<and> q' \<in># dom_m B \<and> i' \<notin># dom_m B \<and> vars r' \<subseteq> \<V>) \<bind> (\<lambda>b. if \<not> b then RETURN False else ASSERT (p' \<in># dom_m B) \<bind> (\<lambda>_. let p = the (fmlookup B p') in ASSERT (q' \<in># dom_m B) \<bind> (\<lambda>_. let q = the (fmlookup B q') in add_poly_spec p q \<bind> (\<lambda>pq. weak_equality pq r' \<bind> RETURN)))))
[PROOF STEP]
apply refine_rcg
[PROOF STATE]
proof (prove)
goal (8 subgoals):
1. \<lbrakk>(A, B) \<in> \<langle>nat_rel, sorted_poly_rel O mset_poly_rel\<rangle>fmap_rel; (r, r') \<in> sorted_poly_rel O mset_poly_rel; (p, p') \<in> nat_rel; (q, q') \<in> nat_rel; (i, i') \<in> nat_rel; (\<V>', \<V>) \<in> \<langle>var_rel\<rangle>set_rel\<rbrakk> \<Longrightarrow> RETURN (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>') \<le> SPEC (\<lambda>b. b \<longrightarrow> p' \<in># dom_m B \<and> q' \<in># dom_m B \<and> i' \<notin># dom_m B \<and> vars r' \<subseteq> \<V>)
2. \<And>b. \<lbrakk>(A, B) \<in> \<langle>nat_rel, sorted_poly_rel O mset_poly_rel\<rangle>fmap_rel; (r, r') \<in> sorted_poly_rel O mset_poly_rel; (p, p') \<in> nat_rel; (q, q') \<in> nat_rel; (i, i') \<in> nat_rel; (\<V>', \<V>) \<in> \<langle>var_rel\<rangle>set_rel; (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>', b) \<in> bool_rel\<rbrakk> \<Longrightarrow> (\<not> (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>')) = (\<not> b)
3. \<And>b. \<lbrakk>(A, B) \<in> \<langle>nat_rel, sorted_poly_rel O mset_poly_rel\<rangle>fmap_rel; (r, r') \<in> sorted_poly_rel O mset_poly_rel; (p, p') \<in> nat_rel; (q, q') \<in> nat_rel; (i, i') \<in> nat_rel; (\<V>', \<V>) \<in> \<langle>var_rel\<rangle>set_rel; (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>', b) \<in> bool_rel; \<not> (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>'); \<not> b\<rbrakk> \<Longrightarrow> (error_msg i ((if p \<notin># dom_m A then error_msg_notin_dom p else []) @ (if q \<notin># dom_m A then error_msg_notin_dom p else []) @ (if i \<in># dom_m A then error_msg_reused_dom p else [])), False) \<in> {(st, b). (\<not> is_cfailed st) = b \<and> (is_cfound st \<longrightarrow> spec = r)}
4. \<And>b. \<lbrakk>(A, B) \<in> \<langle>nat_rel, sorted_poly_rel O mset_poly_rel\<rangle>fmap_rel; (r, r') \<in> sorted_poly_rel O mset_poly_rel; (p, p') \<in> nat_rel; (q, q') \<in> nat_rel; (i, i') \<in> nat_rel; (\<V>', \<V>) \<in> \<langle>var_rel\<rangle>set_rel; (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>', b) \<in> bool_rel; \<not> \<not> (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>'); \<not> \<not> b; p' \<in># dom_m B\<rbrakk> \<Longrightarrow> p \<in># dom_m A
5. \<And>b. \<lbrakk>(A, B) \<in> \<langle>nat_rel, sorted_poly_rel O mset_poly_rel\<rangle>fmap_rel; (r, r') \<in> sorted_poly_rel O mset_poly_rel; (p, p') \<in> nat_rel; (q, q') \<in> nat_rel; (i, i') \<in> nat_rel; (\<V>', \<V>) \<in> \<langle>var_rel\<rangle>set_rel; (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>', b) \<in> bool_rel; \<not> \<not> (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>'); \<not> \<not> b; p' \<in># dom_m B; p \<in># dom_m A; q' \<in># dom_m B\<rbrakk> \<Longrightarrow> q \<in># dom_m A
6. \<And>b. \<lbrakk>(A, B) \<in> \<langle>nat_rel, sorted_poly_rel O mset_poly_rel\<rangle>fmap_rel; (r, r') \<in> sorted_poly_rel O mset_poly_rel; (p, p') \<in> nat_rel; (q, q') \<in> nat_rel; (i, i') \<in> nat_rel; (\<V>', \<V>) \<in> \<langle>var_rel\<rangle>set_rel; (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>', b) \<in> bool_rel; \<not> \<not> (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>'); \<not> \<not> b; p' \<in># dom_m B; p \<in># dom_m A; q' \<in># dom_m B; q \<in># dom_m A\<rbrakk> \<Longrightarrow> (the (fmlookup A p), the (fmlookup B p')) \<in> sorted_poly_rel O mset_poly_rel
7. \<And>b. \<lbrakk>(A, B) \<in> \<langle>nat_rel, sorted_poly_rel O mset_poly_rel\<rangle>fmap_rel; (r, r') \<in> sorted_poly_rel O mset_poly_rel; (p, p') \<in> nat_rel; (q, q') \<in> nat_rel; (i, i') \<in> nat_rel; (\<V>', \<V>) \<in> \<langle>var_rel\<rangle>set_rel; (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>', b) \<in> bool_rel; \<not> \<not> (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>'); \<not> \<not> b; p' \<in># dom_m B; p \<in># dom_m A; q' \<in># dom_m B; q \<in># dom_m A\<rbrakk> \<Longrightarrow> (the (fmlookup A q), the (fmlookup B q')) \<in> sorted_poly_rel O mset_poly_rel
8. \<And>b pq pqa ba eqa. \<lbrakk>(A, B) \<in> \<langle>nat_rel, sorted_poly_rel O mset_poly_rel\<rangle>fmap_rel; (r, r') \<in> sorted_poly_rel O mset_poly_rel; (p, p') \<in> nat_rel; (q, q') \<in> nat_rel; (i, i') \<in> nat_rel; (\<V>', \<V>) \<in> \<langle>var_rel\<rangle>set_rel; (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>', b) \<in> bool_rel; \<not> \<not> (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>'); \<not> \<not> b; p' \<in># dom_m B; p \<in># dom_m A; q' \<in># dom_m B; q \<in># dom_m A; (pq, pqa) \<in> sorted_poly_rel O mset_poly_rel; (ba, eqa) \<in> bool_rel\<rbrakk> \<Longrightarrow> weak_equality_l r spec \<bind> (\<lambda>b'. if ba then if b' then RETURN CFOUND else RETURN CSUCCESS else SPEC (\<lambda>_. True) \<bind> (\<lambda>c. RETURN (error_msg i c))) \<le> SPEC (\<lambda>c. (c, eqa) \<in> {(st, b). (\<not> is_cfailed st) = b \<and> (is_cfound st \<longrightarrow> spec = r)})
[PROOF STEP]
subgoal
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>(A, B) \<in> \<langle>nat_rel, sorted_poly_rel O mset_poly_rel\<rangle>fmap_rel; (r, r') \<in> sorted_poly_rel O mset_poly_rel; (p, p') \<in> nat_rel; (q, q') \<in> nat_rel; (i, i') \<in> nat_rel; (\<V>', \<V>) \<in> \<langle>var_rel\<rangle>set_rel\<rbrakk> \<Longrightarrow> RETURN (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>') \<le> SPEC (\<lambda>b. b \<longrightarrow> p' \<in># dom_m B \<and> q' \<in># dom_m B \<and> i' \<notin># dom_m B \<and> vars r' \<subseteq> \<V>)
[PROOF STEP]
by (drule sorted_poly_rel_vars_llist)
(auto simp: set_rel_def var_rel_def br_def)
[PROOF STATE]
proof (prove)
goal (7 subgoals):
1. \<And>b. \<lbrakk>(A, B) \<in> \<langle>nat_rel, sorted_poly_rel O mset_poly_rel\<rangle>fmap_rel; (r, r') \<in> sorted_poly_rel O mset_poly_rel; (p, p') \<in> nat_rel; (q, q') \<in> nat_rel; (i, i') \<in> nat_rel; (\<V>', \<V>) \<in> \<langle>var_rel\<rangle>set_rel; (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>', b) \<in> bool_rel\<rbrakk> \<Longrightarrow> (\<not> (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>')) = (\<not> b)
2. \<And>b. \<lbrakk>(A, B) \<in> \<langle>nat_rel, sorted_poly_rel O mset_poly_rel\<rangle>fmap_rel; (r, r') \<in> sorted_poly_rel O mset_poly_rel; (p, p') \<in> nat_rel; (q, q') \<in> nat_rel; (i, i') \<in> nat_rel; (\<V>', \<V>) \<in> \<langle>var_rel\<rangle>set_rel; (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>', b) \<in> bool_rel; \<not> (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>'); \<not> b\<rbrakk> \<Longrightarrow> (error_msg i ((if p \<notin># dom_m A then error_msg_notin_dom p else []) @ (if q \<notin># dom_m A then error_msg_notin_dom p else []) @ (if i \<in># dom_m A then error_msg_reused_dom p else [])), False) \<in> {(st, b). (\<not> is_cfailed st) = b \<and> (is_cfound st \<longrightarrow> spec = r)}
3. \<And>b. \<lbrakk>(A, B) \<in> \<langle>nat_rel, sorted_poly_rel O mset_poly_rel\<rangle>fmap_rel; (r, r') \<in> sorted_poly_rel O mset_poly_rel; (p, p') \<in> nat_rel; (q, q') \<in> nat_rel; (i, i') \<in> nat_rel; (\<V>', \<V>) \<in> \<langle>var_rel\<rangle>set_rel; (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>', b) \<in> bool_rel; \<not> \<not> (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>'); \<not> \<not> b; p' \<in># dom_m B\<rbrakk> \<Longrightarrow> p \<in># dom_m A
4. \<And>b. \<lbrakk>(A, B) \<in> \<langle>nat_rel, sorted_poly_rel O mset_poly_rel\<rangle>fmap_rel; (r, r') \<in> sorted_poly_rel O mset_poly_rel; (p, p') \<in> nat_rel; (q, q') \<in> nat_rel; (i, i') \<in> nat_rel; (\<V>', \<V>) \<in> \<langle>var_rel\<rangle>set_rel; (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>', b) \<in> bool_rel; \<not> \<not> (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>'); \<not> \<not> b; p' \<in># dom_m B; p \<in># dom_m A; q' \<in># dom_m B\<rbrakk> \<Longrightarrow> q \<in># dom_m A
5. \<And>b. \<lbrakk>(A, B) \<in> \<langle>nat_rel, sorted_poly_rel O mset_poly_rel\<rangle>fmap_rel; (r, r') \<in> sorted_poly_rel O mset_poly_rel; (p, p') \<in> nat_rel; (q, q') \<in> nat_rel; (i, i') \<in> nat_rel; (\<V>', \<V>) \<in> \<langle>var_rel\<rangle>set_rel; (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>', b) \<in> bool_rel; \<not> \<not> (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>'); \<not> \<not> b; p' \<in># dom_m B; p \<in># dom_m A; q' \<in># dom_m B; q \<in># dom_m A\<rbrakk> \<Longrightarrow> (the (fmlookup A p), the (fmlookup B p')) \<in> sorted_poly_rel O mset_poly_rel
6. \<And>b. \<lbrakk>(A, B) \<in> \<langle>nat_rel, sorted_poly_rel O mset_poly_rel\<rangle>fmap_rel; (r, r') \<in> sorted_poly_rel O mset_poly_rel; (p, p') \<in> nat_rel; (q, q') \<in> nat_rel; (i, i') \<in> nat_rel; (\<V>', \<V>) \<in> \<langle>var_rel\<rangle>set_rel; (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>', b) \<in> bool_rel; \<not> \<not> (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>'); \<not> \<not> b; p' \<in># dom_m B; p \<in># dom_m A; q' \<in># dom_m B; q \<in># dom_m A\<rbrakk> \<Longrightarrow> (the (fmlookup A q), the (fmlookup B q')) \<in> sorted_poly_rel O mset_poly_rel
7. \<And>b pq pqa ba eqa. \<lbrakk>(A, B) \<in> \<langle>nat_rel, sorted_poly_rel O mset_poly_rel\<rangle>fmap_rel; (r, r') \<in> sorted_poly_rel O mset_poly_rel; (p, p') \<in> nat_rel; (q, q') \<in> nat_rel; (i, i') \<in> nat_rel; (\<V>', \<V>) \<in> \<langle>var_rel\<rangle>set_rel; (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>', b) \<in> bool_rel; \<not> \<not> (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>'); \<not> \<not> b; p' \<in># dom_m B; p \<in># dom_m A; q' \<in># dom_m B; q \<in># dom_m A; (pq, pqa) \<in> sorted_poly_rel O mset_poly_rel; (ba, eqa) \<in> bool_rel\<rbrakk> \<Longrightarrow> weak_equality_l r spec \<bind> (\<lambda>b'. if ba then if b' then RETURN CFOUND else RETURN CSUCCESS else SPEC (\<lambda>_. True) \<bind> (\<lambda>c. RETURN (error_msg i c))) \<le> SPEC (\<lambda>c. (c, eqa) \<in> {(st, b). (\<not> is_cfailed st) = b \<and> (is_cfound st \<longrightarrow> spec = r)})
[PROOF STEP]
subgoal
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>(A, B) \<in> \<langle>nat_rel, sorted_poly_rel O mset_poly_rel\<rangle>fmap_rel; (r, r') \<in> sorted_poly_rel O mset_poly_rel; (p, p') \<in> nat_rel; (q, q') \<in> nat_rel; (i, i') \<in> nat_rel; (\<V>', \<V>) \<in> \<langle>var_rel\<rangle>set_rel; (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>', b_) \<in> bool_rel\<rbrakk> \<Longrightarrow> (\<not> (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>')) = (\<not> b_)
[PROOF STEP]
by auto
[PROOF STATE]
proof (prove)
goal (6 subgoals):
1. \<And>b. \<lbrakk>(A, B) \<in> \<langle>nat_rel, sorted_poly_rel O mset_poly_rel\<rangle>fmap_rel; (r, r') \<in> sorted_poly_rel O mset_poly_rel; (p, p') \<in> nat_rel; (q, q') \<in> nat_rel; (i, i') \<in> nat_rel; (\<V>', \<V>) \<in> \<langle>var_rel\<rangle>set_rel; (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>', b) \<in> bool_rel; \<not> (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>'); \<not> b\<rbrakk> \<Longrightarrow> (error_msg i ((if p \<notin># dom_m A then error_msg_notin_dom p else []) @ (if q \<notin># dom_m A then error_msg_notin_dom p else []) @ (if i \<in># dom_m A then error_msg_reused_dom p else [])), False) \<in> {(st, b). (\<not> is_cfailed st) = b \<and> (is_cfound st \<longrightarrow> spec = r)}
2. \<And>b. \<lbrakk>(A, B) \<in> \<langle>nat_rel, sorted_poly_rel O mset_poly_rel\<rangle>fmap_rel; (r, r') \<in> sorted_poly_rel O mset_poly_rel; (p, p') \<in> nat_rel; (q, q') \<in> nat_rel; (i, i') \<in> nat_rel; (\<V>', \<V>) \<in> \<langle>var_rel\<rangle>set_rel; (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>', b) \<in> bool_rel; \<not> \<not> (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>'); \<not> \<not> b; p' \<in># dom_m B\<rbrakk> \<Longrightarrow> p \<in># dom_m A
3. \<And>b. \<lbrakk>(A, B) \<in> \<langle>nat_rel, sorted_poly_rel O mset_poly_rel\<rangle>fmap_rel; (r, r') \<in> sorted_poly_rel O mset_poly_rel; (p, p') \<in> nat_rel; (q, q') \<in> nat_rel; (i, i') \<in> nat_rel; (\<V>', \<V>) \<in> \<langle>var_rel\<rangle>set_rel; (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>', b) \<in> bool_rel; \<not> \<not> (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>'); \<not> \<not> b; p' \<in># dom_m B; p \<in># dom_m A; q' \<in># dom_m B\<rbrakk> \<Longrightarrow> q \<in># dom_m A
4. \<And>b. \<lbrakk>(A, B) \<in> \<langle>nat_rel, sorted_poly_rel O mset_poly_rel\<rangle>fmap_rel; (r, r') \<in> sorted_poly_rel O mset_poly_rel; (p, p') \<in> nat_rel; (q, q') \<in> nat_rel; (i, i') \<in> nat_rel; (\<V>', \<V>) \<in> \<langle>var_rel\<rangle>set_rel; (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>', b) \<in> bool_rel; \<not> \<not> (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>'); \<not> \<not> b; p' \<in># dom_m B; p \<in># dom_m A; q' \<in># dom_m B; q \<in># dom_m A\<rbrakk> \<Longrightarrow> (the (fmlookup A p), the (fmlookup B p')) \<in> sorted_poly_rel O mset_poly_rel
5. \<And>b. \<lbrakk>(A, B) \<in> \<langle>nat_rel, sorted_poly_rel O mset_poly_rel\<rangle>fmap_rel; (r, r') \<in> sorted_poly_rel O mset_poly_rel; (p, p') \<in> nat_rel; (q, q') \<in> nat_rel; (i, i') \<in> nat_rel; (\<V>', \<V>) \<in> \<langle>var_rel\<rangle>set_rel; (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>', b) \<in> bool_rel; \<not> \<not> (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>'); \<not> \<not> b; p' \<in># dom_m B; p \<in># dom_m A; q' \<in># dom_m B; q \<in># dom_m A\<rbrakk> \<Longrightarrow> (the (fmlookup A q), the (fmlookup B q')) \<in> sorted_poly_rel O mset_poly_rel
6. \<And>b pq pqa ba eqa. \<lbrakk>(A, B) \<in> \<langle>nat_rel, sorted_poly_rel O mset_poly_rel\<rangle>fmap_rel; (r, r') \<in> sorted_poly_rel O mset_poly_rel; (p, p') \<in> nat_rel; (q, q') \<in> nat_rel; (i, i') \<in> nat_rel; (\<V>', \<V>) \<in> \<langle>var_rel\<rangle>set_rel; (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>', b) \<in> bool_rel; \<not> \<not> (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>'); \<not> \<not> b; p' \<in># dom_m B; p \<in># dom_m A; q' \<in># dom_m B; q \<in># dom_m A; (pq, pqa) \<in> sorted_poly_rel O mset_poly_rel; (ba, eqa) \<in> bool_rel\<rbrakk> \<Longrightarrow> weak_equality_l r spec \<bind> (\<lambda>b'. if ba then if b' then RETURN CFOUND else RETURN CSUCCESS else SPEC (\<lambda>_. True) \<bind> (\<lambda>c. RETURN (error_msg i c))) \<le> SPEC (\<lambda>c. (c, eqa) \<in> {(st, b). (\<not> is_cfailed st) = b \<and> (is_cfound st \<longrightarrow> spec = r)})
[PROOF STEP]
subgoal
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>(A, B) \<in> \<langle>nat_rel, sorted_poly_rel O mset_poly_rel\<rangle>fmap_rel; (r, r') \<in> sorted_poly_rel O mset_poly_rel; (p, p') \<in> nat_rel; (q, q') \<in> nat_rel; (i, i') \<in> nat_rel; (\<V>', \<V>) \<in> \<langle>var_rel\<rangle>set_rel; (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>', b_) \<in> bool_rel; \<not> (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>'); \<not> b_\<rbrakk> \<Longrightarrow> (error_msg i ((if p \<notin># dom_m A then error_msg_notin_dom p else []) @ (if q \<notin># dom_m A then error_msg_notin_dom p else []) @ (if i \<in># dom_m A then error_msg_reused_dom p else [])), False) \<in> {(st, b). (\<not> is_cfailed st) = b \<and> (is_cfound st \<longrightarrow> spec = r)}
[PROOF STEP]
by auto
[PROOF STATE]
proof (prove)
goal (5 subgoals):
1. \<And>b. \<lbrakk>(A, B) \<in> \<langle>nat_rel, sorted_poly_rel O mset_poly_rel\<rangle>fmap_rel; (r, r') \<in> sorted_poly_rel O mset_poly_rel; (p, p') \<in> nat_rel; (q, q') \<in> nat_rel; (i, i') \<in> nat_rel; (\<V>', \<V>) \<in> \<langle>var_rel\<rangle>set_rel; (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>', b) \<in> bool_rel; \<not> \<not> (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>'); \<not> \<not> b; p' \<in># dom_m B\<rbrakk> \<Longrightarrow> p \<in># dom_m A
2. \<And>b. \<lbrakk>(A, B) \<in> \<langle>nat_rel, sorted_poly_rel O mset_poly_rel\<rangle>fmap_rel; (r, r') \<in> sorted_poly_rel O mset_poly_rel; (p, p') \<in> nat_rel; (q, q') \<in> nat_rel; (i, i') \<in> nat_rel; (\<V>', \<V>) \<in> \<langle>var_rel\<rangle>set_rel; (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>', b) \<in> bool_rel; \<not> \<not> (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>'); \<not> \<not> b; p' \<in># dom_m B; p \<in># dom_m A; q' \<in># dom_m B\<rbrakk> \<Longrightarrow> q \<in># dom_m A
3. \<And>b. \<lbrakk>(A, B) \<in> \<langle>nat_rel, sorted_poly_rel O mset_poly_rel\<rangle>fmap_rel; (r, r') \<in> sorted_poly_rel O mset_poly_rel; (p, p') \<in> nat_rel; (q, q') \<in> nat_rel; (i, i') \<in> nat_rel; (\<V>', \<V>) \<in> \<langle>var_rel\<rangle>set_rel; (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>', b) \<in> bool_rel; \<not> \<not> (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>'); \<not> \<not> b; p' \<in># dom_m B; p \<in># dom_m A; q' \<in># dom_m B; q \<in># dom_m A\<rbrakk> \<Longrightarrow> (the (fmlookup A p), the (fmlookup B p')) \<in> sorted_poly_rel O mset_poly_rel
4. \<And>b. \<lbrakk>(A, B) \<in> \<langle>nat_rel, sorted_poly_rel O mset_poly_rel\<rangle>fmap_rel; (r, r') \<in> sorted_poly_rel O mset_poly_rel; (p, p') \<in> nat_rel; (q, q') \<in> nat_rel; (i, i') \<in> nat_rel; (\<V>', \<V>) \<in> \<langle>var_rel\<rangle>set_rel; (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>', b) \<in> bool_rel; \<not> \<not> (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>'); \<not> \<not> b; p' \<in># dom_m B; p \<in># dom_m A; q' \<in># dom_m B; q \<in># dom_m A\<rbrakk> \<Longrightarrow> (the (fmlookup A q), the (fmlookup B q')) \<in> sorted_poly_rel O mset_poly_rel
5. \<And>b pq pqa ba eqa. \<lbrakk>(A, B) \<in> \<langle>nat_rel, sorted_poly_rel O mset_poly_rel\<rangle>fmap_rel; (r, r') \<in> sorted_poly_rel O mset_poly_rel; (p, p') \<in> nat_rel; (q, q') \<in> nat_rel; (i, i') \<in> nat_rel; (\<V>', \<V>) \<in> \<langle>var_rel\<rangle>set_rel; (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>', b) \<in> bool_rel; \<not> \<not> (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>'); \<not> \<not> b; p' \<in># dom_m B; p \<in># dom_m A; q' \<in># dom_m B; q \<in># dom_m A; (pq, pqa) \<in> sorted_poly_rel O mset_poly_rel; (ba, eqa) \<in> bool_rel\<rbrakk> \<Longrightarrow> weak_equality_l r spec \<bind> (\<lambda>b'. if ba then if b' then RETURN CFOUND else RETURN CSUCCESS else SPEC (\<lambda>_. True) \<bind> (\<lambda>c. RETURN (error_msg i c))) \<le> SPEC (\<lambda>c. (c, eqa) \<in> {(st, b). (\<not> is_cfailed st) = b \<and> (is_cfound st \<longrightarrow> spec = r)})
[PROOF STEP]
subgoal
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>(A, B) \<in> \<langle>nat_rel, sorted_poly_rel O mset_poly_rel\<rangle>fmap_rel; (r, r') \<in> sorted_poly_rel O mset_poly_rel; (p, p') \<in> nat_rel; (q, q') \<in> nat_rel; (i, i') \<in> nat_rel; (\<V>', \<V>) \<in> \<langle>var_rel\<rangle>set_rel; (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>', b_) \<in> bool_rel; \<not> \<not> (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>'); \<not> \<not> b_; p' \<in># dom_m B\<rbrakk> \<Longrightarrow> p \<in># dom_m A
[PROOF STEP]
by auto
[PROOF STATE]
proof (prove)
goal (4 subgoals):
1. \<And>b. \<lbrakk>(A, B) \<in> \<langle>nat_rel, sorted_poly_rel O mset_poly_rel\<rangle>fmap_rel; (r, r') \<in> sorted_poly_rel O mset_poly_rel; (p, p') \<in> nat_rel; (q, q') \<in> nat_rel; (i, i') \<in> nat_rel; (\<V>', \<V>) \<in> \<langle>var_rel\<rangle>set_rel; (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>', b) \<in> bool_rel; \<not> \<not> (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>'); \<not> \<not> b; p' \<in># dom_m B; p \<in># dom_m A; q' \<in># dom_m B\<rbrakk> \<Longrightarrow> q \<in># dom_m A
2. \<And>b. \<lbrakk>(A, B) \<in> \<langle>nat_rel, sorted_poly_rel O mset_poly_rel\<rangle>fmap_rel; (r, r') \<in> sorted_poly_rel O mset_poly_rel; (p, p') \<in> nat_rel; (q, q') \<in> nat_rel; (i, i') \<in> nat_rel; (\<V>', \<V>) \<in> \<langle>var_rel\<rangle>set_rel; (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>', b) \<in> bool_rel; \<not> \<not> (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>'); \<not> \<not> b; p' \<in># dom_m B; p \<in># dom_m A; q' \<in># dom_m B; q \<in># dom_m A\<rbrakk> \<Longrightarrow> (the (fmlookup A p), the (fmlookup B p')) \<in> sorted_poly_rel O mset_poly_rel
3. \<And>b. \<lbrakk>(A, B) \<in> \<langle>nat_rel, sorted_poly_rel O mset_poly_rel\<rangle>fmap_rel; (r, r') \<in> sorted_poly_rel O mset_poly_rel; (p, p') \<in> nat_rel; (q, q') \<in> nat_rel; (i, i') \<in> nat_rel; (\<V>', \<V>) \<in> \<langle>var_rel\<rangle>set_rel; (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>', b) \<in> bool_rel; \<not> \<not> (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>'); \<not> \<not> b; p' \<in># dom_m B; p \<in># dom_m A; q' \<in># dom_m B; q \<in># dom_m A\<rbrakk> \<Longrightarrow> (the (fmlookup A q), the (fmlookup B q')) \<in> sorted_poly_rel O mset_poly_rel
4. \<And>b pq pqa ba eqa. \<lbrakk>(A, B) \<in> \<langle>nat_rel, sorted_poly_rel O mset_poly_rel\<rangle>fmap_rel; (r, r') \<in> sorted_poly_rel O mset_poly_rel; (p, p') \<in> nat_rel; (q, q') \<in> nat_rel; (i, i') \<in> nat_rel; (\<V>', \<V>) \<in> \<langle>var_rel\<rangle>set_rel; (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>', b) \<in> bool_rel; \<not> \<not> (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>'); \<not> \<not> b; p' \<in># dom_m B; p \<in># dom_m A; q' \<in># dom_m B; q \<in># dom_m A; (pq, pqa) \<in> sorted_poly_rel O mset_poly_rel; (ba, eqa) \<in> bool_rel\<rbrakk> \<Longrightarrow> weak_equality_l r spec \<bind> (\<lambda>b'. if ba then if b' then RETURN CFOUND else RETURN CSUCCESS else SPEC (\<lambda>_. True) \<bind> (\<lambda>c. RETURN (error_msg i c))) \<le> SPEC (\<lambda>c. (c, eqa) \<in> {(st, b). (\<not> is_cfailed st) = b \<and> (is_cfound st \<longrightarrow> spec = r)})
[PROOF STEP]
subgoal
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>(A, B) \<in> \<langle>nat_rel, sorted_poly_rel O mset_poly_rel\<rangle>fmap_rel; (r, r') \<in> sorted_poly_rel O mset_poly_rel; (p, p') \<in> nat_rel; (q, q') \<in> nat_rel; (i, i') \<in> nat_rel; (\<V>', \<V>) \<in> \<langle>var_rel\<rangle>set_rel; (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>', b_) \<in> bool_rel; \<not> \<not> (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>'); \<not> \<not> b_; p' \<in># dom_m B; p \<in># dom_m A; q' \<in># dom_m B\<rbrakk> \<Longrightarrow> q \<in># dom_m A
[PROOF STEP]
by auto
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. \<And>b. \<lbrakk>(A, B) \<in> \<langle>nat_rel, sorted_poly_rel O mset_poly_rel\<rangle>fmap_rel; (r, r') \<in> sorted_poly_rel O mset_poly_rel; (p, p') \<in> nat_rel; (q, q') \<in> nat_rel; (i, i') \<in> nat_rel; (\<V>', \<V>) \<in> \<langle>var_rel\<rangle>set_rel; (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>', b) \<in> bool_rel; \<not> \<not> (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>'); \<not> \<not> b; p' \<in># dom_m B; p \<in># dom_m A; q' \<in># dom_m B; q \<in># dom_m A\<rbrakk> \<Longrightarrow> (the (fmlookup A p), the (fmlookup B p')) \<in> sorted_poly_rel O mset_poly_rel
2. \<And>b. \<lbrakk>(A, B) \<in> \<langle>nat_rel, sorted_poly_rel O mset_poly_rel\<rangle>fmap_rel; (r, r') \<in> sorted_poly_rel O mset_poly_rel; (p, p') \<in> nat_rel; (q, q') \<in> nat_rel; (i, i') \<in> nat_rel; (\<V>', \<V>) \<in> \<langle>var_rel\<rangle>set_rel; (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>', b) \<in> bool_rel; \<not> \<not> (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>'); \<not> \<not> b; p' \<in># dom_m B; p \<in># dom_m A; q' \<in># dom_m B; q \<in># dom_m A\<rbrakk> \<Longrightarrow> (the (fmlookup A q), the (fmlookup B q')) \<in> sorted_poly_rel O mset_poly_rel
3. \<And>b pq pqa ba eqa. \<lbrakk>(A, B) \<in> \<langle>nat_rel, sorted_poly_rel O mset_poly_rel\<rangle>fmap_rel; (r, r') \<in> sorted_poly_rel O mset_poly_rel; (p, p') \<in> nat_rel; (q, q') \<in> nat_rel; (i, i') \<in> nat_rel; (\<V>', \<V>) \<in> \<langle>var_rel\<rangle>set_rel; (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>', b) \<in> bool_rel; \<not> \<not> (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>'); \<not> \<not> b; p' \<in># dom_m B; p \<in># dom_m A; q' \<in># dom_m B; q \<in># dom_m A; (pq, pqa) \<in> sorted_poly_rel O mset_poly_rel; (ba, eqa) \<in> bool_rel\<rbrakk> \<Longrightarrow> weak_equality_l r spec \<bind> (\<lambda>b'. if ba then if b' then RETURN CFOUND else RETURN CSUCCESS else SPEC (\<lambda>_. True) \<bind> (\<lambda>c. RETURN (error_msg i c))) \<le> SPEC (\<lambda>c. (c, eqa) \<in> {(st, b). (\<not> is_cfailed st) = b \<and> (is_cfound st \<longrightarrow> spec = r)})
[PROOF STEP]
subgoal
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>(A, B) \<in> \<langle>nat_rel, sorted_poly_rel O mset_poly_rel\<rangle>fmap_rel; (r, r') \<in> sorted_poly_rel O mset_poly_rel; (p, p') \<in> nat_rel; (q, q') \<in> nat_rel; (i, i') \<in> nat_rel; (\<V>', \<V>) \<in> \<langle>var_rel\<rangle>set_rel; (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>', b_) \<in> bool_rel; \<not> \<not> (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>'); \<not> \<not> b_; p' \<in># dom_m B; p \<in># dom_m A; q' \<in># dom_m B; q \<in># dom_m A\<rbrakk> \<Longrightarrow> (the (fmlookup A p), the (fmlookup B p')) \<in> sorted_poly_rel O mset_poly_rel
[PROOF STEP]
by auto
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>b. \<lbrakk>(A, B) \<in> \<langle>nat_rel, sorted_poly_rel O mset_poly_rel\<rangle>fmap_rel; (r, r') \<in> sorted_poly_rel O mset_poly_rel; (p, p') \<in> nat_rel; (q, q') \<in> nat_rel; (i, i') \<in> nat_rel; (\<V>', \<V>) \<in> \<langle>var_rel\<rangle>set_rel; (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>', b) \<in> bool_rel; \<not> \<not> (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>'); \<not> \<not> b; p' \<in># dom_m B; p \<in># dom_m A; q' \<in># dom_m B; q \<in># dom_m A\<rbrakk> \<Longrightarrow> (the (fmlookup A q), the (fmlookup B q')) \<in> sorted_poly_rel O mset_poly_rel
2. \<And>b pq pqa ba eqa. \<lbrakk>(A, B) \<in> \<langle>nat_rel, sorted_poly_rel O mset_poly_rel\<rangle>fmap_rel; (r, r') \<in> sorted_poly_rel O mset_poly_rel; (p, p') \<in> nat_rel; (q, q') \<in> nat_rel; (i, i') \<in> nat_rel; (\<V>', \<V>) \<in> \<langle>var_rel\<rangle>set_rel; (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>', b) \<in> bool_rel; \<not> \<not> (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>'); \<not> \<not> b; p' \<in># dom_m B; p \<in># dom_m A; q' \<in># dom_m B; q \<in># dom_m A; (pq, pqa) \<in> sorted_poly_rel O mset_poly_rel; (ba, eqa) \<in> bool_rel\<rbrakk> \<Longrightarrow> weak_equality_l r spec \<bind> (\<lambda>b'. if ba then if b' then RETURN CFOUND else RETURN CSUCCESS else SPEC (\<lambda>_. True) \<bind> (\<lambda>c. RETURN (error_msg i c))) \<le> SPEC (\<lambda>c. (c, eqa) \<in> {(st, b). (\<not> is_cfailed st) = b \<and> (is_cfound st \<longrightarrow> spec = r)})
[PROOF STEP]
subgoal
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>(A, B) \<in> \<langle>nat_rel, sorted_poly_rel O mset_poly_rel\<rangle>fmap_rel; (r, r') \<in> sorted_poly_rel O mset_poly_rel; (p, p') \<in> nat_rel; (q, q') \<in> nat_rel; (i, i') \<in> nat_rel; (\<V>', \<V>) \<in> \<langle>var_rel\<rangle>set_rel; (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>', b_) \<in> bool_rel; \<not> \<not> (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>'); \<not> \<not> b_; p' \<in># dom_m B; p \<in># dom_m A; q' \<in># dom_m B; q \<in># dom_m A\<rbrakk> \<Longrightarrow> (the (fmlookup A q), the (fmlookup B q')) \<in> sorted_poly_rel O mset_poly_rel
[PROOF STEP]
by auto
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>b pq pqa ba eqa. \<lbrakk>(A, B) \<in> \<langle>nat_rel, sorted_poly_rel O mset_poly_rel\<rangle>fmap_rel; (r, r') \<in> sorted_poly_rel O mset_poly_rel; (p, p') \<in> nat_rel; (q, q') \<in> nat_rel; (i, i') \<in> nat_rel; (\<V>', \<V>) \<in> \<langle>var_rel\<rangle>set_rel; (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>', b) \<in> bool_rel; \<not> \<not> (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>'); \<not> \<not> b; p' \<in># dom_m B; p \<in># dom_m A; q' \<in># dom_m B; q \<in># dom_m A; (pq, pqa) \<in> sorted_poly_rel O mset_poly_rel; (ba, eqa) \<in> bool_rel\<rbrakk> \<Longrightarrow> weak_equality_l r spec \<bind> (\<lambda>b'. if ba then if b' then RETURN CFOUND else RETURN CSUCCESS else SPEC (\<lambda>_. True) \<bind> (\<lambda>c. RETURN (error_msg i c))) \<le> SPEC (\<lambda>c. (c, eqa) \<in> {(st, b). (\<not> is_cfailed st) = b \<and> (is_cfound st \<longrightarrow> spec = r)})
[PROOF STEP]
subgoal
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>(A, B) \<in> \<langle>nat_rel, sorted_poly_rel O mset_poly_rel\<rangle>fmap_rel; (r, r') \<in> sorted_poly_rel O mset_poly_rel; (p, p') \<in> nat_rel; (q, q') \<in> nat_rel; (i, i') \<in> nat_rel; (\<V>', \<V>) \<in> \<langle>var_rel\<rangle>set_rel; (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>', b_) \<in> bool_rel; \<not> \<not> (p \<in># dom_m A \<and> q \<in># dom_m A \<and> i \<notin># dom_m A \<and> vars_llist r \<subseteq> \<V>'); \<not> \<not> b_; p' \<in># dom_m B; p \<in># dom_m A; q' \<in># dom_m B; q \<in># dom_m A; (pq_, pqa_) \<in> sorted_poly_rel O mset_poly_rel; (ba_, eqa_) \<in> bool_rel\<rbrakk> \<Longrightarrow> weak_equality_l r spec \<bind> (\<lambda>b'. if ba_ then if b' then RETURN CFOUND else RETURN CSUCCESS else SPEC (\<lambda>_. True) \<bind> (\<lambda>c. RETURN (error_msg i c))) \<le> SPEC (\<lambda>c. (c, eqa_) \<in> {(st, b). (\<not> is_cfailed st) = b \<and> (is_cfound st \<longrightarrow> spec = r)})
[PROOF STEP]
by (auto simp: weak_equality_l_def bind_RES_RETURN_eq)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
[PROOF STATE]
proof (state)
this:
check_addition_l spec A \<V>' p q i r \<le> \<Down> {(st, b). (\<not> is_cfailed st) = b \<and> (is_cfound st \<longrightarrow> spec = r)} (check_add B \<V> p' q' i' r')
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 20589, "file": "PAC_Checker_PAC_Checker", "length": 31}
|
"""
Example: overlap a disc and an outflow submodel on a single global grid and
write the merged model out for LIME radiative transfer.
"""
from __future__ import print_function
#********************
#sf3dmodels libraries
#********************
from sf3dmodels.outflow import OutflowModel #Model functions
import sf3dmodels.utils.units as u #Units
import sf3dmodels.rt as rt #Writing functions for radiative transfer
import sf3dmodels.Plot_model as Pm #Plotting model
import sf3dmodels.Model as Model #Grid
from sf3dmodels.grid import Overlap #Overlap submodels
#********************
#Extra libraries
#********************
import numpy as np
import time
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.colors as colors
t0 = time.time()
#********
#GRIDDING
#********
sizex = 100 * u.au
sizey = sizez = 100 * u.au
Nx = Ny = Nz = 100
GRID = Model.grid([sizex, sizey, sizez], [Nx, Ny, Nz], rt_code='lime')
#********
#MERGING
#********
files = ['disc.dat', 'outflow.dat']
#Old style
#outflows = BGG.overlap(GRID, submodels = data2merge, rho_min = 1e6)
#New style
columns = ['id', 'x', 'y', 'z', 'dens_H2', 'dens_Hplus', 'temp_gas', 'vel_x', 'vel_y', 'vel_z', 'abundance', 'gtdratio']
overlap = Overlap(GRID)
finalprop = overlap.fromfiles(columns, submodels = files, rt_code = 'lime')
#**********
#WRITING
#**********
lime = rt.Lime(GRID)
lime.finalmodel(finalprop)
#********
#TIMING
#********
print ('Elapsed time: %.3fs' % (time.time() - t0))
print ('-------------------------------------------------\n-------------------------------------------------\n')
#********
#PLOTTING
#********
density = finalprop['dens_H2'] / 1e6 #dens. in cm^-3
temperature = finalprop['temp_gas']
weight = 1.0  # 100 * np.mean(density)
"""
#-----------------
#Plot for DENSITY
#-----------------
Pm.scatter3D(GRID, density, weight, NRand = 4000, axisunit = u.au, colorscale = 'log', cmap = 'cool',
colorlabel = r'${\rm log}_{10}(n [cm^{-3}])$', output = 'global_grid_dens.png', vmin = 5)
#--------------------
#Plot for TEMPERATURE
#--------------------
Pm.scatter3D(GRID, density, weight, colordim = temperature, NRand = 4000, axisunit = u.au, colorscale = 'log',
cmap = 'brg', colorlabel = r'${\rm log}_{10}(T$ $[K])$', output = 'global_grid_temp.png', vmin = 2)
"""
#******************
#3D plotting
#******************
lims = np.array([-100,100])
weight = 1.0
ax_kw = {'projection': '3d'}  # , 'xlim': lims, 'ylim': lims, 'zlim': lims, 'azim': -50, 'elev': 30}
canvas3d = Pm.Canvas3d(ax_kw=ax_kw)
sp = canvas3d.scatter_random(GRID, density, weight, GRID_unit=u.au, power=0, NRand=10000, prop_min=1.0, #function arguments
marker = '+', cmap = 'jet', s = 3, edgecolors = 'none', vmin=1, norm = colors.LogNorm()) #Scatter kwargs
cbar = plt.colorbar(sp)
cbar.ax.set_ylabel(r'H$_2$ density [cm$^{-3}$]')
canvas3d.ax.set_xlabel('au')
plt.savefig('grid_dens3d.png', bbox_inches='tight')
canvas3d = Pm.Canvas3d(ax_kw=ax_kw)
sp = canvas3d.scatter_random(GRID, density, weight, prop_color = temperature, GRID_unit=u.au, power=0, NRand=10000, prop_min=1.0, #function arguments
marker = '+', cmap = 'jet', s = 3, edgecolors = 'none', vmin=1, norm = colors.LogNorm()) #Scatter kwargs
cbar = plt.colorbar(sp)
cbar.ax.set_ylabel(r'T [K]')
canvas3d.ax.set_xlabel('au')
plt.savefig('grid_temp3d.png', bbox_inches='tight')
plt.show()
|
{"hexsha": "b9b98b9811c9855875edffd32af460e7788db7c2", "size": 3387, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/disc+outflow/overlap_models.py", "max_stars_repo_name": "andizq/star-forming-regions", "max_stars_repo_head_hexsha": "1105b856c5814635692704b41da66dc6fe38092b", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2017-11-28T03:47:47.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-09T16:31:59.000Z", "max_issues_repo_path": "examples/disc+outflow/overlap_models.py", "max_issues_repo_name": "andizq/sf3dmodels", "max_issues_repo_head_hexsha": "4a2b33ad5ae75473a5b023eed712c4009e99ca3c", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2020-07-07T21:22:04.000Z", "max_issues_repo_issues_event_max_datetime": "2020-10-29T20:32:23.000Z", "max_forks_repo_path": "examples/disc+outflow/overlap_models.py", "max_forks_repo_name": "andizq/sf3dmodels", "max_forks_repo_head_hexsha": "4a2b33ad5ae75473a5b023eed712c4009e99ca3c", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2018-12-10T16:32:11.000Z", "max_forks_repo_forks_event_max_datetime": "2020-08-12T09:32:35.000Z", "avg_line_length": 32.5673076923, "max_line_length": 149, "alphanum_fraction": 0.5848833776, "include": true, "reason": "import numpy", "num_tokens": 1002}
|
from contextlib import contextmanager
import copy
import torch
import numpy as np
from base import BaseTrainer
from utils import memory_summary
from model.metric import APMeter, APMeterChallenge
def verbose(epoch, metrics, mode, name="TEST"):
r1, r5, r10, r50 = metrics["R1"], metrics["R5"], metrics["R10"], metrics["R50"]
msg = f"[{mode}]{name:s} epoch {epoch}, R@1: {r1:.1f}"
msg += f", R@5: {r5:.1f}, R@10 {r10:.1f}, R@50 {r50:.1f}"
msg += f"MedR: {metrics['MedR']:g}, MeanR: {metrics['MeanR']:.1f}"
print(msg)
@contextmanager
def ctxt_mgr(samples, device, disable_nan_checks):
"""Provide a context for managing temporary, cloned copies of retrieval
sample tensors.
The rationale here is that to use nan-checking in the model (to validate the
positions of missing experts), we need to modify the underlying tensors. This
function lets the evaluation code run (and modify) temporary copies, without
modifying the originals.
"""
if disable_nan_checks:
print("running without nan checks")
yield samples
else:
exp_dict = samples["experts"].items()
experts = {key: val.clone().to(device) for key, val in exp_dict}
samples_ = {
"experts": experts,
"ind": samples["ind"],
"text": samples["text"].to(device),
}
if "text_token_mask" in samples:
samples_["text_token_mask"] = samples["text_token_mask"].to(device)
try:
yield samples_
finally:
del samples_
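# Hypothetical usage sketch of ctxt_mgr (the names below are illustrative and
# not taken from the training loop): evaluation code may freely mutate the
# yielded copies while the caller's original tensors stay untouched.
#
#     samples = {"experts": {"rgb": rgb_feats}, "ind": ind, "text": text}
#     with ctxt_mgr(samples, device, disable_nan_checks=False) as xx:
#         output = model(**xx)  # model may overwrite nan positions in xx
#     # `samples` is unchanged here; the cloned tensors were deleted on exit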
class Trainer(BaseTrainer):
"""
Trainer class
Note:
Inherited from BaseTrainer.
"""
def __init__(self, model, loss, metrics, optimizer, config, data_loaders,
lr_scheduler, visualizer, disable_nan_checks, skip_first_n_saves,
include_optim_in_ckpts, force_cpu_val, cache_targets=set(),
num_keep_ckpts=3, mini_train=False, val_freq=1, skip_tboard=False):
super().__init__(model, loss, metrics, optimizer, config, mini_train=mini_train,
skip_tboard=skip_tboard, num_keep_ckpts=num_keep_ckpts)
self.config = config
self.cache_targets = cache_targets
self.data_loaders = data_loaders
self.lr_scheduler = lr_scheduler
self.mini_train = mini_train
self.disable_nan_checks = disable_nan_checks
self.len_epoch = len(self.data_loaders["train"])
self.log_step = int(np.sqrt(data_loaders["train"].batch_size))
self.visualizer = visualizer
self.force_cpu_val = force_cpu_val
self.val_freq = val_freq
self.skip_first_n_saves = skip_first_n_saves
self.include_optim_in_ckpts = include_optim_in_ckpts
self.seen = {"train": 0, "val": 0}
def _train_epoch(self, epoch):
"""
Training logic for an epoch
:param epoch: Current training epoch.
:return: A log that contains all information you want to save.
Note:
If you have additional information to record, for example:
> additional_log = {"x": x, "y": y}
merge it with log before return. i.e.
> log = {**log, **additional_log}
> return log
The metrics in log must have the key 'metrics'.
"""
total_loss = 0
self.model.train()
memory_summary()
for batch_idx, minibatch in enumerate(self.data_loaders["train"]):
for key, val in minibatch["experts"].items():
minibatch["experts"][key] = val.to(self.device)
for key in {"text", "text_token_mask"}:
if key in minibatch:
minibatch[key] = minibatch[key].to(self.device)
if "labels" in minibatch:
labels = minibatch.pop("labels").to(self.device)
self.optimizer.zero_grad()
output = self.model(**minibatch)
if "retrieval" in self.data_loaders.dataloaders:
loss = self.loss(output["cross_view_conf_matrix"])
else:
loss = self.loss(x=output["class_preds"], target=labels)
loss.backward()
self.optimizer.step()
sample_key = list(minibatch["experts"].keys())[0]
batch_size = minibatch["experts"][sample_key].shape[0]
self.seen["train"] += batch_size
if not self.skip_tboard:
# self.writer.set_step((epoch - 1) * self.len_epoch + batch_idx)
self.writer.set_step(self.seen["train"], mode="train")
self.writer.add_scalar('loss', loss.item())
total_loss += loss.item()
if batch_idx % self.log_step == 0:
prog = self._progress(batch_idx)
self.logger.info(f"Train Epoch: {epoch} {prog} Loss: {loss.item():.6f}")
if batch_idx == self.len_epoch or (self.mini_train and batch_idx > 3):
break
log = {'loss': total_loss / self.len_epoch}
if epoch % self.val_freq == 0:
nested_log, cached_preds = self._valid_epoch(epoch)
log.update(nested_log)
else:
nested_log, cached_preds = {}, None
self.logger.info(f"skipping val for epoch: {epoch}")
if self.lr_scheduler is not None:
self.lr_scheduler.step()
self.logger.info(f"LR {self.lr_scheduler.get_lr()}")
return log, cached_preds
def log_metrics(self, metric_store, metric_name, mode):
if not self.skip_tboard:
print(f"logging metrics: {metric_name}")
self.writer.set_step(step=self.seen[mode], mode=mode)
for key, value in metric_store.items():
self.writer.add_scalar(f"{metric_name}/{key}", value)
def _valid_epoch(self, epoch):
"""Validate model after an epoch of training and store results to disk.
Args:
epoch (int): the current epoch
Returns:
A log that contains information about validation
NOTE: The validation metrics in log must have the key 'val_metrics'.
"""
self.model.eval()
if not self.skip_tboard:
self.writer.mode = "val"
cached_preds = {key: {"vid_name": [], "preds": [], "labels": []}
for key in self.cache_targets}
with torch.no_grad():
if "retrieval" in self.data_loaders.dataloaders:
samples, meta = self.data_loaders["retrieval"]
sample_key = list(samples["experts"].keys())[0]
batch_size = samples["experts"][sample_key].shape[0]
self.seen["val"] += batch_size
num_queries = samples["text"].shape[0] * samples["text"].shape[1]
safe_queries = 4000
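                # If num_queries exceeds safe_queries, the text batch is split
                # into chunks so each forward pass computes at most ~4000
                # similarity rows (presumably to bound GPU memory usage).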
if num_queries > safe_queries:
partitions = int(np.ceil(num_queries / safe_queries))
chunk_size = int(np.ceil(samples["text"].shape[0] / partitions))
texts = copy.deepcopy(samples["text"])
sim_chunks = []
for chunk_idx in range(partitions):
chunk_start = chunk_idx * chunk_size
chunk_stop = (chunk_idx + 1) * chunk_size
samples["text"] = texts[chunk_start:chunk_stop]
with ctxt_mgr(samples, self.device,
self.disable_nan_checks) as xx:
output = self.model(**xx)
sims = output["cross_view_conf_matrix"].data
sim_chunks.append(sims)
samples["text"] = texts # restore pointer to original tensor
del texts
sims = torch.cat(sim_chunks, dim=0).data.cpu().float().numpy()
else:
with ctxt_mgr(samples, self.device, self.disable_nan_checks) as xx:
output = self.model(**xx)
self.model = self.model.to(self.device)
sims = output["cross_view_conf_matrix"].data.cpu().float().numpy()
# sample the loss (using only the first query for each video)
queries_per_vid = meta["query_masks"].shape[1]
sims_ = torch.from_numpy(sims).view(-1, queries_per_vid, sims.shape[-1])
loss = self.loss(sims_[:, 0, :].contiguous())
if not self.skip_tboard:
self.writer.add_scalar('first-query-loss', loss.item())
dataset = self.data_loaders.dataset_name
nested_metrics = {}
for metric in self.metrics:
metric_name = metric.__name__
res = metric(sims, query_masks=meta["query_masks"])
if metric_name == "mean_average_precision":
print(f"Epoch: {epoch}, mean AP: {res['mAP']}")
else:
verbose(epoch=epoch, metrics=res, name=dataset, mode=metric_name)
self.log_metrics(res, metric_name=metric_name, mode="val")
nested_metrics[metric_name] = res
# TODO(Samuel) disabled visualisation for now, simple to add in later
num_test_caps = self.data_loaders.num_test_captions
if num_test_caps == 1 and meta["raw_captions"] is not None:
if self.visualizer is not None:
self.visualizer.visualize_ranking(
sims=sims,
meta=meta,
epoch=epoch,
nested_metrics=nested_metrics,
)
return {"nested_val_metrics": nested_metrics}, cached_preds
elif "val" in self.data_loaders.dataloaders:
metrics = [x() for x in self.metrics]
for batch_idx, minibatch in enumerate(self.data_loaders["val"]):
for key, val in minibatch["experts"].items():
minibatch["experts"][key] = val.to(self.device)
labels = minibatch.pop("labels").to(self.device)
vid_name = minibatch.pop("vid_name")
output = self.model(**minibatch)
if "val" in self.cache_targets:
cached_preds["val"]["vid_name"].append(vid_name)
cached_preds["val"]["preds"].append(output["class_preds"])
for metric in metrics:
metric.add(output=output["class_preds"], target=labels)
if batch_idx % self.log_step == 0:
prog = self._progress(batch_idx)
self.logger.info(f"Val Epoch: {epoch} {prog}")
nested_metrics = {}
for metric in metrics:
if hasattr(metric, "topk"):
res = {f"top{key}": val for key, val in
zip(metric.topk, metric.value())}
self.log_metrics(res, mode="val", metric_name="accuracy")
nested_metrics["accuracy"] = res
elif isinstance(metric, APMeter):
res = {"mAP": metric.value().mean()}
self.log_metrics(res, mode="val",
metric_name="mean_ap_non_challenge")
nested_metrics["mean_ap_non_challenge"] = res
elif isinstance(metric, APMeterChallenge):
res = {"mAP": metric.value().mean()}
self.log_metrics(res, mode="val",
metric_name="mean_average_precision")
nested_metrics["mean_ap"] = res
else:
raise ValueError(f"unsupported mettric: {type(metric)}")
nested = {"nested_val_metrics": nested_metrics}
for target in self.cache_targets - {"val"}:
for batch_idx, minibatch in enumerate(self.data_loaders["tiny"]):
for key, val in minibatch["experts"].items():
minibatch["experts"][key] = val.to(self.device)
if "labels" in minibatch:
cached_preds[target]["labels"].append(minibatch.pop("labels"))
cached_preds[target]["vid_name"].append(minibatch.pop("vid_name"))
output = self.model(**minibatch)
cached_preds[target]["preds"].append(output["class_preds"])
            # aggregate all cached predictions (keep them grouped per target;
            # writing to cached_preds[key] would clobber entries across targets)
            for target in self.cache_targets:
                for key, val in cached_preds[target].items():
                    cached_preds[target][key] = torch.cat(val).cpu().numpy()
return nested, cached_preds
def _progress(self, batch_idx):
base = '[{}/{} ({:.0f}%)]'
if hasattr(self.data_loaders, 'n_samples'):
current = batch_idx * self.data_loaders.batch_size
total = self.data_loaders.n_samples
else:
current = batch_idx
total = self.len_epoch
return base.format(current, total, 100.0 * current / total)
|
{"hexsha": "2799298a8339b52156e6eb97a89c497fa6c95b28", "size": 13481, "ext": "py", "lang": "Python", "max_stars_repo_path": "trainer/trainer.py", "max_stars_repo_name": "dendisuhubdy/collaborative-experts", "max_stars_repo_head_hexsha": "e6db63837537c054723ce00b73264101acc29d39", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "trainer/trainer.py", "max_issues_repo_name": "dendisuhubdy/collaborative-experts", "max_issues_repo_head_hexsha": "e6db63837537c054723ce00b73264101acc29d39", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "trainer/trainer.py", "max_forks_repo_name": "dendisuhubdy/collaborative-experts", "max_forks_repo_head_hexsha": "e6db63837537c054723ce00b73264101acc29d39", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.6390728477, "max_line_length": 90, "alphanum_fraction": 0.5455826719, "include": true, "reason": "import numpy", "num_tokens": 2808}
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import mxnet as mx
import numpy as np
import pytest
from gluonts.model.deepvar_hierarchical import (
constraint_mat,
reconciliation_error,
)
TOL = 1e-4
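# Hierarchical summation matrix (reading off the rows): row 0 aggregates all
# four bottom-level series, rows 1-2 each aggregate two bottom series, and
# rows 3-6 are the identity on the bottom level itself.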
S = np.array(
[
[1, 1, 1, 1],
[1, 1, 0, 0],
[0, 0, 1, 1],
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
],
)
num_bottom_ts = S.shape[1]
A = constraint_mat(S)
@pytest.mark.parametrize(
"bottom_ts",
[
np.random.randint(low=0, high=100, size=num_bottom_ts), # integer data
np.random.randint(
low=1000, high=100000, size=num_bottom_ts
), # large integer data
np.random.poisson(lam=1, size=num_bottom_ts),
np.random.negative_binomial(n=1000, p=0.5, size=num_bottom_ts),
-np.random.negative_binomial(
n=1000, p=0.5, size=num_bottom_ts
), # negative data
np.random.standard_normal(size=num_bottom_ts),
],
)
def test_reconciliation_error(bottom_ts):
all_ts = S @ bottom_ts
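    # all_ts is coherent by construction (it lies in the column space of S),
    # so the aggregation constraints A @ all_ts vanish up to numerical error.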
assert reconciliation_error(mx.nd.array(A), mx.nd.array(all_ts)) < TOL
|
{"hexsha": "b16738d1d01ff07be66160901d189bc7da78fd4d", "size": 1664, "ext": "py", "lang": "Python", "max_stars_repo_path": "test/model/deepvar_hierarchical/test_reconciliation_error.py", "max_stars_repo_name": "Xiaoxiong-Liu/gluon-ts", "max_stars_repo_head_hexsha": "097c492769258dd70b7f223f826b17b0051ceee9", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2648, "max_stars_repo_stars_event_min_datetime": "2019-06-03T17:18:27.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T08:29:22.000Z", "max_issues_repo_path": "test/model/deepvar_hierarchical/test_reconciliation_error.py", "max_issues_repo_name": "Xiaoxiong-Liu/gluon-ts", "max_issues_repo_head_hexsha": "097c492769258dd70b7f223f826b17b0051ceee9", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1220, "max_issues_repo_issues_event_min_datetime": "2019-06-04T09:00:14.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T10:45:43.000Z", "max_forks_repo_path": "test/model/deepvar_hierarchical/test_reconciliation_error.py", "max_forks_repo_name": "Xiaoxiong-Liu/gluon-ts", "max_forks_repo_head_hexsha": "097c492769258dd70b7f223f826b17b0051ceee9", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 595, "max_forks_repo_forks_event_min_datetime": "2019-06-04T01:04:31.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T10:40:26.000Z", "avg_line_length": 27.7333333333, "max_line_length": 79, "alphanum_fraction": 0.6454326923, "include": true, "reason": "import numpy", "num_tokens": 476}
|
subroutine chabs (sDCM, l, ine, ne1, ne2, a, z, r1)
! ======================================================================
!
! Determines isospin (p or n) of nucleons after pion absorption.
! This modified version keeps track of the isospin of the original
! first nucleon partner.
!
! Called by: ABSORP
!
! ine; charge of incident pion or photon
! ne1; upon entry: charge of first nucleon partner;
! ne1, ne2; upon return: the charges of the two outgoing nucleons.
! l = 1 for gammas, otherwise = 0
!
! CEM95 written by S. G. Mashnik
! Edited by A. J. Sierk, LANL T-2, February, 1996.
! Modified from old CHABS by AJS, July, 1997
! Added gamma absorption by FCG August, 2000.
! "Last" change: 12-AUG-2003 by NVMokhov
! Modified by A. J. Sierk, LANL T-16, October, 2003.
! Edited by AJS, LANL T-2, December, 2011.
! Edited by LMK, XCP-3, July 2013 (included error protection).
!
! ======================================================================
use, intrinsic:: iso_fortran_env, only: int32, real64
use standardDCMParams, only: one
implicit none
class(StandardDCM), intent(inout) :: sDCM
integer(int32), intent(in ) :: l
integer(int32), intent(in ) :: ine
integer(int32), intent(inout) :: ne1
integer(int32), intent(inout) :: ne2
real(real64), intent(in ) :: a
real(real64), intent(in ) :: z
real(real64), intent(in ) :: r1
real(real64) :: t1, t2, temp, temp1
! ======================================================================
if (l.ne.0) then
if (ne1 == 0) then
ne2 = 1
else
ne2 = 0
endif
else
t1 = z - one
t2 = a - one
temp = t2
if (temp < div0Lim .and. temp > -div0Lim) then
temp = div0Lim
write(sDCM%io%message, 1000) "61, 69, 79, 93"
call sDCM%io%print(4, 3, sDCM%io%message)
end if
if (ine < 0) then
! pi-
ne2 = 0
if (ne1 == 0) then
! neutron; must have proton partner; final state = nn
continue
elseif (ne1 == 1) then
! proton; final state must be nn or np:
temp1 = t1/temp
! Initial partner a neutron: fs = nn
if (r1 > temp1) ne1 = 0
endif
elseif (ine == 0) then
! pi0
if (ne1 == 0) then
! first partner is a neutron
temp1 = z/temp
if (r1 > temp1) then
! 2nd neutron in initial pair; fs = nn
ne2 = 0
else
! proton in initial pair; fs = np
ne2 = 1
endif
elseif (ne1 == 1) then
! first partner is a proton
temp1 = t1/temp
if (r1 > temp1) then
! Initial 2nd partner a neutron; fs = np
ne2 = 0
else
! Initial 2nd partner a 2nd proton; fs = pp
ne2 = 1
endif
endif
elseif (ine == 1) then
! pi+
ne2 = 1
if (ne1 == 0) then
! first partner is a neutron
temp1 = t1/temp
if (r1 > temp1) then
! 2nd nucleon is a neutron; fs = np
continue
else
! 2nd nucleon is a proton; fs = pp
ne1 = 1
endif
elseif (ne1 == 1) then
! first partner is a proton; initial = np; fs = pp
continue
endif
endif
endif
return
! ======================================================================
1000 format("Divide by zero error prevented in 'chabs.f90', line(s) ", A)
! ======================================================================
end subroutine chabs
|
{"hexsha": "ae63d51e98839f0579868ec2a8f9f192687f0668", "size": 3692, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/StandardDCM/chabs.f90", "max_stars_repo_name": "lanl/generalized-spallation-model", "max_stars_repo_head_hexsha": "4a2f01a873d2e8f2304b8fd1474d43d1ce8d744d", "max_stars_repo_licenses": ["Intel", "Unlicense"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-05-24T18:05:48.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-24T18:05:48.000Z", "max_issues_repo_path": "src/StandardDCM/chabs.f90", "max_issues_repo_name": "lanl/generalized-spallation-model", "max_issues_repo_head_hexsha": "4a2f01a873d2e8f2304b8fd1474d43d1ce8d744d", "max_issues_repo_licenses": ["Intel", "Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/StandardDCM/chabs.f90", "max_forks_repo_name": "lanl/generalized-spallation-model", "max_forks_repo_head_hexsha": "4a2f01a873d2e8f2304b8fd1474d43d1ce8d744d", "max_forks_repo_licenses": ["Intel", "Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.7666666667, "max_line_length": 73, "alphanum_fraction": 0.4796858072, "num_tokens": 1094}
|
/**
* Copyright (C) 2020-present MongoDB, Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the Server Side Public License, version 1,
* as published by MongoDB, Inc.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* Server Side Public License for more details.
*
* You should have received a copy of the Server Side Public License
* along with this program. If not, see
* <http://www.mongodb.com/licensing/server-side-public-license>.
*
* As a special exception, the copyright holders give permission to link the
* code of portions of this program with the OpenSSL library under certain
* conditions as described in each individual source file and distribute
* linked combinations including the program with the OpenSSL library. You
* must comply with the Server Side Public License in all respects for
* all of the code used other than as permitted herein. If you modify file(s)
* with this exception, you may extend this exception to your version of the
* file(s), but you are not obligated to do so. If you do not wish to do so,
* delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also delete
* it in the license file.
*/
#include "mongo/platform/basic.h"
#include <boost/optional.hpp>
#include <fmt/format.h>
#include "mongo/logv2/log.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/ctype.h"
#include "mongo/util/hex.h"
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest
namespace mongo::ctype {
namespace {
using namespace fmt::literals;
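// Note: the C and C++ <ctype> classifiers take their argument as an unsigned
// char value (passing a negative plain char is undefined behavior), hence the
// separate `uc` variable in the loops below.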
TEST(Ctype, MatchesCxxStdlib) {
for (size_t i = 0; i < 256; ++i) {
char c = i;
unsigned char uc = i;
const std::string msg = " i={:02x}"_format(i);
ASSERT_EQ(isAlnum(c), (bool)std::isalnum(uc)) << msg;
ASSERT_EQ(isAlpha(c), (bool)std::isalpha(uc)) << msg;
ASSERT_EQ(isLower(c), (bool)std::islower(uc)) << msg;
ASSERT_EQ(isUpper(c), (bool)std::isupper(uc)) << msg;
ASSERT_EQ(isDigit(c), (bool)std::isdigit(uc)) << msg;
ASSERT_EQ(isXdigit(c), (bool)std::isxdigit(uc)) << msg;
ASSERT_EQ(isCntrl(c), (bool)std::iscntrl(uc)) << msg;
ASSERT_EQ(isGraph(c), (bool)std::isgraph(uc)) << msg;
ASSERT_EQ(isSpace(c), (bool)std::isspace(uc)) << msg;
ASSERT_EQ(isBlank(c), (bool)std::isblank(uc)) << msg;
ASSERT_EQ(isPrint(c), (bool)std::isprint(uc)) << msg;
ASSERT_EQ(isPunct(c), (bool)std::ispunct(uc)) << msg;
ASSERT_EQ(toLower(c), (char)std::tolower(uc)) << msg;
ASSERT_EQ(toUpper(c), (char)std::toupper(uc)) << msg;
}
}
TEST(Ctype, MatchesCStdlib) {
for (size_t i = 0; i < 256; ++i) {
char c = i;
unsigned char uc = i;
const std::string msg = " i={:02x}"_format(i);
ASSERT_EQ(isAlnum(c), (bool)isalnum(uc)) << msg;
ASSERT_EQ(isAlpha(c), (bool)isalpha(uc)) << msg;
ASSERT_EQ(isLower(c), (bool)islower(uc)) << msg;
ASSERT_EQ(isUpper(c), (bool)isupper(uc)) << msg;
ASSERT_EQ(isDigit(c), (bool)isdigit(uc)) << msg;
ASSERT_EQ(isXdigit(c), (bool)isxdigit(uc)) << msg;
ASSERT_EQ(isCntrl(c), (bool)iscntrl(uc)) << msg;
ASSERT_EQ(isGraph(c), (bool)isgraph(uc)) << msg;
ASSERT_EQ(isSpace(c), (bool)isspace(uc)) << msg;
ASSERT_EQ(isBlank(c), (bool)isblank(uc)) << msg;
ASSERT_EQ(isPrint(c), (bool)isprint(uc)) << msg;
ASSERT_EQ(isPunct(c), (bool)ispunct(uc)) << msg;
ASSERT_EQ(toLower(c), (char)tolower(uc)) << msg;
ASSERT_EQ(toUpper(c), (char)toupper(uc)) << msg;
}
}
TEST(Ctype, IsConstexpr) {
MONGO_STATIC_ASSERT(isAlnum('a'));
MONGO_STATIC_ASSERT(isAlpha('a'));
MONGO_STATIC_ASSERT(isLower('a'));
MONGO_STATIC_ASSERT(!isUpper('a'));
MONGO_STATIC_ASSERT(!isDigit('a'));
MONGO_STATIC_ASSERT(isXdigit('a'));
MONGO_STATIC_ASSERT(!isCntrl('a'));
MONGO_STATIC_ASSERT(isGraph('a'));
MONGO_STATIC_ASSERT(!isSpace('a'));
MONGO_STATIC_ASSERT(!isBlank('a'));
MONGO_STATIC_ASSERT(isPrint('a'));
MONGO_STATIC_ASSERT(!isPunct('a'));
MONGO_STATIC_ASSERT(toLower('a') == 'a');
MONGO_STATIC_ASSERT(toUpper('a') == 'A');
}
} // namespace
} // namespace mongo::ctype
|
{"hexsha": "98fbea63def99abf3b9a201e218e7ee21b991094", "size": 4548, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/mongo/util/ctype_test.cpp", "max_stars_repo_name": "benety/mongo", "max_stars_repo_head_hexsha": "203430ac9559f82ca01e3cbb3b0e09149fec0835", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/mongo/util/ctype_test.cpp", "max_issues_repo_name": "benety/mongo", "max_issues_repo_head_hexsha": "203430ac9559f82ca01e3cbb3b0e09149fec0835", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/mongo/util/ctype_test.cpp", "max_forks_repo_name": "benety/mongo", "max_forks_repo_head_hexsha": "203430ac9559f82ca01e3cbb3b0e09149fec0835", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.6071428571, "max_line_length": 80, "alphanum_fraction": 0.6466578716, "num_tokens": 1264}
|
# Import Packages
import os, sys, csv, random
import matplotlib.pyplot as plt
import numpy as np
from skimage import io
import seaborn as sns
import pandas as pd
from PIL import Image
from ipywidgets import widgets
from IPython.display import display
# Load in the dataset
path_to_orig_csv = 'https://raw.githubusercontent.com/biof309/spring2019-group-project-team_plant/master/src/myproject/Plant_Dataset_final.csv'
plant_orig_ds = pd.read_csv(path_to_orig_csv)
path_to_csv = "https://raw.githubusercontent.com/biof309/spring2019-group-project-team_plant/master/src/myproject/Plant%20Dataset_scale.csv"
plant_ds = pd.read_csv(path_to_csv)
#plant_orig_ds.head()
# List of Questions and Options
questionlist = [
"What size plant are you looking for?",
"What size pot do you prefer?",
"How much light will your indoor plant have?",
"Do you want a plant that cleans the air?", #This is for clean air plant
"Do you have a child or pet?", #This is for toxicity
"Did you want this plant to flower?",
"How much temperature fluctuation will this plant encounter?", #This is for plant durability
"Did you want to give this plant for a special occasion?",
"How much work do you want to do on this plant?", #This is for pruning
"Do you tend to underwater or overwater?",
"How humid is your home?"
]
optionlist = [
["Small", "Medium", "Large"],
["Small", "Medium", "Large"],
["Low","Medium","High"],
["Yes","No", "Does not matter"],#doesn't matter=yes and no
["Yes", "No", "Does not matter"], #Yes= toxicity is 0, No= all other ones
["Yes", "No","Does not matter"], #doesn't matter=everything
["Low", "Medium", "High"],
["Yes", "No", "Does not matter"], #doesn't matter=everything
["Minimal","Lots","Does not matter"], #minimal=none, lots=regular, doesn't matter=anything
["Underwater","Overwater","Does not matter"], #underwater=low, overwater=high, no=anything
["Low","Medium","High"]
]
# Import packages and define quiz
import tkinter as tk
import tkinter.messagebox
from ipywidgets import widgets
from IPython.display import display
def main(args=None):
'''
    main creates the PlantQuiz application and runs its main loop
'''
    app = PlantQuiz()  # create the PlantQuiz application object
app.master.title('Plant Quiz')
app.mainloop()
return app
# Style from: https://github.com/abhijitnathwani/PyQuiz/blob/master/build/lib.linux-x86_64-2.7/py_quiz/__main__.py
# GUI code for quiz questions:
class PlantQuiz(tk.Frame): # Class PlantQuiz
'''
    Tkinter quiz application: starts the game, creates the widgets, handles
    quitting, loads each question, advances to the next question, and stores
    the user's answers.
'''
def __init__(self, master=None):
tkinter.messagebox.showinfo('Welcome!','Are you ready to find out which house plant best suits you??')
tk.Frame.__init__(self, master)
self.flag=0
self.qn = 0
self.anslist = [None]*len(questionlist)
self.grid()
# declaring variables to store question and answer
self.optionA = tk.StringVar() # control variable for option A
self.optionB = tk.StringVar() # control variable for option B
self.optionC = tk.StringVar() # control variable for option C
self.selected_answer = tk.StringVar() # variable to get the selected answer
self.question = tk.StringVar() # control variable for the question to be loaded
self.questions = questionlist
top = self.winfo_toplevel()
self.createWidgets(top) # call to create the necessary widgets
self.load_question(top) # load the first question
def new_game(self,top):
'''Starts the plant quiz'''
self.load_question(top)
def confirm_quit(self):
'''Displays a message box asking if you want to quit- if yes, destroys.'''
choice = tkinter.messagebox.askyesno('Quit the Quiz','Do you really want to quit?')
if choice == True:
self.destroy()
elif choice == False:
pass
def set_ans(self,answer):
'''Sets the answer to 1, 2, or 3 depending on which answer you select.'''
if answer==1:
self.selected_answer = 1
elif answer==2:
self.selected_answer = 2
elif answer == 3:
self.selected_answer = 3
self.flag=1
def store_ans(self):
'''Stores the user responses for each question.'''
self.anslist[self.qn] = self.selected_answer
#print(str(self.selected_answer))
def load_question(self,top):
'''Loads the next question from the question list.'''
self.radioButtonA.select() # sets the first radio button as the default
self.radioButtonA.deselect()
self.answers = optionlist[self.qn]
self.question.set(questionlist[self.qn])
#length=len(self.question.get()) # get the length of the question
#width=str(100+10*length)
width=str(500) #500
height=str(300) #180
top.geometry(width+"x"+height)
self.optionA.set(optionlist[self.qn][0])
self.optionB.set(optionlist[self.qn][1])
self.optionC.set(optionlist[self.qn][2])
def next_btn(self, top):
'''Displays the next button and launches next question unless the quiz is finished.'''
#print("self.qn: ", self.qn)
self.store_ans()
if self.qn >= (len(questionlist)-1):
self.store_ans()
tkinter.messagebox.showinfo('Bye!','You are finished! Click ok to calculate results.')
self.destroy()
else:
self.qn = self.qn + 1
self.load_question(top)
def createWidgets(self,top):
''' Creates the widget buttons and defines where they are displayed.'''
# Creates widget buttons
top.resizable(True,True)
top.grid_columnconfigure(0,weight=1)
top.grid_columnconfigure(9,weight=1)
top.grid_rowconfigure(0,weight=1)
top.grid_rowconfigure(9,weight=1)
#Creating the buttons
self.quitButton = tk.Button(self, text='Quit', command=self.confirm_quit)
self.nextButton = tk.Button(self, text='Next', command=lambda: self.next_btn(top))
#Creating Radio buttons for options
self.radioButtonA = tk.Radiobutton(self,anchor='w',
textvariable=self.optionA,
variable = self.selected_answer,
value = 'A',
command = lambda: self.set_ans(1)) # the radio button call 'set_ans()' with the number to set the 'selected_answer' variable
self.radioButtonB = tk.Radiobutton(self,anchor='w',
textvariable=self.optionB,
variable = self.selected_answer,
value = 'B',
command = lambda: self.set_ans(2))
self.radioButtonC = tk.Radiobutton(self,anchor='w',
textvariable=self.optionC,
variable = self.selected_answer,
value = 'C',
command = lambda: self.set_ans(3))
#Creating the labels for options and questions
self.label_question = tk.Label(self,textvariable=self.question)
#Packing the widgets in the grid
self.label_question.grid(column=3,row=1,columnspan=4)
self.radioButtonA.grid(column=4,row=2, columnspan=3,sticky=tk.N+tk.S+tk.W+tk.E)
self.radioButtonB.grid(column=4,row=3, columnspan=3,sticky=tk.N+tk.S+tk.W+tk.E)
self.radioButtonC.grid(column=4,row=4, columnspan=3,sticky=tk.N+tk.S+tk.W+tk.E)
self.quitButton.grid(column=6,row=5) #,sticky=tk.N+tk.S+tk.W+tk.E)
self.nextButton.grid(column=3,row=5) #,sticky=tk.N+tk.S+tk.W+tk.E)
# Launch the quiz
if __name__ == "__main__":
myquiz = main()
# The variable that saves the quiz answers is called myquiz.anslist
# Converting user response list to dataframe
user_resp = myquiz.anslist
user_resp_df = pd.DataFrame(np.array(user_resp).reshape(1,11))
# Plant Selection Mechanism
# Creating a 1's and 0's dataframe
# Question 1
plant_ds['PlantSize'] = (plant_ds['PlantSize'] == user_resp_df.loc[0,0]).astype(int)
# Question 2
plant_ds['PotSize '] = (plant_ds['PotSize '] == user_resp_df.loc[0,1]).astype(int)
# Question 3
plant_ds['Light '] = (plant_ds['Light '] == user_resp_df.loc[0,2]).astype(int)
# Question 4
if (user_resp_df.loc[0,3]).astype(int) == 3:
plant_ds['CleanAirPlant'] = 1
else:
plant_ds['CleanAirPlant'] = (plant_ds['CleanAirPlant'] == user_resp_df.loc[0,3]).astype(int)
# Question 5
if (user_resp_df.loc[0,4]).astype(int) == 3:
plant_ds['Toxicitylevel'] = 1
else:
plant_ds['Toxicitylevel'] = (plant_ds['Toxicitylevel'] == user_resp_df.loc[0,4]).astype(int)
# Question 6
if (user_resp_df.loc[0,5]).astype(int) == 3:
plant_ds['Flowering '] = 1
else:
plant_ds['Flowering '] = (plant_ds['Flowering '] == user_resp_df.loc[0,5]).astype(int)
# Question 7
plant_ds['TempDurability'] = (plant_ds['TempDurability'] == user_resp_df.loc[0,6]).astype(int)
# Question 8
if (user_resp_df.loc[0,7]).astype(int) == 3:
plant_ds['SpecialOccasion '] = 1
else:
plant_ds['SpecialOccasion '] = (plant_ds['SpecialOccasion '] == user_resp_df.loc[0,7]).astype(int)
# Question 9
if (user_resp_df.loc[0,8]).astype(int) == 3:
plant_ds['Pruning '] = 1
else:
plant_ds['Pruning '] = (plant_ds['Pruning '] == user_resp_df.loc[0,8]).astype(int)
# Question 10
if (user_resp_df.loc[0,9]).astype(int) == 3:
plant_ds['Water '] = 1
else:
plant_ds['Water '] = (plant_ds['Water '] == user_resp_df.loc[0,9]).astype(int)
# Question 11
plant_ds['Humidity '] = (plant_ds['Humidity '] == user_resp_df.loc[0,10]).astype(int)
# Delete columns that we aren't using
slim_plant_df = plant_ds[["Plant", "Plant taxonomy", "PlantSize", "Light ", "Water ", "TempDurability", "Humidity ", "Flowering ", "PotSize ", "Pruning ", "SpecialOccasion ", "CleanAirPlant", "Toxicitylevel"]]
# Create a new column in this new dataset that sums all the ones and zeros
slim_plant_df.loc[:, 'total'] = slim_plant_df.iloc[:, 2:].sum(axis=1)  # sum only the 0/1 match columns, skipping the two name columns
# Sort dataset by descending numbers of the "sum column"
final_df = slim_plant_df.sort_values(by=['total'], ascending = False)
final_df.head()
print("Top 5 Plants selected.\n")
# Displaying Results
# Display the top 5 plants
# Show horizontal bar plot with results
def showplot():
'''Show horizontal bar plot with results.'''
y_plants = []
x_perc = []
for ii in range(0,5):
y_plants = np.append(y_plants,str(ii+1)+". "+str(final_df.iloc[ii,0]))
x_perc = np.append(x_perc,round(100*(final_df.iloc[ii,13]/11),1))
plt.rcdefaults()
fig, ax = plt.subplots()
y_pos = np.arange(len(y_plants))
ax.barh(y_pos, x_perc, align='center',
color='green')
ax.set_yticks(y_pos)
ax.set_yticklabels(y_plants)
ax.invert_yaxis() # labels read top-to-bottom
ax.set_xlabel('Percentage Match %')
ax.set_title('Your top 5 plants')
ax.set_xlim([0,100])
for i, v in enumerate(x_perc):
perc_label = str(v)+"%"
ax.text(v + 3, i + .05, perc_label, color='black', fontweight='bold')
plt.show()
# Rename the row labels to the plant name so plants can be looked up by name
#pod.head()
pod = plant_orig_ds
pod.set_index("Plant", inplace=True)
def displayinfo():
'''Display information about the plant you select.'''
pn = 0
# Accept user input
pn = int(input('Please enter the number of a plant you are interested in, or type 0: '))-1
# Keep asking for user input and displaying plant information until the user enters 0 (and pn = -1)
while pn != -1:
plantname = str(final_df.iloc[pn,0])
print("Plant name:",plantname)
print("\nPlant Characteristics:")
print("\tPlant taxonomy:",plant_orig_ds.loc[plantname,'Plant taxonomy'])
print("\tPlant size:",plant_orig_ds.loc[plantname,'PlantSize'])
print("\tPot size:",plant_orig_ds.loc[plantname,'PotSize '])
print("\tFlowers:",plant_orig_ds.loc[plantname,'Flowering '])
print("\nLifestyle:")
print("\tToxicity Level:",plant_orig_ds.loc[plantname,'Toxicitylevel'])
print("\tTemperature Range:",plant_orig_ds.loc[plantname,"Temp_low (degF)"],"-",plant_orig_ds.loc[plantname,"Temp_high (degF)"],"degF")
print("\tHumidity Level:",plant_orig_ds.loc[plantname,'Humidity '])
print("\tGood For a Special Occasion?",plant_orig_ds.loc[plantname,'SpecialOccasion '])
print("\tClean Air Plant:",plant_orig_ds.loc[plantname,'CleanAirPlant'])
print("\nPlant Care:")
print("\tSunlight:",plant_orig_ds.loc[plantname,'Light '])
print("\tWater:",plant_orig_ds.loc[plantname,'Water '])
print("\tPests:",plant_orig_ds.loc[plantname,'Pests '])
print("\tPruning:",plant_orig_ds.loc[plantname,'Pruning '])
print("\nPlant Care Recommendations:")
print("\tBest type(s) of soil to use:",plant_orig_ds.loc[plantname,'Soil '], "soil")
print("\tBest way(s) to propagate:",plant_orig_ds.loc[plantname,"Propogation (multiple options)"])
print("\n")
pn = int(input('Please enter the number of a plant you are interested in, or type 0: '))-1
print("\n")
print("Thank you for taking the plant quiz! We hope you have decided on a great plant!")
print("Please visit https://www.houseplant411.com/houseplant for more info on these plants!")
showplot()
displayinfo()
|
{"hexsha": "cfd4b3eb0ede44f3cb31fda4f185c73d21864057", "size": 13848, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/myproject/PlantProject_run.py", "max_stars_repo_name": "jacksonsn5/spring2019-group-project-team_plant", "max_stars_repo_head_hexsha": "a2d720c8e8deef8cda0b7995f3aaecf0f3393aa8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/myproject/PlantProject_run.py", "max_issues_repo_name": "jacksonsn5/spring2019-group-project-team_plant", "max_issues_repo_head_hexsha": "a2d720c8e8deef8cda0b7995f3aaecf0f3393aa8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/myproject/PlantProject_run.py", "max_forks_repo_name": "jacksonsn5/spring2019-group-project-team_plant", "max_forks_repo_head_hexsha": "a2d720c8e8deef8cda0b7995f3aaecf0f3393aa8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-04-15T01:03:14.000Z", "max_forks_repo_forks_event_max_datetime": "2020-04-15T01:03:14.000Z", "avg_line_length": 39.3409090909, "max_line_length": 209, "alphanum_fraction": 0.6449306759, "include": true, "reason": "import numpy", "num_tokens": 3537}
|
Welcome to the DavisWiki. I'm not sure if you are clear on what this wiki is. It is for Davis, California. So pages about businesses that aren't anywhere near here are likely to be removed.
|
{"hexsha": "9513db835191421e87ac46e8955f8a16d3c8533c", "size": 188, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "lab/davisWiki/caesarcunaccia.f", "max_stars_repo_name": "voflo/Search", "max_stars_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab/davisWiki/caesarcunaccia.f", "max_issues_repo_name": "voflo/Search", "max_issues_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab/davisWiki/caesarcunaccia.f", "max_forks_repo_name": "voflo/Search", "max_forks_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 94.0, "max_line_length": 187, "alphanum_fraction": 0.7872340426, "num_tokens": 42}
|
"""# Conditional distribution
A `ConditionalDistribution` estimates the conditional distribution p(y|x) for
any x using known conditional distributions for a sample of x's.
The known conditional distributions are objects with the following methods, as
defined in `scipy.stats`:
- `pdf`
- `cdf`
- `ppf`
"""
from .distribution import Distribution
import numpy as np
from sklearn.metrics.pairwise import pairwise_kernels
import json
def linspace(distributions, num=50):
"""
Parameters
----------
distributions : list of distribution objects
num : int, default=50
Number of points to sample in the linear space.
"""
def get_lb(dist):
lb = dist.ppf(0)
return lb if lb != -np.inf else dist.ppf(.01)
def get_ub(dist):
ub = dist.ppf(1)
return ub if ub != np.inf else dist.ppf(.99)
start = min([get_lb(dist) for dist in distributions])
stop = max([get_ub(dist) for dist in distributions])
return np.linspace(start, stop, num)
def wasserstein(true_dist, estimated_dist, num=50):
"""
Parameters
----------
true_dist : distribution
estimated_dist : distribution
num : int, default=50
Returns
-------
distance : float
Negative Wasserstein distance between the true and estimated
distributions.
"""
x = linspace([true_dist, estimated_dist], num)
true_cdf, estimated_cdf = true_dist.cdf(x), estimated_dist.cdf(x)
return -abs(true_cdf - estimated_cdf).sum() * (x[-1] - x[0]) / num
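# Hypothetical worked example (assumes scipy is available; not part of this
# module):
#
#     from scipy.stats import norm
#     wasserstein(norm(0, 1), norm(0, 1))  # ~0.0, identical CDFs
#     wasserstein(norm(0, 1), norm(5, 1))  # strongly negative, poor match
#
# The returned score is the negated L1 distance between the two CDFs,
# approximated by a Riemann sum over `num` grid points, so values closer to
# zero indicate a better fit.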
metrics = dict(
wasserstein=wasserstein
)
class ConditionalDistribution():
"""
Parameters and attributes
-------------------------
metric : str
Type of kernel to use. See `sklearn.pairwise.pairwise_kernels`
gamma : float, default=1
`gamma` parameter for the kernel.
coef0 : float, default=1
`coef0` parameter for the kernel.
feature_scale : float or (# conditional features,) np.array
Scaling parameters for the features on which the distribution is
conditional.
eval_metric : str or callable, default='wasserstein'
Metric to use for scoring the conditional distribution. Currently,
only `'wasserstein'` is implemented.
Additional attributes
---------------------
given : (# known distributions x # conditional features) np.array
Each row are the values of the features on which the distribution is
conditioned. This is set during `fit`.
x : np.array
Linearly spaced over the support of the conditional distribution. Set
during `fit`.
f_x : (# known distributions x shape of `x`) np.array
PDF of the known distributions for the points in `x`. Set during
`fit`.
"""
def __init__(
self, metric='linear', gamma=1, coef0=1, feature_scale=1,
eval_metric='wasserstein'
):
self.metric = metric
self.gamma = gamma
self.coef0 = coef0
self.feature_scale = feature_scale
self.eval_metric = eval_metric
self.given, self.x, self.f_x = None, None, None
def fit(self, given, distributions, num=50):
"""
Fit the conditional distribution using know conditional distributions.
Parameters
----------
given : (# known distributions x # conditional features) np.array
Sets the `given` attribute.
distributions : list of distribution objects
Known conditional distributions.
num : int, default=50
Number of points used to approximate conditional distributions.
Returns
-------
self
"""
self.given = given
self.x = linspace(distributions, num)
self.f_x = np.array([dist.pdf(self.x) for dist in distributions])
return self
def predict(self, given):
"""
Predict a conditional distribution.
Parameters
----------
given : (# estimated distributions x # conditional features) np.array
Values of features on which to condition.
Returns
-------
conditional distributions : list of smoother.Distribution
Estimated conditional distributions.
"""
given = given.reshape(1, -1) if len(given.shape) == 1 else given
kwargs = {}
if self.metric in ('poly', 'sigmoid', 'rbf', 'laplacian', 'chi2'):
kwargs['gamma'] = self.gamma
if self.metric in ('poly', 'sigmoid'):
kwargs['coef0'] = self.coef0
weight = pairwise_kernels(
self.feature_scale*given, self.feature_scale*self.given,
metric=self.metric, **kwargs
)
f_x = weight @ self.f_x
distributions = [Distribution(self.x, f_x_i) for f_x_i in f_x]
return distributions
def score(self, given, distributions):
"""
Evaluate performance.
Parameters
----------
        given : (# distributions x # conditional features) np.array
Values of features on which to condition.
distributions : list of distribution objects
Known conditional distributions against which to evaluate
predictions.
Returns
-------
score : float
"""
estimates = self.predict(given)
metric = (
metrics[self.eval_metric] if isinstance(self.eval_metric, str)
else self.eval_metric
)
return sum([
metric(true, estimated) for true, estimated in zip(distributions, estimates)
]) / len(distributions)
def get_params(self, deep=False):
"""
Returns
-------
parameters : dict
"""
return dict(
metric=self.metric,
gamma=self.gamma,
coef0=self.coef0,
feature_scale=self.feature_scale
)
def set_params(
self, metric=None, gamma=None, coef0=None, feature_scale=None
):
"""Used for cross validation"""
if metric is not None:
self.metric = metric
if gamma is not None:
self.gamma = gamma
if coef0 is not None:
self.coef0 = coef0
if feature_scale is not None:
self.feature_scale = feature_scale
return self
def dump(self):
"""
Returns
-------
JSON dict : str
JSON dictionary of conditional distribution state.
"""
return json.dumps(dict(
metric=self.metric,
gamma=self.gamma,
coef0=self.coef0,
            feature_scale=self.feature_scale.tolist() if hasattr(self.feature_scale, 'tolist') else self.feature_scale,
eval_metric=self.eval_metric,
given=self.given.tolist(),
x=self.x.tolist(),
f_x=self.f_x.tolist()
))
@classmethod
def load(cls, state_dict):
"""
Parameters
----------
state_dict : str (JSON)
Output of `cls.dump`.
Returns
-------
conditional distribution : cls
"""
state = json.loads(state_dict)
dist = cls(
metric=state['metric'],
gamma=state['gamma'],
coef0=state['coef0'],
feature_scale=state['feature_scale'],
eval_metric=state['eval_metric']
)
dist.given = np.array(state['given'])
dist.x = np.array(state['x'])
dist.f_x = np.array(state['f_x'])
return dist
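# Hypothetical end-to-end sketch: scipy frozen normals stand in for the
# module's distribution objects, assuming `linspace` and `Distribution`
# (imported elsewhere in this module) accept any object exposing the scipy
# `pdf`/`cdf` interface.
if __name__ == '__main__':
    from scipy.stats import norm
    given = np.array([[0.], [1.], [2.]])           # one conditioning feature
    dists = [norm(mu, 1) for mu in given.ravel()]  # known conditionals
    cd = ConditionalDistribution(metric='rbf', gamma=1.).fit(given, dists)
    estimated = cd.predict(np.array([[.5]]))[0]    # a smoother.Distribution
    print(cd.score(given, dists))                  # mean negative Wasserstein distance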
|
{"hexsha": "698cd2b50cdbd6dad08a244f6c36c8cc52595ca8", "size": 7596, "ext": "py", "lang": "Python", "max_stars_repo_path": "smoother/conditional.py", "max_stars_repo_name": "dsbowen/smoother", "max_stars_repo_head_hexsha": "cc2dadb34e7452d93226b70e7f9d1db421ee19e4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "smoother/conditional.py", "max_issues_repo_name": "dsbowen/smoother", "max_issues_repo_head_hexsha": "cc2dadb34e7452d93226b70e7f9d1db421ee19e4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "smoother/conditional.py", "max_forks_repo_name": "dsbowen/smoother", "max_forks_repo_head_hexsha": "cc2dadb34e7452d93226b70e7f9d1db421ee19e4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.2153846154, "max_line_length": 88, "alphanum_fraction": 0.5772775145, "include": true, "reason": "import numpy", "num_tokens": 1593}
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import dataclasses
import logging
import random
from typing import Optional
import numpy as np
import torch
from ml.rl.test.gym.open_ai_gym_environment import ModelType
from ml.rl.torch_utils import stack
from ml.rl.training.training_data_page import TrainingDataPage
logger = logging.getLogger(__name__)
@dataclasses.dataclass
class MemoryBuffer:
state: torch.Tensor
action: torch.Tensor
reward: torch.Tensor
next_state: torch.Tensor
next_action: torch.Tensor
terminal: torch.Tensor
possible_next_actions: Optional[torch.Tensor]
possible_next_actions_mask: Optional[torch.Tensor]
possible_actions: Optional[torch.Tensor]
possible_actions_mask: Optional[torch.Tensor]
time_diff: torch.Tensor
policy_id: torch.Tensor
@torch.no_grad() # type: ignore
def slice(self, indices):
return MemoryBuffer(
state=self.state[indices],
action=self.action[indices],
reward=self.reward[indices],
next_state=self.next_state[indices],
next_action=self.next_action[indices],
terminal=self.terminal[indices],
possible_next_actions=self.possible_next_actions[indices]
if self.possible_next_actions is not None
else None,
possible_next_actions_mask=self.possible_next_actions_mask[indices]
if self.possible_next_actions_mask is not None
else None,
possible_actions=self.possible_actions[indices]
if self.possible_actions is not None
else None,
possible_actions_mask=self.possible_actions_mask[indices]
if self.possible_actions_mask is not None
else None,
time_diff=self.time_diff[indices],
policy_id=self.policy_id[indices],
)
@torch.no_grad() # type: ignore
def insert_at(
self,
idx: int,
state: torch.Tensor,
action: torch.Tensor,
reward: float,
next_state: torch.Tensor,
next_action: torch.Tensor,
terminal: bool,
possible_next_actions: Optional[torch.Tensor],
possible_next_actions_mask: Optional[torch.Tensor],
time_diff: float,
possible_actions: Optional[torch.Tensor],
possible_actions_mask: Optional[torch.Tensor],
policy_id: int,
):
self.state[idx] = state
self.action[idx] = action
self.reward[idx] = reward
self.next_state[idx] = next_state
self.next_action[idx] = next_action
self.terminal[idx] = terminal
if self.possible_actions is not None:
self.possible_actions[idx] = possible_actions
if self.possible_actions_mask is not None:
self.possible_actions_mask[idx] = possible_actions_mask
if self.possible_next_actions is not None:
self.possible_next_actions[idx] = possible_next_actions
if self.possible_next_actions_mask is not None:
self.possible_next_actions_mask[idx] = possible_next_actions_mask
self.time_diff[idx] = time_diff
self.policy_id[idx] = policy_id
@classmethod
def create(
cls,
max_size: int,
state_dim: int,
action_dim: int,
max_possible_actions: Optional[int],
        has_possible_actions: bool,
):
return cls(
state=torch.zeros((max_size, state_dim)),
action=torch.zeros((max_size, action_dim)),
reward=torch.zeros((max_size, 1)),
next_state=torch.zeros((max_size, state_dim)),
next_action=torch.zeros((max_size, action_dim)),
terminal=torch.zeros((max_size, 1), dtype=torch.uint8),
possible_next_actions=torch.zeros(
(max_size, max_possible_actions, action_dim)
)
            if has_possible_actions
else None,
possible_next_actions_mask=torch.zeros((max_size, max_possible_actions))
if max_possible_actions
else None,
possible_actions=torch.zeros((max_size, max_possible_actions, action_dim))
            if has_possible_actions
else None,
possible_actions_mask=torch.zeros((max_size, max_possible_actions))
if max_possible_actions
else None,
time_diff=torch.zeros((max_size, 1)),
policy_id=torch.zeros((max_size, 1), dtype=torch.long),
)
class OpenAIGymMemoryPool:
def __init__(self, max_replay_memory_size: int):
"""
Creates an OpenAIGymMemoryPool object.
:param max_replay_memory_size: Upper bound on the number of transitions
to store in replay memory.
"""
self.max_replay_memory_size = max_replay_memory_size
self.memory_num = 0
# Not initializing in the beginning because we don't know the shapes
self.memory_buffer: Optional[MemoryBuffer] = None
@property
def size(self):
return min(self.memory_num, self.max_replay_memory_size)
@property
def state_dim(self):
assert self.memory_buffer is not None
return self.memory_buffer.state.shape[1]
@property
def action_dim(self):
assert self.memory_buffer is not None
return self.memory_buffer.action.shape[1]
def sample_memories(self, batch_size, model_type, chunk=None):
"""
        Samples transitions from replay memory, uniformly at random by default,
        or deterministically when `chunk` is passed.
*Note*: 1-D vectors such as state & action get stacked to make a 2-D
matrix, while a 2-D matrix such as possible_actions (in the parametric
case) get concatenated to make a bigger 2-D matrix
:param batch_size: Number of sampled transitions to return.
:param model_type: Model type (discrete, parametric).
:param chunk: Index of chunk of data (for deterministic sampling).
"""
if chunk is None:
indices = torch.randint(0, self.size, size=(batch_size,))
else:
start_idx = chunk * batch_size
end_idx = start_idx + batch_size
indices = range(start_idx, end_idx)
memory = self.memory_buffer.slice(indices)
states = memory.state
next_states = memory.next_state
assert states.dim() == 2
assert next_states.dim() == 2
if model_type == ModelType.PYTORCH_PARAMETRIC_DQN.value:
num_possible_actions = memory.possible_actions_mask.shape[1]
actions = memory.action
next_actions = memory.next_action
tiled_states = states.repeat(1, num_possible_actions).reshape(
-1, states.shape[1]
)
possible_actions = memory.possible_actions.reshape(-1, actions.shape[1])
possible_actions_state_concat = torch.cat(
(tiled_states, possible_actions), dim=1
)
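            # shape sketch (illustrative numbers): with 2 states of dim 3 and
            # 2 candidate actions per state, tiled_states is (4, 3) -- each
            # state row repeated once per candidate action -- and the
            # concatenation above is (4, 3 + action_dim), one row per
            # (state, candidate action) pair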
possible_actions_mask = memory.possible_actions_mask
tiled_next_states = next_states.repeat(1, num_possible_actions).reshape(
-1, next_states.shape[1]
)
possible_next_actions = memory.possible_next_actions.reshape(
-1, actions.shape[1]
)
possible_next_actions_state_concat = torch.cat(
(tiled_next_states, possible_next_actions), dim=1
)
possible_next_actions_mask = memory.possible_next_actions_mask
else:
possible_actions = None
possible_actions_state_concat = None
possible_next_actions = None
possible_next_actions_state_concat = None
possible_next_actions_mask = memory.possible_next_actions_mask
possible_actions_mask = memory.possible_actions_mask
actions = memory.action
next_actions = memory.next_action
assert len(actions.size()) == 2
assert len(next_actions.size()) == 2
rewards = memory.reward
not_terminal = 1 - memory.terminal
time_diffs = memory.time_diff
return TrainingDataPage(
states=states,
actions=actions,
propensities=None,
rewards=rewards,
next_states=next_states,
next_actions=next_actions,
not_terminal=not_terminal,
time_diffs=time_diffs,
possible_actions_mask=possible_actions_mask,
possible_actions_state_concat=possible_actions_state_concat,
possible_next_actions_mask=possible_next_actions_mask,
possible_next_actions_state_concat=possible_next_actions_state_concat,
)
def insert_into_memory(
self,
state: torch.Tensor,
action: torch.Tensor,
reward: float,
next_state: torch.Tensor,
next_action: torch.Tensor,
terminal: bool,
possible_next_actions: Optional[torch.Tensor],
possible_next_actions_mask: Optional[torch.Tensor],
time_diff: float,
possible_actions: Optional[torch.Tensor],
possible_actions_mask: Optional[torch.Tensor],
policy_id: int,
):
"""
        Inserts a transition into replay memory in such a way that retrieving
transitions uniformly at random will be equivalent to reservoir sampling.
"""
if self.memory_buffer is None:
assert state.shape == next_state.shape
assert len(state.shape) == 1
assert action.shape == next_action.shape
assert len(action.shape) == 1
if possible_actions_mask is not None:
assert possible_next_actions_mask is not None
assert possible_actions_mask.shape == possible_next_actions_mask.shape
assert len(possible_actions_mask.shape) == 1
max_possible_actions = possible_actions_mask.shape[0]
else:
max_possible_actions = None
assert (possible_actions is not None) == (possible_next_actions is not None)
self.memory_buffer = MemoryBuffer.create(
max_size=self.max_replay_memory_size,
state_dim=state.shape[0],
action_dim=action.shape[0],
max_possible_actions=max_possible_actions,
                has_possible_actions=possible_actions is not None,
)
insert_idx = None
if self.memory_num < self.max_replay_memory_size:
insert_idx = self.memory_num
else:
rand_idx = torch.randint(0, self.memory_num, size=(1,)).item()
if rand_idx < self.max_replay_memory_size:
insert_idx = rand_idx # type: ignore
if insert_idx is not None:
self.memory_buffer.insert_at(
insert_idx,
state,
action,
reward,
next_state,
next_action,
terminal,
possible_next_actions,
possible_next_actions_mask,
time_diff,
possible_actions,
possible_actions_mask,
policy_id,
)
self.memory_num += 1
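# Minimal smoke test (hypothetical shapes): fill the pool with random
# discrete-action transitions, then sample a batch; any model_type other than
# ModelType.PYTORCH_PARAMETRIC_DQN.value exercises the non-parametric path.
if __name__ == "__main__":
    pool = OpenAIGymMemoryPool(max_replay_memory_size=100)
    for i in range(10):
        pool.insert_into_memory(
            state=torch.randn(4), action=torch.tensor([1.0, 0.0]), reward=1.0,
            next_state=torch.randn(4), next_action=torch.tensor([0.0, 1.0]),
            terminal=(i == 9), possible_next_actions=None,
            possible_next_actions_mask=None, time_diff=1.0,
            possible_actions=None, possible_actions_mask=None, policy_id=0,
        )
    tdp = pool.sample_memories(batch_size=4, model_type="discrete")
    print(type(tdp).__name__)  # TrainingDataPage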
|
{"hexsha": "29634980a64c13e581adc00f25bb2f6c07de5b9b", "size": 11375, "ext": "py", "lang": "Python", "max_stars_repo_path": "ml/rl/test/gym/open_ai_gym_memory_pool.py", "max_stars_repo_name": "sdaulton/ReAgent", "max_stars_repo_head_hexsha": "426d4915dcd90beb3c3781d030c64e748e336351", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-01-11T18:16:32.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-30T09:34:58.000Z", "max_issues_repo_path": "ml/rl/test/gym/open_ai_gym_memory_pool.py", "max_issues_repo_name": "sdaulton/ReAgent", "max_issues_repo_head_hexsha": "426d4915dcd90beb3c3781d030c64e748e336351", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ml/rl/test/gym/open_ai_gym_memory_pool.py", "max_forks_repo_name": "sdaulton/ReAgent", "max_forks_repo_head_hexsha": "426d4915dcd90beb3c3781d030c64e748e336351", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.6935483871, "max_line_length": 88, "alphanum_fraction": 0.6313846154, "include": true, "reason": "import numpy", "num_tokens": 2273}
|
# Classes and methods for experimenting with various single-input DL models at word level
#
# model_type type of the model to be used, see the modelTypes list below for the valid values
# fold_count number of folds for k-fold training (default is 1)
#
import pandas as pd
import numpy as np
import sys, os
import argparse
import math
import json
import time
import shutil
from delft.textClassification.data_generator import DataGenerator
from keras import backend as K
from keras.engine.topology import Layer
from keras import initializers, regularizers, constraints
from keras.models import Model, load_model
from keras.layers import Dense, Embedding, Input, concatenate
from keras.layers import LSTM, Bidirectional, Dropout, SpatialDropout1D, AveragePooling1D, GlobalAveragePooling1D, TimeDistributed, Masking, Lambda
from keras.layers import GRU, MaxPooling1D, Conv1D, GlobalMaxPool1D, Activation, Add, Flatten, BatchNormalization
from keras.layers import CuDNNGRU, CuDNNLSTM
from keras.optimizers import RMSprop, Adam, Nadam
from keras.preprocessing import text, sequence
from keras.callbacks import EarlyStopping, ModelCheckpoint
from sklearn.metrics import log_loss, roc_auc_score, r2_score
from sklearn.model_selection import train_test_split
from sklearn.metrics import precision_score, precision_recall_fscore_support
#import utilities.Attention
from delft.utilities.Attention import Attention
#from ToxicAttentionAlternative import AttentionAlternative
#from ToxicAttentionWeightedAverage import AttentionWeightedAverage
from delft.textClassification.preprocess import BERT_classifier_processor
from delft.utilities.bert.run_classifier_delft import *
import delft.utilities.bert.modeling as modeling
import delft.utilities.bert.optimization as optimization
import delft.utilities.bert.tokenization as tokenization
# seed is fixed for reproducibility
from numpy.random import seed
seed(7)
from tensorflow import set_random_seed
set_random_seed(8)
modelTypes = ['lstm', 'bidLstm_simple', 'bidLstm', 'cnn', 'cnn2', 'cnn3', 'mix1', 'dpcnn',
'conv', "gru", "gru_simple", 'lstm_cnn', 'han', 'bert-base-en', 'scibert', 'biobert']
# default parameters of the different DL models
parameters_lstm = {
'max_features': 200000,
'maxlen': 300,
'embed_size': 300,
'epoch': 40,
'batch_size': 256,
'dropout_rate': 0.3,
'recurrent_dropout_rate': 0.3,
'recurrent_units': 64,
'dense_size': 32,
'resultFile': 'LSTM.csv'
}
parameters_bidLstm_simple = {
'max_features': 200000,
'maxlen': 300,
'embed_size': 300,
'epoch': 25,
'batch_size': 256,
'dropout_rate': 0.3,
'recurrent_dropout_rate': 0.3,
'recurrent_units': 300,
'dense_size': 256,
'resultFile': 'BidLSTM_simple.csv'
}
parameters_bidLstm = {
'max_features': 200000,
'maxlen': 300,
'embed_size': 300,
'epoch': 25,
'batch_size': 256,
'dropout_rate': 0.3,
'recurrent_dropout_rate': 0.3,
'recurrent_units': 300,
'dense_size': 256,
'resultFile': 'BidLSTM_attention.csv'
}
parameters_cnn = {
'max_features': 200000,
'maxlen': 250,
'embed_size': 300,
'epoch': 30,
'batch_size': 256,
'dropout_rate': 0.3,
'recurrent_dropout_rate': 0.3,
'recurrent_units': 64,
'dense_size': 32,
'resultFile': 'CNN.csv'
}
parameters_cnn2 = {
'max_features': 200000,
'maxlen': 250,
'embed_size': 300,
'epoch': 30,
'batch_size': 256,
'dropout_rate': 0.3,
'recurrent_dropout_rate': 0.3,
'recurrent_units': 64,
'dense_size': 32,
'resultFile': 'CNN2.csv'
}
parameters_cnn3 = {
'max_features': 200000,
'maxlen': 300,
'embed_size': 300,
'epoch': 30,
'batch_size': 256,
'dropout_rate': 0.3,
'recurrent_dropout_rate': 0.3,
'recurrent_units': 64,
'dense_size': 32,
'resultFile': 'CNN3.csv'
}
parameters_lstm_cnn = {
'max_features': 200000,
'maxlen': 250,
'embed_size': 300,
'epoch': 30,
'batch_size': 256,
'dropout_rate': 0.3,
'recurrent_dropout_rate': 0.3,
'recurrent_units': 64,
'dense_size': 32,
'resultFile': 'LSTM_CNN.csv'
}
parameters_conv = {
'max_features': 200000,
'maxlen': 250,
'embed_size': 300,
'epoch': 25,
'batch_size': 256,
'dropout_rate': 0.3,
'recurrent_dropout_rate': 0.3,
'recurrent_units': 256,
'dense_size': 64,
'resultFile': 'CNN.csv'
}
parameters_gru = {
'max_features': 200000,
'maxlen': 300,
'embed_size': 300,
'epoch': 30,
'batch_size': 256,
'dropout_rate': 0.3,
'recurrent_dropout_rate': 0.3,
'recurrent_units': 64,
'dense_size': 32,
'resultFile': 'GRU.csv'
}
parameters_gru_old = {
'max_features': 200000,
'maxlen': 300,
'embed_size': 300,
'epoch': 30,
'batch_size': 512,
'dropout_rate': 0.3,
'recurrent_dropout_rate': 0.3,
'recurrent_units': 64,
'dense_size': 32,
'resultFile': 'GRU.csv'
}
parameters_gru_simple = {
'max_features': 200000,
'maxlen': 300,
'embed_size': 300,
'epoch': 30,
'batch_size': 512,
'dropout_rate': 0.3,
'recurrent_dropout_rate': 0.3,
'recurrent_units': 64,
'dense_size': 32,
'resultFile': 'GRU_simple.csv'
}
parameters_mix1 = {
'max_features': 200000,
'maxlen': 300,
'embed_size': 300,
'epoch': 30,
'batch_size': 256,
'dropout_rate': 0.3,
'recurrent_dropout_rate': 0.3,
'recurrent_units': 64,
'dense_size': 32,
'resultFile': 'mix1.csv'
}
parameters_dpcnn = {
'max_features': 200000,
'maxlen': 300,
'embed_size': 300,
'epoch': 25,
'batch_size': 256,
'dropout_rate': 0.3,
'recurrent_dropout_rate': 0.3,
'recurrent_units': 64,
'dense_size': 32,
'resultFile': 'dpcnn.csv'
}
parametersMap = { 'lstm' : parameters_lstm, 'bidLstm_simple' : parameters_bidLstm_simple, 'bidLstm': parameters_bidLstm,
'cnn': parameters_cnn, 'cnn2': parameters_cnn2, 'cnn3': parameters_cnn3, 'lstm_cnn': parameters_lstm_cnn,
'mix1': parameters_mix1, 'gru': parameters_gru, 'gru_simple': parameters_gru_simple,
'dpcnn': parameters_dpcnn, 'conv': parameters_conv }
# basic LSTM
def lstm(maxlen, embed_size, recurrent_units, dropout_rate, recurrent_dropout_rate, dense_size, nb_classes):
#inp = Input(shape=(maxlen, ))
input_layer = Input(shape=(maxlen, embed_size), )
#x = Embedding(max_features, embed_size, weights=[embedding_matrix],
# trainable=False)(inp)
x = LSTM(recurrent_units, return_sequences=True, dropout=dropout_rate,
recurrent_dropout=dropout_rate)(input_layer)
#x = CuDNNLSTM(recurrent_units, return_sequences=True)(x)
x = Dropout(dropout_rate)(x)
x_a = GlobalMaxPool1D()(x)
x_b = GlobalAveragePooling1D()(x)
#x_c = AttentionWeightedAverage()(x)
#x_a = MaxPooling1D(pool_size=2)(x)
#x_b = AveragePooling1D(pool_size=2)(x)
x = concatenate([x_a,x_b])
x = Dense(dense_size, activation="relu")(x)
x = Dropout(dropout_rate)(x)
x = Dense(nb_classes, activation="sigmoid")(x)
model = Model(inputs=input_layer, outputs=x)
model.summary()
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
return model
# bidirectional LSTM
def bidLstm_simple(maxlen, embed_size, recurrent_units, dropout_rate, recurrent_dropout_rate, dense_size, nb_classes):
#inp = Input(shape=(maxlen, ))
input_layer = Input(shape=(maxlen, embed_size), )
#x = Embedding(max_features, embed_size, weights=[embedding_matrix], trainable=False)(inp)
x = Bidirectional(LSTM(recurrent_units, return_sequences=True, dropout=dropout_rate,
recurrent_dropout=dropout_rate))(input_layer)
x = Dropout(dropout_rate)(x)
x_a = GlobalMaxPool1D()(x)
x_b = GlobalAveragePooling1D()(x)
#x_c = AttentionWeightedAverage()(x)
#x_a = MaxPooling1D(pool_size=2)(x)
#x_b = AveragePooling1D(pool_size=2)(x)
x = concatenate([x_a,x_b])
x = Dense(dense_size, activation="relu")(x)
x = Dropout(dropout_rate)(x)
x = Dense(nb_classes, activation="sigmoid")(x)
model = Model(inputs=input_layer, outputs=x)
model.summary()
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
return model
# bidirectional LSTM with attention layer
def bidLstm(maxlen, embed_size, recurrent_units, dropout_rate, recurrent_dropout_rate, dense_size, nb_classes):
#inp = Input(shape=(maxlen, ))
input_layer = Input(shape=(maxlen, embed_size), )
#x = Embedding(max_features, embed_size, weights=[embedding_matrix], trainable=False)(inp)
x = Bidirectional(LSTM(recurrent_units, return_sequences=True, dropout=dropout_rate,
recurrent_dropout=dropout_rate))(input_layer)
#x = Dropout(dropout_rate)(x)
x = Attention(maxlen)(x)
#x = AttentionWeightedAverage(maxlen)(x)
#print('len(x):', len(x))
#x = AttentionWeightedAverage(maxlen)(x)
x = Dense(dense_size, activation="relu")(x)
x = Dropout(dropout_rate)(x)
x = Dense(nb_classes, activation="sigmoid")(x)
model = Model(inputs=input_layer, outputs=x)
model.summary()
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
return model
# conv+GRU with embeddings
def cnn(maxlen, embed_size, recurrent_units, dropout_rate, recurrent_dropout_rate, dense_size, nb_classes):
#inp = Input(shape=(maxlen, ))
input_layer = Input(shape=(maxlen, embed_size), )
#x = Embedding(max_features, embed_size, weights=[embedding_matrix], trainable=False)(inp)
x = Dropout(dropout_rate)(input_layer)
x = Conv1D(filters=recurrent_units, kernel_size=2, padding='same', activation='relu')(x)
x = MaxPooling1D(pool_size=2)(x)
x = Conv1D(filters=recurrent_units, kernel_size=2, padding='same', activation='relu')(x)
x = MaxPooling1D(pool_size=2)(x)
x = Conv1D(filters=recurrent_units, kernel_size=2, padding='same', activation='relu')(x)
x = MaxPooling1D(pool_size=2)(x)
x = GRU(recurrent_units)(x)
x = Dropout(dropout_rate)(x)
x = Dense(dense_size, activation="relu")(x)
x = Dense(nb_classes, activation="sigmoid")(x)
model = Model(inputs=input_layer, outputs=x)
model.summary()
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
return model
def cnn2_best(maxlen, embed_size, recurrent_units, dropout_rate, recurrent_dropout_rate, dense_size, nb_classes):
#inp = Input(shape=(maxlen, ))
input_layer = Input(shape=(maxlen, embed_size), )
#x = Embedding(max_features, embed_size, weights=[embedding_matrix], trainable=False)(inp)
x = Dropout(dropout_rate)(input_layer)
x = Conv1D(filters=recurrent_units, kernel_size=2, padding='same', activation='relu')(x)
#x = MaxPooling1D(pool_size=2)(x)
x = Conv1D(filters=recurrent_units, kernel_size=2, padding='same', activation='relu')(x)
#x = MaxPooling1D(pool_size=2)(x)
x = Conv1D(filters=recurrent_units, kernel_size=2, padding='same', activation='relu')(x)
#x = MaxPooling1D(pool_size=2)(x)
x = GRU(recurrent_units, return_sequences=False, dropout=dropout_rate,
recurrent_dropout=dropout_rate)(x)
#x = Dropout(dropout_rate)(x)
x = Dense(dense_size, activation="relu")(x)
x = Dense(nb_classes, activation="sigmoid")(x)
model = Model(inputs=input_layer, outputs=x)
model.summary()
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
return model
def cnn2(maxlen, embed_size, recurrent_units, dropout_rate, recurrent_dropout_rate, dense_size, nb_classes):
#inp = Input(shape=(maxlen, ))
input_layer = Input(shape=(maxlen, embed_size), )
#x = Embedding(max_features, embed_size, weights=[embedding_matrix], trainable=False)(inp)
x = Dropout(dropout_rate)(input_layer)
x = Conv1D(filters=recurrent_units, kernel_size=2, padding='same', activation='relu')(x)
#x = MaxPooling1D(pool_size=2)(x)
x = Conv1D(filters=recurrent_units, kernel_size=2, padding='same', activation='relu')(x)
#x = MaxPooling1D(pool_size=2)(x)
x = Conv1D(filters=recurrent_units, kernel_size=2, padding='same', activation='relu')(x)
#x = MaxPooling1D(pool_size=2)(x)
x = GRU(recurrent_units, return_sequences=False, dropout=dropout_rate,
recurrent_dropout=dropout_rate)(x)
#x = Dropout(dropout_rate)(x)
x = Dense(dense_size, activation="relu")(x)
x = Dense(nb_classes, activation="sigmoid")(x)
model = Model(inputs=input_layer, outputs=x)
model.summary()
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
return model
def cnn3(maxlen, embed_size, recurrent_units, dropout_rate, recurrent_dropout_rate, dense_size, nb_classes):
#inp = Input(shape=(maxlen, ))
input_layer = Input(shape=(maxlen, embed_size), )
#x = Embedding(max_features, embed_size, weights=[embedding_matrix], trainable=False)(inp)
x = GRU(recurrent_units, return_sequences=True, dropout=dropout_rate,
recurrent_dropout=dropout_rate)(input_layer)
#x = Dropout(dropout_rate)(x)
x = Conv1D(filters=recurrent_units, kernel_size=2, padding='same', activation='relu')(x)
x = MaxPooling1D(pool_size=2)(x)
x = Conv1D(filters=recurrent_units, kernel_size=2, padding='same', activation='relu')(x)
x = MaxPooling1D(pool_size=2)(x)
x = Conv1D(filters=recurrent_units, kernel_size=2, padding='same', activation='relu')(x)
x = MaxPooling1D(pool_size=2)(x)
x_a = GlobalMaxPool1D()(x)
x_b = GlobalAveragePooling1D()(x)
#x_c = AttentionWeightedAverage()(x)
#x_a = MaxPooling1D(pool_size=2)(x)
#x_b = AveragePooling1D(pool_size=2)(x)
x = concatenate([x_a,x_b])
#x = Dropout(dropout_rate)(x)
x = Dense(dense_size, activation="relu")(x)
x = Dense(nb_classes, activation="sigmoid")(x)
model = Model(inputs=input_layer, outputs=x)
model.summary()
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
return model
def conv(maxlen, embed_size, recurrent_units, dropout_rate, recurrent_dropout_rate, dense_size, nb_classes):
filter_kernels = [7, 7, 5, 5, 3, 3]
#inp = Input(shape=(maxlen, ))
input_layer = Input(shape=(maxlen, embed_size), )
#x = Embedding(max_features, embed_size, weights=[embedding_matrix], trainable=False)(inp)
    conv = Conv1D(filters=recurrent_units, kernel_size=filter_kernels[0], padding='valid', activation='relu')(input_layer)
    conv = MaxPooling1D(pool_size=3)(conv)
    conv1 = Conv1D(filters=recurrent_units, kernel_size=filter_kernels[1], padding='valid', activation='relu')(conv)
    conv1 = MaxPooling1D(pool_size=3)(conv1)
    conv2 = Conv1D(filters=recurrent_units, kernel_size=filter_kernels[2], padding='valid', activation='relu')(conv1)
    conv3 = Conv1D(filters=recurrent_units, kernel_size=filter_kernels[3], padding='valid', activation='relu')(conv2)
    conv4 = Conv1D(filters=recurrent_units, kernel_size=filter_kernels[4], padding='valid', activation='relu')(conv3)
    conv5 = Conv1D(filters=recurrent_units, kernel_size=filter_kernels[5], padding='valid', activation='relu')(conv4)
    conv5 = MaxPooling1D(pool_size=3)(conv5)
conv5 = Flatten()(conv5)
z = Dropout(0.5)(Dense(dense_size, activation='relu')(conv5))
#x = GlobalMaxPool1D()(x)
x = Dense(nb_classes, activation="sigmoid")(z)
model = Model(inputs=input_layer, outputs=x)
model.summary()
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
return model
# LSTM + conv
def lstm_cnn(maxlen, embed_size, recurrent_units, dropout_rate, recurrent_dropout_rate, dense_size, nb_classes):
#inp = Input(shape=(maxlen, ))
input_layer = Input(shape=(maxlen, embed_size), )
#x = Embedding(max_features, embed_size, weights=[embedding_matrix],
# trainable=False)(inp)
x = LSTM(recurrent_units, return_sequences=True, dropout=dropout_rate,
recurrent_dropout=dropout_rate)(input_layer)
x = Dropout(dropout_rate)(x)
x = Conv1D(filters=recurrent_units, kernel_size=2, padding='same', activation='relu')(x)
x = Conv1D(filters=300,
kernel_size=5,
padding='valid',
activation='tanh',
strides=1)(x)
#x = MaxPooling1D(pool_size=2)(x)
#x = Conv1D(filters=300,
# kernel_size=5,
# padding='valid',
# activation='tanh',
# strides=1)(x)
#x = MaxPooling1D(pool_size=2)(x)
#x = Conv1D(filters=300,
# kernel_size=3,
# padding='valid',
# activation='tanh',
# strides=1)(x)
x_a = GlobalMaxPool1D()(x)
x_b = GlobalAveragePooling1D()(x)
x = concatenate([x_a,x_b])
x = Dense(dense_size, activation="relu")(x)
x = Dropout(dropout_rate)(x)
x = Dense(nb_classes, activation="sigmoid")(x)
model = Model(inputs=input_layer, outputs=x)
model.summary()
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
return model
# 2 bid. GRU
def gru(maxlen, embed_size, recurrent_units, dropout_rate, recurrent_dropout_rate, dense_size, nb_classes):
#input_layer = Input(shape=(maxlen,))
input_layer = Input(shape=(maxlen, embed_size), )
#embedding_layer = Embedding(max_features, embed_size,
# weights=[embedding_matrix], trainable=False)(input_layer)
x = Bidirectional(GRU(recurrent_units, return_sequences=True, dropout=dropout_rate,
recurrent_dropout=recurrent_dropout_rate))(input_layer)
x = Dropout(dropout_rate)(x)
x = Bidirectional(GRU(recurrent_units, return_sequences=True, dropout=dropout_rate,
recurrent_dropout=recurrent_dropout_rate))(x)
#x = AttentionWeightedAverage(maxlen)(x)
x_a = GlobalMaxPool1D()(x)
x_b = GlobalAveragePooling1D()(x)
#x_c = AttentionWeightedAverage()(x)
#x_a = MaxPooling1D(pool_size=2)(x)
#x_b = AveragePooling1D(pool_size=2)(x)
x = concatenate([x_a,x_b], axis=1)
#x = Dense(dense_size, activation="relu")(x)
#x = Dropout(dropout_rate)(x)
x = Dense(dense_size, activation="relu")(x)
output_layer = Dense(nb_classes, activation="sigmoid")(x)
model = Model(inputs=input_layer, outputs=output_layer)
model.summary()
model.compile(loss='binary_crossentropy',
optimizer=RMSprop(clipvalue=1, clipnorm=1),
#optimizer='adam',
metrics=['accuracy'])
return model
def gru_best(maxlen, embed_size, recurrent_units, dropout_rate, recurrent_dropout_rate, dense_size, nb_classes):
#input_layer = Input(shape=(maxlen,))
input_layer = Input(shape=(maxlen, embed_size), )
#embedding_layer = Embedding(max_features, embed_size,
# weights=[embedding_matrix], trainable=False)(input_layer)
x = Bidirectional(GRU(recurrent_units, return_sequences=True, dropout=dropout_rate,
recurrent_dropout=dropout_rate))(input_layer)
x = Dropout(dropout_rate)(x)
x = Bidirectional(GRU(recurrent_units, return_sequences=True, dropout=dropout_rate,
recurrent_dropout=dropout_rate))(x)
#x = AttentionWeightedAverage(maxlen)(x)
x_a = GlobalMaxPool1D()(x)
x_b = GlobalAveragePooling1D()(x)
#x_c = AttentionWeightedAverage()(x)
#x_a = MaxPooling1D(pool_size=2)(x)
#x_b = AveragePooling1D(pool_size=2)(x)
x = concatenate([x_a,x_b], axis=1)
#x = Dense(dense_size, activation="relu")(x)
#x = Dropout(dropout_rate)(x)
x = Dense(dense_size, activation="relu")(x)
output_layer = Dense(nb_classes, activation="sigmoid")(x)
model = Model(inputs=input_layer, outputs=output_layer)
model.summary()
model.compile(loss='binary_crossentropy',
#optimizer=RMSprop(clipvalue=1, clipnorm=1),
optimizer='adam',
metrics=['accuracy'])
return model
# 1 layer bid GRU
def gru_simple(maxlen, embed_size, recurrent_units, dropout_rate, recurrent_dropout_rate, dense_size, nb_classes):
#input_layer = Input(shape=(maxlen,))
input_layer = Input(shape=(maxlen, embed_size), )
#embedding_layer = Embedding(max_features, embed_size,
# weights=[embedding_matrix], trainable=False)(input_layer)
x = Bidirectional(GRU(recurrent_units, return_sequences=True, dropout=dropout_rate,
recurrent_dropout=dropout_rate))(input_layer)
#x = AttentionWeightedAverage(maxlen)(x)
x_a = GlobalMaxPool1D()(x)
x_b = GlobalAveragePooling1D()(x)
#x_c = AttentionWeightedAverage()(x)
#x_a = MaxPooling1D(pool_size=2)(x)
#x_b = AveragePooling1D(pool_size=2)(x)
x = concatenate([x_a,x_b], axis=1)
#x = Dense(dense_size, activation="relu")(x)
#x = Dropout(dropout_rate)(x)
x = Dense(dense_size, activation="relu")(x)
output_layer = Dense(nb_classes, activation="sigmoid")(x)
model = Model(inputs=input_layer, outputs=output_layer)
model.summary()
model.compile(loss='binary_crossentropy',
optimizer=RMSprop(clipvalue=1, clipnorm=1),
#optimizer='adam',
metrics=['accuracy'])
return model
# bid GRU + bid LSTM
def mix1(maxlen, embed_size, recurrent_units, dropout_rate, recurrent_dropout_rate, dense_size, nb_classes):
#input_layer = Input(shape=(maxlen,))
input_layer = Input(shape=(maxlen, embed_size), )
#embedding_layer = Embedding(max_features, embed_size,
# weights=[embedding_matrix], trainable=False)(input_layer)
x = Bidirectional(GRU(recurrent_units, return_sequences=True, dropout=dropout_rate,
recurrent_dropout=recurrent_dropout_rate))(input_layer)
x = Dropout(dropout_rate)(x)
x = Bidirectional(LSTM(recurrent_units, return_sequences=True, dropout=dropout_rate,
recurrent_dropout=recurrent_dropout_rate))(x)
x_a = GlobalMaxPool1D()(x)
x_b = GlobalAveragePooling1D()(x)
x = concatenate([x_a,x_b])
x = Dense(dense_size, activation="relu")(x)
output_layer = Dense(nb_classes, activation="sigmoid")(x)
model = Model(inputs=input_layer, outputs=output_layer)
model.summary()
model.compile(loss='binary_crossentropy',
optimizer=RMSprop(clipvalue=1, clipnorm=1),
#optimizer='adam',
metrics=['accuracy'])
return model
# DPCNN
def dpcnn(maxlen, embed_size, recurrent_units, dropout_rate, recurrent_dropout_rate, dense_size, nb_classes):
#input_layer = Input(shape=(maxlen, ))
input_layer = Input(shape=(maxlen, embed_size), )
#X = Embedding(max_features, embed_size, weights=[embedding_matrix],
# trainable=False)(input_layer)
    # project the input to 'recurrent_units' channels so that the residual
    # additions below have matching shapes
    X = Conv1D(filters=recurrent_units, kernel_size=1)(input_layer)
    # first block (pre-activation residual: activate, convolve twice, then
    # add the untouched shortcut back to the convolution output)
    X_shortcut1 = X
    X = Activation('relu')(X_shortcut1) # pre activation
    X = Conv1D(filters=recurrent_units, kernel_size=2, padding='same')(X)
    X = Activation('relu')(X)
    X = Conv1D(filters=recurrent_units, kernel_size=2, padding='same')(X)
    # connect shortcut to the main path
    X = Add()([X_shortcut1,X])
    X = MaxPooling1D(pool_size=3, strides=2, padding='valid')(X)
    # second block
    X_shortcut2 = X
    X = Activation('relu')(X_shortcut2) # pre activation
    X = Conv1D(filters=recurrent_units, kernel_size=2, padding='same')(X)
    X = Activation('relu')(X)
    X = Conv1D(filters=recurrent_units, kernel_size=2, padding='same')(X)
    # connect shortcut to the main path
    X = Add()([X_shortcut2,X])
    X = MaxPooling1D(pool_size=2, strides=2, padding='valid')(X)
# Output
X = Flatten()(X)
X = Dense(nb_classes, activation='sigmoid')(X)
model = Model(inputs = input_layer, outputs = X, name='dpcnn')
model.summary()
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
return model
def getModel(model_config, training_config):
model_type = model_config.model_type
fold_count = model_config.fold_number
# for BERT models, parameters are set at class level
if model_config.model_type.find('bert') != -1:
print("model_config.maxlen: " + str(model_config.maxlen))
print("model_config.batch_size: " + str(model_config.batch_size))
model = BERT_classifier(model_config,
fold_count=fold_count,
labels=model_config.list_classes,
class_weights=training_config.class_weights)
        # the following ensures that the model stays warm/available
model.load()
return model
# default model parameters
parameters = parametersMap[model_type]
embed_size = parameters['embed_size']
maxlen = parameters['maxlen']
batch_size = parameters['batch_size']
recurrent_units = parameters['recurrent_units']
dropout_rate = parameters['dropout_rate']
recurrent_dropout_rate = parameters['recurrent_dropout_rate']
dense_size = parameters['dense_size']
    # overwrite with config parameters
embed_size = model_config.word_embedding_size
maxlen = model_config.maxlen
batch_size = training_config.batch_size
max_epoch = training_config.max_epoch
model_type = model_config.model_type
use_roc_auc = training_config.use_roc_auc
nb_classes = len(model_config.list_classes)
dropout_rate = model_config.dropout
recurrent_dropout_rate = model_config.recurrent_dropout
# awww Python has no case/switch statement :D
if (model_type == 'bidLstm'):
model = bidLstm(maxlen, embed_size, recurrent_units, dropout_rate, recurrent_dropout_rate, dense_size, nb_classes)
elif (model_type == 'bidLstm_simple'):
model = bidLstm_simple(maxlen, embed_size, recurrent_units, dropout_rate, recurrent_dropout_rate, dense_size, nb_classes)
elif (model_type == 'lstm'):
model = lstm(maxlen, embed_size, recurrent_units, dropout_rate, recurrent_dropout_rate, dense_size, nb_classes)
elif (model_type == 'cnn'):
model = cnn(maxlen, embed_size, recurrent_units, dropout_rate, recurrent_dropout_rate, dense_size, nb_classes)
elif (model_type == 'cnn2'):
model = cnn2(maxlen, embed_size, recurrent_units, dropout_rate, recurrent_dropout_rate, dense_size, nb_classes)
elif (model_type == 'cnn3'):
model = cnn3(maxlen, embed_size, recurrent_units, dropout_rate, recurrent_dropout_rate, dense_size, nb_classes)
elif (model_type == 'lstm_cnn'):
model = lstm_cnn(maxlen, embed_size, recurrent_units, dropout_rate, recurrent_dropout_rate, dense_size, nb_classes)
    elif (model_type == 'conv'):
        model = conv(maxlen, embed_size, recurrent_units, dropout_rate, recurrent_dropout_rate, dense_size, nb_classes)
elif (model_type == 'mix1'):
model = mix1(maxlen, embed_size, recurrent_units, dropout_rate, recurrent_dropout_rate, dense_size, nb_classes)
elif (model_type == 'dpcnn'):
model = dpcnn(maxlen, embed_size, recurrent_units, dropout_rate, recurrent_dropout_rate, dense_size, nb_classes)
elif (model_type == 'gru'):
model = gru(maxlen, embed_size, recurrent_units, dropout_rate, recurrent_dropout_rate, dense_size, nb_classes)
elif (model_type == 'gru_simple'):
model = gru_simple(maxlen, embed_size, recurrent_units, dropout_rate, recurrent_dropout_rate, dense_size, nb_classes)
else:
        raise OSError('The model type '+model_type+' is unknown')
return model
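# Hypothetical sketch of building one of the models above; the real
# ModelConfig/TrainingConfig classes live elsewhere in delft, so plain
# namespaces carrying only the attributes read by getModel() stand in for them.
if __name__ == '__main__':
    from types import SimpleNamespace
    _model_config = SimpleNamespace(
        model_type='gru', fold_number=1, maxlen=300, word_embedding_size=300,
        list_classes=['positive', 'negative'], dropout=0.3, recurrent_dropout=0.3)
    _training_config = SimpleNamespace(batch_size=256, max_epoch=30, use_roc_auc=True)
    _model = getModel(_model_config, _training_config)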
def train_model(model,
list_classes,
batch_size,
max_epoch,
use_roc_auc,
class_weights,
training_generator,
validation_generator,
val_y,
use_ELMo=False,
use_BERT=False,
multiprocessing=True,
callbacks=None):
best_loss = -1
best_roc_auc = -1
best_weights = None
best_epoch = 0
current_epoch = 1
while current_epoch <= max_epoch:
#model.fit(train_x, train_y, batch_size=batch_size, epochs=1)
nb_workers = 6
if use_ELMo or use_BERT:
# worker at 0 means the training will be executed in the main thread
nb_workers = 0
multiprocessing = False
model.fit_generator(
generator=training_generator,
use_multiprocessing=multiprocessing,
workers=nb_workers,
class_weight=class_weights,
epochs=1, callbacks=callbacks)
y_pred = model.predict_generator(
generator=validation_generator,
use_multiprocessing=multiprocessing,
workers=nb_workers)
total_loss = 0.0
total_roc_auc = 0.0
# we distinguish 1-class and multiclass problems
        if len(list_classes) == 1:
            total_loss = log_loss(val_y, y_pred, labels=[0,1])
            if len(np.unique(val_y)) == 1:
                # roc_auc_score sklearn implementation is not working in this case, it needs more balanced batches
                # a simple fix is to return the r2_score instead in this case (which is a regression score and not a loss)
                roc_auc = r2_score(val_y, y_pred)
                if roc_auc < 0:
                    roc_auc = 0
                total_roc_auc = roc_auc
            else:
                total_roc_auc = roc_auc_score(val_y, y_pred)
else:
for j in range(0, len(list_classes)):
#for n in range(0, len(val_y[:, j])):
# print(val_y[n, j])
#print(val_y[:, j])
#print(y_pred[:, j])
loss = log_loss(val_y[:, j], y_pred[:, j], labels=[0,1])
total_loss += loss
if len(np.unique(val_y[:, j])) == 1:
# roc_auc_score sklearn implementation is not working in this case, it needs more balanced batches
# a simple fix is to return the r2_score instead in this case (which is a regression score and not a loss)
roc_auc = r2_score(val_y[:, j], y_pred[:, j])
if roc_auc < 0:
roc_auc = 0
else:
roc_auc = roc_auc_score(val_y[:, j], y_pred[:, j])
total_roc_auc += roc_auc
total_loss /= len(list_classes)
total_roc_auc /= len(list_classes)
if use_roc_auc:
print("Epoch {0} loss {1} best_loss {2} (for info) ".format(current_epoch, total_loss, best_loss))
print("Epoch {0} roc_auc {1} best_roc_auc {2} (for early stop) ".format(current_epoch, total_roc_auc, best_roc_auc))
else:
print("Epoch {0} loss {1} best_loss {2} (for early stop) ".format(current_epoch, total_loss, best_loss))
print("Epoch {0} roc_auc {1} best_roc_auc {2} (for info) ".format(current_epoch, total_roc_auc, best_roc_auc))
current_epoch += 1
        if total_loss < best_loss or best_loss == -1 or math.isnan(best_loss):
            best_loss = total_loss
            if not use_roc_auc:
                best_weights = model.get_weights()
                best_epoch = current_epoch
        elif not use_roc_auc:
            if current_epoch - best_epoch == 5:
                break
if total_roc_auc > best_roc_auc or best_roc_auc == -1:
best_roc_auc = total_roc_auc
if use_roc_auc:
best_weights = model.get_weights()
best_epoch = current_epoch
elif use_roc_auc:
if current_epoch - best_epoch == 5:
break
model.set_weights(best_weights)
if use_roc_auc:
return model, best_roc_auc
else:
return model, best_loss
def train_folds(X, y, model_config, training_config, embeddings, callbacks=None):
fold_count = model_config.fold_number
max_epoch = training_config.max_epoch
model_type = model_config.model_type
use_roc_auc = training_config.use_roc_auc
class_weights = training_config.class_weights
fold_size = len(X) // fold_count
models = []
scores = []
for fold_id in range(0, fold_count):
print('\n------------------------ fold ' + str(fold_id) + '--------------------------------------')
fold_start = fold_size * fold_id
fold_end = fold_start + fold_size
        if fold_id == fold_count - 1:
fold_end = len(X)
train_x = np.concatenate([X[:fold_start], X[fold_end:]])
train_y = np.concatenate([y[:fold_start], y[fold_end:]])
val_x = X[fold_start:fold_end]
val_y = y[fold_start:fold_end]
training_generator = DataGenerator(train_x, train_y, batch_size=training_config.batch_size,
maxlen=model_config.maxlen, list_classes=model_config.list_classes,
embeddings=embeddings, shuffle=True)
validation_generator = DataGenerator(val_x, val_y, batch_size=training_config.batch_size,
maxlen=model_config.maxlen, list_classes=model_config.list_classes,
embeddings=embeddings, shuffle=False)
foldModel, best_score = train_model(getModel(model_config, training_config),
model_config.list_classes, training_config.batch_size, max_epoch, use_roc_auc,
class_weights, training_generator, validation_generator, val_y,
model_config.use_ELMo,
model_config.use_BERT,
multiprocessing=training_config.multiprocessing, callbacks=callbacks)
models.append(foldModel)
#model_path = os.path.join("../data/models/textClassification/",model_name, model_type+".model{0}_weights.hdf5".format(fold_id))
#foldModel.save_weights(model_path, foldModel.get_weights())
#foldModel.save(model_path)
#del foldModel
scores.append(best_score)
all_scores = sum(scores)
avg_score = all_scores/fold_count
    if (use_roc_auc):
        print("Average best roc_auc score over the", fold_count, "folds: ", avg_score)
    else:
        print("Average best log loss score over the", fold_count, "folds: ", avg_score)
return models
def predict(model, predict_generator, use_ELMo=False, use_BERT=False, use_main_thread_only=False):
nb_workers = 6
multiprocessing = True
if use_ELMo or use_BERT or use_main_thread_only:
        # worker at 0 means the prediction will be executed in the main thread
nb_workers = 0
multiprocessing = False
y = model.predict_generator(
generator=predict_generator,
use_multiprocessing=multiprocessing,
workers=nb_workers)
return y
def predict_folds(models, predict_generator, use_ELMo=False, use_BERT=False, use_main_thread_only=False):
fold_count = len(models)
y_predicts_list = []
for fold_id in range(0, fold_count):
model = models[fold_id]
#y_predicts = model.predict(xte)
nb_workers = 6
multiprocessing = True
if use_ELMo or use_BERT or use_main_thread_only:
            # worker at 0 means the prediction will be executed in the main thread
nb_workers = 0
multiprocessing = False
y_predicts = model.predict_generator(
generator=predict_generator,
use_multiprocessing=multiprocessing,
workers=nb_workers)
y_predicts_list.append(y_predicts)
y_predicts = np.ones(y_predicts_list[0].shape)
for fold_predict in y_predicts_list:
y_predicts *= fold_predict
y_predicts **= (1. / len(y_predicts_list))
return y_predicts
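# note: the fold combination above is a geometric mean of the per-fold
# probabilities, e.g. for two folds (0.9 * 0.4) ** 0.5 ~= 0.6, which is less
# sensitive to a single over-confident fold than the arithmetic mean (0.65)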
class BERT_classifier():
"""
BERT classifier model with fine-tuning.
Implementation is an adaptation of the official repository:
https://github.com/google-research/bert
For reference:
--
@article{devlin2018bert,
title={BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding},
author={Devlin, Jacob and Chang, Ming-Wei and Lee, Kenton and Toutanova, Kristina},
journal={arXiv preprint arXiv:1810.04805},
year={2018}
}
"""
def __init__(self, config, model_name=None, fold_count=1, labels=None, class_weights=None):
self.graph = tf.get_default_graph()
print("config.maxlen: ", config.maxlen)
print("config.batch_size: ", config.batch_size)
if model_name is not None:
self.model_name = model_name
else:
self.model_name = config.model_name
self.model_type = config.model_type
# we get the BERT pretrained files from the embeddings registry
description = _get_description(self.model_type)
self.class_weights = class_weights
if description is None:
raise Exception('no embeddings description found for ' + self.model_type)
self.fold_count = fold_count
        # note: postpone the instantiation if the resources are not available; it normally
        # means that we load a fine-tuned model and we don't need to look at the original
        # pre-trained resources (except the vocabulary, which is mandatory when predicting)
self.config_file = None
self.weight_file = None
self.vocab_file = None
if description != None:
if "path-config" in description and os.path.isfile(description["path-config"]):
self.config_file = description["path-config"]
if "path-weights" in description and os.path.isfile(description["path-weights"]+".data-00000-of-00001"):
self.weight_file = description["path-weights"]
if "path-vocab" in description and os.path.isfile(description["path-vocab"]):
self.vocab_file = description["path-vocab"]
self.labels = labels
self.do_lower_case = False
self.max_seq_length= config.maxlen
self.train_batch_size = config.batch_size
self.predict_batch_size = config.batch_size
self.learning_rate = 2e-5
self.num_train_epochs = 1.0
self.warmup_proportion = 0.1
self.master = None
self.save_checkpoints_steps = 99999999 # <----- don't want to save any checkpoints
self.iterations_per_loop = 1000
self.model_dir = 'data/models/textClassification/' + self.model_name
# defaulting to fine-tuned model if available
if self.config_file is None:
self.config_file = os.path.join(self.model_dir, 'bert_config.json')
if self.weight_file is None:
self.weight_file = os.path.join(self.model_dir, 'model.ckpt')
if self.vocab_file is None:
self.vocab_file = os.path.join(self.model_dir, 'vocab.txt')
self.tokenizer = tokenization.FullTokenizer(vocab_file=self.vocab_file, do_lower_case=self.do_lower_case)
        # the processor is required by train(), eval() and predict()
        self.processor = BERT_classifier_processor(labels=labels)
        # set by load(); predict() lazily loads the estimator when still None
        self.loaded_estimator = None
        self.bert_config = modeling.BertConfig.from_json_file(self.config_file)
def train(self, x_train=None, y_train=None):
'''
Train the classifier(s). We train fold_count classifiers if fold_count>1.
'''
start = time.time()
# remove possible previous model(s)
for fold_number in range(0, self.fold_count):
if os.path.exists(self.model_dir+str(fold_number)):
shutil.rmtree(self.model_dir+str(fold_number))
train_examples = self.processor.get_train_examples(x_train=x_train, y_train=y_train)
if self.fold_count == 1:
self.train_fold(0, train_examples)
# model is unique so rename the fold model under the main model folder
shutil.rmtree(self.model_dir)
os.rename(self.model_dir+"0", self.model_dir)
else:
fold_size = len(train_examples) // self.fold_count
for fold_id in range(0, self.fold_count):
tf.logging.info('\n------------------------ fold ' + str(fold_id) + '--------------------------------------')
fold_start = fold_size * fold_id
fold_end = fold_start + fold_size
                if fold_id == fold_count - 1:
fold_end = len(train_examples)
fold_train_examples = train_examples[:fold_start] + train_examples[fold_end:]
self.train_fold(fold_id, fold_train_examples)
end = time.time()
tf.logging.info("\nTotal training complete in " + str(end - start) + " seconds")
def train_fold(self, fold_number, train_examples):
'''
Train the classifier
'''
start = time.time()
print("len(train_examples): ", len(train_examples))
print("self.train_batch_size: ", self.train_batch_size)
print("self.num_train_epochs: ", self.num_train_epochs)
num_train_steps = int(len(train_examples) / self.train_batch_size * self.num_train_epochs)
print("num_train_steps: ", num_train_steps)
print("self.warmup_proportion: ", self.warmup_proportion)
num_warmup_steps = int(num_train_steps * self.warmup_proportion)
print("num_warmup_steps: ", num_warmup_steps)
model_fn = model_fn_builder(
bert_config=self.bert_config,
num_labels=len(self.labels),
init_checkpoint=self.weight_file,
learning_rate=self.learning_rate,
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps,
use_one_hot_embeddings=True)
run_config = self._get_run_config(fold_number)
estimator = tf.contrib.tpu.TPUEstimator(
use_tpu=False,
model_fn=model_fn,
config=run_config,
train_batch_size=self.train_batch_size)
# create dir if does not exist
if not os.path.exists(self.model_dir+str(fold_number)):
os.makedirs(self.model_dir+str(fold_number))
train_file = os.path.join(self.model_dir+str(fold_number), "train.tf_record")
file_based_convert_examples_to_features(train_examples, self.labels,
self.max_seq_length, self.tokenizer, train_file)
tf.logging.info("***** Running training *****")
tf.logging.info(" Num examples = %d", len(train_examples))
tf.logging.info(" Batch size = %d", self.train_batch_size)
tf.logging.info(" Num steps = %d", num_train_steps)
print("self.max_seq_length: ", self.max_seq_length)
print("self.train_batch_size: ", self.train_batch_size)
train_input_fn = file_based_input_fn_builder(
input_file=train_file,
seq_length=self.max_seq_length,
is_training=True,
drop_remainder=False,
batch_size=self.train_batch_size)
estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)
end = time.time()
print("\nTraining complete in " + str(end - start) + " seconds")
        # cleaning up the training garbage
os.remove(train_file)
        # the initial checkpoint has prefix model.ckpt-0* and can be removed
        # (given that it is a 1.3GB file, it's preferable!)
garbage = os.path.join(self.model_dir+str(fold_number), "model.ckpt-0.data-00000-of-00001")
if os.path.exists(garbage):
os.remove(garbage)
garbage = os.path.join(self.model_dir+str(fold_number), "model.ckpt-0.index")
if os.path.exists(garbage):
os.remove(garbage)
garbage = os.path.join(self.model_dir+str(fold_number), "model.ckpt-0.meta")
if os.path.exists(garbage):
os.remove(garbage)
# we need to save the vocab file and bert config files from the initial pre-trained model
        if self.config_file is not None:
destination = os.path.join(self.model_dir+str(fold_number), "bert_config.json")
shutil.copyfile(self.config_file, destination)
        if self.vocab_file is not None:
destination = os.path.join(self.model_dir+str(fold_number), "vocab.txt")
shutil.copyfile(self.vocab_file, destination)
        # we need to rename the fine-tuned weights and related files as the default checkpoint
for f in os.listdir(self.model_dir+str(fold_number)):
if f.endswith(".data-00000-of-00001"):
# get the checkpoint number
ind = f.find("-")
if ind == -1:
print("warning: invalid weight file name, " + f)
continue
ind2 = f.find(".", ind+1)
if ind2 == -1:
print("warning: invalid weight file name, " + f)
continue
ckpt_num = f[ind+1:ind2]
# rename weight file
new_name = f.replace("-"+ckpt_num, "")
os.rename(os.path.join(self.model_dir+str(fold_number), f), os.path.join(self.model_dir+str(fold_number), new_name))
# rename index and meta file
new_name = f.replace("-"+ckpt_num, "")
new_name = new_name.replace(".data-00000-of-00001", ".meta")
os.rename(os.path.join(self.model_dir+str(fold_number), f.replace(".data-00000-of-00001", ".meta")),
os.path.join(self.model_dir+str(fold_number), new_name))
new_name = f.replace("-"+ckpt_num, "")
new_name = new_name.replace(".data-00000-of-00001", ".index")
os.rename(os.path.join(self.model_dir+str(fold_number), f.replace(".data-00000-of-00001", ".index")),
os.path.join(self.model_dir+str(fold_number), new_name))
break
# finally we save the checkpoint file to point to this default checkpoint
destination = os.path.join(self.model_dir+str(fold_number), "checkpoint")
with open(destination, "w") as f:
            f.write('model_checkpoint_path: "model.ckpt"\nall_model_checkpoint_paths: "model.ckpt"\n')
def eval(self, x_test=None, y_test=None, run_number=0):
'''
        Evaluate the classifier(s) against a holdout set. If nb_runs>1, the
        final scores are averaged over the nb_runs models.
'''
start = time.time()
predict_examples, y_test = self.processor.get_test_examples(x_test=x_test, y_test=y_test)
#y_test_gold = np.asarray([np.argmax(line) for line in y_test])
y_predicts = self.eval_fold(predict_examples)
result_intermediate = np.asarray([np.argmax(line) for line in y_predicts])
def vectorize(index, size):
result = np.zeros(size)
if index < size:
result[index] = 1
return result
result_binary = np.array([vectorize(xi, len(self.labels)) for xi in result_intermediate])
precision, recall, fscore, support = precision_recall_fscore_support(y_test, result_binary, average=None)
print('\n')
print('{:>14} {:>12} {:>12} {:>12} {:>12}'.format(" ", "precision", "recall", "f-score", "support"))
p = 0
for the_class in self.labels:
the_class = the_class[:14]
print('{:>14} {:>12} {:>12} {:>12} {:>12}'.format(the_class, "{:10.4f}"
.format(precision[p]), "{:10.4f}".format(recall[p]), "{:10.4f}".format(fscore[p]), support[p]))
p += 1
runtime = round(time.time() - start, 3)
print("Total runtime for eval: " + str(runtime) + " seconds")
def eval_fold(self, predict_examples, fold_number=0):
num_actual_predict_examples = len(predict_examples)
predict_file = os.path.join(self.model_dir+str(fold_number), "predict.tf_record")
file_based_convert_examples_to_features(predict_examples, self.labels,
self.max_seq_length, self.tokenizer,
predict_file)
tf.logging.info("***** Running holdout prediction*****")
tf.logging.info(" Num examples = %d (%d actual, %d padding)",
len(predict_examples), num_actual_predict_examples,
len(predict_examples) - num_actual_predict_examples)
tf.logging.info(" Batch size = %d", self.predict_batch_size)
predict_input_fn = file_based_input_fn_builder(
input_file=predict_file,
seq_length=self.max_seq_length,
is_training=False,
drop_remainder=False,
batch_size=self.predict_batch_size)
        # note: hard-coded example count; num_train_steps only parameterizes the
        # optimizer schedule built by model_fn_builder and is not used when predicting
        num_train_steps = int(31861 / self.train_batch_size * self.num_train_epochs)
num_warmup_steps = int(num_train_steps * self.warmup_proportion)
model_fn = model_fn_builder(
bert_config=self.bert_config,
num_labels=len(self.labels),
init_checkpoint=self.weight_file,
learning_rate=self.learning_rate,
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps,
#use_tpu=self.use_tpu,
use_one_hot_embeddings=True)
run_config = self._get_run_config(fold_number)
estimator = tf.contrib.tpu.TPUEstimator(
use_tpu=False,
model_fn=model_fn,
config=run_config,
predict_batch_size=self.predict_batch_size)
result = estimator.predict(input_fn=predict_input_fn)
y_pred = np.zeros(shape=(len(predict_examples),len(self.labels)))
p = 0
for prediction in result:
probabilities = prediction["probabilities"]
q = 0
for class_probability in probabilities:
if self.class_weights is not None:
y_pred[p,q] = class_probability * self.class_weights[q]
else:
y_pred[p,q] = class_probability
q += 1
p += 1
        # cleaning up the garbage
os.remove(predict_file)
return y_pred
def predict(self, texts, fold_number=0):
        if self.loaded_estimator is None:
            self.load()
        # create the DeLFT json result template
'''
res = {
"software": "DeLFT",
"date": datetime.datetime.now().isoformat(),
"model": self.model_name,
"classifications": []
}
'''
        if texts is None or len(texts) == 0:
            # the json template above is commented out, so return an empty
            # prediction array consistent with the normal return type
            return np.zeros(shape=(0, len(self.labels)))
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
y_pred = np.zeros(shape=(len(texts),len(self.labels)))
y_pos = 0
for text_batch in list(chunks(texts, self.predict_batch_size)):
if type(text_batch) is np.ndarray:
text_batch = text_batch.tolist()
# if the size of the last batch is less than the batch size, we need to fill it with dummy input
num_current_batch = len(text_batch)
if num_current_batch < self.predict_batch_size:
dummy_text = text_batch[-1]
for p in range(0, self.predict_batch_size-num_current_batch):
text_batch.append(dummy_text)
# segment in batches corresponding to self.predict_batch_size
input_examples = self.processor.create_inputs(text_batch, dummy_label=self.labels[0])
input_features = convert_examples_to_features(input_examples, self.labels, self.max_seq_length, self.tokenizer)
results = self.loaded_estimator.predict(input_features, self.max_seq_length, self.predict_batch_size)
#y_pred = np.zeros(shape=(num_current_batch,len(self.labels)))
p = 0
for prediction in results:
if p == num_current_batch:
break
probabilities = prediction["probabilities"]
q = 0
for class_probability in probabilities:
if self.class_weights and len(self.class_weights) == len(probabilities):
y_pred[y_pos+p,q] = class_probability * self.class_weights[q]
else:
y_pred[y_pos+p,q] = class_probability
q += 1
p += 1
y_pos += num_current_batch
'''
y_pred_best = np.asarray([np.argmax(line) for line in y_pred])
i = 0
for text in text_batch:
if i == num_current_batch:
break
classification = {
"text": text
}
j = 0
for cl in self.labels:
classification[cl] = float(y_pred[i,j])
j += 1
best = {
"class": self.labels[y_pred_best[i]],
"conf": float(y_pred[i][y_pred_best[i]])
}
classification['selection'] = best
res["classifications"].append(classification)
i += 1
'''
return y_pred
def _get_run_config(self, fold_number=0):
tpu_cluster_resolver = None
is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
run_config = tf.contrib.tpu.RunConfig(
cluster=tpu_cluster_resolver,
master=self.master,
model_dir=self.model_dir+str(fold_number),
save_checkpoints_steps=self.save_checkpoints_steps,
tpu_config=tf.contrib.tpu.TPUConfig(
iterations_per_loop=self.iterations_per_loop,
#num_shards=self.num_tpu_cores,
per_host_input_for_training=is_per_host)
)
return run_config
def load(self):
# default
num_train_steps = int(10000 / self.train_batch_size * self.num_train_epochs)
num_warmup_steps = int(num_train_steps * self.warmup_proportion)
model_fn = model_fn_builder(
bert_config=self.bert_config,
num_labels=len(self.labels),
init_checkpoint=self.weight_file,
learning_rate=self.learning_rate,
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps,
use_one_hot_embeddings=True)
run_config = self._get_run_config(0)
self.loaded_estimator = FastPredict(tf.contrib.tpu.TPUEstimator(
use_tpu=False,
model_fn=model_fn,
config=run_config,
predict_batch_size=self.predict_batch_size), input_fn_generator)
def _get_description(name, path="./embedding-registry.json"):
registry_json = open(path).read()
registry = json.loads(registry_json)
for emb in registry["embeddings-contextualized"]:
if emb["name"] == name:
return emb
for emb in registry["transformers"]:
if emb["name"] == name:
return emb
return None
|
{"hexsha": "e3a172b7dd5fc9ea33b0b98ed8e98ae7ff7903d9", "size": 55839, "ext": "py", "lang": "Python", "max_stars_repo_path": "delft/textClassification/models.py", "max_stars_repo_name": "tantikristanti/delft", "max_stars_repo_head_hexsha": "620ddf9e55e13213d2fc9af25b9d01331256d698", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 333, "max_stars_repo_stars_event_min_datetime": "2018-05-16T07:02:05.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T11:30:32.000Z", "max_issues_repo_path": "delft/textClassification/models.py", "max_issues_repo_name": "tantikristanti/delft", "max_issues_repo_head_hexsha": "620ddf9e55e13213d2fc9af25b9d01331256d698", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 126, "max_issues_repo_issues_event_min_datetime": "2018-06-26T18:47:18.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T05:59:28.000Z", "max_forks_repo_path": "delft/textClassification/models.py", "max_forks_repo_name": "tantikristanti/delft", "max_forks_repo_head_hexsha": "620ddf9e55e13213d2fc9af25b9d01331256d698", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 67, "max_forks_repo_forks_event_min_datetime": "2018-05-15T21:28:59.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-20T19:10:29.000Z", "avg_line_length": 40.6989795918, "max_line_length": 148, "alphanum_fraction": 0.6401260768, "include": true, "reason": "import numpy,from numpy", "num_tokens": 13441}
|
import cv2
import numpy as np
image = cv2.imread('apple.jpg')
cv2.imshow('Original Image', image)
cv2.waitKey(0)
#Gaussian Blur
Gaussian = cv2.GaussianBlur(image,(7,7),0)
cv2.imshow('Gaussian Blurring', Gaussian)
cv2.imwrite('GaussianResult.jpg',Gaussian)
cv2.waitKey(0)
#Median Blur
median = cv2.medianBlur(image,5)
cv2.imshow('Median Blurring', median)
cv2.imwrite('MedianResult.jpg',median)
cv2.waitKey(0)
#Bilateral Blur
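# Bilateral filtering smooths noise while preserving edges; the arguments are
# the pixel-neighborhood diameter (9), sigmaColor (75), and sigmaSpace (75).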
bilateral = cv2.bilateralFilter(image, 9, 75, 75)
cv2.imshow('Bilateral Blur', bilateral)
cv2.imwrite('BilateralResult.jpg',bilateral)
cv2.waitKey(0)
|
{"hexsha": "90204ead5b1f6a0040bca0f09bf50c82334e5052", "size": 581, "ext": "py", "lang": "Python", "max_stars_repo_path": "SingleCORE/Smoothing Algorithm/SmoothingAlgorithm.py", "max_stars_repo_name": "RjPatil27/ACA-Project", "max_stars_repo_head_hexsha": "96c60c4e93633a2362cd032dbd555396ded18081", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "SingleCORE/Smoothing Algorithm/SmoothingAlgorithm.py", "max_issues_repo_name": "RjPatil27/ACA-Project", "max_issues_repo_head_hexsha": "96c60c4e93633a2362cd032dbd555396ded18081", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "SingleCORE/Smoothing Algorithm/SmoothingAlgorithm.py", "max_forks_repo_name": "RjPatil27/ACA-Project", "max_forks_repo_head_hexsha": "96c60c4e93633a2362cd032dbd555396ded18081", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-08-30T23:13:20.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-21T16:27:11.000Z", "avg_line_length": 22.3461538462, "max_line_length": 49, "alphanum_fraction": 0.760757315, "include": true, "reason": "import numpy", "num_tokens": 177}
|
# slc_prj.py
import os
import os.path as osp
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize, LogNorm
import astropy.units as au
import astropy.constants as ac
from ..load_sim import LoadSim
from ..io.read_starpar_vtk import read_starpar_vtk
from ..plt_tools.cmap_shift import cmap_shift
from ..classic.plot_tools.scatter_sp import scatter_sp
from ..classic.utils import texteffect
class SliceProj:
@staticmethod
def get_extent(domain):
r = dict()
r['x'] = (domain['le'][1], domain['re'][1],
domain['le'][2], domain['re'][2])
r['y'] = (domain['le'][0], domain['re'][0],
domain['le'][2], domain['re'][2])
r['z'] = (domain['le'][0], domain['re'][0],
domain['le'][1], domain['re'][1])
return r
@LoadSim.Decorators.check_pickle
def read_slc(self, num, axes=['x', 'y', 'z'],
fields=['nH', 'nH2', 'nHI', 'nHII', 'T', 'nHn', 'chi_PE',
'Erad_FUV', 'Erad_LyC'], prefix='slc',
savdir=None, force_override=False):
axes = np.atleast_1d(axes)
ds = self.load_vtk(num=num)
res = dict()
res['extent'] = self.get_extent(ds.domain)
res['time'] = ds.domain['time']
for ax in axes:
dat = ds.get_slice(ax, fields, pos='c', method='nearest')
res[ax] = dict()
for f in fields:
# if 'velocity' in f:
# for k in ('3',):
# res[ax][f+k] = dat[f+k].data
# else:
res[ax][f] = dat[f].data
return res
@LoadSim.Decorators.check_pickle
def read_prj(self, num, axes=['x', 'y', 'z'],
fields=['density', 'xHI', 'xH2', 'xHII', 'nesq'],
prefix='prj',
savdir=None, force_override=False):
axtoi = dict(x=0, y=1, z=2)
axes = np.atleast_1d(axes)
ds = self.load_vtk(num=num)
dat = ds.get_field(fields, as_xarray=True)
res = dict()
res['extent'] = self.get_extent(ds.domain)
res['time'] = ds.domain['time']
for ax in axes:
i = axtoi[ax]
dx = ds.domain['dx'][i]*self.u.length
conv_Sigma = (dx*self.u.muH*ac.u.cgs/au.cm**3).to('Msun/pc**2')
conv_EM = (dx*au.cm**-6).to('pc cm-6')
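            # Unit conversions (assuming the 'density' field is the hydrogen
            # number density n_H in cm^-3): Sigma = sum(n_H) * dx * muH * m_u
            # in Msun pc^-2, and EM = sum(n_e^2) * dx in pc cm^-6.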
res[ax] = dict()
res[ax]['Sigma'] = (np.sum(dat['density'], axis=2-i)*conv_Sigma).data
if 'xH2' in fields:
res[ax]['Sigma_H2'] = (np.sum(dat['density']*dat['xH2'],
axis=2-i)*conv_Sigma).data
if 'xHI' in fields:
res[ax]['Sigma_HI'] = (np.sum(dat['density']*dat['xHI'],
axis=2-i)*conv_Sigma).data
if 'xHII' in fields:
res[ax]['Sigma_HII'] = (np.sum(dat['density']*dat['xHII'],
axis=2-i)*conv_Sigma).data
if 'nesq' in fields:
res[ax]['EM'] = (np.sum(dat['nesq'], axis=2-i)*conv_EM).data
if 'specific_scalar[1]' in fields:
res[ax]['Sigma_scalar1'] = (np.sum(dat['density']*dat['specific_scalar[1]'],
axis=2-i)*conv_Sigma).data
if 'specific_scalar[2]' in fields:
res[ax]['Sigma_scalar2'] = (np.sum(dat['density']*dat['specific_scalar[2]'],
axis=2-i)*conv_Sigma).data
return res
@staticmethod
def plt_imshow(ax, dat, dim='z', field='Sigma', cmap='viridis',
norm=mpl.colors.LogNorm()):
im = ax.imshow(dat[dim][field], cmap=cmap, extent=dat['extent'][dim],
norm=norm, origin='lower', interpolation='none')
return im
def plt_snapshot(self, num, savefig=True):
d = self.read_prj(num, force_override=False)
sp = self.load_starpar_vtk(num)
nr = 3
nc = 4
fig, axes = plt.subplots(nr, nc, figsize=(16, 12.5), # constrained_layout=True,
gridspec_kw=dict(hspace=0.0, wspace=0.0))
norm = LogNorm(1e-1,1e3)
norm_EM = LogNorm(3e1,3e5)
im1 = []
im2 = []
im3 = []
im4 = []
for i, axis in enumerate(('x','y','z')):
extent = d['extent'][axis]
im1.append(axes[i, 0].imshow(d[axis]['Sigma'], norm=norm,
extent=extent, origin='lower'))
im2.append(axes[i, 1].imshow(d[axis]['Sigma_H2'], norm=norm,
extent=extent, origin='lower'))
im3.append(axes[i, 2].imshow(d[axis]['Sigma_HI'], norm=norm,
extent=extent, origin='lower'))
im4.append(axes[i, 3].imshow(d[axis]['EM'], norm=norm_EM,
extent=extent, origin='lower', cmap='plasma'))
# Overplot starpar
if not sp.empty:
scatter_sp(sp, axes[i, 0], axis=axis, type='proj', kpc=False,
norm_factor=4.0, agemax=10.0)
scatter_sp(sp, axes[i, 3], axis=axis, type='proj', kpc=False,
norm_factor=4.0, agemax=10.0)
        for ax in axes.flatten():
            ax.axis('on')
ax.set_xticklabels([])
ax.set_xticks([])
ax.set_yticks([])
ax.set_aspect('equal')
# Add colorbars
labels = [r'$\Sigma_{\rm gas}\;[{M_{\odot}\,{\rm pc}^{-2}}]$',
r'$\Sigma_{\rm H_2}\;[{M_{\odot}\,{\rm pc}^{-2}}]$',
r'$\Sigma_{\rm H\,I}\;[{M_{\odot}\,{\rm pc}^{-2}}]$',
r'${\rm EM}\;[{\rm pc}\,{\rm cm}^{-6}]$']
for j,im,label in zip(range(nc),(im1,im2,im3,im4),labels):
bbox_ax_top = axes[0,j].get_position()
cax = fig.add_axes([bbox_ax_top.x0+0.01, bbox_ax_top.y1+0.01,
bbox_ax_top.x1-bbox_ax_top.x0-0.02, 0.015])
cbar = plt.colorbar(im[0], cax=cax, orientation='horizontal')
cbar.set_label(label=label, fontsize='small')
cbar.ax.xaxis.set_ticks_position('top')
cbar.ax.xaxis.set_label_position('top')
cbar_yticks = plt.getp(cbar.ax.axes, 'xticklabels')
plt.setp(cbar_yticks, color='k', fontsize='x-small')
#cbar.ax.set_yticks(arange(vmin, vmax, 2), size='small')
plt.subplots_adjust(wspace=None, hspace=None)
plt.suptitle(self.basename + ' t={0:4.1f}'.format(sp.time))
if savefig:
savdir = osp.join(self.savdir, 'snapshots')
# savdir = osp.join('/tigress/jk11/figures/GMC', self.basename, 'snapshots')
if not osp.exists(savdir):
os.makedirs(savdir)
savname = osp.join(savdir, '{0:s}_{1:04d}.png'.format(self.basename, num))
plt.savefig(savname, dpi=200, bbox_inches='tight')
return fig
|
{"hexsha": "c617b567d7c581cbc3430dffa0b3511a435efc33", "size": 7286, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyathena/sf_cloud_rad/slc_prj.py", "max_stars_repo_name": "changgoo/pyathena-1", "max_stars_repo_head_hexsha": "c461ac3390d773537ce52393e3ebf68a3282aa46", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pyathena/sf_cloud_rad/slc_prj.py", "max_issues_repo_name": "changgoo/pyathena-1", "max_issues_repo_head_hexsha": "c461ac3390d773537ce52393e3ebf68a3282aa46", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pyathena/sf_cloud_rad/slc_prj.py", "max_forks_repo_name": "changgoo/pyathena-1", "max_forks_repo_head_hexsha": "c461ac3390d773537ce52393e3ebf68a3282aa46", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.4777777778, "max_line_length": 92, "alphanum_fraction": 0.4881965413, "include": true, "reason": "import numpy,import astropy", "num_tokens": 1957}
|
import cv2
import pandas as pd
import numpy as np
import argparse
import time
from keypoint_utils import KeypointMapper
def make_args():
parser = argparse.ArgumentParser()
parser.add_argument('--video', '-v', default='', help='Enter path to video')
parser.add_argument('--csv_file', '-cf', default='', help='Enter points file')
parser.add_argument('--delay', '-d', type=float, default=0., help='Enter delay between each frame')
parser.add_argument('--window_length', '-wl', type=int, default=0, help='Enter the window length to retrieve from csv file')
parser.add_argument('--polyorder', '-p', type=int, default=0, help='Enter the polyorder to retrieve from csv file')
parser.add_argument('--unfiltered', '-unf', default=False, action='store_true', help='Call this to view unfiltered mapping of points')
parser.add_argument('--full', '-f', action='store_true', help='Call this to view full screen video')
parser.add_argument('--hand', default='right')
parser.add_argument('-o', '--output', default=None, type=str, help="Saves the output video to provided path")
parser.add_argument('-bbg', '--black-background', action='store_true', help="Omit background or not")
parser.add_argument('--pcolor', type=str, help="Enter joints mapping color")
parser.add_argument('--lcolor', type=str, help="Enter stick (line) mapping color")
parser.add_argument('-s', '--start', type=int, default=0, help="Enter frame to start from")
args = parser.parse_args()
assert args.video != ''
assert args.csv_file != ''
if args.window_length != 0:
assert args.window_length > args.polyorder
        assert not args.unfiltered
return args
def main():
args = make_args()
if not args.full:
cv2.namedWindow('Image', cv2.WINDOW_NORMAL)
else:
cv2.namedWindow('Image', cv2.WND_PROP_FULLSCREEN)
cv2.setWindowProperty('Image', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
mapper = KeypointMapper(
video=args.video,
csv_file=args.csv_file,
hand=args.hand,
filtered=not args.unfiltered)
if args.output is not None:
out = mapper.get_writer_object(args.output)
for _ in range(0, args.start):
ret, img = mapper.get_next()
while True:
ret, img = mapper.get_next()
if not ret: break
if args.black_background:
img = np.zeros(img.shape, dtype=np.uint8)
img = mapper.draw(img, pcolor=args.pcolor, lcolor=args.lcolor)
cv2.imshow('Image', img)
if args.output is not None:
out.write(img)
time.sleep(args.delay)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
if args.output is not None:
print(f'Video saved at {args.output}')
out.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
main()
|
{"hexsha": "8b629c35765454aad934249a88287f0406bf70fa", "size": 2934, "ext": "py", "lang": "Python", "max_stars_repo_path": "pose_estimation/visualise/keypoint_signature_mapping.py", "max_stars_repo_name": "Rohansjamadagni/topspin-tracker", "max_stars_repo_head_hexsha": "301abdb4b7eda1596c9e9647cd5f3aa22fa36aea", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-07-10T08:12:08.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-04T20:26:00.000Z", "max_issues_repo_path": "pose_estimation/visualise/keypoint_signature_mapping.py", "max_issues_repo_name": "Rohansjamadagni/topspin-tracker", "max_issues_repo_head_hexsha": "301abdb4b7eda1596c9e9647cd5f3aa22fa36aea", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pose_estimation/visualise/keypoint_signature_mapping.py", "max_forks_repo_name": "Rohansjamadagni/topspin-tracker", "max_forks_repo_head_hexsha": "301abdb4b7eda1596c9e9647cd5f3aa22fa36aea", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-08-03T17:01:15.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-26T21:31:24.000Z", "avg_line_length": 34.9285714286, "max_line_length": 138, "alphanum_fraction": 0.6441717791, "include": true, "reason": "import numpy", "num_tokens": 683}
|
import os
import random
import time
import gym
import numpy as np
# use the following command to install the required packages and all dependencies:
# pip install gym[box2d,atari]
# on Windows, install the atari package from this index instead:
# pip install -f https://github.com/Kojoley/atari-py/releases atari_py
def ex_01():
    env = gym.make('CartPole-v1')  # create the environment
env.seed(42)
print(f'{env.action_space=}')
print(f'{env.observation_space=}')
print(f'{env.observation_space.high=}')
print(f'{env.observation_space.low=}')
action = 0
    env.reset()  # reset the environment to its initial state
    for _ in range(1000):  # subsequent simulation steps
        env.render()  # render the image
time.sleep(0.05)
# completely random action
        action = env.action_space.sample()  # choose an action (here: a random one)
# flip-flopping the action
# if action:
# action = 0
# else:
# action = 1
        observation, reward, done, info = env.step(action)  # execute the action
print(f'{action=}, {observation=}, {reward=}, {info=}')
if done:
print('The end!')
time.sleep(1)
env.reset()
    env.close()  # close the environment
def ex_02():
    env = gym.make('FrozenLake-v1')  # create the environment
env.seed(42)
print(f'{env.action_space=}')
print(f'{env.observation_space=}')
q_table = np.zeros((env.observation_space.n, env.action_space.n), dtype=np.float64)
lr = 0.3
discount_factor = 0.95
epsilon = 0.9
no_training_episodes = 20000
    for i in range(no_training_episodes):  # training episodes
        observation = env.reset()  # reset the environment to its initial state
done = False
total_reward = 0
while not done:
            if random.uniform(0, 1) < epsilon:
                # exploration
                action = env.action_space.sample()  # choose an action (here: a random one)
            else:
                # exploitation
                action = np.argmax(q_table[observation])
            next_observation, reward, done, info = env.step(action)  # execute the action
total_reward += reward
# print(f'{action=}, {observation=}, {reward=}, {info=}')
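            # Q-learning (temporal-difference) update applied below:
            #   Q(s, a) <- (1 - lr) * Q(s, a)
            #              + lr * (reward + discount_factor * max_a' Q(s', a'))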
max_next_observation = np.max(q_table[next_observation])
q_table[observation, action] = \
(1 - lr) * q_table[observation, action] + lr * (reward + discount_factor * max_next_observation)
observation = next_observation
if i % 100 == 0:
print(f'total_reward episode: {i+1}: {total_reward}')
no_test_episodes = 100
    for i in range(no_test_episodes):  # evaluation episodes
        observation = env.reset()  # reset the environment to its initial state
env.render()
done = False
total_reward = 0
while not done:
action = np.argmax(q_table[observation])
            observation, reward, done, info = env.step(action)  # execute the action
total_reward += reward
print(f'observation: {observation}, reward: {reward}, info: {info}')
os.system('cls')
            env.render()  # render the image
time.sleep(0.1)
# if i % 100:
print(f'total_reward episode: {i+1}: {total_reward}')
input()
    env.close()  # close the environment
def main():
ex_01()
ex_02()
if __name__ == '__main__':
main()
|
{"hexsha": "8c772ac843e0114803a92319eb990b556382f380", "size": 3493, "ext": "py", "lang": "Python", "max_stars_repo_path": "machine_learning_course/lab_s01e09_reinforcement_learning.py", "max_stars_repo_name": "PUTvision/MachineLearningCourse", "max_stars_repo_head_hexsha": "0958615e096bed8b7837c0840e7cc8c1df001cff", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "machine_learning_course/lab_s01e09_reinforcement_learning.py", "max_issues_repo_name": "PUTvision/MachineLearningCourse", "max_issues_repo_head_hexsha": "0958615e096bed8b7837c0840e7cc8c1df001cff", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "machine_learning_course/lab_s01e09_reinforcement_learning.py", "max_forks_repo_name": "PUTvision/MachineLearningCourse", "max_forks_repo_head_hexsha": "0958615e096bed8b7837c0840e7cc8c1df001cff", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 42, "max_forks_repo_forks_event_min_datetime": "2020-10-12T07:54:54.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-21T18:26:35.000Z", "avg_line_length": 29.8547008547, "max_line_length": 112, "alphanum_fraction": 0.5989121099, "include": true, "reason": "import numpy", "num_tokens": 946}
|
from pylsl import StreamInlet, resolve_stream
import sys
import time
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from scipy.integrate import simps
from scipy import signal
import eegspectrum
def main(epochTime,fileNumber):
i=0
# first resolve an EEG stream on the lab network
streams = resolve_stream('type', 'EEG')
#start file system for recording.
inlet = StreamInlet(streams[0])
sys.stdout = open("Data/data_streams/dataStreamA"+str(fileNumber)+".csv", "w")
start_time= time.time()
    #------read from stream with time passed as argument-------
while i<epochTime:
# get a new sample (you can also omit the timestamp part if you're not
# interested in it)
offset = inlet.time_correction()
#print('Offset: ' + str(offset))
sample, timestamp = inlet.pull_sample()
measureTime=time.time()
print(sample,measureTime-start_time)
#print(sample)
#print(timestamp-offset)
i=measureTime-start_time
#sys.stdout.close()
def read(fileName):
#----make sure data is consistent-----
data=pd.read_csv(fileName, header=None)
#print (data)
def clean(fileName,i):
#----parse data file and remove unwanted characters----
with open(fileName, 'r') as infile, open('Data/output_streams/output'+str(i)+'.csv', 'w') as outfile:
data = infile.read()
data = data.replace("[", "")
data = data.replace("]", ",")
#create array here to speed the data collection.
#full_Data = np.append(full_Data, np.array([[1,2,3]]), axis=0)
outfile.write(data)
def powerAnalizerRelative(fileName,lowV,highV,column):
fullData=np.genfromtxt(fileName,delimiter=',')
data=fullData[:,column]
#data = np.loadtxt(fileName)
sns.set(font_scale=1.2)
# Define sampling frequency and time vector
sf = 100.
time = np.arange(data.size) / sf
#------next bin process------
win = 4 * sf
freqs, psd = signal.welch(data, sf, nperseg=win)
#----------- Define band lower and upper limits---------------
low, high = lowV, highV
# Find intersecting values in frequency vector
idx_delta = np.logical_and(freqs >= low, freqs <= high)
# Frequency resolution
    freq_res = freqs[1] - freqs[0]  # = 1 / 4 = 0.25
# Compute the absolute power by approximating the area under the curve
delta_power = simps(psd[idx_delta], dx=freq_res)
total_power = simps(psd, dx=freq_res)
delta_rel_power = delta_power / total_power
#print('Relative',band,' power: %.3f' % delta_rel_power)
return delta_rel_power
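# The relative band power above follows the standard recipe: estimate the PSD
# with Welch's method, integrate it over the band of interest with Simpson's
# rule, and normalize by the total integrated power to get a dimensionless
# fraction.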
def computeAverages(fileName,z):
sys.stdout = open("Data/computedAverages/computed"+".csv", "a")
#Delta wave – (0.5 – 3 Hz)
#Theta wave – (4 – 7 Hz)
#Alpha wave – (8 – 12 Hz)
#Mu wave – (7.5 – 12.5 Hz)
#SMR wave – (12.5 – 15.5 Hz)
#Beta wave – (15 – 30 Hz)
#Gamma wave – (>30 Hz)
for k in range (6):
D=powerAnalizerRelative(fileName,0.5,3,k)
T=powerAnalizerRelative(fileName,4,7,k)
A=powerAnalizerRelative(fileName,8,12,k)
B=powerAnalizerRelative(fileName,15,30,k)
G=powerAnalizerRelative(fileName,30,45,k)
print(D,",",T,",",A,",",B,",",G,",")
#sys.stdout = close("computedAverages\computed"+str(z)+".csv")
#i=i+1
if __name__ == '__main__':
studyTime=310
epochTime=2
    fileLength = studyTime / epochTime
#created the data array to work online in a faster method
#full_Data=np.empty(0,5)
    for i in range(int(fileLength)):
fileName="Data/data_streams/dataStreamA"+str(i)+".csv"
main(epochTime,i)
clean(fileName,i)
output="Data/output_streams/output"+str(i)+".csv"
computeAverages(output,i)
        # don't plot in the same script; plotting requires a different thread, otherwise samples are dropped during the pause.
|
{"hexsha": "4ad84d45f0755b180f8318bb82ff7d6e27999777", "size": 4096, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/pyScript/liblsl-Python-master/Package/Work_dir/ReceiveData.py", "max_stars_repo_name": "MScResearch/EEGprocessing", "max_stars_repo_head_hexsha": "6ad37e80baadc462723fc4c3c7c3cec0c1c88dac", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "python/pyScript/liblsl-Python-master/Package/Work_dir/ReceiveData.py", "max_issues_repo_name": "MScResearch/EEGprocessing", "max_issues_repo_head_hexsha": "6ad37e80baadc462723fc4c3c7c3cec0c1c88dac", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python/pyScript/liblsl-Python-master/Package/Work_dir/ReceiveData.py", "max_forks_repo_name": "MScResearch/EEGprocessing", "max_forks_repo_head_hexsha": "6ad37e80baadc462723fc4c3c7c3cec0c1c88dac", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.5714285714, "max_line_length": 114, "alphanum_fraction": 0.61328125, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1062}
|
using BandedMatrices, MatrixFactorizations, LinearAlgebra, Test, Random
Random.seed!(0)
@testset "QR tests" begin
for T in (Float64,ComplexF64,Float32,ComplexF32)
A=brand(T,10,10,3,2)
Q,R=qr(A)
@test Matrix(Q)*Matrix(R) ≈ A
b=rand(T,10)
@test mul!(similar(b),Q,mul!(similar(b),Q',b)) ≈ b
for j=1:size(A,2)
@test Q' * A[:,j] ≈ R[:,j]
end
A=brand(T,14,10,3,2)
Q,R=qr(A)
@test Matrix(Q)*Matrix(R) ≈ A
for k=1:size(A,1),j=1:size(A,2)
@test Q[k,j] ≈ Matrix(Q)[k,j]
end
A=brand(T,10,14,3,2)
Q,R=qr(A)
@test Matrix(Q)*Matrix(R) ≈ A
for k=1:size(Q,1),j=1:size(Q,2)
@test Q[k,j] ≈ Matrix(Q)[k,j]
end
A=brand(T,100,100,3,4)
@test qr(A).factors ≈ LinearAlgebra.qrfactUnblocked!(Matrix(A)).factors
@test qr(A).τ ≈ LinearAlgebra.qrfactUnblocked!(Matrix(A)).τ
b=rand(T,100)
@test qr(A)\b ≈ Matrix(A)\b
b=rand(T,100,2)
@test qr(A)\b ≈ Matrix(A)\b
@test_throws DimensionMismatch qr(A) \ randn(3)
@test_throws DimensionMismatch qr(A).Q'randn(3)
A=brand(T,102,100,3,4)
@test qr(A).factors ≈ LinearAlgebra.qrfactUnblocked!(Matrix(A)).factors
@test qr(A).τ ≈ LinearAlgebra.qrfactUnblocked!(Matrix(A)).τ
b=rand(T,102)
@test qr(A)\b ≈ Matrix(A)\b
b=rand(T,102,2)
@test qr(A)\b ≈ Matrix(A)\b
@test_throws DimensionMismatch qr(A) \ randn(3)
@test_throws DimensionMismatch qr(A).Q'randn(3)
A=brand(T,100,102,3,4)
@test qr(A).factors ≈ LinearAlgebra.qrfactUnblocked!(Matrix(A)).factors
@test qr(A).τ ≈ LinearAlgebra.qrfactUnblocked!(Matrix(A)).τ
b=rand(T,100)
@test_broken qr(A)\b ≈ Matrix(A)\b
A = Tridiagonal(randn(T,99), randn(T,100), randn(T,99))
@test qr(A).factors ≈ LinearAlgebra.qrfactUnblocked!(Matrix(A)).factors
@test qr(A).τ ≈ LinearAlgebra.qrfactUnblocked!(Matrix(A)).τ
b=rand(T,100)
@test qr(A)\b ≈ Matrix(A)\b
b=rand(T,100,2)
@test qr(A)\b ≈ Matrix(A)\b
@test_throws DimensionMismatch qr(A) \ randn(3)
@test_throws DimensionMismatch qr(A).Q'randn(3)
end
@testset "Mixed types" begin
A=brand(10,10,3,2)
b=rand(ComplexF64,10)
Q,R=qr(A)
@test R\(Q'*b) ≈ qr(A)\b ≈ Matrix(A)\b
A=brand(ComplexF64,10,10,3,2)
b=rand(10)
Q,R=qr(A)
@test R\(Q'*b) ≈ qr(A)\b ≈ Matrix(A)\b
A = BandedMatrix{Int}(undef, (2,1), (4,4))
A.data .= 1:length(A.data)
Q, R = qr(A)
@test Q*R ≈ A
end
end
@testset "QL tests" begin
for T in (Float64,ComplexF64,Float32,ComplexF32)
A=brand(T,10,10,3,2)
Q,L=ql(A)
@test ql(A).factors ≈ ql!(Matrix(A)).factors
@test ql(A).τ ≈ ql!(Matrix(A)).τ
@test Matrix(Q)*Matrix(L) ≈ A
b=rand(T,10)
@test mul!(similar(b),Q,mul!(similar(b),Q',b)) ≈ b
for j=1:size(A,2)
@test Q' * A[:,j] ≈ L[:,j]
end
A=brand(T,14,10,3,2)
Q,L=ql(A)
@test ql(A).factors ≈ ql!(Matrix(A)).factors
@test ql(A).τ ≈ ql!(Matrix(A)).τ
@test_broken Matrix(Q)*Matrix(L) ≈ A
for k=1:size(A,1),j=1:size(A,2)
@test Q[k,j] ≈ Matrix(Q)[k,j]
end
A=brand(T,10,14,3,2)
Q,L=ql(A)
@test ql(A).factors ≈ ql!(Matrix(A)).factors
@test ql(A).τ ≈ ql!(Matrix(A)).τ
@test Matrix(Q)*Matrix(L) ≈ A
for k=1:size(Q,1),j=1:size(Q,2)
@test Q[k,j] ≈ Matrix(Q)[k,j]
end
A=brand(T,10,14,3,6)
Q,L=ql(A)
@test ql(A).factors ≈ ql!(Matrix(A)).factors
@test ql(A).τ ≈ ql!(Matrix(A)).τ
@test Matrix(Q)*Matrix(L) ≈ A
for k=1:size(Q,1),j=1:size(Q,2)
@test Q[k,j] ≈ Matrix(Q)[k,j]
end
A=brand(T,100,100,3,4)
@test ql(A).factors ≈ ql!(Matrix(A)).factors
@test ql(A).τ ≈ ql!(Matrix(A)).τ
b=rand(T,100)
@test ql(A)\b ≈ Matrix(A)\b
b=rand(T,100,2)
@test ql(A)\b ≈ Matrix(A)\b
@test_throws DimensionMismatch ql(A) \ randn(3)
@test_throws DimensionMismatch ql(A).Q'randn(3)
A=brand(T,102,100,3,4)
@test ql(A).factors ≈ ql!(Matrix(A)).factors
@test ql(A).τ ≈ ql!(Matrix(A)).τ
b=rand(T,102)
@test_broken ql(A)\b ≈ Matrix(A)\b
b=rand(T,102,2)
@test_broken ql(A)\b ≈ Matrix(A)\b
@test_throws DimensionMismatch ql(A) \ randn(3)
@test_throws DimensionMismatch ql(A).Q'randn(3)
A=brand(T,100,102,3,4)
@test ql(A).factors ≈ ql!(Matrix(A)).factors
@test ql(A).τ ≈ ql!(Matrix(A)).τ
b=rand(T,100)
@test_broken ql(A)\b ≈ Matrix(A)\b
A = Tridiagonal(randn(T,99), randn(T,100), randn(T,99))
@test ql(A).factors ≈ ql!(Matrix(A)).factors
@test ql(A).τ ≈ ql!(Matrix(A)).τ
b=rand(T,100)
@test ql(A)\b ≈ Matrix(A)\b
b=rand(T,100,2)
@test ql(A)\b ≈ Matrix(A)\b
@test_throws DimensionMismatch ql(A) \ randn(3)
@test_throws DimensionMismatch ql(A).Q'randn(3)
end
@testset "lmul!/rmul!" begin
A = brand(100,100,3,4)
Q,R = qr(A)
x = randn(100)
b = randn(100,2)
@test lmul!(Q, copy(x)) ≈ Matrix(Q)*x
@test lmul!(Q, copy(b)) ≈ Matrix(Q)*b
@test lmul!(Q', copy(x)) ≈ Matrix(Q)'*x
@test lmul!(Q', copy(b)) ≈ Matrix(Q)'*b
c = randn(2,100)
@test rmul!(copy(c), Q) ≈ c*Matrix(Q)
@test rmul!(copy(c), Q') ≈ c*Matrix(Q')
A = brand(100,100,3,4)
Q,L = ql(A)
x = randn(100)
b = randn(100,2)
@test lmul!(Q, copy(x)) ≈ Matrix(Q)*x
@test lmul!(Q, copy(b)) ≈ Matrix(Q)*b
@test lmul!(Q', copy(x)) ≈ Matrix(Q)'*x
@test lmul!(Q', copy(b)) ≈ Matrix(Q)'*b
c = randn(2,100)
@test rmul!(copy(c), Q) ≈ c*Matrix(Q)
@test rmul!(copy(c), Q') ≈ c*Matrix(Q')
end
@testset "Mixed types" begin
A=brand(10,10,3,2)
b=rand(ComplexF64,10)
Q,L=ql(A)
@test L\(Q'*b) ≈ ql(A)\b ≈ Matrix(A)\b
@test Q*L ≈ A
A=brand(ComplexF64,10,10,3,2)
b=rand(10)
Q,L=ql(A)
@test Q*L ≈ A
@test L\(Q'*b) ≈ ql(A)\b ≈ Matrix(A)\b
A = BandedMatrix{Int}(undef, (2,1), (4,4))
A.data .= 1:length(A.data)
Q, L = ql(A)
@test_broken Q*L ≈ A
end
end
|
{"hexsha": "25b7beac5dc4ae253d92163dd10ba30b02c8782b", "size": 6602, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/test_bandedqr.jl", "max_stars_repo_name": "kshyatt/BandedMatrices.jl", "max_stars_repo_head_hexsha": "ecfc1957397064740534d1c17b1752c654796544", "max_stars_repo_licenses": ["BSD-3-Clause-Open-MPI"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/test_bandedqr.jl", "max_issues_repo_name": "kshyatt/BandedMatrices.jl", "max_issues_repo_head_hexsha": "ecfc1957397064740534d1c17b1752c654796544", "max_issues_repo_licenses": ["BSD-3-Clause-Open-MPI"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/test_bandedqr.jl", "max_forks_repo_name": "kshyatt/BandedMatrices.jl", "max_forks_repo_head_hexsha": "ecfc1957397064740534d1c17b1752c654796544", "max_forks_repo_licenses": ["BSD-3-Clause-Open-MPI"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.7069767442, "max_line_length": 79, "alphanum_fraction": 0.5054528931, "num_tokens": 2415}
|
//==================================================================================================
/*!
@file
@copyright 2016 NumScale SAS
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt)
*/
//==================================================================================================
#ifndef BOOST_SIMD_FUNCTION_IS_LESS_HPP_INCLUDED
#define BOOST_SIMD_FUNCTION_IS_LESS_HPP_INCLUDED
#if defined(DOXYGEN_ONLY)
namespace boost { namespace simd
{
/*!
@ingroup group-predicates
This function object returns @ref True or @ref False according x is less than y or not.
Infix notation can be used with operator '<'.
@par Header <boost/simd/function/is_less.hpp>
@par Note
Using `is_less(x,y)` is equivalent to `x < y`
@par Example:
@snippet is_less.cpp is_less
@par Possible output:
@snippet is_less.txt is_less
**/
as_logical_t<Value> is_less(Value const& x, Value const& y);
} }
#endif
#include <boost/simd/function/scalar/is_less.hpp>
#include <boost/simd/function/simd/is_less.hpp>
#endif
|
{"hexsha": "a56a23ecc9af8c648e3b57d4dd54df759751cc2e", "size": 1160, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "third_party/boost/simd/function/is_less.hpp", "max_stars_repo_name": "SylvainCorlay/pythran", "max_stars_repo_head_hexsha": "908ec070d837baf77d828d01c3e35e2f4bfa2bfa", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 6.0, "max_stars_repo_stars_event_min_datetime": "2018-02-25T22:23:33.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-15T15:13:12.000Z", "max_issues_repo_path": "third_party/boost/simd/function/is_less.hpp", "max_issues_repo_name": "SylvainCorlay/pythran", "max_issues_repo_head_hexsha": "908ec070d837baf77d828d01c3e35e2f4bfa2bfa", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "third_party/boost/simd/function/is_less.hpp", "max_forks_repo_name": "SylvainCorlay/pythran", "max_forks_repo_head_hexsha": "908ec070d837baf77d828d01c3e35e2f4bfa2bfa", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 7.0, "max_forks_repo_forks_event_min_datetime": "2017-12-12T12:36:31.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-10T14:27:07.000Z", "avg_line_length": 24.1666666667, "max_line_length": 100, "alphanum_fraction": 0.5896551724, "num_tokens": 249}
|
import unittest
import numpy
from csep.utils.calc import bin1d_vec, cleaner_range
class TestCleanerRange(unittest.TestCase):
def setUp(self):
self.start = 0.0
self.end = 0.9
self.dh = 0.1
self.truth = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
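        # numpy.arange with a fractional step accumulates floating-point
        # error, so its edges can drift from the exact decimals in self.truth;
        # the tests below rely on that discrepancy.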
def test_discrepancy_with_arange_catch_failure(self):
ar = numpy.arange(self.start, self.end + self.dh / 2, self.dh)
cr = cleaner_range(self.start, self.end, self.dh)
self.assertRaises(AssertionError, numpy.testing.assert_array_equal, ar, cr)
self.assertRaises(AssertionError, numpy.testing.assert_array_equal, ar, self.truth)
def test_discrepancy_with_direct_input(self):
cr = cleaner_range(self.start, self.end, self.dh)
numpy.testing.assert_array_equal(self.truth, cr)
class TestBin1d(unittest.TestCase):
def test_bin1d_vec(self):
data = [0.34, 0.35]
bin_edges = [0.33, 0.34, 0.35, 0.36]
test = bin1d_vec(data, bin_edges).tolist()
expected = [1, 2]
self.assertListEqual(test, expected)
def test_bin1d_vec2(self):
data = [0.9999999]
bin_edges = [0.8, 0.9, 1.0]
test = bin1d_vec(data, bin_edges)
expected = [1]
self.assertListEqual(test.tolist(), expected)
def test_bin1d_vec3(self):
data = [-118.9999999]
bin_edges = [-119.0, -118.9, -118.8]
test = bin1d_vec(data, bin_edges)
expected = [0]
self.assertListEqual(test.tolist(), expected)
def test_bin1d_vec4(self):
data = [-118.9999999]
bin_edges = [-119.0, -118.98, -118.96]
test = bin1d_vec(data, bin_edges)
expected = [0]
self.assertListEqual(test.tolist(), expected)
def test_bin1d_vec5(self):
data = [-119.0]
bin_edges = [-119.0, -118.98, -118.96]
test = bin1d_vec(data, bin_edges)
expected = [0]
self.assertListEqual(test.tolist(), expected)
def test_bin1d_vec6(self):
data = [1189999.99999]
bin_edges = [1189999.9, 1190000.0, 1200000.0]
test = bin1d_vec(data, bin_edges)
expected = [0]
self.assertListEqual(test.tolist(), expected)
def test_bin1d_vec7(self):
data = [-118.98]
bin_edges = [-119.0, -118.98, -118.96]
test = bin1d_vec(data, bin_edges)
expected = [1]
self.assertListEqual(test.tolist(), expected)
def test_bin1d_vec8(self):
data = [-118.9600000001]
bin_edges = [-119.0, -118.98, -118.96]
test = bin1d_vec(data, bin_edges)
expected = [1]
self.assertListEqual(test.tolist(), expected)
def test_bin1d_vec9(self):
data = [-118.97999999]
bin_edges = [-119.0, -118.98, -118.96]
test = bin1d_vec(data, bin_edges)
expected = [1]
self.assertListEqual(test.tolist(), expected)
def test_bin1d_vec_int(self):
data = [1, 3, 5, 10, 20]
bin_edges = [0, 10, 20, 30]
test = bin1d_vec(data, bin_edges)
expected = [0, 0, 0, 1, 2]
self.assertListEqual(test.tolist(), expected)
def test_upper_limit_right_continuous(self):
data = [40, 40, 40]
bin_edges = [0, 10, 20, 30]
test = bin1d_vec(data, bin_edges, right_continuous=True)
expected = [3, 3, 3]
self.assertListEqual(test.tolist(), expected)
def test_upper_limit_not_continuous(self):
data = [30, 30, 30]
bin_edges = [0, 10, 20, 30]
test = bin1d_vec(data, bin_edges)
expected = [3, 3, 3]
self.assertListEqual(test.tolist(), expected)
def test_lower_limit(self):
data = [0]
bin_edges = [0, 10, 20, 30]
test = bin1d_vec(data, bin_edges)
expected = [0]
self.assertListEqual(test.tolist(), expected)
def test_less_and_greater_than(self):
data = [-1, 35, 40]
bin_edges = [0, 10, 20, 30]
test = bin1d_vec(data, bin_edges)
expected = [-1, 3, -1]
self.assertListEqual(test.tolist(), expected)
def test_scalar_outside(self):
from csep.utils.calc import bin1d_vec
mbins = numpy.arange(5.95, 9, 0.1) # This gives bins from 5.95 to 8.95
idx = bin1d_vec(5.95, mbins, tol=0.00001, right_continuous=True)
self.assertEqual(idx, 0)
        idx = bin1d_vec(6, mbins, tol=0.00001, right_continuous=True)  # this gives 0, which is fine
self.assertEqual(idx, 0)
idx = bin1d_vec(5, mbins, tol=0.00001, right_continuous=True)
self.assertEqual(idx, -1)
idx = bin1d_vec(4, mbins, tol=0.00001, right_continuous=True)
self.assertEqual(idx, -1)
|
{"hexsha": "0dea97039592e245ff2c69a5358a6cb501d3277b", "size": 4693, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_calc.py", "max_stars_repo_name": "mherrmann3/pycsep", "max_stars_repo_head_hexsha": "17a1ba2abd7c729f0eee20c08c94b9d3c7bfdc6f", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test_calc.py", "max_issues_repo_name": "mherrmann3/pycsep", "max_issues_repo_head_hexsha": "17a1ba2abd7c729f0eee20c08c94b9d3c7bfdc6f", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_calc.py", "max_forks_repo_name": "mherrmann3/pycsep", "max_forks_repo_head_hexsha": "17a1ba2abd7c729f0eee20c08c94b9d3c7bfdc6f", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.5902777778, "max_line_length": 106, "alphanum_fraction": 0.6017472832, "include": true, "reason": "import numpy", "num_tokens": 1425}
|
"""
python module for calculating microlensing magnification with finite source size effect
by Sunao Sugiyama
Jan 19, 2022
"""
import numpy as np
from . import fftlog
from scipy.special import j0, j1, jn, gamma
from scipy.special import ellipk as spellipk
from scipy.special import ellipe as spellipe
from scipy.interpolate import InterpolatedUnivariateSpline as ius
# analytic expressions of Fourier counterparts of source star profiles, disk, limb and higher order limb.
def sk_disk(k, rho):
x = k*rho
ans = np.ones(x.shape) # x -> 0 limit
sel = x>0
ans[sel] = 2*j1(x[sel])/x[sel]
return ans
def sk_limb(k, rho, n):
x = k*rho
ans = 1.0/(n+2)*np.ones(x.shape) # x-> 0 limit
sel = x > 0
nu = 1+n/2
ans[sel] = 2**nu*gamma(nu)*jn(nu, x[sel])/x[sel]**nu * nu
return ans
# FFT based magnification
class magnification:
def __init__(self, profile_names=['disk'], profile_args=None, normalize_sk=True):
self.fft_umin = -6 # this choice is validated for rho > 1e-4 to ensure 0.3% precision
self.fft_umax = 3
self.N_fft = 1024
self.zero_alloc = 1e-200
self.normalize_sk = normalize_sk
self.set_profile(profile_names, profile_args)
self.init_Apk()
self.init_small_rho()
def set_profile(self, profile_names, profile_args):
self.profile_names = profile_names
if profile_args is None:
self.profile_args = dict()
else:
self.profile_args = profile_args
for profile_name in self.profile_names:
if profile_name in self.profile_args:
continue
self.profile_args[profile_name] = dict()
def init_Apk(self):
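        # Sketch of the idea: (u**2+2)/(u**2+4)**0.5/u below is the point-lens
        # magnification A(u); the u**2-weighted excess A(u) - 1 is
        # Hankel-transformed to obtain apk, the Fourier-space counterpart used
        # later to build finite-source magnifications as k-space products with
        # a source profile s(k).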
u = np.logspace(self.fft_umin, self.fft_umax, self.N_fft)
u2Au = ((u**2+2.0)/(u**2+4.0)**0.5/u - 1) * u**2
h = fftlog.hankel(u, u2Au, nu=1.5, N_extrap_high=512, N_extrap_low=512)
self.k, apk = h.hankel(0)
self.apk = apk*2*np.pi
def init_small_rho(self, alloc=True):
x = np.logspace(-5, 5, 1024)
dump = np.exp(-(x/100)**2)
for profile_name in self.profile_names:
if profile_name == 'disk':
fx = x * sk_disk(x, 1) * dump
h = fftlog.hankel(x, fx, nu=1.5, N_pad=1024)
u, af0 = h.hankel(0)
self.profile_args[profile_name]['af0'] = ius(u, af0, ext=3)
elif profile_name in ['limb1']:
fx = x * sk_limb(x, 1, 1) * dump
h = fftlog.hankel(x, fx, nu=1.5, N_pad=1024)
u, af0 = h.hankel(0)
self.profile_args[profile_name]['af0'] = ius(u, af0, ext=3)
elif profile_name in ['limb2']:
fx = x * sk_limb(x, 1, 2) * dump
h = fftlog.hankel(x, fx, nu=1.5, N_pad=1024)
u, af0 = h.hankel(0)
self.profile_args[profile_name]['af0'] = ius(u, af0, ext=3)
else:
# compute sk
u = np.logspace(self.fft_umin, self.fft_umax, self.N_fft)
su = self.profile_args[profile_name]['su'](u, 1)
                # values beyond rho are set to a small nonzero value for FFT convergence.
u2su = u**2*su
if alloc:
u2su[u>=1] = self.zero_alloc
h = fftlog.hankel(u, u2su, nu=1.5, N_extrap_high=512, N_extrap_low=512)
k, sk = h.hankel(0)
fx = x * ius(k,sk,ext=1)(x) * dump
h = fftlog.hankel(x, fx, nu=1.5, N_pad=1024)
u, af0 = h.hankel(0)
self.profile_args[profile_name]['af0'] = ius(u, af0, ext=3)
# this approximation is validated for `rho<self.rho_thre`
self.rho_thre = 1e-4
# this approximation is validated for `u > self.u_thre*rho`.
self.u_thre = 10
def A(self, u, rho, profile_name):
if profile_name == 'disk':
return self.A_disk(u, rho)
elif profile_name == 'limb1':
return self.A_limb1(u, rho)
elif profile_name == 'limb2':
return self.A_limb2(u, rho)
else:
return self.A_user(u, rho, profile_name)
def A_point(self, u):
return (u**2+2)/u/(u**2+4)**0.5
def A_disk(self, u, rho):
u = np.atleast_1d(u)
if rho == 0.0:
return self.A_point(u)
elif rho < self.rho_thre:
a = np.ones(u.shape)
sel = u<self.u_thre*rho
a[sel] = self.profile_args['disk']['af0'](u[sel]/rho) / rho + 1
sel = u>=self.u_thre*rho
a[sel] = self.A_point(u[sel])
return a
else:
k_rho = 2*np.pi/rho
dump = np.exp(-(self.k/k_rho/50)**2)
cj = self.apk*self.k**2 * sk_disk(self.k, rho) * dump
h = fftlog.hankel(self.k, cj, nu=1.5, N_pad=512)
u_fft, a_fft = h.hankel(0)
a_fft = a_fft/2/np.pi
a_fft = a_fft + 1
a = np.ones(u.shape)
sel = np.abs(u) > 0
u_pad = [100]
a_pad = [1]*len(u_pad)
sel_fft = u_fft<100
a[sel] = log_interp(np.concatenate([u_fft[sel_fft], u_pad]),
np.concatenate([a_fft[sel_fft], a_pad]), np.abs(u[sel]) )
a[u==0] = (rho**2+4)**0.5/rho
return a
def A_limb1(self, u, rho):
u = np.atleast_1d(u)
if rho == 0.0:
return self.A_point(u)
elif rho < self.rho_thre:
a = np.ones(u.shape)
sel = u<self.u_thre*rho
a[sel] = self.profile_args['limb1']['af0'](u[sel]/rho) / rho + 1
sel = u>=self.u_thre*rho
a[sel] = self.A_point(u[sel])
return a
else:
k_rho = 2*np.pi/rho
dump = np.exp(-(self.k/k_rho/50)**2)
cj = self.apk*self.k**2 * sk_limb(self.k, rho, 1) * dump
h = fftlog.hankel(self.k, cj, nu=1.5, N_pad=512)
u_fft, a_fft = h.hankel(0)
a_fft = a_fft/2/np.pi
a_fft = a_fft + 1
a = np.ones(u.shape)
sel = np.abs(u) > 0
u_pad = [100]
a_pad = [1]*len(u_pad)
sel_fft = u_fft<100
a[sel] = log_interp(np.concatenate([u_fft[sel_fft], u_pad]),
np.concatenate([a_fft[sel_fft], a_pad]), np.abs(u[sel]) )
a[u==0] = (2+1) * (2*(rho**2+2)*spellipe(-rho**2/4)-(rho**2+4)*spellipk(-rho**2/4)) / 3.0/rho**3
return a
def A_limb2(self, u, rho):
u = np.atleast_1d(u)
if rho == 0.0:
return self.A_point(u)
elif rho < self.rho_thre:
a = np.ones(u.shape)
sel = u<self.u_thre*rho
a[sel] = self.profile_args['limb2']['af0'](u[sel]/rho) / rho + 1
sel = u>=self.u_thre*rho
a[sel] = self.A_point(u[sel])
return a
else:
k_rho = 2.0*np.pi/rho
dump = np.exp(-(self.k/k_rho/50)**2)
cj = self.apk*self.k**2 * sk_limb(self.k, rho, 2) * dump
h = fftlog.hankel(self.k, cj, nu=1.5, N_pad=512)
u_fft, a_fft = h.hankel(0)
a_fft = a_fft/2/np.pi
a_fft = a_fft + 1
a = np.ones(u.shape)
sel = np.abs(u) > 0
u_pad = [100]
a_pad = [1]*len(u_pad)
sel_fft = u_fft<100
a[sel] = log_interp(np.concatenate([u_fft[sel_fft], u_pad]),
np.concatenate([a_fft[sel_fft], a_pad]), np.abs(u[sel]) )
a[u==0] = (2+2) * (rho*(2+rho**2)*(4+rho**2)**0.5 - 8*np.arcsinh(rho/2)) / 4/rho**4
return a
# functions below are used for finite source magnification
# with user-defined source profile
def compute_sk(self, rho, profile_name, alloc=True):
r = np.logspace(self.fft_umin, self.fft_umax, self.N_fft)
su = self.profile_args[profile_name]['su'](r, rho)
        # values beyond rho are set to a small nonzero value for FFT convergence.
u2su = r**2*su
if alloc:
u2su[r>=rho] = self.zero_alloc
h = fftlog.hankel(r, u2su, nu=1.5, N_extrap_high=512, N_extrap_low=512)
k, sk = h.hankel(0)
sk = sk*2*np.pi
if self.normalize_sk:
sk = sk/sk[0]
return k, sk
def A_user(self, u, rho, profile_name, alloc=True):
u = np.atleast_1d(u)
if rho == 0.0:
return self.A_point(u)
elif rho < self.rho_thre:
a = np.ones(u.shape)
sel = u<self.u_thre*rho
a[sel] = self.profile_args[profile_name]['af0'](u[sel]/rho) / rho + 1
sel = u>=self.u_thre*rho
a[sel] = self.A_point(u[sel])
return a
else:
# compute sk
_, sk = self.compute_sk(rho, profile_name, alloc=alloc)
# compute finite source
k_rho = 2.0*np.pi/rho
dump = np.exp(-(self.k/k_rho/50)**2)
cj = self.apk*self.k**2 * sk * dump
h = fftlog.hankel(self.k, cj, nu=1.5, N_pad=512)
u_fft, a_fft = h.hankel(0)
a_fft = a_fft/2/np.pi
a_fft = a_fft + 1
a = np.ones(u.shape)
sel = np.abs(u) > 0
u_pad = [100]
a_pad = [1]*len(u_pad)
sel_fft = u_fft<100
a[sel] = log_interp(np.concatenate([u_fft[sel_fft], u_pad]),
np.concatenate([a_fft[sel_fft], a_pad]), np.abs(u[sel]) )
a[u==0] = self.profile_args[profile_name].get('a0', 0.0)
return a
### Utility functions ####################
def log_interp(x,y,xnew):
"""
Apply interpolation in logarithmic space for both x and y.
Beyound input x range, returns 10^0=1
"""
ynew = 10**ius(np.log10(x), np.log10(y), ext=3)(np.log10(xnew))
return ynew
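# Example (hypothetical): data following a power law is interpolated exactly,
# since it is linear in log-log space:
#   x = np.logspace(0, 4, 5); y = x**2
#   log_interp(x, y, np.array([3.0]))  # ~ array([9.0])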
|
{"hexsha": "15b39eb20a19952328d40ce8816690df512e6b29", "size": 10139, "ext": "py", "lang": "Python", "max_stars_repo_path": "magnification/mag_fft.py", "max_stars_repo_name": "git-sunao/fft-extended-source", "max_stars_repo_head_hexsha": "f10ff7a8f55cb899f383cd741ec9e48b148a762f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "magnification/mag_fft.py", "max_issues_repo_name": "git-sunao/fft-extended-source", "max_issues_repo_head_hexsha": "f10ff7a8f55cb899f383cd741ec9e48b148a762f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "magnification/mag_fft.py", "max_forks_repo_name": "git-sunao/fft-extended-source", "max_forks_repo_head_hexsha": "f10ff7a8f55cb899f383cd741ec9e48b148a762f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.1165413534, "max_line_length": 108, "alphanum_fraction": 0.5071506066, "include": true, "reason": "import numpy,from scipy", "num_tokens": 3045}
|
import numpy as np
import tensorflow as tf
import tensorflow.keras as tfk
import u_net3
INPUT_DIM = [132, 132, 116]
OUTPUT_DIM = [44, 44, 28]
NO_CHANNELS = 3
NO_CLASSES = 3
NO_FILTERS = 32
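# NOTE (assumption): the output volume is smaller than the input because this
# 3D U-Net variant appears to use unpadded ("valid") convolutions, which crop
# the spatial dimensions (132 -> 44, 116 -> 28).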
unet_model = u_net3.UNet3D(in_channels=NO_CHANNELS, out_classes=NO_CLASSES, img_shape = [INPUT_DIM[0], INPUT_DIM[1], INPUT_DIM[2], NO_CHANNELS], no_filters=NO_FILTERS)
unet_model.build(input_shape=(1, INPUT_DIM[0], INPUT_DIM[1], INPUT_DIM[2], NO_CHANNELS))
unet_model.summary()
print(1)
|
{"hexsha": "4d7828ba2409551d3ee264e4577057b1ecfed8f1", "size": 480, "ext": "py", "lang": "Python", "max_stars_repo_path": "main.py", "max_stars_repo_name": "cianmscannell/3DU-Net", "max_stars_repo_head_hexsha": "222382366ab7bdd06f9c6decdd32a468ffca2b29", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-05-29T03:27:28.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-29T03:27:28.000Z", "max_issues_repo_path": "main.py", "max_issues_repo_name": "wangfaofao/3DU-Net", "max_issues_repo_head_hexsha": "222382366ab7bdd06f9c6decdd32a468ffca2b29", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "main.py", "max_forks_repo_name": "wangfaofao/3DU-Net", "max_forks_repo_head_hexsha": "222382366ab7bdd06f9c6decdd32a468ffca2b29", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-05-29T03:27:20.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-29T03:27:20.000Z", "avg_line_length": 28.2352941176, "max_line_length": 167, "alphanum_fraction": 0.7666666667, "include": true, "reason": "import numpy", "num_tokens": 153}
|
"""Find naked singles"""
import numpy as np
from typing import List
from ..core.types import Cell, Placement
def find_placements(
grid: np.ndarray,
candidates: np.ndarray,
cells: List[Cell],
) -> List[Placement]:
return [
Placement(cell, digit)
for cell in cells
if len(candidates[cell]) == 1 and grid[cell] == 0
for digit in candidates[cell]
]
|
{"hexsha": "0ea6fa18c6153596b8892d3f1535c004ee0bfdb4", "size": 400, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyslab/strategies/naked_single.py", "max_stars_repo_name": "benhorsburgh/pyslab", "max_stars_repo_head_hexsha": "d495ca87110f1ac0fb1b49df49024f590bbb6c5c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pyslab/strategies/naked_single.py", "max_issues_repo_name": "benhorsburgh/pyslab", "max_issues_repo_head_hexsha": "d495ca87110f1ac0fb1b49df49024f590bbb6c5c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pyslab/strategies/naked_single.py", "max_forks_repo_name": "benhorsburgh/pyslab", "max_forks_repo_head_hexsha": "d495ca87110f1ac0fb1b49df49024f590bbb6c5c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.0526315789, "max_line_length": 57, "alphanum_fraction": 0.6325, "include": true, "reason": "import numpy", "num_tokens": 96}
|
from qunetsim.components import Host
from .computing_host import ComputingHost
from .clock import Clock
from ..utils import DefaultOperationTime
from ..utils.constants import Constants
from ..objects import Operation, Circuit, Layer
import numpy as np
import uuid
import json
from typing import List, Optional, Dict, Tuple
class ControllerHost(Host):
"""
Controller host object which acts as a master node in a centralised
distributed network system.
"""
def __init__(
self,
host_id: str,
computing_host_ids: Optional[List[str]] = None,
gate_time: Optional[Dict[str, int]] = None,
backend: Optional = None,
):
"""
        Initialize the controller host for the distributed network.
Args:
host_id (str): The ID of the controller host
computing_host_ids (list): The IDs of computing/slave hosts
gate_time (dict): A mapping of gate names to time the gate takes
to execute for each computing host
backend (Backend): Backend for qubits
"""
super().__init__(host_id, backend=backend)
self.term_assignment = dict()
self._computing_host_ids = (
computing_host_ids if computing_host_ids is not None else []
)
self.add_c_connections(self._computing_host_ids)
self._clock = Clock.get_instance()
self._circuit_max_execution_time = 0
# TODO: Take gate_time as an input from computing hosts
if gate_time is None:
gate_time = {}
for computing_host_id in self._computing_host_ids:
gate_time[computing_host_id] = DefaultOperationTime
self._gate_time = gate_time
self._results = None
self._backend = backend
@property
def computing_host_ids(self):
"""
Get the *computing_host_ids* associated to the controller host
Returns:
(list): The IDs of computing/slave hosts
"""
return self._computing_host_ids
@property
def results(self):
"""
Get the final output of the algorithm or the error reported by the
computing hosts
Returns:
(dict): The final output/error from every computing host
"""
return self._results
def create_distributed_network(
self, num_computing_hosts: int, num_qubits_per_host: int
) -> Tuple[List[ComputingHost], Dict[str, List[str]]]:
"""
Create a network of *num_computing_hosts* completely connected computing nodes with
*num_qubits_per_host* each.
Args:
num_computing_hosts (int): The number of computing hosts to initialize
num_qubits_per_host (int): The number of qubits on each computing host
Returns:
(tuple): The list of computing hosts and the qubit map for their qubits
"""
id_prefix = "QPU_"
computing_hosts = []
q_map = {}
self._computing_host_ids = [
f"{id_prefix}{str(i)}" for i in range(num_computing_hosts)
]
for i in range(num_computing_hosts):
computing_host = ComputingHost(
host_id=id_prefix + str(i),
controller_host_id=self.host_id,
total_qubits=num_qubits_per_host,
total_pre_allocated_qubits=num_qubits_per_host,
backend=self._backend,
)
self._gate_time[id_prefix + str(i)] = DefaultOperationTime
self.add_c_connection(id_prefix + str(i))
computing_hosts.append(computing_host)
q_map[computing_host.host_id] = [
f"q_{str(i)}_{str(j)}" for j in range(num_qubits_per_host)
]
for outer_computing_host in computing_hosts:
for inner_computing_host in computing_hosts:
if outer_computing_host.host_id != inner_computing_host.host_id:
outer_computing_host.add_connection(inner_computing_host.host_id)
outer_computing_host.start()
return computing_hosts, q_map
def connect_host(self, computing_host_id: str, gate_time: Dict[str, int] = None):
"""
Adds a computing host to the distributed network
Args:
computing_host_id (str): The ID of the computing host
gate_time (dict): A mapping of gate names to time the gate
takes to execute for the computing host to be added
"""
self.connect_hosts([computing_host_id], [gate_time])
def connect_hosts(
self, computing_host_ids: List[str], gate_times: List[Dict[str, int]] = None
):
"""
Adds multiple computing hosts to the distributed network
Args:
computing_host_ids (list): The ID of the computing host
gate_times (list): A list of mappings of gate names to time the gate
takes to execute for the computing host to be added
"""
for i, computing_host_id in enumerate(computing_host_ids):
self._computing_host_ids.append(computing_host_id)
self.add_c_connection(computing_host_id)
if gate_times is None or len(gate_times) == 0 or gate_times[i] is None:
gate_time = DefaultOperationTime
else:
gate_time = gate_times[i]
self._gate_time[computing_host_id] = gate_time
def _create_distributed_schedules(self, circuit: Circuit):
"""
Creates a distributed schedule for each of the computing host
Args:
circuit (Circuit): The Circuit object which contains
information regarding a quantum circuit
"""
time_layer_end = self._clock.ticks
operation_schedule = []
layers = circuit.layers
# We form an intermediate schedule which is used before splitting
# the schedules for each computing host
for layer in layers:
max_execution_time = 0
for operation in layer.operations:
op = operation.get_dict()
op["layer_end"] = time_layer_end
operation_schedule.append(op)
# Find the maximum time taken to execute this layer
execution_time = self._get_operation_execution_time(
op["computing_host_ids"][0], operation.name, operation.gate
)
max_execution_time = max(max_execution_time, execution_time)
time_layer_end += max_execution_time
computing_host_schedules = {}
for computing_host_id in self._computing_host_ids:
computing_host_schedule = []
for op in operation_schedule:
if op["computing_host_ids"][0] == computing_host_id:
computing_host_schedule.append(op)
computing_host_schedules[computing_host_id] = computing_host_schedule
return computing_host_schedules, time_layer_end
@staticmethod
def _replace_control_gates(control_gate_info: list, current_layer: Layer):
"""
Replace control gates with a distributed version of the control gate
over the different computing hosts
        Args:
            control_gate_info (list): List of information regarding control
                gates present in one layer
            current_layer (Layer): Layer object in which the control gates
                are present
        Returns:
            (tuple): The updated current layer and the list of new layers
                implementing the distributed control gates
        """
max_gates = 0
for gate_info in control_gate_info:
max_gates = max(len(gate_info["operations"]), max_gates)
circuit_len = Constants.DISTRIBUTED_CONTROL_CIRCUIT_LEN + max_gates
operations = [[] for _ in range(circuit_len)]
for gate_info in control_gate_info:
control_qubit = gate_info["control_qubit"]
control_host = gate_info["computing_hosts"][0]
target_host = gate_info["computing_hosts"][1]
epr_qubit_id = str(uuid.uuid4())
bit_id_1, bit_id_2 = str(uuid.uuid4()), str(uuid.uuid4())
# Generate new EPR pair (counted in the pre-allocated qubits) for the
# two computing hosts
op_1 = Operation(
name=Constants.SEND_ENT,
qids=[epr_qubit_id],
computing_host_ids=[control_host, target_host],
pre_allocated_qubits=True,
)
op_2 = Operation(
name=Constants.REC_ENT,
qids=[epr_qubit_id],
computing_host_ids=[target_host, control_host],
pre_allocated_qubits=True,
)
current_layer.add_operations([op_1, op_2])
# Circuit to implement distributed control gate
itr = 0
op_1 = Operation(
name=Constants.TWO_QUBIT,
qids=[control_qubit, epr_qubit_id],
gate=Operation.CNOT,
computing_host_ids=[control_host],
)
operations[itr].extend([op_1])
itr += 1
op_1 = Operation(
name=Constants.MEASURE,
qids=[epr_qubit_id],
cids=[bit_id_1],
computing_host_ids=[control_host],
)
operations[itr].extend([op_1])
itr += 1
op_1 = Operation(
name=Constants.SEND_CLASSICAL,
cids=[bit_id_1],
computing_host_ids=[control_host, target_host],
)
op_2 = Operation(
name=Constants.REC_CLASSICAL,
cids=[bit_id_1],
computing_host_ids=[target_host, control_host],
)
operations[itr].extend([op_1, op_2])
itr += 1
op_1 = Operation(
name=Constants.CLASSICAL_CTRL_GATE,
qids=[epr_qubit_id],
cids=[bit_id_1],
gate=Operation.X,
computing_host_ids=[target_host],
)
operations[itr].extend([op_1])
# The control gate we are trying to implement
for op in gate_info["operations"][::-1]:
itr += 1
op_1 = Operation(
name=Constants.TWO_QUBIT,
qids=[epr_qubit_id, op.get_target_qubit()],
gate=op.gate,
gate_param=op.gate_param,
computing_host_ids=[target_host],
)
operations[itr].extend([op_1])
itr += 1
op_1 = Operation(
name=Constants.SINGLE,
qids=[epr_qubit_id],
gate=Operation.H,
computing_host_ids=[target_host],
)
operations[itr].extend([op_1])
itr += 1
op_1 = Operation(
name=Constants.MEASURE,
qids=[epr_qubit_id],
cids=[bit_id_2],
computing_host_ids=[target_host],
)
operations[itr].extend([op_1])
itr += 1
op_1 = Operation(
name=Constants.SEND_CLASSICAL,
cids=[bit_id_2],
computing_host_ids=[target_host, control_host],
)
op_2 = Operation(
name=Constants.REC_CLASSICAL,
cids=[bit_id_2],
computing_host_ids=[control_host, target_host],
)
operations[itr].extend([op_1, op_2])
itr += 1
op_1 = Operation(
name=Constants.CLASSICAL_CTRL_GATE,
qids=[control_qubit],
cids=[bit_id_2],
gate=Operation.Z,
computing_host_ids=[control_host],
)
operations[itr].extend([op_1])
# Make the new layers from the operations
distributed_layers = []
if control_gate_info:
for ops in operations:
layer = Layer(ops)
distributed_layers.append(layer)
return current_layer, distributed_layers
def _generate_distributed_circuit(self, circuit: Circuit) -> Circuit:
"""
        Takes the user-input monolithic circuit and converts it to a
        distributed circuit over the computing hosts connected to the
        controller host. Here, we replace the standard two-qubit control
        gates with distributed control gates.
        Args:
            circuit (Circuit): The Circuit object which contains
                information regarding a quantum circuit
        Returns:
            (Circuit): The distributed version of the input circuit
        """
distributed_circuit_layers = []
layers = circuit.layers
control_gate_info = circuit.control_gate_info()
for layer_index, layer in enumerate(layers):
new_layer = Layer(operations=[])
for op in layer.operations:
if not op.is_control_gate_over_two_hosts():
new_layer.add_operation(op)
new_layer, distributed_layers = self._replace_control_gates(
control_gate_info[layer_index], new_layer
)
if new_layer.operations:
distributed_circuit_layers.append(new_layer)
distributed_circuit_layers.extend(distributed_layers)
distributed_circuit = Circuit(circuit.q_map, distributed_circuit_layers)
return distributed_circuit
def _get_operation_execution_time(
self, computing_host_id: str, op_name: str, gate: str
) -> float:
"""
Return the execution time for an operation for a specific computing
host
Args:
            computing_host_id (str): The ID of the computing host
op_name (str): Name of the operation
gate (str): Name of the gate being performed in the operation,
if any
Returns:
(float): The operation execution time
"""
operation_time = self._gate_time[computing_host_id]
gate_op_names = [
Constants.SINGLE,
Constants.TWO_QUBIT,
Constants.CLASSICAL_CTRL_GATE,
]
if op_name in gate_op_names:
execution_time = operation_time[op_name][gate]
else:
execution_time = operation_time[op_name]
return execution_time
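    # Illustrative shape of a per-host gate-time mapping consumed above (the
    # keys follow the Constants/Operation names used in this file; the
    # durations are arbitrary example values, not defaults):
    # {
    #     Constants.SINGLE: {Operation.H: 1, Operation.X: 1},
    #     Constants.TWO_QUBIT: {Operation.CNOT: 2},
    #     Constants.MEASURE: 3,
    # }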
def generate_and_send_schedules(self, circuit: Circuit):
"""
Generate and send distributed schedules to all the computing hosts
        associated with the circuit
Args:
circuit (Circuit): The Circuit object which contains information
regarding a quantum circuit
"""
distributed_circuit = self._generate_distributed_circuit(circuit)
(
computing_host_schedules,
max_execution_time,
) = self._create_distributed_schedules(distributed_circuit)
self._circuit_max_execution_time = max_execution_time
self.send_broadcast(json.dumps(computing_host_schedules, cls=NumpyEncoder))
# Wait for the computing hosts to receive the broadcast
for host_id in self._computing_host_ids:
self.get_next_classical(host_id, wait=-1)
# Initialise the clock and start running the algorithm
self._clock.initialise(self._circuit_max_execution_time)
self._clock.start()
def receive_results(self):
"""
Receive the final output results from all the computing hosts
"""
results = {}
for host_id in self._computing_host_ids:
result = self.get_next_classical(host_id, wait=-1)
# I think this is a bug with QuNetSim... Adding a hack for now
# to overcome it...
if result.content == "ACK":
result = self.get_next_classical(host_id, wait=-1)
try:
result = result.content
results.update(json.loads(result))
except json.decoder.JSONDecodeError:
pass
self._results = results
def schedule_expectation_terms(
self,
hamiltonian: List[Tuple[float, List[Tuple[str, int]]]],
q_map: Dict[str, List[str]],
):
"""
Assign the terms of a Hamiltonian to the different computing hosts in the network
"""
# First check the sanity of the list type-wise
assert len(hamiltonian) > 0, "Empty list of terms passed"
assert all(
isinstance(x, tuple) for x in hamiltonian
), "Can only accept a list of tuples"
for term in hamiltonian:
_, observables = term
assert isinstance(
observables, list
), "Each term must include a list of observables."
assert all(
isinstance(obs_type, str) and isinstance(idx, int)
for obs_type, idx in observables
), "The list of observables must be of tuples of types (str, int)"
# We assume that all QPUs have enough qubits for VQE
number_of_computing_hosts = len(q_map.keys())
idx_assignment = np.array_split(
np.arange(len(hamiltonian)), number_of_computing_hosts
)
# Then assign the terms as per the number
computing_host_ids = list(q_map.keys())
for i in range(number_of_computing_hosts):
self.term_assignment[computing_host_ids[i]] = []
for i, arr in enumerate(idx_assignment):
for val in arr:
self.term_assignment[computing_host_ids[i]].append(hamiltonian[val])
return
class NumpyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.ndarray):
return obj.tolist()
if isinstance(obj, complex):
return obj.real, obj.imag
return json.JSONEncoder.default(self, obj)
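# Illustrative use of NumpyEncoder (mirrors send_broadcast above): arrays are
# serialized as nested lists and complex numbers as (real, imag) pairs, so
#   json.dumps({"amps": np.array([1 + 2j, 0j])}, cls=NumpyEncoder)
# yields '{"amps": [[1.0, 2.0], [0.0, 0.0]]}'.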
|
{"hexsha": "1f7fe8a4993c7501e30af3acbd835a8872e6f0cc", "size": 17958, "ext": "py", "lang": "Python", "max_stars_repo_path": "interlinq/components/controller_host.py", "max_stars_repo_name": "rheaparekh/Distributed_Quantum_Phase_Estimation", "max_stars_repo_head_hexsha": "3441a023fedd113b8b3e68e6c6c07f6642c0344a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 24, "max_stars_repo_stars_event_min_datetime": "2021-01-27T14:21:53.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-16T23:43:51.000Z", "max_issues_repo_path": "interlinq/components/controller_host.py", "max_issues_repo_name": "rheaparekh/Distributed_Quantum_Phase_Estimation", "max_issues_repo_head_hexsha": "3441a023fedd113b8b3e68e6c6c07f6642c0344a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 15, "max_issues_repo_issues_event_min_datetime": "2021-02-28T17:58:52.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-27T15:27:03.000Z", "max_forks_repo_path": "interlinq/components/controller_host.py", "max_forks_repo_name": "rheaparekh/Distributed-Quantum-Phase-Estimation", "max_forks_repo_head_hexsha": "3441a023fedd113b8b3e68e6c6c07f6642c0344a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2021-01-27T10:23:12.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-12T01:45:57.000Z", "avg_line_length": 34.3365200765, "max_line_length": 91, "alphanum_fraction": 0.5928833946, "include": true, "reason": "import numpy", "num_tokens": 3699}
|
"""Noise model reference
Model of a generic piecewise noise, LVDT noise, and Geophone noise are avaliable.
"""
import numpy as np
import scipy.optimize
def piecewise_noise(f, n0, exp=[0], fc=[0]):
"""Piecewise noise specified corner frequencies and exponents
Parameters
----------
f: list of int/float or numpy.ndarray
The frequency axis of the noise.
n0: int/float
The noise level at 1 Hz with the first exponent.
exp: list of int/float
The list of exponents of each section of noise separated by the \
corner frequencies.
    fc: list of int/float
        The list of corner frequencies in increasing order. The length of \
        fc must be 1 less than the length of exp.
Returns
-------
noise: numpy.ndarray
The piecewise noise array.
"""
    # Copy so the caller's list (or the shared default) is not mutated
    fc = list(fc)
    if fc[-1] < np.inf:
        fc.append(np.inf)
noise = np.zeros_like(f)
fc_index = 0
for i in range(len(f)):
if f[i] >= fc[fc_index]:
fc_index += 1
n0 = n0 * fc[fc_index-1]**(exp[fc_index-1]-exp[fc_index])
noise[i] = n0 * f[i]**exp[fc_index]
return np.array(noise)
def lvdt_noise(f, n0, fc, exp=[-0.5, 0]):
"""LVDT noise
Parameters
----------
f: list of int/float or numpy.ndarray
The frequency axis of the noise.
n0: int/float
The noise level at 1 Hz with the exponent of -0.5.
fc: int/float
The corner frequency at which the exponent changes from -0.5 to 0.
exp: list of float, optional.
The exponents of the frequency dependency before and after the
corner frequency. Defaults [-0.5, 0]
Returns
-------
noise: numpy.ndarray
The piecewise noise array.
Notes
-----
    The LVDT noise typically has a :math:`f^{-0.5}` dependency \
before the corner frequency and is flat after that.
"""
return piecewise_noise(f, n0, exp=exp, fc=[fc])
def geophone_noise(f, n0, fc, exp=[-3.5, -1]):
"""Geophone noise
Parameters
----------
f: list of int/float or numpy.ndarray
The frequency axis of the noise.
n0: int/float
The noise level at 1 Hz with the exponent of -3.5.
fc: int/float
The corner frequency at which the exponent changes from -3.5 to -1.
exp: list of float, optional.
The exponents of the frequency dependency before and after the
corner frequency. Defaults [-3.5, -1].
Returns
-------
noise: numpy.ndarray
The piecewise noise array.
Notes
-----
    The geophone noise typically has a :math:`f^{-3.5}` dependency \
before the corner frequency and depends on :math:`f^{-1}` after that.
"""
return piecewise_noise(f, n0, exp=exp, fc=[fc])
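# Minimal usage sketch (ours, not part of the original module); the n0 and fc
# values below are arbitrary placeholders, not calibrated sensor parameters.
if __name__ == "__main__":
    f = np.logspace(-1, 2, 1000)  # frequency axis from 0.1 Hz to 100 Hz
    print(lvdt_noise(f, n0=1e-3, fc=4.5)[:3])
    print(geophone_noise(f, n0=2e-6, fc=0.9)[:3])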
|
{"hexsha": "9cac00e2eb1d0bdde674bf2d498158386ab32c2f", "size": 3059, "ext": "py", "lang": "Python", "max_stars_repo_path": "archive/kontrol_v1/model/noise.py", "max_stars_repo_name": "terrencetec/kontrol", "max_stars_repo_head_hexsha": "ba6461784e38d01399efeb7a42911259f9254db0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-08-31T10:34:43.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-10T20:48:59.000Z", "max_issues_repo_path": "archive/kontrol_v1/model/noise.py", "max_issues_repo_name": "terrencetec/kontrol", "max_issues_repo_head_hexsha": "ba6461784e38d01399efeb7a42911259f9254db0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 33, "max_issues_repo_issues_event_min_datetime": "2020-06-16T18:38:25.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-24T00:48:55.000Z", "max_forks_repo_path": "archive/kontrol_v1/model/noise.py", "max_forks_repo_name": "terrencetec/kontrol", "max_forks_repo_head_hexsha": "ba6461784e38d01399efeb7a42911259f9254db0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.2871287129, "max_line_length": 82, "alphanum_fraction": 0.5599869238, "include": true, "reason": "import numpy,import scipy", "num_tokens": 752}
|
# -*- coding: utf-8 -*-
#
# Color reduction of an image with the k-means method
#
# 2015/04/24 ver1.0
#
import numpy as np
from numpy.random import randint
from PIL import Image
# ------------#
# Parameters #
# ------------#
Colors = [2, 3, 5, 16]  # Numbers of colors after reduction (any number of values may be given)
# Color reduction with the k-means method
def run_kmeans(pixels, k):
cls = [0] * len(pixels)
    # Set random initial values for the representative colors
center = []
for i in range(k):
center.append(np.array([randint(256), randint(256), randint(256)]))
    print([x.tolist() for x in center])
distortion = 0
    # Run at most 50 iterations
for iter_num in range(50):
center_new = []
for i in range(k):
center_new.append(np.array([0, 0, 0]))
num_points = [0] * k
distortion_new = 0
        # E phase: compute the group (representative color) each data point belongs to
for pix, point in enumerate(pixels):
min_dist = 256 * 256 * 3
point = np.array(point)
for i in range(k):
d = sum([x * x for x in point - center[i]])
if d < min_dist:
min_dist = d
cls[pix] = i
center_new[cls[pix]] += point
num_points[cls[pix]] += 1
distortion_new += min_dist
        # M phase: compute the new representative colors
for i in range(k):
center_new[i] = center_new[i] / num_points[i]
center = center_new
        print([x.tolist() for x in center])
print("Distortion = %d" % distortion_new)
        # Stop when the change in distortion (J) falls below 0.5%
if iter_num > 0 and distortion - distortion_new < distortion * 0.005:
break
distortion = distortion_new
    # Replace each pixel of the image data with its representative color
for pix, point in enumerate(pixels):
pixels[pix] = tuple(center[cls[pix]])
return pixels
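# Note (ours, not in the original): the E phase above costs O(len(pixels) * k)
# with pure-Python loops. An equivalent vectorized assignment step in numpy
# would be:
#   dists = ((np.asarray(pixels)[:, None, :]
#             - np.stack(center)[None, :, :]) ** 2).sum(axis=2)
#   cls = dists.argmin(axis=1)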
# Main
if __name__ == '__main__':
for k in Colors:
print("k=%d" % k)
        # Load the image file
im = Image.open("Images/black_board.JPG")
pixels = list(im.convert('RGB').getdata())
        # Color reduction with k-means
result = run_kmeans(pixels, k)
        # Update the image data and write one output file per k
        im.putdata(result)  # Update image
        im.save("Images/new_board_%d.bmp" % k, "BMP")
|
{"hexsha": "762294ed052c1860d233fceddf13df5e939093c2", "size": 2221, "ext": "py", "lang": "Python", "max_stars_repo_path": "k_means.py", "max_stars_repo_name": "Sabaniki/Evin", "max_stars_repo_head_hexsha": "b68e1ec16505169b7073c274e72d7f8b227051e3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "k_means.py", "max_issues_repo_name": "Sabaniki/Evin", "max_issues_repo_head_hexsha": "b68e1ec16505169b7073c274e72d7f8b227051e3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "k_means.py", "max_forks_repo_name": "Sabaniki/Evin", "max_forks_repo_head_hexsha": "b68e1ec16505169b7073c274e72d7f8b227051e3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.0853658537, "max_line_length": 78, "alphanum_fraction": 0.5258892391, "include": true, "reason": "import numpy,from numpy", "num_tokens": 731}
|
(**************************************************************************)
(* *)
(* This file is part of octant-proof. *)
(* *)
(* Copyright (C) 2019-2020 Orange *)
(* *)
(* you can redistribute it and/or modify it under the terms of the GNU *)
(* Lesser General Public License as published by the Free Software *)
(* Foundation, either version 3 of the License, or (at your option) *)
(* any later version. *)
(* *)
(* It is distributed in the hope that it will be useful, *)
(* but WITHOUT ANY WARRANTY; without even the implied warranty of *)
(* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *)
(* GNU Lesser General Public License for more details. *)
(* *)
(* You should have received a copy of the GNU Lesser General Public *)
(* License along with the software. If not, see *)
(* <https://www.gnu.org/licenses/>. *)
(* *)
(**************************************************************************)
Require Import syntax.
Require Import occurrences.
Require Import subs.
Require Import pmatch.
Require Import bSemantics.
Require Import monotonicity.
Require Import soundness.
From mathcomp
Require Import ssreflect ssrbool ssrnat eqtype seq ssrfun choice fintype tuple finset bigop finfun.
Require Import bigop_aux.
Require Import utils.
Require Import finseqs.
Require Import fintrees.
Require Import Coq.Program.Equality.
Require Import Sumbool.
Set Implicit Arguments.
Unset Strict Implicit.
Implicit Types (s r : sub) (d def : syntax.constant) (t : term) (a : atom)
(ga : gatom) (tl : list atom) (cl : clause) (i : interp).
Section tSemantics.
Variable p : program.
Variable gat_def : gatom.
(** * Trace semantics *)
(** ** Rule grounding *)
Section rul_gr.
(** rule grounding: a pair of a clause and a substitution *)
Inductive rul_gr :=
| RS : clause -> sub -> rul_gr.
(** Conversion to and from a pair so that we have a cancellable representation *)
Definition rul_gr_rep l := match l with
| RS c g => (c, g) end.
Definition rul_gr_pre l := match l with
| (c, g) => RS c g end.
Lemma rul_gr_repK : cancel rul_gr_rep rul_gr_pre.
Proof. by case. Qed.
(** [rul_gr] is an eq type *)
Definition rul_gr_eqMixin :=
CanEqMixin rul_gr_repK.
Canonical rul_gr_eqType := Eval hnf in EqType rul_gr rul_gr_eqMixin.
(** [rul_gr] is a choice type *)
Definition rul_gr_choiceMixin :=
CanChoiceMixin rul_gr_repK.
Canonical rul_gr_choiceType := Eval hnf in ChoiceType rul_gr rul_gr_choiceMixin.
(** [rul_gr] is a count type *)
Definition rul_gr_countMixin :=
(@CanCountMixin (prod_countType clause_countType sub)
rul_gr _ _ rul_gr_repK).
Canonical rul_gr_countType := Eval hnf in CountType rul_gr rul_gr_countMixin.
(** [rul_gr] is a finite type *)
Definition rul_gr_finMixin :=
(@CanFinMixin rul_gr_countType
(prod_finType clause_finType sub)
_ _ rul_gr_repK).
Canonical rul_gr_finType := Eval hnf in FinType rul_gr rul_gr_finMixin.
End rul_gr.
(** ** Semantic trees (traces) *)
Section trace_sem_trees.
(** A semantic tree is a tree with bounded width [bn] (the maximal size of
a clause body),
- nodes are in [rul_gr] ie. pairs of clause and substitution
- leaves are ground atoms (from the interpretation). *)
Definition trace_sem_trees := (@WUtree_sf rul_gr_finType gatom_finType bn gat_def).
(** force typing *)
Definition my_tst_sub x (H : wu_pred x) : trace_sem_trees := WU_sf_sub H.
(** Get the head node label (ie. the last clause and substitution) *)
Definition tst_node_head (t : trace_sem_trees) := match (val t) with
| ABLeaf _ => None
| ABNode h _ => Some h end.
(** Deduced atom: either the fact (for a leaf) or the head atom of the last clause, grounded by the substitution. *)
Definition ded def (t : trace_sem_trees) := match (val t) with
| ABLeaf f => f
| ABNode (RS (Clause h _) s) _ => gr_atom_def def s h end.
(** Translate a set of traces into an interpretation *)
Definition sem_tree_to_inter def (ts : {set trace_sem_trees}) : interp := [set ded def x | x in ts].
(** Checks that the atoms deduced from the traces are equal to the atoms grounded by the supplied substitution.
This is the relation between the children of a trace node and the body of the clause associated with that node *)
Definition ded_sub_equal (def : syntax.constant) (lx : seq trace_sem_trees) (s : sub) (ats : seq atom) :=
(map (ded def) lx) == (map (gr_atom_def def s) ats).
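(** Informal reading (our illustration): for [lx = [:: t1; t2]] and
[ats = [:: a1; a2]], [ded_sub_equal def lx s ats] holds exactly when
[ded def t1 = gr_atom_def def s a1] and [ded def t2 = gr_atom_def def s a2]. *)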
(** Computes the traces representing the consequence of a clause
from the traces representing the current interpretation *)
Definition cons_clause_t def (cl : clause) (k : {set trace_sem_trees}) : {set trace_sem_trees} :=
let b := (body_cl cl) in
let subs := match_body (sem_tree_to_inter def k) b in
pset [set (wutree_option_fst (@wu_pcons_seq rul_gr_finType gatom_finType bn gat_def (RS cl s) lx))
| lx : (size b).-tuple trace_sem_trees,
s : sub in subs &
(ded_sub_equal def lx s b &&
all (mem k) lx)].
Set Keyed Unification.
(** A member of the consequences of the clause is an ABnode labelled by (cl, s) where s is a substitution *)
Lemma cons_clause_h def cl k (xtrace : trace_sem_trees) :
(xtrace \in cons_clause_t def cl k) -> exists s, ABroot (val xtrace) = inl (RS cl s).
Proof.
move=> /mem_pset_set /imset2P [prevtrees s Hprevtreesin] H1.
unfold wu_pcons_seq. unfold wu_pcons_wlist.
case (sumbool_of_bool (wall (WUnotin (RS cl s)) (seq_to_wlist bn prevtrees))) => [H2|H2] H3.
- exists s. by inversion H3 as [H4].
- inversion H3.
Qed.
(** ** Trace semantics definition *)
(** Initialize the trace semantic from a standard interpretation of the
EDB by creating leaf trees.*)
Definition base_sem_t (i : interp) : {set trace_sem_trees} :=
[set my_tst_sub (wu_pred_leaf x) | x in i].
(** the forward chain step for the trace semantics. *)
Definition fwd_chain_t def (k : {set trace_sem_trees}) : {set trace_sem_trees} :=
k :|: \bigcup_(cl <- p) cons_clause_t def cl k.
(** interp_t is the equivalent of interp for the trace semantics *)
Notation interp_t := {set trace_sem_trees}.
(** The m-th iterate of the semantics *)
Definition sem_t (def : syntax.constant) (m : nat) (i : interp) :=
iter m (fwd_chain_t def) (base_sem_t i).
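(** For instance, [sem_t def 0 i] is exactly [base_sem_t i] (the leaves built
from [i]), and [sem_t def m.+1 i] unfolds to [fwd_chain_t def (sem_t def m i)]. *)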
(** The leaf trees in the semantics are the ground atoms of the initial
interpretation *)
Lemma sem_t_leaf def i ga Htra m : {| wht := ABLeaf rul_gr_finType ga; Hwht := Htra |}
\in sem_t def m i -> ga \in i.
Proof.
induction m as [|m Hm].
by move=>/imsetP [gab Hgabin [->]].
move=>/setUP [H|H].
apply/Hm/H.
destruct (bigcup_seqP _ _ _ _ _ _ H) as [gab Hgabin Hgabeq].
destruct (andP Hgabeq) as [H1 H2].
destruct (cons_clause_h H1) as [s Hf].
inversion Hf.
Qed.
(** The result of fwd_chain contains its argument *)
Lemma fwd_chain_t_inc (it : interp_t) def : it \subset fwd_chain_t def it.
Proof. exact: subsetUl. Qed.
(** technical lemma rephrasing the above one*)
Lemma fwd_chain_t_inc_single (t : trace_sem_trees) (it : interp_t) def : (t \in it) -> t \in fwd_chain_t def it.
Proof.
move => Hin ; assert (it \subset fwd_chain_t def it). apply fwd_chain_t_inc.
apply subset_to_in with (s1 :=it) ; [apply Hin | apply H].
Qed.
(** and the same for [fwd_chain] *)
Lemma fwd_chain_inc_single ga i def : (ga \in i) -> ga \in fwd_chain def p i.
Proof.
move => H.
unfold fwd_chain.
apply/setUP.
left ; apply H.
Qed.
(** Simple reinterpretation of definition of [ded_sub_equal] *)
Lemma ded_sub_equal_equal_to_def l tval def s : (ded_sub_equal def tval s l) = ([seq ded def i | i <- tval] == [seq gr_atom_def def s i | i <- l]).
Proof.
by destruct tval ; destruct l.
Qed.
(** Trivial technical lemma, but inversion was too aggressive in the following proof *)
Lemma simple_cons_eq_inversion {T : Type} (a b : T) (l ll : seq T) : (a::l = b::ll) -> (a = b /\ l = ll).
Proof.
move => H.
by inversion H.
Qed.
(** If applying s to a list l gives a list of ground atoms, then using [s] as a grounding and
applying it to l will also give this list
of ground atoms. *)
Lemma stail_eq_to_gr_atom_eq (l : seq atom) (def : syntax.constant) (s : sub) (gtl : seq gatom) (H : stail l s = [seq to_atom ga | ga <- gtl]) :
[seq gr_atom (to_gr def s) a | a <- l] = gtl.
Proof.
move:gtl H.
induction l.
- by destruct gtl.
- destruct gtl as [|g0 gtl].
+ move => //.
+ move => /= H.
destruct (simple_cons_eq_inversion H) as [Heqh Heqtl].
assert (H0 : gr_atom_def def s a = g0).
- apply/gr_atom_defP/Heqh.
rewrite <- (gr_atom_defE def s a) in H0.
by rewrite H0 ((IHl _ Heqtl)).
Qed.
(** If applying the substitution gives ground atoms, we have [ded_sub_equal] without explicit
grounding on the list of atoms [l] *)
Lemma ded_gr_equal_stail l (gtailrec : (size l).-tuple trace_sem_trees) def s gtl :
([seq ded def i | i <- gtailrec] = gtl) -> (stail l s = [seq to_atom ga | ga <- gtl]) -> ded_sub_equal def gtailrec s l.
Proof.
unfold ded_sub_equal.
move =>-> /stail_eq_to_gr_atom_eq H.
rewrite -(H def);apply/eqP. clear gtailrec. clear H.
induction l as [|h l Hl];auto. simpl.
apply/f_equal2.
simpl. by rewrite (gr_atom_defE def s).
apply Hl.
Qed.
(** There is no new leaf in the consequence of a
clause *)
Lemma no_deduced_leaf (x : gatom_finType) (def : syntax.constant) (k : {set trace_sem_trees})
(Habs : wutree_fin (wu_leaf x) \in \bigcup_cl cons_clause_t def cl k) : False.
Proof.
destruct (bigcupP Habs) as [cl Htriv Hin].
destruct ((imset2P (mem_pset_set Hin))) as [decs sub Hindecs Hsubin Heq].
unfold wu_pcons_seq in Heq.
unfold wu_pcons_wlist in Heq.
destruct Sumbool.sumbool_of_bool in Heq ; inversion Heq.
Qed.
Lemma cons_clause_t_desc def (deduced : {set [finType of trace_sem_trees]}) (h : rul_gr_finType) (l : seq (@ABtree rul_gr_finType gatom_finType))
(t : trace_sem_trees) (Hin : val t \in l)
(Hwup : @wu_pred _ _ bn (ABNode h l)) : (my_tst_sub Hwup
\in \big[setU (T:=[finType of trace_sem_trees])/set0]_cl
cons_clause_t def cl deduced) -> t \in deduced.
Proof.
move => /(@bigcupP [finType of trace_sem_trees] clause_finType _ _ _) Hcons.
unfold mem.
destruct Hcons as [acl Htriv Hhlin].
unfold mem in Hhlin.
destruct (@imset2P (tuple_finType (size (body_cl acl)) [finType of trace_sem_trees]) _ _ _ _ _ _ (mem_pset_set Hhlin)) as [lx s Hlxin H Heq].
unfold wu_pcons_seq in Heq. unfold wu_pcons_wlist in Heq.
destruct Sumbool.sumbool_of_bool as [Hclnotin | Hclin] in Heq.
- inversion Heq as [[Hheq Hleq]]. rewrite seq_wlistK in Hleq.
+ rewrite in_set in H. destruct (andP H) as [Hsmatch Hb]. destruct (andP Hb) as [Hdedeq Hall].
unfold all in Hall.
apply (allP Hall).
destruct t as [t Ht].
destruct (all_prop_in (@sub_val_map _ _ trace_sem_trees l lx Hleq) Hin) as [tval Hinbis].
simpl in Hinbis. unfold WU_sf_sub in Hinbis.
assert (Hproofseq : tval = Ht). apply eq_irrelevance.
rewrite <- Hproofseq.
unfold mem. unfold in_mem. simpl. destruct lx as [lx Hlx].
simpl. simpl in Hinbis.
clear Hwup Hhlin Hlxin Hclnotin Heq Hheq Hleq H Hsmatch Hb Hdedeq Hall Hlx.
(* quite ugly, but avoids dependent-type shenanigans *)
(* my guess would be that some properties of canonicals do not
automatically lift to seqs *)
induction lx.
- by simpl in Hinbis.
- simpl. simpl in IHlx. simpl in Hinbis. destruct (orP Hinbis) as [Hta | Htl].
+ rewrite (eqP Hta). by apply/orP ; left.
+ apply/orP ; right ; apply/IHlx/Htl.
+ destruct lx as [lx Hlx].
simpl ; rewrite (eqP Hlx). apply wlist_to_seq_size.
- inversion Heq.
Qed.
(** If a tree is in a given step of the trace semantics, so are all its subtrees. *)
Lemma trace_sem_prev_trees nb_iter def init :
forall (t1 t2 : trace_sem_trees), t2 \in (sem_t def nb_iter init)
-> subtree (val t1) (val t2) -> t1 \in (sem_t def nb_iter init).
Proof.
induction nb_iter as [|nb_iter Hit].
- move => t1 t2 Ht2sem Hsub.
destruct (imsetP Ht2sem) as [t2_atom Ht2_ain Ht2_eq].
rewrite Ht2_eq in Hsub.
simpl in Hsub.
apply/imsetP.
exists t2_atom.
+ apply Ht2_ain.
+ apply/eqP.
by rewrite <- (@inj_eq [finType of trace_sem_trees] (@ABtree_eqType _ _) _ val_inj).
- move => t1 t2 Ht2sem Hsub.
unfold fwd_chain_t.
unfold fwd_chain_t in Ht2sem.
destruct (setUP Ht2sem) as [Ht2old | Ht2new].
+ (* t2 was already in deduced, using induction hyp *)
apply/setUP ; left.
apply (Hit t1 t2 Ht2old Hsub).
+ (* t2 was just deduced. 2 cases: t1 = t2 or t1 strictly smaller *)
destruct t2 as [t2 Ht2wu].
destruct t2.
- (* leaf in cons_clause_t: absurd *)
exfalso.
apply no_deduced_leaf with (x := s) (def := def) (k := (sem_t def nb_iter init)).
assert (Heq : Wht Ht2wu == (wu_leaf s)).
+ by rewrite <- inj_eq with (f := val) ; [ | apply val_inj].
rewrite <- (eqP Heq). apply (@bigcup_type_seq _ _ (fun cl => cons_clause_t def cl (sem_t def nb_iter init)) _ p Ht2new).
- (* we can now compute subtree *)
destruct (orP Hsub) as [Ht1t2abeq | Ht1ssubt2].
+ (* t1 = t2 *)
assert (Ht1t2eq : t1 == Wht Ht2wu).
(* ugh, we really need a tactic for this ... *)
destruct t1 as [t1 Ht1].
rewrite <- (@inj_eq _ (@ABtree_eqType _ _) val val_inj).
simpl. simpl. simpl in Ht1t2abeq. by rewrite (eqP Ht1t2abeq).
by rewrite (eqP Ht1t2eq).
+ (* t1 strict subtree of t2 *)
apply/setUP ; left.
fold (@subtree rul_gr_finType gatom_finType) in Ht1ssubt2.
destruct (hasP Ht1ssubt2) as [desc Hdescl Hsubdesc].
pose Hwupredl := wu_pred_descs Ht2wu.
pose Hwupredpred := allP Hwupredl _ Hdescl.
pose Hwupredinded := @cons_clause_t_desc def (sem_t def nb_iter init) s l (Wht Hwupredpred) Hdescl Ht2wu.
apply/(Hit t1 (Wht Hwupredpred)). apply (Hwupredinded (bigcup_type_seq Ht2new)). apply Hsubdesc.
Qed.
(** If a tree is in a given step of the trace semantics, its strict
subtrees are in the previous step. *)
Lemma trace_sem_prev_trees_m1 nb_iter def init :
forall (t1 t2 : trace_sem_trees), t2 \in (sem_t def nb_iter init)
-> strict_subtree (val t1) (val t2) -> t1 \in (sem_t def nb_iter.-1 init).
Proof.
induction nb_iter as [|nb_iter Hit].
- by move => t1 t2 /imsetP [ga Hgain ->] /=.
- move => t1 t2 Hsemt Hss. pose Hsemt_cop := Hsemt. clearbody Hsemt_cop.
pose Hss_cop := Hss. clearbody Hss_cop. move:Hss. move:Hsemt.
move=> /setUP [Hrec|]. move=>Hsub.
destruct nb_iter;simpl. destruct (imsetP Hrec) as [ga Hga Heq]. apply/imsetP.
exists ga;auto. rewrite Heq in Hsub. inversion Hsub.
apply/setUP;left. by apply Hit with (t2 := t2).
move=>/bigcup_seqP [cl Hclin /andP [/imsetP [t]]]. rewrite mem_pmap.
move=>/mapP [to /mapP [too]]. rewrite mem_enum.
move=>/imset2P [descs sb Hdescsin]. rewrite in_set.
move=>/andP [Hsbmatch /andP [Hded Hall] -> ->].
unfold wu_pcons_seq. unfold wu_pcons_wlist.
destruct Sumbool.sumbool_of_bool;move=>// [->] Ht2eq.
rewrite Ht2eq. rewrite Ht2eq in Hsemt_cop.
move=>Htriv /=. rewrite seq_wlistK. move=>/hasP [t15 H15in Hsub].
assert (Hwu : @wu_pred _ _ bn t15).
apply (@wu_pred_sub _ _ _ _ (val t2)). rewrite Ht2eq. apply/orP;right. apply/hasP.
exists t15. rewrite seq_wlistK. apply H15in. destruct descs. rewrite (eqP i).
apply wlist_to_seq_size. destruct t15. auto. by apply/orP;left.
rewrite Ht2eq. simpl. apply (wu_merge
(wu_cons_uniq (x:=RS cl sb) (tl:=wlist_to_seq (seq_to_wlist bn descs)) (wall_to_all e))
(ABwidth_cons (RS cl sb) (tl:=[seq wht i | i <- wlist_to_seq (seq_to_wlist bn descs)])
(size_map_leq (wlist_to_seq_size (seq_to_wlist bn descs)))
(wu_list_width (wlist_to_seq (seq_to_wlist bn descs))))).
destruct (mapP H15in) as [t15d H15din H15teq]. clear H15in. destruct descs as [descs Hdescs].
simpl in Hall. simpl in H15din. destruct t1 as [t1 Ht1]. simpl in *.
destruct t15.
assert (Ht1eq : {| wht := t1; Hwht := Ht1 |} = {|wht := (ABLeaf rul_gr_eqType g); Hwht := wu_pred_leaf g|}).
apply/val_inj. simpl in Hsub. simpl. by rewrite (eqP Hsub).
rewrite Ht1eq. destruct t15d as [t15d H15d].
assert (Ht1eqb : {| wht := ABLeaf rul_gr_eqType g; Hwht := wu_pred_leaf g |} = {| wht := t15d; Hwht := H15d |}).
apply/val_inj. simpl. by rewrite H15teq. rewrite Ht1eqb.
apply (allP Hall {| wht := t15d; Hwht := H15d |} H15din).
destruct (orP Hsub) as [Heq|Hsss].
assert (Ht1eq : {| wht := t1; Hwht := Ht1 |} = t15d).
apply/val_inj. simpl in Hsub. simpl. by rewrite (eqP Heq).
rewrite Ht1eq. destruct t15d as [t15d H15d].
apply (allP Hall {| wht := t15d; Hwht := H15d |} H15din).
destruct (hasP Hsss) as [trt Htrtin Htsub]. clear Hsss.
assert (Htrt: @wu_pred _ _ bn trt).
destruct t15d as [t15d Ht15d].
apply wu_pred_sub with (t4 := t15d). simpl in H15teq. rewrite -H15teq.
apply/orP. right. apply/hasP. exists trt;auto. apply subtree_refl.
apply Ht15d.
pose Hprev := (allP Hall t15d H15din). clearbody Hprev.
apply (trace_sem_prev_trees Hprev). simpl. rewrite -H15teq.
simpl. apply/orP;right. apply/hasP. exists trt. apply Htrtin.
apply Htsub. destruct descs as [azer Ht]. simpl. rewrite (eqP Ht).
apply wlist_to_seq_size.
Qed.
(** ** Completeness of the trace semantics.
For each atom in the regular semantics, there is a tree in the
trace semantics whose interpretation (its deduced atom) is that atom *)
Lemma trace_sem_completeness nb_iter def i :
prog_safe p -> [forall x in (sem p def nb_iter i), exists y in (sem_t def nb_iter i), ded def y == x].
Proof.
induction nb_iter as [|nb_iter Hit] ;
move=> Hsafe ; apply/forallP ; move => x ; apply/implyP ; move => Hin ; apply/existsP.
- by exists (wu_leaf x) ; apply/andP ; split ; [apply mem_imset | ].
- destruct (setUP Hin).
+ destruct (existsP (implyP (forallP (Hit Hsafe) x) H)) as [x0 Hx0].
exists x0 ; apply/andP ; split ; [apply (fwd_chain_t_inc_single _ (bool_to_prop_l Hx0)) | apply (bool_to_prop_r Hx0)].
+ rewrite bigcup_seq in H.
clear Hin.
destruct (bigcupP H) as [cl Hin Hxsub].
destruct (imsetP Hxsub) as [s Hsded Hstail].
destruct (match_tl_sound Hsded) as [gtl Htailseq Hall].
destruct cl as [h l].
simpl in Hstail ; simpl in Htailseq ; simpl in Hsded.
assert (Hgtailrec : exists gtailrec : (size l).-tuple trace_sem_trees,
map (ded def) gtailrec = gtl /\ all (mem (sem_t def nb_iter i)) gtailrec).
+ pose Hb := (stail_eq_to_gr_atom_eq def Htailseq).
rewrite <- Hb.
clear Hin Hsded Hxsub.
move : l Hstail Htailseq Hb.
move => l. apply (@wlist_ind _ bn) with (u := l). clear l.
induction gtl.
- move => s0 Ps Hstail Htailseq Hb.
destruct s0.
+ unfold seq_to_wlist_uncut. rewrite seq_to_bnil. by exists [tuple].
+ unfold wlist_to_seq_co in Hb. rewrite seq_wlist_uncut_K in Hb.
inversion Hb.
- move => s0 Ps Hstail Htailseq Hb.
destruct s0 ; unfold wlist_to_seq_co in Hb ; rewrite seq_wlist_uncut_K in Hb.
+ inversion Hb.
+ inversion Hb.
destruct (existsP (implyP (forallP (Hit Hsafe) a) (bool_to_prop_l Hall))) as [abis Habis].
unfold wlist_to_seq_co in Htailseq ; rewrite seq_wlist_uncut_K in Htailseq.
unfold wlist_to_seq_co ; rewrite seq_wlist_uncut_K.
assert (Ps1 : size s1 <= bn). apply leq_trans with (n := (size s1).+1). auto. apply Ps.
assert (Hegtl : exists gtailrec : (size (seq_to_wlist_uncut Ps1)).-tuple trace_sem_trees,
[seq ded def i | i <- gtailrec] = [seq gr_atom (to_gr def s) a | a <- seq_to_wlist_uncut Ps1]
/\ all (mem (sem_t def nb_iter i)) gtailrec).
- assert (Hstails1 : stail (seq_to_wlist_uncut Ps1) s = [seq to_atom ga | ga <- gtl]).
unfold wlist_to_seq_co ; rewrite seq_wlist_uncut_K. apply (simple_cons_eq_inversion Htailseq).
assert (H8bis : ([seq gr_atom (to_gr def s) a | a <- seq_to_wlist_uncut Ps1] = gtl)).
unfold wlist_to_seq_co ; rewrite seq_wlist_uncut_K. auto.
apply (IHgtl (bool_to_prop_r Hall) s1 Ps1 Hstail Hstails1 H8bis).
unfold wlist_to_seq_co in Hegtl ; rewrite seq_wlist_uncut_K in Hegtl.
destruct Hegtl as [gtailrec [Hgtlrecded Hgtlrcall]].
by exists [tuple of (abis :: gtailrec)] ; split ; simpl ;
[rewrite Hgtlrecded (eqP (bool_to_prop_r Habis)) ; inversion Hb
| apply/andP ; split ; [apply (bool_to_prop_l Habis) |apply Hgtlrcall]].
inversion Hgtailrec as [gtailrec [Hdedprev Hindeduced0]].
(*pose cl := {| ssval := Clause h l; ssvalP := Hclpclause |}.*)
pose cl := Clause h l.
pose tl := (seq_to_wlist bn gtailrec).
destruct (Sumbool.sumbool_of_bool (@wall trace_sem_trees bn (WUnotin (RS cl s)) tl)) as [Hprev | Hnotprev_wlist].
+ exists (@wu_cons_wlist _ _ bn gat_def (RS cl s) tl Hprev).
apply/andP ; split.
- apply/setUP ; right ; rewrite bigcup_seq ; apply/bigcupP.
exists cl.
- apply Hin.
- unfold fwd_chain_t ; unfold cons_clause_t ; unfold wu_pcons_seq ; unfold wu_pcons_wlist.
apply/mem_set_pset/imset2P.
apply Imset2spec with (x1 := gtailrec) (x2 := s).
+ auto.
+ apply/setIdP ; split.
- apply subset_to_in with (s1 := match_body (sem p def nb_iter i) (body_cl (Clause h l))).
+ auto.
+ apply match_body_incr ; apply/subsetP ; move => y Hy.
destruct (existsP (implyP (forallP (Hit Hsafe) y) Hy)) as [xy Hxy].
apply/imsetP.
exists xy.
- apply (bool_to_prop_l Hxy).
- apply/eqP. rewrite eq_sym. apply (bool_to_prop_r Hxy).
- apply/andP ; split.
+ by apply ded_gr_equal_stail with (gtl := gtl).
+ apply Hindeduced0.
(* Very inelegant... *)
+ destruct Sumbool.sumbool_of_bool as [Hprevbis | Hprevcontr].
- by rewrite (eq_irrelevance Hprev Hprevbis).
- by rewrite Hprev in Hprevcontr.
- by rewrite Hstail ; apply/eqP.
+ rewrite wall_allK in Hnotprev_wlist.
assert (Hnot_prev : ~~ @all trace_sem_trees (WUnotin (RS cl s)) (wlist_to_seq tl)).
- by rewrite Hnotprev_wlist.
clear Hnotprev_wlist.
rewrite <- has_predC in Hnot_prev.
(* The tree in tl (previous traces) that was built upon the needed one *)
destruct (hasP Hnot_prev) as [tsup Htsupintl Hclinsupnegneg].
unfold mem in Htsupintl.
unfold WUnotin in Hclinsupnegneg. unfold ABnotin in Hclinsupnegneg.
pose Hclinsup := negPn Hclinsupnegneg.
destruct (ABin_extract Hclinsup) as [prevocc Hposub Hporooteq].
destruct tsup as [tsup Hwutsup].
pose Hwupo := wu_pred_sub Hposub Hwutsup.
exists (Wht Hwupo). apply/andP ; split.
- apply/setUP ; left. apply trace_sem_prev_trees with (t2 := (Wht Hwutsup)).
+ apply (allP Hindeduced0).
unfold tl in Htsupintl.
rewrite seq_wlist_uncut_K in Htsupintl.
- unfold mem. unfold mem in Htsupintl. unfold trace_sem_trees.
simpl. simpl in Htsupintl. unfold in_mem in Htsupintl. simpl in Htsupintl.
unfold in_mem ; simpl.
destruct gtailrec as [gtailrec Htlr].
simpl. simpl in Htsupintl. simpl in tl.
clear Hgtailrec Hdedprev Hindeduced0 Hnot_prev Hclinsupnegneg Hclinsup Hwupo Hposub Hporooteq Htlr.
(* cf. cons_clause_t_desc *)
induction gtailrec.
+ by simpl in Htsupintl.
+ simpl in Htsupintl ; simpl ; simpl in IHgtailrec.
destruct (orP Htsupintl) as [Htsupa | Htsupl].
- by rewrite (eqP Htsupa) ; apply/orP ; left.
- apply/orP ; right ; apply/IHgtailrec/Htsupl.
- destruct gtailrec as [gtlrec Hgtlrec]. rewrite (eqP Hgtlrec). apply wlist_to_seq_size.
+ apply Hposub.
- rewrite Hstail.
dependent destruction prevocc ; inversion Hporooteq.
inversion Hporooteq. unfold ded. simpl. by rewrite H1.
Qed.
(** ** Soundness of the trace semantics *)
(** technical lemma *)
Lemma type_preserving_inversion (A B C : finType) (f2 : A -> B -> C)
(D1 : mem_pred A) (D2 : A -> mem_pred B)
: forall y : C, imset2_spec f2 D1 D2 y-> (exists x1 : A, exists x2 : B, (in_mem x1 D1 /\ in_mem x2 (D2 x1) /\ (y = f2 x1 x2))).
Proof.
move => y im2spec.
inversion im2spec.
exists x1.
exists x2.
move => //.
Qed.
(** technical lemma: unification failed with normal setIdP *)
Lemma setIdP_bool_to_prop {T : finType} (pA pB : pred T) : forall x, x \in [set y | pA y & pB y] -> ((pA x) /\ (pB x)).
Proof.
move => x Hx.
apply/setIdP/Hx.
Qed.
(** For every trace in the m-th step of the trace semantics, its deduced atom is
in the m-th step of the regular semantics *)
Lemma trace_sem_soundness nb_iter def i:
prog_safe p -> [forall t in (sem_t def nb_iter i), ded def t \in (sem p def nb_iter i)].
Proof.
move=> Hsafe.
induction nb_iter as [|nb_iter Hit];
apply/forallP ; move => t ; apply/implyP ; move => Hin.
- destruct (imsetP Hin) as [x Hxi Hxeq].
by destruct t ; rewrite Hxeq.
- destruct (setUP Hin) as [Hprev | Hjded].
+ by apply/fwd_chain_inc_single/((implyP (forallP Hit t))).
+ apply subset_to_in with (s1 := \bigcup_(cl <- p) cons_clause def cl (sem p def nb_iter i)).
- rewrite bigcup_seq.
rewrite bigcup_seq in Hjded.
destruct (bigcupP Hjded) as [pcl Hxpcl Hpclcons].
clear Hjded.
apply/bigcupP.
exists pcl.
+ apply Hxpcl.
+ destruct (type_preserving_inversion (imset2P (mem_pset_set Hpclcons))) as [ls [s [Htriv [Hsubmatch Ht]]]].
destruct (setIdP_bool_to_prop Hsubmatch) as [Hsmatch Hdedall].
apply/imsetP.
exists s.
- apply subset_to_in with (s1 := match_body (sem_tree_to_inter def (sem_t def nb_iter i)) (body_cl pcl)).
+ apply Hsmatch.
+ apply/match_body_incr/subsetP.
move => ga Hga.
unfold sem_tree_to_inter in Hga.
destruct (imsetP Hga) as [pre_ga Hpre_ga_ded H_ga_ded].
rewrite H_ga_ded.
by apply (implyP (forallP Hit pre_ga)).
- unfold wu_pcons_seq in Ht. unfold wu_pcons_wlist in Ht. destruct Sumbool.sumbool_of_bool as [Heq | Habs] in Ht.
+ inversion Ht as [Hrt]. unfold ded. simpl. by destruct pcl.
destruct pcl as [ppcl]. by destruct ppcl.
- apply subsetUr.
Qed.
(** If we have a node in the trace semantics with [(cl, s)] as root, then there is an interpretation where the body of [cl]
matches, producing [s]. *)
Lemma trace_sem_head_match def cl s l m i Htr :
prog_safe p
-> {| wht := ABNode (RS cl s) l; Hwht := Htr |} \in sem_t def m i
-> [exists i' : interp, ((s \in match_body i' (body_cl cl)))].
Proof.
move:cl s l i Htr.
induction m as [|m Hm].
move=>/= cl s l i Htr psafe /imsetP [ga Hgain [Hgaeq]].
inversion Hgaeq.
move=> /= cl s l i Htr psafe.
unfold fwd_chain_t.
move=>/setUP [Hrec|Hnew].
by apply (@Hm cl s l i Htr).
destruct (bigcup_seqP _ _ _ _ _ _ Hnew)
as [clb Hclbin Hclbcons]. clear Hnew.
destruct (andP Hclbcons) as [Hclbconsb Htriv]. clear Hclbcons. clear Htriv.
move:Hclbconsb. move=> /mem_pset_set /imset2P [prev_trees sb Hprevsin Hsub Htreeeq].
move:Hsub. rewrite in_set. move=>/andP [Hmatch /andP [Hprev Hininterp]].
unfold wu_pcons_seq in Htreeeq. unfold wu_pcons_wlist in Htreeeq.
destruct Sumbool.sumbool_of_bool in Htreeeq; [|inversion Htreeeq].
inversion Htreeeq as [[Hcleq Hseq Hleq]].
apply/existsP. exists (sem_tree_to_inter def (sem_t def m i)). apply Hmatch.
Qed.
(** technical lemma *)
Lemma sterm_ga_eq def s aa aga :
[seq sterm s x | x <- aa ] = [seq Val x | x <- aga]
-> aga = [seq gr_term_def def s i | i <- aa].
Proof.
move=>H. apply Logic.eq_sym. rewrite -(@map_id _ aga). move:H.
apply map_square_eq. move=>x y. apply gr_term_defP.
Qed.
(** technical lemma *)
Lemma stail_ded_eq cl s gtl def :
stail (body_cl cl) s = [seq to_atom ga | ga <- gtl]
-> [seq gr_atom_def def s i | i <- body_cl cl] =
[seq ded def i | i <- [seq my_tst_sub (x:=ABLeaf rul_gr x) (wu_pred_leaf x) | x <- gtl]].
Proof.
rewrite -map_comp.
apply map_square_eq.
move=>x y /gr_atom_defP H. by rewrite (H def).
Qed.
(** technical lemma *)
Lemma all_mem_edb_tst gtl (i : interp) :
all (mem i) gtl ->
all (mem [set my_tst_sub (x:=ABLeaf rul_gr x) (wu_pred_leaf x) | x in i])
[seq my_tst_sub (x:=ABLeaf rul_gr_finType x) (wu_pred_leaf x) | x <- gtl].
Proof.
move=>/allP H. apply/allP=>x /mapP [ga Hga ->].
apply/imsetP. exists ga. apply/H/Hga.
reflexivity.
Qed.
(** Let [p] be safe. For every clause [cl] of [p]
and substitution [s] resulting from matching [cl]
against the standard semantics of [p] at step
[m], there is a trace [t] in the (m+1)
trace semantics of [p] whose head is [(cl,s)] *)
Lemma trace_sem_completeness_b def m i: prog_safe p ->
[forall cl:clause in p, [forall s:sub in match_body (sem p def m i) (body_cl cl),
[exists t in sem_t def m.+1 i, (tst_node_head t) == Some (RS cl s)]]].
Proof.
move=>Hpsafe.
induction m as [|m Hm].
- apply/forallP=>cl. apply/implyP=>Hclin. apply/forallP=>s. apply/implyP=>/= Hsmatch.
destruct (match_tl_sound Hsmatch) as [gtl Hgtleq Hallgtlin].
pose sgtl := map (fun x => my_tst_sub (x:=ABLeaf rul_gr_finType x) (wu_pred_leaf x)) gtl.
assert (Hsize : size sgtl = size (body_cl cl)).
+ unfold sgtl. rewrite size_map. unfold stail in Hgtleq.
rewrite -(@size_map _ _ (fun x => satom x s)) -(@size_map _ _ to_atom). by apply/f_equal.
destruct (Sumbool.sumbool_of_bool (wall (WUnotin (RS cl s)) (seq_to_wlist bn sgtl)))
as [Hnotprevin|Hprevin].
+ apply/existsP.
exists (@wu_cons_wlist _ _ bn gat_def (RS cl s) (seq_to_wlist bn sgtl) Hnotprevin).
apply/andP;split.
unfold fwd_chain_t. apply/setUP;right. apply/bigcup_seqP.
exists cl. apply/Hclin. apply/andP;split;auto.
unfold base_sem_t.
apply/mem_set_pset/imset2P. rewrite -Hsize.
exists (in_tuple sgtl) s;auto.
rewrite in_set. apply/andP;split. simpl.
assert (Hieq : (sem_tree_to_inter def [set my_tst_sub (x:=ABLeaf rul_gr x) (wu_pred_leaf x) | x in i]) = i).
- unfold sem_tree_to_inter. apply/eqP. rewrite eqEsubset;apply/andP;split;apply/subsetP.
+ move=>y /imsetP [z] /imsetP [ga Hgain ->] ->. apply Hgain.
+ move=>y Hyin. apply/imsetP. exists (my_tst_sub (x:=ABLeaf rul_gr y) (wu_pred_leaf y)).
apply/imsetP. exists y. apply Hyin. auto. auto.
by rewrite Hieq.
apply/andP;split. unfold ded_sub_equal. unfold sgtl.
apply/eqP/Logic.eq_sym/stail_ded_eq/Hgtleq.
simpl. apply/all_mem_edb_tst/Hallgtlin. unfold wu_pcons_seq. unfold wu_pcons_wlist.
destruct (Sumbool.sumbool_of_bool)
as [Ht|Hf]; [|by exfalso; rewrite Hf in Hnotprevin].
by apply/f_equal/f_equal/val_inj. auto.
+ rewrite wall_allK seq_wlist_uncut_K in Hprevin.
assert (Hnot_prev : ~~ @all trace_sem_trees (WUnotin (RS cl s)) (in_tuple sgtl)).
- by rewrite Hprevin.
clear Hprevin.
rewrite <- has_predC in Hnot_prev.
destruct (hasP Hnot_prev) as [tsup Htsupintl Hclinsupnegneg]. clear Hnot_prev.
unfold WUnotin in Hclinsupnegneg. unfold ABnotin in Hclinsupnegneg. unfold predC in Hclinsupnegneg.
simpl in Hclinsupnegneg. rewrite Bool.negb_involutive in Hclinsupnegneg.
unfold ABin in Hclinsupnegneg.
destruct (ABin_extract Hclinsupnegneg) as [ptr Hsub Hroot].
unfold sgtl in Htsupintl.
destruct (mapP Htsupintl) as [ga Hgain Htsupeq].
rewrite Htsupeq in Hsub. simpl in Hsub.
destruct ptr. inversion Hroot. move: Hsub =>/eqP //.
+ destruct cl as [h tl]. simpl. unfold tail in tl. rewrite Hsize.
apply wlist_to_seq_size.
- apply/forallP=>cl. apply/implyP=>Hclin. apply/forallP=>s. apply/implyP=>/= Hsmatch.
unfold fwd_chain in Hsmatch.
destruct (match_tl_sound Hsmatch) as [gtl Hgtleq Hallgtlin].
assert (Hall : all (fun ga=> [exists tr:trace_sem_trees ,
(tr \in sem_t def m.+1 i) && (ded def tr == ga)]) gtl).
apply/allP. move=>ga Hga. destruct (setUP (allP Hallgtlin ga Hga)) as [Hgain|Hgain].
+ destruct (existsP (implyP (forallP (trace_sem_completeness m def i Hpsafe) ga) Hgain))
as [tr Htr]. destruct (andP Htr) as [H1 H2]. clear Htr.
apply/existsP. exists tr. apply/andP;split;auto.
by apply/setUP;left.
+ destruct (bigcup_seqP _ _ _ _ _ _ Hgain) as [clb Hclbin Hclb].
destruct (andP Hclb) as [H1 H2]. clear H2.
destruct (imsetP H1) as [sb Hsbin ->].
destruct (existsP (implyP (forallP (implyP (forallP Hm clb) Hclbin) sb) Hsbin)) as [tr Htr].
destruct (andP Htr) as [H2 H3].
apply/existsP. exists tr. apply/andP;split;auto. move:H3. destruct tr as [[] H5]; move=>/eqP [H3].
inversion H3. unfold ded. simpl. rewrite H3. by destruct clb.
destruct (all_exist_seq Hall) as [trs Htrs]. destruct (andP Htrs) as [Htrsin Htrseq]. clear Htrs.
assert (Hsizeb : size gtl = size (body_cl cl)).
rewrite -(@size_map _ _ to_atom). rewrite -(@size_map _ _ (fun x => satom x s) (body_cl cl)). by apply/f_equal.
assert (Hsize : size (map snd trs) = size (body_cl cl)).
by rewrite -Hsizeb -(eqP Htrsin) !size_map.
destruct (Sumbool.sumbool_of_bool (wall (WUnotin (RS cl s)) (seq_to_wlist bn (map snd trs))))
as [Hnotprevin|Hprevin].
+ apply/existsP.
exists (@wu_cons_wlist _ _ bn gat_def (RS cl s) (seq_to_wlist bn (map snd trs)) Hnotprevin).
apply/andP;split.
unfold fwd_chain_t. apply/setUP;right. apply/bigcup_seqP.
exists cl. apply/Hclin. apply/andP;split;auto.
apply/mem_set_pset/imset2P. rewrite -Hsize.
exists (in_tuple (map snd trs)) s;auto.
rewrite in_set. apply/andP;split. simpl.
assert (H : (sem p def m i
:|: \big[setU (T:=gatom_finType)/set0]_(cl <- p) cons_clause def cl (sem p def m i))
\subset (sem_tree_to_inter def
(sem_t def m i
:|: \big[setU (T:=wutree_finType)/set0]_(cl0 <- p) cons_clause_t def cl0 (sem_t def m i)))).
apply/subsetP. move=>y /setUP [Hy|Hy];apply/imsetP.
destruct (existsP (implyP (forallP (trace_sem_completeness m def i Hpsafe) y) Hy))
as [tr Htr]. destruct (andP Htr) as [Htr1 Htr2].
exists tr. apply/setUP;left;auto. by rewrite (eqP Htr2).
destruct (bigcup_seqP _ _ _ _ _ _ Hy) as [clb Hclbin Hclb]. destruct (andP Hclb) as [Hclbb Htriv].
destruct (imsetP Hclbb) as [sb Hsbin Hsbeq].
destruct (existsP (implyP (forallP (implyP (forallP Hm clb) Hclbin) sb) Hsbin))
as [tr Htr]. destruct (andP Htr) as [Htr1 Htr2].
exists tr. apply Htr1. rewrite Hsbeq. unfold tst_node_head in Htr2. unfold ded.
destruct tr as [[] Htrt]; pose Hf := eqP Htr2; inversion Hf as [Hff]. simpl in *.
rewrite Hff. by destruct clb as [hclb tlclb].
apply (subsetP (match_body_incr cl H) s Hsmatch).
apply/andP;split. unfold ded_sub_equal.
clear Hm. clear Hsmatch. clear Hallgtlin. clear Hsizeb. clear Hsize. clear Hnotprevin.
clear Hclin. move:Hgtleq.
destruct cl as [hcl tcl]. simpl. apply (@wlist_ind _ bn) with (u := tcl).
unfold wlist_to_seq_co. move=> l Pl. rewrite seq_wlist_uncut_K. clear tcl. clear Pl.
move:l trs Hall Htrsin Htrseq. induction gtl as [|gh gtl Hgtl];move=>[|hl tll];move=>[|htrs tltrs] //=.
move=>/andP [H1 H2] /andP [H31 H32] /andP [H4 H5] [H6 H7] H8.
assert (Hrew : ded def htrs.2 = gr_atom_def def s hl). destruct htrs as [ga [[gab|[clb sb]] Htr]];
simpl in *. destruct (andP H4) as [H9 H10]. rewrite (eqP H10).
destruct hl as [[phl ahl] Hhl]. unfold gr_atom_def. rewrite (eqP H31).
destruct gh as [[pga aga] Hga]. apply/val_inj. simpl in *. unfold gr_raw_atom_def.
rewrite H6. apply/f_equal. simpl. apply/sterm_ga_eq/H7. destruct (andP H4) as [H41 H42].
rewrite (eqP H42) (eqP H31). destruct gh as [[pgh agh] Hgh]; destruct hl as [[phl ahl] Hhl];
apply/val_inj. simpl in *. unfold gr_raw_atom_def. simpl. rewrite H6. apply/f_equal.
apply/sterm_ga_eq/H7.
rewrite Hrew. apply/eqP/f_equal. apply/eqP/Hgtl;auto.
apply/allP. move=>x Hx.
destruct (mapP Hx) as [[v vtr] Hvtrin Hvtreq].
destruct (andP (allP Htrseq (v, vtr) Hvtrin)) as [Hvtrsem Hvtrded].
simpl in *. rewrite -Hvtreq in Hvtrsem. apply Hvtrsem.
unfold wu_pcons_seq. unfold wu_pcons_wlist.
destruct (Sumbool.sumbool_of_bool)
as [Ht|Hf]; [|by exfalso; rewrite Hf in Hnotprevin].
by apply/f_equal/f_equal/val_inj. auto.
+ rewrite wall_allK seq_wlist_uncut_K in Hprevin.
assert (Hnot_prev : ~~ @all trace_sem_trees (WUnotin (RS cl s)) (in_tuple (map snd trs))).
- by rewrite Hprevin.
clear Hprevin.
rewrite <- has_predC in Hnot_prev.
destruct (hasP Hnot_prev) as [tsup Htsupintl Hclinsupnegneg]. clear Hnot_prev.
unfold WUnotin in Hclinsupnegneg. unfold ABnotin in Hclinsupnegneg. unfold predC in Hclinsupnegneg.
simpl in Hclinsupnegneg. rewrite Bool.negb_involutive in Hclinsupnegneg.
destruct (ABin_extract Hclinsupnegneg) as [ptr Hsub Hroot].
apply/existsP. destruct tsup as [tsup Htsup].
exists {|wht := ptr ; Hwht := (wu_pred_sub Hsub Htsup)|}. apply/andP;split.
unfold fwd_chain_t. apply/setUP;left.
destruct (mapP Htsupintl) as [[v vtr] Hvtrin Hvtreq].
destruct (andP (allP Htrseq (v, vtr) Hvtrin)) as [Hvtrsem Hvtrded].
simpl in *. rewrite -Hvtreq in Hvtrsem.
assert (Hin: {| wht := tsup; Hwht := Htsup |} \in sem_t def m.+1 i). apply Hvtrsem.
apply (@trace_sem_prev_trees _ _ _ {| wht := ptr; Hwht := wu_pred_sub (t1:=ptr) (t2:=tsup) Hsub Htsup |}
{| wht := tsup; Hwht := Htsup |} Hin). apply Hsub.
unfold tst_node_head. simpl. by destruct ptr; inversion Hroot.
+ destruct cl as [h tl]. simpl. unfold tail in tl. rewrite Hsize.
apply wlist_to_seq_size.
Qed.
(** All the clauses at the roots of trees in the
trace semantics of [p]
are clauses of [p] *)
Lemma tr_cl_in (tr : trace_sem_trees) def cl s m i:
tr \in sem_t def m i
-> ABroot (val tr) = inl (RS cl s)
-> cl \in p.
Proof.
induction m as [|m Hm]. move=>/imsetP [ga Hga ->] //.
move=>/setUP [Hrec|]. by apply Hm.
move=>/bigcup_seqP [clb Hclbin /andP [/imsetP [trb Htrbin ->] Htriv]].
rewrite mem_pmap in Htrbin. move:Htrbin.
move=>/mapP [trt /mapP [trp]]. rewrite mem_enum.
move=>/imset2P [descs sb Hdescsin H1 ->] ->.
unfold wu_pcons_seq. unfold wu_pcons_wlist.
destruct Sumbool.sumbool_of_bool;
by move=>// [->] [<-].
Qed.
(** For a program with safe heads, and
any trace in the semantics, if its deduced atom has an EDB predicate, then
the trace is a leaf. *)
Lemma sem_t_Edb_pred def k i (tr : trace_sem_trees) ga :
tr \in sem_t def k i
-> prog_safe_hds p
-> ded def tr = ga
-> predtype (sym_gatom ga) = Edb
-> exists gab, wht tr = (ABLeaf rul_gr gab).
Proof.
induction k as [|k Hk].
move=>/imsetP [x Hxin ->] H1 H2 /=. by exists x.
move=>Htrin. pose Htrinb := Htrin. clearbody Htrinb. move:Htrin.
move=>/setUP [Hrec|]. apply/Hk/Hrec.
move=>/bigcup_seqP [cl Hclin /andP [/imsetP [pred]]].
rewrite mem_pmap. move=>/mapP [opred /mapP [oopred]].
rewrite mem_enum. move=>/imset2P [descs sd Hdescsin].
rewrite in_set.
move=>/andP [sdmatch /andP [Hded Hall]] -> -> Heq -> Htriv Hsafe Hdga Htyp.
move:Heq. unfold wu_pcons_seq. unfold wu_pcons_wlist.
destruct (Sumbool.sumbool_of_bool);move=>// [Heq].
destruct pred as [[xga|[clx sx] ds] Hpred].
by exists xga.
rewrite -Hdga in Htyp. unfold ded in Htyp.
pose Htypb := (allP Hsafe cl Hclin). clearbody Htypb.
destruct clx as [hclx tlclx]. simpl in Htyp.
inversion Heq as [[Hcleq Hseq Hdeq]].
destruct cl as [hcl tlcl].
inversion Hcleq as [[Hhcleq Htlcleq]].
rewrite Hhcleq in Htyp.
unfold safe_cl_hd in Htypb.
destruct hcl. simpl in *.
rewrite Htyp in Htypb. pose Hf := eqP Htypb. inversion Hf.
Qed.
(** For a safe interpretation (an EDB), and
any trace in the semantics, if its deduced atom has an IDB predicate, then
the trace is an internal node. *)
Lemma sem_t_Idb_pred def k i (tr : trace_sem_trees) ga :
tr \in sem_t def k i
(*-> prog_safe_hds p*)
-> safe_edb i
-> ded def tr = ga
-> predtype (sym_gatom ga) = Idb
-> exists clsb, exists descs, wht tr = (ABNode clsb descs).
Proof.
induction k as [|k Hk].
move=>/imsetP [x Hxin ->] H1 /= H2. unfold ded in H2. simpl in H2.
rewrite -H2 (eqP (implyP (forallP H1 x) Hxin)) //.
move=>Htrin. have Htrinb := Htrin. move:Htrin.
move=>/setUP [Hrec|]. apply/Hk/Hrec.
move=>/bigcup_seqP [cl Hclin /andP [/imsetP [pred]]].
rewrite mem_pmap. move=>/mapP [opred /mapP [oopred]].
rewrite mem_enum. move=>/imset2P [descs sd Hdescsin].
rewrite in_set.
move=>/andP [sdmatch /andP [Hded Hall]] -> -> Heq -> Htriv Hsafe Hdga Htyp.
move:Heq. unfold wu_pcons_seq. unfold wu_pcons_wlist.
destruct (Sumbool.sumbool_of_bool);move=>// [Heq].
destruct pred as [[xga|[clx sx] ds] Hpred].
unfold wu_cons_wlist in Heq. unfold wu_cons in Heq.
inversion Heq.
exists (RS clx sx). exists ds. auto.
Qed.
(** ** Utility lemmas relating term occurrences ([t_occ]) and the traces
we find in the semantics *)
(** Assume a program with safe heads (no EDB predicate in a head) and a trace in the semantics whose root is
clause [cl]. If an occurrence points to a term of this clause inside an atom whose
predicate is an EDB predicate, then that atom appears as a leaf
among the direct children of the trace. *)
Lemma edb_in_sem_t_descs def cl s descs Hwht f (tocc : t_occ_finType p) k (i : interp) :
{| wht := ABNode (RS cl s) descs; Hwht := Hwht |}
\in sem_t def k i
-> prog_safe_hds p
-> nth_error p (r_ind tocc) = Some cl
-> p_at tocc = Some f
-> predtype f = Edb
-> exists ga, nth_error descs (b_ind tocc) = Some (@ABLeaf _ _ ga).
Proof.
move:cl s descs Hwht f tocc.
induction k as [|k Hk];move=>cl s descs Hwht f tocc /=.
move=>/imsetP [x] //.
move=>/setUP [Hrec|].
apply (Hk cl s descs Hwht f tocc Hrec).
move=>/bigcup_seqP [clb Hclbin /andP
[/mem_pset_set /imset2P [db sb Hdbin Hsbin Heq] Htriv]].
rewrite in_set in Hsbin.
destruct (andP Hsbin) as [Hsbmatch H].
destruct (andP H) as [Hded Hall]. clear H. clear Hsbin. clear Htriv.
move:Heq. unfold wu_pcons_seq. unfold wu_pcons_wlist.
destruct Sumbool.sumbool_of_bool;move=>// [Hcleq Hseq ->].
rewrite seq_wlistK.
unfold ded_sub_equal in Hded. unfold p_at. unfold at_at.
move=>Hsafe ->. move=>Hnth.
destruct (nth_error_case (body_cl cl) (b_ind tocc)) as [Hnone|[ato Hato]].
by rewrite Hnone in Hnth. destruct Hato as [Hatoin Hnthb].
assert (Heqb : nth_error [seq ded def i | i <- db] (b_ind tocc)
= Some (gr_atom_def def sb ato)).
rewrite (eqP Hded) -Hcleq -Hseq. apply/(nth_error_map)/Hnthb.
assert (Ht : exists trb, (nth_error db (b_ind tocc) = Some trb
/\ ded def trb = gr_atom_def def sb ato)).
apply/nth_error_preim/Heqb.
destruct Ht as [trb [Hnthrb Hdedrb]].
move=>Htyp.
pose Hpred := (allP Hall trb (nth_error_in Hnthrb)). clearbody Hpred.
assert (Hfeq : f = (sym_gatom (gr_atom_def def sb ato))).
rewrite Hnthb in Hnth. by inversion Hnth.
rewrite Hfeq in Htyp.
destruct (@sem_t_Edb_pred def k i trb (gr_atom_def def sb ato) Hpred Hsafe Hdedrb Htyp)
as [ga Hga]. exists ga.
destruct db. simpl.
simpl in Hnthrb.
by rewrite (@nth_error_map _ _ (@wht (Finite.eqType rul_gr_finType) (Finite.eqType gatom_finType) bn) tval (b_ind tocc) _ Hnthrb) Hga.
rewrite size_tuple. apply wlist_to_seq_size.
Qed.
(** Same as above, but with an IDB predicate: in that case, the child must be an internal node and its
head symbol must be the IDB predicate found *)
Lemma idb_in_sem_t_descs def cl s descs Hwht f (tocc : t_occ_finType p) k (i : interp) :
{| wht := ABNode (RS cl s) descs; Hwht := Hwht |}
\in sem_t def k i
-> safe_edb i
-> nth_error p (r_ind tocc) = Some cl
-> p_at tocc = Some f
-> predtype f = Idb
-> exists clsb, exists descsb, hsym_cl (rul_gr_rep clsb).1 = f /\
nth_error descs (b_ind tocc) = Some (@ABNode _ _ clsb descsb).
Proof.
move:cl s descs Hwht f tocc.
induction k as [|k Hk];move=>cl s descs Hwht f tocc /=.
move=>/imsetP [x] //.
move=>/setUP [Hrec|].
apply (Hk cl s descs Hwht f tocc Hrec).
move=>/bigcup_seqP [clb Hclbin /andP
[/mem_pset_set /imset2P [db sb Hdbin Hsbin Heq] Htriv]].
rewrite in_set in Hsbin.
destruct (andP Hsbin) as [Hsbmatch H].
destruct (andP H) as [Hded Hall]. clear H. clear Hsbin. clear Htriv.
move:Heq. unfold wu_pcons_seq. unfold wu_pcons_wlist.
destruct Sumbool.sumbool_of_bool;move=>// [Hcleq Hseq ->].
rewrite seq_wlistK.
unfold ded_sub_equal in Hded. unfold p_at. unfold at_at.
move=>Hsafe ->. move=>Hnth.
destruct (nth_error_case (body_cl cl) (b_ind tocc)) as [Hnone|[ato Hato]].
by rewrite Hnone in Hnth. destruct Hato as [Hatoin Hnthb].
assert (Heqb : nth_error [seq ded def i | i <- db] (b_ind tocc)
= Some (gr_atom_def def sb ato)).
rewrite (eqP Hded) -Hcleq -Hseq. apply/(nth_error_map)/Hnthb.
assert (Ht : exists trb, (nth_error db (b_ind tocc) = Some trb
/\ ded def trb = gr_atom_def def sb ato)).
apply/nth_error_preim/Heqb.
destruct Ht as [trb [Hnthrb Hdedrb]].
move=>Htyp.
pose Hpred := (allP Hall trb (nth_error_in Hnthrb)). clearbody Hpred.
assert (Hfeq : f = (sym_gatom (gr_atom_def def sb ato))).
rewrite Hnthb in Hnth. by inversion Hnth.
rewrite Hfeq in Htyp.
destruct (@sem_t_Idb_pred def k i trb (gr_atom_def def sb ato) Hpred Hsafe Hdedrb Htyp)
as [rulgr Hdescs]. exists rulgr. destruct Hdescs as [descsb H]. exists descsb.
destruct db. simpl.
simpl in Hnthrb. split.
rewrite Hfeq. simpl. destruct trb as [[|[[[[]]]]]].
inversion H.
simpl in H. unfold ded in Hdedrb. simpl in Hdedrb. inversion H.
unfold hsym_cl. simpl in *. by inversion Hdedrb.
by rewrite (@nth_error_map _ _ (@wht (Finite.eqType rul_gr_finType) (Finite.eqType gatom_finType) bn) tval (b_ind tocc) _ Hnthrb) H.
rewrite size_tuple. apply wlist_to_seq_size.
Qed.
End trace_sem_trees.
End tSemantics.
|
{"author": "Orange-OpenSource", "repo": "octant-proof", "sha": "ac920f5d906b7822ec585bc1bf3ec55ee74acddf", "save_path": "github-repos/coq/Orange-OpenSource-octant-proof", "path": "github-repos/coq/Orange-OpenSource-octant-proof/octant-proof-ac920f5d906b7822ec585bc1bf3ec55ee74acddf/octalgo/tSemantics.v"}
|
import sympy
import sympy.functions.elementary.exponential as symExp
constant = sympy.symbols('constant')
def get_constant(): return constant
def integrateFunc(func, variable, bounds=None, paramsToSub={}, conds='none'):
    # Indefinite integration adds an explicit constant of integration;
    # definite integration evaluates between bounds[0] and bounds[1].
    if bounds is None:
        func_int = sympy.integrate(func.subs(paramsToSub), variable, conds=conds) + constant
    else:
        func_int = sympy.integrate(func.subs(paramsToSub), (variable, bounds[0], bounds[1]))
    return func_int
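# Illustrative usage (a sketch; the symbols x and a below are hypothetical):
#   x, a = sympy.symbols('x a')
#   integrateFunc(a * x, x)                                      # -> a*x**2/2 + constant
#   integrateFunc(a * x, x, bounds=(0, 1), paramsToSub={a: 2})   # -> 1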
|
{"hexsha": "ba41a4e9862a03ff3ef4f16d28c0c8ab940fcbe7", "size": 442, "ext": "py", "lang": "Python", "max_stars_repo_path": "lib/auxlib.py", "max_stars_repo_name": "carlos-faria/Stochastic-Processes", "max_stars_repo_head_hexsha": "2ee57a1029566b606af781ec5d307eb33434fb79", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lib/auxlib.py", "max_issues_repo_name": "carlos-faria/Stochastic-Processes", "max_issues_repo_head_hexsha": "2ee57a1029566b606af781ec5d307eb33434fb79", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lib/auxlib.py", "max_forks_repo_name": "carlos-faria/Stochastic-Processes", "max_forks_repo_head_hexsha": "2ee57a1029566b606af781ec5d307eb33434fb79", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.0, "max_line_length": 86, "alphanum_fraction": 0.757918552, "include": true, "reason": "import sympy", "num_tokens": 113}
|
# This file was generated by the Julia Swagger Code Generator
# Do not modify this file directly. Modify the swagger specification instead.
mutable struct NetworkWatcherPropertiesFormat <: SwaggerModel
provisioningState::Any # spec type: Union{ Nothing, String } # spec name: provisioningState
function NetworkWatcherPropertiesFormat(;provisioningState=nothing)
o = new()
validate_property(NetworkWatcherPropertiesFormat, Symbol("provisioningState"), provisioningState)
setfield!(o, Symbol("provisioningState"), provisioningState)
o
end
end # type NetworkWatcherPropertiesFormat
const _property_map_NetworkWatcherPropertiesFormat = Dict{Symbol,Symbol}(Symbol("provisioningState")=>Symbol("provisioningState"))
const _property_types_NetworkWatcherPropertiesFormat = Dict{Symbol,String}(Symbol("provisioningState")=>"String")
Base.propertynames(::Type{ NetworkWatcherPropertiesFormat }) = collect(keys(_property_map_NetworkWatcherPropertiesFormat))
Swagger.property_type(::Type{ NetworkWatcherPropertiesFormat }, name::Symbol) = Union{Nothing,eval(Meta.parse(_property_types_NetworkWatcherPropertiesFormat[name]))}
Swagger.field_name(::Type{ NetworkWatcherPropertiesFormat }, property_name::Symbol) = _property_map_NetworkWatcherPropertiesFormat[property_name]
const _allowed_NetworkWatcherPropertiesFormat_provisioningState = ["Succeeded", "Updating", "Deleting", "Failed"]
function check_required(o::NetworkWatcherPropertiesFormat)
true
end
function validate_property(::Type{ NetworkWatcherPropertiesFormat }, name::Symbol, val)
if name === Symbol("provisioningState")
Swagger.validate_param(name, "NetworkWatcherPropertiesFormat", :enum, val, _allowed_NetworkWatcherPropertiesFormat_provisioningState)
end
end
|
{"hexsha": "ed11b38cac532637ae4253fd9a205d987715b1f1", "size": 1784, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Network/NetworkWatcherClient/model_NetworkWatcherPropertiesFormat.jl", "max_stars_repo_name": "UnofficialJuliaMirrorSnapshots/Azure.jl-34b51195-c7f2-5807-8107-6ca017e2682c", "max_stars_repo_head_hexsha": "30a6866bc570c1e0d7f8f2da7f49b93ba13cefb3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/Network/NetworkWatcherClient/model_NetworkWatcherPropertiesFormat.jl", "max_issues_repo_name": "UnofficialJuliaMirrorSnapshots/Azure.jl-34b51195-c7f2-5807-8107-6ca017e2682c", "max_issues_repo_head_hexsha": "30a6866bc570c1e0d7f8f2da7f49b93ba13cefb3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Network/NetworkWatcherClient/model_NetworkWatcherPropertiesFormat.jl", "max_forks_repo_name": "UnofficialJuliaMirrorSnapshots/Azure.jl-34b51195-c7f2-5807-8107-6ca017e2682c", "max_forks_repo_head_hexsha": "30a6866bc570c1e0d7f8f2da7f49b93ba13cefb3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 52.4705882353, "max_line_length": 165, "alphanum_fraction": 0.8071748879, "num_tokens": 371}
|
import matplotlib.pyplot as plt
import numpy as np
def Bisection(func, x, y, n):
    ## func = function whose root we seek
    ## x, y = two guess points bracketing the root
    ## n = number of iterations
    if func(x) * func(y) >= 0:
        print("Wrong input: func(x) and func(y) must have opposite signs")
        return None
    a = x  ## first point
    b = y  ## second point
    for N in range(0, n):
        z = (a + b) / 2
        if func(a) * func(z) < 0:  ## the root lies between a and z
            b = z
        else:  ## the root lies between z and b
            a = z
    print("Approximate root after " + str(n) + " iterations: " + str(z))
    return z
## ---------Functions of which we want to find the roots--------
print("(a): x^3-20 ")
print("(b): (2*x-1)*(x-3) ")
## -----------choice for the function--------------
choice = input("Enter the choice for function (a) or (b): ")
### finding roots
# -----------First function--------------
if choice == "a":
func = lambda x: x ** 3 - 20
x = int(input("Enter Upper limit(try 3): ")) ##give 3
y = int(input("Enter Lower Limit (try 2): ")) ## give 2
result = Bisection(func, x, y, 25)
print(result)
##---------------Plotting--------
x = np.linspace(-5, 5, 50)
f = x ** 3 - 20
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.spines['left'].set_position('center')
ax.spines['bottom'].set_position('zero')
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
#plot the function
plt.plot(x, f, 'g', label='f(x)=x^3-20')
plt.xlabel('x') # naming the x axis
plt.ylabel('f(x)') # naming the y axis
# giving a title to my graph
plt.legend(loc='upper left')
#plt.title('f(x)=x^3-20')
plt.show()
#------------Next Function---------
else:
func = lambda x: (2 * x - 1) * (x - 3)
x = float(input("Enter Upper limit (try 0): ")) ##give 0
y = float(input("Enter Lower Limit (try 0.9): ")) ## give 0.9
result = Bisection(func, x, y, 25)
print(result) ## Exact answer is 0.5
###------------Plotting-------------
x = np.linspace(-5, 5, 50)
f = (2 * x - 1) * (x - 3)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.spines['left'].set_position('center')
ax.spines['bottom'].set_position('zero')
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
#plot the function
plt.plot(x, f, 'c', label='f(x)=(2x-1)(x-3)')
plt.xlabel('x') # naming the x axis
plt.ylabel('f(x)') # naming the y axis
# giving a title to my graph
plt.legend(loc='upper left')
#plt.title('f(x)=x^3-20')
plt.show()
|
{"hexsha": "6d209d90249ac8b62c35c71f9d2fdbd6b6a74073", "size": 3095, "ext": "py", "lang": "Python", "max_stars_repo_path": "Numerical_Methods_Physics/Bisection.py", "max_stars_repo_name": "Simba2805/Computational_Physics_Python", "max_stars_repo_head_hexsha": "be687939c16a1d08066939830ac31ba666a3e1bb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Numerical_Methods_Physics/Bisection.py", "max_issues_repo_name": "Simba2805/Computational_Physics_Python", "max_issues_repo_head_hexsha": "be687939c16a1d08066939830ac31ba666a3e1bb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Numerical_Methods_Physics/Bisection.py", "max_forks_repo_name": "Simba2805/Computational_Physics_Python", "max_forks_repo_head_hexsha": "be687939c16a1d08066939830ac31ba666a3e1bb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.149122807, "max_line_length": 73, "alphanum_fraction": 0.4978998384, "include": true, "reason": "import numpy", "num_tokens": 921}
|
import os
import sys
import numpy as np
import keras
import kaldi_io
import tensorflow as tf
from keras.models import Model
from keras.layers import Input
from learning_to_adapt.model import load_model, create_maml, create_model, create_adapter, create_model_wrapper, set_model_weights
def converted_models_produce_correct_output(m_in, m_out):
    # Check that the converted model produces the same predictions as the original
adapt_x = np.random.normal(size=(4, 3, 20, 78, 40))
adapt_y = np.ones((4, 3, 20, 50, 1))
test_x = np.random.normal(size=(4, 20, 78, 40))
# Workaround for MAML models with wrong input dimensions
# maml = m_in.get_layer('maml_1')
# maml.wrapper.batch_size = 4
# m_in = create_maml(maml.wrapper, maml.get_weights()[0], maml.num_steps, maml.use_second_order_derivatives, maml.use_lr_per_step, maml.use_kld_regularization)
# m_in.load_weights(weights_in)
reference_predictions = m_in.predict([adapt_x, adapt_y, test_x])[1][0]
test_predictions = m_out.predict(test_x[0])
return np.allclose(reference_predictions, test_predictions)
if __name__ == '__main__':
model_in = sys.argv[1]
weights_in = sys.argv[2]
model_out = sys.argv[3]
meta_out = sys.argv[4]
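    # Example invocation (illustrative file names):
    #   python convert_maml_model.py maml.h5 maml_weights.h5 model.h5 adapter.h5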
if not model_in.endswith('.h5') or not model_out.endswith('.h5') or not meta_out.endswith('.h5'):
raise TypeError ('Unsupported model type. Please use h5 format. Update Keras if needed')
m_in = load_model(model_in)
m_in.load_weights(weights_in)
weights = m_in.get_weights()
# Bugfix for wrongly saved model-wrapper
m_in.get_layer('maml_1').wrapper.set_weights(m_in.get_layer('model_wrapper_2').get_weights())
try:
lda = m_in.get_layer('lda_1')
lda_weights = [x.flatten() for x in lda.get_weights()]
except ValueError:
lda_weights = []
model_weights = weights[0][0]
maml = m_in.get_layer('maml_1')
m_out = create_model(maml.wrapper, m_in.get_layer('lda_1'))
model_weights = np.concatenate(lda_weights + [maml.get_weights()[0].flatten()])
set_model_weights(m_out, model_weights, maml.wrapper)
assert converted_models_produce_correct_output(m_in, m_out)
m_out.compile(loss='sparse_categorical_crossentropy', optimizer='adam')
m_out.save(model_out)
m_out.summary()
adapter = create_adapter(create_model_wrapper(m_out), maml.num_steps, maml.use_lr_per_step, maml.use_kld_regularization, maml.get_weights()[1:])
adapter.save(meta_out)
adapter.summary()
    print(maml.get_weights()[1])
|
{"hexsha": "773df43ecd7bf32df0a602d8e854a82578c4c9f8", "size": 2488, "ext": "py", "lang": "Python", "max_stars_repo_path": "steps/meta/convert_maml_model.py", "max_stars_repo_name": "ondrejklejch/learning_to_adapt", "max_stars_repo_head_hexsha": "6de0b98370769596da16a1688582925ea2e1fa29", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 18, "max_stars_repo_stars_event_min_datetime": "2019-10-24T04:42:16.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-24T03:07:59.000Z", "max_issues_repo_path": "steps/meta/convert_maml_model.py", "max_issues_repo_name": "choko/learning_to_adapt", "max_issues_repo_head_hexsha": "6de0b98370769596da16a1688582925ea2e1fa29", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "steps/meta/convert_maml_model.py", "max_forks_repo_name": "choko/learning_to_adapt", "max_forks_repo_head_hexsha": "6de0b98370769596da16a1688582925ea2e1fa29", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2018-08-31T01:08:50.000Z", "max_forks_repo_forks_event_max_datetime": "2019-05-10T12:12:57.000Z", "avg_line_length": 35.5428571429, "max_line_length": 163, "alphanum_fraction": 0.7250803859, "include": true, "reason": "import numpy", "num_tokens": 654}
|
import cv2
import numpy as np
# ----------------------- Read the original image --------------------------
o = cv2.imread("cc.bmp")
cv2.imshow("original", o)
# Extract the contours
gray = cv2.cvtColor(o, cv2.COLOR_BGR2GRAY)
ret, binary = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)
contours, hierarchy = cv2.findContours(binary, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
cnt = contours[0]
# Draw the contour as an outline (hollow)
mask1 = np.zeros(gray.shape, np.uint8)
cv2.drawContours(mask1, [cnt], 0, 255, 2)
pixelpoints1 = cv2.findNonZero(mask1)
print("pixelpoints1.shape=", pixelpoints1)
print("pixelpoints=\n", pixelpoints1)
cv2.imshow("mask1", mask1)
# Draw the contour filled (solid)
mask2 = np.zeros(gray.shape, np.uint8)
cv2.drawContours(mask2, [cnt], 0, 255, -1)
pixelpoints2 = cv2.findNonZero(mask2)
print("pixelpoints2.shape=", pixelpoints2.shape)
print("pixelpoints2=\n", pixelpoints2)
cv2.imshow("mask2", mask2)
# Wait for a key press, then close all windows
cv2.waitKey()
cv2.destroyAllWindows()
|
{"hexsha": "f8f45cf04c3d9ed000ed0ddb20867a031d84cd1b", "size": 887, "ext": "py", "lang": "Python", "max_stars_repo_path": "opencv_learn/charpter12/demo_12.36.py", "max_stars_repo_name": "zhangxinzhou/play_game", "max_stars_repo_head_hexsha": "854448f8416b2d3f98bb2c3ed0f7d834a61593de", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "opencv_learn/charpter12/demo_12.36.py", "max_issues_repo_name": "zhangxinzhou/play_game", "max_issues_repo_head_hexsha": "854448f8416b2d3f98bb2c3ed0f7d834a61593de", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "opencv_learn/charpter12/demo_12.36.py", "max_forks_repo_name": "zhangxinzhou/play_game", "max_forks_repo_head_hexsha": "854448f8416b2d3f98bb2c3ed0f7d834a61593de", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.8787878788, "max_line_length": 86, "alphanum_fraction": 0.7001127396, "include": true, "reason": "import numpy", "num_tokens": 312}
|
import numpy as np
from shapely import affinity
from shapely.geometry import Point
from shapely.geometry.base import BaseGeometry
import problem_solution
# resolution of the polygon approximating a circle then scaled to approximate the ellipsis; according to Shapely documentation, a resolution of 16 allows to cover 99.8% of the circle's area (https://shapely.readthedocs.io/en/stable/manual.html#object.buffer)
RESOLUTION = 16
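# Quick check of the approximation quality (illustrative, not part of the class):
#   Point(0, 0).buffer(1, resolution=16).area   # ~3.1366, about 99.8% of pi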
class Ellipse(BaseGeometry):
def __init__(self, center, half_width, half_height, polygon=None):
"""Constructor"""
if type(center) != Point:
center = Point(center[0], center[1])
self.center = center
self.half_width = half_width
self.half_height = half_height
# approximate the ellipse as a polygon to avoid having to define checks (intersection, is-within) with all other shapes
circle_approximation = center.buffer(1, resolution=RESOLUTION)
if not polygon:
polygon = affinity.scale(circle_approximation, self.half_width, self.half_height)
self.polygon = polygon
def __reduce__(self):
return Ellipse, (self.center, self.half_width, self.half_height, self.polygon)
def intersects(self, other):
"""Returns True if geometries intersect, else False"""
# use the approximate polygon for the check
return problem_solution.do_shapes_intersect(self.polygon, other)
def within(self, other):
"""Returns True if geometry is within the other, else False"""
# use the approximate polygon for the check
return problem_solution.does_shape_contain_other(other, self.polygon)
def contains(self, other):
"""Returns True if the geometry contains the other, else False"""
# use the approximate polygon for the check
return problem_solution.does_shape_contain_other(self.polygon, other)
@property
def area(self):
"""Unitless area of the geometry (float)"""
return np.pi * self.half_width * self.half_height
@property
def ctypes(self):
return None
@property
def __array_interface__(self):
return None
def _set_coords(self, ob):
return None
@property
def xy(self):
return None
@property
def __geo_interface__(self):
return None
def svg(self, scale_factor=1., **kwargs):
return None
|
{"hexsha": "9a34811a6cc4888beeb48b972c8e5a794449914a", "size": 2422, "ext": "py", "lang": "Python", "max_stars_repo_path": "ellipse.py", "max_stars_repo_name": "uuanuo/KnapsackPacking", "max_stars_repo_head_hexsha": "1188d13f4f3071c3ca29b33cceafe4f8f5666691", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ellipse.py", "max_issues_repo_name": "uuanuo/KnapsackPacking", "max_issues_repo_head_hexsha": "1188d13f4f3071c3ca29b33cceafe4f8f5666691", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ellipse.py", "max_forks_repo_name": "uuanuo/KnapsackPacking", "max_forks_repo_head_hexsha": "1188d13f4f3071c3ca29b33cceafe4f8f5666691", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.5365853659, "max_line_length": 258, "alphanum_fraction": 0.6857968621, "include": true, "reason": "import numpy", "num_tokens": 535}
|
from matplotlib import pyplot as plt
import numpy as np
from abmarl.sim.components.agent import \
AttackingAgent, BroadcastingAgent, GridMovementAgent, \
PositionObservingAgent, LifeObservingAgent, TeamObservingAgent, AgentObservingAgent
from abmarl.sim.components.state import GridPositionState, BroadcastState, LifeState
from abmarl.sim.components.actor import GridMovementActor, AttackActor, BroadcastActor
from abmarl.sim.components.observer import PositionObserver, LifeObserver, TeamObserver
from abmarl.sim.components.done import TeamDeadDone
from abmarl.sim.components.wrappers.observer_wrapper import \
PositionRestrictedObservationWrapper, TeamBasedCommunicationWrapper
from abmarl.sim import AgentBasedSimulation
from abmarl.tools.matplotlib_utils import mscatter
class AllChannelsObservingAgent(
PositionObservingAgent, LifeObservingAgent, TeamObservingAgent, AgentObservingAgent
): pass
class CommunicatingAgent(BroadcastingAgent, AllChannelsObservingAgent): pass
class BattleAgent(AttackingAgent, GridMovementAgent, AllChannelsObservingAgent): pass
class TeamBattleCommsSim(AgentBasedSimulation):
def __init__(self, **kwargs):
self.agents = kwargs['agents']
# state
self.position_state = GridPositionState(**kwargs)
self.life_state = LifeState(**kwargs)
self.broadcast_state = BroadcastState(**kwargs)
# observer
position_observer = PositionObserver(position_state=self.position_state, **kwargs)
life_observer = LifeObserver(**kwargs)
team_observer = TeamObserver(**kwargs)
partial_observer = PositionRestrictedObservationWrapper(
[position_observer, team_observer, life_observer], **kwargs
)
self.comms_observer = TeamBasedCommunicationWrapper([partial_observer], **kwargs)
# actor
self.move_actor = GridMovementActor(position_state=self.position_state, **kwargs)
self.attack_actor = AttackActor(**kwargs)
self.broadcast_actor = BroadcastActor(broadcast_state=self.broadcast_state, **kwargs)
# done
self.done = TeamDeadDone(**kwargs)
self.finalize()
def reset(self, **kwargs):
self.position_state.reset(**kwargs)
self.life_state.reset(**kwargs)
self.broadcast_state.reset(**kwargs)
def step(self, action_dict, **kwargs):
# Process attacking
for agent_id, action in action_dict.items():
attacking_agent = self.agents[agent_id]
attacked_agent = self.attack_actor.process_action(attacking_agent, action, **kwargs)
if attacked_agent is not None:
self.life_state.modify_health(attacked_agent, -attacking_agent.attack_strength)
# Process movement
for agent_id, action in action_dict.items():
self.move_actor.process_action(self.agents[agent_id], action, **kwargs)
# Process broadcasting
for agent_id, action in action_dict.items():
self.broadcast_actor.process_action(self.agents[agent_id], action, **kwargs)
def render(self, fig=None, **kwargs):
fig.clear()
ax = fig.gca()
# Draw the agents
render_condition = {agent.id: agent.is_alive for agent in self.agents.values()}
shape_dict = {agent.id: 'o' if agent.team == 1 else 's' for agent in self.agents.values()}
ax.set(xlim=(0, self.position_state.region), ylim=(0, self.position_state.region))
ax.set_xticks(np.arange(0, self.position_state.region, 1))
ax.set_yticks(np.arange(0, self.position_state.region, 1))
ax.grid()
agents_x = [
agent.position[1] + 0.5 for agent in self.agents.values() if render_condition[agent.id]
]
agents_y = [
self.position_state.region - 0.5 - agent.position[0] for agent in self.agents.values()
if render_condition[agent.id]
]
shape = [shape_dict[agent_id] for agent_id in shape_dict if render_condition[agent_id]]
mscatter(agents_x, agents_y, ax=ax, m=shape, s=200, edgecolor='black', facecolor='gray')
plt.plot()
plt.pause(1e-6)
def get_obs(self, agent_id, **kwargs):
agent = self.agents[agent_id]
return self.comms_observer.get_obs(agent, **kwargs)
def get_reward(self, agent_id, **kwargs):
pass
def get_done(self, agent_id, **kwargs):
return self.done.get_done(self.agents[agent_id], **kwargs)
def get_all_done(self, **kwargs):
return self.done.get_all_done(**kwargs)
def get_info(self, agent_id, **kwargs):
return {}
if __name__ == "__main__":
agents = {
'agent0': CommunicatingAgent(
id='agent0', initial_position=np.array([7, 7]), team=1, broadcast_range=11,
agent_view=11
),
'agent1': BattleAgent(
id='agent1', initial_position=np.array([0, 4]), team=1, agent_view=2, attack_range=1,
move_range=1, attack_strength=1
),
'agent2': BattleAgent(
id='agent2', initial_position=np.array([0, 7]), team=1, agent_view=2, attack_range=1,
move_range=1, attack_strength=1
),
'agent3': BattleAgent(
id='agent3', initial_position=np.array([0, 10]), team=1, agent_view=2, attack_range=1,
move_range=1, attack_strength=1
),
'agent4': BattleAgent(
id='agent4', initial_position=np.array([14, 4]), team=2, agent_view=2, attack_range=1,
move_range=1, attack_strength=1
),
'agent5': BattleAgent(
id='agent5', initial_position=np.array([14, 7]), team=2, agent_view=2, attack_range=1,
move_range=1, attack_strength=1
),
'agent6': BattleAgent(
id='agent6', initial_position=np.array([14, 10]), team=2, agent_view=2, attack_range=1,
move_range=1, attack_strength=1
),
}
sim = TeamBattleCommsSim(
region=15,
agents=agents,
number_of_teams=2
)
sim.reset()
fig = plt.figure()
sim.render(fig=fig)
for _ in range(50):
action_dict = {
agent.id: agent.action_space.sample() for agent in sim.agents.values()
if agent.is_alive
}
sim.step(action_dict)
sim.render(fig=fig)
print(sim.get_all_done())
|
{"hexsha": "d7824396db0dfa3d30f820e9447b1f9f429c6e76", "size": 6373, "ext": "py", "lang": "Python", "max_stars_repo_path": "abmarl/sim/components/examples/comms_team_battle.py", "max_stars_repo_name": "Leonardo767/Abmarl", "max_stars_repo_head_hexsha": "9fada5447b09174c6a70b6032b4a8d08b66c4589", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "abmarl/sim/components/examples/comms_team_battle.py", "max_issues_repo_name": "Leonardo767/Abmarl", "max_issues_repo_head_hexsha": "9fada5447b09174c6a70b6032b4a8d08b66c4589", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "abmarl/sim/components/examples/comms_team_battle.py", "max_forks_repo_name": "Leonardo767/Abmarl", "max_forks_repo_head_hexsha": "9fada5447b09174c6a70b6032b4a8d08b66c4589", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.0981595092, "max_line_length": 99, "alphanum_fraction": 0.6640514671, "include": true, "reason": "import numpy", "num_tokens": 1483}
|
[STATEMENT]
lemma the_cat_sspan_Comp_app_\<oo>\<oo>[cat_ss_cs_simps]:
assumes "g = \<oo>\<^sub>S\<^sub>S" and "f = \<oo>\<^sub>S\<^sub>S"
shows "g \<circ>\<^sub>A\<^bsub>\<leftarrow>\<bullet>\<rightarrow>\<^sub>C\<^esub> f = g" "g \<circ>\<^sub>A\<^bsub>\<leftarrow>\<bullet>\<rightarrow>\<^sub>C\<^esub> f = f"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. g \<circ>\<^sub>A\<^bsub>\<leftarrow>\<bullet>\<rightarrow>\<^sub>C\<^esub> f = g &&& g \<circ>\<^sub>A\<^bsub>\<leftarrow>\<bullet>\<rightarrow>\<^sub>C\<^esub> f = f
[PROOF STEP]
proof-
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. g \<circ>\<^sub>A\<^bsub>\<leftarrow>\<bullet>\<rightarrow>\<^sub>C\<^esub> f = g
2. g \<circ>\<^sub>A\<^bsub>\<leftarrow>\<bullet>\<rightarrow>\<^sub>C\<^esub> f = f
[PROOF STEP]
from assms
[PROOF STATE]
proof (chain)
picking this:
g = \<oo>\<^sub>S\<^sub>S
f = \<oo>\<^sub>S\<^sub>S
[PROOF STEP]
have "[g, f]\<^sub>\<circ> \<in>\<^sub>\<circ> cat_sspan_composable"
[PROOF STATE]
proof (prove)
using this:
g = \<oo>\<^sub>S\<^sub>S
f = \<oo>\<^sub>S\<^sub>S
goal (1 subgoal):
1. [g, f]\<^sub>\<circ> \<in>\<^sub>\<circ> cat_sspan_composable
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
[g, f]\<^sub>\<circ> \<in>\<^sub>\<circ> cat_sspan_composable
goal (2 subgoals):
1. g \<circ>\<^sub>A\<^bsub>\<leftarrow>\<bullet>\<rightarrow>\<^sub>C\<^esub> f = g
2. g \<circ>\<^sub>A\<^bsub>\<leftarrow>\<bullet>\<rightarrow>\<^sub>C\<^esub> f = f
[PROOF STEP]
with assms
[PROOF STATE]
proof (chain)
picking this:
g = \<oo>\<^sub>S\<^sub>S
f = \<oo>\<^sub>S\<^sub>S
[g, f]\<^sub>\<circ> \<in>\<^sub>\<circ> cat_sspan_composable
[PROOF STEP]
show "g \<circ>\<^sub>A\<^bsub>\<leftarrow>\<bullet>\<rightarrow>\<^sub>C\<^esub> f = g" "g \<circ>\<^sub>A\<^bsub>\<leftarrow>\<bullet>\<rightarrow>\<^sub>C\<^esub> f = f"
[PROOF STATE]
proof (prove)
using this:
g = \<oo>\<^sub>S\<^sub>S
f = \<oo>\<^sub>S\<^sub>S
[g, f]\<^sub>\<circ> \<in>\<^sub>\<circ> cat_sspan_composable
goal (1 subgoal):
1. g \<circ>\<^sub>A\<^bsub>\<leftarrow>\<bullet>\<rightarrow>\<^sub>C\<^esub> f = g &&& g \<circ>\<^sub>A\<^bsub>\<leftarrow>\<bullet>\<rightarrow>\<^sub>C\<^esub> f = f
[PROOF STEP]
unfolding the_cat_sspan_components(5)
[PROOF STATE]
proof (prove)
using this:
g = \<oo>\<^sub>S\<^sub>S
f = \<oo>\<^sub>S\<^sub>S
[g, f]\<^sub>\<circ> \<in>\<^sub>\<circ> cat_sspan_composable
goal (1 subgoal):
1. (\<lambda>gf\<in>\<^sub>\<circ>cat_sspan_composable. if gf = [\<aa>\<^sub>S\<^sub>S, \<gg>\<^sub>S\<^sub>S]\<^sub>\<circ> \<Rightarrow> \<gg>\<^sub>S\<^sub>S | gf = [\<bb>\<^sub>S\<^sub>S, \<ff>\<^sub>S\<^sub>S]\<^sub>\<circ> \<Rightarrow> \<ff>\<^sub>S\<^sub>S | otherwise \<Rightarrow> gf\<lparr>[]\<^sub>\<circ>\<rparr>) \<lparr>g, f\<rparr>\<^sub>\<bullet> = g &&& (\<lambda>gf\<in>\<^sub>\<circ>cat_sspan_composable. if gf = [\<aa>\<^sub>S\<^sub>S, \<gg>\<^sub>S\<^sub>S]\<^sub>\<circ> \<Rightarrow> \<gg>\<^sub>S\<^sub>S | gf = [\<bb>\<^sub>S\<^sub>S, \<ff>\<^sub>S\<^sub>S]\<^sub>\<circ> \<Rightarrow> \<ff>\<^sub>S\<^sub>S | otherwise \<Rightarrow> gf\<lparr>[]\<^sub>\<circ>\<rparr>) \<lparr>g, f\<rparr>\<^sub>\<bullet> = f
[PROOF STEP]
by (auto simp: nat_omega_simps)
[PROOF STATE]
proof (state)
this:
g \<circ>\<^sub>A\<^bsub>\<leftarrow>\<bullet>\<rightarrow>\<^sub>C\<^esub> f = g
g \<circ>\<^sub>A\<^bsub>\<leftarrow>\<bullet>\<rightarrow>\<^sub>C\<^esub> f = f
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 1406, "file": "CZH_Elementary_Categories_czh_ecategories_CZH_ECAT_SS", "length": 9}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created Nov 2020
@author: hassi
"""
# Let's start by importing all we need.
import numpy as np
from math import sqrt
print("Ch 2: Quantum gates")
print("-------------------")
# Set up the basic qubit state vectors
print("Vector representations of our qubits:")
print("-------------------------------------")
qubits = {"|0\u27E9":np.array([1,0]), "|1\u27E9":np.array([0,1]), "(|0\u27E9+|1\u27E9)/\u221a2":1/sqrt(2)*np.array([1,1])}
for q in qubits:
print(q, "\n", qubits[q].round(3))
input("Press return to continue...\n")
print("Matrix representations of our quantum gates:")
print("--------------------------------------------")
gates ={"id":np.array([[1, 0], [0, 1]]),"x":np.array([[0, 1], [1, 0]]), "h":1/sqrt(2)*np.array([[1, 1], [1, -1]])}
for g in gates:
print(g, "\n", gates[g].round(3))
# Now, let's apply the defined gates to our qubits.
# Matrix manipulations
input("Press return to continue...\n")
print("Gate manipulations of our qubits:")
print("---------------------------------")
for g in gates:
print("Gate:",g)
for q in qubits:
print(q,"\n",qubits[q].round(3),"->", np.dot(gates[g],qubits[q]).round(3))
print("\n")
input("Press return to continue...\n")
print("Vector representations of our two qubits:")
print("-----------------------------------------")
twoqubits = {"|00\u27E9":np.array([1,0,0,0]), "|01\u27E9":np.array([0,1,0,0]),"|10\u27E9":np.array([0,0,1,0]),"|11\u27E9":np.array([0,0,0,1]),"|PH\u27E9":np.array([0.5,-0.5,0.5,-0.5])}
for b in twoqubits:
print(b, "\n", twoqubits[b])
input("Press return to continue...\n")
print("Matrix representations of our quantum gates:")
print("--------------------------------------------")
twogates ={"cx":np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]]), "swap":np.array([[1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]])}
for g in twogates:
print(g, "\n", twogates[g].round())
input("Press return to continue...\n")
# Matrix manipulations
print("Gate manipulations of our qubits:")
print("---------------------------------")
for g in twogates:
print("Gate:",g)
for b in twoqubits:
print(b,"\n",twoqubits[b],"->", np.dot(twogates[g],twoqubits[b]))
print("\n")
|
{"hexsha": "1edf7eb14fd6a5eab2373893232e2d83985cbc43", "size": 2270, "ext": "py", "lang": "Python", "max_stars_repo_path": "Chapter02/ch2_r3_qubit_gates.py", "max_stars_repo_name": "PacktPublishing/Quantum-Computing-in-Practice-with-IBM-Q-Experience", "max_stars_repo_head_hexsha": "91423f8ff1d039b5eb3fd18fc64bbb5967fdd5c1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 24, "max_stars_repo_stars_event_min_datetime": "2020-11-21T20:33:51.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-26T06:41:27.000Z", "max_issues_repo_path": "Chapter02/ch2_r3_qubit_gates.py", "max_issues_repo_name": "videomover/Quantum-Computing-in-Practice-with-Qiskit-and-IBM-Quantum-Experience", "max_issues_repo_head_hexsha": "938123d051c5bab72110011b3a05e515bb69ca09", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-02-07T14:32:12.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-25T07:23:46.000Z", "max_forks_repo_path": "Chapter02/ch2_r3_qubit_gates.py", "max_forks_repo_name": "videomover/Quantum-Computing-in-Practice-with-Qiskit-and-IBM-Quantum-Experience", "max_forks_repo_head_hexsha": "938123d051c5bab72110011b3a05e515bb69ca09", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 16, "max_forks_repo_forks_event_min_datetime": "2020-11-03T07:49:11.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-26T06:41:29.000Z", "avg_line_length": 28.7341772152, "max_line_length": 184, "alphanum_fraction": 0.5317180617, "include": true, "reason": "import numpy", "num_tokens": 758}
|
module mod_rmbv_dia
! **********************************************************************
! Author : C. Voemel
! Date of last modification : 7.7.00
! Description : PERFORMS MV MULT. WITH MATRIX IN 'DIA'-STORAGE
! rmbv = Right Multiplication By Vector: y=Ax
! **********************************************************************
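! Example (illustrative): for a 3x3 tridiagonal matrix, 'DIA' storage keeps
! ndiag = 3 diagonals with offsets IA1 = (/-1, 0, 1/); the values array A
! stores the diagonals contiguously (lda entries each), padded where a
! diagonal is shorter than lda.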
use representation_of_data
use properties
implicit none
interface rmbv_dia
module procedure irmbv_dia
module procedure srmbv_dia
module procedure drmbv_dia
module procedure crmbv_dia
module procedure zrmbv_dia
end interface
contains
! **********************************************************************
! **********************************************************************
subroutine irmbv_dia (mat,x,y,ierr)
implicit none
type(ispmat ), pointer :: mat
integer , dimension(:), intent(in) :: x
integer , dimension(:), intent(out) :: y
integer, intent(out) :: ierr
integer :: m,n,i,j
integer :: lda,ndiag,start_a,end_a,start_x,start_y
character :: diag,type,part
ierr = -1
m = size(y)
n = size(x)
if ((mat%FIDA.ne.'DIA').or.(mat%M.ne.m).or.(mat%K.ne.n)) then
ierr = blas_error_param
return
end if
call get_infoa(mat%INFOA,'d',lda,ierr)
if (ierr.ne.0) then
ierr = blas_error_param
return
end if
call get_infoa(mat%INFOA,'e',ndiag,ierr)
if (ierr.ne.0) then
ierr = blas_error_param
return
end if
call get_descra(mat%DESCRA,'d',diag,ierr)
if (ierr.ne.0) then
ierr = blas_error_param
return
end if
call get_descra(mat%DESCRA,'t',type,ierr)
if (ierr.ne.0) then
ierr = blas_error_param
return
end if
call get_descra(mat%DESCRA,'a',part,ierr)
if (ierr.ne.0) then
ierr = blas_error_param
return
end if
y = 0
if (diag.eq.'U') then !process unstored diagonal
if (m.eq.n) then
y = x
else
ierr = blas_error_param
return
end if
end if
if ((type.eq.'S').and.(.not.(part.eq.'B')).and.(m.eq.n)) then
if (part.eq.'U') then
do i=1,ndiag
start_x = max(0,mat%IA1(i))
start_y = max(0,-mat%IA1(i))
if (mat%IA1(i).gt.0) then
start_a = (i-1)*lda
end_a = i*lda -mat%IA1(i)+mat%K-lda
j = 1
do while((start_a + j).le.end_a)
y(start_y +j) = y(start_y +j) &
+ mat%A(start_a+j) * x(start_x +j)
y(start_x +j) = y(start_x +j) &
+ mat%A(start_a+j) * x(start_y +j)
j = j+1
end do
else if (mat%IA1(i).eq.0) then
start_a = (i-1)*lda
end_a = i*lda
j = 1
do while((start_a + j).le.end_a)
y(start_y+j) = y(start_y+j) &
+ mat%A(start_a+j) * x(start_x+j)
j = j+1
end do
else
cycle
end if
end do
end if
ierr = 0
else if((type.eq.'H').and.(.not.(part.eq.'B')).and.(m.eq.n)) &
then
if (part.eq.'U') then
do i=1,ndiag
start_x = max(0,mat%IA1(i))
start_y = max(0,-mat%IA1(i))
if (mat%IA1(i).gt.0) then
start_a = (i-1)*lda
end_a = i*lda -mat%IA1(i)+mat%K-lda
j = 1
do while((start_a + j).le.end_a)
y(start_y +j) = y(start_y +j) &
+ mat%A(start_a+j) * x(start_x +j)
y(start_x +j) = y(start_x +j) &
+ (mat%A(start_a+j)) * x(start_y +j)
j = j+1
end do
else if (mat%IA1(i).eq.0) then
start_a = (i-1)*lda
end_a = i*lda
j = 1
do while((start_a + j).le.end_a)
y(start_y+j) = y(start_y+j) &
+ mat%A(start_a+j) * x(start_x+j)
j = j+1
end do
else
cycle
end if
end do
else !(part.eq.'L')
do i=1,ndiag
start_x = max(0,mat%IA1(i))
start_y = max(0,-mat%IA1(i))
if (mat%IA1(i).lt.0) then
start_a = (i-1)*lda -mat%IA1(i)-mat%M+lda
end_a = i*lda
j = 1
do while((start_a + j).le.end_a)
y(start_y+j) = y(start_y+j) &
+ mat%A(start_a+j) * x(start_x+j)
y(start_x +j) = y(start_x+j) &
+ (mat%A(start_a+j)) * x(start_y+j)
j = j+1
end do
else if (mat%IA1(i).eq.0) then
start_a = (i-1)*lda
end_a = i*lda
j = 1
do while((start_a + j).le.end_a)
y(start_y+j) = y(start_y+j) &
+ mat%A(start_a+j) * x(start_x+j)
j = j+1
end do
else
cycle
end if
end do
end if
ierr = 0
else !no symmetry
do i=1,ndiag
start_x = max(0,mat%IA1(i))
start_y = max(0,-mat%IA1(i))
if (mat%IA1(i).gt.mat%K-lda) then
start_a = (i-1)*lda
end_a = i*lda -mat%IA1(i)+mat%K-lda
else if (mat%IA1(i).lt.-mat%M+lda) then
start_a = (i-1)*lda -mat%IA1(i)-mat%M+lda
end_a = i*lda
else
start_a = (i-1)*lda
end_a = i*lda
end if
j = 1
do while((start_a + j).le.end_a)
y(start_y+j) = y(start_y+j) &
+ mat%A(start_a+j) * x(start_x+j)
j = j+1
end do
end do
ierr = 0
end if
end subroutine irmbv_dia
! **********************************************************************
! **********************************************************************
subroutine srmbv_dia (mat,x,y,ierr)
implicit none
type(sspmat ), pointer :: mat
real(KIND=sp) , dimension(:), intent(in) :: x
real(KIND=sp) , dimension(:), intent(out) :: y
integer, intent(out) :: ierr
integer :: m,n,i,j
integer :: lda,ndiag,start_a,end_a,start_x,start_y
character :: diag,type,part
ierr = -1
m = size(y)
n = size(x)
if ((mat%FIDA.ne.'DIA').or.(mat%M.ne.m).or.(mat%K.ne.n)) then
ierr = blas_error_param
return
end if
call get_infoa(mat%INFOA,'d',lda,ierr)
if (ierr.ne.0) then
ierr = blas_error_param
return
end if
call get_infoa(mat%INFOA,'e',ndiag,ierr)
if (ierr.ne.0) then
ierr = blas_error_param
return
end if
call get_descra(mat%DESCRA,'d',diag,ierr)
if (ierr.ne.0) then
ierr = blas_error_param
return
end if
call get_descra(mat%DESCRA,'t',type,ierr)
if (ierr.ne.0) then
ierr = blas_error_param
return
end if
call get_descra(mat%DESCRA,'a',part,ierr)
if (ierr.ne.0) then
ierr = blas_error_param
return
end if
y = 0.0e0
if (diag.eq.'U') then !process unstored diagonal
if (m.eq.n) then
y = x
else
ierr = blas_error_param
return
end if
end if
if ((type.eq.'S').and.(.not.(part.eq.'B')).and.(m.eq.n)) then
if (part.eq.'U') then
do i=1,ndiag
start_x = max(0,mat%IA1(i))
start_y = max(0,-mat%IA1(i))
if (mat%IA1(i).gt.0) then
start_a = (i-1)*lda
end_a = i*lda -mat%IA1(i)+mat%K-lda
j = 1
do while((start_a + j).le.end_a)
y(start_y +j) = y(start_y +j) &
+ mat%A(start_a+j) * x(start_x +j)
y(start_x +j) = y(start_x +j) &
+ mat%A(start_a+j) * x(start_y +j)
j = j+1
end do
else if (mat%IA1(i).eq.0) then
start_a = (i-1)*lda
end_a = i*lda
j = 1
do while((start_a + j).le.end_a)
y(start_y+j) = y(start_y+j) &
+ mat%A(start_a+j) * x(start_x+j)
j = j+1
end do
else
cycle
end if
end do
end if
ierr = 0
else if((type.eq.'H').and.(.not.(part.eq.'B')).and.(m.eq.n)) &
then
if (part.eq.'U') then
do i=1,ndiag
start_x = max(0,mat%IA1(i))
start_y = max(0,-mat%IA1(i))
if (mat%IA1(i).gt.0) then
start_a = (i-1)*lda
end_a = i*lda -mat%IA1(i)+mat%K-lda
j = 1
do while((start_a + j).le.end_a)
y(start_y +j) = y(start_y +j) &
+ mat%A(start_a+j) * x(start_x +j)
y(start_x +j) = y(start_x +j) &
+ (mat%A(start_a+j)) * x(start_y +j)
j = j+1
end do
else if (mat%IA1(i).eq.0) then
start_a = (i-1)*lda
end_a = i*lda
j = 1
do while((start_a + j).le.end_a)
y(start_y+j) = y(start_y+j) &
+ mat%A(start_a+j) * x(start_x+j)
j = j+1
end do
else
cycle
end if
end do
else !(part.eq.'L')
do i=1,ndiag
start_x = max(0,mat%IA1(i))
start_y = max(0,-mat%IA1(i))
if (mat%IA1(i).lt.0) then
start_a = (i-1)*lda -mat%IA1(i)-mat%M+lda
end_a = i*lda
j = 1
do while((start_a + j).le.end_a)
y(start_y+j) = y(start_y+j) &
+ mat%A(start_a+j) * x(start_x+j)
y(start_x +j) = y(start_x+j) &
+ (mat%A(start_a+j)) * x(start_y+j)
j = j+1
end do
else if (mat%IA1(i).eq.0) then
start_a = (i-1)*lda
end_a = i*lda
j = 1
do while((start_a + j).le.end_a)
y(start_y+j) = y(start_y+j) &
+ mat%A(start_a+j) * x(start_x+j)
j = j+1
end do
else
cycle
end if
end do
end if
ierr = 0
else !no symmetry
do i=1,ndiag
start_x = max(0,mat%IA1(i))
start_y = max(0,-mat%IA1(i))
if (mat%IA1(i).gt.mat%K-lda) then
start_a = (i-1)*lda
end_a = i*lda -mat%IA1(i)+mat%K-lda
else if (mat%IA1(i).lt.-mat%M+lda) then
start_a = (i-1)*lda -mat%IA1(i)-mat%M+lda
end_a = i*lda
else
start_a = (i-1)*lda
end_a = i*lda
end if
j = 1
do while((start_a + j).le.end_a)
y(start_y+j) = y(start_y+j) &
+ mat%A(start_a+j) * x(start_x+j)
j = j+1
end do
end do
ierr = 0
end if
end subroutine srmbv_dia
! **********************************************************************
! **********************************************************************
subroutine drmbv_dia (mat,x,y,ierr)
implicit none
type(dspmat ), pointer :: mat
real(KIND=dp) , dimension(:), intent(in) :: x
real(KIND=dp) , dimension(:), intent(out) :: y
integer, intent(out) :: ierr
integer :: m,n,i,j
integer :: lda,ndiag,start_a,end_a,start_x,start_y
character :: diag,type,part
ierr = -1
m = size(y)
n = size(x)
if ((mat%FIDA.ne.'DIA').or.(mat%M.ne.m).or.(mat%K.ne.n)) then
ierr = blas_error_param
return
end if
call get_infoa(mat%INFOA,'d',lda,ierr)
if (ierr.ne.0) then
ierr = blas_error_param
return
end if
call get_infoa(mat%INFOA,'e',ndiag,ierr)
if (ierr.ne.0) then
ierr = blas_error_param
return
end if
call get_descra(mat%DESCRA,'d',diag,ierr)
if (ierr.ne.0) then
ierr = blas_error_param
return
end if
call get_descra(mat%DESCRA,'t',type,ierr)
if (ierr.ne.0) then
ierr = blas_error_param
return
end if
call get_descra(mat%DESCRA,'a',part,ierr)
if (ierr.ne.0) then
ierr = blas_error_param
return
end if
y = 0.0d0
if (diag.eq.'U') then !process unstored diagonal
if (m.eq.n) then
y = x
else
ierr = blas_error_param
return
end if
end if
if ((type.eq.'S').and.(.not.(part.eq.'B')).and.(m.eq.n)) then
if (part.eq.'U') then
do i=1,ndiag
start_x = max(0,mat%IA1(i))
start_y = max(0,-mat%IA1(i))
if (mat%IA1(i).gt.0) then
start_a = (i-1)*lda
end_a = i*lda -mat%IA1(i)+mat%K-lda
j = 1
do while((start_a + j).le.end_a)
y(start_y +j) = y(start_y +j) &
+ mat%A(start_a+j) * x(start_x +j)
y(start_x +j) = y(start_x +j) &
+ mat%A(start_a+j) * x(start_y +j)
j = j+1
end do
else if (mat%IA1(i).eq.0) then
start_a = (i-1)*lda
end_a = i*lda
j = 1
do while((start_a + j).le.end_a)
y(start_y+j) = y(start_y+j) &
+ mat%A(start_a+j) * x(start_x+j)
j = j+1
end do
else
cycle
end if
end do
end if
ierr = 0
else if((type.eq.'H').and.(.not.(part.eq.'B')).and.(m.eq.n)) &
then
if (part.eq.'U') then
do i=1,ndiag
start_x = max(0,mat%IA1(i))
start_y = max(0,-mat%IA1(i))
if (mat%IA1(i).gt.0) then
start_a = (i-1)*lda
end_a = i*lda -mat%IA1(i)+mat%K-lda
j = 1
do while((start_a + j).le.end_a)
y(start_y +j) = y(start_y +j) &
+ mat%A(start_a+j) * x(start_x +j)
y(start_x +j) = y(start_x +j) &
+ (mat%A(start_a+j)) * x(start_y +j)
j = j+1
end do
else if (mat%IA1(i).eq.0) then
start_a = (i-1)*lda
end_a = i*lda
j = 1
do while((start_a + j).le.end_a)
y(start_y+j) = y(start_y+j) &
+ mat%A(start_a+j) * x(start_x+j)
j = j+1
end do
else
cycle
end if
end do
else !(part.eq.'L')
do i=1,ndiag
start_x = max(0,mat%IA1(i))
start_y = max(0,-mat%IA1(i))
if (mat%IA1(i).lt.0) then
start_a = (i-1)*lda -mat%IA1(i)-mat%M+lda
end_a = i*lda
j = 1
do while((start_a + j).le.end_a)
y(start_y+j) = y(start_y+j) &
+ mat%A(start_a+j) * x(start_x+j)
y(start_x +j) = y(start_x+j) &
+ (mat%A(start_a+j)) * x(start_y+j)
j = j+1
end do
else if (mat%IA1(i).eq.0) then
start_a = (i-1)*lda
end_a = i*lda
j = 1
do while((start_a + j).le.end_a)
y(start_y+j) = y(start_y+j) &
+ mat%A(start_a+j) * x(start_x+j)
j = j+1
end do
else
cycle
end if
end do
end if
ierr = 0
else !no symmetry
do i=1,ndiag
start_x = max(0,mat%IA1(i))
start_y = max(0,-mat%IA1(i))
if (mat%IA1(i).gt.mat%K-lda) then
start_a = (i-1)*lda
end_a = i*lda -mat%IA1(i)+mat%K-lda
else if (mat%IA1(i).lt.-mat%M+lda) then
start_a = (i-1)*lda -mat%IA1(i)-mat%M+lda
end_a = i*lda
else
start_a = (i-1)*lda
end_a = i*lda
end if
j = 1
do while((start_a + j).le.end_a)
y(start_y+j) = y(start_y+j) &
+ mat%A(start_a+j) * x(start_x+j)
j = j+1
end do
end do
ierr = 0
end if
end subroutine drmbv_dia
! **********************************************************************
! **********************************************************************
subroutine crmbv_dia (mat,x,y,ierr)
implicit none
type(cspmat ), pointer :: mat
complex(KIND=sp) , dimension(:), intent(in) :: x
complex(KIND=sp) , dimension(:), intent(out) :: y
integer, intent(out) :: ierr
integer :: m,n,i,j
integer :: lda,ndiag,start_a,end_a,start_x,start_y
character :: diag,type,part
ierr = -1
m = size(y)
n = size(x)
if ((mat%FIDA.ne.'DIA').or.(mat%M.ne.m).or.(mat%K.ne.n)) then
ierr = blas_error_param
return
end if
call get_infoa(mat%INFOA,'d',lda,ierr)
if (ierr.ne.0) then
ierr = blas_error_param
return
end if
call get_infoa(mat%INFOA,'e',ndiag,ierr)
if (ierr.ne.0) then
ierr = blas_error_param
return
end if
call get_descra(mat%DESCRA,'d',diag,ierr)
if (ierr.ne.0) then
ierr = blas_error_param
return
end if
call get_descra(mat%DESCRA,'t',type,ierr)
if (ierr.ne.0) then
ierr = blas_error_param
return
end if
call get_descra(mat%DESCRA,'a',part,ierr)
if (ierr.ne.0) then
ierr = blas_error_param
return
end if
y = (0.0e0, 0.0e0)
if (diag.eq.'U') then !process unstored diagonal
if (m.eq.n) then
y = x
else
ierr = blas_error_param
return
end if
end if
if ((type.eq.'S').and.(.not.(part.eq.'B')).and.(m.eq.n)) then
if (part.eq.'U') then
do i=1,ndiag
start_x = max(0,mat%IA1(i))
start_y = max(0,-mat%IA1(i))
if (mat%IA1(i).gt.0) then
start_a = (i-1)*lda
end_a = i*lda -mat%IA1(i)+mat%K-lda
j = 1
do while((start_a + j).le.end_a)
y(start_y +j) = y(start_y +j) &
+ mat%A(start_a+j) * x(start_x +j)
y(start_x +j) = y(start_x +j) &
+ mat%A(start_a+j) * x(start_y +j)
j = j+1
end do
else if (mat%IA1(i).eq.0) then
start_a = (i-1)*lda
end_a = i*lda
j = 1
do while((start_a + j).le.end_a)
y(start_y+j) = y(start_y+j) &
+ mat%A(start_a+j) * x(start_x+j)
j = j+1
end do
else
cycle
end if
end do
end if
ierr = 0
else if((type.eq.'H').and.(.not.(part.eq.'B')).and.(m.eq.n)) &
then
if (part.eq.'U') then
do i=1,ndiag
start_x = max(0,mat%IA1(i))
start_y = max(0,-mat%IA1(i))
if (mat%IA1(i).gt.0) then
start_a = (i-1)*lda
end_a = i*lda -mat%IA1(i)+mat%K-lda
j = 1
do while((start_a + j).le.end_a)
y(start_y +j) = y(start_y +j) &
+ mat%A(start_a+j) * x(start_x +j)
y(start_x +j) = y(start_x +j) &
+ conjg (mat%A(start_a+j)) * x(start_y +j)
j = j+1
end do
else if (mat%IA1(i).eq.0) then
start_a = (i-1)*lda
end_a = i*lda
j = 1
do while((start_a + j).le.end_a)
y(start_y+j) = y(start_y+j) &
+ mat%A(start_a+j) * x(start_x+j)
j = j+1
end do
else
cycle
end if
end do
else !(part.eq.'L')
do i=1,ndiag
start_x = max(0,mat%IA1(i))
start_y = max(0,-mat%IA1(i))
if (mat%IA1(i).lt.0) then
start_a = (i-1)*lda -mat%IA1(i)-mat%M+lda
end_a = i*lda
j = 1
do while((start_a + j).le.end_a)
y(start_y+j) = y(start_y+j) &
+ mat%A(start_a+j) * x(start_x+j)
y(start_x +j) = y(start_x+j) &
+ conjg (mat%A(start_a+j)) * x(start_y+j)
j = j+1
end do
else if (mat%IA1(i).eq.0) then
start_a = (i-1)*lda
end_a = i*lda
j = 1
do while((start_a + j).le.end_a)
y(start_y+j) = y(start_y+j) &
+ mat%A(start_a+j) * x(start_x+j)
j = j+1
end do
else
cycle
end if
end do
end if
ierr = 0
else !no symmetry
do i=1,ndiag
start_x = max(0,mat%IA1(i))
start_y = max(0,-mat%IA1(i))
if (mat%IA1(i).gt.mat%K-lda) then
start_a = (i-1)*lda
end_a = i*lda -mat%IA1(i)+mat%K-lda
else if (mat%IA1(i).lt.-mat%M+lda) then
start_a = (i-1)*lda -mat%IA1(i)-mat%M+lda
end_a = i*lda
else
start_a = (i-1)*lda
end_a = i*lda
end if
j = 1
do while((start_a + j).le.end_a)
y(start_y+j) = y(start_y+j) &
+ mat%A(start_a+j) * x(start_x+j)
j = j+1
end do
end do
ierr = 0
end if
end subroutine crmbv_dia
! **********************************************************************
! **********************************************************************
subroutine zrmbv_dia (mat,x,y,ierr)
implicit none
type(zspmat ), pointer :: mat
complex(KIND=dp) , dimension(:), intent(in) :: x
complex(KIND=dp) , dimension(:), intent(out) :: y
integer, intent(out) :: ierr
integer :: m,n,i,j
integer :: lda,ndiag,start_a,end_a,start_x,start_y
character :: diag,type,part
ierr = -1
m = size(y)
n = size(x)
if ((mat%FIDA.ne.'DIA').or.(mat%M.ne.m).or.(mat%K.ne.n)) then
ierr = blas_error_param
return
end if
call get_infoa(mat%INFOA,'d',lda,ierr)
if (ierr.ne.0) then
ierr = blas_error_param
return
end if
call get_infoa(mat%INFOA,'e',ndiag,ierr)
if (ierr.ne.0) then
ierr = blas_error_param
return
end if
call get_descra(mat%DESCRA,'d',diag,ierr)
if (ierr.ne.0) then
ierr = blas_error_param
return
end if
call get_descra(mat%DESCRA,'t',type,ierr)
if (ierr.ne.0) then
ierr = blas_error_param
return
end if
call get_descra(mat%DESCRA,'a',part,ierr)
if (ierr.ne.0) then
ierr = blas_error_param
return
end if
y = (0.0d0, 0.0d0)
if (diag.eq.'U') then !process unstored diagonal
if (m.eq.n) then
y = x
else
ierr = blas_error_param
return
end if
end if
if ((type.eq.'S').and.(.not.(part.eq.'B')).and.(m.eq.n)) then
if (part.eq.'U') then
do i=1,ndiag
start_x = max(0,mat%IA1(i))
start_y = max(0,-mat%IA1(i))
if (mat%IA1(i).gt.0) then
start_a = (i-1)*lda
end_a = i*lda -mat%IA1(i)+mat%K-lda
j = 1
do while((start_a + j).le.end_a)
y(start_y +j) = y(start_y +j) &
+ mat%A(start_a+j) * x(start_x +j)
y(start_x +j) = y(start_x +j) &
+ mat%A(start_a+j) * x(start_y +j)
j = j+1
end do
else if (mat%IA1(i).eq.0) then
start_a = (i-1)*lda
end_a = i*lda
j = 1
do while((start_a + j).le.end_a)
y(start_y+j) = y(start_y+j) &
+ mat%A(start_a+j) * x(start_x+j)
j = j+1
end do
else
cycle
end if
end do
end if
ierr = 0
else if((type.eq.'H').and.(.not.(part.eq.'B')).and.(m.eq.n)) &
then
if (part.eq.'U') then
do i=1,ndiag
start_x = max(0,mat%IA1(i))
start_y = max(0,-mat%IA1(i))
if (mat%IA1(i).gt.0) then
start_a = (i-1)*lda
end_a = i*lda -mat%IA1(i)+mat%K-lda
j = 1
do while((start_a + j).le.end_a)
y(start_y +j) = y(start_y +j) &
+ mat%A(start_a+j) * x(start_x +j)
y(start_x +j) = y(start_x +j) &
+ conjg (mat%A(start_a+j)) * x(start_y +j)
j = j+1
end do
else if (mat%IA1(i).eq.0) then
start_a = (i-1)*lda
end_a = i*lda
j = 1
do while((start_a + j).le.end_a)
y(start_y+j) = y(start_y+j) &
+ mat%A(start_a+j) * x(start_x+j)
j = j+1
end do
else
cycle
end if
end do
else !(part.eq.'L')
do i=1,ndiag
start_x = max(0,mat%IA1(i))
start_y = max(0,-mat%IA1(i))
if (mat%IA1(i).lt.0) then
start_a = (i-1)*lda -mat%IA1(i)-mat%M+lda
end_a = i*lda
j = 1
do while((start_a + j).le.end_a)
y(start_y+j) = y(start_y+j) &
+ mat%A(start_a+j) * x(start_x+j)
y(start_x +j) = y(start_x+j) &
+ conjg (mat%A(start_a+j)) * x(start_y+j)
j = j+1
end do
else if (mat%IA1(i).eq.0) then
start_a = (i-1)*lda
end_a = i*lda
j = 1
do while((start_a + j).le.end_a)
y(start_y+j) = y(start_y+j) &
+ mat%A(start_a+j) * x(start_x+j)
j = j+1
end do
else
cycle
end if
end do
end if
ierr = 0
else !no symmetry
do i=1,ndiag
start_x = max(0,mat%IA1(i))
start_y = max(0,-mat%IA1(i))
if (mat%IA1(i).gt.mat%K-lda) then
start_a = (i-1)*lda
end_a = i*lda -mat%IA1(i)+mat%K-lda
else if (mat%IA1(i).lt.-mat%M+lda) then
start_a = (i-1)*lda -mat%IA1(i)-mat%M+lda
end_a = i*lda
else
start_a = (i-1)*lda
end_a = i*lda
end if
j = 1
do while((start_a + j).le.end_a)
y(start_y+j) = y(start_y+j) &
+ mat%A(start_a+j) * x(start_x+j)
j = j+1
end do
end do
ierr = 0
end if
end subroutine zrmbv_dia
! **********************************************************************
! **********************************************************************
end module mod_rmbv_dia
|
{"hexsha": "96fe9d5dddb31cb8912642c0cd7b94b4809e72d7", "size": 29998, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "ext/SOFTWARE/rmbv_dia.f90", "max_stars_repo_name": "ittnas/qsc", "max_stars_repo_head_hexsha": "5e23c7f0c2985d79b966f8301a17c2679a5f2c8d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-09-23T09:41:11.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-23T09:41:11.000Z", "max_issues_repo_path": "ext/SOFTWARE/rmbv_dia.f90", "max_issues_repo_name": "ittnas/qsc", "max_issues_repo_head_hexsha": "5e23c7f0c2985d79b966f8301a17c2679a5f2c8d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ext/SOFTWARE/rmbv_dia.f90", "max_forks_repo_name": "ittnas/qsc", "max_forks_repo_head_hexsha": "5e23c7f0c2985d79b966f8301a17c2679a5f2c8d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.0035005834, "max_line_length": 72, "alphanum_fraction": 0.3825921728, "num_tokens": 8304}
|
from __future__ import print_function, division
import numpy as np
import Nio
fn = "MSG3-SEVI-MSG15-0100-NA-20130521001244.164000000Z-1074164.h5"
opt = Nio.options()
opt.FileStructure = 'advanced'
f = Nio.open_file(fn, "r", options=opt)
#f = Nio.open_file(fn)
print(list(f.variables.keys()))
#print f.groups
#n = 0
#for key in f.groups.keys():
# n += 1
# print "groun %d: <%s>" %(n, key)
#g = f.groups['/U_MARF/MSG/Level1_5/DATA/Channel_07']
g = f.groups['U-MARF/MSG/Level1.5/DATA/Channel 07']
print(g)
palette = g.variables['Palette']
print(palette)
print("\nLineSideInfo_DESCR:")
lsid = g.variables['LineSideInfo_DESCR'][:]
print(lsid[:])
dims = lsid.shape
for n in range(dims[0]):
name = str(lsid[:][n][0])
value = str(lsid[:][n][1])
print("name %3d: %40s, value %3d: %20s" %(n, name, n, value))
print("\nPacketHeader_DESCR:")
phd = g.variables['PacketHeader_DESCR']
print(phd)
dims = phd.shape
for n in range(dims[0]):
name = str(phd[:][n][0])
value = str(phd[:][n][1])
print("name %3d: %25s, value %3d: %40s" %(n, name, n, value))
print("\nPacketHeader_DESCR:")
pha = g.variables['PacketHeader_ARRAY']
print(pha)
dims = pha.shape
for n in range(0, dims[0], 200):
name = str(pha[:][n][0])
value = str(pha[:][n][1])
print("name %5d: %25s, value %5d: %40s" %(n, name, n, value))
lsia = g.variables['LineSideInfo_ARRAY']
print(lsia)
dims = lsia.shape
for n in range(0, dims[0], 200):
field_0 = str(lsia[:][n][0])
field_1 = str(lsia[:][n][1])
field_2 = str(lsia[:][n][2])
print("No. %5d: field_0: %20s, field_1: %20s, field_2: %20s" %(n, field_0, field_1, field_2))
|
{"hexsha": "10b657712a5eb9a7ce1bfaaeff859e273f6970bc", "size": 1637, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/read_h5_compound.py", "max_stars_repo_name": "briandobbins/pynio", "max_stars_repo_head_hexsha": "1dd5fc0fc133f2b8d329ae68929bd3c6c1c5fa7c", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/read_h5_compound.py", "max_issues_repo_name": "briandobbins/pynio", "max_issues_repo_head_hexsha": "1dd5fc0fc133f2b8d329ae68929bd3c6c1c5fa7c", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/read_h5_compound.py", "max_forks_repo_name": "briandobbins/pynio", "max_forks_repo_head_hexsha": "1dd5fc0fc133f2b8d329ae68929bd3c6c1c5fa7c", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.1846153846, "max_line_length": 97, "alphanum_fraction": 0.6316432498, "include": true, "reason": "import numpy", "num_tokens": 607}
|
% Options for packages loaded elsewhere
\PassOptionsToPackage{unicode}{hyperref}
\PassOptionsToPackage{hyphens}{url}
%
\documentclass[
]{article}
\usepackage{lmodern}
\usepackage{amssymb,amsmath}
\usepackage{ifxetex,ifluatex}
\ifnum 0\ifxetex 1\fi\ifluatex 1\fi=0 % if pdftex
\usepackage[T1]{fontenc}
\usepackage[utf8]{inputenc}
\usepackage{textcomp} % provide euro and other symbols
\else % if luatex or xetex
\usepackage{unicode-math}
\defaultfontfeatures{Scale=MatchLowercase}
\defaultfontfeatures[\rmfamily]{Ligatures=TeX,Scale=1}
\fi
% Use upquote if available, for straight quotes in verbatim environments
\IfFileExists{upquote.sty}{\usepackage{upquote}}{}
\IfFileExists{microtype.sty}{% use microtype if available
\usepackage[]{microtype}
\UseMicrotypeSet[protrusion]{basicmath} % disable protrusion for tt fonts
}{}
\makeatletter
\@ifundefined{KOMAClassName}{% if non-KOMA class
\IfFileExists{parskip.sty}{%
\usepackage{parskip}
}{% else
\setlength{\parindent}{0pt}
\setlength{\parskip}{6pt plus 2pt minus 1pt}}
}{% if KOMA class
\KOMAoptions{parskip=half}}
\makeatother
\usepackage{xcolor}
\IfFileExists{xurl.sty}{\usepackage{xurl}}{} % add URL line breaks if available
\IfFileExists{bookmark.sty}{\usepackage{bookmark}}{\usepackage{hyperref}}
\hypersetup{
hidelinks,
pdfcreator={LaTeX via pandoc}}
\urlstyle{same} % disable monospaced font for URLs
\setlength{\emergencystretch}{3em} % prevent overfull lines
\providecommand{\tightlist}{%
\setlength{\itemsep}{0pt}\setlength{\parskip}{0pt}}
\setcounter{secnumdepth}{-\maxdimen} % remove section numbering
\ifluatex
\usepackage{selnolig} % disable illegal ligatures
\fi
\author{}
\date{}
\begin{document}
\hypertarget{honors-seminar-in-machine-learning}{%
\section{Honors Seminar in Machine
Learning}\label{honors-seminar-in-machine-learning}}
Math 3094, Spring Semester 2021, University of Connecticut
\hypertarget{instructors}{%
\subsubsection{Instructors}\label{instructors}}
\begin{itemize}
\tightlist
\item
\href{mailto:khlee@math.uconn.edu}{Kyu-Hwan Lee}
\item
\href{mailto:jeremy.teitelbaum@uconn.edu}{Jeremy Teitelbaum}
\end{itemize}
\hypertarget{introduction}{%
\subsubsection{Introduction}\label{introduction}}
The interdisciplinary field known as Machine Learning or Data Science
draws together techniques from computer science, mathematics, and
statistics to extract meaning from data. In this course, we will discuss
some of the essential mathematical ideas in this field.
While our focus will be on the role of Calculus, Probability, and Linear
Algebra, we will introduce computational techniques using Python and the
Jupyter notebook environment, and some ideas from statistics, in order
to closely link theory and practice.
\hypertarget{schedule}{%
\subsubsection{Schedule}\label{schedule}}
The course will meet synchronously online on Tuesdays and Thursdays from
11:00 to 12:15 EST.
\hypertarget{topics}{%
\subsubsection{Topics}\label{topics}}
Topics will include Linear Regression, Gradient Descent, Logistic
Regression, Principal Component Analysis, and others as time permits.
The course will include both (online) lectures and lab sessions.
\hypertarget{assessment}{%
\subsubsection{Assessment}\label{assessment}}
Students will be expected to complete two projects, one due at midterm
time and one by the final. The final project may be a
continuation/extension of the midterm project. A typical project will be
an example data analysis written up using the Jupyter notebook. Projects
may be done individually or in groups of up to three people.
\hypertarget{resources}{%
\subsubsection{Resources}\label{resources}}
We will use the \href{http://campuswire.com}{Campus Wire} platform for
online help and discussions. Students enrolled in the course should
receive an electronic invite to the forum. Contact one of the professors
if you need access.
We will rely on the Python programming language, the Anaconda open
source data science platform, and the Jupyter notebook environment for
our computer work. All of this software can be obtained for Linux, Mac,
or Windows from the Anaconda website:
\href{http://www.anaconda.com}{www.anaconda.com}.
A very brief guide to installing the software is
\href{installing.md}{available here}.
There is no official textbook for the course. We will be providing notes
as we progress. The following texts may be useful as references.
\begin{itemize}
\item
James, Witten, Hastie, Tibshirani. An Introduction to Statistical
Learning (with Applications in R). This is an introductory text on
machine learning with a more statistical emphasis than our course, and
with computer examples in R instead of Python. It is an excellent and
informative work, and it is \href{https://statlearning.com/}{available
for free} from the book home page.
\item
Bass, Alonso-Ruiz, Baudoin, et al.\\
\href{https://probability.oer.math.uconn.edu/3160-oer/}{UConn's Open
Undergraduate Probability Text}. This is the (open source) textbook
for UConn's undergraduate probability course, Math 3160.
\item
Boyd, S. and Vandenberghe, L.
\href{https://web.stanford.edu/~boyd/vmls/}{Introduction to Applied
Linear Algebra}. This is a (free) introductory text on Linear Algebra
with a focus on applications, especially to Least Squares.
\item
Treil, S.
\href{https://www.math.brown.edu/streil/papers/LADW/LADW.html}{Linear
Algebra Done Wrong}. This is a more theoretical linear algebra text
that treats important topics such as inner product spaces.
\item
Bishop, C.
\href{https://www.microsoft.com/en-us/research/people/cmbishop/prml-book/}{Pattern
Recognition and Machine Learning}. This is a (free) comprehensive look
at machine learning; it claims to be aimed at ``advanced
undergraduates or first year PhD students'' but is technically
demanding.
\end{itemize}
\hypertarget{policy-statements}{%
\subsection{Policy Statements}\label{policy-statements}}
\hypertarget{academic-integrity}{%
\subsubsection{Academic Integrity}\label{academic-integrity}}
Students are bound by the university's policies on academic integrity.
\hypertarget{students-with-disabilities}{%
\subsubsection{Students with
disabilities}\label{students-with-disabilities}}
Students with disabilities should contact one of the instructors as soon
as possible to discuss any accommodations needed during the semester due
to a documented disability. If you have a documented disability for
which you wish to request academic accommodations and have not contacted
the Center for Students with Disabilities, please do so as soon as
possible. The CSD is located in Wilbur Cross, Room 204 and can be
reached at (860) 486-2020 or at csd@uconn.edu. Detailed information
regarding the process to request accommodations is available on the CSD
website at www.csd.uconn.edu.
\hypertarget{policy-against-discrimination-harassment-and-inappropriate-romantic-relationships}{%
\subsubsection{Policy Against Discrimination, Harassment and
Inappropriate Romantic
Relationships}\label{policy-against-discrimination-harassment-and-inappropriate-romantic-relationships}}
The University is committed to maintaining an environment free of
discrimination or discriminatory harassment directed toward any person
or group within its community -- students, employees, or visitors.
Academic and professional excellence can flourish only when each member
of our community is assured an atmosphere of mutual respect. All members
of the University community are responsible for the maintenance of an
academic and work environment in which people are free to learn and work
without fear of discrimination or discriminatory harassment. In
addition, inappropriate Romantic relationships can undermine the
University's mission when those in positions of authority abuse or
appear to abuse their authority. To that end, and in accordance with
federal and state law, the University prohibits discrimination and
discriminatory harassment, as well as inappropriate Romantic
relationships, and such behavior will be met with appropriate
disciplinary action, up to and including dismissal from the University.
More information is available at http://policy.uconn.edu/?p=2884.
\hypertarget{sexual-assault-reporting-policy}{%
\subsubsection{Sexual Assault Reporting
Policy}\label{sexual-assault-reporting-policy}}
To protect the campus community, all non-confidential University
employees (including faculty) are required to report assaults they
witness or are told about to the Office of Diversity \& Equity under the
Sexual Assault Response Policy. The University takes all reports with
the utmost seriousness. Please be aware that while the information you
provide will remain private, it will not be confidential and will be
shared with University officials who can help. More information is
available at http://sexualviolence.uconn.edu/.
\end{document}
|
{"hexsha": "d1c280823c37eff753ab576139fba2740175efdd", "size": 8901, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "docs/syllabus.tex", "max_stars_repo_name": "acs14007/Math-3094-Spring-2021", "max_stars_repo_head_hexsha": "0a16c4256f8fe08eda841b740d0c02b75005d049", "max_stars_repo_licenses": ["CC-BY-4.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-01-11T02:50:17.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-11T02:50:17.000Z", "max_issues_repo_path": "docs/syllabus.tex", "max_issues_repo_name": "acs14007/Math-3094-Spring-2021", "max_issues_repo_head_hexsha": "0a16c4256f8fe08eda841b740d0c02b75005d049", "max_issues_repo_licenses": ["CC-BY-4.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-01-20T14:25:07.000Z", "max_issues_repo_issues_event_max_datetime": "2021-01-25T20:38:04.000Z", "max_forks_repo_path": "docs/syllabus.tex", "max_forks_repo_name": "acs14007/Math-3094-Spring-2021", "max_forks_repo_head_hexsha": "0a16c4256f8fe08eda841b740d0c02b75005d049", "max_forks_repo_licenses": ["CC-BY-4.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-01-11T02:50:44.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-11T02:50:44.000Z", "avg_line_length": 41.0184331797, "max_line_length": 104, "alphanum_fraction": 0.7965397146, "num_tokens": 2200}
|
subroutine pgenhr(jj)
!! ~ ~ ~ PURPOSE ~ ~ ~
!! this subroutine distributes daily rainfall exponentially within the day
!! ~ ~ ~ INCOMING VARIABLES ~ ~ ~
!! name |units |definition
!! ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
!! amp_r(:,:) |none |alpha factor for rain(mo max 0.5h rain)
!! hru_km(:) |km^2 |area of HRU in square kilometers
!! hru_sub(:) |none |subbasin in which HRU is located
!! idg(:) |none |array location of random number seed
!! |used for a given process
!! idt |minutes |length of time step used to report
!! |precipitation data for sub-daily modeling
!! jj |none |HRU number
!! i_mo |none |month being simulated
!! rndseed(:,:) |none |random number generator seed
!! subp(:) |mm H2O |precipitation for the day in HRU
!! tconc(:) |hr |time of concentration for HRU
!! ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
!! ~ ~ ~ OUTGOING VARIABLES ~ ~ ~
!! name |units |definition
!! ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
!! rainsub(:) |mm H2O |rainfall during time step
!! ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
!! ~ ~ ~ LOCAL DEFINITIONS ~ ~ ~
!! name |units |definition
!! ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
!! ab |mm H2O |lowest value al5 can have
!! ajp |mm H2O |highest value al5 can have
!! al5 |none |fraction of total rainfall on day that occurs
!! |during 0.5h highest intensity rainfall
!! altc |none |equation coefficient
!! blm |none |lowest random number value allowed
!! dur |hours |duration of storm during day
!! ihour |none |counter
!! itime |none |time step during day
!! j |none |HRU number
!! k |none |random number seed, counter
!! nhour |none |number of time steps per hour
!! pkrain |mm H2O |volume of rain at time of peak rainfall
!! pkrr |mm/hr |peak rainfall rate
!! pt |min |time during day
!! qmn |none |mean random number value
!! rtp |min |time of peak rainfall rate
!! rx |mm H2O |total rainfall at end of time step
!! sumrain |mm H2O |total amount of daily rainfall prior to
!! |time step
!! uplm |none |highest random number value
!! vv |none |random number between 0.0 and 1.0 that
!! |represents time to peak rainfall rate
!! |expressed as a fraction of storm duration
!! xk1 |none |1st constant in dimensionless exponential
!! |rainfall distribution
!! xk2 |none |2nd constant in dimensionless exponential
!! |rainfall distribution
!! xkp1 |hr |1st constant in exponential rainfall
!! |distribution
!! xkp2 |hr |2nd constant in exponential rainfall
!! |distribution
!! ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
!! ~ ~ ~ SUBROUTINES/FUNCTIONS CALLED ~ ~ ~
!! Intrinsic: Log
!! SWAT: Atri, Expo
!! ~ ~ ~ ~ ~ ~ END SPECIFICATIONS ~ ~ ~ ~ ~ ~
use parm
integer, intent (in) :: jj
integer :: itime, pt, ihour, nhour, k
real :: vv, blm, qmn, uplm, dur, ab, ajp, altc, pkrain, rtp
real :: xk1, xk2, xkp1, xkp2, rx, pkrr, sumrain
!! calculate maximum half-hour rainfall
ab = 0.02083
ajp = 0.
al5 = 0.
ajp = 1. - Expo(-125. / (subp(jj) + 5.))
al5 = Atri(ab, amp_r(i_mo,hru_sub(jj)), ajp, rndseed(10,jj))
!! need peak rainfall rate
!! calculate peak rate using same method as that for peak runoff
altc = 0.
pkrr = 0.
altc = 1. - Expo(2. * tconc(jj) * Log(1. - al5))
pkrr = altc * subp(jj) / tconc(jj) !! mm/h
!! generate random number between 0.0 and 1.0
!! because all input set to constant value, vv always the same
!! vv => time to peak expressed as fraction of total storm duration
vv = 0.
blm = 0.05
qmn = 0.25
uplm = 0.95
k = 8
vv = Atri(blm, qmn, uplm, k)
!vv = 0.03
!! calculate storm duration
xk1 = 0.
xk2 = 0.
dur = 0.
xk1 = vv / 4.605
xk2 = (1.- vv) / 4.605
dur = subp(jj) / (pkrr * (xk1 + xk2))
if (dur > 24.0) then
dur = 24.0
pkrr = subp(jj) / (dur * (xk1 + xk2))
end if
!! calculate amount of total rainfall fallen at time of peak
!! rainfall and time of peak rainfall in units of minutes
pkrain = 0.
rtp = 0.
pkrain = vv * subp(jj)
rtp = vv * dur * 60
!! calculate constants for exponential rainfall distribution
!! equation
xkp1 = 0.
xkp2 = 0.
xkp1 = dur * xk1
xkp2 = dur * xk2
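      !! illustrative note (editorial addition): with t in minutes, the
      !! cumulative rainfall before the peak follows
      !!   r(t) = pkrain - pkrr*xkp1*(1 - exp((t - rtp)/(60*xkp1)))
      !! and after the peak
      !!   r(t) = pkrain + pkrr*xkp2*(1 - exp((rtp - t)/(60*xkp2)))
      !! each time step below receives the increment r(t) - r(t - idt)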
pt = 0
pt = idt
itime = 1
sumrain = 0.
!! do before time of peak rainfall
!! do while pt less than rtp
do
if (pt >= Int(rtp)) exit
rx = 0.
rx = pkrain - pkrr * xkp1 *
& (1. - Exp((Real(pt) - rtp) / (60. * xkp1)))
rainsub(jj,itime) = rx - sumrain
pt = pt + idt
itime = itime + 1
if (itime > nstep) exit
sumrain = 0.
sumrain = rx
end do
!! after peak rainfall and before end of storm
do
if (pt >= Int(dur * 60.)) exit
rx = 0.
rx = pkrain + pkrr * xkp2 *
& (1. - Exp((rtp - Real(pt)) / (60. * xkp2)))
rainsub(jj,itime) = rx - sumrain
pt = pt + idt
itime = itime + 1
if (itime > nstep) exit
sumrain = 0.
sumrain = rx
end do
!! at end of storm
if (subp(jj) > sumrain .and. itime <= nstep) then
rainsub(jj,itime) = subp(jj) - sumrain
end if
return
end
|
{"hexsha": "def25d6a09ecec6b2d5151fbc9e90f84ce6fb514", "size": 6853, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "src/pgenhr.f", "max_stars_repo_name": "allixender/swatmodel-trusty64", "max_stars_repo_head_hexsha": "dd797671a5b7bd2c9c00f89dc62541a72450be4b", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/pgenhr.f", "max_issues_repo_name": "allixender/swatmodel-trusty64", "max_issues_repo_head_hexsha": "dd797671a5b7bd2c9c00f89dc62541a72450be4b", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/pgenhr.f", "max_forks_repo_name": "allixender/swatmodel-trusty64", "max_forks_repo_head_hexsha": "dd797671a5b7bd2c9c00f89dc62541a72450be4b", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2016-05-29T14:04:50.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-09T15:52:44.000Z", "avg_line_length": 39.8430232558, "max_line_length": 81, "alphanum_fraction": 0.4303224865, "num_tokens": 2011}
|
"""PyMC3-ArviZ conversion code."""
import logging
import warnings
from typing import ( # pylint: disable=unused-import
TYPE_CHECKING,
Any,
Dict,
Iterable,
List,
Mapping,
Optional,
Tuple,
Union,
)
import numpy as np
import xarray as xr
from aesara.graph.basic import Constant
from aesara.tensor.sharedvar import SharedVariable
from aesara.tensor.subtensor import AdvancedIncSubtensor
from arviz import InferenceData, concat, rcParams
from arviz.data.base import CoordSpec, DimSpec
from arviz.data.base import dict_to_dataset as _dict_to_dataset
from arviz.data.base import generate_dims_coords, make_attrs, requires
import pymc3
from pymc3.aesaraf import extract_obs_data
from pymc3.distributions import logpt
from pymc3.model import modelcontext
from pymc3.util import get_default_varnames
if TYPE_CHECKING:
from typing import Set # pylint: disable=ungrouped-imports
from pymc3.backends.base import MultiTrace # pylint: disable=invalid-name
from pymc3.model import Model
___all__ = [""]
_log = logging.getLogger("pymc3")
# random variable object ...
Var = Any # pylint: disable=invalid-name
class _DefaultTrace:
"""
Utility for collecting samples into a dictionary.
Name comes from its similarity to ``defaultdict``:
entries are lazily created.
Parameters
----------
samples : int
The number of samples that will be collected, per variable,
into the trace.
Attributes
----------
trace_dict : Dict[str, np.ndarray]
A dictionary constituting a trace. Should be extracted
after a procedure has filled the `_DefaultTrace` using the
`insert()` method
"""
trace_dict: Dict[str, np.ndarray] = {}
_len: Optional[int] = None
def __init__(self, samples: int):
self._len = samples
self.trace_dict = {}
def insert(self, k: str, v, idx: int):
"""
Insert `v` as the value of the `idx`th sample for the variable `k`.
Parameters
----------
k: str
Name of the variable.
v: anything that can go into a numpy array (including a numpy array)
The value of the `idx`th sample from variable `k`
        idx: int
The index of the sample we are inserting into the trace.
"""
value_shape = np.shape(v)
# initialize if necessary
if k not in self.trace_dict:
array_shape = (self._len,) + value_shape
self.trace_dict[k] = np.empty(array_shape, dtype=np.array(v).dtype)
# do the actual insertion
if value_shape == ():
self.trace_dict[k][idx] = v
else:
self.trace_dict[k][idx, :] = v
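# Hedged usage sketch (editorial addition; the variable names are
# illustrative, not part of this module). Storage is allocated lazily on the
# first insert for each key:
#
#     trace = _DefaultTrace(samples=100)
#     for i in range(100):
#         trace.insert("mu", np.random.normal(size=3), i)
#     trace.trace_dict["mu"].shape  # -> (100, 3)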
def dict_to_dataset(
data,
library=None,
coords=None,
dims=None,
attrs=None,
default_dims=None,
skip_event_dims=None,
index_origin=None,
):
"""Temporal workaround for dict_to_dataset.
Once ArviZ>0.11.2 release is available, only two changes are needed for everything to work.
1) this should be deleted, 2) dict_to_dataset should be imported as is from arviz, no underscore,
also remove unnecessary imports
"""
if default_dims is None:
return _dict_to_dataset(
data, library=library, coords=coords, dims=dims, skip_event_dims=skip_event_dims
)
else:
out_data = {}
for name, vals in data.items():
vals = np.atleast_1d(vals)
val_dims = dims.get(name)
val_dims, coords = generate_dims_coords(vals.shape, name, dims=val_dims, coords=coords)
coords = {key: xr.IndexVariable((key,), data=coords[key]) for key in val_dims}
out_data[name] = xr.DataArray(vals, dims=val_dims, coords=coords)
return xr.Dataset(data_vars=out_data, attrs=make_attrs(library=library))
class InferenceDataConverter: # pylint: disable=too-many-instance-attributes
"""Encapsulate InferenceData specific logic."""
model = None # type: Optional[Model]
nchains = None # type: int
ndraws = None # type: int
    posterior_predictive = None  # type: Optional[Mapping[str, np.ndarray]]
    predictions = None  # type: Optional[Mapping[str, np.ndarray]]
    prior = None  # type: Optional[Mapping[str, np.ndarray]]
def __init__(
self,
*,
trace=None,
prior=None,
posterior_predictive=None,
log_likelihood=True,
predictions=None,
coords: Optional[CoordSpec] = None,
dims: Optional[DimSpec] = None,
model=None,
save_warmup: Optional[bool] = None,
density_dist_obs: bool = True,
index_origin: Optional[int] = None,
):
self.save_warmup = rcParams["data.save_warmup"] if save_warmup is None else save_warmup
self.trace = trace
        # this permits us to get the model from the function argument or from the enclosing "with model:" context
self.model = modelcontext(model)
self.attrs = None
if trace is not None:
self.nchains = trace.nchains if hasattr(trace, "nchains") else 1
if hasattr(trace.report, "n_draws") and trace.report.n_draws is not None:
self.ndraws = trace.report.n_draws
self.attrs = {
"sampling_time": trace.report.t_sampling,
"tuning_steps": trace.report.n_tune,
}
else:
self.ndraws = len(trace)
if self.save_warmup:
warnings.warn(
"Warmup samples will be stored in posterior group and will not be"
" excluded from stats and diagnostics."
" Do not slice the trace manually before conversion",
UserWarning,
)
self.ntune = len(self.trace) - self.ndraws
self.posterior_trace, self.warmup_trace = self.split_trace()
else:
self.nchains = self.ndraws = 0
self.prior = prior
self.posterior_predictive = posterior_predictive
self.log_likelihood = log_likelihood
self.predictions = predictions
self.index_origin = rcParams["data.index_origin"] if index_origin is None else index_origin
def arbitrary_element(dct: Dict[Any, np.ndarray]) -> np.ndarray:
return next(iter(dct.values()))
if trace is None:
# if you have a posterior_predictive built with keep_dims,
# you'll lose here, but there's nothing I can do about that.
self.nchains = 1
get_from = None
if predictions is not None:
get_from = predictions
elif posterior_predictive is not None:
get_from = posterior_predictive
elif prior is not None:
get_from = prior
if get_from is None:
# pylint: disable=line-too-long
raise ValueError(
"When constructing InferenceData must have at least"
" one of trace, prior, posterior_predictive or predictions."
)
aelem = arbitrary_element(get_from)
self.ndraws = aelem.shape[0]
self.coords = {} if coords is None else coords
if hasattr(self.model, "coords"):
self.coords = {**self.model.coords, **self.coords}
self.coords = {key: value for key, value in self.coords.items() if value is not None}
self.dims = {} if dims is None else dims
if hasattr(self.model, "RV_dims"):
model_dims = {
var_name: [dim for dim in dims if dim is not None]
for var_name, dims in self.model.RV_dims.items()
}
self.dims = {**model_dims, **self.dims}
self.density_dist_obs = density_dist_obs
self.observations = self.find_observations()
def find_observations(self) -> Optional[Dict[str, Var]]:
"""If there are observations available, return them as a dictionary."""
if self.model is None:
return None
observations = {}
for obs in self.model.observed_RVs:
aux_obs = getattr(obs.tag, "observations", None)
if aux_obs is not None:
try:
obs_data = extract_obs_data(aux_obs)
observations[obs.name] = obs_data
except TypeError:
warnings.warn(f"Could not extract data from symbolic observation {obs}")
else:
warnings.warn(f"No data for observation {obs}")
return observations
def split_trace(self) -> Tuple[Union[None, "MultiTrace"], Union[None, "MultiTrace"]]:
"""Split MultiTrace object into posterior and warmup.
Returns
-------
trace_posterior: MultiTrace or None
The slice of the trace corresponding to the posterior. If the posterior
trace is empty, None is returned
trace_warmup: MultiTrace or None
The slice of the trace corresponding to the warmup. If the warmup trace is
empty or ``save_warmup=False``, None is returned
"""
trace_posterior = None
trace_warmup = None
if self.save_warmup and self.ntune > 0:
trace_warmup = self.trace[: self.ntune]
if self.ndraws > 0:
trace_posterior = self.trace[self.ntune :]
return trace_posterior, trace_warmup
def log_likelihood_vals_point(self, point, var, log_like_fun):
"""Compute log likelihood for each observed point."""
# TODO: This is a cheap hack; we should filter-out the correct
# variables some other way
point = {i.name: point[i.name] for i in log_like_fun.f.maker.inputs if i.name in point}
log_like_val = np.atleast_1d(log_like_fun(point))
if isinstance(var.owner.op, AdvancedIncSubtensor):
            try:
                obs_data = extract_obs_data(var.tag.observations)
            except TypeError:
                warnings.warn(f"Could not extract data from symbolic observation {var}")
                return log_like_val
mask = obs_data.mask
if np.ndim(mask) > np.ndim(log_like_val):
mask = np.any(mask, axis=-1)
log_like_val = np.where(mask, np.nan, log_like_val)
return log_like_val
def _extract_log_likelihood(self, trace):
"""Compute log likelihood of each observation."""
if self.trace is None:
return None
if self.model is None:
return None
if self.log_likelihood is True:
cached = [(var, self.model.fn(logpt(var))) for var in self.model.observed_RVs]
else:
cached = [
(var, self.model.fn(logpt(var)))
for var in self.model.observed_RVs
if var.name in self.log_likelihood
]
log_likelihood_dict = _DefaultTrace(len(trace.chains))
for var, log_like_fun in cached:
for k, chain in enumerate(trace.chains):
log_like_chain = [
self.log_likelihood_vals_point(point, var, log_like_fun)
for point in trace.points([chain])
]
log_likelihood_dict.insert(var.name, np.stack(log_like_chain), k)
return log_likelihood_dict.trace_dict
@requires("trace")
def posterior_to_xarray(self):
"""Convert the posterior to an xarray dataset."""
var_names = get_default_varnames(self.trace.varnames, include_transformed=False)
data = {}
data_warmup = {}
for var_name in var_names:
if self.warmup_trace:
data_warmup[var_name] = np.array(
self.warmup_trace.get_values(var_name, combine=False, squeeze=False)
)
if self.posterior_trace:
data[var_name] = np.array(
self.posterior_trace.get_values(var_name, combine=False, squeeze=False)
)
return (
dict_to_dataset(
data,
library=pymc3,
coords=self.coords,
dims=self.dims,
attrs=self.attrs,
index_origin=self.index_origin,
),
dict_to_dataset(
data_warmup,
library=pymc3,
coords=self.coords,
dims=self.dims,
attrs=self.attrs,
index_origin=self.index_origin,
),
)
@requires("trace")
def sample_stats_to_xarray(self):
"""Extract sample_stats from PyMC3 trace."""
rename_key = {
"model_logp": "lp",
"mean_tree_accept": "acceptance_rate",
"depth": "tree_depth",
"tree_size": "n_steps",
}
data = {}
data_warmup = {}
for stat in self.trace.stat_names:
name = rename_key.get(stat, stat)
if name == "tune":
continue
if self.warmup_trace:
data_warmup[name] = np.array(
self.warmup_trace.get_sampler_stats(stat, combine=False)
)
if self.posterior_trace:
data[name] = np.array(self.posterior_trace.get_sampler_stats(stat, combine=False))
return (
dict_to_dataset(
data,
library=pymc3,
dims=None,
coords=self.coords,
attrs=self.attrs,
index_origin=self.index_origin,
),
dict_to_dataset(
data_warmup,
library=pymc3,
dims=None,
coords=self.coords,
attrs=self.attrs,
index_origin=self.index_origin,
),
)
@requires("trace")
@requires("model")
def log_likelihood_to_xarray(self):
"""Extract log likelihood and log_p data from PyMC3 trace."""
if self.predictions or not self.log_likelihood:
return None
data_warmup = {}
data = {}
warn_msg = (
"Could not compute log_likelihood, it will be omitted. "
"Check your model object or set log_likelihood=False"
)
if self.posterior_trace:
try:
data = self._extract_log_likelihood(self.posterior_trace)
except TypeError:
warnings.warn(warn_msg)
if self.warmup_trace:
try:
data_warmup = self._extract_log_likelihood(self.warmup_trace)
except TypeError:
warnings.warn(warn_msg)
return (
dict_to_dataset(
data,
library=pymc3,
dims=self.dims,
coords=self.coords,
skip_event_dims=True,
index_origin=self.index_origin,
),
dict_to_dataset(
data_warmup,
library=pymc3,
dims=self.dims,
coords=self.coords,
skip_event_dims=True,
index_origin=self.index_origin,
),
)
def translate_posterior_predictive_dict_to_xarray(self, dct) -> xr.Dataset:
"""Take Dict of variables to numpy ndarrays (samples) and translate into dataset."""
data = {}
for k, ary in dct.items():
shape = ary.shape
if shape[0] == self.nchains and shape[1] == self.ndraws:
data[k] = ary
elif shape[0] == self.nchains * self.ndraws:
data[k] = ary.reshape((self.nchains, self.ndraws, *shape[1:]))
else:
data[k] = np.expand_dims(ary, 0)
# pylint: disable=line-too-long
_log.warning(
"posterior predictive variable %s's shape not compatible with number of chains and draws. "
"This can mean that some draws or even whole chains are not represented.",
k,
)
return dict_to_dataset(
data, library=pymc3, coords=self.coords, dims=self.dims, index_origin=self.index_origin
)
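    # Illustrative note (editorial addition): with nchains=2 and ndraws=500,
    # a flattened array of shape (1000, 3) is reshaped above to (2, 500, 3),
    # while an array already shaped (2, 500, 3) passes through unchanged;
    # anything else gets a leading chain dimension plus a warning.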
@requires(["posterior_predictive"])
def posterior_predictive_to_xarray(self):
"""Convert posterior_predictive samples to xarray."""
return self.translate_posterior_predictive_dict_to_xarray(self.posterior_predictive)
@requires(["predictions"])
def predictions_to_xarray(self):
"""Convert predictions (out of sample predictions) to xarray."""
return self.translate_posterior_predictive_dict_to_xarray(self.predictions)
def priors_to_xarray(self):
"""Convert prior samples (and if possible prior predictive too) to xarray."""
if self.prior is None:
return {"prior": None, "prior_predictive": None}
if self.observations is not None:
prior_predictive_vars = list(self.observations.keys())
prior_vars = [key for key in self.prior.keys() if key not in prior_predictive_vars]
else:
prior_vars = list(self.prior.keys())
prior_predictive_vars = None
priors_dict = {}
for group, var_names in zip(
("prior", "prior_predictive"), (prior_vars, prior_predictive_vars)
):
priors_dict[group] = (
None
if var_names is None
else dict_to_dataset(
{k: np.expand_dims(self.prior[k], 0) for k in var_names},
library=pymc3,
coords=self.coords,
dims=self.dims,
index_origin=self.index_origin,
)
)
return priors_dict
@requires("observations")
@requires("model")
def observed_data_to_xarray(self):
"""Convert observed data to xarray."""
if self.predictions:
return None
return dict_to_dataset(
self.observations,
library=pymc3,
coords=self.coords,
dims=self.dims,
default_dims=[],
index_origin=self.index_origin,
)
@requires(["trace", "predictions"])
@requires("model")
def constant_data_to_xarray(self):
"""Convert constant data to xarray."""
# For constant data, we are concerned only with deterministics and
# data. The constant data vars must be either pm.Data
# (TensorSharedVariable) or pm.Deterministic
constant_data_vars = {} # type: Dict[str, Var]
def is_data(name, var) -> bool:
assert self.model is not None
return (
var not in self.model.deterministics
and var not in self.model.observed_RVs
and var not in self.model.free_RVs
and var not in self.model.potentials
and (self.observations is None or name not in self.observations)
and isinstance(var, (Constant, SharedVariable))
)
# I don't know how to find pm.Data, except that they are named
# variables that aren't observed or free RVs, nor are they
# deterministics, and then we eliminate observations.
for name, var in self.model.named_vars.items():
if is_data(name, var):
constant_data_vars[name] = var
if not constant_data_vars:
return None
constant_data = {}
for name, vals in constant_data_vars.items():
if hasattr(vals, "get_value"):
vals = vals.get_value()
elif hasattr(vals, "data"):
vals = vals.data
constant_data[name] = vals
return dict_to_dataset(
constant_data,
library=pymc3,
coords=self.coords,
dims=self.dims,
default_dims=[],
index_origin=self.index_origin,
)
def to_inference_data(self):
"""Convert all available data to an InferenceData object.
Note that if groups can not be created (e.g., there is no `trace`, so
the `posterior` and `sample_stats` can not be extracted), then the InferenceData
will not have those groups.
"""
id_dict = {
"posterior": self.posterior_to_xarray(),
"sample_stats": self.sample_stats_to_xarray(),
"log_likelihood": self.log_likelihood_to_xarray(),
"posterior_predictive": self.posterior_predictive_to_xarray(),
"predictions": self.predictions_to_xarray(),
**self.priors_to_xarray(),
"observed_data": self.observed_data_to_xarray(),
}
if self.predictions:
id_dict["predictions_constant_data"] = self.constant_data_to_xarray()
else:
id_dict["constant_data"] = self.constant_data_to_xarray()
return InferenceData(save_warmup=self.save_warmup, **id_dict)
def to_inference_data(
trace: Optional["MultiTrace"] = None,
*,
prior: Optional[Dict[str, Any]] = None,
posterior_predictive: Optional[Dict[str, Any]] = None,
log_likelihood: Union[bool, Iterable[str]] = True,
coords: Optional[CoordSpec] = None,
dims: Optional[DimSpec] = None,
model: Optional["Model"] = None,
save_warmup: Optional[bool] = None,
density_dist_obs: bool = True,
) -> InferenceData:
"""Convert pymc3 data into an InferenceData object.
    The ``trace``, ``prior`` and ``posterior_predictive`` arguments are all
    optional, but at least one of them must be present.
For a usage example read the
:ref:`Creating InferenceData section on from_pymc3 <creating_InferenceData>`
Parameters
----------
trace : MultiTrace, optional
Trace generated from MCMC sampling. Output of
:func:`~pymc3.sampling.sample`.
prior : dict, optional
Dictionary with the variable names as keys, and values numpy arrays
containing prior and prior predictive samples.
posterior_predictive : dict, optional
Dictionary with the variable names as keys, and values numpy arrays
containing posterior predictive samples.
log_likelihood : bool or array_like of str, optional
List of variables to calculate `log_likelihood`. Defaults to True which calculates
`log_likelihood` for all observed variables. If set to False, log_likelihood is skipped.
coords : dict of {str: array-like}, optional
Map of coordinate names to coordinate values
dims : dict of {str: list of str}, optional
Map of variable names to the coordinate names to use to index its dimensions.
model : Model, optional
Model used to generate ``trace``. It is not necessary to pass ``model`` if in
``with`` context.
save_warmup : bool, optional
Save warmup iterations InferenceData object. If not defined, use default
defined by the rcParams.
density_dist_obs : bool, default True
Store variables passed with ``observed`` arg to
:class:`~pymc.distributions.DensityDist` in the generated InferenceData.
Returns
-------
arviz.InferenceData
"""
if isinstance(trace, InferenceData):
return trace
return InferenceDataConverter(
trace=trace,
prior=prior,
posterior_predictive=posterior_predictive,
log_likelihood=log_likelihood,
coords=coords,
dims=dims,
model=model,
save_warmup=save_warmup,
density_dist_obs=density_dist_obs,
).to_inference_data()
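# Minimal usage sketch (editorial addition; the model below is hypothetical):
#
#     with pymc3.Model():
#         mu = pymc3.Normal("mu", 0.0, 1.0)
#         pymc3.Normal("obs", mu, 1.0, observed=observed_values)
#         trace = pymc3.sample(return_inferencedata=False)
#         idata = to_inference_data(trace)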
### Later I could have this return ``None`` if the ``idata_orig`` argument is supplied. But
### perhaps we should have an inplace argument?
def predictions_to_inference_data(
predictions,
posterior_trace: Optional["MultiTrace"] = None,
model: Optional["Model"] = None,
coords: Optional[CoordSpec] = None,
dims: Optional[DimSpec] = None,
idata_orig: Optional[InferenceData] = None,
inplace: bool = False,
) -> InferenceData:
"""Translate out-of-sample predictions into ``InferenceData``.
Parameters
----------
predictions: Dict[str, np.ndarray]
The predictions are the return value of :func:`~pymc3.sample_posterior_predictive`,
a dictionary of strings (variable names) to numpy ndarrays (draws).
posterior_trace: MultiTrace
This should be a trace that has been thinned appropriately for
``pymc3.sample_posterior_predictive``. Specifically, any variable whose shape is
a deterministic function of the shape of any predictor (explanatory, independent, etc.)
variables must be *removed* from this trace.
model: Model
        The pymc3 model. It can be omitted if within a model context.
coords: Dict[str, array-like[Any]]
Coordinates for the variables. Map from coordinate names to coordinate values.
dims: Dict[str, array-like[str]]
Map from variable name to ordered set of coordinate names.
idata_orig: InferenceData, optional
If supplied, then modify this inference data in place, adding ``predictions`` and
(if available) ``predictions_constant_data`` groups. If this is not supplied, make a
fresh InferenceData
inplace: boolean, optional
If idata_orig is supplied and inplace is True, merge the predictions into idata_orig,
rather than returning a fresh InferenceData object.
Returns
-------
InferenceData:
May be modified ``idata_orig``.
"""
if inplace and not idata_orig:
raise ValueError(
"Do not pass True for inplace unless passing" "an existing InferenceData as idata_orig"
)
new_idata = InferenceDataConverter(
trace=posterior_trace,
predictions=predictions,
model=model,
coords=coords,
dims=dims,
log_likelihood=False,
).to_inference_data()
if idata_orig is None:
return new_idata
elif inplace:
concat([idata_orig, new_idata], dim=None, inplace=True)
return idata_orig
else:
# if we are not returning in place, then merge the old groups into the new inference
# data and return that.
concat([new_idata, idata_orig], dim=None, copy=True, inplace=True)
return new_idata
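# Hedged sketch (editorial addition; names are illustrative): after swapping
# the model's data containers for new predictors and calling
# pymc3.sample_posterior_predictive on a suitably thinned trace, pass the
# returned dict here:
#
#     idata = predictions_to_inference_data(preds, posterior_trace=thinned_trace)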
|
{"hexsha": "8a3f7b46cc7661169b766db44be9149c385a4836", "size": 26498, "ext": "py", "lang": "Python", "max_stars_repo_path": "pymc3/backends/arviz.py", "max_stars_repo_name": "akoscsiszar/pymc3", "max_stars_repo_head_hexsha": "52c842be508eb25a3a9c6df657b92c31f3b561a7", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-03-27T19:58:47.000Z", "max_stars_repo_stars_event_max_datetime": "2020-03-27T19:58:47.000Z", "max_issues_repo_path": "pymc3/backends/arviz.py", "max_issues_repo_name": "akoscsiszar/pymc3", "max_issues_repo_head_hexsha": "52c842be508eb25a3a9c6df657b92c31f3b561a7", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-06-13T12:12:35.000Z", "max_issues_repo_issues_event_max_datetime": "2020-06-13T12:12:35.000Z", "max_forks_repo_path": "pymc3/backends/arviz.py", "max_forks_repo_name": "akoscsiszar/pymc3", "max_forks_repo_head_hexsha": "52c842be508eb25a3a9c6df657b92c31f3b561a7", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.3211267606, "max_line_length": 111, "alphanum_fraction": 0.6025737792, "include": true, "reason": "import numpy,import pymc3,from pymc3", "num_tokens": 5695}
|
try:
import numpy as np
except ImportError:
np = None
try:
import pandas as pd
except ImportError:
pd = None
from tests.numpy.testcase import NumpyBaseTestCase
from clickhouse_driver import errors
ErrorCodes = errors.ErrorCodes
class NullableTestCase(NumpyBaseTestCase):
def test_simple(self):
columns = 'a Nullable(Int32)'
data = [np.array([3, None, 2], dtype=object)]
with self.create_table(columns):
self.client.execute(
'INSERT INTO test (a) VALUES', data, columnar=True
)
query = 'SELECT * FROM test'
inserted = self.emit_cli(query)
self.assertEqual(
inserted, '3\n\\N\n2\n'
)
inserted = self.client.execute(query, columnar=True)
self.assertArraysEqual(inserted[0], data[0])
self.assertEqual(inserted[0].dtype, object)
def test_simple_dataframe(self):
columns = (
'a Int64, '
'b Nullable(Float64), '
'c Nullable(String), '
'd Nullable(Int64)'
)
df = pd.DataFrame({
'a': [1, 2, 3],
'b': [1.0, None, np.nan],
'c': ['a', None, np.nan],
'd': [1, None, None],
}, dtype=object)
expected = pd.DataFrame({
'a': np.array([1, 2, 3], dtype=np.int64),
'b': np.array([1.0, None, np.nan], dtype=object),
'c': np.array(['a', None, None], dtype=object),
'd': np.array([1, None, None], dtype=object),
})
with self.create_table(columns):
rv = self.client.insert_dataframe('INSERT INTO test VALUES', df)
self.assertEqual(rv, 3)
df2 = self.client.query_dataframe('SELECT * FROM test ORDER BY a')
self.assertTrue(expected.equals(df2))
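# Illustrative note (editorial addition): the ClickHouse CLI renders SQL NULL
# as \N, which is why test_simple expects '3\n\\N\n2\n' for [3, None, 2].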
|
{"hexsha": "3aa3cf2c8619640596a0f410f6b96ea09dcb67d6", "size": 1866, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/numpy/columns/test_nullable.py", "max_stars_repo_name": "fasttrack-solutions/clickhouse-driver", "max_stars_repo_head_hexsha": "676dfb09f74b8b55bfecaedbe70ddc971e1badd7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 823, "max_stars_repo_stars_event_min_datetime": "2017-05-16T15:30:15.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T08:39:04.000Z", "max_issues_repo_path": "tests/numpy/columns/test_nullable.py", "max_issues_repo_name": "fasttrack-solutions/clickhouse-driver", "max_issues_repo_head_hexsha": "676dfb09f74b8b55bfecaedbe70ddc971e1badd7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 277, "max_issues_repo_issues_event_min_datetime": "2017-07-11T11:35:34.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-08T06:52:09.000Z", "max_forks_repo_path": "tests/numpy/columns/test_nullable.py", "max_forks_repo_name": "fasttrack-solutions/clickhouse-driver", "max_forks_repo_head_hexsha": "676dfb09f74b8b55bfecaedbe70ddc971e1badd7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 175, "max_forks_repo_forks_event_min_datetime": "2017-10-11T08:41:12.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-22T03:59:35.000Z", "avg_line_length": 29.619047619, "max_line_length": 78, "alphanum_fraction": 0.5407288317, "include": true, "reason": "import numpy", "num_tokens": 443}
|
# coding=utf-8
# Copyright 2018 The Dopamine Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The implicit quantile networks (IQN) agent.
The agent follows the description given in "Implicit Quantile Networks for
Distributional RL" (Dabney et. al, 2018).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from dopamine.jax import networks
from dopamine.jax.agents.dqn import dqn_agent
import gin
import jax
import jax.numpy as jnp
import numpy as onp
import optax
import tensorflow as tf
@functools.partial(
jax.vmap,
in_axes=(None, None, None, 0, 0, 0, None, None, None, None, None),
out_axes=(None, 0))
def target_quantile_values(network_def, online_params, target_params,
next_states, rewards, terminals,
num_tau_prime_samples, num_quantile_samples,
cumulative_gamma, double_dqn, rng):
"""Build the target for return values at given quantiles.
Args:
network_def: Linen Module used for inference.
online_params: Parameters used for the online network.
target_params: Parameters used for the target network.
next_states: numpy array of batched next states.
rewards: numpy array of batched rewards.
terminals: numpy array of batched terminals.
num_tau_prime_samples: int, number of tau' samples (static_argnum).
num_quantile_samples: int, number of quantile samples (static_argnum).
cumulative_gamma: float, cumulative gamma to use (static_argnum).
double_dqn: bool, whether to use double DQN (static_argnum).
rng: Jax random number generator.
Returns:
Jax random number generator.
The target quantile values.
"""
rewards = jnp.tile(rewards, [num_tau_prime_samples])
is_terminal_multiplier = 1. - terminals.astype(jnp.float32)
# Incorporate terminal state to discount factor.
gamma_with_terminal = cumulative_gamma * is_terminal_multiplier
gamma_with_terminal = jnp.tile(gamma_with_terminal, [num_tau_prime_samples])
rng, rng1, rng2 = jax.random.split(rng, num=3)
# Compute Q-values which are used for action selection for the next states
# in the replay buffer. Compute the argmax over the Q-values.
if double_dqn:
outputs_action = network_def.apply(online_params,
next_states,
num_quantiles=num_quantile_samples,
rng=rng1)
else:
outputs_action = network_def.apply(target_params,
next_states,
num_quantiles=num_quantile_samples,
rng=rng1)
target_quantile_values_action = outputs_action.quantile_values
target_q_values = jnp.squeeze(
jnp.mean(target_quantile_values_action, axis=0))
# Shape: batch_size.
next_qt_argmax = jnp.argmax(target_q_values)
  # Get the indices of the maximum Q-value across the action dimension.
# Shape of next_qt_argmax: (num_tau_prime_samples x batch_size).
next_state_target_outputs = network_def.apply(
target_params,
next_states,
num_quantiles=num_tau_prime_samples,
rng=rng2)
next_qt_argmax = jnp.tile(next_qt_argmax, [num_tau_prime_samples])
target_quantile_vals = (
jax.vmap(lambda x, y: x[y])(next_state_target_outputs.quantile_values,
next_qt_argmax))
target_quantile_vals = rewards + gamma_with_terminal * target_quantile_vals
# We return with an extra dimension, which is expected by train.
return rng, jax.lax.stop_gradient(target_quantile_vals[:, None])
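# Hedged note (editorial addition): the vmap decorator above maps over the
# batch axis of next_states, rewards and terminals (in_axes=0) while
# broadcasting the parameters and rng (in_axes=None), so the function body is
# written for a single transition; out_axes=(None, 0) returns one rng and a
# batched set of target quantile values.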
@functools.partial(jax.jit, static_argnums=(0, 3, 10, 11, 12, 13, 14, 15))
def train(network_def, online_params, target_params, optimizer, optimizer_state,
states, actions, next_states, rewards, terminals, num_tau_samples,
num_tau_prime_samples, num_quantile_samples, cumulative_gamma,
double_dqn, kappa, rng):
"""Run a training step."""
def loss_fn(params, rng_input, target_quantile_vals):
def online(state):
return network_def.apply(params, state, num_quantiles=num_tau_samples,
rng=rng_input)
model_output = jax.vmap(online)(states)
quantile_values = model_output.quantile_values
quantiles = model_output.quantiles
chosen_action_quantile_values = jax.vmap(lambda x, y: x[:, y][:, None])(
quantile_values, actions)
    # Shape of bellman_errors and huber_loss:
# batch_size x num_tau_prime_samples x num_tau_samples x 1.
bellman_errors = (target_quantile_vals[:, :, None, :] -
chosen_action_quantile_values[:, None, :, :])
# The huber loss (see Section 2.3 of the paper) is defined via two cases:
# case_one: |bellman_errors| <= kappa
# case_two: |bellman_errors| > kappa
huber_loss_case_one = (
(jnp.abs(bellman_errors) <= kappa).astype(jnp.float32) *
0.5 * bellman_errors ** 2)
huber_loss_case_two = (
(jnp.abs(bellman_errors) > kappa).astype(jnp.float32) *
kappa * (jnp.abs(bellman_errors) - 0.5 * kappa))
huber_loss = huber_loss_case_one + huber_loss_case_two
# Tile by num_tau_prime_samples along a new dimension. Shape is now
# batch_size x num_tau_prime_samples x num_tau_samples x 1.
# These quantiles will be used for computation of the quantile huber loss
# below (see section 2.3 of the paper).
quantiles = jnp.tile(quantiles[:, None, :, :],
[1, num_tau_prime_samples, 1, 1]).astype(jnp.float32)
# Shape: batch_size x num_tau_prime_samples x num_tau_samples x 1.
quantile_huber_loss = (jnp.abs(quantiles - jax.lax.stop_gradient(
(bellman_errors < 0).astype(jnp.float32))) * huber_loss) / kappa
# Sum over current quantile value (num_tau_samples) dimension,
# average over target quantile value (num_tau_prime_samples) dimension.
# Shape: batch_size x num_tau_prime_samples x 1.
loss = jnp.sum(quantile_huber_loss, axis=2)
loss = jnp.mean(loss, axis=1)
return jnp.mean(loss)
rng, target_quantile_vals = target_quantile_values(
network_def,
online_params,
target_params,
next_states,
rewards,
terminals,
num_tau_prime_samples,
num_quantile_samples,
cumulative_gamma,
double_dqn,
rng)
grad_fn = jax.value_and_grad(loss_fn)
rng, rng_input = jax.random.split(rng)
loss, grad = grad_fn(online_params, rng_input, target_quantile_vals)
updates, optimizer_state = optimizer.update(grad, optimizer_state,
params=online_params)
online_params = optax.apply_updates(online_params, updates)
return rng, optimizer_state, online_params, loss
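# Hedged numeric sketch of the quantile Huber loss used above (editorial
# addition, toy scalars): for a single Bellman error u and quantile tau,
#   huber(u)   = 0.5 * u**2                    if |u| <= kappa
#              = kappa * (|u| - 0.5 * kappa)   otherwise
#   rho_tau(u) = |tau - 1{u < 0}| * huber(u) / kappa
# which matches Section 2.3 of Dabney et al. (2018).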
@functools.partial(jax.jit, static_argnums=(0, 4, 5, 6, 7, 8, 9, 11, 12))
def select_action(network_def, params, state, rng, num_quantile_samples,
num_actions, eval_mode, epsilon_eval, epsilon_train,
epsilon_decay_period, training_steps, min_replay_history,
epsilon_fn):
"""Select an action from the set of available actions.
Chooses an action randomly with probability self._calculate_epsilon(), and
otherwise acts greedily according to the current Q-value estimates.
Args:
network_def: Linen Module to use for inference.
params: Linen params (frozen dict) to use for inference.
state: input state to use for inference.
rng: Jax random number generator.
num_quantile_samples: int, number of quantile samples (static_argnum).
num_actions: int, number of actions (static_argnum).
eval_mode: bool, whether we are in eval mode (static_argnum).
epsilon_eval: float, epsilon value to use in eval mode (static_argnum).
epsilon_train: float, epsilon value to use in train mode (static_argnum).
epsilon_decay_period: float, decay period for epsilon value for certain
epsilon functions, such as linearly_decaying_epsilon, (static_argnum).
training_steps: int, number of training steps so far.
min_replay_history: int, minimum number of steps in replay buffer
(static_argnum).
epsilon_fn: function used to calculate epsilon value (static_argnum).
Returns:
Jax random number generator.
int, the selected action.
"""
epsilon = jnp.where(eval_mode,
epsilon_eval,
epsilon_fn(epsilon_decay_period,
training_steps,
min_replay_history,
epsilon_train))
rng, rng1, rng2 = jax.random.split(rng, num=3)
p = jax.random.uniform(rng1)
return rng, jnp.where(p <= epsilon,
jax.random.randint(rng2, (), 0, num_actions),
jnp.argmax(jnp.mean(
network_def.apply(
params, state,
num_quantiles=num_quantile_samples,
rng=rng2).quantile_values, axis=0), axis=0))
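# Illustrative note (assumption, based on Dopamine's standard schedule):
# linearly_decaying_epsilon ramps epsilon from 1.0 down to epsilon_train over
# epsilon_decay_period steps once min_replay_history steps have elapsed, e.g.
#   epsilon_fn(250000, 125000, 20000, 0.01) ~= 0.58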
@gin.configurable
class JaxImplicitQuantileAgent(dqn_agent.JaxDQNAgent):
"""An extension of Rainbow to perform implicit quantile regression."""
def __init__(self,
num_actions,
observation_shape=dqn_agent.NATURE_DQN_OBSERVATION_SHAPE,
observation_dtype=dqn_agent.NATURE_DQN_DTYPE,
stack_size=dqn_agent.NATURE_DQN_STACK_SIZE,
network=networks.ImplicitQuantileNetwork,
kappa=1.0,
num_tau_samples=32,
num_tau_prime_samples=32,
num_quantile_samples=32,
quantile_embedding_dim=64,
double_dqn=False,
gamma=0.99,
update_horizon=1,
min_replay_history=20000,
update_period=4,
target_update_period=8000,
epsilon_fn=dqn_agent.linearly_decaying_epsilon,
epsilon_train=0.01,
epsilon_eval=0.001,
epsilon_decay_period=250000,
replay_scheme='prioritized',
optimizer='adam',
summary_writer=None,
summary_writing_frequency=500):
"""Initializes the agent and constructs the necessary components.
Most of this constructor's parameters are IQN-specific hyperparameters whose
values are taken from Dabney et al. (2018).
Args:
num_actions: int, number of actions the agent can take at any state.
observation_shape: tuple of ints or an int. If single int, the observation
is assumed to be a 2D square.
observation_dtype: DType, specifies the type of the observations. Note
that if your inputs are continuous, you should set this to jnp.float32.
stack_size: int, number of frames to use in state stack.
network: flax.linen Module that is initialized by shape in _create_network
below. See dopamine.jax.networks.JaxImplicitQuantileNetwork as an
example.
kappa: float, Huber loss cutoff.
num_tau_samples: int, number of online quantile samples for loss
estimation.
num_tau_prime_samples: int, number of target quantile samples for loss
estimation.
num_quantile_samples: int, number of quantile samples for computing
Q-values.
quantile_embedding_dim: int, embedding dimension for the quantile input.
double_dqn: boolean, whether to perform double DQN style learning
as described in Van Hasselt et al.: https://arxiv.org/abs/1509.06461.
gamma: float, discount factor with the usual RL meaning.
update_horizon: int, horizon at which updates are performed, the 'n' in
n-step update.
min_replay_history: int, number of transitions that should be experienced
before the agent begins training its value function.
update_period: int, period between DQN updates.
target_update_period: int, update period for the target network.
epsilon_fn: function expecting 4 parameters:
(decay_period, step, warmup_steps, epsilon). This function should return
the epsilon value used for exploration during training.
epsilon_train: float, the value to which the agent's epsilon is eventually
decayed during training.
epsilon_eval: float, epsilon used when evaluating the agent.
epsilon_decay_period: int, length of the epsilon decay schedule.
replay_scheme: str, 'prioritized' or 'uniform', the sampling scheme of the
replay memory.
optimizer: str, name of optimizer to use.
summary_writer: SummaryWriter object for outputting training statistics.
Summary writing disabled if set to None.
summary_writing_frequency: int, frequency with which summaries will be
written. Lower values will result in slower training.
"""
self.kappa = kappa
# num_tau_samples = N below equation (3) in the paper.
self.num_tau_samples = num_tau_samples
# num_tau_prime_samples = N' below equation (3) in the paper.
self.num_tau_prime_samples = num_tau_prime_samples
# num_quantile_samples = k below equation (3) in the paper.
self.num_quantile_samples = num_quantile_samples
# quantile_embedding_dim = n above equation (4) in the paper.
self.quantile_embedding_dim = quantile_embedding_dim
# option to perform double dqn.
self.double_dqn = double_dqn
super(JaxImplicitQuantileAgent, self).__init__(
num_actions=num_actions,
observation_shape=observation_shape,
observation_dtype=observation_dtype,
stack_size=stack_size,
network=functools.partial(
network, quantile_embedding_dim=quantile_embedding_dim),
gamma=gamma,
update_horizon=update_horizon,
min_replay_history=min_replay_history,
update_period=update_period,
target_update_period=target_update_period,
epsilon_fn=epsilon_fn,
epsilon_train=epsilon_train,
epsilon_eval=epsilon_eval,
epsilon_decay_period=epsilon_decay_period,
optimizer=optimizer,
summary_writer=summary_writer,
summary_writing_frequency=summary_writing_frequency)
def _build_networks_and_optimizer(self):
self._rng, rng = jax.random.split(self._rng)
self.online_params = self.network_def.init(
rng, x=self.state, num_quantiles=self.num_tau_samples,
rng=self._rng)
self.optimizer = dqn_agent.create_optimizer(self._optimizer_name)
self.optimizer_state = self.optimizer.init(self.online_params)
self.target_network_params = self.online_params
def begin_episode(self, observation):
"""Returns the agent's first action for this episode.
Args:
observation: numpy array, the environment's initial observation.
Returns:
int, the selected action.
"""
self._reset_state()
self._record_observation(observation)
if not self.eval_mode:
self._train_step()
self._rng, self.action = select_action(self.network_def,
self.online_params,
self.state,
self._rng,
self.num_quantile_samples,
self.num_actions,
self.eval_mode,
self.epsilon_eval,
self.epsilon_train,
self.epsilon_decay_period,
self.training_steps,
self.min_replay_history,
self.epsilon_fn)
self.action = onp.asarray(self.action)
return self.action
def step(self, reward, observation):
"""Records the most recent transition and returns the agent's next action.
We store the observation of the last time step since we want to store it
with the reward.
Args:
reward: float, the reward received from the agent's most recent action.
observation: numpy array, the most recent observation.
Returns:
int, the selected action.
"""
self._last_observation = self._observation
self._record_observation(observation)
if not self.eval_mode:
self._store_transition(self._last_observation, self.action, reward, False)
self._train_step()
self._rng, self.action = select_action(self.network_def,
self.online_params,
self.state,
self._rng,
self.num_quantile_samples,
self.num_actions,
self.eval_mode,
self.epsilon_eval,
self.epsilon_train,
self.epsilon_decay_period,
self.training_steps,
self.min_replay_history,
self.epsilon_fn)
self.action = onp.asarray(self.action)
return self.action

  def _train_step(self):
    """Runs a single training step.

    Runs training if both:
      (1) A minimum number of frames have been added to the replay buffer.
      (2) `training_steps` is a multiple of `update_period`.

    Also, syncs weights from `online_params` to `target_network_params` if
    `training_steps` is a multiple of `target_update_period`.
    """
    if self._replay.add_count > self.min_replay_history:
      if self.training_steps % self.update_period == 0:
        self._sample_from_replay_buffer()
        self._rng, self.optimizer_state, self.online_params, loss = train(
            self.network_def,
            self.online_params,
            self.target_network_params,
            self.optimizer,
            self.optimizer_state,
            self.replay_elements['state'],
            self.replay_elements['action'],
            self.replay_elements['next_state'],
            self.replay_elements['reward'],
            self.replay_elements['terminal'],
            self.num_tau_samples,
            self.num_tau_prime_samples,
            self.num_quantile_samples,
            self.cumulative_gamma,
            self.double_dqn,
            self.kappa,
            self._rng)
        if (self.summary_writer is not None and
            self.training_steps > 0 and
            self.training_steps % self.summary_writing_frequency == 0):
          summary = tf.compat.v1.Summary(value=[
              tf.compat.v1.Summary.Value(tag='QuantileLoss',
                                         simple_value=loss)])
          self.summary_writer.add_summary(summary, self.training_steps)
      if self.training_steps % self.target_update_period == 0:
        self._sync_weights()

    self.training_steps += 1
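

# -----------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): one way to drive the
# agent above through a single episode with a Gym-style environment. The
# `environment` object is an assumption; `end_episode` is inherited from the
# JaxDQNAgent base class.
def run_one_episode(agent, environment):
  """Runs one episode and returns the accumulated reward."""
  observation = environment.reset()
  action = agent.begin_episode(observation)
  total_reward, done = 0., False
  while not done:
    observation, reward, done, _ = environment.step(action)
    total_reward += reward
    if not done:
      action = agent.step(reward, observation)  # stores transition, may train
  agent.end_episode(reward)  # records the terminal transition
  return total_reward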
|
{"hexsha": "0c131475d2a9e91bc0ae6b05e3c5c1cffa1476c6", "size": 19708, "ext": "py", "lang": "Python", "max_stars_repo_path": "dopamine/jax/agents/implicit_quantile/implicit_quantile_agent.py", "max_stars_repo_name": "crawlingcub/dopamine", "max_stars_repo_head_hexsha": "0d155c12f96606188a97a001e02189bdd3723d4d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-11-03T11:55:23.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-03T11:55:33.000Z", "max_issues_repo_path": "dopamine/jax/agents/implicit_quantile/implicit_quantile_agent.py", "max_issues_repo_name": "huifeidzhu/dopamine", "max_issues_repo_head_hexsha": "5139b0138d3133ff323e3902483c3c9aa33bfe89", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "dopamine/jax/agents/implicit_quantile/implicit_quantile_agent.py", "max_forks_repo_name": "huifeidzhu/dopamine", "max_forks_repo_head_hexsha": "5139b0138d3133ff323e3902483c3c9aa33bfe89", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.2876404494, "max_line_length": 80, "alphanum_fraction": 0.6518165212, "include": true, "reason": "import numpy,import jax", "num_tokens": 4198}
|
import torch
import numpy as np
from torch.autograd import Variable
import torch.nn as nn
import torch.optim
import json
import torch.utils.data.sampler
import os
import glob
import random
import time
from tqdm import tqdm
import configs
import backbone
import data.feature_loader as feat_loader
from data.datamgr import SetDataManager
from methods.baselinetrain import BaselineTrain
from methods.baselinefinetune import BaselineFinetune
from methods.protonet import ProtoNet
from methods.matchingnet import MatchingNet
from methods.relationnet import RelationNet
from methods.maml import MAML
from io_utils import model_dict, parse_args, get_resume_file, get_best_file, get_assigned_file


def feature_evaluation(cl_data_file, model, n_way=5, n_support=5, n_query=15, adaptation=False):
    class_list = cl_data_file.keys()
    select_class = random.sample(list(class_list), n_way)  # list() so dict keys can be sampled on Python 3
    z_all = []
    for cl in select_class:
        img_feat = cl_data_file[cl]
        perm_ids = np.random.permutation(len(img_feat)).tolist()
        z_all.append([np.squeeze(img_feat[perm_ids[i]]) for i in range(n_support + n_query)])  # stack each batch

    z_all = torch.from_numpy(np.array(z_all))
    model.n_query = n_query
    if adaptation:
        scores = model.set_forward_adaptation(z_all, is_feature=True)
    else:
        scores = model.set_forward(z_all, is_feature=True)
    pred = scores.data.cpu().numpy().argmax(axis=1)
    y = np.repeat(range(n_way), n_query)
    acc = np.mean(pred == y) * 100
    return acc
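
# Hedged illustration (not part of the original file): `feature_evaluation`
# only needs a mapping from class label to a list of per-image feature
# vectors, e.g. as produced by the companion save_features script. A synthetic
# smoke test could therefore look like this (the 512-d feature size is an
# assumption):
#
#     cl_data_file = {c: [np.random.randn(512) for _ in range(20)]
#                     for c in range(10)}
#     acc = feature_evaluation(cl_data_file, model, n_way=5, n_support=5,
#                              n_query=15)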

if __name__ == '__main__':
    params = parse_args('test')
    acc_all = []
    iter_num = 600

    few_shot_params = dict(n_way=params.test_n_way, n_support=params.n_shot)

    if params.dataset in ['omniglot', 'cross_char']:
        assert params.model == 'Conv4' and not params.train_aug, 'omniglot only supports Conv4 without augmentation'
        params.model = 'Conv4S'

    if params.method == 'baseline':
        model = BaselineFinetune(model_dict[params.model], **few_shot_params)
    elif params.method == 'baseline++':
        model = BaselineFinetune(model_dict[params.model], loss_type='dist', **few_shot_params)
    elif params.method == 'protonet':
        model = ProtoNet(model_dict[params.model], **few_shot_params)
    elif params.method == 'matchingnet':
        model = MatchingNet(model_dict[params.model], **few_shot_params)
    elif params.method in ['relationnet', 'relationnet_softmax']:
        if params.model == 'Conv4':
            feature_model = backbone.Conv4NP
        elif params.model == 'Conv6':
            feature_model = backbone.Conv6NP
        elif params.model == 'Conv4S':
            feature_model = backbone.Conv4SNP
        else:
            feature_model = lambda: model_dict[params.model](flatten=False)
        loss_type = 'mse' if params.method == 'relationnet' else 'softmax'
        model = RelationNet(feature_model, loss_type=loss_type, **few_shot_params)
    elif params.method in ['maml', 'maml_approx']:
        backbone.ConvBlock.maml = True
        backbone.SimpleBlock.maml = True
        backbone.BottleneckBlock.maml = True
        backbone.ResNet.maml = True
        model = MAML(model_dict[params.model], approx=(params.method == 'maml_approx'), **few_shot_params)
        if params.dataset in ['omniglot', 'cross_char']:  # MAML uses different hyperparameters on omniglot
            model.n_task = 32
            model.task_update_num = 1
            model.train_lr = 0.1
    else:
        raise ValueError('Unknown method')

    model = model.cuda()

    checkpoint_dir = '%s/checkpoints/%s/%s_%s' % (configs.save_dir, params.dataset, params.model, params.method)
    if params.train_aug:
        checkpoint_dir += '_aug'
    if params.limit_n_classes >= 0:
        checkpoint_dir += '_{}classes'.format(params.limit_n_classes)
    if params.limit_n_images >= 0:
        checkpoint_dir += '_{}images'.format(params.limit_n_images)
    if params.method not in ['baseline', 'baseline++']:
        checkpoint_dir += '_%dway_%dshot' % (params.train_n_way, params.train_n_shot)

    # modelfile = get_resume_file(checkpoint_dir)

    if params.method not in ['baseline', 'baseline++']:
        if params.save_iter != -1:
            modelfile = get_assigned_file(checkpoint_dir, params.save_iter)
        else:
            modelfile = get_best_file(checkpoint_dir)
        if modelfile is not None:
            tmp = torch.load(modelfile)
            model.load_state_dict(tmp['state'])

    split = params.split
    if params.save_iter != -1:
        split_str = split + "_" + str(params.save_iter)
    else:
        split_str = split

    if params.method in ['maml', 'maml_approx']:  # MAML does not support testing with precomputed features
        if 'Conv' in params.model:
            if params.dataset in ['omniglot', 'cross_char']:
                image_size = 28
            else:
                image_size = 84
        else:
            image_size = 224

        datamgr = SetDataManager(image_size, n_eposide=iter_num, n_query=15, **few_shot_params)

        if params.dataset == 'cross':
            if split == 'base':
                loadfile = configs.data_dir['miniImagenet'] + 'all.json'
            else:
                loadfile = configs.data_dir['CUB'] + split + '.json'
        elif params.dataset == 'cross_char':
            if split == 'base':
                loadfile = configs.data_dir['omniglot'] + 'noLatin.json'
            else:
                loadfile = configs.data_dir['emnist'] + split + '.json'
        else:
            loadfile = configs.data_dir[params.dataset] + split + '.json'

        novel_loader = datamgr.get_data_loader(loadfile, aug=False)
        if params.adaptation:
            model.task_update_num = 100  # We adapt MAML at test time simply by taking more update steps.
        model.eval()
        acc_mean, acc_std = model.test_loop(novel_loader, return_std=True)
    else:
        # default split = novel, but you can also test base or val classes
        novel_file = os.path.join(checkpoint_dir.replace("checkpoints", "features"), split_str + ".hdf5")
        cl_data_file = feat_loader.init_loader(novel_file)

        for i in tqdm(range(iter_num)):
            acc = feature_evaluation(cl_data_file, model, n_query=15, adaptation=params.adaptation, **few_shot_params)
            acc_all.append(acc)

        acc_all = np.asarray(acc_all)
        acc_mean = np.mean(acc_all)
        acc_std = np.std(acc_all)
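
    # The +- term reported below is a 95% confidence interval on the mean
    # accuracy over iter_num trials: 1.96 * std / sqrt(n).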
print("Method: {0}\nModel: {1}\nSetting: {2}-way {3}-shot".format(params.method, checkpoint_dir, params.test_n_way, params.n_shot))
print('%d Test Acc = %4.2f%% +- %4.2f%%' %(iter_num, acc_mean, 1.96* acc_std/np.sqrt(iter_num)))
with open('./record/results.txt' , 'a') as f:
timestamp = time.strftime("%Y%m%d-%H%M%S", time.localtime())
aug_str = '-aug' if params.train_aug else ''
aug_str += '-adapted' if params.adaptation else ''
if params.method in ['baseline', 'baseline++'] :
exp_setting = '%s-%s-%s-%s%s %sshot %sway_test' %(params.dataset, split_str, params.model, params.method, aug_str, params.n_shot, params.test_n_way )
else:
exp_setting = '%s-%s-%s-%s%s %sshot %sway_train %sway_test' %(params.dataset, split_str, params.model, params.method, aug_str , params.n_shot , params.train_n_way, params.test_n_way )
acc_str = '%d Test Acc = %4.2f%% +- %4.2f%%' %(iter_num, acc_mean, 1.96* acc_std/np.sqrt(iter_num))
f.write( 'Time: %s, Setting: %s, Acc: %s \n' %(timestamp,exp_setting,acc_str) )
|
{"hexsha": "ca4d5b5681825330234becdedbe16793e3829df9", "size": 7683, "ext": "py", "lang": "Python", "max_stars_repo_path": "omni-mini/prelinear/test.py", "max_stars_repo_name": "indy-lab/ProtoTransfer", "max_stars_repo_head_hexsha": "90a526bb209160e376b2b8290e99b0f62b240052", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 43, "max_stars_repo_stars_event_min_datetime": "2020-06-23T08:09:08.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-03T03:05:34.000Z", "max_issues_repo_path": "omni-mini/prelinear/test.py", "max_issues_repo_name": "Asphalt93/ProtoTransfer", "max_issues_repo_head_hexsha": "2e186ffd5bd795244c6dd7192575b84f935c5749", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2020-07-03T20:58:24.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-04T06:34:09.000Z", "max_forks_repo_path": "omni-mini/prelinear/test.py", "max_forks_repo_name": "Asphalt93/ProtoTransfer", "max_forks_repo_head_hexsha": "2e186ffd5bd795244c6dd7192575b84f935c5749", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2020-07-21T03:26:29.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-26T06:10:37.000Z", "avg_line_length": 44.1551724138, "max_line_length": 195, "alphanum_fraction": 0.641546271, "include": true, "reason": "import numpy", "num_tokens": 1928}
|
import jax
import jax.numpy as jnp
import numpy as onp
from flax import struct
from flax.optim.adam import _AdamParamState
from ..hessian_computation import average_magnitude
from .second_order_optimizer_builder import SecondOrderOptimizerDef


@struct.dataclass
class _AdahessianHyperParams:
    learning_rate: onp.ndarray
    beta1: onp.ndarray
    beta2: onp.ndarray
    eps: onp.ndarray
    weight_decay: onp.ndarray
    hessian_power: onp.ndarray


class Adahessian(SecondOrderOptimizerDef):
    """Adahessian optimizer: like Adam, but uses a Hessian(-diagonal)
    approximation in place of the squared gradient.
    """

    def __init__(self, learning_rate=1e-3, beta1=0.9, beta2=0.999, eps=1e-8, weight_decay=0.0, hessian_power=1):
        """Constructor for the Adahessian optimizer.

        Args:
            learning_rate: the step size used to update the parameters (default: 1e-3).
            beta1: the coefficient used for the moving average of the gradient (default: 0.9).
            beta2: the coefficient used for the moving average of the gradient magnitude (default: 0.999).
            eps: the term added to the gradient magnitude estimate for numerical stability (default: 1e-8).
            weight_decay: AdamW-style weight decay rate (relative to learning rate) (default: 0.0).
            hessian_power: hessian power (default: 1).
        """
        hyper_params = _AdahessianHyperParams(learning_rate, beta1, beta2, eps, weight_decay, hessian_power)
        super().__init__(hyper_params)

    def init_param_state(self, param):
        return _AdamParamState(jnp.zeros_like(param), jnp.zeros_like(param))

    def apply_param_gradient(self, step, hyper_params, param, state, grad, hessian):
        """Like Adam's update, but takes an additional Hessian argument."""
        beta1 = hyper_params.beta1
        beta2 = hyper_params.beta2
        weight_decay = hyper_params.weight_decay
        hessian = average_magnitude(hessian)
        hessian_sq = jax.lax.square(hessian)
        grad_ema = beta1 * state.grad_ema + (1. - beta1) * grad
        grad_sq_ema = beta2 * state.grad_sq_ema + (1. - beta2) * hessian_sq
        # bias correction
        t = step + 1.
        grad_ema_corr = grad_ema / (1 - beta1 ** t)
        grad_sq_ema_corr = grad_sq_ema / (1 - beta2 ** t)
        denom = jnp.sqrt(grad_sq_ema_corr) ** hyper_params.hessian_power + hyper_params.eps
        new_param = param - hyper_params.learning_rate * grad_ema_corr / denom
        new_param -= hyper_params.learning_rate * weight_decay * param
        new_state = _AdamParamState(grad_ema, grad_sq_ema)
        return new_param, new_state
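

# -----------------------------------------------------------------------------
# Hedged sketch (not part of the original file): the `hessian` argument to
# `apply_param_gradient` is typically a stochastic estimate of the Hessian
# diagonal obtained with Hutchinson's trick. The helper below shows one way to
# compute such an estimate for a single tensor; `loss_fn` (a scalar-valued
# function of `params`) is an assumption.
def hutchinson_diagonal(loss_fn, params, rng):
    # Rademacher probe vector z with entries in {-1, +1}.
    z = jax.random.rademacher(rng, params.shape, dtype=params.dtype)
    # Hessian-vector product H @ z via forward-over-reverse differentiation.
    _, hvp = jax.jvp(jax.grad(loss_fn), (params,), (z,))
    # z * (H @ z) is an unbiased estimator of diag(H).
    return z * hvp
#
# Creating and using the optimizer then follows the usual flax.optim pattern
# (assumed from the SecondOrderOptimizerDef base class):
#
#     optimizer = Adahessian(learning_rate=1e-3).create(initial_params)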
|
{"hexsha": "eec1d6241aa7f2bd8fe1d688c61be46c76e8fbb9", "size": 2611, "ext": "py", "lang": "Python", "max_stars_repo_path": "adahessianJax/flaxOptimizer/adahessian.py", "max_stars_repo_name": "nestordemeure/AdaHessianJax", "max_stars_repo_head_hexsha": "20f789093141fdfe998753f85bca6e8b3f5acb81", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2020-10-11T21:13:09.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-12T13:03:33.000Z", "max_issues_repo_path": "adahessianJax/flaxOptimizer/adahessian.py", "max_issues_repo_name": "nestordemeure/AdaHessianJax", "max_issues_repo_head_hexsha": "20f789093141fdfe998753f85bca6e8b3f5acb81", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-03-11T10:20:06.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-11T19:57:58.000Z", "max_forks_repo_path": "adahessianJax/flaxOptimizer/adahessian.py", "max_forks_repo_name": "nestordemeure/AdaHessianJax", "max_forks_repo_head_hexsha": "20f789093141fdfe998753f85bca6e8b3f5acb81", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-10-12T01:48:23.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-21T13:19:33.000Z", "avg_line_length": 43.5166666667, "max_line_length": 112, "alphanum_fraction": 0.6981999234, "include": true, "reason": "import numpy,import jax", "num_tokens": 672}
|
##################################################################################################
# Integration with UncertainData.jl (sampling from full supports of the furnishing distributions)
##################################################################################################
uvals_x = [UncertainValue(Normal, rand(Normal(0, 5)), abs(rand(Normal(0, 3)))) for i = 1:100]
uvals_y = [UncertainValue(Normal, rand(Normal(0, 5)), abs(rand(Normal(0, 3)))) for i = 1:100];
# UncertainValueDataset
UVX = UncertainValueDataset(uvals_x)
UVY = UncertainValueDataset(uvals_y)
# UncertainIndexDataset
UVX_idx = UncertainIndexDataset(uvals_x)
UVY_idx = UncertainIndexDataset(uvals_y)
# Real-valued vectors
x = resample.(uvals_x);
y = resample.(uvals_y);
binnings = [RectangularBinning(i) for i = 3:6]
vftest = VisitationFrequencyTest(binning = binnings, ηs = 1)
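# `ηs` are the prediction lags used by the transfer entropy estimator. With a
# scalar η (as above) `causality` returns a 0-dimensional array; with a range
# of lags (as further below) it returns one value per lag, which is what the
# assertions verify.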
@test causality(x, y, vftest) isa Array{<:Real, 0}
@test causality(uvals_x, uvals_y, vftest) isa Array{<:Real, 0}
@test causality(x, uvals_y, vftest) isa Array{<:Real, 0}
@test causality(uvals_x, y, vftest) isa Array{<:Real, 0}
@test causality(UVX, UVY, vftest) isa Array{<:Real, 0}
@test causality(x, UVY, vftest) isa Array{<:Real, 0}
@test causality(UVX, y, vftest) isa Array{<:Real, 0}
vftest = VisitationFrequencyTest(binning = binnings, ηs = 1:5)
@test causality(x, y, vftest) isa Array{T, 1} where T
@test causality(uvals_x, uvals_y, vftest) isa Array{T, 1} where T
@test causality(x, uvals_y, vftest) isa Array{T, 1} where T
@test causality(uvals_x, y, vftest) isa Array{T, 1} where T
@test causality(UVX, UVY, vftest) isa Array{T, 1} where T
@test causality(x, UVY, vftest) isa Array{T, 1} where T
@test causality(UVX, y, vftest) isa Array{T, 1} where T
@test causality(x, y, vftest) |> length == 5
@test causality(uvals_x, uvals_y, vftest) |> length == 5
@test causality(x, uvals_y, vftest) |> length == 5
@test causality(uvals_x, y, vftest) |> length == 5
@test causality(UVX, UVY, vftest) |> length == 5
@test causality(x, UVY, vftest) |> length == 5
@test causality(UVX, y, vftest) |> length == 5
################################################################
# Integration with UncertainData.jl (with sampling constraints)
################################################################
onevar_constraints = ConstrainedResampling(TruncateStd(1))
twovar_constraints = ConstrainedResampling(TruncateStd(2), TruncateStd(1))
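# Sampling constraints truncate the furnishing distributions before resampling:
# `TruncateStd(n)` restricts draws to within n standard deviations of the mean,
# and a `ConstrainedResampling` with two constraints applies one per variable.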
test_vf = VisitationFrequencyTest(binning = RectangularBinning(5), ηs = 1)
@test causality(x, y, test_vf, twovar_constraints) isa Array{<:Real, 0}
@test causality(uvals_x, uvals_y, test_vf, twovar_constraints) isa Array{<:Real, 0}
@test causality(x, uvals_y, test_vf, onevar_constraints) isa Array{<:Real, 0}
@test causality(uvals_x, y, test_vf, onevar_constraints) isa Array{<:Real, 0}
@test causality(UVX, UVY, test_vf, twovar_constraints) isa Array{<:Real, 0}
@test causality(uvals_x, UVY, test_vf, twovar_constraints) isa Array{<:Real, 0}
@test causality(UVX, uvals_y, test_vf, twovar_constraints) isa Array{<:Real, 0}
@test causality(x, UVY, test_vf, onevar_constraints) isa Array{<:Real, 0}
@test causality(UVX, y, test_vf, onevar_constraints) isa Array{<:Real, 0}
test_vf = VisitationFrequencyTest(binning = RectangularBinning(5), ηs = -3:3)
@test causality(x, y, test_vf, twovar_constraints) isa Array{<:Real, 1}
@test causality(uvals_x, uvals_y, test_vf, twovar_constraints) isa Array{<:Real, 1}
@test causality(x, uvals_y, test_vf, onevar_constraints) isa Array{<:Real, 1}
@test causality(uvals_x, y, test_vf, onevar_constraints) isa Array{<:Real, 1}
@test causality(UVX, UVY, test_vf, twovar_constraints) isa Array{<:Real, 1}
@test causality(uvals_x, UVY, test_vf, twovar_constraints) isa Array{<:Real, 1}
@test causality(UVX, uvals_y, test_vf, twovar_constraints) isa Array{<:Real, 1}
@test causality(x, UVY, test_vf, onevar_constraints) isa Array{<:Real, 1}
@test causality(UVX, y, test_vf, onevar_constraints) isa Array{<:Real, 1}
|
{"hexsha": "29b6e52fbf6475361aa7792281850c534e5e94c1", "size": 3963, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/causality_tests/integration_uncertaindata/test_uncertaindata_integration_VisitationFrequencyTest.jl", "max_stars_repo_name": "UnofficialJuliaMirrorSnapshots/CausalityTools.jl-5520caf5-2dd7-5c5d-bfcb-a00e56ac49f7", "max_stars_repo_head_hexsha": "93935b3bc73738c52b004e9cf23d6f6a4778982c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 24, "max_stars_repo_stars_event_min_datetime": "2020-06-11T01:51:15.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T18:39:43.000Z", "max_issues_repo_path": "test/causality_tests/integration_uncertaindata/test_uncertaindata_integration_VisitationFrequencyTest.jl", "max_issues_repo_name": "UnofficialJuliaMirrorSnapshots/CausalityTools.jl-5520caf5-2dd7-5c5d-bfcb-a00e56ac49f7", "max_issues_repo_head_hexsha": "93935b3bc73738c52b004e9cf23d6f6a4778982c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 27, "max_issues_repo_issues_event_min_datetime": "2020-12-21T02:52:22.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-31T10:40:08.000Z", "max_forks_repo_path": "test/causality_tests/integration_uncertaindata/test_uncertaindata_integration_VisitationFrequencyTest.jl", "max_forks_repo_name": "UnofficialJuliaMirrorSnapshots/CausalityTools.jl-5520caf5-2dd7-5c5d-bfcb-a00e56ac49f7", "max_forks_repo_head_hexsha": "93935b3bc73738c52b004e9cf23d6f6a4778982c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2020-09-27T08:56:28.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-08T15:05:51.000Z", "avg_line_length": 51.4675324675, "max_line_length": 98, "alphanum_fraction": 0.6767600303, "num_tokens": 1444}
|