from __future__ import print_function, division
import os
import logging
on_rtd = os.environ.get('READTHEDOCS') == 'True'
if not on_rtd:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.interpolate import UnivariateSpline as interpolate
from scipy.integrate import quad
else:
np, pd, plt = (None, None, None)
interpolate, quad = (None, None)
from ..plotutils import setfig
from ..hashutils import hashcombine, hasharray
from .constraints import FunctionLowerLimit
class ContrastCurve(object):
"""Object representing an imaging contrast curve
Usually accessed via :class:`ContrastCurveFromFile`
and then applied using :class:`ContrastCurveConstraint`,
e.g., through :func:`StarPopulation.apply_cc`.
:param rs:
Angular separation from target star, in arcsec.
:param dmags:
Magnitude contrast.
:param band:
Photometric bandpass in which observation is taken.
:param mag:
Magnitude of central star (rarely used?)
:param name:
Name; e.g., "PHARO J-band", "Keck AO", etc.
Should be a decent label.
"""
def __init__(self,rs,dmags,band,mag=None,name=None):
#if band=='K' or band=="K'":
# band = 'Ks'
rs = np.atleast_1d(rs)
dmags = np.atleast_1d(dmags)
self.rs = rs
self.dmags = dmags
self.band = band
self.mag = mag
self.contrastfn = interpolate(rs,dmags,s=0)
self.rmax = rs.max()
self.rmin = rs.min()
if name is None:
self.name = '%s band' % self.band
else:
self.name = name
def plot(self,fig=None,**kwargs):
setfig(fig)
plt.plot(self.rs,self.dmags,**kwargs)
plt.title('%s band contrast curve' % self.band)
plt.gca().invert_yaxis()
plt.xlabel('Separation [arcsec]')
        plt.ylabel(r'$\Delta %s$' % self.band)
def __eq__(self,other):
return hash(self)==hash(other)
def __ne__(self,other):
return not self.__eq__(other)
def __hash__(self):
return hashcombine(hasharray(self.rs),
hasharray(self.dmags),
self.band,
self.mag)
def __call__(self,r):
r = np.atleast_1d(r)
dmags = np.atleast_1d(self.contrastfn(r))
dmags[r >= self.rmax] = self.contrastfn(self.rmax)
dmags[r < self.rmin] = 0
#put something in here to "extend" beyond rmax?
return dmags
    def __add__(self,other):
        if isinstance(other, (int, float)):
            dmags = self.dmags + other
            return ContrastCurve(self.rs,dmags,self.band,self.mag)
        # adding two ContrastCurves was never implemented; fail loudly
        # rather than silently returning None
        raise ValueError('Can only add a number to a ContrastCurve.')
def __repr__(self):
return '<%s: %s>' % (type(self),self.name)
    def power(self,floor=10,rmin=0.1,use_quad=False):
        # average contrast between rmin and rmax, normalized by floor
        if use_quad:
            return quad(self,rmin,self.rmax)[0]/((self.rmax-rmin)*floor)
        else:
            rs = np.linspace(rmin,self.rmax,100)
            # normalized the same way as the quad branch
            return np.trapz(self(rs),rs)/((self.rmax-rmin)*floor)
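# A minimal usage sketch (not part of the original module; the separations
# and contrasts below are made up for illustration only):
#
#     rs = np.linspace(0.1, 2.0, 20)             # separations [arcsec]
#     dmags = 8 * (1 - np.exp(-rs))              # toy contrast profile
#     cc = ContrastCurve(rs, dmags, 'Ks', name='toy Ks curve')
#     cc(0.5)             # interpolated delta-mag at 0.5 arcsec
#     cc(5.0)             # beyond rmax -> clipped to cc(rmax)
#     shifted = cc + 0.5  # shift the whole curve by half a magnitude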
class ContrastCurveConstraint(FunctionLowerLimit):
def __init__(self,rs,dmags,cc,name='CC',**kwargs):
self.rs = rs
self.dmags = dmags
self.cc = cc
FunctionLowerLimit.__init__(self,rs,dmags,cc,name=name,**kwargs)
def __str__(self):
return '%s contrast curve' % self.name
def update_rs(self,rs):
self.rs = rs
FunctionLowerLimit.__init__(self,rs,self.dmags,self.cc,name=self.name)
logging.info('%s updated with new rsky values.' % self.name)
class ContrastCurveFromFile(ContrastCurve):
"""A contrast curve derived from a two-column file
:param filename:
Filename of contrast curve; first column separation in arcsec,
second column delta-mag.
:param band:
Bandpass of imaging observation.
:param mas:
Set to ``True`` if separation is in milliarcsec rather than
arcsec.
"""
def __init__(self,filename,band,mag=None, mas=False, **kwargs):
rs,dmags = np.loadtxt(filename,unpack=True)
if mas: #convert from milliarcsec
rs /= 1000.
ContrastCurve.__init__(self,rs,dmags,band,mag, **kwargs)
self.filename = filename
class VelocityContrastCurve(object):
def __init__(self,vs,dmags,band='g'):
self.vs = vs
self.dmags = dmags
self.band = band
if np.size(vs) > 1:
self.contrastfn = interpolate(vs,dmags,s=0)
self.vmax = vs.max()
self.vmin = vs.min()
else: #simple case; one v, one dmag
def cfn(v):
v = np.atleast_1d(abs(v))
dmags = np.zeros(v.shape)
dmags[v>=self.vs] = self.dmags
dmags[v<self.vs] = 0
return dmags
self.contrastfn = cfn
self.vmax = self.vs
self.vmin = self.vs
def __call__(self,v):
v = np.atleast_1d(np.absolute(v))
dmags = np.atleast_1d(self.contrastfn(v))
dmags[v >= self.vmax] = self.contrastfn(self.vmax)
dmags[v < self.vmin] = 0
#put something in here to "extend" beyond vmax?
return dmags
class VelocityContrastCurveConstraint(FunctionLowerLimit):
def __init__(self,vels,dmags,vcc,name='VCC',**kwargs):
self.vels = vels
self.dmags = dmags
self.vcc = vcc
FunctionLowerLimit.__init__(self,vels,dmags,vcc,name=name,**kwargs)
[source: vespa/stars/contrastcurve.py (Python, MIT)]
using Logging
using Random
using DataFrames
"Return a random index to be filled from the garden mask."
function randomindex(mask::Matrix{Bool})::Int
while true
i = rand(1:length(mask))
if mask[i]
return i
end
end
end
"Swap to the elements corresponding to the two provided indices."
function swap!(garden::Matrix{Int}, i::Int, j::Int)
t = garden[i]
garden[i] = garden[j]
garden[j] = t
garden
end
"Return the neighbours to be filled of the cell at the given index."
function neighbours(garden::Matrix{Int}, idx::Int)::Vector{Int}
m, n = size(garden)
i, j = Tuple(CartesianIndices(garden)[idx])
neighbourindices = [(i, j-1), (i, j+1), (i-1, j), (i+1, j)]
# cells filled with 0 are not part of the garden
[
garden[k, l] for (k, l) in neighbourindices
if 0 < k <= m && 0 < l <= n && garden[k, l] != 0
]
end
"Compute the cost difference when swapping the two provided indices."
function deltacost(garden::Matrix{Int}, costs::Matrix{Float64}, i::Int, j::Int)::Float64
cost = 0
for k in neighbours(garden, i)
cost += costs[k, garden[j]] - costs[k, garden[i]]
end
for k in neighbours(garden, j)
cost += costs[k, garden[i]] - costs[k, garden[j]]
end
cost
end
"Compute the total cost of a garden configuration."
function gardencost(garden::Matrix{Int}, mask::Matrix{Bool}, costs::Matrix{Float64})::Float64
cost = 0
for i = 1:length(mask)
if mask[i] == 0
continue
end
for k in neighbours(garden, i)
cost += costs[k, garden[i]]
end
end
cost / 2
end
"Update the garden using Metropolis-Hastings, using the inverse temperature beta."
function update!(
garden::Matrix{Int},
mask::Matrix{Bool},
costs::Matrix{Float64},
beta::Float64 = 5.0
)
i = randomindex(mask)
j = randomindex(mask)
while i == j
j = randomindex(mask)
end
d = deltacost(garden, costs, i, j)
@debug "cost difference $d"
if rand() < exp(- beta * d)
@debug "swapping indices $i and $j"
swap!(garden, i, j)
return d
end
return 0.0
end
"Fill the garden randomly with a predefined number for each plant."
function fillgardenrandomly!(garden::Matrix{Int}, mask::Matrix{Bool}, plants::DataFrame)
cells = vcat([repeat([plant], count) for (plant, count) in eachrow(plants)]...)
# fill the remaining slots with random plants
diffcount = sum(mask) - length(cells)
cells = vcat(cells, rand(cells, diffcount))
garden[mask] = shuffle!(indexin(cells, plants.name))
garden
end
"Update the garden for a given number of steps, and return the total cost over time."
function gardenevolution!(garden::Matrix{Int}, mask::Matrix{Bool}, costs::Matrix{Float64}; steps::Int = 10000, beta::Float64 = 5.0)
gardencosts = [gardencost(garden, mask, costs)]
for i = 1:steps
update!(garden, mask, costs, beta)
if mod(i, 1000) == 0
append!(gardencosts, gardencost(garden, mask, costs))
end
end
gardencosts
end
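# A minimal usage sketch (assumed toy data, not part of the original module):
# a fully plantable 4x4 garden, 3 plant species, and a random symmetric
# pairwise cost matrix.
#
#     mask = fill(true, 4, 4)
#     garden = zeros(Int, 4, 4)
#     plants = DataFrame(name = ["tomato", "basil", "carrot"], count = [5, 5, 5])
#     costs = rand(3, 3); costs = (costs + costs') / 2
#     fillgardenrandomly!(garden, mask, plants)
#     history = gardenevolution!(garden, mask, costs; steps = 10_000, beta = 5.0)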
[source: src/mcmc.jl from dlozeve/GardenOptim (Julia, MIT)]
exp(π) - π
# Solve x^2 + 5x + 6 = 0 using the quadratic formula (real arithmetic)
# Coefficients a,b,c in ax^2 + bx + c = 0
a = 1
b = 5
c = 6
# The quadratic formula
d = sqrt(b^2 - 4*a*c)
r1 = (-b - d) / 2a
r2 = (-b + d) / 2a
println("The roots are ", r1, " and ", r2)
function myfunc(x,y)
x + y
end
myfunc(1,2)
myfunc2(x,y) = x + 2y
myfunc2(3,5)
myfunc3 = (x,y) -> x + 3y
myfunc3(3,5)
# Same thing without giving the function a name:
((x,y) -> x + 3y)(3,5)
function mynewfunc(x,y)
out1 = x + y
out2 = out1 * (2x + y)
out1, out2
end
y1, y2 = mynewfunc(2,1)
function real_roots_of_quadratic(a,b,c)
# Compute the real roots of the quadratic ax^2 + bx + c = 0
d = sqrt(b^2 - 4*a*c)
r1 = (-b - d) / 2a
r2 = (-b + d) / 2a
r1, r2
end
real_roots_of_quadratic(1, 5, 6)
real_roots_of_quadratic(-1, 5, 6)
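# If the discriminant is negative, sqrt of a negative real number throws a
# DomainError. A simple variant (not from the original notebook): promote the
# discriminant to a complex number first, so the roots come out complex.
function roots_of_quadratic(a,b,c)
    d = sqrt(Complex(b^2 - 4*a*c))
    r1 = (-b - d) / 2a
    r2 = (-b + d) / 2a
    r1, r2
end
roots_of_quadratic(1, 2, 5)   # complex conjugate pair: -1 - 2im and -1 + 2im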
[source: content/Introduction/Functions.jl (Julia, MIT)]
[STATEMENT]
lemma PiE_eq_iff:
"Pi\<^sub>E I F = Pi\<^sub>E I F' \<longleftrightarrow> (\<forall>i\<in>I. F i = F' i) \<or> ((\<exists>i\<in>I. F i = {}) \<and> (\<exists>i\<in>I. F' i = {}))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (Pi\<^sub>E I F = Pi\<^sub>E I F') = ((\<forall>i\<in>I. F i = F' i) \<or> (\<exists>i\<in>I. F i = {}) \<and> (\<exists>i\<in>I. F' i = {}))
[PROOF STEP]
proof (intro iffI disjCI)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<lbrakk>Pi\<^sub>E I F = Pi\<^sub>E I F'; \<not> ((\<exists>i\<in>I. F i = {}) \<and> (\<exists>i\<in>I. F' i = {}))\<rbrakk> \<Longrightarrow> \<forall>i\<in>I. F i = F' i
2. (\<forall>i\<in>I. F i = F' i) \<or> (\<exists>i\<in>I. F i = {}) \<and> (\<exists>i\<in>I. F' i = {}) \<Longrightarrow> Pi\<^sub>E I F = Pi\<^sub>E I F'
[PROOF STEP]
assume eq[simp]: "Pi\<^sub>E I F = Pi\<^sub>E I F'"
[PROOF STATE]
proof (state)
this:
Pi\<^sub>E I F = Pi\<^sub>E I F'
goal (2 subgoals):
1. \<lbrakk>Pi\<^sub>E I F = Pi\<^sub>E I F'; \<not> ((\<exists>i\<in>I. F i = {}) \<and> (\<exists>i\<in>I. F' i = {}))\<rbrakk> \<Longrightarrow> \<forall>i\<in>I. F i = F' i
2. (\<forall>i\<in>I. F i = F' i) \<or> (\<exists>i\<in>I. F i = {}) \<and> (\<exists>i\<in>I. F' i = {}) \<Longrightarrow> Pi\<^sub>E I F = Pi\<^sub>E I F'
[PROOF STEP]
assume "\<not> ((\<exists>i\<in>I. F i = {}) \<and> (\<exists>i\<in>I. F' i = {}))"
[PROOF STATE]
proof (state)
this:
\<not> ((\<exists>i\<in>I. F i = {}) \<and> (\<exists>i\<in>I. F' i = {}))
goal (2 subgoals):
1. \<lbrakk>Pi\<^sub>E I F = Pi\<^sub>E I F'; \<not> ((\<exists>i\<in>I. F i = {}) \<and> (\<exists>i\<in>I. F' i = {}))\<rbrakk> \<Longrightarrow> \<forall>i\<in>I. F i = F' i
2. (\<forall>i\<in>I. F i = F' i) \<or> (\<exists>i\<in>I. F i = {}) \<and> (\<exists>i\<in>I. F' i = {}) \<Longrightarrow> Pi\<^sub>E I F = Pi\<^sub>E I F'
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<not> ((\<exists>i\<in>I. F i = {}) \<and> (\<exists>i\<in>I. F' i = {}))
[PROOF STEP]
have "(\<forall>i\<in>I. F i \<noteq> {}) \<and> (\<forall>i\<in>I. F' i \<noteq> {})"
[PROOF STATE]
proof (prove)
using this:
\<not> ((\<exists>i\<in>I. F i = {}) \<and> (\<exists>i\<in>I. F' i = {}))
goal (1 subgoal):
1. (\<forall>i\<in>I. F i \<noteq> {}) \<and> (\<forall>i\<in>I. F' i \<noteq> {})
[PROOF STEP]
using PiE_eq_empty_iff[of I F] PiE_eq_empty_iff[of I F']
[PROOF STATE]
proof (prove)
using this:
\<not> ((\<exists>i\<in>I. F i = {}) \<and> (\<exists>i\<in>I. F' i = {}))
(Pi\<^sub>E I F = {}) = (\<exists>i\<in>I. F i = {})
(Pi\<^sub>E I F' = {}) = (\<exists>i\<in>I. F' i = {})
goal (1 subgoal):
1. (\<forall>i\<in>I. F i \<noteq> {}) \<and> (\<forall>i\<in>I. F' i \<noteq> {})
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
(\<forall>i\<in>I. F i \<noteq> {}) \<and> (\<forall>i\<in>I. F' i \<noteq> {})
goal (2 subgoals):
1. \<lbrakk>Pi\<^sub>E I F = Pi\<^sub>E I F'; \<not> ((\<exists>i\<in>I. F i = {}) \<and> (\<exists>i\<in>I. F' i = {}))\<rbrakk> \<Longrightarrow> \<forall>i\<in>I. F i = F' i
2. (\<forall>i\<in>I. F i = F' i) \<or> (\<exists>i\<in>I. F i = {}) \<and> (\<exists>i\<in>I. F' i = {}) \<Longrightarrow> Pi\<^sub>E I F = Pi\<^sub>E I F'
[PROOF STEP]
with PiE_eq_iff_not_empty[of I F F']
[PROOF STATE]
proof (chain)
picking this:
\<lbrakk>\<And>i. i \<in> I \<Longrightarrow> F i \<noteq> {}; \<And>i. i \<in> I \<Longrightarrow> F' i \<noteq> {}\<rbrakk> \<Longrightarrow> (Pi\<^sub>E I F = Pi\<^sub>E I F') = (\<forall>i\<in>I. F i = F' i)
(\<forall>i\<in>I. F i \<noteq> {}) \<and> (\<forall>i\<in>I. F' i \<noteq> {})
[PROOF STEP]
show "\<forall>i\<in>I. F i = F' i"
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>\<And>i. i \<in> I \<Longrightarrow> F i \<noteq> {}; \<And>i. i \<in> I \<Longrightarrow> F' i \<noteq> {}\<rbrakk> \<Longrightarrow> (Pi\<^sub>E I F = Pi\<^sub>E I F') = (\<forall>i\<in>I. F i = F' i)
(\<forall>i\<in>I. F i \<noteq> {}) \<and> (\<forall>i\<in>I. F' i \<noteq> {})
goal (1 subgoal):
1. \<forall>i\<in>I. F i = F' i
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<forall>i\<in>I. F i = F' i
goal (1 subgoal):
1. (\<forall>i\<in>I. F i = F' i) \<or> (\<exists>i\<in>I. F i = {}) \<and> (\<exists>i\<in>I. F' i = {}) \<Longrightarrow> Pi\<^sub>E I F = Pi\<^sub>E I F'
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. (\<forall>i\<in>I. F i = F' i) \<or> (\<exists>i\<in>I. F i = {}) \<and> (\<exists>i\<in>I. F' i = {}) \<Longrightarrow> Pi\<^sub>E I F = Pi\<^sub>E I F'
[PROOF STEP]
assume "(\<forall>i\<in>I. F i = F' i) \<or> (\<exists>i\<in>I. F i = {}) \<and> (\<exists>i\<in>I. F' i = {})"
[PROOF STATE]
proof (state)
this:
(\<forall>i\<in>I. F i = F' i) \<or> (\<exists>i\<in>I. F i = {}) \<and> (\<exists>i\<in>I. F' i = {})
goal (1 subgoal):
1. (\<forall>i\<in>I. F i = F' i) \<or> (\<exists>i\<in>I. F i = {}) \<and> (\<exists>i\<in>I. F' i = {}) \<Longrightarrow> Pi\<^sub>E I F = Pi\<^sub>E I F'
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
(\<forall>i\<in>I. F i = F' i) \<or> (\<exists>i\<in>I. F i = {}) \<and> (\<exists>i\<in>I. F' i = {})
[PROOF STEP]
show "Pi\<^sub>E I F = Pi\<^sub>E I F'"
[PROOF STATE]
proof (prove)
using this:
(\<forall>i\<in>I. F i = F' i) \<or> (\<exists>i\<in>I. F i = {}) \<and> (\<exists>i\<in>I. F' i = {})
goal (1 subgoal):
1. Pi\<^sub>E I F = Pi\<^sub>E I F'
[PROOF STEP]
using PiE_eq_empty_iff[of I F] PiE_eq_empty_iff[of I F']
[PROOF STATE]
proof (prove)
using this:
(\<forall>i\<in>I. F i = F' i) \<or> (\<exists>i\<in>I. F i = {}) \<and> (\<exists>i\<in>I. F' i = {})
(Pi\<^sub>E I F = {}) = (\<exists>i\<in>I. F i = {})
(Pi\<^sub>E I F' = {}) = (\<exists>i\<in>I. F' i = {})
goal (1 subgoal):
1. Pi\<^sub>E I F = Pi\<^sub>E I F'
[PROOF STEP]
by (auto simp: PiE_def)
[PROOF STATE]
proof (state)
this:
Pi\<^sub>E I F = Pi\<^sub>E I F'
goal:
No subgoals!
[PROOF STEP]
qed
import numpy as np
import pytest
from pytest import approx
from desdeov2.problem.Constraint import (
ConstraintError,
ScalarConstraint,
constraint_function_factory,
)
@pytest.fixture
def objective_vector_1():
return np.array([1.0, 5.0, 10.0])
@pytest.fixture
def decision_vector_1():
return np.array([-1.0, 10.0, 5.0, -3.5])
@pytest.fixture
def equal_constraint():
def constraint(decision_vector, objective_vector):
x = decision_vector
y = objective_vector
h = 50 # Must equal this
res = h - (x[1] * y[2] - x[2] * y[1])
# An equality constraint is true only when res equals zero
return -abs(res)
return constraint
@pytest.fixture
def lt_constraint():
def constraint(decision_vector, objective_vector):
x = decision_vector
y = objective_vector
lt = 13.5 # Must be less than this
res = lt - (x[0] * x[2] + y[2] + y[1])
return res
return constraint
@pytest.fixture
def gt_constraint():
def constraint(decision_vector, objective_vector):
x = decision_vector
y = objective_vector
gt = -5.5 # Must be greater than this
res = ((y[0] * y[2]) / (x[1] * x[2] * x[3])) - gt
return res
return constraint
@pytest.fixture
def simple_constraint_factory(decision_vector_1, objective_vector_1):
def factory(constraint):
return ScalarConstraint(
"test", len(decision_vector_1), len(objective_vector_1), constraint
)
return factory
def test_init(decision_vector_1, objective_vector_1, equal_constraint):
name = "test"
cons = ScalarConstraint(
name, len(decision_vector_1), len(objective_vector_1), equal_constraint
)
assert cons.name == "test"
assert cons.n_decision_vars == len(decision_vector_1)
assert cons.n_objective_funs == len(objective_vector_1)
res1 = equal_constraint(decision_vector_1, objective_vector_1)
res2 = cons.evaluator(decision_vector_1, objective_vector_1)
assert res1 == approx(res2)
def test_equal_cons(
simple_constraint_factory,
equal_constraint,
decision_vector_1,
objective_vector_1,
):
cons = simple_constraint_factory(equal_constraint)
res = cons.evaluate(decision_vector_1, objective_vector_1)
assert res == approx(-25.0)
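# Worked check of the expected value above, using the fixture data:
#   x = [-1, 10, 5, -3.5], y = [1, 5, 10]
#   x[1]*y[2] - x[2]*y[1] = 10*10 - 5*5 = 75
#   res = 50 - 75 = -25, and -abs(-25) = -25.0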
def test_gt_cons(
simple_constraint_factory,
gt_constraint,
decision_vector_1,
objective_vector_1,
):
cons = simple_constraint_factory(gt_constraint)
res = cons.evaluate(decision_vector_1, objective_vector_1)
assert res == approx(5.442857142857143)
def test_lt_cons(
simple_constraint_factory,
lt_constraint,
decision_vector_1,
objective_vector_1,
):
cons = simple_constraint_factory(lt_constraint)
res = cons.evaluate(decision_vector_1, objective_vector_1)
assert res == approx(3.5)
def test_bad_evaluate_call(
simple_constraint_factory,
equal_constraint,
decision_vector_1,
objective_vector_1,
):
cons = simple_constraint_factory(equal_constraint)
# Too few decision variables
with pytest.raises(ConstraintError):
cons.evaluate(decision_vector_1[1:], objective_vector_1)
# Too few objective function values
with pytest.raises(ConstraintError):
cons.evaluate(decision_vector_1, objective_vector_1[1:])
# Too many decision variables
with pytest.raises(ConstraintError):
cons.evaluate(np.ones(10), objective_vector_1)
# Too many objective function values
with pytest.raises(ConstraintError):
cons.evaluate(decision_vector_1, np.ones(10))
def test_constraint_function_factory_equal():
cons = constraint_function_factory(
lambda x, y: x[0] + x[1] + y[0] + y[1], 10.0, "=="
)
decision_vector = np.array([2.5, 7.5])
objective_vector = np.array([-7.1, 10.2])
res = cons(decision_vector, objective_vector)
assert res == approx(-3.1)
def test_constraint_function_factory_lt():
cons = constraint_function_factory(
lambda x, y: x[0] + x[1] + y[0] + y[1], 5.0, "<"
)
decision_vector = np.array([2.5, 7.5])
objective_vector = np.array([-7.1, 10.2])
res = cons(decision_vector, objective_vector)
assert res == approx(-8.1)
def test_constraint_function_factory_gt():
cons = constraint_function_factory(
lambda x, y: x[0] + x[1] + y[0] + y[1], 9.5, ">"
)
decision_vector = np.array([2.5, 7.5])
objective_vector = np.array([-7.1, 10.2])
res = cons(decision_vector, objective_vector)
assert res == approx(3.6)
def test_constraint_function_factory_bad_operator():
with pytest.raises(ValueError):
constraint_function_factory(lambda x: x[0] + x[1], 9.5, "x")
[source: tests/problem/test_constraint.py from gialmisi/DESDEOv2 (Python, MIT)]
"""
scc(di_ei, di_ej, scc_init::SCCInit)
Compute to which strongly connected component each vertex belongs.
`di_ei` and `di_ej` describe the edges such that the k-th component of di_ei and k-th component
of `di_ej` form an edge i.e `di_ei = [1, 1, 2]` and `di_ej = [3, 4, 3]` =>
those three edges (1, 3), (1, 4), (2, 3)
`di_ei` must be sorted asc to make this function work.
The last argument is a `SCCInit` struct which itself provides the memory for the functionality.
This speeds up the process as no new memory needs to be allocated.
"""
function scc(di_ei, di_ej, scc_init::SCCInit)
len = length(di_ei)
index_ei = scc_init.index_ei
n = length(index_ei) - 1
    # compute the starting index for each vertex,
    # basically knowing where to start/end when looking for neighbors
last = di_ei[1]
c = 2
last_i = 0
@inbounds for i in 2:len
di_ei[i] == 0 && break
if di_ei[i] > last
index_ei[(last + 1):di_ei[i]] .= c
last = di_ei[i]
end
c += 1
last_i = i
end
index_ei[(di_ei[last_i] + 1):end] .= c
index_ei[1] = 1
id = 0
sccCount = 0
ids = scc_init.ids
low = scc_init.low
on_stack = scc_init.on_stack
group_id = scc_init.group_id
ids .= -1
low .= 0
on_stack .= false
stack = Int[]
group_id .= 0
c_group_id = 1
# start dfs from each vertex if unconnected graph
@inbounds for s in 1:n
ids[s] != -1 && continue # if visited already continue
dfs_work = Vector{Tuple{Int,Int}}()
# the 0 in dfs_work represents whether it's the first time calling that vertex
push!(dfs_work, (s, 0))
dfs_stack = Int[]
while !isempty(dfs_work)
at, i = pop!(dfs_work)
if i == 0
on_stack[at] = true
id += 1
ids[at] = id
low[at] = id
push!(dfs_stack, at)
end
recurse = false
# only works because `di_ei` is sorted
for j in (index_ei[at] + i):(index_ei[at + 1] - 1)
to = di_ej[j]
# println("$to is successor of $at")
if ids[to] == -1
push!(dfs_work, (at, j - index_ei[at] + 1))
push!(dfs_work, (to, 0))
recurse = true
break
elseif on_stack[to]
low[at] = min(low[at], ids[to])
end
end
recurse && continue
# if at is the representative of the group
if ids[at] == low[at]
# take from stack as long as the representative doesn't appear
# and put all of them in the same scc
while true
w = pop!(dfs_stack)
on_stack[w] = false
group_id[w] = c_group_id
w == at && break
end
c_group_id += 1
end
if !isempty(dfs_work)
w = at
at, _ = dfs_work[end]
low[at] = min(low[at], low[w])
end
end
end
return group_id
end
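# A minimal usage sketch. `SCCInit` is defined elsewhere in the package; the
# struct below only assumes the fields this function actually touches.
#
#     struct SCCInit
#         index_ei::Vector{Int}
#         ids::Vector{Int}
#         low::Vector{Int}
#         on_stack::Vector{Bool}
#         group_id::Vector{Int}
#     end
#     n = 4
#     di_ei = [1, 2, 3, 3]    # sorted in ascending order, as required
#     di_ej = [2, 3, 1, 4]    # cycle 1 -> 2 -> 3 -> 1 plus edge 3 -> 4
#     init = SCCInit(zeros(Int, n + 1), zeros(Int, n), zeros(Int, n),
#                    falses(n), zeros(Int, n))
#     scc(di_ei, di_ej, init) # groups {1, 2, 3} together and {4} alone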
[source: src/constraints/all_different/scc.jl from Azzaare/ConstraintSolver.jl (Julia, MIT)]
# Author - K.G. Abeywardena
# Date - 29/01/2020
import sys
import numpy as np
import matplotlib.pyplot as plt
from scipy.linalg import eigh
import matplotlib
plt.style.use('classic')
#Function for creating the Hamiltonian matrix
def Hamiltonian(N, dx, potential, h_bar, mass):
"""
This fucntion creates the Hamiltoninan matrix that is required to calculate the numerical solution
for the Schrodinger's 1-D Wave Equation for a given 1-D potential curve
Inputs:
N : Number of data points to consider
dx : Differnce between two consequetive x values
potential : Potential function
h_bar : -(m*)/(2h) where m* = equivalent mass and h = plank constant
mass : Mass of the particle
Outputs:
norm vectors : Normalized eigen vectors of the Hamiltonian Matrix
eigen energy : Eigen Values of the Hamiltonian Matrix
probability : Probability Density Distribution
"""
Laplacian = (-2*np.diag(np.ones((N)),0) + np.diag(np.ones((N-1)),1) + np.diag(np.ones((N-1)),-1))/(dx**2)
H = -(h_bar**2/(2*mass))*Laplacian + np.diag(potential)
eigen_energy, eigen_vectors = eigh(H)
probability = np.abs(eigen_vectors)**2
K = np.sum(probability, axis=0)*dx
norm_vectors = eigen_vectors/np.sqrt(K) #Normalizing the eigen vectors
probability /= K #Normalizing the PDFs
return norm_vectors, eigen_energy, probability
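# Sanity-check sketch (not in the original script): with h_bar = mass = 1 the
# potential V = x^2 is a harmonic oscillator with omega = sqrt(2), so the
# eigenvalues should approach E_n = sqrt(2) * (n + 1/2).
#
#     x = np.linspace(-10, 10, 1000)
#     _, E, _ = Hamiltonian(len(x), x[1] - x[0], x**2, 1, 1)
#     print(E[:3])   # roughly [0.707, 2.121, 3.536]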
#Plotting Functions
def plotWavefunc(x, potential, num_states, wave_vects, energy, func_name):
"""
This function is for the visualization of the Potential Energy Functions and
    Corresponding Wave Functions, which are the solutions to the wave equation.
Inputs:
x : Data points
potential : Potential Energy Function
num_states : Number of energy primary states
wave_vects : Wave function - as a set of vectors
energy : Energy values of each state
func_name : Function name of potential energy curve
Outputs:
None
"""
plt.figure('WaveFunc')
Labels = []
for i in range(num_states-1, -1, -1):
plt.plot(x, wave_vects[:,i] + energy[i])
Labels.append('E = {:10.4f}eV'.format(energy[i]))
Labels.append('Potential Energy Function')
if 'Linear' in func_name:
plt.plot(x, np.transpose(potential)*max(max(energy[:num_states+1]), max(potential))/max(potential+10**-40), 'k--')
else:
plt.plot(x, np.transpose(potential)*min(max(energy[:num_states+1]), max(potential))/max(potential+10**-40), 'k--')
plt.xlabel('Distance (x)', fontdict={'weight': 'bold', 'size': 16, 'color': 'black'})
plt.ylabel('Energy (eV)', fontdict={'weight': 'bold', 'size': 16, 'color': 'black'})
plt.legend(Labels, loc='upper right', fontsize = 'medium')
plt.title(f'Wave Functions for {func_name}', fontdict={'weight': 'bold', 'size': 18, 'color': 'black'})
plt.grid(b=True)
plt.autoscale(tight=True, axis='both')
plt.savefig(f'wavePlot-{func_name}.png')
plt.autoscale(tight=True)
plt.close('all')
return
def plotProbfunc(x, potential, num_states, prob_vects, energy, func_name):
"""
This function is for the visualization of the Potential Energy Functions and
    Corresponding PDFs, which are the solutions to the wave equation.
Inputs:
x : Data points
potential : Potential Energy Function
num_states : Number of energy primary states
prob_vects : PDF of each wave - as a set of vectors
energy : Energy values of each state
func_name : Function name of potential energy curve
Outputs:
None
"""
plt.figure('PDF')
Labels = []
for i in range(num_states-1, -1, -1):
plt.plot(x, prob_vects[:,i] + energy[i])
Labels.append('E = {:10.4f}eV'.format(energy[i]))
Labels.append('Potential Energy Function')
if 'Linear' in func_name:
plt.plot(x, np.transpose(potential)*max(max(energy[:num_states+1]), max(potential))/max(potential+10**-40), 'k--')
else:
plt.plot(x, np.transpose(potential)*min(max(energy[:num_states+1]), max(potential))/max(potential+10**-40), 'k--')
plt.xlabel('Distance (x)', fontdict={'weight': 'bold', 'size': 16, 'color': 'black'})
plt.ylabel('Energy (eV)', fontdict={'weight': 'bold', 'size': 16, 'color': 'black'})
plt.legend(Labels, loc='upper right', fontsize = 'medium')
plt.title(f'PDF for {func_name}', fontdict={'weight': 'bold', 'size': 16, 'color': 'black'})
plt.grid(b=True)
plt.autoscale(tight=True, axis='both')
plt.savefig(f'probPlot-{func_name}.png')
plt.autoscale(tight=True)
plt.close('all')
return
#Potential Energy Functions
def FreeParticle(N, dx, h_bar, mass, x, n_states):
V_x = np.zeros((N))
wave_vec, energy, prob = Hamiltonian(N,dx, V_x, h_bar, mass)
plotWavefunc(x, V_x, n_states, wave_vec, energy, 'Free-Particle')
plotProbfunc(x, V_x, n_states, prob, energy, 'Free-Particle')
return
def infiniteWell(N, dx, h_bar, mass, x, n_states):
n = np.floor(N/3).astype('int32')
V_x = np.ones((N)) * 2000
V_x[n:2*n] = 0
wave_vec, energy, prob = Hamiltonian(N,dx, V_x, h_bar, mass)
    plotWavefunc(x, V_x, n_states, wave_vec, energy, 'Near Infinite Potential Well')
    plotProbfunc(x, V_x, n_states, prob, energy, 'Near Infinite Potential Well')
return
def finiteWell(N, dx, h_bar, mass, x, n_states):
n = np.floor(N/3).astype('int32')
V_x = np.ones((N)) * 10
V_x[n:2*n] = 0
wave_vec, energy, prob = Hamiltonian(N,dx, V_x, h_bar, mass)
    plotWavefunc(x, V_x, n_states, wave_vec, energy, 'Finite Potential Well')
    plotProbfunc(x, V_x, n_states, prob, energy, 'Finite Potential Well')
return
def linearfunc(N, dx, h_bar, mass, x, n_states):
V_x = x
wave_vec, energy, prob = Hamiltonian(N,dx, V_x, h_bar, mass)
plotWavefunc(x, V_x, n_states, wave_vec, energy, 'Linear Potential Function')
plotProbfunc(x, V_x, n_states, prob, energy, 'Linear Potential Function')
return
def HarmonicOscillator(N, dx, h_bar, mass, x, n_states):
    V_x = x**2
    wave_vec, energy, prob = Hamiltonian(N,dx, V_x, h_bar, mass)
    plotWavefunc(x, V_x, n_states, wave_vec, energy, 'Harmonic Oscillator Potential Function')
    plotProbfunc(x, V_x, n_states, prob, energy, 'Harmonic Oscillator Potential Function')
return
def StepBarrier(N, dx, h_bar, mass, x, n_states):
V_x = np.zeros((N))
V_x[N//2:] += 10
wave_vec, energy, prob = Hamiltonian(N,dx, V_x, h_bar, mass)
plotWavefunc(x, V_x, n_states, wave_vec, energy, 'Step Potential Function')
plotProbfunc(x, V_x, n_states, prob, energy, 'Step Potential Function')
return
def Triangular(N, dx, h_bar, mass, x, n_states):
n = np.floor(N/3).astype('int32')
V_x = np.ones((N)) * 100
V_x[n:2*n] = x[n:2*n]*40
wave_vec, energy, prob = Hamiltonian(N,dx, V_x, h_bar, mass)
plotWavefunc(x, V_x, n_states, wave_vec, energy, 'Triangular Potential Function')
    plotProbfunc(x, V_x, n_states, prob, energy, 'Triangular Potential Function')
return
def CustomBarrier(N, dx, h_bar, mass, x, n_states):
n = np.floor(N/20).astype('int32')
V_x = np.zeros((N))
V_x[n*10:n*11] = 5
wave_vec, energy, prob = Hamiltonian(N,dx, V_x, h_bar, mass)
plotWavefunc(x, V_x, n_states, wave_vec, energy, 'Custom Potential Function')
plotProbfunc(x, V_x, n_states, prob, energy, 'Custom Potential Function')
return
if __name__ == '__main__':
if len(sys.argv) < 4:
        print('| python file | x limit (L) | number of points (N) | number of eigen states |')
else:
xLimit = int(sys.argv[1]) #user input for x-co-ordinate limit
N = int(sys.argv[2]) #user input for the number of samples
eigen_states = int(sys.argv[3]) #user input for the number of eigen states to look at
print('Enter- 0:Free particle | 1: Near infinite well | 2: Finite well | 3: Linear | 4: Harmonic Oscillator | 5: Step Barrier | 6: Triangular | 7: Custom Barrier')
        try:
            potential_func = int(input('Enter the potential function number (0-7):')) #user input for type of potential energy
        except ValueError:
            print('Invalid choice. Enter- 0:Free particle | 1: Near infinite well | 2: Finite well | 3: Linear | 4: Harmonic Oscillator | 5: Step Barrier | 6: Triangular | 7: Custom Barrier')
            sys.exit(1)
x = np.linspace(-xLimit, xLimit, N)
dx = x[1] - x[0]
h_bar = 1
mass = 1
plt.rcParams['figure.figsize'] = (10,8)
plt.rc('legend', fontsize=12)
plt.rcParams['legend.frameon'] = True
if potential_func == 0:
FreeParticle(N, dx, h_bar, mass, x, eigen_states )
elif potential_func == 1:
infiniteWell(N, dx, h_bar, mass, x, eigen_states )
elif potential_func == 2:
finiteWell(N, dx, h_bar, mass, x, eigen_states )
elif potential_func == 3:
linearfunc(N, dx, h_bar, mass, x, eigen_states )
elif potential_func == 4:
            HarmonicOscillator(N, dx, h_bar, mass, x, eigen_states )
elif potential_func == 5:
StepBarrier(N, dx, h_bar, mass, x, eigen_states )
elif potential_func == 6:
Triangular(N, dx, h_bar, mass, x, eigen_states )
elif potential_func == 7:
CustomBarrier(N, dx, h_bar, mass, x, eigen_states)
else:
            print('Invalid input')
[source: Python_scripts/Schrodinger-Solver-1D.py (Python, MIT)]
# coding: utf-8
# Copyright (c) 2021 AkaiKKRteam.
# Distributed under the terms of the Apache License, Version 2.0.
import shutil
from pymatgen.core.sites import PeriodicSite
from pymatgen.core import Structure
from pymatgen.analysis.structure_matcher import StructureMatcher
import json
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.io.cif import CifParser
import sys
import subprocess
import hashlib
import os
import numpy as np
import matplotlib.pyplot as plt
from .descriptor.RDF import RDFConverter
"""
requires
spglib >=1.16.2
pymatgen >=2022.0.14
"""
sys.path.append("/home/kino/tmp/AkaiKKRPythonUtil/util")
if "test_script" not in sys.modules:
from pyakaikkr import AkaikkrJob
from pyakaikkr.Error import *
from pyakaikkr import StructureSpeciesConverter
def _make_displc(anclr, displc=[0, 0, 0]):
displc_list = []
for anclr1 in anclr:
displc1_list = []
for z in anclr1:
displc1_list.append(displc)
displc_list.append(displc1_list)
return displc_list
def _run_it(specx, ciffile, directory, structurefile="structure.json",
displc=False, use_bravais=True, fmt="cif",
inputcard="inputcard_geom", outputcard="out_geom.log"):
"""make inputcard from ciffile and run specx
Args:
specx (str): specx path
ciffile (str): cif filename
directory (str): directory to write inputcard and execute specx
structurefile (str): json structure file. Defaults to "structure.json"
displc (bool, optional): add displc in param or not. Defaults to False.
        use_bravais (bool, optional): use Bravais lattice. Defaults to True.
        fmt (str, optional): ciffile format. Defaults to "cif".
        inputcard (str, optional): inputcard filename. Defaults to "inputcard_geom".
        outputcard (str, optional): outputcard filename. Defaults to "out_geom.log".
Raises:
CIF2KKRNoStructureError: failed to read structure
KKRFailedExecutionError: failed to run specx, self.return_code is also made.
Returns:
bool: success or failed
"""
os.makedirs(directory, exist_ok=True)
meta = {"ciffile": ciffile, "directory": directory}
filepath = os.path.join(directory, "meta.json")
with open(filepath, "w") as f:
json.dump(meta, f)
job = AkaikkrJob(directory)
param = job.default
param["maxitr"] = 0 # stop after showing structures
struc_param = None
# first read as primitive. if it fails, read as conventional
try:
struc_param = job.read_structure(
ciffile, use_bravais=use_bravais, cif_primitive=True, fmt=fmt)
except CIF2KKRSpgDifferentError as err:
print(err)
print("WARNING: read structure again by cif_primitive=False")
struc_param = job.read_structure(
ciffile, use_bravais=use_bravais, cif_primitive=False, fmt=fmt)
except CIF2KKRNsiteInconsistentError as err:
print(err)
print("WARNING: primitive and kkr nsite are inconsistent")
try:
struc_param = job.read_structure(
ciffile, use_bravais=use_bravais, cif_primitive=False, fmt=fmt)
except CIF2KKRNsiteInconsistentError as err:
print(err)
print("WARNING: primitive and kkr nsite are inconsistent")
# possibility CIF2KKRNsiteInconsistentError occurs
if struc_param is None:
raise CIF2KKRNoStructureError("no structure is read")
structurepath = os.path.join(directory, structurefile)
with open(structurepath, "w") as f:
json.dump(struc_param, f)
print("save to", structurepath)
param.update(struc_param)
if displc:
displc_param = {"displc": _make_displc(
param["anclr"], displc=[0, 0, 0])}
param.update(displc_param)
param.update({"go": "geom"})
job.make_inputcard(param, inputcard)
print("saved to", os.path.join(directory, inputcard))
if True:
job.run(specx, inputcard, outputcard)
else:
cmd = f"cd {directory}; {specx} < {inputcard} > {outputcard}"
ret = subprocess.call(cmd, shell=True)
if ret != 0:
raise KKRFailedExecutionError("return_code={}".format(ret))
stoppped_by_errtrp = job.check_stopped_by_errtrp(outputcard)
if stoppped_by_errtrp:
raise KKRFailedExecutionError("stopped by errtrp")
return struc_param
def _read_output(directory: str, outputcard: str) -> Structure:
"""read outputcar in directory
Args:
directory (str): directory of job
outputcard (str): outputcard filename
Returns:
pymatgen.core.Structure: structure
"""
akaikkr = AkaikkrJob(directory)
struc = akaikkr.make_pymatgenstructure(outputcard)
return struc
if False:
def make_rdf(structure: Structure, rcut=4.0) -> np.ndarray:
"""make radial distribution function
Args:
structure (pymatgen.core.structure): struture
rcut (float, optional): cut off distance. Defaults to 4.0.
Returns:
np.ndarray: radial distribution function
"""
nndistance = []
for site in structure.sites:
neighbor = structure.get_neighbors(site, r=rcut)
for nn in neighbor:
# nndistance.append(round(nn[1], nround))
nndistance.append(nn[1])
nndistance = np.array(nndistance)
nndistance.sort()
return nndistance
def show_equiv_matrix(prim_struc: Structure, input_analyzer: StructureMatcher) -> np.ndarray:
"""show qeuivalenet matrix
Args:
prim_struc (pymatgen.core.Structure): structure
input_analyzer (pymatgen.analysis.structure_matcher.StructureMatcher): structure matcher
Returns:
        np.ndarray: equivalence matrix of sites under space-group operations
"""
print("lattice", prim_struc.lattice)
print("sites", prim_struc.sites)
print("specis", prim_struc.species)
species = prim_struc.species
print("species", species)
ops = input_analyzer.get_space_group_operations()
print(ops)
n = len(prim_struc.sites)
# equiv_matrix = np.full((n, n), False)
equiv_matrix = np.identity(n, dtype=bool)
for i1 in range(n):
for i2 in range(i1, n):
site1 = PeriodicSite(
species=species[i1], coords=prim_struc.sites[i1].frac_coords, lattice=prim_struc.lattice)
site2 = PeriodicSite(
species=species[i2], coords=prim_struc.sites[i2].frac_coords, lattice=prim_struc.lattice)
eq = ops.are_symmetrically_equivalent([site1], [site2])
equiv_matrix[i1, i2] = eq
for i1 in range(n):
for i2 in range(i1, n):
equiv_matrix[i2, i1] = equiv_matrix[i1, i2]
print(equiv_matrix)
return equiv_matrix
def _make_both_rdf(input_rdf: np.ndarray, output_rdf: np.ndarray, bins: int) -> (np.ndarray, np.ndarray, np.ndarray, np.ndarray):
"""[summary]
Args:
input_rdf (np.ndarray): input RDF
output_rdf (np.ndarray): output RDF
bins (int): number of bins
Returns:
np.ndarray: input histogram
np.ndarray: output histogram
np.ndarray: input histogram bin edges
np.ndarray: output histogram bin edges
"""
if False:
xmin = np.min([np.min(input_rdf), np.min(output_rdf)])
xmax = np.max([np.max(input_rdf), np.max(output_rdf)])
xrange = (xmin, xmax)
else:
xrange = []
for op in [np.min, np.max]:
xrange.append(op([op(input_rdf), op(output_rdf)]))
input_hist, input_edges = np.histogram(
input_rdf, bins=bins, range=xrange)
    output_hist, output_edges = np.histogram(
        output_rdf, bins=bins, range=xrange)
    return input_hist, output_hist, input_edges, output_edges
def _plot_rdf(input_rdf, output_rdf, directory, bins=200):
"""plot input and output RDF
Args:
input_rdf (nd.array): input RDF
output_rdf (nd.array): output RDF
directory (str): directory to save png
bins (int, optional): number of bins. Defaults to 200.
"""
if True:
input_hist, output_hist, input_binedges, output_binedges = _make_both_rdf(
input_rdf, output_rdf, bins)
input_bin = (input_binedges[:-1] + input_binedges[1:])*0.5
output_bin = (output_binedges[:-1] + output_binedges[1:])*0.5
width = input_bin[1]-input_bin[0]
fig, axes = plt.subplots(3, 1)
ax = axes[0]
ax.bar(input_bin, input_hist, width=width,
color="blue")
ax.set_title("cif (pymatgen)")
ax = axes[1]
ax.bar(output_bin, output_hist, width=width,
color="blue")
ax.set_title("kkr output")
ax = axes[2]
ax.axhline(0, linestyle="--")
ax.bar(input_bin, output_hist-input_hist, width=width,)
ax.set_xlabel("distance")
ax.set_title("difference")
else:
fig, axes = plt.subplots(3, 1)
ax = axes[0]
ax.hist(input_rdf,
color="blue", bins=bins)
ax.set_title("cif (pymatgen)")
ax = axes[1]
ax.hist(output_rdf,
color="red", bins=bins)
ax.set_title("kkr output")
ax = axes[2]
ax.hist(input_rdf,
alpha=0.5, color="blue", bins=bins)
ax.hist(output_rdf,
alpha=0.5, color="red", bins=bins)
ax.set_xlabel("distance")
ax.set_title("both")
fig.tight_layout()
filename = os.path.join(directory, "rdf.png")
fig.savefig(filename)
print("saved to", filename)
fig.clf()
plt.close(fig)
def _rdf_diff_is_zero(input_rdf, output_rdf, output_msg=False, bins=200):
"""whether the difference of RDF is zero or not
Args:
input_rdf (np.ndarray): input RDF
output_rdf (np.ndarray): output RDF
output_msg (str, optional): returns output message or bool. Defaults to False.
bins (int, optional): number of bins. Defaults to 200.
Returns:
        list or bool: diff messages if output_msg is True, otherwise whether the difference is zero
"""
input_hist, output_hist, input_binedges, output_binedges = _make_both_rdf(
input_rdf, output_rdf, bins)
msg = []
s = "rdf difference, bins= {}".format(bins)
print(s)
msg.append(s)
diff = output_hist-input_hist
print("_rdf_diff_is_zero", diff)
msg.append(str(diff))
if output_msg:
return msg
else:
res = np.all(diff == 0)
print("np.all(diff == 0)", res)
return res
class CompareCifKkr:
SUCCESS = "success"
FAILED = "failed"
NO_STRUCTURE = "no_structure"
NO_ELEMENT = "unknown_element_in_input_file"
FAILED_TO_GET_SPG_INFO = "failed_to_get_spginfo"
FAILED_TO_RUN_SPECX = "failed_to_run_specx"
NO_SPG_MATCH = "spg_not_match"
def __init__(self, ciffile: str, specx: str, displc: bool = False, fmt: str = "cif",
parent_directory: str = "output",
inputcard: str = "inputcard_geom", outputcard: str = "out_geom.log",
Vc: str = "Og", structurefile: str = "structure.json"):
"""Constructure
Args:
ciffile (str): a cif file
specx (str): akaikkr program
displc (bool, optional): include displc field or not. Defaults to False.
fmt (str, optional): format of the cif file. Defaults to "cif".
inputcard (str, optional): inputcard name. Defaults to "inputcard_geom".
outputcard (str, optional): outputcard name. Defaults to "out_geom.log".
Vc (str, optional): element treated as Z=0. Defaults to "Og".
structurefile (str, optional): structure json file. Defaults to "structure.json".
"""
self.ciffile = ciffile
self.specx = specx
self.displc = displc
self.fmt = fmt
self.Vc = Vc
self.structurefile = structurefile
self.inputcard = inputcard
self.outputcard = outputcard
self.all_done = False
self.parent_directory = parent_directory
hashstr = hashlib.md5(ciffile.encode()).hexdigest()
self.directory = hashstr
path = os.path.join(self.parent_directory, self.directory)
os.makedirs(path, exist_ok=True)
self.input_struc = None
self.prim_stand_struc = None
self.kkr_struc = None
self.struc_param = None
def matchedflag2msg(self, flag):
"""convert True|False to string message
Args:
flag (bool): structure is the same or not
Returns:
str: string message
"""
if flag:
return self.SUCCESS
else:
return self.FAILED
def input_struc_to(self, fmt="cif", filename="input.cif"):
filepath = os.path.join(self.parent_directory,
self.directory, filename)
print("input_struc_to filepath", filepath)
try:
self.prim_stand_struc.to(fmt=fmt, filename=filepath)
except IndexError:
print(f"failed to write {filepath}\n"
"probably because of uknown element.\nbut continue.")
def output_struc_to(self, fmt="cif", filename="output.cif"):
filepath = os.path.join(self.parent_directory,
self.directory, filename)
try:
self.kkr_struc.to(fmt=fmt, filename=filepath)
except IndexError:
print(f"failed to write {filepath}\n"
"probably because of uknown element.\nbut continue.")
def convert_and_compare(self, use_bravais=True, scale=False):
"""scale should be False
Args:
use_bravais (bool, optional): use bravais lattice. Defaults to True.
scale (bool, optional): scale flag of StructureMatcher. Defaults to True.
Raises:
ValueError: [description]
Returns:
[type]: [description]
"""
if self.fmt == "cif":
try:
parser = CifParser(self.ciffile)
except AssertionError:
self.msg = self.NO_STRUCTURE
return self.NO_STRUCTURE
try:
self.conv_struc = parser.get_structures(primitive=False)[0]
except ValueError:
self.msg = self.NO_STRUCTURE
return self.NO_STRUCTURE
self.input_struc = self.conv_struc
try:
self.prim_struc = parser.get_structures(primitive=True)[0]
except ValueError:
self.msg = self.NO_STRUCTURE
return self.NO_STRUCTURE
self.input_struc = self.prim_struc
elif self.fmt == "vasp" or self.fmt == "poscar":
try:
self.prim_struc = Structure.from_file(self.ciffile)
except ValueError:
self.msg = self.NO_STRUCTURE
return self.NO_STRUCTURE
self.conv_struc = self.prim_struc # dummy
self.input_struc = self.prim_struc
self.conv_struc_spg_info = None
try:
self.conv_struc_spg_info = self.conv_struc.get_space_group_info()
except TypeError:
self.msg = "failed to get spginfo of conventional structure"
return self.FAILED_TO_GET_SPG_INFO
if self.conv_struc_spg_info is not None:
print("get_structures(primitive=False), symmetry, nsite",
self.conv_struc_spg_info, len(self.conv_struc.sites))
self.prim_struc_spg_info = None
try:
self.prim_struc_spg_info = self.prim_struc.get_space_group_info()
except TypeError:
self.msg = "failed to get spginfo of primitive structure"
return self.FAILED_TO_GET_SPG_INFO
if self.prim_struc_spg_info is not None:
print("get_structures(primitive=True), symmetry, nsite",
self.prim_struc_spg_info, len(self.prim_struc.sites))
analyzer = SpacegroupAnalyzer(self.conv_struc)
try:
self.prim_stand_struc = analyzer.get_primitive_standard_structure()
except AttributeError as err:
self.msg = "failed to use SpacegroupAnalyzer. Probably it is an alloy."
print(self.msg)
self.prim_stand_struc = None
if self.prim_stand_struc is None:
# try StructureSpeciesConverter
structurespeciesconverter = StructureSpeciesConverter(
self.conv_struc)
substitutedstructure = structurespeciesconverter.structure
analyzer = SpacegroupAnalyzer(
substitutedstructure)
cs_structure = analyzer.get_primitive_standard_structure()
converted_structure = structurespeciesconverter.inverse_conversion(
cs_structure)
self.prim_stand_struc = converted_structure
# delete local variables
del substitutedstructure
del cs_structure
del analyzer
self.prim_stand_struc_spg_info = self.prim_stand_struc.get_space_group_info()
print("prim_stand_struc, symmetry, nsite",
self.prim_stand_struc.get_space_group_info(),
len(self.prim_stand_struc.sites))
# use prim_stand_struc
self.input_struc = self.prim_stand_struc
self.input_struc_spg_info = self.prim_stand_struc_spg_info
# @property self.input_struc = self.prim_stand_struc
if self.input_struc is not None:
self.input_struc_to()
if self.input_struc:
inputcard = self.inputcard
outputcard = self.outputcard
try:
self.struc_param = _run_it(self.specx, self.ciffile,
directory=os.path.join(
self.parent_directory, self.directory),
structurefile=self.structurefile,
displc=self.displc, use_bravais=use_bravais,
fmt=self.fmt,
inputcard=inputcard, outputcard=outputcard)
except CIF2KKRGetStructureError as err:
self.msg = str(err)
return self.NO_STRUCTURE
except CIF2KKRGetConventionalStandardStructureError as err:
self.msg = str(err)
return self.FAILED
except CIF2KKRSpgDifferentError as err:
self.msg = str(err)
return self.FAILED
except CIF2KKRCellShapeError as err:
self.msg = str(err)
return self.FAILED
except CIF2KKRUnknownElementError as err:
self.msg = str(err)
return self.NO_ELEMENT
except CIF2KKRNoStructureError as err:
self.msg = str(err)
return self.NO_STRUCTURE
except KKRFailedExecutionError as err:
self.msg = str(err)
return self.FAILED_TO_RUN_SPECX
try:
self.kkr_struc = _read_output(os.path.join(
self.parent_directory, self.directory), outputcard)
except ValueError:
self.msg = self.NO_ELEMENT
return self.NO_ELEMENT
if self.kkr_struc:
self.output_struc_to()
else:
self.msg = self.NO_STRUCTURE
return self.NO_STRUCTURE
self.kkr_struc_spg_info = self.kkr_struc.get_space_group_info()
print("kkr_struc, symmetry, nsite", self.kkr_struc_spg_info,
len(self.kkr_struc.sites))
if self.input_struc and self.kkr_struc:
            matcher = StructureMatcher(scale=scale)
try:
self.kkr_struc_spg_info = self.kkr_struc_spg_info
except TypeError:
print(
"failed to get space group symbol of structure. but continue.")
raise TypeError
print("kkr space group symbol, nsite",
self.kkr_struc_spg_info[1], len(self.kkr_struc.sites))
input_struc = self.input_struc
kkr_struc = self.kkr_struc
print("input prim structure num_site",
input_struc.num_sites)
print("kkr prim structure num_site", kkr_struc.num_sites)
# compare by size
if len(input_struc.sites) != len(kkr_struc.sites):
self.msg = "# of sites are different. skip showing diff of min_dist_matrix"
matched = False
return self.matchedflag2msg(matched)
matched = matcher.fit(input_struc, kkr_struc)
if matched:
self.msg = "matched by structure matcher"
else:
# compare by rdf
rdfconverter = RDFConverter()
input_rdf = rdfconverter.make_nndistance(input_struc)['all']
self.input_rdf = input_rdf
output_rdf = rdfconverter.make_nndistance(kkr_struc)['all']
self.output_rdf = output_rdf
_plot_rdf(input_rdf, output_rdf, directory=os.path.join(
self.parent_directory, self.directory))
if _rdf_diff_is_zero(input_rdf, output_rdf):
self.msg = "matched by rdf"
matched = True
# optional check
        # spg may not match because of a bug in spglib
spgmatch = [True, True, True]
if self.input_struc_spg_info[1] != self.kkr_struc_spg_info[1]:
self.msg = "input and kkr space group is different {} != {}".format(
self.input_struc_spg_info[1], self.kkr_struc_spg_info[1])
print("WARNING", self.msg)
spgmatch[0] = False
if self.prim_struc_spg_info[1] != self.kkr_struc_spg_info[1]:
self.msg = "prim and kkr space group is different {} != {}".format(
self.prim_struc_spg_info[1], self.kkr_struc_spg_info[1])
print("WARNING", self.msg)
spgmatch[1] = False
print(self.conv_struc_spg_info, self.kkr_struc_spg_info)
if self.conv_struc_spg_info[1] != self.kkr_struc_spg_info[1]:
self.msg = "conv and kkr space group is different {} != {}".format(
self.conv_struc_spg_info[1], self.kkr_struc_spg_info[1])
print("WARNING", self.msg)
spgmatch[2] = False
spgmatch = np.array(spgmatch)
if np.any(spgmatch == False):
self.msg = self.matchedflag2msg(
matched)+", but "+self.NO_SPG_MATCH
return self.matchedflag2msg(matched)
self.msg = "structure isn't the same"
return self.matchedflag2msg(matched)
def log_msg(self, msg, filename=None):
lines = []
lines.append("ciffile = " + self.ciffile)
lines.append("directory = " + self.directory)
lines.append("msg = " + msg)
try:
lines.append("prim space group, number {}".format(
self.prim_struc_spg_info))
except AttributeError:
pass
try:
lines.append("conv space group, number {}".format(
self.conv_struc_spg_info))
except AttributeError:
pass
try:
lines.append("input space group, number {}".format(
self.input_struc_spg_info))
except AttributeError:
pass
try:
lines.append("output space group, number {}".format(
self.kkr_struc_spg_info))
except AttributeError:
pass
try:
lines.append("input structure len, {}".format(
self.input_struc.num_sites))
except AttributeError:
pass
try:
lines.append("output structure len, {}".format(
self.kkr_struc.num_sites))
except AttributeError:
lines.append("probably failed to make kkr structure")
try:
if self.input_struc.num_sites == self.kkr_struc.num_sites:
lines.append("input structure {}".format(self.input_struc))
lines.append(("output structure {}".format(self.kkr_struc)))
except AttributeError:
pass
if self.input_struc is not None and self.kkr_struc is not None:
try:
x = self.input_rdf
except:
rdfconverter = RDFConverter()
self.input_rdf = rdfconverter.make_nndistance(self.input_struc)[
'all']
try:
x = self.output_rdf
except:
rdfconverter = RDFConverter()
self.output_rdf = rdfconverter.make_nndistance(self.kkr_struc)[
'all']
msg = _rdf_diff_is_zero(
self.input_rdf, self.output_rdf, output_msg=True)
lines.extend(msg)
_plot_rdf(self.input_rdf, self.output_rdf, directory=os.path.join(
self.parent_directory, self.directory))
else:
if self.input_struc is None:
lines.append("input_struc is None")
if self.kkr_struc is None:
lines.append("kkr_struc is None")
lines.append("")
lines.append("")
if filename is not None:
with open(filename, "a") as f:
f.write("\n".join(lines))
print()
print("appended to", filename)
print()
return lines
def get_structure_param(self, remove_temporary_files=True):
struc_file = os.path.join(self.parent_directory,
self.directory, self.structurefile)
with open(struc_file) as f:
struc_param = json.load(f)
if remove_temporary_files:
path_remove = os.path.join(self.parent_directory,
self.directory)
shutil.rmtree(path_remove, ignore_errors=True)
try:
os.rmdir(self.parent_directory)
except OSError:
pass  # parent directory not empty; leave it in place
return struc_param
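# Minimal usage sketch (illustrative only; the constructor is an assumption,
# since only the tail of the class is shown here -- log_msg() and
# get_structure_param() are the methods defined above):
# cmp = CompareCifKkr(...)  # hypothetical construction
# lines = cmp.log_msg("matched by structure matcher", filename="compare.log")
# param = cmp.get_structure_param(remove_temporary_files=False)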
|
{"hexsha": "2281c1db764c4ecb2874422fcdf39511609d9a3f", "size": 26601, "ext": "py", "lang": "Python", "max_stars_repo_path": "library/PyAkaiKKR/pyakaikkr/CompareCifKkr.py", "max_stars_repo_name": "AkaiKKRteam/AkaiKKRPythonUtil", "max_stars_repo_head_hexsha": "be716747de83c9f1787b6ac1c9a61ef725a643dd", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "library/PyAkaiKKR/pyakaikkr/CompareCifKkr.py", "max_issues_repo_name": "AkaiKKRteam/AkaiKKRPythonUtil", "max_issues_repo_head_hexsha": "be716747de83c9f1787b6ac1c9a61ef725a643dd", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2021-11-26T06:28:13.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-18T05:45:11.000Z", "max_forks_repo_path": "library/PyAkaiKKR/pyakaikkr/CompareCifKkr.py", "max_forks_repo_name": "AkaiKKRteam/AkaiKKRPythonUtil", "max_forks_repo_head_hexsha": "be716747de83c9f1787b6ac1c9a61ef725a643dd", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-11-26T03:06:15.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-26T04:45:31.000Z", "avg_line_length": 36.2411444142, "max_line_length": 129, "alphanum_fraction": 0.5964437427, "include": true, "reason": "import numpy", "num_tokens": 6127}
|
library(ggplot2)
m = 1 #rotation order
phi = seq(0,2*pi,length.out = 100)
r = seq(0,1,length.out = 50)
real_filter = function(r,phi,m)exp(-r^2)*cos(phi*m)
img_filter = function(r,phi,m)exp(-r^2)*sin(phi*m)
df = expand.grid(phi = phi, r = r)
df$real_filter = real_filter(df$r,df$phi,m)
df$img_filter = img_filter(df$r,df$phi,m)
df$input = 0
df$input[df$phi > 0 & df$phi < pi/6 & df$r>0.48 & df$r<0.52] = 1
df$input[df$phi > pi/12-0.01 & df$phi < pi/12+0.01 & df$r>0.3 & df$r<0.7] = 1
p_in <- ggplot(df) + geom_tile(aes(phi,r,fill=input))+ coord_polar(direction = -1,start=3*pi/2)
# plot the original image on the polar coordinates
plot(p_in)
df$input_rotate = 0
df$input_rotate[df$phi > 0+pi/2 & df$phi < pi/6+pi/2 & df$r>0.48 & df$r<0.52] = 1
df$input_rotate[df$phi > pi/12+pi/2-0.01 & df$phi < pi/12+0.01+pi/2 & df$r>0.3 & df$r<0.7] = 1
p_in_rotate <- ggplot(df) + geom_tile(aes(phi,r,fill=input_rotate))+ coord_polar(direction = -1,start=3*pi/2)
# plot the rotated image on the polar coordinates (pi/2, counter-clockwise)
plot(p_in_rotate)
# kernel weights
p_real <- ggplot(df) + geom_tile(aes(phi,r,fill=real_filter))+ coord_polar(direction = -1,start=3*pi/2)
p_img <- ggplot(df) + geom_tile(aes(phi,r,fill=img_filter))+ coord_polar(direction = -1,start=3*pi/2)
# Real and imaginary parts of the convolution, which together form a complex response
out_real = sum(df$input*df$real_filter)
out_img = sum(df$input*df$img_filter)
out_real_rotate = sum(df$input_rotate*df$real_filter)
out_img_rotate = sum(df$input_rotate*df$img_filter)
plot(0,0,type='n',xlim = c(-100,100),ylim = c(-100,100))
abline(h=0,lty = 2)
abline(v=0,lty = 2)
lines(c(0,out_real),c(0,out_img),col=2)
lines(c(0,out_real_rotate),c(0,out_img_rotate),col=3)
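# Added sanity check (sketch): for an m-th order harmonic filter, rotating the
# input should preserve the response magnitude and shift its phase by m*angle
# (angle = pi/2 here), up to discretization error on the grid.
mag = sqrt(out_real^2 + out_img^2)
mag_rotate = sqrt(out_real_rotate^2 + out_img_rotate^2)
phase_diff = atan2(out_img_rotate, out_real_rotate) - atan2(out_img, out_real)
print(c(mag, mag_rotate))     # the two magnitudes should roughly agree
print(phase_diff %% (2*pi))   # should be close to m*pi/2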
|
{"hexsha": "9f1f6ec4122f1bb9c84ee809c29675dc0e1c6b5a", "size": 1758, "ext": "r", "lang": "R", "max_stars_repo_path": "project/proof-of-concept/harmonic_filters.r", "max_stars_repo_name": "amorehead/Equivariant-GNNs", "max_stars_repo_head_hexsha": "4e81136242a4c8905b0e5fc39be5f704a42cc5e1", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-10-07T12:53:51.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-04T19:26:08.000Z", "max_issues_repo_path": "project/proof-of-concept/harmonic_filters.r", "max_issues_repo_name": "amorehead/Equivariant-GNNs", "max_issues_repo_head_hexsha": "4e81136242a4c8905b0e5fc39be5f704a42cc5e1", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "project/proof-of-concept/harmonic_filters.r", "max_forks_repo_name": "amorehead/Equivariant-GNNs", "max_forks_repo_head_hexsha": "4e81136242a4c8905b0e5fc39be5f704a42cc5e1", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.3548387097, "max_line_length": 109, "alphanum_fraction": 0.6854379977, "num_tokens": 633}
|
\section{Conclusions}
\label{sec:conclusions}
In this work we presented the implementation of a bot for botnets with a centralized C\&C layer. The developed bot is primarily intended for testing and educational demonstration, but it is ready to interact with a real web controller.
The implemented architecture makes it straightforward to extend the bot code for educational experimentation.
Our bot can be configured and instructed through convenient web user interfaces.
|
{"hexsha": "5981dfe99c6fa92db0374fe9888d25cb20a9678f", "size": 468, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "botnet/sec/conclusions.tex", "max_stars_repo_name": "gmarciani/research", "max_stars_repo_head_hexsha": "7cc526fe7cd9916ceaf8285c4e4bc4dce4028537", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2017-07-27T13:31:43.000Z", "max_stars_repo_stars_event_max_datetime": "2018-07-20T12:54:12.000Z", "max_issues_repo_path": "botnet/sec/conclusions.tex", "max_issues_repo_name": "gmarciani/research", "max_issues_repo_head_hexsha": "7cc526fe7cd9916ceaf8285c4e4bc4dce4028537", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "botnet/sec/conclusions.tex", "max_forks_repo_name": "gmarciani/research", "max_forks_repo_head_hexsha": "7cc526fe7cd9916ceaf8285c4e4bc4dce4028537", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2018-02-17T13:30:49.000Z", "max_forks_repo_forks_event_max_datetime": "2018-02-17T13:30:49.000Z", "avg_line_length": 66.8571428571, "max_line_length": 237, "alphanum_fraction": 0.8247863248, "num_tokens": 90}
|
from typing import Dict, List
import numpy as np
def _extract_batch_length(preds: Dict[str, np.ndarray]) -> int:
"""Extracts batch length of predictions."""
batch_length = None
for key, value in preds.items():
batch_length = batch_length or value.shape[0]
if value.shape[0] != batch_length:
raise ValueError(f"Batch length of predictions should be same. {key} has different batch length than others.")
return batch_length
def unbatch_preds(preds: Dict[str, np.ndarray]) -> List[Dict[str, np.ndarray]]:
"""Unbatch predictions, as in estimator.predict().
"""
return [{key: value[i] for key, value in preds.items()} for i in range(_extract_batch_length(preds))]
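if __name__ == "__main__":
    # Tiny self-check (added sketch, not part of the original module):
    demo = {"logits": np.zeros((2, 3)), "ids": np.arange(2)}
    assert _extract_batch_length(demo) == 2
    assert len(unbatch_preds(demo)) == 2
    print("unbatch_preds demo OK")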
|
{"hexsha": "1356136d76ab0b3e15e7fabb67e26264953c836b", "size": 721, "ext": "py", "lang": "Python", "max_stars_repo_path": "expats/common/tensor.py", "max_stars_repo_name": "octanove/expats", "max_stars_repo_head_hexsha": "fd06a50b4c38505135c075b47df0b84003c51ab3", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 19, "max_stars_repo_stars_event_min_datetime": "2021-04-09T06:21:45.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T00:21:37.000Z", "max_issues_repo_path": "expats/common/tensor.py", "max_issues_repo_name": "octanove/expats", "max_issues_repo_head_hexsha": "fd06a50b4c38505135c075b47df0b84003c51ab3", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "expats/common/tensor.py", "max_forks_repo_name": "octanove/expats", "max_forks_repo_head_hexsha": "fd06a50b4c38505135c075b47df0b84003c51ab3", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-09-06T08:05:34.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-24T09:16:07.000Z", "avg_line_length": 34.3333333333, "max_line_length": 122, "alphanum_fraction": 0.6879334258, "include": true, "reason": "import numpy", "num_tokens": 167}
|
MODULE ED_SPARSE_MATRIX !THIS VERSION STORES COMPLEX(8) ELEMENTS (CSR ROWS)
USE SF_IOTOOLS, only: str,free_unit
USE SF_CONSTANTS, only: zero
#ifdef _MPI
USE SF_MPI
USE MPI
#endif
implicit none
private
type sparse_row_csr
integer :: size !actual number of stored elements in the row
complex(8),dimension(:),allocatable :: vals
integer,dimension(:),allocatable :: cols
end type sparse_row_csr
type sparse_matrix_csr
type(sparse_row_csr),dimension(:),pointer :: row
integer :: Nrow
integer :: Ncol
logical :: status=.false.
#ifdef _MPI
integer :: istart=0 !global start index for MPI storage
integer :: iend=0
integer :: ishift=0
logical :: mpi=.false.
#endif
end type sparse_matrix_csr
!INIT SPARSE MATRICES
interface sp_init_matrix
module procedure :: sp_init_matrix_csr
#ifdef _MPI
module procedure :: mpi_sp_init_matrix_csr
#endif
end interface sp_init_matrix
!DELETE SPARSE MATRIX
interface sp_delete_matrix
module procedure :: sp_delete_matrix_csr
#ifdef _MPI
module procedure :: mpi_sp_delete_matrix_csr
#endif
end interface sp_delete_matrix
!INSERT ELEMENTS
interface sp_insert_element
module procedure :: sp_insert_element_csr
#ifdef _MPI
module procedure :: mpi_sp_insert_element_csr
#endif
end interface sp_insert_element
!LOAD STANDARD MATRIX INTO SPARSE MATRICES
interface sp_load_matrix
module procedure :: sp_load_matrix_csr
#ifdef _MPI
module procedure :: mpi_sp_load_matrix_csr
#endif
end interface sp_load_matrix
!DUMP SPARSE MATRIX INTO STANDARD MATRIX
interface sp_dump_matrix
module procedure :: sp_dump_matrix_csr
#ifdef _MPI
module procedure :: mpi_sp_dump_matrix_csr
#endif
end interface sp_dump_matrix
!SPY PRINT SPARSE MATRIX
interface sp_spy_matrix
module procedure :: sp_spy_matrix_csr
#ifdef _MPI
module procedure :: mpi_sp_spy_matrix_csr
#endif
end interface sp_spy_matrix
#ifdef _MPI
interface sp_set_mpi_matrix
module procedure :: sp_set_mpi_matrix_csr
end interface sp_set_mpi_matrix
#endif
!Linked-List Sparse Matrix
public :: sparse_matrix_csr
public :: sp_init_matrix !init the sparse matrix !checked
public :: sp_delete_matrix !delete the sparse matrix !checked
public :: sp_insert_element !insert an element !checked
public :: sp_load_matrix !create sparse from array !checked
public :: sp_dump_matrix !dump sparse into array !checked
public :: sp_spy_matrix !
#ifdef _MPI
public :: sp_set_mpi_matrix
#endif
interface add_to
module procedure :: add_to_I
module procedure :: add_to_D
module procedure :: add_to_Z
end interface add_to
integer :: MpiIerr
contains
!+------------------------------------------------------------------+
!PURPOSE: initialize the sparse matrix list
!+------------------------------------------------------------------+
subroutine sp_init_matrix_csr(sparse,N,N1)
type(sparse_matrix_csr),intent(inout) :: sparse
integer :: N
integer,optional :: N1
integer :: i
!
!put here a delete statement to avoid problems
if(sparse%status)stop "sp_init_matrix: alreay allocate can not init"
!
sparse%Nrow=N
sparse%Ncol=N
if(present(N1))sparse%Ncol=N1
!
allocate(sparse%row(N))
do i=1,N
sparse%row(i)%size=0
allocate(sparse%row(i)%vals(0)) !empty array
allocate(sparse%row(i)%cols(0)) !empty array
end do
!
sparse%status=.true.
!
end subroutine sp_init_matrix_csr
#ifdef _MPI
subroutine mpi_sp_init_matrix_csr(MpiComm,sparse,N,N1)
integer :: MpiComm
type(sparse_matrix_csr),intent(inout) :: sparse
integer :: N
integer,optional :: N1
integer :: i,Ncol,Nloc
!
if(MpiComm==Mpi_Comm_Null)return
!
call sp_test_matrix_mpi(MpiComm,sparse,"mpi_sp_init_matrix_csr")
!
Ncol = N
if(present(N1))Ncol=N1
!
Nloc = sparse%iend-sparse%istart+1
!
call sp_init_matrix_csr(sparse,Nloc,Ncol)
!
end subroutine mpi_sp_init_matrix_csr
#endif
!+------------------------------------------------------------------+
!PURPOSE: delete an entire sparse matrix
!+------------------------------------------------------------------+
subroutine sp_delete_matrix_csr(sparse)
type(sparse_matrix_csr),intent(inout) :: sparse
integer :: i
!
if(.not.sparse%status)return !stop "Warning SPARSE/sp_delete_matrix: sparse not allocated already."
!
do i=1,sparse%Nrow
deallocate(sparse%row(i)%vals)
deallocate(sparse%row(i)%cols)
sparse%row(i)%Size = 0
enddo
deallocate(sparse%row)
!
sparse%Nrow=0
sparse%Ncol=0
sparse%status=.false.
#ifdef _MPI
sparse%istart = 0
sparse%iend = 0
sparse%ishift = 0
sparse%mpi = .false.
#endif
end subroutine sp_delete_matrix_csr
#ifdef _MPI
subroutine mpi_sp_delete_matrix_csr(MpiComm,sparse)
integer :: MpiComm
type(sparse_matrix_csr),intent(inout) :: sparse
integer :: i
!
if(MpiComm==Mpi_Comm_Null)return
!
if(.not.sparse%status)return !stop "Error SPARSE/mpi_sp_delete_matrix: sparse is not allocated."
!
do i=1,sparse%Nrow
deallocate(sparse%row(i)%vals)
deallocate(sparse%row(i)%cols)
sparse%row(i)%Size = 0
enddo
deallocate(sparse%row)
!
sparse%Nrow=0
sparse%Ncol=0
sparse%status=.false.
!
sparse%istart=0
sparse%iend=0
sparse%ishift=0
sparse%mpi=.false.
!
end subroutine mpi_sp_delete_matrix_csr
#endif
!+------------------------------------------------------------------+
!PURPOSE: insert an element value at position (i,j) in the sparse matrix
!+------------------------------------------------------------------+
subroutine sp_insert_element_csr(sparse,value,i,j)
type(sparse_matrix_csr),intent(inout) :: sparse
complex(8),intent(in) :: value
integer,intent(in) :: i,j
type(sparse_row_csr),pointer :: row
integer :: column,pos
logical :: iadd
!
column = j
!
row => sparse%row(i)
!
iadd = .false. !check if column already exists
if(any(row%cols == column))then !
pos = binary_search(row%cols,column) !find the position column in %cols
iadd=.true. !set Iadd to true
endif
!
if(iadd)then !this column exists so just sum it up
row%vals(pos)=row%vals(pos) + value !add up value to the current one in %vals
else !this column is new. increase counter and store it
! row%vals = [row%vals,value]
! row%cols = [row%cols,column]
call add_to(row%vals,value)
call add_to(row%cols,column)
row%Size = row%Size + 1
endif
!
if(row%Size > sparse%Ncol)stop "sp_insert_element_csr ERROR: row%Size > sparse%Ncol"
!
end subroutine sp_insert_element_csr
#ifdef _MPI
subroutine mpi_sp_insert_element_csr(MpiComm,sparse,value,i,j)
integer :: MpiComm
type(sparse_matrix_csr),intent(inout) :: sparse
complex(8),intent(in) :: value
integer,intent(in) :: i,j
type(sparse_row_csr),pointer :: row
integer :: column,pos
logical :: iadd
!
if(MpiComm==Mpi_Comm_Null)return
!
call sp_test_matrix_mpi(MpiComm,sparse," mpi_sp_insert_element_csr")
!
column = j
!
row => sparse%row(i-sparse%Ishift)
!
iadd = .false. !check if column already exists
if(any(row%cols == column))then !
pos = binary_search(row%cols,column) !find the position column in %cols
iadd=.true. !set Iadd to true
endif
!
if(iadd)then !this column exists so just sum it up
row%vals(pos)=row%vals(pos) + value !add up value to the current one in %vals
else !this column is new. increase counter and store it
! row%vals = [row%vals,value]
! row%cols = [row%cols,column]
call add_to(row%vals,value)
call add_to(row%cols,column)
row%Size = row%Size + 1
endif
!
if(row%Size > sparse%Ncol)stop "mpi_sp_insert_element_csr ERROR: row%Size > sparse%Ncol"
!
end subroutine mpi_sp_insert_element_csr
#endif
!+------------------------------------------------------------------+
!PURPOSE: load a regular matrix (2dim array) into a sparse matrix
!+------------------------------------------------------------------+
subroutine sp_load_matrix_csr(matrix,sparse)
complex(8),dimension(:,:),intent(in) :: matrix
type(sparse_matrix_csr),intent(inout) :: sparse
integer :: i,j,Ndim1,Ndim2
!
Ndim1=size(matrix,1)
Ndim2=size(matrix,2)
!
if(sparse%status)call sp_delete_matrix_csr(sparse)
call sp_init_matrix_csr(sparse,Ndim1,Ndim2)
!
do i=1,Ndim1
do j=1,Ndim2
if(matrix(i,j)/=zero)call sp_insert_element_csr(sparse,matrix(i,j),i,j)
enddo
enddo
end subroutine sp_load_matrix_csr
#ifdef _MPI
subroutine mpi_sp_load_matrix_csr(MpiComm,matrix,sparse)
integer :: MpiComm
complex(8),dimension(:,:),intent(in) :: matrix
type(sparse_matrix_csr),intent(inout) :: sparse
integer :: i,j,Ndim1,Ndim2
!
if(MpiComm==Mpi_Comm_Null)return
!
call sp_test_matrix_mpi(MpiComm,sparse," mpi_sp_load_matrix_csr")
!
Ndim1=size(matrix,1)
Ndim2=size(matrix,2)
!
if(sparse%status)call sp_delete_matrix_csr(sparse)
call mpi_sp_init_matrix_csr(MpiComm,sparse,Ndim1,Ndim2)
!
do i=sparse%Istart,sparse%Iend
do j=1,Ndim2
if(matrix(i,j)/=zero)call mpi_sp_insert_element_csr(MpiComm,sparse,matrix(i,j),i,j)
enddo
enddo
end subroutine mpi_sp_load_matrix_csr
#endif
!+------------------------------------------------------------------+
!PURPOSE: dump a sparse matrix into a regular 2dim array
!+------------------------------------------------------------------+
subroutine sp_dump_matrix_csr(sparse,matrix)
type(sparse_matrix_csr),intent(in) :: sparse
complex(8),dimension(:,:),intent(inout) :: matrix
integer :: i,j,Ndim1,Ndim2
!
Ndim1=size(matrix,1)
Ndim2=size(matrix,2)
!
if(sparse%Nrow/=Ndim1 .OR. sparse%Ncol/=Ndim2)stop "Warning SPARSE/dump_matrix: dimensions error"
!
do i=1,Ndim1
do j=1,sparse%row(i)%Size
matrix(i,sparse%row(i)%cols(j)) = matrix(i,sparse%row(i)%cols(j)) + sparse%row(i)%vals(j)
enddo
enddo
end subroutine sp_dump_matrix_csr
#ifdef _MPI
subroutine mpi_sp_dump_matrix_csr(MpiComm,sparse,matrix)
integer :: MpiComm
type(sparse_matrix_csr),intent(in) :: sparse
complex(8),dimension(:,:),intent(inout) :: matrix
complex(8),dimension(:,:),allocatable :: matrix_tmp
integer :: i,impi,j,N1_,N2_,Ndim1,Ndim2,Nrow,Ncol
!
if(MpiComm==Mpi_Comm_Null)return
!
call sp_test_matrix_mpi(MpiComm,sparse," mpi_sp_dump_matrix_csr")
!
Ndim1=size(matrix,1)
Ndim2=size(matrix,2)
!
N1_ = sparse%Nrow
N2_ = sparse%Ncol
Nrow = 0
Ncol = 0
call MPI_AllReduce(N1_,Nrow,1,MPI_Integer,MPI_SUM,MpiComm,MpiIerr)
call MPI_AllReduce(N2_,Ncol,1,MPI_Integer,MPI_MAX,MpiComm,MpiIerr)
!
if(Nrow>Ndim1 .OR. Ncol>Ndim2)stop "Warning SPARSE/mpi_dump_matrix: dimensions error"
!
allocate(matrix_tmp(Ndim1,Ndim2));matrix_tmp=zero
do i=sparse%Istart,sparse%Iend
impi = i - sparse%Ishift
do j=1,sparse%row(impi)%Size
matrix_tmp(i,sparse%row(impi)%cols(j))=matrix_tmp(i,sparse%row(impi)%cols(j))+sparse%row(impi)%vals(j)
enddo
enddo
!
! Matrix=0d0
call AllReduce_Mpi(MpiComm,Matrix_tmp,Matrix)
!
end subroutine mpi_sp_dump_matrix_csr
#endif
!+------------------------------------------------------------------+
!PURPOSE: write gnuplot data and command files to "spy"-plot the sparse matrix
!+------------------------------------------------------------------+
subroutine sp_spy_matrix_csr(sparse,header)
type(sparse_matrix_csr) :: sparse
character ( len = * ) :: header
integer :: N1,N2
character ( len = 255 ) :: command_filename
integer :: command_unit
character ( len = 255 ) :: data_filename
integer :: data_unit
integer :: i, j
integer :: nz_num
character ( len = 255 ) :: png_filename
!
! Create data file.
!
!
N1 = sparse%Nrow
N2 = sparse%Ncol
data_filename = trim ( header ) // '_data.dat'
open (unit=free_unit(data_unit), file = data_filename, status = 'replace' )
nz_num = 0
do i=1,N1
do j=1,sparse%row(i)%size
write(data_unit,'(2x,i6,2x,i6)') sparse%row(i)%cols(j),i
nz_num = nz_num + 1
enddo
enddo
close(data_unit)
!
! Create command file.
!
command_filename = "plot_"//str(header)//'_commands.gp'
open(unit = free_unit(command_unit), file = command_filename, status = 'replace' )
write(command_unit,'(a)') '#unset key'
write(command_unit,'(a)') 'set terminal postscript eps enhanced color font "Times-Roman,16"'
write(command_unit,'(a)') 'set output "|ps2pdf -sEPSCrop - '//str(header)//".pdf"//'"'
write(command_unit,'(a)') 'set size ratio -1'
write(command_unit,'(a)') 'set xlabel "<--- J --->"'
write(command_unit,'(a)') 'set ylabel "<--- I --->"'
write(command_unit,'(a,i6,a)')'set title "',nz_num,' nonzeros for '//str(header)//'"'
write(command_unit,'(a)') 'set timestamp'
write(command_unit,'(a)' )'plot [x=1:'//str(N1)//'] [y='//str(N2)//':1] "'//&
str(data_filename)//'" w p pt 5 ps 0.4 lc rgb "red"'
close ( unit = command_unit )
return
end subroutine sp_spy_matrix_csr
#ifdef _MPI
subroutine mpi_sp_spy_matrix_csr(MpiComm,sparse,header)
integer :: MpiComm
type(sparse_matrix_csr) :: sparse
character ( len = * ) :: header
integer :: N1,N2,N1_,N2_
character ( len = 255 ) :: command_filename
integer :: command_unit
character ( len = 255 ) :: data_filename(1)
integer :: data_unit
integer :: i, j
integer :: nz_num,mpirank
character ( len = 255 ) :: png_filename
!
if(MpiComm==Mpi_Comm_Null)return
!
call sp_test_matrix_mpi(MpiComm,sparse," mpi_sp_spy_matrix_csr")
!
MpiRank = get_Rank_MPI(MpiComm)
!
! Create data file.
!
N1_=sparse%Nrow
N2_=sparse%Ncol
N1=0
N2=0
call MPI_AllReduce(N1_,N1,1,MPI_Integer,MPI_SUM,MpiComm,MpiIerr)
call MPI_AllReduce(N2_,N2,1,MPI_Integer,MPI_MAX,MpiComm,MpiIerr)
!
nz_num = 0
!
data_filename(1) = trim(header)//"_rank"//str(MpiRank,4)//'_matrix.dat'
open(unit=free_unit(data_unit),file=data_filename(1), status = 'replace' )
do i=1,sparse%Nrow
do j=1,sparse%row(i)%Size
write(data_unit,'(2x,i6,2x,i6)') sparse%row(i)%cols(j),i+sparse%Ishift
nz_num = nz_num + 1
enddo
enddo
write(data_unit,'(2x,i6,2x,i6)')
close(data_unit)
!
!
call MPI_Barrier(MpiComm,MpiIerr)
!
! Create command file.
!
command_filename = "plot_"//trim(header)//"_rank"//str(MpiRank,4)//'_commands.gp'
open(unit = free_unit(command_unit), file = command_filename, status = 'replace' )
write(command_unit,'(a)') '#unset key'
write(command_unit,'(a)') 'set terminal postscript eps enhanced color font "Times-Roman,16"'
write(command_unit,'(a)') 'set output "|ps2pdf -sEPSCrop - '//str(header)//"_rank"//str(MpiRank,4)//".pdf"//'"'
write(command_unit,'(a)') 'set size ratio -1'
write(command_unit,'(a)') 'set xlabel "<--- J --->"'
write(command_unit,'(a)') 'set ylabel "<--- I --->"'
write(command_unit,'(a,i6,a)' ) &
'set title "',nz_num,' nonzeros for '//str(header)//"_rank"//str(MpiRank,4)//'"'
write(command_unit,'(a)') 'set timestamp'
write(command_unit,'(a)' )'plot [x=1:'//str(N1)//'] [y='//str(N2)//':1] "'//&
str(data_filename(1))//'" w p pt 5 ps 0.4 lc rgb "red"'
close ( unit = command_unit )
return
end subroutine mpi_sp_spy_matrix_csr
#endif
#ifdef _MPI
subroutine sp_set_mpi_matrix_csr(MpiComm,sparse,istart,iend,ishift)
integer :: MpiComm
type(sparse_matrix_csr),intent(inout) :: sparse
integer :: istart,iend,ishift
!
if(MpiComm==Mpi_Comm_Null)return
!
sparse%istart = istart
sparse%iend = iend
sparse%ishift = ishift
sparse%mpi = .true.
end subroutine sp_set_mpi_matrix_csr
subroutine sp_test_matrix_mpi(MpiComm,sparse,text)
integer :: MpiComm
type(sparse_matrix_csr),intent(in) :: sparse
character(len=*) :: text
integer :: MpiRank
!
if(MpiComm==Mpi_Comm_Null)stop "sp_test_matrix_mpi ERROR: called in with MpiComm = Mpi_Comm_Null"
!
MpiRank = get_Rank_MPI(MpiComm)
if(.not.sparse%mpi)then
print*,"Rank, Error in "//trim(text)//": mpi no set"
stop
endif
end subroutine sp_test_matrix_mpi
#endif
!##################################################################
!##################################################################
! AUXILIARY COMPUTATIONAL ROUTINES
!##################################################################
!##################################################################
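!NB (added note): binary_search below does not assume a sorted input; it
!sorts a local copy and maps the found position back through the original
!ordering via the Order array.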
recursive function binary_search(Ain,value) result(bsresult)
integer,intent(in) :: Ain(:), value
integer :: bsresult, mid
integer,dimension(size(Ain)) :: A,Order
!
a = ain
call sort_array(a,Order)
!
mid = size(a)/2 + 1
if (size(a) == 0) then
bsresult = 0 ! not found
!stop "binary_search error: value not found"
else if (a(mid) > value) then
bsresult= binary_search(a(:mid-1), value)
else if (a(mid) < value) then
bsresult = binary_search(a(mid+1:), value)
if (bsresult /= 0) then
bsresult = mid + bsresult
end if
else
bsresult = mid ! SUCCESS!!
end if
!
if (bsresult /= 0) bsresult = Order(bsresult) !guard: 0 signals "not found"
!
end function binary_search
subroutine add_to_I(vec,val)
integer,dimension(:),allocatable,intent(inout) :: vec
integer,intent(in) :: val
integer,dimension(:),allocatable :: tmp
integer :: n
!
if (allocated(vec)) then
n = size(vec)
allocate(tmp(n+1))
tmp(:n) = vec
call move_alloc(tmp,vec)
n = n + 1
else
n = 1
allocate(vec(n))
end if
!
!Put val as last entry:
vec(n) = val
!
if(allocated(tmp))deallocate(tmp)
end subroutine add_to_I
subroutine add_to_D(vec,val)
real(8),dimension(:),allocatable,intent(inout) :: vec
real(8),intent(in) :: val
real(8),dimension(:),allocatable :: tmp
integer :: n
!
if (allocated(vec)) then
n = size(vec)
allocate(tmp(n+1))
tmp(:n) = vec
call move_alloc(tmp,vec)
n = n + 1
else
n = 1
allocate(vec(n))
end if
!
!Put val as last entry:
vec(n) = val
!
if(allocated(tmp))deallocate(tmp)
end subroutine add_to_D
subroutine add_to_Z(vec,val)
complex(8),dimension(:),allocatable,intent(inout) :: vec
complex(8),intent(in) :: val
complex(8),dimension(:),allocatable :: tmp
integer :: n
!
if (allocated(vec)) then
n = size(vec)
allocate(tmp(n+1))
tmp(:n) = vec
call move_alloc(tmp,vec)
n = n + 1
else
n = 1
allocate(vec(n))
end if
!
!Put val as last entry:
vec(n) = val
!
if(allocated(tmp))deallocate(tmp)
end subroutine add_to_Z
!+------------------------------------------------------------------+
!PURPOSE : Sort an array, gives the new ordering of the label.
!+------------------------------------------------------------------+
subroutine sort_array(array,order)
implicit none
integer,dimension(:) :: array
integer,dimension(size(array)) :: order
integer,dimension(size(array)) :: backup
integer :: i
forall(i=1:size(array))order(i)=i
call qsort_sort(array, order,1, size(array))
do i=1,size(array)
backup(i)=array(order(i))
enddo
array=backup
contains
recursive subroutine qsort_sort( array, order, left, right )
integer, dimension(:) :: array
integer, dimension(:) :: order
integer :: left
integer :: right
integer :: i
integer :: last
if ( left .ge. right ) return
call qsort_swap( order, left, qsort_rand(left,right) )
last = left
do i = left+1, right
if ( compare(array(order(i)), array(order(left)) ) .lt. 0 ) then
last = last + 1
call qsort_swap( order, last, i )
endif
enddo
call qsort_swap( order, left, last )
call qsort_sort( array, order, left, last-1 )
call qsort_sort( array, order, last+1, right )
end subroutine qsort_sort
!---------------------------------------------!
subroutine qsort_swap( order, first, second )
integer, dimension(:) :: order
integer :: first, second
integer :: tmp
tmp = order(first)
order(first) = order(second)
order(second) = tmp
end subroutine qsort_swap
!---------------------------------------------!
integer function qsort_rand( lower, upper )
integer :: lower, upper
real(8) :: r
call random_number(r)
qsort_rand = lower + nint(r * (upper-lower))
end function qsort_rand
!---------------------------------------------!
function compare(f,g)
implicit none
integer :: f,g
integer :: compare
if(f<g) then
compare=-1
else
compare=1
endif
end function compare
end subroutine sort_array
end module ED_SPARSE_MATRIX
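! Minimal serial usage sketch (added illustration; left commented because the
! module depends on the SciFortran imports above):
! program test_sparse
!   use ED_SPARSE_MATRIX
!   implicit none
!   type(sparse_matrix_csr) :: sp
!   complex(8) :: dense(2,2)
!   call sp_init_matrix(sp,2)
!   call sp_insert_element(sp,(1d0,0d0),1,1)
!   call sp_insert_element(sp,(0d0,2d0),2,1)
!   dense = (0d0,0d0)
!   call sp_dump_matrix(sp,dense)   ! dense now holds the two inserted entries
!   call sp_delete_matrix(sp)
! end program test_sparse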
|
{"hexsha": "9f3880f7a5ff2f066fb6bf6da9e5a8c5b6ea86e1", "size": 23989, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "ED_SPARSE_MATRIX.f90", "max_stars_repo_name": "QcmPlab/CDMFT-LANC-ED", "max_stars_repo_head_hexsha": "e76127efc4eb5552f474828be6b6dceca5abef09", "max_stars_repo_licenses": ["FSFAP"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-07-10T08:01:18.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-10T08:01:18.000Z", "max_issues_repo_path": "ED_SPARSE_MATRIX.f90", "max_issues_repo_name": "QcmPlab/CDMFT-LANC-ED", "max_issues_repo_head_hexsha": "e76127efc4eb5552f474828be6b6dceca5abef09", "max_issues_repo_licenses": ["FSFAP"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ED_SPARSE_MATRIX.f90", "max_forks_repo_name": "QcmPlab/CDMFT-LANC-ED", "max_forks_repo_head_hexsha": "e76127efc4eb5552f474828be6b6dceca5abef09", "max_forks_repo_licenses": ["FSFAP"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.2891414141, "max_line_length": 115, "alphanum_fraction": 0.543999333, "num_tokens": 6216}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 22 21:50:45 2018
@author: amajidsinar
"""
import numpy as np
class Knn():
def __init__(self, k, dist='euc'):
avDist = ['euc', 'manhattan']
if dist not in avDist:
raise ValueError("dist must be one of %s" % avDist)
self.k = k
self.dist = dist
def fit(self,data_known,label_known):
self.data_known = data_known
self.label_known = label_known
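# NOTE (added): the distance helpers broadcast (n_known, d) against
# (n_unknown, 1, d); summing over axis 2 yields an (n_unknown, n_known)
# distance matrix.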
def L2_distance(self):
# squared Euclidean distance; monotone in L2, so the neighbour ranking is unchanged
diff = self.data_known - self.data_unknown.reshape((self.data_unknown.shape[0],1,self.data_unknown.shape[1]))
return (diff**2).sum(2)
def manhattan(self):
diff = self.data_known - self.data_unknown.reshape((self.data_unknown.shape[0],1,self.data_unknown.shape[1]))
return np.abs(diff).sum(2)
def predict(self, data_unknown):
self.data_unknown = data_unknown
#sort label
if self.dist == 'euc':
dist_index = np.argsort(self.L2_distance())
else:
dist_index = np.argsort(self.manhattan())
label = self.label_known[dist_index]
#only pick until kth index
label = label[:,:self.k]
#return the mode
label_predict = []
for i in range(self.data_unknown.shape[0]):
values,counts = np.unique(label[i], return_counts=True)
ind = np.argmax(counts)
label_predict.append(values[ind])
return label_predict
import random
def split(data_known,label_known,training_percentage):
# keep the full arrays intact; sampled indices select the training split
data_set = data_known
label = label_known
#take percentage*len(data)
index = random.sample(range(len(data_known)),int(training_percentage*len(data_known)))
data_known = data_set[index]
label_known = label[index]
data_unknown = np.delete(data_set, index, axis=0)
label_unknown = np.delete(label, index, axis=0)
return (data_known,label_known,data_unknown,label_unknown)
# load the digits dataset
from sklearn import datasets
digits = datasets.load_digits()
data_known = digits.data
label_known = digits.target
data_known,label_known,data_unknown,label_unknown = split(data_known,label_known,0.8)
val_accuracy = []
for i in np.arange(1,40):
knn = Knn(i)
knn.fit(data_known,label_known)
label_predict = knn.predict(data_unknown)
performance = np.mean(label_predict == label_unknown)
val_accuracy.append(performance)
import matplotlib.pyplot as plt
plt.title('accuracy vs k plot of digit recognition')
plt.xlabel('k')
plt.ylabel('accuracy')
plt.plot(np.arange(1, 40), val_accuracy)
|
{"hexsha": "f69a8f652b848fe8393199a957c31e0204a448ac", "size": 2721, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/KnnClassifier-OOP-Tensor.py", "max_stars_repo_name": "python-itb/knn-from-scratch", "max_stars_repo_head_hexsha": "dbc6fb53cffb245a76d35b9ff85ac8cb21877ca8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/KnnClassifier-OOP-Tensor.py", "max_issues_repo_name": "python-itb/knn-from-scratch", "max_issues_repo_head_hexsha": "dbc6fb53cffb245a76d35b9ff85ac8cb21877ca8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2018-03-20T06:47:32.000Z", "max_issues_repo_issues_event_max_datetime": "2018-10-25T10:54:08.000Z", "max_forks_repo_path": "src/KnnClassifier-OOP-Tensor.py", "max_forks_repo_name": "python-itb/knn-from-scratch", "max_forks_repo_head_hexsha": "dbc6fb53cffb245a76d35b9ff85ac8cb21877ca8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2018-03-20T06:43:11.000Z", "max_forks_repo_forks_event_max_datetime": "2019-04-15T16:34:28.000Z", "avg_line_length": 26.9405940594, "max_line_length": 117, "alphanum_fraction": 0.6541712606, "include": true, "reason": "import numpy", "num_tokens": 650}
|
using CartesianGP
using Base.Test
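# Note (added): the gates operate on whole truth tables packed into a
# BitString: ZERO is the all-zeros word, ONE the all-ones word (typemax), so
# each bitwise test below exercises every input combination at once.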
# ZERO
@test ZERO.func() == 0
# ONE
@test ONE.func() == typemax(BitString)
# AND
@test AND.func([ZERO.func(), ZERO.func()]...) == ZERO.func()
@test AND.func([ONE.func(), ZERO.func()]...) == ZERO.func()
@test AND.func([ZERO.func(), ONE.func()]...) == ZERO.func()
@test AND.func([ONE.func(), ONE.func()]...) == ONE.func()
# OR
@test OR.func([ZERO.func(), ZERO.func()]...) == ZERO.func()
@test OR.func([ONE.func(), ZERO.func()]...) == ONE.func()
@test OR.func([ZERO.func(), ONE.func()]...) == ONE.func()
@test OR.func([ONE.func(), ONE.func()]...) == ONE.func()
# XOR
@test XOR.func([ZERO.func(), ZERO.func()]...) == ZERO.func()
@test XOR.func([ONE.func(), ZERO.func()]...) == ONE.func()
@test XOR.func([ZERO.func(), ONE.func()]...) == ONE.func()
@test XOR.func([ONE.func(), ONE.func()]...) == ZERO.func()
# NOT
@test NOT.func([ZERO.func()]...) == ONE.func()
@test NOT.func([ONE.func()]...) == ZERO.func()
|
{"hexsha": "fa8f017d1244ab1ae71acc4fe6a9d8e01a897c37", "size": 948, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/Func.jl", "max_stars_repo_name": "glesica/CartesianGP.jl", "max_stars_repo_head_hexsha": "e74bc164c9ae59a9318ebe6ec9aa99306d23131e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2017-10-04T09:25:21.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-17T01:19:14.000Z", "max_issues_repo_path": "test/Func.jl", "max_issues_repo_name": "glesica/CartesianGP.jl", "max_issues_repo_head_hexsha": "e74bc164c9ae59a9318ebe6ec9aa99306d23131e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2015-01-30T03:08:57.000Z", "max_issues_repo_issues_event_max_datetime": "2015-05-03T17:53:40.000Z", "max_forks_repo_path": "test/Func.jl", "max_forks_repo_name": "glesica/CartesianGP.jl", "max_forks_repo_head_hexsha": "e74bc164c9ae59a9318ebe6ec9aa99306d23131e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2016-11-29T07:08:55.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-01T18:09:41.000Z", "avg_line_length": 25.6216216216, "max_line_length": 60, "alphanum_fraction": 0.5664556962, "num_tokens": 248}
|
[STATEMENT]
theorem ll_001:
"t \<in> {20 .. 20} \<longrightarrow>
(s, x1, x2, x3, x4, x5, x6, x7) \<in>
{(0, 1.19, 1.04, 1.49, 2.39, 0.99, 0.09, 0.44) ..
(0, 1.21, 1.06, 1.51, 2.41, 1.01, 0.11, 0.46)} \<longrightarrow>
t \<in> ll.existence_ivl0 (s, x1, x2, x3, x4, x5, x6, x7) \<and>
ll.flow0 (s, x1, x2, x3, x4, x5, x6, x7) t \<in>
{(19.99, 0.8, 0.3, 0.5, 2.5, 0.1, 0.0, 0.25) ..
(20, 1.0, 0.5, 0.7, 3.0, 0.3, 0.1, 0.35)}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. t \<in> point_ivl 20 \<longrightarrow> (s, x1, x2, x3, x4, x5, x6, x7) \<in> {(0, 119 / 10\<^sup>2, 104 / 10\<^sup>2, 149 / 10\<^sup>2, 239 / 10\<^sup>2, 99 / 10\<^sup>2, 9 / 10\<^sup>2, 44 / 10\<^sup>2)..(0, 121 / 10\<^sup>2, 106 / 10\<^sup>2, 151 / 10\<^sup>2, 241 / 10\<^sup>2, 101 / 10\<^sup>2, 11 / 10\<^sup>2, 46 / 10\<^sup>2)} \<longrightarrow> t \<in> ll.existence_ivl0 (s, x1, x2, x3, x4, x5, x6, x7) \<and> ll.flow0 (s, x1, x2, x3, x4, x5, x6, x7) t \<in> {(1999 / 10\<^sup>2, 8 / 10, 3 / 10, 5 / 10, 25 / 10, 1 / 10, 0 / 10, 25 / 10\<^sup>2)..(20, 10 / 10, 5 / 10, 7 / 10, 30 / 10, 3 / 10, 1 / 10, 35 / 10\<^sup>2)}
[PROOF STEP]
by (tactic \<open>ode_bnds_tac @{thms laub_fas_def} 30 60 30 13 [(0, 4, "0x7f7f7f")] (* "out_ll_001.out" *) "" @{context} 1\<close>)
|
{"llama_tokens": 861, "file": "Ordinary_Differential_Equations_Ex_ARCH_COMP_Examples_ARCH_COMP", "length": 1}
|
Subroutine cgssor3(rhs,sol,n,m,nx)
!
! Solves via PCG with SSOR preconditioner.
! Uses a matrix-free stencil implementation on the space grid.
!
implicit none
real*8 rhs(n,m)
real*8 sol(n,m)
real,dimension (0:nx+1,0:nx+1,1:m):: u,p,q,r,rhat
real,dimension (0:nx+1) :: x,y
real,dimension (1:m+1) :: oldrho, rho,alpha
real :: error,dx2,w,time
integer ::i,j,k,kk,n,nx,m,mcg
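! w is the SSOR over-relaxation parameter (0 < w < 2); 1.7 is a typical choice.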
w = 1.7
u = 0.0
kk = m
r = 0.0
rhat = 0.0
q = 0.0
p = 0.0
do k = 1,kk
do j = 1,nx
do i = 1,nx
r(i,j,k) = rhs(i+(j-1)*(nx),k)
enddo
enddo
enddo
error = 1.
mcg = 0
rho = 0.0
do while ((error>.0001).and.(mcg<200))
mcg = mcg+1
oldrho = rho
! Execute SSOR preconditioner
do k=1,kk
do j= 1,nx
do i = 1,nx
rhat(i,j,k) = w*(r(i,j,k)+rhat(i-1,j,k)+rhat(i,j-1,k))*.25
enddo
enddo
enddo
rhat(1:nx,1:nx,1:kk) = ((2.-w)/w)*4.*rhat(1:nx,1:nx,1:kk)
do k = 1,kk
do j= nx,1,-1
do i = nx,1,-1
rhat(i,j,k) = w*(rhat(i,j,k)+rhat(i+1,j,k)+rhat(i,j+1,k))*.25
enddo
enddo
enddo
! Find conjugate direction
do k = 1,kk
rho(k) = sum(r(1:nx,1:nx,k)*rhat(1:nx,1:nx,k))
If (rho(k).ne.0.) then
if (mcg.eq.1) then
p(1:nx,1:nx,k) = rhat(1:nx,1:nx,k)
else
p(1:nx,1:nx,k) = rhat(1:nx,1:nx,k) +
1 (rho(k)/oldrho(k))*p(1:nx,1:nx,k)
endif
! Execute matrix product q = Ap
q(1:nx,1:nx,k)=4.0*p(1:nx,1:nx,k)-p(0:nx-1,1:nx,k)-
1 p(2:nx+1,1:nx,k)
1 - p(1:nx,0:nx-1,k) - p(1:nx,2:nx+1,k)
! Find steepest descent
alpha(k) = rho(k)/sum(p(1:nx,1:nx,k)*q(1:nx,1:nx,k))
u(1:nx,1:nx,k) = u(1:nx,1:nx,k) + alpha(k)*p(1:nx,1:nx,k)
r(1:nx,1:nx,k) = r(1:nx,1:nx,k) - alpha(k)*q(1:nx,1:nx,k)
endif
enddo
error = maxval(abs(r(1:nx,1:nx,1:kk)))
enddo
do k = 1,kk
do j = 1,nx
do i = 1,nx
sol(i+(j-1)*(nx),k) = u(i,j,k)
enddo
enddo
enddo
! print*, mcg ,error
end subroutine
|
{"hexsha": "10d00725c276b6a933b495fe39b2284735bb2c2e", "size": 2134, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "src/cgssor3.f", "max_stars_repo_name": "imohame/LabCode", "max_stars_repo_head_hexsha": "b7fca6e58f6c26917ff4a8862ab473da282d027d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/cgssor3.f", "max_issues_repo_name": "imohame/LabCode", "max_issues_repo_head_hexsha": "b7fca6e58f6c26917ff4a8862ab473da282d027d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/cgssor3.f", "max_forks_repo_name": "imohame/LabCode", "max_forks_repo_head_hexsha": "b7fca6e58f6c26917ff4a8862ab473da282d027d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-04-19T08:21:42.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-18T02:43:24.000Z", "avg_line_length": 26.0243902439, "max_line_length": 65, "alphanum_fraction": 0.4686035614, "num_tokens": 967}
|
from pdb import set_trace as T
import torch
from torch import nn, optim
from torch.nn import functional as F
from torch.nn.parameter import Parameter
from torch.autograd import Variable
from torch.distributions import Categorical
import numpy as np
import time
# "Same"-padded convolution helper (assumes odd kernel size k)
def Conv2d(fIn, fOut, k, stride=1):
pad = int((k-1)/2)
return torch.nn.Conv2d(fIn, fOut, k, stride=stride, padding=pad)
class StimNet(nn.Module):
def __init__(self, xdim, h, ydim):
super().__init__()
self.conv1 = Conv2d(8, int(h/2), 3, stride=2)
self.conv2 = Conv2d(int(h/2), h, 3, stride=2)
self.fc1 = torch.nn.Linear(5+4*4*h, h)
self.fc2 = torch.nn.Linear(h, ydim)
def forward(self, conv, flat):
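# Promote a single unbatched (C, H, W) sample to a batch of one.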
if len(conv.shape) == 3:
conv = conv.view(1, *conv.shape)
flat = flat.view(1, *flat.shape)
x, batch = conv, conv.shape[0]
x = torch.nn.functional.relu(self.conv1(x))
x = torch.nn.functional.relu(self.conv2(x))
x = x.view(batch, -1)
x = torch.cat((x, flat), dim=1)
x = torch.nn.functional.relu(self.fc1(x))
x = self.fc2(x)
pi = x.view(batch, -1)
return pi
def classify(logits):
#logits = logits + 0.15*torch.norm(logits)
distribution = Categorical(F.softmax(logits, dim=1))
atn = distribution.sample()
return atn
class ANN(nn.Module):
def __init__(self, xdim, h, ydim):
super().__init__()
self.stimNet = StimNet(xdim, 24, ydim)
self.valNet = StimNet(xdim, 24, 1)
#self.curNet = CurNet(xdim, 24, ydim)
self.conv, self.flat, self.ent, self.stim, self.idx = [], [], [], [], []
def recv(self, conv, flat, ent, stim, idx):
self.conv.append(conv)
self.flat.append(flat)
self.ent.append(ent)
self.stim.append(stim)
self.idx.append(idx)
def send(self):
conv = torch.stack(self.conv, dim=0)
flat = torch.stack(self.flat, dim=0)
pi, val, atn = [], [], []
#for c, f in zip(conv, flat):
# p, v, a = self.forward(c, f)
# pi.append(p)
# val.append(v)
# atn.append(a)
pi, val, atn = self.forward(conv, flat)
pi = [e.view(1, -1) for e in pi]
val = [e.view(1, -1) for e in val]
atn = [e.view(1) for e in atn]
ret = list(zip(pi, val, self.ent, self.stim, atn, self.idx))
self.conv, self.flat, self.ent, self.stim, self.idx = [], [], [], [], []
return ret
def forward(self, conv, flat):
pi = self.stimNet(conv, flat)
val = self.valNet(conv, flat)
atn = classify(pi)
#ri, li = self.curNet(ents, entID, atn, conv, flat)
return pi, val, atn
if __name__ == '__main__':
ann = ANN(1850, 32, 6)#.cuda()
batch = 100
conv = torch.rand(batch, 8, 15, 15)#.cuda()
flat = torch.rand(batch, 5)#.cuda()
while True:
start = time.time()
_ = ann(conv, flat)
print(1.0 / (time.time() - start))
|
{"hexsha": "7d38f72895165b26af0bc83eb3f4a8ba511c2856", "size": 2932, "ext": "py", "lang": "Python", "max_stars_repo_path": "jsuarez/tools/GPUTest.py", "max_stars_repo_name": "jarbus/neural-mmo", "max_stars_repo_head_hexsha": "7ad02fab50f2781c0a71f7d2afd10c1503110736", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1450, "max_stars_repo_stars_event_min_datetime": "2019-03-04T15:47:38.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T03:33:35.000Z", "max_issues_repo_path": "jsuarez/tools/GPUTest.py", "max_issues_repo_name": "jarbus/neural-mmo", "max_issues_repo_head_hexsha": "7ad02fab50f2781c0a71f7d2afd10c1503110736", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 34, "max_issues_repo_issues_event_min_datetime": "2019-03-05T09:50:38.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-31T15:20:27.000Z", "max_forks_repo_path": "jsuarez/tools/GPUTest.py", "max_forks_repo_name": "LaudateCorpus1/neural-mmo", "max_forks_repo_head_hexsha": "a9a7c34a1fb24fbf252e2958bdb869c213e580a3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 164, "max_forks_repo_forks_event_min_datetime": "2019-03-04T16:09:19.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-26T15:43:40.000Z", "avg_line_length": 28.4660194175, "max_line_length": 78, "alphanum_fraction": 0.5832196453, "include": true, "reason": "import numpy", "num_tokens": 912}
|
## -------->> [[file:../nstandr.src.org::*cockburn_combabbrev][cockburn_combabbrev:1]]
##' Collapses single character sequences
##'
##' @param x Object (table or vector)
##' @param wrap_in_spaces Whether to wrap strings in spaces before processing, because the algorithm assumes that each string begins and ends with a space. Default is TRUE.
##' @inheritDotParams standardize_options
##' @return standardized names table
##'
##' @md
##' @export
cockburn_combabbrev <- function(x
, wrap_in_spaces = TRUE
, ...) {
x_vector <- get_target(x)
## wrap in spaces
if (wrap_in_spaces) {
x_vector <- paste0(" ", x_vector, " ")
}
## collapse
sapply(x_vector, \(org_name) {
reg <- gregexpr("(?=\\s\\w(\\s+)\\w\\s)", org_name, perl = TRUE)
## check if there are matches
if(reg[[1]][1] != -1) {
char <- strsplit(org_name, "", fixed = TRUE) |> unlist()
pos <- mapply(function(from, length.out) seq(from, length.out = length.out)
, from = attr(reg[[1]],"capture.start")
, length.out = attr(reg[[1]],"capture.length")
, SIMPLIFY = FALSE) |> unlist()
char[pos] <- ""
char |> paste(collapse = "")
} else {
org_name
}
}, USE.NAMES = FALSE) |>
inset_target(x)
}
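## Illustrative example (added): cockburn_combabbrev("I B M CORP") collapses
## the single-letter run to give "IBM CORP" (the result keeps the wrapping
## spaces added internally, i.e. " IBM CORP ").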
## --------<< cockburn_combabbrev:1 ends here
## -------->> [[file:../nstandr.src.org::*Derwent][Derwent:1]]
##' @eval attr(cockburn_replace_derwent, "@title")
##' @description It is a version from Cockburn, I. M., A. Agrawal,
##' J. Bessen, J. H. S. Graham, B. H. Hall, and M. MacGarvie
##' (2009), The NBER Patent Citations Datafile Update. It differs
##' from the original Derwent standardization.
##' @inherit replace_patterns params return
##' @inheritDotParams standardize_options
##' @return standardized names table
##' @family magerman
##' @seealso replace_patterns
##'
##' @md
##' @export
cockburn_replace_derwent <- make_alias(replace_patterns
, patterns = cockburn_patterns_derwent
, patterns_mode = "first")
attr(cockburn_replace_derwent, "@title") <-
"Performs Derwent standardization of organizational names"
## --------<< Derwent:1 ends here
## -------->> [[file:../nstandr.src.org::*Compustat][Compustat:1]]
##' @eval attr(cockburn_replace_compustat, "@title")
##' @inherit replace_patterns params return
##' @inheritDotParams standardize_options
##' @return standardized names table
##' @family magerman
##' @seealso replace_patterns
##'
##' @md
##' @export
cockburn_replace_compustat <- make_alias(replace_patterns
, patterns = cockburn_patterns_compustat)
attr(cockburn_replace_compustat, "@title") <-
"COMPUSTAT specific standardization for organizational names"
##' @eval attr(cockburn_replace_compustat_names, "@title")
##' @inherit replace_patterns params return
##' @inheritDotParams standardize_options
##' @return standardized names table
##' @family magerman
##' @seealso replace_patterns
##'
##' @md
##' @export
cockburn_replace_compustat_names <- make_alias(replace_patterns
, patterns = cockburn_patterns_compustat_names
, patterns_type = "trim_exact")
attr(cockburn_replace_compustat_names, "@title") <-
"COMPUSTAT specific standardization for organizational names. Full name replacements."
## --------<< Compustat:1 ends here
## -------->> [[file:../nstandr.src.org::*Identify Entity Type][Identify Entity Type:1]]
##' Identifies Entity Type
##'
##' @param x vector or table
##' @param verbose For debugging. If set, messages which procedures were performed.
##' @inheritDotParams standardize_options
##' @return standardized names table
##'
##' @md
##' @export
cockburn_detect_type <- function(x
, verbose = FALSE
, ...) {
do_verbosely <- \(x, fun) {
fun_name <- deparse(substitute(fun))
if(verbose) message("- ", fun_name)
x <- do.call(fun, list(x))
return(x)
}
x |>
do_verbosely(cockburn_detect_corp) |>
do_verbosely(cockburn_detect_indiv) |>
do_verbosely(cockburn_detect_govt) |>
do_verbosely(cockburn_detect_univ) |>
do_verbosely(cockburn_detect_inst) |>
do_verbosely(cockburn_detect_inst_conds) |>
do_verbosely(cockburn_detect_inst_german) |>
do_verbosely(cockburn_detect_hosp)
}
##' Cleanup Entity Type
##'
##' @param x vector or table
##' @inheritDotParams replace_patterns
##' @return standardized names table
##'
##' @md
##' @export
cockburn_replace_type <- function(x, ...) {
x |>
cockburn_replace_govt() |>
cockburn_replace_univ()
}
## --------<< Identify Entity Type:1 ends here
## -------->> [[file:../nstandr.src.org::*Firms (Corporates)][Firms (Corporates):1]]
##' @eval attr(cockburn_detect_corp, "@title")
##' @description From non_corporates.do file. Source - https://sites.google.com/site/patentdataproject/Home/posts/namestandardizationroutinesuploaded
##' @inherit detect_patterns params return
##' @inheritDotParams standardize_options
##' @return standardized names table
##' @family magerman
##' @seealso detect_patterns
##'
##' @md
##' @export
cockburn_detect_corp <- make_alias(detect_patterns
, patterns = cockburn_patterns_corp
, output_codes_col_name = "{col_name_}entity_type"
, merge_existing_codes = "append_to_existing"
, patterns_codes = "firm"
, return_only_first_detected_code = TRUE)
attr(cockburn_detect_corp, "@title") <-
"Detect Corporates (code - 'firm')"
## --------<< Firms (Corporates):1 ends here
## -------->> [[file:../nstandr.src.org::*Individuals][Individuals:1]]
##' @eval attr(cockburn_detect_indiv, "@title")
##' @description From non_corporates.do file. Source -
##' https://sites.google.com/site/patentdataproject/Home/posts/namestandardizationroutinesuploaded
##' @inherit detect_patterns params return
##' @inheritDotParams standardize_options
##' @return standardized names table
##' @family magerman
##' @seealso detect_patterns
##'
##' @md
##' @export
cockburn_detect_indiv <- make_alias(detect_patterns
, patterns = cockburn_patterns_indiv
, patterns_codes = "indiv"
, output_codes_col_name = "{col_name_}entity_type"
, merge_existing_codes = "append_to_existing"
, return_only_first_detected_code = TRUE)
attr(cockburn_detect_indiv, "@title") <-
"Detect Individuals (Non-Corporates group)"
## --------<< Individuals:1 ends here
## -------->> [[file:../nstandr.src.org::*Government][Government:1]]
##' @eval attr(cockburn_detect_govt, "@title")
##' @description From non_corporates.do file. Source - https://sites.google.com/site/patentdataproject/Home/posts/namestandardizationroutinesuploaded
##' @inherit detect_patterns params return
##' @inheritDotParams standardize_options
##' @return standardized names table
##' @family magerman
##' @seealso detect_patterns
##'
##' @md
##' @export
cockburn_detect_govt <- make_alias(detect_patterns
, patterns = cockburn_patterns_govt
, patterns_codes = "govt"
, output_codes_col_name = "{col_name_}entity_type"
, merge_existing_codes = "append_to_existing"
, return_only_first_detected_code = TRUE)
attr(cockburn_detect_govt, "@title") <-
"Detect Goverment Organizations (Non-Corporates group)"
##' @eval attr(cockburn_replace_govt, "@title")
##' @description From non_corporates.do file. Source - https://sites.google.com/site/patentdataproject/Home/posts/namestandardizationroutinesuploaded
##' @inherit replace_patterns params return
##' @inheritDotParams standardize_options
##' @return standardized names table
##' @family magerman
##' @seealso replace_patterns
##'
##' @md
##' @export
cockburn_replace_govt <- make_alias(replace_patterns
, patterns = cockburn_patterns_govt_cleanup)
attr(cockburn_replace_govt, "@title") <-
"Cleanup Goverment Organizations (Non-Corporates group)"
## --------<< Government:1 ends here
## -------->> [[file:../nstandr.src.org::*Universities][Universities:1]]
##' @eval attr(cockburn_detect_univ, "@title")
##' @description From non_corporates.do file. Source - https://sites.google.com/site/patentdataproject/Home/posts/namestandardizationroutinesuploaded
##' @inherit detect_patterns params return
##' @inheritDotParams standardize_options
##' @return standardized names table
##' @family magerman
##' @seealso detect_patterns
##'
##' @md
##' @export
cockburn_detect_univ <- make_alias(detect_patterns
, patterns = cockburn_patterns_univ
, patterns_codes = "univ"
, output_codes_col_name = "{col_name_}entity_type"
, merge_existing_codes = "append_to_existing"
, return_only_first_detected_code = TRUE)
attr(cockburn_detect_univ, "@title") <-
"Detect Universities (Non-Corporates group)"
##' @eval attr(cockburn_replace_univ, "@title")
##' @description From non_corporates.do file. Source - https://sites.google.com/site/patentdataproject/Home/posts/namestandardizationroutinesuploaded
##' @inherit replace_patterns params return
##' @inheritDotParams standardize_options
##' @return standardized names table
##' @family magerman
##' @seealso replace_patterns
##'
##' @md
##' @export
cockburn_replace_univ <- make_alias(replace_patterns
, patterns = cockburn_patterns_univ_cleanup)
attr(cockburn_replace_univ, "@title") <-
"Cleanup Universities (Non-Corporates group)"
## --------<< Universities:1 ends here
## -------->> [[file:../nstandr.src.org::*Non-profit institutes][Non-profit institutes:1]]
##' @eval attr(cockburn_detect_inst, "@title")
##' @description From non_corporates.do file. Source - https://sites.google.com/site/patentdataproject/Home/posts/namestandardizationroutinesuploaded
##' @inherit detect_patterns params return
##' @inheritDotParams standardize_options
##' @return standardized names table
##' @family magerman
##' @seealso detect_patterns
##'
##' @md
##' @export
cockburn_detect_inst <- make_alias(detect_patterns
, patterns = cockburn_patterns_inst
, patterns_codes = "inst"
, output_codes_col_name = "{col_name_}entity_type"
, merge_existing_codes = "append_to_existing"
, return_only_first_detected_code = TRUE)
attr(cockburn_detect_inst, "@title") <-
"Detect Non-profit Institutes (Non-Corporates group)"
## --------<< Non-profit institutes:1 ends here
## -------->> [[file:../nstandr.src.org::*Complex conditions][Complex conditions:1]]
##' @eval attr(cockburn_detect_inst_conds_1, "@title")
##' @description STATA equivalent: replace asstype = "inst" if strpos(standard_name," COUNCIL OF ")>0 & strpos(standard_name," RES ")>0
##' @inherit detect_patterns params return
##' @inheritDotParams standardize_options
##' @return standardized names table
##' @family magerman
##' @seealso detect_patterns
##'
##' @md
##' @export
cockburn_detect_inst_conds_1 <- make_alias(detect_patterns
, patterns = " COUNCIL OF .* RES | RES .* COUNCIL OF "
, patterns_type = "regex"
, patterns_codes = "inst"
, output_codes_col_name = "{col_name_}entity_type"
, merge_existing_codes = "append_to_existing"
, return_only_first_detected_code = TRUE)
attr(cockburn_detect_inst_conds_1, "@title") <-
"Detects Non-profit institutes with special conditions"
##' Detects Non-profit institutes with special conditions
##'
##' STATA equivalent:
##' replace asstype = "inst" if strpos(standard_name," FOUND ")~=0 & asstype~="univ"
##' (we assume `~=0` is a bug in the original and read it as `>0`)
##' replace asstype = "inst" if strpos(standard_name," INST ")>0 & asstype~="univ"
##'
##' @param x table. Expected that x has a column with codes for universities
##' @param output_codes_col_name column with codes for universities ("univ"). Default is last column of x
##' @param merge_existing_codes same as in [detect_patterns()]
##' @inheritDotParams standardize_options
##' @return standardized names table
##'
##' @md
##' @export
cockburn_detect_inst_conds_2 <- function(x
, output_codes_col_name = "{col_name_}entity_type"
, merge_existing_codes = "append_to_existing"
, ...) {
conds <- get_target(x
, output_col_name = output_codes_col_name
, output_placement = "append_to_x"
, rows = NULL
, return_null_for_new_col = TRUE) |>
lapply(`%in%`, "univ") |>
sapply(any, na.rm = TRUE) |>
(\(conds) if(length(conds) == 0) NULL else !conds)()
rows <- get_col_and_rows()$rows
detect_patterns(x
, patterns = c(" FOUND "
, " INST ")
, rows = and_rows(conds, rows, x)
, output_codes_col_name = output_codes_col_name
, patterns_codes = "inst"
, merge_existing_codes = merge_existing_codes
, return_only_first_detected_code = TRUE)
}
##' Detects Non-profit institutes with special conditions
##'
##' @param x table. Expected that x has a column with codes for universities
##' @param merge_existing_codes same as in [detect_patterns()]
##' @param output_codes_col_name column with codes for universities ("univ"). Default is last column of x
##' @inheritDotParams standardize_options
##' @return standardized names table
##'
##' @md
##' @export
cockburn_detect_inst_conds <- function(x
, merge_existing_codes = "append_to_existing"
, output_codes_col_name = "{col_name_}entity_type"
, ...) {
x |>
cockburn_detect_inst_conds_1(merge_existing_codes = merge_existing_codes
, output_codes_col_name = output_codes_col_name) |>
cockburn_detect_inst_conds_2(output_codes_col_name = output_codes_col_name
, merge_existing_codes = merge_existing_codes)
}
## --------<< Complex conditions:1 ends here
## -------->> [[file:../nstandr.src.org::*German Non-profit institutes][German Non-profit institutes:1]]
##' @eval attr(cockburn_detect_inst_german, "@title")
##' @description "EINGETRAGENER VEREIN. NON PROFIT SOCIETY/ASSOCIATION."
##' @param x table
##' @param output_codes_col_name same as in [detect_patterns()]
##' @param merge_existing_codes same as in [detect_patterns()]
##' @inheritDotParams standardize_options
##' @return standardized names table
##'
##' @md
##' @export
cockburn_detect_inst_german <- function(x
, output_codes_col_name = "{col_name_}entity_type"
, merge_existing_codes = "append_to_existing"
, ...) {
rows <- get_col_and_rows()$rows
conds <- detect_patterns(x
, patterns = c(" UNIV "
, " GMBH "
, " KGAA "
, " KG "
, " AG "
, " EG "
, " OHG ")
, patterns_codes = TRUE
, no_match_code = FALSE
, return_only_first_detected_code = TRUE
, return_only_codes = TRUE)
detect_patterns(x, patterns = c(" STIFTUNG "
, " EINGETRAGENER VEREIN ")
, output_codes_col_name = output_codes_col_name
, merge_existing_codes = merge_existing_codes
, rows = and_rows(rows, conds, x)
, patterns_codes = "inst"
, return_only_first_detected_code = TRUE)
}
attr(cockburn_detect_inst_german, "@title") <- "Detects German Non-profit institutes"
## --------<< German Non-profit institutes:1 ends here
## -------->> [[file:../nstandr.src.org::*Hospitals][Hospitals:1]]
##' @eval attr(cockburn_detect_hosp, "@title")
##' @description From non_corporates.do file. Source - https://sites.google.com/site/patentdataproject/Home/posts/namestandardizationroutinesuploaded
##' @inherit detect_patterns params return
##' @inheritDotParams standardize_options
##' @return standardized names table
##' @family magerman
##' @seealso detect_patterns
##'
##' @md
##' @export
cockburn_detect_hosp <- make_alias(detect_patterns
, patterns = cockburn_patterns_hosp
, patterns_codes = "hosp"
, return_only_first_detected_code = TRUE
, output_codes_col_name = "{col_name_}entity_type"
, merge_existing_codes = "append_to_existing")
attr(cockburn_detect_hosp, "@title") <-
"Detect Hospitals (Non-Corporates group)"
## --------<< Hospitals:1 ends here
## -------->> [[file:../nstandr.src.org::*Punctuation][Punctuation:1]]
##' Removes punctuation and standardizes some symbols.
##'
##' @param x object
##' @inheritDotParams standardize_options
##' @return standardized names table
##'
##' @md
##' @export
cockburn_replace_punctuation <- function(x
, ...) {
x |>
replace_patterns(patterns = cockburn_replace_punctuation_and) |>
replace_patterns(patterns = cockburn_replace_punctuation_the
, patterns_type_col = 3) |>
## I swapped patstat with amadeus, otherwise òâêîé would not become oaeie
replace_patterns(patterns = cockburn_replace_punctuation_patstat) |>
replace_patterns(patterns = cockburn_replace_punctuation_amadeus) |>
replace_patterns(patterns = cockburn_replace_punctuation_char)
}
## --------<< Punctuation:1 ends here
## -------->> [[file:../nstandr.src.org::*Standard Name][Standard Name:1]]
##' Create standard name
##'
##' @param x object
##' @inheritDotParams replace_patterns
##' @return standardized names table
##'
##' @md
##' @export
cockburn_replace_standard_names <- function(x
, ...) {
x |>
cockburn_replace_derwent() |>
replace_patterns(patterns = cockburn_patterns_standard_names_additional) |>
replace_patterns(patterns = cockburn_patterns_standard_names_country_specific)
}
## --------<< Standard Name:1 ends here
## -------->> [[file:../nstandr.src.org::*Stem Name][Stem Name:1]]
##' @eval attr(cockburn_remove_standard_names, "@title")
##' @inherit replace_patterns params return
##' @inheritDotParams standardize_options
##' @return standardized names table
##' @family magerman
##' @seealso replace_patterns
##'
##' @md
##' @export
cockburn_remove_standard_names <- make_alias(replace_patterns
, patterns = cockburn_patterns_stem_name
, replacements = " ")
attr(cockburn_remove_standard_names, "@title") <-
"Creates so called stem name (a name with all legal entity identifiers removed)"
## --------<< Stem Name:1 ends here
## -------->> [[file:../nstandr.src.org::*USPTO special][USPTO special:1]]
##' @eval attr(cockburn_remove_uspto, "@title")
##' @inherit replace_patterns params return
##' @inheritDotParams standardize_options
##' @return standardized names table
##' @family magerman
##' @seealso replace_patterns
##'
##' @md
##' @export
cockburn_remove_uspto <- make_alias(replace_patterns
, patterns = cockburn_patterns_uspto)
attr(cockburn_remove_uspto, "@title") <-
"Removes special USPTO codes."
##' @eval attr(cockburn_detect_uspto, "@title")
##' @inherit detect_patterns params return
##' @inheritDotParams standardize_options
##' @return standardized names table
##' @family magerman
##' @seealso detect_patterns
##'
##' @md
##' @export
cockburn_detect_uspto <- make_alias(detect_patterns
, patterns = ";"
, patterns_codes = "indiv"
, output_codes_col_name = "{col_name_}entity_type"
, return_only_first_detected_code = TRUE)
attr(cockburn_detect_uspto, "@title") <-
"Special USPTO codes. Codes as 'indiv'"
## --------<< USPTO special:1 ends here
## -------->> [[file:../nstandr.src.org::*Combined Cockburn Procedures][Combined Cockburn Procedures:1]]
##' Standardizes strings using the exact procedures described in Cockburn et al. (2009)
##' @param x table or vector
##' @param cockburn_procedures list of procedures to pass to the `standardize` function. Default is `cockburn_procedures_table`
##' @param detect_legal_form Whether to detect legal forms. Default is FALSE
##' @param return_x_before_common_words_removal Whether to save standardized column before `common.words.removal` procedure. Default is FALSE
##' @inheritDotParams standardize
##' @return standardized names table
##'
##' @references Cockburn, et al. (2009)
##'
##' @md
##' @export
standardize_cockburn <- function(x
, cockburn_procedures = cockburn_procedures_table
, detect_legal_form = FALSE
, return_x_before_common_words_removal = FALSE
, ... ) {
if(is.data.frame(cockburn_procedures)) {
cockburn_procedures <- standardize_make_procedures_list(cockburn_procedures)
}
## do some tweaks on cockburn_procedures
if(!detect_legal_form) {
cockburn_procedures <-
cockburn_procedures[
!(sapply(cockburn_procedures, `[[`, 1) %in%
c("cockburn_detect_type", "cockburn_detect_uspto"))
]
}
if(return_x_before_common_words_removal) {
cockburn_procedures[[
which(sapply(cockburn_procedures, `[[` , 1) %in% "cockburn_combabbrev")
]] <- list("cockburn_combabbrev", append_output_copy = TRUE)
}
standardize(x, cockburn_procedures, ...)
}
## --------<< Combined Cockburn Procedures:1 ends here
|
{"hexsha": "3196f475f277e782ccf78f888ca3eea2b9177947", "size": 23383, "ext": "r", "lang": "R", "max_stars_repo_path": "R/cockburn.r", "max_stars_repo_name": "stasvlasov/nstandr", "max_stars_repo_head_hexsha": "2cd418ccd2d11a45b1166a5ae7a54d9590debdc9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "R/cockburn.r", "max_issues_repo_name": "stasvlasov/nstandr", "max_issues_repo_head_hexsha": "2cd418ccd2d11a45b1166a5ae7a54d9590debdc9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "R/cockburn.r", "max_forks_repo_name": "stasvlasov/nstandr", "max_forks_repo_head_hexsha": "2cd418ccd2d11a45b1166a5ae7a54d9590debdc9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.3957307061, "max_line_length": 177, "alphanum_fraction": 0.6109566779, "num_tokens": 5451}
|
'''
Derived from: https://github.com/jonasrothfuss/ProMP/blob/master/meta_policy_search/envs/mujoco_envs/ant_rand_goal.py
However, the task is actually different. We are asking the ant to navigate to points on the perimeter
of the circle, not inside the circle.
'''
import numpy as np
from collections import OrderedDict
from gym import utils
from rlkit.envs.meta_mujoco_env import MetaMujocoEnv
from rlkit.envs.meta_task_params_sampler import MetaTaskParamsSampler
class _BaseParamsSampler(MetaTaskParamsSampler):
def __init__(self, goals, random=7823):
super().__init__()
if not isinstance(random, np.random.RandomState):
random = np.random.RandomState(random)
self._random = random
self.goals = goals
self._ptr = 0
def sample(self):
p = self.goals[self._random.choice(self.goals.shape[0])]
return {'goal_pos': p}, p
def sample_unique(self, num):
idxs = self._random.choice(self.goals.shape[0], size=num, replace=False)
p_samples = self.goals[idxs]
return list(
map(
lambda p: ({'goal_pos': p}, p),
p_samples
)
)
def __iter__(self):
# dangerous: this resets the shared pointer, so nested or concurrent iteration will interfere
self._ptr = 0
return self
def __next__(self):
if self._ptr == self.goals.shape[0]:
self._ptr = 0
raise StopIteration
p = self.goals[self._ptr]
self._ptr += 1
return {'goal_pos': p}, p
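# Usage sketch (hypothetical): every sampler below yields
# (task_params, obs_task_params) pairs when iterated, e.g.
#   sampler = _ExpertTrainParamsSampler(num_samples=8)
#   for task_params, goal in sampler:
#       ...  # e.g. env.reset(task_params=task_params)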
class _ExpertTrainParamsSampler(_BaseParamsSampler):
def __init__(self, random=8819, num_samples=200):
a = np.linspace(0, 2*np.pi, num=num_samples, endpoint=False)
# in the original, goals are sampled inside the disc:
# r = 3 * np.random.random(num_tasks) ** 0.5
r = 2.0
goals = np.stack((r * np.cos(a), r * np.sin(a)), axis=-1)
super().__init__(goals, random=random)
class _ExpertTestParamsSampler(_BaseParamsSampler):
def __init__(self, random=5322, num_samples=100):
_random = np.random.RandomState(random)
a = np.linspace(0, 2*np.pi, num=num_samples, endpoint=False)
# a = _random.uniform(size=num_samples) * 2 * np.pi
# in the original, goals are sampled inside the disc:
# r = 3 * np.random.random(num_tasks) ** 0.5
r = 2.0
goals = np.stack((r * np.cos(a), r * np.sin(a)), axis=-1)
super().__init__(goals, random=random)
class _Expert120DegreesParamsSampler(_BaseParamsSampler):
def __init__(self, random=2837, num_samples=60):
a = np.linspace(-np.pi/3.0, np.pi/3.0, num=num_samples, endpoint=True)
# in the original, goals are sampled inside the disc:
# r = 3 * np.random.random(num_tasks) ** 0.5
r = 2.0
goals = np.stack((r * np.cos(a), r * np.sin(a)), axis=-1)
super().__init__(goals, random=random)
class _Expert60DegreesParamsSampler(_BaseParamsSampler):
def __init__(self, random=2837, num_samples=30):
a = np.linspace(0.0, np.pi/3.0, num=num_samples, endpoint=True)
# in the original, goals are sampled inside the disc:
# r = 3 * np.random.random(num_tasks) ** 0.5
r = 2.0
goals = np.stack((r * np.cos(a), r * np.sin(a)), axis=-1)
super().__init__(goals, random=random)
class _ExpertMiddle60DegreesParamsSampler(_BaseParamsSampler):
def __init__(self, random=2837, num_samples=30, r=2.0):
a = np.linspace(np.pi/3.0, 2*np.pi/3.0, num=num_samples, endpoint=True)
# in the original, goals are sampled inside the disc:
# r = 3 * np.random.random(num_tasks) ** 0.5
# r = 2.0
goals = np.stack((r * np.cos(a), r * np.sin(a)), axis=-1)
super().__init__(goals, random=random)
class _Expert2DirectionsParamsSampler(_BaseParamsSampler):
def __init__(self, random=2837, num_samples=2):
goals = np.array(
[
[2.0, 0.0],
# [0.0, 2.0]
[2**0.5, 2**0.5]
]
)
super().__init__(goals, random=random)
class _Expert45DegFartherParamsSampler(_BaseParamsSampler):
def __init__(self, random=2837, num_samples=2):
goals = np.array(
[
[4.0, 0.0],
# [0.0, 2.0]
[8**0.5, 8**0.5]
]
)
super().__init__(goals, random=random)
class _Expert90DegApartParamsSampler(_BaseParamsSampler):
def __init__(self, random=2837, num_samples=2):
# radius 4 at 45 and 135 degrees
goals = np.array(
[
# [3.4, 3.4],
# [-3.4, 3.4]
[2.0, 0.0],
[0.0, 2.0],
]
)
super().__init__(goals, random=random)
class _ExpertOpposite2DirectionsParamsSampler(_BaseParamsSampler):
def __init__(self, random=2837, num_samples=2):
goals = np.array(
[
[2.0, 0.0],
# # [0.0, 2.0]
[-2.0, 0.0]
# [4.0, 0.0],
# [-4.0, 0.0]
]
)
super().__init__(goals, random=random)
class _ExpertOneDirectionParamsSampler(_BaseParamsSampler):
def __init__(self, random=2837, num_samples=1):
goals = np.array(
[
# [4.0, 0.0],
# [0.0, 4.0],
# [0.0, -4.0],
# [-4.0, 0.0]
# [16.0, 0.0],
# [0.0, 16.0],
# [0.0, -16.0],
[-16.0, 0.0]
# [1.85, 0.77]
# [0.77, 1.85]
# [-0.77, 1.85]
# [-1.85, 0.77]
# [-1.85, -0.77]
# [1.85, -0.77]
# [-0.77, -1.85]
]
)
# goals = np.array(
# [
# # [ 1.96, 0.39],
# # [ 1.66, 1.11],
# # [ 1.11, 1.66],
# # [ 0.39, 1.96],
# # [-0.39, 1.96],
# # [-1.11, 1.66],
# # [-1.66, 1.11],
# # [-1.96, 0.39],
# # [-1.96, -0.39],
# # [-1.66, -1.11],
# # [-1.11, -1.66],
# # [-0.39, -1.96],
# # [ 0.39, -1.96],
# # [ 1.11, -1.66],
# # [ 1.66, -1.11],
# # [ 1.96, -0.39]
# # test tasks
# # [ 1.99, 0.2 ],
# # [ 1.76, 0.94],
# # [ 1.27, 1.55],
# # [ 0.58, 1.91],
# # [-0.2 , 1.99],
# # [-0.94, 1.76],
# # [-1.55, 1.27],
# # [-1.91, 0.58],
# # [-1.99, -0.2 ],
# # [-1.76, -0.94],
# # [-1.27, -1.55],
# # [-0.58, -1.91],
# # [ 0.2 , -1.99],
# # [ 0.94, -1.76],
# # [ 1.55, -1.27],
# # [ 1.91, -0.58]
# ]
# )
print('\n\n')
print(goals)
print('\n\n')
super().__init__(goals, random=random)
class _ExpertLineParamsSampler(_BaseParamsSampler):
def __init__(self, random=2837):
a = np.linspace(-10.0, 10.0, num=21, endpoint=True)
goals = np.stack((4 * np.ones(a.shape[0]), a), axis=-1)
super().__init__(goals, random=random)
class _ExpertFivePointsParamsSampler(_BaseParamsSampler):
def __init__(self, random=2837, num_samples=1):
goals = np.array(
[
[2.0, 0.0],
[1.41, 1.41],
[0.0, 2.0],
[-1.41, 1.41],
[-2.0, 0.0]
]
)
print('\n\n')
print(goals)
print('\n\n')
super().__init__(goals, random=random)
class _ExpertEightPointsParamsSampler(_BaseParamsSampler):
def __init__(self, random=2837, num_samples=1):
goals = np.array(
[
[2.0, 0.0],
[1.41, 1.41],
[0.0, 2.0],
[-1.41, 1.41],
[-2.0, 0.0],
[-1.41, -1.41],
[0.0, -2.0],
[1.41, -1.41]
]
)
print('\n\n')
print(goals)
print('\n\n')
super().__init__(goals, random=random)
class _Expert16PointsTrainParamsSampler(_BaseParamsSampler):
def __init__(self, random=2837, num_samples=1):
goals = np.array(
[
[2.0, 0.0],
[1.41, 1.41],
[0.0, 2.0],
[-1.41, 1.41],
[-2.0, 0.0],
[-1.41, -1.41],
[0.0, -2.0],
[1.41, -1.41],
[1.85, 0.77],
[0.77, 1.85],
[-0.77, 1.85],
[-1.85, 0.77],
[-1.85, -0.77],
[-0.77, -1.85],
[0.77, -1.85],
[1.85, -0.77],
]
)
print('\n\n')
print(goals)
print('\n\n')
super().__init__(goals, random=random)
class _Expert16PointsTestParamsSampler(_BaseParamsSampler):
def __init__(self, random=2837, num_samples=1):
goals = np.array(
[
# 16 evenly spaced points on the radius-2 circle, offset from the training set
[1.96, 0.39],
[1.66, 1.11],
[1.11, 1.66],
[0.39, 1.96],
[-0.39, 1.96],
[-1.11, 1.66],
[-1.66, 1.11],
[-1.96, 0.39],
[-1.96, -0.39],
[-1.66, -1.11],
[-1.11, -1.66],
[-0.39, -1.96],
[0.39, -1.96],
[1.11, -1.66],
[1.66, -1.11],
[1.96, -0.39]
]
)
print('\n\n')
print(goals)
print('\n\n')
super().__init__(goals, random=random)
class _Expert32PointsParamsSampler(_BaseParamsSampler):
def __init__(self, random=2837, num_samples=1):
goals = np.array(
[
[2.0, 0.0],
[1.41, 1.41],
[0.0, 2.0],
[-1.41, 1.41],
[-2.0, 0.0],
[-1.41, -1.41],
[0.0, -2.0],
[1.41, -1.41],
[1.85, 0.77],
[0.77, 1.85],
[-0.77, 1.85],
[-1.85, 0.77],
[-1.85, -0.77],
[-0.77, -1.85],
[0.77, -1.85],
[1.85, -0.77],
# second 16 points, offset by 11.25 degrees from the first set
[1.96, 0.39],
[1.66, 1.11],
[1.11, 1.66],
[0.39, 1.96],
[-0.39, 1.96],
[-1.11, 1.66],
[-1.66, 1.11],
[-1.96, 0.39],
[-1.96, -0.39],
[-1.66, -1.11],
[-1.11, -1.66],
[-0.39, -1.96],
[0.39, -1.96],
[1.11, -1.66],
[1.66, -1.11],
[1.96, -0.39]
]
)
print('\n\n')
print(goals)
print('\n\n')
super().__init__(goals, random=random)
class _ExpertTestTasksFor32PointsParamsSampler(_BaseParamsSampler):
def __init__(self, random=2837, num_samples=1):
goals = np.array(
[
[ 1.99, 0.2 ],
[ 1.76, 0.94],
[ 1.27, 1.55],
[ 0.58, 1.91],
[-0.2 , 1.99],
[-0.94, 1.76],
[-1.55, 1.27],
[-1.91, 0.58],
[-1.99, -0.2 ],
[-1.76, -0.94],
[-1.27, -1.55],
[-0.58, -1.91],
[ 0.2 , -1.99],
[ 0.94, -1.76],
[ 1.55, -1.27],
[ 1.91, -0.58]
]
)
print('\n\n')
print(goals)
print('\n\n')
super().__init__(goals, random=random)
class _Expert24PointsParamsSampler(_BaseParamsSampler):
def __init__(self, random=2837, num_samples=1):
r = 2.0
a = np.linspace(0, 2*np.pi, num=24, endpoint=False)
goals = np.stack((r * np.cos(a), r * np.sin(a)), axis=-1)
print('\n\n')
print(goals)
print('\n\n')
super().__init__(goals, random=random)
class _ExpertTwoPointsParamsSampler(_BaseParamsSampler):
def __init__(self, random=2837, num_samples=1):
goals = np.array(
[
[1.41, 1.41],
[-1.41, 1.41],
]
)
print('\n\n')
print(goals)
print('\n\n')
super().__init__(goals, random=random)
class r_20_45to90_ParamsSampler(_BaseParamsSampler):
def __init__(self, random=2837, num_samples=1):
a = np.linspace(np.pi/4.0, np.pi/2.0, num=10, endpoint=True)
goals = np.stack([np.cos(a), np.sin(a)], axis=1)
goals *= 20.0
print('\n\n')
print(goals)
print('\n\n')
super().__init__(goals, random=random)
class r_20_90to135_ParamsSampler(_BaseParamsSampler):
def __init__(self, random=2837, num_samples=1):
a = np.linspace(np.pi/2.0, 3*np.pi/4.0, num=10, endpoint=True)
goals = np.stack([np.cos(a), np.sin(a)], axis=1)
goals *= 20.0
print('\n\n')
print(goals)
print('\n\n')
super().__init__(goals, random=random)
class AntRandGoalEnv(MetaMujocoEnv, utils.EzPickle):
def __init__(self):
self.goal_pos = np.array([1.41, 1.41])
# MetaMujocoEnv.__init__(self, 'ant.xml', 5)
MetaMujocoEnv.__init__(self, 'low_gear_ratio_ant.xml', 5)
utils.EzPickle.__init__(self)
def sample_tasks(self, n_tasks):
raise NotImplementedError()
# a = np.random.random(n_tasks) * 2 * np.pi
# r = 3 * np.random.random(n_tasks) ** 0.5
# return np.stack((r * np.cos(a), r * np.sin(a)), axis=-1)
def step(self, a):
self.do_simulation(a, self.frame_skip)
xposafter = self.get_body_com("torso")
l1_dist = np.sum(np.abs(xposafter[:2] - self.goal_pos))
l2_dist = np.sqrt(np.sum(np.square(xposafter[:2] - self.goal_pos)))
# goal_reward = -np.sum(np.abs(xposafter[:2] - self.goal_pos)) # make it happy, not suicidal
# goal_reward = -np.sum(np.square(xposafter[:2] - self.goal_pos)) # make it happy, not suicidal
# goal_reward = -1.0 * l2_dist # make it happy, not suicidal
# goal_reward = -1.0 * l1_dist
# goal_reward = -1.0 * (l2_dist**2)
goal_reward = -1.0 * l2_dist
# ctrl_cost = .1 * np.square(a).sum()
ctrl_cost = 0.5 * 1e-2 * np.square(a).sum()
# contact_cost = 0.5 * 1e-3 * np.sum(np.square(np.clip(self.sim.data.cfrc_ext, -1, 1)))
contact_cost = 0.0
# survive_reward = 1.0
survive_reward = 0.0
# survive_reward = 4.0
reward = goal_reward - ctrl_cost - contact_cost + survive_reward
# if l2_dist < 0.5:
# reward = 1.0
# else:
# reward = 0.0
state = self.state_vector()
# notdone = np.isfinite(state).all() and 1.0 >= state[2] >= 0.
# done = not notdone
done = False
ob = self._get_obs()
return ob, reward, done, dict(
l1_dist=l1_dist,
l2_dist=l2_dist,
reward_forward=goal_reward,
reward_ctrl=-ctrl_cost,
reward_contact=-contact_cost,
reward_survive=survive_reward)
def _get_obs(self):
# obs = np.concatenate([
# self.sim.data.qpos.flat,
# self.sim.data.qvel.flat,
# self.get_body_com("torso")[:2].flat
# ])
# version used in SMILe experiments
obs = np.concatenate([
self.sim.data.qpos.flat,
self.sim.data.qvel.flat,
# np.clip(self.sim.data.cfrc_ext, -1, 1).flat,
self.get_body_com("torso").flat
])
# print('---')
# print(self.get_body_com("torso"))
# print(np.array(self.get_body_com("torso").flat))
# print(np.concatenate([self.get_body_com("torso").flat]))
# print(obs)
return {
'obs': obs.copy(),
'obs_task_params': self.goal_pos.copy()
}
def reset_model(self):
qpos = self.init_qpos + self.np_random.uniform(size=self.model.nq, low=-.1, high=.1)
qvel = self.init_qvel + self.np_random.randn(self.model.nv) * .1
self.set_state(qpos, qvel)
return self._get_obs()
def viewer_setup(self):
self.viewer.cam.distance = self.model.stat.extent * 0.5
def reset(self, task_params=None, obs_task_params=None):
if task_params is None:
self.goal_pos = self.sample_tasks(1)[0]
else:
self.goal_pos = task_params['goal_pos']
obs = super().reset()
return obs
@property
def task_identifier(self):
return tuple(self.goal_pos)
def task_id_to_obs_task_params(self, task_id):
return np.array(task_id)
def task_id_to_task_params(self, task_id):
return {'goal_pos': np.array(task_id)}
def log_statistics(self, paths):
# this is run so rarely that it doesn't matter if it's a little inefficient
progs = [np.mean([d["l1_dist"] for d in path["env_infos"]]) for path in paths]
last_100_dists = [np.mean([d["l1_dist"] for d in path["env_infos"]][-100:]) for path in paths]
min_dists = [np.min([d["l1_dist"] for d in path["env_infos"]]) for path in paths]
ctrl_cost = [-np.mean([d["reward_ctrl"] for d in path["env_infos"]]) for path in paths]
contact_cost = [-np.mean([d["reward_contact"] for d in path["env_infos"]]) for path in paths]
l2_min_dists = [np.min([d["l2_dist"] for d in path["env_infos"]]) for path in paths]
l2_last_100_dists = [np.mean([d["l2_dist"] for d in path["env_infos"]][-100:]) for path in paths]
return_dict = OrderedDict()
return_dict['AverageClosest'] = np.mean(min_dists)
return_dict['MaxClosest'] = np.max(min_dists)
return_dict['MinClosest'] = np.min(min_dists)
return_dict['StdClosest'] = np.std(min_dists)
return_dict['AverageLast100'] = np.mean(last_100_dists)
return_dict['MaxLast100'] = np.max(last_100_dists)
return_dict['MinLast100'] = np.min(last_100_dists)
return_dict['StdLast100'] = np.std(last_100_dists)
return_dict['AverageForwardReturn'] = np.mean(progs)
return_dict['MaxForwardReturn'] = np.max(progs)
return_dict['MinForwardReturn'] = np.min(progs)
return_dict['StdForwardReturn'] = np.std(progs)
return_dict['AverageCtrlCost'] = np.mean(ctrl_cost)
return_dict['AverageContactCost'] = np.mean(contact_cost)
return_dict['L2AverageClosest'] = np.mean(l2_min_dists)
return_dict['L2MaxClosest'] = np.max(l2_min_dists)
return_dict['L2MinClosest'] = np.min(l2_min_dists)
return_dict['L2StdClosest'] = np.std(l2_min_dists)
return_dict['L2AverageLast100'] = np.mean(l2_last_100_dists)
return_dict['L2MaxLast100'] = np.max(l2_last_100_dists)
return_dict['L2MinLast100'] = np.min(l2_last_100_dists)
return_dict['L2StdLast100'] = np.std(l2_last_100_dists)
return return_dict
if __name__ == "__main__":
# e = _ExpertTrainParamsSampler(num_samples=200)
e = _ExpertTestParamsSampler(num_samples=200)
print(e.goals)
print(e.sample())
print(e.sample())
print(e.sample())
print(e.sample())
p1 = e.sample()[1]
p2 = e.sample()[1]
print(np.linalg.norm(p1 - p2))
p1 = e.sample()[1]
p2 = e.sample()[1]
print(np.linalg.norm(p1 - p2))
# print(e.sample_unique(10))
# env = AntRandGoalEnv()
# while True:
# env.reset()
# print(env.goal_pos)
# for _ in range(100):
# # env.render()
# obs, reward, _, _ = env.step(env.action_space.sample()) # take a random action
|
{"hexsha": "372938f5fc70b018363fc4b68e45a2248ebc31e0", "size": 20230, "ext": "py", "lang": "Python", "max_stars_repo_path": "rlkit/envs/ant_rand_goal.py", "max_stars_repo_name": "yifan-you-37/rl_swiss", "max_stars_repo_head_hexsha": "8b0ee7caa5c1fa93860916004cf4fd970667764f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 56, "max_stars_repo_stars_event_min_datetime": "2019-10-20T03:09:02.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-25T09:21:40.000Z", "max_issues_repo_path": "rlkit/envs/ant_rand_goal.py", "max_issues_repo_name": "yifan-you-37/rl_swiss", "max_issues_repo_head_hexsha": "8b0ee7caa5c1fa93860916004cf4fd970667764f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2020-10-01T07:33:51.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-12T03:40:57.000Z", "max_forks_repo_path": "rlkit/envs/ant_rand_goal.py", "max_forks_repo_name": "yifan-you-37/rl_swiss", "max_forks_repo_head_hexsha": "8b0ee7caa5c1fa93860916004cf4fd970667764f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2019-11-04T16:56:09.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-25T09:21:41.000Z", "avg_line_length": 32.9478827362, "max_line_length": 117, "alphanum_fraction": 0.4913989125, "include": true, "reason": "import numpy", "num_tokens": 6015}
|
from BPTK_Py import Agent, Event
import numpy as np
class MovingPerson(Agent):
STATES = ["HEALTHY", "INFECTED_LIGHT", "INFECTED_HARD", "DEAD", "RECOVERED"]
MOVING_LIST = [[0, 0], [0, -1], [0, 1], [-1, 0], [1, 0], [1, -1], [1, 1], [-1, 1], [-1, -1]]
def initialize(self):
self.state = np.random.choice(self.STATES, p=[0.8, 0.16, 0.04, 0, 0])
self.register_event_handler(self.STATES, "infection_hard", self.handle_infection_hard_event)
self.register_event_handler(self.STATES, "infection_light",
self.handle_infection_light_event)
found = False
while not found:
foundOne = False
position = (np.random.randint(20), np.random.randint(20))
for agent in self.model.agents:
if agent == self:
continue
if agent.position == position:
foundOne = True
break
if not foundOne:
found = True
self.position = position
def handle_infection_hard_event(self, event):
if self.state == "HEALTHY":
self.state = "INFECTED_HARD"
def handle_infection_light_event(self, event):
if self.state == "HEALTHY":
self.state = "INFECTED_LIGHT"
def act(self, time, round_no, step_no):
if self.state == "HEALTHY":
self.move()
elif self.state == "DEAD":
pass
if self.state == "INFECTED_LIGHT":
# or self.state == "INFECTED_HARD":
infected_contacts = 0
for i in range(0, self.model.contact_rate):
if np.random.choice(["HEALTHY", "INFECTED"], p=[1 - self.model.infectivity, self.model.infectivity]) \
== "INFECTED":
infected_contacts += 1
self.infected = infected_contacts
infected_light = round(0.8 * infected_contacts)
infected_hard = round(0.2 * infected_contacts)
event_factory_hard = lambda agent_id: Event("infection_hard", self.id, agent_id, data=None)
event_factory_light = lambda agent_id: Event("infection_light", self.id, agent_id, data=None)
self.model.random_events("person", infected_hard, event_factory_hard)
self.model.random_events("person", infected_light, event_factory_light)
def move(self):
position_found = 0
found = False
while not found:
indices = [i for i in range(0, len(self.MOVING_LIST))]
moving = self.MOVING_LIST[np.random.choice(indices)]
position_found = (self.position[0]+moving[0], self.position[1]+moving[1])
if position_found[0] >= 0 and position_found[0]<=20 and \
position_found[1] >= 0 and position_found[1] <=20:
found = True
break
self.position = position_found
def find_neighbors(self):
position = 0
neighbors_list = []
for moving_element in self.MOVING_LIST:
position = (self.position[0]+moving_element[0], self.position[1]+moving_element[1])
for agent in self.model.agents:
if agent == self:
continue
if agent.position == position:
neighbors_list.append(agent.position)
return neighbors_list
def check_infected(self, neighbors):
for agent in self.model.agents:
if agent == self:
continue
if agent.position in neighbors:
if agent.state == "INFECTED":
self.state = "INFECTED"
self.model.infected += 1
print("Waaah I got infected")
self.infected = 1
break
def infect_neighbors(self,neighbors):
for agent in self.model.agents:
if agent == self:
continue
if agent.position in neighbors:
agent.state = "INFECTED"
|
{"hexsha": "a9e364b4718f7db8eeede114ce0b6329c49e3808", "size": 4097, "ext": "py", "lang": "Python", "max_stars_repo_path": "abm/src/agents/moving_person.py", "max_stars_repo_name": "transentis/sim-covid-19", "max_stars_repo_head_hexsha": "7a4da7c3c7fa75f1df94c223b74211449b46eba3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2020-05-16T08:29:50.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-08T14:43:00.000Z", "max_issues_repo_path": "abm/src/agents/moving_person.py", "max_issues_repo_name": "transentis/sim-covid-19", "max_issues_repo_head_hexsha": "7a4da7c3c7fa75f1df94c223b74211449b46eba3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "abm/src/agents/moving_person.py", "max_forks_repo_name": "transentis/sim-covid-19", "max_forks_repo_head_hexsha": "7a4da7c3c7fa75f1df94c223b74211449b46eba3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-05-07T09:16:53.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-04T07:49:45.000Z", "avg_line_length": 39.019047619, "max_line_length": 118, "alphanum_fraction": 0.5533317061, "include": true, "reason": "import numpy", "num_tokens": 923}
|
[STATEMENT]
lemma fin_cut_same_Cons[simp]: "fin_cut_same x (y # xs) =
(if fin_cut_same x xs = [] then if x = y then [] else [y] else y # fin_cut_same x xs)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. fin_cut_same x (y # xs) = (if fin_cut_same x xs = [] then if x = y then [] else [y] else y # fin_cut_same x xs)
[PROOF STEP]
unfolding fin_cut_same_def Least_fin_cut_same
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. take (length (y # xs) - length (takeWhile (\<lambda>xa. xa = x) (rev (y # xs)))) (y # xs) = (if take (length xs - length (takeWhile (\<lambda>xa. xa = x) (rev xs))) xs = [] then if x = y then [] else [y] else y # take (length xs - length (takeWhile (\<lambda>xa. xa = x) (rev xs))) xs)
[PROOF STEP]
apply auto
[PROOF STATE]
proof (prove)
goal (4 subgoals):
1. \<lbrakk>x = y; length xs \<le> length (takeWhile (\<lambda>x. x = y) (rev xs))\<rbrakk> \<Longrightarrow> Suc (length xs) \<le> length (takeWhile (\<lambda>x. x = y) (rev xs @ [y]))
2. \<lbrakk>x = y; \<not> length xs \<le> length (takeWhile (\<lambda>x. x = y) (rev xs)); xs \<noteq> []\<rbrakk> \<Longrightarrow> take (Suc (length xs) - length (takeWhile (\<lambda>x. x = y) (rev xs @ [y]))) (y # xs) = y # take (length xs - length (takeWhile (\<lambda>x. x = y) (rev xs))) xs
3. \<lbrakk>x \<noteq> y; length xs \<le> length (takeWhile (\<lambda>xa. xa = x) (rev xs))\<rbrakk> \<Longrightarrow> take (Suc (length xs) - length (takeWhile (\<lambda>xa. xa = x) (rev xs @ [y]))) (y # xs) = [y]
4. \<lbrakk>x \<noteq> y; \<not> length xs \<le> length (takeWhile (\<lambda>xa. xa = x) (rev xs)); xs \<noteq> []\<rbrakk> \<Longrightarrow> take (Suc (length xs) - length (takeWhile (\<lambda>xa. xa = x) (rev xs @ [y]))) (y # xs) = y # take (length xs - length (takeWhile (\<lambda>xa. xa = x) (rev xs))) xs
[PROOF STEP]
apply (simp add: takeWhile_takes_all)
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. \<lbrakk>x = y; \<not> length xs \<le> length (takeWhile (\<lambda>x. x = y) (rev xs)); xs \<noteq> []\<rbrakk> \<Longrightarrow> take (Suc (length xs) - length (takeWhile (\<lambda>x. x = y) (rev xs @ [y]))) (y # xs) = y # take (length xs - length (takeWhile (\<lambda>x. x = y) (rev xs))) xs
2. \<lbrakk>x \<noteq> y; length xs \<le> length (takeWhile (\<lambda>xa. xa = x) (rev xs))\<rbrakk> \<Longrightarrow> take (Suc (length xs) - length (takeWhile (\<lambda>xa. xa = x) (rev xs @ [y]))) (y # xs) = [y]
3. \<lbrakk>x \<noteq> y; \<not> length xs \<le> length (takeWhile (\<lambda>xa. xa = x) (rev xs)); xs \<noteq> []\<rbrakk> \<Longrightarrow> take (Suc (length xs) - length (takeWhile (\<lambda>xa. xa = x) (rev xs @ [y]))) (y # xs) = y # take (length xs - length (takeWhile (\<lambda>xa. xa = x) (rev xs))) xs
[PROOF STEP]
apply (simp add: takeWhile_takes_all)
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. \<lbrakk>x = y; \<exists>x\<in>set xs. x \<noteq> y; xs \<noteq> []\<rbrakk> \<Longrightarrow> take (Suc (length xs) - length (takeWhile (\<lambda>x. x = y) (rev xs @ [y]))) (y # xs) = y # take (length xs - length (takeWhile (\<lambda>x. x = y) (rev xs))) xs
2. \<lbrakk>x \<noteq> y; length xs \<le> length (takeWhile (\<lambda>xa. xa = x) (rev xs))\<rbrakk> \<Longrightarrow> take (Suc (length xs) - length (takeWhile (\<lambda>xa. xa = x) (rev xs @ [y]))) (y # xs) = [y]
3. \<lbrakk>x \<noteq> y; \<not> length xs \<le> length (takeWhile (\<lambda>xa. xa = x) (rev xs)); xs \<noteq> []\<rbrakk> \<Longrightarrow> take (Suc (length xs) - length (takeWhile (\<lambda>xa. xa = x) (rev xs @ [y]))) (y # xs) = y # take (length xs - length (takeWhile (\<lambda>xa. xa = x) (rev xs))) xs
[PROOF STEP]
apply auto
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. \<And>xa. \<lbrakk>x = y; xs \<noteq> []; xa \<in> set xs; xa \<noteq> y\<rbrakk> \<Longrightarrow> take (Suc (length xs) - length (takeWhile (\<lambda>x. x = y) (rev xs))) (y # xs) = y # take (length xs - length (takeWhile (\<lambda>x. x = y) (rev xs))) xs
2. \<lbrakk>x \<noteq> y; length xs \<le> length (takeWhile (\<lambda>xa. xa = x) (rev xs))\<rbrakk> \<Longrightarrow> take (Suc (length xs) - length (takeWhile (\<lambda>xa. xa = x) (rev xs @ [y]))) (y # xs) = [y]
3. \<lbrakk>x \<noteq> y; \<not> length xs \<le> length (takeWhile (\<lambda>xa. xa = x) (rev xs)); xs \<noteq> []\<rbrakk> \<Longrightarrow> take (Suc (length xs) - length (takeWhile (\<lambda>xa. xa = x) (rev xs @ [y]))) (y # xs) = y # take (length xs - length (takeWhile (\<lambda>xa. xa = x) (rev xs))) xs
[PROOF STEP]
apply (metis (full_types) Suc_diff_le length_rev length_takeWhile_le take_Suc_Cons)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<lbrakk>x \<noteq> y; length xs \<le> length (takeWhile (\<lambda>xa. xa = x) (rev xs))\<rbrakk> \<Longrightarrow> take (Suc (length xs) - length (takeWhile (\<lambda>xa. xa = x) (rev xs @ [y]))) (y # xs) = [y]
2. \<lbrakk>x \<noteq> y; \<not> length xs \<le> length (takeWhile (\<lambda>xa. xa = x) (rev xs)); xs \<noteq> []\<rbrakk> \<Longrightarrow> take (Suc (length xs) - length (takeWhile (\<lambda>xa. xa = x) (rev xs @ [y]))) (y # xs) = y # take (length xs - length (takeWhile (\<lambda>xa. xa = x) (rev xs))) xs
[PROOF STEP]
apply (simp add: takeWhile_takes_all)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<lbrakk>x \<noteq> y; \<forall>xa\<in>set xs. xa = x\<rbrakk> \<Longrightarrow> take (Suc (length xs) - length (takeWhile (\<lambda>xa. xa = x) (rev xs @ [y]))) (y # xs) = [y]
2. \<lbrakk>x \<noteq> y; \<not> length xs \<le> length (takeWhile (\<lambda>xa. xa = x) (rev xs)); xs \<noteq> []\<rbrakk> \<Longrightarrow> take (Suc (length xs) - length (takeWhile (\<lambda>xa. xa = x) (rev xs @ [y]))) (y # xs) = y # take (length xs - length (takeWhile (\<lambda>xa. xa = x) (rev xs))) xs
[PROOF STEP]
apply (subst takeWhile_append2)
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. \<And>xa. \<lbrakk>x \<noteq> y; \<forall>xa\<in>set xs. xa = x; xa \<in> set (rev xs)\<rbrakk> \<Longrightarrow> xa = x
2. \<lbrakk>x \<noteq> y; \<forall>xa\<in>set xs. xa = x\<rbrakk> \<Longrightarrow> take (Suc (length xs) - length (rev xs @ takeWhile (\<lambda>xa. xa = x) [y])) (y # xs) = [y]
3. \<lbrakk>x \<noteq> y; \<not> length xs \<le> length (takeWhile (\<lambda>xa. xa = x) (rev xs)); xs \<noteq> []\<rbrakk> \<Longrightarrow> take (Suc (length xs) - length (takeWhile (\<lambda>xa. xa = x) (rev xs @ [y]))) (y # xs) = y # take (length xs - length (takeWhile (\<lambda>xa. xa = x) (rev xs))) xs
[PROOF STEP]
apply auto
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>x \<noteq> y; \<not> length xs \<le> length (takeWhile (\<lambda>xa. xa = x) (rev xs)); xs \<noteq> []\<rbrakk> \<Longrightarrow> take (Suc (length xs) - length (takeWhile (\<lambda>xa. xa = x) (rev xs @ [y]))) (y # xs) = y # take (length xs - length (takeWhile (\<lambda>xa. xa = x) (rev xs))) xs
[PROOF STEP]
apply (simp add: takeWhile_takes_all)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>x \<noteq> y; \<exists>xa\<in>set xs. xa \<noteq> x; xs \<noteq> []\<rbrakk> \<Longrightarrow> take (Suc (length xs) - length (takeWhile (\<lambda>xa. xa = x) (rev xs @ [y]))) (y # xs) = y # take (length xs - length (takeWhile (\<lambda>xa. xa = x) (rev xs))) xs
[PROOF STEP]
apply auto
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>xa. \<lbrakk>x \<noteq> y; xs \<noteq> []; xa \<in> set xs; xa \<noteq> x\<rbrakk> \<Longrightarrow> take (Suc (length xs) - length (takeWhile (\<lambda>xa. xa = x) (rev xs))) (y # xs) = y # take (length xs - length (takeWhile (\<lambda>xa. xa = x) (rev xs))) xs
[PROOF STEP]
apply (metis (full_types) Suc_diff_le length_rev length_takeWhile_le take_Suc_Cons)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
|
{"llama_tokens": 3144, "file": "MSO_Regex_Equivalence_Pi_Regular_Operators", "length": 13}
|
import pandas as pd
import numpy as np
import torch
from tqdm import tqdm
from rdkit import Chem
from rdkit.Chem import AllChem
from torch.utils.data import DataLoader, Dataset
#from mordred import Calculator, descriptors, TopoPSA,Weight, CarbonTypes,SLogP, MoeType
from rdkit import Chem
from tqdm import tqdm
from rdkit.Chem.rdmolops import GetAdjacencyMatrix
from scipy.linalg import block_diag
from scipy import stats
from rdkit import RDLogger
def get_fingerprints_user(data, label ,bitSize_circular=2048, morgan_radius=2):
"""
Computes Morgan fingerprints for the molecules in `data`;
`label` is the position of the SMILES column.
"""
index_not_convertable = []
# if label is a string, get the column number
# disable RDKit warning printing while parsing
RDLogger.DisableLog('rdApp.*')
feature_matrix= pd.DataFrame(np.zeros((data.shape[0],bitSize_circular)), dtype=int)
for i in tqdm(range(data.shape[0])):
try:
feature_matrix.iloc[i,:] = np.array(AllChem.GetMorganFingerprintAsBitVect(Chem.MolFromSmiles(data.iloc[i, label]),morgan_radius,nBits=bitSize_circular))
except:
feature_matrix.iloc[i,:] = 0
index_not_convertable.append(i)
RDLogger.EnableLog('rdApp.*')
if len(index_not_convertable)> 0:
print("\n",len(index_not_convertable), " Molecules could not be read.")
return feature_matrix, index_not_convertable
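# Usage sketch (hypothetical frame; `label` is the integer position of the
# SMILES column):
#   df = pd.DataFrame({"smiles": ["CCO", "not_a_smiles"]})
#   fps, bad_rows = get_fingerprints_user(df, label=0)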
def get_fingerprints(data, bitSize_circular=2048, labels_default=None , labels_morgan=None, morgan_radius=2):
""" Computes the Fingerprints from Molecules
"""
feature_matrix= pd.DataFrame(np.zeros((data.shape[0],bitSize_circular)), dtype=int)
for i in tqdm(range(data.shape[0])):
feature_matrix.iloc[i,:] = np.array(AllChem.GetMorganFingerprintAsBitVect(Chem.MolFromSmiles(data.smiles.iloc[i]),morgan_radius,nBits=bitSize_circular))
return(feature_matrix)
class FPAutoencoder_Dataset(Dataset):
def __init__(self,fingerprint, np, npl):
self.len = fingerprint.shape[0]
self.fingerprint=(torch.tensor(fingerprint.values, dtype=torch.float))
self.np = torch.tensor(np, dtype = torch.float)
self.npl = torch.tensor(npl, dtype = torch.float)
def __getitem__(self, index):
return self.fingerprint[index], self.np[index], self.npl[index]
def __len__(self):
return self.len
class GraphDataset(Dataset):
def __init__(self,feature, adjacency, target_reg, target_clf ):
self.len = len(adjacency)
self.adjacency = [torch.tensor(adj, dtype=torch.float) for adj in adjacency]
self.feature = [torch.tensor(feat.values, dtype=torch.float) for feat in feature]
self.target_reg = torch.tensor(target_reg, dtype=torch.float)
self.target_clf = torch.tensor(target_clf, dtype=torch.float)
def __getitem__(self, index):
return self.feature[index], self.adjacency[index], self.adjacency[index].shape[0], self.target_reg[index], self.target_clf[index]
def __len__(self):
return self.len
def graph_collate(batch):
feat = [item[0] for item in batch ]
adj = [item[1] for item in batch ]
sep_list = [item[2] for item in batch ]
target_reg = torch.stack([item[3] for item in batch ])
target_clf = torch.stack([item[4] for item in batch ])
adj = torch.tensor(block_diag(*adj), dtype=(torch.float))
feat = torch.cat(feat, dim=0)
return [feat, adj, sep_list], target_reg, target_clf.unsqueeze(1)
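# Usage sketch (hypothetical tensors): block_diag merges the per-molecule
# adjacency matrices into one block-diagonal batch graph, so a single GCN
# forward pass can process the whole batch; sep_list keeps the per-graph
# node counts so node features can be split back per molecule.
#   loader = DataLoader(GraphDataset(feat, adjs, y_reg, y_clf),
#                       batch_size=32, collate_fn=graph_collate)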
def create_gcn_features(smiles):
print( "\n Generating Graph Conv Features ... \n")
# get Rdkit Molecules
mols=[Chem.MolFromSmiles(x) for x in smiles]
#mols= mols+maskri_mol
#atom type
atom_dummy=pd.get_dummies([atom.GetAtomicNum() for x in mols for atom in x.GetAtoms() ])
#degree
degree=pd.get_dummies([atom.GetDegree() for x in mols for atom in x.GetAtoms()])
degree.columns=[ "degree_"+str(i) for i in range(degree.shape[1])]
#Get Hybridization
hy=[atom.GetHybridization() for x in mols for atom in x.GetAtoms()]
hybridization=pd.get_dummies(hy)
hybridization.columns=[ "hybrid_"+str(i) for i in range(hybridization.shape[1])]
#aromaticity
aromaticity =pd.get_dummies([atom.GetIsAromatic() for x in mols for atom in x.GetAtoms()])
aromaticity=aromaticity.drop([0], axis=1)
aromaticity.columns= ["InAromatic"]
#formal charge
formal_charge=pd.get_dummies([atom.GetFormalCharge() for x in mols for atom in x.GetAtoms()])
formal_charge.columns=[ "charge_"+str(i) for i in range(formal_charge.shape[1])]
#implicit valence
implicit_valence=pd.get_dummies([atom.GetImplicitValence() for x in mols for atom in x.GetAtoms()])
implicit_valence.columns=[ "implicit_valence_"+str(i) for i in range(implicit_valence.shape[1])]
#chirality (GetChiralTag)
chirality=pd.get_dummies([atom.GetChiralTag () for x in mols for atom in x.GetAtoms()])
chirality.columns=[ "chirality_"+str(i) for i in range(chirality.shape[1])]
#number of implicit hydrogens (protons)
num_h=pd.get_dummies([atom.GetNumImplicitHs() for x in mols for atom in x.GetAtoms()])
num_h.columns=[ "num_h_"+str(i) for i in range(num_h.shape[1])]
#concatenate features
atom_features=pd.concat([atom_dummy,degree,num_h,chirality, implicit_valence,formal_charge,aromaticity, hybridization],axis=1)
#use only atom type to predict
#atom_features=atom_dummy
#generate Adjacency and Feature Matrix
adjs=[None]*len(mols)
feat=[None]*len(mols)
index=0
for i in range(len(mols)):
A = GetAdjacencyMatrix(mols[i])
adjs[i]=norm_adj(A)
feat[i]=atom_features.iloc[index:(index+A.shape[0]),:].reset_index(drop=True)
index+=A.shape[0]
return adjs, feat
class FPDataset(Dataset):
def __init__(self,fingerprint, target_reg, target_clf ):
self.len = fingerprint.shape[0]
self.fingerprint=(torch.tensor(fingerprint.values, dtype=torch.float))
self.target_reg = torch.tensor(target_reg, dtype=torch.float)
self.target_clf = torch.tensor(target_clf, dtype=torch.float)
def __getitem__(self, index):
return self.fingerprint[index], self.target_reg[index], self.target_clf[index]
def __len__(self):
return self.len
# =============================================================================
# def comp_descriptors(smiles):
# mols = [Chem.MolFromSmiles(smile) for smile in smiles ]
# calc = Calculator()
# calc.register(MoeType.EState_VSA(1))
# calc.register(MoeType.EState_VSA(2))
# calc.register(MoeType.EState_VSA(3))
# calc.register(MoeType.EState_VSA(4))
# calc.register(MoeType.EState_VSA(5))
# calc.register(MoeType.EState_VSA(6))
# calc.register(MoeType.EState_VSA(7))
# calc.register(MoeType.EState_VSA(8))
# calc.register(MoeType.EState_VSA(9))
# calc.register(MoeType.EState_VSA(10))
# calc.register(MoeType.EState_VSA(11))
#
# calc.register(MoeType.PEOE_VSA(1))
# calc.register(MoeType.PEOE_VSA(2))
# calc.register(MoeType.PEOE_VSA(3))
# calc.register(MoeType.PEOE_VSA(4))
# calc.register(MoeType.PEOE_VSA(5))
# calc.register(MoeType.PEOE_VSA(6))
# calc.register(MoeType.PEOE_VSA(7))
# calc.register(MoeType.PEOE_VSA(8))
# calc.register(MoeType.PEOE_VSA(9))
# calc.register(MoeType.PEOE_VSA(10))
# calc.register(MoeType.PEOE_VSA(11))
# calc.register(MoeType.PEOE_VSA(12))
# calc.register(MoeType.PEOE_VSA(13))
# calc.register(MoeType.PEOE_VSA(14))
#
# calc.register(MoeType.SMR_VSA(1))
# calc.register(MoeType.SMR_VSA(2))
# calc.register(MoeType.SMR_VSA(3))
# calc.register(MoeType.SMR_VSA(4))
# calc.register(MoeType.SMR_VSA(5))
# calc.register(MoeType.SMR_VSA(6))
# calc.register(MoeType.SMR_VSA(7))
# calc.register(MoeType.SMR_VSA(8))
# calc.register(MoeType.SMR_VSA(9))
# calc.register(MoeType.SMR_VSA(10))
#
# calc.register(MoeType.SlogP_VSA(1))
# calc.register(MoeType.SlogP_VSA(2))
# calc.register(MoeType.SlogP_VSA(3))
# calc.register(MoeType.SlogP_VSA(4))
# calc.register(MoeType.SlogP_VSA(5))
# calc.register(MoeType.SlogP_VSA(6))
# calc.register(MoeType.SlogP_VSA(7))
# calc.register(MoeType.SlogP_VSA(8))
# calc.register(MoeType.SlogP_VSA(9))
# calc.register(MoeType.SlogP_VSA(10))
# calc.register(MoeType.SlogP_VSA(11))
# calc.register(MoeType.SlogP_VSA(12))
#
#
# calc.register(TopoPSA.TopoPSA(no_only=False))
#
# return calc.pandas(mols)
#
# =============================================================================
def calculate_sensitivity_specificity(y_test, y_pred_test):
# Note: more intermediate quantities are computed than strictly necessary;
# this allows returning measures other than sensitivity and specificity.
# Boolean masks for whether the true label is positive or negative
actual_pos = y_test == 1
actual_neg = y_test == 0
# Get true and false test (true test match actual, false tests differ from actual)
true_pos = (y_pred_test == 1) & (actual_pos)
false_pos = (y_pred_test == 1) & (actual_neg)
true_neg = (y_pred_test == 0) & (actual_neg)
false_neg = (y_pred_test == 0) & (actual_pos)
# Calculate accuracy
accuracy = np.mean(y_pred_test == y_test)
# Calculate sensitivity and specificity
sensitivity = np.sum(true_pos) / np.sum(actual_pos)
specificity = np.sum(true_neg) / np.sum(actual_neg)
return sensitivity, specificity, accuracy
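# Worked example (hypothetical labels):
#   y_true = np.array([1, 0, 1, 1]); y_hat = np.array([1, 0, 0, 1])
#   calculate_sensitivity_specificity(y_true, y_hat)
#   # -> sensitivity 2/3, specificity 1.0, accuracy 0.75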
def norm_adj(x):
"""Normalizes Adjacency Matrix
Parameters
----------
x : matrix
adjacency matrix
Returns
-------
normlized adjacency matrix
"""
x_hat=x+np.eye(x.shape[0])
D_inv=np.diag(np.array(np.sum(x_hat, axis=1))**(-0.5))
return(np.matmul(np.matmul(D_inv,x_hat), D_inv))
def mean_confidence_interval(data, confidence=0.95):
a = 1.0 * np.array(data)
n = len(a)
m, se = np.mean(a), stats.sem(a)
h = se * stats.t.ppf((1 + confidence) / 2., n-1)
return m, h
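# Example (hypothetical data): mean_confidence_interval([1.0, 1.2, 0.9, 1.1])
# returns the sample mean and the half-width of the 95% t-confidence interval.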
|
{"hexsha": "108736d26134643a3a65d20ccf3305fb5da1e0ec", "size": 10202, "ext": "py", "lang": "Python", "max_stars_repo_path": "model/framework/neural_npfp/neural_npfp/utils.py", "max_stars_repo_name": "ersilia-os/eos6tg8", "max_stars_repo_head_hexsha": "10c0d532b789ef922bb53469584b97ba6c335be3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "model/framework/neural_npfp/neural_npfp/utils.py", "max_issues_repo_name": "ersilia-os/eos6tg8", "max_issues_repo_head_hexsha": "10c0d532b789ef922bb53469584b97ba6c335be3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "model/framework/neural_npfp/neural_npfp/utils.py", "max_forks_repo_name": "ersilia-os/eos6tg8", "max_forks_repo_head_hexsha": "10c0d532b789ef922bb53469584b97ba6c335be3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.7006802721, "max_line_length": 164, "alphanum_fraction": 0.6651636934, "include": true, "reason": "import numpy,from scipy", "num_tokens": 2802}
|
import pytest
import numpy as np
import inspect
import random
from graphgraph import operators as op
class LaplacianConstraintError(RuntimeError):
pass
class LaplacianConstraints:
def __init__(self, in_matrix):
self.in_matrix = in_matrix
self.validate()
def is_symmetric(self):
if not np.all(self.in_matrix == self.in_matrix.T):
raise LaplacianConstraintError("input matrix is not symmetric")
def is_diagonal_nonnegative(self):
if not np.all(np.diag(self.in_matrix) >= 0):
raise LaplacianConstraintError("diagonal elements are not all nonnegative")
def is_off_diagonal_nonpositive(self):
if not np.all(
self.in_matrix[np.triu_indices(n=self.in_matrix.shape[0], k=1)] <= 0
):
raise LaplacianConstraintError(
"off-diagonal elements are not all nonpositive"
)
def is_row_sum_zero(self):
if not np.all(np.abs(np.sum(self.in_matrix, axis=0)) < 1e-9):
raise LaplacianConstraintError("row sum is not identically zero")
def validate(self):
constraints = [
m[1]
for m in inspect.getmembers(
LaplacianConstraints, predicate=inspect.isfunction
)
if m[0].startswith("is")
]
for c in constraints:
c(self)
def test_laplacian_operator():
p = random.randint(a=3, b=100)
weights = np.random.uniform(size=int(0.5 * p * (p - 1)))
LaplacianConstraints(op.laplacian_op(weights)).validate()
@pytest.mark.parametrize(
"inv_op_name, op_name",
[(op.inv_laplacian_op, op.laplacian_op), (op.inv_adjacency_op, op.adjacency_op)],
)
def test_inverse_operators(inv_op_name, op_name):
weights = np.random.uniform(size=6)
weights_ = inv_op_name(op_name(weights))
np.testing.assert_allclose(weights, weights_)
@pytest.mark.parametrize(
"adj_op_name, op_name",
[(op.adj_laplacian_op, op.laplacian_op), (op.adj_adjacency_op, op.adjacency_op)],
)
def test_adjoint_operators(adj_op_name, op_name):
"test the inner product equality between the linear operator and its adjoint"
p = random.randint(a=3, b=100)
X = np.random.uniform(size=p * p).reshape((p, p))
weights = np.random.uniform(size=int(0.5 * p * (p - 1)))
np.testing.assert_almost_equal(
np.sum(X * op_name(weights)), np.sum(weights * adj_op_name(X))
)
|
{"hexsha": "084e0b48b9c82bb6fb98bee6b3bc4637303c175a", "size": 2432, "ext": "py", "lang": "Python", "max_stars_repo_path": "graphgraph/tests/test_operators.py", "max_stars_repo_name": "mirca/graphgraph.py", "max_stars_repo_head_hexsha": "68b29a2dfeae1654db85a3b03c51ce4798c6608d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "graphgraph/tests/test_operators.py", "max_issues_repo_name": "mirca/graphgraph.py", "max_issues_repo_head_hexsha": "68b29a2dfeae1654db85a3b03c51ce4798c6608d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "graphgraph/tests/test_operators.py", "max_forks_repo_name": "mirca/graphgraph.py", "max_forks_repo_head_hexsha": "68b29a2dfeae1654db85a3b03c51ce4798c6608d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-07-17T11:33:00.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-04T10:55:46.000Z", "avg_line_length": 31.5844155844, "max_line_length": 87, "alphanum_fraction": 0.6595394737, "include": true, "reason": "import numpy", "num_tokens": 602}
|
import logging
import os
import random
import traceback
from datetime import date
from pathlib import Path
from typing import Optional, Union
import numpy as np
import requests
import torch
import torch.nn as nn
from rxnebm.model import FF, G2E, S2E, model_utils
def setup_paths(
load_trained: Optional[bool] = False,
date_trained: Optional[str] = None,
ckpt_root: Optional[Union[str, bytes, os.PathLike]] = None,
) -> Union[str, bytes, os.PathLike]:
"""
Parameters
----------
root : Union[str, bytes, os.PathLike] (Default = None)
path to the root folder where checkpoints will be stored
If None, this is set to full/path/to/rxnebm/checkpoints/
"""
if load_trained:
if date_trained is None:
raise ValueError("Please provide date_trained as DD_MM_YYYY")
else:
date_trained = date.today().strftime("%d_%m_%Y")
if ckpt_root is None:
ckpt_root = Path(__file__).resolve().parents[1] / "checkpoints"
else:
ckpt_root = Path(ckpt_root)
checkpoint_folder = ckpt_root / date_trained
os.makedirs(checkpoint_folder, exist_ok=True)
print(f"created checkpoint_folder: {checkpoint_folder}")
return checkpoint_folder
def load_or_create_vocab(args):
"""Currently only supports loading. The vocab is small enough that a single universal vocab suffices"""
root = Path(__file__).resolve().parents[1] / "data" / "cleaned_data"
vocab = {}
with open(root / args.vocab_file, "r") as f:
for i, line in enumerate(f):
token = line.strip()
vocab[token] = i
return vocab
def load_model_and_opt(
args,
checkpoint_folder: Union[str, bytes, os.PathLike],
optimizer_name: str = "Adam",
):
curr_device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
checkpoint_filename = f'{args.model_name}_{args.old_expt_name}_checkpoint.pth.tar'
checkpoint = torch.load(
Path(checkpoint_folder) / checkpoint_filename,
map_location=torch.device(curr_device),
)
print(f"loaded checkpoint from {Path(checkpoint_folder) / checkpoint_filename}")
begin_epoch = checkpoint["epoch"] + 1
if args.model_name == "FeedforwardEBM":
saved_model = FF.FeedforwardEBM(args)
elif args.model_name == "GraphEBM_1MPN": # Graph to energy, project both reactants & products w/ dot product output
saved_model = G2E.GraphEBM_1MPN(args)
elif args.model_name == "GraphEBM_2MPN": # Graph to energy, separate encoders + projections, feedforward output
saved_model = G2E.GraphEBM_2MPN(args)
elif args.model_name == "TransformerEBM":
assert args.vocab_file is not None, "Please provide precomputed --vocab_file!"
vocab = load_or_create_vocab(args)
saved_model = S2E.TransformerEBM(args, vocab)
else:
raise ValueError("Unrecognized model name")
saved_optimizer = model_utils.get_optimizer(optimizer_name)(
saved_model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay
)
# https://discuss.pytorch.org/t/missing-keys-unexpected-keys-in-state-dict-when-loading-self-trained-model/22379/14
for key in list(checkpoint["state_dict"].keys()):
if 'module.' in key:
checkpoint["state_dict"][key.replace('module.', '')] = checkpoint["state_dict"][key]
del checkpoint["state_dict"][key]
saved_model.load_state_dict(checkpoint["state_dict"])
saved_optimizer.load_state_dict(checkpoint["optimizer"])
print('Loaded model and optimizer state dicts')
if torch.cuda.is_available() and not args.ddp: # if ddp, need to move within each process
# move optimizer tensors to gpu https://github.com/pytorch/pytorch/issues/2830
for state in saved_optimizer.state.values():
for k, v in state.items():
if torch.is_tensor(v):
state[k] = v.cuda()
return saved_model, saved_optimizer, begin_epoch
def send_message(msg, chat_id, bot_token):
"""
params:
-------
msg: message you want to send
chat_id: CHAT_ID
bot_token: API_KEY of your bot
"""
url = f'https://api.telegram.org/bot{bot_token}/sendMessage'
data = {'chat_id': str(chat_id), 'text': f'{msg}'}
requests.post(url, data)
|
{"hexsha": "1644ce8ddc33292e0d60eb657d2f546326ca2ba2", "size": 4338, "ext": "py", "lang": "Python", "max_stars_repo_path": "rxnebm/experiment/expt_utils.py", "max_stars_repo_name": "coleygroup/rxn-ebm", "max_stars_repo_head_hexsha": "8480822d0d8ad74e46edf693ad1cdc787291f422", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2021-12-18T23:03:41.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T15:31:57.000Z", "max_issues_repo_path": "rxnebm/experiment/expt_utils.py", "max_issues_repo_name": "coleygroup/rxn-ebm", "max_issues_repo_head_hexsha": "8480822d0d8ad74e46edf693ad1cdc787291f422", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "rxnebm/experiment/expt_utils.py", "max_forks_repo_name": "coleygroup/rxn-ebm", "max_forks_repo_head_hexsha": "8480822d0d8ad74e46edf693ad1cdc787291f422", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-12-18T23:03:42.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-18T23:03:42.000Z", "avg_line_length": 37.3965517241, "max_line_length": 126, "alphanum_fraction": 0.6763485477, "include": true, "reason": "import numpy", "num_tokens": 1040}
|
module concurrency_api
use :: event_module, only : event
use :: event_handler_module, only : event_handler
use :: stream_module, only : stream
use :: stream_handler_module, only : stream_handler
use :: dependency_manager_module, only : dependency_manager
use :: abstract_concurrency_factory_module, only : abstract_concurrency_factory
implicit none
private
public :: event
public :: event_handler
public :: stream
public :: stream_handler
public :: dependency_manager
public :: abstract_concurrency_factory
public :: concurrency_factory
public :: concurrency_initialize
public :: concurrency_finalize
class(abstract_concurrency_factory), allocatable :: concurrency_factory
contains
subroutine concurrency_initialize(factory)
class(abstract_concurrency_factory), intent(in) :: factory
concurrency_factory = factory
end subroutine concurrency_initialize
subroutine concurrency_finalize()
if ( allocated(concurrency_factory) ) then
call concurrency_factory%cleanup()
deallocate(concurrency_factory)
end if
end subroutine concurrency_finalize
end module concurrency_api
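! Usage sketch (hypothetical concrete factory type `my_concurrency_factory`,
! assumed to extend abstract_concurrency_factory):
!   use concurrency_api
!   type(my_concurrency_factory) :: factory
!   call concurrency_initialize(factory)
!   ! ... create events/streams via concurrency_factory ...
!   call concurrency_finalize()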
|
{"hexsha": "3394713ab51a6d8d44efa56d6da204e6227fec95", "size": 1217, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/modules/concurrency_api/concurrency_api.f90", "max_stars_repo_name": "cheshyre/ntcl-data", "max_stars_repo_head_hexsha": "bd7b982c81b1611b32b7c2d4500154cd97a0fd30", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/modules/concurrency_api/concurrency_api.f90", "max_issues_repo_name": "cheshyre/ntcl-data", "max_issues_repo_head_hexsha": "bd7b982c81b1611b32b7c2d4500154cd97a0fd30", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/modules/concurrency_api/concurrency_api.f90", "max_forks_repo_name": "cheshyre/ntcl-data", "max_forks_repo_head_hexsha": "bd7b982c81b1611b32b7c2d4500154cd97a0fd30", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.2051282051, "max_line_length": 83, "alphanum_fraction": 0.7354149548, "num_tokens": 230}
|
'''
Basic test installation is working
'''
import collections
import numpy as np
import pytest
from OscopeBootstrap import SyntheticDataset
from OscopeBootstrap.oscope_tf import bootstrap_hypothesis_test
DataParameters = collections.namedtuple('DataParameters', 'ngroups NG N G noiselevel')
@pytest.fixture
def dataset_options() -> DataParameters:
return DataParameters(
ngroups=3,
NG=3, # number of genes in group
G=100, # total number of genes
N=5, # number of cells
noiselevel=1,
)
@pytest.fixture
def dataset_generation(dataset_options):
data, phaseG, angularSpeed = SyntheticDataset.GetSimISyntheticData(NG=dataset_options.NG,
G=dataset_options.G,
N=dataset_options.N,
noiseLevel=dataset_options.noiselevel,
ngroups=dataset_options.ngroups)
return data, phaseG, angularSpeed
def test_generation(dataset_options, dataset_generation):
data, phaseG, angularSpeed = dataset_generation
assert data.shape == (dataset_options.G, dataset_options.N)
assert phaseG.shape == (dataset_options.G, ) # phase for each gene
assert angularSpeed.shape == (dataset_options.G, )
assert np.all(~np.isnan(data))
# Boltzmann Machines
Why use a generative model rather than the more well known discriminative deep neural networks (DNN)?
* Discriminative methods have several limitations: they are mainly supervised learning methods, thus requiring labeled data, and there are tasks they cannot accomplish, like drawing new examples from an unknown probability distribution.
* A generative model can learn to represent and sample from a probability distribution. The core idea is to learn a parametric model of the probability distribution from which the training data was drawn. For example, such a model could
  a. learn to draw new examples of cats and dogs, given a training dataset of images of cats and dogs;
  b. generate a sample of an ordered or disordered Ising model phase, having been given samples of such phases;
  c. model the trial wave function for Monte Carlo calculations.
* Both use gradient-descent based learning procedures for minimizing cost functions.
* Energy-based models don't use backpropagation and automatic differentiation for computing gradients, instead turning to Markov Chain Monte Carlo methods.
* DNNs often have several hidden layers. A restricted Boltzmann machine has only one hidden layer, however several RBMs can be stacked to make up Deep Belief Networks, of which they constitute the building blocks.
History: The RBM was developed by, amongst others, Geoffrey Hinton, called by some the "Godfather of Deep Learning", working with the University of Toronto and Google.
A BM is what we would call an undirected probabilistic graphical model
with stochastic continuous or discrete units.
It is interpreted as a stochastic recurrent neural network where the
state of each unit(neurons/nodes) depends on the units it is connected
to. The weights in the network represent thus the strength of the
interaction between various units/nodes.
It turns into a Hopfield network if we choose deterministic rather
than stochastic units. In contrast to a Hopfield network, a BM is a
so-called generative model. It allows us to generate new samples from
the learned distribution.
A standard BM network is divided into a set of visible (observable) units $\hat{x}$ and a set of unknown hidden units/nodes $\hat{h}$.
Additionally there can be bias nodes for the hidden and visible layers. These biases are normally set to $1$.
BMs are stackable, meaning we can train a BM whose output serves as input to another BM. In this way we can construct deep networks for learning complex probability distributions, the layers being trained one after another, a feature which makes them popular in deep learning.
However, they are often hard to train. This leads to the introduction of so-called restricted BMs, or RBMs.
Here we take away all lateral connections between nodes in the visible layer as well as connections between nodes in the hidden layer. The network is illustrated in the figure below.
<!-- dom:FIGURE: [figures/RBM.png, width=800 frac=1.0] -->
<!-- begin figure -->
<p style="font-size: 0.9em"><i>Figure 1: </i></p><!-- end figure -->
## The network
**The network layers**:
1. A function $\mathbf{x}$ that represents the visible layer, a vector of $M$ elements (nodes). This layer represents both what the RBM might be given as training input, and what we want it to be able to reconstruct. This might for example be the pixels of an image, the spin values of the Ising model, or coefficients representing speech.
2. The function $\mathbf{h}$ represents the hidden, or latent, layer. A vector of $N$ elements (nodes). Also called "feature detectors".
The goal of the hidden layer is to increase the model's expressive power. We encode complex interactions between visible variables by introducing additional, hidden variables that interact with visible degrees of freedom in a simple manner, yet still reproduce the complex correlations between visible degrees in the data once marginalized over (integrated out).
Examples of this trick being employed in physics:
1. The Hubbard-Stratonovich transformation
2. The introduction of ghost fields in gauge theory
3. Shadow wave functions in Quantum Monte Carlo simulations
**The network parameters, to be optimized/learned**:
1. $\mathbf{a}$ represents the visible bias, a vector of same length as $\mathbf{x}$.
2. $\mathbf{b}$ represents the hidden bias, a vector of same lenght as $\mathbf{h}$.
3. $W$ represents the interaction weights, a matrix of size $M\times N$.
### Joint distribution
The restricted Boltzmann machine is described by a Bolztmann distribution
<!-- Equation labels as ordinary links -->
<div id="_auto1"></div>
$$
\begin{equation}
P_{rbm}(\mathbf{x},\mathbf{h}) = \frac{1}{Z} e^{-\frac{1}{T_0}E(\mathbf{x},\mathbf{h})},
\label{_auto1} \tag{1}
\end{equation}
$$
where $Z$ is the normalization constant or partition function, defined as
<!-- Equation labels as ordinary links -->
<div id="_auto2"></div>
$$
\begin{equation}
Z = \int \int e^{-\frac{1}{T_0}E(\mathbf{x},\mathbf{h})} d\mathbf{x} d\mathbf{h}.
\label{_auto2} \tag{2}
\end{equation}
$$
It is common to ignore $T_0$ by setting it to one.
### Network Elements, the energy function
The function $E(\mathbf{x},\mathbf{h})$ gives the **energy** of a
configuration (pair of vectors) $(\mathbf{x}, \mathbf{h})$. The lower
the energy of a configuration, the higher the probability of it. This
function also depends on the parameters $\mathbf{a}$, $\mathbf{b}$ and
$W$. Thus, when we adjust them during the learning procedure, we are
adjusting the energy function to best fit our problem.
An expression for the energy function is
$$
E(\hat{x},\hat{h}) = -\sum_{ia}^{NA}b_i^a \alpha_i^a(x_i)-\sum_{jd}^{MD}c_j^d \beta_j^d(h_j)-\sum_{ijad}^{NAMD} \alpha_i^a(x_i) w_{ij}^{ad} \beta_j^d(h_j).
$$
Here $\beta_j^d(h_j)$ and $\alpha_i^a(x_i)$ are so-called transfer functions that map a given input value to a desired feature value. The labels $a$ and $d$ denote that there can be multiple transfer functions per variable. The first sum depends only on the visible units, the second only on the hidden ones. **Note** that there is no connection between nodes within a layer.
The quantities $b$ and $c$ can be interpreted as the visible and hidden biases, respectively.
The connection between the nodes in the two layers is given by the weights $w_{ij}$.
### Defining different types of RBMs
There are different variants of RBMs, and the differences lie in the types of visible and hidden units we choose as well as in the implementation of the energy function $E(\mathbf{x},\mathbf{h})$.
**Binary-Binary RBM:**
RBMs were first developed using binary units in both the visible and hidden layer. The corresponding energy function is defined as follows:
<!-- Equation labels as ordinary links -->
<div id="_auto3"></div>
$$
\begin{equation}
E(\mathbf{x}, \mathbf{h}) = - \sum_i^M x_i a_i- \sum_j^N b_j h_j - \sum_{i,j}^{M,N} x_i w_{ij} h_j,
\label{_auto3} \tag{3}
\end{equation}
$$
where the binary values taken on by the nodes are most commonly 0 and 1.
**Gaussian-Binary RBM:**
Another variant is the RBM where the visible units are Gaussian while the hidden units remain binary:
<!-- Equation labels as ordinary links -->
<div id="_auto4"></div>
$$
\begin{equation}
E(\mathbf{x}, \mathbf{h}) = \sum_i^M \frac{(x_i - a_i)^2}{2\sigma_i^2} - \sum_j^N b_j h_j - \sum_{i,j}^{M,N} \frac{x_i w_{ij} h_j}{\sigma_i^2}.
\label{_auto4} \tag{4}
\end{equation}
$$
This variant is useful when we model continuous data (i.e., when we wish $\mathbf{x}$ to be continuous). It requires a smaller learning rate, since there is no upper bound to the value a component might take in the reconstruction.
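To make these energy functions concrete, here is a minimal NumPy sketch of eqs. (3) and (4); the function names and the shapes assumed for `a`, `b`, `W` and `sigma` are our own choices, not part of any established library.

```python
import numpy as np

def energy_binary_binary(x, h, a, b, W):
    """Binary-binary RBM energy, eq. (3): E = -x.a - b.h - x^T W h.
    Shapes: x (M,), h (N,), a (M,), b (N,), W (M, N)."""
    return -x @ a - b @ h - x @ W @ h

def energy_gaussian_binary(x, h, a, b, W, sigma):
    """Gaussian-binary RBM energy, eq. (4); sigma has shape (M,)."""
    return np.sum((x - a) ** 2 / (2 * sigma ** 2)) - b @ h - (x / sigma ** 2) @ W @ h
```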
Other types of units include:
1. Softmax and multinomial units
2. Gaussian visible and hidden units
3. Binomial units
4. Rectified linear units
### Cost function
When working with a training dataset, the most common training approach is maximizing the log-likelihood of the training data. The log likelihood characterizes the log-probability of generating the observed data using our generative model. Using this method our cost function is chosen as the negative log-likelihood. The learning then consists of trying to find parameters that maximize the probability of the dataset, and is known as Maximum Likelihood Estimation (MLE).
Denoting the parameters as $\boldsymbol{\theta} = a_1,...,a_M,b_1,...,b_N,w_{11},...,w_{MN}$, the log-likelihood is given by
<!-- Equation labels as ordinary links -->
<div id="_auto5"></div>
$$
\begin{equation}
\mathcal{L}(\{ \theta_i \}) = \langle \text{log} P_\theta(\boldsymbol{x}) \rangle_{data}
\label{_auto5} \tag{5}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="_auto6"></div>
$$
\begin{equation}
= - \langle E(\boldsymbol{x}; \{ \theta_i\}) \rangle_{data} - \text{log} Z(\{ \theta_i\}),
\label{_auto6} \tag{6}
\end{equation}
$$
where we used that the normalization constant does not depend on the data, $\langle \text{log} Z(\{ \theta_i\}) \rangle = \text{log} Z(\{ \theta_i\})$.
Our cost function is the negative log-likelihood, $\mathcal{C}(\{ \theta_i \}) = - \mathcal{L}(\{ \theta_i \})$.
### Optimization / Training
The training procedure of choice often is Stochastic Gradient Descent (SGD). It consists of a series of iterations where we update the parameters according to the equation
<!-- Equation labels as ordinary links -->
<div id="_auto7"></div>
$$
\begin{equation}
\boldsymbol{\theta}_{k+1} = \boldsymbol{\theta}_k - \eta \nabla \mathcal{C} (\boldsymbol{\theta}_k)
\label{_auto7} \tag{7}
\end{equation}
$$
at each $k$-th iteration. There are a range of variants of the algorithm which aim at making the learning rate $\eta$ more adaptive so the method might be more efficient while remaining stable.
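As a minimal illustration of eq. (7), here is plain SGD with a fixed learning rate (no adaptive variants); `grad_C` is assumed to return a stochastic estimate of $\nabla \mathcal{C}$.

```python
def sgd(theta, grad_C, eta=0.01, n_iter=1000):
    """Plain stochastic gradient descent, eq. (7):
    theta_{k+1} = theta_k - eta * grad C(theta_k)."""
    for _ in range(n_iter):
        theta = theta - eta * grad_C(theta)
    return theta
```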
We now need the gradient of the cost function in order to minimize it. We find that
<!-- Equation labels as ordinary links -->
<div id="_auto8"></div>
$$
\begin{equation}
\frac{\partial \mathcal{C}(\{ \theta_i\})}{\partial \theta_i}
= \langle \frac{\partial E(\boldsymbol{x}; \theta_i)}{\partial \theta_i} \rangle_{data}
+ \frac{\partial \text{log} Z(\{ \theta_i\})}{\partial \theta_i}
\label{_auto8} \tag{8}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="_auto9"></div>
$$
\begin{equation}
= \langle O_i(\boldsymbol{x}) \rangle_{data} - \langle O_i(\boldsymbol{x}) \rangle_{model},
\label{_auto9} \tag{9}
\end{equation}
$$
where in order to simplify notation we defined the "operator"
<!-- Equation labels as ordinary links -->
<div id="_auto10"></div>
$$
\begin{equation}
O_i(\boldsymbol{x}) = \frac{\partial E(\boldsymbol{x}; \theta_i)}{\partial \theta_i},
\label{_auto10} \tag{10}
\end{equation}
$$
and used the statistical mechanics relationship between expectation values and the log-partition function:
<!-- Equation labels as ordinary links -->
<div id="_auto11"></div>
$$
\begin{equation}
\langle O_i(\boldsymbol{x}) \rangle_{model} = \text{Tr} P_\theta(\boldsymbol{x})O_i(\boldsymbol{x}) = - \frac{\partial \text{log} Z(\{ \theta_i\})}{\partial \theta_i}.
\label{_auto11} \tag{11}
\end{equation}
$$
The data-dependent term in the gradient is known as the positive phase
of the gradient, while the model-dependent term is known as the
negative phase of the gradient. The aim of the training is to lower
the energy of configurations that are near observed data points
(increasing their probability), and raising the energy of
configurations that are far from observed data points (decreasing
their probability).
The gradient of the negative log-likelihood cost function of a Binary-Binary RBM is then
<!-- Equation labels as ordinary links -->
<div id="_auto12"></div>
$$
\begin{equation}
\frac{\partial \mathcal{C} (w_{ij}, a_i, b_j)}{\partial w_{ij}} = \langle x_i h_j \rangle_{data} - \langle x_i h_j \rangle_{model}
\label{_auto12} \tag{12}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="_auto13"></div>
$$
\begin{equation}
\frac{\partial \mathcal{C} (w_{ij}, a_i, b_j)}{\partial a_{i}} = \langle x_i \rangle_{data} - \langle x_i \rangle_{model}
\label{_auto13} \tag{13}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="_auto14"></div>
$$
\begin{equation}
\frac{\partial \mathcal{C} (w_{ij}, a_i, b_j)}{\partial b_{j}} = \langle h_j \rangle_{data} - \langle h_j \rangle_{model}.
\label{_auto14} \tag{14}
\end{equation}
$$
To get the expectation values with respect to the *data*, we set the visible units to each of the observed samples in the training data, then update the hidden units according to the conditional probability found before. We then average over all samples in the training data to calculate expectation values with respect to the data.
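A common approximate scheme for the model-dependent (negative-phase) expectation values is contrastive divergence with a single Gibbs step (CD-1). The text does not derive it, so take the following NumPy sketch for a binary-binary RBM as an illustration only; it uses the sigmoid conditionals derived later in this document (eqs. (54) and (60)) and performs gradient *ascent* on the log-likelihood, which is equivalent to descending the cost.

```python
import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def cd1_update(X, a, b, W, eta=0.05, rng=np.random.default_rng(0)):
    """One CD-1 parameter update for a binary-binary RBM.
    X: (n_samples, M) batch of binary training vectors."""
    n = X.shape[0]
    ph_data = sigmoid(b + X @ W)                     # positive phase: p(h=1|x)
    h = (rng.random(ph_data.shape) < ph_data) * 1.0  # sample hidden units
    x_model = sigmoid(a + h @ W.T)                   # mean-field reconstruction
    ph_model = sigmoid(b + x_model @ W)              # negative phase
    # <x h>_data - <x h>_model etc., averaged over the batch:
    W += eta * (X.T @ ph_data - x_model.T @ ph_model) / n
    a += eta * (X - x_model).mean(axis=0)
    b += eta * (ph_data - ph_model).mean(axis=0)
    return a, b, W
```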
### Kullback-Leibler relative entropy
When the goal of the training is to approximate a probability
distribution, as it is in generative modeling, another relevant
measure is the **Kullback-Leibler divergence**, also known as the
relative entropy. It is a non-symmetric measure of the
dissimilarity between two probability density functions $p$ and
$q$. If $p$ is the unknown probability which we approximate with $q$,
we can measure the difference by
<!-- Equation labels as ordinary links -->
<div id="_auto16"></div>
$$
\begin{equation}
\text{KL}(p||q) = \int_{-\infty}^{\infty} p (\boldsymbol{x}) \log \frac{p(\boldsymbol{x})}{q(\boldsymbol{x})} d\boldsymbol{x}.
\label{_auto16} \tag{16}
\end{equation}
$$
Thus, the Kullback-Leibler divergence between the distribution of the
training data $f(\boldsymbol{x})$ and the model distribution $p(\boldsymbol{x}|
\boldsymbol{\theta})$ is
<!-- Equation labels as ordinary links -->
<div id="_auto17"></div>
$$
\begin{equation}
\text{KL} (f(\boldsymbol{x})|| p(\boldsymbol{x}| \boldsymbol{\theta})) = \int_{-\infty}^{\infty}
f (\boldsymbol{x}) \log \frac{f(\boldsymbol{x})}{p(\boldsymbol{x}| \boldsymbol{\theta})} d\boldsymbol{x}
\label{_auto17} \tag{17}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="_auto18"></div>
$$
\begin{equation}
= \int_{-\infty}^{\infty} f(\boldsymbol{x}) \log f(\boldsymbol{x}) d\boldsymbol{x} - \int_{-\infty}^{\infty} f(\boldsymbol{x}) \log
p(\boldsymbol{x}| \boldsymbol{\theta}) d\boldsymbol{x}
\label{_auto18} \tag{18}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="_auto19"></div>
$$
\begin{equation}
%= \mathbb{E}_{f(\boldsymbol{x})} (\log f(\boldsymbol{x})) - \mathbb{E}_{f(\boldsymbol{x})} (\log p(\boldsymbol{x}| \boldsymbol{\theta}))
= \langle \log f(\boldsymbol{x}) \rangle_{f(\boldsymbol{x})} - \langle \log p(\boldsymbol{x}| \boldsymbol{\theta}) \rangle_{f(\boldsymbol{x})}
\label{_auto19} \tag{19}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="_auto20"></div>
$$
\begin{equation}
= \langle \log f(\boldsymbol{x}) \rangle_{data} + \langle E(\boldsymbol{x}) \rangle_{data} + \log Z
\label{_auto20} \tag{20}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="_auto21"></div>
$$
\begin{equation}
= \langle \log f(\boldsymbol{x}) \rangle_{data} + \mathcal{C}_{LL} .
\label{_auto21} \tag{21}
\end{equation}
$$
The first term is constant with respect to $\boldsymbol{\theta}$ since $f(\boldsymbol{x})$ is independent of $\boldsymbol{\theta}$. Thus the Kullback-Leibler Divergence is minimal when the second term is minimal. The second term is the log-likelihood cost function, hence minimizing the Kullback-Leibler divergence is equivalent to maximizing the log-likelihood.
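For discrete distributions the integral in eq. (16) becomes a sum. A quick NumPy check (with toy numbers of our own choosing) also makes the asymmetry visible:

```python
import numpy as np

def kl_divergence(p, q):
    """Discrete KL(p||q) = sum_i p_i log(p_i / q_i); assumes q_i > 0
    wherever p_i > 0. KL >= 0, with equality iff p == q."""
    mask = p > 0
    return np.sum(p[mask] * np.log(p[mask] / q[mask]))

p = np.array([0.5, 0.5])
q = np.array([0.9, 0.1])
print(kl_divergence(p, q), kl_divergence(q, p))  # the two values differ
```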
To further understand generative models it is useful to study the
gradient of the cost function, which is needed in order to minimize it
using methods like stochastic gradient descent.
The partition function is the generating function of
expectation values; in particular, there are mathematical relationships
between expectation values and the log-partition function. In this
case we have
<!-- Equation labels as ordinary links -->
<div id="_auto22"></div>
$$
\begin{equation}
\langle \frac{ \partial E(\boldsymbol{x}; \theta_i) } { \partial \theta_i} \rangle_{model}
= \int p(\boldsymbol{x}| \boldsymbol{\theta}) \frac{ \partial E(\boldsymbol{x}; \theta_i) } { \partial \theta_i} d\boldsymbol{x}
= -\frac{\partial \log Z(\theta_i)}{ \partial \theta_i} .
\label{_auto22} \tag{22}
\end{equation}
$$
Here $\langle \cdot \rangle_{model}$ is the expectation value over the model probability distribution $p(\boldsymbol{x}| \boldsymbol{\theta})$.
## Setting up for gradient descent calculations
Using the previous relationship we can express the gradient of the cost function as
<!-- Equation labels as ordinary links -->
<div id="_auto23"></div>
$$
\begin{equation}
\frac{\partial \mathcal{C}_{LL}}{\partial \theta_i}
= \langle \frac{ \partial E(\boldsymbol{x}; \theta_i) } { \partial \theta_i} \rangle_{data} + \frac{\partial \log Z(\theta_i)}{ \partial \theta_i}
\label{_auto23} \tag{23}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="_auto24"></div>
$$
\begin{equation}
= \langle \frac{ \partial E(\boldsymbol{x}; \theta_i) } { \partial \theta_i} \rangle_{data} - \langle \frac{ \partial E(\boldsymbol{x}; \theta_i) } { \partial \theta_i} \rangle_{model}
\label{_auto24} \tag{24}
\end{equation}
$$
This expression shows that the gradient of the log-likelihood cost
function is a **difference of moments**, with one calculated from
the data and one calculated from the model. The data-dependent term is
called the **positive phase** and the model-dependent term is
called the **negative phase** of the gradient. We see now that
minimizing the cost function results in lowering the energy of
configurations $\boldsymbol{x}$ near points in the training data and
increasing the energy of configurations not observed in the training
data. That means we increase the model's probability of configurations
similar to those in the training data.
The gradient of the cost function also demonstrates why gradients of
unsupervised, generative models must be computed differently from
those of, for example, FNNs. While the data-dependent expectation value
is easily calculated from the samples $\boldsymbol{x}_i$ in the training
data, we must sample from the model in order to generate samples from
which to calculate the model-dependent term. We sample from the model
using MCMC-based methods. We cannot sample from the model directly
because the partition function $Z$ is generally intractable.
As in supervised machine learning problems, the goal is also here to
perform well on **unseen** data, that is to have good
generalization from the training data. The distribution $f(x)$ we
approximate is not the **true** distribution we wish to estimate,
it is limited to the training data. Hence, in unsupervised training as
well it is important to prevent overfitting to the training data. Thus
it is common to add regularizers to the cost function, in the same
manner as we discussed for, say, linear regression.
## RBMs for the quantum many body problem
The idea of applying RBMs to quantum many body problems was presented by G. Carleo and M. Troyer, working with ETH Zurich and Microsoft Research.
Some of their motivation included
* The wave function $\Psi$ is a monolithic mathematical quantity that contains all the information on a quantum state, be it a single particle or a complex molecule. In principle, an exponential amount of information is needed to fully encode a generic many-body quantum state.
* There are still interesting open problems, including fundamental questions ranging from the dynamical properties of high-dimensional systems to the exact ground-state properties of strongly interacting fermions.
* The difficulty lies in finding a general strategy to reduce the exponential complexity of the full many-body wave function down to its most essential features. That is
a. Dimensional reduction
b. Feature extraction
* Among the most successful techniques to attack these challenges, artificial neural networks play a prominent role.
* We want to understand whether an artificial neural network may adapt to describe a quantum system.
Carleo and Troyer applied the RBM to the quantum mechanical spin lattice systems of the Ising model and Heisenberg model, with encouraging results. Our goal is to test the method on systems of moving particles. For the spin lattice systems it was natural to use a binary-binary RBM, with the nodes taking values of 1 and -1. For moving particles, on the other hand, we want the visible nodes to be continuous, representing position coordinates. Thus, we start by choosing a Gaussian-binary RBM, where the visible nodes are continuous and the hidden nodes take on values of 0 or 1. If we eventually want the hidden nodes to be continuous as well, rectified linear units seem like the most relevant choice.
## Representing the wave function
The wavefunction should be a probability amplitude depending on
$\boldsymbol{x}$. The RBM model is given by the joint distribution of
$\boldsymbol{x}$ and $\boldsymbol{h}$
<!-- Equation labels as ordinary links -->
<div id="_auto26"></div>
$$
\begin{equation}
F_{rbm}(\mathbf{x},\mathbf{h}) = \frac{1}{Z} e^{-\frac{1}{T_0}E(\mathbf{x},\mathbf{h})}.
\label{_auto26} \tag{26}
\end{equation}
$$
To find the marginal distribution of $\boldsymbol{x}$ we set:
<!-- Equation labels as ordinary links -->
<div id="_auto27"></div>
$$
\begin{equation}
F_{rbm}(\mathbf{x}) = \sum_\mathbf{h} F_{rbm}(\mathbf{x}, \mathbf{h})
\label{_auto27} \tag{27}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="_auto28"></div>
$$
\begin{equation}
= \frac{1}{Z}\sum_\mathbf{h} e^{-E(\mathbf{x}, \mathbf{h})}.
\label{_auto28} \tag{28}
\end{equation}
$$
Now this is what we use to represent the wave function, calling it a neural-network quantum state (NQS)
<!-- Equation labels as ordinary links -->
<div id="_auto29"></div>
$$
\begin{equation}
\Psi (\mathbf{X}) = F_{rbm}(\mathbf{x})
\label{_auto29} \tag{29}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="_auto30"></div>
$$
\begin{equation}
= \frac{1}{Z}\sum_{\boldsymbol{h}} e^{-E(\mathbf{x}, \mathbf{h})}
\label{_auto30} \tag{30}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="_auto31"></div>
$$
\begin{equation}
= \frac{1}{Z} \sum_{\{h_j\}} e^{-\sum_i^M \frac{(x_i - a_i)^2}{2\sigma^2} + \sum_j^N b_j h_j + \sum_{i,j}^{M,N} \frac{x_i w_{ij} h_j}{\sigma^2}}
\label{_auto31} \tag{31}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="_auto32"></div>
$$
\begin{equation}
= \frac{1}{Z} e^{-\sum_i^M \frac{(x_i - a_i)^2}{2\sigma^2}} \prod_j^N (1 + e^{b_j + \sum_i^M \frac{x_i w_{ij}}{\sigma^2}}).
\label{_auto32} \tag{32}
\end{equation}
$$
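Since the normalization $Z$ cancels in Metropolis acceptance ratios and in the local energy, a sampler only ever needs the unnormalized amplitude. A minimal NumPy sketch of eq. (32) without the $1/Z$ factor (function name and shapes are our own):

```python
import numpy as np

def nqs_amplitude(x, a, b, W, sigma=1.0):
    """Unnormalized Psi(x) = F_rbm(x) from eq. (32), omitting 1/Z.
    Shapes: x, a (M,); b (N,); W (M, N)."""
    gaussian = np.exp(-np.sum((x - a) ** 2) / (2 * sigma ** 2))
    hidden = np.prod(1.0 + np.exp(b + x @ W / sigma ** 2))
    return gaussian * hidden
```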
## Choose the cost function
Now we don't necessarily have training data (unless we generate it by using some other method). However, what we do have is the variational principle which allows us to obtain the ground state wave function by minimizing the expectation value of the energy of a trial wavefunction (corresponding to the untrained NQS). Similarly to the traditional variational Monte Carlo method then, it is the local energy we wish to minimize. The gradient to use for the stochastic gradient descent procedure is
<!-- Equation labels as ordinary links -->
<div id="_auto34"></div>
$$
\begin{equation}
C_i = \frac{\partial \langle E_L \rangle}{\partial \theta_i}
= 2(\langle E_L \frac{1}{\Psi}\frac{\partial \Psi}{\partial \theta_i} \rangle - \langle E_L \rangle \langle \frac{1}{\Psi}\frac{\partial \Psi}{\partial \theta_i} \rangle ),
\label{_auto34} \tag{34}
\end{equation}
$$
where the local energy is given by
<!-- Equation labels as ordinary links -->
<div id="_auto35"></div>
$$
\begin{equation}
E_L = \frac{1}{\Psi} \hat{\mathbf{H}} \Psi.
\label{_auto35} \tag{35}
\end{equation}
$$
### Mathematical details
Because we are restricted to potential functions which are positive, it
is convenient to express them as exponentials, so that
<!-- Equation labels as ordinary links -->
<div id="_auto36"></div>
$$
\begin{equation}
\phi_C (\boldsymbol{x}_C) = e^{-E_C(\boldsymbol{x}_C)}
\label{_auto36} \tag{36}
\end{equation}
$$
where $E_C(\boldsymbol{x}_C)$ is called an *energy function*, and the
exponential representation is the *Boltzmann distribution*. The
joint distribution is defined as the product of potentials.
The joint distribution of the random variables is then
$$
p(\boldsymbol{x}) = \frac{1}{Z} \prod_C \phi_C (\boldsymbol{x}_C) \nonumber
$$
$$
= \frac{1}{Z} \prod_C e^{-E_C(\boldsymbol{x}_C)} \nonumber
$$
$$
= \frac{1}{Z} e^{-\sum_C E_C(\boldsymbol{x}_C)} \nonumber
$$
<!-- Equation labels as ordinary links -->
<div id="_auto37"></div>

$$
\begin{equation}
= \frac{1}{Z} e^{-E(\boldsymbol{x})} ,
\label{_auto37} \tag{37}
\end{equation}
$$

where $E(\boldsymbol{x}) = \sum_C E_C(\boldsymbol{x}_C)$ is the total energy. A Boltzmann machine with visible units $\boldsymbol{x}$ and hidden units $\boldsymbol{h}$ is then described by the joint distribution
<!-- Equation labels as ordinary links -->
<div id="_auto38"></div>
$$
\begin{equation}
p_{BM}(\boldsymbol{x}, \boldsymbol{h}) = \frac{1}{Z_{BM}} e^{-\frac{1}{T}E_{BM}(\boldsymbol{x}, \boldsymbol{h})} ,
\label{_auto38} \tag{38}
\end{equation}
$$
with the partition function
<!-- Equation labels as ordinary links -->
<div id="_auto39"></div>
$$
\begin{equation}
Z_{BM} = \int \int e^{-\frac{1}{T} E_{BM}(\tilde{\boldsymbol{x}}, \tilde{\boldsymbol{h}})} d\tilde{\boldsymbol{x}} d\tilde{\boldsymbol{h}} .
\label{_auto39} \tag{39}
\end{equation}
$$
$T$ is a physics-inspired parameter named temperature and will be assumed to be $1$ unless otherwise stated. The energy function of the Boltzmann machine determines the interactions between the nodes and is defined as
$$
E_{BM}(\boldsymbol{x}, \boldsymbol{h}) = - \sum_{i, k}^{M, K} a_i^k \alpha_i^k (x_i)
- \sum_{j, l}^{N, L} b_j^l \beta_j^l (h_j)
- \sum_{i,j,k,l}^{M,N,K,L} \alpha_i^k (x_i) w_{ij}^{kl} \beta_j^l (h_j) \nonumber
$$
<!-- Equation labels as ordinary links -->
<div id="_auto40"></div>
$$
\begin{equation}
- \sum_{i, m=i+1, k}^{M, M, K} \alpha_i^k (x_i) v_{im}^k \alpha_m^k (x_m)
- \sum_{j,n=j+1,l}^{N,N,L} \beta_j^l (h_j) u_{jn}^l \beta_n^l (h_n).
\label{_auto40} \tag{40}
\end{equation}
$$
Here $\alpha_i^k (x_i)$ and $\beta_j^l (h_j)$ are one-dimensional
transfer functions or mappings from the given input value to the
desired feature value. They can be arbitrary functions of the input
variables and are independent of the parameterization (parameters
referring to weight and biases), meaning they are not affected by
training of the model. The indices $k$ and $l$ indicate that there can
be multiple transfer functions per variable. Furthermore, $a_i^k$ and
$b_j^l$ are the visible and hidden biases. $w_{ij}^{kl}$ are the weights of
the **inter-layer** connection terms which connect visible and
hidden units. $v_{im}^k$ and $u_{jn}^l$ are the weights of the
**intra-layer** connection terms which connect the visible units
to each other and the hidden units to each other, respectively.
We remove the intra-layer connections by setting $v_{im}$ and $u_{jn}$
to zero. The expression for the energy of the RBM is then
<!-- Equation labels as ordinary links -->
<div id="_auto41"></div>
$$
\begin{equation}
E_{RBM}(\boldsymbol{x}, \boldsymbol{h}) = - \sum_{i, k}^{M, K} a_i^k \alpha_i^k (x_i)
- \sum_{j, l}^{N, L} b_j^l \beta_j^l (h_j)
- \sum_{i,j,k,l}^{M,N,K,L} \alpha_i^k (x_i) w_{ij}^{kl} \beta_j^l (h_j).
\label{_auto41} \tag{41}
\end{equation}
$$
The marginal probability of the visible units then becomes
$$
P_{RBM} (\boldsymbol{x}) = \int P_{RBM} (\boldsymbol{x}, \tilde{\boldsymbol{h}}) d \tilde{\boldsymbol{h}} \nonumber
$$
$$
= \frac{1}{Z_{RBM}} \int e^{-E_{RBM} (\boldsymbol{x}, \tilde{\boldsymbol{h}}) } d\tilde{\boldsymbol{h}} \nonumber
$$
$$
= \frac{1}{Z_{RBM}} \int e^{\sum_{i, k} a_i^k \alpha_i^k (x_i)
+ \sum_{j, l} b_j^l \beta_j^l (\tilde{h}_j)
+ \sum_{i,j,k,l} \alpha_i^k (x_i) w_{ij}^{kl} \beta_j^l (\tilde{h}_j)}
d\tilde{\boldsymbol{h}} \nonumber
$$
$$
= \frac{1}{Z_{RBM}} e^{\sum_{i, k} a_i^k \alpha_i^k (x_i)}
\int \prod_j^N e^{\sum_l b_j^l \beta_j^l (\tilde{h}_j)
+ \sum_{i,k,l} \alpha_i^k (x_i) w_{ij}^{kl} \beta_j^l (\tilde{h}_j)} d\tilde{\boldsymbol{h}} \nonumber
$$
$$
= \frac{1}{Z_{RBM}} e^{\sum_{i, k} a_i^k \alpha_i^k (x_i)}
\biggl( \int e^{\sum_l b_1^l \beta_1^l (\tilde{h}_1) + \sum_{i,k,l} \alpha_i^k (x_i) w_{i1}^{kl} \beta_1^l (\tilde{h}_1)} d \tilde{h}_1 \nonumber
$$
$$
\times \int e^{\sum_l b_2^l \beta_2^l (\tilde{h}_2) + \sum_{i,k,l} \alpha_i^k (x_i) w_{i2}^{kl} \beta_2^l (\tilde{h}_2)} d \tilde{h}_2 \nonumber
$$
$$
\times ... \nonumber
$$
$$
\times \int e^{\sum_l b_N^l \beta_N^l (\tilde{h}_N) + \sum_{i,k,l} \alpha_i^k (x_i) w_{iN}^{kl} \beta_N^l (\tilde{h}_N)} d \tilde{h}_N \biggr) \nonumber
$$
<!-- Equation labels as ordinary links -->
<div id="_auto42"></div>
$$
\begin{equation}
= \frac{1}{Z_{RBM}} e^{\sum_{i, k} a_i^k \alpha_i^k (x_i)}
\prod_j^N \int e^{\sum_l b_j^l \beta_j^l (\tilde{h}_j) + \sum_{i,k,l} \alpha_i^k (x_i) w_{ij}^{kl} \beta_j^l (\tilde{h}_j)} d\tilde{h}_j
\label{_auto42} \tag{42}
\end{equation}
$$
Similarly
$$
P_{RBM} (\boldsymbol{h}) = \frac{1}{Z_{RBM}} \int e^{-E_{RBM} (\tilde{\boldsymbol{x}}, \boldsymbol{h})} d\tilde{\boldsymbol{x}} \nonumber
$$
<!-- Equation labels as ordinary links -->
<div id="_auto43"></div>
$$
\begin{equation}
= \frac{1}{Z_{RBM}} e^{\sum_{j, l} b_j^l \beta_j^l (h_j)}
\prod_i^M \int e^{\sum_k a_i^k \alpha_i^k (\tilde{x}_i)
+ \sum_{j,k,l} \alpha_i^k (\tilde{x}_i) w_{ij}^{kl} \beta_j^l (h_j)} d\tilde{x}_i
\label{_auto43} \tag{43}
\end{equation}
$$
Using Bayes' theorem we have
$$
P_{RBM} (\boldsymbol{h}|\boldsymbol{x}) = \frac{P_{RBM} (\boldsymbol{x}, \boldsymbol{h})}{P_{RBM} (\boldsymbol{x})} \nonumber
$$
$$
= \frac{\frac{1}{Z_{RBM}} e^{\sum_{i, k} a_i^k \alpha_i^k (x_i)
+ \sum_{j, l} b_j^l \beta_j^l (h_j)
+ \sum_{i,j,k,l} \alpha_i^k (x_i) w_{ij}^{kl} \beta_j^l (h_j)}}
{\frac{1}{Z_{RBM}} e^{\sum_{i, k} a_i^k \alpha_i^k (x_i)}
\prod_j^N \int e^{\sum_l b_j^l \beta_j^l (\tilde{h}_j) + \sum_{i,k,l} \alpha_i^k (x_i) w_{ij}^{kl} \beta_j^l (\tilde{h}_j)} d\tilde{h}_j} \nonumber
$$
<!-- Equation labels as ordinary links -->
<div id="_auto44"></div>
$$
\begin{equation}
= \prod_j^N \frac{e^{\sum_l b_j^l \beta_j^l (h_j) + \sum_{i,k,l} \alpha_i^k (x_i) w_{ij}^{kl} \beta_j^l (h_j)} }
{\int e^{\sum_l b_j^l \beta_j^l (\tilde{h}_j) + \sum_{i,k,l} \alpha_i^k (x_i) w_{ij}^{kl} \beta_j^l (\tilde{h}_j)} d\tilde{h}_j}
\label{_auto44} \tag{44}
\end{equation}
$$
Similarly
$$
P_{RBM} (\boldsymbol{x}|\boldsymbol{h}) = \frac{P_{RBM} (\boldsymbol{x}, \boldsymbol{h})}{P_{RBM} (\boldsymbol{h})} \nonumber
$$
<!-- Equation labels as ordinary links -->
<div id="_auto45"></div>
$$
\begin{equation}
= \prod_i^M \frac{e^{\sum_k a_i^k \alpha_i^k (x_i)
+ \sum_{j,k,l} \alpha_i^k (x_i) w_{ij}^{kl} \beta_j^l (h_j)}}
{\int e^{\sum_k a_i^k \alpha_i^k (\tilde{x}_i)
+ \sum_{j,k,l} \alpha_i^k (\tilde{x}_i) w_{ij}^{kl} \beta_j^l (h_j)} d\tilde{x}_i}
\label{_auto45} \tag{45}
\end{equation}
$$
The original RBM had binary visible and hidden nodes. They were
shown to be universal approximators of discrete distributions.
It was also shown that adding hidden units yields
strictly improved modelling power. The common choice of binary values
is 0 and 1. However, in some physics applications, -1 and 1 might be
a more natural choice. We will here use 0 and 1. The energy function
of the binary-binary RBM then reads
<!-- Equation labels as ordinary links -->
<div id="_auto46"></div>
$$
\begin{equation}
E_{BB}(\boldsymbol{x}, \mathbf{h}) = - \sum_i^M x_i a_i- \sum_j^N b_j h_j - \sum_{i,j}^{M,N} x_i w_{ij} h_j.
\label{_auto46} \tag{46}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="_auto47"></div>
$$
\begin{equation}
p_{BB}(\boldsymbol{x}, \boldsymbol{h}) = \frac{1}{Z_{BB}} e^{\sum_i^M a_i x_i + \sum_j^N b_j h_j + \sum_{ij}^{M,N} x_i w_{ij} h_j}
\label{_auto47} \tag{47}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="_auto48"></div>
$$
\begin{equation}
= \frac{1}{Z_{BB}} e^{\boldsymbol{x}^T \boldsymbol{a} + \boldsymbol{b}^T \boldsymbol{h} + \boldsymbol{x}^T \boldsymbol{W} \boldsymbol{h}}
\label{_auto48} \tag{48}
\end{equation}
$$
with the partition function
<!-- Equation labels as ordinary links -->
<div id="_auto49"></div>
$$
\begin{equation}
Z_{BB} = \sum_{\boldsymbol{x}, \boldsymbol{h}} e^{\boldsymbol{x}^T \boldsymbol{a} + \boldsymbol{b}^T \boldsymbol{h} + \boldsymbol{x}^T \boldsymbol{W} \boldsymbol{h}} .
\label{_auto49} \tag{49}
\end{equation}
$$
### Marginal Probability Density Functions
In order to find the probability of any configuration of the visible units we derive the marginal probability density function.
<!-- Equation labels as ordinary links -->
<div id="_auto50"></div>
$$
\begin{equation}
p_{BB} (\boldsymbol{x}) = \sum_{\boldsymbol{h}} p_{BB} (\boldsymbol{x}, \boldsymbol{h})
\label{_auto50} \tag{50}
\end{equation}
$$
$$
= \frac{1}{Z_{BB}} \sum_{\boldsymbol{h}} e^{\boldsymbol{x}^T \boldsymbol{a} + \boldsymbol{b}^T \boldsymbol{h} + \boldsymbol{x}^T \boldsymbol{W} \boldsymbol{h}} \nonumber
$$
$$
= \frac{1}{Z_{BB}} e^{\boldsymbol{x}^T \boldsymbol{a}} \sum_{\boldsymbol{h}} e^{\sum_j^N (b_j + \boldsymbol{x}^T \boldsymbol{w}_{\ast j})h_j} \nonumber
$$
$$
= \frac{1}{Z_{BB}} e^{\boldsymbol{x}^T \boldsymbol{a}} \sum_{\boldsymbol{h}} \prod_j^N e^{ (b_j + \boldsymbol{x}^T \boldsymbol{w}_{\ast j})h_j} \nonumber
$$
$$
= \frac{1}{Z_{BB}} e^{\boldsymbol{x}^T \boldsymbol{a}} \bigg ( \sum_{h_1} e^{(b_1 + \boldsymbol{x}^T \boldsymbol{w}_{\ast 1})h_1}
\times \sum_{h_2} e^{(b_2 + \boldsymbol{x}^T \boldsymbol{w}_{\ast 2})h_2} \times \nonumber
$$
$$
... \times \sum_{h_N} e^{(b_N + \boldsymbol{x}^T \boldsymbol{w}_{\ast N})h_N} \bigg ) \nonumber
$$
$$
= \frac{1}{Z_{BB}} e^{\boldsymbol{x}^T \boldsymbol{a}} \prod_j^N \sum_{h_j} e^{(b_j + \boldsymbol{x}^T \boldsymbol{w}_{\ast j}) h_j} \nonumber
$$
<!-- Equation labels as ordinary links -->
<div id="_auto51"></div>
$$
\begin{equation}
= \frac{1}{Z_{BB}} e^{\boldsymbol{x}^T \boldsymbol{a}} \prod_j^N (1 + e^{b_j + \boldsymbol{x}^T \boldsymbol{w}_{\ast j}}) .
\label{_auto51} \tag{51}
\end{equation}
$$
A similar derivation yields the marginal probability of the hidden units
<!-- Equation labels as ordinary links -->
<div id="_auto52"></div>
$$
\begin{equation}
p_{BB} (\boldsymbol{h}) = \frac{1}{Z_{BB}} e^{\boldsymbol{b}^T \boldsymbol{h}} \prod_i^M (1 + e^{a_i + \boldsymbol{w}_{i\ast}^T \boldsymbol{h}}) .
\label{_auto52} \tag{52}
\end{equation}
$$
### Conditional Probability Density Functions
We derive the probability of the hidden units given the visible units using Bayes' rule
$$
p_{BB} (\boldsymbol{h}|\boldsymbol{x}) = \frac{p_{BB} (\boldsymbol{x}, \boldsymbol{h})}{p_{BB} (\boldsymbol{x})} \nonumber
$$
$$
= \frac{ \frac{1}{Z_{BB}} e^{\boldsymbol{x}^T \boldsymbol{a} + \boldsymbol{b}^T \boldsymbol{h} + \boldsymbol{x}^T \boldsymbol{W} \boldsymbol{h}} }
{\frac{1}{Z_{BB}} e^{\boldsymbol{x}^T \boldsymbol{a}} \prod_j^N (1 + e^{b_j + \boldsymbol{x}^T \boldsymbol{w}_{\ast j}})} \nonumber
$$
$$
= \frac{ e^{\boldsymbol{x}^T \boldsymbol{a}} e^{ \sum_j^N (b_j + \boldsymbol{x}^T \boldsymbol{w}_{\ast j} ) h_j} }
{ e^{\boldsymbol{x}^T \boldsymbol{a}} \prod_j^N (1 + e^{b_j + \boldsymbol{x}^T \boldsymbol{w}_{\ast j}})} \nonumber
$$
$$
= \prod_j^N \frac{ e^{(b_j + \boldsymbol{x}^T \boldsymbol{w}_{\ast j} ) h_j} }
{1 + e^{b_j + \boldsymbol{x}^T \boldsymbol{w}_{\ast j}}} \nonumber
$$
<!-- Equation labels as ordinary links -->
<div id="_auto53"></div>
$$
\begin{equation}
= \prod_j^N p_{BB} (h_j| \boldsymbol{x}) .
\label{_auto53} \tag{53}
\end{equation}
$$
From this we find the probability of a hidden unit being "on" or "off":
<!-- Equation labels as ordinary links -->
<div id="_auto54"></div>
$$
\begin{equation}
p_{BB} (h_j=1 | \boldsymbol{x}) = \frac{ e^{(b_j + \boldsymbol{x}^T \boldsymbol{w}_{\ast j} ) h_j} }
{1 + e^{b_j + \boldsymbol{x}^T \boldsymbol{w}_{\ast j}}}
\label{_auto54} \tag{54}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="_auto55"></div>
$$
\begin{equation}
= \frac{ e^{(b_j + \boldsymbol{x}^T \boldsymbol{w}_{\ast j} )} }
{1 + e^{b_j + \boldsymbol{x}^T \boldsymbol{w}_{\ast j}}}
\label{_auto55} \tag{55}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="_auto56"></div>
$$
\begin{equation}
= \frac{ 1 }{1 + e^{-(b_j + \boldsymbol{x}^T \boldsymbol{w}_{\ast j})} } ,
\label{_auto56} \tag{56}
\end{equation}
$$
and
<!-- Equation labels as ordinary links -->
<div id="_auto57"></div>
$$
\begin{equation}
p_{BB} (h_j=0 | \boldsymbol{x}) =\frac{ 1 }{1 + e^{b_j + \boldsymbol{x}^T \boldsymbol{w}_{\ast j}} } .
\label{_auto57} \tag{57}
\end{equation}
$$
Similarly we have that the conditional probability of the visible units given the hidden are
<!-- Equation labels as ordinary links -->
<div id="_auto58"></div>
$$
\begin{equation}
p_{BB} (\boldsymbol{x}|\boldsymbol{h}) = \prod_i^M \frac{ e^{ (a_i + \boldsymbol{w}_{i\ast}^T \boldsymbol{h}) x_i} }{ 1 + e^{a_i + \boldsymbol{w}_{i\ast}^T \boldsymbol{h}} }
\label{_auto58} \tag{58}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="_auto59"></div>
$$
\begin{equation}
= \prod_i^M p_{BB} (x_i | \boldsymbol{h}) .
\label{_auto59} \tag{59}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="_auto60"></div>
$$
\begin{equation}
p_{BB} (x_i=1 | \boldsymbol{h}) = \frac{1}{1 + e^{-(a_i + \boldsymbol{w}_{i\ast}^T \boldsymbol{h} )}}
\label{_auto60} \tag{60}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="_auto61"></div>
$$
\begin{equation}
p_{BB} (x_i=0 | \boldsymbol{h}) = \frac{1}{1 + e^{a_i + \boldsymbol{w}_{i\ast}^T \boldsymbol{h} }} .
\label{_auto61} \tag{61}
\end{equation}
$$
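Because both conditionals factorize into independent sigmoids, eqs. (53)-(61) translate directly into a block Gibbs sampler. A minimal sketch (function names ours):

```python
import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def gibbs_step_bb(x, a, b, W, rng):
    """One block-Gibbs sweep for a binary-binary RBM:
    h ~ p(h|x), eq. (54); then x ~ p(x|h), eq. (60)."""
    h = (rng.random(b.shape) < sigmoid(b + x @ W)) * 1.0
    x_new = (rng.random(a.shape) < sigmoid(a + W @ h)) * 1.0
    return x_new, h

# Example: iterate from a random start to draw approximate model samples.
rng = np.random.default_rng(42)
M, N = 6, 4
x = (rng.random(M) < 0.5) * 1.0
a, b, W = np.zeros(M), np.zeros(N), 0.1 * rng.standard_normal((M, N))
for _ in range(100):
    x, h = gibbs_step_bb(x, a, b, W, rng)
```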
### Gaussian-Binary Restricted Boltzmann Machines
Inserting the Gaussian-binary choice of transfer functions into the expression for $E_{RBM}(\boldsymbol{x},\boldsymbol{h})$ above results in the energy
$$
E_{GB}(\boldsymbol{x}, \boldsymbol{h}) = \sum_i^M \frac{(x_i - a_i)^2}{2\sigma_i^2}
- \sum_j^N b_j h_j
-\sum_{ij}^{M,N} \frac{x_i w_{ij} h_j}{\sigma_i^2} \nonumber
$$
<!-- Equation labels as ordinary links -->
<div id="_auto62"></div>
$$
\begin{equation}
= \vert\vert\frac{\boldsymbol{x} -\boldsymbol{a}}{2\boldsymbol{\sigma}}\vert\vert^2 - \boldsymbol{b}^T \boldsymbol{h}
- (\frac{\boldsymbol{x}}{\boldsymbol{\sigma}^2})^T \boldsymbol{W}\boldsymbol{h} .
\label{_auto62} \tag{62}
\end{equation}
$$
### Joint Probability Density Function
$$
p_{GB} (\boldsymbol{x}, \boldsymbol{h}) = \frac{1}{Z_{GB}} e^{-\vert\vert\frac{\boldsymbol{x} -\boldsymbol{a}}{2\boldsymbol{\sigma}}\vert\vert^2 + \boldsymbol{b}^T \boldsymbol{h}
+ (\frac{\boldsymbol{x}}{\boldsymbol{\sigma}^2})^T \boldsymbol{W}\boldsymbol{h}} \nonumber
$$
$$
= \frac{1}{Z_{GB}} e^{- \sum_i^M \frac{(x_i - a_i)^2}{2\sigma_i^2}
+ \sum_j^N b_j h_j
+\sum_{ij}^{M,N} \frac{x_i w_{ij} h_j}{\sigma_i^2}} \nonumber
$$
<!-- Equation labels as ordinary links -->
<div id="_auto63"></div>
$$
\begin{equation}
= \frac{1}{Z_{GB}} \prod_{ij}^{M,N} e^{-\frac{(x_i - a_i)^2}{2\sigma_i^2}
+ b_j h_j
+\frac{x_i w_{ij} h_j}{\sigma_i^2}} ,
\label{_auto63} \tag{63}
\end{equation}
$$
with the partition function given by
<!-- Equation labels as ordinary links -->
<div id="_auto64"></div>
$$
\begin{equation}
Z_{GB} = \int \sum_{\tilde{\boldsymbol{h}}}^{\tilde{\boldsymbol{H}}} e^{-\vert\vert\frac{\tilde{\boldsymbol{x}} -\boldsymbol{a}}{2\boldsymbol{\sigma}}\vert\vert^2 + \boldsymbol{b}^T \tilde{\boldsymbol{h}}
+ (\frac{\tilde{\boldsymbol{x}}}{\boldsymbol{\sigma}^2})^T \boldsymbol{W}\tilde{\boldsymbol{h}}} d\tilde{\boldsymbol{x}} .
\label{_auto64} \tag{64}
\end{equation}
$$
### Marginal Probability Density Functions
We proceed to find the marginal probability densities of the
Gaussian-binary RBM. We first marginalize over the binary hidden units
to find $p_{GB} (\boldsymbol{x})$
$$
p_{GB} (\boldsymbol{x}) = \sum_{\tilde{\boldsymbol{h}}}^{\tilde{\boldsymbol{H}}} p_{GB} (\boldsymbol{x}, \tilde{\boldsymbol{h}}) \nonumber
$$
$$
= \frac{1}{Z_{GB}} \sum_{\tilde{\boldsymbol{h}}}^{\tilde{\boldsymbol{H}}}
e^{-\vert\vert\frac{\boldsymbol{x} -\boldsymbol{a}}{2\boldsymbol{\sigma}}\vert\vert^2 + \boldsymbol{b}^T \tilde{\boldsymbol{h}}
+ (\frac{\boldsymbol{x}}{\boldsymbol{\sigma}^2})^T \boldsymbol{W}\tilde{\boldsymbol{h}}} \nonumber
$$
<!-- Equation labels as ordinary links -->
<div id="_auto65"></div>
$$
\begin{equation}
= \frac{1}{Z_{GB}} e^{-\vert\vert\frac{\boldsymbol{x} -\boldsymbol{a}}{2\boldsymbol{\sigma}}\vert\vert^2}
\prod_j^N (1 + e^{b_j + (\frac{\boldsymbol{x}}{\boldsymbol{\sigma}^2})^T \boldsymbol{w}_{\ast j}} ) .
\label{_auto65} \tag{65}
\end{equation}
$$
We next marginalize over the visible units. This is the first time we
marginalize over continuous values. We rewrite the exponential factor
dependent on $\boldsymbol{x}$ as a Gaussian function before we integrate in
the last step.
$$
p_{GB} (\boldsymbol{h}) = \int p_{GB} (\tilde{\boldsymbol{x}}, \boldsymbol{h}) d\tilde{\boldsymbol{x}} \nonumber
$$
$$
= \frac{1}{Z_{GB}} \int e^{-\vert\vert\frac{\tilde{\boldsymbol{x}} -\boldsymbol{a}}{2\boldsymbol{\sigma}}\vert\vert^2 + \boldsymbol{b}^T \boldsymbol{h}
+ (\frac{\tilde{\boldsymbol{x}}}{\boldsymbol{\sigma}^2})^T \boldsymbol{W}\boldsymbol{h}} d\tilde{\boldsymbol{x}} \nonumber
$$
$$
= \frac{1}{Z_{GB}} e^{\boldsymbol{b}^T \boldsymbol{h} } \int \prod_i^M
e^{- \frac{(\tilde{x}_i - a_i)^2}{2\sigma_i^2} + \frac{\tilde{x}_i \boldsymbol{w}_{i\ast}^T \boldsymbol{h}}{\sigma_i^2} } d\tilde{\boldsymbol{x}} \nonumber
$$
$$
= \frac{1}{Z_{GB}} e^{\boldsymbol{b}^T \boldsymbol{h} }
\biggl( \int e^{- \frac{(\tilde{x}_1 - a_1)^2}{2\sigma_1^2} + \frac{\tilde{x}_1 \boldsymbol{w}_{1\ast}^T \boldsymbol{h}}{\sigma_1^2} } d\tilde{x}_1 \nonumber
$$
$$
\times \int e^{- \frac{(\tilde{x}_2 - a_2)^2}{2\sigma_2^2} + \frac{\tilde{x}_2 \boldsymbol{w}_{2\ast}^T \boldsymbol{h}}{\sigma_2^2} } d\tilde{x}_2 \nonumber
$$
$$
\times ... \nonumber
$$
$$
\times \int e^{- \frac{(\tilde{x}_M - a_M)^2}{2\sigma_M^2} + \frac{\tilde{x}_M \boldsymbol{w}_{M\ast}^T \boldsymbol{h}}{\sigma_M^2} } d\tilde{x}_M \biggr) \nonumber
$$
$$
= \frac{1}{Z_{GB}} e^{\boldsymbol{b}^T \boldsymbol{h}} \prod_i^M
\int e^{- \frac{(\tilde{x}_i - a_i)^2 - 2\tilde{x}_i \boldsymbol{w}_{i\ast}^T \boldsymbol{h}}{2\sigma_i^2} } d\tilde{x}_i \nonumber
$$
$$
= \frac{1}{Z_{GB}} e^{\boldsymbol{b}^T \boldsymbol{h}} \prod_i^M
\int e^{- \frac{\tilde{x}_i^2 - 2\tilde{x}_i(a_i + \boldsymbol{w}_{i\ast}^T \boldsymbol{h}) + a_i^2}{2\sigma_i^2} } d\tilde{x}_i \nonumber
$$
$$
= \frac{1}{Z_{GB}} e^{\boldsymbol{b}^T \boldsymbol{h}} \prod_i^M
\int e^{- \frac{\tilde{x}_i^2 - 2\tilde{x}_i(a_i + \boldsymbol{w}_{i\ast}^T \boldsymbol{h}) + (a_i + \boldsymbol{w}_{i\ast}^T \boldsymbol{h})^2 - (a_i + \boldsymbol{w}_{i\ast}^T \boldsymbol{h})^2 + a_i^2}{2\sigma_i^2} } d\tilde{x}_i \nonumber
$$
$$
= \frac{1}{Z_{GB}} e^{\boldsymbol{b}^T \boldsymbol{h}} \prod_i^M
\int e^{- \frac{(\tilde{x}_i - (a_i + \boldsymbol{w}_{i\ast}^T \boldsymbol{h}))^2 - a_i^2 -2a_i \boldsymbol{w}_{i\ast}^T \boldsymbol{h} - (\boldsymbol{w}_{i\ast}^T \boldsymbol{h})^2 + a_i^2}{2\sigma_i^2} } d\tilde{x}_i \nonumber
$$
$$
= \frac{1}{Z_{GB}} e^{\boldsymbol{b}^T \boldsymbol{h}} \prod_i^M
e^{\frac{2a_i \boldsymbol{w}_{i\ast}^T \boldsymbol{h} +(\boldsymbol{w}_{i\ast}^T \boldsymbol{h})^2 }{2\sigma_i^2}}
\int e^{- \frac{(\tilde{x}_i - a_i - \boldsymbol{w}_{i\ast}^T \boldsymbol{h})^2}{2\sigma_i^2}}
d\tilde{x}_i \nonumber
$$
<!-- Equation labels as ordinary links -->
<div id="_auto66"></div>
$$
\begin{equation}
= \frac{1}{Z_{GB}} e^{\boldsymbol{b}^T \boldsymbol{h}} \prod_i^M
\sqrt{2\pi \sigma_i^2}
e^{\frac{2a_i \boldsymbol{w}_{i\ast}^T \boldsymbol{h} +(\boldsymbol{w}_{i\ast}^T \boldsymbol{h})^2 }{2\sigma_i^2}} .
\label{_auto66} \tag{66}
\end{equation}
$$
### Conditional Probability Density Functions
We finish by deriving the conditional probabilities.
$$
p_{GB} (\boldsymbol{h}| \boldsymbol{x}) = \frac{p_{GB} (\boldsymbol{x}, \boldsymbol{h})}{p_{GB} (\boldsymbol{x})} \nonumber
$$
$$
= \frac{\frac{1}{Z_{GB}} e^{-\vert\vert\frac{\boldsymbol{x} -\boldsymbol{a}}{2\boldsymbol{\sigma}}\vert\vert^2 + \boldsymbol{b}^T \boldsymbol{h}
+ (\frac{\boldsymbol{x}}{\boldsymbol{\sigma}^2})^T \boldsymbol{W}\boldsymbol{h}}}
{\frac{1}{Z_{GB}} e^{-\vert\vert\frac{\boldsymbol{x} -\boldsymbol{a}}{2\boldsymbol{\sigma}}\vert\vert^2}
\prod_j^N (1 + e^{b_j + (\frac{\boldsymbol{x}}{\boldsymbol{\sigma}^2})^T \boldsymbol{w}_{\ast j}} ) }
\nonumber
$$
$$
= \prod_j^N \frac{e^{(b_j + (\frac{\boldsymbol{x}}{\boldsymbol{\sigma}^2})^T \boldsymbol{w}_{\ast j})h_j } }
{1 + e^{b_j + (\frac{\boldsymbol{x}}{\boldsymbol{\sigma}^2})^T \boldsymbol{w}_{\ast j}}} \nonumber
$$
<!-- Equation labels as ordinary links -->
<div id="_auto67"></div>
$$
\begin{equation}
= \prod_j^N p_{GB} (h_j|\boldsymbol{x}).
\label{_auto67} \tag{67}
\end{equation}
$$
The conditional probability of a binary hidden unit $h_j$ being on or off again takes the form of a sigmoid function
$$
p_{GB} (h_j =1 | \boldsymbol{x}) = \frac{e^{b_j + (\frac{\boldsymbol{x}}{\boldsymbol{\sigma}^2})^T \boldsymbol{w}_{\ast j} } }
{1 + e^{b_j + (\frac{\boldsymbol{x}}{\boldsymbol{\sigma}^2})^T \boldsymbol{w}_{\ast j}}} \nonumber
$$
<!-- Equation labels as ordinary links -->
<div id="_auto68"></div>
$$
\begin{equation}
= \frac{1}{1 + e^{-b_j - (\frac{\boldsymbol{x}}{\boldsymbol{\sigma}^2})^T \boldsymbol{w}_{\ast j}}}
\label{_auto68} \tag{68}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="_auto69"></div>
$$
\begin{equation}
p_{GB} (h_j =0 | \boldsymbol{x}) =
\frac{1}{1 + e^{b_j +(\frac{\boldsymbol{x}}{\boldsymbol{\sigma}^2})^T \boldsymbol{w}_{\ast j}}} .
\label{_auto69} \tag{69}
\end{equation}
$$
The conditional probability of the continuous $\boldsymbol{x}$ now has another form, however.
$$
p_{GB} (\boldsymbol{x}|\boldsymbol{h})
= \frac{p_{GB} (\boldsymbol{x}, \boldsymbol{h})}{p_{GB} (\boldsymbol{h})} \nonumber
$$
$$
= \frac{\frac{1}{Z_{GB}} e^{-\vert\vert\frac{\boldsymbol{x} -\boldsymbol{a}}{2\boldsymbol{\sigma}}\vert\vert^2 + \boldsymbol{b}^T \boldsymbol{h}
+ (\frac{\boldsymbol{x}}{\boldsymbol{\sigma}^2})^T \boldsymbol{W}\boldsymbol{h}}}
{\frac{1}{Z_{GB}} e^{\boldsymbol{b}^T \boldsymbol{h}} \prod_i^M
\sqrt{2\pi \sigma_i^2}
e^{\frac{2a_i \boldsymbol{w}_{i\ast}^T \boldsymbol{h} +(\boldsymbol{w}_{i\ast}^T \boldsymbol{h})^2 }{2\sigma_i^2}}}
\nonumber
$$
$$
= \prod_i^M \frac{1}{\sqrt{2\pi \sigma_i^2}}
\frac{e^{- \frac{(x_i - a_i)^2}{2\sigma_i^2} + \frac{x_i \boldsymbol{w}_{i\ast}^T \boldsymbol{h}}{2\sigma_i^2} }}
{e^{\frac{2a_i \boldsymbol{w}_{i\ast}^T \boldsymbol{h} +(\boldsymbol{w}_{i\ast}^T \boldsymbol{h})^2 }{2\sigma_i^2}}}
\nonumber
$$
$$
= \prod_i^M \frac{1}{\sqrt{2\pi \sigma_i^2}}
\frac{e^{-\frac{x_i^2 - 2a_i x_i + a_i^2 - 2x_i \boldsymbol{w}_{i\ast}^T\boldsymbol{h} }{2\sigma_i^2} } }
{e^{\frac{2a_i \boldsymbol{w}_{i\ast}^T \boldsymbol{h} +(\boldsymbol{w}_{i\ast}^T \boldsymbol{h})^2 }{2\sigma_i^2}}}
\nonumber
$$
$$
= \prod_i^M \frac{1}{\sqrt{2\pi \sigma_i^2}}
e^{- \frac{x_i^2 - 2a_i x_i + a_i^2 - 2x_i \boldsymbol{w}_{i\ast}^T\boldsymbol{h}
+ 2a_i \boldsymbol{w}_{i\ast}^T \boldsymbol{h} +(\boldsymbol{w}_{i\ast}^T \boldsymbol{h})^2}
{2\sigma_i^2} }
\nonumber
$$
$$
= \prod_i^M \frac{1}{\sqrt{2\pi \sigma_i^2}}
e^{ - \frac{(x_i - a_i - \boldsymbol{w}_{i\ast}^T \boldsymbol{h})^2}{2\sigma_i^2}} \nonumber
$$
<!-- Equation labels as ordinary links -->
<div id="_auto70"></div>
$$
\begin{equation}
= \prod_i^M \mathcal{N}
(x_i | a_i + \boldsymbol{w}_{i\ast}^T \boldsymbol{h}, \sigma_i^2)
\label{_auto70} \tag{70}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="_auto71"></div>
$$
\begin{equation}
\Rightarrow p_{GB} (x_i|\boldsymbol{h}) = \mathcal{N}
(x_i | a_i + \boldsymbol{w}_{i\ast}^T \boldsymbol{h}, \sigma_i^2) .
\label{_auto71} \tag{71}
\end{equation}
$$
The form of these conditional probabilities explains the name
"Gaussian" and the form of the Gaussian-binary energy function. We see
that the conditional probability of $x_i$ given $\boldsymbol{h}$ is a normal
distribution with mean $a_i + \boldsymbol{w}_{i\ast}^T \boldsymbol{h}$ and variance
$\sigma_i^2$.
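These conditionals again give a direct block Gibbs sampler: Bernoulli draws for the hidden layer via eq. (68), Gaussian draws for the visible layer via eq. (71). A sketch, with names and shapes our own:

```python
import numpy as np

def gibbs_step_gb(x, a, b, W, sigma, rng):
    """One block-Gibbs sweep for a Gaussian-binary RBM:
    h_j ~ Bernoulli(sigmoid(b_j + (x/sigma^2).w_j)), eq. (68);
    x_i ~ N(a_i + w_i.h, sigma_i^2), eq. (71).
    Shapes: x, a, sigma (M,); b (N,); W (M, N)."""
    p_h = 1.0 / (1.0 + np.exp(-(b + (x / sigma ** 2) @ W)))
    h = (rng.random(p_h.shape) < p_h) * 1.0
    x_new = rng.normal(loc=a + W @ h, scale=sigma)
    return x_new, h
```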
## Neural Quantum States
The wavefunction should be a probability amplitude depending on $\boldsymbol{x}$. The RBM model is given by the joint distribution of $\boldsymbol{x}$ and $\boldsymbol{h}$
<!-- Equation labels as ordinary links -->
<div id="_auto72"></div>
$$
\begin{equation}
F_{rbm}(\boldsymbol{x},\mathbf{h}) = \frac{1}{Z} e^{-\frac{1}{T_0}E(\boldsymbol{x},\mathbf{h})}
\label{_auto72} \tag{72}
\end{equation}
$$
To find the marginal distribution of $\boldsymbol{x}$ we set:
<!-- Equation labels as ordinary links -->
<div id="_auto73"></div>
$$
\begin{equation}
F_{rbm}(\mathbf{x}) = \sum_\mathbf{h} F_{rbm}(\mathbf{x}, \mathbf{h})
\label{_auto73} \tag{73}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="_auto74"></div>
$$
\begin{equation}
= \frac{1}{Z}\sum_\mathbf{h} e^{-E(\mathbf{x}, \mathbf{h})}
\label{_auto74} \tag{74}
\end{equation}
$$
Now this is what we use to represent the wave function, calling it a neural-network quantum state (NQS)
<!-- Equation labels as ordinary links -->
<div id="_auto75"></div>
$$
\begin{equation}
\Psi (\mathbf{X}) = F_{rbm}(\mathbf{x})
\label{_auto75} \tag{75}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="_auto76"></div>
$$
\begin{equation}
= \frac{1}{Z}\sum_{\boldsymbol{h}} e^{-E(\mathbf{x}, \mathbf{h})}
\label{_auto76} \tag{76}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="_auto77"></div>
$$
\begin{equation}
= \frac{1}{Z} \sum_{\{h_j\}} e^{-\sum_i^M \frac{(x_i - a_i)^2}{2\sigma^2} + \sum_j^N b_j h_j + \sum_{i,j}^{M,N} \frac{x_i w_{ij} h_j}{\sigma^2}}
\label{_auto77} \tag{77}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="_auto78"></div>
$$
\begin{equation}
= \frac{1}{Z} e^{-\sum_i^M \frac{(x_i - a_i)^2}{2\sigma^2}} \prod_j^N (1 + e^{b_j + \sum_i^M \frac{x_i w_{ij}}{\sigma^2}})
\label{_auto78} \tag{78}
\end{equation}
$$
The above wavefunction is the most general one because it allows for
complex valued wavefunctions. However, it fundamentally changes the
probabilistic foundation of the RBM, because what is usually a
probability in the RBM framework is now an amplitude. This means
that a lot of the theoretical framework usually used to interpret the
model, i.e. graphical models, conditional probabilities, and Markov
random fields, breaks down. If we assume the wavefunction to be
positive definite, however, we can use the RBM to represent the squared
wavefunction, and thereby a probability. This also makes it possible
to sample from the model using Gibbs sampling, because we can obtain
the conditional probabilities.
<!-- Equation labels as ordinary links -->
<div id="_auto80"></div>
$$
\begin{equation}
|\Psi (\mathbf{X})|^2 = F_{rbm}(\mathbf{X})
\label{_auto80} \tag{80}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="_auto81"></div>
$$
\begin{equation}
\Rightarrow \Psi (\mathbf{X}) = \sqrt{F_{rbm}(\mathbf{X})}
\label{_auto81} \tag{81}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="_auto82"></div>
$$
\begin{equation}
= \frac{1}{\sqrt{Z}}\sqrt{\sum_{\{h_j\}} e^{-E(\mathbf{X}, \mathbf{h})}}
\label{_auto82} \tag{82}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="_auto83"></div>
$$
\begin{equation}
= \frac{1}{\sqrt{Z}} \sqrt{\sum_{\{h_j\}} e^{-\sum_i^M \frac{(X_i - a_i)^2}{2\sigma^2} + \sum_j^N b_j h_j + \sum_{i,j}^{M,N} \frac{X_i w_{ij} h_j}{\sigma^2}} }
\label{_auto83} \tag{83}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="_auto84"></div>
$$
\begin{equation}
= \frac{1}{\sqrt{Z}} e^{-\sum_i^M \frac{(X_i - a_i)^2}{4\sigma^2}} \sqrt{\sum_{\{h_j\}} \prod_j^N e^{b_j h_j + \sum_i^M \frac{X_i w_{ij} h_j}{\sigma^2}}}
\label{_auto84} \tag{84}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="_auto85"></div>
$$
\begin{equation}
= \frac{1}{\sqrt{Z}} e^{-\sum_i^M \frac{(X_i - a_i)^2}{4\sigma^2}} \sqrt{\prod_j^N \sum_{h_j} e^{b_j h_j + \sum_i^M \frac{X_i w_{ij} h_j}{\sigma^2}}}
\label{_auto85} \tag{85}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="_auto86"></div>
$$
\begin{equation}
= \frac{1}{\sqrt{Z}} e^{-\sum_i^M \frac{(X_i - a_i)^2}{4\sigma^2}} \prod_j^N \sqrt{e^0 + e^{b_j + \sum_i^M \frac{X_i w_{ij}}{\sigma^2}}}
\label{_auto86} \tag{86}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="_auto87"></div>
$$
\begin{equation}
= \frac{1}{\sqrt{Z}} e^{-\sum_i^M \frac{(X_i - a_i)^2}{4\sigma^2}} \prod_j^N \sqrt{1 + e^{b_j + \sum_i^M \frac{X_i w_{ij}}{\sigma^2}}}
\label{_auto87} \tag{87}
\end{equation}
$$
### Cost function
This is where we deviate from what is common in machine
learning. Rather than defining a cost function based on some dataset,
our cost function is the energy of the quantum mechanical system. From
the variational principle we know that minimizing this energy should
lead to the ground state wavefunction. As stated previously the local
energy is given by
<!-- Equation labels as ordinary links -->
<div id="_auto89"></div>
$$
\begin{equation}
E_L = \frac{1}{\Psi} \hat{\mathbf{H}} \Psi,
\label{_auto89} \tag{89}
\end{equation}
$$
and the gradient is
<!-- Equation labels as ordinary links -->
<div id="_auto90"></div>
$$
\begin{equation}
G_i = \frac{\partial \langle E_L \rangle}{\partial \alpha_i}
= 2(\langle E_L \frac{1}{\Psi}\frac{\partial \Psi}{\partial \alpha_i} \rangle - \langle E_L \rangle \langle \frac{1}{\Psi}\frac{\partial \Psi}{\partial \alpha_i} \rangle ),
\label{_auto90} \tag{90}
\end{equation}
$$
where $\alpha_i = a_1,...,a_M,b_1,...,b_N,w_{11},...,w_{MN}$.
We use that $\frac{1}{\Psi}\frac{\partial \Psi}{\partial \alpha_i}
= \frac{\partial \ln{\Psi}}{\partial \alpha_i}$,
and find
<!-- Equation labels as ordinary links -->
<div id="_auto91"></div>
$$
\begin{equation}
\ln{\Psi({\mathbf{X}})} = -\ln{Z} - \sum_m^M \frac{(X_m - a_m)^2}{2\sigma^2}
+ \sum_n^N \ln({1 + e^{b_n + \sum_i^M \frac{X_i w_{in}}{\sigma^2}})}.
\label{_auto91} \tag{91}
\end{equation}
$$
This gives
<!-- Equation labels as ordinary links -->
<div id="_auto92"></div>
$$
\begin{equation}
\frac{\partial }{\partial a_m} \ln\Psi
= \frac{1}{\sigma^2} (X_m - a_m)
\label{_auto92} \tag{92}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="_auto93"></div>
$$
\begin{equation}
\frac{\partial }{\partial b_n} \ln\Psi
=
\frac{1}{e^{-b_n-\frac{1}{\sigma^2}\sum_i^M X_i w_{in}} + 1}
\label{_auto93} \tag{93}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="_auto94"></div>
$$
\begin{equation}
\frac{\partial }{\partial w_{mn}} \ln\Psi
= \frac{X_m}{\sigma^2(e^{-b_n-\frac{1}{\sigma^2}\sum_i^M X_i w_{in}} + 1)}.
\label{_auto94} \tag{94}
\end{equation}
$$
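Eqs. (92)-(94) share a single sigmoid factor and are cheap to evaluate together. A NumPy sketch for $\Psi = F_{rbm}$ (function name ours):

```python
import numpy as np

def grad_ln_psi(X, a, b, W, sigma=1.0):
    """Gradients of ln Psi w.r.t. a, b and W for Psi = F_rbm,
    implementing eqs. (92)-(94). Shapes: X, a (M,); b (N,); W (M, N)."""
    s = 1.0 / (1.0 + np.exp(-(b + X @ W / sigma ** 2)))  # shared sigmoid factor
    da = (X - a) / sigma ** 2             # eq. (92)
    db = s                                # eq. (93)
    dW = np.outer(X, s) / sigma ** 2      # eq. (94)
    return da, db, dW
```

For $\Psi = \sqrt{F_{rbm}}$, eqs. (96)-(98) below show that each of these gradients is simply halved.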
If $\Psi = \sqrt{F_{rbm}}$ we have
<!-- Equation labels as ordinary links -->
<div id="_auto95"></div>
$$
\begin{equation}
\ln{\Psi({\mathbf{X}})} = -\frac{1}{2}\ln{Z} - \sum_m^M \frac{(X_m - a_m)^2}{4\sigma^2}
+ \frac{1}{2}\sum_n^N \ln({1 + e^{b_n + \sum_i^M \frac{X_i w_{in}}{\sigma^2}})},
\label{_auto95} \tag{95}
\end{equation}
$$
which results in
<!-- Equation labels as ordinary links -->
<div id="_auto96"></div>
$$
\begin{equation}
\frac{\partial }{\partial a_m} \ln\Psi
= \frac{1}{2\sigma^2} (X_m - a_m)
\label{_auto96} \tag{96}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="_auto97"></div>
$$
\begin{equation}
\frac{\partial }{\partial b_n} \ln\Psi
=
\frac{1}{2(e^{-b_n-\frac{1}{\sigma^2}\sum_i^M X_i w_{in}} + 1)}
\label{_auto97} \tag{97}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="_auto98"></div>
$$
\begin{equation}
\frac{\partial }{\partial w_{mn}} \ln\Psi
= \frac{X_m}{2\sigma^2(e^{-b_n-\frac{1}{\sigma^2}\sum_i^M X_i w_{in}} + 1)}.
\label{_auto98} \tag{98}
\end{equation}
$$
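With these log-derivatives in hand, the stochastic gradient of the expected local energy is simply twice the covariance between the sampled local energy and the sampled log-derivatives. The following sketch shows the estimator on synthetic placeholder arrays; in the full program below, the corresponding quantities are accumulated during the Metropolis walk:
```
import numpy as np

# Synthetic placeholders standing in for quantities accumulated while sampling:
# E_L[k] is the local energy at MC step k, D[k, i] = d ln(Psi)/d alpha_i there.
rng = np.random.default_rng(2)
n_samples, n_params = 10000, 5
E_L = rng.normal(loc=3.0, scale=0.2, size=n_samples)
D = rng.normal(size=(n_samples, n_params))

# G_i = 2 * ( <E_L D_i> - <E_L><D_i> )
grad = 2.0 * (np.mean(E_L[:, None] * D, axis=0)
              - np.mean(E_L) * np.mean(D, axis=0))

# One plain gradient-descent step on the variational parameters
eta = 0.001
alpha = np.zeros(n_params)
alpha -= eta * grad
```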
Let us assume again that our Hamiltonian is
<!-- Equation labels as ordinary links -->
<div id="_auto99"></div>
$$
\begin{equation}
\hat{\mathbf{H}} = \sum_p^P (-\frac{1}{2}\nabla_p^2 + \frac{1}{2}\omega^2 r_p^2 ) + \sum_{p<q} \frac{1}{r_{pq}},
\label{_auto99} \tag{99}
\end{equation}
$$
where the first summation term represents the standard harmonic
oscillator part and the latter the repulsive interaction between two
electrons. Natural units ($\hbar=c=e=m_e=1$) are used, and $P$ is the
number of particles. This gives us the following expression for the
local energy ($D$ being the number of dimensions)
<!-- Equation labels as ordinary links -->
<div id="_auto100"></div>
$$
\begin{equation}
E_L = \frac{1}{\Psi} \hat{\mathbf{H}} \Psi
\label{_auto100} \tag{100}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="_auto101"></div>
$$
\begin{equation}
= \frac{1}{\Psi} (\sum_p^P (-\frac{1}{2}\nabla_p^2 + \frac{1}{2}\omega^2 r_p^2 ) + \sum_{p<q} \frac{1}{r_{pq}}) \Psi
\label{_auto101} \tag{101}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="_auto102"></div>
$$
\begin{equation}
= -\frac{1}{2}\frac{1}{\Psi} \sum_p^P \nabla_p^2 \Psi
+ \frac{1}{2}\omega^2 \sum_p^P r_p^2 + \sum_{p<q} \frac{1}{r_{pq}}
\label{_auto102} \tag{102}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="_auto103"></div>
$$
\begin{equation}
= -\frac{1}{2}\frac{1}{\Psi} \sum_p^P \sum_d^D \frac{\partial^2 \Psi}{\partial x_{pd}^2} + \frac{1}{2}\omega^2 \sum_p^P r_p^2 + \sum_{p<q} \frac{1}{r_{pq}}
\label{_auto103} \tag{103}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="_auto104"></div>
$$
\begin{equation}
= \frac{1}{2} \sum_p^P \sum_d^D (-(\frac{\partial}{\partial x_{pd}} \ln\Psi)^2 -\frac{\partial^2}{\partial x_{pd}^2} \ln\Psi + \omega^2 x_{pd}^2) + \sum_{p<q} \frac{1}{r_{pq}}.
\label{_auto104} \tag{104}
\end{equation}
$$
Letting each visible node in the Boltzmann machine
represent one coordinate of one particle, we obtain
<!-- Equation labels as ordinary links -->
<div id="_auto106"></div>
$$
\begin{equation}
E_L =
\frac{1}{2} \sum_m^M (-(\frac{\partial}{\partial v_m} \ln\Psi)^2 -\frac{\partial^2}{\partial v_m^2} \ln\Psi + \omega^2 v_m^2) + \sum_{p<q} \frac{1}{r_{pq}},
\label{_auto106} \tag{106}
\end{equation}
$$
where we have that
<!-- Equation labels as ordinary links -->
<div id="_auto107"></div>
$$
\begin{equation}
\frac{\partial}{\partial x_m} \ln\Psi
= - \frac{1}{\sigma^2}(x_m - a_m) + \frac{1}{\sigma^2} \sum_n^N \frac{w_{mn}}{e^{-b_n - \frac{1}{\sigma^2}\sum_i^M x_i w_{in}} + 1}
\label{_auto107} \tag{107}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="_auto108"></div>
$$
\begin{equation}
\frac{\partial^2}{\partial x_m^2} \ln\Psi
= - \frac{1}{\sigma^2} + \frac{1}{\sigma^4}\sum_n^N w_{mn}^2 \frac{e^{b_n + \frac{1}{\sigma^2}\sum_i^M x_i w_{in}}}{(e^{b_n + \frac{1}{\sigma^2}\sum_i^M x_i w_{in}} + 1)^2}.
\label{_auto108} \tag{108}
\end{equation}
$$
We now have all the expressions needed to calculate the gradient of
the expected local energy with respect to the RBM parameters,
$\frac{\partial \langle E_L \rangle}{\partial \alpha_i}$.
If we use $\Psi = \sqrt{F_{rbm}}$ we obtain
<!-- Equation labels as ordinary links -->
<div id="_auto109"></div>
$$
\begin{equation}
\frac{\partial}{\partial x_m} \ln\Psi
= - \frac{1}{2\sigma^2}(x_m - a_m) + \frac{1}{2\sigma^2} \sum_n^N
\frac{w_{mn}}{e^{-b_n-\frac{1}{\sigma^2}\sum_i^M x_i w_{in}} + 1}
\label{_auto109} \tag{109}
\end{equation}
$$
<!-- Equation labels as ordinary links -->
<div id="_auto110"></div>
$$
\begin{equation}
\frac{\partial^2}{\partial x_m^2} \ln\Psi
= - \frac{1}{2\sigma^2} + \frac{1}{2\sigma^4}\sum_n^N w_{mn}^2 \frac{e^{b_n + \frac{1}{\sigma^2}\sum_i^M x_i w_{in}}}{(e^{b_n + \frac{1}{\sigma^2}\sum_i^M x_i w_{in}} + 1)^2}.
\label{_auto110} \tag{110}
\end{equation}
$$
The only difference from the corresponding expressions for $\Psi = F_{rbm}$ is the overall factor of $1/2$.
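Before turning to the full program, note that the rewriting of the kinetic term above relies on the identity $\frac{1}{\Psi}\frac{\partial^2 \Psi}{\partial x^2} = \left(\frac{\partial \ln\Psi}{\partial x}\right)^2 + \frac{\partial^2 \ln\Psi}{\partial x^2}$, which holds for any smooth, positive $\Psi$. A quick numerical sanity check on a made-up one-dimensional trial function:
```
import numpy as np

def psi(x):
    # A made-up smooth, positive 1D trial function (not the RBM wavefunction)
    return np.exp(-0.5 * x**2 + 0.3 * x)

def lnpsi(x):
    return np.log(psi(x))

x, h = 0.7, 1e-4
lhs = (psi(x + h) - 2*psi(x) + psi(x - h)) / (h**2 * psi(x))   # (1/Psi) Psi''
dln = (lnpsi(x + h) - lnpsi(x - h)) / (2*h)                    # (ln Psi)'
d2ln = (lnpsi(x + h) - 2*lnpsi(x) + lnpsi(x - h)) / h**2       # (ln Psi)''
print(np.isclose(lhs, dln**2 + d2ln, atol=1e-5))               # True
```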
## Python version for the two non-interacting particles
```
%matplotlib inline
# 2-electron VMC code for 2dim quantum dot with importance sampling
# Using gaussian rng for new positions and Metropolis-Hastings
# Added restricted Boltzmann machine method for dealing with the wavefunction
# RBM code based heavily on:
# https://github.com/CompPhysics/ComputationalPhysics2/tree/gh-pages/doc/Programs/BoltzmannMachines/MLcpp/src/CppCode/ob
from math import exp, sqrt
from random import random, seed, normalvariate
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import sys
# Trial wave function for the 2-electron quantum dot in two dims
def WaveFunction(r,a,b,w):
sigma=1.0
sig2 = sigma**2
Psi1 = 0.0
Psi2 = 1.0
Q = Qfac(r,b,w)
for iq in range(NumberParticles):
for ix in range(Dimension):
Psi1 += (r[iq,ix]-a[iq,ix])**2
for ih in range(NumberHidden):
Psi2 *= (1.0 + np.exp(Q[ih]))
Psi1 = np.exp(-Psi1/(2*sig2))
return Psi1*Psi2
# Local energy for the 2-electron quantum dot in two dims, using analytical local energy
def LocalEnergy(r,a,b,w):
sigma=1.0
sig2 = sigma**2
locenergy = 0.0
Q = Qfac(r,b,w)
for iq in range(NumberParticles):
for ix in range(Dimension):
sum1 = 0.0
sum2 = 0.0
for ih in range(NumberHidden):
sum1 += w[iq,ix,ih]/(1+np.exp(-Q[ih]))
sum2 += w[iq,ix,ih]**2 * np.exp(Q[ih]) / (1.0 + np.exp(Q[ih]))**2
dlnpsi1 = -(r[iq,ix] - a[iq,ix]) /sig2 + sum1/sig2
dlnpsi2 = -1/sig2 + sum2/sig2**2
locenergy += 0.5*(-dlnpsi1*dlnpsi1 - dlnpsi2 + r[iq,ix]**2)
    if interaction:
for iq1 in range(NumberParticles):
for iq2 in range(iq1):
distance = 0.0
for ix in range(Dimension):
distance += (r[iq1,ix] - r[iq2,ix])**2
locenergy += 1/sqrt(distance)
return locenergy
# Derivate of wave function ansatz as function of variational parameters
def DerivativeWFansatz(r,a,b,w):
sigma=1.0
sig2 = sigma**2
Q = Qfac(r,b,w)
    # Derivatives of ln(Psi) with respect to the parameters a, b and w
    WfDer = [np.copy(a),np.copy(b),np.copy(w)]
WfDer[0] = (r-a)/sig2
WfDer[1] = 1 / (1 + np.exp(-Q))
for ih in range(NumberHidden):
WfDer[2][:,:,ih] = w[:,:,ih] / (sig2*(1+np.exp(-Q[ih])))
return WfDer
# Setting up the quantum force for the two-electron quantum dot, recall that it is a vector
def QuantumForce(r,a,b,w):
sigma=1.0
sig2 = sigma**2
qforce = np.zeros((NumberParticles,Dimension), np.double)
sum1 = np.zeros((NumberParticles,Dimension), np.double)
Q = Qfac(r,b,w)
for ih in range(NumberHidden):
sum1 += w[:,:,ih]/(1+np.exp(-Q[ih]))
qforce = 2*(-(r-a)/sig2 + sum1/sig2)
return qforce
def Qfac(r,b,w):
Q = np.zeros((NumberHidden), np.double)
temp = np.zeros((NumberHidden), np.double)
for ih in range(NumberHidden):
temp[ih] = (r*w[:,:,ih]).sum()
Q = b + temp
return Q
# Computing the derivative of the energy and the energy
def EnergyMinimization(a,b,w):
NumberMCcycles= 10000
# Parameters in the Fokker-Planck simulation of the quantum force
D = 0.5
TimeStep = 0.05
# positions
PositionOld = np.zeros((NumberParticles,Dimension), np.double)
PositionNew = np.zeros((NumberParticles,Dimension), np.double)
# Quantum force
QuantumForceOld = np.zeros((NumberParticles,Dimension), np.double)
QuantumForceNew = np.zeros((NumberParticles,Dimension), np.double)
# seed for rng generator
seed()
energy = 0.0
DeltaE = 0.0
    EnergyDer = [np.copy(a),np.copy(b),np.copy(w)]
    DeltaPsi = [np.copy(a),np.copy(b),np.copy(w)]
    DerivativePsiE = [np.copy(a),np.copy(b),np.copy(w)]
for i in range(3): EnergyDer[i].fill(0.0)
for i in range(3): DeltaPsi[i].fill(0.0)
for i in range(3): DerivativePsiE[i].fill(0.0)
#Initial position
for i in range(NumberParticles):
for j in range(Dimension):
PositionOld[i,j] = normalvariate(0.0,1.0)*sqrt(TimeStep)
wfold = WaveFunction(PositionOld,a,b,w)
QuantumForceOld = QuantumForce(PositionOld,a,b,w)
    #Loop over MC cycles
for MCcycle in range(NumberMCcycles):
#Trial position moving one particle at the time
for i in range(NumberParticles):
for j in range(Dimension):
PositionNew[i,j] = PositionOld[i,j]+normalvariate(0.0,1.0)*sqrt(TimeStep)+\
QuantumForceOld[i,j]*TimeStep*D
wfnew = WaveFunction(PositionNew,a,b,w)
QuantumForceNew = QuantumForce(PositionNew,a,b,w)
GreensFunction = 0.0
for j in range(Dimension):
GreensFunction += 0.5*(QuantumForceOld[i,j]+QuantumForceNew[i,j])*\
(D*TimeStep*0.5*(QuantumForceOld[i,j]-QuantumForceNew[i,j])-\
PositionNew[i,j]+PositionOld[i,j])
GreensFunction = exp(GreensFunction)
ProbabilityRatio = GreensFunction*wfnew**2/wfold**2
#Metropolis-Hastings test to see whether we accept the move
if random() <= ProbabilityRatio:
for j in range(Dimension):
PositionOld[i,j] = PositionNew[i,j]
QuantumForceOld[i,j] = QuantumForceNew[i,j]
wfold = wfnew
#print("wf new: ", wfnew)
#print("force on 1 new:", QuantumForceNew[0,:])
#print("pos of 1 new: ", PositionNew[0,:])
#print("force on 2 new:", QuantumForceNew[1,:])
#print("pos of 2 new: ", PositionNew[1,:])
DeltaE = LocalEnergy(PositionOld,a,b,w)
DerPsi = DerivativeWFansatz(PositionOld,a,b,w)
DeltaPsi[0] += DerPsi[0]
DeltaPsi[1] += DerPsi[1]
DeltaPsi[2] += DerPsi[2]
energy += DeltaE
DerivativePsiE[0] += DerPsi[0]*DeltaE
DerivativePsiE[1] += DerPsi[1]*DeltaE
DerivativePsiE[2] += DerPsi[2]*DeltaE
# We calculate mean values
energy /= NumberMCcycles
DerivativePsiE[0] /= NumberMCcycles
DerivativePsiE[1] /= NumberMCcycles
DerivativePsiE[2] /= NumberMCcycles
DeltaPsi[0] /= NumberMCcycles
DeltaPsi[1] /= NumberMCcycles
DeltaPsi[2] /= NumberMCcycles
EnergyDer[0] = 2*(DerivativePsiE[0]-DeltaPsi[0]*energy)
EnergyDer[1] = 2*(DerivativePsiE[1]-DeltaPsi[1]*energy)
EnergyDer[2] = 2*(DerivativePsiE[2]-DeltaPsi[2]*energy)
return energy, EnergyDer
#Here starts the main program with variable declarations
NumberParticles = 2
Dimension = 2
NumberHidden = 2
interaction=False
# guess for parameters
a=np.random.normal(loc=0.0, scale=0.001, size=(NumberParticles,Dimension))
b=np.random.normal(loc=0.0, scale=0.001, size=(NumberHidden))
w=np.random.normal(loc=0.0, scale=0.001, size=(NumberParticles,Dimension,NumberHidden))
# Set up iteration using stochastic gradient method
Energy = 0
EDerivative = [np.copy(a),np.copy(b),np.copy(w)]
# Learning rate eta, max iterations, need to change to adaptive learning rate
eta = 0.001
MaxIterations = 50
iter = 0
np.seterr(invalid='raise')
Energies = np.zeros(MaxIterations)
EnergyDerivatives1 = np.zeros(MaxIterations)
EnergyDerivatives2 = np.zeros(MaxIterations)
while iter < MaxIterations:
Energy, EDerivative = EnergyMinimization(a,b,w)
agradient = EDerivative[0]
bgradient = EDerivative[1]
wgradient = EDerivative[2]
a -= eta*agradient
b -= eta*bgradient
w -= eta*wgradient
Energies[iter] = Energy
print("Energy:",Energy)
#EnergyDerivatives1[iter] = EDerivative[0]
#EnergyDerivatives2[iter] = EDerivative[1]
#EnergyDerivatives3[iter] = EDerivative[2]
iter += 1
#nice printout with Pandas
import pandas as pd
from pandas import DataFrame
pd.set_option('display.max_columns', 6)
data ={'Energy':Energies}#,'A Derivative':EnergyDerivatives1,'B Derivative':EnergyDerivatives2,'Weights Derivative':EnergyDerivatives3}
frame = pd.DataFrame(data)
print(frame)
```
|
{"hexsha": "ab62a8d84a4aa9f3239d7c0399208ccaed023515", "size": 105965, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "doc/LectureNotes/boltzmannmachines.ipynb", "max_stars_repo_name": "Schoyen/ComputationalPhysics2", "max_stars_repo_head_hexsha": "9cf10ffb2557cc73c4e6bab060d53690ee39426f", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "doc/LectureNotes/boltzmannmachines.ipynb", "max_issues_repo_name": "Schoyen/ComputationalPhysics2", "max_issues_repo_head_hexsha": "9cf10ffb2557cc73c4e6bab060d53690ee39426f", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "doc/LectureNotes/boltzmannmachines.ipynb", "max_forks_repo_name": "Schoyen/ComputationalPhysics2", "max_forks_repo_head_hexsha": "9cf10ffb2557cc73c4e6bab060d53690ee39426f", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.2376026772, "max_line_length": 718, "alphanum_fraction": 0.5216722503, "converted": true, "num_tokens": 23343}
|
# -*- coding: utf-8 -*-
# -------------------------------------------------------------------------
# Purpose: demonstrate how ridge regression can be used to control overfitting
# Created: 2018/2/3
# -------------------------------------------------------------------------
from math import sqrt
import matplotlib.pyplot as plt
import numpy as np
from sklearn import linear_model
from bk.pa.common import open_url
"""
"""
target_url = "http://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/winequality-red.csv"
# ----------------------------------------------------------------------------
# ~ Main
if __name__ == '__main__':
    # Lists holding the attribute rows and the labels
    xList = []
    labels = []  # target labels
    names = []   # column headers
    # The first line stores the column headers
firstLine = True
    # Read the wine-quality data set and parse each line into attributes and a label
with open_url(target_url) as data:
for line in data:
if firstLine:
names = line.strip().split(";")
firstLine = False
else:
row = line.strip().split(";")
labels.append(float(row[-1]))
row.pop()
floatRow = [float(num) for num in row]
xList.append(floatRow)
    # Split the data set into two subsets:
    indices = range(len(xList))  # row indices
    # Test set xListTest contains 1/3 of the data
    xListTest = [xList[i] for i in indices if i % 3 == 0]
    # Training set xListTrain contains 2/3 of the data
xListTrain = [xList[i] for i in indices if i % 3 != 0]
labelsTest = [labels[i] for i in indices if i % 3 == 0]
labelsTrain = [labels[i] for i in indices if i % 3 != 0]
    # Check the shapes of the arrays
    xTrain = np.array(xListTrain)
    yTrain = np.array(labelsTrain)
    xTest = np.array(xListTest)
    yTest = np.array(labelsTest)
print("Shape of xTrain array", xTrain.shape)
print("Shape of yTrain array", yTrain.shape)
print("Shape of xTest array", xTest.shape)
print("Shape of yTest array", yTest.shape)
    # Penalty (regularization) coefficients to try
alphaList = [0.1 ** i for i in [0, 1, 2, 3, 4, 5, 6]]
    # RMS error for each alpha
rmsError = []
for alph in alphaList:
        # Fit scikit-learn's ridge regression model
wineRidgeModel = linear_model.Ridge(alpha=alph)
wineRidgeModel.fit(xTrain, yTrain)
        # RMS error = ||yTest - prediction||_2 / sqrt(n)
        rmsError.append(np.linalg.norm((yTest - wineRidgeModel.predict(xTest)), 2) / sqrt(len(yTest)))
print("RMS Error alpha")
for i in range(len(rmsError)):
print(rmsError[i], alphaList[i])
    # Plot the RMS error against the alpha values
x = range(len(rmsError))
plt.plot(x, rmsError, 'k')
plt.xlabel('-log(alpha)')
plt.ylabel('Error (RMS)')
plt.show()
    # Refit at the best alpha and plot a histogram of the prediction errors
indexBest = rmsError.index(min(rmsError))
alph = alphaList[indexBest]
wineRidgeModel = linear_model.Ridge(alpha=alph)
wineRidgeModel.fit(xTrain, yTrain)
errorVector = yTest - wineRidgeModel.predict(xTest)
plt.hist(errorVector)
plt.xlabel("Bin Boundaries")
plt.ylabel("Counts")
plt.show()
    # Scatter plot of predicted vs. actual taste scores
plt.scatter(wineRidgeModel.predict(xTest), yTest, s=100, alpha=0.10)
plt.xlabel('Predicted Taste Score')
plt.ylabel('Actual Taste Score')
plt.show()
|
{"hexsha": "f80517a9c6d0ad887b68f2342085f09ef5224614", "size": 3070, "ext": "py", "lang": "Python", "max_stars_repo_path": "bk/pa/03/ridgeWine.py", "max_stars_repo_name": "oraocp/pystat", "max_stars_repo_head_hexsha": "c01384683a289e667990ebc25e74ab287ffc14af", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "bk/pa/03/ridgeWine.py", "max_issues_repo_name": "oraocp/pystat", "max_issues_repo_head_hexsha": "c01384683a289e667990ebc25e74ab287ffc14af", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "bk/pa/03/ridgeWine.py", "max_forks_repo_name": "oraocp/pystat", "max_forks_repo_head_hexsha": "c01384683a289e667990ebc25e74ab287ffc14af", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.2380952381, "max_line_length": 104, "alphanum_fraction": 0.5651465798, "include": true, "reason": "import numpy", "num_tokens": 944}
|
// introduces boost::barrier to make multiple threads wait at a specific point
#define BOOST_THREAD_VERSION 4
#include <boost/thread.hpp>
#include <boost/thread/synchronized_value.hpp> // include required
#include <boost/asio.hpp>
#include <string>
#include <sstream>
#include <iostream>
#include <functional>
std::string get_robotstxt(const std::string &host)
{
using namespace boost::asio;
io_service ioservice;
ip::tcp::resolver resolver(ioservice);
ip::tcp::resolver::query query(host, "http");
auto it = resolver.resolve(query);
ip::tcp::socket socket(ioservice);
socket.connect(*it);
std::string request = "GET /robots.txt HTTP/1.1\r\nHost: " + host + "\r\n\r\n";
write(socket, buffer(request));
streambuf response;
boost::system::error_code ec;
read(socket, response, transfer_all(), ec);
if (ec == error::eof)
{
std::ostringstream os;
os << &response;
std::string s = os.str();
std::size_t idx = s.find("\r\n\r\n");
if (idx != std::string::npos)
s.erase(0, idx + 4);
return s;
}
return "";
}
boost::synchronized_value<std::vector<std::string>, boost::mutex> lines;
// barrier to make two threads wait
boost::barrier barrier(2);
void store_lines_from_robotstxt_and_output_them(const std::string &host)
{
std::string robotstxt = get_robotstxt(host);
std::istringstream is(robotstxt);
std::string s;
while (std::getline(is, s))
{
auto p = lines.synchronize();
p->emplace_back(s);
}
// wait() blocks until the required number of threads has called wait();
// wait() returns true for only one thread
if (barrier.wait())
{
// value() provides unsynchronized access (no synchronization required
// as only one thread iterates over and outputs the lines)
for (auto &line : lines.value())
std::cout << line << '\n';
}
}
int main()
{
boost::thread_group group;
group.create_thread(std::bind(store_lines_from_robotstxt_and_output_them, "theboostcpplibraries.com"));
group.create_thread(std::bind(store_lines_from_robotstxt_and_output_them, "dieboostcppbibliotheken.de"));
group.join_all();
}
|
{"hexsha": "de1c3be033d64faa25e817db506e3388b9856682", "size": 2053, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "45_barrier.cpp", "max_stars_repo_name": "BorisSchaeling/Boost.Thread-examples", "max_stars_repo_head_hexsha": "82b25a45c676cbbc0ec6707ec88d99f13d32143e", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 3.0, "max_stars_repo_stars_event_min_datetime": "2018-11-26T09:39:44.000Z", "max_stars_repo_stars_event_max_datetime": "2020-04-22T03:09:09.000Z", "max_issues_repo_path": "45_barrier.cpp", "max_issues_repo_name": "BorisSchaeling/Boost.Thread-examples", "max_issues_repo_head_hexsha": "82b25a45c676cbbc0ec6707ec88d99f13d32143e", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "45_barrier.cpp", "max_forks_repo_name": "BorisSchaeling/Boost.Thread-examples", "max_forks_repo_head_hexsha": "82b25a45c676cbbc0ec6707ec88d99f13d32143e", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 2.0, "max_forks_repo_forks_event_min_datetime": "2019-02-28T08:48:06.000Z", "max_forks_repo_forks_event_max_datetime": "2020-04-25T13:06:13.000Z", "avg_line_length": 28.9154929577, "max_line_length": 106, "alphanum_fraction": 0.7135898685, "num_tokens": 530}
|
import os
import sys
import logging
import datetime
import warnings
import pickle
from shutil import copyfile
import numpy as np
from preprocessing import load_scramble_data
from parameter_parser import Parameters
from algorithms.decision_tree import DecisionTree
from algorithms.random_forest import RandomForest
from algorithms.feed_forward_nn import FeedForwardNN
# Mute CPU- and OS-specific warnings from TensorFlow backend of FeedForwardNN
os.environ['KMP_WARNINGS'] = 'off'
os.environ['KMP_AFFINITY'] = 'disabled'
# Header detailing version that's printed out at the beginning of each run and at the top of each log file
header = ['Multisource Input Model Output Security Analysis Suite (MIMOSAS) v1.0.0-release.1', 'Copyright (C) 2019 University of California, Berkeley', 'https://complexity.berkeley.edu/mimosas/']
def start_logger(path):
"""
Start and return a new logger. Add console and file outputs.
@params:
path - Required : path to desired file output of logger (Str)
"""
# Create new logger
logger = logging.getLogger(path)
logger.setLevel(logging.DEBUG)
# Create output options for logger (file & console)
file_handler = logging.FileHandler(path)
stream_handler = logging.StreamHandler(sys.stdout)
    # Format output
formatter = logging.Formatter('%(asctime)s [%(levelname)s] %(message)s')
file_handler.setFormatter(formatter)
stream_handler.setFormatter(formatter)
# Catch all outputs
file_handler.setLevel(logging.DEBUG)
stream_handler.setLevel(logging.DEBUG)
# Add output options to logger
logger.addHandler(file_handler)
logger.addHandler(stream_handler)
return logger
def stop_logger(logger):
"""
    Detach console and file outputs from logger and delete the logger
@params:
logger - Required : logger object to be stopped (Logger)
"""
for handler in logger.handlers:
logger.removeHandler(handler)
del handler
del logger
def create_session_directory(algorithm):
"""
Creates directory to hold files for this algorithm's execution session; returns its filepath
@params:
algorithm - Required : name of the algorithm being run this session
"""
# Path string based on UTC time (second precision) at the time of creation
session = datetime.datetime.utcnow().strftime('%Y_%b_%d_%Hh_%Mm_%Ss')
path = os.path.join('.', 'saved_models', algorithm, session)
# Create directory at path if it doesn't already exist
if not os.path.exists(path):
os.makedirs(path)
return path
def main(main_path):
"""
Parse command line arguments and run either train or test mode
"""
# Output program header
with open('mimosas.txt', 'r') as f:
contents = f.read()
print(contents)
print()
for line in header:
print(line)
print('\n')
# Parse config file
parameters = Parameters(main_path)
# Run indicated models
if ('DECISION_TREE' in parameters.config.sections()):
run_decision_tree(parameters)
if ('RANDOM_FOREST' in parameters.config.sections()):
run_random_forest(parameters)
if ('FEED_FORWARD' in parameters.config.sections()):
run_feed_forward_nn(parameters)
def run_decision_tree(parameters):
"""
Run decision tree - including train, test, validation if applicable as indicated in the config file
@params:
parameters - Required : parameter object containing parameters loaded from config file (Parameter)
"""
# Create session directory
path = create_session_directory('decision_tree')
# Copy session config file used as backup
copyfile(parameters.config_file, os.path.join(path, 'used_conf.config'))
# Create session logger
logger = start_logger(os.path.join(path, 'training.log'))
for line in header:
logger.info(line)
logger.info('')
# Initialize models object
models = DecisionTree(parameters, path, logger)
# Check for loadable models
if 'Train' in parameters.config['MAIN']['Mode'].split(','):
# If the Load_Model_Path parameter in CONFIG is left blank, don't bother trying to load a model.
if parameters.config['DECISION_TREE']['Load_Model_Path']:
# Try to load a model from the specified path.
try:
models.load_models(parameters.config['DECISION_TREE']['Load_Model_Path'])
# If load fails, train from scratch instead.
            except Exception:
logger.info('There is no loadable models object at: ' + parameters.config['DECISION_TREE']['Load_Model_Path'])
logger.info('MIMOSAS will train new model(s) from scratch.')
logger.info('')
# If load is successful, prepare to continue training.
else:
# Report successful load to terminal; this session's logfile will be overwritten with log from model's history.
logger.info('Loadable models object found at ' + parameters.config['DECISION_TREE']['Load_Model_Path'])
# Copy loaded model to the session directory
copyfile(parameters.config['DECISION_TREE']['Load_Model_Path'], os.path.join(path, os.path.basename(parameters.config['DECISION_TREE']['Load_Model_Path'])))
# Copy loaded training log
copyfile(os.path.join(os.path.dirname(parameters.config['DECISION_TREE']['Load_Model_Path']), 'training.log'), os.path.join(path, 'training.log'))
else:
if 'Test' in parameters.config['MAIN']['Mode'].split(','):
# Try to load a model from the specified path.
try:
models.load_models(parameters.config['DECISION_TREE']['Load_Model_Path'])
# If load fails, exit function because there is nothing to test.
            except Exception:
logger.info('There is no loadable models object to test at: ' + parameters.config['DECISION_TREE']['Load_Model_Path'])
logger.info('Exiting Decision Tree.')
stop_logger(logger)
return None
# If load is successful, prepare to test.
else:
# Report successful load to terminal; this session's logfile will be overwritten with log from model's history.
logger.info('Loadable models object found at ' + parameters.config['DECISION_TREE']['Load_Model_Path'])
# Copy loaded model to the session directory
copyfile(parameters.config['DECISION_TREE']['Load_Model_Path'], os.path.join(path, os.path.basename(parameters.config['DECISION_TREE']['Load_Model_Path'])))
# Copy loaded training log
copyfile(os.path.join(os.path.dirname(parameters.config['DECISION_TREE']['Load_Model_Path']), 'training.log'), os.path.join(path, 'training.log'))
# Load data
X_train, X_test, y_train, y_test = load_scramble_data(parameters, logger)
# Train, if specified in CONFIG
if 'Train' in parameters.config['MAIN']['Mode'].split(','):
logger.info('Running DecisionTree in Train Mode.')
logger.info('')
models.train(X_train, y_train)
# Save model, if specified in CONFIG
# Note: Overwrites saved model (check if True)
if parameters.config['MAIN']['Save'] == 'True':
models.save_models()
# Exit Train Mode execution
logger.info('DONE TRAINING.')
logger.info('')
logger.info('')
# Test, if specified in CONFIG
if ('Test' in parameters.config['MAIN']['Mode']):
logger.info('Running DecisionTree in Test Mode.')
logger.info('')
# Score the model's prediction performance on the test set.
models.test(X_test, y_test)
# Exit Test Mode execution
logger.info('DONE TESTING.')
logger.info('')
logger.info('')
# Stop logger
stop_logger(logger)
def run_random_forest(parameters):
"""
Run random forest - including train, test, validation if applicable as indicated in the config file
@params:
parameters - Required : parameter object containing parameters loaded from config file (Parameter)
"""
# Create session directory
path = create_session_directory('random_forest')
# Copy session config file used as backup
copyfile(parameters.config_file, os.path.join(path, 'used_conf.config'))
# Create session logger
logger = start_logger(os.path.join(path, 'training.log'))
for line in header:
logger.info(line)
logger.info('')
# Initialize models object
models = RandomForest(parameters, path, logger)
# Check for loadable models
if 'Train' in parameters.config['MAIN']['Mode'].split(','):
# If the Load_Model_Path parameter in CONFIG is left blank, don't bother trying to load a model.
if parameters.config['RANDOM_FOREST']['Load_Model_Path']:
# Try to load a model from the specified path.
try:
models.load_models(parameters.config['RANDOM_FOREST']['Load_Model_Path'])
# If load fails, train from scratch instead.
            except Exception:
logger.info('There is no loadable models object at: ' + parameters.config['RANDOM_FOREST']['Load_Model_Path'])
logger.info('MIMOSAS will train new model(s) from scratch.')
logger.info('')
# If load is successful, prepare to continue training.
else:
# Report successful load to terminal; this session's logfile will be overwritten with log from model's history.
logger.info('Loadable models object found at ' + parameters.config['RANDOM_FOREST']['Load_Model_Path'])
# Copy loaded model to the session directory
copyfile(parameters.config['RANDOM_FOREST']['Load_Model_Path'], os.path.join(path, os.path.basename(parameters.config['RANDOM_FOREST']['Load_Model_Path'])))
# Copy loaded training log
copyfile(os.path.join(os.path.dirname(parameters.config['RANDOM_FOREST']['Load_Model_Path']), 'training.log'), os.path.join(path, 'training.log'))
else:
if 'Test' in parameters.config['MAIN']['Mode'].split(','):
# Try to load a model from the specified path.
try:
models.load_models(parameters.config['RANDOM_FOREST']['Load_Model_Path'])
# If load fails, exit function because there is nothing to test.
            except Exception:
logger.info('There is no loadable models object to test at: ' + parameters.config['RANDOM_FOREST']['Load_Model_Path'])
logger.info('Exiting Random Forest.')
stop_logger(logger)
return None
# If load is successful, prepare to test.
else:
# Report successful load to terminal; this session's logfile will be overwritten with log from model's history.
logger.info('Loadable models object found at ' + parameters.config['RANDOM_FOREST']['Load_Model_Path'])
# Copy loaded model to the session directory
copyfile(parameters.config['RANDOM_FOREST']['Load_Model_Path'], os.path.join(path, os.path.basename(parameters.config['RANDOM_FOREST']['Load_Model_Path'])))
# Copy loaded training log
copyfile(os.path.join(os.path.dirname(parameters.config['RANDOM_FOREST']['Load_Model_Path']), 'training.log'), os.path.join(path, 'training.log'))
# Report successful load
logger.info('Models object successfully loaded.')
# Load data
X_train, X_test, y_train, y_test = load_scramble_data(parameters, logger)
# Train, if specified in CONFIG
if 'Train' in parameters.config['MAIN']['Mode'].split(','):
logger.info('Running RandomForest in Train Mode.')
logger.info('')
models.train(X_train, y_train)
# Save model, if specified in CONFIG
# Note: Overwrites saved model (check if True)
if parameters.config['MAIN']['Save'] == 'True':
models.save_models()
# Exit Train Mode execution
logger.info('DONE TRAINING.')
logger.info('')
logger.info('')
# Test, if specified in CONFIG
if ('Test' in parameters.config['MAIN']['Mode']):
logger.info('Running RandomForest in Test Mode.')
logger.info('')
# Score the model's prediction performance on the test set.
models.test(X_test, y_test)
# Exit Test Mode execution
logger.info('DONE TESTING.')
logger.info('')
logger.info('')
# Stop logger
stop_logger(logger)
def run_feed_forward_nn(parameters):
"""
Run feed-forward NN - including train, test, validation if applicable as indicated in the config file
@params:
parameters - Required : parameter object containing parameters loaded from config file (Parameter)
"""
# Create session directory
path = create_session_directory('feed_forward_nn')
# Copy session config file used as backup
copyfile(parameters.config_file, os.path.join(path, 'used_conf.config'))
# Create session logger
logger = start_logger(os.path.join(path, 'training.log'))
for line in header:
logger.info(line)
logger.info('')
# Initialize models object
models = FeedForwardNN(parameters, path, logger)
# Check for loadable models
if 'Train' in parameters.config['MAIN']['Mode'].split(','):
# If the Load_Model_Path parameter in CONFIG is left blank, don't bother trying to load a model.
if parameters.config['FEED_FORWARD']['Load_Model_Path']:
# Try to load a model from the specified path.
try:
models.load_models(parameters.config['FEED_FORWARD']['Load_Model_Path'])
# If load fails, train from scratch instead.
            except Exception:
logger.info('There is no loadable models object at: ' + parameters.config['FEED_FORWARD']['Load_Model_Path'])
logger.info('MIMOSAS will train new model(s) from scratch.')
logger.info('')
# If load is successful, prepare to continue training.
else:
# Copy loaded model to the session directory
# copyfile(parameters.config['FEED_FORWARD']['Load_Model_Path'], os.path.join(path, os.path.basename(parameters.config['FEED_FORWARD']['Load_Model_Path'])))
# Copy loaded training log
copyfile(os.path.join(os.path.dirname(parameters.config['FEED_FORWARD']['Load_Model_Path']), 'training.log'), os.path.join(path, 'training.log'))
# Report successful load to terminal; this session's logfile will be overwritten with log from model's history.
logger.info('Loadable models object found at ' + parameters.config['FEED_FORWARD']['Load_Model_Path'])
else:
if 'Test' in parameters.config['MAIN']['Mode'].split(','):
# Try to load a model from the specified path.
try:
models.load_models(parameters.config['FEED_FORWARD']['Load_Model_Path'])
# If load fails, exit function because there is nothing to test.
            except Exception:
logger.info('There is no loadable models object to test at: ' + parameters.config['FEED_FORWARD']['Load_Model_Path'])
logger.info('Exiting FeedForwardNN.')
stop_logger(logger)
return None
# If load is successful, prepare to test.
else:
# Copy loaded model to the session directory
copyfile(parameters.config['FEED_FORWARD']['Load_Model_Path'], os.path.join(path, os.path.basename(parameters.config['FEED_FORWARD']['Load_Model_Path'])))
# Copy loaded training log
copyfile(os.path.join(os.path.dirname(parameters.config['FEED_FORWARD']['Load_Model_Path']), 'training.log'), os.path.join(path, 'training.log'))
# Nicely-formatted parameters of the most successful classifier in the loaded models object
logger.info('Models successfully loaded; the best-performing model has the following parameters:')
for param, value in models.models.best_estimator_.get_params().items():
logger.info('{}: {}'.format(param, value))
logger.info('')
# Load data
X_train, X_test, y_train, y_test = load_scramble_data(parameters, logger)
# Train, if specified in CONFIG
if 'Train' in parameters.config['MAIN']['Mode'].split(','):
logger.info('Running FeedForwardNN in Train Mode.')
logger.info('')
models.train(X_train, y_train)
# Save model, if specified in CONFIG
# Note: Overwrites saved model (check if True)
if parameters.config['MAIN']['Save'] == 'True':
models.save_models()
logger.info('DONE TRAINING.')
logger.info('')
logger.info('')
# Test, if specified in CONFIG
if ('Test' in parameters.config['MAIN']['Mode']):
logger.info('Running FeedForwardNN in Test Mode.')
logger.info('')
# Score the model's prediction performance on the test set.
models.test(X_test, y_test)
# If feature selection is indicated in the CONFIG file, use the RFA and RFE
# algorithms to quantify model performance as a function of input feature set
if (parameters.config['FEED_FORWARD']['Feature_Selection'] == 'True'):
models.recursive_feature_addition(X_train, X_test, y_train, y_test)
models.recursive_feature_elimination(X_train, X_test, y_train, y_test)
# Exit execution
logger.info('DONE TESTING.')
logger.info('')
logger.info('')
# Stop logger
stop_logger(logger)
if __name__ == '__main__':
with warnings.catch_warnings():
warnings.simplefilter("ignore")
        main_path = os.path.dirname(sys.argv[0])
main(main_path)
|
{"hexsha": "36ded32f96577ab75b77093802ea461b0dd006a3", "size": 18340, "ext": "py", "lang": "Python", "max_stars_repo_path": "main.py", "max_stars_repo_name": "nonproliferation/mimosas", "max_stars_repo_head_hexsha": "40b68de164c68928df9f4b1ad42ee0629a3c3d8b", "max_stars_repo_licenses": ["BSD-4-Clause-UC"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-04-09T22:26:03.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-09T22:26:03.000Z", "max_issues_repo_path": "main.py", "max_issues_repo_name": "nonproliferation/mimosas", "max_issues_repo_head_hexsha": "40b68de164c68928df9f4b1ad42ee0629a3c3d8b", "max_issues_repo_licenses": ["BSD-4-Clause-UC"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "main.py", "max_forks_repo_name": "nonproliferation/mimosas", "max_forks_repo_head_hexsha": "40b68de164c68928df9f4b1ad42ee0629a3c3d8b", "max_forks_repo_licenses": ["BSD-4-Clause-UC"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-09-30T21:13:10.000Z", "max_forks_repo_forks_event_max_datetime": "2019-09-30T21:13:10.000Z", "avg_line_length": 37.8925619835, "max_line_length": 195, "alphanum_fraction": 0.640348964, "include": true, "reason": "import numpy", "num_tokens": 3697}
|
# Make data using sklearn
import numpy as np
from sklearn.datasets import make_blobs
points, y = make_blobs(n_samples=20, centers=3, n_features=2, random_state=0)
# Compute HDBSCAN* in parallel using our algorithm
from pyhdbscan import HDBSCAN
dendro = HDBSCAN(points, 3) # minPts = 3
# Visualize dendrogram using scipy
from matplotlib import pyplot as plt
from scipy.cluster.hierarchy import dendrogram, linkage
fig = plt.figure(figsize=(3,2.3))
dn = dendrogram(dendro, color_threshold=0, no_labels=True)
fig.savefig("example.pdf")
|
{"hexsha": "290364bb83f055dca827ad831d35b026ea2b5dea", "size": 541, "ext": "py", "lang": "Python", "max_stars_repo_path": "pybindings/example.py", "max_stars_repo_name": "wangyiqiu/hdbscan", "max_stars_repo_head_hexsha": "0f21522a4d34db040dd0cd6c6506ad39ed6f2a9e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2021-03-29T05:54:11.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-28T04:13:10.000Z", "max_issues_repo_path": "pybindings/example.py", "max_issues_repo_name": "wangyiqiu/hdbscan", "max_issues_repo_head_hexsha": "0f21522a4d34db040dd0cd6c6506ad39ed6f2a9e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-09-16T05:38:33.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-16T22:18:26.000Z", "max_forks_repo_path": "pybindings/example.py", "max_forks_repo_name": "wangyiqiu/hdbscan", "max_forks_repo_head_hexsha": "0f21522a4d34db040dd0cd6c6506ad39ed6f2a9e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-07-13T23:42:47.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-05T12:55:20.000Z", "avg_line_length": 24.5909090909, "max_line_length": 77, "alphanum_fraction": 0.7781885397, "include": true, "reason": "import numpy,from scipy", "num_tokens": 147}
|
import pandas as pd
import os as os
import sqlite3
import matplotlib.pyplot as plt
import re
import time
import numpy
try:
from ladybug.sql import SQLiteResult
from ladybug.datacollection import HourlyContinuousCollection, \
MonthlyCollection, DailyCollection
except ImportError as e:
raise ImportError('\nFailed to import ladybug:\n\t{}'.format(e))
try:
from honeybee.config import folders
except ImportError as e:
raise ImportError('\nFailed to import honeybee:\n\t{}'.format(e))
try:
from honeybee_energy.result.loadbalance import LoadBalance
except ImportError as e:
raise ImportError('\nFailed to import honeybee_energy:\n\t{}'.format(e))
try:
import ladybug.datatype
except ImportError as e:
raise ImportError('\nFailed to import ladybug:\n\t{}'.format(e))
try:
from ladybug.datacollection import BaseCollection
except ImportError as e:
raise ImportError('\nFailed to import ladybug:\n\t{}'.format(e))
def subtract_loss_from_gain(gain_load, loss_load):
"""Create a single DataCollection from gains and losses."""
total_loads = []
for gain, loss in zip(gain_load, loss_load):
total_load = gain - loss
total_load.header.metadata['type'] = \
total_load.header.metadata['type'].replace('Gain ', '')
total_loads.append(total_load)
return total_loads
def serialize_data(data_dicts):
"""Reserialize a list of collection dictionaries."""
if len(data_dicts) == 0:
return []
elif data_dicts[0]['type'] == 'HourlyContinuousCollection':
return [HourlyContinuousCollection.from_dict(data) for data in data_dicts]
elif data_dicts[0]['type'] == 'MonthlyCollection':
return [MonthlyCollection.from_dict(data) for data in data_dicts]
elif data_dicts[0]['type'] == 'DailyCollection':
return [DailyCollection.from_dict(data) for data in data_dicts]
def pos(col):
return col[col > 0].sum()
def neg(col):
return col[col < 0].sum()
# List of all the output strings that will be requested
cooling_outputs = LoadBalance.COOLING + (
'Cooling Coil Electricity Energy',
'Chiller Electricity Energy',
'Zone VRF Air Terminal Cooling Electricity Energy',
'VRF Heat Pump Cooling Electricity Energy',
'Chiller Heater System Cooling Electricity Energy',
'District Cooling Chilled Water Energy',
'Evaporative Cooler Electricity Energy')
heating_outputs = LoadBalance.HEATING + (
'Boiler NaturalGas Energy',
'Heating Coil Total Heating Energy',
'Heating Coil NaturalGas Energy',
'Heating Coil Electricity Energy',
'Humidifier Electricity Energy',
'Zone VRF Air Terminal Heating Electricity Energy',
'VRF Heat Pump Heating Electricity Energy',
'VRF Heat Pump Defrost Electricity Energy',
'VRF Heat Pump Crankcase Heater Electricity Energy',
'Chiller Heater System Heating Electricity Energy',
'District Heating Hot Water Energy',
'Baseboard Electricity Energy',
'Hot_Water_Loop_Central_Air_Source_Heat_Pump Electricity Consumption',
'Boiler Electricity Energy',
'Water Heater NaturalGas Energy',
'Water Heater Electricity Energy',
'Cooling Coil Water Heating Electricity Energy')
lighting_outputs = LoadBalance.LIGHTING
electric_equip_outputs = LoadBalance.ELECTRIC_EQUIP
gas_equip_outputs = LoadBalance.GAS_EQUIP
process_outputs = LoadBalance.PROCESS
shw_outputs = ('Water Use Equipment Heating Energy',) + LoadBalance.HOT_WATER
fan_electric_outputs = (
'Zone Ventilation Fan Electricity Energy',
'Fan Electricity Energy',
'Cooling Tower Fan Electricity Energy')
pump_electric_outputs = 'Pump Electricity Energy'
people_gain_outputs = LoadBalance.PEOPLE_GAIN
solar_gain_outputs = LoadBalance.SOLAR_GAIN
infil_gain_outputs = LoadBalance.INFIL_GAIN
infil_loss_outputs = LoadBalance.INFIL_LOSS
vent_loss_outputs = LoadBalance.VENT_LOSS
vent_gain_outputs = LoadBalance.VENT_GAIN
nat_vent_gain_outputs = LoadBalance.NAT_VENT_GAIN
nat_vent_loss_outputs = LoadBalance.NAT_VENT_LOSS
Fresh_Air="Zone Infiltration Current Density Volume Flow Rate"
Mechanical_Vent="Zone Mechanical Ventilation Mass Flow Rate"
all_output = \
[cooling_outputs, heating_outputs, lighting_outputs, electric_equip_outputs, gas_equip_outputs,
process_outputs, shw_outputs, fan_electric_outputs, pump_electric_outputs,
people_gain_outputs, solar_gain_outputs, infil_gain_outputs, infil_loss_outputs,
vent_loss_outputs, vent_gain_outputs, nat_vent_gain_outputs, nat_vent_loss_outputs]
_sql=r'C:\Users\JTHOM\OneDrive - Ramboll\Documents\Dump\Sql\de66f1cf\outputs\sql\eplusout.sql'
def get_SQL(sql_FP):
#assert os.path.isfile(_sql), 'No sql file found at: {}.'.format(_sql)
assert os.path.isfile(sql_FP), 'No sql file found at: {}.'.format(_sql)
# small file on windows; use IronPython like usual
# create the SQL result parsing object
sql_obj = SQLiteResult(sql_FP)
# get all of the results relevant for energy use
cooling = sql_obj.data_collections_by_output_name(cooling_outputs)
heating = sql_obj.data_collections_by_output_name(heating_outputs)
lighting = sql_obj.data_collections_by_output_name(lighting_outputs)
electric_equip = sql_obj.data_collections_by_output_name(electric_equip_outputs)
hot_water = sql_obj.data_collections_by_output_name(shw_outputs)
vent_masss_flow= sql_obj.data_collections_by_output_name(Mechanical_Vent)
#fresh_air_Flow= sql_obj.data_collections_by_output_name(Fresh_Air)
infil_gain = sql_obj.data_collections_by_output_name(infil_gain_outputs)
infil_loss = sql_obj.data_collections_by_output_name(infil_loss_outputs)
vent_loss = sql_obj.data_collections_by_output_name(vent_loss_outputs)
vent_gain = sql_obj.data_collections_by_output_name(vent_gain_outputs)
# do arithmetic with any of the gain/loss data collections
if len(infil_gain) == len(infil_loss):
infiltration_load = subtract_loss_from_gain(infil_gain, infil_loss)
if len(vent_gain) == len(vent_loss) == len(cooling) == len(heating):
mech_vent_loss = subtract_loss_from_gain(heating, vent_loss)
mech_vent_gain = subtract_loss_from_gain(cooling, vent_gain)
mech_vent_load = [data.duplicate() for data in
subtract_loss_from_gain(mech_vent_gain, mech_vent_loss)]
for load in mech_vent_load:
load.header.metadata['type'] = \
'Zone Ideal Loads Ventilation Heat Energy'
Results_Dict={"out:Annual Heat":heating,"out:Annual Cool":cooling,"out:Annual Lighting":lighting,"out:Annual Elec equipt":electric_equip,
"out:Annual DHW":hot_water,"out:Annual Mech Ventilation":vent_masss_flow,'out: Infiltration Load':infiltration_load,
"out:Mech Vent Load":mech_vent_load}
return(Results_Dict)
def sql_Data(_data,type_):
operator = '+'
statement = 'data {} data_i'.format(operator)
# perform the arithmetic operation
data = _data[0]
for data_i in _data[1:]:
data = eval(statement, {'data': data, 'data_i': data_i}) # I love Python!
# try to replace the data collection type
try:
data = data.duplicate()
if type_:
data.header.metadata['type'] = type_
elif 'type' in data.header.metadata:
d_unit = data.header.unit
for key in ladybug.datatype.UNITS:
if d_unit in ladybug.datatype.UNITS[key]:
base_type = ladybug.datatype.TYPESDICT[key]()
data.header.metadata['type'] = str(base_type)
break
else:
data.header.metadata['type'] = 'Unknown Data Type'
if 'System' in data.header.metadata:
data.header.metadata.pop('System')
if 'Zone' in data.header.metadata:
data.header.metadata.pop('Zone')
except AttributeError:
pass # data was not a data collection; just return it anyway
return(list(data.values))
def Results_PP(Results_Dict,Name):
t1=time.time()
Results=[sql_Data(k,v) for k,v in zip(Results_Dict.values(),Results_Dict.keys())]
DF=pd.DataFrame(Results,index=Results_Dict.keys()).T
Index=pd.date_range(start='1/1/2021', periods=8760, freq='h')
DF=DF.set_index(Index)
Summed_Totals=DF.sum()
Summed_Totals['out:Name']=Name
#Fresh air load pre processing
Summed_Totals["out:FA Cooling Load"]=DF["out:Mech Vent Load"].agg([pos])['pos']
Summed_Totals["out:FA Heating Load"]=DF["out:Mech Vent Load"].agg([neg])['neg']
Summed_Totals["out:FA Total Load"]=(-1*Summed_Totals["out:FA Heating Load"])+Summed_Totals["out:FA Cooling Load"]
#Infiltration load pre processing
Summed_Totals["out:INF Cooling Load"]=DF['out: Infiltration Load'].agg([pos])['pos']
Summed_Totals["out:INF Heating Load"]=DF['out: Infiltration Load'].agg([neg])['neg']
Summed_Totals["out:INF Total Load"]=(-1*Summed_Totals["out:INF Heating Load"])+Summed_Totals["out:INF Cooling Load"]
print(Summed_Totals)
#Summed_Totals["out:FA Cooling Load"]=Temp["out:FA Cooling Load"]
#Summed_Totals["out:FA Heating Load"]=Temp["out:FA Heating Load"]
Dict=Summed_Totals.to_dict()
t2=time.time()
print(("It takes %s seconds to extract "+Name) % (t2 - t1))
return(Dict)
def extract_name(Name):
print(Name)
Code=re.split("_",Name)[1]
#print(Code)
_Wall=Code[0]
_Roof=Code[1]
_Ground_Floor=Code[2]
_Glazing=Code[3]
_Rooflights=Code[4]
_Airtightness=Code[5]
Wall=[{'Name':'Wall.Basecase_'},
{'Name':'Wall.Regs_'},
{'Name':'Wall.Pilot_'}][int(_Wall)]
Roof=[{'Name':'Roof.Basecase_'},
{'Name':'Roof.Regs_'}][int(_Roof)]
Floor=[{'Name':'Floor.Basecase_'},
{'Name':'Floor.Regs_'}][int(_Ground_Floor)]
Glazing=[{'Name':'Glazing.Basecase_'},
{'Name':'Glazing.Regs_'},
{'Name':'Glazing.Pilot_'}][int(_Glazing)]
Rooflights=[{'Name':'Rooflights.Basecase_'},
{'Name':'Rooflights.Regs_'},
{'Name':'Rooflights.Pilot_'}][int(_Rooflights)]
AirTightness=[{'Name':'AirTightness.Basecase'},
{'Name':'Airtightness.Regs'},
{'Name':'Airtightness.Pilot'}][int(_Airtightness)]
Name=Name+"_"+Wall['Name']+Roof['Name']+Floor['Name']+Glazing['Name']+Rooflights['Name']+AirTightness['Name']
#print(Name)
return(Name)
def Results_Hourly(Results_Dict,Name):
t1=time.time()
Results=[sql_Data(k,v) for k,v in zip(Results_Dict.values(),Results_Dict.keys())]
DF=pd.DataFrame(Results,index=Results_Dict.keys()).T
Index=pd.date_range(start='1/1/2021', periods=8760, freq='h')
DF=DF.set_index(Index)
Summed_Totals=DF#.sum()
Summed_Totals['out:Name']=Name
#Fresh air load pre processing
Summed_Totals["out:FA Cooling Load"]=DF["out:Mech Vent Load"].agg([pos])['pos']
Summed_Totals["out:FA Heating Load"]=DF["out:Mech Vent Load"].agg([neg])['neg']
Summed_Totals["out:FA Total Load"]=(-1*Summed_Totals["out:FA Heating Load"])+Summed_Totals["out:FA Cooling Load"]
#Infiltration load pre processing
Summed_Totals["out:INF Cooling Load"]=DF['out: Infiltration Load'].agg([pos])['pos']
Summed_Totals["out:INF Heating Load"]=DF['out: Infiltration Load'].agg([neg])['neg']
Summed_Totals["out:INF Total Load"]=(-1*Summed_Totals["out:INF Heating Load"])+Summed_Totals["out:INF Cooling Load"]
print(Summed_Totals)
#Summed_Totals["out:FA Cooling Load"]=Temp["out:FA Cooling Load"]
#Summed_Totals["out:FA Heating Load"]=Temp["out:FA Heating Load"]
ict=Summed_Totals.to_dict()
t2=time.time()
print(("It takes %s seconds to extract "+Name) % (t2 - t1))
return(Summed_Totals)
print(("It takes %s seconds to upload "+Reality_Name) % (t2 - t1))
# Please provide a file path to the directory file, which should contain the names and file paths of the SQL results
directory_fp=r'C:\\Users\\JTHOM\\OneDrive - Ramboll\\Documents\\Dump\\Sql\\ST_James\\Directory.xlsx'
DF=pd.read_excel(directory_fp)
Names=DF['Name']
File_Paths=DF['File_Path']
# Please enter a file path for the location of the data output from the SQL files
FP=r'C:\\Users\\JTHOM\\OneDrive - Ramboll\\Documents\\Dump\\Sql\\ST_James\\data.xlsx'
#FP=r'C:\\Users\\JTHOM\\OneDrive - Ramboll\\Documents\\Dump\\Sql\\Plaza\\data.xlsx'
#print([Name_in) for (Name_in) in zip(DF['Name'],DF['File_Path'])])
Batch_Download=pd.DataFrame([Results_PP(get_SQL(_sql),extract_name(Name_in)) for (Name_in,_sql) in zip(DF['Name'],DF['File_Path'])])
Batch_Download.to_excel(FP)
#_sql_FP=r'C:\\Users\\JTHOM\\OneDrive - Ramboll\\Documents\\Dump\\Sql\\Plaza\\Plaza_000000\\116a9b0d\\outputs\\sql\\eplusout.sql'
"""
_sql_FP=r'C:\Users\JTHOM\OneDrive - Ramboll\Documents\Dump\Sql\Plaza\Plaza_000000\6d837694\outputs\sql\eplusout.sql"'
Annual=Results_Hourly(get_SQL(_sql_FP),'Results 2')
Annual.to_excel(FP)
"""
#fi1, ax1 = plt.subplots()
#DF.plot(ax=ax1)
#fi2, ax2 = plt.subplots()
#Summed_Totals.plot.bar(ax=ax2)
#plt.show()
|
{"hexsha": "a56b109a75681baa236497f826cdbcdff609b633", "size": 13091, "ext": "py", "lang": "Python", "max_stars_repo_path": "Passive/Passive_Results_Reader.py", "max_stars_repo_name": "JTHOMRAMBOLL/Bruntwood_V1", "max_stars_repo_head_hexsha": "2fdd7190d92535267cbcb9b36c559ce6deb5841b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Passive/Passive_Results_Reader.py", "max_issues_repo_name": "JTHOMRAMBOLL/Bruntwood_V1", "max_issues_repo_head_hexsha": "2fdd7190d92535267cbcb9b36c559ce6deb5841b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Passive/Passive_Results_Reader.py", "max_forks_repo_name": "JTHOMRAMBOLL/Bruntwood_V1", "max_forks_repo_head_hexsha": "2fdd7190d92535267cbcb9b36c559ce6deb5841b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.5029411765, "max_line_length": 141, "alphanum_fraction": 0.7057520434, "include": true, "reason": "import numpy", "num_tokens": 3468}
|
FUNCTION GATAN2 (ARG1, ARG2)
C
C ARC TANGENT ARG1/ARG2
C
REAL*8 DATAN2,ARG1,ARG2
C
GATAN2 = DATAN2 (ARG1,ARG2)
RETURN
END
|
{"hexsha": "648a5a208ee26130ea1617523ac745670fcb9994", "size": 156, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "packages/PIPS/validation/ArrayResizing/fpppp/gatan2.f", "max_stars_repo_name": "DVSR1966/par4all", "max_stars_repo_head_hexsha": "86b33ca9da736e832b568c5637a2381f360f1996", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 51, "max_stars_repo_stars_event_min_datetime": "2015-01-31T01:51:39.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-18T02:01:50.000Z", "max_issues_repo_path": "packages/PIPS/validation/ArrayResizing/fpppp/gatan2.f", "max_issues_repo_name": "DVSR1966/par4all", "max_issues_repo_head_hexsha": "86b33ca9da736e832b568c5637a2381f360f1996", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2017-05-29T09:29:00.000Z", "max_issues_repo_issues_event_max_datetime": "2019-03-11T16:01:39.000Z", "max_forks_repo_path": "packages/PIPS/validation/ArrayResizing/fpppp/gatan2.f", "max_forks_repo_name": "DVSR1966/par4all", "max_forks_repo_head_hexsha": "86b33ca9da736e832b568c5637a2381f360f1996", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 12, "max_forks_repo_forks_event_min_datetime": "2015-03-26T08:05:38.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-18T02:01:51.000Z", "avg_line_length": 15.6, "max_line_length": 34, "alphanum_fraction": 0.5897435897, "num_tokens": 67}
|
using CodecXz
using TranscodingStreams
using Test
@testset "Xz Codec" begin
codec = XzCompressor()
@test codec isa XzCompressor
@test occursin(r"^(CodecXz\.)?XzCompressor\(level=\d, check=\d+\)$", sprint(show, codec))
@test CodecXz.initialize(codec) === nothing
@test CodecXz.finalize(codec) === nothing
codec = XzDecompressor()
@test codec isa XzDecompressor
@test occursin(r"^(CodecXz\.)?XzDecompressor\(memlimit=\d+, flags=\d+\)$", sprint(show, codec))
@test CodecXz.initialize(codec) === nothing
@test CodecXz.finalize(codec) === nothing
# Generated by `lzma.compress(b"foo")` on CPython 3.5.2.
data = Vector(b"\xfd7zXZ\x00\x00\x04\xe6\xd6\xb4F\x02\x00!\x01\x16\x00\x00\x00t/\xe5\xa3\x01\x00\x02foo\x00\x00X\x15\xa9{,\xe6,\x98\x00\x01\x1b\x03\x0b/\xb9\x10\x1f\xb6\xf3}\x01\x00\x00\x00\x00\x04YZ")
@test read(XzDecompressorStream(IOBuffer(data))) == b"foo"
@test read(XzDecompressorStream(IOBuffer(vcat(data, data)))) == b"foofoo"
# corrupt data
data[[1,3,5]] = b"bug"
@test_throws ErrorException read(XzDecompressorStream(IOBuffer(data)))
@test XzCompressorStream <: TranscodingStreams.TranscodingStream
@test XzDecompressorStream <: TranscodingStreams.TranscodingStream
TranscodingStreams.test_roundtrip_read(XzCompressorStream, XzDecompressorStream)
TranscodingStreams.test_roundtrip_write(XzCompressorStream, XzDecompressorStream)
TranscodingStreams.test_roundtrip_lines(XzCompressorStream, XzDecompressorStream)
TranscodingStreams.test_roundtrip_transcode(XzCompressor, XzDecompressor)
@test_throws ArgumentError XzCompressor(level=10)
@test_throws ArgumentError XzDecompressor(memlimit=0)
end
|
{"hexsha": "8491e1f89b7f39abe4ed03856030cc3c0a6e6826", "size": 1708, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/runtests.jl", "max_stars_repo_name": "UnofficialJuliaMirror/CodecXz.jl-ba30903b-d9e8-5048-a5ec-d1f5b0d4b47b", "max_stars_repo_head_hexsha": "55579aa6e0587eb911c734da37752e0a851cd528", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2020-10-12T07:45:33.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-24T00:42:47.000Z", "max_issues_repo_path": "test/runtests.jl", "max_issues_repo_name": "UnofficialJuliaMirror/CodecXz.jl-ba30903b-d9e8-5048-a5ec-d1f5b0d4b47b", "max_issues_repo_head_hexsha": "55579aa6e0587eb911c734da37752e0a851cd528", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2017-06-16T02:00:29.000Z", "max_issues_repo_issues_event_max_datetime": "2019-11-02T23:07:45.000Z", "max_forks_repo_path": "test/runtests.jl", "max_forks_repo_name": "UnofficialJuliaMirror/CodecXz.jl-ba30903b-d9e8-5048-a5ec-d1f5b0d4b47b", "max_forks_repo_head_hexsha": "55579aa6e0587eb911c734da37752e0a851cd528", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2017-06-09T19:53:44.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-07T18:55:52.000Z", "avg_line_length": 46.1621621622, "max_line_length": 205, "alphanum_fraction": 0.7441451991, "num_tokens": 538}
|
import numpy as np
import pandas as pd
import datetime as dt
from sklearn import preprocessing
class DataUtil(object):
#
# This class contains data specific information.
# It does the following:
# - Read data from file
# - Normalise it
# - Split it into train, dev (validation) and test
# - Create X and Y for each of the 3 sets (train, dev, test) according to the following:
# Every sample (x, y) shall be created as follows:
# - x --> window number of values
#       - y --> one value that lies horizon steps in the future, i.e. horizon steps past the last value of x
# This way X and Y will have the following dimensions:
# - X [number of samples, window, number of multivariate time series]
# - Y [number of samples, number of multivariate time series]
def __init__(self, hparams, normalise = 2):
try:
self.hparams = hparams
#print("Start reading data")
filename = './data/{}/{}{}'.format(self.hparams.data_name, self.hparams.data_name, '.csv')
self.rawdata = pd.read_csv(filename, parse_dates=[0])
index_colname = self.rawdata.columns[0]
series = {'all':[0,-1], 'load':[0,59], 'price':[59,90], 'wind':[90, 147], 'solar':[147, 183]}
splits = self.datasplitter(self.hparams.powerset)
if self.hparams.n_multiv == 183 or self.hparams.n_multiv == 321:
self.rawdata.drop([index_colname], axis=1, inplace=True)
self.rawdata = self.rawdata.to_numpy()
if splits != 0:
self.rawdata = self.rawdata[:,splits[0]:splits[1]]
else:
if splits != 0:
if splits[0] != 0:
self.rawdata = self.rawdata.iloc[:, np.r_[0,splits[0]+1:splits[1]+1]]
else:
self.rawdata = self.rawdata.iloc[:, splits[0]:splits[1]+1]
if self.hparams.calendar == 'True' or self.hparams.calendar == 1 or self.hparams.n_multiv == 189 or self.hparams.n_multiv == 327:
data = self.rawdata
data['hour'] = data[index_colname].dt.hour
data = self.encode(data, 'hour', 23)
data['month'] = data[index_colname].dt.month
data = self.encode(data, 'month', 12)
                data['day'] = data[index_colname].dt.day
                data = self.encode(data, 'day', 365)  # note: dt.day is day-of-month; 365 corresponds to a day-of-year cycle
self.rawdata = data
self.rawdata.drop([index_colname, 'hour', 'month', 'day'], axis=1, inplace=True)
else:
self.rawdata.drop([index_colname], axis=1, inplace=True)
self.rawdata = self.rawdata.to_numpy()
#print("End reading data")
self.w = self.hparams.window
self.h = self.hparams.horizon
self.data = np.zeros(self.rawdata.shape)
self.trainpart = self.hparams.split_train
self.n, self.m = self.data.shape
self.normalise = normalise
self.normalise_data(normalise)
self.split_data(self.hparams.split_train, self.hparams.split_validation, self.hparams.split_test)
except IOError as err:
# In case file is not found, all of the above attributes will not have been created
# Hence, in order to check if this call was successful, you can call hasattr on this object
# to check if it has attribute 'data' for example
print(f"Error opening data file ... {err}")
def normalise_data(self, normalise):
#print(f"Normalise: {normalise}")
if normalise == 0: # do not normalise
self.data = self.rawdata
        if normalise == 1: # normalise each timeseries separately by its max over the training part (the constructor default is 2)
self.scale = np.ones(self.m)
for i in range(self.m):
self.scale[i] = np.max(np.abs(self.rawdata[:int(self.trainpart * self.n), i]))
self.data[:, i] = self.rawdata[:, i] / self.scale[i]
if normalise == 2: # normalise timeseries using MinMaxScaler
# MinMaxScaler RobustScaler Normalizer MaxAbsScaler StandardScaler
self.scaler = preprocessing.MinMaxScaler()
self.scale = self.scaler.fit(self.rawdata[:int(self.trainpart * self.n)])
self.data = self.scale.transform(self.rawdata)
def split_data(self, train, valid, test):
#print(f"Splitting data into training set ({train}), validation set ({valid}) and testing set ({1 - (train + valid)})")
train_set = range(self.w + self.h - 1, int(train * self.n))
valid_set = range(int(train * self.n), int((train + valid) * self.n))
if test == 0.001: # the default value --> we use rest for testing
test_set = range(int((train + valid) * self.n), self.n)
        else: # if a test split was defined, we use the portion right after validation for testing
test_set = range(int((train + valid) * self.n), int((train + valid + test) * self.n))
self.train = self.get_data(train_set)
self.valid = self.get_data(valid_set)
self.test = self.get_data(test_set)
def get_data(self, rng):
n = len(rng)
X = np.zeros((n, self.w, self.m))
Y = np.zeros((n, 1, self.m))
for i in range(n):
end = rng[i] - self.h + 1
start = end - self.w
X[i,:,:] = self.data[start:end, :]
Y[i,:,:] = self.data[rng[i],:]
#print(f"Shape of data X: {X.shape}, Y: {Y.shape} ")
return [X, Y]
def encode(self, data, col, max_val):
data[col + '_sin'] = np.sin(2 * np.pi * data[col]/max_val)
data[col + '_cos'] = np.cos(2 * np.pi * data[col]/max_val)
return data
def datasplitter(self,i):
switcher={
'load':[0,59],
'price':[59,90],
'wind':[90,147],
'solar':[147,183],
}
return switcher.get(i,0)
    # Note: __len__ and __getitem__ assume that set_type, sample_num_train,
    # sample_num_validation, sample_num_test, samples and labels are set
    # elsewhere (e.g. by a Dataset wrapper); __init__ does not create them.
    def __len__(self):
if self.set_type == 'train':
return self.sample_num_train
elif self.set_type == 'validation':
return self.sample_num_validation
else:
return self.sample_num_test
def __getitem__(self, idx):
sample = [self.samples[idx, :, :], self.labels[idx, :, :]]
return sample
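# --- Usage sketch (not part of the original module) ----------------------------
# The project's real hparams object comes from its argument parser; a
# SimpleNamespace with only the fields DataUtil actually reads stands in here,
# and the dataset name is a hypothetical example. If the CSV is missing,
# __init__ catches the IOError, so hasattr() is used to verify construction.
if __name__ == '__main__':
    from types import SimpleNamespace
    hparams = SimpleNamespace(data_name='europe_power_system',  # hypothetical dataset name
                              powerset='load', n_multiv=59, calendar='False',
                              window=64, horizon=24, split_train=0.6,
                              split_validation=0.2, split_test=0.2)
    util = DataUtil(hparams, normalise=2)
    if hasattr(util, 'data'):
        X_train, Y_train = util.train
        print(X_train.shape, Y_train.shape)  # (samples, window, series), (samples, 1, series)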
|
{"hexsha": "486eb56dcf6b3c06c6b9861c7e059aea61452d00", "size": 6622, "ext": "py", "lang": "Python", "max_stars_repo_path": "DSANet/datautil.py", "max_stars_repo_name": "ps789/multivariate-deep-learning", "max_stars_repo_head_hexsha": "aa09b1419dffc34cf2ee8b6ee00d7ecbe4d0d6e6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2020-11-24T12:46:21.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T01:59:24.000Z", "max_issues_repo_path": "DSANet/datautil.py", "max_issues_repo_name": "ps789/multivariate-deep-learning", "max_issues_repo_head_hexsha": "aa09b1419dffc34cf2ee8b6ee00d7ecbe4d0d6e6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "DSANet/datautil.py", "max_forks_repo_name": "ps789/multivariate-deep-learning", "max_forks_repo_head_hexsha": "aa09b1419dffc34cf2ee8b6ee00d7ecbe4d0d6e6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2021-03-11T19:33:33.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-24T21:06:07.000Z", "avg_line_length": 42.7225806452, "max_line_length": 145, "alphanum_fraction": 0.5495318635, "include": true, "reason": "import numpy", "num_tokens": 1602}
|
# Import the usual libraries
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import coron_wg
from tqdm.auto import trange, tqdm
filt, mask, pupil = ('F335M', 'MASK335R', 'CIRCLYOT')
import os
# Update output directories
coron_wg.fig_dir = coron_wg.base_dir + 'output_M335R/'
coron_wg.contrast_maps_dir = coron_wg.base_dir + 'contrast_maps_M335R/'
for path in [coron_wg.fig_dir, coron_wg.contrast_maps_dir]:
if not os.path.isdir(path):
os.makedirs(path)
print(f'Creating: {path}')
else:
        print(f'Path already exists: {path}')
# For Requirements WFE, cycle through jitters, tacq, and IEC scenarios
imode = 2
for imode_jitt in trange(4, leave=False, desc='Jitter/TACQ'):
for imode_iec in trange(3, leave=False, desc='IEC'):
coron_wg.run_obs(filt, mask, pupil, imode=imode, imode_iec=imode_iec, imode_jitt=imode_jitt)
|
{"hexsha": "407239c50988a3f443c65b96234813ec886b3e96", "size": 895, "ext": "py", "lang": "Python", "max_stars_repo_path": "notebooks/Commissioning/CoronWG/run_sims_requirements.py", "max_stars_repo_name": "JarronL/pyNRC", "max_stars_repo_head_hexsha": "0354f0635dd4c5391ca3a769fa9e5ead83661c30", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2017-01-09T05:11:11.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-25T18:30:41.000Z", "max_issues_repo_path": "notebooks/Commissioning/CoronWG/run_sims_requirements.py", "max_issues_repo_name": "JarronL/pyNRC", "max_issues_repo_head_hexsha": "0354f0635dd4c5391ca3a769fa9e5ead83661c30", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 24, "max_issues_repo_issues_event_min_datetime": "2017-05-25T04:45:47.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-25T18:31:48.000Z", "max_forks_repo_path": "notebooks/Commissioning/CoronWG/run_sims_requirements.py", "max_forks_repo_name": "JarronL/pyNRC", "max_forks_repo_head_hexsha": "0354f0635dd4c5391ca3a769fa9e5ead83661c30", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 11, "max_forks_repo_forks_event_min_datetime": "2017-01-27T22:40:55.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-30T18:41:46.000Z", "avg_line_length": 29.8333333333, "max_line_length": 100, "alphanum_fraction": 0.7206703911, "include": true, "reason": "import numpy", "num_tokens": 273}
|
"""
This part of code is the DQN brain, which is a brain of the agent.
All decisions are made in here.
Using Tensorflow to build the neural network.
View more on my tutorial page: https://morvanzhou.github.io/tutorials/
Using:
Tensorflow: 1.0
gym: 0.7.3
"""
import numpy as np
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
import itertools
# np.random.seed(1)
# tf.set_random_seed(1)
# Deep Q Network off-policy
class DeepQNetwork:
def __init__(
self,
n_actions=4,
n_features=2,
learning_rate=0.01,
reward_decay=0.9,
e_greedy=0.8,
replace_target_iter=300,
memory_size=500,
batch_size=32,
len_max = 1000,
e_greedy_increment=None,
output_graph=False,
dueling = True
):
self.no_fea = n_features
self.n_actions = n_actions
self.n_features = n_features
self.lr = learning_rate
self.gamma = reward_decay
self.epsilon_max = e_greedy
self.replace_target_iter = replace_target_iter
self.memory_size = memory_size
self.batch_size = batch_size
self.epsilon_increment = e_greedy_increment
self.epsilon = 0 if e_greedy_increment is not None else self.epsilon_max
self.dueling = dueling
        self.len_max = len_max
# total learning step
self.learn_step_counter = 0
# initialize zero memory [s, a, r, s_]
        # n_features = 2 denotes (start, end); the extra +1 per state stores the segment length.
        self.memory = np.zeros((self.memory_size, (n_features+1) * 2 + 2 + self.len_max))  # len_max is the length of the stored indices
# consist of [target_net, evaluate_net]
self._build_net()
t_params = tf.get_collection('target_net_params')
e_params = tf.get_collection('eval_net_params')
self.replace_target_op = [tf.assign(t, e) for t, e in zip(t_params, e_params)]
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
self.sess = tf.Session(config=config)
if output_graph:
# $ tensorboard --logdir=logs
            # tf.train.SummaryWriter will soon be deprecated; use the following
tf.summary.FileWriter("logs/", self.sess.graph)
self.sess.run(tf.global_variables_initializer())
self.cost_his = [] # cost history
# Two nets have the same structure but different parameters.
# One with parameters eval_net_params, another with target_net_params.
def _build_net(self):
def build_layers(s, c_names, n_l1, w_initializer, b_initializer):
with tf.variable_scope('l1'):
w1 = tf.get_variable('w1', [self.n_features, n_l1], initializer=w_initializer, collections=c_names)
b1 = tf.get_variable('b1', [1, n_l1], initializer=b_initializer, collections=c_names)
l1 = tf.nn.relu(tf.matmul(s, w1) + b1)
if self.dueling:
# Dueling DQN
with tf.variable_scope('Value'):
w2 = tf.get_variable('w2', [n_l1, 1], initializer=w_initializer, collections=c_names)
b2 = tf.get_variable('b2', [1, 1], initializer=b_initializer, collections=c_names)
self.V = tf.matmul(l1, w2) + b2
with tf.variable_scope('Advantage'):
w2 = tf.get_variable('w2', [n_l1, self.n_actions], initializer=w_initializer, collections=c_names)
b2 = tf.get_variable('b2', [1, self.n_actions], initializer=b_initializer, collections=c_names)
self.A = tf.matmul(l1, w2) + b2
with tf.variable_scope('Q'):
out = self.V + (self.A - tf.reduce_mean(self.A, axis=1, keep_dims=True)) # Q = V(s) + A(s,a)
else:
with tf.variable_scope('Q'):
w2 = tf.get_variable('w2', [n_l1, self.n_actions], initializer=w_initializer, collections=c_names)
b2 = tf.get_variable('b2', [1, self.n_actions], initializer=b_initializer, collections=c_names)
out = tf.matmul(l1, w2) + b2
return out
# ------------------ build evaluate_net ------------------
self.s = tf.placeholder(tf.float32, [None, self.no_fea], name='s') # input
self.q_target = tf.placeholder(tf.float32, [None, self.n_actions], name='Q_target') # for calculating loss
# self.inputarray = tf.Variable(tf.constant(0, shape=[None, self.no_fea]), dtype=tf.float32)
# self.inputarray = self.make_input_row(self.s)
with tf.variable_scope('eval_net'):
# c_names(collections_names) are the collections to store variables
c_names, n_l1, w_initializer, b_initializer = \
['eval_net_params', tf.GraphKeys.GLOBAL_VARIABLES], 32, \
tf.random_normal_initializer(0., 0.3), tf.constant_initializer(0.1) # config of layers, 0 is mean, 0.3 is stddev
# first layer. collections is used later when assign to target net
with tf.variable_scope('l1'):
w1 = tf.get_variable('w1', [self.no_fea, n_l1], initializer=w_initializer, collections=c_names)
b1 = tf.get_variable('b1', [1, n_l1], initializer=b_initializer, collections=c_names)
l1 = tf.nn.relu(tf.matmul(self.s, w1) + b1)
# second layer. collections is used later when assign to target net
with tf.variable_scope('l2'):
w2 = tf.get_variable('w2', [n_l1, self.n_actions], initializer=w_initializer, collections=c_names)
b2 = tf.get_variable('b2', [1, self.n_actions], initializer=b_initializer, collections=c_names)
self.q_eval = tf.matmul(l1, w2) + b2
with tf.variable_scope('loss'):
# loss = [(q_target-q_eval)^2]/n where n = len(q_target)
self.loss = tf.reduce_mean(tf.squared_difference(self.q_target, self.q_eval))
with tf.variable_scope('train'):
self._train_op = tf.train.RMSPropOptimizer(self.lr).minimize(self.loss)
# ------------------ build target_net ------------------
self.s_ = tf.placeholder(tf.float32, [None, self.n_features], name='s_') # input, s_ denotes s' or s_{t+1}
# self.inputarray_ = self.make_input_row(self.s_)
with tf.variable_scope('target_net'):
# c_names(collections_names) are the collections to store variables
c_names = ['target_net_params', tf.GraphKeys.GLOBAL_VARIABLES]
# first layer. collections is used later when assign to target net
with tf.variable_scope('l1'):
w1 = tf.get_variable('w1', [self.no_fea, n_l1], initializer=w_initializer, collections=c_names)
b1 = tf.get_variable('b1', [1, n_l1], initializer=b_initializer, collections=c_names)
l1 = tf.nn.relu(tf.matmul(self.s_, w1) + b1)
# second layer. collections is used later when assign to target net
with tf.variable_scope('l2'):
w2 = tf.get_variable('w2', [n_l1, self.n_actions], initializer=w_initializer, collections=c_names)
b2 = tf.get_variable('b2', [1, self.n_actions], initializer=b_initializer, collections=c_names)
self.q_next = tf.matmul(l1, w2) + b2
def store_transition(self, s, a, r, indices, s_,):
if not hasattr(self, 'memory_counter'):
self.memory_counter = 0
# change the tuple s to narray s
s = np.asarray([x for xs in s for x in xs])
s_ = np.array([x for xs in s_ for x in xs])
# print 's, [a, r], s_', s, [a, r], s_
transition = np.hstack((s, [a, r], indices, s_))
# replace the old memory with new memory
index = self.memory_counter % self.memory_size
self.memory[index, :] = transition
self.memory_counter += 1
def make_input_row(self, state):
input_array = np.zeros(self.no_fea)
        print('sssss', state)
        state = state[0]
        print(state)
# state[0][0] is the start point in the state, state[0][-1] is the end point, make the attention bar as 1.
input_array[state[0]:state[-1]+1] = 1
input_array = input_array[np.newaxis, :]
return input_array
def choose_action(self, state):
# to have batch dimension when feed into tf placeholder
short_state = np.zeros(1, dtype=[('start', np.float32), ('end', np.float32)])
short_state['start'] = state['start']
short_state['end'] = state['end']
short_state = np.asarray([x for xs in short_state for x in xs])
short_state = short_state[np.newaxis, :]
# print state, state.shape,
# make the intial input_array
# input_array = self.make_input_row(state)
if np.random.uniform() < self.epsilon:
# forward feed the state and get q value for every actions
actions_value = self.sess.run(self.q_eval, feed_dict={self.s: short_state})
action = np.argmax(actions_value)
else:
action = np.random.randint(0, self.n_actions)
return action
def learn(self):
# check to replace target parameters
if self.learn_step_counter % self.replace_target_iter == 0:
self.sess.run(self.replace_target_op)
print('\ntarget_params_replaced\n')
# sample batch memory from all memory
if self.memory_counter > self.memory_size:
sample_index = np.random.choice(self.memory_size, size=self.batch_size)
else:
sample_index = np.random.choice(self.memory_counter, size=self.batch_size)
batch_memory = self.memory[sample_index, :]
q_next, q_eval = self.sess.run(
[self.q_next, self.q_eval],
feed_dict={
                # memory row layout: s = (start, end, len) | a | r | indices | s_ = (start, end, len);
                # the networks take only the (start, end) pair as input
                self.s_: batch_memory[:, -(self.n_features + 1):-1],  # (start, end) of s_, target net (fixed params)
                self.s: batch_memory[:, :self.n_features],            # (start, end) of s, eval net (newest params)
})
# change q_target w.r.t q_eval's action
q_target = q_eval.copy()
batch_index = np.arange(self.batch_size, dtype=np.int32)
        eval_act_index = batch_memory[:, self.n_features + 1].astype(int)  # action is stored after the 3-field state
        reward = batch_memory[:, self.n_features + 2]
q_target[batch_index, eval_act_index] = reward + self.gamma * np.max(q_next, axis=1) # q_next is q_target
"""
For example in this batch I have 2 samples and 3 actions:
q_eval =
[[1, 2, 3],
[4, 5, 6]]
q_target = q_eval =
[[1, 2, 3],
[4, 5, 6]]
Then change q_target with the real q_target value w.r.t the q_eval's action.
For example in:
sample 0, I took action 0, and the max q_target value is -1;
sample 1, I took action 2, and the max q_target value is -2:
q_target =
[[-1, 2, 3],
[4, 5, -2]]
So the (q_target - q_eval) becomes:
[[(-1)-(1), 0, 0],
[0, 0, (-2)-(6)]]
We then backpropagate this error w.r.t the corresponding action to network,
leave other action as error=0 cause we didn't choose it.
"""
# train eval network
_, self.cost = self.sess.run([self._train_op, self.loss],
feed_dict={self.s: batch_memory[:, :self.n_features],
self.q_target: q_target})
self.cost_his.append(self.cost)
# increasing epsilon. if epsilon_increment != None, the epsilon will gradually increase, the increment = epsilon_increment
self.epsilon = self.epsilon + self.epsilon_increment if self.epsilon < self.epsilon_max else self.epsilon_max
self.learn_step_counter += 1
def plot_cost(self):
plt.plot(np.arange(len(self.cost_his)), self.cost_his)
plt.ylabel('Cost')
plt.xlabel('training steps')
plt.show()
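# --- Smoke-test sketch (not from the original repository) ----------------------
# The real environment supplies structured (start, end, len) states and an
# attention-index vector; the dummy values below merely exercise
# choose_action / store_transition / learn once, with zero reward.
if __name__ == '__main__':
    dqn = DeepQNetwork(n_actions=4, n_features=2, len_max=1000)
    state = np.zeros(1, dtype=[('start', np.float32), ('end', np.float32),
                               ('len', np.float32)])
    state['start'], state['end'], state['len'] = 0.0, 1.0, 2.0
    indices = np.zeros(dqn.len_max)          # placeholder attention indices
    for _ in range(dqn.batch_size + 1):      # fill enough memory for one batch
        action = dqn.choose_action(state)
        dqn.store_transition(state, action, 0.0, indices, state)
    dqn.learn()                              # one parameter update
    dqn.plot_cost()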
|
{"hexsha": "1651023a5b526bdaedd0374551579005e2f41027", "size": 12001, "ext": "py", "lang": "Python", "max_stars_repo_path": "RL_brain.py", "max_stars_repo_name": "xiangzhang1015/adaptiveeegclassification", "max_stars_repo_head_hexsha": "57573e1411b2984d20dc4fe54e39cb5742fb2638", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "RL_brain.py", "max_issues_repo_name": "xiangzhang1015/adaptiveeegclassification", "max_issues_repo_head_hexsha": "57573e1411b2984d20dc4fe54e39cb5742fb2638", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "RL_brain.py", "max_forks_repo_name": "xiangzhang1015/adaptiveeegclassification", "max_forks_repo_head_hexsha": "57573e1411b2984d20dc4fe54e39cb5742fb2638", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.5567375887, "max_line_length": 130, "alphanum_fraction": 0.603116407, "include": true, "reason": "import numpy", "num_tokens": 2889}
|
# -*- coding: utf-8 -*-
"""
-------------------------------------------------------------------------------
Authors: Parshan Pakiman | https://parshanpakiman.github.io/
Selva Nadarajah | https://selvan.people.uic.edu/
Licensing Information: The MIT License
-------------------------------------------------------------------------------
"""
from scipy.stats import sem,t
import numpy as np
import pandas as pd
import os
from datetime import datetime
from shutil import copyfile
from itertools import chain, combinations
def index_unique_sub_list(input_list):
#--------------------------------------------------------------------------
    # Returns the indices of the first occurrences of the unique values in a list
#--------------------------------------------------------------------------
_, indices = np.unique(np.asarray(input_list), return_index=True,axis=0)
return indices
def mean_confidence_interval(data, confidence=0.95):
#--------------------------------------------------------------------------
# Computes confidence interval around mean
#--------------------------------------------------------------------------
a = 1.0 * np.array(data)
n = len(a)
m, se = np.mean(a), sem(a)
h = se * t.ppf((1 + confidence) / 2., n-1)
return m, m-h, m+h,se
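# Worked example (illustrative; not part of the original module): for
# data = [1.0, 2.0, 3.0, 4.0, 5.0], the sample mean is 3.0 and
# SE = sqrt(2.5)/sqrt(5) ~= 0.707; with t_{0.975, df=4} ~= 2.776 the 95%
# interval is roughly (1.04, 4.96), so the call returns ~(3.0, 1.04, 4.96, 0.707).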
def make_text_bold(string):
#--------------------------------------------------------------------------
# Makes a text bold in terminal
#--------------------------------------------------------------------------
return '{}{}{}'.format('\033[1m', string, '\033[0m')
class output_handler:
#--------------------------------------------------------------------------
# Collects and stores outputs of an algorithm.
#--------------------------------------------------------------------------
def __init__(self,instance_conf):
#----------------------------------------------------------------------
        # Initialization
#----------------------------------------------------------------------
self.mdp_name = instance_conf['mdp_conf']['mdp_name']
self.basis_func_type = instance_conf['basis_func_conf']['basis_func_type']
self.batch_size = instance_conf['basis_func_conf']['batch_size']
self.instance_number = instance_conf['mdp_conf']['instance_number']
self.state_relevance_inner_itr = instance_conf['greedy_pol_conf']['state_relevance_inner_itr']
self.output_table = pd.DataFrame()
self.path = None
self.filename = None
self.lb_filename = '/LowerBound_' + self.mdp_name +'.csv'
self.setup_output_path()
def setup_output_path(self):
#----------------------------------------------------------------------
# Set the path to store outputs
#----------------------------------------------------------------------
self.path = 'Output/' + self.mdp_name
assert os.path.isdir(self.path)
if not os.path.isdir(self.path + '/instance_'+self.instance_number):
os.mkdir(self.path + '/instance_'+self.instance_number)
self.path = self.path + '/instance_'+self.instance_number
copyfile('MDP/'+ self.mdp_name+ '/Instances/instance_'+self.instance_number+'.py', self.path + '/instance_'+self.instance_number+'.py')
def save_lower_bound(self,lower_bound_list):
#----------------------------------------------------------------------
# Save lower bound into a file
#----------------------------------------------------------------------
pd.DataFrame(lower_bound_list,columns=['# bases','# constrs','FALP Obj','ALP ConT', 'ALP SlvT','lb_mean', 'lb_lb','lb_ub', 'lb_se','LB RT','best_lower_bound','TOT RT']).to_csv(self.path + self.lb_filename)
def load_lower_bound(self):
#----------------------------------------------------------------------
# Load lower bound from a file
#----------------------------------------------------------------------
df = pd.read_csv(self.path + self.lb_filename)
df = df[['lb_mean', 'lb_lb','lb_ub', 'lb_se','best_lower_bound']]
return np.asarray(df.iloc[[-1]]).flatten()
def append_to_outputs( self,
algorithm_name: str, # FALP, FGLP
state_relevance_name: str, # uniform, (5,5,5), greedy_policy
basis_seed: int, # seed number for basis function
num_basis_func: int, # 10, 20, ...
num_constr: int, # num of constraints in ALP
FALP_obj: float, # value of ALP objective
ALP_con_runtime: float, # time to construct ALP to get VFA
                           ALP_slv_runtime: float,      # time to solve ALP to get VFA
best_lower_bound: float, # best lower bound on the optimal cost until the current iteration
lower_bound_lb: float, # 95% lower bound on the optimal cost lower bound
lower_bound_mean: float, # mean lower bound on the optimal cost
lower_bound_se: float, # standard error of the lower bound on the optimal cost
lower_bound_ub: float, # 95% upper bound on the optimal cost lower bound
                           lower_bound_runtime: float,  # runtime of computing lower bound on the optimal cost
best_policy_cost: float, # best upper bound (policy cost) on the optimal cost until the current iteration
policy_cost_lb: float, # 95% lower bound on the greedy policy cost
policy_cost_mean: float, # mean of the greedy policy cost
policy_cost_se: float, # standard error of greedy policy cost
policy_cost_ub: float, # 95% upper bound on the greedy policy cost
policy_cost_runtime: float, # runtime of computing greedy policy cost
total_runtime: float, # total runtime
SGFALP_obj: float = None,
SG_runtime: float = None,
):
#----------------------------------------------------------------------
# Having algorithm's results up to the current iteration, append
# new results to it.
#----------------------------------------------------------------------
self.filename = '/' + self.mdp_name + '_' + self.basis_func_type + '_' + algorithm_name + '_' +\
state_relevance_name+'_inner_update_'+str(self.state_relevance_inner_itr)+\
'_Batch_'+str(self.batch_size)+ '_seed_' + str(basis_seed) +'.csv'
        SGFALP_ = None if SGFALP_obj is None else [round(SGFALP_obj,1)]
        SG_runtime_ = None if SG_runtime is None else [round(SG_runtime,4)]
        if policy_cost_mean not in [0.0, float('inf')]:
opt_gap_ = 100*(policy_cost_mean - lower_bound_mean)/policy_cost_mean
else:
opt_gap_ = float('inf')
info =\
{ 'update time' : datetime.now().strftime("%d-%m-%Y - %H : %M"),
'mdp' : [self.mdp_name],
'algorithm' : [algorithm_name],
'basis_func_seed' : [basis_seed],
'state relevance' : [state_relevance_name],
'# bases' : [num_basis_func],
'# constrs' : [num_constr],
'FALP obj' : [round(FALP_obj,1)],
'SGFALP' : SGFALP_,
'ALP Constr time' : [round(ALP_con_runtime,4)],
'ALP Solve time' : [round(ALP_slv_runtime,4)],
'SG time' : SG_runtime_,
'best_lower_bound' : [round(best_lower_bound,1)],
'lower bound lb' : [round(lower_bound_lb,1)],
'lower bound mean' : [round(lower_bound_mean,1)],
'lower bound se' : [round(lower_bound_se,2)],
'lower bound ub' : [round(lower_bound_ub,1)],
'lower bound runtime' : [round(lower_bound_runtime,4)],
'best_policy_cost' : [round(best_policy_cost,1)],
'policy cost lb' : [round(policy_cost_lb,1)],
'policy cost mean' : [round(policy_cost_mean,1)],
'policy cost se' : [round(policy_cost_se,2)],
'policy cost ub' : [round(policy_cost_ub,1)],
'policy cost runtime' : [round(policy_cost_runtime,4)],
'tot runtime' : [round(total_runtime,4)],
'opt gap' : [round(opt_gap_,1)],
'lower bound fluctuation' : [round(100*(lower_bound_mean - best_lower_bound)/best_lower_bound,1)],
'policy cost fluctuation' : [round(100*(best_policy_cost - policy_cost_mean)/best_policy_cost,1)],
}
self.output_table = self.output_table.append(pd.DataFrame(info),ignore_index = True)
self.output_table.to_csv(self.path + self.filename)
def is_PIC_config_valid(config):
#--------------------------------------------------------------------------
# Add assertion if you need to check an instance of the PIC application
# is "valid". This function is called inside each instance.
#--------------------------------------------------------------------------
pass
def prune_similar_columns(matrix,threshold):
#--------------------------------------------------------------------------
# Prune similar columns of a matrix; not used in the current code.
#--------------------------------------------------------------------------
already_considered = []
similar_columns = []
for i in range(len(matrix.T)):
column = matrix.T[i]
if not i in already_considered:
column = np.asarray([column]).T
diff = column - matrix
norm = np.max(np.abs(diff),axis=0)
index = [_ for _ in range(len(norm)) if norm[_] < threshold]
already_considered += index
similar_columns.append((i,index))
keep = [similar_columns[_][0] for _ in range(len(similar_columns))]
remove = [_ for _ in range(len(similar_columns)) if not _ in keep]
return remove
class output_handler_option_pricing:
#--------------------------------------------------------------------------
# Collects and stores outputs of an algorithm.
#--------------------------------------------------------------------------
def __init__(self,instance_conf):
#----------------------------------------------------------------------
        # Initialization
#----------------------------------------------------------------------
self.mdp_name = instance_conf['mdp_conf']['mdp_name']
self.state_relevance_type = instance_conf['mdp_conf']['state_relevance_type']
self.basis_func_type = instance_conf['basis_func_conf']['basis_func_type']
self.batch_size = instance_conf['basis_func_conf']['batch_size']
self.instance_number = instance_conf['mdp_conf']['instance_number']
self.output_table = pd.DataFrame()
self.path = None
self.filename = None
self.setup_output_path()
def setup_output_path(self):
#----------------------------------------------------------------------
# Set the path to store outputs
#----------------------------------------------------------------------
self.path = 'Output/' + self.mdp_name
assert os.path.isdir(self.path)
if not os.path.isdir(self.path + '/instance_'+self.instance_number):
os.mkdir(self.path + '/instance_'+self.instance_number)
self.path = self.path + '/instance_'+self.instance_number
copyfile('MDP/'+ self.mdp_name+ '/Instances/instance_'+self.instance_number+'.py', self.path + '/instance_'+self.instance_number+'.py')
def append_to_outputs( self,
algorithm_name: str, # FALP, FGLP
basis_seed: int, # seed number for basis function
num_basis_func: int, # 10, 20, ...
num_constr: int, # num of constraints in ALP
FALP_obj: float, # value of ALP objective
ALP_con_runtime: float, # time to construct ALP to get VFA
                           ALP_slv_runtime: float,      # time to solve ALP to get VFA
                           train_LB_mean: float,        # mean lower bound on the training sample paths
                           train_LB_SE: float,          # standard error of the lower bound on the training sample paths
                           test_LB_mean: float,         # mean lower bound on the test sample paths
                           test_LB_SE: float,           # standard error of the lower bound on the test sample paths
                           test_LB_runtime: float,      # runtime of computing the lower bound on the test sample paths
upp_bound = 0.0,
upp_bound_sd = 0.0,
best_upp_bound = 0.0,
upp_bound_runtime = 0.0,
train_opt_gap = 0.0,
test_opt_gap = 0.0,
total_runtime = 0.0, # total runtime
):
#----------------------------------------------------------------------
# Having algorithm's results up to the current iteration, append
# new results to it.
#----------------------------------------------------------------------
self.filename = '/' + self.mdp_name + '_' + self.basis_func_type + '_' + algorithm_name + '_'+ self.state_relevance_type + '_batch_'+str(self.batch_size)+ '_seed_' + str(basis_seed) +'.csv'
info ={ 'update time' : datetime.now().strftime("%d-%m-%Y - %H : %M"),
'mdp' : [self.mdp_name],
'algorithm' : [algorithm_name],
'basis_func_seed' : [basis_seed],
'# bases' : [num_basis_func],
'# constrs' : [num_constr],
'FALP obj' : [round(FALP_obj,1)],
'ALP Constr time' : [round(ALP_con_runtime,4)],
'ALP Solve time' : [round(ALP_slv_runtime,4)],
'Train pol cost mean' : [round(train_LB_mean,4)],
'Train pol cost SE' : [round(train_LB_SE,4)],
'Test pol cost mean' : [round(test_LB_mean,4)],
'Test pol cost SE' : [round(test_LB_SE,4)],
                'Test pol runtime'      : [round(test_LB_runtime,1)],
'Upper Bound' : [round(upp_bound,4)],
'Upper Bound SD' : [round(upp_bound_sd,4)],
'Best Upper Bound' : [round(best_upp_bound,4)],
                'Upper Bound runtime'   : [round(upp_bound_runtime,1)],
'Train Opt Gap' : [round(train_opt_gap,4)],
'Test Opt Gap' : [round(test_opt_gap,4)],
'tot runtime' : [round(total_runtime,4)],
}
self.output_table = self.output_table.append(pd.DataFrame(info),ignore_index = True)
self.output_table.to_csv(self.path + self.filename)
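# --- Configuration sketch (hypothetical values; not part of the original code) --
# The real instance_conf dicts live under MDP/<mdp_name>/Instances/ and the
# Output/<mdp_name> directory must already exist (setup_output_path asserts it),
# so this is left as an outline rather than executable code. Only the keys read
# by output_handler_option_pricing.__init__ are shown; all values are examples.
#
#   instance_conf = {
#       'mdp_conf': {'mdp_name': 'BerOpt',            # hypothetical MDP name
#                    'instance_number': '1',
#                    'state_relevance_type': 'uniform'},
#       'basis_func_conf': {'basis_func_type': 'fourier',
#                           'batch_size': 50},
#   }
#   handler = output_handler_option_pricing(instance_conf)
#   handler.append_to_outputs(algorithm_name='FALP', basis_seed=0,
#                             num_basis_func=50, num_constr=10000, FALP_obj=1.0,
#                             ALP_con_runtime=0.5, ALP_slv_runtime=0.5,
#                             train_LB_mean=0.9, train_LB_SE=0.01,
#                             test_LB_mean=0.9, test_LB_SE=0.01,
#                             test_LB_runtime=1.0)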
|
{"hexsha": "eb7d5c676f371f65392c2cc9623df5cd4229049f", "size": 16517, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils.py", "max_stars_repo_name": "Self-guided-Approximate-Linear-Programs/Self-guided-ALPs-and-Related-Benchmarks", "max_stars_repo_head_hexsha": "5dd2b4a10fd9a4be3aa2456c70a2045f935bb63b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2020-01-13T02:12:18.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-30T20:08:01.000Z", "max_issues_repo_path": "utils.py", "max_issues_repo_name": "Self-guided-Approximate-Linear-Programs/Self-guided-ALPs-and-Related-Benchmarks", "max_issues_repo_head_hexsha": "5dd2b4a10fd9a4be3aa2456c70a2045f935bb63b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "utils.py", "max_forks_repo_name": "Self-guided-Approximate-Linear-Programs/Self-guided-ALPs-and-Related-Benchmarks", "max_forks_repo_head_hexsha": "5dd2b4a10fd9a4be3aa2456c70a2045f935bb63b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2020-01-21T07:29:25.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-31T08:48:06.000Z", "avg_line_length": 57.7517482517, "max_line_length": 213, "alphanum_fraction": 0.4454804141, "include": true, "reason": "import numpy,from scipy", "num_tokens": 3188}
|
\chapter{Background}
\ifpdf
\graphicspath{{Chapter2/Figs/Raster/}{Chapter2/Figs/PDF/}{Chapter2/Figs/}}
\else
\graphicspath{{Chapter2/Figs/Vector/}{Chapter2/Figs/}}
\fi
The goal of this chapter is to provide only the necessary background in
recurrent neural networks and generative probabilistic sequence modelling
required for understanding our models, experiments, and results. It also
introduces some common definitions and clarifies notation used throughout later chapters.
A basic understanding of Western music theory and neural networks is assumed.
Readers unfamiliar with concepts such as piano rolls, Roman numeral analysis,
and cadences, should review \vref{sec:music-theory-primer} for a quick primer
and \citet{piston1978harmony,denny1960oxford} for more thorough coverage.
Likewise, those who wish to review concepts such as activation functions,
neurons, and applying recurrent neural networks over arbitrary length sequences
are advised to review \vref{sec:primer-nn} and consult
\citet{bengio2009learning} for further reference.
\section{Recurrent neural networks}\label{sec:bg-rnn}
Our use of the term \emph{recurrent neural network} (RNN) refers in particular
to linear Elman-type RNNs \citep{elman1990finding} whose dynamics are described
by \vref{eq:rnn-dynamics} (review \cref{sec:primer-nn} if this is unfamiliar).
\subsection{Notation}
\nomenclature[z-RNN]{RNN}{Recurrent Neural Network}
\nomenclature[a-input]{$\x$}{layer inputs}
\nomenclature[a-hidden-state]{$\h$}{hidden state (\ie memory cell contents)}
\nomenclature[a-output]{$\y$}{layer outputs}
\nomenclature[a-Wxh]{$\W$}{weight matrix}
\nomenclature[a-Nin]{$N_{in}$}{dimensionality of inputs}
\nomenclature[a-Nhid]{$N_{hid}$}{dimensionality of hidden state}
\nomenclature[a-Nout]{$N_{out}$}{dimensionality of outputs}
\nomenclature[a-T]{$T$}{total number of timesteps in a sequence}
\nomenclature[g-sxh]{$\sigma$}{elementwise activation function}
\nomenclature[g-th]{$\theta$}{model parameters}
\nomenclature[r-l]{$(l)$}{layer index in multi-layer networks}
\nomenclature[s-t]{$t$}{time index}
\nomenclature[s-st]{$st$}{connections from source $s$ to target $t$}
We begin by clarifying common notation and conventions used to describe RNNs.
Unless otherwise specified, future use of notation should be interpreted as
defined in this section.
We use the subscript $t \in \{ 1,2,\cdots,T \}$ to denote the \emph{time index}
within a sequence of length $T \in \NN$.
A sequence of \emph{inputs} is denoted by $\x$, and the sequence element at timestep $t$ is
denoted by $\x_t \in \RR^{N_{in}}$ and assumed to have dimensionality $N_{in} \in \NN$.
Similarly, $\h_t \in \RR^{N_{hid}}$ and $\y_t \in \RR^{N_{out}}$ denote elements
from the \emph{hidden state} and \emph{output} sequences respectively.
To describe model parameters, we use $\W$ to indicate a real-valued
\emph{weight matrix} consisting of all the connection weights between two sets
of neurons and $\sigma(\cdot)$ to indicate an elementwise \emph{activation
function}. The collection of all model parameters is denoted by $\vec{\theta}$.
When further clarity is required, we use subscripts $\W_{st}$ to denote the
connection weights from a set of neurons $s$ to another set of neurons $t$ (\ie
in \vref{sec:LSTM}, $\W_{xf}$ and $\W_{xh}$ refer to the connections from the
inputs to the forget gate and hidden state respectively). Subscripts on
activation functions $\sigma_{s,t}(\cdot)$ are to be interpreted analogously.
Equipped with the above notation, the equations for RNN time dynamics can be
expressed as
\begin{equation}\label{eq:rnn-dynamics}
\left.\begin{aligned}
\h_t &=& \W_{xh} \sigma_{xh} \left( \x_t \right) + \W_{hh} \sigma_{hh} \left( \h_{t-1} \right)\\
\y_t &=& \W_{hy} \sigma_{hy} \left( \h_t \right)
\end{aligned}
\right\}
\qquad \text{RNN time dynamics}
\end{equation}
When discussing multi-layer networks, we use $L \in \NN$ to denote the total number
of layers and parenthesized superscripts $(l)$ for $l \in \{1,2,\cdots,L\}$ to
indicate the layer. For example, $\h^{(2)}_t$ is the hidden state of the
second layer and $N^{(3)}_{in}$ is the dimensionality of the third layer's
inputs $\x^{(3)}_t$. Unless stated otherwise, multi-layer networks will assume
that the outputs of the $l-1$st layer are used as the inputs of the $l$th layer
(\ie $\forall t: \x^{(l)}_t = \y^{(l-1)}_t$).
\subsection{The memory cell abstraction}
While a large number of proposed RNN variants exist
\citep{elman1990finding,jordan1997serial,hochreiter1997long,cho2014learning,Koutnik2014,Mikolov2015},
most share the same underlying structure and differ only in their
implementation details of \cref{eq:rnn-dynamics}. Encapsulating these
differences within an abstraction enables general discussion about RNN
architecture without making a specific choice on implementation.
To do so, we introduce the \emph{memory cell} abstraction to encapsulate the
details of computing $\y_t$ and $\h_t$ from $\x_t$ and $\h_{t-1}$. This is
illustrated visually in \cref{fig:rnn-elman}, which shows a standard Elman-type
RNN \citep{elman1990finding} with the memory cell indicated by a dashed box
isolating the recurrent hidden state. The edges entering the memory cell
($\x_t$, $\h_{t-1}$) are the \emph{memory cell inputs} and the outgoing edges
($\y_t$, $\h_t$) are the \emph{memory cell outputs}. In essence, the memory
cell abstracts away differences across RNN variants in their implementation of
\cref{eq:rnn-dynamics}.
\begin{figure}[tb]
\centering
\input{Chapter2/Figs/nn-rnn-elman.pdf_tex}
\caption{An Elman-type RNN with a single hidden layer. The recurrent hidden
state is illustrated as unit-delayed (denoted by $z^{-1}$) feedback edges
from the hidden states to the input layer. The memory cell encapsulating the
hidden state is also shown.}
\label{fig:rnn-elman}
\end{figure}
\subsection{Operations on RNNs: stacking and unrolling}
\subsubsection{Stacking memory cells to form deep RNNs}
Just like deep neural networks, RNNs can be \emph{stacked} to form deep RNNs
\citep{el1995hierarchical,schmidhuber1992learning} by treating the
outputs from the $l-1$st layer's memory cells as inputs to the $l$th layer (see \cref{fig:rnn-multi-unrolled}).
\begin{figure}[tb]
\centering
\resizebox{4.5in}{!}{\input{Chapter2/Figs/rnn-multi-unrolled.pdf_tex}}
\caption{Block diagram representation of a 2-layer RNN (left) and its
corresponding DAG (right) after unrolling. The blocks labelled
with $\h_t$ represent memory cells whose parameters are shared across all times
$t$.}
\label{fig:rnn-multi-unrolled}
\end{figure}
Prior work has observed that ``deep RNNs outperformed the conventional, shallow RNN''
\citep{pascanu2013construct}, affirming the importance of stacking multiple layers
in RNNs. The improved modelling can be attributed to two primary
factors: composition of multiple non-linear activation functions and an
increase in the number of paths for backpropagated error signals to flow. The
former reason is analogous to the case in deep belief networks, which is well
documented \citep{bengio2009learning}. To understand the latter, notice that in
a single-layer RNN there is only a single path from $\x_{t-1}$ to
$\y_{t}$, hence the conditional independence $\y_{t} \independent \x_{t-1} |
\h^{(1)}_t$ is satisfied. However, in \cref{fig:rnn-multi-unrolled} there are
multiple paths from $\x_{t-1}$ to $\y_{t}$ (\eg passing through either
$\h^{(2)}_{t-1} \to \h^{(2)}_t$ or $\h^{(1)}_{t-1} \to \h^{(1)}_t$) through
which information may flow.
\subsubsection{Unrolling RNNs into directed acyclic graphs}
\nomenclature[z-DAG]{DAG}{Directed Acyclic Graph}
Given an input sequence $\{\x_t\}_{t=1}^T$, an RNN can be \emph{unrolled} into a
\emph{directed acyclic graph} (DAG) comprised of $T$ copies of the memory cell
connected forwards in time. This is illustrated for a stacked 2-layer RNN in
\cref{fig:rnn-multi-unrolled}, where the vectors $\y_t$, $\h_t$, and $\x_t$ are
depicted as blocks and the $\h_t$ is understood to represent a memory cell.
% \begin{figure}[tb]
% \centering
% \resizebox{4.5in}{!}{\input{Chapter2/Figs/rnn-single-unrolled.pdf_tex}}
% \caption{Signal flow diagram representation of a single-layer RNN (left) and its
% corresponding DAG (right) after unrolling. The blocks labelled
% with $\h_t$ represent memory cells whose parameters are shared across all times
% $t$.}
% \label{fig:rnn-single-unrolled}
% \end{figure}
\Cref{fig:rnn-multi-unrolled} shows that the hidden state $\h_t$ is passed
forwards throughout the sequence of computations. This gives rise to an
alternative interpretation of the hidden state as a temporal memory mechanism.
Under this interpretation, updating the hidden state $\h_t$ can be viewed as
\emph{writing} information from the current inputs $\x_t$ to memory and
producing the outputs $\y_t$ can be interpreted as \emph{reading} information
from memory.
% Additionally, parameters need not be shared
% across different layers so the stacked RNN can learn different time dynamics
% for each layer.
\subsection{Training RNNs and backpropagation through time}
\nomenclature[z-BPTT]{BPTT}{Backpropagation Through Time}
\nomenclature[o-E]{$\mathcal{E}$}{error or loss}
\nomenclature[o-Et]{$\mathcal{E}_t$}{error or loss at time $t$}
The parameters $\vec{\theta}$ of a RNN are typically learned from data by
minimizing some \emph{cost} $\mathcal{E} = \sum_{1 \leq t \leq T}
\mathcal{E}_t(\x_t)$ measuring the performance of the network on some task.
This optimization is usually performed using iterative methods which require
computation of gradients $\frac{\pd \mathcal{E}}{\pd \vec{\theta}}$ at each
iteration.
In feed-forward networks, computation of gradients can be performed efficiently
using backpropagation
\citep{bryson1963optimal,linnainmaa1970representation,rumelhart1988learning}.
While time-delayed recurrent hidden state connections appear to complicate
matters initially, unrolling the RNN removes the time-delayed recurrent edges
and converts the RNN into a DAG (\eg \vref{fig:rnn-multi-unrolled}) which can
be interpreted as a $T$ layered feed-forward neural network with parameters
shared across all $T$ layers.
This view of unrolled RNNs as feedforward networks motivates
\emph{backpropagation through time} (BPTT) \citep{goller1996learning}, a method
for training RNNs which applies backpropagation to the unrolled DAG.
\begin{figure}[tb]
\centering
\input{Chapter2/Figs/rnn-bptt.pdf_tex}
\caption{The gradients accumulated along network edges in BPTT.}
\label{fig:rnn-bptt}
\end{figure}
\Cref{fig:rnn-bptt} shows how BPTT, just like regular backpropagation, divides
the computation of a global gradient $\frac{\pd \mathcal{E}}{\pd \theta}$ into
a series of local gradient computations, each of which involves significantly
less variables and is hence cheaper to compute. However, whereas the depth of
feedforward networks is fixed, the unrolled RNN's depth is equal to the input
sequence length $T$ and may introduce problems when $T$ is very large.
\subsubsection{Vanishing/exploding gradients}
It is well known that naive implementations of memory cells often suffer from
two problems also affecting very deep feedforward networks: the \emph{vanishing
gradient} and \emph{exploding gradient} \citep{Bengio1994}.
To illustrate the problem, express the computation represented by
\cref{fig:rnn-bptt} mathematically by applying the chain rule to
the RNN dynamics equation (\vref{eq:rnn-dynamics}):
\begin{align}
\frac{\pd \mathcal{E}}{\pd \vec{\theta}} &= \sum_{1 \leq t \leq T} \frac{\pd \mathcal{E}_t}{\pd \vec{\theta}} \label{eq:err-total}\\
\frac{\pd \mathcal{E}_t}{\pd \vec{\theta}} &= \sum_{1 \leq k \leq t} \left(
\frac{\pd \mathcal{E}_t}{\pd \y_t}
\frac{\pd \y_t}{\pd \h_t}
\frac{\pd \h_t}{\pd \h_k}
\frac{\pd \h_k}{\pd \vec{\theta}}
\right) \label{eq:error-t}\\
\frac{\pd \h_t}{\pd \h_k} &=
\prod_{t \geq i > k} \frac{\pd \h_i}{\pd \h_{i-1}}
= \prod_{t \geq i > k} \W_{hh}^\tp \diag \left( \sigma_{hh}'( \h_{i-1} ) \right)
\label{eq:error-transfer}
\end{align}
\Cref{eq:error-t} expresses how the error $\mathcal{E}_t$ at time $t$ is a sum
of \emph{temporal contributions} $
\frac{\pd \mathcal{E}_t}{\pd \y_t}
\frac{\pd \y_t}{\pd \h_t}
\frac{\pd \h_t}{\pd \h_k}
\frac{\pd \h_k}{\pd \vec{\theta}}$
measuring how $\vec{\theta}$'s impact on $\h_k$ affects the cost
$\mathcal{E}_t$ at some future time $t > k$. The quantity
$\frac{\pd \h_t}{\pd \h_k}$ in \cref{eq:error-transfer} measures the effect of
the hidden state $\h_k$ on some future state $\h_t$ where $t > k$ and can be
interpreted as transferring the error ``in time'' from step $t$ back to step
$k$ \citep{Pascanu2012}.
Both vanishing and exploding gradients are due to the product in
\cref{eq:error-transfer} exponentially growing or shrinking over long
time-spans (\ie $t \gg k$), preventing error signals from being transferred across
long time-spans and hindering the learning of long-term dependencies. In
\vref{sec:vanishing-exploding-gradients} we prove that a sufficient condition
for vanishing gradients is:
\begin{equation}\label{eq:vanishing-gradients-suff}
\left\| \W_{hh} \right\| < \frac{1}{\gamma_\sigma}
\end{equation}
where $\| \cdot \|$ is the matrix operator norm (see \vref{eq:operator-norm}),
$\W_{hh}$ is as defined in \vref{eq:rnn-dynamics},
and $\gamma_\sigma$ is a constant depending on the choice of activation function
(\eg $\gamma_\sigma = 1$ for $\sigma_{hh} = \tanh$, $\gamma_\sigma = 0.25$ for
$\sigma_{hh} = \sigmoid$).
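As a concrete illustration of \cref{eq:vanishing-gradients-suff}, take
$\sigma_{hh} = \tanh$ (so $\gamma_\sigma = 1$) and $\left\| \W_{hh} \right\| = 0.9$.
Applying the norm bound to \cref{eq:error-transfer} gives
$\left\| \frac{\pd \h_t}{\pd \h_k} \right\| \leq 0.9^{\,t-k}$, which for a lag of
$t - k = 50$ timesteps is already below $0.006$: error signals from $50$ steps in
the past arrive attenuated by more than two orders of magnitude.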
This difficulty learning relationships between events spaced far apart in time
presents a significant challenge for music applications. As noted by
\citet{cooper1963rhythmic}:
\begin{quote}
Long-term dependencies are at the heart of what defines a style of music, with
events spanning several notes or bars contributing to the formation of metrical and phrasal
structure.
\end{quote}
\subsection{Long short term memory: solving the vanishing gradient}\label{sec:LSTM}
\nomenclature[z-LSTM]{LSTM}{Long Short Term Memory}
\nomenclature[z-CEC]{CEC}{Constant Error Carousel}
\nomenclature[a-input-gate]{$\i_t$}{input gate values at time $t$}
\nomenclature[a-forget-gate]{$\f_t$}{forget gate values at time $t$}
\nomenclature[a-output-gate]{$\o_t$}{output gate values at time $t$}
\nomenclature[x-odot]{$\odot$}{elementwise multiplication}
In order to build a model which learns long range dependencies, vanishing
gradients must be avoided. A popular memory cell architecture which does so is
\emph{long short term memory} (LSTM). Proposed by \citet{hochreiter1997long},
LSTM solves the vanishing gradient problem by enforcing \emph{constant error
flow} on \cref{eq:error-transfer}, that is
\begin{equation}\label{eq:const-err-flow}
\forall t, \forall \h_t: \W_{hh}^\tp \sigma_{hh}' (\h_{t}) = \matr{I}
\end{equation}
where $\matr{I}$ is the identity matrix.
As a consequence of constant error flow, \vref{eq:error-transfer} becomes
\begin{equation}
\frac{\pd \h_t}{\pd \h_k}
= \prod_{t \geq i > k} \W_{hh}^\tp \diag \left( \sigma_{hh}'( \h_{i-1} ) \right)
= \prod_{t \geq i > k} \matr{I}
= \matr{I}
\end{equation}
The dependence on the time-interval $t-k$ is no longer present, ameliorating
the exponential decay causing vanishing gradients and enabling long-range
dependencies (\ie $t \gg k$) to be learned.
Integrating \cref{eq:const-err-flow} with respect to $\h_t$ yields $\W_{hh}
\sigma_{hh}(\h_{t}) = \h_{t}$. Since this must hold for any hidden state
$\h_{t}$, this means that:
\begin{enumerate}
\item $\W_{hh}$ must be full rank
\item $\sigma_{hh}$ must be linear
\item $\W_{hh} \sigma_{hh} = \matr{I}$
\end{enumerate}
In the \emph{constant error carousel} (CEC), this is ensured by setting
$\sigma_{hh} = \W_{hh} = \I$. This may be interpreted as removing time dynamics
on $\h$ in order to permit error signals to be transferred backwards in time
(\cref{eq:error-transfer}) without modification (\ie $\forall t \geq k: \frac{\pd
\h_t}{\pd \h_k} = \I$).
In addition to using a CEC, a LSTM introduces three gates controlling access to the CEC:
\begin{description}
\item[Input gate]: scales input $\x_t$ elementwise by $\i_t \in [0,1]$, \emph{writes} to $\h_t$
\item[Output gate]: scales output $\y_t$ elementwise by $\o_t \in [0,1]$, \emph{reads} from $\h_t$
\item[Forget gate]: scales previous cell value $\h_{t-1}$ by $\f_t \in [0,1]$, \emph{resets} $\h_t$
\end{description}
Mathematically, the LSTM model is defined by the following set of equations:
\begin{align}
\i_t &= \sigmoid(\W_{xi} \x_t + \W_{yi} \y_{t-1} + \b_i) \\
\o_t &= \sigmoid(\W_{xo} \x_t + \W_{yo} \y_{t-1} + \b_o) \\
\f_t &= \sigmoid(\W_{xf} \x_t + \W_{yf} \y_{t-1} + \b_f) \\
\h_t &= \f_t \odot \h_{t-1} + \i_t \odot \tanh(\W_{xh}\x_t + \W_{yh} \y_{t-1} + \b_h) \\
\y_t &= \o_t \odot \tanh(\h_t)
\end{align}
where $\odot$ denotes elementwise multiplication of vectors.
Notice that the gates ($\i_t$, $\o_t$, and $\f_t$) controlling flow in and out
of the CEC are time dependent. This permits interpreting the gates as a
mechanism enabling LSTM to learn which error signals to trap in the CEC and
when to release them \citep{hochreiter1997long}, allowing error signals to
potentially be transported across long time lags.
\begin{figure}[tb]
\centering
\input{Chapter2/Figs/lstm-unit-2.pdf_tex}
\caption{Schematic for a single LSTM memory cell. Notice how the gates $\i_t$, $\o_t$, and $\f_t$ control access to the constant error carousel (CEC).}
\label{fig:lstm-cell}
\end{figure}
Some authors define LSTM such that $\h_t$ is not used to compute gate
activations, referring to \cref{fig:lstm-cell} as LSTM with ``peephole
connections'' \citep{gers2000recurrent}. We will use LSTM to refer to the
model as described above.
\subsubsection{Practicalities for successful applications of LSTM}
Many successful applications of LSTM
\citep{devlin2014fast,zaremba2015empirical,pascanu2013construct} employ some
common practical techniques. Perhaps most important is \emph{gradient norm
clipping} \citep{Mikolov2012,Pascanu2012} where the gradient is scaled or
clipped whenever it exceeds a threshold. This is necessary because while
vanishing gradients are mitigated by CECs, LSTM do not explicitly protect
against exploding gradients.
Another common practice is the use of methods for reducing overfitting and
improving generalization. In particular, \emph{dropout}
\citep{hinton2012improving} is commonly applied between stacked memory cell
layers to regularize the learned features and prevent co-adaptation
\citep{zaremba2014recurrent}. Additionally, \emph{batch normalization}
\citep{ioffe2015batch} of memory cell hidden states is also commonly done to
reduce co-variate shifts, accelerate training, and improve generalization.
Finally, applications of RNNs to long sequences can incur a prohibitively high
cost for a single parameter update \citep{citeulike:13881859}. For instance,
computing the gradient of an RNN on a sequence of length $1000$ costs the
equivalent of a forward and backward pass on a $1000$ layer feed-forward
network. This issue is typically addressed by only back-propagating error
signals a fixed number of timesteps back in the unrolled network, a technique
known as \emph{truncated BPTT} \citep{williams1990efficient}. As the hidden
states in the unrolled network have already been exposed to many previous
timesteps, learning of long range structure is still possible with truncated
BPTT.
|
{"hexsha": "5675d9ce803fd16ba7b44d63e3c1371611d42be0", "size": 19606, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "Chapter2/chapter2.tex", "max_stars_repo_name": "feynmanliang/bachbot-thesis", "max_stars_repo_head_hexsha": "08abadd3d5f4960d8c40f48c0e46622aa507bf4b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Chapter2/chapter2.tex", "max_issues_repo_name": "feynmanliang/bachbot-thesis", "max_issues_repo_head_hexsha": "08abadd3d5f4960d8c40f48c0e46622aa507bf4b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Chapter2/chapter2.tex", "max_forks_repo_name": "feynmanliang/bachbot-thesis", "max_forks_repo_head_hexsha": "08abadd3d5f4960d8c40f48c0e46622aa507bf4b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 50.2717948718, "max_line_length": 155, "alphanum_fraction": 0.7440579414, "num_tokens": 5728}
|
# From stockcharts.com:
#
# Interpretation
#
# The Aroon indicators fluctuate above/below a centerline (50) and
# are bound between 0 and 100. These three levels are important for
# interpretation. At its most basic, the bulls have the edge when
# Aroon-Up is above 50 and Aroon-Down is below 50. This indicates a
# greater propensity for new x-day highs than lows. The converse is
# true for a downtrend. The bears have the edge when Aroon-Up is
# below 50 and Aroon-Down is above 50.
#
# A surge to 100 indicates that a trend may be emerging. This can
# be confirmed with a decline in the other Aroon indicator. For
# example, a move to 100 in Aroon-Up combined with a decline below
# 30 in Aroon-Down shows upside strength. Consistently high readings
# mean prices are regularly hitting new highs or new lows for the
# specified period. Prices are moving consistently higher when Aroon-Up
# remains in the 70-100 range for an extended period. Conversely,
# consistently low readings indicate that prices are seldom hitting new
# highs or lows. Prices are NOT moving lower when Aroon-Down remains in
# the 0-30 range for an extended period. This does not mean prices are
# moving higher though. For that we need to check Aroon-Up.
#
# New Trend Emerging
#
# There are three stages to an emerging trend signal. First, the Aroon
# lines will cross. Second, the Aroon lines will cross above/below 50.
# Third, one of the Aroon lines will reach 100. For example, the first
# stage of an uptrend signal is when Aroon-Up moves above Aroon-Down. This
# shows new highs becoming more recent than new lows. Keep in mind that
# Aroon measures the time elapsed, not the price. The second stage is when
# Aroon-Up moves above 50 and Aroon-Down moves below 50. The third stage is
# when Aroon-Up reaches 100 and Aroon-Down remains at relatively low levels.
# The first and second stages do not always occur in that order. Sometimes
# Aroon-Up will break above 50 and then above Aroon-Down. Reverse engineering
# the uptrend stages will give you the emerging downtrend signal. Aroon-Down
# breaks above Aroon-Up, breaks above 50 and reaches 100.
from ..indicators import Indicator, SignalStrengthTypes, SignalTypes
from tradingtools.utils.equitydata import PastQuoteDataKeys
from numpy import subtract  # built-in max/min suffice for the list segments below
from datetime import datetime
WINDOW_SIZE = 20
NUM_PERIODS = 5
class Aroon(Indicator):
def __init__(self, num_periods=NUM_PERIODS, window_size=WINDOW_SIZE):
super(Aroon, self).__init__()
self._num_periods = num_periods
self._window_size = window_size
_title = 'Aroon'
_description_url = 'http://www.investopedia.com/terms/a/aroon.asp'
    def window(self, window_size=None):
        # Combined getter/setter: with no argument, return the current window
        # size; with a positive argument, update it.
        if window_size is None:
            return self._window_size
        if window_size > 0:
            self._window_size = window_size
def calculate(self, high_data, low_data):
"""
Calculates the Aroon for historic data
:param historic_data: List of floats representing past data (price, volume, etc)
:return: List of floats representing the averages
"""
if len(high_data) != len(low_data):
            raise ValueError('Aroon error: high and low data have different lengths')
if len(high_data) < self._window_size:
            print('Aroon requires data for at least one window')
            return [], []
aroon_up = []
aroon_down = []
for n in range(len(high_data) - self._window_size):
high_segment = high_data[n:n+self._window_size + 1]
low_segment = low_data[n:n+self._window_size + 1]
max_index = high_segment.index(max(high_segment))
min_index = low_segment.index(min(low_segment))
aroon_up.append(100.0 * max_index / self._window_size)
aroon_down.append(100.0 * min_index / self._window_size)
return aroon_up, aroon_down
    def calculate_for_symbol(self, symbol, end_date=None):
        """
        Calculates Aroon for a given symbol
        :param symbol: String stock symbol for which to calculate Aroon
        :param end_date: Date most recent date over which to calculate
        :return: Tuple consisting of 2 lists (aroon_up, aroon_down)
        """
        if end_date is None:
            end_date = datetime.today()  # evaluate at call time, not at import time
        data = self._data_for_symbol(symbol, self._num_periods + self._window_size, end_date, key=None)
high_data = [x[PastQuoteDataKeys.ADJ_HIGH] for x in data]
low_data = [x[PastQuoteDataKeys.ADJ_LOW] for x in data]
return self.calculate(high_data, low_data)
def analyze(self, high_data, low_data):
aroon_up, aroon_down = self.calculate(high_data, low_data)
aroon_delta = subtract(aroon_up, aroon_down)
signal_type = SignalTypes.NEUTRAL
if aroon_down[-1] < 50 < aroon_up[-1]:
signal_type = SignalTypes.BULLISH
elif aroon_up[-1] < 50 < aroon_down[-1]:
signal_type = SignalTypes.BEARISH
if aroon_delta[-1] > 50:
signal_strength = SignalStrengthTypes.STRONG
elif aroon_delta[-1] > 20:
signal_strength = SignalStrengthTypes.MODEST
else:
signal_strength = SignalStrengthTypes.WEAK
return dict(signal_type=signal_type, signal_strength=signal_strength, aroon_up=aroon_up[-1], aroon_down=aroon_down[-1])
    def analyze_for_symbol(self, symbol, end_date=None):
        if end_date is None:
            end_date = datetime.today()  # evaluate at call time, not at import time
        data = self._data_for_symbol(symbol, self._num_periods + self._window_size, end_date, key=None)
high_data = [x[PastQuoteDataKeys.ADJ_HIGH] for x in data]
low_data = [x[PastQuoteDataKeys.ADJ_LOW] for x in data]
return self.analyze(high_data, low_data)
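# Minimal usage sketch (hypothetical, with synthetic prices; the data-feed
# backed *_for_symbol methods need a configured equity data source):
#
#     aroon = Aroon(num_periods=5, window_size=5)
#     highs = [10, 11, 12, 11, 13, 14, 15, 14, 13, 15]
#     lows = [9, 10, 11, 10, 12, 13, 14, 13, 12, 14]
#     aroon_up, aroon_down = aroon.calculate(highs, lows)
#     print(aroon.analyze(highs, lows))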
|
{"hexsha": "7fb8ee81562a2b4829c645a3144c27cc441e9c97", "size": 5783, "ext": "py", "lang": "Python", "max_stars_repo_path": "tradingtools/technicals/indicators/Aroon.py", "max_stars_repo_name": "innes213/TradingTools", "max_stars_repo_head_hexsha": "85f9d15d38824f61af26ff060457fceb965f11ce", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2017-05-28T22:43:59.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-14T03:40:16.000Z", "max_issues_repo_path": "tradingtools/technicals/indicators/Aroon.py", "max_issues_repo_name": "innes213/TradingTools", "max_issues_repo_head_hexsha": "85f9d15d38824f61af26ff060457fceb965f11ce", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tradingtools/technicals/indicators/Aroon.py", "max_forks_repo_name": "innes213/TradingTools", "max_forks_repo_head_hexsha": "85f9d15d38824f61af26ff060457fceb965f11ce", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 46.264, "max_line_length": 127, "alphanum_fraction": 0.7065536919, "include": true, "reason": "from numpy", "num_tokens": 1470}
|
@testset "matmul" begin
using NeuralAttentionlib.Matmul
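    # matmul_test compares matmul against a reference result: CollapsedDimArray
    # arguments are collapsed first, multiplied with batched_mul, and scaled by
    # s. uwcs (below) reports the size of the underlying un-collapsed array for
    # the shape assertions.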
function matmul_test(x, y, s)
cx = x isa CollapsedDimArray ? collapseddim(x) : x
cy = y isa CollapsedDimArray ? collapseddim(y) : y
return matmul(x, y, s) ≈ batched_mul(cx, cy) .* s
end
uwcs(x) = size(unwrap_collapse(x))
@testset "gemm_strided Float64" begin
a = randn(7, 6, 5, 4, 3, 2)
b = randn(42, 20, 6)
bt = randn(20, 42, 6)
c = randn(42, 60, 2)
ct = randn(60, 42, 2)
s = rand() + 1
ca1 = CollapsedDimArray(a, 3, 5)
ca2 = CollapsedDimArray(a, 3, 6)
@test matmul_test(ca1, bt, s)
@test matmul_test(ca1, batched_transpose(b), s)
@test matmul_test(bt, ca1, s)
@test matmul_test(batched_transpose(b), ca1, s)
@test matmul_test(batched_transpose(ca1), b, s)
@test matmul_test(batched_transpose(ca1), batched_transpose(bt), s)
@test matmul_test(b, batched_transpose(ca1), s)
@test matmul_test(batched_transpose(bt), batched_transpose(ca1), s)
@test matmul_test(ca1, batched_adjoint(b), s)
@test matmul_test(batched_adjoint(b), ca1, s)
@test matmul_test(batched_adjoint(ca1), b, s)
@test matmul_test(batched_adjoint(ca1), batched_adjoint(bt), s)
@test matmul_test(b, batched_adjoint(ca1), s)
@test matmul_test(batched_adjoint(bt), batched_adjoint(ca1), s)
@test uwcs(matmul(ca1, bt)) == (7, 6, 42, 6)
@test uwcs(matmul(bt, ca1)) == (20, 5, 4, 3, 2)
@test uwcs(matmul(batched_transpose(ca1), b)) == (5, 4, 20, 6)
@test uwcs(matmul(batched_transpose(b), ca1)) == (20, 5, 4, 3, 2)
@test matmul_test(ca2, ct, s)
@test matmul_test(ca2, batched_transpose(c), s)
@test matmul_test(ct, ca2, s)
@test matmul_test(batched_transpose(c), ca2, s)
@test matmul_test(batched_transpose(ca2), c, s)
@test matmul_test(batched_transpose(ca2), batched_transpose(ct), s)
@test matmul_test(c, batched_transpose(ca2), s)
@test matmul_test(batched_transpose(ct), batched_transpose(ca2), s)
@test uwcs(matmul(ca2, ct)) == (7, 6, 42, 2)
@test uwcs(matmul(ct, ca2)) == (60, 5, 4, 3, 2)
@test uwcs(matmul(batched_transpose(ca2), c)) == (5, 4, 3, 60, 2)
@test uwcs(matmul(batched_transpose(c), ca2)) == (60, 5, 4, 3, 2)
end
@testset "gemm_strided Float32" begin
a = randn(Float32, 7, 6, 5, 4, 3, 2)
b = randn(Float32, 42, 20, 6)
bt = randn(Float32, 20, 42, 6)
c = randn(Float32, 42, 60, 2)
ct = randn(Float32, 60, 42, 2)
s = rand(Float32) + 1
ca1 = CollapsedDimArray(a, 3, 5)
ca2 = CollapsedDimArray(a, 3, 6)
@test matmul_test(ca1, bt, s)
@test matmul_test(ca1, batched_transpose(b), s)
@test matmul_test(bt, ca1, s)
@test matmul_test(batched_transpose(b), ca1, s)
@test matmul_test(batched_transpose(ca1), b, s)
@test matmul_test(batched_transpose(ca1), batched_transpose(bt), s)
@test matmul_test(b, batched_transpose(ca1), s)
@test matmul_test(batched_transpose(bt), batched_transpose(ca1), s)
@test matmul_test(ca1, batched_adjoint(b), s)
@test matmul_test(batched_adjoint(b), ca1, s)
@test matmul_test(batched_adjoint(ca1), b, s)
@test matmul_test(batched_adjoint(ca1), batched_adjoint(bt), s)
@test matmul_test(b, batched_adjoint(ca1), s)
@test matmul_test(batched_adjoint(bt), batched_adjoint(ca1), s)
@test matmul_test(ca2, ct, s)
@test matmul_test(ca2, batched_transpose(c), s)
@test matmul_test(ct, ca2, s)
@test matmul_test(batched_transpose(c), ca2, s)
@test matmul_test(batched_transpose(ca2), c, s)
@test matmul_test(batched_transpose(ca2), batched_transpose(ct), s)
@test matmul_test(c, batched_transpose(ca2), s)
@test matmul_test(batched_transpose(ct), batched_transpose(ca2), s)
end
@testset "gemm_strided ComplexF64" begin
a = randn(ComplexF64, 7, 6, 5, 4, 3, 2)
b = randn(ComplexF64, 42, 20, 6)
bt = randn(ComplexF64, 20, 42, 6)
c = randn(ComplexF64, 42, 60, 2)
ct = randn(ComplexF64, 60, 42, 2)
s = rand(ComplexF64) + 1
ca1 = CollapsedDimArray(a, 3, 5)
ca2 = CollapsedDimArray(a, 3, 6)
@test matmul_test(ca1, bt, s)
@test matmul_test(ca1, batched_transpose(b), s)
@test matmul_test(bt, ca1, s)
@test matmul_test(batched_transpose(b), ca1, s)
@test matmul_test(batched_transpose(ca1), b, s)
@test matmul_test(batched_transpose(ca1), batched_transpose(bt), s)
@test matmul_test(b, batched_transpose(ca1), s)
@test matmul_test(batched_transpose(bt), batched_transpose(ca1), s)
@test matmul_test(ca1, batched_adjoint(b), s)
@test matmul_test(batched_adjoint(b), ca1, s)
@test matmul_test(batched_adjoint(ca1), b, s)
@test matmul_test(batched_adjoint(ca1), batched_adjoint(bt), s)
@test matmul_test(b, batched_adjoint(ca1), s)
@test matmul_test(batched_adjoint(bt), batched_adjoint(ca1), s)
@test matmul_test(ca2, ct, s)
@test matmul_test(ca2, batched_transpose(c), s)
@test matmul_test(ct, ca2, s)
@test matmul_test(batched_transpose(c), ca2, s)
@test matmul_test(batched_transpose(ca2), c, s)
@test matmul_test(batched_transpose(ca2), batched_transpose(ct), s)
@test matmul_test(c, batched_transpose(ca2), s)
@test matmul_test(batched_transpose(ct), batched_transpose(ca2), s)
end
@testset "gemm_strided ComplexF32" begin
a = randn(ComplexF32, 7, 6, 5, 4, 3, 2)
b = randn(ComplexF32, 42, 20, 6)
bt = randn(ComplexF32, 20, 42, 6)
c = randn(ComplexF32, 42, 60, 2)
ct = randn(ComplexF32, 60, 42, 2)
s = rand(ComplexF32) + 1
ca1 = CollapsedDimArray(a, 3, 5)
ca2 = CollapsedDimArray(a, 3, 6)
@test matmul_test(ca1, bt, s)
@test matmul_test(ca1, batched_transpose(b), s)
@test matmul_test(bt, ca1, s)
@test matmul_test(batched_transpose(b), ca1, s)
@test matmul_test(batched_transpose(ca1), b, s)
@test matmul_test(batched_transpose(ca1), batched_transpose(bt), s)
@test matmul_test(b, batched_transpose(ca1), s)
@test matmul_test(batched_transpose(bt), batched_transpose(ca1), s)
@test matmul_test(ca1, batched_adjoint(b), s)
@test matmul_test(batched_adjoint(b), ca1, s)
@test matmul_test(batched_adjoint(ca1), b, s)
@test matmul_test(batched_adjoint(ca1), batched_adjoint(bt), s)
@test matmul_test(b, batched_adjoint(ca1), s)
@test matmul_test(batched_adjoint(bt), batched_adjoint(ca1), s)
@test matmul_test(ca2, ct, s)
@test matmul_test(ca2, batched_transpose(c), s)
@test matmul_test(ct, ca2, s)
@test matmul_test(batched_transpose(c), ca2, s)
@test matmul_test(batched_transpose(ca2), c, s)
@test matmul_test(batched_transpose(ca2), batched_transpose(ct), s)
@test matmul_test(c, batched_transpose(ca2), s)
@test matmul_test(batched_transpose(ct), batched_transpose(ca2), s)
end
@testset "AD" begin
test_rrule(matmul, randn(7,6,5), randn(6, 2), randn())
test_rrule(matmul, randn(7,6,5,4), randn(6), randn())
test_rrule(matmul, CollapsedDimArray(randn(2,2,2,2,2,3), 4, 6), batched_transpose(randn(5,4,3)), randn(); check_inferred=false)
end
end
|
{"hexsha": "af83477d233ed3220d79eeec7a4ffa5d8527e06d", "size": 7640, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/matmul.jl", "max_stars_repo_name": "foldfelis/NeuralAttentionlib.jl", "max_stars_repo_head_hexsha": "52cb258807c9b8d308e14db0f99ec0d3492607c9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 18, "max_stars_repo_stars_event_min_datetime": "2021-08-08T09:21:50.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-28T14:58:29.000Z", "max_issues_repo_path": "test/matmul.jl", "max_issues_repo_name": "foldfelis/NeuralAttentionlib.jl", "max_issues_repo_head_hexsha": "52cb258807c9b8d308e14db0f99ec0d3492607c9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 10, "max_issues_repo_issues_event_min_datetime": "2021-08-13T05:54:19.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-19T05:51:34.000Z", "max_forks_repo_path": "test/matmul.jl", "max_forks_repo_name": "foldfelis/NeuralAttentionlib.jl", "max_forks_repo_head_hexsha": "52cb258807c9b8d308e14db0f99ec0d3492607c9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-08-13T07:22:46.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-17T02:55:07.000Z", "avg_line_length": 43.4090909091, "max_line_length": 135, "alphanum_fraction": 0.6232984293, "num_tokens": 2604}
|
#ifndef UTILS_H
#define UTILS_H
#include <boost/dynamic_bitset.hpp>
bool is_match(boost::dynamic_bitset<> r1, boost::dynamic_bitset<> r2);
boost::dynamic_bitset<> intersection(boost::dynamic_bitset<> r1, boost::dynamic_bitset<> r2);
bool is_zero(boost::dynamic_bitset<> r1);
#endif
|
{"hexsha": "4d767bbc496bb30d1eaba25ac5102cf650521b48", "size": 285, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "cmodule/utils.hpp", "max_stars_repo_name": "ymktw/SDNProbe", "max_stars_repo_head_hexsha": "46dee9737951012dc378f4d71675844402093569", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3.0, "max_stars_repo_stars_event_min_datetime": "2017-07-17T04:12:27.000Z", "max_stars_repo_stars_event_max_datetime": "2017-07-22T06:37:21.000Z", "max_issues_repo_path": "cmodule/utils.hpp", "max_issues_repo_name": "ymktw/SDNProbe", "max_issues_repo_head_hexsha": "46dee9737951012dc378f4d71675844402093569", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cmodule/utils.hpp", "max_forks_repo_name": "ymktw/SDNProbe", "max_forks_repo_head_hexsha": "46dee9737951012dc378f4d71675844402093569", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.9090909091, "max_line_length": 93, "alphanum_fraction": 0.7684210526, "num_tokens": 79}
|
import numpy as np
import pickle
lexicon = []
with open('ptb.txt','r') as f:
contents = f.readlines()
for l in contents:
all_words=l.split()
lexicon += list(all_words)
lexicon = list(set(lexicon))
print(lexicon)
vocb_size=len(lexicon)+1
print('vocb_size', vocb_size)
pad=vocb_size
voc = lexicon
vocab=dict([(x, y) for (y, x) in enumerate(voc)])
rev_vocab=dict([(x, y) for (x, y) in enumerate(voc)])
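# vocab maps token -> index and rev_vocab maps index -> token, so
# rev_vocab[vocab[t]] == t for every token t in the lexicon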
# Input data_file
with open('ptb.txt','r') as f:
contents = f.readlines()
x=[]
for sentence in contents:
    a = sentence
    d = sorted(a)  # sorted characters of the line
    token = [vocab.get(w) for w in d]
n_token=[]
for i in token:
if(i!= None):
n_token.append(i)
x.append(n_token)
max_len=0
ipf=[]
for i in x:
    max_len = max(max_len, len(i))
for i in x:
j=[]
j=np.lib.pad(i, (0,max_len-len(i)), 'constant', constant_values=(pad))
ipf.append(j)
data_x = np.array(ipf).T
# Train_X
testing_size=int(data_x.shape[1]-1)
train_x= data_x[:,:testing_size]
print('train_x.shape', train_x.shape)
#Getting Seq_len
seq_length=len(train_x)
print('seq_length', seq_length)
n_sents=train_x.shape[1]
print('n_sents', n_sents)
#Test_X
test_x=data_x[:,testing_size:]
print('test_x.shape', test_x.shape)
t=test_x.flatten()
tok=[rev_vocab.get(i) for i in t]
n_token=[]
for i in tok:
if(i!= None):
n_token.append(i)
c = ''.join(n_token)
print('Test_i/p',c)
# Output data_file
with open('ptb.txt','r') as f:
contents = f.readlines()
y=[]
for l in contents:
token=[vocab.get(w) for w in l]
n_token=[]
for i in token:
if(i!=None):
n_token.append(i)
y.append(n_token)
max_len=0
opf=[]
w=[]
for i in y:
    max_len = max(max_len, len(i))
for i in y:
j=[]
j=np.lib.pad(i, (0,max_len-len(i)), 'constant', constant_values=(pad))
p=np.ones_like(j,dtype=np.float32)
    for k in range(len(j)):  # range, not Python-2 xrange; avoid shadowing the outer loop variable
        if j[k] == pad:
            index_v = k
            p[index_v] = 0
w.append(p)
opf.append(j)
data_y = np.array(opf).T
#train_y
testing_size=int(data_y.shape[1]-1)
train_y=data_y[:,:testing_size]
print('train_y.shape', train_y.shape)
#test_y
test_y=data_y[:,testing_size:]
print('test_y.shape', test_y.shape)
t=test_y.flatten()
tok=[rev_vocab.get(i) for i in t]
n_token=[]
for i in tok:
if(i!= None):
n_token.append(i)
c = ''.join(n_token)
print('Test_o/p',c)
#data_weights
testing_size=int(data_y.shape[1]-1)
data_weight=np.array(w).T
weight=data_weight[:,:testing_size]
print('weight.shape', weight.shape)
with open('data_set.pickle','wb') as f:
pickle.dump([train_x,train_y,test_x,test_y,weight,seq_length,n_sents,vocb_size,rev_vocab],f)
|
{"hexsha": "98a8a8a9b2991fa2d5e162300e834da5e1906655", "size": 2856, "ext": "py", "lang": "Python", "max_stars_repo_path": "dicn.py", "max_stars_repo_name": "pratz4u/anagram", "max_stars_repo_head_hexsha": "9289ef57028b494328784789fd3c986bb1d06a90", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "dicn.py", "max_issues_repo_name": "pratz4u/anagram", "max_issues_repo_head_hexsha": "9289ef57028b494328784789fd3c986bb1d06a90", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "dicn.py", "max_forks_repo_name": "pratz4u/anagram", "max_forks_repo_head_hexsha": "9289ef57028b494328784789fd3c986bb1d06a90", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 15.1111111111, "max_line_length": 93, "alphanum_fraction": 0.6221988796, "include": true, "reason": "import numpy", "num_tokens": 856}
|
# -*- coding: utf-8 -*-
"""Bank.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1tMPdw2KcPL1QKwzoieGUrpqLRXAWg0Uf
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
df = pd.read_csv('bank.csv', delimiter = ';', quoting = 3)
df=df.dropna(how='any')
df
from sklearn.preprocessing import LabelEncoder
le1=LabelEncoder()
le2=LabelEncoder()
df['"age"']=le1.fit_transform(df['"age"'])
df['"job"']=le1.fit_transform(df['"job"'])
df['"marital"']=le1.fit_transform(df['"marital"'])
df['"education"']=le1.fit_transform(df['"education"'])
df['"default"']=le1.fit_transform(df['"default"'])
df['"housing"']=le1.fit_transform(df['"housing"'])
df['"loan"']=le1.fit_transform(df['"loan"'])
df['"contact"']=le1.fit_transform(df['"contact"'])
df['"month"']=le1.fit_transform(df['"month"'])
df['"day_of_week"']=le1.fit_transform(df['"day_of_week"'])
df['"duration"']=le1.fit_transform(df['"duration"'])
df['"campaign"']=le1.fit_transform(df['"campaign"'])
df['"pdays"']=le1.fit_transform(df['"pdays"'])
df['"previous"']=le1.fit_transform(df['"previous"'])
df['"poutcome"']=le1.fit_transform(df['"poutcome"'])
df['"emp.var.rate"']=le1.fit_transform(df['"emp.var.rate"'])
df['"cons.price.idx"']=le1.fit_transform(df['"cons.price.idx"'])
df['"cons.conf.idx"']=le1.fit_transform(df['"cons.conf.idx"'])
df['"euribor3m"']=le1.fit_transform(df['"euribor3m"'])
df['"nr.employed"']=le1.fit_transform(df['"nr.employed"'])
df['"y"']=le2.fit_transform(df['"y"'])
df
X=df.iloc[:,:-1].values
y=df.iloc[:,-1].values
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)
import tensorflow as tf
ann = tf.keras.models.Sequential()
ann.add(tf.keras.layers.Dense(units=30, activation='relu'))
ann.add(tf.keras.layers.Dense(units=30, activation='relu'))
ann.add(tf.keras.layers.Dense(units=1,activation='sigmoid'))
ann.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
history=ann.fit(X_train, y_train, batch_size=15, epochs=150)
ann.save("Bank.h5")
plt.figure(0)
plt.plot(history.history['accuracy'], label='training accuracy')
plt.title('Accuracy')
plt.xlabel('epochs')
plt.ylabel('accuracy')
plt.legend()
plt.savefig('Accuracy.png')
plt.figure(1)
plt.plot(history.history['loss'], label='training loss')
plt.title('Loss')
plt.xlabel('epochs')
plt.ylabel('loss')
plt.legend()
plt.savefig('Loss.png')
print("Saved Model & Graph to disk")
y_pred = ann.predict(X_test)
y_pred=np.round(y_pred)
print(np.concatenate((y_pred.reshape(len(y_pred),1), y_test.reshape(len(y_test),1)),1))
from sklearn.metrics import confusion_matrix, accuracy_score
cm = confusion_matrix(y_test, y_pred)
print(cm)
print(accuracy_score(y_test, y_pred))
|
{"hexsha": "2486b0a22f5df70e59a2dd4402fe9d16a5508bc2", "size": 2870, "ext": "py", "lang": "Python", "max_stars_repo_path": "bank.py", "max_stars_repo_name": "The-SocialLion/Bank-Marketing-prediction-using-ANN", "max_stars_repo_head_hexsha": "a240d3eacc1ffd7c1640a3414db53d0dab7d3a8e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "bank.py", "max_issues_repo_name": "The-SocialLion/Bank-Marketing-prediction-using-ANN", "max_issues_repo_head_hexsha": "a240d3eacc1ffd7c1640a3414db53d0dab7d3a8e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "bank.py", "max_forks_repo_name": "The-SocialLion/Bank-Marketing-prediction-using-ANN", "max_forks_repo_head_hexsha": "a240d3eacc1ffd7c1640a3414db53d0dab7d3a8e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.7647058824, "max_line_length": 93, "alphanum_fraction": 0.7003484321, "include": true, "reason": "import numpy", "num_tokens": 821}
|
(* Title: HOL/Library/Uprod.thy
Author: Andreas Lochbihler, ETH Zurich *)
section \<open>Unordered pairs\<close>
theory Uprod imports Main begin
typedef ('a, 'b) commute = "{f :: 'a \<Rightarrow> 'a \<Rightarrow> 'b. \<forall>x y. f x y = f y x}"
morphisms apply_commute Abs_commute
by auto
setup_lifting type_definition_commute
lemma apply_commute_commute: "apply_commute f x y = apply_commute f y x"
by(transfer) simp
context includes lifting_syntax begin
lift_definition rel_commute :: "('a \<Rightarrow> 'b \<Rightarrow> bool) \<Rightarrow> ('c \<Rightarrow> 'd \<Rightarrow> bool) \<Rightarrow> ('a, 'c) commute \<Rightarrow> ('b, 'd) commute \<Rightarrow> bool"
is "\<lambda>A B. A ===> A ===> B" .
end
definition eq_upair :: "('a \<times> 'a) \<Rightarrow> ('a \<times> 'a) \<Rightarrow> bool"
where "eq_upair = (\<lambda>(a, b) (c, d). a = c \<and> b = d \<or> a = d \<and> b = c)"
lemma eq_upair_simps [simp]:
"eq_upair (a, b) (c, d) \<longleftrightarrow> a = c \<and> b = d \<or> a = d \<and> b = c"
by(simp add: eq_upair_def)
lemma equivp_eq_upair: "equivp eq_upair"
by(auto simp add: equivp_def fun_eq_iff)
quotient_type 'a uprod = "'a \<times> 'a" / eq_upair by(rule equivp_eq_upair)
lift_definition Upair :: "'a \<Rightarrow> 'a \<Rightarrow> 'a uprod" is Pair parametric Pair_transfer[of "A" "A" for A] .
lemma uprod_exhaust [case_names Upair, cases type: uprod]:
obtains a b where "x = Upair a b"
by transfer fastforce
lemma Upair_inject [simp]: "Upair a b = Upair c d \<longleftrightarrow> a = c \<and> b = d \<or> a = d \<and> b = c"
by transfer auto
code_datatype Upair
lift_definition case_uprod :: "('a, 'b) commute \<Rightarrow> 'a uprod \<Rightarrow> 'b" is case_prod
parametric case_prod_transfer[of A A for A] by auto
lemma case_uprod_simps [simp, code]: "case_uprod f (Upair x y) = apply_commute f x y"
by transfer auto
lemma uprod_split: "P (case_uprod f x) \<longleftrightarrow> (\<forall>a b. x = Upair a b \<longrightarrow> P (apply_commute f a b))"
by transfer auto
lemma uprod_split_asm: "P (case_uprod f x) \<longleftrightarrow> \<not> (\<exists>a b. x = Upair a b \<and> \<not> P (apply_commute f a b))"
by transfer auto
lift_definition not_equal :: "('a, bool) commute" is "(\<noteq>)" by auto
lemma apply_not_equal [simp]: "apply_commute not_equal x y \<longleftrightarrow> x \<noteq> y"
by transfer simp
definition proper_uprod :: "'a uprod \<Rightarrow> bool"
where "proper_uprod = case_uprod not_equal"
lemma proper_uprod_simps [simp, code]: "proper_uprod (Upair x y) \<longleftrightarrow> x \<noteq> y"
by(simp add: proper_uprod_def)
context includes lifting_syntax begin
private lemma set_uprod_parametric':
"(rel_prod A A ===> rel_set A) (\<lambda>(a, b). {a, b}) (\<lambda>(a, b). {a, b})"
by transfer_prover
lift_definition set_uprod :: "'a uprod \<Rightarrow> 'a set" is "\<lambda>(a, b). {a, b}"
parametric set_uprod_parametric' by auto
lemma set_uprod_simps [simp, code]: "set_uprod (Upair x y) = {x, y}"
by transfer simp
lemma finite_set_uprod [simp]: "finite (set_uprod x)"
by(cases x) simp
private lemma map_uprod_parametric':
"((A ===> B) ===> rel_prod A A ===> rel_prod B B) (\<lambda>f. map_prod f f) (\<lambda>f. map_prod f f)"
by transfer_prover
lift_definition map_uprod :: "('a \<Rightarrow> 'b) \<Rightarrow> 'a uprod \<Rightarrow> 'b uprod" is "\<lambda>f. map_prod f f"
parametric map_uprod_parametric' by auto
lemma map_uprod_simps [simp, code]: "map_uprod f (Upair x y) = Upair (f x) (f y)"
by transfer simp
private lemma rel_uprod_transfer':
"((A ===> B ===> (=)) ===> rel_prod A A ===> rel_prod B B ===> (=))
(\<lambda>R (a, b) (c, d). R a c \<and> R b d \<or> R a d \<and> R b c) (\<lambda>R (a, b) (c, d). R a c \<and> R b d \<or> R a d \<and> R b c)"
by transfer_prover
lift_definition rel_uprod :: "('a \<Rightarrow> 'b \<Rightarrow> bool) \<Rightarrow> 'a uprod \<Rightarrow> 'b uprod \<Rightarrow> bool"
is "\<lambda>R (a, b) (c, d). R a c \<and> R b d \<or> R a d \<and> R b c" parametric rel_uprod_transfer'
by auto
lemma rel_uprod_simps [simp, code]:
"rel_uprod R (Upair a b) (Upair c d) \<longleftrightarrow> R a c \<and> R b d \<or> R a d \<and> R b c"
by transfer auto
lemma Upair_parametric [transfer_rule]: "(A ===> A ===> rel_uprod A) Upair Upair"
unfolding rel_fun_def by transfer auto
lemma case_uprod_parametric [transfer_rule]:
"(rel_commute A B ===> rel_uprod A ===> B) case_uprod case_uprod"
unfolding rel_fun_def by transfer(force dest: rel_funD)
end
bnf uprod: "'a uprod"
map: map_uprod
sets: set_uprod
bd: natLeq
rel: rel_uprod
proof -
show "map_uprod id = id" unfolding fun_eq_iff by transfer auto
show "map_uprod (g \<circ> f) = map_uprod g \<circ> map_uprod f" for f :: "'a \<Rightarrow> 'b" and g :: "'b \<Rightarrow> 'c"
unfolding fun_eq_iff by transfer auto
show "map_uprod f x = map_uprod g x" if "\<And>z. z \<in> set_uprod x \<Longrightarrow> f z = g z"
for f :: "'a \<Rightarrow> 'b" and g x using that by transfer auto
show "set_uprod \<circ> map_uprod f = (`) f \<circ> set_uprod" for f :: "'a \<Rightarrow> 'b" by transfer auto
show "card_order natLeq" by(rule natLeq_card_order)
show "BNF_Cardinal_Arithmetic.cinfinite natLeq" by(rule natLeq_cinfinite)
show "regularCard natLeq" by(rule regularCard_natLeq)
show "ordLess2 (card_of (set_uprod x)) natLeq" for x :: "'a uprod"
by (auto simp flip: finite_iff_ordLess_natLeq)
show "rel_uprod R OO rel_uprod S \<le> rel_uprod (R OO S)"
for R :: "'a \<Rightarrow> 'b \<Rightarrow> bool" and S :: "'b \<Rightarrow> 'c \<Rightarrow> bool" by(rule predicate2I)(transfer; auto)
show "rel_uprod R = (\<lambda>x y. \<exists>z. set_uprod z \<subseteq> {(x, y). R x y} \<and> map_uprod fst z = x \<and> map_uprod snd z = y)"
for R :: "'a \<Rightarrow> 'b \<Rightarrow> bool" by transfer(auto simp add: fun_eq_iff)
qed
lemma pred_uprod_code [simp, code]: "pred_uprod P (Upair x y) \<longleftrightarrow> P x \<and> P y"
by(simp add: pred_uprod_def)
instantiation uprod :: (equal) equal begin
definition equal_uprod :: "'a uprod \<Rightarrow> 'a uprod \<Rightarrow> bool"
where "equal_uprod = (=)"
lemma equal_uprod_code [code]:
"HOL.equal (Upair x y) (Upair z u) \<longleftrightarrow> x = z \<and> y = u \<or> x = u \<and> y = z"
unfolding equal_uprod_def by simp
instance by standard(simp add: equal_uprod_def)
end
quickcheck_generator uprod constructors: Upair
lemma UNIV_uprod: "UNIV = (\<lambda>x. Upair x x) ` UNIV \<union> (\<lambda>(x, y). Upair x y) ` Sigma UNIV (\<lambda>x. UNIV - {x})"
apply(rule set_eqI)
subgoal for x by(cases x) auto
done
context begin
private lift_definition upair_inv :: "'a uprod \<Rightarrow> 'a"
is "\<lambda>(x, y). if x = y then x else undefined" by auto
lemma finite_UNIV_prod [simp]:
"finite (UNIV :: 'a uprod set) \<longleftrightarrow> finite (UNIV :: 'a set)" (is "?lhs = ?rhs")
proof
assume ?lhs
hence "finite (range (\<lambda>x :: 'a. Upair x x))" by(rule finite_subset[rotated]) simp
hence "finite (upair_inv ` range (\<lambda>x :: 'a. Upair x x))" by(rule finite_imageI)
also have "upair_inv (Upair x x) = x" for x :: 'a by transfer simp
then have "upair_inv ` range (\<lambda>x :: 'a. Upair x x) = UNIV" by(auto simp add: image_image)
finally show ?rhs .
qed(simp add: UNIV_uprod)
end
lemma card_UNIV_uprod:
"card (UNIV :: 'a uprod set) = card (UNIV :: 'a set) * (card (UNIV :: 'a set) + 1) div 2"
(is "?UPROD = ?A * _ div _")
proof(cases "finite (UNIV :: 'a set)")
case True
from True obtain f :: "nat \<Rightarrow> 'a" where bij: "bij_betw f {0..<?A} UNIV"
by (blast dest: ex_bij_betw_nat_finite)
hence [simp]: "f ` {0..<?A} = UNIV" by(rule bij_betw_imp_surj_on)
have "UNIV = (\<lambda>(x, y). Upair (f x) (f y)) ` (SIGMA x:{0..<?A}. {..x})"
apply(rule set_eqI)
subgoal for x
apply(cases x)
apply(clarsimp)
subgoal for a b
apply(cases "inv_into {0..<?A} f a \<le> inv_into {0..<?A} f b")
subgoal by(rule rev_image_eqI[where x="(inv_into {0..<?A} f _, inv_into {0..<?A} f _)"])
(auto simp add: inv_into_into[where A="{0..<?A}" and f=f, simplified] intro: f_inv_into_f[where f=f, symmetric])
subgoal
apply(simp only: not_le)
apply(drule less_imp_le)
apply(rule rev_image_eqI[where x="(inv_into {0..<?A} f _, inv_into {0..<?A} f _)"])
apply(auto simp add: inv_into_into[where A="{0..<?A}" and f=f, simplified] intro: f_inv_into_f[where f=f, symmetric])
done
done
done
done
hence "?UPROD = card \<dots>" by simp
also have "\<dots> = card (SIGMA x:{0..<?A}. {..x})"
apply(rule card_image)
using bij[THEN bij_betw_imp_inj_on]
by(simp add: inj_on_def Ball_def)(metis leD le_eq_less_or_eq le_less_trans)
also have "\<dots> = sum Suc {0..<?A}"
by (subst card_SigmaI) simp_all
also have "\<dots> = sum of_nat {Suc 0..?A}"
using sum.atLeastLessThan_reindex [symmetric, of Suc 0 ?A id]
by (simp del: sum.op_ivl_Suc add: atLeastLessThanSuc_atLeastAtMost)
also have "\<dots> = ?A * (?A + 1) div 2"
using gauss_sum_from_Suc_0 [of ?A, where ?'a = nat] by simp
finally show ?thesis .
qed simp
end
|
{"author": "seL4", "repo": "isabelle", "sha": "e1ab32a3bb41728cd19541063283e37919978a4c", "save_path": "github-repos/isabelle/seL4-isabelle", "path": "github-repos/isabelle/seL4-isabelle/isabelle-e1ab32a3bb41728cd19541063283e37919978a4c/src/HOL/Library/Uprod.thy"}
|
import gi
import time
import math
import cairo
import numpy
gi.require_version('Gtk', '3.0') # noqa
gi.require_version('Gio', '2.0') # noqa
gi.require_version('GLib', '2.0') # noqa
gi.require_version('Wnck', '3.0') # noqa
gi.require_version('GdkPixbuf', '2.0') # noqa
from PIL import Image
from threading import Thread
from applications import AppCache, WindowTracker, groupings, get_icon_pixbuf_for_appinfo, get_gicon_pixbuf
from gi.repository import Gtk, Gio, GdkPixbuf, GLib, Wnck
from dominantcolors import rgba2rgb, find_dominant_colors
default_screen = Wnck.Screen.get_default()
def lerp(start, end, amt):
return (1-amt)*start+amt*end
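# linear interpolation between start and end, e.g. lerp(0.0, 10.0, 0.25) == 2.5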
def pixbuf2image(pix):
data = pix.get_pixels()
w = pix.props.width
h = pix.props.height
stride = pix.props.rowstride
mode = "RGB"
    if pix.props.has_alpha:
        mode = "RGBA"
im = Image.frombytes(mode, (w, h), data, "raw", mode, stride)
return im
def image2pixbuf(im):
width, height = im.size
return GdkPixbuf.Pixbuf.new_from_bytes(GLib.Bytes.new(im.tobytes("raw", "RGBA")), GdkPixbuf.Colorspace.RGB,
True, 8, width, height, width * 4)
def should_show_window(window):
return window.get_window_type() == Wnck.WindowType.NORMAL or window.get_window_type() == Wnck.WindowType.SPLASHSCREEN
def create_icon(click_event, *, icon_image, name="Application"):
arr = numpy.asarray(icon_image)
if icon_image.mode == 'RGBA':
arr = rgba2rgb(arr)
DOT_SPACING = 20
DOT_SIZE = 5
MAXIMUM_DOTS = 5
dot_amount = 0
dot_color = find_dominant_colors(arr, 1)[0]
dot_opacity = 1
icon_pixbuf = image2pixbuf(icon_image)
icon_pixbuf = icon_pixbuf.scale_simple(
100, 100, GdkPixbuf.InterpType.BILINEAR)
def update_dots(amount, color=None, opacity=None):
nonlocal dot_amount, dot_color, dot_opacity
dot_amount = min(amount, MAXIMUM_DOTS)
dot_color = color if color else dot_color
dot_opacity = opacity if opacity else dot_opacity
dots.queue_draw()
def render_dots(widget, ctx):
cairo_color = [*map(lambda element: element / 255, dot_color)]
widget_size, _ = widget.get_allocated_size()
for i in range(dot_amount):
dot_x = widget_size.width / 2 - ((i - (dot_amount - 1) / 2) *
DOT_SPACING)
dot_y = widget_size.height / 2
pat = cairo.RadialGradient(
dot_x, dot_y, 0.0, dot_x, dot_y, DOT_SIZE)
pat.add_color_stop_rgb(0, *cairo_color)
pat.add_color_stop_rgb(0.3, *cairo_color)
pat.add_color_stop_rgb(
1, *map(lambda element: min(element + 0.03, 1), cairo_color))
ctx.arc(dot_x, dot_y, DOT_SIZE, 0, 2 * math.pi)
ctx.set_source(pat)
ctx.fill()
box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=10)
box.set_tooltip_text(name)
image = Gtk.Image.new_from_pixbuf(icon_pixbuf)
dots = Gtk.DrawingArea()
dots.connect("draw", render_dots)
dots.set_size_request(-1, DOT_SIZE + 10)
dots.queue_draw()
box.add(image)
box.add(dots)
box.show_all()
return (box, update_dots)
ANIMATE_DURATION = 2010
class Dock(Gtk.Bin):
def __init__(self, screen=default_screen, app_cache=None, window_tracker=None):
super().__init__()
self.current_width = 0
self.old_width = 0
self.animation_progress = 1
self.animation_start = 0
self._box = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=10)
self._box.get_style_context().add_class("dock")
self.app_cache = app_cache or AppCache(load_applications=True)
self.window_tracker = window_tracker or WindowTracker(
app_cache=self.app_cache, screen=screen, flter=self.window_filter)
self.window_tracker.track_by(should_show_window)
self.window_tracker.connect("update", self.rerender)
self.add(self._box)
self.show_all()
def calc_width(self):
return len(self._box.get_children()) * 110 + 10
def rerender(self, _):
self.set_size_request(self.calc_width(), -1)
groups = self.window_tracker.get_groups(
groupby=groupings.by_wmclass_group)
for child in self._box.get_children():
self._box.remove(child)
for group in groups:
app_info = self.app_cache.get_appinfo_for_wmclass(
group[0].get_class_group_name())
icon, update_icon = create_icon(lambda: None, icon_image=pixbuf2image(get_icon_pixbuf_for_appinfo(
app_info) if app_info else get_gicon_pixbuf(Gio.ThemedIcon.new_from_names(["dialog-error-symbolic"]))), name=app_info.get_name() if app_info else group[0].get_name())
self._box.add(icon)
icon.show()
update_icon(len(group))
print("group added:", app_info.get_name()
if app_info else group[0].get_name())
        # self.animate_width(self.calc_width())
# anim_thread = Thread(target=lambda: self.animate_width(self.calc_width()))
# anim_thread.start()
def animation_step(self, *_):
current_time = time.time()
animation_progress = min(
(current_time - self.animation_start) * 1000 / ANIMATE_DURATION, 1)
self.set_size_request(
lerp(self.old_width, self.current_width, animation_progress), -1)
print(self.get_size_request().width)
self.queue_draw()
return animation_progress < 1
def animate_width(self, target):
if self.current_width != target:
self.old_width = self.current_width
self.current_width = target
self.animation_start = time.time()
GLib.timeout_add(100, self.animation_step)
def window_filter(self, window):
return window.get_window_type() == Wnck.WindowType.NORMAL
|
{"hexsha": "f931078340acd5a0b03113d9744862991ae55439", "size": 6014, "ext": "py", "lang": "Python", "max_stars_repo_path": "dock.py", "max_stars_repo_name": "Team-Diffusion/diffusion-dock", "max_stars_repo_head_hexsha": "9ae6c12ba82ab86392bb544a5a01bbf2c03cfc8b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "dock.py", "max_issues_repo_name": "Team-Diffusion/diffusion-dock", "max_issues_repo_head_hexsha": "9ae6c12ba82ab86392bb544a5a01bbf2c03cfc8b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-06-08T21:14:32.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-12T00:22:50.000Z", "max_forks_repo_path": "dock.py", "max_forks_repo_name": "Team-Diffusion/diffusion-dock", "max_forks_repo_head_hexsha": "9ae6c12ba82ab86392bb544a5a01bbf2c03cfc8b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-05-08T19:54:54.000Z", "max_forks_repo_forks_event_max_datetime": "2020-05-08T19:54:54.000Z", "avg_line_length": 30.841025641, "max_line_length": 182, "alphanum_fraction": 0.648486864, "include": true, "reason": "import numpy", "num_tokens": 1479}
|
import numpy as np
from pyraf import iraf
from pyraf.iraf import kepler
'''
Useful functions for Kepler light curve processing
Use this with the program 'makelc.py'
Originally by Jean McKeever
Edited and improved by Meredith Rawls
'''
# calculate orbital phase
# times must be a list of observation times in the same units as BJD0
# it returns 'phases': orbital phases from 0 to 1
def phasecalc(times, period=100, BJD0=2454833):
phases = []
cycles = []
for i in range(0, len(times)):
fracP = (times[i] - BJD0) / period
if fracP < 0:
phases.append(fracP % 1)
cycles.append(int(fracP))
else:
phases.append(fracP % 1)
cycles.append(int(fracP) + 1)
#print(fracP, phases[i])
return phases
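# Worked example (hypothetical time): phasecalc([2455000.0], period=100,
# BJD0=2454833) returns approximately [0.67], since
# (2455000 - 2454833) / 100 = 1.67 and 1.67 % 1 = 0.67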
# remove long-term trends
# uses a simple 3rd-order polynomial by default
# operates on one array at a time (e.g., after all quarters have been combined)
def long_detrend(t, flux, order=3):
model = np.polyfit(t, flux, order)
fit = np.zeros(len(t))
# apply the model coefficients to create the fit
for i in range(0, order+1):
fit += model[i]*np.power(t, (order-i))
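    # the loop above is equivalent to fit = np.polyval(model, t), because
    # np.polyfit returns coefficients ordered from highest power to lowest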
#flux = flux/fit*1e6 - 1e6 # put it in ppm >:(
flux = flux/fit*np.median(flux) # don't put it in ppm, because ppm is annoying
return t, flux
# Delete any observation that has one or more NaN values.
# Assumes there are six parallel arrays... use dummy arrays if you don't have 6
# columns of interest to operate on (sorry).
# Operates on one quarter at a time
def nan_delete(time, flux, ferr, other1, other2, other3):
a = []
a = [time, flux, ferr, other1, other2, other3]
atrans = np.transpose(a)
newatrans = []
newa = []
for row in atrans:
# only save rows that DON'T contain a NaN value
        if not np.isnan(row).any():
newatrans.append(row)
newa = np.transpose(newatrans)
newtime = newa[0]
newflux = newa[1]
newferr = newa[2]
newother1 = newa[3]
newother2 = newa[4]
newother3 = newa[5]
return newtime, newflux, newferr, newother1, newother2, newother3
# Put data from different quarters on the same AVERAGE level
# operates on a list of arrays (multiple quarters) all at once
# DON'T USE THIS ONE
# def normalize_qtr_avg(flux):
# sumflux = 0
# npts = 0
# for arr in flux:
# sumflux += np.nansum(arr)
# npts += len(arr[arr>0])
# avgflux = sumflux/npts # overall average for all quarters
# for arr in flux:
# avg_arr = np.mean(arr[arr>0]) # average for an individual quarter
# arr += avgflux - avg_arr
# return flux
# Put data from different quarters on the same MEDIAN level
# operates on a list of arrays (multiple quarters) all at once
def normalize_qtr_med(flux):
sumflux = 0
npts = 0
for arr in flux:
sumflux += np.nansum(arr)
npts += len(arr)
avgflux = sumflux/npts # overall average for all quarters
for arr in flux:
med_arr = np.median(arr) # median for an individual quarter
arr += avgflux - med_arr
return flux
# Line up the gaps within each quarter
# operates on a list of arrays (multiple quarters) all at once
def lineup_qtr_gaps(time, flux, maskstart, maskend):
diffs = np.zeros(len(time) - 1)
for i in range(0,len(time) - 1): # loop through quarters
# calculate differences between flux points at quarter start/end
start = 0
end = -1
for idx, mask in enumerate(maskstart):
while (time[i][end] > maskstart[idx] and time[i][end] < maskend[idx]):
#print('end', end, time[i][end], maskstart[idx], maskend[idx])
end -= 1
while (time[i+1][start] > maskstart[idx] and time[i+1][start] < maskend[idx]):
#print('start', start, time[i+1][start], maskstart[idx], maskend[idx])
start += 1
diffs[i] = (flux[i][end] - flux[i+1][start])
# maxi will find the point with the largest change in flux
maxi = lambda z: np.where(max(abs(z)) == abs(z))[0][0]
cntr = 0 # counter
max_val = max(abs(diffs))
while max_val > 100: #original value here was 100
# this is the index of the largest change in flux, so it needs adjusting
ind = maxi(diffs)
# this is the actual change in flux associated with that index
diff = diffs[ind]
# adjust the flux at this spot and its neighbor so they meet
flux[ind] = flux[ind] - diff/2.0
flux[ind+1] = flux[ind+1] + diff/2.0
diffs = np.zeros(len(time) - 1)
for i in range(0, len(time) - 1):
# calculate differences between flux points at quarter start/end, again
start = 0
end = -1
for idx, mask in enumerate(maskstart):
while time[i][end] > maskstart[idx] and time[i][end] < maskend[idx]:
#print('end', end, time[i][end], maskstart[idx], maskend[idx])
end -= 1
while time[i+1][start] > maskstart[idx] and time[i+1][start] < maskend[idx]:
#print('start', start, time[i+1][start], maskstart[idx], maskend[idx])
start += 1
diffs[i] = (flux[i][end] - flux[i+1][start])
cntr += 1 # count how many times this while-loop happens
max_val = max(abs(diffs))
# print(max_val, cntr)
return time, flux
# performs detrending with cotrending basis vectors (cbvs)
# lcin and lcout must both be FITS filenames
def kepcotrend(lcin, lcout, cbvfile, maskfile=''):
iraf.kepcotrend(infile=lcin, outfile=lcout, cbvfile=cbvfile,
vectors='1 2', method='simplex', fitpower=1, iterate='yes', sigmaclip=2.0,
maskfile=maskfile, scinterp='None', plot='no', clobber='yes', verbose='no')
return
|
{"hexsha": "2c9220dbfde66eef7cdc24410685f09d9949bf06", "size": 5357, "ext": "py", "lang": "Python", "max_stars_repo_path": "lc_functions.py", "max_stars_repo_name": "mrawls/kepler-makelc", "max_stars_repo_head_hexsha": "72a929b04d1c71bb5e854b96a9901544f681ed86", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-09-10T01:35:08.000Z", "max_stars_repo_stars_event_max_datetime": "2018-09-10T01:35:08.000Z", "max_issues_repo_path": "lc_functions.py", "max_issues_repo_name": "mrawls/kepler-makelc", "max_issues_repo_head_hexsha": "72a929b04d1c71bb5e854b96a9901544f681ed86", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lc_functions.py", "max_forks_repo_name": "mrawls/kepler-makelc", "max_forks_repo_head_hexsha": "72a929b04d1c71bb5e854b96a9901544f681ed86", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.6917808219, "max_line_length": 82, "alphanum_fraction": 0.6824715326, "include": true, "reason": "import numpy", "num_tokens": 1655}
|
from __future__ import print_function
import argparse
import os
import sys
import random
import math
import shutil
import numbers
import numpy as np
import torch
import torch.nn.parallel
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
import torch.utils.data
from torch.utils.tensorboard import SummaryWriter
from source.points_to_surf_model import PointsToSurfModel
from source import data_loader
from source import sdf_nn
from source.base import evaluation
debug = False
def parse_arguments(args=None):
parser = argparse.ArgumentParser()
parser.add_argument('--name', type=str, default='debug',
help='training run name')
parser.add_argument('--desc', type=str, default='My training run for single-scale normal estimation.',
help='description')
parser.add_argument('--indir', type=str, default='datasets/abc_minimal',
help='input folder (meshes)')
parser.add_argument('--outdir', type=str, default='models',
help='output folder (trained models)')
parser.add_argument('--logdir', type=str, default='logs',
help='training log folder')
parser.add_argument('--trainset', type=str, default='trainset.txt',
help='training set file name')
parser.add_argument('--testset', type=str, default='testset.txt',
help='test set file name')
    parser.add_argument('--save_interval', type=int, default=10,
                        help='save model each n epochs')
    parser.add_argument('--debug_interval', type=int, default=1,
                        help='print logging info each n epochs')
parser.add_argument('--refine', type=str, default='',
help='refine model at this path')
parser.add_argument('--gpu_idx', type=int, default=[0], nargs='+',
help='set < 0 to use CPU')
parser.add_argument('--patch_radius', type=float, default=0.05,
help='Neighborhood of points that is queried with the network. '
'This enables you to set the trade-off between computation time and tolerance for '
'sparsely sampled surfaces. Use r <= 0.0 for k-NN queries.')
# training parameters
parser.add_argument('--net_size', type=int, default=1024,
help='number of neurons in the largest fully connected layer')
parser.add_argument('--nepoch', type=int, default=2,
help='number of epochs to train for')
parser.add_argument('--batchSize', type=int, default=2,
help='input batch size')
parser.add_argument('--patch_center', type=str, default='point',
help='center patch at...\n'
'point: center point\n'
'mean: patch mean')
parser.add_argument('--patch_point_count_std', type=float, default=0,
help='standard deviation of the number of points in a patch')
parser.add_argument('--patches_per_shape', type=int, default=1000,
help='number of patches sampled from each shape in an epoch')
parser.add_argument('--sub_sample_size', type=int, default=500,
help='number of points of the point cloud that are trained with each patch')
parser.add_argument('--workers', type=int, default=8,
help='number of data loading workers - 0 means same thread as main execution')
parser.add_argument('--cache_capacity', type=int, default=100,
help='Max. number of dataset elements (usually shapes) to hold in the cache at the same time.')
parser.add_argument('--seed', type=int, default=3627473,
help='manual seed')
parser.add_argument('--single_transformer', type=int, default=0,
help='0: two transformers for the global and local information, \n'
'rotate local points with matrix trained from global points\n'
'1: single transformer for both local and global points')
parser.add_argument('--uniform_subsample', type=int, default=0,
help='1: global sub-sample uniformly sampled from the point cloud\n'
'0: distance-depending probability for global sub-sample')
parser.add_argument('--fixed_subsample', type=int, default=0,
help='1: use the same fixed sub-sample for all patches\n'
'0: use a different random sub-sample for each patch')
parser.add_argument('--shared_transformer', type=int, default=0,
help='use a single shared QSTN that takes both the local and global point sets as input')
parser.add_argument('--training_order', type=str, default='random',
help='order in which the training patches are presented:\n'
'random: fully random over the entire dataset (the set of all patches is permuted)\n'
'random_shape_consecutive: random over the entire dataset, but patches of a shape \n'
'remain consecutive (shapes and patches inside a shape are permuted)')
parser.add_argument('--identical_epochs', type=int, default=False,
help='use same patches in each epoch, mainly for debugging')
parser.add_argument('--lr', type=float, default=0.001,
help='learning rate')
parser.add_argument('--scheduler_steps', type=int, nargs='+', default=[75, 125],
help='the lr will be multiplicated with 0.1 at these epochs')
parser.add_argument('--momentum', type=float, default=0.9,
help='gradient descent momentum')
parser.add_argument('--normal_loss', type=str, default='ms_euclidean',
help='Normal loss type:\n'
'ms_euclidean: mean square euclidean distance\n'
'ms_oneminuscos: mean square 1-cos(angle error)')
# model hyperparameters
parser.add_argument('--outputs', type=str, nargs='+', default=['imp_surf', 'imp_surf_magnitude', 'imp_surf_sign',
'patch_pts_ids', 'p_index'],
help='outputs of the network, a list with elements of:\n'
'unoriented_normals: unoriented (flip-invariant) point normals\n'
'oriented_normals: oriented point normals\n'
'p_index: output debug info to validate the model\n'
'imp_surf: distance from query point to patch\n'
'imp_surf_magnitude: magnitude for distance from query point to patch\n'
'imp_surf_sign: sign for distance from query point to patch\n'
'patch_pts_ids: ids for all points in a patch')
parser.add_argument('--use_point_stn', type=int, default=True,
help='use point spatial transformer')
parser.add_argument('--use_feat_stn', type=int, default=True,
help='use feature spatial transformer')
parser.add_argument('--sym_op', type=str, default='max',
help='symmetry operation')
parser.add_argument('--points_per_patch', type=int, default=50,
help='max. number of points per patch')
parser.add_argument('--debug', type=int, default=0,
help='set to 1 of you want debug outputs to validate the model')
return parser.parse_args(args=args)
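# Example invocation (hypothetical script name and run name; paths follow the
# defaults above):
#
#     python train_p2s.py --name my_run --indir datasets/abc_minimal \
#         --nepoch 150 --batchSize 8 --gpu_idx 0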
def do_logging(writer, log_prefix, epoch, opt, loss,
batchind, fraction_done, num_batch, train, output_names, metrics_dict: dict):
current_step = (epoch + fraction_done) * num_batch * opt.batchSize
loss_cpu = [l.detach().cpu().item() for l in loss]
loss_sum = sum(loss_cpu)
writer.add_scalar('loss/{}/total'.format('train' if train else 'eval'), loss_sum, current_step)
if len(loss_cpu) > 1:
for wi, w in enumerate(loss_cpu):
writer.add_scalar('loss/{}/comp_{}'.format('train' if train else 'eval', output_names[wi]),
w, current_step)
keys_to_log = {'abs_dist_rms', 'accuracy', 'precision', 'recall', 'f1_score'}
for key in metrics_dict.keys():
if key in keys_to_log and isinstance(metrics_dict[key], numbers.Number):
value = metrics_dict[key]
if math.isnan(value):
value = 0.0
writer.add_scalar('metrics/{}/{}'.format('train' if train else 'eval', key), value, current_step)
if batchind % opt.debug_interval == 0:
state_string = \
'[{name} {epoch}: {batch}/{n_batches}] {prefix} loss: {loss:+.2f}, rmse: {rmse:+.2f}, f1: {f1:+.2f}'.format(
name=opt.name, epoch=epoch, batch=batchind, n_batches=num_batch - 1,
prefix=log_prefix, loss=loss_sum,
rmse=metrics_dict['abs_dist_rms'], f1=metrics_dict['f1_score'])
print(state_string)
def points_to_surf_train(opt):
devices = [torch.device('cpu' if gi < 0 else f'cuda:{gi}') for gi in opt.gpu_idx]
print(f'Training on {len(devices)} devices:')
for device in devices:
print(f' {str(device)}')
# colored console output, works e.g. on Ubuntu (WSL)
green = lambda x: '\033[92m' + x + '\033[0m'
blue = lambda x: '\033[94m' + x + '\033[0m'
log_dirname = os.path.join(opt.logdir, opt.name)
params_filename = os.path.join(opt.outdir, '%s_params.pth' % opt.name)
model_filename = os.path.join(opt.outdir, '%s_model.pth' % opt.name)
desc_filename = os.path.join(opt.outdir, '%s_description.txt' % opt.name)
if os.path.exists(log_dirname) or os.path.exists(model_filename):
if opt.name != 'test':
response = input('A training run named "{}" already exists, overwrite? (y/n) '.format(opt.name))
if response == 'y':
del_log = True
else:
return
else:
del_log = True
if del_log:
if os.path.exists(log_dirname):
try:
shutil.rmtree(log_dirname)
except OSError:
print("Can't delete " + log_dirname)
# get indices in targets and predictions corresponding to each output
target_features = []
output_target_ind = []
output_pred_ind = []
output_names = []
output_loss_weights = dict()
pred_dim = 0
for o in opt.outputs:
if o == 'imp_surf':
if o not in target_features:
target_features.append(o)
output_names.append(o)
output_target_ind.append(target_features.index(o))
output_pred_ind.append(pred_dim)
output_loss_weights[o] = 1.0
pred_dim += 1
elif o == 'imp_surf_magnitude':
if o not in target_features:
target_features.append(o)
output_names.append(o)
output_target_ind.append(target_features.index(o))
output_pred_ind.append(pred_dim)
output_loss_weights[o] = 1.0 # try higher weight here
pred_dim += 1
elif o == 'imp_surf_sign':
if o not in target_features:
target_features.append(o)
output_names.append(o)
output_target_ind.append(target_features.index(o))
output_pred_ind.append(pred_dim)
output_loss_weights[o] = 1.0
pred_dim += 1
elif o == 'p_index':
if o not in target_features:
target_features.append(o)
output_target_ind.append(target_features.index(o))
elif o == 'patch_pts_ids':
if o not in target_features:
target_features.append(o)
output_target_ind.append(target_features.index(o))
else:
raise ValueError('Unknown output: %s' % o)
if pred_dim <= 0:
raise ValueError('Prediction is empty for the given outputs.')
# create model
use_query_point = any([f in opt.outputs for f in ['imp_surf', 'imp_surf_magnitude', 'imp_surf_sign']])
p2s_model = PointsToSurfModel(
net_size_max=opt.net_size,
num_points=opt.points_per_patch,
output_dim=pred_dim,
use_point_stn=opt.use_point_stn,
use_feat_stn=opt.use_feat_stn,
sym_op=opt.sym_op,
use_query_point=use_query_point,
sub_sample_size=opt.sub_sample_size,
do_augmentation=True,
single_transformer=opt.single_transformer,
shared_transformation=opt.shared_transformer,
)
start_epoch = 0
if opt.refine != '':
print(f'Refining weights from {opt.refine}')
        p2s_model.cuda(device=devices[0])  # move to the primary device before wrapping in DataParallel
p2s_model = torch.nn.DataParallel(p2s_model, device_ids=devices)
p2s_model.load_state_dict(torch.load(opt.refine))
try:
# expecting a file name like 'vanilla_model_50.pth'
model_file = str(opt.refine)
last_underscore_pos = model_file.rfind('_')
last_dot_pos = model_file.rfind('.')
start_epoch = int(model_file[last_underscore_pos+1:last_dot_pos]) + 1
print(f'Continuing training from epoch {start_epoch}')
        except ValueError:
print(f'Warning: {opt.refine} has no epoch in the name. The Tensorboard log will continue at '
f'epoch 0 and might be messed up!')
if opt.seed < 0:
opt.seed = random.randint(1, 10000)
print("Random Seed: %d" % opt.seed)
random.seed(opt.seed)
torch.manual_seed(opt.seed)
# create train and test dataset loaders
train_dataset = data_loader.PointcloudPatchDataset(
root=opt.indir,
shape_list_filename=opt.trainset,
points_per_patch=opt.points_per_patch,
patch_features=target_features,
point_count_std=opt.patch_point_count_std,
seed=opt.seed,
identical_epochs=opt.identical_epochs,
center=opt.patch_center,
cache_capacity=opt.cache_capacity,
pre_processed_patches=True,
sub_sample_size=opt.sub_sample_size,
num_workers=int(opt.workers),
patch_radius=opt.patch_radius,
epsilon=-1, # not necessary for training
uniform_subsample=opt.uniform_subsample,
fixed_subsample=opt.fixed_subsample,
)
if opt.training_order == 'random':
train_datasampler = data_loader.RandomPointcloudPatchSampler(
train_dataset,
patches_per_shape=opt.patches_per_shape,
seed=opt.seed,
identical_epochs=opt.identical_epochs)
elif opt.training_order == 'random_shape_consecutive':
train_datasampler = data_loader.SequentialShapeRandomPointcloudPatchSampler(
train_dataset,
patches_per_shape=opt.patches_per_shape,
seed=opt.seed,
identical_epochs=opt.identical_epochs)
else:
raise ValueError('Unknown training order: %s' % opt.training_order)
def seed_train_worker(worker_id):
worker_seed = torch.initial_seed() % 2**32 # initial_seed returns a different seed for each worker and each epoch (as a long, so it needs to be cast to an int)
np.random.seed(worker_seed)
random.seed(worker_seed)
train_dataset.rng.seed(worker_seed)
train_dataset.rng_global_sample.seed(worker_seed)
train_dataloader = torch.utils.data.DataLoader(
train_dataset,
sampler=train_datasampler,
batch_size=opt.batchSize,
num_workers=int(opt.workers),
worker_init_fn=seed_train_worker)
test_dataset = data_loader.PointcloudPatchDataset(
root=opt.indir,
shape_list_filename=opt.testset,
points_per_patch=opt.points_per_patch,
patch_features=target_features,
point_count_std=opt.patch_point_count_std,
seed=opt.seed,
identical_epochs=opt.identical_epochs,
center=opt.patch_center,
cache_capacity=opt.cache_capacity,
pre_processed_patches=True,
sub_sample_size=opt.sub_sample_size,
patch_radius=opt.patch_radius,
num_workers=int(opt.workers),
epsilon=-1, # not necessary for training
uniform_subsample=opt.uniform_subsample,
fixed_subsample=opt.fixed_subsample,
)
if opt.training_order == 'random':
test_datasampler = data_loader.RandomPointcloudPatchSampler(
test_dataset,
patches_per_shape=opt.patches_per_shape,
seed=opt.seed,
identical_epochs=opt.identical_epochs)
elif opt.training_order == 'random_shape_consecutive':
test_datasampler = data_loader.SequentialShapeRandomPointcloudPatchSampler(
test_dataset,
patches_per_shape=opt.patches_per_shape,
seed=opt.seed,
identical_epochs=opt.identical_epochs)
else:
raise ValueError('Unknown training order: %s' % opt.training_order)
def seed_test_worker(worker_id):
worker_seed = torch.initial_seed() % 2**32 # initial_seed returns a different seed for each worker and each epoch (as a long, so it needs to be cast to an int)
np.random.seed(worker_seed)
random.seed(worker_seed)
test_dataset.rng.seed(worker_seed)
test_dataset.rng_global_sample.seed(worker_seed)
test_dataloader = torch.utils.data.DataLoader(
test_dataset,
sampler=test_datasampler,
batch_size=opt.batchSize,
num_workers=int(opt.workers),
worker_init_fn=seed_test_worker)
# keep the exact training shape names for later reference
opt.train_shapes = train_dataset.shape_names
opt.test_shapes = test_dataset.shape_names
print('Training set: {} patches (in {} batches) | Test set: {} patches (in {} batches)'.format(
len(train_datasampler), len(train_dataloader), len(test_datasampler), len(test_dataloader)))
try:
os.makedirs(opt.outdir)
except OSError:
pass
train_fraction_done = 0.0
log_writer = SummaryWriter(log_dirname, comment=opt.name)
log_writer.add_scalar('LR', opt.lr, 0)
optimizer = optim.SGD(p2s_model.parameters(), lr=opt.lr, momentum=opt.momentum)
# The scheduler lowers the LR as training progresses; since scheduler.step()
# is called once per epoch below, the milestones are counted in epochs.
# scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=[], gamma=0.1) # constant lr
scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=opt.scheduler_steps, gamma=0.1)
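# MultiStepLR multiplies the LR by gamma at each milestone, e.g. with lr=0.01,
# milestones=[100, 200] and gamma=0.1 the LR is 0.01 until epoch 100, 0.001
# until epoch 200, and 0.0001 afterwards (illustrative values, not the defaults).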
if opt.refine == '':
p2s_model.cuda(device=devices[0])
p2s_model = torch.nn.DataParallel(p2s_model, device_ids=devices)
train_num_batch = len(train_dataloader)
test_num_batch = len(test_dataloader)
# save parameters
torch.save(opt, params_filename)
# save description
with open(desc_filename, 'w+') as text_file:
print(opt.desc, file=text_file)
for epoch in range(start_epoch, opt.nepoch, 1):
train_enum = enumerate(train_dataloader, 0)
test_batchind = -1
test_fraction_done = 0.0
test_enum = enumerate(test_dataloader, 0)
for train_batchind, batch_data_train in train_enum:
# batch data to GPU
for key in batch_data_train.keys():
batch_data_train[key] = batch_data_train[key].cuda(device=devices[0], non_blocking=True)
# set to training mode
p2s_model.train()
# zero gradients
optimizer.zero_grad()
pred_train = p2s_model(batch_data_train)
loss_train = compute_loss(
pred=pred_train, batch_data=batch_data_train,
outputs=opt.outputs,
output_loss_weights=output_loss_weights,
fixed_radius=opt.patch_radius > 0.0
)
loss_total = sum(loss_train)
# back-propagate through entire network to compute gradients of loss w.r.t. parameters
loss_total.backward()
# parameter optimization step
optimizer.step()
train_fraction_done = (train_batchind+1) / train_num_batch
if debug:
from source import evaluation
evaluation.visualize_patch(
patch_pts_ps=batch_data_train['patch_pts_ps'][0].cpu(),
query_point_ps=batch_data_train['imp_surf_query_point_ps'][0].cpu(),
pts_sub_sample_ms=batch_data_train['pts_sub_sample_ms'][0].cpu(),
query_point_ms=batch_data_train['imp_surf_query_point_ms'][0].cpu(),
file_path='debug/patch_train.off')
metrics_dict = calc_metrics(outputs=opt.outputs, pred=pred_train, gt_data=batch_data_train)
do_logging(writer=log_writer, log_prefix=green('train'), epoch=epoch, opt=opt, loss=loss_train,
batchind=train_batchind, fraction_done=train_fraction_done, num_batch=train_num_batch,
train=True, output_names=output_names, metrics_dict=metrics_dict)
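# Interleave evaluation with training: consume test batches so that the test
# set progresses at roughly the same fraction of its epoch as the training set.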
while test_fraction_done <= train_fraction_done and test_batchind + 1 < test_num_batch:
# set to evaluation mode, no auto-diff
p2s_model.eval()
test_batchind, batch_data_test = next(test_enum)
# batch data to GPU
for key in batch_data_test.keys():
batch_data_test[key] = batch_data_test[key].cuda(device=devices[0], non_blocking=True)
# forward pass
with torch.no_grad():
pred_test = p2s_model(batch_data_test)
loss_test = compute_loss(
pred=pred_test, batch_data=batch_data_test,
outputs=opt.outputs,
output_loss_weights=output_loss_weights,
fixed_radius=opt.patch_radius > 0.0
)
metrics_dict = calc_metrics(outputs=opt.outputs, pred=pred_test, gt_data=batch_data_test)
test_fraction_done = (test_batchind+1) / test_num_batch
do_logging(writer=log_writer, log_prefix=blue('test'),
epoch=epoch, opt=opt, loss=loss_test, batchind=test_batchind,
fraction_done=test_fraction_done, num_batch=test_num_batch,
train=False, output_names=output_names, metrics_dict=metrics_dict)
# end of epoch save model, overwriting the old model
if epoch % opt.save_interval == 0 or epoch == opt.nepoch-1:
torch.save(p2s_model.state_dict(), model_filename)
# save model in a separate file in epochs 0,5,10,50,100,500,1000, ...
if epoch % (5 * 10**math.floor(math.log10(max(2, epoch-1)))) == 0 or epoch % 100 == 0 or epoch == opt.nepoch-1:
torch.save(p2s_model.state_dict(), os.path.join(opt.outdir, '%s_model_%d.pth' % (opt.name, epoch)))
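# max(2, epoch-1) guards math.log10 against epochs 0 and 1; the modulus above
# then spaces the extra snapshots roughly logarithmically over training.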
# update and log lr
lr_before_update = scheduler.get_last_lr()
if isinstance(lr_before_update, list):
lr_before_update = lr_before_update[0]
scheduler.step()
lr_after_update = scheduler.get_last_lr()
if isinstance(lr_after_update, list):
lr_after_update = lr_after_update[0]
if lr_before_update != lr_after_update:
print('LR changed from {} to {} in epoch {}'.format(lr_before_update, lr_after_update, epoch))
current_step = (epoch + 1) * train_num_batch * opt.batchSize - 1
log_writer.add_scalar('LR', lr_after_update, current_step)
log_writer.flush()
log_writer.close()
def compute_loss(pred, batch_data, outputs, output_loss_weights, fixed_radius):
loss = []
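# Two mutually exclusive output configurations are handled below: a single
# signed-distance head ('imp_surf'), or a magnitude head plus a sign head
# ('imp_surf_magnitude' + 'imp_surf_sign'). With a variable patch radius the
# distance targets are normalized to the patch scale before the loss.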
if 'imp_surf' in outputs:
o_pred = pred.squeeze()
o_target = batch_data['imp_surf_ms'].squeeze()
if not fixed_radius:
o_patch_radius = batch_data['patch_radius_ms']
o_target /= o_patch_radius
loss.append(sdf_nn.calc_loss_distance(pred=o_pred, target=o_target) *
output_loss_weights['imp_surf'])
if 'imp_surf_magnitude' in outputs and 'imp_surf_sign' in outputs:
o_pred = pred[:, 0].squeeze()
o_target = batch_data['imp_surf_magnitude_ms'].squeeze()
if not fixed_radius:
o_patch_radius = batch_data['patch_radius_ms']
o_target /= o_patch_radius
loss.append(sdf_nn.calc_loss_magnitude(pred=o_pred, target=o_target) *
output_loss_weights['imp_surf_magnitude'])
o_pred = pred[:, 1].squeeze()
o_target = batch_data['imp_surf_dist_sign_ms'].squeeze()
loss.append(sdf_nn.calc_loss_sign(pred=o_pred, target=o_target) *
output_loss_weights['imp_surf_sign'])
return loss
def calc_metrics(outputs, pred, gt_data):
def compute_rmse_abs_dist(pred, gt):
abs_dist = sdf_nn.post_process_magnitude(pred)
rmse = torch.sqrt(torch.mean((abs_dist.abs() - gt.squeeze().abs()) ** 2))
return rmse.detach().cpu().item()
def compare_classification(pred, gt):
inside_class = sdf_nn.post_process_sign(pred)
eval_dict = evaluation.compare_predictions_binary_tensors(
ground_truth=gt.squeeze(), predicted=inside_class, prediction_name='training_metrics')
return eval_dict
if 'imp_surf_magnitude' in outputs and 'imp_surf_sign' in outputs:
abs_dist_rms = compute_rmse_abs_dist(pred=pred[:, 0].squeeze(), gt=gt_data['imp_surf_magnitude_ms'])
eval_dict = compare_classification(pred=pred[:, 1].squeeze(),
gt=gt_data['imp_surf_dist_sign_ms'])
eval_dict['abs_dist_rms'] = abs_dist_rms
return eval_dict
elif 'imp_surf' in outputs:
abs_dist_rms = compute_rmse_abs_dist(pred=pred.squeeze(), gt=gt_data['imp_surf_ms'])
pred_class = pred.squeeze().clone()  # clone so thresholding does not modify pred in-place
pred_class[pred_class < 0.0] = -1.0
pred_class[pred_class >= 0.0] = 1.0
eval_dict = compare_classification(pred=pred_class,
gt=gt_data['imp_surf_dist_sign_ms'])
eval_dict['abs_dist_rms'] = abs_dist_rms
return eval_dict
else:
return {}
if __name__ == '__main__':
train_opt = parse_arguments()
points_to_surf_train(train_opt)
|
{"hexsha": "67802072b5996788df4d44c78ca5d9f2c38da67c", "size": 26683, "ext": "py", "lang": "Python", "max_stars_repo_path": "source/points_to_surf_train.py", "max_stars_repo_name": "paulguerrero/points2surf", "max_stars_repo_head_hexsha": "fbe409215d221a3023bcc9762b71826ea523ef85", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "source/points_to_surf_train.py", "max_issues_repo_name": "paulguerrero/points2surf", "max_issues_repo_head_hexsha": "fbe409215d221a3023bcc9762b71826ea523ef85", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "source/points_to_surf_train.py", "max_forks_repo_name": "paulguerrero/points2surf", "max_forks_repo_head_hexsha": "fbe409215d221a3023bcc9762b71826ea523ef85", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.5459098497, "max_line_length": 167, "alphanum_fraction": 0.6267661058, "include": true, "reason": "import numpy", "num_tokens": 5728}
|
#!/Users/james/miniconda3/envs/selva_tf/bin/python
import os,sys
import numpy as np
import cooler
def getBins(coolfile):
binsInfo = {}
chroms = coolfile.chroms()["name"][:]
print("DEBUG: \n",chroms)
print("DEBUG coolfile.bins.keys():\n",coolfile.bins().keys())
print("DEBUG coolfile.bins()[chrom]:\n",coolfile.bins()["chrom"][:])
print("DEBUG ====================")
for chrom in chroms:
idxarray = np.where(coolfile.bins()["chrom"][:] == chrom)
chromstart = idxarray[0][0]
chromend = idxarray[0][-1]
binsInfo[chrom] = [chromstart,chromend]
#print chrom, (chromend - chromstart + 1)
return binsInfo
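# A minimal usage sketch for getBins (hypothetical URI and bin indices, for
# illustration only):
# clr = cooler.Cooler("example.mcool::/resolutions/1024000")
# bins = getBins(clr) # e.g. {"chr1": [0, 242], "chr2": [243, 479], ...}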
def dumpMatrix(resolution,coolfile,binsInfo,chrom1,chrom2,outdir,name):
chrom1start,chrom1end = binsInfo[chrom1]
chrom2start,chrom2end = binsInfo[chrom2]
matrixName = '_'.join([name, str(resolution)+'kb',chrom1,chrom2,"InterMap_matrix.txt"])
matrixfile = os.path.join(outdir,matrixName)
matrix = coolfile.matrix(balance=True)[chrom1start:(chrom1end + 1), chrom2start:(chrom2end + 1)]
np.savetxt(matrixfile,matrix,fmt='%.5f',delimiter='\t')
return matrixfile
def coolToMatrix(matrixFile,resolution,outdir,name):
MatrixInfo = {}
coolfile = cooler.Cooler(matrixFile)
binsInfo = getBins(coolfile)
rankedChroms = ["chr1", "chr2", "chr3", "chr4", "chr5", "chr6", "chr7", "chr8", "chr9", "chr10", "chr11", "chr12", "chr13", "chr14", "chr15", "chr16", "chr17", "chr18", "chr19", "chr20", "chr21", "chr22", "chrX", "chrY"]
outdir = os.path.join(outdir,"InterMap_matrix")
if not os.path.isdir(outdir):
os.mkdir(outdir)
for i in range(0,len(rankedChroms)-1): # upper bounds fixed so pairs with chrY are included
for j in range((i+1), len(rankedChroms)):
chrom1 = rankedChroms[i]
chrom2 = rankedChroms[j]
#print chrom1,chrom2
matrixfile = dumpMatrix(resolution,coolfile,binsInfo,chrom1,chrom2,outdir,name)
MatrixInfo[chrom1+'_'+chrom2] = matrixfile
return MatrixInfo
matrixFile = "/Users/james/src/sandbox/cooler/MB286.mcool::/resolutions/1024000"
#matrixFile = "/Users/james/projects/jdurbin_notebooks/james/MB286/MB286.mcool::/resolutions/1024000"
coolfile = cooler.Cooler(matrixFile)
binsInfo = getBins(coolfile)
print(binsInfo)
#[chrom1start:(chrom1end + 1), chrom2start:(chrom2end + 1)]
## matrix1MbInfo = coolToMatrix(matrixfile1Mb,1000,opts.outdir,opts.name)
## matrix100kbInfo = coolToMatrix(matrixfile100kb,100,opts.outdir,opts.name)
|
{"hexsha": "b60028948845bd33c7211b9822de41a6df886b23", "size": 2501, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/cooler/test1.py", "max_stars_repo_name": "jdurbin/sandbox", "max_stars_repo_head_hexsha": "ee982f7386ae02c5937dbaee867710b5cd2cc71b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "python/cooler/test1.py", "max_issues_repo_name": "jdurbin/sandbox", "max_issues_repo_head_hexsha": "ee982f7386ae02c5937dbaee867710b5cd2cc71b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python/cooler/test1.py", "max_forks_repo_name": "jdurbin/sandbox", "max_forks_repo_head_hexsha": "ee982f7386ae02c5937dbaee867710b5cd2cc71b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.3387096774, "max_line_length": 224, "alphanum_fraction": 0.6697321072, "include": true, "reason": "import numpy", "num_tokens": 772}
|
from kaggle_environments.envs.hungry_geese.hungry_geese import Observation, Configuration, Action, row_col
from kaggle_environments import evaluate, make, utils
import numpy as np
actions = np.array(["EAST", "SOUTH", "NORTH", "WEST"])
opp_actions = {'EAST': 'WEST', 'WEST': 'EAST', 'NORTH':'SOUTH', 'SOUTH':'NORTH'}
# Creates a class for an agent so we can keep track of the last action
class RandomAgent:
def __init__(self, configuration: Configuration):
self.configuration = configuration
self.last_action = None
def __call__(self, observation: Observation):
action = np.random.choice(actions)
while action == opp_actions.get(self.last_action, ""):
action = np.random.choice(actions)
self.last_action = action
return action
cached_agents = {}
def agent(obs, config):
index = obs["index"]
if index not in cached_agents:
cached_agents[index] = RandomAgent(Configuration(config))
return cached_agents[index](Observation(obs))
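# A hedged usage sketch with kaggle_environments (default configuration assumed):
# env = make("hungry_geese", debug=True)
# env.run([agent, agent, agent, agent]) # four copies of the cached random agent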
|
{"hexsha": "1456d735a91fe33d7dbfb26cbcecb4eeeccb0096", "size": 1019, "ext": "py", "lang": "Python", "max_stars_repo_path": "notebooks/1.0-bgc-Tutorial.py", "max_stars_repo_name": "BrunoGomesCoelho/Eat-Move-Learn", "max_stars_repo_head_hexsha": "d3bbe6062100205590d52c083339edaddd791da4", "max_stars_repo_licenses": ["RSA-MD"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "notebooks/1.0-bgc-Tutorial.py", "max_issues_repo_name": "BrunoGomesCoelho/Eat-Move-Learn", "max_issues_repo_head_hexsha": "d3bbe6062100205590d52c083339edaddd791da4", "max_issues_repo_licenses": ["RSA-MD"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "notebooks/1.0-bgc-Tutorial.py", "max_forks_repo_name": "BrunoGomesCoelho/Eat-Move-Learn", "max_forks_repo_head_hexsha": "d3bbe6062100205590d52c083339edaddd791da4", "max_forks_repo_licenses": ["RSA-MD"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.7407407407, "max_line_length": 106, "alphanum_fraction": 0.6987242395, "include": true, "reason": "import numpy", "num_tokens": 240}
|
import os
import scipy.misc
import numpy as np
from model import DCGAN
from utils import pp, visualize, to_json, show_all_variables, generate_random_images, encode, generate_image_from_seed, generate_walk_in_latent_space, generate_continuous_random_interps, generate_continuous_interps_from_json, generate_single_value_changes, generate_sin_cycle, generate_sin_cycle_all_100
import tensorflow as tf
flags = tf.app.flags
flags.DEFINE_integer("epoch", 25, "Epoch to train [25]")
flags.DEFINE_float("learning_rate", 0.0002, "Learning rate of for adam [0.0002]")
flags.DEFINE_float("beta1", 0.5, "Momentum term of adam [0.5]")
flags.DEFINE_float("train_size", np.inf, "The size of train images [np.inf]")
flags.DEFINE_integer("batch_size", 64, "The size of batch images [64]")
flags.DEFINE_integer("input_height", 108, "The size of image to use (will be center cropped). [108]")
flags.DEFINE_integer("input_width", None, "The size of image to use (will be center cropped). If None, same value as input_height [None]")
flags.DEFINE_integer("output_height", 64, "The size of the output images to produce [64]")
flags.DEFINE_integer("output_width", None, "The size of the output images to produce. If None, same value as output_height [None]")
flags.DEFINE_string("dataset", "celebA", "The name of dataset [celebA, mnist, lsun]")
flags.DEFINE_string("input_fname_pattern", "*.jpg", "Glob pattern of filename of input images [*]")
flags.DEFINE_string("checkpoint_dir", "checkpoint", "Directory name to save the checkpoints [checkpoint]")
flags.DEFINE_string("data_dir", "./data", "Root directory of dataset [data]")
flags.DEFINE_string("sample_dir", "samples", "Directory name to save the image samples [samples]")
flags.DEFINE_boolean("train", False, "True for training, False for testing [False]")
flags.DEFINE_boolean("crop", False, "True for training, False for testing [False]")
flags.DEFINE_boolean("visualize", False, "True for visualizing, False for nothing [False]")
flags.DEFINE_integer("generate_test_images", 100, "Number of images to generate during test. [100]")
# MEEE custom flags
flags.DEFINE_string("input_seed_path", None, "Path to the json file to be inputted to generator.")
flags.DEFINE_integer("walk_rand_seed", None, "Seed for PRNG to be inputted (to recreate previous film)")
flags.DEFINE_integer("walk_num", 2700, "Number of frames of walk in latent space.")
flags.DEFINE_float("max_jump_step", 0.03, "Maximum value for one step in jump in latent space (mode 16)")
flags.DEFINE_float("min_jump_step", None, "Minimum value for one step in jump in latent space (mode 16)")
flags.DEFINE_integer("generation_mode", 1, "Generation mode used in testing. Please refer to README.txt")
flags.DEFINE_string("checkpoint_name", None, "Name of the checkpoint file to load from e.g. DCGAN.model-183502")
flags.DEFINE_string("interp_json", None, "Path to json file which contains the info needed to generate mode 10.")
flags.DEFINE_string("sin_cycle_json", None, "Path to json file which contains the info needed to generate mode 14.")
FLAGS = flags.FLAGS
def main(_):
pp.pprint(flags.FLAGS.__flags)
if FLAGS.input_width is None:
FLAGS.input_width = FLAGS.input_height
if FLAGS.output_width is None:
FLAGS.output_width = FLAGS.output_height
if not os.path.exists(FLAGS.checkpoint_dir):
os.makedirs(FLAGS.checkpoint_dir)
if not os.path.exists(FLAGS.sample_dir):
os.makedirs(FLAGS.sample_dir)
#gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
run_config = tf.ConfigProto()
run_config.gpu_options.allow_growth=True
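# allow_growth lets TensorFlow claim GPU memory on demand instead of reserving
# the whole device up front.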
with tf.Session(config=run_config) as sess:
if FLAGS.dataset == 'mnist':
dcgan = DCGAN(
sess,
input_width=FLAGS.input_width,
input_height=FLAGS.input_height,
output_width=FLAGS.output_width,
output_height=FLAGS.output_height,
batch_size=FLAGS.batch_size,
sample_num=FLAGS.batch_size,
y_dim=10,
z_dim=FLAGS.generate_test_images,
dataset_name=FLAGS.dataset,
input_fname_pattern=FLAGS.input_fname_pattern,
crop=FLAGS.crop,
checkpoint_dir=FLAGS.checkpoint_dir,
sample_dir=FLAGS.sample_dir,
data_dir=FLAGS.data_dir)
else:
dcgan = DCGAN(
sess,
input_width=FLAGS.input_width,
input_height=FLAGS.input_height,
output_width=FLAGS.output_width,
output_height=FLAGS.output_height,
batch_size=FLAGS.batch_size,
sample_num=FLAGS.batch_size,
z_dim=FLAGS.generate_test_images,
dataset_name=FLAGS.dataset,
input_fname_pattern=FLAGS.input_fname_pattern,
crop=FLAGS.crop,
checkpoint_dir=FLAGS.checkpoint_dir,
sample_dir=FLAGS.sample_dir,
data_dir=FLAGS.data_dir,
# MEEE ATOM options
checkpoint_name=FLAGS.checkpoint_name)
show_all_variables()
if FLAGS.train:
dcgan.train(FLAGS)
else:
if not dcgan.load(FLAGS.checkpoint_dir)[0]:
raise Exception("[!] Train a model first, then run test mode")
mode = FLAGS.generation_mode
if mode == 1: # Generate 2000 random images and their seed value json files
generate_random_images(sess, dcgan, FLAGS, 2000)
elif mode == 2: # Generate 1.5 min random num of frames per interpolation. With cut: A - B | C - D
generate_continuous_random_interps(sess, dcgan, FLAGS, 2700, True, True)
elif mode == 3: # Generate 1.5 min 32 frames per interpolation. With cut: A - B | C - D
generate_continuous_random_interps(sess, dcgan, FLAGS, 2700, True, False)
elif mode == 4: # Generate 1.5 min random num of frames per interpolation. With cut: A - B - C
generate_continuous_random_interps(sess, dcgan, FLAGS, 2700, False, True)
elif mode == 5: # Generate 1.5 min 32 frames per interpolation. With cut: A - B - C
generate_continuous_random_interps(sess, dcgan, FLAGS, 2700, False, False)
# NOTE: for walk in latent space, it is required to pass in --input_seed_path <filename>.json
elif mode == 6: # Walk in latent space, velocity/acceleration with clamp mode
generate_walk_in_latent_space(sess, dcgan, FLAGS, 6)
elif mode == 7: # Walk in latent space, velocity/acceleration with wrap mode
generate_walk_in_latent_space(sess, dcgan, FLAGS, 7)
elif mode == 8: # Walk in latent space, default mode (not velocity/acceleration)
generate_walk_in_latent_space(sess, dcgan, FLAGS, 8)
elif mode == 9: # Walk in latent space, velocity/acceleration with reverse mode
generate_walk_in_latent_space(sess, dcgan, FLAGS, 9)
elif mode == 10: # Generate continuous interpretation from a json file
generate_continuous_interps_from_json(sess, dcgan, FLAGS)
elif mode == 11: # Walk in latent space, velocity/acceleration wrap mode, only update 50 out of 100 values
generate_walk_in_latent_space(sess, dcgan, FLAGS, 11)
elif mode == 12: # 10th to 100000th digit change for 1st number of seed
generate_single_value_changes(sess, dcgan, FLAGS, 2)
elif mode == 13: # Sinusoidal cycling of first value, 2 cycles, 10 seconds per cycle
generate_sin_cycle(sess, dcgan, FLAGS, 2, 10, 13)
elif mode == 14: # Sinusoidal cycling of values specified by json (--sin_cycle_json)
generate_sin_cycle(sess, dcgan, FLAGS, 1, 1, 14)
elif mode == 15: # Sinusoidal cycling through all 100 numbers, 6 s per cycle
generate_sin_cycle_all_100(sess, dcgan, FLAGS)
elif mode == 16: # Jump in latent space, velocity/acceleration with wrap mode
generate_walk_in_latent_space(sess, dcgan, FLAGS, 16)
# Generate
# generate_image_from_seed(sess, dcgan, FLAGS)
# encode(sess, dcgan, FLAGS)
# to_json("./web/js/layers.js", [dcgan.h0_w, dcgan.h0_b, dcgan.g_bn0],
# [dcgan.h1_w, dcgan.h1_b, dcgan.g_bn1],
# [dcgan.h2_w, dcgan.h2_b, dcgan.g_bn2],
# [dcgan.h3_w, dcgan.h3_b, dcgan.g_bn3],
# [dcgan.h4_w, dcgan.h4_b, None])
# Below is codes for visualization
# OPTION = 0
OPTION = 1
# visualize(sess, dcgan, FLAGS, OPTION)
if __name__ == '__main__':
tf.app.run()
|
{"hexsha": "ae9eb56d1b20d88567166b6280c5e8cae03e9834", "size": 8301, "ext": "py", "lang": "Python", "max_stars_repo_path": "main.py", "max_stars_repo_name": "hyeminchocho/DCGAN-tensorflow", "max_stars_repo_head_hexsha": "3dce578865b8fad11532a28990f0814f2bcc9e98", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "main.py", "max_issues_repo_name": "hyeminchocho/DCGAN-tensorflow", "max_issues_repo_head_hexsha": "3dce578865b8fad11532a28990f0814f2bcc9e98", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "main.py", "max_forks_repo_name": "hyeminchocho/DCGAN-tensorflow", "max_forks_repo_head_hexsha": "3dce578865b8fad11532a28990f0814f2bcc9e98", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 53.2115384615, "max_line_length": 303, "alphanum_fraction": 0.708709794, "include": true, "reason": "import numpy,import scipy", "num_tokens": 2075}
|
import tensorflow as tf
import tensorflow.contrib.slim as slim
import numpy as np
import math
import sys
from generator_network import *
from discriminator_network import *
from common import *
def gen_image_processing(gan_out):
# Scale image values from [-1, 1] to [0, 1] (TanH -> TF float32 image ranges)
img_f32 = (gan_out + 1.0) / 2.0
img_8u = tf.image.convert_image_dtype(img_f32, tf.uint8, saturate=True)
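# E.g. a tanh output of -1.0 maps to 0.0 and +1.0 maps to 1.0, so the
# saturating uint8 conversion yields pixel values in [0, 255].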
return img_8u
def waifunet_parameters(parser):
group = parser.add_argument_group('General WaifuNet Parameters')
group.add_argument('--wasserstein', action='store_true', help='If set, use est. Wasserstein distance for loss instead of standard GAN losses')
group.add_argument('--z-size', type=int, default=256, help='Dimensionality of Z (noise) vectors')
group.add_argument('--label-size', type=int, default=1000, help='Dimensionality of Y (tag) vectors')
group.add_argument('--output-height', type=int, default=1024, help='Height of output images')
group.add_argument('--output-width', type=int, default=1024, help='Width of output images')
# Alternately: 5e-5 for Wasserstein GANs
group.add_argument('--learning-rate', type=float, default=2e-4, help='Learning rate for generator and discriminator networks')
group.add_argument('--beta1', type=float, default=0.5, help='Beta1 parameter for Adam optimizers (for both generator and discriminator)')
group.add_argument('--n-gpus', type=int, default=1, help='Number of GPUs to use for training.')
class waifunet(object):
# Builds the model.
# Options:
# 'z_size' [default 256]: Length / dimensionality of Z vector
# 'label_size' [default 1000]: Length / dimensionality of Y vector
# 'output_width', 'output_height' [default 1024 for both]: Dimension of generator output
# 'learning_rate' [recommended value 2e-4]: Learning rate for Adam / RMSProp optimization
# 'beta1' [recommended value 0.5]: Beta1 parameter for Adam optimization
#
# Input tensor parameters:
# 'noise_in', 'labels_in': Generator network inputs (noise vectors and tags)
# 'sample_images_in, sample_labels_in': Discriminator sample inputs
# 'dsc_labels_in': Correct / incorrect labels for discriminator inputs (Fully-correct vs. mismatched tags and images)
def __init__(self, args, noise_in, labels_in, sample_batch, mismatched_batch): #labels_in, sample_images_in, sample_labels_in, dsc_labels_in):
self.args = args
self.noise = noise_in
self.labels = labels_in
self.samples = sample_batch
self.mismatched = mismatched_batch
self.optimizer = tf.train.RMSPropOptimizer(learning_rate=args.learning_rate)
def gen_training_step(self, sess, summaries=False, trace=False):
if trace:
run_meta = tf.RunMetadata()
run_opts = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
else:
run_meta = None
run_opts = None
if summaries:
_, gen_summary = sess.run([self.gen_train, self.gen_summaries], options=run_opts, run_metadata=run_meta)
else:
gen_summary = None
sess.run(self.gen_train, options=run_opts, run_metadata=run_meta)
return gen_summary, run_meta
def dsc_training_step(self, sess, summaries=False, trace=False):
if trace:
run_meta = tf.RunMetadata()
run_opts = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
else:
run_meta = None
run_opts = None
if summaries:
_, dsc_summary = sess.run([self.dsc_train, self.dsc_summaries], options=run_opts, run_metadata=run_meta)
else:
dsc_summary = None
sess.run(self.dsc_train, options=run_opts, run_metadata=run_meta)
return dsc_summary, run_meta
# Network outputs:
# self.dsc_train: Performs a single discriminator network training step when evaluated.
# self.gen_train: Performs a single generator network training steps when evaluated.
# self.dsc_summaries: Returns a merged summary tensor for a discriminator training step.
# self.gen_summaries: Returns a merged summary tensor for a generator training step; includes images.
# self.gen_images: Returns the generated images from the generator network (for each tower)
def build(self):
if self.args.n_gpus <= 1:
# Single-tower
self.single_tower_gradients()
else:
self.multi_tower_gradients()
self.training_ops()
self.summary_ops()
def multi_tower_gradients(self):
gen_grads = []
dsc_grads = []
gen_losses = []
dsc_losses = []
self.gen_images = []
with tf.variable_scope('WaifuNet'):
for i in range(self.args.n_gpus):
with tf.device("/gpu:{:d}".format(i)):
grads, losses, nets = self.per_tower_ops()
tf.get_variable_scope().reuse_variables()
gen_grads.append(grads['gen'])
dsc_grads.append(grads['dsc'])
gen_losses.append(losses['gen'])
dsc_losses.append(losses['dsc'])
gen_out = gen_image_processing(nets['gen'].out)
self.gen_images.append(gen_out)
# Now average gradients across all towers:
self.gen_grads = self.average_gradients(gen_grads)
self.dsc_grads = self.average_gradients(dsc_grads)
# And average losses across all towers
self.gen_loss = tf.reduce_mean(gen_losses)
self.dsc_loss = tf.reduce_mean(dsc_losses)
def single_tower_gradients(self):
with tf.variable_scope('WaifuNet'):
grads, losses, nets = self.per_tower_ops()
self.gen_grads = grads['gen']
self.dsc_grads = grads['dsc']
self.gen_loss = losses['gen']
self.dsc_loss = losses['dsc']
gen_out = gen_image_processing(nets['gen'].out)
self.gen_images = [gen_out]
def training_ops(self):
global_step = tf.contrib.framework.get_or_create_global_step()
gen_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope='WaifuNet/Generator')
dsc_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope='WaifuNet/Discriminator')
with tf.control_dependencies(gen_update_ops):
self.gen_train = self.optimizer.apply_gradients(self.gen_grads, global_step=global_step)
with tf.control_dependencies(dsc_update_ops):
self.dsc_train = self.optimizer.apply_gradients(self.dsc_grads, global_step=global_step)
def summary_ops(self):
tf.summary.scalar('Mean Generator Loss', self.gen_loss, collections=['gen-summaries'])
# gen_grads/dsc_grads are lists of (gradient, variable) pairs, so each gradient
# gets its own histogram; summarizing the raw list would fail
for grad, var in self.gen_grads:
    tf.summary.histogram('Generator_Gradients/' + var.op.name, grad, collections=['gen-summaries'])
tf.summary.image('Generator Output', self.gen_images[0], collections=['gen-summaries'])
tf.summary.scalar('Mean Discriminator Loss', self.dsc_loss, collections=['dsc-summaries'])
for grad, var in self.dsc_grads:
    tf.summary.histogram('Discriminator_Gradients/' + var.op.name, grad, collections=['dsc-summaries'])
self.gen_summaries = tf.summary.merge_all(key='gen-summaries')
self.dsc_summaries = tf.summary.merge_all(key='dsc-summaries')
def average_gradients(self, tower_grads):
average_grads = []
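# Each entry of tower_grads is a list of (gradient, variable) pairs from one
# GPU tower; zipping them groups the per-tower gradients of the same variable
# so they can be stacked and averaged into a single synchronous update.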
for gv in zip(*tower_grads):
grads = []
var = gv[0][1]
for grad, _ in gv:
grads.append(tf.expand_dims(grad, 0))
avg_grad = tf.concat(grads, axis=0)
avg_grad = tf.reduce_mean(avg_grad, axis=0)
average_grads.append( (avg_grad, var) )
return average_grads
def per_tower_ops(self):
gen = Generator(self.args, self.noise, self.labels)
fake_image = gen.build()
dsc_fake_net = Discriminator(self.args, fake_image)
dsc_real_net = Discriminator(self.args, self.samples[0])
dsc_fake, pred_labels_fake = dsc_fake_net.build()
dsc_real, pred_labels_real = dsc_real_net.build(reuse=True)
if not self.args.wasserstein:
gen_loss, dsc_loss = self.standard_gan_loss(dsc_fake, dsc_real)
else:
gen_loss, dsc_loss = self.wasserstein_gan_loss(dsc_fake, dsc_real)
# Labeling losses for generator and discriminator
xentropy_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
    labels=self.labels, logits=pred_labels_fake))
xentropy_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
    labels=self.samples[1], logits=pred_labels_real))
gen_loss += xentropy_fake
dsc_loss += xentropy_real
# For WGANs, add an input-gradient norm term to the loss
if self.args.wasserstein:
grad_norms_fake = dsc_fake_net.input_gradient_norms()
grad_norms_real = dsc_real_net.input_gradient_norms()
fake_norm_loss = tf.square(grad_norms_fake - 1)
real_norm_loss = tf.square(grad_norms_real - 1)
dsc_loss += tf.reduce_mean([fake_norm_loss, real_norm_loss])
gen_grads = self.optimizer.compute_gradients(gen_loss, var_list=gen.vars)
dsc_grads = self.optimizer.compute_gradients(dsc_loss, var_list=dsc_fake_net.vars)
nets = {'gen': gen, 'dsc_fake': dsc_fake_net, 'dsc_real': dsc_real_net}
grads = {'dsc': dsc_grads, 'gen': gen_grads}
losses = {'dsc': dsc_loss, 'gen': gen_loss}
return grads, losses, nets
def wasserstein_gan_loss(self, dsc_fake_out, dsc_real_out):
debug_print("[WaifuNet] Creating Wasserstein GAN loss ops...")
critic_real_mean = tf.reduce_mean(dsc_real_out)
critic_fake_mean = tf.reduce_mean(dsc_fake_out)
dsc_loss = critic_real_mean - critic_fake_mean
gen_loss = critic_fake_mean
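# Sign-convention note: the critic here is trained so that fakes score high
# (it minimizes real - fake); the generator then minimizes the fake score,
# which is the mirrored form of the usual WGAN objective.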
return gen_loss, dsc_loss
def standard_gan_loss(self, dsc_fake_out, dsc_real_out):
debug_print("[WaifuNet] Creating standard GAN loss ops...")
# Discriminator outputs probability that sample came from training dataset
batch_size = dsc_fake_out.shape.as_list()[0]  # note: currently unused; batch size is taken from self.args below
dsc_fake_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
logits=dsc_fake_out,
labels=tf.random_uniform([self.args.batch_size], minval=0.0, maxval=0.3),#tf.zeros_like(self.dsc_fake_out),
name='discriminator-fake-loss'
))
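# Hypothesis from the commented alternatives: the uniform labels in [0.0, 0.3]
# here and [0.7, 1.2] in the generator loss below act as noisy one-sided label
# smoothing in place of hard zeros/ones.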
dsc_sample_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
logits=dsc_real_out,
labels=self.samples[2],
name='discriminator-sample-loss'
))
gen_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
logits=dsc_fake_out,
labels=tf.random_uniform([self.args.batch_size], minval=0.7, maxval=1.2),#tf.ones_like(self.dsc_fake_out),
name='generator-loss'
))
dsc_loss = dsc_fake_loss + dsc_sample_loss
return gen_loss, dsc_loss
|
{"hexsha": "a7589aa7817a3bc3103b4685dcdfaa48d6a88c75", "size": 11034, "ext": "py", "lang": "Python", "max_stars_repo_path": "generative-waifu-network/waifunet.py", "max_stars_repo_name": "stmobo/Machine-Learning", "max_stars_repo_head_hexsha": "83f69c7afb0a4bc1dc94482b8d23805e8ab2acde", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2017-09-26T04:39:04.000Z", "max_stars_repo_stars_event_max_datetime": "2017-10-12T08:57:51.000Z", "max_issues_repo_path": "generative-waifu-network/waifunet.py", "max_issues_repo_name": "stmobo/Machine-Learning", "max_issues_repo_head_hexsha": "83f69c7afb0a4bc1dc94482b8d23805e8ab2acde", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "generative-waifu-network/waifunet.py", "max_forks_repo_name": "stmobo/Machine-Learning", "max_forks_repo_head_hexsha": "83f69c7afb0a4bc1dc94482b8d23805e8ab2acde", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.3258426966, "max_line_length": 146, "alphanum_fraction": 0.6692042777, "include": true, "reason": "import numpy", "num_tokens": 2573}
|
import numpy as np
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Dense, Conv2D, Dropout, Flatten, MaxPooling2D
import matplotlib.pyplot as plt
from keras.datasets import mnist
from keras.utils import np_utils
from keras.utils import to_categorical
from keras import backend as K
class DigitRecoganiseController:
def __init__(self, pool_size, rate, number_neurons_output, epochs):
self.pool_size = pool_size # (2, 2)
self.rate = rate # 0.2
self.number_neurons_output = number_neurons_output #10
self.epochs = epochs
def initialize_cnn(self):
model = Sequential()
## Conv2D
## filters: Integer, the dimensionality of the output space (i.e. the number of output filters in the convolution).
model.add(Conv2D(32, (5,5), input_shape = (28, 28, 1), activation = 'relu'))
model.add(MaxPooling2D(pool_size = self.pool_size))
model.add(Conv2D(16, (3,3), activation = 'relu'))
model.add(MaxPooling2D(pool_size = self.pool_size))
model.add(Dropout(self.rate))
model.add(Flatten()) # Flattening the 2D arrays for fully connected layers
model.add(Dense(128, activation = 'relu'))
model.add(Dense(64, activation = 'relu'))
model.add(Dense(self.number_neurons_output, activation = 'softmax'))
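# Rough shape flow for 28x28x1 inputs: conv5x5 -> 24x24x32, pool -> 12x12x32,
# conv3x3 -> 10x10x16, pool -> 5x5x16, flatten -> 400, dense 128 -> 64 -> 10.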
return model
def fit_model(self, model, X_train, y_train, X_test, y_test):
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics = ['accuracy'])
model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs = self.epochs, batch_size=200)
return model
def evaluate_model(self, model, x_test, y_test):
return model.evaluate(x_test, y_test)
def predict(self, model, x_test, y_test, image_index, img_rows, img_cols):
plt.imshow(x_test[image_index].reshape(28, 28),cmap='Greys')
pred = model.predict(x_test[image_index].reshape(1, img_rows, img_cols, 1))
print(pred.argmax())
if __name__ == "__main__":
seed = 7
np.random.seed(seed)
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = np.expand_dims(X_train, axis=-1)
X_test = np.expand_dims(X_test, axis=-1)
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
# Normalize inputs from 0-255 to 0-1
X_train = X_train / 255
X_test = X_test / 255
number_neurons_output = y_test.shape[1]
pool_size = (2, 2)
rate = 0.2
epochs = 10
recognizer = DigitRecoganiseController(pool_size, rate, number_neurons_output, epochs)
model = recognizer.initialize_cnn()
model = recognizer.fit_model(model, X_train, y_train, X_test, y_test)
print(recognizer.evaluate_model(model, X_test, y_test))
# serialize model to JSON
model_json = model.to_json()
with open("model.json", "w") as json_file:
json_file.write(model_json)
# serialize weights to HDF5
model.save_weights("model.h5")
print("Saved model to disk")
|
{"hexsha": "95a4fde93a7258600ff050e3f6aa9ea0303fbfe9", "size": 3055, "ext": "py", "lang": "Python", "max_stars_repo_path": "controller/digit_recoganise_controller.py", "max_stars_repo_name": "smarth06/Smart_Sudoku", "max_stars_repo_head_hexsha": "0ce87e749fda6c44e4a459f3d5ec030a42c09cb7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-06-14T18:29:09.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-14T18:29:09.000Z", "max_issues_repo_path": "controller/digit_recoganise_controller.py", "max_issues_repo_name": "smarth06/Smart_Sudoku", "max_issues_repo_head_hexsha": "0ce87e749fda6c44e4a459f3d5ec030a42c09cb7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2020-11-13T18:49:30.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T01:42:33.000Z", "max_forks_repo_path": "controller/digit_recoganise_controller.py", "max_forks_repo_name": "smarth06/Smart_Sudoku", "max_forks_repo_head_hexsha": "0ce87e749fda6c44e4a459f3d5ec030a42c09cb7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-04-06T04:14:57.000Z", "max_forks_repo_forks_event_max_datetime": "2020-05-15T15:08:43.000Z", "avg_line_length": 38.6708860759, "max_line_length": 123, "alphanum_fraction": 0.6821603928, "include": true, "reason": "import numpy", "num_tokens": 778}
|
[STATEMENT]
lemma generate_valid_stateful_policy_IFSACS_2_noIFS_noACSsideeffects_imp_fullgraph:
assumes validReqs: "valid_reqs M"
and wfG: "wf_graph G"
and high_level_policy_valid: "all_security_requirements_fulfilled M G"
and edgesList: "(set edgesList) = edges G"
and no_ACS_sideeffects: "\<forall>F \<in> get_offending_flows (get_ACS M) \<lparr>nodes = nodes G, edges = edges G \<union> backflows (edges G)\<rparr>. F \<subseteq> (backflows (edges G)) - (edges G)"
and no_IFS: "get_IFS M = []"
shows "stateful_policy_to_network_graph (generate_valid_stateful_policy_IFSACS_2 G M edgesList) = undirected G"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. stateful_policy_to_network_graph (generate_valid_stateful_policy_IFSACS_2 G M edgesList) = undirected G
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. stateful_policy_to_network_graph (generate_valid_stateful_policy_IFSACS_2 G M edgesList) = undirected G
[PROOF STEP]
from filter_IFS_no_violations_accu_no_IFS[OF valid_reqs_IFS_D[OF validReqs] wfG no_IFS] edgesList
[PROOF STATE]
proof (chain)
picking this:
set ?accu \<union> set ?edgesList \<subseteq> edges G \<Longrightarrow> filter_IFS_no_violations_accu G M ?accu ?edgesList = rev ?edgesList @ ?accu
set edgesList = edges G
[PROOF STEP]
have "filter_IFS_no_violations G M edgesList = rev edgesList"
[PROOF STATE]
proof (prove)
using this:
set ?accu \<union> set ?edgesList \<subseteq> edges G \<Longrightarrow> filter_IFS_no_violations_accu G M ?accu ?edgesList = rev ?edgesList @ ?accu
set edgesList = edges G
goal (1 subgoal):
1. filter_IFS_no_violations G M edgesList = rev edgesList
[PROOF STEP]
by(simp add: filter_IFS_no_violations_def)
[PROOF STATE]
proof (state)
this:
filter_IFS_no_violations G M edgesList = rev edgesList
goal (1 subgoal):
1. stateful_policy_to_network_graph (generate_valid_stateful_policy_IFSACS_2 G M edgesList) = undirected G
[PROOF STEP]
from this filter_compliant_stateful_ACS_subseteq_input
[PROOF STATE]
proof (chain)
picking this:
filter_IFS_no_violations G M edgesList = rev edgesList
set (filter_compliant_stateful_ACS ?G ?M ?Es) \<subseteq> set ?Es
[PROOF STEP]
have flows_state_IFS: "flows_state (generate_valid_stateful_policy_IFSACS_2 G M edgesList) = set (filter_compliant_stateful_ACS G M edgesList)"
[PROOF STATE]
proof (prove)
using this:
filter_IFS_no_violations G M edgesList = rev edgesList
set (filter_compliant_stateful_ACS ?G ?M ?Es) \<subseteq> set ?Es
goal (1 subgoal):
1. flows_state (generate_valid_stateful_policy_IFSACS_2 G M edgesList) = set (filter_compliant_stateful_ACS G M edgesList)
[PROOF STEP]
by(auto simp add: generate_valid_stateful_policy_IFSACS_2_def)
[PROOF STATE]
proof (state)
this:
flows_state (generate_valid_stateful_policy_IFSACS_2 G M edgesList) = set (filter_compliant_stateful_ACS G M edgesList)
goal (1 subgoal):
1. stateful_policy_to_network_graph (generate_valid_stateful_policy_IFSACS_2 G M edgesList) = undirected G
[PROOF STEP]
have flowsfix: "flows_fix (generate_valid_stateful_policy_IFSACS_2 G M edgesList) = edges G"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. flows_fix (generate_valid_stateful_policy_IFSACS_2 G M edgesList) = edges G
[PROOF STEP]
by(simp add: generate_valid_stateful_policy_IFSACS_2_def)
[PROOF STATE]
proof (state)
this:
flows_fix (generate_valid_stateful_policy_IFSACS_2 G M edgesList) = edges G
goal (1 subgoal):
1. stateful_policy_to_network_graph (generate_valid_stateful_policy_IFSACS_2 G M edgesList) = undirected G
[PROOF STEP]
have hosts: "hosts (generate_valid_stateful_policy_IFSACS_2 G M edgesList) = nodes G"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. hosts (generate_valid_stateful_policy_IFSACS_2 G M edgesList) = nodes G
[PROOF STEP]
by(simp add: generate_valid_stateful_policy_IFSACS_2_def)
[PROOF STATE]
proof (state)
this:
hosts (generate_valid_stateful_policy_IFSACS_2 G M edgesList) = nodes G
goal (1 subgoal):
1. stateful_policy_to_network_graph (generate_valid_stateful_policy_IFSACS_2 G M edgesList) = undirected G
[PROOF STEP]
from filter_compliant_stateful_ACS_accu_no_side_effects[OF valid_reqs_ACS_D[OF validReqs] wfG no_ACS_sideeffects]
[PROOF STATE]
proof (chain)
picking this:
\<lbrakk>set ?accu \<union> set ?edgesList \<subseteq> edges G; \<forall>a\<in>set ?accu. a \<notin> backflows (edges G)\<rbrakk> \<Longrightarrow> filter_compliant_stateful_ACS_accu G M ?accu ?edgesList = rev (filter (\<lambda>e. e \<notin> backflows (edges G)) ?edgesList) @ ?accu
[PROOF STEP]
have
"filter_compliant_stateful_ACS G M edgesList = rev [e\<leftarrow>edgesList . e \<notin> backflows (edges G)]"
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>set ?accu \<union> set ?edgesList \<subseteq> edges G; \<forall>a\<in>set ?accu. a \<notin> backflows (edges G)\<rbrakk> \<Longrightarrow> filter_compliant_stateful_ACS_accu G M ?accu ?edgesList = rev (filter (\<lambda>e. e \<notin> backflows (edges G)) ?edgesList) @ ?accu
goal (1 subgoal):
1. filter_compliant_stateful_ACS G M edgesList = rev (filter (\<lambda>e. e \<notin> backflows (edges G)) edgesList)
[PROOF STEP]
by(simp add: filter_compliant_stateful_ACS_def edgesList)
[PROOF STATE]
proof (state)
this:
filter_compliant_stateful_ACS G M edgesList = rev (filter (\<lambda>e. e \<notin> backflows (edges G)) edgesList)
goal (1 subgoal):
1. stateful_policy_to_network_graph (generate_valid_stateful_policy_IFSACS_2 G M edgesList) = undirected G
[PROOF STEP]
hence filterACS: "set (filter_compliant_stateful_ACS G M edgesList) = edges G - (backflows (edges G))"
[PROOF STATE]
proof (prove)
using this:
filter_compliant_stateful_ACS G M edgesList = rev (filter (\<lambda>e. e \<notin> backflows (edges G)) edgesList)
goal (1 subgoal):
1. set (filter_compliant_stateful_ACS G M edgesList) = edges G - backflows (edges G)
[PROOF STEP]
using edgesList
[PROOF STATE]
proof (prove)
using this:
filter_compliant_stateful_ACS G M edgesList = rev (filter (\<lambda>e. e \<notin> backflows (edges G)) edgesList)
set edgesList = edges G
goal (1 subgoal):
1. set (filter_compliant_stateful_ACS G M edgesList) = edges G - backflows (edges G)
[PROOF STEP]
by force
[PROOF STATE]
proof (state)
this:
set (filter_compliant_stateful_ACS G M edgesList) = edges G - backflows (edges G)
goal (1 subgoal):
1. stateful_policy_to_network_graph (generate_valid_stateful_policy_IFSACS_2 G M edgesList) = undirected G
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. stateful_policy_to_network_graph (generate_valid_stateful_policy_IFSACS_2 G M edgesList) = undirected G
[PROOF STEP]
apply(simp add: undirected_backflows stateful_policy_to_network_graph_def all_flows_def)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. hosts (generate_valid_stateful_policy_IFSACS_2 G M edgesList) = nodes G \<and> flows_fix (generate_valid_stateful_policy_IFSACS_2 G M edgesList) \<union> flows_state (generate_valid_stateful_policy_IFSACS_2 G M edgesList) \<union> backflows (flows_state (generate_valid_stateful_policy_IFSACS_2 G M edgesList)) = edges G \<union> backflows (edges G)
[PROOF STEP]
apply(simp add: hosts filterACS flows_state_IFS flowsfix)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. edges G \<union> (edges G - backflows (edges G)) \<union> backflows (edges G - backflows (edges G)) = edges G \<union> backflows (edges G)
[PROOF STEP]
apply(simp add: backflows_minus_backflows)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. edges G \<union> (edges G - backflows (edges G)) \<union> (backflows (edges G) - edges G) = edges G \<union> backflows (edges G)
[PROOF STEP]
by fast
[PROOF STATE]
proof (state)
this:
stateful_policy_to_network_graph (generate_valid_stateful_policy_IFSACS_2 G M edgesList) = undirected G
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 3063, "file": "Network_Security_Policy_Verification_TopoS_Stateful_Policy_Algorithm", "length": 23}
|
using ASTModels
using Continuables
@expr a b c
d = a + b
e = c*d
# TODO build continuation macro @cont to automatically create the other method version, both for function(cont, ...) for f(cont, ) = as well as for cont not being the first one
|
{"hexsha": "b094d9705774f0b581a06ec1a7f019cd5b3ad79d", "size": 244, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/develop.jl", "max_stars_repo_name": "schlichtanders/ASTModels.jl", "max_stars_repo_head_hexsha": "47207a687f5f81a59d375be3eeba17b9a38d178a", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-10-22T14:01:55.000Z", "max_stars_repo_stars_event_max_datetime": "2019-10-22T14:01:55.000Z", "max_issues_repo_path": "test/develop.jl", "max_issues_repo_name": "schlichtanders/ASTModels.jl", "max_issues_repo_head_hexsha": "47207a687f5f81a59d375be3eeba17b9a38d178a", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/develop.jl", "max_forks_repo_name": "schlichtanders/ASTModels.jl", "max_forks_repo_head_hexsha": "47207a687f5f81a59d375be3eeba17b9a38d178a", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.1111111111, "max_line_length": 176, "alphanum_fraction": 0.7254098361, "num_tokens": 64}
|
[STATEMENT]
lemma integrable_inner_left[simp, intro]:
"(c \<noteq> 0 \<Longrightarrow> integrable M f) \<Longrightarrow> integrable M (\<lambda>x. f x \<bullet> c)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (c \<noteq> (0::'a) \<Longrightarrow> integrable M f) \<Longrightarrow> integrable M (\<lambda>x. f x \<bullet> c)
[PROOF STEP]
unfolding integrable.simps
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (c \<noteq> (0::'a) \<Longrightarrow> Ex (has_bochner_integral M f)) \<Longrightarrow> Ex (has_bochner_integral M (\<lambda>x. f x \<bullet> c))
[PROOF STEP]
by fastforce
|
{"llama_tokens": 222, "file": null, "length": 2}
|
import argparse
import numpy as np
import dynamics
import plot
import random_matrix
import sample_point
parser = argparse.ArgumentParser(description='molnet example')
parser.add_argument('--seed', '-s', type=int,
help='random seed', default=0)
parser.add_argument('--node-size', '-N', type=int,
help='Node size', default=2)
parser.add_argument('--channel-size', '-C', type=int,
help='Channel size', default=1)
parser.add_argument('--largest-singular-value', '-S', type=float,
help='The largest singular value', default=1.)
parser.add_argument('--sample-point-size', '-T', type=int,
help='# of Sample points per an interval', default=20)
parser.add_argument('--length', '-L', type=float,
help='Length of each side of domain', default=1.)
parser.add_argument('--out', '-O', type=str,
help='Output directory', default='result')
args = parser.parse_args()
np.random.seed(args.seed)
# Generate random matrices
N = args.node_size
lambda_ = 0.5 * np.ones(N)
lambda_[0] = 1.0
P = random_matrix.make_p(lambda_)
C = args.channel_size
s = np.ones(C) * -0.5
s[0] = args.largest_singular_value
W = random_matrix.make_w(s)
b = np.zeros(C)
# Make dynamics
f = dynamics.make_dynamics(P, W, b)
# Make sample points and forward one time step
T = args.sample_point_size
L = args.length
p = sample_point.make_sample_points(L, T, N, C)
p_next = f(p)
# Debug print
lambda_, e = np.linalg.eig(P)
e = e[:, np.argsort(lambda_)]
e1 = e[:, -1] # eigenvector of the largest eigenvalue
e2 = e[:, -2] # eigenvector of the second-largest eigenvalue
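# np.linalg.eig returns eigenvalues in no particular order; argsort sorts them
# ascending, so the last two columns of e pair with the top two eigenvalues.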
_, s, _ = np.linalg.svd(W)
print('P: ', P)
print('W:', W)
print('Singular values: ', s)
print('Largest eigenvector: ', e1)
print('Second largest eigenvector: ', e2)
plot.streamplot(p, p_next, L, e1[1] / e1[0],
'W={}'.format(float(W)),
args.out)
|
{"hexsha": "aa66ab2b71dabd53b63aad8239141e580a35d946", "size": 1952, "ext": "py", "lang": "Python", "max_stars_repo_path": "gnn_dynamics/main.py", "max_stars_repo_name": "delta2323/gnn-asymptotics", "max_stars_repo_head_hexsha": "0246e29df9b64f49b2b4bd929e3e3393eadbb0d7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 29, "max_stars_repo_stars_event_min_datetime": "2020-02-25T20:24:22.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-30T12:05:39.000Z", "max_issues_repo_path": "gnn_dynamics/main.py", "max_issues_repo_name": "delta2323/gnn-asymptotics", "max_issues_repo_head_hexsha": "0246e29df9b64f49b2b4bd929e3e3393eadbb0d7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-09-09T09:00:13.000Z", "max_issues_repo_issues_event_max_datetime": "2021-04-21T06:30:20.000Z", "max_forks_repo_path": "gnn_dynamics/main.py", "max_forks_repo_name": "delta2323/gnn-asymptotics", "max_forks_repo_head_hexsha": "0246e29df9b64f49b2b4bd929e3e3393eadbb0d7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2020-05-21T01:40:23.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-30T12:05:41.000Z", "avg_line_length": 28.2898550725, "max_line_length": 74, "alphanum_fraction": 0.637295082, "include": true, "reason": "import numpy", "num_tokens": 509}
|
"""Contains functions for coordinate transformations."""
import math
from typing import Tuple
import numpy as np
import pandas as pd
from weldx import util
from weldx.transformations.types import types_timeindex
__all__ = [
"build_time_index",
"is_orthogonal",
"is_orthogonal_matrix",
"normalize",
"orientation_point_plane",
"orientation_point_plane_containing_origin",
"point_left_of_line",
"reflection_sign",
"scale_matrix",
"vector_points_to_left_of_vector",
]
def build_time_index(
time: types_timeindex = None,
time_ref: pd.Timestamp = None,
) -> Tuple[pd.TimedeltaIndex, pd.Timestamp]:
"""Build time index used for xarray objects.
Parameters
----------
time:
Datetime- or Timedelta-like time index.
time_ref:
Reference timestamp for Timedelta inputs.
Returns
-------
Tuple[pandas.TimedeltaIndex, pandas.Timestamp]
The time as a TimedeltaIndex together with the reference timestamp
(taken from the first entry for datetime-like input).
"""
if time is None:
return time, time_ref
time = util.to_pandas_time_index(time)
if isinstance(time, pd.DatetimeIndex):
if time_ref is None:
time_ref = time[0]
time = time - time_ref
return time, time_ref
def scale_matrix(scale_x, scale_y, scale_z) -> np.ndarray:
"""Return a scaling matrix.
Parameters
----------
scale_x :
Scaling factor in x direction
scale_y :
Scaling factor in y direction
scale_z :
Scaling factor in z direction
Returns
-------
numpy.ndarray
Scaling matrix
"""
return np.diag([scale_x, scale_y, scale_z]).astype(float)
def normalize(a):
"""Normalize (l2 norm) an ndarray along the last dimension.
Parameters
----------
a :
data in ndarray
Returns
-------
numpy.ndarray
Normalized ndarray
"""
norm = np.linalg.norm(a, axis=(-1), keepdims=True)
if not np.all(norm):
raise ValueError("Length 0 encountered during normalization.")
return a / norm
def orientation_point_plane_containing_origin(point, p_a, p_b):
"""Determine a points orientation relative to a plane containing the origin.
The side is defined by the winding order of the triangle 'origin - A -
B'. When looking at it from the left-hand side, the ordering is clockwise
and counter-clockwise when looking from the right-hand side.
The function returns 1 if the point lies left of the plane, -1 if it is
on the right and 0 if it lies on the plane.
Note that this function is not appropriate to check if a point lies on
a plane since it has no tolerance to compensate for numerical errors.
Additional note: the points A and B can also be considered as two
vectors spanning the plane.
Parameters
----------
point :
Point
p_a :
Second point of the triangle 'origin - A - B'.
p_b :
Third point of the triangle 'origin - A - B'.
Returns
-------
int
1, -1 or 0 (see description)
"""
if (
math.isclose(np.linalg.norm(p_a), 0)
or math.isclose(np.linalg.norm(p_b), 0)
or math.isclose(np.linalg.norm(p_b - p_a), 0)
):
raise ValueError("One or more points describing the plane are identical.")
return np.sign(np.linalg.det([p_a, p_b, point]))
def orientation_point_plane(point, p_a, p_b, p_c):
"""Determine a points orientation relative to an arbitrary plane.
The side is defined by the winding order of the triangle 'A - B - C'.
When looking at it from the left-hand side, the ordering is clockwise
and counter-clockwise when looking from the right-hand side.
The function returns 1 if the point lies left of the plane, -1 if it is
on the right and 0 if it lies on the plane.
Note that this function is not appropriate to check if a point lies on
a plane since it has no tolerance to compensate for numerical errors.
Parameters
----------
point :
Point
p_a :
First point of the triangle 'A - B - C'.
p_b :
Second point of the triangle 'A - B - C'.
p_c :
Third point of the triangle 'A - B - C'.
Returns
-------
int
1, -1 or 0 (see description)
"""
vec_a_b = p_b - p_a
vec_a_c = p_c - p_a
vec_a_point = point - p_a
return orientation_point_plane_containing_origin(vec_a_point, vec_a_b, vec_a_c)
def is_orthogonal(vec_u, vec_v, tolerance=1e-9):
"""Check if vectors are orthogonal.
Parameters
----------
vec_u :
First vector
vec_v :
Second vector
tolerance :
Numerical tolerance (Default value = 1e-9)
Returns
-------
bool
True or False
"""
if math.isclose(np.dot(vec_u, vec_u), 0) or math.isclose(np.dot(vec_v, vec_v), 0):
raise ValueError("One or both vectors have zero length.")
return math.isclose(np.dot(vec_u, vec_v), 0, abs_tol=tolerance)
def is_orthogonal_matrix(a: np.ndarray, atol=1e-9) -> bool:
"""Check if ndarray is orthogonal matrix in the last two dimensions.
Parameters
----------
a :
Matrix to check
atol :
atol to pass onto np.allclose (Default value = 1e-9)
Returns
-------
bool
True if last 2 dimensions of a are orthogonal
"""
return np.allclose(np.matmul(a, a.swapaxes(-1, -2)), np.eye(a.shape[-1]), atol=atol)
def point_left_of_line(point, line_start, line_end):
"""Determine if a point lies left of a line.
Returns 1 if the point is left of the line and -1 if it is to the right.
If the point is located on the line, this function returns 0.
Parameters
----------
point :
Point
line_start :
Starting point of the line
line_end :
End point of the line
Returns
-------
int
1,-1 or 0 (see description)
"""
vec_line_start_end = line_end - line_start
vec_line_start_point = point - line_start
return vector_points_to_left_of_vector(vec_line_start_point, vec_line_start_end)
def reflection_sign(matrix):
"""Get a sign indicating if the transformation is a reflection.
Returns -1 if the transformation contains a reflection and 1 if not.
Parameters
----------
matrix :
Transformation matrix
Returns
-------
int
1 or -1 (see description)
"""
sign = int(np.sign(np.linalg.det(matrix)))
if sign == 0:
raise ValueError("Invalid transformation")
return sign
def vector_points_to_left_of_vector(vector, vector_reference):
"""Determine if a vector points to the left of another vector.
Returns 1 if the vector points to the left of the reference vector and
-1 if it points to the right. In case both vectors point into the same
or the opposite directions, this function returns 0.
Parameters
----------
vector :
Vector
vector_reference :
Reference vector
Returns
-------
int
1,-1 or 0 (see description)
"""
return int(np.sign(np.linalg.det([vector_reference, vector])))
|
{"hexsha": "fbc08b0250fd1dad6d7ca2ca3ce5c27e42a4f301", "size": 7040, "ext": "py", "lang": "Python", "max_stars_repo_path": "weldx/transformations/util.py", "max_stars_repo_name": "marscher/weldx", "max_stars_repo_head_hexsha": "a5debd8af957009b12fd366589fed1aa41f78176", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "weldx/transformations/util.py", "max_issues_repo_name": "marscher/weldx", "max_issues_repo_head_hexsha": "a5debd8af957009b12fd366589fed1aa41f78176", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "weldx/transformations/util.py", "max_forks_repo_name": "marscher/weldx", "max_forks_repo_head_hexsha": "a5debd8af957009b12fd366589fed1aa41f78176", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.4444444444, "max_line_length": 88, "alphanum_fraction": 0.6346590909, "include": true, "reason": "import numpy", "num_tokens": 1685}
|
import numpy as np
def main():
f_name = './output/adjacency/neur_adj_100_400_{0}.dat'
c = np.zeros(30)
    for i in range(len(c)):
        # mean adjacency weight: total matrix sum divided by its dimension
        data = np.loadtxt(f_name.format(i))
        c[i] = np.sum(data) / data.shape[0]
print(c, np.mean(c))
if __name__ == '__main__':
main()
|
{"hexsha": "589efd6398af1156fe1f9b2d9447b3d0cd28676f", "size": 303, "ext": "py", "lang": "Python", "max_stars_repo_path": "visualisation/adjacency/connect.py", "max_stars_repo_name": "sproberts92/neuron-model", "max_stars_repo_head_hexsha": "3c11b3749da876f4008131c977cfa22158825b74", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "visualisation/adjacency/connect.py", "max_issues_repo_name": "sproberts92/neuron-model", "max_issues_repo_head_hexsha": "3c11b3749da876f4008131c977cfa22158825b74", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "visualisation/adjacency/connect.py", "max_forks_repo_name": "sproberts92/neuron-model", "max_forks_repo_head_hexsha": "3c11b3749da876f4008131c977cfa22158825b74", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 15.9473684211, "max_line_length": 55, "alphanum_fraction": 0.6336633663, "include": true, "reason": "import numpy", "num_tokens": 99}
|
#include <iostream>
#include <fstream>
#include <cmath>
#include <cstring>
#include <string>
#include <Eigen/Core>
#include <Eigen/Dense>
using namespace Eigen;
class Interpolation {
public:
// input:
static constexpr int POINT_LIMIT = 30; // Maximum size of input point set
int n; // Number of input points
float x[POINT_LIMIT], y[POINT_LIMIT];
public:
// output:
static constexpr float LEFT_LIMIT = 0.f; // Left end of sampling interval
static constexpr float RIGHT_LIMIT = 10.f; // Right end of sampling interval
	static constexpr float SAMPLE_RATE = 0.05f; // Distance between adjacent sample points
static constexpr int SAMPLE_POINTS = static_cast<int>((RIGHT_LIMIT - LEFT_LIMIT) / SAMPLE_RATE) + 1; // Number of total sample points
float out_x[SAMPLE_POINTS], out_y[SAMPLE_POINTS];
Interpolation(int num, const float *ix, const float *iy) {
n = num;
memcpy_s(x, num * sizeof(float), ix, num * sizeof(float));
memcpy_s(y, num * sizeof(float), iy, num * sizeof(float));
memset(out_y, 0, sizeof(float) * SAMPLE_POINTS);
for (int i = 0; i < SAMPLE_POINTS; ++i) {
out_x[i] = (LEFT_LIMIT + i * SAMPLE_RATE);
}
}
void outputToCSV(const char *filename, std::string title = "") { // output results to CSV
std::ofstream csv(filename, std::ios::out | std::ios::app);
csv << title << ",";
for (int i = 0; i < SAMPLE_POINTS; ++i) {
csv << out_y[i] << char(i == SAMPLE_POINTS - 1 ? '\n' : ',');
}
csv.close();
}
virtual void evaluate() = 0; // interpolate
};
class LinearInterpolation : public Interpolation {
public:
LinearInterpolation(int num, const float *ix, const float *iy) : Interpolation(num, ix, iy) {}
void evaluate() final {
int k = -1;
for (int i = 0; i < SAMPLE_POINTS; ++i) {
while (k + 1 < n && x[k + 1] < out_x[i]) ++k;
			if (out_x[i] < x[0] || out_x[i] > x[n - 1]) {
				// sample point lies outside the data range
				out_y[i] = 0.0f;
			}
			else {
				if (k == -1) k = 0; // handles out_x[i] == x[0] exactly
				float fact = (out_x[i] - x[k]) / (x[k + 1] - x[k]);
				out_y[i] = fact * y[k + 1] + (1.0f - fact) * y[k];
			}
}
}
};
class LagrangeInterpolation : public Interpolation {
public:
LagrangeInterpolation(int num, const float *ix, const float *iy) : Interpolation(num, ix, iy) {}
void evaluate() final {
for (int h = 0; h < SAMPLE_POINTS; ++h) {
float res = 0.0f;
for (int i = 0; i < n; ++i) {
float tmp = y[i];
for (int j = 0; j < n; ++j) {
if (i != j) {
tmp = tmp * (out_x[h] - x[j]);
tmp = tmp / (x[i] - x[j]);
}
}
res += tmp;
}
out_y[h] = res;
}
}
};
class GaussInterpolation : public Interpolation {
public:
float b0, sigma;
	GaussInterpolation(int num, const float *ix, const float *iy,
		float _b0, float _sigma = 1.0f) : Interpolation(num, ix, iy), b0(_b0), sigma(_sigma) {}
float gauss(float x, float x0) {
return std::exp(-std::pow((x - x0) / sigma, 2.0f) / 2.0f);
}
void evaluate() final {
MatrixXf A(n, n), B(n, 1);
for (int i = 0; i < n; i++) {
for (int j = 0; j < n; j++) {
A(i, j) = gauss(x[i], x[j]);
}
B(i, 0) = y[i] - b0;
}
MatrixXf W = A.inverse() * B;
for (int i = 0; i < SAMPLE_POINTS; ++i) {
float res = b0;
for (int j = 0; j < n; ++j) {
res += W(j, 0) * gauss(out_x[i], x[j]);
}
out_y[i] = res;
}
}
};
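// Added note (sketch of the underlying math, not in the original source):
// GaussInterpolation solves the dense system A w = (y - b0) with
// A_ij = exp(-((x_i - x_j) / sigma)^2 / 2), then evaluates
// f(x) = b0 + sum_j w_j * exp(-((x - x_j) / sigma)^2 / 2),
// so the interpolant passes through every sample whenever A is invertible.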
class LeastSquareInterpolation : public Interpolation {
public:
int m;
	LeastSquareInterpolation(int num, const float *ix, const float *iy, int _m)
		: Interpolation(num, ix, iy), m(_m) {}
void evaluate() final {
MatrixXf A(n, m + 1);
for (int i = 0; i < n; ++i) {
float pow = 1.0f;
for (int j = m; j >= 0; --j) {
A(i, j) = pow;
pow *= x[i];
}
}
MatrixXf B(n, 1);
for (int i = 0; i < n; ++i) {
B(i, 0) = y[i];
}
MatrixXf W = (A.transpose() * A).inverse() * A.transpose() * B;
for (int i = 0; i < SAMPLE_POINTS; ++i) {
float pow = 1.0f, res = 0.0f;
for (int j = m; j >= 0; --j) {
res += pow * W(j, 0);
pow *= out_x[i];
}
out_y[i] = res;
}
}
};
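// Added note: LeastSquareInterpolation fits a degree-m polynomial through the
// normal equations W = (A^T A)^{-1} A^T B, where row i of A holds the powers
// of x[i] from x[i]^m down to 1. RidgeRegressionInterpolation below adds
// lambda * I inside the inverse, which shrinks the coefficients and keeps the
// system better conditioned for larger m.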
class RidgeRegressionInterpolation : public Interpolation {
public:
int m;
float lambda;
	RidgeRegressionInterpolation(int num, const float *ix, const float *iy,
		int _m, float _lambda) : Interpolation(num, ix, iy), m(_m), lambda(_lambda) {}
void evaluate() final {
MatrixXf A(n, m + 1);
for (int i = 0; i < n; ++i) {
float pow = 1.0f;
for (int j = m; j >= 0; --j) {
A(i, j) = pow;
pow *= x[i];
}
}
MatrixXf B(n, 1);
for (int i = 0; i < n; ++i) {
B(i, 0) = y[i];
}
MatrixXf W = (A.transpose() * A + lambda * MatrixXf::Identity(m + 1, m + 1)).inverse() * A.transpose() * B;
		// print the fitted ridge coefficients (highest degree first)
		for (int i = 0; i <= m; i++) std::cout << W(i, 0) << " ";
		std::cout << std::endl;
for (int i = 0; i < SAMPLE_POINTS; ++i) {
float pow = 1.0f, res = 0.0f;
for (int j = m; j >= 0; --j) {
res += pow * W(j, 0);
pow *= out_x[i];
}
out_y[i] = res;
}
}
};
int main() {
std::ifstream dataflow("data.txt", std::ios::in);
int num;
float x[Interpolation::POINT_LIMIT], y[Interpolation::POINT_LIMIT];
dataflow >> num;
for (int i = 0; i < num; ++i) {
dataflow >> x[i];
}
for (int i = 0; i < num; ++i) {
dataflow >> y[i];
}
dataflow.close();
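	// Added note: "data.txt" is assumed to hold the point count `num` on the
	// first line, followed by `num` x-values and then `num` y-values,
	// whitespace-separated.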
LinearInterpolation inter0(num, x, y);
LagrangeInterpolation inter1(num, x, y);
GaussInterpolation inter2(num, x, y, 0.0f);
LeastSquareInterpolation inter3(num, x, y, 2);
RidgeRegressionInterpolation inter4(num, x, y, 5, 10000.0f);
inter0.evaluate();
inter1.evaluate();
inter2.evaluate();
inter3.evaluate();
inter4.evaluate();
inter0.outputToCSV("result.csv", "LinearInterpolation");
inter1.outputToCSV("result.csv", "LagrangeInterpolation");
inter2.outputToCSV("result.csv", "GaussInterpolation");
inter3.outputToCSV("result.csv", "LeastSquareInterpolation");
inter4.outputToCSV("result.csv", "RidgeRegressionInterpolation");
return 0;
}
|
{"hexsha": "48585a6b431402e01f0d72dab7f4b828fec63bec", "size": 5715, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "homeworks/HW1/interpolation.cpp", "max_stars_repo_name": "g1n0st/GAMES102", "max_stars_repo_head_hexsha": "44a8cf9db102109c8fd15c8dc06aa6ad1519a5eb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7.0, "max_stars_repo_stars_event_min_datetime": "2020-10-23T16:33:45.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-17T23:49:36.000Z", "max_issues_repo_path": "homeworks/HW1/interpolation.cpp", "max_issues_repo_name": "g1n0st/GAMES102", "max_issues_repo_head_hexsha": "44a8cf9db102109c8fd15c8dc06aa6ad1519a5eb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "homeworks/HW1/interpolation.cpp", "max_forks_repo_name": "g1n0st/GAMES102", "max_forks_repo_head_hexsha": "44a8cf9db102109c8fd15c8dc06aa6ad1519a5eb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3.0, "max_forks_repo_forks_event_min_datetime": "2021-03-18T08:45:36.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-17T02:36:06.000Z", "avg_line_length": 25.9772727273, "max_line_length": 134, "alphanum_fraction": 0.5860017498, "num_tokens": 1975}
|
function value = daub2_condition ( n )
%*****************************************************************************80
%
%% DAUB2_CONDITION returns the L1 condition of the DAUB2 matrix.
%
% Licensing:
%
% This code is distributed under the GNU LGPL license.
%
% Modified:
%
% 25 January 2015
%
% Author:
%
% John Burkardt
%
% Parameters:
%
% Input, integer N, the order of the matrix.
%
% Output, real VALUE, the L1 condition.
%
c0 = sqrt ( 2.0 ) / 2.0;
c1 = sqrt ( 2.0 ) / 2.0;
a_norm = abs ( c0 ) + abs ( c1 );
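%  Added note: the DAUB2 matrix built from these coefficients is orthogonal,
%  so its inverse has the same L1 norm as the matrix itself.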
b_norm = a_norm;
value = a_norm * b_norm;
return
end
|
{"author": "johannesgerer", "repo": "jburkardt-m", "sha": "1726deb4a34dd08a49c26359d44ef47253f006c1", "save_path": "github-repos/MATLAB/johannesgerer-jburkardt-m", "path": "github-repos/MATLAB/johannesgerer-jburkardt-m/jburkardt-m-1726deb4a34dd08a49c26359d44ef47253f006c1/test_mat/daub2_condition.m"}
|
/*=============================================================================
Copyright (c) 2010-2016 Bolero MURAKAMI
https://github.com/bolero-MURAKAMI/Sprig
Distributed under the Boost Software License, Version 1.0. (See accompanying
file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
=============================================================================*/
#ifndef SPRIG_POLICY_DETAIL_POLICIES_UNIQUE_CHECK_IMPL_HPP
#define SPRIG_POLICY_DETAIL_POLICIES_UNIQUE_CHECK_IMPL_HPP
#include <sprig/config/config.hpp>
#ifdef SPRIG_USING_PRAGMA_ONCE
# pragma once
#endif // #ifdef SPRIG_USING_PRAGMA_ONCE
#include <cstddef>
#include <boost/mpl/size_t.hpp>
#include <boost/mpl/deref.hpp>
#include <boost/mpl/iter_fold.hpp>
#include <boost/mpl/lambda.hpp>
#include <boost/preprocessor/cat.hpp>
#include <sprig/tmp/iter_index.hpp>
#include <sprig/policy/detail/policy_unique_check_impl.hpp>
//
// SPRIG_POLICYIES_UNIQUE_CHECK_IMPL
//
#define SPRIG_POLICYIES_UNIQUE_CHECK_IMPL(TAGS, TYPES, DELECTIVE) \
struct BOOST_PP_CAT(policies_unique_checker_, __LINE__) { \
private: \
template<typename Tag, std::size_t Index, typename Tags, typename Types> \
struct policy_unique_checker { \
private: \
SPRIG_POLICY_UNIQUE_CHECK_IMPL(Tag, TYPES, DELECTIVE); \
}; \
template<typename Tag, typename indexT, typename Tags, typename Types> \
struct element_checker \
: public boost::mpl::size_t<0> \
, public policy_unique_checker<Tag, indexT::value, Tags, Types> \
{}; \
public: \
static std::size_t const value = boost::mpl::iter_fold< \
TAGS, \
boost::mpl::size_t<0>, \
element_checker< \
boost::mpl::deref<boost::mpl::_2>, \
sprig::tmp::iter_index<TAGS, boost::mpl::_2>, \
TAGS, \
TYPES \
> \
>::type::value; \
}; \
static std::size_t const BOOST_PP_CAT(policies_unique_check_, __LINE__) \
= (BOOST_PP_CAT(policies_unique_checker_, __LINE__)::value)
#endif // #ifndef SPRIG_POLICY_DETAIL_POLICIES_UNIQUE_CHECK_IMPL_HPP
|
{"hexsha": "ee30e4bebfd786e93141df802e774223092efffd", "size": 2003, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "sprig/policy/detail/policies_unique_check_impl.hpp", "max_stars_repo_name": "bolero-MURAKAMI/Sprig", "max_stars_repo_head_hexsha": "51ce4db4f4d093dee659a136f47249e4fe91fc7a", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2017-10-24T13:56:24.000Z", "max_stars_repo_stars_event_max_datetime": "2018-09-28T13:21:22.000Z", "max_issues_repo_path": "sprig/policy/detail/policies_unique_check_impl.hpp", "max_issues_repo_name": "bolero-MURAKAMI/Sprig", "max_issues_repo_head_hexsha": "51ce4db4f4d093dee659a136f47249e4fe91fc7a", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sprig/policy/detail/policies_unique_check_impl.hpp", "max_forks_repo_name": "bolero-MURAKAMI/Sprig", "max_forks_repo_head_hexsha": "51ce4db4f4d093dee659a136f47249e4fe91fc7a", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 2.0, "max_forks_repo_forks_event_min_datetime": "2016-04-12T03:26:06.000Z", "max_forks_repo_forks_event_max_datetime": "2018-09-28T13:21:22.000Z", "avg_line_length": 34.5344827586, "max_line_length": 79, "alphanum_fraction": 0.6874687968, "num_tokens": 525}
|
############
# Starring #
############
function stargazers(repo; options...)
results, page_data = gh_get_paged_json("/repos/$(name(repo))/stargazers"; options...)
return map(Owner, results), page_data
end
function starred(user; options...)
results, page_data = gh_get_paged_json("/users/$(name(user))/starred"; options...)
return map(Repo, results), page_data
end
star(repo; options...) = gh_put("/user/starred/$(name(repo))"; options...)
unstar(repo; options...) = gh_delete("/user/starred/$(name(repo))"; options...)
############
# Watching #
############
function watchers(repo; options...)
results, page_data = gh_get_paged_json("/repos/$(name(repo))/subscribers"; options...)
return map(Owner, results), page_data
end
function watched(owner; options...)
results, page_data = gh_get_paged_json("/users/$(name(owner))/subscriptions"; options...)
return map(Repo, results), page_data
end
watch(repo; options...) = gh_put("/repos/$(name(repo))/subscription"; options...)
unwatch(repo; options...) = gh_delete("/repos/$(name(repo))/subscription"; options...)
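# Added usage sketch (illustrative; assumes GitHub.jl's `authenticate` and the
# `auth` keyword accepted by the request functions):
#
#   auth = authenticate(ENV["GITHUB_TOKEN"])
#   gazers, page_data = stargazers("JuliaWeb/GitHub.jl"; auth = auth)
#   watch("JuliaWeb/GitHub.jl"; auth = auth)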
|
{"hexsha": "dbdac00b4aee24938c59c3ca92649ccf64c35247", "size": 1100, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/activity/activity.jl", "max_stars_repo_name": "JuliaPackageMirrors/GitHub.jl", "max_stars_repo_head_hexsha": "74aedaab2f0c1a64e396b6ff0442dbcfd0f037ab", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-06-20T04:29:38.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-20T04:29:38.000Z", "max_issues_repo_path": "src/activity/activity.jl", "max_issues_repo_name": "JuliaPackageMirrors/GitHub.jl", "max_issues_repo_head_hexsha": "74aedaab2f0c1a64e396b6ff0442dbcfd0f037ab", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/activity/activity.jl", "max_forks_repo_name": "JuliaPackageMirrors/GitHub.jl", "max_forks_repo_head_hexsha": "74aedaab2f0c1a64e396b6ff0442dbcfd0f037ab", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-06-20T04:30:03.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-20T04:30:03.000Z", "avg_line_length": 30.5555555556, "max_line_length": 93, "alphanum_fraction": 0.6554545455, "num_tokens": 265}
|
"""
@author Mayank Mittal, Jingzhou Liu
@email mittalma@ethz.ch, jingzhou.liu@mail.utoronto.ca
@brief Implementation of Spidey robot in Isaac Sim.
"""
# python
import os
import numpy as np
import scipy.spatial.transform as tf
from typing import Optional
# omniverse
from pxr import Usd, UsdGeom, Gf, Semantics
import omni.isaac.dynamic_control._dynamic_control as omni_dc
# spidey
from spidey_python.utils.message import *
from spidey_python.utils.errors import *
from spidey_python.omniverse.robot.robot_base import RobotBase
from spidey_python.omniverse.robot.spidey import SpideyDim, SpideyBot
# Default dictionary for setting up the spidey robot.
SPIDEY_ROBOT_DEFAULT_CONFIG = {
# Type of controller for spidey ["position", "velocity", "effort"]
'spidey_ctrl': 'position',
# Disable gravity for spidey
'spidey_disable_gravity': False,
# path to the USD file for the robot
'usd_path': os.path.join(os.environ["SPIDEY_ROOT"], "omniverse/resources/usd", "robot/spidey/spidey.usd")
}
class SpideyRobot(RobotBase):
"""
@brief Implementation of Spidey robot.
State: q_j, dq_j
Input: dq_j
"""
"""
Instantiation
"""
def __init__(self, stage: Usd.Stage, prim_path: Optional[str] = '/spidey',
config: Optional[dict] = None, meters_per_unit: Optional[float] = 1.0):
"""
Defines the variables and constants for the system.
:param stage: The USD stage to import robot into.
:param prim_path: The path for the primitive in the stage.
:param config: A dictionary containing parameters for the class (default: SPIDEY_ROBOT_DEFAULT_CONFIG).
:param meters_per_unit: The units of conversion from simulator's scale to meters.
"""
super().__init__()
# Check that input is correct
assert os.path.isabs(prim_path)
assert isinstance(meters_per_unit, float)
# Copy args to internal variables
self._prim_path = prim_path
self._meters_per_unit = meters_per_unit
# Update the default dictionary
self._config = SPIDEY_ROBOT_DEFAULT_CONFIG
if config is not None:
self._config.update(config)
# Persistent Scene-graph related in Universal Scene Description
self._stage = stage
self._prim = None
# Handles to various ov-kit plugins
self._dc_handle = None
# Handles related to robot
self._articulation_handle = None
# Definitions for various internal dimensions
self._dims = SpideyDim
        # Parts of the robot are defined separately since each is controlled differently.
self._parts = {
"spidey": SpideyBot(meters_per_unit, ee_handle_names=["leg1_link3_tip","leg2_link3_tip","leg3_link3_tip","leg4_link3_tip"], transform_ext=None)
}
# Store DOF properties
self._dof_properties = {
"lower_limits": np.array([]),
"upper_limits": np.array([]),
"max_velocity": np.array([]),
"max_efforts": np.array([]),
}
# Default state of the robot
self._robot_default_state = {
"pos": np.concatenate([self._parts["spidey"].default_state["pos"]]),
"vel": np.concatenate([self._parts["spidey"].default_state["vel"]])
}
# Dynamics information of the robot
self._robot_state = {
# Generalized coordinates: spidey joints (12)
"pos": np.zeros(self._dims.GeneralizedCoordinatesDim.value),
# Generalized velocities: spidey joints (12)
"vel": np.zeros(self._dims.GeneralizedVelocitiesDim.value)
}
def __del__(self):
"""
Cleanup after exiting
"""
pass
def __str__(self) -> str:
"""
:return: A string containing information about the instance's state.
"""
# set print options for numpy
np.set_printoptions(precision=4)
# print message
msg = f"Spidey @ \'{self._prim_path}\'\n" \
" Spidey Feet Poses:\n" \
f" I_r_IE: {self._parts['spidey'].I_r_IE} \n" \
f" q_IE: {self._parts['spidey'].q_IE} \n" \
" Spidey State:\n" \
f" q_j: {self.q[0:]} \n" \
f" dq_j: {self.u[0:]} \n" \
return msg
"""
Properties
"""
@property
def prim(self) -> Usd.Prim:
"""
:return: The USD primitive instance corresponding to the robot.
"""
return self._prim
@property
def prim_path(self) -> str:
"""
:return: The path to the prim the stage.
"""
return self._prim_path
@property
def dof_properties(self) -> dict:
"""
:return: A dictionary containing the DOF properties such as joint limits.
"""
return self._dof_properties
@property
def q(self) -> np.ndarray:
"""
:return: The generalized coordinates of the robot.
"""
return self._robot_state["pos"]
@property
def u(self) -> np.ndarray:
"""
:return: The generalized velocities of the robot.
"""
return self._robot_state["vel"]
@property
def config(self) -> dict:
"""
:return: A dictionary containing parameters for the class.
"""
return self._config
@property
def parts(self) -> dict:
"""
        :return: A dictionary providing direct access to various parts of the robot.
"""
return self._parts
@property
def default_state(self) -> dict:
"""
:return: The default state of the robot.
"""
return self._robot_default_state
@property
def state(self) -> dict:
"""
:return: The current state of the robot.
"""
return self._robot_state
@property
    def base_link_pose(self) -> np.ndarray:
"""
:return: The current pose of the base_link: [q_w, q_x, q_y, q_z, x_root, y_root, z_root]
"""
return self._parts["spidey"].base_link_pose
"""
Helpers
"""
def toggle_visibility(self, visible: bool):
""" Toggle visibility of the robot prim in the scene.
        :param visible: Flag indicating whether to make the prim visible or invisible.
"""
# get imageable object
imageable = UsdGeom.Imageable(self._prim)
# toggle visibility
if visible:
imageable.MakeVisible()
else:
imageable.MakeInvisible()
def set_semantic_label(self, label: str):
"""
Set the semantic label corresponding to the prim.
:param label: Name of the semantic label.
"""
# create semantics api if not exists
if not self._prim.HasAPI(Semantics.SemanticsAPI):
sem = Semantics.SemanticsAPI.Apply(self._prim, "Semantics")
sem.CreateSemanticTypeAttr()
sem.CreateSemanticDataAttr()
else:
sem = Semantics.SemanticsAPI.Get(self._prim, "Semantics")
# set attributes
sem.GetSemanticTypeAttr().Set("class")
sem.GetSemanticDataAttr().Set(label)
def set_prim_pose(self, pos: np.ndarray, quat: Optional[np.ndarray] = None):
""" Set location of the root of the robot in the stage.
:param pos: (x, y, z) cartesian coordinates for location of root of the robot in the world frame.
:param quat: (x, y, z, w) quaternion coordinates of orientation of root of the robot in the world frame.
Default orientation is (0, 0, 0, 1), i.e. identity w.r.t. world.
"""
if self._prim is None:
print_warn(f"Prim not found at \'{self._prim_path}\'. Please ensure that the USD stage has the prim.")
return
# convert to datatypes accepted by simulator
if not isinstance(pos, Gf.Vec3d):
pos = pos / self._meters_per_unit
pos = Gf.Vec3d(*pos)
# if orientation not provided, default to identity
if quat is not None:
rotm = tf.Rotation.from_quat(quat).as_matrix()
rotm = Gf.Matrix3d(*rotm.ravel())
else:
rotm = Gf.Matrix3d().SetIdentity()
# set attribute properties for the transform on the primitive
properties = self._prim.GetPropertyNames()
if "xformOp:transform" in properties:
transform_attr = self._prim.GetAttribute("xformOp:transform")
matrix = self._prim.GetAttribute("xformOp:transform").Get()
matrix.SetTranslateOnly(pos).SetRotateOnly(rotm)
transform_attr.Set(matrix)
else:
xform = UsdGeom.Xformable(self._prim)
# xform_op = xform.AddXformOp(UsdGeom.XformOp.TypeTransform, UsdGeom.XformOp.PrecisionDouble, "")
# xform_op.Set(Gf.Matrix4d().SetTranslate(pos).SetRotate(rotm))
matrix = Gf.Matrix4d().SetTranslateOnly(pos).SetRotateOnly(rotm)
xform.AddTransformOp().Set(matrix)
def set_state(self, q: np.ndarray, u: np.ndarray):
""" Set the dof state of the robot.
:param q: Generalized coordinates for the robot.
:param u: Generalized velocities for the robot.
"""
# convert input to numpy array (sanity)
q = np.asarray(q)
u = np.asarray(u)
# check input is of right shape
assert q.shape == (self._dims.GeneralizedCoordinatesDim.value,)
assert u.shape == (self._dims.GeneralizedVelocitiesDim.value,)
# for spidey
self._parts["spidey"].set_state(q[:], u[:])
"""
Operations
"""
def create(self):
"""
Loads the robot into the Omniverse stage.
@note This function is kept separate in case one wants to create an instance of the class without launching
              the simulator, or if one doesn't want to create a new primitive programmatically but rather refer to an
              existing one in the current USD stage.
"""
# Extract USD path from configuration
usd_path = self._config["usd_path"]
# check that path exists
if not os.path.exists(usd_path):
msg = f"File not found: {usd_path}"
print_error(msg)
raise FileNotFoundError(msg)
else:
print_info(f"Loading from: {usd_path}.")
# define persistent scene graph geometry for the robot
self._prim = self._stage.DefinePrim(self._prim_path, "Xform")
# add reference to the USD in the current stage
self._prim.GetReferences().AddReference(usd_path)
# check that the path to articulation in scene-graph is correct
assert self._prim_path == self._prim.GetPath().pathString
def setup(self, dc: omni_dc.DynamicControl):
"""
Registers the assets and configures internal variables of the robot.
:param dc: Handle to dynamic control plugin instance.
"""
# get prim if it doesn't exist yet
# this is to deal with the scenario when the stage already has the prim so user does not create one.
if self._prim is None:
self._prim = self._stage.GetPrimAtPath(self._prim_path)
# check that prim exists. (GetPrimPath returns invalid prim if one doesn't exist)
if not self._prim.IsValid():
msg = f"Prim not found at \'{self._prim_path}\'. Please ensure that the USD stage has the prim."
print_error(msg)
raise OmniverseError(msg)
# initialize dynamic control handle
self._dc_handle = dc
# initialize handle to the articulation for robot through dynamic control toolbox
self._articulation_handle = self._dc_handle.get_articulation(self._prim_path)
if self._articulation_handle == omni_dc.INVALID_HANDLE:
raise InvalidHandleError(f"Failed to obtain robot at \'{self._prim_path}\'")
# get number of degrees of freedom of robot
num_dofs = self._dc_handle.get_articulation_dof_count(self._articulation_handle)
num_dofs_expected = self._parts["spidey"].dof
# check that number of DOFs are correct
if num_dofs != num_dofs_expected:
raise OmniverseError(f"Incorrect number of degrees of freedom. "
f"Expected {num_dofs_expected} but received {num_dofs}.")
# setup handles for the robot
# For spidey
self._parts["spidey"].setup(self._articulation_handle, self._dc_handle, ctrl=self._config["spidey_ctrl"],
dof_offset=0,
disable_gravity=self._config["spidey_disable_gravity"])
        # get joint properties
dof_props = self._dc_handle.get_articulation_dof_properties(self._articulation_handle)
# store essential dof properties internally
self._dof_properties["lower_limits"] = np.asarray(dof_props["lower"])
self._dof_properties["upper_limits"] = np.asarray(dof_props["upper"])
self._dof_properties["max_velocity"] = np.asarray(dof_props["maxVelocity"])
self._dof_properties["max_effort"] = np.asarray(dof_props["maxEffort"])
# root spawned position
self.set_prim_pose(pos=np.array([0.0, 0.0, 0.0]), quat=None)
# set default initial state of the robot
self.set_state(q=self._robot_default_state["pos"], u=self._robot_default_state["vel"])
# update the internal buffers
self.update()
# print status
print_notify(f"Setup complete for spidey robot: \'{self._prim_path}\'.")
def advance(self, spidey_cmd: np.ndarray = None):
"""Apply input command to the robot.
:param spidey_cmd: The joint command for spidey.
"""
if spidey_cmd is None:
spidey_cmd = self.q[:]
# apply command to the robot
self._parts["spidey"].apply_command(spidey_cmd)
def update(self):
"""
Updates the buffers for dynamics state of the robot.
"""
# update the wrappers
self._parts["spidey"].update()
# fill base pose to generalized coordinates
self._robot_state["pos"][:] = self._parts["spidey"].state["pos"]
# fill base velocity to generalized velocities
self._robot_state["vel"][:] = self._parts["spidey"].state["vel"]
def display(self):
"""
Display the configuration of the robot.
"""
print(f"Articulation handle: {self._articulation_handle}")
# Print information about kinematic chain
root_link_index = self._dc_handle.get_articulation_root_body(self._articulation_handle)
print("--- Hierarchy:\n"
f"{self._convert_kinematic_hierarchy_to_string(root_link_index)}")
# Information about the body states of the robot
body_states = self._dc_handle.get_articulation_body_states(self._articulation_handle, omni_dc.STATE_ALL)
print_info("--- Body states:\n"
f"{body_states}")
# Information about the DOF states of the robot.
dof_states = self._dc_handle.get_articulation_dof_states(self._articulation_handle, omni_dc.STATE_ALL)
print_info("--- DOF states:\n"
f"{dof_states}")
# Information about the DOF properties of the robot.
dof_props = self._dc_handle.get_articulation_dof_properties(self._articulation_handle)
print_info("--- DOF properties:\n"
"[type] [has-limits] [lower] [upper] [drive-mode] [max-vel] [max-effort] [stiffness] [damping]\n"
f"{dof_props}")
"""
Internals
"""
def _convert_kinematic_hierarchy_to_string(self, body_index, indent_level=0) -> str:
""" Reads the articulation handle and converts kinematic tree into a string.
:param body_index: Index of the body to start iteration with.
:param indent_level: Indentation level in the converted message
:return: A string message containing the kinematic tree.
"""
# define current indentation
indent = "|" + "-" * indent_level
# get name of the body
body_name = self._dc_handle.get_rigid_body_name(body_index)
# add body name to string
str_output = f"{indent}Body: {body_name}\n"
# iterate over children of the body
for i in range(self._dc_handle.get_rigid_body_child_joint_count(body_index)):
# get joint name
joint = self._dc_handle.get_rigid_body_child_joint(body_index, i)
joint_name = self._dc_handle.get_joint_name(joint)
# get child link name
child = self._dc_handle.get_joint_child_body(joint)
child_name = self._dc_handle.get_rigid_body_name(child)
# add information to string output
str_output += f"{indent}>>Joint: {joint_name} -> {child_name}\n"
            # iterate recursively for depth-first search
str_output += self._convert_kinematic_hierarchy_to_string(child, indent_level + 4)
# return result
return str_output
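# Added usage sketch (illustrative; assumes a live Isaac Sim USD stage `stage`
# and a dynamic-control instance `dc` obtained from the simulator):
#
#   robot = SpideyRobot(stage, prim_path='/spidey')
#   robot.create()               # reference the robot USD into the stage
#   robot.setup(dc)              # register the articulation, apply defaults
#   robot.advance(robot.q[:])    # hold the current joint positions
#   robot.update()               # refresh the internal state buffers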
# EOF
|
{"hexsha": "f0e96a3d86cbf40e50a4bc16144529bc81d9d5be", "size": 17186, "ext": "py", "lang": "Python", "max_stars_repo_path": "spidey_simulation/spidey_py/spidey_python/omniverse/robot/spidey_robot.py", "max_stars_repo_name": "JasonJZLiu/Spidey-Quadruped", "max_stars_repo_head_hexsha": "74c1817f997b354bae4fffd2728f2cc94947062c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2021-06-14T03:12:18.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-23T12:58:56.000Z", "max_issues_repo_path": "spidey_simulation/spidey_py/spidey_python/omniverse/robot/spidey_robot.py", "max_issues_repo_name": "JasonJZLiu/Spidey-Quadruped", "max_issues_repo_head_hexsha": "74c1817f997b354bae4fffd2728f2cc94947062c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "spidey_simulation/spidey_py/spidey_python/omniverse/robot/spidey_robot.py", "max_forks_repo_name": "JasonJZLiu/Spidey-Quadruped", "max_forks_repo_head_hexsha": "74c1817f997b354bae4fffd2728f2cc94947062c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.0590909091, "max_line_length": 155, "alphanum_fraction": 0.6246945188, "include": true, "reason": "import numpy,import scipy", "num_tokens": 4018}
|
[STATEMENT]
lemma ine_ins_neg1:
assumes "\<not> ine P m"
and "exprChannel x m"
shows "x \<notin> ins P"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. x \<notin> ins P
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
\<not> ine P m
exprChannel x m
goal (1 subgoal):
1. x \<notin> ins P
[PROOF STEP]
by (simp add: ine_def, auto)
|
{"llama_tokens": 158, "file": "CryptoBasedCompositionalProperties_Secrecy", "length": 2}
|
import numpy as np
from scipy.integrate import simps
ir_max = 501  # number of radial grid points to keep
for i in range(1, 4):
    a = np.loadtxt(f"ezrho{i}.out")
    val = a[:ir_max, 0]
    r = a[:ir_max, 1]
    # integrate the density over r with composite Simpson's rule
    a2 = simps(val, r)
    print(a2)
|
{"hexsha": "bf5ce98b62f79b1c625c6c559252662f60c2dcc5", "size": 212, "ext": "py", "lang": "Python", "max_stars_repo_path": "Test/Unit/eval_p2.py", "max_stars_repo_name": "pmu2022/lsms", "max_stars_repo_head_hexsha": "3c5f266812cad0b6d570bef9f5abb590d044ef92", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-01-27T14:45:51.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-27T14:45:51.000Z", "max_issues_repo_path": "Test/Unit/eval_p2.py", "max_issues_repo_name": "pmu2022/lsms", "max_issues_repo_head_hexsha": "3c5f266812cad0b6d570bef9f5abb590d044ef92", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Test/Unit/eval_p2.py", "max_forks_repo_name": "pmu2022/lsms", "max_forks_repo_head_hexsha": "3c5f266812cad0b6d570bef9f5abb590d044ef92", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 15.1428571429, "max_line_length": 35, "alphanum_fraction": 0.5849056604, "include": true, "reason": "import numpy,from scipy", "num_tokens": 77}
|
import numpy
import matplotlib as mplt
import matplotlib.collections  # make mplt.collections resolvable
import matplotlib.pylab        # make mplt.pylab resolvable
__all__ = ['lineplot']
def lineplot(vertices, indices, linewidths=1):
"""Plot 2D line segments"""
vertices = numpy.asarray(vertices)
indices = numpy.asarray(indices)
    # 3d tensor [segment index][vertex index][x/y value]
lines = vertices[numpy.ravel(indices),:].reshape((indices.shape[0],2,2))
col = mplt.collections.LineCollection(lines)
col.set_color('k')
col.set_linewidth(linewidths)
sub = mplt.pylab.gca()
sub.add_collection(col,autolim=True)
sub.autoscale_view()
|
{"hexsha": "41bee8481ded4d242004d2bcded88a43ef1dd774", "size": 563, "ext": "py", "lang": "Python", "max_stars_repo_path": "Examples/CoarseFineSplitting/draw.py", "max_stars_repo_name": "pombreda/pyamg", "max_stars_repo_head_hexsha": "ecd464de4d16e16bc905d84df181025ddf3c1958", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-07-03T15:32:01.000Z", "max_stars_repo_stars_event_max_datetime": "2018-07-03T15:32:01.000Z", "max_issues_repo_path": "Examples/Rootnode/draw.py", "max_issues_repo_name": "pombreda/pyamg", "max_issues_repo_head_hexsha": "ecd464de4d16e16bc905d84df181025ddf3c1958", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Examples/Rootnode/draw.py", "max_forks_repo_name": "pombreda/pyamg", "max_forks_repo_head_hexsha": "ecd464de4d16e16bc905d84df181025ddf3c1958", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.5909090909, "max_line_length": 76, "alphanum_fraction": 0.6856127886, "include": true, "reason": "import numpy", "num_tokens": 146}
|
import os
import argparse
import json
import numpy as np
import torch
import torch.nn as nn
import pickle
from util import rescale, find_max_epoch, print_size, sampling, calc_diffusion_hyperparams, AverageMeter
from util_fastdpmv2 import fast_sampling_function_v2
torch_version = torch.__version__
if torch_version == '1.7.1':
from models.pointnet2_ssg_sem import PointNet2SemSegSSG
from models.pointnet2_with_pcld_condition import PointNet2CloudCondition
from models.point_upsample_module import point_upsample
from chamfer_loss_new import Chamfer_F1
try:
from emd import EMD_distance
EMD_module_loaded = True
except:
print('The emd module is not loaded')
EMD_module_loaded = False
elif torch_version == '1.4.0':
import sys
sys.path.append('models/pvd')
from model_forward import PVCNN2
from metrics.evaluation_metrics import EMD_CD
else:
raise Exception('Pytorch version %s is not supported' % torch_version)
from dataset import get_dataloader
from eval.plot_result import plot_result
from eval.compare_eval_result import plot_result_list
import pdb
from dataparallel import MyDataParallel
import h5py
import time
name_to_number = {
'plane': '02691156',
'bench': '02828884',
'cabinet': '02933112',
'car': '02958343',
'chair': '03001627',
'monitor': '03211117',
'lamp': '03636649',
'speaker': '03691459',
'firearm': '04090263',
'couch': '04256520',
'table': '04379243',
'cellphone': '04401088',
'watercraft': '04530566'}
number_to_name = {}
for k in name_to_number.keys():
number_to_name[name_to_number[k]] = k
def evaluate(net, testloader, diffusion_hyperparams, print_every_n_steps=200, parallel=True,
dataset='shapenet', scale=1, save_generated_samples=False, save_dir = None,
task = 'completion', refine_output_scale_factor=None, max_print_nums=1e8,
save_multiple_t_slices=False,
t_slices=[5, 10, 20, 50, 100, 200, 400, 600, 800],
use_a_precomputed_XT=False, T_step=100,
point_upsample_factor=1, include_displacement_center_to_final_output=False,
compute_emd=True, compute_cd=True,
num_points=None, augment_data_during_generation=False,
noise_magnitude_added_to_gt=0.01, add_noise_to_generated_for_refine_exp=False,
return_all_metrics=False,
fast_sampling=False, fast_sampling_config=None, diffusion_config=None):
assert task in ['completion', 'refine_completion', 'denoise']
CD_meter = AverageMeter()
F1_meter = AverageMeter()
EMD_meter = AverageMeter()
total_len = len(testloader)
if fast_sampling:
assert not save_multiple_t_slices
assert not use_a_precomputed_XT
if not dataset in ['shapenet', 'shapenet_pytorch', 'mvp_dataset', 'shapenet_chunk', 'mvp40', 'partnet']:
raise Exception('%s dataset is not supported' % dataset)
if use_a_precomputed_XT:
assert task == 'completion'
        assert dataset in ['mvp_dataset', 'shapenet_chunk', 'mvp40', 'partnet']  # precomputed XT is only implemented for these datasets
if augment_data_during_generation:
assert task == 'completion'
assert dataset in ['mvp_dataset', 'shapenet_chunk', 'mvp40', 'partnet']
if dataset == 'shapenet' or dataset=='shapenet_pytorch':
total_meta = []
elif dataset in ['mvp_dataset', 'shapenet_chunk', 'mvp40', 'partnet']:
# total meta is label info
total_meta = torch.rand(0).cuda().long()
# cd_distance = torch.rand(0).cuda()
# emd_distance = torch.rand(0).cuda()
metrics = {'cd_distance': torch.rand(0).cuda(), 'emd_distance': torch.rand(0).cuda(),
'cd_p': torch.rand(0).cuda(), 'f1': torch.rand(0).cuda()}
# cd_module = Chamfer_Loss()
f1_threshold = 0.001 if dataset == 'mvp40' else 0.0001
if torch_version == '1.7.1':
cd_module = Chamfer_F1(f1_threshold=f1_threshold)
if compute_emd and EMD_module_loaded:
emd_module = EMD_distance()
if parallel:
net = MyDataParallel(net)
if torch_version == '1.7.1':
cd_module = nn.DataParallel(cd_module)
if compute_emd and EMD_module_loaded:
emd_module = nn.DataParallel(emd_module)
if save_generated_samples:
print('generated_samples will be saved to the directory', save_dir)
if dataset in ['mvp_dataset', 'shapenet_chunk', 'mvp40', 'partnet']:
total_generated_data = None
if save_multiple_t_slices:
generated_data_t_slices = None
print_interval = int(np.ceil(total_len / max_print_nums))
total_time = 0
for idx, data in enumerate(testloader):
if dataset in ['mvp_dataset', 'shapenet_chunk', 'mvp40', 'partnet']:
label = data['label'].cuda()
condition = data['partial'].cuda()
gt = data['complete'].cuda()
if task == 'refine_completion':
generated = data['generated'].cuda()
if use_a_precomputed_XT:
XT = data['XT'].cuda()
else:
XT = None
if augment_data_during_generation:
                # in this case, condition, gt, generated, XT are all augmented
M_inv = data['M_inv'].cuda()
translation = data['translation'].cuda()
batch = gt.shape[0]
        try:
            num_points = gt.shape[1]
        except Exception:
            # fall back to the num_points argument passed to this function
            print('num points is set to %d' % num_points)
# print('num points is set to the number of points (%d) in the partial point cloud' % num_points)
if (idx) % print_interval == 0:
print('begin generating')
net.reset_cond_features()
start = time.time()
# pdb.set_trace()
if task == 'refine_completion':
if add_noise_to_generated_for_refine_exp:
generated = generated + torch.normal(0, noise_magnitude_added_to_gt, size=generated.shape, device=generated.device)
displacement = net(generated, condition, ts=None, label=label)
if point_upsample_factor > 1:
generated_data, _ = point_upsample(generated, displacement, point_upsample_factor,
include_displacement_center_to_final_output,
refine_output_scale_factor)
else:
generated_data = generated + displacement * refine_output_scale_factor
# loss = loss_function(X, refined_X, batch_reduction='mean')
elif task == 'denoise':
generated = gt + torch.normal(0, noise_magnitude_added_to_gt, size=gt.shape, device=gt.device)
displacement = net(generated, condition=condition, ts=None, label=label)
generated_data = generated + displacement * refine_output_scale_factor
else:
if save_multiple_t_slices:
assert dataset in ['mvp_dataset', 'shapenet_chunk', 'mvp40', 'partnet']# shapenet is not supported yet
generated_data, result_slices = sampling(net, (batch,num_points,3),
diffusion_hyperparams,
print_every_n_steps=print_every_n_steps, label=label,
condition=condition,
verbose=False, return_multiple_t_slices=True,
t_slices=t_slices,
use_a_precomputed_XT=use_a_precomputed_XT, step=T_step, XT=XT)
# result_slices is a dict that contains torch tensors
else:
if fast_sampling:
generated_data = fast_sampling_function_v2(net, (batch,num_points,3), diffusion_hyperparams, # DDPM parameters
diffusion_config,
print_every_n_steps=print_every_n_steps, label=label,
verbose=False, condition=condition,
**fast_sampling_config)
else:
# generated_data = gt + torch.normal(0, 0.01, size=gt.shape, device=gt.device)
generated_data = sampling(net, (batch,num_points,3),
diffusion_hyperparams,
print_every_n_steps=print_every_n_steps, label=label,
condition=condition,
verbose=False,
use_a_precomputed_XT=use_a_precomputed_XT, step=T_step, XT=XT)
generation_time = time.time() - start
total_time = total_time + generation_time
# generated_data = torch.rand(batch,num_points,3,device=gt.device)
if augment_data_during_generation:
generated_data = torch.matmul(generated_data - translation, M_inv)
gt = torch.matmul(gt - translation, M_inv)
generated_data = generated_data/2/scale
gt = gt/2/scale
if save_multiple_t_slices:
for key in result_slices.keys():
if augment_data_during_generation:
result_slices[key] = torch.matmul(result_slices[key] - translation, M_inv)
result_slices[key] = result_slices[key]/2/scale
result_slices[key] = result_slices[key].detach().cpu().numpy()
torch.cuda.empty_cache()
if torch_version == '1.7.1':
if compute_cd:
cd_p, dist, f1 = cd_module(generated_data, gt)
cd_loss = dist.mean().detach().cpu().item()
f1_loss = f1.mean().detach().cpu().item()
else:
dist = torch.zeros(generated_data.shape[0], device=generated_data.device, dtype=generated_data.dtype)
cd_p = dist
f1 = dist
cd_loss = dist.mean().detach().cpu().item()
f1_loss = f1.mean().detach().cpu().item()
if compute_emd and EMD_module_loaded:
emd_cost = emd_module(generated_data, gt)
else:
emd_cost = torch.zeros_like(dist)
emd_loss = emd_cost.mean().detach().cpu().item()
else: # 1.4.0
result = EMD_CD(generated_data, gt, f1_threshold = f1_threshold)
dist = result['CD']
cd_p = dist
f1 = result['fscore']
emd_cost = result['EMD']
cd_loss = dist.mean().detach().cpu().item()
f1_loss = f1.mean().detach().cpu().item()
emd_loss = emd_cost.mean().detach().cpu().item()
if dataset == 'shapenet':
total_meta = total_meta + data[3]
elif dataset == 'shapenet_pytorch':
total_meta = total_meta + list(data[3])
elif dataset in ['mvp_dataset', 'shapenet_chunk', 'mvp40', 'partnet']:
total_meta = torch.cat([total_meta, label])
metrics['cd_distance'] = torch.cat([metrics['cd_distance'], dist])
metrics['emd_distance'] = torch.cat([metrics['emd_distance'], emd_cost])
metrics['cd_p'] = torch.cat([metrics['cd_p'], cd_p])
metrics['f1'] = torch.cat([metrics['f1'], f1])
CD_meter.update(cd_loss, n=batch)
F1_meter.update(f1_loss, n=batch)
EMD_meter.update(emd_loss, n=batch)
if (idx) % print_interval == 0:
print('progress [%d/%d] %.4f (%d samples) CD distance %.8f EMD distance %.8f F1 score %.6f this batch time %.2f total generation time %.2f' % (idx, total_len,
idx/total_len, batch, CD_meter.avg, EMD_meter.avg, F1_meter.avg, generation_time, total_time), flush=True)
# if task == 'completion' and save_generated_samples:
if save_generated_samples:
if dataset in ['shapenet', 'shapenet_pytorch']:
meta = data[3]
# meta_files = [os.path.split(m)[-1] for m in meta]
for i in range(len(meta)):
meta_split = meta[i].split('/')
meta_file = os.path.join(meta_split[-2], meta_split[-1])
save_file = os.path.join(save_dir, meta_file)
save_data = generated_data[i].detach().cpu().numpy()
hf = h5py.File(save_file, 'w')
hf.create_dataset('data', data=save_data)
hf.close()
elif dataset in ['mvp_dataset', 'shapenet_chunk', 'mvp40', 'partnet']:
if dataset == 'mvp_dataset':
save_file = os.path.join(save_dir, 'mvp_generated_data_%dpts.h5' % num_points)
elif dataset == 'shapenet_chunk':
save_file = os.path.join(save_dir, 'shapenet_generated_data_%dpts.h5' % num_points)
elif dataset == 'mvp40':
save_file = os.path.join(save_dir, 'mvp40_generated_data_%dpts.h5' % num_points)
elif dataset == 'partnet':
save_file = os.path.join(save_dir, 'partnet_generated_data_%dpts.h5' % num_points)
if total_generated_data is None:
total_generated_data = generated_data.detach().cpu().numpy()
else:
total_generated_data = np.concatenate([total_generated_data,
generated_data.detach().cpu().numpy()], axis=0)
hf = h5py.File(save_file, 'w')
hf.create_dataset('data', data=total_generated_data)
hf.close()
# save t slices
if save_multiple_t_slices:
if generated_data_t_slices is None:
generated_data_t_slices = result_slices
else:
for t in t_slices:
generated_data_t_slices[t] = np.concatenate([generated_data_t_slices[t],
result_slices[t]], axis=0)
for t in t_slices:
if dataset == 'mvp_dataset':
t_save_file = os.path.join(save_dir, 'mvp_generated_data_%dpts_T%d.h5' % (num_points, t))
elif dataset == 'shapenet_chunk':
t_save_file = os.path.join(save_dir, 'shapenet_generated_data_%dpts_T%d.h5' % (num_points, t))
elif dataset == 'mvp40':
t_save_file = os.path.join(save_dir, 'mvp40_generated_data_%dpts_T%d.h5' % (num_points, t))
elif dataset == 'partnet':
t_save_file = os.path.join(save_dir, 'partnet_generated_data_%dpts_T%d.h5' % (num_points, t))
hf = h5py.File(t_save_file, 'w')
hf.create_dataset('data', data=generated_data_t_slices[t])
hf.close()
if (idx) % print_interval == 0:
print('%d files have been saved to the directory %s' % (batch, save_dir))
if dataset in ['mvp_dataset', 'shapenet_chunk', 'mvp40', 'partnet']:
total_meta = total_meta.detach().cpu().numpy()
if return_all_metrics:
return CD_meter.avg, EMD_meter.avg, total_meta, metrics
else:
return CD_meter.avg, EMD_meter.avg, total_meta, metrics['cd_distance'], metrics['emd_distance']
def get_each_category_distance(files):
handle = open(files, 'rb')
data = pickle.load(handle)
handle.close()
# pdb.set_trace()
meta = data['meta']
distance_keys = ['cd_distance', 'emd_distance']
cate_split_result = []
for distance in distance_keys:
split_result = {}
for k in name_to_number.keys():
split_result[k] = []
for i, m in enumerate(meta):
number = m.split('/')[-2]
cate = number_to_name[number]
split_result[cate].append(data[distance][i])
final_split_result = {}
for k in split_result.keys():
if len(split_result[k]) > 0:
final_split_result[k] = np.array(split_result[k]).mean()
# print(k, final_split_result[k])
cate_split_result.append(final_split_result)
for idx, dis in enumerate(distance_keys):
new_key = dis + '_category_split_result'
data[new_key] = cate_split_result[idx]
handle = open(files, 'wb')
pickle.dump(data, handle)
handle.close()
    print('Finished splitting distances for each category for file %s' % files, flush=True)
return 0
# gather all evaluation results from all ckpts and plot them in figures
def gather_eval_result_of_different_iters(directory, match1, match2, nomatch=None, split_category=False, save_suffix='', plot=True,
                                          gathered_keys=['iter', 'avg_cd', 'avg_emd', 'cd_distance_category_split_result', 'emd_distance_category_split_result']):
files = [f for f in os.listdir(directory) if os.path.isfile(os.path.join(directory, f))]
files = [f for f in files if match1 in f and match2 in f]
if not nomatch is None:
files = [f for f in files if not nomatch in f]
gathered_results = {}
for f in files:
if split_category:
get_each_category_distance(os.path.join(directory, f))
handle = open(os.path.join(directory, f), 'rb')
data = pickle.load(handle)
handle.close()
for key in gathered_keys:
if key in data.keys():
if isinstance(data[key], dict): # data[key] is a dictionary
if key in gathered_results.keys():
for sub_key in data[key].keys():
gathered_results[key][sub_key].append(data[key][sub_key])
# data[key][sub_key] is a single number
else:
gathered_results[key] = {}
for sub_key in data[key].keys():
gathered_results[key][sub_key] = [ data[key][sub_key] ]
else: # data[key] is a single number
if key in gathered_results.keys():
gathered_results[key].append(data[key])
else:
gathered_results[key] = [data[key]]
else:
print('key %s is not in the data loaded from file %s' % (key, f), flush=True)
save_file = os.path.join(directory, 'gathered_eval_result'+save_suffix+'.pkl')
handle = open(save_file, 'wb')
pickle.dump(gathered_results, handle)
handle.close()
if plot:
plot_result(gathered_results, gathered_keys[0], os.path.join(directory, 'figures'+save_suffix),
plot_values=gathered_keys[1:], print_lowest_value=False)
return gathered_results
def plot_train_and_val_eval_result(eval_dir):
# plot testset and trainset figures in the same figure, and find the ckpt that has the lowest loss value
label_list = ['test set', 'train set']
files = ['gathered_eval_result.pkl', 'gathered_eval_result_trainset.pkl']
file_list = [os.path.join(eval_dir, files[i]) for i in range(len(files))]
plot_values = ['avg_cd', 'avg_emd', 'avg_cd_p', 'avg_f1']
result_list = []
for f in file_list:
handle = open(f, 'rb')
result = pickle.load(handle)
result_list.append(result)
handle.close()
save_dir = os.path.join(eval_dir, 'compare_test_and_train_set')
plot_result_list(result_list, 'iter', label_list, save_dir, line_style=None, plot_values=plot_values,
print_lowest_value=True)
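# Added usage sketch (illustrative; the directory and match strings are
# hypothetical):
#
#   gather_eval_result_of_different_iters('exp/eval', 'eval_result', '.pkl',
#       nomatch='trainset', split_category=False)
#   plot_train_and_val_eval_result('exp/eval')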
|
{"hexsha": "833ed468c1a0dcf1a5d2d54a752581cbe08c6f91", "size": 19759, "ext": "py", "lang": "Python", "max_stars_repo_path": "pointnet2/completion_eval.py", "max_stars_repo_name": "ZhaoyangLyu/Point_Diffusion_Refinement", "max_stars_repo_head_hexsha": "857fcd176dcc9c1a93a9fec27390502fa6c9e29d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 24, "max_stars_repo_stars_event_min_datetime": "2021-12-29T11:28:34.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-27T15:20:46.000Z", "max_issues_repo_path": "pointnet2/completion_eval.py", "max_issues_repo_name": "ZhaoyangLyu/Point_Diffusion_Refinement", "max_issues_repo_head_hexsha": "857fcd176dcc9c1a93a9fec27390502fa6c9e29d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pointnet2/completion_eval.py", "max_forks_repo_name": "ZhaoyangLyu/Point_Diffusion_Refinement", "max_forks_repo_head_hexsha": "857fcd176dcc9c1a93a9fec27390502fa6c9e29d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2022-03-06T12:58:24.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-15T06:18:38.000Z", "avg_line_length": 46.6014150943, "max_line_length": 171, "alphanum_fraction": 0.5921352295, "include": true, "reason": "import numpy", "num_tokens": 4438}
|
#!/opt/anaconda3/envs/py27/bin/python
# -*- coding: UTF-8 -*-
import cgi, os, sys
import cgitb; cgitb.enable()
from pandas import *
import numpy as np
import twd97
from pyproj import Proj
import tempfile as tf
Latitude_Pole, Longitude_Pole = 23.61000, 120.9900
Xcent, Ycent = twd97.fromwgs84(Latitude_Pole, Longitude_Pole)
pnyc = Proj(proj='lcc', datum='NAD83', lat_1=10, lat_2=40,
lat_0=Latitude_Pole, lon_0=Longitude_Pole, x_0=0, y_0=0.0)
form = cgi.FieldStorage()
SED='/usr/bin/sed -ie'
pth='/tmp/wrose/'
rst='/Library/WebServer/Documents/taiwan/'
CGI='/Library/WebServer/CGI-Executables/'
NCL='/opt/anaconda3/envs/ncl_stable/bin/ncl '
MBLs={'obsv':'/Library/WebServer/Documents/taiwan/taiMarbleScale.ncl',\
'forc':'/Library/WebServer/Documents/taiwan/chnMarble.ncl'}
ran=tf.NamedTemporaryFile().name.replace('/','').replace('tmp','')
os.system('mkdir -p '+pth)
print "Content-Type: text/html\n\n "
print open(CGI+'header.txt','r').read()
fileitem = [form['filename'+str(i)] for i in range(2)]
fns,dfs=[],[]
col=['x','y']
for i in range(2):
if fileitem[i].filename:
fns.append( os.path.basename(fileitem[i].filename))
open(pth+fns[i], 'wb').write(fileitem[i].file.read())
df=read_csv(pth+fns[i],header=None)
if len(df.columns)>2:
col_tmp=df.columns
for j in range(2,len(df.columns)):
del df[col_tmp[j]]
df.columns=col
        # drop a header row if the first entry is not numeric
        # (isinstance also accepts numpy float64, unlike a strict type check)
        if not isinstance(df.loc[0,'x'], float):
df=df.drop(0).reset_index(drop=True)
df.x=np.array(df.x,dtype=float)
df.y=np.array(df.y,dtype=float)
        if max(df.x)>360.:
            # values beyond the lon/lat range are projected LCC coordinates (m);
            # shift to the projection origin and invert back to WGS84 lon/lat
            x_lcp,y_lcp=np.array(df.x)-Xcent,np.array(df.y)-Ycent
            lon, lat = pnyc(x_lcp, y_lcp, inverse=True)
            df.x, df.y = lon, lat
dfs.append(df)
if len(dfs[0])>len(dfs[1]):
line,marks=(dfs[i] for i in range(2))
else:
marks,line=(dfs[i] for i in range(2))
line[col].set_index('x').to_csv(rst+'line.csv',header=None)
marks[col].set_index('x').to_csv(rst+'marks.csv',header=None)
MBL=MBLs['obsv']
mbl=MBL.split('/')[-1]
rmb='trj_'+ran+'.ncl'
title='trj. for '+fns[0].replace('.csv','')
cmd ='source /opt/local/bin/conda_ini ncl_stable >/tmp/wrose/wrose.out;'
cmd+='cd '+rst+';'
cmd+='cp '+mbl+' '+rmb+';'+SED+(' "s/TITLE/{:s}/g" '+rmb).format(title)+';'
cmd+= NCL+rmb+'>>/tmp/wrose/wrose.out;'
cmd+='cp topo.png ./trj_'+ran+'.png'
os.system('echo "'+cmd+'">>/tmp/wrose/wrose.out')
os.system(cmd)
os.system('echo "OK 3!">>/tmp/wrose/wrose.out')
fn2='../../taiwan/trj_'+ran+'.png'
print """
<p>The resultant PNG will be automatically downloaded shortly. If it doesn't, click <a data-auto-download href="%s">%s</a></p>
</body>
</html>
""" % (fn2,fn2.split('/')[-1])
|
{"hexsha": "e5b3ef9072d0e90d1284da0a53562cf790520499", "size": 2601, "ext": "py", "lang": "Python", "max_stars_repo_path": "utilities/CGI-pythons/save_trjMarble.py", "max_stars_repo_name": "sinotec2/Focus-on-Air-Quality", "max_stars_repo_head_hexsha": "eac84651eaf6300a16f25a4d76b97a7f53454035", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "utilities/CGI-pythons/save_trjMarble.py", "max_issues_repo_name": "sinotec2/Focus-on-Air-Quality", "max_issues_repo_head_hexsha": "eac84651eaf6300a16f25a4d76b97a7f53454035", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "utilities/CGI-pythons/save_trjMarble.py", "max_forks_repo_name": "sinotec2/Focus-on-Air-Quality", "max_forks_repo_head_hexsha": "eac84651eaf6300a16f25a4d76b97a7f53454035", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.9240506329, "max_line_length": 126, "alphanum_fraction": 0.661668589, "include": true, "reason": "import numpy", "num_tokens": 885}
|
module fractdata
implicit double precision(a-h,o-z)
parameter(nsmall=140)
parameter(ncomp=10)
parameter(maxdouble=50)
parameter(maxfamily=50)
save
character*80 icomm
character*2 sym(91)
character*80 inputformat
character*20 ititle
dimension numb(91)
dimension rad(91),subtimes(10)
character*80, allocatable :: abinitio(:)
character*2, allocatable :: atoms(:)
integer, allocatable :: numa(:)
integer, allocatable :: nstop(:),numat(:),natstore(:,:)
integer, allocatable :: isign(:),itype(:,:),ibond(:,:,:)
integer, allocatable :: ilink(:,:),map(:,:),kf(:)
integer, allocatable :: nfam(:),ifam(:,:),mult(:),ichg(:)
integer, allocatable :: mbstore(:),nbstore(:),multstore(:)
integer, allocatable :: junk(:,:),itotf(:)
integer, allocatable :: nb(:),mb(:),newmult(:),matb(:,:)
integer, allocatable :: mats(:),matr(:),itp(:),ibo(:,:)
integer, allocatable :: ma(:,:),notthere(:),numgroups(:)
real*8, allocatable :: coords(:,:),radius(:),c(:,:)
integer, allocatable :: ibf(:,:,:),itf(:,:),ngpf(:,:,:)
integer newfrag,Level,natom,natomall,nbondso,nbonds,nffinal
integer itfinal,icycle,nfrag,nafrag,nf,nbig3,nfstore,nhstore
integer nocapsatall,nelim,nabitio,iterfinal,nfragm,numhbonds
real*8 dtol
end module fractdata
|
{"hexsha": "1c6c864855312ecc35b293eabf89a2d52438090a", "size": 1473, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/solventcharges/fractdata.f90", "max_stars_repo_name": "mickcollins/SMFA", "max_stars_repo_head_hexsha": "2a730ffc879991460c614d25e3dab566b805b379", "max_stars_repo_licenses": ["DOC"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2019-03-01T15:26:20.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-14T13:12:12.000Z", "max_issues_repo_path": "src/solventcharges/fractdata.f90", "max_issues_repo_name": "mickcollins/SMFA", "max_issues_repo_head_hexsha": "2a730ffc879991460c614d25e3dab566b805b379", "max_issues_repo_licenses": ["DOC"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-03-06T17:53:03.000Z", "max_issues_repo_issues_event_max_datetime": "2019-03-10T17:34:30.000Z", "max_forks_repo_path": "src/solventcharges/fractdata.f90", "max_forks_repo_name": "mickcollins/SMFAPAC", "max_forks_repo_head_hexsha": "2a730ffc879991460c614d25e3dab566b805b379", "max_forks_repo_licenses": ["DOC"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2018-08-06T06:13:39.000Z", "max_forks_repo_forks_event_max_datetime": "2018-08-06T06:13:39.000Z", "avg_line_length": 32.7333333333, "max_line_length": 70, "alphanum_fraction": 0.577053632, "num_tokens": 421}
|
/*! \file deleter.h
\brief Header file that declares and defines deleters for gsl_interp_accel and gsl_spline
Copyright © 2015 @dc1394 All Rights Reserved.
This software is released under the BSD 2-Clause License.
*/
#ifndef _DELETER_H_
#define _DELETER_H_
#pragma once
#include <gsl/gsl_errno.h>
#include <gsl/gsl_spline.h>
namespace getdata {
//! A function.
/*!
Lambda expression that frees a pointer to a gsl_interp_accel
\param acc pointer to the gsl_interp_accel
*/
static auto const gsl_interp_accel_deleter = [](gsl_interp_accel * acc) {
gsl_interp_accel_free(acc);
};
//! A function.
/*!
Lambda expression that frees a pointer to a gsl_spline
\param spline pointer to the gsl_spline
*/
static auto const gsl_spline_deleter = [](gsl_spline * spline) {
gsl_spline_free(spline);
};
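//! Usage sketch (illustrative, not part of the original header; assumes
//! <memory> is included and n is a size_t):
/*!
    std::unique_ptr<gsl_spline, decltype(gsl_spline_deleter)>
        spline(gsl_spline_alloc(gsl_interp_cspline, n), gsl_spline_deleter);
    std::unique_ptr<gsl_interp_accel, decltype(gsl_interp_accel_deleter)>
        acc(gsl_interp_accel_alloc(), gsl_interp_accel_deleter);
*/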
}
#endif // _DELETER_H_
|
{"hexsha": "0bab56f063e03d45b799af313d5c2ec04be8b12b", "size": 811, "ext": "h", "lang": "C", "max_stars_repo_path": "getdata/deleter.h", "max_stars_repo_name": "dc1394/SchracVisualize", "max_stars_repo_head_hexsha": "0ac49e883a4f9b92a48d224350f3d1967a1dbfe7", "max_stars_repo_licenses": ["Intel", "X11", "OLDAP-2.2.1", "Unlicense"], "max_stars_count": 5.0, "max_stars_repo_stars_event_min_datetime": "2015-05-02T05:26:00.000Z", "max_stars_repo_stars_event_max_datetime": "2016-01-08T04:52:02.000Z", "max_issues_repo_path": "getdata/deleter.h", "max_issues_repo_name": "dc1394/SchracVisualize", "max_issues_repo_head_hexsha": "0ac49e883a4f9b92a48d224350f3d1967a1dbfe7", "max_issues_repo_licenses": ["Intel", "X11", "OLDAP-2.2.1", "Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "getdata/deleter.h", "max_forks_repo_name": "dc1394/SchracVisualize", "max_forks_repo_head_hexsha": "0ac49e883a4f9b92a48d224350f3d1967a1dbfe7", "max_forks_repo_licenses": ["Intel", "X11", "OLDAP-2.2.1", "Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.3421052632, "max_line_length": 77, "alphanum_fraction": 0.67324291, "num_tokens": 263}
|
#
# Copyright (C) 2020 IBM. All Rights Reserved.
#
# See LICENSE.txt file in the root directory
# of this source tree for licensing information.
#
import random
from math import cos, isnan, pi, sin, sqrt
from typing import Dict, Optional, Tuple
import numpy as np
import portion
import torch
from PIL import Image
import vsrl.verifier.expr as vexpr
from vsrl.rl.envs.render_helpers import paste_coordinates
from vsrl.spaces import CompactSet
from vsrl.symmap.symbolic_mapper import SymFeatExtractor
from vsrl.utils.assets import get_image_path
from ._utils import gen_separated_points
from .env import Env
REACHED_STEP_LIMIT = 4
MOVED_OFF_MAP = 3
REACHED_GOAL_HAZARD = 2
REACHED_GOAL = 1
NOT_DONE = 0
REACHED_HAZARD = -1
class PMGFSymFeatExtractor(SymFeatExtractor):
agent_id = 0
hazard_id = 2
# TODO: make it easier for max_n_obstacles to stay in sync with PMGF
def __init__(self, detector: torch.nn.Module, max_n_obstacles: int = 10):
super().__init__(detector)
self.max_n_obstacles = max_n_obstacles
self._range = torch.nn.Parameter(
torch.arange(
PMGoalFinding._obs_start_idx,
PMGoalFinding._obs_start_idx + 2 * max_n_obstacles,
2,
),
requires_grad=False,
)
def forward(self, imgs):
"""
The symbolic state vector has the following data in it at the given indices:
[0, 1]: ego_x, ego_y
[2, 3]: goal_x, goal_y (nan; we do detect this but it isn't used)
[4]: theta (nan but we should try to estimate this at some point?)
[5, 6]: v, w (nan)
[7:]: hazard1_x, hazard1_y, hazard2_x, ...
If a detection for `ego` is missing, (0, 0) will be used. For hazards, because
the number could be variable, (nan, nan) is used if the number is less than
max_n_obstacles.
:param imgs: nchw
:returns: n x d where d = 7 + 2 * max_n_obstacles
"""
img_idx, class_id, center_x, center_y, _ = super().forward(imgs)
sym_feats = torch.full(
(len(imgs), 7 + 2 * self.max_n_obstacles), float("nan"), device=imgs.device
)
agent_idx = class_id == self.agent_id
agent_img_idx = img_idx[agent_idx]
hazard_idx = class_id == self.hazard_id
hazard_img_idx = img_idx[hazard_idx]
sym_feats[:, :2] = 0 # ensure no NaNs for agent location
sym_feats[agent_img_idx, 0] = center_x[agent_idx]
sym_feats[agent_img_idx, 1] = center_y[agent_idx]
_, counts = torch.unique(hazard_img_idx, return_counts=True)
col_idx = torch.cat([self._range[:c] for c in counts])
sym_feats[hazard_img_idx, col_idx] = center_x[hazard_idx]
sym_feats[hazard_img_idx, col_idx + 1] = center_y[hazard_idx]
return sym_feats
class PMGoalFinding(Env):
"""
DSolve[odes = {
x'[t] == v[t]*Cos[theta[t]],
y'[t] == v[t]*Sin[theta[t]],
v'[t] == a,
theta'[t] == w,
theta[0] == theta0,
x[0] == x0,
y[0] == y0,
v[0] == v0
}, {x[t], y[t], v[t], theta[t]}, t]
theta[t] -> theta0 + t w
v[t] -> a t + v0,
x[t] -> (1/(w^2))(w^2 x0 - a Cos[theta0] -
v0 w Sin[theta0] + a Cos[theta0 + t w] +
a t w Sin[theta0 + t w] + v0 w Sin[theta0 + t w]),
y[t] -> (1/(w^2))(w^2 y0 + v0 w Cos[theta0] -
a Sin[theta0] - a t w Cos[theta0 + t w] -
v0 w Cos[theta0 + t w] + a Sin[theta0 + t w])
"""
SymFeatClass = PMGFSymFeatExtractor
# indices into self._state for certain parts of the state
_ego_x_idx: int = 0
_ego_y_idx: int = 1
_goal_x_idx: int = 2
_goal_y_idx: int = 3
_theta_idx: int = 4
_sin_theta_idx = 5
_cos_theta_idx = 6
_v_idx: int = 7
_w_idx: int = 8
_obs_start_idx: int = 9 # index of first obstacle. from here on, the state is
# (obs0_x, obs0_y, obs1_x, ...) for all num_obstacles obstacles
def __init__(
self,
num_obstacles: int = 10, # TODO
img_scale: int = 1,
grayscale: bool = False,
oracle_obs: bool = False,
safe_sep: float = 1.0,
extra_hazard_size: Optional[int] = None,
walls: bool = False,
dense_rewards: bool = False,
randomize_goal: bool = False,
):
"""
:param extra_hazard_size: for debugging only; this renders hazards a second time
with half alpha and with height and width increased by this amount. This is
useful to visualize how far a safe agent has to stay away from hazards if it
has a certain number of pixels of maximum detection error.
:param walls: don't let the agent go off the screen; no boundary penalty
:param dense_rewards: rewards for distance + angle to goal
"""
# load graphics
scene = Image.open(get_image_path("top/bg.png"))
self._pointer = Image.open(get_image_path("pointer.png")) # for debugging.
self._egoimg = Image.open(get_image_path("top/blue.png"))
self._hazardimg = Image.open(get_image_path("top/hazard.png"))
self._goalimg = Image.open(get_image_path("top/goal.png"))
img_names = ["_egoimg", "_hazardimg", "_goalimg", "_scene"]
self.map_buffer = 12
self.T = 0.1
self.A = 30 # can change v by max 3 per step
self.B = 30 # should be positive; accel range is [-B, A]
self.min_w = -1
self.max_w = 1
self.max_v = 30 # max 3 pixels per step
self._safe_sep = safe_sep
self.num_obstacles = num_obstacles
self._extra_hazard_size = extra_hazard_size
self._walls = walls
self._dense_rewards = dense_rewards
self._randomize_goal = randomize_goal
self.place_pointers = False
# [v, cos(theta), sin(theta)]
vector_obs_bounds = ([0, -1, -1], [self.max_v, 1, 1])
super().__init__(
img_scale,
grayscale,
oracle_obs,
scene,
img_names,
vector_obs_bounds=vector_obs_bounds,
)
# _radius determines if a collision is happening. We'll use the sizes of the
# images to directly compute when they overlap.
assert self._hazardimg.size[0] == self._hazardimg.size[1]
# not true for ego img, but width is larger, so radius is still okay
# assert self._egoimg.size[0] == self._egoimg.size[1]
assert self._goalimg.size[0] == self._hazardimg.size[0]
self._radius = self._egoimg.size[0] / 2 + self._hazardimg.size[0] / 2
self._safe_sep += self._radius
        # (self.place_pointers, set above: if true, yellow dots are drawn at
        # the midpoints of various objects for debugging.)
self._max_goal_dist = sqrt(self._width ** 2 + self._height ** 2)
def _make_action_space(self) -> CompactSet:
return CompactSet(
{
vexpr.Variable("w"): (self.min_w, self.max_w), # rotational velocity
vexpr.Variable("a"): (-self.B, self.A), # translational acceleration
}
)
def _make_oracle_space(self) -> CompactSet:
# make sure this stays in sync with the indices into the state array defined
# in init
ranges: Dict[vexpr.Variable, Tuple[float, float]] = {
vexpr.Variable("ego_x"): (
0 + self.map_buffer,
self._width - self.map_buffer,
),
vexpr.Variable("ego_y"): (
0 + self.map_buffer,
self._height - self.map_buffer,
),
vexpr.Variable("goal_x"): (
0 + self.map_buffer,
self._width - self.map_buffer,
),
vexpr.Variable("goal_y"): (
0 + self.map_buffer,
self._height - self.map_buffer,
),
vexpr.Variable("theta"): (-2 * pi, 2 * pi),
vexpr.Variable("sin_theta"): (-1, 1),
vexpr.Variable("cos_theta"): (-1, 1),
vexpr.Variable("v"): (0, self.max_v),
vexpr.Variable("w"): (self.min_w, self.max_w),
}
# Add obstacles to the oracle space.
for i in range(1, self.num_obstacles + 1):
ranges[vexpr.Variable(f"obs{i}_x")] = (
0 + self.map_buffer,
self._width - self.map_buffer,
)
ranges[vexpr.Variable(f"obs{i}_y")] = (
0 + self.map_buffer,
self._height - self.map_buffer,
)
return CompactSet(ranges)
def step(self, action: np.ndarray) -> Tuple[np.ndarray, float, bool, dict]:
"""
:param action: w, a
"""
if self._done:
# TODO: make a custom exception for this; use it in all envs
raise ValueError(
"This episode has terminated; call reset before stepping again."
)
assert action in self.action_space
w, a = action
v0 = self._state[self._v_idx]
new_v = v0 + a * self.T
        # enforce the bounds on v ([0, max_v]) by changing a.
if new_v < 0:
a = -v0 / self.T
new_v = 0
elif new_v > self.max_v:
a = (self.max_v - v0) / self.T
new_v = self.max_v
theta0 = self._state[self._theta_idx]
sin_theta0 = self._state[self._sin_theta_idx]
cos_theta0 = self._state[self._cos_theta_idx]
new_state = self._state.copy()
tw = self.T * w
theta = theta0 + tw
# keep theta in [-2 * pi, 2 * pi]
if theta > 2 * pi:
theta -= 2 * pi
elif theta < -2 * pi:
theta += 2 * pi
cos_theta = cos(theta)
sin_theta = sin(theta)
new_state[self._w_idx] = w
        new_state[self._v_idx] = new_v  # equals v0 + a * T with the (possibly adjusted) a
new_state[self._theta_idx] = theta
new_state[self._sin_theta_idx] = sin_theta
new_state[self._cos_theta_idx] = cos_theta
if w != 0:
new_state[self._ego_x_idx] += (1 / w ** 2) * (
-a * cos_theta0
- v0 * w * sin_theta0
+ a * cos_theta
+ a * tw * sin_theta
+ v0 * w * sin_theta
)
new_state[self._ego_y_idx] += (1 / w ** 2) * (
v0 * w * cos_theta0
- a * sin_theta0
- a * tw * cos_theta
- v0 * w * cos_theta
+ a * sin_theta
)
else:
new_state[self._ego_x_idx] += cos_theta0 * max(
0, v0 * self.T + a * self.T ** 2 / 2
)
new_state[self._ego_y_idx] += sin_theta0 * max(
0, v0 * self.T + a * self.T ** 2 / 2
)
if self._walls: # keep the agent in-bounds; no boundary penalties
x = new_state[self._ego_x_idx]
y = new_state[self._ego_y_idx]
new_state[self._ego_x_idx] = min(
max(self.map_buffer, x), self._width - self.map_buffer
)
new_state[self._ego_y_idx] = min(
max(self.map_buffer, y), self._height - self.map_buffer
)
self._step += 1
reward, done_code = self._get_reward_and_done_code(
self._state, action, new_state
)
self._state = new_state
self._done = done_code != NOT_DONE
obs = self._get_obs(np.array([new_v, cos_theta, sin_theta], dtype=np.float32))
return (
obs,
reward,
self._done,
{
"done_reason": done_code,
"unsafe": done_code in (REACHED_GOAL_HAZARD, REACHED_HAZARD),
},
)
def _is_valid_state(self, state):
return state in self.oracle_space
def _final_state_description(self, done):
if done == MOVED_OFF_MAP:
return "moved off map"
elif done == REACHED_GOAL_HAZARD:
return "reached goal and hazard at the same time"
elif done == REACHED_GOAL:
return "reached goal and not currently touching any hazards"
elif done == NOT_DONE:
return "not done"
elif done == REACHED_HAZARD:
return (
f"crashed into a hazard at position {self._find_collision(self._state)}"
)
elif done == REACHED_STEP_LIMIT:
return f"Reached limit of {self.horizon:,} steps."
else:
return f"done, but not sure why. Done code was {done}"
def _get_reward_and_done_code(self, state1, action, state2) -> Tuple[float, int]:
"""
:param state:
:return: (reward, done_code). Use _final_state_description to interpret done_code.
"""
egox, egoy = state2[:2]
vector_to_goal = state2[2:4] - state2[:2]
dist_to_goal = sqrt(np.dot(vector_to_goal, vector_to_goal))
# if we have gamma = 0.99 then getting a constant reward c for H steps gives
# reward < 100c. We want it to be better to go to the goal right away than to
# get the distance reward from right next to the goal, so we make its maximum
# value < 1 / 100 * goal reward.
# or, to be careful, in case gamma = 1, we can make
# max_dist_reward * H < goal_reward
if self._dense_rewards:
vector_to_goal /= dist_to_goal # normalize
theta = state2[self._theta_idx]
angle_to_goal = np.dot(vector_to_goal, np.array([cos(theta), sin(theta)]))
# convert both rewards to [0, 1] then scale their sum
dist_reward = 1 - dist_to_goal / self._max_goal_dist
angle_reward = angle_to_goal / 2 + 0.5
reward = (dist_reward + angle_reward) / 20
else:
reward = 0
at_goal = dist_to_goal <= self._radius
is_colliding = self._collision(state2)
if not (
self.map_buffer <= egox <= self._width - self.map_buffer
and self.map_buffer <= egoy <= self._height - self.map_buffer
):
return -1, MOVED_OFF_MAP
if at_goal:
if is_colliding:
return 0, REACHED_GOAL_HAZARD
return 10, REACHED_GOAL
if is_colliding:
return -1, REACHED_HAZARD
if self._step >= self.horizon:
return reward, REACHED_STEP_LIMIT
return reward, NOT_DONE
def _collision(self, state):
return self._find_collision(state) is not None
def _find_collision(self, state) -> Optional[Tuple[int, int]]:
egox = state[self._ego_x_idx]
egoy = state[self._ego_y_idx]
for i in range(self.num_obstacles):
xidx = self._obs_start_idx + 2 * i
yidx = xidx + 1
ox, oy = state[xidx], state[yidx]
if self._circle_contains(egox, egoy, ox, oy, self._radius):
return ox, oy
return None
def _circle_contains(self, x, y, c_x, c_y, c_radius):
return (x - c_x) ** 2 + (y - c_y) ** 2 <= c_radius ** 2
def reset(self) -> np.ndarray:
state = np.empty_like(self._state)
angle = random.random() * 2 * pi
if self._randomize_goal:
initial_points = None
else:
initial_points = np.array([[3 * self._width // 4, 3 * self._height // 4]])
# place objects so they don't collide
points = gen_separated_points(
self.num_obstacles + 2,
sep=self._hazardimg.width,
lower_bounds=np.array([self.map_buffer, self.map_buffer]),
upper_bounds=np.array(
[self._width - self.map_buffer, self._height - self.map_buffer]
),
initial_points=initial_points,
)
state[self._w_idx] = 0
state[self._v_idx] = 0
state[self._theta_idx] = angle
state[self._sin_theta_idx] = sin(angle)
state[self._cos_theta_idx] = cos(angle)
state[self._goal_x_idx] = points[0, 0]
state[self._goal_y_idx] = points[0, 1]
state[self._ego_x_idx] = points[1, 0]
state[self._ego_y_idx] = points[1, 1]
state[self._obs_start_idx :] = points[2:].ravel()
assert self._is_valid_state(state), self.oracle_space.to_state(state)
self._state = state
self._done = False
self._step = 0
self._prev_frame.fill(0)
obs = self._get_obs(np.array([0, cos(angle), sin(angle)], dtype=np.float32))
return obs
def _rotated_ego_img(self, state: np.ndarray) -> Image:
theta = state[self._theta_idx]
# the image is facing downwards, so we need a 90 degree offset
angle = 90 + theta * 180 / pi
ego_rotated = self._egoimg.rotate(angle)
ego_rotated.mask = self._egoimg.mask.rotate(angle)
return ego_rotated
def render(self) -> Image:
rotated_ego = self._rotated_ego_img(self._state)
scene = self._scene.copy()
for i in range(self.num_obstacles):
xidx = self._obs_start_idx + 2 * i
yidx = xidx + 1
if self._extra_hazard_size is not None:
shape = (
self._hazardimg.width + self._extra_hazard_size,
self._hazardimg.height + self._extra_hazard_size,
)
img = self._hazardimg.resize(shape)
mask = self._hazardimg.mask.resize(shape)
mask = Image.fromarray(
((np.asarray(mask) / 2)).astype(np.uint8), mode="L"
)
scene.paste(
img,
paste_coordinates(
img, self._state[xidx], self._height - self._state[yidx],
),
mask=mask,
)
scene.paste(
self._hazardimg,
paste_coordinates(
self._hazardimg, self._state[xidx], self._height - self._state[yidx]
),
mask=self._hazardimg.mask,
)
if self.place_pointers:
scene.paste(
self._pointer,
(int(self._state[xidx]), self._height - int(self._state[yidx]),),
)
scene.paste(
self._goalimg,
paste_coordinates(
self._goalimg,
self._state[self._goal_x_idx],
self._height - self._state[self._goal_y_idx],
),
mask=self._goalimg.mask,
)
scene.paste(
rotated_ego,
paste_coordinates(
rotated_ego,
self._state[self._ego_x_idx],
self._height - self._state[self._ego_y_idx],
),
mask=rotated_ego.mask,
)
if self.place_pointers:
scene.paste(
self._pointer,
(
int(self._state[self._ego_x_idx]),
self._height - int(self._state[self._ego_y_idx]),
),
)
return scene
def state_constants(self):
return {
vexpr.Variable("A"): vexpr.Number(self.A),
vexpr.Variable("B"): vexpr.Number(self.B),
vexpr.Variable("T"): vexpr.Number(self.T),
vexpr.Variable("safe_sep"): vexpr.Number(self._safe_sep),
vexpr.Variable("min_w"): vexpr.Number(self.min_w),
vexpr.Variable("max_w"): vexpr.Number(self.max_w),
}
@staticmethod
def constraint_func(action, sym_feats, B, T, safe_sep):
"""
L-inf norm version: (doesn't take direction of the agent into account)
(
abs(x - ox) > v^2 / (2 * B) + (A / B + 1) * (A / 2 * T^2 + T * v)
| abs(y - oy) > v^2 / (2 * B) + (A / B + 1) * (A / 2 * T^2 + T * v)
)
(`A` here is the current desired acceleration, not the max acceleration)
Note that the environment bounds the velocity to [0, max_v] by having the
acceleration action not set the acceleration directly if the velocity would go
out of bounds. The constraint doesn't take this into account, so some
acceleration actions might be deemed unsafe which actually would be safe to take
(but the actual acceleration used in the environment step would be different than
the action in such cases).
The distance in the constraint is equivalent to:
pos_diff = v * T + a / 2 * T ** 2 # after taking one step with accel of `a`
v_new = v + a * T
stop_time = v_new / B # stop time / distance after one step
stop_dist = v_new * stop_time + -B / 2 * stop_time ** 2
safe_dist = pos_diff + stop_dist
WARNING: pos_diff could be negative here, but we don't allow negative v.
If this presents an issue, we should modify the constraint to take into account
how the environment changes `a` if `v` would become negative (or change the
environment to make `T` smaller in such cases instead of changing `a`. I worry
that might make the learning more difficult, though, especially if the agent
repeatedly tries to use a very negative `a` when `v` is already nearly 0.)
"""
w, a = action
x, y = sym_feats[:2]
v = sym_feats[PMGoalFinding._v_idx]
safe_dist = (
(v ** 2 / (2 * B)) + ((a / B + 1) * (a / 2 * T ** 2 + T * v))
) + safe_sep
for i in range(PMGoalFinding._obs_start_idx, len(sym_feats), 2):
ox = sym_feats[i]
if isnan(ox): # once one object is nan, all others will be
break
oy = sym_feats[i + 1]
if abs(x - ox) < safe_dist and abs(y - oy) < safe_dist:
return False
return True
@staticmethod
def constrained_sample(sym_feats, min_w, max_w, B, A, T, safe_sep):
"""
w is unconstrained (at least with the l-inf norm constraint; if we use a less
conservative constraint, we'll have to deal with w too)
The sampling here is more complex because the possible values for acc are the
intersection of possible values for each hazard's constraint and each hazard has
a union of two terms as a constraint. There will only ever be two compact ranges
that contain the safe space for all objects considered so far, but we have to
track these carefully.
Start with the constraints (shown for x but almost identical for y):
abs(x - ox) > v^2 / (2 * B) + (A / B + 1) * (A / 2 * T^2 + T * v)
then write as a quadratic in A:
0 > A^2 * a + A * b + (c - abs(x - ox))
a = T^2 / (2 * B)
b = T * V / B + T^2 / 2
c = T * v + v^2 / (2 * B)
Use the quadratic formula to find the zeros (only abs(x - ox) has to change for
each object and for x / y). a > 0, so the area between the zeros is the safe set
for A (unioned between x and y, intersected over all hazards).
"""
w = min_w + (max_w - min_w) * random.random()
x, y = sym_feats[:2]
v = sym_feats[PMGoalFinding._v_idx]
a = T ** 2 / (2 * B)
b = T * v / B + T ** 2 / 2
c0 = T * v + v ** 2 / (2 * B) + safe_sep
safe_sets = portion.closed(-B, A)
for i in range(PMGoalFinding._obs_start_idx, len(sym_feats), 2):
ox = sym_feats[i]
if isnan(ox):
break
oy = sym_feats[i + 1]
c_x = c0 - abs(x - ox)
c_y = c0 - abs(y - oy)
d = b ** 2 - 4 * a * c_x
if d < 0: # fallback action is -B
z11, z12 = -B, -B
else:
d = sqrt(d)
z12 = (-b + d) / (2 * a)
z11 = (-b - d) / (2 * a)
z11, z12 = (z11, z12) if z11 < z12 else (z12, z11)
d = b ** 2 - 4 * a * c_y
if d < 0:
z21, z22 = -B, -B
else:
d = sqrt(d)
z22 = (-b + d) / (2 * a)
z21 = (-b - d) / (2 * a)
z21, z22 = (z21, z22) if z21 < z22 else (z22, z21)
safe_sets &= portion.closed(z11, z12) | portion.closed(z21, z22)
if safe_sets.empty:
return np.array([max_w, -B], dtype=np.float32)
volumes = tuple(s.upper - s.lower for s in safe_sets)
safe_set = random.choices(safe_sets, weights=volumes)[0]
acc = safe_set.lower + (safe_set.upper - safe_set.lower) * random.random()
return np.array([w, acc], dtype=np.float32)
|
{"hexsha": "e16732fe998891bbe7d6ac071bb39e1fab05bc19", "size": 24666, "ext": "py", "lang": "Python", "max_stars_repo_path": "vsrl/rl/envs/pm_goal_finding.py", "max_stars_repo_name": "nrfulton/vsrl-framework", "max_stars_repo_head_hexsha": "c778824b3285e3e994a4c5846c7b1c2ac03c669b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-03-22T03:31:32.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-22T03:31:32.000Z", "max_issues_repo_path": "vsrl/rl/envs/pm_goal_finding.py", "max_issues_repo_name": "nrfulton/vsrl-framework", "max_issues_repo_head_hexsha": "c778824b3285e3e994a4c5846c7b1c2ac03c669b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "vsrl/rl/envs/pm_goal_finding.py", "max_forks_repo_name": "nrfulton/vsrl-framework", "max_forks_repo_head_hexsha": "c778824b3285e3e994a4c5846c7b1c2ac03c669b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.0061633282, "max_line_length": 90, "alphanum_fraction": 0.5529879186, "include": true, "reason": "import numpy", "num_tokens": 6460}
|
c***CALCF**********************
subroutine calcF(option,iwell,iprod)
implicit none
include 'cdparams.fh'
include 'cdrange.fh'
include 'cdrates.fh'
include 'cdlimfit.fh'
include 'cdffunc.fh'
c local variables
character*8 option
integer it,ip,iwell,iprod
real*8 RKlind,concM
c end declarations
do 500 it = 1,ntemps
do 500 ip = 1,npres
concM = pres(it,ip) / (82.1d0*temp(it))
c get lindemann forms (depends on depth of well and
c whether stabilization or product channel); also do
c special dissoc case
if (addTerm) then
Pr(it,ip) = (RK(iwell,iprod,it,0) +
2 RK(iwell,iprod,it,-1)/concM)
3 *concM**nPr(iwell,iprod) /
4 RK(iwell,iprod,it,npres+1)
c or, do general case
else
Pr(it,ip) = RK(iwell,iprod,it,0)
2 *concM**nPr(iwell,iprod) /
3 RK(iwell,iprod,it,npres+1)
endif
RKlind = RK(iwell,iprod,it,npres+1)
2 *concM**nRKinf(iwell,iprod)
3 *Pr(it,ip) / (1.d0 + Pr(it,ip))
Fcalc(it,ip) = RK(iwell,iprod,it,ip) / RKlind
500 continue
return
end
c***FOUTPUT************************
subroutine Foutput(option,lfout,iwell,iprod)
c fill out output files with Fcalc vs T, P
implicit none
include 'cdparams.fh'
include 'cdwell0.fh'
include 'cdlabels.fh'
include 'cdrange.fh'
include 'cdffunc.fh'
c local variables
character option*8,RCname*20
integer it,ip,iwell,iprod
integer lfout,ipmax
real*8 Flognm
character tab
c end declarations
tab = char(9)
RCname = PDname(inpwell,inpchan)
write(lfout,50) option,inpwell,iwell,iprod,
2 RCname,PDname(iwell,iprod)
50 format(//,1x,a,'(',i2,') -> ',i2,':',i2,' ',a,' = ',a)
write(lfout,150) tab,tab,tab,tab,tab
150 format(/,1x,' T (K) ',a,' log P ',a,' log Pr',a,' Fcalc ',
2 a,' -ln F ',a,'ln F |n')
do 350 it = 1,ntemps
write(lfout,270) tab,tab,tab,temp(it),tab,temp(it),tab,temp(it)
270 format(' ',a,a,3(a,f5.0,' K'))
c get lineshape maxima (i.e., F minima)
ipmax = 1
do 200 ip = 1,npres
if (Fcalc(it,ip).le.Fcalc(it,ipmax)) ipmax = ip
200 continue
Flognm = -dlog(Fcalc(it,ipmax))
c make sure there are no divide overflows
if (Flognm.gt.0) then
do 300 ip = 1,npres
write(lfout,280) temp(it),tab,dlog10(pres(it,ip)),tab,
2 dlog10(Pr(it,ip)),tab,Fcalc(it,ip),tab,
3 -dlog(Fcalc(it,ip)),tab,-dlog(Fcalc(it,ip))/Flognm
280 format(' ',f7.2,a,f7.3,a,f7.3,a,f7.4,a,f7.4,a,f7.4)
300 continue
else
do 320 ip = 1,npres
write(lfout,280) temp(it),tab,dlog10(pres(it,ip)),tab,
2 dlog10(Pr(it,ip)),tab,Fcalc(it,ip),tab,
3 -dlog(Fcalc(it,ip))
320 continue
endif
350 continue
return
end
c***TOUTPUT************************
subroutine Toutput(option,ltout,iwell,iprod)
c fill out output files with Fcalc vs T, P
implicit none
include 'cdparams.fh'
include 'cdwell0.fh'
include 'cdlabels.fh'
include 'cdrange.fh'
include 'cdffunc.fh'
c local variables
character option*8,RCname*20
integer it,ip,iwell,iprod
integer ltout,ipmax,ipleft,ipright
real*8 slope,PRLlog,PRRlog,PRsum,PRdif
character tab
c end declarations
tab = char(9)
RCname = PDname(inpwell,inpchan)
write(ltout,50) option,inpwell,iwell,iprod,
2 RCname,PDname(iwell,iprod)
50 format(//,1x,a,'(',i2,') -> ',i2,':',i2,' ',a,' = ',a)
write(ltout,160) tab,tab,tab,tab,tab,tab
160 format(/,1x,' T (K) ',a,' log P ',a,' log Pr',a,' Fcalc ',
2 a,' -ln F ',a,'log Pr+',a,'log Pr-')
do 400 it = 1,ntemps
c get lineshape maxima (F minima)
ipmax = 1
do 200 ip = 1,npres
if (Fcalc(it,ip).le.Fcalc(it,ipmax)) ipmax = ip
200 continue
c proceed if Fmin is not too close to unity (otherwise problems)
if (-dlog10(Fcalc(it,ipmax)).gt.1.d-4) then
c get lineshape left and right HWHM
ipleft = 1
ipright = npres
do 230 ip = 1,npres
if ((dlog(Fcalc(it,ip))/dlog(Fcalc(it,ipmax)).lt.0.5d0)
2 .and.(ip.lt.ipmax)) ipleft = ip
if ((dlog(Fcalc(it,ip))/dlog(Fcalc(it,ipmax)).gt.0.5d0)
2 .and.(ip.gt.ipmax)) ipright = ip
230 continue
c we do some interpolation (could be trouble if F is constant)
slope = (dlog10(PR(it,ipleft+1))-dlog10(PR(it,ipleft)))/
2 (dlog(Fcalc(it,ipleft+1))-dlog(Fcalc(it,ipleft)))
PRLlog = dlog10(PR(it,ipleft))+slope*
2 (0.5d0*dlog(Fcalc(it,ipmax))-dlog(Fcalc(it,ipleft)))
if (ipright.eq.npres) ipright = npres-1
slope = (dlog10(PR(it,ipright+1))-dlog10(PR(it,ipright)))/
2 (dlog(Fcalc(it,ipright+1))-dlog(Fcalc(it,ipright)))
PRRlog = dlog10(PR(it,ipright))+slope*
2 (0.5d0*dlog(Fcalc(it,ipmax))-dlog(Fcalc(it,ipright)))
c note PRLlog is negative
PRsum = PRRlog - PRLlog
PRdif = PRRlog + PRLlog
write(ltout,250) temp(it),tab,dlog10(pres(it,ipmax)),tab,
2 dlog10(PR(it,ipmax)),tab,Fcalc(it,ipmax),tab,
3 -dlog(Fcalc(it,ipmax)),tab,PRsum,tab,PRdif
250 format(' ',f7.2,a,f7.3,a,f7.3,a,f7.4,a,f7.4,a,f7.3,a,f7.3)
else
write(ltout,250) temp(it),tab,dlog10(pres(it,ipmax)),tab,
2 dlog10(PR(it,ipmax)),tab,Fcalc(it,ipmax),tab,
3 -dlog(Fcalc(it,ipmax))
endif
400 continue
return
end
|
{"hexsha": "daa4122554fe9c76d4c961837ca0bf51452b7ae8", "size": 6340, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "source/deprecated/chemdis/src/cdfouts.f", "max_stars_repo_name": "sean-v8/RMG-Java", "max_stars_repo_head_hexsha": "e284bb6b14f06690da157da33dab4a86d55f50d0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-09-12T06:21:05.000Z", "max_stars_repo_stars_event_max_datetime": "2019-09-12T06:21:05.000Z", "max_issues_repo_path": "source/deprecated/chemdis/src/cdfouts.f", "max_issues_repo_name": "bslakman/RMG-Java", "max_issues_repo_head_hexsha": "e5e9690ba06d206fba43705d72aaf8185995065e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "source/deprecated/chemdis/src/cdfouts.f", "max_forks_repo_name": "bslakman/RMG-Java", "max_forks_repo_head_hexsha": "e5e9690ba06d206fba43705d72aaf8185995065e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.3469387755, "max_line_length": 73, "alphanum_fraction": 0.5077287066, "num_tokens": 2044}
|
"""
Basic Equations for Solid Mechanics
###################################
References
----------
.. [Gray2001] J. P. Gray et al., "SPH elastic dynamics", Computer Methods
in Applied Mechanics and Engineering, 190 (2001), pp 6641 - 6662.
"""
from pysph.sph.equation import Equation
from pysph.sph.scheme import Scheme
from textwrap import dedent
from pysph.base.utils import get_particle_array
import numpy as np
def get_bulk_mod(G, nu):
''' Get the bulk modulus from shear modulus and Poisson ratio '''
return 2.0 * G * (1 + nu) / (3 * (1 - 2 * nu))
def get_speed_of_sound(E, nu, rho0):
return np.sqrt(E / (3 * (1. - 2 * nu) * rho0))
def get_shear_modulus(E, nu):
return E / (2. * (1. + nu))
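# Example (illustrative): steel-like constants E = 210e9 Pa, nu = 0.3,
# rho0 = 7800 kg/m^3 give get_shear_modulus(210e9, 0.3) ~ 8.08e10 Pa and
# get_speed_of_sound(210e9, 0.3, 7800.0) ~ 4.7e3 m/s.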
def get_particle_array_elastic_dynamics(constants=None, **props):
"""Return a particle array for the Standard SPH formulation of
solids.
Parameters
----------
constants : dict
Dictionary of constants
Other Parameters
----------------
props : dict
Additional keywords passed are set as the property arrays.
See Also
--------
get_particle_array
"""
solids_props = [
'cs', 'e', 'v00', 'v01', 'v02', 'v10', 'v11', 'v12', 'v20', 'v21',
'v22', 'r00', 'r01', 'r02', 'r11', 'r12', 'r22', 's00', 's01', 's02',
's11', 's12', 's22', 'as00', 'as01', 'as02', 'as11', 'as12', 'as22',
's000', 's010', 's020', 's110', 's120', 's220', 'arho', 'au', 'av',
'aw', 'ax', 'ay', 'az', 'ae', 'rho0', 'u0', 'v0', 'w0', 'x0', 'y0',
'z0', 'e0'
]
    # set wdeltap to -1, which defaults to no self-correction
consts = {
'wdeltap': -1.,
'n': 4,
'G': 0.0,
'E': 0.0,
'nu': 0.0,
'rho_ref': 1000.0,
'c0_ref': 0.0
}
if constants:
consts.update(constants)
pa = get_particle_array(constants=consts, additional_props=solids_props,
**props)
# set the shear modulus G
pa.G[0] = get_shear_modulus(pa.E[0], pa.nu[0])
# set the speed of sound
pa.cs = np.ones_like(pa.x) * get_speed_of_sound(pa.E[0], pa.nu[0],
pa.rho_ref[0])
pa.c0_ref[0] = get_speed_of_sound(pa.E[0], pa.nu[0], pa.rho_ref[0])
# default property arrays to save out.
pa.set_output_arrays([
'x', 'y', 'z', 'u', 'v', 'w', 'rho', 'm', 'h', 'pid', 'gid', 'tag', 'p'
])
return pa
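# Usage sketch (illustrative; x, y, m, h, rho are assumed numpy arrays):
# pa = get_particle_array_elastic_dynamics(
#     constants={'E': 210e9, 'nu': 0.3, 'rho_ref': 7800.0},
#     name='plate', x=x, y=y, m=m, h=h, rho=rho)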
class IsothermalEOS(Equation):
r""" Compute the pressure using the Isothermal equation of state:
    :math:`p = c_0^2(\rho - \rho_0)`
"""
def loop(self, d_idx, d_rho, d_p, d_c0_ref, d_rho_ref):
d_p[d_idx] = d_c0_ref[0] * d_c0_ref[0] * (d_rho[d_idx] - d_rho_ref[0])
class MonaghanArtificialStress(Equation):
r"""**Artificial stress to remove tensile instability**
The dispersion relations in [Gray2001] are used to determine the
different components of :math:`R`.
Angle of rotation for particle :math:`a`
.. math::
\tan{2 \theta_a} = \frac{2\sigma_a^{xy}}{\sigma_a^{xx} - \sigma_a^{yy}}
In rotated frame, the new components of the stress tensor are
.. math::
\bar{\sigma}_a^{xx} = \cos^2{\theta_a} \sigma_a^{xx} + 2\sin{\theta_a}
\cos{\theta_a}\sigma_a^{xy} + \sin^2{\theta_a}\sigma_a^{yy}\\
    \bar{\sigma}_a^{yy} = \sin^2{\theta_a} \sigma_a^{xx} - 2\sin{\theta_a}
    \cos{\theta_a}\sigma_a^{xy} + \cos^2{\theta_a}\sigma_a^{yy}
Components of :math:`R` in rotated frame:
.. math::
\bar{R}_{a}^{xx}=\begin{cases}-\epsilon\frac{\bar{\sigma}_{a}^{xx}}
{\rho^{2}} & \bar{\sigma}_{a}^{xx}>0\\0 & \bar{\sigma}_{a}^{xx}\leq0
\end{cases}\\
\bar{R}_{a}^{yy}=\begin{cases}-\epsilon\frac{\bar{\sigma}_{a}^{yy}}
{\rho^{2}} & \bar{\sigma}_{a}^{yy}>0\\0 & \bar{\sigma}_{a}^{yy}\leq0
\end{cases}
Components of :math:`R` in original frame:
.. math::
R_a^{xx} = \cos^2{\theta_a} \bar{R}_a^{xx} +
\sin^2{\theta_a} \bar{R}_a^{yy}\\
R_a^{yy} = \sin^2{\theta_a} \bar{R}_a^{xx} +
\cos^2{\theta_a} \bar{R}_a^{yy}\\
R_a^{xy} = \sin{\theta_a} \cos{\theta_a}\left(\bar{R}_a^{xx} -
\bar{R}_a^{yy}\right)
"""
def __init__(self, dest, sources, eps=0.3):
r"""
Parameters
----------
eps : float
constant
"""
self.eps = eps
super(MonaghanArtificialStress, self).__init__(dest, sources)
def _cython_code_(self):
code = dedent("""
cimport cython
from pysph.base.linalg3 cimport eigen_decomposition
from pysph.base.linalg3 cimport transform_diag_inv
""")
return code
def loop(self, d_idx, d_rho, d_p, d_s00, d_s01, d_s02, d_s11, d_s12, d_s22,
d_r00, d_r01, d_r02, d_r11, d_r12, d_r22):
r"""Compute the stress terms
Parameters
----------
d_sxx : DoubleArray
Stress Tensor Deviatoric components (Symmetric)
d_rxx : DoubleArray
Artificial stress components (Symmetric)
"""
# 1/rho_a^2
rhoi = d_rho[d_idx]
rhoi21 = 1. / (rhoi * rhoi)
## Matrix and vector declarations ##
# Matrix of Eigenvectors (columns)
R = declare('matrix((3,3))')
# Artificial stress in the original coordinates
Rab = declare('matrix((3,3))')
# Stress tensor with pressure.
S = declare('matrix((3,3))')
# Eigenvalues
V = declare('matrix((3,))')
# Artificial stress in principle direction
rd = declare('matrix((3,))')
# get the diagonal terms for the stress tensor adding pressure
S[0][0] = d_s00[d_idx] - d_p[d_idx]
S[1][1] = d_s11[d_idx] - d_p[d_idx]
S[2][2] = d_s22[d_idx] - d_p[d_idx]
S[1][2] = d_s12[d_idx]
S[2][1] = d_s12[d_idx]
S[0][2] = d_s02[d_idx]
S[2][0] = d_s02[d_idx]
S[0][1] = d_s01[d_idx]
S[1][0] = d_s01[d_idx]
# compute the principle stresses
eigen_decomposition(S, R, cython.address(V[0]))
# artificial stress corrections
if V[0] > 0:
rd[0] = -self.eps * V[0] * rhoi21
else:
rd[0] = 0
if V[1] > 0:
rd[1] = -self.eps * V[1] * rhoi21
else:
rd[1] = 0
if V[2] > 0:
rd[2] = -self.eps * V[2] * rhoi21
else:
rd[2] = 0
# transform artificial stresses in original frame
transform_diag_inv(cython.address(rd[0]), R, Rab)
# store the values
d_r00[d_idx] = Rab[0][0]
d_r11[d_idx] = Rab[1][1]
d_r22[d_idx] = Rab[2][2]
d_r12[d_idx] = Rab[1][2]
d_r02[d_idx] = Rab[0][2]
d_r01[d_idx] = Rab[0][1]
class MomentumEquationWithStress(Equation):
r"""**Momentum Equation with Artificial Stress**
.. math::
\frac{D\vec{v_a}^i}{Dt} = \sum_b m_b\left(\frac{\sigma_a^{ij}}{\rho_a^2}
+\frac{\sigma_b^{ij}}{\rho_b^2} + R_{ab}^{ij}f^n \right)\nabla_a W_{ab}
where
.. math::
f_{ab} = \frac{W(r_{ab})}{W(\Delta p)}\\
R_{ab}^{ij} = R_{a}^{ij} + R_{b}^{ij}
"""
def initialize(self, d_idx, d_au, d_av, d_aw):
d_au[d_idx] = 0.0
d_av[d_idx] = 0.0
d_aw[d_idx] = 0.0
def loop(self, d_idx, s_idx, d_rho, s_rho, s_m, d_p, s_p, d_s00, d_s01,
d_s02, d_s11, d_s12, d_s22, s_s00, s_s01, s_s02, s_s11, s_s12,
s_s22, d_r00, d_r01, d_r02, d_r11, d_r12, d_r22, s_r00, s_r01,
s_r02, s_r11, s_r12, s_r22, d_au, d_av, d_aw, d_wdeltap, d_n, WIJ,
DWIJ):
pa = d_p[d_idx]
pb = s_p[s_idx]
rhoa = d_rho[d_idx]
rhob = s_rho[s_idx]
rhoa21 = 1. / (rhoa * rhoa)
rhob21 = 1. / (rhob * rhob)
s00a = d_s00[d_idx]
s01a = d_s01[d_idx]
s02a = d_s02[d_idx]
s10a = d_s01[d_idx]
s11a = d_s11[d_idx]
s12a = d_s12[d_idx]
s20a = d_s02[d_idx]
s21a = d_s12[d_idx]
s22a = d_s22[d_idx]
s00b = s_s00[s_idx]
s01b = s_s01[s_idx]
s02b = s_s02[s_idx]
s10b = s_s01[s_idx]
s11b = s_s11[s_idx]
s12b = s_s12[s_idx]
s20b = s_s02[s_idx]
s21b = s_s12[s_idx]
s22b = s_s22[s_idx]
r00a = d_r00[d_idx]
r01a = d_r01[d_idx]
r02a = d_r02[d_idx]
r10a = d_r01[d_idx]
r11a = d_r11[d_idx]
r12a = d_r12[d_idx]
r20a = d_r02[d_idx]
r21a = d_r12[d_idx]
r22a = d_r22[d_idx]
r00b = s_r00[s_idx]
r01b = s_r01[s_idx]
r02b = s_r02[s_idx]
r10b = s_r01[s_idx]
r11b = s_r11[s_idx]
r12b = s_r12[s_idx]
r20b = s_r02[s_idx]
r21b = s_r12[s_idx]
r22b = s_r22[s_idx]
# Add pressure to the deviatoric components
s00a = s00a - pa
s00b = s00b - pb
s11a = s11a - pa
s11b = s11b - pb
s22a = s22a - pa
s22b = s22b - pb
# compute the kernel correction term
# if wdeltap is less than zero then no correction
# needed
if d_wdeltap[0] > 0.:
fab = WIJ / d_wdeltap[0]
fab = pow(fab, d_n[0])
art_stress00 = fab * (r00a + r00b)
art_stress01 = fab * (r01a + r01b)
art_stress02 = fab * (r02a + r02b)
art_stress10 = art_stress01
art_stress11 = fab * (r11a + r11b)
art_stress12 = fab * (r12a + r12b)
art_stress20 = art_stress02
art_stress21 = art_stress12
art_stress22 = fab * (r22a + r22b)
else:
art_stress00 = 0.0
art_stress01 = 0.0
art_stress02 = 0.0
art_stress10 = art_stress01
art_stress11 = 0.0
art_stress12 = 0.0
art_stress20 = art_stress02
art_stress21 = art_stress12
art_stress22 = 0.0
# compute accelerations
mb = s_m[s_idx]
d_au[d_idx] += (
mb * (s00a * rhoa21 + s00b * rhob21 + art_stress00) * DWIJ[0] +
mb * (s01a * rhoa21 + s01b * rhob21 + art_stress01) * DWIJ[1] +
mb * (s02a * rhoa21 + s02b * rhob21 + art_stress02) * DWIJ[2])
d_av[d_idx] += (
mb * (s10a * rhoa21 + s10b * rhob21 + art_stress10) * DWIJ[0] +
mb * (s11a * rhoa21 + s11b * rhob21 + art_stress11) * DWIJ[1] +
mb * (s12a * rhoa21 + s12b * rhob21 + art_stress12) * DWIJ[2])
d_aw[d_idx] += (
mb * (s20a * rhoa21 + s20b * rhob21 + art_stress20) * DWIJ[0] +
mb * (s21a * rhoa21 + s21b * rhob21 + art_stress21) * DWIJ[1] +
mb * (s22a * rhoa21 + s22b * rhob21 + art_stress22) * DWIJ[2])
class HookesDeviatoricStressRate(Equation):
r""" Rate of change of stress
.. math::
    \frac{dS^{ij}}{dt} = 2\mu\left(\epsilon^{ij} - \frac{1}{3}\delta^{ij}
    \epsilon^{kk}\right) + S^{ik}\Omega^{jk} + \Omega^{ik}S^{kj}
where
.. math::
\epsilon^{ij} = \frac{1}{2}\left(\frac{\partial v^i}{\partial x^j} +
\frac{\partial v^j}{\partial x^i}\right)\\
\Omega^{ij} = \frac{1}{2}\left(\frac{\partial v^i}{\partial x^j} -
\frac{\partial v^j}{\partial x^i} \right)
"""
def initialize(self, d_idx, d_as00, d_as01, d_as02, d_as11, d_as12,
d_as22):
d_as00[d_idx] = 0.0
d_as01[d_idx] = 0.0
d_as02[d_idx] = 0.0
d_as11[d_idx] = 0.0
d_as12[d_idx] = 0.0
d_as22[d_idx] = 0.0
def loop(self, d_idx, d_s00, d_s01, d_s02, d_s11, d_s12, d_s22, d_v00,
d_v01, d_v02, d_v10, d_v11, d_v12, d_v20, d_v21, d_v22, d_as00,
d_as01, d_as02, d_as11, d_as12, d_as22, d_G):
v00 = d_v00[d_idx]
v01 = d_v01[d_idx]
v02 = d_v02[d_idx]
v10 = d_v10[d_idx]
v11 = d_v11[d_idx]
v12 = d_v12[d_idx]
v20 = d_v20[d_idx]
v21 = d_v21[d_idx]
v22 = d_v22[d_idx]
s00 = d_s00[d_idx]
s01 = d_s01[d_idx]
s02 = d_s02[d_idx]
s10 = d_s01[d_idx]
s11 = d_s11[d_idx]
s12 = d_s12[d_idx]
s20 = d_s02[d_idx]
s21 = d_s12[d_idx]
s22 = d_s22[d_idx]
# strain rate tensor is symmetric
eps00 = v00
eps01 = 0.5 * (v01 + v10)
eps02 = 0.5 * (v02 + v20)
eps10 = eps01
eps11 = v11
eps12 = 0.5 * (v12 + v21)
eps20 = eps02
eps21 = eps12
eps22 = v22
# rotation tensor is asymmetric
omega00 = 0.0
omega01 = 0.5 * (v01 - v10)
omega02 = 0.5 * (v02 - v20)
omega10 = -omega01
omega11 = 0.0
omega12 = 0.5 * (v12 - v21)
omega20 = -omega02
omega21 = -omega12
omega22 = 0.0
tmp = 2.0 * d_G[0]
trace = 1.0 / 3.0 * (eps00 + eps11 + eps22)
# S_00
d_as00[d_idx] = tmp*( eps00 - trace ) + \
( s00*omega00 + s01*omega01 + s02*omega02) + \
( s00*omega00 + s10*omega01 + s20*omega02)
# S_01
d_as01[d_idx] = tmp*(eps01) + \
( s00*omega10 + s01*omega11 + s02*omega12) + \
( s01*omega00 + s11*omega01 + s21*omega02)
# S_02
d_as02[d_idx] = tmp*eps02 + \
(s00*omega20 + s01*omega21 + s02*omega22) + \
(s02*omega00 + s12*omega01 + s22*omega02)
# S_11
d_as11[d_idx] = tmp*( eps11 - trace ) + \
(s10*omega10 + s11*omega11 + s12*omega12) + \
(s01*omega10 + s11*omega11 + s21*omega12)
# S_12
d_as12[d_idx] = tmp*eps12 + \
(s10*omega20 + s11*omega21 + s12*omega22) + \
(s02*omega10 + s12*omega11 + s22*omega12)
# S_22
d_as22[d_idx] = tmp*(eps22 - trace) + \
(s20*omega20 + s21*omega21 + s22*omega22) + \
(s02*omega20 + s12*omega21 + s22*omega22)
class EnergyEquationWithStress(Equation):
def __init__(self, dest, sources, alpha=1.0, beta=1.0, eta=0.01):
self.alpha = float(alpha)
self.beta = float(beta)
self.eta = float(eta)
super(EnergyEquationWithStress, self).__init__(dest, sources)
def initialize(self, d_idx, d_ae):
d_ae[d_idx] = 0.0
def loop(self, d_idx, s_idx, s_m, d_rho, s_rho, d_p, s_p, d_cs, s_cs, d_ae,
XIJ, VIJ, DWIJ, HIJ, R2IJ, RHOIJ1):
rhoa = d_rho[d_idx]
ca = d_cs[d_idx]
pa = d_p[d_idx]
rhob = s_rho[s_idx]
cb = s_cs[s_idx]
pb = s_p[s_idx]
mb = s_m[s_idx]
rhoa2 = 1. / (rhoa * rhoa)
rhob2 = 1. / (rhob * rhob)
# artificial viscosity
vijdotxij = VIJ[0] * XIJ[0] + VIJ[1] * XIJ[1] + VIJ[2] * XIJ[2]
piij = 0.0
if vijdotxij < 0:
cij = 0.5 * (d_cs[d_idx] + s_cs[s_idx])
muij = (HIJ * vijdotxij) / (R2IJ + self.eta * self.eta * HIJ * HIJ)
piij = -self.alpha * cij * muij + self.beta * muij * muij
piij = piij * RHOIJ1
vijdotdwij = VIJ[0] * DWIJ[0] + VIJ[1] * DWIJ[1] + VIJ[2] * DWIJ[2]
# thermal energy contribution
        d_ae[d_idx] += 0.5 * mb * (pa * rhoa2 + pb * rhob2 + piij) * vijdotdwij
def post_loop(self, d_idx, d_rho, d_s00, d_s01, d_s02, d_s11, d_s12, d_s22,
d_v00, d_v01, d_v02, d_v10, d_v11, d_v12, d_v20, d_v21,
d_v22, d_ae):
# particle density
rhoa = d_rho[d_idx]
# deviatoric stress rate (symmetric)
s00a = d_s00[d_idx]
s01a = d_s01[d_idx]
s02a = d_s02[d_idx]
s10a = d_s01[d_idx]
s11a = d_s11[d_idx]
s12a = d_s12[d_idx]
s20a = d_s02[d_idx]
s21a = d_s12[d_idx]
s22a = d_s22[d_idx]
# strain rate tensor (symmetric)
eps00 = d_v00[d_idx]
eps01 = 0.5 * (d_v01[d_idx] + d_v10[d_idx])
eps02 = 0.5 * (d_v02[d_idx] + d_v20[d_idx])
eps10 = eps01
eps11 = d_v11[d_idx]
eps12 = 0.5 * (d_v12[d_idx] + d_v21[d_idx])
eps20 = eps02
eps21 = eps12
eps22 = d_v22[d_idx]
# energy accelerations
#sdoteij = s00a*eps00 + s01a*eps01 + s10a*eps10 + s11a*eps11
sdoteij = (s00a * eps00 + s01a * eps01 + s02a * eps02 + s10a * eps10 +
s11a * eps11 + s12a * eps12 + s20a * eps20 + s21a * eps21 +
s22a * eps22)
d_ae[d_idx] += 1. / rhoa * sdoteij
class ElasticSolidsScheme(Scheme):
def __init__(self, elastic_solids, solids, dim, artificial_stress_eps=0.3,
xsph_eps=0.5, alpha=1.0, beta=1.0):
self.elastic_solids = elastic_solids
self.solids = solids
self.dim = dim
self.solver = None
self.alpha = alpha
self.beta = beta
self.xsph_eps = xsph_eps
self.artificial_stress_eps = artificial_stress_eps
def get_equations(self):
from pysph.sph.equation import Group
from pysph.sph.basic_equations import (
ContinuityEquation, MonaghanArtificialViscosity, XSPHCorrection,
VelocityGradient2D)
from pysph.sph.solid_mech.basic import (
IsothermalEOS, MomentumEquationWithStress,
HookesDeviatoricStressRate, MonaghanArtificialStress)
equations = []
g1 = []
all = self.solids + self.elastic_solids
for elastic_solid in self.elastic_solids:
g1.append(
# p
IsothermalEOS(elastic_solid, sources=None))
g1.append(
# vi,j : requires properties v00, v01, v10, v11
VelocityGradient2D(dest=elastic_solid, sources=all))
g1.append(
# rij : requires properties r00, r01, r02, r11, r12, r22,
# s00, s01, s02, s11, s12, s22
MonaghanArtificialStress(dest=elastic_solid, sources=None,
eps=self.artificial_stress_eps))
equations.append(Group(equations=g1))
g2 = []
for elastic_solid in self.elastic_solids:
g2.append(ContinuityEquation(dest=elastic_solid, sources=all), )
g2.append(
# au, av
MomentumEquationWithStress(dest=elastic_solid, sources=all), )
g2.append(
# au, av
MonaghanArtificialViscosity(dest=elastic_solid, sources=all,
alpha=self.alpha,
beta=self.beta), )
g2.append(
# a_s00, a_s01, a_s11
HookesDeviatoricStressRate(dest=elastic_solid, sources=None), )
g2.append(
# ax, ay, az
XSPHCorrection(dest=elastic_solid, sources=[elastic_solid],
eps=self.xsph_eps), )
equations.append(Group(g2))
return equations
def configure_solver(self, kernel=None, integrator_cls=None,
extra_steppers=None, **kw):
from pysph.base.kernels import CubicSpline
if kernel is None:
kernel = CubicSpline(dim=self.dim)
steppers = {}
if extra_steppers is not None:
steppers.update(extra_steppers)
from pysph.sph.integrator import EPECIntegrator
from pysph.sph.integrator_step import SolidMechStep
cls = integrator_cls if integrator_cls is not None else EPECIntegrator
step_cls = SolidMechStep
for name in self.elastic_solids:
if name not in steppers:
steppers[name] = step_cls()
integrator = cls(**steppers)
from pysph.solver.solver import Solver
self.solver = Solver(dim=self.dim, integrator=integrator,
kernel=kernel, **kw)
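# Usage sketch (illustrative, with assumed solver kwargs):
# scheme = ElasticSolidsScheme(elastic_solids=['plate'], solids=[], dim=2)
# scheme.configure_solver(dt=1e-8, tf=1e-5)  # dt/tf are forwarded to Solver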
|
{"hexsha": "8ac26c86dd2b41386e238cae048263bc0edca06d", "size": 20170, "ext": "py", "lang": "Python", "max_stars_repo_path": "pysph/sph/solid_mech/basic.py", "max_stars_repo_name": "nauaneed/pysph", "max_stars_repo_head_hexsha": "9cb9a859934939307c65a25cbf73e4ecc83fea4a", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 293, "max_stars_repo_stars_event_min_datetime": "2017-05-26T14:41:15.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-28T09:56:16.000Z", "max_issues_repo_path": "pysph/sph/solid_mech/basic.py", "max_issues_repo_name": "nauaneed/pysph", "max_issues_repo_head_hexsha": "9cb9a859934939307c65a25cbf73e4ecc83fea4a", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 217, "max_issues_repo_issues_event_min_datetime": "2017-05-29T15:48:14.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-24T16:16:55.000Z", "max_forks_repo_path": "pysph/sph/solid_mech/basic.py", "max_forks_repo_name": "nauaneed/pysph", "max_forks_repo_head_hexsha": "9cb9a859934939307c65a25cbf73e4ecc83fea4a", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 126, "max_forks_repo_forks_event_min_datetime": "2017-05-25T19:17:32.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-25T11:23:24.000Z", "avg_line_length": 29.7932053176, "max_line_length": 80, "alphanum_fraction": 0.5229053049, "include": true, "reason": "import numpy", "num_tokens": 6693}
|
#ifndef ASYNC_SOCKET_BASE_HXX
#define ASYNC_SOCKET_BASE_HXX
#include <deque>
#include <asio.hpp>
#include <boost/bind.hpp>
#include <boost/function.hpp>
#include <boost/enable_shared_from_this.hpp>
#include "DataBuffer.hxx"
#include "StunTuple.hxx"
#define RECEIVE_BUFFER_SIZE 4096 // ?slg? should we shrink this to something closer to MTU (1500 bytes)? !hbr! never actually increase it otherwise re-assembled UDP packets get lost. (was 2048)
namespace reTurn {
class AsyncSocketBaseHandler;
class AsyncSocketBaseDestroyedHandler;
class AsyncSocketBase :
public boost::enable_shared_from_this<AsyncSocketBase>
{
public:
AsyncSocketBase(asio::io_service& ioService);
virtual ~AsyncSocketBase();
virtual unsigned int getSocketDescriptor() = 0;
virtual void registerAsyncSocketBaseHandler(AsyncSocketBaseHandler* handler) { mAsyncSocketBaseHandler = handler; }
/// Note: The following API's are thread safe and queue the request to be handled by the ioService thread
virtual asio::error_code bind(const asio::ip::address& address, unsigned short port) = 0;
virtual void connect(const std::string& address, unsigned short port) = 0;
/// Note: destination is ignored for TCP and TLS connections
virtual void send(const StunTuple& destination, boost::shared_ptr<DataBuffer>& data); // Send unframed data
virtual void send(const StunTuple& destination, unsigned short channel, boost::shared_ptr<DataBuffer>& data); // send with turn framing
/// Overlapped calls to receive functions have no effect
virtual void receive();
virtual void framedReceive();
virtual void close();
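   /// Usage sketch (illustrative): from any thread, queue an unframed send;
   /// the actual write runs on the ioService thread, e.g.:
   ///   boost::shared_ptr<DataBuffer> buf = AsyncSocketBase::allocateBuffer(len);
   ///   // ... copy len bytes of payload into buf ...
   ///   socket->send(destination, buf);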
bool isConnected() { return mConnected; }
asio::ip::address& getConnectedAddress() { return mConnectedAddress; }
unsigned short getConnectedPort() { return mConnectedPort; }
virtual void setOnBeforeSocketClosedFp(boost::function<void(unsigned int)> fp) { mOnBeforeSocketCloseFp = fp; }
/// Use these if you already operating within the ioService thread
virtual void doSend(const StunTuple& destination, unsigned short channel, boost::shared_ptr<DataBuffer>& data, unsigned int bufferStartPos=0);
virtual void doSend(const StunTuple& destination, boost::shared_ptr<DataBuffer>& data, unsigned int bufferStartPos=0);
virtual void doReceive();
virtual void doFramedReceive();
/// Class override callbacks
virtual void onConnectSuccess() { assert(false); }
virtual void onConnectFailure(const asio::error_code& e) { assert(false); }
virtual void onReceiveSuccess(const asio::ip::address& address, unsigned short port, boost::shared_ptr<DataBuffer>& data) = 0;
virtual void onReceiveFailure(const asio::error_code& e) = 0;
virtual void onSendSuccess() = 0;
virtual void onSendFailure(const asio::error_code& e) = 0;
/// Utility API
static boost::shared_ptr<DataBuffer> allocateBuffer(unsigned int size);
// Stubbed out async handlers needed by Protocol specific Subclasses of this - the requirement for these
// to be in the base class all revolves around the shared_from_this() use/requirement
virtual void start() { assert(false); }
virtual void stop() { assert(false); }
virtual void handleReadHeader(const asio::error_code& e) { assert(false); }
virtual void handleServerHandshake(const asio::error_code& e) { assert(false); }
virtual void handleTcpResolve(const asio::error_code& ec, asio::ip::tcp::resolver::iterator endpoint_iterator) { assert(false); }
virtual void handleUdpResolve(const asio::error_code& ec, asio::ip::udp::resolver::iterator endpoint_iterator) { assert(false); }
virtual void handleConnect(const asio::error_code& ec, asio::ip::tcp::resolver::iterator endpoint_iterator) { assert(false); }
virtual void handleClientHandshake(const asio::error_code& ec, asio::ip::tcp::resolver::iterator endpoint_iterator) { assert(false); }
protected:
/// Handle completion of a sendData operation.
virtual void handleSend(const asio::error_code& e);
virtual void handleReceive(const asio::error_code& e, std::size_t bytesTransferred);
/// The io_service used to perform asynchronous operations.
asio::io_service& mIOService;
/// Receive Buffer and state
boost::shared_ptr<DataBuffer> mReceiveBuffer;
bool mReceiving;
/// Connected Info and State
asio::ip::address mConnectedAddress;
unsigned short mConnectedPort;
bool mConnected;
/// Handlers
AsyncSocketBaseHandler* mAsyncSocketBaseHandler;
/// Provides an opportunity for the app to clean up, e.g., QoS-related data or resources
/// just before the socket is closed
boost::function<void(unsigned int)> mOnBeforeSocketCloseFp;
private:
virtual void transportSend(const StunTuple& destination, std::vector<asio::const_buffer>& buffers) = 0;
virtual void transportReceive() = 0;
virtual void transportFramedReceive() = 0;
virtual void transportClose() = 0;
virtual const asio::ip::address getSenderEndpointAddress() = 0;
virtual unsigned short getSenderEndpointPort() = 0;
virtual void sendFirstQueuedData();
class SendData
{
public:
SendData(const StunTuple& destination, boost::shared_ptr<DataBuffer>& frameData, boost::shared_ptr<DataBuffer>& data, unsigned int bufferStartPos = 0) :
mDestination(destination), mFrameData(frameData), mData(data), mBufferStartPos(bufferStartPos) {}
StunTuple mDestination;
boost::shared_ptr<DataBuffer> mFrameData;
boost::shared_ptr<DataBuffer> mData;
unsigned int mBufferStartPos;
};
/// Queue of data to send
typedef std::deque<SendData> SendDataQueue;
SendDataQueue mSendDataQueue;
};
typedef boost::shared_ptr<AsyncSocketBase> ConnectionPtr;
}
#endif
/* ====================================================================
Copyright (c) 2007-2008, Plantronics, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of Plantronics nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
==================================================================== */
|
{"hexsha": "fc4336b49a55f27e15070b7f1addf434e9920cc2", "size": 7403, "ext": "hxx", "lang": "C++", "max_stars_repo_path": "reTurn/AsyncSocketBase.hxx", "max_stars_repo_name": "dulton/reSipServer", "max_stars_repo_head_hexsha": "ac4241df81c1e3eef2e678271ffef4dda1fc6747", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2019-04-15T14:10:58.000Z", "max_stars_repo_stars_event_max_datetime": "2019-04-15T14:10:58.000Z", "max_issues_repo_path": "reTurn/AsyncSocketBase.hxx", "max_issues_repo_name": "dulton/reSipServer", "max_issues_repo_head_hexsha": "ac4241df81c1e3eef2e678271ffef4dda1fc6747", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "reTurn/AsyncSocketBase.hxx", "max_forks_repo_name": "dulton/reSipServer", "max_forks_repo_head_hexsha": "ac4241df81c1e3eef2e678271ffef4dda1fc6747", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2.0, "max_forks_repo_forks_event_min_datetime": "2019-10-31T09:11:09.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-17T01:00:49.000Z", "avg_line_length": 44.8666666667, "max_line_length": 193, "alphanum_fraction": 0.7421315683, "num_tokens": 1678}
|
cdis
cdis Open Source License/Disclaimer, Forecast Systems Laboratory
cdis NOAA/OAR/FSL, 325 Broadway Boulder, CO 80305
cdis
cdis This software is distributed under the Open Source Definition,
cdis which may be found at http://www.opensource.org/osd.html.
cdis
cdis In particular, redistribution and use in source and binary forms,
cdis with or without modification, are permitted provided that the
cdis following conditions are met:
cdis
cdis - Redistributions of source code must retain this notice, this
cdis list of conditions and the following disclaimer.
cdis
cdis - Redistributions in binary form must provide access to this
cdis notice, this list of conditions and the following disclaimer, and
cdis the underlying source code.
cdis
cdis - All modifications to this software must be clearly documented,
cdis and are solely the responsibility of the agent making the
cdis modifications.
cdis
cdis - If significant modifications or enhancements are made to this
cdis software, the FSL Software Policy Manager
cdis (softwaremgr@fsl.noaa.gov) should be notified.
cdis
cdis THIS SOFTWARE AND ITS DOCUMENTATION ARE IN THE PUBLIC DOMAIN
cdis AND ARE FURNISHED "AS IS." THE AUTHORS, THE UNITED STATES
cdis GOVERNMENT, ITS INSTRUMENTALITIES, OFFICERS, EMPLOYEES, AND
cdis AGENTS MAKE NO WARRANTY, EXPRESS OR IMPLIED, AS TO THE USEFULNESS
cdis OF THE SOFTWARE AND DOCUMENTATION FOR ANY PURPOSE. THEY ASSUME
cdis NO RESPONSIBILITY (1) FOR THE USE OF THE SOFTWARE AND
cdis DOCUMENTATION; OR (2) TO PROVIDE TECHNICAL SUPPORT TO USERS.
cdis
cdis
C***********************************************************************
C** NOAA/NESDIS/SOCC/SOFTWARE BRANCH AND ISI
C***********************************************************************
C**
C** PROJECT OPERATIONS GROUND EQUIPMENT FOR GOES-NEXT
C** SYSTEM EARTH LOCATION USERS GUIDE
C** ROUTINE GIMLOC
C** SOURCE F.GIMLOC
C** LOAD NAME ANY
C** PROGRAMMER THOMAS I. BABICKI
C**
C** VER. DATE BY COMMENT
C** ---- -------- -- ---------------------------------------------
C** A 5/01/89 TB INITIAL CREATION(SOCC/ISI)
C**
C** B 2/19/93 JH ADD ACCESS TO BOTH IMAGER AND SOUNDER SETS
C**
C***********************************************************************
C
C AWS -- I eliminated the PROGRAM code here...
C
C***********************************************************************
C**
C** NOAA/NESDIS/SOCC/SOFTWARE BRANCH
C**
C***********************************************************************
C**
C** PROJECT : OPERATIONS GROUND EQUIPMENT FOR GOES-NEXT
C** SYSTEM : EARTH LOCATION USERS GUIDE
C** ROUTINE : TIME50
C** SOURCE : F.TIME50
C** LOAD NAME : ANY
C** PROGRAMMER: THOMAS I. BABICKI
C**
C** VER. DATE BY COMMENT
C** ---- -------- --- ---------------------------------------------
C** A 2/17/89 TB INITIAL CREATION
C**
C***********************************************************************
C**
C** TIME50 ACCEPTS TWO WORDS CONTAINING DATE AND TIME
C** AND RETURNS TIME EXPRESSED AS DOUBLE PRECISION MINUTES FROM
C** 1950 JAN. 1.0
C**
C***********************************************************************
C**
C** CALLED BY : ANY
C** COMMONS MODIFIED: NONE
C** INPUTS : NONE
C** OUTPUTS : NONE
C** ROUTINES CALLED : NONE
C**
C***********************************************************************
FUNCTION TIME50(I)
C
C CALLING PARAMETERS
C
INTEGER*4 I(2)
C
C LOCAL VARIABLES
C
INTEGER*4 NY
C YEAR
INTEGER*4 ND
C DAY OF YEAR
INTEGER*4 NH
C HOUR
INTEGER*4 NM
C MINUTE
REAL*8 S
C SECONDS
INTEGER J
C
C INCLUDE FILES - REC
C
C
C***********************************************************************
C
C CONVERT INPUT YEAR, DAY OF YEAR, HOUR AND MINUTE
C TO INTEGER VALUES.
C
NY=I(1)/10000
IAA=I(1)-(NY*10000)
ND=(I(1)-(NY*10000))*.1
IAB=(IAA-(ND*10))*10
NBC=I(2)/10000000.
IAC=I(2)-(NBC*10000000)
NH=IAB+NBC
DEF=I(2)-IAC
NM=IAC*.00001
S=(I(2)-(DEF+(NM*100000)))*.001D0
PRINT 1000,NY,ND,NH,NM,S
1000 FORMAT (1H1,'YEAR =',I4,/,1X,'JDAY =',I3,/,1X,
* 'HOUR =',I2,/,1X,'MIN =',I2,/,1X,
* 'SEC =',F6.3)
C
C***********************************************************************
C
C HERE WE CONVERT INTEGER YEAR AND DAY OF YEAR TO NUMBER OF
C DAYS FROM 0 HOUR UT, 1950 JAN. 1.0
C THIS CONVERSION IS BASED ON AN ALGORITHM BY FLIEGEL AND VAN
C FLANDERN, COMM. OF ACM, VOL.11, NO. 10, OCT. 1968 (P.657)
C
J=ND+1461*(NY+4799)/4-3*((NY+4899)/100)/4-2465022
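C
C WORKED EXAMPLE (ILLUSTRATION, NOT PART OF THE ORIGINAL COMMENTS):
C FOR NY=1950, ND=1 THE INTEGER ARITHMETIC GIVES
C J = 1 + (1461*6749)/4 - 3*(6849/100)/4 - 2465022
C   = 1 + 2465072 - 51 - 2465022 = 0,
C SO 1950 JAN. 1.0 MAPS TO DAY ZERO OF THE EPOCH.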
C
C COMPUTE TIME IN MINUTES FROM JANUARY 1.0, 1950
C
TIME50=J*1440.D0+NH*60.D0+NM+S/60.D0
RETURN
END
C***********************************************************************
C** NOAA/NESDIS/SOCC/SOFTWARE BRANCH
C***********************************************************************
C**
C** PROJECT : OPERATIONS GROUND EQUIPMENT FOR GOES-NEXT
C** SYSTEM : EARTH LOCATION USERS GUIDE
C** ROUTINE : TIMEX
C** SOURCE : F.TIMEX
C** LOAD NAME : ANY
C** PROGRAMMER: THOMAS I. BABICKI
C**
C** VER. DATE BY COMMENT
C** ---- -------- --- ---------------------------------------------
C** A 4/25/89 TB INITIAL CREATION
C***********************************************************************
FUNCTION TIMEX(NY,ND,NH,NM,S)
C
REAL*8 TIMEX
C
C CALLING PARAMETERS
C
INTEGER*4 NY
C YEAR
INTEGER*4 ND
C DAY OF YEAR
INTEGER*4 NH
C HOUR
INTEGER*4 NM
C MINUTE
REAL*8 S
C SECONDS
INTEGER J
C
C***********************************************************************
C
PRINT 1000,NY,ND,NH,NM,S
1000 FORMAT (/,1X,'YEAR =',I4,/,1X,'JDAY =',I3,/,1X,
* 'HOUR =',I2,/,1X,'MIN =',I2,/,1X,
* 'SEC =',F6.3)
C
C***********************************************************************
C
J=ND+1461*(NY+4799)/4-3*((NY+4899)/100)/4-2465022
C
C COMPUTE ACTUAL TIME IN MINUTES FROM JANUARY 1.0, 1950
C
TIMEX=J*1440.D0+NH*60.D0+NM+S/60.D0
RETURN
END
C***********************************************************************
C**
C** INTEGRAL SYSTEMS, INC.
C**
C***********************************************************************
C**
C** PROJECT : OPERATIONS GROUND EQUIPMENT FOR GOES-NEXT
C** SYSTEM : EARTH LOCATION USERS GUIDE
C** ROUTINE : SETCONS
C** SOURCE : F.SETCONS
C** LOAD NAME : ANY
C** PROGRAMMER: IGOR LEVINE
C**
C** VER. DATE BY COMMENT
C** ---- -------- --- ---------------------------------------------
C** A 02/16/89 IL INITIAL CREATION
C**
C** B 05/19/94 NP ADDED CALCULATION OF INSTRUMENT ELEVATION AND
C** SCAN ANGLE BIASES BASED ON USER INPUT
C***********************************************************************
C**
C** THIS SUBROUTINE GENERATES CONSTANTS IN COMMON INSTCOMM
C**
C***********************************************************************
C**
C** CALLED BY : ANY
C** COMMONS MODIFIED: INSTCO
C** INPUTS : NONE
C** OUTPUTS : NONE
C** ROUTINES CALLED : NONE
C**
C***********************************************************************
C***********************************************************************
SUBROUTINE SETCON(INSTR,NS_NAD_CY,NS_NAD_INC,EW_NAD_CY,EW_NAD_INC)
C
C CALLING PARAMETERS
C
C
C LOCAL VARIABLES
INTEGER NS_NAD_CY,NS_NAD_INC,EW_NAD_CY,EW_NAD_INC
C
C
C INCLUDE FILES
C
INCLUDE 'elcons.inc'
INCLUDE 'instco.inc'
C***********************************************************************
INCMAX(1)=6136
INCMAX(2)=2805
ELVINC(1)=8.0D-6
ELVINC(2)=17.5D-6
SCNINC(1)=16.D-6
SCNINC(2)=35.D-6
ELVLN(1)=28.D-6
ELVLN(2)=280.D-6
SCNPX(1)=16.D-6
SCNPX(2)=280.D-6
C ************************************************************
C COMMENTED OUT ELEVATION AND SCAN BIAS CONSTANTS SINCE INSTRUMENT
C EARTH NADIR POSITION IS AVAILABLE IN GVAR DATA AND PERIODICALLY
C UPDATED
C
C
C ELVMAX(1)=0.220896D0
C ELVMAX(2)=0.22089375D0
C SCNMAX(1)=0.24544D0
C SCNMAX(2)=0.2454375D0
C RECOMPUTE ELEVATION AND SCAN BIASES BASED ON USER INPUTS OF
C CYCLES & INCREMENTS OBTAINED FROM GVAR
c ELVMAX(INSTR) = (NS_NAD_CY*INCMAX(INSTR)+NS_NAD_INC)*ELVINC(INSTR)
IF(INSTR.EQ.1)THEN
ELVMAX(INSTR)=(NS_NAD_CY*INCMAX(INSTR)+NS_NAD_INC)*ELVINC(INSTR)
ELSE
ELVMAX(INSTR)=((9-NS_NAD_CY)*INCMAX(INSTR)-NS_NAD_INC)
+ *ELVINC(INSTR)
ENDIF
SCNMAX(INSTR) = (EW_NAD_CY*INCMAX(INSTR)+EW_NAD_INC)*SCNINC(INSTR)
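C
C FOR ILLUSTRATION: IMAGER INPUTS NS_NAD_CY=4, NS_NAD_INC=3068
C REPRODUCE THE NOMINAL BIAS COMMENTED OUT ABOVE, SINCE
C ELVMAX(1) = (4*6136+3068)*8.0D-6 = 0.220896 RAD.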
C ************************************************************
RETURN
END
C***********************************************************************
C**
C** INTEGRAL SYSTEMS, INC.
C**
C***********************************************************************
C**
C** PROJECT : OPERATIONS GROUND EQUIPMENT FOR GOES-NEXT
C** SYSTEM : EARTH LOCATION USERS GUIDE
C** ROUTINE : LMODEL
C** SOURCE : F.LMODEL
C** LOAD NAME : ANY
C** PROGRAMMER: IGOR LEVINE
C**
C** VER. DATE BY COMMENT
C** ---- -------- --- ---------------------------------------------
C** 1 01/09/89 IL INITIAL CREATION
C** 2 06/02/89 IL COORDINATE AXES CHANGED ACCORDING TO
C** FORD'S DEFINITION IN SDAIP, DRL 504-01
C** 3 08/21/89 IL CORRECTED ORBIT ANGLE COMPUTATIONS
C** 4 03/08/94 SC S/C COMPENSATION APPLIED UNCONDITIONALLY;
C** REFERENCE RADIAL DISTANCE, LATITUDE AND
C** ORBIT YAW SET TO ZERO IF IMC DISABLED.
C** 5 03/08/94 SC ADDED TRAP FOR SLAT=SYAW=0; CORRECTED
C** EXPRESSION FOR LAM.
C***********************************************************************
C**
C** THIS SUBROUTINE COMPUTES THE POSITION OF THE SATELLITE AND THE
C** ATTITUDE OF THE IMAGER OR SOUNDER. THE CALCULATIONS ARE BASED
C** ON THE OATS ORBIT AND ATTITUDE MODEL REPRESENTED BY THE O&A
C** PARAMETER SET IN GVAR BLOCK 0.
C** INPUTS:
C** TIME, EPOCH TIME, O&A PARAMETER SET, IMC STATUS.
C**
C** OUTPUTS:
C** THE SPACECRAFT POSITION VECTOR IN EARTH FIXED COORDINATES;
C** THE GEOMETRIC ROLL, PITCH, YAW ANGLES AND THE ROLL,
C** PITCH MISALIGNMENTS FOR EITHER THE IMAGER OR THE SOUNDER;
C** THE EARTH FIXED TO INSTRUMENT FRAME TRANSFORMATION MATRIX;
C** GEOGRAPHIC LATITUDE AND LONGITUDE AT SUBSATELLITE POINT.
C**
C** DESCRIPTION
C** LMODEL ACCEPTS AN INPUT DOUBLE PRECISION TIME IN MINUTES FROM
C** 1950, JAN.1.0 AND AN INPUT SET OF O&A PARAMETERS AND COMPUTES
C** POSITION OF THE SATELLITE, THE ATTITUDE ANGLES AND ATTITUDE
C** MISALIGNMENTS AND THE INSTRUMENT TO EARTH FIXED COORDINATES
C** TRANSFORMATION MATRIX.
C**
C***********************************************************************
C**
C** CALLED BY : ANY
C** COMMONS MODIFIED: /ELCOMM/ XS,Q3,PITCH,ROLL,YAW,PMA,RMA,BT
C** INPUTS : NONE
C** OUTPUTS : NONE
C** ROUTINES CALLED : INST2ER,GATT
C**
C***********************************************************************
C***********************************************************************
SUBROUTINE LMODEL(T,TU,REC,IMC,RLAT,RLON)
C
C CALLING ARGUMENTS
C
REAL*8 T
C INPUT TIME FROM 1950, JAN 1.0 (MIN)
REAL*8 TU
C EPOCH TIME FROM 1950, JAN 1.0 (MIN)
REAL*8 REC(336)
C INPUT O&A PARAMETER SET
INTEGER IMC
C INPUT IMC STATUS: 0 - ON, 1 - OFF
REAL*8 RLAT
C SUBSATELLITE GEODETIC LATITUDE (RAD)
REAL*8 RLON
C SUBSATELLITE LONGITUDE IN RADIANS
C
C LOCAL VARIABLES
C
REAL*8 R
C NORMALIZED SATELLITE DISTANCE (IN UNITS OF KMER9)
REAL*8 TS
C TIME FROM EPOCH IN MINUTES
REAL*8 B(3,3)
C SPACECRAFT TO EARTH FIXED COORDINATES TRANSFORMATION
C MATRIX
REAL*8 TE
C EXPONENTIAL TIME DELAY FROM EPOCH (IN MINUTES)
REAL*8 PHI
C SUBSATELLITE GEOCENTRIC LATITUDE IN RADIANS
REAL*8 DR
C RADIAL DISTANCE FROM THE NOMINAL (KM)
REAL*8 PSI
C ORBITAL YAW (IN RADIANS)
REAL*8 LAM
C IMC LONGITUDE (IN RADIANS)
REAL*8 U
C ARGUMENT OF LATITUDE (IN RADIANS)
REAL*8 SU,CU
C SIN(U), COS(U)
REAL*8 SI,CI
C SINE AND COSINE OF THE ORBIT INCLINATION
REAL*8 SLAT
C SINE OF GEOCENTRIC LATITUDE
REAL*8 ASC
C LONGITUDE OF THE ASCENDING NODE IN RADIANS
REAL*8 SA,CA
C SINE AND COSINE OF ASC
REAL*8 SYAW
C SINE OF THE ORBIT YAW
REAL*8 WA
C SOLAR ORBIT ANGLE IN RADIANS
REAL*8 W
C ORBIT ANGLE IN RADIANS
REAL*8 SW,CW
C SIN(W), COS(W)
REAL*8 S2W,C2W
C SIN(2*W), COS(2*W)
REAL*8 SW1,CW1
C SIN(0.927*W), COS(0.927*W)
REAL*8 SW3,CW3
C SINE AND COSINE OF 1.9268*W
REAL*8 DLAT
C CHANGE IN SINE OF GEOCENTRIC LATITUDE
REAL*8 DYAW
C CHANGE IN SINE OF ORBIT YAW
REAL*8 GATT
C SUBROUTINE FUNCTION
c REAL*8 A1,A2
C WORK AREAS
C
C INCLUDE FILES
C
INCLUDE 'elcons.inc'
INCLUDE 'elcomm.inc'
C
C***********************************************************************
C
C ASSIGN REFERENCE VALUES TO THE SUBSATELLITE LONGITUDE AND
C LATITUDE, THE RADIAL DISTANCE AND THE ORBIT YAW.
C
LAM=REC(5)
DR=REC(6)
PHI=REC(7)
PSI=REC(8)
C
C ASSIGN REFERENCE VALUES TO THE ATTITUDES AND MISALIGNMENTS
C
ROLL=REC(9)
PITCH=REC(10)
YAW=REC(11)
RMA=0.0D0
PMA=0.0D0
C
C IF IMC IS OFF, COMPUTE CHANGES IN THE SATELLITE ORBIT
C
IF (IMC.NE.0) THEN
C
C SET REFERENCE RADIAL DISTANCE, LATITUDE AND ORBIT YAW TO ZERO
C
DR=0.0D0
PHI=0.0D0
PSI=0.0D0
C
C COMPUTE TIME SINCE EPOCH (IN MINUTES)
C
TS=T-TU
C
C COMPUTES ORBIT ANGLE AND THE RELATED TRIGONOMETRIC FUNCTIONS.
C EARTH ROTATIONAL RATE=.729115E-4 (RAD/S)
C
W=0.729115D-4*60.0D0*TS
SW=DSIN(W)
CW=DCOS(W)
SW1=DSIN(0.927D0*W)
CW1=DCOS(0.927D0*W)
S2W=DSIN(2.0D0*W)
C2W=DCOS(2.0D0*W)
SW3=DSIN(1.9268D0*W)
CW3=DCOS(1.9268D0*W)
C
C COMPUTES CHANGE IN THE IMC LONGITUDE FROM THE REFERENCE
C
LAM=LAM+REC(18)+(REC(19)+REC(20)*W)*W
1 +(REC(27)*SW1+REC(28)*CW1+REC(21)*SW+REC(22)*CW
2 +REC(23)*S2W+REC(24)*C2W + REC(25)*SW3+REC(26)*CW3
3 +W*(REC(29)*SW+REC(30)*CW))*2.0D0
C
C COMPUTES CHANGE IN RADIAL DISTANCE FROM THE REFERENCE (KM)
C
DR=DR + REC(31) + REC(32)*CW+REC(33)*SW
1 +REC(34)*C2W+REC(35)*S2W + REC(36)*CW3+REC(37)*SW3
2 +REC(38)*CW1+REC(39)*SW1 + W*(REC(40)*CW+REC(41)*SW)
C
C COMPUTES THE SINE OF THE CHANGE IN THE GEOCENTRIC LATITUDE
C
DLAT=REC(42) + REC(43)*CW+REC(44)*SW
1 +REC(45)*C2W+REC(46)*S2W
2 +W*(REC(47)*CW+REC(48)*SW)
3 +REC(49)*CW1+REC(50)*SW1
C
C COMPUTES GEOCENTRIC LATITUDE BY USING AN EXPANSION FOR ARCSINE
C
PHI=PHI+DLAT*(1.0D0+DLAT*DLAT/6.0D0)
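C (THE EXPANSION USED IS ASIN(X) = X + X**3/6 + ... FOR SMALL X)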
C
C COMPUTES SINE OF THE CHANGE IN THE ORBIT YAW
C
DYAW=REC(51) + REC(52)*SW+REC(53)*CW
1 +REC(54)*S2W+REC(55)*C2W
2 +W*(REC(56)*SW+REC(57)*CW)
3 +REC(58)*SW1+REC(59)*CW1
C
C COMPUTES THE ORBIT YAW BY USING AN EXPANSION FOR ARCSINE.
C
PSI=PSI+DYAW*(1.0D0+DYAW*DYAW/6.0D0)
C
C CALCULATION OF CHANGES IN THE SATELLITE ORBIT ENDS HERE
C
END IF
C
C CONVERSION OF THE IMC LONGITUDE AND ORBIT YAW TO THE SUBSATELLITE
C LONGITUDE AND THE ORBIT INCLINATION (REF: GOES-PCC-TM-2473, INPUTS
C REQUIRED FOR EARTH LOCATION AND GRIDDING BY SPS, JUNE 6, 1988)
C
SLAT=DSIN(PHI)
SYAW=DSIN(PSI)
SI=SLAT**2+SYAW**2
CI=DSQRT(1.0D0-SI)
SI=DSQRT(SI)
IF (SLAT.EQ.0.0D0.AND.SYAW .EQ. 0.0D0) THEN
U=0.0D0
ELSE
U=DATAN2(SLAT,SYAW)
ENDIF
SU=DSIN(U)
CU=DCOS(U)
C
C COMPUTES LONGITUDE OF THE ASCENDING NODE
C
ASC=LAM - U
SA=DSIN(ASC)
CA=DCOS(ASC)
C
C COMPUTES THE SUBSATELLITE GEOGRAPHIC LATITUDE
C
RLAT=DATAN(AEBE2*DTAN(PHI))
C
C COMPUTES THE SUBSATELLITE LONGITUDE
C
RLON=ASC+DATAN2(CI*SU,CU)
C
C COMPUTES THE SPACECRAFT TO EARTH FIXED COORDINATES TRANSFORMATION
C MATRIX:
C (VECTOR IN ECEF COORDINATES) = B * (VECTOR IN S/C COORDINATES)
C
B(1,2)=-SA*SI
B(2,2)= CA*SI
B(3,2)=-CI
B(1,3)=-CA*CU+SA*SU*CI
B(2,3)=-SA*CU-CA*SU*CI
B(3,3)=-SLAT
B(1,1)=-CA*SU-SA*CU*CI
B(2,1)=-SA*SU+CA*CU*CI
B(3,1)= CU*SI
C
C COMPUTES THE NORMALIZED SPACECRAFT POSITION VECTOR IN EARTH FIXED
C COORDINATES - XS.
C
R=(NOMORB+DR)/AE
XS(1)=-B(1,3)*R
XS(2)=-B(2,3)*R
XS(3)=-B(3,3)*R
C
C PRECOMPUTES Q3 (USED IN LPOINT)
C
Q3=XS(1)**2+XS(2)**2+AEBE2*XS(3)**2-1.0D0
C
C COMPUTES THE ATTITUDES AND MISALIGNMENTS IF IMC IS OFF
C
IF (IMC.NE.0) THEN
C
C COMPUTES THE SOLAR ORBIT ANGLE
C
WA=REC(60)*TS
C
C COMPUTES THE DIFFERENCE BETWEEN CURRENT TIME, TS, AND THE
C EXPONENTIAL TIME, REC(61). NOTE THAT BOTH TIMES ARE SINCE EPOCH.
C
TE=TS-REC(61)
C
C COMPUTES ROLL + ROLL MISALIGNMENT
C
ROLL=ROLL+GATT(62,REC,WA,TE)
C
C COMPUTES PITCH + PITCH MISALIGNMENT
C
PITCH=PITCH+GATT(117,REC,WA,TE)
C
C COMPUTES YAW
C
YAW=YAW+GATT(172,REC,WA,TE)
C
C COMPUTES ROLL MISALIGNMENT
C
RMA=GATT(227,REC,WA,TE)
C
C COMPUTES PITCH MISALIGNMENT
C
PMA=GATT(282,REC,WA,TE)
C
C APPLY THE SPACECRAFT COMPENSATION
C
ROLL=ROLL+REC(15)
PITCH=PITCH+REC(16)
YAW=YAW+REC(17)
END IF
C
C COMPUTES THE INSTRUMENT TO EARTH FIXED COORDINATES TRANSFORMATION
C MATRIX - BT
C
CALL INST2ER(ROLL,PITCH,YAW,B,BT)
RETURN
END
C***********************************************************************
C***********************************************************************
C**
C** INTEGRAL SYSTEMS, INC.
C**
C***********************************************************************
C**
C** PROJECT : OPERATIONS GROUND EQUIPMENT FOR GOES-NEXT
C** SYSTEM : EARTH LOCATION USERS GUIDE
C** ROUTINE : GPOINT
C** SOURCE : F.GPOINT
C** LOAD NAME : ANY
C** PROGRAMMER: IGOR LEVINE
C**
C** VER. DATE BY COMMENT
C** ---- -------- --- ---------------------------------------------
C** A 12/10/87 IL INITIAL CREATION
C** A 06/10/88 IL REPLACED ASIN WITH ATAN TO SAVE TIME
C** A 06/02/89 IL COORDINATE AXES CHANGED ACCORDING TO
C** FORD'S DEFINITION IN SDAIP, DRL 504-01
C** 4 03/08/94 SC IMPLEMENTED NEW FORMULAE FOR SCAN ANGLE
C** CORRECTION DUE TO MISALIGNMENTS
C***********************************************************************
C**
C** THIS SUBROUTINE CONVERTS GEOGRAPHIC LATITUDE AND LONGITUDE
C** TO THE RELATED ELEVATION AND SCAN ANGLES.
C**
C***********************************************************************
C**
C** CALLED BY : ANY
C** COMMONS MODIFIED: NONE
C** INPUTS : NONE
C** OUTPUTS : NONE
C** ROUTINES CALLED : NONE
C**
C***********************************************************************
C***********************************************************************
SUBROUTINE GPOINT(RLAT,RLON,ALF,GAM,IERR)
C
C CALLING PARAMETERS
C
REAL*8 RLAT
C GEOGRAPHIC LATITUDE IN RADIANS (INPUT)
REAL*8 RLON
C GEOGRAPHIC LONGITUDE IN RADIANS (INPUT)
REAL*8 ALF
C ELEVATION ANGLE IN RADIANS (OUTPUT)
REAL*8 GAM
C SCAN ANGLE IN RADIANS (OUTPUT)
INTEGER IERR
C OUTPUT STATUS; 0 - SUCCESSFUL COMPLETION,
C 1 - POINT WITH GIVEN LAT/LON IS INVISIBLE
C
C LOCAL VARIABLES
C
REAL*8 F(3)
C POINTING VECTOR IN EARTH CENTERED COORDINATES
REAL*8 FT(3)
C POINTING VECTOR IN INSTRUMENT COORDINATES
REAL*8 U(3)
C COORDINATES OF THE EARTH POINT (KM)
REAL*8 SING,SLAT,W1,W2
C WORK SPACE
C
C INCLUDE FILES
C
INCLUDE 'elcons.inc'
INCLUDE 'elcomm.inc'
C***********************************************************************
C
C COMPUTES SINE OF GEOGRAPHIC (GEODETIC) LATITUDE
C
SING=DSIN(RLAT)
W1=AEBE4*SING*SING
C
C SINE OF THE GEOCENTRIC LATITUDE
C
SLAT=((0.375D0*W1-0.5D0)*W1+1.0D0)*SING/AEBE2
C
C COMPUTES LOCAL EARTH RADIUS AT SPECIFIED POINT
C
W2=SLAT*SLAT
W1=AEBE3*W2
W1=(0.375D0*W1-0.5D0)*W1+1.D0
C
C COMPUTES CARTESIAN COORDINATES OF THE POINT
C
U(3)=SLAT*W1
W2=W1*DSQRT(1.0D0-W2)
U(1)=W2*DCOS(RLON)
U(2)=W2*DSIN(RLON)
C
C POINTING VECTOR FROM SATELLITE TO THE EARTH POINT
C
F(1)=U(1)-XS(1)
F(2)=U(2)-XS(2)
F(3)=U(3)-XS(3)
W2=U(1)*SNGL(F(1))+U(2)*SNGL(F(2))+
1 U(3)*SNGL(F(3))*AEBE2
C
C VERIFIES VISIBILITY OF THE POINT
C
IF (W2.GT.0.0D0) THEN
C INVISIBLE POINT ON THE EARTH
IERR=1
ALF=99999.0D0
GAM=99999.0D0
RETURN
END IF
C
C CONVERTS POINTING VECTOR TO INSTRUMENT COORDINATES
C
FT(1)=BT(1,1)*F(1)+BT(2,1)*F(2)+BT(3,1)*F(3)
FT(2)=BT(1,2)*F(1)+BT(2,2)*F(2)+BT(3,2)*F(3)
FT(3)=BT(1,3)*F(1)+BT(2,3)*F(2)+BT(3,3)*F(3)
C
C CONVERTS POINTING VECTOR TO SCAN AND ELEVATION ANGLES AND
C CORRECTS FOR THE ROLL AND PITCH MISALIGNMENTS
C
GAM=DATAN(FT(1)/SQRT(FT(2)**2+FT(3)**2))
ALF=-DATAN(FT(2)/FT(3))
W1=DSIN(ALF)
W2=DCOS(GAM)
ALF=ALF+RMA*(1.0D0-DCOS(ALF)/W2)+PMA*W1*(1.0D0/W2+DTAN(GAM))
GAM=GAM-RMA*W1
IERR=0
RETURN
END
C***********************************************************************
C***********************************************************************
C**
C** INTEGRAL SYSTEMS, INC.
C**
C***********************************************************************
C**
C** PROJECT : OPERATIONS GROUND EQUIPMENT FOR GOES-NEXT
C** SYSTEM : EARTH LOCATION USERS GUIDE
C** ROUTINE : INST2ER
C** SOURCE : F.INST2ER
C** LOAD NAME : ANY
C** PROGRAMMER: IGOR LEVINE
C**
C** VER. DATE BY COMMENT
C** ---- -------- --- ---------------------------------------------
C** 1 08/16/88 IL INITIAL CREATION
C** 2 11/11/88 IL TRIGONOMETRIC FUNCTIONS REPLACED WITH
C** SMALL ANGLE APPROXIMATIONS
C** 3 06/02/89 IL COORDINATE AXES CHANGED ACCORDING TO
C** FORD'S DEFINITION IN SDAIP, DRL 504-01
C**
C***********************************************************************
C**
C** INST2ER ACCEPTS THE SINGLE PRECISION ROLL, PITCH AND YAW ANGLES
C** OF AN INSTRUMENT AND RETURNS THE DOUBLE PRECISION INSTRUMENT TO
C** EARTH COORDINATES TRANSFORMATION MATRIX.
C**
C***********************************************************************
C**
C** CALLED BY : ANY
C** COMMONS MODIFIED: NONE
C** INPUTS : NONE
C** OUTPUTS : NONE
C** ROUTINES CALLED : NONE
C**
C***********************************************************************
C***********************************************************************
SUBROUTINE INST2ER(R,P,Y,A,AT)
C
C CALLING PARAMETERS
C
REAL*8 R
C ROLL ANGLE IN RADIANS
REAL*8 P
C PITCH ANGLE IN RADIANS
REAL*8 Y
C YAW ANGLE IN RADIANS
REAL*8 A(3,3)
C SPACECRAFT TO ECEF COORDINATES
C TRANSFORMATION MATRIX
REAL*8 AT(3,3)
C INSTRUMENT TO ECEF COORDINATES
C TRANSFORMATION MATRIX
C
C LOCAL VARIABLES
C
REAL*8 RPY(3,3)
C INSTRUMENT TO BODY COORDINATES
C TRANSFORMATION MATRIX
INTEGER*4 I,J
C INDICES
C
C INCLUDE FILES
C
C***********************************************************************
C
C WE COMPUTE INSTRUMENT TO BODY COORDINATES TRANSFORMATION
C MATRIX BY USING A SMALL ANGLE APPROXIMATION OF TRIGONOMETRIC
C FUNCTIONS OF THE ROLL, PITCH AND YAW.
C
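C (TO SECOND ORDER, COS T IS REPLACED BY 1 - T*T/2 AND SIN T BY T;
C RPY THEN AGREES WITH THE EXACT PRODUCT OF THE THREE AXIS
C ROTATIONS THROUGH SECOND ORDER IN THE ANGLES.)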
RPY(1,1)=1.0D0-0.50D0*(P*P+Y*Y)
RPY(1,2)=-Y
RPY(1,3)=P
RPY(2,1)=Y+P*R
RPY(2,2)=1.0D0-0.50D0*(Y*Y+R*R)
RPY(2,3)=-R
RPY(3,1)=-P+R*Y
RPY(3,2)=R+P*Y
RPY(3,3)=1.0D0-0.50D0*(P*P+R*R)
C
C MULTIPLICATION OF MATRICES A AND RPY
C
DO 20 I=1,3
DO 10 J=1,3
AT(I,J)=A(I,1)*RPY(1,J)+A(I,2)*RPY(2,J)+A(I,3)*RPY(3,J)
10 CONTINUE
20 CONTINUE
RETURN
END
C***********************************************************************
C***********************************************************************
C**
C** INTEGRAL SYSTEMS, INC.
C**
C***********************************************************************
C**
C** PROJECT : OPERATIONS GROUND EQUIPMENT FOR GOES-NEXT
C** SYSTEM : EARTH LOCATION USERS GUIDE
C** ROUTINE : LPOINT
C** SOURCE : F.LPOINT
C** LOAD NAME : ANY
C** PROGRAMMER: IGOR LEVINE
C**
C** VER. DATE BY COMMENT
C** ---- -------- --- ---------------------------------------------
C** A 01/09/89 IL INITIAL CREATION
C** A 06/02/89 IL COORDINATE AXES CHANGED ACCORDING TO
C** FORD'S DEFINITION IN SDAIP, DRL504-01
C** 3 03/08/94 SC IMPLEMENTED NEW FORMULAE FOR SCAN ANGLE
C** CORRECTIONS DUE TO MISALIGNMENTS
C***********************************************************************
C**
C** THIS SUBROUTINE CONVERTS THE INSTRUMENT ELEVATION AND SCAN
C** ANGLES TO THE RELATED GEOGRAPHIC LATITUDE AND LONGITUDE.
C**
C***********************************************************************
C**
C** CALLED BY : ANY
C** COMMONS MODIFIED: NONE
C** INPUTS : NONE
C** OUTPUTS : NONE
C** ROUTINES CALLED : NONE
C**
C***********************************************************************
C***********************************************************************
SUBROUTINE LPOINT(ALPHA,ZETA,RLAT,RLON,IERR)
C
C CALLING PARAMETERS
C
REAL*8 ALPHA
C ELEVATION ANGLE (RAD)
REAL*8 ZETA
C SCAN ANGLE (RAD)
REAL*8 RLAT
C LATITUDE IN RADIANS (OUTPUT)
REAL*8 RLON
C LONGITUDE IN RADIANS (OUTPUT)
INTEGER IERR
C OUTPUT STATUS; 0 - POINT ON THE EARTH
C FOUND, 1 - INSTRUMENT POINTS OFF EARTH
C
C LOCAL VARIABLES
C
REAL*8 G1(3)
C POINTING VECTOR IN EARTH CENTERED COORDINATES
REAL*8 H
C SLANT DISTANCE TO THE EARTH POINT (KM)
REAL*8 Q1,Q2,D
C WORK SPACE
REAL*8 G(3)
C POINTING VECTOR IN INSTRUMENT COORDINATES
REAL*8 U(3)
C COORDINATES OF THE EARTH POINT (KM)
REAL*8 SA,CA,DA,DZ,D1,CZ
C WORK SPACE
C
C INCLUDE FILES
C
INCLUDE 'elcons.inc'
INCLUDE 'elcomm.inc'
C***********************************************************************
IERR=1
C
C COMPUTES TRIGONOMETRIC FUNCTIONS OF THE SCAN AND ELEVATION
C ANGLES CORRECTED FOR THE ROLL AND PITCH MISALIGNMENTS
C
CA=DCOS(ALPHA)
SA=DSIN(ALPHA)
CZ=DCOS(ZETA)
DA=ALPHA-PMA*SA*(1.0D0/CZ+DTAN(ZETA))-RMA*(1.0D0-CA/CZ)
DZ=ZETA+RMA*SA
C CORRECTED SCAN ANGLE
CZ=DCOS(DZ)
C
C COMPUTES POINTING VECTOR IN INSTRUMENT COORDINATES
C
G(1)=DSIN(DZ)
G(2)=-CZ*DSIN(DA)
G(3)=CZ*DCOS(DA)
C
C TRANSFORMS THE POINTING VECTOR TO EARTH FIXED COORDINATES
C
G1(1)=BT(1,1)*G(1)+BT(1,2)*G(2)+BT(1,3)*G(3)
G1(2)=BT(2,1)*G(1)+BT(2,2)*G(2)+BT(2,3)*G(3)
G1(3)=BT(3,1)*G(1)+BT(3,2)*G(2)+BT(3,3)*G(3)
C
C COMPUTES COEFFICIENTS AND SOLVES A QUADRATIC EQUATION TO
C FIND THE INTERSECTION OF THE POINTING VECTOR WITH THE EARTH
C SURFACE
C
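C (SUBSTITUTING THE RAY U = XS + H*G1 INTO THE NORMALIZED EARTH
C ELLIPSOID EQUATION YIELDS Q1*H**2 + 2*Q2*H + Q3 = 0; THE ROOT
C H = -(Q2+D)/Q1 TAKEN BELOW IS THE INTERSECTION NEAREST THE
C SATELLITE.)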
Q1=G1(1)**2+G1(2)**2+AEBE2*G1(3)**2
Q2=XS(1)*G1(1)+XS(2)*G1(2)+AEBE2*XS(3)*G1(3)
D=Q2*Q2-Q1*Q3
IF (DABS(D).LT.1.D-9) D=0.0D0
C
C IF THE DISCRIMINANT OF THE EQUATION, D, IS NEGATIVE, THE
C INSTRUMENT POINTS OFF THE EARTH
C
IF (D.LT.0.0D0) THEN
RLAT=999999.0D0
RLON=999999.0D0
RETURN
END IF
D=DSQRT(D)
C
C SLANT DISTANCE FROM THE SATELLITE TO THE EARTH POINT
C
H=-(Q2+D)/Q1
C
C CARTESIAN COORDINATES OF THE EARTH POINT
C
U(1)=XS(1)+H*G1(1)
U(2)=XS(2)+H*G1(2)
U(3)=XS(3)+H*G1(3)
C
C SINE OF GEOCENTRIC LATITUDE
C
D1=U(3)/DSQRT(U(1)**2+U(2)**2+U(3)**2)
C
C GEOGRAPHIC (GEODETIC) COORDINATES OF THE POINT
C
RLAT=DATAN(AEBE2*D1/DSQRT(1.0D0-D1*D1))
RLON=DATAN2(U(2),U(1))
IERR=0
RETURN
END
C***********************************************************************
C***********************************************************************
C**
C** INTEGRAL SYSTEMS, INC.
C**
C***********************************************************************
C**
C** PROJECT : OPERATIONS GROUND EQUIPMENT FOR GOES-NEXT
C** SYSTEM : EARTH LOCATION USERS GUIDE
C** ROUTINE : SNDELOC
C** SOURCE : F.SNDELOC
C** LOAD NAME : ANY
C** PROGRAMMER: IGOR LEVINE
C**
C** VER. DATE BY COMMENT
C** ---- -------- --- ---------------------------------------------
C** A 02/16/89 IL INITIAL CREATION
C***********************************************************************
C**
C** SNDELOC ACCEPTS THE MIRROR POSITION IN CYCLES AND INCREMENTS,
C** SERVO ERROR VALUES, AND THE POSITIONAL OFFSETS FOR FOUR DETECTORS
C** OF A SELECTED SOUNDER CHANNEL AND COMPUTES THE DETECTOR EARTH
C** LOCATIONS IN LATITUDE/LONGITUDE COORDINATES.
C**
C***********************************************************************
C**
C** CALLED BY : ANY
C** COMMONS MODIFIED: NONE
C** INPUTS : NONE
C** OUTPUTS : NONE
C** ROUTINES CALLED : LPOINT
C**
C***********************************************************************
C***********************************************************************
SUBROUTINE SNDELO(CYEW,INCEW,CYNS,INCNS,SVEW,SVNS,DOFF,GEO)
C
C CALLING PARAMETERS
C
INTEGER CYEW
C E-W CYCLES
INTEGER INCEW
C E-W INCREMENTS
INTEGER CYNS
C N-S CYCLES
INTEGER INCNS
C N-S INCREMENTS
REAL*8 SVEW
C E-W SERVO ERROR IN RADIANS
REAL*8 SVNS
C N-S SERVO ERROR IN RADIANS
REAL*8 DOFF(4,2)
C OFFSETS FOR 4 DETECTORS (RADIANS)
C DOFF(*,1) = E-W OFFSET
C DOFF(*,2) = N-S OFFSET
REAL*8 GEO(4,2)
C GEOGRAPHIC COORDINATES RELATED TO 4 DETECTORS
C GEO(*,1) = LATITUDE IN RADIANS
C GEO(*,2) = LONGITUDE IN RADIANS
C
C LOCAL VARIABLES
C
c REAL*8 E,S,H,EV,SC,ALPHA,BETA,SINE,COSE,DE,DS
REAL*8 E,S,H,EV,SC,SINE,COSE,DE,DS
INTEGER I,IER
C
C INCLUDE FILES
C
INCLUDE 'instco.inc'
C***********************************************************************
C
C CONVERT THE MIRROR POSITION, GIVEN IN CYCLES AND INCREMENTS, TO
C ELEVATION AND SCAN ANGLES
C
C E=(CYNS*INCMAX(2)+INCNS)*ELVINC(2)-ELVMAX(2)
E=((CYNS-9)*INCMAX(2)+INCNS)*ELVINC(2)-ELVMAX(2)
S=(CYEW*INCMAX(2)+INCEW)*SCNINC(2)-SCNMAX(2)
C
C CORRECT ELEVATION AND SCAN ANGLES FOR SERVO ERRORS OBTAINING THE
C TRUE MIRROR POINTING
C
E=E+SVNS
S=S+SVEW
SINE=DSIN(E)
COSE=DCOS(E)
H=-2.0D0*SCNPX(2)
C
C COMPUTE DETECTOR ROTATION OFFSETS FOR EACH DETECTOR
C
C ALPHA = 0.643501D0 + E
C BETA = 0.244979D0 - E
C
C DOFF(1,1) = -0.064976D0
C DOFF(1,2) = 0.00042D0
C DOFF(2,1) = 0.00056D0
C DOFF(2,2) = 0.00014D0
C DOFF(3,1) = -0.064976D0
C DOFF(3,2) = -0.065396D0
C DOFF(4,1) = 0.00056D0
C DOFF(4,2) = -0.065116D0
C DOFF(1,1) = - 700.0D0*DCOS(ALPHA)*1.0D-6
C DOFF(1,2) = 700.0D0*DSIN(ALPHA)*1.0D-6
C DOFF(2,1) = 577.23479D0*DCOS(BETA)*1.D-6
C DOFF(2,2) = 577.23479D0*DSIN(BETA)*1.0D-6
C DOFF(3,1) = - 577.23479D0*DCOS(BETA)*1.0D-6
C DOFF(3,2) = - 577.23479D0*DSIN(BETA)*1.0D-6
C DOFF(4,1) = 700.0D0*DCOS(ALPHA)*1.0D-6
C DOFF(4,2) = - 700.0D0*DSIN(ALPHA)*1.0D-6
C
C COMPUTE EARTH LOCATIONS FOR FOUR DETECTORS
C
DO 10 I=1,4
C COMPUTE POSITIONAL OFFSETS OF I-TH DETECTOR
DE=(2.5-I)*ELVLN(2)+DOFF(I,2)
DS=H+DOFF(I,1)
C
C COMPUTE ELEVATION AND SCAN ANGLES RELATED TO I-TH DETECTOR
C AND CORRECT THEM FOR THE DETECTOR POSITIONAL OFFSETS
C
C EV=E+DOFF(I,2)
C SC=S+DOFF(I,1)
C
C CONVERT POSITIONAL OFFSETS TO ANGULAR OFFSETS AND
C CORRECT ELEVATION AND SCAN ANGLES
EV = E + DE*COSE - DS*SINE
SC = S + DE*SINE + DS*COSE
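C (READING OF THE CODE, NOT AN ORIGINAL COMMENT: THE SINE/COSE
C FACTORS ROTATE THE POSITIONAL OFFSETS DE,DS THROUGH THE MIRROR
C ELEVATION E INTO ELEVATION AND SCAN ANGLE OFFSETS.)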
C TRANSFORM DETECTOR'S POINTING ANGLES TO GEOGRAPHIC COORDINATES
C OF THE CORRESPONDING POINT ON THE EARTH SURFACE.
C NOTE: IF A DETECTOR LOOKS OFF THE EARTH, THE RELATED LATITUDE
C AND LONGITUDE ARE SET TO 999999.
C
CALL LPOINT(EV,SC,GEO(I,1),GEO(I,2),IER)
H=-H
10 CONTINUE
RETURN
END
C***********************************************************************
C***********************************************************************
C***********************************************************************
C**
C** INTEGRAL SYSTEMS, INC.
C**
C***********************************************************************
C**
C** PROJECT : OPERATIONS GROUND EQUIPMENT FOR GOES-NEXT
C** SYSTEM : EARTH LOCATION USERS GUIDE
C** ROUTINE : EVSC2LPF
C** SOURCE : F.EVSC2LPF
C** LOAD NAME : ANY
C** PROGRAMMER: IGOR LEVINE
C**
C** VER. DATE BY COMMENT
C** ---- -------- --- ---------------------------------------------
C** A 10/27/88 IL INITIAL CREATION
C**
C***********************************************************************
C**
C** THIS SUBROUTINE CONVERTS ELEVATION AND SCAN ANGLES
C** TO THE FRACTIONAL LINE AND PIXEL NUMBERS.
C**
C***********************************************************************
C**
C** CALLED BY : ANY
C** COMMONS MODIFIED: NONE
C** INPUTS : NONE
C** OUTPUTS : NONE
C** ROUTINES CALLED : NONE
C**
C***********************************************************************
C***********************************************************************
SUBROUTINE EVSC2L(INSTR,ELEV,SCAN,RL,RP)
C
C CALLING PARAMETERS
C
INTEGER INSTR
C INSTRUMENT CODE (1-IMAGER, 2-SOUNDER)
REAL*8 ELEV
C ELEVATION ANGLE IN RADIANS
REAL*8 SCAN
C SCAN ANGLE IN RADIANS
REAL*8 RL
C LINE NUMBER
REAL*8 RP
C PIXEL NUMBER
C
C LOCAL VARIABLES - NONE
C
C
C INCLUDE FILES
C
INCLUDE 'instco.inc'
C**************************************************************
C
C COMPUTE FRACTIONAL LINE NUMBER
C
RL=(ELVMAX(INSTR)-ELEV)/ELVLN(INSTR)
IF (INSTR.EQ.1) THEN
RL=RL+4.5D0
ELSE
RL=RL+2.5D0
END IF
C
C COMPUTE FRACTIONAL PIXEL NUMBER
C
RP=(SCNMAX(INSTR)+SCAN)/SCNPX(INSTR)+1.0D0
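C
C FOR ILLUSTRATION, WITH THE NOMINAL IMAGER VALUES NOTED IN SETCON
C (ELVMAX=0.220896, ELVLN=28.D-6) AN ELEVATION OF ZERO MAPS TO
C LINE RL = 0.220896/28.D-6 + 4.5, ABOUT 7893.6.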
RETURN
END
C***********************************************************************
C**
C** INTEGRAL SYSTEMS, INC.
C**
C***********************************************************************
C**
C** PROJECT : OPERATIONS GROUND EQUIPMENT FOR GOES-NEXT
C** SYSTEM : EARTH LOCATION USERS GUIDE
C** ROUTINE : EVLN
C** SOURCE : F.EVLN
C** LOAD NAME : ANY
C** PROGRAMMER: IGOR LEVINE
C**
C** VER. DATE BY COMMENT
C** ---- -------- --- ---------------------------------------------
C** A 10/27/88 IL INITIAL CREATION
C***********************************************************************
C**
C** THIS FUNCTION CONVERTS FRACTIONAL LINE NUMBER TO ELEVATION ANGLE
C** IN RADIANS.
C**
C***********************************************************************
C**
C** CALLED BY : ANY
C** COMMONS MODIFIED: NONE
C** INPUTS : NONE
C** OUTPUTS : NONE
C** ROUTINES CALLED : NONE
C**
C***********************************************************************
C***********************************************************************
FUNCTION EVLN(INSTR,RLINE)
C
C CALLING PARAMETERS
C
INTEGER INSTR
C INSTRUMENT CODE (1-IMAGER, 2-SOUNDER)
REAL*8 EVLN,RLINE
C FRACTIONAL LINE NUMBER
C
C LOCAL VARIABLES - NONE
C
C
C INCLUDE FILES
C
INCLUDE 'instco.inc'
C***********************************************************************
IF (INSTR.EQ.1) THEN
EVLN=ELVMAX(INSTR)*1.0D0 - (RLINE-4.5 )*(ELVLN(INSTR)*1.0D0)
ELSE
EVLN=ELVMAX(INSTR)*1.0D0 - (RLINE
+ -2.5D0)*(ELVLN(INSTR)*1.0D0)
END IF
RETURN
END
C***********************************************************************
C**
C** INTEGRAL SYSTEMS, INC.
C**
C***********************************************************************
C**
C** PROJECT : OPERATIONS GROUND EQUIPMENT FOR GOES-NEXT
C** SYSTEM : EARTH LOCATION USERS GUIDE
C** ROUTINE : SCPX
C** SOURCE : F.SCPX
C** LOAD NAME : ANY
C** PROGRAMMER: IGOR LEVINE
C**
C** VER. DATE BY COMMENT
C** ---- -------- --- ---------------------------------------------
C** A 09/22/87 IL INITIAL CREATION
C***********************************************************************
C**
C** THIS FUNCTION CONVERTS FRACTIONAL PIXEL NUMBER TO SCAN ANGLE
C** IN RADIANS.
C**
C***********************************************************************
C**
C** CALLED BY : ANY
C** COMMONS MODIFIED: NONE
C** INPUTS : NONE
C** OUTPUTS : NONE
C** ROUTINES CALLED : NONE
C**
C***********************************************************************
C***********************************************************************
FUNCTION SCPX(INSTR,PIX)
C
C CALLING PARAMETERS
C
INTEGER INSTR
C INSTRUMENT CODE (1-IMAGER, 2-SOUNDER)
REAL*8 SCPX,PIX
C FRACTIONAL PIXEL NUMBER
C
C LOCAL VARIABLES
C
C
C INCLUDE FILES
C
INCLUDE 'instco.inc'
C***********************************************************************
SCPX=((PIX*1.0D0)-1.0D0)*(SCNPX(INSTR)*1.0D0)
+ -(SCNMAX(INSTR)*1.0D0)
RETURN
END
C***********************************************************************
C**
C** INTEGRAL SYSTEMS, INC.
C**
C***********************************************************************
C**
C** PROJECT : OPERATIONS GROUND EQUIPMENT FOR GOES-NEXT
C** SYSTEM : EARTH LOCATION USERS GUIDE
C** ROUTINE : GATT
C** SOURCE : F.GATT
C** LOAD NAME : ANY
C** PROGRAMMER: IGOR LEVINE
C**
C** VER. DATE BY COMMENT
C** ---- -------- --- ---------------------------------------------
C** A 12/01/88 IL INITIAL CREATION
C**
C***********************************************************************
C**
C** THIS FUNCTION COMPUTES AN ATTITUDE/MISALIGNMENT ANGLE FROM A
C** GIVEN SUBSET OF THE O&A PARAMETERS IN GVAR BLOK 0.
C** ARGUMENT K0 INDICATES THE FIRST WORD OF THE SUBSET.
C**
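C** IN TERMS OF THE CODE BELOW, THE RESULT HAS THE FORM (SUMMARY):
C** ATT = REC(K0+2) + REC(K0)*EXP(-TE/REC(K0+1))
C** + SUM OF REC(K0+2L+2)*COS(L*WA+REC(K0+2L+3))
C** + SUM OF REC(LL-2)*(WA-REC(LL))**M*COS(J*WA+REC(LL-1)),
C** WITH LL = K0+34+5L FOR THE MONOMIAL SINUSOIDS.
C**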
C***********************************************************************
C**
C** CALLED BY : LMODEL
C** COMMONS MODIFIED: NONE
C** INPUTS : NONE
C** OUTPUTS : NONE
C** ROUTINES CALLED : NONE
C**
C***********************************************************************
C***********************************************************************
FUNCTION GATT(K0,REC,WA,TE)
C
C CALLING PARAMETERS
C
INTEGER K0
C STARTING POSITION OF A PARAMETER SUBSET IN THE
C O&A SET
REAL*8 REC(336)
C INPUT O&A PARAMETER SET
REAL*8 WA
C INPUT SOLAR ORBIT ANGLE IN RADIANS
REAL*8 TE
C INPUT EXPONENTIAL TIME DELAY FROM EPOCH (MINUTES)
C
C LOCAL VARIABLES
C
INTEGER*4 I,J,M,L,LL,K
REAL*8 GATT,IR,JR,MR,ATT
EQUIVALENCE (J,JR),(M,MR)
C
C INCLUDE FILES
C
C***********************************************************************
C
C CONSTANT COMPONENT
C
K=K0
ATT=REC(K+2)
C
C COMPUTES THE EXPONENTIAL TERM
C
IF ((TE.GE.0.0D0).AND.(REC(K+1).GT. 0))THEN
ATT=ATT+REC(K)*DEXP(-TE/REC(K+1))
ENDIF
C
C EXTRACTS THE NUMBER OF SINUSOIDS
C
IR=REC(K+3)
I = INT(IR)
C
C CALCULATION OF SINUSOIDS
C
DO 10 L=1,I
ATT=ATT+REC(K+2*L+2)*DCOS(WA*L+REC(K+2*L+3))
10 CONTINUE
C
C POINTER TO THE NUMBER OF MONOMIAL SINUSOIDS
C
K=K+34
C
C EXTRACTS NUMBER OF MONOMIAL SINUSOIDS
C
IR=REC(K)
I=INT(IR)
C KKK=REC(K)
C
C COMPUTES MONOMIAL SINUSOIDS
C
DO 20 L=1,I
LL=K+5*L
C
C ORDER OF SINUSOID
C
JR=REC(LL-4)
C
C ORDER OF MONOMIAL SINUSOID
C
MR=REC(LL-3)
C
ATT=ATT+REC(LL-2)*((WA-REC(LL))**M)*DCOS(J*WA+REC(LL-1))
20 CONTINUE
GATT=ATT
RETURN
END
C* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
C* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
C* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
C DLAT,DLON...... 50.00000 -150.00000 (15X,F9.5,2X,F10.5)
C LINE...(1),(2). 3584 1230 (15X,I5,2X,I5)
C IPIXEL.(1),(2).10253 1155 (15X,I5,2X,I5)
C KEY............0 (15X,I1)
C IMC............1 (15X,I1)
C INSTR..........2 (15X,I1)
C EPOCH TIME.....1989032062934.567 (15X,I4,I3,2I2,F6.3)
C START TIME.....1989032064934.567 (15X,I4,I3,2I2,F6.3)
C* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
C DLAT ---------- LATITUDE (NEGATIVE IS SOUTH)(F9.5)
C DLON ---------- LONGITUDE (NEGATIVE IS WEST)(F10.5)
C LINE(2) ------- LINE NO. (SEE INSTR FOR LINE(INSTR)(I5)
C IPIXEL(2) ----- PIXEL NO. (SEE INSTR FOR IPIXEL(INSTR)(I5)
C KEY ----------- 0 = LINE/PIXEL TO LAT/LON CONVERSION.
C 1 = LAT/LON TO LINE/PIXEL CONVERSION.
C IMC ----------- IMC STATUS (0=IMC ON, 1=IMC OFF)
C INSTR --------- INSTRUMENT (1=IMAGER, 2=SOUNDER)
C EPOCH TIME----- YEAR, J-DAY, HOUR, MINUTE, SECONDS
C START TIME----- YEAR, J-DAY, HOUR, MINUTE, SECONDS
C TEST FOR USER:INPUT DATA BLOCK(UNNUM DATA BLOCK)..........
C FIFTY(50) LINES AVAILABLE FOR USE IN INPUT BLOCK..........
C***********************************************************************
C* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
C* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
C* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
|
{"hexsha": "fa2687301d3e5c98380a08b044807e9a40b3c140", "size": 101934, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "src/lib/goesinav/gimloc.f", "max_stars_repo_name": "maxinye/laps-mirror", "max_stars_repo_head_hexsha": "b3f7c08273299a9e19b2187f96bd3eee6e0aa01b", "max_stars_repo_licenses": ["Intel", "Unlicense", "OLDAP-2.2.1", "NetCDF"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2019-04-05T12:28:22.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-29T06:37:29.000Z", "max_issues_repo_path": "src/lib/goesinav/gimloc.f", "max_issues_repo_name": "longwosion/laps-mirror", "max_issues_repo_head_hexsha": "b3f7c08273299a9e19b2187f96bd3eee6e0aa01b", "max_issues_repo_licenses": ["Intel", "NetCDF", "OLDAP-2.2.1", "Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/lib/goesinav/gimloc.f", "max_forks_repo_name": "longwosion/laps-mirror", "max_forks_repo_head_hexsha": "b3f7c08273299a9e19b2187f96bd3eee6e0aa01b", "max_forks_repo_licenses": ["Intel", "NetCDF", "OLDAP-2.2.1", "Unlicense"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2019-04-27T12:51:17.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-19T13:57:44.000Z", "avg_line_length": 71.7845070423, "max_line_length": 89, "alphanum_fraction": 0.2054859026, "num_tokens": 15031}
|
# This file is part of astro_metadata_translator.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (http://www.lsst.org).
# See the LICENSE file at the top-level directory of this distribution
# for details of code ownership.
#
# Use of this source code is governed by a 3-clause BSD-style
# license that can be found in the LICENSE file.
"""Properties calculated by this package.
Defines all properties in one place so that both `ObservationInfo` and
`MetadataTranslator` can use them. In particular, the translator
base class can use knowledge of these properties to predefine translation
stubs with documentation attached, and `ObservationInfo` can automatically
define the getter methods.
"""
__all__ = ("PROPERTIES", )
import astropy.coordinates
import astropy.time
import astropy.units
# Dict of properties to tuple where tuple is:
# - description of property
# - Python type of property as a string (suitable for docstrings)
# - the actual Python type
PROPERTIES = {"telescope": ("Full name of the telescope.", "str", str),
"instrument": ("The instrument used to observe the exposure.", "str", str),
"location": ("Location of the observatory.", "astropy.coordinates.EarthLocation",
astropy.coordinates.EarthLocation),
"exposure_id": ("Unique (with instrument) integer identifier for this observation.", "int",
int),
"visit_id": ("""ID of the Visit this Exposure is associated with.
Science observations should essentially always be
associated with a visit, but calibration observations
may not be.""", "int", int),
"physical_filter": ("The bandpass filter used for this observation.", "str", str),
"datetime_begin": ("Time of the start of the observation.", "astropy.time.Time",
astropy.time.Time),
"datetime_end": ("Time of the end of the observation.", "astropy.time.Time",
astropy.time.Time),
"exposure_time": ("Duration of the exposure with shutter open (seconds).",
"astropy.units.Quantity", astropy.units.Quantity),
"dark_time": ("Duration of the exposure with shutter closed (seconds).",
"astropy.units.Quantity", astropy.units.Quantity),
"boresight_airmass": ("Airmass of the boresight of the telescope.", "float", float),
"boresight_rotation_angle": ("Angle of the instrument in boresight_rotation_coord frame.",
"astropy.coordinates.Angle", astropy.coordinates.Angle),
"boresight_rotation_coord": ("Coordinate frame of the instrument rotation angle"
" (options: sky, unknown).", "str", str),
"detector_num": ("Unique (for instrument) integer identifier for the sensor.", "int", int),
"detector_name": ("Name of the detector within the instrument (might not be unique"
" if there are detector groups).",
"str", str),
"detector_unique_name": ("Unique name of the detector within the focal plane, generally"
" combining detector_group with detector_name.",
"str", str),
"detector_serial": ("Serial number/string associated with this detector.", "str", str),
"detector_group": ("Collection name of which this detector is a part. "
"Can be None if there are no detector groupings.", "str", str),
"detector_exposure_id": ("Unique integer identifier for this detector in this exposure.",
"int", int),
"object": ("Object of interest or field name.", "str", str),
"temperature": ("Temperature outside the dome.", "astropy.units.Quantity",
astropy.units.Quantity),
"pressure": ("Atmospheric pressure outside the dome.", "astropy.units.Quantity",
astropy.units.Quantity),
"relative_humidity": ("Relative humidity outside the dome.", "float", float),
"tracking_radec": ("Requested RA/Dec to track.", "astropy.coordinates.SkyCoord",
astropy.coordinates.SkyCoord),
"altaz_begin": ("Telescope boresight azimuth and elevation at start of observation.",
"astropy.coordinates.AltAz", astropy.coordinates.AltAz),
"science_program": ("Observing program (survey or proposal) identifier.", "str", str),
"observation_type": ("Type of observation (currently: science, dark, flat, bias, focus).",
"str", str),
"observation_id": ("Label uniquely identifying this observation "
"(can be related to 'exposure_id').",
"str", str),
"exposure_group": ("Label to use to associate this exposure with others "
"(can be related to 'exposure_id').",
"str", str),
}
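# A minimal sketch (an assumption, not part of this module) of how a consumer
# such as ObservationInfo could auto-define read-only getters from PROPERTIES;
# the "_<name>" backing-attribute convention used here is hypothetical.
def _make_getter(name, description, py_type):
    def getter(self):
        return getattr(self, "_" + name, None)
    getter.__doc__ = "{} (`{}`)".format(description, py_type)
    return property(getter)


if __name__ == "__main__":
    class _ExampleInfo:
        """Toy container with one getter per entry in PROPERTIES."""

    for _name, (_desc, _type_str, _typ) in PROPERTIES.items():
        setattr(_ExampleInfo, _name, _make_getter(_name, _desc, _type_str))

    info = _ExampleInfo()
    info._telescope = "Example 4m Telescope"  # hypothetical backing attribute
    print(info.telescope)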
|
{"hexsha": "3f022c58757d62266987e017043a6aedc6e7dcf3", "size": 5369, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/astro_metadata_translator/properties.py", "max_stars_repo_name": "HyperSuprime-Cam/astro_metadata_translator", "max_stars_repo_head_hexsha": "b976306e3e6fb85232cc838a145475ae8f16ca31", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "python/astro_metadata_translator/properties.py", "max_issues_repo_name": "HyperSuprime-Cam/astro_metadata_translator", "max_issues_repo_head_hexsha": "b976306e3e6fb85232cc838a145475ae8f16ca31", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python/astro_metadata_translator/properties.py", "max_forks_repo_name": "HyperSuprime-Cam/astro_metadata_translator", "max_forks_repo_head_hexsha": "b976306e3e6fb85232cc838a145475ae8f16ca31", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 60.3258426966, "max_line_length": 105, "alphanum_fraction": 0.587073943, "include": true, "reason": "import astropy", "num_tokens": 1007}
|
import DataStore
testStore : DataStore (SString .+. SString .+. SInt)
testStore = addToStore ("Mercury", "Mariner 10", 1974) $
addToStore ("Venus", "Venera", 1961) $
addToStore ("Uranus", "Voyager 2", 1986) $
addToStore ("Pluto", "New Horizons", 2015) $
empty
listItems : DataStore schema -> List (SchemaType schema)
listItems input with (storeView input)
listItems DataStore.empty | SNil = []
listItems (addToStore entry store) | (SAdd entry store rec)
= entry :: listItems store | rec
filterKeys : (test : SchemaType val_schema -> Bool) ->
DataStore (SString .+. val_schema) -> List String
filterKeys test input with (storeView input)
filterKeys test DataStore.empty | SNil = []
filterKeys test (addToStore (key, value) store) | (SAdd (key, value) store rec)
= if test value
then key :: filterKeys test store | rec
else filterKeys test store | rec
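-- A usage sketch (semantics assumed from the chapter's DataStore): with the
-- value schema SString .+. SInt the test receives (craft, year) pairs, so
--   filterKeys (\(craft, year) => year > 1980) testStore
-- should yield the keys "Uranus" and "Pluto".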
|
{"hexsha": "8ab31022f95d4a1f5e44b76d087450e01ea7be46", "size": 963, "ext": "idr", "lang": "Idris", "max_stars_repo_path": "idris2/tests/typedd-book/chapter10/TestStore.idr", "max_stars_repo_name": "Qqwy/Idris2-Erlang", "max_stars_repo_head_hexsha": "945f9c12d315d73bfda2d441bc5f9f20696b5066", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "idris2/tests/typedd-book/chapter10/TestStore.idr", "max_issues_repo_name": "Qqwy/Idris2-Erlang", "max_issues_repo_head_hexsha": "945f9c12d315d73bfda2d441bc5f9f20696b5066", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "idris2/tests/typedd-book/chapter10/TestStore.idr", "max_forks_repo_name": "Qqwy/Idris2-Erlang", "max_forks_repo_head_hexsha": "945f9c12d315d73bfda2d441bc5f9f20696b5066", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.52, "max_line_length": 81, "alphanum_fraction": 0.6386292835, "num_tokens": 261}
|
import pandas as pd
import numpy as np
import schedule as sc
def find_similars_days(f_list, s_list):
    # days present in both clients' schedules
    return [day for day in f_list if day in s_list]
def find_similars_hours(f_list, s_list):
    # half-hour slots present in both clients' schedules
    return [hour for hour in f_list if hour in s_list]
class Client:
def __init__(self,tel_number,lec_days,time_period):
self.tel_number = tel_number
self.lec_days = lec_days
self.time_period = time_period
def set_tel_number(self,tel_number):
self.tel_number = tel_number
def set_lec_days(self,lec_days):
self.lec_days = lec_days
def set_time_period(self,time_period):
self.time_period = time_period
def get_tel_number(self):
return self.tel_number
def get_lec_days(self):
return self.lec_days
def get_time_period(self):
return self.time_period.strftime("%H:%M:%S").tolist()
first_client=Client('380965217864',['Saturday','Sunday'],pd.date_range("18:00", "18:30", freq="30min"))
second_client=Client('380500397333',['Saturday','Sunday'],pd.date_range("12:00", "20:30", freq="30min"))
third_client=Client('380505878609',['Tuesday'],pd.date_range("18:00", "18:30", freq="30min"))
fourth_client=Client('380961623961',['Saturday','Sunday'],pd.date_range("17:00", "20:30", freq="30min"))
fifth_client=Client('380950858030',['Sunday'],pd.date_range("20:00", "20:30", freq="30min"))
sixth_client=Client('380930252818',['Monday','Tuesday','Wednesday','Thursday','Friday'],pd.date_range("13:14", "18:20", freq="30min"))
seventh_client=Client('380960828714',['Tuesday','Friday'],pd.date_range("09:00", "13:00", freq="30min"))
eight_client=Client('380955546535',['Sunday'],pd.date_range("18:00", "20:00", freq="30min"))
nineth_client=Client('380674867255',['Monday','Tuesday','Wednesday','Thursday','Friday'],pd.date_range("10:00", "16:30", freq="30min"))
tenth_client=Client('380976233984',['Saturday'],pd.date_range("13:00", "14:00", freq="30min"))
base={0:first_client,1:second_client,2:third_client,3:fourth_client,4:fifth_client,5:sixth_client,6:seventh_client,7:eight_client,8:nineth_client,9:tenth_client}
def set_schedule(f_client, s_client):
    common_days = find_similars_days(f_client.get_lec_days(), s_client.get_lec_days())
    common_hours = find_similars_hours(f_client.get_time_period(), s_client.get_time_period())
    if len(common_days) == 0 or len(common_hours) == 0:
        return None
    # register the pair and return a printable record ending in the '|' separator
    sc.Schedule(str(common_days[0]), str(f_client.get_tel_number()),
                str(s_client.get_tel_number()), str(common_hours[0]))
    return (str(common_days[0]) + ' ' + str(f_client.get_tel_number()) + ' ' +
            str(s_client.get_tel_number()) + ' (' + str(common_hours[0]) + ') |')
schStr = ''
# pair every two distinct clients exactly once
for i in range(len(base)):
    for j in range(i + 1, len(base)):
        sch = set_schedule(base[i], base[j])
        if sch is not None:
            schStr += sch
list_sch=schStr.split("|")
#print(list_sch)
list_monday=[]
list_sunday=[]
list_tuesday=[]
list_saturday=[]
for i in list_sch:
if 'Monday' in i:
list_monday.append(i)
elif 'Saturday' in i:
list_saturday.append(i)
elif 'Sunday' in i:
list_sunday.append(i)
elif 'Tuesday' in i:
list_tuesday.append(i)
print('Monday:')
for i in list_monday:
print(i.replace('Monday',' '))
print('Tuesday:')
for i in list_tuesday:
print(i.replace('Tuesday',' '))
print('Saturday:')
for i in list_saturday:
print(i.replace('Saturday',' '))
print('Sunday:')
for i in list_sunday:
print(i.replace('Sunday',' '))
#print(type(pd.date_range("11:00", "21:30", freq="30min")))
|
{"hexsha": "93bcaf02bc710ec9840d2a822862f63d95dc71b2", "size": 4319, "ext": "py", "lang": "Python", "max_stars_repo_path": "client.py", "max_stars_repo_name": "Eugenemdk/schedule-practices-algorithm", "max_stars_repo_head_hexsha": "b852b7afd38f6e58501be80b8617d63385f2c43b", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "client.py", "max_issues_repo_name": "Eugenemdk/schedule-practices-algorithm", "max_issues_repo_head_hexsha": "b852b7afd38f6e58501be80b8617d63385f2c43b", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "client.py", "max_forks_repo_name": "Eugenemdk/schedule-practices-algorithm", "max_forks_repo_head_hexsha": "b852b7afd38f6e58501be80b8617d63385f2c43b", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.9145299145, "max_line_length": 238, "alphanum_fraction": 0.6450567261, "include": true, "reason": "import numpy", "num_tokens": 1136}
|
from analysis.patterns_in_bio_data import bio_data_runs
import numpy as np
from matplotlib import pylab as plt
from analysis.functions import bio_process
# importing the list of all runs of the bio data from the function 'bio_data_runs'
bio_runs = bio_data_runs()
# calculating the mean value of all runs
mean_data = list(map(lambda elements: np.mean(elements), zip(*bio_runs)))
# number of slices
num_slices = int(len(mean_data) / 100)
slices = []
offset = 0
yticks = []
step = 0.25
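# each slice below is 100 samples long; with step = 0.25 that spans 25 time
# units (milliseconds, judging by the 0-25 x-axis used when plotting)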
# forming list for shadows plotting
all_bio_slices = []
for k in range(len(bio_runs)):
bio_slices= []
offset= 0
for i in range(int(len(bio_runs[k]) / 100)):
bio_slices_tmp = []
for j in range(offset, offset + 100):
bio_slices_tmp.append(bio_runs[k][j])
bio_slices.append(bio_slices_tmp)
offset += 100
all_bio_slices.append(bio_slices) # list [4][16][100]
all_bio_slices = list(zip(*all_bio_slices)) # list [16][4][100]
# creating the list of lists (dots per slice)
offset = 0
for sl in range(num_slices):
slices_tmp = []
for dot in range(offset, offset + 100):
slices_tmp.append(mean_data[dot])
slices.append(slices_tmp)
offset += 100
# creating the list of dots of stimulations
stimulations = []
for stim in range(0, 1601, 100):
stimulations.append(stim)
print(stimulations)
# creating the list of lists (volts and stimulations)
volts_and_stims = []
volts_and_stims.append(mean_data)
volts_and_stims.append(stimulations)
# calculating the latencies
lat_amp = bio_process(volts_and_stims, 16)
latencies = lat_amp[0]
print("latencies = ", latencies)
# creating the list of x & y coordinates of the lines
x_coor = []
y_coor = []
# plotting the slices
for index, sl in enumerate(all_bio_slices):
offset = index * 10
times = [time * step for time in range(len(all_bio_slices[0][0]))]
for run in range(len(sl)):
plt.plot(times, [s + offset for s in sl[run]], color='gray')
for index, run in enumerate(slices):
offset = index * 10
yticks.append(run[0] + offset)
times = [time * step for time in range(len(run))]
plt.plot(times, [s + offset for s in run], linewidth=3)
# plotting of the dots
plt.plot(latencies[index], run[int(latencies[index] / step)] + offset, marker='.', markersize=8)
	# plotting of the lines
x_coor.append(latencies[index])
y_coor.append(run[int(latencies[index] / step)] + offset)
x_2_coors = []
y_2_coors = []
if len(x_coor) > 1:
x_2_coors.append(x_coor[-2])
x_2_coors.append(x_coor[-1])
y_2_coors.append(y_coor[-2])
y_2_coors.append(y_coor[-1])
plt.plot(x_2_coors, y_2_coors, linestyle='--', color='black')
plt.xticks(range(26), range(26), fontsize=14)
plt.yticks(yticks, range(1, len(slices) + 1), fontsize=14)
plt.xlim(0, 25)
plt.grid(which='major', axis='x', linestyle='--', linewidth=0.5)
plt.show()
|
{"hexsha": "16a121a33688f837bbeb62ff85172e3f5408370d", "size": 2799, "ext": "py", "lang": "Python", "max_stars_repo_path": "analysis/trapezium.py", "max_stars_repo_name": "KseniiaKarpova/memristive-spinal-cord", "max_stars_repo_head_hexsha": "15be4aea80ede6315b9bf4ea76eec17b900f31e4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "analysis/trapezium.py", "max_issues_repo_name": "KseniiaKarpova/memristive-spinal-cord", "max_issues_repo_head_hexsha": "15be4aea80ede6315b9bf4ea76eec17b900f31e4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "analysis/trapezium.py", "max_forks_repo_name": "KseniiaKarpova/memristive-spinal-cord", "max_forks_repo_head_hexsha": "15be4aea80ede6315b9bf4ea76eec17b900f31e4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.4494382022, "max_line_length": 97, "alphanum_fraction": 0.7148981779, "include": true, "reason": "import numpy", "num_tokens": 826}
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 20 15:12:49 2016
@author: uzivatel
"""
import numpy as np
import scipy
from functools import partial
from copy import deepcopy
from .general import Coordinate,Grid
from ...General.UnitsManager import PositionUnitsManaged,position_units
from ...General.types import UnitsManaged
from ..positioningTools import RotateAndMove, RotateAndMove_1, CenterMolecule
class DensityGrid(PositionUnitsManaged):
''' Class representing electronic density on spatial grid (e.g. molecular
orbitals, transition density, ...)
origin : numpy.array of real (dimension 3)
origin of density grid (Position managed units)
grid : numpy.array of integer (dimension 3)
number of grid points at each dimension
step : numpy.array of real (dimension 3x3)
        step[i,:] is the translational vector along the i-th grid dimension (Position managed units)
data : numpy.array of real (dimension Npoints_x x Npoints_y x Npoints_z)
        Density values on the grid. data[i,j,k] corresponds to the point with
        coordinates self.origin + i*self.step[0,:] + j*self.step[1,:] + k*self.step[2,:]
type : string
If ``typ='mo'`` density values correspond to real wavefunction otherwise
it is an electron density
indx : integer
Index of molecular orbital to which wavefunction correspond
coor : Coordinate class
Atomic coordinates for every atom in the molecule or complex.
(Position managed units)
at_charge : numpy array of real, integer or string (dimension Natoms)
Proton number for every atom in the molecule or complex
Functions
----------
rotate :
Rotate the density and all its properties by specified angles in
radians in positive direction.
    rotate_1 :
        Inverse rotation to ``rotate``.
move :
Moves the density and all its properties along specified vector
center :
Center the density and allign in defined plane
copy :
Create 1 to 1 deep copy of the density with all classes and types.
import_cub :
Read density from cube file
output :
Outputs density into cube file
get_axes :
Outputs x, y and z axis of the grid on which density is evaluated
(only for nonrotated grid - oriented along coordinate axis)
dipole :
Numerical calculation of dipole from the density
dipole_partial :
Numerical calculation of dipole for only specified spatial cut of the
density. (only for nonrotated grid - oriented along coordinate axis)
cut :
Spatial cut of the density which is outputed as a new density.
(only for nonrotated grid - oriented along coordinate axis)
calc_atomic_properties :
Calculate atomic charges and dipoles from numerical integration of the
density into individual atoms. Quantity from grid point will be assigned
to nearest atom.
'''
origin=UnitsManaged("origin")
step=UnitsManaged("step")
def __init__(self,origin,grid,step,density,typ='mo',mo_indx=1,Coor=None,At_charge=None):
if origin is None:
self.origin=None
else:
self.origin = np.copy(origin)
if grid is None:
self.grid=None
else:
self.grid = np.copy(grid)
if step is None:
self.step=None
else:
self.step = np.copy(step)
if density is None:
self.data=None
else:
self.data = np.copy(density)
self.type = typ
self.indx = mo_indx
        if Coor is None:
            self.coor = None
        else:
            self.coor = Coordinate(Coor)
        if At_charge is None:
            self.at_charge = None
        else:
            self.at_charge = np.copy(At_charge)
def output(self,filename='density.cub'):
''' Output density to cube file
Parameters
----------
filename : string (optional - init='density.cub')
Output file name including the path to output folder
'''
with position_units('Bohr'):
Coor = np.copy(self.coor.value)
Grid = np.copy(self.grid)
Step = np.copy(self.step)
At_charge = np.copy(self.at_charge)
with open(filename, "wt") as f:
            # Write the header
            f.write("____Anything can be written here____ \n MO coefficients \n")
# f.write(" %i %5.2f %5.2f %5.2f \n" % (-len(qc.at_coord),min_[0],min_[1],min_[2]))
if self.type=='mo':
f.write("{:5d}".format(-len(Coor)))
else:
f.write("{:5d}".format(len(Coor)))
for ii in range(3):
f.write("{:12.6f}".format(self.origin[ii]))
f.write("{:5d}\n".format(1))
f.write("{:5d}{:12.6f}{:12.6f}{:12.6f}\n".format(Grid[0], Step[0,0], Step[0,1], Step[0,2] ))
f.write("{:5d}{:12.6f}{:12.6f}{:12.6f}\n".format(Grid[1], Step[1,0], Step[1,1], Step[1,2] ))
f.write("{:5d}{:12.6f}{:12.6f}{:12.6f}\n".format(Grid[2], Step[2,0], Step[2,1], Step[2,2] ))
for ii in range(len(Coor)):
f.write("{:5d}{:12.6f}{:12.6f}{:12.6f}{:12.6f}\n".format(int(float(At_charge[ii])), float(At_charge[ii]), Coor[ii,0], Coor[ii,1], Coor[ii,2]))
if self.type=='mo':
f.write("{:5d}{:5d}\n".format(1, self.indx))
            # write the molecular orbital / density values on the grid
for ii in range(self.grid[0]):
for jj in range(self.grid[1]):
for kk in range(self.grid[2]):
f.write("{:13.5E}".format(self.data[ii,jj,kk]))
if (kk % 6) == 5:
f.write("\n")
#f.write("\n")
if self.grid[2]%6!=0:
f.write("\n")
def import_cub(self,filename):
''' Import data from density cube file
Parameters
----------
        filename : string
            Input file name (.cub) including the path to the file folder
'''
origin=np.zeros(3,dtype='f8')
self.grid=np.zeros(3,dtype='i8')
step=np.zeros((3,3),dtype='f8')
fid = open(filename,'r') # Open the file
flines = fid.readlines() # Read the WHOLE file into RAM
fid.close() # Close the file
thisline = flines[2].split()
Natom=np.abs(int(thisline[0]))
if int(thisline[0]) < 0:
self.type='mo'
else:
self.type='transition'
self.at_charge=np.zeros(Natom,dtype='f')
Coor=np.zeros((Natom,3),dtype='f8')
for ii in range(3):
origin[ii]=float(thisline[ii+1])
for kk in range(3):
thisline = flines[kk+3].split()
self.grid[kk]=int(thisline[0])
for ii in range(3):
step[kk,ii]=float(thisline[ii+1])
# atomic information:
for kk in range(Natom):
thisline = flines[kk+6].split()
self.at_charge[kk]=float(thisline[1])
for ii in range(3):
Coor[kk,ii]=float(thisline[ii+2])
if self.type=='mo':
thisline = flines[Natom+6].split()
self.indx=int(thisline[1])
il=7
else:
il=6
with position_units('Bohr'):
self.coor=Coordinate(Coor)
self.origin=origin.copy()
self.step=step.copy()
# read density
self.data=np.zeros((self.grid[0],self.grid[1],self.grid[2]),dtype='f8')
counter=np.zeros(3,dtype='i8')
for kk in range(Natom+il,len(flines)):
line = flines[kk] # The current line as string
thisline = line.split() # The current line split into segments
for ii in range(6):
self.data[counter[0],counter[1],counter[2]]=float(thisline[ii])
counter[2]+=1
if counter[2]==self.grid[2]:
counter[2]=0
counter[1]+=1
if counter[1]==self.grid[1]:
counter[1]=0
counter[0]+=1
break
def get_axes(self):
""" Outputs x, y and z axis of the grid. ** Working only for grid
oriented along coordinate axis (nonrotated grid)**
Returns
--------
x,y,z : numpy array of float (dimension Grid_Nx, Grid_Ny, Grid_Nz)
Coordinates of grid points in coordinate axes
"""
print("Working only for nonrotated grid oriented along coordinate axes")
x=np.arange(self.grid[0])*self.step[0,0]+self.origin[0]
y=np.arange(self.grid[1])*self.step[1,1]+self.origin[1]
z=np.arange(self.grid[2])*self.step[2,2]+self.origin[2]
return x,y,z
def copy(self):
        ''' Copy the DensityGrid class variable into a new one
Returns
----------
density_new : DensityGrid class
New DensityGrid class variable with exactly the same values as the
original one
Notes
----------
        We have to use this function because a simple density_new=density_old
        only creates a pointer to the old density, and therefore all changes to
        density_new would also be applied to density_old, which is not what we
        want
'''
density_new = deepcopy(self)
return density_new
def move(self,dx,dy,dz):
''' Moves density grid in space
Parameters
----------
dx,dy,dz : real
Distance of density shift along x resp. y resp.
z axis.
'''
vec=np.array([dx,dy,dz],dtype='f8')
self.origin=self.origin+vec
self.coor.move(dx,dy,dz)
def rotate(self,rotxy,rotxz,rotyz):
        ''' Rotate DENSITY in SPACE by a positive rotational angle
        (with the right thumb pointing along the axis, the fingers point in the
        positive rotation direction). Rotation is first around the z axis, then
        around the y axis and then around the x axis.
Parameters
----------
rotxy,rotxz,rotyz : real
`rotxy` resp. `rotxz` resp. `rotyz` is angle in RADIANS of rotation
around z resp. y resp. x axis in positive direction
'''
# Rotation handled in atomic units
        #print('Before rotation')
self._origin=RotateAndMove(np.array([self._origin]),0.0,0.0,0.0,rotxy,rotxz,rotyz)
self.coor.rotate(rotxy,rotxz,rotyz)
self._step=RotateAndMove(self._step,0.0,0.0,0.0,rotxy,rotxz,rotyz)
        #print('After rotation')
def rotate_1(self,rotxy,rotxz,rotyz):
        ''' Rotate DENSITY in SPACE by a negative rotational angle
        (with the right thumb pointing along the axis, the fingers point in the
        positive rotation direction). Rotation is first around the x axis, then
        around the y axis and then around the z axis.
        Inverse function to rotate(rotxy,rotxz,rotyz)
Parameters
----------
rotxy,rotxz,rotyz : real
`rotxy` resp. `rotxz` resp. `rotyz` is angle in RADIANS of rotation
around z resp. y resp. x axis in positive direction
'''
        #print('Before rotation')
self._origin=RotateAndMove_1(np.array([self._origin]),0.0,0.0,0.0,rotxy,rotxz,rotyz)
self.coor.rotate_1(rotxy,rotxz,rotyz)
self._step=RotateAndMove_1(self._step,0.0,0.0,0.0,rotxy,rotxz,rotyz)
        #print('After rotation')
def center(self,indx_center,indx_x,indx_y):
        ''' Center the density according to a defined center and main axes.
        The center atom will be at the origin of the coordinate system
        (will have [0.0,0.0,0.0] coordinates), vector X will point in the
        direction of the x axis and vector Y will lie in the xy plane. Vectors X
        and Y are defined by atomic indexes.
        Parameters
        ----------
        indx_center : int or list of int
            When `indx_center`=i it refers to the atomic coordinate of the ith atom
            (counted from zero) => center=coor[i,:]. When `indx_center`=[i,j,k,..]
            then the center is the center of all listed atoms (average coordinate) =>
            center=(coor[i,:]+coor[j,:]+coor[k,:]...)/N
        indx_x : int or list of int of length 2 or 4
            When `indx_x`=i then vector X is defined as Coor[i,:]-center.
            When `indx_x`=[i,j] then vector X is defined as Coor[j,:]-Coor[i,:].
            When `indx_x`=[i,j,k,l] then vector X is defined as
            (Coor[j,:]-Coor[i,:])+(Coor[l,:]-Coor[k,:]).
        indx_y : int or list of int of length 2 or 4
            When `indx_y`=i then vector Y is defined as Coor[i,:]-center.
            When `indx_y`=[i,j] then vector Y is defined as Coor[j,:]-Coor[i,:].
            When `indx_y`=[i,j,k,l] then vector Y is defined as
            (Coor[j,:]-Coor[i,:])+(Coor[l,:]-Coor[k,:]).
        '''
Coor_ext=[]
for ii in range(len(self.coor._value)):
Coor_ext.append(self.coor._value[ii])
Coor_ext.append(self._origin)
Coor_ext=np.array(Coor_ext)
Coor_centered,Phi,Psi,Chi,center=CenterMolecule(Coor_ext,indx_center,indx_x,indx_y,print_angles=True)
with position_units("Bohr"):
self.coor=Coordinate(Coor_centered[0,:])
for ii in range(1,len(Coor_centered)-1):
self.coor.add_coor(Coor_centered[ii,:])
self._origin=Coor_centered[len(self.coor._value),:]
self._step=RotateAndMove(self._step,0.0,0.0,0.0,Phi,Psi,Chi)
def dipole(self,output_center=False):
        ''' Numerically calculate the dipole from the density. For a ground state
        electron density it calculates the ground state dipole and for a
        transition density it calculates the transition dipole
Returns
----------
dipole : numpy.array of real (dimension 3)
dipole in ATOMIC UNITS (e*bohr)
Notes
----------
        It calculates Int{-r.rho(r)dxdydz} which is the dipole
'''
# TODO: repair matrix approach to be used also for rotated density
if 0: # This works only for nonrotated grid - change but keep the idea
grid=Grid()
grid.init_from_cub(self)
dipole=np.zeros(3,dtype='f8')
dipole[0]=np.sum(np.multiply(grid.X,self.data))
dipole[1]=np.sum(np.multiply(grid.Y,self.data))
dipole[2]=np.sum(np.multiply(grid.Z,self.data))
dipole = -np.multiply(grid.ddV,dipole)
dV=np.dot(self.step[0,:],np.cross(self.step[1,:],self.step[2,:]))
dipole=np.multiply(-dV,dipole)
return dipole
else:
# more efficient would be to create 3D grids with coordinates then multiply and then sum all
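            # A vectorized sketch of that idea (shown as a comment for reference
            # only; it reuses the attributes from the loop below and would still
            # need the dV scaling and sign flip applied afterwards):
            #   ii, jj, kk = np.meshgrid(np.arange(self.grid[0]),
            #                            np.arange(self.grid[1]),
            #                            np.arange(self.grid[2]), indexing='ij')
            #   rr = (self._origin + ii[..., None]*self._step[0, :]
            #         + jj[..., None]*self._step[1, :] + kk[..., None]*self._step[2, :])
            #   dipole = np.tensordot(self.data, rr, axes=([0, 1, 2], [0, 1, 2]))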
dipole = np.zeros(3,dtype='f8')
center = np.zeros(3,dtype='f8')
for ii in range(self.grid[0]):
for jj in range(self.grid[1]):
for kk in range(self.grid[2]):
rr=self._origin+ii*self._step[0,:]+jj*self._step[1,:]+kk*self._step[2,:]
dipole+=self.data[ii,jj,kk]*rr
center+=np.abs(self.data[ii,jj,kk])*rr
dV=np.dot(self._step[0,:],np.cross(self._step[1,:],self._step[2,:]))
dipole=dipole*dV
center = center/np.sum(np.abs(self.data))
            print('Dipole calculated by function dipole was changed from -dipole to dipole. Make sure that you are using the right value')
if output_center:
return -dipole,center
else:
return -dipole
def dipole_partial(self,x_min=None,x_max=None,y_min=None,y_max=None,z_min=None,z_max=None):
        ''' Numerically calculate the dipole from part of the density. For a
        ground state electron density it calculates the ground state partial
        dipole and for a transition density it calculates the partial transition dipole.
Parameters
----------
x_min,x_max : real (optional - init=None)
Specifies minimal and maximal x coordinate
between which density is used for calculation of dipole. If some of
those values are not specified there is taken minimal resp. maximal
x coordinate of the density.
y_min,y_max : real (optional - init=None)
Specifies minimal and maximal y coordinate
between which density is used for calculation of dipole. If some of
those values are not specified there is taken minimal resp. maximal
y coordinate of the density.
z_min,z_max : real (optional - init=None)
Specifies minimal and maximal z coordinate
between which density is used for calculation of dipole. If some of
those values are not specified there is taken minimal resp. maximal
z coordinate of the density.
Returns
----------
dipole : numpy.array of real (dimension 3)
dipole in ATOMIC UNITS (e*bohr)
        Notes
        ----------
        The resulting dipole is the numerically calculated integral
        Int_{x_min,y_min,z_min}^{x_max,y_max,z_max} (-r.rho(r))dxdydz
'''
        if x_min is None:
            x_min=-1.0e5
        else:
            x_min=PositionUnitsManaged.manager.convert_position_2_internal_u(x_min)
        if x_max is None:
            x_max=1.0e5
        else:
            x_max=PositionUnitsManaged.manager.convert_position_2_internal_u(x_max)
        if y_min is None:
            y_min=-1.0e5
        else:
            y_min=PositionUnitsManaged.manager.convert_position_2_internal_u(y_min)
        if y_max is None:
            y_max=1.0e5
        else:
            y_max=PositionUnitsManaged.manager.convert_position_2_internal_u(y_max)
        if z_min is None:
            z_min=-1.0e5
        else:
            z_min=PositionUnitsManaged.manager.convert_position_2_internal_u(z_min)
        if z_max is None:
            z_max=1.0e5
        else:
            z_max=PositionUnitsManaged.manager.convert_position_2_internal_u(z_max)
# TODO: Convert boundaries from current values to internal
#print(x_min,x_max,y_min,y_max,z_min,z_max)
dipole=np.zeros(3,dtype='f8')
for ii in range(self.grid[0]):
for jj in range(self.grid[1]):
for kk in range(self.grid[2]):
rr=self._origin+ii*self._step[0,:]+jj*self._step[1,:]+kk*self._step[2,:]
if rr[0]>=x_min and rr[0]<=x_max and rr[1]>=y_min and rr[1]<=y_max and rr[2]>=z_min and rr[2]<=z_max:
dipole+=self.data[ii,jj,kk]*rr
dV=np.dot(self._step[0,:],np.cross(self._step[1,:],self._step[2,:]))
dipole=dipole*dV
        print('Dipole calculated by function dipole_partial was changed from -dipole to dipole. Make sure that you are using the right value')
return -dipole
def cut(self,x_min=None,x_max=None,y_min=None,y_max=None,z_min=None,z_max=None):
''' Takes a cut of density. **Works only for original (nonrotated) transition
density with step[0,:] pointing along x axis, step[1,:] pointing along
y axis and step[2,:] pointing along z axis.**
Parameters
----------
        x_min,x_max : real (optional - init=None)
            Specifies the minimal and maximal x coordinate in ATOMIC UNITS (Bohr)
            between which the density is output. If any of these values is not
            specified, the minimal resp. maximal x coordinate of the density is
            used
        y_min,y_max : real (optional - init=None)
            Specifies the minimal and maximal y coordinate in ATOMIC UNITS (Bohr)
            between which the density is output. If any of these values is not
            specified, the minimal resp. maximal y coordinate of the density is
            used
        z_min,z_max : real (optional - init=None)
            Specifies the minimal and maximal z coordinate in ATOMIC UNITS (Bohr)
            between which the density is output. If any of these values is not
            specified, the minimal resp. maximal z coordinate of the density is
            used
Returns
----------
        cuted_density : DensityGrid class
            DensityGrid class with a density which is a subsystem of the original
            density, defined on grid points with coordinates: x_min <= x <= x_max,
            y_min <= y <= y_max and z_min <= z <= z_max.
'''
        if x_min is None:
            if self._step[0,0]>0:
                x_min=self._origin[0]
            else:
                x_min=self._origin[0]+self._step[0,0]*(self.grid[0]-1)
        else:
            x_min=PositionUnitsManaged.manager.convert_position_2_internal_u(x_min)
        if x_max is None:
            if self._step[0,0]>0:
                x_max=self._origin[0]+self._step[0,0]*(self.grid[0]-1)
            else:
                x_max=self._origin[0]
        else:
            x_max=PositionUnitsManaged.manager.convert_position_2_internal_u(x_max)
        if y_min is None:
            if self._step[1,1]>0:
                y_min=self._origin[1]
            else:
                y_min=self._origin[1]+self._step[1,1]*(self.grid[1]-1)
        else:
            y_min=PositionUnitsManaged.manager.convert_position_2_internal_u(y_min)
        if y_max is None:
            if self._step[1,1]>0:
                y_max=self._origin[1]+self._step[1,1]*(self.grid[1]-1)
            else:
                y_max=self._origin[1]
        else:
            y_max=PositionUnitsManaged.manager.convert_position_2_internal_u(y_max)
        if z_min is None:
            if self._step[2,2]>0:
                z_min=self._origin[2]
            else:
                z_min=self._origin[2]+self._step[2,2]*(self.grid[2]-1)
        else:
            z_min=PositionUnitsManaged.manager.convert_position_2_internal_u(z_min)
        if z_max is None:
            if self._step[2,2]>0:
                z_max=self._origin[2]+self._step[2,2]*(self.grid[2]-1)
            else:
                z_max=self._origin[2]
        else:
            z_max=PositionUnitsManaged.manager.convert_position_2_internal_u(z_max)
#print(x_min,x_max,y_min,y_max,z_min,z_max)
x=[0,0]
if self._step[0,0]>0:
for ii in range(self.grid[0]):
if self._origin[0]+self._step[0,0]*ii<x_min:
x[0]=ii+1
elif self._origin[0]+self._step[0,0]*ii>x_max and x[1]==0:
x[1]=ii-1
if x[1]==0:
x[1]=self.grid[0]
else:
for ii in range(self.grid[0]):
if self._origin[0]+self._step[0,0]*ii>x_max:
x[0]=ii+1
elif self._origin[0]+self._step[0,0]*ii<x_min and x[1]==0:
x[1]=ii-1
if x[1]==0:
x[1]=self.grid[0]
y=[0,0]
if self._step[1,1]>0:
for ii in range(self.grid[1]):
if self._origin[1]+self._step[1,1]*ii<y_min:
y[0]=ii+1
elif self._origin[1]+self._step[1,1]*ii>y_max and y[1]==0:
y[1]=ii-1
if y[1]==0:
y[1]=self.grid[1]
else:
for ii in range(self.grid[1]):
if self._origin[1]+self._step[1,1]*ii>y_max:
y[0]=ii+1
elif self._origin[1]+self._step[1,1]*ii<y_min and y[1]==0:
y[1]=ii-1
if y[1]==0:
                y[1]=self.grid[1]
z=[0,0]
if self._step[2,2]>0:
for ii in range(self.grid[2]):
if self._origin[2]+self._step[2,2]*ii<z_min:
z[0]=ii+1
elif self._origin[2]+self._step[2,2]*ii>z_max and z[1]==0:
z[1]=ii-1
if z[1]==0:
z[1]=self.grid[2]
else:
            # z step is negative
for ii in range(self.grid[2]):
if self._origin[2]+self._step[2,2]*ii>z_max:
z[0]=ii+1
elif self._origin[2]+self._step[2,2]*ii<z_min and z[1]==0:
z[1]=ii-1
if z[1]==0:
z[1]=self.grid[2]
#print(x,y,z)
origin_new=self._origin[:]+self._step[0,:]*x[0]+self._step[1,:]*y[0]+self._step[2,:]*z[0]
grid_new=np.array([x[1]-x[0],y[1]-y[0],z[1]-z[0]])
data_new=self.data[x[0]:x[1],y[0]:y[1],z[0]:z[1]]
step_new=np.copy(self._step)
with position_units("Bohr"):
cuted_density=DensityGrid(origin_new,grid_new,step_new,data_new,typ=np.copy(self.type),mo_indx=np.copy(self.indx),Coor=np.copy(self.coor.value),At_charge=np.copy(self.at_charge))
return cuted_density
def calc_atomic_properties(self):
        ''' Calculate atomic charges and atomic dipoles by numerically integrating
        the density. First it is determined which atom is closest to each grid
        point, and a small delta charge and dipole is added to that atom.
        Atomic charges are calculated as a sum of the density over the grid points
        for which this atom is the closest one. The atomic dipoles are calculated
        as the vector from the atom to the grid point multiplied by the density.
Returns
----------
charges : numpy.array of real (dimension Natoms)
Atomic charges for every atom of the system
dipoles : numpy.array of real (dimension Natoms x 3)
Atomic dipole in ATOMIC UNITS (e*bohr) for every atom
'''
Nat=len(self.coor._value)
charges=np.zeros(Nat,dtype='f8')
dipoles=np.zeros((Nat,3),dtype='f8')
for ii in range(self.grid[0]):
for jj in range(self.grid[1]):
for kk in range(self.grid[2]):
rr=self._origin+ii*self._step[0,:]+jj*self._step[1,:]+kk*self._step[2,:]
dist_min=30.0
index=0
for ll in range(len(self.coor._value)):
dist=np.sqrt(np.dot(rr-self.coor._value[ll],rr-self.coor._value[ll]))
if dist<dist_min:
index=ll
dist_min=np.copy(dist)
charges[index]+=self.data[ii,jj,kk]
dipoles[index,:]+=(rr-self.coor._value[index])*self.data[ii,jj,kk]
dV=np.dot(self._step[0,:],np.cross(self._step[1,:],self._step[2,:]))
        print('Atomic dipole calculated by function calc_atomic_properties was changed from -dipole to dipole. Make sure that you are using the right value')
return charges*dV,-dipoles*dV
def _elpot_at_position(self,position):
        ''' Calculate the electrostatic potential for an electronic density,
        assuming it is composed of cubic boxes with a homogeneous charge distribution.
        **THIS IS A VERY CRUDE APPROXIMATION; BY COMPARING THE POTENTIAL
        CALCULATED FROM ATOMIC ORBITALS WITH THIS ONE, I HAVE SHOWN THAT IT
        DOES NOT PROVIDE (NOT EVEN CLOSE TO) THE REAL POTENTIAL**
Parameters
----------
position : numpy.array of real (dimension 3)
Coordinates in ATOMIC UNITS (Bohr) of point where we would like to
calculate electrostatic potential
Returns
----------
result : real
Potential at `position` in ATOMIC UNITS
'''
result=0.0
def aux_function(rr,stepx,stepy,stepz,t):
res=scipy.special.erf(t*(stepx/2-rr[0]))+scipy.special.erf(t*(stepx/2+rr[0]))
res=res * (scipy.special.erf(t*(stepy/2-rr[1]))+scipy.special.erf(t*(stepy/2+rr[1])))
res=res * (scipy.special.erf(t*(stepz/2-rr[2]))+scipy.special.erf(t*(stepz/2+rr[2])))
res=res/t**3
return res
rr1=np.copy(position)
for m in range(self.grid[0]):
for n in range(self.grid[1]):
for o in range(self.grid[2]):
rr2=self._origin + m*self._step[0,:]+n*self._step[1,:]+o*self._step[2,:]
dr=rr1-rr2
tmax=max([5/np.abs(np.abs(dr[0])-np.abs(self._step[0,0]/2)),5/np.abs(np.abs(dr[1])-np.abs(self._step[1,1]/2)),5/np.abs(np.abs(dr[2])-np.abs(self._step[2,2]/2))])
#if tmax<5e-1:
# ESP_Grid[i,j,k]-=self.data[m,n,o]/np.sqrt(np.dot(dr,dr))*dV
#else:
tmax=max([200,tmax])
aux_function_partial = partial(aux_function,dr,self._step[0,0],self._step[1,1],self._step[2,2])
result-=self.data[m,n,o]*np.pi/4*scipy.integrate.quadrature(aux_function_partial,0,tmax,tol=1e-05,maxiter=100)[0]
return result
def _dens_to_ESP2(self):
        ''' This should create an electrostatic potential grid file from an
        electronic density, assuming it is composed of cubic boxes with a
        homogeneous charge distribution.
        **THIS IS A VERY CRUDE APPROXIMATION; BY COMPARING THE POTENTIAL
        CALCULATED FROM ATOMIC ORBITALS WITH THIS ONE, I HAVE SHOWN THAT IT
        DOES NOT PROVIDE (NOT EVEN CLOSE TO) THE REAL POTENTIAL**
'''
ESP=DensityGrid(self.origin,self.grid,self.step,None,Coor=self.coor.value,At_charge=self.at_charge)
''' Calculate volume element '''
vecX=np.copy(self._step[0,:])
vecY=np.copy(self._step[1,:])
vecZ=np.cross(vecX,vecY)
dV=np.dot(vecZ,self._step[2,:])
ESP._origin=ESP._origin+self._step[0,:]/2.0+self._step[1,:]/2.0+self._step[2,:]/2.0
ESP_Grid=np.zeros((self.grid[0],self.grid[1],self.grid[2]),dtype='f8')
def aux_function(rr,stepx,stepy,stepz,t):
res=scipy.special.erf(t*(stepx/2-rr[0]))+scipy.special.erf(t*(stepx/2+rr[0]))
res=res * (scipy.special.erf(t*(stepy/2-rr[1]))+scipy.special.erf(t*(stepy/2+rr[1])))
res=res * (scipy.special.erf(t*(stepz/2-rr[2]))+scipy.special.erf(t*(stepz/2+rr[2])))
res=res/t**3
return res
for i in range(ESP.grid[0]):
print(i,'/',ESP.grid[0])
for j in range(ESP.grid[1]):
for k in range(ESP.grid[2]):
rr1=ESP._origin + i*ESP._step[0,:]+j*ESP._step[1,:]+k*ESP._step[2,:]
for m in range(self.grid[0]):
for n in range(self.grid[1]):
for o in range(self.grid[2]):
rr2=self._origin + m*self._step[0,:]+n*self._step[1,:]+o*self._step[2,:]
dr=rr1-rr2
tmax=max([5/np.abs(np.abs(dr[0])-np.abs(self._step[0,0]/2)),5/np.abs(np.abs(dr[1])-np.abs(self._step[1,1]/2)),5/np.abs(np.abs(dr[2])-np.abs(self._step[2,2]/2))])
if tmax<5e-1:
ESP_Grid[i,j,k]-=self.data[m,n,o]/np.sqrt(np.dot(dr,dr))*dV
else:
tmax=max([200,tmax])
aux_function_partial = partial(aux_function,dr,self._step[0,0],self._step[1,1],self._step[2,2])
ESP_Grid[i,j,k]-=self.data[m,n,o]*np.sqrt(np.pi)/4*scipy.integrate.quadrature(aux_function_partial,0,tmax,tol=1e-05,maxiter=100)[0]
ESP_Grid[i,j,k]-=np.pi/tmax**2*self.data[i,j,k]
#ESP_Grid=ESP_Grid
#for m in range(ESP.grid[0]):
# for n in range(ESP.grid[1]):
# for o in range(ESP.grid[2]):
# for ii in range(len(self.coor)):
# dr=ESP.origin + m*ESP.step[0,:]+n*ESP.step[1,:]+o*ESP.step[2,:]-self.coor[ii]
# norm2=np.sqrt(np.dot(dr,dr))
# ESP_Grid[m,n,o]+=self.at_charge[ii]/norm2
ESP.data=np.copy(ESP_Grid)
return ESP
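# Minimal usage sketch (kept as a comment: the module uses relative imports and
# is meant to be imported as part of the package; 'density.cub' is a
# hypothetical file name):
#
#     dens = DensityGrid(None, None, None, None)
#     dens.import_cub('density.cub')
#     mu = dens.dipole()                        # dipole in ATOMIC UNITS (e*bohr)
#     charges, dipoles = dens.calc_atomic_properties()
#     dens.output('density_copy.cub')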
|
{"hexsha": "57d33c109f584c7fde6ff3246ab70608819e9834", "size": 33797, "ext": "py", "lang": "Python", "max_stars_repo_path": "QChemTool/QuantumChem/Classes/density.py", "max_stars_repo_name": "slamavl/QChemTool", "max_stars_repo_head_hexsha": "b6b17adf6cfa8ac1db47acba93aab1ee49c1be47", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "QChemTool/QuantumChem/Classes/density.py", "max_issues_repo_name": "slamavl/QChemTool", "max_issues_repo_head_hexsha": "b6b17adf6cfa8ac1db47acba93aab1ee49c1be47", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2018-01-03T12:08:41.000Z", "max_issues_repo_issues_event_max_datetime": "2018-01-03T12:08:41.000Z", "max_forks_repo_path": "QChemTool/QuantumChem/Classes/density.py", "max_forks_repo_name": "slamavl/QChemTool", "max_forks_repo_head_hexsha": "b6b17adf6cfa8ac1db47acba93aab1ee49c1be47", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.4967824968, "max_line_length": 198, "alphanum_fraction": 0.533834364, "include": true, "reason": "import numpy,import scipy", "num_tokens": 8444}
|
#!/usr/bin/env python
#
# -------------------------------------------------------------------------------------
#
# Copyright (c) 2016, ytirahc, www.mobiledevtrek.com
# All rights reserved. Copyright holder cannot be held liable for any damages.
#
# Distributed under the Apache License (ASL).
# http://www.apache.org/licenses/
# *****
# Description: Python script to resize images by percentage and apply sepia tone effect
# using OpenCV and NumPy (developed with & tested against Python 3.5, OpenCV 3.1 and
# NumPy 1.10.4)
# Resize
# The jpg image files of the specified input directory are resized by the specified percentages
# in the array resizePercentages and saved to the specified output directory.
# Sepia
# The jpg image files of the specified input directory have the sepia tone effect applied and saved
# to the specified output directory.
#
# Usage: Running the script will both resize and apply the sepia tone effect to the jpg images in the
# input directory, saving the results to the output directory
# *****
import os
import numpy as np
import cv2
# *****
# SoftLight
#
# Description: Implements the soft light blending mode as per w3c
# https://en.wikipedia.org/wiki/Blend_modes#Soft_Light
#
# Parameters:
# inTopImg : Open OpenCV image (top)
# inBottomImg : Open OpenCV image (bottom)
# *****
def SoftLight(inTopImg,inBottomImg):
# Normalize color values to between 0 and 1
topImgArray = np.asarray(inTopImg) / 255.0
bottomImgArray = np.asarray(inBottomImg) / 255.0
softLightImgArray = SoftLightF(topImgArray, bottomImgArray)
# Convert colors back to between 0 to 255
softLightImgArray = softLightImgArray * 255.0
return softLightImgArray
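# Note: the result is returned as a float array scaled back to [0, 255];
# callers may want to cast it to np.uint8 before saving.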
# *****
# SoftLightF
#
# Description: Implements f(bottom image, top image) portion of w3c soft light blending equation
#
# Parameters:
# inTopImgArray : Top image as array
# inBottomImgArray : Bottom image as array
# *****
def SoftLightF(inTopImgArray,inBottomImgArray):
softLightFArray = np.where(inTopImgArray <= 0.5,inBottomImgArray - ((1 - (2 * inTopImgArray)) * inBottomImgArray * (1 - inBottomImgArray)),inBottomImgArray + (2 * inTopImgArray - 1) * (SoftLightG(inBottomImgArray) - inBottomImgArray))
return softLightFArray
# *****
# SoftLightG
#
# Description: Implements g(bottom image) portion of the w3c soft light blending equation
#
# Parameters:
# inBottomImgArray : Bottom image as array
# *****
def SoftLightG(inBottomImgArray):
softLightGArray = np.where(inBottomImgArray <= 0.25, ((16 * inBottomImgArray - 12) * inBottomImgArray + 4) * inBottomImgArray, np.sqrt(inBottomImgArray))
return softLightGArray
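# Quick numeric sanity check of the equations above (values verified by hand,
# shown as comments only):
#   SoftLightF(0.25, 0.5) = 0.5 - (1 - 0.5)*0.5*0.5     = 0.375   (top <= 0.5 branch)
#   SoftLightF(0.75, 0.5) = 0.5 + 0.5*(sqrt(0.5) - 0.5) ~ 0.6036  (uses g = sqrt)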
# *****
# SepiaToneEffectAndSave
#
# Description: Applies sepia tone effect to input image and saves the result
#
# Parameters:
# inImage : An OpenCV image
# inSepiaImageFN : Output path and file name where the result is saved
# *****
def SepiaToneEffectAndSave(inImage, inSepiaImageFN):
# Desaturate (but needs to be RGB for later operations)
imgGrey = cv2.cvtColor(inImage,cv2.COLOR_BGR2GRAY)
imgGrey = cv2.cvtColor(imgGrey,cv2.COLOR_GRAY2RGB) # Need RGB for matrix math
# Apply a slight blur
imgSmooth = cv2.GaussianBlur(imgGrey,(5,5),0)
# Blend the sepia tone color with the greyscale layer using soft light
    imgHeight, imgWidth, imgChannels = imgGrey.shape  # shape is (rows, cols, channels)
    imgSepiaColor = np.zeros((imgHeight,imgWidth,3), np.uint8)
    imgSepiaColor[:,:] = (42,89,226) # BGR
imgSepia = SoftLight(imgSepiaColor, imgSmooth)
cv2.imwrite(inSepiaImageFN,imgSepia)
# *****
# ResizeImageByPercentAndSave
#
# Description: Resizes image by specified percentage and saves the result
#
# Parameters:
# inImage : An OpenCV image
# inResizePercentage : Percentage by which to resize image as a non negative integer
# inResizedImageFN : Output path and file name where the result is saved
# *****
def ResizeImageByPercentAndSave(inImage, inResizePercentage, inResizedImageFN):
resizeFraction = inResizePercentage / 100
imgResize = cv2.resize(inImage, (0,0), fx=resizeFraction, fy=resizeFraction)
cv2.imwrite(inResizedImageFN,imgResize)
batchInputImageDir = os.path.join("..","images","in") # Input directory where jpg files reside
batchOutputImageDir = os.path.join("..","images","out") # Output directory where results are saved as jpg image files
resizePercentages = [75, 50, 25] # Percentages by which to resize the input images
# Iterate through all jpgs in the input directory
for jpgFile in os.listdir(batchInputImageDir):
if jpgFile.endswith(".jpg"): # Process jpg files only
# Determine full path and filename
imageName, imageExt = os.path.splitext(jpgFile)
batchInImageFN = os.path.join(batchInputImageDir, jpgFile)
print("Currently processing image: " + batchInImageFN)
# Open the input image to process
img = cv2.imread(batchInImageFN)
# Resize image by given percentages
for resizePercentage in resizePercentages:
batchOutImageFN = os.path.join(batchOutputImageDir, imageName + "_" + str(resizePercentage) + ".jpg")
ResizeImageByPercentAndSave(img, resizePercentage, batchOutImageFN)
# Apply the sepia tone effect
batchOutImageFN = os.path.join(batchOutputImageDir, imageName + "_sepia.jpg")
SepiaToneEffectAndSave(img, batchOutImageFN)
print("Finished processing all jpg images in input directory: " + batchInputImageDir)
print("Output images files located in the directory: " + batchOutputImageDir)
|
{"hexsha": "f75d45f9c5d0de212de2f63603282d3ba1993815", "size": 5864, "ext": "py", "lang": "Python", "max_stars_repo_path": "posts/032016/BatchProcessingOpenCV.py", "max_stars_repo_name": "ytirahc/mobile-dev-trek", "max_stars_repo_head_hexsha": "336602fbc6f91776b6a3e8b845dc2c87d5f4e592", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-10-06T06:44:51.000Z", "max_stars_repo_stars_event_max_datetime": "2020-10-06T06:48:40.000Z", "max_issues_repo_path": "posts/032016/BatchProcessingOpenCV.py", "max_issues_repo_name": "ytirahc/mobile-dev-trek", "max_issues_repo_head_hexsha": "336602fbc6f91776b6a3e8b845dc2c87d5f4e592", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "posts/032016/BatchProcessingOpenCV.py", "max_forks_repo_name": "ytirahc/mobile-dev-trek", "max_forks_repo_head_hexsha": "336602fbc6f91776b6a3e8b845dc2c87d5f4e592", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.3253012048, "max_line_length": 239, "alphanum_fraction": 0.6804229195, "include": true, "reason": "import numpy", "num_tokens": 1449}
|
[STATEMENT]
lemma has_distinguishing_Eq: "has_distinguishing (Eq (V v) (L l)) G \<Longrightarrow> \<exists>l'. (Eq (V v) (L l')) \<in> set G \<and> l \<noteq> l'"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. has_distinguishing (Eq (V v) (L l)) G \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set G \<and> l \<noteq> l'
[PROOF STEP]
proof (induct G)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. has_distinguishing (Eq (V v) (L l)) [] \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set [] \<and> l \<noteq> l'
2. \<And>a G. \<lbrakk>has_distinguishing (Eq (V v) (L l)) G \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set G \<and> l \<noteq> l'; has_distinguishing (Eq (V v) (L l)) (a # G)\<rbrakk> \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set (a # G) \<and> l \<noteq> l'
[PROOF STEP]
case (Cons a G)
[PROOF STATE]
proof (state)
this:
has_distinguishing (Eq (V v) (L l)) G \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set G \<and> l \<noteq> l'
has_distinguishing (Eq (V v) (L l)) (a # G)
goal (2 subgoals):
1. has_distinguishing (Eq (V v) (L l)) [] \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set [] \<and> l \<noteq> l'
2. \<And>a G. \<lbrakk>has_distinguishing (Eq (V v) (L l)) G \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set G \<and> l \<noteq> l'; has_distinguishing (Eq (V v) (L l)) (a # G)\<rbrakk> \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set (a # G) \<and> l \<noteq> l'
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
has_distinguishing (Eq (V v) (L l)) G \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set G \<and> l \<noteq> l'
has_distinguishing (Eq (V v) (L l)) (a # G)
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
has_distinguishing (Eq (V v) (L l)) G \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set G \<and> l \<noteq> l'
has_distinguishing (Eq (V v) (L l)) (a # G)
goal (1 subgoal):
1. \<exists>l'. Eq (V v) (L l') \<in> set (a # G) \<and> l \<noteq> l'
[PROOF STEP]
apply (cases a)
[PROOF STATE]
proof (prove)
goal (5 subgoals):
1. \<And>x1. \<lbrakk>has_distinguishing (Eq (V v) (L l)) G \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set G \<and> l \<noteq> l'; has_distinguishing (Eq (V v) (L l)) (a # G); a = Bc x1\<rbrakk> \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set (a # G) \<and> l \<noteq> l'
2. \<And>x21 x22. \<lbrakk>has_distinguishing (Eq (V v) (L l)) G \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set G \<and> l \<noteq> l'; has_distinguishing (Eq (V v) (L l)) (a # G); a = Eq x21 x22\<rbrakk> \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set (a # G) \<and> l \<noteq> l'
3. \<And>x31 x32. \<lbrakk>has_distinguishing (Eq (V v) (L l)) G \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set G \<and> l \<noteq> l'; has_distinguishing (Eq (V v) (L l)) (a # G); a = Gt x31 x32\<rbrakk> \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set (a # G) \<and> l \<noteq> l'
4. \<And>x41 x42. \<lbrakk>has_distinguishing (Eq (V v) (L l)) G \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set G \<and> l \<noteq> l'; has_distinguishing (Eq (V v) (L l)) (a # G); a = In x41 x42\<rbrakk> \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set (a # G) \<and> l \<noteq> l'
5. \<And>x51 x52. \<lbrakk>has_distinguishing (Eq (V v) (L l)) G \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set G \<and> l \<noteq> l'; has_distinguishing (Eq (V v) (L l)) (a # G); a = Nor x51 x52\<rbrakk> \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set (a # G) \<and> l \<noteq> l'
[PROOF STEP]
apply simp
[PROOF STATE]
proof (prove)
goal (4 subgoals):
1. \<And>x21 x22. \<lbrakk>has_distinguishing (Eq (V v) (L l)) G \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set G \<and> l \<noteq> l'; has_distinguishing (Eq (V v) (L l)) (a # G); a = Eq x21 x22\<rbrakk> \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set (a # G) \<and> l \<noteq> l'
2. \<And>x31 x32. \<lbrakk>has_distinguishing (Eq (V v) (L l)) G \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set G \<and> l \<noteq> l'; has_distinguishing (Eq (V v) (L l)) (a # G); a = Gt x31 x32\<rbrakk> \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set (a # G) \<and> l \<noteq> l'
3. \<And>x41 x42. \<lbrakk>has_distinguishing (Eq (V v) (L l)) G \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set G \<and> l \<noteq> l'; has_distinguishing (Eq (V v) (L l)) (a # G); a = In x41 x42\<rbrakk> \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set (a # G) \<and> l \<noteq> l'
4. \<And>x51 x52. \<lbrakk>has_distinguishing (Eq (V v) (L l)) G \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set G \<and> l \<noteq> l'; has_distinguishing (Eq (V v) (L l)) (a # G); a = Nor x51 x52\<rbrakk> \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set (a # G) \<and> l \<noteq> l'
[PROOF STEP]
apply (case_tac x21)
[PROOF STATE]
proof (prove)
goal (8 subgoals):
1. \<And>x21 x22 x1. \<lbrakk>has_distinguishing (Eq (V v) (L l)) G \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set G \<and> l \<noteq> l'; has_distinguishing (Eq (V v) (L l)) (a # G); a = Eq x21 x22; x21 = L x1\<rbrakk> \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set (a # G) \<and> l \<noteq> l'
2. \<And>x21 x22 x2. \<lbrakk>has_distinguishing (Eq (V v) (L l)) G \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set G \<and> l \<noteq> l'; has_distinguishing (Eq (V v) (L l)) (a # G); a = Eq x21 x22; x21 = V x2\<rbrakk> \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set (a # G) \<and> l \<noteq> l'
3. \<And>x21 x22 x31 x32. \<lbrakk>has_distinguishing (Eq (V v) (L l)) G \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set G \<and> l \<noteq> l'; has_distinguishing (Eq (V v) (L l)) (a # G); a = Eq x21 x22; x21 = Plus x31 x32\<rbrakk> \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set (a # G) \<and> l \<noteq> l'
4. \<And>x21 x22 x41 x42. \<lbrakk>has_distinguishing (Eq (V v) (L l)) G \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set G \<and> l \<noteq> l'; has_distinguishing (Eq (V v) (L l)) (a # G); a = Eq x21 x22; x21 = Minus x41 x42\<rbrakk> \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set (a # G) \<and> l \<noteq> l'
5. \<And>x21 x22 x51 x52. \<lbrakk>has_distinguishing (Eq (V v) (L l)) G \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set G \<and> l \<noteq> l'; has_distinguishing (Eq (V v) (L l)) (a # G); a = Eq x21 x22; x21 = Times x51 x52\<rbrakk> \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set (a # G) \<and> l \<noteq> l'
6. \<And>x31 x32. \<lbrakk>has_distinguishing (Eq (V v) (L l)) G \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set G \<and> l \<noteq> l'; has_distinguishing (Eq (V v) (L l)) (a # G); a = Gt x31 x32\<rbrakk> \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set (a # G) \<and> l \<noteq> l'
7. \<And>x41 x42. \<lbrakk>has_distinguishing (Eq (V v) (L l)) G \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set G \<and> l \<noteq> l'; has_distinguishing (Eq (V v) (L l)) (a # G); a = In x41 x42\<rbrakk> \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set (a # G) \<and> l \<noteq> l'
8. \<And>x51 x52. \<lbrakk>has_distinguishing (Eq (V v) (L l)) G \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set G \<and> l \<noteq> l'; has_distinguishing (Eq (V v) (L l)) (a # G); a = Nor x51 x52\<rbrakk> \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set (a # G) \<and> l \<noteq> l'
[PROOF STEP]
apply simp
[PROOF STATE]
proof (prove)
goal (7 subgoals):
1. \<And>x21 x22 x2. \<lbrakk>has_distinguishing (Eq (V v) (L l)) G \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set G \<and> l \<noteq> l'; has_distinguishing (Eq (V v) (L l)) (a # G); a = Eq x21 x22; x21 = V x2\<rbrakk> \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set (a # G) \<and> l \<noteq> l'
2. \<And>x21 x22 x31 x32. \<lbrakk>has_distinguishing (Eq (V v) (L l)) G \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set G \<and> l \<noteq> l'; has_distinguishing (Eq (V v) (L l)) (a # G); a = Eq x21 x22; x21 = Plus x31 x32\<rbrakk> \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set (a # G) \<and> l \<noteq> l'
3. \<And>x21 x22 x41 x42. \<lbrakk>has_distinguishing (Eq (V v) (L l)) G \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set G \<and> l \<noteq> l'; has_distinguishing (Eq (V v) (L l)) (a # G); a = Eq x21 x22; x21 = Minus x41 x42\<rbrakk> \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set (a # G) \<and> l \<noteq> l'
4. \<And>x21 x22 x51 x52. \<lbrakk>has_distinguishing (Eq (V v) (L l)) G \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set G \<and> l \<noteq> l'; has_distinguishing (Eq (V v) (L l)) (a # G); a = Eq x21 x22; x21 = Times x51 x52\<rbrakk> \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set (a # G) \<and> l \<noteq> l'
5. \<And>x31 x32. \<lbrakk>has_distinguishing (Eq (V v) (L l)) G \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set G \<and> l \<noteq> l'; has_distinguishing (Eq (V v) (L l)) (a # G); a = Gt x31 x32\<rbrakk> \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set (a # G) \<and> l \<noteq> l'
6. \<And>x41 x42. \<lbrakk>has_distinguishing (Eq (V v) (L l)) G \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set G \<and> l \<noteq> l'; has_distinguishing (Eq (V v) (L l)) (a # G); a = In x41 x42\<rbrakk> \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set (a # G) \<and> l \<noteq> l'
7. \<And>x51 x52. \<lbrakk>has_distinguishing (Eq (V v) (L l)) G \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set G \<and> l \<noteq> l'; has_distinguishing (Eq (V v) (L l)) (a # G); a = Nor x51 x52\<rbrakk> \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set (a # G) \<and> l \<noteq> l'
[PROOF STEP]
apply (case_tac x22)
[PROOF STATE]
proof (prove)
goal (11 subgoals):
1. \<And>x21 x22 x2 x1. \<lbrakk>has_distinguishing (Eq (V v) (L l)) G \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set G \<and> l \<noteq> l'; has_distinguishing (Eq (V v) (L l)) (a # G); a = Eq x21 x22; x21 = V x2; x22 = L x1\<rbrakk> \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set (a # G) \<and> l \<noteq> l'
2. \<And>x21 x22 x2 x2a. \<lbrakk>has_distinguishing (Eq (V v) (L l)) G \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set G \<and> l \<noteq> l'; has_distinguishing (Eq (V v) (L l)) (a # G); a = Eq x21 x22; x21 = V x2; x22 = V x2a\<rbrakk> \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set (a # G) \<and> l \<noteq> l'
3. \<And>x21 x22 x2 x31 x32. \<lbrakk>has_distinguishing (Eq (V v) (L l)) G \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set G \<and> l \<noteq> l'; has_distinguishing (Eq (V v) (L l)) (a # G); a = Eq x21 x22; x21 = V x2; x22 = Plus x31 x32\<rbrakk> \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set (a # G) \<and> l \<noteq> l'
4. \<And>x21 x22 x2 x41 x42. \<lbrakk>has_distinguishing (Eq (V v) (L l)) G \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set G \<and> l \<noteq> l'; has_distinguishing (Eq (V v) (L l)) (a # G); a = Eq x21 x22; x21 = V x2; x22 = Minus x41 x42\<rbrakk> \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set (a # G) \<and> l \<noteq> l'
5. \<And>x21 x22 x2 x51 x52. \<lbrakk>has_distinguishing (Eq (V v) (L l)) G \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set G \<and> l \<noteq> l'; has_distinguishing (Eq (V v) (L l)) (a # G); a = Eq x21 x22; x21 = V x2; x22 = Times x51 x52\<rbrakk> \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set (a # G) \<and> l \<noteq> l'
6. \<And>x21 x22 x31 x32. \<lbrakk>has_distinguishing (Eq (V v) (L l)) G \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set G \<and> l \<noteq> l'; has_distinguishing (Eq (V v) (L l)) (a # G); a = Eq x21 x22; x21 = Plus x31 x32\<rbrakk> \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set (a # G) \<and> l \<noteq> l'
7. \<And>x21 x22 x41 x42. \<lbrakk>has_distinguishing (Eq (V v) (L l)) G \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set G \<and> l \<noteq> l'; has_distinguishing (Eq (V v) (L l)) (a # G); a = Eq x21 x22; x21 = Minus x41 x42\<rbrakk> \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set (a # G) \<and> l \<noteq> l'
8. \<And>x21 x22 x51 x52. \<lbrakk>has_distinguishing (Eq (V v) (L l)) G \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set G \<and> l \<noteq> l'; has_distinguishing (Eq (V v) (L l)) (a # G); a = Eq x21 x22; x21 = Times x51 x52\<rbrakk> \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set (a # G) \<and> l \<noteq> l'
9. \<And>x31 x32. \<lbrakk>has_distinguishing (Eq (V v) (L l)) G \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set G \<and> l \<noteq> l'; has_distinguishing (Eq (V v) (L l)) (a # G); a = Gt x31 x32\<rbrakk> \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set (a # G) \<and> l \<noteq> l'
10. \<And>x41 x42. \<lbrakk>has_distinguishing (Eq (V v) (L l)) G \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set G \<and> l \<noteq> l'; has_distinguishing (Eq (V v) (L l)) (a # G); a = In x41 x42\<rbrakk> \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set (a # G) \<and> l \<noteq> l'
A total of 11 subgoals...
[PROOF STEP]
apply (metis has_distinguishing.simps(2) list.set_intros(1) list.set_intros(2))
[PROOF STATE]
proof (prove)
goal (10 subgoals):
1. \<And>x21 x22 x2 x2a. \<lbrakk>has_distinguishing (Eq (V v) (L l)) G \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set G \<and> l \<noteq> l'; has_distinguishing (Eq (V v) (L l)) (a # G); a = Eq x21 x22; x21 = V x2; x22 = V x2a\<rbrakk> \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set (a # G) \<and> l \<noteq> l'
2. \<And>x21 x22 x2 x31 x32. \<lbrakk>has_distinguishing (Eq (V v) (L l)) G \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set G \<and> l \<noteq> l'; has_distinguishing (Eq (V v) (L l)) (a # G); a = Eq x21 x22; x21 = V x2; x22 = Plus x31 x32\<rbrakk> \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set (a # G) \<and> l \<noteq> l'
3. \<And>x21 x22 x2 x41 x42. \<lbrakk>has_distinguishing (Eq (V v) (L l)) G \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set G \<and> l \<noteq> l'; has_distinguishing (Eq (V v) (L l)) (a # G); a = Eq x21 x22; x21 = V x2; x22 = Minus x41 x42\<rbrakk> \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set (a # G) \<and> l \<noteq> l'
4. \<And>x21 x22 x2 x51 x52. \<lbrakk>has_distinguishing (Eq (V v) (L l)) G \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set G \<and> l \<noteq> l'; has_distinguishing (Eq (V v) (L l)) (a # G); a = Eq x21 x22; x21 = V x2; x22 = Times x51 x52\<rbrakk> \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set (a # G) \<and> l \<noteq> l'
5. \<And>x21 x22 x31 x32. \<lbrakk>has_distinguishing (Eq (V v) (L l)) G \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set G \<and> l \<noteq> l'; has_distinguishing (Eq (V v) (L l)) (a # G); a = Eq x21 x22; x21 = Plus x31 x32\<rbrakk> \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set (a # G) \<and> l \<noteq> l'
6. \<And>x21 x22 x41 x42. \<lbrakk>has_distinguishing (Eq (V v) (L l)) G \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set G \<and> l \<noteq> l'; has_distinguishing (Eq (V v) (L l)) (a # G); a = Eq x21 x22; x21 = Minus x41 x42\<rbrakk> \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set (a # G) \<and> l \<noteq> l'
7. \<And>x21 x22 x51 x52. \<lbrakk>has_distinguishing (Eq (V v) (L l)) G \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set G \<and> l \<noteq> l'; has_distinguishing (Eq (V v) (L l)) (a # G); a = Eq x21 x22; x21 = Times x51 x52\<rbrakk> \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set (a # G) \<and> l \<noteq> l'
8. \<And>x31 x32. \<lbrakk>has_distinguishing (Eq (V v) (L l)) G \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set G \<and> l \<noteq> l'; has_distinguishing (Eq (V v) (L l)) (a # G); a = Gt x31 x32\<rbrakk> \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set (a # G) \<and> l \<noteq> l'
9. \<And>x41 x42. \<lbrakk>has_distinguishing (Eq (V v) (L l)) G \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set G \<and> l \<noteq> l'; has_distinguishing (Eq (V v) (L l)) (a # G); a = In x41 x42\<rbrakk> \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set (a # G) \<and> l \<noteq> l'
10. \<And>x51 x52. \<lbrakk>has_distinguishing (Eq (V v) (L l)) G \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set G \<and> l \<noteq> l'; has_distinguishing (Eq (V v) (L l)) (a # G); a = Nor x51 x52\<rbrakk> \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set (a # G) \<and> l \<noteq> l'
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<exists>l'. Eq (V v) (L l') \<in> set (a # G) \<and> l \<noteq> l'
goal (1 subgoal):
1. has_distinguishing (Eq (V v) (L l)) [] \<Longrightarrow> \<exists>l'. Eq (V v) (L l') \<in> set [] \<and> l \<noteq> l'
[PROOF STEP]
qed auto
|
{"llama_tokens": 7771, "file": "Extended_Finite_State_Machine_Inference_heuristics_Least_Upper_Bound", "length": 12}
|
""" Lucy richardson deconvolution
based on code from Martin Weigert's gputools
"""
import numpy as np
from gputools import OCLArray, fft_convolve, fft, fft_plan
from gputools import OCLElementwiseKernel
from typing import Callable
_multiply_inplace = OCLElementwiseKernel("float *a, float * b", "a[i] = a[i] * b[i]", "mult_inplace")
_divide_inplace = OCLElementwiseKernel(
"float *a, float * b", "b[i] = a[i]*b[i]/(b[i]*b[i]+0.001f)", "divide_inplace"
)
_complex_multiply = OCLElementwiseKernel(
"cfloat_t *a, cfloat_t * b,cfloat_t * res", "res[i] = cfloat_mul(a[i],b[i])", "mult"
)
_complex_multiply_inplace = OCLElementwiseKernel(
"cfloat_t *a, cfloat_t * b", "a[i] = cfloat_mul(a[i],b[i])", "mult_inplace"
)
_complex_divide = OCLElementwiseKernel(
"cfloat_t *a, cfloat_t * b,cfloat_t * res", "res[i] = cfloat_divide(b[i],a[i])", "div"
)
_complex_divide_inplace = OCLElementwiseKernel(
"cfloat_t *a, cfloat_t * b", "b[i] = cfloat_divide(a[i],b[i])", "divide_inplace"
)
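# Note on the kernels above: the real-valued _divide_inplace uses the
# regularized form a*b/(b*b + 0.001) instead of a plain a/b to avoid division
# by (near-)zero values; the complex variants perform exact cfloat arithmetic.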
class Deconvolver_RL_gputools(object):
""" fft deconvolver based on Martin Weigert's gputools fft-based RL implementation
breaks it into two stages to avoid unnecessary processig and allocation
"""
def __init__(self, psf: np.ndarray, psf_is_fftshifted: bool = False, n_iter=10):
""" setup deconvolution for a given shape """
self.shape = psf.shape
if not psf_is_fftshifted:
psf = np.fft.fftshift(psf)
self.n_iter = n_iter
        # Flip the PSF along each axis: RL uses the mirrored PSF for the
        # correlation (adjoint) step. Note: psf[::-1, ::-1] only flips the
        # first two axes, so a 3D PSF would need its third axis flipped too.
        psfflip = psf[::-1, ::-1]
self.psf_g = OCLArray.from_array(psf.astype(np.complex64))
self.psfflip_f_g = OCLArray.from_array(psfflip.astype(np.complex64))
self.plan = fft_plan(self.shape)
# transform psf
fft(self.psf_g, inplace=True)
fft(self.psfflip_f_g, inplace=True)
# get temp
self.tmp_g = OCLArray.empty(psf.shape, np.complex64)
def run(self, data: np.ndarray):
if data.shape != self.shape:
raise ValueError("data and h have to be same shape")
# set up some gpu buffers
data64 = data.astype(np.complex64)
y_g = OCLArray.from_array(data64)
u_g = OCLArray.from_array(data64)
# hflipped_g = OCLArray.from_array(h.astype(np.complex64))
for i in range(self.n_iter):
# logger.info("Iteration: {}".format(i))
fft_convolve(u_g, self.psf_g, plan=self.plan, res_g=self.tmp_g, kernel_is_fft=True)
_complex_divide_inplace(y_g, self.tmp_g)
fft_convolve(self.tmp_g, self.psfflip_f_g, plan=self.plan, inplace=True, kernel_is_fft=True)
_complex_multiply_inplace(u_g, self.tmp_g)
# can abs be calculated on the gpu ?
return np.abs(u_g.get())
## below are simple wrappers to make the interface compatible with the functions
## that were originally written for deconvolution with flowdec
## eventually all of those should be refactored into a class
def init_rl_deconvolver(**kwargs):
""" dummy, nothing to initialiaze for the gputools deconv
Note: maybe one can setup and keep the fft plan, this may require
changes to gputools code.
"""
return None
def get_deconv_function(psf: np.ndarray, deconvolver: object, n_iter: int) -> Callable:
decon = Deconvolver_RL_gputools(psf=psf, n_iter=n_iter)
return decon.run
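# Minimal usage sketch (kept as a comment; 'psf' and 'img' stand for same-shape
# numpy float arrays loaded elsewhere - illustrative names, not module API):
#
#     deconv_fn = get_deconv_function(psf, init_rl_deconvolver(), n_iter=10)
#     restored = deconv_fn(img)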
|
{"hexsha": "392844e95e07ac91f4c3b51f017354822479243f", "size": 3639, "ext": "py", "lang": "Python", "max_stars_repo_path": "lls_dd/deconv_gputools.py", "max_stars_repo_name": "VolkerH/Lattice_Lightsheet_Deskew_Deconv", "max_stars_repo_head_hexsha": "83aa17bf44eb140fd79c42e2790a3240a518d189", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 16, "max_stars_repo_stars_event_min_datetime": "2019-02-07T09:45:45.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-06T01:47:51.000Z", "max_issues_repo_path": "lls_dd/deconv_gputools.py", "max_issues_repo_name": "VolkerH/Lattice_Lightsheet_Deskew_Deconv", "max_issues_repo_head_hexsha": "83aa17bf44eb140fd79c42e2790a3240a518d189", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 29, "max_issues_repo_issues_event_min_datetime": "2019-02-07T10:50:10.000Z", "max_issues_repo_issues_event_max_datetime": "2020-12-01T03:32:06.000Z", "max_forks_repo_path": "lls_dd/deconv_gputools.py", "max_forks_repo_name": "VolkerH/Lattice_Lightsheet_Deskew_Deconv", "max_forks_repo_head_hexsha": "83aa17bf44eb140fd79c42e2790a3240a518d189", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2019-07-09T06:12:36.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-21T16:05:49.000Z", "avg_line_length": 34.9903846154, "max_line_length": 105, "alphanum_fraction": 0.6600714482, "include": true, "reason": "import numpy", "num_tokens": 993}
|
\section{Task Planning (Jim)}
The task planner in this work is based on the framework introduced in \cite{HKG2009}.
A reactive robot task specification is expressed in LTL formulas.
Then the specification is automatically transformed to a correct-by-construction discrete controller.
At last, the controller is continuously implemented to generate desired robot behaviors.
Different from \cite{HKG2009}, in this work we aim to synthesize controllers for multiple robots executing a manipulation task.
Therefore, there are three main challenges in this work for task planning, as described in the following sections.
\subsection{Specification Language for Multi-robot}
In \cite{ChenDSB12,Diaz-MercadoJBE15}, the authors introduce approaches for specifying robot tasks in a formal language and generating controllers for a team of robots.
However, the type of robot task is limited to non-reactive ones, i.e. the environment is assumed to be static and the robot behavior does not depend on the environment state.
The framework in \cite{HKG2009} allows reactive robot tasks, such as ``if the robot senses a soda can, bring the can to the kitchen''.
However, the task specification only issues commands for a single robot.
In order to specify reactive tasks for a team of robots, we need to extend the specification language to be able to express multi-robot tasks.
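As an illustrative sketch (the proposition names here are hypothetical, not taken from the cited works), the soda-can task above could be written in LTL as
\[
\square \, \big( \mathit{sense\_soda} \rightarrow \lozenge \, \mathit{can\_in\_kitchen} \big),
\]
i.e. whenever the robot senses a soda can, it must eventually bring the can to the kitchen.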
\subsection{Tasks Allocation}
The framework employed in this work will generate a centralized controller for a team of robots.
We will then distribute tasks to each robot in a synchronized process.
One method is to handle task allocation in the high-level specification.
In this case, the task distribution for each robot will be encoded in the synthesized controller.
The drawback for this method is that, whenever a new task allocation is required,
possibly due to failing to find a feasible trajectory, the entire discrete controller needs to be resynthesized.
Another method for task allocation is to treat all robots as one virtual robot with multiple redundant action abilities.
The specification will only describe tasks for this virtual robot,
and the task allocation will be determined when executing the synthesized controller.
The disadvantage of this method is that the algorithm will spend more time during execution for computing the task allocation plan.
Both methods will be implemented in this work.
We will compare the performance of both methods and choose the better one.
\subsection{Feedback from Trajectory Planner}
Once tasks are allocated to each robot, the task planner will invoke the trajectory planner to move each robot arm to the desired location without collision.
In the situation where the trajectory planner fails to find a feasible path for an arm,
it will provide feedback to the task planner about such failure.
The task planner will then incorporate the information into the task specification and come up with a new discrete plan.
If no plan can be created, the task planner will notify the user of the possible cause of failure, e.g. that the robot arm cannot reach the desired position.
|
{"hexsha": "9b715d1f7cc731181a8e43000925296d96012b81", "size": 3056, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "proposal/highlevel.tex", "max_stars_repo_name": "jimjing/MAndM", "max_stars_repo_head_hexsha": "efd6581851814ace386cc59285d6290ea096630b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "proposal/highlevel.tex", "max_issues_repo_name": "jimjing/MAndM", "max_issues_repo_head_hexsha": "efd6581851814ace386cc59285d6290ea096630b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "proposal/highlevel.tex", "max_forks_repo_name": "jimjing/MAndM", "max_forks_repo_head_hexsha": "efd6581851814ace386cc59285d6290ea096630b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 87.3142857143, "max_line_length": 168, "alphanum_fraction": 0.817408377, "num_tokens": 587}
|
import os
import json
import torch
import lib.utils.data as torchdata
import cv2
from torchvision import transforms
from scipy.misc import imread, imresize
import numpy as np
# Round x up to the nearest multiple of p (the result x' satisfies x' >= x)
def round2nearest_multiple(x, p):
return ((x - 1) // p + 1) * p
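# e.g. round2nearest_multiple(97, 8) == 104, round2nearest_multiple(96, 8) == 96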
class TrainDataset(torchdata.Dataset):
def __init__(self, odgt, opt, max_sample=-1, batch_per_gpu=1):
self.root_dataset = opt.root_dataset
self.imgSize = opt.imgSize
self.imgMaxSize = opt.imgMaxSize
self.random_flip = opt.random_flip
# max down sampling rate of network to avoid rounding during conv or pooling
self.padding_constant = opt.padding_constant
        # down sampling rate of segm label
self.segm_downsampling_rate = opt.segm_downsampling_rate
self.batch_per_gpu = batch_per_gpu
# classify images into two classes: 1. h > w and 2. h <= w
self.batch_record_list = [[], []]
        # override dataset length when training with batch_per_gpu > 1
self.cur_idx = 0
# mean and std
self.img_transform = transforms.Compose([
transforms.Normalize(mean=[102.9801, 115.9465, 122.7717], std=[1., 1., 1.])
])
self.list_sample = [json.loads(x.rstrip()) for x in open(odgt, 'r')]
self.if_shuffled = False
if max_sample > 0:
self.list_sample = self.list_sample[0:max_sample]
self.num_sample = len(self.list_sample)
assert self.num_sample > 0
print('# samples: {}'.format(self.num_sample))
def _get_sub_batch(self):
while True:
# get a sample record
this_sample = self.list_sample[self.cur_idx]
if this_sample['height'] > this_sample['width']:
self.batch_record_list[0].append(this_sample) # h > w, go to 1st class
else:
self.batch_record_list[1].append(this_sample) # h <= w, go to 2nd class
# update current sample pointer
self.cur_idx += 1
if self.cur_idx >= self.num_sample:
self.cur_idx = 0
np.random.shuffle(self.list_sample)
if len(self.batch_record_list[0]) == self.batch_per_gpu:
batch_records = self.batch_record_list[0]
self.batch_record_list[0] = []
break
elif len(self.batch_record_list[1]) == self.batch_per_gpu:
batch_records = self.batch_record_list[1]
self.batch_record_list[1] = []
break
return batch_records
def __getitem__(self, index):
# NOTE: random shuffle for the first time. shuffle in __init__ is useless
if not self.if_shuffled:
np.random.shuffle(self.list_sample)
self.if_shuffled = True
# get sub-batch candidates
batch_records = self._get_sub_batch()
# resize all images' short edges to the chosen size
if isinstance(self.imgSize, list):
this_short_size = np.random.choice(self.imgSize)
else:
this_short_size = self.imgSize
# calculate the BATCH's height and width
# since we concatenate more than one sample, the batch's h and w must be large enough to hold EACH sample
batch_resized_size = np.zeros((self.batch_per_gpu, 2), np.int32)
for i in range(self.batch_per_gpu):
img_height, img_width = batch_records[i]['height'], batch_records[i]['width']
this_scale = min(this_short_size / min(img_height, img_width), \
self.imgMaxSize / max(img_height, img_width))
img_resized_height, img_resized_width = img_height * this_scale, img_width * this_scale
batch_resized_size[i, :] = img_resized_height, img_resized_width
batch_resized_height = np.max(batch_resized_size[:, 0])
batch_resized_width = np.max(batch_resized_size[:, 1])
# Here we must pad both input image and segmentation map to size h' and w' so that p | h' and p | w'
batch_resized_height = int(round2nearest_multiple(batch_resized_height, self.padding_constant))
batch_resized_width = int(round2nearest_multiple(batch_resized_width, self.padding_constant))
assert self.padding_constant >= self.segm_downsampling_rate,\
'padding constant must be greater than or equal to the segm downsampling rate'
batch_images = torch.zeros(self.batch_per_gpu, 3, batch_resized_height, batch_resized_width)
batch_segms = torch.zeros(self.batch_per_gpu, batch_resized_height // self.segm_downsampling_rate, \
batch_resized_width // self.segm_downsampling_rate).long()
for i in range(self.batch_per_gpu):
this_record = batch_records[i]
# load image and label
image_path = os.path.join(self.root_dataset, this_record['fpath_img'])
segm_path = os.path.join(self.root_dataset, this_record['fpath_segm'])
img = imread(image_path, mode='RGB')
segm = imread(segm_path)
assert(img.ndim == 3)
assert(segm.ndim == 2)
assert(img.shape[0] == segm.shape[0])
assert(img.shape[1] == segm.shape[1])
if self.random_flip:
random_flip = np.random.choice([0, 1])
if random_flip == 1:
img = cv2.flip(img, 1)
segm = cv2.flip(segm, 1)
# note that each sample within a mini batch has different scale param
img = imresize(img, (batch_resized_size[i, 0], batch_resized_size[i, 1]), interp='bilinear')
segm = imresize(segm, (batch_resized_size[i, 0], batch_resized_size[i, 1]), interp='nearest')
# to avoid seg label misalignment
segm_rounded_height = round2nearest_multiple(segm.shape[0], self.segm_downsampling_rate)
segm_rounded_width = round2nearest_multiple(segm.shape[1], self.segm_downsampling_rate)
segm_rounded = np.zeros((segm_rounded_height, segm_rounded_width), dtype='uint8')
segm_rounded[:segm.shape[0], :segm.shape[1]] = segm
segm = imresize(segm_rounded, (segm_rounded.shape[0] // self.segm_downsampling_rate, \
segm_rounded.shape[1] // self.segm_downsampling_rate), \
interp='nearest')
# image to float
img = img.astype(np.float32)[:, :, ::-1] # RGB to BGR!!!
img = img.transpose((2, 0, 1))
img = self.img_transform(torch.from_numpy(img.copy()))
batch_images[i][:, :img.shape[1], :img.shape[2]] = img
batch_segms[i][:segm.shape[0], :segm.shape[1]] = torch.from_numpy(segm.astype(np.int)).long()
batch_segms = batch_segms - 1 # label from -1 to 149
output = dict()
output['img_data'] = batch_images
output['seg_label'] = batch_segms
return output
def __len__(self):
return int(1e6) # It's a fake length due to the trick that every loader maintains its own list
# return self.num_sample
class ValDataset(torchdata.Dataset):
def __init__(self, odgt, opt, max_sample=-1, start_idx=-1, end_idx=-1):
self.root_dataset = opt.root_dataset
self.imgSize = opt.imgSize
self.imgMaxSize = opt.imgMaxSize
# max down sampling rate of network to avoid rounding during conv or pooling
self.padding_constant = opt.padding_constant
# mean and std
self.img_transform = transforms.Compose([
transforms.Normalize(mean=[102.9801, 115.9465, 122.7717], std=[1., 1., 1.])
])
self.list_sample = [json.loads(x.rstrip()) for x in open(odgt, 'r')]
if max_sample > 0:
self.list_sample = self.list_sample[0:max_sample]
if start_idx >= 0 and end_idx >= 0: # divide file list
self.list_sample = self.list_sample[start_idx:end_idx]
self.num_sample = len(self.list_sample)
assert self.num_sample > 0
print('# samples: {}'.format(self.num_sample))
def __getitem__(self, index):
this_record = self.list_sample[index]
# load image and label
image_path = os.path.join(self.root_dataset, this_record['fpath_img'])
segm_path = os.path.join(self.root_dataset, this_record['fpath_segm'])
img = imread(image_path, mode='RGB')
img = img[:, :, ::-1] # RGB to BGR!!!
segm = imread(segm_path)
ori_height, ori_width, _ = img.shape
img_resized_list = []
for this_short_size in self.imgSize:
# calculate target height and width
scale = min(this_short_size / float(min(ori_height, ori_width)),
self.imgMaxSize / float(max(ori_height, ori_width)))
target_height, target_width = int(ori_height * scale), int(ori_width * scale)
# to avoid rounding in network
target_height = round2nearest_multiple(target_height, self.padding_constant)
target_width = round2nearest_multiple(target_width, self.padding_constant)
# resize
img_resized = cv2.resize(img.copy(), (target_width, target_height))
# image to float
img_resized = img_resized.astype(np.float32)
img_resized = img_resized.transpose((2, 0, 1))
img_resized = self.img_transform(torch.from_numpy(img_resized))
img_resized = torch.unsqueeze(img_resized, 0)
img_resized_list.append(img_resized)
segm = torch.from_numpy(segm.astype(np.int)).long()
batch_segms = torch.unsqueeze(segm, 0)
batch_segms = batch_segms - 1 # label from -1 to 149
output = dict()
output['img_ori'] = img.copy()
output['img_data'] = [x.contiguous() for x in img_resized_list]
output['seg_label'] = batch_segms.contiguous()
output['info'] = this_record['fpath_img']
return output
def __len__(self):
return self.num_sample
class TestDataset(torchdata.Dataset):
def __init__(self, odgt, opt, max_sample=-1):
self.imgSize = opt.imgSize
self.imgMaxSize = opt.imgMaxSize
# max down sampling rate of network to avoid rounding during conv or pooling
self.padding_constant = opt.padding_constant
# downsampling rate of the segmentation label
self.segm_downsampling_rate = opt.segm_downsampling_rate
# mean and std
self.img_transform = transforms.Compose([
transforms.Normalize(mean=[102.9801, 115.9465, 122.7717], std=[1., 1., 1.])
])
if isinstance(odgt, list):
self.list_sample = odgt
elif isinstance(odgt, str):
self.list_sample = [json.loads(x.rstrip()) for x in open(odgt, 'r')]
if max_sample > 0:
self.list_sample = self.list_sample[0:max_sample]
self.num_sample = len(self.list_sample)
assert self.num_sample > 0
print('# samples: {}'.format(self.num_sample))
def __getitem__(self, index):
this_record = self.list_sample[index]
# load image and label
image_path = this_record['fpath_img']
img = imread(image_path, mode='RGB')
img = img[:, :, ::-1] # RGB to BGR!!!
ori_height, ori_width, _ = img.shape
img_resized_list = []
for this_short_size in self.imgSize:
# calculate target height and width
scale = min(this_short_size / float(min(ori_height, ori_width)),
self.imgMaxSize / float(max(ori_height, ori_width)))
target_height, target_width = int(ori_height * scale), int(ori_width * scale)
# to avoid rounding in network
target_height = round2nearest_multiple(target_height, self.padding_constant)
target_width = round2nearest_multiple(target_width, self.padding_constant)
# resize
img_resized = cv2.resize(img.copy(), (target_width, target_height))
# image to float
img_resized = img_resized.astype(np.float32)
img_resized = img_resized.transpose((2, 0, 1))
img_resized = self.img_transform(torch.from_numpy(img_resized))
img_resized = torch.unsqueeze(img_resized, 0)
img_resized_list.append(img_resized)
# segm = torch.from_numpy(segm.astype(np.int)).long()
# batch_segms = torch.unsqueeze(segm, 0)
# batch_segms = batch_segms - 1 # label from -1 to 149
output = dict()
output['img_ori'] = img.copy()
output['img_data'] = [x.contiguous() for x in img_resized_list]
# output['seg_label'] = batch_segms.contiguous()
output['info'] = this_record['fpath_img']
return output
def __len__(self):
return self.num_sample
|
{"hexsha": "315254f2897e16a44659671b7f00feb0ea920bc7", "size": 12921, "ext": "py", "lang": "Python", "max_stars_repo_path": "dataset.py", "max_stars_repo_name": "gitunit/semantic-segmentation-pytorch", "max_stars_repo_head_hexsha": "601a2d09dae0d942ee0c9a6c7f00d4885ea4561b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-06-14T21:43:22.000Z", "max_stars_repo_stars_event_max_datetime": "2020-06-14T21:44:16.000Z", "max_issues_repo_path": "dataset.py", "max_issues_repo_name": "gitunit/semantic-segmentation-pytorch", "max_issues_repo_head_hexsha": "601a2d09dae0d942ee0c9a6c7f00d4885ea4561b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "dataset.py", "max_forks_repo_name": "gitunit/semantic-segmentation-pytorch", "max_forks_repo_head_hexsha": "601a2d09dae0d942ee0c9a6c7f00d4885ea4561b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-03-01T09:32:43.000Z", "max_forks_repo_forks_event_max_datetime": "2020-06-15T17:12:18.000Z", "avg_line_length": 42.2254901961, "max_line_length": 108, "alphanum_fraction": 0.6227846142, "include": true, "reason": "import numpy,from scipy", "num_tokens": 3048}
|
export Metadata, Metadata!, rename!, delete!
import Base.==
const metadata_folder_name = ".metadata"
const max_lock_retries = 100
const metadata_lock = "metadata.lck"
metadatadir(args...) = projectdir(metadata_folder_name, args...)
mutable struct Metadata <: AbstractDict{String, Any}
path::String
mtime::Float64
data::Dict{String,Any}
end
function Metadata(path::String; overwrite=false)
(isfile(path) || isdir(path)) || @warn "There is no file or folder at '$path'."
assert_metadata_directory()
rel_path = project_rel_path(path)
# Check if there is already an entry for that file in the index
lock("metadata")
semaphore_enter("indexread")
unlock("metadata")
_path = find_file_in_index(path)
semaphore_exit("indexread")
if _path != nothing && !overwrite
m = load_metadata(_path)
if m.mtime != mtime(path) && isfile(path)
@warn "The metadata entries might not be up to date. The file changed after adding the entries"
end
elseif _path != nothing && overwrite
m = Metadata(rel_path, mtime(path), Dict{String,Any}())
save_metadata(m)
else
lock("metadata", wait_for_semaphore="indexread")
try
m = Metadata(rel_path, mtime(path), Dict{String,Any}())
save_metadata(m)
finally
unlock("metadata")
end
end
return m
end
Base.length(m::Metadata) = length(m.data)
Base.iterate(m::Metadata, args...; kwargs...) = iterate(m.data, args...; kwargs...)
Metadata!(path::String) = Metadata(path, overwrite=true)
project_rel_path(path) = relpath(abspath(path), projectdir())
get_stored_path(m::Metadata) = getfield(m,:path)
standardize_path(path) = join(splitpath(project_rel_path(path)), "/")
hash_path(path) = hash(standardize_path(path))
to_file_name(x) = string(x)*".bson"
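# For example (the concrete hash value depends on Julia's `hash`): a file at
# <project>/data/raw.csv is keyed by hash_path("data/raw.csv") and its
# metadata is stored as .metadata/<that hash>.bson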
function load_metadata(path; ignore_exceptions=false)
try
entry = BSON.load(path)
Metadata([entry[string(field)] for field in fieldnames(Metadata)]...)
catch e
if ignore_exceptions
return nothing
else
rethrow(e)
end
end
end
function find_file_in_index(path)
file = metadatadir(hash_path(path)|>to_file_name)
isfile(file) && return file
return nothing
end
function ==(a::Metadata,b::Metadata)
for k ∈ fieldnames(Metadata)
if getfield(a,k) != getfield(b,k)
return false
end
end
return true
end
Base.setproperty!(m::Metadata, sym::Symbol, val) = error("The field '$sym' is treated as immutable and cannot be updated directly.")
Base.getproperty(m::Metadata, sym::Symbol) = getproperty(m, Val(sym))
getproperty(m::Metadata, ::Val{T}) where T = getfield(m, T)
getproperty(m::Metadata, ::Val{:path}) = projectdir(getfield(m, :path))
Base.getindex(m::Metadata, field::String) = m.data[field]
function Base.setindex!(m::Metadata, val, field::String)
m.data[field] = val
save_metadata(m)
return val
end
Base.keys(m::Metadata) = keys(m.data)
function Base.delete!(m::Metadata, field)
delete!(m.data,field)
save_metadata(m)
return m
end
function rename!(m::Metadata, path)
rel_path = project_rel_path(path)
assert_metadata_directory()
lock("metadata", wait_for_semaphore="indexread")
try
if find_file_in_index(rel_path) != nothing
unlock("metadata")
error("There is already metadata stored for '$path'.")
end
new_metadata_file = metadatadir(hash_path(path)|>to_file_name)
old_metadata_file = metadatadir(hash_path(m.path)|>to_file_name)
mv(old_metadata_file, new_metadata_file)
setfield!(m, :path, rel_path)
save_metadata(m)
finally
unlock("metadata")
end
end
function Base.delete!(m::Metadata)
assert_metadata_directory()
lock("metadata", wait_for_semaphore="indexread")
try
file = metadatadir(to_file_name(hash_path(m.path)))
if !isfile(file)
unlock("metadata")
error("There is no metadata storage for id $(m.path)")
end
rm(file)
remove_index_entry(m.id, get_stored_path(m))
finally
unlock("metadata")
end
end
function save_metadata(m::Metadata)
setfield!(m,:mtime,mtime(m.path))
BSON.bson(metadatadir(to_file_name(hash_path(m.path))),Dict(string(field)=>getfield(m,field) for field in fieldnames(Metadata)))
end
function assert_metadata_directory()
metadata_directory = metadatadir()
if !isdir(metadata_directory)
@info "Metadata directory not found, creating a new one"
try
mkdir(metadata_directory)
catch e
if !isa(e, Base.IOError)
rethrow(e)
end
end
end
end
function DrWatson.tag!(m, args...; kwargs...)
tag!(m.data, args...; kwargs...)
save_metadata(m)
end
|
{"hexsha": "4a98e73f591b62f4be3c45489d2b5e9561610d58", "size": 4888, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Metadata.jl", "max_stars_repo_name": "sebastianpech/DrWatsonSim", "max_stars_repo_head_hexsha": "3a78e1a0171c45b2e206c26cf535c813a84a2a31", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2020-11-06T12:13:04.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-15T05:10:35.000Z", "max_issues_repo_path": "src/Metadata.jl", "max_issues_repo_name": "sebastianpech/DrWatsonSim", "max_issues_repo_head_hexsha": "3a78e1a0171c45b2e206c26cf535c813a84a2a31", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Metadata.jl", "max_forks_repo_name": "sebastianpech/DrWatsonSim", "max_forks_repo_head_hexsha": "3a78e1a0171c45b2e206c26cf535c813a84a2a31", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.8048780488, "max_line_length": 132, "alphanum_fraction": 0.6552782324, "num_tokens": 1196}
|
#
# Copyright (c) 2020 The rlutils authors
#
# This source code is licensed under an MIT license found in the LICENSE file in the root directory of this project.
#
import unittest
class TestVi(unittest.TestCase):
'''
Test rlutils.algorithm.vi.
'''
def _get_test_mdp(self):
import numpy as np
t_mat = np.array([
[
[0., 1., 0.],
[0., 0., 1.],
[0., 0., 1.]
],
[
[1., 0., 0.],
[1., 0., 0.],
[0., 1., 0.]
]
], dtype=np.float32)
r_vec = np.array([
[0., 0., 1.],
[0., 0., 0.]
], dtype=np.float32)
v_corr = np.array([10. * .9 * .9, 10. * .9, 10.], dtype=np.float32)
q_corr = np.array([
[10. * .9 * .9, 10. * .9, 10.],
[10. * .9 * .9 * .9, 10. * .9 * .9 * .9, 10. * .9 * .9]
], dtype=np.float32)
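# Sanity check on the targets (gamma = 0.9): the only reward is 1 for taking
# action 0 in state 2, which self-loops, so V(2) = 1 / (1 - 0.9) = 10. The
# chain 0 -> 1 -> 2 under action 0 gives V = [0.81 * 10, 0.9 * 10, 10], and
# each Q(s, a) is the immediate reward plus 0.9 * V(next state).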
return t_mat, r_vec, q_corr, v_corr
def test_vi(self):
import rlutils as rl
import numpy as np
t_mat, r_vec, q_corr, v_corr = self._get_test_mdp()
q_hat, v_hat = rl.algorithm.vi(t_mat, r_vec, gamma=0.9, eps=1e-5)
self.assertLess(np.max(np.abs(v_hat - v_corr)), 1e-4)
self.assertLess(np.max(np.abs(q_hat - q_corr)), 1e-4)
def test_vi_q_init(self):
import rlutils as rl
import numpy as np
t_mat, r_vec, q_corr, v_corr = self._get_test_mdp()
q_hat, v_hat = rl.algorithm.vi(t_mat, r_vec, gamma=0.9, eps=1e-5, q_init=q_corr, max_it=1)
self.assertLess(np.max(np.abs(v_hat - v_corr)), 1e-4)
self.assertLess(np.max(np.abs(q_hat - q_corr)), 1e-4)
def test_vi_v_init(self):
import rlutils as rl
import numpy as np
t_mat, r_vec, q_corr, v_corr = self._get_test_mdp()
q_hat, v_hat = rl.algorithm.vi(t_mat, r_vec, gamma=0.9, eps=1e-5, v_init=v_corr, max_it=1)
self.assertLess(np.max(np.abs(v_hat - v_corr)), 1e-4)
self.assertLess(np.max(np.abs(q_hat - q_corr)), 1e-4)
def test_vi_timeout(self):
import rlutils as rl
t_mat, r_vec, q_corr, v_corr = self._get_test_mdp()
try:
rl.algorithm.vi(t_mat, r_vec, gamma=0.9, eps=1e-5, max_it=2)
self.fail()
except rl.algorithm.VITimeoutException:
pass
if __name__ == '__main__':
unittest.main()
|
{"hexsha": "a18f5d6b17930fe0810690d7e2d79d01c8aca5c2", "size": 2442, "ext": "py", "lang": "Python", "max_stars_repo_path": "test/algorithm/test_vi.py", "max_stars_repo_name": "lucaslehnert/rlutils", "max_stars_repo_head_hexsha": "77597f0596ff907c3dcae131a2fb51971949e634", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-03-21T22:33:30.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-18T21:00:18.000Z", "max_issues_repo_path": "test/algorithm/test_vi.py", "max_issues_repo_name": "lucaslehnert/rlutils", "max_issues_repo_head_hexsha": "77597f0596ff907c3dcae131a2fb51971949e634", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/algorithm/test_vi.py", "max_forks_repo_name": "lucaslehnert/rlutils", "max_forks_repo_head_hexsha": "77597f0596ff907c3dcae131a2fb51971949e634", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.75, "max_line_length": 116, "alphanum_fraction": 0.5266175266, "include": true, "reason": "import numpy", "num_tokens": 764}
|
# Tests of this file not covered in other tests
@testset "dual_model_variables.jl" begin
@testset "push_to_dual_obj_aff_terms!" begin
primal_model = soc1_test()
dual_obj_affine_terms = Dict{VI,Float64}()
list = MOI.get(
primal_model,
MOI.ListOfConstraintIndices{VVF,MOI.SecondOrderCone}(),
)
ci = first(list)
func = MOI.get(primal_model, MOI.ConstraintFunction(), ci)
set = MOI.get(primal_model, MOI.ConstraintSet(), ci)
Dualization.push_to_dual_obj_aff_terms!(
primal_model,
dual_obj_affine_terms,
VI(1),
func,
set,
1,
)
@test isempty(dual_obj_affine_terms)
end
@testset "set_dual_variable_name" begin
primal_model = soc1_test()
vi = VI(1)
Dualization.set_dual_variable_name(primal_model, vi, 1, "con", "")
@test MOI.get(primal_model, MOI.VariableName(), vi) == "con_1"
Dualization.set_dual_variable_name(primal_model, vi, 2, "con", "")
@test MOI.get(primal_model, MOI.VariableName(), vi) == "con_2"
Dualization.set_dual_variable_name(primal_model, vi, 2, "con", "oi")
@test MOI.get(primal_model, MOI.VariableName(), vi) == "oicon_2"
end
end
|
{"hexsha": "78afb37a8779e2f42b979fcbf20a9a72cd98d76e", "size": 1300, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/Tests/test_dual_model_variables.jl", "max_stars_repo_name": "JuliaOpt/Dualization.jl", "max_stars_repo_head_hexsha": "b9d25819677ced58cac0374df797cd5586830ad5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 18, "max_stars_repo_stars_event_min_datetime": "2019-08-12T17:28:12.000Z", "max_stars_repo_stars_event_max_datetime": "2020-05-30T12:50:50.000Z", "max_issues_repo_path": "test/Tests/test_dual_model_variables.jl", "max_issues_repo_name": "JuliaOpt/Dualization.jl", "max_issues_repo_head_hexsha": "b9d25819677ced58cac0374df797cd5586830ad5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 34, "max_issues_repo_issues_event_min_datetime": "2019-08-12T18:07:22.000Z", "max_issues_repo_issues_event_max_datetime": "2020-05-27T17:53:44.000Z", "max_forks_repo_path": "test/Tests/test_dual_model_variables.jl", "max_forks_repo_name": "JuliaOpt/Dualization.jl", "max_forks_repo_head_hexsha": "b9d25819677ced58cac0374df797cd5586830ad5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-01-23T01:28:36.000Z", "max_forks_repo_forks_event_max_datetime": "2020-05-08T17:32:45.000Z", "avg_line_length": 37.1428571429, "max_line_length": 76, "alphanum_fraction": 0.6146153846, "num_tokens": 347}
|
\name{loadPrice}
\alias{loadPrice}
\title{Load price from GitHub}
\description{
Load price data for one or more stocks from GitHub
}
\usage{
loadPrice(...)
}
\arguments{
\item{...}{one or more ticker string(s)}
}
\examples{
## load a ticker data
loadPrice('MWG')
## load multiple tickers
loadPrice('VN30', 'FPT', 'VCB')
}
\keyword{loadPrice}
|
{"hexsha": "a69193bd129696fc3d24dea9d6d4f3d0d21d6a02", "size": 328, "ext": "rd", "lang": "R", "max_stars_repo_path": "man/price.rd", "max_stars_repo_name": "algo-stocks/rdatavn", "max_stars_repo_head_hexsha": "b0318b4eead44351fba6132d91c66cd0ab410916", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "man/price.rd", "max_issues_repo_name": "algo-stocks/rdatavn", "max_issues_repo_head_hexsha": "b0318b4eead44351fba6132d91c66cd0ab410916", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "man/price.rd", "max_forks_repo_name": "algo-stocks/rdatavn", "max_forks_repo_head_hexsha": "b0318b4eead44351fba6132d91c66cd0ab410916", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 15.619047619, "max_line_length": 42, "alphanum_fraction": 0.6859756098, "num_tokens": 99}
|
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib as mpl
import sys
sys.path.append('./')
from util import constants
aspect = {
'size': 6.5,
'font_scale': 2.5,
'labels': False,
'ratio': 1.625,
}
models = ['ngram', 'lstm']
sns.set(style="white", palette="muted", color_codes=True)
sns.set_context("notebook", font_scale=aspect['font_scale'])
mpl.rc('font', family='serif', serif='Times New Roman')
mpl.rc('figure', figsize=(aspect['size'] * aspect['ratio'], aspect['size'] * aspect['ratio']))
sns.set_style({'font.family': 'serif', 'font.serif': 'Times New Roman'})
def get_artificial_df(folder):
ngram_file = '%s/artificial__ngram__full-results.csv' % folder
lstm_file = '%s/artificial__lstm__full-results.csv' % folder
df_artificial_ngram = pd.read_csv(ngram_file)
df_artificial_lstm = pd.read_csv(lstm_file)
df_artificial_ngram['Model'] = 'trigram'
df_artificial_lstm['Model'] = 'LSTM'
df_models = []
df_models += [df_artificial_ngram] if 'ngram' in models else []
df_models += [df_artificial_lstm] if 'lstm' in models else []
df = pd.concat(df_models, sort=False)
df = df.sort_values(['Model', 'artificial'], ascending=[False, True])
df_art = df[df.artificial].copy()
df_art['artificial'] = df_art['test_loss']
df_art = df_art[['Model', 'lang', 'artificial', 'fold']]
df_nat = df[~df.artificial].copy()
df_nat['natural'] = df_nat['test_loss']
df_nat = df_nat[['Model', 'lang', 'natural', 'fold']]
df_final = df_art.set_index(['Model', 'lang', 'fold']) \
.join(df_nat.set_index(['Model', 'lang', 'fold'])) \
.reset_index()
return df_final
def get_artificial_df_avg(folder):
df_final = get_artificial_df(folder)
df_final = df_final.groupby(['Model', 'lang']).agg('mean').reset_index()
return df_final
df_final = get_artificial_df_avg(constants.rfolder_artificial_harmony)
sns.scatterplot(x='natural', y='artificial', data=df_final, hue='Model', style='Model', s=500, markers=['P', 'o'])
min_y = df_final.artificial.min()
min_x = df_final.natural.min()
max_y = df_final.artificial.max()
max_x = df_final.natural.max()
min_range = min(min_y, min_x) - .1
max_range = max(max_y, max_x) + .1
plot_range = [min_range, max_range]
plt.plot(plot_range, plot_range, 'C2--', linewidth=2, alpha=0.8)
plt.legend(labelspacing=0.2, markerscale=4)
plt.xlim(plot_range)
plt.xticks(np.arange(2.75, 4.8, step=0.5))
plt.ylim(plot_range)
plt.yticks(np.arange(2.75, 4.8, step=0.5))
plt.xlabel('Original Language')
plt.ylabel('Artificial Language')
plt.tight_layout()
plt.savefig('plot/scatterplot_harmony.pdf', bbox_inches='tight')
plt.show()
|
{"hexsha": "345570a9df72f42d18be8b2d7c242afa1a8761a5", "size": 2856, "ext": "py", "lang": "Python", "max_stars_repo_path": "visualization_layer/plot_artificial_scatter.py", "max_stars_repo_name": "tpimentelms/phonotactic-complexity", "max_stars_repo_head_hexsha": "70d0a9e45943096d7640eaf7277033e3920408c9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2020-04-17T20:46:11.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-02T10:32:00.000Z", "max_issues_repo_path": "visualization_layer/plot_artificial_scatter.py", "max_issues_repo_name": "tpimentelms/phonotactic-complexity", "max_issues_repo_head_hexsha": "70d0a9e45943096d7640eaf7277033e3920408c9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "visualization_layer/plot_artificial_scatter.py", "max_forks_repo_name": "tpimentelms/phonotactic-complexity", "max_forks_repo_head_hexsha": "70d0a9e45943096d7640eaf7277033e3920408c9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.3846153846, "max_line_length": 114, "alphanum_fraction": 0.6887254902, "include": true, "reason": "import numpy", "num_tokens": 822}
|
# Script to run through the files and fit the zero-inflated negative binomial distribution
using Plots, Distributions, DelimitedFiles, Printf
include("Utils.jl")
import Main.Utils
gr()
# Set some parameters
spread = 0.95
folder = "Chains/"
Files = readdir(folder)
# Make a stats variable to be written to a file
Stats = Array{Any,2}(undef,length(Files)+1,13)
Stats[1,:] = ["Name" "p" "w" "r" "K" "r_min" "r_max" "p_min" "p_max" "w_min" "w_max" "K_min" "K_max"]
for (ii,file) in enumerate(Files)
# Load and plot the distribution data
chain = readdlm(folder*file)
# Extract parameters and plot
r = Utils.find_MAP(chain,idx=1)
p = Utils.find_MAP(chain,idx=2)
w = Utils.find_MAP(chain,idx=3)
K = Utils.find_MAP(chain,idx=4)
int_r = Utils.credibleintervals(chain,idx=1, spread=spread)
int_p = Utils.credibleintervals(chain,idx=2, spread=spread)
int_w = Utils.credibleintervals(chain,idx=3, spread=spread)
int_K = Utils.credibleintervals(chain,idx=4, spread=spread)
# Plot and save
plt = Utils.plot_chain(chain, [r,p,w,K], [int_r,int_p,int_w,int_K])
try
Plots.pdf("MCMC/"*file)
catch
mkdir("MCMC")
Plots.pdf("MCMC/"*file)
end
# Save data
println(file)
Stats[ii+1,:] = [file p w r K r-int_r[1] int_r[2]-r p-int_p[1] int_p[2]-p w-int_w[1] int_w[2]-w K-int_K[1] int_K[2]-K]
end
DelimitedFiles.writedlm("Statistics.txt", Stats)
|
{"hexsha": "7ce692de174df51641462b9b51f3870c58f9e2d4", "size": 1431, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "Stats.jl", "max_stars_repo_name": "rdbrackston/tx-analysis", "max_stars_repo_head_hexsha": "7a58767723b60d7d4997f1a0f9f1b18ed3d3e8f7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-04-13T05:42:14.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-18T12:54:43.000Z", "max_issues_repo_path": "Stats.jl", "max_issues_repo_name": "rdbrackston/tx-analysis", "max_issues_repo_head_hexsha": "7a58767723b60d7d4997f1a0f9f1b18ed3d3e8f7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Stats.jl", "max_forks_repo_name": "rdbrackston/tx-analysis", "max_forks_repo_head_hexsha": "7a58767723b60d7d4997f1a0f9f1b18ed3d3e8f7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.8125, "max_line_length": 122, "alphanum_fraction": 0.6722571628, "num_tokens": 466}
|
# coding=utf-8
# Copyright 2020 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
# Copyright 2020 Kalpesh Krishna.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fine-tuning GPT2 for conditional generation tasks."""
from __future__ import absolute_import, division, print_function
import glob
import logging
import os
import random
import re
import shutil
import subprocess
import numpy as np
import time
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from collections import defaultdict
from args import get_parser
from data_utils import MAX_ROBERTA_LENGTH
from fairseq.models.roberta import RobertaModel
from fairseq.optim.adafactor import Adafactor
from style_dataset import (InverseParaphraseDatasetText,
ParaphraseDatasetText)
from transformers import (WEIGHTS_NAME, AdamW, GPT2Config, GPT2LMHeadModel,
GPT2Tokenizer, get_linear_schedule_with_warmup)
from utils import GPT2ParentModule, init_gpt2_model
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
logger = logging.getLogger(__name__)
MODEL_CLASSES = {
'gpt2': (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
}
SPECIAL_TOKENS = {
"additional_special_tokens": ["<dense-vectors>", "<tokens>", "<verb>", "<ARG0>", "<ARG1>", "<global-dense-vectors>"],
"pad_token": "<pad>",
"bos_token": "<bos>",
"eos_token": "<eos>"
}
def load_and_cache_examples(args, tokenizer, evaluate=False):
if not args.prefix_input_type.startswith("original"):
dataset = InverseParaphraseDatasetText(
tokenizer=tokenizer,
args=args,
evaluate=evaluate,
split="dev" if evaluate else "train"
)
else:
dataset = ParaphraseDatasetText(
tokenizer=tokenizer,
args=args,
evaluate=evaluate,
split="dev" if evaluate else "train"
)
return dataset
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def _rotate_checkpoints(args, checkpoint_prefix, use_mtime=False):
if not args.save_total_limit:
return
if args.save_total_limit <= 0:
return
# Check if we should delete older checkpoint(s)
glob_checkpoints = glob.glob(os.path.join(args.output_dir, '{}-*'.format(checkpoint_prefix)))
if len(glob_checkpoints) <= args.save_total_limit:
return
ordering_and_checkpoint_path = []
for path in glob_checkpoints:
if use_mtime:
ordering_and_checkpoint_path.append((os.path.getmtime(path), path))
else:
regex_match = re.match('.*{}-([0-9]+)'.format(checkpoint_prefix), path)
if regex_match and regex_match.groups():
ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path))
checkpoints_sorted = sorted(ordering_and_checkpoint_path)
checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted]
number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - args.save_total_limit)
checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete]
for checkpoint in checkpoints_to_be_deleted:
logger.info("Deleting older checkpoint [{}] due to args.save_total_limit".format(checkpoint))
shutil.rmtree(checkpoint)
def save_model(gpt2_model, output_dir, args, global_step, tokenizer=None):
# Take care of distributed/parallel training
model_to_save = gpt2_model.gpt2
model_to_save = model_to_save.module if hasattr(model_to_save, 'module') else model_to_save
model_to_save.save_pretrained(output_dir)
logger.info("Saving model checkpoint to %s", output_dir)
torch.save(args, os.path.join(output_dir, 'training_args.bin'))
with open(os.path.join(output_dir, "global_step.txt"), "w") as f:
f.write(str(global_step) + "\n")
if tokenizer:
tokenizer.save_pretrained(output_dir)
def train(args, gpt2_model, train_dataset, tokenizer):
""" Train the model """
if args.local_rank in [-1, 0]:
try:
tb_writer = SummaryWriter(logdir="runs/summary_%s" % args.job_id)
except:
tb_writer = SummaryWriter(log_dir="runs/summary_%s" % args.job_id)
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
# Update the model definition in case RoBERTa is training
model = gpt2_model.gpt2
# Prepare optimizer and schedule (linear warmup and decay)
# the extra 'layer_norm.weight' entry covers modules that spell the name in lowercase
no_decay = ['bias', 'LayerNorm.weight', 'layer_norm.weight']
grouped_parameters = [
{
'params': [
p for n, p in model.named_parameters()
if not any(nd in n for nd in no_decay)
],
'weight_decay': args.weight_decay
},
{
'params': [
p for n, p in model.named_parameters()
if any(nd in n for nd in no_decay)
],
'weight_decay': 0.0
}
]
optimizer = AdamW(grouped_parameters,
lr=float(args.learning_rate),
eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(optimizer,
num_warmup_steps=args.warmup_steps,
num_training_steps=t_total)
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
output_device=args.local_rank,
find_unused_parameters=True)
# this is necessary to ensure multi-GPU training happens since the gpt2_model.gpt2 pointer has been set to the model without the DDP wrapper
gpt2_model.gpt2 = model
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1))
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
global_step = 0
loss_metrics = {
"lm": {"current": 0.0, "previous": 0.0}
}
model.zero_grad()
train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0])
set_seed(args) # Added here for reproducibility (even between python 2 and 3)
for _ in train_iterator:
epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
for step, batch in enumerate(epoch_iterator):
loss = gpt2_model(batch)
if args.n_gpu > 1:
for k, v in loss.items():
loss[k] = v.mean()
if args.gradient_accumulation_steps > 1:
for k, v in loss.items():
loss[k] = v / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss["lm"], optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss["lm"].backward()
# Update the metrics for Tensorboard logging
for metric_type, metric_vals in loss_metrics.items():
metric_vals["current"] += loss[metric_type].item()
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
# Update the generator or the discriminator optimizer
optimizer.step()
scheduler.step()
model.zero_grad()
global_step += 1
if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
# Only evaluate when single GPU otherwise metrics may not average well
if args.local_rank == -1 and args.evaluate_during_training:
results = evaluate(args, gpt2_model, tokenizer)
for key, value in results.items():
tb_writer.add_scalar('eval_{}'.format(key), value, global_step)
tb_writer.add_scalar('learning_rate', scheduler.get_lr()[0], global_step)
for metric_type, metric_vals in loss_metrics.items():
tb_writer.add_scalar(
'%s_loss' % metric_type,
(metric_vals["current"] - metric_vals["previous"]) / args.logging_steps,
global_step
)
metric_vals["previous"] = metric_vals["current"]
if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
checkpoint_prefix = 'checkpoint'
# Save model checkpoint
output_dir = os.path.join(args.output_dir, '{}-{}'.format(checkpoint_prefix, global_step))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
save_model(gpt2_model, output_dir, args, global_step, tokenizer=tokenizer)
_rotate_checkpoints(args, checkpoint_prefix)
if args.max_steps > 0 and global_step > args.max_steps:
epoch_iterator.close()
break
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
if args.local_rank in [-1, 0]:
tb_writer.close()
return global_step, loss_metrics["lm"]["current"] / global_step
def evaluate(args, gpt2_model, tokenizer, prefix=""):
# Loop to handle MNLI double evaluation (matched, mis-matched)
eval_output_dir = args.output_dir
eval_dataset = load_and_cache_examples(args, tokenizer, evaluate=True)
if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
os.makedirs(eval_output_dir)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# multi-gpu evaluate
if args.n_gpu > 1:
gpt2_model = torch.nn.DataParallel(gpt2_model)
# Eval!
logger.info("***** Running evaluation {} *****".format(prefix))
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
total_instances = 0
gpt2_model.eval()
for batch in tqdm(eval_dataloader, desc="Evaluating"):
curr_loss = gpt2_model.evaluate(batch)
eval_loss += curr_loss
total_instances += batch["suffix_style"].shape[0]
nb_eval_steps += 1
eval_loss = eval_loss / nb_eval_steps
perplexity = torch.exp(torch.tensor(eval_loss))
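# Perplexity is exp(mean cross-entropy), so it decreases monotonically with
# eval_loss; main() uses this value to rank and prune checkpoints.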
result = {
"perplexity": perplexity
}
output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt")
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results {} *****".format(prefix))
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
return result
def main():
parser = get_parser("finetuning")
args = parser.parse_args()
if (os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir):
raise ValueError("Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(args.output_dir))
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
else: # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend='nccl')
args.n_gpu = 1
args.device = device
# Setup logging
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)
# Set seed
set_seed(args)
# Load pretrained model and tokenizer
if args.local_rank not in [-1, 0]:
# Barrier to make sure only the first process in distributed training download model & vocab
torch.distributed.barrier()
config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path,
cache_dir=args.cache_dir if args.cache_dir else None)
# Adding an extra embedding dimension for style/content vectors
config.extra_embedding_dim = args.extra_embedding_dim
tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
do_lower_case=args.do_lower_case,
cache_dir=args.cache_dir if args.cache_dir else None)
model = model_class.from_pretrained(args.model_name_or_path,
from_tf=bool('.ckpt' in args.model_name_or_path),
config=config,
cache_dir=args.cache_dir if args.cache_dir else None)
tokenizer.add_special_tokens(SPECIAL_TOKENS)
model.resize_token_embeddings(len(tokenizer))
model.to(args.device)
gpt2_model = GPT2ParentModule(args=args, gpt2=model)
if args.local_rank == 0:
torch.distributed.barrier() # End of barrier to make sure only the first process in distributed training download model & vocab
logger.info("Training/evaluation parameters %s", args)
# Training
if args.do_train:
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # Barrier to make sure only the first process in distributed training process the dataset, and the others will use the cache
train_dataset = load_and_cache_examples(args, tokenizer, evaluate=False)
if args.local_rank == 0:
torch.distributed.barrier()
global_step, tr_loss = train(args, gpt2_model, train_dataset, tokenizer)
logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step))
if not os.path.exists(output_dir) and args.local_rank in [-1, 0]:
os.makedirs(output_dir)
save_model(gpt2_model, output_dir, args, global_step, tokenizer)
gpt2_model, tokenizer = init_gpt2_model(checkpoint_dir=args.output_dir,
args=args,
model_class=model_class,
tokenizer_class=tokenizer_class)
# Evaluation
if args.do_eval and args.local_rank in [-1, 0]:
eval_done = False
all_results = {}
top_checkpoint = None
patience = 0
while not eval_done:
checkpoints = []
if not args.evaluate_specific:
checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/checkpoint-*/' + WEIGHTS_NAME, recursive=True)))
logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging
# Sort checkpoints according to the step number
if len(checkpoints) > 0:
checkpoints.sort(key=lambda x: int(x.split("-")[-1]))
else:
checkpoints.append(args.evaluate_specific)
checkpoints = [x for x in checkpoints if x not in all_results]
# Count the number of while-loop iterations in which no new checkpoints were found
if len(checkpoints) == 0:
patience += 1
else:
patience = 0
logger.info("Evaluate the following checkpoints: %s", checkpoints)
for checkpoint in checkpoints:
prefix = checkpoint.split('/')[-1] if checkpoint.find('checkpoint') != -1 else ""
gpt2_model, _ = init_gpt2_model(checkpoint_dir=checkpoint,
args=args,
model_class=model_class)
result = evaluate(args, gpt2_model, tokenizer, prefix=prefix)
all_results[checkpoint] = result["perplexity"]
sorted_results = [(k, v) for k, v in all_results.items()]
sorted_results.sort(key=lambda x: x[1].item())
if not args.evaluate_specific and args.do_delete_old and len(sorted_results) > args.save_total_limit:
logger.info("Deleting worse checkpoints...")
# delete all but the top save_total_limit checkpoints
for res in sorted_results[args.save_total_limit:]:
if os.path.exists(res[0]):
logger.info("Deleting {}...".format(res[0]))
shutil.rmtree(res[0])
# move top checkpoint to root directory
if not args.evaluate_specific and len(sorted_results) > 0 and sorted_results[0][0] != top_checkpoint:
command = "cp {}/* {}".format(sorted_results[0][0], args.output_dir)
logger.info("executing {}...".format(command))
subprocess.check_output(command, shell=True)
top_checkpoint = sorted_results[0][0]
sorted_results_summary = "\n".join(["{} = {:.4f}".format(x[0], x[1]) for x in sorted_results])
logger.info("Top checkpoints:\n{}".format(sorted_results_summary))
if args.eval_frequency_min == 0 or args.evaluate_specific or patience > args.eval_patience:
eval_done = True
else:
logger.info("Sleeping for {:d} minutes...zzzz...".format(args.eval_frequency_min))
time.sleep(args.eval_frequency_min * 60)
return all_results
if __name__ == "__main__":
main()
|
{"hexsha": "86fe06a6a40286aa73c1e8068ea0d10843fea5da", "size": 21419, "ext": "py", "lang": "Python", "max_stars_repo_path": "style_paraphrase/run_lm_finetuning.py", "max_stars_repo_name": "bmtm/style-transfer-paraphrase", "max_stars_repo_head_hexsha": "ffc46ce4481b6d85a9704eeeddebb82331a38638", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-04-09T19:31:19.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-09T19:31:19.000Z", "max_issues_repo_path": "style_paraphrase/run_lm_finetuning.py", "max_issues_repo_name": "bmtm/style-transfer-paraphrase", "max_issues_repo_head_hexsha": "ffc46ce4481b6d85a9704eeeddebb82331a38638", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "style_paraphrase/run_lm_finetuning.py", "max_forks_repo_name": "bmtm/style-transfer-paraphrase", "max_forks_repo_head_hexsha": "ffc46ce4481b6d85a9704eeeddebb82331a38638", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.1633858268, "max_line_length": 165, "alphanum_fraction": 0.6326159018, "include": true, "reason": "import numpy", "num_tokens": 4554}
|
#!/usr/bin/env python3
import numpy as np
from keras import backend as K
def mean_gaussian_negative_log_likelihood(y_true, y_pred):
nll = 0.5 * np.log(2 * np.pi) + 0.5 * K.square(y_pred - y_true)
axis = tuple(range(1, len(K.int_shape(y_true))))
return K.mean(K.sum(nll, axis=axis), axis=-1)
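# Note on the formula (added for clarity): for a unit-variance Gaussian,
# -log N(y_true; y_pred, 1) = 0.5*log(2*pi) + 0.5*(y_pred - y_true)^2 per
# element, which is what `nll` holds; it is summed over all non-batch axes
# and then averaged over the batch.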
|
{"hexsha": "ba34cb4cb5de401debbf36ff36ddecd8b1ad148b", "size": 307, "ext": "py", "lang": "Python", "max_stars_repo_path": "vaegan/losses.py", "max_stars_repo_name": "enisimsar/vaegan-shoes-keras", "max_stars_repo_head_hexsha": "d775a60d42a2c80e8ecc9bc3bfc321ca5010bc0e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 43, "max_stars_repo_stars_event_min_datetime": "2018-08-21T12:58:24.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-24T05:55:32.000Z", "max_issues_repo_path": "vaegan/losses.py", "max_issues_repo_name": "enisimsar/vaegan-shoes-keras", "max_issues_repo_head_hexsha": "d775a60d42a2c80e8ecc9bc3bfc321ca5010bc0e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2019-05-20T06:37:01.000Z", "max_issues_repo_issues_event_max_datetime": "2019-07-23T03:16:25.000Z", "max_forks_repo_path": "vaegan/losses.py", "max_forks_repo_name": "enisimsar/vaegan-shoes-keras", "max_forks_repo_head_hexsha": "d775a60d42a2c80e8ecc9bc3bfc321ca5010bc0e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 13, "max_forks_repo_forks_event_min_datetime": "2018-05-28T01:36:39.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-20T16:04:56.000Z", "avg_line_length": 25.5833333333, "max_line_length": 67, "alphanum_fraction": 0.680781759, "include": true, "reason": "import numpy", "num_tokens": 95}
|
import numpy as np
import os
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction import DictVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report, f1_score
import scipy.stats
import utils
__author__ = "Christopher Potts"
__version__ = "CS224u, Stanford, Spring 2021"
def sentiment_reader(src_filename, include_subtrees=True, dedup=False):
"""
Iterator for our distribution of the SST-3 and other files in
that format.
Parameters
----------
src_filename : str
Full path to the file to be read.
include_subtrees : bool
If True, then the subtrees are returned as separate examples.
This affects only the train split. For dev and test, only
the full examples are included.
dedup : bool
If True, only one copy of each (example, label) pair is included.
This mainly affects the train set, though there is one repeated
example in the dev set.
Yields
------
pd.DataFrame with columns ['example_id', 'sentence', 'label']
"""
df = pd.read_csv(src_filename)
if not include_subtrees:
df = df[df.is_subtree == 0]
if dedup:
df = df.groupby(['sentence', 'label']).apply(lambda x: x.iloc[0])
df = df.reset_index(drop=True)
return df
def train_reader(sst_home, include_subtrees=False, dedup=False):
"""
Convenience function for reading the SST-3 train file.
"""
src = os.path.join(sst_home, 'sst3-train.csv')
return sentiment_reader(
src, include_subtrees=include_subtrees, dedup=dedup)
def dev_reader(sst_home, include_subtrees=False, dedup=False):
"""
Convenience function for reading the SST-3 dev file.
"""
src = os.path.join(sst_home, 'sst3-dev.csv')
return sentiment_reader(
src, include_subtrees=include_subtrees, dedup=dedup)
def test_reader(sst_home, include_subtrees=False, dedup=False):
"""
Convenience function for reading the SST-3 test file, unlabeled.
This function should be used only for the final stages of a
project, to obtain a submission to be evaluated. If you need
to do an evaluation yourself with the labeled dataset, use
`sentiment_reader` pointing to the labeled version of this
dataset.
"""
src = os.path.join(sst_home, 'sst3-test-unlabeled.csv')
return sentiment_reader(
src, include_subtrees=include_subtrees, dedup=dedup)
def bakeoff_dev_reader(sst_home, include_subtrees=False, dedup=False):
"""
Convenience function for reading the bakeoff dev file.
"""
src = os.path.join(sst_home, 'cs224u-sentiment-dev.csv')
return sentiment_reader(
src, include_subtrees=include_subtrees, dedup=dedup)
def bakeoff_test_reader(sst_home, include_subtrees=False, dedup=False):
"""
Convenience function for reading the bakeoff test file, unlabeled.
"""
src = os.path.join(sst_home, 'cs224u-sentiment-test-unlabeled.csv')
return sentiment_reader(
src, include_subtrees=include_subtrees, dedup=dedup)
def build_dataset(dataframes, phi, vectorizer=None, vectorize=True):
"""
Core general function for building experimental datasets.
Parameters
----------
dataframes : pd.DataFrame or list of pd.DataFrame
The dataset or datasets to process, as read in by
`sentiment_reader`.
phi : feature function
Any function that takes a string as input and returns a
bool/int/float-valued dict as output.
vectorizer : sklearn.feature_extraction.DictVectorizer
If this is None, then a new `DictVectorizer` is created and
used to turn the list of dicts created by `phi` into a
feature matrix. This happens when we are training.
If this is not None, then it's assumed to be a `DictVectorizer`
and used to transform the list of dicts. This happens in
assessment, when we take in new instances and need to
featurize them as we did in training.
vectorize : bool
Whether to use a DictVectorizer. Set this to False for
deep learning models that process their own input.
Returns
-------
dict
A dict with keys 'X' (the feature matrix), 'y' (the list of
labels), 'vectorizer' (the `DictVectorizer`), and
'raw_examples' (the raw sentence strings, for error analysis).
"""
if isinstance(dataframes, (list, tuple)):
df = pd.concat(dataframes)
else:
df = dataframes
raw_examples = list(df.sentence.values)
feat_dicts = list(df.sentence.apply(phi).values)
if 'label' in df.columns:
labels = list(df.label.values)
else:
labels = None
feat_matrix = None
if vectorize:
# In training, we want a new vectorizer:
if vectorizer is None:
vectorizer = DictVectorizer(sparse=False)
feat_matrix = vectorizer.fit_transform(feat_dicts)
# In assessment, we featurize using the existing vectorizer:
else:
feat_matrix = vectorizer.transform(feat_dicts)
else:
feat_matrix = feat_dicts
return {'X': feat_matrix,
'y': labels,
'vectorizer': vectorizer,
'raw_examples': raw_examples}
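# A minimal usage sketch (assumes an `sst_home` path; the bag-of-words phi
# below is illustrative, not part of this module):
#   train_df = train_reader(sst_home)
#   phi = lambda s: {w: 1 for w in s.split()}
#   train = build_dataset(train_df, phi)              # fits a new vectorizer
#   dev = build_dataset(dev_reader(sst_home), phi,
#                       vectorizer=train['vectorizer'])  # reuses it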
def experiment(
train_dataframes,
phi,
train_func,
assess_dataframes=None,
train_size=0.7,
score_func=utils.safe_macro_f1,
vectorize=True,
verbose=True,
random_state=None):
"""
Generic experimental framework. Either assesses with a random
train/test split of `train_reader` or with `assess_reader` if
it is given.
Parameters
----------
train_dataframes : pd.DataFrame or list of pd.DataFrame
The dataset or datasets to process, as read in by
`sentiment_reader`.
phi : feature function
Any function that takes a string as input and returns
a bool/int/float-valued dict as output.
train_func : model wrapper
Any function that takes a feature matrix and a label list
as its values and returns a fitted model with a `predict`
function that operates on feature matrices.
assess_dataframes : pd.DataFrame, list of pd.DataFrame or None
If None, then the df from `train_dataframes` is split into
a random train/test split, with the train percentage
determined by `train_size`. If not None, then this should
be a dataset or datasets to process, as read in by
`sentiment_reader`. Each such dataset will be read and
used in a separate evaluation.
train_size : float (default: 0.7)
If `assess_reader` is None, then this is the percentage of
`train_reader` devoted to training. If `assess_reader` is
not None, then this value is ignored.
score_func : function (default: `utils.safe_macro_f1`)
This should be an `sklearn.metrics` scoring function. The
default is weighted average F1 (macro-averaged F1). For
comparison with the SST literature, `accuracy_score` might
be used instead. For other metrics that can be used here,
see http://scikit-learn.org/stable/modules/classes.html#module-sklearn.metrics
vectorize : bool
Whether to use a DictVectorizer. Set this to False for
deep learning models that process their own input.
verbose : bool (default: True)
Whether to print out the model assessment to standard output.
Set to False for statistical testing via repeated runs.
random_state : int or None
Optionally set the random seed for consistent sampling
where random train/test splits are being created.
Prints
-------
To standard output, if `verbose=True`
Model precision/recall/F1 report. Accuracy is micro-F1 and is
reported because many SST papers report that figure, but macro
precision/recall/F1 is better given the class imbalances and the
fact that performance across the classes can be highly variable.
Returns
-------
dict with keys
'model': trained model
'phi': the function used for featurization
'train_dataset': a dataset as returned by `build_dataset`
'assess_datasets': list of datasets as returned by `build_dataset`
'predictions': list of lists of predictions on the assessment datasets
'metric': `score_func.__name__`
        'scores': the `score_func` score on each of the assessment datasets
"""
# Train dataset:
train = build_dataset(
train_dataframes,
phi,
vectorizer=None,
vectorize=vectorize)
# Manage the assessment set-up:
X_train = train['X']
y_train = train['y']
raw_train = train['raw_examples']
assess_datasets = []
if assess_dataframes is None:
X_train, X_assess, y_train, y_assess, raw_train, raw_assess = train_test_split(
X_train, y_train, raw_train,
train_size=train_size,
test_size=None,
random_state=random_state)
assess_datasets.append({
'X': X_assess,
'y': y_assess,
'vectorizer': train['vectorizer'],
'raw_examples': raw_assess})
else:
if not isinstance(assess_dataframes, (tuple, list)):
assess_dataframes = [assess_dataframes]
for assess_df in assess_dataframes:
# Assessment dataset using the training vectorizer:
assess = build_dataset(
assess_df,
phi,
vectorizer=train['vectorizer'],
vectorize=vectorize)
assess_datasets.append(assess)
# Train:
mod = train_func(X_train, y_train)
# Predictions if we have labels:
predictions = []
scores = []
for dataset_num, assess in enumerate(assess_datasets, start=1):
preds = mod.predict(assess['X'])
if assess['y'] is None:
predictions.append(None)
scores.append(None)
else:
if verbose:
if len(assess_datasets) > 1:
print("Assessment dataset {}".format(dataset_num))
print(classification_report(assess['y'], preds, digits=3))
predictions.append(preds)
scores.append(score_func(assess['y'], preds))
true_scores = [s for s in scores if s is not None]
if len(true_scores) > 1 and verbose:
mean_score = np.mean(true_scores)
print("Mean of macro-F1 scores: {0:.03f}".format(mean_score))
# Return the overall scores and other experimental info:
return {
'model': mod,
'phi': phi,
'train_dataset': train,
'assess_datasets': assess_datasets,
'predictions': predictions,
'metric': score_func.__name__,
'scores': scores}
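

# A hedged usage sketch for `experiment` (an addition, not original
# code). It assumes scikit-learn is installed; `LogisticRegression` is
# just an illustrative model choice, and the toy dataframe is
# hypothetical.
def _demo_experiment():
    from sklearn.linear_model import LogisticRegression
    train_df = pd.DataFrame({
        'sentence': ['good', 'great', 'bad', 'awful'] * 10,
        'label': ['pos', 'pos', 'neg', 'neg'] * 10})
    def phi(text):
        return {w: 1 for w in text.split()}
    def fit_maxent(X, y):
        mod = LogisticRegression(solver='liblinear')
        mod.fit(X, y)
        return mod
    # With assess_dataframes=None, a random 70/30 train/test split is used:
    return experiment(train_df, phi, fit_maxent,
                      train_size=0.7, verbose=False)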
def compare_models(
dataframes,
phi1,
train_func1,
phi2=None,
train_func2=None,
vectorize1=True,
vectorize2=True,
stats_test=scipy.stats.wilcoxon,
trials=10,
train_size=0.7,
score_func=utils.safe_macro_f1):
"""
Wrapper for comparing models. The parameters are like those of
`experiment`, with the same defaults, except
Parameters
----------
dataframes : pd.DataFrame or list of pd.DataFrame
The dataset or datasets to process, as read in by
`sentiment_reader`.
    phi1, phi2
        Just like `phi` for `experiment`. `phi1` is required and has
        no default. If `phi2` is None, then it is set equal to `phi1`.
    train_func1, train_func2
        Just like `train_func` for `experiment`. If `train_func2`
        is None, then it is set equal to `train_func1`.
    vectorize1, vectorize2 : bool
Whether to vectorize the respective inputs. Use `False` for
deep learning models that featurize their own input.
stats_test : scipy.stats function
Defaults to `scipy.stats.wilcoxon`, a non-parametric version
of the paired t-test.
trials : int (default: 10)
        Number of runs on random train/test splits of `dataframes`,
        with `train_size` controlling the amount of training data.
train_size : float
Percentage of data to use for training.
Prints
------
To standard output
A report of the assessment.
Returns
-------
(np.array, np.array, float)
The first two are the scores from each model (length `trials`),
and the third is the p-value returned by `stats_test`.
"""
    if phi2 is None:
        phi2 = phi1
    if train_func2 is None:
        train_func2 = train_func1
    experiments1 = [experiment(dataframes,
                               phi=phi1,
                               train_func=train_func1,
                               train_size=train_size,
                               score_func=score_func,
                               vectorize=vectorize1,
                               verbose=False) for _ in range(trials)]
    experiments2 = [experiment(dataframes,
                               phi=phi2,
                               train_func=train_func2,
                               train_size=train_size,
                               score_func=score_func,
                               vectorize=vectorize2,
                               verbose=False) for _ in range(trials)]
scores1 = np.array([d['scores'][0] for d in experiments1])
scores2 = np.array([d['scores'][0] for d in experiments2])
# stats_test returns (test_statistic, p-value). We keep just the p-value:
pval = stats_test(scores1, scores2)[1]
# Report:
print('Model 1 mean: {0:.03f}'.format(scores1.mean()))
print('Model 2 mean: {0:.03f}'.format(scores2.mean()))
    if pval >= 0.001:
        print('p = {0:.03f}'.format(pval))
    else:
        print('p < 0.001')
# Return the scores for later analysis, and the p value:
return scores1, scores2, pval
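

# Usage sketch for `compare_models` (an addition, not original code).
# The toy data is hypothetical, and on data this small the default
# `wilcoxon` test can raise when the paired scores are identical, so
# `scipy.stats.ttest_rel` is swapped in here for robustness.
def _demo_compare_models():
    from sklearn.linear_model import LogisticRegression
    df = pd.DataFrame({
        'sentence': ['good fun', 'great film', 'not good',
                     'awful mess', 'bad film', 'not bad'] * 8,
        'label': ['pos', 'pos', 'neg', 'neg', 'neg', 'pos'] * 8})
    def unigrams(text):
        return {w: 1 for w in text.split()}
    def bigrams(text):
        toks = text.split()
        return {' '.join(pair): 1 for pair in zip(toks, toks[1:])}
    def fit(X, y):
        return LogisticRegression(solver='liblinear').fit(X, y)
    return compare_models(
        df, unigrams, fit, phi2=bigrams, train_func2=fit,
        trials=5, stats_test=scipy.stats.ttest_rel)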
def build_rnn_dataset(dataframes, tokenizer=lambda s: s.split()):
"""
    Given one or more SST dataframes, return the dataset as (X, y)
    training pairs.
Parameters
----------
dataframes : pd.DataFrame or list of pd.DataFrame
The dataset or datasets to process, as read in by
`sentiment_reader`.
tokenizer : function from str to list of str
Defaults to a whitespace tokenizer.
Returns
-------
X, y
Where X is a list of list of str, and y is the output label list.
"""
if isinstance(dataframes, (list, tuple)):
df = pd.concat(dataframes)
else:
df = dataframes
X = list(df.sentence.apply(tokenizer))
y = list(df.label.values)
return X, y
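

# Usage sketch for `build_rnn_dataset` (an addition, not original
# code), with a hypothetical two-row dataframe:
def _demo_build_rnn_dataset():
    df = pd.DataFrame({
        'sentence': ['a fine film', 'a dull film'],
        'label': ['pos', 'neg']})
    X, y = build_rnn_dataset(df)
    # X == [['a', 'fine', 'film'], ['a', 'dull', 'film']]
    # y == ['pos', 'neg']
    return X, y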
|
{"hexsha": "91caef76c417ec6e59d47b3413ae7663a7ab1acb", "size": 14479, "ext": "py", "lang": "Python", "max_stars_repo_path": "sst.py", "max_stars_repo_name": "klaudia-kantor/cs224u", "max_stars_repo_head_hexsha": "dd7e79052338555d86ed427c12270065fdf065ef", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1122, "max_stars_repo_stars_event_min_datetime": "2015-03-28T22:05:47.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T10:47:29.000Z", "max_issues_repo_path": "sst.py", "max_issues_repo_name": "klaudia-kantor/cs224u", "max_issues_repo_head_hexsha": "dd7e79052338555d86ed427c12270065fdf065ef", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 65, "max_issues_repo_issues_event_min_datetime": "2016-04-03T03:07:56.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-22T18:01:54.000Z", "max_forks_repo_path": "sst.py", "max_forks_repo_name": "klaudia-kantor/cs224u", "max_forks_repo_head_hexsha": "dd7e79052338555d86ed427c12270065fdf065ef", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 679, "max_forks_repo_forks_event_min_datetime": "2015-03-31T01:29:04.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T23:41:20.000Z", "avg_line_length": 33.2850574713, "max_line_length": 87, "alphanum_fraction": 0.6500448926, "include": true, "reason": "import numpy,import scipy", "num_tokens": 3361}
|
# -*- coding: utf-8 -*-
# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
__author__ = ["Markus Löning"]
import numpy as np
import pandas as pd
import pytest
from pytest import raises
from sktime.forecasting.base import ForecastingHorizon
from sktime.forecasting.base._fh import DELEGATED_METHODS
from sktime.forecasting.model_selection import temporal_train_test_split
from sktime.forecasting.tests._config import INDEX_TYPE_LOOKUP
from sktime.forecasting.tests._config import SUPPORTED_INDEX_FH_COMBINATIONS
from sktime.forecasting.tests._config import TEST_FHS
from sktime.utils._testing import make_forecasting_problem
from sktime.utils._testing.forecasting import _make_fh
from sktime.utils.validation.forecasting import SUPPORTED_INDEX_TYPES
def _assert_index_equal(a, b):
"""Helper function to compare forecasting horizons"""
assert isinstance(a, pd.Index)
assert isinstance(b, pd.Index)
assert a.equals(b)
@pytest.mark.parametrize(
"index_type, fh_type, is_relative", SUPPORTED_INDEX_FH_COMBINATIONS
)
@pytest.mark.parametrize("steps", TEST_FHS)
def test_fh(index_type, fh_type, is_relative, steps):
# generate data
y = make_forecasting_problem(index_type=index_type)
assert isinstance(y.index, INDEX_TYPE_LOOKUP.get(index_type))
# split data
y_train, y_test = temporal_train_test_split(y, test_size=10)
# choose cutoff point
cutoff = y_train.index[-1]
# generate fh
fh = _make_fh(cutoff, steps, fh_type, is_relative)
assert isinstance(fh.to_pandas(), INDEX_TYPE_LOOKUP.get(fh_type))
# get expected outputs
if isinstance(steps, int):
steps = np.array([steps])
fh_relative = pd.Int64Index(steps).sort_values()
fh_absolute = y.index[np.where(y.index == cutoff)[0] + steps].sort_values()
fh_indexer = fh_relative - 1
fh_oos = fh.to_pandas()[fh_relative > 0]
is_oos = len(fh_oos) == len(fh)
fh_ins = fh.to_pandas()[fh_relative <= 0]
is_ins = len(fh_ins) == len(fh)
# check outputs
# check relative representation
_assert_index_equal(fh_absolute, fh.to_absolute(cutoff).to_pandas())
assert not fh.to_absolute(cutoff).is_relative
# check relative representation
_assert_index_equal(fh_relative, fh.to_relative(cutoff).to_pandas())
assert fh.to_relative(cutoff).is_relative
# check index-like representation
_assert_index_equal(fh_indexer, fh.to_indexer(cutoff))
# check in-sample representation
# we only compare the numpy array here because the expected solution is
# formatted in a slightly different way than the generated solution
np.testing.assert_array_equal(
fh_ins.to_numpy(), fh.to_in_sample(cutoff).to_pandas()
)
assert fh.to_in_sample(cutoff).is_relative == is_relative
assert fh.is_in_sample(cutoff) == is_ins
# check out-of-sample representation
np.testing.assert_array_equal(
fh_oos.to_numpy(), fh.to_out_of_sample(cutoff).to_pandas()
)
assert fh.to_out_of_sample(cutoff).is_relative == is_relative
assert fh.is_out_of_sample(cutoff) == is_oos
def test_fh_method_delegation():
fh = ForecastingHorizon(1)
for method in DELEGATED_METHODS:
assert hasattr(fh, method)
BAD_INPUT_ARGS = (
(1, 2), # tuple
"some_string", # string
0.1, # float
-0.1, # negative float
np.array([0.1, 2]), # float in array
None,
)
@pytest.mark.parametrize("arg", BAD_INPUT_ARGS)
def test_check_fh_values_bad_input_types(arg):
with raises(TypeError):
ForecastingHorizon(arg)
DUPLICATE_INPUT_ARGS = (
np.array([1, 2, 2]),
[3, 3, 1],
)
@pytest.mark.parametrize("arg", DUPLICATE_INPUT_ARGS)
def test_check_fh_values_duplicate_input_values(arg):
with raises(ValueError):
ForecastingHorizon(arg)
GOOD_INPUT_ARGS = (
pd.Int64Index([1, 2, 3]),
pd.period_range("2000-01-01", periods=3, freq="D"),
pd.date_range("2000-01-01", periods=3, freq="M"),
np.array([1, 2, 3]),
[1, 2, 3],
1,
)
@pytest.mark.parametrize("arg", GOOD_INPUT_ARGS)
def test_check_fh_values_input_conversion_to_pandas_index(arg):
output = ForecastingHorizon(arg, is_relative=False).to_pandas()
assert type(output) in SUPPORTED_INDEX_TYPES
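

# Illustrative example (an addition, not part of the original test
# suite; the leading underscore keeps pytest from collecting it). It
# uses the same relative/absolute conversion API exercised in test_fh
# above, on a concrete monthly PeriodIndex.
def _example_fh_to_absolute():
    index = pd.period_range("2000-01", periods=12, freq="M")
    cutoff = index[-1]  # 2000-12
    fh = ForecastingHorizon([1, 2, 3], is_relative=True)
    absolute = fh.to_absolute(cutoff).to_pandas()
    assert list(absolute) == list(
        pd.period_range("2001-01", periods=3, freq="M"))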
|
{"hexsha": "7ccb1d7f2e1a13bd86ca4959b1bb037d8df4fe14", "size": 4251, "ext": "py", "lang": "Python", "max_stars_repo_path": "sktime/forecasting/base/tests/test_fh.py", "max_stars_repo_name": "alwinw/sktime", "max_stars_repo_head_hexsha": "a6f17bd586df6bbc8e6c783f08eda4c30d2353f9", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-10-05T12:17:41.000Z", "max_stars_repo_stars_event_max_datetime": "2020-10-19T08:57:50.000Z", "max_issues_repo_path": "sktime/forecasting/base/tests/test_fh.py", "max_issues_repo_name": "alwinw/sktime", "max_issues_repo_head_hexsha": "a6f17bd586df6bbc8e6c783f08eda4c30d2353f9", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sktime/forecasting/base/tests/test_fh.py", "max_forks_repo_name": "alwinw/sktime", "max_forks_repo_head_hexsha": "a6f17bd586df6bbc8e6c783f08eda4c30d2353f9", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.2573529412, "max_line_length": 79, "alphanum_fraction": 0.7299458951, "include": true, "reason": "import numpy", "num_tokens": 1081}
|
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import math
import os
import pytest
import random
import numpy as np
import torch
from PIL import Image
from kaolin.io import render
from kaolin.render.camera import generate_perspective_projection
from kaolin.utils.testing import FLOAT_TYPES
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
SAMPLE_DIR = os.path.join(ROOT_DIR, os.pardir, os.pardir, os.pardir, 'samples', 'synthetic')
class TestImportView:
@pytest.fixture(autouse=True)
def expected_rgb(self):
path = os.path.join(SAMPLE_DIR, '0_rgb.png')
return torch.from_numpy(
np.array(Image.open(path))
)[:, :, :3].float() / 255.
@pytest.fixture(autouse=True)
def expected_depth_linear(self):
path = os.path.join(SAMPLE_DIR, '0_depth_linear.npy')
return torch.from_numpy(np.load(path))
@pytest.fixture(autouse=True)
def expected_semantic(self):
path = os.path.join(SAMPLE_DIR, '0_semantic.npy')
return torch.from_numpy(np.load(path))
@pytest.fixture(autouse=True)
def expected_instance(self):
path = os.path.join(SAMPLE_DIR, '0_instance.npy')
return torch.from_numpy(np.load(path))
@pytest.fixture(autouse=True)
def expected_normals(self):
path = os.path.join(SAMPLE_DIR, '0_normals.png')
return torch.from_numpy(
np.array(Image.open(path))
)[:, :, :3].float() / 255.
@pytest.fixture(autouse=True)
def expected_json(self):
path = os.path.join(SAMPLE_DIR, '0_metadata.json')
with open(path, 'r') as f:
fjson = json.load(f)
return fjson
@pytest.fixture(autouse=True)
def expected_metadata(self, expected_json):
asset_transforms = torch.FloatTensor(expected_json['asset_transforms'][0][1])
cam_transform = torch.FloatTensor(expected_json['camera_properties']['tf_mat'])
aspect_ratio = (expected_json['camera_properties']['resolution']['width'] /
expected_json['camera_properties']['resolution']['height'])
focal_length = expected_json['camera_properties']['focal_length']
horizontal_aperture = expected_json['camera_properties']['horizontal_aperture']
fov = 2 * math.atan(horizontal_aperture / (2 * focal_length))
return {
'cam_transform': cam_transform[:, :3],
'asset_transforms': asset_transforms,
'cam_proj': generate_perspective_projection(fov, aspect_ratio),
'clipping_range': expected_json['camera_properties']['clipping_range']
}
@pytest.fixture(autouse=True)
def expected_bbox_2d_tight(self, expected_json):
return expected_json['bbox_2d_tight']
@pytest.fixture(autouse=True)
def expected_bbox_2d_loose(self, expected_json):
return expected_json['bbox_2d_loose']
@pytest.mark.parametrize('with_rgb', [True, False])
@pytest.mark.parametrize('with_depth_linear', [True, False])
@pytest.mark.parametrize('with_semantic', [True, False])
@pytest.mark.parametrize('with_instance', [True, False])
@pytest.mark.parametrize('with_normals', [True, False])
@pytest.mark.parametrize('with_bbox_2d_tight', [True, False])
@pytest.mark.parametrize('with_bbox_2d_loose', [True, False])
def test_import_synthetic_view(self, expected_rgb, expected_depth_linear,
expected_semantic, expected_instance,
expected_normals, expected_bbox_2d_tight,
expected_bbox_2d_loose, expected_metadata,
with_rgb, with_depth_linear, with_semantic,
with_instance, with_normals, with_bbox_2d_tight,
with_bbox_2d_loose):
output = render.import_synthetic_view(SAMPLE_DIR, 0,
rgb=with_rgb,
depth_linear=with_depth_linear,
semantic=with_semantic,
instance=with_instance,
normals=with_normals,
bbox_2d_tight=with_bbox_2d_tight,
bbox_2d_loose=with_bbox_2d_loose)
if with_rgb:
assert torch.equal(output['rgb'], expected_rgb)
else:
assert 'rgb' not in output
if with_depth_linear:
assert torch.equal(output['depth_linear'], expected_depth_linear)
else:
assert 'depth_linear' not in output
if with_semantic:
assert torch.equal(output['semantic'], expected_semantic)
else:
assert 'semantic' not in output
if with_instance:
assert torch.equal(output['instance'], expected_instance)
else:
assert 'instance' not in output
if with_normals:
assert torch.equal(output['normals'], expected_normals)
else:
assert 'normals' not in output
if with_bbox_2d_tight:
assert output['bbox_2d_tight'] == expected_bbox_2d_tight
else:
assert 'bbox_2d_tight' not in output
if with_bbox_2d_loose:
assert output['bbox_2d_loose'] == expected_bbox_2d_loose
else:
assert 'bbox_2d_loose' not in output
assert expected_metadata.keys() == output['metadata'].keys()
assert torch.equal(expected_metadata['cam_transform'],
output['metadata']['cam_transform'])
assert torch.equal(expected_metadata['cam_proj'],
output['metadata']['cam_proj'])
assert (expected_metadata['clipping_range'] ==
output['metadata']['clipping_range'])
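

# Note (an illustrative addition): the `fov` computed in the
# `expected_metadata` fixture above is the standard pinhole-camera
# horizontal field of view, fov = 2 * atan(aperture / (2 * focal_length)).
# The default values below are hypothetical; e.g. a 20.955 mm aperture
# with a 24 mm focal length gives roughly 47.2 degrees (~0.823 rad).
def _example_fov(horizontal_aperture=20.955, focal_length=24.0):
    return 2 * math.atan(horizontal_aperture / (2 * focal_length))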
|
{"hexsha": "ac3c9b2aca86adcea446f0f315ff4040635f32c7", "size": 6490, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/python/kaolin/io/test_render.py", "max_stars_repo_name": "priyasundaresan/kaolin", "max_stars_repo_head_hexsha": "ddae34ba5f09bffc4368c29bc50491c5ece797d4", "max_stars_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_stars_count": 3747, "max_stars_repo_stars_event_min_datetime": "2019-11-13T02:18:16.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T21:12:31.000Z", "max_issues_repo_path": "tests/python/kaolin/io/test_render.py", "max_issues_repo_name": "priyasundaresan/kaolin", "max_issues_repo_head_hexsha": "ddae34ba5f09bffc4368c29bc50491c5ece797d4", "max_issues_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_issues_count": 371, "max_issues_repo_issues_event_min_datetime": "2019-11-13T14:50:59.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-22T19:40:06.000Z", "max_forks_repo_path": "tests/python/kaolin/io/test_render.py", "max_forks_repo_name": "priyasundaresan/kaolin", "max_forks_repo_head_hexsha": "ddae34ba5f09bffc4368c29bc50491c5ece797d4", "max_forks_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_forks_count": 482, "max_forks_repo_forks_event_min_datetime": "2019-11-13T05:04:38.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T10:20:26.000Z", "avg_line_length": 41.0759493671, "max_line_length": 92, "alphanum_fraction": 0.6271186441, "include": true, "reason": "import numpy", "num_tokens": 1363}
|
program testburn
use bl_types
use network
use eos_module
implicit none
real(kind=dp_t) :: dens, temp, pres, entr
real(kind=dp_t), dimension(nspec) :: Xin
integer :: ic12, io16, img24
logical :: do_diag
call network_init()
call eos_init(gamma_in=5.0d0/3.0d0)
do_diag = .false.
ic12 = network_species_index("carbon-12")
io16 = network_species_index("oxygen-16")
img24 = network_species_index("magnesium-24")
dens = 1.e4_dp_t
temp = 1.e8_dp_t
Xin(ic12) = 0.5_dp_t
Xin(io16) = 0.5_dp_t
Xin(img24) = 0.0_dp_t
den_eos = dens
temp_eos = temp
xn_eos(:) = Xin(:)
! test eos_input_rt
call eos(eos_input_rt, den_eos, temp_eos, &
xn_eos, &
p_eos, h_eos, e_eos, &
cv_eos, cp_eos, xne_eos, eta_eos, pele_eos, &
dpdt_eos, dpdr_eos, dedt_eos, dedr_eos, &
dpdX_eos, dhdX_eos, &
gam1_eos, cs_eos, s_eos, &
dsdt_eos, dsdr_eos, &
do_diag)
print *, 'input: eos_input_rt'
print *, 'dens: ', dens, ' temp: ', temp
print *, 'X: ', Xin
print *, 'pres: ', p_eos, ' ener: ', e_eos
print *, 'h: ', h_eos
print *, 'c_v: ', cv_eos, ' c_p : ', cp_eos
print *, 'dpdT: ', dpdt_eos, ' dpdr: ', dpdr_eos
print *, 'dedT: ', dedt_eos, ' dedr: ', dedr_eos
print *, 'dpdX: ', dpdX_eos(:)
print *, 'dhdX: ', dhdX_eos(:)
print *, ' '
print *, 'setting pres = ', p_eos
pres = p_eos
print *, 'setting entr = ', s_eos
entr = s_eos
! test eos_input_rh
temp_eos = 0.d0
call eos(eos_input_rh, den_eos, temp_eos, &
xn_eos, &
p_eos, h_eos, e_eos, &
cv_eos, cp_eos, xne_eos, eta_eos, pele_eos, &
dpdt_eos, dpdr_eos, dedt_eos, dedr_eos, &
dpdX_eos, dhdX_eos, &
gam1_eos, cs_eos, s_eos, &
dsdt_eos, dsdr_eos, &
do_diag)
print *, ' '
print *, 'input: eos_input_rh'
print *, 'dens: ', dens, ' temp: ', temp
print *, 'X: ', Xin
print *, 'pres: ', p_eos, ' ener: ', e_eos
print *, 'entropy: ', s_eos
print *, 'h: ', h_eos
print *, 'c_v: ', cv_eos, ' c_p : ', cp_eos
print *, 'dpdT: ', dpdt_eos, ' dpdr: ', dpdr_eos
print *, 'dedT: ', dedt_eos, ' dedr: ', dedr_eos
print *, 'dpdX: ', dpdX_eos(:)
print *, 'dhdX: ', dhdX_eos(:)
! test eos_input_tp
den_eos = 0.d0
call eos(eos_input_tp, den_eos, temp_eos, &
xn_eos, &
p_eos, h_eos, e_eos, &
cv_eos, cp_eos, xne_eos, eta_eos, pele_eos, &
dpdt_eos, dpdr_eos, dedt_eos, dedr_eos, &
dpdX_eos, dhdX_eos, &
gam1_eos, cs_eos, s_eos, &
dsdt_eos, dsdr_eos, &
do_diag)
print *, ' '
print *, 'input: eos_input_tp'
print *, 'dens: ', dens, ' temp: ', temp
print *, 'X: ', Xin
print *, 'pres: ', p_eos, ' ener: ', e_eos
print *, 'h: ', h_eos
print *, 'c_v: ', cv_eos, ' c_p : ', cp_eos
print *, 'dpdT: ', dpdt_eos, ' dpdr: ', dpdr_eos
print *, 'dedT: ', dedt_eos, ' dedr: ', dedr_eos
print *, 'dpdX: ', dpdX_eos(:)
print *, 'dhdX: ', dhdX_eos(:)
! test eos_input_rp
temp_eos = 0.d0
call eos(eos_input_rp, den_eos, temp_eos, &
xn_eos, &
p_eos, h_eos, e_eos, &
cv_eos, cp_eos, xne_eos, eta_eos, pele_eos, &
dpdt_eos, dpdr_eos, dedt_eos, dedr_eos, &
dpdX_eos, dhdX_eos, &
gam1_eos, cs_eos, s_eos, &
dsdt_eos, dsdr_eos, &
do_diag)
print *, ' '
print *, 'input: eos_input_rp'
print *, 'dens: ', dens, ' temp: ', temp
print *, 'X: ', Xin
print *, 'pres: ', p_eos, ' ener: ', e_eos
print *, 'h: ', h_eos
print *, 'c_v: ', cv_eos, ' c_p : ', cp_eos
print *, 'dpdT: ', dpdt_eos, ' dpdr: ', dpdr_eos
print *, 'dedT: ', dedt_eos, ' dedr: ', dedr_eos
print *, 'dpdX: ', dpdX_eos(:)
print *, 'dhdX: ', dhdX_eos(:)
! test eos_input_re
temp_eos = 0.d0
call eos(eos_input_re, den_eos, temp_eos, &
xn_eos, &
p_eos, h_eos, e_eos, &
cv_eos, cp_eos, xne_eos, eta_eos, pele_eos, &
dpdt_eos, dpdr_eos, dedt_eos, dedr_eos, &
dpdX_eos, dhdX_eos, &
gam1_eos, cs_eos, s_eos, &
dsdt_eos, dsdr_eos, &
do_diag)
print *, ' '
print *, 'input: eos_input_re'
print *, 'dens: ', dens, ' temp: ', temp
print *, 'X: ', Xin
print *, 'pres: ', p_eos, ' ener: ', e_eos
print *, 'h: ', h_eos
print *, 'c_v: ', cv_eos, ' c_p : ', cp_eos
print *, 'dpdT: ', dpdt_eos, ' dpdr: ', dpdr_eos
print *, 'dedT: ', dedt_eos, ' dedr: ', dedr_eos
print *, 'dpdX: ', dpdX_eos(:)
print *, 'dhdX: ', dhdX_eos(:)
! test eos_input_ps
p_eos = pres
s_eos = entr
call eos(eos_input_ps, den_eos, temp_eos, &
xn_eos, &
p_eos, h_eos, e_eos, &
cv_eos, cp_eos, xne_eos, eta_eos, pele_eos, &
dpdt_eos, dpdr_eos, dedt_eos, dedr_eos, &
dpdX_eos, dhdX_eos, &
gam1_eos, cs_eos, s_eos, &
dsdt_eos, dsdr_eos, &
do_diag)
print *, ' '
print *, 'input: eos_input_ps'
print *, 'pres: ', pres, 'entr: ', entr
print *, 'dens: ', den_eos, ' temp: ', temp_eos
print *, 'X: ', Xin
print *, 'pres_eos: ', p_eos, 'entr_eos: ', s_eos
print *, 'h: ', h_eos, ' ener: ', e_eos
print *, 'c_v: ', cv_eos, ' c_p : ', cp_eos
print *, 'dpdT: ', dpdt_eos, ' dpdr: ', dpdr_eos
print *, 'dedT: ', dedt_eos, ' dedr: ', dedr_eos
print *, 'dpdX: ', dpdX_eos(:)
print *, 'dhdX: ', dhdX_eos(:)
end program testburn
|
{"hexsha": "3310967f39e76d3ff3288fe819e8a036fecae9ee", "size": 5509, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "Microphysics/EOS/gamma_law_general/test/testeos.f90", "max_stars_repo_name": "sailoridy/MAESTRO", "max_stars_repo_head_hexsha": "f957d148d2028324a2a1076be244f73dad63fd67", "max_stars_repo_licenses": ["BSD-3-Clause-LBNL"], "max_stars_count": 17, "max_stars_repo_stars_event_min_datetime": "2017-05-15T15:28:56.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-09T08:13:32.000Z", "max_issues_repo_path": "Microphysics/EOS/gamma_law_general/test/testeos.f90", "max_issues_repo_name": "sailoridy/MAESTRO", "max_issues_repo_head_hexsha": "f957d148d2028324a2a1076be244f73dad63fd67", "max_issues_repo_licenses": ["BSD-3-Clause-LBNL"], "max_issues_count": 17, "max_issues_repo_issues_event_min_datetime": "2017-06-14T23:05:00.000Z", "max_issues_repo_issues_event_max_datetime": "2018-11-28T16:40:42.000Z", "max_forks_repo_path": "Microphysics/EOS/gamma_law_general/test/testeos.f90", "max_forks_repo_name": "sailoridy/MAESTRO", "max_forks_repo_head_hexsha": "f957d148d2028324a2a1076be244f73dad63fd67", "max_forks_repo_licenses": ["BSD-3-Clause-LBNL"], "max_forks_count": 14, "max_forks_repo_forks_event_min_datetime": "2017-06-14T14:52:09.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-04T07:16:09.000Z", "avg_line_length": 28.6927083333, "max_line_length": 56, "alphanum_fraction": 0.5476493011, "num_tokens": 2204}
|
import os
import numpy as np
import pickle
import tensorflow as tf
from Configuration import FLAGS
from Data_Process import Data_Process
from Caption_Model import Caption_Model
# from Activity_Model import Activity_Model
def train_models():
data_process = Data_Process(FLAGS)
# 1. Data process for lstm_caption
    print('Getting training data for Caption_Model ...')
image_path_list, image_cnn_list, image_caption_index_list = data_process.get_data_for_caption('training')
# 2. Train lstm_caption model
    print('Training Caption_Model ...')
caption_model = Caption_Model(FLAGS)
caption_model.train_model(image_path_list, image_cnn_list, image_caption_index_list)
# 3. Data process for lstm_activity
    print('Generating Captions for Activity_Model to train ...')
act_train_image_path, act_train_image_cnn, act_train_image_label = data_process.get_data_for_activity('training')
predicted_caption = caption_model.test_model(act_train_image_path, act_train_image_cnn)
def main(_):
train_models()
if __name__ == '__main__':
tf.app.run()
|
{"hexsha": "d7c6cfd0008deca3e3a967eaf8bddfeb1c6a3d62", "size": 1047, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/main.py", "max_stars_repo_name": "xincoder/SBGAR", "max_stars_repo_head_hexsha": "3b4e4bce1b1d160533f4e3904e46a5c08be680c7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-04-28T06:21:48.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-29T03:33:33.000Z", "max_issues_repo_path": "code/main.py", "max_issues_repo_name": "xincoder/SBGAR", "max_issues_repo_head_hexsha": "3b4e4bce1b1d160533f4e3904e46a5c08be680c7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/main.py", "max_forks_repo_name": "xincoder/SBGAR", "max_forks_repo_head_hexsha": "3b4e4bce1b1d160533f4e3904e46a5c08be680c7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.7272727273, "max_line_length": 114, "alphanum_fraction": 0.8147086915, "include": true, "reason": "import numpy", "num_tokens": 237}
|
import numpy as np
import scipy.interpolate
############ P5 Tridiag solver
class TriDiag:
def __init__(self, a, d, b, n=None):
if n is None:
n = len(d)
self.n = n
        # Force float arrays so the in-place elimination in solve()
        # cannot silently truncate integer input
        self.a = np.asarray(a, dtype=float)
        self.b = np.asarray(b, dtype=float)
        self.d = np.asarray(d, dtype=float)
def mult(self, x):
'''Multiplies tridiagonal matrix by some vector of length n
'''
x = np.asarray(x)
return (self.d * x) + \
np.concatenate([self.a * x[1:],[0]]) + \
np.concatenate([[0], self.b * x[:-1]])
def solve(self, y, verbose=True):
'''Finds solution to equation Ax=y for x
verbose: print arrays after solving
All changes to internal arrays will be reset after solving
'''
        # Copy y (and force float) so the caller's array is not mutated
        # and in-place division below cannot truncate integer input
        y = np.array(y, dtype=float)
        # Checkpoint *copies* of the internal arrays, so the in-place
        # forward elimination below can actually be undone afterwards
        checkPoint = (self.a.copy(), self.b.copy(), self.d.copy())
if verbose:
print("Matrix before solve:")
for label, array in [('a',self.a), ('d',self.d), ('b',self.b)]:
print('{} array:'.format(label), array)
print()
# Forward solve
for i in range(self.n):
y[i] = y[i] / self.d[i]
if i < self.n-1:
self.a[i] = self.a[i] / self.d[i]
self.d[i+1] = self.d[i+1] - self.b[i] * self.a[i]
y[i+1] = y[i+1] - self.b[i] * y[i]
# Backward solve
k = self.n-1
for j in range(k):
i = k-j-1
y[i] = y[i] - self.a[i] * y[i+1]
if verbose:
print("Matrix after solve:")
for label, array in [('a',self.a), ('d',self.d), ('b',self.b)]:
print('{} array:'.format(label), array)
print()
# Restore the checkpoint
self.a, self.b, self.d = checkPoint
return y
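

# Usage sketch (an illustrative addition): solve a small 3x3 system
# with the Thomas algorithm above and verify the answer with mult().
# Note that `a`/`b` have length n-1 while `d` has length n.
def _demo_tridiag():
    A = TriDiag(a=[1.0, 1.0], d=[4.0, 4.0, 4.0], b=[1.0, 1.0])
    y = [6.0, 12.0, 18.0]
    x = A.solve(list(y), verbose=False)
    assert np.allclose(A.mult(x), y)
    return x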
############ EC1 Spline solver
def derivSolve(x, y, derivl, derivr):
'''Solve for point derivatives at internal mesh points:
1) some set of points
2) function evaluations at those points
3) derivatives at left and right endpoints
'''
x = np.asarray(x)
y = np.asarray(y)
hs = x[1:] - x[:-1]
# Above diagonal
a = np.concatenate(
[
[0],
-2/hs[1:],
],
axis=0
)
# True diagonal
d = np.concatenate(
[
[1],
-4*(1/hs[1:] + 1/hs[:-1]),
[1],
],
axis=0
)
# Below diagonal
b = np.concatenate(
[
-2/hs[:-1],
[0]
]
)
# y_i
solution = y[1:-1] * ( 1/hs[1:]**2 - 1/hs[:-1]**2)
# y_i-1
solution += y[:-2] * (1/hs[:-1]**2)
# y_i+1
solution += y[2:] * (-1/hs[1:]**2)
solution *= 6
# Assemble y, for Ax=y
solution = np.concatenate(
[
[derivl],
solution,
[derivr],
],
axis=0
)
derivs = TriDiag(a,d,b).solve(solution, verbose=False)
return derivs
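

# Worked check (an illustrative addition): a cubic satisfies the C2
# continuity equations exactly and the system is diagonally dominant
# (hence uniquely solvable), so derivSolve should recover 3*x**2 for
# y = x**3 up to rounding, given exact endpoint derivatives.
def _demo_deriv_solve():
    x = np.linspace(0.0, 2.0, 6)
    derivs = derivSolve(x, x**3, derivl=0.0, derivr=12.0)
    assert np.allclose(derivs, 3 * x**2)
    return derivs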
def cubic_spline(x, y, derivl, derivr):
'''Creates cubic hermite spline with x,y and left and right derivatives
Mostly just wrapper for derivSolve
'''
derivs = derivSolve(x, y, derivl, derivr)
return scipy.interpolate.CubicHermiteSpline( x, y, derivs )
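

# Usage sketch (an illustrative addition): interpolate sin on [0, pi]
# with its exact endpoint derivatives cos(0)=1 and cos(pi)=-1. For a
# C2 cubic spline the error is O(h^4), so 1e-3 is a loose sanity bound.
def _demo_cubic_spline():
    x = np.linspace(0, np.pi, 9)
    spline = cubic_spline(x, np.sin(x), derivl=1.0, derivr=-1.0)
    xs = np.linspace(0, np.pi, 100)
    assert np.max(np.abs(spline(xs) - np.sin(xs))) < 1e-3
    return spline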
def periodic_cubic_spline(x, y):
    if not np.isclose(y[0], y[-1]):
raise RuntimeError(
'Function not periodic on [{0},{1}], endpoints don\'t match ({2}, {3})'.format(
x[0],x[-1],
y[0],y[-1]
)
)
x = np.asarray(x)
y = np.asarray(y)
hs = x[1:] - x[:-1]
# solve for zero deriv at endpoint, meets function evals at all interior points
derivs_base = derivSolve(x, y, 0, 0)
# solve for derivative adjustment at endpoints
derivs_adjust = derivSolve(x, 0*y, 1, 1)
# re-usable dydx array, used for both dydx_0 and dydx_1
dydxcoeff = np.array([
-2/hs[-1],
-4*(1/hs[0] + 1/hs[-1]),
-2/hs[0]
])
# Coefficients for y[...] at endpoint
ycoeffs = np.array([
-6/hs[-1]**2,
-6*(1/hs[0]**2 - 1/hs[-1]**2),
6*(1/hs[0]**2)
])
    # This is the second-derivative jump condition [[g_i'']], applied
    # at the endpoints; used to solve for alpha in the writeup
alpha = ycoeffs @ [y[-2], y[0], y[1]]
alpha += dydxcoeff @ [derivs_base[-2], derivs_base[0], derivs_base[1]]
alpha /= -1* dydxcoeff @ [derivs_adjust[-2], derivs_adjust[0], derivs_adjust[1]]
# The final derivatives used in periodic spline
derivs = derivs_base + alpha * derivs_adjust
# Spline-ify our results
return scipy.interpolate.CubicHermiteSpline( x, y, derivs )
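

# Usage sketch (an illustrative addition): one full period of cos has
# matching endpoint values, as periodic_cubic_spline requires, and its
# true endpoint derivative is 0, so the alpha correction stays small.
def _demo_periodic_spline():
    x = np.linspace(0, 2 * np.pi, 17)
    spline = periodic_cubic_spline(x, np.cos(x))
    xs = np.linspace(0, 2 * np.pi, 200)
    assert np.max(np.abs(spline(xs) - np.cos(xs))) < 1e-2
    return spline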
|
{"hexsha": "ba50bce309c45fee8485a8e7cab1883ba0dcf1b5", "size": 4825, "ext": "py", "lang": "Python", "max_stars_repo_path": "numerical_eqs/interp/spline/functions.py", "max_stars_repo_name": "alienbrett/numerical-eqs-collection", "max_stars_repo_head_hexsha": "23619bf379d53ce0facb63be08ee6a3902d404d5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "numerical_eqs/interp/spline/functions.py", "max_issues_repo_name": "alienbrett/numerical-eqs-collection", "max_issues_repo_head_hexsha": "23619bf379d53ce0facb63be08ee6a3902d404d5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "numerical_eqs/interp/spline/functions.py", "max_forks_repo_name": "alienbrett/numerical-eqs-collection", "max_forks_repo_head_hexsha": "23619bf379d53ce0facb63be08ee6a3902d404d5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.4223300971, "max_line_length": 91, "alphanum_fraction": 0.4882901554, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1403}
|