text stringlengths 0 27.1M | meta dict |
|---|---|
from sympy import symbols, Mul, sin, Integral, oo, Eq, Sum, sqrt, exp, pi, Dummy
from sympy.core.expr import unchanged
from sympy.stats import (Normal, Poisson, variance, Covariance, Variance,
Probability, Expectation, Moment, CentralMoment)
from sympy.stats.rv import probability, expectation
def test_literal_probability():
    """Unevaluated Probability/Expectation/Variance/Covariance objects:
    integral evaluation, ``.args`` invariants, and linearity expansion."""
    X = Normal('X', 2, 3)
    Y = Normal('Y', 3, 4)
    Z = Poisson('Z', 4)
    W = Poisson('W', 3)
    x = symbols('x', real=True)
    y, w, z = symbols('y, w, z')

    # Probability: evaluate_integral() and rewrite(Integral).doit() both
    # reduce to the direct probability() computation.
    assert Probability(X > 0).evaluate_integral() == probability(X > 0)
    assert Probability(X > x).evaluate_integral() == probability(X > x)
    assert Probability(X > 0).rewrite(Integral).doit() == probability(X > 0)
    assert Probability(X > x).rewrite(Integral).doit() == probability(X > x)

    # Expectation: evaluation, and linearity under expand() — constants
    # factor out, sums split; the unexpanded .args stay untouched.
    assert Expectation(X).evaluate_integral() == expectation(X)
    assert Expectation(X).rewrite(Integral).doit() == expectation(X)
    assert Expectation(X**2).evaluate_integral() == expectation(X**2)
    assert Expectation(x*X).args == (x*X,)
    assert Expectation(x*X).expand() == x*Expectation(X)
    assert Expectation(2*X + 3*Y + z*X*Y).expand() == 2*Expectation(X) + 3*Expectation(Y) + z*Expectation(X*Y)
    assert Expectation(2*X + 3*Y + z*X*Y).args == (2*X + 3*Y + z*X*Y,)
    assert Expectation(sin(X)) == Expectation(sin(X)).expand()
    assert Expectation(2*x*sin(X)*Y + y*X**2 + z*X*Y).expand() == 2*x*Expectation(sin(X)*Y) \
        + y*Expectation(X**2) + z*Expectation(X*Y)
    assert Expectation(X + Y).expand() == Expectation(X) + Expectation(Y)
    assert Expectation((X + Y)*(X - Y)).expand() == Expectation(X**2) - Expectation(Y**2)
    assert Expectation((X + Y)*(X - Y)).expand().doit() == -12
    assert Expectation(X + Y, evaluate=True).doit() == 5
    # nested Expectation collapses under doit(); deep=False evaluates only
    # the outermost level
    assert Expectation(X + Expectation(Y)).doit() == 5
    assert Expectation(X + Expectation(Y)).doit(deep=False) == 2 + Expectation(Expectation(Y))
    assert Expectation(X + Expectation(Y + Expectation(2*X))).doit(deep=False) == 2 \
        + Expectation(Expectation(Y + Expectation(2*X)))
    assert Expectation(X + Expectation(Y + Expectation(2*X))).doit() == 9
    assert Expectation(Expectation(2*X)).doit() == 4
    assert Expectation(Expectation(2*X)).doit(deep=False) == Expectation(2*X)
    assert Expectation(4*Expectation(2*X)).doit(deep=False) == 4*Expectation(2*X)
    # multinomial expansion of powers of sums of random variables
    assert Expectation((X + Y)**3).expand() == 3*Expectation(X*Y**2) +\
        3*Expectation(X**2*Y) + Expectation(X**3) + Expectation(Y**3)
    assert Expectation((X - Y)**3).expand() == 3*Expectation(X*Y**2) -\
        3*Expectation(X**2*Y) + Expectation(X**3) - Expectation(Y**3)
    assert Expectation((X - Y)**2).expand() == -2*Expectation(X*Y) +\
        Expectation(X**2) + Expectation(Y**2)

    # Variance: non-random terms vanish, scale factors come out squared,
    # sums expand with pairwise covariances.
    assert Variance(w).args == (w,)
    assert Variance(w).expand() == 0
    assert Variance(X).evaluate_integral() == Variance(X).rewrite(Integral).doit() == variance(X)
    assert Variance(X + z).args == (X + z,)
    assert Variance(X + z).expand() == Variance(X)
    assert Variance(X*Y).args == (Mul(X, Y),)
    assert type(Variance(X*Y)) == Variance
    assert Variance(z*X).expand() == z**2*Variance(X)
    assert Variance(X + Y).expand() == Variance(X) + Variance(Y) + 2*Covariance(X, Y)
    assert Variance(X + Y + Z + W).expand() == (Variance(X) + Variance(Y) + Variance(Z) + Variance(W) +
        2 * Covariance(X, Y) + 2 * Covariance(X, Z) + 2 * Covariance(X, W) +
        2 * Covariance(Y, Z) + 2 * Covariance(Y, W) + 2 * Covariance(W, Z))
    assert Variance(X**2).evaluate_integral() == variance(X**2)
    assert unchanged(Variance, X**2)
    assert Variance(x*X**2).expand() == x**2*Variance(X**2)
    assert Variance(sin(X)).args == (sin(X),)
    assert Variance(sin(X)).expand() == Variance(sin(X))
    assert Variance(x*sin(X)).expand() == x**2*Variance(sin(X))

    # Covariance: bilinearity, symmetry, and Cov(X, X) == Var(X);
    # non-random arguments give zero covariance.
    assert Covariance(w, z).args == (w, z)
    assert Covariance(w, z).expand() == 0
    assert Covariance(X, w).expand() == 0
    assert Covariance(w, X).expand() == 0
    assert Covariance(X, Y).args == (X, Y)
    assert type(Covariance(X, Y)) == Covariance
    assert Covariance(z*X + 3, Y).expand() == z*Covariance(X, Y)
    assert Covariance(X, X).args == (X, X)
    assert Covariance(X, X).expand() == Variance(X)
    assert Covariance(z*X + 3, w*Y + 4).expand() == w*z*Covariance(X,Y)
    assert Covariance(X, Y) == Covariance(Y, X)
    assert Covariance(X + Y, Z + W).expand() == Covariance(W, X) + Covariance(W, Y) + Covariance(X, Z) + Covariance(Y, Z)
    assert Covariance(x*X + y*Y, z*Z + w*W).expand() == (x*w*Covariance(W, X) + w*y*Covariance(W, Y) +
        x*z*Covariance(X, Z) + y*z*Covariance(Y, Z))
    assert Covariance(x*X**2 + y*sin(Y), z*Y*Z**2 + w*W).expand() == (w*x*Covariance(W, X**2) + w*y*Covariance(sin(Y), W) +
        x*z*Covariance(Y*Z**2, X**2) + y*z*Covariance(Y*Z**2, sin(Y)))
    assert Covariance(X, X**2).expand() == Covariance(X, X**2)
    assert Covariance(X, sin(X)).expand() == Covariance(sin(X), X)
    assert Covariance(X**2, sin(X)*Y).expand() == Covariance(sin(X)*Y, X**2)
    assert Covariance(w, X).evaluate_integral() == 0
def test_probability_rewrite():
    """Rewrites between Variance/Covariance/Expectation/Probability and the
    corresponding Integral (continuous) / Sum (discrete) representations."""
    X = Normal('X', 2, 3)
    Y = Normal('Y', 3, 4)
    Z = Poisson('Z', 4)
    W = Poisson('W', 3)
    x, y, w, z = symbols('x, y, w, z')

    # Variance in terms of expectations: Var(X) = E[X**2] - E[X]**2,
    # optionally under a condition.
    assert Variance(w).rewrite(Expectation) == 0
    assert Variance(X).rewrite(Expectation) == Expectation(X ** 2) - Expectation(X) ** 2
    assert Variance(X, condition=Y).rewrite(Expectation) == Expectation(X ** 2, Y) - Expectation(X, Y) ** 2
    # without the rewrite, the conditional Variance stays unevaluated
    assert Variance(X, Y) != Expectation(X**2) - Expectation(X)**2
    assert Variance(X + z).rewrite(Expectation) == Expectation((X + z) ** 2) - Expectation(X + z) ** 2
    assert Variance(X * Y).rewrite(Expectation) == Expectation(X ** 2 * Y ** 2) - Expectation(X * Y) ** 2

    # Covariance in terms of expectations, optionally conditioned.
    assert Covariance(w, X).rewrite(Expectation) == -w*Expectation(X) + Expectation(w*X)
    assert Covariance(X, Y).rewrite(Expectation) == Expectation(X*Y) - Expectation(X)*Expectation(Y)
    assert Covariance(X, Y, condition=W).rewrite(Expectation) == Expectation(X * Y, W) - Expectation(X, W) * Expectation(Y, W)

    # Rewrites in terms of Probability: Integral over the density for the
    # continuous X, Sum over the support for the discrete Z.
    # (w is deliberately rebound here to a symbol named 'W'.)
    w, x, z = symbols("W, x, z")
    px = Probability(Eq(X, x))
    pz = Probability(Eq(Z, z))
    assert Expectation(X).rewrite(Probability) == Integral(x*px, (x, -oo, oo))
    assert Expectation(Z).rewrite(Probability) == Sum(z*pz, (z, 0, oo))
    assert Variance(X).rewrite(Probability) == Integral(x**2*px, (x, -oo, oo)) - Integral(x*px, (x, -oo, oo))**2
    assert Variance(Z).rewrite(Probability) == Sum(z**2*pz, (z, 0, oo)) - Sum(z*pz, (z, 0, oo))**2
    assert Covariance(w, X).rewrite(Probability) == \
        -w*Integral(x*Probability(Eq(X, x)), (x, -oo, oo)) + Integral(w*x*Probability(Eq(X, x)), (x, -oo, oo))
    # To test rewrite as sum function
    assert Variance(X).rewrite(Sum) == Variance(X).rewrite(Integral)
    assert Expectation(X).rewrite(Sum) == Expectation(X).rewrite(Integral)
    assert Covariance(w, X).rewrite(Sum) == 0
    assert Covariance(w, X).rewrite(Integral) == 0
    assert Variance(X, condition=Y).rewrite(Probability) == Integral(x**2*Probability(Eq(X, x), Y), (x, -oo, oo)) - \
        Integral(x*Probability(Eq(X, x), Y), (x, -oo, oo))**2
def test_symbolic_Moment():
    """Moment(X, n, c): the n-th moment of X about c, i.e. E[(X - c)**n]."""
    mu = symbols('mu', real=True)
    sigma = symbols('sigma', real=True, positive=True)
    x = symbols('x')
    X = Normal('X', mu, sigma)
    # fourth moment of X about the point 2
    M = Moment(X, 4, 2)
    assert M.rewrite(Expectation) == Expectation((X - 2)**4)
    assert M.rewrite(Probability) == Integral((x - 2)**4*Probability(Eq(X, x)),
        (x, -oo, oo))
    # the Integral rewrite introduces its own dummy integration variable,
    # so compare structurally with dummy_eq
    k = Dummy('k')
    expri = Integral(sqrt(2)*(k - 2)**4*exp(-(k - \
        mu)**2/(2*sigma**2))/(2*sqrt(pi)*sigma), (k, -oo, oo))
    assert M.rewrite(Integral).dummy_eq(expri)
    assert M.doit() == (mu**4 - 8*mu**3 + 6*mu**2*sigma**2 + \
        24*mu**2 - 24*mu*sigma**2 - 32*mu + 3*sigma**4 + 24*sigma**2 + 16)
    # a Moment of a non-random (constant) expression evaluates to the constant
    M = Moment(2, 5)
    assert M.doit() == 2
def test_symbolic_CentralMoment():
    """CentralMoment(X, n): the n-th moment about the mean, E[(X - E[X])**n]."""
    mu = symbols('mu', real=True)
    sigma = symbols('sigma', real=True, positive=True)
    x = symbols('x')
    X = Normal('X', mu, sigma)
    CM = CentralMoment(X, 6)
    assert CM.rewrite(Expectation) == Expectation((X - Expectation(X))**6)
    # the inner Integral is the rewritten mean E[X]
    assert CM.rewrite(Probability) == Integral((x - Integral(x*Probability(True),
        (x, -oo, oo)))**6*Probability(Eq(X, x)), (x, -oo, oo))
    # Integral rewrite uses a fresh dummy variable; compare with dummy_eq
    k = Dummy('k')
    expri = Integral(sqrt(2)*(k - Integral(sqrt(2)*k*exp(-(k - \
        mu)**2/(2*sigma**2))/(2*sqrt(pi)*sigma), (k, -oo, oo)))**6*exp(-(k - \
        mu)**2/(2*sigma**2))/(2*sqrt(pi)*sigma), (k, -oo, oo))
    assert CM.rewrite(Integral).dummy_eq(expri)
    # sixth central moment of N(mu, sigma)
    assert CM.doit().simplify() == 15*sigma**6
    # NOTE(review): this constructs Moment, not CentralMoment — looks like a
    # copy-paste from test_symbolic_Moment; the assertion holds either way
    # since a moment of a constant evaluates to the constant.  Verify intent.
    CM = Moment(5, 5)
    assert CM.doit() == 5
| {
"alphanum_fraction": 0.5824248431,
"author": null,
"avg_line_length": 54.0535714286,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "1dc1d9cff0f351565a45353513930cbceba374ae",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 35,
"max_forks_repo_forks_event_max_datetime": "2022-03-23T10:15:10.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-03-26T03:12:04.000Z",
"max_forks_repo_head_hexsha": "1fb2490fa2fa9b476da450f02a25b03c1dc07cf0",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "bigfooted/sympy",
"max_forks_repo_path": "sympy/stats/tests/test_symbolic_probability.py",
"max_issues_count": 387,
"max_issues_repo_head_hexsha": "1fb2490fa2fa9b476da450f02a25b03c1dc07cf0",
"max_issues_repo_issues_event_max_datetime": "2022-03-31T07:00:21.000Z",
"max_issues_repo_issues_event_min_datetime": "2020-12-15T14:54:04.000Z",
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "bigfooted/sympy",
"max_issues_repo_path": "sympy/stats/tests/test_symbolic_probability.py",
"max_line_length": 126,
"max_stars_count": 603,
"max_stars_repo_head_hexsha": "1fb2490fa2fa9b476da450f02a25b03c1dc07cf0",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "bigfooted/sympy",
"max_stars_repo_path": "sympy/stats/tests/test_symbolic_probability.py",
"max_stars_repo_stars_event_max_datetime": "2022-03-31T23:38:03.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-12-23T13:49:32.000Z",
"num_tokens": 2916,
"path": null,
"reason": "from sympy",
"repo": null,
"save_path": null,
"sha": null,
"size": 9081
} |
"""
Fill `wavelets[ipw, it][is, ifield]` in place for supersource `iss`, for every
propagating wavefield `ipw` and every modelling time step `it`, from the
source data in `srcwav`, according to `sflags[ipw]`:

* `0`  — no source: fill zeros;
* `1`  — ϕ[t] = s[t] (inject the wavelet as is);
* `-1` — source sink for 1: ϕ[t] = -s[t], time reversed;
* `2`  — ϕ[t] = ∑₁ᵗ⁻¹ s[t]·δt (time-integrated wavelet, shifted one sample);
* `-2` — source sink for 2: negated, time-reversed running sum.
"""
function fill_wavelets!(iss::Int64, wavelets::Array{Array{Float64,2},2}, srcwav::Array{SrcWav}, sflags::Vector{Int64})
    npw = size(wavelets, 1)   # number of propagating wavefields
    nt = size(wavelets, 2)    # modelling time samples
    δt = step(srcwav[1][1].grid)
    for ipw = 1:npw
        ns, snfield = size(wavelets[ipw, 1]) # ns may vary with ipw
        for ifield = 1:snfield, is = 1:ns
            snt = length(srcwav[ipw][1].grid) # source-wavelet time samples
            if (sflags[ipw] == 0)
                # just put zeros, no sources added
                for it = 1:nt
                    wavelets[ipw, it][is, ifield] = 0.0
                end
            elseif (sflags[ipw] == 1)
                # ϕ[t] = s[t]
                for it = 1:snt
                    source_term = srcwav[ipw][iss].d[ifield][it, is]
                    wavelets[ipw, it][is, ifield] = source_term
                end
            elseif (sflags[ipw] == -1)
                # source sink for 1: ϕ[t] = -s[t], time reversed
                for it = 2:snt
                    source_term = srcwav[ipw][iss].d[ifield][it, is]
                    wavelets[ipw, nt-it+2][is, ifield] = -1.0 * source_term
                end
            elseif (sflags[ipw] == 2)
                # ϕ[t] = ∑₁ᵗ⁻¹ s[t]: running sum stored one sample later
                source_term_stack = 0.0
                if (ifield == 1)
                    # rectangle-rule accumulation for the first field
                    for it = 1:snt-1
                        source_term_stack += (srcwav[ipw][iss].d[ifield][it, is] * δt)
                        wavelets[ipw, it+1][is, ifield] = source_term_stack
                    end
                else
                    # trapezoid-rule accumulation (average of adjacent samples)
                    for it = 2:snt-1
                        source_term_stack += (((srcwav[ipw][iss].d[ifield][it, is] * δt) +
                                               (srcwav[ipw][iss].d[ifield][it-1, is] * δt)) * 0.5)
                        wavelets[ipw, it+1][is, ifield] = source_term_stack
                    end
                end
                if (nt > snt)
                    # BUG FIX: `wavelets[ipw,snt+1:end][is,ifield] = ...` indexed a
                    # *copy* of the outer slice (and tried to assign a Float64 into
                    # a Vector{Matrix} slot), so the tail was never filled; write
                    # each tail sample explicitly instead.
                    for it = snt+1:nt
                        wavelets[ipw, it][is, ifield] = wavelets[ipw, snt][is, ifield]
                    end
                end
            elseif (sflags[ipw] == -2)
                # source sink for 2: negated, time-reversed running sum;
                # shifted by one sample because the source wavelet has to be
                # subtracted before the propagation step
                source_term_stack = 0.0
                for it = 1:snt-1
                    source_term_stack += (srcwav[ipw][iss].d[ifield][it, is] * δt)
                    wavelets[ipw, nt-it+1][is, ifield] = -1.0 * source_term_stack
                end
                if (nt > snt)
                    nt_diff = nt - snt
                    # BUG FIX: same outer-slice-copy problem as above, for the
                    # leading samples of the reversed buffer.
                    for it = 1:nt_diff+1
                        wavelets[ipw, it][is, ifield] = wavelets[ipw, nt_diff+2][is, ifield]
                    end
                end
            end
        end
    end
end
# Dispatch tag: `add_source!` method that distributes each source sample over
# the four neighbouring grid points (isz1/isz2 × isx1/isx2) using `ssprayw` weights.
struct Source_B1 end
# Dispatch tag: `add_source!` method that injects each source sample at a
# single grid point (isz1, isx1) with unit weight.
struct Source_B0 end
# This routine ABSOLUTELY should not allocate any memory, called inside time loop.
# Add the source contribution at time step `it` of supersource `iss` (local
# index `issp`) into the active wavefields `pap.p`.  The `Source_B1` tag selects
# four-point spraying: each sample is spread over the neighbouring grid points
# (isz1/isz2 × isx1/isx2) with weights from `ssprayw`.
@inbounds @fastmath function add_source!(it::Int64, issp::Int64, iss::Int64, pac::PFdtdc, pass::Vector{PFdtdss}, pap::PFdtdp, ::Source_B1)
# aliases
p=pap.p;
wavelets=pass[issp].wavelets
ageom=pac.ageom
isx1=pass[issp].isx1
isx2=pass[issp].isx2
isz1=pass[issp].isz1
isz2=pass[issp].isz2
ssprayw=pass[issp].ssprayw
# NOTE(review): the `modttI` and `modrr` aliases below are not used in this
# method body — the medium scaling happens inside `source` via `pac` directly.
modttI=pac.modttI
modrr=pac.modrrvz
"""
adding source to pressure field at [it]
"""
for ipw in pac.activepw
sfields=pac.sfields[ipw]
isfields=pac.isfields[ipw]
if(pac.sflags[ipw] ≠ 0) # only if sflags is non-zero
pw=p[ipw]
for (iff, ifield) in enumerate(isfields)
@simd for is = 1:ageom[ipw][iss].ns
"""
use wavelets at [it], i.e., sum of source terms
until [it-1]
division of source term with δx and δz (see Jan's fdelmodc manual)
"""
source_term = wavelets[ipw,it][is, iff] * pac.δt * pac.δxI * pac.δzI
"""
multiplication with modttI
"""
# spray the sample onto the four surrounding grid points; `source`
# applies the per-field medium coefficient selected by `sfields[iff]`
pw[isz1[ipw][is], isx1[ipw][is],ifield] +=
source(source_term,ssprayw[ipw][1,is], pac, isz1[ipw][is], isx1[ipw][is],eval(sfields[iff])())
pw[isz1[ipw][is], isx2[ipw][is],ifield] +=
source(source_term,ssprayw[ipw][2,is], pac, isz1[ipw][is], isx2[ipw][is],eval(sfields[iff])())
pw[isz2[ipw][is], isx1[ipw][is],ifield] +=
source(source_term,ssprayw[ipw][3,is], pac, isz2[ipw][is], isx1[ipw][is],eval(sfields[iff])())
pw[isz2[ipw][is], isx2[ipw][is],ifield] +=
source(source_term,ssprayw[ipw][4,is], pac, isz2[ipw][is], isx2[ipw][is],eval(sfields[iff])())
end
end
end
end
end
# This routine ABSOLUTELY should not allocate any memory, called inside time loop.
# Same as the `Source_B1` method, but the `Source_B0` tag injects each source
# sample at a single grid point (isz1, isx1) with unit spray weight instead of
# spreading it over four neighbours.
@inbounds @fastmath function add_source!(it::Int64, issp::Int64, iss::Int64, pac::PFdtdc, pass::Vector{PFdtdss}, pap::PFdtdp, ::Source_B0)
# aliases
p=pap.p;
wavelets=pass[issp].wavelets
ageom=pac.ageom
isx1=pass[issp].isx1
isz1=pass[issp].isz1
ssprayw=pass[issp].ssprayw
# NOTE(review): `modttI` is not used in this method body — scaling happens
# inside `source` via `pac` directly.
modttI=pac.modttI
"""
adding source to pressure field at [it]
"""
for ipw in pac.activepw
sfields=pac.sfields[ipw]
isfields=pac.isfields[ipw]
if(pac.sflags[ipw] ≠ 0) # only if sflags is non-zero
pw=p[ipw]
for (iff, ifield) in enumerate(isfields)
# NOTE(review): the source count here is `ageom[ipw].ns[iss]`, while the
# Source_B1 method uses `ageom[ipw][iss].ns` — confirm both accessors are
# valid for the ageom type, or unify them.
@simd for is = 1:ageom[ipw].ns[iss]
"""
use wavelets at [it], i.e., sum of source terms
until [it-1]
division of source term with δx and δz (see Jan's fdelmodc manual)
"""
source_term = wavelets[ipw,it][is, iff] * pac.δt * pac.δxI * pac.δzI
"""
multiplication with modttI
"""
# single-point injection with unit spray weight
pw[isz1[ipw][is], isx1[ipw][is],ifield] += source(source_term,1.0,pac,isz1[ipw][is],isx1[ipw][is],eval(sfields[iff])())
end
end
end
end
end
# Scale a raw source sample by its spatial spray weight and by the medium
# coefficient at grid point (iz, ix).  Multiple dispatch on the field tag
# picks which medium array of `pac` is applied.

# on pressure grid: scaled by modttI
function source(source_term, spray, pac, iz, ix, ::P)
    return source_term * spray * pac.modttI[iz, ix]
end

# on Vx grid: scaled by modrrvx
function source(source_term, spray, pac, iz, ix, ::Vx)
    return source_term * spray * pac.modrrvx[iz, ix]
end

# on Vz grid: scaled by modrrvz
function source(source_term, spray, pac, iz, ix, ::Vz)
    return source_term * spray * pac.modrrvz[iz, ix]
end
| {
"alphanum_fraction": 0.6467326733,
"author": null,
"avg_line_length": 30.421686747,
"converted": null,
"ext": "jl",
"file": null,
"hexsha": "65f831b9cdc6f61e2f73aabfbcb6fba253682300",
"include": null,
"lang": "Julia",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "abd7d74d2bc60814f6bca298635ca22ea34ec2c0",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "JuliaTagBot/GeoPhyInv.jl",
"max_forks_repo_path": "src/fdtd/source.jl",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "abd7d74d2bc60814f6bca298635ca22ea34ec2c0",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "JuliaTagBot/GeoPhyInv.jl",
"max_issues_repo_path": "src/fdtd/source.jl",
"max_line_length": 138,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "abd7d74d2bc60814f6bca298635ca22ea34ec2c0",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "JuliaTagBot/GeoPhyInv.jl",
"max_stars_repo_path": "src/fdtd/source.jl",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1968,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 5050
} |
# -*- coding: utf-8 -*-
import sys
import subprocess
import time
from tempfile import NamedTemporaryFile
import toml
import asteval
import tqdm
import os
from collections import deque
import numpy as np
from collections import namedtuple
import itertools
import h5py
import Geant4 as g4
from Geant4.hepunit import *
class Task:
    """One external simulation subprocess configured through a TOML file.

    The configuration dict is dumped to a temporary TOML file that is passed
    as the single argument to ``exec_path``.  Progress is parsed from the
    subprocess's stderr (lines of the form ``TOT=<n>`` and ``CUR=<n>``) and
    optionally shown with a tqdm bar.
    """
    def __init__(self, conf, desc, exec_path, show_bar=True, num_events=None):
        self.conf = conf                  # dict serialized to TOML for the subprocess
        self.desc = desc                  # label for the progress bar / failure dump
        self.exec_path = exec_path        # executable to launch
        self.show_bar = show_bar
        self.bar = None                   # tqdm bar, created lazily on the TOT= line
        self.bad_retval = False           # set when the subprocess exits nonzero
        self.conf_filename = None         # temp TOML file; removed in __del__ on success
        self.num_events = num_events
        self.current = 0                  # events completed so far (from CUR= lines)

    def __del__(self):
        # On failure the config file is kept so the run can be reproduced.
        if not self.bad_retval:
            # Task completed successfully. Dispose configuration file.
            if self.conf_filename is not None:
                os.unlink(self.conf_filename)

    def start(self):
        """Write the TOML config and launch the subprocess (non-blocking)."""
        with NamedTemporaryFile('w', delete=False) as f:
            self.conf_filename = f.name
            toml.dump(self.conf, f)
            # closed explicitly so the subprocess can read it (delete=False
            # keeps the file on disk)
            f.close()
            self.proc = subprocess.Popen(
                [self.exec_path, f.name],
                stdout=subprocess.PIPE, stderr=subprocess.PIPE)

    def update_status(self):
        """Poll the subprocess once.

        Returns False once the process has exited (dumping its stderr to
        stdout when the exit status is nonzero); returns True while it is
        still running, after consuming at most one pending progress line.
        """
        if self.proc.poll() is not None:
            # process terminated
            self.current = self.num_events
            if self.bar is not None:
                # snap the bar to 100%
                self.bar.update(self.bar.total - self.bar.n)
            if self.proc.poll() != 0:
                # task did not complete successfully. dump info.
                self.bad_retval = True
                sys.stdout.write('# {}: {} {}\n'.format(
                    self.desc, self.exec_path, self.conf_filename))
            # NOTE(review): this repeats the poll() != 0 check made just above
            if self.proc.poll() != 0:
                for x in self.proc.stderr:
                    sys.stdout.write(x.decode('utf-8'))
                sys.stdout.write('\n')
            return False
        if len(self.proc.stderr.peek()) != 0:
            # a progress line is waiting on stderr
            line = self.proc.stderr.readline().decode('utf-8')
            if line[:4] == 'TOT=':
                # subprocess announces its total event count once
                num_events = int(line[4:])
                self.num_events = num_events
                if self.show_bar:
                    fmt = ('{desc:>30s}:{percentage:3.0f}% ' +
                           '|{bar}| {n_fmt:>9s}/{total_fmt:<9s}')
                    self.bar = tqdm.tqdm(
                        total=num_events, bar_format=fmt, desc=self.desc)
            elif line[:4] == 'CUR=':
                # events completed so far
                current = int(line[4:])
                self.current = current
                if self.bar is not None:
                    self.bar.update(current - self.bar.n)
        return True
class ParallelTaskRunner:
    """Start every queued task at once and poll them all until completion.

    A task is any object exposing ``start()`` and ``update_status()``, where
    ``update_status()`` returns False once the task has finished.
    """

    def __init__(self):
        self.tasks = []

    def add_task(self, task):
        """Queue *task*; it is not started until run() is called."""
        self.tasks.append(task)

    def run(self):
        """Start all tasks concurrently, then poll until each reports done."""
        for task in self.tasks:
            task.start()
        running_tasks = self.tasks
        while 1:
            # BUG FIX: iterate over a snapshot — removing from the list being
            # iterated silently skipped the element after each removed task,
            # delaying its next poll by a full sleep cycle.
            for task in list(running_tasks):
                if not task.update_status():
                    running_tasks.remove(task)
            # check before sleeping so a finished batch returns immediately
            if len(running_tasks) == 0:
                break
            time.sleep(0.2)
class SerialTaskRunner:
    """Run queued tasks with at most ``max_num_threads`` alive at a time.

    Tasks are admitted (and started) from the end of the queue as running
    slots free up; a shared tqdm bar counts admitted tasks.
    """

    def __init__(self):
        self.tasks = []
        self.max_num_threads = 4   # concurrent running-task limit

    def add_task(self, task):
        """Queue *task*; it is started only when a running slot is free."""
        self.tasks.append(task)

    def run(self):
        """Drain the queue, keeping at most max_num_threads tasks running.

        BUG FIX: the original started every queued task up front *and* then
        started each one again when it was admitted to running_tasks — each
        subprocess was launched twice and the max_num_threads throttle was
        defeated.  Tasks are now started only on admission.
        """
        running_tasks = set()
        fmt = ('{desc:>30s}:{percentage:3.0f}% ' +
               '|{bar}| {n_fmt:>9s}/{total_fmt:<9s}')
        bar = tqdm.tqdm(
            total=len(self.tasks), bar_format=fmt)
        while 1:
            # poll a snapshot; finished tasks free their slot
            for task in list(running_tasks):
                if not task.update_status():
                    running_tasks.remove(task)
            # admit queued tasks into the free slots, starting them now
            while len(self.tasks) and len(running_tasks) < self.max_num_threads:
                new_task = self.tasks.pop()
                running_tasks.add(new_task)
                bar.set_description_str(new_task.desc)
                bar.update(1)
                new_task.start()
            if len(running_tasks) == 0:
                break
            time.sleep(0.2)
def RunMonteCarloSingleIndex(
        conf, reconf, vals, desc, out_filename, total_num_events,
        min_num_events_per_thread, max_num_threads):
    """Run one parameter point of the Monte Carlo, split over subprocesses.

    total_num_events is divided as evenly as possible over up to
    max_num_threads parallel 'pbpl-compton-mc' subprocesses (each receiving
    at least min_num_events_per_thread events); the per-thread HDF5 outputs
    are then merged into out_filename and the temporaries deleted.

    The per-thread configuration is produced by
    ``reconf(conf, *vals, num_events, out_file)`` — NOTE(review): reconf's
    exact contract is defined by the caller; confirm against call sites.
    """
    # split events evenly: q per thread, +1 for the first r threads
    num_threads = max(1, min(
        total_num_events//min_num_events_per_thread, max_num_threads))
    q, r = divmod(total_num_events, num_threads)
    num_events_per_thread = np.ones(num_threads, dtype=int)*q
    num_events_per_thread[0:r] += 1
    assert(total_num_events == num_events_per_thread.sum())
    out_filenames = []
    running_tasks = deque()
    for i, num_events in enumerate(num_events_per_thread):
        # each subprocess writes its results into its own temporary file
        with NamedTemporaryFile('w', delete=False) as f:
            out_filenames.append(f.name)
            running_tasks.append(
                Task(
                    reconf(conf, *vals, num_events, f.name),
                    'none', 'pbpl-compton-mc', False, num_events))
    for task in list(running_tasks):
        task.start()
    fmt = ('{desc:>30s}:{percentage:3.0f}% ' +
           '|{bar}| {n_fmt:>9s}/{total_fmt:<9s}')
    bar = tqdm.tqdm(total=total_num_events, bar_format=fmt, desc=desc)
    finished_sum = 0
    # poll all subprocesses, aggregating their progress into one bar
    while 1:
        for task in list(running_tasks):
            if task.update_status() == False:
                running_tasks.remove(task)
                finished_sum += task.num_events
        current_running = int(np.array(
            [t.current for t in running_tasks]).sum())
        bar.update(current_running + finished_sum - bar.n)
        time.sleep(0.2)
        if len(running_tasks)==0:
            break
    # merge results
    # Any dataset with 'num_events' attribute is treated as 'data' and
    # is summed in the output. Otherwise, datasets are treated as 'bins'
    # and are simply copied to the output.
    with h5py.File(out_filename, 'w') as fout:
        for filename in out_filenames:
            num_events = {}  # NOTE(review): unused here; likely a leftover
            with h5py.File(filename, 'r') as fin:
                def visit(k, v):
                    if not isinstance(v, h5py.Dataset):
                        return
                    if k not in fout:
                        # first occurrence of this dataset: copy it verbatim
                        fin.copy(v, fout, k)
                    else:
                        if 'num_events' in v.attrs:
                            # 'data' dataset: sum values and event counts
                            fout[k][()] += v[()]
                            fout[k].attrs['num_events'] += (
                                v.attrs['num_events'])
                        else:
                            # 'bins' dataset: must match across threads
                            assert(np.array_equal(fout[k][()], v[()]))
                fin.visititems(visit)
    for filename in out_filenames:
        os.unlink(filename)
def RunMonteCarlo(
        indices, conf, reconf, out_filename,
        num_events_per_run, min_num_events_per_thread, max_num_threads):
    """Sweep the Monte Carlo over a grid of parameter values.

    indices is a sequence of (values, label, unit, is_binned) tuples, one
    per sweep axis (transposed below, so indices[0] holds the value arrays,
    indices[1] the labels, indices[2] the unit strings, indices[3] the
    binned flags).  For a binned axis, consecutive value *pairs* are passed
    to reconf, so that axis contributes len(values)-1 runs; an unbinned
    axis passes single values.  All run outputs are merged into the HDF5
    file out_filename, with per-axis datasets 'i0', 'i1', ....
    """
    indices = np.array(indices).T
    # one run per grid cell; a binned axis has one fewer cell than values
    runs_shape = [len(x)-int(y) for x, y in zip(indices[0], indices[3])]
    if not hasattr(num_events_per_run, '__len__'):
        # scalar: use the same number of events for every grid cell
        num_events_per_run = num_events_per_run * np.ones(
            runs_shape, dtype=int)
    filenames = {}
    for i in itertools.product(*[range(len(v)) for v in indices[0]]):
        desc = ', '.join(['{}={}'.format(A, B) for A, B in zip(indices[1], i)])
        # gather per-axis values (single value, or a pair for binned axes)
        end_of_range = False
        vals = []
        for j in range(len(i)):
            if indices[3][j]:
                vals.append(indices[0][j][i[j]:i[j]+2])
                if len(vals[-1]) != 2:
                    # last index of a binned axis has no right bin edge
                    end_of_range = True
                    break
            else:
                vals.append(indices[0][j][i[j]])
        if end_of_range:
            continue
        f = NamedTemporaryFile('w', delete=False)
        filenames[i] = f.name
        f.close()
        RunMonteCarloSingleIndex(
            conf, reconf, vals,
            desc, f.name, num_events_per_run[i],
            min_num_events_per_thread, max_num_threads)
    # expression evaluator for unit strings, with Geant4 unit constants
    # (e.g. 'MeV') available in its symbol table
    aeval = asteval.Interpreter(use_numpy=True)
    for q in g4.hepunit.__dict__:
        aeval.symtable[q] = g4.hepunit.__dict__[q]
    # merge results
    # Any dataset with 'num_events' attribute is treated as 'data' and
    # is summed in the output. Otherwise, datasets are treated as 'bins'
    # and are simply copied to the output.
    path = os.path.dirname(out_filename)
    if path != '':
        os.makedirs(path, exist_ok=True)
    with h5py.File(out_filename, 'w') as fout:
        # copy 'bins' datasets once, from the first run's output file
        with h5py.File(list(filenames.values())[0], 'r') as fin:
            def visit(k, v):
                if not isinstance(v, h5py.Dataset):
                    return
                if k not in fout and 'num_events' not in v.attrs:
                    fin.copy(v, fout, k)
            fin.visititems(visit)
        # write one axis dataset per sweep dimension ('i0', 'i1', ...)
        for i, (vals, label, unit, is_binned) in enumerate(indices.T):
            dset_name = 'i{}'.format(i)
            if unit is None:
                # unitless axis: store values as byte strings
                fout[dset_name] = np.array(vals, dtype='S')
            else:
                # normalize values by the evaluated unit
                float_unit = float(aeval(unit))
                fout[dset_name] = vals/float_unit
            fout[dset_name].attrs.create('label', np.string_(label))
            fout[dset_name].attrs.create('unit', np.string_(unit))
        # gather each run's 'data' datasets into grid-shaped output arrays
        num_events = {}
        for i in itertools.product(*[range(len(v)) for v in indices[0]]):
            if i not in filenames:
                # skipped end-of-range cell of a binned axis
                continue
            with h5py.File(filenames[i], 'r') as fin:
                def visit(k, v):
                    if 'num_events' not in v.attrs:
                        return
                    if k not in fout:
                        # first run: create the grid-shaped dataset lazily
                        dset_shape = runs_shape + list(v.shape)
                        dset = fout.create_dataset(
                            k, shape=dset_shape, dtype='float32')
                        num_events[k] = np.zeros(runs_shape)
                        dset.attrs.create('unit', np.string_(v.attrs['unit']))
                    fout[k][i] = v
                    num_events[k][i] = v.attrs['num_events']
                fin.visititems(visit)
        # all 'data' datasets must agree on the per-cell event counts
        the_num_events = list(num_events.values())[0]
        for v in num_events.values():
            assert(np.array_equal(the_num_events, v))
        fout['num_events'] = the_num_events
    for k, v in filenames.items():
        os.unlink(v)
| {
"alphanum_fraction": 0.5389572349,
"author": null,
"avg_line_length": 35.9368421053,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "7534ace62b277d577f172bb93af76b86bd483af0",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "a5afcdffc778f61a4726d7c5a231af2bca466900",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "ucla-pbpl/pbpl-compton",
"max_forks_repo_path": "pbpl/compton/tasks.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "a5afcdffc778f61a4726d7c5a231af2bca466900",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "ucla-pbpl/pbpl-compton",
"max_issues_repo_path": "pbpl/compton/tasks.py",
"max_line_length": 79,
"max_stars_count": 2,
"max_stars_repo_head_hexsha": "a5afcdffc778f61a4726d7c5a231af2bca466900",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "ucla-pbpl/pbpl-compton",
"max_stars_repo_path": "pbpl/compton/tasks.py",
"max_stars_repo_stars_event_max_datetime": "2020-06-03T20:59:33.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-09-24T23:52:58.000Z",
"num_tokens": 2326,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 10242
} |
# # Arbitrary Precision
#
# COSMO allows you to solve problems with arbitrary floating-point precision, e.g. by using `BigFloat` problem data. To do this, the desired floating point type has to be consistent across the model `COSMO.Model{<: AbstractFloat}`, the input data and the (optional) settings object `COSMO.Settings{<: AbstractFloat}`.
# As an example, assume we want to solve the following quadratic program:
#
# $$
# \begin{array}{ll} \text{minimize} & 1/2 x^\top P x + q^\top x \\
# \text{subject to} & l \leq A x \leq u
# \end{array}
# $$
# where $P = \begin{bmatrix} 4 & 1 \\ 1 & 2\end{bmatrix}$, $q = [1, 1]^\top$, $A = \begin{bmatrix} 1 & 1 \\ 1 & 0 \\ 0 & 1\end{bmatrix}$ and $l= [1,0, 0]^\top$, $u=[1, 0.7, 0.7]^\top$. We start by creating the model with the desired precision:
using COSMO, LinearAlgebra, SparseArrays
model = COSMO.Model{BigFloat}()
#-
# Next, we define the problem data as `BigFloat` arrays and create the constraint:
q = BigFloat[1; 1.]
P = sparse(BigFloat[4. 1; 1 2])
A = BigFloat[1. 1; 1 0; 0 1]
l = BigFloat[1.; 0; 0]
u = BigFloat[1; 0.7; 0.7]
constraint = COSMO.Constraint(A, zeros(BigFloat, 3), COSMO.Box(l, u))
# Notice that the constraint type parameter is dependent on the input data. The same is true for the constraint set `Box`. Next, we define the settings
settings = COSMO.Settings{BigFloat}(verbose = true, kkt_solver = QdldlKKTSolver)
# and assemble and solve the problem:
assemble!(model, P, q, constraint, settings = settings)
result = COSMO.optimize!(model);
# Moreover, notice that when no type parameter is specified, all objects default to `Float64`:
model = COSMO.Model()
# There are two limitations when working with arbitrary precision:
# - Since we call the LAPACK function `syevr` for eigenvalue decompositions, we currently only support solving problems with PSD constraints in `Float32` and `Float64`.
# - We suggest to use the pure Julia QDLDL linear system solver (`kkt_solver = QdldlKKTSolver`) when working with arbitrary precision types as some of the other available solvers don't support all available precisions.
#md # !!! note
#md # `JuMP` does not currently support arbitrary precision. However, if you want to use `COSMO` directly with `MathOptInterface`, you can use: `COSMO.Optimizer{<: AbstractFloat}` as your optimizer. Again, the problem data precision of your MathOptInterface-model has to agree with the optimizer's precision.
| {
"alphanum_fraction": 0.7198832847,
"author": null,
"avg_line_length": 57.119047619,
"converted": null,
"ext": "jl",
"file": null,
"hexsha": "5cae23c31d0349502e53adcba4aeb53bc1fb4414",
"include": null,
"lang": "Julia",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 39,
"max_forks_repo_forks_event_max_datetime": "2022-03-23T08:53:29.000Z",
"max_forks_repo_forks_event_min_datetime": "2019-03-10T06:40:11.000Z",
"max_forks_repo_head_hexsha": "c12d46c485ddccba286d3447d60d1cb399402119",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "msarfati/COSMO.jl",
"max_forks_repo_path": "docs/src/literate/arbitrary_precision.jl",
"max_issues_count": 110,
"max_issues_repo_head_hexsha": "c12d46c485ddccba286d3447d60d1cb399402119",
"max_issues_repo_issues_event_max_datetime": "2022-03-20T00:44:39.000Z",
"max_issues_repo_issues_event_min_datetime": "2018-12-12T15:52:17.000Z",
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "msarfati/COSMO.jl",
"max_issues_repo_path": "docs/src/literate/arbitrary_precision.jl",
"max_line_length": 317,
"max_stars_count": 210,
"max_stars_repo_head_hexsha": "c12d46c485ddccba286d3447d60d1cb399402119",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "msarfati/COSMO.jl",
"max_stars_repo_path": "docs/src/literate/arbitrary_precision.jl",
"max_stars_repo_stars_event_max_datetime": "2022-03-17T23:11:26.000Z",
"max_stars_repo_stars_event_min_datetime": "2018-12-11T23:45:52.000Z",
"num_tokens": 711,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 2399
} |
import sys
sys.path.insert(0,'./')
from argparse import ArgumentParser
import pytorch_lightning as pl
import json
from typing import Iterator, List, Dict, Optional
import torch
import torch.optim as optim
import torch.nn.functional as F
import numpy as np
# for dataset reader
from allennlp.data.data_loaders import MultiProcessDataLoader, SimpleDataLoader
from allennlp.data import DataLoader, DatasetReader, Instance, Vocabulary
from allennlp.data.batch import Batch
from allennlp.data.fields import TextField, SequenceLabelField, LabelField
from allennlp.data.dataset_readers import DatasetReader
from allennlp.common.file_utils import cached_path
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
from allennlp.data.tokenizers import Token, Tokenizer, SpacyTokenizer
from allennlp.data.vocabulary import Vocabulary
# read pretrained embedding from AWS S3
from allennlp.modules.token_embedders.embedding import _read_embeddings_from_text_file
# for building model
from allennlp.models import Model
from allennlp.modules.text_field_embedders import TextFieldEmbedder, BasicTextFieldEmbedder
from allennlp.modules.token_embedders import Embedding
from allennlp.modules.seq2vec_encoders import Seq2VecEncoder, PytorchSeq2VecWrapper
from allennlp.modules.seq2seq_encoders import Seq2SeqEncoder, PytorchSeq2SeqWrapper
from allennlp.modules import FeedForward
from allennlp.nn.util import get_text_field_mask, sequence_cross_entropy_with_logits
from allennlp.nn import InitializerApplicator, RegularizerApplicator
from allennlp.training.metrics import CategoricalAccuracy
from allennlp.training.trainer import Trainer
from pytorch_lightning.loggers import TensorBoardLogger
from pytorch_lightning.callbacks import ModelCheckpoint
from callbacks import *
train_data_path = "https://s3-us-west-2.amazonaws.com/allennlp/datasets/academic-papers-example/train.jsonl"
validation_data_path = "https://s3-us-west-2.amazonaws.com/allennlp/datasets/academic-papers-example/dev.jsonl"
pretrained_file = "https://s3-us-west-2.amazonaws.com/allennlp/datasets/glove/glove.6B.100d.txt.gz"
class PublicationDatasetReader(DatasetReader):
    """Dataset reader for the publication/venue dataset.

    Each line of the input file is a JSON record with keys 'title',
    'paperAbstract' and 'venue'.  Titles and abstracts are tokenized and
    indexed; the venue becomes the classification label.
    """
    def __init__(self, tokenizer: Tokenizer = None, token_indexers: Dict[str, TokenIndexer] = None, **kwargs):
        super().__init__(**kwargs)
        # fall back to spaCy tokenization / single-id indexing when not given
        self._tokenizer = tokenizer or SpacyTokenizer()
        self._token_indexers = token_indexers or {'tokens': SingleIdTokenIndexer()}

    def _read(self, file_path: str) -> Iterator[Instance]:
        """Read the publication/venue dataset lazily, yielding Instances.

        Data is in JSON-lines format:
        {'title': ... 'paperAbstract': ... 'venue': ...}
        """
        # BUG FIX (minor): dropped the dead `instances = []` local — the
        # method is a generator and never used that list.
        with open(cached_path(file_path), 'r') as data_file:
            for line in data_file:
                line = line.strip('\n')
                if not line:
                    # skip blank lines
                    continue
                paper_json = json.loads(line)
                title = paper_json['title']
                abstract = paper_json['paperAbstract']
                venue = paper_json['venue']
                yield self.text_to_instance(title, abstract, venue)

    def text_to_instance(self,
                         title: str,
                         abstract: str,
                         venue: str = None) -> Instance:
        """Turn title, abstract and (optionally) venue into an Instance."""
        tokenized_title = self._tokenizer.tokenize(title)
        tokenized_abstract = self._tokenizer.tokenize(abstract)
        title_field = TextField(tokenized_title, self._token_indexers)
        abstract_field = TextField(tokenized_abstract, self._token_indexers)
        fields = {'title': title_field,
                  'abstract': abstract_field
                  }
        if venue is not None:
            # label is omitted at prediction time
            fields['label'] = LabelField(venue)
        return Instance(fields)
class AcademicPaperClassifier(pl.LightningModule):
    """Classify the publication venue of a paper from its title and abstract.

    Title and abstract share a token embedder, are encoded by two separate
    bidirectional LSTMs, concatenated, and passed through a linear layer
    over the venue labels.
    """

    def __init__(self, vocab, learning_rate=0.005, embedding_dim=100, hidden_dim=100, batch_size=32, num_workers=8) -> None:
        """
        :param vocab: AllenNLP Vocabulary with 'tokens' and 'labels' namespaces
        :param learning_rate: SGD learning rate
        :param embedding_dim: token embedding size
        :param hidden_dim: LSTM hidden size (per direction)
        :param batch_size: batch size for both data loaders
        :param num_workers: worker processes per data loader
        """
        super().__init__()
        self.learning_rate = learning_rate
        self.embedding_dim = embedding_dim
        self.hidden_dim = hidden_dim
        self.vocab = vocab
        self.batch_size = batch_size
        self.num_workers = num_workers
        # Reader shared by the train/validation data loaders.
        self.reader = PublicationDatasetReader()
        num_classes = vocab.get_vocab_size('labels')
        vocab_length = vocab.get_vocab_size('tokens')
        token_embedding = Embedding(num_embeddings=vocab_length,
                                    embedding_dim=self.embedding_dim)
        self.text_field_embedder = BasicTextFieldEmbedder({"tokens": token_embedding})
        # Separate encoders so title and abstract get their own parameters.
        self.title_encoder = PytorchSeq2VecWrapper(torch.nn.LSTM(self.embedding_dim, self.hidden_dim,
                                                                 batch_first=True, bidirectional=True))
        self.abstract_encoder = PytorchSeq2VecWrapper(torch.nn.LSTM(self.embedding_dim, self.hidden_dim,
                                                                    batch_first=True, bidirectional=True))
        # 2 encoders x 2 LSTM directions x hidden_dim features feed the classifier.
        self.classifier_feedforward = torch.nn.Linear(2 * 2 * self.hidden_dim, num_classes)
        self.loss = torch.nn.CrossEntropyLoss()
        self.metrics = {
            'accuracy': CategoricalAccuracy(),
            'accuracy3': CategoricalAccuracy(top_k=3)
        }
        self.save_hyperparameters()

    def prepare_data(self):
        # Remote dataset locations; downloads happen lazily via cached_path
        # inside the reader.
        self.train_data_path = "https://s3-us-west-2.amazonaws.com/allennlp/datasets/academic-papers-example/train.jsonl"
        self.validation_data_path = "https://s3-us-west-2.amazonaws.com/allennlp/datasets/academic-papers-example/dev.jsonl"
        self.pretrained_file = "https://s3-us-west-2.amazonaws.com/allennlp/datasets/glove/glove.6B.100d.txt.gz"

    def setup1(self, stage=None):
        # Build the vocabulary from the train + validation instances.
        # NOTE(review): Lightning invokes a hook named `setup`, not `setup1`,
        # so this must be called manually; the script below instead passes a
        # prebuilt vocab to __init__. Name kept for backward compatibility.
        train_dataset = self.reader.read(self.train_data_path)
        validation_dataset = self.reader.read(self.validation_data_path)
        self.vocab = Vocabulary.from_instances(train_dataset)
        self.vocab.extend_from_instances(validation_dataset)

    def train_dataloader(self):
        # Stream training batches; cap in-memory instances at one batch.
        train_dl = MultiProcessDataLoader(self.reader,
                                          data_path=self.train_data_path,
                                          batch_size=self.batch_size,
                                          shuffle=True,
                                          max_instances_in_memory=self.batch_size,
                                          num_workers=self.num_workers)
        # Bug fix: index with the model's own vocabulary. The original used
        # the module-level `vocab` global, which only exists when this file
        # is run as a script (NameError when the class is imported).
        train_dl.index_with(self.vocab)
        return train_dl

    def val_dataloader(self):
        data_loader = MultiProcessDataLoader(self.reader, self.validation_data_path, batch_size=self.batch_size, shuffle=False,
                                             max_instances_in_memory=self.batch_size,
                                             num_workers=self.num_workers)
        # Bug fix: same global-vocab issue as train_dataloader.
        data_loader.index_with(self.vocab)
        return data_loader

    def forward(self,
                title: Dict[str, torch.LongTensor],
                abstract: Dict[str, torch.LongTensor],
                label: torch.LongTensor = None) -> Dict[str, torch.Tensor]:
        """Encode title and abstract, classify, and (optionally) score.

        :returns: dict with 'logits', 'class_prob', 'predicted_label' and,
            when `label` is given, 'loss' plus the running metric values.
        """
        embedded_title = self.text_field_embedder(title)
        title_mask = get_text_field_mask(title)
        encoded_title = self.title_encoder(embedded_title, title_mask)
        embedded_abstract = self.text_field_embedder(abstract)
        abstract_mask = get_text_field_mask(abstract)
        encoded_abstract = self.abstract_encoder(embedded_abstract, abstract_mask)
        logits = self.classifier_feedforward(torch.cat([encoded_title, encoded_abstract], dim=-1))
        class_probabilities = F.softmax(logits, dim=-1)
        # Map argmax indices back to human-readable venue strings.
        argmax_indices = np.argmax(class_probabilities.cpu().data.numpy(), axis=-1)
        labels = [self.vocab.get_token_from_index(x, namespace='labels') for x in argmax_indices]
        output_dict = {
            'logits': logits,
            'class_prob': class_probabilities,
            'predicted_label': labels
        }
        if label is not None:
            loss = self.loss(logits, label)
            for name, metric in self.metrics.items():
                metric(logits, label)
                output_dict[name] = metric.get_metric()
            output_dict['loss'] = loss
        return output_dict

    def training_step(self, batch, batch_idx):
        # The batch dict already matches forward's keyword signature.
        return self.forward(**batch)

    def validation_step(self, batch, batch_idx):
        output = self.forward(**batch)
        # Rename so Lightning monitors it as the validation loss.
        output['val_loss'] = output.pop('loss')
        return output

    def configure_optimizers(self):
        return optim.SGD(self.parameters(), lr=self.learning_rate)

    @staticmethod
    def add_model_specific_args(parent_parser):
        """Add the model's hyper-parameters to an existing ArgumentParser."""
        parser = ArgumentParser(parents=[parent_parser], add_help=False)
        parser.add_argument('--learning_rate', type=float, default=0.0001)
        parser.add_argument('--batch_size', default=32, type=int)
        parser.add_argument('--num_workers', default=8, type=int)
        return parser
def build_vocab():
    """Create a Vocabulary covering both the train and validation splits."""
    reader = PublicationDatasetReader()
    # Read both splits, seed the vocabulary from train, then grow it with
    # anything new that appears only in validation.
    train_instances = reader.read(train_data_path)
    dev_instances = reader.read(validation_data_path)
    vocabulary = Vocabulary.from_instances(train_instances)
    vocabulary.extend_from_instances(dev_instances)
    return vocabulary
if __name__ == '__main__':
    # Combine Lightning Trainer flags with the model's own hyper-parameters.
    parser = ArgumentParser()
    parser = pl.Trainer.add_argparse_args(parser)
    parser = AcademicPaperClassifier.add_model_specific_args(parser)
    args = parser.parse_args()
    # TensorBoard logger (run name 'bert' under tb_logs/)
    logger = TensorBoardLogger('tb_logs', name='bert')
    # Build the vocabulary up front.  NOTE(review): as originally written the
    # class's data-loader methods also read this module-level `vocab` name,
    # so do not rename it.
    vocab = build_vocab()
    # model definition
    model = AcademicPaperClassifier(vocab=vocab, batch_size=args.batch_size, num_workers=args.num_workers)
    # Trainer definition: checkpoint on validation loss, LR finder enabled.
    trainer = pl.Trainer.from_argparse_args(args, fast_dev_run=False, progress_bar_refresh_rate=5, max_epochs=10, enable_pl_optimizer=False,
                                            callbacks=[
                                                LogHistogramCallback(),
                                                ModelCheckpoint(dirpath='.checkpoints/', monitor='val_loss')
                                            ], logger=logger, auto_lr_find=True)
    trainer.fit(model)
"alphanum_fraction": 0.6579667823,
"author": null,
"avg_line_length": 43.1417322835,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "410bcef59196b7e6d021fca4308484ff01c5d354",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "bafef5400353cac0dd5fa91d5da8717264943f1c",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "pmaxit/DLProjects",
"max_forks_repo_path": "runs/run_allen.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "bafef5400353cac0dd5fa91d5da8717264943f1c",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "pmaxit/DLProjects",
"max_issues_repo_path": "runs/run_allen.py",
"max_line_length": 140,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "bafef5400353cac0dd5fa91d5da8717264943f1c",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "pmaxit/DLProjects",
"max_stars_repo_path": "runs/run_allen.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 2199,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 10958
} |
! Copyright (c) 2019, ARM Ltd. All rights reserved.
!
! Licensed under the Apache License, Version 2.0 (the "License");
! you may not use this file except in compliance with the License.
! You may obtain a copy of the License at
!
! http://www.apache.org/licenses/LICENSE-2.0
!
! Unless required by applicable law or agreed to in writing, software
! distributed under the License is distributed on an "AS IS" BASIS,
! WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
! See the License for the specific language governing permissions and
! limitations under the License.
! C1108 -- Save statement in a BLOCK construct shall not contain a
! saved-entity-list that does not specify a common-block-name
program main
  ! Declarations feeding the two SAVE checks below: /argmnt2/ is a declared
  ! COMMON block; /argmnt1/ is deliberately never declared in a COMMON stmt.
  integer x, y, z
  real r, s, t
  common /argmnt2/ r, s, t
  !ERROR: 'argmnt1' appears as a COMMON block in a SAVE statement but not in a COMMON statement
  save /argmnt1/
  block
    !ERROR: SAVE statement in BLOCK construct may not contain a common block name 'argmnt2'
    save /argmnt2/
  end block
end program
| {
"alphanum_fraction": 0.7315936626,
"author": null,
"avg_line_length": 37,
"converted": null,
"ext": "f90",
"file": null,
"hexsha": "3cad73ef6f63112ba5e1245413760761582a1163",
"include": null,
"lang": "FORTRAN",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "648d6b9eeaac17134ec5850493a3e60d7c7ad09b",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "jeanPerier/f18",
"max_forks_repo_path": "test/semantics/blockconstruct02.f90",
"max_issues_count": 3,
"max_issues_repo_head_hexsha": "3a13d84742a88a0684e372e025e4e7530378d3bf",
"max_issues_repo_issues_event_max_datetime": "2020-02-27T14:13:38.000Z",
"max_issues_repo_issues_event_min_datetime": "2019-11-22T09:08:58.000Z",
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "pjh40/f18",
"max_issues_repo_path": "test/semantics/blockconstruct02.f90",
"max_line_length": 95,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "3a13d84742a88a0684e372e025e4e7530378d3bf",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "pjh40/f18",
"max_stars_repo_path": "test/semantics/blockconstruct02.f90",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 273,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 1073
} |
from __future__ import print_function
# Example of a *very* simple variability metric
# krughoff@uw.edu, ebellm, ljones
import numpy as np
from scipy.signal import lombscargle
from rubin_sim.maf.metrics import BaseMetric
__all__ = ['PeriodDeviationMetric']
def find_period_LS(times, mags, minperiod=2., maxperiod=35., nbinmax=10**5, verbose=False):
    """Find the period of a lightcurve using scipy's lombscargle method.

    The parameters used here imply magnitudes but there is no reason this
    would not work if fluxes are passed.

    :param times: A list of times for the given observations
    :param mags: A list of magnitudes for the object at the given times
    :param minperiod: Minimum period to search
    :param maxperiod: Maximum period to search
    :param nbinmax: Maximum number of frequency bins to use in the search
    :param verbose: If True, print a note when the bin count is clipped
    :returns: Period in the same units as used in times. This is simply
              the max value in the Lomb-Scargle periodogram
    """
    # Bug fix: the original tested `minperiod < 0`, which let minperiod == 0
    # through and divided by zero on the next line.
    if minperiod <= 0:
        minperiod = 0.01
    # Aim for ~1000 bins per cycle at the shortest period, capped at nbinmax.
    nbins = int((times.max() - times.min())/minperiod * 1000)
    if nbins > nbinmax:
        if verbose:
            print('lowered nbins')
        nbins = nbinmax
    # Recenter the magnitude measurements about zero
    dmags = mags - np.median(mags)
    # Create frequency bins
    f = np.linspace(1./maxperiod, 1./minperiod, nbins)
    # Calculate periodogram
    # NOTE(review): scipy.signal.lombscargle expects *angular* frequencies,
    # while `f` holds 1/period values, so the returned "period" is 1/omega
    # rather than 2*pi/omega.  PeriodDeviationMetric.run compensates by
    # building its test signal with omega = 1/period, so the pair is
    # self-consistent -- do not change one convention without the other.
    pgram = lombscargle(times, dmags, f)
    idx = np.argmax(pgram)
    # Return period of the bin with the max value in the periodogram
    return 1./f[idx]
class PeriodDeviationMetric(BaseMetric):
    """Measure the percentage deviation of recovered periods for pure sine wave variability (in magnitude).
    """

    def __init__(self, col='observationStartMJD', periodMin=3., periodMax=35., nPeriods=5,
                 meanMag=21., amplitude=1., metricName='Period Deviation', periodCheck=None,
                 **kwargs):
        """
        Construct an instance of a PeriodDeviationMetric class

        :param col: Name of the column to use for the observation times, commonly 'expMJD'
        :param periodMin: Minimum period to test (days)
        :param periodMax: Maximum period to test (days)
        :param nPeriods: Number of random periods to test per slice point
        :param periodCheck: Period to use in the reduce function (days)
        :param meanMag: Mean value of the lightcurve
        :param amplitude: Amplitude of the variation (mags)
        """
        self.periodMin = periodMin
        self.periodMax = periodMax
        self.periodCheck = periodCheck
        # Widen the Lomb-Scargle search window beyond [periodMin, periodMax]
        # by 20% or at least one day on each side.
        self.guessPMin = np.min([self.periodMin*0.8, self.periodMin-1])
        self.guessPMax = np.max([self.periodMax*1.20, self.periodMax+1])
        self.nPeriods = nPeriods
        self.meanMag = meanMag
        self.amplitude = amplitude
        super(PeriodDeviationMetric, self).__init__(col, metricName=metricName, **kwargs)

    def run(self, dataSlice, slicePoint=None):
        """
        Run the PeriodDeviationMetric
        :param dataSlice : Data for this slice.
        :param slicePoint: Metadata for the slice. (optional)
        :return: The error in the period estimated from a Lomb-Scargle periodogram
        """
        # Make sure the observation times are sorted.
        # NOTE(review): self.colname is presumably set from `col` by
        # BaseMetric.__init__ -- not visible here, confirm against rubin_sim.
        data = np.sort(dataSlice[self.colname])
        # Create 'nPeriods' random periods within range of min to max.
        if self.periodCheck is not None:
            periods = [self.periodCheck]
        else:
            periods = self.periodMin + np.random.random(self.nPeriods)*(self.periodMax - self.periodMin)
        # Make sure the period we want to check is in there
        periodsdev = np.zeros(np.size(periods), dtype='float')
        for i, period in enumerate(periods):
            # NOTE(review): omega = 1/period, not 2*pi/period.  This pairs
            # with find_period_LS, which feeds 1/period values to scipy's
            # lombscargle as if they were angular frequencies; the two
            # conventions cancel so pguess is comparable to `period`.
            # Do not "fix" one without the other.
            omega = 1./period
            # Calculate up the amplitude.
            lc = self.meanMag + self.amplitude*np.sin(omega*data)
            # Try to recover the period given a window buffered by min of a day or 20% of period value.
            if len(lc) < 3:
                # Too few points to find a period
                return self.badval
            pguess = find_period_LS(data, lc, minperiod=self.guessPMin, maxperiod=self.guessPMax)
            periodsdev[i] = (pguess - period) / period
        return {'periods': periods, 'periodsdev': periodsdev}

    def reducePDev(self, metricVal):
        """
        At a particular slicepoint, return the period deviation for self.periodCheck.
        If self.periodCheck is None, just return a random period in the range.
        """
        # With periodCheck set, index 0 is the checked period; otherwise it
        # is the first of the randomly drawn periods.
        result = metricVal['periodsdev'][0]
        return result

    def reduceWorstPeriod(self, metricVal):
        """
        At each slicepoint, return the period with the worst period deviation.
        """
        worstP = np.array(metricVal['periods'])[np.where(metricVal['periodsdev'] == metricVal['periodsdev'].max())[0]]
        return worstP

    def reduceWorstPDev(self, metricVal):
        """
        At each slicepoint, return the largest period deviation.
        """
        worstPDev = np.array(metricVal['periodsdev'])[np.where(metricVal['periodsdev'] == metricVal['periodsdev'].max())[0]]
        return worstPDev
| {
"alphanum_fraction": 0.655549054,
"author": null,
"avg_line_length": 41.016,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "382701575566928ea10b01b3e992d35b72e20a0b",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "eb7b1ee21c828523f8a5374fe4510fe6e5ec2a2a",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "RileyWClarke/flarubin",
"max_forks_repo_path": "rubin_sim/maf/mafContrib/varMetrics.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "eb7b1ee21c828523f8a5374fe4510fe6e5ec2a2a",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "RileyWClarke/flarubin",
"max_issues_repo_path": "rubin_sim/maf/mafContrib/varMetrics.py",
"max_line_length": 124,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "eb7b1ee21c828523f8a5374fe4510fe6e5ec2a2a",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "RileyWClarke/flarubin",
"max_stars_repo_path": "rubin_sim/maf/mafContrib/varMetrics.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1263,
"path": null,
"reason": "import numpy,from scipy",
"repo": null,
"save_path": null,
"sha": null,
"size": 5127
} |
import torch
import math
import numpy as np
from .random_fields import GaussianRF
from timeit import default_timer
import argparse
class KolmogorovFlow2d(object):
    """Pseudo-spectral solver for 2-D Kolmogorov flow in vorticity form.

    The vorticity is advanced in Fourier space with a semi-implicit scheme:
    Crank-Nicolson for the viscous term, Heun predictor-corrector for the
    nonlinear term, and 2/3-rule dealiasing after every step.
    """

    def __init__(self, w0, Re, n):
        """
        :param w0: initial vorticity, a (..., s, s) torch tensor
        :param Re: Reynolds number (> 0)
        :param n: forcing wavenumber (non-negative int, < s//2 - 1)
        """
        # Grid size
        self.s = w0.size()[-1]
        assert self.s == w0.size()[-2], "Grid must be uniform in both directions."
        assert math.log2(self.s).is_integer(), "Grid size must be power of 2."
        assert n >= 0 and isinstance(n, int), "Forcing number must be non-negative integer."
        assert n < self.s // 2 - 1, "Forcing number too large for grid size."
        # Forcing number
        self.n = n
        assert Re > 0, "Reynolds number must be positive."
        # Reynolds number
        self.Re = Re
        # Device
        self.device = w0.device
        # Current time
        self.time = 0.0
        # Current vorticity in Fourier space
        self.w_h = torch.fft.fft2(w0, norm="backward")
        # Wavenumbers in y and x directions, in standard FFT ordering
        # (0..s/2-1 then -s/2..-1), broadcast to full (s, s) grids.
        self.k_y = torch.cat((torch.arange(start=0, end=self.s // 2, step=1, dtype=torch.float32, device=self.device), \
                              torch.arange(start=-self.s // 2, end=0, step=1, dtype=torch.float32, device=self.device)),
                             0).repeat(self.s, 1)
        self.k_x = self.k_y.clone().transpose(0, 1)
        # Negative inverse Laplacian in Fourier space
        # (mean mode [0, 0] pinned to 1 to avoid division by zero).
        self.inv_lap = (self.k_x ** 2 + self.k_y ** 2)
        self.inv_lap[0, 0] = 1.0
        self.inv_lap = 1.0 / self.inv_lap
        # Negative scaled Laplacian: the viscous operator k^2 / Re
        self.G = (1.0 / self.Re) * (self.k_x ** 2 + self.k_y ** 2)
        # Dealiasing mask using 2/3 rule
        self.dealias = (self.k_x ** 2 + self.k_y ** 2 <= (self.s / 3.0) ** 2).float()
        # Ensure mean zero
        self.dealias[0, 0] = 0.0

    # Get current vorticity from stream function (Fourier space)
    def vorticity(self, stream_f=None, real_space=True):
        """Return the vorticity, from `stream_f` if given, else the state.

        NOTE(review): irfft2 is given a *full* (s, s) spectrum; with `s`
        specified torch presumably crops it to the (s, s//2+1) half-spectrum
        it expects -- confirm against the torch.fft docs.
        """
        if stream_f is not None:
            # w = -lap(psi); G already holds k^2/Re, so multiply Re back out.
            w_h = self.Re * self.G * stream_f
        else:
            w_h = self.w_h
        if real_space:
            return torch.fft.irfft2(w_h, s=(self.s, self.s), norm="backward")
        else:
            return w_h

    # Compute stream function from vorticity (Fourier space)
    def stream_function(self, w_h=None, real_space=False):
        """Solve the Poisson equation for the stream function, psi = inv_lap * w."""
        if w_h is None:
            psi_h = self.w_h.clone()
        else:
            psi_h = w_h.clone()
        # Stream function in Fourier space: solve Poisson equation
        psi_h = self.inv_lap * psi_h
        if real_space:
            return torch.fft.irfft2(psi_h, s=(self.s, self.s), norm="backward")
        else:
            return psi_h

    # Compute velocity field from stream function (Fourier space)
    def velocity_field(self, stream_f=None, real_space=True):
        """Return the incompressible velocity (q, v) = (psi_y, -psi_x)."""
        if stream_f is None:
            stream_f = self.stream_function(real_space=False)
        # Velocity field in x-direction = psi_y
        q_h = stream_f * 1j * self.k_y
        # Velocity field in y-direction = -psi_x
        v_h = stream_f * -1j * self.k_x
        if real_space:
            q = torch.fft.irfft2(q_h, s=(self.s, self.s), norm="backward")
            v = torch.fft.irfft2(v_h, s=(self.s, self.s), norm="backward")
            return q, v
        else:
            return q_h, v_h

    # Compute non-linear term + forcing from given vorticity (Fourier space)
    def nonlinear_term(self, w_h):
        """Return -(div of (u*w)) + forcing in Fourier space.

        The advection term is evaluated in divergence form:
        -d(q*w)/dx - d(v*w)/dy, with products formed in physical space.
        """
        # Physical space vorticity
        # NOTE(review): ifft2 returns a complex tensor, so q * w and v * w
        # below are complex; the imaginary part is round-off-level for a
        # real field but is carried through the FFTs.
        w = torch.fft.ifft2(w_h, s=(self.s, self.s), norm="backward")
        # Velocity field in physical space
        q, v = self.velocity_field(self.stream_function(w_h, real_space=False), real_space=True)
        # Compute non-linear term (d/dx <-> i*k_x, d/dy <-> i*k_y)
        t1 = torch.fft.fft2(q * w, s=(self.s, self.s), norm="backward")
        t1 = self.k_x * t1
        t2 = torch.fft.fft2(v * w, s=(self.s, self.s), norm="backward")
        t2 = self.k_y * t2
        nonlin = -1j * (t1 + t2)
        # Apply forcing: -n*cos(n*y), injected at modes (k_x=0, k_y=+-n);
        # the s**2 factor compensates the "backward" FFT normalization.
        if self.n > 0:
            nonlin[..., 0, self.n] -= (float(self.n) / 2.0) * (self.s ** 2)
            nonlin[..., 0, -self.n] -= (float(self.n) / 2.0) * (self.s ** 2)
        return nonlin

    def advance(self, t, delta_t=1e-3):
        """Advance the solution by time t using steps of at most delta_t."""
        # Final time
        T = self.time + t
        # Advance solution in Fourier space
        while self.time < T:
            # Shorten the final step so we land exactly on T.
            if self.time + delta_t > T:
                current_delta_t = T - self.time
            else:
                current_delta_t = delta_t
            # Inner-step of Heun's method (predictor, implicit diffusion)
            nonlin1 = self.nonlinear_term(self.w_h)
            w_h_tilde = (self.w_h + current_delta_t * (nonlin1 - 0.5 * self.G * self.w_h)) / (
                    1.0 + 0.5 * current_delta_t * self.G)
            # Crank-Nicolson + Heun update (corrector)
            nonlin2 = self.nonlinear_term(w_h_tilde)
            self.w_h = (self.w_h + current_delta_t * (0.5 * (nonlin1 + nonlin2) - 0.5 * self.G * self.w_h)) / (
                    1.0 + 0.5 * current_delta_t * self.G)
            # De-alias (dealias[0, 0] == 0 also zeroes the mean mode)
            self.w_h *= self.dealias
            self.time += current_delta_t
if __name__ == '__main__':
    # Parse the output-part index and Reynolds number from the command line.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("--part", type=int, default=0)
    arg_parser.add_argument("--re", type=float, default=40.0)
    opt = arg_parser.parse_args()

    device = torch.device('cuda:0')

    grid_size = 1024        # solver resolution (points per side)
    subsample = 4           # spatial subsampling factor for saved fields
    forcing_n = 4           # Kolmogorov forcing wavenumber
    reynolds = opt.re
    spinup_time = 100.0     # integration time before any snapshot is stored
    n_segments = 100        # number of stored trajectory segments
    steps_per_segment = 64  # snapshots per unit time within a segment
    dt_out = 1.0 / steps_per_segment

    # Random smooth initial vorticity field.
    grf = GaussianRF(2, grid_size, 2 * math.pi, alpha=2.5, tau=7, device=device)
    w_init = grf.sample(1)

    solver = KolmogorovFlow2d(w_init, reynolds, forcing_n)
    # Spin up past the initial transient before recording.
    solver.advance(spinup_time, delta_t=1e-3)

    sol = np.zeros((n_segments, steps_per_segment + 1,
                    grid_size // subsample, grid_size // subsample))
    snapshot = solver.vorticity().squeeze(0).cpu().numpy()[::subsample, ::subsample]
    for seg in range(n_segments):
        sol[seg, 0, :, :] = snapshot
        for step in range(steps_per_segment):
            tic = default_timer()
            solver.advance(dt_out, delta_t=1e-3)
            sol[seg, step + 1, :, :] = solver.vorticity().squeeze(0).cpu().numpy()[::subsample, ::subsample]
            toc = default_timer()
            print(seg, toc - tic)
        # Each segment continues from the last stored snapshot.
        snapshot = sol[seg, -1, :, :]

    # np.save('NS_fine_Re500_S512_s64_T500_t128.npy', sol)
    np.save('NS_fine_Re' + str(int(reynolds)) + '_T' + str(steps_per_segment) + '_part' + str(opt.part) + '.npy', sol)
| {
"alphanum_fraction": 0.5538141692,
"author": null,
"avg_line_length": 31.5808080808,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "b18a371f123e204c7255a986879978ee628cdb45",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 14,
"max_forks_repo_forks_event_max_datetime": "2022-03-30T08:34:33.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-11-16T05:17:02.000Z",
"max_forks_repo_head_hexsha": "95f830bdaafb2c03f7153df9e59e4832223a6108",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "argonne-lcf/PINO",
"max_forks_repo_path": "solver/kolmogorov_flow.py",
"max_issues_count": 3,
"max_issues_repo_head_hexsha": "95f830bdaafb2c03f7153df9e59e4832223a6108",
"max_issues_repo_issues_event_max_datetime": "2022-03-29T08:07:41.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-11-27T15:37:38.000Z",
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "argonne-lcf/PINO",
"max_issues_repo_path": "solver/kolmogorov_flow.py",
"max_line_length": 120,
"max_stars_count": 36,
"max_stars_repo_head_hexsha": "95f830bdaafb2c03f7153df9e59e4832223a6108",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "argonne-lcf/PINO",
"max_stars_repo_path": "solver/kolmogorov_flow.py",
"max_stars_repo_stars_event_max_datetime": "2022-03-31T15:22:15.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-11-09T09:02:13.000Z",
"num_tokens": 1827,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 6253
} |
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 16 11:02:22 2020
@author: sopmathieu
This file contains different functions to compute and analyze the autocorrelation
of time-series that may contain missing values.
These functions compute the autocorrelation and the autocovariance of a series
at a desired lag. They plot the autocorrelation and partial autocorrelation
functions of a series until a maximum lag. They also calculate the p-values
associated with the portmanteau (Ljung-Box) test at each lag.
Finally, a procedure to automatically select the block length
(of a BB method) is included in this file.
"""
import numpy as np
from statsmodels.tsa.arima_process import ArmaProcess
from statsmodels.tsa.stattools import pacf
from statsmodels.tsa.stattools import acf
import scipy.stats as stats
import matplotlib.pyplot as plt
from sklearn.utils import resample
from kneed import KneeLocator
def autocorrelation(Xi, k=1):
    """
    Computes the autocorrelation at lag k, valid for missing values.

    Parameters
    ----------
    Xi : 1D array
        A single time-series.
    k : int, optional
        The lag at which the autocorrelation should be computed.
        The default is one.

    Returns
    -------
    autoCorr : float
        The autocorrelation at lag k.
    """
    if k >= 1:
        ts1 = Xi[:-k]; ts2 = Xi[k:]
    else:
        ts1 = Xi; ts2 = Xi
    N = len(ts1)
    Xs1 = np.nanmean(ts1); Xs2 = np.nanmean(ts2)
    autoCov = 0; c = 0
    # Accumulate cross-products only over pairs where both windows are observed.
    for i in range(N):
        if (not np.isnan(ts1[i]) and not np.isnan(ts2[i])):
            autoCov += (ts1[i]-Xs1) * (ts2[i]-Xs2)
            c += 1
    # Normalize by the standard deviations of the two windows.
    # Bug fix: the original divided by nanstd(Xi[:-k]) * nanstd(Xi[k:]),
    # which are *empty* slices when k == 0 (Xi[:-0] == Xi[:0]), making the
    # lag-0 autocorrelation NaN; ts1/ts2 are the intended windows for all k
    # (identical to the original slices for k >= 1).
    autoCorr = ((1/c)*autoCov*(1/(np.nanstd(ts1)*np.nanstd(ts2))))
    return autoCorr
def autocovariance(Xi, k=1):
    """
    Computes the autocovariance at lag k, valid for missing values.

    Parameters
    ----------
    Xi : 1D array
        A single time-series.
    k : int, optional
        The lag at which the autocovariance should be computed.
        The default is one.

    Returns
    -------
    autoCov : float
        The autocovariance at lag k.
    """
    # Lag-0 compares the series with itself; otherwise shift by k.
    if k >= 1:
        lead, lagged = Xi[:-k], Xi[k:]
    else:
        lead, lagged = Xi, Xi
    mean_lead = np.nanmean(lead)
    mean_lagged = np.nanmean(lagged)
    # Only pairs where both windows are observed contribute to the sum,
    # and the count of such pairs is the normalization.
    valid = ~np.isnan(lead) & ~np.isnan(lagged)
    n_pairs = np.count_nonzero(valid)
    cross_sum = np.sum((lead[valid] - mean_lead) * (lagged[valid] - mean_lagged))
    return cross_sum / n_pairs
def autocorr(x, k=1):
    """
    Computes the autocorrelation at lag k, valid for missing values
    and faster than previous function.

    Parameters
    ----------
    x : 1D array
        A single time-series.
    k : int, optional
        The lag at which the autocorrelation should be computed.
        The default is one.

    Returns
    -------
    autoCorr : float
        The autocorrelation at lag k.
    """
    if k < 1:
        first, second = x, x
    else:
        first, second = x[:-k], x[k:]
    # Mask NaNs in each window, then keep only positions observed in both.
    a = np.ma.masked_invalid(first)
    b = np.ma.masked_invalid(second)
    keep = ~(a.mask | b.mask)
    return np.corrcoef(a[keep], b[keep])[0, 1]
def autocov(x, k=1):
    """
    Computes the autocovariance at lag k, valid for missing values.

    Parameters
    ----------
    x : 1D array
        A single time-series.
    k : int, optional
        The lag at which the autocovariance should be computed.
        The default is one.

    Returns
    -------
    autoCov : float
        The autocovariance at lag k.
    """
    if k < 1:
        first, second = x, x
    else:
        first, second = x[:-k], x[k:]
    # Mask NaNs in each window, then keep only positions observed in both.
    a = np.ma.masked_invalid(first)
    b = np.ma.masked_invalid(second)
    keep = ~(a.mask | b.mask)
    # ddof=0: population normalization, matching the loop-based helper above.
    return np.cov(a[keep], b[keep], ddof=0)[0][1]
#================================================================
### plots
#===============================================================
def acf_pacf_plot(x, which_display=0, max_cov=50):
    """
    Plots the autocorrelation function (acf) and the partial
    autocorrelation function (pacf) for a time-series
    with missing observations (except at lag=0).

    Parameters
    ----------
    x : 2D-array
        A panel of time-series (rows: time, columns: series).
    which_display : int, optional
        The index of the series in the panel to be displayed (acf, pacf).
        The default is zero.
    max_cov : int>0, optional
        The maximum lag until the autocovariance should
        be computed. The defaults is 50.

    Returns
    -------
    fig : a matplotlib figure
        The figure with the acf and pacf.
    """
    (row_x, column_x) = x.shape
    corr_data = np.zeros((column_x, max_cov + 1))
    p_ljung = np.zeros((column_x, max_cov))
    partcorr_data = np.zeros((column_x, max_cov + 1))
    for i in range(column_x):
        # Only series with more observations than lags can be analysed.
        if np.count_nonzero(~np.isnan(x[:,i])) > max_cov+1:
            # NOTE(review): qstat='True' is a truthy *string*, effectively
            # qstat=True; with alpha and qstat set, acf returns
            # (acf, confint, qstat, pvalues).
            intm = acf(x[:,i], nlags=max_cov, missing='drop',alpha=0.05, qstat='True')
            corr_data[i,:] = intm[0]
            p_ljung[i,:] = intm[3]  # Ljung-Box p-values (computed, not plotted here)
            x_wht_nan = x[:,i]
            # pacf cannot handle NaNs: drop them first.
            intm = pacf(x_wht_nan[~np.isnan(x_wht_nan)], max_cov, alpha=0.05)
            partcorr_data[i,:] = intm[0]
    display = x[:,which_display] #which series to display
    display = display[~np.isnan(display)] #remove nans
    # 95% white-noise confidence band: +/- z_{0.975}/sqrt(n)
    ci = np.ones(max_cov) *stats.norm.ppf((1 + 0.95)/2)/np.sqrt(len(display))
    plt.rcParams['figure.figsize'] = (10.0, 10.0)
    fig = plt.figure()
    f1 = plt.subplot(2,1,1)
    # Stems at lags 1..max_cov (lag 0 is always 1 and is omitted).
    # NOTE(review): use_line_collection was removed in matplotlib >= 3.9;
    # drop the keyword if upgrading.
    plt.stem(np.arange(max_cov)+1, corr_data[which_display,1:], basefmt='k', use_line_collection=True)
    plt.plot(np.arange(max_cov)+1, ci,'k:')
    plt.plot(np.arange(max_cov)+1, -ci,'k:')
    plt.fill_between(np.arange(max_cov)+1, -ci, ci,color='b', alpha=0.2)
    plt.title("Autocorrelation function in series %s" %which_display)
    #plt.axis([-2, max_cov+2, -0.2, 1.05])
    f1.set_xlim([-2, max_cov+2])
    f2 = plt.subplot(2,1,2)
    plt.stem(np.arange(max_cov)+1, partcorr_data[which_display,1:], basefmt='k',use_line_collection=True)
    plt.plot(np.arange(max_cov)+1, ci,'b:')
    plt.plot(np.arange(max_cov)+1, -ci,'b:')
    plt.fill_between(np.arange(max_cov)+1, -ci, ci,color='b', alpha=0.2)
    plt.title("Partial-Autocorrelation function in series %s" %which_display)
    #plt.axis([-2, max_cov+2, -0.2, 1.05])
    f2.set_xlim([-2, max_cov+2])
    plt.show()
    return fig
def acf_pacf_residuals(res, max_cov=50):
    """
    Plots the autocorrelation function (acf), the partial
    autocorrelation function (pacf) and the p-values of the
    Ljung-Box test for residuals of ARMA models.

    Parameters
    ----------
    res : 1D-array
        The residuals of an ARMA model.
    max_cov : int>0, optional
        The maximum lag until the autocovariance should
        be computed. The defaults is 50.

    Returns
    -------
    fig : a matplotlib figure
        The figure with the acf, pacf and the p-values.
    """
    n = len(res)
    # Compute the acf (with Ljung-Box statistics) once; the original called
    # acf() twice with identical arguments. qstat='True' is a truthy string,
    # kept as-is for backward compatibility; the return tuple is
    # (acf, qstat, pvalues).
    acf_out = acf(res, nlags=max_cov, qstat='True')
    acf_res = acf_out[0]
    p_value_res = acf_out[2]
    pacf_res = pacf(res, nlags=max_cov)
    fig = plt.figure()
    # 95% white-noise confidence band: +/- z_{0.975}/sqrt(n)
    ci = np.ones(max_cov) * stats.norm.ppf((1 + 0.95)/2)/np.sqrt(n)
    plt.subplot(3,1,1)
    plt.stem(np.arange(max_cov)+1, acf_res[1:], basefmt='k')
    plt.plot(np.arange(max_cov)+1, ci,'k:')
    plt.plot(np.arange(max_cov)+1, -ci,'k:')
    # Fix: shade the band over lags 1..max_cov, aligned with the stems and
    # CI lines (the original shaded 0..max_cov-1, one lag to the left, and
    # inconsistent with acf_pacf_plot).
    plt.fill_between(np.arange(max_cov)+1, -ci, ci,color='b', alpha=0.2)
    plt.title("Acf of residuals")
    plt.axis([-2, max_cov+2, -0.1, 1.2])
    plt.subplot(3,1,2)
    plt.stem(np.arange(max_cov)+1, pacf_res[1:], basefmt='k')
    plt.plot(np.arange(max_cov)+1, ci,'b:')
    plt.plot(np.arange(max_cov)+1, -ci,'b:')
    plt.fill_between(np.arange(max_cov)+1, -ci, ci,color='b', alpha=0.2)
    plt.title("Pacf of residuals")
    plt.axis([-2, max_cov+2, -0.1, 1.2])
    plt.subplot(3,1,3)
    plt.plot(np.arange(max_cov)+1, p_value_res,'o')
    # Reference line at the conventional 5% significance level.
    plt.plot(np.arange(max_cov)+1, np.ones(max_cov)*0.05,'b:')
    plt.title("p-values of the Ljung-box chi squared stats")
    plt.axis([-2, max_cov+2, -0.1, 1.2])
    plt.show()
    return fig
#================================================================
### choice of the block length
#===============================================================
def block_length_choice(data, bbl_min=10, bbl_max=110, bbl_step=10,
n_corr=50, nmc=200, BB_method='MBB', plot=True):
"""
Computes an appropriate value of the block length for a panel of
time-series with missing values.
The algorithm works as follows.
For each block length tested over the specified range, this function resamples
several series of observations using a block bootstrap procedure.
Then, it computes the mean squared error (MSE) of the mean,
standard deviation and autocorrelation at different lags
of the resampled series (with respect to the original data).
Small block lengths represent the variance and the mean of the data properly
(mse of the mean and the variance increases when block length augments).
Whereas large block lengths better account for the autocorrelation of the data
(mse of the autocorrelation diminishes when block length increases).
The appropriate value for the block length is finally selected as the first
value such that the mse of the autocorrelation stabilizes
("knee" of the curve).
This value intuitively corresponds to the smallest length which is
able to represent the main part of the autocorrelation of the series.
Parameters
----------
data : 2D-array
A matrix representing a panel of time-series to be resampled by
a block boostrap procedure. (rows: time, columns: series)
To save computational time, only the IC series may be used.
bbl_min : int, optional
Lower value for the block length. The block lengths are tested in
the range [bbl_min, bbl_max]. Default is 10.
bbl_max : int, optional
Upper value for the block length. The block lengths are tested in
the range [bbl_min, bbl_max]. Default is 110.
bbl_step : int, optional
Step value for the block length. The block lengths are tested in
the range [bbl_min, bbl_max], with step equal to bbl_step.
Default is 10.
n_corr : int, optional
Maximal lag up to which the autocorrelation is evaluated.
The default is 50.
nmc : int > 0, optional
Number of resampled series used to compute the MSEs.
BB_method : str, optional
String that designates the block boostrap method chosen for sampling data.
Values for the string should be selected among:
'MBB': moving block bootstrap
'NBB': non-overlapping block bootstrap
If the matched block bootstrap is intended to be use, 'NBB' may be
selected (the matched block bootstrap is based on the 'NBB').
'CBB': circular block bootstrap
Default is 'MBB'.
plot : bool, optional
Flag to show the figures (and print some results). The default is True.
Returns
-------
block-length : int
The selected block length.
"""
assert np.ndim(data) == 2, "Input data must be a 2D array"
(n_obs, n_series) = data.shape
assert BB_method in ['MBB', 'NBB', 'CBB'], "Undefined block bootstrap procedure"
#compute the autocorrelation of the initial data
corr_data = np.zeros((n_series, n_corr))
for j in range(n_series):
for i in range(n_corr):
corr_data[j,i] = autocorr(data[:,j],i+1)
### parameters
n_bbl = int(np.ceil((bbl_max - bbl_min)/bbl_step)) #number of block length sizes tested
mse_mean_series = np.zeros((n_bbl, n_series)); mse_std_series = np.zeros((n_bbl, n_series))
mse_corr_lag = np.zeros((n_corr, n_series)) ; mse_corr_series = np.zeros((n_bbl, n_series))
bbl = np.zeros(n_bbl)
c = 0
for block_length in range(bbl_min, bbl_max, bbl_step):
### Create blocks (moving block bootstrap)
bbl[c] = block_length
n_blocks = int(np.ceil(n_obs/block_length))
if BB_method == 'MBB':
N = n_obs - block_length + 1
blocks = np.zeros((N, block_length, n_series))
for j in range(n_series):
for i in range(N):
blocks[i,:,j] = data[i:i+block_length, j] #series by series
elif BB_method == 'NBB':
N = int(np.floor(n_obs / block_length))
blocks = np.zeros((N, block_length, n_series))
for j in range(n_series):
cc = 0
it = 0
for i in range(0, N):
blocks[cc,:,j] = data[it:it+block_length,j] #non-overlapping
it += block_length
cc += 1
elif BB_method == 'CBB':
N = n_obs
blocks = np.zeros((N, block_length, n_series))
for j in range(n_series):
cc = 0
data_dup = np.concatenate((data[:,j], data[:,j]))
for i in range(0, N):
blocks[cc,:,j] = data_dup[i:i+block_length] #overlapping
cc += 1
for j in range(n_series):
corr_boot = np.zeros((n_corr, nmc)); mean_boot = np.zeros((nmc)); std_boot = np.zeros((nmc))
corr_boot[:] = np.nan; mean_boot[:] = np.nan; std_boot[:] = np.nan
for b in range(nmc):
boot = resample(blocks[:,:,j], replace=True, n_samples=n_blocks).flatten()
#boot = boot[~np.isnan(boot)]
for i in range(n_corr):
corr_boot[i,b] = autocorr(boot, i+1)
mean_boot[b] = np.nanmean(boot)
std_boot[b] = np.nanstd(boot)
### results per station
mse_mean_series[c,j] = (np.nanmean(mean_boot) - np.nanmean(data[:,j]))**2 + np.nanvar(mean_boot)
mse_std_series[c,j] = (np.nanmean(std_boot) - np.nanstd(data[:,j]))**2 + np.nanvar(std_boot)
for i in range(n_corr):
mse_corr_lag[i,j] = (np.nanmean(corr_boot[i,:]) - corr_data[j,i])**2 + np.nanvar(corr_boot[i,:])
mse_corr_series[c,j] = np.nanmean(mse_corr_lag, axis=0)[j]
c += 1
#for all stations
mse_mean = np.nanmean(mse_mean_series, axis=1)
mse_std = np.nanmean(mse_std_series, axis=1)
mse_corr = np.nanmean(mse_corr_series, axis=1)
x = bbl
y = mse_corr
#select the knee of the curve
coef = np.polyfit(x, y, deg=1)
coef_curve = np.polyfit(x, y, deg=2)
if coef_curve[0] < 0:
curve = 'concave'
else:
curve = 'convex'
    if coef[0] < 0: #slope is negative
        direction = 'decreasing'
    else: #slope is positive
        direction = 'increasing'
kn = KneeLocator(x, y, curve=curve, direction=direction)
block_length = kn.knee
if plot:
plt.rcParams['figure.figsize'] = (10.0, 6.0)
plt.rcParams['font.size'] = 14
plt.plot(x, y, marker='o'); plt.xlabel('block length')
plt.ylabel('mse of autocorrelation')
plt.title('MSE of the autocorrelation as function of the block length')
if block_length is not None:
plt.axvline(x=block_length, color='orange', linestyle='--', label='selected value \n (knee)')
plt.legend()
plt.show()
print('Block length which minimizes the mse of the mean:', x[np.argmin(mse_mean)])#0
print('Block length which minimizes the mse of the std:', x[np.argmin(mse_std)])#0
print('Block length which minimizes the mse of the autocorrelation:', x[np.argmin(mse_corr)]) #100
return block_length
#############################################################################
""" Tests """
if __name__ == "__main__":
    # Sanity checks: compare hand-computed lag-1 statistics on a tiny series
    # against the module's helper functions (autocorr/autocov and the
    # autocorrelation/autocovariance variants).
    x = [-2.1, -1, 4.3]
    k = 1
    # NOTE(review): x is a plain list; the subtraction below only works because
    # np.nanmean returns a numpy scalar whose __rsub__ coerces the list to an
    # array — confirm before refactoring.
    inte = (x[k:]-np.nanmean(x[k:])) * (x[:-k]-np.nanmean(x[:-k]))
    cov_x = np.nanmean(inte)  # hand-computed lag-1 autocovariance
    corr_x = np.nanmean(inte) * (1/(np.nanstd(x[k:]) * np.nanstd(x[:-k])))
    corr_test1_x = autocorrelation(x)
    corr_test2_x = autocorr(x)
    cov_test1_x = autocovariance(x)
    cov_test2_x = autocov(x)
    # Same comparison on a larger Gaussian noise series (mean 100, std 20).
    y = 20 * np.random.randn(1000) + 100
    k = 1
    inte = (y[k:]-np.nanmean(y[k:])) * (y[:-k]-np.nanmean(y[:-k]))
    cov_y = np.nanmean(inte)
    corr_y = np.nanmean(inte) * (1/(np.nanstd(y[k:]) * np.nanstd(y[:-k])))
    corr_test1_y = autocorrelation(y)
    corr_test2_y = autocorr(y)
    cov_test1_y = autocovariance(y)
    cov_test2_y = autocov(y)
    ##### autoregressive models
    # Two simulated AR(1) processes via statsmodels; note the lag-coefficient
    # sign convention of ArmaProcess (see the '#inverse sign' remark below).
    phi1 = 0.9
    ar1 = np.array([1, phi1])
    ma1 = np.array([1])
    AR_object1 = ArmaProcess(ar1, ma1)
    simulated_data_1 = AR_object1.generate_sample(nsample=1000)
    ar2 = np.array([1, -0.9])#inverse sign
    ma2 = np.array([1])
    AR_object2 = ArmaProcess(ar2, ma2)
    simulated_data_2 = AR_object2.generate_sample(nsample=1000)
    # Cross-check both implementations up to lag n_cov on both AR processes.
    n_cov = 40
    corr_ar1=np.zeros((n_cov)) ; corr_ar2=np.zeros((n_cov))
    cov_ar1=np.zeros((n_cov)) ; cov_ar2=np.zeros((n_cov))
    cov_test1=np.zeros((n_cov)) ; cov_test2=np.zeros((n_cov))
    corr_test1=np.zeros((n_cov)) ; corr_test2=np.zeros((n_cov))
    for i in range(n_cov):
        corr_ar1[i] = autocorr(simulated_data_1,i+1)
        corr_ar2[i] = autocorr(simulated_data_2,i+1)
        cov_ar1[i] = autocov(simulated_data_1,i+1)
        cov_ar2[i] = autocov(simulated_data_2,i+1)
        corr_test1[i] = autocorrelation(simulated_data_1, i+1)
        corr_test2[i] = autocorrelation(simulated_data_2, i+1)
        cov_test1[i] = autocovariance(simulated_data_1, i+1)
        cov_test2[i] = autocovariance(simulated_data_2, i+1)
    # acf_pacf_plot appears to expect a 2-D (n_obs, n_series) array, hence the
    # reshape — TODO confirm against its definition.
    simulated_data_1 = simulated_data_1.reshape(-1,1)
    plot_ar1 = acf_pacf_plot(simulated_data_1)
    simulated_data_2 = simulated_data_2.reshape(-1,1)
plot_ar2 = acf_pacf_plot(simulated_data_2) | {
"alphanum_fraction": 0.5879042832,
"author": null,
"avg_line_length": 36.1739130435,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "3641711d513a536cb75a18a0ace06de75948b751",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "7bba8a216b02a7c5607b4b5127c245c74d8f8514",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "sophiano/cusvm",
"max_forks_repo_path": "cusvm/autocorrelations.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "7bba8a216b02a7c5607b4b5127c245c74d8f8514",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "sophiano/cusvm",
"max_issues_repo_path": "cusvm/autocorrelations.py",
"max_line_length": 112,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "7bba8a216b02a7c5607b4b5127c245c74d8f8514",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "sophiano/cusvm",
"max_stars_repo_path": "cusvm/autocorrelations.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 5151,
"path": null,
"reason": "import numpy,import scipy,from statsmodels",
"repo": null,
"save_path": null,
"sha": null,
"size": 18304
} |
import numpy as np
def pad_images_similar(img1, img2):
    """Zero-pad two 2-D images onto identical square canvases.

    The canvas side is the largest dimension seen across both inputs; each
    image is (approximately) centered on its canvas, with the exact offsets
    produced by Python's round() (banker's rounding at half-integers).

    Parameters
    ----------
    img1, img2 : 2-D numpy arrays (may have different shapes)

    Returns
    -------
    list of two float arrays of shape (side, side), in input order.
    """
    images = (img1, img2)
    # Largest row count and column count over both inputs.
    max_rows = max(im.shape[0] for im in images)
    max_cols = max(im.shape[1] for im in images)

    result = []
    for im in images:
        rows, cols = im.shape
        if max_cols > max_rows:
            side = max_cols
            top = int(round(side / 2)) - int(round(rows / 2))
            left = int(round((side - cols) / 2))
        else:
            side = max_rows
            top = int(round((side - rows) / 2))
            left = int(round(side / 2)) - int(round(cols / 2))
        canvas = np.zeros([side, side])
        canvas[top:rows + top, left:cols + left] = im
        result.append(canvas)
    return result
| {
"alphanum_fraction": 0.5,
"author": null,
"avg_line_length": 26,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "094b5533ab9510b7bb69dc2709c0409f85abca45",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "2d206bd5cc71191c00d6cc2b60868f6fdce33828",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "ndsystems/deepthought",
"max_forks_repo_path": "deepthought/utils.py",
"max_issues_count": 1,
"max_issues_repo_head_hexsha": "2d206bd5cc71191c00d6cc2b60868f6fdce33828",
"max_issues_repo_issues_event_max_datetime": "2021-06-18T06:58:12.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-06-18T06:58:12.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "ndsystems/deepthought",
"max_issues_repo_path": "deepthought/utils.py",
"max_line_length": 59,
"max_stars_count": 3,
"max_stars_repo_head_hexsha": "2d206bd5cc71191c00d6cc2b60868f6fdce33828",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "ndsystems/deepthought",
"max_stars_repo_path": "deepthought/utils.py",
"max_stars_repo_stars_event_max_datetime": "2022-03-04T15:34:09.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-06-15T07:02:30.000Z",
"num_tokens": 262,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 910
} |
# make_experiments.py
# Author: Noam Buckman
# Description: Generate Simulation Settings
# This Notebook generates and saves multiple experiment savings for
# analyzing the effects of different parameters.
# The settings can be varied by % human (non-communicating) vehicles and SVO type.
# Output: Multiple Pickle (.p) files that save the Vehicle settings.
# Note: Make sure to change the path and results_directory to match your machine
import sys, os, pickle
import numpy as np
sys.path.append('/Users/noambuckman/git-repos/traffic_simulations/src/')
import TrafficSimulator as ts
# Directory that receives every pickled experiment-settings file.
# NOTE(review): hard-coded absolute path — adjust to your machine (see header).
results_directory = '/Users/noambuckman/git-repos/traffic_simulations/results/'
if not os.path.exists(results_directory):
    os.makedirs(results_directory)
def assign_human(sim_car_lists, p_human):
    """Randomly relabel each vehicle as "Human" or "AV".

    Every car is flipped independently: with probability p_human it becomes
    "Human" (non-communicating), otherwise "AV". All other per-car fields are
    preserved.

    Parameters
    ----------
    sim_car_lists : list of list of 6-tuples
        One car list per simulation; tuples are
        (arrival_time, agentID, human_type, svo_theta, turn_direction, start_lane).
    p_human : float in [0, 1]
        Probability that a vehicle is labeled "Human".

    Returns
    -------
    New nested list with the human_type slot of each tuple replaced.
    """
    relabeled_sims = []
    for car_list in sim_car_lists:
        relabeled_cars = []
        for arrival_time, agent_id, _, svo_theta, turn_direction, start_lane in car_list:
            new_type = "Human" if np.random.random() < p_human else "AV"
            relabeled_cars.append(
                (arrival_time, agent_id, new_type, svo_theta, turn_direction, start_lane))
        relabeled_sims.append(relabeled_cars)
    return relabeled_sims
def assign_svo(sim_car_lists, svo_theta_list):
    """Assign each vehicle an SVO angle drawn uniformly from svo_theta_list.

    Only the svo_theta slot of each car tuple is replaced; all other fields
    (arrival time, id, human type, turn, lane) are preserved.

    Parameters
    ----------
    sim_car_lists : list of list of 6-tuples
        One car list per simulation.
    svo_theta_list : list of float
        Candidate SVO angles (radians), sampled with equal probability.

    Returns
    -------
    New nested list with resampled SVO angles.
    """
    resampled_sims = []
    for car_list in sim_car_lists:
        resampled_cars = []
        for arrival_time, agent_id, human_type, _, turn_direction, start_lane in car_list:
            theta = np.random.choice(svo_theta_list)
            resampled_cars.append(
                (arrival_time, agent_id, human_type, theta, turn_direction, start_lane))
        resampled_sims.append(resampled_cars)
    return resampled_sims
#### 1. Generate (and save) the arrival times for all vehicles
# The arrival times and turns are fixed for all experiments so that only the
# vehicle-specific settings (communication, SVO) vary between conditions.
number_sims = 25
all_cars_arrivals_turns = [ts.generate_simulation_cars(16, 0.33, 0.33, 0.1) for s in range(number_sims)]
with open(results_directory + 'all_cars_arrivals_turns.p', 'wb') as f:
    pickle.dump(all_cars_arrivals_turns, f)

#### 2. Vary the percentage of human (non-communicating) vehicles
# BUG FIX: the 75% condition previously reused p_human=0.50, so the "h75"
# experiments were statistically identical to "h50". It now uses 0.75.
human_settings = {}  # label -> simulations with human/AV types assigned
for label, p_human in [('h0', 0.0), ('h25', 0.25), ('h50', 0.50),
                       ('h75', 0.75), ('h100', 1.00)]:
    human_settings[label] = assign_human(all_cars_arrivals_turns, p_human)
    with open(results_directory + 'all_cars_{}.p'.format(label), 'wb') as f:
        pickle.dump(human_settings[label], f)

#### 3. Vary the distribution of SVOs of the vehicles
# Each human-fraction condition is crossed with four SVO distributions.
# Filenames match the original script exactly ('..._ego.p', '..._all_pro.p', ...).
svo_distributions = [
    ('ego', [0.0]),                                            # all egoistic
    ('all_pro', [np.pi/4.0]),                                  # all prosocial
    ('mixed3', [np.pi/4.0, np.pi/6.0, 0]),                     # 3-angle mix
    ('mixed4', [np.pi/4.0, np.pi/6.0, np.pi/12.0, 0]),         # 4-angle mix
]
for label, cars in human_settings.items():
    for svo_name, svo_thetas in svo_distributions:
        cars_with_svo = assign_svo(cars, svo_thetas)
        with open(results_directory + 'all_cars_{}_{}.p'.format(label, svo_name), 'wb') as f:
            pickle.dump(cars_with_svo, f)
| {
"alphanum_fraction": 0.765578635,
"author": null,
"avg_line_length": 48.5075757576,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "2e70046045b9553066d6adeb18cde0273c8558d0",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2022-02-07T08:17:14.000Z",
"max_forks_repo_forks_event_min_datetime": "2022-02-07T08:17:14.000Z",
"max_forks_repo_head_hexsha": "d5d557d014ab6b4f6f2483e5f5087f39553f50fe",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "noambuckman/svo-intersection",
"max_forks_repo_path": "scripts/make_experiments.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "d5d557d014ab6b4f6f2483e5f5087f39553f50fe",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "noambuckman/svo-intersection",
"max_issues_repo_path": "scripts/make_experiments.py",
"max_line_length": 124,
"max_stars_count": 3,
"max_stars_repo_head_hexsha": "d5d557d014ab6b4f6f2483e5f5087f39553f50fe",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "noambuckman/svo-intersection",
"max_stars_repo_path": "scripts/make_experiments.py",
"max_stars_repo_stars_event_max_datetime": "2021-10-20T06:15:48.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-07-30T16:42:25.000Z",
"num_tokens": 1999,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 6403
} |
"""
External indices
"""
import numpy as np
from sklearn.metrics.cluster.supervised import check_clusterings
from sklearn.utils.linear_assignment_ import linear_assignment
from sklearn.metrics import accuracy_score
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import connected_components
def _contingency_matrix(y_true, y_pred):
w = np.zeros((y_true.max() + 1, y_pred.max() + 1), dtype=np.int64)
for c, k in zip(y_true, y_pred):
w[c, k] += 1 # w[c, k] = number of c-labeled samples in map cell k
return w
def class_scatter_index(dist_fun, y_true, y_pred):
    """Class scatter index (CSI).

    For each ground-truth class, counts the connected groups formed on the
    map by the units holding at least one sample of that class (two units
    are connected when dist_fun reports distance 1). CSI is the mean group
    count over all classes.

    Parameters
    ----------
    dist_fun : function (k : int, l : int) => int
        distance function between units k and l on the map.
    y_true : array, shape = [n]
        true labels.
    y_pred : array, shape = [n]
        predicted cluster ids.

    Returns
    -------
    csi : float (lower is better)

    References
    ----------
    Elend, L., & Kramer, O. (2019). Self-Organizing Maps with Convolutional Layers.
    """
    y_true = y_true.astype(np.int64)
    y_pred = y_pred.astype(np.int64)
    check_clusterings(y_true, y_pred)
    n_classes = y_true.max() + 1
    n_units = y_pred.max() + 1
    w = _contingency_matrix(y_true, y_pred)
    groups = np.zeros(n_classes, dtype=np.int64)
    for c in range(n_classes):
        # Map units that hold at least one sample of class c.
        occupied = [k for k in range(n_units) if w[c, k] > 0]
        # Adjacency restricted to occupied units: 1 iff direct neighbors.
        adjacency = [[1 if dist_fun(k, l) == 1 else 0 for l in occupied]
                     for k in occupied]
        groups[c] = connected_components(csgraph=csr_matrix(adjacency),
                                         directed=False, return_labels=False)
    return np.mean(groups)
def clustering_accuracy(y_true, y_pred):
    """Unsupervised clustering accuracy.

    Finds the one-to-one mapping between cluster ids and true labels that
    maximizes the number of correctly assigned samples (Hungarian algorithm)
    and returns the resulting accuracy.
    Can only be used if the number of target classes in y_true is equal to the number of clusters in y_pred.

    Parameters
    ----------
    y_true : array, shape = [n]
        true labels.
    y_pred : array, shape = [n]
        predicted cluster ids.

    Returns
    -------
    accuracy : float in [0,1] (higher is better)
    """
    # FIX: sklearn.utils.linear_assignment_ was removed in scikit-learn 0.23;
    # scipy.optimize.linear_sum_assignment is the maintained equivalent.
    from scipy.optimize import linear_sum_assignment

    y_true = y_true.astype(np.int64)
    y_pred = y_pred.astype(np.int64)
    check_clusterings(y_true, y_pred)
    w = _contingency_matrix(y_true, y_pred).T
    # Maximize matched counts by minimizing the complementary cost (max - w).
    row_ind, col_ind = linear_sum_assignment(w.max() - w)
    return w[row_ind, col_ind].sum() / y_true.size
def entropy(y_true, y_pred):
    """SOM class distribution entropy measure.

    Sums -log of the relative frequency of the majority class inside each
    map cell (the small epsilon keeps empty cells finite).

    Parameters
    ----------
    y_true : array, shape = [n]
        true labels.
    y_pred : array, shape = [n]
        predicted cluster ids.

    Returns
    -------
    entropy : float (lower is better)

    References
    ----------
    Elend, L., & Kramer, O. (2019). Self-Organizing Maps with Convolutional Layers.
    """
    y_true = y_true.astype(np.int64)
    y_pred = y_pred.astype(np.int64)
    check_clusterings(y_true, y_pred)
    w = _contingency_matrix(y_true, y_pred)
    majority = w.max(axis=0) + 1e-12   # majority-class count per cell
    totals = w.sum(axis=0) + 1e-12     # total samples per cell
    return -np.sum(np.log(majority / totals))
def normalized_minor_class_occurrence(y_true, y_pred):
    """Normalized minor class occurrence (NMCO).

    Fraction of samples that do not carry the majority ground-truth label of
    their cluster; by definition equal to 1 - purity.

    Parameters
    ----------
    y_true : array, shape = [n]
        true labels.
    y_pred : array, shape = [n]
        predicted cluster ids.

    Returns
    -------
    nmco : float in [0,1] (lower is better)

    References
    ----------
    Elend, L., & Kramer, O. (2019). Self-Organizing Maps with Convolutional Layers.
    """
    majority_fraction = purity(y_true, y_pred)
    return 1.0 - majority_fraction
def purity(y_true, y_pred):
    """Clustering purity.

    Maps every cluster to its majority true label, then scores the resulting
    labeling against y_true.

    Parameters
    ----------
    y_true : array, shape = [n]
        true labels.
    y_pred : array, shape = [n]
        predicted cluster ids.

    Returns
    -------
    purity : float in [0,1] (higher is better)
    """
    y_true = y_true.astype(np.int64)
    y_pred = y_pred.astype(np.int64)
    check_clusterings(y_true, y_pred)
    w = _contingency_matrix(y_true, y_pred)
    majority_label = w.argmax(axis=0)      # cluster id -> dominant true label
    voted = majority_label[y_pred]         # vectorized per-sample relabeling
    return accuracy_score(y_true, voted)
| {
"alphanum_fraction": 0.6245939675,
"author": null,
"avg_line_length": 28.7333333333,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "3a0840d58aea546b29529869f4abbeab439af444",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 5,
"max_forks_repo_forks_event_max_datetime": "2022-02-24T02:35:29.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-06-26T09:44:37.000Z",
"max_forks_repo_head_hexsha": "f747ffff9b3b291a2f9b6bff184755b0f9c89c70",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "FlorentF9/SOMperf",
"max_forks_repo_path": "somperf/metrics/external.py",
"max_issues_count": 3,
"max_issues_repo_head_hexsha": "f747ffff9b3b291a2f9b6bff184755b0f9c89c70",
"max_issues_repo_issues_event_max_datetime": "2022-02-21T22:09:49.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-07-15T19:57:31.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "FlorentF9/SOMperf",
"max_issues_repo_path": "somperf/metrics/external.py",
"max_line_length": 109,
"max_stars_count": 14,
"max_stars_repo_head_hexsha": "f747ffff9b3b291a2f9b6bff184755b0f9c89c70",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "FlorentF9/SOMperf",
"max_stars_repo_path": "somperf/metrics/external.py",
"max_stars_repo_stars_event_max_datetime": "2021-12-07T18:21:56.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-11-02T20:03:01.000Z",
"num_tokens": 1138,
"path": null,
"reason": "import numpy,from scipy",
"repo": null,
"save_path": null,
"sha": null,
"size": 4310
} |
# -*- mode: python; coding: utf-8 -*-
# Copyright (c) 2018 Radio Astronomy Software Group
# Licensed under the 2-clause BSD License
"""Tests for telescope objects and functions.
"""
import os
from astropy.coordinates import EarthLocation
import numpy as np
import pytest
import pyuvdata
from pyuvdata.data import DATA_PATH
from pyuvdata import UVData
# Parameter names every Telescope object must define (underscored internals)
# and their public property counterparts.
required_parameters = ["_telescope_name", "_telescope_location"]
required_properties = ["telescope_name", "telescope_location"]
# Optional parameters/properties a Telescope may also carry.
extra_parameters = [
    "_antenna_diameters",
    "_Nants_telescope",
    "_antenna_names",
    "_antenna_numbers",
    "_antenna_positions",
]
extra_properties = [
    "antenna_diameters",
    "Nants_telescope",
    "antenna_names",
    "antenna_numbers",
    "antenna_positions",
]
# Non-parameter attributes that are allowed on a Telescope instance.
other_attributes = [
    "citation",
    "telescope_location_lat_lon_alt",
    "telescope_location_lat_lon_alt_degrees",
    "pyuvdata_version_str",
]
# Astropy's site registry can contain empty-string entries; strip them before
# building the expected known-telescope list.
astropy_sites = EarthLocation.get_site_names()
while "" in astropy_sites:
    astropy_sites.remove("")
expected_known_telescopes = astropy_sites + ["PAPER", "HERA", "SMA"]
# Tests for Telescope object
def test_parameter_iter():
    """Iterating a Telescope must yield every required parameter."""
    obj = pyuvdata.Telescope()
    seen = list(obj)
    for name in required_parameters:
        assert name in seen, (
            "expected attribute " + name + " not returned in object iterator"
        )
def test_required_parameter_iter():
    """Telescope.required() must yield every required parameter."""
    obj = pyuvdata.Telescope()
    required = list(obj.required())
    for name in required_parameters:
        assert name in required, (
            "expected attribute " + name + " not returned in required iterator"
        )
def test_extra_parameter_iter():
    "Test expected optional parameters."
    telescope_obj = pyuvdata.Telescope()
    extra = []
    for prop in telescope_obj.extra():
        extra.append(prop)
    for a in extra_parameters:
        # BUG FIX: this check was a bare expression (no `assert`), so the test
        # could never fail regardless of what extra() returned.
        assert a in extra, "expected attribute " + a + " not returned in extra iterator"
def test_unexpected_parameters():
    """Telescope must define no underscored parameters beyond the known set."""
    obj = pyuvdata.Telescope()
    allowed = required_parameters + extra_parameters
    underscored = [name for name in obj.__dict__ if name[0] == "_"]
    for name in underscored:
        assert name in allowed, (
            "unexpected parameter " + name + " found in Telescope"
        )
def test_unexpected_attributes():
    """Telescope must carry no public attributes beyond the known set."""
    obj = pyuvdata.Telescope()
    allowed = required_properties + other_attributes
    public = [name for name in obj.__dict__ if name[0] != "_"]
    for name in public:
        assert name in allowed, (
            "unexpected attribute " + name + " found in Telescope"
        )
def test_properties():
    """Setting each public property must be reflected by its parameter."""
    obj = pyuvdata.Telescope()
    for prop_name, param_name in zip(required_properties, required_parameters):
        value = np.random.rand()
        setattr(obj, prop_name, value)
        stored = getattr(obj, param_name)
        try:
            assert value == stored.value
        except AssertionError:
            print("setting {prop_name} to a random number failed".format(prop_name=prop_name))
            raise
def test_known_telescopes():
    """Test known_telescopes function returns expected results."""
    actual = sorted(pyuvdata.known_telescopes())
    expected = sorted(expected_known_telescopes)
    assert actual == expected
def test_get_telescope():
    """Every known telescope name must round-trip through get_telescope."""
    for name in pyuvdata.known_telescopes():
        assert pyuvdata.get_telescope(name).telescope_name == name
def test_get_telescope_center_xyz():
    """get_telescope must honor center_xyz, with or without lat/lon/alt set."""
    ref_xyz = (-2562123.42683, 5094215.40141, -2848728.58869)
    ref_latlonalt = (-26.7 * np.pi / 180.0, 116.7 * np.pi / 180.0, 377.8)
    # "test" has only the ECEF center; "test2" additionally has lat/lon/alt.
    base_entry = {
        "center_xyz": ref_xyz,
        "latitude": None,
        "longitude": None,
        "altitude": None,
        "citation": "",
    }
    full_entry = dict(base_entry,
                      latitude=ref_latlonalt[0],
                      longitude=ref_latlonalt[1],
                      altitude=ref_latlonalt[2])
    test_telescope_dict = {"test": base_entry, "test2": full_entry}
    expected = pyuvdata.Telescope()
    expected.citation = ""
    expected.telescope_name = "test"
    expected.telescope_location = ref_xyz
    obj = pyuvdata.get_telescope("test", telescope_dict_in=test_telescope_dict)
    assert obj == expected
    expected.telescope_name = "test2"
    obj2 = pyuvdata.get_telescope("test2", telescope_dict_in=test_telescope_dict)
    assert obj2 == expected
def test_get_telescope_no_loc():
    """get_telescope must raise ValueError for an entry with no location."""
    test_telescope_dict = {
        "test": {
            "center_xyz": None,
            "latitude": None,
            "longitude": None,
            "altitude": None,
            "citation": "",
        }
    }
    with pytest.raises(ValueError):
        pyuvdata.get_telescope("test", telescope_dict_in=test_telescope_dict)
def test_hera_loc():
    """The builtin HERA location must match the test data file's location."""
    hera_file = os.path.join(DATA_PATH, "zen.2458098.45361.HH.uvh5_downselected")
    uv = UVData()
    uv.read(hera_file, read_data=False, file_type="uvh5")
    known = pyuvdata.get_telescope("HERA")
    tols = uv._telescope_location.tols
    assert np.allclose(
        known.telescope_location,
        uv.telescope_location,
        rtol=tols[0],
        atol=tols[1],
    )
| {
"alphanum_fraction": 0.6629559856,
"author": null,
"avg_line_length": 29.6395939086,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "bc2654a4c348d836db6d4d1f85ab2dae665fe8e8",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "90537f78230d3d34f5db4d39a9f2a18373435437",
"max_forks_repo_licenses": [
"BSD-2-Clause"
],
"max_forks_repo_name": "no-lex/pyuvdata",
"max_forks_repo_path": "pyuvdata/tests/test_telescopes.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "90537f78230d3d34f5db4d39a9f2a18373435437",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSD-2-Clause"
],
"max_issues_repo_name": "no-lex/pyuvdata",
"max_issues_repo_path": "pyuvdata/tests/test_telescopes.py",
"max_line_length": 86,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "90537f78230d3d34f5db4d39a9f2a18373435437",
"max_stars_repo_licenses": [
"BSD-2-Clause"
],
"max_stars_repo_name": "no-lex/pyuvdata",
"max_stars_repo_path": "pyuvdata/tests/test_telescopes.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1373,
"path": null,
"reason": "import numpy,from astropy",
"repo": null,
"save_path": null,
"sha": null,
"size": 5839
} |
module Ch05.VarArg
import Data.Vect
--------------------------------------------------------------------------------
-- Auxiliary stuff for defining functions with a variable # of args
--------------------------------------------------------------------------------
||| Type of vectors of fixed length of elements of varying types
-- A heterogeneous vector of length n: element i has the i-th type in `as`.
data VarVect : (n : Nat) -> Vect n Type -> Type where
  VarNil : VarVect Z []
  -- Prepend a value `x` of type `a`, extending both the length and the
  -- type-level list of element types.
  VarCons : (a : Type) ->
            (x : a) ->
            VarVect n as ->
            VarVect (S n) (a :: as)
-- Computes the curried function type a_1 -> a_2 -> ... -> a_n -> <result>.
-- NOTE(review): the base case is still an unfilled hole (?VarArgType_rhs_1);
-- presumably it should produce the function's result type — confirm intent.
VarArgType : (numArgs : Nat) ->
             Vect numArgs Type ->
             Type
VarArgType Z [] = ?VarArgType_rhs_1
VarArgType (S k) (a :: as) = a -> VarArgType k as
-- Suppose we want to define a function
--
-- f : a_1 -> a_2 -> ... -> a_n -> b
--
-- for some [a_1, ..., a_n] : Vect n Type. Then there are two ways to do that:
--
-- One: Starting from the knowledge of
--
-- \x1, ... x(n-1) => f x1 ... x(n-1) xn
--
-- for arbitrary but fixed `xn`, we define `f`.
| {
"alphanum_fraction": 0.4707050645,
"author": null,
"avg_line_length": 27.9722222222,
"converted": null,
"ext": "idr",
"file": null,
"hexsha": "de6cff6672d0d1dd30180793ae863f6f412ed632",
"include": null,
"lang": "Idris",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 3,
"max_forks_repo_forks_event_max_datetime": "2022-03-06T07:08:45.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-03-13T04:56:05.000Z",
"max_forks_repo_head_hexsha": "3934bfe42b93f84c1bf35b7b34cf30b3a7fd7399",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "mr-infty/tapl",
"max_forks_repo_path": "Ch05/VarArg.idr",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "3934bfe42b93f84c1bf35b7b34cf30b3a7fd7399",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "mr-infty/tapl",
"max_issues_repo_path": "Ch05/VarArg.idr",
"max_line_length": 80,
"max_stars_count": 6,
"max_stars_repo_head_hexsha": "3934bfe42b93f84c1bf35b7b34cf30b3a7fd7399",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "mr-infty/tapl",
"max_stars_repo_path": "Ch05/VarArg.idr",
"max_stars_repo_stars_event_max_datetime": "2022-03-15T11:38:28.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-02-27T13:52:57.000Z",
"num_tokens": 264,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 1007
} |
from __future__ import division
from numpy import *
from interpolation.cartesian import mlinspace
# --- Shared fixtures for the cubic-spline tests: a regular grid on [0, 1]^d.
d = 2 # number of dimensions
Ng = 1000 # target total number of grid points
K = int(Ng**(1/d)) # nb of points in each dimension
N = 10000 # nb of points at which splines are evaluated
a = array([0.0]*d, dtype=float)   # lower bounds of the grid
b = array([1.0]*d, dtype=float)   # upper bounds of the grid
orders = array([K]*d, dtype=int)  # grid size per dimension
grid = mlinspace(a,b,orders)
# single valued function to interpolate
f = lambda vec: sqrt(vec.sum(axis=1))
# df
# # vector valued function
# g
# Values of f on the grid (single-valued spline data) ...
vals = f(grid)
print(vals)
# ... and the same values duplicated into two columns (multi-valued data).
mvals = concatenate([vals[:,None],vals[:,None]],axis=1)
print(mvals.shape)
# one single point
point = array([0.5, 0.5])
# many points (the same point repeated N times)
points = row_stack([[0.5, 0.5]]*N)
def test_cubic_spline():
    """Filter then evaluate a scalar cubic spline at one point and at many."""
    from interpolation.splines.filter_cubic import filter_coeffs
    from interpolation.splines.eval_cubic import eval_cubic_spline, vec_eval_cubic_spline
    coeffs = filter_coeffs(a, b, orders, vals)
    # Filtering pads every dimension by two coefficients.
    assert tuple(coeffs.shape) == tuple(o + 2 for o in orders)
    at_point = eval_cubic_spline(a, b, orders, coeffs, point)
    at_points = vec_eval_cubic_spline(a, b, orders, coeffs, points)
    assert isinstance(at_point, float)
    assert at_points.ndim == 1
def test_cubic_multi_spline():
    """Same round trip as test_cubic_spline, for a two-valued spline."""
    from interpolation.splines.filter_cubic import filter_mcoeffs
    from interpolation.splines.eval_cubic import eval_cubic_splines, vec_eval_cubic_splines
    coeffs = filter_mcoeffs(a, b, orders, mvals)
    # Padded grid shape plus one trailing axis per spline.
    assert tuple(coeffs.shape) == tuple([o + 2 for o in orders] + [mvals.shape[1]])
    at_point = eval_cubic_splines(a, b, orders, coeffs, point)
    at_points = vec_eval_cubic_splines(a, b, orders, coeffs, points)
    assert at_point.ndim == 1
    assert at_points.ndim == 2
def test_cubic_spline_object():
    """The CubicSpline object evaluates a scalar spline at a single point and at a batch."""
    from interpolation.splines import CubicSpline

    spline = CubicSpline(a, b, orders, vals)

    single = spline(point)
    assert single.ndim == 0
    assert isscalar(single)

    batched = spline(points)
    assert batched.ndim == 1
    assert tuple(batched.shape) == (N,)
def test_cubic_multi_spline_object():
    """The CubicSplines object evaluates several splines at a single point and at a batch."""
    from interpolation.splines import CubicSplines

    splines = CubicSplines(a, b, orders, mvals)
    n_splines = mvals.shape[1]

    single = splines(point)
    assert single.ndim == 1
    assert tuple(single.shape) == (n_splines,)

    batched = splines(points)
    assert batched.ndim == 2
    assert tuple(batched.shape) == (N, n_splines)
if __name__ == '__main__':
    # run every smoke test, in the original order
    for _test in (test_cubic_spline,
                  test_cubic_multi_spline,
                  test_cubic_spline_object,
                  test_cubic_multi_spline_object):
        _test()
| {
"alphanum_fraction": 0.6888708367,
"author": null,
"avg_line_length": 24.137254902,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "b268b5a428aea729192391f13f50ab8d9dda9acb",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "25520556804dd104c5931c8a6bedfff65420025f",
"max_forks_repo_licenses": [
"BSD-2-Clause"
],
"max_forks_repo_name": "gboehl/interpolation.py",
"max_forks_repo_path": "interpolation/splines/tests/test_cubic_splines.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "25520556804dd104c5931c8a6bedfff65420025f",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSD-2-Clause"
],
"max_issues_repo_name": "gboehl/interpolation.py",
"max_issues_repo_path": "interpolation/splines/tests/test_cubic_splines.py",
"max_line_length": 91,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "25520556804dd104c5931c8a6bedfff65420025f",
"max_stars_repo_licenses": [
"BSD-2-Clause"
],
"max_stars_repo_name": "gboehl/interpolation.py",
"max_stars_repo_path": "interpolation/splines/tests/test_cubic_splines.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 710,
"path": null,
"reason": "from numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 2462
} |
#this is a python script that calls a c++ excutable
import time
import numpy as np
import subprocess
import os
def generate_embedding(norb,nimp,u,afm):
    '''
    Generate the 1-body embedding Hamiltonian.

    Builds a 1D lattice with nearest-neighbour hopping (periodic boundary
    conditions) and a staggered on-site field of strength `afm`, computes the
    mean-field 1-particle density matrices, constructs bath orbitals via SVD
    of the impurity-environment block, and writes the resulting embedding
    Hamiltonians to text files read by the external MPS solver.

    NOTE(review): this is Python 2 code (integer division `nelec/2`, print
    statements elsewhere in the file); do not run under Python 3 unchanged.

    Parameters
    ----------
    norb : int
        Number of lattice orbitals of the full system.
    nimp : int
        Number of impurity sites; the embedding space has 2*nimp orbitals.
    u : float
        On-site interaction strength.
    afm : float
        Magnitude of the staggered on-site field (breaks spin symmetry).

    Returns
    -------
    (h1a, h1b) : tuple of 2D arrays
        Spin-up and spin-down 1-body embedding Hamiltonians (nemb x nemb).
    (g2_0a, g2_ab, g2_0b) : tuple of arrays
        2-body embedding interaction reshaped to (nemb**3, nemb); only the
        opposite-spin channel is non-zero.
    '''
    tmpdir = "/home/sunchong/work/finiteTMPS/tests/call/dump/"  # NOTE(review): hard-coded scratch directory
    impsite = np.arange(nimp)+1  # impurity site indices, 1-based for the external solver
    np.savetxt(tmpdir+"/impsite.txt",impsite.T,fmt='%d')
    # solve full system
    nelec = norb  # half filling: one electron per site
    nemb = 2*nimp  # embedding dimension: impurity + bath
    h1e = np.zeros((2, norb, norb))
    # nearest-neighbour hopping (t = 1) with periodic wrap-around; the
    # staggered field has opposite sign on even/odd sites and on the two spins
    for i in range(norb):
        h1e[0][i,(i+1)%norb] = -1.
        h1e[0][i,(i-1)%norb] = -1.
        h1e[1][i,(i+1)%norb] = -1.
        h1e[1][i,(i-1)%norb] = -1.
        if(i%2==0):
            h1e[0][i,i] = afm
            h1e[1][i,i] = -afm
        else:
            h1e[1][i,i] = afm
            h1e[0][i,i] = -afm
    # diagonalize each spin channel; eigenvectors sorted by ascending energy
    ewa, eva = np.linalg.eigh(h1e[0])
    ewb, evb = np.linalg.eigh(h1e[1])
    # mean-field density matrices from the occupied orbitals
    # (nelec/2 is Python 2 integer division)
    dm1a = np.einsum('ij,kj-> ik', eva[:,:nelec/2],eva[:,:nelec/2].conj())
    dm1b = np.einsum('ij,kj-> ik', evb[:,:nelec/2],evb[:,:nelec/2].conj())
    # construct bath
    # rotation matrices: identity on the impurity block, SVD-derived bath
    # orbitals on the environment block
    Ra = np.zeros((norb,nimp*2))
    Rb = np.zeros((norb,nimp*2))
    Ra[:nimp,:nimp] = np.eye(nimp)
    Rb[:nimp,:nimp] = np.eye(nimp)
    _,_,ba = np.linalg.svd(dm1a[:nimp,nimp:],full_matrices=False)
    _,_,bb = np.linalg.svd(dm1b[:nimp,nimp:],full_matrices=False)
    Ra[nimp:,nimp:] = ba.conj().T
    Rb[nimp:,nimp:] = bb.conj().T
    # construct 1-body embedding Hamiltonian
    h1emb = np.zeros((2, 2*nimp,2*nimp))
    h1emb[0] = np.dot(Ra.conj().T, np.dot(h1e[0], Ra))
    h1emb[1] = np.dot(Rb.conj().T, np.dot(h1e[1], Rb))
    # stack the two spin blocks vertically for savetxt (text files are 2D)
    h1emb = h1emb.reshape(nemb*2,nemb)
    np.savetxt(tmpdir+"/hamfile.txt",h1emb)
    np.savetxt(tmpdir+"/ehamfile.txt",h1emb)
    #construct 2-body embedding Hamiltonian
    # rotate the on-site (opposite-spin) interaction into the embedding basis
    g2emb = np.einsum('ki,im,li,in -> kmln', Ra.T.conj(),Ra,Rb.T.conj(),Rb)*u
    np.savetxt(tmpdir+"/evfile.txt", g2emb.reshape(nemb**3,nemb))
    #g2e = np.zeros((nemb,)*4)
    #for i in range(nimp):
    #    g2e[i,i,i,i] = u
    #g2e[1,1,2,2] = 2.
    #g2e[2,2,1,1] = 2.
    #g2emb = g2e.copy()
    #g2e = g2e.reshape(nemb**3,nemb)
    #np.savetxt(tmpdir+"/vfile.txt",g2e)
    # same-spin channels are zero; only the up-down interaction survives
    return (h1emb[:nemb], h1emb[nemb:]), (g2emb*0, g2emb,g2emb*0)
#############################################
##Hubbard Hamiltonian
#nemb = 2*nimp
#h1a = np.zeros((nemb,nemb))
#for i in range(nemb):
# h1a[i,(i+1)%nemb] = -1.
# h1a[i,(i-1)%nemb] = -1.
#h1 = np.array([h1a,h1a])
#np.savetxt(tmpdir+"/hamfile.txt",h1.reshape(2*nemb,nemb))
#return (h1a,h1a)
#############################################
def call_ftmps(norb, nimp, u, mu, beta, tau, maxm=2000, \
               tmpdir='./', mpsdir='../../'):
    '''
    Solve embedding problem with impsolver_ancilla

    Writes the key/value input file expected by the external C++ executable
    "impsolver_ancilla_ibath", runs it via subprocess, reads back the energy
    and 1-particle density matrix, and deletes the temporary files.

    Parameters
    ----------
    norb : int
        Number of orbitals of the embedding problem.
    nimp : int
        Number of impurity sites.
    u : float
        Interaction strength.
    mu : float
        Chemical potential.
    beta : float
        Inverse temperature.
    tau : float
        Imaginary-time step.
    maxm : int, optional
        Maximum MPS bond dimension.
    tmpdir : str, optional
        Scratch directory. NOTE(review): immediately overwritten by a
        hard-coded path below, so the keyword has no effect.
    mpsdir : str, optional
        Directory containing the solver executable.

    Returns
    -------
    e : ndarray
        Energy read from the solver's energy.txt.
    rdm1 : ndarray
        1-particle density matrix read from the solver's rdm1s.txt.
    '''
    tmpdir = "/home/sunchong/work/finiteTMPS/tests/call/dump/"  # shadows the keyword argument
    #time_str = repr(time.time())[6:]
    hamfile = tmpdir + "/hamfile.txt"
    ehamfile = tmpdir + "/ehamfile.txt"
    vfile = tmpdir + "/vfile.txt"
    evfile = tmpdir + "/evfile.txt"
    infile = tmpdir + "/input_mps"
    impsite = tmpdir + "/impsite.txt"
    # write input file
    fin = open(infile, "w")
    fin.write("input\n{\n")
    fin.write("hamfile = %s\n"%hamfile)
    fin.write("ehamfile = %s\n"%ehamfile)
    fin.write("vfile = %s\n"%vfile)
    fin.write("evfile = %s\n"%evfile)
    fin.write("outdir = %s\n"%(tmpdir))
    fin.write("impsite = %s\n"%(impsite))
    fin.write("N = %d\n"%norb)
    fin.write("Nimp = %d\n"%nimp)
    fin.write("U = %f\n"%u)
    fin.write("mu = %f\n"%mu)
    fin.write("beta = %f\n"%beta)
    fin.write("tau = %f\n"%tau)
    fin.write("maxm = %d\n"%maxm)
    fin.write("cutoff = 1E-9\n")
    fin.write("realstep = yes\n")
    fin.write("verbose = no\n")
    fin.write("fitmpo = yes\n")
    fin.write("rungekutta = yes\n}")
    fin.close()
    # write 1 body hamiltonian
    #h1e_n = h1e.reshape(2*norb, norb)
    #np.savetxt(hamfile,h1e_n)
    #np.savetxt(impsite, (np.arange(nimp)+1).T, fmt="%d")
    # call ancilla code
    subprocess.call([mpsdir + "impsolver_ancilla_ibath", infile])
    # read the solver output back in
    rdm1 = np.loadtxt(tmpdir+"/rdm1s.txt")
    e = np.loadtxt(tmpdir+"/energy.txt")
    # remove temperary files
    os.remove(infile)
    os.remove(hamfile)
    os.remove(impsite)
    os.remove(tmpdir+"/energy.txt")
    os.remove(tmpdir+"/rdm1s.txt")
    os.remove(tmpdir+"/rdm2.txt")
    return e, rdm1
def printmat(m):
    """Print a 2D matrix row by row with space-separated values.

    Uses the parenthesized single-expression form of print, which behaves
    identically under Python 2 (where the rest of this file lives) and
    Python 3.

    Parameters
    ----------
    m : ndarray or sequence of rows
        2D data to print; each row becomes one output line.
    """
    for row in m:
        print(' '.join(str(v) for v in row))
if __name__ == "__main__":
    # Cross-check the external MPS ancilla solver against finite-temperature
    # exact diagonalization (pyscf ftsolver) on a small embedding problem.
    from pyscf.ftsolver import ed_grandmu as ftfci
    tmpdir = "/home/sunchong/work/finiteTMPS/tests/call/dump/"
    norb = 12   # lattice size of the full system
    nimp = 2    # impurity size
    nemb = 2*nimp  # embedding dimension (impurity + bath)
    u = 4.
    mu = 0. #u/2
    beta = 0.4
    T = 1./beta  # temperature for the ED reference
    tau = 0.05   # imaginary-time step for the MPS solver
    afm = 0.01   # small staggered field to break spin symmetry
    h1e, g2e = generate_embedding(norb,nimp,u,afm)
    #g2e = np.zeros((nemb,)*4)
    #for i in range(nimp):
    #    g2e[i,i,i,i] = u
    #g2e = (g2e*0,g2e,g2e*0)
    e, rdm1 = call_ftmps(nemb, nimp, u, mu, beta, tau)
    rdm1fci,_,efci = ftfci.rdm12s_fted(h1e,g2e,nemb,nemb,T,mu,symm="UHF")
    rdm1fci = rdm1fci.reshape(2*nemb,nemb)
    # the two energies and density matrices should agree closely
    print e, efci
    print np.linalg.norm(rdm1-rdm1fci)
| {
"alphanum_fraction": 0.5548698168,
"author": null,
"avg_line_length": 28.6464088398,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "837f59c23342b5bb7e7496caf11e120c72fe4fda",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "33e96bb5bc4913ee7d57d93f61e319df6dea5d36",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "sunchong137/finiteMPS_sc",
"max_forks_repo_path": "tests/call/call_ancilla.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "33e96bb5bc4913ee7d57d93f61e319df6dea5d36",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "sunchong137/finiteMPS_sc",
"max_issues_repo_path": "tests/call/call_ancilla.py",
"max_line_length": 77,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "33e96bb5bc4913ee7d57d93f61e319df6dea5d36",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "sunchong137/finiteMPS_sc",
"max_stars_repo_path": "tests/call/call_ancilla.py",
"max_stars_repo_stars_event_max_datetime": "2020-03-03T21:54:46.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-03-03T21:54:46.000Z",
"num_tokens": 1963,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 5185
} |
"""
Toolbox for statistical methods. For all functions, this toolbox assumes that
the first dimension is the temporal sampling dimension.
Author: Andre Perkins
"""
import numpy as np
import numexpr as ne
import dask.array as da
import logging
from scipy.linalg import svd
from scipy.ndimage import convolve1d
from sklearn import linear_model
logger = logging.getLogger(__name__)
def detrend_data(data, output_arr=None):
    """
    Remove a least-squares linear trend along the leading (sampling) axis.

    Parameters
    ----------
    data: ndarray-like
        Input dataset to detrend. Assumes leading axis is sampling dimension.
    output_arr: ndarray-like, optional
        Output array with same shape as data to store detrended data.
    """
    # regress every column against a dummy time index 0..n-1
    sample_idx = np.arange(data.shape[0])[:, None]
    regression = linear_model.LinearRegression(fit_intercept=True, n_jobs=-1)
    regression.fit(sample_idx, data)
    trend = regression.predict(sample_idx)
    residual = data - trend

    if output_arr is None:
        return residual
    output_arr[:] = residual
    return output_arr
def dask_detrend_data(data, output_arr):
    """
    Detrend data using a linear fit.

    Parameters
    ----------
    data: dask.array
        Input dataset to detrend. Assumes leading axis is sampling dimension.
    output_arr: ndarray-like
        Output array with same shape as data to store detrended data.

    Returns
    -------
    output_arr: ndarray-like
        The provided output array, filled with the detrended data.

    Notes
    -----
    This is a very expensive operation if using a large dataset. May slow down
    if forced to spill onto the disk cache  It does not currently take into
    account X data.  Instead, it creates a dummy array (using arange) for
    sampling points.
    """
    # dummy regressor: sample index 0..n-1 as a single-column dask array
    dummy_time = np.arange(data.shape[0])[:, None]
    dummy_time = da.from_array(dummy_time, chunks=dummy_time.shape)

    # intercept handling: center X and Y so lstsq fits a no-intercept model,
    # then recover the intercept from the means afterwards
    x_offset = dummy_time.mean(axis=0)
    x_centered = dummy_time - x_offset
    y_offset = data.mean(axis=0)
    y_centered = data - y_offset

    coefs, resid, rank, s = da.linalg.lstsq(x_centered, y_centered)
    intercepts = y_offset - x_offset*coefs
    predict = da.dot(dummy_time, coefs) + intercepts
    detrended = data - predict
    # materialize the lazy dask graph into the caller-provided array
    da.store(detrended, output_arr)

    return output_arr
def calc_anomaly(data, yrsize, climo=None, output_arr=None):
    """
    Caculate anomaly for the given data.  Right now it assumes sub-annual data
    input so that the climatology subtracts means for each month instead
    of the mean of the entire series.

    Note: May take yrsize argument out and leave it to user to format data
    as to take the desired anomaly.

    Parameters
    ----------
    data: ndarray
        Input data to calculate the anomaly from.  Leading dimension should be
        the temporal axis.
    yrsize: int
        Number of elements that compose a full year.  Used to reshape the data
        time axis to num years x size year for climatology purposes.
    climo: ndarray, optional
        User-provided climatology to subtract from the data.  Must be
        broadcastable over the time-dimension of data
    output_arr: ndarray-like, optional
        Array to place output of anomaly calculation that supports
        ndarray-like slicing. This is required for dask array input.
        NOTE(review): in the non-dask branch the squeezed anomaly of shape
        (nyears, yrsize, nspace) is assigned into output_arr before the
        reshape back to the original shape — output_arr appears to need the
        reshaped (year-split) shape, not the original one; confirm against
        callers.

    Returns
    -------
    anomaly: ndarray-like
        Data converted to its anomaly form.
    climo: ndarray
        The calculated climatology that was subtracted from the data
    """
    yrsize = int(yrsize)
    if not yrsize >= 1:
        raise ValueError('yrsize must be an integer >= 1')

    # Reshape to take monthly mean
    old_shp = data.shape
    new_shp = (old_shp[0]//yrsize, yrsize, old_shp[1])
    data = data.reshape(new_shp)

    # Use of data[:] should work for ndarray or ndarray-like
    if climo is None:
        # mean over the years axis -> one climatological value per sub-year step
        climo = data.mean(axis=0, keepdims=True)

    if is_dask_array(data):
        if output_arr is None:
            raise ValueError('calc_anomaly requires an output array keyword '
                             'argument when operating on a Dask array.')
        anomaly = data - climo
        # flatten the (years, yrsize) axes back into a single time axis
        old_shp_anom = anomaly.reshape(old_shp)
        da.store(old_shp_anom, output_arr)
        out_climo = climo.compute()
    else:
        # numexpr evaluates the broadcast subtraction in a single C pass
        if output_arr is not None:
            output_arr[:] = np.squeeze(ne.evaluate('data - climo'))
        else:
            output_arr = np.squeeze(ne.evaluate('data - climo'))
        # NOTE(review): reshape returns a new object, so a caller-provided
        # output_arr is rebound here rather than modified in place
        output_arr = output_arr.reshape(old_shp)
        out_climo = climo

    return output_arr, out_climo
# def calc_ce(fcast, trial_obs, obs):
# """
# Method to calculate the Coefficient of Efficiency as defined by Nash and
# Sutcliffe 1970.
#
# Parameters
# ----------
# fcast: ndarray
# Time series of forecast data. M x N where M is the temporal dimension.
# obs: ndarray
# Time series of observations. M x N
#
# Returns
# -------
# CE: ndarray
# Coefficient of efficiency for all locations over the time range.
# """
#
# assert(fcast.shape == trial_obs.shape)
#
# # Climatological variance
# cvar = obs.var(axis=0, ddof=1)
#
# # Error variance
# error = ne.evaluate('(trial_obs - fcast)**2')
# evar = error.sum(axis=0)/(len(error))
#
# return 1 - evar/cvar
def calc_eofs(data, num_eigs, ret_pcs=False, var_stats_dict=None):
    """
    Method to calculate the EOFs of given dataset.  This assumes data comes in as
    an m x n matrix where m is the temporal dimension and n is the spatial
    dimension.

    Parameters
    ----------
    data: ndarray
        Dataset to calculate EOFs from
    num_eigs: int
        Number of eigenvalues/vectors to return.  Must be less than min(m, n).
    ret_pcs: bool, optional
        Return principal component matrix along with EOFs
    var_stats_dict: dict, optional
        Dictionary target to star some simple statistics about the EOF
        calculation.  Note: if this is provided for a dask array it prompts two
        SVD calculations for both the compressed and full singular values.

    Returns
    -------
    eofs: ndarray
        The eofs (as column vectors) of the data with dimensions n x k where
        k is the num_eigs.
    svals: ndarray
        Singular values from the svd decomposition. Returned as a row vector
        in order from largest to smallest.
    """
    if is_dask_array(data):
        # compressed SVD computes only the leading num_eigs modes
        pcs, full_svals, eofs = da.linalg.svd_compressed(data, num_eigs)
        var = da.var(data, axis=0)

        out_svals = np.zeros(num_eigs)
        out_eofs = np.zeros((num_eigs, data.shape[1]))
        out_pcs = np.zeros((data.shape[0], num_eigs))
        out_var = np.zeros((data.shape[1]))
        # materialize all lazy results in a single pass over the data
        da.store([eofs, full_svals, pcs, var],
                 [out_eofs, out_svals, out_pcs, out_var])
        out_eofs = out_eofs.T
        out_pcs = out_pcs.T
    else:
        # full SVD of the transposed data: EOFs come out as column vectors
        eofs, full_svals, pcs = svd(data[:].T, full_matrices=False)
        out_eofs = eofs[:, :num_eigs]
        out_svals = full_svals[:num_eigs]
        out_pcs = pcs[:num_eigs]
        out_var = data[:].var(ddof=1, axis=0)

    # variance stats
    if var_stats_dict is not None:
        try:
            nt = data.shape[0]
            ns = data.shape[1]
            # eigenvalues of the covariance matrix from the singular values
            eig_vals = (out_svals ** 2) / nt
            total_var = out_var.sum()
            var_expl_by_mode = eig_vals / total_var
            var_expl_by_retained = var_expl_by_mode[0:num_eigs].sum()

            var_stats_dict['nt'] = nt
            var_stats_dict['ns'] = ns
            var_stats_dict['eigvals'] = eig_vals
            var_stats_dict['num_ret_modes'] = num_eigs
            var_stats_dict['total_var'] = total_var
            var_stats_dict['var_expl_by_mode'] = var_expl_by_mode
            var_stats_dict['var_expl_by_ret'] = var_expl_by_retained
        except TypeError as e:
            # var_stats_dict does not support item assignment; report through
            # the module logger instead of printing to stdout
            logger.error('Must pass dictionary type to var_stats_dict in '
                         'order to output variance statistics: %s', e)

    if ret_pcs:
        return out_eofs, out_svals, out_pcs
    else:
        return out_eofs, out_svals
def calc_lac(fcast, obs):
    """
    Method to calculate the Local Anomaly Correlation (LAC).  Uses numexpr
    for speed over larger datasets.

    Note: If necessary (memory concerns) in the future, the numexpr statements
    can be extended to use pytable arrays.  Would need to provide means to
    function, as summing over the dataset is still very slow it seems.

    Parameters
    ----------
    fcast: ndarray
        Time series of forecast data. M x N where M is the temporal dimension.
    obs: ndarray
        Time series of observations. M x N

    Returns
    -------
    lac: ndarray
        Local anomaly corellations for all locations over the time range.
    """
    # anomalies relative to each series' own time mean
    f_anom = fcast - fcast.mean(axis=0)
    o_anom = obs - obs.mean(axis=0)

    # unnormalized covariance between the series at each gridpoint
    cov = (f_anom * o_anom).sum(axis=0)

    # normalization: root of the summed squared anomalies of each series
    f_sumsq = (f_anom ** 2).sum(axis=0)
    o_sumsq = (o_anom ** 2).sum(axis=0)
    f_norm = da.sqrt(f_sumsq) if is_dask_array(f_sumsq) else np.sqrt(f_sumsq)
    o_norm = da.sqrt(o_sumsq) if is_dask_array(o_sumsq) else np.sqrt(o_sumsq)

    return cov / (f_norm * o_norm)
def calc_mse(fcast, obs):
    """Return the mean squared error between forecast and observations along time."""
    return ((obs - fcast) ** 2).mean(axis=0)
def calc_ce(fcast, obs):
    """Return the coefficient of efficiency (Nash & Sutcliffe 1970) along time."""
    error_ss = ((obs - fcast) ** 2).sum(axis=0)
    clim_ss = ((obs - obs.mean(axis=0)) ** 2).sum(axis=0)
    return 1 - error_ss / clim_ss
def calc_n_eff(data1, data2=None):
    """
    Calculate the effective degrees of freedom for data using lag-1
    autocorrelation.

    Parameters
    ----------
    data1: ndarray
        Dataset to calculate effective degrees of freedom for.  Assumes
        first dimension is the temporal dimension.
    data2: ndarray, optional
        A second dataset to calculate the effective degrees of freedom
        for covariances/correlations etc.

    Returns
    -------
    n_eff: ndarray
        Effective degrees of freedom for input data.
    """
    if data2 is not None:
        assert data1.shape == data2.shape,\
            'Data must have have same shape for combined n_eff calculation'

    # lag-1 autocorrelation: correlate the series with itself shifted by one
    r1 = calc_lac(data1[0:-1], data1[1:])
    n = len(data1)

    if data2 is None:
        return n*((1-r1)/(1+r1))

    r2 = calc_lac(data2[0:-1], data2[1:])
    return n*((1 - r1*r2)/(1+r1*r2))
def run_mean(data, window_size, trim_edge=None, output_arr=None):
    """
    A function for calculating the running mean on data.

    Parameters
    ----------
    data: ndarray
        Data matrix to perform running mean over. Expected to be in time(row) x
        space(column) format.  And that samples span full years.
    window_size: int
        Size of the window to compute the running mean over.
    trim_edge: int, optional
        Remove specified items from the start and end of the sampling
        dimension of the running mean.  Otherwise the window_size/2 items at
        the start and the end will have reflected padding effects.
    output_arr: ndarray-like, optional
        Array to place output of running mean that supports
        ndarray-like slicing.  This is required for dask array input.

    Returns
    -------
    output_arr: ndarray-like
        Running mean result of given data (edge-trimmed when trim_edge is
        given).

    Raises
    ------
    ValueError
        If the window is longer than the time dimension, if trimming would
        remove all samples, or if a dask array is given without output_arr.
    """
    sample_len = data.shape[0]
    if sample_len < window_size:
        raise ValueError("Window size must be smaller than or equal to the "
                         "length of the time dimension of the data.")
    if trim_edge is not None:
        sample_len -= trim_edge*2
        if sample_len < 1:
            raise ValueError('Not enough data to trim edges. Please try with '
                             'trim_edge=None')

    # uniform averaging kernel
    weights = [1.0/float(window_size) for _ in range(window_size)]

    if is_dask_array(data):
        if output_arr is None:
            raise ValueError('run_mean requires an output array keyword '
                             'argument when operating on a Dask array.')

        def _run_mean_block(block):
            # convolve along the time axis within a single (ghosted) chunk
            return convolve1d(block, weights, axis=0)

        # pad chunk boundaries so the window never crosses an un-ghosted chunk
        # edge, run the convolution per block, then trim the padding back off
        pad = window_size // 2
        ghost = da.ghost.ghost(data, depth={0: pad}, boundary={0: 'reflect'})
        filt = ghost.map_blocks(_run_mean_block)
        unpadded = da.ghost.trim_internal(filt, {0: pad})
        if trim_edge is not None:
            unpadded = unpadded[trim_edge:-trim_edge]
        da.store(unpadded, output_arr)
    else:
        res = convolve1d(data, weights, axis=0)
        if trim_edge:
            res = res[trim_edge:-trim_edge]
        if output_arr is not None:
            output_arr[:] = res
        else:
            output_arr = res

    return output_arr
def is_dask_array(arr):
    """Duck-typed check: treat any object exposing a ``dask`` attribute as a dask array."""
    try:
        arr.dask
    except AttributeError:
        return False
    return True
| {
"alphanum_fraction": 0.6367831745,
"author": null,
"avg_line_length": 30.5972222222,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "ec31632a324dd9ddfa5bec955bcb1ce5c43953a4",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 9,
"max_forks_repo_forks_event_max_datetime": "2021-09-21T18:28:47.000Z",
"max_forks_repo_forks_event_min_datetime": "2018-01-15T16:44:09.000Z",
"max_forks_repo_head_hexsha": "62511284889e8f29f02c324d6760dd9751c798ec",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "brianmapes/pylim",
"max_forks_repo_path": "pylim/Stats.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "62511284889e8f29f02c324d6760dd9751c798ec",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "brianmapes/pylim",
"max_issues_repo_path": "pylim/Stats.py",
"max_line_length": 82,
"max_stars_count": 13,
"max_stars_repo_head_hexsha": "62511284889e8f29f02c324d6760dd9751c798ec",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "brianmapes/pylim",
"max_stars_repo_path": "pylim/Stats.py",
"max_stars_repo_stars_event_max_datetime": "2022-03-29T20:48:32.000Z",
"max_stars_repo_stars_event_min_datetime": "2018-01-29T22:14:34.000Z",
"num_tokens": 3293,
"path": null,
"reason": "import numpy,from scipy,import numexpr",
"repo": null,
"save_path": null,
"sha": null,
"size": 13218
} |
# Optimality conditions
Now we will move to studying constrained optimization problems, i.e., the full problem
$$
\begin{align} \
\min \quad &f(x)\\
\text{s.t.} \quad & g_j(x) \geq 0\text{ for all }j=1,\ldots,J\\
& h_k(x) = 0\text{ for all }k=1,\ldots,K\\
&x\in \mathbb R^n.
\end{align}
$$
In order to identify which points are optimal, we want to define similar conditions as there are for unconstrained problems through the gradient:
>If $x$ is a local optimum to function $f$, then $\nabla f(x)=0$.
## Feasible descent directions
Let $S\subset \mathbb R^n$ ($S\neq \emptyset$ closed) and $x^*\in S$.
**Definition:** The set
$$ D = \{d\in \mathbb R^n: d\neq0,x^*+\alpha d\in S \text{ for all } \alpha\in (0,\delta) \text{ for some } \delta>0\}$$
is called the cone of feasible directions of $S$ in $x^*$.
**Definition:** The set
$$ F = \{d\in \mathbb R^n: f(x^*+\alpha d)<f(x^*)\text{ for all } \alpha\in (0,\delta) \text{ for some } \delta>0\}$$
is called the cone of descent directions.
**Definition:** The set $F\cap D$ is called the cone of feasible descent directions.
**(Obvious) Theorem:** Consider an optimization problem
$$
\begin{align}
\min &\ f(x)\\
\text{s.t. }&\ x\in S
\end{align}
$$
and let $x^*\in S$. Now if $x^*$ is a local minimizer **then** the set of feasible descent directions $F\cap D$ is empty.
Since, if $\nabla f(x)d<0$, **then** $d$ is a descent direction, the following theorem follows easily.
**Theorem:** Consider an optimization problem
$$
\begin{align}
\min &\ f(x)\\
\text{s.t. }&\ x\in S
\end{align}
$$
and let $x^*\in S$. Now, if $x^*$ is a local minimizer, then $\{d\in B(0,1):\nabla f(x^*)d<0 \}\cap D$ is empty.
## KKT conditions
Unfortunately, the set $D$ is not easily explicitly modelled. Thus, we need to develop methods for explicitly defining the set $D$ or even better the set $\{d\in B(0,1):\nabla f(x^*)d<0 \}\cap D$. This is done through the KKT conditions:
**Theorem (Kuhn-Tucker Necessary Conditions)** Let $x^*$ be a local minimum for the problem
$$
\begin{align}
\min \quad &f(x)\\
\text{s.t.} \quad & g_j(x) \geq 0\text{ for all }j=1,\ldots,J\\
& h_k(x) = 0\text{ for all }k=1,\ldots,K\\
&x\in \mathbb R^n
\end{align}
$$
and assume that $x^*$ is regular. Then there exist unique Lagrange multiplier vectors $\lambda^* = (\lambda^*_1,\ldots,\lambda_K^*)$ and $\mu^*=(\mu_1^*,\ldots,\mu_J^*)$ such that
$$
\begin{align}
&\nabla_xL(x,\lambda,\mu) = 0\\
&\mu_j^*\geq0,\text{ for all }j=1,\ldots,J\\
&\mu_j^*=0\text{ for all }j\notin A(x^*),
\end{align}
$$
where $$L(x,\lambda,\mu) = f(x)+\sum_{k=1}^K\lambda_k h_k(x) + \sum_{j=1}^J\mu_j g_j(x)$$ and $A(x^*)$ is the set of inequality constraints active at $x^*$. If in addition $f$, $h$ and $g$ are twice continuously differentiable, it holds that
$$
yH_{x}L(x^*,\lambda^*,\mu^*)y\geq0, \text{ for all }y\in V(x^*),
$$
where
$$
V(x^*) = \{y:\nabla h_k(x^*)'y=0 \text{ for all }k=1,\ldots,K, \text{ and }\nabla g_j(x^*)'y=0 \text{ for all }j\in A(x^*)\}.
$$
**Example (page 285, Bertsekas: Nonlinear Programming)** Consider the optimization problem
$$
\begin{align}
\min &\qquad \frac12 (x_1^2+x^2_2+x^2_3)\\
\text{s.t}&\qquad x_1+x_2+x_3\geq 0.
\end{align}
$$
Let us verify the Kuhn-Tucker necessary conditions for the local optimum $x^*=(-1,-1,-1)$.
```python
def f(x):
return 0.5*sum([i**2 for i in x])
def g(x):
return 3-sum(x)
def h(x):
return 0*sum(x)
```
```python
import numpy as np
import ad
def grad_x_L(x,lambda_,mu,f,g,h):
return ad.gh(f)[0](x)+lambda_*np.array(ad.gh(h)[0](x))+mu*np.array(ad.gh(g)[0](x))
```
```python
import ad
mu = 1
lambda_ = 10 #Does not play a role. Think why?
x_opt = [-1,-1,-1]
print grad_x_L(x_opt,lambda_,mu,f,g,h)
print g(x_opt)
```
[-2. -2. -2.]
6
| {
"alphanum_fraction": 0.4991351975,
"author": null,
"avg_line_length": 27.3149606299,
"converted": true,
"ext": "ipynb",
"file": null,
"hexsha": "18065e73ab59d14961d9dfefa999f69c56e891ee",
"include": null,
"lang": "Jupyter Notebook",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "2fb072d8e45d203080d40ca383de14257a1fcbd2",
"max_forks_repo_licenses": [
"CC-BY-3.0"
],
"max_forks_repo_name": "AwelEshetu/edxCourses",
"max_forks_repo_path": "Lecture 6, optimality conditions.ipynb",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "2fb072d8e45d203080d40ca383de14257a1fcbd2",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"CC-BY-3.0"
],
"max_issues_repo_name": "AwelEshetu/edxCourses",
"max_issues_repo_path": "Lecture 6, optimality conditions.ipynb",
"max_line_length": 252,
"max_stars_count": 2,
"max_stars_repo_head_hexsha": "2fb072d8e45d203080d40ca383de14257a1fcbd2",
"max_stars_repo_licenses": [
"CC-BY-3.0"
],
"max_stars_repo_name": "AwelEshetu/edxCourses",
"max_stars_repo_path": "Lecture 6, optimality conditions.ipynb",
"max_stars_repo_stars_event_max_datetime": "2017-02-21T12:58:16.000Z",
"max_stars_repo_stars_event_min_datetime": "2016-07-18T12:49:18.000Z",
"num_tokens": 1421,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 6938
} |
"""
This module allows detect spots in fluorescence microscopy images. The algorithms
are translations of those published in Aguet et al. Dev. Cell 2013. Refer to that publication
and corresponding code for detailed information.
"""
# Author: Guillaume Witz, Biozentrum Basel, 2019
# License: MIT License
import numpy as np
import matplotlib.pyplot as plt
import scipy.fftpack
import scipy
import pandas as pd
from skimage.feature import peak_local_max
from scipy.ndimage.filters import convolve
from scipy.signal import fftconvolve
from scipy.optimize import least_squares
from . import tools_GW as tools
def spot_filter_convfft(image, templ, logfilt, alpha = 0.05, loc_max_dist = 4):
    """Detect spots by matched filtering with a template and a pixel-wise
    statistical test (translation of Aguet et al. Dev. Cell 2013).

    For every pixel, a least-squares fit of amplitude and background of the
    template is computed via FFT convolutions, a t-test decides whether the
    amplitude is significant at level ``alpha``, and candidate spots are the
    local minima of the LoG-filtered image that pass the test.

    Parameters
    ----------
    image : 2 or 3D numpy array
        image to analyse
    templ: 2 or 3D numpy array
        template image for filtering
    logfilt: 2 or 3D numpy array
        LoG version of the filter
    alpha: float
        significance threshold
    loc_max_dist: float
        minimal distance between spots

    Returns
    -------
    spot_result : dict
        dictionary with spot detection information (amplitude, background,
        filtered image etc.)
    """
    if len(image.shape) == 2:
        dim = 2
    else:
        dim = 3
    # half-size of the filter; assumes odd filter dimensions — TODO confirm
    border = ((np.array(logfilt.shape)-1)/2).astype(int)
    # reflect-pad so the 'same'-mode convolution is valid over the whole image
    if dim==3:
        image = np.pad(image,((border[0],border[0]),(border[1],border[1]),(border[2],border[2])), mode = 'reflect')
    else:
        image = np.pad(image,((border[0],border[0]),(border[1],border[1])), mode = 'reflect')
    convmode = 'same'
    image = image.astype(float)
    # template sums used by the closed-form least-squares solution
    T_s = np.sum(templ)
    T2_s = np.sum(templ**2)
    n = np.size(templ)
    ones_mat = np.ones(templ.shape)
    # windowed sums of the image, squared image, and image*template via FFT;
    # the padded border is stripped back off after each convolution
    if dim ==3:
        I_s = fftconvolve(image,ones_mat, mode=convmode)[border[0]:-border[0],border[1]:-border[1],border[2]:-border[2]]
        I2_s = fftconvolve(image**2,ones_mat, mode=convmode)[border[0]:-border[0],border[1]:-border[1],border[2]:-border[2]]
        ITconv = fftconvolve(image,templ, mode=convmode)[border[0]:-border[0],border[1]:-border[1],border[2]:-border[2]]
    else:
        I_s = fftconvolve(image,ones_mat, mode=convmode)[border[0]:-border[0],border[1]:-border[1]]
        I2_s = fftconvolve(image**2,ones_mat, mode=convmode)[border[0]:-border[0],border[1]:-border[1]]
        ITconv = fftconvolve(image,templ, mode=convmode)[border[0]:-border[0],border[1]:-border[1]]
    # per-pixel least-squares amplitude and constant background of the template
    A = (ITconv-I_s*T_s/n)/(T2_s-T_s**2/n)
    amplitude = A
    c=(I_s-A*T_s)/n
    background=c
    #statistical analysis
    J = np.column_stack((templ.flatten(), np.ones(np.size(templ))))
    C = np.linalg.inv(J.T@(J))
    # residual sum of squares of the per-pixel fit
    f_c = I2_s - 2*c*I_s + n*c**2
    RSS = A**2*T2_s-2*A*(ITconv-c*T_s)+f_c
    RSS[RSS<0]=0  # guard against tiny negative values from floating-point error
    sigma_e2 = RSS/(n-3)
    sigma_A = np.sqrt(sigma_e2*C[0,0])
    sigma_res = np.sqrt((RSS-(A*T_s+n*c-I_s)/n)/(n-1))
    kLevel = scipy.stats.norm.ppf(1-alpha/2, loc=0, scale=1)
    SE_sigma_c = sigma_res/np.sqrt(2*(n-1))*kLevel
    # Welch-Satterthwaite style combined degrees of freedom and test statistic
    df2 = (n-1)*(sigma_A**2+SE_sigma_c**2)**2/(sigma_A**4+SE_sigma_c**4)
    scomb = np.sqrt((sigma_A**2+SE_sigma_c**2)/n)
    T = (A-sigma_res*kLevel)/scomb
    # pixels where the amplitude is significantly above the noise level
    mask= scipy.stats.t.cdf(-T,df2)<alpha
    #find peaks
    if dim==3:
        imgLoG = fftconvolve(image,logfilt,mode = 'same')[border[0]:-border[0],border[1]:-border[1],border[2]:-border[2]]
    else:
        imgLoG = fftconvolve(image,logfilt,mode = 'same')[border[0]:-border[0],border[1]:-border[1]]
    # local minima of the LoG response (maxima of its negative)
    locmax_base = peak_local_max(-imgLoG,min_distance = loc_max_dist, indices = False)
    # keep only local maxima that also pass the significance test
    imgLM = locmax_base*mask
    spot_result={'amplitude':A, 'background':c, 'prob_mask':mask, 'logmask':locmax_base, 'mask':imgLM, 'imgLoG': imgLoG}
    return spot_result
def get_candidates(filtered, subimage):
    """Gather candidate spots. Remove spots close to border
    or with low amplitude

    Parameters
    ----------
    filtered : dict
        output of spot_filter_convfft()
    subimage: 2 or 3D numpy array
        subimage

    Returns
    -------
    spot_prop : Pandas dataframe
        dataframe with spot information x, y, (z), amplitude, background
    """
    shape = subimage.shape
    centre = int((shape[1] - 1) / 2)
    # reference level: median over an 11-column band around the image centre
    med = np.median(subimage[:, centre - 5:centre + 6])

    # candidate pixels: detection mask AND amplitude above 10% of the median
    keep = filtered['mask'] * (filtered['amplitude'] > 0.1 * med)
    amps = filtered['amplitude'][keep]
    bgs = filtered['background'][keep]
    coords = np.stack(np.where(keep)).T

    # drop spots whose fitting window would fall outside the image
    if len(shape) == 3:
        spot_prop = pd.DataFrame(np.c_[coords, amps, bgs],
                                 columns=['z', 'x', 'y', 'A', 'b'])
        spot_prop = spot_prop[(spot_prop.z - 4 >= 0) & (spot_prop.z + 5 < shape[0])]
        spot_prop = spot_prop[(spot_prop.x - 5 >= 0) & (spot_prop.x + 6 < shape[1])]
        spot_prop = spot_prop[(spot_prop.y - 5 >= 0) & (spot_prop.y + 6 < shape[2])]
    else:
        spot_prop = pd.DataFrame(np.c_[coords, amps, bgs],
                                 columns=['x', 'y', 'A', 'b'])
        spot_prop = spot_prop[(spot_prop.x - 5 >= 0) & (spot_prop.x + 6 < shape[0])]
        spot_prop = spot_prop[(spot_prop.y - 5 >= 0) & (spot_prop.y + 6 < shape[1])]

    return spot_prop
def fit_candidates(subimage, spot_prop, sigmaXY, show_fit = False, fit_type = 'None'):
    """Fit a 2D Gaussian model to every candidate spot.

    Parameters
    ----------
    subimage : 2D numpy array
        image containing the candidate spots
    spot_prop : Pandas dataframe
        spot properties, output of get_candidates()
    sigmaXY : float
        approximate spot width (standard deviation, in pixels)
    show_fit : bool
        show a plot of data vs. fitted model for every spot
    fit_type : str
        which parameters are held fixed during the fit:
        'None'   -> all free (A, x, y, sigma, background)
        'B'      -> background fixed to the image median
        'sigma'  -> width fixed to sigmaXY
        'sigmaB' -> both width and background fixed

    Returns
    -------
    fit_res : 2d numpy array
        fit results. Each row is one spot. The first columns up
        to and including the antepenultimate are fit output of
        scipy.optimize.least_squares. The two last columns are
        square sum error and normalized cross-correlation.

    Raises
    ------
    ValueError
        if fit_type is not one of the four supported modes.
    """
    im_size = subimage.shape
    im_middle = int((im_size[1]-1)/2)
    # half-size of the square box extracted around each candidate
    fitbox = [int(np.ceil(4*sigmaXY)), int(np.ceil(4*sigmaXY))]
    xgrid, ygrid = np.meshgrid(range(2*fitbox[0]+1), range(2*fitbox[1]+1),
                               indexing='ij')
    # Keep only spots whose fit box lies fully inside the image.
    # BUG FIX: the original did not bound x from above, so boxes near the
    # lower image edge were silently truncated by the slicing below.
    spot_prop = spot_prop[(spot_prop.x > fitbox[0]) &
                          (spot_prop.x < im_size[0]-fitbox[0]) &
                          (spot_prop.y > fitbox[1]) &
                          (spot_prop.y < im_size[1]-fitbox[1])]
    # Number of free parameters depends on the fit mode.
    # BUG FIX: fit_res is allocated after the border filtering above; the
    # original sized it on the unfiltered table, leaving all-zero rows.
    if fit_type == 'None':
        n_free = 5
    elif fit_type == 'B':
        n_free = 4
        cstB = np.median(subimage[:, im_middle-5:im_middle+6])
    elif fit_type == 'sigma':
        n_free = 4
    elif fit_type == 'sigmaB':
        n_free = 3
        cstB = np.median(subimage[:, im_middle-5:im_middle+6])
    else:
        raise ValueError('unsupported fit_type: %s' % fit_type)
    fit_res = np.zeros((len(spot_prop), n_free+2))
    for x in range(len(spot_prop)):
        spot_image = subimage[int(spot_prop.iloc[x].x)-fitbox[0]:int(spot_prop.iloc[x].x)+fitbox[0]+1,
                              int(spot_prop.iloc[x].y)-fitbox[1]:int(spot_prop.iloc[x].y)+fitbox[1]+1]
        if fit_type == 'sigma':
            param_init = [spot_prop.iloc[x].A, fitbox[0], fitbox[1], spot_prop.iloc[x].b]
            res = least_squares(tools.LSE_gauss2D_cstsigma, param_init,
                                args=(xgrid, ygrid, sigmaXY, spot_image))
            res_abs = np.abs(res.x)
            fitim = tools.fun_gauss2D_cstsigma(xgrid, ygrid, sigmaXY, *res_abs)
        elif fit_type == 'None':
            param_init = [spot_prop.iloc[x].A, fitbox[0], fitbox[1], sigmaXY, spot_prop.iloc[x].b]
            res = least_squares(tools.LSE_gauss2D, param_init,
                                args=(xgrid, ygrid, spot_image))
            res_abs = np.abs(res.x)
            # BUG FIX: the original passed an undefined `zgrid` here,
            # raising a NameError for the default fit_type.
            fitim = tools.fun_gauss2D(xgrid, ygrid, *res_abs)
        elif fit_type == 'B':
            param_init = [spot_prop.iloc[x].A, fitbox[0], fitbox[1], sigmaXY]
            res = least_squares(tools.LSE_gauss2D_cstB, param_init,
                                args=(xgrid, ygrid, cstB, spot_image))
            res_abs = np.abs(res.x)
            fitim = tools.fun_gauss2D_cstB(xgrid, ygrid, cstB, *res_abs)
        elif fit_type == 'sigmaB':
            param_init = [spot_prop.iloc[x].A, fitbox[0], fitbox[1]]
            res = least_squares(tools.LSE_gauss2D_cstBsigma, param_init,
                                args=(xgrid, ygrid, cstB, sigmaXY, spot_image))
            res_abs = np.abs(res.x)
            fitim = tools.fun_gauss2D_cstBsigma(xgrid, ygrid, cstB, sigmaXY, *res_abs)
        # normalized cross-correlation between data and fitted model
        vec1 = np.ravel(spot_image)
        vec2 = np.ravel(fitim)
        vec1 = vec1 - np.mean(vec1)
        vec2 = vec2 - np.mean(vec2)
        vec1 = vec1 / np.sqrt(np.sum(vec1**2))
        vec2 = vec2 / np.sqrt(np.sum(vec2**2))
        ncc = np.sum(vec1*vec2)
        if show_fit:
            # BUG FIX: the original summed 2D images along axis 0 before
            # imshow (a leftover from a 3D version), producing 1D data;
            # it also re-evaluated fun_gauss2D regardless of fit_type.
            plt.subplot(1, 2, 1)
            plt.imshow(spot_image)
            plt.subplot(1, 2, 2)
            plt.imshow(fitim)
            plt.show()
        fit_res[x, 0:-2] = res.x
        fit_res[x, -2] = res.cost
        fit_res[x, -1] = ncc
    return fit_res
def make_g_filter(modelsigma, modelsigmaZ = None):
    """Build a (2D or 3D) Gaussian kernel used as a spot-matching filter.

    Parameters
    ----------
    modelsigma : float
        expected xy standard dev.
    modelsigmaZ : float, optional
        expected z standard dev.; when given, a 3D kernel is returned

    Returns
    -------
    g : 2 or 3D numpy array
        Gaussian filter
    """
    # kernel extends 4 sigma in every lateral direction
    half_width = round(4 * modelsigma)
    lateral_axis = np.arange(-half_width, half_width + 1, 1)
    if modelsigmaZ is None:
        xx, yy = np.meshgrid(lateral_axis, lateral_axis)
        return np.exp(-(xx ** 2 + yy ** 2) / (2 * modelsigma ** 2))
    half_depth = round(4 * modelsigmaZ)
    axial_axis = np.arange(-half_depth, half_depth + 1, 1)
    xx, yy, zz = np.meshgrid(lateral_axis, lateral_axis, axial_axis,
                             indexing='ij')
    lateral = np.exp(-(xx ** 2 + yy ** 2) / (2 * modelsigma ** 2))
    axial = np.exp(-(zz ** 2) / (2 * modelsigmaZ ** 2))
    return lateral * axial
def make_laplacelog(modelsigma, modelsigmaZ = None):
    """Create a Laplacian-of-Gaussian (LoG) spot model for filtering.

    Parameters
    ----------
    modelsigma : float
        expected xy standard dev.
    modelsigmaZ : float, optional
        expected z standard dev.; when given, a 3D kernel is returned

    Returns
    -------
    g : 2 or 3D numpy array
        LoG filter
    """
    ws = round(4*modelsigma)
    x = np.arange(-ws, ws+1, 1)
    y = np.arange(-ws, ws+1, 1)
    if modelsigmaZ is None:
        xx, yy = np.meshgrid(x, y)
        # BUG FIX: the original 2D branch returned a plain Gaussian
        # (copy-paste from make_g_filter). It now computes the actual
        # Laplacian of a Gaussian, matching the 3D branch and the
        # function's stated purpose.
        g = ((xx**2/modelsigma**4 - 1/modelsigma**2
              + yy**2/modelsigma**4 - 1/modelsigma**2)
             * np.exp(-(xx**2+yy**2)/(2*modelsigma**2)))
    else:
        wsZ = round(4*modelsigmaZ)
        z = np.arange(-wsZ, wsZ+1, 1)
        xx, yy, zz = np.meshgrid(x, y, z, indexing = 'ij')
        g = ((xx**2/modelsigma**4 - 1/modelsigma**2
              + yy**2/modelsigma**4 - 1/modelsigma**2
              + zz**2/modelsigmaZ**4 - 1/modelsigmaZ**2)
             * np.exp(-(xx**2+yy**2)/(2*modelsigma**2) - (zz**2)/(2*modelsigmaZ**2)))
    return g
"alphanum_fraction": 0.5975107371,
"author": null,
"avg_line_length": 36.4504792332,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "9bbb0d6688c0cae441b758d0dc72211351eb3335",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 2,
"max_forks_repo_forks_event_max_datetime": "2020-04-21T18:25:50.000Z",
"max_forks_repo_forks_event_min_datetime": "2019-12-02T16:34:37.000Z",
"max_forks_repo_head_hexsha": "d3f68ef22186ee096aaca554c346e814ebc35b1b",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "guiwitz/DoubleAdderArticle",
"max_forks_repo_path": "colicycle/colicycle/spot_detection.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "d3f68ef22186ee096aaca554c346e814ebc35b1b",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "guiwitz/DoubleAdderArticle",
"max_issues_repo_path": "colicycle/colicycle/spot_detection.py",
"max_line_length": 191,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "d3f68ef22186ee096aaca554c346e814ebc35b1b",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "guiwitz/DoubleAdderArticle",
"max_stars_repo_path": "colicycle/colicycle/spot_detection.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 3468,
"path": null,
"reason": "import numpy,import scipy,from scipy",
"repo": null,
"save_path": null,
"sha": null,
"size": 11409
} |
import sys
import numpy as np
import cv2
# GoogLeNet image classification demo: classify one image with the
# pretrained Caffe model and overlay the top-1 label + confidence.
# Image path comes from argv[1]; defaults to space_shuttle.jpg.
filename = 'space_shuttle.jpg'
if len(sys.argv) > 1:
    filename = sys.argv[1]
img = cv2.imread(filename)
if img is None:
    print('Image load failed!')
    sys.exit()  # FIX: sys.exit() instead of the site-provided exit()
# Load network (Caffe weights + prototxt must be in the working dir)
net = cv2.dnn.readNet('bvlc_googlenet.caffemodel', 'deploy.prototxt')
if net.empty():
    print('Network load failed!')
    sys.exit()
# Load the 1000 ImageNet class names, one per line
with open('classification_classes_ILSVRC2012.txt', 'rt') as f:
    classNames = f.read().rstrip('\n').split('\n')
# Inference: 224x224 input blob with per-channel mean subtraction
inputBlob = cv2.dnn.blobFromImage(img, 1, (224, 224), (104, 117, 123))
net.setInput(inputBlob, 'data')
prob = net.forward()
# Pick the most probable class and draw it on the image
out = prob.flatten()
classId = np.argmax(out)
confidence = out[classId]
text = '%s (%4.2f%%)' % (classNames[classId], confidence * 100)
cv2.putText(img, text, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 1, cv2.LINE_AA)
cv2.imshow('img', img)
cv2.waitKey()
cv2.destroyAllWindows()
| {
"alphanum_fraction": 0.6441176471,
"author": null,
"avg_line_length": 20.8163265306,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "4f8bb0ad1216bdedadfceb59102e8f2b13009604",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2020-11-02T10:33:31.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-11-02T10:33:31.000Z",
"max_forks_repo_head_hexsha": "230bddc6882467ed059e95d05dcf60cc4a2a2b2b",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "JSJeong-me/2020-11-02_Steel_AI",
"max_forks_repo_path": "opencv-classification/classify.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "230bddc6882467ed059e95d05dcf60cc4a2a2b2b",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "JSJeong-me/2020-11-02_Steel_AI",
"max_issues_repo_path": "opencv-classification/classify.py",
"max_line_length": 93,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "230bddc6882467ed059e95d05dcf60cc4a2a2b2b",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "JSJeong-me/2020-11-02_Steel_AI",
"max_stars_repo_path": "opencv-classification/classify.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 296,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 1020
} |
from __future__ import annotations
from typing import Tuple, NoReturn
from ...base import BaseEstimator
import numpy as np
from itertools import product
from IMLearn.metrics.loss_functions import misclassification_error
class DecisionStump(BaseEstimator):
    """
    A decision stump classifier for {-1,1} labels following the CART
    algorithm: a single split on one feature at one threshold.

    Attributes
    ----------
    self.threshold_ : float
        The threshold by which the data is split
    self.j_ : int
        The index of the feature by which to split the data
    self.sign_ : int
        The label predicted for samples whose j'th feature value is at
        or above the threshold
    """
    def __init__(self) -> DecisionStump:
        """Instantiate an unfitted decision stump classifier."""
        super().__init__()
        self.threshold_ = None
        self.j_ = None
        self.sign_ = None

    def _fit(self, X: np.ndarray, y: np.ndarray) -> NoReturn:
        """
        Fit the stump: scan every (sign, feature) combination and keep the
        one whose best threshold attains the smallest loss.

        Parameters
        ----------
        X : ndarray of shape (n_samples, n_features)
            Input data to fit an estimator for
        y : ndarray of shape (n_samples, )
            Responses of input data to fit to
        """
        best_loss = np.inf
        for candidate_sign, feature_idx in product([-1, 1], range(X.shape[1])):
            thr, thr_loss = self._find_threshold(X[:, feature_idx], y,
                                                 candidate_sign)
            if thr_loss < best_loss:
                best_loss = thr_loss
                self.sign_ = candidate_sign
                self.threshold_ = thr
                self.j_ = feature_idx

    def _predict(self, X: np.ndarray) -> np.ndarray:
        """
        Predict responses for the given samples using the fitted stump.

        Parameters
        ----------
        X : ndarray of shape (n_samples, n_features)
            Input data to predict responses for

        Returns
        -------
        responses : ndarray of shape (n_samples, )
            Predicted responses of given samples

        Notes
        -----
        Feature values strictly below the threshold are predicted as
        `-sign`; values equal to or above it are predicted as `sign`.
        """
        feature_column = X[:, self.j_]
        return np.where(feature_column >= self.threshold_,
                        self.sign_, -self.sign_)

    def _find_threshold(self, values: np.ndarray, labels: np.ndarray,
                        sign: int) -> Tuple[float, float]:
        """
        Find the threshold over `values` that minimizes the (weighted)
        misclassification of `labels` when predicting `sign` at/above it.

        Parameters
        ----------
        values: ndarray of shape (n_samples,)
            A feature vector to find a splitting threshold for
        labels: ndarray of shape (n_samples,)
            The labels to compare against
        sign: int
            Predicted label assigned to values equal to or above threshold

        Returns
        -------
        thr: float
            Threshold by which to perform split
        thr_err: float between 0 and 1
            Misclassification error of returned threshold

        Notes
        -----
        For every tested threshold, values strictly below it are predicted
        as `-sign`, values at or above it as `sign`.
        """
        order = np.argsort(values)
        sorted_labels = labels[order]
        sorted_values = values[order]
        # candidate cut points: -inf, every midpoint, +inf
        candidates = np.concatenate(
            [[-np.inf], (sorted_values[1:] + sorted_values[:-1]) / 2, [np.inf]])
        # loss at -inf (everything predicted `sign`): total weight of the
        # samples whose label sign disagrees with `sign`
        loss_left = np.abs(np.sum(sorted_labels[np.sign(sorted_labels) == sign]))
        # moving the cut right past sample i changes the loss by -y_i*sign
        losses = np.append(loss_left,
                           loss_left - np.cumsum(sorted_labels * sign))
        best = np.argmin(losses)
        return candidates[best], losses[best]

    def _loss(self, X: np.ndarray, y: np.ndarray) -> float:
        """
        Evaluate performance under the misclassification loss function.

        Parameters
        ----------
        X : ndarray of shape (n_samples, n_features)
            Test samples
        y : ndarray of shape (n_samples, )
            True labels of test samples

        Returns
        -------
        loss : float
            Performance under misclassification loss function
        """
        return misclassification_error(y, self._predict(X))
| {
"alphanum_fraction": 0.5862369338,
"author": null,
"avg_line_length": 32.5673758865,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "a15d3cbbefa91da6d061bf0453bc1a96d6fa0a52",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "b4a01e04fff4181837780cc603446fd73defd349",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "anatfl/IML.HUJI",
"max_forks_repo_path": "IMLearn/learners/classifiers/decision_stump.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "b4a01e04fff4181837780cc603446fd73defd349",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "anatfl/IML.HUJI",
"max_issues_repo_path": "IMLearn/learners/classifiers/decision_stump.py",
"max_line_length": 78,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "b4a01e04fff4181837780cc603446fd73defd349",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "anatfl/IML.HUJI",
"max_stars_repo_path": "IMLearn/learners/classifiers/decision_stump.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 963,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 4592
} |
import re
import numpy as np
def write_point_cloud(ply_filename, points):
    """Write colored 3D points to an ASCII PLY file.

    Parameters
    ----------
    ply_filename : str
        output path
    points : iterable of 6-element sequences
        each entry is (x, y, z, c1, c2, c3); coordinates are written as
        floats, the three color channels as integers, plus a constant
        alpha of 0.
        NOTE(review): the header declares the channel order as
        blue/green/red -- confirm this matches what callers pack.
    """
    body = "".join(
        "%f %f %f %d %d %d 0\n" % (p[0], p[1], p[2], p[3], p[4], p[5])
        for p in points)
    header = ('ply\n'
              'format ascii 1.0\n'
              'element vertex %d\n'
              'property float x\n'
              'property float y\n'
              'property float z\n'
              'property uchar blue\n'
              'property uchar green\n'
              'property uchar red\n'
              'property uchar alpha\n'
              'end_header\n') % len(points)
    # `with` guarantees the handle is closed even if a write fails
    # (the original used open()/close() with no try/finally)
    with open(ply_filename, "w") as out_file:
        # trailing "\n" reproduces the original template's final newline
        out_file.write(header + body + "\n")
def depth_image_to_point_cloud(rgb, depth, scale, K, pose):
    """Back-project a registered color/depth image pair into a colored
    point cloud in the `pose` frame.

    Pixels with non-positive depth are discarded. Each returned point is
    [x, y, z, c0, c1, c2] where the color channels are taken from
    rgb[..., 0..2] in order.
    """
    height, width = rgb.shape[0], rgb.shape[1]
    cols, rows = np.meshgrid(range(0, width), range(0, height))
    cols = cols.astype(float)
    rows = rows.astype(float)
    # pinhole back-projection using intrinsics K
    z_cam = depth.astype(float) / scale
    x_cam = (cols - K[0, 2]) * z_cam / K[0, 0]
    y_cam = (rows - K[1, 2]) * z_cam / K[1, 1]
    x_flat = np.ravel(x_cam)
    y_flat = np.ravel(y_cam)
    z_flat = np.ravel(z_cam)
    keep = z_flat > 0
    x_flat = x_flat[keep]
    y_flat = y_flat[keep]
    z_flat = z_flat[keep]
    # homogeneous coordinates, transformed by the 4x4 pose
    homogeneous = np.vstack((x_flat, y_flat, z_flat, np.ones(len(x_flat))))
    world = np.dot(pose, homogeneous)
    ch0 = np.ravel(rgb[:, :, 0])[keep]
    ch1 = np.ravel(rgb[:, :, 1])[keep]
    ch2 = np.ravel(rgb[:, :, 2])[keep]
    return np.transpose(np.vstack((world[0:3, :], ch0, ch1, ch2))).tolist()
def create_depth_map_from_disparity(disp, focal_length, baseline):
    """Convert a disparity map to depth via depth = f * B / disparity.

    Returns the depth map and a boolean mask flagging pixels whose
    disparity was zero (depth == +inf). Note: only +inf is flagged,
    matching the original behavior.
    """
    depth_map = focal_length * baseline / disp
    invalid = depth_map == np.inf
    return depth_map, invalid
def read_pfm(file):
    """Read a PFM (portable float map) image.

    Parameters
    ----------
    file : str
        path to the .pfm file

    Returns
    -------
    data : numpy array
        (height, width, 3) for a color 'PF' file, (height, width) for a
        grayscale 'Pf' file. NOTE(review): unlike many PFM readers this
        one applies no vertical flip to the data -- kept as-is.
    scale : float
        absolute value of the scale factor from the header

    Raises
    ------
    Exception
        if the magic number or the dimension line is malformed
    """
    # `with` closes the handle even when a parse error raises below
    # (the original leaked it on every exception path)
    with open(file, 'rb') as fh:
        header = str(bytes.decode(fh.readline().rstrip(), encoding='utf-8'))
        if header == 'PF':
            color = True
        elif header == 'Pf':
            color = False
        else:
            raise Exception('Not a PFM file.')
        dim_line = str(bytes.decode(fh.readline(), encoding='utf-8'))
        dim_match = re.match(r'^(\d+)\s(\d+)\s$', dim_line)
        if not dim_match:
            raise Exception('Malformed PFM header.')
        width, height = map(int, dim_match.groups())
        scale = float(fh.readline().rstrip())
        if scale < 0:  # a negative scale marks little-endian sample data
            endian = '<'
            scale = -scale
        else:
            endian = '>'  # big-endian
        data = np.fromfile(fh, endian + 'f')
    shape = (height, width, 3) if color else (height, width)
    return np.reshape(data, shape), scale
| {
"alphanum_fraction": 0.5732038835,
"author": null,
"avg_line_length": 23.623853211,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "2af2a0846909b8b216d5f95a6284cd5f6684e78f",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "fa14288f149c5af7b2a49092f729f5c4f44517ba",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "MorchelPeng/deep-video-mvs",
"max_forks_repo_path": "dataset/utils.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "fa14288f149c5af7b2a49092f729f5c4f44517ba",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "MorchelPeng/deep-video-mvs",
"max_issues_repo_path": "dataset/utils.py",
"max_line_length": 119,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "b3943a9249d522dca3e6cd603e427f611cc7bad5",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "hashi0203/deep-video-mvs",
"max_stars_repo_path": "dataset/utils.py",
"max_stars_repo_stars_event_max_datetime": "2022-01-10T07:51:41.000Z",
"max_stars_repo_stars_event_min_datetime": "2022-01-10T07:51:41.000Z",
"num_tokens": 758,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 2575
} |
#!/usr/bin/env python
import numpy as np
import rospy
from minau.srv import ArmControl, SetHeadingVelocity, SetHeadingDepth
from geometry_msgs.msg import Vector3
from minau.msg import ControlStatus
from sensor_msgs.msg import Joy
# def vel(heading,speed):
# heading_rad = heading*np.pi/180
# return Vector3(speed*np.cos(heading_rad),speed*np.sin(heading_rad),0)
class MyController():
    """Joystick teleoperation for a MinAU UUV.

    Subscribes to /joy and translates button/axis events into
    uuv_control service calls (heading+depth or heading+velocity).
    """
    def __init__(self):
        # commanded state, updated incrementally by joystick events
        self.heading = 0.0
        self.depth = 0.0
        self.speed = 0.5
        self.dive_depth = 0.3
        self.dy, self.dx = 0, 0
        # arm the vehicle before accepting commands
        rospy.wait_for_service('uuv_control/arm_control')
        arm_control = rospy.ServiceProxy('uuv_control/arm_control', ArmControl)
        arm_control()
        rospy.wait_for_service('uuv_control/set_heading_velocity')
        rospy.wait_for_service('uuv_control/set_heading_depth')
        rospy.Subscriber("/joy", Joy, self.callback)
        print("ready...")
        # debounce flags: require a button release before the next step
        self.depth_flag = False
        self.heading_flag = False

    def _send_heading_depth(self):
        """Send the current heading/depth command to the vehicle."""
        shd = rospy.ServiceProxy('uuv_control/set_heading_depth', SetHeadingDepth)
        shd(self.heading, self.depth)

    def _send_heading_velocity(self):
        """Send the current heading/velocity command to the vehicle."""
        vec = Vector3(self.dy, self.dx, 0.0)
        shv = rospy.ServiceProxy('uuv_control/set_heading_velocity', SetHeadingVelocity)
        shv(self.heading, vec)

    @staticmethod
    def _wrap_heading(heading):
        """Wrap a heading into the [0, 360] degree range."""
        while heading < 0:
            heading += 360
        while heading > 360:
            heading -= 360
        return heading

    def callback(self, msg):
        """Translate one /joy message into vehicle commands.

        NOTE(review): holding a button still re-triggers on the next
        message because the `else` branch clears the debounce flag while
        the button condition short-circuits -- confirm intended.
        """
        cmd = False
        # ---- depth step (buttons 2 = up, 0 = down) ----
        if msg.buttons[2] and not self.depth_flag:  # UP
            self.depth_flag = True
            self.depth -= 0.3
            self._send_heading_depth()
            cmd = True
        elif msg.buttons[0] and not self.depth_flag:  # DOWN
            # BUG FIX: this was a separate `if`, so after an UP press the
            # `else` below immediately cleared depth_flag, defeating the
            # debounce (the heading section below uses if/elif/else).
            self.depth_flag = True
            self.depth += 0.3
            self._send_heading_depth()
            cmd = True
        else:
            self.depth_flag = False
        # ---- heading step (buttons 3 = left, 1 = right, 15 deg) ----
        if msg.buttons[3] and not self.heading_flag:
            self.heading = self._wrap_heading(self.heading - 15)
            self.heading_flag = True
            cmd = True
            self._send_heading_depth()
        elif msg.buttons[1] and not self.heading_flag:
            self.heading = self._wrap_heading(self.heading + 15)
            self.heading_flag = True
            cmd = True
            self._send_heading_depth()
        else:
            self.heading_flag = False
        # ---- D-pad velocity: Y axis in real life (x in NED) ----
        if msg.axes[6] > 0:
            print("reading")
            self.dy -= 0.1
            self._send_heading_velocity()
            cmd = True
        elif msg.axes[6] < 0:
            print("reading")
            self.dy += 0.1
            self._send_heading_velocity()
            cmd = True
        elif msg.axes[7] > 0:
            print("reading")
            self.dx += 0.1
            self._send_heading_velocity()
            cmd = True
        elif msg.axes[7] < 0:
            print("reading")
            self.dx -= 0.1
            self._send_heading_velocity()
            cmd = True
        # ---- disarm (button 5) ----
        if msg.buttons[5] != 0:
            rospy.wait_for_service('uuv_control/disarm_control')
            arm_control = rospy.ServiceProxy('uuv_control/disarm_control', ArmControl)
            arm_control()
        if cmd:
            print("Heading: {} | Depth: {} | dx: {} | dy: {}".format(self.heading, -self.depth, self.dx, self.dy))
if __name__ == "__main__":
    # Start the ROS node; MyController registers the /joy subscriber and
    # rospy.spin() hands control to the callback loop until shutdown.
    rospy.init_node("joy_controller")
    controller = MyController()
    rospy.spin()
| {
"alphanum_fraction": 0.5649458784,
"author": null,
"avg_line_length": 33.8309859155,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "b9c87dd5e961651edafa64ac76894effda7858bd",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "e033a070c6934e6a0ec80c82f076625a9d336361",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "COHRINT/minau_tools",
"max_forks_repo_path": "scripts/joystick_rx.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "e033a070c6934e6a0ec80c82f076625a9d336361",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "COHRINT/minau_tools",
"max_issues_repo_path": "scripts/joystick_rx.py",
"max_line_length": 114,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "e033a070c6934e6a0ec80c82f076625a9d336361",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "COHRINT/minau_tools",
"max_stars_repo_path": "scripts/joystick_rx.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1151,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 4804
} |
import tensorflow as tf
from tensorflow import distributions
import numpy as np
def evaluate_sample(model, data_sample, session):
    """
    Samples parameter realizations from the variational posterior distributions,
    performs inference, and returns the classification accuracy.

    Args:
        model: the model to evaluate; `model.q._probs` must yield class
            probabilities
        data_sample: dict of tensors; 'y' holds the one-hot labels
        session: the session to run the evaluation in

    Returns:
        float: fraction of samples whose argmax prediction matches the label
    """
    with tf.name_scope("Evaluation"):
        q = model.q._probs
        res = []
        ref = []
        # drain the (finite) dataset iterator until it signals exhaustion
        while True:
            try:
                r, y = session.run((q, data_sample['y']))
                res.append(r)
                ref.append(y)
            except tf.errors.OutOfRangeError:
                break
        res = np.argmax(np.concatenate(res, axis=0), axis=-1)
        ref = np.argmax(np.concatenate(ref, axis=0), axis=-1)
        # BUG FIX: np.float was removed in NumPy 1.24; the builtin float
        # is the documented replacement and behaves identically here.
        return float(np.sum(res == ref)) / len(ref)
def evaluate_sample_xchange(model, data_sample, session):
    """
    Samples parameter realizations from the variational posterior distributions
    and performs inference for both the model's posterior (`q`) and its
    uniform variant (`q_uniform`).

    Args:
        model: the model to evaluate; must expose `q._probs` and
            `q_uniform._probs`
        data_sample: dict of tensors; 'y' holds the one-hot labels
        session: the session to run the evaluation in

    Returns:
        tuple of two floats: accuracy of `q` and accuracy of `q_uniform`

    NOTE(review): each loop iteration calls session.run twice, so q and
    q_uniform are evaluated on different batches drawn from the same
    iterator -- confirm this interleaving is intended.
    """
    with tf.name_scope("Evaluation"):
        q = model.q._probs
        q_uni = model.q_uniform._probs
        res = []
        ref = []
        res_uni = []
        ref_uni = []
        # drain the (finite) dataset iterator until it signals exhaustion
        while True:
            try:
                r, y = session.run((q, data_sample['y']))
                r_uni, y_uni = session.run((q_uni, data_sample['y']))
                res.append(r)
                ref.append(y)
                res_uni.append(r_uni)
                ref_uni.append(y_uni)
            except tf.errors.OutOfRangeError:
                break
        res = np.argmax(np.concatenate(res, axis=0), axis=-1)
        ref = np.argmax(np.concatenate(ref, axis=0), axis=-1)
        res_uni = np.argmax(np.concatenate(res_uni, axis=0), axis=-1)
        ref_uni = np.argmax(np.concatenate(ref_uni, axis=0), axis=-1)
        # BUG FIX: np.float was removed in NumPy 1.24; use the builtin.
        acc = float(np.sum(res == ref)) / len(ref)
        acc_uni = float(np.sum(res_uni == ref_uni)) / len(ref_uni)
        return acc, acc_uni
"alphanum_fraction": 0.5822102426,
"author": null,
"avg_line_length": 33.223880597,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "ec7a2c978b5250d16c806e53276c3a030d21ace0",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "3a6885f77aabb9b539e554a34a1c7ad358a39336",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "NithinKumaraNT/DNN_Quantizer",
"max_forks_repo_path": "evaluation.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "3a6885f77aabb9b539e554a34a1c7ad358a39336",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "NithinKumaraNT/DNN_Quantizer",
"max_issues_repo_path": "evaluation.py",
"max_line_length": 106,
"max_stars_count": 2,
"max_stars_repo_head_hexsha": "3a6885f77aabb9b539e554a34a1c7ad358a39336",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "NithinKumaraNT/DNN_Quantizer",
"max_stars_repo_path": "evaluation.py",
"max_stars_repo_stars_event_max_datetime": "2019-05-16T14:07:17.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-04-01T22:02:05.000Z",
"num_tokens": 510,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 2226
} |
# -*- coding: utf-8 -*-
"""
Created on Tue May 17 15:50:25 2016
@author: hossam
"""
from pathlib import Path
import optimizers.PSO as pso
import optimizers.MVO as mvo
import optimizers.GWO as gwo
import optimizers.MFO as mfo
import optimizers.CS as cs
import optimizers.BAT as bat
import optimizers.WOA as woa
import optimizers.FFA as ffa
import optimizers.SSA as ssa
import optimizers.GA as ga
import optimizers.HHO as hho
import optimizers.SCA as sca
import optimizers.JAYA as jaya
import optimizers.DE as de
import benchmarks
import csv
import numpy
import time
import warnings
import os
import plot_convergence as conv_plot
import plot_boxplot as box_plot
warnings.simplefilter(action='ignore')
def selector(algo, func_details, popSize, Iter):
    """Instantiate and run the metaheuristic named `algo` on a benchmark.

    Parameters
    ----------
    algo : str
        optimizer name, e.g. "PSO", "GWO", "DE", ...
    func_details : list
        [function_name, lb, ub, dim] as returned by
        benchmarks.getFunctionDetails()
    popSize : int
        population size
    Iter : int
        number of iterations

    Returns
    -------
    The solution object produced by the chosen optimizer, or None when
    `algo` is not recognized.
    """
    function_name = func_details[0]
    lb = func_details[1]
    ub = func_details[2]
    dim = func_details[3]
    # dispatch table: algorithm name -> optimizer entry point
    optimizers = {
        "SSA": ssa.SSA, "PSO": pso.PSO, "GA": ga.GA, "BAT": bat.BAT,
        "FFA": ffa.FFA, "GWO": gwo.GWO, "WOA": woa.WOA, "MVO": mvo.MVO,
        "MFO": mfo.MFO, "CS": cs.CS, "HHO": hho.HHO, "SCA": sca.SCA,
        "JAYA": jaya.JAYA, "DE": de.DE,
    }
    if algo not in optimizers:
        # BUG FIX: the original `return null` raised a NameError --
        # Python's null value is None
        return None
    return optimizers[algo](getattr(benchmarks, function_name),
                            lb, ub, dim, popSize, Iter)
def run(optimizer, objectivefunc, NumOfRuns, params, export_flags):
    """
    It serves as the main interface of the framework for running the experiments.

    Runs every optimizer on every benchmark NumOfRuns times, optionally
    exporting per-run details, per-pair averages, convergence plots and
    box plots into a timestamped results directory.

    Parameters
    ----------
    optimizer : list
        The list of optimizers names
    objectivefunc : list
        The list of benchmark functions
    NumOfRuns : int
        The number of independent runs
    params : set
        The set of parameters which are:
        1. Size of population (PopulationSize)
        2. The number of iterations (Iterations)
    export_flags : set
        The set of Boolean flags which are:
        1. Export (Exporting the results in a file)
        2. Export_details (Exporting the detailed results in files)
        3. Export_convergence (Exporting the convergence plots)
        4. Export_boxplot (Exporting the box plots)

    Returns
    -----------
    N/A
    """
    # Select general parameters for all optimizers (population size, number of iterations)
    PopulationSize = params['PopulationSize']
    Iterations= params['Iterations']
    # Which outputs should be produced?
    Export=export_flags['Export_avg']
    Export_details=export_flags['Export_details']
    Export_convergence = export_flags['Export_convergence']
    Export_boxplot = export_flags['Export_boxplot']
    # Flag / Flag_details track whether the CSV headers were written yet
    Flag=False
    Flag_details=False
    # CSV header columns for the convergence values (Iter1..IterN)
    CnvgHeader=[]
    # all outputs go into a fresh timestamped directory
    results_directory = time.strftime("%Y-%m-%d-%H-%M-%S") + '/'
    Path(results_directory).mkdir(parents=True, exist_ok=True)
    for l in range(0,Iterations):
        CnvgHeader.append("Iter"+str(l+1))
    for i in range (0, len(optimizer)):
        for j in range (0, len(objectivefunc)):
            convergence = [0]*NumOfRuns
            executionTime = [0]*NumOfRuns
            for k in range (0,NumOfRuns):
                func_details=benchmarks.getFunctionDetails(objectivefunc[j])
                x=selector(optimizer[i],func_details,PopulationSize,Iterations)
                convergence[k] = x.convergence
                optimizerName = x.optimizer
                objfname = x.objfname
                if(Export_details==True):
                    # append one row per individual run
                    ExportToFile=results_directory + "experiment_details.csv"
                    with open(ExportToFile, 'a',newline='\n') as out:
                        writer = csv.writer(out,delimiter=',')
                        if (Flag_details==False): # just one time to write the header of the CSV file
                            header= numpy.concatenate([["Optimizer","objfname","ExecutionTime"],CnvgHeader])
                            writer.writerow(header)
                            Flag_details=True # at least one experiment
                        # NOTE(review): executionTime[k] is only filled when
                        # Export_details is enabled, so the averages written
                        # below use zeros otherwise -- confirm intended.
                        executionTime[k] = x.executionTime
                        a=numpy.concatenate([[x.optimizer,x.objfname,x.executionTime],x.convergence])
                        writer.writerow(a)
                    out.close()
            if(Export==True):
                # append one averaged row per (optimizer, benchmark) pair
                ExportToFile=results_directory + "experiment.csv"
                with open(ExportToFile, 'a',newline='\n') as out:
                    writer = csv.writer(out,delimiter=',')
                    if (Flag==False): # just one time to write the header of the CSV file
                        header= numpy.concatenate([["Optimizer","objfname","ExecutionTime"],CnvgHeader])
                        writer.writerow(header)
                        Flag=True
                    avgExecutionTime = float("%0.2f"%(sum(executionTime) / NumOfRuns))
                    avgConvergence = numpy.around(numpy.mean(convergence, axis=0, dtype=numpy.float64), decimals=2).tolist()
                    a=numpy.concatenate([[optimizerName,objfname,avgExecutionTime],avgConvergence])
                    writer.writerow(a)
                out.close()
    if Export_convergence == True:
        conv_plot.run(results_directory, optimizer, objectivefunc, Iterations)
    if Export_boxplot == True:
        box_plot.run(results_directory, optimizer, objectivefunc, Iterations)
    if (Flag==False): # Failed to run at least one experiment
        print("No Optomizer or Cost function is selected. Check lists of available optimizers and cost functions")
    print("Execution completed")
| {
"alphanum_fraction": 0.6256564141,
"author": null,
"avg_line_length": 38.0857142857,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "866dc0a1cd3ff270f986994530347a801f7b3781",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "8da36f21425e43985367af3df4509502c4e2385b",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "Senoussaoui/EvoloPy",
"max_forks_repo_path": "optimizer.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "8da36f21425e43985367af3df4509502c4e2385b",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "Senoussaoui/EvoloPy",
"max_issues_repo_path": "optimizer.py",
"max_line_length": 124,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "8da36f21425e43985367af3df4509502c4e2385b",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "Senoussaoui/EvoloPy",
"max_stars_repo_path": "optimizer.py",
"max_stars_repo_stars_event_max_datetime": "2021-02-17T20:50:43.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-02-17T20:50:43.000Z",
"num_tokens": 1623,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 6665
} |
function [texture structure] = structure_texture_decomposition_rof(im, theta, nIters, alp)
%
% Decompose the input IMAGE into structure and texture parts using the
% Rudin-Osher-Fatemi method. The final output is a linear combination
% of the decomposed texture and the structure parts.
%
% Inputs:
%   im     - input image; each channel along dim 3 is processed independently
%   theta  - ROF tightness parameter (default 1/8 when only im is given)
%   nIters - number of dual gradient-descent iterations (default 100)
%   alp    - texture/structure blending weight (default 0.95)
% Outputs:
%   texture   - im - alp*structure, rescaled to [0, 255]
%   structure - TV-smoothed component, rescaled to [0, 255] (only if requested)
%
% According to Wedel etal "An Improved Algorithm for TV-L1 optical flow"
% equations (8)-(10)
%
% Test code
% [im1, im2, tu, tv] = read_image_flow_tune_para(4,0);
% [t1 s1] = structure_texture_decomposition_rof(im1);
% [t2 s2] = structure_texture_decomposition_rof(im2);
% indx = ~isnan(tu) | ~isnan(tv);
% uv = cat(3, tu, tv);
% uv(isnan(uv)) = 0;
% figure; imshow(-abs(partial_deriv(cat(3,im1, im2), uv)).*indx, []); title('original');
% figure; imshow(-abs(partial_deriv(cat(3,s1, s2), uv)).*indx, []); title('structure');
% figure; imshow(-abs(partial_deriv(cat(3,t1, t2), uv)).*indx, []); title('texture');
% tmp = partial_deriv(cat(3,t1, t2), uv);
% figure; imshow(t1, []); figure; imshow(s1, []);
% Author: Deqing Sun, Department of Computer Science, Brown University
% Contact: dqsun@cs.brown.edu
% $Date: 2009 $
%
% Copyright 2009-2010, Brown University, Providence, RI. USA
%
% All Rights Reserved
%
% All commercial use of this software, whether direct or indirect, is
% strictly prohibited including, without limitation, incorporation into in
% a commercial product, use in a commercial service, or production of other
% artifacts for commercial purposes.
%
% Permission to use, copy, modify, and distribute this software and its
% documentation for research purposes is hereby granted without fee,
% provided that the above copyright notice appears in all copies and that
% both that copyright notice and this permission notice appear in
% supporting documentation, and that the name of the author and Brown
% University not be used in advertising or publicity pertaining to
% distribution of the software without specific, written prior permission.
%
% For commercial uses contact the Technology Venture Office of Brown University
%
% THE AUTHOR AND BROWN UNIVERSITY DISCLAIM ALL WARRANTIES WITH REGARD TO
% THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
% FITNESS FOR ANY PARTICULAR PURPOSE. IN NO EVENT SHALL THE AUTHOR OR
% BROWN UNIVERSITY BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL
% DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
% PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
% ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
% THIS SOFTWARE.
if nargin == 1
    % Defaults from the Wedel et al. paper when only the image is supplied.
    theta = 1/8;
    nIters = 100;
    alp = 0.95; % alp = 0.75 results in 4:1
end;
% Rescale the input image to [-1 1]
IM = scale_image(im, -1,1);
% Backup original images
im = IM;
% stepsize of the dual gradient descent
delta = 1.0/(4.0*theta);
for iIm = 1:size(im,3)
    % Initialize dual variable p to be 0
    p = zeros([size(im,1) size(im,2) 2]);
    % Gradient descend
    I = squeeze(IM(:,:,iIm));
    for iter = 1:nIters
        % Compute divergence        eqn(8)
        div_p = imfilter(p(:,:,1), [-1 1 0], 'corr', 0)+ ...
            imfilter(p(:,:,2), [-1 1 0]', 'corr', 0);
        I_x = imfilter(I+theta*div_p, [-1 1], 'replicate');
        I_y = imfilter(I+theta*div_p, [-1 1]', 'replicate');
        % Update dual variable      eqn(9)
        p(:,:,1) = p(:,:,1) + delta*(I_x);
        p(:,:,2) = p(:,:,2) + delta*(I_y);
        % Reproject to |p| <= 1     eqn(10)
        reprojection = max(1.0, sqrt(p(:,:,1).^2 + p(:,:,2).^2));
        p(:,:,1) = p(:,:,1)./reprojection;
        p(:,:,2) = p(:,:,2)./reprojection;
    end
    % compute divergence of the converged dual variable
    div_p = imfilter(p(:,:,1), [-1 1 0], 'corr', 0)+ ...
        imfilter(p(:,:,2), [-1 1 0]', 'corr', 0);
    % compute structure component
    IM(:,:,iIm) = I + theta*div_p;
end;
texture = squeeze(scale_image(im - alp*IM, 0, 255));
if nargout == 2
    structure = squeeze(scale_image(IM, 0, 255)); %(u-min(u(:)))/(max(u(:))-min(u(:))) - 1;
end;
"alphanum_fraction": null,
"author": "kristinbranson",
"avg_line_length": null,
"converted": null,
"ext": null,
"file": null,
"hexsha": null,
"include": null,
"lang": null,
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": "github-repos/MATLAB/kristinbranson-JAABA/JAABA-5d778a23e3e7cf272df9a89a72b1b66d94f535d7/spaceTime/optflow_deqing/utils/structure_texture_decomposition_rof.m",
"reason": null,
"repo": "JAABA",
"save_path": "github-repos/MATLAB/kristinbranson-JAABA",
"sha": "5d778a23e3e7cf272df9a89a72b1b66d94f535d7",
"size": null
} |
import pandas as pd
import statsmodels.api as sm
from patsy import dmatrices
from statsmodels.stats.outliers_influence import variance_inflation_factor
def vif(df, y, x, merge_coef=False):
    '''
    Variance Inflation Factors.

    Fits a binomial GLM of `y` on `x` and computes the VIF of each design
    column (intercept included).

    Args:
        df: pandas DataFrame containing the response and predictor columns.
        y: name of the binary response column.
        x: list of predictor column names.
        merge_coef: if True, merge the VIF table with the GLM coefficient
            summary. Defaults to False.

    Returns:
        pandas DataFrame with columns 'variables' and 'vif' (plus the
        coefficient-summary columns when merge_coef is True).
    '''
    # BUG FIX: the previous version ignored its arguments entirely -- it
    # imported scorecardpy, overwrote df/y/x with the germancredit demo data
    # and hardcoded the variable-name list. It now uses the caller's data.
    Xtrain = sm.add_constant(df.loc[:, x])
    ytrain = df.loc[:, y]
    lrfit = sm.GLM(
        ytrain.astype(float),
        Xtrain.astype(float),
        family=sm.families.Binomial()
    ).fit()
    # Build the design matrix via patsy. patsy names the intercept column
    # 'Intercept' while the statsmodels summary (via add_constant) uses
    # 'const'; rename so the merge below lines up.
    _, X = dmatrices(' ~ '.join([y, '+'.join(x)]), data=df, return_type="dataframe")
    variables = ['const' if c == 'Intercept' else c for c in X.columns]
    vif_df = pd.DataFrame({
        'variables': variables,
        'vif': [variance_inflation_factor(X.values, i) for i in range(X.shape[1])]
    })
    if merge_coef:
        vif_df = pd.merge(
            lrfit.summary2().tables[1].reset_index().rename(columns={'index': 'variables'}),
            vif_df,
            on='variables', how='outer'
        )
    return vif_df
#' Variance Inflation Factors
#'
#' \code{vif} calculates variance-inflation and generalized variance-inflation factors for linear and generalized linear models.
#'
#' @param model A model object.
#' @param merge_coef Logical, whether to merge with coefficients of model summary matrix. Defaults to FALSE.
#'
#' @return A data frame with columns for variable and gvif, or additional columns for df and gvif^(1/(2*df)) if provided model uses factor variable.
#'
#' @seealso \url{https://cran.r-project.org/package=car}
#' @examples
#' data(germancredit)
#'
#' # Example I
#' fit1 = glm(creditability~ age.in.years + credit.amount +
#' present.residence.since, family = binomial(), data = germancredit)
#' vif(fit1)
#' vif(fit1, merge_coef=TRUE)
#'
#' # Example II
#' fit2 = glm(creditability~ status.of.existing.checking.account +
#' credit.history + credit.amount, family = binomial(), data = germancredit)
#' vif(fit2)
#' vif(fit2, merge_coef=TRUE)
#'
#'
#' @importFrom stats coef coefficients cov2cor model.matrix vcov
#' @export
# Compute (generalized) variance inflation factors for a fitted model,
# returning a data.table; optionally merged with the coefficient summary.
vif = function(model, merge_coef = FALSE) {
  # silence R CMD check notes about data.table non-standard evaluation
  . = df = gvif = gvif_adj = variable = NULL
  if (any(is.na(coef(model)))) stop ("There are aliased coefficients in the model")
  v <- vcov(model)
  # `assign` maps each model-matrix column to the term it belongs to
  assign <- attr(model.matrix(model), "assign")
  if (names(coefficients(model)[1]) == "(Intercept)") {
    # drop the intercept row/column; it gets no VIF
    v <- v[-1, -1]
    assign <- assign[-1]
  } else warning("No intercept: vifs may not be sensible.")
  terms <- labels(terms(model))
  if (length(terms) < 2) stop("model contains fewer than 2 terms")
  R <- cov2cor(v)
  detR <- det(R)
  result <- data.table(variable=terms, gvif=0, df=0, gvif_adj=0) # generalized vif, degree freedom,
  for (t in seq_len(length(terms))) {
    subs = which(assign == t)
    # GVIF for the term: det(R[term]) * det(R[others]) / det(R)
    result[t, `:=`(
      gvif = det(as.matrix(R[subs, subs])) * det(as.matrix(R[-subs, -subs])) / detR,
      df = length(subs) )]
  }
  if (result[, all(df==1)]) {
    # no multi-df (factor) terms: plain VIFs suffice
    result = result[,.(variable, gvif)]
  } else {
    # adjust GVIF for terms with more than one degree of freedom
    result[, gvif_adj := gvif^(1/(2*df))]
    setnames(result, c('variable', 'gvif', 'df', 'gvif^(1/(2*df))'))
  }
  # merge with coefficients matrix
  if (merge_coef) {
    if (length(assign) == length(terms)) {
      coefDF = as.data.frame(coef(summary(model)))
      coefDT = data.table(variable = row.names(coefDF),Estimate=coefDF[,1],
                          data.table(coefDF[,2:4])[,lapply(.SD,function(x) round(x,4))])
      result = merge(coefDT, result, by='variable', all.x = TRUE, sort = FALSE)
    } else {
      warning('The summary matrix cant merge with vif.')
    }
  }
  return(result[])
}
# modified from car::vif
| {
"alphanum_fraction": 0.6472545757,
"author": null,
"avg_line_length": 31.3565217391,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "3af5b91bd48219414ca844d488c124159bbccfa6",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "c2fa3d2a939e2afa0f8f7f39dae0d44a156a23ed",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "chenlongzhen/scorecardpy",
"max_forks_repo_path": "scorecardpy/vif.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "c2fa3d2a939e2afa0f8f7f39dae0d44a156a23ed",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "chenlongzhen/scorecardpy",
"max_issues_repo_path": "scorecardpy/vif.py",
"max_line_length": 148,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "c2fa3d2a939e2afa0f8f7f39dae0d44a156a23ed",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "chenlongzhen/scorecardpy",
"max_stars_repo_path": "scorecardpy/vif.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1046,
"path": null,
"reason": "import statsmodels,from statsmodels",
"repo": null,
"save_path": null,
"sha": null,
"size": 3606
} |
import numpy as np
# Calculating the time needed for task i when allocated in DC j
def dijstra(G, src, des):
    '''Find the shortest path length from src to des (Dijkstra's algorithm).

    Args:
        G: adjacency matrix of the network; G[i][j] is the edge weight from
           i to j, with INT_MAX (1 << 30) marking a missing edge.
        src: index of the source node.
        des: index of the destination node.

    Returns:
        The length of one shortest path from src to des.

    Raises:
        ValueError: if des is unreachable from src. (Previously this case
        spun forever: no candidate left the selection index at 0, which was
        re-"visited" endlessly.)
    '''
    INT_MAX = 1 << 30
    vertex_number = len(G)
    dis = np.zeros(vertex_number, dtype=float)
    visited = np.zeros(vertex_number, dtype=bool)
    # Initialize tentative distances with the direct edges out of src.
    for i in range(vertex_number):
        dis[i] = G[src][i]
    dis[src] = 0
    visited[src] = True
    while not visited[des]:
        # Select the closest unvisited node.
        index = 0
        min_d = INT_MAX
        for i in range(vertex_number):
            if not visited[i] and dis[i] < min_d:
                min_d = dis[i]
                index = i
        if min_d == INT_MAX:
            # Every remaining node is unreachable, so des cannot be reached.
            raise ValueError('node %d is unreachable from node %d' % (des, src))
        visited[index] = True
        # Relax edges out of the newly settled node.
        for i in range(vertex_number):
            if not visited[i] and G[index][i] != INT_MAX and dis[index] + G[index][i] < dis[i]:
                dis[i] = dis[index] + G[index][i]
    return dis[des]
def cal_eta(loc, Gp, data_loc, data_amo):
    ''' Calculating the ETA when put this task in DC_loc

    Args:
        loc: which DC this task is assigned to
        Gp: the matrix representing the network of DCs
        data_loc: the locations of the DCs which store the required data
        data_amo: the amount of required data in each of those DCs

    Returns:
        the ETA (sum of shortest-path transfer times, one per data chunk)
    '''
    eta = 0.0
    for src, amount in zip(data_loc, data_amo):
        # Scale every edge by the data amount to turn link weights into
        # per-chunk transfer times. astype(float) already returns a new
        # array, so no explicit copy is needed, and the vectorized division
        # replaces the previous O(n^2) Python loop.
        # (Also removed: debug print of the whole scaled matrix, which
        # polluted stdout on every call.)
        G = Gp.astype(float) / amount
        eta += dijstra(G, src, loc)
    return eta
if __name__ == "__main__":
    # Demo network: pairwise link weights between four data centers.
    links = np.array([[100, 100, 10000, 200],
                      [100, 100, 200, 300],
                      [10000, 200, 100, 10000],
                      [200, 300, 10000, 100]])
    chunk_locations = np.array([2, 3])
    chunk_amounts = np.array([200, 300])
    # cal_eta(chunk_locations, chunk_amounts, links, 0, 0)
    print(dijstra(links, 0, 2))
| {
"alphanum_fraction": 0.5405904059,
"author": null,
"avg_line_length": 27.7948717949,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "3da27298c0e967134ed82191a47bc42f3dc9456d",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "0fa14eedd509bba032cc7569f9a6ad8bf529e6d8",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "vihowe/asd_project",
"max_forks_repo_path": "src/cal_eta.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "0fa14eedd509bba032cc7569f9a6ad8bf529e6d8",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "vihowe/asd_project",
"max_issues_repo_path": "src/cal_eta.py",
"max_line_length": 93,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "0fa14eedd509bba032cc7569f9a6ad8bf529e6d8",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "vihowe/asd_project",
"max_stars_repo_path": "src/cal_eta.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 616,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 2168
} |
import sys
import subprocess
from SALib.test_functions import Ishigami
import numpy as np
import re
# Paths to the SALib CLI entry point and the Ishigami parameter file, both
# relative to the repository root (where the tests are expected to run).
salib_cli = "./src/SALib/scripts/salib.py"
ishigami_fp = "./src/SALib/test_functions/params/Ishigami.txt"
if sys.version_info[0] == 2:
    # Python 2 has no subprocess.run; subprocess.call is close enough for
    # the fire-and-forget way it is used in these tests.
    subprocess.run = subprocess.call
def test_delta():
    """Round-trip the CLI: saltelli sampling, Ishigami eval, delta analysis."""
    cmd = "python {cli} sample saltelli -p {fn} -o model_input.txt -n 1024"\
        .format(cli=salib_cli, fn=ishigami_fp) +\
        " --precision 8 --max-order 2 --seed=100"
    subprocess.run(cmd.split())

    # Run model and save output
    np.savetxt('model_output.txt', Ishigami.evaluate(
        np.loadtxt('model_input.txt')))

    analyze_cmd = "python {cli} analyze delta -p {fn} -X model_input.txt \
    -Y model_output.txt -c 0 -r 10 --seed=100".format(cli=salib_cli,
                                                      fn=ishigami_fp).split()
    # run analysis and use regex to strip all whitespace from result
    result = subprocess.check_output(analyze_cmd, universal_newlines=True)
    result = re.sub(r'[\n\t\s]*', '', result)

    expected_output = 'Parameterdeltadelta_confS1S1_confx10.2122850.0074810.3123190.011463x20.3530150.0061840.4306860.013135x30.1613440.0057540.0013880.001545'
    # Strict equality for consistency with the other CLI tests; the previous
    # `result in expected_output` also passed on a truncated report.
    assert len(result) > 0 and result == expected_output, \
        "Results did not match expected values:\n\n Expected: \n{} \n\n Got: \n{}".format(
            expected_output, result)
def test_dgsm():
    """Round-trip the CLI: finite_diff sampling, Ishigami eval, dgsm analysis."""
    # Generate inputs
    sample_cmd = "python {cli} sample finite_diff -p {fn} -o model_input.txt -d 0.001\
    --precision=8 -n 1000 --seed=100".format(cli=salib_cli, fn=ishigami_fp)
    subprocess.run(sample_cmd.split())

    # Run model and save output
    model_inputs = np.loadtxt('model_input.txt')
    np.savetxt('model_output.txt', Ishigami.evaluate(model_inputs))

    analysis_cmd = "python {cli} analyze dgsm -p {fn} -X model_input.txt\
    -Y model_output.txt -c 0 -r 1000 --seed=100"\
        .format(cli=salib_cli, fn=ishigami_fp)
    # run analysis and use regex to strip all whitespace from result
    output = subprocess.check_output(analysis_cmd.split(), universal_newlines=True)
    output = re.sub(r'[\n\t\s]*', '', output)

    expected = "Parametervivi_stddgsmdgsm_confx17.69803416.3731482.2331100.986061x224.48770117.3199737.1035971.092944x311.05754523.7851003.2076651.488346"
    assert len(output) > 0 and output == expected, \
        "Unexpected DGSM results.\n\nExpected:\n{}\n\nGot:{}"\
        .format(expected, output)
def test_fast():
    """Round-trip the CLI: fast_sampler sampling, Ishigami eval, fast analysis."""
    # Generate inputs
    sample_cmd = "python {cli} sample fast_sampler -p {fn} -o model_input.txt \
    --precision=8 -n 1000 -M 4 --seed=100".format(cli=salib_cli, fn=ishigami_fp)
    subprocess.run(sample_cmd.split())

    # Run model and save output
    model_inputs = np.loadtxt('model_input.txt')
    np.savetxt('model_output.txt', Ishigami.evaluate(model_inputs))

    analysis_cmd = "python {cli} analyze fast -p {fn} \
    -Y model_output.txt -c 0 --seed=100"\
        .format(cli=salib_cli, fn=ishigami_fp)
    # run analysis and use regex to strip all whitespace from result
    output = subprocess.check_output(analysis_cmd.split(), universal_newlines=True)
    output = re.sub(r'[\n\t\s]*', '', output)

    expected = "ParameterFirstTotalx10.3104030.555603x20.4425530.469546x30.0000000.239155"
    assert len(output) > 0 and output == expected, \
        "Unexpected FAST results.\n\nExpected:\n{}\n\nGot:{}"\
        .format(expected, output)
def test_ff():
    """Round-trip the CLI: ff sampling, Ishigami eval, ff analysis."""
    # Generate inputs
    sample_cmd = "python {cli} sample ff -p {fn} -o model_input.txt \
    --precision=8 -n 1000 --seed=100".format(cli=salib_cli, fn=ishigami_fp)
    subprocess.run(sample_cmd.split())

    # Run model and save output
    model_inputs = np.loadtxt('model_input.txt')
    np.savetxt('model_output.txt', Ishigami.evaluate(model_inputs))

    analysis_cmd = "python {cli} analyze ff -p {fn} -X model_input.txt\
    -Y model_output.txt -c 0 --seed=100"\
        .format(cli=salib_cli, fn=ishigami_fp)
    # run analysis and use regex to strip all whitespace from result
    output = subprocess.check_output(analysis_cmd.split(), universal_newlines=True)
    output = re.sub(r'[\n\t\s]*', '', output)

    expected = "ParameterMEx10.000000x20.000000x30.000000dummy_00.000000('x1','x2')0.000000('x1','x3')0.000000('x2','x3')0.000000('x1','dummy_0')0.000000('x2','dummy_0')0.000000('x3','dummy_0')0.000000"
    assert len(output) > 0 and output == expected, \
        "Unexpected FF results.\n\nExpected:\n{}\n\nGot:{}"\
        .format(expected, output)
def test_morris():
    """Round-trip the CLI: morris sampling, Ishigami eval, morris analysis."""
    # Generate inputs
    sample_cmd = "python {cli} sample morris -p {fn} -o model_input.txt -n 100\
    --precision=8 --levels=10 --seed=100 -lo False"\
        .format(cli=salib_cli, fn=ishigami_fp)
    subprocess.run(sample_cmd.split())

    # Run model and save output
    model_inputs = np.loadtxt('model_input.txt')
    np.savetxt('model_output.txt', Ishigami.evaluate(model_inputs))

    # run analysis
    analysis_cmd = "python {cli} analyze morris -p {fn} -X model_input.txt\
    -Y model_output.txt -c 0 -r 1000 -l 10 --seed=100"\
        .format(cli=salib_cli, fn=ishigami_fp)
    # strip all whitespace from the captured report before comparing
    output = subprocess.check_output(analysis_cmd.split(), universal_newlines=True)
    output = re.sub(r'[\n\t\s]*', '', output)

    expected_output = """ParameterMu_StarMuMu_Star_ConfSigmax17.4997.4991.8019.330x22.215-0.4700.3482.776x35.4240.8641.1487.862"""
    assert len(output) > 0 and output == expected_output, \
        "Results did not match expected values:\n\n Expected: \n{} \n\n Got: \n{}".format(
            expected_output, output)
def test_rbd_fast():
    """Round-trip the CLI: ff sampling, Ishigami eval, rbd_fast analysis."""
    # Generate inputs
    sample_cmd = "python {cli} sample ff -p {fn} -o model_input.txt \
    --precision=8 --seed=100".format(cli=salib_cli, fn=ishigami_fp)
    subprocess.run(sample_cmd.split())

    # Run model and save output
    model_inputs = np.loadtxt('model_input.txt')
    np.savetxt('model_output.txt', Ishigami.evaluate(model_inputs))

    analysis_cmd = "python {cli} analyze rbd_fast -p {fn} -X model_input.txt\
    -Y model_output.txt --seed=100"\
        .format(cli=salib_cli, fn=ishigami_fp)
    # run analysis and use regex to strip all whitespace from result
    output = subprocess.check_output(analysis_cmd.split(), universal_newlines=True)
    output = re.sub(r'[\n\t\s]*', '', output)

    expected = "ParameterFirstx10.39223x20.299578x30.0342307"
    assert len(output) > 0 and output == expected, \
        "Unexpected RBD-FAST results.\n\nExpected:\n{}\n\nGot:{}"\
        .format(expected, output)
def test_sobol():
    """Round-trip the CLI: saltelli sampling, Ishigami eval, sobol analysis."""
    # Generate inputs
    cmd = "python {cli} sample saltelli -p {fn} -o model_input.txt -n 1024\
    --precision 8 --max-order 2 --seed=100".format(cli=salib_cli,
                                                   fn=ishigami_fp)
    cmd = cmd.split()

    # check_output still raises CalledProcessError on failure; the captured
    # stdout itself was previously bound to `result` and then discarded, so
    # the unused binding is dropped.
    subprocess.check_output(cmd, universal_newlines=True)

    np.savetxt('model_output.txt', Ishigami.evaluate(
        np.loadtxt('model_input.txt')))

    analyze_cmd = "python {cli} analyze sobol -p {fn}\
    -Y model_output.txt -c 0 --max-order 2\
    -r 1000 --seed=100".format(cli=salib_cli, fn=ishigami_fp).split()

    # run analysis and use regex to strip all whitespace from result
    result = subprocess.check_output(analyze_cmd, universal_newlines=True)
    result = re.sub(r'[\n\t\s]*', '', result)

    expected_output = 'ParameterS1S1_confSTST_confx10.3168320.0622410.5558600.085972x20.4437630.0560470.4418980.041596x30.0122030.0559540.2446750.025332Parameter_1Parameter_2S2S2_confx1x20.0092540.083829x1x30.2381720.101764x2x3-0.0048880.067819'
    assert len(result) > 0 and result == expected_output, \
        "Results did not match expected values:\n\n Expected: \n{} \n\n Got: \n{}".format(
            expected_output, result)
if __name__ == '__main__':
    # Run every CLI smoke test in sequence when invoked directly.
    for case in (test_delta, test_dgsm, test_fast, test_ff,
                 test_morris, test_rbd_fast, test_sobol):
        case()
| {
"alphanum_fraction": 0.6566101256,
"author": null,
"avg_line_length": 38.4228855721,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "1c428aad04470cc050f9150015da22fd25145f7a",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "2545a439ca474a673fddadf0399f7c4e21000d99",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "JimmyKude/SALib",
"max_forks_repo_path": "tests/test_cli_analyze.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "2545a439ca474a673fddadf0399f7c4e21000d99",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "JimmyKude/SALib",
"max_issues_repo_path": "tests/test_cli_analyze.py",
"max_line_length": 245,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "b090a1699e6df9b789723bf0097521e5dc316e4c",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "tk-ML/SALib",
"max_stars_repo_path": "tests/test_cli_analyze.py",
"max_stars_repo_stars_event_max_datetime": "2021-06-22T08:27:17.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-06-22T08:27:17.000Z",
"num_tokens": 2271,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 7723
} |
# import libraries here
import numpy as np
import cv2
def count_blood_cells(image_path):
    """
    Count red and white blood cells on a microscopy photograph and decide
    whether the patient has leukemia, based on the ratio of the two counts.
    (Docstring translated from Serbian.) This procedure is called
    automatically from the main procedure; that part of the code does not
    need to be changed or implemented.

    :param image_path: <String> Path to the input photograph.
    :return: <int> number of red blood cells counted,
             <int> number of white blood cells counted,
             <bool> whether the patient has leukemia (True or False)
    """
    red_blood_cell_count = 0
    white_blood_cell_count = 0
    has_leukemia = None
    cvimg = cv2.imread(image_path)
    # Work only on the green channel, stretched to the full 0-255 range.
    greenimg = cvimg[:, :, 1].astype('float64')
    greenimg *= (255.0 / greenimg.max())
    greenimg = greenimg.astype('uint8')
    # -- White-cell pass: adaptive mean threshold with a large constant (62)
    # keeps only the darkest blobs, then invert so blobs are white.
    adabingreenimg = cv2.adaptiveThreshold(greenimg, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 535, 62)
    invadabingreenimg = 255 - adabingreenimg
    # NOTE(review): 3-value findContours is the OpenCV 3.x signature;
    # OpenCV 4 returns only (contours, hierarchy) -- confirm pinned version.
    img, contours, hierarchy = cv2.findContours(invadabingreenimg, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    for contour in contours:
        # Fill each contour to close holes inside the blobs.
        cv2.fillPoly(invadabingreenimg, pts=[contour], color=255)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))
    # Opening removes small specks before counting the remaining blobs.
    oinvadabingreenimg = cv2.morphologyEx(invadabingreenimg, cv2.MORPH_OPEN, kernel, iterations=3)
    _, whitecellscontours, _ = cv2.findContours(oinvadabingreenimg, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    white_blood_cell_count = len(whitecellscontours)
    # -- All-cell pass: same threshold without the constant offset captures
    # every cell (red and white).
    adabingreenimg = cv2.adaptiveThreshold(greenimg, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 535, 0)
    invadabingreenimg = 255 - adabingreenimg
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    einvadabingreenimg = cv2.morphologyEx(invadabingreenimg, cv2.MORPH_ERODE, kernel, iterations=1)
    img, contours, hierarchy = cv2.findContours(einvadabingreenimg, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    for contour in contours:
        cv2.fillPoly(einvadabingreenimg, pts=[contour], color=255)
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    oeinvadabingreenimg = cv2.morphologyEx(einvadabingreenimg, cv2.MORPH_OPEN, kernel, iterations=3)
    _, cellscontours, _ = cv2.findContours(oeinvadabingreenimg, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    # Red cells = all detected cells minus the white cells counted above.
    red_blood_cell_count = len(cellscontours) - white_blood_cell_count
    # Heuristic: leukemia when white cells exceed ~7.5% of all cells.
    has_leukemia = True if red_blood_cell_count/(red_blood_cell_count+white_blood_cell_count) < 0.925 else False
    # 2-4 nearby white blobs may be fragments of one cell: merge them when
    # all centroids lie within 16% of the image dimensions of each other.
    if white_blood_cell_count > 1 and white_blood_cell_count <= 4:
        m = cv2.moments(whitecellscontours[0])
        cX = int(m["m10"] / m["m00"])
        cY = int(m["m01"] / m["m00"])
        center = [cX, cY]
        canMergeCells = True
        for c in whitecellscontours:
            m = cv2.moments(c)
            cX = int(m["m10"] / m["m00"])
            cY = int(m["m01"] / m["m00"])
            if abs(center[0] - cX) < 0.16 * greenimg.shape[1] and abs(center[1] - cY) < 0.16 * greenimg.shape[0]:
                # Running average of the merged centroid.
                center = [(center[0] + cX) / 2, (center[1] + cY) / 2]
            else:
                canMergeCells = False
                break
        if canMergeCells:
            white_blood_cell_count = 1
            has_leukemia = False
    # Overrides: many white cells with few red cells -> leukemia;
    # exactly one white cell -> healthy.
    has_leukemia = True if white_blood_cell_count > 4 and red_blood_cell_count <= 1800 else has_leukemia
    has_leukemia = False if white_blood_cell_count == 1 else has_leukemia
    return red_blood_cell_count, white_blood_cell_count, has_leukemia
| {
"alphanum_fraction": 0.7019553073,
"author": null,
"avg_line_length": 52.6470588235,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "939799d7dadecc300efbd446476d62f629517391",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "ea6e749d943f104c1454e514995663d3cdd46b49",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "FmasterofU/soft-comp_challenges",
"max_forks_repo_path": "1_image_processing/process.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "ea6e749d943f104c1454e514995663d3cdd46b49",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "FmasterofU/soft-comp_challenges",
"max_issues_repo_path": "1_image_processing/process.py",
"max_line_length": 113,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "ea6e749d943f104c1454e514995663d3cdd46b49",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "FmasterofU/soft-comp_challenges",
"max_stars_repo_path": "1_image_processing/process.py",
"max_stars_repo_stars_event_max_datetime": "2021-11-19T08:42:04.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-11-19T08:42:04.000Z",
"num_tokens": 1156,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 3580
} |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pdb
import logging
import numpy as np
import torch
from torch.utils.data import DataLoader, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm
from torch.utils.data import DataLoader, RandomSampler
from seqeval.metrics import f1_score, precision_score, recall_score, classification_report
from data_utils import load_and_cache_examples, tag_to_id, get_chunks
from flashtool import Logger
import os
import pickle
logger = logging.getLogger(__name__)
def evaluate(args, model, tokenizer, labels, pad_token_label_id, best, mode, data_loader=None, prefix="", verbose=True, final=True):
    """Evaluate a token-classification model and track the best P/R/F1 so far.

    Args:
        args: experiment namespace (device, n_gpu, batch sizes, model_type, ...).
        model: the model to evaluate; put back into train mode before returning.
        tokenizer: tokenizer matching the model (used only when building a dataset).
        labels: list of label strings; position == label id.
        pad_token_label_id: label id marking positions excluded from metrics.
        best: [precision, recall, f1] of the best evaluation so far.
        mode: dataset split name, used when `data_loader` is None.
        data_loader: optional pre-built DataLoader; skips dataset construction.
        prefix: string included in the log headers.
        verbose: whether to log dataset statistics.
        final: forwarded to load_and_cache_examples.

    Returns:
        (results dict, per-sentence predicted label lists, updated `best`,
         whether `best` improved, seqeval classification report string)
    """
    eval_dataset = None
    if data_loader is not None:
        eval_dataloader = data_loader
    else:
        eval_dataset = load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode=mode, final=final)
        args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
        eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
        eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
    # multi-gpu evaluate
    # if args.n_gpu > 1:
    #     model = torch.nn.DataParallel(model)
    logger.info("***** Running evaluation %s *****", prefix)
    if verbose and eval_dataset is not None:
        # BUG FIX: these stats only exist when the dataset was built here.
        # Previously this branch ran even when a pre-built data_loader was
        # passed, crashing with NameError on eval_dataset/eval_batch_size.
        logger.info("  Num examples = %d", len(eval_dataset))
        logger.info("  Batch size = %d", args.eval_batch_size)
    eval_loss = 0.0
    nb_eval_steps = 0
    preds = None
    out_label_ids = None
    model.eval()
    for batch in tqdm(eval_dataloader, desc="Evaluating"):
        batch = tuple(t.to(args.device) for t in batch)
        with torch.no_grad():
            inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
            if args.model_type != "distilbert":
                # XLM and RoBERTa don't use segment_ids
                inputs["token_type_ids"] = (
                    batch[2] if args.model_type in ["bert", "xlnet"] else None
                )
            outputs = model(**inputs)
            tmp_eval_loss, logits = outputs[:2]
            if args.n_gpu > 1:
                tmp_eval_loss = tmp_eval_loss.mean()
            eval_loss += tmp_eval_loss.item()
        nb_eval_steps += 1
        # Accumulate logits and gold labels across batches on the CPU.
        if preds is None:
            preds = logits.detach().cpu().numpy()
            out_label_ids = inputs["labels"].detach().cpu().numpy()
        else:
            preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
            out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
    eval_loss = eval_loss / nb_eval_steps
    preds = np.argmax(preds, axis=2)
    label_map = {i: label for i, label in enumerate(labels)}
    # Convert ids to label strings, skipping padded positions.
    # (The unused loop counter and the unused out_id_list/preds_id_list
    # accumulators from the original were removed.)
    preds_list = [[] for _ in range(out_label_ids.shape[0])]
    out_label_list = [[] for _ in range(out_label_ids.shape[0])]
    for i in range(out_label_ids.shape[0]):
        for j in range(out_label_ids.shape[1]):
            if out_label_ids[i, j] != pad_token_label_id:
                preds_list[i].append(label_map[preds[i][j]])
                out_label_list[i].append(label_map[out_label_ids[i][j]])
    p = precision_score(out_label_list, preds_list)
    r = recall_score(out_label_list, preds_list)
    new_F = f1_score(out_label_list, preds_list)
    c_result = classification_report(out_label_list, preds_list)
    results = {}
    is_updated = False
    if new_F > best[-1]:
        best = [p, r, new_F]
        is_updated = True
    results.update({
        "loss": eval_loss,
        "precision": p,
        "recall": r,
        "f1": new_F,
        "best_precision": best[0],
        "best_recall": best[1],
        "best_f1": best[-1]
    })
    logger.info("***** Eval results %s *****", prefix)
    for key in sorted(results.keys()):
        logger.info("  %s = %s", key, str(results[key]))
    model.train()
    return results, preds_list, best, is_updated, c_result
def align_predictions(predictions: np.ndarray, label_ids: np.ndarray, label_map=None, skip_label_ids=None):
    """Convert logits and gold label ids into parallel lists of label strings.

    Positions whose gold id equals the CrossEntropyLoss ignore index (-100)
    or appears in `skip_label_ids` are dropped.

    Args:
        predictions: (batch, seq_len, num_labels) logits.
        label_ids: (batch, seq_len) gold label ids.
        label_map: dict mapping label id -> label string. Optional for
            backward compatibility; falls back to a module-level `label_map`.
        skip_label_ids: collection of label ids to skip in addition to the
            ignore index; falls back to a module-level `start_end_label_id`.

    Returns:
        (preds_list, out_label_list): per-sentence predicted / gold labels.

    Raises:
        ValueError: if no label_map is supplied and none exists at module level.
    """
    # BUG FIX: the original referenced `nn`, `label_map` and
    # `start_end_label_id`, none of which are defined in this module
    # (only `torch` is imported), so every call raised NameError.
    ignore_index = -100  # value of nn.CrossEntropyLoss().ignore_index
    if label_map is None:
        label_map = globals().get("label_map")
        if label_map is None:
            raise ValueError("label_map must be provided (no module-level label_map found)")
    if skip_label_ids is None:
        skip_label_ids = globals().get("start_end_label_id", ())
    preds = np.argmax(predictions, axis=2)
    batch_size, seq_len = preds.shape
    out_label_list = [[] for _ in range(batch_size)]
    preds_list = [[] for _ in range(batch_size)]
    for i in range(batch_size):
        for j in range(seq_len):
            if label_ids[i, j] != ignore_index and label_ids[i, j] not in skip_label_ids:
                out_label_list[i].append(label_map[label_ids[i][j]])
                preds_list[i].append(label_map[preds[i][j]])
    return preds_list, out_label_list
| {
"alphanum_fraction": 0.6401762115,
"author": null,
"avg_line_length": 37.3355263158,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "10705ae41c7a6275da84b4f2b43b28d9f09b983a",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 3,
"max_forks_repo_forks_event_max_datetime": "2022-03-31T02:52:21.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-09-13T12:14:40.000Z",
"max_forks_repo_head_hexsha": "a1572717d7a3ec8e0e2b7d43671a9b74464ecab1",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "microsoft/MetaST",
"max_forks_repo_path": "src/eval.py",
"max_issues_count": 5,
"max_issues_repo_head_hexsha": "a1572717d7a3ec8e0e2b7d43671a9b74464ecab1",
"max_issues_repo_issues_event_max_datetime": "2021-12-07T23:55:36.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-09-01T08:44:23.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "microsoft/MetaST",
"max_issues_repo_path": "src/eval.py",
"max_line_length": 133,
"max_stars_count": 15,
"max_stars_repo_head_hexsha": "a1572717d7a3ec8e0e2b7d43671a9b74464ecab1",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "microsoft/MetaST",
"max_stars_repo_path": "src/eval.py",
"max_stars_repo_stars_event_max_datetime": "2022-02-28T08:18:06.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-08-16T19:15:17.000Z",
"num_tokens": 1346,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 5675
} |
import traces.PyTrace as PT
import numpy as np
import traces.conicsolve as con
import pdb,sys
from traces.axro.WSverify import traceChaseParam
import matplotlib.pyplot as plt
import time
import utilities.plotting as uplt
#Need to determine resolution and effective area
#as a function of shell radius
#Loop through all ~260 shells and determine vignetting
#and resolution functions vs. pointing error
#Will also need reflectivity vs. theta

#IMD Ir reflectivity at 1 keV
#NOTE(review): hardcoded absolute path to one user's Dropbox -- this module
#only runs on the original author's machine; consider making it configurable.
irang,irref = np.transpose(np.genfromtxt('/home/rallured/Dropbox/AXRO'
                                         '/WSTracing/'
                                         '150504IrReflectivity.txt',comments=';'))
#CXC Ir optical constants
#First two rows are skipped; ener appears to be in keV (it is compared
#against energy/1000. with energy in eV below) -- confirm with the data file.
ener,delta,beta,alpha,gamma = np.transpose(np.genfromtxt('/home/rallured'
                                '/Dropbox/AXRO/WSTracing/chandraConstants.txt')[2:])
#Paul's thermal filter transmission
def reflectivityIr(ang):
    """Look up the IMD-computed Ir reflectivity at 1 keV (0.5 nm RMS
    roughness) for the tabulated angle closest to ``ang``."""
    nearest = np.argmin(np.abs(irang - ang))
    return irref[nearest]
def CXCreflIr(ang,energy,rough):
    """Return reflectivity with RMS roughness ``rough``,
    calculated using Fresnel equations, Chandra
    optical constants, and "Strehl" factor (Nevot-Croce).

    ang    -- grazing incidence angle in radians (scalar or array)
    energy -- photon energy in eV (scalar or array)
    rough  -- roughness in RMS nm

    Returns the mean of the s- and p-polarization reflectivities,
    attenuated by the Nevot-Croce roughness factor.
    """
    #Get proper optical constants (ener table is in keV).
    #The 0.95 scaling of delta/beta is empirical -- presumably a density
    #correction; confirm with the original author.
    if np.size(energy) == 1:
        ind = np.argmin(abs(ener-energy/1000.))
        b = beta[ind]*.95
        d = delta[ind]*.95
    else:
        #Vectorized nearest-energy lookup; replaces the former per-element
        #Python loop. np.argmin along axis=1 keeps the same first-minimum
        #tie-breaking as the scalar branch.
        e_kev = np.asarray(energy)/1000.
        inds = np.argmin(np.abs(ener[None, :] - e_kev[:, None]), axis=1)
        b = beta[inds]*.95
        d = delta[inds]*.95
    n = 1 - d + 1j*b
    #Compute reflectivity in each polarization plane
    #Return mean value
    Rp = abs(n*n*np.sin(ang)-np.sqrt(n*n-np.cos(ang)**2))**2/\
         abs(n*n*np.sin(ang)+np.sqrt(n*n-np.cos(ang)**2))**2
    Rs = abs(np.sin(ang)-np.sqrt(n*n-np.cos(ang)**2))**2/\
         abs(np.sin(ang)+np.sqrt(n*n-np.cos(ang)**2))**2
    R = np.mean([Rp,Rs],axis=0)
    #Nevot-Croce roughness attenuation
    wave = 1240./energy #wavelength in nm
    k = 2*np.pi/wave
    strehl = np.exp(-4*k**2*np.sin(ang)**2*rough**2)
    return R*strehl
def traceWSShell(num,theta,r0,z0,phigh,plow,shigh,slow,\
                 energy,rough,chaseFocus=False,bestFocus=False):
    """Trace a WS mirror pair with 10 m focal length and
    mirror axial cutoffs defined by phigh,plow

    num        -- number of source rays in the annulus
    theta      -- pointing error applied to the rays (radians)
    r0, z0     -- shell intersection radius and axial position
    phigh,plow -- axial cutoffs of the primary mirror
    shigh,slow -- axial cutoffs of the secondary mirror
    energy     -- photon energy in eV (passed to CXCreflIr)
    rough      -- RMS roughness in nm (passed to CXCreflIr)

    NOTE(review): the early-exit paths return a 3-tuple (0.,0.,0.) while
    the final return is a 2-tuple (refl1*refl2, rays); callers that unpack
    a fixed number of values will fail on one of the two paths. Also,
    chaseFocus/bestFocus are accepted but the corresponding code below is
    commented out, so they currently have no effect.
    """
    #Define annulus of source rays
    a,p,d,e = con.woltparam(r0,z0)
    r1 = PT.wsPrimRad(plow,1.,r0,z0)#np.tan(a/2.)*(plow-10000.) + r0
    r2 = PT.wsPrimRad(phigh,1.,r0,z0)#np.tan(a/2.)*(phigh-10000.) + r0
    rays = PT.annulus(r1,r2,num)
    PT.transform(rays,0,0,0,np.pi,0,0)
    PT.transform(rays,0,0,z0,0,0,0)
    #Trace to primary
    PT.wsPrimary(rays,r0,z0,1.)
    #Handle vignetting
    ind = np.logical_and(rays[3]<phigh,rays[3]>plow)
    rays = PT.vignette(rays,ind=ind)
    #Vignette rays hitting backside of mirror
    #(dot = direction . surface normal; negative means front-side hit)
    dot = rays[4]*rays[7]+rays[5]*rays[8]+rays[6]*rays[9]
    ind = dot < 0.
    rays = PT.vignette(rays,ind=ind)
    #If all rays are vignetted, return
    if np.size(rays[1]) < 1:
        return 0.,0.,0.
    #Apply pointing error by tilting the direction cosines (l gets sin(theta),
    #n is renormalized so l^2+m^2+n^2 stays 1)
    rays = [rays[0],rays[1],rays[2],rays[3],\
            rays[4]+np.sin(theta),rays[5],-np.sqrt(1-np.sin(theta)**2),\
            rays[7],rays[8],rays[9]]
##    PT.l = PT.l + np.sin(theta)
##    PT.n = -np.sqrt(1 - PT.l**2)
    #Reflect
    PT.reflect()
    #Compute mean incidence angle for reflectivity
    ang = np.abs(np.mean(np.arcsin(dot))) #radians
    refl1 = CXCreflIr(ang,energy,rough)
    #Total rays entering primary aperture
    N1 = np.size(rays[1])
    #Trace to secondary
    #NOTE(review): unlike wsPrimary above, no rays argument is passed here --
    #presumably PT keeps module-level ray state; confirm against PyTrace.
    PT.wsSecondary(r0,z0,1.)
    #Vignette anything outside the physical range of the mirror
    ind = np.logical_and(rays[3]>slow,rays[3]<shigh)
    rays = PT.vignette(rays,ind=ind)
    #Vignette anything hitting the backside
    dot = rays[4]*rays[7]+rays[5]*rays[8]+rays[6]*rays[9]
    ind = dot < 0.
    rays = PT.vignette(rays,ind=ind)
    if np.size(rays[1]) < 1:
        return 0.,0.,0.
    PT.reflect()
    #Compute mean incidence angle for reflectivity
    ang = np.abs(np.mean(np.arcsin(dot))) #radians
    refl2 = CXCreflIr(ang,energy,rough)
    #Trace to focal plane
    rays = PT.flat(rays)
##    #Find Chase focus
##    delta = 0.
##    if chaseFocus or bestFocus:
##        cx,cy = PT.centroid()
##        r = np.sqrt(cx**2+cy**2)
##        delta = .0625*(1.+1)*(r**2*(phigh-plow)/10000.**2)\
##                *(1/np.tan(a))**2
##        PT.transform(0,0,delta,0,0,0)
##        PT.flat()
##
##    #Find best focus
##    delta2 = 0.
##    delta3 = 0.
##    if bestFocus:
##        try:
##            tran.focusI(rays,weights=
##        except:
##            pdb.set_trace()
##        PT.flat()

    return refl1*refl2,rays
    #return PT.hpd(), PT.rmsCentroid(), delta
def evaluateShell(theta,alpha):
    """Compute vignetting factor and HPD as a function of
    pointing error. Supply pointing errors theta and intersection
    radius of shell. Assumption is 200 mm long segments.

    NOTE(review): traceWSShell returns (refl, rays) on success but this
    loop unpacks three values; the unpacking will raise a ValueError
    unless every shell is fully vignetted (the 3-tuple early return).
    The chaseFocus=True path in traceWSShell is also commented out, so
    that flag is currently a no-op.
    """
    #alpha is the graze angle; convert to intersection radius for a 10 m focus
    r0 = 10000.*np.tan(alpha)
    hpd = np.zeros(np.size(theta))
    rms = np.copy(hpd)
    delta = np.copy(hpd)
    a,p,d,e = con.woltparam(r0,10000.)
    for t in theta:
        #The boolean mask t==theta selects the output slot for this pointing error
        hpd[t==theta],rms[t==theta],delta[t==theta] = \
            traceWSShell(10000,t,r0,10000.,11000.,\
                         10000.,10000.,8000.,1000.,.5,\
                         chaseFocus=True)
    return hpd,rms,delta
def SXperformance(theta,energy,rough,bestsurface=False,optsurface=False):
    """Go through a SMART-X prescription file and compute
    area weighted performance for a flat focal plane

    theta  -- array of pointing errors to evaluate
    energy -- photon energy in eV
    rough  -- RMS roughness in nm
    bestsurface -- refocus rays on a best-fit surface (uses tran/surf)
    optsurface  -- trace to an empirically chosen conic surface instead

    Returns (hpdTelescope, rmsTelescope, delta, cent, plate).

    NOTE(review): this module is Python 2 (print statements below). The
    names anal, tran and surf are not imported in the visible header --
    confirm where they come from before running. The returned ``delta``
    array is allocated but never written, and ``plate`` holds only the
    last pointing error's values.
    """
    #Load in rx data
##    rx = np.transpose(np.genfromtxt('/home/rallured/Dropbox/AXRO/WSTracing/'
##                        'mirror-design-260sh-200mmlong-040mmthick'
##                        '-3mdiam-10mfl-10arcmin-fov-planarIntersept032713.csv',\
##                        delimiter=','))
    rx = np.transpose(np.genfromtxt('/home/rallured/Dropbox/AXRO/WSTracing/'
                                    '150528_Pauls_Rx.csv',delimiter=','))
    geo = np.transpose(np.genfromtxt('/home/rallured/Dropbox/AXRO/WSTracing/'
                                     'geometric_transmission_102711.txt'))
    therm = np.transpose(np.genfromtxt('/home/rallured/Dropbox/AXRO/'
                                       'WSTracing/thermal_shield_transmission_102711.txt'))
    #Place shell nodes on a sphere about the focus
    f = np.sqrt(rx[1][-1]**2+10000.**2)
    z = np.sqrt(f**2-rx[1]**2) #spherical
##    z = np.repeat(10000.,np.size(rx[1]))
##    ind = rx[0] > 210.
##    rx = rx[:,ind]
    Ns = np.shape(rx)[1]
    #Loop through and compute a resolution and a weight for each shell
    hpdTelescope = np.zeros(np.size(theta))
    rmsTelescope = np.zeros(np.size(theta))
    delta = np.zeros(np.size(theta))
    cent = np.zeros(np.size(theta))
    platefrac = np.zeros(np.size(theta))
    #fig = plt.figure()
    for t in theta[:]:
        #Accumulators for photon positions/directions over all shells
        xi = np.array([])
        yi = np.array([])
        l = np.array([])
        m = np.array([])
        n = np.array([])
        weights = np.array([])
        #plt.clf()
        tstart = time.time()
        plate = np.zeros(Ns)
        for s in np.arange(0,Ns):
            #Skip shells with zero geometric transmission
            if geo[1][s] > 0.:
                sys.stdout.write('Shell: %03i \r' % s)
                sys.stdout.flush()
                r,rays = traceWSShell(1000,t,rx[1][s],z[s],z[s]+225.,z[s]+25.,\
                                      z[s]-25.,z[s]-225.,energy,rough)
                r = r*geo[1][s]*rx[9][s] #Reflectivity*area*alignmentbars*vign
                #Account for thermal shield in shells 220-321
                if s > 219:
                    r = r * therm[1][np.abs(energy/1000.-therm[0]).argmin()]
                #Every surviving ray from this shell carries the same weight
                r = np.repeat(r,np.size(rays[1]))
                weights = np.append(weights,r)
                PT.conic(1107.799202,-1.)
                xi = np.append(xi,rays[1])
                yi = np.append(yi,rays[2])
                l = np.append(l,rays[4])
                m = np.append(m,rays[5])
                n = np.append(n,rays[6])
                if s%10==0:
                    plt.plot(rays[1][:100],rays[2][:100],'.')
                plate[s] = anal.centroid(rays,weights=r)[0]
        print time.time()-tstart
        #Have list of photon positions and weights
        #Need to compute centroid and then FoM
        #Normalize weights
        weights = weights/np.sum(weights)
        xi = np.array(xi,order='F')
        yi = np.array(yi,order='F')
        zi = np.zeros(np.size(xi)).astype('float')
        li = np.array(l,order='F')
        mi = np.array(m,order='F')
        ni = np.array(n,order='F')
        uxi = np.zeros(np.size(xi)).astype('float')
        uyi = np.zeros(np.size(xi)).astype('float')
        uzi = np.zeros(np.size(xi)).astype('float')
        #Reassemble a full 10-component ray list from the accumulators
        rays = [np.zeros(np.size(xi)).astype('float'),\
                xi,yi,zi,\
                li,mi,ni,\
                uxi,uyi,uzi]
        if bestsurface:
            rays = tran.transform(rays,0,0,.25,0,0,0)
            surf.focusI(rays,weights=weights)
        if optsurface:
            PT.conic(1107.799202,-1.) #Emprically found best surface 1128.058314
        #Compute FoM (divide by the 10 m focal length to get angular size)
        rmsTelescope[t==theta] = PT.rmsCentroid(weights=weights)/10000.
        hpdTelescope[t==theta] = PT.hpd(weights=weights)/10000.
        cx,cy = PT.centroid()
        cent[t==theta] = cx
        ind = geo[1] > 0.
        platefrac[t==theta] = np.std(plate[ind]/1e4)/rmsTelescope[t==theta]
        print hpdTelescope[t==theta],rmsTelescope[t==theta]
    return hpdTelescope,rmsTelescope,delta,cent,plate
def sphericalNodes(rin,z0,fov,Nshells,N):
    """This function will iteratively scan node positions
    about a sphere around the focus. Node will start in obvious
    vignetting position. Extreme rays will be traced including
    FoV. Node will be nudged outward until vignetting no longer
    occurs. Node will then be moved by the designated mechanical
    gap. Then the next node is traced in the same fashion.
    Assumptions: 50 mm symmetric gap

    rin, z0 -- radius/axial position of the previous (inner) shell
    fov     -- field of view in arcminutes (converted to radians below)
    Nshells -- number of node positions to generate
    N       -- number of rays per test trace

    Returns (rlist, zlist) of node radii and axial positions.

    NOTE(review): Python 2 print statements; the -fov trace below
    duplicates the +fov trace verbatim (candidate for a helper). The
    routine manipulates PT module-level ray state (PT.x/z/l/n) directly.
    """
    #Bookkeeping parameters
    f = np.sqrt(rin**2+z0**2)
    fov = fov/60.*np.pi/180. #fov to radians
    zlist = []
    rlist = []
    for i in range(Nshells):
        #Starting radius for next shell node
        rstart = PT.wsPrimRad(z0+225.,1.,rin,z0)
        #Reduce rstart until vignetting is reached
        flag = 0
        while flag==0:
            #Keep the node on the sphere of radius f about the focus
            zstart = np.sqrt(f**2-rstart**2)
            #Set up rays
            r1 = PT.wsPrimRad(zstart+25.,1.,rstart,zstart)
            r2 = PT.wsPrimRad(zstart+225.,1.,rstart,zstart)
            PT.pointsource(0.,N)
            PT.z = np.repeat(10500.,N)
            PT.x = np.linspace(r1,r2,N)
            PT.n = np.repeat(-1.,N)
            #Perform trace and add FoV deflections to rays
            PT.wsPrimary(rstart,zstart,1.)
            PT.l = np.repeat(np.sin(fov),N)
            PT.n = -np.sqrt(1 - PT.l**2)
            #Verify that rays do not hit prior primary
            PT.wsPrimary(rin,z0,1.)
            if np.sum(PT.z<z0+225.) != 0:
                #Ray has hit
                print 'Ray hits prior primary!'
                flag = 1
            #Verify that rays do not hit prior secondary
            PT.wsPrimary(rstart,zstart,1.)
            PT.reflect()
            PT.wsSecondary(rstart,zstart,1.)
            PT.reflect()
            PT.wsSecondary(rin,z0,1.)
            if np.sum(PT.z > z0-225.) != 0:
                print 'Ray hits prior secondary!'
                flag = 1
            #Look at other deflection (mirror image of the trace above)
            PT.pointsource(0.,N)
            PT.z = np.repeat(10500.,N)
            PT.x = np.linspace(r1,r2,N)
            PT.n = np.repeat(-1.,N)
            #Perform trace and add FoV deflections to rays
            PT.wsPrimary(rstart,zstart,1.)
            PT.l = np.repeat(-np.sin(fov),N)
            PT.n = -np.sqrt(1 - PT.l**2)
            #Verify that rays do not hit prior primary
            PT.wsPrimary(rin,z0,1.)
            if np.sum(PT.z<z0+225.) != 0:
                #Ray has hit
                print 'Ray hits prior primary!'
                flag = 1
            #Verify that rays do not hit prior secondary
            PT.wsPrimary(rstart,zstart,1.)
            PT.reflect()
            PT.wsSecondary(rstart,zstart,1.)
            PT.reflect()
            PT.wsSecondary(rin,z0,1.)
            if np.sum(PT.z > z0-225.) != 0:
                print 'Ray hits prior secondary!'
                flag = 1
            if flag==0:
                rstart = rstart - .01 #Take off 10 microns
##            sys.stdout.write(str(rstart)+'\n')
##            sys.stdout.flush()
        #Vignetting has been reached, append rstart and zstart
        #to list of node positions
        rlist.append(rstart)
        zlist.append(zstart)
        #This node becomes the inner shell for the next iteration
        rin = rstart
        z0 = zstart
    return rlist,zlist
def rxPlot():
    """Plot the primary/secondary mirror segments of every third shell in
    Paul's SMART-X prescription (nodes on a sphere about a 10 m focus).

    Returns (rx, geo) restricted to shells with nonzero geometric
    transmission."""
    #Load prescription and geometric-transmission tables
    rx = np.transpose(np.genfromtxt('/home/rallured/Dropbox/AXRO/WSTracing/'
                                    '150528_Pauls_Rx.csv',delimiter=','))
    geo = np.transpose(np.genfromtxt('/home/rallured/Dropbox/AXRO/WSTracing/'
                                     'geometric_transmission_102711.txt'))
    #Keep only shells that transmit anything
    open_shells = geo[1] > 0
    rx = rx[:, open_shells]
    geo = geo[1][open_shells]
    #Spherical node placement about the focus
    focal = np.sqrt(rx[1][-1]**2 + 10000.**2)
    z = np.sqrt(focal**2 - rx[1]**2) #spherical
    #Draw every third shell: primary segment above the node, secondary below
    plt.figure('SX')
    plt.clf()
    for i in range(0, len(geo), 3):
        radius = rx[1][i]
        rp1 = con.primrad(z[i]+50., radius, 1e4)
        rp2 = con.primrad(z[i]+250., radius, 1e4)
        rh1 = con.secrad(z[i]-50., radius, 1e4)
        rh2 = con.secrad(z[i]-250., radius, 1e4)
        uplt.isoplot([rp1, rp2], [z[i]+50.-1e4, z[i]+250.-1e4], 'b')
        uplt.isoplot([rh1, rh2], [z[i]-50.-1e4, z[i]-250.-1e4], 'b')
    return rx, geo
| {
"alphanum_fraction": 0.5613132338,
"author": null,
"avg_line_length": 37.5967302452,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "1675dce23e5331b3855dabdffc8b8b02a3ada5b4",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 4,
"max_forks_repo_forks_event_max_datetime": "2019-08-08T15:27:29.000Z",
"max_forks_repo_forks_event_min_datetime": "2017-04-13T17:24:54.000Z",
"max_forks_repo_head_hexsha": "2d6722f0db28c045df35075487f9d4fdfed8b284",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "bddonovan/PyXFocus",
"max_forks_repo_path": "examples/axro/SMARTX.py",
"max_issues_count": 6,
"max_issues_repo_head_hexsha": "2d6722f0db28c045df35075487f9d4fdfed8b284",
"max_issues_repo_issues_event_max_datetime": "2019-04-26T11:13:03.000Z",
"max_issues_repo_issues_event_min_datetime": "2017-11-03T16:13:46.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "bddonovan/PyXFocus",
"max_issues_repo_path": "examples/axro/SMARTX.py",
"max_line_length": 91,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "2d6722f0db28c045df35075487f9d4fdfed8b284",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "bddonovan/PyXFocus",
"max_stars_repo_path": "examples/axro/SMARTX.py",
"max_stars_repo_stars_event_max_datetime": "2018-04-20T15:32:24.000Z",
"max_stars_repo_stars_event_min_datetime": "2018-04-20T15:32:24.000Z",
"num_tokens": 4101,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 13798
} |
"""
Plot test files of the forcings
Notes
-----
Reference : Kay et al. [2014]
Author : Zachary Labe
Date : 1 February 2018
"""
### Import modules
import numpy as np
from netCDF4 import Dataset
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
import nclcmaps as ncm
import datetime
### Define directories
#NOTE(review): absolute paths specific to the original analysis machines
directorydata = '/surtsey/ypeings/'
directoryfigure = '/home/zlabe/Desktop/testseaice/'

### Define time (used only in the banner printed below)
now = datetime.datetime.now()
currentmn = str(now.month)
currentdy = str(now.day)
currentyr = str(now.year)
currenttime = currentmn + '_' + currentdy + '_' + currentyr
titletime = currentmn + '/' + currentdy + '/' + currentyr
print('\n' '----Calculate forcing file SIT constant - %s----' % titletime)

### Read in data -- sea-ice thickness from the LENS forcing file
#(sit is indexed [time, lat, lon] below -- presumably; confirm with the file)
data = Dataset(directorydata + 'SST-SIC-SIT_lens_2051-2080_polar.nc')
lon = data.variables['lon'][:]
lat = data.variables['lat'][:]
sit = data.variables['ice_thick'][:]
data.close()

lons,lats = np.meshgrid(lon,lat)

#### Test Data
### LaTeX text rendering for figure labels
plt.rc('text',usetex=True)
plt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']})
### Plot one orthographic SIT map per time step
for i in range(sit.shape[0]):
    fig = plt.figure()
    ax = plt.subplot(111)
    m = Basemap(projection='ortho',lon_0=300,lat_0=90,resolution='l')
    var = sit[i]

    m.drawmapboundary(fill_color='white')
    m.drawcoastlines(color='darkgrey',linewidth=0.3)
    #Filled SIT contours; the dashed red contour marks latitude 66.6 (Arctic circle)
    cs = m.contourf(lons,lats,var,np.arange(0,5.1,0.1),latlon=True,extend='max')
    cs1 = m.contour(lons,lats,lats,np.arange(66.6,67.6,1),linewidths=1,colors='r',
                    linestyles='--',latlon=True)
    cs.set_cmap('cubehelix')
    m.fillcontinents(color='dimgrey')

    cbar = plt.colorbar(cs,extend='both')
    cbar.set_label(r'\textbf{SIT (m)}')
    ticks = np.arange(0,6,1)
    cbar.set_ticks(ticks)
    cbar.set_ticklabels(list(map(str,ticks)))

    plt.savefig(directoryfigure + 'polar_testplot_%s.png' % i,dpi=300)
    #Close the figure: without this, every time step leaves an open figure
    #behind and matplotlib's memory use grows for the whole run.
    plt.close(fig)
"alphanum_fraction": 0.6420682731,
"author": null,
"avg_line_length": 28.4571428571,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "6c509fb8920a9c5ea7e125c43aa6c64caf88db50",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 4,
"max_forks_repo_forks_event_max_datetime": "2022-03-31T07:05:01.000Z",
"max_forks_repo_forks_event_min_datetime": "2018-04-05T17:55:36.000Z",
"max_forks_repo_head_hexsha": "6defdd897a61d7d1a02f34a9f4ec92b2b17b3075",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "zmlabe/ThicknessSensitivity",
"max_forks_repo_path": "Scripts/plot_forcings_testplot.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "6defdd897a61d7d1a02f34a9f4ec92b2b17b3075",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "zmlabe/ThicknessSensitivity",
"max_issues_repo_path": "Scripts/plot_forcings_testplot.py",
"max_line_length": 82,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "6defdd897a61d7d1a02f34a9f4ec92b2b17b3075",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "zmlabe/ThicknessSensitivity",
"max_stars_repo_path": "Scripts/plot_forcings_testplot.py",
"max_stars_repo_stars_event_max_datetime": "2017-10-22T02:22:14.000Z",
"max_stars_repo_stars_event_min_datetime": "2017-10-22T02:22:14.000Z",
"num_tokens": 578,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 1992
} |
import os
import csv
import sys
import re
from surprise import Dataset
from surprise import Reader
from collections import defaultdict
import numpy as np
class Load_Data:
    """Load the MovieLens-style ratings.csv / movies.csv pair and expose
    the lookups used by the recommender framework: id<->title maps,
    per-user ratings, popularity ranks, genre bitfields and release years.
    """

    # These two dictionaries will be used to fetch id from movieName and
    # vice versa. They are (re)populated by loadRatingDataset().
    movieID_to_name = {}
    name_to_movieID = {}

    # Relative paths for the two dataset .CSV files
    ratingsPath = './dataset/ratings.csv'
    moviesPath = './dataset/movies.csv'

    def loadRatingDataset(self):
        """Load ratings.csv into a surprise Dataset and populate the
        ID->movie and movie->ID dictionaries from movies.csv.

        Returns the surprise Dataset of ratings.
        """
        # Reset the lookup tables on every (re)load
        self.movieID_to_name = {}
        self.name_to_movieID = {}

        # ratings.csv columns: user,item,rating,timestamp; comma separated
        # with one header line to skip
        reader = Reader(line_format='user item rating timestamp',
                        sep=',', skip_lines=1)
        ratingsDataset = Dataset.load_from_file(
            self.ratingsPath, reader=reader)

        # movies.csv drives the two id/title dictionaries
        with open(self.moviesPath, newline='', encoding='ISO-8859-1') as csvfile:
            movieReader = csv.reader(csvfile)
            next(movieReader)  # Skip header line
            for row in movieReader:
                movieID = int(row[0])
                movieName = row[1]
                self.movieID_to_name[movieID] = movieName
                self.name_to_movieID[movieName] = movieID

        return ratingsDataset

    def getUserRatings(self, user):
        """Return the list of (movieID, rating) tuples for the given user id."""
        userRatings = []
        hitUser = False  # becomes True once the user's block is reached
        with open(self.ratingsPath, newline='') as csvfile:
            ratingReader = csv.reader(csvfile)
            next(ratingReader)  # skip header
            for row in ratingReader:
                userID = int(row[0])
                if user == userID:
                    userRatings.append((int(row[1]), float(row[2])))
                    hitUser = True
                elif hitUser:
                    # ratings.csv is grouped by user, so once we leave the
                    # user's block we can stop scanning the file.
                    break
        return userRatings

    def getPopularityRanks(self):
        """Rank movies by their rating counts.

        Returns a defaultdict(int) mapping movieID -> rank, where rank 1
        is the most-rated movie.
        """
        ratings = defaultdict(int)
        rankings = defaultdict(int)

        # Count how many ratings each movie received
        with open(self.ratingsPath, newline='') as csvfile:
            ratingReader = csv.reader(csvfile)
            next(ratingReader)
            for row in ratingReader:
                ratings[int(row[1])] += 1

        # Assign 1-based ranks in descending order of rating count
        ordered = sorted(ratings.items(), key=lambda x: x[1], reverse=True)
        for rank, (movieID, _count) in enumerate(ordered, start=1):
            rankings[movieID] = rank
        return rankings

    def getGenres(self):
        """Return {movieID: genre bitfield}, each bitfield a list of 0/1
        flags over all genres encountered in movies.csv."""
        genres = defaultdict(list)
        genreIDs = {}   # genre name -> integer id, assigned on first sight
        maxGenreID = 0
        with open(self.moviesPath, newline='', encoding='ISO-8859-1') as csvfile:
            movieReader = csv.reader(csvfile)
            next(movieReader)  # Skip header line
            for row in movieReader:
                movieID = int(row[0])
                genreIDList = []
                for genre in row[2].split('|'):
                    if genre not in genreIDs:
                        # First time we see this genre: give it the next id
                        genreIDs[genre] = maxGenreID
                        maxGenreID += 1
                    genreIDList.append(genreIDs[genre])
                genres[movieID] = genreIDList

        # Convert integer-encoded genre lists to bitfields that we can treat as vectors
        for (movieID, genreIDList) in genres.items():
            bitfield = [0] * maxGenreID
            for genreID in genreIDList:
                bitfield[genreID] = 1
            genres[movieID] = bitfield

        return genres

    def getYears(self):
        """Return {movieID: release year} parsed from the trailing
        "(YYYY)" in the title; movies without a year are omitted."""
        # The whole group is optional, so search() always matches
        p = re.compile(r"(?:\((\d{4})\))?\s*$")
        years = defaultdict(int)
        with open(self.moviesPath, newline='', encoding='ISO-8859-1') as csvfile:
            movieReader = csv.reader(csvfile)
            next(movieReader)
            for row in movieReader:
                movieID = int(row[0])
                year = p.search(row[1]).group(1)
                if year:
                    years[movieID] = int(year)
        return years

    def getMovieName(self, movieID):
        """Return the movie name for movieID, or "" when unknown."""
        return self.movieID_to_name.get(movieID, "")

    def getMovieID(self, movieName):
        """Return the movieID for movieName, or 0 when unknown."""
        return self.name_to_movieID.get(movieName, 0)
| {
"alphanum_fraction": 0.5611558036,
"author": null,
"avg_line_length": 33.6519337017,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "39dfe61ad92b6d66293fcdbd3a23043358782c03",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "dd58b767e98b3b6b8aff0a2f48c5fc037293025d",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "ShubhankSinghal/Movie-Recommendation-System",
"max_forks_repo_path": "framework/load_data.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "dd58b767e98b3b6b8aff0a2f48c5fc037293025d",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "ShubhankSinghal/Movie-Recommendation-System",
"max_issues_repo_path": "framework/load_data.py",
"max_line_length": 117,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "dd58b767e98b3b6b8aff0a2f48c5fc037293025d",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "ShubhankSinghal/Movie-Recommendation-System",
"max_stars_repo_path": "framework/load_data.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1296,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 6091
} |
/* Copyright (C) 2014 InfiniDB, Inc.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; version 2 of
the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA. */
/******************************************************************************************
* $Id$
*
******************************************************************************************/
#include "mcsconfig.h"
#include <string>
#include <boost/algorithm/string.hpp>
#include <stdexcept>
#include <libxml/xmlmemory.h>
#include <libxml/parser.h>
#include <vector>
using namespace std;
#include "xmlparser.h"
namespace config
{
/* Return the text content of the first <name> element under the first
 * case-insensitively matching <section> of the document. Returns an
 * empty string when the section/name is absent or the element is empty. */
const string XMLParser::getConfig(const xmlDocPtr doc, const string& section, const string& name) const
{
  string res;
  xmlNodePtr cur1 = xmlDocGetRootElement(doc);

  if (cur1 == NULL)
    throw runtime_error("XMLParser::getConfig: error accessing XML root");

  cur1 = cur1->xmlChildrenNode;

  while (cur1 != NULL)
  {
    string cur1name = (const char*)cur1->name;

    if ((boost::iequals(cur1name, section)))
    {
      // Found the section; scan its children for the named element
      xmlNodePtr cur2 = cur1->xmlChildrenNode;

      while (cur2 != NULL)
      {
        string cur2name = (const char*)cur2->name;

        if ((boost::iequals(cur2name, name)))
        {
          // The element's text lives in its first child node (if any)
          xmlNodePtr cur3 = cur2->xmlChildrenNode;

          if (cur3)
            res = (const char*)cur3->content;

          return res;
        }

        cur2 = cur2->next;
      }
    }

    cur1 = cur1->next;
  }

  // maybe nullstr if not found
  return res;
}
/* Append the text content of every <name> element under every
 * case-insensitively matching <section> to `values`. Empty elements
 * contribute an empty string; nothing is appended when no match exists. */
void XMLParser::getConfig(const xmlDocPtr doc, const string& section, const string& name, vector<string>& values) const
{
  string res;
  xmlNodePtr cur1 = xmlDocGetRootElement(doc);

  if (cur1 == NULL)
    throw runtime_error("XMLParser::getConfig: error accessing XML root");

  cur1 = cur1->xmlChildrenNode;

  while (cur1 != NULL)
  {
    string cur1name = (const char*)cur1->name;

    if ((boost::iequals(cur1name, section)))
    {
      xmlNodePtr cur2 = cur1->xmlChildrenNode;

      while (cur2 != NULL)
      {
        string cur2name = (const char*)cur2->name;

        if ((boost::iequals(cur2name, name)))
        {
          // Clear the scratch string so an empty element yields ""
          res.clear();
          xmlNodePtr cur3 = cur2->xmlChildrenNode;

          if (cur3)
            res = (const char*)cur3->content;

          values.push_back(res);
        }

        cur2 = cur2->next;
      }
    }

    cur1 = cur1->next;
  }
}
/* Set <section><name> to `value`, creating the section and/or element
 * (with tab/newline text nodes for pretty-printing) when they do not
 * already exist. Section/name matching is case-insensitive. */
void XMLParser::setConfig(xmlDocPtr doc, const string& section, const string& name, const string& value)
{
  xmlNodePtr cur1 = xmlDocGetRootElement(doc);

  if (cur1 == NULL)
    throw runtime_error("XMLParser::setConfig: error accessing XML root");

  xmlNodePtr cur2;
  cur1 = cur1->xmlChildrenNode;

  while (cur1 != NULL)
  {
    string cur1name = (const char*)cur1->name;

    if (boost::iequals(cur1name, section))
    {
      cur2 = cur1->xmlChildrenNode;

      while (cur2 != NULL)
      {
        string cur2name = (const char*)cur2->name;

        if (boost::iequals(cur2name, name))
        {
          xmlNodePtr cur3 = cur2->xmlChildrenNode;

          if (cur3 == NULL)
          {
            // Element exists but is empty: create a text child whose
            // content we can overwrite below.
            xmlAddChild(cur2, xmlNewText((const xmlChar*)"\t"));
            cur3 = cur2->xmlChildrenNode;
          }
          else
          {
            // Free the old content before replacing it with a fresh copy
            xmlFree(cur3->content);
          }

          cur3->content = xmlStrdup((const xmlChar*)value.c_str());
          return;
        }

        cur2 = cur2->next;
      }

      // We found the section, but not the name, so we need to add a new node here
      xmlAddChild(cur1, xmlNewText((const xmlChar*)"\t"));
      xmlNewTextChild(cur1, NULL, (const xmlChar*)name.c_str(), (const xmlChar*)value.c_str());
      xmlAddChild(cur1, xmlNewText((const xmlChar*)"\n\t"));
      return;
    }

    cur1 = cur1->next;
  }

  // We did not find the section, so we need to add it and the name here
  cur1 = xmlDocGetRootElement(doc);
  xmlAddChild(cur1, xmlNewText((const xmlChar*)"\t"));
  cur2 = xmlNewChild(cur1, NULL, (const xmlChar*)section.c_str(), NULL);
  xmlAddChild(cur2, xmlNewText((const xmlChar*)"\n\t\t"));
  xmlNewTextChild(cur2, NULL, (const xmlChar*)name.c_str(), (const xmlChar*)value.c_str());
  xmlAddChild(cur2, xmlNewText((const xmlChar*)"\n\t"));
  xmlAddChild(cur1, xmlNewText((const xmlChar*)"\n"));
  return;
}
/* Remove every <name> element under every case-insensitively matching
 * <section> of the document. Matching nodes are unlinked and freed;
 * missing sections/names are silently ignored. */
void XMLParser::delConfig(xmlDocPtr doc, const string& section, const string& name)
{
  xmlNodePtr cur1 = xmlDocGetRootElement(doc);

  if (cur1 == NULL)
    throw runtime_error("XMLParser::delConfig: error accessing XML root");

  cur1 = cur1->xmlChildrenNode;

  while (cur1 != NULL)
  {
    string cur1name = (const char*)cur1->name;

    if ((boost::iequals(cur1name, section)))
    {
      xmlNodePtr cur2 = cur1->xmlChildrenNode;

      while (cur2 != NULL)
      {
        // Advance the iterator before a potential free so it stays valid
        xmlNodePtr tmp = cur2;
        cur2 = cur2->next;
        string tmpname = (const char*)tmp->name;

        if ((boost::iequals(tmpname, name)))
        {
          xmlUnlinkNode(tmp);
          xmlFreeNode(tmp);
        }
      }
    }

    cur1 = cur1->next;
  }
}
/* Return the names of all element children of the document root,
 * skipping the "text" and "comment" nodes libxml2 creates for
 * whitespace and XML comments. */
const vector<string> XMLParser::enumConfig(const xmlDocPtr doc) const
{
  vector<string> resv;

  xmlNodePtr root = xmlDocGetRootElement(doc);

  if (root == NULL)
    throw runtime_error("XMLParser::getConfig: error accessing XML root");

  for (xmlNodePtr cur = root->xmlChildrenNode; cur != NULL; cur = cur->next)
  {
    const string nodeName = reinterpret_cast<const char*>(cur->name);

    if (nodeName != "text" && nodeName != "comment")
      resv.push_back(nodeName);
  }

  return resv;
}
/* Return the names of all element children of every matching <section>,
 * skipping "text" and "comment" nodes. The section is now matched
 * case-insensitively via boost::iequals for consistency with
 * getConfig()/setConfig()/delConfig(); previously this method alone
 * used a case-sensitive xmlStrcmp. */
const vector<string> XMLParser::enumSection(const xmlDocPtr doc, const string& section) const
{
  vector<string> resv;
  string res;
  xmlNodePtr cur1 = xmlDocGetRootElement(doc);

  if (cur1 == NULL)
    throw runtime_error("XMLParser::getConfig: error accessing XML root");

  cur1 = cur1->xmlChildrenNode;

  while (cur1 != NULL)
  {
    string cur1name = (const char*)cur1->name;

    // Case-insensitive match, consistent with the other accessors
    if (boost::iequals(cur1name, section))
    {
      xmlNodePtr cur2 = cur1->xmlChildrenNode;

      while (cur2 != NULL)
      {
        res = reinterpret_cast<const char*>(cur2->name);

        if (res != "text" && res != "comment")
          resv.push_back(res);

        cur2 = cur2->next;
      }
    }

    cur1 = cur1->next;
  }

  return resv;
}
} //namespace
// vim:ts=4 sw=4:
| {
"alphanum_fraction": 0.5384113734,
"author": null,
"avg_line_length": 26.4379310345,
"converted": null,
"ext": "cpp",
"file": null,
"hexsha": "7b16af9d54f2789bda1fa57ba5d58d558aaa3e04",
"include": null,
"lang": "C++",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 2,
"max_forks_repo_forks_event_max_datetime": "2022-03-31T06:24:22.000Z",
"max_forks_repo_forks_event_min_datetime": "2022-02-27T14:00:01.000Z",
"max_forks_repo_head_hexsha": "3d5f96dc9e3e4aa255f4e6105489758944d37cc4",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "zettadb/zettalib",
"max_forks_repo_path": "src/vendor/mariadb-10.6.7/storage/columnstore/columnstore/utils/configcpp/xmlparser.cpp",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "3d5f96dc9e3e4aa255f4e6105489758944d37cc4",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "zettadb/zettalib",
"max_issues_repo_path": "src/vendor/mariadb-10.6.7/storage/columnstore/columnstore/utils/configcpp/xmlparser.cpp",
"max_line_length": 119,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "3d5f96dc9e3e4aa255f4e6105489758944d37cc4",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "zettadb/zettalib",
"max_stars_repo_path": "src/vendor/mariadb-10.6.7/storage/columnstore/columnstore/utils/configcpp/xmlparser.cpp",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1819,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 7667
} |
\chapter{{\tt ILUMtx}: Incomplete $LU$ Matrix Object}
\label{chapter:ILUMtx}
\par
The {\tt ILUMtx} object represents an approximate (incomplete)
$(L+I)D(I+U)$, $(U^T+I)D(I+U)$ or $(U^H+I)D(I+U)$ factorization.
It is a very simple object, rows and columns of $L$ and $U$ are
stored as single vectors.
All computations to compute the factorization and to solve linear
systems are performed with sparse BLAS1 kernels.
Presently, the storage scheme is very simple-minded: we use {\tt
malloc()} and {\tt free()} to handle the individual vectors of the
rows and columns of $L$ and $U$.
\par
At present we have one factorization method.
No pivoting is performed.
Rows of $U$ are stored, along with columns of $L$ if the matrix is
nonsymmetric.
If a zero pivot is encountered on the diagonal during the
factorization, the computation stops and returns a nonzero error
code.
(Presently, there is no ``patch-and-go'' functionality.)
An $L_{j,i}$ entry is kept if
$
|L_{j,i} D_{i,i}| \ge \sigma \sqrt{|D_{i,i}| \ |A_{j,j}|},
$
where $\sigma$ is a user supplied drop tolerance,
and similarly for $U_{i,j}$.
Note, if $A_{j,j} = 0$, as is common for KKT matrices,
all $L_{j,i}$ and $U_{i,j}$ entries will be kept.
It is simple to modify the code to use another drop tolerance
criteria, e.g., an absolute tolerance, or one based only on
$|D_{i,i}|$.
We intend to write other factorization methods that will
conform to a user-supplied nonzero structure for the factors.
| {
"alphanum_fraction": 0.7275223061,
"author": null,
"avg_line_length": 41.6285714286,
"converted": null,
"ext": "tex",
"file": null,
"hexsha": "75e6d9c881d78229ef89d723fdb94a273c882d19",
"include": null,
"lang": "TeX",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2019-08-29T18:41:28.000Z",
"max_forks_repo_forks_event_min_datetime": "2019-08-29T18:41:28.000Z",
"max_forks_repo_head_hexsha": "2cb2c434b536eb668ff88bdf82538d22f4f0f711",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "alleindrach/calculix-desktop",
"max_forks_repo_path": "ccx_prool/SPOOLES.2.2/ILUMtx/doc/intro.tex",
"max_issues_count": 4,
"max_issues_repo_head_hexsha": "2cb2c434b536eb668ff88bdf82538d22f4f0f711",
"max_issues_repo_issues_event_max_datetime": "2018-01-25T16:08:31.000Z",
"max_issues_repo_issues_event_min_datetime": "2017-09-21T17:03:55.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "alleindrach/calculix-desktop",
"max_issues_repo_path": "ccx_prool/SPOOLES.2.2/ILUMtx/doc/intro.tex",
"max_line_length": 66,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "2cb2c434b536eb668ff88bdf82538d22f4f0f711",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "alleindrach/calculix-desktop",
"max_stars_repo_path": "ccx_prool/SPOOLES.2.2/ILUMtx/doc/intro.tex",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 424,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 1457
} |
from __future__ import absolute_import, division, print_function, unicode_literals
import glob
import os
import numpy as np
import argparse
import json
import torch
from scipy.io.wavfile import write
from env import AttrDict
from meldataset import MAX_WAV_VALUE
from models import Generator
from time import time
h = None
device = None
def load_checkpoint(filepath, device):
    """Deserialize a torch checkpoint from ``filepath``, mapping tensors to ``device``.

    Returns the raw checkpoint dictionary as saved by torch.save.
    """
    assert os.path.isfile(filepath)
    print("Loading '{}'".format(filepath))
    state = torch.load(filepath, map_location=device)
    print("Complete.")
    return state
def scan_checkpoint(cp_dir, prefix):
    """Return the lexicographically last file in ``cp_dir`` whose name starts
    with ``prefix`` (i.e. the latest checkpoint), or '' when none match.
    """
    matches = glob.glob(os.path.join(cp_dir, prefix + '*'))
    if not matches:
        return ''
    # max() of a non-empty list equals sorted(...)[-1]
    return max(matches)
def inference(a):
    """Vocode every mel-spectrogram file in ``a.input_mels_dir`` into a 16-bit
    wav written under ``a.output_dir``.

    Relies on the module globals ``h`` (hyperparameter dict) and ``device``
    being initialized by main() before this is called.
    """
    net = Generator(h).to(device)
    ckpt = load_checkpoint(a.checkpoint_file, device)
    net.load_state_dict(ckpt['generator'])
    mel_files = os.listdir(a.input_mels_dir)
    os.makedirs(a.output_dir, exist_ok=True)
    net.eval()
    # fold weight-norm parametrization into plain weights for faster inference
    net.remove_weight_norm()
    with torch.no_grad():
        for filename in mel_files:
            print("Loading mel: "+filename)
            mel = torch.FloatTensor(np.load(os.path.join(a.input_mels_dir, filename))).to(device)
            start = time()
            waveform = net(mel).squeeze()
            samples = (waveform * MAX_WAV_VALUE).cpu().numpy().astype('int16')
            print("total time infer: {}".format(time()-start))
            output_file = os.path.join(a.output_dir, os.path.splitext(filename)[0] + '_generated_e2e.wav')
            write(output_file, h.sampling_rate, samples)
            print(output_file)
def main():
    """Parse CLI arguments, load the config stored next to the checkpoint,
    seed RNGs, select the device, and run inference.
    """
    print('Initializing Inference Process..')
    parser = argparse.ArgumentParser()
    parser.add_argument('--input_mels_dir', default='test_mel_files')
    parser.add_argument('--output_dir', default='generated_files_from_mel')
    parser.add_argument('--checkpoint_file', required=True)
    a = parser.parse_args()
    global h, device
    # the config is expected to live in the same directory as the checkpoint
    config_file = os.path.join(os.path.split(a.checkpoint_file)[0], 'config_v1.json')
    with open(config_file) as f:
        h = AttrDict(json.loads(f.read()))
    torch.manual_seed(h.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(h.seed)
        device = torch.device('cuda')
    else:
        device = torch.device('cpu')
    inference(a)
if __name__ == '__main__':
main()
| {
"alphanum_fraction": 0.6678214011,
"author": null,
"avg_line_length": 27.6382978723,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "d99f146b8152df0a30f14afa3a9e06067ed8521c",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "8c5f2c8f3bffe7aca68c576168a12ab3451d09ec",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "dodoproptit99/Multilingual_Text_to_Speech",
"max_forks_repo_path": "hifi_gan/inference_e2e.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "8c5f2c8f3bffe7aca68c576168a12ab3451d09ec",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "dodoproptit99/Multilingual_Text_to_Speech",
"max_issues_repo_path": "hifi_gan/inference_e2e.py",
"max_line_length": 106,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "8c5f2c8f3bffe7aca68c576168a12ab3451d09ec",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "dodoproptit99/Multilingual_Text_to_Speech",
"max_stars_repo_path": "hifi_gan/inference_e2e.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 581,
"path": null,
"reason": "import numpy,from scipy",
"repo": null,
"save_path": null,
"sha": null,
"size": 2598
} |
'''This module contains all functions needed for
the fully handmade neural network, as well as the
model class itself'''
#import pandas as pd
import numpy as np
import seaborn as sns
#from progressbar.bar import ProgressBar
## Functions
def compute_activation (X, activation_type):
    '''Apply the named activation function element-wise.

    Takes a nparray or a single value and returns in the same format.
    For softmax : assuming that X.shape[0]== n_neurons,
    the axis0 of array X is used for computing the normalization.

    Raises ValueError for an unknown activation_type.
    '''
    X=np.array(X)
    if activation_type == 'relu':
        return np.maximum(X,0)
    if activation_type == 'sigmoid':
        return 1/(1+np.exp(-X))
    if activation_type == 'tanh':
        return np.tanh(X)
    if activation_type == 'linear':
        return X
    if activation_type == 'softmax':
        # Subtract the per-column max before exponentiating: softmax is
        # shift-invariant, so the result is mathematically identical, but this
        # prevents np.exp overflowing to inf (and producing nan) for large logits.
        exp_x = np.exp(X - X.max(axis=0, keepdims=True))
        return exp_x / exp_x.sum(axis=0)
    #raise error if unknown type
    raise ValueError(f'Unknown activation type {activation_type}.\
    Supported types : linear, relu, sigmoid, tanh, softmax')
def compute_activation_derivative (layer_output, activation_type):
    '''Derivative of an activation function, expressed from the function's
    own output value.

    For each of the 5 supported activations, f'(x) can be written as a
    simple function of f(x), so only the layer output is needed.
    Takes a nparray or a single value and returns in the same format.
    '''
    out = np.array(layer_output)
    if activation_type == 'relu':
        # slope is 1 where the unit fired, 0 elsewhere
        return (out > 0).astype(int)
    if activation_type == 'linear':
        return np.ones(out.shape)
    if activation_type in ('sigmoid', 'softmax'):
        # f' = f * (1 - f), written as f - f^2
        return out - np.square(out)
    if activation_type == 'tanh':
        return 1 - np.square(out)
    #raise error if unknown type
    raise ValueError(f'Unknown activation type {activation_type}.\
    Supported types : linear, relu, sigmoid, tanh, softmax')
def compute_metric (y, y_pred, metric, loss_derivative=False):
    '''Defining loss and metric functions
    Takes nparrays, lists or a single values
    ## IF loss_derivative==False:
    output: always scalar
    ## IF loss_derivative==True: (True will be ignored for non-loss metrics)
    Computes the partial derivative of the loss function
    with respect to each component of each sample
    output: 2Darray
    n_samples * 1 for binary_crossentropy or single output regression
    n_samples * n_class for categorical_crossentropy
    n_samples * n_features for multifeatures regression)
    '''
    #converting DataFrames, lists or lists of lists to nparray
    y = np.array(y)
    y_pred = np.array(y_pred)
    #deal with 1D inputs to forge a n_samples * 1 2D-array
    if len(y.shape) == 1:
        y = np.expand_dims(y, axis = 1)
    if len(y_pred.shape) == 1:
        y_pred = np.expand_dims(y_pred, axis = 1)
    #raise errors for inconsistent inputs
    if len(y.shape) > 2:
        raise ValueError('y vector dimension too high. Must be 2 max')
    if len(y_pred.shape) > 2:
        raise ValueError('y_pred vector dimension too high. Must be 2 max')
    if y.shape != y_pred.shape:
        raise ValueError(f'unconsistent vectors dimensions during scoring :\
    y.shape= {y.shape} and y_pred.shape= {y_pred.shape}')
    #compute loss funtions (or derivatives if loss_derivative==True)
    if metric == 'mse':
        if not loss_derivative:
            return np.square(y-y_pred).mean()
        # d(mse)/d(y_pred) per element, averaged over all y.size entries
        return 1 / y.size * 2 * (y_pred - y)
    if metric == 'mae':
        if not loss_derivative:
            return np.abs(y-y_pred).mean()
        # (y_pred - y)/|y - y_pred| is the sign of the error (+-1 per element)
        # NOTE(review): divides by zero where y == y_pred — confirm intended
        return 1 / y.size * (y_pred - y) / np.abs(y - y_pred)
    if metric == 'categorical_crossentropy':
        # y is expected one-hot / probability encoded, y_pred strictly positive
        if not loss_derivative:
            return -1 / y.shape[0] * ((y * np.log(y_pred)).sum())
        return -1 / y.shape[0] * (y / y_pred)
    if metric == 'binary_crossentropy':
        if y.shape[1]>1:
            raise ValueError('y vector dimension too high.\
    Must be 1 max for binary_crossentropy')
        if not loss_derivative:
            return -(y*np.log(y_pred)+(1-y)*np.log(1-y_pred)).mean()
        return -1 / y.size * (y / y_pred - (1-y) / (1-y_pred))
    # compute other metrics functions
    #### accuracy, f1-score, recall, etc.. : not implemented yet
    raise ValueError(f'Unknown metric {metric}. Supported types :\
    mse, mae, categorical_crossentropy, binary_crossentropy')
class adam_optimizer():
    '''Adam optimizer state holder.

    Instantiated internally by the .fit() method of the model class each
    time it is triggered. Unlike in Keras, this object should not be
    instanciated by the user.
    '''
    def __init__(self, weights, bias, alpha_init=0.001, beta_1=0.9,
                 beta_2=0.999, epsilon=1e-8):
        self.alpha_init = alpha_init
        self.beta_1 = beta_1
        self.beta_2 = beta_2
        self.epsilon = epsilon
        self.t = 0  # time step, incremented once per get_update call
        # first (m) and second (v) raw moment estimates, one array per layer
        self.m_weights = [np.zeros_like(w) for w in weights]
        self.m_bias = [np.zeros_like(b) for b in bias]
        self.v_weights = self.m_weights.copy()
        self.v_bias = self.m_bias.copy()

    def get_update(self, gradient_weights, gradient_bias):
        '''Return (weights_update, bias_update): the deltas to be added to
        the weights and bias arrays at the end of the train step.'''
        self.t += 1
        # bias-corrected step size for step t
        step = self.alpha_init * np.sqrt(1 - self.beta_2 ** self.t) / (1 - self.beta_1 ** self.t)
        b1, b2 = self.beta_1, self.beta_2
        # exponential moving averages of gradients and squared gradients
        self.m_weights = [b1 * m + (1 - b1) * g
                          for m, g in zip(self.m_weights, gradient_weights)]
        self.m_bias = [b1 * m + (1 - b1) * g
                       for m, g in zip(self.m_bias, gradient_bias)]
        self.v_weights = [b2 * v + (1 - b2) * g ** 2
                          for v, g in zip(self.v_weights, gradient_weights)]
        self.v_bias = [b2 * v + (1 - b2) * g ** 2
                       for v, g in zip(self.v_bias, gradient_bias)]
        # Adam update: -step * m / (sqrt(v) + eps)
        weights_update = [-step * m / (np.sqrt(v) + self.epsilon)
                          for m, v in zip(self.m_weights, self.v_weights)]
        bias_update = [-step * m / (np.sqrt(v) + self.epsilon)
                       for m, v in zip(self.m_bias, self.v_bias)]
        return weights_update, bias_update
class handmade_nn ():
    '''
    hand-made version of neural network
    so far, the possibilities are :
    - layers activation functions :
    'linear', 'relu', 'sigmoid', 'tanh', 'softmax'
    - weights initializers : 'ones', 'glorot_uniform'
    - bias initializers : 'zeros', 'ones'
    - loss functions :
    'mse', 'mae', 'binary_crossentropy', 'categorical_crossentropy'
    - solvers :
    SGD without momentum, and adam (see fit())
    '''
    def __init__ (self, input_dim=0):
        # one entry per layer in each of the following lists
        self.weights=[]           # 2D arrays of shape (n_neurons, previous_dim)
        self.bias=[]              # 1D arrays of shape (n_neurons,)
        self.activation_types=[]  # activation name per layer
        self.input_dim=input_dim  # number of input features (0 = not set yet)
        self.n_layers=0
        self.loss_history=[]      # loss after each epoch, kept across fit() calls
    def set_input_dim (self, input_dim):
        '''manually sets the input_dim attribute of the model instance'''
        self.input_dim = input_dim
    def set_loss (self, loss):
        '''manually sets the loss attribute of the model instance'''
        self.loss = loss
    def add_dense_layer (self, n_neurons, activation_type,
                         weights_initializer='glorot_uniform', bias_initializer='zeros'):
        '''add a dense (fully connected) layer of neurons to the model
        This initializes the weights and bias according to selected initializer type,
        wich are yet implemented directly here'''
        #check if the input_dim is set
        if self.input_dim == 0:
            raise ValueError('input_dim = 0 .\
    Use set_input_dim before creating first layer')
        #get the size of the input os this layer
        if len(self.bias) == 0:
            previous_dim=self.input_dim
        else:
            previous_dim=(self.bias[-1].shape[0])
        #initialize the layer parameters
        if weights_initializer == 'ones':
            self.weights.append(np.ones((n_neurons, previous_dim)))
        elif weights_initializer == 'glorot_uniform':
            # NOTE(review): Glorot uniform normally uses sqrt(6/(fan_in+fan_out));
            # np.square here looks like a bug — confirm against the reference
            limit = np.square(6 / (n_neurons + previous_dim))
            self.weights.append(np.random.uniform(-limit, limit, size = (n_neurons, previous_dim)))
        else:
            raise ValueError(f'Unknown weights initializer {weights_initializer}.\
    Supported types : ones, glorot_uniform')
        if bias_initializer == 'zeros':
            self.bias.append(np.zeros(n_neurons))
        elif bias_initializer == 'ones':
            self.bias.append(np.ones(n_neurons))
        else:
            raise ValueError(f'Unknown bias initializer {bias_initializer}.\
    Supported types : zeros, ones')
        self.activation_types.append(activation_type)
        self.n_layers += 1
        #test the activation type (raises ValueError for unknown names)
        compute_activation(0, activation_type)
    def predict (self, X, keep_hidden_layers=False):
        '''input X : list, list of lists, np array, pd DataFrame
        axis 0 = samples
        axis 1 = features
        ## IF keep_hidden_layers==False:
        output = y_pred: 2D np-array
        axis 0 = samples
        axis 1 = output features, depending of the size of last layer
        ## IF keep_hidden_layers==True:
        outputs = layers_outputs, layers_activation_derivatives
        -output1 = layers_outputs:
        list of 2D np-arrays of outputs of each layer
        len(list)=n_layers+1: 1st element = X itself
        last element = y_pred
        axis 0 = samples
        axis 1 = number of neurons of the layer
        -output2 = layers_activation_derivatives:
        list of 2D np-arrays of d_act/d_input of each layer
        len(list)=n_layers
        axis 0 = samples
        axis 1 = number of neurons of the layer
        '''
        #converting DataFrames, lists or lists of lists to nparray
        X = np.array(X)
        #deal with 1D inputs to forge a 1 * n_features 2D-array
        if len(X.shape) == 1:
            X = np.expand_dims(X, axis = 0)
        #raise errors for unconsistant inputs
        if len(X.shape) > 2:
            raise ValueError('X vector dimension too high. Must be 2 max')
        if X.shape[1] != self.input_dim:
            raise ValueError(f'Unconsistent number of features. \
    The network input_dim is {self.input_dim}')
        #compute the prediction
        layers_outputs = [X]
        layers_activation_derivatives = []
        for layer_index, activation_type in enumerate(self.activation_types):
            # weights @ X.T has shape (n_neurons, n_samples); bias is expanded
            # to (n_neurons, 1) so it broadcasts over samples
            activation_input = np.dot(self.weights[layer_index], X.T)\
                + np.expand_dims(self.bias[layer_index], axis=1)
            # transpose back to (n_samples, n_neurons) for the next layer
            X = compute_activation(activation_input, activation_type).T
            layers_outputs.append(X)
            layers_activation_derivatives.append(\
                compute_activation_derivative(X, activation_type))
        if keep_hidden_layers:
            return layers_outputs, layers_activation_derivatives
        return X
    def score (self, X, y, metric):
        '''use predict method, then compute_metric function'''
        y_pred=self.predict(X)
        return compute_metric(y, y_pred, metric)
    def compute_backpropagation (self, X, y):
        '''This method :
        - executes self.predict(X) WITH keep_hidden_layers
        to keep all intermediate outputs
        - executes compute_metric (y, y_pred, loss) WITH loss_derivative
        - for each layer from last to first : computes loss
        derivatives (aka gradient) with respect to bias and weights
        output 1 : gradient with respect to weights
        (list of 2D arrays
        len(list) = n_layers
        axis 0 = number of neurons of the layer
        axis 1 = number of neurons of the previous layer (or features in the input)
        output 2 : gradient with respect to bias
        (list of 1D arrays)
        len(list) = n_layers
        axis 0 = number of neurons of the layer
        '''
        delta_weights=[]
        delta_bias=[]
        # compute the outputs and the derivatives of each layer
        layers_outputs, layers_activation_derivatives\
            = self.predict(X, keep_hidden_layers = True)
        # compute d_loss/d_ypred
        dloss_doutput = compute_metric (y,
                                        layers_outputs[-1],
                                        self.loss,
                                        loss_derivative = True)
        # walk layers backwards, chaining derivatives layer by layer
        for layer_index in range(self.n_layers-1, -1, -1):
            # compute d_loss/d_input of the layer
            dloss_dinput = dloss_doutput * layers_activation_derivatives[layer_index]
            # compute gradient with respect to weights and bias
            delta_weights.append(np.dot(dloss_dinput.T, layers_outputs[layer_index]))
            delta_bias.append(np.sum(dloss_dinput, axis=0))
        # update dloss_doutput for next propagation
            if layer_index > 0:
                dloss_doutput = np.dot (dloss_dinput, self.weights[layer_index])
        # gradients were appended last-layer-first; restore layer order
        delta_weights.reverse()
        delta_bias.reverse()
        return delta_weights, delta_bias
    def fit (self, X, y, loss=None, learning_rate=0.01,
             batch_size=1, n_epochs=10, verbose=1,
             optimizer_type='sgd',
             alpha_init=0.001, beta_1=0.9,
             beta_2=0.999, epsilon=1e-8):
        '''input X : 2D array or pd DataFrame
        axis 0 = samples
        axis 1 = features
        '''
        if loss:
            self.loss=loss
        if optimizer_type == 'adam':
            optimizer = adam_optimizer (self.weights, self.bias,
                                        alpha_init=alpha_init, beta_1=beta_1,
                                        beta_2=beta_2, epsilon=epsilon)
        X = np.array(X)
        y = np.array(y)
        n_samples = X.shape[0]
        # NOTE(review): trailing samples beyond a whole batch are skipped each epoch
        n_minibatches_per_epoch = int(n_samples / batch_size)
        # the `loss` parameter name is reused below as the running loss value
        loss=self.score(X, y, self.loss)
        if self.loss_history == []:
            self.loss_history.append(loss)
        if verbose>0:
            print(f'initial loss: {self.score(X, y, self.loss)}')
        for epoch_index in range (n_epochs):
            if verbose>1:
                print(f'beginning epoch n°{epoch_index + 1}')
            #progress_batches = ProgressBar()
            #for mini_batch_index in progress_batches(range(n_minibatches_per_epoch)):
            for mini_batch_index in range(n_minibatches_per_epoch):
                gradient_weights, gradient_bias\
                    = self.compute_backpropagation(X[mini_batch_index * batch_size :\
                                                     (mini_batch_index +1) * batch_size],
                                                   y[mini_batch_index * batch_size :\
                                                     (mini_batch_index +1) * batch_size])
                if optimizer_type == 'sgd':
                    #compute the update directly
                    weights_update = [-learning_rate * grad for grad in gradient_weights]
                    bias_update = [-learning_rate * grad for grad in gradient_bias]
                elif optimizer_type == 'adam':
                    #compute the update with the optimizer
                    weights_update, bias_update = optimizer.get_update(gradient_weights,
                                                                      gradient_bias)
                else:
                    raise ValueError(f'unsupported optimizer type {optimizer_type}')
                # updating weights and bias
                self.weights = [w + w_update for w, w_update in zip(self.weights, weights_update)]
                self.bias = [b + b_update for b, b_update in zip(self.bias, bias_update)]
            loss=self.score(X, y, self.loss)
            self.loss_history.append(loss)
            if verbose>1:
                print(f'end of epoch n°{epoch_index + 1}. loss: {self.score(X, y, self.loss)}')
        if verbose==1:
            print(f'final loss: {self.score(X, y, self.loss)}')
    def plot_loss_history(self):
        '''plots the complete loss history of the model since creation,
        including multiple .fit() calls'''
        graph=sns.lineplot(x=range(len(self.loss_history)),y=self.loss_history)
        graph.set(xlabel="epochs", ylabel = "loss")
| {
"alphanum_fraction": 0.5989200095,
"author": null,
"avg_line_length": 39.4660421546,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "190b43ce92ce85f169fa021de133b5c79034758d",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "a3d7c5ca8a325b717ed26e7fbbd7f309df3e5015",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "bdepebhe/handmade-neural-network",
"max_forks_repo_path": "hmnn/lib.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "a3d7c5ca8a325b717ed26e7fbbd7f309df3e5015",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "bdepebhe/handmade-neural-network",
"max_issues_repo_path": "hmnn/lib.py",
"max_line_length": 99,
"max_stars_count": 3,
"max_stars_repo_head_hexsha": "a3d7c5ca8a325b717ed26e7fbbd7f309df3e5015",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "bdepebhe/hand-made-neural-network",
"max_stars_repo_path": "hmnn/lib.py",
"max_stars_repo_stars_event_max_datetime": "2021-09-30T09:35:40.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-01-31T22:04:31.000Z",
"num_tokens": 3749,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 16852
} |
[STATEMENT]
lemma irreflexiveDecisionLess:
shows "(x, x) \<notin> decisionLess"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (x, x) \<notin> decisionLess
[PROOF STEP]
unfolding decisionLess_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (x, x) \<notin> {(l1, l2). isDecision l1 \<and> \<not> isDecision l2}
[PROOF STEP]
by simp | {
"alphanum_fraction": null,
"author": null,
"avg_line_length": null,
"converted": null,
"ext": null,
"file": "SATSolverVerification_SatSolverVerification",
"hexsha": null,
"include": null,
"lang": null,
"length": 2,
"llama_tokens": 150,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": null
} |
"""Locator functions to interact with geographic data"""
import numpy as np
import pandas as pd
import flood_tool.geo as geo
__all__ = ['Tool']
def clean_postcodes(postcodes):
    """
    Takes list or array of postcodes, and returns it in a cleaned numpy array

    Postcodes are normalized to the database's canonical 7-character,
    upper-case format (e.g. "SW7 2AZ", "W6  7HZ").
    """
    postcode_df = pd.DataFrame({'Postcode':postcodes})
    postcode_df['Postcode'] = postcode_df['Postcode'].str.upper()
    # If length is not 7 get rid of spaces. This fixes e.g. "SW19 2AZ" -> "SW192AZ"
    postcode_df['Postcode'] = postcode_df['Postcode'].where(
        postcode_df['Postcode'].str.len() == 7, postcode_df['Postcode'].str.replace(" ", ""))
    # If length is 5 (e.g. "W67HZ") add two spaces in the middle (-> "W6  7HZ")
    postcode_df['Postcode'] = postcode_df['Postcode'].where(
        postcode_df['Postcode'].str.len() != 5,
        postcode_df['Postcode'].str[:2]+ "  " + postcode_df['Postcode'].str[2:])
    # If length is 6 (e.g. "SW72AZ") add a space in the middle (-> "SW7 2AZ")
    postcode_df['Postcode'] = postcode_df['Postcode'].where(
        postcode_df['Postcode'].str.len() != 6,
        postcode_df['Postcode'].str[:3]+ " " + postcode_df['Postcode'].str[3:])
    return postcode_df['Postcode'].to_numpy()
class Tool(object):
    """Class to interact with a postcode database file."""

    def __init__(self, postcode_file=None, risk_file=None, values_file=None):
        """
        Reads postcode and flood risk files and provides a postcode locator service.

        Parameters
        ---------
        postcode_file : str, optional
            Filename of a .csv file containing geographic location data for postcodes.
        risk_file : str, optional
            Filename of a .csv file containing flood risk data.
        values_file : str, optional
            Filename of a .csv file containing property value data for postcodes.
        """
        self.postcode_file = postcode_file
        self.risk_file = risk_file
        self.values_file = values_file
        self.postcode_df = pd.read_csv(self.postcode_file)
        # Make data frame of values & clean the postcodes in them.
        self.values_df = pd.read_csv(self.values_file)
        postcode_arr = self.values_df['Postcode'].to_numpy()
        postcode_arr = clean_postcodes(postcode_arr)
        self.values_df['Postcode'] = postcode_arr
        # Make data frame of risks, add columns to be used in get...flood_probability
        self.risk_df = pd.read_csv(self.risk_file)
        # Bounding box of each risk circle: easting/northing +- radius.
        # These allow a cheap rectangular pre-filter before the exact
        # circle-distance test in get_easting_northing_flood_probability.
        self.risk_df["X_max"] = self.risk_df["X"] + self.risk_df["radius"]
        self.risk_df["X_min"] = self.risk_df["X"] - self.risk_df["radius"]
        self.risk_df["Y_max"] = self.risk_df["Y"] + self.risk_df["radius"]
        self.risk_df["Y_min"] = self.risk_df["Y"] - self.risk_df["radius"]
        # Also add column of radius squared r2 (avoids sqrt in the hot loop)
        self.risk_df["radius_squared"] = np.square(self.risk_df["radius"])

    def get_lat_long(self, postcodes):
        """Get an array of WGS84 (latitude, longitude) pairs from a list of postcodes.

        Parameters
        ----------
        postcodes: sequence of strs
            Ordered sequence of N postcode strings

        Returns
        -------
        ndarray
            Array of Nx2 (latitude, longitude) pairs for the input postcodes.
            Invalid postcodes return [`numpy.nan`, `numpy.nan`].
        """
        # Fix evil postcodes
        postcodes = clean_postcodes(postcodes)
        postcode_df = self.postcode_df
        # NOTE(review): this fills missing values with the *string* 'np.nan',
        # not the float np.nan — looks unintended; verify against callers
        postcode_df = postcode_df.fillna('np.nan')
        postcode_df = postcode_df.set_index('Postcode')
        index_data = postcode_df.loc[postcodes]
        lat = np.array(index_data['Latitude']).T
        lng = np.array(index_data['Longitude']).T
        return np.vstack((lat, lng)).transpose()

    def get_easting_northing_flood_probability(self, easting, northing):
        """Get an array of flood risk probabilities from arrays of eastings and northings.

        Flood risk data is extracted from the Tool flood risk file. Locations
        not in a risk band circle return `Zero`, otherwise returns the name of the
        highest band it sits in.

        Parameters
        ----------
        easting: numpy.ndarray of floats
            OS Eastings of locations of interest
        northing: numpy.ndarray of floats
            Ordered sequence of postcodes

        Returns
        -------
        numpy.ndarray of strs
            numpy array of flood probability bands corresponding to input locations.
        """
        # Read in risk files as pandas dataframe
        risks = self.risk_df
        # default band is "Zero"; '<U8' fits the longest label ("Very Low")
        prob_bands = np.full(np.size(easting), "Zero", dtype='<U8')
        # For each point we get:
        for point, point_east in enumerate(easting):
            point_north = northing[point]
            # Pick the zones where easting_min < easting < easting_max
            zones = risks.loc[(risks.X_max >= point_east) & (risks.X_min <= point_east)]
            # Further reduce these to where northing_min < northing < northing_max
            zones_pot = zones.loc[(zones.Y_max >= point_north) & (zones.Y_min <= point_north)]
            # For each potential zone:
            for i in range(len(zones_pot.index)):
                # Don't bother with further zones if we already know the risk is High
                if prob_bands[point] == "High":
                    break
                row = zones_pot.iloc[i]
                # Squared distance from point to zone (we use squares to avoid square-rooting)
                dist2 = (row.X-point_east)*(row.X-point_east) + (row.Y-point_north)*(row.Y-point_north)
                if dist2 <= row.radius_squared:
                    risk = row.prob_4band
                    current_band = prob_bands[point]
                    # keep the highest band seen so far (High > Medium > Low > Very Low > Zero)
                    if risk == "High":
                        prob_bands[point] = risk
                    elif risk == "Medium" and current_band != "High":
                        prob_bands[point] = risk
                    elif risk == "Low" and (current_band != "High" and current_band != "Medium"):
                        prob_bands[point] = risk
                    elif risk == "Very Low" and current_band == "Zero":
                        prob_bands[point] = "Very Low"
        return prob_bands

    def get_sorted_flood_probability(self, postcodes):
        """Get an array of flood risk probabilities from a sequence of postcodes.

        Probability is ordered High>Medium>Low>Very low>Zero.
        Flood risk data is extracted from the `Tool` flood risk file.

        Parameters
        ----------
        postcodes: sequence of strs
            Ordered sequence of postcodes

        Returns
        -------
        pandas.DataFrame
            Dataframe of flood probabilities indexed by postcode and ordered from `High` to `Zero`,
            then by lexicographic (dictionary) order on postcode. The index is named `Postcode`, the
            data column is named `Probability Band`. Invalid postcodes and duplicates
            are removed.
        """
        # Fix evil postcodes
        postcodes = clean_postcodes(postcodes)
        # Get latitude and longitude
        output = self.get_lat_long(postcodes) # Returns latitude,longitude pairs in an array
        lat_long = pd.DataFrame(
            {'Postcode':postcodes, 'latitude':output[:, 0], 'longitude':output[:, 1]})
        # Delete the wrong format of postcode
        lat_long = lat_long.dropna(how='any')
        latitude = np.array(lat_long.latitude)
        longitude = np.array(lat_long.longitude)
        # Returns Eastings and Northings in an array
        output_2 = geo.get_easting_northing_from_lat_long(latitude, longitude)
        # Returns array of flood risk probabilities
        output_3 = self.get_easting_northing_flood_probability(output_2[0], output_2[1])
        # New column in dataframe containing the probabilities
        lat_long['Probability Band'] = output_3
        # Removing invalid postcodes
        lat_long = lat_long.dropna(how='any')
        # Removing duplicates
        lat_long = lat_long.drop_duplicates(subset='Postcode')
        # Sort by Probability Bands
        # ordered Categorical makes "High" sort before "Zero" when ascending
        lat_long['Probability Band'] = pd.Categorical(
            lat_long['Probability Band'],
            categories=["High", "Medium", "Low", "Very Low", "Zero"], ordered=True)
        #add sort firstly by Probability Band and then sort secondly by Postcode
        lat_long = lat_long.sort_values(by=['Probability Band', 'Postcode'], ascending=[True, True])
        lat_long = lat_long.set_index('Postcode')
        return lat_long # Make Postcode the Index

    def get_flood_cost(self, postcodes):
        """Get an array of estimated cost of a flood event from a sequence of postcodes.

        Parameters
        ----------
        postcodes: sequence of strs
            Ordered collection of postcodes

        Returns
        -------
        numpy.ndarray of floats
            array of floats for the pound sterling cost for the input postcodes.
            Invalid postcodes return `numpy.nan`.
        """
        # Fix evil postcodes
        postcodes = clean_postcodes(postcodes)
        values_df = self.values_df[['Postcode', 'Total Value']]
        values_df = values_df.loc[values_df.Postcode.isin(postcodes)]
        # reindex aligns the result to the input order; missing postcodes become NaN
        values_df = values_df.set_index('Postcode').reindex(postcodes)
        # NOTE(review): fillna(0) means unknown postcodes cost 0, although the
        # docstring says they return numpy.nan — confirm intended
        values_df = values_df.fillna(0)
        return np.array(values_df['Total Value'])

    def get_annual_flood_risk(self, postcodes, probability_bands):
        """Get an array of estimated annual flood risk in pounds sterling per year of a flood
        event from a sequence of postcodes and flood probabilities.

        Parameters
        ----------
        postcodes: sequence of strs
            Ordered collection of postcodes
        probability_bands: sequence of strs
            Ordered collection of flood probabilities

        Returns
        -------
        numpy.ndarray
            array of floats for the annual flood risk in pounds sterling for the input postcodes.
            Invalid postcodes return `numpy.nan`.
        """
        #get cost_value
        cost_value = self.get_flood_cost(postcodes)
        #create Dataframe for replacing corresonding value
        # band name -> annual event probability
        risk_df = pd.DataFrame({'Probability Band': probability_bands})
        total_df = risk_df.replace(
            {'High':0.1, 'Medium': 0.02, 'Low': 0.01, 'Very Low': 0.001, 'Zero': 0})
        pro_ser = np.array(total_df['Probability Band'])
        #compute result
        # risk = probability * cost * 0.05
        # NOTE(review): the 0.05 factor's meaning (e.g. damage fraction) is not
        # documented anywhere visible — confirm its provenance
        annual = pro_ser * cost_value * 0.05
        return annual

    def get_sorted_annual_flood_risk(self, postcodes):
        """Get a sorted pandas DataFrame of flood risks.

        Parameters
        ----------
        postcodes: sequence of strs
            Ordered sequence of postcodes

        Returns
        -------
        pandas.DataFrame
            Dataframe of flood risks indexed by (normalized) postcode and ordered by risk,
            then by lexicographic (dictionary) order on the postcode. The index is named
            `Postcode` and the data column `Flood Risk`.
            Invalid postcodes and duplicates are removed.
        """
        # Fix evil postcodes
        postcodes = clean_postcodes(postcodes)
        # Get lat, long of postcodes
        arr = self.get_lat_long(postcodes)
        lat = arr[:, 0] # Latitude
        lng = arr[:, 1] # Longitude
        # Convert lat, long -> easting, northing
        tem = geo.get_easting_northing_from_lat_long(lat, lng, radians=False)
        eos = tem[0] # Easting
        nos = tem[1] # Northing
        # Get our data frame of postcodes and risks
        prob_band = self.get_easting_northing_flood_probability(eos, nos)
        flood_risk = self.get_annual_flood_risk(postcodes, prob_band)
        risk_df = pd.DataFrame({'Postcode':postcodes, 'Flood Risk':flood_risk})
        # Clean up data frame
        risk_df = risk_df.drop_duplicates()
        risk_df = risk_df.set_index('Postcode')
        # highest risk first, ties broken alphabetically by postcode
        risk_df = risk_df.sort_values(by=['Flood Risk', 'Postcode'], ascending=[False, True])
        return risk_df
| {
"alphanum_fraction": 0.6209789071,
"author": null,
"avg_line_length": 40.2590163934,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "4072cb968c1fe7cb9e4bc5fed16ae082a9179243",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "058e2861346c3e0152d59c8feba99ed0e63790da",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "karimjbacchus/flood_risk",
"max_forks_repo_path": "flood_tool/tool.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "058e2861346c3e0152d59c8feba99ed0e63790da",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "karimjbacchus/flood_risk",
"max_issues_repo_path": "flood_tool/tool.py",
"max_line_length": 103,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "058e2861346c3e0152d59c8feba99ed0e63790da",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "karimjbacchus/flood_risk",
"max_stars_repo_path": "flood_tool/tool.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 2812,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 12279
} |
import numpy as np
from operator import itemgetter
from scipy.linalg import cholesky, cho_solve
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.base import clone
from sklearn.utils import check_random_state
from sklearn.utils.optimize import _check_optimize_result
from .kernels import RBF, WhiteKernel, ConstantKernel as C
class FeatureSelectionGPR(GaussianProcessRegressor):
    """Gaussian process regression with l1-regularized hyperparameter fitting.

    Subclass of :class:`sklearn.gaussian_process.GaussianProcessRegressor`
    that selects features by putting an l1 penalty on the kernel's
    (log-transformed) inner hyperparameters -- presumably the anisotropic RBF
    inverse length scales (``theta[1:-1]``; TODO confirm kernel layout) --
    and solving the penalized marginal-likelihood maximization with ADMM.

    Parameters
    ----------
    kernel : sklearn kernel or None
        Covariance kernel. If None, ``C * RBF(anisotropic) + WhiteKernel``
        is used.
    regularization_l1 : float
        Weight of the l1 penalty on the split variable ``w`` (feature
        selection strength).
    regularization_l2 : float
        Weight of the l2 penalty on ``theta[1:-1]``.
    regularization_noise : float
        Weight of the quadratic penalty on the last hyperparameter
        (``theta[-1]``, the white-noise level).
    alpha : float or array-like
        Value added to the kernel diagonal during fitting.
    rho : float
        ADMM penalty parameter.
    n_restarts_optimizer : int
        Number of *additional* optimizer restarts from random initial theta.
    admm_maxiter : int
        Maximum number of ADMM iterations.
    admm_tol : float
        ADMM primal/dual stopping tolerance.
    normalize_y, copy_X_train, random_state :
        Same meaning as in ``GaussianProcessRegressor``.
    """

    def __init__(self, kernel=None, regularization_l1=1.0, regularization_l2=1e-2, regularization_noise=1e3,
                 *, alpha=1e-10, rho=1.0, n_restarts_optimizer=0, admm_maxiter=100, admm_tol=5e-3,
                 normalize_y=False, copy_X_train=True, random_state=None):
        # Per sklearn convention, __init__ only stores parameters verbatim.
        self.kernel = kernel
        self.regularization_l1 = regularization_l1
        self.regularization_l2 = regularization_l2
        self.regularization_noise = regularization_noise
        self.alpha = alpha
        self.rho = rho
        # The ADMM x-update relies on L-BFGS-B; the optimizer is fixed.
        self.optimizer = "fmin_l_bfgs_b"
        self.n_restarts_optimizer = n_restarts_optimizer
        self.admm_maxiter = admm_maxiter
        self.admm_tol = admm_tol
        self.normalize_y = normalize_y
        self.copy_X_train = copy_X_train
        self.random_state = random_state

    def fit(self, X, y):
        """Fit Gaussian process regression model.

        Hyperparameters are chosen by maximizing the log-marginal likelihood
        penalized with an l1 term, via ADMM with split variable ``w`` and
        scaled dual variable ``u``.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features) or list of object
            Feature vectors or other representations of training data.
        y : array-like of shape (n_samples,) or (n_samples, n_targets)
            Target values

        Returns
        -------
        self : returns an instance of self.
        """
        if self.kernel is None:  # Use an RBF kernel as default
            # NOTE(review): a lower length-scale bound of 0 maps to -inf in
            # sklearn's log-space bounds -- confirm this is intentional.
            self.kernel_ = C(constant_value=1.0, constant_value_bounds=(1e-5, 1e5)) \
                * RBF(
                    length_scale=np.ones((X.shape[1])),
                    length_scale_bounds=(0, 1e5)
                ) \
                + WhiteKernel(noise_level=1.0, noise_level_bounds=(1e-5, 1e5))
        else:
            self.kernel_ = clone(self.kernel)
        self._rng = check_random_state(self.random_state)
        if self.kernel_.requires_vector_input:
            X, y = self._validate_data(X, y, multi_output=True, y_numeric=True,
                                       ensure_2d=True, dtype="numeric")
        else:
            X, y = self._validate_data(X, y, multi_output=True, y_numeric=True,
                                       ensure_2d=False, dtype=None)
        # Normalize target value
        if self.normalize_y:
            self._y_train_mean = np.mean(y, axis=0)
            self._y_train_std = np.std(y, axis=0)
            # Remove mean and make unit variance
            y = (y - self._y_train_mean) / self._y_train_std
        else:
            self._y_train_mean = np.zeros(1)
            self._y_train_std = 1
        # A per-sample alpha must match y; a length-1 array is collapsed
        # to a scalar.
        if np.iterable(self.alpha) \
           and self.alpha.shape[0] != y.shape[0]:
            if self.alpha.shape[0] == 1:
                self.alpha = self.alpha[0]
            else:
                raise ValueError("alpha must be a scalar or an array"
                                 " with same number of entries as y.(%d != %d)"
                                 % (self.alpha.shape[0], y.shape[0]))
        self.X_train_ = np.copy(X) if self.copy_X_train else X
        self.y_train_ = np.copy(y) if self.copy_X_train else y
        if self.optimizer is not None and self.kernel_.n_dims > 0:
            # Choose hyperparameters based on maximizing the log-marginal
            # likelihood (potentially starting from several initial values)
            def shrinkage(x, kappa):
                # Soft-thresholding operator: prox of kappa*|.|_1.
                return np.maximum(0, x - kappa) - np.maximum(0, -x - kappa)

            # First optimize starting from theta specified in kernel
            def ADMM(initial_theta, bounds):
                # x-update: penalized negative log-marginal likelihood in
                # theta; z-update: soft-thresholding of theta[1:-1] + u;
                # u-update: scaled dual ascent.
                _theta = initial_theta
                w = _theta[1:-1]
                w_old = w
                u = np.zeros_like(w)
                _abstol = len(w) * self.admm_tol ** 2
                for _iter in range(self.admm_maxiter):
                    def obj_func(theta, eval_gradient=True):
                        _reg = self.regularization_l1 * np.sum(np.abs(w))
                        _reg += self.regularization_l2 * np.dot(theta[1:-1], theta[1:-1]) * .5
                        # BUGFIX: the noise penalty is a scalar term on the
                        # last hyperparameter, matching its gradient below
                        # (was ``theta[1:-1] ** 2``, an array, which made the
                        # scalar objective an array).
                        _reg += self.regularization_noise * theta[-1] ** 2 * .5
                        _res = theta[1:-1] - w
                        # Scaled augmented Lagrangian: rho*(u.res + |res|^2/2).
                        _aug_dual = self.rho * np.dot(u + _res / 2, _res)
                        if eval_gradient:
                            lml, grad = self.log_marginal_likelihood(
                                theta, eval_gradient=True, clone_kernel=False
                            )
                            grad[1:-1] -= self.rho * (_res + u)
                            grad[1:-1] -= self.regularization_l2 * theta[1:-1]
                            grad[-1] -= self.regularization_noise * theta[-1]
                            # Minimize the negative penalized likelihood.
                            return _reg + _aug_dual - lml, -grad
                        else:
                            return _reg + _aug_dual - self.log_marginal_likelihood(
                                theta, clone_kernel=False
                            )
                    sol = self._constrained_optimization(
                        obj_func,
                        _theta,
                        bounds
                    )
                    _theta = sol[0]
                    w = shrinkage(_theta[1:-1] + u, self.regularization_l1 / self.rho)
                    u += (_theta[1:-1] - w)
                    # Stop when both primal (theta - w) and dual (w - w_old)
                    # residuals are small.
                    if (np.linalg.norm(_theta[1:-1] - w) < _abstol) and \
                       (np.linalg.norm(w - w_old) < _abstol + self.admm_tol * np.linalg.norm(self.rho*u)):
                        print('ADMM stopped after ', _iter+1, 'iterations.')
                        break
                    w_old = w
                return sol

            optima = [
                (
                    ADMM(self.kernel_.theta, self.kernel_.bounds)
                )
            ]
            # Additional runs are performed from log-uniform chosen initial theta
            if self.n_restarts_optimizer > 0:
                if not np.isfinite(self.kernel_.bounds).all():
                    raise ValueError(
                        "Multiple optimizer restarts (n_restarts_optimizer>0) "
                        "requires that all bounds are finite.")
                bounds = self.kernel_.bounds
                # BUGFIX: run exactly ``n_restarts_optimizer`` additional
                # restarts; the original ``while ... > len(optima)-2`` loop
                # ran one restart too many.
                for _ in range(self.n_restarts_optimizer):
                    theta_initial = \
                        self._rng.uniform(bounds[:, 0], bounds[:, 1])
                    optima.append(
                        ADMM(
                            theta_initial, bounds
                        )
                    )
            # Select result from run with minimal (negative) log-marginal
            # likelihood
            lml_values = list(map(itemgetter(1), optima))
            self.kernel_.theta = optima[np.argmin(lml_values)][0]
            self.log_marginal_likelihood_value_ = -np.min(lml_values)
        else:
            self.log_marginal_likelihood_value_ = \
                self.log_marginal_likelihood(self.kernel_.theta,
                                             clone_kernel=False)
        # Precompute quantities required for predictions which are independent
        # of actual query points
        K = self.kernel_(self.X_train_)
        K[np.diag_indices_from(K)] += self.alpha
        try:
            self.L_ = cholesky(K, lower=True)  # Line 2
            # self.L_ changed, self._K_inv needs to be recomputed
            self._K_inv = None
        except np.linalg.LinAlgError as exc:
            exc.args = ("The kernel, %s, is not returning a "
                        "positive definite matrix. Try gradually "
                        "increasing the 'alpha' parameter of your "
                        "GaussianProcessRegressor estimator."
                        % self.kernel_,) + exc.args
            raise
        self.alpha_ = cho_solve((self.L_, True), self.y_train_)  # Line 3
        return self
| {
"alphanum_fraction": 0.5316471015,
"author": null,
"avg_line_length": 44.6648648649,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "85235ae80ec0a24fe581cc88e7b553232cc4aae4",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 3,
"max_forks_repo_forks_event_max_datetime": "2021-09-17T11:33:20.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-10-06T14:37:25.000Z",
"max_forks_repo_head_hexsha": "be3cd5bf9def8ffc11fb5f63b34ac4e428af963c",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "lim271/FeatureSelectionGP",
"max_forks_repo_path": "fsgp/feature_selection_gpr.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "be3cd5bf9def8ffc11fb5f63b34ac4e428af963c",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "lim271/FeatureSelectionGP",
"max_issues_repo_path": "fsgp/feature_selection_gpr.py",
"max_line_length": 108,
"max_stars_count": 3,
"max_stars_repo_head_hexsha": "be3cd5bf9def8ffc11fb5f63b34ac4e428af963c",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "lim271/FeatureSelectionGP",
"max_stars_repo_path": "fsgp/feature_selection_gpr.py",
"max_stars_repo_stars_event_max_datetime": "2022-03-20T13:07:42.000Z",
"max_stars_repo_stars_event_min_datetime": "2022-02-10T07:16:06.000Z",
"num_tokens": 1817,
"path": null,
"reason": "import numpy,from scipy",
"repo": null,
"save_path": null,
"sha": null,
"size": 8263
} |
(* Title: HOL/Algebra/Lattice.thy
Author: Clemens Ballarin, started 7 November 2003
Copyright: Clemens Ballarin
Most congruence rules by Stephan Hohe.
*)
theory Lattice
imports Congruence
begin
section \<open>Orders and Lattices\<close>
subsection \<open>Partial Orders\<close>
record 'a gorder = "'a eq_object" +
le :: "['a, 'a] => bool" (infixl "\<sqsubseteq>\<index>" 50)
locale weak_partial_order = equivalence L for L (structure) +
assumes le_refl [intro, simp]:
"x \<in> carrier L ==> x \<sqsubseteq> x"
and weak_le_antisym [intro]:
"[| x \<sqsubseteq> y; y \<sqsubseteq> x; x \<in> carrier L; y \<in> carrier L |] ==> x .= y"
and le_trans [trans]:
"[| x \<sqsubseteq> y; y \<sqsubseteq> z; x \<in> carrier L; y \<in> carrier L; z \<in> carrier L |] ==> x \<sqsubseteq> z"
and le_cong:
"\<lbrakk> x .= y; z .= w; x \<in> carrier L; y \<in> carrier L; z \<in> carrier L; w \<in> carrier L \<rbrakk> \<Longrightarrow>
x \<sqsubseteq> z \<longleftrightarrow> y \<sqsubseteq> w"
definition
lless :: "[_, 'a, 'a] => bool" (infixl "\<sqsubset>\<index>" 50)
where "x \<sqsubset>\<^bsub>L\<^esub> y \<longleftrightarrow> x \<sqsubseteq>\<^bsub>L\<^esub> y & x .\<noteq>\<^bsub>L\<^esub> y"
subsubsection \<open>The order relation\<close>
context weak_partial_order
begin
lemma le_cong_l [intro, trans]:
"\<lbrakk> x .= y; y \<sqsubseteq> z; x \<in> carrier L; y \<in> carrier L; z \<in> carrier L \<rbrakk> \<Longrightarrow> x \<sqsubseteq> z"
by (auto intro: le_cong [THEN iffD2])
lemma le_cong_r [intro, trans]:
"\<lbrakk> x \<sqsubseteq> y; y .= z; x \<in> carrier L; y \<in> carrier L; z \<in> carrier L \<rbrakk> \<Longrightarrow> x \<sqsubseteq> z"
by (auto intro: le_cong [THEN iffD1])
lemma weak_refl [intro, simp]: "\<lbrakk> x .= y; x \<in> carrier L; y \<in> carrier L \<rbrakk> \<Longrightarrow> x \<sqsubseteq> y"
by (simp add: le_cong_l)
end
lemma weak_llessI:
fixes R (structure)
assumes "x \<sqsubseteq> y" and "~(x .= y)"
shows "x \<sqsubset> y"
using assms unfolding lless_def by simp
lemma lless_imp_le:
fixes R (structure)
assumes "x \<sqsubset> y"
shows "x \<sqsubseteq> y"
using assms unfolding lless_def by simp
lemma weak_lless_imp_not_eq:
fixes R (structure)
assumes "x \<sqsubset> y"
shows "\<not> (x .= y)"
using assms unfolding lless_def by simp
lemma weak_llessE:
fixes R (structure)
assumes p: "x \<sqsubset> y" and e: "\<lbrakk>x \<sqsubseteq> y; \<not> (x .= y)\<rbrakk> \<Longrightarrow> P"
shows "P"
using p by (blast dest: lless_imp_le weak_lless_imp_not_eq e)
lemma (in weak_partial_order) lless_cong_l [trans]:
assumes xx': "x .= x'"
and xy: "x' \<sqsubset> y"
and carr: "x \<in> carrier L" "x' \<in> carrier L" "y \<in> carrier L"
shows "x \<sqsubset> y"
using assms unfolding lless_def by (auto intro: trans sym)
lemma (in weak_partial_order) lless_cong_r [trans]:
assumes xy: "x \<sqsubset> y"
and yy': "y .= y'"
and carr: "x \<in> carrier L" "y \<in> carrier L" "y' \<in> carrier L"
shows "x \<sqsubset> y'"
using assms unfolding lless_def by (auto intro: trans sym) (*slow*)
lemma (in weak_partial_order) lless_antisym:
assumes "a \<in> carrier L" "b \<in> carrier L"
and "a \<sqsubset> b" "b \<sqsubset> a"
shows "P"
using assms
by (elim weak_llessE) auto
lemma (in weak_partial_order) lless_trans [trans]:
assumes "a \<sqsubset> b" "b \<sqsubset> c"
and carr[simp]: "a \<in> carrier L" "b \<in> carrier L" "c \<in> carrier L"
shows "a \<sqsubset> c"
using assms unfolding lless_def by (blast dest: le_trans intro: sym)
subsubsection \<open>Upper and lower bounds of a set\<close>
definition
Upper :: "[_, 'a set] => 'a set"
where "Upper L A = {u. (ALL x. x \<in> A \<inter> carrier L --> x \<sqsubseteq>\<^bsub>L\<^esub> u)} \<inter> carrier L"
definition
Lower :: "[_, 'a set] => 'a set"
where "Lower L A = {l. (ALL x. x \<in> A \<inter> carrier L --> l \<sqsubseteq>\<^bsub>L\<^esub> x)} \<inter> carrier L"
lemma Upper_closed [intro!, simp]:
"Upper L A \<subseteq> carrier L"
by (unfold Upper_def) clarify
lemma Upper_memD [dest]:
fixes L (structure)
shows "[| u \<in> Upper L A; x \<in> A; A \<subseteq> carrier L |] ==> x \<sqsubseteq> u \<and> u \<in> carrier L"
by (unfold Upper_def) blast
lemma (in weak_partial_order) Upper_elemD [dest]:
"[| u .\<in> Upper L A; u \<in> carrier L; x \<in> A; A \<subseteq> carrier L |] ==> x \<sqsubseteq> u"
unfolding Upper_def elem_def
by (blast dest: sym)
lemma Upper_memI:
fixes L (structure)
shows "[| !! y. y \<in> A ==> y \<sqsubseteq> x; x \<in> carrier L |] ==> x \<in> Upper L A"
by (unfold Upper_def) blast
lemma (in weak_partial_order) Upper_elemI:
"[| !! y. y \<in> A ==> y \<sqsubseteq> x; x \<in> carrier L |] ==> x .\<in> Upper L A"
unfolding Upper_def by blast
lemma Upper_antimono:
"A \<subseteq> B ==> Upper L B \<subseteq> Upper L A"
by (unfold Upper_def) blast
lemma (in weak_partial_order) Upper_is_closed [simp]:
"A \<subseteq> carrier L ==> is_closed (Upper L A)"
by (rule is_closedI) (blast intro: Upper_memI)+
lemma (in weak_partial_order) Upper_mem_cong:
assumes a'carr: "a' \<in> carrier L" and Acarr: "A \<subseteq> carrier L"
and aa': "a .= a'"
and aelem: "a \<in> Upper L A"
shows "a' \<in> Upper L A"
proof (rule Upper_memI[OF _ a'carr])
fix y
assume yA: "y \<in> A"
hence "y \<sqsubseteq> a" by (intro Upper_memD[OF aelem, THEN conjunct1] Acarr)
also note aa'
finally
show "y \<sqsubseteq> a'"
by (simp add: a'carr subsetD[OF Acarr yA] subsetD[OF Upper_closed aelem])
qed
lemma (in weak_partial_order) Upper_cong:
assumes Acarr: "A \<subseteq> carrier L" and A'carr: "A' \<subseteq> carrier L"
and AA': "A {.=} A'"
shows "Upper L A = Upper L A'"
unfolding Upper_def
apply rule
apply (rule, clarsimp) defer 1
apply (rule, clarsimp) defer 1
proof -
fix x a'
assume carr: "x \<in> carrier L" "a' \<in> carrier L"
and a'A': "a' \<in> A'"
assume aLxCond[rule_format]: "\<forall>a. a \<in> A \<and> a \<in> carrier L \<longrightarrow> a \<sqsubseteq> x"
from AA' and a'A' have "\<exists>a\<in>A. a' .= a" by (rule set_eqD2)
from this obtain a
where aA: "a \<in> A"
and a'a: "a' .= a"
by auto
note [simp] = subsetD[OF Acarr aA] carr
note a'a
also have "a \<sqsubseteq> x" by (simp add: aLxCond aA)
finally show "a' \<sqsubseteq> x" by simp
next
fix x a
assume carr: "x \<in> carrier L" "a \<in> carrier L"
and aA: "a \<in> A"
assume a'LxCond[rule_format]: "\<forall>a'. a' \<in> A' \<and> a' \<in> carrier L \<longrightarrow> a' \<sqsubseteq> x"
from AA' and aA have "\<exists>a'\<in>A'. a .= a'" by (rule set_eqD1)
from this obtain a'
where a'A': "a' \<in> A'"
and aa': "a .= a'"
by auto
note [simp] = subsetD[OF A'carr a'A'] carr
note aa'
also have "a' \<sqsubseteq> x" by (simp add: a'LxCond a'A')
finally show "a \<sqsubseteq> x" by simp
qed
lemma Lower_closed [intro!, simp]:
"Lower L A \<subseteq> carrier L"
by (unfold Lower_def) clarify
lemma Lower_memD [dest]:
fixes L (structure)
shows "[| l \<in> Lower L A; x \<in> A; A \<subseteq> carrier L |] ==> l \<sqsubseteq> x \<and> l \<in> carrier L"
by (unfold Lower_def) blast
lemma Lower_memI:
fixes L (structure)
shows "[| !! y. y \<in> A ==> x \<sqsubseteq> y; x \<in> carrier L |] ==> x \<in> Lower L A"
by (unfold Lower_def) blast
lemma Lower_antimono:
"A \<subseteq> B ==> Lower L B \<subseteq> Lower L A"
by (unfold Lower_def) blast
lemma (in weak_partial_order) Lower_is_closed [simp]:
"A \<subseteq> carrier L \<Longrightarrow> is_closed (Lower L A)"
by (rule is_closedI) (blast intro: Lower_memI dest: sym)+
lemma (in weak_partial_order) Lower_mem_cong:
assumes a'carr: "a' \<in> carrier L" and Acarr: "A \<subseteq> carrier L"
and aa': "a .= a'"
and aelem: "a \<in> Lower L A"
shows "a' \<in> Lower L A"
using assms Lower_closed[of L A]
by (intro Lower_memI) (blast intro: le_cong_l[OF aa'[symmetric]])
lemma (in weak_partial_order) Lower_cong:
assumes Acarr: "A \<subseteq> carrier L" and A'carr: "A' \<subseteq> carrier L"
and AA': "A {.=} A'"
shows "Lower L A = Lower L A'"
unfolding Lower_def
apply rule
apply clarsimp defer 1
apply clarsimp defer 1
proof -
fix x a'
assume carr: "x \<in> carrier L" "a' \<in> carrier L"
and a'A': "a' \<in> A'"
assume "\<forall>a. a \<in> A \<and> a \<in> carrier L \<longrightarrow> x \<sqsubseteq> a"
hence aLxCond: "\<And>a. \<lbrakk>a \<in> A; a \<in> carrier L\<rbrakk> \<Longrightarrow> x \<sqsubseteq> a" by fast
from AA' and a'A' have "\<exists>a\<in>A. a' .= a" by (rule set_eqD2)
from this obtain a
where aA: "a \<in> A"
and a'a: "a' .= a"
by auto
from aA and subsetD[OF Acarr aA]
have "x \<sqsubseteq> a" by (rule aLxCond)
also note a'a[symmetric]
finally
show "x \<sqsubseteq> a'" by (simp add: carr subsetD[OF Acarr aA])
next
fix x a
assume carr: "x \<in> carrier L" "a \<in> carrier L"
and aA: "a \<in> A"
assume "\<forall>a'. a' \<in> A' \<and> a' \<in> carrier L \<longrightarrow> x \<sqsubseteq> a'"
hence a'LxCond: "\<And>a'. \<lbrakk>a' \<in> A'; a' \<in> carrier L\<rbrakk> \<Longrightarrow> x \<sqsubseteq> a'" by fast+
from AA' and aA have "\<exists>a'\<in>A'. a .= a'" by (rule set_eqD1)
from this obtain a'
where a'A': "a' \<in> A'"
and aa': "a .= a'"
by auto
from a'A' and subsetD[OF A'carr a'A']
have "x \<sqsubseteq> a'" by (rule a'LxCond)
also note aa'[symmetric]
finally show "x \<sqsubseteq> a" by (simp add: carr subsetD[OF A'carr a'A'])
qed
subsubsection \<open>Least and greatest, as predicate\<close>
definition
least :: "[_, 'a, 'a set] => bool"
where "least L l A \<longleftrightarrow> A \<subseteq> carrier L & l \<in> A & (ALL x : A. l \<sqsubseteq>\<^bsub>L\<^esub> x)"
definition
greatest :: "[_, 'a, 'a set] => bool"
where "greatest L g A \<longleftrightarrow> A \<subseteq> carrier L & g \<in> A & (ALL x : A. x \<sqsubseteq>\<^bsub>L\<^esub> g)"
text (in weak_partial_order) \<open>Could weaken these to @{term "l \<in> carrier L \<and> l
.\<in> A"} and @{term "g \<in> carrier L \<and> g .\<in> A"}.\<close>
lemma least_closed [intro, simp]:
"least L l A ==> l \<in> carrier L"
by (unfold least_def) fast
lemma least_mem:
"least L l A ==> l \<in> A"
by (unfold least_def) fast
lemma (in weak_partial_order) weak_least_unique:
"[| least L x A; least L y A |] ==> x .= y"
by (unfold least_def) blast
lemma least_le:
fixes L (structure)
shows "[| least L x A; a \<in> A |] ==> x \<sqsubseteq> a"
by (unfold least_def) fast
lemma (in weak_partial_order) least_cong:
"[| x .= x'; x \<in> carrier L; x' \<in> carrier L; is_closed A |] ==> least L x A = least L x' A"
by (unfold least_def) (auto dest: sym)
text (in weak_partial_order) \<open>@{const least} is not congruent in the second parameter for
@{term "A {.=} A'"}\<close>
lemma (in weak_partial_order) least_Upper_cong_l:
assumes "x .= x'"
and "x \<in> carrier L" "x' \<in> carrier L"
and "A \<subseteq> carrier L"
shows "least L x (Upper L A) = least L x' (Upper L A)"
apply (rule least_cong) using assms by auto
lemma (in weak_partial_order) least_Upper_cong_r:
assumes Acarrs: "A \<subseteq> carrier L" "A' \<subseteq> carrier L" (* unneccessary with current Upper? *)
and AA': "A {.=} A'"
shows "least L x (Upper L A) = least L x (Upper L A')"
apply (subgoal_tac "Upper L A = Upper L A'", simp)
by (rule Upper_cong) fact+
lemma least_UpperI:
fixes L (structure)
assumes above: "!! x. x \<in> A ==> x \<sqsubseteq> s"
and below: "!! y. y \<in> Upper L A ==> s \<sqsubseteq> y"
and L: "A \<subseteq> carrier L" "s \<in> carrier L"
shows "least L s (Upper L A)"
proof -
have "Upper L A \<subseteq> carrier L" by simp
moreover from above L have "s \<in> Upper L A" by (simp add: Upper_def)
moreover from below have "ALL x : Upper L A. s \<sqsubseteq> x" by fast
ultimately show ?thesis by (simp add: least_def)
qed
lemma least_Upper_above:
fixes L (structure)
shows "[| least L s (Upper L A); x \<in> A; A \<subseteq> carrier L |] ==> x \<sqsubseteq> s"
by (unfold least_def) blast
lemma greatest_closed [intro, simp]:
"greatest L l A ==> l \<in> carrier L"
by (unfold greatest_def) fast
lemma greatest_mem:
"greatest L l A ==> l \<in> A"
by (unfold greatest_def) fast
lemma (in weak_partial_order) weak_greatest_unique:
"[| greatest L x A; greatest L y A |] ==> x .= y"
by (unfold greatest_def) blast
lemma greatest_le:
fixes L (structure)
shows "[| greatest L x A; a \<in> A |] ==> a \<sqsubseteq> x"
by (unfold greatest_def) fast
lemma (in weak_partial_order) greatest_cong:
"[| x .= x'; x \<in> carrier L; x' \<in> carrier L; is_closed A |] ==>
greatest L x A = greatest L x' A"
by (unfold greatest_def) (auto dest: sym)
text (in weak_partial_order) \<open>@{const greatest} is not congruent in the second parameter for
@{term "A {.=} A'"}\<close>
lemma (in weak_partial_order) greatest_Lower_cong_l:
assumes "x .= x'"
and "x \<in> carrier L" "x' \<in> carrier L"
and "A \<subseteq> carrier L" (* unneccessary with current Lower *)
shows "greatest L x (Lower L A) = greatest L x' (Lower L A)"
apply (rule greatest_cong) using assms by auto
lemma (in weak_partial_order) greatest_Lower_cong_r:
assumes Acarrs: "A \<subseteq> carrier L" "A' \<subseteq> carrier L"
and AA': "A {.=} A'"
shows "greatest L x (Lower L A) = greatest L x (Lower L A')"
apply (subgoal_tac "Lower L A = Lower L A'", simp)
by (rule Lower_cong) fact+
lemma greatest_LowerI:
fixes L (structure)
assumes below: "!! x. x \<in> A ==> i \<sqsubseteq> x"
and above: "!! y. y \<in> Lower L A ==> y \<sqsubseteq> i"
and L: "A \<subseteq> carrier L" "i \<in> carrier L"
shows "greatest L i (Lower L A)"
proof -
have "Lower L A \<subseteq> carrier L" by simp
moreover from below L have "i \<in> Lower L A" by (simp add: Lower_def)
moreover from above have "ALL x : Lower L A. x \<sqsubseteq> i" by fast
ultimately show ?thesis by (simp add: greatest_def)
qed
lemma greatest_Lower_below:
fixes L (structure)
shows "[| greatest L i (Lower L A); x \<in> A; A \<subseteq> carrier L |] ==> i \<sqsubseteq> x"
by (unfold greatest_def) blast
text \<open>Supremum and infimum\<close>
definition
sup :: "[_, 'a set] => 'a" ("\<Squnion>\<index>_" [90] 90)
where "\<Squnion>\<^bsub>L\<^esub>A = (SOME x. least L x (Upper L A))"
definition
inf :: "[_, 'a set] => 'a" ("\<Sqinter>\<index>_" [90] 90)
where "\<Sqinter>\<^bsub>L\<^esub>A = (SOME x. greatest L x (Lower L A))"
definition
join :: "[_, 'a, 'a] => 'a" (infixl "\<squnion>\<index>" 65)
where "x \<squnion>\<^bsub>L\<^esub> y = \<Squnion>\<^bsub>L\<^esub>{x, y}"
definition
meet :: "[_, 'a, 'a] => 'a" (infixl "\<sqinter>\<index>" 70)
where "x \<sqinter>\<^bsub>L\<^esub> y = \<Sqinter>\<^bsub>L\<^esub>{x, y}"
subsection \<open>Lattices\<close>
locale weak_upper_semilattice = weak_partial_order +
assumes sup_of_two_exists:
"[| x \<in> carrier L; y \<in> carrier L |] ==> EX s. least L s (Upper L {x, y})"
locale weak_lower_semilattice = weak_partial_order +
assumes inf_of_two_exists:
"[| x \<in> carrier L; y \<in> carrier L |] ==> EX s. greatest L s (Lower L {x, y})"
locale weak_lattice = weak_upper_semilattice + weak_lower_semilattice
subsubsection \<open>Supremum\<close>
lemma (in weak_upper_semilattice) joinI:
"[| !!l. least L l (Upper L {x, y}) ==> P l; x \<in> carrier L; y \<in> carrier L |]
==> P (x \<squnion> y)"
proof (unfold join_def sup_def)
assume L: "x \<in> carrier L" "y \<in> carrier L"
and P: "!!l. least L l (Upper L {x, y}) ==> P l"
with sup_of_two_exists obtain s where "least L s (Upper L {x, y})" by fast
with L show "P (SOME l. least L l (Upper L {x, y}))"
by (fast intro: someI2 P)
qed
lemma (in weak_upper_semilattice) join_closed [simp]:
"[| x \<in> carrier L; y \<in> carrier L |] ==> x \<squnion> y \<in> carrier L"
by (rule joinI) (rule least_closed)
lemma (in weak_upper_semilattice) join_cong_l:
assumes carr: "x \<in> carrier L" "x' \<in> carrier L" "y \<in> carrier L"
and xx': "x .= x'"
shows "x \<squnion> y .= x' \<squnion> y"
proof (rule joinI, rule joinI)
fix a b
from xx' carr
have seq: "{x, y} {.=} {x', y}" by (rule set_eq_pairI)
assume leasta: "least L a (Upper L {x, y})"
assume "least L b (Upper L {x', y})"
with carr
have leastb: "least L b (Upper L {x, y})"
by (simp add: least_Upper_cong_r[OF _ _ seq])
from leasta leastb
show "a .= b" by (rule weak_least_unique)
qed (rule carr)+
lemma (in weak_upper_semilattice) join_cong_r:
assumes carr: "x \<in> carrier L" "y \<in> carrier L" "y' \<in> carrier L"
and yy': "y .= y'"
shows "x \<squnion> y .= x \<squnion> y'"
proof (rule joinI, rule joinI)
fix a b
have "{x, y} = {y, x}" by fast
also from carr yy'
have "{y, x} {.=} {y', x}" by (intro set_eq_pairI)
also have "{y', x} = {x, y'}" by fast
finally
have seq: "{x, y} {.=} {x, y'}" .
assume leasta: "least L a (Upper L {x, y})"
assume "least L b (Upper L {x, y'})"
with carr
have leastb: "least L b (Upper L {x, y})"
by (simp add: least_Upper_cong_r[OF _ _ seq])
from leasta leastb
show "a .= b" by (rule weak_least_unique)
qed (rule carr)+
lemma (in weak_partial_order) sup_of_singletonI: (* only reflexivity needed ? *)
"x \<in> carrier L ==> least L x (Upper L {x})"
by (rule least_UpperI) auto
lemma (in weak_partial_order) weak_sup_of_singleton [simp]:
"x \<in> carrier L ==> \<Squnion>{x} .= x"
unfolding sup_def
by (rule someI2) (auto intro: weak_least_unique sup_of_singletonI)
lemma (in weak_partial_order) sup_of_singleton_closed [simp]:
"x \<in> carrier L \<Longrightarrow> \<Squnion>{x} \<in> carrier L"
unfolding sup_def
by (rule someI2) (auto intro: sup_of_singletonI)
text \<open>Condition on \<open>A\<close>: supremum exists.\<close>
lemma (in weak_upper_semilattice) sup_insertI:
"[| !!s. least L s (Upper L (insert x A)) ==> P s;
least L a (Upper L A); x \<in> carrier L; A \<subseteq> carrier L |]
==> P (\<Squnion>(insert x A))"
proof (unfold sup_def)
assume L: "x \<in> carrier L" "A \<subseteq> carrier L"
and P: "!!l. least L l (Upper L (insert x A)) ==> P l"
and least_a: "least L a (Upper L A)"
from L least_a have La: "a \<in> carrier L" by simp
from L sup_of_two_exists least_a
obtain s where least_s: "least L s (Upper L {a, x})" by blast
show "P (SOME l. least L l (Upper L (insert x A)))"
proof (rule someI2)
show "least L s (Upper L (insert x A))"
proof (rule least_UpperI)
fix z
assume "z \<in> insert x A"
then show "z \<sqsubseteq> s"
proof
assume "z = x" then show ?thesis
by (simp add: least_Upper_above [OF least_s] L La)
next
assume "z \<in> A"
with L least_s least_a show ?thesis
by (rule_tac le_trans [where y = a]) (auto dest: least_Upper_above)
qed
next
fix y
assume y: "y \<in> Upper L (insert x A)"
show "s \<sqsubseteq> y"
proof (rule least_le [OF least_s], rule Upper_memI)
fix z
assume z: "z \<in> {a, x}"
then show "z \<sqsubseteq> y"
proof
have y': "y \<in> Upper L A"
apply (rule subsetD [where A = "Upper L (insert x A)"])
apply (rule Upper_antimono)
apply blast
apply (rule y)
done
assume "z = a"
with y' least_a show ?thesis by (fast dest: least_le)
next
assume "z \<in> {x}" (* FIXME "z = x"; declare specific elim rule for "insert x {}" (!?) *)
with y L show ?thesis by blast
qed
qed (rule Upper_closed [THEN subsetD, OF y])
next
from L show "insert x A \<subseteq> carrier L" by simp
from least_s show "s \<in> carrier L" by simp
qed
qed (rule P)
qed
lemma (in weak_upper_semilattice) finite_sup_least:
"[| finite A; A \<subseteq> carrier L; A ~= {} |] ==> least L (\<Squnion>A) (Upper L A)"
proof (induct set: finite)
case empty
then show ?case by simp
next
case (insert x A)
show ?case
proof (cases "A = {}")
case True
with insert show ?thesis
by simp (simp add: least_cong [OF weak_sup_of_singleton] sup_of_singletonI)
(* The above step is hairy; least_cong can make simp loop.
Would want special version of simp to apply least_cong. *)
next
case False
with insert have "least L (\<Squnion>A) (Upper L A)" by simp
with _ show ?thesis
by (rule sup_insertI) (simp_all add: insert [simplified])
qed
qed
lemma (in weak_upper_semilattice) finite_sup_insertI:
assumes P: "!!l. least L l (Upper L (insert x A)) ==> P l"
and xA: "finite A" "x \<in> carrier L" "A \<subseteq> carrier L"
shows "P (\<Squnion>(insert x A))"
proof (cases "A = {}")
case True with P and xA show ?thesis
by (simp add: finite_sup_least)
next
case False with P and xA show ?thesis
by (simp add: sup_insertI finite_sup_least)
qed
lemma (in weak_upper_semilattice) finite_sup_closed [simp]:
"[| finite A; A \<subseteq> carrier L; A ~= {} |] ==> \<Squnion>A \<in> carrier L"
proof (induct set: finite)
case empty then show ?case by simp
next
case insert then show ?case
by - (rule finite_sup_insertI, simp_all)
qed
lemma (in weak_upper_semilattice) join_left:
"[| x \<in> carrier L; y \<in> carrier L |] ==> x \<sqsubseteq> x \<squnion> y"
by (rule joinI [folded join_def]) (blast dest: least_mem)
lemma (in weak_upper_semilattice) join_right:
"[| x \<in> carrier L; y \<in> carrier L |] ==> y \<sqsubseteq> x \<squnion> y"
by (rule joinI [folded join_def]) (blast dest: least_mem)
lemma (in weak_upper_semilattice) sup_of_two_least:
"[| x \<in> carrier L; y \<in> carrier L |] ==> least L (\<Squnion>{x, y}) (Upper L {x, y})"
proof (unfold sup_def)
assume L: "x \<in> carrier L" "y \<in> carrier L"
with sup_of_two_exists obtain s where "least L s (Upper L {x, y})" by fast
with L show "least L (SOME z. least L z (Upper L {x, y})) (Upper L {x, y})"
by (fast intro: someI2 weak_least_unique) (* blast fails *)
qed
lemma (in weak_upper_semilattice) join_le:
assumes sub: "x \<sqsubseteq> z" "y \<sqsubseteq> z"
and x: "x \<in> carrier L" and y: "y \<in> carrier L" and z: "z \<in> carrier L"
shows "x \<squnion> y \<sqsubseteq> z"
proof (rule joinI [OF _ x y])
fix s
assume "least L s (Upper L {x, y})"
with sub z show "s \<sqsubseteq> z" by (fast elim: least_le intro: Upper_memI)
qed
lemma (in weak_upper_semilattice) weak_join_assoc_lemma:
assumes L: "x \<in> carrier L" "y \<in> carrier L" "z \<in> carrier L"
shows "x \<squnion> (y \<squnion> z) .= \<Squnion>{x, y, z}"
proof (rule finite_sup_insertI)
\<comment> \<open>The textbook argument in Jacobson I, p 457\<close>
fix s
assume sup: "least L s (Upper L {x, y, z})"
show "x \<squnion> (y \<squnion> z) .= s"
proof (rule weak_le_antisym)
from sup L show "x \<squnion> (y \<squnion> z) \<sqsubseteq> s"
by (fastforce intro!: join_le elim: least_Upper_above)
next
from sup L show "s \<sqsubseteq> x \<squnion> (y \<squnion> z)"
by (erule_tac least_le)
(blast intro!: Upper_memI intro: le_trans join_left join_right join_closed)
qed (simp_all add: L least_closed [OF sup])
qed (simp_all add: L)
text \<open>Commutativity holds for \<open>=\<close>.\<close>
lemma join_comm:
fixes L (structure)
shows "x \<squnion> y = y \<squnion> x"
by (unfold join_def) (simp add: insert_commute)
lemma (in weak_upper_semilattice) weak_join_assoc:
assumes L: "x \<in> carrier L" "y \<in> carrier L" "z \<in> carrier L"
shows "(x \<squnion> y) \<squnion> z .= x \<squnion> (y \<squnion> z)"
proof -
(* FIXME: could be simplified by improved simp: uniform use of .=,
omit [symmetric] in last step. *)
have "(x \<squnion> y) \<squnion> z = z \<squnion> (x \<squnion> y)" by (simp only: join_comm)
also from L have "... .= \<Squnion>{z, x, y}" by (simp add: weak_join_assoc_lemma)
also from L have "... = \<Squnion>{x, y, z}" by (simp add: insert_commute)
also from L have "... .= x \<squnion> (y \<squnion> z)" by (simp add: weak_join_assoc_lemma [symmetric])
finally show ?thesis by (simp add: L)
qed
subsubsection \<open>Infimum\<close>
lemma (in weak_lower_semilattice) meetI:
"[| !!i. greatest L i (Lower L {x, y}) ==> P i;
x \<in> carrier L; y \<in> carrier L |]
==> P (x \<sqinter> y)"
proof (unfold meet_def inf_def)
assume L: "x \<in> carrier L" "y \<in> carrier L"
and P: "!!g. greatest L g (Lower L {x, y}) ==> P g"
with inf_of_two_exists obtain i where "greatest L i (Lower L {x, y})" by fast
with L show "P (SOME g. greatest L g (Lower L {x, y}))"
by (fast intro: someI2 weak_greatest_unique P)
qed
lemma (in weak_lower_semilattice) meet_closed [simp]:
"[| x \<in> carrier L; y \<in> carrier L |] ==> x \<sqinter> y \<in> carrier L"
by (rule meetI) (rule greatest_closed)
(* Meet is a congruence for the equivalence .= in its left argument:
   x .= x' implies x \<sqinter> y .= x' \<sqinter> y. *)
lemma (in weak_lower_semilattice) meet_cong_l:
assumes carr: "x \<in> carrier L" "x' \<in> carrier L" "y \<in> carrier L"
and xx': "x .= x'"
shows "x \<sqinter> y .= x' \<sqinter> y"
proof (rule meetI, rule meetI)
fix a b
from xx' carr
have seq: "{x, y} {.=} {x', y}" by (rule set_eq_pairI)
assume greatesta: "greatest L a (Lower L {x, y})"
assume "greatest L b (Lower L {x', y})"
with carr
have greatestb: "greatest L b (Lower L {x, y})"
by (simp add: greatest_Lower_cong_r[OF _ _ seq])
from greatesta greatestb
show "a .= b" by (rule weak_greatest_unique)
qed (rule carr)+
(* Meet is a congruence in its right argument; reduces to the left case
   via commutativity of the underlying two-element set. *)
lemma (in weak_lower_semilattice) meet_cong_r:
assumes carr: "x \<in> carrier L" "y \<in> carrier L" "y' \<in> carrier L"
and yy': "y .= y'"
shows "x \<sqinter> y .= x \<sqinter> y'"
proof (rule meetI, rule meetI)
fix a b
have "{x, y} = {y, x}" by fast
also from carr yy'
have "{y, x} {.=} {y', x}" by (intro set_eq_pairI)
also have "{y', x} = {x, y'}" by fast
finally
have seq: "{x, y} {.=} {x, y'}" .
assume greatesta: "greatest L a (Lower L {x, y})"
assume "greatest L b (Lower L {x, y'})"
with carr
have greatestb: "greatest L b (Lower L {x, y})"
by (simp add: greatest_Lower_cong_r[OF _ _ seq])
from greatesta greatestb
show "a .= b" by (rule weak_greatest_unique)
qed (rule carr)+
(* Every element is a greatest lower bound of its own singleton. *)
lemma (in weak_partial_order) inf_of_singletonI: (* only reflexivity needed ? *)
"x \<in> carrier L ==> greatest L x (Lower L {x})"
by (rule greatest_LowerI) auto
(* The infimum of a singleton is the element itself, up to .= . *)
lemma (in weak_partial_order) weak_inf_of_singleton [simp]:
"x \<in> carrier L ==> \<Sqinter>{x} .= x"
unfolding inf_def
by (rule someI2) (auto intro: weak_greatest_unique inf_of_singletonI)
(* Closure of the singleton infimum in the carrier. *)
lemma (in weak_partial_order) inf_of_singleton_closed:
"x \<in> carrier L ==> \<Sqinter>{x} \<in> carrier L"
unfolding inf_def
by (rule someI2) (auto intro: inf_of_singletonI)
text \<open>Condition on \<open>A\<close>: infimum exists.\<close>
(* Step lemma for finite infima: given a greatest lower bound a of A,
   the infimum of (insert x A) is characterised as a greatest lower
   bound of {a, x}, which exists by inf_of_two_exists. *)
lemma (in weak_lower_semilattice) inf_insertI:
"[| !!i. greatest L i (Lower L (insert x A)) ==> P i;
greatest L a (Lower L A); x \<in> carrier L; A \<subseteq> carrier L |]
==> P (\<Sqinter>(insert x A))"
proof (unfold inf_def)
assume L: "x \<in> carrier L" "A \<subseteq> carrier L"
and P: "!!g. greatest L g (Lower L (insert x A)) ==> P g"
and greatest_a: "greatest L a (Lower L A)"
from L greatest_a have La: "a \<in> carrier L" by simp
from L inf_of_two_exists greatest_a
obtain i where greatest_i: "greatest L i (Lower L {a, x})" by blast
show "P (SOME g. greatest L g (Lower L (insert x A)))"
proof (rule someI2)
show "greatest L i (Lower L (insert x A))"
proof (rule greatest_LowerI)
fix z
assume "z \<in> insert x A"
then show "i \<sqsubseteq> z"
proof
assume "z = x" then show ?thesis
by (simp add: greatest_Lower_below [OF greatest_i] L La)
next
assume "z \<in> A"
with L greatest_i greatest_a show ?thesis
by (rule_tac le_trans [where y = a]) (auto dest: greatest_Lower_below)
qed
next
fix y
assume y: "y \<in> Lower L (insert x A)"
show "y \<sqsubseteq> i"
proof (rule greatest_le [OF greatest_i], rule Lower_memI)
fix z
assume z: "z \<in> {a, x}"
then show "y \<sqsubseteq> z"
proof
have y': "y \<in> Lower L A"
apply (rule subsetD [where A = "Lower L (insert x A)"])
apply (rule Lower_antimono)
apply blast
apply (rule y)
done
assume "z = a"
with y' greatest_a show ?thesis by (fast dest: greatest_le)
next
assume "z \<in> {x}"
with y L show ?thesis by blast
qed
qed (rule Lower_closed [THEN subsetD, OF y])
next
from L show "insert x A \<subseteq> carrier L" by simp
from greatest_i show "i \<in> carrier L" by simp
qed
qed (rule P)
qed
(* Every finite non-empty subset of the carrier has \<Sqinter>A as a
   greatest lower bound; proved by induction using inf_insertI. *)
lemma (in weak_lower_semilattice) finite_inf_greatest:
"[| finite A; A \<subseteq> carrier L; A ~= {} |] ==> greatest L (\<Sqinter>A) (Lower L A)"
proof (induct set: finite)
case empty then show ?case by simp
next
case (insert x A)
show ?case
proof (cases "A = {}")
case True
with insert show ?thesis
by simp (simp add: greatest_cong [OF weak_inf_of_singleton]
inf_of_singleton_closed inf_of_singletonI)
next
case False
from insert show ?thesis
proof (rule_tac inf_insertI)
from False insert show "greatest L (\<Sqinter>A) (Lower L A)" by simp
qed simp_all
qed
qed
(* Introduction rule for properties of a finite infimum with one
   distinguished element; dispatches on whether A is empty. *)
lemma (in weak_lower_semilattice) finite_inf_insertI:
assumes P: "!!i. greatest L i (Lower L (insert x A)) ==> P i"
and xA: "finite A" "x \<in> carrier L" "A \<subseteq> carrier L"
shows "P (\<Sqinter>(insert x A))"
proof (cases "A = {}")
case True with P and xA show ?thesis
by (simp add: finite_inf_greatest)
next
case False with P and xA show ?thesis
by (simp add: inf_insertI finite_inf_greatest)
qed
(* Finite non-empty infima stay in the carrier. *)
lemma (in weak_lower_semilattice) finite_inf_closed [simp]:
"[| finite A; A \<subseteq> carrier L; A ~= {} |] ==> \<Sqinter>A \<in> carrier L"
proof (induct set: finite)
case empty then show ?case by simp
next
case insert then show ?case
by (rule_tac finite_inf_insertI) (simp_all)
qed
(* The meet is a lower bound of its left argument ... *)
lemma (in weak_lower_semilattice) meet_left:
"[| x \<in> carrier L; y \<in> carrier L |] ==> x \<sqinter> y \<sqsubseteq> x"
by (rule meetI [folded meet_def]) (blast dest: greatest_mem)
(* ... and of its right argument. *)
lemma (in weak_lower_semilattice) meet_right:
"[| x \<in> carrier L; y \<in> carrier L |] ==> x \<sqinter> y \<sqsubseteq> y"
by (rule meetI [folded meet_def]) (blast dest: greatest_mem)
(* The binary infimum operator \<Sqinter>{x, y} is itself a greatest
   lower bound of {x, y}. *)
lemma (in weak_lower_semilattice) inf_of_two_greatest:
"[| x \<in> carrier L; y \<in> carrier L |] ==>
greatest L (\<Sqinter>{x, y}) (Lower L {x, y})"
proof (unfold inf_def)
assume L: "x \<in> carrier L" "y \<in> carrier L"
with inf_of_two_exists obtain s where "greatest L s (Lower L {x, y})" by fast
with L
show "greatest L (SOME z. greatest L z (Lower L {x, y})) (Lower L {x, y})"
by (fast intro: someI2 weak_greatest_unique) (* blast fails *)
qed
(* The meet is the greatest lower bound: any common lower bound z of x
   and y lies below x \<sqinter> y. *)
lemma (in weak_lower_semilattice) meet_le:
assumes sub: "z \<sqsubseteq> x" "z \<sqsubseteq> y"
and x: "x \<in> carrier L" and y: "y \<in> carrier L" and z: "z \<in> carrier L"
shows "z \<sqsubseteq> x \<sqinter> y"
proof (rule meetI [OF _ x y])
fix i
assume "greatest L i (Lower L {x, y})"
with sub z show "z \<sqsubseteq> i" by (fast elim: greatest_le intro: Lower_memI)
qed
(* Nested meet equals the three-element infimum, up to .=; the dual of
   the join version above (Jacobson I, p 457). *)
lemma (in weak_lower_semilattice) weak_meet_assoc_lemma:
assumes L: "x \<in> carrier L" "y \<in> carrier L" "z \<in> carrier L"
shows "x \<sqinter> (y \<sqinter> z) .= \<Sqinter>{x, y, z}"
proof (rule finite_inf_insertI)
txt \<open>The textbook argument in Jacobson I, p 457\<close>
fix i
assume inf: "greatest L i (Lower L {x, y, z})"
show "x \<sqinter> (y \<sqinter> z) .= i"
proof (rule weak_le_antisym)
from inf L show "i \<sqsubseteq> x \<sqinter> (y \<sqinter> z)"
by (fastforce intro!: meet_le elim: greatest_Lower_below)
next
from inf L show "x \<sqinter> (y \<sqinter> z) \<sqsubseteq> i"
by (erule_tac greatest_le)
(blast intro!: Lower_memI intro: le_trans meet_left meet_right meet_closed)
qed (simp_all add: L greatest_closed [OF inf])
qed (simp_all add: L)
(* Associativity of meet up to .=, derived from the lemma above by
   commutativity and permuting the three-element set. *)
lemma (in weak_lower_semilattice) weak_meet_assoc:
assumes L: "x \<in> carrier L" "y \<in> carrier L" "z \<in> carrier L"
shows "(x \<sqinter> y) \<sqinter> z .= x \<sqinter> (y \<sqinter> z)"
proof -
(* FIXME: improved simp, see weak_join_assoc above *)
have "(x \<sqinter> y) \<sqinter> z = z \<sqinter> (x \<sqinter> y)" by (simp only: meet_comm)
also from L have "... .= \<Sqinter>{z, x, y}" by (simp add: weak_meet_assoc_lemma)
also from L have "... = \<Sqinter>{x, y, z}" by (simp add: insert_commute)
also from L have "... .= x \<sqinter> (y \<sqinter> z)" by (simp add: weak_meet_assoc_lemma [symmetric])
finally show ?thesis by (simp add: L)
qed
subsection \<open>Total Orders\<close>
(* A weak total order: any two carrier elements are comparable. *)
locale weak_total_order = weak_partial_order +
assumes total: "[| x \<in> carrier L; y \<in> carrier L |] ==> x \<sqsubseteq> y | y \<sqsubseteq> x"
text \<open>Introduction rule: the usual definition of total order\<close>
lemma (in weak_partial_order) weak_total_orderI:
assumes total: "!!x y. [| x \<in> carrier L; y \<in> carrier L |] ==> x \<sqsubseteq> y | y \<sqsubseteq> x"
shows "weak_total_order L"
by standard (rule total)
text \<open>Total orders are lattices.\<close>
(* In a total order the larger of x, y is their supremum and the
   smaller their infimum, so both binary bounds always exist. *)
sublocale weak_total_order < weak?: weak_lattice
proof
fix x y
assume L: "x \<in> carrier L" "y \<in> carrier L"
show "EX s. least L s (Upper L {x, y})"
proof -
note total L
moreover
{
assume "x \<sqsubseteq> y"
with L have "least L y (Upper L {x, y})"
by (rule_tac least_UpperI) auto
}
moreover
{
assume "y \<sqsubseteq> x"
with L have "least L x (Upper L {x, y})"
by (rule_tac least_UpperI) auto
}
ultimately show ?thesis by blast
qed
next
fix x y
assume L: "x \<in> carrier L" "y \<in> carrier L"
show "EX i. greatest L i (Lower L {x, y})"
proof -
note total L
moreover
{
assume "y \<sqsubseteq> x"
with L have "greatest L y (Lower L {x, y})"
by (rule_tac greatest_LowerI) auto
}
moreover
{
assume "x \<sqsubseteq> y"
with L have "greatest L x (Lower L {x, y})"
by (rule_tac greatest_LowerI) auto
}
ultimately show ?thesis by blast
qed
qed
subsection \<open>Complete Lattices\<close>
(* A weak complete lattice: suprema and infima exist for arbitrary
   subsets of the carrier, not just pairs. *)
locale weak_complete_lattice = weak_lattice +
assumes sup_exists:
"[| A \<subseteq> carrier L |] ==> EX s. least L s (Upper L A)"
and inf_exists:
"[| A \<subseteq> carrier L |] ==> EX i. greatest L i (Lower L A)"
text \<open>Introduction rule: the usual definition of complete lattice\<close>
lemma (in weak_partial_order) weak_complete_latticeI:
assumes sup_exists:
"!!A. [| A \<subseteq> carrier L |] ==> EX s. least L s (Upper L A)"
and inf_exists:
"!!A. [| A \<subseteq> carrier L |] ==> EX i. greatest L i (Lower L A)"
shows "weak_complete_lattice L"
by standard (auto intro: sup_exists inf_exists)
(* Top element: supremum of the whole carrier. *)
definition
top :: "_ => 'a" ("\<top>\<index>")
where "\<top>\<^bsub>L\<^esub> = sup L (carrier L)"
(* Bottom element: infimum of the whole carrier. *)
definition
bottom :: "_ => 'a" ("\<bottom>\<index>")
where "\<bottom>\<^bsub>L\<^esub> = inf L (carrier L)"
(* Introduction rule for properties of arbitrary suprema, analogous to
   meetI/supI for the binary case. *)
lemma (in weak_complete_lattice) supI:
"[| !!l. least L l (Upper L A) ==> P l; A \<subseteq> carrier L |]
==> P (\<Squnion>A)"
proof (unfold sup_def)
assume L: "A \<subseteq> carrier L"
and P: "!!l. least L l (Upper L A) ==> P l"
with sup_exists obtain s where "least L s (Upper L A)" by blast
with L show "P (SOME l. least L l (Upper L A))"
by (fast intro: someI2 weak_least_unique P)
qed
lemma (in weak_complete_lattice) sup_closed [simp]:
"A \<subseteq> carrier L ==> \<Squnion>A \<in> carrier L"
by (rule supI) simp_all
lemma (in weak_complete_lattice) top_closed [simp, intro]:
"\<top> \<in> carrier L"
by (unfold top_def) simp
(* Dual introduction rule for arbitrary infima. *)
lemma (in weak_complete_lattice) infI:
"[| !!i. greatest L i (Lower L A) ==> P i; A \<subseteq> carrier L |]
==> P (\<Sqinter>A)"
proof (unfold inf_def)
assume L: "A \<subseteq> carrier L"
and P: "!!l. greatest L l (Lower L A) ==> P l"
with inf_exists obtain s where "greatest L s (Lower L A)" by blast
with L show "P (SOME l. greatest L l (Lower L A))"
by (fast intro: someI2 weak_greatest_unique P)
qed
lemma (in weak_complete_lattice) inf_closed [simp]:
"A \<subseteq> carrier L ==> \<Sqinter>A \<in> carrier L"
by (rule infI) simp_all
lemma (in weak_complete_lattice) bottom_closed [simp, intro]:
"\<bottom> \<in> carrier L"
by (unfold bottom_def) simp
text \<open>Jacobson: Theorem 8.1\<close>
(* Bounds of the empty set: every carrier element is vacuously both an
   upper and a lower bound. *)
lemma Lower_empty [simp]:
"Lower L {} = carrier L"
by (unfold Lower_def) simp
lemma Upper_empty [simp]:
"Upper L {} = carrier L"
by (unfold Upper_def) simp
(* Criterion (Jacobson Thm 8.1): a weak partial order with a top element
   and infima for all non-empty subsets is a weak complete lattice.
   Suprema are recovered as the infimum of the set of upper bounds. *)
theorem (in weak_partial_order) weak_complete_lattice_criterion1:
assumes top_exists: "EX g. greatest L g (carrier L)"
and inf_exists:
"!!A. [| A \<subseteq> carrier L; A ~= {} |] ==> EX i. greatest L i (Lower L A)"
shows "weak_complete_lattice L"
proof (rule weak_complete_latticeI)
from top_exists obtain top where top: "greatest L top (carrier L)" ..
fix A
assume L: "A \<subseteq> carrier L"
let ?B = "Upper L A"
(* ?B is non-empty since the top element bounds everything. *)
from L top have "top \<in> ?B" by (fast intro!: Upper_memI intro: greatest_le)
then have B_non_empty: "?B ~= {}" by fast
have B_L: "?B \<subseteq> carrier L" by simp
from inf_exists [OF B_L B_non_empty]
obtain b where b_inf_B: "greatest L b (Lower L ?B)" ..
have "least L b (Upper L A)"
apply (rule least_UpperI)
apply (rule greatest_le [where A = "Lower L ?B"])
apply (rule b_inf_B)
apply (rule Lower_memI)
apply (erule Upper_memD [THEN conjunct1])
apply assumption
apply (rule L)
apply (fast intro: L [THEN subsetD])
apply (erule greatest_Lower_below [OF b_inf_B])
apply simp
apply (rule L)
apply (rule greatest_closed [OF b_inf_B])
done
then show "EX s. least L s (Upper L A)" ..
next
fix A
assume L: "A \<subseteq> carrier L"
show "EX i. greatest L i (Lower L A)"
proof (cases "A = {}")
case True then show ?thesis
by (simp add: top_exists)
next
case False with L show ?thesis
by (rule inf_exists)
qed
qed
(* TODO: prove dual version *)
subsection \<open>Orders and Lattices where \<open>eq\<close> is the Equality\<close>
(* Specialisation of the weak structures: the equivalence .= is plain
   HOL equality, so the weak_* lemmas strengthen to equalities. *)
locale partial_order = weak_partial_order +
assumes eq_is_equal: "op .= = op ="
begin
declare weak_le_antisym [rule del]
(* Antisymmetry with genuine equality in the conclusion. *)
lemma le_antisym [intro]:
"[| x \<sqsubseteq> y; y \<sqsubseteq> x; x \<in> carrier L; y \<in> carrier L |] ==> x = y"
using weak_le_antisym unfolding eq_is_equal .
lemma lless_eq:
"x \<sqsubset> y \<longleftrightarrow> x \<sqsubseteq> y & x \<noteq> y"
unfolding lless_def by (simp add: eq_is_equal)
(* Strict order is asymmetric: a \<sqsubset> b and b \<sqsubset> a are
   contradictory, yielding anything. *)
lemma lless_asym:
assumes "a \<in> carrier L" "b \<in> carrier L"
and "a \<sqsubset> b" "b \<sqsubset> a"
shows "P"
using assms unfolding lless_eq by auto
end
text \<open>Least and greatest, as predicate\<close>
lemma (in partial_order) least_unique:
"[| least L x A; least L y A |] ==> x = y"
using weak_least_unique unfolding eq_is_equal .
lemma (in partial_order) greatest_unique:
"[| greatest L x A; greatest L y A |] ==> x = y"
using weak_greatest_unique unfolding eq_is_equal .
text \<open>Lattices\<close>
(* Semilattices over equality; each is a sublocale of its weak
   counterpart, so all weak lemmas remain available. *)
locale upper_semilattice = partial_order +
assumes sup_of_two_exists:
"[| x \<in> carrier L; y \<in> carrier L |] ==> EX s. least L s (Upper L {x, y})"
sublocale upper_semilattice < weak?: weak_upper_semilattice
by standard (rule sup_of_two_exists)
locale lower_semilattice = partial_order +
assumes inf_of_two_exists:
"[| x \<in> carrier L; y \<in> carrier L |] ==> EX s. greatest L s (Lower L {x, y})"
sublocale lower_semilattice < weak?: weak_lower_semilattice
by standard (rule inf_of_two_exists)
locale lattice = upper_semilattice + lower_semilattice
text \<open>Supremum\<close>
declare (in partial_order) weak_sup_of_singleton [simp del]
(* Singleton supremum collapses to equality under eq_is_equal. *)
lemma (in partial_order) sup_of_singleton [simp]:
"x \<in> carrier L ==> \<Squnion>{x} = x"
using weak_sup_of_singleton unfolding eq_is_equal .
lemma (in upper_semilattice) join_assoc_lemma:
assumes L: "x \<in> carrier L" "y \<in> carrier L" "z \<in> carrier L"
shows "x \<squnion> (y \<squnion> z) = \<Squnion>{x, y, z}"
using weak_join_assoc_lemma L unfolding eq_is_equal .
lemma (in upper_semilattice) join_assoc:
assumes L: "x \<in> carrier L" "y \<in> carrier L" "z \<in> carrier L"
shows "(x \<squnion> y) \<squnion> z = x \<squnion> (y \<squnion> z)"
using weak_join_assoc L unfolding eq_is_equal .
text \<open>Infimum\<close>
declare (in partial_order) weak_inf_of_singleton [simp del]
lemma (in partial_order) inf_of_singleton [simp]:
"x \<in> carrier L ==> \<Sqinter>{x} = x"
using weak_inf_of_singleton unfolding eq_is_equal .
text \<open>Condition on \<open>A\<close>: infimum exists.\<close>
lemma (in lower_semilattice) meet_assoc_lemma:
assumes L: "x \<in> carrier L" "y \<in> carrier L" "z \<in> carrier L"
shows "x \<sqinter> (y \<sqinter> z) = \<Sqinter>{x, y, z}"
using weak_meet_assoc_lemma L unfolding eq_is_equal .
lemma (in lower_semilattice) meet_assoc:
assumes L: "x \<in> carrier L" "y \<in> carrier L" "z \<in> carrier L"
shows "(x \<sqinter> y) \<sqinter> z = x \<sqinter> (y \<sqinter> z)"
using weak_meet_assoc L unfolding eq_is_equal .
text \<open>Total Orders\<close>
locale total_order = partial_order +
assumes total_order_total: "[| x \<in> carrier L; y \<in> carrier L |] ==> x \<sqsubseteq> y | y \<sqsubseteq> x"
sublocale total_order < weak?: weak_total_order
by standard (rule total_order_total)
text \<open>Introduction rule: the usual definition of total order\<close>
lemma (in partial_order) total_orderI:
assumes total: "!!x y. [| x \<in> carrier L; y \<in> carrier L |] ==> x \<sqsubseteq> y | y \<sqsubseteq> x"
shows "total_order L"
by standard (rule total)
text \<open>Total orders are lattices.\<close>
sublocale total_order < weak?: lattice
by standard (auto intro: sup_of_two_exists inf_of_two_exists)
text \<open>Complete lattices\<close>
(* Complete lattice over equality; again a sublocale of the weak
   version. *)
locale complete_lattice = lattice +
assumes sup_exists:
"[| A \<subseteq> carrier L |] ==> EX s. least L s (Upper L A)"
and inf_exists:
"[| A \<subseteq> carrier L |] ==> EX i. greatest L i (Lower L A)"
sublocale complete_lattice < weak?: weak_complete_lattice
by standard (auto intro: sup_exists inf_exists)
text \<open>Introduction rule: the usual definition of complete lattice\<close>
lemma (in partial_order) complete_latticeI:
assumes sup_exists:
"!!A. [| A \<subseteq> carrier L |] ==> EX s. least L s (Upper L A)"
and inf_exists:
"!!A. [| A \<subseteq> carrier L |] ==> EX i. greatest L i (Lower L A)"
shows "complete_lattice L"
by standard (auto intro: sup_exists inf_exists)
(* Equality variant of weak_complete_lattice_criterion1: top element +
   infima of non-empty sets suffice; the proof script is identical. *)
theorem (in partial_order) complete_lattice_criterion1:
assumes top_exists: "EX g. greatest L g (carrier L)"
and inf_exists:
"!!A. [| A \<subseteq> carrier L; A ~= {} |] ==> EX i. greatest L i (Lower L A)"
shows "complete_lattice L"
proof (rule complete_latticeI)
from top_exists obtain top where top: "greatest L top (carrier L)" ..
fix A
assume L: "A \<subseteq> carrier L"
let ?B = "Upper L A"
from L top have "top \<in> ?B" by (fast intro!: Upper_memI intro: greatest_le)
then have B_non_empty: "?B ~= {}" by fast
have B_L: "?B \<subseteq> carrier L" by simp
from inf_exists [OF B_L B_non_empty]
obtain b where b_inf_B: "greatest L b (Lower L ?B)" ..
have "least L b (Upper L A)"
apply (rule least_UpperI)
apply (rule greatest_le [where A = "Lower L ?B"])
apply (rule b_inf_B)
apply (rule Lower_memI)
apply (erule Upper_memD [THEN conjunct1])
apply assumption
apply (rule L)
apply (fast intro: L [THEN subsetD])
apply (erule greatest_Lower_below [OF b_inf_B])
apply simp
apply (rule L)
apply (rule greatest_closed [OF b_inf_B])
done
then show "EX s. least L s (Upper L A)" ..
next
fix A
assume L: "A \<subseteq> carrier L"
show "EX i. greatest L i (Lower L A)"
proof (cases "A = {}")
case True then show ?thesis
by (simp add: top_exists)
next
case False with L show ?thesis
by (rule inf_exists)
qed
qed
(* TODO: prove dual version *)
subsection \<open>Examples\<close>
subsubsection \<open>The Powerset of a Set is a Complete Lattice\<close>
(* The powerset of A ordered by inclusion is a complete lattice:
   suprema are unions, infima are intersections cut back to A. *)
theorem powerset_is_complete_lattice:
"complete_lattice \<lparr>carrier = Pow A, eq = op =, le = op \<subseteq>\<rparr>"
(is "complete_lattice ?L")
proof (rule partial_order.complete_latticeI)
show "partial_order ?L"
by standard auto
next
fix B
assume "B \<subseteq> carrier ?L"
then have "least ?L (\<Union>B) (Upper ?L B)"
by (fastforce intro!: least_UpperI simp: Upper_def)
then show "EX s. least ?L s (Upper ?L B)" ..
next
fix B
assume "B \<subseteq> carrier ?L"
then have "greatest ?L (\<Inter>B \<inter> A) (Lower ?L B)"
txt \<open>@{term "\<Inter>B"} is not the infimum of @{term B}:
@{term "\<Inter>{} = UNIV"} which is in general bigger than @{term "A"}!\<close>
by (fastforce intro!: greatest_LowerI simp: Lower_def)
then show "EX i. greatest ?L i (Lower ?L B)" ..
qed
text \<open>An other example, that of the lattice of subgroups of a group,
can be found in Group theory (Section~\ref{sec:subgroup-lattice}).\<close>
end
| {
"alphanum_fraction": null,
"author": "SEL4PROJ",
"avg_line_length": null,
"converted": null,
"ext": null,
"file": null,
"hexsha": null,
"include": null,
"lang": null,
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": "github-repos/isabelle/SEL4PROJ-jormungand/jormungand-bad97f9817b4034cd705cd295a1f86af880a7631/case_study/isabelle/src/HOL/Algebra/Lattice.thy",
"reason": null,
"repo": "jormungand",
"save_path": "github-repos/isabelle/SEL4PROJ-jormungand",
"sha": "bad97f9817b4034cd705cd295a1f86af880a7631",
"size": null
} |
# import numpy as np
# import gym
# import torch
# from typing import Dict, Any
# from copy import deepcopy
# from malib.algorithm.common.model import MLPCritic
# from malib.algorithm.common.policy import Policy
#
#
# class QMIX(Policy):
# def __init__(
# self,
# registered_name: str,
# observation_space: gym.spaces.Space,
# action_space: gym.spaces.Space,
# model_config: Dict[str, Any] = None,
# custom_config: Dict[str, Any] = None,
# ):
# super(QMIX, self).__init__(
# registered_name=registered_name,
# observation_space=observation_space,
# action_space=action_space,
# model_config=model_config,
# custom_config=custom_config,
# )
# self.eps_min = (
# 1e-2 if custom_config is None else custom_config.get("eps_min", 1e-5)
# )
# self.eps_max = (
# 1.0 if custom_config is None else custom_config.get("eps_max", 1.0)
# )
# self.eps_decay = (
# 100 if custom_config is None else custom_config.get("eps_decay", 100)
# )
# self.polyak = (
# 0.99 if custom_config is None else custom_config.get("polyak", 0.99)
# )
# self.delta = (self.eps_max - self.eps_min) / self.eps_decay
# self.step = 0
#
# self.obs_dim = self.preprocessor.size
# self.act_dim = action_space.n
#
# self.q = MLPCritic(self.obs_dim, self.act_dim, model_config)
# self.q_targ = deepcopy(self.q)
#
# def compute_actions(self, observation, **kwargs):
# pass
#
# def _calc_eps(self):
# return max(self.eps_min, self.eps_max - self.delta * self.step)
#
# def compute_action(self, observation, **kwargs):
# # self.step += 1
# if np.random.random() < self._calc_eps():
# actions = range(self.action_space.n)
# if "legal_moves" in kwargs:
# actions = kwargs["legal_moves"]
# elif "action_mask" in kwargs:
# actions = np.where(kwargs["action_mask"] == 1)[0]
# action = np.random.choice(actions)
# action_prob = torch.zeros(self.action_space.n)
# action_prob[action] = 1.0
# return action, None, {"action_probs": action_prob}
# probs = torch.softmax(self.q(observation), dim=-1)
# if "legal_moves" in kwargs:
# mask = torch.zeros_like(probs)
# mask[kwargs["legal_moves"]] = 1
# probs = mask * probs
# elif "action_mask" in kwargs:
# mask = torch.FloatTensor(kwargs["action_mask"])
# probs = mask * probs
# # probs = probs / probs.sum()
# # action = Categorical(probs=probs).sample()
# action = probs.argmax().view(1)
#
# extra_info = {"action_probs": probs.detach().numpy()}
# return action.item(), None, extra_info
#
# def compute_q(self, obs):
# return self.q(obs)
#
# def compute_target_q(self, obs):
# return self.q_targ(obs)
#
# def get_parameters(self):
# return self.q.parameters()
#
# def update_target(self):
# # self.q_targ.load_state_dict(self.q.state_dict())
# with torch.no_grad():
# for p, p_targ in zip(self.q.parameters(), self.q_targ.parameters()):
# p_targ.data.mul_(self.polyak)
# p_targ.data.add_((1 - self.polyak) * p.data)
#
# def state_dict(self):
# return {
# "q": self.q.state_dict(),
# "q_target": self.q_targ.state_dict(),
# "step": self.step,
# }
#
# def set_weights(self, parameters):
# self.q.load_state_dict(parameters["q"])
# self.q_targ.load_state_dict(parameters["q_target"])
# self.step = parameters["step"]
#
# def train(self):
# pass
#
# def eval(self):
# pass
| {
"alphanum_fraction": 0.5638621389,
"author": null,
"avg_line_length": 34.9203539823,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "b599dbea2abcc1ec4fbf5f3e2f587aa9a53a36e5",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "9ac2f0a8783aede56f4ac1f6074db7daa41b6b6c",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "ReinholdM/play_football_with_human",
"max_forks_repo_path": "malib/algorithm/qmix/policy.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "9ac2f0a8783aede56f4ac1f6074db7daa41b6b6c",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "ReinholdM/play_football_with_human",
"max_issues_repo_path": "malib/algorithm/qmix/policy.py",
"max_line_length": 83,
"max_stars_count": 5,
"max_stars_repo_head_hexsha": "9ac2f0a8783aede56f4ac1f6074db7daa41b6b6c",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "ReinholdM/play_football_with_human",
"max_stars_repo_path": "malib/algorithm/qmix/policy.py",
"max_stars_repo_stars_event_max_datetime": "2021-12-23T09:04:21.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-11-17T03:11:13.000Z",
"num_tokens": 967,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 3946
} |
/***************************************************************************
* Software License Agreement (BSD License) *
* Copyright (C) 2012 by Markus Bader <markus.bader@tuwien.ac.at> *
* *
* Redistribution and use in source and binary forms, with or without *
* modification, are permitted provided that the following conditions *
* are met: *
* *
* 1. Redistributions of source code must retain the above copyright *
* notice, this list of conditions and the following disclaimer. *
* 2. Redistributions in binary form must reproduce the above copyright *
* notice, this list of conditions and the following disclaimer in *
* the documentation and/or other materials provided with the *
* distribution. *
* 3. Neither the name of the copyright holder nor the names of its *
* contributors may be used to endorse or promote products derived *
* from this software without specific prior written permission. *
* *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS *
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT *
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS *
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE *
* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, *
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, *
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; *
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER *
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT *
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY *
* WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE *
* POSSIBILITY OF SUCH DAMAGE. *
***************************************************************************/
#include <iostream>
#include <stdlib.h>
#include <signal.h>
#include "shmfw/variable.h"
#include "shmfw/vector.h"
#include "shmfw/log.h"
#include <boost/program_options.hpp>
#include <boost/thread.hpp>
#include <ncurses.h>
bool loop_program = true; // run flag; cleared by the SIGINT handler to end the read loop
// Command-line configuration for the shared-memory log reader.
// NOTE(review): "Prarmeters" is a typo for "Parameters"; kept as-is because
// the name is used throughout this translation unit.
struct Prarmeters {
bool clear; // --clear: remove the shared memory segment and exit
std::string shm_memory_name; // shared memory segment name
unsigned int shm_memory_size; // shared memory segment size in bytes
std::string variable_name; // shared variable name of the logger
std::string log_file; // log file prefix; empty disables file output
int process_cout_level; // level printed by the message-creating process
int source; // source filter (-1 = off)
int file_level; // minimum level stored to file
int cout_level; // minimum level printed by this process
bool timeout_msg; // print a notice when the wait times out
bool pull; // pull mode: poll for messages without a trigger
bool buffer; // show the buffer index in the output
bool timestamp; // prefix each message with its timestamp
int timeout; // wait timeout in milliseconds
};
/**
 * Parses the command line into a Prarmeters record.
 *
 * Prints the usage text and exits with status 1 on --help or on a
 * malformed command line.  Boolean flags are derived from option
 * presence; valued options are bound directly to the struct fields.
 */
Prarmeters readArgs ( int argc, char **argv ) {
    namespace po = boost::program_options;
    Prarmeters params;
    po::options_description desc ( "Allowed Parameters" );
    desc.add_options()
    ( "help", "get this help message" )
    ( "clear", "clears the shared memory" )
    ( "msg_off,o", "switches the timeout message off" )
    ( "buffer", "shows the buffer id" )
    ( "timestamp_off", "switches the timestamp off" )
    ( "pull,p", "pulls for log messages (no trigger needed)" )
    ( "timeout,t", po::value<int> ( &params.timeout )->default_value ( 2000 ), "timeout for timeout message in ms" )
    ( "source,s", po::value<int> ( &params.source )->default_value ( -1 ), "source filter (-1 off)" )
    ( "process_cout_level", po::value<int> ( &params.process_cout_level )->default_value ( ShmFw::Message::NA ), "level of message to be printed by the meassage creating process" )
    ( "cout_level", po::value<int> ( &params.cout_level )->default_value ( ShmFw::Message::NA ), "level of message to be printed by this process" )
    ( "file_level", po::value<int> ( &params.file_level )->default_value ( ShmFw::Message::NA ), "level of message to be stored" )
    ( "file,f", po::value<std::string> ( &params.log_file )->default_value ( "/tmp/log" ), "log file prefix, if empty it will print of cout" )
    ( "shm_log,l", po::value<std::string> ( &params.variable_name )->default_value ( "log" ), "shared variable name of the logger" )
    ( "shm_memory_name", po::value<std::string> ( &params.shm_memory_name )->default_value ( ShmFw::DEFAULT_LOG_SEGMENT_NAME() ), "shared memory segment name" )
    ( "shm_memory_size", po::value<unsigned int> ( &params.shm_memory_size )->default_value ( ShmFw::DEFAULT_LOG_SEGMENT_SIZE() ), "shared memory segment size" );
    po::variables_map vm;
    try {
        po::store ( po::parse_command_line ( argc, argv, desc ), vm );
    } catch ( const std::exception &ex ) {
        // Report the actual parse error instead of silently dropping it
        // (the original only dumped the usage text, with a stray ";;").
        std::cerr << "Error parsing command line: " << ex.what() << std::endl;
        std::cout << desc << std::endl;
        exit ( 1 );
    }
    po::notify ( vm );
    if ( vm.count ( "help" ) ) {
        std::cout << desc << std::endl;
        exit ( 1 );
    }
    // Presence-only flags; the *_off options invert the stored value.
    params.clear = ( vm.count ( "clear" ) > 0 );
    params.timeout_msg = ! ( vm.count ( "msg_off" ) > 0 );
    params.pull = ( vm.count ( "pull" ) > 0 );
    params.buffer = ( vm.count ( "buffer" ) > 0 );
    params.timestamp = !( vm.count ( "timestamp_off" ) > 0 );
    return params;
}
/**
 * Writes a batch of messages to the log file (when configured) and to
 * stdout.  The file receives every message; the console only shows
 * those whose type is at least params.cout_level.
 */
void printMsgs ( std::vector<ShmFw::Message> &msgs, std::ofstream &file, const Prarmeters &params ) {
for ( unsigned int i = 0; i < msgs.size(); i++ ) {
ShmFw::Message &msg = msgs[i];
if ( !params.log_file.empty() ) {
file << std::setw ( 4 ) << i << ": " << msg << std::endl;
}
//source log level/type filter
if ( msg.getType() >= params.cout_level ) {
if ( params.buffer ) std::cout << std::setw ( 4 ) << i << ": ";
// substr(12,26): strip the date part of the ptime string, keeping time of
// day. NOTE(review): the second argument is a length, not an end index —
// confirm the intended field width.
if ( params.timestamp ) std::cout << boost::posix_time::to_simple_string ( msg.getTime() ).substr(12,26) << ": ";
if ( msg.getType() != ShmFw::Message::NA ) std::cout << ": " << std::setw ( 8 ) << msg.typeStr() << ": ";
std::cout << msg.getMsg();
std::cout << std::endl;
}
}
}
/**
 * Main read loop: waits for log messages in the shared-memory queue,
 * drains those matching the source filter, and prints/stores them.
 * Runs until the global loop_program flag is cleared by the signal
 * handler.  When a log file prefix is given, a timestamped file is
 * opened for the session.
 */
void dequeLog ( ShmFw::Log &log, const Prarmeters &params ) {
std::vector<ShmFw::Message> msgs;
std::ofstream file;
boost::posix_time::ptime now = boost::posix_time::microsec_clock::local_time();
std::string datedFileName = params.log_file + std::string ( "_" ) + boost::posix_time::to_iso_string ( now ) + std::string ( ".txt" );
if ( !params.log_file.empty() ) {
file.open ( datedFileName.c_str(), std::ios::out | std::ios::binary );
}
while ( loop_program ) {
bool wait_ones = true;
// In pull mode a single timeout is enough (wait_ones breaks the wait);
// otherwise keep waiting until a writer triggers the condition.
while ( ( log.timed_wait ( params.timeout ) == false ) && loop_program && wait_ones ) {
if ( params.timeout_msg ) std::cout << "waited " << params.timeout << " ms" << std::endl;
if ( params.pull ) wait_ones = false;
}
msgs.clear();
log.lock();
#if BOOST_VERSION / 100 % 1000 <= 55
// NOTE(review): pop_front() while iterating with 'it' looks fragile —
// verify that ShmFw::Log::Iterator stays valid after removal.
for ( ShmFw::Log::Iterator it = log->begin(); it != log->end(); it++ ) {
//source filter
if ( (params.source < 0) || (params.source == ( *it ).getSource())) {
msgs.push_back ( *it );
log.pop_front();
}
}
#else
// NOTE(review): empty branch — for Boost > 1.55 the queue is never
// drained, so nothing is printed. Looks unfinished; needs an
// implementation equivalent to the branch above.
#endif
log.unlock();
printMsgs ( msgs, file, params );
}
if ( !params.log_file.empty() ) file.close();
}
/**
 * Signal handler: announces shutdown and clears the global run flag so
 * the processing loop exits at its next iteration.
 */
void terminate ( int /*signum*/ ) {
    std::cout << "Closing program!" << std::endl;
    loop_program = false;
}
/**
 * Entry point: attaches to the shared-memory log segment and streams
 * messages to stdout/file until interrupted.  With --clear, removes the
 * segment and exits instead.
 */
int main ( int argc, char *argv[] ) {
    signal ( SIGINT, terminate );
    // Fix: SIGKILL can be neither caught nor ignored, so the original
    // signal(SIGKILL, terminate) registration had no effect.  Catch
    // SIGTERM instead so a plain `kill` also shuts the loop down cleanly.
    signal ( SIGTERM, terminate );
    Prarmeters params = readArgs ( argc, argv );
    if ( params.clear ) {
        ShmFw::Handler::removeSegment ( params.shm_memory_name );
        std::cout << "Shared Memory " << params.shm_memory_name << " cleared" << std::endl;
        exit ( 1 );
    }
    ShmFw::HandlerPtr shmHdl = ShmFw::Handler::create ( params.shm_memory_name, params.shm_memory_size );
    ShmFw::Log log ( shmHdl, params.variable_name );
    log.process_cout_level ( params.process_cout_level );
    log.unlock();
    dequeLog ( log, params );
    log.unlock();
    exit ( 0 );
}
| {
"alphanum_fraction": 0.5754671196,
"author": null,
"avg_line_length": 43.3789473684,
"converted": null,
"ext": "cpp",
"file": null,
"hexsha": "43101b0ae11df976c4f2821f837e019a0c5d5c96",
"include": null,
"lang": "C++",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2018-11-07T02:45:24.000Z",
"max_forks_repo_forks_event_min_datetime": "2018-11-07T02:45:24.000Z",
"max_forks_repo_head_hexsha": "d386841f8b1425cbeca8c8a3fc6919bf8d83c38e",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "ShmFw/shmfw",
"max_forks_repo_path": "apps/log/src/main_log.cpp",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "d386841f8b1425cbeca8c8a3fc6919bf8d83c38e",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "ShmFw/shmfw",
"max_issues_repo_path": "apps/log/src/main_log.cpp",
"max_line_length": 180,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "d386841f8b1425cbeca8c8a3fc6919bf8d83c38e",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "ShmFw/shmfw",
"max_stars_repo_path": "apps/log/src/main_log.cpp",
"max_stars_repo_stars_event_max_datetime": "2016-02-06T14:57:32.000Z",
"max_stars_repo_stars_event_min_datetime": "2016-02-06T14:57:32.000Z",
"num_tokens": 2011,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 8242
} |
# -*- coding: utf-8 -*-
"""FFT functions.
This module contains FFT functions that support centered operation.
"""
import numpy as np
from sigpy import config, util
if config.cupy_enabled:
import cupy as cp
def fft(input, oshape=None, axes=None, center=True, norm='ortho'):
    """FFT function that supports centering.

    Args:
        input (array): input array.
        oshape (None or array of ints): output shape.
        axes (None or array of ints): Axes over which to compute the FFT.
        center (bool): Toggles centered (shifted) operation.
        norm (None or ``"ortho"``): Keyword to specify the normalization mode.

    Returns:
        array: FFT result of dimension oshape.

    See Also:
        :func:`numpy.fft.fftn`
    """
    device = util.get_device(input)
    xp = device.xp
    with device:
        if not np.issubdtype(input.dtype, np.complexfloating):
            # Use the builtin ``complex`` (complex128): the ``np.complex``
            # alias was deprecated in NumPy 1.20 and removed in 1.24.
            input = input.astype(complex)
        if center:
            output = _fftc(input, oshape=oshape, axes=axes, norm=norm)
        else:
            output = xp.fft.fftn(input, s=oshape, axes=axes, norm=norm)
        # Keep the caller's complex precision (e.g. complex64 in,
        # complex64 out) instead of the transform's default dtype.
        if np.issubdtype(input.dtype, np.complexfloating) and input.dtype != output.dtype:
            output = output.astype(input.dtype)
    return output
def ifft(input, oshape=None, axes=None, center=True, norm='ortho'):
    """IFFT function that supports centering.

    Args:
        input (array): input array.
        oshape (None or array of ints): output shape.
        axes (None or array of ints): Axes over which to compute the inverse FFT.
        center (bool): Toggles centered (shifted) operation.
        norm (None or ``"ortho"``): Keyword to specify the normalization mode.

    Returns:
        array of dimension oshape.

    See Also:
        :func:`numpy.fft.ifftn`
    """
    device = util.get_device(input)
    xp = device.xp
    with device:
        if not np.issubdtype(input.dtype, np.complexfloating):
            # Use the builtin ``complex`` (complex128): the ``np.complex``
            # alias was deprecated in NumPy 1.20 and removed in 1.24.
            input = input.astype(complex)
        if center:
            output = _ifftc(input, oshape=oshape, axes=axes, norm=norm)
        else:
            output = xp.fft.ifftn(input, s=oshape, axes=axes, norm=norm)
        # Keep the caller's complex precision (e.g. complex64 in,
        # complex64 out) instead of the transform's default dtype.
        if np.issubdtype(input.dtype, np.complexfloating) and input.dtype != output.dtype:
            output = output.astype(input.dtype)
    return output
def _fftc(input, oshape=None, axes=None, norm='ortho'):
    """Centered FFT along ``axes``, one axis at a time.

    Each requested axis is swapped to the last position, resized to the
    requested output length, and transformed with an
    ifftshift -> fft -> fftshift sandwich so the zero-frequency component
    stays centered.
    """
    ndim = input.ndim
    axes = util._normalize_axes(axes, ndim)
    device = util.get_device(input)
    xp = device.xp
    if oshape is None:
        oshape = input.shape
    with device:
        tmp = input
        tshape = list(input.shape)
        for a in axes:
            tshape[a] = oshape[a]
            # (Removed a dead ``xp.arange`` allocation that was computed
            # on every iteration but never used.)
            tmp = tmp.swapaxes(a, -1)
            tshape[a], tshape[-1] = tshape[-1], tshape[a]
            tmp = util.resize(tmp, tshape)
            tmp = xp.fft.ifftshift(tmp, axes=-1)
            tmp = xp.fft.fft(tmp, axis=-1, norm=norm)
            tmp = xp.fft.fftshift(tmp, axes=-1)
            # Restore the original axis order and bookkeeping shape.
            tmp = tmp.swapaxes(a, -1)
            tshape[a], tshape[-1] = tshape[-1], tshape[a]
        output = tmp
    return output
def _ifftc(input, oshape=None, axes=None, norm='ortho'):
    """Centered inverse FFT along ``axes``, one axis at a time.

    Mirrors :func:`_fftc` but applies the inverse transform:
    ifftshift -> ifft -> fftshift per axis.
    """
    ndim = input.ndim
    axes = util._normalize_axes(axes, ndim)
    device = util.get_device(input)
    xp = device.xp
    if oshape is None:
        oshape = input.shape
    with device:
        tmp = input
        tshape = list(input.shape)
        for a in axes:
            tshape[a] = oshape[a]
            # (Removed a dead ``xp.arange`` allocation that was computed
            # on every iteration but never used.)
            tmp = tmp.swapaxes(a, -1)
            tshape[a], tshape[-1] = tshape[-1], tshape[a]
            tmp = util.resize(tmp, tshape)
            tmp = xp.fft.ifftshift(tmp, axes=-1)
            tmp = xp.fft.ifft(tmp, axis=-1, norm=norm)
            tmp = xp.fft.fftshift(tmp, axes=-1)
            # Restore the original axis order and bookkeeping shape.
            tmp = tmp.swapaxes(a, -1)
            tshape[a], tshape[-1] = tshape[-1], tshape[a]
        output = tmp
    return output
| {
"alphanum_fraction": 0.579133958,
"author": null,
"avg_line_length": 26.5033557047,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "40b728357a1cf78ff8c40718d059423c898441aa",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "56f8eb9be57b5a80e53ae09f2ba0802586fe69bc",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "davidyzeng/sigpy",
"max_forks_repo_path": "sigpy/fft.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "56f8eb9be57b5a80e53ae09f2ba0802586fe69bc",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "davidyzeng/sigpy",
"max_issues_repo_path": "sigpy/fft.py",
"max_line_length": 90,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "56f8eb9be57b5a80e53ae09f2ba0802586fe69bc",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "davidyzeng/sigpy",
"max_stars_repo_path": "sigpy/fft.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1025,
"path": null,
"reason": "import numpy,import cupy",
"repo": null,
"save_path": null,
"sha": null,
"size": 3949
} |
import pygame
import numpy as np
import random
# --- module-level setup (runs on import) ---
pygame.init()
clock = pygame.time.Clock()  # NOTE(review): created but never ticked in the main loop below
display = pygame.display.set_mode((1280, 720))
screen = pygame.display.get_surface()
width,height = screen.get_width(), screen.get_height()
pygame.display.set_caption('water cellular automata')
# One grid cell per 8x8-pixel block.
rows = int(height / 8)
columns = int(width / 8)
# Cell values: 0 empty, 1-10 units of water, 20 wall, 21 water source
# (see the physics and drawing code below).
field = np.zeros((rows, columns), dtype=np.int32)
# Scatter a handful of random sources over the upper three quarters of the grid.
for a in range(0, int((rows * columns) / 500)):
    r1 = random.randint(0, int((rows / 4) * 3))
    r2 = random.randint(0, int(columns - 1))
    field[r1, r2] = 21
def draw_text(text, pos):
    """Draw multi-line ``text`` onto the global ``screen`` starting at ``pos``.

    Each line is rendered in red, sized to one grid cell, and placed on its
    own row.  Fixes two defects in the original: ``y_pos`` never advanced,
    so all lines were drawn on top of each other, and every prefix of each
    line was re-blitted at the same position (the loop also shadowed the
    ``text`` parameter).
    """
    font = pygame.font.SysFont("arial", int(height / rows))
    x_pos, y_pos = pos
    for line in text.splitlines():
        # Render each full line once instead of blitting every prefix.
        surface = font.render(line, 1, (255, 0, 0))
        screen.blit(surface, (x_pos, y_pos))
        y_pos += font.get_linesize()  # advance so lines do not overlap
def physics():
    """Advance the water automaton in the global ``field`` by one step.

    Cells holding 1..10 units of water try, in priority order, to flow
    down, split one unit to each side, flow right, then flow left.
    Cells with value >= 20 (walls and sources) never move.  The grid is
    updated in place while scanning top-left to bottom-right, so flow
    within one call is order-dependent.
    """
    n_rows, n_cols = field.shape
    for row in range(n_rows):
        for col in range(n_cols):
            amount = field[row, col]
            if amount >= 20:
                continue  # static cell: wall (20) or source (21)
            above, below = row - 1, row + 1
            left, right = col - 1, col + 1
            # Out-of-bounds neighbours behave like solid walls (value 20).
            upper_val = field[above, col] if above >= 0 else 20  # read as in the original; currently unused
            under_val = field[below, col] if below <= rows - 1 else 20
            left_val = field[row, left] if left >= 0 else 20
            right_val = field[row, right] if right <= columns - 1 else 20
            if 1 <= amount <= 10:
                if under_val < 10:
                    # gravity: move one unit straight down
                    field[row, col] -= 1
                    field[below, col] += 1
                elif left_val < 10 and right_val < 10 and amount >= 2:
                    # spread one unit to each side
                    field[row, col] -= 2
                    field[row, left] += 1
                    field[row, right] += 1
                elif right_val < 10 and amount > 1:
                    field[row, col] -= 1
                    field[row, right] += 1
                elif left_val < 10 and amount > 1:
                    field[row, col] -= 1
                    field[row, left] += 1
quitting = False
# --- main loop: handle input, emit water from sources, step physics, draw ---
while True:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            pygame.quit()
            quitting = True
            break
        elif event.type == pygame.KEYDOWN:
            if event.key == pygame.K_ESCAPE:
                quitting = True
            if event.key == pygame.K_1:
                # Key "1": turn the cell under the mouse into a source (21).
                Mouse_x, Mouse_y = pygame.mouse.get_pos()
                column_width = width / columns
                row_height = height / rows
                # Linear scan over every cell to find the one containing the
                # cursor (the index could also be computed directly).
                for anzahl_columns in np.arange(field.shape[1]):
                    column = column_width * anzahl_columns
                    nächster_column = column_width * (anzahl_columns + 1)
                    for anzahl_rows in np.arange(field.shape[0]):
                        row = row_height * anzahl_rows
                        nächste_row = row_height * (anzahl_rows + 1)
                        if Mouse_x > column and Mouse_x <= nächster_column and Mouse_y > row and Mouse_y <= nächste_row:
                            field[anzahl_rows, anzahl_columns] = 21
        elif event.type == pygame.MOUSEBUTTONDOWN:
            if event.button == 1:
                # Left click: add one unit of water (value capped at 10).
                Mouse_x, Mouse_y = pygame.mouse.get_pos()
                column_width = width / columns
                row_height = height / rows
                for anzahl_columns in np.arange(field.shape[1]):
                    column = column_width * anzahl_columns
                    nächster_column = column_width * (anzahl_columns + 1)
                    for anzahl_rows in np.arange(field.shape[0]):
                        row = row_height * anzahl_rows
                        nächste_row = row_height * (anzahl_rows + 1)
                        if Mouse_x > column and Mouse_x <= nächster_column and Mouse_y > row and Mouse_y <= nächste_row:
                            if not field[anzahl_rows, anzahl_columns] >= 10:
                                field[anzahl_rows, anzahl_columns] += 1
            if event.button == 2:
                # Middle click: place a wall cell (20).
                Mouse_x, Mouse_y = pygame.mouse.get_pos()
                column_width = width / columns
                row_height = height / rows
                for anzahl_columns in np.arange(field.shape[1]):
                    column = column_width * anzahl_columns
                    nächster_column = column_width * (anzahl_columns + 1)
                    for anzahl_rows in np.arange(field.shape[0]):
                        row = row_height * anzahl_rows
                        nächste_row = row_height * (anzahl_rows + 1)
                        if Mouse_x > column and Mouse_x <= nächster_column and Mouse_y > row and Mouse_y <= nächste_row:
                            field[anzahl_rows, anzahl_columns] = 20
            if event.button == 3:
                # Right click: clear the cell (0).
                Mouse_x, Mouse_y = pygame.mouse.get_pos()
                column_width = width / columns
                row_height = height / rows
                for anzahl_columns in np.arange(field.shape[1]):
                    column = column_width * anzahl_columns
                    nächster_column = column_width * (anzahl_columns + 1)
                    for anzahl_rows in np.arange(field.shape[0]):
                        row = row_height * anzahl_rows
                        nächste_row = row_height * (anzahl_rows + 1)
                        if Mouse_x > column and Mouse_x <= nächster_column and Mouse_y > row and Mouse_y <= nächste_row:
                            field[anzahl_rows, anzahl_columns] = 0
    if quitting == True:
        break
    display.fill((55, 55, 55))
    # Sources (21) emit one unit of water into the cell below unless it
    # is already full (>= 10) or off the grid.
    for y in np.arange(field.shape[0]):
        for x in np.arange(field.shape[1]):
            value = field[y, x]
            d = y + 1
            if not y + 1 > (rows - 1):
                undervalue = field[d, x]
            else:
                undervalue = 20
            if value == 21:
                if undervalue < 10:
                    d = y + 1
                    field[d, x] += 1
    physics()
    # Draw the grid: shaded water cells (1..10), white walls (20), red sources (21).
    for y in np.arange(field.shape[0]):
        ypos = y * height / field.shape[0]
        for x in np.arange(field.shape[1]):
            xpos = x * width / field.shape[1]
            val = field[y, x]
            if field[y, x] > 0 and not field[y, x] > 10:
                # Color varies with the amount of water in the cell.
                pygame.draw.rect(display, (0, val / 12, 255 / val ), (xpos, ypos, np.math.ceil(width / field.shape[1]), np.math.ceil(height / field.shape[0])))
                #draw_text(str(val), (xpos, ypos))
            elif (field[y, x] == 20):
                pygame.draw.rect(display, (255, 255, 255), (xpos, ypos, np.math.ceil(width / field.shape[1]), np.math.ceil(height / field.shape[0])))
                #draw_text(str(val), (xpos, ypos))
            elif (field[y,x] == 21):
                pygame.draw.rect(display, (255, 0, 0), (xpos, ypos, np.math.ceil(width / field.shape[1]), np.math.ceil(height / field.shape[0])))
                #draw_text(str(val), (xpos, ypos))
    pygame.display.update()
"alphanum_fraction": 0.4778856526,
"author": null,
"avg_line_length": 47.2356687898,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "d75e81163d75de3977ca22c060df2cd22f389eb0",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2021-06-06T17:38:51.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-06-06T17:38:51.000Z",
"max_forks_repo_head_hexsha": "53b6d89cce007f0183112c33a081fd01b3a757ae",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "CREEPZOMBEY/cellular-automata-fluid-simulation",
"max_forks_repo_path": "Water.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "53b6d89cce007f0183112c33a081fd01b3a757ae",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "CREEPZOMBEY/cellular-automata-fluid-simulation",
"max_issues_repo_path": "Water.py",
"max_line_length": 160,
"max_stars_count": 6,
"max_stars_repo_head_hexsha": "53b6d89cce007f0183112c33a081fd01b3a757ae",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "CREEPZOMBEY/cellular-automata-fluid-simulation",
"max_stars_repo_path": "Water.py",
"max_stars_repo_stars_event_max_datetime": "2021-06-06T17:38:50.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-04-05T11:25:52.000Z",
"num_tokens": 1718,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 7416
} |
[STATEMENT]
lemma irreducible\<^sub>dD:
assumes "irreducible\<^sub>d p"
shows "degree p > 0" "\<And>q r. degree q < degree p \<Longrightarrow> degree r < degree p \<Longrightarrow> p \<noteq> q * r"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 0 < degree p &&& (\<And>q r. \<lbrakk>degree q < degree p; degree r < degree p\<rbrakk> \<Longrightarrow> p \<noteq> q * r)
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
irreducible\<^sub>d p
goal (1 subgoal):
1. 0 < degree p &&& (\<And>q r. \<lbrakk>degree q < degree p; degree r < degree p\<rbrakk> \<Longrightarrow> p \<noteq> q * r)
[PROOF STEP]
unfolding irreducible\<^sub>d_def
[PROOF STATE]
proof (prove)
using this:
0 < degree p \<and> (\<forall>q r. degree q < degree p \<longrightarrow> degree r < degree p \<longrightarrow> p \<noteq> q * r)
goal (1 subgoal):
1. 0 < degree p &&& (\<And>q r. \<lbrakk>degree q < degree p; degree r < degree p\<rbrakk> \<Longrightarrow> p \<noteq> q * r)
[PROOF STEP]
by auto | {
"alphanum_fraction": null,
"author": null,
"avg_line_length": null,
"converted": null,
"ext": null,
"file": "Polynomial_Interpolation_Missing_Polynomial",
"hexsha": null,
"include": null,
"lang": null,
"length": 3,
"llama_tokens": 381,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": null
} |
#pragma once

#include <boost/core/noncopyable.hpp>

#include <cstdint>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <utility>

namespace koinos::mq {

// How message_broker::connect() behaves when connecting fails.
enum class retry_policy
{
   none,
   exponential_backoff
};

// Result codes returned by all broker operations.
enum class error_code : int64_t
{
   success,
   failure,
   time_out
};

// A single broker message.  The field set (exchange, routing key,
// delivery tag, reply-to / correlation id) mirrors AMQP concepts —
// presumably RabbitMQ; confirm against the implementation.
struct message
{
   std::string exchange;
   std::string routing_key;
   std::string content_type;
   std::string data;                            // message payload
   uint64_t delivery_tag;                       // broker-assigned tag, passed to ack_message()
   std::optional< std::string > reply_to;       // NOTE(review): looks RPC-related — confirm in impl
   std::optional< std::string > correlation_id;
   std::optional< uint64_t > expiration;        // NOTE(review): units not shown here — confirm in impl
};

namespace detail { struct message_broker_impl; }

// Non-copyable facade over a broker connection (pimpl idiom: all state
// lives in detail::message_broker_impl).  Every operation is noexcept
// and reports failures through error_code.
class message_broker final : private boost::noncopyable
{
   private:
      std::unique_ptr< detail::message_broker_impl > _message_broker_impl;

   public:
      message_broker();
      ~message_broker();

      // Callback invoked with this broker during connect(); the default
      // simply reports success.
      using on_connect_func = std::function< error_code( message_broker& m ) >;

      // Connect to the broker at `url` using retry policy `p`, then run `f`.
      error_code connect(
         const std::string& url,
         retry_policy p = retry_policy::exponential_backoff,
         on_connect_func f = []( message_broker& m ){ return error_code::success; }
      ) noexcept;

      void disconnect() noexcept;

      bool is_connected() noexcept;

      // Publish `msg` to its exchange / routing key.
      error_code publish( const message& msg ) noexcept;

      // Receive one message; the pointer accompanies the error code.
      std::pair< error_code, std::shared_ptr< message > > consume() noexcept;

      error_code declare_exchange(
         const std::string& exchange,
         const std::string& exchange_type,
         bool passive = false,
         bool durable = false,
         bool auto_delete = false,
         bool internal = false
      ) noexcept;

      // Returns the declared queue name alongside the error code.
      std::pair< error_code, std::string > declare_queue(
         const std::string& queue,
         bool passive = false,
         bool durable = false,
         bool exclusive = false,
         bool auto_delete = false
      ) noexcept;

      error_code bind_queue(
         const std::string& queue,
         const std::string& exchange,
         const std::string& binding_key,
         bool autoack = true   // NOTE(review): an ack flag on a bind call is unusual — confirm semantics
      ) noexcept;

      // Acknowledge a consumed message by its delivery tag.
      error_code ack_message( uint64_t delivery_tag ) noexcept;
};

} // koinos::mq
| {
"alphanum_fraction": 0.6563814867,
"author": null,
"avg_line_length": 23,
"converted": null,
"ext": "hpp",
"file": null,
"hexsha": "c942d35ae099334a2d4cbd52b8f0efee8256ac41",
"include": null,
"lang": "C++",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "dc0897f94c8b8cff8aac660a6aa7ab9556f819fa",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "koinos/koinos-mq-cpp",
"max_forks_repo_path": "libraries/mq/include/koinos/mq/message_broker.hpp",
"max_issues_count": 11,
"max_issues_repo_head_hexsha": "dc0897f94c8b8cff8aac660a6aa7ab9556f819fa",
"max_issues_repo_issues_event_max_datetime": "2021-10-18T21:52:12.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-03-08T19:32:46.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "koinos/koinos-mq-cpp",
"max_issues_repo_path": "libraries/mq/include/koinos/mq/message_broker.hpp",
"max_line_length": 80,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "dc0897f94c8b8cff8aac660a6aa7ab9556f819fa",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "koinos/koinos-mq-cpp",
"max_stars_repo_path": "libraries/mq/include/koinos/mq/message_broker.hpp",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 501,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 2139
} |
import ipywidgets as widgets
from ipywidgets import interact
import matplotlib.pyplot as plt
import numpy as np
def browse_track_multi(img, liste_a, segmentation):
    """Interactively browse mother/daughter cell tracks over an image stack.

    Args:
        img: 3-D array (time, y, x) of frames.
        liste_a: list of tracks, where liste_a[n] holds (mother, daughter1,
            daughter2) coordinate arrays; positions are plotted as
            (x=col 1, y=col 0).  Daughter arrays may be empty.
        segmentation: unused here (its contour overlay is commented out);
            kept so callers and browse_track share the same signature.
    """
    nt, ny, nx = img.shape

    def plot_track(i, save=False, name_img="file.png"):
        fig, axes = plt.subplots(1, 1, figsize=(8, 8))
        axes.imshow(img[i], cmap="gray", interpolation='nearest')
        #axes.contour(segmentation[i], [0.5], linewidths=1.2, colors='y')
        # One distinct colormap color per track (replaces the manual
        # iter()/next() color cycling of the original).
        colors = plt.cm.jet(np.linspace(0, 1, len(liste_a)))
        for track, c in zip(liste_a, colors):
            mom, d1, d2 = track[0], track[1], track[2]
            # Mother track drawn up to (and including) the current frame.
            axes.plot(mom[0:i+1, 1], mom[0:i+1, 0], linewidth=3, c=c)
            # Daughters only appear once the mother track is exhausted.
            if len(d1) > 0 and i > len(mom):
                axes.plot(d1[0:i-len(mom)+1, 1],
                          d1[0:i-len(mom)+1, 0],
                          '--', linewidth=3, c='m')
            if len(d2) > 0 and i > len(mom):
                axes.plot(d2[0:i-len(mom)+1, 1],
                          d2[0:i-len(mom)+1, 0],
                          '--', linewidth=3, c='w')
        axes.axis("off")
        axes.autoscale_view('tight')
        plt.show()
        if save:  # truthiness instead of `== True`
            fig.savefig(name_img, bbox_inches='tight')

    interact(plot_track, i=(0, nt-1), save=False, name_img="file.png")
def browse_track(img, mom, d1, d2, result, segmentation):
    """Interactive viewer for one mother track and its two daughters.

    Shows frame ``i`` with the segmentation contour, all detections in
    ``result`` as green dots, the mother path in red (labelled while the
    mother track is still running), and both daughter paths once the
    mother track has ended.
    """
    nt, ny, nx = img.shape

    def plot_track(i):
        figure, ax = plt.subplots(1, 1, figsize=(12, 12))
        ax.imshow(img[i], cmap="gray", interpolation='nearest')
        ax.contour(segmentation[i], [0.5], linewidths=1.2, colors='y')
        for detection in result:
            ax.scatter(detection[:, 1], detection[:, 0], s=4, c='g')
        ax.plot(mom[0:i+1, 1], mom[0:i+1, 0], linewidth=1, c='r')
        n_mom = len(mom)
        if i < n_mom:
            ax.text(mom[i, 1] + 50, mom[i, 0] + 5, "mom", fontsize=14, color='r')
        if i > n_mom:
            # Frames past the mother track index into the daughter arrays.
            j = i - n_mom
            ax.plot(d1[0:j+1, 1], d1[0:j+1, 0], linewidth=2, c='b')
            ax.text(d1[j, 1] + 50, d1[j, 0] + 5, "d1", fontsize=12, color='b')
            ax.plot(d2[0:j+1, 1], d2[0:j+1, 0], linewidth=2, c='g')
            #axes.text(d2[i-len(mom),1]+50, d2[i-len(mom),0]+5, "d2", fontsize = 12, color = 'g')
        ax.axis("off")
        ax.autoscale_view('tight')
        plt.show()

    interact(plot_track, i=(0, nt - 1))
| {
"alphanum_fraction": 0.518111639,
"author": null,
"avg_line_length": 40.0952380952,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "17d625fa6aedeb6e21696a88cb29150507bf386e",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "05e66c202cd6e3db9bafa1501cd794392a99c1aa",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "bioimage-analysis/track_cell_division",
"max_forks_repo_path": "script/plot_track.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "05e66c202cd6e3db9bafa1501cd794392a99c1aa",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "bioimage-analysis/track_cell_division",
"max_issues_repo_path": "script/plot_track.py",
"max_line_length": 116,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "05e66c202cd6e3db9bafa1501cd794392a99c1aa",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "bioimage-analysis/track_cell_division",
"max_stars_repo_path": "script/plot_track.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1044,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 3368
} |
from typing import Dict, List, Tuple, Union
import numpy as np
import torch
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import Dataset
from .prepare_data import process_labels, process_tokens
class NERDataset(Dataset):
    """
    PyTorch Dataset for NER data format.

    With ``preprocess=True`` token/label sequences are converted to index
    lists once at construction time; otherwise the conversion happens
    lazily on every ``__getitem__`` call.
    """

    def __init__(
        self,
        token_seq: List[List[str]],
        label_seq: List[List[str]],
        token2idx: Dict[str, int],
        label2idx: Dict[str, int],
        preprocess: bool = True,
    ):
        self.token2idx = token2idx
        self.label2idx = label2idx
        self.preprocess = preprocess
        if not preprocess:
            # Keep the raw string sequences; convert per item later.
            self.token_seq = token_seq
            self.label_seq = label_seq
        else:
            # Convert once so __getitem__ becomes a cheap lookup.
            self.token_seq = [process_tokens(seq, token2idx) for seq in token_seq]
            self.label_seq = [process_labels(seq, label2idx) for seq in label_seq]

    def __len__(self):
        return len(self.token_seq)

    def __getitem__(self, idx: int) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
        if self.preprocess:
            tokens, labels = self.token_seq[idx], self.label_seq[idx]
        else:
            tokens = process_tokens(self.token_seq[idx], self.token2idx)
            labels = process_labels(self.label_seq[idx], self.label2idx)
        # Length is wrapped in a 1-element array so default collation stacks it.
        return np.array(tokens), np.array(labels), np.array([len(tokens)])
class NERCollator(object):
    """
    Collate function that pads variable-length sentences and sorts the
    batch by descending length.

    ``percentile`` truncates sequences longer than the given percentile
    of batch lengths (100 keeps every token).
    """

    def __init__(
        self,
        token_padding_value: int,
        label_padding_value: int,
        percentile: Union[int, float] = 100,
    ):
        self.token_padding_value = token_padding_value
        self.label_padding_value = label_padding_value
        self.percentile = percentile

    def __call__(
        self,
        batch: List[Tuple[np.ndarray, np.ndarray, np.ndarray]],
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        tokens, labels, lengths = zip(*batch)
        # Truncation threshold derived from the batch's length distribution.
        max_len = int(np.percentile(lengths, self.percentile))
        lengths = torch.tensor(
            np.clip(lengths, a_min=0, a_max=max_len),
        ).squeeze(-1)
        token_tensors = [torch.tensor(list(seq)[:max_len]) for seq in tokens]
        label_tensors = [torch.tensor(list(seq)[:max_len]) for seq in labels]
        # Order by descending (clipped) length, e.g. for packed RNN input.
        order = torch.argsort(lengths, descending=True)
        padded_tokens = pad_sequence(
            token_tensors, padding_value=self.token_padding_value, batch_first=True
        )[order]
        padded_labels = pad_sequence(
            label_tensors, padding_value=self.label_padding_value, batch_first=True
        )[order]
        return padded_tokens, padded_labels, lengths[order]
| {
"alphanum_fraction": 0.6272413793,
"author": null,
"avg_line_length": 29.5918367347,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "e9da54e88b83e52f47838c9103d51ab85b341b40",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "b1729d97ccb168e5796045cf9b387b35536803eb",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "abdallah1097/pytorch_ner",
"max_forks_repo_path": "pytorch_ner/dataset.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "b1729d97ccb168e5796045cf9b387b35536803eb",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "abdallah1097/pytorch_ner",
"max_issues_repo_path": "pytorch_ner/dataset.py",
"max_line_length": 88,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "b1729d97ccb168e5796045cf9b387b35536803eb",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "abdallah1097/pytorch_ner",
"max_stars_repo_path": "pytorch_ner/dataset.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 644,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 2900
} |
import os
import h5py
import json
import pickle
import random
import numpy as np
from pathlib import Path
from sklearn.cluster import MiniBatchKMeans
# root path: the repository directory one level above this script
ROOT_PATH = Path(os.path.dirname(__file__)).parent
# set seed value so clustering runs are reproducible
SEED = 1234
random.seed(SEED)
np.random.seed(SEED)
# read data
hdf5_data = h5py.File(os.path.join(ROOT_PATH, 'data/hdf5/hdf5_data.h5'), 'r')
ids = list(hdf5_data['ids'])
# We want around <=500 ids for ir ranking or for each cluster
# Since we can't apply min, max constraints on K-Means we set the number to 400.
# With 500 we ended up with 600+ samples per cluster on average and thats why we reduced to 400
# Formula: IDS_PER_CLUSTER ≈ N_IDS / N_CLUSTERS
N_IDS = len(ids)
IDS_PER_CLUSTER = 400
N_CLUSTERS = int(N_IDS / IDS_PER_CLUSTER)
# create data for kmeans: one training row per image, remembering which id it came from
train_images = []
train_ids = []
for id in ids:
    images = hdf5_data[f'{id}_images'][()]
    for i in range(images.shape[0]):
        train_images.append(images[i])
        train_ids.append(id.tolist())
assert len(train_images) == len(train_ids)
# stack training data into a single (n_images, feature_dim) matrix
train_data = np.stack(train_images, axis=0)
# train mini batch kmeans
kmeans = MiniBatchKMeans(n_clusters=N_CLUSTERS, verbose=True).fit(train_data)
# save kmeans model
pickle.dump(kmeans, open(f'{str(ROOT_PATH)}/data/kmeans/checkpoint.pkl', 'wb'))
# prepare dict with cluster label as key and ids as values
# NOTE(review): "cluser_ids" is a typo for "cluster_ids"; kept unchanged here.
cluser_ids = {str(label.tolist()): set() for label in kmeans.labels_}
for label, id in zip(kmeans.labels_, train_ids):
    cluser_ids[str(label.tolist())].add(id)
# sets are not JSON-serializable, so convert the values to lists
cluser_ids = {k: list(v) for k, v in cluser_ids.items()}
# write label clusters
with open(f'{str(ROOT_PATH)}/data/kmeans/cluster_ids.json', 'w') as json_file:
    json.dump(cluser_ids, json_file, ensure_ascii=False, indent=4)
# read kmeans model back and smoke-test it on a random example
kmeans = pickle.load(open(f'{str(ROOT_PATH)}/data/kmeans/checkpoint.pkl', 'rb'))
print(len(kmeans.labels_))
# assumes feature vectors are 1024-dim float32 — TODO confirm against the hdf5 contents
print(kmeans.predict(np.random.rand(1, 1024).astype(np.float32)))
# stats
ids_per_cluster = []
for k, v in cluser_ids.items():
    ids_per_cluster.append(len(v))
print(f'Total ids: {len(ids)}')
print(f'Total images: {len(train_ids)}')
# NOTE(review): len(kmeans.labels_) is the number of SAMPLES, not clusters —
# this line probably meant to print N_CLUSTERS.
print(f'Total clusters: {len(kmeans.labels_)}')
print(f'Num of ids per cluster: {ids_per_cluster}')
print(f'Max: {max(ids_per_cluster)}')
print(f'Min: {min(ids_per_cluster)}')
print(f'Avg.: {sum(ids_per_cluster) / len(ids_per_cluster)}')
| {
"alphanum_fraction": 0.7310889443,
"author": null,
"avg_line_length": 30.4556962025,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "36676c7fa9ddbde0faa47a463b92f59896de25b1",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2020-10-22T09:07:08.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-10-22T09:07:08.000Z",
"max_forks_repo_head_hexsha": "73809a755157fc9e51278b7fd246d13d19e2ab59",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "cleopatra-itn/GOAL",
"max_forks_repo_path": "scripts/cluster_images.py",
"max_issues_count": 12,
"max_issues_repo_head_hexsha": "73809a755157fc9e51278b7fd246d13d19e2ab59",
"max_issues_repo_issues_event_max_datetime": "2022-03-12T00:40:03.000Z",
"max_issues_repo_issues_event_min_datetime": "2020-07-07T18:02:28.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "cleopatra-itn/GOAL",
"max_issues_repo_path": "scripts/cluster_images.py",
"max_line_length": 95,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "73809a755157fc9e51278b7fd246d13d19e2ab59",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "cleopatra-itn/GOAL",
"max_stars_repo_path": "scripts/cluster_images.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 671,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 2406
} |
/*==============================================================================
Copyright (c) 2016 Matt Calabrese
Distributed under the Boost Software License, Version 1.0. (See accompanying
file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
==============================================================================*/
#ifndef BOOST_EXT_CALL_EXAMPLE_GRAPHVIZ_HPP_
#define BOOST_EXT_CALL_EXAMPLE_GRAPHVIZ_HPP_
#include <boost_ext/call/prov_traits/provide.hpp>
#include <boost_ext/call/receiver/graphviz.hpp>
#include <boost/filesystem/path.hpp>
#include <boost/program_options/options_description.hpp>
#include <graphviz/cgraph.h>
#include <iostream>
#include <string>
#define BOOST_EXT_CALL_EXAMPLE_GRAPH
#endif // BOOST_EXT_CALL_EXAMPLE_GRAPHVIZ_HPP_
#ifdef BOOST_EXT_CALL_EXAMPLE_GRAPHVIZ_MAIN
#undef BOOST_EXT_CALL_EXAMPLE_GRAPHVIZ_MAIN
// Example driver: parses command-line options and renders the example
// graph via the graphviz receiver when an output file is given.
int main( int const num_args, char** const args )
{
  namespace po = boost::program_options;

  std::string format;  // only used by the commented-out "format" option
  boost::filesystem::path output_file_path;

  // Describe the supported command-line options.
  po::options_description description( "Usage" );
  description.add_options()
    ("help", "Print command-line options.")
    ("graphviz-version"
    , "The version of the graphviz library that is being used."
    )/*
    ("format", po::value< std::string >( &format )->default_value( "svg" )
    , "Set output format."
    )*/
    ("output-file,o", po::value< boost::filesystem::path >( &output_file_path )
    , "Set the output file."
    );

  po::variables_map parsed_args;
  // Fix: po::store takes the parsed options AND the destination map; the
  // original call omitted `parsed_args`, which does not match the
  // Boost.Program_options API and would not compile.
  po::store( po::parse_command_line( num_args, args, description ), parsed_args );
  po::notify( parsed_args );

  if( parsed_args.count( "help" ) != 0 )
  {
    std::cout << description << '\n';
  }
  else
  if( parsed_args.count( "output-file" ) == 1 )
  {
    // TODO(mattcalabrese) Determine format here.
    boost_ext_call_example_graph
    ( []( char const* root_description, auto&& provider )
      {
        prov_traits::provide( boost_ext::receiver::graphviz( )
                            , std::forward< decltype( provider ) >( provider )
                            );
      }
    );
  }
  else
  {
    std::cout
      << "No output file was specified. Run this executable with the "
         "command-line option \"--help\" for usage information.\n";
  }

  return 0;
}
#endif
| {
"alphanum_fraction": 0.6239205527,
"author": null,
"avg_line_length": 27.9036144578,
"converted": null,
"ext": "hpp",
"file": null,
"hexsha": "7ae8dfe6b8190d338253160962a0e26021a11d08",
"include": null,
"lang": "C++",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 2,
"max_forks_repo_forks_event_max_datetime": "2020-12-28T06:53:29.000Z",
"max_forks_repo_forks_event_min_datetime": "2019-08-04T03:51:36.000Z",
"max_forks_repo_head_hexsha": "97349baaf27659c9dc4d67cf8963b2e871eaedae",
"max_forks_repo_licenses": [
"BSL-1.0"
],
"max_forks_repo_name": "mattcalabrese/argot",
"max_forks_repo_path": "example/include/call_example/graphviz.hpp",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "97349baaf27659c9dc4d67cf8963b2e871eaedae",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSL-1.0"
],
"max_issues_repo_name": "mattcalabrese/argot",
"max_issues_repo_path": "example/include/call_example/graphviz.hpp",
"max_line_length": 80,
"max_stars_count": 49,
"max_stars_repo_head_hexsha": "97349baaf27659c9dc4d67cf8963b2e871eaedae",
"max_stars_repo_licenses": [
"BSL-1.0"
],
"max_stars_repo_name": "mattcalabrese/argot",
"max_stars_repo_path": "example/include/call_example/graphviz.hpp",
"max_stars_repo_stars_event_max_datetime": "2021-07-21T10:05:19.000Z",
"max_stars_repo_stars_event_min_datetime": "2018-05-09T23:17:45.000Z",
"num_tokens": 541,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 2316
} |
# Package build step: move up from deps/ and delegate to the repo's shell
# build script.
cd("..")
# NOTE(review): assumes `bash` is on PATH — this build is Unix-only.
run(`bash build.sh`)
| {
"alphanum_fraction": 0.5333333333,
"author": null,
"avg_line_length": 10,
"converted": null,
"ext": "jl",
"file": null,
"hexsha": "afd8542cd0e308bb76b90ffef073c96bdb383348",
"include": null,
"lang": "Julia",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "a1124dd4cd09825a6ec79422ae6ca84053ed5261",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "ctrekker/PAMAuth.jl",
"max_forks_repo_path": "deps/build.jl",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "a1124dd4cd09825a6ec79422ae6ca84053ed5261",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "ctrekker/PAMAuth.jl",
"max_issues_repo_path": "deps/build.jl",
"max_line_length": 20,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "a1124dd4cd09825a6ec79422ae6ca84053ed5261",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "ctrekker/PAMAuth.jl",
"max_stars_repo_path": "deps/build.jl",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 9,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 30
} |
import pandas as pd
import numpy as np
from datetime import datetime
# Load the three-week-look-ahead export (tab-separated). Dates are strings
# in "%d-%b-%Y" format throughout and parsed on demand below.
df = pd.read_csv('3WLA.txt',sep='\t')
# Arbitrary reference epoch used only to order week-end snapshots.
epoch = datetime(1980, 1, 1)
# Timedelta of each row's reporting week-end relative to the epoch.
df["Cardinality TW"] = df["Week-End"].apply(lambda x: (datetime.strptime(x, "%d-%b-%Y")-epoch))
# Normalized activity key: lowercase, trimmed, whitespace removed.
df["Activity CL"] = df["Activity"].apply(lambda x: x.lower().strip().replace(" ", ""))
# Composite key: normalized activity + shaft, identifying one activity
# across successive weekly snapshots.
df["Activity CLSH"] = df["Activity CL"].apply(lambda x: str(x)) + df["Shaft"]
# Index of the FIRST row carrying the same composite key (idxmax of the
# boolean match finds the first True).
df["Initial Index"] = df["Activity CLSH"].apply(lambda x: (x==df["Activity CLSH"]).idxmax())
# Start/Finish dates as first reported for this activity.
df["Initial Start"] = df["Initial Index"].apply(lambda x: df.loc[x , "Start"])
df["Initial Finish"] = df["Initial Index"].apply(lambda x: df.loc[x , "Finish"])
df["Appears"] = df["Activity CLSH"].apply(lambda x: df.index[(df["Activity CLSH"] == x)].tolist()) # List of index appearances
# Planned duration as a Timedelta (converted to integer days further below).
df["Plan Duration"] = np.subtract(df["Finish"].apply(lambda x: (datetime.strptime(x, "%d-%b-%Y"))),df["Start"].apply(lambda x: (datetime.strptime(x, "%d-%b-%Y"))))
# LW Index Loop: for each row, the index of the most recent EARLIER row
# with the same composite key (0 when the activity is new this week).
# NOTE(review): O(n^2) nested loop — vectorizable via groupby/shift if the
# file grows large.
df["LW Index"] = 0
for i in df.index:
    focus_desc = df.loc[i , "Activity CLSH"]
    for j in df.index:
        if (j<i) & (df.loc[j , "Activity CLSH"]==focus_desc):
            df.loc[i , "LW Index"]=j
# Completed flag: classify each row and compute duration variances.
df["Completed"] = "Not Started"
df["To Finish TW"] = "No"
df["To Finish LW"] = "No"
for i in df.index:
    # Planned duration as first reported (inclusive of both end dates).
    original_days = np.subtract(datetime.strptime(df.loc[i , "Initial Finish"], "%d-%b-%Y"),datetime.strptime(df.loc[i , "Initial Start"], "%d-%b-%Y")).days+1
    df.loc[i , "Initial Duration"]=original_days
    # Finish on/before the reporting week-end -> activity is complete.
    if np.subtract(datetime.strptime(df.loc[i , "Finish"], "%d-%b-%Y"),datetime.strptime(df.loc[i , "Week-End"], "%d-%b-%Y")).days <=0:
        actual_days = np.subtract(datetime.strptime(df.loc[i , "Finish"], "%d-%b-%Y"),datetime.strptime(df.loc[i , "Start"], "%d-%b-%Y")).days+1
        df.loc[i , "Completed"]="Completed"
        df.loc[i , "Actual Duration"]=actual_days
        # NOTE(review): the two variance columns use opposite sign
        # conventions (original-actual vs actual-original) — confirm intended.
        df.loc[i , "Var Days Duration"]=original_days-actual_days
        df.loc[i , "Var % Duration"]=(actual_days-original_days)/original_days
    # Finish falls inside the coming week -> due to finish this week.
    elif np.subtract(datetime.strptime(df.loc[i , "Finish"], "%d-%b-%Y"),datetime.strptime(df.loc[i , "Week-End"], "%d-%b-%Y")).days <7:
        df.loc[i , "To Finish TW"]="Yes"
    # Started before the week-end but finishing after it -> in progress.
    if (np.subtract(datetime.strptime(df.loc[i , "Start"], "%d-%b-%Y"),datetime.strptime(df.loc[i , "Week-End"], "%d-%b-%Y")).days <0) & (np.subtract(datetime.strptime(df.loc[i , "Finish"], "%d-%b-%Y"),datetime.strptime(df.loc[i , "Week-End"], "%d-%b-%Y")).days >0):
        df.loc[i , "Completed"]="In Progress"
# Pull last week's "due to finish" flag via the LW Index link.
df["To Finish LW"] = df["LW Index"].apply(lambda x: df.loc[x , "To Finish TW"])
df.loc[df["LW Index"] == 0, "To Finish LW"] = "No" #Post-lambda correction to new look-ahead entries
# Last week's reported start/finish dates for the same activity.
df["LW Start"] = df["LW Index"].apply(lambda x: df.loc[x , "Start"])
df["LW Finish"] = df["LW Index"].apply(lambda x: df.loc[x , "Finish"])
# Overall slips: current dates vs the first-ever reported dates.
df["OV Start Slip"] = np.subtract(df["Start"].apply(lambda x: (datetime.strptime(x, "%d-%b-%Y"))),df["Initial Start"].apply(lambda x: (datetime.strptime(x, "%d-%b-%Y"))))
df["OV Finish Slip"] = np.subtract(df["Finish"].apply(lambda x: (datetime.strptime(x, "%d-%b-%Y"))),df["Initial Finish"].apply(lambda x: (datetime.strptime(x, "%d-%b-%Y"))))
# Week-over-week slips: current dates vs last week's dates.
df["LW Start Slip"] = np.subtract(df["Start"].apply(lambda x: (datetime.strptime(x, "%d-%b-%Y"))),df["LW Start"].apply(lambda x: (datetime.strptime(x, "%d-%b-%Y"))))
df["LW Finish Slip"] = np.subtract(df["Finish"].apply(lambda x: (datetime.strptime(x, "%d-%b-%Y"))),df["LW Finish"].apply(lambda x: (datetime.strptime(x, "%d-%b-%Y"))))
# Change in total span vs last week: (Finish-Start) - (LW Finish-LW Start).
df["LW Duration Slip"] = np.add(np.subtract(df["Finish"].apply(lambda x: (datetime.strptime(x, "%d-%b-%Y"))),df["Start"].apply(lambda x: (datetime.strptime(x, "%d-%b-%Y")))),np.subtract(df["LW Start"].apply(lambda x: (datetime.strptime(x, "%d-%b-%Y"))),df["LW Finish"].apply(lambda x: (datetime.strptime(x, "%d-%b-%Y")))))
## Filters
# Any movement vs last week (Timedelta compared against the string "0 days").
df["Movement"]= (df["LW Duration Slip"]!="0 days") | (df["LW Start Slip"]!="0 days") | (df["LW Finish Slip"]!="0 days")
# Days until the activity starts, relative to the reporting week-end.
df["Immenence"] = np.subtract(df["Start"].apply(lambda x: (datetime.strptime(x, "%d-%b-%Y"))),df["Week-End"].apply(lambda x: (datetime.strptime(x, "%d-%b-%Y"))))
df["First Appear"] = df.index==df["Initial Index"]
# "Surprise": activity first appears within two weeks of starting and is
# not yet complete.
df["Surprise"] = (df["First Appear"]) & (df["Immenence"].dt.days <14) & (df["Completed"] != "Completed")
#df["PD Date"] = pd.to_datetime(df["Week-End"])
#Change movements to just days
df["Plan Duration"] = df["Plan Duration"].apply(lambda x: x.days+1)
df["OV Start Slip"] = df["OV Start Slip"].apply(lambda x: x.days)
df["OV Finish Slip"] = df["OV Finish Slip"].apply(lambda x: x.days)
df["LW Start Slip"] = df["LW Start Slip"].apply(lambda x: x.days)
df["LW Finish Slip"] = df["LW Finish Slip"].apply(lambda x: x.days)
df["LW Duration Slip"] = df["LW Duration Slip"].apply(lambda x: x.days)
df["Immenence"] = df["Immenence"].apply(lambda x: x.days)
#Drop Unrequired columns from dataframe (not needed in output)
df = df.drop(columns=["Cardinality TW","Activity CL", "Activity CLSH","Initial Index","Appears","LW Index"])
###### START MACHINE LEARNING MODEL
# Train an MLP regressor to predict activity durations from categorical
# features plus the initially-planned duration, then predict for every row.
from sklearn import preprocessing
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPRegressor

# Train only on rows with a known actual duration (completed activities).
df_ml = df[df["Actual Duration"]>0]

# BUG FIX: the original code reused ONE LabelEncoder, re-fitting it per
# column on df_ml for training and then again on df for prediction. Because
# df can contain categories absent from df_ml, the integer codes assigned at
# training time could differ from those used at prediction time. Fit one
# encoder per column on the FULL frame (a superset of df_ml) and reuse it
# for both phases so the codes are consistent.
categorical_columns = ["Shaft", "Discipline", "Critical Path", "Extra Work"]
encoders = {}
for col in categorical_columns:
    enc = LabelEncoder()
    enc.fit(df[col].astype(str))
    encoders[col] = enc

def _encoded_features(frame):
    """Feature matrix rows = [encoded categoricals..., Initial Duration]."""
    return np.asarray([encoders[col].transform(frame[col].astype(str))
                       for col in categorical_columns]
                      + [frame["Initial Duration"]])

input_data_collection = _encoded_features(df_ml)
output_data_collection = np.asarray([df_ml["Actual Duration"]])
data = input_data_collection.transpose()
target = output_data_collection.transpose()

# Scale features and target to [0, 1] for the MLP.
data_scaler = preprocessing.MinMaxScaler()
target_scaler = preprocessing.MinMaxScaler()
data_fitted = data_scaler.fit_transform(data)
target_fitted = target_scaler.fit_transform(target.reshape(-1,1))

x_train, x_test, y_train, y_test = train_test_split(data_fitted,target_fitted,test_size=0.80)
classifier = MLPRegressor(max_iter=10000, activation='relu', solver='adam', random_state=1)
classifier.fit(x_train,np.ravel(y_train))

#Prediction model... (using the complete data set)
predict_data = _encoded_features(df).transpose()
# BUG FIX: scale the prediction features with the scaler fitted on the
# TRAINING data. The original fitted a fresh MinMaxScaler on the prediction
# set, so the model received inputs on a different scale than it was
# trained on.
predict_data_fitted = data_scaler.transform(predict_data)
output_predicted = classifier.predict(predict_data_fitted)
# Undo the target scaling to get durations back in days.
output_predicted_unscaled = target_scaler.inverse_transform(output_predicted.reshape(-1,1))
df["Predicted Duration"] = output_predicted_unscaled

# Output the final data frame.
df.to_csv("3WLA analysis.txt", sep='\t')
| {
"alphanum_fraction": 0.6445079523,
"author": null,
"avg_line_length": 51.5897435897,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "e547962fcfe7b954e23235e6dff6bbeaa59471aa",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "4d5873dce68d34dd3c1f3cab90b45bf7b11d7aba",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "chipnetics/vaporware",
"max_forks_repo_path": "python/las_model.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "4d5873dce68d34dd3c1f3cab90b45bf7b11d7aba",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "chipnetics/vaporware",
"max_issues_repo_path": "python/las_model.py",
"max_line_length": 323,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "4d5873dce68d34dd3c1f3cab90b45bf7b11d7aba",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "chipnetics/vaporware",
"max_stars_repo_path": "python/las_model.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 2078,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 8048
} |
# coding: utf-8
# This notebook will make abundance matched catalogs for Jeremy and Zhongxu. I'm gonna send this notebook along to them as well in case there's something not quite right that they want to adjust. The catalogs requested were defined as follows:
# - 2 catalogs, M_peak and V_max @ M_peak (which is how, I believe, V_max is defined in this catalog. Will check tho).
# - scatter 0.18 dex
# - number density of 4.2e-4
# - z = 0.55
# - using the SMF Jeremy provided, which is in this directory with name DR10_cBOSS_WISE_SMF_z0.45_0.60_M7.dat
# - On DS14, which is located on ki-ls at /nfs/slac/des/fs1/g/sims/yymao/ds14_b_sub, courtesy of Yao
# - Include in the catalog, along with the galaxies, M_vir, x, y, z, vx, vy, vz, M_gal, am_i_a_satellite?, and M_host
from os import path
import numpy as np
from AbundanceMatching import *
from halotools.sim_manager import RockstarHlistReader, CachedHaloCatalog

# Simulation hlist location (alternates kept for other machines/sims).
halo_dir = '/scratch/users/swmclau2/MDPL2/'
#halo_dir = '/nfs/slac/des/fs1/g/sims/yymao/ds14_b_sub/hlists/'
#halo_dir = '/scratch/users/swmclau2/hlists/ds_14_b_sub/hlists/'
a = 1.0#0.65
z = 1.0/a - 1 # scale factor -> redshift (~0.55 for a=0.65)
fname = path.join(halo_dir, 'hlist_%.5f.list'%a)
# Column index -> (name, dtype) mapping for the Rockstar hlist reader.
columns_to_keep = {'halo_id': (1, 'i8'), 'halo_upid':(6,'i8'), 'halo_mvir':(10, 'f4'), 'halo_x':(17, 'f4'), 'halo_y':(18,'f4'), 'halo_z':(19,'f4'),'halo_vx':(20,'f4'), 'halo_vy':(21, 'f4'), 'halo_vz':(22,'f4'),
                   'halo_rvir': (11, 'f4'),'halo_rs':(12,'f4'), 'halo_mpeak':(58, 'f4'),'halo_vmax@mpeak':(72, 'f4'), 'halo_m200b':(39, 'f4')}
simname = 'mdpl2'
# Only run the below if you want to cache, which is useful maybe the first time (maybe). It takes ~30 min and some disk space, so be warned.
#
# Update (Feb 1st, 2019): I had to edit halotools to make this work. The last line of the halocat was missing values... Specifically making the reader stop iteration once it encountered an indexerror.
#reader = RockstarHlistReader(fname, columns_to_keep, '/scratch/users/swmclau2/halocats/hlist_%.2f.list.%s.hdf5'%(a, simname),\
#                             simname,'rockstar', z, 'default', 1000.0, 2.44e9, overwrite=True, header_char = '#')
#reader.read_halocat(['halo_rvir', 'halo_rs'], write_to_disk=False, update_cache_log=False)
#
#reader.add_supplementary_halocat_columns()
#reader.write_to_disk()
#reader.update_cache_log()

# Load the previously-cached halo catalog.
halocat = CachedHaloCatalog(simname = simname, halo_finder='rockstar', redshift = z,version_name='most_recent')
# FIX: use the print() function (required on Python 3; identical output for a
# single object on Python 2).
print(halocat.halo_table.colnames)

#### TMP ####
# Gonna do a preliminary mass cut on the halocatalog
n_part = 20
# TODO do with mpeak instead
pmass = 1.5e9  # particle mass [Msun/h]; keep halos with >= n_part particles
halo_table = halocat.halo_table[halocat.halo_table['halo_mvir']>n_part*pmass]

# Stellar mass function: columns are (log10 M*, number density).
smf = np.genfromtxt('/home/users/swmclau2/Git/pearce/bin/shams/smf_dr72bright34_m7_lowm.dat', skip_header=True)[:,0:2]
#smf = np.genfromtxt('/scratch/users/swmclau2/smf_dr72bright34_m7_lowm.dat', skip_header=True)[:,0:2]
#smf = np.genfromtxt('DR10_cBOSS_WISE_SMF_z0.45_0.60_M7.dat', skip_header=True)[:,0:2]

nd = 5e-4#4.2e-4 #nd of final cat

# Halo property to abundance match on (alternates kept for other runs).
#ab_property = 'halo_mpeak'
#ab_property = 'halo_mvir'
#ab_property = 'halo_vmax@mpeak'
#ab_property = 'halo_vmax'
ab_property = 'halo_vpeak'

# Build the abundance function over the SMF's log-mass range and deconvolve
# the assumed lognormal scatter (remainder kept for diagnostics).
af = AbundanceFunction(smf[:,0], smf[:,1], (9.0, 12.9), faint_end_first = True)
scatter = 0.1#0.2#0.15
remainder = af.deconvolute(scatter, 20)

nd_halos = calc_number_densities(halo_table[ab_property], 1000.0) #box size in Mpc/h; don't think this matters which one i choose here

# Assign a stellar mass to each halo by abundance matching with scatter.
catalog = af.match(nd_halos, scatter)

h = 0.674  # Hubble parameter (unused below; box volume kept in (Mpc/h)^3)
n_obj_needed = int(nd*((1000.0)**3)) # don't divide by h

# Drop unmatched halos (NaN stellar mass), rank by stellar mass descending,
# and keep the brightest n_obj_needed objects to hit the target density.
non_nan_idxs = ~np.isnan(catalog)
sort_idxs = np.argsort(catalog[non_nan_idxs])[::-1]
final_catalog = catalog[non_nan_idxs][sort_idxs][:n_obj_needed]
output = halo_table[non_nan_idxs][sort_idxs][:n_obj_needed]
output['gal_smass'] = final_catalog
#output.write('/nfs/slac/g/ki/ki18/des/swmclau2/catalog_ab_%s_large.hdf5'%ab_property, format = 'hdf5', path = '%s_catalog'%ab_property, overwrite=True)
#output.write('/scratch/users/swmclau2/test_MDPL2_%s_smf_sham_large.hdf5'%ab_property, format = 'hdf5', path = '%s_catalog'%ab_property, overwrite=True)
#output.write('/scratch/users/swmclau2/MDPL2_%s_smf_sham.hdf5'%ab_property, format = 'hdf5', path = '%s_catalog'%ab_property, overwrite=True)
np.save('/scratch/users/swmclau2/UniverseMachine/cut_vpeak_loscat_sham_catalog.npy', output.as_array()[:n_obj_needed])
#print ab_property
| {
"alphanum_fraction": 0.7110041566,
"author": null,
"avg_line_length": 34.6287878788,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "d1136c40a7258ff48f3e088cc6d4b75ca6ed5909",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 3,
"max_forks_repo_forks_event_max_datetime": "2019-05-03T23:50:01.000Z",
"max_forks_repo_forks_event_min_datetime": "2016-10-04T08:07:52.000Z",
"max_forks_repo_head_hexsha": "746f2bf4bf45e904d66996e003043661a01423ba",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "mclaughlin6464/pearce",
"max_forks_repo_path": "bin/shams/make_sham.py",
"max_issues_count": 16,
"max_issues_repo_head_hexsha": "746f2bf4bf45e904d66996e003043661a01423ba",
"max_issues_repo_issues_event_max_datetime": "2018-05-01T22:53:39.000Z",
"max_issues_repo_issues_event_min_datetime": "2016-11-04T22:24:32.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "mclaughlin6464/pearce",
"max_issues_repo_path": "bin/shams/make_sham.py",
"max_line_length": 243,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "746f2bf4bf45e904d66996e003043661a01423ba",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "mclaughlin6464/pearce",
"max_stars_repo_path": "bin/shams/make_sham.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1541,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 4571
} |
import unittest
from opentamp.core.internal_repr import parameter
from opentamp.core.util_classes import robot_predicates, pr2_predicates, matrix
from opentamp.core.util_classes.openrave_body import OpenRAVEBody
from errors_exceptions import PredicateException, ParamValidationException
from opentamp.core.util_classes.param_setup import ParamSetup
import opentamp.core.util_classes.pr2_constants as const
import numpy as np
class TestPR2Predicates(unittest.TestCase):
# Begin of the test
    def test_robot_at(self):
        """PR2RobotAt(Robot, RobotPose): equality of robot state and a stored pose.

        Builds a 4-timestep trajectory and checks the predicate per timestep,
        plus out-of-range time handling.
        """
        # RobotAt, Robot, RobotPose
        robot = ParamSetup.setup_pr2()
        rPose = ParamSetup.setup_pr2_pose()
        pred = pr2_predicates.PR2RobotAt("testRobotAt", [robot, rPose], ["Robot", "RobotPose"])
        self.assertEqual(pred.get_type(), "PR2RobotAt")
        # Robot and RobotPose are initialized to the same pose
        self.assertTrue(pred.test(0))
        # With only a single timestep of data, any t > 0 is out of range.
        with self.assertRaises(PredicateException) as cm:
            pred.test(time=2)
        self.assertEqual(cm.exception.message, "Out of range time for predicate 'testRobotAt: (PR2RobotAt pr2 pr2_pose)'.")
        # Give the robot a 4-timestep base trajectory (columns = timesteps).
        robot.pose = np.array([[3, 4, 5, 3],
                               [6, 5, 7, 6],
                               [6, 3, 4, 6]])
        rPose.value = np.array([[3, 4, 5, 6],
                                [6, 5, 7, 1],
                                [6, 3, 9, 2]])
        self.assertTrue(pred.is_concrete())
        # Fill in the remaining degrees of freedom for all 4 timesteps.
        robot.rGripper = np.matrix([0.5, 0.4, 0.6, 0.5])
        robot.lGripper = np.matrix([0.5, 0.4, 0.6, 0.5])
        rPose.rGripper = np.matrix([0.5, 0.4, 0.6, 0.5])
        robot.backHeight = np.matrix([0.2, 0.29, 0.18, 0.2])
        robot.rArmPose = np.array([[0,0,0,0,0,0,0],
                                   [1,2,3,4,5,6,7],
                                   [7,6,5,4,3,2,1],
                                   [0,0,0,0,0,0,0]]).T
        robot.lArmPose = np.array([[0,0,0,0,0,0,0],
                                   [1,2,3,4,5,6,7],
                                   [7,6,5,4,3,2,1],
                                   [0,0,0,0,0,0,0]]).T
        rPose.rArmPose = np.array([[0,0,0,0,0,0,0]]).T
        rPose.lArmPose = np.array([[0,0,0,0,0,0,0]]).T
        # Times outside [0, 3] must raise, in both directions.
        with self.assertRaises(PredicateException) as cm:
            pred.test(time=4)
        self.assertEqual(cm.exception.message, "Out of range time for predicate 'testRobotAt: (PR2RobotAt pr2 pr2_pose)'.")
        with self.assertRaises(PredicateException) as cm:
            pred.test(time=-1)
        self.assertEqual(cm.exception.message, "Out of range time for predicate 'testRobotAt: (PR2RobotAt pr2 pr2_pose)'.")
        # Per the trajectory above, the predicate holds only at t=0 and t=3.
        self.assertTrue(pred.test(time=0))
        self.assertFalse(pred.test(time=1))
        self.assertFalse(pred.test(time=2))
        self.assertTrue(pred.test(time=3))
    def test_is_mp(self):
        """PR2IsMP(Robot): joint-step (motion-plan) limits over a 7-step trajectory.

        Each DOF moves by a per-joint step bound; a timestep passes only when
        all DOF deltas to the next step are within bounds.
        """
        robot = ParamSetup.setup_pr2()
        test_env = ParamSetup.setup_env()
        pred = pr2_predicates.PR2IsMP("test_isMP", [robot], ["Robot"], test_env)
        self.assertEqual(pred.get_type(), "PR2IsMP")
        # Dynamic predicate: with no trajectory set, testing must raise.
        with self.assertRaises(PredicateException) as cm:
            pred.test(time=0)
        self.assertEqual(cm.exception.message, "Insufficient pose trajectory to check dynamic predicate 'test_isMP: (PR2IsMP pr2)' at the timestep.")
        # Getting lowerbound and movement step
        # (slices follow the predicate's DOF ordering: backHeight, lArm(7),
        # lGripper, rArm(7), rGripper)
        lbH_l, bH_m = pred.lower_limit[0], pred.joint_step[0]
        llA_l, lA_m = pred.lower_limit[1:8], pred.joint_step[1:8]
        lrA_l, rA_m = pred.lower_limit[9:16], pred.joint_step[9:16]
        llG_l, lG_m = pred.lower_limit[8], pred.joint_step[8]
        lrG_l, rG_m = pred.lower_limit[16], pred.joint_step[16]
        # Base pose is valid in the timestep: 1,2,3,4,5
        robot.pose = np.array([[1,2,3,4,5,6,7],
                               [0,2,3,4,5,6,7],
                               [1,2,3,4,5,6,7]])
        # Arm pose is valid in the timestep: 0,1,2,3
        robot.rArmPose = np.hstack((lrA_l+rA_m, lrA_l+2*rA_m, lrA_l+3*rA_m, lrA_l+4*rA_m, lrA_l+3*rA_m, lrA_l+5*rA_m, lrA_l+100*rA_m))
        robot.lArmPose = np.hstack((llA_l+lA_m, llA_l+lA_m, llA_l+lA_m, llA_l+lA_m, llA_l+lA_m, llA_l+lA_m, llA_l+lA_m))
        # Gripper pose is valid in the timestep: 0,1,3,4,5
        robot.rGripper = np.matrix([lrG_l, lrG_l+rG_m, lrG_l+2*rG_m, lrG_l+5*rG_m, lrG_l+4*rG_m, lrG_l+3*rG_m, lrG_l+2*rG_m]).reshape((1,7))
        robot.lGripper = np.matrix([llG_l, llG_l+lG_m, llG_l+lG_m, llG_l+lG_m, llG_l+lG_m, llG_l+lG_m, llG_l+lG_m]).reshape((1,7))
        # Back height pose is always valid
        robot.backHeight = np.matrix([bH_m, bH_m, bH_m, bH_m, bH_m, bH_m, bH_m]).reshape((1,7))
        # Thus only timestep 1 and 3 are valid
        # import ipdb; ipdb.set_trace()
        self.assertFalse(pred.test(0))
        self.assertTrue(pred.test(1))
        self.assertFalse(pred.test(2))
        self.assertTrue(pred.test(3))
        self.assertFalse(pred.test(4))
        self.assertFalse(pred.test(5))
        # t=6 is the last column: no following step to compare against.
        with self.assertRaises(PredicateException) as cm:
            pred.test(6)
        self.assertEqual(cm.exception.message, "Insufficient pose trajectory to check dynamic predicate 'test_isMP: (PR2IsMP pr2)' at the timestep.")
    def test_within_joint_limit(self):
        """PR2WithinJointLimit(Robot): per-timestep joint-limit check.

        Deliberately pushes one DOF below its lower limit at timesteps 1, 3
        and 6 and checks only those timesteps fail.
        """
        robot = ParamSetup.setup_pr2()
        test_env = ParamSetup.setup_env()
        pred = pr2_predicates.PR2WithinJointLimit("test_joint_limit", [robot], ["Robot"], test_env)
        self.assertEqual(pred.get_type(), "PR2WithinJointLimit")
        # Getting lowerbound and movement step
        lbH_l, bH_m = pred.lower_limit[0], pred.joint_step[0]
        llA_l, lA_m = pred.lower_limit[1:8], pred.joint_step[1:8]
        lrA_l, rA_m = pred.lower_limit[9:16], pred.joint_step[9:16]
        llG_l, lG_m = pred.lower_limit[8], pred.joint_step[8]
        lrG_l, rG_m = pred.lower_limit[16], pred.joint_step[16]
        # Base pose is valid in the timestep: 1,2,3,4,5
        robot.pose = np.array([[1,2,3,4,5,6,7],
                               [0,2,3,4,5,6,7],
                               [1,2,3,4,5,6,7]])
        # timestep 6 should fail (right arm far beyond limit at last column)
        robot.rArmPose = np.hstack((lrA_l+rA_m, lrA_l+2*rA_m, lrA_l+3*rA_m, lrA_l+4*rA_m, lrA_l+3*rA_m, lrA_l+5*rA_m, lrA_l+100*rA_m))
        # timestep 1 should fail (left arm below lower limit: llA_l - lA_m)
        robot.lArmPose = np.hstack((llA_l+lA_m, llA_l-lA_m, llA_l+lA_m, llA_l+lA_m, llA_l+lA_m, llA_l+lA_m, llA_l+lA_m))
        robot.rGripper = np.matrix([lrG_l, lrG_l+rG_m, lrG_l+2*rG_m, lrG_l+5*rG_m, lrG_l+4*rG_m, lrG_l+3*rG_m, lrG_l+2*rG_m]).reshape((1,7))
        robot.lGripper = np.matrix([llG_l, llG_l+lG_m, llG_l+lG_m, llG_l+lG_m, llG_l+lG_m, llG_l+lG_m, llG_l+lG_m]).reshape((1,7))
        # timestep 3 s fail (negative back height)
        robot.backHeight = np.matrix([bH_m, bH_m, bH_m, -bH_m, bH_m, bH_m, bH_m]).reshape((1,7))
        # Thus timestep 1, 3, 6 should fail
        self.assertTrue(pred.test(0))
        self.assertFalse(pred.test(1))
        self.assertTrue(pred.test(2))
        self.assertFalse(pred.test(3))
        self.assertTrue(pred.test(4))
        self.assertTrue(pred.test(5))
        self.assertFalse(pred.test(6))
def test_in_contact(self):
test_env = ParamSetup.setup_env()
robot = ParamSetup.setup_pr2("robotttt")
ee_pose = ParamSetup.setup_ee_pose()
target = ParamSetup.setup_target("omg")
pred = pr2_predicates.PR2InContact("test_set_gripper", [robot, ee_pose, target], ["Robot", "EEPose", "Target"], test_env)
self.assertEqual(pred.get_type(), "PR2InContact")
target.value = np.array([[0],[0],[0]])
ee_pose.value = np.array([[0],[0],[0]])
robot.rGripper = np.matrix([0.3])
self.assertFalse(pred.test(0))
robot.rGripper = np.matrix([const.GRIPPER_CLOSE_VALUE])
self.assertTrue(pred.test(0))
def test_in_gripper(self):
tol = 1e-4
TEST_GRAD = False
# InGripper, Robot, Can
robot = ParamSetup.setup_pr2()
can = ParamSetup.setup_blue_can(geom = (0.04, 0.25))
test_env = ParamSetup.setup_env()
pred = pr2_predicates.PR2InGripperPos("InGripper", [robot, can], ["Robot", "Can"], test_env)
pred2 = pr2_predicates.PR2InGripperRot("InGripper_rot", [robot, can], ["Robot", "Can"], test_env)
# Since this predicate is not yet concrete
self.assertFalse(pred.test(0))
can.pose = np.array([[0,0,0]]).T
# initialized pose value is not right
self.assertFalse(pred.test(0))
self.assertTrue(pred2.test(0))
# check the gradient of the implementations (correct)
if const.TEST_GRAD: pred.expr.expr.grad(pred.get_param_vector(0), True, tol)
# Now set can's pose and rotation to be the right things
can.pose = np.array([[5.77887566e-01, -1.26743678e-01, 8.37601627e-01]]).T
self.assertTrue(pred.test(0))
if const.TEST_GRAD: pred.expr.expr.grad(pred.get_param_vector(0), True, tol)
# A new robot arm pose
robot.rArmPose = np.array([[-np.pi/3, np.pi/7, -np.pi/5, -np.pi/3, -np.pi/7, -np.pi/7, np.pi/5]]).T
self.assertFalse(pred.test(0))
# Only the pos is correct, rotation is not yet right
can.pose = np.array([[0.59152062, -0.71105108, 1.05144139]]).T
self.assertTrue(pred.test(0))
if const.TEST_GRAD: pred.expr.expr.grad(pred.get_param_vector(0), True, tol)
can.rotation = np.array([[0.02484449, -0.59793421, -0.68047349]]).T
self.assertTrue(pred.test(0))
self.assertTrue(pred2.test(0))
if const.TEST_GRAD: pred.expr.expr.grad(pred.get_param_vector(0), True, tol)
# now rotate robot basepose
robot.pose = np.array([[0,0,np.pi/3]]).T
self.assertFalse(pred.test(0))
if const.TEST_GRAD: pred.expr.expr.grad(pred.get_param_vector(0), True, tol)
can.pose = np.array([[0.91154861, 0.15674634, 1.05144139]]).T
self.assertTrue(pred.test(0))
if const.TEST_GRAD: pred.expr.expr.grad(pred.get_param_vector(0), True, tol)
can.rotation = np.array([[1.07204204, -0.59793421, -0.68047349]]).T
self.assertTrue(pred2.test(0))
self.assertTrue(pred.test(0))
if const.TEST_GRAD: pred.expr.expr.grad(pred.get_param_vector(0), True, tol)
robot.rArmPose = np.array([[-np.pi/4, np.pi/8, -np.pi/2, -np.pi/2, -np.pi/8, -np.pi/8, np.pi/3]]).T
self.assertFalse(pred.test(0))
if const.TEST_GRAD: pred.expr.expr.grad(pred.get_param_vector(0), True, tol)
can.rotation = np.array([[2.22529480e+00, 3.33066907e-16, -5.23598776e-01]]).T
self.assertTrue(pred2.test(0))
self.assertFalse(pred.test(0))
if const.TEST_GRAD: pred.expr.expr.grad(pred.get_param_vector(0), True, tol)
can.pose = np.array([[3.98707028e-01, 4.37093473e-01, 8.37601627e-01]]).T
self.assertTrue(pred.test(0))
# check the gradient of the implementations (correct)
if const.TEST_GRAD: pred.expr.expr.grad(pred.get_param_vector(0), True, tol)
# testing example from grasp where predicate continues to fail,
# confirmed that Jacobian is fine.
robot.pose = np.array([-0.52014383, 0.374093 , 0.04957286]).reshape((3,1))
robot.backHeight = np.array([ 2.79699865e-13]).reshape((1,1))
robot.lGripper = np.array([ 0.49999948]).reshape((1,1))
robot.rGripper = np.array([ 0.53268086]).reshape((1,1))
robot.rArmPose = np.array([-1.39996414, -0.31404741, -1.42086452, -1.72304084, -1.16688324,
-0.20148917, -3.33438558]).reshape((7,1))
robot.lArmPose = np.array([ 0.05999948, 1.24999946, 1.78999946, -1.68000049, -1.73000049,
-0.10000051, -0.09000051]).reshape((7,1))
can.pose = np.array([-0. , -0.08297436, 0.925 ]).reshape((3,1))
can.rotation = np.array([-0., -0., -0.]).reshape((3,1))
# Torso Jacobian is somehow off by half
# if const.TEST_GRAD: pred.expr.expr.grad(pred.get_param_vector(0), True, 1e-4)
    def test_ee_reachable(self):
        """PR2EEReachablePos/Rot(Robot, RobotPose, EEPose): grasp approach/retreat.

        Builds a 7-step IK trajectory (approach, grasp, retreat) around a
        target EE pose and checks the position predicate only holds at the
        grasp step (t=3) while the rotation predicate always holds.
        """
        # InGripper, Robot, Can
        robot = ParamSetup.setup_pr2()
        test_env = ParamSetup.setup_env()
        rPose = ParamSetup.setup_pr2_pose()
        ee_pose = ParamSetup.setup_ee_pose()
        pred = pr2_predicates.PR2EEReachablePos("ee_reachable", [robot, rPose, ee_pose], ["Robot", "RobotPose", "EEPose"], test_env)
        pred2 = pr2_predicates.PR2EEReachableRot("ee_reachable_rot", [robot, rPose, ee_pose], ["Robot", "RobotPose", "EEPose"], test_env)
        pr2 = pred._param_to_body[robot]
        # Since this predicate is not yet concrete
        self.assertFalse(pred.test(0))
        # Target end-effector pose for the grasp.
        # ee_pose.value = np.array([[0.951, -0.188, 0.790675]]).T
        ee_pose.value = np.array([[0.440, -0.339, 0.791]]).T
        ee_pose.rotation = np.array([[0,0,0]]).T
        # Visual marker body placed at the target pose (for debugging).
        ee_targ = ParamSetup.setup_green_can()
        ee_body = OpenRAVEBody(test_env, "EE_Pose", ee_targ.geom)
        ee_body.set_pose(ee_pose.value[:, 0], ee_pose.rotation[:, 0])
        # Zero out a 7-timestep robot trajectory.
        robot.lArmPose = np.zeros((7,7))
        robot.lGripper = np.ones((1, 7))*0.528
        robot.rArmPose = np.zeros((7,7))
        robot.rGripper = np.ones((1, 7))*0.528
        robot.pose = np.zeros((3,7))
        robot.backHeight = np.zeros((1,7))
        # initialized pose value is not right
        self.assertFalse(pred.test(0))
        # Find IK Solution: approach along -x before the grasp, retreat
        # along +z after it.
        trajectory = []
        trajectory.append(pr2.get_ik_from_pose([0.440-3*const.APPROACH_DIST, -0.339, 0.791], [0,0,0], "rightarm_torso")[0]) #s=-3
        trajectory.append(pr2.get_ik_from_pose([0.440-2*const.APPROACH_DIST, -0.339, 0.791], [0,0,0], "rightarm_torso")[0]) #s=-2
        trajectory.append(pr2.get_ik_from_pose([0.440-const.APPROACH_DIST, -0.339, 0.791], [0,0,0], "rightarm_torso")[0]) #s=-1
        trajectory.append(pr2.get_ik_from_pose([0.440, -0.339, 0.791], [0,0,0], "rightarm_torso")[0]) #s=0
        trajectory.append(pr2.get_ik_from_pose([0.440, -0.339, 0.791+const.RETREAT_DIST], [0,0,0], "rightarm_torso")[0]) #s=1
        trajectory.append(pr2.get_ik_from_pose([0.440, -0.339, 0.791+2*const.RETREAT_DIST], [0,0,0], "rightarm_torso")[0]) #s=2
        trajectory.append(pr2.get_ik_from_pose([0.440, -0.339, 0.791+3*const.RETREAT_DIST], [0,0,0], "rightarm_torso")[0]) #s=3
        trajectory = np.array(trajectory).T
        # Row 0 of the IK solution is torso height; rows 1+ are the arm DOFs.
        robot.backHeight = trajectory[[0], :]
        robot.rArmPose = trajectory[1:, :]
        # Predicate should succeed in the grasping post at t=3,
        # EEreachableRot should always pass since rotation is right all the time
        self.assertFalse(pred.test(0))
        self.assertTrue(pred2.test(0))
        self.assertFalse(pred.test(1))
        self.assertTrue(pred2.test(1))
        self.assertFalse(pred.test(2))
        self.assertTrue(pred2.test(2))
        self.assertTrue(pred.test(3))
        self.assertTrue(pred2.test(3))
        # Ik Gradient Check is not passing
        # if const.TEST_GRAD: pred.expr.expr.grad(pred.get_param_vector(3), True, 1e-2)
        # if const.TEST_GRAD: pred2.expr.expr.grad(pred2.get_param_vector(3), True, 1e-2)
    def test_obstructs(self):
        """PR2Obstructs(Robot, RobotPose, RobotPose, Can): collision predicate.

        Moves a can into and out of collision with the robot (base, gripper,
        arm) and checks the predicate and its negation at each placement.
        """
        # Obstructs, Robot, RobotPose, RobotPose, Can
        robot = ParamSetup.setup_pr2()
        rPose = ParamSetup.setup_pr2_pose()
        can = ParamSetup.setup_blue_can(geom = (0.04, 0.25))
        test_env = ParamSetup.setup_env()
        pred = pr2_predicates.PR2Obstructs("test_obstructs", [robot, rPose, rPose, can], ["Robot", "RobotPose", "RobotPose", "Can"], test_env, tol=const.TOL)
        self.assertEqual(pred.get_type(), "PR2Obstructs")
        # Since can is not yet defined
        self.assertFalse(pred.test(0))
        # Move can so that it collide with robot base
        can.pose = np.array([[0],[0],[0]])
        self.assertTrue(pred.test(0))
        # This gradient test passed
        if const.TEST_GRAD: pred.expr.expr.grad(pred.get_param_vector(0), num_check=True, atol=5e-2)
        # Move can away so there is no collision
        can.pose = np.array([[0],[0],[-2]])
        self.assertFalse(pred.test(0))
        # This gradient test passed
        if const.TEST_GRAD: pred.expr.expr.grad(pred.get_param_vector(0), True, 1e-1)
        # Move can to the center of the gripper (touching -> should recognize as collision)
        can.pose = np.array([[.578, -.127, .838]]).T
        self.assertTrue(pred.test(0))
        self.assertFalse(pred.test(0, negated = True))
        # The gradient test below doesn't work because the collision normals in
        # the robot's right gripper already are inaccurate because the can is there.
        # if const.TEST_GRAD: pred.expr.expr.grad(pred.get_param_vector(0), num_check=True, atol=1e-1)
        # Move can away from the gripper, no collision
        can.pose = np.array([[.700, -.127, .838]]).T
        self.assertFalse(pred.test(0))
        self.assertTrue(pred.test(0, negated = True))
        # This gradient test passed
        if const.TEST_GRAD: pred.expr.expr.grad(pred.get_param_vector(0), num_check=True, atol=1e-1)
        # Move can into the robot arm, should have collision
        can.pose = np.array([[.50, -.3, .838]]).T
        self.assertTrue(pred.test(0))
        self.assertFalse(pred.test(0, negated = True))
        # The gradient test below doesn't work because the collision normals for
        # the robot's r_wrist_flex_link are inaccurate because the can is there.
        # if const.TEST_GRAD: pred.expr.expr.grad(pred.get_param_vector(0), num_check=True, atol=1e-1)
def test_obstructs_holding(self):
# Obstructs, Robot, RobotPose, RobotPose, Can, Can
robot = ParamSetup.setup_pr2()
rPose = ParamSetup.setup_pr2_pose()
can = ParamSetup.setup_blue_can("can1", geom = (0.04, 0.25))
can_held = ParamSetup.setup_blue_can("can2", geom = (0.04, 0.25))
test_env = ParamSetup.setup_env()
# test_env.SetViewer('qtcoin')
pred = pr2_predicates.PR2ObstructsHolding("test_obstructs", [robot, rPose, rPose, can, can_held], ["Robot", "RobotPose", "RobotPose", "Can", "Can"], test_env, debug = True)
self.assertEqual(pred.get_type(), "PR2ObstructsHolding")
# Since can is not yet defined
self.assertFalse(pred.test(0))
# Move can so that it collide with robot base
rPose.value = can.pose = np.array([[0],[0],[0]])
can_held.pose = np.array([[.5],[.5],[0]])
self.assertTrue(pred.test(0))
# This Grandient test passes
if const.TEST_GRAD: pred.expr.expr.grad(pred.get_param_vector(0), num_check=True, atol=.1)
# Move can away so there is no collision
can.pose = np.array([[0],[0],[-2]])
self.assertFalse(pred.test(0))
# This Grandient test passes
if const.TEST_GRAD: pred.expr.expr.grad(pred.get_param_vector(0), num_check=True, atol=.1)
# Move can to the center of the gripper (touching -> should recognize as collision)
can.pose = np.array([[.578, -.127, .838]]).T
self.assertTrue(pred.test(0))
self.assertFalse(pred.test(0, negated = True))
# This Gradient test failed, failed Link-> right gripper fingers
# if const.TEST_GRAD: pred.expr.expr.grad(pred.get_param_vector(0), num_check=True, atol=.1)
# Move can away from the gripper, no collision
can.pose = np.array([[.700, -.127, .838]]).T
self.assertFalse(pred.test(0))
self.assertTrue(pred.test(0, negated = True))
# This Gradient test passed
if const.TEST_GRAD: pred.expr.expr.grad(pred.get_param_vector(0), num_check=True, atol=.1)
# Move caheldn into the robot arm, should have collision
can.pose = np.array([[.50, -.3, .838]]).T
self.assertTrue(pred.test(0))
self.assertFalse(pred.test(0, negated = True))
# This gradient checks failed
# if const.TEST_GRAD: pred.expr.expr.grad(pred.get_param_vector(0), num_check=True, atol=.1)
pred._plot_handles = []
pred2 = pr2_predicates.PR2ObstructsHolding("test_obstructs_held", [robot, rPose, rPose, can_held, can_held], ["Robot", "RobotPose", "RobotPose", "Can", "Can"], test_env, debug = True)
rPose.value = can_held.pose = can.pose = np.array([[0],[0],[0]])
pred._param_to_body[can].set_pose(can.pose, can.rotation)
self.assertTrue(pred2.test(0))
can_held.pose = np.array([[0],[0],[-2]])
self.assertFalse(pred2.test(0))
# This Grandient test passed
if const.TEST_GRAD: pred2.expr.expr.grad(pred2.get_param_vector(0), num_check=True, atol=.1)
# Move can to the center of the gripper (touching -> should allow touching)
can_held.pose = np.array([[.578, -.127, .838]]).T
self.assertTrue(pred2.test(0, negated = True))
self.assertFalse(pred2.test(0))
# This Gradient test fails ->failed link: l_finger_tip, r_finger_tip, r_gripper_palm
# if const.TEST_GRAD: pred2.expr.expr.grad(pred2.get_param_vector(0), num_check=True, atol=.1)
# Move can away from the gripper, no collision
can_held.pose = np.array([[.700, -.127, .838]]).T
self.assertFalse(pred2.test(0))
self.assertTrue(pred2.test(0, negated = True))
# This Gradient test passed
if const.TEST_GRAD: pred2.expr.expr.grad(pred2.get_param_vector(0), num_check=True, atol=.1)
# Move caheldn into the robot arm, should have collision
can_held.pose = np.array([[.50, -.3, .838]]).T
self.assertTrue(pred2.test(0))
self.assertFalse(pred.test(0, negated = True))
# This Gradient test failed -> failed link: r_gripper_l_finger, r_gripper_r_finger
# if const.TEST_GRAD: pred2.expr.expr.grad(pred2.get_param_vector(0), num_check=True, atol=.1)
    def test_r_collides(self):
        """RCollides(Robot, Obstacle): collision checks between the PR2 and a box
        moved through a sequence of poses/rotations around the robot."""
        # RCollides Robot Obstacle
        robot = ParamSetup.setup_pr2()
        # NOTE(review): rPose is created but never used in this test -- confirm
        # whether it can be dropped.
        rPose = ParamSetup.setup_pr2_pose()
        table = ParamSetup.setup_box()
        test_env = ParamSetup.setup_env()
        # test_env.SetViewer("qtcoin")
        pred = pr2_predicates.PR2RCollides("test_r_collides", [robot, table], ["Robot", "Table"], test_env, debug = True)
        # self.assertEqual(pred.get_type(), "RCollides")
        # Table pose is not yet defined, so no collision is reported
        self.assertFalse(pred.test(0))
        # Table at the origin overlaps the robot base -> collision
        table.pose = np.array([[0],[0],[0]])
        self.assertTrue(pred.test(0))
        self.assertFalse(pred.test(0, negated = True))
        # This gradient test passed with a box
        if const.TEST_GRAD: pred.expr.expr.grad(pred.get_param_vector(0), num_check=True, atol=.1)
        # Raise the table so that it collides with the robot torso
        table.pose = np.array([[0],[0],[1.5]])
        self.assertTrue(pred.test(0))
        self.assertFalse(pred.test(0, negated = True))
        # This gradient test passed with a box
        if const.TEST_GRAD: pred.expr.expr.grad(pred.get_param_vector(0), num_check=True, atol=.1)
        # Move the table away so there is no collision
        table.pose = np.array([[0],[2],[.75]])
        self.assertFalse(pred.test(0))
        self.assertTrue(pred.test(0, negated = True))
        # This gradient test passed with a box
        if const.TEST_GRAD: pred.expr.expr.grad(pred.get_param_vector(0), num_check=True, atol=.1)
        # Table high above the robot -> no collision
        table.pose = np.array([[0],[0],[3]])
        self.assertFalse(pred.test(0))
        self.assertTrue(pred.test(0, negated = True))
        # This gradient test passed with a box
        if const.TEST_GRAD: pred.expr.expr.grad(pred.get_param_vector(0), num_check=True, atol=.1)
        # Table partially below ground, overlapping the base -> collision
        table.pose = np.array([[0],[0],[-0.4]])
        self.assertTrue(pred.test(0))
        self.assertFalse(pred.test(0, negated = True))
        # NOTE(review): the original comment said this gradient test failed,
        # yet the check is left enabled -- confirm which is intended.
        if const.TEST_GRAD: pred.expr.expr.grad(pred.get_param_vector(0), num_check=True, atol=.1)
        # table.pose = np.array([[1],[1],[.75]])
        # Table diagonally in front of the robot, close enough to collide
        table.pose = np.array([[0.545],[0.606],[.75]])
        self.assertTrue(pred.test(0))
        self.assertFalse(pred.test(0, negated = True))
        # This gradient test passed with a box
        if const.TEST_GRAD: pred.expr.expr.grad(pred.get_param_vector(0), num_check=True, atol=.1)
        # Rotated table still colliding
        table.pose = np.array([[1],[1],[.75]])
        table.rotation = np.array([[.5,.5,-.5]]).T
        self.assertTrue(pred.test(0))
        self.assertFalse(pred.test(0, negated = True))
        # This gradient test passed with a box
        if const.TEST_GRAD: pred.expr.expr.grad(pred.get_param_vector(0), num_check=True, atol=.1)
        # Table raised out of reach -> no collision (rotation carried over)
        table.pose = np.array([[.5],[.5],[2]])
        self.assertFalse(pred.test(0))
        self.assertTrue(pred.test(0, negated = True))
        # Rotated table beside the robot -> collision
        table.pose = np.array([[.5],[1.45],[.5]])
        table.rotation = np.array([[0.8,0,0]]).T
        self.assertTrue(pred.test(0))
        self.assertFalse(pred.test(0, negated = True))
        # This gradient test passed with a box
        if const.TEST_GRAD: pred.expr.expr.grad(pred.get_param_vector(0), num_check=True, atol=.1)
        """
        Uncomment the following to see the robot
        """
        # pred._param_to_body[table].set_pose(table.pose, table.rotation)
        # import ipdb; ipdb.set_trace()
| {
"alphanum_fraction": 0.6194574443,
"author": null,
"avg_line_length": 53.4194915254,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "809c6c9dd9ca8d5ea3260ece504d18ac82c6d528",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "f0642028d551d0436b3a3dbc3bfb2f23a00adc14",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "Algorithmic-Alignment-Lab/openTAMP",
"max_forks_repo_path": "opentamp/test/test_core/test_util_classes/test_pr2_predicates.py",
"max_issues_count": 1,
"max_issues_repo_head_hexsha": "f0642028d551d0436b3a3dbc3bfb2f23a00adc14",
"max_issues_repo_issues_event_max_datetime": "2022-02-13T22:48:09.000Z",
"max_issues_repo_issues_event_min_datetime": "2022-02-13T22:48:09.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "Algorithmic-Alignment-Lab/openTAMP",
"max_issues_repo_path": "opentamp/test/test_core/test_util_classes/test_pr2_predicates.py",
"max_line_length": 191,
"max_stars_count": 4,
"max_stars_repo_head_hexsha": "f0642028d551d0436b3a3dbc3bfb2f23a00adc14",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "Algorithmic-Alignment-Lab/openTAMP",
"max_stars_repo_path": "opentamp/test/test_core/test_util_classes/test_pr2_predicates.py",
"max_stars_repo_stars_event_max_datetime": "2022-03-26T17:33:13.000Z",
"max_stars_repo_stars_event_min_datetime": "2022-02-13T15:52:18.000Z",
"num_tokens": 7349,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 25214
} |
PROGRAM exemple
! Demo program: adds two values via the addition() helper and prints the sum.
! NOTE(review): a and b are not declared here despite IMPLICIT NONE, so they
! (and addition) are presumably supplied by the parlib/lib modules -- confirm.
USE parlib
USE lib
IMPLICIT NONE
! Result of the addition.
REAL:: more
more = addition(a,b)
WRITE(*,*) "The result: ", more
END PROGRAM
| {
"alphanum_fraction": 0.6893939394,
"author": null,
"avg_line_length": 8.8,
"converted": null,
"ext": "f90",
"file": null,
"hexsha": "a6841e174000b0ec07de5da937ba36c06cff614c",
"include": null,
"lang": "FORTRAN",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "14a340f370c8d0ac6f9844acb93269711d75916d",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "FFerrazzaT/-Training-Room-GNU-Make-",
"max_forks_repo_path": "Code/WildCard/app/exemple.f90",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "14a340f370c8d0ac6f9844acb93269711d75916d",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "FFerrazzaT/-Training-Room-GNU-Make-",
"max_issues_repo_path": "Code/WildCard/app/exemple.f90",
"max_line_length": 31,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "14a340f370c8d0ac6f9844acb93269711d75916d",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "FFerrazzaT/-Training-Room-GNU-Make-",
"max_stars_repo_path": "Code/WildCard/app/exemple.f90",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 38,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 132
} |
dir <- "~/workspace/dyn-urg/files/results/time-series-dynamism-experiment/"

# Read one dynamism time-series CSV from `dir` and tag every row with the
# generator type that produced it (column "type").
read_dynamism <- function(file_name, type_label) {
  ts <- read.table(paste(dir, file_name, sep=""), quote="\"", as.is=T,
                   colClasses=list("numeric"))
  ts["type"] <- type_label
  ts
}

nonhomogTS <- read_dynamism("non-homog-poisson-dynamism.csv", "non-homogeneous-Poisson")
homogTS <- read_dynamism("homog-poisson-dynamism.csv", "homogeneous-Poisson")
normalTS <- read_dynamism("normal-dynamism.csv", "normal")
uniformTS <- read_dynamism("uniform-dynamism.csv", "uniform")

# Stack all four series into a single data frame (all=T keeps every row).
res <- merge(nonhomogTS, homogTS, all=T)
res <- merge(res, normalTS, all=T)
res <- merge(res, uniformTS, all=T)
df <- data.frame(res)

library(ggplot2)
# Overlaid histograms of dynamism (column V1), one colour per generator type.
p <- ggplot(df, aes(x=V1, fill=type)) +
  geom_histogram(binwidth=.01, alpha=.5, position="identity") +
  xlab("dynamism") +
  scale_x_continuous(breaks=seq(0, 1, 0.05)) +
  scale_fill_brewer(palette="Set1")
show(p)
| {
"alphanum_fraction": 0.6958174905,
"author": null,
"avg_line_length": 40.4615384615,
"converted": null,
"ext": "r",
"file": null,
"hexsha": "3fefeccd56788a041fd5abae13d7ade7eb795c93",
"include": null,
"lang": "R",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "42ad86f8875bf2394c427789607199bd99d50480",
"max_forks_repo_licenses": [
"ECL-2.0",
"Apache-2.0"
],
"max_forks_repo_name": "rinde/dynamism-urgency-2015-code",
"max_forks_repo_path": "files/scripts/plot-timeseries-dynamism.r",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "42ad86f8875bf2394c427789607199bd99d50480",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"ECL-2.0",
"Apache-2.0"
],
"max_issues_repo_name": "rinde/dynamism-urgency-2015-code",
"max_issues_repo_path": "files/scripts/plot-timeseries-dynamism.r",
"max_line_length": 198,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "42ad86f8875bf2394c427789607199bd99d50480",
"max_stars_repo_licenses": [
"ECL-2.0",
"Apache-2.0"
],
"max_stars_repo_name": "rinde/dynamism-urgency-2015-code",
"max_stars_repo_path": "files/scripts/plot-timeseries-dynamism.r",
"max_stars_repo_stars_event_max_datetime": "2018-07-20T20:17:10.000Z",
"max_stars_repo_stars_event_min_datetime": "2018-07-20T20:17:10.000Z",
"num_tokens": 326,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 1052
} |
import numpy as np
class solver:
    """Constraint-propagation + backtracking Sudoku solver.

    The board is a 9x9x9 binary possibility grid: grid[x, y, i] == 1 means
    digit i+1 is still a candidate for cell (x, y).
    """

    def __init__(self):
        # Fresh board: every digit possible in every cell.
        self.grid = np.ones((9, 9, 9), dtype=np.uint8)
        self.Org = None        # snapshot of the originally entered clues
        self.isSolved = False
        self.Error = False     # set when some cell/box loses all candidates

    def setField(self, x, y, value):
        """Fix cell (x, y) to ``value`` (1-9); value <= 0 clears the cell."""
        if value > 0:
            self.grid[x, y, :] = 0
            self.grid[x, y, value - 1] = 1
        else:
            self.grid[x, y, :] = 1
        self.Org = np.copy(self.grid)

    def clearGrid(self):
        """Reset every cell to 'all digits possible'."""
        for i in range(9):
            for j in range(9):
                self.setField(i, j, 0)

    def getGrid(self, S=None, ignoreNewPossibilites=False):
        """Collapse possibility grid ``S`` (default self.grid) to 9x9 digits.

        Cells with exactly one candidate get that digit; others stay 0.
        Unless ``ignoreNewPossibilites``, digits that have a unique position
        inside a 3x3 box are filled in as well. Sets self.Error when a cell
        or box has no candidate left.
        """
        R = np.zeros((9, 9), dtype=np.uint8)
        if S is None:
            S = self.grid
        for x in range(9):
            for y in range(9):
                count = 0  # renamed from `sum` to avoid shadowing the builtin
                v = 0
                for i in range(9):
                    count += S[x, y, i]
                    if S[x, y, i]:
                        v = int(i)
                if count == 1:
                    R[x, y] = v + 1
                if count == 0:
                    self.Error = True
        if ignoreNewPossibilites:
            return R
        # Box pass: a digit possible in exactly one cell of a 3x3 box is fixed.
        for x in range(3):
            for y in range(3):
                nbh = S[3 * x:3 * x + 3, 3 * y:3 * y + 3]
                for i in range(9):
                    count = np.sum(nbh[:, :, i])
                    if count == 1:
                        for j in range(3):
                            for k in range(3):
                                if nbh[j, k, i]:
                                    R[3 * x + j, 3 * y + k] = i + 1
                    if count == 0:
                        self.Error = True
        return R

    def getPossibilitys(self):
        """Return a 9x9 array of remaining candidate counts per cell."""
        R = np.zeros((9, 9))
        for x in range(9):
            for y in range(9):
                R[x, y] = np.sum(self.grid[x, y, :])
        return R

    def printGrid(self, S=None, R=None):
        """Pretty-print digit grid ``R`` (derived from S / self.grid if omitted)."""
        if S is None:
            S = self.grid
        if self.Error:
            print("Sudoko not solvable")
        if R is None:
            R = self.getGrid(S)
        for y in range(9):
            for x in range(9):
                if R[x, y]:
                    print(int(R[x, y]), "\t", end='')
                else:
                    print(" \t", end='')
                if x % 3 == 2:
                    print("|", end='')
            print()
            if y % 3 == 2:
                print("-\t-\t-\t-\t-\t-\t-\t-\t")

    def printOrg(self):
        """Print the originally entered clues."""
        if self.Org is not None:
            self.printGrid(R=self.getGrid(self.Org, ignoreNewPossibilites=True))

    def printPossi(self):
        """Print the per-cell candidate counts."""
        self.printGrid(R=self.getPossibilitys())

    def diff2Org(self, g=None):
        """Print and return the digits deduced on top of the original clues.

        g: optional 9x9 digit grid to compare; defaults to the current grid.
        """
        if self.Org is None:
            return None
        if g is None:
            # BUG FIX: the old branching returned None whenever a grid `g`
            # was actually passed in; only derive from self.grid when g is None.
            if self.grid is None:
                return None
            g = self.getGrid(self.grid)
        diff = g - self.getGrid(self.Org, ignoreNewPossibilites=True)
        self.printGrid(R=diff)
        return diff

    def Change(self, A, B):
        """Scalar difference between two grids (0 means no change)."""
        return np.sum(A - B)

    def getClue(self, S=None):
        """One propagation step: remove fixed digits from the row, column and
        3x3 box of each solved cell. Returns a new possibility grid."""
        if S is None:
            # BUG FIX: the old code did np.copy(S) here, i.e. copied None.
            S = np.copy(self.grid)
        else:
            S = np.copy(S)
        R = self.getGrid(S)
        for x in range(9):
            for y in range(9):
                v = R[x, y]
                if v != 0:
                    S[x, :, int(v - 1)] = 0
                    S[:, y, int(v - 1)] = 0
                    xk, yk = x // 3, y // 3
                    S[xk * 3:xk * 3 + 3, yk * 3:yk * 3 + 3, int(v - 1)] = 0
                    # restore the solved cell itself
                    S[x, y, int(v - 1)] = 1
        return S

    def getObvious(self):
        """Iterate constraint propagation until a fixed point (max 100 rounds).

        Returns (isSolved, grid); also stores the propagated grid in self.grid.
        """
        S = np.copy(self.grid)
        New = self.getClue(S)
        for i in range(100):
            S = New
            New = self.getClue(S)
            if self.Change(New, S) == 0.0:
                print("Iterations:", i)
                break
        # Solved when every cell has exactly one candidate left (81 ones).
        # BUG FIX: the old check compared the np.sum function object itself
        # (`np.sum == 9*np.math.factorial(9)`), which was always False.
        if np.sum(New) == 81:
            self.isSolved = True
        self.grid = New
        print("target: \n", self.grid[:3, :3, 0])
        return (self.isSolved, New)

    def getSolution(self, grid=None):
        """Solve by backtracking; returns a solved 9x9 grid, or None when no
        clues have been entered and no grid was supplied."""
        if grid is None:
            if self.Org is None:
                return
            grid = self.getGrid(self.Org, ignoreNewPossibilites=True)

        def findNextCellToFill(grid, i, j):
            # Next empty cell, scanning from (i, j) first, then the whole board.
            for x in range(i, 9):
                for y in range(j, 9):
                    if grid[x][y] == 0:
                        return x, y
            for x in range(0, 9):
                for y in range(0, 9):
                    if grid[x][y] == 0:
                        return x, y
            return -1, -1

        def isValid(grid, i, j, e):
            # Digit e legal at (i, j)? Check row, column, then the 3x3 box.
            rowOk = all([e != grid[i][x] for x in range(9)])
            if rowOk:
                columnOk = all([e != grid[x][j] for x in range(9)])
                if columnOk:
                    # top-left corner of the box containing (i, j)
                    secTopX, secTopY = 3 * (i // 3), 3 * (j // 3)
                    for x in range(secTopX, secTopX + 3):
                        for y in range(secTopY, secTopY + 3):
                            if grid[x][y] == e:
                                return False
                    return True
            return False

        def solveSudoku(grid, i=0, j=0):
            # Classic depth-first backtracking over empty cells.
            i, j = findNextCellToFill(grid, i, j)
            if i == -1:
                return True
            for e in range(1, 10):
                if isValid(grid, i, j, e):
                    grid[i][j] = e
                    if solveSudoku(grid, i, j):
                        return True
                    # Undo the current cell for backtracking
                    grid[i][j] = 0
            return False

        solveSudoku(grid)
        return grid
if __name__ == "__main__":
    # BUG FIX: the original did `solver = solver()`, shadowing the class with
    # the instance and making further instantiation impossible.
    sudoku = solver()

    def case1():
        """Near-complete real puzzle (fully solvable with the extra clues)."""
        sudoku.setField(0, 0, 1)
        sudoku.setField(1, 0, 8)
        sudoku.setField(0, 1, 5)
        #sudoku.setField(1, 1, 2)
        sudoku.setField(2, 1, 3)
        sudoku.setField(3, 2, 1)
        sudoku.setField(4, 1, 4)
        sudoku.setField(5, 0, 9)
        sudoku.setField(7, 0, 4)
        sudoku.setField(8, 0, 5)
        sudoku.setField(7, 1, 8)
        sudoku.setField(8, 1, 1)
        sudoku.setField(0, 4, 2)
        #sudoku.setField(2, 5, 7)
        sudoku.setField(4, 4, 7)
        #sudoku.setField(5, 3, 5)
        sudoku.setField(3, 5, 8)
        sudoku.setField(8, 3, 9)
        sudoku.setField(8, 4, 6)
        #sudoku.setField(6, 5, 1)
        sudoku.setField(0, 6, 3)
        sudoku.setField(0, 8, 8)
        sudoku.setField(2, 8, 2)
        sudoku.setField(2, 7, 5)
        sudoku.setField(2, 6, 1)
        sudoku.setField(3, 6, 4)
        sudoku.setField(3, 8, 5)
        sudoku.setField(5, 8, 7)
        sudoku.setField(8, 7, 8)
        #sudoku.setField(6, 6, 5)
        sudoku.setField(7, 6, 6)
        # Extra clues that make the Sudoku fully solvable
        sudoku.setField(1, 4, 5)
        sudoku.setField(1, 5, 3)
        sudoku.setField(4, 7, 1)
        sudoku.setField(5, 5, 4)
        sudoku.setField(3, 3, 2)
        sudoku.setField(2, 3, 8)
        sudoku.setField(0, 7, 7)

    def case2():
        """Test deduction with a row constraint."""
        sudoku.setField(0, 1, 1)
        sudoku.setField(0, 2, 2)
        sudoku.setField(0, 3, 3)
        sudoku.setField(0, 4, 4)
        sudoku.setField(0, 5, 5)
        sudoku.setField(0, 6, 6)
        sudoku.setField(0, 7, 7)
        sudoku.setField(0, 8, 8)

    def case3():
        """Test deduction with row and column constraints."""
        sudoku.setField(0, 0, 1)
        sudoku.setField(8, 1, 9)
        sudoku.setField(0, 2, 3)
        sudoku.setField(0, 3, 4)
        sudoku.setField(0, 4, 5)
        sudoku.setField(0, 5, 6)
        sudoku.setField(0, 6, 7)
        sudoku.setField(0, 7, 8)

    def case4():
        """Test deduction with combined row/column constraints."""
        sudoku.setField(0, 0, 9)
        sudoku.setField(2, 3, 9)
        sudoku.setField(3, 6, 9)
        sudoku.setField(7, 7, 9)

    def case5():
        """Test deduction with row and neighbourhood (box) constraints."""
        sudoku.setField(0, 0, 9)
        sudoku.setField(2, 3, 9)
        sudoku.setField(1, 6, 2)
        sudoku.setField(1, 7, 5)

    def case6():
        """Test deduction with a box constraint."""
        sudoku.setField(0, 0, 1)
        sudoku.setField(0, 1, 2)
        sudoku.setField(0, 2, 3)
        sudoku.setField(1, 0, 4)
        sudoku.setField(1, 1, 5)
        sudoku.setField(1, 2, 6)
        sudoku.setField(2, 0, 7)
        sudoku.setField(2, 1, 8)

    def case7():
        """Box constraint split across two band rows."""
        sudoku.setField(0, 0, 1)
        sudoku.setField(0, 1, 2)
        sudoku.setField(0, 2, 3)
        sudoku.setField(1, 0, 4)
        sudoku.setField(1, 1, 5)
        sudoku.setField(1, 2, 6)
        sudoku.setField(3, 0, 7)
        sudoku.setField(3, 1, 8)
        sudoku.setField(4, 0, 9)

    # Evaluation
    case1()
    G = sudoku.getSolution()
    print("\n\n Org")
    sudoku.printOrg()
    print("\n\n Diff")
    sudoku.diff2Org()
    print("\n\n Possiblitys")
    sudoku.printPossi()
    print("\n\n Solution")
    sudoku.printGrid(R=G)
"alphanum_fraction": 0.4351503759,
"author": null,
"avg_line_length": 28.1647058824,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "1584037d21fb63c030f7bf91fe2f6082c9411550",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "24e26b3d7ecdffa01e1b2a47914cbe3fe3cebb1a",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "pshobowale/SudokuSolver",
"max_forks_repo_path": "app/sudokusolver/src/sudokusolver/solver.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "24e26b3d7ecdffa01e1b2a47914cbe3fe3cebb1a",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "pshobowale/SudokuSolver",
"max_issues_repo_path": "app/sudokusolver/src/sudokusolver/solver.py",
"max_line_length": 111,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "24e26b3d7ecdffa01e1b2a47914cbe3fe3cebb1a",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "pshobowale/SudokuSolver",
"max_stars_repo_path": "app/sudokusolver/src/sudokusolver/solver.py",
"max_stars_repo_stars_event_max_datetime": "2021-08-03T07:44:09.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-08-03T07:44:09.000Z",
"num_tokens": 2582,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 9576
} |
# -*- coding: utf-8 -*-
"""
Generating image window by weighted sampling map from input image
This can also be considered as a `weighted random cropping` layer of the
input image
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import tensorflow as tf
from niftynet.engine.image_window import N_SPATIAL
from niftynet.engine.sampler_uniform import UniformSampler
class WeightedSampler(UniformSampler):
    """Window sampler that draws locations according to a frequency map.

    Every input volume supplies a per-voxel weight image; the chance of a
    voxel (and the window around it) being picked is proportional to its
    frequency. This is computed in closed form with cumulative histograms
    over the first three (spatial) dims, so it can be viewed as a
    `weighted random cropping` layer over the input image.
    """

    def __init__(self,
                 reader,
                 data_param,
                 batch_size,
                 windows_per_image,
                 queue_length=10):
        # Delegate all queue/window bookkeeping to the uniform sampler.
        super(WeightedSampler, self).__init__(reader=reader,
                                              data_param=data_param,
                                              batch_size=batch_size,
                                              windows_per_image=windows_per_image,
                                              queue_length=queue_length)
        tf.logging.info('Initialised weighted sampler window instance')
        # Swap in the weighted coordinate generator defined below.
        self.spatial_coordinates_generator = weighted_spatial_coordinates
def weighted_spatial_coordinates(subject_id,
                                 data,
                                 img_sizes,
                                 win_sizes,
                                 n_samples=1):
    """Sample window coordinates with probability proportional to a weight map.

    Builds a cumulative histogram of the frequency map ``data['sampler']``
    and draws window centres from it. ``win_sizes`` may differ per modality
    (e.g. a segmentation net whose label window is smaller than its image
    window): coordinates are generated for the largest spatial window and
    then adjusted so the smaller windows are concentric with it.

    Args:
        subject_id (int): id written into the first column of every
            coordinate row.
        data (dict): must hold the weight map under 'sampler' with shape
            [x, y, z, 1, 1].
        img_sizes (dict): modality name -> full image size.
        win_sizes (dict): modality name -> window size.
        n_samples (int): number of windows to draw (clamped to >= 1).

    Returns:
        dict: modality name -> int32 array of shape
        (n_samples, 1 + 2 * N_SPATIAL), rows of
        [subject_id, begin_x, begin_y, begin_z, end_x, end_y, end_z].

    Raises:
        RuntimeError: when the weight map is missing.
        NotImplementedError: when input spatial sizes are inconsistent.
    """
    if data is None or data.get('sampler', None) is None:
        tf.logging.fatal("input weight map not found. please check "
                         "the configuration file")
        raise RuntimeError
    n_samples = max(n_samples, 1)
    uniq_spatial_size = set([img_size[:N_SPATIAL]
                             for img_size in list(img_sizes.values())])
    if len(uniq_spatial_size) > 1:
        tf.logging.fatal("Don't know how to generate sampling "
                         "locations: Spatial dimensions of the "
                         "grouped input sources are not "
                         "consistent. %s", uniq_spatial_size)
        raise NotImplementedError
    uniq_spatial_size = uniq_spatial_size.pop()

    # find spatial window location based on the largest spatial window
    spatial_win_sizes = [win_size[:N_SPATIAL]
                         for win_size in win_sizes.values()]
    spatial_win_sizes = np.asarray(spatial_win_sizes, dtype=np.int32)
    max_spatial_win = np.max(spatial_win_sizes, axis=0)
    for i in range(0, N_SPATIAL):
        assert uniq_spatial_size[i] >= max_spatial_win[i], \
            "window size {} is larger than image size {}".format(
                max_spatial_win[i], uniq_spatial_size[i])

    # crop the weight map to the region where a window centre may lie;
    # a centre outside this crop would push the window out of the image
    half_win = np.floor(max_spatial_win / 2).astype(int)
    try:
        cropped_map = data['sampler'][
            half_win[0]:-half_win[0] if max_spatial_win[0] > 1 else 1,
            half_win[1]:-half_win[1] if max_spatial_win[1] > 1 else 1,
            half_win[2]:-half_win[2] if max_spatial_win[2] > 1 else 1,
            0, 0]
        # BUG FIX: the original `assert np.all(cropped_map.shape) > 0`
        # compared a boolean to 0; check each cropped dimension explicitly.
        assert all(dim > 0 for dim in cropped_map.shape)
    except (IndexError, KeyError):
        tf.logging.fatal("incompatible map: %s", data['sampler'].shape)
        raise
    except AssertionError:
        tf.logging.fatal(
            "incompatible window size for weighted sampler. "
            "Please use smaller (fully-specified) spatial window sizes")
        raise

    # cumulative sum of the sorted, normalised sampling frequencies.
    # NOTE(review): assumes the weight map has a positive sum; an all-zero
    # map would produce NaNs here -- confirm upstream validation.
    flatten_map = cropped_map.flatten()
    sorted_data = np.cumsum(np.divide(np.sort(flatten_map), flatten_map.sum()))
    # keep the sorting indexes so the sampled position can be mapped back
    sorted_indexes = np.argsort(flatten_map)

    middle_coords = np.zeros((n_samples, N_SPATIAL), dtype=np.int32)
    for sample in range(0, n_samples):
        # evenly spaced quantiles plus a random perturbation,
        # giving a stochastic sampler
        sample_ratio = 1 - (np.random.random() + sample) / (n_samples + 1)
        # first position where the cumulative sum reaches the threshold
        try:
            sample_index = np.argmax(sorted_data >= sample_ratio)
        except ValueError:
            tf.logging.fatal("unable to choose sampling window based on "
                             "the current frequency map.")
            raise
        # invert the sample index to the pre-sorted (flat map) index
        inverted_sample_index = sorted_indexes[sample_index]
        # x, y, z on the cropped map; the crop offset is re-applied below
        middle_coords[sample, :N_SPATIAL] = np.unravel_index(
            inverted_sample_index, cropped_map.shape)[:N_SPATIAL]

    # subject-id column, computed once for all modalities
    # (the original rebuilt and clobbered `subject_id` inside the loop below)
    subject_ids = np.ones((n_samples,), dtype=np.int32) * subject_id

    # adjust coordinates for each modality's (possibly smaller) window
    all_coordinates = {}
    for mod in list(win_sizes):
        win_size = win_sizes[mod][:N_SPATIAL]
        half_win_diff = np.floor((max_spatial_win - win_size) / 2.0)
        # the crop shift above doubles as the centre-to-corner shift for the
        # largest window; adding half_win_diff centres smaller windows in it
        spatial_coords = np.zeros((n_samples, N_SPATIAL * 2), dtype=np.int32)
        spatial_coords[:, :N_SPATIAL] = \
            middle_coords[:, :N_SPATIAL] + half_win_diff[:N_SPATIAL]
        # the opposite corner is the start plus this modality's window size
        spatial_coords[:, N_SPATIAL:] = \
            spatial_coords[:, :N_SPATIAL] + win_size[:N_SPATIAL]
        spatial_coords = np.append(subject_ids[:, None], spatial_coords, axis=1)
        all_coordinates[mod] = spatial_coords
    return all_coordinates
| {
"alphanum_fraction": 0.643623523,
"author": null,
"avg_line_length": 44.0898203593,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "6a45aaa181e349ef8861d8bd5a8d357db4312af9",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "92a2f447738224fb10b83fa60c78a35e0c25ac34",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "amh28/NIF",
"max_forks_repo_path": "niftynet/engine/sampler_weighted.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "92a2f447738224fb10b83fa60c78a35e0c25ac34",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "amh28/NIF",
"max_issues_repo_path": "niftynet/engine/sampler_weighted.py",
"max_line_length": 79,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "92a2f447738224fb10b83fa60c78a35e0c25ac34",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "amh28/NIF",
"max_stars_repo_path": "niftynet/engine/sampler_weighted.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1623,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 7363
} |
include("Include.jl")
# extra:
using Flux
using Flux: @epochs
using BSON: @save
# load training set -
full_training_data_frame = load_fibrinolysis_training_data()
# filter out the data - keep only rows from visits 1, 2 and 3
experimental_data_table = filter(:visitid => x -> (x == 2 || x == 3 || x == 1), full_training_data_frame)
# initialize storage for the training data -
training_data = Vector{Tuple{Vector{Float32},Vector{Float32}}}()
# build a model architecture: one sigmoid hidden layer, linear output
has_TM_flag = 0
number_of_inputs = 14
number_of_outputs = 4
dimension_hidden_layer = number_of_inputs + 2
deep_fibrinolysis_model = Chain(Dense(number_of_inputs, dimension_hidden_layer, σ), Dense(dimension_hidden_layer, number_of_outputs));
# setup a loss function - mean absolute error
loss(x, y) = Flux.Losses.mae(deep_fibrinolysis_model(x), y; agg = mean)
# pointer to params -
ps = Flux.params(deep_fibrinolysis_model)
# # use old school gradient descent (with momentum) -
opt = Momentum(0.1, 0.95)
# main leave-one-out training loop: fold i holds out row i, trains on the rest
# NOTE(review): deep_fibrinolysis_model is created once and never
# re-initialised between folds, so each fold starts from weights that have
# already been trained on its held-out row -- confirm this is intended.
(P, D) = size(experimental_data_table)
for i ∈ 1:P
    for j = 1:P
        # leave one out -
        if (i != j)
            # get input (columns 3:16) and output (columns 17:20) data -
            input_data = convert.(Float32, Vector(experimental_data_table[j, 3:16]))
            output_data = convert.(Float32, Vector(experimental_data_table[j, 17:20]))
            data_example = (input_data, output_data)
            # capture -
            push!(training_data, data_example)
        end
    end
    # ok, so have the training data for this case -
    # train -
    @epochs 12000 Flux.train!(loss, ps, training_data, opt)
    # save the fold-i model to disk -
    model_name = "deep_fibrinolysis_model-L$(i)O-TF-TM-$(has_TM_flag)-ALL.bson"
    model_file_path = joinpath(_PATH_TO_MODELS, model_name)
    @save model_file_path deep_fibrinolysis_model
    # need to empty the training data before the next fold -
    empty!(training_data)
end
"alphanum_fraction": 0.6895027624,
"author": null,
"avg_line_length": 28.28125,
"converted": null,
"ext": "jl",
"file": null,
"hexsha": "dc7d9b8b486a4b51a631294f6f1ea07d58f1e232",
"include": null,
"lang": "Julia",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "eaab26053ce720b3affc8f1d27aaa0a4b624ca89",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "varnerlab/UVM-TopDown-LegacyModel",
"max_forks_repo_path": "training_network_2.jl",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "eaab26053ce720b3affc8f1d27aaa0a4b624ca89",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "varnerlab/UVM-TopDown-LegacyModel",
"max_issues_repo_path": "training_network_2.jl",
"max_line_length": 134,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "eaab26053ce720b3affc8f1d27aaa0a4b624ca89",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "varnerlab/UVM-TopDown-LegacyModel",
"max_stars_repo_path": "training_network_2.jl",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 488,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 1810
} |
from copy import deepcopy
import numpy as np
import torch
from ..data import FlatTransition
from . import OffPolicyAlgorithm
from .goal_sampling_strategy import GoalSamplingStrategy
class HAC(OffPolicyAlgorithm):
"""Hierarchical Actor-Critic."""
supported_goal_sampling_strategies = {"future", "episode", "final"}
    def __init__(self, name, model, child_failure_penalty, check_achievement,
                 flat_algo_kwargs=None, flat_algo_name="SAC", goal_sampling_strategy="future",
                 buffer_size=100000, testing_buffer_size=50000, batch_size=128, n_hindsight_goals=4,
                 testing_fraction=0.3, fully_random_fraction=0.1, bootstrap_testing_transitions=True,
                 use_normal_trans_for_testing=False, use_testing_transitions=True,
                 learn_from_deterministic_episodes=True, log_q_values=True, learning_starts=0,
                 bootstrap_end_of_episode=True, grad_steps_per_env_step=1):
        """
        Args:
            check_achievement (callable): Maps achieved_goal,
                desired_goal and parent_info to boolean achieved and float
                reward indicating whether the achieved_goal satisfies the
                desired goal and what reward this implies.
            child_failure_penalty: penalty value stored for when a child
                level fails (consumed outside this constructor).
            goal_sampling_strategy (str or GoalSamplingStrategy): hindsight
                goal selection; one of "future"/"episode"/"final" or a
                custom GoalSamplingStrategy instance.
            n_hindsight_goals (int): hindsight goals sampled per transition.
            learn_from_deterministic_episodes (bool): whether transitions
                from deterministic episodes are added to the replay buffer.

        NOTE(review): testing_buffer_size is accepted but not stored here --
        presumably unused or consumed elsewhere; confirm.
        """
        # Flat-algorithm setup is delegated entirely to the base class.
        super(HAC, self).__init__(name, flat_algo_name, model, fully_random_fraction,
                                  flat_algo_kwargs, buffer_size, batch_size, learning_starts,
                                  grad_steps_per_env_step)
        # Fail fast on an unknown goal sampling strategy.
        assert goal_sampling_strategy in self.supported_goal_sampling_strategies\
            or isinstance(goal_sampling_strategy, GoalSamplingStrategy), \
            "Goal sampling strategy {} not supported.".format(goal_sampling_strategy)
        self._child_failure_penalty = child_failure_penalty
        self._goal_sampling_strategy = goal_sampling_strategy
        self._check_achievement = check_achievement
        self._n_hindsight_goals = n_hindsight_goals
        self._testing_fraction = testing_fraction
        self._bootstrap_testing_transitions = bootstrap_testing_transitions
        self._use_normal_trans_for_testing = use_normal_trans_for_testing
        self._use_testing_transitions = use_testing_transitions
        self._learn_from_deterministic_episodes = learn_from_deterministic_episodes
        self._log_q_values = log_q_values
        self._bootstrap_end_of_episode = bootstrap_end_of_episode
        self._verbose = False
def _sample_achieved_goals(self, current_index):
goals = []
if self._n_hindsight_goals > 0:
n_transitions = len(self._episode_transitions)
if self._goal_sampling_strategy == "future":
n_goals = min(self._n_hindsight_goals,
n_transitions - current_index - 1)
indices = np.random.randint(low = current_index + 1, high = n_transitions,
size = n_goals)
elif self._goal_sampling_strategy == "episode":
n_goals = min(self._n_hindsight_goals, n_transitions)
indices = np.random.randint(low = 0, high = n_transitions, size = n_goals)
elif self._goal_sampling_strategy == "final":
# only generate hindsight goal from final state if environment is done
if self._episode_transitions[-1].env_done:
indices = [-1]
else:
indices = []
elif isinstance(self._goal_sampling_strategy, GoalSamplingStrategy):
indices = self._goal_sampling_strategy(self._episode_transitions,
self._n_hindsight_goals)
for i in indices:
goals.append(self._episode_transitions[i].subtask_tr.info["achieved_generalized_goal"])
return goals
def _add_experience_to_flat_algo(self, parent_info, deterministic_episode, node_is_sink, sess_info):
if self._learn_from_deterministic_episodes or not deterministic_episode:
self._ep_return = 0
# add transitions of this episode to replay buffer of flat RL algorithm
# (by applying hindsight goal and action manipulations)
for trans_index, tr in enumerate(self._episode_transitions):
has_achieved_now = tr.subtask_tr.info.get("has_achieved", False)
if self._bootstrap_end_of_episode:
done = has_achieved_now
else:
done = has_achieved_now or tr.env_info.done
# unaltered flat transition
f_trans_0 = FlatTransition(
obs = tr.subtask_tr.obs,
action = tr.subtask_tr.action,
reward = tr.subtask_tr.reward,
new_obs = tr.subtask_tr.new_obs,
done = done)
# if the node is a sink, do not attempt to manipulate action
# in hindsight and add original transition to replay buffer
if node_is_sink:
testing_transition = False
f_trans_base = f_trans_0
self._add_to_flat_replay_buffer(f_trans_0)
self._ep_return += f_trans_0.reward
# if the node is not a sink consider testing transitions
# and hindsight action transitions
else:
# did the child node use a deterministic version of its policy?
# (testing transition)
testing_transition = tr.algo_info["child_be_deterministic"]
# boolean indicating whether child achieved subgoal
did_child_achieve_subgoal = tr.child_feedback["has_achieved"]
# add testing transitions (if enabled)
# Optionally, also transitions generated by stochastic
# lower level are used as testing transitions.
# Only add transition with penalty if child node failed
# to achieve its subgoal, otherwise do not add any transition.
if self._use_testing_transitions \
and (testing_transition or self._use_normal_trans_for_testing) \
and not did_child_achieve_subgoal:
f_trans_testing = deepcopy(f_trans_0)
f_trans_testing.reward = self._child_failure_penalty
if not self._bootstrap_testing_transitions:
# Have to set done to True in these transitions
# (according to HAC paper and accompanying code).
f_trans_testing.done = True
if self._verbose:
print("testing transition in {}\n".format(self.name)
+ str(f_trans_testing))
self._add_to_flat_replay_buffer(f_trans_testing)
self._ep_return += f_trans_testing.reward
# hindsight action transition
# If child node achieved desired goal use original action.
# If not, use subgoal the child achieved as action.
f_trans_hindsight_action = deepcopy(f_trans_0)
if not testing_transition or did_child_achieve_subgoal:
self._ep_return += f_trans_hindsight_action.reward
if not did_child_achieve_subgoal:
f_trans_hindsight_action.action = tr.child_feedback["achieved_generalized_goal"]
# TODO: In principle, reward could depend on action, so have to recompute
if self._verbose:
print("hindsight action transition in {}\n".format(self.name)
+ str(f_trans_hindsight_action))
self._add_to_flat_replay_buffer(f_trans_hindsight_action)
f_trans_base = f_trans_hindsight_action
# hindsight goal transitions (based on hindsight action
# transition or original transition in case of a sink node)
achieved_goals = self._sample_achieved_goals(trans_index)
for hindsight_goal in achieved_goals:
f_trans_hindsight_goal = deepcopy(f_trans_base)
f_trans_hindsight_goal.obs = {
"partial_observation": tr.subtask_tr.obs["partial_observation"],
"desired_goal": hindsight_goal
}
f_trans_hindsight_goal.new_obs = {
"partial_observation": tr.subtask_tr.new_obs["partial_observation"],
"desired_goal": hindsight_goal
}
achieved, f_reward = self._check_achievement(
achieved_goal = tr.subtask_tr.info["achieved_generalized_goal"],
desired_goal = hindsight_goal,
obs = f_trans_hindsight_goal.obs,
action = f_trans_hindsight_goal.action,
parent_info = parent_info,
env_info = tr.env_info)
f_trans_hindsight_goal.done = achieved
f_trans_hindsight_goal.reward = f_reward
self._add_to_flat_replay_buffer(f_trans_hindsight_goal)
# log undiscounted ep return to tensorboard (includes contribution
# from testing transitions but not from hindsight transitions!)
# tensorboard logging
if self._tb_writer is not None:
self._tb_writer.add_scalar(f"{self.name}/ep_return", self._ep_return, sess_info.total_step)
self._episode_transitions.clear()
def get_algo_info(self, env_obs, parent_info):
# child_be_deterministic instructs the children to be deterministic whereas
# is_deterministic implies that this node is supposed to be deterministic
if parent_info is not None:
is_deterministic = parent_info.algo_info["child_be_deterministic"]
child_be_deterministic = is_deterministic
else:
is_deterministic = False
child_be_deterministic = False
# if child_be_deterministic is true, all children have to use deterministic policies
# until this node or an active parent gets back control (testing transition)
child_be_deterministic = child_be_deterministic or np.random.rand() < self._testing_fraction
new_algo_info = {
"is_deterministic": is_deterministic,
"child_be_deterministic": child_be_deterministic
}
return new_algo_info
| {
"alphanum_fraction": 0.6073188538,
"author": null,
"avg_line_length": 53.4536585366,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "6d108623d6983ed0cb8b8b04022ba918e5ed1c95",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "21a1cefc53e5c457745570460de0d99e68622e57",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "nicoguertler/graph_rl",
"max_forks_repo_path": "graph_rl/algorithms/hac.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "21a1cefc53e5c457745570460de0d99e68622e57",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "nicoguertler/graph_rl",
"max_issues_repo_path": "graph_rl/algorithms/hac.py",
"max_line_length": 107,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "21a1cefc53e5c457745570460de0d99e68622e57",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "nicoguertler/graphrl",
"max_stars_repo_path": "graph_rl/algorithms/hac.py",
"max_stars_repo_stars_event_max_datetime": "2022-01-04T15:21:55.000Z",
"max_stars_repo_stars_event_min_datetime": "2022-01-04T15:21:55.000Z",
"num_tokens": 2028,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 10958
} |
using RegexTools
using Test
@testset "RegexTools.jl" begin
    @testset "hex_escape" begin
        # Every ASCII character, once hex-escaped, must still match itself.
        for code in 0:126
            original = string(Char(code))
            escaped = RegexTools.hex_escape(original)
            @test !isnothing(match(Regex(escaped), original))
        end
    end
end
| {
"alphanum_fraction": 0.53125,
"author": null,
"avg_line_length": 19.5555555556,
"converted": null,
"ext": "jl",
"file": null,
"hexsha": "c317a1bef3384898c2b74d699e4af1b940852eab",
"include": null,
"lang": "Julia",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "8ea0a4d94235d5b2bb800a4a2763e7769b8a4a4d",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "josePereiro/RegexTools.jl",
"max_forks_repo_path": "test/runtests.jl",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "8ea0a4d94235d5b2bb800a4a2763e7769b8a4a4d",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "josePereiro/RegexTools.jl",
"max_issues_repo_path": "test/runtests.jl",
"max_line_length": 50,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "8ea0a4d94235d5b2bb800a4a2763e7769b8a4a4d",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "josePereiro/RegexTools.jl",
"max_stars_repo_path": "test/runtests.jl",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 88,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 352
} |
"""Reads a multi-tile CBF image, discovering its detector geometry automatically"""
import sys
import numpy
import pycbf
from scitbx.array_family import flex
from dxtbx.format.FormatCBF import FormatCBF
from dxtbx.format.FormatCBFFull import FormatCBFFull
from dxtbx.format.FormatStill import FormatStill
from dxtbx.model.detector import Detector
class cbf_wrapper(pycbf.cbf_handle_struct):
    """Convenience wrapper around a cbflib handle."""

    def add_category(self, name, columns):
        """Create category *name* and add one column per entry of *columns*."""
        self.new_category(name.encode())
        for column_name in columns:
            self.new_column(column_name.encode())

    def add_row(self, data):
        """Append a row to the current category.

        Entries beyond the number of columns in the category are truncated.
        Use '.' for an empty value in a row.
        """
        self.new_row()
        self.rewind_column()
        for entry in data:
            # Strings must be encoded to bytes first; non-strings (which
            # have no .encode) are stored as-is.
            try:
                self.set_value(entry.encode())
            except AttributeError:
                self.set_value(entry)
            if entry == ".":
                self.set_typeofvalue(b"null")
            try:
                self.next_column()
            except Exception:
                break

    def has_sections(self):
        """Whether the cbf has an array_structure_list_section table.

        That table changes how data is stored in the binary sections.
        """
        try:
            self.find_category(b"array_structure_list_section")
        except Exception as error:
            if "CBF_NOTFOUND" in str(error):
                return False
            raise error
        return True
class FormatCBFMultiTile(FormatCBFFull):
    """An image reading class for multi-tile CBF files."""

    @staticmethod
    def understand(image_file):
        """Check to see if this looks like a CBF format image, i.e. we can
        make sense of it.

        Returns True only if the file can be read by cbflib and contains
        more than one detector element.
        """
        try:
            cbf_handle = pycbf.cbf_handle_struct()
            cbf_handle.read_widefile(image_file.encode(), pycbf.MSG_DIGEST)
        except Exception:
            # Bug fix: previously only "CBFlib Error" exceptions returned
            # False; any other read failure fell through with cbf_handle
            # possibly unbound and implicitly returned None.
            return False

        # check if multiple arrays
        try:
            return cbf_handle.count_elements() > 1
        except Exception:
            # Bug fix: return False explicitly instead of an implicit None
            # for non-"CBFlib Error" exceptions.
            return False

    def _start(self):
        """Open the image file as a cbf file handle, and keep this somewhere
        safe."""
        FormatCBF._start(self)  # Note, skip up an inheritance level

    def detectorbase_start(self):
        # No detectorbase is provided for multi-tile CBFs.
        pass

    def _get_cbf_handle(self):
        """Return the cbf handle for this image, creating and caching it
        lazily on first use."""
        try:
            return self._cbf_handle
        except AttributeError:
            self._cbf_handle = cbf_wrapper()
            self._cbf_handle.read_widefile(self._image_file.encode(), pycbf.MSG_DIGEST)
            return self._cbf_handle

    def _detector(self):
        """Return a working detector instance with one panel per element."""
        cbf = self._get_cbf_handle()

        d = Detector()

        for i in range(cbf.count_elements()):
            ele_id = cbf.get_element_id(i)
            cbf.find_category(b"diffrn_data_frame")
            cbf.find_column(b"detector_element_id")
            cbf.find_row(ele_id)
            cbf.find_column(b"array_id")
            array_id = cbf.get_value()

            cbf_detector = cbf.construct_detector(i)

            p = d.add_panel()
            p.set_name(array_id)

            # code adapted below from dxtbx.model.detector.DetectorFactory.imgCIF_H
            pixel = (
                cbf_detector.get_inferred_pixel_size(1),
                cbf_detector.get_inferred_pixel_size(2),
            )

            fast = cbf_detector.get_detector_axes()[0:3]
            slow = cbf_detector.get_detector_axes()[3:6]
            origin = cbf_detector.get_pixel_coordinates_fs(0, 0)

            size = tuple(reversed(cbf.get_image_size(0)))

            # trusted range from the array_intensities category; fall back
            # to (0, 0) when the table is absent
            try:
                cbf.find_category(b"array_intensities")
                cbf.find_column(b"undefined_value")
                cbf.select_row(i)
                underload = cbf.get_doublevalue()
                overload = cbf.get_overload(i)
                trusted_range = (underload, overload)
            except Exception as e:
                if "CBF_NOTFOUND" not in str(e):
                    raise
                trusted_range = (0.0, 0.0)

            try:
                cbf.find_column(b"gain")
                cbf.select_row(i)
                gain = cbf.get_doublevalue()
            except Exception as e:
                if "CBF_NOTFOUND" not in str(e):
                    raise
                gain = 1.0

            # release the SWIG-owned detector object promptly
            cbf_detector.__swig_destroy__(cbf_detector)
            del cbf_detector

            p.set_local_frame(fast, slow, origin)
            p.set_pixel_size(tuple(map(float, pixel)))
            p.set_image_size(size)
            p.set_trusted_range(tuple(map(float, trusted_range)))
            p.set_gain(gain)
            # p.set_px_mm_strategy(px_mm) FIXME

        return d

    def _beam(self):
        """Return a working beam instance."""
        return self._beam_factory.imgCIF_H(self._get_cbf_handle())

    def get_raw_data(self):
        """Return a tuple of flex.double images, one per detector panel, or
        None if no array_data category can be located."""
        if self._raw_data is None:
            self._raw_data = []
            cbf = self._get_cbf_handle()

            # find the data
            cbf.select_category(0)
            while cbf.category_name().lower() != "array_data":
                try:
                    cbf.next_category()
                except Exception:
                    return None
            cbf.select_column(0)
            cbf.select_row(0)

            d = self.get_detector()

            for panel in d:
                name = panel.get_name()
                cbf.find_column(b"array_id")
                assert name == cbf.get_value()

                cbf.find_column(b"data")
                assert cbf.get_typeofvalue().find(b"bnry") > -1

                image_string = cbf.get_realarray_as_string()
                # Bug fix: numpy.fromstring is deprecated and the numpy.float
                # alias was removed in NumPy 1.20; use frombuffer with an
                # explicit float64 dtype (flex.double copies the data).
                image = flex.double(numpy.frombuffer(image_string, numpy.float64))

                parameters = cbf.get_realarrayparameters_wdims_fs()
                image_size = (parameters[6], parameters[5])

                image.reshape(flex.grid(*image_size))

                self._raw_data.append(image)

                try:
                    cbf.next_row()
                except Exception:
                    break
            assert len(d) == len(self._raw_data)

        return tuple(self._raw_data)
class FormatCBFMultiTileStill(FormatStill, FormatCBFMultiTile):
    """An image reading class for full CBF format images i.e. those from
    a variety of cameras which support this format. Custom derived from
    the FormatStill to handle images without a gonimeter or scan"""

    @staticmethod
    def understand(image_file):
        """Recognise multi-tile CBFs whose header declares no goniometer axes."""
        # According to ImageCIF, "Data items in the DIFFRN_MEASUREMENT_AXIS
        # category associate axes with goniometers."
        # http://www.iucr.org/__data/iucr/cifdic_html/2/cif_img.dic/Cdiffrn_measurement_axis.html
        header = FormatCBFMultiTile.get_cbf_header(image_file)
        return "diffrn_measurement_axis" not in header
if __name__ == "__main__":
    # Command-line smoke test: print whether each given file is recognised
    # as a multi-tile CBF image.
    for arg in sys.argv[1:]:
        print(FormatCBFMultiTile.understand(arg))
| {
"alphanum_fraction": 0.5857940942,
"author": null,
"avg_line_length": 32.1282051282,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "bf88ebc39840e10e6c6db52c2bfd532e81f43495",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 10,
"max_forks_repo_forks_event_max_datetime": "2021-09-30T14:48:50.000Z",
"max_forks_repo_forks_event_min_datetime": "2019-04-08T13:30:32.000Z",
"max_forks_repo_head_hexsha": "fc24e215a8052e7e17be4ad4b41f9dbb474d852a",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "toastisme/dxtbx",
"max_forks_repo_path": "src/dxtbx/format/FormatCBFMultiTile.py",
"max_issues_count": 448,
"max_issues_repo_head_hexsha": "fc24e215a8052e7e17be4ad4b41f9dbb474d852a",
"max_issues_repo_issues_event_max_datetime": "2022-03-31T15:58:48.000Z",
"max_issues_repo_issues_event_min_datetime": "2019-04-06T01:20:56.000Z",
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "toastisme/dxtbx",
"max_issues_repo_path": "src/dxtbx/format/FormatCBFMultiTile.py",
"max_line_length": 97,
"max_stars_count": 3,
"max_stars_repo_head_hexsha": "fc24e215a8052e7e17be4ad4b41f9dbb474d852a",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "toastisme/dxtbx",
"max_stars_repo_path": "src/dxtbx/format/FormatCBFMultiTile.py",
"max_stars_repo_stars_event_max_datetime": "2020-09-18T08:38:37.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-08-16T05:46:29.000Z",
"num_tokens": 1639,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 7518
} |
"""
Programmer:
Date of Development:
This code has been developed according to the procedures mentioned in the following research article:
" "
"""
import numpy as np
import time
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn import datasets
from Py_FS.wrapper.nature_inspired._utilities import Solution, Data, initialize, sort_agents, display, compute_fitness, compute_accuracy, Conv_plot
from Py_FS.wrapper.nature_inspired._transfer_functions import get_trans_function
def Name_of_the_wrapper(num_agents, max_iter, train_data, train_label, obj_function=compute_fitness, trans_func_shape='s', save_conv_graph=False):
    """Template for a nature-inspired feature-selection wrapper.

    Fill in the position-update logic in the main loop below; the rest of
    the scaffold (initialization, leader tracking, convergence plotting and
    the returned Solution object) is shared by all Py_FS wrappers.
    """
    # Name of the optimizer
    ############################### Parameters ####################################
    #                                                                             #
    #   num_agents: number of agents                                              #
    #   max_iter: maximum number of generations                                   #
    #   train_data: training samples of data                                      #
    #   train_label: class labels for the training samples                        #
    #   obj_function: the function to maximize while doing feature selection      #
    #   trans_function_shape: shape of the transfer function used                 #
    #   save_conv_graph: boolean value for saving convergence graph               #
    #                                                                             #
    ###############################################################################
    # fill in a short and a full display name for the concrete optimizer
    short_name = ''
    agent_name = ''
    train_data, train_label = np.array(train_data), np.array(train_label)
    num_features = train_data.shape[1]
    trans_function = get_trans_function(trans_func_shape)
    # setting up the objectives
    weight_acc = None
    if(obj_function==compute_fitness):
        weight_acc = float(input('Weight for the classification accuracy [0-1]: '))
    obj = (obj_function, weight_acc)
    # NOTE: this deliberately shadows the imported compute_accuracy
    compute_accuracy = (compute_fitness, 1) # compute_accuracy is just compute_fitness with accuracy weight as 1
    # initialize agents and Leader (the agent with the max fitness)
    agents = initialize(num_agents, num_features)
    fitness = np.zeros(num_agents)
    accuracy = np.zeros(num_agents)
    Leader_agent = np.zeros((1, num_features))
    Leader_fitness = float("-inf")
    Leader_accuracy = float("-inf")
    # initialize convergence curves
    convergence_curve = {}
    convergence_curve['fitness'] = np.zeros(max_iter)
    convergence_curve['feature_count'] = np.zeros(max_iter)
    # format the data
    data = Data()
    data.train_X, data.val_X, data.train_Y, data.val_Y = train_test_split(
        train_data, train_label, stratify=train_label, test_size=0.2)
    # create a solution object
    solution = Solution()
    solution.num_agents = num_agents
    solution.max_iter = max_iter
    solution.num_features = num_features
    solution.obj_function = obj_function
    # rank initial agents
    agents, fitness = sort_agents(agents, obj, data)
    # start timer
    start_time = time.time()
    for iter_no in range(max_iter):
        print('\n================================================================================')
        print('                          Iteration - {}'.format(iter_no+1))
        print('================================================================================\n')
        ################ write your main position update code here ################
        ###########################################################################
        # update final information
        agents, fitness = sort_agents(agents, obj, data)
        display(agents, fitness, agent_name)
        # update Leader (best agent)
        if fitness[0] > Leader_fitness:
            Leader_agent = agents[0].copy()
            Leader_fitness = fitness[0].copy()
        convergence_curve['fitness'][iter_no] = Leader_fitness
        convergence_curve['feature_count'][iter_no] = int(np.sum(Leader_agent))
    # compute final accuracy
    Leader_agent, Leader_accuracy = sort_agents(Leader_agent, compute_accuracy, data)
    agents, accuracy = sort_agents(agents, compute_accuracy, data)
    print('\n================================================================================')
    print('                                    Final Result                                  ')
    print('================================================================================\n')
    print('Leader ' + agent_name + ' Dimension : {}'.format(int(np.sum(Leader_agent))))
    print('Leader ' + agent_name + ' Fitness : {}'.format(Leader_fitness))
    print('Leader ' + agent_name + ' Classification Accuracy : {}'.format(Leader_accuracy))
    print('\n================================================================================\n')
    # stop timer
    end_time = time.time()
    exec_time = end_time - start_time
    # plot convergence graph
    fig, axes = Conv_plot(convergence_curve)
    if(save_conv_graph):
        plt.savefig('convergence_graph_'+ short_name + '.jpg')
    plt.show()
    # update attributes of solution
    solution.best_agent = Leader_agent
    solution.best_fitness = Leader_fitness
    solution.best_accuracy = Leader_accuracy
    solution.convergence_curve = convergence_curve
    solution.final_agents = agents
    solution.final_fitness = fitness
    solution.final_accuracy = accuracy
    solution.execution_time = exec_time
    return solution
if __name__ == '__main__':
    # Smoke test: run the template wrapper on the Iris dataset.
    iris = datasets.load_iris()
    Name_of_the_wrapper(10, 20, iris.data, iris.target, save_conv_graph=True)
| {
"alphanum_fraction": 0.56773634,
"author": null,
"avg_line_length": 40.8865248227,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "d2bbab95a6def330ece05589646c4d7d8994cb24",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 23,
"max_forks_repo_forks_event_max_datetime": "2022-03-31T04:36:33.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-11-18T14:01:09.000Z",
"max_forks_repo_head_hexsha": "afe96cb8271f1e86a77075d19ec107c37afbbff3",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "rishavpramanik/Feature-Selection",
"max_forks_repo_path": "build/lib/Py_FS/wrapper/nature_inspired/_general_structure.py",
"max_issues_count": 11,
"max_issues_repo_head_hexsha": "afe96cb8271f1e86a77075d19ec107c37afbbff3",
"max_issues_repo_issues_event_max_datetime": "2022-03-25T18:54:53.000Z",
"max_issues_repo_issues_event_min_datetime": "2020-10-21T18:05:12.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "rishavpramanik/Feature-Selection",
"max_issues_repo_path": "build/lib/Py_FS/wrapper/nature_inspired/_general_structure.py",
"max_line_length": 147,
"max_stars_count": 31,
"max_stars_repo_head_hexsha": "afe96cb8271f1e86a77075d19ec107c37afbbff3",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "rishavpramanik/Feature-Selection",
"max_stars_repo_path": "build/lib/Py_FS/wrapper/nature_inspired/_general_structure.py",
"max_stars_repo_stars_event_max_datetime": "2022-03-27T11:20:13.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-11-17T10:42:22.000Z",
"num_tokens": 1068,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 5765
} |
*dk,ltripedge
      function ltripedge(i,iedg,itet,itetoff,itettyp,iparent,jtet,
     & jtetoff,mbndry,nef_cmo,icr1,icontab)
C
C
C #####################################################################
C
C     PURPOSE -
C
C        This function is true iff the given edge is determined
C        to be a triple edge.
C
C     INPUT ARGUMENTS -
C
C     OUTPUT ARGUMENTS -
C
C     CHANGE HISTORY -
C
C $Log: ltripedge.f,v $
C Revision 2.00  2007/11/05 19:46:00  spchu
C Import to CVS
C
CPVCS
CPVCS       Rev 1.2   Fri Sep 03 10:47:12 1999   kuprat
CPVCS    Added documentation.
CPVCS
CPVCS       Rev 1.1   Mon Nov 02 17:47:36 1998   kuprat
CPVCS    Corrected format statement.
CPVCS
CPVCS       Rev 1.0   Fri Oct 23 16:17:16 1998   kuprat
CPVCS    Initial revision.
      implicit none
      include 'local_element.h'
      include 'chydro.h'
      integer maxelt
      parameter (maxelt=100)
      integer ieltlist(maxelt)
      integer i,iedg,iprevelt,icurrelt,icurredge,leneltlist,ic1,
     &   ic2,itet(*),itetoff(*),itettyp(*),ipar1,ipar2,iparent(*),
     &   imaxpar,iminpar,j,j1,jtetj,jtet(*),jtetoff(*),mbndry,
     &   nextelt,nef_cmo,nmat,ierrw,jedg,jc1,jc2,jpar1,jpar2,
     &   jmaxpar,jminpar,icra,icrb,icr1(*),nsc,nsa,nsb,icontab(50,*),
     &   n1,n2,njump
      character*132 logmess
      logical ltripedge,lhitoneboundary
c
c.... LHITONEBOUNDARY records whether one boundary face has already
c.... been encountered while transiting around the edge; a second
c.... boundary face ends the transit.
c
      lhitoneboundary=.false.
      iprevelt=0
      icurrelt=i
      icurredge=iedg
      leneltlist=1
      ieltlist(1)=i
c
c.... IC1/IC2 are the edge endpoints; their parents identify the edge
c.... independently of the element currently visited.
c
      ic1=itet(
     &   ielmedge1(1,iedg,itettyp(icurrelt))
     &   +itetoff(icurrelt))
      ic2=itet(
     &   ielmedge1(2,iedg,itettyp(icurrelt))
     &   +itetoff(icurrelt))
      ipar1=iparent(ic1)
      ipar2=iparent(ic2)
      imaxpar=max(ipar1,ipar2)
      iminpar=min(ipar1,ipar2)
c.... Compute the number of materials around the edge and
c.... the number of constraints of the edge.  If the sum of
c.... these numbers is greater than 3, we deem edge IEDG
c.... of element I to be a 'triple' edge.
c.... First transit around edge IEDG of element I and determine
c.... how many material jumps there are.  If the cycle around the
c.... edge is closed, the number of materials around the edge
c.... (NMAT) is equal to the number of jumps around the edge,
c.... unless there are no jumps, in which case NMAT=1.
c.... If the cycle is not closed (the case of a boundary edge),
c.... the number of materials is equal to the number of jumps
c.... plus one.
      njump=0
 1500 continue
c.... Loop over faces of current element to see which face we
c.... will transit through.
      do 1510 j=1,nelmnef(itettyp(icurrelt))
c.... Loop over edges of current face to see if any are equal
c.... to the pivot edge, in which case we attempt to transit through the
c.... face.
         do j1=1,ielmface0(j,itettyp(icurrelt))
            if (ielmface2(j1,j,itettyp(icurrelt)).eq
     &         .icurredge) then
c.... We attempt to transit through face J.
               jtetj=jtet(j+jtetoff(icurrelt))
c.... If face J is a boundary face, check if we haven't hit
c.... a boundary face before.
               if (jtetj.eq.mbndry) then
c.... If we haven't hit a boundary face before, go to the
c.... beginning of the cycle and transit in the opposite
c.... direction.  We expect to hit a second boundary face
c.... eventually.
                  if (.not.lhitoneboundary) then
                     lhitoneboundary=.true.
                     if (leneltlist.ge.2) then
                        iprevelt=ieltlist(2)
                        icurrelt=i
                        icurredge=iedg
                        goto 1500
                     else
                        goto 1510
                     endif
                  else
c.... We have hit a boundary face before, so our transiting
c.... task is complete.
                     nmat=njump+1
                     goto 10
                  endif
c.... If face J is an internal boundary face, we define NEXTELT to be
c.... the element on the other side of face J.  However we don't
c.... actually transit through J unless we aren't moving
c.... backwards (i.e. NEXTELT.NE.IPREVELT).  If this is true, we
c.... increment NJUMP.
               elseif (jtetj.gt.mbndry) then
                  nextelt=1+(jtetj-mbndry-1)/nef_cmo
                  if (nextelt.ne.iprevelt) njump=njump+1
               else
c.... Here face J is a regular (nonboundary) face and we
c.... NEXTELT to be the element on the opposite side.
                  nextelt=1+(jtetj-1)/nef_cmo
               endif
c.... We only transit through J to NEXTELT if we aren't
c.... moving backwards.
               if (nextelt.ne.iprevelt) then
c.... The next element is the beginning element, so we have
c.... closed the cycle.
                  if (nextelt.eq.i) then
c.... If NJUMP=0, there is only one material around the edge.
                     if (njump.eq.0) then
                        nmat=1
c.... If NJUMP=1, we have an error, because it is impossible to
c.... have a closed cycle with one material jump.
                     elseif (njump.eq.1) then
                        print*,'LTRIPEDGE: Top. error!'
                        stop
                     else
c.... Here the number of materials is equal to the number of jumps.
                        nmat=njump
                     endif
                     goto 10
                  endif
c.... Here we have not closed the cycle, so we simply make the previous
c.... element equal to the current element and make the current
c.... element equal to the next element, and write the new current
c.... element into the element list IELTLIST.
                  iprevelt=icurrelt
                  leneltlist=leneltlist+1
                  if (leneltlist.gt.maxelt) then
                     write(logmess,'(a,i6,a)') 'More than ', maxelt,
     &                  'elements sharing an edge!'
                     call writloga('default',0,logmess,0,ierrw)
                     stop
                  endif
                  icurrelt=nextelt
                  ieltlist(leneltlist)=nextelt
c.... We now locate the pivot edge in the new current element.
c.... (Matching is done on sorted parent node pairs so that the
c.... comparison is orientation independent.)
                  do jedg=1,nelmnee(itettyp(icurrelt))
                     jc1=itet(
     &                  ielmedge1(1,jedg,itettyp(icurrelt))
     &                  +itetoff(icurrelt))
                     jc2=itet(
     &                  ielmedge1(2,jedg,itettyp(icurrelt))
     &                  +itetoff(icurrelt))
                     jpar1=iparent(jc1)
                     jpar2=iparent(jc2)
                     jmaxpar=max(jpar1,jpar2)
                     jminpar=min(jpar1,jpar2)
                     if (jminpar.eq.iminpar.and.jmaxpar.eq
     &                  .imaxpar) then
                        icurredge=jedg
                        goto 1500
                     endif
                  enddo
                  write(logmess,'(a)') 'LTRIPEDGE: Topological error!!'
                  call writloga('default',0,logmess,0,ierrw)
                  write(logmess,'(a,2i8)') 'iminpar/imaxpar=',iminpar,
     &               imaxpar
                  call writloga('default',0,logmess,0,ierrw)
                  write(logmess,'(a,i8)') 'cannot match element ',
     &               icurrelt
                  call writloga('default',0,logmess,0,ierrw)
                  stop
               endif
            endif
         enddo
 1510 continue
c.... At this point we did not bail out of the previous loop, meaning
c.... we did not (i) close the cycle and (ii) did not encounter
c.... two boundary faces.  This is a topological error.
      write(logmess,'(a)') 'LTRIPEDGE: Topological error!!'
      call writloga('default',0,logmess,0,ierrw)
      write(logmess,'(a,2i8)') 'iminpar/imaxpar=',iminpar,
     &   imaxpar
      call writloga('default',0,logmess,0,ierrw)
      write(logmess,'(a,i8)') 'last element ',
     &   icurrelt
      call writloga('default',0,logmess,0,ierrw)
      stop
   10 continue
c.... We now loop through the constraints for both endpoints and
c.... see how many they have in common.  This will be the number
c.... of constraints of the edge.
      icra=icr1(ic1)
      icrb=icr1(ic2)
      if(icra.eq.0.or.icrb.eq.0) then
         nsc=0
         go to 100
      endif
      nsc=0
      nsa=icontab(1,icra)
      nsb=icontab(1,icrb)
      do n1=1,nsa
         do n2=1,nsb
            if (icontab(2+n1,icra).eq.icontab(2+n2,icrb))
     *         then
               nsc=nsc+1
            endif
         enddo
      enddo
  100 continue
c.... The edge is a triple edge iff the sum of materials and constraints
c.... is at least three.
      if (nmat+nsc.ge.3) then
         ltripedge=.true.
      else
         ltripedge=.false.
      endif
      return
      end
| {
"alphanum_fraction": 0.5519113323,
"author": null,
"avg_line_length": 31.3546099291,
"converted": null,
"ext": "f",
"file": null,
"hexsha": "f41580eed603c20366cfd460fb4389cf092d0892",
"include": null,
"lang": "FORTRAN",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 63,
"max_forks_repo_forks_event_max_datetime": "2022-03-24T06:48:36.000Z",
"max_forks_repo_forks_event_min_datetime": "2017-02-08T21:56:04.000Z",
"max_forks_repo_head_hexsha": "decd0ce0e5dab068034ef382cabcd134562de832",
"max_forks_repo_licenses": [
"Intel"
],
"max_forks_repo_name": "daniellivingston/LaGriT",
"max_forks_repo_path": "src/lg_core/ltripedge.f",
"max_issues_count": 166,
"max_issues_repo_head_hexsha": "511ef22f3b7e839c7e0484604cd7f6a2278ae6b9",
"max_issues_repo_issues_event_max_datetime": "2022-03-29T21:36:28.000Z",
"max_issues_repo_issues_event_min_datetime": "2017-01-26T17:15:45.000Z",
"max_issues_repo_licenses": [
"CNRI-Python"
],
"max_issues_repo_name": "millerta/LaGriT-1",
"max_issues_repo_path": "src/ltripedge.f",
"max_line_length": 72,
"max_stars_count": 73,
"max_stars_repo_head_hexsha": "511ef22f3b7e839c7e0484604cd7f6a2278ae6b9",
"max_stars_repo_licenses": [
"CNRI-Python"
],
"max_stars_repo_name": "millerta/LaGriT-1",
"max_stars_repo_path": "src/ltripedge.f",
"max_stars_repo_stars_event_max_datetime": "2022-03-09T22:22:32.000Z",
"max_stars_repo_stars_event_min_datetime": "2017-02-09T17:54:28.000Z",
"num_tokens": 2495,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 8842
} |
from copper.cop.cop_node import CopNode
import pyopencl as cl
import numpy
from PIL import Image
class COP2_Comp_Add(CopNode):
    '''
    This filter adds foreground over background using OpenCL
    '''
    type_name = "add"
    category = "comps"

    def __init__(self, engine, parent):
        """Load the "add" OpenCL program and declare the two inputs."""
        # Bug fix: the original called super(CLC_Comp_Add, self); that class
        # name does not exist in this module and raised NameError on
        # construction.
        super(COP2_Comp_Add, self).__init__(engine, parent)
        self.program = engine.load_program("comp_add.cl")
        self.__inputs__ = [None, None]
        self.__input_names__ = ["Input 1","Input 2"]

    def compute(self):
        """Run the add kernel over both inputs into self.devOutBuffer.

        The output image is sized to input 0.
        """
        self.width, self.height = self.input(0).size
        # self.image_format / self.size presumably come from CopNode --
        # TODO confirm.
        self.devOutBuffer = cl.Image(self.engine.ctx, self.engine.mf.READ_WRITE, self.image_format, shape=(self.width, self.height))
        # Linear sampler with normalized coordinates, clamped at the edges.
        sampler = cl.Sampler(self.engine.ctx,
                    True, # Normalized coordinates
                    cl.addressing_mode.CLAMP_TO_EDGE,
                    cl.filter_mode.LINEAR)
        exec_evt = self.program.run_add(self.engine.queue, self.size, None,
            self.input(0).getOutDevBuffer(),
            self.input(1).getOutDevBuffer(),
            self.devOutBuffer,
            sampler,
            numpy.int32(self.width),
            numpy.int32(self.height),
        )
        # block until the kernel has finished so the output buffer is valid
        exec_evt.wait()
class COP2_Comp_Blend(CopNode):
    '''
    This filter blends foreground over background using OpenCL.
    '''
    type_name = "blend"
    category = "comps"

    def __init__(self, engine, parent):
        # Bug fix: the original called super(CLC_Comp_Blend, self); no class
        # named CLC_Comp_Blend exists in this module, so instantiating the
        # node raised NameError. Use the actual class name.
        super(COP2_Comp_Blend, self).__init__(engine, parent)
        self.program = engine.load_program("comp_blend.cl")
        self.__inputs__ = [None, None]
        self.__input_names__ = ["Input 1", "Input 2"]
        self.addParameter("factor", float, 0.5)

    def bypass_node(self):
        """Return the input node to short-circuit to when no blend is needed.

        factor <= 0.0 reduces to input 0, factor >= 1.0 reduces to input 1;
        returns None when a real blend must be computed.
        """
        factor = self.parm("factor").evalAsFloat()
        if factor <= 0.0:
            self.log("Bypassing with node %s at input 0" % (self.input(0).path()))
            return self.input(0)
        if factor >= 1.0:
            self.log("Bypassing with node %s at input 1" % (self.input(1).path()))
            return self.input(1)
        return None

    def compute(self):
        """Blend input 0 with input 1 on the GPU into self.devOutBuffer.

        NOTE(review): self.image_format and self.size are presumably provided
        by CopNode -- confirm in the base class.
        """
        self.width, self.height = self.input(0).size
        self.devOutBuffer = cl.Image(self.engine.ctx, self.engine.mf.READ_WRITE, self.image_format, shape=(self.width, self.height))
        sampler = cl.Sampler(self.engine.ctx,
                             True,  # Normalized coordinates
                             cl.addressing_mode.CLAMP_TO_EDGE,
                             cl.filter_mode.LINEAR)
        exec_evt = self.program.run_blend(self.engine.queue, self.size, None,
                                          self.input(0).getOutDevBuffer(),
                                          self.input(1).getOutDevBuffer(),
                                          self.devOutBuffer,
                                          sampler,
                                          numpy.int32(self.width),
                                          numpy.int32(self.height),
                                          numpy.float32(self.parm("factor").evalAsFloat())
                                          )
        # Block until the kernel finishes so devOutBuffer is valid downstream.
        exec_evt.wait()
| {
"alphanum_fraction": 0.7052117264,
"author": null,
"avg_line_length": 29.2380952381,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "183ad3a9a2ed6c678262c55c0a1cc151b307d671",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 3,
"max_forks_repo_forks_event_max_datetime": "2020-02-14T06:56:40.000Z",
"max_forks_repo_forks_event_min_datetime": "2019-03-18T05:17:10.000Z",
"max_forks_repo_head_hexsha": "1900b506d0a407a3fb5774ab129b984a547ee0b5",
"max_forks_repo_licenses": [
"Unlicense"
],
"max_forks_repo_name": "cinepost/Copperfield_FX",
"max_forks_repo_path": "copper/cop/cop_comps.py",
"max_issues_count": 5,
"max_issues_repo_head_hexsha": "1900b506d0a407a3fb5774ab129b984a547ee0b5",
"max_issues_repo_issues_event_max_datetime": "2022-03-11T23:19:01.000Z",
"max_issues_repo_issues_event_min_datetime": "2016-06-30T10:19:25.000Z",
"max_issues_repo_licenses": [
"Unlicense"
],
"max_issues_repo_name": "cinepost/Copperfield_FX",
"max_issues_repo_path": "copper/cop/cop_comps.py",
"max_line_length": 126,
"max_stars_count": 6,
"max_stars_repo_head_hexsha": "1900b506d0a407a3fb5774ab129b984a547ee0b5",
"max_stars_repo_licenses": [
"Unlicense"
],
"max_stars_repo_name": "cinepost/Copperfield_FX",
"max_stars_repo_path": "copper/cop/cop_comps.py",
"max_stars_repo_stars_event_max_datetime": "2021-12-28T05:44:15.000Z",
"max_stars_repo_stars_event_min_datetime": "2016-07-28T13:59:34.000Z",
"num_tokens": 698,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 2456
} |
      subroutine readopac(fname, f, fx, fy, fxy)
c
c***********************************************************************
c     read opacity tables
c
c     opens the table file named fname and fills the function values f
c     and the precomputed partial derivatives fx, fy, fxy (mxo x myo
c     arrays, used for bicubic table interpolation).  every header
c     value read from the file is checked against the compiled-in
c     table parameters (from the include files); any mismatch stops
c     the run through one of the labelled error exits below.
c***********************************************************************
c
      include'titan.imp'
      include'titan.par'
      include'titan.com'
c
      logical lxst
c
      character*8 fname, dname
c
      dimension f(mxo, myo), fx(mxo, myo), fy(mxo, myo), fxy(mxo, myo)
c
c=======================================================================
c     open the file
c=======================================================================
c
      inquire( file = fname, exist = lxst )
      if (.not. lxst) go to 10
c
      open( unit = iopac, iostat = ier, file = fname )
      if (ier .gt. 0) go to 12
c
c=======================================================================
c     read the data
c=======================================================================
c
c     first record: dataset name must echo the file name.
c     NOTE(review): format (13a8) against a single character*8 variable
c     only transfers the first 8 characters -- presumably intentional;
c     confirm against the writer of this file.
      read(iopac,'(13a8)') dname
      if (dname .ne. fname) go to 14
c
c     second record: grid origin, spacing and dimensions; all must
c     match the compiled-in values exactly.
      read(iopac,'(2(2e20.12,i5))') xxmin, dxx, nx, yymin, dyy, ny
      if (nx .ne. mxo .or. ny .ne. myo ) go to 16
      if (xxmin .ne. xmino .or. yymin .ne. ymino) go to 18
      if (dxx .ne. dxo .or. dyy .ne. dyo ) go to 20
c
c     bulk data: values and derivatives, whole arrays in storage order.
      read(iopac,'(4e20.12)') f, fx, fy, fxy
c
c=======================================================================
c     close the file
c=======================================================================
c
      close(unit = iopac, iostat = ier)
      if (ier .gt. 0) go to 22
c
      return
c
c=======================================================================
c     error exits (each reports to terminal and log, then stops with a
c     distinct readopaN tag identifying the failure site)
c=======================================================================
c
   10 write (itty, 11) fname
      write (iout, 11) fname
   11 format(' eos dataset 'a10' fails to exist')
      stop 'readopa1'
c
   12 write (itty, 13) fname, ier, ier, ier
      write (iout, 13) fname, ier, ier, ier
   13 format(' error opening eos file 'a8' ier =' i20, o22, a8)
      stop 'readopa2'
c
   14 write (itty, 15) fname, dname
      write (iout, 15) fname, dname
   15 format(' file-name discrepancy. fname = 'a8' dname = 'a8)
      stop 'readopa3'
c
   16 write (itty, 17) mxo, nx, myo, ny
      write (iout, 17) mxo, nx, myo, ny
   17 format(' discrepancy in dimensions. mx ='i3' nx ='i3
     . ' my ='i4' ny ='i4)
      stop 'readopa4'
c
   18 write (itty, 19) xmino, xxmin, ymino, yymin
      write (iout, 19) xmino, xxmin, ymino, yymin
   19 format(' discrepant origin. xmino ='1pe20.12' xxmin ='e20.12
     . ' ymino ='1pe20.12' yymin ='e20.12)
      stop 'readopa5'
c
   20 write (itty, 21) dxo, dxx, dyo, dyy
      write (iout, 21) dxo, dxx, dyo, dyy
   21 format(' discrepant spacing. dxo ='1pe20.12' dxx ='e20.12
     . ' dyo ='1pe20.12' dyy ='e20.12)
      stop 'readopa6'
c
   22 write (itty, 23) fname, ier, ier, ier
      write (iout, 23) fname, ier, ier, ier
   23 format(' error closing opac file 'a8' ier =' i20, o22, a8)
      stop 'readopa7'
c
      end
| {
"alphanum_fraction": 0.4111944966,
"author": null,
"avg_line_length": 34.3870967742,
"converted": null,
"ext": "f",
"file": null,
"hexsha": "a8a2e96348e2f01321554e39177e5f3406e68bb5",
"include": null,
"lang": "FORTRAN",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "235b2dcb4c79b2af666a787e8b24e769fd8ac294",
"max_forks_repo_licenses": [
"NCSA"
],
"max_forks_repo_name": "matthewturk/lca-titan",
"max_forks_repo_path": "readopac.f",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "235b2dcb4c79b2af666a787e8b24e769fd8ac294",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"NCSA"
],
"max_issues_repo_name": "matthewturk/lca-titan",
"max_issues_repo_path": "readopac.f",
"max_line_length": 72,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "235b2dcb4c79b2af666a787e8b24e769fd8ac294",
"max_stars_repo_licenses": [
"NCSA"
],
"max_stars_repo_name": "matthewturk/lca-titan",
"max_stars_repo_path": "readopac.f",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 980,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 3198
} |
#!/usr/bin/env python
import rospy
from geometry_msgs.msg import Twist, Quaternion #, Point, Pose, TwistWithCovariance, Vector3
import tf
import numpy as np
def main():
    """Drive a Sphero open-loop: publish a 1:2 Lissajous velocity pattern
    on /cmd_vel at 10 Hz until ROS shutdown."""
    rospy.init_node("sphero_simple_openloop")
    pub_twist = rospy.Publisher("/cmd_vel", Twist, queue_size=1)
    T = Twist()
    r = rospy.Rate(10)  # 10 Hz command rate
    phi = 0
    # for i in range(1000):
    while not rospy.is_shutdown():
        # Amplitude-50 velocities; y runs at twice the x frequency.
        T.linear.x = 50 * np.cos(phi / 100.)
        T.linear.y = 50 * np.sin(2 * phi / 100.)
        pub_twist.publish(T)
        phi += 1
        r.sleep()
        # Bug fix: `print "a"` is Python-2-only syntax and is a SyntaxError
        # under Python 3; the call form prints the same text on both.
        print("a")


if __name__ == "__main__":
    main()
| {
"alphanum_fraction": 0.606504065,
"author": null,
"avg_line_length": 20.5,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "d1832f30326d1c59b62289a1d8dd2b90ec32c865",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "614f4958816b565c7950ea0c6e6249864fbf2efe",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "koro/smp_sphero",
"max_forks_repo_path": "sphero_simple_openloop.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "614f4958816b565c7950ea0c6e6249864fbf2efe",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "koro/smp_sphero",
"max_issues_repo_path": "sphero_simple_openloop.py",
"max_line_length": 92,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "614f4958816b565c7950ea0c6e6249864fbf2efe",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "koro/smp_sphero",
"max_stars_repo_path": "sphero_simple_openloop.py",
"max_stars_repo_stars_event_max_datetime": "2020-12-13T13:02:55.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-12-13T13:02:55.000Z",
"num_tokens": 177,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 615
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
import json
import torch
import numpy as np
import random
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "3"
import sys
import time
import argparse
from src.models.models import TAILOR
from src.models.optimization import BertAdam
from src.utils.eval import get_metrics
from src.utils.eval_gap import *
from torch.utils.data import DataLoader, WeightedRandomSampler
import torch.utils.data as data
from util import parallel_apply, get_logger
from src.dataloaders.cmu_dataloader import AlignedMoseiDataset, UnAlignedMoseiDataset
#torch.distributed.init_process_group(backend="nccl")
global logger
def get_args(description='Multi-modal Multi-label Emotion Recognition'):
    """Parse command-line arguments for training/testing TAILOR.

    Validates gradient_accumulation_steps, requires at least one of
    --do_train / --do_test, and rescales batch_size to the per-step
    micro-batch size. Returns the argparse.Namespace.
    """
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument("--do_train", action='store_true', help="Whether to run training.")
    parser.add_argument("--do_test", action='store_true', help="whether to run test")
    parser.add_argument("--aligned", action='store_true', help="whether train align of unalign dataset")
    parser.add_argument("--data_path", type=str, help='cmu_mosei data_path')
    parser.add_argument("--output_dir", default=None, type=str, required=True,
                        help="The output directory where the model predictions and checkpoints will be written.")
    parser.add_argument('--num_thread_reader', type=int, default=1, help='')
    parser.add_argument('--lr', type=float, default=0.0001, help='initial learning rate')
    parser.add_argument('--epochs', type=int, default=20, help='upper epoch limit')
    parser.add_argument('--unaligned_data_path', type=str, default='/amax/cmy/mosei_senti_data_noalign.pkl', help='load unaligned dataset')
    parser.add_argument('--batch_size', type=int, default=256, help='batch size')
    parser.add_argument('--lr_decay', type=float, default=0.9, help='Learning rate exp epoch decay')
    parser.add_argument('--n_display', type=int, default=100, help='Information display frequence')
    parser.add_argument('--text_dim', type=int, default=300, help='text_feature_dimension')
    parser.add_argument('--video_dim', type=int, default=35, help='video feature dimension')
    parser.add_argument('--audio_dim', type=int, default=74, help='audio_feature_dimension')
    parser.add_argument('--seed', type=int, default=42, help='random seed')
    parser.add_argument('--max_words', type=int, default=60, help='')
    parser.add_argument('--max_frames', type=int, default=60, help='')
    parser.add_argument('--max_sequence', type=int, default=60, help='')
    parser.add_argument('--max_label', type=int, default=6, help='')
    parser.add_argument("--bert_model", default="bert-base", type=str, required=False, help="Bert module")
    parser.add_argument("--visual_model", default="visual-base", type=str, required=False, help="Visual module")
    parser.add_argument("--audio_model", default="audio-base", type=str, required=False, help="Audio module")
    parser.add_argument("--cross_model", default="cross-base", type=str, required=False, help="Cross module")
    parser.add_argument("--decoder_model", default="decoder-base", type=str, required=False, help="Decoder module")
    parser.add_argument("--init_model", default=None, type=str, required=False, help="Initial model.")
    parser.add_argument("--warmup_proportion", default=0.1, type=float,
                        help="Proportion of training to perform linear learning rate warmup for. E.g., 0.1 = 10%% of training.")
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
                        help="Number of updates steps to accumulate before performing a backward/update pass.")
    parser.add_argument('--n_gpu', type=int, default=1, help="Changed in the execute process.")
    parser.add_argument("--world_size", default=0, type=int, help="distribted training")
    parser.add_argument("--local_rank", default=0, type=int, help="distribted training")
    parser.add_argument('--coef_lr', type=float, default=0.1, help='coefficient for bert branch.')
    parser.add_argument('--bert_num_hidden_layers', type=int, default=6, help="Layer NO. of visual.")
    parser.add_argument('--visual_num_hidden_layers', type=int, default=3, help="Layer NO. of visual.")
    parser.add_argument('--audio_num_hidden_layers', type=int, default=3, help="Layer No. of audio")
    parser.add_argument('--cross_num_hidden_layers', type=int, default=3, help="Layer NO. of cross.")
    parser.add_argument('--decoder_num_hidden_layers', type=int, default=1, help="Layer NO. of decoder.")
    parser.add_argument("--num_classes", default=6, type=int, required=False)
    parser.add_argument("--hidden_size",type=int, default=256)
    args = parser.parse_args()
    # Check parameters
    if args.gradient_accumulation_steps < 1:
        raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
            args.gradient_accumulation_steps))
    if not args.do_train and not args.do_test:
        raise ValueError("At least one of `do_train` or `do_test` must be True.")
    # The requested batch_size is the effective batch; each optimizer step
    # accumulates gradient_accumulation_steps micro-batches of this size.
    args.batch_size = int(args.batch_size / args.gradient_accumulation_steps)
    return args
def set_seed_logger(args):
    """Seed every RNG for reproducibility, pin the CUDA device, create the
    output directory, and initialise the module-global logger.

    Returns the (unmodified) args namespace.
    """
    global logger
    seed = args.seed
    # Seed all random sources so runs are repeatable.
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)  # if you are using multi-GPU.
    # Trade cuDNN autotuning for deterministic kernels.
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    torch.cuda.set_device(args.local_rank)

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir, exist_ok=True)
    logger = get_logger(os.path.join(args.output_dir, "log.txt"))

    if args.local_rank == 0:
        logger.info("Effective parameters:")
        for key in sorted(args.__dict__):
            logger.info(" <<< {}: {}".format(key, args.__dict__[key]))
    return args
def init_device(args, local_rank):
    """Select the compute device for this rank and record n_gpu on args.

    n_gpu is hard-coded to 1 here (single-device training path).
    Returns (device, n_gpu). Raises ValueError if batch_size is not a
    multiple of n_gpu.
    """
    global logger
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu", local_rank)
    n_gpu = 1
    logger.info("device: {} n_gpu: {}".format(device, n_gpu))
    args.n_gpu = n_gpu

    if args.batch_size % args.n_gpu != 0:
        # Bug fix: the original message also formatted args.batch_size_val,
        # an attribute get_args() never defines, so reaching this branch
        # raised AttributeError instead of the intended ValueError.
        raise ValueError("Invalid batch_size and n_gpu parameter: {}%{}, should be == 0".format(
            args.batch_size, args.n_gpu))
    return device, n_gpu
def init_model(args, device, n_gpu, local_rank):
    """Build the TAILOR model, warm-starting from args.init_model if given.

    Returns the constructed model (not yet moved to `device`; the caller
    does that).
    """
    if args.init_model:
        model_state_dict = torch.load(args.init_model, map_location='cpu')
    else:
        model_state_dict = None
    # Bug fix: the original loaded model_state_dict and then never used it,
    # silently discarding --init_model weights. Pass it through, matching
    # the state_dict= keyword used by load_model() below.
    model = TAILOR.from_pretrained(args.bert_model, args.visual_model, args.audio_model, args.cross_model, args.decoder_model,
                                   state_dict=model_state_dict, task_config=args)
    return model
def prep_optimizer(args, model, num_train_optimization_steps, device, n_gpu, local_rank, coef_lr=1.):
    """Group model parameters and build the BertAdam optimizer.

    Parameters whose names match the no_decay patterns (biases, LayerNorm)
    get weight_decay 0.0; all others get 0.01. Within each group, "audio."
    parameters are placed in their own param group with an explicit lr.
    Returns (optimizer, scheduler, model); scheduler is always None here
    (BertAdam schedules warmup internally).

    NOTE(review): coef_lr is accepted but unused -- both explicit lrs are
    args.lr * 1.0; confirm whether the audio branch was meant to scale by it.
    """
    if hasattr(model, 'module'):
        model = model.module
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']

    # Fix (naming only, behavior unchanged): in the original these two lists
    # were labelled backwards -- "no_decay_param_tp" actually held the
    # parameters that DO receive weight decay, and vice versa. The groups
    # below are identical to the original's; only the names are corrected.
    decay_param_tp = [(n, p) for n, p in param_optimizer if not any(nd in n for nd in no_decay)]
    no_decay_param_tp = [(n, p) for n, p in param_optimizer if any(nd in n for nd in no_decay)]

    decay_audio_param_tp = [(n, p) for n, p in decay_param_tp if "audio." in n]
    decay_noaudio_param_tp = [(n, p) for n, p in decay_param_tp if "audio." not in n]
    no_decay_audio_param_tp = [(n, p) for n, p in no_decay_param_tp if "audio." in n]
    no_decay_noaudio_param_tp = [(n, p) for n, p in no_decay_param_tp if "audio." not in n]

    optimizer_grouped_parameters = [
        {'params': [p for n, p in decay_audio_param_tp], 'weight_decay': 0.01, 'lr': args.lr * 1.0},
        {'params': [p for n, p in decay_noaudio_param_tp], 'weight_decay': 0.01},
        {'params': [p for n, p in no_decay_audio_param_tp], 'weight_decay': 0.0, 'lr': args.lr * 1.0},
        {'params': [p for n, p in no_decay_noaudio_param_tp], 'weight_decay': 0.0}
    ]

    scheduler = None
    optimizer = BertAdam(optimizer_grouped_parameters, lr=args.lr, warmup=args.warmup_proportion,
                         schedule='warmup_linear', t_total=num_train_optimization_steps, weight_decay=0.01,
                         max_grad_norm=1.0)
    return optimizer, scheduler, model
def prep_dataloader(args):
    """Construct train/valid/test datasets and dataloaders for CMU-MOSEI.

    Returns (train_dataloader, val_dataloader, test_dataloader,
    train_length, val_length, test_length, label_input, label_mask).

    NOTE(review): shuffle=True and drop_last=True are applied to the
    validation and test loaders as well -- presumably intentional here,
    but worth confirming since it makes eval metrics batch-order dependent.
    """
    dataset_cls = AlignedMoseiDataset if args.aligned else UnAlignedMoseiDataset
    splits = {name: dataset_cls(args.data_path, name) for name in ('train', 'valid', 'test')}

    label_input, label_mask = splits['train']._get_label_input()

    # All three loaders share identical settings.
    loader_kwargs = dict(
        batch_size=args.batch_size // args.n_gpu,
        num_workers=args.num_thread_reader,
        pin_memory=False,
        shuffle=True,
        drop_last=True,
    )
    train_dataloader = DataLoader(splits['train'], **loader_kwargs)
    val_dataloader = DataLoader(splits['valid'], **loader_kwargs)
    test_dataloader = DataLoader(splits['test'], **loader_kwargs)

    return (train_dataloader, val_dataloader, test_dataloader,
            len(splits['train']), len(splits['valid']), len(splits['test']),
            label_input, label_mask)
def save_model(args, model, epoch):
    """Save the (unwrapped) model's state_dict for this epoch.

    Returns the path the checkpoint was written to.
    """
    # Only save the model it-self (unwrap DataParallel/DDP if present).
    model_to_save = model.module if hasattr(model, 'module') else model
    # Bug fix: the original wrote "pytorch_model_{}.bin." (underscore,
    # trailing dot) while load_model()'s fallback looks for
    # "pytorch_model.bin.{}" -- the two patterns could never match. Use the
    # pattern load_model expects.
    output_model_file = os.path.join(
        args.output_dir, "pytorch_model.bin.{}".format(epoch))
    torch.save(model_to_save.state_dict(), output_model_file)
    logger.info("Model saved to %s", output_model_file)
    return output_model_file
def load_model(epoch, args, n_gpu, device, model_file=None):
    """Rebuild TAILOR from a saved state_dict and move it to `device`.

    Falls back to the per-epoch checkpoint name in args.output_dir when
    model_file is not given. Returns None if the file does not exist.

    NOTE(review): PYTORCH_PRETRAINED_BERT_CACHE is not defined or imported
    in this file; the cache_dir fallback path relies on it existing at
    runtime -- confirm the intended import.
    """
    if model_file is None or len(model_file) == 0:
        model_file = os.path.join(args.output_dir, "pytorch_model.bin.{}".format(epoch))
    if os.path.exists(model_file):
        model_state_dict = torch.load(model_file, map_location='cpu')
        if args.local_rank == 0:
            logger.info("Model loaded from %s", model_file)
        # Bug fix: get_args() never defines cache_dir, so `args.cache_dir`
        # raised AttributeError whenever a checkpoint existed. Use getattr
        # with a None default to preserve the intended fallback.
        cache_dir = getattr(args, 'cache_dir', None)
        if not cache_dir:
            cache_dir = os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed')
        model = TAILOR.from_pretrained(args.bert_model, args.visual_model, args.audio_model, args.cross_model,
                                       cache_dir=cache_dir, state_dict=model_state_dict, task_config=args)
        model.to(device)
    else:
        model = None
    return model
def train_epoch(epoch, args, model, train_dataloader, device, n_gpu, optimizer, scheduler, global_step, local_rank=0, label_input=None, label_mask=None):
    """Run one training epoch with gradient accumulation.

    Returns (mean_loss, predictions, true_labels, prediction_scores) with
    the three tensors concatenated over all batches.

    NOTE(review): global_step is an int parameter incremented locally and
    never returned, so the caller's copy does not advance -- it only
    affects the logging cadence within this call.
    """
    global logger
    model.train()
    log_step = args.n_display
    start_time = time.time()
    total_loss = 0
    total_pred = []
    total_true_label = []
    total_pred_scores = []
    for step, batch in enumerate(train_dataloader):
        # torch.cuda.empty_cache()
        if n_gpu == 1:
            # multi-gpu does scattering it-self
            batch = tuple(t.to(device=device, non_blocking=True) for t in batch)
        pairs_text, pairs_mask, video, video_mask, audio, audio_mask, ground_label = batch
        model_loss, batch_pred, true_label, pred_scores = model(pairs_text, pairs_mask, video, video_mask, audio, audio_mask, label_input, label_mask, groundTruth_labels=ground_label, training=True)
        if n_gpu > 1:
            model_loss = model_loss.mean()  # mean() to average on multi-gpu.
        if args.gradient_accumulation_steps > 1:
            # Scale so the accumulated gradient matches one full-batch step.
            model_loss = model_loss / args.gradient_accumulation_steps
        model_loss.backward()
        total_loss += float(model_loss)
        total_pred.append(batch_pred)
        total_true_label.append(true_label)
        total_pred_scores.append(pred_scores)
        # Step the optimizer only once per accumulation window.
        if (step + 1) % args.gradient_accumulation_steps == 0:
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
            if scheduler is not None:
                scheduler.step()  # Update learning rate schedule
            optimizer.step()
            optimizer.zero_grad()
            global_step += 1
            if global_step % log_step == 0 and local_rank == 0:
                logger.info("Epoch: %d/%d, Step: %d/%d, Lr: %s, loss: %f, Time/step: %f", epoch + 1,
                            args.epochs, step + 1,
                            len(train_dataloader), "-".join([str('%.6f'%itm) for itm in sorted(list(set(optimizer.get_lr())))]), float(model_loss),
                            (time.time() - start_time) / (log_step * args.gradient_accumulation_steps))
                start_time = time.time()
    total_loss = total_loss / len(train_dataloader)
    total_pred = torch.cat(total_pred, 0)
    total_true_label = torch.cat(total_true_label, 0)
    total_pred_scores = torch.cat(total_pred_scores, 0)
    return total_loss, total_pred, total_true_label, total_pred_scores
def eval_epoch(args, model, val_dataloader, device, n_gpu, label_input, label_mask):
    """Run one evaluation pass over val_dataloader.

    Unwraps DataParallel if present, moves the model to `device`, and
    returns (predictions, true_labels, prediction_scores), each a tensor
    concatenated over all batches.
    """
    net = model.module if hasattr(model, 'module') else model
    net = net.to(device)
    net.eval()

    pred_chunks, label_chunks, score_chunks = [], [], []
    with torch.no_grad():
        for batch in val_dataloader:
            text, text_mask, video, video_mask, audio, audio_mask, groundTruth_labels = \
                tuple(t.to(device) for t in batch)
            batch_pred, true_label, pred_scores = net(text, text_mask, video, video_mask, audio, audio_mask, label_input, label_mask, groundTruth_labels=groundTruth_labels, training=False)
            pred_chunks.append(batch_pred)
            label_chunks.append(true_label)
            score_chunks.append(pred_scores)

    return (torch.cat(pred_chunks, 0),
            torch.cat(label_chunks, 0),
            torch.cat(score_chunks, 0))
def main():
    """Entry point: parse args, seed RNGs, build the model, then train with
    per-epoch validation and a final test pass on the best checkpoint.

    NOTE(review): train_time is captured but never used. The testing section
    is indented under `if args.do_train:` here since it uses test_dataloader,
    label_input and best_model, which only exist on the training path --
    confirm against upstream; with indentation stripped from this dump the
    original nesting is ambiguous.
    """
    global logger
    train_time = time.time()
    args = get_args()
    args = set_seed_logger(args)
    device, n_gpu = init_device(args, args.local_rank)
    model = init_model(args, device, n_gpu, args.local_rank)
    model = model.to(device)
    if args.aligned == False:
        logger.warning("!!!!!!!!!!!!!! you start train unaligned dataset")
    else:
        logger.warning("!!!!!!!!!!!!!! you start train aligned dataset")
    print('***** dataloder preping ... *****')
    if args.do_train:
        train_dataloader, val_dataloader, test_dataloader, train_length, val_length, test_length, label_input, label_mask = prep_dataloader(args)
        label_input = label_input.to(device)
        label_mask = label_mask.to(device)
        # NOTE(review): true division makes this a float (ceil-divide was
        # probably intended); BertAdam's t_total tolerates it at runtime.
        num_train_optimization_steps = (int(len(train_dataloader) + args.gradient_accumulation_steps - 1)
                                        / args.gradient_accumulation_steps) * args.epochs
        coef_lr = args.coef_lr
        if args.init_model:
            coef_lr = 1.0
        optimizer, scheduler, model = prep_optimizer(args, model, num_train_optimization_steps, device, n_gpu, args.local_rank, coef_lr=coef_lr)
        if args.local_rank == 0:
            logger.info("***** Running training *****")
            logger.info(" Num examples = %d", train_length)
            logger.info(" Batch size = %d", args.batch_size)
            logger.info(" Num steps = %d", num_train_optimization_steps * args.gradient_accumulation_steps)
        best_score = 0.000
        best_output_model_file = None
        global_step = 0
        best_model = None
        for epoch in range(args.epochs):
            total_loss, total_pred, total_label, total_pred_scores = train_epoch(epoch, args, model, train_dataloader, device, n_gpu, optimizer,
                                                                                scheduler, global_step, local_rank=args.local_rank, label_input=label_input, label_mask=label_mask)
            total_micro_f1, total_micro_precision, total_micro_recall, total_acc = get_metrics(total_pred, total_label)
            total_pred_scores = total_pred_scores.data.cpu().numpy()
            total_label = total_label.data.cpu().numpy()
            train_gap = calculate_gap(total_pred_scores, total_label)
            if args.local_rank == 0:
                logger.info("Epoch %d/%d Finished, Train Loss: %f, Train_micro_f1: %f, Train_micro_precision: %f, Train_micro_recall: %f, Train_acc: %f, train_gap: %f", \
                    epoch + 1, args.epochs, total_loss, total_micro_f1, total_micro_precision, total_micro_recall, total_acc, train_gap)
            # Per-epoch validation, checkpointing, and best-model tracking.
            if args.local_rank == 0:
                logger.info("***** Running valing *****")
                logger.info(" Num examples = %d", val_length)
                logger.info(" Batch_size = %d", args.batch_size)
                val_pred, val_label, val_pred_scores = eval_epoch(args, model, val_dataloader, device, n_gpu, label_input, label_mask)
                val_micro_f1, val_micro_precision, val_micro_recall, val_acc = get_metrics(val_pred, val_label)
                val_pred_scores = val_pred_scores.data.cpu().numpy()
                val_label = val_label.data.cpu().numpy()
                val_gap = calculate_gap(val_pred_scores, val_label)
                logger.info("----- micro_f1: %f, micro_precision: %f, micro_recall: %f, acc: %f, val_gap: %f", \
                    val_micro_f1, val_micro_precision, val_micro_recall, val_acc, val_gap)
                output_model_file = save_model(args, model, epoch)
                if best_score <= val_micro_f1:
                    best_score = val_micro_f1
                    best_model = model
                    best_output_model_file = output_model_file
                logger.info("The best model is: {}, the f1 is: {:.4f}".format(best_output_model_file, best_score))
        # Final test pass using the best model found during training.
        if args.local_rank == 0:
            logger.info('***** Running testing *****')
            logger.info(' Num examples = %d', test_length)
            logger.info(" Batch_size = %d", args.batch_size)
            test_pred, test_label, test_pred_scores = eval_epoch(args, best_model, test_dataloader, device, n_gpu, label_input, label_mask)
            test_micro_f1, test_micro_precision, test_micro_recall, test_acc = get_metrics(test_pred, test_label)
            test_pred_scores = test_pred_scores.data.cpu().numpy()
            test_label = test_label.data.cpu().numpy()
            test_gap = calculate_gap(test_pred_scores, test_label)
            logger.info("----- micro_f1: %f, micro_precision: %f, micro_recall: %f, acc: %f, test_gap: %f", \
                test_micro_f1, test_micro_precision, test_micro_recall, test_acc, test_gap)
| {
"alphanum_fraction": 0.6710039087,
"author": null,
"avg_line_length": 48.368159204,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "f68bdac01bed7313ac7e61dca763cbb301d0a1a0",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "d30c137d0695af0d9cf172593d1dd83f645e2d3c",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "LiangFeifei0920/TAILOR",
"max_forks_repo_path": "train.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "d30c137d0695af0d9cf172593d1dd83f645e2d3c",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "LiangFeifei0920/TAILOR",
"max_issues_repo_path": "train.py",
"max_line_length": 198,
"max_stars_count": 2,
"max_stars_repo_head_hexsha": "d30c137d0695af0d9cf172593d1dd83f645e2d3c",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "LiangFeifei0920/TAILOR",
"max_stars_repo_path": "train.py",
"max_stars_repo_stars_event_max_datetime": "2022-03-11T02:49:12.000Z",
"max_stars_repo_stars_event_min_datetime": "2022-03-11T02:49:09.000Z",
"num_tokens": 4532,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 19444
} |
"""
@author: Vincent Bonnet
@description : Constraint data structure
"""
import numpy as np
import core.jit.item_utils as item_utils
class Constraint:
    """Base data container for a constraint over `num_nodes` 2D nodes.

    Pure data: stores material properties, node references, and
    preallocated buffers for the cost function, its derivatives, and the
    resulting forces/jacobians (filled in elsewhere).
    """

    def __init__(self, num_nodes : int):
        # Constraint Property
        self.stiffness = np.float64(0.0)
        self.damping = np.float64(0.0)
        # Node ids involved in the constraint
        self.node_IDs = item_utils.empty_data_ids(num_nodes)
        # system indices of the nodes
        self.systemIndices = np.zeros(num_nodes, dtype = np.int32)
        # Precomputed cost function
        # NOTE(review): c is sized per-node; presumably one scalar cost
        # contribution per node -- confirm against the solver that fills it.
        self.c = np.zeros(num_nodes, dtype = np.float64) # constraint/cost function
        self.g = np.zeros((num_nodes, 2), dtype = np.float64) # gradients (one 2D vector per node)
        self.H = np.zeros((num_nodes, num_nodes, 2, 2), dtype = np.float64) # Hessians (2x2 block per node pair)
        # Precomputed forces/jacobians.
        self.f = np.zeros((num_nodes, 2), dtype = np.float64)
        self.dfdx = np.zeros((num_nodes, num_nodes, 2, 2), dtype = np.float64)
        self.dfdv = np.zeros((num_nodes, num_nodes, 2, 2), dtype = np.float64)
class AnchorSpring(Constraint):
    """Spring attaching one simulated node to a point on a kinematic object."""

    def __init__(self):
        super().__init__(num_nodes=1)
        self.rest_length = np.float64(0.0)
        # Location on the kinematic object the spring is anchored to.
        self.kinematic_component_IDs = item_utils.empty_data_ids(2)  # Point ids
        self.kinematic_component_param = np.float64(0.0)
        self.kinematic_component_pos = np.zeros(2, dtype=np.float64)

    @staticmethod
    def name():
        return "anchorSpring"
class Spring(Constraint):
    """Distance spring between two simulated nodes."""

    def __init__(self):
        super().__init__(num_nodes=2)
        self.rest_length = np.float64(0.0)

    @staticmethod
    def name():
        return "spring"
class Bending(Constraint):
    """Bending constraint maintaining the angle between segments
    (x0, x1) and (x1, x2)."""

    def __init__(self):
        super().__init__(num_nodes=3)
        self.rest_angle = np.float64(0.0)

    @staticmethod
    def name():
        return "bending"
class Area(Constraint):
    """Area-preservation constraint over the triangle (x0, x1, x2)."""

    def __init__(self):
        super().__init__(num_nodes=3)
        self.rest_area = np.float64(0.0)

    @staticmethod
    def name():
        return "area"
"alphanum_fraction": 0.6448291998,
"author": null,
"avg_line_length": 30.5285714286,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "4e81c9e64df56df47a38be8546c656b373078d31",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 5,
"max_forks_repo_forks_event_max_datetime": "2021-09-13T05:29:54.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-12-07T21:44:41.000Z",
"max_forks_repo_head_hexsha": "e71f2305d7452de985e5e9fa8935da611b6d9992",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "vincentbonnetcg/Numerical-Bric-a-Brac",
"max_forks_repo_path": "implicit_solver/lib/objects/jit/data/constraints.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "e71f2305d7452de985e5e9fa8935da611b6d9992",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "vincentbonnetcg/Numerical-Bric-a-Brac",
"max_issues_repo_path": "implicit_solver/lib/objects/jit/data/constraints.py",
"max_line_length": 86,
"max_stars_count": 14,
"max_stars_repo_head_hexsha": "e71f2305d7452de985e5e9fa8935da611b6d9992",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "vincentbonnetcg/Numerical-Bric-a-Brac",
"max_stars_repo_path": "implicit_solver/lib/objects/jit/data/constraints.py",
"max_stars_repo_stars_event_max_datetime": "2021-09-07T09:57:44.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-05-04T00:42:47.000Z",
"num_tokens": 567,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 2137
} |
# Train the 2D track-segmentation network (AnDi challenge Task 3) and
# write predictions for the test set.
import os
import sys
# Make the project root importable (two levels up from this script).
sys.path.append(os.path.dirname(os.path.realpath(__file__)) + '/../..')
from andi_funcs import TrackGeneratorSegmentation, import_tracks, import_labels, package_tracks
from models import segmentation_model_2d
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.models import load_model
import numpy as np

# Load validation data
# NOTE(review): the -1 presumably converts 1-based labels to 0-based class
# indices for sparse_categorical_crossentropy -- confirm against ref3.txt.
tracks_val = package_tracks(import_tracks('../../Datasets/Validation/task3.txt')[1], dimensions=2, max_T=200)
positions_val = import_labels('../../Datasets/Validation/ref3.txt')[1] - 1
tracks_test = package_tracks(import_tracks('../../Datasets/Test/task3.txt')[1], dimensions=2, max_T=200)

# Run model: training data is generated on the fly (200 batches/epoch of
# 32 tracks); the best-by-val_accuracy weights are checkpointed.
model = segmentation_model_2d()
model.compile(optimizer=Adam(learning_rate=0.001), loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.summary()
history = model.fit(TrackGeneratorSegmentation(batches=200, batch_size=32, dimensions=2), epochs=200,
                    callbacks=[
                        ModelCheckpoint(filepath='../Models/2D.h5', monitor='val_accuracy', save_best_only=True,
                                        mode='max')],
                    validation_data=(tracks_val, positions_val), use_multiprocessing=True, workers=16)

# Save performance metrics
np.savetxt('2D_accuracy.txt', history.history['accuracy'])
np.savetxt('2D_val_accuracy.txt', history.history['val_accuracy'])

# Evaluate on test data: reload the best checkpoint (not the last epoch).
model = load_model('../Models/2D.h5')
np.savetxt('../../Datasets/Test/predictions_task3_2D.txt', model.predict(tracks_test, use_multiprocessing=True))
| {
"alphanum_fraction": 0.737037037,
"author": null,
"avg_line_length": 46.2857142857,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "ffaacdf2fc9509d704ff28b0ff5c99e10f559737",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2021-05-03T15:04:20.000Z",
"max_forks_repo_forks_event_min_datetime": "2021-05-03T15:04:20.000Z",
"max_forks_repo_head_hexsha": "18e6e1420d269066b3a7646e1525f017026edf4c",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "tsmbland/andi_challenge",
"max_forks_repo_path": "Task3_Segmentation/Train/2D.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "18e6e1420d269066b3a7646e1525f017026edf4c",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "tsmbland/andi_challenge",
"max_issues_repo_path": "Task3_Segmentation/Train/2D.py",
"max_line_length": 112,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "18e6e1420d269066b3a7646e1525f017026edf4c",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "tsmbland/andi_challenge",
"max_stars_repo_path": "Task3_Segmentation/Train/2D.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 349,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 1620
} |
[STATEMENT]
lemma lex_two_SN_order_pair:
assumes o1: "SN_order_pair s1 ns1" and o2: "SN_order_pair s2 ns2"
shows "SN_order_pair (lex_two s1 ns1 s2) (lex_two s1 ns1 ns2)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. SN_order_pair (lex_two s1 ns1 s2) (lex_two s1 ns1 ns2)
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. SN_order_pair (lex_two s1 ns1 s2) (lex_two s1 ns1 ns2)
[PROOF STEP]
interpret o1: SN_order_pair s1 ns1
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. SN_order_pair s1 ns1
[PROOF STEP]
using o1
[PROOF STATE]
proof (prove)
using this:
SN_order_pair s1 ns1
goal (1 subgoal):
1. SN_order_pair s1 ns1
[PROOF STEP]
.
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. SN_order_pair (lex_two s1 ns1 s2) (lex_two s1 ns1 ns2)
[PROOF STEP]
interpret o2: SN_order_pair s2 ns2
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. SN_order_pair s2 ns2
[PROOF STEP]
using o2
[PROOF STATE]
proof (prove)
using this:
SN_order_pair s2 ns2
goal (1 subgoal):
1. SN_order_pair s2 ns2
[PROOF STEP]
.
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. SN_order_pair (lex_two s1 ns1 s2) (lex_two s1 ns1 ns2)
[PROOF STEP]
note o1.trans_S o1.trans_NS o2.trans_S o2.trans_NS o1.SN o2.SN
o1.compat_NS_S o2.compat_NS_S o1.compat_S_NS o2.compat_S_NS
[PROOF STATE]
proof (state)
this:
trans s1
trans ns1
trans s2
trans ns2
SN s1
SN s2
ns1 O s1 \<subseteq> s1
ns2 O s2 \<subseteq> s2
s1 O ns1 \<subseteq> s1
s2 O ns2 \<subseteq> s2
goal (1 subgoal):
1. SN_order_pair (lex_two s1 ns1 s2) (lex_two s1 ns1 ns2)
[PROOF STEP]
note this [unfolded trans_O_iff]
[PROOF STATE]
proof (state)
this:
s1 O s1 \<subseteq> s1
ns1 O ns1 \<subseteq> ns1
s2 O s2 \<subseteq> s2
ns2 O ns2 \<subseteq> ns2
SN s1
SN s2
ns1 O s1 \<subseteq> s1
ns2 O s2 \<subseteq> s2
s1 O ns1 \<subseteq> s1
s2 O ns2 \<subseteq> s2
goal (1 subgoal):
1. SN_order_pair (lex_two s1 ns1 s2) (lex_two s1 ns1 ns2)
[PROOF STEP]
interpret order_pair "(lex_two s1 ns1 s2)" "(lex_two s1 ns1 ns2)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. order_pair (lex_two s1 ns1 s2) (lex_two s1 ns1 ns2)
[PROOF STEP]
by(rule lex_two_order_pair, standard)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. SN_order_pair (lex_two s1 ns1 s2) (lex_two s1 ns1 ns2)
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. SN_order_pair (lex_two s1 ns1 s2) (lex_two s1 ns1 ns2)
[PROOF STEP]
by(standard, rule lex_two; fact)
[PROOF STATE]
proof (state)
this:
SN_order_pair (lex_two s1 ns1 s2) (lex_two s1 ns1 ns2)
goal:
No subgoals!
[PROOF STEP]
qed | {
"alphanum_fraction": null,
"author": null,
"avg_line_length": null,
"converted": null,
"ext": null,
"file": "Knuth_Bendix_Order_Lexicographic_Extension",
"hexsha": null,
"include": null,
"lang": null,
"length": 14,
"llama_tokens": 1216,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": null
} |
[STATEMENT]
lemma Substable_intro_diamond[Substable_intros]:
assumes "Substable cond \<psi>"
shows "Substable cond (\<lambda> \<phi> . \<^bold>\<diamond>(\<psi> \<phi>))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Substable cond (\<lambda>\<phi>. \<^bold>\<diamond>\<psi> \<phi>)
[PROOF STEP]
unfolding conn_defs
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Substable cond (\<lambda>\<phi>. \<^bold>\<not>\<^bold>\<box>\<^bold>\<not>\<psi> \<phi>)
[PROOF STEP]
by (simp add: assms Substable_intros) | {
"alphanum_fraction": null,
"author": null,
"avg_line_length": null,
"converted": null,
"ext": null,
"file": "PLM_TAO_9_PLM",
"hexsha": null,
"include": null,
"lang": null,
"length": 2,
"llama_tokens": 196,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": null
} |
import matplotlib
import numpy as np
matplotlib.use('Agg')
import shap
def test_multiply():
    """Sanity-check shap.DeepExplainer on a small Keras LSTM (IMDB data).

    Builds an untrained LSTM + Dense model and verifies the SHAP
    completeness property: the summed SHAP values for one sample must
    equal the difference between that sample's final-layer input (the
    LSTM output) and its mean over the background data.

    Skips silently when keras/tensorflow are not importable.
    """
    try:
        import keras
        import numpy as np
        import tensorflow as tf
        from keras.datasets import imdb
        from keras.models import Sequential
        from keras.layers import Dense
        from keras.layers import LSTM
        from keras.layers.embeddings import Embedding
        from keras.preprocessing import sequence
    except Exception as e:
        # Fixed: the skip message previously named the wrong test
        # ("test_tf_keras_mnist_cnn"), a copy-paste leftover.
        print("Skipping test_multiply!")
        return
    import shap

    np.random.seed(7)
    (X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=1000)
    # Pad to length 100 and add a trailing channel axis: (N, 100, 1).
    X_train = np.expand_dims(sequence.pad_sequences(X_train, maxlen=100), axis=2)
    X_test = np.expand_dims(sequence.pad_sequences(X_test, maxlen=100), axis=2)

    # create the model (untrained weights are fine for this property;
    # removed an unused `embedding_vector_length` local)
    mod = Sequential()
    mod.add(LSTM(100, input_shape=(100, 1)))
    mod.add(Dense(1, activation='sigmoid'))
    mod.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

    # Three random background samples and one held-out test sample.
    inds = np.random.choice(X_train.shape[0], 3, replace=False)
    data = X_train[inds, :]
    test_in = X_test[10:11, :, :]

    # Explain the input of the last layer (mod.layers[-1].input, i.e. the
    # LSTM output) with respect to the model input.
    e = shap.DeepExplainer((mod.layers[0].input, mod.layers[-1].input), data)
    shap_values = e.shap_values(test_in)
    sums = np.array([shap_values[i].sum() for i in range(len(shap_values))])

    # Completeness check: sum of attributions == f(x) - E[f(background)].
    sess = tf.keras.backend.get_session()
    diff = sess.run(mod.layers[-1].input, feed_dict={mod.layers[0].input: test_in})[0, :] - \
        sess.run(mod.layers[-1].input, feed_dict={mod.layers[0].input: data}).mean(0)
    assert np.allclose(sums, diff, atol=1e-06), "Sum of SHAP values does not match difference!"
| {
"alphanum_fraction": 0.6710301651,
"author": null,
"avg_line_length": 36.6041666667,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "5b4513e0cd38d45faff90814be0c0a516a3358a0",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "672e44f5d1f6ce808796b35be0dd0a75c2c3c9ed",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "ekrim/shap",
"max_forks_repo_path": "tests/test_deep_lstm.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "672e44f5d1f6ce808796b35be0dd0a75c2c3c9ed",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "ekrim/shap",
"max_issues_repo_path": "tests/test_deep_lstm.py",
"max_line_length": 95,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "672e44f5d1f6ce808796b35be0dd0a75c2c3c9ed",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "ekrim/shap",
"max_stars_repo_path": "tests/test_deep_lstm.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 452,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 1757
} |
from .enums import CitySize, AreaKind
import numpy as np
class OkumuraHata:
    """Okumura-Hata empirical radio path-loss model.

    Computes path loss as a function of distance for urban, suburban and
    rural areas, using the standard Hata correction terms.

    Attributes
    ----------
    frequency : carrier frequency (MHz in the model's standard form --
        TODO confirm units against callers)
    transmitter_height : base-station antenna height
    receiver_height : mobile antenna height
    city_size : CitySize enum member
    area_kind : AreaKind enum member
    """

    def __init__(
        self,
        frequency,
        transmitter_height,
        receiver_height,
        city_size,
        area_kind,
    ):
        self.frequency = frequency
        self.transmitter_height = transmitter_height
        self.receiver_height = receiver_height
        self.city_size = city_size
        self.area_kind = area_kind

    def _height_correction(self):
        """Mobile-antenna height correction term a(h_m)."""
        # Compare via .value, matching the pattern used in path_loss().
        if self.city_size.value == CitySize.LARGE.value and self.frequency <= 200:
            # Large city, low frequency.
            return 8.29 * (np.log10(1.54 * self.receiver_height)**2) - 1.1
        elif self.city_size.value == CitySize.LARGE.value:
            # Large city, higher frequency.
            # BUG FIX: this branch previously compared the enum member
            # itself against CitySize.LARGE.value (never equal), so
            # large-city/high-frequency inputs silently fell through to
            # the small/medium-city formula below.
            return 3.2 * (np.log10(11.75 * self.receiver_height)**2) - 4.97
        else:
            # Small/medium city.
            return 0.8 + (1.1 * np.log10(self.frequency) - 0.7) * self.receiver_height - 1.56 * np.log10(self.frequency)

    def _base_loss(self, distance):
        """Urban (base) Hata path loss at the given distance."""
        constant_factor = 69.55
        frequency_factor = 26.16 * np.log10(self.frequency)
        base_height_factor = 13.82 * np.log10(self.transmitter_height)
        distance_factor = (44.9 - 6.55 * np.log10(self.transmitter_height)) * np.log10(distance)
        return constant_factor + frequency_factor - base_height_factor - self._height_correction() + distance_factor

    def _suburban_loss(self, distance):
        """Suburban correction applied to the base (urban) loss."""
        frequency_factor = 2 * (np.log10(self.frequency/28.0)**2)
        constant_factor = 5.4
        return self._base_loss(distance) - frequency_factor - constant_factor

    def _rural_loss(self, distance):
        """Open/rural-area correction applied to the base (urban) loss."""
        frequency_factor = 4.78 * (np.log10(self.frequency)**2) - 18.33 * (np.log10(self.frequency))
        constant_factor = 40.94
        return self._base_loss(distance) - frequency_factor - constant_factor

    def path_loss(self, distance):
        """Dispatch to the loss formula for this instance's area kind.

        Raises
        ------
        ValueError
            If area_kind is not one of URBAN, SUBURBAN or RURAL.
        """
        if self.area_kind.value == AreaKind.URBAN.value:
            return self._base_loss(distance)
        elif self.area_kind.value == AreaKind.SUBURBAN.value:
            return self._suburban_loss(distance)
        elif self.area_kind.value == AreaKind.RURAL.value:
            return self._rural_loss(distance)
        else:
            raise ValueError("Invalid area type")
| {
"alphanum_fraction": 0.7001469868,
"author": null,
"avg_line_length": 37.1090909091,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "176666c2c3b2a4b220958ac4332993594c6ba494",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "3eb2e0b1339a7a8d637134205b84ff71e3ebb7d0",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "pcstl/mobile_location",
"max_forks_repo_path": "mobile_localization/path_loss/okumura_hata.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "3eb2e0b1339a7a8d637134205b84ff71e3ebb7d0",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "pcstl/mobile_location",
"max_issues_repo_path": "mobile_localization/path_loss/okumura_hata.py",
"max_line_length": 114,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "3eb2e0b1339a7a8d637134205b84ff71e3ebb7d0",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "pcstl/mobile_localization",
"max_stars_repo_path": "mobile_localization/path_loss/okumura_hata.py",
"max_stars_repo_stars_event_max_datetime": "2020-05-27T03:09:19.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-05-27T03:09:19.000Z",
"num_tokens": 534,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 2041
} |
///////////////////////////////////////////
// http://progsch.net/wordpress/?p=81
#include <deque>
#include <functional>
#include <vector>

#include <boost/thread.hpp>
class ThreadPool;
// our worker thread objects
class Worker {
public:
    // Bind this worker to the pool whose queue it will service.
    Worker(ThreadPool &s) : pool(s) { }
    // Thread entry point: loops pulling tasks from pool.tasks until the
    // pool is stopped (defined below the ThreadPool declaration).
    void operator()();
private:
    ThreadPool &pool;
};
// the actual thread pool
class ThreadPool {
public:
    // Launch the given number of worker threads.
    ThreadPool(size_t);
    // Queue a nullary callable for asynchronous execution.
    template<class F>
    void enqueue(F f);
    // Queue a member-function pointer bound to instance i.
    template <class F, class C>
    void enqueue(F f, C* i);
    // True when no worker is currently executing a task.
    // NOTE(review): reads `active` without holding queue_mutex and
    // ignores still-queued tasks -- treat as advisory only.
    bool done() { return active == 0; }
    ~ThreadPool();
private:
    friend class Worker;
    // need to keep track of threads so we can join them
    std::vector< boost::thread > workers;
    // the task queue
    std::deque< std::function<void()> > tasks;
    // synchronization
    boost::mutex queue_mutex;
    boost::condition_variable condition;
    // Set by the destructor to tell workers to exit.
    // NOTE(review): plain bool/size_t shared across threads; all
    // mutation should happen under queue_mutex (neither is atomic).
    bool stop;
    // Count of tasks currently being executed.
    size_t active;
};
// Worker main loop: repeatedly dequeue and run tasks until the pool stops.
void Worker::operator()()
{
    std::function<void()> task;
    while(true)
    {
        {
            // acquire lock
            boost::unique_lock<boost::mutex>
                lock(pool.queue_mutex);
            // look for a work item
            while(!pool.stop && pool.tasks.empty())
            { // if there are none wait for notification
                pool.condition.wait(lock);
            }
            if(pool.stop) // exit if the pool is stopped
                return;
            // get the task from the queue
            task = pool.tasks.front();
            pool.tasks.pop_front();
            // FIX: claim the task while still holding the lock. The
            // original did ++pool.active after releasing the mutex, so
            // done() could observe "queue empty, active == 0" while a
            // task had been popped but not yet counted, and the
            // unsynchronized ++/-- on a plain size_t was a data race.
            ++pool.active;
        } // release lock
        // execute the task
        task();
        {
            // decrement the in-flight count under the same mutex
            boost::unique_lock<boost::mutex> lock(pool.queue_mutex);
            --pool.active;
        }
    }
}
// the constructor just launches some amount of workers
ThreadPool::ThreadPool(size_t threads)
    : stop(false), active(0)
{
    // Each boost::thread runs a copy of Worker(*this); the worker loops
    // in Worker::operator() until `stop` is set.
    for(size_t i = 0; i < threads;++i)
        workers.push_back(boost::thread(Worker(*this)));
}
// the destructor joins all threads
ThreadPool::~ThreadPool()
{
    {
        // FIX: set `stop` while holding the queue mutex. The original
        // wrote it unlocked, so a worker could test the wait predicate,
        // see stop == false, and then block in wait() just after
        // notify_all() fired -- a lost-wakeup race that would hang the
        // join loop below.
        boost::unique_lock<boost::mutex> lock(queue_mutex);
        stop = true;
    }
    condition.notify_all();
    // join them
    for(size_t i = 0;i<workers.size();++i)
        workers[i].join();
}
// add new work item to the pool
// F must be callable with no arguments; any return value is discarded.
template<class F>
void ThreadPool::enqueue(F f)
{
    { // acquire lock
        boost::unique_lock<boost::mutex> lock(queue_mutex);
        // add the task
        tasks.push_back(std::function<void()>(f));
    } // release lock
    // wake up one thread
    condition.notify_one();
}
// add new work item to the pool
// Overload for a member-function pointer `f` invoked on instance `i`
// (bound via std::bind; `i` must outlive the task's execution).
template<class F, class C>
void ThreadPool::enqueue(F f, C* i)
{
    { // acquire lock
        boost::unique_lock<boost::mutex> lock(queue_mutex);
        // add the task
        tasks.push_back(std::bind(f, i));
    } // release lock
    // wake up one thread
    condition.notify_one();
}
| {
"alphanum_fraction": 0.6505219207,
"author": null,
"avg_line_length": 18.1439393939,
"converted": null,
"ext": "hpp",
"file": null,
"hexsha": "e2cac4f4618498812186505a041cc79b74d6e4df",
"include": null,
"lang": "C++",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "a894a9c7bd45a68ea1b6ff14877cdbe47ddd39cf",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "Sonaza/scyori",
"max_forks_repo_path": "code/szen/inc/szen/System/ThreadPool.hpp",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "a894a9c7bd45a68ea1b6ff14877cdbe47ddd39cf",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "Sonaza/scyori",
"max_issues_repo_path": "code/szen/inc/szen/System/ThreadPool.hpp",
"max_line_length": 55,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "a894a9c7bd45a68ea1b6ff14877cdbe47ddd39cf",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "Sonaza/scyori",
"max_stars_repo_path": "code/szen/inc/szen/System/ThreadPool.hpp",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 626,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 2395
} |
'''
This module contains functions for retrieving Kartverket map data for specified areas.
Notes
-----
The retrieval of map data uses `owslib`. Read more in the `owslib image
tutorial
<https://geopython.github.io/OWSLib/index.html?highlight=webmapservice>`_.
Karverket data is retrieved from the `Kartverket WMS
<http://kartverket.no/data/API-og-WMS/>`_.
Karverket WMS example:
.. code:: bash
http://openwms.statkart.no/skwms1/wms.topo3?version=1.1.1&styles=&service=wms&REQUEST=map&SRS=EPSG:32633&BBOX=210924.955,6668620.35,255289.776,6688292.32&LAYERS=topo3_WMS&WIDTH=1650&HEIGHT=1100&FORMAT=image/png&BGCOLOR=0xFFFFFF&TRANSPARENT=TRUE
'''
def get_wms_dict(xml):
    '''Build a nested dict of layer -> SRS -> bounding-box attributes.

    Args
    ----
    xml: str
        WMS capabilities XML in string format

    Returns
    -------
    d: OrderedDict
        Maps each layer title to an OrderedDict keyed by SRS code, whose
        values map the bounding-box attribute names (minus 'srs') to
        their values
    '''
    from collections import OrderedDict
    from bs4 import BeautifulSoup

    soup = BeautifulSoup(xml, 'lxml')
    # Skip the first <layer>, which wraps the individual map layers.
    layer_tags = soup.findAll('layer')[1:]

    result = OrderedDict()
    for layer_tag in layer_tags:
        title = layer_tag.find('title').text
        result[title] = OrderedDict()
        bbox_tags = layer_tag.findAll('boundingbox')
        for code in sorted(tag.text for tag in layer_tag.findAll('srs')):
            for bbox_tag in bbox_tags:
                if bbox_tag['srs'] != code:
                    continue
                entry = OrderedDict()
                for attr in sorted(bbox_tag.attrs.keys()):
                    if attr != 'srs':
                        entry[attr] = bbox_tag.attrs[attr]
                result[title][code] = entry
    return result
def project_bbox(srs, lon0, lat0, lon1, lat1):
    '''Project the map-extent bounding box from WGS84 into `srs`.

    Args
    ----
    srs: str
        Spatial Reference System for map output (e.g. 'EPSG:32633')
    lon0: float
        Minimum longitude for map extent
    lat0: float
        Minimum latitude for map extent
    lon1: float
        Maximum longitude for map extent
    lat1: float
        Maximum latitude for map extent

    Returns
    -------
    bbox: float tuple
        `(minx, miny, maxx, maxy)` in units of the SRS
    '''
    import pyproj

    source = pyproj.Proj(init='EPSG:4326')
    target = pyproj.Proj('+init={}'.format(srs))
    x_min, y_min = pyproj.transform(source, target, lon0, lat0)
    x_max, y_max = pyproj.transform(source, target, lon1, lat1)
    return (x_min, y_min, x_max, y_max)
def get_size(bbox, width):
    '''Generate adjusted width and height from bounds and given width

    Both dimensions are clamped to the Kartverket WMS GetMap limit of
    4096 pixels while preserving the bounding box's aspect ratio.

    Args
    ----
    bbox: float tuple
        Bounding box for map extent. Value is `minx, miny, maxx, maxy` in
        units of the SRS
    width: int
        Requested pixel width for the Kartverket WMS GetMap() query

    Return
    ------
    width: int
        Adjusted pixel width for the Kartverket WMS GetMap() query
    height: int
        Adjusted pixel height for the Kartverket WMS GetMap() query
    '''
    # (Removed an unused `import pyproj` -- nothing here projects.)
    # Maximum WIDTH/HEIGHT dimension for Kartveket WMS GetMap call
    maxdim = 4096

    # Make width equal `maxdim` if too large
    width = min(width, maxdim)

    # Aspect ratio of the projected extent
    xdiff = bbox[2] - bbox[0]
    ydiff = bbox[3] - bbox[1]
    yx_ratio = ydiff / xdiff

    # Calculate height from projected dimension
    height = round(width * yx_ratio)

    # If height exceeds the limit, clamp it and re-derive the width
    if height > maxdim:
        height = maxdim
        width = round(height / yx_ratio)
    return width, height
def get_wms_png(wms, bbox, layer, srs, width=1600, transparent=True):
    '''Get map data via WMS GetMap method for given bounding box and width

    Args
    ----
    wms: owslib.wms
        WMS object with getmap() class method to call
    bbox: float tuple
        Bounding box for map extent. Value is `minx, miny, maxx, maxy` in
        units of the SRS
    layer: str
        Name of WMS layer to retrieve
    srs: str
        Spatial reference system
    width: int
        Pixel width for Kartverket WMS GetMap() query
    transparent: bool
        Switch to make background color transparent (Default: True)

    Returns
    -------
    oswslib_img: owslib.image
        Image object with retrieved image data
    '''
    # Derive the (width, height) pair from the bbox aspect ratio; the
    # requested width may be clamped to the WMS maximum.
    size = get_size(bbox, width)
    # Retrieve map data using WMS GetMap() call
    return wms.getmap(layers=[layer], srs=srs, bbox=bbox, size=size,
                      format='image/png', transparent=transparent)
def png2geotiff(filename_png, srs, bbox):
    '''Convert a PNG file to a georeferenced GEOTIFF via gdal_translate

    The output .tif is written alongside the input, with the same stem.

    Args
    ----
    filename_png: str
        Path and filename of the PNG file to convert
    srs: str
        Spatial reference system string (e.g. EPSG:4326)
    bbox: float tuple
        Bounding box for map extent. Value is `minx, miny, maxx, maxy` in
        units of the SRS
    '''
    import os
    import subprocess

    stem = os.path.splitext(filename_png)[0]
    filename_tif = '{}.tif'.format(stem)
    # Assign the SRS and the upper-left/lower-right georeference corners.
    args = (srs, bbox[0], bbox[1], bbox[2], bbox[3], filename_png, filename_tif)
    call = 'gdal_translate -a_srs {} -a_ullr {} {} {} {} {} {}'.format(*args)
    subprocess.check_call(call.split(' '))
    return None
def map_ticks(pos0, pos1, n, nsew=False):
    '''Generate n tick positions and labels from given start and end position

    Args
    ----
    pos0: float
        Lon or Lat starting point
    pos1: float
        Lon or Lat end point
    n: int
        Number of tick positions and labels to generate
    nsew: bool
        Switch to append a hemisphere suffix to tick labels
        (Default: False). NOTE(review): the inner parser only produces
        N/S/E/W when told whether values are lon or lat; map_ticks has
        no way to know, so with nsew=True the suffix is currently always
        empty -- confirm intended semantics with callers.

    Returns
    -------
    ticks: list of float
        Projected tick positions
    labels: list of str
        Labels in DMS for generated tick positions
    '''
    import numpy

    def parse_degminsec(dec_degs, method=None, round_secs=False):
        '''Parse decimal degrees to degrees, minutes, seconds and suffix.

        Works elementwise on arrays (the original compared whole arrays
        with scalar `if` tests, which cannot work for lon/lat mode).
        '''
        degs = numpy.floor(dec_degs)
        dec_mins = numpy.abs((dec_degs - degs) * 60)
        mins = numpy.floor(dec_mins)
        secs = numpy.abs((dec_mins - mins) * 60)
        if round_secs:
            secs = numpy.round(secs)
        # Hemisphere suffix per element; 0 degrees gets no suffix,
        # matching the original's sign convention.
        if method == 'lon':
            nsews = numpy.where(degs < 0, 'W', numpy.where(degs > 0, 'E', ''))
        elif method == 'lat':
            nsews = numpy.where(degs < 0, 'S', numpy.where(degs > 0, 'N', ''))
        else:
            nsews = numpy.full(numpy.shape(degs), '', dtype=object)
        return degs, mins, secs, nsews

    ticks = numpy.linspace(pos0, pos1, n)
    # Raw string: the original non-raw "\d" relied on an invalid escape
    # being passed through (DeprecationWarning). Debug print() removed.
    fmt = r"{:.0f}$\degree$ {:.0f}$'$ {:.0f}$''$"
    degs, mins, secs, nsews = parse_degminsec(ticks, round_secs=True)
    if nsew:
        fmt += ' {}'
        # BUG FIX: the original zipped the arrays against a scalar ''
        # (yielding zero tuples, hence always-empty labels) and unpacked
        # 3 names from 4-tuples.
        labels = [fmt.format(d, m, s, ns)
                  for d, m, s, ns in zip(degs, mins, secs, nsews)]
    else:
        labels = [fmt.format(d, m, s) for d, m, s in zip(degs, mins, secs)]
    return ticks, labels
| {
"alphanum_fraction": 0.6014673311,
"author": null,
"avg_line_length": 27.3636363636,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "cc62daa8f862be641129c5ce62acdf5fed3dc81f",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "fb0bf9a309939519e076ea7cbb5aadcd900f9301",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "ryanjdillon/smartmove",
"max_forks_repo_path": "smartmove/visuals/maps.py",
"max_issues_count": 13,
"max_issues_repo_head_hexsha": "fb0bf9a309939519e076ea7cbb5aadcd900f9301",
"max_issues_repo_issues_event_max_datetime": "2018-12-05T17:45:46.000Z",
"max_issues_repo_issues_event_min_datetime": "2017-10-09T10:08:43.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "ryanjdillon/smartmove",
"max_issues_repo_path": "smartmove/visuals/maps.py",
"max_line_length": 248,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "fb0bf9a309939519e076ea7cbb5aadcd900f9301",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "ryanjdillon/smartmove",
"max_stars_repo_path": "smartmove/visuals/maps.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1993,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 7224
} |
import numpy as np
import os
from IPSData import CollectorIPSData
import csv
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error
# Random-forest regression of film thickness from IPS reflectance spectra.
# Flat analysis script: loads two wafer datasets, trains on a 400-channel
# spectral window, then plots predicted vs. target thickness.
forest = RandomForestRegressor(n_estimators = 10 , max_features = 'sqrt' , criterion = 'mse' , max_depth = 5 , n_jobs = 2 )
basepath = r"F:\IPSData2"
thckpath = r"F:\KLAData\ThicknessData"
ipsdata = CollectorIPSData(basepath , thckpath)
# NOTE(review): [2,4] presumably selects wafers #2 and #4, but plot labels
# below mix "#2"/"#3"/"#4" -- confirm which dataset is which.
xs , ys , ps = ipsdata.ReadMulti_RflcThck([2,4])
X = np.concatenate( (xs[0] , xs[1]) , axis = 0)
y = np.concatenate( (ys[0] , ys[1]) ).flatten()
# Restrict to spectral channels 450:850 for training.
X2Train = xs[0][: , 450:850]
y2Train = ys[0]
X3Train = xs[1][: , 450:850]
y3Train = ys[1]
# X and y are rebuilt here from the windowed data; the full-spectrum X
# built above is discarded.
X = np.concatenate((X2Train , X3Train ))
y = np.concatenate((y2Train , y3Train ))
y = y.reshape( y.shape[0], 1)
# Hold out the first 10 rows of each dataset as a test split.
# NOTE(review): row index 10 is dropped entirely ([:10] then [11:]) --
# possibly an off-by-one; confirm intent.
X2Test = X2Train[:10,:]
X2Train = X2Train[11:: , : ]
y2Test = y2Train[:10]
y2Train = y2Train[11:]
X3Test = X3Train[:10,:]
X3Train = X3Train[11:: , : ]
y3Test = y3Train[:10]
y3Train = y3Train[11:]
y2Test = y2Test.reshape( y2Test.shape[0] , 1)
y3Test = y3Test.reshape( y3Test.shape[0] , 1)
XTrain = np.concatenate((X2Train , X3Train ))
yTrain = np.concatenate((y2Train , y3Train ))
yTrain = yTrain.reshape( yTrain.shape[0], 1)
# Measurement positions ("x y" strings) used to annotate the scatter plot.
p2 = ps[0]
p3 = ps[1]
Position = np.concatenate((p2,p3))
########## Fitting ###########
forest.fit(XTrain,yTrain) # Use Train Data for Train
predict = forest.predict(X) # Prediction of All Datas. use for calc error and correlation
y = y.flatten()
errors = mean_squared_error(y , predict)
core = np.corrcoef( predict , y )[0,1]
# First-order fit of target vs. prediction, drawn as a reference line.
fit = np.polyfit(predict, y, deg=1)
########### predict for display ################
pre2Train = forest.predict(X2Train)
pre2Test = forest.predict(X2Test)
pre3Train = forest.predict(X3Train)
pre3Test = forest.predict(X3Test)
################ Display ###################
print("Error : ",errors)
print("Correlation : " , core)
plt.title( "Correlation = {0} , Error = {1}, (Red : #2 , Blue : #4) ".format(core , errors) )
plt.xlabel( "Predict" )
plt.ylabel( "Target" )
s1 = plt.scatter(pre2Train , y2Train , c = 'r' , alpha = 0.5 , label = "#2Train")
s2 = plt.scatter(pre3Train , y3Train , c = 'b' , alpha = 0.5 , label = "#3Train")
s3 = plt.scatter(pre2Test , y2Test , c = 'g' , alpha = 0.9 , label = "#2Test")
s4 = plt.scatter(pre3Test , y3Test , c = 'y' , alpha = 0.9 , label = "#3Test")
s5 = plt.scatter( [285]*len(yTrain) , yTrain , c = 'brown' , alpha = 0.9 , label = "Target")
plt.plot( predict , fit[0]*predict + fit[1] , color = 'Turquoise')
plt.legend()
plt.xlim(280,340)
plt.ylim(280,340)
plt.legend()
#plt.plot(LossTest , 'b' )
# Annotate roughly every 31st point with its stage position.
# NOTE(review): the loop variable y shadows the target array above;
# harmless here only because y is not used again after this loop.
for label,x,y,i,pos in zip(Position,predict,y,range(0,len(y)),Position ):
    posstr = pos.split(" ")
    posx = float(posstr[0])
    posy = float(posstr[1])
    # Radial distance from wafer centre (computed but currently unused).
    rho = ( posx**2+posy**2 )**0.5
    if i%31 == 0:
        rand = np.random.uniform(1.0 , 2.0)
        plt.annotate(
            label,
            xy = (x,y),
            xytext=(10*rand,-10*rand),
            textcoords='offset points',
            bbox=dict(boxstyle='round,pad=0.2', fc='yellow', alpha=0.2),
            arrowprops=dict(arrowstyle = '->', connectionstyle='arc3,rad=0'))
#if i%71 == 0:
#    plt.annotate(
#        label,
#        xy = (x,y),
#        xytext=(30*(i/60),30*(i/60)),
#        textcoords='offset points',
#        bbox=dict(boxstyle='round,pad=0.2', fc='green', alpha=0.2),
#        arrowprops=dict(arrowstyle = '->', connectionstyle='arc3,rad=0'))
plt.show()
| {
"alphanum_fraction": 0.6088319088,
"author": null,
"avg_line_length": 29.7457627119,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "44f158df1755830cbddf70d70a69116dce5b4f85",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "a7f633fbbc1ab2e1c272015c16304f7690bea7e2",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "pimier15/DataAnalysis",
"max_forks_repo_path": "DataAnalysis/DataAnalysis/RandomForest_CheckPoint/Code/IPSRandomForest.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "a7f633fbbc1ab2e1c272015c16304f7690bea7e2",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "pimier15/DataAnalysis",
"max_issues_repo_path": "DataAnalysis/DataAnalysis/RandomForest_CheckPoint/Code/IPSRandomForest.py",
"max_line_length": 125,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "a7f633fbbc1ab2e1c272015c16304f7690bea7e2",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "pimier15/DataAnalysis",
"max_stars_repo_path": "DataAnalysis/DataAnalysis/RandomForest_CheckPoint/Code/IPSRandomForest.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1151,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 3510
} |
#!/usr/bin/python
# Python 2 script (uses the print statement): smoke test for
# ChemUtils.balanceReactionString on a sucrose + potassium-nitrate reaction.
import re
import numpy as np
import sys
import ChemUtils as ch
# NOTE(review): re, np and sys are unused by the active line below; they
# may be needed by the commented-out experiments.
#print ch.str2composition( sys.argv[1] )
#sides = ch.parseReaction( 'Fe+O2=Fe2O3' )
#sides = ch.parseReaction( 'C12H22O11+KNO3=H2O+CO2+K2CO3+N2' )
#print sides
#print ch.reaction2string( sides )
#print ch.balanceReactionString( 'Fe+O2=Fe2O3' )
# Print the balanced form of the sucrose/saltpetre reaction.
print ch.balanceReactionString( 'C12H22O11+KNO3=H2O+CO2+K2CO3+N2' )
#print atomicBalance( reaction[0], reaction[1] )
| {
"alphanum_fraction": 0.7377777778,
"author": null,
"avg_line_length": 22.5,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "4ce0b6928e1820a99cfba97f5fb1c95b134b15eb",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 2,
"max_forks_repo_forks_event_max_datetime": "2019-04-28T02:24:50.000Z",
"max_forks_repo_forks_event_min_datetime": "2019-02-09T12:31:06.000Z",
"max_forks_repo_head_hexsha": "9c5480f2392c9c89b9fee4902db0c4cde5323a6c",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "Aki78/FlightAI",
"max_forks_repo_path": "python/pySimE/chemistry/tests/test_reaction.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "9c5480f2392c9c89b9fee4902db0c4cde5323a6c",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "Aki78/FlightAI",
"max_issues_repo_path": "python/pySimE/chemistry/tests/test_reaction.py",
"max_line_length": 67,
"max_stars_count": 26,
"max_stars_repo_head_hexsha": "240f9b7e85b3a6eda7a27dc15fe3f7b8c08774c5",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "ProkopHapala/SimpleSimulationEngine",
"max_stars_repo_path": "python/pySimE/chemistry/tests/test_reaction.py",
"max_stars_repo_stars_event_max_datetime": "2022-03-24T09:39:28.000Z",
"max_stars_repo_stars_event_min_datetime": "2016-12-04T04:45:12.000Z",
"num_tokens": 167,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 450
} |
import numpy as np
from relevanceai.operations.dr.base import DimReductionBase
from typing import Optional, Dict, Any
from relevanceai.operations.cluster.constants import (
    DIM_REDUCTION,
    DIM_REDUCTION_DEFAULT_ARGS,
)
class PCA(DimReductionBase):
    """PCA dimensionality reduction backed by scikit-learn."""

    def fit(self, vectors: np.ndarray, dims: int = 3, *args, **kw):
        """Fit a PCA model on `vectors`; components capped at the vector width."""
        from sklearn.decomposition import PCA as SKLEARN_PCA
        pca = SKLEARN_PCA(n_components=min(dims, vectors.shape[1]))
        return pca.fit(vectors)

    def fit_transform(
        self,
        vectors: np.ndarray,
        dr_args: Optional[Dict[Any, Any]] = None,
        dims: int = 3,
    ) -> np.ndarray:
        """Fit PCA and project `vectors` down to at most `dims` components.

        `dr_args` defaults to ``DIM_REDUCTION_DEFAULT_ARGS["pca"]``. A ``None``
        sentinel is used instead of the shared dict itself so the module-level
        default mapping cannot be mutated across calls (mutable-default pitfall).
        """
        from sklearn.decomposition import PCA as SKLEARN_PCA
        if dr_args is None:
            dr_args = DIM_REDUCTION_DEFAULT_ARGS["pca"]
        self.logger.debug(f"{dr_args}")
        vector_length = len(vectors[0])
        pca = SKLEARN_PCA(n_components=min(dims, vector_length), **dr_args)
        return pca.fit_transform(vectors)
| {
"alphanum_fraction": 0.6851654216,
"author": null,
"avg_line_length": 32.3103448276,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "0fde7e69c029dba29d54454c833eb13d185fd95c",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 4,
"max_forks_repo_forks_event_max_datetime": "2022-02-11T03:19:32.000Z",
"max_forks_repo_forks_event_min_datetime": "2022-01-04T01:48:30.000Z",
"max_forks_repo_head_hexsha": "a0542f35153d9c842f3d2cd0955d6b07f6dfc07b",
"max_forks_repo_licenses": [
"Apache-2.0"
],
"max_forks_repo_name": "RelevanceAI/RelevanceAI",
"max_forks_repo_path": "relevanceai/operations/dr/models/pca.py",
"max_issues_count": 217,
"max_issues_repo_head_hexsha": "a0542f35153d9c842f3d2cd0955d6b07f6dfc07b",
"max_issues_repo_issues_event_max_datetime": "2022-03-30T08:11:49.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-11-23T00:11:01.000Z",
"max_issues_repo_licenses": [
"Apache-2.0"
],
"max_issues_repo_name": "RelevanceAI/RelevanceAI",
"max_issues_repo_path": "relevanceai/operations/dr/models/pca.py",
"max_line_length": 78,
"max_stars_count": 21,
"max_stars_repo_head_hexsha": "a0542f35153d9c842f3d2cd0955d6b07f6dfc07b",
"max_stars_repo_licenses": [
"Apache-2.0"
],
"max_stars_repo_name": "RelevanceAI/RelevanceAI",
"max_stars_repo_path": "relevanceai/operations/dr/models/pca.py",
"max_stars_repo_stars_event_max_datetime": "2022-03-23T03:45:30.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-11-23T13:01:36.000Z",
"num_tokens": 229,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 937
} |
import numpy as np
from conv_net import conv_net
from data_utils import *
import time
class Solver(object):
    """
    Solver class for train and test of a `conv_net` on MNIST-shaped data
    (both `conv_net` and `load_mnist` are project-local; see imports above).

    methods:
    - train: train conv_net and save parameters to self.param_path
    - test: evaluate a trained net from a saved parameter file
    """
    def __init__(self):
        # config
        self.lr = 1e-4                  # learning rate
        self.weight_decay = 0.0004      # L2 regularization strength
        self.batch_size = 50
        self.model = conv_net([self.batch_size, 28, 28, 1]) #MNIST input
        self.max_epoch = 1
        self.param_path = 'param.npy'   # where trained weights are stored

    def train(self):
        """Run `self.max_epoch` epochs of training and save learned weights."""
        images, labels = load_mnist('./dataset', 'train')
        print(images.shape, labels.shape)
        # start training
        for epoch in range(self.max_epoch):
            # clear running totals
            self.train_acc = 0
            self.train_loss = 0
            batch_num = int(images.shape[0] / self.batch_size)
            for i in range(batch_num):
                # load batch
                imgs = images[i * self.batch_size:(i + 1) * self.batch_size].reshape([self.batch_size, 28, 28, 1])
                lbls = labels[i * self.batch_size:(i + 1) * self.batch_size]
                # compute one batch
                self.model.forward(imgs, lbls)
                self.model.backprop()
                self.model.update(self.lr, self.weight_decay)
                # accumulate loss/accuracy
                self.train_acc += self.model.batch_acc
                self.train_loss += self.model.batch_loss
                if i % 10 == 0:
                    # report per-batch averages
                    avg_batch_acc = float(self.model.batch_acc / self.batch_size)
                    avg_batch_loss = self.model.batch_loss / self.batch_size
                    print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) +
                          " epoch: %d, batch: %d, batch_acc: %.4f, avg_batch_loss: %.4f " % (
                              epoch, i, avg_batch_acc, avg_batch_loss))
            # per-epoch averages over the whole split
            avg_train_acc = float(self.train_acc / images.shape[0])
            avg_train_loss = self.train_loss / images.shape[0]
            print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) +
                  " epoch: %5d , train_acc: %.4f avg_train_loss: %.4f" % (
                      epoch, avg_train_acc, avg_train_loss))
        # save weights (use self.param_path so train/test agree on the file;
        # previously the filename was hard-coded here)
        conv1_param = {'weights': self.model.conv1.weights,
                       'bias': self.model.conv1.bias,}
        conv2_param = {'weights': self.model.conv2.weights,
                       'bias': self.model.conv2.bias,}
        fc_param = {'weights': self.model.fc.weights,
                    'bias': self.model.fc.bias,}
        param_dict = {'conv1': conv1_param,
                      'conv2': conv2_param,
                      'fc': fc_param,}
        np.save(self.param_path, param_dict)

    def test(self, path):
        """Evaluate the model using the weights saved at `path` on 't10k'."""
        images, labels = load_mnist('./dataset', 't10k')
        print(images.shape, labels.shape)
        # clear running totals
        self.test_acc = 0
        self.test_loss = 0
        # load_param
        # allow_pickle=True is required on NumPy >= 1.16.3 to load the
        # pickled dict written by train()
        param_dict = np.load(path, allow_pickle=True, encoding='bytes').item()
        conv1_param = param_dict['conv1']
        conv2_param = param_dict['conv2']
        fc_param = param_dict['fc']
        # fill weights
        self.model.conv1.weights = conv1_param['weights']
        self.model.conv1.bias = conv1_param['bias']
        self.model.conv2.weights = conv2_param['weights']
        self.model.conv2.bias = conv2_param['bias']
        self.model.fc.weights = fc_param['weights']
        self.model.fc.bias = fc_param['bias']
        batch_num = int(images.shape[0] / self.batch_size)
        for i in range(batch_num):
            print('testing batch number %d' % (i))
            # load batch
            imgs = images[i * self.batch_size:(i + 1) * self.batch_size].reshape([self.batch_size, 28, 28, 1])
            lbls = labels[i * self.batch_size:(i + 1) * self.batch_size]
            # forward pass only
            self.model.forward(imgs, lbls)
            # accumulate loss/accuracy
            self.test_acc += self.model.batch_acc
            self.test_loss += self.model.batch_loss
        # compute averages over the whole split
        avg_test_acc = float(self.test_acc / images.shape[0])
        avg_test_loss = self.test_loss / images.shape[0]
        # fixed label: this line previously printed "train_acc"/"avg_train_loss"
        # for what are test-set metrics
        print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) +
              " test_acc: %.4f avg_test_loss: %.4f" % (avg_test_acc, avg_test_loss))
if __name__ == "__main__":
    # Entry point: evaluates previously saved weights by default.
    solver = Solver()
    #solver.train() # uncomment to train
solver.test('param.npy') | {
"alphanum_fraction": 0.6049891015,
"author": null,
"avg_line_length": 34.4083333333,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "40053747bddc7f447167b2847965b6a074e4f316",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "f8dc1d577520d70da2f159b6ad6b36b5ad0cc5e1",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "izzrak/numpy_cnn",
"max_forks_repo_path": "solver.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "f8dc1d577520d70da2f159b6ad6b36b5ad0cc5e1",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "izzrak/numpy_cnn",
"max_issues_repo_path": "solver.py",
"max_line_length": 106,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "f8dc1d577520d70da2f159b6ad6b36b5ad0cc5e1",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "izzrak/numpy_cnn",
"max_stars_repo_path": "solver.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1092,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 4129
} |
# -*- coding: utf-8 -*-
"""
Created on Wed May 26 18:27:37 2021
@author: Laura Grandas
"""
import os
import PyPDF2
import slate3k as slate
import pandas as pd
import numpy as np
import string
# para las stopwords
import nltk
from nltk.corpus import stopwords
spanish_stopwords = stopwords.words('spanish')
from nltk.stem import SnowballStemmer
stemmer = SnowballStemmer('spanish')
#MATRIX TÉRMINO-DOCUMENTO
from sklearn.feature_extraction.text import CountVectorizer # Vectorizador de palabras y DTM
spanish_stopwords = stopwords.words('spanish')
from sklearn.decomposition import LatentDirichletAllocation # Modelo de LDA
# LDA esta vaina no está corriendo en python 3.9
import pyLDAvis
from pyLDAvis import sklearn as sklearnlda #EXPORTAR EL MODELO (VISUALIZACIÓN)
#%%
# Working directories (hard-coded to the author's machine).
path = r'C:\Users\laura\Dropbox\UNIANDES\tesis_derecho'
path_txt = r'C:\Users\laura\Dropbox\UNIANDES\tesis_derecho\textos\10_Tutelas 2019-2 a 2021-1 - copy'
os.chdir(path_txt)
files_name = os.listdir(path_txt)
#%% SCRAPE ALL FILES WITH PyPDF2
# To store each writ (tutela) as one long string, nested lists of strings
# need to be flattened into a single flat list — done by the helper below.
def aplanar(elemento):
    """Flatten arbitrarily nested lists into a single flat list of strings.

    Fixed: the previous version recursed with `aplanar(elementoDeI)` where
    `elementoDeI` could itself be a string, so multi-character strings at
    nesting depth >= 2 were exploded into single characters (iterating a
    `str` yields its characters). Strings are now kept whole at any depth.
    Also avoids shadowing the imported `string` module with a local name.
    """
    plano = []
    for item in elemento:
        if isinstance(item, str):
            plano.append(item)
        else:
            # not a string: recurse and splice the flattened result in
            plano.extend(aplanar(item))
    return plano
# Read every file in the folder
files_name = os.listdir(path_txt)
outputs = []      # one entry per PDF: flat list of page strings
file_texto = []   # pairs [filename, page-strings]
for j in files_name:
    try:
        # this particular file is a known duplicate; skip it
        if j != '16_AT_2019_672dup.pdf':
            print(j)
            pdfReader = PyPDF2.PdfFileReader(j)
            count = pdfReader.numPages
            output = []
            # read every page of the file
            for i in range(count):
                page = pdfReader.getPage(i)
                output.append(page.extractText())
            output = aplanar(output)
            # lista_enmodo_string = ' '.join(map(str, output))
            # output = lista_enmodo_string
            outputs.append(output)
            juntos = [str(j), output]
            file_texto.append(juntos)
    # NOTE(review): bare except silently drops unreadable PDFs; consider at
    # least logging the exception so failed files can be audited.
    except:
        pass
        # outputs.append('nada')
        # juntos = (str(j),'nada')
        # file_texto.append(juntos)
# `output` here is whatever the *last* successfully read PDF produced
lista_enmodo_string = ' '.join(map(str, output))
#%% cleaning and tokenizing outputs2
ejemplo = outputs[12]
# Tokens/fragments to strip from every document before vectorizing.
# NOTE(review): there is no comma after ' ii ' and after ' aa ', so Python
# concatenates the adjacent string literals into ' ii  cia ' and ' aa  mp ';
# confirm whether that implicit concatenation was intended.
sobran = [r"\n", '.', ',',':', '-', '(', ')', '[', ']', '"', 'cid9', '—', ';',
          '•', '*', "'", r"\xc", 'cid', '\x0c', 'negrete', ' canscanner ',
          ' cc ', ' fecha ', ' señor', ' radicado ', 'derecho', ' fundamental ',
          'fundamentales', 'solicitud', ' caso ',
          ' ley ', ' auto ', ' ón', 'abril', 'mayo', 'agencia', 'nacional', ' corte ',
          'tutela', 'sentencia', 'scanned', '$','xc', 'iván', 'miguel', ' cc ',
          ' ia ', ' to ', ' id ', ' ad ', ' an ', ' ci ', ' ro ', ' ca ', 'ani',
          'tribunal', 'https', ' io ', ' io ', 'constitucional', ' ae ', ' ii '
          ' cia ', ' ce ', ' ea ', ' ie ', 'camscanner', ' by ', 'deech', ' aa '
          ' mp ', ' ser ', ' do '
          ]
# now also what shows up as leftover noise in the LDA outputs
numeros = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
sobran.append(numeros)   # appended as a nested list ...
sobran = aplanar(sobran) # ... then flattened back into a flat list of strings
def caracteres(objeto):
    """Return `objeto` with every token listed in the global `sobran` removed."""
    for token in sobran:
        objeto = objeto.replace(token, '')
    return objeto
# Clean each scraped document. Fixed: the elements of `outputs` are *lists*
# of page strings (aplanar output, appended in the scraping loop above), so
# calling .lower() on them directly raised AttributeError; join the pages
# into one string first, mirroring the `' '.join(map(str, output))` pattern
# used elsewhere in this script.
outputs2_clean = []
for j in range(len(outputs)):
    minusculas = ' '.join(map(str, outputs[j])).lower()
    limpios = caracteres(minusculas)
    outputs2_clean.append(limpios)
print('LIMPIO', outputs2_clean[22][2000:4000])
#%% tokenizing
# ejemplo = outputs2[12]
# spanish_stopwords.append('xc')
ejemplo = outputs2_clean
n_vocab=5000 # maximum vocabulary size
tf_vectorizer = CountVectorizer( max_features=n_vocab, stop_words=spanish_stopwords, ngram_range=(1,4))
# ejemplo = [ejemplo]
tf = tf_vectorizer.fit_transform(ejemplo)
print(tf)
path_html = r'C:\Users\laura\Dropbox\UNIANDES\tesis_derecho\lda_limpio'
os.chdir(path_html)
#%% LDA
# Fit one LDA model per topic count (4..30) and export an interactive
# pyLDAvis visualization for each.
for i in range(4,31):
    lda = LatentDirichletAllocation(n_components=i, max_iter=11,doc_topic_prior=0.1, topic_word_prior=0.1, n_jobs=-1,random_state=353, verbose=1) # build the model
    lda.fit(tf) # fit
    LDAvis_prepared=sklearnlda.prepare(lda, tf, tf_vectorizer ) # prepare the model and its results for visualization
    pyLDAvis.save_html(LDAvis_prepared, f'LDA_{i}.html')
#%% PREPARING CLEAN DATAFRAMES TO BUILD THE MATRICES
df = pd.DataFrame(file_texto, columns= ['name_file', 'texto'])
lista_enmodo_string = ' '.join(map(str, output))
# NOTE(review): this joins *all* texts and assigns the same single string to
# every row of df["strings"]; confirm a per-row join was not intended.
df["strings"] = ' '.join(map(str, df.texto))
#%% character-level quality function
def calidad(doc):
    """Fraction of characters across all strings in `doc` that are ASCII letters.

    Used as a rough OCR-quality score. Fixed: returns 0.0 for empty input
    instead of raising ZeroDivisionError.
    """
    aciertos = 0   # ASCII-letter characters seen
    contador = 0   # total characters seen
    for j in doc:
        for i in j:
            contador += 1
            if i in string.ascii_letters:
                aciertos += 1
    return aciertos / contador if contador else 0.0
#%%
# Persist the filename/text dataframe for later analysis.
path_destino = r'C:\Users\laura\Dropbox\UNIANDES\tesis_derecho\textos\destino'
os.chdir(path_destino)
df.to_csv('df1.csv')
| {
"alphanum_fraction": 0.5896656535,
"author": null,
"avg_line_length": 27.5517241379,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "82d83ac72f386d919295cb6bec063954e00e32ab",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "68ccb620897ff82832793e5de6b2f404b3a46676",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "LauraGrandas/text-analysis-Tutelas-Colombia-",
"max_forks_repo_path": "textos2.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "68ccb620897ff82832793e5de6b2f404b3a46676",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "LauraGrandas/text-analysis-Tutelas-Colombia-",
"max_issues_repo_path": "textos2.py",
"max_line_length": 167,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "68ccb620897ff82832793e5de6b2f404b3a46676",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "LauraGrandas/text-analysis-Tutelas-Colombia-",
"max_stars_repo_path": "textos2.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1538,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 5593
} |
/-
Copyright (c) 2020 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Leonardo de Moura, Sebastian Ullrich
-/
import Lean.Util.CollectLevelParams
import Lean.Elab.DeclUtil
import Lean.Elab.DefView
import Lean.Elab.Inductive
import Lean.Elab.Structure
import Lean.Elab.MutualDef
import Lean.Elab.DeclarationRange
namespace Lean.Elab.Command
open Meta
/- Auxiliary function for `expandDeclNamespace?` -/
-- Strip an explicit namespace prefix from a declaration id:
-- `Foo.Bla.f` maps to `(Foo.Bla, f)`, preserving macro scopes and any
-- universe binders attached to the declId syntax.
def expandDeclIdNamespace? (declId : Syntax) : Option (Name × Syntax) :=
  let (id, optUnivDeclStx) := expandDeclIdCore declId
  let scpView := extractMacroScopes id
  match scpView.name with
  | Name.str Name.anonymous s _ => none -- single component: nothing to split
  | Name.str pre s _ =>
    let nameNew := { scpView with name := Name.mkSimple s }.review
    if declId.isIdent then
      some (pre, mkIdentFrom declId nameNew)
    else
      -- declId also carries universe binders; only replace its ident child
      some (pre, declId.setArg 0 (mkIdentFrom declId nameNew))
  | _ => none
/- given declarations such as `@[...] def Foo.Bla.f ...` return `some (Foo.Bla, @[...] def f ...)` -/
def expandDeclNamespace? (stx : Syntax) : Option (Name × Syntax) :=
  if !stx.isOfKind `Lean.Parser.Command.declaration then none
  else
    let decl := stx[1]
    let k := decl.getKind
    if k == `Lean.Parser.Command.abbrev ||
       k == `Lean.Parser.Command.def ||
       k == `Lean.Parser.Command.theorem ||
       k == `Lean.Parser.Command.constant ||
       k == `Lean.Parser.Command.axiom ||
       k == `Lean.Parser.Command.inductive ||
       k == `Lean.Parser.Command.classInductive ||
       k == `Lean.Parser.Command.structure then
      match expandDeclIdNamespace? decl[1] with
      | some (ns, declId) => some (ns, stx.setArg 1 (decl.setArg 1 declId))
      | none => none
    else if k == `Lean.Parser.Command.instance then
      -- instances have an *optional* declId at argument position 3
      let optDeclId := decl[3]
      if optDeclId.isNone then none
      else match expandDeclIdNamespace? optDeclId[0] with
        | some (ns, declId) => some (ns, stx.setArg 1 (decl.setArg 3 (optDeclId.setArg 0 declId)))
        | none => none
    else
      none
-- Elaborate an `axiom` command: elaborate its signature, close over section
-- variables actually used, sort universe parameters, and add the declaration.
def elabAxiom (modifiers : Modifiers) (stx : Syntax) : CommandElabM Unit := do
  -- leading_parser "axiom " >> declId >> declSig
  let declId := stx[1]
  let (binders, typeStx) := expandDeclSig stx[2]
  let scopeLevelNames ← getLevelNames
  let ⟨name, declName, allUserLevelNames⟩ ← expandDeclId declId modifiers
  addDeclarationRanges declName stx
  runTermElabM declName fun vars => Term.withLevelNames allUserLevelNames $ Term.elabBinders binders.getArgs fun xs => do
    Term.applyAttributesAt declName modifiers.attrs AttributeApplicationTime.beforeElaboration
    let type ← Term.elabType typeStx
    Term.synthesizeSyntheticMVarsNoPostponing
    let type ← instantiateMVars type
    let type ← mkForallFVars xs type
    -- close over section variables that actually occur in the type only
    let type ← mkForallFVars vars type (usedOnly := true)
    let (type, _) ← Term.levelMVarToParam type
    let usedParams := collectLevelParams {} type |>.params
    match sortDeclLevelParams scopeLevelNames allUserLevelNames usedParams with
    | Except.error msg => throwErrorAt stx msg
    | Except.ok levelParams =>
      let decl := Declaration.axiomDecl {
        name := declName,
        levelParams := levelParams,
        type := type,
        isUnsafe := modifiers.isUnsafe
      }
      Term.ensureNoUnassignedMVars decl
      addDecl decl
      Term.applyAttributesAt declName modifiers.attrs AttributeApplicationTime.afterTypeChecking
      -- axioms marked @[extern ...] still need compiled stubs
      if isExtern (← getEnv) declName then
        compileDecl decl
      Term.applyAttributesAt declName modifiers.attrs AttributeApplicationTime.afterCompilation
/-
leading_parser "inductive " >> declId >> optDeclSig >> optional ":=" >> many ctor
leading_parser atomic (group ("class " >> "inductive ")) >> declId >> optDeclSig >> optional ":=" >> many ctor >> optDeriving
-/
-- Convert the parsed `inductive` syntax into an `InductiveView`, checking
-- modifier validity and resolving constructor names/visibility.
private def inductiveSyntaxToView (modifiers : Modifiers) (decl : Syntax) : CommandElabM InductiveView := do
  checkValidInductiveModifier modifiers
  let (binders, type?) := expandOptDeclSig decl[2]
  let declId := decl[1]
  let ⟨name, declName, levelNames⟩ ← expandDeclId declId modifiers
  addDeclarationRanges declName decl
  let ctors ← decl[4].getArgs.mapM fun ctor => withRef ctor do
    -- def ctor := leading_parser " | " >> declModifiers >> ident >> optional inferMod >> optDeclSig
    let ctorModifiers ← elabModifiers ctor[1]
    if ctorModifiers.isPrivate && modifiers.isPrivate then
      throwError "invalid 'private' constructor in a 'private' inductive datatype"
    if ctorModifiers.isProtected && modifiers.isPrivate then
      throwError "invalid 'protected' constructor in a 'private' inductive datatype"
    checkValidCtorModifier ctorModifiers
    let ctorName := ctor.getIdAt 2
    let ctorName := declName ++ ctorName
    let ctorName ← withRef ctor[2] $ applyVisibility ctorModifiers.visibility ctorName
    let inferMod := !ctor[3].isNone
    let (binders, type?) := expandOptDeclSig ctor[4]
    addDocString' ctorName ctorModifiers.docString?
    addAuxDeclarationRanges ctorName ctor ctor[2]
    pure { ref := ctor, modifiers := ctorModifiers, declName := ctorName, inferMod := inferMod, binders := binders, type? := type? : CtorView }
  let classes ← getOptDerivingClasses decl[5]
  pure {
    ref := decl
    modifiers := modifiers
    shortDeclName := name
    declName := declName
    levelNames := levelNames
    binders := binders
    type? := type?
    ctors := ctors
    derivingClasses := classes
  }
-- `class inductive` shares the same view construction as plain `inductive`.
private def classInductiveSyntaxToView (modifiers : Modifiers) (decl : Syntax) : CommandElabM InductiveView :=
  inductiveSyntaxToView modifiers decl
def elabInductive (modifiers : Modifiers) (stx : Syntax) : CommandElabM Unit := do
  let v ← inductiveSyntaxToView modifiers stx
  elabInductiveViews #[v]
-- `class inductive` is elaborated as a regular inductive with @[class] added.
def elabClassInductive (modifiers : Modifiers) (stx : Syntax) : CommandElabM Unit := do
  let modifiers := modifiers.addAttribute { name := `class }
  let v ← classInductiveSyntaxToView modifiers stx
  elabInductiveViews #[v]
-- Top-level declaration elaborator: first hoist an explicit namespace prefix
-- (if any) into a surrounding `namespace ... end`, then dispatch on the kind.
@[builtinCommandElab declaration]
def elabDeclaration : CommandElab := fun stx =>
  match expandDeclNamespace? stx with
  | some (ns, newStx) => do
    let ns := mkIdentFrom stx ns
    let newStx ← `(namespace $ns:ident $newStx end $ns:ident)
    withMacroExpansion stx newStx $ elabCommand newStx
  | none => do
    let modifiers ← elabModifiers stx[0]
    let decl := stx[1]
    let declKind := decl.getKind
    if declKind == `Lean.Parser.Command.«axiom» then
      elabAxiom modifiers decl
    else if declKind == `Lean.Parser.Command.«inductive» then
      elabInductive modifiers decl
    else if declKind == `Lean.Parser.Command.classInductive then
      elabClassInductive modifiers decl
    else if declKind == `Lean.Parser.Command.«structure» then
      elabStructure modifiers decl
    else if isDefLike decl then
      -- defs/theorems/abbrevs go through the mutual-definition elaborator
      elabMutualDef #[stx]
    else
      throwError "unexpected declaration"
/- Return true if all elements of the mutual-block are inductive declarations. -/
private def isMutualInductive (stx : Syntax) : Bool :=
  stx[1].getArgs.all fun elem =>
    let decl := elem[1]
    let declKind := decl.getKind
    declKind == `Lean.Parser.Command.inductive
-- Elaborate all inductives of a `mutual` block together as one group.
private def elabMutualInductive (elems : Array Syntax) : CommandElabM Unit := do
  let views ← elems.mapM fun stx => do
    let modifiers ← elabModifiers stx[0]
    inductiveSyntaxToView modifiers stx[1]
  elabInductiveViews views
/- Return true if all elements of the mutual-block are definitions/theorems/abbrevs. -/
private def isMutualDef (stx : Syntax) : Bool :=
  stx[1].getArgs.all fun elem =>
    let decl := elem[1]
    isDefLike decl
-- Commands that are allowed to appear before the declarations in a
-- `mutual ... end` block (they get hoisted by `expandMutualPreamble`).
private def isMutualPreambleCommand (stx : Syntax) : Bool :=
  let k := stx.getKind
  k == `Lean.Parser.Command.variable ||
  k == `Lean.Parser.Command.variables ||
  k == `Lean.Parser.Command.universe ||
  k == `Lean.Parser.Command.universes ||
  k == `Lean.Parser.Command.check ||
  k == `Lean.Parser.Command.set_option ||
  k == `Lean.Parser.Command.open
-- Split the elements of a `mutual` block into (preamble commands, rest).
private partial def splitMutualPreamble (elems : Array Syntax) : Option (Array Syntax × Array Syntax) :=
  let rec loop (i : Nat) : Option (Array Syntax × Array Syntax) :=
    if h : i < elems.size then
      let elem := elems.get ⟨i, h⟩
      if isMutualPreambleCommand elem then
        loop (i+1)
      else if i == 0 then
        none -- `mutual` block does not contain any preamble commands
      else
        some (elems[0:i], elems[i:elems.size])
    else
      none -- a `mutual` block containing only preamble commands is not a valid `mutual` block
  loop 0
-- Hoist a common namespace prefix shared by the declarations of a `mutual`
-- block into a surrounding `namespace ... end`; errors on conflicting prefixes.
@[builtinMacro Lean.Parser.Command.mutual]
def expandMutualNamespace : Macro := fun stx => do
  let mut ns? := none
  let mut elemsNew := #[]
  for elem in stx[1].getArgs do
    match ns?, expandDeclNamespace? elem with
    | _, none => elemsNew := elemsNew.push elem
    | none, some (ns, elem) => ns? := some ns; elemsNew := elemsNew.push elem
    | some nsCurr, some (nsNew, elem) =>
      if nsCurr == nsNew then
        elemsNew := elemsNew.push elem
      else
        Macro.throwErrorAt elem s!"conflicting namespaces in mutual declaration, using namespace '{nsNew}', but used '{nsCurr}' in previous declaration"
  match ns? with
  | some ns =>
    let ns := mkIdentFrom stx ns
    let stxNew := stx.setArg 1 (mkNullNode elemsNew)
    `(namespace $ns:ident $stxNew end $ns:ident)
  | none => Macro.throwUnsupported
-- Expand macros inside the individual elements of a `mutual` block; signals
-- `throwUnsupported` when nothing changed so other macros can fire.
@[builtinMacro Lean.Parser.Command.mutual]
def expandMutualElement : Macro := fun stx => do
  let mut elemsNew := #[]
  let mut modified := false
  for elem in stx[1].getArgs do
    match (← expandMacro? elem) with
    | some elemNew => elemsNew := elemsNew.push elemNew; modified := true
    | none => elemsNew := elemsNew.push elem
  if modified then
    pure $ stx.setArg 1 (mkNullNode elemsNew)
  else
    Macro.throwUnsupported
-- Move preamble commands (variable/universe/open/...) out of the `mutual`
-- block into an enclosing `section ... end`.
@[builtinMacro Lean.Parser.Command.mutual]
def expandMutualPreamble : Macro := fun stx =>
  match splitMutualPreamble stx[1].getArgs with
  | none => Macro.throwUnsupported
  | some (preamble, rest) => do
    let secCmd ← `(section)
    let newMutual := stx.setArg 1 (mkNullNode rest)
    let endCmd ← `(end)
    pure $ mkNullNode (#[secCmd] ++ preamble ++ #[newMutual] ++ #[endCmd])
-- Elaborate `mutual ... end`: all-inductive and all-def blocks are supported.
@[builtinCommandElab «mutual»]
def elabMutual : CommandElab := fun stx => do
  if isMutualInductive stx then
    elabMutualInductive stx[1].getArgs
  else if isMutualDef stx then
    elabMutualDef stx[1].getArgs
  else
    throwError "invalid mutual block"
/- leading_parser "attribute " >> "[" >> sepBy1 (eraseAttr <|> Term.attrInstance) ", " >> "]" >> many1 ident -/
-- Apply (or erase, via `-attr`) attributes on existing declarations.
@[builtinCommandElab «attribute»] def elabAttr : CommandElab := fun stx => do
  let mut attrInsts := #[]
  let mut toErase := #[]
  for attrKindStx in stx[2].getSepArgs do
    if attrKindStx.getKind == ``Lean.Parser.Command.eraseAttr then
      let attrName := attrKindStx[1].getId.eraseMacroScopes
      unless isAttribute (← getEnv) attrName do
        throwError "unknown attribute [{attrName}]"
      toErase := toErase.push attrName
    else
      attrInsts := attrInsts.push attrKindStx
  let attrs ← elabAttrs attrInsts
  let idents := stx[4].getArgs
  -- apply additions and erasures per target identifier
  for ident in idents do withRef ident <| liftTermElabM none do
    let declName ← resolveGlobalConstNoOverloadWithInfo ident
    Term.applyAttributes declName attrs
    for attrName in toErase do
      Attribute.erase declName attrName
-- Shared expansion for `initialize` / `builtin_initialize` commands: with no
-- header the do-sequence becomes a unit initializer; with a header
-- `id : type ←` the computed value is bound to a new constant.
def expandInitCmd (builtin : Bool) : Macro := fun stx =>
  let optHeader := stx[1]
  let doSeq := stx[2]
  let attrId := mkIdentFrom stx $ if builtin then `builtinInit else `init
  if optHeader.isNone then
    `(@[$attrId:ident]def initFn : IO Unit := do $doSeq)
  else
    let id := optHeader[0]
    let type := optHeader[1][1]
    `(def initFn : IO $type := do $doSeq
      @[$attrId:ident initFn]constant $id : $type)
@[builtinMacro Lean.Parser.Command.«initialize»] def expandInitialize : Macro :=
  expandInitCmd (builtin := false)
@[builtinMacro Lean.Parser.Command.«builtin_initialize»] def expandBuiltinInitialize : Macro :=
  expandInitCmd (builtin := true)
end Lean.Elab.Command
| {
"alphanum_fraction": null,
"author": "gebner",
"avg_line_length": null,
"converted": null,
"ext": null,
"file": null,
"hexsha": null,
"include": null,
"lang": null,
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": "github-repos/lean/gebner-lean4-old/lean4-old-ee51cdfaf63ee313c914d83264f91f414a0e3b6e/stage0/src/Lean/Elab/Declaration.lean",
"reason": null,
"repo": "lean4-old",
"save_path": "github-repos/lean/gebner-lean4-old",
"sha": "ee51cdfaf63ee313c914d83264f91f414a0e3b6e",
"size": null
} |
import argparse
import yaml
import os
import shutil
from pathlib import Path
import torch
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rcParams["lines.linewidth"] = 0.8
from pysnn.network import SNNNetwork
from evolutionary.utils.constructors import build_network, build_environment
from evolutionary.utils.utils import randomize_env
def plot_transient_irl(folder, plot_only):
    """Plot thrust-vs-divergence transients for all IRL runs in `folder`.

    Reads every `run?.csv` found below `folder`, draws a per-run moving
    average (red lines) plus a scatter of all raw points (blue), and —
    unless `plot_only` is set — writes the smoothed per-run curves and the
    raw point cloud back to CSV next to the inputs.
    """
    folder = Path(folder)
    # Go over all IRL runs
    runs = sorted(Path(folder).rglob("run?.csv"))
    # Lists for the blue background dots (unsorted/unprocessed)
    all_x = []
    all_y = []
    # Figure for plotting
    fig, ax = plt.subplots(1, 1)
    for run in runs:
        # Extract index: last character of the file stem ("run3" -> "3")
        i = run.stem[-1]
        # Check for int
        try:
            int(i)
        except ValueError:
            # NOTE(review): only warns — `i` is still used in the output
            # filename below even when it is not a digit; confirm intended.
            print("Run indices are not clear!")
        # Load run
        run = pd.read_csv(run, sep=",")
        # Sort by increasing divergence
        sort_idx = np.argsort(run["div"])
        x = run["div"].values[sort_idx]
        y = run["thrust"].values[sort_idx]
        # Take moving average of sorted for the red lines
        ma_y = pd.Series(y).rolling(window=40, min_periods=1).mean().values
        # Plot line
        ax.plot(x, ma_y, "r", alpha=0.5)
        # Add to lists of blue dots
        all_x.extend(x.tolist())
        all_y.extend(y.tolist())
        # Write to dataframe
        if not plot_only:
            output = pd.DataFrame({"x": x, "y": ma_y})
            output.to_csv(folder / f"run{i}_transient.csv", index=False, sep=",")
    # Plot blue dots
    # Do these go in the background by default?
    ax.scatter(all_x, all_y, c="b", alpha=0.5)
    ax.grid()
    ax.set_xlim([-10, 10])
    ax.set_ylim([-0.9, 0.9])
    fig.tight_layout()
    plt.show()
    # Write to dataframe as well
    if not plot_only:
        scatter = pd.DataFrame({"x": all_x, "y": all_y})
        scatter.to_csv(folder / f"raw_points_transient.csv", index=False, sep=",")
if __name__ == "__main__":
    # Command-line entry point: --folder is required, --plot_only optional.
    parser = argparse.ArgumentParser()
    parser.add_argument("--folder", type=str, required=True)
    parser.add_argument("--plot_only", action="store_true")
    cli = parser.parse_args()
    # Call
    plot_transient_irl(cli.folder, cli.plot_only)
| {
"alphanum_fraction": 0.6261642676,
"author": null,
"avg_line_length": 26.5393258427,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "f0d41fb6601e775f26d43b986d70611e59d64a2e",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 1,
"max_forks_repo_forks_event_max_datetime": "2020-07-28T12:02:59.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-07-28T12:02:59.000Z",
"max_forks_repo_head_hexsha": "690ba3f744e2b9b83c9d0945b6e05f76be93788f",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "fedepare/evolutionary",
"max_forks_repo_path": "extra/plot_transient_irl.py",
"max_issues_count": 1,
"max_issues_repo_head_hexsha": "690ba3f744e2b9b83c9d0945b6e05f76be93788f",
"max_issues_repo_issues_event_max_datetime": "2020-09-24T17:28:18.000Z",
"max_issues_repo_issues_event_min_datetime": "2020-07-28T11:16:57.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "fedepare/evolutionary",
"max_issues_repo_path": "extra/plot_transient_irl.py",
"max_line_length": 82,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "690ba3f744e2b9b83c9d0945b6e05f76be93788f",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "fedepare/evolutionary",
"max_stars_repo_path": "extra/plot_transient_irl.py",
"max_stars_repo_stars_event_max_datetime": "2020-07-08T11:32:24.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-07-08T11:32:24.000Z",
"num_tokens": 595,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 2362
} |
import numpy as np
import torch
import torch.nn as nn
import torch.functional as F
from sklearn.metrics import accuracy_score
class DMILoss:
    """Determinant-based Mutual Information (DMI) loss.

    Builds the (C x C) empirical joint matrix between one-hot targets and
    softmax predictions and returns -log(|det| + eps); the sign is flipped
    when det < 0 so the loss keeps decreasing as |det| grows.

    Generalized: the one-hot matrix previously hard-coded 2 classes and was
    always allocated on CPU; it now follows `output`'s class count/device.
    """

    def __call__(self, *args, **kwargs):
        return self.forward(*args, **kwargs)

    def forward(self, output, target):
        """output: (N, C) raw logits; target: (N,) integer class labels."""
        outputs = torch.softmax(output, dim=-1)
        targets = target.reshape(-1, 1).type(torch.int64)
        num_classes = outputs.size(-1)
        y_onehot = torch.zeros(targets.size(0), num_classes, device=outputs.device)
        y_onehot.scatter_(1, targets, 1)
        y_onehot = y_onehot.transpose(0, 1)
        # empirical joint distribution of (label, prediction)
        mat = y_onehot @ outputs
        mat = mat / target.size(0)
        det = torch.det(mat.float())
        # eps = 1e-4 keeps the log finite when the determinant vanishes
        if det < 0:
            return torch.log(torch.abs(det) + 0.0001)
        else:
            return -torch.log(torch.abs(det) + 0.0001)
class SigmoidLoss:
    """Smooth sigmoid surrogate loss for {-1, +1} targets:
    mean of 1 / (1 + exp(y_pred * y)); small when prediction and label agree
    with large magnitude, approaching 1 when they disagree."""

    def __call__(self, *args, **kwargs):
        return self.forward(*args, **kwargs)

    def forward(self, y_pred, y):
        margin = y_pred * y
        return torch.mean(1. / (1. + torch.exp(margin)))
class MLP(nn.Module):
    """Multi-layer perceptron: (Linear -> Dropout -> activation) per hidden
    layer, then a final Linear to `outputs` units squeezed on the last dim.

    Args:
        feature_dim: size of the input feature vector.
        hidsizes: list of hidden-layer widths.
        outputs: number of output units (default 1).
        dropout: dropout probability applied after each hidden Linear.
        activation: 'relu', 'tanh', 'sigmoid', 'leaky', 'elu' or 'relu6'.

    Raises:
        ValueError: for an unknown `activation` name (previously the branch
            fell through silently and failed later with AttributeError).
    """

    def __init__(self, feature_dim, hidsizes, outputs=1, dropout=0., activation='relu'):
        super(MLP, self).__init__()
        if activation == 'relu':
            self.ac_fn = torch.nn.ReLU
        elif activation == 'tanh':
            self.ac_fn = torch.nn.Tanh
        elif activation == 'sigmoid':
            self.ac_fn = torch.nn.Sigmoid
        elif activation == 'leaky':
            self.ac_fn = torch.nn.LeakyReLU
        elif activation == 'elu':
            self.ac_fn = torch.nn.ELU
        elif activation == 'relu6':
            self.ac_fn = torch.nn.ReLU6
        else:
            # fail fast instead of AttributeError at self.ac_fn() below
            raise ValueError("unknown activation: %r" % (activation,))
        layers = []
        hidsizes = [feature_dim] + hidsizes
        for i in range(1, len(hidsizes)):
            layers.append(nn.Linear(hidsizes[i-1], hidsizes[i]))
            layers.append(nn.Dropout(dropout))
            layers.append(self.ac_fn())
        self.mlp = nn.Sequential(*layers, nn.Linear(hidsizes[-1], outputs))

    def forward(self, x):
        """Run the network; accepts array-likes and promotes 1-D input to a
        batch of one before the forward pass."""
        if type(x) != torch.Tensor:
            x = torch.tensor(x, dtype=torch.float)
        if x.dim() < 2:
            x = x.unsqueeze(0)
        # if torch.cuda.is_available():
        #     x = x.cuda()
        return self.mlp(x).squeeze(-1)

    @staticmethod
    def layer_init(layer, w_scale=1.0):
        """Orthogonal weight init scaled by `w_scale`, zero bias.
        Utility helper; not invoked by __init__."""
        nn.init.orthogonal_(layer.weight.data)
        layer.weight.data.mul_(w_scale)
        nn.init.constant_(layer.bias.data, 0)
        return layer
class BinaryClassifier(object):
    """Training/evaluation wrapper around a torch model for binary classification.

    `loss_func` selects the objective:
      - 'bce'                                   : BCEWithLogitsLoss, {0, 1} targets
      - 'mse' / 'l1' / 'huber' / 'logistic' /
        'sigmoid'                               : tanh outputs, {-1, +1} targets
      - 'dmi'                                   : sigmoid outputs, DMILoss
    Losses expecting {-1, +1} targets set `transform_y`; labels are remapped
    in `fit` and predictions thresholded accordingly in `predict`.
    """

    def __init__(self, model, learning_rate, loss_func='bce'):
        self.model = model
        # if torch.cuda.is_available():
        #     self.model.cuda()
        if loss_func == 'bce':
            self.transform_y = False
            self.ac_fn = None  # BCEWithLogitsLoss consumes raw logits
            self.loss_func = torch.nn.BCEWithLogitsLoss
        elif loss_func == 'mse':
            self.transform_y = True
            self.ac_fn = torch.tanh
            self.loss_func = torch.nn.MSELoss
        elif loss_func == 'l1':
            self.transform_y = True
            self.ac_fn = torch.tanh
            self.loss_func = torch.nn.L1Loss
        elif loss_func == 'huber':
            self.transform_y = True
            self.ac_fn = torch.tanh
            self.loss_func = torch.nn.SmoothL1Loss
        elif loss_func == 'logistic':
            self.transform_y = True
            self.ac_fn = torch.tanh
            self.loss_func = torch.nn.SoftMarginLoss
        elif loss_func == 'sigmoid':
            self.transform_y = True
            self.ac_fn = torch.tanh
            self.loss_func = SigmoidLoss
        elif loss_func == 'dmi':
            self.transform_y = False
            self.ac_fn = torch.sigmoid
            self.loss_func = DMILoss
        else:
            # Fixed: `raise(NotImplementedError, loss_func)` raised a tuple,
            # which is itself a TypeError in Python 3 and masked the cause.
            raise NotImplementedError(loss_func)
        self.loss = self.loss_func()
        self.optimizer = torch.optim.Adam(self.model.parameters(), learning_rate)

    def predict(self, X):
        """Hard class predictions for X (no gradients): {0, 1} labels for
        non-transforming losses, {-1, +1} otherwise."""
        with torch.no_grad():
            if not self.ac_fn:
                y_pred = torch.sigmoid(self.model(X)).cpu().numpy()  # bce with logits
            else:
                y_pred = self.ac_fn(self.model(X)).cpu().numpy()
        if self.transform_y:
            y_pred[y_pred < 0] = -1
            y_pred[y_pred >= 0] = 1
        else:
            y_pred = y_pred.round()
        return y_pred

    def train(self, X, y):
        """One optimization step on a batch; returns the scalar loss value."""
        self.model.train()
        y_pred = self.model(X)
        if self.ac_fn:
            y_pred = self.ac_fn(y_pred)
        y = torch.tensor(y, dtype=torch.float)
        loss = self.loss(y_pred, y)
        self.optimizer.zero_grad()
        loss.backward()
        # torch.nn.utils.clip_grad_norm_(self.model.parameters(), 0.5)
        self.optimizer.step()
        return loss.item()

    def val(self, X, y):
        """Accuracy of hard predictions on (X, y)."""
        self.model.eval()
        y_pred = self.predict(torch.tensor(X, dtype=torch.float))
        acc = accuracy_score(y, y_pred)
        return acc

    def fit(self, X_train, y_train, X_val=None, y_val=None, episodes=100, batchsize=None,
            val_interval=20, log_interval=100, logger=None):
        """Mini-batch training loop.

        Returns a dict with 'loss', 'train_acc' and 'val_acc' histories.
        """
        if self.transform_y:
            # Remap 0 -> -1 on *copies*: the previous in-place version
            # silently mutated the caller's label arrays.
            y_train = np.array(y_train, copy=True)
            y_train[y_train == 0] = -1
            if y_val is not None:
                y_val = np.array(y_val, copy=True)
                y_val[y_val == 0] = -1
        train_acc, val_acc, losses = [], [], []
        batchsize = batchsize if batchsize and batchsize < len(X_train) else len(X_train)
        m = X_train.shape[0]
        for ep in range(episodes):
            mb_idxes = np.random.choice(m, batchsize, replace=False)
            mb_X_train, mb_y_train = X_train[mb_idxes], y_train[mb_idxes]
            loss = self.train(mb_X_train, mb_y_train)
            losses.append(loss)
            if ep % val_interval == 0 and X_val is not None and y_val is not None:
                train_acc.append(self.val(X_train, y_train))
                val_acc.append(self.val(X_val, y_val))
            if logger is not None and ep % log_interval == 0:
                logger.record_tabular('ep', ep)
                logger.record_tabular('loss', np.mean(losses[-log_interval:]))
                logger.record_tabular('train_acc', np.mean(train_acc[-log_interval//val_interval:]))
                if X_val is not None and y_val is not None:
                    logger.record_tabular('val_acc', np.mean(val_acc[-log_interval//val_interval:]))
                logger.dump_tabular()
        return {'loss': losses, 'train_acc': train_acc, 'val_acc': val_acc}
class DMIClassifier(object):
    """Multi-class classifier trained with the DMI loss."""

    def __init__(self, model, learning_rate):
        self.model = model
        # if torch.cuda.is_available():
        #     self.model.cuda()
        self.loss = DMILoss()
        self.optimizer = torch.optim.Adam(self.model.parameters(), learning_rate)

    def predict(self, X):
        """Return the arg-max class index (numpy) for each row of ``X``."""
        with torch.no_grad():
            scores = self.model(X).cpu().numpy()
            labels = scores.argmax(-1)
        return labels

    def train(self, X, y):
        """Take one optimiser step on batch (X, y); return the scalar loss."""
        self.model.train()
        logits = self.model(X)
        targets = torch.tensor(y, dtype=torch.float)
        batch_loss = self.loss(logits, targets)
        self.optimizer.zero_grad()
        batch_loss.backward()
        # torch.nn.utils.clip_grad_norm_(self.model.parameters(), 0.5)
        self.optimizer.step()
        return batch_loss.item()

    def val(self, X, y):
        """Return the accuracy of the current model on (X, y)."""
        self.model.eval()
        predictions = self.predict(torch.tensor(X, dtype=torch.float))
        return accuracy_score(y, predictions)

    def fit(self, X_train, y_train, X_val=None, y_val=None, episodes=100, batchsize=None,
            val_interval=20, log_interval=100, logger=None):
        """Mini-batch training loop; returns history of loss/accuracies."""
        train_acc, val_acc, losses = [], [], []
        # fall back to full-batch training when batchsize is unset or too large
        if not (batchsize and batchsize < len(X_train)):
            batchsize = len(X_train)
        m = X_train.shape[0]
        have_val = X_val is not None and y_val is not None
        for ep in range(episodes):
            idx = np.random.choice(m, batchsize, replace=False)
            losses.append(self.train(X_train[idx], y_train[idx]))
            if ep % val_interval == 0 and have_val:
                train_acc.append(self.val(X_train, y_train))
                val_acc.append(self.val(X_val, y_val))
            if logger is not None and ep % log_interval == 0:
                logger.record_tabular('ep', ep)
                logger.record_tabular('loss', np.mean(losses[-log_interval:]))
                logger.record_tabular('train_acc', np.mean(train_acc[-log_interval//val_interval:]))
                if have_val:
                    logger.record_tabular('val_acc', np.mean(val_acc[-log_interval//val_interval:]))
                logger.dump_tabular()
        return {'loss': losses, 'train_acc': train_acc, 'val_acc': val_acc}
class SurrogateBinaryClassifier(BinaryClassifier):
    """Binary classifier trained with a label-noise-corrected surrogate loss.

    ``e0``/``e1`` are the assumed flip rates of class-0 and class-1 labels.
    """

    def __init__(self, model, learning_rate, loss_func, e0, e1):
        super(SurrogateBinaryClassifier, self).__init__(model, learning_rate, loss_func)
        # label-noise rates: e[0] applies to class 0, e[1] to class 1
        self.e = np.array([e0, e1], dtype=float)
        # per-sample losses are required so each one can be reweighted
        self.loss = self.loss_func(reduction='none')

    def train(self, X, y):
        r""" The original surrogate function is:
                    (1 - \rho_{-y}) * l(t,y) - \rho_{y} * l(t,-y)
            loss = ---------------------------------------------
                            1 - \rho_{+1} - \rho_{-1}
            where y \in {-1, +1},
            But because we use {0, 1} as the label, so the loss becomes:
                    (1 - e_{1-y}) * l(t,y) - e_{y} * l(t,1-y)
            loss = -----------------------------------------
                            1 - e_0 - e_1

        Bug fix: the docstring is now a raw string -- previously ``\rho``
        embedded a literal carriage return (``\r``) into the text.
        """
        self.model.train()
        y_pred = self.model(X)
        if self.ac_fn:
            y_pred = self.ac_fn(y_pred)
        if self.transform_y:
            # temporarily map {-1,+1} labels back to {0,1} so they can index self.e
            y[y == -1] = 0
        c1 = torch.tensor(1 - self.e[np.int32(1-y)], dtype=torch.float)
        c2 = torch.tensor(self.e[np.int32(y)], dtype=torch.float)
        if self.transform_y:
            # restore the {-1,+1} convention before computing the loss
            y[y == 0] = -1
        y = torch.tensor(y, dtype=torch.float)
        loss1 = c1 * self.loss(y_pred, y)
        loss2 = c2 * self.loss(y_pred, -y if self.transform_y else 1 - y)
        loss = torch.mean(loss1 - loss2) / (1 - self.e.sum())
        self.optimizer.zero_grad()
        loss.backward()
        # torch.nn.utils.clip_grad_norm_(self.model.parameters(), 0.5)
        self.optimizer.step()
        return loss.item()
class PeerBinaryClassifier(BinaryClassifier):
    """Binary classifier trained with a peer loss:
    ``loss(x, y) - alpha * loss(x', y')`` where the peer pair (x', y') is
    drawn with features and labels sampled independently.
    """

    def __init__(self, model, learning_rate, loss_func, alpha=1.):
        super(PeerBinaryClassifier, self).__init__(model, learning_rate, loss_func)
        # weight of the peer-penalty term
        self.alpha = alpha

    def train(self, X, y, X_, y_):
        """One step on batch (X, y) with peer batch (X_, y_); returns the loss."""
        self.model.train()
        y_pred = self.model(X)
        if self.ac_fn:
            y_pred = self.ac_fn(y_pred)
        y = torch.tensor(y, dtype=torch.float)
        y_pred_ = self.model(X_)
        if self.ac_fn:
            y_pred_ = self.ac_fn(y_pred_)
        y_ = torch.tensor(y_, dtype=torch.float)
        loss = self.loss(y_pred, y) - self.alpha * self.loss(y_pred_, y_)
        self.optimizer.zero_grad()
        loss.backward()
        # torch.nn.utils.clip_grad_norm_(self.model.parameters(), 0.5)
        self.optimizer.step()
        return loss.item()

    def fit(self, X_train, y_train, X_val=None, y_val=None, episodes=100, batchsize=None, batchsize_=None,
            val_interval=20, log_interval=100, logger=None):
        """Training loop drawing an extra independent peer batch each episode."""
        if self.transform_y:
            # these losses are trained on {-1, +1} labels (remapped in place)
            y_train[y_train == 0] = -1
            if y_val is not None:
                y_val[y_val == 0] = -1
        losses, train_acc, val_acc = [], [], []
        # Bug fix / consistency with BinaryClassifier.fit: clamp both batch
        # sizes to the dataset size; previously a batchsize larger than the
        # dataset made np.random.choice(..., replace=False) raise.
        batchsize = batchsize if batchsize and batchsize < len(X_train) else len(X_train)
        batchsize_ = batchsize_ if batchsize_ and batchsize_ < len(X_train) else len(X_train)
        m = X_train.shape[0]
        for ep in range(episodes):
            mb_idxes = np.random.choice(m, batchsize, replace=False)
            mb_X_train, mb_y_train = X_train[mb_idxes], y_train[mb_idxes]
            # peer samples: features and labels drawn *independently*
            mb_X_train_ = X_train[np.random.choice(m, batchsize_, replace=False)]
            mb_y_train_ = y_train[np.random.choice(m, batchsize_, replace=False)]
            loss = self.train(mb_X_train, mb_y_train, mb_X_train_, mb_y_train_)
            losses.append(loss)
            if ep % val_interval == 0 and X_val is not None and y_val is not None:
                train_acc.append(self.val(X_train, y_train))
                val_acc.append(self.val(X_val, y_val))
            if logger is not None and ep % log_interval == 0:
                logger.record_tabular('ep', ep)
                logger.record_tabular('loss', np.mean(losses[-log_interval:]))
                logger.record_tabular('train_acc', np.mean(train_acc[-log_interval//val_interval:]))
                if X_val is not None and y_val is not None:
                    logger.record_tabular('val_acc', np.mean(val_acc[-log_interval//val_interval:]))
                logger.dump_tabular()
        return {
            'loss': losses,
            'train_acc': train_acc,
            'val_acc': val_acc
        }
| {
"alphanum_fraction": 0.5709757229,
"author": null,
"avg_line_length": 37.3815028902,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "bc449f9064d2a656fa8314bdef7fd46be5c48253",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "bedf5d6e38e82a911ef04ed8ea23145a51fd0688",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "apricotxingya/peer_loss",
"max_forks_repo_path": "models/nn.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "bedf5d6e38e82a911ef04ed8ea23145a51fd0688",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "apricotxingya/peer_loss",
"max_issues_repo_path": "models/nn.py",
"max_line_length": 106,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "bedf5d6e38e82a911ef04ed8ea23145a51fd0688",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "apricotxingya/peer_loss",
"max_stars_repo_path": "models/nn.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 3154,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 12934
} |
# NOTE(review): empty module body -- presumably a stub for completion support; confirm.
module complete
end
| {
"alphanum_fraction": 0.8095238095,
"author": null,
"avg_line_length": 5.25,
"converted": null,
"ext": "jl",
"file": null,
"hexsha": "5036c4d74975ec7a574bdbf757e455b51a8128c0",
"include": null,
"lang": "Julia",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "449486ef1c09656ee59d4a55854a5b160dc2fc9d",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "turing-complete/xsh",
"max_forks_repo_path": "src/complete.jl",
"max_issues_count": 1,
"max_issues_repo_head_hexsha": "449486ef1c09656ee59d4a55854a5b160dc2fc9d",
"max_issues_repo_issues_event_max_datetime": "2018-03-09T12:45:10.000Z",
"max_issues_repo_issues_event_min_datetime": "2018-02-25T02:25:09.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "turing-complete/xsh",
"max_issues_repo_path": "src/complete.jl",
"max_line_length": 15,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "449486ef1c09656ee59d4a55854a5b160dc2fc9d",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "tmaone/xsh",
"max_stars_repo_path": "src/complete.jl",
"max_stars_repo_stars_event_max_datetime": "2015-02-15T15:32:01.000Z",
"max_stars_repo_stars_event_min_datetime": "2015-02-15T15:32:01.000Z",
"num_tokens": 5,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 21
} |
[STATEMENT]
lemma diff_invariant_eq: "diff_invariant I f U S t\<^sub>0 G =
(\<forall>s. I s \<longrightarrow> (\<forall>X\<in>Sols f U S t\<^sub>0 s. (\<forall>t\<in>U s.(\<forall>\<tau>\<in>(down (U s) t). G (X \<tau>)) \<longrightarrow> I (X t))))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. diff_invariant I f U S t\<^sub>0 G = (\<forall>s. I s \<longrightarrow> (\<forall>X\<in>Sols f U S t\<^sub>0 s. \<forall>t\<in>U s. (\<forall>\<tau>\<in>down (U s) t. G (X \<tau>)) \<longrightarrow> I (X t)))
[PROOF STEP]
unfolding diff_invariant_def g_orbital_eq image_le_pred
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ((\<Union> \<circ> \<P> (\<lambda>s. {X t |t X. t \<in> U s \<and> (\<forall>x\<in>down (U s) t. G (X x)) \<and> X \<in> Sols f U S t\<^sub>0 s})) {s. I s} \<subseteq> {s. I s}) = (\<forall>s. I s \<longrightarrow> (\<forall>X\<in>Sols f U S t\<^sub>0 s. \<forall>t\<in>U s. (\<forall>\<tau>\<in>down (U s) t. G (X \<tau>)) \<longrightarrow> I (X t)))
[PROOF STEP]
by auto | {
"alphanum_fraction": null,
"author": null,
"avg_line_length": null,
"converted": null,
"ext": null,
"file": "Hybrid_Systems_VCs_HS_ODEs",
"hexsha": null,
"include": null,
"lang": null,
"length": 2,
"llama_tokens": 427,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": null
} |
# any functions for plots
from matplotlib import pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from matplotlib import dates as mdates
from matplotlib.ticker import FormatStrFormatter
import numpy as np
import pandas as pd
from pathlib import Path
import glob, os
import datetime
def deployment_constancy (df, title):
    '''
    Create a plot to check whether LOKI was deployed constantly with depth.

    Top panel: depth vs. time.  Bottom panel: |vertical velocity| vs. time,
    computed as a finite difference between consecutive samples.
    Saves 'dist_vel_<title>.pdf' into a hard-coded plots directory.
    NOTE(review): the output directory is a user-specific absolute path.
    '''
    depth = df['Depth (m)'].tolist()
    # timestamps are stored as strings; parse into datetime for plotting/deltas
    time = [datetime.datetime.strptime(x, '%Y-%m-%d %H:%M:%S') for x in df['Time_Loki (UTC)'].tolist()]
    fig, [ax1, ax2] = plt.subplots(2,1)
    # plot depth vs time
    ax1.scatter(time, depth, color='black', s=3)
    #ax1.set_xlabel('time (UTC)')
    ax1.set_ylabel('depth (m)')
    ax1.invert_yaxis()  # depth axis points downwards
    ax1.set_title(str(title+' (depth vs time)'), fontsize =10)
    ax1.xaxis.set_major_locator(mdates.MinuteLocator(interval=5)) # modify the date time x ticker frequency with interval(min) =5min
    ax1.xaxis.set_major_formatter(mdates.DateFormatter('%H:%M')) # modify the datetime dicker format
    # plot velocity vs time
    velocity = []
    vel_time = []
    for i in range(0, len(time)-1):
        # skip duplicate timestamps to avoid division by zero
        if time[i] != time[i+1]:
            # |dz/dt| in m/s; dividing timedeltas converts the gap to seconds
            each_vel = abs((depth[i]-depth[i+1])/((time[i]-time[i+1])/datetime.timedelta(seconds=1)))
            velocity.append(each_vel)
            vel_time.append(time[i])
        else:
            pass
    ax2.scatter(vel_time, velocity, color='black', s=3)
    ax2.set_xlabel('time (UTC)')
    ax2.set_ylabel('velocity (m/s)')
    ax2.set_title(str(title+' (velocity vs time)'), fontsize =10)
    ax2.xaxis.set_major_locator(mdates.MinuteLocator(interval=5)) # modify the date time x ticker frequency with interval(min) =5min
    ax2.xaxis.set_major_formatter(mdates.DateFormatter('%H:%M')) # modify the datetime dicker format
    fig.tight_layout() #adjust subplots space
    os.chdir('/Users/dong/Library/Mobile Documents/com~apple~CloudDocs/Work/github/LOKIpy/plots')
    fig_name = str('dist_vel_'+title+'.pdf')
    plt.savefig(fig_name)
    plt.close()
def vertical_distribution_old (count_dict, title, min_depth, max_depth, depth_interval, water_vol):
    '''
    Plot one horizontal-bar concentration profile per organism in count_dict,
    saving each as 'concent_<org>_<title>.pdf'.

    bins describes the depth interval
    density describes ratio, default: False
    align describes the location of histogram, default: right
    NOTE(review): water_vol is unused and the divisor below algebraically
    simplifies to max_depth -- verify the intended volume normalisation.
    '''
    for org, count in count_dict.items():
        bins=np.arange(min_depth,max_depth, depth_interval)
        # counts -> concentration (n/m3); divisor == (max_depth/depth_interval)*depth_interval
        count_vol = [x/((max_depth/depth_interval)*depth_interval) for x in count]
        plt.barh(bins[:len(count_vol)], count_vol, align='edge', color='black', height = 10) # horizontal bar plot
        plt.xlabel('concentration (n/m3)')
        plt.ylabel('depth (m)')
        plt.gca().invert_yaxis()  # depth axis points downwards
        plt.title(org)
        os.chdir('/Users/dong/Library/Mobile Documents/com~apple~CloudDocs/Work/github/LOKIpy/plots')
        fig_name = str('concent_'+org+'_'+title+'.pdf')
        plt.savefig(fig_name)
        plt.close()
def vertical_each_org_distribution (each_df, count_dict, title, min_depth, max_depth, depth_interval, water_vol):
    '''
    One subplot per organism: horizontal concentration bars overlaid with
    temperature and salinity profiles on twin x-axes; saved as
    'concent_<title>.pdf'.

    bins describes the depth interval
    density describes ratio, default: False
    align describes the location of histogram, default: right
    work with dictionary
    this function works for station level
    NOTE(review): water_vol and the oxygen profile are read but unused; the
    2x3 grid assumes at most 6 organisms in count_dict -- confirm.
    '''
    # organize environmental data e.g. depth, temperature, salinity, oxygen
    depth = each_df['Depth (m)'].tolist()
    temperature = each_df['Temperature (°C)'].tolist()
    salinity = each_df['Salinity (psu)'].tolist()
    oxygen = each_df['Oxygen concentration (µM)'].tolist()
    fig, axs = plt.subplots(2,3, figsize = (15, 10))
    axs = axs.ravel()  # flatten the 2x3 grid so subplots can be indexed linearly
    i = 0
    for org, count in count_dict.items():
        # add target data
        bins=np.arange(min_depth,max_depth, depth_interval)
        # counts -> concentration (n/m3); divisor simplifies to max_depth
        count_vol = [x/((max_depth/depth_interval)*depth_interval) for x in count]
        axs[i].barh(bins[:len(count_vol)], count_vol, align='edge', color='black', height = 10) # horizontal bar plot
        axs[i].set_xlabel('concentration (n/m3)')
        axs[i].set_ylabel('depth (m)')
        axs[i].invert_yaxis()
        axs[i].set_title(org, y =1.0) # subplot title
        axs[i].xaxis.set_major_formatter(FormatStrFormatter('%.2f'))
        # add environmental data
        temp_ax = axs[i].twiny()
        temp_ax.plot(temperature, depth, color='red')
        temp_ax.set_xlabel('temperature', color='red')
        sal_ax = axs[i].twiny()
        sal_ax.plot(salinity, depth, color='green')
        sal_ax.xaxis.set_ticks_position('bottom')
        sal_ax.xaxis.set_label_position('bottom')
        sal_ax.spines['bottom'].set_position(('outward', 40))  # offset the second bottom axis
        sal_ax.set_xlabel('salinity (PSU)', color = 'green')
        # change tick and colors
        axs[i].xaxis.set_ticks_position('top') # change the position of each spines of axis
        axs[i].xaxis.set_label_position('top')
        temp_ax.xaxis.set_ticks_position('bottom')
        temp_ax.xaxis.set_label_position('bottom')
        temp_ax.spines['bottom'].set_color('red') # change the location color of spines and ticks
        temp_ax.tick_params(axis='x', color='red')
        sal_ax.spines['bottom'].set_color('green')
        sal_ax.tick_params(axis='x', color='green')
        axs[i].set_xticks(np.arange(0, max(count_vol) + 0.05, 0.05))
        i += 1
    fig.tight_layout(pad=3) # adjust layout of subplots
    plt.suptitle(title, y = 0.99) # main title
    os.chdir('/Users/dong/Library/Mobile Documents/com~apple~CloudDocs/Work/github/LOKIpy/plots')
    fig_name = str('concent_'+title+'.pdf')
    plt.savefig(fig_name)
    plt.close()
def stacked_vertical_distribution (mask_dict, title, min_depth, max_depth, depth_interval, water_vol):
    '''
    Stacked horizontal-bar concentration profile over all organisms in
    mask_dict, saved as 'stacked_<title>.pdf'.
    NOTE(review): water_vol is unused; the divisor simplifies to max_depth.
    '''
    i = False  # flips to True once the running bar baseline is initialised
    bins=np.arange(min_depth,max_depth, depth_interval)
    org_list = []
    fig, ax = plt.subplots()
    for org, count in mask_dict.items():
        org_list.append(org)
        # sum each element in count to botton in bar
        count_vol = [x/((max_depth/depth_interval)*depth_interval) for x in count]
        if i == False:
            bar_bottom = [0]*len(count)
            i = True
        ax.barh(bins[:len(count_vol)], count_vol, height = 10, align='edge', left=np.array(bar_bottom)) # horizontal bar plot
        bar_bottom = [a+b for a, b in zip(bar_bottom, count_vol)]  # advance the stack baseline
    ax.invert_yaxis()  # depth axis points downwards
    ax.set_title(title)
    ax.set_xlabel('concentration (n/m3)')
    ax.set_ylabel('depth (m)')
    ax.legend(org_list, loc ='upper right')
    ax.xaxis.set_major_formatter(FormatStrFormatter('%.2f'))
    os.chdir('/Users/dong/Library/Mobile Documents/com~apple~CloudDocs/Work/github/LOKIpy/plots')
    fig_name = str('stacked_'+title+'.pdf')
    plt.savefig(fig_name)
    plt.close()
def comp_vertical_distribution (ecotaxa_df, min_depth, max_depth, depth_interval):
    '''
    Create a figure with two vertical profiles side by side to compare them.

    The left panel is mirrored (x-axis inverted) so the two histograms face
    each other.  The figure is shown interactively rather than saved.
    '''
    left_depth = np.asarray(ecotaxa_df['Depth (m)'])
    right_depth = np.asarray(ecotaxa_df['Depth (m)'])
    fig, [ax1, ax2] = plt.subplots(1,2)
    # shared depth bins for both panels (hoisted loop-invariant)
    bins = np.arange(min_depth, max_depth, depth_interval)
    ax1.hist(left_depth, bins=bins, orientation='horizontal', color='black')
    ax1.invert_yaxis() # invert axis subplot level
    ax1.invert_xaxis()
    ax1.set_xlabel('counts') # add label on subplot level
    ax1.set_ylabel('depth [m]')
    # Bug fix: the right panel previously re-plotted left_depth, leaving
    # right_depth unused; plot the right-hand profile here.  (Both currently
    # read the same column, so output is unchanged.)
    ax2.hist(right_depth, bins=bins, orientation='horizontal', color='black')
    ax2.invert_yaxis()
    ax2.set_xlabel('counts')
    #ax2.set_ylabel('depth [m]')
    plt.show()
    plt.close()
| {
"alphanum_fraction": 0.6563870968,
"author": null,
"avg_line_length": 39.7435897436,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "f79d026c88bd3385855f496c209bca425dc77719",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "afab9867eaad1ae56e70beaca4463493f4ee2efb",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "kimdonggyun/OCEANpy",
"max_forks_repo_path": "scripts/graphcre.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "afab9867eaad1ae56e70beaca4463493f4ee2efb",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "kimdonggyun/OCEANpy",
"max_issues_repo_path": "scripts/graphcre.py",
"max_line_length": 132,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "afab9867eaad1ae56e70beaca4463493f4ee2efb",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "kimdonggyun/OCEANpy",
"max_stars_repo_path": "scripts/graphcre.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1968,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 7750
} |
import data.finset
import data.fintype
import data.fin
import data.rat
open function
open finset
variables {α : Type*} {β : Type*}
variables [fintype α] [fintype β]
namespace fintype
-- The image of `univ` under an injective map has the same cardinality as the (finite) domain.
theorem card_image_of_injective [decidable_eq β] {f : α → β}: injective f → finset.card ((elems α).image f) = card α :=
finset.card_image_of_injective (elems α)
end fintype
section surj
open fintype
-- for fintypes (where we can define the image), surjection is equivalent to the image being everything
lemma univ_eq_image_of_surj [decidable_eq β] (f : α → β): surjective f → elems β = image f (elems α) :=
begin
  intro,
  ext b, rw mem_image, apply iff_of_true (complete _),
  cases ‹surjective f› b with a ha,
  exact ⟨a, complete _, ha⟩,
end
-- converse direction: if the image of `f` covers `univ`, then `f` is surjective
lemma surj_of_univ_eq_image [decidable_eq β] (f : α → β): elems β = image f (elems α) → surjective f :=
begin
  intros h b,
  have q: b ∈ elems β := complete b,
  rw h at q,
  simp at q,
  rcases q with ⟨a, _, r⟩,
  exact ⟨a, r⟩
end
-- the two directions above, packaged as an iff
lemma univ_eq_image_iff_surj [decidable_eq β] (f : α → β): elems β = image f (elems α) ↔ surjective f :=
⟨surj_of_univ_eq_image _, univ_eq_image_of_surj _⟩
-- ed's proof
-- idea: show f is injective as well, so it's bijective, so α and β have the same cardinality: contradiction
lemma fintype.not_surjective_of_card_lt {f : α → β}
  (hcard : fintype.card α < fintype.card β) (h_surj : surjective f) : false :=
begin
  have h_inj : injective f,
  intros a1 a2 h,
  refine @finset.inj_on_of_surj_on_of_card_le _ _ (fintype.elems α) (fintype.elems β) (λ a ha, f a)
    (λ a ha, fintype.complete _) _ (le_of_lt hcard) _ _ (fintype.complete _) (fintype.complete _) h,
  refine λ b hb, let ⟨a,ha⟩ := h_surj b in ⟨a,fintype.complete _, eq.symm ha⟩,
  apply ne_of_lt hcard,
  apply fintype.card_congr,
  apply equiv.of_bijective ⟨h_inj, h_surj⟩,
end
-- my proof 1
-- idea: show f is injective as well, so its image is the same size as α, but it's surjective: contradiction
lemma no_surj_to_smaller_set [decidable_eq β] (hst : card α < card β) (f : α → β) : ¬ surjective f :=
begin
  intro,
  have: injective f,
  intros a1 a2 h,
  refine inj_on_of_surj_on_of_card_le (λ a _, f a) (λ _ _, mem_univ _) (λ b _, _) (le_of_lt hst) (complete _) (complete _) h,
  cases ‹surjective f› b with a _,
  exact ⟨a, complete _, ‹f a = b›.symm⟩,
  apply not_le_of_gt hst,
  rw [← card_image_of_injective ‹injective f›, ← univ_eq_image_of_surj f ‹surjective f›],
  trivial
end
-- my proof 2
-- idea: show f has an injective inverse, so the image of f⁻¹ is the same size as β, but the image is a subset of α, so card α ≥ card β
lemma ge_card_of_surj [decidable_eq α] {f : α → β} : surjective f → card β ≤ card α :=
begin
  intro,
  let f_inv := surj_inv ‹surjective f›,
  have: injective f_inv := injective_surj_inv _,
  rw ← card_image_of_injective ‹injective f_inv›,
  apply card_le_of_subset,
  apply subset_univ
end
-- apply my proof 1 to fins
lemma no_surj_to_smaller_fin (n m : ℕ) (H : n < m) (f : fin n → fin m) : ¬ surjective f :=
begin
  apply no_surj_to_smaller_set,
  repeat {rwa fintype.card_fin},
end
-- apply my proof 2 to fins
lemma no_surj_to_smaller_fin' (n m : ℕ) (H : n < m) (f : fin n → fin m) : ¬ surjective f :=
begin
  intro f_surj,
  apply not_le_of_gt H,
  rw [← fintype.card_fin m, ← fintype.card_fin n],
  apply ge_card_of_surj f_surj,
end
-- a lemma which might want to belong somewhere (effectively used in my proof 2)
lemma no_inj_to_smaller_set [decidable_eq β] (H : card β < card α) (f : α → β) : ¬ injective f :=
begin
  intro f_inj,
  have := card_image_of_injective f_inj,
  apply not_le_of_gt H,
  rw ← this,
  apply card_le_of_subset,
  apply subset_univ
end
end surj
"alphanum_fraction": null,
"author": "b-mehta",
"avg_line_length": null,
"converted": null,
"ext": null,
"file": null,
"hexsha": null,
"include": null,
"lang": null,
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": null,
"max_forks_repo_licenses": null,
"max_forks_repo_name": null,
"max_forks_repo_path": null,
"max_issues_count": null,
"max_issues_repo_head_hexsha": null,
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": null,
"max_issues_repo_name": null,
"max_issues_repo_path": null,
"max_line_length": null,
"max_stars_count": null,
"max_stars_repo_head_hexsha": null,
"max_stars_repo_licenses": null,
"max_stars_repo_name": null,
"max_stars_repo_path": null,
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": null,
"path": "github-repos/lean/b-mehta-lean-experiments/lean-experiments-5f0aed189f724ae6f739ec75dcdddcd2687614e1/src/combinatorics.lean",
"reason": null,
"repo": "lean-experiments",
"save_path": "github-repos/lean/b-mehta-lean-experiments",
"sha": "5f0aed189f724ae6f739ec75dcdddcd2687614e1",
"size": null
} |
#!/usr/bin/env python
"""bulge_graph.py: A graph representation of RNA secondary structure based
on its decomposition into primitive structure types: stems, hairpins,
interior loops, multiloops, etc...
for eden and graphlearn we stripped forgi down to this single file.
forgi: https://github.com/pkerpedjiev/forgi
"""
import sys
import collections as col
import itertools as it
import os
import operator as oper
import contextlib
import random
import shutil
import tempfile as tf
__author__ = "Peter Kerpedjiev"
__copyright__ = "Copyright 2012, 2013, 2014"
__version__ = "0.2"
__maintainer__ = "Peter Kerpedjiev"
__email__ = "pkerp@tbi.univie.ac.at"
bracket_left = "([{<ABCDEFGHIJKLMNOPQRSTUVWXYZ"
bracket_right = ")]}>abcdefghijklmnopqrstuvwxyz"
def gen_random_sequence(l):
    '''
    Generate a random RNA sequence of length l
    (uniform over the four nucleotides A, C, G, U).
    '''
    nucleotides = ['A', 'C', 'G', 'U']
    return "".join(random.choice(nucleotides) for _ in range(l))
@contextlib.contextmanager
def make_temp_directory():
    '''
    Yield a fresh temporary directory, removing it (recursively) on exit.
    Yanked from:
    http://stackoverflow.com/questions/13379742/right-way-to-clean-up-a-temporary-folder-in-python-class
    '''
    temp_dir = tf.mkdtemp()
    try:
        yield temp_dir
    finally:
        # Bug fix: without try/finally, an exception raised inside the
        # with-body skipped rmtree and leaked the directory.
        shutil.rmtree(temp_dir)
def insert_into_stack(stack, i, j):
    """Push closing index ``j`` onto the first stack level whose top is not
    below ``j``; return the level used (this becomes the bracket class)."""
    level = 0
    while stack[level] and stack[level][-1] < j:
        level += 1
    stack[level].append(j)
    return level
def delete_from_stack(stack, j):
    """Pop ``j`` from whichever stack level currently has it on top;
    return that level (the bracket class of the matching bracket)."""
    level = 0
    while not stack[level] or stack[level][-1] != j:
        level += 1
    stack[level].pop()
    return level
def pairtable_to_dotbracket(pt):
    """
    Converts arbitrary pair table array (ViennaRNA format) to structure in dot bracket format.
    """
    occupied = set()
    stack = col.defaultdict(list)
    out = []
    for i in range(1, pt[0] + 1):
        partner = pt[i]
        if partner != 0 and partner in occupied:
            raise ValueError('Invalid pairtable contains duplicate entries')
        occupied.add(partner)
        if partner == 0:
            out.append('.')
        elif partner > i:
            # opening bracket: stack level decides the bracket class
            out.append(bracket_left[insert_into_stack(stack, i, partner)])
        else:
            # closing bracket: pop from whichever level holds this index
            out.append(bracket_right[delete_from_stack(stack, i)])
    return "".join(out)
def inverse_brackets(bracket):
    """Map each character of ``bracket`` to its index, as a defaultdict(int)
    (unknown characters therefore look up as 0)."""
    lookup = col.defaultdict(int)
    for idx, ch in enumerate(bracket):
        lookup[ch] = idx
    return lookup
def dotbracket_to_pairtable(struct):
    """
    Converts arbitrary structure in dot bracket format to pair table (ViennaRNA format).

    :param struct: dot-bracket string; multiple bracket classes denote
                   pseudoknotted pairs.
    :return: pair table ``pt`` with ``pt[0] = len(struct)`` and ``pt[i]``
             the 1-based partner of position ``i`` (0 if unpaired).
    :raises ValueError: on unbalanced brackets.
    """
    pt = [0] * (len(struct) + 1)
    pt[0] = len(struct)
    stack = col.defaultdict(list)
    inverse_bracket_left = inverse_brackets(bracket_left)
    inverse_bracket_right = inverse_brackets(bracket_right)
    for i, a in enumerate(struct):
        i += 1
        if a == ".":
            pt[i] = 0
        else:
            if a in inverse_bracket_left:
                stack[inverse_bracket_left[a]].append(i)
            else:
                if len(stack[inverse_bracket_right[a]]) == 0:
                    raise ValueError('Too many closing brackets!')
                j = stack[inverse_bracket_right[a]].pop()
                pt[i] = j
                pt[j] = i
    # Bug fix: the old final check inspected only the stack of the *last*
    # character's bracket class (and, via the defaultdict, class 0 when the
    # string ended with '.'), so inputs such as "[." passed silently, and an
    # empty string raised NameError on the unbound loop variable.  Check
    # every bracket class instead.
    if any(len(s) != 0 for s in stack.values()):
        raise ValueError('Too many opening brackets!')
    return pt
def pairtable_to_tuples(pt):
    '''
    Convert a pairtable to a list of base pair tuples.
    i.e. [4,3,4,1,2] -> [(1,3),(2,4),(3,1),(4,2)]
    :param pt: A pairtable
    :return: A list paired tuples
    '''
    pt = iter(pt)
    # Skip the first element, which holds the sequence length.
    # Bug fix: ``pt.next()`` is Python-2-only; the built-in ``next()``
    # works on both Python 2.6+ and Python 3.
    next(pt)
    tuples = []
    for i, p in enumerate(pt):
        tuples += [(i + 1, p)]
    return tuples
def tuples_to_pairtable(pair_tuples, seq_length=None):
    '''
    Convert a representation of an RNA consisting of a list of tuples
    to a pair table:
    i.e. [(1,3),(2,4),(3,1),(4,2)] -> [4,3,4,1,2]
    :param pair_tuples: A list of pair tuples
    :param seq_length: How long is the sequence? Only needs to be passed in when
                       the unpaired nucleotides aren't passed in as (x,0) tuples.
    :return: A pair table
    '''
    if seq_length is None:
        # infer the table size from the largest index mentioned
        max_bp = max(max(t) for t in pair_tuples)
    else:
        max_bp = seq_length
    table = [0] * (max_bp + 1)
    table[0] = max_bp
    for left, right in pair_tuples:
        table[left] = right
    return table
def add_bulge(bulges, bulge, context, message):
    """
    Append ``bulge`` to the list stored under ``context`` and return the dict.
    A wrapper for a simple dictionary addition, added so that debugging can
    be made easier (``message`` is only of interest while debugging).
    :param bulges: dict mapping context -> list of bulges
    :param bulge: the (start, end) bulge to record
    :param context: key under which to record it
    :param message: debug tag (unused)
    :return: the same ``bulges`` dict
    """
    existing = bulges.get(context, [])
    bulges[context] = existing + [bulge]
    return bulges
def any_difference_of_one(stem, bulge):
    """
    See if there's any difference of one between the two
    ends of the stem [(a,b),(c,d)] and a bulge (e,f).
    :param stem: A couple of couples (2 x 2-tuple) indicating the start and end
                 nucleotides of the stem in the form ((s1, e1), (s2, e2))
    :param bulge: A couple (2-tuple) indicating the first and last position
                  of the bulge.
    :return: True if any stem coordinate is adjacent (distance 1) to any
             bulge coordinate, False otherwise.
    """
    return any(abs(b - p) == 1
               for side in stem
               for p in side
               for b in bulge)
def print_bulges(bulges):
    """
    Print the names and definitions of the bulges.
    :param bulges: A list of tuples of the form [(s, e)] where s and e are the
                   numbers of the nucleotides at the start and end of the bulge.
    """
    for i in range(len(bulges)):
        bulge = bulges[i]
        # 0-based coordinates are printed 1-based; single-argument print()
        # produces identical output on both Python 2 and 3 (the old
        # `print bulge_str` statement was Python-2-only).
        bulge_str = "define b{} 1 {} {}".format(i, bulge[0] + 1, bulge[1] + 1)
        print(bulge_str)
def condense_stem_pairs(stem_pairs):
    """
    Given a list of stem pairs, condense them into stem definitions.
    I.e. the pairs (0,10),(1,9),(2,8),(3,7) can be condensed into
    just the ends of the stem: [(0,10),(3,7)]
    :param stem_pairs: A list of tuples containing paired base numbers
                       (sorted in place).
    :returns: A list of tuples of tuples of the form [((s1, e1), (s2, e2))]
              where (s1, e1) is one end of a stem and (s2, e2) the other.
    """
    stem_pairs.sort()
    stems = []
    first = None          # first pair of the stem currently being built
    last = (-10, -10)     # previously seen pair (sentinel to start)
    for cur in stem_pairs:
        # A stem continues only while both coordinates move by exactly 1.
        # (Direction is deliberately not checked, as in the original.)
        contiguous = abs(cur[0] - last[0]) == 1 and abs(cur[1] - last[1]) == 1
        if not contiguous:
            if first is not None:
                stems.append((first, last))
            first = cur
        last = cur
    if first is not None:
        stems.append((first, last))
    return stems
def print_brackets(brackets):
    """
    Print the brackets and a numbering, for debugging purposes
    :param brackets: A string with the dotplot passed as input to this script.
    """
    # NOTE(review): Python 2 only -- uses the `print` statement and relies on
    # integer `/` division for the tens digits.
    numbers = [chr(ord('0') + i % 10) for i in range(len(brackets))]  # ones digit per column
    tens = [chr(ord('0') + i / 10) for i in range(len(brackets))]  # tens digit per column
    print "brackets:\n", brackets, "\n", "".join(tens), "\n", "".join(numbers)
def find_bulges_and_stems(brackets):
    """
    Iterate through the structure and enumerate the bulges and the stems that are
    present.

    The returned stems are of the form [[(s1, s2), (e1,e2)], [(s1,s2),(e1,e2)],...]
    where (s1,s2) are the residue numbers of one end of the stem and (e1,e2) are the
    residue numbers at the other end of the stem
    (see condense_stem_pairs)

    The returned bulges are of the form [(s,e), (s,e),...] where s is the start of a bulge
    and e is the end of a bulge

    :param brackets: A string with the dotplot passed as input to this script.
    """
    prev = 'x'              # previous structural character seen ('x' = none yet)
    context = 0             # current nesting context of '(' groups
    bulges = dict()         # open bulges, keyed by context
    finished_bulges = []    # bulges whose context has been fully closed
    context_depths = dict() # number of unclosed '(' per context
    opens = []              # stack of positions of unmatched '('
    stem_pairs = []         # collected (open_pos, close_pos) base pairs
    dots_start = 0          # start position of the current run of '.'

    context_depths[0] = 0

    i = 0
    for i in range(len(brackets)):
        if brackets[i] == '(':
            opens.append(i)

            if prev == '(':
                # continuing an existing helix: deepen the current context
                context_depths[context] = context_depths.get(context, 0) + 1
                continue
            else:
                # a new helix starts: open a fresh context
                context += 1
                context_depths[context] = 1

            if prev == '.':
                # the run of dots just before this '(' is a bulge
                dots_end = i - 1
                bulges = add_bulge(
                    bulges, (dots_start, dots_end), context, "4")

        if brackets[i] == ')':
            if len(opens) == 0:
                raise Exception("Unmatched close bracket")

            stem_pairs.append((opens.pop(), i))

            context_depths[context] -= 1

            if context_depths[context] == 0:
                # this context is fully closed; archive its bulges
                if context in bulges:
                    finished_bulges += bulges[context]

                bulges[context] = []
                context -= 1

            if prev == '.':
                dots_end = i - 1
                bulges = add_bulge(
                    bulges, (dots_start, dots_end), context, "2")

        if brackets[i] == '.':
            if prev == '.':
                continue

            dots_start = i

        prev = brackets[i]

    # handle whatever the structure ended with
    if prev == '.':
        dots_end = i
        bulges = add_bulge(bulges, (dots_start, dots_end), context, "7")
    elif prev == '(':
        print >> sys.stderr, "Unmatched bracket at the end"
        sys.exit(1)
    """
    elif prev == ')':
        bulges = add_bulge(bulges, (i+1, i+1), context, "8")
    """

    if context in bulges.keys():
        finished_bulges += bulges[context]

    if len(opens) > 0:
        raise Exception("Unmatched open bracket")

    stem_pairs.sort()

    stems = condense_stem_pairs(stem_pairs)

    return finished_bulges, stems
def print_name(filename):
    """Print the structure name (the filename without its extension)."""
    # format string reproduces the old comma-print spacing and works on
    # both Python 2 and 3
    print("name {}".format(os.path.splitext(filename)[0]))
class BulgeGraph(object):
def __init__(self, bg_file=None, dotbracket_str='', seq=''):
    """
    Initialize an empty BulgeGraph, optionally populating it from a
    dot-bracket string.

    :param bg_file: Currently unused (see commented-out from_bg_file call).
    :param dotbracket_str: If non-empty, build the graph from this
                           dot-bracket structure.
    :param seq: The nucleotide sequence of the structure.
    """
    # total number of nucleotides in the structure
    self.seq_length = 0
    self.ang_types = None
    self.mst = None
    self.build_order = None
    self.name = "untitled"
    # element name -> flat list of start/end residue numbers
    self.defines = dict()
    # element name -> set of adjacent element names
    self.edges = col.defaultdict(set)
    self.longrange = col.defaultdict(set)
    # element name -> merge weight used by merge_vertices/relabel_nodes
    self.weights = dict()

    # sort the coordinate basis for each stem
    self.bases = dict()
    self.stem_invs = dict()
    self.seq_ids = []

    # counter used by get_vertex() to mint unique node names
    self.name_counter = 0

    if dotbracket_str != '':
        self.from_dotbracket(dotbracket_str)

    self.seq = seq
    for i, s in enumerate(seq):
        # default seq_ids: no chain, 1-based residue number, no insertion code
        self.seq_ids += [(' ', str(i + 1), ' ')]

    # if bg_file is not None:
    #     self.from_bg_file(bg_file)
# get an internal index for a named vertex
# this applies to both stems and edges
def get_vertex(self, name=None):
"""
Return a new unique vertex name.
"""
if name is None:
name = "x{}".format(self.name_counter)
self.name_counter += 1
return name
def element_length(self, key):
"""
Get the number of residues that are contained within this element.
:param key: The name of the element.
"""
d = self.defines[key]
length = 0
for i in range(0, len(d), 2):
length += d[i + 1] - d[i] + 1
return length
def stem_length(self, key):
"""
Get the length of a particular element. If it's a stem, it's equal to
the number of paired bases. If it's an interior loop, it's equal to the
number of unpaired bases on the strand with less unpaired bases. If
it's a multiloop, then it's the number of unpaired bases.
"""
d = self.defines[key]
if key[0] == 's' or key[0] == 'y':
return (d[1] - d[0]) + 1
elif key[0] == 'f':
return self.get_bulge_dimensions(key)[0]
elif key[0] == 't':
return self.get_bulge_dimensions(key)[1]
elif key[0] == 'h':
return self.get_bulge_dimensions(key)[0]
else:
return min(self.get_bulge_dimensions(key))
def get_single_define_str(self, key):
"""
Get a define string for a single key.
"""
return "define {} {}".format(key, " ".join([str(d) for d in self.defines[key]]))
def get_define_str(self):
"""
Convert the defines into a string.
Format:
define [name] [start_res1] [end_res1] [start_res2] [end_res2]
"""
defines_str = ''
# a method for sorting the defines
def define_sorter(k):
drni = self.define_residue_num_iterator(k, adjacent=True)
return drni.next()
for key in sorted(self.defines.keys(), key=define_sorter):
defines_str += self.get_single_define_str(key)
# defines_str += "define %s %s" % ( key, " ".join([str(d) for d in
# self.defines[key]]))
defines_str += '\n'
return defines_str
def get_length_str(self):
return "length " + str(self.seq_length) + '\n'
def get_connect_str(self):
"""
Get the connections of the bulges in the graph.
Format:
connect [from] [to1] [to2] [to3]
"""
whole_str = ''
for key in self.edges:
if len(self.edges[key]) == 0:
continue
# Our graph will be defined by the stems and the bulges they
# connect to
name = key
if name[0] == 's':
out_str = "connect {}".format(name)
for dest in self.edges[key]:
out_str += " {}".format(dest)
whole_str += out_str
whole_str += '\n'
return whole_str
def get_sequence_str(self):
"""
Return the sequence along with its keyword. I.e.
seq ACGGGCC
"""
if len(self.seq) > 0:
return "seq {}\n".format(self.seq)
else:
return ""
def get_name_str(self):
"""
Return the name of this structure along with its keyword:
name 1y26
"""
return "name {}\n".format(self.name)
def to_bg_string(self):
"""
Output a string representation that can be stored and reloaded.
"""
out_str = ''
out_str += self.get_name_str()
out_str += self.get_length_str()
out_str += self.get_sequence_str()
out_str += self.get_define_str()
out_str += self.get_connect_str()
return out_str
def to_file(self, filename):
with open(filename, 'w') as f:
out_str = self.to_bg_string()
f.write(out_str)
def to_element_string(self):
"""
Create a string similar to dotbracket notation that identifies what
type of element is present at each location.
For example the following dotbracket:
..((..))..
Should yield the following element string:
ffsshhsstt
Indicating that it begins with a fiveprime region, continues with a
stem, has a hairpin after the stem, the stem continues and it is terminated
by a threeprime region.
"""
output_str = [' '] * (self.seq_length + 1)
for d in self.defines.keys():
for resi in self.define_residue_num_iterator(d, adjacent=False):
output_str[resi] = d[0]
return "".join(output_str).strip()
def define_range_iterator(self, node, adjacent=False, seq_ids=False):
    """
    Return the ranges of the nucleotides in the define.

    In other words, if a define contains the following: [1,2,7,8]
    The ranges will be [1,2] and [7,8].

    :param node: The name of the element.
    :param adjacent: Use the nucleotides in the neighboring element which
                     connect to this element as the range starts and ends.
    :param seq_ids: Yield pairs of seq_ids instead of plain residue numbers.
    :return: A list of two-element lists
    """
    # pair up consecutive define entries: [1,2,7,8] -> (1,2), (7,8)
    a = iter(self.defines[node])
    ranges = it.izip(a, a)

    if node[0] == 'i':
        # interior loops have to be treated specially because
        # they might have a bulge that has no unpaired nucleotides on one
        # strand
        if adjacent:
            conns = self.connections(node)
            s1 = self.defines[conns[0]]
            s2 = self.defines[conns[1]]

            # offset by one, which will be reversed in the yield step
            # below
            ranges = [[s1[1] + 1, s2[0] - 1], [s2[3] + 1, s1[2] - 1]]

    if node[0] == 'm':
        if adjacent:
            # multiloop ranges are derived from the two connected stems
            conns = self.connections(node)
            s1 = self.get_sides_plus(conns[0], node)[0]
            s2 = self.get_sides_plus(conns[1], node)[0]

            rnge = sorted([self.defines[conns[0]][s1],
                           self.defines[conns[1]][s2]])
            ranges = [[rnge[0] + 1, rnge[1] - 1]]

    for (ds1, ds2) in ranges:
        if adjacent:
            # widen by one residue on each side, but stay within the sequence
            if ds1 > 1:
                ds1 -= 1
            if ds2 < self.seq_length:
                ds2 += 1

        if seq_ids:
            # this will cause problems if the nucleotide has insertion
            # codes
            yield [self.seq_ids[ds1 - 1], self.seq_ids[ds2 - 1]]
        else:
            yield [ds1, ds2]
def define_residue_num_iterator(self, node, adjacent=False, seq_ids=False):
    """
    Iterate over the residue numbers that belong to this node.

    :param node: The name of the node
    :param adjacent: Passed through to define_range_iterator.
    :param seq_ids: Yield seq_ids instead of plain residue numbers.
    """
    # 'visited' prevents yielding the same residue twice when ranges
    # overlap (possible in adjacent mode)
    visited = set()

    for r in self.define_range_iterator(node, adjacent, seq_ids=False):
        for i in range(r[0], r[1] + 1):
            if seq_ids:
                if self.seq_ids[i - 1] not in visited:
                    visited.add(self.seq_ids[i - 1])
                    yield self.seq_ids[i - 1]
            else:
                if i not in visited:
                    visited.add(i)
                    yield i
def iterate_over_seqid_range(self, start_id, end_id):
"""
Iterate over the seq_ids between the start_id and end_id.
"""
i1 = self.seq_ids.index(start_id)
i2 = self.seq_ids.index(end_id)
for i in range(i1, i2 + 1):
yield self.seq_ids[i]
def create_bulge_graph(self, stems, bulges):
"""
Find out which stems connect to which bulges
Stems and bulges which share a nucleotide are considered connected.
:param stems: A list of tuples of tuples of the form [((s1, e1), (s2, e2))]
where s1 and e1 are the nucleotides at one end of the stem
and s2 and e2 are the nucleotides at the other.
:param bulges: A list of tuples of the form [(s, e)] where s and e are the
numbers of the nucleotides at the start and end of the bulge.
"""
for i in range(len(stems)):
stem = stems[i]
for j in range(len(bulges)):
bulge = bulges[j]
if any_difference_of_one(stem, bulge):
self.edges['y{}'.format(i)].add('b{}'.format(j))
self.edges['b{}'.format(j)].add('y{}'.format(i))
def create_stem_graph(self, stems, bulge_counter):
    """
    Determine which stems are connected to each other. A stem can be connected to
    another stem when there is an interior loop with an unpaired nucleotide on
    one side. In this case, a bulge will be created on the other side, but it
    will only consist of the two paired bases around where the unpaired base
    would be if it existed.

    The defines for these bulges will be printed as well as the connection strings
    for the stems they are connected to.

    :param stems: A list of tuples of tuples of the form [((s1, e1), (s2, e2))]
                  where s1 and e1 are the nucleotides at one end of the stem
                  and s2 and e2 are the nucleotides at the other.
    :param bulge_counter: The number of bulges that have been encountered so far.

    :returns: A dictionary indexed by the number of a stem, containing a set of the
             other stems that the index is connected to.
    """
    # print "stems:", stems
    stem_stems = dict()
    for i in range(len(stems)):
        for j in range(i + 1, len(stems)):
            # compare every corner of stem i with every corner of stem j;
            # adjacent corners (difference of 1) imply a zero-length bulge
            for k1 in range(2):
                # don't fear the for loop
                for k2 in range(2):
                    for l1 in range(2):
                        for l2 in range(2):
                            s1 = stems[i][k1][l1]
                            s2 = stems[j][k2][l2]
                            if abs(s1 - s2) == 1:
                                stem_stems_set = stem_stems.get(i, set())
                                if j not in stem_stems_set:
                                    # create a zero-length bulge linking the
                                    # two stems in the graph
                                    bn = 'b{}'.format(bulge_counter)
                                    # self.defines[bn] = [min(s1, s2)+1, max(s1, s2)+1]
                                    self.defines[bn] = []
                                    self.weights[bn] = 1

                                    self.edges['y{}'.format(i)].add(bn)
                                    self.edges[bn].add('y{}'.format(i))

                                    self.edges['y{}'.format(j)].add(bn)
                                    self.edges[bn].add('y{}'.format(j))

                                    bulge_counter += 1
                                    stem_stems_set.add(j)
                                stem_stems[i] = stem_stems_set

    # NOTE(review): new 'b*' keys are inserted while iterating keys();
    # safe on Python 2 where keys() is a list snapshot, would raise on
    # Python 3 -- verify before porting
    for d in self.defines.keys():
        if d[0] != 'y':
            continue

        (s1, e1, s2, e2) = self.defines[d]
        if abs(s2 - e1) == 1:
            # the two strands of this stem touch directly: add a
            # zero-length hairpin-side bulge
            bn = 'b{}'.format(bulge_counter)

            self.defines[bn] = []
            self.weights[bn] = 1

            self.edges[bn].add(d)
            self.edges[d].add(bn)
            bulge_counter += 1

    return stem_stems
def remove_vertex(self, v):
"""
Delete a node after merging it with another
:param v: The name of the node
"""
# delete all edges to this node
for key in self.edges[v]:
self.edges[key].remove(v)
for edge in self.edges:
if v in self.edges[edge]:
self.edges[edge].remove(v)
# delete all edges from this node
del self.edges[v]
del self.defines[v]
def reduce_defines(self):
    """
    Make defines like this:

    define x0 2 124 124 3 4 125 127 5 5

    Into this:

    define x0 2 3 5 124 127

    That is, consolidate contiguous bulge region defines.
    """
    for key in self.defines.keys():
        if key[0] != 's':
            # non-stem defines must hold (start, end) pairs
            assert (len(self.defines[key]) % 2 == 0)

            new_j = 0

            while new_j < len(self.defines[key]):
                j = new_j
                # NOTE(review): this advances by j + 2 rather than *to*
                # j + 2, so some pair indices can be skipped unless a
                # merge resets new_j to 0 below -- looks suspicious,
                # verify against callers before changing
                new_j += j + 2

                (f1, t1) = (
                    int(self.defines[key][j]), int(self.defines[key][j + 1]))

                # remove bulges of length 0
                if f1 == -1 and t1 == -2:
                    del self.defines[key][j]
                    del self.defines[key][j]

                    new_j = 0
                    continue

                # merge contiguous bulge regions
                for k in range(j + 2, len(self.defines[key]), 2):
                    if key[0] == 'y':
                        # we can have stems with defines like: [1,2,3,4]
                        # which would imply a non-existant loop at its end
                        continue

                    (f2, t2) = (
                        int(self.defines[key][k]), int(self.defines[key][k + 1]))

                    if t2 + 1 != f1 and t1 + 1 != f2:
                        continue

                    # the two regions touch: extend region j to cover both
                    if t2 + 1 == f1:
                        self.defines[key][j] = str(f2)
                        self.defines[key][j + 1] = str(t1)

                    elif t1 + 1 == f2:
                        self.defines[key][j] = str(f1)
                        self.defines[key][j + 1] = str(t2)

                    del self.defines[key][k]
                    del self.defines[key][k]

                    # restart the scan from the beginning after a merge
                    new_j = 0

                    break
def merge_vertices(self, vertices):
    """
    This is done when two of the outgoing strands of a stem
    go to different bulges
    It is assumed that the two ends are on the same sides because
    at least one vertex has a weight of 2, implying that it accounts
    for all of the edges going out of one side of the stem

    :param vertices: A list of vertex names to combine into one.
    :return: The name of the freshly created merged vertex.
    """
    merge_str = ""
    new_vertex = self.get_vertex()
    self.weights[new_vertex] = 0

    # assert(len(vertices) == 2)

    connections = set()

    for v in vertices:
        merge_str += " {}".format(v)

        # what are we gonna merge?
        for item in self.edges[v]:
            connections.add(item)

        # Add the definition of this vertex to the new vertex
        # self.merge_defs[new_vertex] = self.merge_defs.get(new_vertex, [])
        # + [v]

        if v[0] == 's':
            # for stems, interleave the strand boundaries so the merged
            # define keeps start/end pairs together
            self.defines[new_vertex] = self.defines.get(
                new_vertex, []) + [self.defines[v][0],
                                   self.defines[v][2]] + [
                self.defines[v][1], self.defines[v][3]]
        else:
            self.defines[new_vertex] = self.defines.get(
                new_vertex, []) + self.defines[v]

        self.weights[new_vertex] += 1

        # remove the old vertex, since it's been replaced by new_vertex
        self.remove_vertex(v)
        self.reduce_defines()

    # self.weights[new_vertex] = 2

    # rewire all of the old vertices' connections to the merged vertex
    for connection in connections:
        self.edges[new_vertex].add(connection)
        self.edges[connection].add(new_vertex)

    return new_vertex
def nucleotides_to_elements(self, nucleotides):
    """
    Convert a list of nucleotides to element names.

    Remove redundant entries and return a set.
    """
    return {self.get_node_from_residue_num(n) for n in nucleotides}
def find_bulge_loop(self, vertex, max_length=4):
    """
    Find a set of nodes that form a loop containing the
    given vertex and being no greater than max_length nodes long.

    :param vertex: The vertex to start the search from.
    :param max_length: Upper bound on the number of nodes in a reported loop.
    :returns: A list of the nodes in the loop, or [] if none is found.
    """
    visited = set()
    # depth-first search; each stack entry is (node, depth in the path)
    to_visit = [(key, 1) for key in self.edges[vertex]]
    visited.add(vertex)
    in_path = [vertex]

    while len(to_visit) > 0:
        (current, depth) = to_visit.pop()
        visited.add(current)

        # truncate the recorded path back to the current depth before
        # appending the node we are now exploring
        in_path = in_path[:depth]
        in_path.append(current)

        for key in self.edges[current]:
            if key == vertex and depth > 1:
                # we came back to the start: a cycle, report it if it is
                # short enough
                if len(in_path[:depth + 1]) > max_length:
                    continue
                else:
                    return in_path[:depth + 1]

            if key not in visited:
                to_visit.append((key, depth + 1))
    return []
def add_node(self, name, edges, define, weight=1):
self.defines[name] = define
self.edges[name] = edges
self.weights[name] = weight
for edge in self.edges[name]:
self.edges[edge].add(name)
def dissolve_stem(self, key):
    """
    Remove a stem. This means that we need
    to reconfigure all of the adjacent elements in such a manner
    that they now include the nucleotides that were formerly
    in this stem.
    """
    base_pairs = list(self.stem_bp_iterator(key))
    self.remove_base_pairs(base_pairs)
def remove_base_pairs(self, to_remove):
    """
    Remove all of the base pairs which are in pair_list.

    :param to_remove: A list of tuples containing the names of the base pairs.
    :return: nothing
    """
    # normalize once so each pair only needs a single comparison
    removal_keys = [sorted(s) for s in to_remove]

    new_tuples = []
    for pair in self.to_pair_tuples():
        if sorted(pair) in removal_keys:
            # mark the first nucleotide of the pair as unpaired
            new_tuples.append((pair[0], 0))
        else:
            new_tuples.append(pair)

    # rebuild the graph from scratch with the pairs removed
    self.defines = dict()
    self.from_tuples(new_tuples)
def collapse(self):
    """
    If any vertices form a loop, then they are either a bulge region or
    a fork region. The bulge (interior loop) regions will be condensed
    into one node.
    """
    # repeat until a full pass finds no more pairs of bulges to merge
    new_vertex = True
    while new_vertex:
        new_vertex = False
        bulges = [k for k in self.defines if k[0] != 'y']

        for (b1, b2) in it.combinations(bulges, r=2):
            # two bulges sharing exactly the same (multi-)connections
            # are two sides of one interior loop
            if self.edges[b1] == self.edges[b2] and len(self.edges[b1]) > 1:
                connections = self.connections(b1)

                all_connections = [sorted(
                    (self.get_sides_plus(connections[0], b1)[0],
                     self.get_sides_plus(
                         connections[0], b2)[0])),
                    sorted(
                        (self.get_sides_plus(connections[
                            1], b1)[0],
                            self.get_sides_plus(connections[1], b2)[0]))]

                if all_connections == [[1, 2], [0, 3]]:
                    # interior loop
                    self.merge_vertices([b1, b2])
                    new_vertex = True
                    break
def interior_loop_iterator(self):
"""
Iterate over all of the interior loops.
An interior loop can only have two connections: to the two stems which it links.
"""
for key in self.defines.keys():
if key[0] == 'i':
yield key
def relabel_node(self, old_name, new_name):
"""
Change the name of a node.
param old_name: The previous name of the node
param new_name: The new name of the node
"""
# replace the define name
define = self.defines[old_name]
del self.defines[old_name]
self.defines[new_name] = define
# replace the index into the edges array
edge = self.edges[old_name]
del self.edges[old_name]
self.edges[new_name] = edge
# replace the name of any edge that pointed to old_name
for k in self.edges.keys():
new_edges = set()
for e in self.edges[k]:
if e == old_name:
new_edges.add(new_name)
else:
new_edges.add(e)
self.edges[k] = new_edges
def compare_stems(self, b):
"""
A function that can be passed in as the key to a sort.
"""
return (self.defines[b][0], 0)
def compare_bulges(self, b):
    """
    Sort key: order bulges by the first nucleotides of the two stems
    they connect.
    """
    conns = self.connections(b)
    return (self.defines[conns[0]][0], self.defines[conns[1]][0])
def compare_hairpins(self, b):
    """
    Sort key: order hairpins by the closing position of the stem they
    hang off.
    """
    connections = self.connections(b)
    # sys.maxint does not exist on Python 3; sys.maxsize has the same
    # value on CPython 2 (64-bit), so fall back to it
    return (self.defines[connections[0]][1],
            getattr(sys, 'maxint', sys.maxsize))
def relabel_nodes(self):
    """
    Change the labels of the nodes to be more indicative of their nature.

    s: stem
    h: hairpin
    i: interior loop
    m: multiloop
    f: five-prime unpaired
    t: three-prime unpaired
    """
    stems = []
    hairpins = []
    interior_loops = []
    multiloops = []
    fiveprimes = []
    threeprimes = []

    # classify every node; the first matching rule wins
    for d in self.defines.keys():
        # stem (provisional 'y' name or already 's')
        if d[0] == 'y' or d[0] == 's':
            stems += [d]

            # NOTE(review): sorting inside the loop re-sorts on every
            # match; only the final ordering matters
            stems.sort(key=self.compare_stems)
            continue

        # zero-length element with one neighbor: degenerate hairpin
        if len(self.defines[d]) == 0 and len(self.edges[d]) == 1:
            hairpins += [d]
            continue

        # zero-length element with two neighbors: degenerate multiloop
        if len(self.defines[d]) == 0 and len(self.edges[d]) == 2:
            multiloops += [d]
            continue

        # unpaired region starting at residue 1: five-prime end
        if len(self.edges[d]) <= 1 and self.defines[d][0] == 1:
            fiveprimes += [d]
            continue

        # unpaired region ending at the last residue: three-prime end
        if len(self.edges[d]) == 1 and self.defines[d][1] == self.seq_length:
            threeprimes += [d]
            continue

        # interior unpaired region with a single neighbor: hairpin
        if (len(self.edges[d]) == 1 and
                self.defines[d][0] != 1 and
                self.defines[d][1] != self.seq_length):
            hairpins += [d]

            hairpins.sort(key=self.compare_hairpins)
            continue

        # single-strand segment between two stems: multiloop part
        if d[0] == 'm' or (d[0] != 'i' and len(self.edges[d]) == 2 and
                           self.weights[d] == 1 and
                           self.defines[d][0] != 1 and
                           self.defines[d][1] != self.seq_length):
            multiloops += [d]

            multiloops.sort(key=self.compare_bulges)
            continue

        # everything left (weight 2, or already 'i'): interior loop
        if d[0] == 'i' or self.weights[d] == 2:
            interior_loops += [d]
            interior_loops.sort(key=self.compare_stems)

    for d in fiveprimes:
        self.relabel_node(d, 'f1')
    for d in threeprimes:
        self.relabel_node(d, 't1')
    for i, d in enumerate(stems):
        self.relabel_node(d, 's%d' % (i))
    for i, d in enumerate(interior_loops):
        self.relabel_node(d, 'i%d' % (i))
    for i, d in enumerate(multiloops):
        self.relabel_node(d, 'm%d' % (i))
    for i, d in enumerate(hairpins):
        self.relabel_node(d, 'h%d' % (i))
def has_connection(self, v1, v2):
""" Is there an edge between these two nodes """
if v2 in self.edges[v1]:
return True
else:
# two multiloops can be connected at the end of a stem
for e in self.edges[v1]:
if e[0] != 's':
continue
if v2 in self.edges[e]:
(s1b, s1e) = self.get_sides(e, v1)
(s2b, s2e) = self.get_sides(e, v2)
if s1b == s2b:
return True
return False
def connection_type(self, define, connections):
"""
Classify the way that two stems are connected according to the type
of bulge that separates them.
Potential angle types for single stranded segments, and the ends of
the stems they connect:
1 2 (1, 1) #pseudoknot
1 0 (1, 0)
3 2 (0, 1)
3 0 (0, 0)
:param define: The name of the bulge separating the two stems
:param connections: The two stems and their separation
"""
if define[0] == 'i':
# interior loop, we just have to check if
# connections[0] < connections[1]
if self.defines[connections[0]][0] < self.defines[connections[1]][0]:
return 1
else:
return -1
elif define[0] == 'm':
(s1c, b1c) = self.get_sides_plus(connections[0], define)
(s2c, b2c) = self.get_sides_plus(connections[1], define)
if (s1c, s2c) == (1, 0):
return 2
elif (s1c, s2c) == (0, 1):
return -2
elif (s1c, s2c) == (3, 0):
return 3
elif (s1c, s2c) == (0, 3):
return -3
elif (s1c, s2c) == (2, 3):
return 4
elif (s1c, s2c) == (3, 2):
return -4
# the next two refer to pseudoknots
elif (s1c, s2c) == (2, 1):
return 5
elif (s1c, s2c) == (1, 2):
return -5
else:
raise Exception("Weird angle type: (s1c, s2c) = (%d, %d)" %
(s1c, s2c))
else:
raise Exception(
"connection_type called on non-interior loop/multiloop")
def connection_ends(self, connection_type):
    """
    Find out which ends of the stems are connected by a particular angle
    type.

    :param connection_type: The angle type, as determined by which corners
                            of a stem are connected
    :return: (s1e, s2b)
    """
    ends_by_magnitude = {1: (1, 0),
                         2: (1, 0),
                         3: (0, 0),
                         4: (1, 0),
                         5: (1, 1)}

    magnitude = abs(connection_type)
    if magnitude not in ends_by_magnitude:
        raise Exception('Unknown connection type: %d' % (connection_type))

    ends = ends_by_magnitude[magnitude]
    # a negative type means the stems are in the reverse order
    return ends[::-1] if connection_type < 0 else ends
def get_multiloop_nucleotides(self, multiloop_loop):
    """
    Return a list of nucleotides which make up a particular
    multiloop.

    :param multiloop_loop: The elements which make up this multiloop
    :return: A list of nucleotides
    """
    stems = [d for d in multiloop_loop if d[0] == 's']
    multis = [d for d in multiloop_loop if d[0] == 'm']
    residues = []

    for s in stems:
        # only consider the stem's neighbors that are part of this loop
        relevant_edges = [c for c in self.edges[s] if c in multiloop_loop]
        sides = [self.get_sides_plus(s, c)[0] for c in relevant_edges]
        sides.sort()

        # the whole stem is part of this multiloop
        if sides == [2, 3] or sides == [0, 1]:
            residues += range(
                self.defines[s][sides[0]], self.defines[s][sides[1]] + 1)
        else:
            # otherwise only the two corner nucleotides belong to it
            residues += [
                self.defines[s][sides[0]], self.defines[s][sides[1]]]

    for m in multis:
        residues += self.define_residue_num_iterator(m, adjacent=False)
    return residues
def find_external_loops(self):
    '''
    Return all of the elements which are part of
    an external loop.

    :return: A list containing the external loops in this molecule
             (i.e. ['f0, m3, m5, t0'])
    '''
    exterior = []

    candidates = it.chain(self.floop_iterator(),
                          self.tloop_iterator(),
                          self.mloop_iterator())
    for d in candidates:
        # an element through which no cycle passes lies on the exterior
        if not self.shortest_bg_loop(d):
            exterior.append(d)

    return exterior
def find_multiloop_loops(self):
    """
    Find out which defines are connected in a multiloop.

    :return: Two lists, one containing the sets of elements comprising the
             shortest loops and the other containing the sets of
             nucleotides comprising the shortest loops.
    """
    loops = set()

    # collect the distinct nucleotide cycles passing through multiloops
    for d in self.mloop_iterator():
        loop_nts = self.shortest_bg_loop(d)

        if len(loop_nts) > 0:
            loops.add(tuple(sorted(loop_nts)))

    loops = list(loops)
    loop_elems = []

    for loop in loops:
        # translate residue numbers back to element names
        all_loops = set([self.get_node_from_residue_num(n) for n in loop])

        # some multiloops might not contain any nucleotides, so we
        # have to explicitly add these
        for a, b in it.combinations(all_loops, r=2):
            common_edges = set.intersection(self.edges[a], self.edges[b])
            for e in common_edges:
                all_loops.add(e)

        loop_elems += [all_loops]

    return loop_elems, loops
def seq_ids_from_seq(self):
"""
Get the sequence ids of the string.
"""
self.seq_ids = []
# when provided with just a sequence, we presume that the
# residue ids are numbered from 1-up
for i, s in enumerate(self.seq):
self.seq_ids += [(' ', i + 1, ' ')]
def remove_degenerate_nodes(self):
"""
For now just remove all hairpins that have no length.
"""
to_remove = []
for d in self.defines:
if d[0] == 'h' and len(self.defines[d]) == 0:
to_remove += [d]
for r in to_remove:
self.remove_vertex(r)
def from_stems_and_bulges(self, stems, bulges):
    """
    Create the graph from the list of stems and bulges.

    :param stems: A list of tuples of two two-tuples, each containing the start
                  and end nucleotides of each strand of the stem.
    :param bulges: A list of tuples containing the starts and ends of the
                   of the bulge regions.
    :return: Nothing, just make the bulgegraph
    """
    for i in range(len(stems)):
        # one is added to each coordinate to make up for the fact that
        # residues are 1-based
        ss1 = stems[i][0][0] + 1
        ss2 = stems[i][0][1] + 1
        se1 = stems[i][1][0] + 1
        se2 = stems[i][1][1] + 1

        self.defines['y%d' % (i)] = [min(ss1, se1), max(ss1, se1),
                                     min(ss2, se2), max(ss2, se2)]
        self.weights['y%d' % (i)] = 1

    for i in range(len(bulges)):
        bulge = bulges[i]
        self.defines['b%d' % (i)] = sorted([bulge[0] + 1, bulge[1] + 1])
        self.weights['b%d' % (i)] = 1

    # wire up the graph, then simplify it and rename the provisional
    # y*/b* nodes into their final s/i/m/h/f/t names
    self.create_bulge_graph(stems, bulges)
    self.create_stem_graph(stems, len(bulges))
    self.collapse()
    self.relabel_nodes()
    self.remove_degenerate_nodes()
    self.sort_defines()
def dissolve_length_one_stems(self):
# dissolve all stems which have a length of one
repeat = True
while repeat:
repeat = False
for k in self.defines:
if k[0] == 's' and self.stem_length(k) == 1:
self.dissolve_stem(k)
repeat = True
break
def from_dotbracket(self, dotbracket_str, dissolve_length_one_stems=False):
    """
    Populate the BulgeGraph structure from a dotbracket representation

    ie: ..((..))..

    :param dotbracket_str: A string containing the dotbracket representation
                           of the structure
    :param dissolve_length_one_stems: If True, remove stems consisting of a
                                      single base pair after building.
    """
    # reset all state before rebuilding
    self.__init__()
    self.dotbracket_str = dotbracket_str
    self.seq_length = len(dotbracket_str)

    if len(dotbracket_str) == 0:
        return

    # dotbracket -> pair table -> (nucleotide, partner) tuples -> graph
    pt = dotbracket_to_pairtable(dotbracket_str)
    tuples = pairtable_to_tuples(pt)
    self.from_tuples(tuples)

    if dissolve_length_one_stems:
        self.dissolve_length_one_stems()
def to_pair_table(self):
    """
    Create a pair table from the list of elements.

    The first element in the returned list indicates the number of
    nucleotides in the structure.

    i.e. [5,5,4,0,2,1]
    """
    return tuples_to_pairtable(self.to_pair_tuples())
def to_pair_tuples(self):
"""
Create a list of tuples corresponding to all of the base pairs in the
structure. Unpaired bases will be shown as being paired with a
nucleotide numbered 0.
i.e. [(1,5),(2,4),(3,0),(4,2),(5,1)]
"""
# iterate over each element
table = []
for d in self.defines:
# iterate over each nucleotide in each element
for b in self.define_residue_num_iterator(d):
p = self.pairing_partner(b)
if p is None:
p = 0
table += [(b, p)]
return table
def to_bpseq_string(self):
"""
Create a bpseq string from this structure.
"""
out_str = ''
for i in range(1, self.seq_length + 1):
pp = self.pairing_partner(i)
if pp is None:
pp = 0
out_str += "{} {} {}\n".format(i, self.seq[i - 1], pp)
return out_str
def bpseq_to_tuples_and_seq(self, bpseq_str):
    """
    Convert a bpseq string to a list of pair tuples and a sequence
    dictionary. The return value is a tuple of the list of pair tuples
    and a sequence string.

    :param bpseq_str: The bpseq string
    :return: ([(1,5),(2,4),(3,0),(4,2),(5,1)], 'ACCAA')
    """
    tuples = []
    chars = []

    for line in bpseq_str.split('\n'):
        fields = line.split()
        if not fields:
            continue

        # each bpseq line is: residue_number base partner_number
        tuples.append((int(fields[0]), int(fields[2])))
        chars.append(fields[1])

    # normalize to uppercase RNA
    seq = "".join(chars).upper().replace('T', 'U')
    return (tuples, seq)
def from_tuples(self, tuples):
    """
    Create a bulge_graph from a list of pair tuples. Unpaired
    nucleotides have a pairing partner of 0.

    :param tuples: A list of (nucleotide, partner) tuples, 1-based, with
                   partner 0 meaning unpaired.
    """
    stems = []
    bulges = []

    tuples.sort()
    tuples = iter(tuples)
    # next() instead of tuples.next() so this also runs on Python 3
    (t1, t2) = next(tuples)

    prev_from = t1
    prev_to = t2

    start_from = prev_from
    start_to = prev_to
    last_paired = prev_from

    for t1, t2 in tuples:
        (from_bp, to_bp) = (t1, t2)

        if abs(to_bp - prev_to) == 1 and prev_to != 0:
            # stem: both strands advance by one in opposite directions
            if (((prev_to - prev_from > 0 and to_bp - from_bp > 0) or
                 (prev_to - prev_from < 0 and to_bp - from_bp < 0)) and
                    (to_bp - prev_to) == -(from_bp - prev_from)):
                (prev_from, prev_to) = (from_bp, to_bp)
                last_paired = from_bp
                continue

        if to_bp == 0 and prev_to == 0:
            # bulge: the run of unpaired nucleotides continues
            (prev_from, prev_to) = (from_bp, to_bp)
            continue
        else:
            # the current run (stem or bulge) has ended; record it
            if prev_to != 0:
                new_stem = tuple(
                    sorted([tuple(sorted([start_from - 1, start_to - 1])),
                            tuple(sorted([prev_from - 1, prev_to - 1]))]))
                if new_stem not in stems:
                    stems += [new_stem]

                last_paired = from_bp
                start_from = from_bp
                start_to = to_bp
            else:
                new_bulge = ((last_paired - 1, prev_from - 1))
                bulges += [new_bulge]

                start_from = from_bp
                start_to = to_bp

        prev_from = from_bp
        prev_to = to_bp

    # Take care of the last element
    if prev_to != 0:
        new_stem = tuple(
            sorted([tuple(sorted([start_from - 1, start_to - 1])),
                    tuple(sorted([prev_from - 1, prev_to - 1]))]))
        if new_stem not in stems:
            stems += [new_stem]
    if prev_to == 0:
        new_bulge = ((last_paired - 1, prev_from - 1))
        bulges += [new_bulge]

    self.from_stems_and_bulges(stems, bulges)
def sort_defines(self):
"""
Sort the defines of interior loops and stems so that the 5' region
is always first.
"""
for k in self.defines.keys():
d = self.defines[k]
if len(d) == 4:
if d[0] > d[2]:
new_d = [d[2], d[3], d[0], d[1]]
self.defines[k] = new_d
def to_dotbracket_string(self):
    """
    Convert the BulgeGraph representation to a dot-bracket string
    and return it.

    :return: A dot-bracket representation of this BulgeGraph
    """
    pair_table = self.to_pair_table()
    return pairtable_to_dotbracket(pair_table)
def sorted_stem_iterator(self):
"""
Iterate over a list of the stems sorted by the lowest numbered
nucleotide in each stem.
"""
stems = [d for d in self.defines if d[0] == 's']
stems.sort(key=lambda s: self.defines[s][0])
for s in stems:
yield s
def is_single_stranded(self, node):
    """
    Does this node represent a single-stranded region?

    Single stranded regions are five-prime and three-prime unpaired
    regions, multiloops, and hairpins

    :param node: The name of the node
    :return: True if yes, False if no
    """
    return node[0] in ('f', 't', 'm', 'h')
def get_node_dimensions(self, node):
"""
Return the dimensions of a node.
If the node is a stem, then the dimensions will be l where l is
the length of the stem.
Otherwise, see get_bulge_dimensions(node)
:param node: The name of the node
:return: A pair containing its dimensions
"""
if node[0] == 's':
return (self.stem_length(node), self.stem_length(node))
"""
return (self.defines[node][1] - self.defines[node][0] + 1,
self.defines[node][1] - self.defines[node][0] + 1)
"""
else:
return self.get_bulge_dimensions(node)
def adjacent_stem_pairs_iterator(self):
"""
Iterate over all pairs of stems which are separated by some element.
This will always yield triples of the form (s1, e1, s2) where s1 and
s2 are the stem identifiers and e1 denotes the element that separates
them.
"""
for d in self.defines.keys():
if len(self.edges[d]) == 2:
edges = list(self.edges[d])
if edges[0][0] == 's' and edges[1][0] == 's':
yield (edges[0], d, edges[1])
def stem_bp_iterator(self, stem):
"""
Iterate over all the base pairs in the stem.
"""
d = self.defines[stem]
stem_length = self.stem_length(stem)
for i in range(stem_length):
yield (d[0] + i, d[3] - i)
def get_connected_residues(self, s1, s2):
    """
    Get the nucleotides which are connected by the element separating
    s1 and s2. They should be adjacent stems.

    The connected nucleotides are those which are spanned by a single
    interior loop or multiloop. In the case of an interior loop, this
    function will return a list of two tuples and in the case of multiloops
    if it will be a list of one tuple.

    If the two stems are not separated by a single element, then return
    an empty list.
    """
    # sort the stems according to the number of their first nucleotide
    stems = [s1, s2]
    stems.sort(key=lambda x: self.defines[x][0])

    c1 = self.edges[s1]
    c2 = self.edges[s2]

    # find out which edges they share
    common_edges = c1.intersection(c2)

    if len(common_edges) == 0:
        # not connected
        return []

    if len(common_edges) > 1:
        raise Exception("Too many connections between the stems")

    # the element linking the two stems
    conn = list(common_edges)[0]

    # find out the sides of the stems that face the bulge
    (s1b, s1e) = self.get_sides(s1, conn)
    (s2b, s2e) = self.get_sides(s2, conn)

    # get the nucleotides on the side facing the stem
    s1_nucleotides = self.get_side_nucleotides(s1, s1b)
    s2_nucleotides = self.get_side_nucleotides(s2, s2b)

    # find out the distances between all the nucleotides flanking
    # the bulge
    dists = []
    for n1 in s1_nucleotides:
        for n2 in s2_nucleotides:
            dists += [(abs(n2 - n1), n1, n2)]
    dists.sort()

    # return the ones which are closest to each other
    if conn[0] == 'i':
        # interior loop: one connected pair per strand
        return sorted([sorted(dists[0][1:]), sorted(dists[1][1:])])
    else:
        # multiloop: only the single closest pair is connected
        return sorted([sorted(dists[0][1:])])
def get_side_nucleotides(self, stem, side):
    """
    Get the nucleotide numbers on the given side of
    the stem. Side 0 corresponds to the 5' end of the
    stem whereas side 1 corresponds to the 3' side
    of the stem.

    :param stem: The name of the stem
    :param side: Either 0 or 1, indicating the 5' or 3' end of the stem
    :return: A tuple of the nucleotide numbers on the given side of
             the stem.
    :raises Exception: if side is neither 0 nor 1
    """
    if side == 0:
        return (self.defines[stem][0], self.defines[stem][3])
    elif side == 1:
        return (self.defines[stem][1], self.defines[stem][2])

    # BUG FIX: the original interpolated (stem, side) into the format
    # string, but stem is a string and side an int, so building the
    # message itself raised a TypeError instead of this Exception.
    raise Exception("Invalid side (%d) for the stem (%s)." % (side, stem))
def get_any_sides(self, e1, e2):
    """
    Get the side of e1 that e2 is on. The only difference from get_sides
    is that e1 does not have to be a stem: whichever of the two elements
    is a stem is passed to get_sides, and the result is flipped when it
    was e2.

    0 means e2 sits on the low-numbered side, 1 on the high-numbered side.

    :param e1: The name of the first element.
    :param e2: The name of the second element.
    :return: (side of e1 next to e2, side of e2 next to e1), or None when
             neither element is a stem.
    """
    if e1[0] == 's':
        return self.get_sides(e1, e2)
    if e2[0] == 's':
        flipped = self.get_sides(e2, e1)
        return (flipped[1], flipped[0])
    return None
def get_sides(self, s1, b):
    """
    Get the side of s1 that is next to b.

    s1e -> s1b -> b

    :param s1: The stem.
    :param b: The bulge.
    :return: (s1b, s1e): the stem side facing the bulge (0 = 5' end,
             1 = 3' end) and the side facing away from it.
    :raises Exception: if b is not adjacent to s1.
    """
    s1d = self.defines[s1]
    bd = self.defines[b]

    # if the bulge is a length 0, multiloop then use the adjacent
    # stem to determine its side
    if len(bd) == 0:
        edges = self.edges[b]
        for e in edges:
            if e != s1:
                bd = self.defines[e]
                break

    # FIX: xrange() is Python-2-only; range() behaves identically here.
    # FIX: initialise s1b so a non-adjacent bulge raises a clear error
    # below instead of an incidental NameError at the `if s1b == 0` test.
    s1b = None
    for i in range(4):
        for k in range(len(bd)):
            if s1d[i] - bd[k] == 1:
                # bulge residue immediately precedes stem corner i
                if i == 0:
                    s1b = 0
                    break
                if i == 2:
                    s1b = 1
                    break
            elif s1d[i] - bd[k] == -1:
                # bulge residue immediately follows stem corner i
                if i == 1:
                    s1b = 1
                    break
                if i == 3:
                    s1b = 0
                    break

    if s1b is None:
        raise Exception("Bulge %s is not adjacent to stem %s" % (b, s1))

    if s1b == 0:
        s1e = 1
    else:
        s1e = 0

    return (s1b, s1e)
def get_sides_plus(self, s1, b):
    """
    Get the side of s1 that is next to b.

    s1e -> s1b -> b

    :param s1: The stem.
    :param b: The bulge.
    :return: A tuple (stem corner index 0..3 that touches the bulge,
             index into the bulge define of the touching residue).
    :raises Exception: if no residue of b is adjacent to a corner of s1.
    """
    s1d = self.defines[s1]
    bd = self.defines[b]

    # a zero-length multiloop has no residues of its own; borrow the
    # define of the element on its other side to locate it
    if len(bd) == 0:
        edges = self.edges[b]

        for e in edges:
            if e != s1:
                bd = self.defines[e]
                break

    # FIX: xrange() is Python-2-only; range() behaves identically here.
    for k in range(len(bd)):
        # before the stem on the 5' strand
        if s1d[0] - bd[k] == 1:
            return (0, k)
        # after the stem on the 5' strand
        elif bd[k] - s1d[1] == 1:
            return (1, k)
        # before the stem on the 3' strand
        elif s1d[2] - bd[k] == 1:
            return (2, k)
        # after the stem on the 3' strand
        elif bd[k] - s1d[3] == 1:
            return (3, k)

    raise Exception("Faulty multiloop %s connecting %s"
                    % (" ".join(map(str, bd)),
                       " ".join(map(str, s1d))))
def stem_side_vres_to_resn(self, stem, side, vres):
    """
    Map a virtual residue index on one strand of a stem to its actual
    residue number (side 0 counts forward from the 5' strand start,
    side 1 counts backward from the 3' strand end).
    """
    stem_def = self.defines[stem]
    return stem_def[0] + vres if side == 0 else stem_def[3] - vres
def stem_iterator(self):
    """Iterate over the names of all stems ('s' elements) in the structure."""
    for name in self.defines:
        if name[0] == 's':
            yield name
def hloop_iterator(self):
    """Iterate over the names of all hairpin loops ('h' elements)."""
    for name in self.defines:
        if name[0] == 'h':
            yield name
def mloop_iterator(self):
    """Iterate over the names of all multiloop segments ('m' elements)."""
    for name in self.defines:
        if name[0] == 'm':
            yield name
def iloop_iterator(self):
    """Iterate over the names of all interior loops ('i' elements)."""
    for name in self.defines:
        if name[0] == 'i':
            yield name
def floop_iterator(self):
    """Yield 'f1' (the 5' unpaired region) if the structure has one."""
    if 'f1' in self.defines:
        yield 'f1'
def tloop_iterator(self):
    """Yield 't1' (the 3' unpaired region) if the structure has one."""
    if 't1' in self.defines:
        yield 't1'
def pairing_partner(self, nucleotide_number):
    """
    Return the base pairing partner of the nucleotide at position
    nucleotide_number, or None if that nucleotide is unpaired.

    :param nucleotide_number: The position of the query nucleotide in the
                              sequence.
    :return: The number of the paired nucleotide, or None.
    """
    for stem in self.stem_iterator():
        for five_p, three_p in self.stem_bp_iterator(stem):
            if nucleotide_number == five_p:
                return three_p
            if nucleotide_number == three_p:
                return five_p
    return None
def connections(self, bulge):
    """
    Return the edges that connect to a bulge in a list form,
    sorted by lowest res number of the connection.
    """
    def sort_key(x):
        # order connected elements by their lowest adjacent residue number
        if len(self.defines[x]) > 0:
            if self.defines[x][0] == 1:
                # special case for stems at the beginning since there is no
                # adjacent nucleotide 0
                return 0
        return list(self.define_residue_num_iterator(x, adjacent=True))[0]

    connections = list(self.edges[bulge])
    connections.sort(key=sort_key)

    return connections
def get_define_seq_str(self, d, adjacent=False):
    """
    Get an array containing the sequences for the given define.

    Non-stem sequences will contain the sequence without the overlapping
    stem residues that are part of the define.

    :param d: The define for which to get the sequences
    :param adjacent: include the flanking stem residue on each side
    :return: An array containing the sequences corresponding to the defines
    """
    define = self.defines[d]
    # pair consecutive define entries into (start, end) ranges
    ranges = zip(*[iter(define)] * 2)
    c = self.connections(d)

    if d[0] == 'i':
        # interior loop: one sequence per strand, delimited by the two
        # flanking stems (c[0] is the 5'-most stem)
        s1 = self.defines[c[0]]
        s2 = self.defines[c[1]]
        if adjacent:
            return [self.seq[s1[1] - 1:s2[0]],
                    self.seq[s2[3] - 1:s1[2]]]
        else:
            return [self.seq[s1[1]:s2[0] - 1],
                    self.seq[s2[3]:s1[2] - 1]]
    if d[0] == 'm':
        # multiloop: the single-stranded stretch between the corners of
        # the two connected stems
        s1 = self.defines[c[0]]
        s2 = self.defines[c[1]]

        i1 = s1[self.get_sides_plus(c[0], d)[0]]
        i2 = s2[self.get_sides_plus(c[1], d)[0]]

        (i1, i2) = (min(i1, i2), max(i1, i2))

        if adjacent:
            return [self.seq[i1 - 1:i2]]
        else:
            return [self.seq[i1:i2 - 1]]
    else:
        # stems, hairpins and 5'/3' regions: slice each (start, end) range
        seqs = []
        for r in ranges:
            if d[0] == 's':
                seqs += [self.seq[r[0] - 1:r[1]]]
            else:
                if adjacent:
                    if r[0] > 1:
                        seqs += [self.seq[r[0] - 2:r[1] + 1]]
                    else:
                        # there is no residue before position 1
                        seqs += [self.seq[r[0] - 1:r[1] + 1]]
                else:
                    seqs += [self.seq[r[0] - 1:r[1]]]

        return seqs
def get_stem_direction(self, s1, s2):
    """
    Return 0 if the lowest numbered residue in s1 is lower than the lowest
    numbered residue in s2, otherwise 1.
    """
    return 0 if self.defines[s1][0] < self.defines[s2][0] else 1
def get_multiloop_side(self, m):
    """
    Find out which strand a multiloop is on. An example of a situation in
    which the loop can be on both sides can be seen in the three-stemmed
    structure below:

        (.().().)

    In this case, the first multiloop section comes off of the 5' strand of
    the first stem (the prior stem is always the one with a lower numbered
    first residue). The second multiloop section comes off the 3' strand of
    the second stem and the third loop comes off the 3' strand of the third
    stem.

    :return: the (get_sides_plus) corner indices of the two flanking stems.
    """
    stems = self.connections(m)
    return (self.get_sides_plus(stems[0], m)[0],
            self.get_sides_plus(stems[1], m)[0])
def get_strand(self, multiloop):
    """
    Get the strand on which this multiloop is located.

    :param multiloop: The name of the multiloop
    :return: 0 for being on the lower numbered strand, 1 for the higher
             numbered strand, 2 otherwise.
    """
    ctype = abs(self.connection_type(multiloop, self.connections(multiloop)))
    if ctype == 2:
        return 1
    if ctype == 3:
        return 0
    return 2
def get_bulge_dimensions(self, bulge):
    """
    Return the dimensions of the bulge.

    If it is single stranded it will be (0, x). Otherwise it will be (x, y).

    :param bulge: The name of the bulge.
    :return: A pair containing its dimensions
    """
    bulge_def = self.defines[bulge]
    flanking = self.connections(bulge)
    kind = bulge[0]

    if kind == 'i':
        # interior loop: the unpaired gap lengths on the 5' and 3' strands
        # between the two flanking stems, e.g.
        #   s1 1 3
        #      23 25
        #   s2 5 10
        #      15 20
        stem1 = self.defines[flanking[0]]
        stem2 = self.defines[flanking[1]]
        dims = (stem2[0] - stem1[1] - 1, stem1[2] - stem2[3] - 1)
    if kind == 'm':
        # multiloops pair their strand length with a sentinel of 1000
        dims = (bulge_def[1] - bulge_def[0] + 1, 1000) if len(bulge_def) == 2 else (0, 1000)
    if kind in ('f', 't', 'h'):
        # 5' region, 3' region and hairpins are single stranded
        dims = (bulge_def[1] - bulge_def[0] + 1, -1)

    return dims
def get_node_from_residue_num(self, base_num, seq_id=False):
    """
    Iterate over the defines and see which one encompasses this base.

    :param base_num: the residue number to look up (or a seq id when
        seq_id=True)
    :param seq_id: match against self.seq_ids instead of raw numbers
    :return: the name of the element containing the base
    :raises Exception: if no define covers the base
    """
    for key in self.defines.keys():
        define = self.defines[key]

        for i in range(0, len(define), 2):
            a = [int(define[i]), int(define[i + 1])]
            a.sort()

            if seq_id:
                # FIX: the original reused the loop variable `i` here,
                # shadowing the outer range index; use a distinct name.
                for j in range(a[0], a[1] + 1):
                    if self.seq_ids[j - 1][1] == base_num:
                        return key
            else:
                if a[0] <= base_num <= a[1]:
                    return key

    raise Exception(
        "Base number %d not found in the defines." % (base_num))
def get_length(self, vertex):
    """
    Get the minimum length of a vertex.

    If it's a stem, then the result is its length (in base pairs).
    If it's a bulge, then the length is the smaller of its dimensions
    (interior loops use the average of both dimensions instead).

    :param vertex: The name of the vertex.
    """
    if vertex[0] == 's':
        d = self.defines[vertex]
        return abs(d[1] - d[0]) + 1

    if len(self.edges[vertex]) == 1:
        # single-connection element (hairpin, 5'/3' region)
        d = self.defines[vertex]
        return d[1] - d[0] + 1

    dims = sorted(self.get_bulge_dimensions(vertex))
    if vertex[0] == 'i':
        return sum(dims) / float(len(dims))
    return min(dims)
def get_flanking_region(self, bulge_name, side=0):
    """
    If a bulge is flanked by stems, return the lowest residue number
    of the previous stem and the highest residue number of the next
    stem.

    :param bulge_name: The name of the bulge
    :param side: The side of the bulge (indicating the strand)
    :return: a (first_residue, last_residue) tuple, or (None, None) for
        element types not handled below
    """
    c = self.connections(bulge_name)

    if bulge_name[0] == 'h':
        # hairpin: flanked by a single stem
        s1 = self.defines[c[0]]
        return (s1[0], s1[3])

    s1 = self.defines[c[0]]
    s2 = self.defines[c[1]]

    if bulge_name[0] == 'i':
        # interior loop
        if side == 0:
            return (s1[0], s2[1])
        else:
            return (s2[2], s1[3])
    elif bulge_name[0] == 'm':
        # multiloop: use the stem corners on the multiloop's strand
        ss = self.get_multiloop_side(bulge_name)
        st = [s1, s2]

        ends = []

        # go through the two sides and stems and pick
        # the other end of the same strand
        for i, s in enumerate(ss):
            if s == 0:
                ends += [st[i][1]]
            elif s == 1:
                ends += [st[i][0]]
            elif s == 2:
                ends += [st[i][3]]
            elif s == 3:
                ends += [st[i][2]]
            else:
                raise Exception("Weird multiloop sides: %s" %
                                bulge_name)

        ends.sort()
        return tuple(ends)

    return (None, None)
def get_flanking_sequence(self, bulge_name, side=0):
    """
    Return the sequence spanning the flanking region of the given bulge.

    :raises Exception: when the graph carries no sequence.
    """
    if len(self.seq) == 0:
        raise Exception(
            "No sequence present in the bulge_graph: %s" % (self.name))

    start, end = self.get_flanking_region(bulge_name, side)
    return self.seq[start - 1:end]
def get_flanking_handles(self, bulge_name, side=0):
    """
    Get the indices of the residues for fitting bulge regions.

    So if there is a loop like so (between residues 7 and 16)::

        (((...))))
        7890123456
          ^   ^

    Then residues 9 and 13 will be used as the handles against which
    to align the fitted region.

    In the fitted region, the residues (2,6) will be the ones that will
    be aligned to the handles.

    :return: (orig_chain_res1, orig_chain_res2, flanking_res1, flanking_res2),
        or None for element types not handled below
    """
    f1 = self.get_flanking_region(bulge_name, side)
    c = self.connections(bulge_name)

    if bulge_name[0] == 'h':
        # hairpin: both handles come from the single flanking stem
        s1 = self.defines[c[0]]
        ab = [s1[1], s1[2]]
        return (ab[0], ab[1], ab[0] - f1[0], ab[1] - f1[0])

    s1 = self.defines[c[0]]
    s2 = self.defines[c[1]]

    if bulge_name[0] == 'm':
        # multiloop: handles are the stem corners on the loop's strand
        sides = self.get_multiloop_side(bulge_name)
        ab = [s1[sides[0]], s2[sides[1]]]
        ab.sort()

        return (ab[0], ab[1], ab[0] - f1[0], ab[1] - f1[0])

    if bulge_name[0] == 'i':
        # interior loop: pick the strand according to `side`
        if side == 0:
            ab = [s1[1], s2[0]]
        else:
            ab = [s2[3], s1[2]]

        return (ab[0], ab[1], ab[0] - f1[0], ab[1] - f1[0])

    # probably still have to include the 5' and 3' regions, but that
    # will come a little later
    return None
def are_adjacent_stems(self, s1, s2, multiloops_count=True):
    """
    Are two stems separated by only one element? If multiloops should not
    count as the linking element, pass multiloops_count=False.

    :param s1: The name of the first stem
    :param s2: The name of the second stem
    :param multiloops_count: Whether to count multiloops as an edge linking
                             two stems
    """
    return any(s2 in self.edges[link]
               for link in self.edges[s1]
               if multiloops_count or link[0] != 'm')
def random_subgraph(self, subgraph_length=None):
    """
    Return a random subgraph of this graph.

    :param subgraph_length: desired number of elements; a random size is
        drawn when None.
    :return: A list containing the nodes comprising a random subgraph
    """
    if subgraph_length is None:
        subgraph_length = random.randint(1, len(self.defines.keys()))

    # FIX: random.choice() needs an indexable sequence; dict.keys() is a
    # non-indexable view on Python 3, so materialise it (harmless on Py2).
    start_node = random.choice(list(self.defines.keys()))
    curr_length = 0
    visited = set()
    next_nodes = [start_node]
    new_graph = []

    while curr_length < subgraph_length:
        curr_node = random.choice(next_nodes)
        if curr_node[0] == 'i' or curr_node[0] == 'm':
            # if it's an interior loop or a multiloop, then we have to
            # add the adjacent stems
            for e in self.edges[curr_node]:
                if e in new_graph:
                    continue
                visited.add(e)
                new_graph += [e]
                next_nodes += list(self.edges[e])
                curr_length += 1
        visited.add(curr_node)
        next_nodes += list(self.edges[curr_node])
        # never revisit a node
        next_nodes = [n for n in next_nodes if n not in visited]
        new_graph += [curr_node]
        curr_length += 1  # self.element_length(curr_node)

    return new_graph
def same_stem_end(self, sd):
    """
    Return the index of the define that is on the same end of the
    stem as the index sd (0 <-> 3, 1 <-> 2; anything else maps to 0).

    :param sd: An index into a define.
    :return: The index pointing to the nucleotide on the other strand
             on the same side of the stem.
    """
    return {0: 3, 1: 2, 2: 1}.get(sd, 0)
def get_resseqs(self, define, seq_ids=True):
    """
    Return the residue ids of the nucleotides in this element, one list
    per strand.

    :param define: The name of this element.
    :param seq_ids: look up ids in self.seq_ids instead of returning the
        raw residue numbers.
    :return: a list of per-strand lists of residue ids.
    """
    strands = []
    bounds = iter(self.defines[define])
    for start, end in zip(bounds, bounds):
        if seq_ids:
            strands.append([self.seq_ids[x - 1]
                            for x in range(start, end + 1)])
        else:
            strands.append(list(range(start, end + 1)))
    return strands
def connected_stem_iterator(self):
    """
    Iterate over all pairs of connected stems as (stem1, link, stem2),
    where link is the multiloop or interior loop joining them.
    """
    for link in it.chain(self.mloop_iterator(), self.iloop_iterator()):
        neighbors = list(self.edges[link])
        yield (neighbors[0], link, neighbors[1])
def get_mst(self):
    """
    Create a minimum spanning tree from this BulgeGraph. This is useful
    for constructing a structure where each section of a multiloop is
    sampled independently and we want to introduce a break at the largest
    multiloop section.

    :return: a set of element names: all stems and 5'/3' regions, plus
        the loops chosen to connect them.
    """
    # lower priority value = preferred as a connector
    priority = {'s': 1, 'i': 2, 'm': 3, 'f': 4, 't': 5}

    # candidate connectors (interior loops and multiloops), cheapest first:
    # by element-type priority, then by their smaller dimension
    edges = sorted(it.chain(self.mloop_iterator(),
                            self.iloop_iterator()),
                   key=lambda x: (priority[x[0]], min(self.get_node_dimensions(x))))

    # all stems and terminal regions are unconditionally part of the tree
    mst = set(it.chain(self.stem_iterator(),
                       self.floop_iterator(),
                       self.tloop_iterator()))

    # store all of the disconnected trees
    forest = [set([m]) for m in mst]

    # get the tree containing a particular element
    def get_tree(elem):
        for t in forest:
            if elem in t:
                return t

    # Kruskal-style: add a connector whenever it joins two distinct trees
    while len(edges) > 0:
        conn = edges.pop(0)
        neighbors = list(self.edges[conn])

        # get the trees containing the neighbors of this node
        # the node should be an interior loop or multiloop so
        # the neighbors should necessarily be stems, 5' or 3'
        t1 = get_tree(neighbors[0])
        t2 = get_tree(neighbors[1])

        if len(set.intersection(t1, t2)) == 0:
            # if this node connects two disparate trees, then add it to the
            # mst
            new_tree = t1.union(t2)
            forest.remove(t1)
            forest.remove(t2)
            forest.append(new_tree)

            mst.add(conn)

    return mst
def traverse_graph(self):
    """
    Traverse the graph to get the angle types. The angle type depends on
    which corners of the stem are connected by the multiloop or internal
    loop.

    Walks the minimum spanning tree starting at stem 's0', recording for
    every visited loop a (prev_stem, loop, next_stem) triple in
    self.build_order and the path to each element in self.build_paths.

    :return: the build order list
    """
    if self.mst is None:
        self.mst = self.get_mst()

    build_order = []
    to_visit = [('s0', 'start')]
    visited = set(['s0'])
    build_paths = col.defaultdict(list)

    while len(to_visit) > 0:
        # always expand the pending node with the smallest dimension first
        to_visit.sort(key=lambda x: min(self.get_node_dimensions(x[0])))
        (current, prev) = to_visit.pop(0)

        for e in self.edges[current]:
            if e not in visited and e in self.mst:
                # make sure the node hasn't been visited
                # and is in the minimum spanning tree
                to_visit.append((e, current))

                build_paths[e] += [e]
                build_paths[e] += build_paths[current]

                visited.add(e)

        if current[0] != 's' and len(self.edges[current]) == 2:
            # multiloop or interior loop
            # overkill method of getting the stem that isn't
            # equal to prev
            next_stem = set.difference(self.edges[current],
                                       set([prev]))
            build_order += [(prev, current, list(next_stem)[0])]

    self.build_paths = build_paths
    self.build_order = build_order

    return build_order
def set_angle_types(self):
    """
    Fill in self.ang_types from the build order, computing the build
    order first if it has not been determined yet.
    """
    if self.build_order is None:
        self.traverse_graph()

    self.ang_types = {}
    for prev_stem, loop, next_stem in self.build_order:
        self.ang_types[loop] = self.connection_type(loop, [prev_stem, next_stem])
def get_angle_type(self, bulge):
    """
    Return what type of angle this bulge is, based on the way this
    would be built using a breadth-first traversal along the minimum
    spanning tree; None if the bulge has no recorded angle type.
    """
    if self.ang_types is None:
        self.set_angle_types()
    return self.ang_types.get(bulge)
def is_node_pseudoknot(self, d):
    """
    Is a particular multiloop part of a pseudoknot? True exactly when its
    connection type has absolute value 5.
    """
    return abs(self.connection_type(d, self.connections(d))) == 5
def is_loop_pseudoknot(self, loop):
    """
    Is a particular loop a pseudoknot?

    A loop is considered knot-free only if its multiloop segments exhibit
    exactly the three connection types 2, 3 and 4; any other connection
    type (or an incomplete set) marks it as a pseudoknot.

    :param loop: A list of elements that are part of the loop.
    :return: Either True or False
    """
    allowed = (2, 3, 4)
    seen = set()
    for elem in loop:
        if elem[0] != 'm':
            continue

        ctype = self.connection_type(elem, self.connections(elem))
        if ctype not in allowed:
            return True
        seen.add(ctype)
        if len(seen) == 3:
            return False
    return True
def is_pseudoknot(self):
    """Return True if any multiloop of this structure is part of a pseudoknot."""
    return any(self.is_node_pseudoknot(m) for m in self.mloop_iterator())
'''
def to_networkx(self):
"""
Convert this graph to a networkx representation. This representation
will contain all of the nucleotides as nodes and all of the base pairs
as edges as well as the adjacent nucleotides.
"""
import networkx as nx
G = nx.Graph()
residues = []
for d in self.defines:
prev = None
for r in self.define_residue_num_iterator(d):
G.add_node(r)
residues += [r]
residues.sort()
prev = None
for r in residues:
if prev is not None:
G.add_edge(prev, r)
prev = r
for s in self.stem_iterator():
for (f, t) in self.stem_bp_iterator(s):
G.add_edge(f, t)
return G
'''
def ss_distance(self, e1, e2):
    '''
    Calculate the distance between two elements (e1, e2)
    along the secondary structure. The distance only starts
    at the edge of each element, and is the closest distance
    between the two elements.

    :param e1: The name of the first element
    :param e2: The name of the second element
    :return: The integer distance between the two along the secondary
             structure.
    '''
    # Collect the corner nucleotides of each element. Consecutive residue
    # numbers are grouped with the enumerate/groupby trick from:
    # http://stackoverflow.com/questions/2154249/identify-groups-of-continuous-numbers-in-a-list
    # Because adjacent nucleotides are included, the corners may be one
    # residue too close; this is compensated below.
    # FIX: ``lambda (index, item): ...`` (tuple-parameter unpacking) is
    # Python-2-only syntax and a SyntaxError on Python 3; subscripting the
    # pair is behaviourally identical.
    d1_corners = []
    d2_corners = []

    for key, group in it.groupby(
            enumerate(self.define_residue_num_iterator(e1, adjacent=True)),
            lambda pair: pair[0] - pair[1]):
        d1_corners += map(oper.itemgetter(1), group)

    for key, group in it.groupby(
            enumerate(self.define_residue_num_iterator(e2, adjacent=True)),
            lambda pair: pair[0] - pair[1]):
        d2_corners += map(oper.itemgetter(1), group)

    import networkx as nx

    # NOTE(review): to_networkx is commented out elsewhere in this file —
    # confirm it is actually defined on this class before relying on
    # ss_distance.
    G = self.to_networkx()
    path_lengths = []
    for c1, c2 in it.product(d1_corners, d2_corners):
        path_lengths += [nx.shortest_path_length(G, c1, c2)]

    if e1 == e2:
        return 0

    if e1 in self.edges[e2]:
        return min(path_lengths) + 1

    # make some exceptions for edges which have length 0
    common_edges = set.intersection(self.edges[e1], self.edges[e2])
    for e in common_edges:
        if e[0] == 'i' and len(self.defines[e]) < 4:
            return min(path_lengths) + 1
        elif e[0] == 'm' and len(self.defines[e]) < 2:
            return min(path_lengths) + 1

    return min(path_lengths) + 2
def get_position_in_element(self, resnum):
    """
    Return (position, span) of a residue within the secondary-structure
    element that contains it, or None when the residue falls in no range
    of the element's define.

    NOTE(review): the hairpin branch's ``/ 2`` is integer division under
    Python 2 (this file's dialect) but yields a float on Python 3 —
    confirm which is intended before porting.
    """
    node = self.get_node_from_residue_num(resnum)

    if node[0] == 's':
        # stems: offset from the strand's first residue, plus stem span
        if self.defines[node][0] <= resnum <= self.defines[node][1]:
            return resnum - self.defines[node][0], self.defines[node][1] - self.defines[node][0]
        else:
            return abs(resnum - self.defines[node][3]), self.defines[node][1] - self.defines[node][0]
    elif node[0] == 'i':
        # interior loops: measure relative to the two flanking stems
        s0, s1 = self.connections(node)
        if self.defines[s0][1] <= resnum <= self.defines[s1][0]:
            return resnum - self.defines[s0][1], self.defines[s1][0] - self.defines[s0][1]
        else:
            return abs(resnum - self.defines[s0][2]) - 1, self.defines[s0][2] - self.defines[s1][3]
    elif node[0] == 'h':
        # hairpins: distance to the nearer loop end
        pos1 = resnum - self.defines[node][0]
        pos2 = abs(resnum - self.defines[node][1])
        return min(pos1, pos2) + 1, (self.defines[node][1] - self.defines[node][0] + 2) / 2

    # all other elements: locate the (start, end) range containing resnum
    i = 0
    while i < len(self.defines[node]):
        s = self.defines[node][i]
        e = self.defines[node][i + 1]

        if s <= resnum <= e:
            return resnum - s + 1, e - s + 2

        i += 2

    return None
def connected(self, n1, n2):
    '''
    Are the elements n1 and n2 connected?

    @param n1: A node in the BulgeGraph
    @param n2: Another node in the BulgeGraph
    @return: True or False indicating whether they are connected.
    '''
    if n1 in self.edges[n2] or n2 in self.edges[n1]:
        return True

    # two multiloops can be considered connected if they both
    # link to the same side of the same stem
    if n1[0] == 'm' and n2[0] == 'm':
        common_stems = list(
            set.intersection(self.edges[n1], self.edges[n2]))
        if len(common_stems) == 0:
            return False

        common_stem = common_stems[0]

        (s1c, b1c) = self.get_sides_plus(common_stem, n1)
        # FIX: the original rebound b1c here as well; the bulge corner is
        # unused, so give it a distinct name to avoid the confusing clobber.
        (s2c, b2c) = self.get_sides_plus(common_stem, n2)

        # stem corners 0 and 3 lie at the stem's 5' end, 1 and 2 at its
        # 3' end; sharing an end means the two multiloops touch
        if sorted([s1c, s2c]) == [0, 3] or sorted([s1c, s2c]) == [1, 2]:
            return True

    return False
def bg_from_subgraph(bg, sg):
    """
    Create a BulgeGraph from a list containing the nodes
    to take from the original.

    WARNING: The sequence information is not copied

    NOTE(review): this takes ``bg`` instead of ``self`` although it sits at
    class level — confirm whether it is meant to be a free function.
    """
    nbg = BulgeGraph()
    nbg.seq_length = 0

    for d in sg:
        # copy the define
        nbg.defines[d] = bg.defines[d][::]

    # copy edges only if they connect elements which
    # are also in the new structure
    # NOTE(review): this walks *all* edges of the original graph, so an
    # element outside ``sg`` can still gain edge entries in ``nbg.edges``
    # when it connects to something in ``sg`` — verify intent.
    for e in bg.edges.keys():
        for conn in bg.edges[e]:
            if conn in sg:
                nbg.edges[e].add(conn)

    return nbg
| {
"alphanum_fraction": 0.5148986462,
"author": null,
"avg_line_length": 31.6935300795,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "529075584e7e7f5b22b46ff655d558470509c089",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "1f29d4c9d458edb2bd62a98e57254d78a1f2093f",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "zaidurrehman/EDeN",
"max_forks_repo_path": "eden/modifier/rna/lib_forgi.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "1f29d4c9d458edb2bd62a98e57254d78a1f2093f",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "zaidurrehman/EDeN",
"max_issues_repo_path": "eden/modifier/rna/lib_forgi.py",
"max_line_length": 105,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "1f29d4c9d458edb2bd62a98e57254d78a1f2093f",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "zaidurrehman/EDeN",
"max_stars_repo_path": "eden/modifier/rna/lib_forgi.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 20585,
"path": null,
"reason": "import networkx",
"repo": null,
"save_path": null,
"sha": null,
"size": 83766
} |
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '5'
import cv2
import numpy as np
from maskrcnn_benchmark.config import cfg
from demo.predictor import ICDARDemo, RRPNDemo
from maskrcnn_benchmark.utils.visualize import vis_image, write_result_ICDAR_RRPN2polys, zip_dir
from maskrcnn_benchmark.data.datasets.irra_interface import get_irra_XXX
from PIL import Image
import time
import json
from tqdm import tqdm
from Pascal_VOC import eval_func
from link_boxes import merge
def res2json(result_dir):
    """
    Convert the per-image ``*.txt`` detection results in ``result_dir``
    into a single ``res.json`` file mapping 'EEE/<code>' to polygon lists.

    :param result_dir: directory containing the result text files
    :return: path of the written json file
    """
    res_list = os.listdir(result_dir)
    res_dict = {}
    for rf in tqdm(res_list):
        if rf[-4:] == '.txt':
            respath = os.path.join(result_dir, rf)
            # FIX: close the file handle instead of leaking it
            with open(respath, 'r') as res_file:
                reslines = res_file.readlines()
            # assumes a fixed 13-char filename prefix before the image
            # code — TODO confirm the result-file naming scheme
            reskey = 'EEE/' + rf[13:-4]
            # FIX: np.int was deprecated in NumPy 1.20 and removed in
            # NumPy >= 1.24; the builtin int is the documented replacement.
            res_dict[reskey] = [{'points': np.array(l.replace('\n', '').split(','), int).reshape(-1, 2).tolist()} for l in reslines]

    json_tarf = os.path.join(result_dir, 'res.json')

    if os.path.isfile(json_tarf):
        print('Json file found, removing it...')
        os.remove(json_tarf)

    # FIX: context manager guarantees the json file is flushed and closed
    with open(json_tarf, 'w') as j_f:
        json.dump(res_dict, j_f)
    print('json dump done', json_tarf)
    return json_tarf
def database_to_json(dataset_dir):
    """
    Dump the ground-truth validation annotations of the 'EEE' split into
    ``data_cache/gt_val_irra.json`` and return the json path. The cached
    file is reused when it already exists.

    :param dataset_dir: root directory of the dataset
    :return: path of the ground-truth json file
    """
    json_name = 'data_cache/gt_val_irra.json'
    # FIX: check the cache *before* loading the database; the original
    # loaded the whole validation set and then discarded the work whenever
    # the cached json already existed.
    if os.path.isfile(json_name):
        print('json_name found, loading it...')
        return json_name

    database = get_irra_XXX('val', dataset_dir, 'EEE')

    data_dict = {}
    for data_item in database:
        # assumes the image basename carries a fixed 10-char prefix before
        # the code — TODO confirm the image naming scheme
        data_code = 'EEE/' + data_item['image'].split('/')[-1].split('.')[0][10:]
        print('data_code:', data_code)
        data_dict[data_code] = [{'points': pts.reshape(-1, 2).tolist(), 'transcription': '111'} for pts in data_item['polys']]

    # FIX: context manager closes the output file reliably
    with open(json_name, 'w') as j_f:
        json.dump(data_dict, j_f)
    print('json dump done', json_name)
    return json_name
# Detection configuration; the trailing commented paths are earlier experiments.
config_file = 'configs/irragular_det/e2e_faster_rcnn_R_50_C4_1x.yaml' #'#"configs/ICDAR2019_det_RRPN/e2e_rrpn_R_50_C4_1x_LSVT_val_4scales_angle_norm.yaml" #e2e_rrpn_R_50_C4_1x_ICDAR13_15_trial_test.yaml

# update the config options with the config file
cfg.merge_from_file(config_file)
# manual override some options
cfg.merge_from_list(["MODEL.DEVICE", "cuda"])
# cfg.freeze()
# cfg.MODEL.WEIGHT = 'models/IC-13-15-17-Trial/model_0155000.pth'

# vis: show predictions interactively instead of writing result files
vis = False
merge_box = cfg.TEST.MERGE_BOX

# results are written to results/<config-name>/<weight-name>[_merge_box]
result_dir = os.path.join('results', config_file.split('/')[-1].split('.')[0], cfg.MODEL.WEIGHT.split('/')[-1].split('.')[0])
if merge_box:
    result_dir += '_merge_box'

if not os.path.isdir(result_dir):
    os.makedirs(result_dir)

coco_demo = RRPNDemo(
    cfg,
    min_image_size=800,
    confidence_threshold=0.6,
)
# Candidate test datasets: image directory, ground-truth directory and the
# index range ('off') of images to evaluate.
dataset_name = 'IRRA_another_nofilter' #'#cfg.TEST.DATASET_NAME
testing_dataset = {
    'IRRA_another1': {
        'testing_image_dir': '../datasets/picture/',
        'gt_dir':'../datasets/TASK0407/coordinates/EEE/',
        'off': [0, 162]
    },
    'IRRA_another1_denoise': {
        'testing_image_dir': '../datasets/denoise_pic/',
        'gt_dir':'../datasets/TASK0407/coordinates/EEE/',
        'off': [0, 150]
    },
    'IRRA': {
        'testing_image_dir': '../datasets/TASK0407/imshow_picture/EEE',
        'gt_dir':'../datasets/TASK0407/coordinates/EEE/',
        'off': [0, 162]
    },
    'IRRA_another_nofilter': {
        'testing_image_dir': '../datasets/0514_nofilter/',
        'gt_dir':'../datasets/TASK0407/coordinates/EEE/',
        'off': [0, 162]
    },
    # NOTE(review): 'ArT' has no 'gt_dir' entry, so selecting it would raise
    # a KeyError at the gt_dir lookup below — add the key before using it.
    'ArT': {
        'testing_image_dir': '../datasets/ArT/ArT_detect_train/train_images',
        'off': [4000, 5603]
    },
}

image_dir = testing_dataset[dataset_name]['testing_image_dir']
gt_dir = testing_dataset[dataset_name]['gt_dir']
# vocab_dir = testing_dataset[dataset_name]['test_vocal_dir']
off_group = testing_dataset[dataset_name]['off']
# load image and then run prediction
# image_dir = '../datasets/ICDAR13/Challenge2_Test_Task12_Images/'
# imlist = os.listdir(image_dir)[off_group[0]:off_group[1]]

# ground-truth files, sorted so indexing by the 'off' range is stable
gtlist = os.listdir(gt_dir)
gtlist.sort()

print('************* META INFO ***************')
print('config_file:', config_file)
print('result_dir:', result_dir)
print('image_dir:', image_dir)
print('weights:', cfg.MODEL.WEIGHT)
print('merge_box:', merge_box)
print('***************************************')
# print('gtlist:', gtlist)

#num_images = len(imlist)
cnt = 0
num_images = off_group[1] - off_group[0]
# Run detection. The 'IRRA' dataset derives each image name from its
# ground-truth file ('nofilter_<code>.jpg'); any other dataset simply walks
# the image directory.
# NOTE(review): the two branches are near-identical copies differing only
# in how the image list is produced — consider factoring the per-image body
# into a helper function.
if dataset_name == 'IRRA':
    for idx in range(off_group[0], off_group[1]):
        gt_filename = gtlist[idx]
        gt_code = gt_filename.split('.')[0]
        image = 'nofilter_' + gt_code + '.jpg'
        impath = os.path.join(image_dir, image)
        # print('image:', impath)
        img = cv2.imread(impath)
        cnt += 1

        tic = time.time()
        predictions, bounding_boxes = coco_demo.run_on_opencv_image(img)
        toc = time.time()
        print('time cost:', str(toc - tic)[:6], '|', str(cnt) + '/' + str(num_images))
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

        bboxes_np = bounding_boxes.bbox.data.cpu().numpy()
        # presumably undoes the training-time box-margin scaling on the
        # (w, h) columns — confirm against the RRPN config
        bboxes_np[:, 2:4] /= cfg.MODEL.RRPN.GT_BOX_MARGIN

        if merge_box:
            # merge() appears to expect the 2:4 columns swapped, so they are
            # reversed around the call and restored afterwards
            bboxes_np_reverse = bboxes_np.copy()
            bboxes_np_reverse[:, 2:4] = bboxes_np_reverse[:, 3:1:-1]
            bboxes_np_reverse = merge(bboxes_np_reverse)
            bboxes_np_reverse[:, 2:4] = bboxes_np_reverse[:, 3:1:-1]
            bboxes_np = bboxes_np_reverse

        width, height = bounding_boxes.size

        if vis:
            # interactive inspection instead of writing result files
            pil_image = vis_image(Image.fromarray(img), bboxes_np)
            pil_image.show()
            time.sleep(20)
        else:
            write_result_ICDAR_RRPN2polys(image[:-4], bboxes_np, threshold=0.7, result_dir=result_dir, height=height, width=width)
            #im_file, dets, threshold, result_dir, height, width
        #cv2.imshow('win', predictions)
        #cv2.waitKey(0)
else:
    testing_img = os.listdir(image_dir)
    for imname in testing_img:
        # gt_filename = gtlist[idx]
        # gt_code = gt_filename.split('.')[0]
        # image = 'nofilter_' + gt_code + '.jpg'
        # impath = os.path.join(image_dir, image)
        # print('image:', impath)
        impath = os.path.join(image_dir, imname)
        img = cv2.imread(impath)
        cnt += 1

        tic = time.time()
        predictions, bounding_boxes = coco_demo.run_on_opencv_image(img)
        toc = time.time()
        print('time cost:', str(toc - tic)[:6], '|', str(cnt) + '/' + str(num_images))
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

        bboxes_np = bounding_boxes.bbox.data.cpu().numpy()
        bboxes_np[:, 2:4] /= cfg.MODEL.RRPN.GT_BOX_MARGIN

        if merge_box:
            bboxes_np_reverse = bboxes_np.copy()
            bboxes_np_reverse[:, 2:4] = bboxes_np_reverse[:, 3:1:-1]
            bboxes_np_reverse = merge(bboxes_np_reverse)
            bboxes_np_reverse[:, 2:4] = bboxes_np_reverse[:, 3:1:-1]
            bboxes_np = bboxes_np_reverse

        width, height = bounding_boxes.size

        if vis:
            pil_image = vis_image(Image.fromarray(img), bboxes_np)
            pil_image.show()
            time.sleep(20)
        else:
            write_result_ICDAR_RRPN2polys(imname[:-4], bboxes_np, threshold=0.7, result_dir=result_dir, height=height, width=width)
            #im_file, dets, threshold, result_dir, height, width
        #cv2.imshow('win', predictions)
        #cv2.waitKey(0)
# Dataset-specific evaluation of the written results.
# NOTE(review): given the dataset names configured above, only the 'IRRA'
# branch can ever match; 'IC15' and 'LSVT' look like leftovers from other
# experiments.
if dataset_name == 'IC15':
    zipfilename = os.path.join(result_dir, 'submit_' + config_file.split('/')[-1].split('.')[0] + '_' + cfg.MODEL.WEIGHT.split('/')[-1].split('.')[0] + '.zip')
    if os.path.isfile(zipfilename):
        print('Zip file exists, removing it...')
        os.remove(zipfilename)
    zip_dir(result_dir, zipfilename)
    # submit the zipped results to a locally running evaluation server
    comm = 'curl -i -F "submissionFile=@' + zipfilename + '" http://127.0.0.1:8080/evaluate'
    # print(comm)
    print(os.popen(comm, 'r'))
elif dataset_name == 'LSVT':
    # input_json_path = 'results/e2e_rrpn_R_50_C4_1x_LSVT_val/model_0190000/res.json'
    gt_json_path = '../datasets/LSVT/train_full_labels.json'
    # to json
    input_json_path = res2json(result_dir)
    eval_func(input_json_path, gt_json_path)
elif dataset_name == 'ArT':
    # input_json_path = 'results/e2e_rrpn_R_50_C4_1x_LSVT_val/model_0190000/res.json'
    gt_json_path = '../datasets/ArT/ArT_detect_train/train_labels.json'
    # to json
    input_json_path = res2json(result_dir)
    eval_func(input_json_path, gt_json_path)
elif dataset_name == 'IRRA':
    gt_json_path = database_to_json('../datasets/TASK0407/')
    # to json
    input_json_path = res2json(result_dir)
    eval_func(input_json_path, gt_json_path)
"alphanum_fraction": 0.6266321477,
"author": null,
"avg_line_length": 35.8225806452,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "c4ffbcaa09c9c28512e13bbd75ba29ed18777b76",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 4,
"max_forks_repo_forks_event_max_datetime": "2020-04-12T12:26:50.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-02-29T12:14:18.000Z",
"max_forks_repo_head_hexsha": "fd79b679044ea23fd9eb30691453ed0805d2e98b",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "dun933/FudanOCR",
"max_forks_repo_path": "model/maskrcnn_benchmark_architecture/demo/irra_infer.py",
"max_issues_count": 33,
"max_issues_repo_head_hexsha": "fd79b679044ea23fd9eb30691453ed0805d2e98b",
"max_issues_repo_issues_event_max_datetime": "2022-03-12T00:17:30.000Z",
"max_issues_repo_issues_event_min_datetime": "2020-12-10T19:15:39.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "dun933/FudanOCR",
"max_issues_repo_path": "model/maskrcnn_benchmark_architecture/demo/irra_infer.py",
"max_line_length": 203,
"max_stars_count": 25,
"max_stars_repo_head_hexsha": "e6b18b0eefaf832b2eb7198f5df79e00bd4cee36",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "JinGyeSetBirdsFree/FudanOCR",
"max_stars_repo_path": "model/maskrcnn_benchmark_architecture/demo/irra_infer.py",
"max_stars_repo_stars_event_max_datetime": "2020-04-24T07:56:06.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-02-29T12:14:10.000Z",
"num_tokens": 2400,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 8884
} |
#!/usr/bin/env python
# coding: utf-8
# # Fifth exercise: Non-Cartesian radial under-sampling
#
# In this notebook, you can play with the design parameters to regenerate different radial in-out patterns (so, we draw radial spokes over a rotating angle of $\pi$). You can play with the number of shots by changing the under-sampling factor.
#
# - Authors: Philippe Ciuciu (philippe.ciuciu@cea.fr)
# - Date: 04/02/2019
# - Target: [ISBI'19 tutorial](https://biomedicalimaging.org/2019/tutorials/) on **Recent advances in acquisition and reconstruction for Compressed Sensing MRI**
# - **Revision**: 01/06/2021 for ATSI MSc hands-on session at Paris-Saclay University.
# In[23]:
#DISPLAY BRAIN PHANTOM
get_ipython().run_line_magic('matplotlib', 'inline')
import numpy as np
import os.path as op
import os
import math
import cmath
import matplotlib.pyplot as plt
import sys
from mri.operators import NonCartesianFFT
from mri.operators.utils import convert_locations_to_mask, gridded_inverse_fourier_transform_nd
from pysap.data import get_sample_data
from skimage import data, img_as_float, io, filters
from modopt.math.metrics import ssim
# Legacy file-based loading, kept for reference:
#cwd = os.getcwd()
#dirimg_2d = op.join(cwd,"..","data")
#FOV = 0.2 #field of view parameter in m (ie real FOV = 20 x20 cm^2)
#pixelSize = FOV/img_size
#filename = "BrainPhantom" + str(img_size) + ".png"
#mri_filename = op.join(dirimg_2d, filename)
#mri_img = io.imread(mri_filename, as_gray=True)
# Load the bundled 2D T2* brain slice and display it.
mri_img = get_sample_data('2d-mri')
img_size = mri_img.shape[0]
plt.figure()
plt.title("T2* axial slice, size = {}".format(img_size))
if mri_img.ndim == 2:
    plt.imshow(mri_img, cmap=plt.cm.gray)
else:
    plt.imshow(mri_img)
plt.show()
# In[30]:
# set up the first shot
# rfactor is the under-sampling (acceleration) factor; the nb_shots spokes
# together cover a rotation angle of pi (in-out radial sampling).
rfactor = 8
nb_shots = math.ceil(img_size/rfactor)
print("number of shots: {}".format(nb_shots))
# vectorize the nb of shots
vec_shots = np.arange(0,nb_shots)
# define the regularly spaced samples on a single shot, normalized to [-0.5, 0.5)
nsamples = (np.arange(0,img_size) - img_size//2)/(img_size)
print("number of samples per shot: {}".format(np.size(nsamples)))
# BUG FIX: np.complex_ was deprecated in NumPy 1.20 and removed in NumPy 2.0;
# use the explicit np.complex128 dtype instead.
shot_c = np.array(nsamples, dtype=np.complex128)
# accumulate shots by rotating the initial one by the angular increment
# pi/nb_shots (list + single concatenate instead of np.append in a loop,
# which was quadratic)
shots = np.concatenate(
    [shot_c * np.exp(2 * np.pi * 1j * k/(2*nb_shots)) for k in vec_shots])
kspace_loc = np.zeros((len(shots),2))
#assign real and imaginary parts of complex-valued k-space trajectories to k-space locations
kspace_loc[:,0] = shots.real
kspace_loc[:,1] = shots.imag
#Plot full initialization
kspace = plt.figure(figsize = (8,8))
#plot shots
plt.scatter(kspace_loc[:,0],kspace_loc[:,1], marker = '.')
plt.title("Radial undersampling R = %d" %rfactor)
axes = plt.gca()
plt.grid()
# In[29]:
# Build the sampling mask and measure k-space with a non-Cartesian FFT.
# BUG FIX: the mask was previously assigned to `data`, shadowing the
# `skimage.data` module imported above; use a descriptive name instead.
kspace_mask = convert_locations_to_mask(kspace_loc, mri_img.shape)
fourier_op = NonCartesianFFT(samples=kspace_loc, shape=mri_img.shape,
                             implementation='cpu')
kspace_obs = fourier_op.op(mri_img.data)
# In[17]:
# Zero-order reconstruction: regrid the non-Cartesian samples onto a Cartesian
# grid by linear interpolation, then compare with the reference using SSIM.
grid_space = np.linspace(-0.5, 0.5, num=mri_img.shape[0])
grid2D = np.meshgrid(grid_space, grid_space)
grid_soln = gridded_inverse_fourier_transform_nd(kspace_loc, kspace_obs,
                                                 tuple(grid2D), 'linear')
plt.imshow(np.abs(grid_soln), cmap='gray')
# Calculate SSIM
base_ssim = ssim(grid_soln, mri_img)
plt.title('Gridded Solution\nSSIM = ' + str(base_ssim))
plt.show()
| {
"alphanum_fraction": 0.7206385405,
"author": null,
"avg_line_length": 31.0442477876,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "6c90062f5023b00b10a7f060b780e7164ecef2c3",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 4,
"max_forks_repo_forks_event_max_datetime": "2020-02-01T04:56:56.000Z",
"max_forks_repo_forks_event_min_datetime": "2019-04-23T15:08:11.000Z",
"max_forks_repo_head_hexsha": "e30fb99d1eacde3297ae9d00ab05ca8d681a6317",
"max_forks_repo_licenses": [
"BSD-2-Clause"
],
"max_forks_repo_name": "chaithyagr/isbi19-tutorial",
"max_forks_repo_path": "python/05.ISBI19_notebook.py",
"max_issues_count": 1,
"max_issues_repo_head_hexsha": "e30fb99d1eacde3297ae9d00ab05ca8d681a6317",
"max_issues_repo_issues_event_max_datetime": "2019-12-11T16:53:55.000Z",
"max_issues_repo_issues_event_min_datetime": "2019-12-11T16:53:55.000Z",
"max_issues_repo_licenses": [
"BSD-2-Clause"
],
"max_issues_repo_name": "chaithyagr/isbi19-tutorial",
"max_issues_repo_path": "python/05.ISBI19_notebook.py",
"max_line_length": 243,
"max_stars_count": 9,
"max_stars_repo_head_hexsha": "e30fb99d1eacde3297ae9d00ab05ca8d681a6317",
"max_stars_repo_licenses": [
"BSD-2-Clause"
],
"max_stars_repo_name": "chaithyagr/isbi19-tutorial",
"max_stars_repo_path": "python/05.ISBI19_notebook.py",
"max_stars_repo_stars_event_max_datetime": "2021-05-23T20:39:09.000Z",
"max_stars_repo_stars_event_min_datetime": "2019-04-05T16:17:52.000Z",
"num_tokens": 984,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 3508
} |
import os
import pickle
from typing import Dict
import colorlog
import tensorflow as tf
import numpy as np
# Absolute path of the directory containing this module.
__PATH__ = os.path.abspath(os.path.dirname(__file__))
# ParlAI-style special tokens; they occupy the first vocabulary slots,
# in the order given by _PARLAI_START_VOCAB.
_PARLAI_PAD = '__null__'
_PARLAI_GO = '__start__'
_PARLAI_EOS = '__end__'
_PARLAI_UNK = '__unk__'
_PARLAI_START_VOCAB = [_PARLAI_PAD, _PARLAI_GO, _PARLAI_EOS, _PARLAI_UNK]
# Fixed ids matching the order of _PARLAI_START_VOCAB above.
PARLAI_PAD_ID = 0
PARLAI_GO_ID = 1
PARLAI_EOS_ID = 2
PARLAI_UNK_ID = 3
# BERT WordPiece special tokens and their ids.
# NOTE(review): ids look like the standard bert-base vocab layout
# ([PAD]=0, [UNK]=100, ...) -- confirm against the checkpoint's vocab file.
_BERT_PAD = "[PAD]"
_BERT_UNK = "[UNK]"
_BERT_CLS = "[CLS]"
_BERT_SEP = "[SEP]"
_BERT_MASK = "[MASK]"
BERT_PAD_ID = 0
BERT_UNK_ID = 100
BERT_CLS_ID = 101
BERT_SEP_ID = 102
BERT_MASK_ID = 103
def convert_subword_to_word(sentence):
    """Merge WordPiece subwords back into words by dropping ' ##' markers."""
    return ''.join(sentence.split(' ##'))
class Vocabulary(object):
    """Bidirectional token/id lookup backed by TensorFlow static lookup tables.

    Exactly one of `vocab_fname` or `vocab_dict` must be supplied.
    Out-of-vocabulary tokens are hashed into `num_oov_buckets` extra ids;
    unknown ids map back to `unk_token`.
    """
    def __init__(self,
                 vocab_fname: str = None,
                 vocab_dict: Dict[str, int] = None,
                 num_oov_buckets: int = 1,
                 unk_token: str = _PARLAI_UNK):
        if vocab_fname is None and vocab_dict is None:
            raise ValueError("One of 'vocab_fname' or 'vocab_dict' should not be None")
        if vocab_fname and vocab_dict:
            raise ValueError("Only one of 'vocab_fname' or 'vocab_dict' can have value")
        if vocab_fname:
            # Loading a vocabulary from file is not supported yet.
            raise NotImplementedError
        elif vocab_dict:
            # dict preserves insertion order, so keys() and values() stay aligned.
            tokens = list(vocab_dict.keys())
            ids = list(vocab_dict.values())
            self.string_to_index_table = tf.lookup.StaticVocabularyTable(
                tf.lookup.KeyValueTensorInitializer(
                    keys=tokens, values=ids, key_dtype=tf.string, value_dtype=tf.int64
                ), num_oov_buckets, lookup_key_dtype=tf.string
            )
            self.index_to_string_table = tf.lookup.StaticHashTable(
                tf.lookup.KeyValueTensorInitializer(
                    keys=ids, values=tokens, key_dtype=tf.int64, value_dtype=tf.string
                ), default_value=unk_token
            )
        self._num_oov_buckets = num_oov_buckets
        self._unk_token = unk_token
    def string_to_index(self, keys):
        """Look up int64 ids for string tokens (OOV tokens land in hash buckets)."""
        return self.string_to_index_table.lookup(keys)
    def index_to_string(self, keys):
        """Look up tokens for integer ids, widening int32 ids to int64 first."""
        ids = tf.cast(keys, tf.int64) if keys.dtype == tf.int32 else keys
        return self.index_to_string_table.lookup(ids)
| {
"alphanum_fraction": 0.6407109606,
"author": null,
"avg_line_length": 30.6883116883,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "031daf96f3f2941ad04f0bbc16259c75853ebca6",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": 12,
"max_forks_repo_forks_event_max_datetime": "2022-02-24T16:18:44.000Z",
"max_forks_repo_forks_event_min_datetime": "2020-01-29T12:17:07.000Z",
"max_forks_repo_head_hexsha": "1e9c47bb61291a0b15e5b1e6901eafebc73db8db",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "bckim92/sequential-knowledge-transformer",
"max_forks_repo_path": "data/vocabulary.py",
"max_issues_count": 7,
"max_issues_repo_head_hexsha": "1e9c47bb61291a0b15e5b1e6901eafebc73db8db",
"max_issues_repo_issues_event_max_datetime": "2021-02-02T14:11:19.000Z",
"max_issues_repo_issues_event_min_datetime": "2020-01-17T16:02:28.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "bckim92/sequential-knowledge-transformer",
"max_issues_repo_path": "data/vocabulary.py",
"max_line_length": 91,
"max_stars_count": 135,
"max_stars_repo_head_hexsha": "1e9c47bb61291a0b15e5b1e6901eafebc73db8db",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "bckim92/sequential-knowledge-transformer",
"max_stars_repo_path": "data/vocabulary.py",
"max_stars_repo_stars_event_max_datetime": "2022-03-28T03:19:55.000Z",
"max_stars_repo_stars_event_min_datetime": "2020-01-03T08:35:35.000Z",
"num_tokens": 584,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 2363
} |
from __future__ import absolute_import, division, print_function, unicode_literals
import time
import numpy as np
from noise import snoise3
from ..mode import Mode
class Noise(Mode):
    """Animated simplex-noise mode for a 16x16 pixel grid.

    Each display state maps the noise field to a different colour scheme:
    a single channel, greyscale, or three independent fields for full RGB.
    """
    class State(object):
        # Display-state identifiers (short strings, used as labels).
        RED = "RED"
        GREEN = "GRN"
        BLUE = "BLUE"
        GREYSCALE = "GREY"
        FULL_COLOR = "RGB"
    NAME = "NOIS"
    STATES = [State.RED, State.GREEN, State.BLUE, State.GREYSCALE, State.FULL_COLOR]
    DEFAULT_STATE_INDEX = 0
    # Full-colour mode evaluates three noise fields per pixel, so it uses
    # fewer octaves to keep rendering cheap.
    OCTAVES, OCTAVES_FULL_COLOR = 3, 1
    # NOTE(review): PERSISTENCE is not referenced anywhere in this class --
    # confirm whether it is still needed.
    PERSISTENCE = 0.5
    SCALE = 0.05
    TIME_SCALE = 0.5
    def __init__(self, index, state_index=None):
        super(Noise, self).__init__(index=index, state_index=state_index)
        self._cells = np.zeros((16, 16))
        self._octaves = self.OCTAVES_FULL_COLOR if self.state == self.State.FULL_COLOR else self.OCTAVES
    def render(self, pixel_grid):
        """Render one animation frame of noise onto `pixel_grid`."""
        # BUG FIX: time.clock() was deprecated in Python 3.3 and removed in
        # 3.8 (and measured CPU time, not wall time, on Unix). time.time()
        # exists on both Python 2 and 3 and gives a wall-clock animation time.
        t = time.time() * self.TIME_SCALE
        state = self.state
        # _cells only supplies the 16x16 coordinates; its values are unused.
        for y, row in enumerate(self._cells):
            for x, _cell in enumerate(row):
                rgb = self._rgb(state, x, y, t)
                pixel_grid.pixel(x, y).color = rgb
    def _noise(self, x, y, t):
        # snoise3 returns values in [-1, 1]; rescale to [0, 1].
        noise = snoise3(x * self.SCALE, y * self.SCALE, t, octaves=self._octaves)
        return (noise + 1) / 2
    def _rgb(self, state, x, y, t):
        """Return the (r, g, b) tuple for the pixel at (x, y) at time t."""
        magnitude = self._noise(x, y, t)
        if state == self.State.RED:
            rgb = magnitude, 0, 0
        elif state == self.State.GREEN:
            rgb = 0, magnitude, 0
        elif state == self.State.BLUE:
            rgb = 0, 0, magnitude
        elif state == self.State.GREYSCALE:
            rgb = magnitude, magnitude, magnitude
        elif state == self.State.FULL_COLOR:
            # offset the sample coordinates so the three channels come from
            # decorrelated regions of the noise field
            rgb = magnitude, self._noise(x + 100, y + 100, t), self._noise(x + 200, y + 200, t)
        else:
            raise ValueError
        return rgb
| {
"alphanum_fraction": 0.5906652361,
"author": null,
"avg_line_length": 30.5573770492,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "a094ef18156dfaae1cd18107b673672172a58a46",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "87ac04adbb74702bee3dcaa5c6bded7786cf73e7",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "Spooner/pixel-table",
"max_forks_repo_path": "pixel_table/modes/noise/noise.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "87ac04adbb74702bee3dcaa5c6bded7786cf73e7",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "Spooner/pixel-table",
"max_issues_repo_path": "pixel_table/modes/noise/noise.py",
"max_line_length": 104,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "87ac04adbb74702bee3dcaa5c6bded7786cf73e7",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "Spooner/pixel-table",
"max_stars_repo_path": "pixel_table/modes/noise/noise.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 519,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 1864
} |
import numpy as np
import subprocess
import os
import glob
# Experiment runner: sweeps the number of labelled examples and launches
# semi_supervised.py once per enabled configuration.
#NUM_TRIES = 5
#NUM_EPOCHS = 200
NUM_TRIES = 1
NUM_EPOCHS = 20
#DATASETS=["UWaveGestureLibraryAll","FacesUCR","ECG5000"]
#DATASETS=["Datasets/CDOT/Time_Series_For_Clustering_El_Paso_with_Weather_data.csv"]
DATASETS = ["CDOT"]
exmpl_range = np.arange(4, 29, step=8)  # What is the assumption here?
#DATASETS = [os.path.split(x)[-1] for x in glob.glob("D://Datasets/UCRArchive_2018/**")]
for DATASET in DATASETS:
    for x in exmpl_range:
        print("Number of examples: %d" % x)
        for i in range(NUM_TRIES):
            # draw a fresh RNG seed per trial and forward it to the training script
            seed = np.random.randint(0, np.iinfo(np.int32).max)
            # The configurations below are currently disabled. (They were
            # previously wrapped in a no-op triple-quoted string literal;
            # rewritten as comments so no dead string is evaluated each trial.)
            #print("Using Seed %d" % seed)
            ## Test Silh + AE
            #print("Starting trial %d of %d with Silh=%s,Proto=%s,AE=%s" % (i+1,NUM_TRIES,True,False,True))
            ##subprocess.run(["python","semi_supervised.py","--dataset=%s"%DATASET,"--number_examples=%d" % x,"--number_epochs=%d" % NUM_EPOCHS,"--silh","--ae","--seed=%d"%seed],check=True)
            ## Test Silh on its own
            #print("Starting trial %d of %d with Silh=%s,Proto=%s,AE=%s" % (i+1,NUM_TRIES,True,False,False))
            #subprocess.run(["python","semi_supervised.py","--dataset=%s"%DATASET,"--number_examples=%d" % x,"--number_epochs=%d" % NUM_EPOCHS,"--silh","--seed=%d"%seed],check=True)
            ## Test Proto + AE
            #print("Starting trial %d of %d with Silh=%s,Proto=%s,AE=%s" % (i+1,NUM_TRIES,False,True,True))
            #subprocess.run(["python","semi_supervised.py","--dataset=%s"%DATASET,"--number_examples=%d" % x,"--number_epochs=%d" % NUM_EPOCHS,"--ae","--proto","--seed=%d"%seed],check=True)
            ## Test Proto
            #print("Starting trial %d of %d with Silh=%s,Proto=%s,AE=%s" % (i+1,NUM_TRIES,False,True,False))
            #subprocess.run(["python","semi_supervised.py","--dataset=%s"%DATASET,"--number_examples=%d" % x,"--number_epochs=%d" % NUM_EPOCHS,"--proto","--seed=%d"%seed],check=True)
            ## Test AE
            #print("Starting trial %d of %d with Silh=%s,Proto=%s,AE=%s" % (i+1,NUM_TRIES,False,False,True))
            #subprocess.run(["python","semi_supervised.py","--dataset=%s"%DATASET,"--number_examples=%d" % x,"--number_epochs=%d" % NUM_EPOCHS,"--ae","--seed=%d"%seed],check=True)
            ##Todo debug DB loss issue with tensorflow expands dims
            ## Test DB + AE
            #print("Starting trial %d of %d with Silh=%s,Proto=%s,AE=%s,DB=%s" %(i+1,NUM_TRIES,False,False,True,True))
            #subprocess.run(["python","semi_supervised.py","--dataset=%s"%DATASET,"--number_examples=%d" % x,"--number_epochs=%d" % NUM_EPOCHS,"--ae", "--db", "--seed=%d"%seed],check=True)
            # Test DB
            print("Starting trial %d of %d with Silh=%s,Proto=%s,AE=%s,DB=%s" %(i+1,NUM_TRIES,False,False,False,True))
            subprocess.run(["python","semi_supervised.py","--dataset=%s"%DATASET,"--number_examples=%d" % x,"--number_epochs=%d" % NUM_EPOCHS,"--db", "--seed=%d"%seed],check=True)
| {
"alphanum_fraction": 0.6095551895,
"author": null,
"avg_line_length": 64.5744680851,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "434e20817d8313653b91d6aa2708b9406355b95d",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "26e002bbbd128587921622776993fdc2705a092c",
"max_forks_repo_licenses": [
"BSD-2-Clause"
],
"max_forks_repo_name": "nnbaokhang/Semi_Supervised_Embedding_for_Scalable_and_Accurate",
"max_forks_repo_path": "runner.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "26e002bbbd128587921622776993fdc2705a092c",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"BSD-2-Clause"
],
"max_issues_repo_name": "nnbaokhang/Semi_Supervised_Embedding_for_Scalable_and_Accurate",
"max_issues_repo_path": "runner.py",
"max_line_length": 188,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "26e002bbbd128587921622776993fdc2705a092c",
"max_stars_repo_licenses": [
"BSD-2-Clause"
],
"max_stars_repo_name": "nnbaokhang/Semi_Supervised_Embedding_for_Scalable_and_Accurate",
"max_stars_repo_path": "runner.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 936,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 3035
} |
# https://github.com/nirupamaprv/Analyze-AB-test-Results/blob/master/Analyze%20A:B%20Test%20Results-Quiz-Answers.txt
# hypothesis is less related to p-value
import pandas as pd
import numpy as np
import random
import matplotlib
matplotlib.use('TkAgg')
#We are setting the seed to assure that we get the same answers on quizzes as we set up
# NOTE(review): random.seed only seeds Python's stdlib RNG; the simulation
# further below uses np.random, which is NOT seeded here -- confirm the
# reproducibility claim.
random.seed(42)
###########################################
# part 1: probability evaluation
###########################################
###########################################
#Quiz 1: Understanding the Dataset
#DESCRIPTION
#VALUE
#The number of rows in the dataset.
#294478
#The number of unique users in the dataset.
#290584
#The proportion of users converted.
#12%
#The number of times the new_page and treatment don't line up.
#3893
#Do any of the rows have missing values?
#No
###########################################
# read the raw A/B-test event log (one row per page view)
df = pd.read_csv('ab_data.csv')
# inspect dataset
# ? TODO(review): unclear from here which events make 'converted' equal 1 -- check upstream docs
df.head()
# we use shape function to see number of rows [first element]
row_num = df.shape[0]
print("Number of rows is: {}".format(row_num))
#use unique() function
user_total = df.nunique()['user_id']
print("Number of unique users is : {}".format(user_total))
# method 1
# this only works because 'converted' holds nothing but 1s and 0s
# we can find proportion of users converted by taking mean since values are 1 and 0
#print("Converted users proportion is {}%".format((df['converted'].mean())*100))
# method 2
# alternate method to find number of converted users
sum(df['converted'].values)/row_num
# rows where treatment group user lands incorrectly on old_page
# query syntax: column name == value
# treatment-group users are expected to land on the new page
mismatch_grp1 = df.query("group == 'treatment' and landing_page == 'old_page'")
print("Times treatment group user lands incorrectly on old_page is {}".format(len(mismatch_grp1)))
# rows where control group user incorrectly lands on new_page
# control-group users are expected to land on the old page
mismatch_grp2 = df.query("group == 'control' and landing_page == 'new_page'")
print("Times control group user incorrectly lands on new_page is {}".format(len(mismatch_grp2)))
# number of times the new_page and treatment don't line up is sum of above two values
print("Times new_page and treatment don't line up is {}".format(len(mismatch_grp1) + len(mismatch_grp2)))
# we check number of values in each row using the info function
# entry counts reveal whether any column has missing values
df.info()
######################
#Quiz 2: Messy Data
#In this case, how should we handle the rows where the landing_page and group columns don't align?
#Remove these rows.
######################
# Delete Rows
# drop rows for mismatched treatment groups
df.drop(df.query("group == 'treatment' and landing_page == 'old_page'").index, inplace=True)
# drop rows for mismatched control groups
df.drop(df.query("group == 'control' and landing_page == 'new_page'").index, inplace=True)
# display after delete
df.info()
# save new clean dataset which contains no records with missing or mismatched values
# we will use this dataset in next sections
# ? NOTE(review): no de-duplication has been applied at this point -- the
# duplicate user_id is only removed in the next section.
df.to_csv('ab_edited.csv', index=False)
# read newly created dataset into another dataframe
df2 = pd.read_csv('ab_edited.csv')
# Double Check all of the incorrect rows were removed - this should be 0
df2[((df2['group'] == 'treatment') == (df2['landing_page'] == 'new_page')) == False].shape[0]
###########################################
#Quiz 3: Updated DataFrame
#QUIZ QUESTION
#Match each description to the correct corresponding value.
#DESCRIPTION
#VALUE
#The number of unique ids in df2.
#290584
#The user_id for the non-unique id in df2.
#773192
#The landing_page for the non-unique id.
#new_page
#The group for the non-unique id.
#treatment
#The value of converted column for the non-unique id.
#0
###########################################
# inspect df2
df2.info()
# unique user ids count is
len(df2['user_id'].unique())
# check if duplicates in user_id
# we know that one user id is repeated due to difference between #userids and #unique ids
sum(df2['user_id'].duplicated())
# inspect duplicate userid
df2[df2.duplicated(['user_id'], keep=False)]['user_id']
# delete duplicate record
# we choose the one with timestamp "2017-01-09 05:37:58.781806"
# NOTE(review): which row survives depends on how the duplicate is removed;
# here we filter out the row matching this exact timestamp string.
time_dup = "2017-01-09 05:37:58.781806"
df2 = df2[df2.timestamp != time_dup]
# inspect number of entries in df2 after deleting duplicate record
df2.info()
# compare df2 with df (original without preprocessing)
# as seen above, 290584 entries now as entry with index 1876 is deleted
# we can confirm by checking unique values of user ids
len(df['user_id'].unique())
###########################################
#Quiz 4: Probability
#QUIZ QUESTION
#Use your solutions to Question 4. in the notebook to match each description to its corresponding value.
#DESCRIPTION
#VALUE
#Probability of converting regardless of page.
#0.1196
#Given an individual received the control page, the probability of converting.
#0.1204
#Given that an individual received the treatment, the probability of converting.
#0.1188
#The probability of receiving the new page.
#0.5001
# Evidence that one page leads to more conversions?
# Given that an individual was in the treatment group, the probability they converted is 0.118807
# Given that an individual was in the control group, the probability they converted is 0.120386
# We find that old page does better, but by a very tiny margin.
# Change aversion, test span durations and other potentially influencing factors are not accounted for. So, we cannot state with certainty that one page leads to more conversions. This is even more important due to almost similar perforamnce of both pages.
###########################################
# since values are 1 and 0, we can calculate mean to get probability of an individual converting
# ("probability of an individual converting" = share of rows with converted == 1)
df['converted'].mean()
# for this we group by column 'group'
# then we compute the statistics using describe function
# as conversions are assigned boolean values, we can use mean to find probability of conversion
df_grp = df.groupby('group')
df_grp.describe()
# number of individuals who got new page is same as those in treatment group
new_user = len(df.query("group == 'treatment'"))
# calculate total number of users
users=df.shape[0]
# thus, probability that an individual received the new page is new_user/users
new_user_p = new_user/users
print(new_user_p)
###########################################
# part 2: A/B test
# Notice that because of the time stamp associated with each event,
# you could technically run a hypothesis test continuously as each observation was observed.
# (open question: how long must an A/B test run before its result is meaningful?)
# However, then the hard question is do you stop as soon as one page is considered
# significantly better than another or does it need to happen consistently for a certain amount of time?
# How long do you run to render a decision that neither page is better than another?
###########################################
###########################################
#Quiz 5: Hypothesis Testing
#QUIZ QUESTION
#Use your solutions to Part II Question 2 in the notebook to assist in this quiz.
#DESCRIPTION
#SOLUTION
#p_new under the null.
#0.1196
#p_old under the null.
#0.1196
#n_new
#145310
#n_old
#145274
#p_new - p_old under the null.
#0
###########################################
#For now, consider you need to make the decision just based on "all the data provided".
# ("all the data provided" refers to df2, i.e. after part 1 removed mismatched and duplicated rows)
#If you want to assume that the old page is better unless the new page proves to be definitely
#better at a Type I error rate of 5% (type I error(α)),
#what should your null and alternative hypotheses be?
# You can state your hypothesis in terms of words or in terms of $p_{old}$ and $p_{new}$,
# $p_{old}$ and $p_{new}$ are the converted rates for the old and new pages.
#Hypothesis
#$H_{0}$ : $p_{old}$ >= $p_{new}$
#$H_{1}$ : $p_{old}$ < $p_{new}$
# ? TODO(review): clarify what this assumption is used for
#Assume under the null hypothesis,
#$p_{new}$ and $p_{old}$ both have "true" success rates equal to the converted success rate regardless of page -
# that is $p_{new}$ and $p_{old}$ are equal.
#Furthermore, assume they are equal to the converted rate in ab_data.csv regardless of the page.
# ? TODO(review): clarify what this statement is used for
#Use a sample size for each page equal to the ones in ab_data.csv.
#Perform the sampling distribution for the difference in converted between the two pages over 10,000 iterations of calculating an estimate from the null.
#Use the cells below to provide the necessary parts of this simulation. If this doesn't make complete sense right now, don't worry - you are going to work through the problems below to complete this problem. You can use Quiz 5 in the classroom to make sure you are on the right track.
# "under the null" => under the null hypothesis
# we assume the two rates are equal up front,
# then adjust based on the actual simulation
# (so here both are the same pooled value)
# What is the convert rate for $p_{new}$ under the null?
p_new = df2['converted'].mean()
print(p_new)
# What is the convert rate for $p_{old}$ under the null?
p_old = df2['converted'].mean()
print(p_old)
# treatment group samples
# What is $n_{new}$?
n_new = len(df2.query("group == 'treatment'"))
print(n_new)
# control group samples
# What is $n_{old}$?
n_old = len(df2.query("group == 'control'"))
print(n_old)
#Simulate $n_{new}$ transactions with a convert rate of $p_{new}$ under the null. Store these $n_{new}$ 1's and 0's in new_page_converted.
new_page_converted = np.random.choice([1, 0], size=n_new, p=[p_new, (1-p_new)])
# print(len(new_page_converted)) #code to check values
#Simulate $n_{old}$ transactions with a convert rate of $p_{old}$ under the null. Store these $n_{old}$ 1's and 0's in old_page_converted.
old_page_converted = np.random.choice([1, 0], size=n_old, p=[p_old, (1-p_old)])
# print(len(old_page_converted)) #code to check values
# Find $p_{new}$ - $p_{old}$ for your simulated values
# from part new_page_converted(145310) and old_page_converted(145274)
# since new_page_converted and old_page_converted have different sizes, we cannot directly compute p_diff
# since the difference is only 36 values out of thousands, we truncate the excess in new_page_converted
new_page_converted = new_page_converted[:145274]
p_diff = (new_page_converted/n_new) - (old_page_converted/n_old)
# print(p_diff) #code to check values
# repeat the p_diff procedure above 10,000 times,
# but instead of truncating sizes, work with the per-group means
# Simulate 10,000 $p_{new}$ - $p_{old}$ values using this same process similarly to the one you calculated in parts a. through
# Store all 10,000 values in p_diffs.
p_diffs = []
for _ in range(10000):
    new_page_converted = np.random.choice([1, 0], size=n_new, p=[p_new, (1-p_new)]).mean()
    old_page_converted = np.random.choice([1, 0], size=n_old, p=[p_old, (1-p_old)]).mean()
    diff = new_page_converted - old_page_converted
    p_diffs.append(diff)
# Plot a histogram of the p_diffs. Does this plot look like what you expected?
# Use the matching problem in the classroom to assure you fully understand what was computed here.
# BUG FIX: pyplot was never imported in this script (only `import matplotlib`
# at the top), so plt.hist below raised NameError. Import it here, after the
# TkAgg backend has been selected.
import matplotlib.pyplot as plt
plt.hist(p_diffs)
plt.xlabel('p_diffs')
plt.ylabel('Frequency')
plt.title('Plot of 10K simulated p_diffs');
# j. What proportion of the p_diffs are greater than the actual difference observed in ab_data.csv?
# (this compares against the original df, not the cleaned df2)
# compute the observed treatment-minus-control conversion difference
act_diff = df[df['group'] == 'treatment']['converted'].mean() - df[df['group'] == 'control']['converted'].mean()
act_diff
# list to array
p_diffs = np.array(p_diffs)
p_diffs
# proportion of p_diffs greater than the actual difference observed in ab_data.csv is computed as:
# (this one-sided tail proportion is the simulated p-value)
(act_diff < p_diffs).mean()
# k. In words, explain what you just computed in part j..
#We are computing p-values here.
#As explained in the videos and quizzes, this is the probability of observing our statistic (or one more extreme in favor of the alternative) if the null hypothesis is true.
#The more extreme in favor of the alternative portion of this statement determines the shading associated with your p-value.
#Here, we find that there is no conversion advantage with new pages. We fail to reject the null hypothesis, as old and new pages perform almost similarly. Old pages, as the numbers show, performed slightly better.
| {
"alphanum_fraction": 0.7154353774,
"author": null,
"avg_line_length": 33.4032258065,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "2a302c1ef818e8a8ddbe030cfd171f1db6cb2866",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "2dd0f6494cdb3d87af05176cb111f5ca3188ac51",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "say543/machine_learning_basics",
"max_forks_repo_path": "Analyze_ab_test_results2.py",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "2dd0f6494cdb3d87af05176cb111f5ca3188ac51",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "say543/machine_learning_basics",
"max_issues_repo_path": "Analyze_ab_test_results2.py",
"max_line_length": 284,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "2dd0f6494cdb3d87af05176cb111f5ca3188ac51",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "say543/machine_learning_basics",
"max_stars_repo_path": "Analyze_ab_test_results2.py",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 3307,
"path": null,
"reason": "import numpy",
"repo": null,
"save_path": null,
"sha": null,
"size": 12426
} |
#script to create moisture capital map for testing (for % chg)
#requires climate data from cru_ts4.zip (via https://crudata.uea.ac.uk/cru/data/hrg/)
#Starting from the initial-year raster, applies a linear per-year change so the
#total change over the whole period equals perc_chg, clamping each year's values
#to the valid [0, 1] range, and writes one .asc raster per year.
rm(list = ls())
library(raster)
library(tidyverse)
#set scenario parms
initial_yr <- 2018
final_yr <- 2035
perc_chg <- -20 #this is the % difference over the entire period
additional_yr <- T #if true generate a map for the year after the final year
count_yrs <- final_yr - initial_yr
final_diffc <- 1 + (perc_chg / 100) #convert to final proportion diffc (over entire period)
#read update file
input_scenario <- "Data/Moisture/GSCap_JanFebMarAprMayJun_S_"
#input_scenario <- "Data/Moisture/GSCap_JanFebMarAprMayJun_S_"
#input_scenario <- "Data/Moisture/test_"
initial_map <- raster(paste0(input_scenario,initial_yr,".asc"))
#calculate final year values (scale, round to 3 dp, then clamp to [0,1])
final_map <- round(initial_map * final_diffc,3)
final_map[final_map < 0] <- 0
final_map[final_map > 1] <- 1
delta_map <- (final_map - initial_map) / count_yrs #change by this amount each year (per pixel)
#plot(initial_map,main=initial_yr)
#loop over intermediate years, incrementing by delta_map and clamping each year
#(the final year itself is written after the loop directly from final_map)
for(yr in seq(from=initial_yr+1,to=final_yr-1,by=1)){
initial_map <- round(initial_map + delta_map,3)
initial_map[initial_map < 0] <- 0
initial_map[initial_map > 1] <- 1
#plot(initial_map,main=yr)
#m <- cellStats(initial_map, 'mean', na.rm=T)
print(yr)
writeRaster(initial_map,paste0(input_scenario,"testing",perc_chg,"_",yr,".asc"))
}
writeRaster(final_map,paste0(input_scenario,"testing",perc_chg,"_",final_yr,".asc"))
#plot(final_map,main=final_yr)
#optionally extrapolate one extra year beyond final_yr using the same delta
if(additional_yr){
add_yr <- final_yr+1
add_map <- round(final_map + delta_map,3)
add_map[add_map < 0] <- 0
add_map[add_map > 1] <- 1
print(add_yr)
writeRaster(add_map,paste0(input_scenario,"testing",perc_chg,"_",add_yr,".asc"))
}
| {
"alphanum_fraction": 0.7265372168,
"author": null,
"avg_line_length": 28.96875,
"converted": null,
"ext": "r",
"file": null,
"hexsha": "2ffac65eac75ec73ab85aebc1efd6b9bf1b78752",
"include": null,
"lang": "R",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "0086066dc6f2c015786c9835d6c2b857d74a7b26",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "jamesdamillington/CRAFTYInput",
"max_forks_repo_path": "moistureMap_testing.r",
"max_issues_count": null,
"max_issues_repo_head_hexsha": "0086066dc6f2c015786c9835d6c2b857d74a7b26",
"max_issues_repo_issues_event_max_datetime": null,
"max_issues_repo_issues_event_min_datetime": null,
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "jamesdamillington/CRAFTYInput",
"max_issues_repo_path": "moistureMap_testing.r",
"max_line_length": 96,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "0086066dc6f2c015786c9835d6c2b857d74a7b26",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "jamesdamillington/CRAFTYInput",
"max_stars_repo_path": "moistureMap_testing.r",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 577,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 1854
} |
#!/usr/bin/env python3
import argparse
from collections import defaultdict
from typing import Dict, List, Union, Any
import xml.etree.ElementTree as ET
from xml.dom import minidom
from xml.sax.saxutils import unescape
import networkx as nx
import yaml
import sys
import io
def bandwidth_conversion(x):
    """Convert a Shadow-classic bandwidth value to the 2.x unit string.

    Shadow classic stored bandwidths in KiB/s; the new format expects
    kibibits per second, hence the factor of 8 (bits per byte).
    """
    kibits = int(x) * 8
    return f'{kibits} Kibit'
def latency_conversion(x):
    """Convert a Shadow-classic latency value to the 2.x unit string.

    Shadow classic stored latencies in milliseconds; the new format
    expects a whole number of milliseconds with a ' ms' suffix.

    Raises:
        ValueError: if ``x`` cannot be parsed as a float, or is not a
            whole number of milliseconds (sub-millisecond latencies are
            not supported by the new format).
    """
    x = float(x)
    # An `assert` here would vanish under `python -O`; raise explicitly so
    # the validation always runs.
    if not x.is_integer():
        raise ValueError('Sub-millisecond latencies are not supported')
    return str(int(x)) + ' ms'
# original attribute name: (new attribute name, new attribute type, value transform fn)
# A `None` new-type means "keep the attribute's declared attr.type"; a `None`
# transform fn means "keep the text value unchanged" (rename only).
ATTR_CONVERSIONS = {
    'bandwidthup': ('bandwidth_up', 'string', bandwidth_conversion),
    'bandwidthdown': ('bandwidth_down', 'string', bandwidth_conversion),
    'latency': ('latency', 'string', latency_conversion),
    'packetloss': ('packet_loss', None, None),
    'countrycode': ('country_code', None, None),
    'citycode': ('city_code', None, None),
    'geocode': ('geo_code', None, None),
    'ip': ('ip_address', None, None),
}
def convert_topology(xml_root, out_file, set_labels):
    """Convert a legacy (1.x) Shadow GraphML topology to the 2.x GML format.

    Mutates ``xml_root`` in place (attribute renames, value rewrites, and
    removal of graph-level attributes), then re-parses it with networkx and
    writes GML to ``out_file``.

    Args:
        xml_root: root Element of the parsed GraphML document; modified
            in place.
        out_file: binary file-like object the GML output is written to.
        set_labels: if True, rename nodes and label edges using each
            node's 'ip_address' attribute.

    Returns:
        dict mapping each removed (deprecated) graph-attribute name to its
        original text value, so the caller can warn about them.
    """
    graphml_ns = 'http://graphml.graphdrawing.org/xmlns'
    # ElementTree's Clark notation: element tags look like '{namespace}tag'
    graphml_ns_prefix = '{' + graphml_ns + '}'
    # register as the default namespace so serialized output has no prefix
    ET.register_namespace('', graphml_ns)
    # map of functions to apply to text/values for elements with a given id
    id_type_conversions = {}
    # remap any of the attribute names and types, and build `id_type_conversions`
    for x in xml_root.findall('{}key'.format(graphml_ns_prefix)):
        if x.attrib['attr.name'] in ATTR_CONVERSIONS:
            (attr_name, attr_type,
             attr_map_fn) = ATTR_CONVERSIONS[x.attrib['attr.name']]
            x.attrib['attr.name'] = attr_name
            if attr_type != None:
                x.attrib['attr.type'] = attr_type
            if attr_map_fn != None:
                id_type_conversions[x.attrib['id']] = attr_map_fn
    # transform the text/values
    for x in xml_root.findall('{}graph'.format(graphml_ns_prefix)):
        # graph-level <data> elements
        for y in x.findall('{}data'.format(graphml_ns_prefix)):
            if y.attrib['key'] in id_type_conversions:
                y.text = id_type_conversions[y.attrib['key']](y.text)
        # node- and edge-level <data> elements
        nodes = x.findall('{}node'.format(graphml_ns_prefix))
        edges = x.findall('{}edge'.format(graphml_ns_prefix))
        for y in nodes + edges:
            for z in y.findall('{}data'.format(graphml_ns_prefix)):
                if z.attrib['key'] in id_type_conversions:
                    z.text = id_type_conversions[z.attrib['key']](z.text)
    removed_graph_keys = {}
    removed_graph_data = {}
    # collect and remove the keys for any unsupported graph attributes
    # (findall returns a fresh list, so removing from xml_root while
    # iterating it is safe)
    for x in xml_root.findall('{}key'.format(graphml_ns_prefix)):
        # if x.attrib['attr.name'] in UNSUPPORTED_ATTRS:
        if x.attrib['for'] == 'graph':
            removed_graph_keys[x.attrib['id']] = x.attrib['attr.name']
            xml_root.remove(x)
    # store and remove the text/values for any unsupported graph attributes
    for x in xml_root.findall('{}graph'.format(graphml_ns_prefix)):
        for y in x.findall('{}data'.format(graphml_ns_prefix)):
            if y.attrib['key'] in removed_graph_keys:
                # NOTE(review): if the same deprecated attribute appears
                # twice, the later value overwrites the earlier one here.
                removed_graph_data[removed_graph_keys[y.attrib['key']]] = y.text
                x.remove(y)
    # build the graph from the xml
    xml = ET.tostring(xml_root, encoding='utf-8', method='xml').decode('utf-8')
    graph = nx.parse_graphml(xml)
    # shadow doesn't use any attributes that would go in 'node_default' or
    # 'edge_default', so we don't expect there to be any
    if 'node_default' in graph.graph:
        assert len(graph.graph['node_default']) == 0
        del graph.graph['node_default']
    if 'edge_default' in graph.graph:
        assert len(graph.graph['edge_default']) == 0
        del graph.graph['edge_default']
    # Only change node and edge labels if the label option was given.
    # Our custom labels help avoid cross-ref from an edge to a node via the node's
    # numeric id, which can be a pain especially on large graphs.
    # NOTE(review): assumes every node carries an 'ip_address' attribute when
    # set_labels is used — confirm against the input topologies.
    if set_labels:
        graph = nx.relabel_nodes(graph, lambda x: f"node at {graph.nodes[x]['ip_address']}")
        for (source, target) in graph.edges:
            src_ip = graph.nodes[source]['ip_address']
            tgt_ip = graph.nodes[target]['ip_address']
            graph[source][target]['label'] = f"path from {src_ip} to {tgt_ip}"
    # generate gml from the graph
    try:
        nx.write_gml(graph, out_file)
    except nx.exception.NetworkXError:
        # we require keys with underscores which isn't technically allowed by the
        # spec, but both igraph and recent versions of networkx allow them
        print("Unable to write GML. Do you have networkx version >= 2.5?",
              file=sys.stderr)
        raise
    return removed_graph_data
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description=('Convert Shadow topology files from the version 1.x format '
                     'to the 2.x format, and write the output to stdout'))
    parser.add_argument('filename', help='Filename to convert')
    parser.add_argument('--set-labels', help='Add custom labels to nodes and edges',
                        action="store_true", default=False, required=False)
    args = parser.parse_args()
    filename = args.filename
    # allow '-' as the conventional alias for standard input
    # NOTE(review): '/dev/stdin' assumes a POSIX system — confirm Windows is
    # not a supported platform for this tool.
    if filename == '-':
        filename = '/dev/stdin'
    tree = ET.parse(filename)
    # We remove and show a warning for deprecated graph attributes, but leave deprecated
    # node/edge attributes alone. Shadow currently refuses to read graphs with unrecognized
    # node/edge attributes so the user will be able to tell if any are no longer used, but
    # igraph is unable to read GML graph attributes and will not raise an error, so we show
    # the error here instead. In the case of 'preferdirectpaths', this also lets us convert
    # the option to 'use_shortest_path' in 'convert_legacy_config.py' without needing to
    # raise an error.
    # GML goes to stdout (binary buffer); warnings go to stderr below.
    removed_graph_data = convert_topology(tree.getroot(), sys.stdout.buffer, args.set_labels)
    for x in removed_graph_data:
        print('Removed the deprecated graph attribute \'{}\': {}'.format(
            x, removed_graph_data[x]), file=sys.stderr)
| {
"alphanum_fraction": 0.6611102375,
"author": null,
"avg_line_length": 40.5031847134,
"converted": null,
"ext": "py",
"file": null,
"hexsha": "cfbe16d1c41d73418199b655eced9d4b0c60d5c0",
"include": true,
"lang": "Python",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "b9c8b840d2a57a5e76ec9f45db5af571fdfac814",
"max_forks_repo_licenses": [
"BSD-3-Clause"
],
"max_forks_repo_name": "marcosimioni/shadow",
"max_forks_repo_path": "src/tools/convert_legacy_topology.py",
"max_issues_count": 78,
"max_issues_repo_head_hexsha": "b9c8b840d2a57a5e76ec9f45db5af571fdfac814",
"max_issues_repo_issues_event_max_datetime": "2022-03-21T00:56:45.000Z",
"max_issues_repo_issues_event_min_datetime": "2021-07-13T16:12:25.000Z",
"max_issues_repo_licenses": [
"BSD-3-Clause"
],
"max_issues_repo_name": "marcosimioni/shadow",
"max_issues_repo_path": "src/tools/convert_legacy_topology.py",
"max_line_length": 93,
"max_stars_count": 1,
"max_stars_repo_head_hexsha": "470d013675433d8cc52e61c581cefc06befb7768",
"max_stars_repo_licenses": [
"BSD-3-Clause"
],
"max_stars_repo_name": "sporksmith/shadow",
"max_stars_repo_path": "src/tools/convert_legacy_topology.py",
"max_stars_repo_stars_event_max_datetime": "2021-08-02T12:45:00.000Z",
"max_stars_repo_stars_event_min_datetime": "2021-08-02T12:45:00.000Z",
"num_tokens": 1482,
"path": null,
"reason": "import networkx",
"repo": null,
"save_path": null,
"sha": null,
"size": 6359
} |
include("faber.jl")
include("types.jl")
"""
acc_rej(n::Int64, S::System, X::AbstractModel, f1::Regular, u, v)
For Regular sampling_scheme: always accept
"""
function acc_rej(n::Int64, S::System, X::AbstractModel, f1::Regular, u::Float64, v::Float64, t::Float64)
return true
end
"""
acc_rej(n::Int64, S::System, X::AbstractModel, f1::SubSampling, u, v)
Subsampling scheme: accept the event time as coming from the real Poisson rate.
"""
function acc_rej(n::Int64, S::System, X::AbstractModel, ::SubSampling, u::Float64, v::Float64, t::Float64)
if λratio(n, S, X, u, v, t)> rand()
return true
else
return false
end
end
"""
update_events!(n, S::System, X::AbstractModel, ::SubSampling, ::FullIndependence, acc)
renovate time when the nth coefficient rings (i.e. τ_n = 0). In the full independece case
we do not need to simulate any other time, but just rescale them.
"""
function update_events!(n::Int64, τ0::Float64, S::System, X::AbstractModel, ::SubSampling, ::FullIndependence, acc::Bool, u::Float64, v::Float64, t::Float64)
S.τ[n] = λbar(n, S, X , u, v, t)
for i in 1:(n-1)
S.τ[i] -= τ0
end
for i in (n + 1): length(S.ϕ)
S.τ[i] -= τ0
end
end
# NOTE(review): this method is an exact duplicate of the documented
# definition with the same signature further below; Julia silently replaces
# it, so this copy is dead code — consider deleting one of the two.
function update_events!(n::Int64, τ0::Float64, S::System, X::AbstractModel, f1::Regular, f2::PartialIndependence, acc::Bool, u::Float64, v::Float64, t::Float64)
    # redraw waiting times for the neighbours sharing support with ϕ[n]
    for i in S.ϕ[n].nhb
        S.τ[i] = λbar(i, S, X , u, v, t)
    end
    # remaining clocks just advance by the elapsed time τ0
    for i in S.ϕ[n].notnhb
        S.τ[i] -= τ0
    end
end
"""
update_events!(n, S::System, X::AbstractModel, f1::Regular, f2::PartialIndependence, acc)
renovate time when the `n`_th coefficient rings (i.e. τ_n = 0). In the partial independece case
we need to simulate all the time relative to the functions sharing the same support and rescale the others.
"""
function update_events!(n::Int64, τ0::Float64, S::System, X::AbstractModel, f1::Regular, f2::PartialIndependence, acc::Bool, u::Float64, v::Float64, t::Float64)
for i in S.ϕ[n].nhb
S.τ[i] = λbar(i, S, X , u, v, t)
end
for i in S.ϕ[n].notnhb
S.τ[i] -= τ0
end
end
"""
update_events!(n, S::System, X::AbstractModel, f1::SubSampling, f2::PartialIndependence, acc)
renovate time when the `n`_th coefficient rings (i.e. τ_n = 0). In the partial independece case
we need to simulate all the time relative to the functions sharing the same support and rescale the others.
if `acc` is false, means that in the previous step we rejected the event and we did not change velocity.
This implies that we need to renovate only the `n`th time like in the Full independece case.
"""
function update_events!(n::Int64, τ0::Float64, S::System, X::AbstractModel, ::SubSampling, ::PartialIndependence, acc::Bool, u::Float64, v::Float64, t::Float64)
if acc == true
update_events!(n, τ0, S, X, Regular(), PartialIndependence(), acc, u, v, t)
else
update_events!(n, τ0, S, X, SubSampling(), FullIndependence(), acc, u, v, t)
end
end
"""
zigzagsampler(X::AbstractModel, T, L, u, v, TT)
run the ZigZag sampler for diffusion `X` conditioned to start at `u` and
end at `v` at time `T`. The infinite summation is truncated at level `L` and
the ZigZag process will run up to time `TT`. Optionally set velocities θ
"""
function zz_sampler(X::AbstractModel, T::Float64, L::Int64, u::Float64, v::Float64, clock::Float64, ξ =fill(0.0, 2<<L - 1) , θ = fill(1.0, 2<<L - 1))
t = 0.0
S = System(L, T, ξ, θ)
τ0 = 0.0
n0 = 0
Ξ = Skeleton[]
while t < clock
τ0 , n0 = findmin(S.τ)
S.ξ .+= S.θ*τ0
t += τ0
if acc_rej(n0, S, X, sampling_scheme(X), u, v, t)
S.θ[n0] = -S.θ[n0]
push!(Ξ, (Skeleton(copy(S.ξ), t)))
update_events!(n0, τ0, S, X, sampling_scheme(X), dependence_strucute(X), true, u, v, t)
else
update_events!(n0, τ0, S, X, sampling_scheme(X), dependence_strucute(X), false, u, v, t)
end
end
return Ξ
end
"""
    zz_sampler_count(X::AbstractModel, T, L, u, v, clock, ξ, θ)

Same dynamics as `zz_sampler`, but instead of the path skeleton it
returns `num_events`, the number of *accepted* velocity flips recorded
per basis coefficient (a vector of length `2^(L+1) - 1`).
"""
function zz_sampler_count(X::AbstractModel, T::Float64, L::Int64, u::Float64, v::Float64, clock::Float64, ξ = fill(0.0, 2<<L - 1), θ = fill(1.0, 2<<L - 1))
    t = 0.0
    S = System(L, T, ξ, θ)
    τ0 = 0.0
    n0 = 0
    num_events = fill(0, 2^(L+1) - 1)
    while t < clock
        τ0, n0 = findmin(S.τ)
        # fused broadcast avoids allocating the temporary S.θ * τ0
        S.ξ .+= S.θ .* τ0
        t += τ0
        if acc_rej(n0, S, X, sampling_scheme(X), u, v, t)
            S.θ[n0] = -S.θ[n0]
            # count only accepted flips (contrast with zz_sampler_count_ub)
            num_events[n0] += 1
            update_events!(n0, τ0, S, X, sampling_scheme(X), dependence_strucute(X), true, u, v, t)
        else
            update_events!(n0, τ0, S, X, sampling_scheme(X), dependence_strucute(X), false, u, v, t)
        end
    end
    return num_events
end
"""
    zz_sampler_count_ub(X::AbstractModel, T, L, u, v, clock, ξ, θ)

Like `zz_sampler_count`, but counts *every* proposed event (rings of the
dominating/upper-bound rate), accepted or not, per basis coefficient.
Returns that count vector of length `2^(L+1) - 1`.
"""
function zz_sampler_count_ub(X::AbstractModel, T::Float64, L::Int64, u::Float64, v::Float64, clock::Float64, ξ = fill(0.0, 2<<L - 1), θ = fill(1.0, 2<<L - 1))
    t = 0.0
    S = System(L, T, ξ, θ)
    τ0 = 0.0
    n0 = 0
    num_events = fill(0, 2^(L+1) - 1)
    while t < clock
        τ0, n0 = findmin(S.τ)
        # fused broadcast avoids allocating the temporary S.θ * τ0
        S.ξ .+= S.θ .* τ0
        t += τ0
        # count before the accept/reject test: every proposal is recorded
        num_events[n0] += 1
        if acc_rej(n0, S, X, sampling_scheme(X), u, v, t)
            S.θ[n0] = -S.θ[n0]
            update_events!(n0, τ0, S, X, sampling_scheme(X), dependence_strucute(X), true, u, v, t)
        else
            update_events!(n0, τ0, S, X, sampling_scheme(X), dependence_strucute(X), false, u, v, t)
        end
    end
    return num_events
end
| {
"alphanum_fraction": 0.6051915048,
"author": null,
"avg_line_length": 33.3878787879,
"converted": null,
"ext": "jl",
"file": null,
"hexsha": "47e4ffb39c2be1340fcf4210ed7ffbf3fc208010",
"include": null,
"lang": "Julia",
"length": null,
"llama_tokens": null,
"mathlib_filename": null,
"max_forks_count": null,
"max_forks_repo_forks_event_max_datetime": null,
"max_forks_repo_forks_event_min_datetime": null,
"max_forks_repo_head_hexsha": "0ffb12385d09e8a95cd64141de0e683058891448",
"max_forks_repo_licenses": [
"MIT"
],
"max_forks_repo_name": "SebaGraz/ZZDiffusionBridge",
"max_forks_repo_path": "src/zz_sampler.jl",
"max_issues_count": 1,
"max_issues_repo_head_hexsha": "0ffb12385d09e8a95cd64141de0e683058891448",
"max_issues_repo_issues_event_max_datetime": "2020-01-16T21:14:50.000Z",
"max_issues_repo_issues_event_min_datetime": "2020-01-16T21:14:50.000Z",
"max_issues_repo_licenses": [
"MIT"
],
"max_issues_repo_name": "SebaGraz/ZZDiffusionBridge",
"max_issues_repo_path": "src/zz_sampler.jl",
"max_line_length": 160,
"max_stars_count": null,
"max_stars_repo_head_hexsha": "0ffb12385d09e8a95cd64141de0e683058891448",
"max_stars_repo_licenses": [
"MIT"
],
"max_stars_repo_name": "SebaGraz/ZZDiffusionBridge",
"max_stars_repo_path": "src/zz_sampler.jl",
"max_stars_repo_stars_event_max_datetime": null,
"max_stars_repo_stars_event_min_datetime": null,
"num_tokens": 1951,
"path": null,
"reason": null,
"repo": null,
"save_path": null,
"sha": null,
"size": 5509
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.