text
stringlengths 0
1.25M
| meta
stringlengths 47
1.89k
|
|---|---|
import numpy as np
import LSFIR
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider, Button, RadioButtons
# --- Default filter specification (all frequencies in MHz) ---
Fpass2 = 11.0 # MHz Passband end frequency
Fstop2 = 15.0 #MHz Stopband start frequency
Fstop1 = 5.0  # MHz lower stopband edge (used by band-pass / high-pass designs)
Fpass1 = 4.0  # MHz lower passband edge
Fsamp = 50.0 # MHz Sampling Frequency
Weight = 100 # Weight of stop band error
Taps = 71 # FIR Filter Taps (odd count -> Type-I linear-phase filter)
Fnotch1 = 60 #Notch out freq Fnotch1 (above Fsamp/2 the notch is disabled)
Fnotch2 = 60  # second notch frequency (above Fsamp/2 the notch is disabled)
# Most recently designed quantized coefficient set; printed by the Generate button.
h_global = np.zeros(Taps)
# Design mode selector: 0 = low-pass, 1 = band-pass, 2 = high-pass.
design = 0
# Main figure/axes showing the magnitude response.
fig, myFilter = plt.subplots(figsize=(18, 9))
plt.subplots_adjust(left=0.25,bottom=0.3)
myFilter.set_xlabel('Freq(MHz)')
myFilter.set_ylabel('Magnitude(dB)')
# Initial least-squares low-pass design and its rounded (quantized) version.
h = LSFIR.lpfls(Taps,2*np.pi*(Fpass2/Fsamp),2*np.pi*(Fstop2/Fsamp),Weight)
# NOTE(review): np.round() quantizes taps to integers — presumably for
# fixed-point hardware; confirm LSFIR returns integer-scaled coefficients.
h_quant = np.round(h)
h_global = h_quant
# 1024-point FFT; only the first 512 bins (0 .. Fsamp/2) are plotted.
H = np.fft.fft(h,1024)
H_quant = np.fft.fft(h_quant,1024)
Mag = 20*np.log10(abs(H[0:512]))
Mag_quant = 20*np.log10(abs(H_quant[0:512]))
freq = np.zeros(512)
for i in range(0,512) :
    freq[i] = Fsamp/2 * i/512
# Ideal (blue) vs. quantized (red) magnitude response.
l1, = plt.plot(freq,Mag, lw=2, color='blue')
plt.axis([0, 50.0, -50, 100])
l2, = plt.plot(freq,Mag_quant, lw=2, color='red')
# Slider axes. NOTE(review): `axisbg` was renamed `facecolor` in matplotlib 2.0,
# so this script only runs on pre-2.0 matplotlib — verify target environment.
axcolor = 'lightgoldenrodyellow'
axTaps = plt.axes([0.6, 0.175, 0.25, 0.02], axisbg=axcolor)
axFsamp = plt.axes([0.6, 0.150, 0.25,0.02], axisbg=axcolor)
axFpass2 = plt.axes([0.6, 0.125, 0.25, 0.02],axisbg=axcolor)
axFstop2 = plt.axes([0.6, 0.100,0.25, 0.02],axisbg=axcolor)
axFstop1 = plt.axes([0.6, 0.075,0.25, 0.02],axisbg=axcolor)
axFpass1 = plt.axes([0.6, 0.050,0.25, 0.02],axisbg=axcolor)
axW = plt.axes([0.1, 0.175, 0.25, 0.02],axisbg=axcolor)
axFnotch1 = plt.axes([0.1, 0.150, 0.25, 0.02],axisbg=axcolor)
axFnotch2 = plt.axes([0.1, 0.125, 0.25, 0.02],axisbg=axcolor)
# Sliders. NOTE(review): the '1'/'2' suffixes in some labels are swapped
# relative to the variable names (e.g. sFpass1 is labelled 'PassFreq2') —
# verify which naming is intended before relying on the labels.
sTaps = Slider(axTaps,'Taps',15,2048,valinit=Taps)
sFsamp = Slider(axFsamp,'Samp Freq',1.0,100,valinit=Fsamp)
sFpass1 = Slider(axFpass1,'PassFreq2',1.0,40.0,valinit=Fpass1)
sFstop1 = Slider(axFstop1,'StopFreq2',1.0,45.0,valinit=Fstop1)
sFstop2 = Slider(axFstop2, 'StopFreq1',1.0,45.0,valinit=Fstop2)
sFpass2 = Slider(axFpass2,'PassFreq1',1.0,45.0,valinit=Fpass2)
sW = Slider(axW,'Weight',1.0,1000,valinit=Weight)
sFnotch1 = Slider(axFnotch1,'Notch 1',0.0,60,valinit=Fnotch1)
sFnotch2 = Slider(axFnotch2,'Notch 2',0.0,60,valinit=Fnotch2)
def update(val):
    """Widget callback: re-design the filter from the current slider values
    and refresh both magnitude-response curves.

    Parameters
    ----------
    val : float or matplotlib event
        Value supplied by Slider.on_changed / Button.on_clicked (unused; all
        settings are re-read from the sliders directly).
    """
    global design
    Taps = int(sTaps.val)
    # Force an odd tap count (Type-I linear-phase FIR).
    if(Taps%2 == 0) :
        Taps = Taps +1
    Fsamp = sFsamp.val
    Fpass1 = sFpass1.val
    Fstop1 = sFstop1.val
    Fstop2 = sFstop2.val
    Fpass2 = sFpass2.val
    W = sW.val
    Fnotch1 = sFnotch1.val
    Fnotch2 = sFnotch2.val
    # Convert all band edges to normalized angular frequency (rad/sample).
    wp = 2*np.pi*(Fpass2/Fsamp)
    ws = 2*np.pi*(Fstop2/Fsamp)
    wp2 = 2*np.pi*(Fpass1/Fsamp)
    ws2 = 2*np.pi*(Fstop1/Fsamp)
    wn1 = 2*np.pi*(Fnotch1/Fsamp)
    wn2 = 2*np.pi*(Fnotch2/Fsamp)
    if(design == 0):
        # Low-pass design, optionally with one or two notches.
        if(ws<=wp) :
            # Invalid specification: warn only — the design is still attempted.
            print("PANIC --> Stop Frequency is smaller than Cut-off Frequency!!")
        # A notch frequency above Fsamp/2 is out of band and disables that notch.
        if((Fnotch1 <= Fsamp/2) and (Fnotch2 <= Fsamp/2)) :
            h = LSFIR.lpfls2notch(Taps,wp,ws,wn1,wn2,W)
        elif((Fnotch1 > Fsamp/2) and (Fnotch2 <= Fsamp/2)):
            h = LSFIR.lpfls1notch(Taps,wp,ws,wn2,W)
        elif((Fnotch1 <= Fsamp/2) and (Fnotch2 > Fsamp/2)):
            h = LSFIR.lpfls1notch(Taps,wp,ws,wn1,W)
        else:
            h = LSFIR.lpfls(Taps,wp,ws,W)
    elif(design == 1):
        # Band-pass: lower transition (ws2->wp2) and upper transition (wp->ws).
        h = LSFIR.bpfls(Taps,ws2,wp2,wp,ws,W)
    elif(design == 2):
        h = LSFIR.hpfls(Taps, ws2, wp2, W)
    # Quantize taps to integers and publish them for the Generate button.
    h_quant = np.round(h)
    global h_global
    h_global = h_quant
    # Recompute both magnitude responses over [0, Fsamp/2).
    H = np.fft.fft(h,1024)
    H_quant = np.fft.fft(h_quant,1024)
    Mag = 20*np.log10(abs(H[0:512]))
    Mag_quant = 20*np.log10(abs(H_quant[0:512]))
    freq = np.zeros(512)
    for i in range(0,512) :
        freq[i] = Fsamp/2 * i/512
    l1.set_ydata(Mag)
    l1.set_xdata(freq)
    l2.set_ydata(Mag_quant)
    l2.set_xdata(freq)
    fig.canvas.draw_idle()
# Re-run the filter design whenever any slider value changes.
sTaps.on_changed(update)
sFsamp.on_changed(update)
sFpass1.on_changed(update)
sFstop1.on_changed(update)
sFpass2.on_changed(update)
sFstop2.on_changed(update)
sW.on_changed(update)
sFnotch1.on_changed(update)
sFnotch2.on_changed(update)
# 'Reset' button restores every slider to its initial value.
resetax = plt.axes([0.1, 0.025, 0.05, 0.04])
button1 = Button(resetax, 'Reset', color=axcolor, hovercolor='green')
def reset(event):
    """'Reset' button callback: restore every slider to its initial value.

    Parameters
    ----------
    event : matplotlib event
        Button click event (unused).
    """
    # Same order as the original explicit call sequence.
    for slider in (sTaps, sFsamp, sFpass1, sFstop1, sFstop2, sFpass2,
                   sW, sFnotch1, sFnotch2):
        slider.reset()
button1.on_clicked(reset)
# 'Generate' button prints the current quantized coefficient set.
generateax = plt.axes([0.3, 0.025, 0.05, 0.04])
button2 = Button(generateax, 'Generate', color=axcolor, hovercolor='green')
def generate(event):
    """'Generate' button callback: print the first half of the quantized
    coefficients.  The designed filter is linear-phase, so the taps are
    symmetric and the second half mirrors the first.

    Parameters
    ----------
    event : matplotlib event
        Button click event (unused).
    """
    global h_global
    # Fixed: the original used a Python-2 `print` statement (a syntax error
    # under Python 3) and `/`, which is float division in Python 3 and would
    # make the slice index a float.  Use print() and floor division instead.
    print(h_global[0:(len(h_global) + 1) // 2])
button2.on_clicked(generate)
# 'LowPass' button switches the designer into low-pass mode.
lowpassax = plt.axes([0.025, 0.5, 0.05, 0.04])
button3 = Button(lowpassax, 'LowPass', color=axcolor, hovercolor='green')
def LowPassDesignSet(event):
    """'LowPass' button callback: select the low-pass design mode (code 0).

    Parameters
    ----------
    event : matplotlib event
        Button click event (unused).
    """
    global design
    design = 0
# Clicking 'LowPass' sets the mode, resets the sliders, then redraws.
# NOTE(review): update() receives the click event as its `val` argument here;
# harmless because update() ignores its argument.
button3.on_clicked(LowPassDesignSet)
button3.on_clicked(reset)
button3.on_clicked(update)
# 'HighPass' button switches the designer into high-pass mode.
highpassax = plt.axes([0.025, 0.4, 0.05, 0.04])
button4 = Button(highpassax, 'HighPass', color=axcolor, hovercolor='green')
def HighPassDesignSet(event):
    """'HighPass' button callback: select the high-pass design mode (code 2).

    Parameters
    ----------
    event : matplotlib event
        Button click event (unused).
    """
    global design
    design = 2
# Clicking 'HighPass' sets the mode, resets the sliders, then redraws.
button4.on_clicked(HighPassDesignSet)
button4.on_clicked(reset)
button4.on_clicked(update)
# 'BandPass' button switches the designer into band-pass mode.
bandpassax = plt.axes([0.025, 0.3, 0.05, 0.04])
button5 = Button(bandpassax, 'BandPass', color=axcolor, hovercolor='green')
def BandPassDesignSet(event):
    """'BandPass' button callback: select the band-pass design mode (code 1).

    Parameters
    ----------
    event : matplotlib event
        Button click event (unused).
    """
    global design
    design = 1
# Clicking 'BandPass' sets the mode, resets the sliders, then redraws.
button5.on_clicked(BandPassDesignSet)
button5.on_clicked(reset)
button5.on_clicked(update)
# Hand control to the matplotlib event loop.
plt.show()
|
{"hexsha": "8e7824bece198e7adab91a728b14ae32cf2770ab", "size": 5691, "ext": "py", "lang": "Python", "max_stars_repo_path": "LSDesignAdvanced.py", "max_stars_repo_name": "SiddhantRaman/Least-Squared-Error-Based-FIR-Filters", "max_stars_repo_head_hexsha": "0b77fd51462b49009cc6038ce37d5ee9cd413e55", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "LSDesignAdvanced.py", "max_issues_repo_name": "SiddhantRaman/Least-Squared-Error-Based-FIR-Filters", "max_issues_repo_head_hexsha": "0b77fd51462b49009cc6038ce37d5ee9cd413e55", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "LSDesignAdvanced.py", "max_forks_repo_name": "SiddhantRaman/Least-Squared-Error-Based-FIR-Filters", "max_forks_repo_head_hexsha": "0b77fd51462b49009cc6038ce37d5ee9cd413e55", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.1525423729, "max_line_length": 82, "alphanum_fraction": 0.6399578281, "include": true, "reason": "import numpy", "num_tokens": 2078}
|
#pragma once
#include <cstddef>
#include <boost/config.hpp>
#include <boost/version.hpp>
#include <boost/utility/addressof.hpp>
//! Workaround (honestly, a hack) for cases when Blackhole is being compiled with clang on systems
//! with boost 1.55 on board.
//!
//! Stolen from https://svn.boost.org/trac/boost/ticket/5487.
//!
//! \note must be included before <boost/variant/get.hpp> or <boost/utility/addressof.hpp>.
// Only active for clang with boost 1.55 exactly (BOOST_VERSION encodes MMmmpp,
// so 1.55.x satisfies major == 1 && minor == 55).
#if defined(__clang__) && (BOOST_VERSION / 100000 == 1 && BOOST_VERSION / 100 % 1000 == 55)
#ifndef BOOST_NO_CXX11_NULLPTR
namespace boost {
namespace detail {
// Choose the spelling of nullptr's type: clang without libc++ but with C++11
// decltype can use decltype(nullptr); otherwise fall back to std::nullptr_t.
#if defined(__clang__) && !defined(_LIBCPP_VERSION) && !defined(BOOST_NO_CXX11_DECLTYPE)
typedef decltype(nullptr) addr_nullptr_t;
#else
typedef std::nullptr_t addr_nullptr_t;
#endif
// Specialize boost::detail::addressof_impl for every cv-qualified flavor of
// nullptr_t; the generic implementation fails for these on this toolchain.
template<>
struct addressof_impl<addr_nullptr_t> {
    typedef addr_nullptr_t T;
    constexpr static T* f(T& v, int) {
        return &v;
    }
};
template<>
struct addressof_impl<addr_nullptr_t const> {
    typedef addr_nullptr_t const T;
    constexpr static T* f(T& v, int) {
        return &v;
    }
};
template<>
struct addressof_impl<addr_nullptr_t volatile> {
    typedef addr_nullptr_t volatile T;
    constexpr static T* f(T& v, int) {
        return &v;
    }
};
template<>
struct addressof_impl<addr_nullptr_t const volatile> {
    typedef addr_nullptr_t const volatile T;
    constexpr static T* f( T& v, int) {
        return &v;
    }
};
} // namespace detail
} // namespace boost
#endif // BOOST_NO_CXX11_NULLPTR
#endif // defined(__clang__) && boost 1.55
|
{"hexsha": "ac1055fa95589791c8be65c92793ba5376662790", "size": 1526, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/hack/addressof.hpp", "max_stars_repo_name": "JakariaBlaine/blackhole", "max_stars_repo_head_hexsha": "e340329c6e2e3166858d8466656ad12300b686bd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 193.0, "max_stars_repo_stars_event_min_datetime": "2015-01-05T08:48:05.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-31T22:04:01.000Z", "max_issues_repo_path": "src/hack/addressof.hpp", "max_issues_repo_name": "JakariaBlaine/blackhole", "max_issues_repo_head_hexsha": "e340329c6e2e3166858d8466656ad12300b686bd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 135.0, "max_issues_repo_issues_event_min_datetime": "2015-01-13T13:02:49.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-12T15:06:48.000Z", "max_forks_repo_path": "src/hack/addressof.hpp", "max_forks_repo_name": "JakariaBlaine/blackhole", "max_forks_repo_head_hexsha": "e340329c6e2e3166858d8466656ad12300b686bd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 40.0, "max_forks_repo_forks_event_min_datetime": "2015-01-21T16:37:30.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-25T15:54:04.000Z", "avg_line_length": 21.4929577465, "max_line_length": 98, "alphanum_fraction": 0.6926605505, "num_tokens": 400}
|
using FFTW, Jets, JetPackTransforms, Test
@testset "fft, 1D, complex" begin
    n = 512
    x = rand(n) + im * rand(n)
    y = rand(n) + im * rand(n)
    op = JopFft(ComplexF64, n)
    # Forward/adjoint consistency.
    lhs, rhs = dot_product_test(op, x, y)
    @test lhs ≈ rhs
    # Forward operator is the orthonormalized FFT.
    @test op * x ≈ fft(x) / sqrt(n)
    # Adjoint is the unscaled inverse FFT (bfft) with the same normalization.
    @test op' * y ≈ bfft(y) / sqrt(n)
end
@testset "fft, alternative constructor" begin
    # Constructing from a JetSpace must match constructing from type + size.
    space = JetSpace(Float64, 512)
    opA = JopFft(space)
    opB = JopFft(Float64, 512)
    v = rand(space)
    @test opA * v ≈ opB * v
end
@testset "fft, 1D, real" begin
    n = 512
    dom = JetSpace(Float64, n)
    rng = symspace(JopFft, Float64, 1, n)
    x = rand(dom)
    y = rand(rng)
    # Make the first and last range elements real, as in the original test
    # (the last element copies the realified first element).
    y[1] = real(y[1])
    y[length(rng)] = y[1]
    op = JopFft(Float64, n)
    @test range(op) == rng
    lhs, rhs = dot_product_test(op, x, y)
    @test lhs ≈ rhs
end
@testset "fft, 2D" begin
    n1, n2 = 128, 256
    op = JopFft(ComplexF64, n1, n2)
    x = rand(domain(op))
    y = rand(range(op))
    # Skipped on Windows: this dot-product test segfaulted there,
    # presumably a BLAS bug (see original note).
    if !Sys.iswindows()
        lhs, rhs = dot_product_test(op, x, y)
        @test lhs ≈ rhs
    end
end
@testset "fft, 2D, real->complex" begin
    n1, n2 = 128, 256
    dom = JetSpace(Float64, n1, n2)
    rng = symspace(JopFft, Float64, 1, n1, n2)
    x = rand(dom)
    y = rand(rng)
    op = JopFft(Float64, n1, n2)
    @test rng == range(op)
    lhs, rhs = dot_product_test(op, x, y)
    @test lhs ≈ rhs
    # Round trip: the orthonormal transform satisfies A' * (A * x) == x.
    x = rand(dom)
    @test x ≈ op' * (op * x)
    # Forward matches rfft with orthonormal scaling.
    d = op * x
    @test parent(d) ≈ rfft(x) / sqrt(length(x))
    # Adjoint must not mutate its input, and matches brfft.
    d_saved = copy(d)
    xx = op' * d
    @test d_saved ≈ d
    xx_check = brfft(parent(d), n1) / sqrt(length(x))
    @test xx_check ≈ xx
    @test xx_check ≈ x
    @test xx ≈ x
end
@testset "fft, 1D transform of 2D array" begin
    n1, n2 = 128, 256
    op = JopFft(ComplexF64, n1, n2; dims=(1,))
    x = rand(domain(op))
    y = op * x
    # Column-by-column reference forward transform.
    y_ref = similar(y)
    for j = 1:n2
        y_ref[:, j] = fft(vec(x[:, j])) / sqrt(n1)
    end
    @test y ≈ y_ref
    # Column-by-column reference adjoint.
    z = op' * y
    z_ref = similar(z)
    for j = 1:n2
        z_ref[:, j] = bfft(vec(y[:, j])) / sqrt(n1)
    end
    @test z ≈ z_ref
    if !Sys.iswindows()
        lhs, rhs = dot_product_test(op, rand(domain(op)), rand(range(op)))
        @test lhs ≈ rhs
    end
end
@testset "fft, 1D transform of 2D array, real->complex" begin
    n1, n2 = 128, 256
    op = JopFft(Float64, n1, n2; dims=(1,))
    x = rand(domain(op))
    y = op * x
    # Column-wise rfft reference for the forward transform.
    y_ref = similar(parent(y))
    for j = 1:n2
        y_ref[:, j] = rfft(vec(x[:, j])) / sqrt(n1)
    end
    @test parent(y) ≈ y_ref
    # Column-wise brfft reference for the adjoint.
    z = op' * y
    z_ref = similar(z)
    for j = 1:n2
        z_ref[:, j] = brfft(vec(parent(y)[:, j]), n1) / sqrt(n1)
    end
    @test z ≈ z_ref
    lhs, rhs = dot_product_test(op, rand(domain(op)), rand(range(op)))
    @test lhs ≈ rhs
end
@testset "fft, 1D transform of 2D array, 2nd dim" begin
    n1, n2 = 128, 256
    op = JopFft(ComplexF64, n1, n2; dims=(2,))
    x = rand(domain(op))
    y = op * x
    # Row-by-row reference forward transform.
    y_ref = similar(y)
    for i = 1:n1
        y_ref[i, :] = fft(vec(x[i, :])) / sqrt(n2)
    end
    @test y ≈ y_ref
    z = op' * y
    z_ref = similar(z)
    for i = 1:n1
        # NOTE: the reference adjoint is built from y_ref (≈ y), exactly as
        # in the original test.
        z_ref[i, :] = bfft(vec(y_ref[i, :])) / sqrt(n2)
    end
    @test z ≈ z_ref
    if !Sys.iswindows()
        lhs, rhs = dot_product_test(op, rand(domain(op)), rand(range(op)))
        @test lhs ≈ rhs
    end
end
@testset "fft, 1D transform of 2D array, 2nd dim, real->complex" begin
    n1, n2 = 128, 256
    op = JopFft(Float64, n1, n2; dims=(2,))
    x = rand(domain(op))
    y = op * x
    # Row-wise rfft reference for the forward transform.
    y_ref = similar(parent(y))
    for i = 1:n1
        y_ref[i, :] = rfft(vec(x[i, :])) / sqrt(n2)
    end
    @test parent(y) ≈ y_ref
    z = op' * y
    z_ref = similar(z)
    for i = 1:n1
        # NOTE: the reference adjoint is built from y_ref, as in the original.
        z_ref[i, :] = brfft(vec(y_ref[i, :]), n2) / sqrt(n2)
    end
    @test z ≈ z_ref
    lhs, rhs = dot_product_test(op, rand(domain(op)), rand(range(op)))
    @test lhs ≈ rhs
end
@testset "fft, 2D of 3D array, real to complex along first two dims" begin
    n1, n2, n3 = 128, 256, 10
    op = JopFft(Float64, n1, n2, n3; dims=(1, 2))
    x = rand(domain(op))
    y = op * x
    # Slice-by-slice 2D rfft reference along the third dimension.
    y_ref = similar(parent(y))
    for k = 1:n3
        y_ref[:, :, k] = rfft(x[:, :, k]) / sqrt(n1 * n2)
    end
    @test parent(y) ≈ y_ref
    lhs, rhs = dot_product_test(op, rand(domain(op)), rand(range(op)))
    @test lhs ≈ rhs
end
|
{"hexsha": "22e4d91368f1544737592a2de546d525c854bfa0", "size": 4593, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/jop_fft.jl", "max_stars_repo_name": "ChevronETC/JetPackTransforms.jl", "max_stars_repo_head_hexsha": "0609c749455d539076057796411d627fb30a8e7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/jop_fft.jl", "max_issues_repo_name": "ChevronETC/JetPackTransforms.jl", "max_issues_repo_head_hexsha": "0609c749455d539076057796411d627fb30a8e7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-11-02T19:30:23.000Z", "max_issues_repo_issues_event_max_datetime": "2020-11-02T19:30:23.000Z", "max_forks_repo_path": "test/jop_fft.jl", "max_forks_repo_name": "ChevronETC/JetPackTransforms.jl", "max_forks_repo_head_hexsha": "0609c749455d539076057796411d627fb30a8e7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.3756906077, "max_line_length": 74, "alphanum_fraction": 0.5610711953, "num_tokens": 1710}
|
@testset "geometry.coords3d" begin
    @testset "cartesian3d" begin
        # Four 3D points supplied as a 4x3 matrix.
        mat = float.([1 2 3; 4 5 6; 7 8 9; 10 11 12])
        pts = cartesian3d(mat)
        @test size(pts.coords) == (4, 3)
        # Column sums: x = 1+4+7+10, y = 2+5+8+11, z = 3+6+9+12.
        @test sum(x_components(pts)) == 22
        @test sum(y_components(pts)) == 26
        @test sum(z_components(pts)) == 30
    end
end # geometry.coords3d
|
{"hexsha": "3a1220031958dbc40600d81198ff16fdb12fb7f2", "size": 313, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/geometry/coords3d.jl", "max_stars_repo_name": "Kelvyn88/MolecularGraph.jl", "max_stars_repo_head_hexsha": "ffe7732400dd16c7f5ddfb61972616fa6392cd8f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/geometry/coords3d.jl", "max_issues_repo_name": "Kelvyn88/MolecularGraph.jl", "max_issues_repo_head_hexsha": "ffe7732400dd16c7f5ddfb61972616fa6392cd8f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/geometry/coords3d.jl", "max_forks_repo_name": "Kelvyn88/MolecularGraph.jl", "max_forks_repo_head_hexsha": "ffe7732400dd16c7f5ddfb61972616fa6392cd8f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.3571428571, "max_line_length": 62, "alphanum_fraction": 0.6325878594, "num_tokens": 126}
|
using KernelRidgeRegression
using MLKernels
using Base.Test          # legacy (pre-1.0) Julia test framework
using StatsBase

# Kernel equality sanity check.
@test GaussianKernel(3.0) == GaussianKernel(3.0)

# Noisy 1-D sinc regression problem.  Legacy Julia 0.x constructs are kept
# throughout (implicit matrix-scalar broadcast, squeeze, contains, fitPar).
N = 5000
x = rand(1, N) * 4π - 2π
yy = sinc.(x) # vec(sinc.(4 .* x) .+ 0.2 .* sin.(30 .* x))
y = squeeze(yy + 0.1randn(1, N), 1)
xnew = collect(-2.5π:0.01:2.5π)'

# Exact kernel ridge regression.
mykrr = fit(KRR, x, y, 1e-3/5000,
            GaussianKernel(100.0))
ynew = predict(mykrr, xnew);

# Nystrom low-rank approximation.
mynystkrr = fit(NystromKRR,
                x, y, 10.0, 280,
                GaussianKernel(100.0))
ynystnew = predict(mynystkrr, xnew)

# Subset-of-regressors approximation.
mysrkrr = fit(SubsetRegressorsKRR,
              x, y, 1.0, 280,
              GaussianKernel(100.0))
ysrnew = predict(mysrkrr, xnew)

# Divide-and-conquer FastKRR, serial fit.
myfastkrr = fit(FastKRR,
                x, y, 4/5000, 11,
                GaussianKernel(100.0))
yfastnew = predict(myfastkrr, xnew)

# FastKRR fitted through the parallel entry point.
myfastkrr2 = fitPar(FastKRR, length(x),
                    (i) -> x[:, i], (i) -> y[i],
                    4/5000, 11, GaussianKernel(100.0))
# BUG FIX: this previously predicted with `myfastkrr`, so the fitPar model's
# predictions were never actually exercised.
yfastnew2 = predict(myfastkrr2, xnew)

# Truncated-Newton solver.
mytnkrr = fit(TruncatedNewtonKRR,
              x, y, 4/5000, GaussianKernel(100.0),
              0.5, 200)
ytnnew = predict(mytnkrr, xnew)

# Random Fourier features approximation.
myrandkrr = fit(RandomFourierFeatures,
                x, y, 1/500.0, 500, 1.0)
yrandnew = predict(myrandkrr, xnew)

# RMS errors against the noiseless sinc; every model must beat the trivial
# mean predictor.
emean = sqrt(mean((vec(sinc.(xnew)) - mean(xnew)) .^ 2))
ekrr = sqrt(mean((vec(sinc.(xnew)) - ynew) .^ 2))
enyst = sqrt(mean((vec(sinc.(xnew)) - ynystnew) .^ 2))
esr = sqrt(mean((vec(sinc.(xnew)) - ysrnew) .^ 2))
efast = sqrt(mean((vec(sinc.(xnew)) - yfastnew) .^ 2))
erand = sqrt(mean((vec(sinc.(xnew)) - yrandnew) .^ 2))
etn = sqrt(mean((vec(sinc.(xnew)) - ytnnew) .^ 2))
@test eltype(emean) == Float64
@test eltype(ekrr ) == Float64
@test eltype(enyst) == Float64
@test eltype(esr) == Float64
@test eltype(efast) == Float64
@test eltype(erand) == Float64
@test eltype(etn ) == Float64
@test emean > ekrr
@test emean > enyst
@test emean > esr
@test emean > efast
@test emean > erand
@test emean > etn

# Exact show()-output regression tests.
# from julia/test/show.jl:
replstr(x) = sprint((io, y′) -> show(IOContext(io, :limit => true), MIME("text/plain"), y′), x)
@test replstr(mykrr) == "KernelRidgeRegression.KRR{Float64}:\n λ = 2.0e-7\n ϕ = SquaredExponentialKernel(100.0)"
@test contains(replstr(myrandkrr), "KernelRidgeRegression.RandomFourierFeatures{Float64,Complex{Float64}}:\n λ = 0.002\n: σ = 1.0\n: K = 500\n ϕ = KernelRidgeRegression")
@test replstr(mynystkrr) == "KernelRidgeRegression.NystromKRR{Float64}:\n λ = 10.0\n ϕ = SquaredExponentialKernel(100.0)\n m = 280"
@test replstr(mysrkrr) == "KernelRidgeRegression.SubsetRegressorsKRR{Float64}:\n λ = 1.0\n ϕ = SquaredExponentialKernel(100.0)\n m = 280"
@test replstr(myfastkrr) == "KernelRidgeRegression.FastKRR{Float64}:\n λ = 0.0008\n m = 11\n ϕ = SquaredExponentialKernel(100.0)"
@test replstr(myfastkrr2) == "KernelRidgeRegression.FastKRR{Float64}:\n λ = 0.0008\n m = 11\n ϕ = SquaredExponentialKernel(100.0)"
@test replstr(mytnkrr) == "KernelRidgeRegression.TruncatedNewtonKRR{Float64}:\n λ = 0.0008\n ϕ = SquaredExponentialKernel(100.0)"
|
{"hexsha": "6426a56633675db8c70ee5a76c7e558112763d3e", "size": 3106, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/runtests.jl", "max_stars_repo_name": "mattborghi/KernelRidgeRegression.jl", "max_stars_repo_head_hexsha": "ea79e41b04672782efd01bc5d9b295adbc9d3f74", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2017-06-28T19:42:13.000Z", "max_stars_repo_stars_event_max_datetime": "2017-06-29T12:13:36.000Z", "max_issues_repo_path": "test/runtests.jl", "max_issues_repo_name": "mattborghi/KernelRidgeRegression.jl", "max_issues_repo_head_hexsha": "ea79e41b04672782efd01bc5d9b295adbc9d3f74", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2017-06-29T08:58:08.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-31T20:20:00.000Z", "max_forks_repo_path": "test/runtests.jl", "max_forks_repo_name": "mattborghi/KernelRidgeRegression.jl", "max_forks_repo_head_hexsha": "ea79e41b04672782efd01bc5d9b295adbc9d3f74", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-06-25T13:44:38.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-31T20:15:45.000Z", "avg_line_length": 37.8780487805, "max_line_length": 182, "alphanum_fraction": 0.6255634256, "num_tokens": 1171}
|
from collections import Iterable
import numpy as np
import openmdao.api as om
from .constants import INF_BOUND
class _ReprClass(object):
"""
Class for defining objects with a simple constant string __repr__.
This is useful for constants used in arg lists when you want them to appear in
automatically generated source documentation as a certain string instead of python's
default representation.
"""
def __init__(self, repr_string):
"""
Initialize the __repr__ string.
Parameters
----------
repr_string : str
The string to be returned by __repr__
"""
self._repr_string = repr_string
def __repr__(self):
"""
Return our _repr_string.
Returns
-------
str
Whatever string we were initialized with.
"""
return self._repr_string
def __str__(self):
return self._repr_string
# Unique sentinel object used to detect "argument not given"; needed because
# None is itself a legal value for many options.
_unspecified = _ReprClass("unspecified")
def get_rate_units(units, time_units, deriv=1):
    """
    Return a string for rate units given units for the variable and time units.

    Parameters
    ----------
    units : str
        Units of a given variable.
    time_units : str
        Time units.
    deriv : int
        If 1, provide the units of the first derivative. If 2,
        provide the units of the second derivative.

    Returns
    -------
    str
        Corresponding rate units for the given variable.
    """
    if deriv not in (1, 2):
        raise ValueError('deriv argument must be 1 or 2.')

    # Denominator is time (first derivative) or time squared (second).
    denom = time_units if deriv == 1 else f'{time_units}**2'

    # Four cases depending on which of the two unit strings are known.
    if units is None and time_units is None:
        return None
    if time_units is None:
        return units
    if units is None:
        return f'1.0/{denom}'
    return f'{units}/{denom}'
def get_target_metadata(ode, name, user_targets=_unspecified, user_units=_unspecified,
                        user_shape=_unspecified, control_rate=False, user_static_target=_unspecified):
    """
    Return the targets of a state variable in a given ODE system.

    If the targets of the state is _unspecified, and the state name is a top level input name
    in the ODE, then the state values are automatically connected to that top-level input.
    If _unspecified and not a top-level input of the ODE, no connection is made.
    If targets is explicitly None, then no connection is made.
    Otherwise, if the user specified some other string or sequence of strings as targets, then
    those are returned.

    Parameters
    ----------
    ode : om.System
        The OpenMDAO system which serves as the ODE for dymos. This system should already have
        had its setup and configure methods called.
    name : str
        The name of the variable whose targets are desired.
    user_targets : str or None or Sequence or _unspecified
        Targets for the variable as given by the user.
    user_units : str or None or _unspecified
        Units for the variable as given by the user.
    user_shape : None or Sequence or _unspecified
        Shape for the variable as given by the user.
    control_rate : bool
        When True, check for the control rate if the name is not in the ODE.
    user_static_target : bool or None or _unspecified
        When False, assume the shape of the target in the ODE includes the number of nodes as the
        first dimension. If True, the connecting parameter does not need to be "fanned out" to
        connect to each node. If _unspecified, attempt to resolve by the presence of a tag
        `dymos.static_target` on the target variable, which is the same as `static_target=True`.

    Returns
    -------
    shape : tuple
        The shape of the variable. If not specified, shape is taken from the ODE targets.
    units : str
        The units of the variable. If not specified, units are taken from the ODE targets.

    Notes
    -----
    This method requires that the ODE has run its setup and configure methods. Thus,
    this method should be called from configure of some parent Group, and the ODE should
    be a system within that Group.
    """
    # True when the resolved target is a rate (or rate2) input, which changes
    # how units are derived below.
    rate_src = False
    # Map of promoted input name -> metadata dict for every input of the ODE.
    ode_inputs = {opts['prom_name']: opts for (k, opts) in
                  ode.get_io_metadata(iotypes=('input',), get_remote=True).items()}
    if user_targets is _unspecified:
        # No explicit targets: auto-connect by name, or by rate name when
        # control_rate is enabled.
        if name in ode_inputs:
            targets = [name]
        elif control_rate and f'{name}_rate' in ode_inputs:
            targets = [f'{name}_rate']
            rate_src = True
        elif control_rate and f'{name}_rate2' in ode_inputs:
            targets = [f'{name}_rate2']
            rate_src = True
        else:
            targets = []
    elif user_targets:
        # Normalize a single string to a one-element list.
        if isinstance(user_targets, str):
            targets = [user_targets]
        else:
            targets = user_targets
    else:
        # Explicit None (or empty) means: make no connection.
        targets = []
    if user_units is _unspecified:
        # Derive units from the targets; all targets must agree.
        target_units_set = {ode_inputs[tgt]['units'] for tgt in targets}
        if len(target_units_set) == 1:
            units = next(iter(target_units_set))
            # A rate target's units are integrated back (multiplied by time);
            # 's' appears to be assumed as the time unit here — TODO confirm.
            if rate_src:
                units = f"{units}*s"
        else:
            raise ValueError(f'Unable to automatically assign units to {name}. '
                             f'Targets have multiple units: {target_units_set}. '
                             f'Either promote targets and use set_input_defaults to assign common '
                             f'units, or explicitly provide them to {name}.')
    else:
        units = user_units
    # Resolve whether the targets is static or dynamic
    static_target_tags = [tgt for tgt in targets if 'dymos.static_target' in ode_inputs[tgt]['tags']]
    if static_target_tags:
        static_target = True
        # Only an explicit falsy user value conflicts with the tag;
        # _unspecified is truthy and therefore passes this check.
        if not user_static_target:
            raise ValueError(f"User has specified 'static_target = False' for parameter {name},"
                             f"but one or more targets is tagged with "
                             f"'dymos.static_target': {' '.join(static_target_tags)}")
    else:
        if user_static_target is _unspecified:
            static_target = False
        else:
            static_target = user_static_target
    if user_shape in {None, _unspecified}:
        # Resolve target shape
        target_shape_set = {ode_inputs[tgt]['shape'] for tgt in targets}
        if len(target_shape_set) == 1:
            shape = next(iter(target_shape_set))
            # Dynamic targets include the node count as the first dimension;
            # strip it to get the per-node shape.
            if not static_target:
                if len(shape) == 1:
                    shape = (1,)
                else:
                    shape = shape[1:]
        elif len(target_shape_set) == 0:
            raise ValueError(f'Unable to automatically assign a shape to {name}.\n'
                             'Targets for this variable either do not exist or have no shape set.\n'
                             'The shape for this variable must be set explicitly via the '
                             '`shape=<tuple>` argument.')
        else:
            raise ValueError(f'Unable to automatically assign a shape to {name} based on targets. '
                             f'Targets have multiple shapes assigned: {target_shape_set}. '
                             f'Change targets such that all have common shapes.')
    else:
        shape = user_shape
    return shape, units, static_target
def get_source_metadata(ode, src, user_units, user_shape):
    """
    Return the shape and units of an output variable in a given ODE system.

    User-supplied units/shape take precedence; when either is None or
    _unspecified, the corresponding value is read from the ODE output's
    metadata instead (with any leading node dimension stripped from the shape).

    Parameters
    ----------
    ode : om.System
        The OpenMDAO system which serves as the ODE for dymos. This system should already have
        had its setup and configure methods called.
    src : str
        The relative path in the ODE to the source variable whose metadata is requested.
    user_units : str or None or Sequence or _unspecified
        Units for the variable as given by the user.
    user_shape : str or None or Sequence or _unspecified
        Shape for the variable as given by the user.

    Returns
    -------
    shape : tuple
        The shape of the variable. If not specified, shape is taken from the ODE source.
    units : str
        The units of the variable. If not specified, units are taken from the ODE source.

    Notes
    -----
    This method requires that the ODE has run its setup and configure methods. Thus,
    this method should be called from configure of some parent Group, and the ODE should
    be a system within that Group.
    """
    # Map of promoted output name -> metadata dict for every output of the ODE.
    outputs = {meta['prom_name']: meta for (_, meta) in
               ode.get_io_metadata(iotypes=('output',), get_remote=True).items()}

    if src not in outputs:
        raise ValueError(f'Unable to find the source {src} in the ODE at {ode.pathname}.')

    src_meta = outputs[src]

    # Units: user-specified wins unless it is None/_unspecified.
    units = src_meta['units'] if user_units in {None, _unspecified} else user_units

    # Shape: strip the leading node dimension from the ODE output's shape.
    if user_shape in {None, _unspecified}:
        full_shape = src_meta['shape']
        shape = (1,) if len(full_shape) == 1 else full_shape[1:]
    else:
        shape = user_shape

    return shape, units
def reshape_val(val, shape, num_input_nodes):
"""
Return the given value reshaped to (num_input_nodes,) + shape.
If the value is scalar or a size-1 array, return that value multiplied by
np.ones((num_input_nodes,) + shape). If the value's shape is shape, then
repeat those values along a new first dimension. Otherwise, reshape it to
the correct shape and return that.
Parameters
----------
val : float or array-like
The values to be conformed to the desired shape.
shape : tuple
The shape of the desired output at each node.
num_input_nodes : int
The number of nodes along which the value is repeated.
Returns
-------
np.array
The given value of the correct shape.
"""
if np.isscalar(val) or np.prod(np.asarray(val).shape) == 1:
shaped_val = float(val) * np.ones((num_input_nodes,) + shape)
elif np.asarray(val).shape == shape:
shaped_val = np.repeat(val[np.newaxis, ...], num_input_nodes, axis=0)
else:
shaped_val = np.reshape(val, newshape=(num_input_nodes,) + shape)
return shaped_val
class CoerceDesvar(object):
    """
    Check the desvar options for the appropriate shape and resize accordingly with options.

    Elements fixed by the `fix_initial` / `fix_final` options are removed from
    the flattened design-variable index list at construction time.

    Parameters
    ----------
    num_input_nodes : int
        Number of input nodes.
    desvar_indices : ndarray
        Flattened indices of the variable.  When None, defaults to every
        index: list(range(size * num_input_nodes)).
    options : dict
        Variable options dictionary, should contain "shape", "fix_initial"
        and "fix_final".
    """
    def __init__(self, num_input_nodes, desvar_indices=None, options=None):
        # Fixed: imported from collections.abc rather than collections —
        # the `collections.Iterable` alias was removed in Python 3.10, so
        # relying on the module-level import breaks on modern interpreters.
        from collections.abc import Iterable

        self.num_input_nodes = num_input_nodes
        shape = options['shape']
        size = np.prod(shape, dtype=int)
        fix_initial = options['fix_initial']
        fix_final = options['fix_final']

        # Default: one flattened design-variable index per element per node.
        if desvar_indices is None:
            desvar_indices = list(range(size * num_input_nodes))

        if fix_initial:
            if isinstance(fix_initial, Iterable):
                # Per-element flags: drop only the flagged elements at the
                # first node (reverse order keeps earlier indices valid).
                idxs_to_fix = np.where(np.asarray(fix_initial))[0]
                for idx_to_fix in reversed(sorted(idxs_to_fix)):
                    del desvar_indices[idx_to_fix]
            else:
                # Scalar truthy flag: drop the entire first node.
                del desvar_indices[:size]
        if fix_final:
            if isinstance(fix_final, Iterable):
                # Per-element flags for the last node.
                idxs_to_fix = np.where(np.asarray(fix_final))[0]
                for idx_to_fix in reversed(sorted(idxs_to_fix)):
                    del desvar_indices[-size + idx_to_fix]
            else:
                # Scalar truthy flag: drop the entire last node.
                del desvar_indices[-size:]

        self.desvar_indices = desvar_indices
        self.options = options

    def __call__(self, option):
        """
        Test that an option's shape is compliant with the number of input nodes for the design variable.

        Parameters
        ----------
        option : str
            The name of the option whose value(s) are desired.

        Returns
        -------
        object
            The value of the desvar option.

        Raises
        ------
        ValueError
            If the number of values in the option is not compliant with the number of input
            nodes for the design variable.
        """
        val = self.options[option]
        # Bounds get special handling: None means unbounded (+-INF_BOUND).
        if option == 'lower':
            lb = np.zeros_like(self.desvar_indices, dtype=float)
            lb[:] = -INF_BOUND if self.options['lower'] is None else val
            return lb
        if option == 'upper':
            ub = np.zeros_like(self.desvar_indices, dtype=float)
            ub[:] = INF_BOUND if self.options['upper'] is None else val
            return ub
        if val is None or np.isscalar(val):
            return val
        # Handle value for vector/matrix valued variables
        if isinstance(val, list):
            val = np.asarray(val)
        if val.shape == self.options['shape']:
            # Repeat the per-node value across all remaining desvar indices.
            return np.tile(val.flatten(), int(len(self.desvar_indices) / val.size))
        else:
            raise ValueError('array-valued option {0} must have same shape '
                             'as states ({1})'.format(option, self.options['shape']))
def CompWrapperConfig(comp_class):
    """
    Returns a wrapped comp_class that calls its configure_io method at the end of setup.

    This allows for standalone testing of Dymos components that normally require their parent group
    to configure them.

    Parameters
    ----------
    comp_class : Component class
        Class that we would like to wrap.

    Returns
    -------
    WrappedClass
        Wrapped version of comp_class.
    """
    class WrappedClass(comp_class):

        def setup(self):
            """
            Run the component's normal setup, then immediately configure its I/O.
            """
            super().setup()
            self.configure_io()

    return WrappedClass
# Modify class so we can run it standalone.
def GroupWrapperConfig(comp_class):
    """
    Return a wrapped group_class that calls configure_io during configure.

    This allows standalone testing of Dymos groups that normally require
    their parent group to configure them.

    Parameters
    ----------
    comp_class : Group class
        Class that we would like to wrap.

    Returns
    -------
    WrappedClass
        Wrapped version of comp_class.
    """
    class WrappedClass(comp_class):

        def setup(self):
            """Defer to the wrapped class's setup unchanged."""
            super().setup()

        def configure(self):
            """Perform the configure_io step as this group's configure."""
            self.configure_io()

    return WrappedClass
|
{"hexsha": "6a1e8bee26acaa8b8857dd7dcbfc604a211cdafe", "size": 15392, "ext": "py", "lang": "Python", "max_stars_repo_path": "dymos/utils/misc.py", "max_stars_repo_name": "kaushikponnapalli/dymos", "max_stars_repo_head_hexsha": "3fba91d0fc2c0e8460717b1bec80774676287739", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "dymos/utils/misc.py", "max_issues_repo_name": "kaushikponnapalli/dymos", "max_issues_repo_head_hexsha": "3fba91d0fc2c0e8460717b1bec80774676287739", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "dymos/utils/misc.py", "max_forks_repo_name": "kaushikponnapalli/dymos", "max_forks_repo_head_hexsha": "3fba91d0fc2c0e8460717b1bec80774676287739", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.9024943311, "max_line_length": 104, "alphanum_fraction": 0.6186330561, "include": true, "reason": "import numpy", "num_tokens": 3333}
|
"""This module creates and manages a SQL database as a log for all jobs
submitted via the exoctk web app.
Authors
-------
- Joe Filippazzo
Use
---
This module is intended to be imported and used within a separate
python environment, e.g.
::
from exoctk import log_exoctk
log_exoctk.create_db()
Dependencies
------------
- ``astropy``
- ``numpy``
- ``pathlib``
- ``sqlite3``
"""
import os
import datetime
import astropy.table as at
import numpy as np
from pathlib import Path
import sqlite3
def create_db(dbpath, overwrite=True):
    """Create a new ExoCTK log database at the given ``dbpath``.

    Parameters
    ----------
    dbpath : str
        The full path for the new database, including the filename and
        ``.db`` file extension, or ``':memory:'`` for an in-memory database.
    overwrite : bool
        Overwrite dbpath if it already exists.

    Raises
    ------
    IOError
        If the parent directory of ``dbpath`` does not exist, or the path
        does not end in ``.db``.
    """
    if dbpath != ':memory:':
        # Make sure the parent directory is valid. A bare filename has an
        # empty dirname, which is a legal location (the current directory);
        # the original code incorrectly rejected that case because
        # os.path.exists('') is False.
        parent = os.path.dirname(dbpath)
        if parent and not os.path.exists(parent):
            raise IOError('Not a valid path:', dbpath)

        # Make sure the file is a .db
        if not dbpath.endswith('.db'):
            raise IOError('Please provide a path with a .db file extension')

        # Use pathlib.Path to become compliant with bandit.
        p = Path(dbpath)

        # Remove existing file if overwriting
        if p.is_file() and overwrite:
            p.unlink()

        # Make the new file
        p.touch()

    # Generate the tables
    conn = sqlite3.connect(dbpath)
    cur = conn.cursor()

    # Table for groups_integrations
    cur.execute("CREATE TABLE 'groups_integrations' ('id' INTEGER NOT NULL UNIQUE, 'date' TEXT NOT NULL, 'targname' TEXT, 'kmag' REAL, 'mod' TEXT, 'obs_time' REAL, 'n_group' REAL, 'ins' TEXT, 'filt' TEXT, 'filt_ta' TEXT, 'subarray' TEXT, 'subarray_ta' TEXT, 'sat_mode' TEXT, 'sat_max' REAL, PRIMARY KEY(id));")

    # Table for limb_darkening
    cur.execute("CREATE TABLE 'limb_darkening' ('id' INTEGER NOT NULL UNIQUE, 'date' TEXT NOT NULL, 'n_bins' INTEGER, 'teff' REAL, 'logg' REAL, 'feh' REAL, 'bandpass' TEXT, 'modeldir' TEXT, 'wave_min' REAL, 'mu_min' REAL, 'wave_max' REAL, 'local_files' TEXT, 'pixels_per_bin' INTEGER, 'uniform' TEXT, 'linear' TEXT, 'quadratic' TEXT, 'squareroot' TEXT, 'logarithmic' TEXT, 'exponential' TEXT, 'three_parameter' TEXT, 'four_parameter' TEXT, PRIMARY KEY(id));")

    # Table for contam_visibility
    cur.execute("CREATE TABLE 'contam_visibility' ('id' INTEGER NOT NULL UNIQUE, 'date' TEXT NOT NULL, 'targname' TEXT, 'ra' REAL, 'dec' REAL, 'inst' TEXT, 'companion' TEXT, PRIMARY KEY(id));")

    # Table for phase_constraint
    cur.execute("CREATE TABLE 'phase_constraint' ('id' INTEGER NOT NULL UNIQUE, 'date' TEXT NOT NULL, 'targname' TEXT, 'orbital_period' REAL, 'eccentricity' REAL, 'transit_type' TEXT, 'omega' REAL, 'inclination' REAL, 'transit_time' REAL, 'window_size' REAL, 'observation_duration' REAL, 'minimum_phase' REAL, 'maximum_phase' REAL, PRIMARY KEY(id));")

    # Table for fortney grid
    cur.execute("CREATE TABLE 'fortney' ('id' INTEGER NOT NULL UNIQUE, 'date' TEXT NOT NULL, 'ptemp' REAL, 'pchem' TEXT, 'cloud' TEXT, 'pmass' REAL, 'm_unit' TEXT, 'refrad' REAL, 'r_unit' TEXT, 'rstar' REAL, 'rstar_unit' TEXT, PRIMARY KEY(id));")

    # Table for generic grid
    cur.execute("CREATE TABLE 'generic' ('id' INTEGER NOT NULL UNIQUE, 'date' TEXT NOT NULL, 'temperature' REAL, 'gravity' REAL, 'r_planet' REAL, 'r_star' REAL, 'condensation' TEXT, 'metallicity' REAL, 'c_o' REAL, 'haze' REAL, 'cloud' REAL, PRIMARY KEY(id));")

    # Commit and close the connection
    conn.commit()
    conn.close()

    if os.path.isfile(dbpath):
        print("ExoCTK database created at {}".format(dbpath))
def load_db(dbpath):
    """Load a database and return a cursor to it.

    Parameters
    ----------
    dbpath : str
        The path to the ``.db`` database file (or ``':memory:'``).

    Returns
    -------
    cur : ``sqlite3.Cursor`` obj or None
        A cursor into the database, or None (with a printed message) when
        the file cannot be found.
    """
    # Guard clause: anything other than an existing file or ':memory:' is a miss.
    if dbpath != ':memory:' and not os.path.isfile(dbpath):
        print("Sorry, could not find the file '{}'".format(dbpath))
        return None

    con = sqlite3.connect(dbpath, isolation_level=None, detect_types=sqlite3.PARSE_DECLTYPES, check_same_thread=False)
    cur = con.cursor()
    print('Database loaded: {}'.format(dbpath))
    return cur
def log_form_input(form_dict, table, database):
    """Store the form inputs of a page ``GET`` request in the database.

    Any failure is caught and reported, so logging never interrupts the app.

    Parameters
    ----------
    form_dict : dict
        The dictionary of form inputs.
    table : str
        The table name to INSERT on.
    database : ``sqlite3.Cursor`` obj
        The database cursor object.
    """
    try:
        # Column names are the second field of each PRAGMA table_info row.
        colnames = np.array(database.execute("PRAGMA table_info('{}')".format(table)).fetchall()).T[1]

        # Convert hyphens to underscores and leading numerics to letters so
        # form keys become legal db column names.
        entry = {key.replace('-', '_').replace('3', 'three').replace('4', 'four'): value
                 for key, value in form_dict.items()}

        # Timestamp the submission.
        entry['date'] = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")

        # Keep only the form values that are valid column names.
        valid_cols = [name for name in entry if name in colnames]
        values = [str(entry.get(name, 'none')) for name in valid_cols]
        placeholders = ', '.join('?' * len(valid_cols))
        qry = "Insert Into {} ({}) Values ({})".format(table, ', '.join(valid_cols), placeholders)
        database.execute(qry, values)
    except Exception as err:
        print('Could not log form submission.')
        print(err)
def view_log(database, table, limit=50):
    """Visually inspect the job log.

    Parameters
    ----------
    database : str or ``sqlite3.Cursor`` obj
        The path to a ``.db`` file, or an open database cursor.
    table : str
        The table name.
    limit : int
        The number of records to show.

    Returns
    -------
    table : ``astropy.table.Table`` obj or None
        A table containing the results, or None when *database* is neither
        a path nor a cursor.
    """
    if isinstance(database, str):
        DB = load_db(database)
    elif isinstance(database, sqlite3.Cursor):
        DB = database
    else:
        # Fix: the original printed this message and then fell through to a
        # NameError on the undefined DB below. Bail out explicitly instead.
        print("Please enter the path to a .db file or a sqlite.Cursor object.")
        return None

    # Query the database
    colnames = np.array(DB.execute("PRAGMA table_info('{}')".format(table)).fetchall()).T[1]
    results = DB.execute("SELECT * FROM {} LIMIT {}".format(table, limit)).fetchall()

    # Empty table with object-dtype columns, then fill it row by row
    # (iterating an empty result list is a no-op, so no length guard needed).
    table = at.Table(names=colnames, dtype=['O'] * len(colnames))
    for row in results:
        table.add_row(row)

    return table
def scrub(table_name):
    """Sanitize a table name to prevent SQL injection attacks! PEW PEW PEW!

    Keeps alphanumeric characters and underscores. The original dropped
    underscores too, which would mangle every real table name in this module
    (e.g. ``'limb_darkening'`` became ``'limbdarkening'``).
    """
    return ''.join(char for char in table_name if char.isalnum() or char == '_')
|
{"hexsha": "2b4bbbea1a71882b2233d5df63416cedfe020012", "size": 6893, "ext": "py", "lang": "Python", "max_stars_repo_path": "exoctk/log_exoctk.py", "max_stars_repo_name": "bourque/exoctk", "max_stars_repo_head_hexsha": "1d2f8e7b9c00e74033626d81593b1f879b7df6ad", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "exoctk/log_exoctk.py", "max_issues_repo_name": "bourque/exoctk", "max_issues_repo_head_hexsha": "1d2f8e7b9c00e74033626d81593b1f879b7df6ad", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "exoctk/log_exoctk.py", "max_forks_repo_name": "bourque/exoctk", "max_forks_repo_head_hexsha": "1d2f8e7b9c00e74033626d81593b1f879b7df6ad", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.980861244, "max_line_length": 459, "alphanum_fraction": 0.6335412738, "include": true, "reason": "import numpy,import astropy", "num_tokens": 1781}
|
(************************************************************************)
(* * The Coq Proof Assistant / The Coq Development Team *)
(* v * INRIA, CNRS and contributors - Copyright 1999-2018 *)
(* <O___,, * (see CREDITS file for the list of authors) *)
(* \VV/ **************************************************************)
(* // * This file is distributed under the terms of the *)
(* * GNU Lesser General Public License Version 2.1 *)
(* * (see LICENSE file for the text of the license) *)
(************************************************************************)
(* A <X1,...,Xn>: non commutative polynomials on a commutative ring A *)
Set Implicit Arguments.
Require Import Setoid.
Require Import BinList.
Require Import BinPos.
Require Import BinNat.
Require Import BinInt.
Require Export Ring_polynom. (* n'utilise que PExpr *)
Require Export Ncring.
Section MakeRingPol.
Context (C R:Type) `{Rh:Ring_morphism C R}.
(* Coefficients [c] commute with every ring element, even though R itself
   need not be commutative. *)
Variable phiCR_comm: forall (c:C)(x:R), x * [c] == [c] * x.
Ltac rsimpl := repeat (gen_rewrite || rewrite phiCR_comm).
Ltac add_push := gen_add_push .
(* Definition of non commutative multivariable polynomials
with coefficients in C :
*)
Inductive Pol : Type :=
| Pc : C -> Pol
| PX : Pol -> positive -> positive -> Pol -> Pol.
(* PX P i n Q represents P * X_i^n + Q *)
Definition cO:C . exact ring0. Defined.
Definition cI:C . exact ring1. Defined.
Definition P0 := Pc 0.
Definition P1 := Pc 1.
(* Decidable (boolean) equality on coefficients, sound w.r.t. == *)
Variable Ceqb:C->C->bool.
Class Equalityb (A : Type):= {equalityb : A -> A -> bool}.
Notation "x =? y" := (equalityb x y) (at level 70, no associativity).
Variable Ceqb_eq: forall x y:C, Ceqb x y = true -> (x == y).
Instance equalityb_coef : Equalityb C :=
{equalityb x y := Ceqb x y}.
(* Structural boolean equality on polynomials. *)
Fixpoint Peq (P P' : Pol) {struct P'} : bool :=
match P, P' with
| Pc c, Pc c' => c =? c'
| PX P i n Q, PX P' i' n' Q' =>
match Pos.compare i i', Pos.compare n n' with
| Eq, Eq => if Peq P P' then Peq Q Q' else false
| _,_ => false
end
| _, _ => false
end.
Instance equalityb_pol : Equalityb Pol :=
{equalityb x y := Peq x y}.
(* Q has its tail variables < i *)
(* Smart constructor: builds P * X_i^n + Q while preserving normal form
   (drops zero coefficients, merges exponents of the same variable). *)
Definition mkPX P i n Q :=
match P with
| Pc c => if c =? 0 then Q else PX P i n Q
| PX P' i' n' Q' =>
match Pos.compare i i' with
| Eq => if Q' =? P0 then PX P' i (n + n') Q else PX P i n Q
| _ => PX P i n Q
end
end.
Definition mkXi i n := PX P1 i n P0.
Definition mkX i := mkXi i 1.
(** Opposite *)
Fixpoint Popp (P:Pol) : Pol :=
match P with
| Pc c => Pc (- c)
| PX P i n Q => PX (Popp P) i n (Popp Q)
end.
Notation "-- P" := (Popp P)(at level 30).
(** Addition and subtraction *)
(* Add the constant c to the tail coefficient of P. *)
Fixpoint PaddCl (c:C)(P:Pol) {struct P} : Pol :=
match P with
| Pc c1 => Pc (c + c1)
| PX P i n Q => PX P i n (PaddCl c Q)
end.
(* Q arbitrary *)
Section PaddX.
Variable Padd:Pol->Pol->Pol.
Variable P:Pol.
(* Xi^n * P + Q
   the head variables of Q are not necessarily < i,
   but Q is normalized: head variables are decreasing *)
Fixpoint PaddX (i n:positive)(Q:Pol){struct Q}:=
match Q with
| Pc c => mkPX P i n Q
| PX P' i' n' Q' =>
match Pos.compare i i' with
| (* i > i' *)
Gt => mkPX P i n Q
| (* i < i' *)
Lt => mkPX P' i' n' (PaddX i n Q')
| (* i = i' *)
Eq => match Z.pos_sub n n' with
| (* n > n' *)
Zpos k => mkPX (PaddX i k P') i' n' Q'
| (* n = n' *)
Z0 => mkPX (Padd P P') i n Q'
| (* n < n' *)
Zneg k => mkPX (Padd P (mkPX P' i k P0)) i n Q'
end
end
end.
End PaddX.
(* Addition of normalized polynomials; recursion on the first argument. *)
Fixpoint Padd (P1 P2: Pol) {struct P1} : Pol :=
match P1 with
| Pc c => PaddCl c P2
| PX P' i' n' Q' =>
PaddX Padd P' i' n' (Padd Q' P2)
end.
Notation "P ++ P'" := (Padd P P').
Definition Psub(P P':Pol):= P ++ (--P').
Notation "P -- P'" := (Psub P P')(at level 50).
(** Multiplication *)
(* Multiply P by the constant c (on the right, since R is non commutative). *)
Fixpoint PmulC_aux (P:Pol) (c:C) {struct P} : Pol :=
match P with
| Pc c' => Pc (c' * c)
| PX P i n Q => mkPX (PmulC_aux P c) i n (PmulC_aux Q c)
end.
(* Short-circuits on c = 0 and c = 1 before falling back to PmulC_aux. *)
Definition PmulC P c :=
if c =? 0 then P0 else
if c =? 1 then P else PmulC_aux P c.
Fixpoint Pmul (P1 P2 : Pol) {struct P2} : Pol :=
match P2 with
| Pc c => PmulC P1 c
| PX P i n Q =>
PaddX Padd (Pmul P1 P) i n (Pmul P1 Q)
end.
Notation "P ** P'" := (Pmul P P')(at level 40).
Definition Psquare (P:Pol) : Pol := P ** P.
(** Evaluation of a polynomial towards R *)
(* l assigns a value in R to each variable index; [c] injects coefficients. *)
Fixpoint Pphi(l:list R) (P:Pol) {struct P} : R :=
match P with
| Pc c => [c]
| PX P i n Q =>
let x := nth 0 i l in
let xn := pow_pos x n in
(Pphi l P) * xn + (Pphi l Q)
end.
Reserved Notation "P @ l " (at level 10, no associativity).
Notation "P @ l " := (Pphi l P).
(** Proofs *)
(* Case-split on Z.pos_sub, naming the discrimination hypothesis H. *)
Ltac destr_pos_sub H :=
match goal with |- context [Z.pos_sub ?x ?y] =>
assert (H := Z.pos_sub_discr x y); destruct (Z.pos_sub x y)
end.
(* Boolean equality of polynomials is sound w.r.t. evaluation. *)
Lemma Peq_ok : forall P P',
(P =? P') = true -> forall l, P@l == P'@ l.
Proof.
induction P;destruct P';simpl;intros ;try easy.
- now apply ring_morphism_eq, Ceqb_eq.
- specialize (IHP1 P'1). specialize (IHP2 P'2).
simpl in IHP1, IHP2.
destruct (Pos.compare_spec p p1); try discriminate;
destruct (Pos.compare_spec p0 p2); try discriminate.
destruct (Peq P2 P'1); try discriminate.
subst; now rewrite IHP1, IHP2.
Qed.
Lemma Pphi0 : forall l, P0@l == 0.
Proof.
intros;simpl.
rewrite ring_morphism0. reflexivity.
Qed.
Lemma Pphi1 : forall l, P1@l == 1.
Proof.
intros;simpl; rewrite ring_morphism1. reflexivity.
Qed.
(* The smart constructor mkPX evaluates like a plain PX node. *)
Lemma mkPX_ok : forall l P i n Q,
(mkPX P i n Q)@l == P@l * (pow_pos (nth 0 i l) n) + Q@l.
Proof.
intros l P i n Q;unfold mkPX.
destruct P;try (simpl;reflexivity).
assert (Hh := ring_morphism_eq c 0).
simpl; case_eq (Ceqb c 0);simpl;try reflexivity.
intros.
rewrite Hh. rewrite ring_morphism0.
rsimpl. apply Ceqb_eq. trivial.
destruct (Pos.compare_spec i p).
assert (Hh := @Peq_ok P3 P0). case_eq (P3=? P0). intro. simpl.
rewrite Hh.
rewrite Pphi0. rsimpl. rewrite Pos.add_comm. rewrite pow_pos_add;rsimpl.
subst;trivial. reflexivity. trivial. intros. simpl. reflexivity. simpl. reflexivity.
simpl. reflexivity.
Qed.
(* Simplify evaluations of P0/P1/mkPX and push the morphism through
   coefficient operations. *)
Ltac Esimpl :=
repeat (progress (
match goal with
| |- context [?P@?l] =>
match P with
| P0 => rewrite (Pphi0 l)
| P1 => rewrite (Pphi1 l)
| (mkPX ?P ?i ?n ?Q) => rewrite (mkPX_ok l P i n Q)
end
| |- context [[?c]] =>
match c with
| 0 => rewrite ring_morphism0
| 1 => rewrite ring_morphism1
| ?x + ?y => rewrite ring_morphism_add
| ?x * ?y => rewrite ring_morphism_mul
| ?x - ?y => rewrite ring_morphism_sub
| - ?x => rewrite ring_morphism_opp
end
end));
simpl; rsimpl.
(* Correctness of constant addition w.r.t. evaluation. *)
Lemma PaddCl_ok : forall c P l, (PaddCl c P)@l == [c] + P@l .
Proof.
induction P; simpl; intros; Esimpl; try reflexivity.
rewrite IHP2. rsimpl.
rewrite (ring_add_comm (P2 @ l * pow_pos (nth 0 p l) p0) [c]).
reflexivity.
Qed.
Lemma PmulC_aux_ok : forall c P l, (PmulC_aux P c)@l == P@l * [c].
Proof.
induction P;simpl;intros. rewrite ring_morphism_mul.
try reflexivity.
simpl. Esimpl. rewrite IHP1;rewrite IHP2;rsimpl.
Qed.
Lemma PmulC_ok : forall c P l, (PmulC P c)@l == P@l * [c].
Proof.
intros c P l; unfold PmulC.
assert (Hh:= ring_morphism_eq c 0);case_eq (c =? 0). intros.
rewrite Hh;Esimpl. apply Ceqb_eq;trivial.
assert (H1h:= ring_morphism_eq c 1);case_eq (c =? 1);intros.
rewrite H1h;Esimpl. apply Ceqb_eq;trivial.
apply PmulC_aux_ok.
Qed.
Lemma Popp_ok : forall P l, (--P)@l == - P@l.
Proof.
induction P;simpl;intros.
Esimpl.
rewrite IHP1;rewrite IHP2;rsimpl.
Qed.
(* Esimpl extended with the correctness lemmas just proved. *)
Ltac Esimpl2 :=
Esimpl;
repeat (progress (
match goal with
| |- context [(PaddCl ?c ?P)@?l] => rewrite (PaddCl_ok c P l)
| |- context [(PmulC ?P ?c)@?l] => rewrite (PmulC_ok c P l)
| |- context [(--?P)@?l] => rewrite (Popp_ok P l)
end)); Esimpl.
(* Unfolding lemma: PaddX applied to a PX node reduces as written. *)
Lemma PaddXPX: forall P i n Q,
PaddX Padd P i n Q =
match Q with
| Pc c => mkPX P i n Q
| PX P' i' n' Q' =>
match Pos.compare i i' with
| (* i > i' *)
Gt => mkPX P i n Q
| (* i < i' *)
Lt => mkPX P' i' n' (PaddX Padd P i n Q')
| (* i = i' *)
Eq => match Z.pos_sub n n' with
| (* n > n' *)
Zpos k => mkPX (PaddX Padd P i k P') i' n' Q'
| (* n = n' *)
Z0 => mkPX (Padd P P') i n Q'
| (* n < n' *)
Zneg k => mkPX (Padd P (mkPX P' i k P0)) i n Q'
end
end
end.
induction Q; reflexivity.
Qed.
(* Combined correctness of Padd and PaddX, proved by mutual induction. *)
Lemma PaddX_ok2 : forall P2,
(forall P l, (P2 ++ P) @ l == P2 @ l + P @ l)
/\
(forall P k n l,
(PaddX Padd P2 k n P) @ l ==
P2 @ l * pow_pos (nth 0 k l) n + P @ l).
induction P2;simpl;intros. split. intros. apply PaddCl_ok.
induction P. unfold PaddX. intros. rewrite mkPX_ok.
simpl. rsimpl.
intros. simpl.
destruct (Pos.compare_spec k p) as [Hh|Hh|Hh].
destr_pos_sub H1h. Esimpl2.
rewrite Hh; trivial. rewrite H1h. reflexivity.
simpl. rewrite mkPX_ok. rewrite IHP1. Esimpl2.
rewrite Pos.add_comm in H1h.
rewrite H1h.
rewrite pow_pos_add. Esimpl2.
rewrite Hh; trivial. reflexivity.
rewrite mkPX_ok. rewrite PaddCl_ok. Esimpl2. rewrite Pos.add_comm in H1h.
rewrite H1h. Esimpl2. rewrite pow_pos_add. Esimpl2.
rewrite Hh; trivial. reflexivity.
rewrite mkPX_ok. rewrite IHP2. Esimpl2.
rewrite (ring_add_comm (P2 @ l * pow_pos (nth 0 p l) p0)
([c] * pow_pos (nth 0 k l) n)).
reflexivity. assert (H1h := ring_morphism_eq c 0);case_eq (Ceqb c 0);
intros; simpl.
rewrite H1h;trivial. Esimpl2. apply Ceqb_eq; trivial. reflexivity.
decompose [and] IHP2_1. decompose [and] IHP2_2. clear IHP2_1 IHP2_2.
split. intros. rewrite H0. rewrite H1.
Esimpl2.
induction P. unfold PaddX. intros. rewrite mkPX_ok. simpl. reflexivity.
intros. rewrite PaddXPX.
destruct (Pos.compare_spec k p1) as [H3h|H3h|H3h].
destr_pos_sub H4h.
rewrite mkPX_ok. simpl. rewrite H0. rewrite H1. Esimpl2.
rewrite H4h. rewrite H3h;trivial. reflexivity.
rewrite mkPX_ok. rewrite IHP1. Esimpl2. rewrite H3h;trivial.
rewrite Pos.add_comm in H4h.
rewrite H4h. rewrite pow_pos_add. Esimpl2.
rewrite mkPX_ok. simpl. rewrite H0. rewrite H1.
rewrite mkPX_ok.
Esimpl2. rewrite H3h;trivial.
rewrite Pos.add_comm in H4h.
rewrite H4h. rewrite pow_pos_add. Esimpl2.
rewrite mkPX_ok. simpl. rewrite IHP2. Esimpl2.
gen_add_push (P2 @ l * pow_pos (nth 0 p1 l) p2). try reflexivity.
rewrite mkPX_ok. simpl. reflexivity.
Qed.
Lemma Padd_ok : forall P Q l, (P ++ Q) @ l == P @ l + Q @ l.
intro P. elim (PaddX_ok2 P); auto.
Qed.
Lemma PaddX_ok : forall P2 P k n l,
(PaddX Padd P2 k n P) @ l == P2 @ l * pow_pos (nth 0 k l) n + P @ l.
intro P2. elim (PaddX_ok2 P2); auto.
Qed.
Lemma Psub_ok : forall P' P l, (P -- P')@l == P@l - P'@l.
unfold Psub. intros. rewrite Padd_ok. rewrite Popp_ok. rsimpl.
Qed.
Lemma Pmul_ok : forall P P' l, (P**P')@l == P@l * P'@l.
induction P'; simpl; intros. rewrite PmulC_ok. reflexivity.
rewrite PaddX_ok. rewrite IHP'1. rewrite IHP'2. Esimpl2.
Qed.
Lemma Psquare_ok : forall P l, (Psquare P)@l == P@l * P@l.
Proof.
intros. unfold Psquare. apply Pmul_ok.
Qed.
(** Definition of polynomial expressions *)
(* The PExpr type itself comes from Ring_polynom; shown here for reference:
Inductive PExpr : Type :=
| PEc : C -> PExpr
| PEX : positive -> PExpr
| PEadd : PExpr -> PExpr -> PExpr
| PEsub : PExpr -> PExpr -> PExpr
| PEmul : PExpr -> PExpr -> PExpr
| PEopp : PExpr -> PExpr
| PEpow : PExpr -> N -> PExpr.
*)
(** Specification of the power function *)
Section POWER.
Variable Cpow : Set.
Variable Cp_phi : N -> Cpow.
Variable rpow : R -> Cpow -> R.
Record power_theory : Prop := mkpow_th {
rpow_pow_N : forall r n, (rpow r (Cp_phi n))== (pow_N r n)
}.
End POWER.
Variable Cpow : Set.
Variable Cp_phi : N -> Cpow.
Variable rpow : R -> Cpow -> R.
Variable pow_th : power_theory Cp_phi rpow.
(** evaluation of polynomial expressions towards R *)
Fixpoint PEeval (l:list R) (pe:PExpr C) {struct pe} : R :=
match pe with
| PEO => 0
| PEI => 1
| PEc c => [c]
| PEX _ j => nth 0 j l
| PEadd pe1 pe2 => (PEeval l pe1) + (PEeval l pe2)
| PEsub pe1 pe2 => (PEeval l pe1) - (PEeval l pe2)
| PEmul pe1 pe2 => (PEeval l pe1) * (PEeval l pe2)
| PEopp pe1 => - (PEeval l pe1)
| PEpow pe1 n => rpow (PEeval l pe1) (Cp_phi n)
end.
Strategy expand [PEeval].
Definition mk_X j := mkX j.
(** Correctness proofs *)
Lemma mkX_ok : forall p l, nth 0 p l == (mk_X p) @ l.
Proof.
destruct p;simpl;intros;Esimpl;trivial.
Qed.
(* Esimpl2 extended with the addition/subtraction correctness lemmas. *)
Ltac Esimpl3 :=
repeat match goal with
| |- context [(?P1 ++ ?P2)@?l] => rewrite (Padd_ok P1 P2 l)
| |- context [(?P1 -- ?P2)@?l] => rewrite (Psub_ok P1 P2 l)
end;try Esimpl2;try reflexivity;try apply ring_add_comm.
(* Power using the chinese algorithm *)
Section POWER2.
Variable subst_l : Pol -> Pol.
(* Binary exponentiation, applying subst_l after each multiplication. *)
Fixpoint Ppow_pos (res P:Pol) (p:positive){struct p} : Pol :=
match p with
| xH => subst_l (Pmul P res)
| xO p => Ppow_pos (Ppow_pos res P p) P p
| xI p => subst_l (Pmul P (Ppow_pos (Ppow_pos res P p) P p))
end.
Definition Ppow_N P n :=
match n with
| N0 => P1
| Npos p => Ppow_pos P1 P p
end.
(* Generic binary exponentiation over any binary operation m. *)
Fixpoint pow_pos_gen (R:Type)(m:R->R->R)(x:R) (i:positive) {struct i}: R :=
match i with
| xH => x
| xO i => let p := pow_pos_gen m x i in m p p
| xI i => let p := pow_pos_gen m x i in m x (m p p)
end.
Lemma Ppow_pos_ok : forall l, (forall P, subst_l P@l == P@l) ->
forall res P p, (Ppow_pos res P p)@l == (pow_pos_gen Pmul P p)@l * res@l.
Proof.
intros l subst_l_ok res P p. generalize res;clear res.
induction p;simpl;intros. try rewrite subst_l_ok.
repeat rewrite Pmul_ok. repeat rewrite IHp.
rsimpl. repeat rewrite Pmul_ok. repeat rewrite IHp. rsimpl.
try rewrite subst_l_ok.
repeat rewrite Pmul_ok. reflexivity.
Qed.
Definition pow_N_gen (R:Type)(x1:R)(m:R->R->R)(x:R) (p:N) :=
match p with
| N0 => x1
| Npos p => pow_pos_gen m x p
end.
Lemma Ppow_N_ok : forall l, (forall P, subst_l P@l == P@l) ->
forall P n, (Ppow_N P n)@l == (pow_N_gen P1 Pmul P n)@l.
Proof. destruct n;simpl. reflexivity. rewrite Ppow_pos_ok; trivial. Esimpl. Qed.
End POWER2.
(** Normalization and rewriting *)
Section NORM_SUBST_REC.
(* Here substitution is the identity; richer instantiations substitute
   during normalization. *)
Let subst_l (P:Pol) := P.
Let Pmul_subst P1 P2 := subst_l (Pmul P1 P2).
Let Ppow_subst := Ppow_N subst_l.
(* Normalize a polynomial expression into the Pol normal form; note the
   special case turning (pe1 + (- pe2)) into a subtraction. *)
Fixpoint norm_aux (pe:PExpr C) : Pol :=
match pe with
| PEO => Pc cO
| PEI => Pc cI
| PEc c => Pc c
| PEX _ j => mk_X j
| PEadd pe1 (PEopp pe2) =>
Psub (norm_aux pe1) (norm_aux pe2)
| PEadd pe1 pe2 => Padd (norm_aux pe1) (norm_aux pe2)
| PEsub pe1 pe2 => Psub (norm_aux pe1) (norm_aux pe2)
| PEmul pe1 pe2 => Pmul (norm_aux pe1) (norm_aux pe2)
| PEopp pe1 => Popp (norm_aux pe1)
| PEpow pe1 n => Ppow_N (fun p => p) (norm_aux pe1) n
end.
Definition norm_subst pe := subst_l (norm_aux pe).
(* Normalization preserves evaluation. *)
Lemma norm_aux_spec :
forall l pe,
PEeval l pe == (norm_aux pe)@l.
Proof.
intros.
induction pe.
- now simpl; rewrite <- ring_morphism0.
- now simpl; rewrite <- ring_morphism1.
- Esimpl3.
- Esimpl3.
- simpl.
rewrite IHpe1;rewrite IHpe2.
destruct pe2; Esimpl3.
unfold Psub.
destruct pe1; destruct pe2; rewrite Padd_ok; rewrite Popp_ok; reflexivity.
- simpl. unfold Psub. rewrite IHpe1;rewrite IHpe2.
now destruct pe1;
[destruct pe2; rewrite Padd_ok; rewrite Popp_ok; Esimpl3 | Esimpl3..].
- simpl. rewrite IHpe1;rewrite IHpe2. rewrite Pmul_ok. reflexivity.
- now simpl; rewrite IHpe; Esimpl3.
- simpl.
rewrite Ppow_N_ok; (intros;try reflexivity).
rewrite rpow_pow_N; [| now apply pow_th].
induction n;simpl; [now Esimpl3|].
induction p; simpl; trivial.
+ try rewrite IHp;try rewrite IHpe;
repeat rewrite Pms_ok; repeat rewrite Pmul_ok;reflexivity.
+ rewrite Pmul_ok.
try rewrite IHp;try rewrite IHpe; repeat rewrite Pms_ok;
repeat rewrite Pmul_ok;reflexivity.
Qed.
Lemma norm_subst_spec :
forall l pe,
PEeval l pe == (norm_subst pe)@l.
Proof.
intros;unfold norm_subst.
unfold subst_l. apply norm_aux_spec.
Qed.
End NORM_SUBST_REC.
(* Conjunction of the pairwise equalities in a list of expression pairs. *)
Fixpoint interp_PElist (l:list R) (lpe:list (PExpr C * PExpr C)) {struct lpe} : Prop :=
match lpe with
| nil => True
| (me,pe)::lpe =>
match lpe with
| nil => PEeval l me == PEeval l pe
| _ => PEeval l me == PEeval l pe /\ interp_PElist l lpe
end
end.
Lemma norm_subst_ok : forall l pe,
PEeval l pe == (norm_subst pe)@l.
Proof.
intros;apply norm_subst_spec.
Qed.
(* Main reflection lemma: syntactic equality of normal forms implies
   semantic equality of the expressions. *)
Lemma ring_correct : forall l pe1 pe2,
(norm_subst pe1 =? norm_subst pe2) = true ->
PEeval l pe1 == PEeval l pe2.
Proof.
simpl;intros.
do 2 (rewrite (norm_subst_ok l);trivial).
apply Peq_ok;trivial.
Qed.
End MakeRingPol.
|
{"author": "Priyanka-Mondal", "repo": "Coq", "sha": "220c3eccfa5643b1ca2398d4940e29917da786d9", "save_path": "github-repos/coq/Priyanka-Mondal-Coq", "path": "github-repos/coq/Priyanka-Mondal-Coq/Coq-220c3eccfa5643b1ca2398d4940e29917da786d9/lib/plugins/setoid_ring/Ncring_polynom.v"}
|
import numpy as np

# np.mat / np.matrix is deprecated; plain 2-D arrays with keepdims=True sums
# broadcast the same way and print the same values.

# divide matrix by row-sums
mat = np.array([[4, 2], [2, 3]])
print(mat / mat.sum(axis=1, keepdims=True))

# divide matrix by col-sums
mat = np.array([[1, 2], [3, 4]])
print(mat / mat.sum(axis=0, keepdims=True))
|
{"hexsha": "8d483e85cdaeef8c1319120ea0d09599cceb59ee", "size": 187, "ext": "py", "lang": "Python", "max_stars_repo_path": "basic_operations.py", "max_stars_repo_name": "Valentindi/numpy_cheatsheet", "max_stars_repo_head_hexsha": "0a1ff493778caef0dab60a019adf3f3c2756e1b8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "basic_operations.py", "max_issues_repo_name": "Valentindi/numpy_cheatsheet", "max_issues_repo_head_hexsha": "0a1ff493778caef0dab60a019adf3f3c2756e1b8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "basic_operations.py", "max_forks_repo_name": "Valentindi/numpy_cheatsheet", "max_forks_repo_head_hexsha": "0a1ff493778caef0dab60a019adf3f3c2756e1b8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 18.7, "max_line_length": 27, "alphanum_fraction": 0.6363636364, "include": true, "reason": "import numpy", "num_tokens": 63}
|
"""
melange_lite.py
Lightweight factories for building sequential Monte Carlo (SMC) samplers on top of JAX.
Handles the primary functions
"""
from __future__ import division, print_function
from jax import numpy as jnp
import numpy as np
from jax.config import config; config.update("jax_enable_x64", True)
from jax import lax, ops, vmap, jit, grad, random
class SMCSamplerFactory(object):
    """
    Run an SMC sampler with a Feynman-Kac model.

    Particles
    =========
    `Particles` are batches of individual particle quantities (positions, latent
    vars, velocities, etc.); the forward propagation kernels `M0` and `M` (for
    arbitrary time t) return `Particles` as X, a dictionary s.t.
    X = {'x' : Array, # of shape (self.N, Dx) where Dx is the dimension of each particle's quantity 'x'
         'v' : Array, # of shape (self.N, Dv) where Dv is the dimension of each particle's quantity 'v'
         ...
         'seed' : Array, # if dynamics are stochastic, each particle carries its own jax.random.PRNGKey
         ...
        }

    Methods
    =======
    the four 'important' methods are M0, M, logG0, logG:
    * M0(parameter_dict: Dict[str, Array]) -> Dict[str, Array]            # returns X (see above)
    * logG0(X, parameter_dict) -> Array                                   # log weights, shape (N,)
    * M(X, parameter_dict, t) -> Dict[str, Array]                         # new particles container
    * logG(Xp, X, parameter_dict, t) -> Array                             # log weights, shape (N,)

    The base class installs stubs that raise NotImplementedError; subclasses
    install concrete kernels by overriding the `_handle_*` hooks.
    """

    def __init__(self,
                 T,                # number of steps in the SMC propagator
                 N,                # number of particles
                 IW_energy_fn,     # uncanonicalized importance weight energy fn
                 IW_parameters,    # importance weight parameters (immutable)
                 **kwargs          # forwarded to the `_handle_*` hooks
                 ):
        """Generic init.

        arguments
            T : int
                number of steps in the sampler
            N : int
                number of particles
            IW_energy_fn : fn
                importance weight energy function;
                def IW_energy_fn(X: Array, parameters: Array) -> Array  # float output (as energy)
            IW_parameters : Array
                parameters that are passed to IW_energy_fn
            kwargs : dict
                forwarded verbatim to `_handle_methods` so subclass hooks can
                pick up extra configuration
        """
        self.T = T
        self.N = N
        self.IW_parameters = IW_parameters

        # importance weight energy function
        self._IW_energy_fn = IW_energy_fn

        # install M0/logG0/M/logG (stubs here; subclasses override the hooks)
        self._handle_methods(**kwargs)

    def _handle_methods(self, **kwargs):
        """
        Call `_handle_M0_kernel`, `_handle_logG0`, `_handle_M`, `_handle_logG`
        to set up the necessary callable methods.
        """
        self._handle_M0_kernel(**kwargs)
        self._handle_logG0(**kwargs)
        self._handle_M(**kwargs)
        self._handle_logG(**kwargs)

    def _handle_M0_kernel(self, **kwargs):
        """
        Install the instance method `M0(parameter_dict) -> X` (the t=0
        proposal). M0 is usually a special case, so it is handled separately.
        The base class installs a stub that raises NotImplementedError.
        """
        def M0_kernel(parameter_dict):
            raise NotImplementedError(f"you have to define the M0 kernel yourself, dumbass.")
        self.M0 = M0_kernel

    def _handle_logG0(self, **kwargs):
        """
        Install the instance method `logG0(X, parameter_dict) -> lws` (initial
        log weights of shape (self.N,)). The base class installs a stub.
        """
        def logG0(X, parameter_dict):
            raise NotImplementedError(f"you have to define the logG0 kernel yourself, dumbass.")
        self.logG0 = logG0

    def _handle_M(self, M_kernel_fn=None, M_kernel_energy_fn=None, M_shift_fn=None, **kwargs):
        """
        Install the instance method `M(X, parameter_dict, t) -> X` (the
        time-t propagation kernel). The base class installs a stub.

        The three kernel arguments are for subclasses; they now default to
        None (fix: they were previously required, so the base class could
        never be constructed without them even though the stub ignores them).
        """
        def M(X, parameter_dict, t):
            raise NotImplementedError(f"you have to define the M kernel yourself, dumbass.")
        self.M = M

    def _handle_logG(self, **kwargs):
        """
        Install the instance method `logG(Xp, X, parameter_dict, t) -> lws`
        (per-step log weights of shape (self.N,)). The base class installs a
        stub.

        Fix: accepts **kwargs — `_handle_methods` always forwards kwargs,
        which previously raised TypeError whenever any were supplied. The
        stub's message also said "M kernel" instead of "logG kernel".
        """
        def logG(Xp, X, parameter_dict, t):
            raise NotImplementedError(f"you have to define the logG kernel yourself, dumbass.")
        self.logG = logG

    def build_force_fn(self, energy_fn):
        """Make a force_fn: the negative gradient of `energy_fn` w.r.t. its
        first argument (default argnums=0)."""
        force_fn = lambda x, params: -1. * grad(energy_fn)(x, params)
        return force_fn

    def works(self):
        """
        Build and return the run function.

        returns
            works_fn : fn
                maps a parameter dict to an Array of shape (self.T, self.N)
                containing the per-step works (negated log importance weights)
        """
        # single step of the scan: propagate, then weight
        def run_scan_fn(carrier, t):
            Xp, params_dict = carrier
            X = self.M(Xp, params_dict, t)
            lws = self.logG(Xp, X, params_dict, t)
            return (X, params_dict), lws

        def works_fn(parameter_dict):
            init_Xs = self.M0(parameter_dict)
            init_lws = self.logG0(init_Xs, parameter_dict)
            carrier = (init_Xs, parameter_dict)
            # T-1 propagation steps after the initial draw
            (_final_Xs, _), stacked_lws = lax.scan(run_scan_fn, carrier, jnp.arange(0, self.T - 1))
            all_lws = jnp.vstack((init_lws[jnp.newaxis, ...], stacked_lws))
            return -all_lws
        return works_fn
class GaussianSMCSamplerFactory(SMCSamplerFactory):
    """
    a Gaussian SMC Sampler factory with Gaussian proposals

    parameter_dict = {
        # M0 parameters
        mu : Array, # means of the prior distribution with shape (Dx)
        lcov : Array, # log covariance array of the prior distribution with shape (Dx)
        seed : Array, # jax.random.PRNGKey
        M_parameters : Dict,
            {
            'mu': Array, # mu array of shape (T-1, Dx)
            'lcov_force': Array, # log covariance of shape (T-1, Dx)
            'lcov_step': Array, # log covariance of shape (T-1, Dx)
            }
        L_parameters : Dict,
            same as M_parameters
        ldt : Array # float of the log timestep
    }

    X = {
        x : Array, # positions of shape (N,Dx)
        seed : Array, # jax.random.PRNGKey of shape (N),
        # the following entries are cached so that the k_logp kernel
        # calculator doesn't have to recompute the M push values
        'forward_mu' : Array, # forward mu of the ULA process
        'forward_cov' : Array # forward covariance of the ULA process
    }
    """
    def __init__(self, T, N, IW_parameters, **kwargs):
        # the kernel energy is the negative unnormalized Gaussian log-density;
        # params[0] is the mean, params[1] the covariance entries
        from melange_lite.utils.gaussians import unnormalized_Gaussian_logp
        energy_fn = lambda x, params: -1. * unnormalized_Gaussian_logp(x, params[0], params[1])
        super().__init__(T, N, energy_fn, IW_parameters, **kwargs)
        self._kernel_energy_fn = energy_fn #for the M and l kernels
    def _handle_M0_kernel(self, **kwargs):
        # initial proposal: draw N i.i.d. Gaussian particles from the prior
        def gaussian_kernel(parameter_dict):
            mu = parameter_dict['mu']
            Dx = len(mu)
            cov_vector = jnp.transpose(jnp.exp(parameter_dict['lcov']))
            seed = parameter_dict['seed']
            # NOTE(review): v_seed is split off but never used — confirm intentional
            v_seed, x_seed = random.split(seed)
            #make xs
            xs = random.normal(x_seed, (self.N, Dx))*jnp.sqrt(cov_vector) + mu
            # NOTE(review): `jnp.repeat(mu[..., jnp.newaxis], self.N, axis=0)` appends
            # a trailing axis and repeats along axis 0 — confirm the intended
            # output shape is (N, Dx) for downstream `normalized_Gaussian_logp`
            return {'x': xs,
                    'seed': random.split(x_seed, self.N),
                    'forward_mu': jnp.repeat(mu[..., jnp.newaxis], self.N, axis=0),
                    'forward_cov': jnp.repeat(jnp.exp(parameter_dict['lcov'])[..., jnp.newaxis], self.N, axis=0)
                    }
        #set the attrs
        self.M0 = gaussian_kernel
    def _handle_logG0(self, **kwargs):
        # at t=0 all incremental log weights are zero
        def logG0(Xs, parameter_dict):
            return jnp.zeros(self.N)
        self.logG0 = logG0
    def _handle_M(self):
        from melange_lite.utils.gaussians import EL_mu_sigma, ULA_move
        def _M(X, parameter_dict, t):
            """
            proposal for a singular particle x
            """
            #grab the parameters
            x = X['x']
            seed = X['seed']
            # NOTE(review): `Dx` is assigned the full shape tuple and never used
            Dx = x.shape
            ldt = parameter_dict['ldt']
            Mparams = parameter_dict['M_parameters']
            potential_params = jnp.vstack((Mparams['mu'][t], jnp.exp(Mparams['lcov_force'][t])))
            #split the random seed
            run_seed, x_seed = random.split(seed)
            #call EL: forward Euler–Maruyama mean/covariance of the proposal
            mu, cov = EL_mu_sigma(x, self._kernel_energy_fn, ldt, potential_params)
            #ula move: unadjusted Langevin step of the particle
            new_x = ULA_move(x, self._kernel_energy_fn, ldt, run_seed, potential_params)
            return {'x': new_x, 'seed': x_seed, 'forward_mu': mu, 'forward_cov': cov}
        self._M = _M #set that attr
        # vectorize the single-particle proposal over the particle axis
        self.M = vmap(_M,
                      in_axes=(0, None, None)
                      )
    def _handle_logG(self):
        from melange_lite.utils.gaussians import normalized_Gaussian_logp, EL_mu_sigma
        def _logG(Xp, X, parameter_dict, t):
            # per-particle incremental log weight:
            # IW (target ratio) + backward L kernel logp - forward M kernel logp
            xp, x = Xp['x'], X['x']
            #compute importance_weight (there is 1 more index in the IW parameters than in everything else...)
            mu_t, mu_tm1 = self.IW_parameters['mu'][t+1], self.IW_parameters['mu'][t]
            cov_force_t, cov_force_tm1 = jnp.exp(self.IW_parameters['lcov_force'][t+1]), jnp.exp(self.IW_parameters['lcov_force'][t])
            IWs = -self._IW_energy_fn(x, jnp.vstack((mu_t, cov_force_t))) + self._IW_energy_fn(xp, jnp.vstack((mu_tm1, cov_force_tm1)))
            #compute M_kernel_logp: we don't need to recompute this
            # (forward_mu/forward_cov were cached by the M kernel)
            k_t_logps = normalized_Gaussian_logp(x, X['forward_mu'], X['forward_cov'])
            #compute l_kernel_logp
            Lparams = parameter_dict['L_parameters']
            potential_params = jnp.vstack((Lparams['mu'][t], jnp.exp(Lparams['lcov_force'][t])))
            l_mu, l_cov = EL_mu_sigma(x, self._kernel_energy_fn, parameter_dict['ldt'], potential_params)
            l_tm1_logps = normalized_Gaussian_logp(xp, l_mu, l_cov)
            #finalize and return
            lws = IWs + l_tm1_logps - k_t_logps
            return lws
        self._logG = _logG
        self.logG = vmap(_logG, in_axes=(0, 0, None, None))
|
{"hexsha": "e9246ae980d98dcbcd1c409565d1aed90a0774aa", "size": 12374, "ext": "py", "lang": "Python", "max_stars_repo_path": "melange_lite/melange_lite.py", "max_stars_repo_name": "dominicrufa/melange_lite", "max_stars_repo_head_hexsha": "0683997d7296a5d6f5a10bab1895f9b417c948c3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "melange_lite/melange_lite.py", "max_issues_repo_name": "dominicrufa/melange_lite", "max_issues_repo_head_hexsha": "0683997d7296a5d6f5a10bab1895f9b417c948c3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "melange_lite/melange_lite.py", "max_forks_repo_name": "dominicrufa/melange_lite", "max_forks_repo_head_hexsha": "0683997d7296a5d6f5a10bab1895f9b417c948c3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.6599423631, "max_line_length": 158, "alphanum_fraction": 0.5701470826, "include": true, "reason": "import numpy,from jax", "num_tokens": 2931}
|
import sys
import json
import requests
import numpy as np
from flask import Flask, make_response
from flask import render_template
from flask import Flask
app = Flask(__name__)
from config import consumer_key, access_token, redirect_uri
@app.route("/")
def hello():
    """Serve the index page, populated with Pocket reading statistics."""
    stats = get_pocket_data()
    return render_template('index.html', data=stats)
def get_pocket_data():
    """Fetch saved Pocket items and bucket them by word count.

    Returns a dict mapping word-count-range labels to item counts, or None
    when the API does not respond with HTTP 200. Exits the process when the
    JSON payload is empty.

    Uses print() with a single argument so the module runs under both
    Python 2 and Python 3 (the original Python-2-only print statements are a
    syntax error on Python 3).
    """
    access_data = {"consumer_key": consumer_key,
                   "access_token": access_token,
                   "detailType": "simple",
                   "sort": "newest"
                   }
    # No need to feed a content-type header (gives error); the requests
    # library takes care of form encoding itself.
    pocket_response = requests.post("https://getpocket.com/v3/get",
                                    data=access_data)
    if pocket_response.status_code != 200:
        print("No response from API")
        return None

    json_data = pocket_response.json()
    if json_data is None:  # Exit the program
        print("Invalid response. Exiting program...")
        sys.exit(0)

    items_list = json_data["list"]  # mapping of item id -> item details
    # list() so the keys stay indexable on Python 3 as well
    keys = list(items_list.keys())
    total_items = len(keys)  # Total no of saved items
    print("Total number of saved items is: " + str(total_items))

    # Word count analysis: bucket every saved item into one of four ranges.
    less_than_1000 = 0
    between_1000_3000 = 0
    between_3000_5000 = 0
    over_5000 = 0
    for key in keys:
        # float() mirrors the original NumPy cast — word_count may arrive as
        # a string from the API (TODO confirm against the Pocket docs)
        wc = float(items_list[key]["word_count"])
        if wc < 1000:
            less_than_1000 += 1
        elif wc < 3000:
            between_1000_3000 += 1
        elif wc < 5000:
            between_3000_5000 += 1
        else:
            over_5000 += 1

    return {'less than 1000 words': less_than_1000,
            'between 1000-3000 words': between_1000_3000,
            'between 3000-5000 words': between_3000_5000,
            'over 5000 words': over_5000}
# Run the Flask development server when executed directly.
if __name__ == '__main__':
    app.run(debug=True)  # debug mode: auto-reload; not for production use
|
{"hexsha": "5252ae2e4a6ee22a0580b86bde84a87de07f851e", "size": 2056, "ext": "py", "lang": "Python", "max_stars_repo_path": "pocket_stats.py", "max_stars_repo_name": "sarthak-s/Pocket-Stats", "max_stars_repo_head_hexsha": "fc41cafcd00e08f8a7dfe5b84cf4e483c81f530d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pocket_stats.py", "max_issues_repo_name": "sarthak-s/Pocket-Stats", "max_issues_repo_head_hexsha": "fc41cafcd00e08f8a7dfe5b84cf4e483c81f530d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pocket_stats.py", "max_forks_repo_name": "sarthak-s/Pocket-Stats", "max_forks_repo_head_hexsha": "fc41cafcd00e08f8a7dfe5b84cf4e483c81f530d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.7, "max_line_length": 118, "alphanum_fraction": 0.7013618677, "include": true, "reason": "import numpy", "num_tokens": 608}
|
[STATEMENT]
lemma image_filter_cartesian_product_correct:
fixes f :: "'x \<times> 'y \<rightharpoonup> 'z"
assumes I[simp, intro!]: "s1.invar s1" "s2.invar s2"
shows "s3.\<alpha> (image_filter_cartesian_product f s1 s2)
= { z | x y z. f (x,y) = Some z \<and> x\<in>s1.\<alpha> s1 \<and> y\<in>s2.\<alpha> s2 }" (is ?T1)
"s3.invar (image_filter_cartesian_product f s1 s2)" (is ?T2)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. s3.\<alpha> (image_filter_cartesian_product f s1 s2) = {uu_. \<exists>x y z. uu_ = z \<and> f (x, y) = Some z \<and> x \<in> s1.\<alpha> s1 \<and> y \<in> s2.\<alpha> s2} &&& s3.invar (image_filter_cartesian_product f s1 s2)
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. s3.\<alpha> (image_filter_cartesian_product f s1 s2) = {uu_. \<exists>x y z. uu_ = z \<and> f (x, y) = Some z \<and> x \<in> s1.\<alpha> s1 \<and> y \<in> s2.\<alpha> s2}
2. s3.invar (image_filter_cartesian_product f s1 s2)
[PROOF STEP]
from set_iterator_product_correct
[OF s1.iteratei_correct[OF I(1)] s2.iteratei_correct[OF I(2)]]
[PROOF STATE]
proof (chain)
picking this:
set_iterator (set_iterator_product (s1.iteratei s1) (\<lambda>a. s2.iteratei s2)) (s1.\<alpha> s1 \<times> s2.\<alpha> s2)
[PROOF STEP]
have it_s12: "set_iterator
(set_iterator_product (s1.iteratei s1) (\<lambda>_. s2.iteratei s2))
(s1.\<alpha> s1 \<times> s2.\<alpha> s2)"
[PROOF STATE]
proof (prove)
using this:
set_iterator (set_iterator_product (s1.iteratei s1) (\<lambda>a. s2.iteratei s2)) (s1.\<alpha> s1 \<times> s2.\<alpha> s2)
goal (1 subgoal):
1. set_iterator (set_iterator_product (s1.iteratei s1) (\<lambda>_. s2.iteratei s2)) (s1.\<alpha> s1 \<times> s2.\<alpha> s2)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
set_iterator (set_iterator_product (s1.iteratei s1) (\<lambda>_. s2.iteratei s2)) (s1.\<alpha> s1 \<times> s2.\<alpha> s2)
goal (2 subgoals):
1. s3.\<alpha> (image_filter_cartesian_product f s1 s2) = {uu_. \<exists>x y z. uu_ = z \<and> f (x, y) = Some z \<and> x \<in> s1.\<alpha> s1 \<and> y \<in> s2.\<alpha> s2}
2. s3.invar (image_filter_cartesian_product f s1 s2)
[PROOF STEP]
have LIS:
"set_ins s3.\<alpha> s3.invar s3.ins"
"set_empty s3.\<alpha> s3.invar s3.empty"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. set_ins s3.\<alpha> s3.invar s3.ins &&& set_empty s3.\<alpha> s3.invar s3.empty
[PROOF STEP]
by unfold_locales
[PROOF STATE]
proof (state)
this:
set_ins s3.\<alpha> s3.invar s3.ins
set_empty s3.\<alpha> s3.invar s3.empty
goal (2 subgoals):
1. s3.\<alpha> (image_filter_cartesian_product f s1 s2) = {uu_. \<exists>x y z. uu_ = z \<and> f (x, y) = Some z \<and> x \<in> s1.\<alpha> s1 \<and> y \<in> s2.\<alpha> s2}
2. s3.invar (image_filter_cartesian_product f s1 s2)
[PROOF STEP]
from iterate_image_filter_to_set_correct[OF LIS it_s12, of f]
[PROOF STATE]
proof (chain)
picking this:
s3.\<alpha> (iterate_to_set s3.empty s3.ins (set_iterator_image_filter f (set_iterator_product (s1.iteratei s1) (\<lambda>_. s2.iteratei s2)))) = {b. \<exists>a. a \<in> s1.\<alpha> s1 \<times> s2.\<alpha> s2 \<and> f a = Some b} \<and> s3.invar (iterate_to_set s3.empty s3.ins (set_iterator_image_filter f (set_iterator_product (s1.iteratei s1) (\<lambda>_. s2.iteratei s2))))
[PROOF STEP]
show ?T1 ?T2
[PROOF STATE]
proof (prove)
using this:
s3.\<alpha> (iterate_to_set s3.empty s3.ins (set_iterator_image_filter f (set_iterator_product (s1.iteratei s1) (\<lambda>_. s2.iteratei s2)))) = {b. \<exists>a. a \<in> s1.\<alpha> s1 \<times> s2.\<alpha> s2 \<and> f a = Some b} \<and> s3.invar (iterate_to_set s3.empty s3.ins (set_iterator_image_filter f (set_iterator_product (s1.iteratei s1) (\<lambda>_. s2.iteratei s2))))
goal (1 subgoal):
1. s3.\<alpha> (image_filter_cartesian_product f s1 s2) = {uu_. \<exists>x y z. uu_ = z \<and> f (x, y) = Some z \<and> x \<in> s1.\<alpha> s1 \<and> y \<in> s2.\<alpha> s2} &&& s3.invar (image_filter_cartesian_product f s1 s2)
[PROOF STEP]
unfolding image_filter_cartesian_product_alt
[PROOF STATE]
proof (prove)
using this:
s3.\<alpha> (iterate_to_set s3.empty s3.ins (set_iterator_image_filter f (set_iterator_product (s1.iteratei s1) (\<lambda>_. s2.iteratei s2)))) = {b. \<exists>a. a \<in> s1.\<alpha> s1 \<times> s2.\<alpha> s2 \<and> f a = Some b} \<and> s3.invar (iterate_to_set s3.empty s3.ins (set_iterator_image_filter f (set_iterator_product (s1.iteratei s1) (\<lambda>_. s2.iteratei s2))))
goal (1 subgoal):
1. s3.\<alpha> (iterate_to_set s3.empty s3.ins (set_iterator_image_filter f (set_iterator_product (s1.iteratei s1) (\<lambda>_. s2.iteratei s2)))) = {uu_. \<exists>x y z. uu_ = z \<and> f (x, y) = Some z \<and> x \<in> s1.\<alpha> s1 \<and> y \<in> s2.\<alpha> s2} &&& s3.invar (iterate_to_set s3.empty s3.ins (set_iterator_image_filter f (set_iterator_product (s1.iteratei s1) (\<lambda>_. s2.iteratei s2))))
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
s3.\<alpha> (image_filter_cartesian_product f s1 s2) = {uu_. \<exists>x y z. uu_ = z \<and> f (x, y) = Some z \<and> x \<in> s1.\<alpha> s1 \<and> y \<in> s2.\<alpha> s2}
s3.invar (image_filter_cartesian_product f s1 s2)
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 2351, "file": "Collections_ICF_gen_algo_SetGA", "length": 11}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
##############################################
# The MIT License (MIT)
# Copyright (c) 2020 Kevin Walchko
# see LICENSE for full details
##############################################
import os
# Enable Adafruit Blinka's MCP2221 backend before any board libraries are
# imported; setdefault preserves a value the user has already exported
# (same behavior as the original if/else, without the empty `pass` branch).
os.environ.setdefault('BLINKA_MCP2221', "1")
import rclpy
# import time
# import numpy as np
from rtf_sensors.imu_node import RTFIMU
def main(args=None):
    """Initialize ROS 2, spin the IMU node, and shut down cleanly.

    Args:
        args: optional command-line arguments forwarded to rclpy.init().
    """
    rclpy.init(args=args)
    node = RTFIMU()
    try:
        rclpy.spin(node)
    except KeyboardInterrupt:
        print("\n\nbye ...\n")
    finally:
        # Clean up even when spin() returns normally (e.g. external shutdown)
        # — previously the node was only destroyed on Ctrl-C.
        node.destroy_node()
        rclpy.shutdown()
# Entry point: spin the IMU node until interrupted.
if __name__ == '__main__':
    main()
|
{"hexsha": "9e5359ac608384d43f5b66104c76955602cb45d8", "size": 679, "ext": "py", "lang": "Python", "max_stars_repo_path": "rtf_sensors/imu_serial_node.py", "max_stars_repo_name": "RecklessTedsFunland/rtf_sensors", "max_stars_repo_head_hexsha": "880e93b1a358ff3ea65f5c90949c483e52ac44c7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "rtf_sensors/imu_serial_node.py", "max_issues_repo_name": "RecklessTedsFunland/rtf_sensors", "max_issues_repo_head_hexsha": "880e93b1a358ff3ea65f5c90949c483e52ac44c7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "rtf_sensors/imu_serial_node.py", "max_forks_repo_name": "RecklessTedsFunland/rtf_sensors", "max_forks_repo_head_hexsha": "880e93b1a358ff3ea65f5c90949c483e52ac44c7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 18.3513513514, "max_line_length": 46, "alphanum_fraction": 0.5552282769, "include": true, "reason": "import numpy", "num_tokens": 182}
|
"""
vp_overlap.py
Do calculations for overlap type functionals
"""
import numpy as np
from scipy.special import erf
def _overlap_functional_derivatives(KS_list, ratio, S, const, opt_partition):
    """Attach the overlap functional derivatives dSdn/dFdn to each KS object.

    Arguments:
        KS_list: Kohn-Sham objects to update in place (each provides .scale
            and a .V container for the derivative arrays).
        ratio: pointwise fragment-density ratio (other fragment / own fragment).
        S: scalar overlap value E.S.
        const: sharpness constant of F = erf(const * S).
        opt_partition: when exactly True, derivatives are duplicated along
            axis 1 (matches the original `is True` check).
    """
    for KS in KS_list:
        # Functional derivative of the overlap w.r.t. this fragment's density
        KS.V.dSdn = KS.scale * 0.5 * ratio**0.5
        if opt_partition is True:
            KS.V.dSdn = np.repeat(KS.V.dSdn, 2, axis=1)
        # Remove any Infs/NaNs arising where a fragment density vanishes
        KS.V.dSdn[ np.isinf(KS.V.dSdn) ] = 0.0
        KS.V.dSdn[ np.isnan(KS.V.dSdn) ] = 0.0
        # Chain rule through F = erf(const * S)
        KS.V.dFdn = 2 * np.pi**(-0.5) * np.exp( -(const * S)**2 ) * const * KS.V.dSdn

def vp_overlap(self):
    """Compute the overlap S, the switching function F = erf(const*S), and
    their functional derivatives for both fragments.

    The two previously-duplicated per-fragment loops are factored into
    `_overlap_functional_derivatives`; the a-fragment uses the ratio
    n_b/n_a and the b-fragment the reciprocal.
    """
    const = 2
    # Overlap: integral of sqrt(n_a * n_b) over the grid
    self.E.S = self.grid.integrate((np.sum(self.na_frac, axis=1) * np.sum(self.nb_frac, axis=1))**(0.5))
    self.E.F = erf( const * self.E.S)
    if not self.ens:
        iksa = [self.KSa]
        iksb = [self.KSb]
    else:
        # ensemble calculations carry an extra KS object per fragment
        iksa = [self.KSa, self.KSA]
        iksb = [self.KSb, self.KSB]
    _overlap_functional_derivatives(iksa, self.nb_frac / self.na_frac,
                                    self.E.S, const, self.optPartition)
    _overlap_functional_derivatives(iksb, self.na_frac / self.nb_frac,
                                    self.E.S, const, self.optPartition)
|
{"hexsha": "9313a0b57d0151eb9f07f972b2d8e498359a38d7", "size": 1399, "ext": "py", "lang": "Python", "max_stars_repo_path": "CADMium/partition/vp_overlap.py", "max_stars_repo_name": "VHchavez/CADMium", "max_stars_repo_head_hexsha": "39f3bd63ca69502a80c677855da72f9e691b57e2", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "CADMium/partition/vp_overlap.py", "max_issues_repo_name": "VHchavez/CADMium", "max_issues_repo_head_hexsha": "39f3bd63ca69502a80c677855da72f9e691b57e2", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-04-23T20:38:38.000Z", "max_issues_repo_issues_event_max_datetime": "2021-04-23T20:38:38.000Z", "max_forks_repo_path": "CADMium/partition/vp_overlap.py", "max_forks_repo_name": "VHchavez/CADMium", "max_forks_repo_head_hexsha": "39f3bd63ca69502a80c677855da72f9e691b57e2", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.0888888889, "max_line_length": 104, "alphanum_fraction": 0.5625446748, "include": true, "reason": "import numpy,from scipy", "num_tokens": 512}
|
# Documenter build script for GigaSOM.jl.
using Documenter, GigaSOM

# Generate the HTML documentation; passing `local` in ARGS disables
# pretty (canonical) URLs for local preview builds, and `skiplinks`
# disables external link checking.
makedocs(modules = [GigaSOM],
        clean = false,
        format = Documenter.HTML(prettyurls = !("local" in ARGS),
                canonical = "https://lcsb-biocore.github.io/GigaSOM.jl/stable/",
                assets = ["assets/gigasomlogotransp.ico"]),
        sitename = "GigaSOM.jl",
        authors = "The developers of GigaSOM.jl",
        linkcheck = !("skiplinks" in ARGS),
        pages = [
                "Home" => "index.md",
                "Background" => "background.md",
                "How to get started" => "howToGetStarted.md",
                "Functions" => "functions.md",
                "How to contribute" => "howToContribute.md",
        ],
)

# Publish the built site to the gh-pages branch.
deploydocs(
    repo = "github.com/LCSB-BioCore/GigaSOM.jl.git",
    target = "build",
    branch = "gh-pages",
    devbranch = "origin/develop",
    versions = "stable" => "v^",
)
|
{"hexsha": "bd306d13d75e1753f8406dda0c1067fe419ea7a1", "size": 904, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "docs/make.jl", "max_stars_repo_name": "UnofficialJuliaMirror/GigaSOM.jl-a03a9c34-069e-5582-a11c-5c984cab887c", "max_stars_repo_head_hexsha": "e229624c78f170fb20389f619f820e676971c7ec", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "docs/make.jl", "max_issues_repo_name": "UnofficialJuliaMirror/GigaSOM.jl-a03a9c34-069e-5582-a11c-5c984cab887c", "max_issues_repo_head_hexsha": "e229624c78f170fb20389f619f820e676971c7ec", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "docs/make.jl", "max_forks_repo_name": "UnofficialJuliaMirror/GigaSOM.jl-a03a9c34-069e-5582-a11c-5c984cab887c", "max_forks_repo_head_hexsha": "e229624c78f170fb20389f619f820e676971c7ec", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.4814814815, "max_line_length": 80, "alphanum_fraction": 0.5265486726, "num_tokens": 242}
|
module AutoOffload

using LinearAlgebra, AbstractFFTs, FFTW

# Detect CUDA support at load time and define `cuify` either as a
# host->device copy or the identity, plus the offload threshold constants.
@static if Base.find_package("CuArrays") !== nothing
    using CuArrays
    if Float64(CuArrays.CUDAdrv.totalmem(first(CuArrays.CUDAdrv.devices()))) > 1e9
        @info("CUDA support found, automatic GPU acceleration will be enabled.")
        const GPU_SUPPORTED = true
        const AUTO_GPU_SIZE = 100
        cuify(x) = CuArrays.CuArray(x)
        # Piracy, should get upstreamed
        function Base.ldiv!(x::CuArrays.CuArray,_qr::CuArrays.CUSOLVER.CuQR,b::CuArrays.CuArray)
            _x = UpperTriangular(_qr.R) \ (_qr.Q' * reshape(b,length(b),1))
            x .= vec(_x)
        end
    else
        @info("CUDA support not found, GPU acceleration will not be available.")
        const GPU_SUPPORTED = false
        const AUTO_GPU_SIZE = 100
        cuify(x) = x
    end
else
    @info("CUDA support not found, GPU acceleration will not be available.")
    const GPU_SUPPORTED = false
    const AUTO_GPU_SIZE = 100
    cuify(x) = x
end

# X .= A*B, offloaded to the GPU for sufficiently large CPU arrays.
# BUGFIX: the guard previously tested the undefined variable `b`
# (UndefVarError whenever the GPU path was reachable); test `B` instead.
function accelerated_mul!(X,A,B)
    if GPU_SUPPORTED && size(A,1) > AUTO_GPU_SIZE && B isa Array
        A,B = cuify(A),cuify(B)
        X .= Array(A*B)
    else
        mul!(X,A,B)
    end
end

# X .= A \ B, offloaded to the GPU for sufficiently large CPU arrays.
function accelerated_ldiv!(X,A,B)
    if GPU_SUPPORTED && size(A,1) > AUTO_GPU_SIZE && B isa Array  # BUGFIX: was undefined `b`
        _X,A,B = cuify(X),cuify(A),cuify(B)
        X .= Array(ldiv!(_X,A,B))
    else
        ldiv!(X,A,B)
    end
end

# GPU QR for large CPU arrays; generic factorize otherwise.
function accelerated_factorize!(A)
    if GPU_SUPPORTED && size(A,1) > AUTO_GPU_SIZE && A isa Array  # BUGFIX: was undefined `b`
        qr(cuify(A))
    else
        factorize(A)
    end
end

# In-place FFT, offloaded to the GPU for sufficiently large CPU arrays.
function accelerated_fft!(A)
    if GPU_SUPPORTED && size(A,1) > AUTO_GPU_SIZE && A isa Array  # BUGFIX: was undefined `b`
        A .= Array(fft!(cuify(A)))
    else
        FFTW.fft!(A)
    end
end

export accelerated_mul!, accelerated_ldiv!, accelerated_factorize!,
       accelerated_fft!

module Pirate
# Somehow take over mul!, *, etc to autooffload to GPU?
end

end # module
|
{"hexsha": "d085eb4fd9bf3844a6816e8748ce1198cdbba1cc", "size": 1946, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/AutoOffload.jl", "max_stars_repo_name": "ChrisRackauckas/AutoOffload.jl", "max_stars_repo_head_hexsha": "b649a6abff31aa20a4253ea4ca070a89ada27452", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 26, "max_stars_repo_stars_event_min_datetime": "2020-06-19T01:58:37.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-29T11:47:07.000Z", "max_issues_repo_path": "src/AutoOffload.jl", "max_issues_repo_name": "ChrisRackauckas/AutoOffload.jl", "max_issues_repo_head_hexsha": "b649a6abff31aa20a4253ea4ca070a89ada27452", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2019-07-05T01:18:55.000Z", "max_issues_repo_issues_event_max_datetime": "2019-10-09T17:00:01.000Z", "max_forks_repo_path": "src/AutoOffload.jl", "max_forks_repo_name": "ChrisRackauckas/AutoOffload.jl", "max_forks_repo_head_hexsha": "b649a6abff31aa20a4253ea4ca070a89ada27452", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-01-26T23:54:27.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-09T19:23:28.000Z", "avg_line_length": 25.9466666667, "max_line_length": 96, "alphanum_fraction": 0.6310380267, "num_tokens": 548}
|
import gc
import copy
from inferelator import utils
from inferelator.single_cell_workflow import SingleCellWorkflow
from inferelator.regression.base_regression import _RegressionWorkflowMixin
import numpy as np
# These are required to run this module but nothing else
# They are therefore not package dependencies
import scanpy as sc
import celloracle as co
class CellOracleWorkflow(SingleCellWorkflow):
    """Inferelator workflow that defers preprocessing to CellOracle/scanpy."""

    # Placeholder for a celloracle.Oracle object; populated elsewhere
    oracle = None

    def startup_finish(self):
        """
        Skip inferelator preprocessing and do celloracle preprocessing
        As per https://github.com/morris-lab/CellOracle/issues/58
        """
        self.align_priors_and_expression()
        self.data.convert_to_float()
        adata = self.data._adata
        # presence of "paga" in .uns marks data that was already preprocessed
        if "paga" not in adata.uns:
            utils.Debug.vprint("Normalizing data {sh}".format(sh=adata.shape))
            sc.pp.filter_genes(adata, min_counts=1)
            sc.pp.normalize_per_cell(adata, key_n_counts='n_counts_all')
            # keep the normalized (pre-log) counts so they can be restored below
            adata.raw = adata
            adata.layers["raw_count"] = adata.raw.X.copy()
            utils.Debug.vprint("Scaling data")
            sc.pp.log1p(adata)
            sc.pp.scale(adata)
            utils.Debug.vprint("PCA Preprocessing")
            sc.tl.pca(adata, svd_solver='arpack')
            utils.Debug.vprint("Diffmap Preprocessing")
            sc.pp.neighbors(adata, n_neighbors=4, n_pcs=20)
            sc.tl.diffmap(adata)
            sc.pp.neighbors(adata, n_neighbors=10, use_rep='X_diffmap')
            utils.Debug.vprint("Clustering Preprocessing")
            sc.tl.louvain(adata, resolution=0.8)
            sc.tl.paga(adata, groups='louvain')
            sc.pl.paga(adata)
            sc.tl.draw_graph(adata, init_pos='paga', random_state=123)
            # Restore counts
            adata.X = adata.layers["raw_count"].copy()
        else:
            # Assume all the preprocessing is done and just move along
            utils.Debug.vprint("Using saved preprocessing for CellOracle")

    @staticmethod
    def reprocess_prior_to_base_GRN(priors_data):
        """Convert a (targets x regulators) prior matrix into CellOracle's
        base-GRN mapping {target: [regulator, ...]} for nonzero entries."""
        base_GRN = priors_data.copy()
        base_GRN.index.name = "Target"
        # long format: one row per (Target, Regulator) pair
        base_GRN = base_GRN.melt(ignore_index=False, var_name="Regulator").reset_index()
        base_GRN = base_GRN.loc[base_GRN['value'] != 0, :].copy()
        base_GRN.drop("value", axis=1, inplace=True)
        return {k: v["Regulator"].tolist() for k, v in base_GRN.groupby("Target")}

    @staticmethod
    def reprocess_co_output_to_inferelator_results(co_out):
        """Pivot CellOracle's per-cluster link tables into inferelator-style
        lists of target x source matrices (betas from coef_mean, rankers
        from -logp); missing pairs are filled with 0."""
        betas = [r.pivot(index='target', columns='source', values='coef_mean').fillna(0) for k, r in co_out.items()]
        rankers = [r.pivot(index='target', columns='source', values='-logp').fillna(0) for k, r in co_out.items()]
        return betas, rankers
class CellOracleRegression(_RegressionWorkflowMixin):
    """Regression mixin that delegates GRN inference to CellOracle."""

    # When False, skip KNN imputation and reuse the normalized counts as-is
    oracle_imputation = True

    def run_regression(self):
        """Run CellOracle GRN inference and return (betas, rankers) lists.

        Builds a celloracle.Oracle from the workflow's AnnData, loads the
        prior as a base GRN, optionally performs KNN imputation, fits
        per-cluster links, and converts them to inferelator-style matrices.
        Note: tears down self.data and forces a GC cycle before returning.
        """
        utils.Debug.vprint("Creating Oracle Object")
        # Set up oracle object
        oracle = co.Oracle()
        oracle.import_anndata_as_raw_count(adata=self.data._adata,
                                           cluster_column_name="louvain",
                                           embedding_name="X_pca")
        # Apparently PCA is not transferred from the adata object
        oracle.perform_PCA(100)
        # Add prior
        oracle.addTFinfo_dictionary(self.reprocess_prior_to_base_GRN(self.priors_data))
        utils.Debug.vprint("Imputation Preprocessing")
        if self.oracle_imputation:
            # Heuristics from Celloracle documentation
            n_comps = np.where(np.diff(np.diff(np.cumsum(oracle.pca.explained_variance_ratio_))>0.002))[0][0]
            k = int(0.025 * oracle.adata.shape[0])
            # Make sure n_comps is between 10 and 50
            # It likes to go to 0 for noise controls
            n_comps = max(min(n_comps, 50), 10)
            # Make sure k is at least 25 too I guess
            k = max(k, 25)
            oracle.knn_imputation(n_pca_dims=n_comps, k=k, balanced=True, b_sight=k*8,
                                  b_maxl=k*4, n_jobs=4)
        # Pretend to do imputation
        else:
            oracle.adata.layers["imputed_count"] = oracle.adata.layers["normalized_count"].copy()
        utils.Debug.vprint("CellOracle GRN inference")
        # Call GRN inference
        links = oracle.get_links(cluster_name_for_GRN_unit="louvain", alpha=10,
                                 verbose_level=0, test_mode=False)
        # Deepcopy the result dict that we want
        result = copy.deepcopy(links.links_dict)
        # Try to clean up some of these circular references
        del links
        del oracle
        del self.data._adata
        del self.data
        # Call an explicit GC cycle
        gc.collect()
        return self.reprocess_co_output_to_inferelator_results(result)
|
{"hexsha": "0b11272689a4bf1aac8b8223c8c9a5d36a24d361", "size": 4897, "ext": "py", "lang": "Python", "max_stars_repo_path": "inferelator/benchmarking/celloracle.py", "max_stars_repo_name": "Xparx/inferelator", "max_stars_repo_head_hexsha": "2a33c741c4ba7a6bf3d18a3c14d583af0e0705e8", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 25, "max_stars_repo_stars_event_min_datetime": "2019-06-21T07:56:53.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-19T06:58:07.000Z", "max_issues_repo_path": "inferelator/benchmarking/celloracle.py", "max_issues_repo_name": "Xparx/inferelator", "max_issues_repo_head_hexsha": "2a33c741c4ba7a6bf3d18a3c14d583af0e0705e8", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 22, "max_issues_repo_issues_event_min_datetime": "2019-04-16T15:28:19.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-02T19:11:12.000Z", "max_forks_repo_path": "inferelator/benchmarking/celloracle.py", "max_forks_repo_name": "Xparx/inferelator", "max_forks_repo_head_hexsha": "2a33c741c4ba7a6bf3d18a3c14d583af0e0705e8", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 12, "max_forks_repo_forks_event_min_datetime": "2019-05-13T20:03:17.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-11T01:44:01.000Z", "avg_line_length": 31.7987012987, "max_line_length": 116, "alphanum_fraction": 0.629569124, "include": true, "reason": "import numpy", "num_tokens": 1153}
|
using TupleTools, Base.Cartesian
export loop_einsum, loop_einsum!, allow_loops
"""
    loop_einsum(::EinCode, xs, size_dict)

evaluates the eincode specified by `EinCode` and the tensors `xs` by looping
over all possible indices and calculating the contributions to the result.
Scales exponentially in the number of distinct index-labels.
"""
function loop_einsum(code::EinCode{ixs, iy}, xs::NTuple{N, AbstractArray{<:Any,M} where M},
                size_dict) where {N, ixs, iy}
    # renamed from `size` so the local does not shadow `Base.size`
    out_size = getindex.(Ref(size_dict), iy)
    loop_einsum!(code, xs, get_output_array(xs, out_size; has_repeated_indices=!allunique(iy)), size_dict)
end
"""
    loop_einsum!(::EinCode, xs, y, size_dict)

inplace-version of `loop_einsum`, saving the result in a preallocated tensor
of correct size `y`.
"""
function loop_einsum!(code::EinCode{ixs, iy},
                xs::NTuple{N, AbstractArray{<:Any,M} where M},
                y::AbstractArray{T,L}, size_dict) where {N,L,T, ixs, iy}
    # (removed the unused type parameter `IT <: Union{AbstractChar,Integer}`
    # from the where-clause; no argument referenced it)
    # log an error when falling back to loop evaluation is disallowed
    ALLOW_LOOPS[] || @error "using `loop_einsum` to evaluate" code size.(xs) size(y)
    A = einarray(code, xs, size_dict)
    reduce_einarray!(A, y)
end
# Accumulate the lazily-indexed `EinArray` A into the preallocated output y:
# for every output index (A.OCIS) sum, over all contracted indices (A.ICIS),
# the product of the corresponding input-tensor entries.
function reduce_einarray!(A::EinArray{T}, y) where T
    @inbounds for ind_y in A.OCIS
        iy = subindex(A.y_indexer,ind_y)
        yi = zero(T)
        for ind_x in A.ICIS
            # full index = contracted part followed by output part
            ind = TupleTools.vcat(ind_x.I,ind_y.I)
            yi += map_prod(A.xs, ind, A.x_indexers)
        end
        y[iy] = yi
    end
    y
end
@inline function get_output_array(xs::NTuple{N, AbstractArray{<:Any,M} where M}, size; has_repeated_indices=true) where N
    # zero-initialize when output indices repeat (entries are accumulated
    # into); otherwise an uninitialized array suffices as every entry is set
    T = promote_type(map(eltype,xs)...)
    has_repeated_indices ? zeros(T, size...) : Array{T}(undef, size...)
end
# Global switch consulted by `loop_einsum!`; when false, falling back to the
# generic loop implementation logs an error.
const ALLOW_LOOPS = Ref(true)
"""
    allow_loops(flag::Bool)

Setting this to `false` will cause OMEinsum to log an error if it falls back to
`loop_einsum` evaluation, instead of calling specialised kernels. The default is `true`.
"""
function allow_loops(flag::Bool)
    ALLOW_LOOPS[] = flag
end
|
{"hexsha": "b6a4cec9db9a5c5a6a84f2d59ffd95f4bb15887b", "size": 2075, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/loop_einsum.jl", "max_stars_repo_name": "kshyatt/OMEinsum.jl", "max_stars_repo_head_hexsha": "dd42a5c365f56d50a1a744b4e390671c0a3c4905", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-11-28T16:01:29.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-28T16:01:29.000Z", "max_issues_repo_path": "src/loop_einsum.jl", "max_issues_repo_name": "kshyatt/OMEinsum.jl", "max_issues_repo_head_hexsha": "dd42a5c365f56d50a1a744b4e390671c0a3c4905", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/loop_einsum.jl", "max_forks_repo_name": "kshyatt/OMEinsum.jl", "max_forks_repo_head_hexsha": "dd42a5c365f56d50a1a744b4e390671c0a3c4905", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.9365079365, "max_line_length": 121, "alphanum_fraction": 0.6761445783, "num_tokens": 601}
|
# ----------------------------------------------------------------------------------
# # Presenting Word Frequency Results
# ----------------------------------------------------------------------------------
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec

print("Data Loaded")

# Shared location of the precomputed results CSVs.
_RESULTS_DIR = '/mnt/c/Users/charl/Desktop/finance_perso/BurnieYilmazRS19/resultsData/'

tot_results = pd.read_csv(_RESULTS_DIR + 'percent_totals_0412182.csv')
del tot_results['Unnamed: 0']
wc_results = pd.read_csv(_RESULTS_DIR + 'wilcoxon_rank_sums_0412182.csv')
del wc_results['Unnamed: 0']

# Bonferroni-style corrected significance threshold: 0.01 spread over 3900 tests.
pvalue_cutoff = 0.01 / 3900


def _tokens_where(df, mask):
    """Return the set of `token` values for the rows of *df* selected by *mask*."""
    return set(df[mask].token.tolist())


print("Which words were significant across the two shifts?")

# Tokens whose frequency shift was significant in both period transitions.
p1_p2_tokens = _tokens_where(wc_results, wc_results["wc_p1_p2"] < pvalue_cutoff)
p2_p3_tokens = _tokens_where(wc_results, wc_results["wc_p2_p3"] < pvalue_cutoff)
# Restrict to tokens with a period-2 total above 1 (filters out rare words).
popular_token_2 = _tokens_where(tot_results, tot_results["tot_p2"] > 1)
tokens_w_filter = p1_p2_tokens & p2_p3_tokens & popular_token_2
print(tokens_w_filter)

# Direction of the frequency change in each transition.
p1_p2_rise = _tokens_where(wc_results, wc_results["change_p1_p2"] > 0)
p2_p3_rise = _tokens_where(wc_results, wc_results["change_p2_p3"] > 0)
p1_p2_fall = _tokens_where(wc_results, wc_results["change_p1_p2"] < 0)
p2_p3_fall = _tokens_where(wc_results, wc_results["change_p2_p3"] < 0)

print("Risers")
print(tokens_w_filter & p1_p2_rise & p2_p3_rise)
print("Fallers")
print(tokens_w_filter & p1_p2_fall & p2_p3_fall)
print("Price Dynamic")
print(tokens_w_filter & p1_p2_rise & p2_p3_fall)
print(tokens_w_filter & p1_p2_fall & p2_p3_rise)
|
{"hexsha": "067e4e8f0cbcbfb94893c4ff3afeaa230e52073a", "size": 1801, "ext": "py", "lang": "Python", "max_stars_repo_path": "BurnieYilmazRS19/8_PresentRelativeFrequency.py", "max_stars_repo_name": "Charles0009/crypto_finance_analysis", "max_stars_repo_head_hexsha": "028938afabf0e9fbf352e8136acdc5d9753ba56d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "BurnieYilmazRS19/8_PresentRelativeFrequency.py", "max_issues_repo_name": "Charles0009/crypto_finance_analysis", "max_issues_repo_head_hexsha": "028938afabf0e9fbf352e8136acdc5d9753ba56d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "BurnieYilmazRS19/8_PresentRelativeFrequency.py", "max_forks_repo_name": "Charles0009/crypto_finance_analysis", "max_forks_repo_head_hexsha": "028938afabf0e9fbf352e8136acdc5d9753ba56d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.5245901639, "max_line_length": 129, "alphanum_fraction": 0.6635202665, "include": true, "reason": "import numpy", "num_tokens": 513}
|
#include "stdafx.h"
#include <boost/test/unit_test.hpp>
#include <boost/filesystem.hpp>
#include "ExternalSourceModule.h"
#include "ExternalSinkModule.h"
#include "FileReaderModule.h"
#include "FrameMetadata.h"
#include "Frame.h"
#include "Logger.h"
#include "AIPExceptions.h"
#include "FramesMuxer.h"
#include "test_utils.h"
#include "PipeLine.h"
#include "StatSink.h"
#include "AIPExceptions.h"
#include "RotateCV.h"
BOOST_AUTO_TEST_SUITE(task_test)
// Read one raw frame from disk, rotate it by `angle` degrees through RotateCV,
// and save-or-compare the result against the golden file under data/testOutput.
// Fix: the imageType/type/depth parameters were accepted but ignored — the
// metadata hard-coded RGB / CV_8UC3 / CV_8U. They are now forwarded, which is
// behavior-preserving for the existing caller (it passes exactly those values).
void test(std::string filename, int width, int height, ImageMetadata::ImageType imageType, int type, int depth, double angle)
{
    auto fileReader = boost::shared_ptr<FileReaderModule>(new FileReaderModule(FileReaderModuleProps("./data/" + filename + ".raw")));
    // NOTE(review): the row step of width * 3 still assumes a 3-byte pixel —
    // confirm before calling this helper with a non-3-channel `type`.
    auto metadata = framemetadata_sp(new RawImageMetadata(width, height, imageType, type, width * 3, depth, FrameMetadata::HOST));
    auto rawImagePin = fileReader->addOutputPin(metadata);

    auto m1 = boost::shared_ptr<Module>(new RotateCV(RotateCVProps(angle)));
    fileReader->setNext(m1);
    auto outputPinId = m1->getAllOutputPinsByType(FrameMetadata::RAW_IMAGE)[0];

    auto sink = boost::shared_ptr<ExternalSinkModule>(new ExternalSinkModule());
    m1->setNext(sink);

    BOOST_TEST(fileReader->init());
    BOOST_TEST(m1->init());
    BOOST_TEST(sink->init());

    // Push exactly one frame through the pipeline.
    fileReader->step();
    m1->step();
    auto frames = sink->pop();
    BOOST_TEST((frames.find(outputPinId) != frames.end()));
    auto outFrame = frames[outputPinId];
    BOOST_TEST(outFrame->getMetadata()->getFrameType() == FrameMetadata::RAW_IMAGE);

    auto outFilename = "./data/testOutput/rotatecv_tests_" + filename + "_" + std::to_string(angle) + ".raw";
    Test_Utils::saveOrCompare(outFilename.c_str(), (const uint8_t *)outFrame->data(), outFrame->size(), 0);
}
// Rotate a 1280x720 RGB frame by 90 degrees and compare against the golden output.
BOOST_AUTO_TEST_CASE(task1)
{
    test("frame_1280x720_rgb", 1280, 720, ImageMetadata::ImageType::RGB, CV_8UC3, CV_8U, 90);
}
// BOOST_AUTO_TEST_CASE(task1)
// {
// auto fileReader = boost::shared_ptr<FileReaderModule>(new FileReaderModule(FileReaderModuleProps("./data/filenamestrategydata/?.txt")));
// auto metadata = framemetadata_sp(new FrameMetadata(FrameMetadata::GENERAL));
// auto pinId = fileReader->addOutputPin(metadata);
// bool relay = false;
// auto sink = boost::shared_ptr<Module>(new StatSink());
// fileReader->setNext(sink, relay);
// PipeLine p("test");
// p.appendModule(fileReader);
// p.init();
// p.run_all_threaded();
// for (auto i = 0; i < 10; i++)
// {
// boost::this_thread::sleep_for(boost::chrono::milliseconds(100)); // giving time to call step
// relay = !relay;
// fileReader->relay(sink, relay);
// }
// p.stop();
// p.term();
// p.wait_for_all();
// }
// Assert that a muxed output batch has the expected number of frames and that
// every frame in it carries the expected frame index.
void testFrames(frame_container& frames, size_t fIndex, size_t size)
{
    BOOST_TEST(frames.size() == size);
    for (const auto& entry : frames)
    {
        BOOST_TEST(fIndex == entry.second->fIndex);
    }
}
// Exercises FramesMuxer with three upstream sources feeding four pins total
// (m1 owns two output pins, m2 and m3 one each). The muxer must hold frames
// until every connected input pin has delivered a frame with the same fIndex,
// and only then emit all of them together downstream.
BOOST_AUTO_TEST_CASE(task34)
{
    size_t readDataSize = 1024;
    auto metadata = framemetadata_sp(new FrameMetadata(FrameMetadata::ENCODED_IMAGE));
    auto m1 = boost::shared_ptr<ExternalSourceModule>(new ExternalSourceModule());
    auto pin1_1 = m1->addOutputPin(metadata);
    auto pin1_2 = m1->addOutputPin(metadata);
    auto m2 = boost::shared_ptr<ExternalSourceModule>(new ExternalSourceModule());
    auto pin2_1 = m2->addOutputPin(metadata);
    auto m3 = boost::shared_ptr<ExternalSourceModule>(new ExternalSourceModule());
    auto pin3_1 = m3->addOutputPin(metadata);
    auto muxer = boost::shared_ptr<Module>(new FramesMuxer());
    m1->setNext(muxer);
    m2->setNext(muxer);
    m3->setNext(muxer);
    auto sink = boost::shared_ptr<ExternalSinkModule>(new ExternalSinkModule());
    muxer->setNext(sink);
    BOOST_TEST(m1->init());
    BOOST_TEST(m2->init());
    BOOST_TEST(m3->init());
    BOOST_TEST(muxer->init());
    BOOST_TEST(sink->init());
    {
        {
            // basic
            // One frame (fIndex 500) is addressed to all four pins, but delivered
            // source by source; the muxer must stay silent until the last source sends.
            auto encodedImageFrame = m1->makeFrame(readDataSize, pin1_1);
            encodedImageFrame->fIndex = 500;
            frame_container frames;
            frames.insert(make_pair(pin1_1, encodedImageFrame));
            frames.insert(make_pair(pin1_2, encodedImageFrame));
            frames.insert(make_pair(pin2_1, encodedImageFrame));
            frames.insert(make_pair(pin3_1, encodedImageFrame));
            m1->send(frames);
            muxer->step();
            BOOST_TEST(sink->try_pop().size() == 0);   // still waiting on m2, m3
            m2->send(frames);
            muxer->step();
            BOOST_TEST(sink->try_pop().size() == 0);   // still waiting on m3
            m3->send(frames);
            muxer->step();
            // All pins delivered fIndex 500 -> the full set of 4 frames comes out.
            auto outFrames = sink->try_pop();
            testFrames(outFrames, 500, 4);
        }
        {
            // Same contract with the sources delivering in a different order
            // (m2 and m3 first, m1's two pins last) for fIndex 600.
            auto encodedImageFrame = m1->makeFrame(readDataSize, pin1_1);
            encodedImageFrame->fIndex = 600;
            frame_container frames;
            frames.insert(make_pair(pin2_1, encodedImageFrame));
            frames.insert(make_pair(pin3_1, encodedImageFrame));
            m2->send(frames);
            muxer->step();
            BOOST_TEST(sink->try_pop().size() == 0);
            m3->send(frames);
            muxer->step();
            BOOST_TEST(sink->try_pop().size() == 0);   // m1's pins still missing
            auto encodedImageFrame2 = m1->makeFrame(readDataSize, pin1_2);
            encodedImageFrame2->fIndex = 600;
            frame_container frames2;
            frames2.insert(make_pair(pin1_1, encodedImageFrame2));
            frames2.insert(make_pair(pin1_2, encodedImageFrame2));
            m1->send(frames2);
            muxer->step();
            auto outFrames = sink->try_pop();
            testFrames(outFrames, 600, 4);
        }
    }
    // Tear down all modules and release the shared pointers.
    BOOST_TEST(m1->term());
    BOOST_TEST(m2->term());
    BOOST_TEST(m3->term());
    BOOST_TEST(muxer->term());
    BOOST_TEST(sink->term());
    m1.reset();
    m2.reset();
    m3.reset();
    muxer.reset();
    sink.reset();
}
BOOST_AUTO_TEST_SUITE_END()
|
{"hexsha": "c810164cd944fe2368bc5104cdf785b270dac663", "size": 5466, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "base/test/task_test.cpp", "max_stars_repo_name": "shrikant-pattawi/ApraPipes", "max_stars_repo_head_hexsha": "5d9ed8f00b924cd03bc1d27871ab56bd5b433022", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "base/test/task_test.cpp", "max_issues_repo_name": "shrikant-pattawi/ApraPipes", "max_issues_repo_head_hexsha": "5d9ed8f00b924cd03bc1d27871ab56bd5b433022", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "base/test/task_test.cpp", "max_forks_repo_name": "shrikant-pattawi/ApraPipes", "max_forks_repo_head_hexsha": "5d9ed8f00b924cd03bc1d27871ab56bd5b433022", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.887755102, "max_line_length": 140, "alphanum_fraction": 0.7072813758, "num_tokens": 1538}
|
import os
import glob
import pickle
import re
# Our numerical workhorses
import numpy as np
import pandas as pd
# Import the project utils
import sys
sys.path.insert(0, '../')
# Import matplotlib stuff for plotting
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from IPython.core.pylabtools import figsize
from mpl_toolkits.axes_grid1 import make_axes_locatable
# Logo-generating module and utils
import anylogo
import NB_sortseq_utils as utils

# set plotting format options
utils.set_plotting_style_emat()

# ==============================================================================
# Output directory and directories containing the emat CSV files
# ==============================================================================
output = 'output_figs/'
datadir_lac = '../sortseq/2011_lacZ/'
datadir_marA = '../sortseq/20150820_marRmut2only/'
datadir_mar_RNAP = '../sortseq/20150513_marRmut1only_marRdeltaRAB_marRdeltaR/'
datadir_rel = '../sortseq/20150312_relB/'

# ==============================================================================
# Shared plotting parameters
# ==============================================================================
# Symmetric colour scale chosen to suit all matrices.
emat_min = -0.4
emat_max = 0.4
mid_val = 0.0

# Background nucleotide frequencies (GC content of 50.8%).
gc = .508
background_array = pd.DataFrame([[(1 - gc) / 2, gc / 2, gc / 2, (1 - gc) / 2]])


def load_emat(fname, trim_min=False, trim_max=0):
    """Load an energy-matrix CSV and return its A/C/G/T columns.

    Parameters
    ----------
    fname : str
        Path to a ``*_emat_mean.csv`` file with `position` and A/C/G/T columns.
    trim_min : bool
        If True, drop the row at the smallest `position` value.
    trim_max : int
        Number of rows to drop from the largest-`position` end.
    """
    df = pd.read_csv(fname)
    if trim_min:
        df = df[df.position != df.position.min()]
    for _ in range(trim_max):
        df = df[df.position != df.position.max()]
    df.reset_index(inplace=True)
    return df[['A', 'C', 'G', 'T']]


def plot_emat_logo(energy_df, seq, figname):
    """Draw an information logo above the energy-matrix heatmap and save PDFs.

    Produces two files in `output`: ``<figname>.pdf`` (logo + matrix with the
    wild-type sequence overlaid) and ``<figname>_ematonly.pdf`` (matrix only,
    nearest-neighbour interpolation).

    Parameters
    ----------
    energy_df : pd.DataFrame
        Energy matrix with columns A/C/G/T, one row per position of `seq`.
    seq : str
        Wild-type sequence; its length must equal ``len(energy_df)``.
    figname : str
        Basename (no extension) for the output PDFs.
    """
    # Rescale the matrix so the information logo heights are comparable
    # across promoters.
    energy_df_scaled = utils.estimate_scalefactor(np.array(energy_df)) * energy_df.copy()
    energy_df_scaled = energy_df_scaled[['A', 'C', 'G', 'T']]
    # Per-position background nucleotide frequencies.
    # (np.tile replaces the removed pandas alias pd.np.tile.)
    background_df = pd.DataFrame(np.tile(background_array, (len(energy_df_scaled), 1)),
                                 columns=['A', 'C', 'G', 'T'])

    plt.figure(figsize=utils.cm2inch((0.18 * len(seq) + 0.2, 2.5)))
    ax = plt.gca()

    # Vertical layout: logo above y=0, matrix strip below it.
    relative_scale = 1.5
    relative_spacing = .65
    emat_ymin = -2 * (relative_scale + relative_spacing)
    emat_ymax = -2 * relative_spacing
    yticks = np.linspace(emat_ymin, emat_ymax, 9)[[1, 3, 5, 7]]

    anylogo.draw(ax, effect_df=energy_df_scaled, logo_type='information',
                 background=background_df,
                 use_transparency=False)

    L = len(seq)
    ax.set_xticks([])
    # Heatmap of the matrix with wild-type entries zeroed out.
    ax.imshow(utils.zero_matrix_WT(np.array(energy_df.T), seq),
              interpolation='none',
              cmap='RdBu_r',
              clim=(emat_min, emat_max),
              norm=utils.MidpointNormalize(midpoint=mid_val,
                                           vmin=emat_min, vmax=emat_max),
              extent=(-.5, L - .5, emat_ymin, emat_ymax),
              zorder=100,
              aspect='auto')
    ax.set_ylim([emat_ymin, 2])
    ax.set_yticks(yticks)
    ax.set_yticklabels(list('TGCA'), fontsize=5, horizontalalignment='center')
    ax.set_ylabel('')
    ax.yaxis.set_tick_params(length=0)

    # Overlay the wild-type base at each matrix column.
    y = .5 * emat_ymax
    for i in range(L):
        ax.text(i, y, seq[i], horizontalalignment='center',
                verticalalignment='center', fontsize=6)
    ax.tick_params(axis='y', pad=7)
    plt.tight_layout()
    plt.savefig(output + figname + '.pdf')

    # Save the energy matrix alone using nearest interpolation.
    plt.figure()
    ax = plt.gca()
    ax.imshow(utils.zero_matrix_WT(np.array(energy_df.T), seq),
              interpolation='nearest',
              cmap='RdBu_r',
              clim=(emat_min, emat_max),
              norm=utils.MidpointNormalize(midpoint=mid_val,
                                           vmin=emat_min, vmax=emat_max))
    ax.axis('off')
    plt.savefig(output + figname + '_ematonly.pdf')


# ------------------------------------------------------------------------------
# lacZ promoter: LacI, CRP, RNAP
# ------------------------------------------------------------------------------
plot_emat_logo(
    load_emat(datadir_lac + '2011_lacZ_MG1655_M9glucose_na_mut1_4bins_LacI_O1_emat_mean.csv'),
    'AATTGTGAGCGGATAACAATT',
    'fig2_lacZ_emat_logo_lacI')
plot_emat_logo(
    load_emat(datadir_lac + '2011_lacZ_MG1655_M9glucose_na_mut1_4bins_CRP_emat_mean.csv'),
    'ATTAATGTGAGTTAGCTCACTCATTA',
    'fig2_lacZ_emat_logo_CRP')
# RNAP matrix: drop the first and last position rows to match the sequence.
plot_emat_logo(
    load_emat(datadir_lac + '2011_lacZ_MG1655_M9glucose_na_mut1_4bins_RNAP_emat_mean.csv',
              trim_min=True, trim_max=1),
    'TTTACACTTTATGCTTCCGGCTCGTATGTT',
    'fig2_lacZ_emat_logo_RNAP')

# ------------------------------------------------------------------------------
# marRAB promoter: MarA, RNAP
# ------------------------------------------------------------------------------
plot_emat_logo(
    load_emat(datadir_marA + '20150820_marR_MG1655_LB_na_mut2_4bins_MarA_emat_mean.csv'),
    'ATTTAGCAAAACGTGGCATC',
    'fig2_marRAB_emat_logo_marA')
# RNAP matrix: drop the last two position rows to match the sequence.
plot_emat_logo(
    load_emat(datadir_mar_RNAP + '20150513_marR_MG1655_LB_na_mut1_4bins_RNAP_emat_mean.csv',
              trim_max=2),
    'TTGACTTATACTTGCCTGGGCAATATTAT',
    'fig2_marRAB_emat_logo_RNAP')

# ------------------------------------------------------------------------------
# relB promoter: RelBE, RNAP
# ------------------------------------------------------------------------------
plot_emat_logo(
    load_emat(datadir_rel + '20150513_relB_MG1655_M9glucose_na_mut1_4bins_RelBE_emat_mean.csv'),
    'TGTAATGACATTTGTAATTACAA',
    'fig2_relB_emat_logo_RelBE')
# RNAP matrix: drop the first and last position rows to match the sequence.
plot_emat_logo(
    load_emat(datadir_rel + '20150513_relB_MG1655_M9glucose_na_mut1_4bins_RNAP_emat_mean.csv',
              trim_min=True, trim_max=1),
    'TTGCCCTAAGCATGTTGTAGTGCGATACT',
    'fig2_relB_emat_logo_RNAP')
|
{"hexsha": "e3034310bcb1ad9ecf80101651c9dc594d1b45d7", "size": 23200, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/figures/fig2_knowngenes_matrices_logos.py", "max_stars_repo_name": "RPGroup-PBoC/sortseq_belliveau", "max_stars_repo_head_hexsha": "ca3b0b8092bbe6deaf1b82b2dab67b4bcca679f2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2018-05-07T00:50:08.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-10T12:40:56.000Z", "max_issues_repo_path": "code/figures/fig2_knowngenes_matrices_logos.py", "max_issues_repo_name": "RPGroup-PBoC/sortseq_belliveau", "max_issues_repo_head_hexsha": "ca3b0b8092bbe6deaf1b82b2dab67b4bcca679f2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/figures/fig2_knowngenes_matrices_logos.py", "max_forks_repo_name": "RPGroup-PBoC/sortseq_belliveau", "max_forks_repo_head_hexsha": "ca3b0b8092bbe6deaf1b82b2dab67b4bcca679f2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-07-29T01:40:40.000Z", "max_forks_repo_forks_event_max_datetime": "2019-07-29T01:40:40.000Z", "avg_line_length": 36.1934477379, "max_line_length": 105, "alphanum_fraction": 0.6379310345, "include": true, "reason": "import numpy", "num_tokens": 6251}
|
"""Implementation for the linear binning procedure."""
from typing import Tuple
from numba import njit
import numpy as np
from kernreg.funcs_to_jit import include_weights_from_endpoints
include_weights_from_endpoints_jitted = njit(include_weights_from_endpoints)
def linear_binning(
    x: np.ndarray,
    y: np.ndarray,
    gridsize: int,
    a: float,
    binwidth: float,
    truncate: bool = True,
) -> Tuple[np.ndarray, np.ndarray]:
    r"""Apply linear binning to x and y.

    Linear binning generates bin counts (x-dimension) and bin averages
    (y-dimension) over an equally spaced grid.

    In essence, bin counts are obtained by assigning the raw data to
    neighboring grid points. A bin count represents the
    amount of data in the neighborhood of its corresponding grid point.
    Counts on the y-axis display the respective bin averages.

    The linear binning strategy is based on the transformation
    ``xgrid[i] = ((x[i] - a) / binwidth) + 1``,
    which maps each ``x[i]`` onto its corresponding gridpoint.
    The integer part of ``xgrid[i]`` indicates the two nearest bin centers
    to ``x[i]``. Additionally, the "fractional part" or "bin weight"
    ``binweight = xgrid[i] - bincenter`` holds the weight attached to the
    two nearest bin centers: ``1 - binweight`` goes to the bin on the left
    and ``binweight`` to the bin on the right.

    If ``truncate`` is True, end observations are trimmed.
    Otherwise, weight from end observations is given to corresponding
    end grid points.

    Arguments:
        x: Array of the predictor variable. Shape (N,). Missing values are not accepted.
            Must be sorted ascendingly.
        y: Array of the response variable. Shape (N,). Missing values are not accepted.
            Must come pre-sorted by ``x``.
        gridsize: Number of equally-spaced grid points in the ``x``-dimension.
            Over this grid, ``x`` and ``y`` are binned.
        a: Start point of the grid.
        binwidth: Bin width.
        truncate: If True, trim endpoints.

    Returns:
        xcounts: Array of binned x-values ("bin counts"). Of length ``gridsize``.
        ycounts: Array of binned y-values ("bin averages"). Of length ``gridsize``.
    """
    N = len(x)
    xcounts = np.zeros(gridsize)
    ycounts = np.zeros(gridsize)
    xgrid = np.zeros(N)

    # Single O(N) pass: map each observation onto the grid and spread its
    # weight over the two neighboring bin centers.  (The previous version
    # rescanned all observations once per grid point -- O(N * gridsize) --
    # for the identical result.)
    for i in range(N):
        xgrid[i] = ((x[i] - a) / binwidth) + 1
        # The integer part selects the bin center; the fractional part is the
        # weight given to the bin on the right.
        bincenter = int(xgrid[i])
        binweight = xgrid[i] - bincenter

        # Observations mapping outside [0, gridsize - 1] are dropped here;
        # when truncate is False their weight is restored below.
        if 0 <= bincenter <= gridsize - 1:
            # NOTE(review): for bincenter == 0 (i.e. x[i] < a), index -1 wraps
            # to the last grid point, matching the original implementation --
            # confirm that inputs below ``a`` cannot occur for sorted x.
            xcounts[bincenter - 1] += 1 - binweight
            xcounts[bincenter] += binweight
            ycounts[bincenter - 1] += (1 - binweight) * y[i]
            ycounts[bincenter] += binweight * y[i]

    if truncate is False:
        # Give weight from end observations back to the end grid points.
        xcounts, ycounts = include_weights_from_endpoints_jitted(
            xcounts, ycounts, y, xgrid, gridsize
        )

    return xcounts, ycounts
|
{"hexsha": "4c169b09edc224897b5b09a2fd03c8df564de4ef", "size": 3294, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/kernreg/linear_binning.py", "max_stars_repo_name": "segsell/kernreg", "max_stars_repo_head_hexsha": "2a8c4e73a42994fa6331ca49de8bc3d11a9f7a74", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-01-22T22:18:54.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-22T22:18:54.000Z", "max_issues_repo_path": "src/kernreg/linear_binning.py", "max_issues_repo_name": "segsell/kernreg", "max_issues_repo_head_hexsha": "2a8c4e73a42994fa6331ca49de8bc3d11a9f7a74", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-01-23T22:45:25.000Z", "max_issues_repo_issues_event_max_datetime": "2021-01-23T22:45:25.000Z", "max_forks_repo_path": "src/kernreg/linear_binning.py", "max_forks_repo_name": "segsell/kernreg", "max_forks_repo_head_hexsha": "2a8c4e73a42994fa6331ca49de8bc3d11a9f7a74", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.6736842105, "max_line_length": 88, "alphanum_fraction": 0.6496660595, "include": true, "reason": "import numpy,from numba", "num_tokens": 811}
|
\documentclass{article}
\title{SigViz: real-time signal visualizer for MCEs}
\author{Lorenzo Minutolo \\
California Institute of Technology \\
\and
Sofia Fatigoni \\
University of British Columbia \\
}
\date{\today}
\begin{document}
\maketitle
\tableofcontents
\newpage
\section{About this document}\label{about}
\subsection{Document purpose}
\subsection{Definitions}
\section{Software description}\label{description}
\subsection{Purpose}
\subsection{Applicability}
\subsection{Use-case scenario}
\section{System requirements} \label{requirements}
\subsection{Software}
\subsection{Hardware}
\section{Manual}
\subsection{Installation}
\subsection{Launching the server}
\subsection{Connecting to the server}
\subsection{Displaying a plot}
\section{Troubleshooting}
\subsection{Cannot launch the server}
\subsection{Cannot find the server using a browser}
% \section{Conclusions}\label{conclusions}
% The \LaTeX{} example written by \cite{doe} has been removed.
\begin{thebibliography}{9}
% \bibitem[Doe]{doe} \emph{First and last \LaTeX{} example.},
% John Doe 50 B.C.
\end{thebibliography}
\end{document}
|
{"hexsha": "a968ceb9e04bcaf207ad4a49d3f4ef09345eec5f", "size": 1133, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "Documentation/Technical_doc.tex", "max_stars_repo_name": "LorenzoMinutolo/SigViz", "max_stars_repo_head_hexsha": "9cf85dcb8d8cf1bd04e4c808396fc24efb530d7c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Documentation/Technical_doc.tex", "max_issues_repo_name": "LorenzoMinutolo/SigViz", "max_issues_repo_head_hexsha": "9cf85dcb8d8cf1bd04e4c808396fc24efb530d7c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 10, "max_issues_repo_issues_event_min_datetime": "2020-04-15T00:57:44.000Z", "max_issues_repo_issues_event_max_datetime": "2020-04-21T01:26:30.000Z", "max_forks_repo_path": "Documentation/Technical_doc.tex", "max_forks_repo_name": "LorenzoMinutolo/SigViz", "max_forks_repo_head_hexsha": "9cf85dcb8d8cf1bd04e4c808396fc24efb530d7c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.7884615385, "max_line_length": 70, "alphanum_fraction": 0.773168579, "num_tokens": 304}
|
@testset "1637.widest-vertical-area-between-two-points-containing-no-points.jl" begin
    # LeetCode 1637: verify the widest vertical gap for two sample point sets.
    pts_a = [[8, 7], [9, 9], [7, 4], [9, 7]]
    @test max_width_of_vertical_area(pts_a) == 1

    pts_b = [[3, 1], [9, 0], [1, 0], [1, 4], [5, 3], [8, 8]]
    @test max_width_of_vertical_area(pts_b) == 3
end
|
{"hexsha": "f8fe19696f5d0bb5a16819c0389a2a696dc2d87e", "size": 239, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/problems/1637.widest-vertical-area-between-two-points-containing-no-points.jl", "max_stars_repo_name": "jmmshn/LeetCode.jl", "max_stars_repo_head_hexsha": "dd2f34af8d253b071e8a36823d390e52ad07ab2e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 74, "max_stars_repo_stars_event_min_datetime": "2020-10-27T18:58:45.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-21T13:27:49.000Z", "max_issues_repo_path": "test/problems/1637.widest-vertical-area-between-two-points-containing-no-points.jl", "max_issues_repo_name": "jmmshn/LeetCode.jl", "max_issues_repo_head_hexsha": "dd2f34af8d253b071e8a36823d390e52ad07ab2e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 57, "max_issues_repo_issues_event_min_datetime": "2020-11-01T07:26:04.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-19T11:57:53.000Z", "max_forks_repo_path": "test/problems/1637.widest-vertical-area-between-two-points-containing-no-points.jl", "max_forks_repo_name": "jmmshn/LeetCode.jl", "max_forks_repo_head_hexsha": "dd2f34af8d253b071e8a36823d390e52ad07ab2e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 20, "max_forks_repo_forks_event_min_datetime": "2020-10-30T11:52:04.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-13T10:35:11.000Z", "avg_line_length": 59.75, "max_line_length": 85, "alphanum_fraction": 0.6485355649, "num_tokens": 88}
|
[STATEMENT]
lemma repv_selectlike_other: "x\<noteq>y \<Longrightarrow> (repv \<omega> x d \<in> selectlike X \<omega> {y}) = (repv \<omega> x d \<in> X)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. x \<noteq> y \<Longrightarrow> (repv \<omega> x d \<in> selectlike X \<omega> {y}) = (repv \<omega> x d \<in> X)
[PROOF STEP]
proof
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<lbrakk>x \<noteq> y; repv \<omega> x d \<in> selectlike X \<omega> {y}\<rbrakk> \<Longrightarrow> repv \<omega> x d \<in> X
2. \<lbrakk>x \<noteq> y; repv \<omega> x d \<in> X\<rbrakk> \<Longrightarrow> repv \<omega> x d \<in> selectlike X \<omega> {y}
[PROOF STEP]
assume a: "x\<noteq>y"
[PROOF STATE]
proof (state)
this:
x \<noteq> y
goal (2 subgoals):
1. \<lbrakk>x \<noteq> y; repv \<omega> x d \<in> selectlike X \<omega> {y}\<rbrakk> \<Longrightarrow> repv \<omega> x d \<in> X
2. \<lbrakk>x \<noteq> y; repv \<omega> x d \<in> X\<rbrakk> \<Longrightarrow> repv \<omega> x d \<in> selectlike X \<omega> {y}
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
x \<noteq> y
[PROOF STEP]
have h: "{y}\<subseteq>-{x}"
[PROOF STATE]
proof (prove)
using this:
x \<noteq> y
goal (1 subgoal):
1. {y} \<subseteq> - {x}
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
{y} \<subseteq> - {x}
goal (2 subgoals):
1. \<lbrakk>x \<noteq> y; repv \<omega> x d \<in> selectlike X \<omega> {y}\<rbrakk> \<Longrightarrow> repv \<omega> x d \<in> X
2. \<lbrakk>x \<noteq> y; repv \<omega> x d \<in> X\<rbrakk> \<Longrightarrow> repv \<omega> x d \<in> selectlike X \<omega> {y}
[PROOF STEP]
show "(repv \<omega> x d \<in> selectlike X \<omega> {y}) \<Longrightarrow> (repv \<omega> x d \<in> X)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. repv \<omega> x d \<in> selectlike X \<omega> {y} \<Longrightarrow> repv \<omega> x d \<in> X
[PROOF STEP]
using a selectlike_def Vagree_repv[of \<omega> x d]
[PROOF STATE]
proof (prove)
using this:
x \<noteq> y
selectlike ?X ?\<nu> ?V = {\<omega> \<in> ?X. Vagree \<omega> ?\<nu> ?V}
Vagree \<omega> (repv \<omega> x d) (- {x})
goal (1 subgoal):
1. repv \<omega> x d \<in> selectlike X \<omega> {y} \<Longrightarrow> repv \<omega> x d \<in> X
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
repv \<omega> x d \<in> selectlike X \<omega> {y} \<Longrightarrow> repv \<omega> x d \<in> X
goal (1 subgoal):
1. \<lbrakk>x \<noteq> y; repv \<omega> x d \<in> X\<rbrakk> \<Longrightarrow> repv \<omega> x d \<in> selectlike X \<omega> {y}
[PROOF STEP]
show "(repv \<omega> x d \<in> X) \<Longrightarrow> (repv \<omega> x d \<in> selectlike X \<omega> {y})"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. repv \<omega> x d \<in> X \<Longrightarrow> repv \<omega> x d \<in> selectlike X \<omega> {y}
[PROOF STEP]
using selectlike_def[where X=X and \<nu>=\<omega> and V=\<open>-{x}\<close>] Vagree_repv[where \<omega>=\<omega> and x=x and d=d]
selectlike_antimon[where X=X and \<nu>=\<omega> and V=\<open>{y}\<close> and W=\<open>-{x}\<close>, OF h] Vagree_sym[where \<nu>=\<open>repv \<omega> x d\<close> and V=\<open>-{x}\<close>]
[PROOF STATE]
proof (prove)
using this:
selectlike X \<omega> (- {x}) = {\<omega>' \<in> X. Vagree \<omega>' \<omega> (- {x})}
Vagree \<omega> (repv \<omega> x d) (- {x})
selectlike X \<omega> (- {x}) \<subseteq> selectlike X \<omega> {y}
Vagree (repv \<omega> x d) ?\<nu>' (- {x}) = Vagree ?\<nu>' (repv \<omega> x d) (- {x})
goal (1 subgoal):
1. repv \<omega> x d \<in> X \<Longrightarrow> repv \<omega> x d \<in> selectlike X \<omega> {y}
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
repv \<omega> x d \<in> X \<Longrightarrow> repv \<omega> x d \<in> selectlike X \<omega> {y}
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 1574, "file": "Differential_Game_Logic_Coincidence", "length": 12}
|
# Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Dict, Optional, Sequence, Union
import numpy as np
import torch
from monai.config import IgniteInfo, KeysCollection
from monai.utils import ensure_tuple, look_up_option, min_version, optional_import
idist, _ = optional_import("ignite", IgniteInfo.OPT_IMPORT_VERSION, min_version, "distributed")
if TYPE_CHECKING:
from ignite.engine import Engine
else:
Engine, _ = optional_import("ignite.engine", IgniteInfo.OPT_IMPORT_VERSION, min_version, "Engine")
# Explicit public API of this module; keep in sync with the definitions below.
__all__ = [
    "stopping_fn_from_metric",
    "stopping_fn_from_loss",
    "write_metrics_reports",
    "from_engine",
]
def stopping_fn_from_metric(metric_name: str):
    """
    Create a score function for ``ignite.handlers.EarlyStopping``: the returned
    callable reads the value of `metric_name` from ``engine.state.metrics``.
    """

    def stopping_fn(engine: Engine):
        # the metric value is returned unchanged as the stopping score
        metrics = engine.state.metrics
        return metrics[metric_name]

    return stopping_fn
def stopping_fn_from_loss():
    """
    Create a score function for ``ignite.handlers.EarlyStopping`` based on the
    loss value stored in ``engine.state.output``.
    """

    def stopping_fn(engine: Engine):
        # the loss is negated so that a smaller loss yields a larger score
        return -engine.state.output  # type:ignore

    return stopping_fn
def write_metrics_reports(
    save_dir: str,
    images: Optional[Sequence[str]],
    metrics: Optional[Dict[str, Union[torch.Tensor, np.ndarray]]],
    metric_details: Optional[Dict[str, Union[torch.Tensor, np.ndarray]]],
    summary_ops: Optional[Union[str, Sequence[str]]],
    deli: str = "\t",
    output_type: str = "csv",
) -> None:
    """
    Utility function to write the metrics into files, contains 3 parts:
    1. if `metrics` dict is not None, write overall metrics into file, every line is a metric name and value pair.
    2. if `metric_details` dict is not None, write raw metric data of every image into file, every line for 1 image.
    3. if `summary_ops` is not None, compute summary based on operations on `metric_details` and write to file.

    Args:
        save_dir: directory to save all the metrics reports.
        images: name or path of every input image corresponding to the metric_details data.
            if None, will use index number as the filename of every input image.
        metrics: a dictionary of (metric name, metric value) pairs.
        metric_details: a dictionary of (metric name, metric raw values) pairs, usually, it comes from metrics
            computation, for example, the raw value can be the mean_dice of every channel of every input image.
        summary_ops: expected computation operations to generate the summary report.
            it can be: None, "*" or list of strings, default to None.
            None - don't generate summary report for every expected metric_details.
            "*" - generate summary report for every metric_details with all the supported operations.
            list of strings - generate summary report for every metric_details with specified operations, they
            should be within list: ["mean", "median", "max", "min", "<int>percentile", "std", "notnans"].
            the number in "<int>percentile" should be [0, 100], like: "15percentile". default: "90percentile".
            for more details, please check: https://numpy.org/doc/stable/reference/generated/numpy.nanpercentile.html.
            note that: for the overall summary, it computes `nanmean` of all classes for each image first,
            then compute summary. example of the generated summary report::

                class    mean    median    max    5percentile 95percentile  notnans
                class0  6.0000   6.0000   7.0000   5.1000      6.9000       2.0000
                class1  6.0000   6.0000   6.0000   6.0000      6.0000       1.0000
                mean    6.2500   6.2500   7.0000   5.5750      6.9250       2.0000

        deli: the delimiter character in the file, default to "\t".
        output_type: expected output file type, supported types: ["csv"], default to "csv".

    Raises:
        ValueError: when ``output_type`` is anything other than "csv" (case-insensitive).
    """
    # only CSV output is implemented; fail fast on anything else
    if output_type.lower() != "csv":
        raise ValueError(f"unsupported output type: {output_type}.")

    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    # part 1: overall metrics -- one "<name><deli><value>" line per metric
    if metrics is not None and len(metrics) > 0:
        with open(os.path.join(save_dir, "metrics.csv"), "w") as f:
            for k, v in metrics.items():
                f.write(f"{k}{deli}{str(v)}\n")
    if metric_details is not None and len(metric_details) > 0:
        for k, v in metric_details.items():
            # normalize to a [images, classes] ndarray before writing
            if isinstance(v, torch.Tensor):
                v = v.cpu().numpy()
            if v.ndim == 0:
                # reshape to [1, 1] if no batch and class dims
                v = v.reshape((1, 1))
            elif v.ndim == 1:
                # reshape to [N, 1] if no class dim
                v = v.reshape((-1, 1))

            # add the average value of all classes to v (the extra "mean" column)
            class_labels = ["class" + str(i) for i in range(v.shape[1])] + ["mean"]
            v = np.concatenate([v, np.nanmean(v, axis=1, keepdims=True)], axis=1)

            # part 2: raw per-image values -- one row per image, one column per class
            with open(os.path.join(save_dir, f"{k}_raw.csv"), "w") as f:
                f.write(f"filename{deli}{deli.join(class_labels)}\n")
                for i, b in enumerate(v):
                    f.write(f"{images[i] if images is not None else str(i)}{deli}{deli.join([str(c) for c in b])}\n")

            # part 3: per-class summary statistics over all images
            if summary_ops is not None:
                # nan-aware reducers; the "90percentile" lambda doubles as the
                # handler for any "<int>percentile" op (see _compute_op)
                supported_ops = OrderedDict(
                    {
                        "mean": np.nanmean,
                        "median": np.nanmedian,
                        "max": np.nanmax,
                        "min": np.nanmin,
                        "90percentile": lambda x: np.nanpercentile(x[0], x[1]),
                        "std": np.nanstd,
                        "notnans": lambda x: (~np.isnan(x)).sum(),
                    }
                )
                ops = ensure_tuple(summary_ops)
                if "*" in ops:
                    ops = tuple(supported_ops.keys())

                def _compute_op(op: str, d: np.ndarray):
                    # any "<int>percentile" op reuses the percentile lambda with the parsed threshold
                    if not op.endswith("percentile"):
                        c_op = look_up_option(op, supported_ops)
                        return c_op(d)

                    threshold = int(op.split("percentile")[0])
                    return supported_ops["90percentile"]((d, threshold))  # type: ignore

                with open(os.path.join(save_dir, f"{k}_summary.csv"), "w") as f:
                    f.write(f"class{deli}{deli.join(ops)}\n")
                    for i, c in enumerate(np.transpose(v)):
                        # NOTE: the comprehension's ``k`` is the op name (it shadows the metric key ``k`` above)
                        f.write(f"{class_labels[i]}{deli}{deli.join([f'{_compute_op(k, c):.4f}' for k in ops])}\n")
def from_engine(keys: KeysCollection, first: bool = False):
    """
    Build a callable that extracts the given `keys` from ignite data structures
    (e.g. `engine.state.batch` or `engine.state.output`), to simplify the
    `batch_transform` or `output_transform` args of ignite components without
    a complicated `lambda` function.

    For a single dictionary, the values of `keys` are returned as a tuple.
    For a decollated list of dictionaries, every key is gathered into a list
    across the items: `[{"A": 1, "B": 2}, {"A": 3, "B": 4}]` with
    `from_engine(["A", "B"])` yields `([1, 3], [2, 4])`.

    For example, set the first key as the prediction and the second key as label to get the expected data
    from `engine.state.output` for a metric::

        from monai.handlers import MeanDice, from_engine
        metric = MeanDice(
            include_background=False,
            output_transform=from_engine(["pred", "label"])
        )

    Args:
        keys: specified keys to extract data from dictionary or decollated list of dictionaries.
        first: whether only extract specified keys from the first item if input data is a list of dictionaries,
            it's used to extract the scalar data which doesn't have batch dim and was replicated into every
            dictionary when decollating, like `loss`, etc.

    """
    keys = ensure_tuple(keys)

    def _wrapper(data):
        if isinstance(data, dict):
            return tuple(data[key] for key in keys)
        if isinstance(data, list) and isinstance(data[0], dict):
            # decollated list of dictionaries: gather each key across all
            # items, or take it from the first item only when ``first=True``
            extracted = []
            for key in keys:
                if first:
                    extracted.append(data[0][key])
                else:
                    extracted.append([item[key] for item in data])
            return tuple(extracted) if len(extracted) > 1 else extracted[0]

    return _wrapper
|
{"hexsha": "5d72c028f940a572d30d1f281a2834c7306ccf5d", "size": 9273, "ext": "py", "lang": "Python", "max_stars_repo_path": "monai/handlers/utils.py", "max_stars_repo_name": "dylanbuchi/MONAI", "max_stars_repo_head_hexsha": "1651f1b003b0ffae8b615d191952ad65ad091277", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "monai/handlers/utils.py", "max_issues_repo_name": "dylanbuchi/MONAI", "max_issues_repo_head_hexsha": "1651f1b003b0ffae8b615d191952ad65ad091277", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "monai/handlers/utils.py", "max_forks_repo_name": "dylanbuchi/MONAI", "max_forks_repo_head_hexsha": "1651f1b003b0ffae8b615d191952ad65ad091277", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 46.1343283582, "max_line_length": 118, "alphanum_fraction": 0.6273050793, "include": true, "reason": "import numpy", "num_tokens": 2179}
|
MODULE NWTC_Num
! This module contains numeric-type routines with non-system-specific logic and references.
! It contains the following routines:
! SUBROUTINE AddOrSub2Pi ( OldAngle, NewAngle )
! SUBROUTINE BSortReal ( RealAry, NumPts )
! FUNCTION CROSS_PRODUCT ( Vector1, Vector2 )
! FUNCTION EqualRealNos ( ReNum1, ReNum2 )
! SUBROUTINE GL_Pts ( IPt, NPts, Loc, Wt [, ErrStat] )
! FUNCTION IndexCharAry ( CVal, CAry )
! FUNCTION InterpBin ( XVal, XAry, YAry, ILo, AryLen ) ! Generic interface for InterpBinComp and InterpBinReal.
! FUNCTION InterpBinComp ( XVal, XAry, YAry, ILo, AryLen )
! FUNCTION InterpBinReal ( XVal, XAry, YAry, ILo, AryLen )
! FUNCTION InterpStp ( XVal, XAry, YAry, ILo, AryLen ) ! Generic interface for InterpStpComp and InterpStpReal.
! FUNCTION InterpStpComp ( XVal, XAry, YAry, Ind, AryLen )
! FUNCTION InterpStpReal ( XVal, XAry, YAry, Ind, AryLen )
! SUBROUTINE LocateStp ( XVal, XAry, Ind, AryLen )
! FUNCTION Mean ( Ary, AryLen ) ! Function to calculate the mean value of a vector array.
! SUBROUTINE MPi2Pi ( Angle )
! SUBROUTINE RombergInt ( f, a, b, R, err, eps, ErrStat )
! SUBROUTINE SetConstants
! SUBROUTINE SmllRotTrans ( RotationType, Theta1, Theta2, Theta3, TransMat, ErrTxt )
! SUBROUTINE SortUnion ( Ary1, N1, Ary2, N2, Ary, N )
! FUNCTION StdDevFn ( Ary, AryLen, Mean ) ! Function to calculate the standard deviation of a vector array.
USE NWTC_IO
IMPLICIT NONE
!=======================================================================
! Global numeric-related variables.
REAL(DbKi) :: D2R_D ! Factor to convert degrees to radians in double precision.
REAL(DbKi) :: Inf_D ! IEEE value for NaN (not-a-number) in double precision
REAL(DbKi) :: NaN_D ! IEEE value for Inf (infinity) in double precision
REAL(DbKi) :: Pi_D ! Ratio of a circle's circumference to its diameter in double precision.
REAL(DbKi) :: PiBy2_D ! Pi/2 in double precision.
REAL(DbKi) :: R2D_D ! Factor to convert radians to degrees in double precision.
REAL(DbKi) :: RPM2RPS_D ! Factor to convert revolutions per minute to radians per second in double precision.
REAL(DbKi) :: RPS2RPM_D ! Factor to convert radians per second to revolutions per minute in double precision.
REAL(DbKi) :: TwoByPi_D ! 2/Pi in double precision.
REAL(DbKi) :: TwoPi_D ! 2*Pi in double precision.
REAL(ReKi) :: D2R ! Factor to convert degrees to radians.
REAL(ReKi) :: Inf ! IEEE value for NaN (not-a-number)
REAL(ReKi) :: NaN ! IEEE value for Inf (infinity)
REAL(ReKi) :: Pi ! Ratio of a circle's circumference to its diameter.
REAL(ReKi) :: PiBy2 ! Pi/2.
REAL(ReKi) :: R2D ! Factor to convert radians to degrees.
REAL(ReKi) :: RPM2RPS ! Factor to convert revolutions per minute to radians per second.
REAL(ReKi) :: RPS2RPM ! Factor to convert radians per second to revolutions per minute.
REAL(ReKi) :: TwoByPi ! 2/Pi.
REAL(ReKi) :: TwoPi ! 2*Pi.
INTEGER, ALLOCATABLE :: IntIndx (:,:) ! The array of indices holding that last index used for interpolation in IntBlade().
!=======================================================================
! Create interface for a generic InterpBin that actually uses specific routines.
INTERFACE InterpBin
MODULE PROCEDURE InterpBinComp
MODULE PROCEDURE InterpBinReal
END INTERFACE
! Create interface for a generic InterpStp that actually uses specific routines.
INTERFACE InterpStp
MODULE PROCEDURE InterpStpComp
MODULE PROCEDURE InterpStpReal
END INTERFACE
CONTAINS
!=======================================================================
SUBROUTINE AddOrSub2Pi ( OldAngle, NewAngle )

   ! Shift NewAngle by whole multiples of 2*Pi until it lies within 2*Pi of
   ! OldAngle, then store the result back into OldAngle.  This unwraps angles
   ! returned by the ATAN2() function so a continuously rotating quantity
   ! (e.g. nacelle yaw going from 179deg to 181deg) does not jump at the
   ! -Pi/+Pi branch cut.  Assumes the true angle change between successive
   ! calls is less than 2*Pi in magnitude; OldAngle must be SAVEd in the
   ! calling routine.

   ! Argument declarations:

   REAL(ReKi), INTENT(INOUT) :: OldAngle   ! Reference angle; receives the unwrapped NewAngle on return, rad.
   REAL(ReKi), INTENT(INOUT) :: NewAngle   ! Angle to be shifted to within 2*Pi of OldAngle, rad.

   ! Local declarations:

   REAL(ReKi)                :: Delta      ! Current difference OldAngle - NewAngle, rad.

   ! Repeatedly add or subtract 2*Pi (carrying the sign of the difference)
   ! until NewAngle is within 2*Pi of OldAngle:

   DO
      Delta = OldAngle - NewAngle
      IF ( ABS( Delta ) < TwoPi )  EXIT
      NewAngle = NewAngle + SIGN( TwoPi, Delta )
   END DO

   ! Set OldAngle to equal the unwrapped NewAngle:

   OldAngle = NewAngle

   RETURN
END SUBROUTINE AddOrSub2Pi
!=======================================================================
SUBROUTINE BSortReal ( RealAry, NumPts )

   ! Sort a list of reals into ascending order in place using a bubble sort,
   ! which is adequate only for short lists.

   ! Argument declarations:

   INTEGER, INTENT(IN)       :: NumPts            ! Number of elements in the list.

   REAL(ReKi), INTENT(INOUT) :: RealAry(NumPts)   ! The list of reals, sorted in place.

   ! Local declarations:

   REAL(ReKi)                :: SwapVal           ! Temporary used when exchanging neighbors.

   INTEGER                   :: Indx              ! Index into the array.

   LOGICAL                   :: Swapped           ! .TRUE. if the last pass exchanged any pair.

   ! Keep sweeping the array until a complete pass makes no exchanges.

   Swapped = .TRUE.

   DO WHILE ( Swapped )

      Swapped = .FALSE.

      DO Indx=2,NumPts
         IF ( RealAry(Indx) < RealAry(Indx-1) )  THEN
            SwapVal          = RealAry(Indx)
            RealAry(Indx)    = RealAry(Indx-1)
            RealAry(Indx-1)  = SwapVal
            Swapped          = .TRUE.
         END IF
      END DO ! Indx

   END DO ! WHILE

   RETURN
END SUBROUTINE BSortReal ! ( RealAry, NumPts )
!=======================================================================
FUNCTION Cross_Product(Vector1, Vector2)

   ! Return the vector cross product Vector1 X Vector2 of two 3-element arrays.

   ! Argument declarations.

   REAL(ReKi), INTENT(IN )  :: Vector1 (3)
   REAL(ReKi), INTENT(IN )  :: Vector2 (3)

   ! Function definition

   REAL(ReKi)               :: Cross_Product (3)   ! = Vector1 X Vector2 (resulting in a vector)

   ! Each component is the determinant of the complementary 2x2 minor.

   Cross_Product = (/ Vector1(2)*Vector2(3) - Vector1(3)*Vector2(2),  &
                      Vector1(3)*Vector2(1) - Vector1(1)*Vector2(3),  &
                      Vector1(1)*Vector2(2) - Vector1(2)*Vector2(1) /)

   RETURN
END FUNCTION Cross_Product
!=======================================================================
! SUBROUTINE GetPermMat ( InpMat, PMat, ErrStat )
!
! ! This subroutine computes a permutation matrix, PMat, for a given
! ! input matrix, InpMat. It assumes that InpMat is of full rank
! ! and for now, the matrices are 3 x 3.
!
! ! passed variables
!
! REAL(ReKi), INTENT(IN ) :: InpMat (3,3)
! REAL(ReKi), INTENT(OUT ) :: PMat (3,3) !this could be integer, but we'll leave it real now
! INTEGER, INTENT(OUT ) :: ErrStat ! a non-zero value indicates an error in the permutation matrix algorithm
!
! ! local variables
! INTEGER :: iCol ! loop counter
! INTEGER :: iRow ! loop counter
! INTEGER :: MaxCol ! holds index of maximum value in a column
!
! LOGICAL :: ChkCols (3) ! a check to make sure we have only one non-zero element per column
!
! ! initialize some variables
! PMat = 0.0
! ChkCols = .FALSE.
! ErrStat = 0
!
! ! find the pivots
! DO iRow = 1,3
!
! MaxCol = 1 ! initialize max index
! DO iCol = 2,3
! IF ( ABS(InpMat(iRow,iCol)) > ABS(InpMat(iRow,MaxCol)) ) &
! MaxCol = iCol
! END DO ! iCol
!
! IF ( ChkCols(MaxCol) ) THEN ! we can have only 1 non-zero entry per row and column, but we've just violated that!
! CALL ProgAbort( ' Error in GetPermMat(): InpMat is not full rank.', TrapErrors = .TRUE. )
! ErrStat = 1
! END IF
!
! PMat(MaxCol, iRow) = SIGN( 1.0_ReKi, InpMat(iRow,MaxCol) ) ! technically a permutation matrix would only have +1.0 (not -1.0)
! ChkCols(MaxCol) = .TRUE.
!
! END DO ! iRow
!
! RETURN
! END SUBROUTINE GetPermMat ! ( InpMat, PMat, ErrStat )
!=======================================================================
FUNCTION EqualRealNos ( ReNum1, ReNum2 )

   ! Compare two reals and report whether they are "almost" equal, i.e. equal
   ! to within a relative tolerance that ignores roughly the last two
   ! significant digits.

   ! passed variables

   REAL(ReKi), INTENT(IN )  :: ReNum1         ! the first real number to compare
   REAL(ReKi), INTENT(IN )  :: ReNum2         ! the second real number to compare

   LOGICAL                  :: EqualRealNos   ! .TRUE. when the numbers are almost equal

   ! local variables

   REAL(ReKi), PARAMETER    :: Eps = EPSILON(ReNum1)   ! machine precision
   REAL(ReKi), PARAMETER    :: Tol = 100.*Eps / 2.     ! relative tolerance (drops the last 2 significant digits)

   REAL(ReKi)               :: Fraction                ! magnitude used to scale the tolerance

   ! make sure we're never trying to get more precision than Tol allows,
   ! even when both inputs are tiny

   Fraction = MAX( ABS(ReNum1+ReNum2), 1.0_ReKi )

   ! relative-error comparison (comparison suggestion from Lahey.com)

   EqualRealNos = ( ABS(ReNum1 - ReNum2) <= Fraction*Tol )

END FUNCTION EqualRealNos
!=======================================================================
FUNCTION GetSmllRotAngs ( DCMat, ErrStat )

   ! This function computes the angles that make up the input
   ! direction cosine matrix, DCMat, assuming those angles are small.
   ! NOTE(review): the extraction uses the skew-symmetric part of DCMat
   ! scaled by (trace - 1), consistent with a small-angle linearization of
   ! a DCM -- confirm against the project's rotation-sign conventions.

   ! passed variables

   REAL(ReKi), INTENT(IN )  :: DCMat (3,3)   ! direction cosine matrix whose small angles are extracted
   INTEGER,    INTENT(OUT ) :: ErrStat       ! a non-zero value indicates an error in the permutation matrix algorithm

   REAL(ReKi)               :: GetSmllRotAngs ( 3 )   ! the three extracted small angles, rad

   ! local variables

   REAL(ReKi)               :: denom             ! the denominator of the resulting matrix: trace(DCMat) - 1
   REAL(ReKi), PARAMETER    :: LrgAngle = 0.4    ! Threshold for when a small angle becomes large (about 23deg). This comes from: COS(SmllAngle) ~ 1/SQRT( 1 + SmllAngle^2 ) and SIN(SmllAngle) ~ SmllAngle/SQRT( 1 + SmllAngle^2 ) results in ~5% error when SmllAngle = 0.4rad.
   REAL(ReKi), PARAMETER    :: TOL = EPSILON(TOL) ! tolerance for division by zero; EPSILON() depends only on the kind, so applying it to TOL itself is well-defined

   ! initialize output angles (just in case there is an error that prevents them from getting set)

   GetSmllRotAngs = 0.0
   ErrStat        = 0

   ! calculate the small angles from the off-diagonal differences (the skew-symmetric part)

   GetSmllRotAngs(1) = DCMat(2,3) - DCMat(3,2)
   GetSmllRotAngs(2) = DCMat(3,1) - DCMat(1,3)
   GetSmllRotAngs(3) = DCMat(1,2) - DCMat(2,1)

   denom             = DCMat(1,1) + DCMat(2,2) + DCMat(3,3) - 1

   IF ( ABS(denom) > TOL ) THEN

      GetSmllRotAngs = GetSmllRotAngs / denom

      ! check that the angles are, in fact, small

      IF ( ANY( ABS(GetSmllRotAngs) > LrgAngle ) ) THEN
         CALL ProgWarn( ' Angles in GetSmllRotAngs() are larger than '//TRIM(Num2LStr(LrgAngle))//' radians.' )
         ErrStat = 1
      END IF

   ELSE
      ! the angles cannot be small here (denom should be close to 2 if angles are small)

      CALL ProgAbort( ' Denominator is zero in GetSmllRotAngs().', TrapErrors = .TRUE. )
      ErrStat = -1

   END IF

END FUNCTION GetSmllRotAngs ! ( DCMat, PMat, ErrStat )
!=======================================================================
SUBROUTINE GL_Pts ( IPt, NPts, Loc, Wt, ErrStat )

   ! This routine returns the non-dimensional (-1:+1) location of the given Gauss-Legendre Quadrature point and its weight.
   ! The values came from Carnahan, Brice; Luther, H.A.; Wilkes, James O. (1969) "Applied Numerical Methods."
   ! Only rules with 1 to 6 points are tabulated; anything else is an error.

   ! Argument declarations.

   REAL(ReKi)                     :: Loc      ! The location of the specified point.
   REAL(ReKi)                     :: Wt       ! The weight for the specified point.

   INTEGER, INTENT(OUT), OPTIONAL :: ErrStat  ! Error status; if present, program does not abort on error
   INTEGER, INTENT(INOUT)         :: IPt      ! The quadrature point in question.
   INTEGER, INTENT(INOUT)         :: NPts     ! The number of points used in the quadrature.
   ! NOTE(review): IPt and NPts are declared INTENT(INOUT) but are only read here -- confirm whether INTENT(IN) was intended.

   IF ( PRESENT(ErrStat) ) ErrStat = 0

   ! Check to see if the number of points and the specific point are valid values.

   IF ( ( NPts < 1 ) .OR. ( NPts > 6 ) )  THEN
      CALL ProgAbort ( ' In function GL_Loc, the number of points used for Gauss-Legendre Quadrature must be between 1 and 6' &
                  //' (inclusive).  Instead, it is "'//TRIM( Int2LStr( NPts ) )//'".', PRESENT(ErrStat) )
      IF ( PRESENT(ErrStat) ) THEN ! this should always be true here, since ProgAbort only returns when ErrStat is present
         ErrStat = 1
         RETURN
      END IF
   END IF

   IF ( ( Ipt < 1 ) .OR. ( Ipt > NPts ) )  THEN
      CALL ProgAbort ( ' In function GL_Loc, the point being used for Gauss-Legendre Quadrature must be between 1 and ' &
                   //TRIM( Int2LStr( NPts ) )//' (inclusive).  Instead, it is "'//TRIM( Int2LStr( Ipt ) )//'".', PRESENT(ErrStat) )
      IF ( PRESENT(ErrStat) ) THEN
         ErrStat = 1
         RETURN
      END IF
   END IF

   ! Set the location and weight of the point from the tabulated
   ! abscissas/weights (symmetric about 0; weights sum to 2).

   SELECT CASE ( NPts )
      CASE ( 1 )                         ! Case 1 is really just rectangular integration.
         Loc = 0.0
         Wt  = 2.0
      CASE ( 2 )
         SELECT CASE ( Ipt )
            CASE ( 1 )
               Loc = -0.5773503
               Wt  =  1.0
            CASE ( 2 )
               Loc = 0.5773503
               Wt  = 1.0
         END SELECT ! Ipt
      CASE ( 3 )
         SELECT CASE ( Ipt )
            CASE ( 1 )
               Loc = -0.7745967
               Wt  =  0.5555556
            CASE ( 2 )
               Loc =  0.0
               Wt  =  0.8888889
            CASE ( 3 )
               Loc =  0.7745967
               Wt  =  0.5555556
         END SELECT ! Ipt
      CASE ( 4 )
         SELECT CASE ( Ipt )
            CASE ( 1 )
               Loc = -0.8611363
               Wt  =  0.3478548
            CASE ( 2 )
               Loc = -0.3399810
               Wt  =  0.6521452
            CASE ( 3 )
               Loc =  0.3399810
               Wt  =  0.6521452
            CASE ( 4 )
               Loc =  0.8611363
               Wt  =  0.3478548
         END SELECT ! Ipt
      CASE ( 5 )
         SELECT CASE ( Ipt )
            CASE ( 1 )
               Loc = -0.9061798
               Wt  =  0.2369269
            CASE ( 2 )
               Loc = -0.5384693
               Wt  =  0.4786287
            CASE ( 3 )
               Loc =  0.0
               Wt  =  0.5688889
            CASE ( 4 )
               Loc =  0.5384693
               Wt  =  0.4786287
            CASE ( 5 )
               Loc =  0.9061798
               Wt  =  0.2369269
         END SELECT ! Ipt
      CASE ( 6 )
         SELECT CASE ( Ipt )
            CASE ( 1 )
               Loc = -0.9324695
               Wt  =  0.1713245
            CASE ( 2 )
               Loc = -0.6612094
               Wt  =  0.3607616
            CASE ( 3 )
               Loc = -0.2386192
               Wt  =  0.4679139
            CASE ( 4 )
               Loc =  0.2386192
               Wt  =  0.4679139
            CASE ( 5 )
               Loc =  0.6612094
               Wt  =  0.3607616
            CASE ( 6 )
               Loc =  0.9324695
               Wt  =  0.1713245
         END SELECT ! Ipt
   END SELECT ! Npts

   RETURN
END SUBROUTINE GL_Pts ! ( IPt, NPts, Loc, Wt [, ErrStat] )
!=======================================================================
FUNCTION IndexCharAry( CVal, CAry )
! This funtion returns an integer index such that CAry(IndexCharAry) = CVal. If
! no element in the array matches CVal, the value -1 is returned. The routine
! performs a binary search on the input array to determine if CVal is an
! element of the array; thus, CAry must be sorted and stored in increasing
! alphebetical (ASCII) order. The routine does not check that the array is
! sorted. The routine assumes that CVal is type CHARACTER and CAry
! is an array of CHARACTERS.
! Function declaration.
INTEGER :: IndexCharAry ! This function
! Argument declarations.
CHARACTER(*), INTENT(IN) :: CVal ! String to find.
CHARACTER(*), INTENT(IN) :: CAry(:) ! Array of strings to search.
! Local declarations.
INTEGER :: IHi ! The high index into the arrays.
INTEGER :: IMid ! The mid-point index between IHi and ILo.
INTEGER :: ILo
! Initialize some variables
ILo = 1
IHi = SIZE(CAry)
IF ( CVal == CAry(ILo) ) THEN
IndexCharAry = ILo
ELSEIF ( CVal == CAry(IHi) ) THEN
IndexCharAry = IHi
ELSE
IndexCharAry = -1
! Let's search!
DO WHILE ( IHi-ILo > 1 )
IMid = ( IHi + ILo )/2
IF( CVal > CAry(IMid) ) THEN
ILo = IMid
ELSEIF (CVal < CAry(IMid) ) THEN
IHi = IMid
ELSE !Found it
IndexCharAry = IMid
EXIT
END IF
END DO
END IF
RETURN
END FUNCTION IndexCharAry
!=======================================================================
FUNCTION InterpBinComp( XVal, XAry, YAry, ILo, AryLen )
! This funtion returns a y-value that corresponds to an input x-value by interpolating into the arrays.
! It uses a binary interpolation scheme that takes about log(AryLen)/log(2) steps to converge.
! It returns the first or last YAry() value if XVal is outside the limits of XAry().
! This routine assumes YAry is COMPLEX.
! Function declaration.
COMPLEX(ReKi) :: InterpBinComp ! This function.
! Argument declarations.
INTEGER, INTENT(IN) :: AryLen ! Length of the arrays.
INTEGER, INTENT(INOUT) :: ILo ! The low index into the arrays.
REAL(ReKi), INTENT(IN) :: XAry (AryLen) ! Array of X values to be interpolated.
REAL(ReKi), INTENT(IN) :: XVal ! X value to be interpolated.
COMPLEX(ReKi), INTENT(IN) :: YAry (AryLen) ! Array of Y values to be interpolated.
! Local declarations.
INTEGER :: IHi ! The high index into the arrays.
INTEGER :: IMid ! The mid-point index between IHi and ILo.
! Let's check the limits first.
IF ( XVal <= XAry(1) ) THEN
InterpBinComp = YAry(1)
ILo = 1
RETURN
ELSE IF ( XVal >= XAry(AryLen) ) THEN
InterpBinComp = YAry(AryLen)
ILo = AryLen - 1
RETURN
END IF
! Let's interpolate!
ILo = 1
IHi = AryLen
DO WHILE ( IHi-ILo > 1 )
IMid = ( IHi + ILo )/2
IF ( XVal >= XAry(IMid) ) THEN
ILo = IMid
ELSE
IHi = IMid
END IF
END DO
InterpBinComp = YAry(ILo) + ( YAry(IHi) - YAry(ILo) )*( XVal - XAry(ILo) )/( XAry(IHi) - XAry(ILo) )
RETURN
END FUNCTION InterpBinComp ! ( XVal, XAry, YAry, ILo, AryLen )
!=======================================================================
FUNCTION InterpBinReal( XVal, XAry, YAry, ILo, AryLen )
! This funtion returns a y-value that corresponds to an input x-value by interpolating into the arrays.
! It uses a binary interpolation scheme that takes about log(AryLen)/log(2) steps to converge.
! It returns the first or last YAry() value if XVal is outside the limits of XAry().
! This routine assumes YAry is REAL.
! Function declaration.
REAL(ReKi) :: InterpBinReal ! This function.
! Argument declarations.
INTEGER, INTENT(IN) :: AryLen ! Length of the arrays.
INTEGER, INTENT(INOUT) :: ILo ! The low index into the arrays.
REAL(ReKi), INTENT(IN) :: XAry (AryLen) ! Array of X values to be interpolated.
REAL(ReKi), INTENT(IN) :: XVal ! X value to be interpolated.
REAL(ReKi), INTENT(IN) :: YAry (AryLen) ! Array of Y values to be interpolated.
! Local declarations.
INTEGER :: IHi ! The high index into the arrays.
INTEGER :: IMid ! The mid-point index between IHi and ILo.
! Let's check the limits first.
IF ( XVal <= XAry(1) ) THEN
InterpBinReal = YAry(1)
ILo = 1
RETURN
ELSE IF ( XVal >= XAry(AryLen) ) THEN
InterpBinReal = YAry(AryLen)
ILo = AryLen - 1
RETURN
END IF
! Let's interpolate!
ILo = 1
IHi = AryLen
DO WHILE ( IHi-ILo > 1 )
IMid = ( IHi + ILo )/2
IF ( XVal >= XAry(IMid) ) THEN
ILo = IMid
ELSE
IHi = IMid
END IF
END DO
InterpBinReal = YAry(ILo) + ( YAry(IHi) - YAry(ILo) )*( XVal - XAry(ILo) )/( XAry(IHi) - XAry(ILo) )
RETURN
END FUNCTION InterpBinReal ! ( XVal, XAry, YAry, ILo, AryLen )
!=======================================================================
FUNCTION InterpStpComp( XVal, XAry, YAry, Ind, AryLen )
! This funtion returns a y-value that corresponds to an input x-value by interpolating into the arrays.
! It uses the passed index as the starting point and does a stepwise interpolation from there. This is
! especially useful when the calling routines save the value from the last time this routine was called
! for a given case where XVal does not change much from call to call. When there is no correlation
! from one interpolation to another, InterpBin() may be a better choice.
! It returns the first or last YAry() value if XVal is outside the limits of XAry().
! This routine assumes YAry is COMPLEX.
! Function declaration.
COMPLEX(ReKi) :: InterpStpComp ! This function.
! Argument declarations.
INTEGER, INTENT(IN) :: AryLen ! Length of the arrays.
INTEGER, INTENT(INOUT) :: Ind ! Initial and final index into the arrays.
REAL(ReKi), INTENT(IN) :: XAry (AryLen) ! Array of X values to be interpolated.
REAL(ReKi), INTENT(IN) :: XVal ! X value to be interpolated.
COMPLEX(ReKi), INTENT(IN) :: YAry (AryLen) ! Array of Y values to be interpolated.
! Let's check the limits first.
IF ( XVal <= XAry(1) ) THEN
InterpStpComp = YAry(1)
Ind = 1
RETURN
ELSE IF ( XVal >= XAry(AryLen) ) THEN
InterpStpComp = YAry(AryLen)
Ind = AryLen - 1
RETURN
END IF
! Let's interpolate!
Ind = MAX( MIN( Ind, AryLen-1 ), 1 )
DO
IF ( XVal < XAry(Ind) ) THEN
Ind = Ind - 1
ELSE IF ( XVal >= XAry(Ind+1) ) THEN
Ind = Ind + 1
ELSE
InterpStpComp = ( YAry(Ind+1) - YAry(Ind) )*( XVal - XAry(Ind) )/( XAry(Ind+1) - XAry(Ind) ) + YAry(Ind)
RETURN
END IF
END DO
RETURN
END FUNCTION InterpStpComp ! ( XVal, XAry, YAry, Ind, AryLen )
!=======================================================================
FUNCTION InterpStpReal( XVal, XAry, YAry, Ind, AryLen )
! This funtion returns a y-value that corresponds to an input x-value by interpolating into the arrays.
! It uses the passed index as the starting point and does a stepwise interpolation from there. This is
! especially useful when the calling routines save the value from the last time this routine was called
! for a given case where XVal does not change much from call to call. When there is no correlation
! from one interpolation to another, InterpBin() may be a better choice.
! It returns the first or last YAry() value if XVal is outside the limits of XAry().
! This routine assumes YAry is REAL.
! Function declaration.
REAL(ReKi) :: InterpStpReal ! This function.
! Argument declarations.
INTEGER, INTENT(IN) :: AryLen ! Length of the arrays.
INTEGER, INTENT(INOUT) :: Ind ! Initial and final index into the arrays.
REAL(ReKi), INTENT(IN) :: XAry (AryLen) ! Array of X values to be interpolated.
REAL(ReKi), INTENT(IN) :: XVal ! X value to be interpolated.
REAL(ReKi), INTENT(IN) :: YAry (AryLen) ! Array of Y values to be interpolated.
! Let's check the limits first.
IF ( XVal <= XAry(1) ) THEN
InterpStpReal = YAry(1)
Ind = 1
RETURN
ELSE IF ( XVal >= XAry(AryLen) ) THEN
InterpStpReal = YAry(AryLen)
Ind = AryLen - 1
RETURN
END IF
! Let's interpolate!
Ind = MAX( MIN( Ind, AryLen-1 ), 1 )
DO
IF ( XVal < XAry(Ind) ) THEN
Ind = Ind - 1
ELSE IF ( XVal >= XAry(Ind+1) ) THEN
Ind = Ind + 1
ELSE
InterpStpReal = ( YAry(Ind+1) - YAry(Ind) )*( XVal - XAry(Ind) )/( XAry(Ind+1) - XAry(Ind) ) + YAry(Ind)
RETURN
END IF
END DO
RETURN
END FUNCTION InterpStpReal ! ( XVal, XAry, YAry, Ind, AryLen )
!=======================================================================
SUBROUTINE LocateBin( XVal, XAry, Ind, AryLen )
! This subroutine finds the lower-bound index of an input x-value located in an array.
! On return, Ind has a value such that
! XAry(Ind) <= XVal < XAry(Ind+1), with the exceptions that
! Ind = 0 when XVal < XAry(1), and
! Ind = AryLen when XAry(AryLen) <= XVal.
!
! It uses a binary interpolation scheme that takes about log(AryLen)/log(2) steps to converge.
! If the index doesn't change much between calls, LocateStp() may be a better option.
! Argument declarations.
INTEGER, INTENT(IN) :: AryLen ! Length of the array.
INTEGER, INTENT(OUT) :: Ind ! Final (low) index into the array.
REAL(ReKi), INTENT(IN) :: XAry (AryLen) ! Array of X values to be interpolated.
REAL(ReKi), INTENT(IN) :: XVal ! X value to be interpolated.
! Local declarations.
INTEGER :: IHi ! The high index into the arrays.
INTEGER :: IMid ! The mid-point index between IHi and Ind.
! Let's check the limits first.
IF ( XVal < XAry(1) ) THEN
Ind = 0
ELSE IF ( XVal >= XAry(AryLen) ) THEN
Ind = AryLen
ELSE
! Let's interpolate!
Ind = 1
IHi = AryLen
DO WHILE ( IHi-Ind > 1 )
IMid = ( IHi + Ind )/2
IF ( XVal >= XAry(IMid) ) THEN
Ind = IMid
ELSE
IHi = IMid
END IF
END DO
END IF
RETURN
END SUBROUTINE LocateBin
!=======================================================================
SUBROUTINE LocateStp( XVal, XAry, Ind, AryLen )
! This subroutine finds the lower-bound index of an input x-value located in an array.
! On return, Ind has a value such that
! XAry(Ind) <= XVal < XAry(Ind+1), with the exceptions that
! Ind = 0 when XVal < XAry(1), and
! Ind = AryLen when XAry(AryLen) <= XVal.
!
! It uses the passed index as the starting point and does a stepwise search from there. This is
! especially useful when the calling routines save the value from the last time this routine was called
! for a given case where XVal does not change much from call to call. When there is no correlation
! from one interpolation to another, a binary search may be a better choice.
! Argument declarations.
INTEGER, INTENT(IN) :: AryLen ! Length of the array.
INTEGER, INTENT(INOUT) :: Ind ! Initial and final index into the array.
REAL(ReKi), INTENT(IN) :: XAry (AryLen) ! Array of X values to be interpolated.
REAL(ReKi), INTENT(IN) :: XVal ! X value to be interpolated.
! Let's check the limits first.
IF ( XVal < XAry(1) ) THEN
Ind = 0
ELSE IF ( XVal >= XAry(AryLen) ) THEN
Ind = AryLen
ELSE
Ind = MAX( MIN( Ind, AryLen-1 ), 1 )
DO
IF ( XVal < XAry(Ind) ) THEN
Ind = Ind - 1
ELSE IF ( XVal >= XAry(Ind+1) ) THEN
Ind = Ind + 1
ELSE
RETURN
END IF
END DO
END IF
RETURN
END SUBROUTINE LocateStp
!=======================================================================
FUNCTION Mean ( Ary, AryLen )
! This routine calculates the mean value of an array.
! Function declaration.
REAL(ReKi) :: Mean ! This function.
! Argument declarations:
INTEGER, INTENT(IN) :: AryLen ! Length of the array.
REAL(ReKi), INTENT(IN) :: Ary (AryLen) ! Input array.
! Local declarations.
INTEGER :: I ! The index into the array.
Mean = 0.0
DO I=1,AryLen
Mean = Mean + Ary(I)
END DO ! I
Mean = Mean/AryLen
RETURN
END FUNCTION Mean ! ( Ary, AryLen )
!=======================================================================
SUBROUTINE MPi2Pi ( Angle )
! This routine ensures that Angle lies between -pi and pi.
! Argument declarations:
REAL(ReKi), INTENT(INOUT) :: Angle
! Get the angle between 0 and 2Pi.
Angle = MODULO( Angle, TwoPi )
! Get the angle between -Pi and Pi.
IF ( Angle > Pi ) THEN
Angle = Angle - TwoPi
END IF
RETURN
END SUBROUTINE MPi2Pi
!=======================================================================
SUBROUTINE RombergInt(f, a, b, R, err, eps, ErrStat)
! This routine is used to integrate funciton f over the interval [a, b]. This routine
! is useful for sufficiently smooth (e.g., analytic) integrands, integrated over
! intervals which contain no singularities, and where the endpoints are also nonsingular.
!
! f is an external function. For example f(x) = 1 + x.
!
! FUNCTION f(x)
! USE PRECISION
! IMPLICIT NONE
!
! REAL(ReKi) f
! REAL(ReKi) x
!
! f = 1 + x
!
! RETURN
! END FUNCTION f
IMPLICIT NONE
! Argument declarations:
REAL(ReKi), EXTERNAL :: f ! Integrand function name
REAL(ReKi), INTENT(IN) :: a ! Lower integration limit
REAL(ReKi), INTENT(IN) :: b ! Upper integration limit
REAL(ReKi), INTENT(IN) :: eps ! Absolute error bound
REAL(ReKi), INTENT(OUT) :: R ! The result of integration
REAL(ReKi), INTENT(OUT) :: err ! Actual absolute error
INTEGER, INTENT(OUT), OPTIONAL :: ErrStat ! Error status; if present, program does not abort on error
! Local declarations:
INTEGER :: m, i, j, k
INTEGER, PARAMETER :: mmax = 50 ! Maximum iteration number for m
INTEGER, PARAMETER :: imax = 50 ! Maximum iteration number for i
REAL(ReKi), ALLOCATABLE :: T(:,:)
REAL(ReKi) :: h ! Step length
REAL(ReKi) :: sumf
! Initialize T
ALLOCATE( T( mmax, imax ) )
T = 0
T(1, 1) = 0.5*(b - a)*( f(a) + f(b) )
k = 2
DO m = 1, mmax-2
h = (b-a)*(0.5)**m
sumf = 0
DO i = 1, 2**(m-1)
sumf = sumf + f(a + (2*i-1)*h)
k = k + 1
END DO
T( m+1, 1) = 0.5*T( m, 1 )+ h * sumf
DO j = 1, m
T(m-j+1, j+1) = ( 4.0**j * T(m-j+2, j) - T(m-j+1, j) )/(4.0**j - 1.0)
! absolute error
err = ABS( T(m-j+1, j+1) - T( m-j+2, j ) )
! set k >=9 to prevent early terminations
IF( (err .LT. eps) .and. (k >= 9) ) THEN
! return the intergration result if the conditions are met
R = T(m-j+1, j+1)
IF( ALLOCATED(T) ) DEALLOCATE(T)
RETURN
END IF
END DO
END DO
err = ABS( T(m-j+1, j+1) - T( m-j+2, j ) )
R = T(m-j+1, j+1)
IF( ALLOCATED(T) ) DEALLOCATE(T)
! Return error message if the maximum iteration number is reached.
CALL ProgAbort ( ' In subroutine RombergInt, the iteration reaches the maximum number. The integration did NOT converge! ', &
PRESENT(ErrStat) )
IF ( PRESENT(ErrStat) ) THEN
ErrStat = 1
RETURN
END IF
RETURN
END SUBROUTINE RombergInt
!=======================================================================
   SUBROUTINE SetConstants( )
      ! This routine computes numeric constants stored in the NWTC Library
      ! (module-level variables: Pi and its derived constants, plus NaN/Inf,
      ! in both ReKi and DbKi precisions). Call once before using them.
      ! USE, INTRINSIC :: ieee_arithmetic !use this for compilers that have implemented
      ! local variables for getting values of NaN and Inf (not necessary when using ieee_arithmetic)
   REAL(DbKi)                 :: Neg_D    ! a negative real(DbKi) number
   REAL(ReKi)                 :: Neg      ! a negative real(ReKi) number
      ! Constants based upon Pi:
      ! Double-precision (DbKi) values first.
   Pi_D      = ACOS( -1.0_DbKi )
   D2R_D     = Pi_D/180.0_DbKi
   R2D_D     = 180.0_DbKi/Pi_D
   PiBy2_D   = Pi_D/2.0_DbKi
   RPM2RPS_D = Pi_D/30.0_DbKi
   RPS2RPM_D = 30.0_DbKi/Pi_D
   TwoByPi_D = 2.0_DbKi/Pi_D
   TwoPi_D   = 2.0_DbKi*Pi_D
      ! Single-precision (ReKi) counterparts.
   Pi      = ACOS( -1.0_ReKi )
   D2R     = Pi/180.0_ReKi
   R2D     = 180.0_ReKi/Pi
   PiBy2   = Pi/2.0_ReKi
   RPM2RPS = Pi/30.0_ReKi
   RPS2RPM = 30.0_ReKi/Pi
   TwoByPi = 2.0_ReKi/Pi
   TwoPi   = 2.0_ReKi*Pi
      ! IEEE constants:
      ! NaN_D = ieee_value(0.0_DbKi, ieee_quiet_nan)
      ! Inf_D = ieee_value(0.0_DbKi, ieee_positive_inf)
      !
      ! NaN   = ieee_value(0.0_ReKi, ieee_quiet_nan)
      ! Inf   = ieee_value(0.0_DbKi, ieee_positive_inf)
      ! set variables to negative numbers to calculate NaNs (compilers may complain when taking sqrt of negative constants)
      ! NOTE(review): SQRT of a negative value and division by zero below rely on
      ! non-trapping IEEE arithmetic; builds with floating-point exception trapping
      ! enabled will abort here. The commented-out ieee_arithmetic calls above are
      ! the portable alternative — confirm compiler support before switching.
   Neg   = -1.0_ReKi
   Neg_D = -1.0_DbKi
   NaN_D = SQRT ( Neg_D )
   Inf_D = Pi_D / 0.0_DbKi
   NaN   = SQRT ( Neg )
   Inf   = Pi / 0.0_ReKi
   RETURN
   END SUBROUTINE SetConstants
!=======================================================================
   SUBROUTINE SmllRotTrans( RotationType, Theta1, Theta2, Theta3, TransMat, ErrTxt )
      ! This routine computes the 3x3 transformation matrix, TransMat,
      ! to a coordinate system x (with orthogonal axes x1, x2, x3)
      ! resulting from three rotations (Theta1, Theta2, Theta3) about the
      ! orthogonal axes (X1, X2, X3) of coordinate system X. All angles
      ! are assummed to be small, as such, the order of rotations does
      ! not matter and Euler angles do not need to be used. This routine
      ! is used to compute the transformation matrix (TransMat) between
      ! undeflected (X) and deflected (x) coordinate systems. In matrix
      ! form:
      !    {x1}   [TransMat(Theta1, ] {X1}
      !    {x2} = [          Theta2, ]*{X2}
      !    {x3}   [          Theta3 )] {X3}
      ! The transformation matrix, TransMat, is the closest orthonormal
      ! matrix to the nonorthonormal, but skew-symmetric, Bernoulli-Euler
      ! matrix:
      !        [   1.0    Theta3  -Theta2 ]
      !    A = [ -Theta3    1.0    Theta1 ]
      !        [  Theta2  -Theta1    1.0  ]
      !
      ! In the Frobenius Norm sense, the closest orthornormal matrix is:
      !    TransMat = U*V^T,
      !
      ! where the columns of U contain the eigenvectors of A*A^T and the
      ! columns of V contain the eigenvectors of A^T*A (^T = transpose).
      ! This result comes directly from the Singular Value Decomposition
      ! (SVD) of A = U*S*V^T where S is a diagonal matrix containing the
      ! singular values of A, which are SQRT( eigenvalues of A*A^T ) =
      ! SQRT( eigenvalues of A^T*A ).
      ! The algebraic form of the transformation matrix, as implemented
      ! below, was derived symbolically by J. Jonkman by computing U*V^T
      ! by hand with verification in Mathematica.
      ! Passed Variables:
   REAL(ReKi), INTENT(IN )             :: Theta1          ! The small rotation about X1, (rad).
   REAL(ReKi), INTENT(IN )             :: Theta2          ! The small rotation about X2, (rad).
   REAL(ReKi), INTENT(IN )             :: Theta3          ! The small rotation about X3, (rad).
   REAL(ReKi), INTENT(OUT)             :: TransMat (3,3)  ! The resulting transformation matrix from X to x, (-).
   CHARACTER(*), INTENT(IN)            :: RotationType    ! The type of rotation; used to inform the user where a large rotation is occuring upon such an event.
   CHARACTER(*), INTENT(IN ), OPTIONAL :: ErrTxt          ! an additional message to be displayed as a warning (typically the simulation time)
      ! Local Variables:
   REAL(ReKi)                          :: ComDenom        ! = ( Theta1^2 + Theta2^2 + Theta3^2 )*SQRT( 1.0 + Theta1^2 + Theta2^2 + Theta3^2 )
   REAL(ReKi), PARAMETER               :: LrgAngle = 0.4  ! Threshold for when a small angle becomes large (about 23deg). This comes from: COS(SmllAngle) ~ 1/SQRT( 1 + SmllAngle^2 ) and SIN(SmllAngle) ~ SmllAngle/SQRT( 1 + SmllAngle^2 ) results in ~5% error when SmllAngle = 0.4rad.
   REAL(ReKi)                          :: Theta11         ! = Theta1^2
   REAL(ReKi)                          :: Theta12S        ! = Theta1*Theta2*[ SQRT( 1.0 + Theta1^2 + Theta2^2 + Theta3^2 ) - 1.0 ]
   REAL(ReKi)                          :: Theta13S        ! = Theta1*Theta3*[ SQRT( 1.0 + Theta1^2 + Theta2^2 + Theta3^2 ) - 1.0 ]
   REAL(ReKi)                          :: Theta22         ! = Theta2^2
   REAL(ReKi)                          :: Theta23S        ! = Theta2*Theta3*[ SQRT( 1.0 + Theta1^2 + Theta2^2 + Theta3^2 ) - 1.0 ]
   REAL(ReKi)                          :: Theta33         ! = Theta3^2
   REAL(ReKi)                          :: SqrdSum         ! = Theta1^2 + Theta2^2 + Theta3^2
   REAL(ReKi)                          :: SQRT1SqrdSum    ! = SQRT( 1.0 + Theta1^2 + Theta2^2 + Theta3^2 )
      ! NOTE: SAVEd flag — the warning below is issued at most once for the whole
      ! program run (across all callers), and this is not thread-safe.
   LOGICAL, SAVE                       :: FrstWarn = .TRUE. ! When .TRUE., indicates that we're on the first warning.
      ! Display a warning message if at least one angle gets too large in
      ! magnitude:
   IF ( ( ( ABS(Theta1) > LrgAngle ) .OR. ( ABS(Theta2) > LrgAngle ) .OR. ( ABS(Theta3) > LrgAngle ) ) .AND. FrstWarn ) THEN
      CALL ProgWarn(' Small angle assumption violated in SUBROUTINE SmllRotTrans() due to'// &
                    ' a large '//TRIM(RotationType)//'. The solution may be inaccurate.'// &
                    ' Simulation continuing, but future warnings will be suppressed.')
      IF ( PRESENT(ErrTxt) ) THEN
         CALL WrScr(' Additional debugging message from SUBROUTINE SmllRotTrans(): '//TRIM(ErrTxt) )
      END IF
      FrstWarn = .FALSE. ! Don't enter here again!
   ENDIF
      ! Compute some intermediate results:
   Theta11      = Theta1*Theta1
   Theta22      = Theta2*Theta2
   Theta33      = Theta3*Theta3
   SqrdSum      = Theta11 + Theta22 + Theta33
   SQRT1SqrdSum = SQRT( 1.0 + SqrdSum )
   ComDenom     = SqrdSum*SQRT1SqrdSum
   Theta12S     = Theta1*Theta2*( SQRT1SqrdSum - 1.0 )
   Theta13S     = Theta1*Theta3*( SQRT1SqrdSum - 1.0 )
   Theta23S     = Theta2*Theta3*( SQRT1SqrdSum - 1.0 )
      ! Define the transformation matrix:
   IF ( ComDenom == 0.0 ) THEN ! All angles are zero and matrix is ill-conditioned (the matrix is derived assuming that the angles are not zero); return identity
      TransMat(1,:) = (/ 1.0, 0.0, 0.0 /)
      TransMat(2,:) = (/ 0.0, 1.0, 0.0 /)
      TransMat(3,:) = (/ 0.0, 0.0, 1.0 /)
   ELSE ! At least one angle is nonzero
      TransMat(1,1) = ( Theta11*SQRT1SqrdSum + Theta22              + Theta33              )/ComDenom
      TransMat(2,2) = ( Theta11              + Theta22*SQRT1SqrdSum + Theta33              )/ComDenom
      TransMat(3,3) = ( Theta11              + Theta22              + Theta33*SQRT1SqrdSum )/ComDenom
      TransMat(1,2) = (  Theta3*SqrdSum + Theta12S )/ComDenom
      TransMat(2,1) = ( -Theta3*SqrdSum + Theta12S )/ComDenom
      TransMat(1,3) = ( -Theta2*SqrdSum + Theta13S )/ComDenom
      TransMat(3,1) = (  Theta2*SqrdSum + Theta13S )/ComDenom
      TransMat(2,3) = (  Theta1*SqrdSum + Theta23S )/ComDenom
      TransMat(3,2) = ( -Theta1*SqrdSum + Theta23S )/ComDenom
   ENDIF
   RETURN
   END SUBROUTINE SmllRotTrans
!=======================================================================
SUBROUTINE SortUnion ( Ary1, N1, Ary2, N2, Ary, N )
! This routine takes two sorted arrays and finds the sorted union of the two.
! Note: If the same value is found in both arrays, only one is kept. However, if either
! array as multiple occurances of the same value, the largest multiple will be
! kept. Duplicates should be eliminated externally if this is not desirable.
! Argument declarations:
INTEGER, INTENT(OUT) :: N ! The length of the output array.
INTEGER, INTENT(IN) :: N1 ! The length of the first input array.
INTEGER, INTENT(IN) :: N2 ! The length of the second input array.
REAL(ReKi), INTENT(OUT) :: Ary(N1+N2) ! The sorted union.
REAL(ReKi), INTENT(IN) :: Ary1(N1) ! The first list of sorted real numbers.
REAL(ReKi), INTENT(IN) :: Ary2(N2) ! The second list of sorted real numbers.
! Local declarations:
INTEGER :: I1 ! Index into the first array.
INTEGER :: I2 ! Index into the second array.
I1 = 1
I2 = 1
N = 1
DO WHILE ( ( I1 <= N1 ) .AND. ( I2 <= N2 ) )
IF ( Ary1(I1) < Ary2(I2) ) THEN
Ary(N) = Ary1(I1)
I1 = I1 + 1
ELSE IF ( Ary1(I1) > Ary2(I2) ) THEN
Ary(N) = Ary2(I2)
I2 = I2 + 1
ELSE
Ary(N) = Ary1(I1)
I1 = I1 + 1
I2 = I2 + 1
END IF
N = N + 1
END DO ! WHILE
! We've reached the end of one array, but we need to add the end
! of the other array if we haven't reached the end of it yet.
IF ( I1 <= N1 ) THEN
Ary(N:N+N1-I1) = Ary1(I1:)
N = N+N1-I1
ELSEIF ( I2 <= N2 ) THEN
Ary(N:N+N2-I2) = Ary2(I2:)
N = N+N2-I2
ELSE
N = N - 1
ENDIF
RETURN
END SUBROUTINE SortUnion ! ( Ary1, N1, Ary2, N2, Ary, N )
!=======================================================================
FUNCTION StdDevFn ( Ary, AryLen, Mean )
! This routine calculates the standard deviation of a population contained in Ary.
! Function declaration.
REAL(ReKi) :: StdDevFn ! This function.
! Argument declarations:
INTEGER, INTENT(IN) :: AryLen ! Length of the array.
REAL(ReKi), INTENT(IN) :: Ary (AryLen) ! Input array.
REAL(ReKi), INTENT(IN) :: Mean ! The previously calculated mean of the array.
! Local declarations.
REAL(DbKi) :: Sum ! A temporary sum.
INTEGER :: I ! The index into the array.
Sum = 0.0_DbKi
DO I=1,AryLen
Sum = Sum + ( Ary(I) - Mean )**2
END DO ! I
StdDevFn = SQRT( Sum/( AryLen - 1 ) )
RETURN
END FUNCTION StdDevFn ! ( Ary, AryLen, Mean )
!=======================================================================
END MODULE NWTC_Num
|
{"hexsha": "2727954b7dfb70f50f254d37f7c485948ba3d6c6", "size": 49558, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "Source/WT_Perf/v3.05.00a-adp/Source/NWTC_Subroutine_Library/NWTC_Lib_v1.05.00/source/NWTC_Num.f90", "max_stars_repo_name": "NREL/HARP_Opt", "max_stars_repo_head_hexsha": "5a00d0adb34af1c412ed05d7212751d02fec9666", "max_stars_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2015-08-30T09:32:41.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-23T20:25:00.000Z", "max_issues_repo_path": "Source/WT_Perf/v3.05.00a-adp/Source/NWTC_Subroutine_Library/NWTC_Lib_v1.05.00/source/NWTC_Num.f90", "max_issues_repo_name": "NREL/HARP_Opt", "max_issues_repo_head_hexsha": "5a00d0adb34af1c412ed05d7212751d02fec9666", "max_issues_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2018-06-12T03:30:26.000Z", "max_issues_repo_issues_event_max_datetime": "2018-06-13T19:48:57.000Z", "max_forks_repo_path": "Source/WT_Perf/v3.05.00a-adp/Source/NWTC_Subroutine_Library/NWTC_Lib_v1.05.00/source/NWTC_Num.f90", "max_forks_repo_name": "NREL/HARP_Opt", "max_forks_repo_head_hexsha": "5a00d0adb34af1c412ed05d7212751d02fec9666", "max_forks_repo_licenses": ["ECL-2.0", "Apache-2.0"], "max_forks_count": 16, "max_forks_repo_forks_event_min_datetime": "2015-03-18T20:59:57.000Z", "max_forks_repo_forks_event_max_datetime": "2020-09-19T13:17:27.000Z", "avg_line_length": 35.9376359681, "max_line_length": 315, "alphanum_fraction": 0.4952580814, "num_tokens": 13041}
|
# -*- coding: utf-8 -*-
# This Program
import time
import h5py
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from keras.layers import Input, ZeroPadding2D, Conv2D, BatchNormalization, Activation, Flatten, Dense
from keras.layers import MaxPooling2D
from keras.models import Model
def F1Score(pred, y):
    """Compute precision, recall and F1 score for binary (0/1) labels.

    Parameters
    ----------
    pred : array-like of 0/1 predictions.
    y : array-like of 0/1 ground-truth labels, same shape as ``pred``.

    Returns
    -------
    tuple
        ``(precision, recall, score)`` as floats.

    Notes
    -----
    Fixes relative to the original implementation:

    * the true-negative count was computed and summed but never used in any
      returned metric (dead work) — removed;
    * the verbose ``debug`` print scaffolding is removed; the returned values
      for the production (``debug = 0``) path are unchanged.
    """
    pred = np.asarray(pred)
    y = np.asarray(y)
    epsilon = 0.0001  # to avoid division by 0

    # Confusion-matrix counts (TN is irrelevant to precision/recall/F1).
    TP = np.sum(np.logical_and(np.equal(pred, 1), np.equal(y, 1)))
    FP = np.sum(np.logical_and(np.equal(pred, 1), np.equal(y, 0)))
    FN = np.sum(np.logical_and(np.equal(pred, 0), np.equal(y, 1)))

    # Nudge zero counts so every division below is defined; this matches the
    # original behavior of adding epsilon to zero-valued counts.
    if TP == 0:
        TP += epsilon
    if FP == 0:
        FP += epsilon
    if FN == 0:
        FN += epsilon

    precision = TP / (TP + FP)
    recall = TP / (TP + FN)
    # Harmonic mean of precision and recall.
    score = 2 / ((1 / precision) + (1 / recall))
    return precision, recall, score
def load_data(filename):
    """Load a training set from an HDF5 file.

    Dataset format should is h5 and look like this:
    datasets/
    --filename
    ----train_x (m, 64, 64, 3)
    ----train_y (m,)
    m := number of training examples
    filename := e.g. "mydataset.h5"

    Returns
    -------
    tuple
        ``(X, Y)`` where ``X`` has shape ``(m, 64, 64, 3)`` and ``Y`` is
        reshaped from ``(m,)`` to ``(1, m)``.
    """
    # BUG FIX: the file handle was previously never closed; the context
    # manager releases it once the datasets are copied into numpy arrays.
    with h5py.File(str(filename), "r") as dataset:
        X = np.array(dataset["train_x"][:])
        Y = np.array(dataset["train_y"][:])
    # reshape from (m,) to (1, m)
    Y = Y.reshape((1, Y.shape[0]))
    return X, Y
def three_layer_ConvNet_keras(input_shape):
    """Build a small fixed ConvNet: pad -> 2 x [conv-BN-ReLU-pool] -> sigmoid.

    Parameters
    ----------
    input_shape : tuple
        Shape of a single input sample, e.g. ``(64, 64, 3)``.

    Returns
    -------
    keras.Model
        Uncompiled binary-classification model named ``'HappyModel'``.
    """
    inputs = Input(input_shape)
    net = ZeroPadding2D((3, 3))(inputs)

    # First conv stage: 32 filters.
    net = Conv2D(32, (3, 3), strides=(1, 1), name='conv0')(net)
    net = BatchNormalization(axis=3, name='bn0')(net)
    net = Activation('relu')(net)
    net = MaxPooling2D((2, 2), name='max_pool')(net)

    # Second conv stage: 16 filters.
    net = Conv2D(16, (3, 3), strides=(1, 1), name='conv1')(net)
    net = BatchNormalization(axis=3, name='bn1')(net)
    net = Activation('relu')(net)
    net = MaxPooling2D((2, 2), name='max_pool1')(net)

    # Classifier head: flatten then a single sigmoid unit.
    net = Flatten()(net)
    net = Dense(1, activation='sigmoid', name='fc')(net)

    return Model(inputs=inputs, outputs=net, name='HappyModel')
def L_layer_ConvNet_keras(input_shape, layer_dims, strides, paddings, pool_filters, pool_strides, pool_paddings):
    """Build a configurable L-layer ConvNet with a dense sigmoid head.

    Example configuration::

        layer_dims = [[(8, 3, 3), (16, 3, 3)], [6, 1]]
        strides = [1, 1]
        paddings = ["same", "same"]
        pool_filters = [4, 4]
        pool_strides = [4, 4]
        pool_paddings = ["same", "same"]

    Parameters
    ----------
    input_shape : tuple
        Shape of a single input sample.
    layer_dims : list
        ``[conv_layers, dense_layers]`` where each conv layer is a tuple
        ``(n_filters, filter_h, filter_w)`` and dense layers are unit counts;
        the last dense entry is the sigmoid output width.
    strides, paddings, pool_filters, pool_strides, pool_paddings : list
        Per-conv-layer settings (one entry per conv layer).

    Returns
    -------
    keras.Model
        Uncompiled model named ``'HappyModel'``.
    """
    conv_layers = layer_dims[0]
    dens_layers = layer_dims[1]
    L_conv = len(conv_layers)
    L_dens = len(dens_layers)
    X_input = Input(input_shape)
    X = ZeroPadding2D((3, 3))(X_input)
    for l in range(L_conv):
        X = Conv2D(conv_layers[l][0], (conv_layers[l][1], conv_layers[l][2]), strides=(strides[l], strides[l]), padding=paddings[l], name='conv' + str(l))(X)
        X = BatchNormalization(axis=3, name='bn' + str(l))(X)
        X = Activation('relu')(X)
        # BUG FIX: the pool window width previously used pool_filters[0] for
        # every layer; both dimensions now use the per-layer value.
        X = MaxPooling2D((pool_filters[l], pool_filters[l]), strides=(pool_strides[l], pool_strides[l]), padding=pool_paddings[l], name='max_pool' + str(l))(X)
    X = Flatten()(X)
    # Hidden dense layers; the final entry is handled below as the output.
    for l in range(L_dens-1):
        X = Dense(dens_layers[l])(X)
        X = Activation("relu")(X)
    X = Dense(dens_layers[-1], activation='sigmoid', name='fc')(X)
    model = Model(inputs=X_input, outputs=X, name='HappyModel')
    return model
def main():
    """Train and evaluate an L-layer ConvNet on a cat/non-cat dataset.

    Loads train and test sets from h5 files, scales pixels to [0, 1], builds
    and fits the Keras model, then prints accuracy and F1 score for both sets.
    """
    print(time.time())
    filename = "../datasets/catvnoncat_3831_1315.h5"
    X_train, Y_train = load_data(filename)
    # X_train = preprocess(X_train)
    # Scale pixel intensities from [0, 255] to [0, 1].
    X_train = X_train / 255.
    # Transpose labels from (1, m) to (m, 1) as Keras expects samples first.
    Y_train = Y_train.T
    print("X_train.shape: ", X_train.shape)
    print("Y_train.shape: ", Y_train.shape)
    # NOTE(review): the test file uses keys "train_set_x"/"train_set_y" while
    # load_data() reads "train_x"/"train_y", so it is loaded inline here —
    # confirm the two dataset schemas are intentional.
    test_filename = "../datasets/train_catvnoncat.h5"
    test_dataset = h5py.File(test_filename, "r")
    X_test = np.array(test_dataset["train_set_x"][:])
    Y_test = np.array(test_dataset["train_set_y"][:])
    # reshape from (m,) to (1, m)
    Y_test = Y_test.reshape((1, Y_test.shape[0]))
    # X_test = preprocess(X_test)
    X_test = X_test / 255.
    Y_test = Y_test.T
    print("X_test.shape: ", X_test.shape)
    print("Y_test.shape: ", Y_test.shape)
    # (4, 4, 3, 8) = (filter height, filter width, channel, number of filters)
    # Four conv stages followed by a dense pyramid ending in 1 sigmoid unit.
    layer_dims = [[(8, 3, 3), (16, 3, 3), (32, 3, 3), (64, 3, 3)], [512, 256, 128, 64, 32, 16, 8, 1]]
    strides = [1, 1, 1, 1]
    paddings = ["SAME", "SAME", "SAME", "SAME"]
    pool_filters = [2, 2, 2, 2]
    pool_strides = [2, 2, 2, 2]
    # 32,15, 7, 4
    pool_paddings = ["SAME", "SAME", "SAME", "SAME"]
    # NOTE(review): learning_rate and print_cost are assigned but never used in
    # this function (the Adam default learning rate applies) — confirm intent.
    learning_rate = 0.0001
    num_epochs = 5
    minibatch_size = 64
    print_cost = True
    start = time.time()
    print("Start time: ", start)
    # execute model
    model = L_layer_ConvNet_keras(X_train.shape[1:], layer_dims, strides, paddings, pool_filters, pool_strides, pool_paddings)
    model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
    model.fit(x=X_train, y=Y_train, epochs=num_epochs, batch_size=minibatch_size)
    # evaluate() returns [loss, accuracy]; index 1 below is the accuracy.
    acc_loss_train = model.evaluate(x=X_train, y=Y_train)
    acc_loss_test = model.evaluate(x=X_test, y=Y_test)
    print()
    print("epoch " + str(num_epochs) + ":")
    print("Train Accuracy = " + str(acc_loss_train[1]))
    print("Test Accuracy = " + str(acc_loss_test[1]))
    diff = time.time() - start
    print("Time: ", diff)
    preds_train = model.predict(X_train)
    preds_test = model.predict(X_test)
    #print("preds_train[:10, 0]: ", preds_train[:10, 0])
    # Threshold sigmoid outputs at 0.5 to obtain hard 0/1 predictions.
    preds_train = (preds_train >= 0.5) * 1
    preds_test = (preds_test >= 0.5) * 1
    #print("preds_train[:10, 0]: ", preds_train[:10, 0])
    #print("Y_trains: ", Y_train[:10, 0])
    # F1Score returns (precision, recall, score); [-1] below is the F1 score.
    f1_train = F1Score(preds_train[:, 0], Y_train[:, 0])
    f1_test = F1Score(preds_test[:, 0], Y_test[:, 0])
    print()
    print("F1score train: ", f1_train[-1])
    print("F1score test: ", f1_test[-1])
# Script entry point.
if __name__ == "__main__":
    main()
#
#
#
#
#
#
#
#
#
#
|
{"hexsha": "8aace6f15ac7195403e3ee9ddc4b6eb1d2a5a4ca", "size": 7550, "ext": "py", "lang": "Python", "max_stars_repo_path": "L_layer_ConvNet_keras/L_layer_ConvNet_keras.py", "max_stars_repo_name": "HerrHuber/L_layer_ConvNet_keras", "max_stars_repo_head_hexsha": "3b0b80a446fd60ef3215ab19de5a8d0d96e9f4b5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-01-19T22:52:59.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-19T22:52:59.000Z", "max_issues_repo_path": "L_layer_ConvNet_keras/L_layer_ConvNet_keras.py", "max_issues_repo_name": "HerrHuber/L_layer_ConvNet_keras", "max_issues_repo_head_hexsha": "3b0b80a446fd60ef3215ab19de5a8d0d96e9f4b5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "L_layer_ConvNet_keras/L_layer_ConvNet_keras.py", "max_forks_repo_name": "HerrHuber/L_layer_ConvNet_keras", "max_forks_repo_head_hexsha": "3b0b80a446fd60ef3215ab19de5a8d0d96e9f4b5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.4435483871, "max_line_length": 159, "alphanum_fraction": 0.5660927152, "include": true, "reason": "import numpy", "num_tokens": 2464}
|
"""
Basic of linear algebra.
"""
import numpy as np
a = np.array([[1.,2.],[3.,4.]])
print(a)
print(np.transpose(a))
print(np.linalg.det(a.transpose()))
print(np.linalg.inv(a))
print(np.trace(a))
print(np.eye(3)) # identity matrix
y = np.array([[3.],[7.]])
print(np.linalg.solve(a,y)) # solve x+2y==3 && 3x+4y==7
print(np.linalg.eig(a)) # find the eigenvalues and their respectives eigenvectors
|
{"hexsha": "41961a2f6df2d41556490ec51f400fb7ab1dbdb8", "size": 394, "ext": "py", "lang": "Python", "max_stars_repo_path": "numpy/np_ex006.py", "max_stars_repo_name": "rpoliselit/python-for-dummies", "max_stars_repo_head_hexsha": "d6f45a966a5238058953f93d8660832fa692b3d4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "numpy/np_ex006.py", "max_issues_repo_name": "rpoliselit/python-for-dummies", "max_issues_repo_head_hexsha": "d6f45a966a5238058953f93d8660832fa692b3d4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "numpy/np_ex006.py", "max_forks_repo_name": "rpoliselit/python-for-dummies", "max_forks_repo_head_hexsha": "d6f45a966a5238058953f93d8660832fa692b3d4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.2666666667, "max_line_length": 81, "alphanum_fraction": 0.6624365482, "include": true, "reason": "import numpy", "num_tokens": 118}
|
import pickle
import numpy as np
import tensorflow as tf
import PIL.Image
import pandas as pd
import os
import sys
import argparse
import PIL
import os
import glob
import numpy as np
import tensorflow as tf
import tfutil
#----------------------------------------------------------------------------
# Parse individual image from a tfrecords file.
def parse_tfrecord_tf(record):
    """Decode one serialized example into a uint8 image tensor (TF graph ops)."""
    feature_spec = {
        'shape': tf.FixedLenFeature([3], tf.int64),
        'data': tf.FixedLenFeature([], tf.string),
    }
    parsed = tf.parse_single_example(record, features=feature_spec)
    raw = tf.decode_raw(parsed['data'], tf.uint8)
    return tf.reshape(raw, parsed['shape'])
def parse_tfrecord_np(record):
    """Decode one serialized tf.Example into a numpy uint8 array.

    Reads the 'shape' and 'data' features written by the dataset tool and
    reshapes the raw bytes accordingly.
    """
    ex = tf.train.Example()
    ex.ParseFromString(record)
    shape = ex.features.feature['shape'].int64_list.value
    data = ex.features.feature['data'].bytes_list.value[0]
    # np.fromstring is deprecated (removed for binary input in modern
    # NumPy); frombuffer is the supported zero-copy equivalent.
    return np.frombuffer(data, np.uint8).reshape(shape)
#----------------------------------------------------------------------------
# Dataset class that loads data from tfrecords files.
class TFRecordDataset:
    """Streams (image, label) minibatches from a single tfrecords file.

    Image records are decoded through a TF1 tf.data pipeline; labels are
    loaded eagerly from a numpy labels file and mirrored into a TF
    variable so they can also be sampled in-graph.
    """
    def __init__(self,
        tfrecord_file,          # Tfrecords files.
        resolution = None,      # Dataset resolution, None = autodetect.
        label_file = None,      # Relative path of the labels file, None = autodetect.
        max_label_size = 0,     # 0 = no labels, 'full' = full labels, <int> = N first label components.
        buffer_mb = 256,        # Read buffer size (megabytes).
        num_threads = 2):       # Number of concurrent threads.
        self.tfrecord_file = tfrecord_file
        self.resolution = None
        self.resolution_log2 = None
        self.shape = [] # [channel, height, width]
        self.dtype = 'uint8'
        self.dynamic_range = [0, 255]
        self.label_file = label_file
        self.label_size = None # [component]
        self.label_dtype = None
        # Internal TF graph handles, populated below.
        self._np_labels = None
        self._tf_minibatch_in = None
        self._tf_labels_var = None
        self._tf_labels_dataset = None
        self._tf_datasets = dict()
        self._tf_iterator = None
        self._tf_init_ops = dict()
        self._tf_minibatch_np = None
        self._cur_minibatch = -1
        self._cur_lod = -1
        # NOTE(review): label_file=None fails this assert, despite the
        # "None = autodetect" parameter comment above — confirm intent.
        assert os.path.isfile(self.tfrecord_file)
        assert os.path.isfile(self.label_file)
        # Peek at the first record only to learn the stored image shape.
        tfr_shapes = []
        tfr_opt = tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.NONE)
        for record in tf.python_io.tf_record_iterator(self.tfrecord_file, tfr_opt):
            tfr_shapes.append(parse_tfrecord_np(record).shape)
            break
        tfr_files = [self.tfrecord_file]
        # Determine shape and resolution.
        max_shape = max(tfr_shapes, key=lambda shape: np.prod(shape))
        self.resolution = resolution if resolution is not None else max_shape[1]
        print("----------------------------", flush=True)
        print("Resolution used:", self.resolution, flush=True)
        print("----------------------------", flush=True)
        self.resolution_log2 = int(np.log2(self.resolution))
        self.shape = [max_shape[0], self.resolution, self.resolution]
        # Level-of-detail per file: 0 = full resolution, +1 per halving.
        tfr_lods = [self.resolution_log2 - int(np.log2(shape[1])) for shape in tfr_shapes]
        assert all(shape[0] == max_shape[0] for shape in tfr_shapes)
        assert all(shape[1] == shape[2] for shape in tfr_shapes)
        assert all(shape[1] == self.resolution // (2**lod) for shape, lod in zip(tfr_shapes, tfr_lods))
        print("tfr_lods:", tfr_lods, flush=True)
        print("tfr_shapes:", tfr_shapes, flush=True)
        print("tfr_files:", tfr_files, flush=True)
        #assert all(lod in tfr_lods for lod in range(self.resolution_log2 - 1))
        # Load labels.
        assert max_label_size == 'full' or max_label_size >= 0
        # Default: zero-width labels for up to 2^20 examples.
        self._np_labels = np.zeros([1<<20, 0], dtype=np.float32)
        if self.label_file is not None and max_label_size != 0:
            self._np_labels = np.load(self.label_file)
            assert self._np_labels.ndim == 2
        if max_label_size != 'full' and self._np_labels.shape[1] > max_label_size:
            self._np_labels = self._np_labels[:, :max_label_size]
        self.label_size = self._np_labels.shape[1]
        self.label_dtype = self._np_labels.dtype.name
        # Build TF expressions.
        with tf.name_scope('Dataset'), tf.device('/cpu:0'):
            self._tf_minibatch_in = tf.placeholder(tf.int64, name='minibatch_in', shape=[])
            tf_labels_init = tf.zeros(self._np_labels.shape, self._np_labels.dtype)
            self._tf_labels_var = tf.Variable(tf_labels_init, name='labels_var')
            # Upload the eagerly-loaded labels into the TF variable.
            tfutil.set_vars({self._tf_labels_var: self._np_labels})
            self._tf_labels_dataset = tf.data.Dataset.from_tensor_slices(self._tf_labels_var)
            for tfr_file, tfr_shape, tfr_lod in zip(tfr_files, tfr_shapes, tfr_lods):
                if tfr_lod < 0:
                    continue
                dset = tf.data.TFRecordDataset(tfr_file, compression_type='', buffer_size=buffer_mb<<20)
                dset = dset.map(parse_tfrecord_tf, num_parallel_calls=num_threads)
                dset = tf.data.Dataset.zip((dset, self._tf_labels_dataset))
                # NOTE(review): bytes_per_item is computed but unused.
                bytes_per_item = np.prod(tfr_shape) * np.dtype(self.dtype).itemsize
                dset = dset.batch(self._tf_minibatch_in)
                self._tf_datasets[tfr_lod] = dset
            self._tf_iterator = tf.data.Iterator.from_structure(self._tf_datasets[0].output_types, self._tf_datasets[0].output_shapes)
            self._tf_init_ops = {lod: self._tf_iterator.make_initializer(dset) for lod, dset in self._tf_datasets.items()}
    # Use the given minibatch size and level-of-detail for the data returned by get_minibatch_tf().
    def configure(self, minibatch_size, lod=0):
        lod = int(np.floor(lod))
        assert minibatch_size >= 1 and lod in self._tf_datasets
        # Re-initialize the iterator only when the configuration changes.
        if self._cur_minibatch != minibatch_size or self._cur_lod != lod:
            self._tf_init_ops[lod].run({self._tf_minibatch_in: minibatch_size})
            self._cur_minibatch = minibatch_size
            self._cur_lod = lod
    # Get next minibatch as TensorFlow expressions.
    def get_minibatch_tf(self): # => images, labels
        return self._tf_iterator.get_next()
    # Get next minibatch as NumPy arrays.
    def get_minibatch_np(self, minibatch_size, lod=0): # => images, labels
        self.configure(minibatch_size, lod)
        if self._tf_minibatch_np is None:
            # Build the fetch expression once and reuse it on later calls.
            self._tf_minibatch_np = self.get_minibatch_tf()
        return tfutil.run(self._tf_minibatch_np)
    # Get random labels as TensorFlow expression.
    def get_random_labels_tf(self, minibatch_size): # => labels
        if self.label_size > 0:
            return tf.gather(self._tf_labels_var, tf.random_uniform([minibatch_size], 0, self._np_labels.shape[0], dtype=tf.int32))
        else:
            return tf.zeros([minibatch_size, 0], self.label_dtype)
    # Get random labels as NumPy array.
    def get_random_labels_np(self, minibatch_size): # => labels
        if self.label_size > 0:
            return self._np_labels[np.random.randint(self._np_labels.shape[0], size=[minibatch_size])]
        else:
            return np.zeros([minibatch_size, 0], self.label_dtype)
def inference(data_dir, result_subdir, random_seed, batch_size = 20):
    """Generate paired real/fake test images from a trained GAN.

    Loads the final generator pickled in ``result_subdir``, draws one
    latent per train/valid/test example (so the test latents are
    reproducible for a given ``random_seed``), renders fake images
    conditioned on the test labels, saves the matching real images from
    the test tfrecords, and writes CSV index files for both sets.
    """
    model_name_path = result_subdir + "/network-final-full-conv.pkl"
    print("Loading model: ", model_name_path, flush=True)
    print("Data Base Dir:", data_dir, flush=True)
    tf_test_record_file = data_dir + "/test/test-r08.tfrecords"
    tf_test_label_file = data_dir + "/test/test-rxx.labels"
    tf.InteractiveSession()
    dataset = TFRecordDataset(tfrecord_file=tf_test_record_file,
        label_file=tf_test_label_file, resolution=256, max_label_size = "full")
    csv_input_test = data_dir + "/test/test.csv"
    csv_input_train = data_dir + "/train/train.csv"
    csv_input_valid = data_dir + "/valid/valid.csv"
    inf_path_test = result_subdir + "/inference/test_pngs"
    inf_path_reals = inf_path_test + "/reals"
    inf_path_fakes = inf_path_test + "/fakes"
    csv_save_fakes = inf_path_fakes + "/test.csv"
    csv_save_reals = inf_path_reals + "/test.csv"
    if not os.path.exists(inf_path_test):
        os.makedirs(inf_path_test)
    if not os.path.exists(inf_path_reals):
        os.makedirs(inf_path_reals)
    if not os.path.exists(inf_path_fakes):
        os.makedirs(inf_path_fakes)
    data_frame_te = pd.read_csv(csv_input_test)
    data_frame_tr = pd.read_csv(csv_input_train)
    data_frame_vl = pd.read_csv(csv_input_valid)
    # Collect label vectors (every column after the first/path column).
    np_labels_tr = []
    np_labels_vl = []
    for row in data_frame_tr.iterrows():
        np_labels_tr.append(row[1][1:].values)
    for row in data_frame_vl.iterrows():
        np_labels_vl.append(row[1][1:].values)
    labels_arr_tr = np.asarray(np_labels_tr)
    num_examples_tr = labels_arr_tr.shape[0]
    # int(...) instead of np.int(...): the np.int alias was deprecated in
    # NumPy 1.20 and removed in 1.24.
    num_batches_tr = int(np.ceil(num_examples_tr/batch_size))
    labels_arr_vl = np.asarray(np_labels_vl)
    num_examples_vl = labels_arr_vl.shape[0]
    num_batches_vl = int(np.ceil(num_examples_vl/batch_size))
    # NOTE(review): columns[0:] keeps ALL columns, including the path
    # column, despite the original "Remove Path" comment.
    split_headers = list(data_frame_te.columns[0:])
    np_labels_te = []
    for row in data_frame_te.iterrows():
        np_labels_te.append(row[1][1:].values)
    labels_arr_te = np.asarray(np_labels_te)
    num_examples_te = labels_arr_te.shape[0]
    num_batches_te = int(np.ceil(num_examples_te/batch_size))
    print("Test Label Data Shape:", labels_arr_te.shape,flush=True)
    print("Number Test Batches:", num_batches_te,flush=True)
    # Import official network.  The pickle holds a list of models; the
    # last entry is the generator used for inference.
    with open(model_name_path, 'rb') as file:
        all_models = pickle.load(file)
    Gs = all_models[-1]
    # Create CSV File Test Fake
    ids_i_te = np.arange(0,labels_arr_te.shape[0],1)
    # str(int(...)) instead of np.str(np.int(...)) — both aliases were
    # removed from NumPy.
    ids_te_fake = np.array([inf_path_fakes+"/"+str(int(ids_i_te[j])) + ".png" for j in range(0,len(ids_i_te))]).reshape(labels_arr_te.shape[0],1)
    ids_and_labs_te = np.concatenate((ids_te_fake,labels_arr_te),axis=1)
    df_new_te = pd.DataFrame(columns=split_headers,data=ids_and_labs_te)
    df_new_te.to_csv(csv_save_fakes, mode='w', header=True,index=False)
    ids_te_real = np.array([inf_path_reals+"/"+str(int(ids_i_te[j])) + ".png" for j in range(0,len(ids_i_te))]).reshape(labels_arr_te.shape[0],1)
    # Generate Latents — one per example across all splits so that test
    # latents are stable regardless of split sizes.
    latents_all = np.random.RandomState(random_seed).randn(num_examples_tr+num_examples_vl+num_examples_te,*Gs.input_shapes[0][1:])
    # Split latents for train, val, test
    latents_te = latents_all[num_examples_tr+num_examples_vl:,:]
    assert latents_te.shape[0] == num_examples_te
    def adjust_dynamic_range(data, drange_in, drange_out):
        """Linearly map values from drange_in to drange_out."""
        if drange_in != drange_out:
            scale = (np.float32(drange_out[1]) - np.float32(drange_out[0])) / (np.float32(drange_in[1]) - np.float32(drange_in[0]))
            bias = (np.float32(drange_out[0]) - np.float32(drange_in[0]) * scale)
            data = data * scale + bias
        return data
    def convert_to_pil_image(image, drange=[0,1]):
        """Convert a CHW (or HW) array to a PIL image without rescaling."""
        assert image.ndim == 2 or image.ndim == 3
        if image.ndim == 3:
            if image.shape[0] == 1:
                image = image[0] # grayscale CHW => HW
            else:
                image = image.transpose(1, 2, 0) # CHW -> HWC
        #image = adjust_dynamic_range(image, drange, [0,255])
        #image = np.rint(image).clip(0, 255).astype(np.uint8)
        format = 'RGB' if image.ndim == 3 else 'L'
        return PIL.Image.fromarray(image, format)
    def save_image(image, filename, drange=[0,1], quality=95):
        """Save *image* to *filename* via PIL."""
        img = convert_to_pil_image(image, drange)
        img.save(filename)
    print('Generating inference test images..', flush=True)
    img_i = 0
    all_labels_rl = []
    for i in range(0,num_batches_te):
        labels = labels_arr_te[i*batch_size:(i+1)*batch_size]
        latents = latents_te[i*batch_size:(i+1)*batch_size]
        images_fake = Gs.run(latents, labels)
        images_real, labels_real = dataset.get_minibatch_np(minibatch_size=batch_size)
        # Map generator output from [-1, 1] to uint8 [0, 255].
        images_fake = np.clip(np.rint((images_fake + 1.0) / 2.0 * 255.0), 0.0, 255.0).astype(np.uint8)
        for j in range(0,len(images_fake)):
            img_fk = images_fake[j]
            img_rl = images_real[j]
            name_fk = ids_te_fake[img_i][0]
            name_rl = ids_te_real[img_i][0]
            save_image(img_fk, name_fk)
            save_image(img_rl, name_rl)
            all_labels_rl.append(labels_real[j])
            img_i += 1
    all_labels_rl = np.asarray(all_labels_rl)
    # Create CSV File Test Real
    ids_and_labs_te_rl = np.concatenate((ids_te_real,all_labels_rl),axis=1)
    df_new_te = pd.DataFrame(columns=split_headers,data=ids_and_labs_te_rl)
    df_new_te.to_csv(csv_save_reals, mode='w', header=True,index=False)
def execute_cmdline(argv):
    """Parse command-line arguments and dispatch to the selected command."""
    prog_name = argv[0]
    top_parser = argparse.ArgumentParser()
    command_parsers = top_parser.add_subparsers(dest='command')
    command_parsers.required = True

    def add_command(cmd, desc, example=None):
        # Attach an optional usage example to the sub-command's epilog.
        if example is None:
            epilog = None
        else:
            epilog = 'Example: %s %s' % (prog_name, example)
        return command_parsers.add_parser(cmd, description=desc, help=desc, epilog=epilog)

    sub = add_command('inference', 'Inference.')
    sub.add_argument('data_dir', help='Data load Path')
    sub.add_argument('result_subdir', help='Results Directory')
    sub.add_argument('random_seed', type=int, help='Random Seed')

    # With no arguments, fall back to printing the help text.
    cli_args = top_parser.parse_args(argv[1:] if len(argv) > 1 else ['-h'])
    handler = globals()[cli_args.command]
    del cli_args.command
    handler(**vars(cli_args))
#----------------------------------------------------------------------------
# Command-line entry point.
if __name__ == "__main__":
    execute_cmdline(sys.argv)
|
{"hexsha": "32d49dadc3cf206158d3d1f462f78b735b5b132a", "size": 14392, "ext": "py", "lang": "Python", "max_stars_repo_path": "GAN_cpd/inference.py", "max_stars_repo_name": "AugustDS/extd_med_benchmark", "max_stars_repo_head_hexsha": "3dc4f5c00ba98f79c70336ec7a5723586c145231", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "GAN_cpd/inference.py", "max_issues_repo_name": "AugustDS/extd_med_benchmark", "max_issues_repo_head_hexsha": "3dc4f5c00ba98f79c70336ec7a5723586c145231", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "GAN_cpd/inference.py", "max_forks_repo_name": "AugustDS/extd_med_benchmark", "max_forks_repo_head_hexsha": "3dc4f5c00ba98f79c70336ec7a5723586c145231", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-03-29T01:18:03.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-29T01:18:03.000Z", "avg_line_length": 44.8348909657, "max_line_length": 152, "alphanum_fraction": 0.6267370762, "include": true, "reason": "import numpy", "num_tokens": 3554}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @author: Wesley
# @time: 2020-12-11 10:47
#
# Road-segmentation inference: load a trained UNet checkpoint and evaluate
# it on a single satellite image / mask pair.
import os
import cv2
import torch
from models.unet import UNet
from torchvision import transforms
import numpy as np
# Prefer GPU when available.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# UNet(1, 1) — presumably 1 input and 1 output channel; confirm against
# models.unet.
net = UNet(1, 1).to(device)
# Hard-coded checkpoint path; weights are loaded only if the file exists.
weight = r'E:\PyCharmProject\Road-Detection\weights\weight.pt'
if os.path.exists(weight):
    net.load_state_dict(torch.load(weight))
img_path = 'src/img/1_sat.jpg'
mask_path = 'src/img/1_mask.png'
if __name__ == '__main__':
    origin = cv2.imread(img_path, 1)  # flag 1 = load as 3-channel BGR
    cv2.imshow('origin', origin)
    tr = transforms.Compose([transforms.ToTensor()])
    # NOTE(review): the network was constructed as UNet(1, 1) but receives
    # a 3-channel image here — confirm the expected input channel count.
    img = tr(origin).unsqueeze(0).to(device)
    mask = tr(cv2.imread(mask_path, 0))
    net.eval()
    with torch.no_grad():
        pred = net(img)
    # Binarize the predicted probability map at 0.5.
    pred[pred >= 0.5] = 1
    pred[pred < 0.5] = 0
    # Pixel-wise confusion counts against the ground-truth mask.
    TP = ((pred == 1) & (mask == 1)).sum()
    TN = ((pred == 0) & (mask == 0)).sum()
    FN = ((pred == 0) & (mask == 1)).sum()
    FP = ((pred == 1) & (mask == 0)).sum()
    pa = (TP + TN) / (TP + TN + FP + FN)  # pixel accuracy
    iou = TP / (TP + FP + FN)             # IoU of the positive class
    print('pa: ', pa)
    print('iou', iou)
    # NOTE(review): img and pred are 4-D torch tensors (possibly on GPU);
    # np.hstack + cv2.imshow on them will likely fail — convert to HxW
    # numpy images (e.g. .squeeze().cpu().numpy()) before displaying.
    cv2.imshow('origin_out', np.hstack([img, pred]))
    cv2.waitKey(0)
    cv2.destroyAllWindows()
|
{"hexsha": "592053af1fab1c1da7ba5ad2b44bb83a16df92ba", "size": 1252, "ext": "py", "lang": "Python", "max_stars_repo_path": "detect.py", "max_stars_repo_name": "Wesley-Tse/Road-Detection", "max_stars_repo_head_hexsha": "c3b444287d9b41ccc4234e737e4421b5d1b3c3da", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "detect.py", "max_issues_repo_name": "Wesley-Tse/Road-Detection", "max_issues_repo_head_hexsha": "c3b444287d9b41ccc4234e737e4421b5d1b3c3da", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "detect.py", "max_forks_repo_name": "Wesley-Tse/Road-Detection", "max_forks_repo_head_hexsha": "c3b444287d9b41ccc4234e737e4421b5d1b3c3da", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.04, "max_line_length": 69, "alphanum_fraction": 0.5974440895, "include": true, "reason": "import numpy", "num_tokens": 407}
|
[STATEMENT]
lemma raw_has_prod_Suc:
"raw_has_prod f (Suc M) a \<longleftrightarrow> raw_has_prod (\<lambda>n. f (Suc n)) M a"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. raw_has_prod f (Suc M) a = raw_has_prod (\<lambda>n. f (Suc n)) M a
[PROOF STEP]
unfolding raw_has_prod_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ((\<lambda>n. \<Prod>i\<le>n. f (i + Suc M)) \<longlonglongrightarrow> a \<and> a \<noteq> (0::'a)) = ((\<lambda>n. \<Prod>i\<le>n. f (Suc (i + M))) \<longlonglongrightarrow> a \<and> a \<noteq> (0::'a))
[PROOF STEP]
by auto
|
{"llama_tokens": 256, "file": null, "length": 2}
|
import k3d
import numpy as np
import pytest
from .plot_compare import *
import vtk
from vtk.util import numpy_support
def _load_volume_data(path='./test/assets/volume.vti'):
    """Read a VTK XML image file and return it as a float32 (z, y, x) array."""
    reader = vtk.vtkXMLImageDataReader()
    reader.SetFileName(path)
    reader.Update()
    vti = reader.GetOutput()
    x, y, z = vti.GetDimensions()
    return numpy_support.vtk_to_numpy(
        vti.GetPointData().GetArray(0)
    ).reshape(-1, y, x).astype(np.float32)


def test_volume():
    """Render a basic volume and compare against the reference screenshot."""
    prepare()
    volume = k3d.volume(_load_volume_data(), samples=128)
    pytest.plot += volume
    compare('volume')


def test_volume_opacity_function():
    """Volume with a custom piecewise opacity transfer function."""
    prepare()
    volume = k3d.volume(_load_volume_data(),
                        opacity_function=[0, 0.0, 0.2, 0.5, 1, 1.0], samples=128)
    pytest.plot += volume
    compare('volume_opacity_function')


def test_volume_alpha_coef():
    """Volume with an increased alpha coefficient."""
    prepare()
    volume = k3d.volume(_load_volume_data(), alpha_coef=200, samples=128)
    pytest.plot += volume
    compare('volume_alpha_coef')
|
{"hexsha": "b3c1ea3f11a54176fccf5036edd7c8a78af34862", "size": 1558, "ext": "py", "lang": "Python", "max_stars_repo_path": "k3d/test/test_visual_volume.py", "max_stars_repo_name": "mjpolak/K3D-jupyter", "max_stars_repo_head_hexsha": "539c53cab580d55b8841bb87589ab3d4cf95bdb0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 704, "max_stars_repo_stars_event_min_datetime": "2015-10-31T10:37:14.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T08:14:39.000Z", "max_issues_repo_path": "k3d/test/test_visual_volume.py", "max_issues_repo_name": "mjpolak/K3D-jupyter", "max_issues_repo_head_hexsha": "539c53cab580d55b8841bb87589ab3d4cf95bdb0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 269, "max_issues_repo_issues_event_min_datetime": "2015-10-28T18:57:06.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-29T18:10:03.000Z", "max_forks_repo_path": "k3d/test/test_visual_volume.py", "max_forks_repo_name": "mjpolak/K3D-jupyter", "max_forks_repo_head_hexsha": "539c53cab580d55b8841bb87589ab3d4cf95bdb0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 114, "max_forks_repo_forks_event_min_datetime": "2015-11-14T04:25:57.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-25T13:44:53.000Z", "avg_line_length": 23.2537313433, "max_line_length": 94, "alphanum_fraction": 0.6739409499, "include": true, "reason": "import numpy", "num_tokens": 418}
|
# Build a cross-shaped map with a random start exit and a different random
# goal exit; the goal cell also receives a green food object.
function cross_map(r::MersenneTwister)
    map = YAML.load_file("maps/cross.yaml")
    # The four arm tips of the cross, as (y, x) pairs.
    exits = [(4, 1), (1, 4), (4, 7), (7, 4)]
    start = rand(r, exits)
    # Goal is drawn from the remaining three exits, never the start.
    exit = rand(r, setdiff(exits, [start]))
    map["starts"] = [Dict("x"=>start[2], "y"=>start[1])]
    map["exits"] = [Dict("x"=>exit[2], "y"=>exit[1])]
    push!(map["objects"], Dict("type"=>"food", "color"=>"green",
                               "x"=>exit[2], "y"=>exit[1]))
    map
end
# Convenience overload: derive the RNG from an integer seed.
function cross_map(seed::Int64)
    cross_map(MersenneTwister(seed))
end
# Episode metadata shared by the cross tasks: cap episodes at 100 steps.
cross_meta() = Dict{String, Any}("max_step" => 100)
# Fitness on a single seeded map: the unused fraction of the step budget
# (1.0 = immediate exit, 0.0 = budget exhausted).
function cross_search_fitness(cont_f::Function; seed::Int64=0)
    e = Episode(Grid(cross_map(seed)); reward=food_reward, meta=cross_meta())
    run!(e, cont_f)
    # NOTE(review): terminate(e) presumably flags a failed episode; such
    # episodes are charged the full step budget.
    steps = terminate(e) ? e.meta["max_step"] : e.meta["step"]
    1 - steps / e.meta["max_step"]
end
# Fitness on a fixed (memorizable) map: delegate to the generic learning
# fitness with a seed-bound map generator.
function cross_memorize_fitness(cont_f::Function; seed::Int64=0)
    map_function() = cross_map(seed)
    learn_fitness(cont_f, map_function, cross_meta)
end
# Build a cross map where the goal exit is the start exit offset by
# `strategy` positions (with wrap-around) in the exits list.
function cross_map(r::MersenneTwister, strategy::Int64)
    map = YAML.load_file("maps/cross.yaml")
    # The four arm tips of the cross, as (y, x) pairs.
    exits = [(4, 1), (1, 4), (4, 7), (7, 4)]
    si = rand(r, 1:4)
    start = exits[si]
    # 1-based modular indexing; strategy in 1:3 never maps back to start.
    exit = exits[mod((si-1)+strategy, 4)+1]
    map["starts"] = [Dict("x"=>start[2], "y"=>start[1])]
    map["exits"] = [Dict("x"=>exit[2], "y"=>exit[1])]
    push!(map["objects"], Dict("type"=>"food", "color"=>"green",
                               "x"=>exit[2], "y"=>exit[1]))
    map
end
# Fitness where the start-to-goal offset (strategy) is drawn once per
# evaluation and then fixed, so the agent can learn it across maps.
function cross_strategy_fitness(cont_f::Function; seed::Int64=0)
    r = MersenneTwister(seed)
    strategy = rand(r, 1:3)
    map_function() = cross_map(r, strategy)
    learn_fitness(cont_f, map_function, cross_meta)
end
|
{"hexsha": "668721f73bc4f74930af4697d897d3c1b7050a20", "size": 1728, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/fitness/cross.jl", "max_stars_repo_name": "d9w/RoboGrid.jl", "max_stars_repo_head_hexsha": "e217adfa3e8351017746d4fede8399e92ba5df73", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/fitness/cross.jl", "max_issues_repo_name": "d9w/RoboGrid.jl", "max_issues_repo_head_hexsha": "e217adfa3e8351017746d4fede8399e92ba5df73", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/fitness/cross.jl", "max_forks_repo_name": "d9w/RoboGrid.jl", "max_forks_repo_head_hexsha": "e217adfa3e8351017746d4fede8399e92ba5df73", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.6037735849, "max_line_length": 77, "alphanum_fraction": 0.5943287037, "num_tokens": 555}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 11 13:30:53 2017
@author: laoj
"""
import numpy as np
import pymc3 as pm
import theano.tensor as tt
from pymc3.distributions.distribution import Discrete, draw_values, generate_samples, infer_shape
from pymc3.distributions.dist_math import bound, logpow, factln, Cholesky
from pymc3.math import tround
#%% n scaler, p 1D
# Scratch cell: exercise broadcasting between a column of trial counts n
# and a (6, 3) probability matrix p using theano tensors.
#n = 183
n = np.array([[106],
              [143],
              [102],
              [116],
              [183],
              [150]])
p = np.array([[ 0.21245365, 0.41223126, 0.37531509],
              [ 0.13221011, 0.50537169, 0.3624182 ],
              [ 0.08813779, 0.54447146, 0.36739075],
              [ 0.18932804, 0.4630365, 0.34763546],
              [ 0.11006472, 0.49227755, 0.39765773],
              [ 0.17886852, 0.41098834, 0.41014314]])
# p = np.array([ 0.21245365, 0.41223126, 0.37531509])
n = tt.as_tensor_variable(n)
p = tt.as_tensor_variable(p)
# NOTE(review): np.squeeze on a theano tensor — works here only via
# numpy's dispatch; confirm intended.
n = np.squeeze(n)
n = tt.shape_padright(n) if n.ndim == 1 else tt.as_tensor_variable(n)
n.ndim
n * p
#%%
# Same arrays again, but this time n stays a plain numpy column while
# only p is converted to a tensor.
n = np.array([[106],
              [143],
              [102],
              [116],
              [183],
              [150]])
#n = 183
p = np.array([[ 0.21245365, 0.41223126, 0.37531509],
              [ 0.13221011, 0.50537169, 0.3624182 ],
              [ 0.08813779, 0.54447146, 0.36739075],
              [ 0.18932804, 0.4630365, 0.34763546],
              [ 0.11006472, 0.49227755, 0.39765773],
              [ 0.17886852, 0.41098834, 0.41014314]])
#p = np.array([[ 0.21245365, 0.41223126, 0.37531509]])
#n = tt.as_tensor_variable(n)
p = tt.as_tensor_variable(p)
#%%
class Multinomial(Discrete):
    """Multinomial distribution that broadcasts vector-valued trial counts.

    Appears adapted from pymc3's Multinomial so that ``n`` may be a
    scalar, a 1-D array, or an (m, 1) column; ``p`` is renormalized
    row-wise, allowing each row of an observed count matrix to carry its
    own trial count.
    """
    def __init__(self, n, p, *args, **kwargs):
        super(Multinomial, self).__init__(*args, **kwargs)
        # Renormalize p so each row sums to one.
        p = p / tt.sum(p, axis=-1, keepdims=True)
        n = np.squeeze(n) # works also if n is a tensor
        if len(self.shape) > 1:
            m = self.shape[-2]
            # Broadcast a scalar/mismatched n up to one count per row.
            try:
                assert n.shape == (m,)
            except (AttributeError, AssertionError):
                n = n * tt.ones(m)
            self.n = tt.shape_padright(n)
            self.p = p if p.ndim > 1 else tt.shape_padleft(p)
        elif n.ndim == 1:
            self.n = tt.shape_padright(n)
            self.p = p if p.ndim > 1 else tt.shape_padleft(p)
        else:
            # n is a scalar, p is a 1d array
            self.n = tt.as_tensor_variable(n)
            self.p = tt.as_tensor_variable(p)
        self.mean = self.n * self.p
        mode = tt.cast(tt.round(self.mean), 'int32')
        # Adjust the rounded mode so each row sums exactly to n.
        diff = self.n - tt.sum(mode, axis=-1, keepdims=True)
        inc_bool_arr = tt.abs_(diff) > 0
        mode = tt.inc_subtensor(mode[inc_bool_arr.nonzero()],
                                diff[inc_bool_arr.nonzero()])
        self.mode = mode
    def _random(self, n, p, size=None):
        """Draw multinomial samples, pairing/broadcasting n against rows of p."""
        original_dtype = p.dtype
        # Set float type to float64 for numpy. This change is related to numpy issue #8317 (https://github.com/numpy/numpy/issues/8317)
        p = p.astype('float64')
        # Now, re-normalize all of the values in float64 precision. This is done inside the conditionals
        if size == p.shape:
            size = None
        if (p.ndim == 1) and (n.ndim == 0):
            p = p / p.sum()
            randnum = np.random.multinomial(n, p.squeeze(), size=size)
        else:
            p = p / p.sum(axis=1, keepdims=True)
            if n.shape[0] > p.shape[0]:
                # One shared p row, many counts.
                randnum = np.asarray([
                    np.random.multinomial(nn, p.squeeze(), size=size)
                    for nn in n
                ])
            elif n.shape[0] < p.shape[0]:
                # One shared count, many p rows.
                randnum = np.asarray([
                    np.random.multinomial(n.squeeze(), pp, size=size)
                    for pp in p
                ])
            else:
                # One count per p row.
                randnum = np.asarray([
                    np.random.multinomial(nn, pp, size=size)
                    for (nn, pp) in zip(n, p)
                ])
        return randnum.astype(original_dtype)
    def random(self, point=None, size=None):
        """Draw random samples given a parameter point."""
        n, p = draw_values([self.n, self.p], point=point)
        samples = generate_samples(self._random, n, p,
                                   dist_shape=self.shape,
                                   size=size)
        return samples
    def logp(self, x):
        """Multinomial log-likelihood of count matrix x, with support checks."""
        n = self.n
        p = self.p
        return bound(
            tt.sum(factln(n)) - tt.sum(factln(x)) + tt.sum(x * tt.log(p)),
            tt.all(x >= 0),
            tt.all(tt.eq(tt.sum(x, axis=-1, keepdims=True), n)),
            tt.all(p <= 1),
            tt.all(tt.eq(tt.sum(p, axis=-1), 1)),
            tt.all(tt.ge(n, 0)),
            broadcast_conditions=False
        )
# Smoke-test the patched distribution: mode, parameters, moments,
# sampling, and log-probability against hand-entered counts.
Multinomial.dist(1,np.ones(3)/3,shape=(6, 3)).mode.eval()
#%%
Multinomial.dist(n,p,shape=(6, 3)).p.eval()
#%%
Multinomial.dist(n,p,shape=(6, 3)).n.eval()
#%%
Multinomial.dist(n,p,shape=(6, 3)).mean.eval()
#%%
Multinomial.dist(n,p,shape=(6, 3)).random()
#%%
counts =np.asarray([[19, 50, 37],
                    [21, 67, 55],
                    [11, 53, 38],
                    [17, 54, 45],
                    [24, 93, 66],
                    [27, 53, 70]])
Multinomial.dist(n,p,shape=(6, 3)).logp(x=counts).eval()
#%%
with pm.Model() as model:
    like = Multinomial('obs_ABC', n, p, observed=counts, shape=counts.shape)
#%%
# (p, shape, n) combinations for shape-handling checks; the commented-out
# entries are known/expected failures.
paramall = (
    [[.25, .25, .25, .25], 4, 2],
    [[.25, .25, .25, .25], (1, 4), 3],
    # 3: expect to fail
    # [[.25, .25, .25, .25], (10, 4)],
    [[.25, .25, .25, .25], (10, 1, 4), 5],
    # 5: expect to fail
    # [[[.25, .25, .25, .25]], (2, 4), [7, 11]],
    [[[.25, .25, .25, .25],
      [.25, .25, .25, .25]], (2, 4), 13],
    [[[.25, .25, .25, .25],
      [.25, .25, .25, .25]], (2, 4), [17, 19]],
    [[[.25, .25, .25, .25],
      [.25, .25, .25, .25]], (1, 2, 4), [23, 29]],
    [[[.25, .25, .25, .25],
      [.25, .25, .25, .25]], (10, 2, 4), [31, 37]],
)
for p, shape, n in paramall:
    with pm.Model() as model:
        m = Multinomial('m', n=n, p=np.asarray(p), shape=shape)
        print(m.random().shape)
#%%
# Full model check: Dirichlet prior over row probabilities, observed
# multinomial counts, then sample the posterior.
counts =np.asarray([[19, 50, 37],
                    [21, 67, 55],
                    [11, 53, 38],
                    [17, 54, 45],
                    [24, 93, 66],
                    [27, 53, 70]])
n = np.array([[106],
              [143],
              [102],
              [116],
              [183],
              [150]])
sparsity=1 #not zero
beta=np.ones(counts.shape) #input for dirichlet
with pm.Model() as model:
    theta=pm.Dirichlet('theta',beta/sparsity, shape = counts.shape)
    transition=pm.Multinomial('transition',n,theta,observed=counts)
    trace=pm.sample(1000)
#%%
import numpy as np
import pymc3 as pm
import theano.tensor as tt
def norm_simplex(p):
    """Normalize *p* along its last axis so each row sums to one.

    Note: the original docstring said "sum-to-zero", but the code divides
    each row by its total — this is a sum-to-ONE (simplex) projection.
    Works on numpy arrays and on objects exposing ``.T``/``.sum`` (e.g.
    theano tensors).
    """
    return (p.T / p.sum(axis=-1)).T
def ccmodel(beta, x):
    """Community composition model: rows of exp(x . log(beta)), renormalized."""
    log_beta = tt.log(beta)
    unnormalized = tt.exp(tt.dot(x, log_beta))
    return norm_simplex(unnormalized)
class DirichletMultinomial(pm.Discrete):
    """Dirichlet-multinomial distribution parameterized by concentrations.

    Only the log-density is implemented; there is no random() sampler.
    """
    def __init__(self, alpha, *args, **kwargs):
        super(DirichletMultinomial, self).__init__(*args, **kwargs)
        # Concentration parameters; the last axis indexes categories.
        self.alpha = alpha
    def logp(self, x):
        """Log-probability of count vector(s) x under DM(alpha)."""
        alpha = self.alpha
        n = tt.sum(x, axis=-1)  # total count per row
        sum_alpha = tt.sum(alpha, axis=-1)
        # log[ n! * Gamma(sum_alpha) / Gamma(n + sum_alpha) ]
        const = (tt.gammaln(n + 1) + tt.gammaln(sum_alpha)) - tt.gammaln(n + sum_alpha)
        # Per-category terms: log[ Gamma(x_i + a_i) / (x_i! * Gamma(a_i)) ]
        series = tt.gammaln(x + alpha) - (tt.gammaln(x + 1) + tt.gammaln(alpha))
        result = const + tt.sum(series, axis=-1)
        return result
def as_col(x):
    """Reshape *x* into a column (n, 1); symbolic inputs stay symbolic."""
    if isinstance(x, tt.TensorVariable):
        return x.dimshuffle(0, 'x')
    return np.asarray(x).reshape(-1, 1)
def as_row(x):
    """Reshape *x* into a row: (n,) -> (1, n)."""
    if isinstance(x, tt.TensorVariable):
        # Theano tensor: prepend a broadcastable leading axis.
        return x.dimshuffle('x', 0)
    # Anything else goes through numpy.
    return np.asarray(x).reshape(1, -1)
# Synthetic data: n samples, k observed taxa, r design covariates.
n, k, r = 25, 10, 2
x = np.random.randint(0, 1000, size=(n, k))
y = np.random.randint(0, 1000, size=n)
# Design matrix: intercept column plus one binary covariate.
design = np.vstack((np.ones(25), np.random.randint(2, size=n))).T
with pm.Model() as model:
    # Community composition
    pi = pm.Dirichlet('pi', np.ones(k), shape=(r, k))
    comp = pm.Deterministic('comp', ccmodel(pi, design))
    # Inferred population density of observed taxa (hierarchical model)
    rho = pm.Normal('rho', shape=r)
    tau = pm.Lognormal('tau')
    dens = pm.Lognormal('dens', tt.dot(design, rho), tau=tau, shape=n)
    # Community composition *with* the spike: the spike-in column gets
    # weight 1/dens before renormalizing to the simplex.
    expected_recovery = as_col(1 / dens)
    _comp = norm_simplex(tt.concatenate((comp, expected_recovery), axis=1))
    # Variability
    mu = pm.Lognormal('mu')
    # Data: observed taxa counts with the spike-in count appended.
    obs = DirichletMultinomial('obs', _comp * mu,
                               observed=tt.concatenate((x, as_col(y)), axis=1))
    pm.sample(1000)
|
{"hexsha": "81977d254cadb7ee5093cb2ff32e221394f8fe36", "size": 8455, "ext": "py", "lang": "Python", "max_stars_repo_path": "Miscellaneous/test_script_pymc3/multinominal.py", "max_stars_repo_name": "junpenglao/Planet_Sakaar_Data_Science", "max_stars_repo_head_hexsha": "73d9605b91b774a56d18c193538691521f679f16", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 51, "max_stars_repo_stars_event_min_datetime": "2018-04-08T19:53:15.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-24T21:08:25.000Z", "max_issues_repo_path": "Miscellaneous/test_script_pymc3/multinominal.py", "max_issues_repo_name": "junpenglao/Planet_Sakaar_Data_Science", "max_issues_repo_head_hexsha": "73d9605b91b774a56d18c193538691521f679f16", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2018-05-29T20:50:37.000Z", "max_issues_repo_issues_event_max_datetime": "2020-09-12T07:14:08.000Z", "max_forks_repo_path": "Miscellaneous/test_script_pymc3/multinominal.py", "max_forks_repo_name": "junpenglao/Planet_Sakaar_Data_Science", "max_forks_repo_head_hexsha": "73d9605b91b774a56d18c193538691521f679f16", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 13, "max_forks_repo_forks_event_min_datetime": "2018-07-21T09:53:10.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-07T19:06:26.000Z", "avg_line_length": 30.9706959707, "max_line_length": 135, "alphanum_fraction": 0.5434654051, "include": true, "reason": "import numpy,import theano,import pymc3,from pymc3", "num_tokens": 2711}
|
# -*- coding: utf-8 -*-
"""
gyroid.util
===========
"""
import numpy as np
import scipy.io
import matplotlib.pyplot as plt
from matplotlib import colors
from mayavi import mlab
from .unitcell import UnitCell
from .group import Group
from .grid import Grid
from .basis import Basis
__all__ = [
"render_structure_1d",
"render_structure_2d",
"render_structure_3d",
"prepare_scft_input"]
def prepare_scft_input(dim,grid_num_vec,cryst_system,
                       cryst_param_vec,sym_group,basis_grid_vec,basis_c,
                       data_file="field_in.mat",show_img=False,
                       save_img=False,img_file="field_in.png",
                       **kwargs):
    """Build the SABF basis for a unit cell and render the input field.

    Constructs the Bravais unit cell, space group, grid, and basis, fits
    ``basis_c`` to the basis size, then dispatches to the 1D/2D/3D
    renderer matching ``dim``.

    :param dim: spatial dimension (1, 2 or 3)
    :param grid_num_vec: number of grid points per cell direction
    :param cryst_system: crystal system name
    :param cryst_param_vec: unit-cell parameters
    :param sym_group: space-group name
    :param basis_grid_vec: grid vector used to build the basis
    :param basis_c: SABF coefficients (zero-padded or truncated to fit)
    :param data_file: output .mat file name
    :param show_img: if True, display the rendered field
    :param save_img: if True, save the rendered field to ``img_file``
    :param img_file: image file name
    :param kwargs: forwarded to the renderer
    """
    b = "Bravais"
    uc = UnitCell(dim,cryst_system,cryst_param_vec);
    g = Group(dim,b,uc.shape,sym_group)
    gd = Grid(basis_grid_vec,g)
    bs = Basis(g,gd)
    # Fit the supplied coefficients to the basis size: zero-pad when too
    # few are given, truncate when too many.
    c = np.zeros(bs.N)
    N = basis_c.size
    if N < bs.N:
        c[0:N] = basis_c
    else:
        c = basis_c[0:bs.N]
    # Dispatch to the renderer for the requested dimensionality.
    if dim == 1:
        render_structure_1d(bs,gd,grid_num_vec[0],c,
                            data_name=data_file,save_img=save_img,
                            show_img=show_img,img_name=img_file,
                            **kwargs)
    if dim == 2:
        render_structure_2d(bs,gd,grid_num_vec[0],grid_num_vec[1],c,
                            data_name=data_file,save_img=save_img,
                            show_img=show_img,img_name=img_file,
                            **kwargs)
    if dim == 3:
        render_structure_3d(bs,gd,grid_num_vec[0],grid_num_vec[1],
                            grid_num_vec[2],c,
                            data_name=data_file,save_img=save_img,
                            show_img=show_img,img_name=img_file,
                            **kwargs)
def render_structure_1d(basis,grid,Na,c,
                        save_data=True,data_name="struct1d.mat",
                        save_img=True,show_img=True,
                        img_name="struct1d.png",
                        **kwargs):
    '''Compute a 1D real-space structure from SABF coefficients and render it.

    :param basis: a set of SABFs
    :type basis: :class:`Basis`
    :param grid: grid used by the FFT reconstruction
    :param Na: number of grids in **a** of the unit cell.
    :type Na: integer
    :param c: coefficients for each SABF
    :type c: 1D `numpy.array`
    :param save_data: if True, save (rx, struct) in Matlab mat format
    :param data_name: the file name of the data file
    :param save_img: if True, save image; format follows the extension
    :param img_name: the file name of the image file
    :param show_img: if True, show image on the screen
    :param kwargs: extra keyword arguments forwarded to pyplot.plot
    :returns: (rx, struct) -- grid positions and structure values
    '''
    struct = basis.generate_structure_by_fft((Na,),c,grid)
    # Real-space positions: Na evenly spaced points along the cell edge
    # of length a (first entry of the shape matrix).
    cell_a = 1.0 * basis.shape.h[0,0]
    rx = cell_a * np.arange(Na) / Na
    if save_data:
        scipy.io.savemat(data_name,{"rx":rx,"struct":struct})
    if save_img or show_img:
        plt.plot(rx,struct,**kwargs)
        if save_img:
            plt.savefig(img_name)
        if show_img:
            plt.show()
    return rx,struct
def render_structure_2d(basis,grid,Na,Nb,c,
                        save_data=True,data_name="struct2d.mat",
                        save_img=True,show_img=True,
                        img_name="struct2d.png",
                        levels=None,cmap=None,
                        **kwargs):
    ''' Calculate and render 2D structure for given SABF and unit cell.

    :param basis: a set of SABFs
    :type basis: :class:`Basis`
    :param grid: grid used by the FFT reconstruction
    :param Na: number of grids in **a** of the unit cell.
    :type Na: integer
    :param Nb: number of grids in **b** of the unit cell.
    :type Nb: integer
    :param c: coefficients for each SABF
    :type c: 1D `numpy.array`
    :param save_data: if True, save data in file with Matlab mat format
    :param data_name: the file name of the data file
    :param save_img: if True, save image; format follows the extension
    :param img_name: the file name of the image file
    :param show_img: if True, show image on the screen
    :param levels: contour levels (default: 256 evenly spaced levels)
    :param cmap: colormap (default: monochromatic red)
    :param kwargs: extra keyword arguments passed to contourf
    :returns: (rx, ry, struct)
    '''
    # If generate_structure_by_fft failed, give generate_structure a try:
    #struct = basis.generate_structure((Na,Nb),c)
    struct = basis.generate_structure_by_fft((Na,Nb),c,grid)
    # For debug only.  BUG FIX: these were Python 2 print statements
    # ("print x"), which are syntax errors under Python 3; rewritten as
    # print() calls that behave the same on both versions.
    print("Input c: {0}".format(c))
    print("c from constructed structure: ")
    print(basis.fft2sabf(np.fft.fftn(struct),grid))
    # Map fractional grid coordinates to Cartesian via the shape matrix.
    rx = np.zeros((Na,Nb))
    ry = np.zeros((Na,Nb))
    for (i,j) in np.ndindex(Na,Nb):
        x = (1.0*np.array([i,j])) / (Na,Nb)
        rx[i,j],ry[i,j] = np.dot(x,basis.shape.h)
    if save_data:
        scipy.io.savemat(data_name,{"rx":rx,"ry":ry,"struct":struct})
    if save_img or show_img:
        dx = rx.max() - rx.min()
        dy = ry.max() - ry.min()
        w,h = plt.figaspect(float(dy/dx)) # float is must
        # No frame, white background, w/h aspect ratio figure
        fig = plt.figure(figsize=(w,h),frameon=False,
                         dpi=80,facecolor='w')
        # Full-figure subplot, no border, no axes.
        # NOTE(review): `axisbg` was removed in Matplotlib >= 2.2 in favor
        # of `facecolor`; kept as-is for the old Matplotlib this module
        # targets -- confirm before upgrading.
        ax = fig.add_axes([0,0,1,1],frameon=False,axisbg='w')
        # no ticks
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
        # Default: there are 256 contour levels
        if levels is None:
            step = (struct.max() - struct.min()) / 256
            levels = np.arange(struct.min(),struct.max()+step,step)
        # Default: colormap is monochromatic red
        if cmap is None:
            clr = np.zeros((256,3))
            for i in np.arange(256):
                clr[i,0] = i / 255.0
            cmap = colors.ListedColormap(clr)
        # actual plot
        ax.contourf(rx,ry,struct,levels=levels,
                    cmap=cmap,antialiased=False,**kwargs)
        if save_img:
            plt.savefig(img_name)
        if show_img:
            plt.show()
    return rx,ry,struct
def render_structure_3d(basis,grid,Na,Nb,Nc,c,
                        save_data=True,data_name="struct3d.mat",
                        save_img=True,show_img=True,
                        img_name="struct3d.png",
                        levels=None,cmap=None,
                        **kwargs):
    ''' Calculate and render 3D structure for given SABF and unit cell.

    :param basis: a set of SABFs
    :type basis: :class:`Basis`
    :param grid: grid used by the FFT reconstruction
    :param Na: number of grids in **a** of the unit cell.
    :type Na: integer
    :param Nb: number of grids in **b** of the unit cell.
    :type Nb: integer
    :param Nc: number of grids in **c** of the unit cell.
    :type Nc: integer
    :param c: coefficients for each SABF
    :type c: 1D `numpy.array`
    :param save_data: if True, save data in file with Matlab mat format
    :param data_name: the file name of the data file
    :param save_img: if True, save image; format follows the extension
    :param img_name: the file name of the image file
    :param show_img: if True, show image on the screen
    :param levels: unused here; accepted for symmetry with the 2D renderer
    :param cmap: unused here; accepted for symmetry with the 2D renderer
    :param kwargs: extra keyword arguments passed to mlab.contour3d
    :returns: (rx, ry, rz, struct)
    '''
    #struct = basis.generate_structure((Na,Nb,Nc),c)
    struct = basis.generate_structure_by_fft((Na,Nb,Nc),c,grid)
    # Map fractional grid coordinates to Cartesian via the shape matrix.
    rx = np.zeros((Na,Nb,Nc))
    ry = np.zeros((Na,Nb,Nc))
    rz = np.zeros((Na,Nb,Nc))
    for (i,j,k) in np.ndindex(Na,Nb,Nc):
        x = (1.0*np.array([i,j,k])) / (Na,Nb,Nc)
        rx[i,j,k],ry[i,j,k],rz[i,j,k] = np.dot(x,basis.shape.h)
    if save_data:
        scipy.io.savemat(data_name,
                         {"rx":rx,"ry":ry,"rz":rz,"struct":struct})
    if save_img or show_img:
        mlab.contour3d(rx,ry,rz,struct,**kwargs)
        if save_img:
            mlab.savefig(img_name)
        if show_img:
            # BUG FIX: the figure is a Mayavi scene, so it must be shown
            # with mlab.show(); the original called plt.show(), which only
            # displays Matplotlib figures and left the 3D scene invisible.
            mlab.show()
    return rx,ry,rz,struct
|
{"hexsha": "8637d27fa89e08b1d8e3302b9ac28265984d4669", "size": 8527, "ext": "py", "lang": "Python", "max_stars_repo_path": "gyroid/util.py", "max_stars_repo_name": "liuyxpp/liuyxpp-gyroid", "max_stars_repo_head_hexsha": "7db91cb140869760124a66239773822bc2cd4e44", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-09-15T13:47:53.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-18T18:28:46.000Z", "max_issues_repo_path": "gyroid/util.py", "max_issues_repo_name": "liuyxpp/liuyxpp-gyroid", "max_issues_repo_head_hexsha": "7db91cb140869760124a66239773822bc2cd4e44", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-11-20T09:37:42.000Z", "max_issues_repo_issues_event_max_datetime": "2020-11-20T09:37:42.000Z", "max_forks_repo_path": "gyroid/util.py", "max_forks_repo_name": "liuyxpp/liuyxpp-gyroid", "max_forks_repo_head_hexsha": "7db91cb140869760124a66239773822bc2cd4e44", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.8277310924, "max_line_length": 114, "alphanum_fraction": 0.5968101325, "include": true, "reason": "import numpy,import scipy", "num_tokens": 2285}
|
// Copyright 2004-present Facebook. All Rights Reserved.
#include "fboss/agent/hw/sai/fake/FakeSaiInSegEntry.h"
#include <boost/functional/hash.hpp>
namespace facebook::fboss {
// Wrap a SAI in-segment (MPLS) entry so it can be used as a key in
// hash-based containers.  Only switch_id and label are copied and
// participate in identity; other fields of sai_inseg_entry_t are ignored.
FakeSaiInSegEntry::FakeSaiInSegEntry(sai_inseg_entry_t other_sai_inseg_entry) {
  sai_inseg_entry.switch_id = other_sai_inseg_entry.switch_id;
  sai_inseg_entry.label = other_sai_inseg_entry.label;
}
// Equality mirrors the constructor: entries match when both the switch
// id and the MPLS label match.
bool FakeSaiInSegEntry::operator==(const FakeSaiInSegEntry& other) const {
  return sai_inseg_entry.switch_id == other.sai_inseg_entry.switch_id &&
      sai_inseg_entry.label == other.sai_inseg_entry.label;
}
} // namespace facebook::fboss
namespace std {
// Hash over exactly the fields operator== compares (switch_id, label),
// keeping the hash/equality contract consistent for unordered containers.
size_t hash<facebook::fboss::FakeSaiInSegEntry>::operator()(
    const facebook::fboss::FakeSaiInSegEntry& key) const {
  std::size_t seed = 0;
  boost::hash_combine(seed, boost::hash_value(key.sai_inseg_entry.switch_id));
  boost::hash_combine(seed, boost::hash_value(key.sai_inseg_entry.label));
  return seed;
}
} // namespace std
|
{"hexsha": "9cf71b6007b6625beac7bb677f425191c9e0480c", "size": 977, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "fboss/agent/hw/sai/fake/FakeSaiInSegEntry.cpp", "max_stars_repo_name": "nathanawmk/fboss", "max_stars_repo_head_hexsha": "9f36dbaaae47202f9131598560c65715334a9a83", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 834.0, "max_stars_repo_stars_event_min_datetime": "2015-03-10T18:12:28.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T20:16:17.000Z", "max_issues_repo_path": "fboss/agent/hw/sai/fake/FakeSaiInSegEntry.cpp", "max_issues_repo_name": "nathanawmk/fboss", "max_issues_repo_head_hexsha": "9f36dbaaae47202f9131598560c65715334a9a83", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 82.0, "max_issues_repo_issues_event_min_datetime": "2015-04-07T08:48:29.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-11T21:56:58.000Z", "max_forks_repo_path": "fboss/agent/hw/sai/fake/FakeSaiInSegEntry.cpp", "max_forks_repo_name": "nathanawmk/fboss", "max_forks_repo_head_hexsha": "9f36dbaaae47202f9131598560c65715334a9a83", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 296.0, "max_forks_repo_forks_event_min_datetime": "2015-03-11T03:45:37.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-14T22:54:22.000Z", "avg_line_length": 30.53125, "max_line_length": 79, "alphanum_fraction": 0.7778915046, "num_tokens": 270}
|
import tensorflow as tf
import numpy as np
from .data_aug import get_data_aug_fn
AUTOTUNE = tf.data.experimental.AUTOTUNE
def get_cifar10_data(batch_size, data_aug, train_data_size=None,
                     repeat=True, shuffle=True, shuffle_size=None):
    """Build CIFAR-10 train/test ``tf.data`` pipelines.

    Args:
        batch_size: training batch size (remainder batches are dropped).
        data_aug: if truthy, apply the module's augmentation to batches.
        train_data_size: optional cap on the number of training examples.
        repeat: if True, repeat the training set indefinitely.
        shuffle: if True, shuffle the training set (needs shuffle_size).
        shuffle_size: shuffle buffer size; required when shuffle is True.

    Returns:
        (train_dataset, test_dataset); the test set uses batches of 100.

    Raises:
        ValueError: if shuffle is True but shuffle_size is None.
    """
    train_data, test_data = tf.keras.datasets.cifar10.load_data()
    train_img, train_lbl = train_data[0].astype(np.float32), train_data[1]
    test_img, test_lbl = test_data[0].astype(np.float32), test_data[1]
    # Per-channel mean/std on the 0-255 pixel scale.
    mean = np.array([125.307, 122.95, 113.865])
    std = np.array([62.9932, 62.0887, 66.7048])
    train_img, test_img = (train_img - mean) / std, (test_img - mean) / std
    if train_data_size is not None:
        train_img = train_img[:train_data_size]
        train_lbl = train_lbl[:train_data_size]
    train_dataset = tf.data.Dataset.from_tensor_slices((train_img, train_lbl))
    if shuffle:
        if shuffle_size is None:
            # Explicit error instead of `assert`, which vanishes under -O.
            raise ValueError("shuffle_size is required when shuffle=True")
        train_dataset = train_dataset.shuffle(shuffle_size)
    train_dataset = train_dataset.batch(batch_size, drop_remainder=True)
    if data_aug:
        # Use a separate name for the mapper: the original rebound the
        # `data_aug` flag itself, shadowing the boolean parameter.
        aug_fn = get_data_aug_fn(batch_size, [4, 4, 32, 32, 3])
        train_dataset = train_dataset.map(aug_fn, num_parallel_calls=AUTOTUNE)
    if repeat:
        train_dataset = train_dataset.repeat()
    train_dataset = train_dataset.prefetch(AUTOTUNE)
    test_dataset = tf.data.Dataset.from_tensor_slices((test_img, test_lbl))
    test_dataset = test_dataset.batch(100).prefetch(AUTOTUNE)
    return train_dataset, test_dataset
|
{"hexsha": "eb66fb1c457596cf72448d93fab105bf973ddb31", "size": 1490, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/data/cifar10.py", "max_stars_repo_name": "SeanJia/InfoMCR", "max_stars_repo_head_hexsha": "2b4760ad6ffdd98859fea1967eb1b8aa7e51be52", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-03-09T10:22:46.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-21T12:02:28.000Z", "max_issues_repo_path": "src/data/cifar10.py", "max_issues_repo_name": "SeanJia/InfoMCR", "max_issues_repo_head_hexsha": "2b4760ad6ffdd98859fea1967eb1b8aa7e51be52", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/data/cifar10.py", "max_forks_repo_name": "SeanJia/InfoMCR", "max_forks_repo_head_hexsha": "2b4760ad6ffdd98859fea1967eb1b8aa7e51be52", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.1515151515, "max_line_length": 80, "alphanum_fraction": 0.7261744966, "include": true, "reason": "import numpy", "num_tokens": 392}
|
###########################################################
## reading and saving data #
###########################################################
## Copyright (c) 2018, National Institute of Informatics #
## Author: Fuming Fang #
## Affiliation: National Institute of Informatics #
## Email: fang@nii.ac.jp #
###########################################################
# -*- coding: utf-8 -*-
import numpy as np
import os
from sklearn.utils import shuffle
import sys
import struct
class dataio(object):
    """Load fixed-width float32 feature files and serve minibatches.

    Feature files are raw float32 binaries holding ``dim`` values per
    frame; scp files list one feature path per line.  Genuine utterances
    are labeled [1, 0], spoofed ones [0, 1].
    """
    def __init__(self, train_genuine, train_spoof, dev_genuine, dev_spoof,
                 test_data=None, batch_size=32):
        # Training set: only built when both scp lists are given.
        if train_genuine is not None and train_spoof is not None:
            self.training_data, self.label = self.load_data(train_genuine, train_spoof)
            self.frames = len(self.training_data)
            self.batch_size = min(batch_size, self.frames)
            self.max_index = self.frames - self.batch_size
        # Development set: iterated sequentially in fixed-size batches.
        if dev_genuine is not None and dev_spoof is not None:
            self.dev_data, self.dev_label = self.load_data(dev_genuine, dev_spoof)
            self.dev_frames = len(self.dev_data)
            self.current_dev_index = 0
            self.dev_batch_size = min(64, self.dev_frames)
            self.dev_iterations = (self.dev_frames - 1)//self.dev_batch_size + 1
        # Test set: one variable-length array per file.
        if test_data is not None:
            self.test_data, self.test_names = self.load_test_data(test_data, 400)
            self.test_frames = len(self.test_data)
    def load_data(self, scp_genuine, scp_spoof):
        """Load genuine and/or spoof features; return (x, one-hot labels)."""
        if scp_genuine is not None:
            genuine = self._load_data(scp_genuine, 400)
            # NOTE(review): 864x400 looks like a fixed per-utterance
            # spectrogram size -- confirm against the feature extractor.
            genuine = np.reshape(genuine, (-1, 864, 400, 1))
            genuine_lab = np.zeros((len(genuine), 2), dtype=np.float32)
            genuine_lab[:, 0] = 1.0
        if scp_spoof is not None:
            spoof = self._load_data(scp_spoof, 400)
            spoof = np.reshape(spoof, (-1, 864, 400, 1))
            spoof_lab = np.zeros((len(spoof), 2), dtype=np.float32)
            spoof_lab[:, 1] = 1.0
        if scp_genuine is not None and scp_spoof is not None:
            x = np.concatenate((genuine, spoof), axis=0)
            y = np.concatenate((genuine_lab, spoof_lab), axis=0)
        elif scp_genuine is not None and scp_spoof is None:
            x = genuine
            y = genuine_lab
        elif scp_genuine is None and scp_spoof is not None:
            x = spoof
            y = spoof_lab
        else:
            raise NotImplementedError
        return x, y
    def _load_data(self, scp_path, dim):
        """Concatenate all frames from the files listed in ``scp_path``."""
        scp = np.loadtxt(scp_path, dtype=str)
        total_frames = 0
        for name in scp:
            # 4 bytes per float32, `dim` floats per frame.  BUG FIX: the
            # original used "/4/dim", which is float division in Python 3
            # and makes np.zeros() below fail on a non-integer shape.
            total_frames += os.path.getsize(name) // (4 * dim)
        data = np.zeros((total_frames, dim), dtype=np.float32)
        idx = 0
        for name in scp:
            with open(name, 'rb') as f:
                v = f.read()
            v = np.frombuffer(v, dtype=np.float32)
            v = np.reshape(v, (-1, dim))
            data[idx:idx+len(v)] = v
            idx += len(v)
        return data
    def load_test_data(self, scp_path, dim):
        """Load each test file as its own (-1, 864, dim, 1) array."""
        scp = np.loadtxt(scp_path, dtype=str)
        test_data = list()
        for name in scp:
            with open(name, 'rb') as f:
                v = f.read()
            v = np.frombuffer(v, dtype=np.float32)
            v = np.reshape(v, (-1, 864, dim, 1))
            test_data.append(v)
        return test_data, scp
    def shuffle(self):
        """Shuffle training data and labels in unison."""
        self.training_data, self.label = shuffle(self.training_data, self.label)
    def batch(self):
        """Return a random contiguous training minibatch (x, y)."""
        rand_v = np.random.randint(self.max_index)
        x = self.training_data[rand_v:rand_v+self.batch_size]
        y = self.label[rand_v:rand_v+self.batch_size]
        return x, y
    def dev_batch(self):
        """Return the next sequential dev batch, wrapping at the end."""
        s = self.current_dev_index
        e = s + self.dev_batch_size
        if e > self.dev_frames:
            e = self.dev_frames
        x = self.dev_data[s:e]
        y = self.dev_label[s:e]
        # Reset to the start once the dev set is exhausted.
        if e >= self.dev_frames:
            self.current_dev_index = 0
        else:
            self.current_dev_index = e
        return x, y
    def save_data(self, name, data):
        """Write ``data`` to ``name`` as raw packed float32 values."""
        with open(name,'wb') as f:
            f.write(struct.pack('f'*data.size, *data.flat))
|
{"hexsha": "8e2bf82ff96feffee1a5763c2a2125411edcdab8", "size": 4527, "ext": "py", "lang": "Python", "max_stars_repo_path": "dataio.py", "max_stars_repo_name": "entn-at/lcnn", "max_stars_repo_head_hexsha": "797d8847fad6d1179866ac2d7d7402240483123b", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 15, "max_stars_repo_stars_event_min_datetime": "2018-11-04T18:35:41.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-10T14:18:54.000Z", "max_issues_repo_path": "dataio.py", "max_issues_repo_name": "entn-at/lcnn", "max_issues_repo_head_hexsha": "797d8847fad6d1179866ac2d7d7402240483123b", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2018-11-02T20:15:53.000Z", "max_issues_repo_issues_event_max_datetime": "2019-04-13T06:55:01.000Z", "max_forks_repo_path": "dataio.py", "max_forks_repo_name": "entn-at/lcnn", "max_forks_repo_head_hexsha": "797d8847fad6d1179866ac2d7d7402240483123b", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2018-10-07T12:44:16.000Z", "max_forks_repo_forks_event_max_datetime": "2019-07-16T08:29:44.000Z", "avg_line_length": 34.5572519084, "max_line_length": 87, "alphanum_fraction": 0.5290479346, "include": true, "reason": "import numpy", "num_tokens": 1060}
|
#!/usr/bin/env python
"""
Test the Iron Component code. This code can be run from teh command line:
> python test_fe.py --datafile /user/jotaylor/git/spamm//Data/FakeData/Iron_comp/fakeFe1_deg.dat
--redshift 0.5
"""
import os
import datetime
import numpy as np
import time
import argparse
import glob
from utils.parse_pars import parse_pars
from utils import draw_from_sample
from spamm.components.HostGalaxyComponent import HostGalaxyComponent
from spamm.Spectrum import Spectrum
PARS = parse_pars()["host_galaxy"]
TEST_WL = parse_pars()["testing"]
WL = np.arange(TEST_WL["wl_min"], TEST_WL["wl_max"], TEST_WL["wl_step"])
#-----------------------------------------------------------------------------#
def from_file(datafile, redshift=None,
              scale=None, subset=False, pname=None):
    """Read a spectrum from ``datafile``, optionally de-redshift/scale it.

    Args:
        datafile: two- or three-column text file (wavelength, flux[, err]).
            When the error column is absent, a 5% flux error is assumed.
        redshift: if given, wavelengths are divided by (1 + redshift).
        scale: if given, flux and flux error are multiplied by it.
        subset: if True, restrict the data to the wavelength range covered
            by the host-galaxy templates.
        pname: passed through in the returned params dict.

    Returns:
        (wavelengths, flux, flux_err, params)
    """
    print(PARS, "\n")
    templates = glob.glob(os.path.join(PARS["hg_models"], "*"))
    print("Using datafile: {}\n".format(datafile))
    print("Templates = {}\n".format(templates))
    print("Are the parameters in utils good? If not, ctrl+c, modify them, and rerun")
    time.sleep(5)
    try:
        wavelengths, flux, flux_err = np.loadtxt(datafile, unpack=True)
    except ValueError:
        # Two-column file: synthesize a 5% error column.
        wavelengths, flux = np.loadtxt(datafile, unpack=True)
        flux_err = flux*0.05
    if redshift is not None:
        print("Correcting for redshift {}\n".format(redshift))
        wavelengths /= (1+float(redshift))
    if scale is not None:
        # BUG FIX: the original format string was "{\n}", which raises at
        # run time; the newline belongs outside the replacement field.
        print("Scaling flux by {}\n".format(scale))
        flux *= scale
        flux_err *= scale
    if subset:
        minwl = 1e10
        maxwl = 0.
        for template in templates:
            # (The original also opened each template via open() without
            # using the handle; np.loadtxt opens the file itself.)
            t_wl, t_flux = np.loadtxt(template, unpack=True)
            maxwl = max(maxwl, max(t_wl))
            minwl = min(minwl, min(t_wl))
        print("Wavelength range of templates {} =\n{}:{}".format(templates,
              minwl, maxwl))
        print("Only using this range on datafile")
        inds = np.where((wavelengths >= minwl) & (wavelengths <= maxwl))
        wavelengths = wavelengths[inds]
        flux = flux[inds]
        flux_err = flux_err[inds]
    params = {"wl": wavelengths,
              "flux": flux,
              "err": flux_err,
              "pname": pname,
              "datafile": datafile}
    return wavelengths, flux, flux_err, params
#-----------------------------------------------------------------------------#
def create_hg(hg_params=None):
    """Build a synthetic host-galaxy spectrum from (possibly random) params.

    Args:
        hg_params (dictionary): Host galaxy component parameters. Required keys are:
            - no_templates (number of templates)
            - wl (wavelength range of HG model, redshift must be accounted for already)
            - hg_norm_{} (1,2,3 depending on number of templates)
            - hg_stellar_disp (stellar dispersion)
            When None, defaults are drawn at random from the ranges in PARS.

    Returns:
        (wavelengths, hg_flux, hg_err, hg_params); the error is 5% of flux.
    """
    if hg_params is None:
        hg_params = {"no_templates": 3, "wl": WL}
        max_template_flux = 6e-12
        # Draw one normalization per template plus a stellar dispersion.
        samples = draw_from_sample.gaussian(PARS["hg_norm_min"], max_template_flux, 3)
        hg_params["hg_norm_1"] = samples[0]
        hg_params["hg_norm_2"] = samples[1]
        hg_params["hg_norm_3"] = samples[2]
        hg_params["hg_stellar_disp"] = draw_from_sample.gaussian(PARS["hg_stellar_disp_min"], PARS["hg_stellar_disp_max"])
    print("HG params: {}".format(hg_params))
    hg = HostGalaxyComponent()
    # Make a Spectrum object with dummy flux and flux error
    spectrum = Spectrum(hg_params["wl"], hg_params["wl"], hg_params["wl"])
    hg.initialize(spectrum)
    # Component parameter vector: norms in template order, dispersion last.
    comp_params = [hg_params["hg_norm_{}".format(x)] for x in range(1, hg_params["no_templates"]+1)] + [hg_params["hg_stellar_disp"]]
    hg_flux = HostGalaxyComponent.flux(hg, spectrum, comp_params)
    hg_err = hg_flux * 0.05
    # pl.errorbar(hg_params["wl"], hg_flux, hg_err)
    # pl.savefig("hg_data.png")
    return hg_params["wl"], hg_flux, hg_err, hg_params
#-----------------------------------------------------------------------------#
|
{"hexsha": "d268f8ab3448d4f6983a6707ce15fd953a7f9230", "size": 3947, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/run_hg.py", "max_stars_repo_name": "jotaylor/SPAMM", "max_stars_repo_head_hexsha": "3087269cb823d6f4022ebf1dd75d920dee7c1cc0", "max_stars_repo_licenses": ["BSD-3-Clause-Clear"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/run_hg.py", "max_issues_repo_name": "jotaylor/SPAMM", "max_issues_repo_head_hexsha": "3087269cb823d6f4022ebf1dd75d920dee7c1cc0", "max_issues_repo_licenses": ["BSD-3-Clause-Clear"], "max_issues_count": 25, "max_issues_repo_issues_event_min_datetime": "2018-09-07T13:50:57.000Z", "max_issues_repo_issues_event_max_datetime": "2019-05-31T19:50:23.000Z", "max_forks_repo_path": "examples/run_hg.py", "max_forks_repo_name": "jotaylor/SPAMM", "max_forks_repo_head_hexsha": "3087269cb823d6f4022ebf1dd75d920dee7c1cc0", "max_forks_repo_licenses": ["BSD-3-Clause-Clear"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.2110091743, "max_line_length": 133, "alphanum_fraction": 0.6024828984, "include": true, "reason": "import numpy", "num_tokens": 967}
|
#!/usr/bin/env python
#
# This program shows how to use mpi_comm_split
#
import numpy
from numpy import *
from mpi4py import MPI
import sys
def myquit(mes):
    """Shut down MPI, print a parting message, and exit the process.

    NOTE(review): MPI.Finalize() runs before the print, so `mes` is
    printed after MPI has already been torn down.
    """
    MPI.Finalize()
    print(mes)
    sys.exit()
# Demonstrates MPI_Comm_split: tasks are colored by rank parity and split
# into per-color sub-communicators.
comm=MPI.COMM_WORLD
myid=comm.Get_rank()
numprocs=comm.Get_size()
print("hello from ",myid," of ",numprocs)
# Map colors to integers
col_to_int={'blue ': 0, 'green ': 1, 'red ': 2, 'yellow': 3}
# Reverse col_index to give integers to colors
index_to_color= {v: k for k, v in col_to_int.items()}
if(myid == 0) :
    print("color to integer=",col_to_int," and ","integer to color=",index_to_color)
# select_val is either 0 or 1 depending odd/even value of myid
select_val=myid % 2
# color_str is either red or green depending odd/even value of
# select_val, that is myid
color_str=index_to_color[select_val]
# Get an integer representation of our color Note: this will be the same at select_val
color=col_to_int[color_str]
print("myid= %d color integer = %d color name = %s" %(myid,color,color_str))
#MPI.Finalize()
#exit()
# Split will create a set of communicators. All of the
# tasks with the same value of color will be in the same
# communicator. In this case we get two sets one for
# odd tasks and one for even tasks. Note they have the
# same name on all tasks, new_comm, but there are actually
# two different values (sets of tasks) in each one.
new_comm=comm.Split(color,myid)
# Get the new id and number of tasks in the communicator
new_id=new_comm.Get_rank()
new_nodes=new_comm.Get_size()
# Here we do a bcast from the new roots to show we can use the communicator
bcast_val=None
if new_id == 0 : bcast_val=myid
bcast_val=new_comm.bcast(bcast_val)
# Iterate through the colors and each processor will print its information
for k in col_to_int.keys():
    if( k == color_str):
        print("new id is %d in the %s communicator or %d.%s \
original id is %d \
id bcast from root %d" %(new_id,k,new_id,k,myid,bcast_val))
MPI.Finalize()
|
{"hexsha": "37997b0501408b7c09511c93a6816d8d8be6e15b", "size": 2011, "ext": "py", "lang": "Python", "max_stars_repo_path": "array/bot/others/P_ex12.py", "max_stars_repo_name": "timkphd/examples", "max_stars_repo_head_hexsha": "04c162ec890a1c9ba83498b275fbdc81a4704062", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2020-11-01T00:29:22.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-24T19:09:47.000Z", "max_issues_repo_path": "array/bot/others/P_ex12.py", "max_issues_repo_name": "timkphd/examples", "max_issues_repo_head_hexsha": "04c162ec890a1c9ba83498b275fbdc81a4704062", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-02-09T01:59:47.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-09T01:59:47.000Z", "max_forks_repo_path": "array/bot/others/P_ex12.py", "max_forks_repo_name": "timkphd/examples", "max_forks_repo_head_hexsha": "04c162ec890a1c9ba83498b275fbdc81a4704062", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.1168831169, "max_line_length": 86, "alphanum_fraction": 0.7120835405, "include": true, "reason": "import numpy,from numpy", "num_tokens": 565}
|
from glob import iglob
import numpy as np
import matplotlib.pyplot as plt
import scipy
import random
from scipy import ndimage
from scipy import signal
from scipy import interpolate
from scipy import fft
import audio.segment as seg
import audio.utils as utils
# https://en.wikipedia.org/wiki/Short-time_Fourier_transform#Inverse_STFT
# https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.istft.html
# https://dsp.stackexchange.com/questions/9877/reconstruction-of-audio-signal-from-spectrogram
# https://docs.scipy.org/doc/scipy/reference/tutorial/interpolate.html
# https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.interp2d.html
# https://en.wikipedia.org/wiki/Spectral_density
# https://en.wikipedia.org/wiki/Formant
def transform(segments, opt_log10=False, opt_abs=True, opt_gaussian=None, nperseg=4096, noverlap=4000):
    """STFT of the whole clip after amplification.

    Returns (sample_times, freqs, frame_times, Zxx); Zxx is optionally
    made absolute, log10-scaled, and Gaussian-smoothed, in that order.
    """
    samples = seg.flatten(segments)
    rate = segments.frame_rate
    samples = utils.amplify(samples, window_size=rate)
    sample_times = np.linspace(0, len(samples) / rate, len(samples))
    # Short-time Fourier transform over the full clip.
    freqs, frame_times, Zxx = scipy.signal.stft(samples, rate, nperseg=nperseg, noverlap=noverlap)
    if opt_abs:
        Zxx = np.abs(Zxx)
    if opt_log10:
        Zxx = np.log10(Zxx)
    if opt_gaussian is not None:
        Zxx = scipy.ndimage.gaussian_filter(Zxx, opt_gaussian)
    return sample_times, freqs, frame_times, Zxx
def draw_fft(segments, x=None):
    """Plot the positive-frequency FFT magnitude of the clip.

    :param segments: audio segments object providing frame_rate
    :param x: optional pre-flattened sample array; flattened from
        `segments` when omitted
    """
    if x is None:
        x = seg.flatten(segments)
    N = len(x)
    #s = np.abs(scipy.fft.fft(x))[:N//2]
    T = N/segments.frame_rate
    # Debug output (sample count, duration, product).
    print(N, T, N*T)
    t = np.linspace(0.0, T, N)
    sp = scipy.fft.fft(x)
    # Debug output left in by the author.
    print(dir(sp))
    print(t.shape[-1], segments.frame_rate)
    # Frequency bins matching the FFT layout; keep only positive half.
    freq = scipy.fft.fftfreq(t.shape[-1], d=1/segments.frame_rate)
    plt.plot(freq[:N//2], np.abs(sp.real[:N//2]))
    plt.show()
def spectrogram(segments, opt_log10=False, opt_show=True):
    """
    Plot the magnitude STFT of `segments` as a spectrogram.

    https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.spectrogram.html

    :param segments: audio segments object providing frame_rate
    :param opt_log10: if True, plot log10 magnitudes
    :param opt_show: if True, display the figure on screen
    """
    x = seg.flatten(segments)
    fs = segments.frame_rate
    print(len(x)/fs, len(x), fs)
    t, f1, t1, Zxx = transform(segments, opt_log10=opt_log10, opt_abs=True)
    # Frequency resolution = spacing between adjacent STFT bins.
    frequency_precision = f1[1]-f1[0]
    print("".join(str(x) for x in ["Frequency precision: ", frequency_precision, "Hz"]))
    plt.pcolormesh(t1, f1, Zxx, shading='flat')
    plt.ylabel('Frequency [Hz]')
    plt.xlabel('Time [sec]')
    if opt_show:
        plt.show()
class Interpolater:
    """Query STFT magnitude as a smooth function of (time, frequency).

    Builds a cubic 2-D interpolant over the magnitude spectrogram so the
    intensity at an arbitrary (time, freq) point can be evaluated.
    """
    def __init__(self, segments):
        self.fs = segments.frame_rate
        # BUG FIX: the original called the undefined bare name `flatten`;
        # every other function in this module uses seg.flatten.
        self.data = seg.flatten(segments)
        self.freq, self.times, Zxx = scipy.signal.stft(self.data, fs=self.fs, nperseg=4096)
        self.Zxx = np.abs(Zxx)
        # Cubic 2-D interpolant over the magnitude spectrogram.
        self.fun = scipy.interpolate.interp2d(self.times, self.freq, self.Zxx, kind='cubic')
        print(self.freq.shape, self.times.shape)
    def _index(self, value, vector):
        """Return ((lo_ix, hi_ix), (lo_val, hi_val)) bracketing `value`.

        Assumes `vector` is sorted ascending; both indices are clamped to
        the last element when `value` exceeds the vector's range.
        """
        low_index = 0
        high_index = 0
        max_index = len(vector)-1
        for v in vector:
            high_index+=1
            if v >= value:
                break
            low_index+=1
        if low_index > max_index:
            low_index = max_index
        if high_index > max_index:
            high_index = max_index
        return (low_index, high_index), (vector[low_index], vector[high_index])
    def intensity(self, time, freq):
        """Interpolated spectrogram magnitude at (time, freq)."""
        return self.fun(time, freq)
    def get_signal(self, freq, times):
        """Synthesize a sinusoid at `freq` (random initial phase) whose
        amplitude tracks the interpolated spectrogram intensity."""
        amplitude = np.zeros_like(times)
        sig = np.real(np.exp( (times * freq * 2 * np.pi * 1j) + np.random.random() * 2 * np.pi * 1j))
        for ix, a in enumerate(amplitude):
            amplitude[ix] = self.intensity(times[ix], freq) * sig[ix]
        return amplitude
def static_tones(segments):
    """Resynthesize the clip as a bank of fixed-frequency tones whose
    amplitudes follow the clip's spectrogram, then export to mp3.

    Tone frequencies step randomly (30-130 Hz apart) from 50 Hz to 2 kHz.
    """
    interp = Interpolater(segments)
    max_t = len(interp.data)/interp.fs
    all_times = np.transpose(np.linspace(0, max_t, len(interp.data)))
    a = np.zeros_like(all_times)
    f = 50
    while f < 2000:
    #for f in [123, 232, 344, 373, 495, 719, 827, 1037, 1260]:
        print(f)
        a += interp.get_signal(f, all_times)
        f += 30+100*np.random.random()
    # Normalize to 75% of int16 full scale before converting.
    a = np.iinfo(np.int16).max * 0.75 * a / np.max(np.abs(a))
    a = a.astype("int16")
    output = seg.to_segment(a, segments.frame_rate, segments.sample_width, segments.channels)
    spectrogram(output)
    file_handle = output.export("D:\\output.mp3", format="mp3")
    plt.plot(all_times, a)
    plt.show()
def remove_phase(segments):
    """Discard STFT phase (keep magnitudes only), invert, export to D:\\output.mp3."""
    fs=segments.frame_rate
    data = seg.flatten(segments)
    freq, times, Zxx = scipy.signal.stft(data, fs, nperseg=1024)
    # istft of |Zxx| reconstructs the audio with all phase information removed.
    t, x = scipy.signal.istft(np.abs(Zxx), fs)
    x = x.astype("int16")
    output = seg.to_segment(x, segments.frame_rate, segments.sample_width, segments.channels)
    file_handle = output.export("D:\\output.mp3", format="mp3")
def save(segments):
    """Round-trip the audio through STFT/iSTFT unchanged and export to D:\\output.mp3."""
    print(segments.sample_width, segments.frame_rate, segments.channels)
    data = seg.flatten(segments)
    fs = segments.frame_rate
    _, _, Zxx = scipy.signal.stft(data, fs)
    t, x = scipy.signal.istft(Zxx, fs)
    x = x.astype("int16")
    output = seg.to_segment(x, segments.frame_rate, segments.sample_width, segments.channels)
    file_handle = output.export("D:\\output.mp3", format="mp3")
def smooth(vec, window_size=5):
    """Moving-average smoothing of `vec` over a sliding window.

    The window spans [i - window_size//2, i + window_size//2) and is clamped at
    the start of the vector. The original let the start index go negative, which
    wraps around in Python slicing and averaged in samples from the far end.

    :param vec: sequence of numbers (list or numpy array)
    :param window_size: nominal window width
    :return: float numpy array of the same length as vec
    """
    # Explicit float dtype: np.zeros_like on an int input truncated the averages.
    ret = np.zeros(len(vec), dtype=float)
    half = window_size // 2
    for i, _ in enumerate(vec):
        lo = max(0, i - half)  # clamp instead of wrapping to the end
        window = vec[lo:i + half]
        ret[i] = sum(window) / max(1, len(window))
    return ret
def find_max_ix(vec, window_size=5):
    """Indices i where vec[i] equals the maximum of its local window.

    The window spans [i - window_size//2, i + window_size//2) and is clamped at
    index 0 (the original let the start go negative, which wraps in Python
    slicing and compared against samples from the end of the vector).

    :param vec: sequence of numbers (converted to a numpy array)
    :param window_size: nominal window width
    :return: list of indices of local maxima
    """
    vec = np.asarray(vec)  # also accepts plain lists now
    half = window_size // 2
    ret = []
    for i, value in enumerate(vec):
        # Slice once instead of twice as the original did.
        window = vec[max(0, i - half):i + half]
        if window.size > 0 and value == window.max():
            ret.append(i)
    return ret
def find_max_smooth(x_values, y_values, window_size=5):
    """Local maxima of y_values found on a smoothed copy, keeping only peaks
    above 1% of the global maximum.

    Returns (max_xs, max_ys) taken from the *unsmoothed* y_values at the
    detected indices.
    """
    vec = smooth(y_values, window_size=window_size)
    y_max = np.max(y_values)
    max_xs = []
    max_ys = []
    # Pass the caller's window_size through (the original hard-coded 5 here,
    # silently ignoring the parameter).
    for ix in find_max_ix(vec, window_size=window_size):
        if y_values[ix] > 0.01 * y_max:
            max_xs.append(x_values[ix])
            max_ys.append(y_values[ix])
    return max_xs, max_ys
def find_max_interp(x_values, y_values, cutoff=None, min_x=20, max_x=2000, resolution=1):
    """Locate local maxima of a smoothing-spline fit of (x_values, y_values).

    The spline's derivative is sampled on a uniform grid; a sign drop between
    consecutive samples marks a maximum, approximated by the cell midpoint.
    Peaks below `cutoff` (if given) are discarded.

    :return: (max_xs, max_ys) lists of peak positions and spline values
    """
    y_fun = Spline(x_values, y_values)
    x = np.linspace(min_x, max_x, int((max_x - min_x)/resolution))
    # The original also evaluated y_fun over the whole grid into an unused
    # array `y`; that full pass of spline evaluations is dropped here.
    y_prime = np.array(list(y_fun.derivate(f) for f in x))
    max_xs = []
    max_ys = []
    for i in range(1, len(y_prime)):
        # Derivative sign falling (+ -> 0/-) brackets a local maximum.
        if np.sign(y_prime[i]) < np.sign(y_prime[i-1]):
            x_tmp = x[i-1] + (x[i]-x[i-1])/2
            y_tmp = y_fun(x_tmp)
            if cutoff is None or y_tmp >= cutoff:
                max_xs.append(x_tmp)
                max_ys.append(y_tmp)
    return max_xs, max_ys
def find_peaks(x_values, y_values, min_x=20, max_x=2000, cutoff=None, resolution=0.1):
    """Find peak (x, y) pairs via spline interpolation, defaulting the search
    range to the extent of the data when min_x/max_x are None."""
    lo = x_values[0] if min_x is None else min_x
    hi = x_values[-1] if max_x is None else max_x
    #max_xs, max_ys = find_max_smooth(x_values, y_values)
    return find_max_interp(x_values, y_values, cutoff=cutoff, min_x=lo, max_x=hi, resolution=resolution)
class Spline:
    """Smoothing cubic B-spline of (x, y) that evaluates to 0 outside the data range.

    Fits once in the constructor (smoothing factor `s`) and precomputes the
    first-derivative spline for repeated derivate() calls.
    """

    def __init__(self, x, y, s=4800):
        self.x = x
        self.y = y
        self.spl = scipy.interpolate.splrep(self.x, self.y, s=s)
        self.der = scipy.interpolate.splder(self.spl, 1)

    def _in_range(self, x):
        # Evaluation is only meaningful inside the fitted data interval.
        return self.x[0] <= x <= self.x[-1]

    def __call__(self, x):
        """Spline value at x, or 0 when x is outside [x[0], x[-1]]."""
        return scipy.interpolate.splev(x, self.spl) if self._in_range(x) else 0

    def derivate(self, x):
        """First derivative at x, or 0 when x is outside [x[0], x[-1]]."""
        return scipy.interpolate.splev(x, self.der) if self._in_range(x) else 0
class Particle:
    """One (time, frequency, amplitude) spectral peak, linkable via prev/next
    into a doubly linked chain that traces a spectral line through time."""

    def __init__(self, time, frequency, amplitude, smooth_amplitude=None):
        self.time = time
        self.frequency = frequency
        self.amplitude = amplitude
        # Falls back to the raw amplitude when no smoothed value is supplied.
        self.smooth_amplitude = smooth_amplitude if smooth_amplitude is not None else amplitude
        self.prev = None
        self.next = None

    def delta_freq(self, particle):
        """Absolute frequency distance to another particle."""
        return np.abs(particle.frequency - self.frequency)

    def __len__(self):
        """Number of nodes from this particle to the end of its chain."""
        l = 0
        step = self
        while step is not None:
            step = step.next
            l += 1
        return l

    @property
    def duration(self):
        """Time span covered by the chain starting here (0 for an empty line)."""
        time, _, _ = self.line()
        if time:
            return time[-1] - time[0]
        return 0

    def line(self):
        """Collect (times, frequencies, smooth amplitudes) along the chain."""
        times = []
        frequencies = []
        amplitudes = []
        step = self
        while step is not None:
            times.append(step.time)
            frequencies.append(step.frequency)
            amplitudes.append(step.smooth_amplitude)
            step = step.next
        return times, frequencies, amplitudes

    def interpolate(self, times):
        """Render the chain as a real-valued signal sampled at `times`.

        Frequency and amplitude are spline-interpolated along the chain; the
        phase argument integrates frequency over time so the tone stays
        continuous. Samples outside the chain's time span are zero.
        """
        try:
            amplitude = np.zeros_like(times)
            line_times, line_freqs, line_amps = self.line()
            freq = Spline(line_times, line_freqs)
            amp = Spline(line_times, line_amps)
            cum_arg = 0
            first = True
            cum_t = 0
            for i, t in enumerate(times):
                delta_t = t - cum_t
                if t > line_times[0] and t < line_times[-1]:
                    f = freq(t)
                    a = amp(t)
                    if first:
                        cum_arg = t * f
                        first = False
                    else:
                        # Accumulate phase: integral of frequency over time.
                        cum_arg += f * delta_t
                    amplitude[i] = np.real(a * np.exp(cum_arg * 2 * np.pi * 1j))
                cum_t += delta_t
            return amplitude
        # Was a bare `except:`, which also swallowed SystemExit/KeyboardInterrupt.
        except Exception:
            # Spline fitting needs enough points; short chains yield silence.
            return np.zeros_like(times)

    def plot(self, times):
        """Plot the interpolated frequency track (line) and raw nodes (dots) in
        one random matplotlib color; silently skips chains too short to fit."""
        try:
            plot_times = []
            amplitude = []
            frequency = []
            line_times, line_freqs, line_amps = self.line()
            freq = Spline(line_times, line_freqs)
            amp = Spline(line_times, line_amps)
            for i, t in enumerate(times):
                if t > line_times[0] and t < line_times[-1]:
                    plot_times.append(t)
                    frequency.append(freq(t))
                    amplitude.append(amp(t))
            colors = "bgrcmykw"
            color = random.choice(colors)
            plt.plot(plot_times, frequency, '-' + color)
            plt.plot(line_times, line_freqs, 'o' + color)
        # Was a bare `except:`; narrowed so Ctrl-C still interrupts plotting.
        except Exception:
            pass
def filter_particles(particles, duration=0.1, minlen=5):
    """Keep only particle chains lasting at least `duration` seconds and
    containing at least `minlen` linked nodes."""
    return [p for p in particles if p.duration >= duration and len(p) >= minlen]
def find_closest_index(vec, value):
    """Index of the element of `vec` closest to `value`, or None if `vec` is empty.

    Accepts any sequence: the original computed `vec - value` directly, which
    raises TypeError for the plain Python lists that harmonic_peaks passes in.
    """
    if len(vec) == 0:
        return None
    tmp = np.abs(np.asarray(vec) - value)
    return tmp.argmin()
def find_base_frequency(max_xs, max_ys, tolerance=0.01, guess=None):
    """Guess the fundamental: the lowest peak whose amplitude clears
    tolerance * max amplitude. Falls back to 1 when nothing qualifies.

    This is shady and unreliable. `guess` is accepted but unused.
    """
    if len(max_ys) == 0:
        return 1
    threshold = tolerance * max(max_ys)
    for freq, amp in zip(max_xs, max_ys):
        if amp >= threshold:
            return freq
    return 1
def find_base_binning(max_xs, max_ys, resolution=5, low_base=75, high_base=200, min_x=20, max_x=2000):
    """Estimate the fundamental frequency by harmonic folding and binning.

    Every peak x in [min_x, max_x) is divided by each plausible harmonic number
    n (so that x/n could land in [low_base, high_base]) and casts a vote of
    weight amplitude/n at x/n. Votes are then binned in windows of width
    `resolution`; the bin with the largest total weight wins and its mean
    frequency is returned. Falls back to find_base_frequency when no bin is hit.
    """
    candidates = list()
    for ix, peak_x in enumerate(max_xs):
        if min_x <= peak_x < max_x:
            n_lo = max(int(np.floor(peak_x / high_base)), 1)
            n_hi = int(np.ceil(peak_x / low_base))
            for divisor in range(n_lo, n_hi):
                candidates.append((peak_x / divisor, max_ys[ix] / divisor, max_ys[ix], divisor))
    candidates.sort(key=lambda c: c[0])
    votes = list()
    lo = 0
    hi = 0
    last = len(candidates) - 1
    center = low_base
    while center < high_base:
        # Advance the two pointers so candidates[lo:hi] covers center +- resolution/2.
        while lo < last and candidates[lo][0] < center - resolution / 2:
            lo += 1
        while hi < last and candidates[hi][0] < center + resolution / 2:
            hi += 1
        bucket = candidates[lo:hi]
        if bucket:
            mean_freq = sum(c[0] for c in bucket) / len(bucket)
            weight = sum(c[1] for c in bucket)
            votes.append((mean_freq, weight))
        center += resolution
    votes.sort(key=lambda v: v[1], reverse=True)
    if votes:
        return votes[0][0]
    return find_base_frequency(max_xs, max_ys)
def harmonic_peaks(max_xs, max_ys, max_x=2000, base_guess=None, tolerance=0.1, frequency_window=40):
    """Snap detected peaks onto the harmonic series n*base_freq of an estimated fundamental.

    Returns (harm_xs, harm_ys, harm_ix): harmonic frequencies, their amplitudes
    and the harmonic number n of each. base_guess and tolerance are currently unused.
    """
    harm_xs = []
    harm_ys = []
    harm_ix = []
    #base_freq = find_base_frequency(max_xs, max_ys, tolerance=tolerance, guess=None)
    base_freq = find_base_binning(max_xs, max_ys)
    # Sliding-window pointers over max_xs; they only move forward, which assumes
    # max_xs is sorted ascending (find_peaks yields frequencies in order).
    low_ix = 0
    high_ix = 0
    max_ix = len(max_xs)-1
    for n in range(1, int(max_x/base_freq)):
        freq = n*base_freq
        amplitude = 0
        # Fallback amplitude: the single detected peak nearest to the harmonic.
        closest_index = find_closest_index(max_xs, freq)
        if closest_index is not None:
            amplitude = max_ys[closest_index]
        while low_ix < max_ix and max_xs[low_ix] < freq-frequency_window/2:
            low_ix += 1
        while high_ix < max_ix and max_xs[high_ix] < freq+frequency_window/2:
            high_ix += 1
        window = max_ys[low_ix:high_ix]
        win_len = len(window)
        if win_len > 0:
            # Prefer the strongest amplitude within +-frequency_window/2 of the harmonic.
            amplitude = max(x for x in window)
        harm_xs.append(freq)
        harm_ys.append(amplitude)
        harm_ix.append(n)
    return harm_xs, harm_ys, harm_ix
def find_particles(f1, t1, Zxx, resolution=0.1, step_size=5, amplitude_limit=0.05, match_freq=50):
    """
    Track spectral peaks through time as linked chains of Particle nodes.

    Every step_size-th STFT column is scanned for harmonic peaks; each new peak
    is greedily matched to the closest-in-frequency unmatched peak from the
    previous step (within match_freq Hz) to extend a chain. Returns chain heads.

    amplitude_limit = 0.3 is really cool with log10.
    opt_gaussian defaults to None but you can provide [t, f] to apply window size e.g [0, 100]
    """
    particles = []
    prev_particles = []
    amplitude_criteria = amplitude_limit * np.max(Zxx)
    guess = None
    for i in range(0, len(t1), step_size):
        print(i, len(t1))
        time = t1[i]
        new_particles = []
        # should be in a window
        max_z = np.max(Zxx)
        max_fs, max_amps = find_peaks(f1, Zxx[:, i], cutoff=None, resolution=resolution)
        max_fs, max_amps, harmonic_index = harmonic_peaks(max_fs, max_amps, base_guess=guess)
        for ix, frequency in enumerate(max_fs):
            amplitude = max_amps[ix]
            if max_amps[ix] > amplitude_criteria:
                new_particles.append(Particle(time=time, frequency=frequency, amplitude=amplitude))
        if len(max_fs) > 0:
            # Reuse the fundamental as the next column's base-frequency guess.
            guess = max_fs[0]
        # Todo: improve complexity in matching
        # Strongest peaks get first pick of predecessors.
        new_particles.sort(key=lambda x: x.smooth_amplitude, reverse=True)
        for part in new_particles:
            # Candidates: previous-step particles not yet extended.
            tmp_prev = [p for p in prev_particles if p.next == None]
            if tmp_prev:
                min_prev = tmp_prev[0]
                min_delta = tmp_prev[0].delta_freq(part)
                for pix in range(1, len(tmp_prev)):
                    tmp_delta = tmp_prev[pix].delta_freq(part)
                    if tmp_prev[pix].delta_freq(part) < min_delta:
                        min_prev = tmp_prev[pix]
                        min_delta = tmp_delta
                if min_delta > match_freq: #Hz
                    continue
                min_prev.next = part
                part.prev = min_prev
                # A matched particle with no predecessor starts a chain: record the head.
                if min_prev.prev is None:
                    particles.append(min_prev)
        prev_particles = new_particles
    return particles
def find_harmonic_particles(f1, t1, Zxx, resolution=0.1, step_size=5, amplitude_limit=0.05, match_freq=50):
    """
    Track particles by harmonic number: in each sampled STFT column the peaks
    are snapped to harmonics, and harmonic n is linked to harmonic n of the
    previous column. Returns the chain-head Particles.

    amplitude_limit = 0.3 is really cool with log10.
    opt_gaussian defaults to None but you can provide [t, f] to apply window size e.g [0, 100]
    """
    particles = []
    guess = None
    prev_particles = dict()
    # Hoisted: Zxx does not change inside the loop.
    max_z = np.max(Zxx)
    amplitude_criteria = amplitude_limit * max_z
    for i in range(0, len(t1), step_size):
        print(i, len(t1))
        time = t1[i]
        max_fs, max_amps = find_peaks(f1, Zxx[:, i], cutoff=None, resolution=resolution)
        # Restored: this call was commented out, leaving harmonic_index (used
        # below) undefined, so every invocation raised NameError.
        max_fs, max_amps, harmonic_index = harmonic_peaks(max_fs, max_amps, max_x=2000, base_guess=guess)
        new_particles = dict()
        for ix, frequency in enumerate(max_fs):
            amplitude = max_amps[ix]
            if amplitude < amplitude_criteria:
                continue
            hix = harmonic_index[ix]
            p = Particle(time, frequency, amplitude)
            new_particles[hix] = p
            # Link to the same harmonic number of the previous sampled column.
            prev_p = prev_particles.get(hix, None)
            if prev_p is not None:
                prev_p.next = p
                p.prev = prev_p
            else:
                particles.append(p)
        prev_particles = new_particles
    return particles
def build_signal(t, particles):
    """Sum every particle chain's interpolated signal over the time grid `t`,
    normalize to int16 full scale and amplify."""
    all_signals = np.zeros_like(t)
    print(t[0], t[-1])
    for ix, particle in enumerate(particles):
        print(ix, len(particles))
        all_signals += particle.interpolate(t)
    all_signals = np.iinfo(np.int16).max * all_signals / np.max(np.abs(all_signals))
    all_signals = all_signals.astype("int16")
    # NOTE(review): `utils` is not among the imports visible in this chunk —
    # confirm it resolves (e.g. via an import near the top of the file).
    return utils.amplify(all_signals)
def rebuild_signal(segments, amplitude_limit=0.01, step_size=1, resolution=0.01):
    """Analyze `segments` into particle chains, resynthesize them, export the
    result to D:\\output.mp3 and overlay the tracks on the output spectrogram."""
    spectrogram(segments)
    t, f1, t1, Zxx = transform(segments)
    #particles = find_harmonic_particles(f1, t1, Zxx, amplitude_limit=amplitude_limit, step_size=step_size, resolution=resolution)
    particles = find_particles(f1, t1, Zxx, amplitude_limit=amplitude_limit, step_size=step_size, resolution=resolution)
    # Keep only chains with at least 4 nodes (the duration filter is disabled).
    particles = filter_particles(particles, duration=0, minlen=4)
    all_signals = build_signal(t, particles)
    output = seg.to_segment(all_signals, segments.frame_rate, segments.sample_width, segments.channels)
    file_handle = output.export("D:\\output.mp3", format="mp3")
    print("output written")
    spectrogram(output, opt_show=False)
    for ix, particle in enumerate(particles):
        print(ix, len(particles))
        particle.plot(t)
    plt.show()
def decode_image_from_stackexchange():
    """Treat a spectrogram *image* as STFT magnitudes and sonify it to mp3."""
    # Image from https://dsp.stackexchange.com/questions/47304/stft-computation
    from PIL import Image
    import PIL.ImageOps
    # Invert (dark = loud) and flip so low frequencies end up in row 0.
    Zxx = np.array(PIL.ImageOps.flip(PIL.ImageOps.invert(Image.open("D:\\so_sample.png").convert('L'))))
    # Axes assumed from the linked post: 0-4000 Hz over 3 seconds at 16 kHz.
    f1 = np.linspace(0, 4000, Zxx.shape[0])
    t1 = np.linspace(0, 3, Zxx.shape[1])
    t = np.linspace(0, 3, 3*16000)
    print(Zxx.shape, f1.shape, t1.shape)
    particles = find_particles(f1, t1, Zxx, amplitude_limit=0, step_size=1)
    #particles = filter_particles(particles, 0.01)
    all_signals = build_signal(t, particles)
    output = seg.to_segment(all_signals, 16000, 2, 1)
    file_handle = output.export("D:\\output.mp3", format="mp3")
    print("output written")
    #spectrogram(output, opt_show=False)
    for ix, particle in enumerate(particles):
        print(ix, len(particles))
        particle.plot(t)
    plt.show()
def generate_particle(t_start, t_end, freq_fun, amplitude):
    """Build a linked Particle chain sampled every 0.1 s over [t_start, t_end].

    :param t_start: first sample time
    :param t_end: chain extends until t exceeds this time
    :param freq_fun: callable t -> frequency in Hz
    :param amplitude: constant amplitude for every node
    :return: the head Particle of the chain
    """
    t = t_start
    head = Particle(t, freq_fun(t), amplitude)
    current = head
    while t < t_end:
        t += 0.1
        # Evaluate freq_fun once per step (the original computed it twice and
        # left the first result unused).
        next_part = Particle(t, freq_fun(t), amplitude)
        current.next = next_part
        next_part.prev = current
        current = next_part
    return head
def test_particle():
    """Synthesize 19 harmonics of a 124 Hz base with a slowly wobbling frequency,
    then run the analyze/resynthesize pipeline on the result."""
    t_start = 0
    t_end = 3
    sample_rate = 16000
    particles = list()
    base_freq = 124
    # Per-harmonic weights; index n selects harmonic n's amplitude.
    amplitude = [3, 10, 18, 4, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
    for n in range(1, 20):
        # The lambda captures n late, but generate_particle consumes it fully
        # before n changes, so the late-binding closure is safe here.
        particles.append(generate_particle(t_start, t_end, freq_fun=lambda x: (0.75 + 0.25*(x%1))*n*base_freq, amplitude=amplitude[n]))
    t = np.linspace(t_start, t_end, int((t_end-t_start)*sample_rate))
    all_signals = build_signal(t, particles)
    segments = seg.to_segment(all_signals, sample_rate, 2, 1)
    rebuild_signal(segments, amplitude_limit=0.05, step_size=5, resolution=1)
# --- Script entry: optionally profile, then run the pipeline on matched files. ---
enable_profiling = False
if enable_profiling:
    import cProfile, pstats, io
    from pstats import SortKey
    pr = cProfile.Profile()
    pr.enable()
#DATA_FILES_GLOB="D:\\output*"
DATA_FILES_GLOB="D:\\Desktop\\soundboard\\Dahlgren\\jajjemen.flac"
#DATA_FILES_GLOB="D:\\Desktop\\soundboard\\Alekanderu\\*vad var det*.flac"
#DATA_FILES_GLOB="D:\\Desktop\\soundboard\\Misc\\*.flac"
#DATA_FILES_GLOB="E:\\monoton.flac"
for file in iglob(DATA_FILES_GLOB):
    print(file)
    segments = seg.read_file(file)
    # Alternative experiments; exactly one pipeline call is left enabled.
    #draw_fft(segments)
    #save(segments)
    #spectrogram(segments)
    #static_tones(segments)
    #remove_phase(segments)
    rebuild_signal(segments, amplitude_limit=0.01, step_size=1, resolution=1)
    #test_particle()
    #test_find_base_frequency(segments)
    #decode_image_from_stackexchange()
    #amplify_segments(segments)
if enable_profiling:
    # Dump the cumulative-time profile to stdout.
    pr.disable()
    s = io.StringIO()
    sortby = SortKey.CUMULATIVE
    ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
    ps.print_stats()
    print(s.getvalue())
|
{"hexsha": "c9b3742bac0dbfcda040f7311df14c69ce0bbbe6", "size": 22007, "ext": "py", "lang": "Python", "max_stars_repo_path": "voice.py", "max_stars_repo_name": "tednoob/RePhase", "max_stars_repo_head_hexsha": "3b9a30018682463bcdd6029be491cb1f7129c048", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "voice.py", "max_issues_repo_name": "tednoob/RePhase", "max_issues_repo_head_hexsha": "3b9a30018682463bcdd6029be491cb1f7129c048", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "voice.py", "max_forks_repo_name": "tednoob/RePhase", "max_forks_repo_head_hexsha": "3b9a30018682463bcdd6029be491cb1f7129c048", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.7530674847, "max_line_length": 136, "alphanum_fraction": 0.5990366702, "include": true, "reason": "import numpy,import scipy,from scipy", "num_tokens": 5713}
|
c     Read the constant advection velocity (uvel, vvel) from setprob.data
c     and initialize the pi constants shared through common blocks.
      subroutine setprob()
      implicit none
c     pi and 2*pi, published via common /compi/
      double precision pi, pi2
      common /compi/ pi,pi2
c     x/y advection velocities, published via common /comvelocity/
      double precision uvel, vvel
      common /comvelocity/ uvel, vvel
      open(10,file='setprob.data')
      read(10,*) uvel
      read(10,*) vvel
      close(10)
      pi = 4.d0*datan(1.d0)
      pi2 = 2.d0*pi
      end
|
{"hexsha": "9ff6495a9c859650a73900576ee898ba21fa626b", "size": 337, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "applications/clawpack/advection/2d/periodic/setprob.f", "max_stars_repo_name": "ECLAIRWaveS/ForestClaw", "max_stars_repo_head_hexsha": "0a18a563b8c91c55fb51b56034fe5d3928db37dd", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 34, "max_stars_repo_stars_event_min_datetime": "2017-09-26T13:39:44.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-11T08:56:23.000Z", "max_issues_repo_path": "applications/clawpack/advection/2d/periodic/setprob.f", "max_issues_repo_name": "ECLAIRWaveS/ForestClaw", "max_issues_repo_head_hexsha": "0a18a563b8c91c55fb51b56034fe5d3928db37dd", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 75, "max_issues_repo_issues_event_min_datetime": "2017-08-02T19:56:00.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T12:36:32.000Z", "max_forks_repo_path": "applications/clawpack/advection/2d/periodic/setprob.f", "max_forks_repo_name": "ECLAIRWaveS/ForestClaw", "max_forks_repo_head_hexsha": "0a18a563b8c91c55fb51b56034fe5d3928db37dd", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 23, "max_forks_repo_forks_event_min_datetime": "2018-02-21T00:10:58.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-14T19:08:36.000Z", "avg_line_length": 16.85, "max_line_length": 37, "alphanum_fraction": 0.5489614243, "num_tokens": 110}
|
# -*- coding:utf-8 -*-
import tensorflow as tf
import numpy as np
import os
import sys
import pickle
import datetime
import matplotlib.pyplot as plt
from readthyroid import *
# 874 1840
# --- Run configuration for the TF1-style VGG evaluation below ---
img_channels = 1  # grayscale input images
iterations = 40
batch_size = 46
total_epoch = 150
test_iterations = 59  # number of test batches per evaluation
test_size = 46  # samples per test batch
weight_decay = 0.0003  # L2 regularization coefficient
dropout_rate = 0.8
momentum_rate = 0.9  # SGD momentum
# log_save_path = './log'
# model_save_path = './model_z/Attention_VGG/'
# exp_name = 'Attention_VGG'
num = 100
alpha = 1.5
beta = 1.0
def TP_def(c, y):
    """Confusion-matrix counts from correctness flags and one-hot labels.

    c[i] is truthy when prediction i was correct; y[i, 0] is truthy for the
    positive class. Returns (TP, FP, FN, TN).
    """
    c = np.array(c)
    y = np.array(y)
    # print(c.shape, y.shape)
    TP = FP = FN = TN = 0
    for correct, label in zip(c, y):
        positive = bool(label[0])
        if correct:
            if positive:
                TP += 1
            else:
                TN += 1
        elif positive:
            FN += 1
        else:
            FP += 1
    return TP, FP, FN, TN
def bias_variable(shape):
    """Trainable bias tensor of the given shape, initialized to a constant 0.1."""
    initial = tf.constant(0.1, shape=shape, dtype=tf.float32)
    return tf.Variable(initial)
def weight_variable(shape):
    """Trainable weight tensor, truncated-normal initialized with stddev 0.01."""
    initial = tf.truncated_normal(shape, stddev=0.01)
    return tf.Variable(initial)
# def conv2d(x, W):
# return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
# def conv2d(input, in_features, out_features, kernel_size, strides=1, with_bias=True):
# W = weight_variable([ kernel_size, kernel_size, in_features, out_features ])
# conv = tf.nn.conv2d(input, W, strides=[1, strides, strides, 1], padding='SAME')
# if with_bias:
# return conv + bias_variable([ out_features ])
# return conv
def conv2d(input, in_features, out_features, kernel_size, strides=1, with_bias=True):
    """SAME-padded convolution followed by batch norm and ReLU.

    NOTE(review): when with_bias is True the bias is added *after* BN+ReLU,
    unlike the commented-out variant above — confirm this is intentional.
    """
    W = weight_variable([ kernel_size, kernel_size, in_features, out_features ])
    conv = tf.nn.relu(batch_norm(tf.nn.conv2d(input, W, strides=[1, strides, strides, 1], padding='SAME')))
    if with_bias:
        return conv + bias_variable([ out_features ])
    return conv
def max_pool(input, k_size=1, stride=1, name=None):
    """SAME-padded max pooling with a square k_size window and the given stride."""
    return tf.nn.max_pool(input, ksize=[1, k_size, k_size, 1], strides=[1, stride, stride, 1],
                padding='SAME', name=name)
def avg_pool(input, k_size=1, stride=1, padding = 'SAME', name=None):
    """Average pooling with a square k_size window, given stride and padding."""
    return tf.nn.avg_pool(input, ksize=[1, k_size, k_size, 1], strides=[1, stride, stride, 1],
                padding=padding, name=name)
def batch_norm(input):
    """Batch normalization; train/inference mode is switched by the global
    `train_flag` placeholder defined in the main graph below."""
    return tf.contrib.layers.batch_norm(input, decay=0.9, center=True, scale=True, epsilon=1e-3,
                    is_training=train_flag, updates_collections=None)
def index_max(con):
    """Per-row reduce_max into a fixed-size numpy buffer.

    NOTE(review): appears unused and broken — it assigns TF tensors into a
    float numpy array and hard-codes 23 rows; confirm before relying on it.
    """
    # con = input
    print(con.shape[0])
    max = np.zeros(23)
    for i in range(23):
        max[i] = tf.reduce_max(con[i])
    return max
def pre_res_block(input, in_channel, channel_1, channel_2, channel_3, kernel_size_1=1, kernel_size_2=3, kernel_size_3=1, small=False):
    """Pre-activation bottleneck residual block (1x1 -> 3x3 -> 1x1) with a
    projection shortcut; small=True downsamples both paths with stride 2."""
    # Pre-activation: BN + ReLU + dropout before the first convolution.
    x = tf.nn.relu(batch_norm(input))
    x = tf.nn.dropout(x, keep_prob)
    conv1_1 = conv2d(x, in_channel, channel_1, kernel_size_1)
    conv1_1bn = tf.nn.relu(batch_norm(conv1_1))
    conv1_1bn = tf.nn.dropout(conv1_1bn, keep_prob)
    if small:
        # Stride 2 in the middle conv and in the shortcut halves spatial size.
        conv1_2 = conv2d(conv1_1bn, channel_1, channel_2, kernel_size_2, 2)
        conv1_2bn = tf.nn.relu(batch_norm(conv1_2))
        conv1_2bn = tf.nn.dropout(conv1_2bn, keep_prob)
        conv1_3 = conv2d(conv1_2bn, channel_2, channel_3, kernel_size_3)
        conv2_1 = conv2d(x, in_channel, channel_3, kernel_size_3, 2)
    else:
        conv1_2 = conv2d(conv1_1bn, channel_1, channel_2, kernel_size_2)
        conv1_2bn = tf.nn.relu(batch_norm(conv1_2))
        conv1_2bn = tf.nn.dropout(conv1_2bn, keep_prob)
        conv1_3 = conv2d(conv1_2bn, channel_2, channel_3, kernel_size_3)
        conv2_1 = conv2d(x, in_channel, channel_3, kernel_size_3)
    res_output = tf.add(conv2_1, conv1_3)
    return res_output
def res_block(input, in_channel, channel_1, channel_2, channel_3, kernel_size_1=1, kernel_size_2=3, kernel_size_3=1, name=None):
    """Identity-shortcut bottleneck residual block (1x1 -> 3x3 -> 1x1).

    NOTE(review): the second activation skips batch_norm, unlike pre_res_block —
    confirm whether that asymmetry is intentional.
    """
    x = tf.nn.relu(batch_norm(input))
    x = tf.nn.dropout(x, keep_prob)
    conv1_1 = conv2d(x, in_channel, channel_1, kernel_size_1)
    conv1_1bn = tf.nn.relu(batch_norm(conv1_1))
    conv1_1bn = tf.nn.dropout(conv1_1bn, keep_prob)
    conv1_2 = conv2d(conv1_1bn, channel_1, channel_2, kernel_size_2)
    conv1_2bn = tf.nn.relu(conv1_2)
    conv1_2bn = tf.nn.dropout(conv1_2bn, keep_prob)
    conv1_3 = conv2d(conv1_2bn, channel_2, channel_3, kernel_size_3)
    res_output = tf.add(input, conv1_3, name)
    return res_output
def learning_rate_schedule(epoch_num):
    """Piecewise-constant learning-rate schedule.

    0.1 before epoch 40, then 0.01, 0.001, 0.0001 and finally 0.00001 from
    epoch 120 on; epoch 100 alone gets a one-epoch 0.005 bump.
    """
    for limit, rate in ((40, 0.1), (60, 0.01), (90, 0.001)):
        if epoch_num < limit:
            return rate
    if epoch_num == 100:
        return 0.005
    if epoch_num < 120:
        return 0.0001
    return 0.00001
def feature_cam(input, p=200.0, color_map=cv2.COLORMAP_JET):
    """Collapse a feature-map batch (N, H, W, C) to colorized heatmaps (N, H, W, 3):
    channels are summed, min-max normalized, scaled by p, then cv2-colormapped."""
    output = np.zeros((input.shape[0], input.shape[1], input.shape[2], 3))
    for j in range(input.shape[0]):
        # Sum all channels of sample j into one (1, H, W, 1) map.
        sum = np.zeros((1, input.shape[1], input.shape[2], 1))
        for i in range(input.shape[3]):
            sum += input[j:j + 1, :, :, i:i + 1]
        img = sum - np.min(sum)
        img /= np.max(img)
        cam = np.uint8(p * img)
        # temp = np.reshape(cam, [input.shape[1], input.shape[2], 1])
        temp = cv2.applyColorMap(cam[0], color_map)
        # print('%d: temp = '%j, temp[10,10,2])
        output[j] += temp
        # print('output = ', output[j,10,10,2])
    return output
def run_testing(sess):
    """Run the full test set through the restored graph.

    Relies on module-level globals: test_x/test_y, test_iterations/test_size,
    and the graph tensors (cross_entropy, accuracy, output_aa, output_f, x,
    y_, keep_prob, train_flag, correct_prediction) built in __main__.
    For the first batch it also dumps all 512 conv5 feature channels of the
    first 10 samples as heatmaps under ./temp/whale/.
    NOTE(review): `cv2` is not among this file's visible imports — confirm it
    resolves (e.g. via readthyroid's star import).

    Returns (mean accuracy, mean loss, tf summary, labels, per-sample
    correctness, stacked fc7 activations).
    """
    # id = 1
    acc = 0.0
    loss = 0.0
    pre_index = 0
    ep_y_, correct_p, out = [], [], []
    for it in range(test_iterations):
        batch_x = test_x[pre_index:pre_index + test_size]
        batch_y = test_y[pre_index:pre_index + test_size]
        pre_index = pre_index + test_size
        loss_, acc_, ep_output_aa, out_f, ep_y_1, correct_p1 = sess.run([cross_entropy, accuracy, output_aa, output_f, y_,
                                                                         correct_prediction],
                                                                        feed_dict={x: batch_x, y_: batch_y, keep_prob: 1.0,
                                                                                   train_flag: False})
        print(out_f.shape)
        if it == 0:
            out = ep_output_aa
            # Visualize conv5 features: per-channel min-max normalize, resize,
            # colormap, and blend 70/30 over the input image.
            for i in range(10):
                for j in range(512):
                    temp = (out_f[i, :, :, j]-np.min(out_f[i, :, :, j]))/(np.max(out_f[i, :, :, j])-np.min(out_f[i, :, :, j]))
                    temp = cv2.resize(temp, (128, 128), interpolation=cv2.INTER_CUBIC)
                    temp = np.uint8(temp*255)
                    # print(i, np.max(temp), np.min(temp))
                    cv2.imwrite('./temp/whale/%d/%d.png'%(i, j), 0.7 * cv2.applyColorMap(temp, cv2.COLORMAP_JET) + 0.3 * test_x[i] * 255)
        else:
            # Accumulate fc7 activations across batches.
            out = np.concatenate((out, ep_output_aa), axis=0)
        print(it, out.shape)
        # loss_, acc_, ep_output, ep_y_1, correct_p1, \
        # output_1_, output_2_, output_3_, output_4_ = sess.run([cross_entropy, accuracy, output, y_,
        #                              correct_prediction, output_1, output_2, output_3, output_4],
        #                             feed_dict={x: batch_x, y_: batch_y, keep_prob: 1.0,
        #                                        train_flag: False})
        # loss_, acc_, ep_output, ep_y_1, correct_p1, \
        # a1_t, a1_m, a1, a2_t, a2_m, a2, a3_t, a3_m, a3, out_ = sess.run([cross_entropy, accuracy, output, y_,
        #                                                          correct_prediction, a1_trunk, a1_mask, att_1,
        #                                                          a2_trunk, a2_mask, att_2, a3_trunk, a3_mask,
        #                                                          att_3, out],
        #                                                         feed_dict={x: batch_x, y_: batch_y,
        #                                                                    keep_prob: 1.0,
        #                                                                    train_flag: False})
        loss += loss_ / float(test_iterations)
        acc += acc_ / float(test_iterations)
        ep_y_.extend(ep_y_1)
        correct_p.extend(correct_p1)
    # cam_img_a1_t = feature_cam(a1_t, 200.0)
    # cam_img_a1_m = feature_cam(a1_m, 200.0, cv2.COLORMAP_JET)
    # cam_img_a1 = feature_cam(a1, 200.0)
    #
    # cam_img_a2_t = feature_cam(a2_t, 200.0)
    # cam_img_a2_m = feature_cam(a2_m, 200.0, cv2.COLORMAP_JET)
    # cam_img_a2 = feature_cam(a2, 200.0)
    #
    # cam_img_a3_t = feature_cam(a3_t, 200.0)
    # cam_img_a3_m = feature_cam(a3_m, 200.0, cv2.COLORMAP_JET)
    # cam_img_a3 = feature_cam(a3, 200.0)
    #
    # cam_o = feature_cam(out_, 200.0)
    # half = a3_t * a3_m + a3_t
    # print("a3_t.shape, a3_m.shape, half.shape", a3_t.shape, a3_m.shape, half.shape)
    # half = np.reshape(half, [14, 14, 1])
    # half_ = np.uint8(200 * half)
    # temp = cv2.applyColorMap(half_, cv2.COLORMAP_JET)
    # sub = a3 - a3_t
    # print('mean::::', np.mean(a3_t), np.mean(a3), np.mean(a3_m))
    # print("np.max(sub), np.mean(sub)", np.max(sub), np.mean(sub), np.sum(sub))
    # ep_y_ = np.array(ep_y_).reshape(-1, 2)
    # correct_p = np.array(correct_p).reshape(-1))
    summary = tf.Summary(value=[tf.Summary.Value(tag="test_loss", simple_value=loss),
                                tf.Summary.Value(tag="test_accuracy", simple_value=acc)])
    # return acc, loss, summary, ep_y_, correct_p, cam_img_a1_t, cam_img_a1_m, cam_img_a1,\
    #        cam_img_a2_t, cam_img_a2_m, cam_img_a2, cam_img_a3_t, cam_img_a3_m, cam_img_a3, cam_o
    # return acc, loss, summary, ep_y_, correct_p, cam_o1, cam_o2, cam_o3, cam_o4
    return acc, loss, summary, ep_y_, correct_p, out
def cam_write(corg, img, po, pc, name):
    """Blend the original image (weight po) with a resized CAM heatmap (weight pc)
    and write the result under the hard-coded output directory."""
    cam_img = cv2.resize(img, (112, 112))
    res = corg * po + cam_img * pc
    cv2.imwrite('/home/zrx/zrx/pyworkplace/output/Attention/' + name, res)
if __name__ == '__main__':
    # Load the 100-sample evaluation set and add the channel axis.
    org_x = np.load('img_100.npy')
    org_y = np.load('lab_100.npy')
    org_x = org_x[:, :, :, np.newaxis]
    org_x = segdata_preprocessing(org_x)
    # org_x, org_y = prepare_compdata()
    # org_x, org_y = prepare_compdata()
    acc_te, loss_te = [], []
    acc = 0.0
    graph = tf.Graph()
    sess = tf.Session(graph = graph)
    with graph.as_default():
        # define placeholder x, y_ , keep_prob, learning_rate
        # NOTE(review): image_size and class_num presumably come from the
        # readthyroid star import — confirm they resolve.
        x = tf.placeholder(tf.float32, [None, image_size, image_size, 1], name='x')
        y_ = tf.placeholder(tf.float32, [None, class_num], name='y_')
        keep_prob = tf.placeholder(tf.float32, name='keep_p')
        learning_rate = tf.placeholder(tf.float32, name='learning_r')
        train_flag = tf.placeholder(tf.bool, name='train_f')
        # build_network
        # VGG-16-style stack; the size comments track the feature map.
        # 112*112
        output = conv2d(x, 1, 64, 3)
        output = conv2d(output, 64, 64, 3)
        output = max_pool(output, 2, 2, "pool1")
        # 56*56
        output = conv2d(output, 64, 128, 3)
        output = conv2d(output, 128, 128, 3)
        output = max_pool(output, 2, 2, "pool2")
        # 28*28
        output = conv2d(output, 128, 256, 3)
        output = conv2d(output, 256, 256, 3)
        output = conv2d(output, 256, 256, 3)
        output = conv2d(output, 256, 256, 3)
        output = max_pool(output, 2, 2, "pool3")
        # 14*14
        output = conv2d(output, 256, 512, 3)
        output = conv2d(output, 512, 512, 3)
        output = conv2d(output, 512, 512, 3)
        output = conv2d(output, 512, 512, 3)
        output = max_pool(output, 2, 2, 'pool4')
        # 7*7
        output = conv2d(output, 512, 512, 3)
        output = conv2d(output, 512, 512, 3)
        output = conv2d(output, 512, 512, 3)
        output_f = conv2d(output, 512, 512, 3)
        # output = max_pool(output, 2, 2, 'pool5')
        # output = tf.contrib.layers.flatten(output)
        output = tf.reshape(output_f, [-1, 8*8 * 512])
        print("=====》1", y_.shape, output.shape)
        W_fc1 = tf.get_variable('fc1', shape=[8*8 * 512, 4096], initializer=tf.contrib.keras.initializers.he_normal())
        b_fc1 = bias_variable([4096])
        output = tf.nn.relu(batch_norm(tf.matmul(output, W_fc1) + b_fc1))
        output = tf.nn.dropout(output, keep_prob)
        W_fc2 = tf.get_variable('fc7', shape=[4096, 4096], initializer=tf.contrib.keras.initializers.he_normal())
        b_fc2 = bias_variable([4096])
        # output_aa (fc7 activations) is what run_testing stacks and saves below.
        output_aa = tf.nn.relu(batch_norm(tf.matmul(output, W_fc2) + b_fc2))
        output = tf.nn.dropout(output_aa, keep_prob)
        W_fc3 = tf.get_variable('fc3', shape=[4096, class_num], initializer=tf.contrib.keras.initializers.he_normal())
        b_fc3 = bias_variable([class_num])
        output = tf.nn.relu(batch_norm(tf.matmul(output, W_fc3) + b_fc3))
        # output = tf.reshape(output,[-1,10])
        # loss function: cross_entropy
        # train_step: training operation
        print("=====》2", y_.shape, output.shape)
        cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=output))
        l2 = tf.add_n([tf.nn.l2_loss(var) for var in tf.trainable_variables()])
        train_step = tf.train.MomentumOptimizer(learning_rate, momentum_rate, use_nesterov=True). \
            minimize(cross_entropy + l2 * weight_decay)
        correct_prediction = tf.equal(tf.argmax(output, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    with tf.Session(graph=graph) as sess:
        saver = tf.train.Saver()
        sess.run(tf.global_variables_initializer())
        # Restore pretrained weights before evaluating.
        saver.restore(sess, tf.train.latest_checkpoint('./model_z/g_VGG_10id/'))
        print('shape:', org_x.shape, org_y.shape)
        # test_x = segdata_preprocessing()
        # test_x = test_normalization(org_x)
        test_x = org_x
        test_y = org_y
        print('test_x.shape, test_y.shape:', test_x.shape, test_y.shape)
        ep_output, ep_y_, correct_p = [], [], []
        start_time = time.time()
        # val_acc, val_loss, test_summary, ep_y_, correct_p, cam_o1, cam_o2, cam_o3, cam_o4 = run_testing(sess)
        # val_acc, val_loss, test_summary, ep_y_, correct_p, max1_1_, max1_2_, max1_3_, max2_1_, max_res_ = run_testing(sess)
        # val_acc, val_loss, test_summary, ep_y_, correct_p, cam_img_a1_t, cam_img_a1_m, cam_img_a1, \
        # cam_img_a2_t, cam_img_a2_m, cam_img_a2, cam_img_a3_t, cam_img_a3_m, cam_img_a3, cam_out = run_testing(sess)
        val_acc, val_loss, test_summary, ep_y_, correct_p, out = run_testing(sess)
        print('out shape:', out.shape)
        # Persist the fc7 features for downstream use.
        np.save('features4096.npy', out)
        print('--org_x.shape', org_x.shape, org_y.shape)
        # print('--cam_img_a1_t.shape', cam_img_a3_t.shape)
        # for i in range(55):
        #     if correct_p[i] == True:
        #         flag = 'T'
        #     else:
        #         flag = 'F'
        #
        #     org = np.reshape(org_x[i], [112, 112, 3])
        #     org = np.uint8(org)
        #     cv2.imwrite('/home/zrx/zrx/pyworkplace/output/Attention/%d_%s.png' % (i, flag), org)
        #
        #     cam_write(org, cam_img_a1_t[i], 0.6, 0.4, '%d_a1_t.png' % i)
        #     cam_write(org, cam_img_a1_m[i], 0.6, 0.4, '%d_a1_m.png' % i)
        #     cam_write(org, cam_img_a1[i], 0.6, 0.4, '%d_a1.png' % i)
        #
        #     cam_write(org, cam_img_a2_t[i], 0.6, 0.4, '%d_a2_t.png' % i)
        #     cam_write(org, cam_img_a2_m[i], 0.6, 0.4, '%d_a2_m.png' % i)
        #     cam_write(org, cam_img_a2[i], 0.6, 0.4, '%d_a2.png' % i)
        #
        #     cam_write(org, cam_img_a3_t[i], 0.6, 0.4, '%d_a3_t.png' % i)
        #     cam_write(org, cam_img_a3_m[i], 0.6, 0.4, '%d_a3_m.png' % i)
        #     cam_write(org, cam_img_a3[i], 0.6, 0.4, '%d_a3.png' % i)
        #     cam_write(org, cam_out[i], 0.6, 0.4, '%d_out.png' % i)
        #     print('........%d........'%i)
        #     print(np.mean(cam_img_a3_t[i]), np.max(cam_img_a3_t[i]), np.min(cam_img_a3_t[i]))
        #     print(np.mean(cam_img_a3[i]), np.max(cam_img_a3[i]), np.min(cam_img_a3[i]))
        #     sub = cam_img_a3_t[i] - cam_img_a3[i]
        #     print(type(sub))
        #     print(sub.shape)
        #     print(np.mean(sub), np.max(sub), np.min(sub))
        #     sub = cv2.resize(sub, (112, 112))
        #     cv2.imwrite('/home/zrx/zrx/pyworkplace/output/Attention/%d_sub.png'%i, sub)
        #     acc += val_acc
        #
        # acc /= float(55)
        # print("cost_time: %ds, test_acc: %.4f" % (int(time.time() - start_time), acc))
        # Confusion-matrix summary over the whole test set.
        TP, FP, FN, TN = TP_def(correct_p, ep_y_)
        print('TP, FP, FN, TN', TP, FP, FN, TN)
        FPR = FP / (FP + TN)
        FNR = FN / (TP + FN)
        print('FPR, FNR', FPR, FNR)
        print('acc:', val_acc)
        # print(':max1_1_:', max1_1_)
        # print('max1_2_:', max1_2_)
        # print('max1_3_:', max1_3_)
        # print('max_2_1_:', max2_1_)
        # print('max_res_:', max_res_)
|
{"hexsha": "9850ad17028fa7edc4c6430bd8006797f2c6f8ae", "size": 17190, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/g_load_vgg.py", "max_stars_repo_name": "Stomach-ache/semi-MList", "max_stars_repo_head_hexsha": "ce694a4eb831c3e7d1d727678b8b46d71efc628e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "code/g_load_vgg.py", "max_issues_repo_name": "Stomach-ache/semi-MList", "max_issues_repo_head_hexsha": "ce694a4eb831c3e7d1d727678b8b46d71efc628e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/g_load_vgg.py", "max_forks_repo_name": "Stomach-ache/semi-MList", "max_forks_repo_head_hexsha": "ce694a4eb831c3e7d1d727678b8b46d71efc628e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.6550868486, "max_line_length": 137, "alphanum_fraction": 0.5741128563, "include": true, "reason": "import numpy", "num_tokens": 5165}
|
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from matplotlib.ticker import PercentFormatter

# Histogram of the number of benchmarks per project, drawn with absolute
# counts (left axis) and relative counts (right axis) on twin y-axes,
# saved as a PDF, followed by summary statistics for the thesis text.
data = pd.read_csv('C:\\Users\\stewue\\OneDrive - Wuersten\\Uni\\19_HS\\Masterarbeit\\Repo\\Evaluation\\RQ1_Results\\aggregated\\numberofbenchmarks.csv', dtype='str')
numberOf = data['benchmarks'].astype(int)

# Keep only projects that actually contain benchmarks.
# (Renamed from `filter` / `all`, which shadowed the Python builtins.)
benchmark_counts = numberOf[numberOf > 0]
hist_counts, bin_edges = np.histogram(benchmark_counts, bins=[1, 5, 10, 25, 50, 75, 100, 200, 600])
label = ('1-4', '5-9', '10-24', '25-49', '50-74', '75-99', '100-199', '200+')
x = np.arange(8)

fig = plt.figure()
ymax = 275
total = 753  # NOTE(review): hard-coded project total -- confirm it matches the CSV.

# absolute counts (left axis)
ax1 = fig.add_subplot()
ax1.bar(x, hist_counts)
ax1.set_ylim(0, ymax)
ax1.set_ylabel('# projects')

# relative counts (right axis): same bars scaled by the project total
ax2 = ax1.twinx()
plt.gca().yaxis.set_major_formatter(PercentFormatter(1, 0))
ax2.bar(x, np.divide(hist_counts, total))
ax2.set_ylim(0, ymax / total)
ax2.set_ylabel('# projects [%]')

ax1.set_xlabel('# benchmarks')
plt.xticks(x, label)
plt.tight_layout()
#plt.show()
plt.savefig('C:\\Users\\stewue\\OneDrive - Wuersten\\Uni\\19_HS\\Masterarbeit\\Repo\\Evaluation\\RQ1_Results\\images\\number_of_benchmarks_per_project.pdf')

# Summary statistics.
print("average: " + str(np.average(benchmark_counts)))
print("std: " + str(np.std(benchmark_counts)))
print("median: " + str(np.median(benchmark_counts)))
print("total: " + str(total))
print("max: " + str(np.max(benchmark_counts)))

s10 = benchmark_counts[benchmark_counts < 10]
print("<10: " + str(s10.size / total))
print("<10: " + str(s10.size))

l50 = benchmark_counts[benchmark_counts >= 50]
print(">=50: " + str(l50.size / total))
print(">=50: " + str(l50.size))
|
{"hexsha": "5530349f6bf0569e4941c3b5590ca9908cafd079", "size": 1497, "ext": "py", "lang": "Python", "max_stars_repo_path": "RQ1_Python/number_of_benchmarks.py", "max_stars_repo_name": "stewue/masterthesis-evaluation", "max_stars_repo_head_hexsha": "0fb825e196f386c628f95524aa9c80af2126617e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "RQ1_Python/number_of_benchmarks.py", "max_issues_repo_name": "stewue/masterthesis-evaluation", "max_issues_repo_head_hexsha": "0fb825e196f386c628f95524aa9c80af2126617e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "RQ1_Python/number_of_benchmarks.py", "max_forks_repo_name": "stewue/masterthesis-evaluation", "max_forks_repo_head_hexsha": "0fb825e196f386c628f95524aa9c80af2126617e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.3529411765, "max_line_length": 165, "alphanum_fraction": 0.6800267201, "include": true, "reason": "import numpy", "num_tokens": 477}
|
A small smoke test for the selection functions `arginf` and `argsup`
from EscardoOliva.SelectionFunction.

> module EscardoOliva.TestSelectionFunction

> import EscardoOliva.SelectionFunction

> %default total
> %access public export
> %auto_implicits off

A sample list of integers to select from.

> xs : List Int
> xs = [0,3,2,-1,0,9,-7]

`min` is the element of `xs` minimising the identity function.

> min : Int
> min = arginf xs id

`max` is the element of `xs` maximising the identity function.

> max : Int
> max = argsup xs id
|
{"hexsha": "b6cfc59d6ab92ebc98770bd7bc5ebd04b6f8b4d8", "size": 260, "ext": "lidr", "lang": "Idris", "max_stars_repo_path": "EscardoOliva/TestSelectionFunction.lidr", "max_stars_repo_name": "zenntenn/IdrisLibs", "max_stars_repo_head_hexsha": "a81c3674273a4658cd205e9bd1b6f95163cefc3e", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "EscardoOliva/TestSelectionFunction.lidr", "max_issues_repo_name": "zenntenn/IdrisLibs", "max_issues_repo_head_hexsha": "a81c3674273a4658cd205e9bd1b6f95163cefc3e", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "EscardoOliva/TestSelectionFunction.lidr", "max_forks_repo_name": "zenntenn/IdrisLibs", "max_forks_repo_head_hexsha": "a81c3674273a4658cd205e9bd1b6f95163cefc3e", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 14.4444444444, "max_line_length": 43, "alphanum_fraction": 0.6807692308, "num_tokens": 88}
|
! -*- Mode: Fortran; -*-
!
! (C) 2014 by Argonne National Laboratory.
! See COPYRIGHT in top-level directory.
!
subroutine MPI_Comm_spawn_multiple_f08(count, array_of_commands, array_of_argv, array_of_maxprocs, &
    array_of_info, root, comm, intercomm, array_of_errcodes, ierror)
    ! Fortran 2008 (mpi_f08) wrapper for MPI_Comm_spawn_multiple: converts the
    ! Fortran arguments to their C representations, forwards to the C shim
    ! MPIR_Comm_spawn_multiple_c, and copies the results back.
    use, intrinsic :: iso_c_binding, only : c_int, c_char, c_ptr, c_loc, c_associated
    use :: mpi_f08, only : MPI_Info, MPI_Comm
    use :: mpi_f08, only : MPI_ARGVS_NULL, MPI_ERRCODES_IGNORE
    use :: mpi_f08, only : MPIR_C_MPI_ARGVS_NULL, MPIR_C_MPI_ERRCODES_IGNORE
    use :: mpi_c_interface, only : c_Info, c_Comm
    use :: mpi_c_interface, only : MPIR_Comm_spawn_multiple_c
    implicit none
    integer, intent(in) :: count
    character(len=*), intent(in), target :: array_of_commands(*)
    character(len=*), intent(in), target :: array_of_argv(count, *)
    integer, intent(in) :: array_of_maxprocs(*)
    type(MPI_Info), intent(in) :: array_of_info(*)
    integer, intent(in) :: root
    type(MPI_Comm), intent(in) :: comm
    type(MPI_Comm), intent(out) :: intercomm
    integer, target :: array_of_errcodes(*)
    integer, optional, intent(out) :: ierror
    ! C-kind shadow copies, used only on the slow path below where the
    ! Fortran default integer kind differs from c_int.
    integer(c_int) :: count_c
    integer(c_int) :: array_of_maxprocs_c(count)
    integer(c_Info) :: array_of_info_c(count)
    integer(c_int) :: root_c
    integer(c_Comm) :: comm_c
    integer(c_Comm) :: intercomm_c
    ! One error-code slot per spawned process, summed over all commands.
    integer(c_int), target :: array_of_errcodes_c(sum(array_of_maxprocs(1:count)))
    integer(c_int) :: ierror_c
    integer :: length
    type(c_ptr) :: array_of_argv_cptr
    type(c_ptr) :: array_of_errcodes_cptr
    logical :: has_errcodes_ignore = .false.
    ! Map the Fortran sentinel MPI_ARGVS_NULL to its C counterpart.
    array_of_argv_cptr = c_loc(array_of_argv)
    if (c_associated(array_of_argv_cptr, c_loc(MPI_ARGVS_NULL))) then
        array_of_argv_cptr = MPIR_C_MPI_ARGVS_NULL
    end if
    ! Likewise for MPI_ERRCODES_IGNORE; remember it so the copy-back of
    ! error codes is skipped below.
    array_of_errcodes_cptr = c_loc(array_of_errcodes)
    if (c_associated(array_of_errcodes_cptr, c_loc(MPI_ERRCODES_IGNORE))) then
        array_of_errcodes_cptr = MPIR_C_MPI_ERRCODES_IGNORE
        has_errcodes_ignore = .true.
    end if
    if (c_int == kind(0)) then
        ! Fast path: default integer kind equals c_int, so integer arrays and
        ! handles can be passed straight through without copies.
        ierror_c = MPIR_Comm_spawn_multiple_c(count, c_loc(array_of_commands), array_of_argv_cptr, &
            array_of_maxprocs, array_of_info(1:count)%MPI_VAL, root, comm%MPI_VAL, intercomm%MPI_VAL, &
            array_of_errcodes_cptr, len(array_of_commands), len(array_of_argv))
    else
        ! Slow path: convert every argument to c_int kind, call, copy back.
        count_c = count
        array_of_maxprocs_c(1:count) = array_of_maxprocs(1:count)
        array_of_info_c = array_of_info(1:count)%MPI_VAL
        root_c = root
        comm_c = comm%MPI_VAL
        if (.not. has_errcodes_ignore) then
            array_of_errcodes_cptr = c_loc(array_of_errcodes_c)
        end if
        ierror_c = MPIR_Comm_spawn_multiple_c(count_c, c_loc(array_of_commands), array_of_argv_cptr, &
            array_of_maxprocs_c, array_of_info_c, root_c, comm_c, intercomm_c, array_of_errcodes_cptr, &
            len(array_of_commands), len(array_of_argv))
        if (.not. has_errcodes_ignore) then
            length = sum(array_of_maxprocs(1:count))
            array_of_errcodes(1:length) = array_of_errcodes_c
        end if
        intercomm%MPI_VAL = intercomm_c
    end if
    if (present(ierror)) ierror = ierror_c
end subroutine MPI_Comm_spawn_multiple_f08
|
{"hexsha": "ff6d80c2109a76cd86a0a971a39058cb4b53180d", "size": 3295, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/binding/fortran/use_mpi_f08/wrappers_f/comm_spawn_multiple_f08ts.f90", "max_stars_repo_name": "humairakamal/FG-MPI", "max_stars_repo_head_hexsha": "a0181ecde8a97e60ab6721a5e9a74dc7e7f77e77", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2015-12-31T03:15:50.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-15T00:54:47.000Z", "max_issues_repo_path": "mpich-3.3/src/binding/fortran/use_mpi_f08/wrappers_f/comm_spawn_multiple_f08ts.f90", "max_issues_repo_name": "ucd-plse/mpi-error-prop", "max_issues_repo_head_hexsha": "4367df88bcdc4d82c9a65b181d0e639d04962503", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2015-12-30T22:28:15.000Z", "max_issues_repo_issues_event_max_datetime": "2017-05-16T19:17:42.000Z", "max_forks_repo_path": "mpich-3.3/src/binding/fortran/use_mpi_f08/wrappers_f/comm_spawn_multiple_f08ts.f90", "max_forks_repo_name": "ucd-plse/mpi-error-prop", "max_forks_repo_head_hexsha": "4367df88bcdc4d82c9a65b181d0e639d04962503", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2015-12-29T22:14:56.000Z", "max_forks_repo_forks_event_max_datetime": "2019-06-13T07:23:35.000Z", "avg_line_length": 40.1829268293, "max_line_length": 105, "alphanum_fraction": 0.7013657056, "num_tokens": 911}
|
module CommonUtils
# Plotting helpers shared by the scalar-field DEM examples.
using Caesar
using Images
using FileIO
using Cairo
using RoMEPlotting
export plotSLAM2D_KeyAndSim, plotHMSLevel
# Forward old API names to their replacements in RoME / RoMEPlotting.
@deprecate buildDEMSimulated(w...;kw...) RoME.generateField_CanyonDEM(w...;kw...)
@deprecate getSampleDEM(w...;kw...) RoME.generateField_CanyonDEM(w...;kw...)
@deprecate loadDEM!(w...;kw...) RoME._buildGraphScalarField(w...;kw...)
@deprecate plotSLAM2D_KeyAndSim(w...;kw...) RoMEPlotting.plotSLAM2D_KeyAndRef(w...;kw...)
## plot some of the ROIs
# Plot the :simulated PPE position of variable `lbl` overlaid with the KDE of
# the first :DEM-tagged factor attached to it.
# NOTE(review): assumes `lbl` has at least one factor tagged :DEM -- the [1]
# index below errors otherwise; confirm with callers.
function plotHMSLevel(fg::AbstractDFG,
                      lbl::Symbol;
                      coord=Coord.cartesian(xmin=-9000, xmax=9000, ymin=-9000,ymax=9000) )
  #
  # Suggested (point-estimate) simulated position of the variable.
  loc = getPPE(fg, lbl, :simulated).suggested
  plp = plot( x=[loc[1]], y=[loc[2];],
              Geom.point,
              Guide.title(string(lbl)),
              Theme(default_color=colorant"purple") )
  #
  # First factor on `lbl` carrying the :DEM tag; plot its measurement density.
  fct = intersect(ls(fg, lbl), lsf(fg, tags=[:DEM;]))[1]
  plk = getFactorType(fg[fct]).Z.densityFnc |> plotKDE;
  #
  # Overlay the density layers onto the point plot and fix the axis extents.
  union!(plp.layers, plk.layers);
  plp.coord = coord
  plp
end
end
|
{"hexsha": "b64d19a5394b0c37409182f27599009d87a13a4e", "size": 1060, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "examples/dev/scalar/CommonUtils.jl", "max_stars_repo_name": "nkhedekar/Caesar.jl", "max_stars_repo_head_hexsha": "647ab1a9a068e9eb9ff2de36e12e86b7b77878bb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 122, "max_stars_repo_stars_event_min_datetime": "2018-07-02T19:05:36.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-09T14:18:07.000Z", "max_issues_repo_path": "examples/dev/scalar/CommonUtils.jl", "max_issues_repo_name": "nkhedekar/Caesar.jl", "max_issues_repo_head_hexsha": "647ab1a9a068e9eb9ff2de36e12e86b7b77878bb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 245, "max_issues_repo_issues_event_min_datetime": "2018-06-01T15:04:50.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-29T05:53:32.000Z", "max_forks_repo_path": "examples/dev/scalar/CommonUtils.jl", "max_forks_repo_name": "nkhedekar/Caesar.jl", "max_forks_repo_head_hexsha": "647ab1a9a068e9eb9ff2de36e12e86b7b77878bb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 15, "max_forks_repo_forks_event_min_datetime": "2018-06-01T13:22:16.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-09T12:00:59.000Z", "avg_line_length": 25.2380952381, "max_line_length": 91, "alphanum_fraction": 0.6396226415, "num_tokens": 345}
|
'''
Navigation Network, Written by Xiao
For robot localization in a dynamic environment.
'''
import numpy as np
from lib.params import ADJACENT_NODES_SHIFT_GRID

# Discrete action space and its one-hot encodings.
ACTION_ENCODING = {
    'left': np.array([1, 0, 0]),
    'right': np.array([0, 1, 0]),
    'forward': np.array([0, 0, 1]),
}
ACTION_CLASSNUM = len(ACTION_ENCODING)  # dimension of the action space [left, right, forward]

# Per-move grid-step limits.
HORIZONTAL_MOVE_MAX = ADJACENT_NODES_SHIFT_GRID  # max grid steps for a horizontal move
FORWARD_MOVE_MAX = 4  # max grid steps for a forward move

NUM_WORKERS = 0  # dataloader workers kept at zero because triplets share files
# ------------------------------------------------------------------------------
'''
NUM_EPOCHS, STEP_SIZE, GAMMA, lr0 satisfies
As lr_final_epoch = lr0*power(GAMMA, ceil(NUM_EPOCHS / STEP_SIZE)-1)
if we want lr_final_epoch <= 1e-4
this constraint will regulate the corresponding value of other params
'''
# Training setting: batch size per backbone network.
BATCH_SIZE = {
    'rnet': 14,
    'resnet50': 26,
    'vgg16': 19,
    'resnext50_32x4d': 20,
    'googlenet': 400,
}
NUM_EPOCHS = 60
# --------------------------------------------
# Optimizer hyper-parameters.
LEARNING_RATE = 0.01
MOMENTUM = 0.9
# --------------------------------------------
# LR schedule: multiply the learning rate by GAMMA every STEP_SIZE epochs.
STEP_SIZE = 7
GAMMA = 0.5
# ------------------------------------------------------------------------------
# Dataset split fractions (train / validation / test).
TRAIN_FRACTION = 0.7
VAL_FRACTION = 0.15
TEST_FRACTION = 0.15
# ------------------------------------------------------------------------------
# Filesystem locations for data and checkpoints.
DATA_DIR = './Network/datasets'
TRAJECTORY_FILE_NAME = 'trajectories_fraction_0.001.npy'  # 19043 pairs
CHECKPOINTS_DIR = './Network/navigation_network/checkpoints/'
# ------------------------------------------------------------------------------
|
{"hexsha": "e31b2422fabc05a7b29a0b38710d197ec531787b", "size": 2094, "ext": "py", "lang": "Python", "max_stars_repo_path": "Network/navigation_network/params.py", "max_stars_repo_name": "XiaoLiSean/Cognitive-Map", "max_stars_repo_head_hexsha": "6b2019e5b3a46902b06c8d5d1e86b39425042de9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Network/navigation_network/params.py", "max_issues_repo_name": "XiaoLiSean/Cognitive-Map", "max_issues_repo_head_hexsha": "6b2019e5b3a46902b06c8d5d1e86b39425042de9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Network/navigation_network/params.py", "max_forks_repo_name": "XiaoLiSean/Cognitive-Map", "max_forks_repo_head_hexsha": "6b2019e5b3a46902b06c8d5d1e86b39425042de9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-11-04T06:25:31.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-04T06:25:31.000Z", "avg_line_length": 52.35, "max_line_length": 133, "alphanum_fraction": 0.61747851, "include": true, "reason": "import numpy", "num_tokens": 509}
|
# Code made for Sergio Andrés Díaz Ariza
# 05 Abril 2021
# License MIT
# Introduction to Control: Python Program Assignment 1
#
# Step and impulse responses of first-order transfer functions, computed
# with the `control` and `scipy.signal` packages and cross-checked with a
# symbolic inverse Laplace transform from sympy.
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
import control as co
import sympy as sp
import seaborn as sns
sns.set()
# Define transfer functions: G1(s) = 1/(s + 2), G2(s) = 1/(s + 10)
G1 = co.tf([1], [1, 2])
G2 = co.tf([1], [1, 10])
# Convert the transfer functions to state-space form
G1_ss = co.tf2ss(G1)
G2_ss = co.tf2ss(G2)
# Looking at time responses
# Impulse response of G1
t = np.linspace(0, 4, 1000)
t1, y1 = co.impulse_response(G1, t)
plt.figure(1)
plt.plot(t1, y1)
plt.xlabel("time $[s]$")
plt.ylabel("Amplitude")
# Step responses of G1 and G2
t = np.linspace(0, 4, 1000)
t2 = np.linspace(0, 4, 1000)
t1, y1 = co.step_response(G1, t)
t2, y2 = co.step_response(G2, t2)
plt.figure(2)
plt.plot(t1, y1, label='G1')
plt.plot(t2, y2, label='G2')
plt.xlabel("time $[s]$")
plt.ylabel("Amplitude")
plt.legend()
####################################################
# Using Scipy and compare
# FIX: scipy.signal.step2 was deprecated in SciPy 1.1 and removed in
# SciPy 1.8 -- signal.step computes the same step response.
lti = signal.lti([1], [1, 2])
lti2 = signal.lti([1], [1, 10])
t3, y3 = signal.step(lti)
t4, y4 = signal.step(lti2)
plt.figure(3)
plt.plot(t3, y3)
plt.plot(t4, y4)
plt.xlabel('Time [s]')
plt.ylabel('Amplitude')
plt.title('Step response for 1. Order ')
##########################################################
# Step 1.2 Point 1 of Assignment
# NOTE(review): the denominators give time constants tau = 6 and tau = 12,
# but the legend labels say tau = 5 and tau = 10 -- confirm intended values.
lti5 = signal.lti([12], [6, 1])
lti6 = signal.lti([12], [12, 1])
t5, y5 = signal.step(lti5)
t6, y6 = signal.step(lti6)
plt.figure(4)
plt.plot(t5, y5, label=r'$\tau=5$')
plt.plot(t6, y6, label=r'$\tau=10$')
plt.xlabel('Time [s]')
plt.ylabel('Amplitude')
plt.title('Step response for 1. Order ')
plt.legend()
##########################################################
# Step 1.2 Point 1 of Assignment
lti7 = signal.lti([4], [7, 1])
t7, y7 = signal.step(lti7)
plt.figure(5)
plt.plot(t7, y7, label=r'$\tau=10$')
plt.xlabel('Time [s]')
plt.ylabel('Amplitude')
plt.title('Step response for 1. Order ')
# Use sympy to find the inverse Laplace transform of F(s) = 8/(6.4 s + 1)
s = sp.symbols('s')
t = sp.symbols('t')
F = 8/(6.4*s+1)
F_1 = sp.inverse_laplace_transform(F, s, t)
print(F_1)
plt.show()
|
{"hexsha": "fe02a501afa053332d8f21f43074b9f411460beb", "size": 2084, "ext": "py", "lang": "Python", "max_stars_repo_path": "First_Order/2.4_Taller_1_Sistemas_Lneales.py", "max_stars_repo_name": "Daz-Riza-Seriog/I_Control", "max_stars_repo_head_hexsha": "d4568f67d735fa5dab619e006fd29c31ce5248ea", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-03-19T00:15:21.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-19T00:15:21.000Z", "max_issues_repo_path": "First_Order/2.4_Taller_1_Sistemas_Lneales.py", "max_issues_repo_name": "Daz-Riza-Seriog/I_Control", "max_issues_repo_head_hexsha": "d4568f67d735fa5dab619e006fd29c31ce5248ea", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "First_Order/2.4_Taller_1_Sistemas_Lneales.py", "max_forks_repo_name": "Daz-Riza-Seriog/I_Control", "max_forks_repo_head_hexsha": "d4568f67d735fa5dab619e006fd29c31ce5248ea", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 19.6603773585, "max_line_length": 58, "alphanum_fraction": 0.6247600768, "include": true, "reason": "import numpy,from scipy,import sympy", "num_tokens": 681}
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for training utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras.engine import training_utils
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.platform import test
class ModelInputsTest(test.TestCase):
  """Tests for `training_utils.ModelInputs`.

  Each case checks that input names are inferred from the input structure and
  that `get_symbolic_inputs` yields tensors; `*_eager` variants repeat the
  check under `context.eager_mode()`.
  """

  def test_single_thing(self):
    """A single array yields one input named 'input_1'."""
    a = np.ones(10)
    model_inputs = training_utils.ModelInputs(a)
    self.assertEqual(['input_1'], model_inputs.get_input_names())
    vals = model_inputs.get_symbolic_inputs()
    self.assertTrue(tensor_util.is_tensor(vals))
    # return_single_as_list wraps the single tensor in a length-1 list.
    vals = model_inputs.get_symbolic_inputs(return_single_as_list=True)
    self.assertEqual(1, len(vals))
    self.assertTrue(tensor_util.is_tensor(vals[0]))

  def test_single_thing_eager(self):
    """Single-array input in eager mode yields symbolic tensors."""
    with context.eager_mode():
      a = np.ones(10)
      model_inputs = training_utils.ModelInputs(a)
      self.assertEqual(['input_1'], model_inputs.get_input_names())
      val = model_inputs.get_symbolic_inputs()
      self.assertTrue(tf_utils.is_symbolic_tensor(val))
      vals = model_inputs.get_symbolic_inputs(return_single_as_list=True)
      self.assertEqual(1, len(vals))
      self.assertTrue(tf_utils.is_symbolic_tensor(vals[0]))

  def test_list(self):
    """A list of arrays yields sequentially numbered input names."""
    a = [np.ones(10), np.ones(20)]
    model_inputs = training_utils.ModelInputs(a)
    self.assertEqual(['input_1', 'input_2'], model_inputs.get_input_names())
    vals = model_inputs.get_symbolic_inputs()
    self.assertTrue(tensor_util.is_tensor(vals[0]))
    self.assertTrue(tensor_util.is_tensor(vals[1]))

  def test_list_eager(self):
    """List input in eager mode yields symbolic tensors."""
    with context.eager_mode():
      a = [np.ones(10), np.ones(20)]
      model_inputs = training_utils.ModelInputs(a)
      self.assertEqual(['input_1', 'input_2'], model_inputs.get_input_names())
      vals = model_inputs.get_symbolic_inputs()
      self.assertTrue(tf_utils.is_symbolic_tensor(vals[0]))
      self.assertTrue(tf_utils.is_symbolic_tensor(vals[1]))

  def test_dict(self):
    """Dict input: names are the dict keys, returned sorted."""
    a = {'b': np.ones(10), 'a': np.ones(20)}
    model_inputs = training_utils.ModelInputs(a)
    self.assertEqual(['a', 'b'], model_inputs.get_input_names())
    vals = model_inputs.get_symbolic_inputs()
    self.assertTrue(tensor_util.is_tensor(vals['a']))
    self.assertTrue(tensor_util.is_tensor(vals['b']))

  def test_dict_eager(self):
    """Dict input in eager mode yields symbolic tensors keyed by name."""
    with context.eager_mode():
      a = {'b': np.ones(10), 'a': np.ones(20)}
      model_inputs = training_utils.ModelInputs(a)
      self.assertEqual(['a', 'b'], model_inputs.get_input_names())
      vals = model_inputs.get_symbolic_inputs()
      self.assertTrue(tf_utils.is_symbolic_tensor(vals['a']))
      self.assertTrue(tf_utils.is_symbolic_tensor(vals['b']))


if __name__ == '__main__':
  test.main()
|
{"hexsha": "44ea23998fe6f3b614fb09b9667add179cf3fd85", "size": 3567, "ext": "py", "lang": "Python", "max_stars_repo_path": "tensorflow/python/keras/engine/training_utils_test.py", "max_stars_repo_name": "aeverall/tensorflow", "max_stars_repo_head_hexsha": "7992bf97711919f56f80bff9e5510cead4ab2095", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2018-12-12T23:33:05.000Z", "max_stars_repo_stars_event_max_datetime": "2019-02-26T07:20:22.000Z", "max_issues_repo_path": "tensorflow/python/keras/engine/training_utils_test.py", "max_issues_repo_name": "aeverall/tensorflow", "max_issues_repo_head_hexsha": "7992bf97711919f56f80bff9e5510cead4ab2095", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tensorflow/python/keras/engine/training_utils_test.py", "max_forks_repo_name": "aeverall/tensorflow", "max_forks_repo_head_hexsha": "7992bf97711919f56f80bff9e5510cead4ab2095", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-10-11T00:17:03.000Z", "max_forks_repo_forks_event_max_datetime": "2020-05-23T18:59:45.000Z", "avg_line_length": 39.6333333333, "max_line_length": 80, "alphanum_fraction": 0.7204934118, "include": true, "reason": "import numpy", "num_tokens": 785}
|
from collections import defaultdict
import dolfin as df
import numpy as np
from xii.meshing.embedded_mesh import EmbeddedMesh
class SubDomainMesh(EmbeddedMesh):
    '''Embedded mesh built from a cell function: the submesh of cells
    whose marker value is in `markers`.'''
    def __init__(self, marking_function, markers):
        # The marking function must be defined on cells, i.e. its dimension
        # must equal the topological dimension of its mesh.
        assert marking_function.dim() == marking_function.mesh().topology().dim()
        EmbeddedMesh.__init__(self, marking_function, markers)
def OverlapMesh(mesh1, mesh2, tol=1E-14):
    '''
    Given two subdomain meshes which share a single unique common tag in
    their marking function we create here a mesh of cells corresponding
    to that tag. The new mesh cells can be mapped to mesh1/2 cells.

    `tol` is the relative tolerance (w.r.t. the overlap mesh extent) used
    when matching vertices of the overlap mesh to vertices of mesh2.
    '''
    assert isinstance(mesh1, SubDomainMesh), type(mesh1)
    assert isinstance(mesh2, SubDomainMesh)
    # Both meshes must share topological and geometric dimension.
    tdim = mesh1.topology().dim()
    assert mesh2.topology().dim() == tdim
    assert mesh1.geometry().dim() == mesh2.geometry().dim()
    # Overlap has to be unique as well (for now): exactly one tag common
    # to both marking functions identifies the overlap region.
    tags1 = set(mesh1.marking_function.array())
    tags2 = set(mesh2.marking_function.array())
    common_tags = tags1 & tags2
    assert len(common_tags) == 1
    tag = int(common_tags.pop())
    # A bit of wishful thinking here: create overlap mesh from mesh1
    # and hope it makes sense for mesh2 as well
    emesh = SubDomainMesh(mesh1.marking_function, tag)
    # Now we have a map from cells of emesh to cells in mesh1. Let's
    # build a map for mesh2 similar to `build_embedding_map`
    tree = mesh2.bounding_box_tree()
    # Localize first the vertex in mesh2
    mesh2.init(tdim)  # Then we want to find a cell in mesh2 which
    mesh2.init(tdim, 0)  # has the same vertices
    c2v = mesh2.topology()(tdim, 0)
    mesh_x = mesh2.coordinates()
    emesh_x = emesh.coordinates()
    # Get some idea of mesh size to make relative comparison of coords
    scale = max(emesh_x.max(axis=0) - emesh_x.min(axis=0))
    # Also build the map for vertices
    entity_map = {0: [None]*emesh.num_vertices(), tdim: [None]*emesh.num_cells()}
    vertex_map = entity_map[0]
    cell_map = entity_map[tdim]
    collided_cells = {}
    for cell in df.cells(emesh):
        # The idea is that there is exactly one `the_cell` found in every
        # point-cell collision patch of this cell's vertices.
        the_cell = set()
        for vertex in cell.entities(0):
            # Compute each vertex collision only once; cache the result.
            if vertex in collided_cells:
                mcells = collided_cells[vertex]
            else:
                vertex_x = emesh_x[vertex]
                mcells = tree.compute_entity_collisions(df.Point(*vertex_x))
                # What is the id of vertex in the mesh: the vertex of the
                # first colliding cell closest to the emesh vertex.
                mcell_vertices = c2v(next(iter(mcells)))
                the_vertex = min(mcell_vertices, key=lambda v: np.linalg.norm(vertex_x-mesh_x[v]))
                error = np.linalg.norm(vertex_x - mesh_x[the_vertex])/scale
                assert error < tol, 'Found a hanging node %16f' % error
                vertex_map[vertex] = the_vertex
                collided_cells[vertex] = mcells
            # Intersect the candidate mesh2 cells across the cell's vertices.
            if not the_cell:
                the_cell.update(mcells)
            else:
                the_cell = the_cell & set(mcells)
        assert len(the_cell) == 1, the_cell
        # Insert
        cell_map[cell.index()] = the_cell.pop()
    # Sanity: every vertex and every cell of emesh must have been mapped.
    assert not any(v is None for v in entity_map[0])
    assert not any(v is None for v in entity_map[tdim])
    # At this point we can add the map linking emesh entities to mesh2.
    # (The map to mesh1 was installed by the SubDomainMesh constructor.)
    emesh.parent_entity_map[mesh2.id()] = entity_map
    return emesh
# -------------------------------------------------------------------
if __name__ == '__main__':
    # Demo / self-test: mark the unit square as left strip (1), right
    # strip (2) and middle (3); mesh1 = tags (1, 3), mesh2 = tags (2, 3),
    # so the overlap is the shared tag-3 region.
    mesh = df.UnitSquareMesh(4, 4)
    subdomains = df.MeshFunction('size_t', mesh, mesh.topology().dim(), 3)
    df.CompiledSubDomain('x[0] < 0.25+DOLFIN_EPS').mark(subdomains, 1)
    df.CompiledSubDomain('x[0] > 0.75-DOLFIN_EPS').mark(subdomains, 2)
    mesh1 = SubDomainMesh(subdomains, (1, 3))
    mesh2 = SubDomainMesh(subdomains, (2, 3))
    mesh12 = OverlapMesh(mesh1, mesh2)
    # FIXME: split the file!
    map1 = mesh12.parent_entity_map[mesh1.id()][2]
    map2 = mesh12.parent_entity_map[mesh2.id()][2]
    # Cell check out: each overlap-cell midpoint must coincide with the
    # midpoint of its mapped cell in both parent meshes.
    for c, c1, c2 in zip(df.cells(mesh12), map1, map2):
        assert df.near(c.midpoint().distance(df.Cell(mesh1, c1).midpoint()), 0, 1E-14)
        assert df.near(c.midpoint().distance(df.Cell(mesh2, c2).midpoint()), 0, 1E-14)
    # Vertices are not that important but anyways: mapped vertex
    # coordinates must agree with the overlap-mesh coordinates.
    x1 = mesh1.coordinates(); map1 = mesh12.parent_entity_map[mesh1.id()][0]
    x2 = mesh2.coordinates(); map2 = mesh12.parent_entity_map[mesh2.id()][0]
    for x, i1, i2 in zip(mesh12.coordinates(), map1, map2):
        assert np.linalg.norm(x - x1[i1]) < 1E-13
        assert np.linalg.norm(x - x2[i2]) < 1E-13
|
{"hexsha": "2a690a868b514886a414ac3e9eeb5e8a61b0b443", "size": 4855, "ext": "py", "lang": "Python", "max_stars_repo_path": "xii/meshing/subdomain_mesh.py", "max_stars_repo_name": "MiroK/fenics_ii", "max_stars_repo_head_hexsha": "58c41f0e8dba720962830395851e081b057269cc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2017-06-22T21:05:17.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-25T08:36:59.000Z", "max_issues_repo_path": "xii/meshing/subdomain_mesh.py", "max_issues_repo_name": "MiroK/fenics_ii", "max_issues_repo_head_hexsha": "58c41f0e8dba720962830395851e081b057269cc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2018-04-14T08:43:59.000Z", "max_issues_repo_issues_event_max_datetime": "2018-09-19T14:51:46.000Z", "max_forks_repo_path": "xii/meshing/subdomain_mesh.py", "max_forks_repo_name": "MiroK/fenics_ii", "max_forks_repo_head_hexsha": "58c41f0e8dba720962830395851e081b057269cc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2018-04-13T20:33:53.000Z", "max_forks_repo_forks_event_max_datetime": "2020-09-25T08:37:01.000Z", "avg_line_length": 39.4715447154, "max_line_length": 98, "alphanum_fraction": 0.6354273944, "include": true, "reason": "import numpy", "num_tokens": 1280}
|
//---------------------------------------------------------------------------//
//!
//! \file MonteCarlo_CollisionHandler.hpp
//! \author Alex Robinson
//! \brief Collision handler class declaration
//!
//---------------------------------------------------------------------------//
#ifndef MONTE_CARLO_COLLISION_HANDLER_HPP
#define MONTE_CARLO_COLLISION_HANDLER_HPP
// Boost Includes
#include <boost/unordered_map.hpp>
// FRENSIE Includes
#include "MonteCarlo_NeutronMaterial.hpp"
#include "MonteCarlo_PhotonMaterial.hpp"
#include "MonteCarlo_ElectronMaterial.hpp"
#include "MonteCarlo_NeutronState.hpp"
#include "MonteCarlo_PhotonState.hpp"
#include "MonteCarlo_ElectronState.hpp"
#include "Geometry_ModuleTraits.hpp"
namespace MonteCarlo{
//! The collision handler class
// NOTE(review): every member is static -- the handler acts as a
// process-wide registry mapping geometry cell ids to the materials they
// contain; thread-safety is not evident from this header, confirm before
// sharing across threads.
class CollisionHandler
{
private:
  // Typedef for cell id neutron material map
  typedef boost::unordered_map<Geometry::ModuleTraits::InternalCellHandle,
                               Teuchos::RCP<NeutronMaterial> >
  CellIdNeutronMaterialMap;
  // Typedef for cell id photon material map
  typedef boost::unordered_map<Geometry::ModuleTraits::InternalCellHandle,
                               Teuchos::RCP<PhotonMaterial> >
  CellIdPhotonMaterialMap;
  // Typedef for cell id electron material map
  typedef boost::unordered_map<Geometry::ModuleTraits::InternalCellHandle,
                               Teuchos::RCP<ElectronMaterial> >
  CellIdElectronMaterialMap;
public:
  //! Add a material to the collision handler (neutron transport mode)
  static void addMaterial(
                  const Teuchos::RCP<NeutronMaterial>& material,
                  const Teuchos::Array<Geometry::ModuleTraits::InternalCellHandle>&
                  cells_containing_material );
  //! Add a material to the collision handler (photon transport mode)
  static void addMaterial(
                  const Teuchos::RCP<PhotonMaterial>& material,
                  const Teuchos::Array<Geometry::ModuleTraits::InternalCellHandle>&
                  cells_containing_material );
  //! Add a material to the collision handler (coupled neutron-photon mode)
  static void addMaterial(
                  const Teuchos::RCP<NeutronMaterial>& neutron_material,
                  const Teuchos::RCP<PhotonMaterial>& photon_material,
                  const Teuchos::Array<Geometry::ModuleTraits::InternalCellHandle>&
                  cells_containing_material );
  //! Add a material to the collision handler (electron transport mode)
  static void addMaterial(
                  const Teuchos::RCP<ElectronMaterial>& material,
                  const Teuchos::Array<Geometry::ModuleTraits::InternalCellHandle>&
                  cells_containing_material );
  //! Check if a cell is void (no material registered for the particle type)
  static bool isCellVoid(const Geometry::ModuleTraits::InternalCellHandle cell,
                         const ParticleType particle_type );
  //! Get the neutron material contained in a cell
  static const Teuchos::RCP<NeutronMaterial>&
  getCellNeutronMaterial(
                      const Geometry::ModuleTraits::InternalCellHandle cell );
  //! Get the photon material contained in a cell
  static const Teuchos::RCP<PhotonMaterial>&
  getCellPhotonMaterial(
                      const Geometry::ModuleTraits::InternalCellHandle cell );
  //! Get the electron material contained in a cell
  static const Teuchos::RCP<ElectronMaterial>&
  getCellElectronMaterial(
                      const Geometry::ModuleTraits::InternalCellHandle cell );
  //! Get the total macroscopic cross section of a material
  static double getMacroscopicTotalCrossSection( const NeutronState& particle);
  //! Get the total macroscopic cross section of a material
  static double getMacroscopicTotalCrossSection( const PhotonState& particle );
  //! Get the total macroscopic cross section of a material
  static double getMacroscopicTotalCrossSection( const ElectronState& particle );
  //! Get the macroscopic cross section for a specific reaction
  static double getMacroscopicReactionCrossSection(
                                        const NeutronState& particle,
                                        const NuclearReactionType reaction );
  //! Get the macroscopic cross section for a specific reaction
  static double getMacroscopicReactionCrossSection(
                                        const PhotonState& particle,
                                        const PhotoatomicReactionType reaction );
  //! Get the macroscopic cross section for a specific reaction
  static double getMacroscopicReactionCrossSection(
                                        const PhotonState& particle,
                                        const PhotonuclearReactionType reaction );
  //! Get the macroscopic cross section for a specific reaction
  static double getMacroscopicReactionCrossSection(
                                        const ElectronState& particle,
                                        const ElectroatomicReactionType reaction );
  //! Collide with the material in a cell
  // NOTE(review): the meaning of `analogue` (analogue vs. biased sampling)
  // is defined in the .cpp implementation -- confirm there.
  static void collideWithCellMaterial( PhotonState& particle,
                                       ParticleBank& bank,
                                       const bool analogue );
  //! Collide with the material in a cell
  static void collideWithCellMaterial( NeutronState& particle,
                                       ParticleBank& bank,
                                       const bool analogue );
  //! Collide with the material in a cell
  static void collideWithCellMaterial( ElectronState& particle,
                                       ParticleBank& bank,
                                       const bool analogue );
private:
  // The cell id neutron material map
  static CellIdNeutronMaterialMap master_neutron_map;
  // The cell id photon material map
  static CellIdPhotonMaterialMap master_photon_map;
  // The cell id electron material map
  static CellIdElectronMaterialMap master_electron_map;
};
} // end MonteCarlo namespace
#endif // end MONTE_CARLO_COLLISION_HANDLER_HPP
//---------------------------------------------------------------------------//
// end MonteCarlo_CollisionHandler.hpp
//---------------------------------------------------------------------------//
|
{"hexsha": "52cf894dfe507bd04391bfceb0942b156e4ef331", "size": 5414, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "packages/monte_carlo/collision/native/src/MonteCarlo_CollisionHandler.hpp", "max_stars_repo_name": "lkersting/SCR-2123", "max_stars_repo_head_hexsha": "06ae3d92998664a520dc6a271809a5aeffe18f72", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "packages/monte_carlo/collision/native/src/MonteCarlo_CollisionHandler.hpp", "max_issues_repo_name": "lkersting/SCR-2123", "max_issues_repo_head_hexsha": "06ae3d92998664a520dc6a271809a5aeffe18f72", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "packages/monte_carlo/collision/native/src/MonteCarlo_CollisionHandler.hpp", "max_forks_repo_name": "lkersting/SCR-2123", "max_forks_repo_head_hexsha": "06ae3d92998664a520dc6a271809a5aeffe18f72", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.1558441558, "max_line_length": 81, "alphanum_fraction": 0.7087181382, "num_tokens": 1119}
|
import numpy as _np
from ._sum_inplace import sum_inplace as _sum_inplace
from netket.utils import (
mpi_available as _mpi_available,
n_nodes as _n_nodes,
MPI_comm as MPI_comm,
)
if _mpi_available:
from netket.utils import MPI
def subtract_mean(x, axis=None):
    """
    Subtract the mean of the input array, computed over all MPI processes,
    from each entry. The array *x* is modified in place and also returned.

    Args:
        x: Array whose (MPI-global) mean is removed. Modified in place.
        axis: Axis or axes along which the means are computed. The default is to
            compute the mean of the flattened array.

    Returns:
        The input array with the mean subtracted.
    """
    x_mean = mean(x, axis=axis)
    if axis is not None:
        # Re-insert the reduced axis so the subtraction broadcasts correctly
        # for any axis; plain `x -= x_mean` only lines up when the reduced
        # axis happens to broadcast from the left (same trick as in var()).
        x_mean = _np.expand_dims(x_mean, axis)
    x -= x_mean
    return x
def mean(a, axis=None):
    """
    Compute the arithmetic mean along the specified axis and over MPI processes.

    Returns the average of the array elements. The average is taken over the flattened array by default,
    otherwise over the specified axis. float64 intermediate and return values are used for integer inputs.

    Args:
        a: Input array.
        axis: Axis or axes along which the mean is computed. The default is to
            compute the mean of the flattened array.

    Returns:
        The mean of *a*, averaged over every MPI process.
    """
    # Local mean first; the reduction below sums the per-process means.
    # NOTE(review): for axis=None this is a 0-d scalar — presumably
    # _sum_inplace accepts scalars too; confirm against its implementation.
    out = a.mean(axis=axis)
    out = _sum_inplace(out)
    # Dividing the sum of per-process means by the process count gives the
    # global mean. NOTE(review): this assumes every process holds an
    # equally-sized chunk of data — confirm with callers.
    out /= _n_nodes
    return out
def sum(a, axis=None, out=None):
    """
    Compute the sum along the specified axis and over MPI processes.

    Args:
        a: Input array.
        axis: Axis or axes along which the sum is computed. The default is to
            sum the flattened array.
        out: Optional output array for the local reduction.

    Returns:
        The sum of *a*, accumulated over every MPI process.
    """
    # asarray is necessary for the axis=None case to work, as the MPI call
    # requires a NumPy array (np.sum returns a plain scalar there)
    out = _np.asarray(_np.sum(a, axis=axis, out=out))
    if _n_nodes > 1:
        # In-place all-reduce on a flat view: every process ends up holding
        # the global sum.
        MPI_comm.Allreduce(MPI.IN_PLACE, out.reshape(-1), op=MPI.SUM)
    return out
def var(a, axis=None, out=None, ddof=0):
    """
    Compute the variance along the specified axis and over MPI processes.

    Args:
        a: Input array.
        axis: Axis along which the variance is computed. The default is to
            compute the variance of the flattened array.
        out: Optional output array for the reduction of squared deviations.
        ddof: Delta degrees of freedom; the divisor used is ``N - ddof``,
            where ``N`` is the total element count across all MPI processes.

    Returns:
        The variance of *a* over every MPI process.
    """
    # Mean is already reduced over MPI, so the deviations below are taken
    # from the global mean.
    m = mean(a, axis=axis)

    # abs() keeps the squared deviations real for complex input.
    if axis is None:
        ssq = _np.abs(a - m) ** 2.0
    else:
        # Re-insert the reduced axis so the subtraction broadcasts.
        ssq = _np.abs(a - _np.expand_dims(m, axis)) ** 2.0

    # MPI-global sum of squared deviations, normalised by the global count.
    out = sum(ssq, axis=axis, out=out)
    n_all = total_size(a, axis=axis)
    out /= n_all - ddof
    return out
def total_size(a, axis=None):
    """
    Return the number of elements of *a*, summed over all MPI processes.

    If *axis* is given, only the length along that axis is counted.
    """
    local_count = a.size if axis is None else a.shape[axis]
    if _n_nodes > 1:
        local_count = MPI_comm.allreduce(local_count, op=MPI.SUM)
    return local_count
|
{"hexsha": "81c255c16e85badded9d8486baafeabb91b3bf1a", "size": 2171, "ext": "py", "lang": "Python", "max_stars_repo_path": "netket/stats/mpi_stats.py", "max_stars_repo_name": "ChenAo-Phys/netket", "max_stars_repo_head_hexsha": "df3735993962ca6318dee0b86a5d15a9d37c9881", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "netket/stats/mpi_stats.py", "max_issues_repo_name": "ChenAo-Phys/netket", "max_issues_repo_head_hexsha": "df3735993962ca6318dee0b86a5d15a9d37c9881", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "netket/stats/mpi_stats.py", "max_forks_repo_name": "ChenAo-Phys/netket", "max_forks_repo_head_hexsha": "df3735993962ca6318dee0b86a5d15a9d37c9881", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.9540229885, "max_line_length": 106, "alphanum_fraction": 0.6554583141, "include": true, "reason": "import numpy", "num_tokens": 567}
|
"""Tests pathless_data_processor.py."""
import numpy as np
from mlops.dataset.pathless_data_processor import PathlessDataProcessor
# Fixed feature matrix used as the preset raw features in every test below.
PRESET_RAW_FEATURES = np.array(
    [
        [10, 20, 30, 40],
        [0, 20, 40, 50],
        [10, 20, 20, 60],
        [20, 20, 50, 70],
        [10, 20, 10, 80],
        [10, 20, 60, 90],
        [10, 20, 0, 100],
        [30, 20, 70, 110],
        [10, 20, -10, 120],
        [-10, 20, 30, 130],
    ]
)
# One label per row of PRESET_RAW_FEATURES.
PRESET_RAW_LABELS = np.array([0, 1, 2, 0, 1, 2, 0, 1, 2, 0])
def test_get_raw_features_and_labels_returns_presets() -> None:
    """Tests that get_raw_features_and_labels returns the preset values."""
    raw_features = {"X_train": PRESET_RAW_FEATURES}
    raw_labels = {"y_train": PRESET_RAW_LABELS}
    dp = PathlessDataProcessor(raw_features, raw_labels)
    features, labels = dp.get_raw_features_and_labels("dne")
    assert set(features) == {"X_train"}
    assert set(labels) == {"y_train"}
    assert np.array_equal(features["X_train"], PRESET_RAW_FEATURES)
    assert np.array_equal(labels["y_train"], PRESET_RAW_LABELS)
def test_get_raw_features_returns_presets() -> None:
    """Tests that get_raw_features returns the preset values."""
    dp = PathlessDataProcessor(
        {"X_train": PRESET_RAW_FEATURES},
        {"y_train": PRESET_RAW_LABELS},
    )
    features = dp.get_raw_features("dne")
    assert set(features) == {"X_train"}
    assert np.array_equal(features["X_train"], PRESET_RAW_FEATURES)
def test_preprocess_features_is_identity_function() -> None:
    """Tests that preprocess_features is the identity function."""
    dp = PathlessDataProcessor(
        {"X_train": PRESET_RAW_FEATURES},
        {"y_train": PRESET_RAW_LABELS},
    )
    result = dp.preprocess_features(PRESET_RAW_FEATURES)
    assert np.array_equal(result, PRESET_RAW_FEATURES)
def test_preprocess_labels_is_identity_function() -> None:
    """Tests that preprocess_labels is the identity function."""
    dp = PathlessDataProcessor(
        {"X_train": PRESET_RAW_FEATURES},
        {"y_train": PRESET_RAW_LABELS},
    )
    result = dp.preprocess_labels(PRESET_RAW_LABELS)
    assert np.array_equal(result, PRESET_RAW_LABELS)
|
{"hexsha": "da6a32d4234a3fde236dfb082927572c34c51f1b", "size": 2206, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/dataset/test_pathless_data_processor.py", "max_stars_repo_name": "kostaleonard/mlops", "max_stars_repo_head_hexsha": "236d3499535d6294768c15336180217829fb2ee3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-11-26T21:41:00.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-26T21:41:00.000Z", "max_issues_repo_path": "tests/dataset/test_pathless_data_processor.py", "max_issues_repo_name": "kostaleonard/mlops", "max_issues_repo_head_hexsha": "236d3499535d6294768c15336180217829fb2ee3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 39, "max_issues_repo_issues_event_min_datetime": "2021-11-18T20:01:34.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-26T17:59:07.000Z", "max_forks_repo_path": "tests/dataset/test_pathless_data_processor.py", "max_forks_repo_name": "kostaleonard/mlops", "max_forks_repo_head_hexsha": "236d3499535d6294768c15336180217829fb2ee3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.1639344262, "max_line_length": 75, "alphanum_fraction": 0.6858567543, "include": true, "reason": "import numpy", "num_tokens": 594}
|
from functools import reduce
import numpy as np
import pandas as pd
import scipy.stats as scs
import matplotlib.pyplot as plt
from standard_precip.lmoments import distr
class BaseStandardIndex():
    '''
    Calculate the SPI or SPEI index. A user specified distribution is fit to the precip data.
    The CDF of this distribution is then calculated after which the standard normal
    distribution is calculated which gives the index. A distribution can be fit over the
    precipitation data either using MLE or L-moments. NCAR's SPI calculators and the SPI and
    SPEI R packages both use L-moments to fit the distribution. There are advantages and
    disadvantages to each technique.

    This calculation can be done on any time scale. Built in temporal scales include daily,
    weekly, and monthly; however, the user can define their own timescale.

    One should put some thought into the type of distribution fit to the
    data. Precipitation can have zero value and some distributions are only
    defined over interval (0, inf). Python's gamma distribution is defined
    over [0, inf). In addition SPEI which is constructed from precipitation
    - PET or (P-PET) can take on negative values.
    '''

    def __init__(self):
        # Distribution object (from standard_precip.lmoments.distr); set by
        # fit_distribution.
        self.distrb = None
        # Distributions that cannot handle zero values; zeros are removed and
        # handled through a mixed distribution (Thom, 1966).
        self.non_zero_distr = ['gam', 'pe3']
        # Working copy of the user's data, built in calculate().
        self._df_copy = None
        # Name of the grouping (frequency) column, set in calculate().
        self.freq_col = None

    @staticmethod
    def rolling_window_sum(df: pd.DataFrame, precip_cols: list, span: int=1, window_type: str=None,
                           center: bool=False, **kwargs):
        '''
        This is a helper method which will find the rolling sum of precipitation data.

        A new column named ``<col>_scale_<span>`` is appended to ``df`` for every
        precipitation column; the first ``span - 1`` entries of each are NaN.
        Returns the (mutated) dataframe and the list of new column names.
        '''
        precip_cols_new = []
        for p in precip_cols:
            new_col_name = p+f"_scale_{span}"
            df[new_col_name] = df[p].rolling(
                window=span, win_type=window_type, center=center, **kwargs
            ).sum()
            precip_cols_new.append(new_col_name)

        return df, precip_cols_new

    @staticmethod
    def check_duplicate_dates(df, date_col):
        '''
        Method to check duplicate dates in dataframe. If duplicates are found, the row corresponding
        to the first date found is used.
        '''
        if df.duplicated(subset=date_col).any():
            print("Found duplicate dates in dataframe. Removing duplicates and using first date found")
            df = df.drop_duplicates(subset=date_col)

        return df

    def fit_distribution(self, data: np.array, dist_type: str, fit_type: str='lmom', **kwargs):
        '''
        Fit given distribution to historical precipitation data.

        The fit is accomplished using either L-moments or MLE (Maximum Likelihood Estimation).
        For distributions that use the Gamma Function (Gamma and Pearson 3) remove observations
        that have 0 precipitation values and fit using non-zero observations. Also find probability
        of zero observation (estimated by number of zero obs / total obs). This is for later use
        in calculating the CDF using (Thom, 1966. Some Methods of Climatological Analysis)

        Returns ``(params, p_zero)``; ``params`` is None when too few non-zero
        observations are available to fit.
        '''
        # Get distribution type
        self.distrb = getattr(distr, dist_type)

        # Determine zeros if distribution can not handle x = 0
        p_zero = None
        if dist_type in self.non_zero_distr:
            p_zero = data[data == 0].shape[0] / data.shape[0]
            data = data[data != 0]

        # Too few observations (or all zeros) -> no meaningful fit possible.
        if (data.shape[0]<4) or (p_zero==1):
            params = None
        else:
            # Fit distribution
            if fit_type == 'lmom':
                params = self.distrb.lmom_fit(data, **kwargs)
            elif fit_type == 'mle':
                params = self.distrb.fit(data, **kwargs)
            else:
                raise AttributeError(f"{fit_type} is not an option. Option fit_types are mle and lmom")

        return params, p_zero

    def cdf_to_ppf(self, data, params, p_zero):
        '''
        Take the specific distributions fitted parameters and calculate the
        cdf. Apply the inverse normal distribution to the cdf to get the SPI
        SPEI. This process is best described in Lloyd-Hughes and Saunders, 2002
        which is included in the documentation.
        '''
        # Calculate the CDF of observed precipitation on a given time scale
        if p_zero is not None:
            if params:
                # Mixed distribution (Thom, 1966): point mass at zero plus the
                # fitted distribution over the positive values.
                cdf = p_zero + (1 - p_zero) * self.distrb.cdf(data, **params)
            else:
                # No fit was possible; propagate NaNs for this group.
                cdf = np.empty(data.shape)
                cdf.fill(np.nan)
        else:
            cdf = self.distrb.cdf(data, **params)

        # Apply inverse normal distribution
        norm_ppf = scs.norm.ppf(cdf)
        # CDF values of exactly 0 or 1 map to +/-inf; mask them out.
        norm_ppf[np.isinf(norm_ppf)] = np.nan

        return norm_ppf

    def calculate(self, df: pd.DataFrame, date_col: str, precip_cols: list, freq: str="M",
                  scale: int=1, freq_col: str=None, fit_type: str='lmom', dist_type: str='gam',
                  **dist_kwargs) -> pd.DataFrame:
        '''
        Calculate the index.

        Check https://docs.scipy.org/doc/scipy/reference/stats.html for
        distribution types

        Parameters
        ----------
        df: pd.Dataframe
            Pandas dataframe with precipitation data as columns. Each column is treated as a separate
            set of observations and distributions are fit for individual columns. A date column should
            also be given in the dataframe.
        date_col: str
            The column name for the date column. Date specification should follow the strftime format.
        precip_cols: list
            List of columns with precipitation data. Each column is treated as a separate set of
            observations.
        freq: str ["M", "W", "D"]
            The temporal frequency to calculate the index on. The day of year ("D") or week of year
            ("W") or month of year ("M") is derived from the date_col. If the user desires a custom
            frequency such as 3-month, 6-month, they can pass the column name for the custom frequency
            (freq_col)
        freq_col: str (column type: int)
            Name of the column that specifies a custom frequency. This overrides the freq parameter.
            The freq_col should group individual observations (rows) according to the users custom
            frequency. The grouping is specified using integers.
        scale: int (default=1)
            Integer to specify the number of time periods over which the standardized precipitation
            index is to be calculated. If freq="M" then this is the number of months.
        fit_type: str ("lmom" or "mle")
            Specify the type of fit to use for fitting distribution to the precipitation data. Either
            L-moments (lmom) or Maximum Likelihood Estimation (mle). Note use L-moments when comparing
            to NCAR's NCL code and R's packages to calculate SPI and SPEI.
        dist_type: str
            The distribution type to fit using either L-moments or MLE

            'gam' - Gamma
            'exp' - Exponential
            'gev' - Generalised Extreme Value
            'gpa' - Generalised Pareto
            'gum' - Gumbel
            'nor' - Normal
            'pe3' - Pearson III
            'wei' - Weibull

            The distribution type to fit using ONLY MLE

            'glo' - Generalised Logistic
            'gno' - Generalised Normal
            'kap' - Kappa

            The distribution type to fit using ONLY L-moments

            'wak' - Wakeby
        dist_kwargs:
            scale and location parameters. See documentation on scipy.stats.rv_continuous.fit

        Returns
        -------
        df: pd.Dataframe
            Pandas dataframe with the calculated indices for each precipitation column appended
            to the original dataframe.
        '''
        # Check for duplicate dates
        df = self.check_duplicate_dates(df, date_col)

        if isinstance(precip_cols, str):
            precip_cols = [precip_cols]

        if scale > 1:
            df, precip_cols = self.rolling_window_sum(df, precip_cols, scale)

        # Keep the custom frequency column (when supplied) in the working
        # copy; previously it was dropped here, which made the freq_col path
        # fail with a KeyError at the grouping step below.
        keep_cols = [date_col] + precip_cols
        if freq_col:
            keep_cols = keep_cols + [freq_col]
        self._df_copy = df[keep_cols].copy()
        self._df_copy[date_col] = pd.to_datetime(self._df_copy[date_col])

        if freq_col:
            self.freq_col = freq_col
        else:
            self.freq_col = 'freq'
            if freq == "D":
                self._df_copy[self.freq_col] = self._df_copy[date_col].dt.dayofyear
            elif freq == "W":
                # Series.dt.week was removed in pandas 2.0;
                # isocalendar().week is the supported equivalent (pandas >= 1.1).
                self._df_copy[self.freq_col] = self._df_copy[date_col].dt.isocalendar().week.astype(int)
            elif freq == "M":
                self._df_copy[self.freq_col] = self._df_copy[date_col].dt.month
            else:
                raise AttributeError(f"{freq} is not a recognized frequency. Options are 'M', 'W', or 'D'")

        freq_range = self._df_copy[self.freq_col].unique().tolist()

        # Loop over frequency groups (e.g. months), fitting one distribution
        # per precipitation column per group.
        dfs = []
        for p in precip_cols:
            dfs_p = pd.DataFrame()
            for j in freq_range:
                precip_all = self._df_copy.loc[self._df_copy[self.freq_col]==j]
                precip_single_df = precip_all.dropna().copy()
                precip_single = precip_single_df[p].values
                precip_sorted = np.sort(precip_single)[::-1]

                # Fit distribution for particular series and month
                params, p_zero = self.fit_distribution(
                    precip_sorted, dist_type, fit_type, **dist_kwargs
                )

                # Calculate SPI/SPEI
                spi = self.cdf_to_ppf(precip_single, params, p_zero)
                idx_col = f"{p}_calculated_index"
                precip_single_df[idx_col] = spi
                precip_single_df = precip_single_df[[date_col, idx_col]]
                dfs_p = pd.concat([dfs_p, precip_single_df])

            dfs_p = dfs_p.sort_values(date_col)
            dfs.append(dfs_p)

        # Merge each column's calculated index back onto the working copy.
        df_all = reduce(
            lambda left, right: pd.merge(left, right, on=date_col, how='left'), dfs, self._df_copy
        )
        df_all = df_all.drop(columns=self.freq_col)

        return df_all
|
{"hexsha": "3559db93e7d90401a23ec013514b0456613a4ac8", "size": 10213, "ext": "py", "lang": "Python", "max_stars_repo_path": "standard_precip/base_sp.py", "max_stars_repo_name": "e-baumer/standard_precip", "max_stars_repo_head_hexsha": "8945ba399a3493464a860b9901d648bdecc86354", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 60, "max_stars_repo_stars_event_min_datetime": "2016-11-23T17:36:37.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-25T02:34:29.000Z", "max_issues_repo_path": "standard_precip/base_sp.py", "max_issues_repo_name": "jnsebgosselin/standard_precip", "max_issues_repo_head_hexsha": "8945ba399a3493464a860b9901d648bdecc86354", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 12, "max_issues_repo_issues_event_min_datetime": "2017-03-22T04:12:25.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-17T14:13:37.000Z", "max_forks_repo_path": "standard_precip/base_sp.py", "max_forks_repo_name": "jnsebgosselin/standard_precip", "max_forks_repo_head_hexsha": "8945ba399a3493464a860b9901d648bdecc86354", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 30, "max_forks_repo_forks_event_min_datetime": "2017-02-05T01:03:31.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-11T15:47:46.000Z", "avg_line_length": 40.3675889328, "max_line_length": 107, "alphanum_fraction": 0.6137276021, "include": true, "reason": "import numpy,import scipy", "num_tokens": 2270}
|
# Números complexos
Neste notebook exploramos alguns aspectos dos números complexos. Especialmente, vamos falar da interferência entre duas ondas da mesma frequência.
Vimos nas aulas passadas que uma função cossenoidal geral, expressa por:
\begin{equation}
x(t) = \mathrm{Re}\left\{A\mathrm{e}^{-\mathrm{j}\phi} \ \mathrm{e}^{\mathrm{j}\omega t} \right\}
\end{equation}
em que $\tilde{A} = A\mathrm{e}^{-\mathrm{j}\phi}$ é a amplitude complexa do cosseno e contém as informações de magnitude, $A$, e fase, $\phi$. Esta amplitude complexa pode ser representada no plano complexo por:
<div>
</div>
Imagine que temos duas ondas, ***de mesma frequência***, interferindo entre si. Queremos calcular a onda resultante. Então, podemos somá-las. Se cada uma das ondas é descrita por um número complexo $z_1$ e $z_2$, do tipo:
\begin{equation}
\tilde{z}_1 = a_1 + \mathrm{j} b_1 = |\tilde{z}_1| \mathrm{e}^{\mathrm{j}\phi_1}
\end{equation}
e
\begin{equation}
\tilde{z}_2 = a_2 + \mathrm{j} b_2 = |\tilde{z}_2| \mathrm{e}^{\mathrm{j}\phi_2}
\end{equation}
Vamos fazer algumas análises a seguir
```python
# Import the required libraries
import numpy as np
import matplotlib.pyplot as plt
plt.rcParams.update({'font.size': 14})
```
```python
# Time axis and frequency
t = np.linspace(-2, 2, 1000)  # time vector
freq = 1
w = 2*np.pi*freq  # angular frequency [rad/s]

# Wave 1: magnitude M1 and phase phi_1 packed into the complex amplitude z1
M1 = 2
phi_1 = np.deg2rad(0)
z1 = M1*np.exp(1j*phi_1)
xt1 = np.real(z1*np.exp(1j*w*t))

# Wave 2: magnitude M2 and phase phi_2 packed into the complex amplitude z2
M2 = 1.2
phi_2 = np.deg2rad(0)
z2 = M2*np.exp(1j*phi_2)
xt2 = np.real(z2*np.exp(1j*w*t))
```
Temos agora 2 opções para calcular a onda resultante. A primeira é somar os sinais no domínio do tempo
\begin{equation}
x(t) = x_1(t) + x_2(t).
\end{equation}
A segunda é somar as amplitudes complexas
\begin{equation}
z = z_1 + z_2
\end{equation}
e então construir o sinal resultante $x(t) = \mathrm{Re}\left\{\tilde{z}\,\mathrm{e}^{\mathrm{j}\omega t}\right\}$, em consistência com a definição anterior.
```python
# Resulting signal obtained by summing the complex amplitudes
z = z1+z2
xt = np.real(z*np.exp(1j*w*t))

# Figure: resultant on the bottom row, each component on the top row
plt.figure(figsize=(12,8))
plt.subplot(2,1,2)
plt.title('Sinal resultante')
plt.plot(t, xt, '-r', linewidth = 2, label = 'resultante')
plt.plot(t, xt1, '--b', linewidth = 1, label = r'$x_1(t)$')
plt.plot(t, xt2, '--b', linewidth = 1, label = r'$x_2(t)$')
plt.legend(loc = 'upper right')
plt.grid(linestyle = '--', which='both')
plt.xlabel('Tempo [s]')
plt.ylabel(r'$x(t)$ [-]')
plt.ylim((-2, 2))
plt.xlim((t[0], t[-1]))

plt.subplot(2,2,1)
plt.plot(t, xt1, '-b', linewidth = 2)
plt.grid(linestyle = '--', which='both')
plt.xlabel('Tempo [s]')
plt.ylabel(r'$x_1(t)$ [-]')
plt.ylim((-2, 2))
plt.xlim((t[0], t[-1]))

plt.subplot(2,2,2)
plt.plot(t, xt2, '-b', linewidth = 2)
plt.grid(linestyle = '--', which='both')
plt.xlabel('Tempo [s]')
plt.ylabel(r'$x_2(t)$ [-]')
plt.ylim((-2, 2))
plt.xlim((t[0], t[-1]))

plt.tight_layout()
plt.show()
```
```python
# Voltage, current and instantaneous power of the two signals
plt.figure(figsize=(12,4))
plt.title('Tensão, corrente e potência')
plt.plot(t, xt1, '--b', linewidth = 1, label = r'$v(t)$ [V]')
plt.plot(t, xt2, '--g', linewidth = 1, label = r'$i(t)$ [A]')
plt.plot(t, xt1*xt2, 'r', linewidth = 2, label = r'$w(t)$ [J/s]')
#plt.plot(t, np.mean(xt1*xt2)*np.ones(len(t)), '--k', linewidth = 3, label = r'$<w(t)>$ [J/s]')
plt.legend(loc = 'upper right')
plt.grid(linestyle = '--', which='both')
plt.xlabel('Tempo [s]')
plt.ylabel('Amplitude [V], [A], [J/s]')
plt.ylim((-3, 3))
plt.xlim((t[0], t[-1]))
```
```python
```
|
{"hexsha": "6f3cc8582c51b2fc73ea188182d50e158c157086", "size": 233764, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "Aula 7 - Numeros complexos/Numeros complexos.ipynb", "max_stars_repo_name": "RicardoGMSilveira/codes_proc_de_sinais", "max_stars_repo_head_hexsha": "e6a44d6322f95be3ac288c6f1bc4f7cfeb481ac0", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2020-10-01T20:59:33.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-27T22:46:58.000Z", "max_issues_repo_path": "Aula 7 - Numeros complexos/Numeros complexos.ipynb", "max_issues_repo_name": "RicardoGMSilveira/codes_proc_de_sinais", "max_issues_repo_head_hexsha": "e6a44d6322f95be3ac288c6f1bc4f7cfeb481ac0", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Aula 7 - Numeros complexos/Numeros complexos.ipynb", "max_forks_repo_name": "RicardoGMSilveira/codes_proc_de_sinais", "max_forks_repo_head_hexsha": "e6a44d6322f95be3ac288c6f1bc4f7cfeb481ac0", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2020-10-15T12:08:22.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-12T12:26:53.000Z", "avg_line_length": 1029.7973568282, "max_line_length": 116732, "alphanum_fraction": 0.951759039, "converted": true, "num_tokens": 1240}
|
import LinearAlgebra, Distributions, Random, Statistics, DataFrames
"""
    simulate_coefs_correlation(coefs_mean::Number=0.1; coefs_sd::Number=0.1, n::Int=10)

Generate a vector of random correlation coefficients from a normal distribution.
Each coefficient is returned wrapped in its own single-element vector.

# Arguments
- `coefs_mean::Number`: Mean of the normal distribution from which to get the coefs.
- `coefs_sd::Number`: SD of the normal distribution.
- `n::Int`: Number of coefficients.

# Examples
```julia
simulate_coefs_correlation(0.5)
```
"""
function simulate_coefs_correlation(coefs_mean::Number=0.1; coefs_sd::Number=0.1, n::Int=10)
    # Draw `n` coefficients, then wrap each one in a one-element vector.
    draws = Random.rand(Distributions.Normal(coefs_mean, coefs_sd), n)
    return [[draw] for draw in draws]
end
"""
    simulate_data_correlation(coefs; n::Int=100, noise::Number=0.0, groupnames=:random)

Generate a DataFrame of correlated variables.

# Multiple Variables / Groups
- If `coefs` is a vector (*e.g.*, `[0.1, 0.2]`), the DataFrame will contain `length(coefs)` variables (`Var1, Var2, ...`). Although uncorrelated between them, they are correlated to the outcome (`y`) by the specified coefs.
- If `coefs` is a vector of vectors (*e.g.*, `[[0.1], [0.2]]`), it will create `length(coefs)` groups, *i.e.*, stacked DataFrames with a correlation between the variables and the outcome varying between groups. It is possible to specify the `groupnames`.

# Arguments
- `coefs`: Correlation coefficients. Can be a number, a vector of numbers or a vector of vectors.
- `n::Int`: Number of observations.
- `noise::Number`: The SD of the random gaussian noise.
- `groupnames::Vector`: Vector of group names (default to `:random`).
- `kwargs...`: Arguments to pass to other functions.

!!! note
    **Ideas / help required:**
    - Different group sizes (See [#9](https://github.com/neuropsychology/Psycho.jl/issues/9))
    - Bug in some cases (*e.g.*, `simulate_data_correlation([0.2, 0.9, 0.5])`) related to failure in Cholesky factorization (See [#11](https://github.com/neuropsychology/Psycho.jl/issues/11))

# Examples
```julia
simulate_data_correlation(0.2)
```
"""
function simulate_data_correlation end
function simulate_data_correlation(coefs::Vector{<:Number}; n::Int=100, noise::Number=0.0)
    # Generate outcome
    y = standardize(Random.rand(Distributions.Normal(0, 1), n))

    n_var = length(coefs)
    X = standardize(Random.rand(Distributions.Normal(0, 1), n, n_var))
    X = hcat(y, X)

    # find the current correlation matrix
    # NOTE(review): Statistics.cov is used although the variable is named
    # `cor_structure`; the factorization below is known to fail for some
    # coefficient combinations (see issue #11 referenced in the docstring).
    cor_structure = Statistics.cov(X)
    chol = LinearAlgebra.cholesky(cor_structure).U

    # cholesky decomposition to get independence
    X = X / chol

    # create new correlation structure (zeros can be replaced with other r vals)
    coefs_structure = Matrix{Float64}(LinearAlgebra.I, n_var+1, n_var+1)
    coefs_structure[1, 2:end] = coefs
    coefs_structure[2:end, 1] = coefs

    # Re-impose the requested correlations and restore y's scale/location.
    X = X * LinearAlgebra.cholesky(coefs_structure).U
    X = X * Statistics.std(y) .+ Statistics.mean(y)
    X = X[:, 2:end]

    # Add noise
    if noise != 0
        y = y .+ Random.rand(Distributions.Normal(0, noise), n)
    end

    data = DataFrames.DataFrame(hcat(y, X),
                                Symbol.(vcat(["y"], "Var" .* string.(1:n_var))))

    return data
end
# Scalar convenience method: wrap the single coefficient and delegate.
function simulate_data_correlation(coefs::Number; n::Int=100, noise::Number=0.0)
    return simulate_data_correlation([coefs], n=n, noise=noise)
end
# Grouped method: build one stacked DataFrame per coefficient vector.
function simulate_data_correlation(coefs::Vector{<:Vector}; n::Int=100, noise::Number=0.0, groupnames=:random, kwargs...)
    return simulate_data_groups(simulate_data_correlation, coefs=coefs, n=n, noise=noise, groupnames=groupnames, kwargs...)
end
|
{"hexsha": "ac80c44413ce8fd979999b5150c9e9089670375d", "size": 3577, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/simulate/data_correlation.jl", "max_stars_repo_name": "neuropsychology/Psycho.jl", "max_stars_repo_head_hexsha": "ec812f48ff520588e36d833ce124572dc7ff585c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2019-10-24T21:45:41.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-26T01:25:28.000Z", "max_issues_repo_path": "src/simulate/data_correlation.jl", "max_issues_repo_name": "neuropsychology/Psycho.jl", "max_issues_repo_head_hexsha": "ec812f48ff520588e36d833ce124572dc7ff585c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 31, "max_issues_repo_issues_event_min_datetime": "2018-09-02T11:03:24.000Z", "max_issues_repo_issues_event_max_datetime": "2018-09-21T17:00:37.000Z", "max_forks_repo_path": "src/simulate/data_correlation.jl", "max_forks_repo_name": "neuropsychology/Psycho.jl", "max_forks_repo_head_hexsha": "ec812f48ff520588e36d833ce124572dc7ff585c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-04-21T13:47:46.000Z", "max_forks_repo_forks_event_max_datetime": "2020-08-07T12:26:50.000Z", "avg_line_length": 28.8467741935, "max_line_length": 253, "alphanum_fraction": 0.7120492032, "num_tokens": 1025}
|
! C function declarations
! Interoperable analogue of a C "float3": three packed 32-bit reals.
type, bind(C) :: float3
    real(kind = 4) :: x, y, z
end type

! Fortran bindings to the C/C++ n-point correlation function (npcf) library.
! NOTE(review): several procedures declare assumed-shape `dimension(:)`
! dummies inside BIND(C) interfaces; this requires Fortran 2018 C descriptor
! support — confirm the C side expects CFI descriptors, not raw pointers.
interface
    ! Create an npcf object; returns an opaque handle owned by the C side.
    function create_npcf_c(timesRans, numShells, volBox, rMin, rMax) bind(C, name="create_npcf")
        use iso_c_binding
        implicit none
        type(c_ptr) :: create_npcf_c
        integer(c_int), value :: timesRans
        integer(c_int), value :: numShells
        real(c_double), value :: volBox
        real(c_double), value :: rMax
        real(c_double), value :: rMin
    end function

    ! Destroy a handle previously returned by create_npcf_c.
    subroutine delete_npcf_c(obj) bind(C, name="delete_npcf")
        use iso_c_binding
        implicit none
        type(c_ptr), value :: obj
    end subroutine

    ! Fill `shells` with the shell radii; returns a status code.
    function get_shells_c(obj, shells) bind(C, name="get_shells")
        use iso_c_binding
        implicit none
        integer(c_int) :: get_shells_c
        type(c_ptr), value :: obj
        real(c_double), dimension(:) :: shells
    end function

    ! Number of triangle configurations held by the object.
    function get_num_triangles_c(obj) bind(C, name="get_num_triangles")
        use iso_c_binding
        implicit none
        integer(c_int) :: get_num_triangles_c
        type(c_ptr), value :: obj
    end function

    ! Fill `tris` with the triangle configurations; returns a status code.
    function get_triangles_c(obj, tris) bind(C, name="get_triangles")
        use iso_c_binding
        import :: float3
        integer(c_int) :: get_triangles_c
        type(c_ptr), value :: obj
        type(float3), dimension(:) :: tris
    end function

    ! Set the number of particles used in the calculation.
    function set_num_particles_c(obj, numParts) bind(C, name="set_num_particles")
        use iso_c_binding
        implicit none
        integer(c_int) :: set_num_particles_c
        type(c_ptr), value :: obj
        integer(c_int), value :: numParts
    end function

    ! Size of the two-point result array (see get_2pt_c).
    function get_2pt_size_c(obj) bind(C, name="get_2pt_size")
        use iso_c_binding
        implicit none
        integer(c_int) :: get_2pt_size_c
        type(c_ptr), value :: obj
    end function

    ! Size of the three-point result array (see get_3pt_c).
    function get_3pt_size_c(obj) bind(C, name="get_3pt_size")
        use iso_c_binding
        implicit none
        integer(c_int) :: get_3pt_size_c
        type(c_ptr), value :: obj
    end function

    ! Run the correlation calculation on the given galaxy positions.
    function calculate_correlations_c(obj, galaxies) bind(C, name="calculate_correlations")
        use iso_c_binding
        import :: float3
        integer(c_int) :: calculate_correlations_c
        type(c_ptr), value :: obj
        type(float3), dimension(:) :: galaxies
    end function

    ! Run only the two-point calculation on the given galaxy positions.
    function calculate_2pt_c(obj, galaxies) bind(C, name="calculate_2pt")
        use iso_c_binding
        import :: float3
        integer(c_int) :: calculate_2pt_c
        type(c_ptr), value :: obj
        type(float3), dimension(:) :: galaxies
    end function

    ! Copy the two-point results into `twoPoint`; returns a status code.
    function get_2pt_c(obj, twoPoint) bind(C, name="get_2pt")
        use iso_c_binding
        implicit none
        integer(c_int) :: get_2pt_c
        type(c_ptr), value :: obj
        real(c_double), dimension(:) :: twoPoint
    end function

    ! Copy the three-point results into `threePoint`; returns a status code.
    function get_3pt_c(obj, threePoint) bind(C, name="get_3pt")
        use iso_c_binding
        implicit none
        integer(c_int) :: get_3pt_c
        type(c_ptr), value :: obj
        real(c_double), dimension(:) :: threePoint
    end function
end interface
|
{"hexsha": "827bfd8a26cd57dae77befd5581fcdf5de586a20", "size": 3185, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "source/ganpcf_cdef.f90", "max_stars_repo_name": "dpearson1983/ganpcf", "max_stars_repo_head_hexsha": "d75fddfb094045a81916ffed10fec19d96b6d52e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-11-04T06:19:23.000Z", "max_stars_repo_stars_event_max_datetime": "2019-11-04T06:19:23.000Z", "max_issues_repo_path": "source/ganpcf_cdef.f90", "max_issues_repo_name": "dpearson1983/ganpcf", "max_issues_repo_head_hexsha": "d75fddfb094045a81916ffed10fec19d96b6d52e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "source/ganpcf_cdef.f90", "max_forks_repo_name": "dpearson1983/ganpcf", "max_forks_repo_head_hexsha": "d75fddfb094045a81916ffed10fec19d96b6d52e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-10-30T22:45:42.000Z", "max_forks_repo_forks_event_max_datetime": "2019-11-05T17:36:58.000Z", "avg_line_length": 31.2254901961, "max_line_length": 96, "alphanum_fraction": 0.6222919937, "num_tokens": 822}
|
import os
import copy
import argparse
import numpy as np
from tqdm import tqdm
from sklearn.cluster import KMeans
from plyfile import PlyData, PlyElement
def parse_args():
    """Parse the command-line options for the keypoint generator."""
    ap = argparse.ArgumentParser(description='Keypoints generator')
    ap.add_argument('--dataset', default='hinterstoisser', type=str,
                    help="dataset name")
    ap.add_argument('--sixdroot', default='/home/yusheng/data/sixd', type=str,
                    help="Path to SIXD root")
    ap.add_argument('--num', type=int, help="Number of keypoints")
    ap.add_argument('--type', choices=['sift', 'random', 'cluster', 'corner'],
                    type=str, help="Type of keypoints")
    return ap.parse_args()
def get_3d_corners(vertices):
    """Compute the 8 corners of the axis-aligned 3D bounding box.

    Args
        - vertices: (np.array) [N x 3] 3d vertices

    Returns
        - corners: (np.array) [8 x 3] 3d corner points, ordered with x
          varying slowest and z fastest (min before max on each axis).
          (Fixes the old docstring, which wrongly claimed [8 x 2] 2d.)
    """
    # Vectorized per-axis extrema instead of six scalar reductions.
    mins = vertices.min(axis=0)
    maxs = vertices.max(axis=0)
    # Enumerate all (x, y, z) min/max combinations in the original order.
    corners = np.array([[x, y, z]
                        for x in (mins[0], maxs[0])
                        for y in (mins[1], maxs[1])
                        for z in (mins[2], maxs[2])])
    return corners
if __name__ == '__main__':
    args = parse_args()
    # --type and --num are mandatory; sift keypoints come from a separate tool.
    assert args.type is not None, "Please specify type of keypoints"
    assert args.num is not None, "Please specify number of keypoints"
    assert args.type != 'sift', "Please go to ./pcl-sift to generate sift keypoints"
    if args.type == 'corner':
        # 8 bounding-box corners plus 1 center point.
        assert args.num == 9, "Number of \"corner\" keypoints must be 9"
    print("[LOG] Number of keypoints: %d" % args.num)
    print("[LOG] Type of keypoints: %s" % args.type)
    # Output layout: <sixdroot>/<dataset>/kps/<num>/<type>/<model>.ply
    MODEL_ROOT = os.path.join(args.sixdroot, args.dataset, 'models')
    KP_ROOT = os.path.join(args.sixdroot, args.dataset,
                           'kps', str(args.num), args.type)
    if not os.path.exists(KP_ROOT):
        os.makedirs(KP_ROOT)
    else:
        print("[WARNING] Overwrite existing files!")
    tbar = tqdm(os.listdir(MODEL_ROOT))
    for filename in tbar:
        if '.ply' not in filename:  # skip models_info.yml
            continue
        tbar.set_description(filename)
        # Structured output array of float32 keypoint coordinates.
        vertex = np.zeros(args.num,
                          dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4')])
        model = PlyData.read(os.path.join(MODEL_ROOT, filename))
        # All model vertices as an [N x 3] array.
        xyz = np.stack((np.array(model['vertex']['x']),
                        np.array(model['vertex']['y']),
                        np.array(model['vertex']['z'])), axis=1)
        if args.type == 'random':
            # Uniform sample of distinct model vertices.
            selected_ids = np.random.choice(
                model['vertex'].count, args.num, replace=False)
            for i in range(args.num):
                vertex[i][0] = model['vertex']['x'][selected_ids[i]]
                vertex[i][1] = model['vertex']['y'][selected_ids[i]]
                vertex[i][2] = model['vertex']['z'][selected_ids[i]]
        elif args.type == 'cluster':
            # K-means cluster centers snapped to the nearest model vertex.
            kmeans = KMeans(n_clusters=args.num, max_iter=1000).fit(xyz)
            dist = kmeans.transform(xyz)
            selected_ids = []  # NOTE(review): dead variable, never used below
            for i in range(args.num):
                di = dist[:, i]
                # Index of the vertex closest to cluster center i.
                ind = np.argsort(di)[0]
                vertex[i][0] = model['vertex']['x'][ind]
                vertex[i][1] = model['vertex']['y'][ind]
                vertex[i][2] = model['vertex']['z'][ind]
        elif args.type == 'corner':
            # 8 bounding-box corners plus the box center (9 points total).
            corners = get_3d_corners(xyz)
            center = corners.mean(axis=0).reshape(1, 3)
            corners_and_center = np.concatenate((corners, center), axis=0)
            for i in range(args.num):
                vertex[i][0] = corners_and_center[i, 0]
                vertex[i][1] = corners_and_center[i, 1]
                vertex[i][2] = corners_and_center[i, 2]
        # Write the selected keypoints as a binary-safe PLY (text format).
        data = PlyData([PlyElement.describe(vertex, 'vertex')], text=True)
        with open(os.path.join(KP_ROOT, filename), mode='wb') as f:
            data.write(f)
|
{"hexsha": "6d007c8748b99d3496325f0b70ab850b0ac0c2cb", "size": 4322, "ext": "py", "lang": "Python", "max_stars_repo_path": "kps/kp.py", "max_stars_repo_name": "qingchenkanlu/6dpose-genedata", "max_stars_repo_head_hexsha": "fbf973208eabdf11efd8bb384b1bc74963328193", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "kps/kp.py", "max_issues_repo_name": "qingchenkanlu/6dpose-genedata", "max_issues_repo_head_hexsha": "fbf973208eabdf11efd8bb384b1bc74963328193", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "kps/kp.py", "max_forks_repo_name": "qingchenkanlu/6dpose-genedata", "max_forks_repo_head_hexsha": "fbf973208eabdf11efd8bb384b1bc74963328193", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.9122807018, "max_line_length": 84, "alphanum_fraction": 0.5485886164, "include": true, "reason": "import numpy", "num_tokens": 1064}
|
import numpy as np
import sys
import os
import pytest
# Absolute directory containing this test file.
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
# Location of the pre-recorded observation arrays used by the tests.
recordings_path = os.path.join(THIS_DIR, os.pardir, '../res/data/')
from src.music.score import Pieces
from src.model.model import Model
# Tolerance on the average per-note observation count checked by the
# integration test below.
LENGTH_THRESHOLD = 3
@pytest.mark.parametrize("piece,tempo,recording", [
    (Pieces.TestTwinkle, 60, f"{recordings_path}Twinkle_Recording.npy"),
    (Pieces.TestPachabels, 60, f"{recordings_path}Pachabels_Recording.npy"),
])
def test_pieces_integration(piece, tempo, recording):
    """
    Sample audio from recording and put into integration.
    :param piece: pieces object
    :param tempo: int beats per minute
    :param recording: str path to recording
    :return:
    """
    model = Model(None, piece=piece, tempo=tempo)
    t = 0
    # Recording is a 2D array: rows are features, columns are time frames.
    q = np.load(recording)[:, :]
    # Visit count per score state over the whole recording.
    states = [0] * model.score.N
    while t < len(q[0]):
        obs = q[:, t]
        current_state, prob = model.next_observation(obs)
        t += 1
        if prob < 1.0e-110:
            # Rescale to avoid floating-point underflow of the running
            # probabilities (presumably HMM forward/alpha values - confirm).
            model.alpha *= 1.0e100
        states[current_state[0]] += 1
    # Drop the first and last (boundary) states before averaging.
    res = states[1:len(states) - 1]
    # Expected number of observations spent in each note at this tempo.
    desired_note_length = (model.recording_speed * model.score.sub_beat.value) / tempo
    average_note_length = sum(res) / len(res)
    # Check no notes were skipped
    assert all(count > 0 for count in res)
    # Check that average note length was within acceptable range
    assert abs(average_note_length - desired_note_length) < LENGTH_THRESHOLD
|
{"hexsha": "e25f127b66fa0bc0ae3adfe429b50600a6d403a4", "size": 1473, "ext": "py", "lang": "Python", "max_stars_repo_path": "test/integration/test_model.py", "max_stars_repo_name": "dartmouth-cs98/20w-ensemble-vr-score-following", "max_stars_repo_head_hexsha": "3effbb47ac48580666a6642734c3738f4e3b427d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2020-05-29T07:35:38.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T17:50:15.000Z", "max_issues_repo_path": "test/integration/test_model.py", "max_issues_repo_name": "dartmouth-cs98/20w-ensemble-vr-score-following", "max_issues_repo_head_hexsha": "3effbb47ac48580666a6642734c3738f4e3b427d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 40, "max_issues_repo_issues_event_min_datetime": "2020-02-10T01:15:36.000Z", "max_issues_repo_issues_event_max_datetime": "2020-06-18T22:55:44.000Z", "max_forks_repo_path": "test/integration/test_model.py", "max_forks_repo_name": "dartmouth-cs98/20w-ensemble-vr-score-following", "max_forks_repo_head_hexsha": "3effbb47ac48580666a6642734c3738f4e3b427d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-02-07T03:46:00.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-07T03:46:00.000Z", "avg_line_length": 28.8823529412, "max_line_length": 86, "alphanum_fraction": 0.6782077393, "include": true, "reason": "import numpy", "num_tokens": 387}
|
#
# Copyright (c) European Molecular Biology Laboratory (EMBL)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__author__ = ['S. Basu']
__license__ = 'MIT'
__date__ = '2018/12/10'
from io import open
import logging
import numpy as np
import os
import json
import fabio
logger = logging.getLogger('autoCryst')
class CBFreader(object):
    """Reader for miniCBF diffraction images.

    Parses the ASCII header section into a dict (``headers``) and loads the
    pixel data through the ``fabio`` library (``data``, flattened array).

    rtype: type(headers) -> dict
    rtype: type(data) -> numpy array 2D
    """

    def __init__(self, filename):
        # Behavior kept from the original: on a missing file, log and
        # return, leaving the instance unconfigured (no cbf_file/headers/
        # data attributes are set).
        if not os.path.exists(filename):
            err = 'File does not exist %s.' % filename
            logger.info('IOError:{}'.format(err))
            return
        self.cbf_file = filename
        self.headers = {}          # parsed header key/value pairs
        self.data = np.empty([])   # pixel data, filled by read_cbfdata()
        return

    def read_cbfheaders(self):
        """Parse the ASCII header section of the CBF file into ``self.headers``.

        Parsing stops at the 'X-Binary-Size-Padding' record, which marks
        the end of the text header before the binary section.
        """
        self.headers['filename'] = self.cbf_file
        self.headers['dimension'] = []
        # Context manager guarantees the handle is closed even if a record
        # fails to parse (the original leaked the handle on exceptions).
        with open(self.cbf_file, 'rb') as fh:
            for record in fh:
                if b'X-Binary-Size-Padding' in record:
                    break
                if b'Pixel_size' in record:
                    self.headers['pixel_size'] = float(record.decode().split()[2])
                if b'Detector_distance' in record:
                    self.headers['detector_distance'] = float(record.decode().split()[2])
                if b'Wavelength' in record:
                    # 12398 eV*Angstrom divided by wavelength -> energy in eV.
                    self.headers['photon_energy'] = 12398 / float(record.decode().split()[2])
                if b'Beam_xy' in record:
                    # Line looks like "# Beam_xy (x, y) pixels"; strip the
                    # punctuation before converting.
                    beam = map(float, record.decode().replace('(', '').replace(')', '').replace(',', '').split()[2:4])
                    beam = list(beam)
                    self.headers['beam_center_x'] = beam[0]
                    self.headers['beam_center_y'] = beam[1]
                if b'Detector:' in record:
                    self.headers['detector_name'] = record.decode().replace(',', '').split()[2:4]
                if b'Exposure_time' in record:
                    self.headers['exposure'] = float(record.decode().split()[2])
                if b'Start_angle' in record:
                    self.headers['starting_angle'] = float(record.decode().split()[2])
                if b'Angle_increment' in record:
                    self.headers['oscillation_range'] = float(record.decode().split()[2])
                if b'X-Binary-Size-Fastest-Dimension' in record:
                    self.headers['dimension'].append(int(record.decode().split()[1]))
                if b'X-Binary-Size-Second-Dimension' in record:
                    self.headers['dimension'].append(int(record.decode().split()[1]))
        return

    def read_cbfdata(self):
        """Load the pixel data as a flattened numpy array via fabio."""
        handler = fabio.open(self.cbf_file)
        self.data = handler.data.flatten()
        return

    # A pycbf-based read_cbfdata() implementation used to live here; it was
    # removed because pycbf is not supported on Python 3, so fabio is used.

    @staticmethod
    def write_cbf():
        """Placeholder for writing a cbf image file (not implemented)."""
        try:
            import pycbf  # noqa: F401 -- availability check only
        except ImportError as err:
            logger.info('ImportError:{}'.format(err))
        return
class EigerReader(object):
    """Reader for Eiger HDF5 (master) image files.

    :rtype: type(headers) -> dict
    :rtype: type(data) -> numpy array 2D
    """

    def __init__(self, filename):
        if not os.path.exists(filename):
            err = "File does not exist %s" % filename
            logger.info('IOError:{}'.format(err))
            return
        self.eiger_file = filename
        self.headers = {}
        self.data = np.empty([])
        try:
            import h5py
            self.eiger_handle = h5py.File(self.eiger_file, 'r')
        except (ImportError, NameError) as err:
            logger.info('ImportError:{}'.format(err))
        return

    def read_h5headers(self):
        """Populate self.headers from the master file's metadata datasets."""
        # Only master files carry the header datasets; bail out otherwise.
        if 'master' not in self.eiger_file:
            err = '%s is not a master file, no header info' % self.eiger_file
            logger.info('ValueError:{}'.format(err))
            return
        handle = self.eiger_handle
        self.headers['filename'] = self.eiger_file
        self.headers['pixel_size'] = np.array(
            handle['/entry/instrument/detector/x_pixel_size'])
        raw_name = np.array(
            handle['/entry/instrument/detector/description']).tolist()  # type: bytes
        self.headers['detector_name'] = raw_name.decode().split()[1:3]
        self.headers['detector_distance'] = np.array(
            handle['/entry/instrument/detector/detector_distance'])
        self.headers['beam_center_x'] = np.array(
            handle['/entry/instrument/detector/beam_center_x'])
        self.headers['beam_center_y'] = np.array(
            handle['/entry/instrument/detector/beam_center_y'])
        wavelength = np.array(
            handle['/entry/instrument/beam/incident_wavelength'])
        self.headers['photon_energy'] = 12398 / wavelength
        return

    def read_h5data(self):
        """Load the image stack into an int32 numpy array."""
        raw = self.eiger_handle['/data/data']
        self.data = np.array(raw, np.int32)
        return

    def write_h5(self):
        """ Method to write an Eiger h5 image """
        return
class ImageHandler(object):
    """Dispatch wrapper that selects the appropriate reader for an image.

    '.cbf' files go through CBFreader, 'master' HDF5 files through
    EigerReader, and '.cxi' files read a pre-built headers.json sitting
    next to the data file. ``imobject.headers`` is always available.
    """

    def __init__(self, filename):
        self.imagefile = filename
        # Placeholder object so .headers exists even when no concrete
        # reader is selected (e.g. for .cxi files).
        self.imobject = type('', (), {})
        self.imobject.headers = {}
        if not os.path.exists(self.imagefile):
            err = 'File does not exist: %s' % filename
            logger.info('ImportError:{}'.format(err))
            return
        if '.cbf' in self.imagefile:
            self.imobject = CBFreader(self.imagefile)
            self.imobject.read_cbfheaders()
        elif 'master' in self.imagefile:
            self.imobject = EigerReader(self.imagefile)
            self.imobject.read_h5headers()
        elif '.cxi' in self.imagefile:
            dirname = os.path.dirname(self.imagefile)
            if os.path.isfile(os.path.join(dirname, 'headers.json')):
                # Fix: close the json file deterministically (the original
                # left the handle open).
                with open(os.path.join(dirname, 'headers.json'), 'r') as cxi:
                    self.imobject.headers = json.load(cxi)
            else:
                err = 'cxi file exists but no header json file'
                logger.info('DozorHit_Error:{}'.format(err))
        return

    def read_datablock(self):
        """Load pixel data via the selected reader (no-op for .cxi)."""
        if isinstance(self.imobject, CBFreader):
            self.imobject.read_cbfdata()
        elif isinstance(self.imobject, EigerReader):
            self.imobject.read_h5data()
        return

    def check_icerings(self):
        """Method for ice rings analysis"""
        return

    def create_powdersum(self):
        """Method to write a powder pattern, useful for background checking"""
        return

    def create_maskfile(self):
        """Method for writing a mask file based on powder sum"""
        return

    def radial_avg(self):
        """Placeholder for a radial-average computation."""
        return
def dstack(array_lst):
    """Depth-stack a list of 2D arrays into a single (H, W, N) array.

    Prints a warning and returns None when the list is empty.
    """
    if not array_lst:
        print("Empty list of arrays")
        return
    return np.dstack(array_lst)
def create_h5stack(list_of_cbfs):
    """Stack the pixel data of many CBF files into one int32 array.

    Parameters
    ----------
    list_of_cbfs : list of str
        Paths to the cbf files; header dimensions are taken from the
        first file and assumed identical for the rest.

    Returns
    -------
    numpy.ndarray or None
        Array of shape (len(list_of_cbfs), dim0, dim1), or None (after
        printing a warning) for an empty input list.
    """
    if len(list_of_cbfs) == 0:
        print("Need a list of many cbfs; got empty list")
        return
    # Image dimensions come from the first file's header.
    image1 = CBFreader(list_of_cbfs[0])
    image1.read_cbfheaders()
    stacksize = (len(list_of_cbfs),) + tuple(image1.headers['dimension'])
    data_stack = np.empty(stacksize, dtype=np.int32)
    # The original wrapped this loop body in a try/except that only
    # re-raised; that no-op handler has been removed.
    for ii, path in enumerate(list_of_cbfs):
        c = CBFreader(path)
        c.read_cbfdata()
        data_stack[ii, :, :] = c.data.reshape(image1.headers['dimension'])
    return data_stack
if __name__ == '__main__':
    # Ad-hoc developer smoke test against local example files; the first
    # part (batch stacking of cbf chunks into h5) is kept disabled as a
    # no-op string literal.
    '''
    import glob
    import h5py
    lst_cbfs = glob.glob('/data/id23eh2/inhouse/opid232/20181208/RAW_DATA/Sample-1-1-03/MeshScan_02/*.cbf')
    nframes = len(lst_cbfs)
    nchunks = int(nframes / 100) + 1
    for i in range(nchunks):
        start = 100 * i
        stop = 100 * (i + 1)
        try:
            data_stacks = create_h5stack(lst_cbfs[start:stop])
            prefix = 'data_stack_' + str(i) + '.h5'
            fh = h5py.File(prefix, 'w')
            fh.create_dataset('/data/data', data=data_stacks)
            fh.close()
            for j in range(start, stop):
                print(data_stacks[j, :, :].max())
        except IndexError:
            pass
    '''
    # img = CBFreader('/data/id23eh2/inhouse/opid232/20181208/RAW_DATA/
    # Sample-1-1-03/MeshScan_02/mesh-insu_3_0_0100.cbf')
    # NOTE(review): hard-coded developer-local paths; this block only runs
    # on the original author's machine.
    img = CBFreader('/Users/shbasu/work/autoCryst/examples/mesh-insu_2_0_1143.cbf')
    img.read_cbfheaders()
    print(img.headers)
    img.read_cbfdata()
    print(img.data.shape)
    h5 = EigerReader('/Users/shbasu/work/autoCryst/examples/mesh-x_2_1_master.h5')
    h5.read_h5headers()
    print(h5.headers['pixel_size'])
|
{"hexsha": "b27cd925d65f801e8931e5fc4d0c74ad92ed55b5", "size": 11166, "ext": "py", "lang": "Python", "max_stars_repo_path": "edna2/lib/autocryst/src/Image.py", "max_stars_repo_name": "gsantoni/edna2", "max_stars_repo_head_hexsha": "0aad63a3ea8091ce62118f0b2c8ac78a2286da9e", "max_stars_repo_licenses": ["CC0-1.0", "MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "edna2/lib/autocryst/src/Image.py", "max_issues_repo_name": "gsantoni/edna2", "max_issues_repo_head_hexsha": "0aad63a3ea8091ce62118f0b2c8ac78a2286da9e", "max_issues_repo_licenses": ["CC0-1.0", "MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-04-06T10:39:50.000Z", "max_issues_repo_issues_event_max_datetime": "2021-04-14T19:24:37.000Z", "max_forks_repo_path": "edna2/lib/autocryst/src/Image.py", "max_forks_repo_name": "gsantoni/edna2", "max_forks_repo_head_hexsha": "0aad63a3ea8091ce62118f0b2c8ac78a2286da9e", "max_forks_repo_licenses": ["CC0-1.0", "MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2019-06-14T07:28:38.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-28T13:10:39.000Z", "avg_line_length": 36.2532467532, "max_line_length": 114, "alphanum_fraction": 0.596363962, "include": true, "reason": "import numpy", "num_tokens": 2631}
|
# Autogenerated wrapper script for GnuPG_jll for i686-linux-gnu
export dirmngr, dirmngr_client, gpg, gpg_agent, gpg_connect_agent, gpgconf, gpgscm, gpgsm, gpgtar, gpgv, kbxutil

# Runtime library dependencies of the GnuPG executables (JLL packages).
using GnuTLS_jll
using Libksba_jll
using Libgcrypt_jll
using Libgpg_error_jll
using nPth_jll
using Zlib_jll
using Libassuan_jll
using OpenLDAPClient_jll
using Bzip2_jll
using SQLite_jll
using libusb_jll
using Nettle_jll

JLLWrappers.@generate_wrapper_header("GnuPG")

# Declare each exported name as an executable product of this JLL package.
JLLWrappers.@declare_executable_product(dirmngr)
JLLWrappers.@declare_executable_product(dirmngr_client)
JLLWrappers.@declare_executable_product(gpg)
JLLWrappers.@declare_executable_product(gpg_agent)
JLLWrappers.@declare_executable_product(gpg_connect_agent)
JLLWrappers.@declare_executable_product(gpgconf)
JLLWrappers.@declare_executable_product(gpgscm)
JLLWrappers.@declare_executable_product(gpgsm)
JLLWrappers.@declare_executable_product(gpgtar)
JLLWrappers.@declare_executable_product(gpgv)
JLLWrappers.@declare_executable_product(kbxutil)
# Module initialization: after the dependency JLLs have initialized,
# resolve the on-disk path of every declared executable under "bin/".
function __init__()
    JLLWrappers.@generate_init_header(GnuTLS_jll, Libksba_jll, Libgcrypt_jll, Libgpg_error_jll, nPth_jll, Zlib_jll, Libassuan_jll, OpenLDAPClient_jll, Bzip2_jll, SQLite_jll, libusb_jll, Nettle_jll)
    JLLWrappers.@init_executable_product(
        dirmngr,
        "bin/dirmngr",
    )
    JLLWrappers.@init_executable_product(
        dirmngr_client,
        "bin/dirmngr-client",
    )
    JLLWrappers.@init_executable_product(
        gpg,
        "bin/gpg",
    )
    JLLWrappers.@init_executable_product(
        gpg_agent,
        "bin/gpg-agent",
    )
    JLLWrappers.@init_executable_product(
        gpg_connect_agent,
        "bin/gpg-connect-agent",
    )
    JLLWrappers.@init_executable_product(
        gpgconf,
        "bin/gpgconf",
    )
    JLLWrappers.@init_executable_product(
        gpgscm,
        "bin/gpgscm",
    )
    JLLWrappers.@init_executable_product(
        gpgsm,
        "bin/gpgsm",
    )
    JLLWrappers.@init_executable_product(
        gpgtar,
        "bin/gpgtar",
    )
    JLLWrappers.@init_executable_product(
        gpgv,
        "bin/gpgv",
    )
    JLLWrappers.@init_executable_product(
        kbxutil,
        "bin/kbxutil",
    )
    JLLWrappers.@generate_init_footer()
end  # __init__()
|
{"hexsha": "928e2b0b1ae25163f18d4a5a4c060df0034881e4", "size": 2262, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/wrappers/i686-linux-gnu.jl", "max_stars_repo_name": "JuliaBinaryWrappers/GnuPG_jll.jl", "max_stars_repo_head_hexsha": "edb5da03c4efeee73499d507001b3043197501d8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/wrappers/i686-linux-gnu.jl", "max_issues_repo_name": "JuliaBinaryWrappers/GnuPG_jll.jl", "max_issues_repo_head_hexsha": "edb5da03c4efeee73499d507001b3043197501d8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/wrappers/i686-linux-gnu.jl", "max_forks_repo_name": "JuliaBinaryWrappers/GnuPG_jll.jl", "max_forks_repo_head_hexsha": "edb5da03c4efeee73499d507001b3043197501d8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.0, "max_line_length": 197, "alphanum_fraction": 0.7378426172, "num_tokens": 678}
|
# Short (4 hex digit) identifier for a task, for correlating debug lines.
taskid(t=current_task()) = string(hash(t) & 0xffff, base=16, pad=4)

# Prefix for debug output: timestamp padded to 24 chars plus the task id.
debug_header() = string("DEBUG: ", rpad(Dates.now(), 24), taskid(), " ")
# Print `s` with the debug header when DEBUG_LEVEL[] >= n; otherwise expand
# to a no-op. NOTE: the level check happens at macro-expansion time, so
# DEBUG_LEVEL must be set before the calling code is compiled.
macro debug(n::Int, s)
    DEBUG_LEVEL[] >= n ? :(println(debug_header(), $(esc(s)))) :
                         :()
end
# Like @debug, but prints `expr = value`, rendering the value through the
# "text/plain" MIME show (the multi-line REPL-style display).
macro debugshow(n::Int, s)
    DEBUG_LEVEL[] >= n ? :(println(debug_header(),
                                   $(sprint(Base.show_unquoted, s)), " = ",
                                   sprint(io->show(io, "text/plain",
                                                   begin value=$(esc(s)) end)))) :
                         :()
end
# Like @debug, but renders the value compactly via sprintcompact.
macro debugshort(n::Int, s)
    DEBUG_LEVEL[] >= n ? :(println(debug_header(),
                                   sprintcompact($(esc(s))))) :
                         :()
end
# Render `x` with the :compact IOContext flag (as used inside array display).
sprintcompact(x) = sprint(show, x; context=:compact => true)
# Print the compact rendering followed by a newline.
printlncompact(x) = println(sprintcompact(x))
# Get the calling function. See https://github.com/JuliaLang/julia/issues/6733
# (The macro form @__FUNCTION__ is hard to escape correctly, so just use a function.)
# Returns an expression that, evaluated inside the macro caller's body,
# yields the enclosing function's name (or `nothing` at top level, where
# the hidden `#self#` binding is not defined).
function _funcname_expr()
    return :($(esc(Expr(:isdefined, Symbol("#self#")))) ? nameof($(esc(Symbol("#self#")))) : nothing)
end
# Build the ArgumentError raised by @require, naming the offending caller
# (or "unknown" when the caller's name could not be determined).
@noinline function precondition_error(msg, calling_funcname)
    name = calling_funcname === nothing ? "unknown" : calling_funcname
    return ArgumentError("$name() requires $msg")
end
"""
    @require precondition [message]

Throw `ArgumentError` if `precondition` is false.
"""
macro require(condition, msg = "`$condition`")
    :(if ! $(esc(condition)) throw(precondition_error($(esc(msg)), $(_funcname_expr()))) end)
end
# Build the AssertionError raised by @ensure. When the left/right operand
# strings and values are supplied, both are appended to the message.
@noinline function postcondition_error(msg, calling_funcname, ls="", l="", rs="", r="")
    name = calling_funcname === nothing ? "unknown" : calling_funcname
    text = "$name() failed to ensure $msg"
    if ls != ""
        text = string(text, "\n", ls, " = ", sprint(show, l),
                      "\n", rs, " = ", sprint(show, r))
    end
    return AssertionError(text)
end
# Copied from stdlib/Test/src/Test.jl:get_test_result()
# True when `ex` is a binary infix comparison call (e.g. `a == b`) whose
# operator has the precedence of `==`, is not a dotted (broadcast)
# operator, and has no splatted arguments — i.e. an expression whose two
# operand values @ensure can usefully report on failure.
iscondition(ex) = isa(ex, Expr) &&
                  ex.head == :call &&
                  length(ex.args) == 3 &&
                  first(string(ex.args[1])) != '.' &&
                  (!isa(ex.args[2], Expr) || ex.args[2].head != :...) &&
                  (!isa(ex.args[3], Expr) || ex.args[3].head != :...) &&
                  (ex.args[1] === :(==) ||
                   Base.operator_precedence(ex.args[1]) ==
                   Base.operator_precedence(:(==)))
"""
    @ensure postcondition [message]

Throw `AssertionError` if `postcondition` is false.
(The old docstring said `ArgumentError`; `postcondition_error` actually
builds an `AssertionError`.) Expands to a no-op when `DEBUG_LEVEL[] < 0`
at macro-expansion time.
"""
macro ensure(condition, msg = "`$condition`")

    if DEBUG_LEVEL[] < 0
        return :()
    end

    # For a plain binary comparison, report both operand values on failure.
    if iscondition(condition)
        l,r = condition.args[2], condition.args[3]
        ls, rs = string(l), string(r)
        return quote
            if ! $(esc(condition))
                # FIXME double-execution of condition l and r!
                throw(postcondition_error($(esc(msg)), $(_funcname_expr()),
                                          $ls, $(esc(l)), $rs, $(esc(r))))
            end
        end
    end

    :(if ! $(esc(condition)) throw(postcondition_error($(esc(msg)), $(_funcname_expr()))) end)
end
|
{"hexsha": "88e1cd563ad2f88cee0d43eb37786e8b854f8ceb", "size": 3305, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/debug.jl", "max_stars_repo_name": "c42f/URIs.jl", "max_stars_repo_head_hexsha": "a4c725b5090b21bed5cf6ca37cff196d80f32d1d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 514, "max_stars_repo_stars_event_min_datetime": "2016-12-01T06:26:39.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T12:02:15.000Z", "max_issues_repo_path": "src/debug.jl", "max_issues_repo_name": "c42f/URIs.jl", "max_issues_repo_head_hexsha": "a4c725b5090b21bed5cf6ca37cff196d80f32d1d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 729, "max_issues_repo_issues_event_min_datetime": "2016-12-20T10:46:44.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T22:58:35.000Z", "max_forks_repo_path": "src/debug.jl", "max_forks_repo_name": "c42f/URIs.jl", "max_forks_repo_head_hexsha": "a4c725b5090b21bed5cf6ca37cff196d80f32d1d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 175, "max_forks_repo_forks_event_min_datetime": "2017-02-08T18:06:55.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-24T21:12:56.000Z", "avg_line_length": 34.7894736842, "max_line_length": 101, "alphanum_fraction": 0.5361573374, "num_tokens": 788}
|
# Copyright 2019 Pascal Audet & Helen Janiszewski
#
# This file is part of OBStools.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import numpy as np
import pickle
from obstools.atacr import utils, DayNoise, StaNoise
np.seterr(all='ignore')
class Comply(object):
    """
    A Comply object contains attributes that calculate and store the
    compliance and coherence functions for the available channels.

    Note
    ----
    The object is initialized with either a processed
    :class:`~obstools.atacr.classes.DayNoise` or
    :class:`~obstools.atacr.classes.StaNoise` object. Each individual
    spectral quantity is unpacked as an object attribute, but all of them
    are discarded as the object is saved to disk.

    Attributes
    ----------
    elevation : float
        Station elevation in meters (OBS stations have negative elevations)
    f : :class:`~numpy.ndarray`
        Frequency axis for corresponding time sampling parameters
    c11 : `numpy.ndarray`
        Power spectra for component `H1`. Other identical attributes are
        available for the power, cross and rotated spectra:
        [11, 12, 1Z, 1P, 22, 2Z, 2P, ZZ, ZP, PP, HH, HZ, HP]
    tf_list : Dict
        Dictionary of possible transfer functions from the available
        components (obtained from the
        :class:`~obstools.atacr.classes.DayNoise` or the
        :class:`~obstools.atacr.classes.StaNoise` noise objects)
    complyfunc : Dict
        Dictionary of compliance and coherence functions given the
        available components (set by :meth:`calculate_compliance`).
    """

    def __init__(self, objnoise=None, elev=None):
        # Both the processed noise object and the station elevation are
        # required.
        if any(value is None for value in [elev, objnoise]):
            raise(Exception(
                "Error: Initializing EventStream object with None values - " +
                "aborting"))

        # Accept only DayNoise or StaNoise containers.
        if (objnoise and not isinstance(objnoise, DayNoise) and
                not isinstance(objnoise, StaNoise)):
            msg = "Error: A TFNoise object must be initialized with only " +\
                "one of type DayNoise or StaNoise object"
            raise TypeError(msg)

        # The noise object must have been QC'd and averaged first.
        if not objnoise.av:
            raise(Exception(
                "Error: Noise object has not been processed (QC and " +
                "averaging) - aborting"))

        self.elevation = elev
        self.f = objnoise.f
        # Unpack the power, rotated and cross spectra for direct access.
        self.c11 = objnoise.power.c11
        self.c22 = objnoise.power.c22
        self.cZZ = objnoise.power.cZZ
        self.cPP = objnoise.power.cPP
        self.cHH = objnoise.rotation.cHH
        self.cHZ = objnoise.rotation.cHZ
        self.cHP = objnoise.rotation.cHP
        self.c12 = objnoise.cross.c12
        self.c1Z = objnoise.cross.c1Z
        self.c1P = objnoise.cross.c1P
        self.c2Z = objnoise.cross.c2Z
        self.c2P = objnoise.cross.c2P
        self.cZP = objnoise.cross.cZP
        self.tf_list = objnoise.tf_list

    class ComplyDict(dict):
        """Thin dict subclass with an explicit ``add`` method.

        (The original defined a no-op ``__init__`` that only rebound the
        local ``self``; it has been removed as dead code.)
        """

        def add(self, key, value):
            self[key] = value

    def calculate_compliance(self):
        """
        Method to calculate compliance and coherence functions from the
        averaged (daily or station-averaged) noise spectra.

        Attributes
        ----------
        complyfunc : Dict
            Container Dictionary for all possible compliance and
            coherence functions

        Examples
        --------

        Calculate compliance and coherence functions for a DayNoise object.
        In these examples, station elevation is extracted from the IRIS
        metadata aggregator site: http://ds.iris.edu/mda/7D/M08A/

        >>> from obstools.atacr import DayNoise
        >>> from obstools.comply import Comply
        >>> daynoise = DayNoise('demo')
        Uploading demo data - March 04, 2012, station 7D.M08A
        >>> daynoise.QC_daily_spectra()
        >>> daynoise.average_daily_spectra()
        >>> daycomply = Comply(objnoise=daynoise, elev=-126.4)
        >>> daycomply.calculate_compliance()
        >>> daycomply.complyfunc.keys()
        dict_keys(['ZP', 'ZP-21', 'ZP-H'])

        Calculate compliance and coherence functions for a StaNoise object

        >>> from obstools.atacr import StaNoise
        >>> from obstools.comply import Comply
        >>> stanoise = StaNoise('demo')
        Uploading demo data - March 01 to 04, 2012, station 7D.M08A
        >>> stanoise.QC_sta_spectra()
        >>> stanoise.average_sta_spectra()
        >>> stacomply = Comply(objnoise=stanoise, elev=-126.4)
        >>> stacomply.calculate_compliance()
        >>> stacomply.complyfunc.keys()
        dict_keys(['ZP', 'ZP-21'])
        """

        def wavenumber(omega, H):
            """
            Function to approximate wavenumber from dispersion relation

            H is depth below the seafloor, in meters
            omega is a vector of positive angular frequencies

            Stephen G. Mosher, 2020
            """
            import numpy.polynomial as poly

            g = 9.79329

            # Approximation for k when k*H is very large (deep case).
            # (The unused shallow-water approximation omega/sqrt(g*H) was
            # removed along with the unused vector length.)
            k_deep = omega**2 / g

            """
            Alternatively, we can use a rational approximation to
            tanh(x) to solve k for any omega. This approximation gives
            a quartic equation, we take the positive real roots as the
            value of k we're interested in. The rational approximation
            being used is always better than the shallow approximation.
            However, it's only better than the deep approximation if
            k*H < 2.96. Therefore, we keep the solutions to k we find,
            using the rational approximation for k*H < 2.96 and use the
            deep water approximation to solve for k otherwise. The
            average error is just under 1% and the maximum error is
            2.5%.
            """
            k = np.zeros(len(omega))
            for i, om in enumerate(omega):
                if i == 0:
                    k[i] = 0.
                else:
                    a0 = -27 * om**2 / g                 # constant terms
                    a1 = 0.                              # no linear terms
                    a2 = 27 * H - (9 * om**2 * H**2)/g   # quadratic terms
                    a3 = 0.                              # no cubic terms
                    a4 = H**3                            # quartic terms

                    p = poly.Polynomial([a0, a1, a2, a3, a4])
                    solu = poly.Polynomial.roots(p)
                    positive_roots = solu[solu > 0]
                    real_positive_root = \
                        positive_roots[positive_roots.imag == 0].real[0]
                    k[i] = real_positive_root

            # For k*H >= 2.96, prefer the deep approximation above.
            # (Loop variable renamed from `wavenumber`, which shadowed
            # this function's own name.)
            for i, kd in enumerate(k_deep):
                if kd * H > 2.96:
                    k[i] = kd

            return k

        # Calculate wavenumber - careful here, elevation is negative
        k = wavenumber(2.*np.pi*self.f, -1.*self.elevation)

        # Initialize empty dictionary
        complyfunc = self.ComplyDict()

        # Cycle through all available transfer functions in the objnoise
        # object
        for key, value in self.tf_list.items():

            if key == 'ZP':
                if value:
                    admit_ZP = utils.admittance(self.cZP, self.cPP)
                    compl_ZP = k*admit_ZP
                    coh_ZP = utils.coherence(self.cZP, self.cPP, self.cZZ)
                    complyfunc.add('ZP', [compl_ZP, coh_ZP])

            elif key == 'ZP-21':
                if value:
                    # Partial coherences: remove channel 1, then channel 2,
                    # before forming the Z/P admittance.
                    lc1cZ = np.conj(self.c1Z)/self.c11
                    lc1c2 = np.conj(self.c12)/self.c11
                    lc1cP = np.conj(self.c1P)/self.c11

                    coh_12 = utils.coherence(self.c12, self.c11, self.c22)
                    coh_1P = utils.coherence(self.c1P, self.c11, self.cPP)
                    coh_1Z = utils.coherence(self.c1Z, self.c11, self.cZZ)

                    gc2c2_c1 = self.c22*(1. - coh_12)
                    gcPcP_c1 = self.cPP*(1. - coh_1P)
                    gcZcZ_c1 = self.cZZ*(1. - coh_1Z)

                    gc2cZ_c1 = np.conj(self.c2Z) - np.conj(lc1c2*self.c1Z)
                    gcPcZ_c1 = self.cZP - np.conj(lc1cP*self.c1Z)
                    gc2cP_c1 = np.conj(self.c2P) - np.conj(lc1c2*self.c1P)

                    lc2cP_c1 = gc2cP_c1/gc2c2_c1
                    lc2cZ_c1 = gc2cZ_c1/gc2c2_c1

                    coh_c2cP_c1 = utils.coherence(gc2cP_c1, gc2c2_c1,
                                                  gcPcP_c1)
                    coh_c2cZ_c1 = utils.coherence(gc2cZ_c1, gc2c2_c1,
                                                  gcZcZ_c1)

                    gcPcP_c1c2 = gcPcP_c1*(1. - coh_c2cP_c1)
                    gcPcZ_c1c2 = gcPcZ_c1 - np.conj(lc2cP_c1)*gc2cZ_c1
                    gcZcZ_c1c2 = gcZcZ_c1*(1. - coh_c2cZ_c1)

                    admit_ZP_21 = utils.admittance(
                        gcPcZ_c1c2, gcPcP_c1c2)
                    compl_ZP_21 = k*admit_ZP_21

                    coh_ZP_21 = utils.coherence(
                        gcPcZ_c1c2, gcPcP_c1c2, gcZcZ_c1c2)

                    complyfunc.add('ZP-21', [compl_ZP_21, coh_ZP_21])

            elif key == 'ZP-H':
                if value:
                    # Remove the rotated horizontal (H) contribution before
                    # forming the Z/P admittance.
                    lcHcP = np.conj(self.cHP)/self.cHH

                    coh_HP = utils.coherence(self.cHP, self.cHH, self.cPP)
                    coh_HZ = utils.coherence(self.cHZ, self.cHH, self.cZZ)

                    gcPcP_cH = self.cPP*(1. - coh_HP)
                    gcZcZ_cH = self.cZZ*(1. - coh_HZ)
                    gcPcZ_cH = self.cZP - np.conj(lcHcP*self.cHZ)

                    admit_ZP_H = utils.admittance(gcPcZ_cH, gcPcP_cH)
                    compl_ZP_H = k*admit_ZP_H
                    coh_ZP_H = utils.coherence(gcPcZ_cH, gcPcP_cH, gcZcZ_cH)

                    complyfunc.add('ZP-H', [compl_ZP_H, coh_ZP_H])

        self.complyfunc = complyfunc

    def save(self, filename, form='pkl'):
        """
        Method to save the object to file using `~Pickle` or CSV.

        Parameters
        ----------
        filename : str or :class:`~pathlib.Path`
            Output file name; the extension '.' + ``form`` is appended
            automatically.
        form : str
            One of 'pkl' (default) or 'csv'.

        Examples
        --------

        Following from the example outlined in method
        :func:`~obstools.comply.classes.Comply.calculate_compliance`, we simply
        save the final object

        >>> daycomply.save('daycomply_demo')

        Check that object has been saved

        >>> import glob
        >>> glob.glob("./daycomply_demo.pkl")
        ['./daycomply_demo.pkl']
        """
        from pathlib import Path

        # Accept plain strings as well as Path objects: the code below
        # relies on the .parent/.name Path attributes (the original
        # crashed when given a str, contradicting its own docstring).
        filename = Path(filename)

        # Warn (instead of raising AttributeError) when
        # calculate_compliance() has not been run yet.
        if not getattr(self, 'complyfunc', None):
            print("Warning: saving before having calculated the compliance " +
                  "and coherence functions")

        if form == 'pkl':
            # Remove traces to save disk space
            del self.c11
            del self.c22
            del self.cZZ
            del self.cPP
            del self.cHH
            del self.cHZ
            del self.cHP
            del self.c12
            del self.c1Z
            del self.c1P
            del self.c2Z
            del self.c2P
            del self.cZP

            # Context manager replaces the manual open/close pair.
            with open(filename.parent / (filename.name + '.' + form),
                      'wb') as file:
                pickle.dump(self, file)

        elif form == 'csv':
            import pandas as pd

            # Column set depends on which transfer functions are available.
            if 'ZP-H' in self.complyfunc:
                df = pd.DataFrame(
                    {'Frequency': self.f,
                     'Compliance ZP': self.complyfunc['ZP'][0],
                     'Coherence ZP': self.complyfunc['ZP'][1],
                     'Compliance ZP-21': self.complyfunc['ZP-21'][0],
                     'Coherence ZP-21': self.complyfunc['ZP-21'][1],
                     'Compliance ZP-H': self.complyfunc['ZP-H'][0],
                     'Coherence ZP-H': self.complyfunc['ZP-H'][1]})
            elif 'ZP-21' in self.complyfunc:
                df = pd.DataFrame(
                    {'Frequency': self.f,
                     'Compliance ZP': self.complyfunc['ZP'][0],
                     'Coherence ZP': self.complyfunc['ZP'][1],
                     'Compliance ZP-21': self.complyfunc['ZP-21'][0],
                     'Coherence ZP-21': self.complyfunc['ZP-21'][1]})
            else:
                df = pd.DataFrame(
                    {'Frequency': self.f,
                     'Compliance ZP': self.complyfunc['ZP'][0],
                     'Coherence ZP': self.complyfunc['ZP'][1]})

            df.to_csv(filename.parent / (filename.name +
                                         '.' + form), index=False)

        return
|
{"hexsha": "df0c5104a35c06bf09fc3cde10b096023e65d824", "size": 13729, "ext": "py", "lang": "Python", "max_stars_repo_path": "obstools/comply/classes.py", "max_stars_repo_name": "paudetseis/OBStools", "max_stars_repo_head_hexsha": "c6c02d8864c25a14f22d1fae17ff5ad911b9ff00", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-12-05T04:32:38.000Z", "max_stars_repo_stars_event_max_datetime": "2019-12-05T04:32:38.000Z", "max_issues_repo_path": "obstools/comply/classes.py", "max_issues_repo_name": "paudetseis/OBStools", "max_issues_repo_head_hexsha": "c6c02d8864c25a14f22d1fae17ff5ad911b9ff00", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2019-12-04T02:06:45.000Z", "max_issues_repo_issues_event_max_datetime": "2019-12-06T22:20:19.000Z", "max_forks_repo_path": "obstools/comply/classes.py", "max_forks_repo_name": "paudetseis/OBStools", "max_forks_repo_head_hexsha": "c6c02d8864c25a14f22d1fae17ff5ad911b9ff00", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-02-25T16:51:35.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-25T16:51:35.000Z", "avg_line_length": 37.1054054054, "max_line_length": 79, "alphanum_fraction": 0.5541554374, "include": true, "reason": "import numpy", "num_tokens": 3467}
|
# I/O helper functions on text files
.b = import('../base', attach_operators = FALSE)
#' Like \link{\code{base::file.path}}, but with an optional \code{ext}ension
#' that is appended (dot-separated) to the final path component.
file_path = function (..., ext = NULL, fsep = .Platform$file.sep) {
    parts = list(...)
    if (! is.null(ext)) {
        n = length(parts)
        # A leading dot in `ext` is tolerated: '.csv' and 'csv' are equivalent
        parts[n] = sprintf('%s.%s', parts[n], sub('^\\.', '', ext))
    }
    do.call(base::file.path, c(parts, fsep = fsep))
}
#' Augment \code{\link{utils::read.table}} by a mechanism to guess the file format.
#'
#' For the moment, only separators are handled based on the file extension.
#' This might change in the future to be more powerful, think Python’s
#' \code{csv.Sniffer} class.
#'
#' The result is tagged with class \code{tbl_df} for tibble-style printing.
read_table = function (file, ..., stringsAsFactors = FALSE, na.strings = c(NA, ''), check.names = FALSE) {
    # Capture the full call (including defaults) so it can be rewritten
    # below and re-evaluated in the caller's frame.
    call = .b$match_call_defaults()
    # Connections (and a missing `file`) carry no extension to sniff;
    # delegate directly to read.table.
    if (missing(file) || is_connection(file)) {
        call[[1]] = quote(read.table)
        return(.b$add_class(eval.parent(call), 'tbl_df'))
    }
    # File extension (an optional trailing .gz is ignored) drives the guess
    extension = .b$grep('\\.(\\w+)(\\.gz)?$', file)[1]
    # Only guess a separator when the caller did not give one explicitly
    if (! ('sep' %in% names(call))) {
        separators = list(csv = ',',
                          tsv = '\t',
                          txt = '\t')
        call$sep = separators[[extension]]
    }
    # Excel files are handed to xlsx::read.xlsx, which has no na.strings
    # parameter, so that argument is dropped for them.
    call[[1]] = if (identical(extension, 'xlsx')) {
        call$na.strings = NULL
        quote(xlsx::read.xlsx)
    } else quote(read.table)
    .b$add_class(eval.parent(call), 'tbl_df')
}
#' Write a table to file, guessing the separator from the file extension
#' (mirror of \code{read_table}); behaves like \code{write.table} for
#' connections and stdout.
write_table = function (x, file = '', ..., quote = FALSE, row.names = FALSE) {
    # Capture the full call (including defaults) and retarget it
    call = .b$match_call_defaults()
    call[[1]] = quote(write.table)
    # Only sniff a separator for real file paths when none was supplied
    if (! is_connection(file) && file != '' && ! 'sep' %in% names(call)) {
        extension = .b$grep('\\.(\\w+)(\\.gz)?$', file)[1]
        separators = list(csv = ',',
                          tsv = '\t',
                          txt = '\t')
        call$sep = separators[[extension]]
    }
    # Evaluate in the caller's frame so `x` and friends resolve there
    eval.parent(call)
}
#' Read a table whose first row holds the column names and whose first
#' column holds the row names, returning only the data block.
#'
#' Fix: the declared \code{check.names} parameter was previously ignored;
#' it is now forwarded to \code{read.table}.
read_full_table = function(file, sep="\t", stringsAsFactors=FALSE, check.names=FALSE, ...) {
    index = utils::read.table(file, sep=sep, stringsAsFactors=stringsAsFactors,
                              check.names=check.names, ...)
    # First row and first column are labels, not data
    colnames(index) = index[1,]
    rownames(index) = index[,1]
    index[-1,-1]
}
#' Test whether \code{x} is a connection object (file, URL, textConnection, ...).
is_connection = function (x) inherits(x, 'connection')
|
{"hexsha": "2233457b7d12a1da78cf26e1fac8ff051b9dad62", "size": 2238, "ext": "r", "lang": "R", "max_stars_repo_path": "io/text.r", "max_stars_repo_name": "mschubert/ebits", "max_stars_repo_head_hexsha": "e9c4a3d883fb9fbcbfd4689becca0fe2e5cbdbe5", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-08-20T12:36:29.000Z", "max_stars_repo_stars_event_max_datetime": "2019-08-20T12:36:29.000Z", "max_issues_repo_path": "io/text.r", "max_issues_repo_name": "mschubert/ebits", "max_issues_repo_head_hexsha": "e9c4a3d883fb9fbcbfd4689becca0fe2e5cbdbe5", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 25, "max_issues_repo_issues_event_min_datetime": "2017-01-14T14:16:05.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-24T15:49:11.000Z", "max_forks_repo_path": "io/text.r", "max_forks_repo_name": "mschubert/ebits", "max_forks_repo_head_hexsha": "e9c4a3d883fb9fbcbfd4689becca0fe2e5cbdbe5", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2018-04-18T19:06:36.000Z", "max_forks_repo_forks_event_max_datetime": "2018-04-18T19:06:36.000Z", "avg_line_length": 32.4347826087, "max_line_length": 106, "alphanum_fraction": 0.5580875782, "num_tokens": 603}
|
import numpy as np
import torch
from utils import Tokenizer
embed_size = 300
def create_embedding_matrix(tokenizer, embedding_file,
                            out_path='embedding_matrix2.npy', embed_dim=None):
    """
    Load a pretrained embedding and save/return the matrix of vectors.

    Each line of ``embedding_file`` must look like
    ``<word> <v1> <v2> ... <vd>``. Vocabulary words found in the file get
    their pretrained vector; all other rows stay zero.

    Parameters
    ----------
    tokenizer : object
        Vocabulary exposing ``stoi`` (word -> index dict) and ``__len__``
        (vocabulary size).
    embedding_file : str
        Path to the whitespace-separated pretrained embedding text file.
    out_path : str, optional
        Destination ``.npy`` file (default keeps the previously
        hard-coded ``'embedding_matrix2.npy'``).
    embed_dim : int, optional
        Embedding dimensionality; defaults to the module-level
        ``embed_size``.

    Returns
    -------
    numpy.ndarray
        The ``(len(tokenizer), embed_dim)`` embedding matrix.
    """
    if embed_dim is None:
        embed_dim = embed_size
    # word -> vector lookup built from the pretrained file
    embeddings_index = {}
    with open(embedding_file, encoding='utf8') as f:
        for line in f:
            values = line.rstrip().rsplit(' ')
            word = values[0]
            coefs = np.asarray(values[1:], dtype='float32')
            embeddings_index[word] = coefs
    # Rows default to zero for words without a pretrained vector
    embedding_matrix = np.zeros((len(tokenizer), embed_dim))
    for word, i in tokenizer.stoi.items():
        embedding_vector = embeddings_index.get(word)
        if embedding_vector is not None:
            embedding_matrix[i] = embedding_vector
    with open(out_path, 'wb') as f:
        np.save(f, embedding_matrix)
    return embedding_matrix
# --- ad-hoc sanity-check script ---------------------------------------------
# Loads a saved tokenizer, prints the tokens previously observed to have
# all-zero embedding rows, then re-scans the saved matrix to recount them.
tokenizer = torch.load('./tokenizers/tokenizer_vi_fix_spelling.pth')

# Indices known (from an earlier run) to have no pretrained vector
zero_indexes = [0, 467, 490, 494, 563, 564, 570, 973, 1176, 1281, 1455, 1609, 1610, 1611]
for i in zero_indexes:
    print(tokenizer.itos[i])

# embedding_file = './pretrained_embedding/word2vec_vi_words_300dims.txt'
embedding_file = './pretrained_embedding/cc.vi.300.vec'
# create_embedding_matrix(tokenizer, embedding_file)

# Re-scan the saved matrix and count the rows that are entirely zero
x = np.load('./pretrained_embedding/embedding_matrix2.npy')
zeros = np.zeros((embed_size))
num_zeros = 0
zero_indexes = []  # NOTE: overwrites the hand-written list above
for word, i in tokenizer.stoi.items():
    # zeros[0] is scalar 0.0; the comparison broadcasts over row x[i],
    # so this tests whether the whole row is zero
    if (zeros[0] == x[i]).all():
        num_zeros += 1
        zero_indexes.append(i)
        continue
print(num_zeros)
print(zero_indexes)
|
{"hexsha": "d4c90ea9d3acd0798b9a949145e5e2c30c4e9776", "size": 1598, "ext": "py", "lang": "Python", "max_stars_repo_path": "create_embedding_matrix.py", "max_stars_repo_name": "ngthanhtin/VLSP_ImageCaptioning", "max_stars_repo_head_hexsha": "46a2b430cc07c444fb69609a8c06670de2db8c36", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2021-11-19T17:07:09.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-10T13:58:56.000Z", "max_issues_repo_path": "create_embedding_matrix.py", "max_issues_repo_name": "ngthanhtin/VLSP_ImageCaptioning", "max_issues_repo_head_hexsha": "46a2b430cc07c444fb69609a8c06670de2db8c36", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "create_embedding_matrix.py", "max_forks_repo_name": "ngthanhtin/VLSP_ImageCaptioning", "max_forks_repo_head_hexsha": "46a2b430cc07c444fb69609a8c06670de2db8c36", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-12-10T01:52:46.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-18T10:02:03.000Z", "avg_line_length": 29.0545454545, "max_line_length": 89, "alphanum_fraction": 0.6795994994, "include": true, "reason": "import numpy", "num_tokens": 401}
|
import json
import numpy as np
import re
from collections import defaultdict as dd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.decomposition import PCA
from sklearn.naive_bayes import GaussianNB
from sklearn import svm
#####################preprocessing###################
#####################preprocessing###################
# Read one JSON object per line; collect the text and its language label
fp = open('devfile.json')
data = []
target = []
for line in fp:
    instance = json.loads(line)
    target.append(instance['lang'])
    data.append(instance['text'])

# Language code -> numeric class id
categories = {'ar': 1,
              'bg': 2,
              'de': 3,
              'en': 4,
              'es': 5,
              'fa': 6,
              'fr': 7,
              'he': 8,
              'hi': 9,
              'it': 10,
              'ja': 11,
              'ko': 12,
              'mr': 13,
              'ne': 14,
              'nl': 15,
              'ru': 16,
              'th': 17,
              'uk': 18,
              'ur': 19,
              'zh': 20,
              'unk': 21}

# NOTE(review): `target` is a numpy array of strings at this point, so the
# integer ids assigned below are stored back as strings (e.g. '4'); that
# still works as class labels for scikit-learn, but they are not ints.
target = np.array(target)
for i in range(len(target)):
    target[i] = categories[target[i]]
print(target)

data = np.array(data)
# Character n-gram (2..6) hashing features, then TF-IDF weighting
hv = HashingVectorizer(n_features=100, token_pattern=r'\b\w+\b',ngram_range=(2,6), analyzer='char_wb')
X = hv.transform(data).toarray()
transformer = TfidfTransformer(smooth_idf=False)
X2 = transformer.fit_transform(X).toarray()

########################classifier starts now###################
##'''GaussianNB'''
##clf = GaussianNB()
##clf.fit(X2, target)
##score = clf.score(X2, target)
##print('NB score = '+str(score))

'''Decision tree'''
##one_r = DecisionTreeClassifier()
##one_r.fit(X2[:8000], target[:8000])
##score = one_r.score(X2[8000:], target[8000:])
##print('Decision tree score = '+str(score))

'''SVM'''
# NOTE(review): the SVM is scored on its own training data, so this is a
# training accuracy, not a held-out evaluation.
clf = svm.SVC()
clf.fit(X2, target)
score = clf.score(X2, target)
print('SVM score = '+str(score))
|
{"hexsha": "9ada6536d35ac6100be6130e7a182a5f59d251e5", "size": 2098, "ext": "py", "lang": "Python", "max_stars_repo_path": "allthree.py", "max_stars_repo_name": "abigailyuan/LIDproj", "max_stars_repo_head_hexsha": "3e34c4d78b89c9513182ab064dc4b3858f59a1d2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "allthree.py", "max_issues_repo_name": "abigailyuan/LIDproj", "max_issues_repo_head_hexsha": "3e34c4d78b89c9513182ab064dc4b3858f59a1d2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "allthree.py", "max_forks_repo_name": "abigailyuan/LIDproj", "max_forks_repo_head_hexsha": "3e34c4d78b89c9513182ab064dc4b3858f59a1d2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.3513513514, "max_line_length": 102, "alphanum_fraction": 0.5843660629, "include": true, "reason": "import numpy", "num_tokens": 525}
|
import params
import category_theory.category.basic
import category_theory.core
open params
open category_theory
namespace operations

variable [category (bitvec word_len)]

/-!
# Operations

Building-block operations.

The salsa20 cipher is built fully with add-rotate-XOR operations.

## Building-block operations and the relation with their inverses
-/

/-- Rotate operation implemented as https://github.com/alexwebr/salsa20/blob/master/salsa20.c#L6 -/
-- NOTE(review): combines a left shift with a complementary right shift;
-- assumes 0 < shift < word_len (at the boundaries both halves are zero) — confirm
def rotl : bitvec word_len → ℕ → bitvec word_len
| a shift := (a.shl shift).or (a.ushr (word_len - shift))

/-- Inverse of the rotate operation (`rotl`): rotates right by the same shift. -/
def rotl_inv : bitvec word_len → ℕ → bitvec word_len
| a shift := (a.ushr shift).or (a.shl (word_len - shift))

local notation `rotl⁻¹` := rotl_inv

/-- `rotl⁻¹` after `rotl` produces the identity. -/
lemma rotl_inv_is_inverse_of_rotl (I : rotl ≅ rotl⁻¹): I.hom ≫ I.inv = 𝟙 rotl :=
begin
  exact I.hom_inv_id',
end

/-- Bitwise modulo addition implemented as https://stackoverflow.com/a/19760152 -/
-- Add then mask with the all-ones bitvector to stay within word_len bits
def mod : bitvec word_len → bitvec word_len → bitvec word_len
| a b := (bitvec.and (a + b) (max_bitvec))

/-- The salsa20 xor operation is just bitwise xor. -/
def xor : bitvec word_len → bitvec word_len → bitvec word_len
| a b := a.xor b

/-- `xor` after `xor` produces the identity (xor is an involution). -/
lemma xor_is_inverse_of_xor (I : xor ≅ xor): I.hom ≫ I.inv = 𝟙 xor :=
begin
  exact I.hom_inv_id',
end

-- Some notation:
notation ` ZERO ` := bitvec.zero word_len
infix ` ROTL ` : 90 := rotl
infix ` ROTL⁻¹ `: 90 := rotl_inv
infix ` MOD ` : 90 := mod
infix ` XOR ` : 90 := xor

/-! ## Operation as a combination of building block operations -/

/-- We split the salsa20 operations in 2 terms, one at each side of the XOR. This is the right hand side. -/
def operation_rhs (b c : bitvec word_len) (shift : ℕ ): bitvec word_len := (b MOD c) ROTL shift

/-- With the split done in `operation_rhs`, an operation is just a XOR of 2 bitvectors. -/
def operation : bitvec word_len → bitvec word_len → bitvec word_len
| a b := a XOR b

/-! ## Operation lemmas -/

-- some notation for operations:
infix ` OP ` : 90 := operation
notation `OP_RHS` := operation_rhs

/-- OP is just XOR, so each operation is its own inverse. -/
lemma operation_inverse (I : operation ≅ operation) : I.hom ≫ I.inv = 𝟙 operation :=
by rw [iso.hom_inv_id]

end operations
|
{"author": "oxarbitrage", "repo": "salsa20", "sha": "12d0ebb3c27801931e61d470fb2ed548a5562578", "save_path": "github-repos/lean/oxarbitrage-salsa20", "path": "github-repos/lean/oxarbitrage-salsa20/salsa20-12d0ebb3c27801931e61d470fb2ed548a5562578/src/operations.lean"}
|
# Convert normal fields (given as FieldsTower objects with Galois group
# isomorphic to G) into non-normal subfields of degree `deg`: pick a
# core-free subgroup of the right index and return its fixed field for
# each tower in `l`.
function to_non_normal(l::Vector{FieldsTower}, G::GAP.GapObj, deg::Int)
  # Ensure automorphisms and an explicit isomorphism with G are cached
  for x in l
    assure_automorphisms(x)
    assure_isomorphism(x, G)
  end
  lC = GAP.Globals.ConjugacyClassesSubgroups(G)
  # Search for a subgroup of order degree(field)/deg with trivial core
  # (trivial core makes the coset action faithful)
  ind = 0
  for i = 1:length(lC)
    r = GAP.Globals.Representative(lC[i])
    if GAP.Globals.Size(r) == divexact(degree(l[1].field), deg) && GAP.Globals.Size(GAP.Globals.Core(G, r)) == 1
      ind = i
      break
    end
  end
  if iszero(ind)
    error("Representation not possible")
  end
  rep = GAP.Globals.Representative(lC[ind])
  # Fixed field of the chosen subgroup for every tower
  ffields = Vector{AnticNumberField}(undef, length(l))
  for i = 1:length(ffields)
    ffields[i] = fixed_field(l[i], rep)
  end
  return ffields
end
# Fixed field of the subgroup H (a GAP group) inside the number field of
# tower x, using the stored isomorphism x.isomorphism between the field
# automorphisms and the abstract group.
function fixed_field(x::FieldsTower, H::GAP.GapObj)
  gH = GAP.Globals.SmallGeneratingSet(H)
  auts = NfToNfMor[]
  found = 0
  D = x.isomorphism
  autsx = automorphisms(number_field(x))
  i = 0
  # Collect the field automorphisms that map onto the chosen generators
  # of H; stops once one automorphism per generator has been found.
  # NOTE(review): assumes each generator is hit by exactly one
  # automorphism before autsx is exhausted — confirm.
  while length(gH) != found
    i += 1
    if D[autsx[i]] in gH
      push!(auts, autsx[i])
      found += 1
    end
  end
  return fixed_field(number_field(x), auts)[1]
end
# Bound for the discriminant of the Galois closure of a degree-n field
# whose Galois group is the solvable transitive group (n, i), given a
# bound `disc` on the field discriminant.
function _find_discriminant_bound(n, i, disc)
  Gt = GAP.Globals.TransitiveGroup(n, i)
  @assert GAP.Globals.IsSolvable(Gt)
  id = GAP.Globals.IdGroup(Gt)
  G1 = GAP.Globals.SmallGroup(id)
  lC = GAP.Globals.ConjugacyClassesSubgroups(G1)
  # Locate the conjugacy class of core-free index-n subgroups whose coset
  # action reproduces the transitive group (n, i).
  ind = 0
  for k = 1:length(lC)
    H = GAP.Globals.Representative(lC[k])
    if GAP.Globals.Index(G1, H) != n
      continue
    end
    core = GAP.Globals.Core(G1, H)
    if !isone(GAP.Globals.Size(core))
      continue
    end
    mp = GAP.Globals.FactorCosetAction(G1, H)
    idT = GAP.Globals.TransitiveIdentification(GAP.Globals.Image(mp))
    if idT == i
      # BUG FIX: store the class index k; the old code stored the
      # transitive id `i`, so lC[ind] could pick the wrong class.
      ind = k
      break
    end
  end
  cg = lC[ind]
  H = GAP.Globals.Representative(cg)
  conjs = GAP.Globals.Elements(cg)
  #I check if it is a Frobenius group!
  # (distinct point stabilizers must intersect trivially; loop variables
  # renamed so they no longer shadow the argument `i`)
  isfrobenius = true
  for p = 1:length(conjs)
    for q = p+1:length(conjs)
      if GAP.Globals.Size(GAP.Globals.Intersection(conjs[p], conjs[q])) != 1
        isfrobenius = false
        break
      end
    end
    if !isfrobenius
      break
    end
  end
  if isfrobenius
    @show "Frobenius!"
    #In this case, we can find a better bound for the closure!
    m = divexact(id[1], n)
    bdisc = disc^(2*m*n-m)
    return root(bdisc, n-1)
  end
  # Otherwise: count how many conjugates are needed before the common
  # intersection becomes trivial; that count scales the exponent.
  j = 1
  for p = 2:length(conjs)
    H = GAP.Globals.Intersection(H, conjs[p])
    j += 1
    if GAP.Globals.Size(H) == 1
      break
    end
  end
  return disc^(j*divexact(id[1], n))
end
# All fields of degree n with Galois group the solvable transitive group
# (n, i) and absolute discriminant at most `disc`: enumerate the normal
# closures up to a derived bound, then cut down to the non-normal
# subfields satisfying the requested bound.
function fields_transitive_group(n::Int, i::Int, disc::fmpz)
  Gt = GAP.Globals.TransitiveGroup(n, i)
  @assert GAP.Globals.IsSolvable(Gt)
  id = GAP.Globals.IdGroup(Gt)
  G1 = GAP.Globals.SmallGroup(id)
  # Discriminant bound for the normal closure (@show left in as progress output)
  @show disc_NC = _find_discriminant_bound(n, i, disc)
  lf = fields(id[1], id[2], disc_NC)
  ln = to_non_normal(lf, G1, n)
  # Keep only the fields that actually satisfy the requested bound
  indices = Int[]
  for i = 1:length(ln)
    if abs(discriminant(maximal_order(ln[i]))) <= disc
      push!(indices, i)
    end
  end
  return ln[indices]
end
|
{"hexsha": "79f271864c92bfdb72e56f3ebb7c168bbdcc6174", "size": 2950, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/FieldFactory/non_normal.jl", "max_stars_repo_name": "lgoe-ac/Hecke.jl", "max_stars_repo_head_hexsha": "27a6f75d0174a9e3db2480e1f835f62ae65befc6", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 151, "max_stars_repo_stars_event_min_datetime": "2015-11-05T06:18:31.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T09:10:46.000Z", "max_issues_repo_path": "src/FieldFactory/non_normal.jl", "max_issues_repo_name": "lgoe-ac/Hecke.jl", "max_issues_repo_head_hexsha": "27a6f75d0174a9e3db2480e1f835f62ae65befc6", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 431, "max_issues_repo_issues_event_min_datetime": "2016-03-28T16:27:44.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T10:33:13.000Z", "max_forks_repo_path": "src/FieldFactory/non_normal.jl", "max_forks_repo_name": "lgoe-ac/Hecke.jl", "max_forks_repo_head_hexsha": "27a6f75d0174a9e3db2480e1f835f62ae65befc6", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 47, "max_forks_repo_forks_event_min_datetime": "2016-03-28T15:09:10.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-05T18:23:54.000Z", "avg_line_length": 25.2136752137, "max_line_length": 112, "alphanum_fraction": 0.6413559322, "num_tokens": 1004}
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
###
# @file aggregate_output.py
#
# @brief Aggregates multiple EINSim outputs into one coalesced file
#
# @author Minesh Patel
# Contact: minesh.patelh@gmail.com
import sys
import os
import argparse
import random
import numpy as np
import json
# project files
import utils
def callback(data, ecc_code, raw_data):
    """
    Accumulate one parsed EINSim result into ``raw_data``.

    Results are bucketed by their ``uid_ignoring_fields`` key so that runs
    differing only in the fields being combined land in the same list.

    :param data: parsed result dictionary for a single simulation run
    :param ecc_code: ECC code of this run (unused; kept to match the
        ``parse_files_incremental`` callback signature)
    :param raw_data: dict mapping uid -> list of result dicts (mutated in place)
    """
    # setdefault replaces the membership-test-then-insert dance
    raw_data.setdefault(data['uid_ignoring_fields'], []).append(data)
def aggregate_einsim_results(infiles, outfile, fields_to_combine, print_stdout, graph, bootstrap):
    """
    Parse multiple EINSim output files and coalesce them into one result set.

    :param infiles: list of EINSim output filenames to parse
    :param outfile: output filename for the aggregated data, or None
    :param fields_to_combine: field names to merge across runs
    :param print_stdout: whether to echo the aggregated output to stdout
    :param graph: whether to plot the aggregated results
    :param bootstrap: number of bootstrap samples per round for confidence
        intervals, or None/0 to skip bootstrapping
    """
    if not outfile and not print_stdout and not graph:
        print("[ERROR] must either output to file, stdout, or graph!!")
        sys.exit(-1)

    # check if the user specified an output file. if not, stdout will be used
    if outfile and os.path.isfile(outfile):
        print("[ERROR] outfile already exists! will not crush it")
        sys.exit(-1)

    # parse the input files
    raw_data = {}
    print("Parsing", len(infiles), "files...")
    # raw_data = utils.parse_all_files(infiles, experimental=False)
    ecc_codes = utils.parse_files_incremental(infiles, False, fields_to_combine, callback, raw_data)
    # reverse map uid -> code (currently unused below)
    ecc_codes_by_uid = {ecc_codes[code]['uid'] : code for code in ecc_codes}

    # coalesce the input data
    f = open(outfile, 'w') if outfile else None
    for uid_if, data in raw_data.items():
        combined_data = utils.combine_runs(data, fields_to_combine)
        out_str = utils.get_output_string(combined_data)
        if f: f.write(out_str)
        if print_stdout: sys.stdout.write(out_str)
    if f: f.close()

    # can plot the output data if desired
    if graph:
        import plot_einsim_results
        plot_einsim_results.plot(raw_data)

    # bootstrap the data and generate CIs
    if bootstrap:
        # print the ECC code
        assert len(ecc_codes) == 1, "can only bootstrap for one ECC code!"
        print("[ECC]", json.dumps(list(ecc_codes.values())[0]))

        # print the statistic
        # NOTE(review): `combined_data` holds the value from the *final*
        # iteration of the loop above; this relies on the single-ECC-code
        # assert meaning there is effectively one uid — confirm
        combined_statistic = combined_data['observations']['PER_BIT_ERROR_COUNT']['hist_databurst']
        print("[DATA] COMBINED:", np.array(combined_statistic) / sum(combined_statistic))

        # compute the CIs
        for uid_if, data in raw_data.items():
            nbs = int(1e9)
            print("[INFO] bootstrapping", nbs, "rounds of nsamples:", bootstrap, "from uid:", uid_if, "with", len(data), "data points to generate CIs")
            assert bootstrap <= len(data), "cannot take more bootstrap samples than there are data points"
            ndata = len(data)
            for i in range(bootstrap):
                # take a bootstrap sample - subsample ``bootstrap'' values from the dataset
                databurst_errors = np.array([0 for _ in data[0]['observations']['PER_BIT_ERROR_COUNT']['hist_databurst']], dtype=int)
                codeburst_errors = np.array([0 for _ in data[0]['observations']['PER_BIT_ERROR_COUNT']['hist_codeburst']], dtype=int)

                # determine how many samples fall in the nonzero (i.e., data) range - rest are 0, so they don't need any work
                n_in_range = np.random.binomial(nbs, ndata / float(nbs))
                print("[DEBUG] drawing", n_in_range, "samples from the data: {0:.2f}".format(100.0 * i / float(bootstrap - 1)) + '% complete')
                for s in range(n_in_range):
                    # sample with replacement from the parsed runs
                    sample_idx = random.randint(0, len(data) - 1)
                    databurst_errors += data[sample_idx]['observations']['PER_BIT_ERROR_COUNT']['hist_databurst']
                    codeburst_errors += data[sample_idx]['observations']['PER_BIT_ERROR_COUNT']['hist_codeburst']

                # compute the statistic
                print('[BOOTSTRAP]', (list(databurst_errors), list(codeburst_errors)))
if __name__ == '__main__':
    # Command-line entry point: collect input files (explicit and/or from a
    # directory scan), sanity-check them, and aggregate.
    parser = argparse.ArgumentParser(description='Apply the EIN inference methodology using the results of EINSim')
    parser.add_argument(dest='input_filenames', type=str, nargs='*',
                        help='input data files to parse (i.e., output file(s) of EINSim)')
    parser.add_argument('-o', '--outfile', type=str, default=None,
                        help='output filename for aggregated input data')
    parser.add_argument('-d', '--dirname', type=str, default=None,
                        help='local directory containing input files (scanned recursively)')
    parser.add_argument('-b', '--bootstrap', type=int, default=None,
                        help='number of boostrap samples to create confidence intervals (requires single-sample outputs)')
    parser.add_argument('-s', '--suppress-output', action='store_true', default=False,
                        help='suppress output to stdout (will still go to file if requested)')
    parser.add_argument('-c', '--combine', type=str, action='append',
                        help='fields to combine when analyzing multiple dumps')
    parser.add_argument('-g', '--graph', action='store_true', default=False,
                        help='enable graphing')
    args = parser.parse_args(sys.argv[1:])

    # list of all input files
    all_filenames = args.input_filenames

    # if a directory was specified, recurse into it and add all files to the list
    if args.dirname:
        for dp, dn, fnames in os.walk(args.dirname):
            for f in fnames:
                all_filenames.append(os.path.join(dp, f))

    if len(all_filenames) == 0:
        print("[ERROR] must have at least one input file to parse")
        sys.exit(-1)
    print("[INFO] parsing", len(all_filenames), "input files")

    # test all input files for existence (sanity)
    clean_filenames = []
    for fname in all_filenames:
        if not os.path.isfile(fname):
            print("[ERROR] invalid input filename: \"" + fname + "\"")
            sys.exit(-1)
        else:
            clean_filenames.append(fname)

    # reduce and output the results as required
    # (argparse leaves --combine as None when the flag is never given)
    if not args.combine:
        args.combine = []
    aggregate_einsim_results(clean_filenames, args.outfile, args.combine, not args.suppress_output, args.graph, args.bootstrap)
|
{"hexsha": "2dd70c93dfda439878b3d9bee932523c697a2408", "size": 6203, "ext": "py", "lang": "Python", "max_stars_repo_path": "script/utils/aggregate_output.py", "max_stars_repo_name": "CMU-SAFARI/EINSim", "max_stars_repo_head_hexsha": "f3e782658dd19070ca0e85fcc422014c2283280e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2019-06-03T11:18:56.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-15T02:18:53.000Z", "max_issues_repo_path": "script/utils/aggregate_output.py", "max_issues_repo_name": "CMU-SAFARI/EINSim", "max_issues_repo_head_hexsha": "f3e782658dd19070ca0e85fcc422014c2283280e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "script/utils/aggregate_output.py", "max_forks_repo_name": "CMU-SAFARI/EINSim", "max_forks_repo_head_hexsha": "f3e782658dd19070ca0e85fcc422014c2283280e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-07-13T06:34:03.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-18T09:37:07.000Z", "avg_line_length": 46.6390977444, "max_line_length": 152, "alphanum_fraction": 0.643559568, "include": true, "reason": "import numpy", "num_tokens": 1408}
|
import csv
import numpy as np
import os
def writePoints(points, ptsFileName):
    """Write a 2-D sequence of points to ``ptsFileName`` as space-delimited
    text, one point per row.

    Fixes: the file handle is now closed deterministically via a context
    manager (previously it was left for the garbage collector), and the
    file is opened with ``newline=''`` as the csv module requires.
    """
    with open(ptsFileName, 'w', newline='') as f:
        csv.writer(f, delimiter=' ').writerows(points)

def batchWritePoints(batchPoints, outputDir):
    """Write each slice ``batchPoints[i]`` of a (batch, n, d) array to
    ``<outputDir>/<i>.pts`` via :func:`writePoints`."""
    for i in range(batchPoints.shape[0]):
        writePoints(batchPoints[i, :, :], os.path.join(outputDir, str(i) + ".pts"))
|
{"hexsha": "4cae3f00604e930bc0aed31db8d22d91a338048c", "size": 313, "ext": "py", "lang": "Python", "max_stars_repo_path": "Utils/writer.py", "max_stars_repo_name": "sanjeevmk/GLASS", "max_stars_repo_head_hexsha": "91c0954eab87d25d4866fea5c338f79fbca4f79e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2022-03-22T17:36:14.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-27T05:03:39.000Z", "max_issues_repo_path": "Utils/writer.py", "max_issues_repo_name": "sanjeevmk/glass", "max_issues_repo_head_hexsha": "91c0954eab87d25d4866fea5c338f79fbca4f79e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Utils/writer.py", "max_forks_repo_name": "sanjeevmk/glass", "max_forks_repo_head_hexsha": "91c0954eab87d25d4866fea5c338f79fbca4f79e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.3, "max_line_length": 77, "alphanum_fraction": 0.7284345048, "include": true, "reason": "import numpy", "num_tokens": 74}
|
! Predictor-step section update for reach j: sweeps the nodes from
! downstream (ncomp) to upstream (1), interpolating the hydraulic
! properties (water-surface elevation, wetted perimeter, hydraulic radius,
! conveyance, top width, I1/I2 pressure-force terms) at the predicted flow
! area `areap`, and evaluates the bed-slope/friction source terms.
subroutine secpred(j)

    use constants_module
    use arrays_module
    use var_module
    use arrays_section_module
    use xsec_attribute_module
    use subtools

    implicit none

    ! Reach index
    integer, intent(in) :: j

    ! Locals
    integer :: i, pp, tableLength
    real(kind=4) :: beds, fs, hy, yyn, yyn_1, temp1, temp2, new_I2
    real(kind=4) :: xt, q_sk_multi, currentQ
    real(kind=4) :: r_interpo_nn

    !change 20191122
    ci1=0
    ci2=0

    do i=ncomp,1,-1

        ! Before loading node i's tables, remember the tables/depth of the
        ! previously processed (downstream) node i+1 for the I2 average below
        if(i .lt. ncomp) then
            downstreamI2Tablep = I2Tablep
            downstreamSquareDepth = currentSquareDepth
            yyn_1 = yyn
        end if
        ! ----------------------------------------------------
        ! Load the pre-tabulated cross-section attributes for node (i, j)
        elevTable = xsec_tab(1,:,i,j)
        areaTable = xsec_tab(2,:,i,j)
        pereTable = xsec_tab(3,:,i,j)
        rediTable = xsec_tab(4,:,i,j)
        convTable = xsec_tab(5,:,i,j)
        topwTable = xsec_tab(6,:,i,j)
        nwi1Table = xsec_tab(7,:,i,j)
        dPdATable = xsec_tab(8,:,i,j)
        ! Squared depth grid used as abscissa for the I1/I2 interpolations
        currentSquareDepth=(elevTable-z(i,j))**2
        I2Tablep = xsec_tab(9,:,i,j)
        I2Tablec = xsec_tab(10,:,i,j)

        ! interpolate the cross section attributes based on predicted area
        xt=areap(i,j)

        ! NEW CHANGE: 20191119
        ! To get rid of negative predicted area
        if (areap(i,j) .le. TOLERANCE) then
            ! Clamp to a small positive stage (5% of the first table interval)
            yyn = elevTable(1)+(elevTable(2)-elevTable(1))/100*5
            !areap(i) = 0.01
            print*, 'At i = ', i , ' pred area is negative'
        else
            call r_interpol(areaTable,elevTable,nel,xt,yyn)
        end if
        depth(i) = yyn - z(i,j)

        ! Interpolate the remaining attributes at the water-surface elevation
        xt = yyn
        call r_interpol(elevTable,pereTable,nel,xt,pere(i,j))
        call r_interpol(elevTable,rediTable,nel,xt,hy)
        !co(i) =r_interpol(elevTable,convTable,nel,xt)
        ! Conveyance is interpolated on a cubed-depth grid
        currentCubicDepth=(elevTable-z(i,j))**3
        call r_interpol(currentCubicDepth,convTable,nel,(xt-z(i,j))**3.0,co(i))
        call r_interpol(elevTable,topwTable,nel,xt,bo(i,j))

        ! ci1(i) =r_interpol(areaTable,nwi1Table,nel,xt)
        ! I1 is interpolated on the squared-depth grid
        call r_interpol(currentSquareDepth,nwi1Table,nel,(depth(i))**2,ci1(i))
        call r_interpol(elevTable,dPdATable,nel,xt,dpda(i))

        ! Apply the discharge-dependent conveyance multiplier
        currentQ = qp(i,j)
        call calc_q_sk_multi(i,j,currentQ,q_sk_multi)
        co(i) = q_sk_multi*co(i)
        ! ----------------------------------------------------

        if(i .lt. ncomp) then
            ! I2 opposite direction calculated as interpolation start
            ! (average of the current node's value and the downstream one)
            xt=areap(i+1,j)
            call r_interpol(currentSquareDepth,I2Tablec,nel,(yyn-z(i,j))**2,temp1)
            call r_interpol(downstreamSquareDepth,downstreamI2Tablep, nel,(yyn_1-z(i+1,j))**2,temp2)
            new_I2 = (temp1+temp2)/2.0
            ! I2 opposite direction calculated as interpolation end

            if(ityp(i) == 1) then
                ci2(i)=new_I2
                ! Bed slope and averaged friction slope between nodes i, i+1
                beds=(z(i,j)-z(i+1,j))/dx(i,j)
                fs=f*0.5*qp(i,j)*abs(qp(i,j))/(co(i)**2)+f*0.5*qp(i+1,j)*abs(qp(i+1,j))/(co(i+1)**2)
                aso(i)=(areap(i,j)+areap(i+1,j))/2.0*(beds-fs)
                gso(i)=grav*(beds-fs)
                dbdx(i)=(bo(i+1,j)-bo(i,j))/dx(i,j)
            end if
        end if

        !!! new for dkdh
        ! do pp = 2,nel
        !     if (yyn .le. elevTable(pp)) then
        !         dkdh(i) =(convTable(pp)-convTable(pp-1))/(elevTable(pp)-elevTable(pp-1))
        !         EXIT
        !     endif
        ! end do
    end do

    ! No i+1 neighbor at the downstream end; copy the adjacent value
    gso(ncomp)=gso(ncomp-1)

end subroutine secpred
|
{"hexsha": "05d5e4d1886ab80e78653da3a88bf53430b166e3", "size": 3447, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src_combined_Y_network/secpred.f90", "max_stars_repo_name": "MESH-Team/MESH_Code_irregular", "max_stars_repo_head_hexsha": "1edd0b60a8b2ccd95ec92b4fc7e381193cf6b936", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-01-30T22:53:41.000Z", "max_stars_repo_stars_event_max_datetime": "2020-01-30T22:53:41.000Z", "max_issues_repo_path": "src_combined_Y_network/secpred.f90", "max_issues_repo_name": "MESH-Team/MESH_Code_irregular", "max_issues_repo_head_hexsha": "1edd0b60a8b2ccd95ec92b4fc7e381193cf6b936", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-10-16T21:11:19.000Z", "max_issues_repo_issues_event_max_datetime": "2019-10-17T22:21:51.000Z", "max_forks_repo_path": "src_combined_Y_network/secpred.f90", "max_forks_repo_name": "MESH-Team/MESH_Code_irregular", "max_forks_repo_head_hexsha": "1edd0b60a8b2ccd95ec92b4fc7e381193cf6b936", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-01-06T17:11:11.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-28T22:36:23.000Z", "avg_line_length": 32.5188679245, "max_line_length": 100, "alphanum_fraction": 0.5517841601, "num_tokens": 1110}
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader, Dataset
torch.multiprocessing.set_sharing_strategy('file_system')
from tqdm import tqdm
import numpy as np
import os
from os.path import join, basename
from boltons.fileutils import iter_find_files
import soundfile as sf
import librosa
import pickle
from multiprocessing import Pool
import random
import torchaudio
import math
from torchaudio.datasets import LIBRISPEECH
def collate_fn_padd(batch):
    """Collate a batch of variable-length examples.

    Each element of ``batch`` is a 5-tuple
    ``(spect, seg, label, length, fname)``. Spectrograms are zero-padded to
    the longest one in the batch; segments, labels and filenames are
    returned as plain lists, and lengths as a LongTensor.
    """
    columns = list(zip(*batch))
    spects = list(columns[0])
    segs = list(columns[1])
    labels = list(columns[2])
    lengths = torch.LongTensor(columns[3])
    fnames = list(columns[4])

    padded_spects = torch.nn.utils.rnn.pad_sequence(spects, batch_first=True)
    return padded_spects, segs, labels, lengths, fnames
def spectral_size(wav_len, layers=None):
    """Compute the output length of the convolutional feature extractor.

    Applies the standard 1-d convolution length formula
    ``floor((L + 2*pad - (kernel - 1) - 1) / stride + 1)`` once per layer.

    :param wav_len: input waveform length in samples.
    :param layers: optional list of ``(kernel, stride, padding)`` tuples;
        defaults to the conv stack used by this project (last layer stride 1).
    :return: number of spectral frames produced for a ``wav_len``-sample input.
    """
    if layers is None:
        layers = [(10, 5, 0), (8, 4, 0), (4, 2, 0), (4, 2, 0), (4, 1, 0)]
    for kernel, stride, padding in layers:
        wav_len = math.floor((wav_len + 2 * padding - (kernel - 1) - 1) / stride + 1)
    return wav_len
def get_subset(dataset, percent):
    """Return a random subset containing ``percent`` of ``dataset``.

    The subset size is ``int(len(dataset) * percent)``; the complementary
    split is discarded.
    """
    keep = int(len(dataset) * percent)
    drop = len(dataset) - keep
    subset, _ = torch.utils.data.random_split(dataset, [keep, drop])
    return subset
class WavPhnDataset(Dataset):
    """Base dataset over a directory tree of ``.wav`` files with matching ``.phn`` files."""

    def __init__(self, path):
        self.path = path
        self.data = list(iter_find_files(self.path, "*.wav"))
        super(WavPhnDataset, self).__init__()

    @staticmethod
    def get_datasets(path):
        # concrete split policies are provided by the subclasses below
        raise NotImplementedError

    def process_file(self, wav_path):
        # NOTE(review): str.replace swaps *every* "wav" occurrence in the path
        # (directory names included) — presumably intentional for this corpus
        # layout; confirm before reusing elsewhere.
        phn_path = wav_path.replace("wav", "phn")

        # load audio; keep only the first channel
        signal, sr = torchaudio.load(wav_path)
        signal = signal[0]
        n_samples = len(signal)
        n_frames = spectral_size(n_samples)
        samples_per_frame = (n_samples / n_frames)

        # load labels -- segmentation and phonemes
        with open(phn_path, "r") as f:
            lines = [line.split(" ") for line in f.readlines()]

        # segment end times converted from samples to spectral frames;
        # the final end time is dropped (utterance end is not a boundary)
        times = torch.FloatTensor([int(int(line[1]) / samples_per_frame) for line in lines])[:-1]
        # phoneme label per segment (K boundaries <-> K+1 phonemes)
        phonemes = [line[2].strip() for line in lines]

        return signal, times.tolist(), phonemes, wav_path

    def __getitem__(self, idx):
        signal, seg, phonemes, fname = self.process_file(self.data[idx])
        return signal, seg, phonemes, spectral_size(len(signal)), fname

    def __len__(self):
        return len(self.data)
class TrainTestDataset(WavPhnDataset):
    """Split layout with only ``train/`` and ``test/``; validation is carved from train."""

    def __init__(self, path):
        super(TrainTestDataset, self).__init__(path)

    @staticmethod
    def get_datasets(path, val_ratio=0.1):
        train_ds = TrainTestDataset(join(path, 'train'))
        test_ds = TrainTestDataset(join(path, 'test'))

        # carve a validation split off the training set
        n_total = len(train_ds)
        n_train = int(n_total * (1 - val_ratio))
        train_ds, val_ds = torch.utils.data.random_split(train_ds, [n_train, n_total - n_train])

        # random_split returns Subset objects; re-attach the source path
        train_ds.path = join(path, 'train')
        val_ds.path = join(path, 'train')

        return train_ds, val_ds, test_ds
class TrainValTestDataset(WavPhnDataset):
    """Split layout with explicit ``train/``, ``val/`` and ``test/`` sub-directories."""

    def __init__(self, paths):
        super(TrainValTestDataset, self).__init__(paths)

    @staticmethod
    def get_datasets(path, percent=1.0):
        train_ds = TrainValTestDataset(join(path, 'train'))
        if percent != 1.0:
            # optionally train on a random fraction of the training data;
            # the resulting Subset needs its source path re-attached
            train_ds = get_subset(train_ds, percent)
            train_ds.path = join(path, 'train')
        val_ds = TrainValTestDataset(join(path, 'val'))
        test_ds = TrainValTestDataset(join(path, 'test'))
        return train_ds, val_ds, test_ds
class LibriSpeechDataset(LIBRISPEECH):
    """Thin wrapper around torchaudio's LIBRISPEECH for unsupervised use (no labels).

    NOTE(review): inherits from LIBRISPEECH but never calls ``super().__init__``;
    it only wraps an inner instance, so the parent's own state stays uninitialised.
    """

    def __init__(self, path, subset, percent):
        self.libri_dataset = LIBRISPEECH(path, url=subset, download=False)
        if percent != 1.0:
            self.libri_dataset = get_subset(self.libri_dataset, percent)
        self.path = path

    def __getitem__(self, idx):
        # LIBRISPEECH items are (wav, sr, utt, spk_id, chp_id, utt_id);
        # keep only the first channel of the waveform, no segmentation labels
        wav = self.libri_dataset[idx][0][0]
        return wav, None, None, spectral_size(len(wav)), None

    def __len__(self):
        return len(self.libri_dataset)
class MixedDataset(Dataset):
    """Concatenation of two datasets: indices ``[0, len(ds1))`` map to ``ds1``, the rest to ``ds2``."""

    def __init__(self, ds1, ds2):
        self.ds1 = ds1
        self.ds2 = ds2
        self.path = f"{ds1.path}+{ds2.path}"
        self.ds1_len, self.ds2_len = len(ds1), len(ds2)

    def __len__(self):
        return self.ds1_len + self.ds2_len

    def __getitem__(self, idx):
        # fall through to the second dataset once the first is exhausted
        if idx >= self.ds1_len:
            return self.ds2[idx - self.ds1_len]
        return self.ds1[idx]
|
{"hexsha": "65ac236c1753d65908320a38c444a197d60cb2ce", "size": 5157, "ext": "py", "lang": "Python", "max_stars_repo_path": "dataloader.py", "max_stars_repo_name": "YaelSegal/UnsupSeg", "max_stars_repo_head_hexsha": "a8657565967e871064118d1ce2b452c033d05c50", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "dataloader.py", "max_issues_repo_name": "YaelSegal/UnsupSeg", "max_issues_repo_head_hexsha": "a8657565967e871064118d1ce2b452c033d05c50", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "dataloader.py", "max_forks_repo_name": "YaelSegal/UnsupSeg", "max_forks_repo_head_hexsha": "a8657565967e871064118d1ce2b452c033d05c50", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.6380368098, "max_line_length": 140, "alphanum_fraction": 0.6507659492, "include": true, "reason": "import numpy", "num_tokens": 1348}
|
[STATEMENT]
lemma IO_language : "IO M q t \<subseteq> language_state M q"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. IO M q t \<subseteq> LS M q
[PROOF STEP]
by (metis atc_reaction_path IO.elims language_state mem_Collect_eq subsetI)
|
{"llama_tokens": 94, "file": "Adaptive_State_Counting_ATC_ATC", "length": 1}
|
[STATEMENT]
lemma swap_apply[simp]: "swap (a \<otimes>\<^sub>u b) = (b \<otimes>\<^sub>u a)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. swap (a \<otimes>\<^sub>u b) = b \<otimes>\<^sub>u a
[PROOF STEP]
unfolding swap_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (Snd;Fst) (a \<otimes>\<^sub>u b) = b \<otimes>\<^sub>u a
[PROOF STEP]
by (simp add: Axioms.register_pair_apply Fst_def Snd_def tensor_update_mult)
|
{"llama_tokens": 181, "file": "Registers_Laws", "length": 2}
|
import numpy as np
import torch
# Per-joint rotation limits: for each named joint, [min, max] for each of
# the three rotation axes (x, y, z).
Ranges = {
    'pelvis': [[0, 0], [0, 0], [0, 0]],
    'pelvis0': [[-0.3, 0.3], [-1.2, 0.5], [-0.1, 0.1]],
    'spine': [[-0.4, 0.4], [-1.0, 0.9], [-0.8, 0.8]],
    'spine0': [[-0.4, 0.4], [-1.0, 0.9], [-0.8, 0.8]],
    'spine1': [[-0.4, 0.4], [-0.5, 1.2], [-0.4, 0.4]],
    'spine3': [[-0.5, 0.5], [-0.6, 1.4], [-0.8, 0.8]],
    'spine2': [[-0.5, 0.5], [-0.4, 1.4], [-0.5, 0.5]],
    'RFootBack': [[-0.2, 0.3], [-0.3, 1.1], [-0.3, 0.5]],
    'LFootBack': [[-0.3, 0.2], [-0.3, 1.1], [-0.5, 0.3]],
    'LLegBack1': [[-0.2, 0.3], [-0.5, 0.8], [-0.5, 0.4]],
    'RLegBack1': [[-0.3, 0.2], [-0.5, 0.8], [-0.4, 0.5]],
    'Head': [[-0.5, 0.5], [-1.0, 0.9], [-0.9, 0.9]],
    'RLegBack2': [[-0.3, 0.2], [-0.6, 0.8], [-0.5, 0.6]],
    'LLegBack2': [[-0.2, 0.3], [-0.6, 0.8], [-0.6, 0.5]],
    'RLegBack3': [[-0.2, 0.3], [-0.8, 0.2], [-0.4, 0.5]],
    'LLegBack3': [[-0.3, 0.2], [-0.8, 0.2], [-0.5, 0.4]],
    'Mouth': [[-0.1, 0.1], [-1.1, 0.5], [-0.1, 0.1]],
    'Neck': [[-0.8, 0.8], [-1.0, 1.0], [-1.1, 1.1]],
    'LLeg1': [[-0.05, 0.05], [-1.3, 0.8], [-0.6, 0.6]],  # Extreme
    'RLeg1': [[-0.05, 0.05], [-1.3, 0.8], [-0.6, 0.6]],
    'RLeg2': [[-0.05, 0.05], [-1.0, 0.9], [-0.6, 0.6]],  # Extreme
    'LLeg2': [[-0.05, 0.05], [-1.0, 1.1], [-0.6, 0.6]],
    'RLeg3': [[-0.1, 0.4], [-0.3, 1.4], [-0.4, 0.7]],  # Extreme
    'LLeg3': [[-0.4, 0.1], [-0.3, 1.4], [-0.7, 0.4]],
    'LFoot': [[-0.3, 0.1], [-0.4, 1.5], [-0.7, 0.3]],  # Extreme
    'RFoot': [[-0.1, 0.3], [-0.4, 1.5], [-0.3, 0.7]],
    'Tail7': [[-0.1, 0.1], [-0.7, 1.1], [-0.9, 0.8]],
    'Tail6': [[-0.1, 0.1], [-1.4, 1.4], [-1.0, 1.0]],
    'Tail5': [[-0.1, 0.1], [-1.0, 1.0], [-0.8, 0.8]],
    'Tail4': [[-0.1, 0.1], [-1.0, 1.0], [-0.8, 0.8]],
    'Tail3': [[-0.1, 0.1], [-1.0, 1.0], [-0.8, 0.8]],
    'Tail2': [[-0.1, 0.1], [-1.0, 1.0], [-0.8, 0.8]],
    'Tail1': [[-0.1, 0.1], [-1.5, 1.4], [-1.2, 1.2]],
}
class LimitPrior(object):
    """Hinge-style joint-limit prior over a pose vector.

    The pose vector concatenates 3 rotation values per joint; entries
    ``[prefix, postfix)`` cover the ``n_pose`` articulated joints used here.
    ``__call__`` returns the mean amount by which the pose violates the
    per-joint limits in ``Ranges`` (zero for an in-range pose).
    """

    def __init__(self, device, n_pose=32):
        # joint name -> 0-based index into the (n_pose, 3) limit tables
        self.parts = {
            'pelvis0': 0,
            'spine': 1,
            'spine0': 2,
            'spine1': 3,
            'spine2': 4,
            'spine3': 5,
            'LLeg1': 6,
            'LLeg2': 7,
            'LLeg3': 8,
            'LFoot': 9,
            'RLeg1': 10,
            'RLeg2': 11,
            'RLeg3': 12,
            'RFoot': 13,
            'Neck': 14,
            'Head': 15,
            'LLegBack1': 16,
            'LLegBack2': 17,
            'LLegBack3': 18,
            'LFootBack': 19,
            'RLegBack1': 20,
            'RLegBack2': 21,
            'RLegBack3': 22,
            'RFootBack': 23,
            'Tail1': 24,
            'Tail2': 25,
            'Tail3': 26,
            'Tail4': 27,
            'Tail5': 28,
            'Tail6': 29,
            'Tail7': 30,
            'Mouth': 31
        }
        self.id2name = {v: k for k, v in self.parts.items()}
        # Ignore the first joint (global root): pose entries [3, 99) cover
        # the 32 articulated joints, 3 rotation values each.
        self.prefix = 3
        self.postfix = 99
        self.part_ids = np.array(sorted(self.parts.values()))
        # stack per-joint [min, max] columns in part-id order
        min_values = np.hstack([np.array(Ranges[self.id2name[part_id]])[:, 0]
                                for part_id in self.part_ids])
        max_values = np.hstack([np.array(Ranges[self.id2name[part_id]])[:, 1]
                                for part_id in self.part_ids])
        self.ranges = Ranges
        self.device = device
        self.min_values = torch.from_numpy(min_values).view(n_pose, 3).float().to(device)
        self.max_values = torch.from_numpy(max_values).view(n_pose, 3).float().to(device)

    def __call__(self, x):
        '''
        Given x, rel rotation of 31 joints, for each parts compute the limit value.
        k is steepness of the curve, max_val + margin is the midpoint of the curve (val 0.5)
        Using Logistic:
        max limit: 1/(1 + exp(k * ((max_val + margin) - x)))
        min limit: 1/(1 + exp(k * (x - (min_val - margin))))
        With max/min:
        minlimit: max( min_vals - x , 0 )
        maxlimit: max( x - max_vals , 0 )
        With exponential:
        min: exp(k * (minval - x) )
        max: exp(k * (x - maxval) )

        :param x: (batch, >= postfix) pose tensor.
        :return: scalar mean violation (flat inside the limits, L2-free hinge outside).
        '''
        ## Max/min discontinous but fast. (flat + L2 past the limit)
        x = x[:, self.prefix:self.postfix].view(x.shape[0], -1, 3)
        zeros = torch.zeros_like(x).to(self.device)
        return torch.mean(torch.max(x - self.max_values.unsqueeze(0), zeros)
                          + torch.max(self.min_values.unsqueeze(0) - x, zeros))

    def report(self, x):
        """Print every joint of a single (un-batched) pose that is outside its limits.

        Bug fix: the previous version used the chumpy ``.r`` attribute (absent
        on torch tensors), reshaped the scalar returned by ``__call__`` as if
        it were per-joint, indexed with ``bad_id - 1`` although part ids are
        0-based, and used Python-2 print-comma syntax.
        """
        vals = x[self.prefix:self.postfix].view(-1, 3)
        zeros = torch.zeros_like(vals)
        # per-joint, per-axis amount by which the limits are exceeded
        res = (torch.max(vals - self.max_values, zeros)
               + torch.max(self.min_values - vals, zeros)).detach().cpu().numpy()
        values = vals.detach().cpu().numpy()
        bad = np.any(res > 0, axis=1)
        bad_ids = self.part_ids[bad]
        np.set_printoptions(precision=3)
        for bad_id in bad_ids:
            name = self.id2name[bad_id]
            print('%s over! Overby: %s Limits: %s Values: %s'
                  % (name, res[bad_id], self.ranges[name], values[bad_id]))
if __name__ == '__main__':
    # sanity check: both skeleton name->id maps must agree with
    # LimitPrior.parts (which is 0-based, hence the -1 below)
    name2id33 = {'RFoot': 14, 'RFootBack': 24, 'spine1': 4, 'Head': 16, 'LLegBack3': 19, 'RLegBack1': 21, 'pelvis0': 1,
                 'RLegBack3': 23, 'LLegBack2': 18, 'spine0': 3, 'spine3': 6, 'spine2': 5, 'Mouth': 32, 'Neck': 15,
                 'LFootBack': 20, 'LLegBack1': 17, 'RLeg3': 13, 'RLeg2': 12, 'LLeg1': 7, 'LLeg3': 9, 'RLeg1': 11,
                 'LLeg2': 8, 'spine': 2, 'LFoot': 10, 'Tail7': 31, 'Tail6': 30, 'Tail5': 29, 'Tail4': 28, 'Tail3': 27,
                 'Tail2': 26, 'Tail1': 25, 'RLegBack2': 22, 'root': 0}
    name2id35 = {'RFoot': 14, 'RFootBack': 24, 'spine1': 4, 'Head': 16, 'LLegBack3': 19, 'RLegBack1': 21, 'pelvis0': 1,
                 'RLegBack3': 23, 'LLegBack2': 18, 'spine0': 3, 'spine3': 6, 'spine2': 5, 'Mouth': 32, 'Neck': 15,
                 'LFootBack': 20, 'LLegBack1': 17, 'RLeg3': 13, 'RLeg2': 12, 'LLeg1': 7, 'LLeg3': 9, 'RLeg1': 11,
                 'LLeg2': 8, 'spine': 2, 'LFoot': 10, 'Tail7': 31, 'Tail6': 30, 'Tail5': 29, 'Tail4': 28, 'Tail3': 27,
                 'Tail2': 26, 'Tail1': 25, 'RLegBack2': 22, 'root': 0, 'LEar': 33, 'REar': 34}
    import os
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    limit_prior = LimitPrior(device, 32)
    for k, v in limit_prior.parts.items():
        id33 = name2id33[k] - 1
        id35 = name2id35[k] - 1
        assert id33 == id35 and id33 == v
    # Bug fix: LimitPrior.__call__ indexes x[:, ...], so x needs a batch
    # dimension -- the previous 1-D tensor of shape (35*3,) raised IndexError.
    x = torch.zeros((1, 35 * 3)).float().to(device)
    limit_loss = limit_prior(x)
    print('done')
|
{"hexsha": "58ae66102d2264322158e784763871dbfc895cb5", "size": 6653, "ext": "py", "lang": "Python", "max_stars_repo_path": "util/joint_limits_prior.py", "max_stars_repo_name": "chaneyddtt/Coarse-to-fine-3D-Animal", "max_stars_repo_head_hexsha": "b3f9b1031b5761838c94ca091095636101747fd9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2021-11-17T03:11:21.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-23T06:38:19.000Z", "max_issues_repo_path": "util/joint_limits_prior.py", "max_issues_repo_name": "chaneyddtt/Coarse-to-fine-3D-Animal", "max_issues_repo_head_hexsha": "b3f9b1031b5761838c94ca091095636101747fd9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-12-07T07:42:33.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-07T11:41:17.000Z", "max_forks_repo_path": "util/joint_limits_prior.py", "max_forks_repo_name": "chaneyddtt/Coarse-to-fine-3D-Animal", "max_forks_repo_head_hexsha": "b3f9b1031b5761838c94ca091095636101747fd9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2021-11-26T08:57:19.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-22T10:29:19.000Z", "avg_line_length": 44.3533333333, "max_line_length": 131, "alphanum_fraction": 0.4612956561, "include": true, "reason": "import numpy", "num_tokens": 2816}
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.datasets import load_iris

# Load iris data set
iris_data = load_iris()
iris = pd.DataFrame(iris_data['data'], columns=iris_data['feature_names'])
iris.info()
iris.describe()

# First 50 rows are setosa, rows 50-99 are versicolor
setosa_x = iris['sepal length (cm)'][:50]
setosa_y = iris['sepal width (cm)'][:50]
versicolor_x = iris['sepal length (cm)'][50:100]
versicolor_y = iris['sepal width (cm)'][50:100]

# Visualising our data: setosa and versicolor.
# Bug fix: x holds sepal LENGTH and y holds sepal WIDTH -- the axis
# labels were previously swapped.
plt.title("Iris data set visualization")
plt.xlabel('Sepal Length (cm)')
plt.ylabel('Sepal Width (cm)')
plt.scatter(setosa_x, setosa_y, marker='*', color='green')
plt.scatter(versicolor_x, versicolor_y, marker='+', color='red')
#
#
from sklearn.model_selection import train_test_split
X = iris
Y = iris_data['target']
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.33, random_state=150)

from sklearn.svm import SVC
# Linear-kernel SVM; C=100 penalises misclassification heavily
model = SVC(gamma=0.01, kernel='linear', C=100)
model.fit(X_train, Y_train)
predictions = model.predict(X_test)

from sklearn.metrics import classification_report, confusion_matrix
from sklearn.metrics import accuracy_score
## Evaluate on the held-out test set
print(confusion_matrix(Y_test, predictions))
print(classification_report(Y_test, predictions, target_names=iris_data.target_names))
plt.show()
|
{"hexsha": "7825caf72bb7831c675ff54e21d1cc3c21e0f4f9", "size": 1363, "ext": "py", "lang": "Python", "max_stars_repo_path": "Iris.py", "max_stars_repo_name": "Akberovr/Support-Vector-Machine-Neural-Networks", "max_stars_repo_head_hexsha": "6d94853c88c30eb83a18c7ad986b85c1d87c3fc6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2018-11-01T21:17:35.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-09T02:56:04.000Z", "max_issues_repo_path": "Iris.py", "max_issues_repo_name": "AzerbaijanOpenSourceCommunity/Support-Vector-Machine-Neural-Networks", "max_issues_repo_head_hexsha": "6d94853c88c30eb83a18c7ad986b85c1d87c3fc6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Iris.py", "max_forks_repo_name": "AzerbaijanOpenSourceCommunity/Support-Vector-Machine-Neural-Networks", "max_forks_repo_head_hexsha": "6d94853c88c30eb83a18c7ad986b85c1d87c3fc6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2018-10-26T12:19:08.000Z", "max_forks_repo_forks_event_max_datetime": "2018-10-27T18:55:52.000Z", "avg_line_length": 25.7169811321, "max_line_length": 91, "alphanum_fraction": 0.7615553925, "include": true, "reason": "import numpy", "num_tokens": 360}
|
from numpy import ndarray
from model_spaces.core.gp_model import GPModel
from model_spaces.core.hyperpriors import Hyperpriors
class KernelKernelGPModel(GPModel):
    """GP model over kernels; the kernel-kernel covariance is supplied later."""

    def __init__(self, kernel_kernel_hyperpriors: Hyperpriors):
        # The covariance is unknown at construction time; it is meant to be
        # provided via set_kernel_kernel once a kernel-kernel matrix exists.
        super().__init__(None, kernel_kernel_hyperpriors)

    def set_kernel_kernel(self, k: ndarray):
        # TODO: not yet implemented
        pass
|
{"hexsha": "f23728e8bb51698b6618c310a1be93c074735132", "size": 380, "ext": "py", "lang": "Python", "max_stars_repo_path": "strategies/boms/kernel_kernel_gp_model.py", "max_stars_repo_name": "lschlessinger1/boms-python", "max_stars_repo_head_hexsha": "5ad6035a91c1eb3d33556ddfee25b99ba18ee431", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-04-21T09:58:49.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-17T14:04:31.000Z", "max_issues_repo_path": "strategies/boms/kernel_kernel_gp_model.py", "max_issues_repo_name": "lschlessinger1/boms-python", "max_issues_repo_head_hexsha": "5ad6035a91c1eb3d33556ddfee25b99ba18ee431", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-07-05T18:27:13.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-05T18:27:13.000Z", "max_forks_repo_path": "strategies/boms/kernel_kernel_gp_model.py", "max_forks_repo_name": "lschlessinger1/boms-python", "max_forks_repo_head_hexsha": "5ad6035a91c1eb3d33556ddfee25b99ba18ee431", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.3333333333, "max_line_length": 63, "alphanum_fraction": 0.7657894737, "include": true, "reason": "from numpy", "num_tokens": 88}
|
(* The types of finite sets and bags, built as non-free datatypes:
   the insertion equations quotient the free term algebra. *)
theory FSets_Bags
imports "../NonFreeInput"
begin
(* Datatype of finite sets: insertion is idempotent (Ins1) and commutes (Ins2) *)
nonfree_datatype 'a fset = Emp | Ins 'a "'a fset"
where
  Ins1: "Ins a (Ins a A) = Ins a A"
| Ins2: "Ins a1 (Ins a2 A) = Ins a2 (Ins a1 A)"
declare Ins1[simp]
(* Datatype of bags: insertion only commutes, so multiplicities are preserved *)
nonfree_datatype 'a bag = BEmp | BIns 'a "'a bag"
where BIns: "BIns a1 (BIns a2 B) = BIns a2 (BIns a1 B)"
(* Map a function over a finite set *)
nonfree_primrec fset_map :: "('a \<Rightarrow> 'b) \<Rightarrow> 'a fset \<Rightarrow> 'b fset"
where
"fset_map f Emp = Emp"
| "fset_map f (Ins a A) = Ins (f a) (fset_map f A)"
by (auto simp: Ins1 Ins2)
(* Map a function over a bag *)
nonfree_primrec bag_map :: "('a \<Rightarrow> 'b) \<Rightarrow> 'a bag \<Rightarrow> 'b bag"
where
"bag_map f BEmp = BEmp"
| "bag_map f (BIns a B) = BIns (f a) (bag_map f B)"
by (auto simp: BIns)
(* Membership of an item in a finite set *)
nonfree_primrec mem :: "'a \<Rightarrow> 'a fset \<Rightarrow> bool"
where
"mem a Emp = False"
| "mem a (Ins b B) = (a = b \<or> mem a B)"
by auto
(* Inserting an element that is already present is a no-op *)
lemma mem_Ins[simp]: "mem a A \<Longrightarrow> Ins a A = A"
by (induction arbitrary: a rule: fset_induct) (auto simp: Ins2)
(* Multiplicity of an item in bag *)
nonfree_primrec mult :: "'a \<Rightarrow> 'a bag \<Rightarrow> nat"
where
"mult a BEmp = 0"
| "mult a (BIns b B) = (if a = b then Suc (mult a B) else mult a B)"
by (auto simp: BIns)
(* Flattening operator from bags to finite sets *)
nonfree_primrec flat :: "'a bag \<Rightarrow> 'a fset"
where
"flat BEmp = Emp"
| "flat (BIns a B) = Ins a (flat B)"
by (auto simp: Ins2)
(* An item occurs in the flattening iff its multiplicity is non-zero *)
lemma mem_flat_mult[simp]: "mem a (flat A) \<longleftrightarrow> mult a A \<noteq> 0"
by (induction rule: bag_induct) auto
(* Embedding of finite sets into bags *)
nonfree_primrec embed :: "'a fset \<Rightarrow> 'a bag"
where
"embed Emp = BEmp"
| "embed (Ins a A) = (if mult a (embed A) = 0 then BIns a (embed A) else embed A)"
by (auto simp: BIns)
(* The embedding gives members multiplicity 1 and non-members 0 *)
lemma mult_embed_mem[simp]: "mult a (embed A) \<noteq> 0 \<longleftrightarrow> mem a A"
by (induction rule: fset_induct) auto
(* Cardinal of finite sets: the set is threaded through the recursion so
   that the definition respects the non-free equations *)
nonfree_primrec card1 :: "'a fset \<Rightarrow> 'a fset * nat"
where
"card1 Emp = (Emp, 0)"
| "card1 (Ins a A) = (case card1 A of (A,n) \<Rightarrow> (Ins a A, if mem a A then n else Suc n))"
by (auto simp: Ins2)
(* The first component of card1 is the argument itself *)
lemma card1: "card1 A = (A',n) \<Longrightarrow> A = A'"
by (induct arbitrary: A' n rule: fset_induct) (auto split: prod.splits)
definition card :: "'a fset \<Rightarrow> nat" where "card \<equiv> snd o card1"
lemma card_simps[simp]:
"card Emp = 0"
"card (Ins a A) = (if mem a A then card A else Suc (card A))"
unfolding card_def using card1 by (auto split: prod.splits)
(* Sum of a numeric function over a finite set: same threading trick as card1 *)
nonfree_primrec sum1 :: "('a \<Rightarrow> nat) \<Rightarrow> 'a fset \<Rightarrow> 'a fset \<times> nat"
where
"sum1 f Emp = (Emp, 0)"
| "sum1 f (Ins a A) = (case sum1 f A of (A,n) \<Rightarrow> (Ins a A, if mem a A then n else n + f a))"
by (auto simp: Ins2)
lemma sum1: "sum1 f A = (A',n) \<Longrightarrow> A = A'"
by (induct arbitrary: A' n rule: fset_induct) (auto split: prod.splits)
definition sum :: " ('a \<Rightarrow> nat) \<Rightarrow> 'a fset \<Rightarrow> nat" where "sum f \<equiv> snd o sum1 f"
lemma sum_simps[simp]:
"sum f Emp = 0"
"sum f (Ins a A) = (if mem a A then sum f A else sum f A + f a)"
unfolding sum_def using sum1 by (auto split: prod.splits)
(* Sum of a numeric function over a bag: no threading needed here *)
nonfree_primrec bsum' :: "('a \<Rightarrow> nat) \<Rightarrow> 'a bag \<Rightarrow> nat"
where
"bsum' f BEmp = 0"
| "bsum' f (BIns a B) = bsum' f B + f a"
by auto
(* More generally: Sum of a commutative-monoid-valued function over a bag: *)
nonfree_primrec bsum :: "('a \<Rightarrow> 'b::comm_monoid_add) \<Rightarrow> 'a bag \<Rightarrow> 'b"
where
"bsum f BEmp = 0"
| "bsum f (BIns a B) = bsum f B + f a"
by (auto simp: algebra_simps)
(* Embedding of finite sets as sets: *)
nonfree_primrec asSet :: "'a fset \<Rightarrow> 'a set"
where
"asSet Emp = {}"
| "asSet (Ins a A) = insert a (asSet A)"
by auto
lemma in_asSet[simp]: "a \<in> asSet F \<longleftrightarrow> mem a F"
by (induction F) auto
(* Any member can be pulled to the front of the insertion sequence *)
lemma mem_ex_Ins: "mem a F \<Longrightarrow> \<exists> F'. \<not> mem a F' \<and> F = Ins a F'"
by (induction F) (metis Ins2 mem.simps mem_Ins)+
lemma finite_asSet[simp, intro]: "finite (asSet A)"
by (induction rule: fset_induct) auto
lemma finite_imp_asSet: "finite A \<Longrightarrow> (\<exists> F. A = asSet F)"
by (induction rule: finite_induct) (metis asSet.simps)+
lemma asSet_eq_emp[simp]: "asSet F = {} \<Longrightarrow> Emp = F"
by (induction F) auto
(* asSet is injective, so 'a fset is isomorphic to the finite 'a sets *)
lemma asSet_inj[simp]: "asSet F1 = asSet F2 \<longleftrightarrow> F1 = F2"
proof(safe, induction F1 arbitrary: F2)
fix a F1 F2 assume IH: "\<And>F2. asSet F1 = asSet F2 \<Longrightarrow> F1 = F2"
and e: "asSet (Ins a F1) = asSet F2"
hence "mem a F2" by auto
then obtain F2' where F2': "\<not> mem a F2'" and F2: "F2 = Ins a F2'" using mem_ex_Ins[of a F2] by blast
show "Ins a F1 = F2"
proof(cases "mem a F1")
case False
hence "asSet F1 = asSet F2'" using e F2' unfolding F2 by auto
thus ?thesis unfolding F2 using IH by auto
qed(insert e IH, auto)
qed auto
(* Inverse of asSet on finite sets, defined by Hilbert choice *)
definition asFset :: "'a set \<Rightarrow> 'a fset" where
"asFset A \<equiv> SOME F. asSet F = A"
lemma asSet_asFset[simp]:
assumes "finite A" shows "asSet (asFset A) = A"
unfolding asFset_def apply(rule someI_ex) using finite_imp_asSet[OF assms] by blast
lemma asFset_asSet[simp]: "asFset (asSet A) = A"
by (metis asSet_asFset asSet_inj finite_asSet)
lemma asFset_emp[simp]: "asFset {} = Emp"
by (metis asFset_asSet asSet.simps)
lemma asFset_insert[simp]: "finite A \<Longrightarrow> asFset (insert a A) = Ins a (asFset A)"
by (metis asFset_asSet asSet.simps finite_imp_asSet)
(* ACIU view: union as an associative, commutative, idempotent operation
   with Emp as unit *)
definition "Singl a \<equiv> Ins a Emp"
nonfree_primrec Uni :: "'a fset \<Rightarrow> 'a fset \<Rightarrow> 'a fset"
where
"Uni Emp = (\<lambda> B. B)"
| "Uni (Ins a A) = (\<lambda> B. Ins a (Uni A B))"
by (auto simp: Ins2)
lemma Uni_Emp[simp]: "Uni Emp B = B"
and Uni_Ins[simp]: "Uni (Ins a A) B = Ins a (Uni A B)"
by auto
declare Uni.simps[simp del]
lemma Uni_Emp2[simp]: "Uni A Emp = A"
by(induction A) auto
lemma Uni_Ins2[simp]: "Uni A (Ins b B) = Ins b (Uni A B)"
by (induction A) (auto simp: Ins2)
lemma Uni_assoc: "Uni (Uni A B) C = Uni A (Uni B C)"
by (induction A) auto
lemma Uni_com: "Uni A B = Uni B A"
by (induction A) auto
lemma Uni_idem[simp]: "Uni A A = A"
by (induction A) auto
lemma Ins_not_Emp[simp]: "Ins a A \<noteq> Emp"
by (induct A) (metis mem.simps)+
lemma Singl_not_Emp[simp]: "Singl a \<noteq> Emp"
unfolding Singl_def by simp
lemma Uni_eq_Emp[simp]: "Uni A B = Emp \<longleftrightarrow> A = Emp \<and> B = Emp"
by (induct A) auto
lemma mem_Uni[simp]: "mem a (Uni A B) \<longleftrightarrow> mem a A \<or> mem a B"
by (induction A) auto
(* asFset commutes with union on finite sets *)
lemma asFset_Uni[simp]:
assumes "finite A" and "finite B"
shows "asFset (A \<union> B) = Uni (asFset A) (asFset B)"
using assms by (induct) auto
lemma asFset_eq_Emp[simp]: assumes "finite A" shows "asFset A = Emp \<longleftrightarrow> A = {}"
using assms by (induction, auto)
end
|
{"author": "metaforcy", "repo": "nonfree-data", "sha": "f3ce28278a88fdd240faa2e51f893fee5c15f2f2", "save_path": "github-repos/isabelle/metaforcy-nonfree-data", "path": "github-repos/isabelle/metaforcy-nonfree-data/nonfree-data-f3ce28278a88fdd240faa2e51f893fee5c15f2f2/Examples/FSets_Bags.thy"}
|
import os
import random
import numpy as np
import pandas as pd
import seaborn as sns
import gym
import matplotlib.pyplot as plt
plt.style.use('bmh')
import matplotlib
matplotlib.rcParams['font.family'] = 'IPAPGothic'
def reset_seeds():
    """Seed every RNG used here (random, numpy, and TensorFlow if present).

    TensorFlow is handled defensively: ``tf.set_random_seed`` was removed in
    TF2 (replaced by ``tf.random.set_seed``), and TF may not be installed at
    all, so both APIs are tried and a missing install is tolerated.
    """
    random.seed(9949)
    np.random.seed(9967)
    try:
        import tensorflow as tf
        try:
            tf.random.set_seed(9973)      # TF2 API
        except AttributeError:
            tf.set_random_seed(9973)      # TF1 API
    except ImportError:
        pass  # TensorFlow not installed -- python/numpy seeding still applies
def policy_table(env, nstate, naction, nplay=100):
    """Learn a (nstate, naction) table of accumulated rewards by greedy play.

    The action with the highest accumulated reward is taken; states whose
    row is still all-zero fall back to a uniformly random action.
    """
    policy = np.zeros((nstate, naction))
    for _ in range(nplay):
        state = env.reset()
        done = False
        while not done:
            # greedy w.r.t. accumulated reward; random in unexplored states
            row = policy[state, :]
            action = np.argmax(row) if np.sum(row) else np.random.randint(0, naction)
            next_state, reward, done, info = env.step(action)
            policy[state, action] += reward
            state = next_state
    return policy
def policy_table_qlearning(env, nstate, naction, nplay=100):
    """Tabular Q-learning variant of :func:`policy_table`.

    Instead of the raw reward alone, each visit moves the entry towards
    ``reward + y * max_a' Q(s', a')`` with learning rate ``lr``.
    """
    policy = np.zeros((nstate, naction))
    y = 0.95   # discount factor
    lr = 0.8   # learning rate
    for _ in range(nplay):
        state = env.reset()
        done = False
        while not done:
            # greedy w.r.t. current Q-values; random in unexplored states
            row = policy[state, :]
            action = np.argmax(row) if np.sum(row) else np.random.randint(0, naction)
            next_state, reward, done, info = env.step(action)
            # temporal-difference update towards the discounted best future value
            policy[state, action] += reward + lr * (y * np.max(policy[next_state, :]) - policy[state, action])
            state = next_state
    return policy
def policy_table_egreedy_qlearning(env, nstate, naction, nplay=100):
    """Q-learning with an epsilon-greedy exploration schedule.

    Epsilon starts at 0.5 and decays multiplicatively once per episode;
    unexplored states always act randomly.
    """
    policy = np.zeros((nstate, naction))
    y = 0.95       # discount factor
    lr = 0.8       # learning rate
    eps = 0.5      # exploration probability
    decay = 0.999  # per-episode epsilon decay
    for _ in range(nplay):
        state = env.reset()
        done = False
        eps *= decay
        while not done:
            # explore with probability eps, or whenever the state is unseen
            # (np.random.random() is drawn first to keep the RNG stream stable)
            row = policy[state, :]
            if np.random.random() < eps or np.sum(row) == 0:
                action = np.random.randint(0, naction)
            else:
                action = np.argmax(row)
            next_state, reward, done, info = env.step(action)
            # temporal-difference update towards the discounted best future value
            policy[state, action] += reward + lr * (y * np.max(policy[next_state, :]) - policy[state, action])
            state = next_state
    return policy
# build the NChain environment and size the tables from its spaces
env = gym.make('NChain-v0')
nstate = env.observation_space.n
naction = env.action_space.n
# fixed seeds so the learners below are comparable
reset_seeds()
# train one policy table per algorithm
policy_0 = policy_table(env, nstate, naction)
policy_1 = policy_table_qlearning(env, nstate, naction)
policy_2 = policy_table_egreedy_qlearning(env, nstate, naction)
def run_game(policy, env):
    """Play a single episode greedily under ``policy``.

    :param policy: (nstate, naction) table; the argmax action per state is taken.
    :param env: gym-style environment with ``reset()``/``step()``.
    :return: total reward accumulated over the episode.

    Bug fix: the previous version never advanced the current state
    (``s0`` was never assigned ``s1``), so every action was chosen from the
    initial state's row of the table.
    """
    s0 = env.reset()
    sum_reward = 0
    done = False
    while not done:
        action = np.argmax(policy[s0, :])
        s1, reward, done, info = env.step(action)
        sum_reward += reward
        s0 = s1  # advance to the next state
    return sum_reward
def compare_algorithms(env, nstate, naction, nplay=100):
    """Train all three tabular learners ``nplay`` times and count episode wins.

    Each round trains every learner from scratch, plays one greedy episode
    per learner, and credits the learner with the highest episode reward.
    Returns a length-3 win-count array (order: simple, q-learning, e-greedy).
    """
    winner = np.zeros((3,))
    for loop_idx in range(nplay):
        print('inf> loop = {}'.format(loop_idx))
        # re-train each learner from scratch, then play one greedy episode each
        policies = (
            policy_table(env, nstate, naction),
            policy_table_qlearning(env, nstate, naction),
            policy_table_egreedy_qlearning(env, nstate, naction),
        )
        scores = np.array([run_game(p, env) for p in policies])
        w = np.argmax(scores)
        print('inf> winner = {}'.format(w))
        winner[w] += 1
    return winner
# head-to-head comparison of the three tabular learners
compare_algorithms(env, nstate, naction)
def deep_q_learning(env, nstate, naction, nplay=100):
    """Train a small dense Q-network; return (policy table, per-episode rewards).

    States are fed to the network one-hot encoded; each environment step fits
    the model on a single (state, target-Q) pair. Training metrics are
    appended to a CSV log, removed first if left over from a previous run.
    """
    ## build deep Q-network
    from keras.models import Sequential
    from keras.layers import InputLayer, Dense
    from keras.callbacks import CSVLogger

    path_log = 'rl_gym-log.csv'
    if os.path.isfile(path_log):
        os.remove(path_log)
    callbacks = [CSVLogger(filename=path_log, append=True)]

    model = Sequential()
    model.add(InputLayer(batch_input_shape=(1, nstate)))
    model.add(Dense(10, activation='sigmoid'))
    model.add(Dense(naction, activation='linear'))
    model.compile(loss='mse', optimizer='adam', metrics=['mae'])

    y = 0.95       # discount factor
    eps = 0.5      # exploration probability
    decay = 0.999  # per-episode epsilon decay
    one_hot = np.identity(nstate)  # one-hot encoding of every state

    sum_rewards = []
    for ii in range(nplay):
        s0 = env.reset()
        eps *= decay
        done = False
        if ii % 10 == 0:
            print("loop {} of {}".format(ii+1, nplay))
        sum_reward = 0.
        while not done:
            ## epsilon-greedy action selection on the predicted Q-values
            if np.random.random() < eps:
                action = np.random.randint(0, naction)
            else:
                action = np.argmax(model.predict(one_hot[s0:s0+1]))
            s1, reward, done, info = env.step(action)
            ## update deep Q-network towards reward + discounted future value
            target = reward + y * np.max(model.predict(one_hot[s1:s1+1]))
            target_vec = model.predict(one_hot[s0:s0+1])[0]
            target_vec[action] = target
            model.fit(one_hot[s0:s0+1], target_vec.reshape(-1, naction),
                      callbacks=callbacks, epochs=1, verbose=0)
            s0 = s1
            sum_reward += reward
        sum_rewards.append(sum_reward)

    # read the learned Q-values back out into a plain policy table
    policy = np.zeros((nstate, naction))
    for ii in range(nstate):
        policy[ii] = model.predict(one_hot[ii:ii+1])
    return policy, sum_rewards
# train the deep Q-network (1000 episodes) with fixed seeds
reset_seeds()
policy_3, sum_rewards = deep_q_learning(env, nstate, naction, nplay=1000)
def print_policy(policy):
    """Print the greedy action for every state of a (nstate, naction) table."""
    for state in range(policy.shape[0]):
        print('state={} action={}'.format(state, np.argmax(policy[state, :])))
# dump the greedy policy learned by each algorithm
print('simple learning')
print_policy(policy_0)
print('q-learning')
print_policy(policy_1)
print('epsilon greedy q-learning')
print_policy(policy_2)
print('deep q-learning')
print_policy(policy_3)
### plot learning curve
df = pd.DataFrame({'rewards':sum_rewards})
df.plot()
plt.xlabel('# of play')
plt.ylabel('rewards')
plt.show()
# 10-episode moving average smooths the noisy per-episode rewards
plt.plot(df['rewards'].rolling(10).mean())
# eof
|
{"hexsha": "f60d9bbc36608d24b736cdb58ed54fd132d9eb41", "size": 5814, "ext": "py", "lang": "Python", "max_stars_repo_path": "dnn/rl_gym.py", "max_stars_repo_name": "takashi-matsushita/lab", "max_stars_repo_head_hexsha": "894e5762f58046c68e665d7463db3d7359c15fda", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "dnn/rl_gym.py", "max_issues_repo_name": "takashi-matsushita/lab", "max_issues_repo_head_hexsha": "894e5762f58046c68e665d7463db3d7359c15fda", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "dnn/rl_gym.py", "max_forks_repo_name": "takashi-matsushita/lab", "max_forks_repo_head_hexsha": "894e5762f58046c68e665d7463db3d7359c15fda", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.6123348018, "max_line_length": 86, "alphanum_fraction": 0.6486068111, "include": true, "reason": "import numpy", "num_tokens": 1637}
|
"""
Function to pool the univariate estimators
Arguments:
unibetas::Array{Unibeta, 1} -> Array which contains the univariate estimators which have to be pooled
"""
function pool_unibetas(unibetas::Array{Unibeta, 1})
pooledbetas = unibetas[1].n .* unibetas[1].unibeta
sumN = unibetas[1].n
@simd for i = 2 : length(unibetas)
@inbounds pooledbetas = pooledbetas .+ unibetas[i].n .* unibetas[i].unibeta
@inbounds sumN = sumN .+ unibetas[i].n
end
pooledbetas = pooledbetas ./ sumN
return Unibeta(pooledbetas, sumN, unibetas[1].labels)
end
"""
Function to pool the covariance matrices
Arguments:
unibeta::Unibeta -> Pooled univariate estimator supplying the per-variable sample sizes used as weights
covarmat::Array{Covarmat, 1} -> Array which contains the covariances which have to be pooled
Returns the pooled covariance matrix, where every column is the
sample-size-weighted average of the corresponding input columns.
"""
function pool_covarmats(unibeta::Unibeta, covarmat::Array{Covarmat, 1})
    nrows, ncols = size(covarmat[1].covarmat)
    # Hoisted loop invariant: this slice never changes inside the loops,
    # but the original recomputed it on every iteration.
    # NOTE(review): every input matrix is weighted by the same `unibeta.n`
    # values, so the result reduces to a plain average of the matrices; if
    # per-study sample sizes were intended, the weights should come from
    # each covarmat entry instead -- confirm with the callers.
    weights = unibeta.n[1:nrows]
    pooledcovarmat = zeros(nrows, ncols)
    sumN = copy(weights)
    @simd for j = 1 : ncols
        @inbounds pooledcovarmat[:, j] = covarmat[1].covarmat[:, j] .* weights
    end
    @simd for i = 2 : length(covarmat)
        @simd for j = 1 : ncols
            @inbounds pooledcovarmat[:, j] = pooledcovarmat[:, j] .+ covarmat[i].covarmat[:, j] .* weights
        end
        @inbounds sumN = sumN .+ weights
    end
    @simd for j = 1 : ncols
        @inbounds pooledcovarmat[:, j] = pooledcovarmat[:, j] ./ sumN
    end
    return pooledcovarmat
end
|
{"hexsha": "a0961eac7c637ea98f11c14cd86b4b8dd0e34fc8", "size": 1473, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/pooling.jl", "max_stars_repo_name": "danielazoeller/DistributedBoosting", "max_stars_repo_head_hexsha": "aaac7133efe6a70711f9331049e5874dcc68a967", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/pooling.jl", "max_issues_repo_name": "danielazoeller/DistributedBoosting", "max_issues_repo_head_hexsha": "aaac7133efe6a70711f9331049e5874dcc68a967", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/pooling.jl", "max_forks_repo_name": "danielazoeller/DistributedBoosting", "max_forks_repo_head_hexsha": "aaac7133efe6a70711f9331049e5874dcc68a967", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.8108108108, "max_line_length": 129, "alphanum_fraction": 0.7121520706, "num_tokens": 558}
|
""" Testing array writer objects
See docstring of :mod:`nibabel.arraywriters` for API.
"""
from platform import python_compiler, machine
import itertools
import numpy as np
from io import BytesIO
from ..arraywriters import (SlopeInterArrayWriter, SlopeArrayWriter,
WriterError, ScalingError, ArrayWriter,
make_array_writer, get_slope_inter)
from ..casting import int_abs, type_info, shared_range, on_powerpc
from ..volumeutils import array_from_file, apply_read_scaling, _dt_min_max
from numpy.testing import assert_array_almost_equal, assert_array_equal
import pytest
from ..testing import (assert_allclose_safely, suppress_warnings,
error_warnings)
# Scalar type groupings used throughout the tests.
# `np.sctypes` was removed in NumPy 2.0; fall back to an explicit listing
# of the same scalar types when it is unavailable.
try:
    FLOAT_TYPES = np.sctypes['float']
    COMPLEX_TYPES = np.sctypes['complex']
    INT_TYPES = np.sctypes['int']
    UINT_TYPES = np.sctypes['uint']
except AttributeError:  # NumPy >= 2.0
    FLOAT_TYPES = [np.float16, np.float32, np.float64, np.longdouble]
    COMPLEX_TYPES = [np.complex64, np.complex128, np.clongdouble]
    INT_TYPES = [np.int8, np.int16, np.int32, np.int64]
    UINT_TYPES = [np.uint8, np.uint16, np.uint32, np.uint64]
CFLOAT_TYPES = FLOAT_TYPES + COMPLEX_TYPES  # all floating point (real + complex)
IUINT_TYPES = INT_TYPES + UINT_TYPES        # all integer types
NUMERIC_TYPES = CFLOAT_TYPES + IUINT_TYPES  # every numeric scalar type
def round_trip(writer, order='F', apply_scale=True):
    """Write `writer`'s array to memory, read it back, optionally unscale.

    Returns the array as reconstructed from the serialized bytes, with
    the writer's slope / intercept applied unless `apply_scale` is False.
    """
    buf = BytesIO()
    src = writer.array
    with np.errstate(invalid='ignore'):
        writer.to_fileobj(buf, order)
    recovered = array_from_file(src.shape, writer.out_dtype, buf, order=order)
    if not apply_scale:
        return recovered
    slope, inter = get_slope_inter(writer)
    return apply_read_scaling(recovered, slope, inter)
def test_arraywriters():
    """Smoke-test every writer class over every numeric type.

    Checks construction, attribute wiring, round trips of 1D data,
    byteswapped input, and 2D arrays in both memory orders.
    """
    # Test initialize
    # Simple cases
    if machine() == 'sparc64' and python_compiler().startswith('GCC'):
        # bus errors on at least np 1.4.1 through 1.6.1 for complex
        test_types = FLOAT_TYPES + IUINT_TYPES
    else:
        test_types = NUMERIC_TYPES
    for klass in (SlopeInterArrayWriter, SlopeArrayWriter, ArrayWriter):
        for type in test_types:  # NOTE: `type` shadows the builtin here
            arr = np.arange(10, dtype=type)
            aw = klass(arr)
            assert aw.array is arr
            assert aw.out_dtype == arr.dtype
            assert_array_equal(arr, round_trip(aw))
            # Byteswapped should be OK
            bs_arr = arr.byteswap().newbyteorder('S')
            bs_aw = klass(bs_arr)
            bs_aw_rt = round_trip(bs_aw)
            # assert against original array because POWER7 was running into
            # trouble using the byteswapped array (bs_arr)
            assert_array_equal(arr, bs_aw_rt)
            bs_aw2 = klass(bs_arr, arr.dtype)
            bs_aw2_rt = round_trip(bs_aw2)
            assert_array_equal(arr, bs_aw2_rt)
            # 2D array
            arr2 = np.reshape(arr, (2, 5))
            a2w = klass(arr2)
            # Default out - in order is Fortran
            arr_back = round_trip(a2w)
            assert_array_equal(arr2, arr_back)
            arr_back = round_trip(a2w, 'F')
            assert_array_equal(arr2, arr_back)
            # C order works as well
            arr_back = round_trip(a2w, 'C')
            assert_array_equal(arr2, arr_back)
            assert arr_back.flags.c_contiguous
def test_arraywriter_check_scaling():
    """`check_scaling` is keyword-only and defaults to raising on lossy casts."""
    data = np.array([0, 1, 128, 255], np.uint8)
    # Within range - OK
    ArrayWriter(data)
    # Out of range for int8: scaling would be needed, and the default
    # (implicit or explicit) is to refuse
    for kwargs in ({}, dict(check_scaling=True)):
        with pytest.raises(WriterError):
            ArrayWriter(data, np.int8, **kwargs)
    # Turning the check off clips silently instead
    writer = ArrayWriter(data, np.int8, check_scaling=False)
    assert_array_equal(round_trip(writer), np.clip(data, 0, 127))
    # check_scaling cannot be passed positionally
    with pytest.raises(TypeError):
        ArrayWriter(data, np.int8, False)
def test_no_scaling():
    """Round trip every (in, out) type pair with scaling disabled.

    The expected result is the input pushed through the same casting /
    clipping steps the writers apply when scaling is turned off.
    """
    # Test arraywriter when writing different types without scaling
    for in_dtype, out_dtype, awt in itertools.product(
            NUMERIC_TYPES,
            NUMERIC_TYPES,
            (ArrayWriter, SlopeArrayWriter, SlopeInterArrayWriter)):
        mn_in, mx_in = _dt_min_max(in_dtype)
        arr = np.array([mn_in, 0, 1, mx_in], dtype=in_dtype)
        # The keyword that disables scaling differs between writer classes
        kwargs = (dict(check_scaling=False) if awt == ArrayWriter
                  else dict(calc_scale=False))
        aw = awt(arr, out_dtype, **kwargs)
        with suppress_warnings():
            back_arr = round_trip(aw)
        exp_back = arr.copy()
        # If converting to floating point type, casting is direct.
        # Otherwise we will need to do float-(u)int casting at some point.
        if out_dtype in IUINT_TYPES:
            if in_dtype in CFLOAT_TYPES:
                # Working precision is (at least) float
                with suppress_warnings():
                    exp_back = exp_back.astype(float)
                # Float to iu conversion will always round, clip
                with np.errstate(invalid='ignore'):
                    exp_back = np.round(exp_back)
                if hasattr(aw, 'slope') and in_dtype in FLOAT_TYPES:
                    # Finite scaling sets infs to min / max
                    exp_back = np.clip(exp_back, 0, 1)
                else:
                    # Clip to shared range of working precision
                    exp_back = np.clip(exp_back,
                                       *shared_range(float, out_dtype))
            else:  # iu input and output type
                # No scaling, never gets converted to float.
                # Does get clipped to range of output type
                mn_out, mx_out = _dt_min_max(out_dtype)
                if (mn_in, mx_in) != (mn_out, mx_out):
                    # Use smaller of input, output range to avoid np.clip
                    # upcasting the array because of large clip limits.
                    exp_back = np.clip(exp_back,
                                       max(mn_in, mn_out),
                                       min(mx_in, mx_out))
        elif in_dtype in COMPLEX_TYPES:
            # always cast to real from complex
            with suppress_warnings():
                exp_back = exp_back.astype(float)
        exp_back = exp_back.astype(out_dtype)
        # Sometimes working precision is float32 - allow for small differences
        assert_allclose_safely(back_arr, exp_back)
def test_scaling_needed():
    """Exhaustively check `scaling_needed` over type / range combinations."""
    # Structured types return True if dtypes same, raise error otherwise
    dt_def = [('f', 'i4')]
    arr = np.ones(10, dt_def)
    for t in NUMERIC_TYPES:
        with pytest.raises(WriterError):
            ArrayWriter(arr, t)
        narr = np.ones(10, t)
        with pytest.raises(WriterError):
            ArrayWriter(narr, dt_def)
    assert not ArrayWriter(arr).scaling_needed()
    assert not ArrayWriter(arr, dt_def).scaling_needed()
    # Any numeric type that can cast, needs no scaling
    for in_t in NUMERIC_TYPES:
        for out_t in NUMERIC_TYPES:
            if np.can_cast(in_t, out_t):
                aw = ArrayWriter(np.ones(10, in_t), out_t)
                assert not aw.scaling_needed()
    for in_t in NUMERIC_TYPES:
        # Numeric types to complex never need scaling
        arr = np.ones(10, in_t)
        for out_t in COMPLEX_TYPES:
            assert not ArrayWriter(arr, out_t).scaling_needed()
    # Attempts to scale from complex to anything else fails
    for in_t in COMPLEX_TYPES:
        for out_t in FLOAT_TYPES + IUINT_TYPES:
            arr = np.ones(10, in_t)
            with pytest.raises(WriterError):
                ArrayWriter(arr, out_t)
    # Scaling from anything but complex to floats is OK
    for in_t in FLOAT_TYPES + IUINT_TYPES:
        arr = np.ones(10, in_t)
        for out_t in FLOAT_TYPES:
            assert not ArrayWriter(arr, out_t).scaling_needed()
    # For any other output type, arrays with no data don't need scaling
    for in_t in FLOAT_TYPES + IUINT_TYPES:
        arr_0 = np.zeros(10, in_t)
        arr_e = []  # empty list also counts as "no data"
        for out_t in IUINT_TYPES:
            assert not ArrayWriter(arr_0, out_t).scaling_needed()
            assert not ArrayWriter(arr_e, out_t).scaling_needed()
    # Going to (u)ints, non-finite arrays don't need scaling for writers that
    # can do scaling because these use finite_range to threshold the input data,
    # but ArrayWriter does not do this. so scaling_needed is True
    for in_t in FLOAT_TYPES:
        arr_nan = np.zeros(10, in_t) + np.nan
        arr_inf = np.zeros(10, in_t) + np.inf
        arr_minf = np.zeros(10, in_t) - np.inf
        arr_mix = np.array([np.nan, np.inf, -np.inf], dtype=in_t)
        for out_t in IUINT_TYPES:
            for arr in (arr_nan, arr_inf, arr_minf, arr_mix):
                assert ArrayWriter(arr, out_t, check_scaling=False).scaling_needed()
                assert not SlopeArrayWriter(arr, out_t).scaling_needed()
                assert not SlopeInterArrayWriter(arr, out_t).scaling_needed()
    # Floats as input always need scaling
    for in_t in FLOAT_TYPES:
        arr = np.ones(10, in_t)
        for out_t in IUINT_TYPES:
            # We need an arraywriter that will tolerate construction when
            # scaling is needed
            assert SlopeArrayWriter(arr, out_t).scaling_needed()
    # in-range (u)ints don't need scaling
    for in_t in IUINT_TYPES:
        in_info = np.iinfo(in_t)
        in_min, in_max = in_info.min, in_info.max
        for out_t in IUINT_TYPES:
            out_info = np.iinfo(out_t)
            out_min, out_max = out_info.min, out_info.max
            if in_min >= out_min and in_max <= out_max:
                arr = np.array([in_min, in_max], in_t)
                assert np.can_cast(arr.dtype, out_t)
                # We've already tested this with can_cast above, but...
                assert not ArrayWriter(arr, out_t).scaling_needed()
                continue
            # The output data type does not include the input data range
            max_min = max(in_min, out_min)  # 0 for input or output uint
            min_max = min(in_max, out_max)
            arr = np.array([max_min, min_max], in_t)
            assert not ArrayWriter(arr, out_t).scaling_needed()
            assert SlopeInterArrayWriter(arr + 1, out_t).scaling_needed()
            if in_t in INT_TYPES:
                assert SlopeInterArrayWriter(arr - 1, out_t).scaling_needed()
def test_special_rt():
    """Round trips of all-zero and all-non-finite data.

    Non-finite values round trip to zeros for scaleable writer types.
    ArrayWriter errors on default creation; when forced with
    ``check_scaling=False`` the values round trip to the out_dtype
    extremes (inf -> max, nan -> 0, -inf -> min).
    """
    arr = np.array([np.inf, np.nan, -np.inf])
    for in_dtt in FLOAT_TYPES:
        for out_dtt in IUINT_TYPES:
            in_arr = arr.astype(in_dtt)
            with pytest.raises(WriterError):
                ArrayWriter(in_arr, out_dtt)
            aw = ArrayWriter(in_arr, out_dtt, check_scaling=False)
            mn, mx = shared_range(float, out_dtt)
            assert np.allclose(round_trip(aw).astype(float),
                               [mx, 0, mn])
            for klass in (SlopeArrayWriter, SlopeInterArrayWriter):
                aw = klass(in_arr, out_dtt)
                assert get_slope_inter(aw) == (1, 0)
                assert_array_equal(round_trip(aw), 0)
    # All-zero input gives identity scaling and zero output for all writers
    for in_dtt, out_dtt, awt in itertools.product(
            FLOAT_TYPES,
            IUINT_TYPES,
            (ArrayWriter, SlopeArrayWriter, SlopeInterArrayWriter)):
        arr = np.zeros((3,), dtype=in_dtt)
        aw = awt(arr, out_dtt)
        assert get_slope_inter(aw) == (1, 0)
        assert_array_equal(round_trip(aw), 0)
def test_high_int2uint():
    """In-range detection for values near the top of uint64.

    The check must be done in exact integer arithmetic: a floating point
    comparison made 2**63 appear to be in range for np.int64, which it
    is not, so the offset must kick in.
    """
    big = np.array([2 ** 63], dtype=np.uint64)
    writer = SlopeInterArrayWriter(big, np.int64)
    assert writer.inter == 2 ** 63
def test_slope_inter_castable():
    """Which writer classes accept which (in, out, data) combinations."""
    # Test scaling for arraywriter instances
    # Test special case of all zeros
    for in_dtt in FLOAT_TYPES + IUINT_TYPES:
        for out_dtt in NUMERIC_TYPES:
            for klass in (ArrayWriter, SlopeArrayWriter, SlopeInterArrayWriter):
                arr = np.zeros((5,), dtype=in_dtt)
                aw = klass(arr, out_dtt)  # no error
    # Test special case of none finite
    # This raises error for ArrayWriter, but not for the others
    arr = np.array([np.inf, np.nan, -np.inf])
    for in_dtt in FLOAT_TYPES:
        for out_dtt in IUINT_TYPES:
            in_arr = arr.astype(in_dtt)
            with pytest.raises(WriterError):
                ArrayWriter(in_arr, out_dtt)
            aw = SlopeArrayWriter(arr.astype(in_dtt), out_dtt)  # no error
            aw = SlopeInterArrayWriter(arr.astype(in_dtt), out_dtt)  # no error
    # Each tuple: input type, output type, data values, then whether
    # construction succeeds for slope-only, slope+intercept, and plain writers
    for in_dtt, out_dtt, arr, slope_only, slope_inter, neither in (
        (np.float32, np.float32, 1, True, True, True),
        (np.float64, np.float32, 1, True, True, True),
        (np.float32, np.complex128, 1, True, True, True),
        (np.uint32, np.complex128, 1, True, True, True),
        (np.int64, np.float32, 1, True, True, True),
        (np.float32, np.int16, 1, True, True, False),
        (np.complex128, np.float32, 1, False, False, False),
        (np.complex128, np.int16, 1, False, False, False),
        (np.uint8, np.int16, 1, True, True, True),
        # The following tests depend on the input data
        (np.uint16, np.int16, 1, True, True, True),  # 1 is in range
        (np.uint16, np.int16, 2**16 - 1, True, True, False),  # This not in range
        (np.uint16, np.int16, (0, 2**16 - 1), True, True, False),
        (np.uint16, np.uint8, 1, True, True, True),
        (np.int16, np.uint16, 1, True, True, True),  # in range
        (np.int16, np.uint16, -1, True, True, False),  # flip works for scaling
        (np.int16, np.uint16, (-1, 1), False, True, False),  # not with +-
        (np.int8, np.uint16, 1, True, True, True),  # in range
        (np.int8, np.uint16, -1, True, True, False),  # flip works for scaling
        (np.int8, np.uint16, (-1, 1), False, True, False),  # not with +-
    ):
        # data for casting
        data = np.array(arr, dtype=in_dtt)
        # With scaling but no intercept
        if slope_only:
            SlopeArrayWriter(data, out_dtt)
        else:
            with pytest.raises(WriterError):
                SlopeArrayWriter(data, out_dtt)
        # With scaling and intercept
        if slope_inter:
            SlopeInterArrayWriter(data, out_dtt)
        else:
            with pytest.raises(WriterError):
                SlopeInterArrayWriter(data, out_dtt)
        # With neither
        if neither:
            ArrayWriter(data, out_dtt)
        else:
            with pytest.raises(WriterError):
                ArrayWriter(data, out_dtt)
def test_calculate_scale():
    """Special cases in scale calculation: offsets, sign flips, true scaling."""
    def slope_inter_of(klass, values, in_dt, out_dt):
        # Build a writer and return its (slope, inter) pair
        return get_slope_inter(klass(np.array(values, dtype=in_dt), out_dt))

    # Offset handles scaling when it can
    assert slope_inter_of(SlopeInterArrayWriter, [-2, -1], np.int8, np.uint8) == (1.0, -2.0)
    # Sign flip handles these cases
    assert slope_inter_of(SlopeArrayWriter, [-2, -1], np.int8, np.uint8) == (-1.0, 0.0)
    assert slope_inter_of(SlopeArrayWriter, [-2, 0], np.int8, np.uint8) == (-1.0, 0.0)
    # But not when min magnitude is too large (scaling mechanism kicks in)
    assert slope_inter_of(SlopeArrayWriter, [-510, 0], np.int16, np.uint8) == (-2.0, 0.0)
    # Or for floats (attempts to expand across range)
    assert slope_inter_of(SlopeArrayWriter, [-2, 0], np.float32, np.uint8) != (-1.0, 0.0)
    # Case where offset handles scaling
    assert slope_inter_of(SlopeInterArrayWriter, [-1, 1], np.int8, np.uint8) == (1.0, -1.0)
    # Can't work for the no-offset case
    with pytest.raises(WriterError):
        SlopeArrayWriter(np.array([-1, 1], dtype=np.int8), np.uint8)
    # Offset trick can't work when max is out of range
    assert slope_inter_of(SlopeInterArrayWriter, [-1, 255], np.int16, np.uint8) != (1.0, -1.0)
def test_resets():
    """Scale values are cached until `force`d, and cleared by `reset`."""
    cases = ((SlopeInterArrayWriter, (1, 511), (2.0, 1.0)),
             (SlopeArrayWriter, (0, 510), (2.0, 0.0)))
    for writer_class, in_vals, expected in cases:
        data = np.array(in_vals)
        expected = np.array(expected)
        writer = writer_class(data, np.uint8)
        assert_array_equal(get_slope_inter(writer), expected)
        # Recalculating without force is a cached no-op
        writer.calc_scale()
        assert_array_equal(get_slope_inter(writer), expected)
        # Forcing with unchanged data gives the same answer
        writer.calc_scale(force=True)
        assert_array_equal(get_slope_inter(writer), expected)
        # Change the underlying array: the cache hides it until forced
        writer.array[:] = writer.array * 2
        writer.calc_scale()
        assert_array_equal(get_slope_inter(writer), expected)
        writer.calc_scale(force=True)
        assert_array_equal(get_slope_inter(writer), expected * 2)
        # reset returns to identity scaling
        writer.reset()
        assert_array_equal(get_slope_inter(writer), (1.0, 0.0))
def test_no_offset_scale():
    """Slope-only scaling: unit slopes, doubling, and sign handling."""
    def slope_for(values, in_dt, out_dt):
        return SlopeArrayWriter(np.array(values, dtype=in_dt), out_dt).slope

    # All of these float ranges fit int8 directly: unit slope
    for values in ((-128, 127),
                   (-128, 126),
                   (-128, -127),
                   (-128, 0),
                   (-128, -1),
                   (126, 127),
                   (-127, 127)):
        assert slope_for(values, np.float32, np.int8) == 1.0
    # Twice the int8 range on either side needs a slope of 2
    assert slope_for([-126, 127 * 2.0], np.float32, np.int8) == 2
    assert slope_for([-128 * 2.0, 127], np.float32, np.int8) == 2
    # Test that nasty abs behavior does not upset us
    lowest = -2**15
    writer = SlopeArrayWriter(np.array([lowest, lowest], dtype=np.int16), np.uint8)
    assert_array_almost_equal(writer.slope, lowest / 255.0, 5)
def test_with_offset_scale():
    """Specific slope / intercept outcomes for small integer inputs."""
    def slope_inter_of(values, in_dt, out_dt):
        writer = SlopeInterArrayWriter(np.array(values, dtype=in_dt), out_dt)
        return writer.slope, writer.inter

    assert slope_inter_of([0, 127], np.int8, np.uint8) == (1, 0)  # in range
    assert slope_inter_of([-1, 126], np.int8, np.uint8) == (1, -1)  # offset only
    assert slope_inter_of([-1, 254], np.int16, np.uint8) == (1, -1)  # offset only
    # Too big for offset only
    assert slope_inter_of([-1, 255], np.int16, np.uint8) != (1, -1)
    assert slope_inter_of([-256, -2], np.int16, np.uint8) == (1, -256)  # offset only
    assert slope_inter_of([-256, -2], np.int16, np.int8) == (1, -129)  # offset only
def test_io_scaling():
    """Scaling works for min / max when narrowing types or going float -> int.

    For each (in, out) type pair, write several extreme value tuples and
    check the round-trip error stays within half the (possibly negative)
    slope.
    """
    bio = BytesIO()
    for in_type, out_type in itertools.product(
            (np.int16, np.uint16, np.float32),
            (np.int8, np.uint8, np.int16, np.uint16)):
        out_dtype = np.dtype(out_type)
        info = type_info(in_type)
        imin, imax = info['min'], info['max']
        if imin == 0:  # unsigned int
            val_tuples = ((0, imax),
                          (100, imax))
        else:
            # (imin, 0) and (0, imax) are listed once here; the original
            # appended them a second time in a redundant `if imin != 0`
            # block, running those cases twice.
            val_tuples = ((imin, 0, imax),
                          (imin, 0),
                          (0, imax),
                          (imin, 100, imax))
        for vals in val_tuples:
            arr = np.array(vals, dtype=in_type)
            aw = SlopeInterArrayWriter(arr, out_dtype)
            aw.to_fileobj(bio)
            arr2 = array_from_file(arr.shape, out_dtype, bio)
            arr3 = apply_read_scaling(arr2, aw.slope, aw.inter)
            # Max rounding error for integer type
            # Slope might be negative
            max_miss = np.abs(aw.slope) / 2.
            abs_err = np.abs(arr - arr3)
            assert np.all(abs_err <= max_miss)
            if out_type in UINT_TYPES and 0 in (min(arr), max(arr)):
                # Check that error is minimized for 0 as min or max
                assert min(abs_err) == abs_err[arr == 0]
            # Rewind the in-memory file for the next case
            bio.truncate(0)
            bio.seek(0)
def test_input_ranges():
    """Precision holds across many input data ranges.

    `offset` shifts the data so precision is checked over different
    ranges.  The original generated the offsets but never applied them,
    making all 20 offset iterations identical; apply them here.
    """
    arr = np.arange(-500, 501, 10, dtype=np.float64)
    bio = BytesIO()
    working_type = np.float32
    work_eps = np.finfo(working_type).eps
    for out_type, offset in itertools.product(
            IUINT_TYPES,
            range(-1000, 1000, 100)):
        shifted = arr + offset
        aw = SlopeInterArrayWriter(shifted, out_type)
        aw.to_fileobj(bio)
        arr2 = array_from_file(shifted.shape, out_type, bio)
        arr3 = apply_read_scaling(arr2, aw.slope, aw.inter)
        # Max rounding error for integer type
        # Slope might be negative
        max_miss = np.abs(aw.slope) / working_type(2.) + work_eps * 10
        abs_err = np.abs(shifted - arr3)
        max_err = np.abs(shifted) * work_eps + max_miss
        assert np.all(abs_err <= max_err)
        if out_type in UINT_TYPES and 0 in (min(shifted), max(shifted)):
            # Check that error is minimized for 0 as min or max
            assert min(abs_err) == abs_err[shifted == 0]
        # Rewind the in-memory file for the next case
        bio.truncate(0)
        bio.seek(0)
def test_nan2zero():
    """Conditions under which NaNs are written as zero, and error cases.

    ``nan2zero`` as an argument to `to_fileobj` is deprecated, and raises
    an error if it differs from the value fixed at construction - meaning
    that by default, passing ``nan2zero=False`` there will raise an error.
    """
    arr = np.array([np.nan, 99.], dtype=np.float32)
    for awt, kwargs in ((ArrayWriter, dict(check_scaling=False)),
                        (SlopeArrayWriter, dict(calc_scale=False)),
                        (SlopeInterArrayWriter, dict(calc_scale=False))):
        # nan2zero default is True
        # nan2zero ignored for floats
        aw = awt(arr, np.float32, **kwargs)
        data_back = round_trip(aw)
        assert_array_equal(np.isnan(data_back), [True, False])
        # Deprecation warning for nan2zero as argument to `to_fileobj`
        with error_warnings():
            with pytest.deprecated_call():
                aw.to_fileobj(BytesIO(), 'F', True)
            with pytest.deprecated_call():
                aw.to_fileobj(BytesIO(), 'F', nan2zero=True)
        # Error if nan2zero is not the value set at initialization
        with pytest.raises(WriterError):
            aw.to_fileobj(BytesIO(), 'F', False)
        # set explicitly
        aw = awt(arr, np.float32, nan2zero=True, **kwargs)
        data_back = round_trip(aw)
        assert_array_equal(np.isnan(data_back), [True, False])
        # Integer output with nan2zero gives zero
        aw = awt(arr, np.int32, **kwargs)
        data_back = round_trip(aw)
        assert_array_equal(data_back, [0, 99])
        # Integer output with nan2zero=False gives whatever astype gives
        aw = awt(arr, np.int32, nan2zero=False, **kwargs)
        data_back = round_trip(aw)
        astype_res = np.array(np.nan).astype(np.int32)
        assert_array_equal(data_back, [astype_res, 99])
        # Deprecation warning for nan2zero as argument to `to_fileobj`
        with error_warnings():
            with pytest.deprecated_call():
                aw.to_fileobj(BytesIO(), 'F', False)
            with pytest.deprecated_call():
                aw.to_fileobj(BytesIO(), 'F', nan2zero=False)
        # Error if nan2zero is not the value set at initialization
        with pytest.raises(WriterError):
            aw.to_fileobj(BytesIO(), 'F', True)
def test_byte_orders():
    """Round trips preserve data for little- and big-endian output dtypes."""
    data = np.arange(10, dtype=np.int32)
    # These wide types hold int32 exactly, so no scaling is involved
    for base_type in (np.uint64, np.float64, np.complex128):
        for code in '<>':
            swapped = np.dtype(base_type).newbyteorder(code)
            for writer_class in (SlopeInterArrayWriter, SlopeArrayWriter,
                                 ArrayWriter):
                writer = writer_class(data, swapped)
                assert_array_almost_equal(data, round_trip(writer))
def test_writers_roundtrip():
    """Round trips with manually set intercept / slope, NaNs and infs."""
    ndt = np.dtype(np.float64)
    data = np.arange(3, dtype=ndt)
    # A manually-set intercept survives the round trip
    writer = SlopeInterArrayWriter(data, ndt, calc_scale=False)
    writer.inter = 1.0
    assert_array_equal(round_trip(writer), data)
    # As does a manually-set slope
    writer.slope = 2.0
    assert_array_equal(round_trip(writer), data)
    # If there is no valid data, we get zeros
    writer = SlopeInterArrayWriter(data + np.nan, np.int32)
    assert_array_equal(round_trip(writer), np.zeros(data.shape))
    # Infs generate ints at the same value as the finite max
    data[0] = np.inf
    writer = SlopeInterArrayWriter(data, np.int32)
    assert_array_almost_equal(round_trip(writer), [2, 1, 2])
def test_to_float():
    """Casting every numeric type to (c)float output dtypes.

    Includes full-range extremes in the input, checking they overflow to
    +/-inf when the output type cannot hold them.
    """
    start, stop = 0, 100
    for in_type in NUMERIC_TYPES:
        step = 1 if in_type in IUINT_TYPES else 0.5
        info = type_info(in_type)
        mn, mx = info['min'], info['max']
        arr = np.arange(start, stop, step, dtype=in_type)
        arr[0] = mn
        arr[-1] = mx
        for out_type in CFLOAT_TYPES:
            out_info = type_info(out_type)
            for klass in (SlopeInterArrayWriter, SlopeArrayWriter,
                          ArrayWriter):
                if in_type in COMPLEX_TYPES and out_type in FLOAT_TYPES:
                    # Complex to real is never allowed
                    with pytest.raises(WriterError):
                        klass(arr, out_type)
                    continue
                aw = klass(arr, out_type)
                assert aw.array is arr
                assert aw.out_dtype == out_type
                arr_back = round_trip(aw)
                assert_array_equal(arr.astype(out_type), arr_back)
                # Check too-big values overflowed correctly
                out_min, out_max = out_info['min'], out_info['max']
                assert np.all(arr_back[arr > out_max] == np.inf)
                assert np.all(arr_back[arr < out_min] == -np.inf)
def test_dumber_writers():
    """SlopeArrayWriter exposes only `slope`; ArrayWriter exposes neither."""
    data = np.arange(10, dtype=np.float64)
    slope_writer = SlopeArrayWriter(data)
    slope_writer.slope = 2.0
    assert slope_writer.slope == 2.0
    # No intercept on a slope-only writer
    with pytest.raises(AttributeError):
        slope_writer.inter
    plain_writer = ArrayWriter(data)
    # Neither scale attribute exists on the plain writer
    with pytest.raises(AttributeError):
        plain_writer.slope
    with pytest.raises(AttributeError):
        plain_writer.inter
    # A plain writer cannot scale, so a lossy cast raises
    with pytest.raises(WriterError):
        ArrayWriter(data, np.int16)
def test_writer_maker():
    """make_array_writer picks the writer class and runs calc_scale by default."""
    data = np.arange(10, dtype=np.float64)
    # (calc_slope, calc_inter) flags select the writer class
    assert isinstance(make_array_writer(data, np.float64),
                      SlopeInterArrayWriter)
    assert isinstance(make_array_writer(data, np.float64, True, True),
                      SlopeInterArrayWriter)
    assert isinstance(make_array_writer(data, np.float64, True, False),
                      SlopeArrayWriter)
    assert isinstance(make_array_writer(data, np.float64, False, False),
                      ArrayWriter)
    # An intercept without a slope makes no sense
    with pytest.raises(ValueError):
        make_array_writer(data, np.float64, False)
    with pytest.raises(ValueError):
        make_array_writer(data, np.float64, False, True)
    # With calc_scale=False, scaling stays at the identity until requested
    writer = make_array_writer(data, np.int16, calc_scale=False)
    assert (writer.slope, writer.inter) == (1, 0)
    writer.calc_scale()
    slope, inter = writer.slope, writer.inter
    assert not (slope, inter) == (1, 0)
    # calc_scale runs by default, and when explicitly requested
    writer = make_array_writer(data, np.int16)
    assert (writer.slope, writer.inter) == (slope, inter)
    writer = make_array_writer(data, np.int16, calc_scale=True)
    assert (writer.slope, writer.inter) == (slope, inter)
def test_float_int_min_max():
    """Full-range float inputs survive scaled conversion to (u)ints."""
    # Conversion between float and int
    for in_dt in FLOAT_TYPES:
        finf = type_info(in_dt)
        arr = np.array([finf['min'], finf['max']], dtype=in_dt)
        # Bug in numpy 1.6.2 on PPC leading to infs - abort
        if not np.all(np.isfinite(arr)):
            print(f'Hit PPC max -> inf bug; skip in_type {in_dt}')
            continue
        for out_dt in IUINT_TYPES:
            try:
                with suppress_warnings():  # overflow
                    aw = SlopeInterArrayWriter(arr, out_dt)
            except ScalingError:
                # Scale factor not representable - nothing to test
                continue
            arr_back_sc = round_trip(aw)
            assert np.allclose(arr, arr_back_sc)
def test_int_int_min_max():
    """Full-range (u)int inputs survive scaled conversion to (u)ints."""
    # Conversion between (u)int and (u)int
    eps = np.finfo(np.float64).eps  # avoids division by zero below
    rtol = 1e-6
    for in_dt in IUINT_TYPES:
        iinf = np.iinfo(in_dt)
        arr = np.array([iinf.min, iinf.max], dtype=in_dt)
        for out_dt in IUINT_TYPES:
            try:
                aw = SlopeInterArrayWriter(arr, out_dt)
            except ScalingError:
                # Scale factor not representable - nothing to test
                continue
            arr_back_sc = round_trip(aw)
            # integer allclose
            adiff = int_abs(arr - arr_back_sc)
            rdiff = adiff / (arr + eps)
            assert np.all(rdiff < rtol)
def test_int_int_slope():
    """(u)int to (u)int conversion using slope-only scaling."""
    eps = np.finfo(np.float64).eps  # avoids division by zero below
    rtol = 1e-7
    for in_dt in IUINT_TYPES:
        iinf = np.iinfo(in_dt)
        for out_dt in IUINT_TYPES:
            # Kind pair, e.g. 'iu' for int input / uint output; every pair
            # matches exactly one of the two branches below
            kinds = np.dtype(in_dt).kind + np.dtype(out_dt).kind
            if kinds in ('ii', 'uu', 'ui'):
                arrs = (np.array([iinf.min, iinf.max], dtype=in_dt),)
            elif kinds == 'iu':
                # A signed range with both signs cannot reach an unsigned
                # type with a slope alone; test each half separately
                arrs = (np.array([iinf.min, 0], dtype=in_dt),
                        np.array([0, iinf.max], dtype=in_dt))
            for arr in arrs:
                try:
                    aw = SlopeArrayWriter(arr, out_dt)
                except ScalingError:
                    continue
                assert not aw.slope == 0
                arr_back_sc = round_trip(aw)
                # integer allclose
                adiff = int_abs(arr - arr_back_sc)
                rdiff = adiff / (arr + eps)
                assert np.all(rdiff < rtol)
def test_float_int_spread():
    """Rounding error stays within estimate across many magnitudes."""
    # Test rounding error for spread of values
    powers = np.arange(-10, 10, 0.5)
    arr = np.concatenate((-10**powers, 10**powers))
    for in_dt in (np.float32, np.float64):
        arr_t = arr.astype(in_dt)
        for out_dt in IUINT_TYPES:
            aw = SlopeInterArrayWriter(arr_t, out_dt)
            arr_back_sc = round_trip(aw)
            # Get estimate for error
            max_miss = rt_err_estimate(arr_t,
                                       arr_back_sc.dtype,
                                       aw.slope,
                                       aw.inter)
            # Simulate allclose test with large atol
            diff = np.abs(arr_t - arr_back_sc)
            rdiff = diff / np.abs(arr_t)
            assert np.all((diff <= max_miss) | (rdiff <= 1e-5))
def rt_err_estimate(arr_t, out_dtype, slope, inter):
    """Return an upper bound on round-trip error for scaled integer storage.

    Parameters
    ----------
    arr_t : ndarray
        Floating point input array.
    out_dtype : dtype-like
        Integer storage dtype used for the round trip.
    slope, inter : scalar or None
        Scale and intercept applied on write; None means "not applied",
        i.e. the identity slope 1 / intercept 0.

    Returns
    -------
    scalar
        Maximum error attributable to integer rounding plus floating
        point scaling error.
    """
    slope = 1 if slope is None else slope
    # A missing intercept means no offset was applied; its identity value
    # is 0 (the original defaulted to 1, which biased the estimate).
    inter = 0 if inter is None else inter
    # Error attributable to rounding to the stored integer
    max_int_miss = slope / 2.
    # Estimate error attributable to floating point slope / inter;
    # Remove inter / slope, put in a float type to simulate the type
    # promotion for the multiplication, apply slope / inter
    flt_there = (arr_t - inter) / slope
    flt_back = flt_there.astype(out_dtype) * slope + inter
    max_flt_miss = np.abs(arr_t - flt_back).max()
    # Max error is sum of rounding and fp error
    return max_int_miss + max_flt_miss
def test_rt_bias():
    """The round trip should not introduce a systematic bias."""
    # Check for bias in round trip
    rng = np.random.RandomState(20111214)  # fixed seed for reproducibility
    mu, std, count = 100, 10, 100
    arr = rng.normal(mu, std, size=(count,))
    eps = np.finfo(np.float32).eps
    for in_dt in (np.float32, np.float64):
        arr_t = arr.astype(in_dt)
        for out_dt in IUINT_TYPES:
            aw = SlopeInterArrayWriter(arr_t, out_dt)
            arr_back_sc = round_trip(aw)
            bias = np.mean(arr_t - arr_back_sc)
            # Get estimate for error
            max_miss = rt_err_estimate(arr_t,
                                       arr_back_sc.dtype,
                                       aw.slope,
                                       aw.inter)
            # Hokey use of max_miss as a std estimate
            bias_thresh = np.max([max_miss / np.sqrt(count), eps])
            assert np.abs(bias) < bias_thresh
def test_nan2zero_scaling():
    """Scaling must account for whether NaN can be represented as zero.

    NaN can be represented as zero if we can store
    ``(0 - intercept) / divslope`` in the output data - because reading
    back the data as ``stored_array * divslope + intercept`` will
    reconstruct zeros for the NaNs in the original input.

    Strategy:
    * make an array requiring scaling whose range does not cover 0 -> arr
    * append NaN to arr -> nan_arr; replace NaN with 0 -> zero_arr
    * write / read nan_arr, zero_arr
    * confirm NaN and 0 generated the same output value
    """
    for awt, in_dt, out_dt, sign in itertools.product(
            (SlopeArrayWriter, SlopeInterArrayWriter),
            FLOAT_TYPES,
            IUINT_TYPES,
            (-1, 1),
    ):
        # Use fixed-up type information to avoid bugs, especially on PPC
        in_info = type_info(in_dt)
        out_info = type_info(out_dt)
        # Skip impossible combinations
        if in_info['min'] == 0 and sign == -1:
            continue
        mx = min(in_info['max'], out_info['max'] * 2., 2**32)
        vals = [np.nan] + [100, mx]
        nan_arr = np.array(vals, dtype=in_dt) * sign
        # Check that nan scales to same value as zero within same array
        nan_arr_0 = np.array([0] + vals, dtype=in_dt) * sign
        # Check that nan scales to almost the same value as zero in another array
        zero_arr = np.nan_to_num(nan_arr)
        nan_aw = awt(nan_arr, out_dt, nan2zero=True)
        back_nan = round_trip(nan_aw) * float(sign)
        nan_0_aw = awt(nan_arr_0, out_dt, nan2zero=True)
        back_nan_0 = round_trip(nan_0_aw) * float(sign)
        zero_aw = awt(zero_arr, out_dt, nan2zero=True)
        back_zero = round_trip(zero_aw) * float(sign)
        assert np.allclose(back_nan[1:], back_zero[1:])
        assert_array_equal(back_nan[1:], back_nan_0[2:])
        assert np.abs(back_nan[0] - back_zero[0]) < 1e-2
        assert back_nan_0[0] == back_nan_0[1]
def test_finite_range_nan():
    """`finite_range` and `has_nan` across many input shapes and dtypes."""
    # Test finite range method and has_nan property
    # Each (input, expected (min, max)) pair; all-non-finite inputs give
    # the degenerate (inf, -inf) range
    for in_arr, res in (
        ([[-1, 0, 1], [np.inf, np.nan, -np.inf]], (-1, 1)),
        (np.array([[-1, 0, 1], [np.inf, np.nan, -np.inf]]), (-1, 1)),
        ([[np.nan], [np.nan]], (np.inf, -np.inf)),  # all nans slices
        (np.zeros((3, 4, 5)) + np.nan, (np.inf, -np.inf)),
        ([[-np.inf], [np.inf]], (np.inf, -np.inf)),  # all infs slices
        (np.zeros((3, 4, 5)) + np.inf, (np.inf, -np.inf)),
        ([[np.nan, -1, 2], [-2, np.nan, 1]], (-2, 2)),
        ([[np.nan, -np.inf, 2], [-2, np.nan, np.inf]], (-2, 2)),
        ([[-np.inf, 2], [np.nan, 1]], (1, 2)),  # good max case
        ([[np.nan, -np.inf, 2], [-2, np.nan, np.inf]], (-2, 2)),
        ([np.nan], (np.inf, -np.inf)),
        ([np.inf], (np.inf, -np.inf)),
        ([-np.inf], (np.inf, -np.inf)),
        ([np.inf, 1], (1, 1)),  # only look at finite values
        ([-np.inf, 1], (1, 1)),
        ([[], []], (np.inf, -np.inf)),  # empty array
        (np.array([[-3, 0, 1], [2, -1, 4]], dtype=int), (-3, 4)),
        (np.array([[1, 0, 1], [2, 3, 4]], dtype=np.uint), (0, 4)),
        ([0., 1, 2, 3], (0, 3)),
        # Complex comparison works as if they are floats
        ([[np.nan, -1 - 100j, 2], [-2, np.nan, 1 + 100j]], (-2, 2)),
        ([[np.nan, -1, 2 - 100j], [-2 + 100j, np.nan, 1]], (-2 + 100j, 2 - 100j)),
    ):
        for awt, kwargs in ((ArrayWriter, dict(check_scaling=False)),
                            (SlopeArrayWriter, {}),
                            (SlopeArrayWriter, dict(calc_scale=False)),
                            (SlopeInterArrayWriter, {}),
                            (SlopeInterArrayWriter, dict(calc_scale=False))):
            for out_type in NUMERIC_TYPES:
                has_nan = np.any(np.isnan(in_arr))
                try:
                    aw = awt(in_arr, out_type, **kwargs)
                except WriterError:
                    continue
                # Should not matter about the order of finite range method call
                # and has_nan property - test this is true
                assert aw.has_nan == has_nan
                assert aw.finite_range() == res
                aw = awt(in_arr, out_type, **kwargs)
                assert aw.finite_range() == res
                assert aw.has_nan == has_nan
                # Check float types work as complex
                in_arr = np.array(in_arr)
                if in_arr.dtype.kind == 'f':
                    c_arr = in_arr.astype(np.complex128)
                    try:
                        aw = awt(c_arr, out_type, **kwargs)
                    except WriterError:
                        continue
                    aw = awt(c_arr, out_type, **kwargs)
                    assert aw.has_nan == has_nan
                    assert aw.finite_range() == res
            # Structured type cannot be nan and we can test this
            a = np.array([[1., 0, 1], [2, 3, 4]]).view([('f1', 'f')])
            aw = awt(a, a.dtype, **kwargs)
            with pytest.raises(TypeError):
                aw.finite_range()
            assert not aw.has_nan
|
{"hexsha": "22684ac9550ddeb0f36af831df6decf81d2d8b4a", "size": 37367, "ext": "py", "lang": "Python", "max_stars_repo_path": "venv/Lib/site-packages/nibabel/tests/test_arraywriters.py", "max_stars_repo_name": "richung99/digitizePlots", "max_stars_repo_head_hexsha": "6b408c820660a415a289726e3223e8f558d3e18b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-01-18T17:56:51.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-18T17:56:51.000Z", "max_issues_repo_path": "venv/Lib/site-packages/nibabel/tests/test_arraywriters.py", "max_issues_repo_name": "richung99/digitizePlots", "max_issues_repo_head_hexsha": "6b408c820660a415a289726e3223e8f558d3e18b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "venv/Lib/site-packages/nibabel/tests/test_arraywriters.py", "max_forks_repo_name": "richung99/digitizePlots", "max_forks_repo_head_hexsha": "6b408c820660a415a289726e3223e8f558d3e18b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.6563926941, "max_line_length": 87, "alphanum_fraction": 0.5876040356, "include": true, "reason": "import numpy,from numpy", "num_tokens": 9778}
|
# License: Apache-2.0
import copy
import warnings
from typing import Union
import databricks.koalas as ks
import numpy as np
import pandas as pd
from ..data_cleaning.drop_columns import DropColumns
from ..transformers.transformer import Transformer
from ..util import util
from ._base_encoder import _BaseEncoder
class MultiClassEncoder(_BaseEncoder):
    """Encode the categorical columns with a binary encoder passed by the user.

    *N* categorical columns are mapped into *N * (n - 1)* numerical columns
    where *n* is the number of classes.

    Parameters
    ----------
    encoder : Transformer
        Binary Encoder.
    dtype : type, default to np.float64.
        Numerical datatype of the output data.

    Examples
    --------
    * fit & transform with `pandas`

    >>> import pandas as pd
    >>> from gators.encoders import MultiClassEncoder
    >>> from gators.encoders import WOEEncoder
    >>> X = pd.DataFrame({
    ... 'A': ['Q', 'Q', 'Q', 'W', 'W', 'W'],
    ... 'B': ['Q', 'Q', 'W', 'W', 'W', 'W'],
    ... 'C': ['Q', 'Q', 'Q', 'Q', 'W', 'W'],
    ... 'D': [1, 2, 3, 4, 5, 6]})
    >>> y = pd.Series([0, 0, 1, 2, 1, 2], name='TARGET')
    >>> obj = MultiClassEncoder(WOEEncoder())
    >>> obj.fit_transform(X, y)
    D A__TARGET_1_WOEEncoder B__TARGET_1_WOEEncoder C__TARGET_1_WOEEncoder A__TARGET_2_WOEEncoder B__TARGET_2_WOEEncoder C__TARGET_2_WOEEncoder
    0 1.0 0.0 0.000000 -0.405465 0.000000 0.000000 -0.405465
    1 2.0 0.0 0.000000 -0.405465 0.000000 0.000000 -0.405465
    2 3.0 0.0 0.693147 -0.405465 0.000000 0.693147 -0.405465
    3 4.0 0.0 0.693147 -0.405465 1.386294 0.693147 -0.405465
    4 5.0 0.0 0.693147 0.693147 1.386294 0.693147 0.693147
    5 6.0 0.0 0.693147 0.693147 1.386294 0.693147 0.693147

    * fit & transform with `koalas`

    >>> import databricks.koalas as ks
    >>> from gators.encoders import MultiClassEncoder
    >>> from gators.encoders import WOEEncoder
    >>> X = ks.DataFrame({
    ... 'A': ['Q', 'Q', 'Q', 'W', 'W', 'W'],
    ... 'B': ['Q', 'Q', 'W', 'W', 'W', 'W'],
    ... 'C': ['Q', 'Q', 'Q', 'Q', 'W', 'W'],
    ... 'D': [1, 2, 3, 4, 5, 6]})
    >>> y = ks.Series([0, 0, 1, 2, 1, 2], name='TARGET')
    >>> obj = MultiClassEncoder(WOEEncoder())
    >>> obj.fit_transform(X, y)
    D A__TARGET_1_WOEEncoder B__TARGET_1_WOEEncoder C__TARGET_1_WOEEncoder A__TARGET_2_WOEEncoder B__TARGET_2_WOEEncoder C__TARGET_2_WOEEncoder
    0 1.0 0.0 0.000000 -0.405465 0.000000 0.000000 -0.405465
    1 2.0 0.0 0.000000 -0.405465 0.000000 0.000000 -0.405465
    2 3.0 0.0 0.693147 -0.405465 0.000000 0.693147 -0.405465
    3 4.0 0.0 0.693147 -0.405465 1.386294 0.693147 -0.405465
    4 5.0 0.0 0.693147 0.693147 1.386294 0.693147 0.693147
    5 6.0 0.0 0.693147 0.693147 1.386294 0.693147 0.693147

    * fit with `pandas` & transform with `NumPy`

    >>> import pandas as pd
    >>> from gators.encoders import MultiClassEncoder
    >>> from gators.encoders import WOEEncoder
    >>> X = pd.DataFrame({
    ... 'A': ['Q', 'Q', 'Q', 'W', 'W', 'W'],
    ... 'B': ['Q', 'Q', 'W', 'W', 'W', 'W'],
    ... 'C': ['Q', 'Q', 'Q', 'Q', 'W', 'W'],
    ... 'D': [1, 2, 3, 4, 5, 6]})
    >>> y = pd.Series([0, 0, 1, 2, 1, 2], name='TARGET')
    >>> obj = MultiClassEncoder(WOEEncoder())
    >>> _ = obj.fit(X, y)
    >>> obj.transform_numpy(X.to_numpy())
    array([[ 1.        ,  0.        ,  0.        , -0.40546511,  0.        ,
             0.        , -0.40546511],
           [ 2.        ,  0.        ,  0.        , -0.40546511,  0.        ,
             0.        , -0.40546511],
           [ 3.        ,  0.        ,  0.69314718, -0.40546511,  0.        ,
             0.69314718, -0.40546511],
           [ 4.        ,  0.        ,  0.69314718, -0.40546511,  1.38629436,
             0.69314718, -0.40546511],
           [ 5.        ,  0.        ,  0.69314718,  0.69314718,  1.38629436,
             0.69314718,  0.69314718],
           [ 6.        ,  0.        ,  0.69314718,  0.69314718,  1.38629436,
             0.69314718,  0.69314718]])

    * fit with `koalas` & transform with `NumPy`

    >>> import databricks.koalas as ks
    >>> from gators.encoders import MultiClassEncoder
    >>> from gators.encoders import WOEEncoder
    >>> X = ks.DataFrame({
    ... 'A': ['Q', 'Q', 'Q', 'W', 'W', 'W'],
    ... 'B': ['Q', 'Q', 'W', 'W', 'W', 'W'],
    ... 'C': ['Q', 'Q', 'Q', 'Q', 'W', 'W'],
    ... 'D': [1, 2, 3, 4, 5, 6]})
    >>> y = ks.Series([0, 0, 1, 2, 1, 2], name='TARGET')
    >>> obj = MultiClassEncoder(WOEEncoder())
    >>> _ = obj.fit(X, y)
    >>> obj.transform_numpy(X.to_numpy())
    array([[ 1.        ,  0.        ,  0.        , -0.40546511,  0.        ,
             0.        , -0.40546511],
           [ 2.        ,  0.        ,  0.        , -0.40546511,  0.        ,
             0.        , -0.40546511],
           [ 3.        ,  0.        ,  0.69314718, -0.40546511,  0.        ,
             0.69314718, -0.40546511],
           [ 4.        ,  0.        ,  0.69314718, -0.40546511,  1.38629436,
             0.69314718, -0.40546511],
           [ 5.        ,  0.        ,  0.69314718,  0.69314718,  1.38629436,
             0.69314718,  0.69314718],
           [ 6.        ,  0.        ,  0.69314718,  0.69314718,  1.38629436,
             0.69314718,  0.69314718]])
    """

    def __init__(self, encoder: Transformer, dtype: type = np.float64):
        if not isinstance(encoder, Transformer):
            raise TypeError("`encoder` should be a transformer.")
        _BaseEncoder.__init__(self, dtype=dtype)
        # Template encoder; one copy is fitted per one-hot label column.
        self.encoder = encoder
        self.drop_columns = None
        self.label_names = []
        self.encoder_dict = {}
        self.columns = []
        self.idx_columns = np.ndarray([])
        self.column_names = []
        self.column_mapping = {}
        # Used as a suffix in the generated column names.
        self.name = type(encoder).__name__

    def fit(
        self, X: Union[pd.DataFrame, ks.DataFrame], y: Union[pd.Series, ks.Series]
    ) -> "MultiClassEncoder":
        """Fit the transformer on the dataframe `X`.

        Parameters
        ----------
        X : Union[pd.DataFrame, ks.DataFrame].
            Input dataframe.
        y : Union[pd.Series, ks.Series], default to None.
            Labels.

        Returns
        -------
        MultiClassEncoder
            Instance of itself.
        """
        self.check_dataframe(X)
        self.check_y(X, y)
        self.check_multiclass_target(y)
        self.columns = util.get_datatype_columns(X, object)
        self.check_nans(X, self.columns)
        self.drop_columns = DropColumns(self.columns).fit(X)
        if not self.columns:
            warnings.warn(
                f"""`X` does not contain object columns:
                `{self.__class__.__name__}` is not needed"""
            )
            return self
        self.idx_columns = util.get_idx_columns(
            columns=X.columns,
            selected_columns=self.columns,
        )
        y_name = y.name
        if isinstance(X, pd.DataFrame):
            y_one_hot = pd.get_dummies(y, prefix=y_name)
        else:
            y_one_hot = ks.get_dummies(y, prefix=y_name)
        # Drop the first class: n classes -> n - 1 binary targets.
        y_one_hot = y_one_hot.drop(y_one_hot.columns[0], axis=1)
        self.label_names = y_one_hot.columns
        for label_name in self.label_names:
            # NOTE(review): copy.copy is shallow — fitting is assumed to bind
            # new state on each copy rather than mutate shared attributes;
            # confirm for custom encoders.
            self.encoder_dict[label_name] = copy.copy(self.encoder)
            self.encoder_dict[label_name].fit(X[self.columns], y_one_hot[label_name])
        return self

    def transform(
        self, X: Union[pd.DataFrame, ks.DataFrame]
    ) -> Union[pd.DataFrame, ks.DataFrame]:
        """Transform the dataframe `X`.

        Parameters
        ----------
        X : Union[pd.DataFrame, ks.DataFrame].
            Input dataframe.

        Returns
        -------
        Union[pd.DataFrame, ks.DataFrame]
            Transformed dataframe.
        """
        self.check_dataframe(X)
        if not self.columns:
            self.idx_columns = np.array([])
            return X
        # Bug fix: reset the bookkeeping so repeated `transform` calls do not
        # accumulate duplicated column names / mappings.
        self.column_names = []
        self.column_mapping = {}
        for label_name in self.label_names:
            dummy = self.encoder_dict[label_name].transform(X[self.columns].copy())[
                self.encoder_dict[label_name].columns
            ]
            column_names = [f"{col}__{label_name}_{self.name}" for col in dummy.columns]
            dummy.columns = column_names
            self.column_names.extend(column_names)
            for name, col in zip(column_names, self.columns):
                self.column_mapping[name] = col
            X = X.join(dummy, how="inner").sort_index()
        return self.drop_columns.transform(X).astype(self.dtype)

    def transform_numpy(self, X: np.ndarray) -> np.ndarray:
        """Transform the NumPy array `X`.

        Parameters
        ----------
        X : np.ndarray
            Input array.

        Returns
        -------
        np.ndarray
            Transformed array.
        """
        self.check_array(X)
        if not self.columns:
            return X
        X_encoded_list = []
        for label_name in self.label_names:
            dummy = self.encoder_dict[label_name].transform_numpy(
                X[:, self.idx_columns].copy()
            )
            X_encoded_list.append(dummy)
        # Non-categorical columns first, then one block per binary target.
        X_new = np.concatenate(
            [self.drop_columns.transform_numpy(X)] + X_encoded_list, axis=1
        )
        return X_new.astype(self.dtype)
|
{"hexsha": "ae08cdb6383e5136214e7b9d3b9d9639b6e79ca7", "size": 10540, "ext": "py", "lang": "Python", "max_stars_repo_path": "gators/encoders/multiclass_encoder.py", "max_stars_repo_name": "Aditya-Kapadiya/gators", "max_stars_repo_head_hexsha": "d7c9967e3a8e304a601b6a92ad834d03d3e36338", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2021-10-29T18:20:52.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T22:53:03.000Z", "max_issues_repo_path": "gators/encoders/multiclass_encoder.py", "max_issues_repo_name": "Aditya-Kapadiya/gators", "max_issues_repo_head_hexsha": "d7c9967e3a8e304a601b6a92ad834d03d3e36338", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-01-19T12:16:19.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-19T12:16:19.000Z", "max_forks_repo_path": "gators/encoders/multiclass_encoder.py", "max_forks_repo_name": "Aditya-Kapadiya/gators", "max_forks_repo_head_hexsha": "d7c9967e3a8e304a601b6a92ad834d03d3e36338", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2021-11-17T20:16:54.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-21T18:21:02.000Z", "avg_line_length": 43.1967213115, "max_line_length": 154, "alphanum_fraction": 0.4760910816, "include": true, "reason": "import numpy", "num_tokens": 3080}
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File : initialization.py
@Time : 2021/12/11 17:21:18
@Author : Lin Junwei
@Version : 1.0
@Desc : initialization class and function
'''
#%% import
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import scipy.io
import timeit
from sklearn.cluster import KMeans
import pickle
import os
from scipy.spatial import distance_matrix
from scipy.spatial.distance import cdist
import matplotlib.pyplot as plt
import logging
#%% load data function
def self_generate_cluster(n=100, sigma=1, c=(1, 1)):
    """
    Draw one isotropic Gaussian cluster around a centroid.

    Parameters
    ----------
    n : int, sample size
    sigma : float, standard deviation of the Gaussian noise
            (passed to np.random.normal, which takes a std dev, not a variance)
    c : sequence of floats, cluster centroid

    Returns
    ----------
    c1 : np.array in shape (n, len(c))
    """
    # Tuple default replaces the original mutable-list default argument.
    noisy_axes = tuple(coord + np.random.normal(0, sigma, n) for coord in c)
    return np.column_stack(noisy_axes)
def self_dataset(n=(100,), sigma=(1,), c=((1, 1),)):
    """
    Build a labelled dataset from several Gaussian clusters.

    Parameters
    ----------
    n : sequence of ints, per-cluster sample sizes
    sigma : sequence of floats, per-cluster noise standard deviations
    c : sequence of centroids, one per cluster

    Returns
    ----------
    allset : np.array in shape (total samples, features)
    alllabel : np.array in shape (total samples, 1), cluster index per sample
    """
    # Immutable defaults replace the original mutable-list defaults; samples
    # are collected in lists and concatenated once instead of repeatedly
    # re-concatenating (which was quadratic in the number of clusters).
    sets = []
    labels = []
    for idx in range(len(sigma)):
        sets.append(self_generate_cluster(n=n[idx], sigma=sigma[idx], c=c[idx]))
        labels.append(np.full(n[idx], idx))
    allset = np.concatenate(sets, axis=0)
    alllabel = np.concatenate(labels).reshape(-1, 1)
    return allset, alllabel
def load_dataset(dataset = 'wine'):
    """
    Load one of the bundled .mat benchmark datasets from disk.

    Parameters
    ----------
    dataset : name of dataset

    Returns
    ----------
    data : array in shape (samples, features)
    label : array in shape (samples, 1)
    """
    data_path = 'datasets/datasets/{}/{}_data.mat'.format(dataset,dataset)
    label_path = 'datasets/datasets/{}/{}_label.mat'.format(dataset,dataset)
    data = scipy.io.loadmat(data_path)['A']
    label = scipy.io.loadmat(label_path)['b']
    # Every dataset except mnist is stored sparse as (features, samples):
    # densify and transpose those.
    if dataset != 'mnist':
        data = data.toarray().T
    print(dataset,' - data shape: ',data.shape, '; label shape: ',label.shape)
    return data, label
### pairwise difference/add matrix
def pairwise_coef(X, opera = '-'):
    '''
    Build the pairwise combination coefficient matrix C.

    Left-multiplying: C.dot(X) with X in shape (n, d) yields, for every
    pair i < j in lexicographic order, the row x_i - x_j (opera '-')
    or x_i + x_j (opera '+').

    Parameter:
    --------
    X : Data, in shape (n,d); only the row count n is used
    opera : str, '-': difference, default
                 '+': cumsum
            (any other value falls back to '-')
    Return:
    --------
    np.array(row_ls) in shape [(n*(n-1)/2), n]
    '''
    # Bug fix: xj_opera was computed but never used, so opera='+'
    # silently behaved exactly like '-'.
    if opera == '+':
        xj_sign = 1
    else:
        xj_sign = -1
    n = X.shape[0]
    row_ls = []
    for i in range(n):
        for j in range(i + 1, n):
            row = np.zeros(n, dtype=int)
            row[i] = 1
            row[j] = xj_sign
            row_ls.append(row)
    return np.array(row_ls)
### Gradient huber matrix
def grad_hub_coef(X):
    '''
    Coefficient matrix for assembling the huber-gradient sum.

    Left-multiplying the stacked pairwise huber gradients (one row per
    pair i < j, lexicographic order) by this matrix sums them per
    centroid.

    Return:
    --------
    np.array in shape [n, (n*(n-1)/2)]; the column for pair (i, j)
    carries +1 in row i and -1 in row j.
    '''
    n = X.shape[0]
    num_pairs = n * (n - 1) // 2
    coef = np.zeros((n, num_pairs))
    # Walk the pairs in the same order pairwise_coef enumerates them and
    # set the two non-zero entries of each column directly.
    col = 0
    for i in range(n):
        for j in range(i + 1, n):
            coef[i, col] = 1.0
            coef[j, col] = -1.0
            col += 1
    return coef
### Get weights in q3
def get_weights(a, topnum = 5):
    """Return an (n, n) sparse weight matrix of Gaussian affinities.

    For each column i, only `topnum` entries of the full weight matrix
    exp(-0.5 * ||a_i - a_j||^2) are kept; all other entries are zero.

    NOTE(review): top_k returns the indices of the k *smallest* values.
    Applied to the weights, this keeps the smallest weights (i.e. the most
    distant points) and drops only the single smallest — presumably the
    intent was the k nearest neighbours (largest weights); confirm.
    """
    # Squared pairwise Euclidean distances.
    mat_dist = pd.DataFrame(distance_matrix(a, a))
    mat_dist = np.square(mat_dist)
    # Gaussian kernel weights; self-weight is exp(0) = 1.
    full_w_arr = np.exp(-0.5*mat_dist).to_numpy()
    def top_k(x,k):
        # Indices of the k smallest entries of x, sorted ascending by value.
        ind=np.argpartition(x,[i for i in range(k)])[:k]
        return ind[np.argsort(x[ind])]
    def weight_topk(weights_arr, top_num = topnum):
        # Column-wise top-(k+1) indices; drop the first (smallest) row.
        weight_df = pd.DataFrame(weights_arr)
        return np.apply_along_axis(lambda x: top_k(x,top_num+1),0,weight_df.values)[1:]
    def weight_mask(weights_arr, top_num=topnum):
        # Zero out everything except the selected entries of each column.
        loc = weight_topk(weights_arr, top_num)
        n = len(weights_arr)
        res = np.zeros((n, n))
        for i in range(n):
            res[loc[:,i], i] = weights_arr[loc[:,i], i]
        return res
    return weight_mask(full_w_arr, topnum)
### read log and pickle
def my_custom_logger(logger_name, level=logging.INFO):
    """
    Return a logger (named `logger_name`) that appends to a file of the
    same name, using a "time | level | message" line format.
    """
    log_format = logging.Formatter(
        fmt="%(asctime)s | %(levelname)s | %(message)s",
        datefmt='%Y-%m-%d | %H:%M:%S',
    )
    logger = logging.getLogger(logger_name)
    logger.setLevel(level)
    # File handler only; console output stays disabled.
    file_handler = logging.FileHandler(logger_name, mode='a')
    file_handler.setFormatter(log_format)
    logger.addHandler(file_handler)
    return logger
def pickle_write(data, filenm, folder='AGM1'):
    """Pickle `data` to result/<folder>/<filenm>.pkl (directory must exist)."""
    target = "result/{}/{}.pkl".format(folder, filenm)
    with open(target, "wb") as f:
        pickle.dump(data, f)
def pickle_read(filenm, folder='AGM1'):
    """Load and return the pickled object at result/<folder>/<filenm>.pkl."""
    source = "result/{}/{}.pkl".format(folder, filenm)
    with open(source, "rb") as f:
        return pickle.load(f)
def log_read(logname = 'AGM'):
    '''Read a "k1:v1,k2:v2"-formatted log file into a DataFrame.

    Parameters
    ----------
    logname : str, log file stem; the file is ``log/<logname>.log``
              relative to the current working directory.

    Returns
    ----------
    pd.DataFrame with one row per log line; column names come from the
    keys of the last record, values are kept as strings.
    '''
    # Bug fix: the path used hard-coded Windows separators ('\\log\\'),
    # which broke on POSIX systems; build it portably instead.
    path = os.path.join(os.getcwd(), 'log', logname + '.log')
    with open(path) as f:
        # Keep only the message part after the last " | " separator.
        records = [line.split(' | ')[-1].strip() for line in f.readlines()]
    all_rec = []
    for rec in records:
        iter_rec = [field.split(":")[-1] for field in rec.split(',')]
        all_rec.append(iter_rec)
    # Column names are taken from the keys of the last record, matching
    # the original behaviour.
    col_name = [field.split(":")[0] for field in records[-1].split(',')]
    df = pd.DataFrame(all_rec)
    df.columns = col_name
    return df
#%% Stepwise Strategy - armijo
def armijo(d, obj, s, sigma, gamma, config):
    """
    Backtracking line search (Armijo rule) along direction d.

    Shrinks the step by factor `sigma` (starting from `s`) until the
    sufficient-decrease condition with slope factor `gamma` holds.

    Returns
    ----------
    alpha : accepted step size
    obj_2 : ObjFunc evaluated at X + alpha * d
    """
    def objective_at(step):
        # Rebuild the objective at the candidate point X + step * d.
        return ObjFunc(X=obj.X + step * d,
                       a=obj.a,
                       mat_config=config,
                       delta=obj.delta,
                       lam=obj.lam,
                       if_use_weight=obj.if_use_weight)

    # Both f(X) and the directional derivative are independent of alpha,
    # so evaluate them once up front.
    f0 = obj.obj_func()
    slope = np.dot(obj.grad_obj_func().reshape(-1, 1).T, d.reshape(-1, 1))[0][0]
    alpha = s
    candidate = objective_at(alpha)
    while candidate.obj_func() > f0 + gamma * alpha * slope:
        alpha = alpha * sigma
        candidate = objective_at(alpha)
    return alpha, candidate
#%% objective function class
class ObjFunc():
    '''Clustering objective
        f(X) = 0.5 * ||X - a||_F^2 + lam * sum_{i<j} w_ij * huber(x_i - x_j)

    Parameters
    ----------
    X : np.array (n, d), current centroids
    a : np.array (n, d), data points
    mat_config : dict of precomputed coefficient matrices:
                 'gradient' : (n, n(n-1)/2), see grad_hub_coef
                 'pairwise' : (n(n-1)/2, n), see pairwise_coef
                 'weights'  : (n, n), only read when if_use_weight is True
    delta : float, huber threshold
    lam : float, regularisation strength
    if_use_weight : bool, weight each pair by mat_config['weights']
    '''

    def __init__(self, X, a, mat_config, delta=1e-3, lam=1, if_use_weight=False):
        self.X = X
        self.a = a
        self.delta = delta
        self.lam = lam
        self.if_use_weight = if_use_weight
        self.grad_coef = mat_config['gradient']
        self.pair_coef = mat_config['pairwise']
        if self.if_use_weight:
            self.weights_mat = mat_config['weights']
        else:
            # Unweighted model: every pair gets weight 1.
            self.weights_mat = np.ones((a.shape[0], a.shape[0]))

    def norm_sum_squ(self, a, x=0, squ=True):
        """
        Faster way to cal sum of l2 norms.

        Parameters
        ----------
        x : current centroid X => [n*d] (x may also be a scalar)
        a : list of points a => [n*d] (when a = 0, the self l2-norm)
        squ : True - sum of squared norms (first item in obj func)
              False - sum of plain norms

        Returns
        ----------
        res : sum of (squared) row-wise l2-norms of (x - a)
        """
        d = x - a
        if len(d.shape) == 1:
            # Promote a single vector to a one-row matrix.
            d = np.array([d])
        if squ:
            return np.sum(np.einsum('ij,ij->i', d, d))
        return np.sum(np.sqrt(np.einsum('ij,ij->i', d, d)))

    def hub(self, xi, xj):
        """
        Huber norm of xi - xj: quadratic inside ||.|| <= delta,
        linear (minus delta/2) outside.  Returns a scalar.
        """
        y_norm = self.norm_sum_squ(xi - xj, 0, squ=False)
        if y_norm <= self.delta:
            return (1 / (2 * self.delta)) * y_norm ** 2
        return y_norm - self.delta * 0.5

    def grad_hub(self, xi, xj=0):
        '''
        Gradient of the huber norm at y = xi - xj.

        Returns
        ----------
        res : Gradient vector of shape (d,)
        '''
        y = xi - xj
        y_norm = self.norm_sum_squ(y, 0, squ=False)
        if y_norm <= self.delta:
            return y / self.delta
        return y / y_norm

    def hess_hub(self, xi, xj=0):
        '''
        Hessian of the huber norm at y = xi - xj, shape (d, d).

        Bug fix: the rank-one term must be the outer product y y^T.
        The previous np.dot(y, y.T) on 1-D vectors produced the scalar
        inner product ||y||^2, which broadcast into a wrong matrix.
        '''
        y = xi - xj
        y_norm = self.norm_sum_squ(y, 0, squ=False)
        if y_norm <= self.delta:
            return np.eye(len(y)) / self.delta
        return (y_norm ** 2 * np.eye(len(y)) - np.outer(y, y)) / y_norm ** 3

    def weight(self, i, j):
        '''
        Pair weight w_ij; 1 when the unweighted model is used.
        '''
        if self.if_use_weight:
            return self.weights_mat[i, j]
        return 1

    def hub_sum_pairwise(self):
        '''
        Second term of the objective: sum over i<j of w_ij * huber(x_i - x_j).
        '''
        n = len(self.X)
        res = 0
        for i in range(0, n):
            for j in range(i + 1, n):
                res += self.hub(self.X[i], self.X[j]) * self.weight(i, j)
        return res

    def grad_hub_matrix(self):
        '''Gradient of the pairwise huber term via the precomputed matrices.'''
        # Stack xi - xj for every pair i < j (lexicographic order).
        xi_xj = np.dot(self.pair_coef, self.X)
        # Upper-triangular weights, flattened in the same pair order.
        weight_ij = self.weights_mat[np.triu_indices(self.X.shape[0], k=1)].reshape((-1, 1))
        grad_xi_xj = np.apply_along_axis(self.grad_hub, 1, xi_xj) * weight_ij
        return np.dot(self.grad_coef, grad_xi_xj)

    def partial_hess_hub_sum(self, i, j):
        '''
        (i, j) block of the Hessian of the pairwise huber term, shape (d, d).

        NOTE(review): the sign asymmetry on the diagonal (k < i subtracted,
        k > i added) looks suspicious — the diagonal block of this Hessian
        should be a plain sum, since huber's Hessian is even in y; confirm.
        '''
        if i == j:
            diagonal_ele = 0
            for k in range(0, len(self.X)):
                if k < i:
                    diagonal_ele += -self.hess_hub(self.X[k], self.X[i]) * self.weight(i, j)
                elif k > i:
                    diagonal_ele += self.hess_hub(self.X[i], self.X[k]) * self.weight(i, j)
            return diagonal_ele
        # Argument order is irrelevant: the huber Hessian is even in y.
        return -self.hess_hub(self.X[max(i, j)], self.X[min(i, j)]) * self.weight(i, j)

    def hess_hub_pairwise(self):
        '''Huber Hessians of all pairwise differences, tiled into (n*d, n*d).

        NOTE(review): the final reshape flattens an (n, n, d, d) stack in C
        order, which is not the usual block-matrix layout — confirm that the
        consumer (hess_product_p) expects exactly this ordering.
        '''
        n, d = self.X.shape
        # Lower-right layer matrix: row i carries X[i] from block i onwards.
        mat1 = np.zeros((n, n * d))
        for i in range(n):
            mat1[i:, i * d:(i + 1) * d] = self.X[i]  # each column
            mat1[i, (i + 1) * d:] = np.tile(self.X[i], (n - i - 1,))  # each row
        # Upper-left layer matrix: row i carries X[i] up to block i.
        mat2 = np.zeros((n, n * d))
        for i in range(n):
            mat2[:i + 1, i * d:(i + 1) * d] = self.X[i]  # each column
            mat2[i, :i * d] = np.tile(self.X[i], (i,))  # each row
        # Apply hess_hub to every xi - xj in (mat1 - mat2).
        hess_pairwise = np.apply_along_axis(
            self.hess_hub, 2, (mat1 - mat2).reshape(n, n, d)
        ).reshape(n * d, n * d)
        return hess_pairwise

    def hess_product_p(self, hess_pairwise, p):
        '''Hessian-vector product A @ p_k for Newton-CG.'''
        n, d = self.X.shape
        p = p.reshape(n * d, 1)
        nd = n * d
        # Row sums of the off-diagonal entries, used as the Hessian diagonal.
        hess_diagonals = np.diagonal(
            np.dot((np.ones((nd, nd)) - np.eye(nd, nd)), hess_pairwise)
        )
        Ap = (hess_diagonals + np.diagonal(hess_pairwise)).reshape(-1, 1) * p \
            - np.dot(hess_pairwise, p)
        # The trailing + p is the identity Hessian of 0.5 * ||X - a||^2.
        return Ap + p

    def obj_func(self):
        '''Objective value f(X).'''
        return 0.5 * self.norm_sum_squ(a=self.a, x=self.X, squ=True) \
            + self.lam * self.hub_sum_pairwise()

    def grad_obj_func(self):
        '''Gradient of f(X), shape (n, d).'''
        return (self.X - self.a) + self.lam * self.grad_hub_matrix()
#%% Test sample
if __name__ == "__main__":
    #%% test
    # Smoke test: build a tiny problem and evaluate the huber gradient once.
    pts = np.array([[0, 0], [1, 2], [3, 5], [4, 3]])
    anchors = np.array([[1, 1], [1, 1], [2, 2], [2, 2]])
    matrix_config = {
        'gradient': grad_hub_coef(pts),
        'weights': get_weights(anchors, 2),
        'pairwise': pairwise_coef(pts, opera='-'),
    }
    f = ObjFunc(X=pts, a=anchors, mat_config=matrix_config,
                delta=1e-3, lam=1, if_use_weight=True)
    f.grad_hub_matrix()
# %%
|
{"hexsha": "f182bc0cbfe158c8264fbfbc68bd1dce3e938b99", "size": 19604, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/initialization.py", "max_stars_repo_name": "Limjohns/OptFinal", "max_stars_repo_head_hexsha": "d2876865d888a6b4a2dc271cc04b3b97b8c0fbcc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-01-10T13:35:02.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-10T13:35:02.000Z", "max_issues_repo_path": "code/initialization.py", "max_issues_repo_name": "Limjohns/OptFinal", "max_issues_repo_head_hexsha": "d2876865d888a6b4a2dc271cc04b3b97b8c0fbcc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/initialization.py", "max_forks_repo_name": "Limjohns/OptFinal", "max_forks_repo_head_hexsha": "d2876865d888a6b4a2dc271cc04b3b97b8c0fbcc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.5358255452, "max_line_length": 111, "alphanum_fraction": 0.5146398694, "include": true, "reason": "import numpy,import scipy,from scipy", "num_tokens": 5462}
|
# import numpy as np
import networkx as nx
import pandas as pd
from bokeh.models import BoxSelectTool
from bokeh.models import Circle
from bokeh.models import HoverTool
from bokeh.models import MultiLine
from bokeh.models import TapTool
from bokeh.models.graphs import EdgesAndLinkedNodes
from bokeh.models.graphs import NodesAndLinkedEdges
from bokeh.models.graphs import from_networkx
from bokeh.palettes import Spectral4
from bokeh.plotting import figure
from pathlib import Path
CURR_PATH = Path(__file__).parent

# Static floor-plan image and its world-coordinate anchoring/scale.
background = {
    "name": "twistmap/tub-hft-3.og.png",
    "origin_x": -6.59,
    "origin_y": -2.97,
    "x": 3862,
    "y": 2243,
    "resolution": 0.01,
}

# Keep only the active NUC nodes located on the 3rd floor.
nodes = pd.read_csv(CURR_PATH / "wifi_nodes.csv")
nodes["floor"] = nodes["room"].str[2].astype(int)
nodes = nodes[(nodes["floor"] == 3) & (nodes["status"] == "on")]
nodes = nodes[nodes["platform"] == "nuc"]

# node_id -> (x, y) lookup used to pin the graph layout.
node_positions = {
    node_id: (pos["x"], pos["y"])
    for node_id, pos in nodes.set_index("node_id")[["x", "y"]].to_dict("index").items()
}
def create_map():
    """Create the base Bokeh figure with the floor-plan image as background."""
    plot = figure(
        x_range=(-1, 32),
        y_range=(-1, 16),
        plot_width=320,
        plot_height=160,
        toolbar_location="above",
        title="TWIST 3rd floor",
        sizing_mode="scale_width",
        # output_backend="webgl",
    )
    plot.xaxis.axis_label = "X [m]"
    plot.yaxis.axis_label = "Y [m]"
    # Convert the image's pixel extents to metres via the map resolution.
    img_w = background["x"] * background["resolution"]
    img_h = background["y"] * background["resolution"]
    plot.image_url(
        url=[background["name"]],
        x=background["origin_x"],
        y=background["origin_y"],
        w=img_w,
        h=img_h,
        anchor="bottom_left",
    )
    return plot
def draw_graph(plot, G):
    """Render graph G onto `plot`, pinned at node_positions, with hover/tap/box-select tools."""
    plot.add_tools(HoverTool(tooltips=None), TapTool(), BoxSelectTool())
    renderer = from_networkx(
        G, nx.spring_layout, pos=node_positions, fixed=node_positions.keys()
    )
    node_r = renderer.node_renderer
    node_r.glyph = Circle(size=15, fill_color=Spectral4[0])
    node_r.selection_glyph = Circle(size=15, fill_color=Spectral4[2])
    node_r.hover_glyph = Circle(size=15, fill_color=Spectral4[1])
    edge_r = renderer.edge_renderer
    edge_r.glyph = MultiLine(line_color="#CCCCCC", line_alpha=0.8, line_width=5)
    edge_r.selection_glyph = MultiLine(line_color=Spectral4[2], line_width=5)
    edge_r.hover_glyph = MultiLine(line_color=Spectral4[1], line_width=5)
    # Selecting a node highlights its edges; hovering an edge highlights its nodes.
    renderer.selection_policy = NodesAndLinkedEdges()
    renderer.inspection_policy = EdgesAndLinkedNodes()
    plot.renderers.append(renderer)
    return plot
|
{"hexsha": "b4f51af5181f2793a02a1facffe398966db2e411", "size": 2588, "ext": "py", "lang": "Python", "max_stars_repo_path": "analysis/twistmap/__init__.py", "max_stars_repo_name": "mchwalisz/walker", "max_stars_repo_head_hexsha": "8447352ba23324e7f1ad564d626efad9760e3570", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2018-02-19T20:48:35.000Z", "max_stars_repo_stars_event_max_datetime": "2020-03-23T13:28:34.000Z", "max_issues_repo_path": "analysis/twistmap/__init__.py", "max_issues_repo_name": "mchwalisz/walker", "max_issues_repo_head_hexsha": "8447352ba23324e7f1ad564d626efad9760e3570", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 22, "max_issues_repo_issues_event_min_datetime": "2018-07-24T20:17:31.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-06T11:41:11.000Z", "max_forks_repo_path": "analysis/twistmap/__init__.py", "max_forks_repo_name": "mchwalisz/walker", "max_forks_repo_head_hexsha": "8447352ba23324e7f1ad564d626efad9760e3570", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-05-23T06:56:42.000Z", "max_forks_repo_forks_event_max_datetime": "2019-05-23T06:56:42.000Z", "avg_line_length": 29.4090909091, "max_line_length": 86, "alphanum_fraction": 0.6777434312, "include": true, "reason": "import numpy,import networkx", "num_tokens": 692}
|
import sys, os
import numpy as np
import scipy
import torch
import torch.nn as nn
from scipy import ndimage
from tqdm import tqdm, trange
from PIL import Image
import torch.hub
import torchvision
import torch.nn.functional as F
# download deeplabv2_resnet101_msc-cocostuff164k-100000.pth from
# https://github.com/kazuto1011/deeplab-pytorch/releases/download/v1.0/deeplabv2_resnet101_msc-cocostuff164k-100000.pth
# and put the path here
CKPT_PATH = "deeplabv2_resnet101_msc-cocostuff164k-100000.pth"


def rescale(x):
    """Map values from [-1, 1] to [0, 1]."""
    return (x + 1.) / 2.


def rescale_bgr(x):
    """Map [-1, 1] to [0, 255] and reverse the channel axis (dim 0), i.e. RGB -> BGR."""
    scaled = (x + 1) * 127.5
    return torch.flip(scaled, dims=[0])
class COCOStuffSegmenter(nn.Module):
    """DeepLabv2/ResNet-101 semantic segmenter pre-trained on COCO-Stuff (182 classes).

    Weights are loaded from ``CKPT_PATH``.  Inputs are expected in [-1, 1]
    RGB and are converted to the BGR, mean-subtracted format the checkpoint
    was trained with.
    """

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.n_labels = 182  # COCO-Stuff 164k class count
        model = torch.hub.load("kazuto1011/deeplab-pytorch", "deeplabv2_resnet101", n_classes=self.n_labels)
        ckpt_path = CKPT_PATH
        model.load_state_dict(torch.load(ckpt_path))
        self.model = model
        normalize = torchvision.transforms.Normalize(mean=self.mean, std=self.std)
        self.image_transform = torchvision.transforms.Compose([
            torchvision.transforms.Lambda(lambda image: torch.stack(
                [normalize(rescale_bgr(x)) for x in image]))
        ])

    def forward(self, x, upsample=None):
        """Segment a batch; optionally bilinearly resize logits to `upsample` = (H, W)."""
        x = self._pre_process(x)
        x = self.model(x)
        if upsample is not None:
            # torch.nn.functional.upsample_bilinear is deprecated;
            # interpolate with align_corners=True is its documented equivalent.
            x = F.interpolate(x, size=upsample, mode="bilinear", align_corners=True)
        return x

    def _pre_process(self, x):
        # Convert [-1, 1] RGB input to normalized BGR for the backbone.
        x = self.image_transform(x)
        return x

    @property
    def mean(self):
        # BGR channel means (Caffe/VGG-style preprocessing).
        return [104.008, 116.669, 122.675]

    @property
    def std(self):
        return [1.0, 1.0, 1.0]

    @property
    def input_size(self):
        return [3, 224, 224]
def run_model(img, model):
    """Run `model` in eval mode and return per-pixel argmax class labels on CPU."""
    model = model.eval()
    with torch.no_grad():
        logits = model(img, upsample=(img.shape[2], img.shape[3]))
        labels = torch.argmax(logits, dim=1, keepdim=True)
    return labels.detach().cpu()
def get_input(batch, k):
    """Fetch batch[k] as a contiguous float NCHW tensor; 3-D input gets a channel axis."""
    img = batch[k]
    if img.ndim == 3:
        img = img[..., None]
    img = img.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format)
    return img.float()
def save_segmentation(segmentation, path):
    """Save a (1, 1, H, W) class-label tensor as an 8-bit PNG at `path`."""
    # --> class label to uint8, save as png
    os.makedirs(os.path.dirname(path), exist_ok=True)
    assert len(segmentation.shape) == 4
    assert segmentation.shape[0] == 1
    for seg in segmentation:
        arr = seg.permute(1, 2, 0).numpy().squeeze().astype(np.uint8)
        Image.fromarray(arr).save(path)
def iterate_dataset(dataloader, destpath, model):
    """Segment every batch from `dataloader` and write one PNG per input under `destpath`.

    Failures on individual items are printed and skipped (best effort).
    """
    os.makedirs(destpath, exist_ok=True)
    num_processed = 0
    for _, batch in tqdm(enumerate(dataloader), desc="Data"):
        try:
            img = get_input(batch, "image").cuda()
            seg = run_model(img, model)
            rel = os.path.splitext(batch["relative_file_path_"][0])[0]
            save_segmentation(seg, os.path.join(destpath, rel + ".png"))
            num_processed += 1
        except Exception as e:
            # Deliberate best-effort: report and continue with the next item.
            print(e)
            print("but anyhow..")
    print("Processed {} files. Bye.".format(num_processed))
from taming.data.sflckr import Examples
from torch.utils.data import DataLoader
if __name__ == "__main__":
    # Usage: python extract_segmentation.py <dest_dir>
    dest = sys.argv[1]
    # NOTE: batch size must stay 1 — save_segmentation asserts shape[0] == 1.
    batchsize = 1
    print("Running with batch-size {}, saving to {}...".format(batchsize, dest))
    model = COCOStuffSegmenter({}).cuda()
    print("Instantiated model.")
    dataset = Examples()
    dloader = DataLoader(dataset, batch_size=batchsize)
    iterate_dataset(dataloader=dloader, destpath=dest, model=model)
    print("done.")
|
{"hexsha": "c450f5452a5d7cd137acad93aab40b7353d1819f", "size": 3797, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/extract_segmentation.py", "max_stars_repo_name": "B1boid/taming-transformers", "max_stars_repo_head_hexsha": "5638360d3de989547ae1b3f04d494187ae08b8ba", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-06-07T22:53:32.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-07T22:53:32.000Z", "max_issues_repo_path": "scripts/extract_segmentation.py", "max_issues_repo_name": "ink1/taming-transformers", "max_issues_repo_head_hexsha": "3b3275d4a911d937abd54880bbfc7b09816e738f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/extract_segmentation.py", "max_forks_repo_name": "ink1/taming-transformers", "max_forks_repo_head_hexsha": "3b3275d4a911d937abd54880bbfc7b09816e738f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.9847328244, "max_line_length": 119, "alphanum_fraction": 0.6420858573, "include": true, "reason": "import numpy,import scipy,from scipy", "num_tokens": 999}
|
! Integration test: early RETURN from a contained function.
program main
integer main_out
main_out = main1()
print *, "main1 called"
contains
integer function main1()
integer :: i = 10
! i starts at 10, so the early-return branch below is always taken.
if (i .GT. 5) then
main1 = i
print *, "early return"
return
end if
print *, "normal return"
main1 = i
end function main1
end program
|
{"hexsha": "e0e0e335de168092c4cad6ea0b8316b2f0f98fd8", "size": 364, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "integration_tests/return_01.f90", "max_stars_repo_name": "Thirumalai-Shaktivel/lfortran", "max_stars_repo_head_hexsha": "bb39faf1094b028351d5aefe27d64ee69302300a", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 316, "max_stars_repo_stars_event_min_datetime": "2019-03-24T16:23:41.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T07:28:33.000Z", "max_issues_repo_path": "integration_tests/return_01.f90", "max_issues_repo_name": "Thirumalai-Shaktivel/lfortran", "max_issues_repo_head_hexsha": "bb39faf1094b028351d5aefe27d64ee69302300a", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-07-29T04:58:03.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-04T16:40:06.000Z", "max_forks_repo_path": "integration_tests/return_01.f90", "max_forks_repo_name": "Thirumalai-Shaktivel/lfortran", "max_forks_repo_head_hexsha": "bb39faf1094b028351d5aefe27d64ee69302300a", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 26, "max_forks_repo_forks_event_min_datetime": "2019-03-28T19:40:07.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T07:28:55.000Z", "avg_line_length": 21.4117647059, "max_line_length": 35, "alphanum_fraction": 0.5082417582, "num_tokens": 95}
|
import os
os.chdir('osmFISH_AllenSSp/')
from scvi.dataset import CsvDataset
from scvi.models import JVAE, Classifier
from scvi.inference import JVAETrainer
import numpy as np
import pandas as pd
import copy
import torch
import time as tm
### osmFISH data
osmFISH_data = CsvDataset('data/gimVI_data/osmFISH_Cortex_scvi.csv', save_path = "", sep = ",", gene_by_cell = False)
### RNA
RNA_data = CsvDataset('data/gimVI_data/Allen_SSp_data_scvi.csv', save_path = "", sep = ",", gene_by_cell = False)
### Leave-one-out validation
# Restrict the RNA reference to the genes measured by osmFISH, reordered
# to match the osmFISH gene order.
Common_data = copy.deepcopy(RNA_data)
Common_data.gene_names = osmFISH_data.gene_names
# FIX: np.vstack requires a sequence of arrays; passing a generator is an
# error on modern NumPy, so build the list explicitly.
Common_data.X = Common_data.X[:, np.reshape(np.vstack([np.argwhere(g == RNA_data.gene_names) for g in osmFISH_data.gene_names]), -1)]
Gene_set = np.intersect1d(osmFISH_data.gene_names, Common_data.gene_names)
Imp_Genes = pd.DataFrame(columns=Gene_set)
# Leave-one-out: hide each shared gene in the spatial data in turn, train
# gimVI on the remaining genes, and impute the hidden one.
gimVI_time = []
for i in Gene_set:
    print(i)
    # Create copy of the fish dataset with hidden genes
    data_spatial_partial = copy.deepcopy(osmFISH_data)
    data_spatial_partial.filter_genes_by_attribute(np.setdiff1d(osmFISH_data.gene_names, i))
    data_spatial_partial.batch_indices += Common_data.n_batches
    # Skip genes whose removal also dropped cells from the spatial data.
    if data_spatial_partial.X.shape[0] != osmFISH_data.X.shape[0]:
        continue
    datasets = [Common_data, data_spatial_partial]
    generative_distributions = ["zinb", "nb"]
    # FIX: np.vstack requires a sequence; a generator expression raises on
    # modern NumPy, so use a list comprehension.
    gene_mappings = [slice(None), np.reshape(np.vstack([np.argwhere(g == Common_data.gene_names) for g in data_spatial_partial.gene_names]), -1)]
    n_inputs = [d.nb_genes for d in datasets]
    total_genes = Common_data.nb_genes
    n_batches = sum([d.n_batches for d in datasets])
    model_library_size = [True, False]
    n_latent = 8
    kappa = 5
    start = tm.time()
    torch.manual_seed(0)
    model = JVAE(
        n_inputs,
        total_genes,
        gene_mappings,
        generative_distributions,
        model_library_size,
        n_layers_decoder_individual=0,
        n_layers_decoder_shared=0,
        n_layers_encoder_individual=1,
        n_layers_encoder_shared=1,
        dim_hidden_encoder=64,
        dim_hidden_decoder_shared=64,
        dropout_rate_encoder=0.2,
        dropout_rate_decoder=0.2,
        n_batch=n_batches,
        n_latent=n_latent,
    )
    discriminator = Classifier(n_latent, 32, 2, 3, logits=True)
    trainer = JVAETrainer(model, discriminator, datasets, 0.95, frequency=1, kappa=kappa)
    trainer.train(n_epochs=200)
    _, Imputed = trainer.get_imputed_values(normalized=True)
    Imputed = np.reshape(Imputed[:, np.argwhere(i == Common_data.gene_names)[0]], -1)
    Imp_Genes[i] = Imputed
    gimVI_time.append(tm.time() - start)
Imp_Genes = Imp_Genes.fillna(0)
Imp_Genes.to_csv('Results/gimVI_LeaveOneOut.csv')
gimVI_time = pd.DataFrame(gimVI_time)
gimVI_time.to_csv('Results/gimVI_Time.csv', index = False)
### New genes
# Impute three genes that are absent from the spatial panel entirely.
Imp_New_Genes = pd.DataFrame(columns=["TESC","PVRL3","GRM2"])
# Create copy of the fish dataset with hidden genes
data_spatial_partial = copy.deepcopy(osmFISH_data)
data_spatial_partial.filter_genes_by_attribute(osmFISH_data.gene_names)
data_spatial_partial.batch_indices += RNA_data.n_batches
datasets = [RNA_data, data_spatial_partial]
generative_distributions = ["zinb", "nb"]
# FIX: np.vstack requires a sequence; a generator expression raises on
# modern NumPy, so use a list comprehension.
gene_mappings = [slice(None), np.reshape(np.vstack([np.argwhere(g == RNA_data.gene_names) for g in data_spatial_partial.gene_names]), -1)]
n_inputs = [d.nb_genes for d in datasets]
total_genes = RNA_data.nb_genes
n_batches = sum([d.n_batches for d in datasets])
model_library_size = [True, False]
n_latent = 8
kappa = 5
torch.manual_seed(0)
model = JVAE(
    n_inputs,
    total_genes,
    gene_mappings,
    generative_distributions,
    model_library_size,
    n_layers_decoder_individual=0,
    n_layers_decoder_shared=0,
    n_layers_encoder_individual=1,
    n_layers_encoder_shared=1,
    dim_hidden_encoder=64,
    dim_hidden_decoder_shared=64,
    dropout_rate_encoder=0.2,
    dropout_rate_decoder=0.2,
    n_batch=n_batches,
    n_latent=n_latent,
)
discriminator = Classifier(n_latent, 32, 2, 3, logits=True)
trainer = JVAETrainer(model, discriminator, datasets, 0.95, frequency=1, kappa=kappa)
trainer.train(n_epochs=200)
for i in ["TESC","PVRL3","GRM2"]:
    # NOTE(review): get_imputed_values returns the same matrix on every
    # pass; only the selected column differs per gene — confirm intent.
    _, Imputed = trainer.get_imputed_values(normalized=True)
    Imputed = np.reshape(Imputed[:, np.argwhere(i == RNA_data.gene_names)[0]], -1)
    Imp_New_Genes[i] = Imputed
Imp_New_Genes.to_csv('Results/gimVI_New_genes.csv')
|
{"hexsha": "6732ae3e542b474679697d3a28c2b831a99ef0c3", "size": 4587, "ext": "py", "lang": "Python", "max_stars_repo_path": "benchmark/osmFISH_AllenSSp/gimVI/gimVI.py", "max_stars_repo_name": "tabdelaal/SpaGE", "max_stars_repo_head_hexsha": "7533cbf2275c3049561e8a17b9f7866e0e324743", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2020-11-15T05:56:48.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T22:59:58.000Z", "max_issues_repo_path": "benchmark/osmFISH_AllenSSp/gimVI/gimVI.py", "max_issues_repo_name": "tabdelaal/SpaGE", "max_issues_repo_head_hexsha": "7533cbf2275c3049561e8a17b9f7866e0e324743", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "benchmark/osmFISH_AllenSSp/gimVI/gimVI.py", "max_forks_repo_name": "tabdelaal/SpaGE", "max_forks_repo_head_hexsha": "7533cbf2275c3049561e8a17b9f7866e0e324743", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-05-21T09:45:32.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-21T09:45:32.000Z", "avg_line_length": 34.4887218045, "max_line_length": 141, "alphanum_fraction": 0.711794201, "include": true, "reason": "import numpy", "num_tokens": 1222}
|
# from https://github.com/SpaceNetChallenge/SpaceNet_Off_Nadir_Solutions/blob/master/selim_sef
import os
import torch
from torch import nn
from torch.utils import model_zoo
from src.models.resnet import resnet34
# Per-backbone channel configuration for the U-Net builder below.
encoder_params = {
    'resnet34':
        {
            'filters': [64, 64, 128, 256, 512],      # channels out of each encoder stage
            'decoder_filters': [64, 128, 256, 256],  # channels of each decoder stage
            'last_upsample': 64,                     # channels of the final upsample block
            'init_op': resnet34,                     # backbone constructor
            'url': None,                             # optional pretrained checkpoint (path or URL)
        }
}
class AbstractModel(nn.Module):
    """Base model providing weight init and pretrained-encoder loading."""
    def _initialize_weights(self):
        # Kaiming-init every conv; BatchNorm starts as identity (w=1, b=0).
        for m in self.modules():
            if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
                m.weight.data = nn.init.kaiming_normal_(m.weight.data)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def initialize_encoder(self, model, model_url, num_channels_changed=False):
        """Load pretrained weights into `model` from a local file or a URL.

        When num_channels_changed is True, the first three input channels
        of the stem conv are copied from the checkpoint and the first
        layer is then dropped from the state dict (its shape no longer
        matches the wider input).
        """
        if os.path.isfile(model_url):
            pretrained_dict = torch.load(model_url)
        else:
            pretrained_dict = model_zoo.load_url(model_url)
        if 'state_dict' in pretrained_dict:
            pretrained_dict = pretrained_dict['state_dict']
            # checkpoints saved from DataParallel prefix keys with 'module.'
            pretrained_dict = {k.replace('module.', ''): v for k, v in pretrained_dict.items()}
        model_dict = model.state_dict()
        # keep only keys the target model actually has
        pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
        if num_channels_changed:
            model.state_dict()[self.first_layer_params_name +
                               '.weight'][:, :3, ...] = pretrained_dict[self.first_layer_params_name + '.weight'].data
            skip_layers = [
                self.first_layer_params_name,
                self.first_layer_params_name + '.weight',
            ]
            pretrained_dict = {
                k: v
                for k, v in pretrained_dict.items()
                if not any(k.startswith(s) for s in skip_layers)
            }
        model.load_state_dict(pretrained_dict, strict=False)
    @property
    def first_layer_params_name(self):
        # name of the stem conv in the encoder's state dict
        return 'conv1'
class EncoderDecoder(AbstractModel):
    """Generic U-Net: encoder stages + decoder blocks + skip bottlenecks."""
    def __init__(self, num_classes, num_channels=3, encoder_name='resnet34'):
        # Subclasses may preset these attributes before calling
        # super().__init__(); only missing ones get defaults here.
        if not hasattr(self, 'first_layer_stride_two'):
            self.first_layer_stride_two = False
        if not hasattr(self, 'decoder_block'):
            self.decoder_block = UnetDecoderBlock
        if not hasattr(self, 'bottleneck_type'):
            self.bottleneck_type = ConvBottleneck
        self.filters = encoder_params[encoder_name]['filters']
        self.decoder_filters = encoder_params[encoder_name].get('decoder_filters', self.filters[:-1])
        self.last_upsample_filters = encoder_params[encoder_name].get('last_upsample', self.decoder_filters[0] // 2)
        super().__init__()
        self.num_channels = num_channels
        self.num_classes = num_classes
        # One bottleneck per decoder stage; its input is the concatenation
        # of skip-connection channels and decoder channels.
        self.bottlenecks = nn.ModuleList(
            [
                self.bottleneck_type(self.filters[-i - 2] + f, f)
                for i, f in enumerate(reversed(self.decoder_filters[:]))
            ]
        )
        self.decoder_stages = nn.ModuleList([self.get_decoder(idx) for idx in range(0, len(self.decoder_filters))])
        if self.first_layer_stride_two:
            # extra upsample to undo the stride-2 stem
            self.last_upsample = self.decoder_block(
                self.decoder_filters[0],
                self.last_upsample_filters,
                self.last_upsample_filters,
            )
        self.final = self.make_final_classifier(
            self.last_upsample_filters if self.first_layer_stride_two else self.decoder_filters[0],
            num_classes,
        )
        self._initialize_weights()
        encoder = encoder_params[encoder_name]['init_op'](pretrained=True)
        self.encoder_stages = nn.ModuleList([self.get_encoder(encoder, idx) for idx in range(len(self.filters))])
        if encoder_params[encoder_name]['url'] is not None:
            self.initialize_encoder(encoder, encoder_params[encoder_name]['url'], num_channels != 3)
    # noinspection PyCallingNonCallable
    def forward(self, x):
        # x, angles = x
        # run the encoder, remembering each stage's output for skips
        enc_results = []
        for stage in self.encoder_stages:
            x = stage(x)
            enc_results.append(torch.cat(x, dim=1) if isinstance(x, tuple) else x.clone())
        last_dec_out = enc_results[-1]
        # size = last_dec_out.size(2)
        # last_dec_out = torch.cat([last_dec_out, F.upsample(angles, size=(size, size), mode="nearest")], dim=1)
        x = last_dec_out
        # walk decoder stages deepest-to-shallowest, fusing skip connections
        for idx, bottleneck in enumerate(self.bottlenecks):
            rev_idx = -(idx + 1)
            x = self.decoder_stages[rev_idx](x)
            x = bottleneck(x, enc_results[rev_idx - 1])
        if self.first_layer_stride_two:
            x = self.last_upsample(x)
        f = self.final(x)
        return f
    def get_decoder(self, layer):
        # The deepest decoder consumes the encoder output; the others
        # consume the next (deeper) decoder stage's output.
        in_channels = (
            self.filters[layer + 1] if layer + 1 == len(self.decoder_filters) else self.decoder_filters[layer + 1]
        )
        return self.decoder_block(
            in_channels,
            self.decoder_filters[layer],
            self.decoder_filters[max(layer, 0)],
        )
    def make_final_classifier(self, in_filters, num_classes):
        # 1x1 conv producing per-pixel class logits
        return nn.Sequential(nn.Conv2d(in_filters, num_classes, 1, padding=0))
    def get_encoder(self, encoder, layer):
        # subclasses map (encoder, stage index) -> nn.Module
        raise NotImplementedError
    @property
    def first_layer_params(self):
        # parameters of the stem stage (useful for per-group LR schedules)
        return _get_layers_params([self.encoder_stages[0]])
    @property
    def layers_except_first_params(self):
        layers = get_slice(self.encoder_stages, 1, -1) + [
            self.bottlenecks,
            self.decoder_stages,
            self.final,
        ]
        return _get_layers_params(layers)
def _get_layers_params(layers):
    """Flatten the parameters of every module in `layers` into one list."""
    params = []
    for layer in layers:
        params.extend(layer.parameters())
    return params
def get_slice(features, start, end):
    """Return features[start:end] as a list; end == -1 means 'to the end'."""
    stop = len(features) if end == -1 else end
    return [features[idx] for idx in range(start, stop)]
class ConvBottleneck(nn.Module):
    """Fuse a decoder feature map with its encoder skip connection.

    The two maps are concatenated on the channel axis and reduced back
    to `out_channels` with a 3x3 conv + ReLU.
    """

    def __init__(self, in_channels, out_channels):
        super().__init__()
        fuse = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, 3, padding=1),
            nn.ReLU(inplace=True),
        )
        # attribute name kept as `seq` for checkpoint/state-dict compatibility
        self.seq = fuse

    def forward(self, dec, enc):
        merged = torch.cat([dec, enc], dim=1)
        return self.seq(merged)
class UnetDecoderBlock(nn.Module):
    """2x nearest-neighbour upsample followed by 3x3 conv + ReLU.

    NOTE(review): `middle_channels` is accepted but unused — confirm intent.
    """

    def __init__(self, in_channels, middle_channels, out_channels):
        super().__init__()
        ops = [
            nn.Upsample(scale_factor=2),
            nn.Conv2d(in_channels, out_channels, 3, padding=1),
            nn.ReLU(inplace=True),
        ]
        # attribute name kept as `layer` for checkpoint/state-dict compatibility
        self.layer = nn.Sequential(*ops)

    def forward(self, x):
        return self.layer(x)
class Resnet(EncoderDecoder):
    """EncoderDecoder specialization for torchvision-style ResNet backbones."""

    def __init__(self, seg_classes, backbone_arch):
        # The ResNet stem conv has stride 2, so the decoder needs the
        # extra final upsample block.
        self.first_layer_stride_two = True
        super().__init__(seg_classes, 4, backbone_arch)

    def get_encoder(self, encoder, layer):
        """Return the nn.Module implementing encoder stage `layer` (0-4)."""
        builders = {
            0: lambda: nn.Sequential(encoder.conv1, encoder.bn1, encoder.relu),
            1: lambda: nn.Sequential(encoder.maxpool, encoder.layer1),
            2: lambda: encoder.layer2,
            3: lambda: encoder.layer3,
            4: lambda: encoder.layer4,
        }
        builder = builders.get(layer)
        return builder() if builder is not None else None
class ResnetSuperVision(Resnet):
    """Resnet U-Net with an extra image-level classification head."""
    def __init__(self, seg_classes, backbone_arch):
        super().__init__(seg_classes, backbone_arch=backbone_arch)
        # image-level head: global average pool -> single logit
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Linear(512, 1)
    def forward(self, x):
        """Return (segmentation logits, per-image logit)."""
        enc_results = []
        for stage in self.encoder_stages:
            x = stage(x)
            enc_results.append(torch.cat(x, dim=1) if isinstance(x, tuple) else x.clone())
        last_dec_out = enc_results[-1]
        x = last_dec_out
        # classification branch taps the deepest encoder features
        x_cls = self.avgpool(x)
        x_cls = x_cls.view(x_cls.size(0), -1)
        x_cls = self.fc(x_cls).view(x_cls.size(0))
        # decoder path identical to EncoderDecoder.forward
        for idx, bottleneck in enumerate(self.bottlenecks):
            rev_idx = -(idx + 1)
            x = self.decoder_stages[rev_idx](x)
            x = bottleneck(x, enc_results[rev_idx - 1])
        if self.first_layer_stride_two:
            x = self.last_upsample(x)
        f = self.final(x)
        return f, x_cls
if __name__ == '__main__':
    # Smoke test: build the model and push a zero batch through it.
    d = ResnetSuperVision(1, backbone_arch='resnet34')
    d.eval()
    import numpy as np
    with torch.no_grad():
        images = torch.from_numpy(np.zeros((4, 3, 256, 256), dtype='float32'))
        p1, p2 = d(images)
        print(p1.shape)
        print(p2.shape)
    print(d)
|
{"hexsha": "b80fc8da68bb63da82b223ad7108d1c5a7e04321", "size": 8671, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/models/unet.py", "max_stars_repo_name": "kevinkwshin/kaggle-pneumothorax", "max_stars_repo_head_hexsha": "24b91a9425097023f0cc7781a9380cb247babe22", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 74, "max_stars_repo_stars_event_min_datetime": "2019-09-13T11:29:45.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-25T09:49:14.000Z", "max_issues_repo_path": "src/models/unet.py", "max_issues_repo_name": "amirassov/kaggle-pneumothorax", "max_issues_repo_head_hexsha": "24b91a9425097023f0cc7781a9380cb247babe22", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/models/unet.py", "max_forks_repo_name": "amirassov/kaggle-pneumothorax", "max_forks_repo_head_hexsha": "24b91a9425097023f0cc7781a9380cb247babe22", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 13, "max_forks_repo_forks_event_min_datetime": "2019-09-16T07:47:29.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-08T20:15:08.000Z", "avg_line_length": 33.7392996109, "max_line_length": 118, "alphanum_fraction": 0.6112328451, "include": true, "reason": "import numpy", "num_tokens": 2000}
|
import numpy as np
from loginit import get_module_logger
from sklearn.decomposition import PCA, KernelPCA
from sklearn.model_selection import GridSearchCV, StratifiedShuffleSplit, StratifiedKFold
from sklearn.model_selection import cross_validate
from sklearn.kernel_ridge import KernelRidge
from sklearn.metrics import confusion_matrix, roc_auc_score
import re
import sys
import os
import time
import argparse
import random
import itertools
import cmocean
import warnings
import sklearn.exceptions
#warnings.filterwarnings("ignore", category=sklearn.exceptions.UndefinedMetricWarning)
import matplotlib as mpl
import matplotlib.pyplot as plt
from sklearn.base import clone
from sklearn.metrics import accuracy_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import classification_report
from sklearn.metrics import f1_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import ParameterGrid
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from collections import defaultdict
# for multiple kernel learning
import mklaren
import align
from mklaren.mkl.align import Align
from mklaren.mkl.alignf import Alignf
# ---- experiment layout defaults ----
PARENT_PATH = r'F:\Research\ScaleVariant'
EXP_NAME = 'exp_20190516'
MUTAG = 'MUTAG'
# method id selecting the Fisher-style persistence kernel in load_kernel
# (there the raw matrix is mapped through exp(-mat / t))
PFK_KERNEL = 4
# ---- feature / kernel method identifiers ----
FET_SCALE = 'scale-variant'
FET_COMMON = 'common'
FET_GRAPHLET = 'Graphlet'
FET_KSTEP = 'KStepRandomWalk'
FET_GEOM = 'GeometricRandomWalk'
FET_EXP = 'ExponentialRandomWalk'
FET_SHORTEST = 'ShortestPath'
FET_WL = 'WL'
FET_AVG_SCALE = 'scale_avg'
FET_AVG_SCALE_NORM = 'scale_norm_avg'
ker_methods = [FET_SCALE,FET_GRAPHLET, FET_KSTEP, FET_GEOM, FET_EXP, FET_SHORTEST, FET_WL, FET_COMMON, FET_AVG_SCALE, FET_AVG_SCALE_NORM]
# default hyper-parameter (as a string, used in the kernel file name)
# for each baseline graph kernel
ker_pal = {
    FET_GRAPHLET: '4',
    FET_KSTEP : '2',
    FET_GEOM : '0.05',
    FET_EXP : '0.1',
    FET_SHORTEST : 'NA',
    FET_WL : '5'
}
def get_exp_path(parent_path, data_name, exp_name):
    """Return '<parent>/<exp_name>/<data_name>' (note: path order differs
    from the argument order)."""
    return r'{}/{}/{}'.format(parent_path, exp_name, data_name)
class KernelParams:
    """Plain container for the parameters that identify a precomputed
    kernel matrix on disk (see get_kernel_path / load_kernel)."""
    def __init__(self, method, T1, T2, thres, infval, t, Tmax):
        self.method = method  # kernel-method id (e.g. PFK_KERNEL)
        self.T1 = T1
        self.T2 = T2
        self.thres = thres    # threshold encoded into the file name
        self.infval = infval  # substitute value for infinite persistence
        self.t = t # for Fisher Persistence kernel
        self.Tmax = Tmax
def normalize_kernel(kermat):
    """Cosine-normalize a Gram matrix so its diagonal becomes all ones.

    Returns None when any diagonal entry is non-positive (the kernel is
    ill-defined and cannot be normalized).
    """
    diag = kermat.diagonal()
    if np.sum(diag <= 0) > 0:
        return None
    D = np.diag(1.0 / np.sqrt(np.diag(kermat)))
    normalized = np.dot(np.dot(D, kermat), D)
    print('Normalized matrix')
    return normalized
def get_kernel_path(folder, mt, exp_path, ph_name, dim, kprm):
    """Build the on-disk path of a precomputed kernel matrix.

    Prints a warning (but still returns the path) when the file does not
    exist; unknown method types yield a deliberately bogus path.
    """
    method, T1, T2 = kprm.method, kprm.T1, kprm.T2
    thres, infval, Tmax = kprm.thres, kprm.infval, kprm.Tmax
    if mt == FET_SCALE:
        kerpath = os.path.join(exp_path, '{}/{}_d_{}_method_{}_T1_{}_T2_{}_tmax_{}_thres_{}_inf_{}.txt'.format(folder, ph_name, dim, method, T1, T2, Tmax, thres, infval))
    elif mt in (FET_AVG_SCALE, FET_AVG_SCALE_NORM):
        kerpath = os.path.join(exp_path, '{}/{}_method_{}_d_{}.txt'.format(folder, mt, method, dim))
    else:
        kerpath = 'DumpppppppNotFound'
    if not os.path.isfile(kerpath):
        print('Not found {}'.format(kerpath))
    return kerpath
def load_graph_kernel(folder, exp_path, method, par, normalize):
    """Load a precomputed graph-kernel Gram matrix from a text file.

    Graphlet kernels fall back to the par=3 file when the requested
    parameter file is missing. Returns None when no file can be found;
    optionally cosine-normalizes the matrix.
    """
    kerfile = os.path.join(exp_path, '{}/graph_kernel_{}_par_{}.txt'.format(folder, method, par))
    if method == FET_GRAPHLET and not os.path.isfile(kerfile):
        kerfile = os.path.join(exp_path, '{}/graph_kernel_{}_par_3.txt'.format(folder, method))
    if not os.path.isfile(kerfile):
        print('Not found graph kernel', method)
        return None
    kermat = np.loadtxt(kerfile)
    if kermat is None:
        return None
    if normalize > 0:
        kermat = normalize_kernel(kermat)
    return kermat
def load_kernel(folder, mt, exp_path, ph_name, dp, dim, kprm, normalize):
    """Load one precomputed (persistence) kernel matrix for dimension `dim`.

    Returns None when `dp` restricts loading to a different dimension or
    the file is missing. Zero diagonal entries are patched to 1; the
    PFK_KERNEL method additionally maps the matrix through exp(-mat / t).
    """
    if dp >= 0 and dp != dim:
        print('Specified dim not match dp={}, dim={}'.format(dp, dim))
        return
    kerfile = get_kernel_path(folder, mt, exp_path, ph_name, dim, kprm)
    if not os.path.isfile(kerfile):
        return None
    kermat = np.loadtxt(kerfile, dtype=np.float32)
    if kermat is None:
        return None
    print('Dim=', dim, 'Kermat shape', kermat.shape)
    for idx in range(kermat.shape[0]):
        if kermat[idx, idx] == 0:
            print('Kernel ill defined i =', idx)
            kermat[idx, idx] = 1
    if kprm.method == PFK_KERNEL:
        kermat = np.exp(kermat / float(-kprm.t))
    if normalize > 0:
        kermat = normalize_kernel(kermat)
    return kermat
def load_data(filename):
    """Read graph labels from a bar-list setting file.

    Each line must contain a ``lb_<int>_`` token; that integer is the
    class label of the corresponding sample.

    Returns:
        (X_index, y): int32 arrays where X_index is simply 0..n-1 (row
        indices into the kernel matrices) and y holds the labels.
    """
    # Fixes: regex hoisted out of the loop and compiled once; redundant
    # rf.close() inside the `with` removed; iterate the file lazily
    # instead of materializing readlines().
    pattern = re.compile('lb_([0-9]+)_')
    lbs = []
    datls = []
    with open(filename, 'r') as rf:
        for line in rf:
            lb = int(pattern.search(line).groups()[0])
            lbs.append(lb)
            datls.append(len(lbs) - 1)
    X_index = np.array(datls).astype(np.int32)
    y = np.array(lbs).astype(np.int32)
    return X_index, y
def svc_classify(X, y, train_index, test_index, mt):
    """Grid-search an SVM on one CV split and return (train_acc, test_acc).

    For mt == 'common', X is a plain feature matrix; otherwise X is a
    precomputed Gram matrix indexed on both axes.
    """
    is_feature_input = (mt == 'common')
    if is_feature_input:
        X_train, X_test = X[np.ix_(train_index)], X[np.ix_(test_index)]
    else:
        X_train = X[np.ix_(train_index, train_index)]
        X_test = X[np.ix_(test_index, train_index)]
    y_train, y_test = y[np.ix_(train_index)], y[np.ix_(test_index)]
    C_grid = [1e-2, 1e-1, 1e0, 1e1, 1e2]
    if is_feature_input:
        best_clf = GridSearchCV(SVC(), cv=5, param_grid={'kernel': ('linear', 'rbf'), 'C': C_grid})
    else:
        best_clf = GridSearchCV(SVC(kernel='precomputed'), cv=5, param_grid={"C": C_grid})
    best_clf.fit(X_train, y_train)
    return best_clf.score(X_train, y_train), best_clf.score(X_test, y_test)
def add_kernel(K1, K2):
    """Sum two Gram matrices; a missing (None) kernel acts as identity."""
    if K1 is None or K2 is None:
        return K1 if K2 is None else K2
    return K1 + K2
def multiply_kernel(K1, K2):
    """Element-wise product of two Gram matrices; None acts as identity."""
    if K1 is None or K2 is None:
        return K1 if K2 is None else K2
    return np.multiply(K1, K2)
def combine_kernel(K1, K2, alpha):
if K1 is None:
return K2
if K2 is None:
return K1
kX = alpha*K1 + (1.0-alpha)*K2
return kX
def multiple_kernel_learning(y, train_index, K1, K2, label="normal"):
    """Learn a convex combination of two Gram matrices with mklaren's Alignf.

    Alignf is fitted on the training sub-matrices only; the learned
    weights mu are then applied to the full matrices. When either kernel
    is None, the other one is returned unchanged.
    """
    if K1 is None:
        return K2
    if K2 is None:
        return K1
    K1_train = K1[np.ix_(train_index, train_index)]
    K2_train = K2[np.ix_(train_index, train_index)]
    y_train = y[np.ix_(train_index)]
    model = Alignf(typ="convex")
    model.fit([K1_train, K2_train], y_train)
    mu = model.mu
    print(label, mu)
    # Fix: the local result was named `combine_kernel`, shadowing the
    # module-level combine_kernel() helper; renamed to avoid confusion.
    combined = mu[0] * K1 + mu[1] * K2
    return combined
np.set_printoptions(precision=5)
if __name__ == '__main__':
    # ---- command-line interface ----
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataname', '-d', type=str, default=MUTAG)
    parser.add_argument('--log', type=str, default='log')
    parser.add_argument('--parentpath', type=str, default=PARENT_PATH)
    parser.add_argument('--expname', type=str, default=EXP_NAME)
    parser.add_argument('--phname', type=str, default='ph_20180628_norm_0')
    parser.add_argument('--method', '-me', type=int, default=0)
    parser.add_argument('--T1', type=float, default=0.0)
    parser.add_argument('--T2', type=float, default=1.0)
    parser.add_argument('--Tmax', type=float, default=0.0)
    parser.add_argument('--combine', '-cb', type=int, default=0) # 0: add, 1: multiply, other: multiple learning
    parser.add_argument('--thres0', type=float, default=0.0)
    parser.add_argument('--thres1', type=float, default=0.0)
    parser.add_argument('--weight', '-w', type=float, default=0.5)
    parser.add_argument('--time', type=float, default=1.0)
    parser.add_argument('--infval', type=float, default=0.0)
    parser.add_argument('--norm', type=int, default=1)
    parser.add_argument('--nums', '-nu', type=int, default=1)
    parser.add_argument('--sp', type=int, default=0) # specific index of kernel method, -1 for test all methods
    parser.add_argument('--dp', type=int, default=-1) # specific dim for calculating kernel, -1 for use all dimensions
    args = parser.parse_args()
    parent_path, data_name, exp_name, ph_name = args.parentpath, args.dataname, args.expname, args.phname
    dp, sp, norm, infval, weight = args.dp, args.sp, args.norm, args.infval, args.weight
    Tmax, cb = args.Tmax, args.combine
    # ---- logging setup: one log file per parameter combination ----
    dir_path = os.path.dirname(os.path.realpath(__file__))
    log_path = os.path.join(dir_path, args.log)
    exp_path = get_exp_path(parent_path, data_name, exp_name)
    if os.path.exists(log_path) == False:
        os.makedirs(log_path)
    log_filename = '{}_{}_{}_T1_{}_T2_{}_Tmax_{}_thres0_{}_thres1_{}_nums_{}_method_{}_norm_{}_infval_{}_t_{}_sp_{}_dp_{}_cb_{}.log'.format(
        data_name, exp_name, ph_name,
        args.T1, args.T2, Tmax, args.thres0, args.thres1,
        args.nums, args.method, args.norm, args.infval, args.time, sp, dp, cb
    )
    log_filename = os.path.join(log_path, log_filename)
    logger = get_module_logger(__name__, log_filename)
    logger.info(log_filename)
    # ---- sample indices and class labels ----
    setting_file = os.path.join(exp_path, 'barlist_{}_d_0.txt'.format(ph_name))
    X_index, y = load_data(setting_file)
    # Load kernels
    # sp selects a single method from ker_methods; empty string = all
    sp_mth = ''
    if sp >= 0 and sp < len(ker_methods):
        sp_mth = ker_methods[sp]
    kerX = defaultdict()
    # scale-variant type
    kX0, kX1 = None, None
    for mt in ker_methods:
        if len(sp_mth) > 0 and mt != sp_mth:
            continue
        if mt == FET_COMMON:
            # features type
            feature_file = os.path.join(exp_path, 'kernel/common_features_rev.npy')
            if os.path.isfile(feature_file):
                fetX = np.load(feature_file)
                fetX[np.isnan(fetX)] = 0.0
                if norm > 0:
                    # min-max scale each feature column independently
                    for i in range(fetX.shape[1]):
                        v = fetX[:, i]
                        vmin, vmax = v.min(), v.max()
                        if vmin < vmax:
                            fetX[:, i] = (v - vmin) / (vmax - vmin)
                kerX[mt] = fetX
                print('Features loaded')
        elif mt == FET_SCALE or mt == FET_AVG_SCALE or mt == FET_AVG_SCALE_NORM:
            # dimension-0 and dimension-1 kernels, combined per --combine
            kprm0 = KernelParams(args.method, args.T1, args.T2, args.thres0, args.infval, args.time, Tmax)
            kprm1 = KernelParams(args.method, args.T1, args.T2, args.thres1, args.infval, args.time, Tmax)
            kX0 = load_kernel('kernel', mt, exp_path, ph_name, dp, 0, kprm0, norm)
            kX1 = load_kernel('kernel', mt, exp_path, ph_name, dp, 1, kprm1, norm)
            kX = None
            if cb == 1:
                kX = multiply_kernel(kX0, kX1)
            else:
                kX = add_kernel(kX0, kX1)
            if kX is not None:
                kerX[mt] = kX
                print('Scale kernel loaded mt={}, combine={}'.format(mt, cb))
                print('Shape = ', kX.shape)
        elif mt in ker_pal:
            # load graph kernel
            gX = load_graph_kernel('kernel', exp_path, mt, ker_pal[mt], norm)
            if gX is not None:
                kerX[mt] = gX
                print('{} kernel loaded'.format(mt))
    np.set_printoptions(precision=5)
    # per-method accumulators across repetitions
    global_test, global_train = defaultdict(), defaultdict()
    for mt in kerX.keys():
        global_test[mt] = []
        global_train[mt] = []
    # repeat for nums
    for n in range(args.nums):
        # ten-fold
        skf = StratifiedKFold(n_splits=10, shuffle=True, random_state=n)
        for mt in kerX.keys():
            local_test, local_train = [], []
            for train_index, test_index in skf.split(X_index, y):
                # cb not in {0, 1}: re-learn the kernel combination per fold
                if cb != 0 and cb != 1:
                    kerX[mt] = multiple_kernel_learning(y, train_index, kX0, kX1, label="learning combine kernels")
                train_sc, test_sc = svc_classify(kerX[mt], y, train_index, test_index, mt)
                local_train.append(train_sc)
                local_test.append(test_sc)
                #logger.debug('n={}, mt={}, score: train={}, test={}'.format(n, mt, train_sc, test_sc))
            avg_test_local = np.mean(local_test)
            avg_train_local = np.mean(local_train)
            global_test[mt].append(avg_test_local)
            global_train[mt].append(avg_train_local)
            #logger.debug('n={}, mean local score: train={}, test={}'.format(n, avg_train_local, avg_test_local))
            # running mean/std (in percent) over all repetitions so far
            avg_test_global, std_test_global = 100*np.mean(global_test[mt]), 100*np.std(global_test[mt])
            avg_train_global, std_train_global = 100*np.mean(global_train[mt]), 100*np.std(global_train[mt])
            logger.debug('n={}, mt={}, glocal score (mean, std): train=( {}, {} ), test=( {}, {} )'.format(n, mt,\
                avg_train_global, std_train_global,
                avg_test_global, std_test_global))
        logger.debug('')
|
{"hexsha": "75c5cccb2362d40952ea4ab08da3bea2086d870c", "size": 13836, "ext": "py", "lang": "Python", "max_stars_repo_path": "classify/kernel_eval.py", "max_stars_repo_name": "OminiaVincit/scale-variant-topo", "max_stars_repo_head_hexsha": "6945bc42aacd0d71a6fb472c87e09da223821e1e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2018-11-09T21:59:59.000Z", "max_stars_repo_stars_event_max_datetime": "2020-07-22T19:02:10.000Z", "max_issues_repo_path": "classify/kernel_eval.py", "max_issues_repo_name": "OminiaVincit/scale-variant-topo", "max_issues_repo_head_hexsha": "6945bc42aacd0d71a6fb472c87e09da223821e1e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "classify/kernel_eval.py", "max_forks_repo_name": "OminiaVincit/scale-variant-topo", "max_forks_repo_head_hexsha": "6945bc42aacd0d71a6fb472c87e09da223821e1e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.2938005391, "max_line_length": 140, "alphanum_fraction": 0.63226366, "include": true, "reason": "import numpy", "num_tokens": 3926}
|
from bson import json_util
import json
import os
import numpy as np
import tensorflow as tf
from keras.layers.core import K #import keras.backend as K
import time
import pandas as pd
import multiprocessing
#
from keras.preprocessing import text, sequence
from keras.preprocessing.text import Tokenizer
from keras.utils import to_categorical
RESULTS_DIR = "results/"  # where json training/search results are written
MAXLEN_SEQ = 700  # maximum sequence length handled (presumably for padding — confirm at call sites)
data_root = '/nosave/lange/cu-ssp/data/'
residue_list = list('ACEDGFIHKMLNQPSRTWVYX') + ['NoSeq']  # amino-acid vocabulary plus 'NoSeq' filler token
q8_list = list('LBEGIHST') + ['NoSeq']  # 8-state secondary-structure labels plus 'NoSeq' filler token
"""Json utils to print, save and load training results."""
def print_json(result):
    """Pretty-print a jsonable structure (e.g.: result)."""
    dumped = json.dumps(
        result,
        default=json_util.default,
        sort_keys=True,
        indent=4,
        separators=(',', ': '),
    )
    print(dumped)
def save_json_result(model_name, result):
    """Save json to a directory and a filename."""
    result_name = '{}.txt.json'.format(model_name)
    if not os.path.exists(RESULTS_DIR):
        os.makedirs(RESULTS_DIR)
    out_path = os.path.join(RESULTS_DIR, result_name)
    with open(out_path, 'w') as f:
        json.dump(
            result,
            f,
            default=json_util.default,
            sort_keys=True,
            indent=4,
            separators=(',', ': '),
        )
def load_json_result(best_result_name):
    """Load json from a path (directory + filename)."""
    result_path = os.path.join(RESULTS_DIR, best_result_name)
    with open(result_path, 'r') as f:
        raw = f.read()
    return json.JSONDecoder().decode(raw)
def load_best_hyperspace(name='json'):
    """Return the 'space' of the alphabetically last result file whose
    name contains `name`, or None when there is no match."""
    matching = sorted(f for f in os.listdir(RESULTS_DIR) if name in f)
    if not matching:
        return None
    return load_json_result(matching[-1])["space"]
# transformations for pssm:
def sigmoid_p(data):
    """Squash raw PSSM scores into (0, 1) via the logistic CDF.

    Fix: `logistic` was referenced but never imported anywhere in this
    module, so calling this raised NameError; import it locally here.
    """
    from scipy.stats import logistic
    return logistic.cdf(data)
# transformations for hmm:
def normal_h(data):
    """Map hmm profile values onto (0, 1] via the decay 2 ** (-x / 1000)."""
    exponent = -data / 1000
    return 2 ** exponent
# for both:
def standard(data):
    """Z-score normalisation: subtract the global mean, divide by the
    global standard deviation (used for both pssm and hmm profiles)."""
    return (data - np.mean(data)) / np.std(data)
# Computes and returns the n-grams of a particular sequence; defaults to unigrams (n=1)
def seq2ngrams(seqs, n = 1):
    """Split every sequence into its n-grams (defaults to unigrams, n=1).

    Note: one gram is produced per position, so the last n-1 grams of each
    sequence are shorter than n (same convention as the original code).
    """
    grams = []
    for seq in seqs:
        grams.append([seq[pos:pos + n] for pos in range(len(seq))])
    return np.array(grams)
## metrics for this task:
# The custom accuracy metric used for this task
def accuracy(y_true, y_predicted):
    """Per-position accuracy over non-masked positions.

    Positions whose true argmax is class 0 are dropped before comparing
    (class 0 is presumably the padding/'NoSeq' token produced by the
    tokenizer -- TODO confirm against the tokenizer setup in get_data).
    Returns a float tensor of 0/1 correctness values for Keras to average.
    """
    # argmax over the class axis gives integer class indices per position.
    y = tf.argmax(y_true, axis =- 1)
    y_ = tf.argmax(y_predicted, axis =- 1)
    # Keep only positions whose true class index is strictly positive.
    mask = tf.greater(y, 0)
    return K.cast(K.equal(tf.boolean_mask(y, mask), tf.boolean_mask(y_, mask)), K.floatx())
def weighted_accuracy(y_true, y_pred):
    """Accuracy weighted per position by the mass of its one-hot target row.

    sum(correct * row_weight) / sum(all weights); all-zero padding rows
    contribute zero weight and therefore do not affect the score.
    NOTE(review): K.equal yields a boolean tensor; some backends require an
    explicit K.cast before multiplying it with floats -- confirm this runs
    on the backend in use.
    """
    return K.sum(K.equal(K.argmax(y_true, axis=-1),
                  K.argmax(y_pred, axis=-1)) * K.sum(y_true, axis=-1)) / K.sum(y_true)
def kullback_leibler_divergence(y_true, y_pred):
    '''Calculates the Kullback-Leibler (KL) divergence between prediction
    and target values.
    '''
    # Clip both distributions away from zero so the log/ratio stay finite.
    eps = K.epsilon()
    p = K.clip(y_true, eps, 1)
    q = K.clip(y_pred, eps, 1)
    return K.sum(p * K.log(p / q), axis=-1)
def matthews_correlation(y_true, y_pred):
    '''Calculates the Matthews correlation coefficient measure for quality
    of binary classification problems.
    '''
    pred_pos = K.round(K.clip(y_pred, 0, 1))
    pred_neg = 1 - pred_pos
    true_pos = K.round(K.clip(y_true, 0, 1))
    true_neg = 1 - true_pos

    # Confusion-matrix counts.
    tp = K.sum(true_pos * pred_pos)
    tn = K.sum(true_neg * pred_neg)
    fp = K.sum(true_neg * pred_pos)
    fn = K.sum(true_pos * pred_neg)

    num = tp * tn - fp * fn
    den = K.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
    # epsilon guards against division by zero for degenerate batches.
    return num / (den + K.epsilon())
def precision(y_true, y_pred):
    '''Calculates the precision, a metric for multi-label classification of
    how many selected items are relevant.
    '''
    tp = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    pred_pos = K.sum(K.round(K.clip(y_pred, 0, 1)))
    # epsilon avoids 0/0 when nothing was predicted positive.
    return tp / (pred_pos + K.epsilon())
def recall(y_true, y_pred):
    '''Calculates the recall, a metric for multi-label classification of
    how many relevant items are selected.
    '''
    tp = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    actual_pos = K.sum(K.round(K.clip(y_true, 0, 1)))
    # epsilon avoids 0/0 when there are no positive targets.
    return tp / (actual_pos + K.epsilon())
def fbeta_score(y_true, y_pred, beta=1):
    '''Calculates the F score, the weighted harmonic mean of precision and
    recall.

    Useful for multi-label classification, where input samples can be
    classified as sets of labels.  With beta = 1 this is the F-measure;
    beta < 1 favours precision, beta > 1 favours recall.
    '''
    if beta < 0:
        raise ValueError('The lowest choosable beta is zero (only precision).')

    # If there are no true positives, fix the F score at 0 like sklearn.
    if K.sum(K.round(K.clip(y_true, 0, 1))) == 0:
        return 0

    p = precision(y_true, y_pred)
    r = recall(y_true, y_pred)
    beta_sq = beta ** 2
    return (1 + beta_sq) * (p * r) / (beta_sq * p + r + K.epsilon())
# losses:
def nll(y_true, y_pred):
    """ Negative log likelihood. """
    # keras.losses.binary_crossentropy averages over the last axis;
    # here we need the sum over that axis instead.
    per_element = K.binary_crossentropy(y_true, y_pred)
    return K.sum(per_element, axis=-1)
'''
def get_data(npy_path, normalize_profiles):
# daten durcheinander würfeln?
data = np.load(npy_path+'.npy')
max_len = 700
data_reshape = data.reshape(data.shape[0], 700, -1)
residue_onehot = data_reshape[:,:,0:22]
residue_q8_onehot = data_reshape[:,:,22:31]
profile = data_reshape[:,:,35:57]
#pad profiles to same length
zero_arr = np.zeros((profile.shape[0], max_len - profile.shape[1], profile.shape[2]))
profile_padded = np.concatenate([profile, zero_arr], axis=1)
residue_array = np.array(residue_list)[residue_onehot.argmax(2)]
q8_array = np.array(q8_list)[residue_q8_onehot.argmax(2)]
residue_str_list = []
q8_str_list = []
for vec in residue_array:
x = ''.join(vec[vec != 'NoSeq'])
residue_str_list.append(x)
for vec in q8_array:
x = ''.join(vec[vec != 'NoSeq'])
q8_str_list.append(x)
id_list = np.arange(1, len(residue_array) + 1)
len_list = np.array([len(x) for x in residue_str_list])
train_df = pd.DataFrame({'id': id_list, 'len': len_list, 'input': residue_str_list, 'expected': q8_str_list})
input_one_hot = residue_onehot
q8_onehot = residue_q8_onehot
train_input_seqs, train_target_seqs= train_df[['input', 'expected']][(train_df.len <= 700)].values.T
input_seqs
input_pssm = profile_padded
#SPÄTERE::
#nput_hmm = None
#rsa_onehot = None; output_data = [q8_onehot, rsa_onehot]
#input_data = [input_one_hot, input_seqs, input_pssm, input_hmm]
input_data = [input_one_hot, input_seqs, input_pssm]
output_data = q8_onehot
return input_data, output_data
'''
def load_augmented_data(npy_path, max_len):
    """Load a princeton-format .npy dataset.

    Returns a dataframe with columns id/len/input/expected (sequence and Q8
    target strings, 'NoSeq' padding removed) plus the pssm profile slice
    padded with zeros up to *max_len* positions.
    """
    raw = np.load(npy_path)
    reshaped = raw.reshape(raw.shape[0], 700, -1)
    residue_onehot = reshaped[:, :, 0:22]
    residue_q8_onehot = reshaped[:, :, 22:31]
    profile = reshaped[:, :, 35:57]

    # Pad profiles to the common length max_len.
    pad = np.zeros((profile.shape[0], max_len - profile.shape[1], profile.shape[2]))
    profile_padded = np.concatenate([profile, pad], axis=1)

    # Decode one-hot rows back to residue / Q8 letters.
    residue_array = np.array(residue_list)[residue_onehot.argmax(2)]
    q8_array = np.array(q8_list)[residue_q8_onehot.argmax(2)]
    residue_str_list = [''.join(row[row != 'NoSeq']) for row in residue_array]
    q8_str_list = [''.join(row[row != 'NoSeq']) for row in q8_array]

    id_list = np.arange(1, len(residue_array) + 1)
    len_list = np.array([len(s) for s in residue_str_list])
    train_df = pd.DataFrame({'id': id_list, 'len': len_list, 'input': residue_str_list, 'expected': q8_str_list})
    return train_df, profile_padded
def get_data():
    """Assemble train (cb6133filtered) and test (cb513) model inputs.

    Returns (input_data_train, y_train, input_data_test, y_test) where each
    input_data_* is [one_hot, token_ids, elmo_embedding, standardized pssm,
    hmm profile].  Reads several .npy files under data_root.
    """
    cb513filename = data_root+'data_princeton/cb513.npy'
    cb6133filteredfilename = data_root+'data_princeton/cb6133filtered.npy'
    maxlen_seq = 700
    # load train and test and cut length to maxlen_seq
    train_df, X_aug_train = load_augmented_data(cb6133filteredfilename, maxlen_seq)
    train_input_seqs, train_target_seqs = train_df[['input', 'expected']][(train_df.len <= maxlen_seq)].values.T
    test_df, X_aug_test = load_augmented_data(cb513filename, maxlen_seq)
    test_input_seqs, test_target_seqs = test_df[['input', 'expected']][(test_df.len <= maxlen_seq)].values.T
    # Using the tokenizer to encode and decode the sequences for use in training
    # use preprocessing tools for text from keras to encode input sequence as word rank numbers and target sequence as one hot.
    # To ensure easy to use training and testing, all sequences are padded with zeros to the maximum sequence length
    # transform sequences to n-grams (seq2ngrams defaults to unigrams)
    train_input_grams = seq2ngrams(train_input_seqs)
    # transform sequences
    # fit alphabet on train basis
    tokenizer_encoder = Tokenizer()
    tokenizer_encoder.fit_on_texts(train_input_grams)
    tokenizer_decoder = Tokenizer(char_level=True)
    tokenizer_decoder.fit_on_texts(train_target_seqs)
    # train
    train_input_data = tokenizer_encoder.texts_to_sequences(train_input_grams)
    X_train = sequence.pad_sequences(train_input_data, maxlen=maxlen_seq, padding='post')
    # transform targets to one-hot
    train_target_data = tokenizer_decoder.texts_to_sequences(train_target_seqs)
    train_target_data = sequence.pad_sequences(train_target_data, maxlen=maxlen_seq, padding='post')
    y_train = to_categorical(train_target_data)
    input_one_hot = to_categorical(X_train)
    # test: same encoders fitted on train are reused here (no refitting).
    test_input_grams = seq2ngrams(test_input_seqs)
    test_input_data = tokenizer_encoder.texts_to_sequences(test_input_grams)
    X_test = sequence.pad_sequences(test_input_data, maxlen=maxlen_seq, padding='post')
    test_target_data = tokenizer_decoder.texts_to_sequences(test_target_seqs)
    test_target_data = sequence.pad_sequences(test_target_data, maxlen=maxlen_seq, padding='post')
    y_test = to_categorical(test_target_data)
    input_one_hot_test = to_categorical(X_test)
    #### validation data (currently disabled)
    '''
    n_samples = len(train_df)
    np.random.seed(0)
    validation_idx = np.random.choice(np.arange(n_samples), size=300, replace=False)
    training_idx = np.array(list(set(np.arange(n_samples)) - set(validation_idx)))
    X_val = X_train[validation_idx]
    X_train = X_train[training_idx]
    y_val = y_train[validation_idx]
    y_train = y_train[training_idx]
    X_aug_val = X_aug_train[validation_idx]
    X_aug_train = X_aug_train[training_idx]
    '''
    # hmm profiles, truncated to the first 700 positions
    input_hmm = np.load(data_root+'data_princeton/hmm_train.npy', allow_pickle=True)[:,:700,:]
    input_hmm_test = np.load(data_root+'data_princeton/hmm_cb513.npy', allow_pickle=True)[:,:700,:]
    # elmo embedding (precomputed)
    input_elmo_train = np.load(data_root+'data_princeton/train_input_embedding.npy')
    input_elmo_test = np.load(data_root+'data_princeton/cb513_input_embedding.npy')
    print(input_elmo_train.shape)
    print(input_elmo_test.shape)
    # pssm profiles are z-scored via standard(); hmm profiles are passed raw.
    input_data_train = [input_one_hot, X_train, input_elmo_train, standard(X_aug_train), input_hmm]
    output_data_train = y_train
    print(len(y_train))
    print(input_hmm.shape)
    print(len(y_test))
    print(input_hmm_test.shape)
    input_data_test = [input_one_hot_test, X_test, input_elmo_test, standard(X_aug_test), input_hmm_test]
    output_data_test = y_test
    return input_data_train, output_data_train, input_data_test, output_data_test
# fit_on_texts Updates internal vocabulary based on a list of texts
# texts_to_sequences Transforms each text in texts to a sequence of integers, 0 is reserved for padding
# finished; only get_data still to be done
def evaluate_model(model, load_file, hype_space, X_test, y_test):
    """Reload weights from *load_file* and score on each configured test set.

    Prints every metric, times the evaluation, and returns a dict mapping
    test-set name -> accuracy (second entry of model.evaluate's score list).
    """
    start_time = time.time()
    test_sets = ['cb513']  # add more later
    accuracies = []
    for test in test_sets:
        model.load_weights(load_file)
        scores = model.evaluate(X_test, y_test, verbose=2, batch_size=1)
        for metric, value in zip(model.metrics_names, scores):
            print(test + ' test ', metric, ': ', value)
        accuracies.append(scores[1])
    minutes, seconds = divmod(time.time() - start_time, 60)
    print("Needed {:.0f}min {:.0f}s to evaluate model.".format(minutes, seconds))
    return dict(zip(test_sets, accuracies))
def load_6133_filted(npy_path=None):
    """Load and split the filtered CullPDB training set.

    TRAIN data Cullpdb+profile_6133_filtered; test data CB513/CASP10/CASP11.

    Parameters
    ----------
    npy_path : str, optional
        Path to the .npy data file.  Defaults to the cb6133filtered file
        under ``data_root``.  (The original code called ``np.load()`` with
        no argument, which always raised TypeError.)

    Returns
    -------
    tuple
        (train_hot, trainpssm, trainlabel, val_hot, valpssm, vallabel)
    """
    print("Loading train data (Cullpdb_filted)...")
    if npy_path is None:
        npy_path = data_root + 'data_princeton/cb6133filtered.npy'
    data = np.load(npy_path)
    data = np.reshape(data, (-1, 700, 57))
    datahot = data[:, :, 0:21]    # sequence feature (one-hot residues)
    datapssm = data[:, :, 35:56]  # profile feature
    labels = data[:, :, 22:30]    # secondary structure label, 8-d
    # shuffle data
    # np.random.seed(2018)
    num_seqs, seqlen, feature_dim = np.shape(data)
    num_classes = labels.shape[2]
    seq_index = np.arange(0, num_seqs)
    np.random.shuffle(seq_index)
    # train data
    trainhot = datahot[seq_index[:5278]]    # 21
    trainlabel = labels[seq_index[:5278]]   # 8
    trainpssm = datapssm[seq_index[:5278]]  # 21
    # val data
    vallabel = labels[seq_index[5278:5534]]   # 8
    valpssm = datapssm[seq_index[5278:5534]]  # 21
    valhot = datahot[seq_index[5278:5534]]    # 21
    # Collapse one-hot residues to integer class indices; all-zero (padding)
    # rows keep the initial value 1.  (`xrange` does not exist in Python 3 --
    # the original loops raised NameError.)
    train_hot = np.ones((trainhot.shape[0], trainhot.shape[1]))
    for i in range(trainhot.shape[0]):
        for j in range(trainhot.shape[1]):
            if np.sum(trainhot[i, j, :]) != 0:
                train_hot[i, j] = np.argmax(trainhot[i, j, :])
    val_hot = np.ones((valhot.shape[0], valhot.shape[1]))
    for i in range(valhot.shape[0]):
        for j in range(valhot.shape[1]):
            if np.sum(valhot[i, j, :]) != 0:
                val_hot[i, j] = np.argmax(valhot[i, j, :])
    # Solvent-accessibility labels: combine the two binary columns into one
    # of 4 classes.  NOTE: this result is computed but never returned.
    solvindex = range(33, 35)
    trainsolvlabel = data[:5600, :, solvindex]
    # Cast to int: the float values cannot be used as array indices below.
    trainsolvvalue = (trainsolvlabel[:, :, 0] * 2 + trainsolvlabel[:, :, 1]).astype(int)
    trainsolvlabel = np.zeros((trainsolvvalue.shape[0], trainsolvvalue.shape[1], 4))
    # Bound the loop by trainlabel's 5278 rows: the original iterated over all
    # rows of trainsolvvalue and indexed trainlabel out of range.
    for i in range(min(trainsolvvalue.shape[0], trainlabel.shape[0])):
        for j in range(trainsolvvalue.shape[1]):
            if np.sum(trainlabel[i, j, :]) != 0:
                trainsolvlabel[i, j, trainsolvvalue[i, j]] = 1
    return train_hot, trainpssm, trainlabel, val_hot, valpssm, vallabel
|
{"hexsha": "83663c35c9a7b7d5e9b6087f0826f94225c82bb6", "size": 15354, "ext": "py", "lang": "Python", "max_stars_repo_path": "model_neu/optimized/hyperutils.py", "max_stars_repo_name": "lelange/cu-ssp", "max_stars_repo_head_hexsha": "9f1a7abf79a2fb6ef2ae0f37de79469c2dc3488f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "model_neu/optimized/hyperutils.py", "max_issues_repo_name": "lelange/cu-ssp", "max_issues_repo_head_hexsha": "9f1a7abf79a2fb6ef2ae0f37de79469c2dc3488f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "model_neu/optimized/hyperutils.py", "max_forks_repo_name": "lelange/cu-ssp", "max_forks_repo_head_hexsha": "9f1a7abf79a2fb6ef2ae0f37de79469c2dc3488f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.5571428571, "max_line_length": 127, "alphanum_fraction": 0.6819721245, "include": true, "reason": "import numpy", "num_tokens": 4127}
|
/-
Copyright (c) 2021 Justus Springer. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Justus Springer
-/
import category_theory.sites.spaces
import topology.sheaves.sheaf
import category_theory.sites.dense_subsite
/-!
# Coverings and sieves; from sheaves on sites and sheaves on spaces
In this file, we connect coverings in a topological space to sieves in the associated Grothendieck
topology, in preparation of connecting the sheaf condition on sites to the various sheaf conditions
on spaces.
We also specialize results about sheaves on sites to sheaves on spaces; we show that the inclusion
functor from a topological basis to `topological_space.opens` is cover_dense, that open maps
induce cover_preserving functors, and that open embeddings induce compatible_preserving functors.
-/
noncomputable theory
universes w v u
open category_theory topological_space
namespace Top.presheaf

variables {X : Top.{w}}

/--
Given a presieve `R` on `U`, we obtain a covering family of open sets in `X`, by taking as index
type the type of dependent pairs `(V, f)`, where `f : V ⟶ U` is in `R`.
-/
def covering_of_presieve (U : opens X) (R : presieve U) : (Σ V, {f : V ⟶ U // R f}) → opens X :=
λ f, f.1

-- Definitional unfolding lemma for `covering_of_presieve`, tagged `simp`.
@[simp]
lemma covering_of_presieve_apply (U : opens X) (R : presieve U) (f : Σ V, {f : V ⟶ U // R f}) :
  covering_of_presieve U R f = f.1 := rfl

namespace covering_of_presieve

variables (U : opens X) (R : presieve U)

/--
If `R` is a presieve in the grothendieck topology on `opens X`, the covering family associated to
`R` really is _covering_, i.e. the union of all open sets equals `U`.
-/
lemma supr_eq_of_mem_grothendieck (hR : sieve.generate R ∈ opens.grothendieck_topology X U) :
  supr (covering_of_presieve U R) = U :=
begin
  apply le_antisymm,
  { refine supr_le _,
    intro f,
    exact f.2.1.le, },
  intros x hxU,
  rw [opens.mem_supr],
  obtain ⟨V, iVU, ⟨W, iVW, iWU, hiWU, -⟩, hxV⟩ := hR x hxU,
  exact ⟨⟨W, ⟨iWU, hiWU⟩⟩, iVW.le hxV⟩,
end

end covering_of_presieve

/--
Given a family of opens `U : ι → opens X` and any open `Y : opens X`, we obtain a presieve
on `Y` by declaring that a morphism `f : V ⟶ Y` is a member of the presieve if and only if
there exists an index `i : ι` such that `V = U i`.
-/
def presieve_of_covering_aux {ι : Type v} (U : ι → opens X) (Y : opens X) : presieve Y :=
λ V f, ∃ i, V = U i

/-- Take `Y` to be `supr U` and obtain a presieve over `supr U`. -/
def presieve_of_covering {ι : Type v} (U : ι → opens X) : presieve (supr U) :=
presieve_of_covering_aux U (supr U)

/-- Given a presieve `R` on `Y`, if we take its associated family of opens via
`covering_of_presieve` (which may not cover `Y` if `R` is not covering), and take
the presieve on `Y` associated to the family of opens via `presieve_of_covering_aux`,
then we get back the original presieve `R`. -/
@[simp] lemma covering_presieve_eq_self {Y : opens X} (R : presieve Y) :
  presieve_of_covering_aux (covering_of_presieve Y R) Y = R :=
by { ext Z f, exact ⟨λ ⟨⟨_,_,h⟩,rfl⟩, by convert h, λ h, ⟨⟨Z,f,h⟩,rfl⟩⟩ }

namespace presieve_of_covering

variables {ι : Type v} (U : ι → opens X)

/--
The sieve generated by `presieve_of_covering U` is a member of the grothendieck topology.
-/
lemma mem_grothendieck_topology :
  sieve.generate (presieve_of_covering U) ∈ opens.grothendieck_topology X (supr U) :=
begin
  intros x hx,
  obtain ⟨i, hxi⟩ := opens.mem_supr.mp hx,
  exact ⟨U i, opens.le_supr U i, ⟨U i, 𝟙 _, opens.le_supr U i, ⟨i, rfl⟩, category.id_comp _⟩, hxi⟩,
end

/--
An index `i : ι` can be turned into a dependent pair `(V, f)`, where `V` is an open set and
`f : V ⟶ supr U` is a member of `presieve_of_covering U f`.
-/
def hom_of_index (i : ι) : Σ V, {f : V ⟶ supr U // presieve_of_covering U f} :=
⟨U i, opens.le_supr U i, i, rfl⟩

/--
By using the axiom of choice, a dependent pair `(V, f)` where `f : V ⟶ supr U` is a member of
`presieve_of_covering U f` can be turned into an index `i : ι`, such that `V = U i`.
-/
def index_of_hom (f : Σ V, {f : V ⟶ supr U // presieve_of_covering U f}) : ι := f.2.2.some

-- The chosen index from `index_of_hom` indeed recovers the open set `f.1`.
lemma index_of_hom_spec (f : Σ V, {f : V ⟶ supr U // presieve_of_covering U f}) :
  f.1 = U (index_of_hom U f) := f.2.2.some_spec

end presieve_of_covering

end Top.presheaf
namespace Top.opens

variables {X : Top} {ι : Type*}

-- A functor from an index category into `opens X` is cover-dense for the
-- Grothendieck topology iff the range of its object map is a topological basis.
lemma cover_dense_iff_is_basis [category ι] (B : ι ⥤ opens X) :
  cover_dense (opens.grothendieck_topology X) B ↔ opens.is_basis (set.range B.obj) :=
begin
  rw opens.is_basis_iff_nbhd,
  split, intros hd U x hx, rcases hd.1 U x hx with ⟨V,f,⟨i,f₁,f₂,hc⟩,hV⟩,
  exact ⟨B.obj i, ⟨i,rfl⟩, f₁.le hV, f₂.le⟩,
  intro hb, split, intros U x hx, rcases hb hx with ⟨_,⟨i,rfl⟩,hx,hi⟩,
  exact ⟨B.obj i, ⟨⟨hi⟩⟩, ⟨⟨i, 𝟙 _, ⟨⟨hi⟩⟩, rfl⟩⟩, hx⟩,
end

-- Specialisation: the induced functor of a basis-indexed family is cover-dense.
lemma cover_dense_induced_functor {B : ι → opens X} (h : opens.is_basis (set.range B)) :
  cover_dense (opens.grothendieck_topology X) (induced_functor B) :=
(cover_dense_iff_is_basis _).2 h

end Top.opens
section open_embedding

open Top.presheaf opposite

variables {C : Type u} [category.{v} C]
variables {X Y : Top.{w}} {f : X ⟶ Y} {F : Y.presheaf C}

-- An open embedding induces a compatible-preserving functor of sites
-- (via the image functor of the underlying open map).
lemma open_embedding.compatible_preserving (hf : open_embedding f) :
  compatible_preserving (opens.grothendieck_topology Y) hf.is_open_map.functor :=
begin
  haveI : mono f := (Top.mono_iff_injective f).mpr hf.inj,
  apply compatible_preserving_of_downwards_closed,
  intros U V i,
  refine ⟨(opens.map f).obj V, eq_to_iso $ opens.ext $ set.image_preimage_eq_of_subset $ λ x h, _⟩,
  obtain ⟨_, _, rfl⟩ := i.le h,
  exact ⟨_, rfl⟩
end

-- An open map induces a cover-preserving functor between the Grothendieck
-- topologies on opens of `X` and of `Y`.
lemma is_open_map.cover_preserving (hf : is_open_map f) :
  cover_preserving (opens.grothendieck_topology X) (opens.grothendieck_topology Y) hf.functor :=
begin
  constructor,
  rintros U S hU _ ⟨x, hx, rfl⟩,
  obtain ⟨V, i, hV, hxV⟩ := hU x hx,
  exact ⟨_, hf.functor.map i, ⟨_, i, 𝟙 _, hV, rfl⟩, set.mem_image_of_mem f hxV⟩
end

-- Pulling a sheaf back along an open embedding yields a sheaf.
lemma Top.presheaf.is_sheaf_of_open_embedding (h : open_embedding f)
  (hF : F.is_sheaf) : is_sheaf (h.is_open_map.functor.op ⋙ F) :=
pullback_is_sheaf_of_cover_preserving h.compatible_preserving h.is_open_map.cover_preserving ⟨_, hF⟩

end open_embedding
namespace Top.sheaf

open Top opposite

variables {C : Type u} [category.{v} C]
variables {X : Top.{w}} {ι : Type*} {B : ι → opens X}
variables (F : X.presheaf C) (F' : sheaf C X) (h : opens.is_basis (set.range B))

/-- The empty component of a sheaf is terminal -/
def is_terminal_of_empty (F : sheaf C X) : limits.is_terminal (F.val.obj (op ⊥)) :=
F.is_terminal_of_bot_cover ⊥ (by tidy)

/-- A variant of `is_terminal_of_empty` that is easier to `apply`. -/
def is_terminal_of_eq_empty (F : X.sheaf C) {U : opens X} (h : U = ⊥) :
  limits.is_terminal (F.val.obj (op U)) :=
by convert F.is_terminal_of_empty

/-- If a family `B` of open sets forms a basis of the topology on `X`, and if `F'`
is a sheaf on `X`, then a homomorphism between a presheaf `F` on `X` and `F'`
is equivalent to a homomorphism between their restrictions to the indexing type
`ι` of `B`, with the induced category structure on `ι`. -/
def restrict_hom_equiv_hom :
  ((induced_functor B).op ⋙ F ⟶ (induced_functor B).op ⋙ F'.1) ≃ (F ⟶ F'.1) :=
@cover_dense.restrict_hom_equiv_hom _ _ _ _ _ _ _ _ (opens.cover_dense_induced_functor h)
  _ F F'

-- On a basic open `B i`, the extended hom agrees with the restricted hom it
-- came from.
@[simp] lemma extend_hom_app (α : ((induced_functor B).op ⋙ F ⟶ (induced_functor B).op ⋙ F'.1))
  (i : ι) : (restrict_hom_equiv_hom F F' h α).app (op (B i)) = α.app (op i) :=
by { nth_rewrite 1 ← (restrict_hom_equiv_hom F F' h).left_inv α, refl }

include h

-- Two homs into a sheaf agree as soon as they agree on all basic opens `B i`.
lemma hom_ext {α β : F ⟶ F'.1} (he : ∀ i, α.app (op (B i)) = β.app (op (B i))) : α = β :=
by { apply (restrict_hom_equiv_hom F F' h).symm.injective, ext i, exact he i.unop }

end Top.sheaf
|
{"author": "leanprover-community", "repo": "mathlib", "sha": "5e526d18cea33550268dcbbddcb822d5cde40654", "save_path": "github-repos/lean/leanprover-community-mathlib", "path": "github-repos/lean/leanprover-community-mathlib/mathlib-5e526d18cea33550268dcbbddcb822d5cde40654/src/topology/sheaves/sheaf_condition/sites.lean"}
|
(* This code is copyrighted by its authors; it is distributed under *)
(* the terms of the LGPL license (see LICENSE and description files) *)
(* *************************************************************************
Buchberger : reduction star
Laurent Thery May 1997 (revised April 2001)
************************************************************************** *)
From Buchberger Require Export Preduceplus.
Set Default Proof Using "Type".
Section Preducestar.
Load hCoefStructure.
Load hOrderStructure.
Load hReduceplus.
(* [reducestar Q p q]: q is a normal form of p modulo Q, i.e. p reduces to q
   in zero or more steps (reduceplus) and q itself is irreducible. *)
Inductive reducestar (Q : list (poly A0 eqA ltM)) :
list (Term A n) -> list (Term A n) -> Prop :=
reducestar0 :
forall p q : list (Term A n),
reduceplus A A0 A1 eqA invA minusA multA divA eqA_dec n ltM ltM_dec Q p
q ->
irreducible A A0 A1 eqA invA minusA multA divA eqA_dec n ltM ltM_dec Q
q -> reducestar Q p q.
(* reducestar is compatible with polynomial equality (eqP) on both sides,
   provided the starting polynomial is canonical. *)
Theorem reducestar_eqp_com :
forall (Q : list (poly A0 eqA ltM)) (p q r s : list (Term A n)),
reducestar Q p q ->
canonical A0 eqA ltM p ->
eqP A eqA n p r -> eqP A eqA n q s -> reducestar Q r s.
Proof using plusA os cs.
intros Q p q r s H' H'0 H'1 H'2; inversion H'.
apply reducestar0; auto.
apply reduceplus_eqp_com with (p := p) (q := q) (1 := cs); auto.
apply irreducible_eqp_com with (p := q) (1 := cs); auto.
apply canonical_reduceplus with (1 := cs) (3 := H); auto.
Qed.
(* Package a canonical term list as an element of the sigma type
   [poly A0 eqA ltM] (term list plus its canonicity proof). *)
Definition mks :
forall p : list (Term A n), canonical A0 eqA ltM p -> poly A0 eqA ltM.
intros p H'; exists p; exact H'.
Defined.
(* Decision procedure: given a nonzero term [a] and a basis [Q], either
   produce a polynomial [q = pX b p] of [Q] whose head term [b] divides [a],
   or a proof that no head term of a polynomial in [Q] divides [a].
   Proceeds by structural recursion (elim) on [Q]. *)
Definition selectdivf :
forall a : Term A n,
~ zeroP (A:=A) A0 eqA (n:=n) a ->
forall Q : list (poly A0 eqA ltM),
{q : list (Term A n) |
exists b : Term A n,
(exists p : list (Term A n),
divP A A0 eqA multA divA n a b /\
inPolySet A A0 eqA n ltM q Q /\ q = pX b p)} +
{(forall (b : Term A n) (q : list (Term A n)),
inPolySet A A0 eqA n ltM (pX b q) Q -> ~ divP A A0 eqA multA divA n a b)}.
intros a Z Q; elim Q; auto.
right.
intros b q H'; inversion_clear H'.
intros a0; case a0.
intros x; case x.
intros c l H'; case H'.
intros H'1; case H'1.
intros x0 H'0; left.
exists x0.
elim H'0; intros b E; elim E; intros p E0; elim E0; intros H'2 H'3; elim H'3;
intros H'4 H'5; try exact H'5; clear H'3 E0 E H'0.
exists b; exists p; split; [ idtac | split ]; auto.
apply inskip; auto.
intros H'0; right.
intros b q H'1; inversion_clear H'1; auto.
apply H'0 with (q := q); auto.
(* Head polynomial is nonempty: decide divisibility of its head term. *)
intros a1 l c l0 H'; case (divP_dec _ _ _ _ _ _ _ _ _ cs n a a1); auto.
apply canonical_nzeroP with (p := l) (ltM := ltM); auto.
intros divP1; left; exists (pX a1 l); auto.
exists a1; exists l; split; [ idtac | split ]; auto.
change
(inPolySet A A0 eqA n ltM (pX a1 l)
(exist (canonical A0 eqA ltM) (pX a1 l) c :: l0))
in |- *.
apply incons with (a := a1) (p := l) (H := c); auto.
intros divP1; case H'; intros Hyp1.
case Hyp1.
intros x0 H'0; left; exists x0.
elim H'0; intros b E; elim E; intros p E0; elim E0; intros H'1 H'2; elim H'2;
intros H'3 H'4; try exact H'4; clear H'2 E0 E H'0.
exists b; exists p; split; [ idtac | split ]; auto.
apply inskip; auto.
right.
intros b q H'1; generalize divP1; inversion_clear H'1; eauto.
Defined.
(* Decision procedure: given a canonical polynomial [r], either produce a
   one-step reduct of [r] modulo [Q], or a proof that [r] is irreducible.
   Uses [selectdivf] on the head term; recurses on the tail otherwise. *)
Definition selectpolyf :
forall (Q : list (poly A0 eqA ltM)) (r : list (Term A n)),
canonical A0 eqA ltM r ->
{q : list (Term A n) |
reduce A A0 A1 eqA invA minusA multA divA eqA_dec n ltM ltM_dec Q r q} +
{irreducible A A0 A1 eqA invA minusA multA divA eqA_dec n ltM ltM_dec Q r}.
intros Q r; elim r.
(* The zero polynomial is irreducible. *)
intros H'; right;
change
(irreducible A A0 A1 eqA invA minusA multA divA eqA_dec n ltM ltM_dec Q
(pO A n)) in |- *; apply pO_irreducible; auto.
intros a l H' H'0.
cut (canonical A0 eqA ltM l);
[ intros C0 | apply canonical_imp_canonical with (a := a); auto ].
cut (~ zeroP (A:=A) A0 eqA (n:=n) a);
[ intros Z0 | apply canonical_nzeroP with (p := l) (ltM := ltM); auto ].
case (selectdivf a Z0 Q); intros Hyp1.
case Hyp1.
intros x0; case x0; left.
absurd True; auto.
elim e; intros b E; elim E; intros p E0; elim E0; intros H'1 H'2; elim H'2;
intros H'3 H'4; inversion H'4; clear H'2 E0 E e.
cut (~ zeroP (A:=A) A0 eqA (n:=n) t); [ intros nZt | idtac ].
(* Top reduction: subtract the scaled divisor (spminusf). *)
exists
(spminusf A A0 A1 eqA invA minusA multA divA eqA_dec n ltM ltM_dec a t nZt l
l0).
elim e; intros b E; elim E; intros p E0; elim E0; intros H'1 H'2; elim H'2;
intros H'3 H'4; clear H'2 E0 E e.
change
(reduce A A0 A1 eqA invA minusA multA divA eqA_dec n ltM ltM_dec Q
(pX a l)
(spminusf A A0 A1 eqA invA minusA multA divA eqA_dec n ltM ltM_dec a t
nZt l l0)) in |- *.
apply reducetop_sp with (1 := cs); auto.
rewrite (pX_invl A n t b l0 p); auto.
apply canonical_nzeroP with (ltM := ltM) (p := l0).
apply inPolySet_imp_canonical with (L := Q); auto.
elim e; intros b E; elim E; intros p E0; elim E0; intros H'1 H'2; elim H'2;
intros H'3 H'4; try exact H'3; clear H'2 E0 E e.
(* No divisor of the head: recurse into the tail. *)
lapply H'; [ intros H'1; case H'1; clear H' | clear H' ].
intros H'; case H'.
intros x H'2; left; exists (pX a x).
change
(reduce A A0 A1 eqA invA minusA multA divA eqA_dec n ltM ltM_dec Q
(pX a l) (pX a x)) in |- *.
apply reduceskip; auto.
intros H'; right;
change
(forall q : list (Term A n),
~
reduce A A0 A1 eqA invA minusA multA divA eqA_dec n ltM ltM_dec Q
(pX a l) q) in |- *.
intros q; red in |- *; intros H'2; generalize H' Hyp1; inversion_clear H'2.
intros H'3 H'4; lapply (H'4 b q0); [ intros H'7; apply H'7 | idtac ]; auto.
intros H'3 H'4;
absurd
(reduce A A0 A1 eqA invA minusA multA divA eqA_dec n ltM ltM_dec Q l q0);
auto.
auto.
Defined.
(* Normal-form computation: given a basis [Q] and a polynomial [p], produce
   some [q] with [reducestar Q p q].  Built by well-founded recursion on the
   order [sltp] (each reduction step strictly decreases, cf. ltP_reduce). *)
Definition Reducef :
forall (Q : list (poly A0 eqA ltM)) (p : poly A0 eqA ltM),
{q : poly A0 eqA ltM |
reducestar Q (s2p A A0 eqA n ltM p) (s2p A A0 eqA n ltM q)}.
intros Q p; pattern p in |- *.
apply well_founded_induction_type with (1 := sltp_wf _ A0 eqA _ ltM os).
intros x; case x.
intros x0; case x0.
(* Base case: the zero polynomial is already in normal form. *)
intros o H'; simpl in |- *.
exists (mks (pO A n) (canonicalpO A A0 eqA n ltM)); simpl in |- *; auto.
apply reducestar0; auto.
apply Rstar_0; auto.
apply pO_irreducible; auto.
simpl in |- *; intros a l o H'; auto.
cut (~ zeroP (A:=A) A0 eqA (n:=n) a);
[ intros Z0 | apply canonical_nzeroP with (ltM := ltM) (p := l) ];
auto.
lapply (selectdivf a);
[ intros H'1; case (H'1 Q) | idtac ];
auto; intros Div1.
case Div1.
intros x1; case x1.
intros H'0; absurd True; auto.
elim H'0; intros b E; elim E; intros p0 E0; elim E0; intros H'3 H'4; elim H'4;
intros H'5 H'6; inversion_clear H'6; clear H'4 E0 E H'0.
(* A divisor of the head exists: perform one top reduction and recurse. *)
intros a0 l0 H'0.
cut (~ zeroP (A:=A) A0 eqA (n:=n) a0); [ intros nZt | idtac ].
2: apply canonical_nzeroP with (p := l0) (ltM := ltM).
2: apply inPolySet_imp_canonical with (L := Q); auto.
2: elim H'0; intros b E; elim E; intros p0 E0; elim E0; intros H'3 H'4;
elim H'4; intros H'5 H'6; try exact H'5; clear H'4 E0 E H'0.
cut
(canonical A0 eqA ltM
(spminusf A A0 A1 eqA invA minusA multA divA eqA_dec n ltM ltM_dec a a0
nZt l l0)); [ intros Op2 | idtac ]; auto.
lapply
(H'
(mks
(spminusf A A0 A1 eqA invA minusA multA divA eqA_dec n ltM ltM_dec a
a0 nZt l l0) Op2)); [ intros H'4; case H'4 | idtac ];
auto.
intros x2 H'3; exists x2.
apply reducestar0; auto.
apply
Rstar_n
with
(y := spminusf A A0 A1 eqA invA minusA multA divA eqA_dec n ltM ltM_dec a
a0 nZt l l0); auto.
elim H'0; intros b E; elim E; intros p0 E0; elim E0; intros H'5 H'6; elim H'6;
intros H'7 H'8; clear H'6 E0 E H'0.
change
(reduce A A0 A1 eqA invA minusA multA divA eqA_dec n ltM ltM_dec Q
(pX a l)
(spminusf A A0 A1 eqA invA minusA multA divA eqA_dec n ltM ltM_dec a a0
nZt l l0)) in |- *.
apply reducetop_sp with (1 := cs); auto.
rewrite (pX_invl A n a0 b l0 p0); auto.
inversion_clear H'3; auto.
inversion_clear H'3; auto.
(* Termination: the reduct is strictly smaller for the well-founded order. *)
simpl in |- *;
apply
ltP_reduce
with (Q := Q) (eqA_dec := eqA_dec) (ltM_dec := ltM_dec) (1 := cs);
auto.
elim H'0; intros b E; elim E; intros p0 E0; elim E0; intros H'3 H'4; elim H'4;
intros H'5 H'6; clear H'4 E0 E H'0.
change
(reduce A A0 A1 eqA invA minusA multA divA eqA_dec n ltM ltM_dec Q
(pX a l)
(spminusf A A0 A1 eqA invA minusA multA divA eqA_dec n ltM ltM_dec a a0
nZt l l0)) in |- *.
apply reducetop_sp with (1 := cs); auto.
rewrite (pX_invl A n a0 b l0 p0); auto.
apply canonical_spminusf with (1 := cs); auto.
apply canonical_imp_canonical with (a := a); auto.
elim H'0; intros b E; elim E; intros p0 E0; elim E0; intros H'3 H'4; elim H'4;
intros H'5 H'6; clear H'4 E0 E H'0.
apply canonical_imp_canonical with (a := a0); auto.
apply inPolySet_imp_canonical with (L := Q); auto.
elim H'0; intros b E; elim E; intros p0 E0; elim E0; intros H'3 H'4; elim H'4;
intros H'5 H'6; clear H'4 E0 E H'0.
rewrite (pX_invl A n a0 b l0 p0); auto.
(* No divisor of the head term: normalise the tail and re-attach the head. *)
cut (canonical A0 eqA ltM l);
[ intros Op2 | apply canonical_imp_canonical with (a := a); auto ].
lapply (H' (mks l Op2)); simpl in |- *; [ intros H'3; case H'3 | idtac ];
auto.
2: change (ltP (A:=A) (n:=n) ltM l (pX a l)) in |- *;
apply ltP_refl_pX with (1 := cs); auto.
intros x1; case x1.
intros x2 c H'4; simpl in |- *; cut (canonical A0 eqA ltM (pX a x2));
[ intros Op3 | auto ].
exists (mks (pX a x2) Op3); simpl in |- *.
apply reducestar0; auto.
change
(reduceplus A A0 A1 eqA invA minusA multA divA eqA_dec n ltM ltM_dec Q
(pX a l) (pX a x2)) in |- *.
apply reduceplus_skip with (1 := cs); auto.
inversion_clear H'4; auto.
change
(forall p : list (Term A n),
~
reduce A A0 A1 eqA invA minusA multA divA eqA_dec n ltM ltM_dec Q
(pX a x2) p) in |- *.
intros p0; red in |- *; intros H'5; generalize H'4 Div1; inversion_clear H'5;
simpl in |- *.
intros H'6 H'7; lapply (H'7 b q); [ intros H'10; apply H'10 | idtac ]; auto.
intros H'6 H'7;
absurd
(reduce A A0 A1 eqA invA minusA multA divA eqA_dec n ltM ltM_dec Q x2 q);
auto.
inversion_clear H'6; auto.
cut
(reduceplus A A0 A1 eqA invA minusA multA divA eqA_dec n ltM ltM_dec Q
(pX a l) (pX a x2)); [ intros Re0 | idtac ].
apply canonical_reduceplus with (1 := cs) (3 := Re0); auto.
apply reduceplus_skip with (1 := cs); auto.
inversion_clear H'4; auto.
Defined.
(* Existence of normal forms: every canonical polynomial reduces-star to
   some polynomial (immediate from the computable [Reducef]). *)
Theorem reduce0_reducestar :
forall (Q : list (poly A0 eqA ltM)) (p : list (Term A n)),
canonical A0 eqA ltM p -> exists t : list (Term A n), reducestar Q p t.
Proof using plusA os cs.
intros Q p H.
generalize (Reducef Q (mks p H)).
intros H'; elim H'.
simpl in |- *.
intros x H'0; exists (s2p A A0 eqA n ltM x); try assumption.
Qed.
(* Prepending reduction steps preserves reducestar:
   if x ->* y (reduceplus) and y has normal form z, then so does x. *)
Theorem reducestar_trans :
forall (Q : list (poly A0 eqA ltM)) (x y z : list (Term A n)),
canonical A0 eqA ltM x ->
reduceplus A A0 A1 eqA invA minusA multA divA eqA_dec n ltM ltM_dec Q x y ->
reducestar Q y z -> reducestar Q x z.
Proof using plusA os cs.
intros Q x y z H' H'0 H'1; inversion H'1.
apply reducestar0; auto.
apply reduceplus_trans with (1 := cs) (y := y); auto.
Qed.
(* Projection: a reducestar derivation contains a reduceplus derivation. *)
Theorem reducestar_reduceplus :
forall (Q : list (poly A0 eqA ltM)) (p q : list (Term A n)),
reducestar Q p q ->
reduceplus A A0 A1 eqA invA minusA multA divA eqA_dec n ltM ltM_dec Q p q.
Proof.
intros Q p q H'; inversion H'; auto.
Qed.
(* Projection: the target of a reducestar derivation is irreducible. *)
Theorem reducestar_irreducible :
forall (Q : list (poly A0 eqA ltM)) (p q : list (Term A n)),
reducestar Q p q ->
irreducible A A0 A1 eqA invA minusA multA divA eqA_dec n ltM ltM_dec Q q.
Proof.
intros Q p q H'; inversion H'; auto.
Qed.
Local Hint Resolve reducestar_reduceplus : core.
(* Inversion principle for reducestar: either p equals q (up to eqP) and p
   itself is irreducible, or p makes one reduction step to some r whose
   normal form is q. *)
Theorem reducestar_inv :
forall (Q : list (poly A0 eqA ltM)) (p q : list (Term A n)),
reducestar Q p q ->
canonical A0 eqA ltM p ->
eqP A eqA n p q /\
irreducible A A0 A1 eqA invA minusA multA divA eqA_dec n ltM ltM_dec Q p \/
(exists r : list (Term A n),
reduce A A0 A1 eqA invA minusA multA divA eqA_dec n ltM ltM_dec Q p r /\
reducestar Q r q).
Proof using plusA os cs.
intros Q p q H'; elim H'.
intros p0 q0 H'0; inversion H'0.
intros H'1 H'2; left; split; auto.
apply irreducible_eqp_com with (1 := cs) (p := q0); auto.
apply eqp_imp_canonical with (p := p0) (1 := cs); auto.
apply (eqp_sym _ _ _ _ _ _ _ _ _ cs n); auto.
intros H'1 H'2; right; exists y; split; auto.
apply reducestar0; auto.
Qed.
(* The zero polynomial reduces-star only to itself. *)
Lemma pO_reducestar :
forall (Q : list (poly A0 eqA ltM)) (p : list (Term A n)),
reducestar Q (pO A n) p -> p = pO A n.
Proof.
intros Q p H'.
cut
(reduceplus A A0 A1 eqA invA minusA multA divA eqA_dec n ltM ltM_dec Q
(pO A n) p); [ intros Red1 | apply reducestar_reduceplus; auto ].
apply pO_reduceplus with (2 := Red1); auto.
Qed.
(* The zero polynomial is its own normal form.
   NOTE(review): the statement's [p] and canonicity hypothesis are unused,
   and the proof's intros pattern names four hypotheses while the statement
   appears to bind only three -- verify this script against the build. *)
Theorem reducestar_pO_is_pO :
forall (Q : list (poly A0 eqA ltM)) (p q : list (Term A n)),
canonical A0 eqA ltM p -> reducestar Q (pO A n) (pO A n).
Proof.
intros Q p H' H'0; auto.
apply reducestar0; auto.
apply Rstar_0; auto.
apply pO_irreducible.
Qed.
(* A nonzero scalar multiple of any basis polynomial normalises to zero:
   it reduces (via reduce_in_pO) and its normal form is the zero polynomial. *)
Theorem reducestar_in_pO :
forall (Q : list (poly A0 eqA ltM)) (a : Term A n) (p : list (Term A n)),
inPolySet A A0 eqA n ltM p Q ->
~ zeroP (A:=A) A0 eqA (n:=n) a ->
reducestar Q (mults (A:=A) multA (n:=n) a p) (pO A n).
Proof using plusA os cs.
intros Q a p H' H'0.
cut (canonical A0 eqA ltM p); [ intros Op0 | idtac ].
apply
reducestar_eqp_com
with
(p := mults (A:=A) multA (n:=n) a p)
(q := mults (A:=A) multA (n:=n) a (pO A n)); auto.
apply reducestar0; auto.
apply reduceplus_mults with (1 := cs); auto.
apply reduce_imp_reduceplus with (1 := cs); auto.
apply reduce_in_pO with (1 := cs); auto.
simpl in |- *; apply pO_irreducible; auto.
apply inPolySet_imp_canonical with (L := Q); auto.
Qed.
(* Decision procedure: given a nonzero term [a] and a basis [Q], either pick
   a polynomial of [Q] whose head is divided by [a] (pickinSetp witness), or
   prove that no polynomial [pX b q] of [Q] has such a head term. *)
Definition pickinSet :
forall a : Term A n,
~ zeroP (A:=A) A0 eqA (n:=n) a ->
forall Q : list (poly A0 eqA ltM),
{p : list (Term A n) | pickinSetp A A0 eqA multA divA n ltM a p Q} +
{(forall (b : Term A n) (p q : list (Term A n)),
inPolySet A A0 eqA n ltM p Q ->
divP A A0 eqA multA divA n a b -> p <> pX b q)}.
intros a Z0 Q; elim Q.
right; intros b p q H'; inversion_clear H'.
intros a0; case a0.
intros x; case x.
intros c l H'; case H'; auto.
intros H'0; left; inversion_clear H'0.
exists x0; auto.
apply pickinSetskip; auto.
intros H'0; right.
intros b p q H'1; inversion_clear H'1.
intros H'3; red in |- *; intros H'4.
lapply (H'0 b p q);
[ intros H'7; lapply H'7; [ intros H'8; clear H'7 | clear H'7 ] | idtac ];
auto.
(* Nonempty head polynomial: decide divisibility of its head term. *)
intros a1 l c l0 H'.
case divP_dec with (1 := cs) (a := a) (b := a1); auto.
apply canonical_nzeroP with (ltM := ltM) (p := l); auto.
intros H'0; left; exists (pX a1 l); auto.
change
(pickinSetp A A0 eqA multA divA n ltM a (pX a1 l) (mks (pX a1 l) c :: l0))
in |- *; unfold mks in |- *; auto.
apply (pickinSeteqp A A0 eqA multA divA n ltM); auto.
intros Hyp.
case H'; intros H'1.
inversion H'1.
left; exists x0; auto.
apply pickinSetskip; auto.
right.
intros b p q H'2; inversion_clear H'2; auto.
intros H'3; red in |- *; intros H'4; apply Hyp.
injection H'4.
intros H'5 H'6; rewrite H'6; auto.
Defined.
End Preducestar.
|
{"author": "coq-community", "repo": "buchberger", "sha": "7625647c300bb5f155f6bf40b69c232f64819a4f", "save_path": "github-repos/coq/coq-community-buchberger", "path": "github-repos/coq/coq-community-buchberger/buchberger-7625647c300bb5f155f6bf40b69c232f64819a4f/theories/Preducestar.v"}
|
#! /usr/bin/env python3
import argparse
import os
import time
import numpy as np
import cv2
import dlib
here = os.path.abspath(os.path.dirname(__file__))
# Both model files are expected to live next to this script:
# the dlib 68-point landmark model and the OpenCV Haar face cascade.
_predictor_path = 'shape_predictor_68_face_landmarks.dat'
_casc_path = 'haarcascade_frontalface_alt.xml'
predictor_path = os.path.join(here, _predictor_path)
casc_path = os.path.join(here, _casc_path)
def applyAffineTransform(src, srcTri, dstTri, size):
    '''
    Warp src with the affine map that takes srcTri onto dstTri, producing an
    output image of the given (width, height) size.
    '''
    # Affine matrix mapping the source triangle onto the destination triangle.
    matrix = cv2.getAffineTransform(np.float32(srcTri), np.float32(dstTri))
    width, height = size
    # BORDER_REFLECT_101 avoids dark seams at the patch edges.
    warped = cv2.warpAffine(
        src,
        matrix, (width, height),
        None,
        flags=cv2.INTER_LINEAR,
        borderMode=cv2.BORDER_REFLECT_101
    )
    return warped
def rectContains(rect, point):
    """Return True if point (px, py) lies inside rect (x, y, w, h), borders inclusive."""
    rx, ry, rw, rh = rect
    px, py = point
    inside_x = rx <= px <= rx + rw
    inside_y = ry <= py <= ry + rh
    return inside_x and inside_y
def calculateDelaunayTriangles(rect, points):
    """Compute the Delaunay triangulation of `points` inside `rect`.

    Returns a list of index triples (i, j, k) into `points`, one per triangle
    whose three vertices all lie inside `rect`.  Triangles touching vertices
    outside the rect are dropped.
    """
    subdiv = cv2.Subdiv2D(rect)
    for p in points:
        subdiv.insert(tuple(p))
    # getTriangleList returns flat rows (x1, y1, x2, y2, x3, y3) of raw
    # coordinates; we must map each vertex back to its index in `points`.
    triangleList = subdiv.getTriangleList()
    delaunayTri = []
    pt = []
    count = 0
    for t in triangleList:
        pt.append((t[0], t[1]))
        pt.append((t[2], t[3]))
        pt.append((t[4], t[5]))
        pt1 = (t[0], t[1])
        pt2 = (t[2], t[3])
        pt3 = (t[4], t[5])
        if rectContains(rect, pt1) and rectContains(
                rect, pt2) and rectContains(rect, pt3):
            count = count + 1
            ind = []
            # Match each vertex to the landmark index by coordinate proximity
            # (< 1 px in both axes), since Subdiv2D returns floats.
            for j in range(0, 3):
                for k in range(0, len(points)):
                    if abs(pt[j][0] - points[k][0]) < 1.0 and abs(
                            pt[j][1] - points[k][1]) < 1.0:
                        ind.append(k)
            # Only keep triangles where all three vertices were matched.
            if len(ind) == 3:
                delaunayTri.append((ind[0], ind[1], ind[2]))
        pt = []
    return delaunayTri
def warpTriangle(img1, img2, t1, t2):
    '''
    Warps and alpha blends triangular regions from img1 and img2 to img.

    Warps triangle t1 of img1 onto triangle t2 of img2, blending the result
    into img2 **in place**.
    '''
    # Bounding rectangles (x, y, w, h) of each triangle.
    r1 = cv2.boundingRect(np.float32([t1]))
    r2 = cv2.boundingRect(np.float32([t2]))
    # Triangle vertices expressed relative to their bounding-rect origin.
    t1Rect = []
    t2Rect = []
    t2RectInt = []
    for i in range(0, 3):
        t1Rect.append(((t1[i][0] - r1[0]), (t1[i][1] - r1[1])))
        t2Rect.append(((t2[i][0] - r2[0]), (t2[i][1] - r2[1])))
        t2RectInt.append(((t2[i][0] - r2[0]), (t2[i][1] - r2[1])))
    # Mask is 1.0 inside the destination triangle, 0.0 outside.
    mask = np.zeros((r2[3], r2[2], 3), dtype=np.float32)
    cv2.fillConvexPoly(mask, np.int32(t2RectInt), (1.0, 1.0, 1.0), 16, 0)
    # Warp the source patch so that t1 maps onto t2.
    img1Rect = img1[r1[1]:r1[1] + r1[3], r1[0]:r1[0] + r1[2]]
    size = (r2[2], r2[3])
    img2Rect = applyAffineTransform(img1Rect, t1Rect, t2Rect, size)
    img2Rect = img2Rect * mask
    # Blend: clear the triangle area of img2, then add the masked warped patch.
    img2[r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]] = img2[
        r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]
    ] * ((1.0, 1.0, 1.0) - mask)
    img2[r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]
         ] = img2[r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]] + img2Rect
class FPSReporter:
    """Callable that prints the elapsed time and FPS between consecutive calls.

    The first call only records a timestamp; every later call prints the
    interval since the previous call and the corresponding frame rate.
    """

    def __init__(self):
        # Timestamp of the previous call; None until the first call.
        self.last = None

    def __call__(self):
        now = time.perf_counter()
        previous = self.last
        if previous is not None:
            delta = now - previous
            print('time={:.2g}, fps={:.2g}'.format(delta, 1 / delta))
        self.last = now


fps = FPSReporter()
def init():
    """Load the face detectors and the landmark predictor into module globals.

    Must be called once before any detect_* / predict_landmark call.
    """
    global casade_classifier, detector, predictor
    # Haar cascade: fast detector used by detect_faces_fast (camera loop).
    casade_classifier = cv2.CascadeClassifier(casc_path)
    # dlib frontal detector: slower, used by detect_faces_slow.
    detector = dlib.get_frontal_face_detector()
    # dlib 68-point facial landmark predictor.
    predictor = dlib.shape_predictor(predictor_path)
def detect_faces_fast(img):
    """Detect faces with the Haar cascade (fast, less accurate).

    Returns a list of dlib.rectangle objects so results are interchangeable
    with detect_faces_slow.
    """
    boxes = casade_classifier.detectMultiScale(
        cv2.cvtColor(img, cv2.COLOR_BGR2GRAY),
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(50, 50)
    )
    # Convert (x, y, w, h) boxes to dlib (left, top, right, bottom) rects.
    return [
        dlib.rectangle(*map(int, [x, y, x + w, y + h]))
        for (x, y, w, h) in boxes
    ]
def detect_faces_slow(img):
    """Detect faces with the dlib detector (slower, more accurate).

    Returns dlib rectangles; upsamples the image once (second argument).
    """
    return detector(img, 1)
def predict_landmark(img, rect):
    """Return the 68 facial landmarks inside `rect` as a list of (x, y) tuples."""
    return [(point.x, point.y) for point in predictor(img, rect).parts()]
def check_bound(img, points):
    '''
    returns true if all points are in the image
    '''
    height, width, *_ = img.shape
    for (x, y) in points:
        if not (0 <= x < width and 0 <= y < height):
            return False
    return True
def get_convex_hull_indexes(points):
    """Return the indexes (into `points`) of the convex-hull vertices."""
    # returnPoints=False makes convexHull yield an (N, 1) array of indexes;
    # [:, 0] flattens it to a 1-D index array.
    return cv2.convexHull(np.array(points), returnPoints=False)[:, 0]
# face68_reflections[i] is the index of the horizontally mirrored counterpart
# of landmark i in the 68-point face model (jaw, brows, eyes, nose and mouth
# indexes swapped left <-> right).  Used to mirror a face's landmarks.
face68_reflections = np.array(
    [
        16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 26, 25, 24,
        23, 22, 21, 20, 19, 18, 17, 27, 28, 29, 30, 35, 34, 33, 32, 31, 45, 44,
        43, 42, 47, 46, 39, 38, 37, 36, 41, 40, 54, 53, 52, 51, 50, 49, 48, 59,
        58, 57, 56, 55, 64, 63, 62, 61, 60, 67, 66, 65
    ]
)
# Landmark indexes outlining the left half of the face (jaw + midline).
left_face_indexes = np.array(
    [
        0, 1, 2, 3, 4, 5, 6, 7, 8, 57, 66, 62, 51, 33, 30, 29, 28, 27, 21, 20,
        19, 18, 17
    ]
)
# Landmark indexes outlining the right half of the face (jaw + midline).
right_face_indexes = np.array(
    [
        8, 9, 10, 11, 12, 13, 14, 15, 16, 26, 25, 24, 23, 22, 27, 28, 29, 30,
        33, 51, 62, 66, 57
    ]
)
# Runtime flags; overwritten from command-line options in __main__.
USE_CONVEX_HULL = False
AUTO_REFLECT = True
def put_face(
    srcimg,
    srcpoints,
    dstimg,
    dstpoints,
):
    """Swap the face given by `srcpoints` in `srcimg` onto the face given by
    `dstpoints` in `dstimg`.

    Returns a tuple ``(result_image, face_crop)`` where ``face_crop`` is the
    padded region around the destination face with the Delaunay triangles
    drawn on it (debug view).  The tuple shape is guaranteed on all paths.
    """
    dst_points_list = dstpoints
    srcpoints = np.array(srcpoints)
    dstpoints = np.array(dstpoints)
    # Bounding box of the destination face, padded by 5 px, clamped to image.
    x1, y1 = dstpoints.min(axis=0)
    x2, y2 = dstpoints.max(axis=0)
    h, w = dstimg.shape[:2]
    x1, y1 = max(x1 - 5, 0), max(y1 - 5, 0)
    x2, y2 = min(x2 + 5, w), min(y2 + 5, h)
    dsth, dstw, *_ = dstimg.shape
    dstbounds = (0, 0, dstw, dsth)
    # Area test & reflect: if the two faces look toward different sides
    # (compared via the areas of each face's left/right half contours),
    # mirror the source landmarks so the halves line up.
    if AUTO_REFLECT:
        srcl = cv2.contourArea(srcpoints[left_face_indexes])
        srcr = cv2.contourArea(srcpoints[right_face_indexes])
        dstl = cv2.contourArea(dstpoints[left_face_indexes])
        dstr = cv2.contourArea(dstpoints[right_face_indexes])
        if (srcl < srcr) == (dstl > dstr):
            srcpoints = srcpoints[face68_reflections]
    # Convex hull of the destination landmarks (used for the clone mask).
    hull_indexes = get_convex_hull_indexes(dstpoints)
    dsthull = dstpoints[hull_indexes]
    if USE_CONVEX_HULL:
        srcpoints = srcpoints[hull_indexes]
        dstpoints = dsthull
    # Delaunay triangulation of the destination landmarks.
    try:
        delaunay_triangles = calculateDelaunayTriangles(dstbounds, dstpoints)
    except cv2.error:
        # BUG FIX: this path used to `return dstimg` (a single value) while
        # every caller unpacks two values; keep the tuple shape consistent.
        return dstimg, dstimg[y1:y2, x1:x2]
    if not delaunay_triangles:
        # Was `assert delaunay_triangles` — fail soft instead of crashing
        # (assert is stripped under -O and shouldn't validate runtime data).
        return dstimg, dstimg[y1:y2, x1:x2]
    warpedimg = np.copy(dstimg)
    # Affine-warp each source triangle onto the matching destination triangle.
    for triangle in delaunay_triangles:
        warpTriangle(
            srcimg,
            warpedimg,
            [srcpoints[point] for point in triangle],
            [dstpoints[point] for point in triangle],
        )
    # Seamlessly clone the warped face into the destination image using the
    # convex-hull mask.
    hull8U = [(p[0], p[1]) for p in dsthull]
    mask = np.zeros(dstimg.shape, dtype=dstimg.dtype)
    cv2.fillConvexPoly(mask, np.int32(hull8U), (255, 255, 255))
    r = cv2.boundingRect(np.float32([dsthull]))
    center = ((r[0] + int(r[2] / 2), r[1] + int(r[3] / 2)))
    dstimg = cv2.seamlessClone(
        np.uint8(warpedimg), dstimg, mask, center, cv2.NORMAL_CLONE
    )
    # Debug view: draw the triangulation and the face bounding box.
    face = dstimg.copy()
    for triangle in delaunay_triangles:
        points = np.array([dst_points_list[point] for point in triangle])
        cv2.drawContours(face, [points], 0, (0, 255, 0))
    cv2.rectangle(dstimg, (x1, y1), (x2, y2), (0, 0, 255), 3)
    return dstimg, face[y1:y2, x1:x2]
def img2video(srcfile, capture=0):
    """Swap the first face of `srcfile` onto every face in a live video stream.

    `capture` is passed to cv2.VideoCapture (0 = default webcam).  Shows the
    original and swapped frames; loops until the capture stops delivering
    frames.
    """
    srcimg = cv2.imread(srcfile)
    srcfaces = detect_faces_slow(srcimg)
    srcpoints = predict_landmark(srcimg, srcfaces[0])
    cap = cv2.VideoCapture(capture)
    while True:
        fps()
        ret, dstimg = cap.read()
        # BUG FIX: the frame used to be shown (and the loop used `assert ret`)
        # before checking `ret`, crashing on a failed read (dstimg is None).
        if not ret:
            break
        cv2.imshow('original', dstimg)
        dstfaces = detect_faces_fast(dstimg)
        for rect in dstfaces:
            dstpoints = predict_landmark(dstimg, rect)
            if not check_bound(dstimg, dstpoints):
                continue
            dstimg, _ = put_face(srcimg, srcpoints, dstimg, dstpoints)
        cv2.imshow('face swapped', dstimg)
        cv2.waitKey(1)
def imgswap(filename):
    """Rotate faces within a single image: each face receives the next
    detected face's landmarks, the last wrapping around to the first.

    Returns the resulting image.
    NOTE(review): raises IndexError if no face passes the bound check
    (pointss[0] on an empty list) — confirm intended for valid inputs only.
    """
    img = cv2.imread(filename)
    faces = detect_faces_slow(img)
    pointss = [predict_landmark(img, rect) for rect in faces]
    # Keep only faces whose landmarks all fall inside the image.
    pointss = [points for points in pointss if check_bound(img, points)]
    # All swaps read from the pristine copy so earlier swaps don't bleed
    # into later ones.
    srcimg = img.copy()
    for pa, pb in zip(pointss, pointss[1:] + [pointss[0]]):
        img, _ = put_face(srcimg, pa, img, pb)
    return img
def img2img(srcfile, dstfile):
    """Swap the first face found in `srcfile` onto every face in `dstfile`.

    Returns the resulting image (BGR array, as loaded by cv2.imread).
    """
    srcimg = cv2.imread(srcfile)
    srcfaces = detect_faces_slow(srcimg)
    srcpoints = predict_landmark(srcimg, srcfaces[0])
    dstimg = cv2.imread(dstfile)
    dstfaces = detect_faces_slow(dstimg)
    for rect in dstfaces:
        # (removed unused rect.top()/bottom()/left()/right() locals)
        dstpoints = predict_landmark(dstimg, rect)
        # Skip faces whose landmarks fall outside the image.
        if not check_bound(dstimg, dstpoints):
            continue
        dstimg, _ = put_face(srcimg, srcpoints, dstimg, dstpoints)
    return dstimg
def video2video(srcfile, dstfile, savefile):
    """Swap the first face of image `srcfile` onto every face of video
    `dstfile`, writing the result to `savefile` (XVID, 20 fps).

    A 250x250 debug crop of the last swapped face is pasted into the
    bottom-right corner of each frame that contains a face.
    """
    srcimg = cv2.imread(srcfile)
    srcfaces = detect_faces_slow(srcimg)
    srcpoints = predict_landmark(srcimg, srcfaces[0])
    cap = cv2.VideoCapture(dstfile)
    # BUG FIX: the writer size was hard-coded to (1280, 720); if the input
    # video has a different resolution, VideoWriter silently produces a
    # broken file.  Use the capture's own frame size (fall back to 1280x720
    # when the backend reports 0).
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) or 1280
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) or 720
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    out = cv2.VideoWriter(savefile, fourcc, 20.0, (width, height))
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        faces = detect_faces_slow(frame)
        for rect in faces:
            # (removed unused rect.top()/bottom()/left()/right() locals)
            points = predict_landmark(frame, rect)
            if not check_bound(frame, points):
                continue
            frame, face = put_face(srcimg, srcpoints, frame, points)
            face = cv2.resize(face, (250, 250), interpolation=cv2.INTER_CUBIC)
            frame[-250:, -250:, :] = face
        out.write(frame)
    cap.release()
    out.release()
def get_parser():
    """Build the command-line parser for the face-swap tool."""
    parser = argparse.ArgumentParser(allow_abbrev=False)
    parser.add_argument(
        'mode', type=str, default='video', help='self, image, video, camera'
    )
    parser.add_argument('source', help='the source image file')
    parser.add_argument(
        '--dst',
        metavar='filename',
        help='swap the face from the source image into this image file',
    )
    parser.add_argument(
        '--save',
        metavar='filename',
        help='save the swapped image to this file',
    )
    parser.add_argument(
        '--noshow', action='store_true', help="don't cv2.imshow"
    )
    parser.add_argument(
        '--directpaste',
        action='store_true',
        help='paste convex hull instead of feature points',
    )
    parser.add_argument(
        '--noreflect',
        action='store_true',
        help="do not reflect feature points vertically even if two faces are "
        "facing different sides"
    )
    return parser
if __name__ == '__main__':
    options = get_parser().parse_args()
    init()
    USE_CONVEX_HULL = options.directpaste
    AUTO_REFLECT = not options.noreflect
    IS_SHOW = not options.noshow
    result = None
    if options.mode == 'self':
        result = imgswap(options.source)
    elif options.mode == 'video':
        video2video(options.source, options.dst, options.save)
    elif options.mode == 'image':
        result = img2img(options.source, options.dst)
    elif options.mode == 'camera':
        # BUG FIX: was `img2video(0)`, which passed 0 as the *source image
        # path* instead of the capture index.
        img2video(options.source)
    else:
        assert False, "Shouldn't be here :("
    # Save/show the still-image result (both 'image' and 'self' modes;
    # previously the 'self' result was computed and then discarded).
    if result is not None:
        if options.save is not None:
            cv2.imwrite(options.save, result)
        # BUG FIX: was `if not IS_SHOW:` — the window was shown exactly
        # when --noshow was given and suppressed otherwise.
        if IS_SHOW:
            cv2.imshow('swap', result)
            cv2.waitKey(0)
            cv2.destroyAllWindows()
|
{"hexsha": "8a394a6af0af506780f1e1e80f733c81c33431e5", "size": 11892, "ext": "py", "lang": "Python", "max_stars_repo_path": "faceswap.py", "max_stars_repo_name": "JSharpClone/faceswap", "max_stars_repo_head_hexsha": "be706a4e29a5bb7777e3cec1fab03ce14b8ae06a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "faceswap.py", "max_issues_repo_name": "JSharpClone/faceswap", "max_issues_repo_head_hexsha": "be706a4e29a5bb7777e3cec1fab03ce14b8ae06a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "faceswap.py", "max_forks_repo_name": "JSharpClone/faceswap", "max_forks_repo_head_hexsha": "be706a4e29a5bb7777e3cec1fab03ce14b8ae06a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.075794621, "max_line_length": 79, "alphanum_fraction": 0.5828287925, "include": true, "reason": "import numpy", "num_tokens": 3668}
|
import json
import csv
import os
import copy
import numpy as np
from camel_tools.calima_star.database import CalimaStarDB
from camel_tools.calima_star.analyzer import CalimaStarAnalyzer
from camel_tools.disambig.mle import MLEDisambiguator
import torch
class InputExample:
    """Simple object to encapsulate each data example.

    Holds a source/target sentence pair together with their labels and the
    target gender tag.
    """

    def __init__(self, src, trg,
                 src_label, trg_label, trg_gender):
        self.src = src
        self.trg = trg
        self.src_label = src_label
        self.trg_label = trg_label
        self.trg_gender = trg_gender

    def __repr__(self):
        return str(self.to_json_str())

    def to_json_str(self):
        """Serialize the example as a pretty-printed JSON string."""
        return json.dumps(self.to_dict(), indent=2, ensure_ascii=False)

    def to_dict(self):
        """Return a deep-copied dict of the example's attributes."""
        return {name: copy.deepcopy(value)
                for name, value in self.__dict__.items()}
class RawDataset:
    """Encapsulates the raw examples in InputExample objects.

    Loads the train/dev/test splits of the gender-reinflection corpus from
    `data_dir`; each split needs a source file, a target file, and sibling
    `.label` / `.gender` files with one value per line.
    """
    def __init__(self, data_dir, normalized=False):
        # normalized=True selects the `.normalized` variants of all files.
        self.train_examples = self.get_train_examples(data_dir, normalized=normalized)
        self.dev_examples = self.get_dev_examples(data_dir, normalized=normalized)
        self.test_examples = self.get_test_examples(data_dir, normalized=normalized)

    def create_examples(self, src_path, trg_path):
        """Zip the parallel src/trg/label/gender files into InputExamples.

        Assumes all five files have the same number of lines (indexed by i).
        """
        src_txt = self.get_txt_examples(src_path)
        src_labels = self.get_labels(src_path + '.label')
        trg_txt = self.get_txt_examples(trg_path)
        trg_labels = self.get_labels(trg_path + '.label')
        trg_genders = self.get_trg_gender(trg_path + '.gender')
        examples = []
        for i in range(len(src_txt)):
            src = src_txt[i].strip()
            trg = trg_txt[i].strip()
            src_label = src_labels[i].strip()
            trg_label = trg_labels[i].strip()
            trg_gender = trg_genders[i].strip()
            input_example = InputExample(src=src,
                                         trg=trg,
                                         src_label=src_label,
                                         trg_label=trg_label,
                                         trg_gender=trg_gender)
            examples.append(input_example)
        return examples

    def get_labels(self, data_dir):
        """Read one label per line from a `.label` file."""
        with open(data_dir) as f:
            return f.readlines()

    def get_trg_gender(self, data_dir):
        """Read one gender tag per line from a `.gender` file."""
        with open(data_dir) as f:
            return f.readlines()

    def get_txt_examples(self, data_dir):
        """Read the UTF-8 sentence lines of a data file."""
        with open(data_dir, encoding='utf8') as f:
            return f.readlines()

    def get_train_examples(self, data_dir, normalized=False):
        """Reads the train examples of the dataset"""
        # Alternative training file combinations kept for reference:
        #joint_model/S-set.M.uniq+S-set.M.uniq+S-set.F.uniq+S-set.F.uniq+D-set-train.arin+D-set-train.arin
        #joint_model/S-set.M.uniq+S-set.F.uniq+S-set.M.uniq+S-set.F.uniq+D-set-train.ar.M+D-set-train.ar.F
        #joint_model/D-set-train.arin+D-set-train.arin
        #joint_model/D-set-train.ar.M+D-set-train.ar.F
        if normalized:
            return self.create_examples(os.path.join(data_dir, 'joint_model/D-set-train.arin+D-set-train.arin.normalized'),
                                        os.path.join(data_dir, 'joint_model/D-set-train.ar.M+D-set-train.ar.F.normalized'))
        else:
            return self.create_examples(os.path.join(data_dir, 'joint_model/D-set-train.arin+D-set-train.arin'),
                                        os.path.join(data_dir, 'joint_model/D-set-train.ar.M+D-set-train.ar.F'))

    def get_dev_examples(self, data_dir, normalized=False):
        """Reads the dev examples of the dataset"""
        if normalized:
            return self.create_examples(os.path.join(data_dir, 'joint_model/D-set-dev.arin+D-set-dev.arin.normalized'),
                                        os.path.join(data_dir, 'joint_model/D-set-dev.ar.M+D-set-dev.ar.F.normalized'))
        else:
            return self.create_examples(os.path.join(data_dir, 'joint_model/D-set-dev.arin+D-set-dev.arin'),
                                        os.path.join(data_dir, 'joint_model/D-set-dev.ar.M+D-set-dev.ar.F'))

    def get_test_examples(self, data_dir, normalized=False):
        """Reads the test examples of the dataset"""
        if normalized:
            return self.create_examples(os.path.join(data_dir, 'joint_model/D-set-test.arin+D-set-test.arin.normalized'),
                                        os.path.join(data_dir, 'joint_model/D-set-test.ar.M+D-set-test.ar.F.normalized'))
        else:
            return self.create_examples(os.path.join(data_dir, 'joint_model/D-set-test.arin+D-set-test.arin'),
                                        os.path.join(data_dir, 'joint_model/D-set-test.ar.M+D-set-test.ar.F'))
class Vocabulary:
    """Base vocabulary class: a bidirectional token <-> index mapping."""

    def __init__(self, token_to_idx=None):
        self.token_to_idx = {} if token_to_idx is None else token_to_idx
        # Inverse mapping, kept in sync by add_token.
        self.idx_to_token = {index: token
                             for token, index in self.token_to_idx.items()}

    def add_token(self, token):
        """Add `token` if unseen; return its index either way."""
        existing = self.token_to_idx.get(token)
        if existing is not None:
            return existing
        index = len(self.token_to_idx)
        self.token_to_idx[token] = index
        self.idx_to_token[index] = token
        return index

    def add_many(self, tokens):
        """Add each token in order; return the list of their indexes."""
        return [self.add_token(token) for token in tokens]

    def lookup_token(self, token):
        """Return the index of `token` (KeyError if absent)."""
        return self.token_to_idx[token]

    def lookup_index(self, index):
        """Return the token stored at `index` (KeyError if absent)."""
        return self.idx_to_token[index]

    def to_serializable(self):
        """Return a JSON-serializable dict of constructor kwargs."""
        return {'token_to_idx': self.token_to_idx}

    @classmethod
    def from_serializable(cls, contents):
        """Rebuild a vocabulary from `to_serializable` output."""
        return cls(**contents)

    def __len__(self):
        return len(self.token_to_idx)
class SeqVocabulary(Vocabulary):
    """Sequence vocabulary: adds pad/unk/sos/eos special tokens.

    The special tokens are inserted first, in the fixed order
    pad(0), unk(1), sos(2), eos(3) for a fresh vocabulary.
    """

    def __init__(self, token_to_idx=None, unk_token='<unk>',
                 pad_token='<pad>', sos_token='<s>',
                 eos_token='</s>'):
        super(SeqVocabulary, self).__init__(token_to_idx)
        self.pad_token = pad_token
        self.unk_token = unk_token
        self.sos_token = sos_token
        self.eos_token = eos_token
        # Register the specials (no-ops if already present in token_to_idx).
        self.pad_idx = self.add_token(self.pad_token)
        self.unk_idx = self.add_token(self.unk_token)
        self.sos_idx = self.add_token(self.sos_token)
        self.eos_idx = self.add_token(self.eos_token)

    def to_serializable(self):
        """Extend the base serialization with the special-token strings."""
        contents = super(SeqVocabulary, self).to_serializable()
        contents.update({'unk_token': self.unk_token,
                         'pad_token': self.pad_token,
                         'sos_token': self.sos_token,
                         'eos_token': self.eos_token})
        return contents

    @classmethod
    def from_serializable(cls, contents):
        return cls(**contents)

    def lookup_token(self, token):
        """Return the index of `token`, falling back to the <unk> index."""
        if token in self.token_to_idx:
            return self.token_to_idx[token]
        return self.unk_idx
class MorphFeaturizer:
    """Morphological Featurizer Class.

    Wraps a CALIMA Star analyzer + MLE disambiguator and caches, per word,
    a 4-dimensional source/gender feature vector.
    """
    def __init__(self, analyzer_db_path):
        self.db = CalimaStarDB(analyzer_db_path)
        self.analyzer = CalimaStarAnalyzer(self.db, cache_size=46000)
        self.disambiguator = MLEDisambiguator(self.analyzer)
        # word -> list of 4 floats; populated lazily by featurize().
        self.w_to_features = {}

    def featurize(self, sentence):
        """
        Args:
            - sentence (str): a sentence in Arabic
        Returns:
            - a dictionary of word to vector mapping for each word in the sentence.
              Each vector will be a one-hot representing the following features:
              [lex+m lex+f spvar+m spvar+f]
        """
        # using the MLEDisambiguator to get the analyses
        disambiguations = self.disambiguator.disambiguate(sentence.split(' '), top=1)
        # disambiguations is a list of DisambiguatedWord objects
        # each DisambiguatedWord object is a tuple of: (word, scored_analyses)
        # scored_analyses is a list of ScoredAnalysis objects
        # each ScoredAnalysis object is a tuple of: (score, analysis)
        for disambig in disambiguations:
            word, scored_analyses = disambig
            # Only featurize words we haven't cached yet.
            if word not in self.w_to_features:
                self.w_to_features[word] = list()
                if scored_analyses:
                    for scored_analysis in scored_analyses:
                        # each analysis will have a vector
                        score, analysis = scored_analysis
                        features = np.zeros(4)
                        # getting the source and gender features
                        src = analysis['source']
                        func_gen = analysis['gen']
                        #form_gen = analysis['form_gen']
                        # functional gender features
                        if src == 'lex' and func_gen == 'm':
                            features[0] = 1
                        elif src == 'lex' and func_gen == 'f':
                            features[1] = 1
                        elif src == 'spvar' and func_gen == 'm':
                            features[2] = 1
                        elif src == 'spvar' and func_gen == 'f':
                            features[3] = 1
                        # form gender features
                        #if src == 'lex' and form_gen == 'm':
                        #    features[0] = 1
                        #elif src == 'lex' and form_gen == 'f':
                        #    features[1] = 1
                        #elif src == 'spvar' and form_gen == 'm':
                        #    features[2] = 1
                        #elif src == 'spvar' and form_gen == 'f':
                        #    features[3] = 1
                        self.w_to_features[word].append(features)
                    # squashing all the vectors into one
                    self.w_to_features[word] = np.array(self.w_to_features[word])
                    self.w_to_features[word] = self.w_to_features[word].sum(axis=0)
                    # replacing all the elements > with 1
                    self.w_to_features[word][self.w_to_features[word] > 0] = 1
                    # replacing all the 0 elements with 1e-6
                    self.w_to_features[word][self.w_to_features[word] == 0] = 1e-6
                    self.w_to_features[word] = self.w_to_features[word].tolist()
                else:
                    # No analysis available: near-zero vector as a neutral default.
                    self.w_to_features[word] = np.full((4), 1e-6).tolist()

    def featurize_sentences(self, sentences):
        """Featurizes a list of sentences"""
        for sentence in sentences:
            self.featurize(sentence)

    def to_serializable(self):
        """Return a JSON-serializable dict of the cached features."""
        return {'morph_features': self.w_to_features}

    def from_serializable(self, contents):
        """Replace the cache with previously serialized features."""
        self.w_to_features = contents['morph_features']

    def save_morph_features(self, path):
        """Dump the cached features to `path` as UTF-8 JSON."""
        with open(path, mode='w', encoding='utf8') as f:
            return json.dump(self.to_serializable(), f, ensure_ascii=False)

    def load_morph_features(self, path):
        """Load cached features previously written by save_morph_features."""
        with open(path) as f:
            return self.from_serializable(json.load(f))

    def create_morph_embeddings(self, word_vocab):
        """Creating a morphological features embedding matrix"""
        morph_features = self.w_to_features
        # Note: morph_features will have all the words in word_vocab
        # except: <s>, <pad>, <unk>, </s>, ' '
        # Rows default to 1e-6 for words without cached features.
        morph_embedding_matrix = torch.ones((len(word_vocab), 4)) * 1e-6
        for word in word_vocab.token_to_idx:
            if word in morph_features:
                index = word_vocab.lookup_token(word)
                morph_embedding_matrix[index] = torch.tensor(morph_features[word],
                                                             dtype=torch.float64)
        return morph_embedding_matrix
def create_gender_embeddings(trg_gender_vocab):
    """Creates one-hot vectors gender embeddings.

    Builds a (V, V) float32 matrix whose 'M' row is [1, 1e-6] and whose 'F'
    row is [1e-6, 1]; rows for any other tokens stay all-zero.
    """
    vocab_size = len(trg_gender_vocab)
    matrix = torch.zeros((vocab_size, vocab_size), dtype=torch.float32)
    male_row = trg_gender_vocab.lookup_token('M')
    female_row = trg_gender_vocab.lookup_token('F')
    matrix[male_row] = torch.tensor([1, 1e-6], dtype=torch.float32)
    matrix[female_row] = torch.tensor([1e-6, 1], dtype=torch.float32)
    return matrix
|
{"hexsha": "9f9bb2d5ee31df14deb8ff7ae9dba5ed190b6613", "size": 12245, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils/data_utils.py", "max_stars_repo_name": "CAMeL-Lab/gender-reinflection", "max_stars_repo_head_hexsha": "006de318a326c8ea67d610adb30d3e0a8d6e59db", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2020-12-02T12:44:26.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-19T01:42:33.000Z", "max_issues_repo_path": "utils/data_utils.py", "max_issues_repo_name": "CAMeL-Lab/gender-reinflection", "max_issues_repo_head_hexsha": "006de318a326c8ea67d610adb30d3e0a8d6e59db", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "utils/data_utils.py", "max_forks_repo_name": "CAMeL-Lab/gender-reinflection", "max_forks_repo_head_hexsha": "006de318a326c8ea67d610adb30d3e0a8d6e59db", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.3682432432, "max_line_length": 123, "alphanum_fraction": 0.5907717436, "include": true, "reason": "import numpy", "num_tokens": 2715}
|
-- A function that is both idempotent and involutive must be the identity.
module IdemInvo where

open import Relation.Binary.PropositionalEquality

module MainResult
  (A : Set)
  (f : A → A)
  (idem : ∀ x → f (f x) ≡ f x)   -- idempotence: applying f twice = once
  (invo : ∀ x → f (f x) ≡ x)     -- involutivity: applying f twice = identity
  where

  -- an idempotent and involutive function is an identity function:
  -- f x ≡ f (f x) (idempotence, reversed) and f (f x) ≡ x (involutivity).
  iden : ∀ x → f x ≡ x
  iden x = trans (sym (idem x)) (invo x)
|
{"hexsha": "f615f5affe8428621df82b16cb8316583e03c572", "size": 318, "ext": "agda", "lang": "Agda", "max_stars_repo_path": "IdemInvo.agda", "max_stars_repo_name": "zaklogician/IdemInvo", "max_stars_repo_head_hexsha": "44f16597c9ef9596f6dc1b628848a3a74fa9a19b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2015-11-19T01:54:50.000Z", "max_stars_repo_stars_event_max_datetime": "2015-11-19T01:54:50.000Z", "max_issues_repo_path": "IdemInvo.agda", "max_issues_repo_name": "zaklogician/IdemInvo", "max_issues_repo_head_hexsha": "44f16597c9ef9596f6dc1b628848a3a74fa9a19b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "IdemInvo.agda", "max_forks_repo_name": "zaklogician/IdemInvo", "max_forks_repo_head_hexsha": "44f16597c9ef9596f6dc1b628848a3a74fa9a19b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.2, "max_line_length": 66, "alphanum_fraction": 0.6132075472, "num_tokens": 118}
|
# coding: utf-8
# /*##########################################################################
#
# Copyright (c) 2016 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
__authors__ = ["S Thery, M Glass, M Sanchez del Rio - ESRF ISDD Advanced Analysis and Modelling"]
__license__ = "MIT"
__date__ = "31/08/2016"
import unittest
import numpy as np
import scipy.integrate as integrate
import scipy.constants as codata
import matplotlib.pyplot as plt
from pySRU.MagneticStructureUndulatorPlane import MagneticStructureUndulatorPlane as Undulator
from pySRU.ElectronBeam import ElectronBeam
from pySRU.SourceBendingmagnet import BENDING_MAGNET as BM
from pySRU.TrajectoryFactory import TrajectoryFactory,TRAJECTORY_METHOD_ANALYTIC, TRAJECTORY_METHOD_ODE
from pySRU.RadiationFactory import RadiationFactory , RADIATION_METHOD_APPROX_FARFIELD,RADIATION_METHOD_NEAR_FIELD
from pySRU.Simulation import Simulation,create_simulation
class MagneticFieldTest(unittest.TestCase):
    """Checks that a simulation's undulator magnetic field is a cosine of the
    expected amplitude/period and integrates to ~0 over the device length."""

    def create_magn_field_undulator_test(self, magnetic_structure, electron_beam, method_traj):
        """Build a simulation and verify its By field against the analytic
        cosine Bo*cos(2*pi*z/lambda_u), plus first and second field integrals.
        """
        sim_test = create_simulation(magnetic_structure=magnetic_structure, electron_beam=electron_beam,
                                     traj_method=method_traj, rad_method=RADIATION_METHOD_APPROX_FARFIELD)
        print('create')
        source_test = sim_test.source
        # Zo: initial longitudinal position of the trajectory (upstream, so < 0).
        Zo = sim_test.trajectory_fact.initial_condition[5]
        Bo = source_test.magnetic_field_strength()
        lambda_u = source_test.magnetic_structure.period_length
        self.assertTrue(Zo < 0.)
        Zo_analitic = source_test.magnetic_structure.length * 0.5
        Z_test = np.linspace(-Zo_analitic, Zo_analitic, sim_test.trajectory_fact.Nb_pts)
        # Pointwise comparison against the analytic cosine, within 0.1% of Bo.
        diff_mf = np.abs(source_test.magnetic_field.By(z=Z_test, y=0.0, x=0.0) -
                         Bo * np.cos((2.0 * np.pi / lambda_u) * Z_test))
        self.assertTrue(all(diff_mf < np.abs(Bo) * 1e-3))
        print('integration 1')
        # First field integral over the device must vanish (no net deflection).
        int1 = integrate.quad((lambda z: source_test.magnetic_field.By(z=z, y=0.0, x=0.0)), Zo, -Zo,
                              limit=int(source_test.choose_nb_pts_trajectory(2)))
        self.assertAlmostEqual(int1[0], 0.0, 2)
        print('integration 2')  # TODO: doesn't work — always gives zero even when it is wrong
        # Second (z-weighted) integral must also vanish (no net displacement).
        int2 = integrate.quad((lambda z: z * source_test.magnetic_field.By(z=z, y=0.0, x=0.0)), Zo, -Zo,
                              limit=int(source_test.choose_nb_pts_trajectory(2)))
        print(int2[0])
        self.assertAlmostEqual(int2[0], 0.0)

    # doesn't work ... (kept for reference; bending-magnet variant of the test)
    # def create_magn_field_test_BM(self, BM, method_traj, formule=1):
    #     distance = 250.0
    #     Xmax = distance * 1e-3
    #     Ymax = distance * 1e-3
    #
    #     traj_test = TrajectoryFactory(Nb_pts=2000, method=method_traj)
    #     rad_test = RadiationFactory(method=RADIATION_METHOD_APPROX_FARFIELD, omega=BM.omega1(), Nb_pts=100,
    #                                 formula=formule)
    #     sim_test = create_simulation(parameter=BM, trajectory_fact=traj_test, radiation_fact=rad_test,
    #                                  distance=distance, X_max=Xmax, Y_max=Ymax)
    #     if method_traj == TRAJECTORY_METHOD_ANALYTIC:
    #         Zo = BM.Zo_analitic()
    #     else:
    #         Zo = BM.Zo_symetry()
    #     self.assertEqual(sim_test.magnetic_field.z[0], Zo)
    #     self.assertEqual(sim_test.magnetic_field.z[-1], -Zo)
    #     self.assertEqual(len(sim_test.magnetic_field.z), sim_test.trajectory_fact.Nb_pts)
    #     Z_test = np.linspace(BM.Zo_analitic(), -BM.Zo_analitic(), sim_test.trajectory_fact.Nb_pts)
    #     diff_mf = np.abs(sim_test.magnetic_field.By(z=Z_test, y=0.0, x=0.0) -
    #                      BM.Bo * np.cos((2.0 * np.pi / BM.lambda_u) * Z_test))
    #     self.assertTrue(all(diff_mf < np.abs(BM.Bo) * 1e-3))
    #     int1 = integrate.quad((lambda z: sim_test.magnetic_field.By(z=z, y=0.0, x=0.0)),
    #                           sim_test.magnetic_field.z[0],
    #                           sim_test.magnetic_field.z[-1])
    #     self.assertAlmostEqual(int1[0], 0.0, 5)
    #     int2 = integrate.quad((lambda z1: integrate.quad((lambda z: sim_test.magnetic_field.By(z=z, y=0.0, x=0.0)),
    #                                                      sim_test.magnetic_field.z[0], z1)[0])
    #                           , sim_test.magnetic_field.z[0], sim_test.magnetic_field.z[-1])
    #     self.assertAlmostEqual(int2[0], 0.0, 5)

    def test_magn_field(self):
        """Run the undulator field check for a small test device and ESRF-18."""
        beam_test = ElectronBeam(Electron_energy=1.3, I_current=1.0)
        beam_ESRF = ElectronBeam(Electron_energy=6.0, I_current=0.2)
        und_test = Undulator(K=1.87, period_length=0.035, length=0.035 * 14)
        ESRF18 = Undulator(K=1.68, period_length=0.018, length=2.)
        self.create_magn_field_undulator_test(magnetic_structure=und_test, electron_beam=beam_test,
                                              method_traj=TRAJECTORY_METHOD_ODE)
        print("und_test, ok")
        self.create_magn_field_undulator_test(magnetic_structure=ESRF18, electron_beam=beam_ESRF,
                                              method_traj=TRAJECTORY_METHOD_ODE)
        print("esrf18, ok")
        # self.create_magn_field_test_BM(BM=ESRFBM, method_traj=TRAJECTORY_METHOD_ODE, formule=1)
        # print("esrf BM ok")
|
{"hexsha": "879128a31825d2b3718814bfe58d3f401dadf254", "size": 6395, "ext": "py", "lang": "Python", "max_stars_repo_path": "pySRU/tests/MagneticFieldTest.py", "max_stars_repo_name": "SophieTh/und_Sophie_2016", "max_stars_repo_head_hexsha": "28e1520e86f342cf35f862fd4bf56c51dc191b91", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2016-08-23T19:36:36.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-26T14:47:55.000Z", "max_issues_repo_path": "pySRU/tests/MagneticFieldTest.py", "max_issues_repo_name": "SophieTh/und_Sophie_2016", "max_issues_repo_head_hexsha": "28e1520e86f342cf35f862fd4bf56c51dc191b91", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2016-09-05T20:31:33.000Z", "max_issues_repo_issues_event_max_datetime": "2018-08-29T08:12:47.000Z", "max_forks_repo_path": "pySRU/tests/MagneticFieldTest.py", "max_forks_repo_name": "SophieTh/und_Sophie_2016", "max_forks_repo_head_hexsha": "28e1520e86f342cf35f862fd4bf56c51dc191b91", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2017-03-22T15:03:08.000Z", "max_forks_repo_forks_event_max_datetime": "2018-10-17T13:20:09.000Z", "avg_line_length": 52.4180327869, "max_line_length": 117, "alphanum_fraction": 0.6614542611, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1643}
|
[STATEMENT]
lemma in_pdata_pairs_to_listI2:
assumes "(f, g) \<in> set ps"
shows "monom_mult (1 / lc (fst g)) ((lcs (lp (fst f)) (lp (fst g))) - (lp (fst g)))
(fst g) \<in> set (pdata_pairs_to_list ps)" (is "?m \<in> _")
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. monom_mult ((1::'b) / lc (fst g)) (lcs (lp (fst f)) (lp (fst g)) - lp (fst g)) (fst g) \<in> set (pdata_pairs_to_list ps)
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
(f, g) \<in> set ps
goal (1 subgoal):
1. monom_mult ((1::'b) / lc (fst g)) (lcs (lp (fst f)) (lp (fst g)) - lp (fst g)) (fst g) \<in> set (pdata_pairs_to_list ps)
[PROOF STEP]
proof (induct ps)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. (f, g) \<in> set [] \<Longrightarrow> monom_mult ((1::'b) / lc (fst g)) (lcs (lp (fst f)) (lp (fst g)) - lp (fst g)) (fst g) \<in> set (pdata_pairs_to_list [])
2. \<And>a ps. \<lbrakk>(f, g) \<in> set ps \<Longrightarrow> monom_mult ((1::'b) / lc (fst g)) (lcs (lp (fst f)) (lp (fst g)) - lp (fst g)) (fst g) \<in> set (pdata_pairs_to_list ps); (f, g) \<in> set (a # ps)\<rbrakk> \<Longrightarrow> monom_mult ((1::'b) / lc (fst g)) (lcs (lp (fst f)) (lp (fst g)) - lp (fst g)) (fst g) \<in> set (pdata_pairs_to_list (a # ps))
[PROOF STEP]
case Nil
[PROOF STATE]
proof (state)
this:
(f, g) \<in> set []
goal (2 subgoals):
1. (f, g) \<in> set [] \<Longrightarrow> monom_mult ((1::'b) / lc (fst g)) (lcs (lp (fst f)) (lp (fst g)) - lp (fst g)) (fst g) \<in> set (pdata_pairs_to_list [])
2. \<And>a ps. \<lbrakk>(f, g) \<in> set ps \<Longrightarrow> monom_mult ((1::'b) / lc (fst g)) (lcs (lp (fst f)) (lp (fst g)) - lp (fst g)) (fst g) \<in> set (pdata_pairs_to_list ps); (f, g) \<in> set (a # ps)\<rbrakk> \<Longrightarrow> monom_mult ((1::'b) / lc (fst g)) (lcs (lp (fst f)) (lp (fst g)) - lp (fst g)) (fst g) \<in> set (pdata_pairs_to_list (a # ps))
[PROOF STEP]
thus ?case
[PROOF STATE]
proof (prove)
using this:
(f, g) \<in> set []
goal (1 subgoal):
1. monom_mult ((1::'b) / lc (fst g)) (lcs (lp (fst f)) (lp (fst g)) - lp (fst g)) (fst g) \<in> set (pdata_pairs_to_list [])
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
monom_mult ((1::'b) / lc (fst g)) (lcs (lp (fst f)) (lp (fst g)) - lp (fst g)) (fst g) \<in> set (pdata_pairs_to_list [])
goal (1 subgoal):
1. \<And>a ps. \<lbrakk>(f, g) \<in> set ps \<Longrightarrow> monom_mult ((1::'b) / lc (fst g)) (lcs (lp (fst f)) (lp (fst g)) - lp (fst g)) (fst g) \<in> set (pdata_pairs_to_list ps); (f, g) \<in> set (a # ps)\<rbrakk> \<Longrightarrow> monom_mult ((1::'b) / lc (fst g)) (lcs (lp (fst f)) (lp (fst g)) - lp (fst g)) (fst g) \<in> set (pdata_pairs_to_list (a # ps))
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>a ps. \<lbrakk>(f, g) \<in> set ps \<Longrightarrow> monom_mult ((1::'b) / lc (fst g)) (lcs (lp (fst f)) (lp (fst g)) - lp (fst g)) (fst g) \<in> set (pdata_pairs_to_list ps); (f, g) \<in> set (a # ps)\<rbrakk> \<Longrightarrow> monom_mult ((1::'b) / lc (fst g)) (lcs (lp (fst f)) (lp (fst g)) - lp (fst g)) (fst g) \<in> set (pdata_pairs_to_list (a # ps))
[PROOF STEP]
case (Cons p ps)
[PROOF STATE]
proof (state)
this:
(f, g) \<in> set ps \<Longrightarrow> monom_mult ((1::'b) / lc (fst g)) (lcs (lp (fst f)) (lp (fst g)) - lp (fst g)) (fst g) \<in> set (pdata_pairs_to_list ps)
(f, g) \<in> set (p # ps)
goal (1 subgoal):
1. \<And>a ps. \<lbrakk>(f, g) \<in> set ps \<Longrightarrow> monom_mult ((1::'b) / lc (fst g)) (lcs (lp (fst f)) (lp (fst g)) - lp (fst g)) (fst g) \<in> set (pdata_pairs_to_list ps); (f, g) \<in> set (a # ps)\<rbrakk> \<Longrightarrow> monom_mult ((1::'b) / lc (fst g)) (lcs (lp (fst f)) (lp (fst g)) - lp (fst g)) (fst g) \<in> set (pdata_pairs_to_list (a # ps))
[PROOF STEP]
from Cons(2)
[PROOF STATE]
proof (chain)
picking this:
(f, g) \<in> set (p # ps)
[PROOF STEP]
have "p = (f, g) \<or> (f, g) \<in> set ps"
[PROOF STATE]
proof (prove)
using this:
(f, g) \<in> set (p # ps)
goal (1 subgoal):
1. p = (f, g) \<or> (f, g) \<in> set ps
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
p = (f, g) \<or> (f, g) \<in> set ps
goal (1 subgoal):
1. \<And>a ps. \<lbrakk>(f, g) \<in> set ps \<Longrightarrow> monom_mult ((1::'b) / lc (fst g)) (lcs (lp (fst f)) (lp (fst g)) - lp (fst g)) (fst g) \<in> set (pdata_pairs_to_list ps); (f, g) \<in> set (a # ps)\<rbrakk> \<Longrightarrow> monom_mult ((1::'b) / lc (fst g)) (lcs (lp (fst f)) (lp (fst g)) - lp (fst g)) (fst g) \<in> set (pdata_pairs_to_list (a # ps))
[PROOF STEP]
thus ?case
[PROOF STATE]
proof (prove)
using this:
p = (f, g) \<or> (f, g) \<in> set ps
goal (1 subgoal):
1. monom_mult ((1::'b) / lc (fst g)) (lcs (lp (fst f)) (lp (fst g)) - lp (fst g)) (fst g) \<in> set (pdata_pairs_to_list (p # ps))
[PROOF STEP]
proof
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. p = (f, g) \<Longrightarrow> monom_mult ((1::'b) / lc (fst g)) (lcs (lp (fst f)) (lp (fst g)) - lp (fst g)) (fst g) \<in> set (pdata_pairs_to_list (p # ps))
2. (f, g) \<in> set ps \<Longrightarrow> monom_mult ((1::'b) / lc (fst g)) (lcs (lp (fst f)) (lp (fst g)) - lp (fst g)) (fst g) \<in> set (pdata_pairs_to_list (p # ps))
[PROOF STEP]
assume "p = (f, g)"
[PROOF STATE]
proof (state)
this:
p = (f, g)
goal (2 subgoals):
1. p = (f, g) \<Longrightarrow> monom_mult ((1::'b) / lc (fst g)) (lcs (lp (fst f)) (lp (fst g)) - lp (fst g)) (fst g) \<in> set (pdata_pairs_to_list (p # ps))
2. (f, g) \<in> set ps \<Longrightarrow> monom_mult ((1::'b) / lc (fst g)) (lcs (lp (fst f)) (lp (fst g)) - lp (fst g)) (fst g) \<in> set (pdata_pairs_to_list (p # ps))
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. monom_mult ((1::'b) / lc (fst g)) (lcs (lp (fst f)) (lp (fst g)) - lp (fst g)) (fst g) \<in> set (pdata_pairs_to_list (p # ps))
[PROOF STEP]
by (simp add: \<open>p = (f, g)\<close> Let_def)
[PROOF STATE]
proof (state)
this:
monom_mult ((1::'b) / lc (fst g)) (lcs (lp (fst f)) (lp (fst g)) - lp (fst g)) (fst g) \<in> set (pdata_pairs_to_list (p # ps))
goal (1 subgoal):
1. (f, g) \<in> set ps \<Longrightarrow> monom_mult ((1::'b) / lc (fst g)) (lcs (lp (fst f)) (lp (fst g)) - lp (fst g)) (fst g) \<in> set (pdata_pairs_to_list (p # ps))
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. (f, g) \<in> set ps \<Longrightarrow> monom_mult ((1::'b) / lc (fst g)) (lcs (lp (fst f)) (lp (fst g)) - lp (fst g)) (fst g) \<in> set (pdata_pairs_to_list (p # ps))
[PROOF STEP]
assume "(f, g) \<in> set ps"
[PROOF STATE]
proof (state)
this:
(f, g) \<in> set ps
goal (1 subgoal):
1. (f, g) \<in> set ps \<Longrightarrow> monom_mult ((1::'b) / lc (fst g)) (lcs (lp (fst f)) (lp (fst g)) - lp (fst g)) (fst g) \<in> set (pdata_pairs_to_list (p # ps))
[PROOF STEP]
hence "?m \<in> set (pdata_pairs_to_list ps)"
[PROOF STATE]
proof (prove)
using this:
(f, g) \<in> set ps
goal (1 subgoal):
1. monom_mult ((1::'b) / lc (fst g)) (lcs (lp (fst f)) (lp (fst g)) - lp (fst g)) (fst g) \<in> set (pdata_pairs_to_list ps)
[PROOF STEP]
by (rule Cons(1))
[PROOF STATE]
proof (state)
this:
monom_mult ((1::'b) / lc (fst g)) (lcs (lp (fst f)) (lp (fst g)) - lp (fst g)) (fst g) \<in> set (pdata_pairs_to_list ps)
goal (1 subgoal):
1. (f, g) \<in> set ps \<Longrightarrow> monom_mult ((1::'b) / lc (fst g)) (lcs (lp (fst f)) (lp (fst g)) - lp (fst g)) (fst g) \<in> set (pdata_pairs_to_list (p # ps))
[PROOF STEP]
thus ?thesis
[PROOF STATE]
proof (prove)
using this:
monom_mult ((1::'b) / lc (fst g)) (lcs (lp (fst f)) (lp (fst g)) - lp (fst g)) (fst g) \<in> set (pdata_pairs_to_list ps)
goal (1 subgoal):
1. monom_mult ((1::'b) / lc (fst g)) (lcs (lp (fst f)) (lp (fst g)) - lp (fst g)) (fst g) \<in> set (pdata_pairs_to_list (p # ps))
[PROOF STEP]
by (simp add: Let_def)
[PROOF STATE]
proof (state)
this:
monom_mult ((1::'b) / lc (fst g)) (lcs (lp (fst f)) (lp (fst g)) - lp (fst g)) (fst g) \<in> set (pdata_pairs_to_list (p # ps))
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
monom_mult ((1::'b) / lc (fst g)) (lcs (lp (fst f)) (lp (fst g)) - lp (fst g)) (fst g) \<in> set (pdata_pairs_to_list (p # ps))
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 4035, "file": "Groebner_Bases_F4", "length": 23}
|
%----------------------------------------------------------------------------------------
% PACKAGES AND OTHER DOCUMENT CONFIGURATIONS
%----------------------------------------------------------------------------------------
\documentclass[letterpaper]{twentysecondcv} % a4paper for A4
\awards {
\begin{itemize}
	\item Participant of the 2014 and 2015 Ukraine ACM ICPC.
\item 2013: 27th All Ukrainian Olympiad in Informatics, Lugansk, Ukraine\\ \textbf{third diploma}.
\end{itemize}
}
\hackatons {
\begin{itemize}
\item
{2017 Kyiv Computer Vision Hackathon: Pedestrian Safety \giturl{https://github.com/philipshurpik/pedestrian-detection-hackathon}
	\\ \\ My team and I designed and implemented pedestrian detection using an SSD neural network, estimating the distance to pedestrians from camera properties and road perspective.
}
\end{itemize}
}
\personal {
\begin{itemize}
\item Team player, purposeful, responsible, sociable, patient, disciplined and fast learner.
\end{itemize}
}
\goal {
\begin{itemize}
\item Develop and master my technical and soft skills.
\item Try to make an impact.
\item Explore world.
\end{itemize}
}
%----------------------------------------------------------------------------------------
% PERSONAL INFORMATION
%----------------------------------------------------------------------------------------
% If you don't need one or more of the below, just remove the content leaving the command, e.g. \cvnumberphone{}
\cvname{Vitalii Vrublevskyi} % Your name
\cvjobtitle{Software Engineer} % Job title/career
\cvlinkedin{https://www.linkedin.com/in/vitalii-vrublevskyi}% LinkedIn
\cvnumberphone{+380680550459} % Phone number
\cvmail{vitalii.vrublevskyi@gmail.com} % Email address
\cvsite{github.com/vrublevskiyvitaliy} % Personal website
\cvhaker{https://www.hackerrank.com/vrublevskyi}
\cvleetcode{https://leetcode.com/vitalii-vrublevskyi}
\cvcodeforce{http://codeforces.com/profile/Steel_Rat11}
\cvkaggle{https://www.kaggle.com/steelrat11}
%----------------------------------------------------------------------------------------
\newcommand\skills{
\smartdiagram[bubble diagram]{
\textbf{Programming},
\textbf{Data}\\\textbf{Structures},
\textbf{Algorithms},
\textbf{Machine}\\\textbf{learning},
\textbf{C++},
\textbf{Python},
\textbf{JS},
\textbf{MySQL},
\textbf{PHP}
}
\\
}
\begin{document}
\makeprofile % Print the sidebar
%----------------------------------------------------------------------------------------
% EDUCATION
%----------------------------------------------------------------------------------------
\section{Education}
\begin{twenty} % Environment for a list with descriptions
\twentyitem
{Expected \\ June 2019}
{Master degree in Informatics}
{Kyiv, Ukraine}
{Taras Shevchenko National University of Kyiv}
{Faculty of Computer Science and Cybernetics}
\twentyitem
{June 2017}
{Bachelor degree with Honours in Informatics}
{Kyiv, Ukraine}
{Taras Shevchenko National University of Kyiv}
{Faculty of Computer Science and Cybernetics}
%\twentyitem{<dates>}{<title>}{<organization>}{<location>}{<description>}
\end{twenty}
\section{Skills and languages}
\begin{multicols}{2}
\begin{itemize}
\item {Data Structures}
\item {Algorithms}
\item {Problem solving}
\item {Object-oriented design and patterns}
\item {Parallel programming}
\item {C++}
\item {Python}
\item {JS}
\item {PHP}
\end{itemize}
\end{multicols}
%----------------------------------------------------------------------------------------
% Projects
%----------------------------------------------------------------------------------------
\section{Projects}
\begin{itemize}
\item \projectItem
{Implemented structured data extraction from unstructured text \giturl{https://github.com/vrublevskiyvitaliy/Law_Text_Segmentaion}}
    {The main goal of the project is to divide law documents into sections. My team achieved this by parsing the documents, building the structure of the lists, and analysing the semantic closeness of paragraphs.}
\item \projectItem
{Developed a system for Named Entity Recognition}
    {My team chose the CRF method, researched which features could be used and which named-entity annotations give better results, and tested their stability on the Spanish and Dutch languages.}
\item \projectItem
    {Developed a system based on parallel programming}
    {Implemented a parallel Dijkstra algorithm using MPI and OpenMP. I explored CUDA for building a K-d tree and used the university's PARCS approach to solve the knapsack problem.}
\end{itemize}
\projectItem
{See my other projects at \giturl{https://github.com/vrublevskiyvitaliy} }
{}
%----------------------------------------------------------------------------------------
%	Experience
%----------------------------------------------------------------------------------------
\section{Experience}
\begin{twenty} % Environment for a list with descriptions
\twentyitemwithoutbegin
{}
{Software Engineer (Remote)}
{Sep 2015 - Dec 2016 }
{MP5 Project - WeDesign.Live, London, UK}
    {Web-based live collaborative platform for designing with slicer software.
{JavaScript, C++, Python, Computational Geometry, Linear Algebra. Developed JavaScript side of designer, architecture for constructive solid geometry (CSG) technique which decreased calculation time.
}
}
\end{twenty}
\section{Publications}
\begin{twenty}
\twentyitemwithoutbegin
{}
{Constructing a unified algorithmic platform based on Voronoi diagram.}
{2017}
{PDMU-2017 XXIX International Conference}
{}
\end{twenty}
\begin{twenty}
\twentyitemwithoutbegin
{}
{Greedy approach for solving Art Gallery Problem}
{2017}
{XV International conference "Shevchenkivska Spring 2017"}
{}
\end{twenty}
\end{document}
|
{"hexsha": "c4dda7e9b9622104f9907a1e386c691432a7d387", "size": 6057, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "template.tex", "max_stars_repo_name": "vrublevskiyvitaliy/infographic", "max_stars_repo_head_hexsha": "c9a5a2e1b59af715a9fdd4675c7b8e2950b216d0", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-08-27T00:51:40.000Z", "max_stars_repo_stars_event_max_datetime": "2018-08-27T00:51:40.000Z", "max_issues_repo_path": "template.tex", "max_issues_repo_name": "vrublevskiyvitaliy/infographic", "max_issues_repo_head_hexsha": "c9a5a2e1b59af715a9fdd4675c7b8e2950b216d0", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "template.tex", "max_forks_repo_name": "vrublevskiyvitaliy/infographic", "max_forks_repo_head_hexsha": "c9a5a2e1b59af715a9fdd4675c7b8e2950b216d0", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.8103448276, "max_line_length": 207, "alphanum_fraction": 0.5922073634, "num_tokens": 1388}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.