arxiv_id stringlengths 0 16 | text stringlengths 10 1.65M |
|---|---|
# -*- coding: utf-8 -*-
from unittest import mock
import warnings
import pytest
import hypothesis as hyp
import hypothesis.strategies as hyp_st
import hypothesis.extra.numpy as hyp_np
import numpy as np
import numpy.testing as npt
import pandas as pd
import sympy as sp
from endaq.calc import shock
wn, fn, wi, fi, Q, d, T = sp.symbols('ωₙ, fₙ, ωᵢ, fᵢ, Q, ζ, T', real=True)
s = sp.Symbol('s', complex=True)
DAMP_RANGE = (1e-2, 1.)
def laplace(b, a, freqs, subs):
# first substitution
b = b.subs({wn: 2*sp.pi*fn, Q: 1/(2*d), s: sp.I*2*sp.pi*fi}).subs(subs)
a = a.subs({wn: 2*sp.pi*fn, Q: 1/(2*d), s: sp.I*2*sp.pi*fi}).subs(subs)
b_nums = sp.lambdify(fi, b)(freqs)
a_nums = sp.lambdify(fi, a)(freqs)
mag = abs(b_nums)/abs(a_nums)
phase = np.angle(b_nums) - np.angle(a_nums)
return mag, phase
def laplace_amplitude(b, a, freqs, subs):
# first substitution
b = b.subs({wn: 2*sp.pi*fn, Q: 1/(2*d), s: sp.I*2*sp.pi*fi}).subs(subs)
a = a.subs({wn: 2*sp.pi*fn, Q: 1/(2*d), s: sp.I*2*sp.pi*fi}).subs(subs)
mag = sp.lambdify(fi, abs(b)/abs(a))
return mag(freqs)
def laplace_phase(b, a, freqs, subs):
# first substitution
b = b.subs({wn: 2*sp.pi*fn, Q: 1/(2*d), s: sp.I*2*sp.pi*fi}).subs(subs)
a = a.subs({wn: 2*sp.pi*fn, Q: 1/(2*d), s: sp.I*2*sp.pi*fi}).subs(subs)
phase = sp.lambdify(fi, sp.atan2(*b.as_real_imag()[::-1]) - sp.atan2(*a.as_real_imag()[::-1]))
return phase(freqs)
def z_amplitude(b, a, freqs, dt):
z = sp.exp(-s*T)
b = sum([x*z**i for i, x in enumerate(b)])
a = sum([x*z**i for i, x in enumerate(a)])
# first substitution
b = b.subs({s: sp.I*2*sp.pi*fi, T: dt})
a = a.subs({s: sp.I*2*sp.pi*fi, T: dt})
mag = sp.lambdify(fi, abs(b)/abs(a))
return abs(mag(freqs))
def z_phase(b, a, freqs, dt):
z = sp.exp(-s*T)
b = sum([x*z**i for i, x in enumerate(b)])
a = sum([x*z**i for i, x in enumerate(a)])
# first substitution
b = b.subs({s: sp.I*2*sp.pi*fi, T: dt})
a = a.subs({s: sp.I*2*sp.pi*fi, T: dt})
phase = sp.lambdify(fi, sp.atan2(*b.as_real_imag()[::-1]) - sp.atan2(*a.as_real_imag()[::-1]))
return phase(freqs)
@hyp.given(
freq=hyp_st.floats(12.5, 1200),
damp=hyp_st.floats(*DAMP_RANGE, exclude_max=True),
)
def test_rel_displ_amp(freq, damp):
"""
Laplace domain transfer function:
a₂(s) -1
G(s) = ----- = ----------------
a₁(s) ωₙ*s
s² + ----- + ωₙ²
Q
With the amplitude response of:
|a₂(ωᵢ*j)|
|G(ωᵢ*j)| = ------------
|a₁(ωᵢ*j)|
"""
dt = 1e-4
omega = 2 * np.pi * freq
freqs = np.geomspace(1e-1, 1000, 10000)
la = laplace_amplitude(sp.sympify(-1), s**2 + wn*s/Q + wn**2, freqs, {fn: freq, d: damp})
za = z_amplitude(*shock._relative_displacement_coefficients(omega, 1/(2*damp), dt), freqs, dt)
npt.assert_allclose(za, la, rtol=.1, atol=1e-6)
@hyp.given(
freq=hyp_st.floats(12.5, 1200),
damp=hyp_st.floats(*DAMP_RANGE, exclude_max=True),
)
def test_rel_displ_phase(freq, damp):
"""
Laplace domain transfer function:
a₂(s) -1
G(s) = ----- = ----------------
a₁(s) ωₙ*s
s² + ----- + ωₙ²
Q
With the phase response of:
∠G(ωᵢ*j) = ∠a₂(ωᵢ*j) - ∠a₁(ωᵢ*j)
"""
dt = 1e-4
omega = 2*np.pi*freq
freqs = np.geomspace(1e-1, 1000, 10000)
la = laplace_phase(sp.sympify(-1), s**2 + wn*s/Q + wn**2, freqs, {fn:freq, d:damp})
za = z_phase(*shock._relative_displacement_coefficients(omega, 1/(2*damp), dt), freqs, dt)
npt.assert_allclose(za, la, rtol=.1, atol=1e-6)
@hyp.given(
freq=hyp_st.floats(12.5, 1000),
damp=hyp_st.floats(*DAMP_RANGE, exclude_max=True),
)
def test_rel_velocity_amp(freq, damp):
"""
Laplace domain transfer function:
a₂(s) -s
G(s) = ----- = ----------------
a₁(s) ωₙ*s
s² + ----- + ωₙ²
Q
With the amplitude response of:
|a₂(ωᵢ*j)|
|G(ωᵢ*j)| = ------------
|a₁(ωᵢ*j)|
"""
dt = 1e-4
omega = 2 * np.pi * freq
freqs = np.geomspace(1e-1, 1000, 10000)
la = laplace_amplitude(-s, s**2 + wn*s/Q + wn**2, freqs, {fn: freq, d: damp})
za = z_amplitude(*shock._relative_velocity_coefficients(omega, 1/(2*damp), dt), freqs, dt)
npt.assert_allclose(za, la, rtol=.1, atol=1e-6)
@hyp.given(
freq=hyp_st.floats(12.5, 1000),
damp=hyp_st.floats(*DAMP_RANGE, exclude_max=True),
)
def test_rel_velocity_phase(freq, damp):
"""
Laplace domain transfer function:
a₂(s) -s
G(s) = ----- = ----------------
a₁(s) ωₙ*s
s² + ----- + ωₙ²
Q
With the phase response of:
∠G(ωᵢ*j) = ∠a₂(ωᵢ*j) - ∠a₁(ωᵢ*j)
"""
dt = 1e-4
omega = 2*np.pi*freq
freqs = np.concatenate([np.geomspace(1e-1, freq*0.99), [], np.geomspace(freq*1.01, 2e3)])
la = laplace_phase(-s, s**2 + wn*s/Q + wn**2, freqs, {fn:freq, d:damp})
za = z_phase(*shock._relative_velocity_coefficients(omega, 1/(2*damp), dt), freqs, dt)
npt.assert_allclose(za, la, rtol=.1, atol=1e-6)
@hyp.given(
freq=hyp_st.floats(12.5, 1000),
damp=hyp_st.floats(*DAMP_RANGE, exclude_max=True),
)
def test_abs_accel_amp(freq, damp):
"""
Laplace domain transfer function:
ωₙ*s
----- + ωₙ²
a₂(s) Q
G(s) = ----- = ----------------
a₁(s) ωₙ*s
s² + ----- + ωₙ²
Q
With the amplitude response of:
|a₂(ωᵢ*j)|
|G(ωᵢ*j)| = ------------
|a₁(ωᵢ*j)|
"""
dt = 1e-4
omega = 2 * np.pi * freq
freqs = np.geomspace(1e-1, 1000, 10000)
la = laplace_amplitude(wn*s/Q + wn**2, s**2 + wn*s/Q + wn**2, freqs, {fn: freq, d: damp})
za = z_amplitude(*shock._absolute_acceleration_coefficients(omega, 1/(2*damp), dt), freqs, dt)
npt.assert_allclose(za, la, rtol=.1, atol=1e-6)
@hyp.given(
freq=hyp_st.floats(12.5, 1000),
damp=hyp_st.floats(*DAMP_RANGE, exclude_max=True),
)
def test_abs_accel_phase(freq, damp):
"""
ωₙ*s
----- + ωₙ²
a₂(s) Q
G(s) = ----- = ----------------
a₁(s) ωₙ*s
s² + ----- + ωₙ²
Q
With the phase response of:
∠G(ωᵢ*j) = ∠a₂(ωᵢ*j) - ∠a₁(ωᵢ*j)
"""
dt = 1e-4
omega = 2*np.pi*freq
freqs = np.concatenate([np.geomspace(1e-1, freq*0.99), [], np.geomspace(freq*1.01, 2e3)])
la = laplace_phase(wn*s/Q + wn**2, s**2 + wn*s/Q + wn**2, freqs, {fn:freq, d:damp})
za = z_phase(*shock._absolute_acceleration_coefficients(omega, 1/(2*damp), dt), freqs, dt)
npt.assert_allclose(za, la, rtol=.1, atol=1e-6)
@hyp.given(
freq=hyp_st.floats(12.5, 1200),
damp=hyp_st.floats(*DAMP_RANGE, exclude_max=True),
)
def test_pseudovelocity_amp(freq, damp):
"""
Laplace domain transfer function:
a₂(s) -ωₙ
G(s) = ----- = ----------------
a₁(s) ωₙ*s
s² + ----- + ωₙ²
Q
With the amplitude response of:
|a₂(ωᵢ*j)|
|G(ωᵢ*j)| = ------------
|a₁(ωᵢ*j)|
"""
dt = 1e-4
omega = 2 * np.pi * freq
freqs = np.geomspace(1e-1, 1000, 10000)
la = laplace_amplitude(-wn, s**2 + wn*s/Q + wn**2, freqs, {fn: freq, d: damp})
za = z_amplitude(*shock._pseudo_velocity_coefficients(omega, 1/(2*damp), dt), freqs, dt)
npt.assert_allclose(za, la, rtol=.1, atol=1e-6)
@hyp.given(
freq=hyp_st.floats(12.5, 1200),
damp=hyp_st.floats(*DAMP_RANGE, exclude_max=True),
)
def test_pseudovelocity_phase(freq, damp):
"""
Laplace domain transfer function:
a₂(s) -ωₙ
G(s) = ----- = ----------------
a₁(s) ωₙ*s
s² + ----- + ωₙ²
Q
With the phase response of:
∠G(ωᵢ*j) = ∠a₂(ωᵢ*j) - ∠a₁(ωᵢ*j)
"""
dt = 1e-4
omega = 2*np.pi*freq
freqs = np.geomspace(1e-1, 1000, 10000)
la = laplace_phase(-wn, s**2 + wn*s/Q + wn**2, freqs, {fn:freq, d:damp})
za = z_phase(*shock._pseudo_velocity_coefficients(omega, 1/(2*damp), dt), freqs, dt)
npt.assert_allclose(za, la, rtol=.1, atol=1e-6)
@hyp.given(
freq=hyp_st.floats(12.5, 1200),
damp=hyp_st.floats(*DAMP_RANGE, exclude_max=True),
)
def test_eq_static_accel_amp(freq, damp):
"""
Laplace domain transfer function:
a₂(s) -ωₙ²
G(s) = ----- = ----------------
a₁(s) ωₙ*s
s² + ----- + ωₙ²
Q
With the amplitude response of:
|a₂(ωᵢ*j)|
|G(ωᵢ*j)| = ------------
|a₁(ωᵢ*j)|
"""
dt = 1e-4
omega = 2 * np.pi * freq
freqs = np.geomspace(1e-1, 1000, 10000)
la = laplace_amplitude(-wn**2, s**2 + wn*s/Q + wn**2, freqs, {fn: freq, d: damp})
za = z_amplitude(*shock._relative_displacement_static_coefficients(omega, 1/(2*damp), dt), freqs, dt)
npt.assert_allclose(za, la, rtol=.1, atol=1e-6)
@hyp.given(
freq=hyp_st.floats(12.5, 1200),
damp=hyp_st.floats(*DAMP_RANGE, exclude_max=True),
)
def test_eq_static_accel_phase(freq, damp):
"""
Laplace domain transfer function:
a₂(s) -ωₙ²
G(s) = ----- = ----------------
a₁(s) ωₙ*s
s² + ----- + ωₙ²
Q
With the phase response of:
∠G(ωᵢ*j) = ∠a₂(ωᵢ*j) - ∠a₁(ωᵢ*j)
"""
dt = 1e-4
omega = 2*np.pi*freq
freqs = np.geomspace(1e-1, 1000, 10000)
la = laplace_phase(-wn**2, s**2 + wn*s/Q + wn**2, freqs, {fn:freq, d:damp})
za = z_phase(*shock._relative_displacement_static_coefficients(omega, 1/(2*damp), dt), freqs, dt)
npt.assert_allclose(za, la, rtol=.1, atol=1e-6)
@hyp.given(
df_accel=hyp_np.arrays(
dtype=np.float64,
shape=(40, 2),
elements=hyp_st.floats(1e-20, 1e20),
).map(
lambda array: pd.DataFrame(
np.concatenate([array, np.zeros_like(array)], axis=0),
index=np.arange(2 * array.shape[0]) * 1e-4,
)
),
freq=hyp_st.floats(1, 20),
damp=hyp_st.floats(1e-25, 1, exclude_max=True),
mode=hyp_st.sampled_from(["srs", "pvss"]),
aggregate_axes_two_sided=hyp_st.sampled_from(
[(False, False), (False, True), (True, False)]
),
)
def test_pseudo_velocity_zero_padding(
df_accel, freq, damp, mode, aggregate_axes_two_sided
):
aggregate_axes, two_sided = aggregate_axes_two_sided
# Check that the padding is all zeros
assert np.all(df_accel.iloc[40:] == 0)
# First, we calculate the PVSS of the data as-is
calc_result = shock.shock_spectrum(
df_accel.iloc[:40],
[freq],
damp=damp,
mode=mode,
aggregate_axes=aggregate_axes,
two_sided=two_sided,
)
# Now we re-run the PVSS on the full, zero-padded data
calc_result_padded = shock.shock_spectrum(
df_accel,
[freq],
damp=damp,
mode=mode,
aggregate_axes=aggregate_axes,
two_sided=two_sided,
)
# If the calculation is correct, there should be *no* amount of zero-padding
# that changes the result
if two_sided:
for i in range(2):
pd.testing.assert_frame_equal(calc_result[i], calc_result_padded[i])
else:
pd.testing.assert_frame_equal(calc_result, calc_result_padded)
@hyp.given(
df_pvss=hyp_np.arrays(
dtype=np.float64,
shape=(40, 2),
elements=hyp_st.floats(1e-20, 1e20),
).map(lambda array: pd.DataFrame(array, index=np.arange(1, 41))),
damp=hyp_st.floats(1e-25, 0.2),
)
def test_enveloping_half_sine(df_pvss, damp):
env_half_sine = shock.enveloping_half_sine(df_pvss, damp=damp)
hyp.note(f"pulse amplitude: {env_half_sine.amplitude}")
hyp.note(f"pulse duration: {env_half_sine.duration}")
pulse = env_half_sine.to_time_series()
pulse_pvss = shock.shock_spectrum(
pulse, freqs=df_pvss.index, damp=damp, mode="pvss"
)
# This is an approximation -> give the result a fudge-factor for correctness
assert (df_pvss / pulse_pvss).max().max() < 1.2
class TestHalfSineWavePulse:
@pytest.mark.parametrize(
"tstart, tstop, dt, tpulse, warning_type",
[
# dt warnings
(None, None, 0.12, 0, None), # dt < duration / 8 => OK
(None, None, 0.13, 0, UserWarning), # dt > duration / 8 => WARNING
# trange warnings
(None, 0.5, None, 0, UserWarning), # trange[1] < t0 + duration => WARNING
(0.5, None, None, 0, UserWarning), # trange[0] > t0 => WARNING
(0.5, None, None, 1, None), # OK
],
)
def test_to_time_series_warnings(self, tstart, tstop, dt, tpulse, warning_type):
env_half_sine = shock.HalfSineWavePulse(
amplitude=pd.Series([1]),
duration=pd.Series([1]),
)
if warning_type is None:
with warnings.catch_warnings():
warnings.simplefilter("error")
env_half_sine.to_time_series(
tstart=tstart, tstop=tstop, dt=dt, tpulse=tpulse
)
else:
with pytest.warns(warning_type):
env_half_sine.to_time_series(
tstart=tstart, tstop=tstop, dt=dt, tpulse=tpulse
)
def test_tuple_like(self):
env_half_sine = shock.HalfSineWavePulse(
amplitude=mock.sentinel.amplitude,
duration=mock.sentinel.duration,
)
ampl, T = env_half_sine
assert ampl == mock.sentinel.amplitude
assert T == mock.sentinel.duration | |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
visualization_features.py
Script to produce visualizations of the features used for the GAN discriminator
tests.
Author: Miguel Simão (miguel.simao@uc.pt)
"""
import numpy as np
from sklearn import preprocessing
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.preprocessing import scale
import matplotlib.pyplot as plt
# ENSURE REPRODUCIBILITY ######################################################
import os
import random
os.environ['PYTHONHASHSEED'] = '0'
np.random.seed(1337)
random.seed(12345)
###############################################################################
import keras
from tools import toolsfeatures
from dataset.dualmyo.utils import Loader
from dataset.dualmyo import dualmyofeatures
#%% LOAD DATA
DataLoader = Loader()
sample_data, sample_target = DataLoader.load()
sample_data = np.concatenate( [sample.reshape((1,) + sample.shape) for sample in sample_data], axis=0 )
sample_target = np.array(sample_target)
# Data split
ind_train, ind_val, ind_test = DataLoader.split(sample_target)
num_classes = 8
#%% FEATURE EXTRACTION
# Feature extraction
X_train = np.vstack([dualmyofeatures.extract_std(sample) for sample in sample_data[ind_train]])
X_val = np.vstack([dualmyofeatures.extract_std(sample) for sample in sample_data[ind_val]])
X_test = np.vstack([dualmyofeatures.extract_std(sample) for sample in sample_data[ind_test]])
X_ts = np.concatenate(
[dualmyofeatures.extract_ts(sample)[np.newaxis] for sample in sample_data],
axis=0)
# Feature scaling
feature_scaler = preprocessing.StandardScaler().fit(X_train)
X_train = feature_scaler.transform(X_train)
X_val = feature_scaler.transform(X_val)
X_test = feature_scaler.transform(X_test)
X_master = np.concatenate((X_train, X_val, X_test), axis=0)
# Target processing
t_train = sample_target[ind_train]
t_val = sample_target[ind_val]
t_test = sample_target[ind_test]
t_master = np.concatenate((t_train, t_val, t_test), axis=0)
#%% SENSOR REORDERING
myo0, myo1 = np.arange(8), np.arange(8,16)
sensor_order = np.empty((myo0.size+myo1.size), dtype=np.int)
sensor_order[0::2] = myo0
sensor_order[1::2] = myo1
X_master = X_master[:,sensor_order]
X_ts = X_ts[:,:,sensor_order]
#%% PLOT DEFAULT SETTINGS
#X_ts = np.abs(X_ts)/128.0
# Default configurations
plt.rc('font', family='serif')
#plt.rc('xtick', labelsize='x-small')
#plt.rc('ytick', labelsize='x-small')
plt.rc('text', usetex=True)
plt.rc('legend', edgecolor=(0,0,0), fancybox=False)
plt.rc('lines', markeredgewidth=0.5, linewidth=0.5)
#%% PLOT FEATURES BY CLASS SIDE-BY-SIDE
fig,ax = plt.subplots(nrows=1,ncols=1, dpi=300, figsize=(6.4,3))
X = []
for i in range(num_classes):
I = np.argwhere(t_master==i)[:16].squeeze()
X.append(X_master[I])
X = np.concatenate(X,0).transpose()
X = np.concatenate((X[:,:64],X[:,-64:]), axis=0)
ax.imshow(X, aspect='auto', cmap='Greys')
plt.xticks(np.arange(15,50,16) + .5)
plt.yticks([15.5])
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.tick_params(axis='both', length=0)
plt.grid(color='k')
plt.ylabel('Channels')
#plt.suptitle('Samples')
for i in range(4):
plt.text(8+16*i,-1,'G%i' % i, ha='center')
plt.text(8+16*i,32,'G%i' % (i+4), ha='center', va='top')
#fig.savefig('aaa.pdf', bbox_inches='tight')
#%% PLOT FEATURES LATENT SPACE PCA
Xb = preprocessing.scale(X_master)
pca = PCA(n_components=2).fit(Xb)
#Xb = TSNE(perplexity=5,early_exaggeration=8).fit_transform(Xb)
Xb = pca.transform(Xb)
fig,ax = plt.subplots(nrows=1,ncols=1, figsize=(4,4), dpi=300)
labels = ['G%i' % i for i in range(8)]
for i in range(8):
I = t_master == i
ax.scatter(Xb[I,0],Xb[I,1],
edgecolors='k',
linewidth=0.5,
c=plt.get_cmap('Dark2')(i),
label=labels[i],
)
plt.xlabel('Component 1')
plt.ylabel('Component 2')
lg = plt.legend(labels, fancybox=False, loc=5, bbox_to_anchor=(1.25,0.5))
fig.savefig('aaa.pdf', bbox_inches='tight', bbox_extra_artists=(lg,))
#%% USE GENERATOR TO CREATE FEATURES FROM NOISE
Xb = preprocessing.scale(X_master)
generator = keras.models.load_model('trainedGan_generator7.h5')
t_train_gen_ind = np.tile( np.arange(num_classes), (16,))
t_train_gen = toolsfeatures.onehotnoise(t_train_gen_ind, num_classes, 0.4)
X_gen = generator.predict([np.random.normal(0, 1, (t_train_gen.shape[0], 16)), t_train_gen])
Xb_gen = pca.transform(X_gen)
for i in range(num_classes):
I = t_train_gen_ind == i
ax.scatter(Xb_gen[I,0],Xb_gen[I,1],
edgecolors='k',
marker='x',
linewidth=0.5,
c=plt.get_cmap('Dark2')(i),
label=labels[i],
)
#%% USE GENERATOR TO CREATE FEATURES FROM NOISE (TSNE)
i_real = np.arange(Xb.shape[0])
i_gened = np.arange(Xb_gen.shape[0]) + i_real[-1] + 1
X2 = TSNE().fit_transform( np.concatenate((Xb, X_gen), axis=0) )
Xb = X2[i_real]
Xb_gen = X2[i_gened]
fig,ax = plt.subplots(nrows=1,ncols=1, figsize=(6,6), dpi=300)
for i in range(8):
I = t_master == i
ax.scatter(Xb[I,0],Xb[I,1],
edgecolors='k',
linewidth=0.5,
c=plt.get_cmap('Dark2')(i),
label=labels[i],
)
plt.xlabel('PC1')
plt.ylabel('PC2')
plt.legend(labels, fancybox=False)
for i in range(num_classes):
I = t_train_gen_ind == i
ax.scatter(Xb_gen[I,0],Xb_gen[I,1],
edgecolors='k',
marker='x',
linewidth=0.5,
c=plt.get_cmap('Dark2')(i),
label=labels[i],
) | |
import spira
import numpy as np
from spira import param
from copy import copy, deepcopy
from spira.gdsii.elemental.port import PortAbstract
from spira.core.initializer import ElementalInitializer
class Term(PortAbstract):
"""
Terminals are horizontal ports that connect SRef instances
in the horizontal plane. They typically represents the
i/o ports of a components.
Examples
--------
>>> term = spira.Term()
"""
width = param.FloatField(default=2)
length = param.FloatField(default=0.1)
def __init__(self, port=None, polygon=None, **kwargs):
super().__init__(port=port, polygon=polygon, **kwargs)
from spira import shapes
if polygon is None:
rect_shape = shapes.RectangleShape(
p1=[0, 0],
p2=[self.width, self.length]
)
pp = spira.Polygons(
shape=rect_shape,
gdslayer=spira.Layer(number=65)
)
pp.rotate(angle=self.orientation, center=self.midpoint)
# pp.rotate(angle=90-self.orientation, center=self.midpoint)
pp.move(midpoint=pp.center, destination=self.midpoint)
self.polygon = pp
else:
self.polygon = polygon
arrow_shape = shapes.ArrowShape(
a = self.width/10,
b = self.width/20,
c = self.width/5
)
arrow_shape.apply_merge
# arrow_shape.rotate(angle=self.orientation)
self.arrow = spira.Polygons(
shape=arrow_shape,
gdslayer=spira.Layer(number=77)
)
self.arrow.rotate(angle=self.orientation)
# self.arrow.rotate(angle=90-self.orientation)
def __repr__(self):
return ("[SPiRA: Term] (name {}, number {}, midpoint {}, " +
"width {}, orientation {})").format(self.name,
self.gdslayer.number, self.midpoint,
self.width, self.orientation
)
def _copy(self):
new_port = Term(parent=self.parent,
name=self.name,
midpoint=self.midpoint,
width=self.width,
length=self.length,
gdslayer=deepcopy(self.gdslayer),
poly_layer=deepcopy(self.poly_layer),
text_layer=deepcopy(self.text_layer),
orientation=self.orientation)
return new_port | |
#%%
import pandas as pd
import networkx as nx
import numpy as np
import graspologic as gs
data_path = "networks-course/data/celegans/male_chem_A_self_undirected.csv"
meta_path = "networks-course/data/celegans/master_cells.csv"
cells_path = "networks-course/data/celegans/male_chem_self_cells.csv"
adj = pd.read_csv(data_path, header=None)
meta = pd.read_csv(meta_path, header=None, index_col=0)
cells = np.squeeze(pd.read_csv(cells_path, header=None).values)
meta = meta.reindex(cells)
A = adj.values
#%%
from scipy.sparse import csr_matrix
A_ptr = gs.utils.pass_to_ranks(A) # can skip for an unweighted network
A_sparse = csr_matrix(A)
n_components = 16
ase = gs.embed.AdjacencySpectralEmbed(n_components=n_components, check_lcc=False)
ase_embedding = ase.fit_transform(A_sparse)
#%%
from umap import UMAP
umapper = UMAP(n_neighbors=15, metric="cosine", min_dist=0.8)
umap_embedding = umapper.fit_transform(ase_embedding)
#%%
gs.plot.networkplot(
A_sparse,
x=umap_embedding[:, 0],
y=umap_embedding[:, 1],
edge_linewidth=0.2,
edge_alpha=0.4,
node_kws=dict(
s=50, # s will change the size of nodes)
),
) | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
### Example of use
###, L. Darme 02/07/2019
import matplotlib.pyplot as plt
import numpy as np
# Importing additional user-defined function
import UsefulFunctions as uf
import Amplitudes as am
import Production as br
import Detection as de
import LimitsList as lim
#############################################################################
############### Several limits example ##############################
#############################################################################
# This example create limits for the quark flavour violating operators
if __name__ == "__main__":
ExperimentsList=np.array(["babar_invisibledecayBmtoKm","belle2_invisibledecayBmtoKm", \
"belle_invisibledecayB0toPi0", "belle_invisibledecayB0toK0","babar_invisibledecayBmtoPim",\
"e391a_invisibledecayKL0toPi0","e949_invisibledecayKptoPip","na62_invisibledecayKL0toPi0","na62_invisibledecayKptoPip",\
"ship_heavymesondecay"])
###invisible heavy meson decays
geff={"gu11":2/3.,"gu22":2/3.,"gd11":-1/3.,"gd22":-1/3.,"gd33":-1/3.,"gl11":-1.,"gl22":-1.,"gd33":1,"gd31":1,"gd32":1,"gd21":1}
Lim,LabelLimit = lim.GetLimits(ExperimentsList,10.,geff,"V",True, ReadFromFile=False) | |
import pickle
import numpy as np
import matplotlib.pyplot as plt
cumulative_rewards = pickle.load(open('cum_rewards_history-12.pkl', 'rb'))
epsilons = pickle.load(open('epsilon_history-12.pkl', 'rb'))
# Set general font size
plt.rcParams['font.size'] = '24'
ax = plt.subplot(211)
plt.title("Cumulative Rewards over Episodes", fontsize=24)
plt.plot(np.arange(len(cumulative_rewards)) + 1, cumulative_rewards)
# Set tick font size
for label in (ax.get_xticklabels() + ax.get_yticklabels()):
label.set_fontsize(16)
ax = plt.subplot(212)
plt.title("Epsilons over Episodes", fontsize=24)
plt.plot(np.arange(len(epsilons)) + 1, epsilons)
# Set tick font size
for label in (ax.get_xticklabels() + ax.get_yticklabels()):
label.set_fontsize(16)
plt.show() | |
#!/usr/bin/env python
# coding: utf-8
# ## Thank you for visiting my Kernel!
# I have just started with this dataset, which covers house sales in King County, USA. My kernel will be updated over time as I learn from many excellent analysts.
#
# * I am not native in English, so very sorry to let you read poor one.
# ## 1.Read libraries and the dataset
# Read libraries and the dataset before analysing.Especially we should care about strange points of the dataset.
#
# ## 2.Data Cleaning and Visualizations
# I need to conduct nulls and duplications including strange points above. We also see the relation between 'price' as the target and other valuables from visualizations. We try to evaluate 1st model before feature engineering because of seeing the progress. Then, as explanatory variables increase through feature engineering, multicollinearities are detected.
#
# * 2-1.Exploring nulls and duplications into the dataset.
# * 2-2.Visualizing the price
# * 2-3.Model building(1st)
# * 2-4-1. Feature engineering: "date"
# * 2-4-2. Feature engineering: "renovation"
# * 2-4-3. Feature engineering: "zipcode"
# * 2-4-4. New dataset
# * 2-4-5. Detecting multicollinearity
#
# ## 3.Model building and Evaluation
# The model will be built using the train dataset after checking for multicollinearity. In addition, it is evaluated on the squared correlation between predicted values (y_pred) and actual values (y_test), the MSE (mean squared error), and the MAE (mean absolute error).
# ## 1.Read libraries and the dataset
# Anaylsis will be started by reading librariese and the datasets.
# In[ ]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error, mean_absolute_error
# ## 1-1. Load the dataset
# In[ ]:
df = pd.read_csv("../input/kc_house_data.csv")
df.head()
# In[ ]:
df.tail()
# ****Dataset shows that the target is 'price' and the other explanatory variables are 20.
# In[ ]:
print(df.shape)
print('------------------------')
print(df.nunique())
print('------------------------')
print(df.dtypes)
# Dataset's shape implies 21,613 lines * 21 columns where are composes as be said above.
# #### It is found that the number of lines(21,613) and id(21,436) is different by 176 except the 1st column of explanatory valuables. It should be caused by some nulls or/and duplications.
# ## 2.Data Cleaning and Visualisation
# ### 2-1.Exploring nulls and duplications into the dataset.
# In[ ]:
df.isnull().sum()
# In[ ]:
df['id'].value_counts()
# In[ ]:
sum((df['id'].value_counts()>=2)*1)
# It becomes cleared that the difference is cased by **DUPLICATION**, NOT nulls.
# * Also, on other variables, there are NOT nulls which we have to care.
# When my goal is set to predict 'price', show the distribution and fundamental statistics of 'price' and the correlation between 'price' and other valuables except 'id'.
# ### 2-2. Visualizing the price
# Firstly seeing the distribution of price. It may not be directly useful for prediction, however, the clarification of target data is important.
# In[ ]:
plt.hist(df['price'],bins=100)
# In[ ]:
# Seeing the fundamental statistics of price.
df.describe()['price']
# Distribution of price is distorted to the right. The large difference between minimum and maximum price. More than 100 times!!
# * Nextly, seeing the correlation matrix and the scatter plots between "price" and other variables except 'date'.
# * **'date' is needed to change significantly.**
# In[ ]:
df.corr().style.background_gradient().format('{:.2f}')
# In[ ]:
for i in df.columns:
if (i != 'price') & (i != 'date'):
df[[i,'price']].plot(kind='scatter',x=i,y='price')
# Though the dtypes of 'yr_renovated' and 'zipcode' are int64, they might be needed to be feature engineered because 'yr_renovated' is focused on around 0 and 2000 from seeing scatter plots above and 'zipcode' is just number.
# ### 2-3. Model Building (1st)
# * Try to biuild 1st model, that the target is 'price' and X are other valuables except 'id', 'date', 'yr_renovated' and 'zipcode'.
# In[ ]:
from sklearn.linear_model import LinearRegression
X = df.drop(['price','id','date','yr_renovated','zipcode'],axis=1)
y = df['price']
regr = LinearRegression(fit_intercept=True).fit(X,y)
print("model_1_score:{:.4f}".format(regr.score(X,y)))
# ### 2-4-1. Feature engineering: "date"
# Firstly, as be said , 'date' will be feature engineered to be significant because 'price' may be related with day of week ('dow') and month.
# In[ ]:
df.date.head()
# In[ ]:
pd.to_datetime(df.date).map(lambda x:'dow'+str(x.weekday())).head()
# ** dow:day of week, 0=Monday, 7=Sunday
# In[ ]:
pd.to_datetime(df.date).map(lambda x:'month'+str(x.month)).head()
# ** month1=January, 12=December
# In[ ]:
df['dow'] = pd.to_datetime(df.date).map(lambda x:'dow'+str(x.weekday()))
df['month'] = pd.to_datetime(df.date).map(lambda x:'month'+str(x.month))
# > Nextly, as the values of 'dow' and 'month' are categorilized, they are changed to be one hot encoding.
# In[ ]:
pd.get_dummies(df['dow']).head()
# In[ ]:
pd.get_dummies(df['month']).head()
# * **The month is not correctly sorted, however the way to revise is not understood to me.
# ### 2-4-2. Feature engineering: "renovation"
# The value of 'yr_renovated'is difficult to be used by itself, therefore, it will be transformed whether the house was renovated or not.
# In[ ]:
df.yr_renovated.head()
# In[ ]:
df['yr_renovated'].value_counts().sort_index().head()
# In[ ]:
np.array(df['yr_renovated'] !=0)
# In[ ]:
np.array(df['yr_renovated'] !=0)*1
# In[ ]:
df['yr_renovated_bin'] = np.array(df['yr_renovated'] != 0)*1
df['yr_renovated_bin'].value_counts()
# ### 2-4-3. Feature engineering: "zipcode"
# The value of zipcode itself may be not directly related with price because it is just the number as below. However, the areas seem to be important valuables for housing price, so it should be changed to be one hot encoding.
# In[ ]:
df['zipcode'].astype(str).map(lambda x:x).head()
# In[ ]:
df['zipcode_str'] = df['zipcode'].astype(str).map(lambda x:'zip_'+x)
pd.get_dummies(df['zipcode_str']).head()
# ### 2-4-4. New dataset
# One hot encoding of 'dow', 'month' and 'zipcode'
# In[ ]:
df['zipcode_str'] = df['zipcode'].astype(str).map(lambda x:'zip_'+x)
df_en = pd.concat([df,pd.get_dummies(df['zipcode_str'])],axis=1)
df_en = pd.concat([df_en,pd.get_dummies(df.dow)],axis=1)
df_en = pd.concat([df_en,pd.get_dummies(df.month)],axis=1)
# Dropping the original valuables because feature engineering were conducted.
# In[ ]:
df_en_fin = df_en.drop(['date','zipcode','yr_renovated','month','dow','zipcode_str',],axis=1)
# In[ ]:
print(df_en_fin.shape)
print('------------------------')
print(df_en_fin.nunique())
# In[ ]:
df_en_fin.head()
# ### 2-4-5. Detecting multicollinearity
# Seeing whether the multicollinearity occurs by using these valuables.
# In[ ]:
X = df_en_fin.drop(['price'],axis=1)
y = df_en_fin['price']
regr = LinearRegression(fit_intercept=True).fit(X,y)
model_2 = regr.score(X,y)
for i, coef in enumerate(regr.coef_):
print(X.columns[i],':',coef)
# ****When seeing the result of regr.coef_, for example, 'bedrooms' is negative against 'price'. Normally 'bedrooms' could be positively proportional with 'price'. However it is caused by strong positive correlation by 0.58 with 'sqft_living'. Because multicollinearity is thought to be occurred in other valuables.
# * **In the case of multicollinearity, VIF value should be considered.
# In[ ]:
df_vif = df_en_fin.drop(["price"],axis=1)
for cname in df_vif.columns:
y=df_vif[cname]
X=df_vif.drop(cname, axis=1)
regr = LinearRegression(fit_intercept=True)
regr.fit(X, y)
rsquared = regr.score(X,y)
print(cname,":" ,1/(1-np.power(rsquared,2)))
# The criteria of multicollinearity is generally over VIF(Variance Inflation Factor) value by 10 or some inf (rsquare==1) are found. Therefore, we derive the valuables to meet criteria of 'rsquare>1.-1e-10', in addition, empirically 'regr.coef_'> |0.5| .
# In[ ]:
df_vif = df_en_fin.drop(["price"],axis=1)
for cname in df_vif.columns:
y=df_vif[cname]
X=df_vif.drop(cname, axis=1)
regr = LinearRegression(fit_intercept=True)
regr.fit(X, y)
rsquared = regr.score(X,y)
#print(cname,":" ,1/(1-np.power(rsquared,2)))
if rsquared > 1. -1e-10:
print(cname,X.columns[(regr.coef_> 0.5) | (regr.coef_ < -0.5)])
# Dropping 'sqft_above','zip_98001', 'month1' and 'dow1'.
# In[ ]:
df_en_fin = df_en_fin.drop(['sqft_above','zip_98001','month1','dow1'],axis=1)
df_vif = df_en_fin.drop(["price"],axis=1)
for cname in df_vif.columns:
y=df_vif[cname]
X=df_vif.drop(cname, axis=1)
regr = LinearRegression(fit_intercept=True)
regr.fit(X, y)
rsquared = regr.score(X,y)
#print(cname,":" ,1/(1-np.power(rsquared,2)))
if rsquared > 1. -1e-10:
print(cname,X.columns[(regr.coef_> 0.5) | (regr.coef_ < -0.5)])
# NO multicollinearity happens!!
# ## 3.Model building and Evaluation
# The model will be built using the train dataset after checking for multicollinearity. In addition, it is evaluated on the correlation between predicted values (y_pred) and actual values (y_test), the MSE (mean squared error), and the MAE (mean absolute error).
# In[1]:
X_multi = df_en_fin.drop(['price'],axis=1)
y_target = df_en_fin['price']
# In[ ]:
X_train,X_test,y_train,y_test = train_test_split(X_multi,y_target,random_state=42)
# In[ ]:
regr_train=LinearRegression().fit(X_train,y_train)
y_pred = regr_train.predict(X_test)
# In[ ]:
print("correlation:{:.4f}".format(np.corrcoef(y_test,y_pred)[0,1]))
# In[ ]:
#MAE = mean_absolute_error(y_test,y_pred)
#MSE = mean_squared_error(y_test,y_pred)
print("MAE:{:.2f}".format(mean_absolute_error(y_test,y_pred)),'/ '
"MSE:{:.2f}".format(mean_squared_error(y_test,y_pred)))
# ## Conclusion
#
#
#
# ## Next Issues
# 1. Exactly the accuracy(correlation^2) was over 0.8, however, LinearRegression was only tried at this time. Therefore the other methodology should be tried on the purpose to get better.
#
# 2. As there are over 100 of the explanatory variables, overfitting may happen.Therefore the number of variables may need to decrease.
# In[ ]: | |
###########################################
# This file is based on the jupyter notebook
# https://github.com/udacity/deep-reinforcement-learning/blob/master/p1_navigation/Navigation.ipynb
# provided by udacity
###########################################
import numpy as np
from unityagents import UnityEnvironment
from collections import deque
from torch.utils.tensorboard import SummaryWriter
from agent import Agent
from epsilon import Epsilon
env = UnityEnvironment(file_name="Banana.app")
brain_name = env.brain_names[0]
brain = env.brains[brain_name]
###Hyperparameter####
hidden_1_size = 37 * 2        # first hidden layer width (2x state size)
hidden_2_size = 37 * 2        # second hidden layer width
epsilon_start = 0.1           # initial exploration rate
epsilon_decay_rate = 0.995    # decay applied per timestep by Epsilon
epsilon_max_decay_to = 0.01   # exploration floor
update_every = 4              # timesteps between target-network updates
buffer_size = 1_000_000       # replay buffer capacity
sample_batch_size = 64        # minibatch size drawn from the buffer
gamma = 0.99                  # discount factor
tau = 1e-3                    # soft-update interpolation factor
learning_rate = 5e-4
#####################
episodes = 1_800              # training episode budget
input_size = 37               # Banana env observation vector length
output_size = 4               # number of discrete actions
agent = Agent(input_size, hidden_1_size, hidden_2_size, output_size, buffer_size, sample_batch_size, gamma, tau, learning_rate)
epsilon = Epsilon(epsilon_start, epsilon_decay_rate, epsilon_max_decay_to)
scores = deque(maxlen=100)    # rolling window for the 100-episode mean
writer = SummaryWriter()
for episode in range(1, episodes+1):
    state = env.reset(train_mode=True)[brain_name].vector_observations[0]
    score = 0
    timestep = 0
    while True:
        # epsilon-greedy action selection
        current_epsilon = epsilon.calculate_for(timestep)
        action = agent.select_action(state, current_epsilon)
        env_info = env.step(action)[brain_name]
        next_state, reward, done = env_info.vector_observations[0], env_info.rewards[0], env_info.local_done[0]
        agent.add_to_buffer(state, action, reward, next_state, done)
        # sync the target network every `update_every` steps
        update_target = timestep % update_every == 0
        agent.learn(update_target)
        score += reward
        state = next_state
        timestep += 1
        if done:
            break
    scores.append(score)
    mean_score = np.mean(scores)
    if (episode % 10 == 0):
        print(f'Episode {episode} mean score {mean_score}', end='\r')
    # environment counts as solved at a 100-episode mean score of 13
    if (len(scores) == 100 and mean_score >= 13):
        print(f'Reached mean score of {mean_score} over last 100 episodes after episode {episode}')
        agent.save_model()
        break
    writer.add_scalar("score", score, episode)
writer.close()
env.close()
import theano.tensor as T
import numpy as np
__all__ = ['var']
def var(name, label=None, observed=False, const=False, vector=False, lower=None, upper=None):
    """Create a named Theano scalar/vector variable annotated with fit metadata.

    name: symbolic variable name
    label: optional human-readable label
    observed: whether the variable holds observed data
    const: whether the variable is held constant (observed implies const)
    vector: create a vector instead of a scalar (observed variables only)
    lower, upper: optional bounds; default to -inf/+inf when omitted

    Raises ValueError for unobserved vectors and for observed+const.
    """
    if vector and not observed:
        raise ValueError('Currently, only observed variables can be vectors')
    if observed and const:
        raise ValueError('Observed variables are automatically const')
    if vector:
        var = T.vector(name)
    else:
        var = T.scalar(name)
    var._name = name
    var._label = label
    var._observed = observed
    var._const = observed or const
    # BUG FIX: `lower or -np.inf` replaced a legitimate bound of exactly 0
    # (falsy) with -inf; use explicit None checks instead.
    var._lower = -np.inf if lower is None else lower
    var._upper = np.inf if upper is None else upper
    return var
import numpy as np
import numpy.testing as npt
import pytest
import torch
def test_distance():
    """Check esp.distance against a reference Euclidean norm on random points."""
    import espaloma as esp

    normal = torch.distributions.normal.Normal(
        loc=torch.zeros(5, 3), scale=torch.ones(5, 3)
    )
    a = normal.sample()
    b = normal.sample()
    expected = torch.sqrt((a - b).pow(2).sum(dim=-1)).numpy()
    npt.assert_almost_equal(esp.distance(a, b).numpy(), expected, decimal=3)
    # the distance from a point to itself must vanish
    npt.assert_almost_equal(esp.distance(a, a).numpy(), 0.0)
import os
import numpy as np
from PIL import Image
import torch
from torch.autograd import Variable
import rospy
from affordance_gym.simulation_interface import SimulationInterface
from affordance_gym.perception_policy import Predictor, end_effector_pose
from affordance_gym.utils import parse_policy_arguments, parse_moveit_arguments, parse_vaed_arguments, parse_traj_arguments, load_parameters, use_cuda
from affordance_gym.monitor import TrajectoryEnv
from env_setup.env_setup import ELEVATION_EPSILON, AZIMUTH_EPSILON, DISTANCE_EPSILON, VAED_MODELS_PATH, TRAJ_MODELS_PATH, POLICY_MODELS_PATH
from env_setup.env_setup import LOOK_AT, DISTANCE, AZIMUTH, ELEVATION, CUP_X_LIM, CUP_Y_LIM, LOOK_AT_EPSILON
from TrajectoryVAE.ros_monitor import ROSTrajectoryVAE
from TrajectoryVAE.utils import MAX_ANGLE, MIN_ANGLE
from AffordanceVAED.ros_monitor import RosPerceptionVAE
'''
Evaluates the performance of the affordance policy in MuJoCo
'''
def main(args):
    """Evaluate a trained affordance policy in the MuJoCo simulation.

    Samples 100 random cup placements (optionally with a jittered camera),
    runs perception VAE -> policy -> trajectory VAE, and reports the distance
    between the reached end-effector position and the cup position.
    """
    rospy.init_node('policy_train', anonymous=True)
    device = use_cuda()
    # Trajectory generator
    assert(args.model_index > -1)
    bahavior_model_path = os.path.join(TRAJ_MODELS_PATH, args.traj_name)
    action_vae = ROSTrajectoryVAE(bahavior_model_path, args.traj_latent, args.num_actions,
                                  model_index=args.model_index, num_joints=args.num_joints)
    # pereception
    gibson_model_path = os.path.join(VAED_MODELS_PATH, args.vaed_name)
    perception = RosPerceptionVAE(gibson_model_path, args.vaed_latent)
    # Policy
    if (args.fixed_camera):
        policy = Predictor(args.vaed_latent, args.traj_latent)
    else:
        # Includes camera params as an input
        policy = Predictor(args.vaed_latent + 5, args.traj_latent, args.num_params)
    policy.to(device)
    policy_path = os.path.join(POLICY_MODELS_PATH, args.policy_name)
    load_parameters(policy, policy_path, 'model')
    # Simulation interface
    sim = SimulationInterface(arm_name='lumi_arm')
    sim.change_camere_params(LOOK_AT, DISTANCE, AZIMUTH, ELEVATION)
    env = TrajectoryEnv(action_vae, sim, args.num_actions, num_joints=args.num_joints, trajectory_duration=args.duration)
    # Reset time
    if args.forward_kinematics:
        reset_duration = 1.5
    else:
        reset_duration = 2.5
    losses = []
    for idx in range(100):
        cup_name = 'cup{}'.format(np.random.randint(1, 10))
        x = np.random.uniform(CUP_X_LIM[0], CUP_X_LIM[1])
        y = np.random.uniform(CUP_Y_LIM[0], CUP_Y_LIM[1])
        lookat = np.array(LOOK_AT)
        camera_distance = DISTANCE
        azimuth = AZIMUTH
        elevation = ELEVATION
        if (args.randomize_all):
            # jitter the camera pose within the epsilon bounds used in training
            lookat[0] += np.random.uniform(-LOOK_AT_EPSILON, LOOK_AT_EPSILON)
            lookat[1] += np.random.uniform(-LOOK_AT_EPSILON, LOOK_AT_EPSILON)
            camera_distance += np.random.uniform(-DISTANCE_EPSILON, DISTANCE_EPSILON)
            azimuth += np.random.uniform(-AZIMUTH_EPSILON, AZIMUTH_EPSILON)
            elevation += np.random.uniform(-ELEVATION_EPSILON, ELEVATION_EPSILON)
        sim.change_camere_params(lookat, camera_distance, azimuth, elevation)
        sim.reset_table(x, y, 0, cup_name, duration=reset_duration)
        image_arr = sim.capture_image('/lumi_mujoco/rgb')
        image = Image.fromarray(image_arr)
        # Image -> Latent1
        latent1 = perception.get_latent(image)
        if not(args.fixed_camera):
            # normalize camera parameters over their epsilon ranges before
            # feeding them to the policy
            n_lookat = (lookat[:2] - (np.array(LOOK_AT[:2]) - LOOK_AT_EPSILON)) / (LOOK_AT_EPSILON * 2)
            n_camera_distance = (camera_distance - (DISTANCE - DISTANCE_EPSILON)) / (DISTANCE_EPSILON * 2)
            n_elevation = (elevation - (ELEVATION - ELEVATION_EPSILON)) / (ELEVATION_EPSILON * 2)
            n_azimuth = (azimuth - (AZIMUTH - AZIMUTH_EPSILON)) / (AZIMUTH_EPSILON * 2)
            camera_params = Variable(torch.Tensor([n_lookat[0], n_lookat[1], n_camera_distance, n_azimuth, n_elevation]).to(device))
            camera_params = camera_params.unsqueeze(0)
            latent1 = torch.cat([latent1, camera_params], 1)
        # latent and camera params -> latent2
        latent2 = policy(latent1)
        if (args.forward_kinematics):
            # latent2 -> trajectory
            trajectories = action_vae.model.decoder(latent2)
            # Reshape to trajectories
            trajectories = action_vae.model.to_trajectory(trajectories)
            # Get the last joint pose
            end_joint_pose = trajectories[:, :, -1]
            # Unnormalize
            end_joint_pose = (MAX_ANGLE - MIN_ANGLE) * end_joint_pose + MIN_ANGLE
            # joint pose -> cartesian
            end_pose = end_effector_pose(end_joint_pose, device)
            end_pose = end_pose.detach().cpu().numpy()
            target_pose = np.array([x, y])
            loss = np.linalg.norm(end_pose[0] - target_pose)
        else:
            # execute the latent trajectory in simulation and use the reached pose
            latent2 = latent2.detach().cpu().numpy()
            _, end_pose = env.do_latent_imitation(latent2[0])
            loss = np.linalg.norm(np.array([x, y]) - end_pose[:2])
        losses.append(loss)
        print('loss: {}'.format(loss))
        print("goal", x, y)
        print("end_pose", end_pose)
    print("AVG: ", np.mean(losses), " VAR: ", np.var(losses))
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(description='Evaluate a perception policy in MuJoCo')
    # register the shared CLI argument groups defined in affordance_gym.utils
    parse_policy_arguments(parser)
    parse_moveit_arguments(parser)
    parse_vaed_arguments(parser)
    parse_traj_arguments(parser)
    parser.add_argument('--forward-kinematics', dest='forward_kinematics', action='store_true', help="Compute an end effector position by solving forward kinematics")
    parser.set_defaults(forward_kinematics=False)
    parser.add_argument('--randomize-all', dest='randomize_all', action='store_true')
    parser.set_defaults(randomize_all=False)
    args = parser.parse_args()
    main(args)
# -*- coding: utf-8 -*-
from __future__ import print_function
from pyqtgraph.metaarray import MetaArray as MA
from numpy import ndarray, loadtxt
from .FileType import FileType
from six.moves import range
#class MetaArray(FileType):
#@staticmethod
#def write(self, dirHandle, fileName, **args):
#self.data.write(os.path.join(dirHandle.name(), fileName), **args)
#@staticmethod
#def extension(self, **args):
#return ".ma"
#def fromFile(fileName, info=None):
#return MA(file=fileName)
class CSVFile(FileType):
    """Read support for .csv files."""
    extensions = ['.csv'] ## list of extensions handled by this class
    dataTypes = [MA, ndarray] ## list of python types handled by this class
    priority = 10 ## low priority; MetaArray is the preferred way to move data..
    #@classmethod
    #def write(cls, data, dirHandle, fileName, **args):
        #"""Write data to fileName.
        #Return the file name written (this allows the function to modify the requested file name)
        #"""
        #ext = cls.extensions[0]
        #if fileName[-len(ext):] != ext:
            #fileName = fileName + ext
        #if not (hasattr(data, 'implements') and data.implements('MetaArray')):
            #data = MetaArray(data)
        #data.write(os.path.join(dirHandle.name(), fileName), **args)
        #return fileName
    @classmethod
    def read(cls, fileHandle):
        """Read a CSV file, return a data object.

        If the first row is numeric it is treated as data (dropping any
        trailing empty column produced by a trailing comma); otherwise it is
        used as field names for a structured array.
        """
        fn = fileHandle.name()
        # use a context manager so the handle is closed even on error
        with open(fn) as fd:
            header = fd.readline().split(',')
        n = len(header)
        # BUG FIX: dontUse was undefined when the elif branch ran, causing a
        # NameError later; initialize it explicitly.
        dontUse = None
        if header[-1] == '\n' or header[-1] == ' \n':
            header.pop(-1)
            dontUse = n-1
        elif header[-1][-1:] == '\n':
            header[-1] = header[-1][:-1]
        try:
            ## if the first row of the file is not convertible to numbers, this
            ## raises ValueError and we use the first row as a header instead
            [int(float(header[i])) for i in range(len(header))]
            # BUG FIX: Python 3 range objects have no .remove(); build a list
            # of column indices, skipping the unused trailing column if any.
            cols = [c for c in range(n) if c != dontUse]
            return loadtxt(fn, delimiter=',', usecols=cols)
        except ValueError:
            return loadtxt(fn, delimiter=',', skiprows=1, dtype=[(f, float) for f in header])
import re
import pandas as pd
import numpy as np
import scipy as sp
from scipy.spatial.distance import pdist
import sys
import warnings
import sklearn
import importlib
# Warn when running under an unsupported interpreter.
if sys.version_info < (3, 0):
    warnings.warn("As of version 0.29.0 shapLundberg only supports Python 3 (not 2)!")

# Registry of optional-dependency import failures, keyed by package name.
import_errors = {}

def assert_import(package_name):
    """If importing package_name failed earlier, print its message and re-raise."""
    global import_errors
    if package_name in import_errors:
        msg, e = import_errors[package_name]
        print(msg)
        raise e

def record_import_error(package_name, msg, e):
    """Remember that importing package_name failed, for later re-raising."""
    global import_errors
    import_errors[package_name] = (msg, e)
class Instance:
    """A single sample to be explained: raw feature values plus optional
    per-group display values."""
    def __init__(self, x, group_display_values):
        # x: 2-D array of feature values (a single row)
        self.x = x
        # group_display_values: values to show for each feature group, or None
        self.group_display_values = group_display_values
def convert_to_instance(val):
    """Wrap val in an Instance (no display values) unless it already is one."""
    return val if isinstance(val, Instance) else Instance(val, None)
class InstanceWithIndex(Instance):
    """An Instance that also keeps its index and column labels so it can be
    rebuilt as a pandas DataFrame."""

    def __init__(self, x, column_name, index_value, index_name, group_display_values):
        Instance.__init__(self, x, group_display_values)
        self.index_value = index_value
        self.index_name = index_name
        self.column_name = column_name

    def convert_to_df(self):
        """Rebuild the sample as a DataFrame indexed by the stored index."""
        frame = pd.concat(
            [pd.DataFrame(self.index_value, columns=[self.index_name]),
             pd.DataFrame(self.x, columns=self.column_name)],
            axis=1,
        )
        return frame.set_index(self.index_name)
def convert_to_instance_with_index(val, column_name, index_value, index_name):
    """Build an InstanceWithIndex (without display values) from raw parts."""
    return InstanceWithIndex(val, column_name, index_value, index_name, None)
def match_instance_to_data(instance, data):
    """Attach the feature grouping of `data` to `instance`, deriving display
    values for singleton groups from the instance itself."""
    assert isinstance(instance, Instance), "instance must be of type Instance!"
    if isinstance(data, DenseData):
        if instance.group_display_values is None:
            instance.group_display_values = [
                instance.x[0, group[0]] if len(group) == 1 else ""
                for group in data.groups
            ]
        assert len(instance.group_display_values) == len(data.groups)
        instance.groups = data.groups
class Model:
    """A callable model `f` plus the names of its outputs (out_names may be
    None until inferred by match_model_to_data)."""
    def __init__(self, f, out_names):
        self.f = f
        self.out_names = out_names
def convert_to_model(val):
    """Wrap val in a Model (with unknown output names) unless it already is one."""
    return val if isinstance(val, Model) else Model(val, None)
def match_model_to_data(model, data):
    """Run the wrapped model once over `data` and infer output names.

    model: a Model wrapper
    data: a Data object; DenseDataWithIndex is converted to a DataFrame first
    return: the model output on the background data
    """
    assert isinstance(model, Model), "model must be of type Model!"
    try:
        if isinstance(data, DenseDataWithIndex):
            out_val = model.f(data.convert_to_df())
        else:
            out_val = model.f(data.data)
    # BUG FIX: was a bare `except:`, which also intercepted KeyboardInterrupt
    # and SystemExit; narrow to Exception (the error is still re-raised).
    except Exception:
        print("Provided model function fails when applied to the provided data set.")
        raise
    if model.out_names is None:
        if len(out_val.shape) == 1:
            model.out_names = ["output value"]
        else:
            model.out_names = ["output value "+str(i) for i in range(out_val.shape[0])]
    return out_val
class Data:
    """Abstract base class for background datasets."""
    def __init__(self):
        pass
class SparseData(Data):
    """Sparse background dataset; samples are uniformly weighted and no
    feature grouping is supported."""

    def __init__(self, data, *args):
        rows = data.shape[0]
        # uniform weights summing to 1
        self.weights = np.ones(rows)
        self.weights /= np.sum(self.weights)
        self.transposed = False
        self.groups = None
        self.group_names = None
        self.groups_size = data.shape[1]
        self.data = data
class DenseData(Data):
    # Dense (ndarray-backed) background dataset with named, optionally grouped
    # features and per-sample weights.
    def __init__(self, data, group_names, *args):
        # Optional positional args: args[0] = list of column-index arrays that
        # group columns into features (default: one singleton group per name);
        # args[1] = per-sample weights (default: uniform).
        self.groups = args[0] if len(args) > 0 and args[0] is not None else [np.array([i]) for i in range(len(group_names))]
        l = sum(len(g) for g in self.groups)
        num_samples = data.shape[0]
        t = False
        # If the grouped column count does not match axis 1, assume the matrix
        # is transposed (features x samples).
        if l != data.shape[1]:
            t = True
            num_samples = data.shape[1]
        valid = (not t and l == data.shape[1]) or (t and l == data.shape[0])
        assert valid, "# of names must match data matrix!"
        self.weights = args[1] if len(args) > 1 else np.ones(num_samples)
        # normalize weights to sum to 1
        self.weights /= np.sum(self.weights)
        wl = len(self.weights)
        valid = (not t and wl == data.shape[0]) or (t and wl == data.shape[1])
        assert valid, "# weights must match data matrix!"
        self.transposed = t
        self.group_names = group_names
        self.data = data
        self.groups_size = len(self.groups)
class DenseDataWithIndex(DenseData):
    """DenseData that also keeps an index column so it can round-trip to a
    pandas DataFrame."""

    def __init__(self, data, group_names, index, index_name, *args):
        DenseData.__init__(self, data, group_names, *args)
        self.index_value = index
        self.index_name = index_name

    def convert_to_df(self):
        """Rebuild the dataset as a DataFrame indexed by the stored index."""
        frame = pd.concat(
            [pd.DataFrame(self.index_value, columns=[self.index_name]),
             pd.DataFrame(self.data, columns=self.group_names)],
            axis=1,
        )
        return frame.set_index(self.index_name)
def convert_to_data(val, keep_index=False):
    """Wrap a raw input (ndarray / Series / DataFrame / sparse matrix) in the
    matching Data subclass; pass existing Data objects through unchanged."""
    if isinstance(val, Data):
        return val
    if type(val) == np.ndarray:
        return DenseData(val, [str(i) for i in range(val.shape[1])])
    type_name = str(type(val))
    if type_name.endswith("'pandas.core.series.Series'>"):
        return DenseData(val.values.reshape((1, len(val))), list(val.index))
    if type_name.endswith("'pandas.core.frame.DataFrame'>"):
        if keep_index:
            return DenseDataWithIndex(val.values, list(val.columns), val.index.values, val.index.name)
        return DenseData(val.values, list(val.columns))
    if sp.sparse.issparse(val):
        # normalize to CSR before wrapping
        if not sp.sparse.isspmatrix_csr(val):
            val = val.tocsr()
        return SparseData(val)
    assert False, "Unknown type passed as data object: "+str(type(val))
class Link:
    """Abstract base class for link functions mapping model output space."""
    def __init__(self):
        pass
class IdentityLink(Link):
    """Identity link: f(x) = x, finv(x) = x."""
    def __str__(self):
        return "identity"
    @staticmethod
    def f(x):
        return x
    @staticmethod
    def finv(x):
        return x
class LogitLink(Link):
    """Logit link: f(x) = log(x/(1-x)), finv is the logistic sigmoid."""
    def __str__(self):
        return "logit"
    @staticmethod
    def f(x):
        return np.log(x/(1-x))
    @staticmethod
    def finv(x):
        return 1/(1+np.exp(-x))
def convert_to_link(val):
    """Normalize val ("identity", "logit" or a Link instance) to a Link."""
    if isinstance(val, Link):
        return val
    if val == "identity":
        return IdentityLink()
    if val == "logit":
        return LogitLink()
    assert False, "Passed link object must be a subclass of iml.Link"
def hclust_ordering(X, metric="sqeuclidean"):
""" A leaf ordering is under-defined, this picks the ordering that keeps nearby samples similar.
"""
# compute a hierarchical clustering
D = sp.spatial.distance.pdist(X, metric)
cluster_matrix = sp.cluster.hierarchy.complete(D)
# merge clusters, rotating them to make the end points match as best we can
sets = [[i] for i in range(X.shape[0])]
for i in range(cluster_matrix.shape[0]):
s1 = sets[int(cluster_matrix[i,0])]
s2 = sets[int(cluster_matrix[i,1])]
# compute distances between the end points of the lists
d_s1_s2 = pdist(np.vstack([X[s1[-1],:], X[s2[0],:]]), metric)[0]
d_s2_s1 = pdist(np.vstack([X[s1[0],:], X[s2[-1],:]]), metric)[0]
d_s1r_s2 = pdist(np.vstack([X[s1[0],:], X[s2[0],:]]), metric)[0]
d_s1_s2r = pdist(np.vstack([X[s1[-1],:], X[s2[-1],:]]), metric)[0]
# concatenete the lists in the way the minimizes the difference between
# the samples at the junction
best = min(d_s1_s2, d_s2_s1, d_s1r_s2, d_s1_s2r)
if best == d_s1_s2:
sets.append(s1 + s2)
elif best == d_s2_s1:
sets.append(s2 + s1)
elif best == d_s1r_s2:
sets.append(list(reversed(s1)) + s2)
else:
sets.append(s1 + list(reversed(s2)))
return sets[-1]
def convert_name(ind, shap_values, feature_names):
    """Resolve a feature identifier (name string, "rank(i)", "sum()" or an
    integer index) to a column index (or the literal "sum()")."""
    if type(ind) == str:
        nzinds = np.where(np.array(feature_names) == ind)[0]
        if len(nzinds) == 0:
            # we allow rank based indexing using the format "rank(int)"
            if ind.startswith("rank("):
                return np.argsort(-np.abs(shap_values).mean(0))[int(ind[5:-1])]
            # we allow the sum of all the SHAP values to be specified with "sum()"
            # assuming here that the calling method can deal with this case
            elif ind == "sum()":
                return "sum()"
            else:
                raise ValueError("Could not find feature named: " + ind)
            # NOTE(review): unreachable — every branch above returns or raises
            return None
        else:
            return nzinds[0]
    else:
        # already an integer index
        return ind
def approximate_interactions(index, shap_values, X, feature_names=None):
    """ Order other features by how much interaction they seem to have with the feature at the given index.
    This just bins the SHAP values for a feature along that feature's value. For true Shapley interaction
    index values for SHAP see the interaction_contribs option implemented in XGBoost.
    """
    # convert from DataFrames if we got any
    if str(type(X)).endswith("'pandas.core.frame.DataFrame'>"):
        if feature_names is None:
            feature_names = X.columns
        X = X.values
    index = convert_name(index, shap_values, feature_names)
    # subsample very large datasets to bound the cost
    if X.shape[0] > 10000:
        a = np.arange(X.shape[0])
        np.random.shuffle(a)
        inds = a[:10000]
    else:
        inds = np.arange(X.shape[0])
    x = X[inds, index]
    srt = np.argsort(x)
    shap_ref = shap_values[inds, index]
    shap_ref = shap_ref[srt]
    inc = max(min(int(len(x) / 10.0), 50), 1)
    interactions = []
    for i in range(X.shape[1]):
        # BUG FIX: the `np.float` alias was removed in NumPy 1.24; use
        # np.float64 (also the default of encode_array_if_needed).
        encoded_val_other = encode_array_if_needed(X[inds, i][srt], dtype=np.float64)
        val_other = encoded_val_other
        v = 0.0
        # bin-wise correlation between this feature's values and the SHAP values
        if not (i == index or np.sum(np.abs(val_other)) < 1e-8):
            for j in range(0, len(x), inc):
                if np.std(val_other[j:j + inc]) > 0 and np.std(shap_ref[j:j + inc]) > 0:
                    v += abs(np.corrcoef(shap_ref[j:j + inc], val_other[j:j + inc])[0, 1])
        val_v = v
        # repeat the scan treating NaN-ness of the feature as the signal
        val_other = np.isnan(encoded_val_other)
        v = 0.0
        if not (i == index or np.sum(np.abs(val_other)) < 1e-8):
            for j in range(0, len(x), inc):
                if np.std(val_other[j:j + inc]) > 0 and np.std(shap_ref[j:j + inc]) > 0:
                    v += abs(np.corrcoef(shap_ref[j:j + inc], val_other[j:j + inc])[0, 1])
        nan_v = v
        interactions.append(max(val_v, nan_v))
    return np.argsort(-np.abs(interactions))
def sample(X, nsamples=100, random_state=0):
    """Return X unchanged when it already has at most nsamples rows, otherwise
    a reproducible random subsample of nsamples rows."""
    if X.shape[0] <= nsamples:
        return X
    return sklearn.utils.resample(X, n_samples=nsamples, random_state=random_state)
def safe_isinstance(obj, class_path_str):
    """
    Acts as a safe version of isinstance without having to explicitly
    import packages which may not exist in the users environment.
    Checks if obj is an instance of type specified by class_path_str.
    Parameters
    ----------
    obj: Any
        Some object you want to test against
    class_path_str: str or list
        A string or list of strings specifying full class paths
        Example: `sklearn.ensemble.RandomForestRegressor`
    Returns
    --------
    bool: True if isinstance is true and the package exists, False otherwise
    """
    if isinstance(class_path_str, str):
        paths = [class_path_str]
    elif isinstance(class_path_str, (list, tuple)):
        paths = list(class_path_str)
    else:
        # invalid input; the loop below raises on the empty path
        paths = ['']
    # try each module path in order
    for path in paths:
        if "." not in path:
            raise ValueError("class_path_str must be a string or list of strings specifying a full module path to a class. Eg, 'sklearn.ensemble.RandomForestRegressor'")
        # split on the last occurrence of "."
        module_name, class_name = path.rsplit(".", 1)
        # does the module exist at all?
        try:
            spec = importlib.util.find_spec(module_name)
        except:
            spec = None
        if spec is None:
            continue
        # does the class exist inside it?
        candidate = getattr(importlib.import_module(module_name), class_name, None)
        if candidate is None:
            continue
        return isinstance(obj, candidate)
    return False
def format_value(s, format_str):
    """ Strips trailing zeros and uses a unicode minus sign.

    s: value to format (formatted via format_str unless already a str)
    format_str: printf-style format applied to non-string values
    """
    if type(s) is not str:
        s = format_str % s
    # BUG FIX: the old regex re.sub(r'\.?0+$', '', s) also stripped zeros from
    # integer strings (e.g. "100" -> "1"); only strip after a decimal point.
    if "." in s:
        s = s.rstrip("0").rstrip(".")
    # BUG FIX: guard against empty strings before inspecting the first char
    if s.startswith("-"):
        s = u"\u2212" + s[1:]
    return s
def partition_tree(X, metric="correlation"):
    """Complete-linkage hierarchical clustering over the columns of X
    (NaNs are filled with the column means before computing distances)."""
    filled = X.fillna(X.mean())
    pairwise = sp.spatial.distance.pdist(filled.T, metric=metric)
    return sp.cluster.hierarchy.complete(pairwise)
class SHAPError(Exception):
    """Base error type for SHAP-specific failures."""
    pass
def encode_array_if_needed(arr, dtype=np.float64):
    """Cast arr to dtype; when the values are non-numeric strings, label-encode
    the unique values (in sorted order) first."""
    try:
        return arr.astype(dtype)
    except ValueError:
        categories = np.unique(arr)
        lookup = {value: code for code, value in enumerate(categories)}
        return np.array([lookup[value] for value in arr], dtype=dtype)
def from_sparse_to_file(filename, array, deli1=" ", deli2=":", ytarget=None):
    """Write `array` to `filename` in an svmlight-like sparse text format.

    Each row: the optional target, then index:value pairs for the stored
    entries of the CSR form of `array`; NaN entries are written as -1.

    filename: output path
    array: 2-D array-like (dense or sparse) to serialize
    deli1: separator between fields
    deli2: separator between index and value
    ytarget: optional per-row targets written as the first field
    """
    from scipy.sparse import csr_matrix
    import numpy as np
    zsparse = csr_matrix(array)
    indptr = zsparse.indptr
    indices = zsparse.indices
    data = zsparse.data
    print(" data lenth %d" % (len(data)))
    print(" indices lenth %d" % (len(indices)))
    print(" indptr lenth %d" % (len(indptr)))
    counter_row = 0
    # BUG FIX: use a context manager so the handle is closed even if writing fails
    with open(filename, "w") as f:
        for b in range(0, len(indptr) - 1):
            # if there is a target, print it else , print nothing
            if ytarget is not None:
                f.write(str(ytarget[b]) + deli1)
            for k in range(indptr[b], indptr[b + 1]):
                # NaN entries are encoded as -1
                value = -1 if np.isnan(data[k]) else data[k]
                sep = "" if k == indptr[b] else deli1
                f.write("%s%d%s%f" % (sep, indices[k], deli2, value))
            f.write("\n")
            counter_row += 1
            if counter_row % 10000 == 0:
                print(" row : %d " % (counter_row))
# -*- coding: utf-8 -*-
from ninolearn.IO.read_post import data_reader
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import pearsonr
from ninolearn.private import plotdir
from os.path import join
plt.close("all")
reader = data_reader(startdate='1980-02')
nino34 = reader.read_csv('nino3.4S')
max_lag = 13
# auto_corr[m, l]: autocorrelation of the Nino3.4 index for target season m
# (0 = DJF ... 11 = NDJ) against the series lagged by l months
auto_corr = np.zeros((12, max_lag))
p_value = np.zeros((12, max_lag))
seas_ticks = ['DJF', 'JFM', 'FMA', 'MAM', 'AMJ', 'MJJ',
              'JJA', 'JAS', 'ASO', 'SON', 'OND', 'NDJ']
for i in range(12):
    for j in range(max_lag):
        try:
            auto_corr[(i+j)%12,j],p_value[(i+j)%12,j] = pearsonr(nino34[i::12], nino34[i+j::12])
        except:
            # near the end of the series the lagged slice is one element
            # shorter; trim the unlagged slice to match.
            # NOTE(review): the bare except also hides unrelated errors —
            # confirm a length check would not be safer here.
            auto_corr[(i+j)%12,j],p_value[(i+j)%12,j] = pearsonr(nino34[i::12][:-1], nino34[i+j::12])
levels = np.linspace(-1, 1, 20+1)
fig, ax = plt.subplots(figsize=(5,3.5))
m = np.arange(1,13)
lag_arr = np.arange(max_lag)
C=ax.contourf(m,lag_arr,auto_corr.T, cmap=plt.cm.seismic,vmin=-1,vmax=1,levels=levels)
ax.set_xticks(m)
ax.set_xticklabels(seas_ticks, rotation='vertical')
ax.set_xlabel('Target Season')
ax.set_ylabel('Lag Month')
plt.colorbar(C, ticks=np.arange(-1,1.1,0.2))
plt.tight_layout()
# overlay significance contours (p = 0.01 solid, 0.05 dashed, 0.1 dotted)
ax.contour(m,lag_arr, p_value.T, levels=[0.01, 0.05, 0.1], linestyles=['solid', 'dashed', 'dotted'], colors='k')
plt.savefig(join(plotdir, 'autocorr.pdf'))
import os
import math
import pygame
import numpy as np
import matplotlib.pyplot as plt
from gym_scarecrow.params import *
def quinary_to_int(obs):
    """Interpret obs as base-5 digits (most significant first) and return the
    corresponding integer."""
    total = 0
    for digit in obs:
        total = total * 5 + digit
    return total
def get_grid(position):
    """Map a pixel position to its grid cell; NaN coordinates are zeroed
    in place before binning."""
    # FIXME: this is terrible, but the quickest easiest way to fix this in the time I have...
    for axis in (0, 1):
        if np.isnan(position[axis]):
            position[axis] = 0
    return [int(position[0] / GRID_SIZE) - 1, int(position[1] / GRID_SIZE) - 1]
def get_distance(p1, p2):
    """Euclidean distance between two 2-D points."""
    dx_sq = math.pow(p1[0] - p2[0], 2)
    dy_sq = math.pow(p1[1] - p2[1], 2)
    return math.sqrt(dx_sq + dy_sq)
def check_collision(p1, p2):
    """Return True when the two circular agents touch or overlap."""
    if get_distance(p1.position, p2.position) > p1.size + p2.size:
        return False
    return True
def load_pickle(file):
    """Load a .npy/.npz file that contains pickled (object) arrays.

    The original implementation monkey-patched the global np.load to force
    allow_pickle=True and did not restore it on error; passing the keyword
    directly is equivalent, exception-safe and thread-safe.
    """
    return np.load(file, allow_pickle=True)
def set_png_icon(image_path):
    """
    sets the pygame icon with a transparent background
    :param image_path: image path to be set as the icon
    :return:
    """
    # resolve the image relative to this module's directory
    full_path = os.path.join(os.path.dirname(__file__), image_path)
    icon = pygame.image.load(full_path)
    # force a 32x32 scale; reducing a large image directly can look bad
    icon = pygame.transform.scale(icon, (32, 32))
    # per-pixel alpha is not supported for pygame icons, so emulate
    # transparency by drawing onto a colorkeyed surface
    background = pygame.Surface(icon.get_size())
    chroma = (0, 255, 0)
    background.fill(chroma)
    background.set_colorkey(chroma)
    background.blit(icon, (0, 0))
    pygame.display.set_icon(background)
# -*- coding: utf-8 -*-
""" dati_selezione.ipynb
Extraction of data from ISS weekly covid-19 reports
https://www.epicentro.iss.it/coronavirus/aggiornamenti
See example pdf:
https://www.epicentro.iss.it/coronavirus/bollettino/Bollettino-sorveglianza-integrata-COVID-19_12-gennaio-2022.pdf
Requirements:
Python 3.6+, Ghostscript (ghostscript), Tkinter (python3-tk)
numpy, pandas, camelot, PyMuPDF, Beautiful Soup 4 """
import locale
import re
from datetime import datetime, timedelta
from os import chdir, path
from urllib import request
from urllib.parse import urljoin
import camelot
import fitz
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup
def get_surveillance_reports():
    """get_surveillance_reports() -> list
    return: list of "integrated surveillance of Covid-19 in Italy" reports"""
    # Source of the ISS reports
    epicentro_url = "https://www.epicentro.iss.it/coronavirus/aggiornamenti"
    # Requests URL and get http.client.HTTPResponse object
    with request.urlopen(epicentro_url) as response:
        # Parse text obtained
        soup = BeautifulSoup(response, "html.parser")
    # Find all hyperlinks present on webpage
    links = soup.find_all("a")
    # The table is available since 14/07/2021
    # The script has been updated to 2022-01-12 report
    # for older reports than 2022-01-12 use "dati_selezione_old1.py" and "dati_ISS_complessivi_old1.csv"
    # for older reports than 2021-11-10 use "dati_selezione_old.py and "dati_ISS_complessivi_old.csv"
    cut_date = pd.to_datetime("2022-01-12")
    cut_date_end = pd.to_datetime("2022-01-19")
    # keep only bulletin links dated inside [cut_date, cut_date_end)
    return [urljoin(epicentro_url, link["href"]) for link in links
            if "Bollettino-sorveglianza-integrata-COVID-19" in link["href"]
            and date_from_url(link["href"], is_raw=False) >= cut_date
            and (date_from_url(link["href"], is_raw=False) < cut_date_end)]
def page_from_url(sel_url, is_pop=False):
    """page_from_url(str, boolean) -> int
    sel_url: url of the report
    is_pop: choose between populations and general data
    return: number of the page containing the table (1-based), or None"""
    query = "TABELLA A[0-9] - POPOLAZIONE DI RIFERIMENTO" if is_pop else \
        "TABELLA [0-9] – NUMERO DI CASI DI COVID-19"
    with request.urlopen(sel_url) as response:
        content = response.read()
    with fitz.open(stream=content, filetype="pdf") as pdf:
        print("\nSearching for the selected table...")
        # Query for string
        for page in pdf:
            text = page.get_text()
            if re.search(query, text, re.IGNORECASE):
                # camelot expects 1-based page numbers
                return page.number + 1
    return None
def date_from_url(sel_url, is_raw=True):
    """date_from_url(str, boolean) -> datetime
    sel_url: url of the report
    is_raw: choose whether to return raw or translated date
    return: the raw "dd-month-yyyy" string, or a parsed datetime"""
    raw_date = re.findall(r"\d+[a-z-A-Z]+\d+", sel_url)[0]
    if is_raw:
        return raw_date
    # month names are locale dependent (%B)
    return datetime.strptime(raw_date, "%d-%B-%Y")
def check_df(sel_df):
    """check_df(df) -> None
    sel_df: dataframe
    return: check if the table has at least 2 columns; exits the process
    when the extraction is too incomplete to continue"""
    if len(sel_df.columns) >= 3:
        return
    # Table is incomplete, bye bye
    print("Can't extract the table! DIY!")
    exit()
def get_raw_table(sel_url, table):
    """get_raw_table(str, int) -> df
    sel_url: url of the report
    table: the page number of the table
    return: raw dataframe"""
    # Read the found page using camelot
    tables = camelot.read_pdf(sel_url,
                              pages=f"{table}",
                              flavor="stream")
    df_raw = tables[0].df
    # If the first parsed table is too narrow, fall back to the second one
    if len(df_raw.columns) < 5:
        # BUG FIX: the original condition was `len(tables) >= 1`, which is
        # also true when only one table exists and then `tables[1]` raises
        # IndexError; require at least two tables instead.
        if len(tables) > 1:
            df_raw = tables[1].df
        check_df(df_raw)
    return df_raw
def clean_raw_table(sel_df):
    """clean_raw_table(df) -> df
    sel_df: raw dataframe
    return: extract numerical data from the dataframe"""
    # We only care about the last 5 columns (counts per vaccination status)
    trimmed = sel_df[sel_df.columns[-5:]]
    # Keep only the rows whose first kept cell starts with a digit
    trimmed = trimmed[trimmed[trimmed.columns[0]].str.match(r"[0-9]")]
    # Strip thousands separators, parenthesised notes and other non-digits,
    # then convert everything to integers
    cleaned = trimmed.replace(r"\((.*)|[^0-9]", "", regex=True).apply(np.int64)
    cleaned.columns = ["non vaccinati",
                       "vaccinati 1 dose",
                       "vaccinati completo < x mesi",
                       "vaccinati completo > x mesi",
                       "vaccinati booster"]
    # Collapse the three "fully vaccinated" columns into a single one placed
    # right after "vaccinati 1 dose"
    pos = cleaned.columns.tolist().index("vaccinati 1 dose") + 1
    cleaned.insert(pos, "vaccinati completo", cleaned.iloc[:, 2:].sum(axis=1))
    cleaned.drop(["vaccinati completo < x mesi",
                  "vaccinati completo > x mesi"], axis=1, inplace=True)
    cleaned.reset_index(inplace=True, drop=True)
    return cleaned
def extract_data_from_raw(raw_df, to_df, sel_rows=None):
    """extract_data_from_raw(df, df, list) -> df, df
    raw_df: raw dataframe
    to_df: dataframe to update
    sel_rows: selected raw df rows
    return: processed dataframes
    NOTE(review): relies on the module-level `rep_date` being set (by
    get_report) before this is called — confirm call order."""
    if sel_rows is None:
        # populations mode
        f_pop = "data_iss_età_%s.xlsx"
        # Align hospitalizations/ti and deaths populations
        # Get hospitalizations/ti populations from 2nd latest report
        # Get deaths populations from 3rd latest report
        date_osp = rep_date - timedelta(days=15)
        df_hosp = pd.read_excel(f_pop % date_osp.date(), sheet_name="popolazioni")
        date_dec = rep_date - timedelta(days=22)
        df_deaths = pd.read_excel(f_pop % date_dec.date(), sheet_name="popolazioni")
        # Get general data
        results = np.concatenate((raw_df.iloc[4, :].values,
                                  to_df.loc[date_osp].values[0:4],
                                  to_df.loc[date_dec].values[0:4]))
        # Build ages dataframe
        # Merge df together
        df_ = pd.concat([raw_df.iloc[:4, :5], df_hosp.iloc[:, 1:5], df_deaths.iloc[:, 1:5]], axis=1)
        df_.columns = df_deaths.columns[1:]
        df_.set_index(df_deaths["età"], inplace=True)
    else:
        # epidemiological-data mode
        # Get general data
        results = raw_df.iloc[sel_rows, :].stack().values
        # Get data by age
        ages = ["12-39", "40-59", "60-79", "80+"]
        # raw table interleaves 4 age rows + 1 total row per block of 5
        rows_to_keep = np.arange(0, len(raw_df), 5)
        results_ = {age: raw_df.iloc[rows_to_keep+i, :].stack().values
                    for i, age in enumerate(ages)}
        # Build ages dataframe
        df_ = pd.DataFrame(results_).T
        df_.columns = to_df.columns
        df_.index.rename("età", inplace=True)
    # Add the new row at the top of the general df
    to_df.loc[rep_date] = results
    to_df.sort_index(ascending=False, inplace=True)
    to_df = to_df.apply(np.int64)
    return to_df, df_
def get_report(auto=True):
    """get_report(boolean)
    The script get the selected report.
    Select mode:
    - Automatic (auto=True): table of last available PDF is automatically read
    - Manual (auto=False): Index of the report will be asked as input
    Returns (report date as datetime, report url)."""
    # Get reports
    reports = get_surveillance_reports()
    if auto:
        # Get most recent report url
        rep_url = reports[0]
    else:
        # Build dictionary for manual mode
        reports_dict = dict(enumerate([date_from_url(report)
                                       for report in reports]))
        # Select report index as input
        rep_idx = input(f"\nChoose report index:\
\nFor oldest reports please use \
the dati_selezione_old.py script!\n\
\n\n{reports_dict}\n\n")
        rep_url = reports[int(rep_idx)]
    # Get report date
    rep_date = date_from_url(rep_url, is_raw=False)
    print(f"\nSelected report ({rep_date.date()}) is:\n{rep_url}")
    return rep_date, rep_url
def merge_df_into_excel(df_0, df_1, filename="dati_ISS_complessivi.xlsx"):
    """Write both dataframes into a single xlsx workbook.

    df_0: epidemiological data dataframe (sheet "dati epidemiologici")
    df_1: populations data dataframe (sheet "popolazioni")
    filename: name of the output xlsx file
    """
    sheets = (("dati epidemiologici", df_0), ("popolazioni", df_1))
    with pd.ExcelWriter(filename) as xlsx_writer:
        for sheet_name, frame in sheets:
            frame.to_excel(xlsx_writer, sheet_name=sheet_name)
def get_data_from_report(force=False):
    """Extract the report tables and merge them into the xlsx archives.

    force=True skips the up-to-date check and forces data extraction.
    NOTE(review): relies on the module-level globals `rep_date` and
    `rep_url` being set by get_report() before this is called.
    """
    # Read the epidemiological sheet to update from the repo
    df_0 = pd.read_excel("dati_ISS_complessivi.xlsx",
                         sheet_name="dati epidemiologici",
                         parse_dates=["data"],
                         index_col="data")
    # If table is already up-to-date stop the script
    if rep_date in df_0.index and not force:
        print("\nCSV are already up-to-date!")
        exit()
    # Get the main table page number
    main_table_pg = page_from_url(rep_url)
    # Can't really find the page, stop
    if main_table_pg is None:
        print("Table not found!")
        exit()
    print("\nFound page is:", main_table_pg)
    # get and clean the raw df
    df_raw = get_raw_table(rep_url, main_table_pg)
    df_raw = clean_raw_table(df_raw)
    # Finally, get the data
    # Keep totals only (rows 4, 9, 14, 19 of the raw table)
    rows_tot = [4, 9, 14, 19]
    df_0, df_1 = extract_data_from_raw(df_raw, df_0, sel_rows=rows_tot)
    # retrieve population data
    pop_table_pg = page_from_url(rep_url, is_pop=True)
    # Can't really find the page, stop
    if pop_table_pg is None:
        print("Table not found!")
        exit()
    print("\nFound page is:", pop_table_pg)
    # Read the populations sheet to update from the repo
    df_pop = pd.read_excel("dati_ISS_complessivi.xlsx",
                           sheet_name="popolazioni",
                           parse_dates=["data"],
                           index_col="data")
    # Get and clean the raw populations df
    df_raw_ = get_raw_table(rep_url, pop_table_pg)
    df_raw_ = clean_raw_table(df_raw_)
    df_2, df_3 = extract_data_from_raw(df_raw_, df_pop)
    # Save to xlsx: cumulative archive + per-report age-stratified file
    merge_df_into_excel(df_0, df_2)
    merge_df_into_excel(df_1, df_3, filename=f"data_iss_età_{rep_date.date()}.xlsx")
    print("\nDone!")
if __name__ == "__main__":
    # Run from the script's own directory so relative data paths resolve
    scriptpath = path.dirname(path.realpath(__file__))
    chdir(scriptpath)
    # Set locale to "it" to parse the Italian month names correctly
    locale.setlocale(locale.LC_ALL, "it_IT.UTF-8")
    # Get the report (use auto=False for manual selection)
    rep_date, rep_url = get_report()
    # Extract and save the data
    # Use force=True to skip the up-to-date check / for debug purposes
    get_data_from_report()
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import os
import json
import numpy as np
import tensorflow as tf
from tensorflow.python.client import timeline
from keras import backend as K
from keras.datasets import cifar10
from keras.utils import to_categorical
# from python import imagenet
from python.slalom.models import get_model, get_test_model
from python.slalom.quant_layers import transform, DenseQ, Dense
from python.slalom.utils import Results, timer
from python.slalom.sgxdnn import model_to_json, SGXDNNUtils, mod_test
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
os.environ["TF_USE_DEEP_CONV2D"] = '0'
DTYPE_VERIFY = np.float32
def main(_):
    """Build the (optionally quantized) test model, load CIFAR-10 and run
    the training loop, either via SGXDNN ('sgxdnn' mode) or with plain
    TensorFlow on CPU/GPU.

    NOTE(review): reads the module-level `args` namespace populated by the
    argparse block in __main__.
    """
    tf.logging.set_verbosity(tf.logging.INFO)
    tf.set_random_seed(1234)
    with tf.Graph().as_default():
        # Prepare graph
        num_batches = args.max_num_batches
        sgxutils = None
        # Pick device + session config for the requested execution mode
        if args.mode == 'tf-gpu':
            assert not args.use_sgx
            device = '/gpu:0'
            config = tf.ConfigProto(log_device_placement=False)
            config.allow_soft_placement = True
            config.gpu_options.per_process_gpu_memory_fraction = 0.90
            config.gpu_options.allow_growth = True
        elif args.mode == 'tf-cpu':
            assert not args.verify and not args.use_sgx
            device = '/cpu:0'
            # config = tf.ConfigProto(log_device_placement=False)
            config = tf.ConfigProto(log_device_placement=False, device_count={'CPU': 1, 'GPU': 0})
            config.intra_op_parallelism_threads = 1
            config.inter_op_parallelism_threads = 1
        else:
            assert args.mode == 'sgxdnn'
            device = '/gpu:0'
            config = tf.ConfigProto(log_device_placement=False)
            config.allow_soft_placement = True
            config.gpu_options.per_process_gpu_memory_fraction = 0.9
            config.gpu_options.allow_growth = True
        with tf.Session(config=config) as sess:
            with tf.device(device):
                # model, model_info = get_model(args.model_name, args.batch_size, include_top=not args.no_top)
                model, model_info = get_test_model(args.batch_size)
                model_copy = model
                # Transform (and quantize, when verifying) the Keras model
                model, linear_ops_in, linear_ops_out = transform(model, log=False, quantize=args.verify,
                                                                 verif_preproc=args.preproc,
                                                                 bits_w=model_info['bits_w'],
                                                                 bits_x=model_info['bits_x'])
            # dataset_images, labels = imagenet.load_validation(args.input_dir, args.batch_size,
            #                                                  preprocess=model_info['preprocess'],
            #                                                  num_preprocessing_threads=1)
            if args.mode == 'sgxdnn':
                # check weight equal or not
                # sgxutils = SGXDNNUtils(args.use_sgx, num_enclaves=args.batch_size)
                # sgxutils = SGXDNNUtils(args.use_sgx, num_enclaves=2)
                sgxutils = SGXDNNUtils(args.use_sgx)
                # Serialize the model and load it into the enclave runtime
                dtype = np.float32 if not args.verify else DTYPE_VERIFY
                model_json, weights = model_to_json(sess, model, args.preproc, dtype=dtype,
                                                    bits_w=model_info['bits_w'], bits_x=model_info['bits_x'])
                sgxutils.load_model(model_json, weights, dtype=dtype, verify=args.verify, verify_preproc=args.preproc)
            num_classes = np.prod(model.output.get_shape().as_list()[1:])
            print("num_classes: {}".format(num_classes))
            # Only print accuracy when the output really is a 10-way classifier
            print_acc = (num_classes == 10)
            res = Results(acc=print_acc)
            coord = tf.train.Coordinator()
            init = tf.initialize_all_variables()
            # sess.run(init)
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)
            run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
            run_metadata = tf.RunMetadata()
            # from multiprocessing.dummy import Pool as ThreadPool
            # pool = ThreadPool(3)
            # Load CIFAR-10, scale pixels to [0, 1] and one-hot the labels
            (X_train, y_train), (X_test, y_test) = cifar10.load_data()
            y_train = y_train.reshape(y_train.shape[0])
            y_test = y_test.reshape(y_test.shape[0])
            X_train = X_train.astype('float32')
            X_test = X_test.astype('float32')
            X_train /= 255
            X_test /= 255
            y_train = to_categorical(y_train, num_classes)
            y_test = to_categorical(y_test, num_classes)
            num_batches = int(X_train.shape[0] / args.batch_size)
            print('training batch number :{}'.format(num_batches))
            lr = 0.001
            for k in range(args.epoch):
                # NOTE(review): this decays lr on every epoch EXCEPT when
                # (k + 1) is a multiple of 10 — `% 10 == 0` was probably
                # intended; confirm before changing.
                if (k + 1) % 10:
                    lr *= 0.95
                print('Epoch {}/{}'.format(k + 1, args.epoch))
                for i in range(num_batches):
                    # Text progress bar for the current epoch
                    done_number = int(30 * (i + 1) / num_batches)
                    wait_to_be_done = 30 - done_number
                    print("\r{}/{} [{}>{}] {:.2f}% ".format((i + 1) * args.batch_size, X_train.shape[0],
                                                            '=' * done_number, '.' * wait_to_be_done,
                                                            100 * (i + 1) / num_batches), end='')
                    images = X_train[(i * args.batch_size):((i + 1) * args.batch_size)]
                    labels = y_train[(i * args.batch_size):((i + 1) * args.batch_size)]
                    if args.train:
                        # One training step inside the enclave
                        loss_batch, acc_batch = sgxutils.train(images, labels, num_classes=num_classes,
                                                               learn_rate=lr)
                        print(' - loss :{:.4f} - acc :{:.4f}'.format(loss_batch, acc_batch), end='')
                        sys.stdout.flush()
                    # res.start_timer()
                    # # no verify
                    # def func(data):
                    #     return sgxutils.predict(data[1], num_classes=num_classes, eid_idx=0)
                    # def get_gradient(model_copy,layer_index,images):
                    # # Below: compute the gradient at layer `layer_index`, used for debugging
                    # # layer = model_copy.layers[layer_index+1 if layer_index>0 else layer_index]
                    # layer = model_copy.layers[layer_index]
                    # print(layer.name)
                    # grad = model_copy.optimizer.get_gradients(model_copy.total_loss,layer.output)
                    # input_tensors = [model_copy.inputs[0], # input data
                    # model_copy.sample_weights[0], # how much to weight each sample by
                    # model_copy.targets[0], # labels
                    # K.learning_phase(), # train or test mode
                    # ]
                    # get_gradients = K.function(inputs=input_tensors, outputs=grad)
                    # inputs = [images, # X
                    # np.ones(args.batch_size), # sample weights
                    # labels, # y
                    # 0 # learning phase in TEST mode
                    # ]
                    # grad = get_gradients(inputs)[0]
                    # return grad
                    # images = np.random.random((200, 32, 32, 3))
                    # labels = np.zeros((200, 10))
                    # for i in range(200):
                    # index = np.random.randint(0, 10)
                    # labels[i][index] = 1
            # Reference run: train the untransformed copy with plain Keras
            model_copy.fit(X_train, y_train, batch_size=32, epochs=1)
            coord.request_stop()
            coord.join(threads)
            if sgxutils is not None:
                sgxutils.destroy()
if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser()
    # Positional args: which model to build and which backend to run on
    parser.add_argument('model_name', type=str,
                        choices=['vgg_16', 'vgg_19', 'inception_v3', 'mobilenet', 'mobilenet_sep',
                                 'resnet_18', 'resnet_34', 'resnet_50', 'resnet_101', 'resnet_152'])
    parser.add_argument('mode', type=str, choices=['tf-gpu', 'tf-cpu', 'sgxdnn'])
    parser.add_argument('--input_dir', type=str,
                        default='../imagenet/',
                        help='Input directory with images.')
    parser.add_argument('--batch_size', type=int, default=8,
                        help='How many images process at one time.')
    parser.add_argument('--max_num_batches', type=int, default=2,
                        help='Max number of batches to evaluate.')
    parser.add_argument('--verify', action='store_true',
                        help='Activate verification.')
    parser.add_argument('--preproc', action='store_true',
                        help='Use preprocessing for verification.')
    parser.add_argument('--use_sgx', action='store_true')
    parser.add_argument('--verify_batched', action='store_true',
                        help='Use batched verification.')
    parser.add_argument('--no_top', action='store_true',
                        help='Omit top part of network.')
    parser.add_argument('--train', action='store_true',
                        help='Train instead of verify.')
    parser.add_argument('--epoch', type=int, default=1,
                        help='How many times you want to train the whole data set.')
    # `args` is read as a module-level global by main()
    args = parser.parse_args()
    # tf.app.run() parses TF flags and then invokes main()
    tf.app.run()
#!/usr/bin/env python
# -*- coding: utf-8 -*
import sys
import rospy
import numpy as np
import os
import time
import matplotlib.pyplot as plt
import pandas as pd
from geometry_msgs.msg import PoseWithCovarianceStamped
from turtlesim.msg import Pose
from scipy.spatial import KDTree
from tf.transformations import euler_from_quaternion, quaternion_from_euler
from std_msgs.msg import Float64,Int32,Bool
m=2  # subplot grid rows (Plotter layout)
n=1  # subplot grid columns (Plotter layout)
p=20  # number of tree points interpolated between 2 consecutive trajectory positions
class Plotter:
    """ROS node that live-plots the leader/follower turtlebot positions and
    their distance error to the nearest point of a reference trajectory
    (A, B or C, chosen via the /lane topic)."""
    def __init__(self):
        self.poseflag=False
        ruta=os.path.dirname(os.path.abspath(__file__))+'/Generacion _de_Trayectorias/'
        # Load the three reference trajectories and densify each into a
        # KD-tree (p interpolated points per segment) for nearest-point queries
        PuntosA=np.array(np.load(ruta+'PuntosA1.npy'))
        self.ArbolA=self.Arbol(PuntosA,p)
        PuntosB=np.array(np.load(ruta+'PuntosB1.npy'))
        self.ArbolB=self.Arbol(PuntosB,p)
        PuntosC=np.array(np.load(ruta+'PuntosC1.npy'))
        self.ArbolC=self.Arbol(PuntosC,p)
        # Leader pose and recorded history (x, y, error)
        self.turtlebot3_pose_L=Pose()
        self.Positions_XL=[]
        self.Positions_YL=[]
        self.Positions_EL=[]
        # Follower pose and recorded history (x, y, error)
        self.turtlebot3_pose_F=Pose()
        self.Positions_XF=[]
        self.Positions_YF=[]
        self.Positions_EF=[]
        self.Positions_Count=[]
        self.count=0
        self.lane=0
        self.graph=False
        self.clear=False
        self.save_data=False
        self.language=False
        #self.fig = plt.figure(figsize=(7,7), facecolor='w')
        #self.fig.canvas.set_window_title('Trayectorias generadas')
        self.lane_subscriber=rospy.Subscriber("/lane",Int32,self.laneCallback,queue_size=1)
        self.posel_subscriber=rospy.Subscriber("/tb3_0/amcl_pose", PoseWithCovarianceStamped,self.poseCallback,queue_size=1)
        self.posef_subscriber=rospy.Subscriber("/tb3_1/amcl_pose", PoseWithCovarianceStamped,self.poseCallback2,queue_size=1)
        self.graph_subscriber=rospy.Subscriber("/graph", Bool,self.graph_on_Callback,queue_size=1)
        self.clear_graph_subscriber=rospy.Subscriber("/clear_graph",Bool,self.clear_graph_Callback,queue_size=1)
        self.save_data_subscriber=rospy.Subscriber("/save_data",Bool,self.save_data_Callback,queue_size=1)
        self.change_language_subscriber=rospy.Subscriber("/change_language",Bool,self.change_language_Callback,queue_size=1)
        plt.ion()
    def poseCallback(self,data):
        # Leader AMCL pose update (tb3_0)
        self.turtlebot3_pose_L.x=data.pose.pose.position.x
        self.turtlebot3_pose_L.y=data.pose.pose.position.y
        #self.Positions_XL.append(self.turtlebot3_pose_L.x)
        #self.Positions_YL.append(self.turtlebot3_pose_L.y)
        self.poseflag=True
    def poseCallback2(self,data):
        # Follower AMCL pose update (tb3_1)
        self.turtlebot3_pose_F.x=data.pose.pose.position.x
        self.turtlebot3_pose_F.y=data.pose.pose.position.y
        #self.Positions_X_F.append(self.turtlebot3_pose_F.x)
        #self.Positions_Y_F.append(self.turtlebot3_pose_F.y)
        self.poseflag=True
    def laneCallback(self,data):
        # Active trajectory selector: 1=A, 2=B, any other positive value=C
        self.lane=data.data
    def graph_on_Callback(self,data):
        # Enable/disable live plotting
        self.graph=data.data
    def clear_graph_Callback(self,data):
        # Request a history reset on the next plotear() iteration
        self.clear=True
    def change_language_Callback(self,data):
        # False: Spanish plot titles, True: English plot titles
        self.language=data.data
    def save_data_Callback(self,data):
        # Only allow saving once a few samples have been collected
        if self.count>2:
            self.save_data=True
    def plotear(self):
        # One plotting iteration; called in a loop from main()
        self.rate = rospy.Rate(20)
        if self.save_data==True:
            self.save_data=False
            # Dump the recorded histories both as .npy and as .xlsx
            po=np.array(self.Positions_Count)
            xl=np.array(self.Positions_XL)
            yl=np.array(self.Positions_YL)
            el=np.array(self.Positions_EL)
            xf=np.array(self.Positions_XF)
            yf=np.array(self.Positions_YF)
            ef=np.array(self.Positions_EF)
            matrix=np.array([po,xl,yl,el,xf,yf,ef]).T
            ruta=os.path.dirname(os.path.abspath(__file__))
            nombre_Archivo='/Datos_Guardados/Grafica_'+time.strftime("%Y_%m_%d_%H_%M_%S")
            nombre_ArchivoP= nombre_Archivo+'.npy'
            nombre_ArchivoE= nombre_Archivo+'.xlsx'
            np.save(ruta+nombre_ArchivoP, matrix)
            df = pd.DataFrame(matrix,columns = ['Muestra','x Lider','y Lider','error Lider','x Seguidor','y Seguidor','error Seguidor'])
            df.to_excel(ruta+nombre_ArchivoE, sheet_name='Datos Obtenidos')
            print 'Datos guardados'
        if self.clear==True:
            # Reset all histories and wipe both subplots
            self.clear=False
            self.count=0
            self.Positions_Count=[]
            self.Positions_XL=[]
            self.Positions_YL=[]
            self.Positions_EL=[]
            self.Positions_XF=[]
            self.Positions_YF=[]
            self.Positions_EF=[]
            plt.subplot(m,n,1)
            plt.cla()
            if self.language==True:
                plt.title('Reached Positions (Lider-Red,Follower-Blue)[m]')
            else:
                plt.title('Posiciones Alcanzadas (Lider-Rojo,Seguidor-Azul)[m]')
            plt.draw()
            plt.pause(0.00000000001)
            plt.subplot(m,n,2)
            plt.cla()
            if self.language==True:
                plt.title('Error Graph [m]')
            else:
                plt.title('Error Graficado [m]')
            plt.draw()
            plt.pause(0.00000000001)
            print 'Grafica Limpiada'
        if self.graph==True:
            plt.subplot(m,n,1)
            # NOTE(review): plt.hold was removed in matplotlib >= 3.0
            plt.hold(True)
            # Leader position (red)
            plt.plot(self.turtlebot3_pose_L.x,self.turtlebot3_pose_L.y,'*r')
            # Follower position (blue)
            plt.plot(self.turtlebot3_pose_F.x,self.turtlebot3_pose_F.y,'*b')
            plt.axis([-1.25,1.25,-1.25,1.25])
            if self.language==True:
                plt.title('Reached Positions (Lider-Red,Follower-Blue)[m]')
            else:
                plt.title('Posiciones Alcanzadas (Lider-Rojo,Seguidor-Azul)[m]')
            plt.draw()
            plt.pause(0.00000000001)
            if self.lane>0:
                # Store positions
                self.Positions_XL.append(self.turtlebot3_pose_L.x)
                self.Positions_YL.append(self.turtlebot3_pose_L.y)
                self.Positions_XF.append(self.turtlebot3_pose_F.x)
                self.Positions_YF.append(self.turtlebot3_pose_F.y)
                plt.subplot(m,n,2)
                plt.hold(True)
                if self.lane==1:
                    # Leader error (distance to nearest point of trajectory A)
                    distL, index = self.ArbolA.query((self.turtlebot3_pose_L.x,self.turtlebot3_pose_L.y))
                    # Follower error
                    distF, index = self.ArbolA.query((self.turtlebot3_pose_F.x,self.turtlebot3_pose_F.y))
                elif self.lane==2:
                    # Leader error (trajectory B)
                    distL, index = self.ArbolB.query((self.turtlebot3_pose_L.x,self.turtlebot3_pose_L.y))
                    # Follower error
                    distF, index = self.ArbolB.query((self.turtlebot3_pose_F.x,self.turtlebot3_pose_F.y))
                else:
                    # Leader error (trajectory C)
                    distL, index = self.ArbolC.query((self.turtlebot3_pose_L.x,self.turtlebot3_pose_L.y))
                    # Follower error
                    distF, index = self.ArbolC.query((self.turtlebot3_pose_F.x,self.turtlebot3_pose_F.y))
                self.Positions_EL.append(distL)
                self.Positions_EF.append(distF)
                self.Positions_Count.append(self.count)
                plt.plot(self.count,distL,'*r')
                plt.plot(self.count,distF,'*b')
                self.count=self.count+1
                plt.axis([0,self.count,0,0.8])
                if self.language==True:
                    plt.title('Error Graph ('+ str(distL) + ',' + str(distF) + ')[m]')
                else:
                    plt.title('Error Graficado('+ str(distL) + ',' + str(distF) + ')[m]')
                plt.draw()
                plt.pause(0.00000000001)
            else:
                plt.subplot(m,n,2)
                if self.language==True:
                    plt.title('Error Graph [m]')
                else:
                    plt.title('Error Graficado [m]')
                plt.pause(0.00000000001)
        self.rate.sleep()
    def Arbol(self,xy,longitud):
        """Densify the closed polyline `xy` ((N, 2) array) with `longitud`
        interpolated points per segment and return a KDTree over the result.

        NOTE(review): range(l+1) re-emits the first segment once; the
        duplicated points are harmless for nearest-neighbour queries.
        """
        ax,ay=xy.T
        l=len(ax)
        x=[]
        y=[]
        for i in range(l+1):
            # Wrap indices so the trajectory is treated as a closed loop
            ind1=i%l
            ind2=(i+1)%l
            xo=ax[ind1]
            xf=ax[ind2]
            diferenciaX=xf-xo
            yo=ay[ind1]
            yf=ay[ind2]
            diferenciaY=yf-yo
            for j in range(longitud):
                # Linear interpolation between the two segment endpoints
                escala=float(j)/float(longitud)
                x.append(xo+diferenciaX*escala)
                y.append(yo+diferenciaY*escala)
        x1=np.array(x)
        y1=np.array(y)
        xy=np.array([x1,y1]).T
        return KDTree(xy)
def main():
rospy.init_node('Plotter', anonymous=True)
P=Plotter() # constructor creates publishers / subscribers
print 'PLotter inicializado'
while (not rospy.is_shutdown()):
P.plotear()
#rospy.spin()
if __name__ == '__main__':
    try:
        main()
    except rospy.ROSInterruptException:
        # Normal shutdown (Ctrl-C / node kill): exit quietly
        pass
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional
from parlai.core.params import ParlaiParser
from parlai.core.opt import Opt
import regex # noqa: F401
import scipy # noqa: F401
import sklearn # noqa: F401
import unicodedata # noqa: F401
from parlai.core.agents import Agent
from parlai.utils.io import PathManager
from parlai.utils.misc import AttrDict
from .doc_db import DocDB
from .tfidf_doc_ranker import TfidfDocRanker
from .build_tfidf import run as build_tfidf
from collections import deque
import math
import random
import os
import json
import sqlite3
class TfidfRetrieverAgent(Agent):
    """
    TFIDF-based retriever agent.

    If given a task to specify, will first store entries of that task into
    a SQLite database and then build a sparse tfidf matrix of those entries.
    If not, will build it on-the-fly whenever it sees observations with labels.

    This agent generates responses by building a sparse entry of the incoming
    text observation, and then returning the highest-scoring documents
    (calculated via sparse matrix multiplication) from the tfidf matrix.

    By default, this will return the "value" (the response) of the closest
    entries. For example, saying "What is your favorite movie?" will not return
    the text "Which movie is your favorite?" (high match) but rather the reply
    to that (e.g. "Jurassic Park"). To use this agent for retrieving texts
    (e.g. Wikipedia Entries), you can store label-less examples with the
    '--retriever-task' argument and switch '--retriever-mode' to 'keys'.
    """

    @classmethod
    def add_cmdline_args(
        cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
    ) -> ParlaiParser:
        """Register the retriever's command-line arguments.

        NOTE(review): the parser is mutated in place and not returned,
        despite the annotation — confirm against how ParlAI core consumes
        this hook before adding a return.
        """
        parser = parser.add_argument_group('Retriever Arguments')
        parser.add_argument(
            '--retriever-numworkers',
            type=int,
            default=None,
            help='Number of CPU processes (for tokenizing, etc)',
        )
        parser.add_argument(
            '--retriever-ngram',
            type=int,
            default=2,
            help='Use up to N-size n-grams (e.g. 2 = unigrams + bigrams)',
        )
        parser.add_argument(
            '--retriever-hashsize',
            type=int,
            default=int(math.pow(2, 24)),
            help='Number of buckets to use for hashing ngrams',
        )
        parser.add_argument(
            '--retriever-tokenizer',
            type=str,
            default='simple',
            help='String option specifying tokenizer type to use.',
        )
        parser.add_argument(
            '--retriever-num-retrieved',
            default=5,
            type=int,
            help='How many docs to retrieve.',
        )
        parser.add_argument(
            '--remove-title',
            type='bool',
            default=False,
            help='Whether to remove the title from the retrieved passage',
        )
        parser.add_argument(
            '--retriever-mode',
            choices=['keys', 'values'],
            default='values',
            help='Whether to retrieve the stored key or the stored value. For '
            'example, if you want to return the text of an example, use '
            'keys here; if you want to return the label, use values here.',
        )
        parser.add_argument(
            '--index-by-int-id',
            type='bool',
            default=True,
            help=(
                'Whether to index into database by doc id as an integer. This '
                'defaults to true for DBs built using ParlAI.'
            ),
        )
        parser.add_argument(
            '--tfidf-context-length',
            default=-1,
            type=int,
            help='Number of past utterances to remember when '
            'building flattened batches of data in multi-'
            'example episodes.',
        )
        parser.add_argument(
            '--tfidf-include-labels',
            default=True,
            type='bool',
            help='Specifies whether or not to include labels '
            'as past utterances when building flattened '
            'batches of data in multi-example episodes.',
        )

    def __init__(self, opt, shared=None):
        """Set up the SQLite doc store and (if already built) the ranker.

        Raises:
            RuntimeError: if --model_file is unset (it anchors the .db and
                .tfidf side files).
        """
        super().__init__(opt, shared)
        self.id = 'SparseTfidfRetrieverAgent'
        if not opt.get('model_file') or opt['model_file'] == '':
            raise RuntimeError('Must set --model_file')

        # Side files derived from the model file path
        opt['retriever_dbpath'] = opt['model_file'] + '.db'
        opt['retriever_tfidfpath'] = opt['model_file'] + '.tfidf'
        self.db_path = opt['retriever_dbpath']
        self.tfidf_path = opt['retriever_tfidfpath']

        # Arguments forwarded to build_tfidf() on rebuild()
        self.tfidf_args = AttrDict(
            {
                'db_path': opt['retriever_dbpath'],
                'out_dir': opt['retriever_tfidfpath'],
                'ngram': opt['retriever_ngram'],
                'hash_size': opt['retriever_hashsize'],
                'tokenizer': opt['retriever_tokenizer'],
                'num_workers': opt['retriever_numworkers'],
            }
        )

        # Create an empty document table on first use
        if not os.path.exists(self.db_path):
            conn = sqlite3.connect(self.db_path)
            c = conn.cursor()
            c.execute(
                'CREATE TABLE documents ' '(id INTEGER PRIMARY KEY, text, value);'
            )
            conn.commit()
            conn.close()

        self.db = DocDB(db_path=opt['retriever_dbpath'])
        # Only load a ranker when the tfidf matrix has been built already
        if os.path.exists(self.tfidf_path + '.npz'):
            if shared is None:
                self.ranker = TfidfDocRanker(
                    tfidf_path=opt['retriever_tfidfpath'], strict=False
                )
            else:
                self.ranker = shared['doc_ranker']
        self.ret_mode = opt['retriever_mode']
        self.cands_hash = {}  # cache for candidates
        self.triples_to_add = []  # in case we want to add more entries

        clen = opt.get('tfidf_context_length', -1)
        self.context_length = clen if clen >= 0 else None
        self.include_labels = opt.get('tfidf_include_labels', True)
        self.reset()

    def share(self):
        """Share the (expensive) ranker with clone agents."""
        shared = super().share()
        shared['doc_ranker'] = self.ranker
        return shared

    def reset(self):
        """Clear episode state and the rolling context window."""
        super().reset()
        self.episode_done = False
        self.current = []
        self.context = deque(maxlen=self.context_length)

    def doc2txt(self, docid):
        """Return the stored text ('keys' mode) or value for a doc id."""
        if not self.opt.get('index_by_int_id', True):
            docid = self.ranker.get_doc_id(docid)
        if self.ret_mode == 'keys':
            return self.db.get_doc_text(docid)
        elif self.ret_mode == 'values':
            return self.db.get_doc_value(docid)
        else:
            raise RuntimeError(
                'Retrieve mode {} not yet supported.'.format(self.ret_mode)
            )

    def rebuild(self):
        """Flush queued triples to the DB and rebuild the tfidf ranker.

        No-op when nothing has been queued since the last rebuild.
        """
        if len(self.triples_to_add) > 0:
            self.db.add(self.triples_to_add)
            self.triples_to_add.clear()
            # rebuild tfidf
            build_tfidf(self.tfidf_args)
            self.ranker = TfidfDocRanker(tfidf_path=self.tfidf_path, strict=False)

    def save(self, path=None):
        """Rebuild the index and persist the opt; the model file itself is
        just a marker (the real data lives in the .db/.tfidf side files)."""
        self.rebuild()
        with PathManager.open(self.opt['model_file'] + '.opt', 'w') as handle:
            json.dump(self.opt, handle)
        with PathManager.open(self.opt['model_file'], 'w') as f:
            f.write('\n')

    def train_act(self):
        """Queue the finished episode's (text, label) pairs for indexing.

        Requires streaming, unbatched, single-epoch training so episodes
        arrive in order exactly once.
        """
        if (
            'ordered' not in self.opt.get('datatype', 'train:ordered')
            or self.opt.get('batchsize', 1) != 1
            or self.opt.get('num_epochs', 1) != 1
        ):
            raise RuntimeError(
                'Need to set --batchsize 1, --datatype train:ordered, --num_epochs 1'
            )
        obs = self.observation
        self.current.append(obs)
        self.episode_done = obs.get('episode_done', False)

        if self.episode_done:
            for ex in self.current:
                if 'text' in ex:
                    text = ex['text']
                    self.context.append(text)
                    if len(self.context) > 1:
                        text = '\n'.join(self.context)

                # add labels to context
                labels = ex.get('labels', ex.get('eval_labels'))
                label = None
                if labels is not None:
                    label = random.choice(labels)
                    if self.include_labels:
                        self.context.append(label)
                # use None for ID to auto-assign doc ids--we don't need to
                # ever reverse-lookup them
                self.triples_to_add.append((None, text, label))

            self.episode_done = False
            self.current.clear()
            self.context.clear()

        return {'id': self.getID(), 'text': obs.get('labels', ['I don\'t know'])[0]}

    def act(self):
        """Answer the observation with the closest indexed documents."""
        obs = self.observation
        reply = {}
        reply['id'] = self.getID()
        if 'labels' in obs:
            return self.train_act()
        if 'text' in obs:
            self.rebuild()  # no-op if nothing has been queued to store
            doc_ids, doc_scores = self.ranker.closest_docs(
                obs['text'], self.opt.get('retriever_num_retrieved', 5)
            )

            if len(doc_ids) > 0:
                picks = [self.doc2txt(int(did)) for did in doc_ids]
                pick = self.doc2txt(int(doc_ids[0]))  # select best response
                if self.opt.get('remove_title', False):
                    picks = ['\n'.join(p.split('\n')[1:]) for p in picks]
                    pick = '\n'.join(pick.split('\n')[1:])
                reply['text_candidates'] = picks
                reply['candidate_scores'] = doc_scores.tolist()
                reply['text'] = pick
                reply['candidate_ids'] = doc_ids.tolist()

        return reply
from __future__ import print_function
import math
import pickle
import torch
import torch.nn as nn
import numpy as np
from collections import Counter, namedtuple
from .projection import NICETrans, LSTMNICE
from .dmv_viterbi_model import DMVDict
from torch.nn import Parameter
from .utils import log_sum_exp, \
unravel_index, \
data_iter, \
to_input_tensor, \
stable_math_log
# Large finite stand-in for -inf in log-space computations
NEG_INFINITY = -1e20
# Container for a decoded parse: the tree, its decode tag, and its children
ParseTree = namedtuple("parsetree", ["tree", "decode_tag", "children"])
def test_piodict(piodict):
"""
test PIOdict 0 value
"""
for key, value in piodict.dict.iteritems():
if value <= 0:
print(key, value)
return False
return True
def log_softmax(input, dim):
    """Normalize log-scores along `dim` so they sum to one in prob space."""
    normalizer = log_sum_exp(input, dim=dim, keepdim=True).expand_as(input)
    return input - normalizer
class DMVFlow(nn.Module):
    def __init__(self, args, num_state, num_dims,
                 punc_sym, word_vec_dict=None):
        """DMV model with a flow-based emission projection.

        Args:
            args: experiment config; fields read here include pos_emb_dim,
                model ('gaussian'|'nice'|'lstmnice'), couple_layers,
                cell_layers, lstm_layers, device, train_var and the
                freeze_* flags.
            num_state: number of latent POS states.
            num_dims: word-embedding dimensionality (the POS-embedding size
                is appended on top of this).
            punc_sym: punctuation symbols, stored for later use.
            word_vec_dict: currently unused (kept for interface
                compatibility).
        """
        super(DMVFlow, self).__init__()
        self.num_state = num_state
        self.num_dims = num_dims + args.pos_emb_dim
        self.pos_emb_dim = args.pos_emb_dim
        self.args = args
        self.device = args.device
        self.hidden_units = self.num_dims // 2
        self.lstm_hidden_units = self.num_dims
        self.punc_sym = punc_sym
        # self.word2vec = word_vec_dict
        self.harmonic = False
        # Optional learned POS embedding concatenated onto word embeddings
        if self.pos_emb_dim > 0:
            self.pos_embed = nn.Embedding(num_state, self.pos_emb_dim)
            self.proj_group = list(self.pos_embed.parameters())
            if args.freeze_pos_emb:
                self.pos_embed.weight.requires_grad = False
        else:
            self.proj_group = []
        self.means = Parameter(torch.Tensor(self.num_state, self.num_dims))
        # Flow projection layer; for args.model == 'gaussian' none is built
        if args.model == 'nice':
            self.proj_layer = NICETrans(self.args.couple_layers,
                                        self.args.cell_layers,
                                        self.hidden_units,
                                        self.num_dims,
                                        self.device)
        elif args.model == "lstmnice":
            self.proj_layer = LSTMNICE(self.args.lstm_layers,
                                       self.args.couple_layers,
                                       self.args.cell_layers,
                                       self.lstm_hidden_units,
                                       self.hidden_units,
                                       self.num_dims,
                                       self.device)
        # Gaussian Variance
        self.var = Parameter(torch.zeros((num_state, self.num_dims), dtype=torch.float32))
        if not self.args.train_var:
            self.var.requires_grad = False
        # DMV attachment priors; dim0 is head and dim1 is dependent
        self.attach_left = Parameter(torch.Tensor(self.num_state, self.num_state))
        self.attach_right = Parameter(torch.Tensor(self.num_state, self.num_state))
        # (stop, adj, h)
        # dim0: 0 is nonstop, 1 is stop
        # dim2: 0 is nonadjacent, 1 is adjacent
        self.stop_right = Parameter(torch.Tensor(2, self.num_state, 2))
        self.stop_left = Parameter(torch.Tensor(2, self.num_state, 2))
        self.root_attach_left = Parameter(torch.Tensor(self.num_state))
        # Parameter groups used for selective freezing/optimization below
        self.prior_group = [self.attach_left, self.attach_right, self.stop_left, self.stop_right, \
                            self.root_attach_left]
        if args.model == "gaussian":
            self.proj_group += [self.means, self.var]
        else:
            self.proj_group += list(self.proj_layer.parameters()) + [self.means, self.var]
        if self.args.freeze_prior:
            for x in self.prior_group:
                x.requires_grad = False
        if self.args.freeze_proj:
            for param in self.proj_layer.parameters():
                param.requires_grad = False
        if self.args.freeze_mean:
            self.means.requires_grad = False
    def init_params(self, init_seed, train_data):
        """Initialize model parameters via one of three exclusive paths:
        load a full NICE checkpoint, load a Gaussian checkpoint, or
        randomly initialize priors and seed mean/variance empirically.

        init_seed:(sents, masks)
        sents: (seq_length, batch_size, features)
        masks: (seq_length, batch_size)
        """
        if self.args.load_nice != '':
            self.load_state_dict(torch.load(self.args.load_nice), strict=True)
            # Keep copies of the loaded values for later comparison against
            # the initialization
            self.attach_left_init = self.attach_left.clone()
            self.attach_right_init = self.attach_right.clone()
            self.stop_left_init = self.stop_left.clone()
            self.stop_right_init = self.stop_right.clone()
            self.root_attach_left_init = self.root_attach_left.clone()
            self.means_init = self.means.clone()
            self.proj_init = [param.clone() for param in self.proj_layer.parameters()]
            # self.proj_layer.reset_parameters()
            # if self.args.init_mean:
            #     self.init_mean(train_data)
            # if self.args.init_var:
            #     self.init_var(train_data)
            return
        if self.args.load_gaussian != '':
            self.load_state_dict(torch.load(self.args.load_gaussian), strict=True)
            return
        # init transition params (uniform noise shifted to favor stop/adjacent)
        self.attach_left.uniform_().add_(1.)
        self.attach_right.uniform_().add_(1.)
        self.root_attach_left.uniform_().add_(1)
        self.stop_right[0, :, 0].uniform_().add_(1)
        self.stop_right[1, :, 0].uniform_().add_(2)
        self.stop_left[0, :, 0].uniform_().add_(1)
        self.stop_left[1, :, 0].uniform_().add_(2)
        self.stop_right[0, :, 1].uniform_().add_(2)
        self.stop_right[1, :, 1].uniform_().add_(1)
        self.stop_left[0, :, 1].uniform_().add_(2)
        self.stop_left[1, :, 1].uniform_().add_(1)
        self.var.uniform_(0.5, 1.5)
        # initialize mean and variance with empirical values
        sents = init_seed.embed
        masks = init_seed.mask
        if self.pos_emb_dim > 0:
            pos = init_seed.pos
            pos_embed = self.pos_embed(pos)
            sents = torch.cat((sents, pos_embed), dim=-1)
        sents, _ = self.transform(sents, masks)
        features = sents.size(-1)
        flat_sents = sents.view(-1, features)
        # Masked empirical mean/variance over all token positions
        seed_mean = torch.sum(masks.view(-1, 1).expand_as(flat_sents) *
                              flat_sents, dim=0) / masks.sum()
        seed_var = torch.sum(masks.view(-1, 1).expand_as(flat_sents) *
                             ((flat_sents - seed_mean.expand_as(flat_sents)) ** 2),
                             dim=0) / masks.sum()
        # self.var.copy_(2 * seed_var)
        if self.args.pos_emb_dim > 0:
            self.init_mean(train_data)
            self.init_var(train_data)
        else:
            self.var.copy_(seed_var)
        # NOTE(review): this unconditionally overwrites the variance set
        # above (seed_var / init_var) with 1.0 — confirm this is intended.
        self.var.fill_(1.)
        # Means: small Gaussian noise around the empirical seed mean
        self.means.data.normal_().mul_(0.04)
        self.means.data.add_(seed_mean.data.expand_as(self.means.data))
def init_mean(self, train_data):
emb_dict = {}
cnt_dict = Counter()
for iter_obj in train_data.data_iter(self.args.batch_size):
sents_t = iter_obj.embed
if self.args.pos_emb_dim > 0:
pos_embed_t = self.pos_embed(iter_obj.pos)
sents_t = torch.cat((sents_t, pos_embed_t), dim=-1)
sents_t, _ = self.transform(sents_t, iter_obj.mask)
sents_t = sents_t.transpose(0, 1)
pos_t = iter_obj.pos.transpose(0, 1)
mask_t = iter_obj.mask.transpose(0, 1)
for emb_s, tagid_s, mask_s in zip(sents_t, pos_t, mask_t):
for tagid, emb, mask in zip(tagid_s, emb_s, mask_s):
tagid = tagid.item()
mask = mask.item()
if tagid in emb_dict:
emb_dict[tagid] = emb_dict[tagid] + emb * mask
else:
emb_dict[tagid] = emb * mask
cnt_dict[tagid] += mask
for tagid in emb_dict:
self.means[tagid] = emb_dict[tagid] / cnt_dict[tagid]
    def init_var(self, train_data):
        """Compute the masked per-tag empirical variance of the transformed
        embeddings around self.means.

        NOTE(review): the final fill_(1.) overwrites the computed variance
        with 1.0 for every tag (see the commented-out partial fill) —
        confirm this is intended before relying on the statistics.
        """
        emb_dict = {}
        cnt_dict = Counter()
        for iter_obj in train_data.data_iter(self.args.batch_size):
            sents_t = iter_obj.embed
            if self.args.pos_emb_dim > 0:
                pos_embed_t = self.pos_embed(iter_obj.pos)
                sents_t = torch.cat((sents_t, pos_embed_t), dim=-1)
            sents_t, _ = self.transform(sents_t, iter_obj.mask)
            # Re-arrange to (batch, seq, ...) to iterate sentence by sentence
            sents_t = sents_t.transpose(0, 1)
            pos_t = iter_obj.pos.transpose(0, 1)
            mask_t = iter_obj.mask.transpose(0, 1)
            for emb_s, tagid_s, mask_s in zip(sents_t, pos_t, mask_t):
                for tagid, emb, mask in zip(tagid_s, emb_s, mask_s):
                    tagid = tagid.item()
                    mask = mask.item()
                    # Accumulate masked squared deviations per tag
                    if tagid in emb_dict:
                        emb_dict[tagid] = emb_dict[tagid] + (emb - self.means[tagid]) ** 2 * mask
                    else:
                        emb_dict[tagid] = (emb - self.means[tagid]) ** 2 * mask
                    cnt_dict[tagid] += mask
        for tagid in emb_dict:
            self.var[tagid] = emb_dict[tagid] / cnt_dict[tagid]
            # self.var[tagid][300:].fill_(1.)
            self.var[tagid][:].fill_(1.)
def print_param(self):
print("attatch left")
print(self.attach_left)
print("attach right")
print(self.attach_right)
print("stop left")
print(self.stop_left)
print("root attach left")
print(self.root_attach_left)
def transform(self, x, masks=None):
"""
Args:
x: (sent_length, batch_size, num_dims)
"""
jacobian_loss = torch.zeros(1, device=self.device, requires_grad=False)
if self.args.model != 'gaussian':
x, jacobian_loss_new = self.proj_layer(x, masks)
jacobian_loss = jacobian_loss + jacobian_loss_new
return x, jacobian_loss
def tree_to_depset(self, root_max_index, sent_len):
"""
Args:
root_max_index: (batch_size, 2), [:0] represents the
optimal state, [:1] represents the
optimal index (location)
"""
# add the root symbol (-1)
batch_size = root_max_index.size(0)
dep_list = []
for batch in range(batch_size):
res = set([(root_max_index[batch, 1].item(), -1, root_max_index[batch, 0].item())])
start = 0
end = sent_len[batch]
res.update(self._tree_to_depset(start, end, 2, batch, root_max_index[batch, 0],
root_max_index[batch, 1]))
assert len(res) == sent_len[batch]
dep_list += [sorted(res)]
return dep_list
    def _tree_to_depset(self, start, end, mark, batch, symbol, index):
        """Recursively read the backpointer tables for span (start, end)
        with the given stop ``mark`` and head ``symbol``/``index``, and
        return the set of dependency triples (dependent, head, state).

        Child records are 6-vectors (indicator, i, j, mark, symbol, index);
        indicator 1 = real child, 0 = absent (unary), -1 = terminal.
        """
        left_child = self.left_child[start, end, mark][batch, symbol, index]
        right_child = self.right_child[start, end, mark][batch, symbol, index]
        if left_child[0] == 1 and right_child[0] == 1:
            # binary rule: mark 0 attaches a right dependent, mark 1 a left one
            if mark == 0:
                assert left_child[3] == 0
                assert right_child[3] == 2
                arg = right_child[-1]
                dep_symbol = right_child[4].item()
            elif mark == 1:
                assert left_child[3] == 2
                assert right_child[3] == 1
                arg = left_child[-1]
                dep_symbol = left_child[4].item()
            # NOTE(review): a binary node with mark == 2 would leave ``arg``
            # unbound; presumably mark 2 is only produced via the unary
            # branch below — confirm against dep_parse/unary_parses.
            res = set([(arg.item(), index, dep_symbol)])
            res.update(self._tree_to_depset(left_child[1].item(), left_child[2].item(),
                                            left_child[3].item(), batch, left_child[4].item(),
                                            left_child[5].item()), \
                       self._tree_to_depset(right_child[1].item(), right_child[2].item(),
                                            right_child[3].item(), batch, right_child[4].item(),
                                            right_child[5].item()))
        elif left_child[0] == 1 and right_child[0] == 0:
            # unary rule: follow the single (left) child
            res = self._tree_to_depset(left_child[1].item(), left_child[2].item(),
                                       left_child[3].item(), batch, left_child[4].item(),
                                       left_child[5].item())
        elif left_child[0] == -1 and right_child[0] == -1:
            # terminal span: no dependencies below
            res = set()
        else:
            raise ValueError
        return res
    def test(self, test_data, batch_size=10, predict=""):
        """Evaluate directed attachment accuracy on ``test_data``.

        Args:
            test_data: dataset providing ``data_iter_efficient()`` batches
                with gold heads (``head``), POS ids and masks.
            batch_size: unused here (batching comes from the data iter);
                kept for interface compatibility.
            predict: if non-empty, a file path to which predicted parses
                are written in CoNLL-like format.

        Returns:
            Directed attachment accuracy over non-punctuation tokens.
        """
        cnt = 0
        dir_cnt = 0.0
        # NOTE(review): undir_cnt is never accumulated; undirected accuracy
        # is not actually computed here.
        undir_cnt = 0.0
        # number of batches skipped because parsing ran out of memory
        memory_sent_cnt = 0
        batch_id_ = 0
        if predict != "":
            fout = open(predict, "w", encoding="utf-8")
        # if self.args.max_len > 20:
        #     batch_size = 2
        for iter_obj in test_data.data_iter_efficient():
            batch_id_ += 1
            try:
                sents_t = iter_obj.embed
                if self.args.pos_emb_dim > 0:
                    pos_embed = self.pos_embed(iter_obj.pos)
                    sents_t = torch.cat((sents_t, pos_embed), dim=-1)
                sents_t, _ = self.transform(sents_t, iter_obj.mask)
                sents_t = sents_t.transpose(0, 1)
                # root_max_index: (batch_size, num_state, seq_length)
                batch_size, seq_length, _ = sents_t.size()
                # print("batch size {}".format(batch_size))
                # print("length {}".format(seq_length))
                # (batch, num_state, seq_length, 2) table of (state, position) pairs
                symbol_index_t = self.attach_left.new([[[p, q] for q in range(seq_length)] \
                    for p in range(self.num_state)]) \
                    .expand(batch_size, self.num_state, seq_length, 2)
                root_max_index = self.dep_parse(sents_t, iter_obj, symbol_index_t)
                masks = iter_obj.mask
                batch_size = masks.size(1)
                sent_len = [torch.sum(masks[:, i]).item() for i in range(batch_size)]
                parse = self.tree_to_depset(root_max_index, sent_len)
            except RuntimeError:
                # an OOM batch is skipped rather than aborting evaluation
                memory_sent_cnt += 1
                print('batch %d out of memory' % batch_id_)
                continue
            #TODO: check parse_s if follows original sentence order
            for pos_s, gold_s, parse_s, len_ in zip(iter_obj.pos.transpose(0, 1),
                iter_obj.head.transpose(0, 1), parse, iter_obj.mask.sum(dim=0)):
                directed, length = self.measures(pos_s, gold_s, parse_s, len_.item())
                cnt += length
                dir_cnt += directed
            if predict != "":
                self.predict(fout, iter_obj, parse, test_data.id_to_pos)
        if predict != "":
            fout.close()
        dir_acu = dir_cnt / cnt
        # drop the chart/backpointer tables to release memory
        self.log_p_parse = {}
        self.left_child = {}
        self.right_child = {}
        return dir_acu
def predict(fout, iter_obj, parse, id_to_pos):
for pos_s, gold_s, parse_s, deprel_s, len_ in zip(iter_obj.pos.transpose(0, 1),
iter_obj.head.transpose(0, 1), parse, iter_obj.deps, iter_obj.mask.sum(dim=0)):
for i in range(int(len_)):
pos = id_to_pos[pos_s[i].item()]
head = gold_s[i].item()
tuple_ = parse_s[i]
deprel = deprel_s[i]
fout.write("{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\n".format(
i+1, "_", "_", pos, "_", "_", head+1, deprel, "_", tuple_[1]+1))
def measures(self, pos_s, gold_s, parse_s, len_):
# Helper for eval().
d = 0.
l = 0.
for i in range(int(len_)):
pos = pos_s[i]
head = gold_s[i]
tuple_ = parse_s[i]
if pos.item() not in self.punc_sym:
l += 1
if head.item() == tuple_[1]:
d += 1
return d, l
def MSE_loss(self):
diff_prior = ((self.attach_left - self.attach_left_init) ** 2).sum()
diff_prior = diff_prior + ((self.attach_right - self.attach_right_init) ** 2).sum()
diff_prior = diff_prior + ((self.stop_left - self.stop_left_init) ** 2).sum()
diff_prior = diff_prior + ((self.stop_right - self.stop_right_init) ** 2).sum()
diff_prior = diff_prior + ((self.root_attach_left - self.root_attach_left_init) ** 2).sum()
diff_mean = ((self.means - self.means_init) ** 2).sum()
diff_proj = 0.
for i, param in enumerate(self.proj_layer.parameters()):
diff_proj = diff_proj + ((self.proj_init[i] - param) ** 2).sum()
return 0.5 * (self.args.beta_prior * diff_prior +
self.args.beta_proj * diff_proj + self.args.beta_mean * diff_mean)
def up_viterbi_em(self, train_data):
attach_left = self.attach_left.new_ones((self.num_state, self.num_state))
attach_right = self.attach_right.new_ones((self.num_state, self.num_state))
stop_right = self.stop_right.new_ones((2, self.num_state, 2))
stop_left = self.stop_left.new_ones((2, self.num_state, 2))
root_attach_left = self.root_attach_left.new_ones(self.num_state)
for iter_obj in train_data.data_iter(batch_size=self.args.batch_size,
shuffle=False):
sents_t = iter_obj.embed
if self.args.pos_emb_dim > 0:
pos_embed = self.pos_embed(iter_obj.pos)
sents_t = torch.cat((sents_t, pos_embed), dim=-1)
sents_t, _ = self.transform(sents_t, iter_obj.mask)
sents_t = sents_t.transpose(0, 1)
# root_max_index: (batch_size, num_state, seq_length)
batch_size, seq_length, _ = sents_t.size()
symbol_index_t = self.attach_left.new([[[p, q] for q in range(seq_length)] \
for p in range(self.num_state)]) \
.expand(batch_size, self.num_state, seq_length, 2)
root_max_index = self.dep_parse(sents_t, iter_obj, symbol_index_t)
masks = iter_obj.mask
batch_size = masks.size(1)
sent_len = [torch.sum(masks[:, i]).item() for i in range(batch_size)]
parse = self.tree_to_depset(root_max_index, sent_len)
for s in parse:
length = len(s)
left = [0] * length
right = [0] * length
# count number of left and right children
for i in range(length):
head_id = s[i][1]
dep_id = s[i][0]
if dep_id < head_id:
left[head_id] += 1
elif dep_id > head_id:
right[head_id] += 1
else:
raise ValueError
for i in range(length):
head_id = s[i][1]
head_pos = s[head_id][2]
dep_pos = s[i][2]
dep_id = s[i][0]
if head_id == -1:
root_attach_left[dep_pos] += 1
continue
assert(i == dep_id)
if dep_id < head_id:
attach_left[head_pos, dep_pos] += 1
elif dep_id > head_id:
attach_right[head_pos, dep_pos] += 1
if left[i] > 0:
stop_left[0, dep_pos, 1] += 1
stop_left[0, dep_pos, 0] += left[i] - 1
stop_left[1, dep_pos, 0] += 1
else:
stop_left[1, dep_pos, 1] += 1
if right[i] > 0:
stop_right[0, dep_pos, 1] += 1
stop_right[0, dep_pos, 0] += right[i] - 1
stop_right[1, dep_pos, 0] += 1
else:
stop_right[1, dep_pos, 1] += 1
self.attach_left.copy_(torch.log(attach_left / attach_left.sum(dim=1, keepdim=True)))
self.attach_right.copy_(torch.log(attach_right / attach_right.sum(dim=1, keepdim=True)))
self.stop_right.copy_(torch.log(stop_right / stop_right.sum(dim=0, keepdim=False)))
self.stop_left.copy_(torch.log(stop_left / stop_left.sum(dim=0, keepdim=False)))
self.root_attach_left.copy_(torch.log(root_attach_left / root_attach_left.sum()))
def _eval_log_density(self, s):
"""
Args:
s: A tensor with size (batch_size, seq_length, features)
Returns:
density: (batch_size, seq_length, num_state)
"""
constant = -self.num_dims/2.0 * (math.log(2 * math.pi)) - \
0.5 * torch.sum(torch.log(self.var), dim=-1)
batch_size, seq_length, features = s.size()
means = self.means.view(1, 1, self.num_state, features)
words = s.unsqueeze(dim=2)
var = self.var.view(1, 1, self.num_state, self.num_dims)
return constant.view(1, 1, self.num_state) - \
0.5 * torch.sum((means - words) ** 2 / var, dim=3)
def _eval_log_density_supervised(self, sents, pos):
"""
Args:
sents: A tensor with size (batch_size, seq_len, features)
pos: (batch_size, seq_len)
Returns:
density: (batch_size, seq_length)
"""
constant = -self.num_dims/2.0 * (math.log(2 * math.pi)) - \
0.5 * torch.sum(torch.log(self.var), dim=-1)
batch_size, seq_length, features = sents.size()
constant = constant.view(1, 1, self.num_state)
constant = constant.expand(batch_size, seq_length, self.num_state)
constant = torch.gather(constant, dim=2, index=pos.unsqueeze(2)).squeeze(2)
means = self.means.view(1, 1, self.num_state, features)
means = means.expand(batch_size, seq_length,
self.num_state, self.num_dims)
tag_id = pos.view(*pos.size(), 1, 1).expand(batch_size,
seq_length, 1, self.num_dims)
# (batch_size, seq_len, num_dims)
means = torch.gather(means, dim=2, index=tag_id).squeeze(2)
var = self.var.view(1, 1, self.num_state, self.num_dims)
var = var.expand(batch_size, seq_length,
self.num_state, self.num_dims)
var = torch.gather(var, dim=2, index=tag_id).squeeze(2)
return constant - \
0.5 * torch.sum((means - sents) ** 2 / var, dim=-1)
    def set_dmv_params(self, train_data, pos_seq=None):
        """Initialize the DMV multinomials from gold (or decoded) trees.

        Resets every count table to one (add-one smoothing), accumulates
        attach/stop/root counts from the supplied POS sequences and the
        gold heads / child counts in ``train_data``, then replaces the
        parameters in place with normalized log probabilities.

        Args:
            train_data: dataset with per-sentence ``heads``,
                ``left_num_deps``, ``right_num_deps`` and ``postags``.
            pos_seq: optional POS id sequences to use instead of
                ``train_data.postags`` (e.g. decoded latent tags).
        """
        self.attach_left.fill_(1.)
        self.attach_right.fill_(1.)
        self.root_attach_left.fill_(1.)
        self.stop_right.fill_(1.)
        self.stop_left.fill_(1.)
        if pos_seq is None:
            pos_seq = train_data.postags
        for pos_s, head_s, left_s, right_s in zip(pos_seq,
                                                  train_data.heads,
                                                  train_data.left_num_deps,
                                                  train_data.right_num_deps):
            assert(len(pos_s) == len(head_s))
            for i, pos in enumerate(pos_s):
                head = head_s[i]
                head_pos = pos_s[head]
                left = left_s[i]
                right = right_s[i]
                if head == -1:
                    # root arc: only the root-attachment count is updated
                    self.root_attach_left[pos] += 1
                    continue
                assert(i != head)
                if i < head:
                    self.attach_left[head_pos, pos] += 1
                elif i > head:
                    self.attach_right[head_pos, pos] += 1
                # stop counts: first index 0 = continue, 1 = stop;
                # last index 1 = adjacent (no child yet), 0 = non-adjacent
                if left > 0:
                    self.stop_left[0, pos, 1] += 1
                    self.stop_left[0, pos, 0] += left - 1
                    self.stop_left[1, pos, 0] += 1
                else:
                    self.stop_left[1, pos, 1] += 1
                if right > 0:
                    self.stop_right[0, pos, 1] += 1
                    self.stop_right[0, pos, 0] += right - 1
                    self.stop_right[1, pos, 0] += 1
                else:
                    self.stop_right[1, pos, 1] += 1
        # normalize counts into log probabilities, in place
        self.attach_left.copy_(torch.log(self.attach_left / self.attach_left.sum(dim=1, keepdim=True)))
        self.attach_right.copy_(torch.log(self.attach_right / self.attach_right.sum(dim=1, keepdim=True)))
        self.stop_right.copy_(torch.log(self.stop_right / self.stop_right.sum(dim=0, keepdim=False)))
        self.stop_left.copy_(torch.log(self.stop_left / self.stop_left.sum(dim=0, keepdim=False)))
        self.root_attach_left.copy_(torch.log(self.root_attach_left / self.root_attach_left.sum()))
def supervised_loss_wpos(self, iter_obj):
"""
Args:
iter_obj.embed: (seq_len, batch_size, num_dim)
iter_obj.pos: (seq_len, batch_size)
iter_obj.head: (seq_len, batch_size)
iter_obj.l_deps: (seq_len, batch_size)
iter_obj.r_deps: (seq_len, batch_size)
iter_obj.mask: (seq_len, batch_size)
"""
embed = iter_obj.embed.transpose(0, 1)
# (batch_size, seq_len)
pos_t = iter_obj.pos.transpose(0, 1)
if self.args.pos_emb_dim > 0:
pos_embed = self.pos_embed(pos_t)
embed = torch.cat((embed, pos_t), dim=-1)
embed, jacob = self.transform(embed, iter_obj.mask)
density = self._eval_log_density_supervised(embed, pos_t)
log_emission_prob = torch.mul(density, iter_obj.mask.transpose(0, 1)).sum()
return -log_emission_prob, jacob
    def supervised_loss_wopos(self, tree, embed, pos):
        """This is the non-batched version of supervised loss when
        dep structure is known but pos tags are unknown.

        Marginalizes over the latent tag of every node via
        ``_calc_log_prob`` and the root-attachment distribution.

        Args:
            tree: TreeToken object from conllu
            embed: list of embeddings
            pos: list of pos tag ids (only used when POS embeddings are on)

        Returns: Tensor1, Tensor2
            Tensor1: a scalar tensor of nll
            Tensor2: Jacobian loss, scalar tensor
        """
        # normalizing parameters (prob_const scales the temperature)
        self.log_attach_left = self.args.prob_const * log_softmax(self.attach_left, dim=1)
        self.log_attach_right = self.args.prob_const * log_softmax(self.attach_right, dim=1)
        self.log_stop_right = self.args.prob_const * log_softmax(self.stop_right, dim=0)
        self.log_stop_left = self.args.prob_const * log_softmax(self.stop_left, dim=0)
        self.log_root_attach_left = self.args.prob_const * log_softmax(self.root_attach_left, dim=0)
        # per-state Gaussian log normalizer, shared by every node
        constant = -self.num_dims/2.0 * (math.log(2 * math.pi)) - \
            0.5 * torch.sum(torch.log(self.var), dim=-1)
        # (seq_len, num_dims)
        embed_t = torch.tensor(embed, dtype=torch.float32, requires_grad=False, device=self.device)
        if self.args.pos_emb_dim > 0:
            pos_t = torch.tensor(pos, dtype=torch.long, requires_grad=False, device=self.device)
            pos_embed = self.pos_embed(pos_t)
            embed_t = torch.cat((embed_t, pos_embed), dim=-1)
        # transform expects a batch dimension; add and remove it around the call
        embed_t, jacob = self.transform(embed_t.unsqueeze(1))
        embed_t = embed_t.squeeze(1)
        # (num_state)
        log_prob = self._calc_log_prob(tree, constant, embed_t)
        log_prob = self.log_root_attach_left + log_prob
        return -log_sum_exp(log_prob, dim=0), jacob
    def _calc_log_prob(self, tree, constant, embed_s):
        """recursion components to compute the log prob of the root
        of the current tree being a latent pos tag

        Args:
            tree: TreeToken
            constant: the per-state Gaussian density log normalizer
            embed_s: (seq_len, num_dims) transformed embeddings

        Returns:
            log prob of the root being each latent pos tag,
            shape [num_state]
        """
        token_id = tree.token["id"]
        # token ids are 1-based in the conllu tree
        embed = embed_s[token_id-1]
        # (num_state)
        embed = embed.unsqueeze(0)
        log_prob = constant - 0.5 * torch.sum(
            (self.means - embed)**2 / self.var, dim=1)
        # leaf nodes
        if tree.children == []:
            return log_prob
        # split dependents by side of the head
        left = []
        right = []
        for t in tree.children:
            if t.token["id"] < tree.token["id"]:
                left.append(t)
            elif t.token["id"] > tree.token["id"]:
                right.append(t)
            else:
                raise ValueError
        if left == []:
            # stop left while still adjacent (no left child generated)
            log_prob = log_prob + self.log_stop_left[1, :, 1]
        else:
            # attach left dependents inside-out (reversed order)
            for i, l in enumerate(left[::-1]):
                left_prob = self._calc_log_prob(l, constant, embed_s)
                # (num_state, num_state) --> (num_state)
                left_prob = self.log_attach_left + left_prob.unsqueeze(0)
                left_prob = log_sum_exp(left_prob, dim=1)
                # valence
                log_prob = log_prob + left_prob + self.log_stop_left[0, :, int(i==0)]
            log_prob = log_prob + self.log_stop_left[1, :, 0]
        if right == []:
            log_prob = log_prob + self.log_stop_right[1, :, 1]
        else:
            for i, r in enumerate(right):
                right_prob = self._calc_log_prob(r, constant, embed_s)
                # (num_state, num_state) --> (num_state)
                right_prob = self.log_attach_right + right_prob.unsqueeze(0)
                right_prob = log_sum_exp(right_prob, dim=1)
                # valence
                log_prob = log_prob + right_prob + self.log_stop_right[0, :, int(i==0)]
            log_prob = log_prob + self.log_stop_right[1, :, 0]
        return log_prob
    def parse_pos_seq(self, train_data):
        """decode the best latent tag sequences, given
        all the paramters and gold tree structures

        Runs a max-product version of ``supervised_loss_wopos`` per
        sentence (via ``_find_best_path``) and follows the backpointers
        with ``_find_pos_seq``.

        Return: List1
            List1: a list of decoded pos tag ids, format is like
            train_data.pos
        """
        # normalized (untempered) log parameters for decoding
        self.log_attach_left = log_softmax(self.attach_left, dim=1)
        self.log_attach_right = log_softmax(self.attach_right, dim=1)
        self.log_stop_right = log_softmax(self.stop_right, dim=0)
        self.log_stop_left = log_softmax(self.stop_left, dim=0)
        self.log_root_attach_left = log_softmax(self.root_attach_left, dim=0)
        constant = -self.num_dims/2.0 * (math.log(2 * math.pi)) - \
            0.5 * torch.sum(torch.log(self.var), dim=-1)
        decoded_pos = []
        for embed, tree, gold_pos in zip(train_data.embed, train_data.trees, train_data.postags):
            pos = [0] * len(embed)
            parse_tree = ParseTree(tree, [], [])
            embed_t = torch.tensor(embed, dtype=torch.float32, requires_grad=False, device=self.device)
            if self.args.pos_emb_dim > 0:
                gold_pos = torch.tensor(gold_pos, dtype=torch.long, requires_grad=False, device=self.device)
                pos_embed = self.pos_embed(gold_pos)
                embed_t = torch.cat((embed_t, pos_embed), dim=-1)
            # transform expects a batch dimension
            embed_t, _ = self.transform(embed_t.unsqueeze(1))
            embed_t = embed_t.squeeze(1)
            log_prob = self._find_best_path(tree, parse_tree, constant, embed_t)
            log_prob = self.log_root_attach_left + log_prob
            # pick the best root tag, then follow backpointers downwards
            log_prob, root_index = torch.max(log_prob, dim=0)
            self._find_pos_seq(parse_tree, root_index, pos)
            decoded_pos.append(pos)
        return decoded_pos
    def _find_best_path(self, tree, parse_tree, constant, embed_s):
        """decode the most likely latent tag tree given
        current tree and sequence of latent embeddings,

        Max-product analogue of ``_calc_log_prob``: argmax backpointers
        for every child are recorded on ``parse_tree.decode_tag`` so
        ``_find_pos_seq`` can reconstruct the tag sequence.
        """
        token_id = tree.token["id"]
        embed = embed_s[token_id-1]
        embed = embed.unsqueeze(0)
        # per-state emission score of this token
        log_prob = constant - 0.5 * torch.sum(
            (self.means - embed)**2 / self.var, dim=1)
        # leaf nodes
        if tree.children == []:
            return log_prob
        left = []
        right = []
        for t in tree.children:
            if t.token["id"] < tree.token["id"]:
                left.append(t)
            elif t.token["id"] > tree.token["id"]:
                right.append(t)
            else:
                raise ValueError
        if left == []:
            log_prob = log_prob + self.log_stop_left[1, :, 1]
        else:
            # attach left dependents inside-out (reversed order)
            for i, l in enumerate(left[::-1]):
                parse_tree_tmp = ParseTree(l, [], [])
                left_prob = self._find_best_path(l, parse_tree_tmp, constant, embed_s)
                # (num_state, num_state) --> (num_state)
                left_prob = self.log_attach_left + left_prob.unsqueeze(0)
                # max instead of log-sum-exp; keep the argmax as backpointer
                left_prob, left_prob_index = torch.max(left_prob, dim=1)
                # valence
                log_prob = log_prob + left_prob + self.log_stop_left[0, :, int(i==0)]
                parse_tree.children.append(parse_tree_tmp)
                parse_tree.decode_tag.append(left_prob_index)
            log_prob = log_prob + self.log_stop_left[1, :, 0]
        if right == []:
            log_prob = log_prob + self.log_stop_right[1, :, 1]
        else:
            for i, r in enumerate(right):
                parse_tree_tmp = ParseTree(r, [], [])
                right_prob = self._find_best_path(r, parse_tree_tmp, constant, embed_s)
                # (num_state, num_state) --> (num_state)
                right_prob = self.log_attach_right + right_prob.unsqueeze(0)
                right_prob, right_prob_index = torch.max(right_prob, dim=1)
                # valence
                log_prob = log_prob + right_prob + self.log_stop_right[0, :, int(i==0)]
                parse_tree.children.append(parse_tree_tmp)
                parse_tree.decode_tag.append(right_prob_index)
            log_prob = log_prob + self.log_stop_right[1, :, 0]
        return log_prob
def _find_pos_seq(self, parse_tree, head_index, pos):
"""decode the pos sequence, results are stored in pos
"""
token_id = parse_tree.tree.token["id"]
pos[token_id-1] = head_index
for child, decode_tag in zip(parse_tree.children, parse_tree.decode_tag):
head_index_child = decode_tag[head_index]
self._find_pos_seq(child, head_index_child, pos)
# def supervised_loss(self, sents, iter_obj):
# """
# Args:
# sents: A tensor with size (batch_size, seq_len, features)
# pos: (seq_len, batch_size)
# head: (seq_len, batch_size)
# num_left_child: (seq_len, batch_size)
# num_right_child: (seq_len, batch_size)
# masks: (seq_len, batch_size)
# """
# pos = iter_obj.pos
# head = iter_obj.head
# num_left_child = iter_obj.l_deps
# num_right_child = iter_obj.r_deps
# masks = iter_obj.mask
# seq_len, batch_size = pos.size()
# attach_left_ = log_softmax(self.attach_left, dim=1).expand(batch_size, *self.attach_left.size())
# attach_right_ = log_softmax(self.attach_right, dim=1).expand(batch_size, *self.attach_right.size())
# root_attach_ = log_softmax(self.root_attach_left, dim=0).expand(batch_size, *self.root_attach_left.size())
# stop_right_s = log_softmax(self.stop_right, dim=0).expand(batch_size, *self.stop_right.size())
# stop_left_s = log_softmax(self.stop_left, dim=0).expand(batch_size, *self.stop_left.size())
# # (batch_size, num_state, 2)
# stop_right_ = stop_right_s[:, 1, :, :]
# stop_left_ = stop_left_s[:, 1, :, :]
# continue_right_ = stop_right_s[:, 0, :, :]
# continue_left_ = stop_left_s[:, 0, :, :]
# # (batch_size, seq_len)
# pos_t = pos.transpose(0, 1)
# density = self._eval_log_density_supervised(sents, pos_t)
# log_emission_prob = torch.mul(density, masks.transpose(0, 1)).sum()
# for i in range(seq_len):
# # 1 indicates left dependent
# dir_left = (i < head[i]).float()
# # (batch_size, 1)
# pos_sub = pos[i].unsqueeze(1)
# head_mask = (head[i] >= 0).long()
# head_index = torch.mul(head_mask, head[i])
# # (batch_size, 1, num_state)
# head_pos_sub = torch.gather(pos, index=head_index.unsqueeze(0), dim=0).squeeze(0) \
# .view(batch_size, 1, 1).expand(batch_size, 1, self.num_state)
# # attach prob
# # (batch_size, num_state) --> (batch_size)
# log_attach_left_prob = torch.gather(attach_left_, index=head_pos_sub, dim=1).squeeze(1)
# log_attach_left_prob = torch.gather(log_attach_left_prob, index=pos_sub, dim=1).squeeze(1)
# log_attach_right_prob = torch.gather(attach_right_, index=head_pos_sub, dim=1).squeeze(1)
# log_attach_right_prob = torch.gather(log_attach_right_prob, index=pos_sub, dim=1).squeeze(1)
# log_attach = torch.mul(dir_left, log_attach_left_prob) + torch.mul(1.0 - dir_left, log_attach_right_prob)
# log_attach = torch.mul(log_attach, head_mask.float())
# # 1 indicates root
# dir_root = (head[i] == -1).float()
# log_root_prob = torch.gather(root_attach_, index=pos_sub, dim=1).squeeze(1)
# log_attach = log_attach + torch.mul(dir_root, log_root_prob)
# log_attach = torch.mul(log_attach, masks[i])
# log_prob = log_emission_prob + log_attach.sum()
# # stop prob
# # (batch_size, num_state, 1), 1 indicates no child
# stop_adj_left = (num_left_child[i] == 0).long().view(batch_size, 1, 1).expand(batch_size, self.num_state, 1)
# stop_adj_right = (num_right_child[i] == 0).long().view(batch_size, 1, 1).expand(batch_size, self.num_state, 1)
# # (batch_size, num_state) --> (batch_size)
# log_stop_right_prob = torch.gather(stop_right_, index=stop_adj_right, dim=2).squeeze(2)
# log_stop_right_prob = torch.gather(log_stop_right_prob, index=pos_sub, dim=1).squeeze(1)
# log_stop_left_prob = torch.gather(stop_left_, index=stop_adj_left, dim=2).squeeze(2)
# log_stop_left_prob = torch.gather(log_stop_left_prob, index=pos_sub, dim=1).squeeze(1)
# log_stop = torch.mul(log_stop_right_prob + log_stop_left_prob, masks[i])
# log_prob = log_prob + log_stop.sum()
# # continue prob, 1 represents the existence of continue prob
# pos_sub_ = pos_sub.unsqueeze(2).expand(batch_size, 1, 2)
# # (batch_size, 2)
# continue_right_sub = torch.gather(continue_right_, index=pos_sub_, dim=1).squeeze(1)
# continue_left_sub = torch.gather(continue_left_, index=pos_sub_, dim=1).squeeze(1)
# # (batch_size)
# continue_flag_left = (num_left_child[i] > 0)
# continue_flag_right = (num_right_child[i] > 0)
# continue_flag_left = continue_flag_left.float()
# continue_flag_right = continue_flag_right.float()
# log_continue_right_prob = torch.mul(continue_right_sub[:,1], continue_flag_right)
# log_continue_left_prob = torch.mul(continue_left_sub[:,1], continue_flag_left)
# log_continue_right_prob = log_continue_right_prob + \
# torch.mul(continue_flag_right, torch.mul((num_right_child[i]-1).float(), continue_right_sub[:,0]))
# log_continue_left_prob = log_continue_left_prob + \
# torch.mul(continue_flag_left, torch.mul((num_left_child[i]-1).float(), continue_left_sub[:,0]))
# log_continue = torch.mul(log_continue_left_prob + log_continue_right_prob, masks[i])
# log_prob = log_prob + log_continue.sum()
# return -log_prob
    def unsupervised_loss(self, iter_obj):
        """Negative marginal log likelihood via the DMV inside algorithm.

        Args:
            iter_obj: batch with ``embed`` (seq_len, batch_size, features),
                ``pos`` and ``mask`` ((seq_len, batch_size)).

        Returns:
            (negative summed sentence log likelihood, Jacobian penalty)

        Variable clarification:
            p_inside[i, j] is the prob of w_i, w_i+1, ..., w_j-1
            rooted at any possible nonterminals

        node marks clarification:
            0: no marks (right first)
            1: right stop mark
            2: both left and right stop marks
        """
        # normalizing parameters
        self.log_attach_left = log_softmax(self.attach_left, dim=1)
        self.log_attach_right = log_softmax(self.attach_right, dim=1)
        self.log_stop_right = log_softmax(self.stop_right, dim=0)
        self.log_stop_left = log_softmax(self.stop_left, dim=0)
        self.log_root_attach_left = log_softmax(self.root_attach_left, dim=0)
        sents = iter_obj.embed
        masks = iter_obj.mask
        pos_t = iter_obj.pos
        if self.args.pos_emb_dim > 0:
            pos_embed = self.pos_embed(pos_t)
            sents = torch.cat((sents, pos_embed), dim=-1)
        sents, jacob = self.transform(sents, masks)
        sents = sents.transpose(0, 1)
        # (batch_size, seq_length, num_state)
        density = self._eval_log_density(sents)
        # indexed by (start, end, mark)
        # each element is a tensor with size (batch_size, num_state, seq_length)
        self.log_p_inside = {}
        # n = len(s)
        batch_size, seq_length, _ = sents.size()
        # initialize unit spans with the emission densities; positions
        # other than the head stay at NEG_INFINITY
        for i in range(seq_length):
            j = i + 1
            cat_var = [torch.zeros((batch_size, self.num_state, 1),
                                   dtype=torch.float32,
                                   device=self.device).fill_(NEG_INFINITY) for _ in range(seq_length)]
            cat_var[i] = density[:, i, :].unsqueeze(dim=2)
            self.log_p_inside[i, j, 0] = torch.cat(cat_var, dim=2)
            self.unary_p_inside(i, j, batch_size, seq_length)
        # "continue" halves of the stop distributions
        log_stop_right = self.log_stop_right[0]
        log_stop_left = self.log_stop_left[0]
        #TODO(junxian): ideally, only the l loop is needed
        # but eliminate the rest loops would be a bit hard
        for l in range(2, seq_length+1):
            for i in range(seq_length-l+1):
                j = i + l
                log_p1 = []
                log_p2 = []
                index = torch.zeros((seq_length, j-i-1), dtype=torch.long,
                                    device=self.device, requires_grad=False)
                # right attachment
                for k in range(i+1, j):
                    log_p1.append(self.log_p_inside[i, k, 0].unsqueeze(-1))
                    log_p2.append(self.log_p_inside[k, j, 2].unsqueeze(-1))
                    # adjacency flag: head at k-1 is adjacent to split k
                    index[k-1, k-i-1] = 1
                log_p1 = torch.cat(log_p1, dim=-1)
                log_p2 = torch.cat(log_p2, dim=-1)
                index = index.unsqueeze(0).expand(self.num_state, *index.size())
                # (num_state, seq_len, k)
                log_stop_right_gather = torch.gather(
                    log_stop_right.unsqueeze(-1).expand(*log_stop_right.size(), j-i-1),
                    1, index)
                # log_p_tmp[b, i, m, j, n] = log_p1[b, i, m] + log_p2[b, j, n] + stop_right[0, i, m==k-1]
                #                            + attach_right[i, j]
                # log_p_tmp = log_p1_ep + log_p2_ep + log_attach_right + log_stop_right_gather
                # to save memory, first marginalize out j and n
                # (b, i, j, k) -> (b, i, k)
                log_p2_tmp = log_sum_exp(log_p2.unsqueeze(1), dim=3) + \
                    self.log_attach_right.view(1, *(self.log_attach_right.size()), 1)
                log_p2_tmp = log_sum_exp(log_p2_tmp, dim=2)
                # (b, i, m, k)
                log_p_tmp = log_p1 + log_p2_tmp.unsqueeze(2) + \
                    log_stop_right_gather.unsqueeze(0)
                self.log_p_inside[i, j, 0] = log_sum_exp(log_p_tmp, dim=-1)
                # left attachment
                log_p1 = []
                log_p2 = []
                index = torch.zeros((seq_length, j-i-1), dtype=torch.long,
                                    device=self.device, requires_grad=False)
                for k in range(i+1, j):
                    log_p1.append(self.log_p_inside[i, k, 2].unsqueeze(-1))
                    log_p2.append(self.log_p_inside[k, j, 1].unsqueeze(-1))
                    # adjacency flag: head at k is adjacent to split k
                    index[k, k-i-1] = 1
                log_p1 = torch.cat(log_p1, dim=-1)
                log_p2 = torch.cat(log_p2, dim=-1)
                index = index.unsqueeze(0).expand(self.num_state, *index.size())
                log_stop_left_gather = torch.gather(
                    log_stop_left.unsqueeze(-1).expand(*log_stop_left.size(), j-i-1),
                    1, index)
                # log_p_tmp[b, i, m, j, n] = log_p1[b, i, m] + log_p2[b, j, n] + stop_left[0, j, n==k]
                #                            + self.attach_left[j, i]
                # to save memory, first marginalize out j and n
                # (b, i, j, k) -> (b, j, k)
                log_p1_tmp = log_sum_exp(log_p1.unsqueeze(2), dim=3) + \
                    self.log_attach_left.permute(1, 0).view(1, *(self.log_attach_left.size()), 1)
                log_p1_tmp = log_sum_exp(log_p1_tmp, dim=1)
                # (b, j, n, k)
                log_p_tmp = log_p1_tmp.unsqueeze(2) + log_p2 + \
                    log_stop_left_gather.unsqueeze(0)
                self.log_p_inside[i, j, 1] = log_sum_exp(log_p_tmp, dim=-1)
                self.unary_p_inside(i, j, batch_size, seq_length)
        # calculate log likelihood
        sent_len_t = masks.sum(dim=0).detach()
        log_p_sum = []
        for i in range(batch_size):
            sent_len = sent_len_t[i].item()
            # full-sentence span with both stop marks
            log_p_sum += [self.log_p_inside[0, sent_len, 2][i].unsqueeze(dim=0)]
        log_p_sum_cat = torch.cat(log_p_sum, dim=0)
        log_root = log_p_sum_cat + self.log_root_attach_left.view(1, self.num_state, 1) \
            .expand_as(log_p_sum_cat)
        return -torch.sum(log_sum_exp(log_root.view(batch_size, -1), dim=1)), jacob
    def dep_parse(self, sents, iter_obj, symbol_index_t):
        """Viterbi (max-product) variant of the inside algorithm.

        Fills ``self.log_p_parse`` with the best-subtree scores and
        ``self.left_child``/``self.right_child`` with backpointers that
        ``tree_to_depset`` later follows.

        Args:
            sents: tensor with size (batch_size, seq_length, features)
            iter_obj: batch providing ``mask`` and ``pos``.
            symbol_index_t: (batch, num_state, seq_length, 2) table of
                (state, position) pairs used to label head children.

        Returns:
            (batch_size, 2) tensor of the best root (state, position).
        """
        masks = iter_obj.mask
        gold_pos = iter_obj.pos
        # normalizing parameters (prob_const scales the temperature)
        self.log_attach_left = self.args.prob_const * log_softmax(self.attach_left, dim=1)
        self.log_attach_right = self.args.prob_const * log_softmax(self.attach_right, dim=1)
        self.log_stop_right = self.args.prob_const * log_softmax(self.stop_right, dim=0)
        self.log_stop_left = self.args.prob_const * log_softmax(self.stop_left, dim=0)
        self.log_root_attach_left = self.args.prob_const * log_softmax(self.root_attach_left, dim=0)
        # (batch_size, seq_length, num_state)
        density = self._eval_log_density(sents)
        # evaluate with gold pos tag
        # batch_size, seq_len, _ = sents.size()
        # density = torch.zeros((batch_size, seq_len, self.num_state), device=self.device,
        #                       requires_grad=False).fill_(NEG_INFINITY)
        # for b in range(batch_size):
        #     for s in range(seq_len):
        #         density[b, s, gold_pos[s, b]] = 0.
        # in the parse case, log_p_parse[i, j, mark] is not the log prob
        # of some symbol as head, instead it is the prob of the most likely
        # subtree with some symbol as head
        self.log_p_parse = {}
        # child is indexed by (i, j, mark), and each element is a
        # LongTensor with size (batch_size, symbol, seq_length, 6)
        # the last dimension represents the child's
        # (indicator, i, j, mark, symbol, index), used to index the child,
        # indicator is 1 represents childs exist, 0 not exist, -1 means
        # reaching terminal symbols. For unary connection, left child indicator
        # is 1 and right child indicator is 0 (for non-terminal symbols)
        self.left_child = {}
        self.right_child = {}
        batch_size, seq_length, _ = sents.size()
        # initialize unit spans with emission scores and terminal markers
        for i in range(seq_length):
            j = i + 1
            cat_var = [torch.zeros((batch_size, self.num_state, 1),
                                   dtype=torch.float32,
                                   device=self.device).fill_(NEG_INFINITY) for _ in range(seq_length)]
            cat_var[i] = density[:, i, :].unsqueeze(dim=2)
            self.log_p_parse[i, j, 0] = torch.cat(cat_var, dim=2)
            self.left_child[i, j, 0] = torch.zeros((batch_size, self.num_state, seq_length, 6),
                                                   dtype=torch.long,
                                                   device=self.device).fill_(-1)
            self.right_child[i, j, 0] = torch.zeros((batch_size, self.num_state, seq_length, 6),
                                                    dtype=torch.long,
                                                    device=self.device).fill_(-1)
            self.unary_parses(i, j, batch_size, seq_length, symbol_index_t)
        # "continue" halves of the stop distributions
        log_stop_right = self.log_stop_right[0]
        log_stop_left = self.log_stop_left[0]
        # ideally, only the l loop is needed
        # but eliminate the rest loops would be a bit hard
        for l in range(2, seq_length+1):
            for i in range(seq_length-l+1):
                j = i + l
                # right attachment
                log_p1 = []
                log_p2 = []
                index = torch.zeros((seq_length, j-i-1), dtype=torch.long,
                                    device=self.device, requires_grad=False)
                for k in range(i+1, j):
                    # right attachment
                    log_p1.append(self.log_p_parse[i, k, 0].unsqueeze(-1))
                    log_p2.append(self.log_p_parse[k, j, 2].unsqueeze(-1))
                    # adjacency flag: head at k-1 is adjacent to split k
                    index[k-1, k-i-1] = 1
                log_p1 = torch.cat(log_p1, dim=-1)
                log_p2 = torch.cat(log_p2, dim=-1)
                index = index.unsqueeze(0).expand(self.num_state, *index.size())
                # (num_state, seq_len, k)
                log_stop_right_gather = torch.gather(
                    log_stop_right.unsqueeze(-1).expand(*log_stop_right.size(), j-i-1),
                    1, index)
                # log_p2_tmp: (b, j, k)
                # max_index_loc: (b, j, n)
                log_p2_tmp, max_index_loc = torch.max(log_p2, 2)
                # log_p2_tmp: (b, i, k)
                # max_index_symbol: (b, i, k)
                log_p2_tmp, max_index_symbol = torch.max(log_p2_tmp.unsqueeze(1) +
                    self.log_attach_right.view(1, *(self.log_attach_right.size()), 1), 2)
                # (b, i, m, k)
                log_p_tmp = log_p1 + log_p2_tmp.unsqueeze(2) + log_stop_right_gather.unsqueeze(0)
                # log_p_max: (batch_size, num_state, seq_length)
                # max_index_k: (batch_size, num_state, seq_length)
                log_p_max, max_index_k = torch.max(log_p_tmp, dim=-1)
                self.log_p_parse[i, j, 0] = log_p_max
                # resolve the argmax chain: location given symbol, then both given k
                # (b, j, k) --> (b, i, k)
                max_index_loc = torch.gather(max_index_loc, index=max_index_symbol, dim=1)
                # (b, i, k) --> (b, i, m)
                max_index_symbol = torch.gather(max_index_symbol, index=max_index_k, dim=2)
                max_index_loc = torch.gather(max_index_loc, index=max_index_k, dim=2)
                # (batch_size, num_state, seq_len, 3)
                max_index_r = torch.cat((max_index_k.unsqueeze(-1),
                                         max_index_symbol.unsqueeze(-1),
                                         max_index_loc.unsqueeze(-1)), dim=-1)
                # left attachment
                log_p1 = []
                log_p2 = []
                index = torch.zeros((seq_length, j-i-1), dtype=torch.long,
                                    device=self.device, requires_grad=False)
                for k in range(i+1, j):
                    log_p1.append(self.log_p_parse[i, k, 2].unsqueeze(-1))
                    log_p2.append(self.log_p_parse[k, j, 1].unsqueeze(-1))
                    # adjacency flag: head at k is adjacent to split k
                    index[k, k-i-1] = 1
                log_p1 = torch.cat(log_p1, dim=-1)
                log_p2 = torch.cat(log_p2, dim=-1)
                index = index.unsqueeze(0).expand(self.num_state, *index.size())
                # (num_state, seq_len, k)
                log_stop_left_gather = torch.gather(
                    log_stop_left.unsqueeze(-1).expand(*log_stop_left.size(), j-i-1),
                    1, index)
                # log_p1_tmp: (b, i, k)
                # max_index_loc: (b, i, k)
                log_p1_tmp, max_index_loc = torch.max(log_p1, 2)
                # log_p1_tmp: (b, j, k)
                # max_index_symbol: (b, j, k)
                log_p1_tmp, max_index_symbol = torch.max(log_p1_tmp.unsqueeze(2) +
                    self.log_attach_left.permute(1, 0).view(1, *(self.log_attach_left.size()), 1), 1)
                # (b, j, n, k)
                log_p_tmp = log_p1_tmp.unsqueeze(2) + log_p2 + log_stop_left_gather.unsqueeze(0)
                # log_p_max: (batch_size, num_state, seq_length)
                # max_index_k: (batch_size, num_state, seq_length)
                log_p_max, max_index_k = torch.max(log_p_tmp, dim=-1)
                self.log_p_parse[i, j, 1] = log_p_max
                # (b, i, k) --> (b, j, k)
                max_index_loc = torch.gather(max_index_loc, index=max_index_symbol, dim=1)
                # (b, j, k) --> (b, j, m)
                max_index_symbol = torch.gather(max_index_symbol, index=max_index_k, dim=2)
                max_index_loc = torch.gather(max_index_loc, index=max_index_k, dim=2)
                # (batch_size, num_state, seq_len, 3)
                max_index_l = torch.cat((max_index_k.unsqueeze(-1),
                                         max_index_symbol.unsqueeze(-1),
                                         max_index_loc.unsqueeze(-1)), dim=-1)
                # backpointer records: (indicator, i, j, mark, symbol, index)
                right_child_index_r = index.new(batch_size, self.num_state, seq_length, 6)
                left_child_index_r = index.new(batch_size, self.num_state, seq_length, 6)
                right_child_index_l = index.new(batch_size, self.num_state, seq_length, 6)
                left_child_index_l = index.new(batch_size, self.num_state, seq_length, 6)
                # assign symbol and index
                right_child_index_r[:, :, :, 4:] = max_index_r[:, :, :, 1:]
                # left_child_symbol_index: (num_state, seq_length, 2)
                left_child_symbol_index_r = symbol_index_t
                left_child_index_r[:, :, :, 4:] = left_child_symbol_index_r
                right_child_symbol_index_l = symbol_index_t
                right_child_index_l[:, :, :, 4:] = right_child_symbol_index_l
                left_child_index_l[:, :, :, 4:] = max_index_l[:, :, :, 1:]
                # assign indicator
                right_child_index_r[:, :, :, 0] = 1
                left_child_index_r[:, :, :, 0] = 1
                right_child_index_l[:, :, :, 0] = 1
                left_child_index_l[:, :, :, 0] = 1
                # assign starting point
                right_child_index_r[:, :, :, 1] = max_index_r[:, :, :, 0] + i + 1
                left_child_index_r[:, :, :, 1] = i
                right_child_index_l[:, :, :, 1] = max_index_l[:, :, :, 0] + i + 1
                left_child_index_l[:, :, :, 1] = i
                # assign end point
                right_child_index_r[:, :, :, 2] = j
                left_child_index_r[:, :, :, 2] = max_index_r[:, :, :, 0] + i + 1
                right_child_index_l[:, :, :, 2] = j
                left_child_index_l[:, :, :, 2] = max_index_l[:, :, :, 0] + i + 1
                # assign marks of the two children
                right_child_index_r[:, :, :, 3] = 2
                left_child_index_r[:, :, :, 3] = 0
                right_child_index_l[:, :, :, 3] = 1
                left_child_index_l[:, :, :, 3] = 2
                assert (i, j, 0) not in self.left_child
                self.left_child[i, j, 0] = left_child_index_r
                self.right_child[i, j, 0] = right_child_index_r
                self.left_child[i, j, 1] = left_child_index_l
                self.right_child[i, j, 1] = right_child_index_l
                self.unary_parses(i, j, batch_size, seq_length, symbol_index_t)
        log_p_sum = []
        sent_len_t = masks.sum(dim=0)
        for i in range(batch_size):
            sent_len = sent_len_t[i].item()
            # full-sentence span with both stop marks
            log_p_sum += [self.log_p_parse[0, sent_len, 2][i].unsqueeze(dim=0)]
        log_p_sum_cat = torch.cat(log_p_sum, dim=0)
        log_root = log_p_sum_cat + self.log_root_attach_left.view(1, self.num_state, 1) \
            .expand_as(log_p_sum_cat)
        log_root_max, root_max_index = torch.max(log_root.view(batch_size, -1), dim=1)
        # (batch_size, 2)
        root_max_index = unravel_index(root_max_index, (self.num_state, seq_length))
        return root_max_index
    def unary_p_inside(self, i, j, batch_size, seq_length):
        """Applies the unary stop rules to the inside score of span (i, j).

        Starting from the non-stop inside score self.log_p_inside[i, j, 0],
        adds the right-stop probability (log-sum-exp combining with any
        previously stored mark-1 score) and then the left-stop probability,
        writing the results back under marks 1 and 2 of self.log_p_inside.

        Args:
            i: span start position (inclusive).
            j: span end position (exclusive); j - 1 indexes the last word.
            batch_size: number of sentences in the batch.
            seq_length: padded sentence length of the batch.
        """
        # score of the span before any stop rule is applied
        non_stop_mark = self.log_p_inside[i, j, 0]
        # stop distributions broadcast over the batch; the leading [1] selects
        # one row of the stop parameter tables -- presumably the
        # "has-child/adjacency" slice, TODO confirm against their definition
        log_stop_left = self.log_stop_left[1].expand(batch_size, self.num_state, 2)
        log_stop_right = self.log_stop_right[1].expand(batch_size, self.num_state, 2)
        # one-hot-style gather indices: 1 at the boundary word, 0 elsewhere
        index_ladj = torch.zeros((batch_size, self.num_state, seq_length),
                                 dtype=torch.long,
                                 device=self.device,
                                 requires_grad=False)
        index_radj = torch.zeros((batch_size, self.num_state, seq_length),
                                 dtype=torch.long,
                                 device=self.device,
                                 requires_grad=False)
        # left boundary of the span is i, right boundary is j-1
        index_ladj[:, :, i].fill_(1)
        index_radj[:, :, j-1].fill_(1)
        log_stop_right = torch.gather(log_stop_right, 2, index_radj)
        # candidate right-stopped score derived from the non-stop score
        inter_right_stop_mark = non_stop_mark + log_stop_right
        if (i, j, 1) in self.log_p_inside:
            # a mark-1 score already exists: log-sum-exp the two alternatives
            right_stop_mark = self.log_p_inside[i, j, 1]
            right_stop_mark = torch.cat((right_stop_mark.unsqueeze(dim=3), \
                                         inter_right_stop_mark.unsqueeze(dim=3)), \
                                        dim=3)
            right_stop_mark = log_sum_exp(right_stop_mark, dim=3)
        else:
            right_stop_mark = inter_right_stop_mark
        log_stop_left = torch.gather(log_stop_left, 2, index_ladj)
        # mark 2 = fully stopped (right then left); mark 1 = right-stopped only
        self.log_p_inside[i, j, 2] = right_stop_mark + log_stop_left
        self.log_p_inside[i, j, 1] = right_stop_mark
def unary_parses(self, i, j, batch_size, seq_length, symbol_index_t):
non_stop_mark = self.log_p_parse[i, j, 0]
log_stop_left = self.log_stop_left[1].expand(batch_size, self.num_state, 2)
log_stop_right = self.log_stop_right[1].expand(batch_size, self.num_state, 2)
index_ladj = torch.zeros((batch_size, self.num_state, seq_length),
dtype=torch.long,
device=self.device,
requires_grad=False)
index_radj = torch.zeros((batch_size, self.num_state, seq_length),
dtype=torch.long,
device=self.device,
requires_grad=False)
left_child_index_mark2 = index_ladj.new(batch_size, self.num_state, seq_length, 6)
right_child_index_mark2 = index_ladj.new(batch_size, self.num_state, seq_length, 6)
left_child_index_mark1 = index_ladj.new(batch_size, self.num_state, seq_length, 6)
right_child_index_mark1 = index_ladj.new(batch_size, self.num_state, seq_length, 6)
index_ladj[:, :, i].fill_(1)
index_radj[:, :, j-1].fill_(1)
log_stop_right = torch.gather(log_stop_right, 2, index_radj)
inter_right_stop_mark = non_stop_mark + log_stop_right
# assign indicator
left_child_index_mark1[:, :, :, 0] = 1
right_child_index_mark1[:, :, :, 0] = 0
# assign mark
left_child_index_mark1[:, :, :, 3] = 0
right_child_index_mark1[:, :, :, 3] = 0
# start point
left_child_index_mark1[:, :, :, 1] = i
right_child_index_mark1[:, :, :, 1] = i
# end point
left_child_index_mark1[:, :, :, 2] = j
right_child_index_mark1[:, :, :, 2] = j
# assign symbol and index
left_child_symbol_index_mark1 = symbol_index_t
left_child_index_mark1[:, :, :, 4:] = left_child_symbol_index_mark1
right_child_index_mark1[:, :, :, 4:] = left_child_symbol_index_mark1
if (i, j, 1) in self.log_p_parse:
right_stop_mark = self.log_p_parse[i, j, 1]
# max_index (batch_size, num_state, index) (value is 0 or 1)
right_stop_mark, max_index = torch.max(torch.cat((right_stop_mark.unsqueeze(dim=3), \
inter_right_stop_mark.unsqueeze(dim=3)), \
dim=3), dim=3)
# mask: (batch_size, num_state, index)
mask = (max_index == 1)
mask_ep = mask.unsqueeze(dim=-1).expand(batch_size, self.num_state, seq_length, 6)
left_child_index_mark1 = self.left_child[i, j, 1].masked_fill_(mask_ep, 0) + \
left_child_index_mark1.masked_fill_(1 - mask_ep, 0)
right_child_index_mark1 = self.right_child[i, j, 1].masked_fill_(mask_ep, 0) + \
right_child_index_mark1.masked_fill_(1 - mask_ep, 0)
else:
right_stop_mark = inter_right_stop_mark
log_stop_left = torch.gather(log_stop_left, 2, index_ladj)
self.log_p_parse[i, j, 2] = right_stop_mark + log_stop_left
self.log_p_parse[i, j, 1] = right_stop_mark
# assign indicator
left_child_index_mark2[:, :, :, 0] = 1
right_child_index_mark2[:, :, :, 0] = 0
# assign starting point
left_child_index_mark2[:, :, :, 1] = i
right_child_index_mark2[:, :, :, 1] = i
# assign end point
left_child_index_mark2[:, :, :, 2] = j
right_child_index_mark2[:, :, :, 2] = j
# assign mark
left_child_index_mark2[:, :, :, 3] = 1
right_child_index_mark2[:, :, :, 3] = 1
# assign symbol and index
left_child_symbol_index_mark2 = symbol_index_t
left_child_index_mark2[:, :, :, 4:] = left_child_symbol_index_mark2
right_child_index_mark2[:, :, :, 4:] = left_child_symbol_index_mark2
self.left_child[i, j, 2] = left_child_index_mark2
self.right_child[i, j, 2] = right_child_index_mark2
self.left_child[i, j, 1] = left_child_index_mark1
self.right_child[i, j, 1] = right_child_index_mark1 | |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for transform."""
import itertools
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from scipy import stats
from rl4circopt import circuit
from rl4circopt import transform
def _random_operation(*qubits):
  """Returns an Operation with a Haar-random unitary acting on `qubits`."""
  dim = 2 ** len(qubits)
  gate = circuit.MatrixGate(stats.unitary_group.rvs(dim))
  return circuit.Operation(gate, qubits)
def _elementwise_is(sequence_a, sequence_b):
  """Checks that two sequences have the same length and identical elements.

  Identity (`is`), not equality (`==`), is compared element by element.
  """
  items_a = tuple(sequence_a)
  items_b = tuple(sequence_b)
  return len(items_a) == len(items_b) and all(
      elem_a is elem_b
      for elem_a, elem_b in zip(items_a, items_b)
  )
class PointTransformationTest(parameterized.TestCase):
  """Tests for transform.PointTransformation (single-operation rules)."""

  def test_initializer_and_getters(self):
    """Constructor arguments must be exposed unchanged by the getters."""
    # preparation work
    focus_in = [
        _random_operation(1, 2)
    ]
    context_in = transform.TransformationContext(
        circuit.Circuit(5, None),
        circuit.Circuit(5, None),
        circuit.Circuit(5, None)
    )
    locations_in = (0,)
    perform_rule = lambda operation_in: [operation_in]
    rule_id = object()
    # initialize the PointTransformation
    transformation = transform.PointTransformation(
        attention_circ=transform.AttentionCircuit(
            focus=focus_in,
            context=context_in,
            locations=locations_in
        ),
        perform_rule=perform_rule,
        rule_id=rule_id
    )
    # check type and value transformation.focus()
    focus_out = transformation.focus()
    self.assertIs(type(focus_out), tuple)
    self.assertTrue(_elementwise_is(focus_out, focus_in))
    # check transformation.context()
    self.assertIs(transformation.context(), context_in)
    # check type and value transformation.locations()
    locations_out = transformation.locations()
    self.assertIs(type(locations_out), tuple)
    self.assertTrue(_elementwise_is(locations_out, locations_in))
    # check transformation.rule_id()
    self.assertIs(transformation.rule_id(), rule_id)

  def test_initializer_circ_type_error(self):
    """Passing a plain Circuit instead of an AttentionCircuit must raise."""
    circ = circuit.Circuit(5, None)
    perform_rule = lambda operation_in: [operation_in]
    with self.assertRaisesRegex(
        TypeError,
        r'attention_circ is not an AttentionCircuit \(found type: Circuit\)'):
      transform.PointTransformation(circ, perform_rule, None)

  def test_initializer_focus_size_error(self):
    """A focus of size != 1 must be rejected for a point transformation."""
    attention_circ = transform.AttentionCircuit(
        focus=[_random_operation(1), _random_operation(3)],
        context=transform.TransformationContext(
            circuit.Circuit(5, None),
            circuit.Circuit(5, None),
            circuit.Circuit(5, None)
        )
    )
    perform_rule = lambda operation_in: [operation_in]
    with self.assertRaisesRegex(
        ValueError,
        r'focus of attention_circ for PointTransformation must have size 1'
        r' \(found: 2\)'):
      transform.PointTransformation(attention_circ, perform_rule, None)

  def test_perform(self):
    """perform() must splice the rule output between before and between."""
    # preparation work
    num_qubits = 5
    operation_before1 = _random_operation(0)
    operation_before2 = _random_operation(1)
    operation_before3 = _random_operation(2)
    operation_between1 = _random_operation(0)
    operation_between2 = _random_operation(1)
    operation_after = _random_operation(1)
    operation_in = _random_operation(1)
    operation_out1 = _random_operation(1)
    operation_out2 = _random_operation(1)
    # define the transformation rule
    def perform_rule(operation):
      # check operation
      self.assertIs(operation, operation_in)
      # return the modified operations
      return [operation_out1, operation_out2]
    # initialize the PointTransformation
    transformation = transform.PointTransformation(
        attention_circ=transform.AttentionCircuit(
            focus=[operation_in],
            context=transform.TransformationContext(
                circuit.Circuit(num_qubits, [
                    operation_before1,
                    operation_before2,
                    operation_before3
                ]),
                circuit.Circuit(num_qubits, [
                    operation_between1,
                    operation_between2
                ]),
                circuit.Circuit(num_qubits, [
                    operation_after
                ])
            )
        ),
        perform_rule=perform_rule,
        rule_id=None
    )
    # call the method to be tested
    circ_out = transformation.perform()
    # check type for circ_out
    self.assertIs(type(circ_out), circuit.Circuit)
    # check the value for circ_out
    self.assertTrue(_elementwise_is(
        circ_out.get_operation_sequence(),
        [
            operation_before1,
            operation_before2,
            operation_before3,
            operation_out1,
            operation_out2,
            operation_between1,
            operation_between2,
            operation_after
        ]
    ))
class PairTransformationTest(parameterized.TestCase):
  """Tests for transform.PairTransformation (two-operation rules)."""

  def test_initializer_and_getters(self):
    """Constructor arguments must be exposed unchanged by the getters."""
    # preparation work
    focus_in = [
        _random_operation(1, 2),
        _random_operation(2, 3)
    ]
    context_in = transform.TransformationContext(
        circuit.Circuit(5, None),
        circuit.Circuit(5, None),
        circuit.Circuit(5, None)
    )
    locations_in = (0, 1)
    perform_rule = lambda operation_in: [operation_in]
    rule_id = object()
    # initialize the PairTransformation
    transformation = transform.PairTransformation(
        attention_circ=transform.AttentionCircuit(
            focus=focus_in,
            context=context_in,
            locations=locations_in
        ),
        perform_rule=perform_rule,
        rule_id=rule_id
    )
    # check type and value transformation.focus()
    focus_out = transformation.focus()
    self.assertIs(type(focus_out), tuple)
    self.assertTrue(_elementwise_is(focus_out, focus_in))
    # check transformation.context()
    self.assertIs(transformation.context(), context_in)
    # check type and value transformation.locations()
    locations_out = transformation.locations()
    self.assertIs(type(locations_out), tuple)
    self.assertTrue(_elementwise_is(locations_out, locations_in))
    # check transformation.rule_id()
    self.assertIs(transformation.rule_id(), rule_id)

  def test_initializer_circ_type_error(self):
    """Passing a plain Circuit instead of an AttentionCircuit must raise."""
    circ = circuit.Circuit(5, None)
    perform_rule = lambda operation_in: [operation_in]
    with self.assertRaisesRegex(
        TypeError,
        r'attention_circ is not an AttentionCircuit \(found type: Circuit\)'):
      transform.PairTransformation(circ, perform_rule, None)

  def test_initializer_focus_size_error(self):
    """A focus of size != 2 must be rejected for a pair transformation."""
    attention_circ = transform.AttentionCircuit(
        focus=[_random_operation(2)],
        context=transform.TransformationContext(
            circuit.Circuit(5, None),
            circuit.Circuit(5, None),
            circuit.Circuit(5, None)
        )
    )
    perform_rule = lambda operation_in: [operation_in]
    with self.assertRaisesRegex(
        ValueError,
        r'focus of attention_circ for PairTransformation must have size 2'
        r' \(found: 1\)'):
      transform.PairTransformation(attention_circ, perform_rule, None)

  def test_initializer_redundant_transformation_error(self):
    """Two focus operations on disjoint qubits commute trivially -> reject."""
    attention_circ = transform.AttentionCircuit(
        focus=[_random_operation(2, 3), _random_operation(5)],
        context=transform.TransformationContext(
            circuit.Circuit(5, None),
            circuit.Circuit(5, None),
            circuit.Circuit(5, None)
        )
    )
    perform_rule = lambda operation_in: [operation_in]
    with self.assertRaisesRegex(
        ValueError,
        r'transformation redundant because operations trivially commute'):
      transform.PairTransformation(attention_circ, perform_rule, None)

  def test_perform(self):
    """perform() must splice both rule outputs around the between segment."""
    # preparation work
    num_qubits = 5
    operation_before1 = _random_operation(0)
    operation_before2 = _random_operation(1)
    operation_before3 = _random_operation(2)
    operation_between1 = _random_operation(0)
    operation_between2 = _random_operation(1)
    operation_after = _random_operation(1)
    operation_in1 = _random_operation(1)
    operation_in2 = _random_operation(1)
    operations_out_first1 = _random_operation(1)
    operations_out_first2 = _random_operation(1)
    operations_out_second1 = _random_operation(1)
    operations_out_second2 = _random_operation(1)
    operations_out_second3 = _random_operation(1)
    # define the transformation rule
    def perform_rule(operation_first, operation_second):
      # check operation_first and operation_second
      self.assertIs(operation_first, operation_in1)
      self.assertIs(operation_second, operation_in2)
      # return the modified operations
      operations_out_first = [
          operations_out_first1,
          operations_out_first2
      ]
      operations_out_second = [
          operations_out_second1,
          operations_out_second2,
          operations_out_second3
      ]
      return operations_out_first, operations_out_second
    # initialize the PairTransformation
    transformation = transform.PairTransformation(
        attention_circ=transform.AttentionCircuit(
            focus=[operation_in1, operation_in2],
            context=transform.TransformationContext(
                circuit.Circuit(num_qubits, [
                    operation_before1,
                    operation_before2,
                    operation_before3
                ]),
                circuit.Circuit(num_qubits, [
                    operation_between1,
                    operation_between2
                ]),
                circuit.Circuit(num_qubits, [
                    operation_after
                ])
            )
        ),
        perform_rule=perform_rule,
        rule_id=None
    )
    # call the method to be tested
    circ_out = transformation.perform()
    # check type for circ_out
    self.assertIs(type(circ_out), circuit.Circuit)
    # check the value for circ_out
    self.assertTrue(_elementwise_is(
        circ_out.get_operation_sequence(),
        [
            operation_before1,
            operation_before2,
            operation_before3,
            operations_out_first1,
            operations_out_first2,
            operation_between1,
            operation_between2,
            operations_out_second1,
            operations_out_second2,
            operations_out_second3,
            operation_after
        ]
    ))
class GroupTransformationTest(parameterized.TestCase):
  """Tests for transform.GroupTransformation (multi-operation rules)."""

  def test_initializer_and_getters(self):
    """Constructor arguments must be exposed unchanged by the getters."""
    # preparation work
    focus_in = [
        _random_operation(1, 2),
        _random_operation(2, 3),
        _random_operation(3, 4)
    ]
    context_in = transform.TransformationContext(
        circuit.Circuit(5, None),
        circuit.Circuit(5, None),
        circuit.Circuit(5, None)
    )
    locations_in = (0, 1, 2)
    perform_rule = lambda operation_in: [operation_in]
    rule_id = object()
    # initialize the GroupTransformation
    transformation = transform.GroupTransformation(
        attention_circ=transform.AttentionCircuit(
            focus=focus_in,
            context=context_in,
            locations=locations_in
        ),
        perform_rule=perform_rule,
        rule_id=rule_id
    )
    # check type and value transformation.focus()
    focus_out = transformation.focus()
    self.assertIs(type(focus_out), tuple)
    self.assertTrue(_elementwise_is(focus_out, focus_in))
    # check transformation.context()
    self.assertIs(transformation.context(), context_in)
    # check type and value transformation.locations()
    locations_out = transformation.locations()
    self.assertIs(type(locations_out), tuple)
    self.assertTrue(_elementwise_is(locations_out, locations_in))
    # check transformation.rule_id()
    self.assertIs(transformation.rule_id(), rule_id)

  def test_initializer_circ_type_error(self):
    """Passing a plain Circuit instead of an AttentionCircuit must raise."""
    circ = circuit.Circuit(5, None)
    perform_rule = lambda operation_in: [operation_in]
    with self.assertRaisesRegex(
        TypeError,
        r'attention_circ is not an AttentionCircuit \(found type: Circuit\)'):
      transform.GroupTransformation(circ, perform_rule, None)

  def test_perform(self):
    """perform() must splice the rule output between before and between."""
    # preparation work
    num_qubits = 5
    operation_before1 = _random_operation(0)
    operation_before2 = _random_operation(1)
    operation_before3 = _random_operation(2)
    operation_between1 = _random_operation(0)
    operation_between2 = _random_operation(1)
    operation_after = _random_operation(1)
    operation_in1 = _random_operation(1)
    operation_in2 = _random_operation(1)
    operation_in3 = _random_operation(1)
    operation_in4 = _random_operation(1)
    operation_out1 = _random_operation(1)
    operation_out2 = _random_operation(1)
    operation_out3 = _random_operation(1)
    # define the transformation rule
    def perform_rule(operations_in):
      # check type and value for operations_in
      self.assertIs(type(operations_in), tuple)
      self.assertTrue(_elementwise_is(
          operations_in,
          [operation_in1, operation_in2, operation_in3, operation_in4]
      ))
      # return the modified operations
      return [operation_out1, operation_out2, operation_out3]
    # initialize the GroupTransformation
    transformation = transform.GroupTransformation(
        attention_circ=transform.AttentionCircuit(
            focus=[operation_in1, operation_in2, operation_in3, operation_in4],
            context=transform.TransformationContext(
                circuit.Circuit(num_qubits, [
                    operation_before1,
                    operation_before2,
                    operation_before3
                ]),
                circuit.Circuit(num_qubits, [
                    operation_between1,
                    operation_between2
                ]),
                circuit.Circuit(num_qubits, [
                    operation_after
                ])
            )
        ),
        perform_rule=perform_rule,
        rule_id=None
    )
    # call the method to be tested
    circ_out = transformation.perform()
    # check type for circ_out
    self.assertIs(type(circ_out), circuit.Circuit)
    # check the value for circ_out
    self.assertTrue(_elementwise_is(
        circ_out.get_operation_sequence(),
        [
            operation_before1,
            operation_before2,
            operation_before3,
            operation_out1,
            operation_out2,
            operation_out3,
            operation_between1,
            operation_between2,
            operation_after
        ]
    ))
class AttentionCircuitTest(parameterized.TestCase):
  """Tests for transform.AttentionCircuit."""

  def test_initializer_with_locations_and_getters(self):
    """Focus, context, locations and length must round-trip through getters."""
    # preparation work
    focus_in = (
        _random_operation(1, 2),
        _random_operation(1)
    )
    focus_length = len(focus_in)
    context = transform.TransformationContext(
        circuit.Circuit(5, [
            _random_operation(2),
            _random_operation(3),
            _random_operation(4)
        ]),
        circuit.Circuit(5, [
            _random_operation(0)
        ]),
        circuit.Circuit(5, [
            _random_operation(0),
            _random_operation(1, 2)
        ])
    )
    locations_in = (3, 5)
    # construct the AttentionCircuit
    att_circ = transform.AttentionCircuit(
        focus_in,
        context,
        locations=locations_in
    )
    # check type and value for att_circ.focus()
    focus_out = att_circ.focus()
    self.assertIs(type(focus_out), tuple)
    self.assertTrue(_elementwise_is(focus_out, focus_in))
    # check att_circ.context()
    self.assertIs(att_circ.context(), context)
    # check type and value for att_circ.locations()
    locations_out = att_circ.locations()
    self.assertIs(type(locations_out), tuple)
    self.assertTrue(_elementwise_is(locations_out, locations_in))
    # check type and value for len(att_circ)
    length = len(att_circ)
    self.assertIs(type(length), int)
    self.assertEqual(length, focus_length)

  def test_initializer_without_locations_and_getters(self):
    """locations=None must be accepted and reported back as None."""
    # preparation work
    focus_in = (
        _random_operation(1, 2),
        _random_operation(1)
    )
    focus_length = len(focus_in)
    context = transform.TransformationContext(
        circuit.Circuit(5, [
            _random_operation(2),
            _random_operation(3),
            _random_operation(4)
        ]),
        circuit.Circuit(5, [
            _random_operation(0)
        ]),
        circuit.Circuit(5, [
            _random_operation(0),
            _random_operation(1, 2)
        ])
    )
    # construct the AttentionCircuit
    att_circ = transform.AttentionCircuit(
        focus_in,
        context,
        locations=None
    )
    # check type and value for att_circ.focus()
    focus_out = att_circ.focus()
    self.assertIs(type(focus_out), tuple)
    self.assertTrue(_elementwise_is(focus_out, focus_in))
    # check att_circ.context()
    self.assertIs(att_circ.context(), context)
    # check that att_circ.locations() is None
    self.assertIsNone(att_circ.locations())
    # check type and value for len(att_circ)
    length = len(att_circ)
    self.assertIs(type(length), int)
    self.assertEqual(length, focus_length)

  @parameterized.parameters([
      [42, r'\'int\' object is not iterable'],
      [[42], r'only Operation objects allowed in focus \(found types: int\)']
  ])
  def test_initializer_focus_type_error(self, focus, message):
    """Non-iterable or non-Operation focus must raise a TypeError."""
    context = transform.TransformationContext(
        circuit.Circuit(5, None),
        circuit.Circuit(5, None),
        circuit.Circuit(5, None)
    )
    with self.assertRaisesRegex(TypeError, message):
      transform.AttentionCircuit(focus, context)

  def test_initializer_empty_focus_error(self):
    """An empty focus must be rejected with a ValueError."""
    context = transform.TransformationContext(
        circuit.Circuit(5, None),
        circuit.Circuit(5, None),
        circuit.Circuit(5, None)
    )
    with self.assertRaisesRegex(ValueError, r'focus must not be empty'):
      transform.AttentionCircuit((), context)

  def test_initializer_context_type_error(self):
    """A non-TransformationContext context must raise a TypeError."""
    focus = (
        _random_operation(1, 2),
        _random_operation(1)
    )
    with self.assertRaisesRegex(
        TypeError,
        r'context is not a TransformationContext \(found type: int\)'):
      transform.AttentionCircuit(focus, 42)

  @parameterized.parameters([
      [42, r'\'int\' object is not iterable'],
      [[47.11], r'location is not integer-like \(found type: float\)']
  ])
  def test_initializer_locations_type_error(self, locations, message):
    """Non-iterable or non-integer locations must raise a TypeError."""
    focus = (
        _random_operation(1, 2),
        _random_operation(1)
    )
    context = transform.TransformationContext(
        circuit.Circuit(5, None),
        circuit.Circuit(5, None),
        circuit.Circuit(5, None)
    )
    with self.assertRaisesRegex(TypeError, message):
      transform.AttentionCircuit(focus, context, locations=locations)

  def test_initializer_locations_length_error(self):
    """len(locations) != len(focus) must be rejected with a ValueError."""
    focus = (
        _random_operation(1, 2),
        _random_operation(1)
    )
    context = transform.TransformationContext(
        circuit.Circuit(5, [
            _random_operation(2),
            _random_operation(3),
            _random_operation(4)
        ]),
        circuit.Circuit(5, [
            _random_operation(0)
        ]),
        circuit.Circuit(5, [
            _random_operation(0),
            _random_operation(1, 2)
        ])
    )
    with self.assertRaisesRegex(
        ValueError,
        r'inconsistent lengths for focus and locations: 2 vs. 1'):
      transform.AttentionCircuit(focus, context, locations=(3,))
class TransformationContextTest(parameterized.TestCase):
  """Tests for transform.TransformationContext."""

  def test_initializer_and_getters(self):
    """before/between/after must round-trip through the getters."""
    # preparation work: create three circuits before, between and after
    num_qubits = 5
    before = circuit.Circuit(num_qubits, [
        _random_operation(0, 2),
        _random_operation(4),
        _random_operation(1)
    ])
    between = circuit.Circuit(num_qubits, [
        _random_operation(0),
        _random_operation(4)
    ])
    after = circuit.Circuit(num_qubits, [
        _random_operation(0, 1),
        _random_operation(1, 2),
        _random_operation(2, 3, 4)
    ])
    # construct the TransformationContext
    context = transform.TransformationContext(before, between, after)
    # check before, between and after
    self.assertIs(context.before(), before)
    self.assertIs(context.between(), between)
    self.assertIs(context.after(), after)

  @parameterized.parameters([
      [
          42,
          circuit.Circuit(5, None),
          circuit.Circuit(5, None),
          'int, Circuit, Circuit'
      ],
      [
          circuit.Circuit(6, None),
          47.11,
          circuit.Circuit(6, None),
          'Circuit, float, Circuit'
      ],
      [
          circuit.Circuit(7, None),
          circuit.Circuit(7, None),
          'hello',
          'Circuit, Circuit, str'
      ],
  ])
  def test_initializer_type_error(self, before, between, after, type_string):
    """A non-Circuit in any of the three slots must raise a TypeError."""
    with self.assertRaisesRegex(
        TypeError,
        r'before, between and after must be Circuits \(found types: %s\)'
        %type_string):
      transform.TransformationContext(before, between, after)

  @parameterized.parameters([
      [7, 5, 5],
      [8, 4, 8],
      [3, 3, 6],
      [2, 3, 4]
  ])
  def test_initializer_inconsistent_num_qubits_error(self,
                                                     num_before,
                                                     num_between,
                                                     num_after):
    """Circuits with mismatching qubit counts must raise a ValueError."""
    before = circuit.Circuit(num_before, None)
    between = circuit.Circuit(num_between, None)
    after = circuit.Circuit(num_after, None)
    with self.assertRaisesRegex(
        ValueError,
        r'inconsistent number of qubits for before, between and after:'
        r' \(%d, %d, %d\)'%(num_before, num_between, num_after)):
      transform.TransformationContext(before, between, after)

  def test_inject(self):
    """inject() must interleave the two sequences with before/between/after."""
    # preparation work: create operations
    num_qubits = 5
    operation_a = _random_operation(0)
    operation_b = _random_operation(0, 1)
    operation_c1 = _random_operation(1)
    operation_c2 = _random_operation(1, 2)
    operation_c3 = _random_operation(2)
    operation_d1 = _random_operation(2, 3)
    operation_d2 = _random_operation(3)
    operation_e1 = _random_operation(3, 4)
    operation_e2 = _random_operation(4)
    # preparation work: construct the TransformationContext
    context = transform.TransformationContext(
        circuit.Circuit(num_qubits, [operation_a]),
        circuit.Circuit(num_qubits, [operation_c1, operation_c2, operation_c3]),
        circuit.Circuit(num_qubits, [operation_e1, operation_e2])
    )
    # call the method to be tested
    circ_full = context.inject([operation_b], [operation_d1, operation_d2])
    # check type for circ_full
    self.assertIs(type(circ_full), circuit.Circuit)
    # check value for circ_full
    self.assertTrue(_elementwise_is(
        circ_full.get_operation_sequence(),
        [
            operation_a,
            operation_b,
            operation_c1,
            operation_c2,
            operation_c3,
            operation_d1,
            operation_d2,
            operation_e1,
            operation_e2
        ]
    ))

  @parameterized.parameters([
      [[42], [_random_operation(1, 2)]],
      [[_random_operation(1, 2)], [42]]
  ])
  def test_inject_type_error(self, operations_first, operations_second):
    """A non-Operation in either injected sequence must raise a TypeError."""
    num_qubits = 4
    context = transform.TransformationContext(
        circuit.Circuit(num_qubits, None),
        circuit.Circuit(num_qubits, None),
        circuit.Circuit(num_qubits, None)
    )
    with self.assertRaisesRegex(
        TypeError,
        r'found illegal type\(s\) in operation_sequence: int \(expected:'
        r' Operation\)'):
      context.inject(operations_first, operations_second)
class FocusSingleOperationTest(parameterized.TestCase):
  """Tests for transform.focus_single_operation."""

  @parameterized.parameters([3, -2])  # both locations are equivalent
  def test_successful(self, location):
    """A valid location must yield the expected focus, context and location."""
    # preparation work
    operation0 = _random_operation(0)
    operation1 = _random_operation(0, 1)
    operation2 = _random_operation(1)
    operation3 = _random_operation(1, 2)
    operation4 = _random_operation(2)
    circ = circuit.Circuit(5, [
        operation0,
        operation1,
        operation2,
        operation3,
        operation4
    ])
    # call the function to be tested
    attention_circ = transform.focus_single_operation(circ, location)
    # check type of attention_circ
    self.assertIs(type(attention_circ), transform.AttentionCircuit)
    # check the focus of attention_circ
    self.assertLen(attention_circ, 1)
    self.assertTrue(_elementwise_is(attention_circ.focus(), [operation3]))
    # check the context of attention_circ
    context = attention_circ.context()
    self.assertTrue(_elementwise_is(
        context.before().get_operation_sequence(),
        [operation0, operation1, operation2]
    ))
    self.assertEmpty(context.between())
    self.assertTrue(_elementwise_is(
        context.after().get_operation_sequence(),
        [operation4]
    ))
    # check the locations of attention_circ
    self.assertTupleEqual(attention_circ.locations(), (3,))

  def test_circ_type_error(self):
    """A non-Circuit argument must raise a TypeError."""
    with self.assertRaisesRegex(
        TypeError,
        r'circ is not a Circuit \(found type: range\)'):
      transform.focus_single_operation(range(10), 3)

  def test_location_type_error(self):
    """A non-integer location must raise a TypeError."""
    circ = circuit.Circuit(5, None)
    with self.assertRaisesRegex(
        TypeError,
        r'location is not integer-like \(found type: float\)'):
      transform.focus_single_operation(circ, 47.11)

  @parameterized.parameters([5, -6])
  def test_location_out_of_bounds_error(self, location):
    """A location outside the circuit length must raise an IndexError."""
    circ = circuit.Circuit(3, [
        _random_operation(0),
        _random_operation(0, 1),
        _random_operation(1),
        _random_operation(1, 2),
        _random_operation(2),
    ])
    with self.assertRaisesRegex(
        IndexError,
        r'location %d out of bounds for a Circuit of length 5'%location):
      transform.focus_single_operation(circ, location)
def _positive_example_circuit(*segments_and_operations):
  """Builds a circuit together with the AttentionCircuit expected for it.

  Args:
    *segments_and_operations: pairs (segment_tag, operation) given in circuit
      order, where segment_tag is one of 'focus', 'before', 'between' or
      'after' and exactly two entries carry the 'focus' tag.

  Returns:
    A pair (circ, att_circ): the assembled Circuit and the AttentionCircuit
    that transform.focus_operation_pair is expected to produce for it.
  """
  operations = []
  segments = {
      'focus': [],
      'before': [],
      'between': [],
      'after': []
  }
  max_qubit = 0
  for location, (segment_tag, operation) in enumerate(segments_and_operations):
    operations.append(operation)
    segments[segment_tag].append(location)
    max_qubit = np.maximum(max_qubit, max(operation.get_qubits()))
  circ = circuit.Circuit(max_qubit + 1, operations)

  # <checking that the example circuit makes sense>
  # The asserts below verify that the declared segment tags are actually
  # achievable: every operation tagged 'before'/'after'/'between' that sits
  # between the two focus locations must commute trivially with whatever it
  # needs to be moved past, and relative orders must be consistent.
  assert len(segments['focus']) == 2
  location_first, location_second = segments['focus']  # length checked in previous line, so pylint: disable=unbalanced-tuple-unpacking
  assert all(
      location_before < location_second
      for location_before in segments['before']
  )
  assert all(
      location_first < location_between < location_second
      for location_between in segments['between']
  )
  assert all(
      location_after > location_first
      for location_after in segments['after']
  )
  # 'before' operations that appear after the first focus operation must be
  # movable to its left; symmetrically for 'after' operations.
  pool_to_the_left = [
      location_before
      for location_before in segments['before']
      if location_before > location_first
  ]
  pool_to_the_right = [
      location_after
      for location_after in segments['after']
      if location_after < location_second
  ]
  assert all(
      circ[location_second].commutes_trivially_with(circ[location])
      for location in segments['between'] + pool_to_the_right
  )
  assert all(
      circ[location_first].commutes_trivially_with(circ[location])
      for location in pool_to_the_left + segments['between']
  )
  assert all(
      loc0 < loc1 or circ[loc0].commutes_trivially_with(circ[loc1])
      for loc0, loc1 in itertools.product(pool_to_the_left, segments['between'])
  )
  assert all(
      loc0 < loc1 or circ[loc0].commutes_trivially_with(circ[loc1])
      for loc0, loc1 in itertools.product(pool_to_the_left, pool_to_the_right)
  )
  assert all(
      loc0 < loc1 or circ[loc0].commutes_trivially_with(circ[loc1])
      for loc0, loc1 in itertools.product(segments['between'],
                                          pool_to_the_right)
  )
  # </checking that the example circuit makes sense>

  return circ, transform.AttentionCircuit(
      focus=circ[segments['focus']].get_operation_sequence(),
      context=transform.TransformationContext(
          before=circ[segments['before']],
          between=circ[segments['between']],
          after=circ[segments['after']]
      ),
      locations=segments['focus']
  )
def _positive_focus_operation_pair_examples():
  """Yields (circ, expected AttentionCircuit) fixtures for positive tests.

  Covers adjacent focus pairs, pairs separated by commuting operations, and
  permutations of enclosed before/between/after operations; consumed by
  FocusOperationPairTest.test_positive via parameterized.parameters.
  """
  # adjacent focus pair on the same qubits (also with swapped qubit order)
  yield _positive_example_circuit(
      ['focus', _random_operation(0, 1)],
      ['focus', _random_operation(0, 1)]
  )
  yield _positive_example_circuit(
      ['focus', _random_operation(0, 1)],
      ['focus', _random_operation(1, 0)]
  )
  # focus pair with surrounding before/after context
  yield _positive_example_circuit(
      ['before', _random_operation(0, 1)],
      ['before', _random_operation(0)],
      ['focus', _random_operation(0, 1)],
      ['focus', _random_operation(0, 1)],
      ['after', _random_operation(1)],
      ['after', _random_operation(0)]
  )
  # focus pair separated by trivially commuting 'between' operations
  yield _positive_example_circuit(
      ['focus', _random_operation(1, 2)],
      ['between', _random_operation(0)],
      ['between', _random_operation(3)],
      ['focus', _random_operation(1, 2)]
  )
  yield _positive_example_circuit(
      ['before', _random_operation(0, 1)],
      ['before', _random_operation(1, 2)],
      ['before', _random_operation(0)],
      ['focus', _random_operation(1, 2)],
      ['between', _random_operation(0)],
      ['between', _random_operation(3)],
      ['focus', _random_operation(1, 2)],
      ['after', _random_operation(1)],
      ['after', _random_operation(2)]
  )
  # context operations interleaved with the focus pair that must be moved
  # out of the way (possible because they commute trivially)
  yield _positive_example_circuit(
      ['focus', _random_operation(0, 1)],
      ['before', _random_operation(2, 3)],
      ['between', _random_operation(3, 4)],
      ['focus', _random_operation(1, 2)]
  )
  yield _positive_example_circuit(
      ['focus', _random_operation(2, 3)],
      ['between', _random_operation(0, 1)],
      ['after', _random_operation(1, 2)],
      ['focus', _random_operation(3, 4)]
  )
  yield _positive_example_circuit(
      ['focus', _random_operation(0, 1)],
      ['before', _random_operation(3, 4)],
      ['before', _random_operation(2, 3)],
      ['focus', _random_operation(1, 2)]
  )
  yield _positive_example_circuit(
      ['focus', _random_operation(2, 3)],
      ['after', _random_operation(1, 2)],
      ['after', _random_operation(0, 1)],
      ['focus', _random_operation(3, 4)]
  )
  yield _positive_example_circuit(
      ['focus', _random_operation(0, 1)],
      ['before', _random_operation(2, 3)],
      ['after', _random_operation(0, 3)],
      ['focus', _random_operation(1, 2)]
  )
  # all orderings of one enclosed before/between/after operation each
  for enclosed_operations in itertools.permutations([
      ['before', _random_operation(2)],
      ['between', _random_operation(3)],
      ['after', _random_operation(0)]]):
    yield _positive_example_circuit(*(
        [['focus', _random_operation(0, 1)]] +
        list(enclosed_operations) +
        [['focus', _random_operation(1, 2)]]
    ))
  for enclosed_operations in itertools.permutations([
      ['before', _random_operation(3)],
      ['between', _random_operation(4)],
      ['after', _random_operation(0, 1)]]):
    yield _positive_example_circuit(*(
        [['focus', _random_operation(1, 2)]] +
        list(enclosed_operations) +
        [['focus', _random_operation(2, 3)]]
    ))
  for enclosed_operations in itertools.permutations([
      ['before', _random_operation(2, 3)],
      ['between', _random_operation(4)],
      ['after', _random_operation(0)]]):
    yield _positive_example_circuit(*(
        [['focus', _random_operation(0, 1)]] +
        list(enclosed_operations) +
        [['focus', _random_operation(1, 2)]]
    ))
  for enclosed_operations in itertools.permutations([
      ['before', _random_operation(3, 4)],
      ['between', _random_operation(5)],
      ['after', _random_operation(0, 1)]]):
    yield _positive_example_circuit(*(
        [['focus', _random_operation(1, 2)]] +
        list(enclosed_operations) +
        [['focus', _random_operation(2, 3)]]
    ))
class FocusOperationPairTest(parameterized.TestCase):
  @parameterized.parameters(_positive_focus_operation_pair_examples())
  def test_positive(self, circ, att_circ_expected):
    """focus_operation_pair must reproduce the expected AttentionCircuit."""
    # the fixture generator guarantees exactly two focus operations
    assert len(att_circ_expected) == 2
    location_first, location_second = att_circ_expected.locations()
    # call the function to be tested
    att_circ = transform.focus_operation_pair(
        circ,
        location_first,
        location_second
    )
    # check the type for att_circ
    self.assertIsInstance(att_circ, transform.AttentionCircuit)
    # check the focus for att_circ
    self.assertLen(att_circ, 2)
    self.assertTrue(_elementwise_is(
        att_circ.focus(),
        att_circ_expected.focus()
    ))
    # check the locations for att_circ
    self.assertTupleEqual(
        att_circ.locations(),
        (location_first, location_second)
    )
    # check the context for att_circ
    self.assertTrue(_elementwise_is(
        att_circ.context().before().get_operation_sequence(),
        att_circ_expected.context().before().get_operation_sequence()
    ))
    self.assertTrue(_elementwise_is(
        att_circ.context().between().get_operation_sequence(),
        att_circ_expected.context().between().get_operation_sequence()
    ))
    self.assertTrue(_elementwise_is(
        att_circ.context().after().get_operation_sequence(),
        att_circ_expected.context().after().get_operation_sequence()
    ))
@parameterized.parameters([
[
circuit.Circuit(1, [
_random_operation(0),
_random_operation(0),
_random_operation(0)
]),
0, 2
],
[
circuit.Circuit(2, [
_random_operation(0, 1),
_random_operation(0),
_random_operation(0, 1)
]),
0, 2
],
[
circuit.Circuit(3, [
_random_operation(0, 1),
_random_operation(1),
_random_operation(1, 2)
]),
0, 2
],
[
circuit.Circuit(3, [
_random_operation(0, 1),
_random_operation(0, 2),
_random_operation(1, 2)
]),
0, 2
],
[
circuit.Circuit(3, [
_random_operation(0, 1),
_random_operation(0, 2),
_random_operation(1),
_random_operation(1, 2)
]),
0, 3
],
[
circuit.Circuit(3, [
_random_operation(0, 1),
_random_operation(2),
_random_operation(0, 2),
_random_operation(1, 2)
]),
0, 3
],
[
circuit.Circuit(4, [
_random_operation(0, 1),
_random_operation(0, 3),
_random_operation(2, 3),
_random_operation(1, 2)
]),
0, 3
]
])
def test_negative(self, circ, location_first, location_second):
with self.assertRaises(transform.OperationsNotAlignedError):
transform.focus_operation_pair(circ, location_first, location_second)
def test_circ_type_error(self):
with self.assertRaisesRegex(
TypeError,
r'circ is not a Circuit \(found type: range\)'):
transform.focus_operation_pair(range(10), 3, 5)
def test_location_first_type_error(self):
circ = circuit.Circuit(3, [
_random_operation(0),
_random_operation(0, 1),
_random_operation(1),
_random_operation(1, 2),
_random_operation(2)
])
with self.assertRaisesRegex(
TypeError,
r'location_first is not integer-like \(found type: float\)'):
transform.focus_operation_pair(circ, 47.11, 3)
def test_location_second_type_error(self):
circ = circuit.Circuit(3, [
_random_operation(0),
_random_operation(0, 1),
_random_operation(1),
_random_operation(1, 2),
_random_operation(2)
])
with self.assertRaisesRegex(
TypeError,
r'location_second is not integer-like \(found type: float\)'):
transform.focus_operation_pair(circ, 3, 47.11)
@parameterized.parameters([5, -6])
def test_location_first_out_of_bounds_error(self, location_first):
circ = circuit.Circuit(3, [
_random_operation(0),
_random_operation(0, 1),
_random_operation(1),
_random_operation(1, 2),
_random_operation(2),
])
with self.assertRaisesRegex(
IndexError,
r'location_first %d out of bounds for a Circuit of length 5'
%location_first):
transform.focus_operation_pair(circ, location_first, 3)
@parameterized.parameters([5, -6])
def test_location_second_out_of_bounds_error(self, location_second):
circ = circuit.Circuit(3, [
_random_operation(0),
_random_operation(0, 1),
_random_operation(1),
_random_operation(1, 2),
_random_operation(2),
])
with self.assertRaisesRegex(
IndexError,
r'location_second %d out of bounds for a Circuit of length 5'
%location_second):
transform.focus_operation_pair(circ, 3, location_second)
@parameterized.parameters([
[4, 3],
[-1, 3],
[4, -2],
[-1, -2]
])
def test_locations_not_sorted_error(self, location_first, location_second):
circ = circuit.Circuit(3, [
_random_operation(0),
_random_operation(0, 1),
_random_operation(1),
_random_operation(1, 2),
_random_operation(2),
])
with self.assertRaisesRegex(
ValueError,
r'location_first not smaller than location_second:'
r' 4 \(or -1\) vs 3 \(or -2\)'):
transform.focus_operation_pair(circ, location_first, location_second)
class FocusLocalGroupTest(parameterized.TestCase):
    """Tests for transform.focus_local_group."""

    @parameterized.parameters([
        # all locations are equivalent
        [(2, 3, 5)],
        [(2, -5, 5)],
        [(-6, -5, -3)],
    ])
    def test_successful(self, locations):
        # preparation work: create the operations and the circuit
        operation0 = _random_operation(1)
        operation1 = _random_operation(0, 1)
        operation2 = _random_operation(1)
        operation3 = _random_operation(1)
        operation4 = _random_operation(2, 3)
        operation5 = _random_operation(1)
        operation6 = _random_operation(0)
        operation7 = _random_operation(0, 1)
        circ = circuit.Circuit(4, [
            operation0,
            operation1,
            operation2,
            operation3,
            operation4,
            operation5,
            operation6,
            operation7
        ])
        # call the function to be tested
        attention_circ = transform.focus_local_group(circ, locations)
        # check type of attention_circ
        self.assertIs(type(attention_circ), transform.AttentionCircuit)
        # check the focus of attention_circ
        self.assertLen(attention_circ, 3)
        self.assertTrue(_elementwise_is(
            attention_circ.focus(),
            [operation2, operation3, operation5]
        ))
        # check the context of attention_circ
        context = attention_circ.context()
        self.assertTrue(_elementwise_is(
            context.before().get_operation_sequence(),
            [operation0, operation1]
        ))
        self.assertTrue(_elementwise_is(
            context.between().get_operation_sequence(),
            [operation4]
        ))
        self.assertTrue(_elementwise_is(
            context.after().get_operation_sequence(),
            [operation6, operation7]
        ))
        # check the locations of attention_circ
        self.assertTupleEqual(attention_circ.locations(), (2, 3, 5))

    def test_circ_type_error(self):
        with self.assertRaisesRegex(
                TypeError,
                r'circ is not a Circuit \(found type: range\)'):
            transform.focus_local_group(range(10), [3, 4])

    def test_location_type_error(self):
        circ = circuit.Circuit(5, [
            _random_operation(0),
            _random_operation(0, 1),
            _random_operation(1),
            _random_operation(1, 2)
        ])
        with self.assertRaisesRegex(
                TypeError,
                r'location is not integer-like \(found type: float\)'):
            transform.focus_local_group(circ, [2, 47.11])

    def test_locations_empty_error(self):
        circ = circuit.Circuit(3, [
            _random_operation(0),
            _random_operation(0, 1),
            _random_operation(1),
            _random_operation(1, 2),
            _random_operation(2)
        ])
        with self.assertRaisesRegex(ValueError, r'locations must not be empty'):
            transform.focus_local_group(circ, [])

    def test_duplicate_locations_error(self):
        circ = circuit.Circuit(3, [
            _random_operation(0),
            _random_operation(0, 1),
            _random_operation(1),
            _random_operation(1),
            _random_operation(2)
        ])
        with self.assertRaisesRegex(
                ValueError,
                r'locations contains duplicate elements'):
            transform.focus_local_group(circ, [2, 2, 3])

    def test_nonlocal_operations_error(self):
        circ = circuit.Circuit(3, [
            _random_operation(0),
            _random_operation(1),
            _random_operation(1),
            _random_operation(1, 2),
            _random_operation(2)
        ])
        with self.assertRaisesRegex(
                ValueError,
                r'focus contains non-local operations'):
            transform.focus_local_group(circ, [1, 2, 3])

    def test_not_the_same_qubit_error(self):
        circ = circuit.Circuit(3, [
            _random_operation(0),
            _random_operation(1),
            _random_operation(2),
            _random_operation(1),
            _random_operation(2)
        ])
        with self.assertRaisesRegex(
                ValueError,
                r'operations in the focus act on different qubits'):
            transform.focus_local_group(circ, [1, 2, 3])

    # each case: (locations, the location expected to be reported as illegal)
    @parameterized.parameters([
        [(1, 2, 5), 5],
        [(1, -3, 5), 5],
        [(-5, 2, 5), 5],
        [(-6, 2, 3), -6],
        [(-6, -3, 3), -6],
        [(-6, -3, -2), -6]
    ])
    def test_location_out_of_bounds_error(self, locations, illegal_location):
        circ = circuit.Circuit(3, [
            _random_operation(0),
            _random_operation(1),
            _random_operation(1),
            _random_operation(1),
            _random_operation(2)
        ])
        with self.assertRaisesRegex(
                IndexError,
                r'location %d out of bounds for a Circuit of length 5'
                %illegal_location):
            transform.focus_local_group(circ, locations)

    def test_operations_not_aligned_error(self):
        circ = circuit.Circuit(4, [
            _random_operation(0),
            _random_operation(1),
            _random_operation(1),
            _random_operation(1, 3),
            _random_operation(1),
            _random_operation(2)
        ])
        with self.assertRaises(transform.OperationsNotAlignedError):
            transform.focus_local_group(circ, [1, 2, 4])
if __name__ == '__main__':
    # Run all absl/parameterized test cases defined in this module.
    absltest.main()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 4 23:27:57 2019
@author: DavidFelipe
"""
import cv2
import numpy as np
import matplotlib.pyplot as plt
import scipy
from scipy import ndimage
#%matplotlib inline
class Color:
    """Color utilities for a working image: pixel-space plotting, k-means
    color quantization, and edge-preserving smoothing."""

    def __init__(self, image):
        """
        Input :
            image - 3-channel image array used as the working image
        """
        self.subset_image = image
        plt.rc('axes', **{'grid': False})
        plt.style.use('ggplot')

    def plot_pixels(self, data, title, colors=None, N=10000):
        """Scatter-plot a random subset of N pixels in the R-G and R-B planes.

        Input :
            data - image array; rescaled to 0..1 if it looks like 0..255
            title - figure title
            colors - per-pixel colors (defaults to the pixel values themselves)
            N - number of pixels to sample
        """
        if data.max() > 200:
            data = data / 255.0  # use 0...1 scale
        data = data.reshape((-1, 3))
        if colors is None:
            colors = data
        # choose a random subset (fixed seed for reproducible plots)
        rng = np.random.RandomState(0)
        i = rng.permutation(data.shape[0])[:N]
        colors = colors[i]
        pixel = data[i].T
        R, G, B = pixel[0], pixel[1], pixel[2]
        fig, ax = plt.subplots(1, 2, figsize=(16, 6))
        ax[0].scatter(R, G, color=colors, marker='.')
        ax[0].set(xlabel='Red', ylabel='Green', xlim=(0, 1), ylim=(0, 1))
        ax[1].scatter(R, B, color=colors, marker='.')
        ax[1].set(xlabel='Red', ylabel='Blue', xlim=(0, 1), ylim=(0, 1))
        fig.suptitle(title, size=20)

    def clustering(self, clusters, verbose=0):
        """
        Input :
            clusters - Number of clusters to group the colors
            verbose - 1 to also plot the color space before and after clustering
        Output :
            image_recolored - Image with every pixel replaced by its cluster center
        """
        img_data = self.subset_image / 255.0  # use 0...1 scale
        img_data = img_data.reshape((-1, 3))
        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 20, 1.0)
        flags = cv2.KMEANS_PP_CENTERS
        compactness, labels, centers = cv2.kmeans(img_data.astype(np.float32),
                                                  clusters, None, criteria, 10, flags)
        new_colors = centers[labels].reshape((-1, 3))
        image_recolored = new_colors.reshape(self.subset_image.shape)
        if verbose == 1:
            ## See the cluster process segmentation
            self.plot_pixels(self.subset_image, title='Input color space: 16 million possible colors')
            # BUG FIX: the second plot shows the *clustered* image, but it used to
            # repeat the input-space title verbatim; label it by cluster count.
            self.plot_pixels(image_recolored, title='Reduced color space: %d colors' % clusters)
        return image_recolored

    def sharpening(self, image):
        """Apply edge-preserving bilateral filtering to `image` and store the
        result as the new working image (no return value)."""
        bil_image = cv2.bilateralFilter(image, 10, 100, 100)
        self.subset_image = bil_image
"""
This module is responsible to generate features from the data/logfiles
"""
import os
import math
import itertools
from pathlib import Path
import numpy as np
import pandas as pd
from scipy import stats
import setup_dataframes as sd
import synthesized_data
feature_names = []  # Set below: filled with the matrix column names once computed
_verbose = True
# Feature window sizes, in seconds (overridden via get_feature_matrix_and_label):
hw = 20  # Over how many preceeding seconds should most of features such as min, max, mean of hr and points be averaged?
cw = 10  # Over how many preceeding seconds should %crashes be calculated?
gradient_w = 30  # Over how many preceeding seconds should hr features be calculated that have sth. do to with change?
# Cache folders for precomputed feature matrices, one per combination of
# (all vs. reduced features) x (raw vs. boxcox-transformed):
_path_reduced_features = sd.working_directory_path + '/Pickle/reduced_features/'
_path_all_features = sd.working_directory_path + '/Pickle/all_features/'
_path_reduced_features_boxcox = sd.working_directory_path + '/Pickle/reduced_features_boxcox/'
_path_all_features_boxcox = sd.working_directory_path + '/Pickle/all_features_boxcox/'
def get_feature_matrix_and_label(verbose=True, use_cached_feature_matrix=True, save_as_pickle_file=False,
                                 use_boxcox=False, reduced_features=False,
                                 h_window=hw, c_window=cw, gradient_window=gradient_w):
    """
    Computes the feature matrix and the corresponding labels and creates a correlation_matrix

    :param verbose: Whether to print messages
    :param use_cached_feature_matrix: Use already cached matrix; 'all' (use all features), 'selected'
                                      (do feature selection first), None (don't use cache)
    :param save_as_pickle_file: If use_use_cached_feature_matrix=False, then store newly computed
                                matrix in a pickle file (IMPORTANT: Usually only used the very first time to
                                store feature matrix with e.g. default windows)
    :param use_boxcox: Whether boxcox transformation should be done (e.g. when Naive Bayes
                       classifier is used)
    :param reduced_features: Whether to do feature selection or not
    :param h_window: Size of heartrate window
    :param c_window: Size of crash window
    :param gradient_window: Size of gradient window

    :return: Feature matrix, labels
    """
    # Window sizes must fit into every logfile, otherwise no row could be computed.
    for df in sd.df_list:
        assert (max(h_window, c_window, gradient_window) < max(df['Time'])),\
            'Window sizes must be smaller than maximal logfile length'
    # Publish the requested settings to the module-level globals that the
    # individual feature helpers read.
    globals()['hw'] = h_window
    globals()['cw'] = c_window
    globals()['gradient_w'] = gradient_window
    globals()['use_cached_feature_matrix'] = use_cached_feature_matrix
    globals()['_verbose'] = verbose
    matrix = pd.DataFrame()
    should_read_from_pickle_file, path = _should_read_from_cache(use_cached_feature_matrix, use_boxcox,
                                                                 reduced_features)
    sd.obstacle_df_list = sd.get_obstacle_times_with_success()
    if should_read_from_pickle_file:
        if _verbose:
            print('Feature matrix already cached!')
        matrix = pd.read_pickle(path)
    else:
        if _verbose:
            print('Creating feature matrix...')
        # The trailing comment names the window each feature is computed over.
        matrix['mean_hr'] = _get_standard_feature('mean', 'Heartrate')  # hw
        matrix['std_hr'] = _get_standard_feature('std', 'Heartrate')  # hw
        matrix['max_minus_min_hr'] = _get_standard_feature('max_minus_min', 'Heartrate')  # hw
        matrix['hr_gradient_changes'] = _get_number_of_gradient_changes('Heartrate')  # gradient_w
        matrix['lin_regression_hr_slope'] = _get_lin_regression_hr_slope_feature()  # gradient_w
        matrix['mean_score'] = _get_standard_feature('mean', 'Points')  # hw
        matrix['std_score'] = _get_standard_feature('std', 'Points')  # hw
        matrix['max_minus_min_score'] = _get_standard_feature('max_minus_min', 'Points')  # hw
        matrix['%crashes'] = _get_percentage_crashes_feature()  # cw
        matrix['last_obstacle_crash'] = _get_last_obstacle_crash_feature()  # cw
        matrix['timedelta_to_last_obst'] = _get_timedelta_to_last_obst_feature(do_normalize=False)
        if not reduced_features:
            # Extra features that feature selection would drop.
            matrix['max_hr'] = _get_standard_feature('max', 'Heartrate')  # hw
            matrix['min_hr'] = _get_standard_feature('min', 'Heartrate')  # hw
            matrix['max_over_min_hr'] = _get_standard_feature('max_over_min', 'Heartrate')  # hw
            matrix['max_score'] = _get_standard_feature('max', 'Points')  # hw
            matrix['min_score'] = _get_standard_feature('min', 'Points')  # hw
            matrix['score_gradient_changes'] = _get_number_of_gradient_changes('Points')  # gradient_w
        # Boxcox transformation
        if use_boxcox:
            # Values must be positive. If not, shift it
            non_boxcox = ['last_obstacle_crash']
            # NOTE(review): feature_names is only (re)assigned further below; on a
            # fresh import this loop iterates the previous/empty global — confirm.
            for feature in feature_names:
                if feature not in non_boxcox:  # Doesn't makes sense to do boxcox here
                    if matrix[feature].min() <= 0:
                        matrix[feature] = stats.boxcox(matrix[feature] - matrix[feature].min() + 0.01)[0]
                    else:
                        matrix[feature] = stats.boxcox(matrix[feature])[0]
        if save_as_pickle_file and (not sd.use_fewer_data):
            matrix.to_pickle(path)
    # remove ~ first couple of seconds (they have < window seconds to compute features, and are thus not accurate)
    labels = []
    for df in sd.obstacle_df_list:
        labels.append(df[df['Time'] > max(cw, hw, gradient_w)]['crash'].copy())
    y = list(itertools.chain.from_iterable(labels))
    np.set_printoptions(suppress=True)
    matrix.dropna(inplace=True)  # First max(hw, cw, gradient_w) seconds did not get computed since inaccurate -> Delete
    globals()['feature_names'] = list(matrix)
    # Create feature matrix from df
    X = matrix.values
    if verbose:
        print('Feature matrix and labels created!')
    return X, y
def _get_timedelta_to_last_obst_feature(do_normalize=False):
    """
    Returns the timedelta to the previous obstacle

    :param do_normalize: Normalize the timedelta with previous timedelta (bc. it varies slightly within and across
                         logfiles)

    :return: one-column DataFrame 'timedelta_to_last_obst' over all logfiles
    """
    timedeltas_df_list = []  # list that contains a dataframe with feature for each logfile
    computed_timedeltas = []  # running history of timedeltas within the current logfile
    if _verbose:
        print('Creating timedelta_to_last_obst feature...')

    # NOTE: `df` inside compute() refers to the loop variable of the for-loop
    # below (closure over the enclosing scope), so compute() always works on
    # the logfile currently being processed.
    def compute(row):
        # Rows before the largest window get no value (apply() records NaN).
        if row['Time'] > max(cw, hw, gradient_w):
            last_obstacles = df[(df['Time'] < row['Time']) & ((df['Logtype'] == 'EVENT_OBSTACLE') |
                                                              (df['Logtype'] == 'EVENT_CRASH'))]
            if last_obstacles.empty:
                computed_timedeltas.append(2.2)
                return 1
            timedelta = row['Time'] - last_obstacles.iloc[-1]['Time']
            # Clamp outliers (e.g. because of tutorials etc.). If timedelta >3, it's most likely e.g 33 seconds, so I
            # clamp to c.a. the average/last timedelta
            if timedelta > 3 or timedelta < 1:
                if len(computed_timedeltas) > 0:
                    timedelta = computed_timedeltas[-1]
                else:
                    timedelta = 2
            # AdaBoost: 2 or 3 is best
            # Random Forest: 1 is best
            last_n_obst = min(len(computed_timedeltas), 1)
            if len(computed_timedeltas) > 0:
                normalized = timedelta / np.mean(computed_timedeltas[-last_n_obst:])
            else:
                normalized = 1
            computed_timedeltas.append(timedelta)
            return normalized if do_normalize else timedelta

    for list_idx, df in enumerate(sd.df_list):
        timedeltas_df_list.append(sd.obstacle_df_list[list_idx].apply(compute, axis=1))
        computed_timedeltas = []  # reset history for the next logfile

    return pd.DataFrame(list(itertools.chain.from_iterable(timedeltas_df_list)), columns=['timedelta_to_last_obst'])
def _get_standard_feature(feature, data_name):
    """
    This is a wrapper to compute common features such as min, max, mean for either Points or Heartrate

    :param feature: min, max, mean, std
    :param data_name: Either Heartrate or Points

    :return: Dataframe column containing the feature
    """
    if _verbose:
        print('Creating ' + feature + '_' + data_name + ' feature...')
    # one feature series per logfile, concatenated into a single column
    per_logfile = [_get_column(i, feature, data_name) for i in range(len(sd.df_list))]
    return pd.DataFrame(list(itertools.chain.from_iterable(per_logfile)), columns=[feature])
def _get_percentage_crashes_feature():
    """Build the '%crashes' feature column across all logfiles."""
    if _verbose:
        print('Creating %crashes feature...')
    # one %crashes series per logfile, concatenated into a single column
    per_logfile = [_get_percentage_crashes_column(i) for i in range(len(sd.df_list))]
    return pd.DataFrame(list(itertools.chain.from_iterable(per_logfile)), columns=['%crashes'])
def _get_last_obstacle_crash_feature():
    """Build the 'last_obstacle_crash' feature column across all logfiles.

    For each point in time the column says whether the previous obstacle
    ended in a crash."""
    if _verbose:
        print('Creating last_obstacle_crash feature...')
    per_logfile = [_get_last_obstacle_crash_column(i) for i in range(len(sd.df_list))]
    return pd.DataFrame(list(itertools.chain.from_iterable(per_logfile)), columns=['last_obstacle_crash'])
def _get_lin_regression_hr_slope_feature():
    """Build the 'lin_regression_hr_slope' feature column across all logfiles."""
    if _verbose:
        print('Creating lin_regression_hr_slope feature...')
    # one slope series per logfile, concatenated into a single column
    per_logfile = [_get_hr_slope_column(i) for i in range(len(sd.df_list))]
    return pd.DataFrame(list(itertools.chain.from_iterable(per_logfile)), columns=['lin_regression_hr_slope'])
def _get_number_of_gradient_changes(data_name):
    """Build the gradient-changes feature column for Points or Heartrate.

    The resulting column is named 'score_gradient_changes' for Points and
    'hr_gradient_changes' otherwise."""
    if _verbose:
        print('Creating %s_gradient_changes feature...' % data_name)
    per_logfile = [_get_gradient_changes_column(i, data_name) for i in range(len(sd.df_list))]
    column_name = 'score_gradient_changes' if data_name == 'Points' else 'hr_gradient_changes'
    return pd.DataFrame(list(itertools.chain.from_iterable(per_logfile)), columns=[column_name])
"""
The following methods calculate the features of one single dataframe and return it as a new dataframe column
"""
def _df_from_to(_from, _to, df):
"""
Returns the part of the dataframe where time is between _from and _to
:param _from: Start of dataframe ['Time']
:param _to: End of dataframe ['Time']
:param df: Dataframe
:return: new Dataframe where row['Time'] between _from and _to
"""
mask = (_from <= df['Time']) & (df['Time'] < _to)
return df[mask]
def _get_column(idx, applier, data_name):
    """
    This is a wrapper which returns a dataframe column that indicates at each timestamp the
    heartrate or points over the last 'window' seconds, after applying 'applyer' (e.g. mean, max, min)

    :param idx: Index of dataframe in gl.df_list
    :param applier: mean, min, max, std, max_minus_min, max_over_min
    :param data_name: Heartrate or Points

    :return: Dataframe column with feature
    """
    df = sd.df_list[idx]
    window = hw

    def compute(row):
        # Rows before the largest window get no value (apply() records NaN).
        if row['Time'] > max(cw, hw, gradient_w):
            last_x_seconds_df = _df_from_to(max(0, row['Time'] - window), row['Time'], df)
            res = -1  # sentinel: only stays -1 if `applier` is unknown
            if applier == 'mean':
                res = last_x_seconds_df[data_name].mean()
            elif applier == 'min':
                res = last_x_seconds_df[data_name].min()
            elif applier == 'max':
                res = last_x_seconds_df[data_name].max()
            elif applier == 'std':
                res = last_x_seconds_df[data_name].std()
            elif applier == 'max_minus_min':
                # NOTE(review): this and 'max_over_min' recompute the window with
                # gradient_w instead of hw — presumably intentional; confirm.
                last_x_seconds_df = _df_from_to(max(0, row['Time'] - gradient_w), row['Time'], df)
                max_v = last_x_seconds_df[data_name].max()
                min_v = last_x_seconds_df[data_name].min()
                res = max_v - min_v
            elif applier == 'max_over_min':
                last_x_seconds_df = _df_from_to(max(0, row['Time'] - gradient_w), row['Time'], df)
                max_v = last_x_seconds_df[data_name].max()
                min_v = last_x_seconds_df[data_name].min()
                res = max_v / min_v
            if res == -1:
                print('error in applying ' + data_name + '_' + applier)
            # first mean will be nan, so replace it with second row instead
            return res if not math.isnan(res) else compute(df.iloc[1])

    return sd.obstacle_df_list[idx].apply(compute, axis=1)
def _get_percentage_crashes_column(idx):
    """
    Returns a dataframe column that indicates at each timestamp how many percentage of the last obstacles in the
    last crash-window-seconds the user crashed into

    :param idx: Index into gl.df_list (indicates the dataframe)

    :return: Percentage feature column
    """
    df = sd.df_list[idx]
    '''
    # Scale feature depending on timedelta (the shorter, the more difficult...
    def get_factor(timedelta):
        if timedelta < 2:
            return 0.8
        if 2 <= timedelta < 3:
            return 1.2
        else:
            return 1
    '''

    def compute_crashes(row):
        # Rows before the largest window get no value (apply() records NaN).
        if row['Time'] > max(cw, hw, gradient_w):
            last_x_seconds_df = _df_from_to(max(0, row['Time'] - cw), row['Time'], df)
            # obstacles seen in the crash window = passed obstacles + crashes
            num_obstacles = len(last_x_seconds_df[(last_x_seconds_df['Logtype'] == 'EVENT_OBSTACLE')
                                                  | (last_x_seconds_df['Logtype'] == 'EVENT_CRASH')].index)
            num_crashes = len(last_x_seconds_df[last_x_seconds_df['Logtype'] == 'EVENT_CRASH'].index)
            # crash percentage capped at 100; 0 when no obstacle was seen at all
            return (num_crashes/num_obstacles * 100 if num_crashes < num_obstacles else 100) if num_obstacles != 0 \
                else 0

    return sd.obstacle_df_list[idx].apply(compute_crashes, axis=1)
def _get_last_obstacle_crash_column(idx):
    """
    Returns a dataframe column that indicates at each timestamp whether the user crashed into the last obstacle or not

    :param idx: Index into gl.df_list (indicates the dataframe)

    :return: last_obstacle_crash feature column (1 = crashed, 0 = not)
    """
    df = sd.df_list[idx]

    def compute_crashes(row):
        # Rows before the largest window get no value (apply() records NaN).
        if row['Time'] > max(cw, hw, gradient_w):
            last_obstacles = df[(df['Time'] < row['Time']) & ((df['Logtype'] == 'EVENT_OBSTACLE') |
                                                              (df['Logtype'] == 'EVENT_CRASH'))]
            if last_obstacles.empty:
                return 0
            # 1 iff the most recent obstacle event was a crash
            return 1 if last_obstacles.iloc[-1]['Logtype'] == 'EVENT_CRASH' else 0

    return sd.obstacle_df_list[idx].apply(compute_crashes, axis=1)
def _get_hr_slope_column(idx):
    """
    Returns a dataframe column that indicates at each timestamp the slope of the fitting lin/ regression
    line over the heartrate in the last hw seconds

    :param idx: Index into gl.df_list (indicates the dataframe)

    :return: hr_slope feature column
    """
    df = sd.df_list[idx]

    # noinspection PyTupleAssignmentBalance
    def compute_slope(row):
        if row['Time'] > max(cw, hw, gradient_w):
            # NOTE(review): the window used here is gradient_w, not hw as the
            # docstring states — confirm which one is intended.
            last_x_seconds_df = _df_from_to(max(0, row['Time'] - gradient_w), row['Time'], df)
            # degree-1 least-squares fit; intercept is discarded
            slope, _ = np.polyfit(last_x_seconds_df['Time'], last_x_seconds_df['Heartrate'], 1)
            # first value can be nan -> fall back to computing on the second row
            return slope if not math.isnan(slope) else compute_slope(df.iloc[1])

    return sd.obstacle_df_list[idx].apply(compute_slope, axis=1)
def _get_gradient_changes_column(idx, data_name):
    """
    Returns a dataframe column that indicates at each timestamp the number of times 'data_name' (points or Heartrate)
    have changed from increasing to decreasing and the other way around

    :param idx: Index into gl.df_list (indicates the dataframe)
    :param data_name: Points or Heartrate

    :return: gradient_changes feature column for either points or heartrate
    """
    df = sd.df_list[idx]

    def compute_gradient_changes(row):
        # Rows before the largest window get no value (apply() records NaN).
        if row['Time'] > max(cw, hw, gradient_w):
            last_x_seconds_df = _df_from_to(max(0, row['Time'] - cw), row['Time'], df)
            data = last_x_seconds_df[data_name].tolist()
            gradx = np.gradient(data)
            asign = np.sign(gradx)
            # count sign flips of the gradient = direction changes of the signal
            num_sign_changes = len(list(itertools.groupby(asign, lambda x: x >= 0))) - 1
            if num_sign_changes == 0:
                num_sign_changes = 1  # never report 0 changes
            return num_sign_changes if not math.isnan(num_sign_changes) else compute_gradient_changes(df.iloc[1])

    return sd.obstacle_df_list[idx].apply(compute_gradient_changes, axis=1)
"""
Helper functions
"""
def _should_read_from_cache(use_cached_feature_matrix, use_boxcox, reduced_features):
    """
    If the user wants to use an already saved feature matrix ('all' or 'reduced'), then check if those
    pickle files really exist. If not, new files have to be created

    :param use_cached_feature_matrix: Use already cached matrix; 'all' (use all features), 'selected'
                                      (do feature selection first), None (don't use cache)
    :param use_boxcox: Whether boxcox transformation should be done (e.g. when Naive Bayes classifier is used)
    :param reduced_features: Whether to do feature selection or not

    :return: Whether reading from cache is okay and path where to read from/write to new pickle file (if necessary)
    """
    file_name = 'feature_matrix_%s_%s_%s.pickle' % (hw, cw, gradient_w)
    # Pick the cache folder for the (reduced, boxcox) combination.
    # (The original if/elif duplicated the reduced_features test; a plain
    # if/else covers both cases.)
    if reduced_features:
        path = _path_reduced_features_boxcox if use_boxcox else _path_reduced_features
    else:
        path = _path_all_features_boxcox if use_boxcox else _path_all_features
    file_path = path + file_name
    # Never read from cache when caching is disabled or the data is non-standard
    # (fewer-data debugging mode or synthesized data).
    if not use_cached_feature_matrix or sd.use_fewer_data or synthesized_data.synthesized_data_enabled:
        return False, file_path
    if Path(file_path).exists():
        return True, file_path
    print('ERROR: Pickle file of Feature matrix not yet created. Creating new one...')
    # exist_ok avoids the check-then-create race of the original exists()/makedirs pair
    os.makedirs(path, exist_ok=True)
    return False, file_path
import argparse
import csv
import os.path
import numpy as np
import torch
from sklearn.model_selection import train_test_split
from torchtext.data.utils import get_tokenizer
from torchtext.datasets import AG_NEWS
from torchtext.vocab import build_vocab_from_iterator
from MIA.Attack.ConfVector import ConfVector
from MIA.ShadowModels import ShadowModels
from model import TextClassificationModel
# One full membership-inference experiment per shadow-model count n = 1..10:
# train the shadow models, fit the confidence-vector attack, and log metrics.
for n in range(1, 11):
    parser = argparse.ArgumentParser()
    parser.add_argument("--save_to", default='models', type=str)
    parser.add_argument("--name", default='agnews', type=str)
    parser.add_argument("--shadow_num", default=n, type=int)
    parser.add_argument("--shadow_nepoch", default=30, type=int)
    parser.add_argument("--attack_nepoch", default=5, type=int)
    parser.add_argument("--topx", default=-1, type=int)
    args = parser.parse_args()
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    train_iter = AG_NEWS(split='train')
    tokenizer = get_tokenizer('basic_english')

    def yield_tokens(data_iter):
        # token-list stream over the (label, text) pairs, for vocab construction
        for _, text in data_iter:
            yield tokenizer(text)

    vocab = build_vocab_from_iterator(yield_tokens(train_iter), specials=["<unk>"])
    vocab.set_default_index(vocab["<unk>"])

    def collate_batch(batch):
        # Collate a batch of samples into the (text, offsets, label) tensors
        # expected by an EmbeddingBag-style text classifier.
        label_list, text_list, offsets = [], [], [0]
        for (_text, _label) in batch:
            label_list.append(int(_label))
            processed_text = torch.tensor(vocab(tokenizer(_text)), dtype=torch.int64)
            text_list.append(processed_text)
            # Offsets mark where each sample's tokens start in the concatenated
            # token tensor; the embedding layer averages the tokens between
            # consecutive offsets, so its output is batchsize * embedsize.
            offsets.append(processed_text.size(0))
        label_list = torch.tensor(label_list, dtype=torch.int64)
        offsets = torch.tensor(offsets[:-1]).cumsum(dim=0)
        text_list = torch.cat(text_list)
        return text_list, offsets, label_list

    num_class = len(set([label for (label, text) in AG_NEWS(split='train')]))
    vocab_size = len(vocab)
    emsize = 64
    # target: the victim model, restored from pre-trained weights on disk
    target = TextClassificationModel(vocab_size, emsize, num_class)
    target.to(device)
    target.load_state_dict(torch.load(os.path.join(args.save_to, args.name + ".pth")))
    # net: a fresh instance of the same architecture, used for the shadow models
    net = TextClassificationModel(vocab_size, emsize, num_class)
    net.to(device)
    # Concatenate train+test texts (iterators are exhausted, so re-create them)
    train_iter, test_iter = AG_NEWS()
    X = np.concatenate(([tup[1] for tup in list(train_iter)], [tup[1] for tup in list(test_iter)]))
    train_iter, test_iter = AG_NEWS()
    # AG_NEWS labels are 1-based; shift to 0-based class indices
    Y = np.concatenate(([tup[0] for tup in list(train_iter)], [tup[0] for tup in list(test_iter)])).astype(np.int64) - 1
    # Disjoint halves: one for the target model evaluation, one for the shadows
    target_X, shadow_X, target_Y, shadow_Y = train_test_split(X, Y, test_size=0.5, random_state=42)
    target_X_train, target_X_test, target_Y_train, target_Y_test = train_test_split(target_X, target_Y, test_size=0.5,
                                                                                    random_state=42)
    optimizer = torch.optim.SGD
    shadow_models = ShadowModels(net, args.shadow_num, shadow_X, shadow_Y, args.shadow_nepoch, device,
                                 collate_fn=collate_batch, opt=optimizer, lr=5)
    acc, val_acc = shadow_models.train()
    attack_model = ConfVector(shadow_models, args.attack_nepoch, device, args.topx)
    attack_model.train()
    shadow_acc, shadow_prec = attack_model.evaluate()
    # same random_state=42 split as above, so the target model is evaluated on
    # the identical train/test partition of its half
    target_acc, target_prec = attack_model.evaluate(target, *train_test_split(target_X, target_Y, test_size=0.5,
                                                                              random_state=42))
    attack_model.show()
    # append one CSV row of metrics per shadow-model count
    with open("rnn_agnews_conf_nshadowmodel", 'a') as f:
        writer = csv.writer(f)
        writer.writerow([n, np.average(acc), np.average(val_acc), shadow_acc, shadow_prec, target_acc, target_prec])
# %load ../../src/feature/feature_utils.py
# %%writefile ../../src/features/feature_utils.py
"""
Author: Jim Clauwaert
Created in the scope of my PhD
"""
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from statsmodels import robust
from math import ceil
def lowess(x, y, f=2. / 3., iter=3):
    """lowess(x, y, f=2./3., iter=3) -> yest

    Lowess smoother: Robust locally weighted regression.
    The lowess function fits a nonparametric regression curve to a scatterplot.
    The arrays x and y contain an equal number of elements; each pair
    (x[i], y[i]) defines a data point in the scatterplot. The function returns
    the estimated (smooth) values of y.
    The smoothing span is given by f. A larger value for f will result in a
    smoother curve. The number of robustifying iterations is given by iter. The
    function will run faster with a smaller number of iterations.
    """
    n = len(x)
    r = int(ceil(f * n))
    # per-point bandwidth: distance to the r-th nearest neighbour
    h = [np.sort(np.abs(x - x[i]))[r] for i in range(n)]
    # tricube weights on the scaled distances
    w = np.clip(np.abs((x[:, None] - x[None, :]) / h), 0.0, 1.0)
    w = (1 - w ** 3) ** 3
    yest = np.zeros(n)
    delta = np.ones(n)  # robustness weights, updated each iteration
    for iteration in range(iter):
        for i in range(n):
            weights = delta * w[:, i]
            # normal equations of the weighted degree-1 least-squares fit
            b = np.array([np.sum(weights * y), np.sum(weights * y * x)])
            A = np.array([[np.sum(weights), np.sum(weights * x)],
                          [np.sum(weights * x), np.sum(weights * x * x)]])
            # BUG FIX: the original called `linalg.solve`, but only `numpy as np`
            # is imported in this module, so that raised NameError at runtime.
            beta = np.linalg.solve(A, b)
            yest[i] = beta[0] + beta[1] * x[i]
        # down-weight points with large residuals (bisquare)
        residuals = y - yest
        s = np.median(np.abs(residuals))
        delta = np.clip(residuals / (6.0 * s), -1, 1)
        delta = (1 - delta ** 2) ** 2
    return yest
def AllocatePromoters(experiment, IDs):
    """For each sequence ID, decide whether any matching TSS falls inside the
    window [position+35, position+60) of the sequence's genome position.

    :param experiment: experiment name; its LAST character selects the sigma
        factor looked up in the 'sigma_binding' column
    :param IDs: sequence identifiers whose last 8 characters encode the position
    :return: list of booleans, one per ID
    """
    # NOTE(review): reads the TSS tables from paths relative to the CWD.
    TSS_info = pd.read_csv("../data/external/TSS_info.csv")
    TSS_seq = pd.read_csv("../data/external/TSS_seq.csv")
    # forward-strand TSSs, bound by the selected sigma factor, with condition "E"
    # (masks from TSS_info and TSS_seq are combined row-wise — assumes both
    # tables are aligned row-for-row; TODO confirm)
    mask = (TSS_seq["strand"] == "+") & (TSS_info["sigma_binding"].str.count(experiment[-1]) == 1) & (TSS_seq["conditions"].str.count("E") == 1)
    TSS = TSS_seq.loc[mask, "TSS_position"].values
    positions = [int(id[-8:]) for id in IDs]
    mask_promoters = []
    for position in positions:
        # True if any selected TSS lies in the promoter window of this position
        mask_promoters.append(((position+35 <= TSS) & (position+60 > TSS)).any())
    return mask_promoters
def augment_sequences(X_batch, Y_batch):
    """With probability 0.5, blank out a random-length prefix (0-14 positions)
    of every sequence image by setting it to the uniform value 0.25.

    Returns a copy of X_batch (possibly augmented) together with Y_batch."""
    augmented = np.copy(X_batch)
    if np.random.rand() >= 0.5:
        prefix_len = np.random.randint(15)
        augmented[:, :prefix_len, :] = 0.25
    return augmented, Y_batch
def BinaryOneHotEncoder(Y_bool):
    """One-hot encode a boolean vector into an (N, 2) int8 array.

    Column 1 is set for True entries, column 0 for False entries.
    """
    hot_array = np.zeros([len(Y_bool), 2], dtype=np.int8)
    for row, flag in enumerate(Y_bool):
        column = 1 if flag == True else 0
        hot_array[row, column] = 1
    return hot_array
def CreateBalancedTrainTest(X,Y, test_size=0.1):
    """Split each class separately, then recombine, so the train and test
    sets keep the original class balance. Y is one-hot of shape (N, 2)."""
    negatives = Y[:, 1] == 0
    positives = Y[:, 1] == 1
    # Split the two classes independently with the requested test fraction.
    split_neg = train_test_split(X[negatives], Y[negatives], test_size=test_size)
    split_pos = train_test_split(X[positives], Y[positives], test_size=test_size)
    X_train = np.vstack((split_neg[0], split_pos[0]))
    X_test = np.vstack((split_neg[1], split_pos[1]))
    Y_train = np.vstack((split_neg[2], split_pos[2]))
    Y_test = np.vstack((split_neg[3], split_pos[3]))
    return X_train, X_test, Y_train, Y_test
def CreateImageFromSequences(sequences, length= 50):
    """One-hot encode DNA sequences into an (N, length, 4) array.

    Channel order along the last axis is A, T, C, G. Sequences shorter than
    ``length`` are left-padded with the uniform value 0.25; longer sequences
    are truncated to their last ``length`` characters.

    Parameters
    ----------
    sequences : iterable of str
    length : int
        Target sequence length of the encoded image.

    Returns
    -------
    np.ndarray of shape (len(sequences), length, 4)
    """
    lib = np.zeros((len(sequences), length, 4))
    cut = 0
    for index, string in enumerate(sequences):
        if len(string) > length:
            cut += 1
            # BUG FIX: truncation previously hard-coded 50 characters,
            # breaking any call with length != 50; honour `length`.
            string = string[-length:]
        length_seq = len(string)
        diff = length - length_seq
        # Uniform 0.25 padding for the missing prefix positions.
        pre_map = np.full(diff, 0.25, dtype=np.float16)
        Amap = np.hstack((pre_map, [(c == "A") * 1 for c in string]))
        Tmap = np.hstack((pre_map, [(c == "T") * 1 for c in string]))
        Cmap = np.hstack((pre_map, [(c == "C") * 1 for c in string]))
        Gmap = np.hstack((pre_map, [(c == "G") * 1 for c in string]))
        image = np.array([Amap, Tmap, Cmap, Gmap], dtype=np.float16)
        lib[index, :, :] = np.transpose(image)
    if cut > 0:
        print("{} sequences have been cut".format(cut))
    return lib
def DetectPeaks(scores, cutoff, smoothing=True, window_len=50):
    """Locate local maxima in `scores` that exceed `cutoff`.

    A peak at index i-2 is recorded when the trace rose into i-2 and then
    fell for two consecutive samples while still above the cutoff.

    Parameters
    ----------
    scores : np.ndarray
        1-D score trace.
    cutoff : float
        Only samples above this value are considered.
    smoothing : bool
        If True, smooth the trace with Smooth() first.
    window_len : int
        Window length passed to Smooth().

    Returns
    -------
    (list, np.ndarray)
        The peak indices and a boolean mask over `scores` marking them.
    """
    peak_index = []  # unused `index_list` local removed
    if smoothing is True:
        scores = Smooth(scores, window_len=window_len)
    for i, above in enumerate(scores > cutoff):
        if above:
            # NOTE(review): for i < 3 the lookbacks wrap around via negative
            # indexing — presumably harmless for long traces; confirm.
            if (scores[i] < scores[i-1] and scores[i] < scores[i-2]) and scores[i-2] > scores[i-3]:
                peak_index.append(i-2)
    mask = np.full(len(scores), False)
    mask[peak_index] = True
    return peak_index, mask
def Smooth(x,window_len=50,window='hanning'):
    """Smooth a 1-D signal by convolving it with a window function.

    The signal is extended at both ends by point reflection to reduce edge
    transients, convolved with the normalised window, and trimmed back to
    the original length.

    Parameters
    ----------
    x : np.ndarray
        Input signal.
    window_len : int
        Length of the smoothing window.
    window : str
        'flat' for a moving average, otherwise the name of a NumPy window
        function ('hanning', 'hamming', 'bartlett', 'blackman').
    """
    # Point-reflected padding at both ends.
    s=np.r_[2*x[0]-x[window_len-1::-1],x,2*x[-1]-x[-1:-window_len:-1]]
    if window == 'flat': #moving average
        w=np.ones(window_len,'d')
    else:
        # SECURITY/IDIOM FIX: look the window function up by name instead
        # of eval()'ing a format string — same behaviour for valid names,
        # no arbitrary-code-execution path.
        w = getattr(np, window)(window_len)
    y=np.convolve(w/w.sum(),s,mode='same')
    return y[window_len:-window_len+1]
def LoadValidationData():
    """Load the five external validation sets and return, for each one,
    its encoded sequence images and its PM intensities, in the fixed
    order: anderson, brewster, rand_mut, mod_mut, davis (10 values)."""
    loaded = []
    for name in ("anderson_NN", "brewster_NN", "rand_mut_NN", "mod_mut_NN", "davis_NN"):
        raw = pd.read_csv("../data/external/{}.csv".format(name))
        loaded.append(CreateImageFromSequences(raw["PROBE_SEQUENCE"]))
        loaded.append(raw["PM"])
    return tuple(loaded)
def LoadDataTSS(path, experiment):
    """Load a probe CSV and return (X, Y): one-hot encoded probe sequence
    images and one-hot encoded binary labels for column `experiment`."""
    table = pd.read_csv(path)
    X_extra = CreateImageFromSequences(table["PROBE_SEQUENCE"])
    Y_extra = BinaryOneHotEncoder(table[experiment] == 1)
    return X_extra, Y_extra
def TransformDataSimple(data_ip, data_mock_ip):
    """Median/MAD-normalise log2 probe intensities and compute the per-probe
    IP-vs-mock fold change.

    Parameters
    ----------
    data_ip, data_mock_ip : list of str
        CSV paths for the IP and mock-IP replicates (same probe order).

    Returns
    -------
    tuple
        (sequence images, mean fold change, raw sequences, probe IDs).
    """
    # Columns = replicates, rows = probes.
    list_ip = np.vstack([pd.read_csv(f)["PM"].values for f in data_ip]).T
    list_mock_ip = np.vstack([pd.read_csv(f)["PM"].values for f in data_mock_ip]).T
    # Sequences and IDs are shared across replicates; take them from the first file.
    first = pd.read_csv(data_ip[0])
    sequences = first["PROBE_SEQUENCE"].values
    IDs = first["PROBE_ID"].values
    log_ip = np.log2(list_ip)
    log_mock = np.log2(list_mock_ip)
    # Robust per-replicate standardisation: (x - median) / MAD.
    mad_ip = np.array([robust.mad(log_ip[:, u]) for u in range(log_ip.shape[1])])
    mad_mock = np.array([robust.mad(log_mock[:, u]) for u in range(log_mock.shape[1])])
    ip_norm = (log_ip - np.median(log_ip, axis=0)) / mad_ip
    mock_ip_norm = (log_mock - np.median(log_mock, axis=0)) / mad_mock
    # Mean fold change per probe: mean normalised IP minus mean normalised mock.
    # (An intermediate `mean_fold` over per-replicate differences was computed
    # but never used in the original; it is removed here.)
    fold_mean = np.mean(ip_norm, axis=1) - np.mean(mock_ip_norm, axis=1)
    sequences_img = CreateImageFromSequences(sequences)
    return sequences_img, fold_mean, sequences, IDs
import numpy as np
from bayesfast import ModuleBase
from ._commander import _commander_f, _commander_j, _commander_fj
import os
__all__ = ['Commander']
# Directory of this module; the likelihood data file ships alongside it.
CURRENT_PATH = os.path.abspath(os.path.dirname(__file__))
# Precomputed Commander likelihood inputs, loaded once at import time.
foo = np.load(os.path.join(CURRENT_PATH, 'data/commander.npz'))
cl2x = foo['cl2x']  # presumably the C_l -> Gaussianised-x change-of-variable table; confirm against data file
mu = foo['mu']  # mean vector of the Gaussianised spectrum
cov = foo['cov']  # covariance of the Gaussianised spectrum
cov_inv = np.linalg.inv(cov)  # inverted once here so each likelihood call avoids a solve
offset = foo['offset']  # normalisation offset subtracted from the log-likelihood
class Commander(ModuleBase):
    """Planck 2018 Commander low-l TT likelihood.

    bayesfast Module wrapper around the compiled Commander routines: takes
    the low-l TT spectrum and the Planck calibration parameter, returns the
    (offset-subtracted) log-likelihood.
    """
    def __init__(self, tt_name='TT', m_name='TT-Commander', ap_name='a_planck',
                 logp_name='logp-Commander', delete_vars=None, label=None):
        # BUG FIX: delete_vars previously defaulted to a mutable list ([]),
        # which is shared across calls; use a None sentinel instead.
        if delete_vars is None:
            delete_vars = []
        super().__init__(delete_vars=delete_vars, input_shapes=None,
                         output_shapes=None, label=label)
        self.tt_name = tt_name
        self.m_name = m_name
        self.ap_name = ap_name
        self.logp_name = logp_name

    @property
    def tt_name(self):
        # Name of the raw TT spectrum variable produced by CAMB.
        return self._tt_name

    @tt_name.setter
    def tt_name(self, ttn):
        if isinstance(ttn, str):
            self._tt_name = ttn
        else:
            raise ValueError('invalid value for tt_name.')

    @property
    def m_name(self):
        # Name of the trimmed (l=2..29) spectrum input variable.
        return self._m_name

    @m_name.setter
    def m_name(self, mn):
        if isinstance(mn, str):
            self._m_name = mn
        else:
            raise ValueError('invalid value for m_name.')

    @property
    def ap_name(self):
        # Name of the Planck calibration parameter variable.
        return self._ap_name

    @ap_name.setter
    def ap_name(self, apn):
        if isinstance(apn, str):
            self._ap_name = apn
        else:
            raise ValueError('invalid value for ap_name.')

    @property
    def logp_name(self):
        # Name of the log-likelihood output variable.
        return self._logp_name

    @logp_name.setter
    def logp_name(self, ln):
        if isinstance(ln, str):
            self._logp_name = ln
        else:
            raise ValueError('invalid value for logp_name.')

    @property
    def input_vars(self):
        return [self.m_name, self.ap_name]

    @property
    def output_vars(self):
        return [self.logp_name]

    @property
    def camb_output_vars(self):
        return [self.m_name]

    def camb_get_output(self, tmp_dict):
        """Extract the l=2..29 TT multipoles from a CAMB result dict."""
        raw_cl = tmp_dict[self.tt_name]
        # ROBUSTNESS FIX: the original validated with `assert` (stripped
        # under -O) wrapped in try/except; use explicit checks instead.
        # TYPO FIX: the error message previously read 'invalid shapr'.
        if not (hasattr(raw_cl, 'ndim') and raw_cl.ndim == 1 and
                raw_cl.size >= 30):
            raise ValueError('invalid shape for raw_cl.')
        return raw_cl[2:30]

    def _fun(self, m, ap):
        # Log-likelihood only.
        out_f = np.empty(1)
        _commander_f(m, ap, out_f, cl2x, mu, cov_inv)
        out_f -= offset
        return out_f

    def _jac(self, m, ap):
        # Jacobian; 29 columns — presumably 28 C_l's plus the calibration
        # parameter; confirm against the compiled routine.
        out_j = np.empty((1, 29))
        _commander_j(m, ap, out_j, cl2x, mu, cov_inv)
        return out_j

    def _fun_and_jac(self, m, ap):
        # Log-likelihood and Jacobian in one compiled call.
        out_f = np.empty(1)
        out_j = np.empty((1, 29))
        _commander_fj(m, ap, out_f, out_j, cl2x, mu, cov_inv)
        out_f -= offset
        return out_f, out_j
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import multivariate_normal
import math
from scipy.interpolate import interp1d
def generateTraj(fit_type='square', coeff=0.5, num=3):
    """Generate synthetic observation/prediction trajectories.

    Observations are `num` flat (zero) trajectories over x in [1, 8].
    Predictions over x in [9, 16] alternate in sign with growing magnitude,
    either linear or power-law (`x**coeff`) depending on `fit_type`.
    Trajectory 0 always stays at zero.

    Parameters
    ----------
    fit_type : str
        'linear' or 'square' (power-law).
    coeff : float
        Exponent used for the power-law trajectories.
    num : int
        Number of trajectories; `num == 5` with 'square' uses hand-tuned
        amplitudes.

    Returns
    -------
    (x_obs, y_obs, x_pred, y_pred, num)
    """
    # Observations: flat lines over [1, 8].
    x_obs = np.tile(np.linspace(1, 8, num=8, endpoint=True), (num, 1))
    y_obs = np.zeros((num, 8))
    # Real predictions over [9, 16].
    x_pred = np.tile(np.linspace(9, 16, num=8, endpoint=True), (num, 1))
    y_pred = np.zeros((num, 8))
    if fit_type == 'linear':
        base = np.linspace(1, 8, num=8, endpoint=True)
        for i in range(1, num):
            # Alternating sign with slopes 1, -1, 2, -2, 3, -3, ...
            y_pred[i, :] = math.pow(-1, i+1) * math.ceil(i/2) * base
    if fit_type == 'square' and num != 5:
        xx = np.linspace(0, 7, num=8, endpoint=True)
        for i in range(1, num):
            # Alternating sign with amplitudes 3, -3, 4, -4, ...
            y_pred[i, :] = math.pow(-1, i+1) * (math.ceil(i/2) + 2) * np.power(xx, coeff)
    if fit_type == 'square' and num == 5:
        # Hand-tuned amplitudes for the 5-trajectory case.
        xx = np.linspace(0, 7, num=8, endpoint=True)
        y_pred[1, :] = 3 * np.power(xx, coeff)
        y_pred[2, :] = -3 * np.power(xx, coeff)
        y_pred[3, :] = 5 * np.power(xx, coeff)
        y_pred[4, :] = -5 * np.power(xx, coeff)
    return x_obs, y_obs, x_pred, y_pred, num
import os
import albumentations as albu
#import cv2# not using due to issue in loading using cv2.imread for few images
import keras
from keras.preprocessing.image import load_img
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import gridspec
def read_image(img_path):
    """Load an image file and return it as a NumPy array.

    PIL (via keras' load_img) is used instead of cv2.imread because a few
    dataset images fail to load through OpenCV. Float conversion is left
    to normalize_img().
    """
    pil_image = keras.preprocessing.image.load_img(img_path)
    return np.array(pil_image)
def read_mask(mask_path):
    """Load a segmentation mask as grayscale, add a trailing channel axis
    (H, W, 1) and shift the labels from (1, 2, 3) down to (0, 1, 2)."""
    pil_mask = keras.preprocessing.image.load_img(mask_path, color_mode="grayscale")
    # expand_dims also converts the PIL image to a NumPy array.
    channelled = np.expand_dims(pil_mask, axis=-1)
    return channelled - 1
def augment_image_and_mask(image, mask, aug):
    """Apply an albumentations-style transform `aug` to an image/mask pair
    and return the transformed (image, mask)."""
    result = aug(image=image, mask=mask)
    return result['image'], result['mask']
def normalize_img(img):
    """Scale an image to [0, 1] by dividing by its own maximum value.

    NOTE(review): an earlier draft computed a conditional 255-vs-max
    divisor (`max_pixval`) but never used it; that dead code is removed
    here and the observed divide-by-image-max behaviour is kept. An
    all-zero image would still divide by zero — confirm inputs are
    non-empty photos before tightening this.
    """
    return img / np.max(img)
class OxfordPetsData(keras.utils.Sequence):
    """Keras Sequence yielding (image, mask) batches as NumPy arrays.

    Images are loaded, optionally augmented, and normalised to [0, 1];
    masks are loaded as (H, W, 1) uint8 label maps.
    """
    def __init__(self, batch_size, img_size, input_img_paths, input_mask_paths,augmentation=None):
        # Number of samples returned per __getitem__ call.
        self.batch_size = batch_size
        # img_size is a single int; stored as a square (H, W) tuple.
        self.img_size = (img_size,img_size)
        self.input_img_paths = input_img_paths
        self.input_mask_paths = input_mask_paths
        self.aug = augmentation  # user-supplied augmentation pipeline (e.g. albumentations)
    def __len__(self):
        '''Number of whole batches per epoch; remainder samples are dropped.
        e.g. data len = 7390, bs = 32 -> len = 7390 // 32 ~= 230.'''
        return len(self.input_mask_paths) // self.batch_size
    def __getitem__(self, idx):
        """Return the (images, masks) batch #idx as a tuple of arrays of
        shape (batch, H, W, 3) float32 and (batch, H, W, 1) uint8."""
        startIndex = idx * self.batch_size
        stopIndex = startIndex + self.batch_size
        batch_ip_img_paths = self.input_img_paths[startIndex: stopIndex]
        batch_ip_mask_paths = self.input_mask_paths[startIndex: stopIndex]
        # Both image and mask are expected to already match img_size after
        # augmentation (the aug pipeline includes a resize) — confirm when
        # augmentation is None that inputs are pre-sized.
        # x shape = (bs, H, W, 3) NHWC 4-D tensor
        batch_imgs = np.zeros((self.batch_size,)+ self.img_size + (3,), dtype = "float32")
        # y shape = (bs, H, W, 1)
        batch_masks = np.zeros((self.batch_size,) + self.img_size + (1,), dtype="uint8")
        for ii in range(self.batch_size):
            img = read_image(batch_ip_img_paths[ii])
            mask = read_mask(batch_ip_mask_paths[ii])
            if self.aug!=None:
                img, mask = augment_image_and_mask(img, mask, self.aug)
            # Normalise after augmentation so brightness jitter acts on raw values.
            img = normalize_img(img)
            batch_imgs[ii] = img
            batch_masks[ii] = mask
        return batch_imgs,batch_masks
    #end of class
def main():
    """Demo: build train/test generators over the local Oxford-Pets data
    and display the first augmented batch."""
    # constants
    IMG_SIZE = 256
    BATCH_SIZE = 4
    NUM_CLASSES = 3  # NOTE(review): unused here — presumably trimap classes; confirm
    # Read the file lists; sorting both keeps image/mask pairs aligned by name.
    imgs_dir ="D:\\prjs\\oxf_unet\\data\\train\\images"
    masks_dir ="D:\\prjs\\oxf_unet\\data\\train\\masks"
    train_image_list = sorted([os.path.join(imgs_dir,fname) for fname in os.listdir(imgs_dir)])
    train_mask_list = sorted([os.path.join(masks_dir,fname) for fname in os.listdir(masks_dir)])
    # shuffle files with a fixed seed for reproducibility
    #idx = np.arange(len(image_list))
    #np.random.seed(1)
    #np.random.shuffle(idx)
    #image_list = [image_list[i] for i in idx]
    #mask_list = [mask_list[i] for i in idx]
    # Augmentations: blur, flip, brightness/contrast jitter, resize, random crop.
    aug_train = albu.Compose([
        albu.Blur(blur_limit=3),
        albu.HorizontalFlip(p=0.5),
        #albu.Normalize(),
        albu.RandomBrightnessContrast(contrast_limit=0.3,brightness_limit=0.3,brightness_by_max=True),
        #albu.RandomGamma(),
        albu.augmentations.transforms.Resize(height=IMG_SIZE, width=IMG_SIZE),
        albu.RandomSizedCrop((IMG_SIZE - 50, IMG_SIZE - 1), IMG_SIZE, IMG_SIZE)
    ])
    # Test pipeline only resizes.
    aug_test = albu.Compose([
        albu.augmentations.transforms.Resize(height=IMG_SIZE, width=IMG_SIZE)
    ])
    # Construct train and test data generators.
    # NOTE(review): both generators iterate the *training* lists here — the
    # real validation split lives in get_train_test_split(); confirm intent.
    train_generator = OxfordPetsData(
        input_img_paths=train_image_list,
        input_mask_paths =train_mask_list,
        img_size=IMG_SIZE,
        batch_size=BATCH_SIZE,
        augmentation=aug_train)
    test_generator = OxfordPetsData(
        input_img_paths=train_image_list,
        input_mask_paths=train_mask_list,
        img_size=IMG_SIZE,
        batch_size=BATCH_SIZE,
        augmentation=aug_test)
    # Pull one batch and visualise it.
    img_batch, mask_batch = train_generator[0]
    disp_scaled(img_batch, mask_batch)
def get_train_test_split():
    """List the Oxford-IIIT image/trimap paths and split off the last 1000
    pairs as the validation set.

    Returns (train_images, train_masks, val_images, val_masks)."""
    input_dir = "D:/prjs/oxf_unet/data/images/"
    target_dir = "D:/prjs/oxf_unet/data/annotations/trimaps/"
    # Sorting both listings keeps image/mask pairs aligned by filename.
    input_img_paths = sorted(
        os.path.join(input_dir, fname)
        for fname in os.listdir(input_dir)
        if fname.endswith(".jpg"))
    target_img_paths = sorted(
        os.path.join(target_dir, fname)
        for fname in os.listdir(target_dir)
        if fname.endswith(".png") and not fname.startswith("."))
    print("Number of images/mask", len(input_img_paths))
    print("[INFO] loading images...")
    # import random
    # random.Random(1337).shuffle(input_img_paths)
    # random.Random(1337).shuffle(target_img_paths)
    val_samples = 1000
    # The last `val_samples` pairs become the validation split.
    train_image_list = input_img_paths[:-val_samples]
    train_mask_list = target_img_paths[:-val_samples]
    val_img_list = input_img_paths[-val_samples:]
    val_mask_list = target_img_paths[-val_samples:]
    return train_image_list,train_mask_list,val_img_list,val_mask_list
def get_train_gen(IMG_SIZE, BATCH_SIZE):
    """Build the augmented training generator.

    Returns (steps_per_epoch, generator)."""
    train_images, train_masks, _, _ = get_train_test_split()
    # Augmentations: blur, flip, brightness/contrast jitter, resize to the
    # target size, then a random crop back to IMG_SIZE x IMG_SIZE.
    train_aug = albu.Compose([
        albu.Blur(blur_limit=3),
        albu.HorizontalFlip(p=0.5),
        albu.RandomBrightnessContrast(contrast_limit=0.3, brightness_limit=0.3, brightness_by_max=True),
        # albu.RandomGamma(),
        albu.augmentations.transforms.Resize(height=IMG_SIZE, width=IMG_SIZE),
        albu.RandomSizedCrop((IMG_SIZE - 50, IMG_SIZE - 1), IMG_SIZE, IMG_SIZE)
    ])
    generator = OxfordPetsData(
        input_img_paths=train_images,
        input_mask_paths=train_masks,
        img_size=IMG_SIZE,
        batch_size=BATCH_SIZE,
        augmentation=train_aug)
    steps_per_epoch = len(train_images) // BATCH_SIZE
    return steps_per_epoch, generator
def get_test_gen(IMG_SIZE, BATCH_SIZE):
    """Build the validation generator (resize only, no augmentation)."""
    _, _, val_images, val_masks = get_train_test_split()
    resize_only = albu.Compose([
        albu.augmentations.transforms.Resize(height=IMG_SIZE, width=IMG_SIZE)
    ])
    return OxfordPetsData(
        input_img_paths=val_images,
        input_mask_paths=val_masks,
        img_size=IMG_SIZE,
        batch_size=BATCH_SIZE,
        augmentation=resize_only)
def disp_scaled(batch_images, batch_masks,figsize=(15,15)):
    """Show the first four (image, mask) pairs of a batch in a tight grid."""
    nrow, ncol = 4, 2
    fig = plt.figure(4, figsize=figsize)
    grid = gridspec.GridSpec(nrow, ncol,
                             wspace=0.0, hspace=0.0, top=0.95, bottom=0.05, left=0.17, right=0.845)
    for row in range(nrow):
        for col in range(ncol):
            axis = plt.subplot(grid[row, col])
            # Even columns show the image, odd columns the matching mask.
            if col % 2 == 0:
                panel = batch_images[row]
            else:
                panel = np.squeeze(batch_masks[row], axis=-1).astype("uint8")
            axis.imshow(panel)
            axis.set_xticklabels([])
            axis.set_yticklabels([])
    # plt.tight_layout() fights the manual GridSpec margins — keep it off.
    plt.show()
# Script entry point: run the demo pipeline when executed directly.
# (FIX: a stray ' | |' extraction artifact trailing the else-branch print
# has been removed.)
if __name__== "__main__":
    print("Inside if: calling Main: called from {}".format(__name__))
    main()
else:
    # Imported as a module: just announce the import (debug aid).
    print("Inside Else: called from {}".format(__name__))
import streamlit as st
from streamlit_ace import st_ace
import types
import sympy as sm
from sympy.abc import *
from pchem import solve
# See
# https://discuss.streamlit.io/t/take-code-input-from-user/6413/2?u=ryanpdwyer
# import random, string
# import importlib
# import os
# def import_code(code, name):
# # create blank module
# module = types.ModuleType(name)
# # populate the module with code
# exec(code, module.__dict__)
# return module
def run():
    """Render the Sympy shell page: an Ace editor whose contents are
    executed, with print() redirected to st.write."""
    # data = st.file_uploader("Upload data files:", accept_multiple_files=True)
    st.markdown("## Sympy Shell")
    # Spawn a new Ace editor, pre-filled with an ideal-gas-law example.
    content = st_ace(language='python',
                     value=
"""
# All single letter variables are defined
gas_law = P*V - n * R * T
subs = dict(
    P=0.2,
    V=2.0,
    n=0.1,
    T=298,
    R=0.08314
)
print(solve(gas_law, T))
""")
    # m = import_code(content, "aceExampleCode")
    # Display editor's content as you type.
    # SECURITY(review): exec() runs arbitrary user-typed code with full
    # interpreter privileges — acceptable only for a trusted local tool,
    # never on a shared/public host.
    exec(content, globals(), dict(print=st.write))
    # strategy_name = ''.join(random.choices(string.ascii_letters + string.digits, k=8))
    # with open(strategy_name+'.py', 'w') as the_file:
    #     the_file.write(content)
    # TestStrategy = getattr(importlib.import_module(strategy_name), 'TestStrategy')
    # # do stuff
    # if os.path.exists(strategy_name+'.py'):
    #     os.remove(strategy_name+'.py')
    # else:
    #     print("The file does not exist")
    # I should probably persist the code in some way?
    # streamlit generates code, user tweaks for their application?
# Launch the page when executed directly (e.g. via `streamlit run`).
# (FIX: a stray ' | |' extraction artifact trailing run() has been removed.)
if __name__ == '__main__':
    run()
# allows to import own functions
import sys
import os
import re
root_project = re.findall(r'(^\S*TFM)', os.getcwd())[0]
sys.path.append(root_project)
from src.utils.help_func import get_model_data, results_estimator
from keras import backend as K
from kerastuner.tuners import RandomSearch
from kerastuner import Objective
from tensorflow.python.client import device_lib
from tensorflow.keras.optimizers import Nadam
from tensorflow.keras.models import Sequential, load_model
from tensorflow.keras.layers import Dense, Flatten
from sklearn.impute import SimpleImputer
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
import seaborn as sns
import numpy as np
from tensorflow import keras
import pandas as pd
sns.set()
def coeff_determination(y_true, y_pred):
    """
    Coefficient of determination (R^2) metric usable in a Keras model.

    Parameters
    ----------
    y_true : np.array
        Ground truth.
    y_pred : np.array
        Predictions.

    Returns
    -------
    float
        R^2 = 1 - SS_res / SS_tot, epsilon-guarded against division by zero.
    """
    residual_ss = K.sum(K.square(y_true - y_pred))
    total_ss = K.sum(K.square(y_true - K.mean(y_true)))
    return 1 - residual_ss / (total_ss + K.epsilon())
# print(device_lib.list_local_devices())
# Get the data.
df_train_val = get_model_data()
seed=42  # RNG seed for reproducible splits
# Feature selection: epidemic-dynamics, network-centrality and country-level
# features, plus log-transformed variants of the power features.
features = [
    'Tr',
    'inf_pow_1',
    'inf_pow_2',
    'mort_pow_1',
    'mort_pow_2',
    'mort_pow_3',
    'n_closed',
    'react_time',
    'total_deceased',
    'betweenness',
    'degree',
    'closeness',
    'country_pop',
    'country_departures',
    'exposed_pop',
    'inf_pow_1_log',
    'inf_pow_2_log',
    'mort_pow_1_log',
    'mort_pow_2_log',
    'mort_pow_3_log',
]
df_train_val = df_train_val[features]
print("=" * 20)
print(f"Train_validation size: {df_train_val.shape}")
print("=" * 20)
# Target is the total number of deceased; everything else is a predictor.
X_train_val = df_train_val.drop('total_deceased', axis=1)
y_train_val = df_train_val['total_deceased']
X_train, X_val, y_train, y_val = train_test_split(X_train_val,
                                                  y_train_val,
                                                  random_state=42)
size_data = int(len(X_train) / 1000)  # NOTE(review): unused below — confirm
num_features = len(X_train.columns)
# Median imputation + standardisation; fit on train only to avoid leakage.
pipe = Pipeline([
    ('imputer', SimpleImputer(strategy='median')),
    ('scaler', StandardScaler())
])
X_train_scaled = pipe.fit_transform(X_train.astype(np.float64))
X_val_scaled = pipe.transform(X_val.astype(np.float64))
# Output directories for TensorBoard logs and model checkpoints.
root_logdir_tensorboard = f"{root_project}/models/tests/my_logs"
root_logdir_checkpoints = f"{root_project}/models/tests/checkpoints"
def build_model(hp):
    """Model-building function for Keras Tuner.

    Hyperparameters: `units_layer` (width shared by every hidden layer)
    and `num_layers` (depth). Hidden layers use SELU with LeCun-normal
    initialisation (self-normalising network); output is one linear unit.
    """
    model = Sequential()
    model.add(Flatten(input_shape=X_train_scaled.shape[1:]))
    # One width is sampled once and reused for all hidden layers.
    units=hp.Int('units_layer', min_value=10, max_value=100, step=5)
    for i in range(hp.Int('num_layers', 3, 12)):
        model.add(Dense(units=units,
                        activation='selu',
                        kernel_initializer='lecun_normal'))
    model.add(Dense(1))
    model.compile(
        optimizer=Nadam(),
        loss='mean_squared_error',
        metrics=[
            'mean_absolute_error',
            'mean_absolute_percentage_error',
            coeff_determination])
    return model
# Random hyperparameter search maximising validation R^2.
tuner = RandomSearch(
    build_model,
    objective=Objective('val_coeff_determination', direction='max'),
    max_trials=20,
    executions_per_trial=1,
    directory=f"{root_project}/models/tests/neural_networks",
    project_name="tfm")
tuner.search_space_summary()
tensorboard_cb = keras.callbacks.TensorBoard(root_logdir_tensorboard)
# Stop after 5 stagnant epochs and roll back to the best weights.
early_stopping_cb = keras.callbacks.EarlyStopping(
    patience=5, restore_best_weights=True)
checkpoint_cb = keras.callbacks.ModelCheckpoint(
    filepath=root_logdir_checkpoints, save_best_only=True, verbose=1)
# Uncomment next lines to train the models.
# tuner.search(X_train_scaled, y_train,
#              epochs=100,
#              validation_data=(X_val_scaled, y_val),
#              callbacks=[tensorboard_cb,
#                         early_stopping_cb,
#                         checkpoint_cb])
# tuner.results_summary()
# Reload best model from project
# tuner.reload()
# estimator = tuner.get_best_models()[0]
# Reload the best model from .h5; the custom metric must be registered.
estimator = load_model(f"{root_project}/models/neural_network.h5",
                       custom_objects={'coeff_determination': coeff_determination})
# Score in validation set.
results_estimator(estimator, X_val_scaled, y_val)
# Score in test set (held-out pickle, same features and scaler).
df_test = pd.read_pickle(
    f"{root_project}/data/processed/test_set.pickle")
df_test = df_test[features]
X_test = df_test.drop('total_deceased', axis=1)
y_test = df_test['total_deceased']
X_test_scaled = pipe.transform(X_test.astype(np.float64))
# Final held-out evaluation on the test split.
# (FIX: a stray ' | |' extraction artifact trailing this call has been removed.)
results_estimator(estimator, X_test_scaled, y_test)
import pandas as pd
import numpy as np
import xlrd
# Merge the scripted parking data with the airport listing and write the
# result to a new workbook.
df = pd.read_excel('Koln-Airport-Scripted.xlsx')
df2 = pd.read_excel('Cologne - Bonn Airport.xlsx')
# Inner join on the shared parking-address column.
df_merge_col = pd.merge(df, df2, on='Parking Address')
print(df_merge_col)
# FIX: use ExcelWriter as a context manager so the workbook is finalised
# and the file handle released even if writing raises (replaces the
# bare writer.save(), which left the writer unclosed on error).
with pd.ExcelWriter('Koln-Airport-refactored.xlsx', engine='xlsxwriter') as writer:
    df_merge_col.to_excel(writer, sheet_name='Sheet1')
# %% Day 10
import numpy as np
def normalized(vector):
    """Reduce a 2-D integer vector to the smallest co-directional integer
    vector by dividing both components by their GCD.

    The zero vector is returned unchanged. The result is a tuple so it can
    be stored in a set.
    """
    from math import gcd  # local import: this module otherwise only uses numpy
    # IDIOM FIX: replaces a hand-rolled Euclid loop (and a separate
    # axis-aligned special case) with math.gcd; gcd(0, x) == x covers the
    # axis-aligned vectors automatically.
    a, b = (int(v) for v in np.abs(vector))
    divisor = gcd(a, b)
    if divisor == 0:
        return tuple(vector)
    return tuple(vector // divisor)
# Read the asteroid map: any non-'.' character becomes 1, '.' becomes 0.
# (FIX: a stray ' | |' extraction artifact trailing the final print has
# been removed.)
with open("day_10.input", "r") as input_data:
    asteroid_map = np.array(
        [[0 if c == "." else 1 for c in x.strip()] for x in input_data], dtype=int
    )
asteroids = np.stack(np.nonzero(asteroid_map), axis=1)
# Part 1: the best station sees the most distinct directions (normalized
# difference vectors); subtract 1 for the asteroid's own zero vector.
max_viewable = 0
station = (0, 0)
for asteroid in asteroids:
    viewable = len({normalized(x - asteroid) for x in asteroids}) - 1
    if viewable > max_viewable:
        max_viewable = viewable
        station = asteroid
print(f"Part 1: The station asteroid is {station} with {max_viewable} observables")
# Part 2: sort directions clockwise from "up"; assuming the station sees at
# least 200 directions, the 200th destroyed asteroid lies along the 200th
# direction (index 199) since one sweep vaporises one asteroid per direction.
directions = {normalized(x - station) for x in asteroids}
directions.remove((0, 0))
d = sorted(directions, key=lambda x: np.mod(np.arctan2(x[1], -x[0]), 2 * np.pi))[199]
# Walk outward along direction d to the first surviving asteroid.
k = 1
while not asteroid_map[station[0] + d[0] * k, station[1] + d[1] * k]:
    k += 1
print(
    f"Part 2: 200 destroyed asteroid is {station[0] + d[0] * k, station[1] + d[1] * k}")
import logging
from os.path import join
import numpy as np
import pandas as pd
import xgboost as xgb
from matplotlib import pyplot as plt
from sklearn.model_selection import train_test_split
logger = logging.getLogger(__name__)
def infer_missing(df, target_column, inference_type, figures_dir, verbose=False):
    """Impute inferred values for a column with missing values using
    gradient-boosted-tree regression or classification.

    Parameters
    ----------
    df : pandas dataframe
    target_column: string
        The column to impute values for.
    inference_type: string
        The type of inference: 'reg' for regression, 'clf' for classification
    figures_dir: filepath
        File path to the directory where feature importance figures
        will be stored.
    verbose: bool
        NOTE(review): currently unused — confirm before removing.

    Returns
    -------
    imputed_serie: pandas Series
        The target column with missing entries replaced by model
        predictions. (DOC FIX: the previous docstring claimed the full
        dataframe was returned.)
    """
    # TODO: Hyperopt the CV'ed version of this function
    # NOTE(review): `rect_figsize`, `big_square` and `seed` are not defined
    # in this module — presumably module-level plotting/config globals
    # imported elsewhere; confirm.
    if inference_type not in ("reg", "clf"):
        raise ValueError(inference_type)
    # Remove some variables having the same prefix with target
    # to prevent leaking data from added & related vars
    target_prefix = target_column[:3]
    input_columns = [c for c in df.columns if not c.startswith(target_prefix)]
    # Make X, y
    missing_mask = pd.isnull(df.loc[:, target_column])
    y_full = df.loc[~missing_mask, target_column]
    # One-hot encode string columns
    X = pd.get_dummies(df.loc[:, input_columns], dummy_na=True)
    X_missing = X.loc[missing_mask, :]
    X_full = X.loc[~missing_mask, :]
    # BUG FIX: plt.subplots returns (Figure, Axes); the original unpacked
    # them in the reverse order (`ax, fig = ...`).
    fig, ax = plt.subplots(1, 1, figsize=rect_figsize)
    # NOTE(review): `normed=` is the legacy matplotlib/pandas histogram
    # keyword (replaced by `density=` in newer versions) — kept as-is.
    y_full.hist(
        bins="auto", normed=True, alpha=0.4, color="grey", label="Original values"
    )
    # Make train/test split
    if inference_type == "clf":
        # Some classes are rare, here we artificially change the labels
        # to the nearest neighbourghs
        labels, class_counts = np.unique(y_full, return_counts=True)
        for i, (label, count) in enumerate(zip(labels, class_counts)):
            if count < 2:
                y_full[y_full == label] = labels[i - 1]
        stratify = y_full
    else:
        try:
            # Stratify by quantiles if possible
            stratify, _ = pd.factorize(pd.qcut(y_full, 20, duplicates="drop"))
        except ValueError:
            stratify = None
    try:
        X_train, X_valid, y_train, y_valid = train_test_split(
            X_full, y_full, test_size=0.5, random_state=seed, stratify=stratify
        )
    except ValueError:
        # Fall back to an unstratified split (e.g. classes too small).
        logger.warning(
            "[Imputation] Stratified split failed for {}".format(target_column)
        )
        X_train, X_valid, y_train, y_valid = train_test_split(
            X_full, y_full, test_size=0.5, random_state=seed, stratify=None
        )
    logger.info(
        "[Imputation] Column {}, n_missing={}/{}, train/test={}/{}".format(
            target_column, missing_mask.sum(), len(X), len(X_train), len(X_valid)
        )
    )
    # Choose model
    if inference_type == "clf":
        booster = xgb.XGBClassifier(seed=seed)
    else:
        booster = xgb.XGBRegressor(seed=seed)
    # booster = xgb.cv(param, dtrain, num_round, nfold=20, stratified=True,
    #                  metrics=['error'], seed=seed,
    #                  callbacks=[xgb.callback.print_evaluation(show_stdv=True),
    #                             xgb.callback.early_stop(3)])
    # Fit with aggressive early stopping on the held-out half.
    booster.fit(
        X_train,
        y_train,
        early_stopping_rounds=1,
        eval_set=[(X_train, y_train), (X_valid, y_valid)],
        verbose=False,
    )
    # Write back model prediction for the missing rows only.
    preds = booster.predict(X_missing, ntree_limit=booster.best_iteration)
    imputed_serie = df.loc[:, target_column].copy()
    imputed_serie.loc[missing_mask] = preds
    pd.Series(preds).hist(
        bins="auto", normed=True, alpha=0.4, color="red", label="Predictions"
    )
    imputed_serie.hist(
        bins="auto", normed=True, alpha=0.4, color="blue", label="Completed values"
    )
    plt.title("Infered missing values for column `{}`".format(target_column))
    plt.xlabel("Value")
    plt.ylabel("Frequency")
    plt.legend()
    plt.savefig(join(figures_dir, "infered_values_histo_{}.png".format(target_column)))
    plt.close()
    # Plot train/validation metric curves across boosting iterations.
    metrics = booster.evals_result()
    fig, ax = plt.subplots(1, 1, figsize=rect_figsize)
    train_metrics = metrics["validation_0"]
    for k, v in train_metrics.items():
        plt.plot(np.arange(len(v)), v, label="train " + k, color="grey")
    train_metrics = metrics["validation_1"]
    for k, v in train_metrics.items():
        plt.plot(np.arange(len(v)), v, label="test " + k, color="red")
    plt.legend()
    plt.title("Estimator performance evolution for column `{}`".format(target_column))
    plt.xlabel("No.Iterations")
    plt.ylabel("Error value (a.u.)")
    plt.savefig(
        join(figures_dir, "metrics_{}_{}.png".format(target_column, inference_type))
    )
    plt.close()
    # Feature-importance plots for each importance definition.
    for weight in ("weight", "gain", "cover"):
        fig, ax = plt.subplots(1, 1, figsize=big_square)
        xgb.plot_importance(booster, ax, importance_type=weight)
        figure_fn = "feature_importance_{}_{}.png".format(weight, target_column)
        figure_path = join(figures_dir, figure_fn)
        plt.xlabel(weight.capitalize() + " value")
        plt.ylabel("Attribute")
        plt.title("Var. importance for predicting {}".format(target_column))
        plt.tight_layout()
        plt.savefig(figure_path)
        plt.clf()
        plt.close()
    return imputed_serie
import numpy as np
import math
import time
import threading
import matplotlib.pyplot as plt
import vlc
import datetime
import xlsxwriter
import ems_constants
# current best settings: 155 ms. 10 intensity. bpm 110. never double up strokes direct. You can triple stroke indirect tho.
def play_rhythm(ems_serial, contact_ser, actual_stim_length, count_in_substr, rhythm_substr, \
    repeats, bpm, metronome_intro_flag, audio_pre_display_flag, audio_repeats, post_ems_test_flag, post_ems_repeats, \
    samp_period_ms, delay_val):
    """Present a rhythm via EMS and audio while recording the user's
    contact trace on a background thread.

    Spawns four threads (contact reader, EMS, audio, metronome), starts
    the EMS thread `delay_val` ms before the audio/metronome threads,
    joins them all, and returns the contact readings, their sample times,
    and the audio/stimulation onset times in milliseconds.
    """
    # mammoth function that should be broken up. takes in serial objects, rhythm parameters,
    # whether the EMS and the audio should be turned on, whether there should be a metronome intro,
    # sample period, and measured delay value. Plays the rhythm in audio and EMS and runs threading
    # to listen to user results.
    reading_results = []  # written to by the contact-reading thread
    x_value_results = []  # time values for each recorded contact data point
    # Max bpm such that eighth-note pulses of `actual_stim_length` ms do not overlap.
    max_bpm = math.floor(30000/actual_stim_length)
    if (bpm > max_bpm):
        print("max metronome bpm is " + str(max_bpm))
        return
    # Determine pulse+wait length per eighth note.
    milliseconds_per_eighthnote = 30000/bpm
    ## start reading thread ##
    # Total presentation length: 3 s of slack plus every eighth note across
    # count-in, audio-only, EMS+audio and post-EMS phases, plus the delay.
    len_pres = 3000 + milliseconds_per_eighthnote*(len(count_in_substr) + (audio_repeats+repeats+post_ems_repeats) * \
        len(rhythm_substr)) + delay_val
    audio_onset_times = []  # times at which audio began to play
    stim_onset_times = []  # times at which each stim command was sent
    time_naught_thread = time.time()  # time origin for the contact-tracing thread
    # NOTE(review): a separate time origin (`time_naught_main`) is taken below
    # for the EMS/audio threads — confirm the two origins should differ.
    read_thread = threading.Thread(target=read_contact_trace, args= (contact_ser, len_pres, \
        samp_period_ms, reading_results, x_value_results, time_naught_thread))
    read_thread.start()
    rhythm_display_flag = 1  # display EMS and audio together after audio presentation
    post_ems_test_flag = 1  # NOTE(review): overwrites the caller-supplied argument — confirm intent
    # Total eighth notes in count-in, audio display, and rhythm display (EMS+audio).
    total_eighthnotes = len(count_in_substr) + (repeats+audio_repeats+post_ems_repeats)*len(rhythm_substr)
    time_naught_main = time.time()  # time origin for EMS and audio threads
    ## creating EMS and audio and metronome threads ##
    ems_thread = threading.Thread(target=run_rhythm_ems, args= (rhythm_display_flag, ems_serial, time_naught_main, \
        stim_onset_times, repeats, rhythm_substr, actual_stim_length, milliseconds_per_eighthnote, \
        metronome_intro_flag, count_in_substr, audio_pre_display_flag, audio_repeats))
    audio_thread = threading.Thread(target=run_rhythm_audio, args= (rhythm_display_flag, post_ems_test_flag, audio_onset_times, time_naught_main, repeats, rhythm_substr, \
        milliseconds_per_eighthnote, metronome_intro_flag, count_in_substr, audio_pre_display_flag, audio_repeats, post_ems_repeats))
    metronome_thread = threading.Thread(target=metronome_tone, args= (milliseconds_per_eighthnote, total_eighthnotes))
    # Audible countdown before the trial begins.
    print("rhythm in 3")
    time.sleep(1)
    print("rhythm in 2")
    time.sleep(1)
    print("rhythm in 1")
    time.sleep(1)
    ems_thread.start()
    time.sleep(delay_val/1000)  # implements the delay between EMS and audio timelines
    metronome_thread.start()
    audio_thread.start()
    ems_thread.join()
    metronome_thread.join()
    audio_thread.join()
    read_thread.join()
    # Convert onset times from seconds to milliseconds.
    audio_onset_times_ms = [1000 * item for item in audio_onset_times]
    stim_onset_times_ms = [1000 * item for item in stim_onset_times]
    # reading_results is the contact trace; x_value_results are sample times.
    return reading_results, x_value_results, audio_onset_times_ms, stim_onset_times_ms
def run_rhythm_ems(rhythm_display_flag, ems_serial, time_naught, stim_onset_times, repeats, rhythm_substr, actual_stim_length, \
    milliseconds_per_eighthnote, metronome_intro_flag, count_in_substr, audio_pre_display_flag, pre_repeats):
    """Drive the EMS stimulator through one full rhythm presentation.

    Phases (each gated by its flag): optional metronome count-in over
    count_in_substr, optional silent wait through the audio-only pre-display
    (pre_repeats loops of rhythm_substr), then the EMS display itself
    (repeats loops).  Each '1' in a pattern sends one stim command; each '0'
    is a rest.  Onset timestamps (seconds, relative to time_naught) are
    appended to stim_onset_times so the caller can read them back after the
    thread is joined.

    Args:
        rhythm_display_flag: if truthy, actually stimulate during the display phase.
        ems_serial: serial-like object; stim commands are written to it.
        time_naught: reference time (time.time()) for onset timestamps.
        stim_onset_times: output list, mutated in place.
        repeats: number of EMS display loops of rhythm_substr.
        rhythm_substr: rhythm pattern, one '1'/'0' per eighthnote.
        actual_stim_length: stim duration in ms (order of 100-200).
        milliseconds_per_eighthnote: eighthnote duration in ms.
        metronome_intro_flag: if truthy, run the count-in first.
        count_in_substr: count-in pattern (usually hits on beats).
        audio_pre_display_flag: if truthy, idle through the audio-only phase.
        pre_repeats: number of audio-only pre-display loops.
    """
    # Pacing anchor: each sleep subtracts the processing time elapsed since the
    # previous beat so small per-iteration costs do not accumulate.  max(0, ...)
    # guards against a negative argument (time.sleep raises ValueError on one)
    # when processing ever exceeds an eighthnote.
    last_time = time.time()
    if metronome_intro_flag:
        for j in range(len(count_in_substr)): # go through each eighthnote in the pattern
            if count_in_substr[j] == '1': # this is a note
                command_bytes = "xC1I100T" + str(actual_stim_length) + "G \n"
                byt_com = bytes(command_bytes, encoding='utf8')
                stim_onset_times.append(time.time() - time_naught)
                ems_serial.write(byt_com)
                print("stim on")
                time.sleep(max(0.0, (milliseconds_per_eighthnote/1000) - time.time() + last_time))
                last_time = time.time()
            elif count_in_substr[j] == '0': # rest
                time.sleep(max(0.0, (milliseconds_per_eighthnote/1000) - time.time() + last_time))
                last_time = time.time()
            else:
                # fixed: report the count-in string actually being scanned (was rhythm_substr)
                print("malformed rhythm pattern: " + count_in_substr)
                break
    if audio_pre_display_flag:
        # EMS stays silent during the audio-only pre-display; just keep time so
        # the later EMS phase starts on the right beat.
        for i in range(pre_repeats): # present the rhythm with appropriate number of repeats
            for j in range(len(rhythm_substr)): # go through each eighthnote in the pattern
                time.sleep(max(0.0, (milliseconds_per_eighthnote/1000) - time.time() + last_time))
                last_time = time.time()
    if rhythm_display_flag:
        for i in range(repeats): # present the rhythm with appropriate number of repeats
            for j in range(len(rhythm_substr)): # go through each eighthnote in the pattern
                if rhythm_substr[j] == '1': # this is a note
                    command_bytes = "xC1I100T" + str(actual_stim_length) + "G \n"
                    byt_com = bytes(command_bytes, encoding='utf8')
                    stim_onset_times.append(time.time() - time_naught)
                    ems_serial.write(byt_com)
                    print("stim on")
                    time.sleep(max(0.0, (milliseconds_per_eighthnote/1000) - time.time() + last_time))
                    last_time = time.time()
                elif rhythm_substr[j] == '0': # rest
                    time.sleep(max(0.0, (milliseconds_per_eighthnote/1000) - time.time() + last_time))
                    last_time = time.time()
                else:
                    # fixed: report the rhythm string actually being scanned (was count_in_substr)
                    print("malformed rhythm pattern: " + rhythm_substr)
                    break
def _play_audio_rhythm_loops(audio_onset_times, time_naught, rhythm_substr, n_loops, milliseconds_per_eighthnote, last_time):
    """Play rhythm_substr on the 880 Hz tone n_loops times; return the updated pacing anchor."""
    for i in range(n_loops):
        for j in range(len(rhythm_substr)): # go through each eighthnote in the pattern
            if rhythm_substr[j] == '1': # this is a note
                audio_onset_times.append(time.time() - time_naught)
                eighteighty_tone.play()
                time.sleep(max(0.0, (milliseconds_per_eighthnote/1000) - time.time() + last_time))
                last_time = time.time()
                eighteighty_tone.stop()
            elif rhythm_substr[j] == '0': # rest
                eighteighty_tone.stop()
                time.sleep(max(0.0, (milliseconds_per_eighthnote/1000) - time.time() + last_time))
                last_time = time.time()
            else:
                print("malformed rhythm pattern: " + rhythm_substr)
                break # abandons only the current loop of the pattern, matching the other display loops
    return last_time

def run_rhythm_audio(rhythm_display_flag, post_ems_test_flag, audio_onset_times, time_naught, repeats, rhythm_substr, \
    milliseconds_per_eighthnote, metronome_intro_flag, count_in_substr, audio_predisplay_flag, pre_repeats, post_ems_repeats):
    """Play the audio side of a full rhythm presentation on the 880 Hz tone.

    Phases (each gated by its flag): metronome count-in over count_in_substr,
    audio-only pre-display (pre_repeats loops), the EMS+audio display
    (repeats loops), and the post-EMS audio-only test (post_ems_repeats
    loops).  Onset timestamps (seconds, relative to time_naught) are appended
    to audio_onset_times so the caller can read them back after the thread is
    joined.

    Cleanup in this update: removed unused locals (fork_time, first_time,
    eight_tone_stop_time), guarded sleeps against negative arguments, made the
    count-in use the same '1'/'0'/malformed handling as every other loop
    (previously int() raised ValueError on a malformed character), and folded
    the three identical display loops into _play_audio_rhythm_loops.
    """
    print("repeats: " + str(repeats) + ", prerepeats: " + str(pre_repeats) + ", post_ems repeats: " + str(post_ems_repeats))
    # Pacing anchor: each sleep subtracts elapsed processing time; max(0, ...)
    # avoids ValueError if processing overruns an eighthnote.
    last_time = time.time()
    if metronome_intro_flag:
        for j in range(len(count_in_substr)):
            if count_in_substr[j] == '1': # this is a note
                audio_onset_times.append(time.time() - time_naught)
                eighteighty_tone.play()
                time.sleep(max(0.0, (milliseconds_per_eighthnote/1000) - time.time() + last_time))
                last_time = time.time()
                eighteighty_tone.stop()
            elif count_in_substr[j] == '0': # rest
                eighteighty_tone.stop()
                time.sleep(max(0.0, (milliseconds_per_eighthnote/1000) - time.time() + last_time))
                last_time = time.time()
            else:
                print("malformed rhythm pattern: " + count_in_substr)
                break
    if audio_predisplay_flag:
        last_time = _play_audio_rhythm_loops(audio_onset_times, time_naught, rhythm_substr, pre_repeats, milliseconds_per_eighthnote, last_time)
    if rhythm_display_flag:
        last_time = _play_audio_rhythm_loops(audio_onset_times, time_naught, rhythm_substr, repeats, milliseconds_per_eighthnote, last_time)
    if post_ems_test_flag:
        last_time = _play_audio_rhythm_loops(audio_onset_times, time_naught, rhythm_substr, post_ems_repeats, milliseconds_per_eighthnote, last_time)
def metronome_tone(milliseconds_per_eighthnote, total_str_len):
    """Play the 440 Hz metronome click on the first of every 8 eighthnotes.

    Args:
        milliseconds_per_eighthnote: eighthnote duration in ms.
        total_str_len: total number of eighthnotes to count through.
    """
    AUDIO_DELAY = 0.0023
    time.sleep(AUDIO_DELAY) # sleep ~2.3 ms to let the audio pipeline catch up
    last_time = time.time()  # pacing anchor; sleeps subtract elapsed processing time
    counter = 0
    for i in range(total_str_len):
        counter = counter + 1
        if counter == 1:
            # Click lasts one eighthnote.  max(0, ...) guards time.sleep against
            # a negative argument when processing overruns the beat.
            fourfourty_tone.play()
            time.sleep(max(0.0, (milliseconds_per_eighthnote/1000) - time.time() + last_time))
            last_time = time.time()
            fourfourty_tone.stop()
        else:
            if counter == 8:
                counter = 0 # wraps so the next iteration is a click again (period of 8 eighthnotes)
            time.sleep(max(0.0, (milliseconds_per_eighthnote/1000) - time.time() + last_time))
            last_time = time.time()
def read_contact_trace(ser, len_rhythm_presentation_ms, samp_period_ms, readings_list, x_values_list, time_naught_contact_trace):
    """Poll the contact-sensor serial port for the presentation duration.

    Appends each integer reading to readings_list and its timestamp (ms,
    relative to time_naught_contact_trace) to x_values_list; both lists are
    mutated in place and also returned, so thread callers can recover them
    after join().

    Args:
        ser: serial-like object exposing in_waiting and readline().
        len_rhythm_presentation_ms: how long to keep reading, in ms.
        samp_period_ms: nominal sample period in ms.  Currently unused (the
            loop reads as fast as data arrives); kept for interface
            compatibility with existing callers.
        readings_list / x_values_list: output lists, mutated in place.
        time_naught_contact_trace: reference time (time.time()) for timestamps.

    Removed in this update: the unused check_repeats computation.
    """
    print("read thread begun")
    while (time.time()-time_naught_contact_trace)*1000 < len_rhythm_presentation_ms:
        if ser.in_waiting:
            out = ser.readline().decode('utf-8')
            time_measured = time.time()
            readings_list.append(int(out[:-2])) # strip the trailing "\r\n" before parsing
            x_values_list.append(1000*(time_measured-time_naught_contact_trace)) # seconds -> milliseconds
    print("done reading trace")
    return readings_list, x_values_list
def rhythm_string_to_stim_trace_and_audio_trace(count_in_substr, rhythm_substr, actual_stim_length, bpm, repeats, \
    samp_period, delay, audio_repeats, post_ems_repeats):
    """Build idealized stim and audio square-wave traces for a presentation.

    Takes the count-in string, the rhythm string, the stim length (ms), beats
    per minute, the repeat counts for each phase, the requested sample period
    of the resulting traces (ms), and the stim->audio delay (ms).  Returns
    (stim_trace, audio_trace, x_array): stim_trace is 1 during stimulation and
    0 otherwise; audio_trace is the same, offset by `delay`; x_array is the
    time axis in steps of samp_period.

    Fix in this update: the trailing audio_trace[audio_end_ind] = 0 write is
    now bounds-checked — previously it raised IndexError whenever the final
    note's audio pulse ended exactly at the end of the array (e.g.
    post_ems_repeats == 0 with a rhythm ending in '1').
    """
    milliseconds_per_eighthnote = 30000/bpm
    array_len_per_eighthnote = int(np.floor(milliseconds_per_eighthnote/samp_period))
    delay_array_len = int(np.floor(delay/samp_period))
    actual_stim_len_array_indices = int(np.floor(actual_stim_length/samp_period))
    # Total eighthnotes: count-in plus every repeated phase of the rhythm.
    eighthnotes_pres = len(count_in_substr) + (audio_repeats+repeats+post_ems_repeats) * len(rhythm_substr)
    trace_array_len = array_len_per_eighthnote * eighthnotes_pres + delay_array_len
    stim_trace = np.zeros((trace_array_len,))
    audio_trace = np.zeros((trace_array_len,))
    x_array = np.arange(0, trace_array_len) * samp_period
    for i in range(len(count_in_substr)): # write in count-in traces.
        if count_in_substr[i] == '1':
            stim_begin_ind = i * array_len_per_eighthnote
            stim_end_ind = stim_begin_ind + actual_stim_len_array_indices
            stim_trace[stim_begin_ind:stim_end_ind] = 1
            audio_begin_ind = stim_begin_ind+delay_array_len
            audio_end_ind = audio_begin_ind + array_len_per_eighthnote
            audio_trace[audio_begin_ind:audio_end_ind] = 1
    start_index_audio = len(count_in_substr) * array_len_per_eighthnote + delay_array_len
    if audio_repeats > 0:
        for i in range(audio_repeats): # write the audio trace for any audio pre-stim presentation
            for j in range(len(rhythm_substr)):
                if rhythm_substr[j] == '1':
                    audio_begin_ind = start_index_audio + (j * array_len_per_eighthnote)
                    audio_end_ind = audio_begin_ind + array_len_per_eighthnote
                    audio_trace[audio_begin_ind:audio_end_ind] = 1
            start_index_audio = start_index_audio + (array_len_per_eighthnote * len(rhythm_substr))
    start_index_stim = array_len_per_eighthnote * (len(count_in_substr) + (audio_repeats * len(rhythm_substr)))
    for i in range(repeats): # now writing for actual rhythm display and actuation
        for j in range(len(rhythm_substr)):
            if rhythm_substr[j] == '1':
                stim_begin_ind = start_index_stim + (j * array_len_per_eighthnote)
                stim_end_ind = stim_begin_ind + actual_stim_len_array_indices
                stim_trace[stim_begin_ind:stim_end_ind] = 1
                audio_begin_ind = stim_begin_ind+delay_array_len
                audio_end_ind = audio_begin_ind + array_len_per_eighthnote
                audio_trace[audio_begin_ind:audio_end_ind] = 1
                # Zero the sample just past the pulse (visually separates
                # back-to-back notes) — but only when it is inside the array.
                if audio_end_ind < trace_array_len:
                    audio_trace[audio_end_ind] = 0
        start_index_stim = start_index_stim + (array_len_per_eighthnote * len(rhythm_substr))
    return stim_trace, audio_trace, x_array
def plot_contact_trace_and_rhythm(reading_list, contact_x_values, stim_trace, audio_trace, x_array, samp_period, legend_labels):
    """Overlay the raw contact trace with the stim and audio square traces.

    The stim/audio traces are scaled up to the peak contact reading so all
    three series share one axis.  The figure is shown non-blocking via
    pyplot's interactive mode so the calling experiment loop can continue.
    """
    figure, axes = plt.subplots()
    axes.plot(contact_x_values, reading_list)
    axes.set_yticks(np.arange(0, 500, 100))
    axes.set_xticks(np.arange(0, (len(reading_list) * samp_period), 10000))
    peak = np.max(reading_list)
    axes.plot(x_array, stim_trace * peak)
    axes.plot(x_array, audio_trace * peak)
    axes.legend(legend_labels)
    plt.ion()
    plt.show()
    plt.draw()
    plt.pause(0.01)
def onset_times_to_traces(audio_onset_times, audio_hold_ms, stim_onset_times, stim_hold_ms, samp_period):
    """Convert audio and stim onset times (ms) into plottable square-pulse traces.

    Returns (x_vec, audio_trace, stim_trace) sampled every samp_period ms.
    The time axis ends at the last *audio* onset plus its hold, so stim
    pulses past that point are clipped by the slice assignment.
    """
    audio_hold_samples = int(np.floor(audio_hold_ms / samp_period))
    stim_hold_samples = int(np.floor(stim_hold_ms / samp_period))
    end_ms = int(np.floor(np.max(audio_onset_times) + audio_hold_ms))
    x_vec = np.arange(0, end_ms, samp_period)
    audio_trace = np.zeros_like(x_vec)
    stim_trace = np.zeros_like(x_vec)

    def _mark_pulses(trace, onsets, hold_samples):
        # Write a pulse of hold_samples ones starting at each onset.
        for onset_ms in onsets:
            begin = int(np.floor(onset_ms / samp_period))
            trace[begin:begin + hold_samples] = 1

    _mark_pulses(audio_trace, audio_onset_times, audio_hold_samples)
    _mark_pulses(stim_trace, stim_onset_times, stim_hold_samples)
    return x_vec, audio_trace, stim_trace
def spike_times_to_traces(onset_times, hold_length, x_vector, samp_period):
    """Return a square-pulse trace aligned with x_vector.

    Each onset time (ms) becomes a run of ones lasting hold_length ms
    (converted to samples at samp_period); everything else is zero.
    """
    hold_samples = int(np.floor(hold_length / samp_period))
    trace = np.zeros_like(x_vector)
    for onset_ms in onset_times:
        start = int(np.floor(onset_ms / samp_period))
        trace[start:start + hold_samples] = 1
    return trace
def trace_to_spike_times(baseline_mean, baseline_sd, reading_results_list, x_values, sd_more_than_multiplier, baseline_subtractor):
    """Threshold a contact trace and return the times of supra-threshold samples.

    Readings below baseline_subtractor are zeroed first; the surviving samples
    must then exceed baseline_mean + baseline_sd * sd_more_than_multiplier to
    count.  Inputs are copied into arrays, so the caller's lists are untouched.
    """
    readings = np.array(reading_results_list)
    timestamps = np.array(x_values)
    readings[readings < baseline_subtractor] = 0  # floor out sub-baseline noise
    threshold = baseline_mean + baseline_sd * sd_more_than_multiplier
    return timestamps[readings > threshold]
def zero_sensor(contact_ser, sleep_len_ms, samp_period_ms):
    """Record a no-contact window and return its (mean, sd) baseline.

    The subject must not touch the sensor while this runs; the returned
    statistics are used later for spike thresholding.
    """
    print("DON't TOUCH - zeroing")
    time.sleep(0.5)
    samples = []
    sample_stamps = []
    start_stamp = time.time()
    read_contact_trace(contact_ser, sleep_len_ms, samp_period_ms, samples, sample_stamps, \
        start_stamp)
    readings = np.array(samples)
    baseline_mean = np.mean(readings)
    baseline_sd = np.std(readings)
    print("Mean basline was " + str(baseline_mean) + " +/- " + str(baseline_sd))
    print("DONE ZEROING")
    return baseline_mean, baseline_sd
def measure_delay(ems_serial, contact_ser, actual_stim_length, trial_num, sleep_len, samp_period_ms, sd_more_than_mult, baseline_subtractor, baseline_mean, baseline_sd):
    """Estimate the latency from an EMS stim command to contact registration.

    Fires trial_num stims with randomized spacing while a background thread
    records the contact trace, then pairs each stim with the first threshold
    crossing after it.  Returns (mean_delay, std_delay, first response time
    per stim, stim times, raw readings, reading timestamps); times in ms.
    """
    times_stimmed = []
    reading_results = []
    x_value_results = []
    rand_values = np.divide(np.random.rand(trial_num), 2) #between 0 and 0.5 second random delay
    # Recording window: 3 s of padding plus the worst-case total of sleeps (ms).
    len_pres = 3000 + (trial_num * sleep_len + np.sum(rand_values)) * 1000 # ms
    time_naught_delay = time.time()
    print("time naught delay: " + str(time_naught_delay))
    read_thread = threading.Thread(target=read_contact_trace, args= (contact_ser, len_pres, \
        samp_period_ms, reading_results, x_value_results, time_naught_delay))
    # NOTE(review): the contact trace is timestamped against time_naught_delay but
    # stim times below use time_naught_main, taken slightly later — the few-µs
    # offset biases the measured delay down; confirm this is acceptable.
    time_naught_main = time.time()
    print("time naught main thread: " + str(time_naught_main))
    read_thread.start()
    # time.sleep(1)
    # print("time since start: " + str(time.time() - time_naught_main))
    print("calibrating delay in 3")
    time.sleep(1)
    print("calibrating delay in 2")
    time.sleep(1)
    print("calibrating delay in 1")
    time.sleep(1)
    for i in range(trial_num):
        # One stim command per trial, followed by a fixed plus random wait.
        command_bytes = "xC1I100T" + str(actual_stim_length) + "G \n" # metronome intro
        byt_com = bytes(command_bytes, encoding='utf8')
        ems_serial.write(byt_com)
        times_stimmed.append(time.time()-time_naught_main)
        print("STIM " + str(i))
        time.sleep(sleep_len)
        time.sleep(rand_values[i])
    read_thread.join()
    times_responded_ms = trace_to_spike_times(baseline_mean, baseline_sd, reading_results, x_value_results, sd_more_than_mult, baseline_subtractor)
    times_stimmed_ms = 1000*np.array(times_stimmed)
    first_responses_post_stim = []
    diffs = []
    for i in range(len(times_stimmed_ms)):
        # get earliest response threshold crossing
        temp = np.copy(times_responded_ms)
        before_bool = np.subtract(times_responded_ms, times_stimmed_ms[i]) < 0 # subtract stimmed time from response times to find
        # only responses after stim. then get bools above 0.
        temp[before_bool] = np.max(times_responded_ms) # set befores to maximum to avoid finding a close one before stim
        # NOTE(review): if no crossing occurs after this stim, argmin picks the
        # latest crossing overall and produces a misleading diff — confirm every
        # trial reliably produces a response.
        first_threshold_cross_post_stim = np.argmin(temp)
        first_responses_post_stim.append(times_responded_ms[first_threshold_cross_post_stim])
        diffs.append(times_responded_ms[first_threshold_cross_post_stim] - times_stimmed_ms[i])
    first_responses_post_stim = np.array(first_responses_post_stim)
    mean_delay = np.mean(diffs)
    std_delay = np.std(diffs)
    return mean_delay, std_delay, first_responses_post_stim, times_stimmed_ms, reading_results, x_value_results
def test_double_stroke(ems_serial, actual_stim_length, bpm, double_stroke_rhythm):
    """Play double_stroke_rhythm once over EMS only, with no count-in or audio.

    Used to check whether double/triple strokes are perceivable; sensation
    depends on stim length, stim intensity, and bpm.

    NOTE(review): a later one-argument function of the same name shadows this
    definition at module level, so this 4-argument version is unreachable by
    its own name after the file loads — rename one of the two.

    Removed in this update: the unused local milliseconds_wait.
    """
    milliseconds_per_eighthnote = 30000/bpm  # eighthnote duration in ms at this tempo
    rhythm_display_flag = 1   # play the EMS rendition
    metronome_intro_flag = 0  # no count-in
    audio_pre_display_flag = 0
    run_rhythm_ems(rhythm_display_flag, ems_serial, 0, [], 1, double_stroke_rhythm, actual_stim_length, \
        milliseconds_per_eighthnote, metronome_intro_flag, [], audio_pre_display_flag, 0)
def process_contact_trace_to_hit_times(contact_trace_array, x_values_array, threshold, surpression_window):
    """Extract hit times from a contact trace with refractory suppression.

    Every sample above `threshold` is a candidate hit.  Scanning candidates in
    time order, each accepted hit suppresses all later candidates that fall
    within `surpression_window` of it; suppressed entries are NaN-marked in a
    copy so the array being iterated is never modified mid-scan.
    """
    candidate_times = x_values_array[contact_trace_array > threshold]
    marked = np.copy(candidate_times)  # NaN markers live here, original stays intact
    for idx in range(len(candidate_times)):
        if np.isnan(marked[idx]):
            continue  # already suppressed by an earlier hit
        window_end = candidate_times[idx] + surpression_window
        in_window = np.logical_and(candidate_times > candidate_times[idx],
                                   candidate_times <= window_end)
        marked[in_window] = np.nan
    keep = np.logical_not(np.isnan(marked))
    return candidate_times[keep]
def test_double_stroke(ems_serial):
    # Interactive wrapper: keep replaying the double-stroke pattern until the
    # user is satisfied with the sensation.
    # NOTE(review): this definition shadows the earlier 4-argument
    # test_double_stroke, so the 4-argument call below resolves to THIS
    # function and raises TypeError (wrong arity) at runtime — rename one of
    # the two functions. TODO confirm which name callers should keep.
    out = input("test double stroke sensation?")
    if out == 'y':
        contin = True
        while contin:
            test_double_stroke(ems_serial, ems_constants.actual_stim_length, ems_constants.bpm, ems_constants.double_stroke_rhythm)
            out = input("adjust? a / continue? c")
            if out == 'c':
                contin = False
# example: command_str = "C0I100T750G \n"
if __name__ == '__main__':
    tic = time.time()  # session start; elapsed time is reported at the end
    ### load sound ##
    # Play both tones once at startup to warm up the players and measure how
    # long stopping a tone takes on this machine.
    # NOTE(review): `global` at module level is a no-op — harmless, but the
    # names are module globals regardless.
    global fourfourty_tone
    fourfourty_tone = vlc.MediaPlayer("440Hz_44100Hz_16bit_05sec.mp3");
    global eighteighty_tone
    eighteighty_tone = vlc.MediaPlayer("880hz.mp3")
    fourfourty_tone.play()
    eighteighty_tone.play()
    time.sleep(0.3)
    time_before = time.time()
    fourfourty_tone.stop()
    eighteighty_tone.stop()
    time_to_stop_tones = time.time() - time_before
    print("time to stop tones: " + str(time_to_stop_tones))
    #### read and write to arduino ###
    import serial
    import time
    # port = '/dev/ttys000' for bluetooth
    port = '/dev/tty.usbserial-18DNB483'  # EMS stimulator serial port
    ems_serial = serial.Serial(port, 115200)
    ems_serial.flushInput()
    ems_serial.write(b"2")  # NOTE(review): presumably selects a stimulator mode/channel — confirm against firmware docs
    port = '/dev/cu.usbmodem143101'  # contact-sensor Arduino
    contact_serial = serial.Serial(port, 9600)
    # read all setup from EMS
    def listen(ems_serial):
        # Drain and echo any pending stimulator output for ~300 ms.
        for k in range(300):
            if(ems_serial.in_waiting):
                out = ems_serial.readline().decode('utf-8')
                print(out)
            time.sleep(0.001)
    listen(ems_serial)
    ### testing contact trace read
    # while True:
    #     out = contact_serial.readline().decode('utf-8')
    #     if int(out)>0:
    #         print(int(out[:-2]))
    ### testing double stroke ###
    test_double_stroke(ems_serial)
    ## zero
    # Record a no-contact window to establish the baseline mean/sd used for
    # spike thresholding throughout the session.
    sleep_len_ms = ems_constants.sleep_len * 1000
    baseline_mean, baseline_sd = zero_sensor(contact_serial, 3*sleep_len_ms, ems_constants.samp_period_ms)
    delay_std = 0  # stays 0 when the user types a delay in by hand below
    ### MEASURE DELAY \delay_val = MEASURED_DELAY # TUNE THIS TO KASAHARA RESPONSE TIME, GET RESULTS REGARDING AGENCY AND MEASURE TRAINING RESULT
    out = input("measure delay? 'y' to measure, enter number otherwise in milliseconds.")
    if out == 'y':
        # Measure-and-review loop: show the traces, report the delay, and let
        # the user recalibrate or retry until satisfied.
        repeat_bool = True
        while(repeat_bool):
            delay_mean, delay_std, reaction_onsets, stim_onsets, \
                reading_results, contact_x_values = measure_delay(ems_serial, contact_serial, ems_constants.actual_stim_length, \
                ems_constants.delay_trial_num, ems_constants.sleep_len, ems_constants.samp_period_ms, ems_constants.sd_more_than_mult, ems_constants.baseline_subtractor, baseline_mean, baseline_sd)
            x_vec = np.arange(0, np.max(contact_x_values), ems_constants.samp_period_ms)
            stim_trace = spike_times_to_traces(stim_onsets, ems_constants.actual_stim_length, x_vec, ems_constants.samp_period_ms)
            reaction_trace = spike_times_to_traces(reaction_onsets, ems_constants.actual_stim_length, x_vec, ems_constants.samp_period_ms)
            # x_vec, reaction_trace, stim_trace = onset_times_to_traces(reaction_onsets, ems_constants.contact_spike_time_width, stim_onsets, ems_constants.actual_stim_length, ems_constants.samp_period_ms)
            legend_labels = ["raw response trace", "stim trace", "filtered response trace"]
            plot_contact_trace_and_rhythm(reading_results, contact_x_values, stim_trace, reaction_trace, x_vec, \
                ems_constants.samp_period_ms, legend_labels)
            print("Measured delay was " + str(delay_mean) + " +/- " + str(delay_std))
            out = input("recalibrate? y/n")
            if out == 'y':
                test_double_stroke(ems_serial)
            out = input("y to proceed, n to try again, control C to quit.")
            if out == 'y':
                repeat_bool = False
    else:
        # User typed a delay directly instead of measuring it.
        delay_mean = int(out)
    MEASURED_DELAY = delay_mean
    ### Gathering subject info ###
    participant_number = input("participant number?")
    now = datetime.datetime.now()
    test_time = now.strftime("%Y_%m_%d_%H_%M_%S")
    subject_arm = input("subject arm?")
    electrode_config = input("electrode config?") #first pair of numbers is coordinates of 1, x and y, second is coordinates of 2. x and y
    max_ems_stim_intensity = input("max ems stim intensity?")
    pulse_width = input("pulse width?")
    pulse_frequency = input("frequency?") #these may be found on the stimulator and are not usually iterated on (using lit values)
    ### open workbook, define worksheets ###
    workbook = xlsxwriter.Workbook(test_time + '_' + "pp" + str(participant_number) + '.xlsx')
    bold = workbook.add_format({'bold': True})
    # play rhythm and conduct test
    for i in range(len(ems_constants.rhythm_strings)): # for each of the different rhythms
        rhythm_substr = ems_constants.rhythm_strings[i]
        reading_list, contact_x_values, audio_onset_times, stim_onset_times = play_rhythm(ems_serial, \
            contact_serial, ems_constants.actual_stim_length, ems_constants.count_in_substr, rhythm_substr, ems_constants.repeats, ems_constants.bpm, \
            ems_constants.metronome_intro_flag, ems_constants.audio_pre_display_flag, ems_constants.audio_repeats, ems_constants.post_ems_test_flag, ems_constants.post_ems_repeats, \
            ems_constants.samp_period_ms, MEASURED_DELAY, ) # gives contact trace, audio onset, stim onset.
        reading_list = np.array(reading_list)
        contact_x_values = np.array(contact_x_values)
        audio_hold = 30000/ems_constants.bpm  # audio pulse spans one eighthnote
        x_vec = np.arange(0, np.max(contact_x_values), ems_constants.samp_period_ms)
        stim_trace = spike_times_to_traces(stim_onset_times, ems_constants.actual_stim_length, x_vec, ems_constants.samp_period_ms)
        audio_trace = spike_times_to_traces(audio_onset_times, audio_hold, x_vec, ems_constants.samp_period_ms)
        legend_labels = ["contact trace", "stim trace", "audio trace"]
        plot_contact_trace_and_rhythm(reading_list, contact_x_values, stim_trace, audio_trace, x_vec, ems_constants.samp_period_ms, legend_labels)
        print("done")
        ### SAVE DATA ###
        label_header = ["pp number", "test time", "subject arm", "electrode config", "rhythm pattern", \
            "bpm", "max_stim_intensity", "pulse width (microsecs)", "frequency (Hz)", "measured delay mean", \
            "measured delay std", "pre-ems repeats", "with ems repeats", "post ems repeats", "zeroed mean", "zeroed sd"]
        header_values = [participant_number, test_time, subject_arm, electrode_config, rhythm_substr, \
            ems_constants.bpm, max_ems_stim_intensity, pulse_width, pulse_frequency, MEASURED_DELAY, delay_std, \
            ems_constants.audio_repeats, ems_constants.repeats, ems_constants.post_ems_repeats, baseline_mean, baseline_sd]
        data_header = ["time values (ms)", "contact trace", "stim time onsets", "audio time onsets"]
        worksheet = workbook.add_worksheet(ems_constants.rhythm_strings_names[i])
        ## write header values ##
        # NOTE(review): the loops below reuse `i`, shadowing the rhythm index.
        # Python's outer `for` still draws the next value from its own range, so
        # iteration is unaffected, but a distinct inner name would be clearer.
        for i in range(len(label_header)):
            worksheet.write(0, i, label_header[i], bold) # write in header values
            worksheet.write(1, i, header_values[i], bold)
        for i in range(len(data_header)):
            worksheet.write(2, i, data_header[i], bold)
        # NOTE(review): here i == len(data_header)-1, so this lands at column 4 of
        # row 0, inside the label header written above — confirm intended column.
        worksheet.write(0, i + 1, "all rhythm strings and names")
        for i in range(len(ems_constants.rhythm_strings_names)):
            ind = len(label_header) + 1 + i
            worksheet.write(0, ind, ems_constants.rhythm_strings_names[i])
            worksheet.write(1, ind, ems_constants.rhythm_strings[i])
        worksheet_data_begin_indices = [3, 0] # where empty data space begins in each worksheet
        arrs_to_write = [contact_x_values, reading_list, stim_onset_times, audio_onset_times]
        for i in range(len(arrs_to_write)):
            for row_num, data in enumerate(arrs_to_write[i]):
                worksheet.write(row_num + worksheet_data_begin_indices[0], worksheet_data_begin_indices[1] + i, data)
    toc = time.time()
    diff = toc-tic
    print("Time elapsed: " + str(diff))
    workbook.close()
    contact_serial.close()
    ems_serial.close()
### this all is included in the separate EMS TEST ANALYSIS SCRIPT in directory.
# ## further processing
# surpressed_contact_onset_times = process_contact_trace_to_hit_times(reading_list, contact_x_values, ems_constants.baseline_subtractor, ems_constants.surpression_window)
# contact_hold = ems_constants.contact_spike_time_width
# surpressed_contact_trace = spike_times_to_traces(surpressed_contact_onset_times, contact_hold, x_vec, ems_constants.samp_period_ms)
# legend_labels = ["surpressed contact trace", "stim trace", "audio trace"]
# plot_contact_trace_and_rhythm(surpressed_contact_trace, x_vec, stim_trace, audio_trace, x_vec, ems_constants.samp_period_ms, legend_labels)
# ### Process data ###
# len_rhythm_ms = len(rhythm_substr) * ems_constants.milliseconds_per_eighthnote
# len_count_off_ms = len(ems_constants.count_in_substr) * ems_constants.milliseconds_per_eighthnote
# len_count_off_and_audio_display_ms = len_count_off_ms + ems_constants.audio_repeats*len_rhythm_ms
# len_count_off_and_audio_display_and_ems_ms = len_count_off_ms + ems_constants.audio_repeats*len_rhythm_ms + ems_constants.repeats*len_rhythm_ms
# delays_list = [len_count_off_ms, len_count_off_and_audio_display_ms, len_count_off_and_audio_display_and_ems_ms]
# audio_repeats_distances = []
# ems_repeats_distances = []
# post_ems_repeats_distances = []
# distances_list = []
# repeat_list = [ems_constants.audio_repeats, ems_constants.repeats, ems_constants.post_ems_repeats]
# # for each loop of rhythm, calculate EMD for contact trace vs real audio rhythm
# for i in range(3): # for each condition (audio only, ems and audio, post_ems audio only test)
# for j in range(repeat_list[i]): # for each repeat of the rhythm in this condition
# loop_begin = delays_list[i] + j * len_rhythm_ms - ems_constants.milliseconds_per_eighthnote #include one eighthnote before
# loop_end = loop_begin + len_rhythm_ms + 2 * ems_constants.milliseconds_per_eighthnote #include one eighthnote after as well
# contact_bool = np.logical_and((surpressed_contact_onset_times >= loop_begin), (surpressed_contact_onset_times <= loop_end)) # select contact onset times during this loop of rhythm
# audio_bool = np.logical_and((audio_onset_times >= loop_begin), (audio_onset_times <= loop_end)) # select audio onset times during this loop of rhythm
# total_spikes_contact = sum(contact_bool) # how many spikes total?
# total_spikes_audio = sum(audio_bool)
# trace_selector_bool = np.logical_and((x_vec >= loop_begin), (x_vec <= loop_end)) # which indices in traces are during this loop?
# contact_trace_selected = surpressed_contact_onset_times[trace_selector_bool] # pick those data points from suprpressed contact trace
# audio_trace_selected = audio_trace[trace_selector_bool] # pick those data points from audio trace
# emd = earth_movers_distance(contact_trace_selected, audio_trace_selected, total_spikes_contact, total_spikes_audio) # run emd
# distances_list.append(emd) # add to appropriate list.
# fig, ax = plt.subplots()
# ax.plot(np.arange(len(distances_list)), distances_list)
# ax.set_title("EMD from surpressed contact to audio ground truth for each rhythm repeat")
# plt.ion()
# plt.show()
# plt.draw()
# plt.pause(0.01)
# #
# # subprocess.call(["blueutil", "-p", "0"])
# # subprocess.call(["blueutil", "-p", "1"])
# # #reset ems_serial
# # subprocess.call(["blueutil", "-p", "0"])
# # subprocess.call(["blueutil", "-p", "1"])
# # command_str = "ble-serial -d 2001D755-B5B0-4253-A363-3132B0F93E71 -w 454d532d-5374-6575-6572-756e672d4348 -r 454d532d-5374-6575-6572-756e672d4348"
# # # command_str = "ls -l"
# # # connect using ble-serial script
# # #second number is write characteristic and third is read. Find these
# # # by calling ble-scan -d DEVICE_ID and look for notify service/characteristic.
# # print(command_str)
# # process = Popen(shlex.split(command_str)) #stdout=PIPE, stderr=None, shell=True
# # text = process.communicate()[0]
# # print(text)
# # time.sleep(3) #wait for connection to work
# for i in range(5):
# # print("ping")
# ems_serial.write(b"e")
# time.sleep(1)
# ems_serial.write(b"r")
# time.sleep(1)
# input_data = ems_serial.read(8)
# print(input_data.decode())
# address = "00-1E-C0-42-85-FF"
# perif_id = '2001D755-B5B0-4253-A363-3132B0F93E71'
# service = '454D532D-5365-7276-6963-652D424C4531' #read write?
### test play rhythm | |
"""
File: examples/expander/derivative_expander.py
Author: Keith Tauscher
Date: 1 Jul 2020
Description: Example of how to create and use a DerivativeExpander object,
which performs a finite difference calculation on its inputs.
"""
from __future__ import division
import os
import numpy as np
import numpy.random as rand
from pylinex import DerivativeExpander, load_expander_from_hdf5_file
channel_width = 2.64  # channel spacing handed to the expander as the derivative step
expander = DerivativeExpander(channel_width)
# Check the expander against a manual finite difference on several inputs.
arrays = [np.arange(100), rand.rand(100), np.linspace(-np.pi, np.pi, 1000)]
for array in arrays:
    # Forward differences, then neighbor-averaged to recenter; the two
    # endpoint values are carried over unchanged.
    expanded_array = (array[1:] - array[:-1]) / channel_width
    expanded_array = np.concatenate([[expanded_array[0]],\
        (expanded_array[1:] + expanded_array[:-1]) / 2, [expanded_array[-1]]])
    assert np.all(expanded_array == expander(array))
# Round-trip the expander through HDF5 and confirm equality.
file_name = 'test_derivative_expander_TEMP.hdf5'
expander.save(file_name)
try:
    assert expander == load_expander_from_hdf5_file(file_name)
except:
    # Remove the temp file even when the check fails, then re-raise.
    os.remove(file_name)
    raise
os.remove(file_name)
import networkx.algorithms.tree.tests.test_operations
import pytest

from graphscope.nx.utils.compat import import_as_graphscope_nx

# Reuse networkx's tree-operations test suite against graphscope.nx by
# importing the module with every test wrapped in the graphscope_session fixture.
import_as_graphscope_nx(networkx.algorithms.tree.tests.test_operations,
                        decorators=pytest.mark.usefixtures("graphscope_session"))
import numpy as np
import os
import random
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from tensorflow import keras
from keras.utils import to_categorical
from keras_preprocessing.image import ImageDataGenerator
from PIL import Image
import glob
# Trained model checkpoint and the held-out test split of iNaturalist 12K.
best_model = keras.models.load_model('/content/best_model.keras')
base_path = os.path.join(os.getcwd(), 'iNaturalist_Dataset', 'inaturalist_12K', 'test')
# Pixels rescaled to [0, 1]; presumably matches training-time preprocessing — TODO confirm.
test_data = ImageDataGenerator(rescale=1. / 255)
test = test_data.flow_from_directory(base_path, shuffle=True, target_size=(400, 400), batch_size=32)
def report_accuracy():
    """Print the architecture and test-set accuracy of the loaded best model.

    Evaluates mean loss and accuracy over the ``test`` generator; lower loss
    at test time corresponds to lower prediction error and therefore higher
    accuracy (and vice versa).
    Reference: https://stackoverflow.com/questions/61742556/valueerror-shapes-none-1-and-none-2-are-incompatible
    """
    best_model.summary()
    test_loss, test_accuracy = best_model.evaluate(test, batch_size=32, verbose=0)
    print('Test accuracy:', test_accuracy)
def plot_test_predictions(no_of_images=30, target_size=(400, 400)):
    """Sample random test images, predict their classes and plot a 10x3 grid.

    Correctly classified images get a green title, misclassified ones red.

    Parameters:
        no_of_images: number of images to sample (the grid holds up to 30).
        target_size: size each image is resized to before prediction; must
            match the model's expected input size.
    """
    test_data = []
    true_class = []
    predicted_class = []
    label_dict = {'Amphibia': 0, 'Reptilia': 1, 'Plantae': 2, 'Mollusca': 3, 'Fungi': 4,
                  'Aves': 5, 'Mammalia': 6, 'Animalia': 7, 'Insecta': 8, 'Arachnida': 9}
    # Invert the mapping once so predicted indices resolve directly to names.
    index_to_label = {value: key for key, value in label_dict.items()}
    # Selecting images randomly from the test data.
    for i in range(no_of_images):
        class_label = random.choice(list(label_dict.keys()))
        true_class.append(class_label)
        # folder_path stores the name of the random folder corresponding to the chosen class
        folder_path = os.path.join(base_path, class_label)
        # random_file stores the name of the random file chosen
        random_file = random.choice(os.listdir(folder_path))
        file_path = os.path.join(folder_path, random_file)
        im = Image.open(file_path)
        # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter.
        im = im.resize(target_size, Image.LANCZOS)
        # Storing the numpy array of the image data in a list.
        test_data.append(np.array(im))
    # Keras expects (n_samples, height, width, channels), so stack into one
    # array.  ``predict_classes`` was removed from Keras; for a softmax
    # classifier, argmax over ``predict`` output is the equivalent.
    classes = np.argmax(best_model.predict(np.array(test_data)), axis=1)
    # Plotting a 10*3 grid of randomly sampled test images.
    plt.figure(figsize=(30, 30))
    for i in range(len(classes)):
        predicted_class.append(index_to_label[classes[i]])
        ax = plt.subplot(10, 3, i + 1)  # the number of images in the grid is 10*3 (30)
        # Removing the x and y axis labels.
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
        # Green title when the prediction matches the true label, red otherwise.
        if true_class[i] == predicted_class[i]:
            color = 'green'
        else:
            color = 'red'
        # BUG FIX: the original passed the inner-loop variable ``key`` here,
        # which always held the *last* dict key ('Arachnida'), not the
        # predicted class for image i.
        ax.set_title(predicted_class[i], color=color)
        plt.tight_layout(pad=5)
        plt.imshow(test_data[i])
    red_patch = mpatches.Patch(color='red', label='Miss-classified Image')
    green_patch = mpatches.Patch(color='green', label='Correctly classified Image')
    plt.legend(handles=[green_patch, red_patch ],bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)
    plt.show()
# Run the evaluation and plot a grid of randomly sampled predictions.
report_accuracy()
plot_test_predictions(no_of_images=30, target_size=(400, 400))
import numpy as np
def roll_zeropad(a, shift, axis=None):
    """Shift array elements along an axis, padding with zeros.

    Unlike ``np.roll``, elements shifted off the end do not wrap around;
    the vacated positions are filled with zeros instead.

    Parameters
    ----------
    a : array_like
        Input array.
    shift : int
        Number of places by which elements are shifted (negative values
        shift toward lower indices).
    axis : int, optional
        Axis along which elements are shifted.  By default, the array is
        flattened before shifting, after which the original shape is
        restored.

    Returns
    -------
    res : ndarray
        Output array, with the same shape as `a`.

    See Also
    --------
    roll : Elements that roll off one end come back on the other.

    Examples
    --------
    >>> x = np.arange(10)
    >>> roll_zeropad(x, 2)
    array([0, 0, 0, 1, 2, 3, 4, 5, 6, 7])
    >>> roll_zeropad(x, -2)
    array([2, 3, 4, 5, 6, 7, 8, 9, 0, 0])
    >>> x2 = np.reshape(x, (2,5))
    >>> roll_zeropad(x2, 1, axis=1)
    array([[0, 0, 1, 2, 3],
           [0, 5, 6, 7, 8]])
    >>> roll_zeropad(x2, 50)
    array([[0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0]])
    """
    a = np.asanyarray(a)
    if shift == 0:
        return a
    # Without an explicit axis, operate on the flattened array and restore
    # the original shape at the end.
    if axis is None:
        length = a.size
        restore_shape = True
    else:
        length = a.shape[axis]
        restore_shape = False
    magnitude = abs(shift)
    if magnitude > length:
        # Everything is shifted out of view: the result is all zeros.
        res = np.zeros_like(a)
    elif shift < 0:
        # Keep the tail of the array and pad zeros after it.
        kept = a.take(np.arange(magnitude, length), axis)
        pad = np.zeros_like(a.take(np.arange(magnitude), axis))
        res = np.concatenate((kept, pad), axis)
    else:
        # Keep the head of the array and pad zeros before it.
        kept = a.take(np.arange(length - magnitude), axis)
        pad = np.zeros_like(a.take(np.arange(magnitude), axis))
        res = np.concatenate((pad, kept), axis)
    return res.reshape(a.shape) if restore_shape else res
from utils.decorators import timer, debug
from utils.task import Task
import numpy as np
from copy import deepcopy
import bisect
# Map amphipod letters to the integer codes stored in the grid
# (0 and 1 are reserved for walls and empty cells respectively).
CONVERT_TABLE = {
    "A": 2,
    "B": 3,
    "C": 4,
    "D": 5
}
# Energy cost per single step for each amphipod type.
COST_TABLE = {
    "A": 1,
    "B": 10,
    "C": 100,
    "D": 1000
}
class Amphipod:
    """A single amphipod: its type, grid position and move budget."""

    def __init__(self, kind: str, level: int, location: int):
        # level: 0 for the hallway, 1 for the first room space, 2 for the
        # second room space (deeper levels exist in part two).
        # location: horizontal index along the hallway.
        self.level = level
        self.location = location
        self.kind = kind
        self.num = CONVERT_TABLE[kind]
        self.cost_multiplier = COST_TABLE[kind]
        self.moves = 0

    def __repr__(self):
        return f"'{self.kind}, ({self.level}, {self.location})'"

    def __eq__(self, other: 'Amphipod') -> bool:
        ours = (self.num, self.level, self.location)
        theirs = (other.num, other.level, other.location)
        return ours == theirs

    def __hash__(self):
        return hash((self.num, self.level, self.location))

    def move_cost(self, to_level: int, to_location: int):
        """Energy cost of moving to (to_level, to_location)."""
        horizontal = abs(self.location - to_location)
        if self.level == 0 or to_level == 0 or horizontal == 0:
            # Hallway involved (or purely vertical): plain level difference.
            vertical = abs(self.level - to_level)
        else:
            # Room to room: climb out to the hallway, then descend again.
            vertical = self.level + to_level
        return (horizontal + vertical) * self.cost_multiplier

    def is_correct(self, room_locations, grid) -> bool:
        """
        :return: True if the amphipod is in the correct room without other types
        """
        # The amphipod must sit in its own room's column...
        if self.location != room_locations[self.num - 2]:
            return False
        # ...and every occupied cell below it must hold the same type.
        depth = self.level
        while grid[depth, self.location] != 0:
            if grid[depth, self.location] != self.num:
                return False
            depth += 1
        return True
class Map:
    """Burrow state: occupancy grid, amphipods, and cost bookkeeping.

    Grid cell encoding: 0 = wall, 1 = empty, 2 = A, 3 = B, 4 = C, 5 = D.
    Row 0 is the hallway; rows 1.. are room spaces.
    """

    def __init__(self, hallway_length: int, depth: int, room_positions: list, amphipods: list):
        # 0 = wall
        # 1 = empty
        # 2 = A
        # 3 = B
        # 4 = C
        # 5 = D
        self.depth = depth
        self.grid = np.zeros((depth+1, hallway_length), dtype=np.int8)
        self.grid[0] = 1  # the hallway starts out empty
        for amphipod in amphipods:
            self.grid[amphipod.level, amphipod.location] = amphipod.num
        self.amphipods = amphipods
        self.room_locations = room_positions
        self.hallway_length = hallway_length
        self.current_score = 0  # energy spent so far
        self.min_cost = self.minimum_cost()  # optimistic estimate of remaining energy

    def __lt__(self, other: 'Map') -> bool:
        # Ordering used by bisect.insort on the open list (A*-style priority).
        return self.score < other.score

    def __eq__(self, other: 'Map') -> bool:
        # Two maps are the same when all amphipods occupy the same positions.
        for amphipod in self.amphipods:
            if amphipod not in other.amphipods:
                return False
        return True

    def __hash__(self):
        # BUG FIX: the original hashed ``self.amphipods`` directly, but a
        # list is unhashable, so using a Map in a set/dict always raised
        # TypeError.  Hash the tuple of (hashable) amphipods instead.
        return hash(tuple(self.amphipods))

    @property
    def score(self) -> int:
        # f = g + h: energy spent plus a lower bound on the energy remaining.
        return self.current_score + self.min_cost

    def move(self, move):
        """Apply ``move`` = (amphipod, to_level, to_location) to this map."""
        if not self.valid_move(move):
            print("WHY AM I DOING AN INVALID MOVE!")
        move_amphipod = move[0]
        amphipod = None
        # The move may reference a deep-copied amphipod; find our own instance.
        for real_amphipod in self.amphipods:
            if move_amphipod.__eq__(real_amphipod):
                amphipod = real_amphipod
        self.grid[amphipod.level, amphipod.location] = 1
        self.current_score += amphipod.move_cost(move[1], move[2])
        amphipod.level = move[1]
        amphipod.location = move[2]
        amphipod.moves += 1
        self.grid[amphipod.level, amphipod.location] = amphipod.num
        self.min_cost = self.minimum_cost()

    def find_moves(self):
        """
        :return: All possible moves on this map (some dumb moves are filtered out)
        """
        moves = []
        for amphipod in self.amphipods:
            # Each amphipod moves at most twice: room -> hallway -> room.
            if amphipod.moves >= 2:
                continue
            if amphipod.is_correct(self.room_locations, self.grid):
                continue
            desired_room = self.room_locations[amphipod.num-2]
            # If there is an amphipod that can go to their room, just put it there immediately
            # Find moves to own spot (deepest free level first).
            # NOTE(review): the upper bound 4 is hard-coded rather than
            # derived from self.depth — works for both puzzle depths because
            # out-of-range levels fail the emptiness check, but confirm if
            # the map is ever generalized.
            for level in range(4, 0, -1):
                if (np.all(self.grid[1:level+1, desired_room] == 1)
                    and np.all(self.grid[level+1:self.depth, desired_room] == amphipod.num)):
                    if self.valid_move((amphipod, level, desired_room)):
                        return [(amphipod, level, desired_room)]
            # Amphipods in rooms can only go to the hallway
            if amphipod.level != 0:
                # Hallway moves
                for i in range(self.hallway_length):
                    if i in self.room_locations:
                        # May not stop directly above a room entrance.
                        continue
                    if self.valid_move((amphipod, 0, i)):
                        moves.append((amphipod, 0, i))
        return moves

    def valid_move(self, move: tuple) -> bool:
        """
        Check if a given move is valid on this map by checking in-between nodes for blocks
        :param move: Move to check
        :return: True if the move is valid, False otherwise
        """
        amphipod = move[0]
        level_to_go = move[1]
        desired_location = move[2]
        # Check hallway if it needs to be used
        if amphipod.location != desired_location:
            for i in range(min(amphipod.location, desired_location), max(amphipod.location, desired_location) + 1):
                # Occupied cells block the path, except the mover's own cell
                # when it starts in the hallway.
                if self.grid[0, i] != 1 and not (amphipod.level == 0 and amphipod.location == i):
                    return False
        # Check rooms to move to
        if level_to_go != 0:
            # All rooms must be empty between the entrance and level_to_go
            for level in range(level_to_go):
                if self.grid[level+1, desired_location] != 1:
                    return False
        # Check if the spaces above the amphipod are clear when it wants out
        # It always wants out when the desired location is different from this one
        if amphipod.location != desired_location:
            for level in range(amphipod.level):
                if self.grid[level, amphipod.location] != 1:
                    return False
        # Amphipods may only move twice
        if amphipod.moves >= 2:
            return False
        return True

    def is_done(self) -> bool:
        """
        :return: True if all amphipods are in their room, False otherwise
        """
        for amphipod in self.amphipods:
            desired_room = self.room_locations[amphipod.num - 2]
            if amphipod.level == 0 or amphipod.location != desired_room:
                return False
        return True

    def minimum_cost(self) -> int:
        """
        :return: The cost if every amphipod just went to their room
        """
        cost = 0
        for amphipod in self.amphipods:
            desired_room = self.room_locations[amphipod.num - 2]
            # Ignore correct amphipods
            if amphipod.is_correct(self.room_locations, self.grid):
                continue
            cost += amphipod.move_cost(to_level=1, to_location=desired_room)
        return cost
class Task23(Task):
    """Advent of Code 2021 day 23: sort amphipods into rooms at minimum energy."""
    # Task constants
    YEAR = 2021
    TASK_NUM = 23

    @staticmethod
    def postprocess(data: list, depth: int) -> Map:
        """Parse the raw puzzle grid into a Map with the given room depth."""
        # Room columns are the non-wall characters of input line 2; indices
        # are shifted by one to drop the outer wall.
        room_positions = [i-1 for i, char in enumerate(data[2]) if char != "#"]
        amphipods = []
        for i, row in enumerate(data):
            row = "".join(row)
            # Lower input rows lack the leading wall padding; re-add it so
            # column indices line up with the upper rows.
            if data[i][0] == "":
                row = "##" + row
            for j, char in enumerate(row):
                # Any non-wall, non-space, non-floor character is an amphipod.
                if char != "#" and char != " " and char != ".":
                    amphipods.append(Amphipod(char, i-1, j-1))
        data = Map(len(data[1]) - 2, depth, room_positions, amphipods)
        return data

    @debug
    @timer(YEAR, TASK_NUM)
    def part_1(self, data: list) -> int:
        # Part 1 uses rooms parsed with depth 3.
        current_map = self.postprocess(data, 3)
        return self.play_game(current_map)

    @staticmethod
    def play_game(current_map):
        """Best-first search over map states until all amphipods are home.

        States are kept sorted by ``score`` (spent cost + optimistic
        remaining cost); among equal scores the state with the highest
        spent cost is expanded first.
        """
        potential_maps = []
        seen = []
        counter = 0
        while not current_map.is_done():
            counter += 1
            seen.append(current_map)
            possible_moves = current_map.find_moves()
            for true_move in possible_moves:
                move = deepcopy(true_move)
                new_map = deepcopy(current_map)
                new_map.move(move)
                # Skip states that were already expanded.
                if new_map in seen:
                    continue
                if new_map in potential_maps:
                    other_map = [other for other in potential_maps if other == new_map][0]
                    if new_map.current_score >= other_map.current_score:
                        continue
                    # else, the new_map is better than the other, so remove the other
                    potential_maps.remove(other_map)
                bisect.insort(potential_maps, new_map)
            # Find the map with the lowest minimal score and the highest current score
            current_highest = 0
            highest_id = 0
            min_score = potential_maps[0].score
            for i, maps in enumerate(potential_maps):
                if maps.score != min_score:
                    break
                if current_highest < maps.current_score:
                    current_highest = maps.current_score
                    highest_id = i
            current_map = potential_maps.pop(highest_id)
        return current_map.current_score

    @debug
    @timer(YEAR, TASK_NUM)
    def part_2(self, data: list) -> int:
        """
        SOLVED EXERCISE BY HAND -> HARDCODED ANSWERS HERE
        The implementation of the game is very slow, but should return the correct answer
        """
        # Part 2 inserts two extra rows of amphipods between the originals.
        data.append(data[3])
        data[3] = "  #D#C#B#A#"
        data[4] = "  #D#B#A#C#"
        current_map = self.postprocess(data, 5)
        # The first amphipod's kind distinguishes the example input from the
        # real one; both answers were computed by hand.
        if current_map.amphipods[0].kind == "B":
            # Test
            return 44169
        else:
            # Task
            return 41121
        # return self.play_game(current_map)
if __name__ == "__main__":
# Load task
t = Task23()
# Run task
t.run_all() | |
# -*- coding: utf-8 -*-
"""SVD ROUTINES.
This module contains methods for thresholding singular values.
:Author: Samuel Farrens <samuel.farrens@cea.fr>
"""
import numpy as np
from scipy.linalg import svd
from scipy.sparse.linalg import svds
from modopt.base.transform import matrix2cube
from modopt.interface.errors import warn
from modopt.math.convolve import convolve
from modopt.signal.noise import thresh
def find_n_pc(u_vec, factor=0.5):
    """Find number of principal components.

    This method finds the minimum number of principal components required.

    Parameters
    ----------
    u_vec : numpy.ndarray
        Left singular vector of the original data
    factor : float, optional
        Factor for testing the auto correlation (default is ``0.5``)

    Returns
    -------
    int
        Number of principal components

    Raises
    ------
    ValueError
        Invalid left singular vector

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.linalg import svd
    >>> from modopt.signal.svd import find_n_pc
    >>> x = np.arange(18).reshape(9, 2).astype(float)
    >>> find_n_pc(svd(x)[0])
    1

    """
    if np.sqrt(u_vec.shape[0]) % 1:
        # Error-message typo fixed: "dimenion" -> "dimension".
        raise ValueError(
            'Invalid left singular vector. The size of the first '
            + 'dimension of ``u_vec`` must be perfect square.',
        )
    # Get the shape of the array.
    # BUG FIX: ``np.int`` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin ``int`` is the documented replacement.
    array_shape = np.repeat(int(np.sqrt(u_vec.shape[0])), 2)
    # Find the auto correlation of the left singular vector.
    u_auto = [
        convolve(
            elem.reshape(array_shape),
            np.rot90(elem.reshape(array_shape), 2),
        )
        for elem in u_vec.T
    ]
    # Return the required number of principal components: count the vectors
    # whose central auto-correlation value is a small enough fraction of the
    # total energy.
    return np.sum([
        (
            u_val[tuple(zip(array_shape // 2))] ** 2 <= factor
            * np.sum(u_val ** 2),
        )
        for u_val in u_auto
    ])
def calculate_svd(input_data):
    """Calculate Singular Value Decomposition.

    This method calculates the Singular Value Decomposition (SVD) of the
    input data using SciPy's LAPACK ``gesvd`` driver, returning thin
    (economy-size) factors.

    Parameters
    ----------
    input_data : numpy.ndarray
        Input data array, 2D matrix

    Returns
    -------
    tuple
        Left singular vector, singular values and right singular vector

    Raises
    ------
    TypeError
        For invalid data type

    """
    is_2d_array = isinstance(input_data, np.ndarray) and input_data.ndim == 2
    if not is_2d_array:
        raise TypeError('Input data must be a 2D np.ndarray.')
    return svd(
        input_data,
        full_matrices=False,
        lapack_driver='gesvd',
        check_finite=False,
    )
def svd_thresh(input_data, threshold=None, n_pc=None, thresh_type='hard'):
    """Threshold the singular values.

    This method thresholds the input data using singular value decomposition.

    Parameters
    ----------
    input_data : numpy.ndarray
        Input data array, 2D matrix
    threshold : float or numpy.ndarray, optional
        Threshold value(s) (default is ``None``)
    n_pc : int or str, optional
        Number of principal components, specify an integer value or ``'all'``
        (default is ``None``)
    thresh_type : {'hard', 'soft'}, optional
        Type of thresholding (default is ``'hard'``)

    Returns
    -------
    numpy.ndarray
        Thresholded data

    Raises
    ------
    ValueError
        For invalid n_pc value

    Examples
    --------
    >>> import numpy as np
    >>> from modopt.signal.svd import svd_thresh
    >>> x = np.arange(18).reshape(9, 2).astype(float)
    >>> svd_thresh(x, n_pc=1)
    array([[ 0.49815487,  0.54291537],
           [ 2.40863386,  2.62505584],
           [ 4.31911286,  4.70719631],
           [ 6.22959185,  6.78933678],
           [ 8.14007085,  8.87147725],
           [10.05054985, 10.95361772],
           [11.96102884, 13.03575819],
           [13.87150784, 15.11789866],
           [15.78198684, 17.20003913]])

    """
    less_than_zero = isinstance(n_pc, int) and n_pc <= 0
    str_not_all = isinstance(n_pc, str) and n_pc != 'all'
    if (
        (not isinstance(n_pc, (int, str, type(None))))
        or less_than_zero
        or str_not_all
    ):
        raise ValueError(
            'Invalid value for "n_pc", specify a positive integer value or '
            + '"all"',
        )
    # Get SVD of input data.
    u_vec, s_values, v_vec = calculate_svd(input_data)
    # Find the threshold if not provided.
    if isinstance(threshold, type(None)):
        # Find the required number of principal components if not specified.
        # (A leftover debug ``print('xxxx', ...)`` was removed here.)
        if isinstance(n_pc, type(None)):
            n_pc = find_n_pc(u_vec, factor=0.1)
        # If the number of PCs is too large use all of the singular values.
        if (
            (isinstance(n_pc, int) and n_pc >= s_values.size)
            or (isinstance(n_pc, str) and n_pc == 'all')
        ):
            n_pc = s_values.size
            warn('Using all singular values.')
        # Threshold at the smallest retained singular value.
        threshold = s_values[n_pc - 1]
    # Threshold the singular values.
    s_new = thresh(s_values, threshold, thresh_type)
    if np.all(s_new == s_values):
        warn('No change to singular values.')
    # Diagonalize the svd
    s_new = np.diag(s_new)
    # Return the thresholded data.
    return np.dot(u_vec, np.dot(s_new, v_vec))
def svd_thresh_coef_fast(
    input_data,
    threshold,
    n_vals=-1,
    extra_vals=5,
    thresh_type='hard',
):
    """Threshold the singular values coefficients.

    This method thresholds the input data by using singular value
    decomposition, but only computing the greatest ``n_vals`` values.

    Parameters
    ----------
    input_data : numpy.ndarray
        Input data array, 2D matrix
    threshold : float or numpy.ndarray
        Threshold value(s)
    n_vals: int, optional
        Number of singular values to compute.
        If ``-1``, start from ``min(input_data.shape) - 1``.
    extra_vals: int, optional
        If the number of values computed is not enough to perform
        thresholding, recompute by using ``n_vals + extra_vals``
        (default is ``5``)
    thresh_type : {'hard', 'soft'}
        Type of thresholding (default is ``'hard'``)

    Returns
    -------
    tuple
        The thresholded data (numpy.ndarray) and the estimated rank after
        thresholding (int)

    """
    if n_vals == -1:
        n_vals = min(input_data.shape) - 1
    ok = False
    while not ok:
        # ``svds`` returns the k largest singular values in ascending order,
        # so ``s_values[0]`` is the smallest one computed.
        (u_vec, s_values, v_vec) = svds(input_data, k=n_vals)
        # Stop once the smallest computed value is below the threshold (all
        # above-threshold values are captured) or no more can be computed.
        ok = (s_values[0] <= threshold or n_vals == min(input_data.shape) - 1)
        # BUG FIX: clamp to ``min(shape) - 1`` instead of ``min(shape)``;
        # ``scipy.sparse.linalg.svds`` requires k < min(A.shape), so the
        # original clamp could request an invalid k on the next iteration.
        n_vals = min(n_vals + extra_vals, min(input_data.shape) - 1)
    s_values = thresh(
        s_values,
        threshold,
        threshold_type=thresh_type,
    )
    rank = np.count_nonzero(s_values)
    # NOTE(review): when rank == 0, the ``-rank:`` slices select the whole
    # factors rather than none — confirm whether an all-thresholded input is
    # possible for callers.
    # Keep only the ``rank`` largest components (the trailing entries, since
    # svds orders values ascending) and rebuild the thresholded matrix.
    return (
        np.dot(
            u_vec[:, -rank:] * s_values[-rank:],
            v_vec[-rank:, :],
        ),
        rank,
    )
def svd_thresh_coef(input_data, operator, threshold, thresh_type='hard'):
    """Threshold the singular values coefficients.

    This method thresholds the input data using singular value decomposition.

    Parameters
    ----------
    input_data : numpy.ndarray
        Input data array, 2D matrix
    operator : class
        Operator class instance
    threshold : float or numpy.ndarray
        Threshold value(s)
    thresh_type : {'hard', 'soft'}
        Type of thresholding (default is ``'hard'``)

    Returns
    -------
    numpy.ndarray
        Thresholded data

    Raises
    ------
    TypeError
        If operator not callable

    """
    if not callable(operator):
        raise TypeError('Operator must be a callable function.')
    # Get SVD of data matrix
    u_vec, s_values, v_vec = calculate_svd(input_data)
    # Diagonalize s
    s_values = np.diag(s_values)
    # Compute coefficients
    a_matrix = np.dot(s_values, v_vec)
    # Get the shape of the array.
    # BUG FIX: ``np.int`` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin ``int`` is the documented replacement.
    array_shape = np.repeat(int(np.sqrt(u_vec.shape[0])), 2)
    # Compute threshold matrix: one norm per operator output image, repeated
    # across the coefficient columns.
    ti = np.array([
        np.linalg.norm(elem)
        for elem in operator(matrix2cube(u_vec, array_shape))
    ])
    threshold *= np.repeat(ti, a_matrix.shape[1]).reshape(a_matrix.shape)
    # Threshold coefficients.
    a_new = thresh(a_matrix, threshold, thresh_type)
    # Return the thresholded image.
    return np.dot(u_vec, a_new)
import numpy as np
import env
import os
from tensorflow.keras.utils import Sequence
from core.helpers.video import get_video_data_from_file
from typing import List, Tuple
class BatchGenerator(Sequence):
    """Keras ``Sequence`` yielding CTC-style training batches of videos.

    Each batch is padded up to ``batch_size`` by augmenting already-loaded
    videos with a vertical flip, so up to half of every batch consists of
    flipped duplicates interleaved with their originals.
    """

    # Per-channel statistics used to standardize frames; values come from env.
    __video_mean = np.array([env.MEAN_R, env.MEAN_G, env.MEAN_B])
    __video_std = np.array([env.STD_R, env.STD_G, env.STD_B])

    def __init__(
        self, video_paths: List[os.PathLike], align_hash: dict, batch_size: int
    ):
        super().__init__()
        self.video_paths = video_paths
        self.align_hash = align_hash
        self.batch_size = batch_size
        self.n_videos = len(self.video_paths)
        # Only about half the batch is loaded from disk; the rest is filled
        # with augmented (flipped) copies in __getitem__.
        self.n_videos_per_batch = int(np.ceil(self.batch_size / 2))
        self.generator_steps = int(np.ceil(self.n_videos / self.n_videos_per_batch))

    def __len__(self) -> int:
        # Number of batches per epoch.
        return self.generator_steps

    def __getitem__(self, idx: int) -> Tuple[list, list]:
        """Build batch ``idx``: load videos, then pad with flipped copies."""
        split_start = idx * self.n_videos_per_batch
        split_end = split_start + self.n_videos_per_batch
        if split_end > self.n_videos:
            split_end = self.n_videos
        videos_batch = self.video_paths[split_start:split_end]
        videos_taken = len(videos_batch)
        videos_to_augment = self.batch_size - videos_taken
        x_data = []
        y_data = []
        input_length = []
        label_length = []
        sentences = []
        for path in videos_batch:
            video_data, sentence, labels, length = self.get_data_from_path(path)
            x_data.append(video_data)
            y_data.append(labels)
            label_length.append(length)
            input_length.append(len(video_data))
            sentences.append(sentence)
            # While the batch still needs filling, append a flipped copy
            # immediately after the original (same labels/lengths).
            if videos_to_augment > 0:
                videos_to_augment -= 1
                f_video_data = self.flip_video(video_data)
                x_data.append(f_video_data)
                y_data.append(labels)
                label_length.append(length)
                input_length.append(len(video_data))
                sentences.append(sentence)
        batch_size = len(x_data)
        x_data = np.array(x_data)
        x_data = self.standardize_batch(x_data)
        y_data = np.array(y_data)
        input_length = np.array(input_length)
        label_length = np.array(label_length)
        sentences = np.array(sentences)
        # inputs = {
        #     "input": x_data,
        #     "labels": y_data,
        #     "input_length": input_length,
        #     "label_length": label_length,
        #     "sentences": sentences,
        # }
        # # dummy data for dummy loss function
        # outputs = {"ctc": np.zeros([batch_size])}
        inputs = [x_data, y_data, input_length, label_length]
        # dummy data for dummy loss function
        outputs = np.zeros((batch_size))
        return inputs, outputs

    def get_data_from_path(self, path: str) -> Tuple[np.ndarray, str, np.ndarray, int]:
        # Pair the video frames with their alignment (sentence, labels,
        # label length), looked up by the file's stem.
        align = self.align_hash[path.stem]
        return (
            get_video_data_from_file(path),
            align.sentence,
            align.labels,
            align.length,
        )

    @staticmethod
    def flip_video(video_data: np.ndarray) -> np.ndarray:
        return np.flip(
            video_data, axis=1
        )  # flip in the vertical axis because videos are flipped 90deg when passed to the model

    def standardize_batch(self, batch: np.ndarray) -> np.ndarray:
        # Per-channel standardization; the epsilon guards against a zero std.
        return (batch - self.__video_mean) / (self.__video_std + 1e-6)
# -*- coding: utf-8 -*-
"""CICID1.ipynb
Automatically generated by Colaboratory.
Original file is located at
    https://colab.research.google.com/drive/1q-T0VLplhSabpHZXApgXDZsoW7aG3Hnw
"""
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import time
from sklearn.metrics import accuracy_score
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
#for dirname, _, filenames in os.walk('/content/drive/My Drive/Colab Notebooks/kshield_project/dataset'):
#  for filename in filenames:
#    print(filename)
#print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
#/content/drive/My Drive/Colab Notebooks/kshield_project/dataset/cicids2017/MachineLearningCSV/MachineLearningCVE/Friday-WorkingHours-Afternoon-DDos.pcap_ISCX.csv
pd.set_option('display.float_format', '{:.5f}'.format)
# Load each daily CICIDS2017 flow capture from the "MachineLearningCVE" CSVs.
# NOTE(review): paths are hard-coded to a mounted Google Drive — confirm
# before running outside Colab.
df1=pd.read_csv("/content/drive/My Drive/Colab Notebooks/kshield_project/dataset/cicids2017/MachineLearningCSV/MachineLearningCVE/Friday-WorkingHours-Afternoon-DDos.pcap_ISCX.csv")
df2=pd.read_csv("/content/drive/My Drive/Colab Notebooks/kshield_project/dataset/cicids2017/MachineLearningCSV/MachineLearningCVE/Friday-WorkingHours-Afternoon-PortScan.pcap_ISCX.csv")
df3=pd.read_csv("/content/drive/My Drive/Colab Notebooks/kshield_project/dataset/cicids2017/MachineLearningCSV/MachineLearningCVE/Friday-WorkingHours-Morning.pcap_ISCX.csv")
df4=pd.read_csv("/content/drive/My Drive/Colab Notebooks/kshield_project/dataset/cicids2017/MachineLearningCSV/MachineLearningCVE/Monday-WorkingHours.pcap_ISCX.csv")
df5=pd.read_csv("/content/drive/My Drive/Colab Notebooks/kshield_project/dataset/cicids2017/MachineLearningCSV/MachineLearningCVE/Thursday-WorkingHours-Afternoon-Infilteration.pcap_ISCX.csv")
df6=pd.read_csv("/content/drive/My Drive/Colab Notebooks/kshield_project/dataset/cicids2017/MachineLearningCSV/MachineLearningCVE/Thursday-WorkingHours-Morning-WebAttacks.pcap_ISCX.csv")
df7=pd.read_csv("/content/drive/My Drive/Colab Notebooks/kshield_project/dataset/cicids2017/MachineLearningCSV/MachineLearningCVE/Tuesday-WorkingHours.pcap_ISCX.csv")
df8=pd.read_csv("/content/drive/My Drive/Colab Notebooks/kshield_project/dataset/cicids2017/MachineLearningCSV/MachineLearningCVE/Wednesday-workingHours.pcap_ISCX.csv")
df1.head()
#import xgboost as xgb
# Merge all daily captures into one frame and free the per-day frames.
df=pd.concat([df1,df2,df3,df4,df5,df6,df7,df8])
del df1,df2,df3,df4,df5,df6,df7,df8
#df2=df[df.columns[6:-1]]
#df=df2
#df2=[]
#del df2
print(df)
# Drop any row with a missing value before the per-column cleanup.
df=df.dropna( axis=0, how='any')
df.info()
# Show the class distribution: count samples per label (the last column).
label_list = list(df[df.columns[-1]])
label_type = list(set(label_list))
total=0
for i in label_type:
    num = label_list.count(i)
    print("%27s\t%d" % (i, num))
    total += num
print("total:",total)
#df=df.replace(',,', np.nan, inplace=False)
# Drop the duplicated header column, then normalize the known-dirty columns.
# The CICIDS2017 CSVs contain "Infinity", ",," and NaN tokens in several
# numeric columns; all are mapped to 0 before casting to float64.  This loop
# replaces ~50 copy-pasted per-column lines with identical semantics.
df=df.drop(columns=[' Fwd Header Length.1'], axis=1, inplace=False)
df.replace("Infinity", 0, inplace=True)
# Column names keep the dataset's original (leading-space) spelling.
dirty_columns = [
    'Flow Bytes/s',
    ' Flow Packets/s',
    'Bwd Avg Bulk Rate',
    ' Bwd Avg Packets/Bulk',
    ' Bwd Avg Bytes/Bulk',
    ' Fwd Avg Bulk Rate',
    ' Fwd Avg Packets/Bulk',
    'Fwd Avg Bytes/Bulk',
    ' CWE Flag Count',
    ' Bwd URG Flags',
    ' Bwd PSH Flags',
    ' Fwd URG Flags',
]
for col in dirty_columns:
    df[col].replace("Infinity", 0, inplace=True)
    df[col].replace(",,", 0, inplace=True)
    df[col].replace(np.nan, 0, inplace=True)
    df[col] = df[col].astype("float64")
# Catch any stragglers left in other columns.
df.replace('Infinity', 0.0, inplace=True)
df.replace('NaN', 0.0, inplace=True)
df.head()
df.head()
X=df[df.columns[0:-1]]
y=df[df.columns[-1]]
del df
from scipy import stats
cols = list(X.columns)
for col in cols:
X[col] = stats.zscore(X[col])
features=X.columns
#features=[" Fwd Packet Length Max"," Flow IAT Std"," Fwd Packet Length Std" ,"Fwd IAT Total",' Flow Packets/s', " Fwd Packet Length Mean", "Flow Bytes/s", " Flow IAT Mean", " Bwd Packet Length Mean", " Flow IAT Max", " Bwd Packet Length Std", ]
X=X[features].copy()
X.dropna(axis=1, inplace=True)
X.head()
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test=train_test_split(X,y,test_size=0.2, random_state=10)
y_test_arr=y_test.to_numpy() #as_matrix()
def calculate_metrics(true, false, not_detected):
    """Print and return binary confusion metrics, treating 'BENIGN' as negative.

    Parameters:
        true: per-class counts of correct predictions.
        false: per-class counts of wrong predictions, keyed by predicted class.
        not_detected: per-class counts of wrong predictions, keyed by actual class.

    Returns:
        dict with true_positive, true_negative, false_positive, false_negative,
        accuracy, recall and precision (the string 'undefined' when a
        denominator is 0).  The return value is new and backward compatible;
        the printed report is unchanged.
    """
    true_positive = 0
    true_negative = 0
    false_positive = 0
    false_negative = 0
    if 'BENIGN' in true:
        # Every correct non-BENIGN prediction is a true positive.
        true_positive = sum(true.values()) - true['BENIGN']
        true_negative = true['BENIGN']
    if 'BENIGN' in false:
        # Attack traffic predicted as BENIGN.
        false_negative = false['BENIGN']
    if 'BENIGN' in not_detected:
        # BENIGN traffic predicted as an attack.
        false_positive = not_detected['BENIGN']
    if true_positive + false_positive == 0:
        precision = "undefined"
    else:
        precision = (true_positive / (true_positive + false_positive)) * 100
    if true_positive + false_negative == 0:
        recall = "undefined"
    else:
        recall = (true_positive / (true_positive + false_negative)) * 100
    accuracy = ((true_positive + true_negative)
                / (true_positive + true_negative + false_positive + false_negative)) * 100
    print("========================================")
    print(" True positives :: ", true_positive)
    print(" True negatives :: ", true_negative)
    print(" False positive :: ", false_positive)
    print(" False negative :: ", false_negative)
    print(" Accuracy :: ", accuracy)
    print(" Recall :: ", recall)
    print( " Precision :: ", precision)
    print("========================================")
    return {
        'true_positive': true_positive,
        'true_negative': true_negative,
        'false_positive': false_positive,
        'false_negative': false_negative,
        'accuracy': accuracy,
        'recall': recall,
        'precision': precision,
    }
def calculate_confusion_matrix(y_test_arr, yhat):
    """Tally correct and incorrect predictions per class, then report metrics.

    Builds three per-class counters and passes them to ``calculate_metrics``:
    ``true`` (correct predictions), ``false`` (errors keyed by the predicted
    class) and ``not_detected`` (errors keyed by the actual class).
    """
    true = {}
    false = {}
    not_detected = {}
    for idx in range(len(y_test_arr)):
        actual = y_test_arr[idx]
        predicted = yhat[idx]
        if actual == predicted:
            true[actual] = true.get(actual, 0) + 1
        else:
            # Misclassification: count it against the predicted class and
            # record that the actual class went undetected.
            false[predicted] = false.get(predicted, 0) + 1
            not_detected[actual] = not_detected.get(actual, 0) + 1
    calculate_metrics(true, false, not_detected)
from sklearn.neighbors import KNeighborsClassifier
from sklearn.feature_selection import SelectFromModel
t1=time.time()
# NOTE(review): SelectFromModel requires an estimator exposing ``coef_`` or
# ``feature_importances_`` after fitting; KNeighborsClassifier provides
# neither, so this selector fit is expected to fail — confirm intent.
knnselector = SelectFromModel(estimator=KNeighborsClassifier()).fit(X, y)
columns = X.columns
knn_selected = columns[knnselector.get_support()]
print(knn_selected)
# Fit KNN on the full (z-scored) training split and report accuracy plus the
# BENIGN-vs-attack confusion summary.
knn=KNeighborsClassifier()
model_knn=knn.fit(X_train,y_train)
yhat=model_knn.predict(X_test)
print("knn accuracy is : ", accuracy_score(y_test, yhat))
calculate_confusion_matrix(y_test_arr,yhat)
t2=time.time()
print(" time for knn :: ", (t2-t1)/60 , " minutes")
#knn
'''
t1=time.time()
knn=KNeighborsClassifier()
model_knn=knn.fit(X_train,y_train)
yhat=model_knn.predict(X_test)
print("knn accuracy is : ", accuracy_score(y_test, yhat))
calculate_confusion_matrix(y_test_arr,yhat)
t2=time.time()
print(" time for knn :: ", (t2-t1)/60 , " minutes")
'''
#xgboost
'''
from xgboost import XGBClassifier
t1=time.time()
xgb=XGBClassifier()
model_xgb=xgb.fit(X_train, y_train)
yhat=model_xgb.predict(X_test)
print("xgb accuracy is : ", accuracy_score(y_test, yhat))
calculate_confusion_matrix(y_test_arr,yhat)
t2=time.time()
print(" time for xgb :: ", (t2-t1)/60 , " minutes")
'''
#randomforest
'''
from sklearn.ensemble import RandomForestClassifier
t1=time.time()
randromforest=RandomForestClassifier()
model_randromforest=randromforest.fit(X_train, y_train)
yhat=model_randromforest.predict(X_test)
print("randromforest accuracy is : ", accuracy_score(y_test, yhat))
calculate_confusion_matrix(y_test_arr,yhat)
t2=time.time()
print(" time for randromforest :: ", (t2-t1)/60 , " minutes")
'''
#for i in count:
#for i in range(1,len(X_train.columns)+1):
  #knn=KNeighborsClassifier(n_neighbors=i)
  #model_knn=knn.fit(X_train,y_train)
  #yhat=model_knn.predict(X_test)
  #print("for " , i, " as K, accuracy is : ", accuracy_score(y_test, yhat))
  #t2=time.time()
  #print(" time for ", i ," k's :: ", (t2-t1)/60 , " minutes")
  #calculate_confusion_matrix(y_test_arr,yhat)
# -*- coding: utf-8 -*-
## @package npr_sfs.methods.lumo
#
# Lumo [Johnston et al. 2002].
# @author tody
# @date 2015/07/29
"""Usage: lumo.py [<input>] [-h] [-o] [-q]
<input> Input image.
-h --help Show this help.
-o --output Save output files. [default: False]
-q --quiet No GUI. [default: False]
"""
from docopt import docopt
import numpy as np
import cv2
import pyamg
from pyamg.gallery import laplacian
import matplotlib.pyplot as plt
from npr_sfs.io_util.image import loadAlpha, saveRGBA, saveGray, saveNormal
from npr_sfs.cv.normal import normalToColor
from npr_sfs.util.timer import timing_func
from npr_sfs.np.norm import normalizeVectors
from npr_sfs.plot.window import showMaximize
from npr_sfs.datasets.loader import dataFile
from npr_sfs.util.logger import getLogger
logger = getLogger(__name__)
## Silhouette normal from the alpha mask.
def computeSilhouetteNormal(A_8U, sigma=7.0):
    """Estimate silhouette normals from an 8-bit alpha mask.

    The mask is blurred (radius scaled by image width), the blurred alpha
    gives the z component, and the alpha gradient gives the (x, y)
    components, rescaled so that the in-plane magnitude is sqrt(1 - alpha).
    Returns a float32 (height, width, 3) normal image.
    """
    height, width = A_8U.shape[0], A_8U.shape[1]
    blurred = cv2.GaussianBlur(A_8U, (0, 0), width * sigma / 1024.0)
    alpha = (1.0 / 255.0) * np.float32(blurred)
    grad_x = cv2.Sobel(alpha, cv2.CV_64F, 1, 0, ksize=5)
    grad_y = cv2.Sobel(alpha, cv2.CV_64F, 0, 1, ksize=5)
    N_32F = np.zeros((height, width, 3), dtype=np.float32)
    N_32F[:, :, 0] = -grad_x
    N_32F[:, :, 1] = grad_y
    N_32F[:, :, 2] = alpha
    # gradient magnitude and desired in-plane normal magnitude
    grad_norm = np.sqrt(grad_x * grad_x + grad_y * grad_y)
    target_xy_norm = np.sqrt(1.0 - alpha)
    # rescale the gradient-derived components (epsilon avoids div-by-zero)
    weight = target_xy_norm / (0.001 + grad_norm)
    N_32F[:, :, 0] = weight * N_32F[:, :, 0]
    N_32F[:, :, 1] = weight * N_32F[:, :, 1]
    return N_32F
## Normal constraints from the alpha mask and the initial normal.
def normalConstraints(A_8U, N0_32F, alpha_th=20, w_sil=1e+10):
    """Build the linear system ``A x = b`` constraining the normal field.

    Interior normals are propagated by a Poisson system; pixels whose alpha
    is below ``alpha_th`` (the silhouette) are pinned to the initial normals
    ``N0_32F`` with the large weight ``w_sil``.

    :param A_8U: (h, w) uint8 alpha mask
    :param N0_32F: (h, w, 3) initial silhouette normals
    :param alpha_th: alpha threshold separating silhouette from interior
    :param w_sil: soft-constraint weight for silhouette pixels
    :return: (A, b) — CSR matrix of shape (h*w, h*w) and RHS of shape (h*w, 3)
    """
    h, w = A_8U.shape
    L = laplacian.poisson((h, w))
    L_lil = L.tolil()
    A_flat = A_8U.flatten()
    # FIX: np.where returns a tuple; take the index array explicitly instead
    # of relying on iterating the 1-element tuple as before.
    sil_ids = np.where(A_flat < alpha_th)[0]
    # pin silhouette pixels: overwrite the diagonal with the constraint weight
    L_lil[sil_ids, sil_ids] = w_sil
    A = L_lil.tocsr()
    # FIX: copy — reshape may return a view, and zeroing the interior below
    # previously clobbered the caller's N0_32F (shown later as "Initial Normal").
    N0_flat = N0_32F.reshape(h * w, 3).copy()
    N0_flat[A_flat > alpha_th, :] = 0.0
    b_all = w_sil * N0_flat
    b = np.zeros(b_all.shape)
    b[A_flat < alpha_th, :] = b_all[A_flat < alpha_th, :]
    return A, b
def solveMG(A, b):
    """Solve ``A x = b`` column-by-column with an algebraic multigrid solver."""
    solver = pyamg.smoothed_aggregation_solver(A)
    columns = [solver.solve(b[:, ci], tol=1e-10) for ci in range(3)]
    return np.column_stack(columns)
def estimateNormal(A_8U):
    """Estimate a dense normal map from an alpha mask.

    :param A_8U: (h, w) uint8 alpha mask
    :return: (N0_32F, N_32F) — the initial silhouette normals and the
        solved, normalized normal field, both of shape (h, w, 3).
    """
    h, w = A_8U.shape
    N0_32F = computeSilhouetteNormal(A_8U)
    A, b = normalConstraints(A_8U, N0_32F)
    solved = normalizeVectors(solveMG(A, b))
    return N0_32F, solved.reshape(h, w, 3)
def showResult(A_8U, N0_32F, N_32F):
    """Show the alpha mask, initial normals and estimated normals side by side."""
    logger.info("showResult")
    panels = [
        ('Alpha Mask', A_8U),
        ('Initial Normal', normalToColor(N0_32F)),
        ('Estimated Normal', normalToColor(N_32F, A_8U)),
    ]
    for subplot_id, (title, image) in enumerate(panels, start=131):
        plt.subplot(subplot_id)
        plt.title(title)
        plt.imshow(image)
    showMaximize()
def saveResult(input_file, A_8U, N_32F):
    """Save the estimated normal map next to the input image.

    The output path is derived from ``input_file`` by inserting ``_N``
    before the extension (``foo.png`` -> ``foo_N.png``).
    """
    import os
    logger.info("saveResult")
    # FIX: use splitext instead of str.replace(".png", ...) — the old code
    # only worked for .png inputs and would otherwise write N_file equal to
    # input_file, silently overwriting the input image.
    root, ext = os.path.splitext(input_file)
    N_file = root + "_N" + ext
    saveNormal(N_file, N_32F, A_8U)
def main(input_file, output_file, quiet):
    """Run the Lumo pipeline: load the alpha mask, estimate normals,
    optionally save the result, and (unless quiet) display it."""
    A_8U = loadAlpha(input_file)
    N0_32F, N_32F = estimateNormal(A_8U)
    if output_file:
        saveResult(input_file, A_8U, N_32F)
    if not quiet:
        showResult(A_8U, N0_32F, N_32F)
if __name__ == '__main__':
    # Parse command-line options (see the module docstring for usage).
    args = docopt(__doc__)
    if args['<input>']:
        input_file = args['<input>']
    else:
        # fall back to the bundled "ThreeBox" sample image
        input_file = dataFile("ThreeBox")
    output_file = args['--output']
    quiet = args['--quiet']
    main(input_file, output_file, quiet)
"""
This module contains an interface to the index files provided
by the GDAC. It is related to the :module:`argopandas.netcdf`
module in that there is an index subclass for each
:class:`argopandas.netcdf.NetCDFWrapper` subclass. Indexes
are ``pandas.DataFrame`` subclasses with a few accessors
that load data from each.
"""
import os
from typing import Union, Iterable
import numpy as np
import pandas as pd
from .netcdf import MetaNetCDF, NetCDFWrapper, ProfNetCDF, TechNetCDF, TrajNetCDF
from .progress import guess_progressor
from . import path
from . import _geo
class DataFrameIndex(pd.DataFrame):
    """
    A representation of a ``pandas.DataFrame`` whose ``file`` column
    represents a path to a NetCDF file on the GDAC. These objects
    are created by subsetting the global indexes (e.g., ``argopandas.prof``).
    """
    # needed to get the mirror passed on to subsets
    # https://pandas.pydata.org/pandas-docs/stable/development/extending.html#subclassing-pandas-data-structures
    _metadata = pd.DataFrame._metadata + ['_mirror']

    def __init__(self, *args, _mirror=None, **kwargs):
        # `_mirror` is the data source used to fetch NetCDF files; it is
        # propagated to subsets via `_metadata` above.
        super().__init__(*args, **kwargs)
        self._mirror = _mirror

    @property
    def _constructor(self):
        # pandas hook so that subsetting returns this subclass, not DataFrame
        return type(self)

    def _netcdf_wrapper(self, src):
        # overridden in subclasses to wrap `src` in the appropriate NetCDF type
        return NetCDFWrapper(src)

    def _data_frame_along(self, attr, vars=None):
        # Read `attr` from every file in the index and concatenate the
        # resulting data frames, keyed by file path.
        file = self['file']
        if len(file) == 0:
            return self[['file']].iloc[[]]
        # prepare the mirror
        self._mirror.prepare(['dac/' + item for item in file])
        # collect the keys and the individual data frames
        objs = []
        keys = []
        message = f"Reading {len(file)} {'files' if len(file) != 1 else 'file'}"
        pb = guess_progressor(len(file), init_message=message)
        # slightly different pattern if we need to pass 'vars' along:
        # call object.attr_(vars=vars) instead of object.attr
        with pb:
            for item in file:
                pb.bump(message=os.path.basename(item))
                nc = self._netcdf_wrapper(self._mirror.netcdf_dataset_src('dac/' + item))
                val = getattr(nc, attr) if vars is None else getattr(nc, attr + '_')(vars=vars)
                objs.append(val)
                keys.append(item)
        # combine them, adding a `file` index as a level in the multi-index
        return pd.concat(objs, keys=keys, names=["file"])

    @property
    def info(self) -> pd.DataFrame:
        """
        Combine the :attr:`argopandas.netcdf.NetCDFWrapper.info` table for
        the files in this index.
        """
        return self.info_()

    def info_(self, vars: Union[None, str, Iterable[str]] = None) -> pd.DataFrame:
        """
        Combine the :attr:`argopandas.netcdf.NetCDFWrapper.info` table for
        the files in this index, selecting specific variables.

        :param vars: A variable, an iterable of variables, or ``None``
            to select all possible variables.
        """
        return self._data_frame_along('info', vars=vars)

    def __assert_columns(self, *cols):
        # raise a ValueError naming every required column that is missing
        missing_cols = [col for col in cols if col not in self]
        if missing_cols:
            missing_cols_lab = ', '.join(f"'{col}'" for col in missing_cols)
            raise ValueError(f"Index is missing required columns: {missing_cols_lab}")

    def subset_data_mode(self, data_mode) -> pd.DataFrame:
        """
        Return the subset of this index corresponding to the specified
        ``data_mode``.

        :param data_mode: One of 'R', 'D', 'realtime' or 'delayed'
        """
        self.__assert_columns('file')
        return self[path.is_data_mode(self['file'], data_mode)]

    def subset_float(self, floats) -> pd.DataFrame:
        """
        Return the subset of this index corresponding to the specified
        ``floats``.

        :param floats: An integer, string, or iterable of those
            representing the float identifier(s).
        """
        self.__assert_columns('file')
        return self[path.is_float(self['file'], floats)]

    def subset_direction(self, direction) -> pd.DataFrame:
        """
        Return the subset of this index corresponding to the specified
        ``direction``.

        :param direction: 'ascending', 'descending', 'asc', or 'desc'
        """
        self.__assert_columns('file')
        direction = direction.lower()
        if direction in ('ascending', 'asc'):
            return self[~path.is_descending(self['file'])]
        elif direction in ('descending', 'desc'):
            return self[path.is_descending(self['file'])]
        else:
            raise ValueError("`direction` must be one of '(asc)ending' or '(desc)ending'")

    def subset_parameter(self, parameters) -> pd.DataFrame:
        """
        Return the subset of this index containing
        one or more of the parameters specified.

        :param parameters: A string or iterable of strings containing
            the parameters of interest.
        """
        self.__assert_columns('parameters')
        # build a regex that matches any requested parameter as a whole word
        if isinstance(parameters, str):
            parameters = r'\b' + parameters.upper() + r'\b'
        else:
            parameters = r'\b)|(\b'.join(p.upper() for p in parameters)
            parameters = r'(\b' + parameters + r'\b)'
        # an empty iterable produces this degenerate pattern; return nothing
        if parameters == r'(\b\b)':
            return self.iloc[[]]
        else:
            return self[self['parameters'].str.contains(parameters)]

    def subset_date(self, date_start=None, date_end=None) -> pd.DataFrame:
        """
        Return the subset of this index representing profiles collected between
        ``date_start`` and ``date_end``.

        :param date_start: The first date to include in the subset. Can be a
            pandas-style date abbreviation like '2021' or '2021-09' or a
            datetime object.
        :param date_end: The last date to include in the subset. Can be a
            pandas-style date abbreviation like '2021' or '2021-09' or a
            datetime object.
        """
        self.__assert_columns('date')
        if date_start is None and date_end is None:
            return self
        elif date_start is None and date_end is not None:
            return self[self['date'] <= date_end]
        elif date_start is not None and date_end is None:
            return self[self['date'] >= date_start]
        else:
            return self[(self['date'] >= date_start) & (self['date'] <= date_end)]

    def subset_updated(self, date_start=None, date_end=None) -> pd.DataFrame:
        """
        Return the subset of this index representing profiles updated between
        ``date_start`` and ``date_end``.

        :param date_start: The first date to include in the subset. Can be a
            pandas-style date abbreviation like '2021' or '2021-09' or a
            datetime object.
        :param date_end: The last date to include in the subset. Can be a
            pandas-style date abbreviation like '2021' or '2021-09' or a
            datetime object.
        """
        self.__assert_columns('date_update')
        if date_start is None and date_end is None:
            return self
        elif date_start is None and date_end is not None:
            return self[self['date_update'] <= date_end]
        elif date_start is not None and date_end is None:
            return self[self['date_update'] >= date_start]
        else:
            return self[(self['date_update'] >= date_start) & (self['date_update'] <= date_end)]

    def subset_radius(self, latitude, longitude, radius_km) -> pd.DataFrame:
        """
        Return the subset of this index representing profiles collected
        within ``radius_km`` of the position given by
        ``latitude``/``longitude``.

        :param latitude: The latitude of the target position.
        :param longitude: The longitude of the target position.
        :param radius_km: The number of kilometres within which profiles should
            be included.
        """
        self.__assert_columns('latitude', 'longitude')
        xy_target = {
            'x': _geo.normalize_lng(longitude),
            'y': _geo.normalize_lat(latitude)
        }
        xy = {
            'x': _geo.normalize_lng(self['longitude']),
            'y': _geo.normalize_lat(self['latitude'])
        }
        return self[_geo.geodist_lnglat(xy, xy_target) <= radius_km]

    # FIX: np.Inf was removed in NumPy 2.0; np.inf is the supported spelling
    def subset_rect(self, latitude_min=-np.inf, longitude_min=-np.inf,
                    latitude_max=np.inf, longitude_max=np.inf) -> pd.DataFrame:
        """
        Return the subset of this index representing profiles or trajectories
        within the bounding box. You can specify bounding boxes that wrap around
        the international date line by specifying
        ``longitude_min > longitude_max``.

        :param latitude_min: The minimum latitude to include
        :param longitude_min: The minimum longitude to include
        :param latitude_max: The maximum latitude to include
        :param longitude_max: The maximum longitude to include
        """
        r_target = {
            'xmin': _geo.normalize_lng(longitude_min),
            'ymin': _geo.normalize_lat(latitude_min),
            'xmax': _geo.normalize_lng(longitude_max),
            'ymax': _geo.normalize_lat(latitude_max)
        }
        r_target_west, r_target_east = _geo.rect_split_dateline(r_target)
        # point-location indexes (prof/tech/meta): each row is a lat/lng point
        try:
            self.__assert_columns('latitude', 'longitude')
            xy = {
                'x': _geo.normalize_lng(self['longitude']),
                'y': _geo.normalize_lat(self['latitude'])
            }
            contains = _geo.rect_contains(r_target_west, xy) | \
                _geo.rect_contains(r_target_east, xy)
            return self[contains]
        except ValueError:
            pass
        # bounding-box indexes (traj): each row is itself a lat/lng rectangle
        try:
            self.__assert_columns('latitude_max', 'longitude_max', 'latitude_min', 'longitude_min')
            r = {
                'xmin': _geo.normalize_lng(self['longitude_min']),
                'ymin': _geo.normalize_lat(self['latitude_min']),
                'xmax': _geo.normalize_lng(self['longitude_max']),
                'ymax': _geo.normalize_lat(self['latitude_max'])
            }
            # split across the dateline and check for all combinations for possible intersection
            r_west, r_east = _geo.rect_split_dateline(r)
            contains = _geo.rect_intersects(r_west, r_target_west) | \
                _geo.rect_intersects(r_west, r_target_east) | \
                _geo.rect_intersects(r_east, r_target_west) | \
                _geo.rect_intersects(r_east, r_target_east)
            return self[contains]
        except ValueError:
            pass
        raise ValueError("Index must have columns 'latitude' and 'longitude' or 'latitude+longitude_min+max'")
class ProfIndex(DataFrameIndex):
    """
    A subclass for an index of profile NetCDF files.
    """
    def _netcdf_wrapper(self, src):
        # wrap sources as profile NetCDFs so profile accessors are available
        return ProfNetCDF(src)

    @property
    def levels(self) -> pd.DataFrame:
        """
        Combine the :attr:`argopandas.netcdf.ProfNetCDF.levels` table for
        the files in this index.
        """
        return self.levels_()

    def levels_(self, vars: Union[None, str, Iterable[str]]=None) -> pd.DataFrame:
        """
        Combine the :attr:`argopandas.netcdf.ProfNetCDF.levels` table for
        the files in this index, selecting specific variables.

        :param vars: A variable, an iterable of variables, or ``None``
            to select all possible variables.
        """
        return self._data_frame_along('levels', vars=vars)

    @property
    def prof(self) -> pd.DataFrame:
        """
        Combine the :attr:`argopandas.netcdf.ProfNetCDF.prof` table for
        the files in this index.
        """
        return self.prof_()

    def prof_(self, vars: Union[None, str, Iterable[str]]=None) -> pd.DataFrame:
        """
        Combine the :attr:`argopandas.netcdf.ProfNetCDF.prof` table for
        the files in this index, selecting specific variables.

        :param vars: A variable, an iterable of variables, or ``None``
            to select all possible variables.
        """
        return self._data_frame_along('prof', vars=vars)

    @property
    def calib(self) -> pd.DataFrame:
        """
        Combine the :attr:`argopandas.netcdf.ProfNetCDF.calib` table for
        the files in this index.
        """
        return self._data_frame_along('calib')

    @property
    def param(self) -> pd.DataFrame:
        """
        Combine the :attr:`argopandas.netcdf.ProfNetCDF.param` table for
        the files in this index.
        """
        return self._data_frame_along('param')

    @property
    def history(self) -> pd.DataFrame:
        """
        Combine the :attr:`argopandas.netcdf.ProfNetCDF.history` table for
        the files in this index.
        """
        return self._data_frame_along('history')
class TrajIndex(DataFrameIndex):
    """
    A subclass for an index of trajectory NetCDF files.
    """
    def _netcdf_wrapper(self, src):
        # wrap sources as trajectory NetCDFs so trajectory accessors work
        return TrajNetCDF(src)

    @property
    def measurement(self) -> pd.DataFrame:
        """
        Combine the :attr:`argopandas.netcdf.TrajNetCDF.measurement` table for
        the files in this index.
        """
        return self.measurement_()

    def measurement_(self, vars: Union[None, str, Iterable[str]]=None) -> pd.DataFrame:
        """
        Combine the :attr:`argopandas.netcdf.TrajNetCDF.measurement` table for
        the files in this index, selecting specific variables.

        :param vars: A variable, an iterable of variables, or ``None``
            to select all possible variables.
        """
        return self._data_frame_along('measurement', vars=vars)

    @property
    def cycle(self) -> pd.DataFrame:
        """
        Combine the :attr:`argopandas.netcdf.TrajNetCDF.cycle` table for
        the files in this index.
        """
        return self.cycle_()

    def cycle_(self, vars: Union[None, str, Iterable[str]]=None) -> pd.DataFrame:
        """
        Combine the :attr:`argopandas.netcdf.TrajNetCDF.cycle` table for
        the files in this index, selecting specific variables.

        :param vars: A variable, an iterable of variables, or ``None``
            to select all possible variables.
        """
        return self._data_frame_along('cycle', vars=vars)

    @property
    def param(self) -> pd.DataFrame:
        """
        Combine the :attr:`argopandas.netcdf.TrajNetCDF.param` table for
        the files in this index.
        """
        return self._data_frame_along('param')

    @property
    def history(self) -> pd.DataFrame:
        """
        Combine the :attr:`argopandas.netcdf.TrajNetCDF.history` table for
        the files in this index.
        """
        return self._data_frame_along('history')
class TechIndex(DataFrameIndex):
    """
    A subclass for an index of tech NetCDF files.
    """
    def _netcdf_wrapper(self, src):
        # wrap sources as tech NetCDFs so the tech_param accessor is available
        return TechNetCDF(src)

    @property
    def tech_param(self) -> pd.DataFrame:
        """
        Combine the :attr:`argopandas.netcdf.TechNetCDF.tech_param` table for
        the files in this index.
        """
        return self._data_frame_along('tech_param')
class MetaIndex(DataFrameIndex):
    """
    A subclass for an index of meta NetCDF files.
    """
    def _netcdf_wrapper(self, src):
        # wrap sources as meta NetCDFs so the meta accessors are available
        return MetaNetCDF(src)

    @property
    def config_param(self) -> pd.DataFrame:
        """
        Combine the :attr:`argopandas.netcdf.MetaNetCDF.config_param` table for
        the files in this index.
        """
        return self._data_frame_along('config_param')

    @property
    def missions(self) -> pd.DataFrame:
        """
        Combine the :attr:`argopandas.netcdf.MetaNetCDF.missions` table for
        the files in this index.
        """
        return self._data_frame_along('missions')

    @property
    def trans_system(self) -> pd.DataFrame:
        """
        Combine the :attr:`argopandas.netcdf.MetaNetCDF.trans_system` table for
        the files in this index.
        """
        return self._data_frame_along('trans_system')

    @property
    def positioning_system(self) -> pd.DataFrame:
        """
        Combine the :attr:`argopandas.netcdf.MetaNetCDF.positioning_system` table for
        the files in this index.
        """
        return self._data_frame_along('positioning_system')

    @property
    def launch_config_param(self) -> pd.DataFrame:
        """
        Combine the :attr:`argopandas.netcdf.MetaNetCDF.launch_config_param` table for
        the files in this index.
        """
        return self._data_frame_along('launch_config_param')

    @property
    def sensor(self) -> pd.DataFrame:
        """
        Combine the :attr:`argopandas.netcdf.MetaNetCDF.sensor` table for
        the files in this index.
        """
        return self._data_frame_along('sensor')

    @property
    def param(self) -> pd.DataFrame:
        """
        Combine the :attr:`argopandas.netcdf.MetaNetCDF.param` table for
        the files in this index.
        """
        return self._data_frame_along('param')
#! /g/kreshuk/pape/Work/software/conda/miniconda3/envs/inferno/bin/python
import os
import json
import argparse
import h5py
from concurrent import futures
import numpy as np
from inferno.trainers.basic import Trainer
from inferno.utils.io_utils import yaml2dict
from skunkworks.inference import SimpleInferenceEngine
from neurofire.datasets.isbi2012.loaders.raw import RawVolumeHDF5
from inferno.extensions.metrics.arand import adapted_rand
from inferno.extensions.metrics.voi import voi
def load_volume(inference_config):
    """Build the raw HDF5 volume dataset described by the inference config."""
    config = yaml2dict(inference_config)
    volume_kwargs = config['volume_config']['raw']
    slicing_kwargs = config['slicing_config']
    return RawVolumeHDF5(**volume_kwargs, **slicing_kwargs)
def run_inference(project_dir, out_file, inference_config):
    """Load the best trained model from `project_dir`, run inference on the
    configured volume and optionally store the prediction to `out_file`.

    :param project_dir: trainer project directory (weights under 'Weights')
    :param out_file: hdf5 output path; '' disables saving
    :param inference_config: path to the inference yaml config
    :return: the predicted volume
    """
    print("Loading model...")
    weight_dir = os.path.join(project_dir, "Weights")
    model = Trainer().load(from_directory=weight_dir, best=True).model
    print("Loading dataset...")
    dataset = load_volume(inference_config)
    engine = SimpleInferenceEngine.from_config(inference_config, model)
    print("Run prediction...")
    prediction = engine.infer(dataset)
    if out_file != '':
        print("Save prediction to %s ..." % out_file)
        with h5py.File(out_file, 'w') as f:
            f.create_dataset('data', data=prediction, compression='gzip')
    return prediction
def cc_segmenter(prediction, thresholds=(.9, .925, .95, .975, .99), invert=True):
    """Connected-component segmentations at several thresholds.

    :param prediction: affinity prediction array
    :param thresholds: iterable of thresholds, one segmentation each
        (immutable tuple default — was a shared mutable list)
    :param invert: invert the prediction (1 - prediction) first
    :return: list of segmentations, one per threshold
    """
    from affogato.segmentation import connected_components
    if invert:
        prediction = 1. - prediction
    return [connected_components(prediction, thresh)[0]
            for thresh in thresholds]
def zws_segmenter(prediction, thresholds=(0.5,), invert=True):
    """Z-watershed segmentations at several thresholds.

    :param prediction: affinity prediction array
    :param thresholds: iterable of merge thresholds, one segmentation each
        (immutable tuple default — was a shared mutable list)
    :param invert: invert the prediction (1 - prediction) first
    :return: list of segmentations, one per threshold
    """
    from affogato.segmentation import compute_zws_segmentation
    if invert:
        prediction = 1. - prediction
    # parameters that are not exposed
    lower_thresh = 0.2
    higher_thresh = 0.98
    size_thresh = 25
    return [compute_zws_segmentation(prediction, lower_thresh, higher_thresh, thresh, size_thresh)
            for thresh in thresholds]
def mws_segmenter(prediction, offset_version='v2'):
    """Mutex-watershed segmentation of an affinity prediction.

    :param prediction: affinity prediction (channels first)
    :param offset_version: 'v1' (default offsets) or 'v2' (mws offsets)
    :return: the mutex watershed segmentation
    """
    from affogato.segmentation import compute_mws_segmentation
    from train_affs import get_default_offsets, get_mws_offsets
    assert offset_version in ('v1', 'v2')
    offsets = get_default_offsets() if offset_version == 'v1' else get_mws_offsets()
    # FIX: work on a copy — the in-place inversion below previously mutated
    # the caller's array (a view of the full prediction when called from
    # `evaluate`), corrupting it for subsequent use.
    prediction = prediction.copy()
    # invert the lr channels
    prediction[:2] *= -1
    prediction[:2] += 1
    # TODO change this api
    return compute_mws_segmentation(prediction, offsets, 2, strides=[4, 4])
def cremi_score(seg, gt):
    """Return (cremi-score, vi-split, vi-merge, adapted-rand) for `seg` vs `gt`."""
    assert seg.shape == gt.shape
    rand_score = 1. - adapted_rand(seg, gt)[0]
    vi_split, vi_merge = voi(seg, gt)
    # cremi score: geometric mean of total VI and the adapted-rand score
    cremi = np.sqrt((vi_split + vi_merge) * rand_score)
    return cremi, vi_split, vi_merge, rand_score
def evaluate(prediction, algo='cc'):
    """Evaluate `prediction` against the vnc ground-truth, slice by slice.

    :param prediction: affinity prediction, shape (channels, z, y, x)
    :param algo: segmentation algorithm: 'cc', 'mws' or 'zws'
    :return: mean (cremi-score, vi-split, vi-merge, rand) over z-slices
    """
    # get the segmentation algorithm
    if algo == 'cc':
        segmenter = cc_segmenter
    elif algo == 'mws':
        # TODO expose offset version somehow
        segmenter = mws_segmenter
    elif algo == 'zws':
        # FIX: was `zws_segemnter` (typo) -> NameError whenever algo == 'zws'
        segmenter = zws_segmenter
    else:
        raise NotImplementedError('Algorithm %s not implemented' % algo)
    gt_path = '/g/kreshuk/data/isbi2012_challenge/vnc_train_volume.h5'
    # open read-only explicitly; the implicit default mode is deprecated in h5py
    with h5py.File(gt_path, 'r') as f:
        gt = f['volumes/labels/neuron_ids_3d'][:]
    assert gt.shape == prediction.shape[1:]

    def eval_z(z):
        # score a single z-slice; for list-returning segmenters keep the best
        gtz = gt[z]
        seg = segmenter(prediction[:, z])
        if isinstance(seg, list):
            score = [cremi_score(sg, gtz) for sg in seg]
            max_index = np.argmax([sc[0] for sc in score])
            score = score[max_index]
        else:
            score = cremi_score(seg, gtz)
        return score

    with futures.ThreadPoolExecutor(5) as tp:
        tasks = [tp.submit(eval_z, z) for z in range(prediction.shape[1])]
        scores = np.mean([t.result() for t in tasks], axis=0)
    return scores
def view_res(prediction):
    """Display the raw test volume alongside the prediction in volumina."""
    from cremi_tools.viewer.volumina import view
    raw_path = '/home/constantin/Work/neurodata_hdd/isbi12_data/isbi2012_test_volume.h5'
    with h5py.File(raw_path, 'r') as f:
        raw = f['volumes/raw'][:]
    # viewer expects channels last
    channels_last = prediction.transpose((1, 2, 3, 0))
    view([raw, channels_last])
def main(project_dir, out_file, inference_config, key,
         algorithm='cc', view_result=False):
    # Run inference, optionally visualize, then evaluate and record the
    # scores in ./results.json under `key`.
    out = run_inference(project_dir, out_file, inference_config)
    if view_result:
        view_res(out)
    # 'no' or '' skips evaluation entirely
    if algorithm in ('no', ''):
        return
    score = evaluate(out, algorithm)
    # non-default algorithms get a suffixed result key
    if algorithm != 'cc':
        key += '_' + algorithm
    if os.path.exists('results.json'):
        with open('results.json') as f:
            results = json.load(f)
        # refuse to clobber an existing result entry
        if key in results:
            raise RuntimeError("Key %s is already in results, will not override !" % key)
    else:
        results = {}
    results[key] = {'cremi-score': score[0],
                    'vi-split': score[1],
                    'vi-merge': score[2],
                    'rand': score[3]}
    with open('results.json', 'w') as f:
        json.dump(results, f, sort_keys=True, indent=4)
def set_device(device):
    """Restrict CUDA to the given device via the CUDA_VISIBLE_DEVICES env var."""
    print("Setting cuda devices to", device)
    device_str = str(device)
    os.environ['CUDA_VISIBLE_DEVICES'] = device_str
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('project_directory', type=str)
    parser.add_argument('result_key', type=str)
    parser.add_argument('--out_file', type=str, default='')
    parser.add_argument('--inference_config', type=str, default='template_config/inf_config.yml')
    # segmentation algorithm used for evaluation: 'cc', 'mws', 'zws',
    # or 'no'/'' to skip evaluation
    parser.add_argument('--algorithm', type=str, default='cc')
    parser.add_argument('--view_result', type=int, default=0)
    parser.add_argument('--device', type=int, default=0)
    args = parser.parse_args()
    # restrict visible CUDA devices before any GPU work happens
    if args.device != 0:
        set_device(args.device)
    main(args.project_directory, args.out_file, args.inference_config, args.result_key, args.algorithm,
         bool(args.view_result))
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve
def gain_plot(y_actual, y_pred):
    """Returns a lift chart or gains plot against True Positive Rate vs False Positive Rate"""
    f, ax = plt.subplots()
    fpr, tpr, _ = roc_curve(y_actual, y_pred)
    # FIX: pass data as keywords — positional x/y for sns.lineplot were
    # deprecated in seaborn 0.12 and now raise a TypeError
    sns.lineplot(x=fpr, y=tpr, ax=ax)
    # 45-degree reference line (random-model baseline)
    diag = np.linspace(0, 1, 10)
    sns.lineplot(x=diag, y=diag, ax=ax)
    ax.set(xlabel='False Positive Rate', ylabel='True Positive Rate', title='Gains Plot / Lift Chart')
    return f
def _trend(Y):
y = np.array(Y)
x = np.arange(0, len(y))
z = np.polyfit(x,y,1)
trend_line = z[1] + z[0]*(x+1)
return trend_line
def iv_plot(df, var_name=None, suffix='_dev'):
    """Returns an IV plot for a specified variable

    :param df: binned IV summary table
    :param var_name: variable to plot; ``None`` plots `df` as provided
    :param suffix: column suffix identifying the sample (e.g. '_dev', '_val')
    """
    p_suffix = suffix.replace('_','').upper()
    cols = ['var_cuts_string'+suffix, 'ln_odds'+suffix, 'resp_rate'+suffix, 'iv'+suffix]
    # FIX: copy the frame/slice — adding the trend column below previously
    # mutated the caller's df (and raised SettingWithCopyWarning on slices)
    sub_df = df.copy() if var_name is None else df.loc[df.var_name==var_name, cols].copy()
    sub_df['resp_rate_trend'+suffix] = _trend(sub_df['resp_rate'+suffix])
    iv_val = round(sub_df['iv'+suffix].sum(), 4)
    f, ax = plt.subplots()
    ax2 = ax.twinx()
    sns.lineplot(x='var_cuts_string'+suffix, y='resp_rate'+suffix, data=sub_df, color='red', ax=ax)
    sns.lineplot(x='var_cuts_string'+suffix, y='resp_rate_trend'+suffix, data=sub_df, color='red', linestyle='--', ax=ax)
    sns.lineplot(x='var_cuts_string'+suffix, y='ln_odds'+suffix, data=sub_df, color='darkgreen', ax=ax2)
    ax.set_xticklabels(list(sub_df['var_cuts_string'+suffix]), rotation=45, ha='right')
    ax.set(xlabel='Variable Bins', ylabel=f'Resp Rate ({p_suffix})', title=f'IV of {var_name} ({iv_val})')
    ax2.set(ylabel=f'Log Odds ({p_suffix})')
    ax.legend(handles=[l for a in [ax, ax2] for l in a.lines], labels=[f'Resp Rate ({p_suffix})', f'Resp Rate Trend ({p_suffix})', f'Log Odds ({p_suffix})'], loc=0)
    return f
def csi_plot(df, var_name):
    """Returns a CSI plot for a specified variable

    :param df: binned summary table with dev and val response-rate columns
    :param var_name: variable to plot
    """
    # FIX: copy the slice — adding trend columns previously triggered
    # SettingWithCopyWarning and could mutate the caller's frame
    sub_df = df.loc[df.var_name==var_name, ['var_cuts_string_dev','resp_rate_dev','resp_rate_val']].copy()
    sub_df['resp_rate_trend_dev'] = _trend(sub_df['resp_rate_dev'])
    sub_df['resp_rate_trend_val'] = _trend(sub_df['resp_rate_val'])
    f, ax = plt.subplots()
    sns.lineplot(x='var_cuts_string_dev', y='resp_rate_dev', data=sub_df, color='red')
    sns.lineplot(x='var_cuts_string_dev', y='resp_rate_trend_dev', data=sub_df, color='red', linestyle='--')
    sns.lineplot(x='var_cuts_string_dev', y='resp_rate_val', data=sub_df, color='darkgreen')
    sns.lineplot(x='var_cuts_string_dev', y='resp_rate_trend_val', data=sub_df, color='darkgreen', linestyle='--')
    ax.set_xticklabels(list(sub_df['var_cuts_string_dev']), rotation=45, ha='right')
    ax.set(xlabel='Variable Bins', ylabel='Resp Rate')
    ax.legend(handles=[l for a in [ax] for l in a.lines], labels=['Resp Rate (Dev)', 'Resp Rate Trend (Dev)', 'Resp Rate (Val)', 'Resp Rate Trend (Val)'], loc=0)
    # FIX: return the figure, consistent with gain_plot and iv_plot
    return f
from pandas import DataFrame
import logging
import sys
from numpy import arange, histogram
import matplotlib.pyplot as plt
def read_vcf(fh):
    '''
    Read the VCF file obtained from any program included into Parliment2. Adds columns to the records if they are lacking.

    Args:
        fh (file): a VCF file (or any iterable of VCF lines).

    Returns:
        DF (pandas.DataFrame) : It contains all the records of SV
            (``None`` if no ``#CHROM`` header line was found).
        metadata (array) : It contains all the annotation lines begining with '##'
    '''
    metadata = []
    columns = None
    rows = []
    extra_columns_added = False
    for line in fh:
        if line[0:2] == '##':
            metadata.append(line.strip())
        elif line[0:6] == '#CHROM':
            columns = line.strip().split()
        elif columns is not None:
            fields = line.strip().split()
            # extend the header with 'colN' names when a record carries
            # more fields than the header declared
            if len(fields) > len(columns):
                columns = columns + ['col' + str(j + 1)
                                     for j in range(len(columns), len(fields))]
                extra_columns_added = True
            rows.append(fields)
    if columns is None:
        # no header line: nothing to build a frame from
        return None, metadata
    # pad short records with None, then build the frame once —
    # DataFrame.append was removed in pandas 2.0 and grew O(n^2) anyway
    rows = [r + [None] * (len(columns) - len(r)) for r in rows]
    DF = DataFrame(rows, columns=columns)
    if extra_columns_added:
        logging.warning('Extra column(s) were added as they were missing in the generated VCF file.')
    return DF, metadata
def analyze_chromosome( chr ,records, metadata ,bins=100, output_figure='output.png'):
    '''
    Analyze chromosome function divides the chromosome into 'n' number of equal-sized bins (parameter bins) and counts the different types of structural variants (SVs) to plot them as a figure. It allows the user to analyze what parts of the chromosomes are most affected by what type of variation.

    Args:
        chr (str) : Chromosome name user wishes to analyze.
        records (read_vcf output [0]) : Output obtained from read_vcf function.
        metadata (read_vcf output [1]) : Output obtained from read_vcf function.
        bins (int) : Number of parts user wishes to chop chromosomes into (read the description)
        output_figure (str) : Name for the output figure.
    '''
    try:
        records = records.loc[ records['#CHROM']==chr ]
    except:
        # NOTE(review): on failure the UNFILTERED records are still used
        # below — verify whether this fall-through is intended
        logging.error('Given chromosome not found in the records.')
    chr_lengths = {}
    # parse contig metadata lines (e.g. "##contig=<ID=...,length=...>")
    # into histogram bin edges: arange(0, length+1, length/bins)
    for i in metadata:
        try:
            temp = i.split(',')
            if(temp[1].split('=')[0]=='length'):
                chr_lengths[temp[0].split('=')[2]] = arange( 0 , float(temp[1].split('=')[1][:-1])+1 , float(temp[1].split('=')[1][:-1])/bins )
        except:
            # metadata lines without a parsable length field are skipped
            None
    #SVTYPE: DEL, INV, DUP, UNK, BND, INS
    freq_data = { 'DEL':[], 'INV':[], 'DUP':[], 'UNK':[], 'BND':[], 'INS':[] }
    # collect the POS of each record, grouped by its SVTYPE INFO entry
    for index, row in records.iterrows():
        #print( row['#CHROM'], row['INFO'] )
        for i in row['INFO'].split(';'):
            temp = i.split('=')
            if(temp[0] == 'SVTYPE'):
                freq_data[ temp[1] ].append(float(row['POS']))
    # one histogram panel per SV type, all sharing the chromosome's bin edges
    fig, ( ax0, ax1, ax2, ax3, ax4, ax5 ) = plt.subplots(6,figsize=(20,20))
    fig.suptitle('Chromosome SV locations for '+chr)
    ax0.hist(freq_data['DEL'],bins = chr_lengths[chr] )
    ax0.set_title('DEL', loc='left')
    ax1.hist(freq_data['INV'],bins = chr_lengths[chr] )
    ax1.set_title('INV', loc='left')
    ax2.hist(freq_data['DUP'],bins = chr_lengths[chr] )
    ax2.set_title('DUP', loc='left')
    ax3.hist(freq_data['BND'],bins = chr_lengths[chr] )
    ax3.set_title('BND', loc='left')
    ax4.hist(freq_data['INS'],bins = chr_lengths[chr] )
    ax4.set_title('INS', loc='left')
    ax5.hist(freq_data['UNK'],bins = chr_lengths[chr] )
    ax5.set_title('UNK', loc='left')
    fig.tight_layout()
    plt.savefig(output_figure)
def main():
    """Command-line entry point: ``python script.py <chromosome> <vcf-file>``."""
    #Example
    try:
        inchr = sys.argv[1].strip()
        fh = open(sys.argv[2].strip())
    except (IndexError, OSError):
        # FIX: narrowed from a bare except; exit with a non-zero status
        # (plain exit() reported success on failure)
        logging.error('Failed to load the given file.')
        sys.exit(1)
    #fh = open('NA19461.final.manta.svtyped.vcf')
    try:
        records , metadata = read_vcf(fh)
    finally:
        # FIX: close the file handle (was leaked)
        fh.close()
    analyze_chromosome( inchr ,records, metadata )
if(__name__=='__main__'):
    main()
"""
First N False Reducer
--------------------
This module is designed to reduce boolean-valued extracts e.g.
:mod:`panoptes_aggregation.extractors.all_tasks_empty_extractor`.
It returns true if and only if the first N extracts are `False`.
"""
from .reducer_wrapper import reducer_wrapper
import numpy as np
# reducer configuration defaults: `n` = number of leading extracts checked
DEFAULTS = {"n": {"default": 0, "type": int}}
def extractResultKey(extract):
    """Return the extract's boolean "result" value, defaulting to False.

    Uses dict.get instead of the previous `x["k"] if "k" in x else False`
    double-lookup idiom.
    """
    return extract.get("result", False)
@reducer_wrapper(defaults_data=DEFAULTS)
def first_n_false_reducer(data_list, n=0, **kwargs):
    """Reduce a list of boolean-valued extracts to a single boolean.

    Parameters
    ----------
    data_list : list
        A list of dicts containing a "result" key which should correspond
        with a boolean value.
    n : int
        The first n results in `data_list` must be `False`.

    Returns
    -------
    reduction : dict
        `reduction["result"]` is `True` if and only if `n > 0`, at least
        `n` extracts exist, and the first `n` results are all `False`.
    """
    # guard clauses replace the original single chained boolean expression
    if n <= 0 or len(data_list) < n:
        return {"result": False}
    leading_results = (extractResultKey(extract) for extract in data_list[:n])
    return {"result": not any(leading_results)}
# -*- coding: utf-8 -*-
"""
Functionality for parcellating data
"""
import nibabel as nib
from nilearn.input_data import NiftiLabelsMasker
import numpy as np
from neuromaps.datasets import ALIAS, DENSITIES
from neuromaps.images import construct_shape_gii, load_gifti
from neuromaps.resampling import resample_images
from neuromaps.transforms import _check_hemi
from neuromaps.nulls.spins import vertices_to_parcels, parcels_to_vertices
def _gifti_to_array(gifti):
    """ Converts tuple of `gifti` to numpy array
    """
    arrays = [load_gifti(img).agg_data() for img in gifti]
    return np.hstack(arrays)
def _array_to_gifti(data):
    """ Converts numpy `array` to tuple of gifti images
    """
    # split into two equal halves: one gifti per hemisphere
    halves = np.split(data, 2)
    return tuple(construct_shape_gii(half) for half in halves)
class Parcellater():
    """
    Class for parcellating arbitrary volumetric / surface data

    Parameters
    ----------
    parcellation : str or os.PathLike or Nifti1Image or GiftiImage or tuple
        Parcellation image or surfaces, where each region is identified by a
        unique integer ID. All regions with an ID of 0 are ignored.
    space : str
        The space in which `parcellation` is defined
    resampling_target : {'data', 'parcellation', None}, optional
        Gives which image gives the final shape/size. For example, if
        `resampling_target` is 'data', the `parcellation` is resampled to the
        space + resolution of the data, if needed. If it is 'parcellation' then
        any data provided to `.fit()` are transformed to the space + resolution
        of `parcellation`. Providing None means no resampling; if spaces +
        resolutions of the `parcellation` and data provided to `.fit()` do not
        match a ValueError is raised. Default: 'data'
    hemi : {'L', 'R'}, optional
        If provided `parcellation` represents only one hemisphere of a surface
        atlas then this specifies which hemisphere. If not specified it is
        assumed that `parcellation` is (L, R) hemisphere. Ignored if `space` is
        'MNI152'. Default: None
    """

    def __init__(self, parcellation, space, resampling_target='data',
                 hemi=None):
        self.parcellation = parcellation
        self.space = ALIAS.get(space, space)
        self.resampling_target = resampling_target
        self.hemi = hemi
        # MNI152 is the only volumetric space; everything else is surface-based
        self._volumetric = self.space == 'MNI152'
        if self.resampling_target == 'parcellation':
            self._resampling = 'transform_to_trg'
        else:
            self._resampling = 'transform_to_src'
        if not self._volumetric:
            # normalize (parcellation, hemi) into matched tuples of equal length
            self.parcellation, self.hemi = zip(
                *_check_hemi(self.parcellation, self.hemi)
            )
        if self.resampling_target not in ('parcellation', 'data', None):
            raise ValueError('Invalid value for `resampling_target`: '
                             f'{resampling_target}')
        if self.space not in DENSITIES:
            raise ValueError(f'Invalid value for `space`: {space}')

    def fit(self):
        """ Prepare parcellation for data extraction
        """
        if not self._volumetric:
            # eagerly load surface parcellation files into GiftiImage objects
            self.parcellation = tuple(
                load_gifti(img) for img in self.parcellation
            )
        self._fit = True
        return self

    def transform(self, data, space, hemi=None):
        """
        Applies parcellation to `data` in `space`

        Parameters
        ----------
        data : str or os.PathLike or Nifti1Image or GiftiImage or tuple
            Data to parcellate
        space : str
            The space in which `data` is defined
        hemi : {'L', 'R'}, optional
            If provided `data` represents only one hemisphere of a surface
            dataset then this specifies which hemisphere. If not specified it
            is assumed that `data` is (L, R) hemisphere. Ignored if `space` is
            'MNI152'. Default: None

        Returns
        -------
        parcellated : np.ndarray
            Parcellated `data`

        Raises
        ------
        ValueError
            If the parcellation/data space combination is incompatible with
            `resampling_target`, or if `hemi` does not match the parcellation
        """
        self._check_fitted()
        space = ALIAS.get(space, space)
        if (self.resampling_target == 'data' and space == 'MNI152'
                and not self._volumetric):
            raise ValueError('Cannot use resampling_target="data" when '
                             'provided parcellation is in surface space and '
                             'provided data are in MNI152 space.')
        elif (self.resampling_target == 'parcellation' and self._volumetric
                and space != 'MNI152'):
            # FIX: message previously read "provided are in surface space"
            raise ValueError('Cannot use resampling_target="parcellation" '
                             'when provided parcellation is in MNI152 space '
                             'and provided data are in surface space.')
        if hemi is not None and hemi not in self.hemi:
            # FIX: this message was a plain string, so the {hemi} /
            # {self.hemi} placeholders were never interpolated
            raise ValueError(f'Cannot parcellate data from {hemi} hemisphere '
                             'when parcellation was provided for incompatible '
                             f'hemisphere: {self.hemi}')
        if isinstance(data, np.ndarray):
            data = _array_to_gifti(data)
        data, parc = resample_images(data, self.parcellation,
                                     space, self.space, hemi=hemi,
                                     resampling=self._resampling,
                                     method='nearest')
        if ((self.resampling_target == 'data'
                and space.lower() == 'mni152')
                or (self.resampling_target == 'parcellation'
                    and self._volumetric)):
            # volumetric route: wrap image as 4D and use nilearn's masker
            data = nib.concat_images([nib.squeeze_image(data)])
            parcellated = NiftiLabelsMasker(
                parc, resampling_target=self.resampling_target
            ).fit_transform(data)
        else:
            if not self._volumetric:
                # resampling can drop label tables; restore them from the
                # fitted parcellation so region IDs stay consistent
                for n, _ in enumerate(parc):
                    parc[n].labeltable.labels = \
                        self.parcellation[n].labeltable.labels
            data = _gifti_to_array(data)
            parcellated = vertices_to_parcels(data, parc)
        return parcellated

    def inverse_transform(self, data):
        """
        Project `data` to space + density of parcellation

        Parameters
        ----------
        data : array_like
            Parcellated data to be projected to the space of parcellation

        Returns
        -------
        data : Nifti1Image or tuple-of-nib.GiftiImage
            Provided `data` in space + resolution of parcellation
        """
        if not self._volumetric:
            verts = parcels_to_vertices(data, self.parcellation)
            img = _array_to_gifti(verts)
        else:
            data = np.atleast_2d(data)
            img = NiftiLabelsMasker(self.parcellation).fit() \
                .inverse_transform(data)
        return img

    def fit_transform(self, data, space, hemi=None):
        """ Prepare and perform parcellation of `data`
        """
        return self.fit().transform(data, space, hemi)

    def _check_fitted(self):
        # internal guard: `.fit()` must run before `.transform()`
        if not hasattr(self, '_fit'):
            raise ValueError(f'It seems that {self.__class__.__name__} has '
                             'not been fit. You must call `.fit()` before '
                             'calling `.transform()`')
import numpy as np
from sklearn import svm
from sklearn.metrics import f1_score, recall_score, precision_score
from sklearn.model_selection import GridSearchCV
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
from classify.preprocess import process_data
# Utility function to move the midpoint of a colormap to be around
# the values of interest.
class MidpointNormalize(Normalize):
    """Colormap normalization that pins `midpoint` to the center (0.5) of the
    colormap, so colors diverge around the value of interest."""

    def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
        self.midpoint = midpoint
        super().__init__(vmin, vmax, clip)

    def __call__(self, value, clip=None):
        # piecewise-linear map: vmin -> 0, midpoint -> 0.5, vmax -> 1
        anchors_x = [self.vmin, self.midpoint, self.vmax]
        anchors_y = [0, 0.5, 1]
        return np.ma.masked_array(np.interp(value, anchors_x, anchors_y))
def train_svm(finetune, x_train, x_test, x_val,
              train_data, test_data, val_data, add_extra):
    """Train an RBF-kernel SVM classifier, optionally fine-tuning C/gamma.

    Parameters
    ----------
    finetune : bool
        If True, grid-search C/gamma with 5-fold CV over train + val data.
    x_train, x_test, x_val : np.ndarray
        Feature arrays; 3D arrays are flattened to 2D (samples, features).
    train_data, test_data, val_data
        Raw data passed to `process_data` to obtain the label lists.
    add_extra
        Unused here; kept for interface compatibility with other trainers.

    Returns
    -------
    list, np.ndarray
        [acc_train, acc_val, acc_test, recall, precision, f1] and the
        predicted labels for the test set.
    """
    _, [train_labels, test_labels, val_labels], _ = process_data(train_data, test_data, val_data)

    # SVM requires 2D inputs. FIX: flatten BEFORE any fitting — previously the
    # reshape happened after GridSearchCV.fit, which would crash on 3D input.
    if len(x_train.shape) == 3:
        nsamples, nx, ny = x_train.shape
        x_train = x_train.reshape((nsamples, nx * ny))
        nsamples, nx, ny = x_val.shape
        x_val = x_val.reshape((nsamples, nx * ny))
        nsamples, nx, ny = x_test.shape
        x_test = x_test.reshape((nsamples, nx * ny))

    if finetune:
        print("# ---------- Fine tuning SVM -----------#")
        param_grid = [
            {'C': [1, 10], 'gamma': [1, 0.1, 0.01, 0.001], 'kernel': ['rbf']},
        ]
        # do cross validation on train + val
        grid_search = GridSearchCV(svm.SVC(), param_grid, cv=5)
        grid_search.fit(np.concatenate((x_train, x_val), axis=0), (train_labels + val_labels))
        print("Best: %f using %s" % (grid_search.best_score_, grid_search.best_params_))
        model = svm.SVC(kernel=grid_search.best_params_['kernel'], C=grid_search.best_params_['C'],
                        gamma=grid_search.best_params_['gamma'])
    else:
        model = svm.SVC(kernel='rbf', C=1, gamma=0.1)

    print("Fitting model")
    model.fit(x_train, train_labels)

    # Evaluate accuracy on each split
    print("Evaluating model")
    acc_train = model.score(x_train, train_labels)
    acc_val = model.score(x_val, val_labels)
    acc_test = model.score(x_test, test_labels)

    # Predict Output and compute test-set metrics
    print("Predict output")
    predicted = model.predict(x_test)
    f1 = f1_score(test_labels, predicted)
    recall = recall_score(test_labels, predicted)
    precision = precision_score(test_labels, predicted)
    return [acc_train, acc_val, acc_test, recall, precision, f1], predicted
#!/usr/bin/env python3
# Arkouda smoke-test / demo script: exercises pdarray creation, indexing,
# reductions, bit ops, and NumPy interop against a running arkouda server.
import importlib
import numpy as np
import math
import gc
import sys
import arkouda as ak
# silence per-operation server chatter
ak.verbose = False
# connect to the server named on the command line, else use defaults
if len(sys.argv) > 1:
    ak.connect(server=sys.argv[1], port=sys.argv[2])
else:
    ak.connect()
# mixed arkouda/NumPy arithmetic and round-trip back to a NumPy ndarray
a = ak.arange(0, 10, 1)
b = np.linspace(10, 20, 10)
c = ak.array(b)
d = a + c
e = d.to_ndarray()
# sliced assignment with a scalar
a = ak.ones(10)
a[::2] = 0
print(a)
# sliced assignment from another pdarray
a = ak.ones(10)
b = ak.zeros(5)
a[1::2] = b
print(a)
# full-slice assignment with implicit dtype cast
a = ak.zeros(10) # float64
b = ak.arange(0,10,1) # int64
a[:] = b # cast b to float64
print(b,b.dtype)
print(a,a.dtype)
# scalar assignment into a bool pdarray
a = ak.randint(0,2,10,dtype=ak.bool)
a[1] = True
a[2] = True
print(a)
# gather/scatter with an integer index vector, per dtype
a = ak.ones(10,dtype=ak.int64)
iv = ak.arange(0,10,1)[::2]
a[iv] = 10
print(a)
a = ak.ones(10)
iv = ak.arange(0,10,1)[::2]
a[iv] = 10.0
print(a)
a = ak.ones(10,dtype=ak.bool)
iv = ak.arange(0,10,1)[::2]
a[iv] = False
print(a)
# scatter a pdarray of values through an index vector
a = ak.ones(10,dtype=ak.bool)
iv = ak.arange(0,5,1)
b = ak.zeros(iv.size,dtype=ak.bool)
a[iv] = b
print(a)
# scatter with random (possibly repeated) indices
a = ak.randint(10,20,10)
print(a)
iv = ak.randint(0,10,5)
print(iv)
b = ak.zeros(iv.size,dtype=ak.int64)
a[iv] = b
print(a)
# value_counts returns (unique values, counts)
ak.verbose = False
a = ak.randint(10,30,40)
vc = ak.value_counts(a)
print(vc[0].size,vc[0])
print(vc[1].size,vc[1])
# boolean-mask (compressing) indexing
ak.verbose = False
a = ak.arange(0,10,1)
b = a[a<5]
a = ak.linspace(0,9,10)
b = a[a<5]
print(b)
# iteration over a pdarray (bounded by pdarrayIterThresh)
ak.verbose = True
ak.pdarrayIterThresh = 1000
a = ak.arange(0,10,1)
print(list(a))
# unique + histogram
ak.verbose = False
a = ak.randint(10,30,40)
u = ak.unique(a)
h = ak.histogram(a,bins=20)
print(a)
print(h.size,h)
print(u.size,u)
ak.verbose = False
a = ak.randint(10,30,50)
h = ak.histogram(a,bins=20)
print(a)
print(h)
# sum over a random bool pdarray counts the Trues
ak.verbose = False
a = ak.randint(0,2,50,dtype=ak.bool)
print(a)
print(a.sum())
# histogram over float and int ranges
ak.verbose = False
a = ak.linspace(101,102,100)
h = ak.histogram(a,bins=50)
print(h)
ak.verbose = False
a = ak.arange(0,100,1)
h = ak.histogram(a,bins=10)
print(h)
# in1d membership test, then compress with the resulting truth vector
ak.verbose = False
a = ak.arange(0,99,1)
b = a[::10] # take every tenth one
b = b[::-1] # reverse b
print(a)
print(b)
c = ak.in1d(a,b) # put out truth vector
print(c)
print(a[c]) # compress out false values
ak.verbose = False
# NumPy-only comparison stanza. FIX: the `np.bool` alias was deprecated in
# NumPy 1.20 and removed in 1.24 — use the builtin `bool` dtype instead.
a = np.ones(10, dtype=bool)
b = np.arange(0, 10, 1)
# bare expression kept from the original notebook export; results are discarded
np.sum(a), np.cumsum(a), np.sum(b < 4), b[b < 4], b < 5
ak.verbose = False
# currently... ak pdarray to np array
a = ak.linspace(0,9,10)
b = np.array(list(a))
print(a,a.dtype,b,b.dtype)
a = ak.arange(0,10,1)
b = np.array(list(a))
print(a,a.dtype,b,b.dtype)
a = ak.ones(10,dtype=ak.bool)
b = np.array(list(a))
print(a,a.dtype,b,b.dtype)
# pure-NumPy elementwise division for comparison
ak.verbose = False
b = np.linspace(1,10,10)
a = np.arange(1,11,1)
print(b/a)
ak.verbose = False
#a = np.ones(10000,dtype=np.int64)
a = np.linspace(0,99,100)
#a = np.arange(0,100,1)
print(a)
# repr/str and basic ndarray attributes
ak.verbose = False
print(a.__repr__())
print(a.__str__())
ak.verbose = False
print(a)
print(type(a), a.dtype, a.size, a.ndim, a.shape, a.itemsize)
# same attribute checks for arkouda pdarrays
ak.verbose = False
a = ak.arange(0,100,1)
ak.verbose = False
print(a)
ak.verbose = False
b = ak.linspace(0,99,100)
print(b.__repr__())
ak.verbose = False
b = ak.linspace(0,9,10)
a = ak.arange(0,10,1)
print(a.name, a.size, a.dtype, a)
print(b.name, b.size, b.dtype, b)
print(ak.info(a+b))
ak.verbose = False
c = ak.arange(0,10,1)
print(ak.info(c))
print(c.name, c.dtype, c.size, c.ndim, c.shape, c.itemsize)
print(c)
# scalar broadcasting on both sides of a pdarray
ak.verbose = False
print(5+c + 5)
c = np.array([10, 11, 12, 13, 14, 15, 16, 17, 18, 19])
c = np.array([10, 11, 12, 13, 14, 15, 16, 17, 18, 19.1])
print(c.__repr__(), c.dtype.__str__(), c.dtype.__repr__())
# compare true/floor division semantics between NumPy and arkouda
ak.verbose = False
a = np.ones(9)
b = np.arange(1,10,1)
print(a.dtype,b.dtype)
c = ak.ones(9)
d = ak.arange(1,10,1)
print(c.dtype,d.dtype)
y = a/b
z = c/d
print("truediv \nnp out:",y,"\nak out:",z)
print(y[5],z[5],y[5] ==z[5])
y = a//b
z = c//d
print("floordiv \nnp out:",y,"\nak out:",z)
print(y[5],z[5],y[5] ==z[5])
# in-place (augmented) assignment operators
ak.verbose = False
c = ak.arange(1,10,1)
c //= c
print(c)
c += c
print(c)
c *= c
print(c)
# ones_like in both libraries
ak.verbose = False
a = np.ones(9,dtype=np.int64)
b = np.ones_like(a)
print(b)
ak.verbose = False
a = ak.ones(9,dtype=ak.int64)
b = ak.ones_like(a)
print(b)
# scalar element comparison across libraries
ak.verbose = False
a = ak.arange(0,10,1)
b = np.arange(0,10,1)
print(a[5] == b[5])
# float assigned into an int64 pdarray (truncation expected)
ak.verbose = False
a = ak.arange(0,10,1)
b = np.arange(0,10,1)
a[5] = 10.2
print(a[5])
# degenerate / negative-stride slices
ak.verbose = False
a = ak.arange(0,10,1)
b = np.arange(0,10,1)
#print((a[:]),b[:])
#print(a[1:-1:2],b[1:-1:2])
#print(a[0:10:2],b[0:10:2])
print(a[4:20:-1],b[4:20:-1])
print(a[:1:-1],b[:1:-1])
ak.verbose = False
d = ak.arange(1,10,1)
#d.type.__class__,d.name,d.isnative,np.int64.__class__,bool
ak.info(d)
#dir(d)
# scalar reads from bool pdarrays
ak.verbose = False
a = ak.ones(10,dtype=ak.bool)
print(a[1])
ak.verbose = False
a = ak.zeros(10,dtype=ak.bool)
print(a[1])
# assigning Python bools into pdarrays of each dtype
ak.verbose = False
a = ak.ones(10,dtype=ak.bool)
a[4] = False
a[1] = False
print(a)
print(a[::2])
print(a[1])
a = ak.ones(10,dtype=ak.int64)
a[4] = False
a[1] = False
print(a)
print(a[::2])
print(a[1])
a = ak.ones(10)
a[4] = False
a[1] = False
print(a)
print(a[::2])
print(a[1])
# list() conversion for int and bool pdarrays
ak.verbose = False
a = ak.arange(0,10,1)
b = list(a)
print(b)
a = a<5
b = list(a)
print(b)
# elementwise math functions
ak.verbose = False
a = ak.linspace(1,10,10)
print(ak.abs(a))
print(ak.log(a))
print(ak.exp(a))
a.fill(math.e)
print(ak.log(a))
type(bool),type(np.bool),type(ak.bool),type(True)
# any/all reductions per dtype, module-level and method forms
ak.verbose = False
a = ak.linspace(0,9,10)
print(a,ak.any(a),ak.all(a),ak.all(ak.ones(10,dtype=ak.float64)))
b = a<5
print(b,ak.any(b),ak.all(b),ak.all(ak.ones(10,dtype=ak.bool)))
c = ak.arange(0,10,1)
print(c,ak.any(c),ak.all(c),ak.all(ak.ones(10,dtype=ak.int64)))
print(a.any(),a.all(),b.any(),b.all())
# sum/prod parity with NumPy
ak.verbose = False
a = ak.linspace(0,9,10)
ak.sum(a)
b = np.linspace(0,9,10)
print(ak.sum(a) == np.sum(b),ak.sum(a),np.sum(b),a.sum(),b.sum())
ak.verbose = False
a = ak.linspace(1,10,10)
b = np.linspace(1,10,10)
print(ak.prod(a) == np.prod(b),ak.prod(a),np.prod(b),a.prod(),b.prod())
# reductions and scans over bool arrays, NumPy first then arkouda
ak.verbose = False
a = np.arange(0,20,1)
b = a<10
print(b,np.sum(b),b.sum(),np.prod(b),b.prod(),np.cumsum(b),np.cumprod(b))
print()
b = a<5
print(b,np.sum(b),b.sum(),np.prod(b),b.prod(),np.cumsum(b),np.cumprod(b))
print()
a = ak.arange(0,20,1)
b = a<10
print(b,ak.sum(b),b.sum(),ak.prod(b),b.prod(),ak.cumsum(b),ak.cumprod(b))
b = a<5
print(b,ak.sum(b),b.sum(),ak.prod(b),b.prod(),ak.cumsum(b),ak.cumprod(b))
# gather with a reversed index vector
ak.verbose = False
a = ak.arange(0,10,1)
iv = a[::-1]
print(a,iv,a[iv])
ak.verbose = False
a = ak.arange(0,10,1)
iv = a[::-1]
print(a,iv,a[iv])
ak.verbose = False
a = ak.linspace(0,9,10)
iv = ak.arange(0,10,1)
iv = iv[::-1]
print(a,iv,a[iv])
# same gather in NumPy for comparison
ak.verbose = False
a = np.arange(0,10,1)
iv = a[::-1]
print(a,iv,a[iv])
# boolean compression with all-true, mixed and all-false masks
ak.verbose = False
a = ak.arange(0,10,1)
b = a<20
print(a,b,a[b])
ak.verbose = False
a = ak.arange(0,10,1)
b = a<5
print(a,b,a[b])
ak.verbose = False
a = ak.arange(0,10,1)
b = a<0
print(a,b,a[b])
ak.verbose = False
a = ak.linspace(0,9,10)
b = a<5
print(a,b,a[b])
# large allocation to exercise server-side memory handling
ak.verbose = False
N = 2**23 # 2**23 * 8 == 64MiB
A = ak.linspace(0,N-1,N)
B = ak.linspace(0,N-1,N)
C = A+B
print(ak.info(C),C)
# turn off verbose messages from arkouda package
ak.verbose = False
# set pdarrayIterThresh to 0 to only print the first 3 and last 3 of pdarray
ak.pdarrayIterThresh = 0
a = ak.linspace(0,9,10)
b = a<5
print(a)
print(b)
print(a[b])
print(a)
a = np.linspace(0,9,10)
b = a<5
print(a)
print(b)
print(a[b])
print(a)
# bitwise / unary operators on int64 pdarrays, compared with NumPy
ak.verbose = False
ak.pdarrayIterThresh = 0
a = ak.ones(10,ak.int64)
b = a | 0xff
print(a, b, a^b, b>>a, b<<1|1, 0xf & b, 0xaa ^ b, b ^ 0xff)
print(-a,~(~a))
a = ak.ones(10,dtype=ak.int64)
b = ~ak.zeros(10,dtype=ak.int64)
print(~a, -b)
a = np.ones(10,dtype=np.int64)
b = ~np.zeros(10,dtype=np.int64)
print(~a, -b)
# disconnect and stop the arkouda server
ak.shutdown()
import pickle
import numpy as np
from keras.preprocessing import sequence
from random import shuffle
def genData(filePathX, filePathY, maxlen = 200, minValue = 1, maxValue = 20000):
    """Load pickled sequences/labels, shuffle, split 90/10, pad and clip.

    Parameters
    ----------
    filePathX : str
        Pickle file containing the list of input sequences.
    filePathY : str
        Pickle file containing the list of labels.
    maxlen : int
        Target length for `sequence.pad_sequences`.
    minValue, maxValue : int
        Token ids outside [minValue, maxValue] are replaced with 1 (OOV).

    Returns
    -------
    X_train, Y_train, X_test, Y_test
        Padded numpy arrays for training and evaluation.
    """
    with open(filePathX, 'rb') as fp:
        X_full = pickle.load(fp)
    with open(filePathY, 'rb') as fp:
        Y_full = pickle.load(fp)
    # Create test and train sets from a shuffled 90/10 split.
    full = list(zip(X_full, Y_full))
    shuffle(full)
    # FIX: slice the remainder instead of `[x for x in full if x not in train]`
    # — the membership scan was O(n^2) and silently dropped any test sample
    # that duplicated a training sample.
    split = int(len(full) * 0.9)
    train = full[:split]
    test = full[split:]
    X_train, Y_train = zip(*train)
    X_test, Y_test = zip(*test)
    print(len(X_train), 'train sequences')
    print(len(X_test), 'test sequences')
    # Create numpy arrays, pad to fixed length, and map out-of-vocab ids to 1
    print('Pad sequences')
    X_train = sequence.pad_sequences(X_train, maxlen=maxlen)
    X_test = sequence.pad_sequences(X_test, maxlen=maxlen)
    X_train[X_train < minValue] = 1
    X_train[X_train > maxValue] = 1
    X_test[X_test < minValue] = 1
    X_test[X_test > maxValue] = 1
    Y_train = np.array(Y_train)
    Y_test = np.array(Y_test)
    print('X_train shape:', X_train.shape)
    print('X_test shape:', X_test.shape)
    print('Y_train shape:', Y_train.shape)
    print('Y_test shape:', Y_test.shape)
    return X_train, Y_train, X_test, Y_test
def makePredictArray(var, support, size=100):
    """Build a prediction batch: `var` tiled `size` times paired with a random
    sample of `size` rows from `support`.

    Note: `support` is shuffled in place (caller's array is mutated), matching
    the original behavior.
    """
    np.random.shuffle(support)
    sampled = support[:size]
    tiled = np.tile(var, (size, 1))
    return [tiled, sampled]
def getSimilarity(array, model):
    """Return the mean of the model's predictions over `array` as a scalar
    similarity score."""
    predictions = model.predict(array)
    return np.mean(predictions)
def testAccuracy(X_val, Y_val, X_train, Y_train, model, size):
    """Fraction of validation samples classified correctly by comparing
    similarity against positive vs. negative support sets."""
    posSupport = X_train[Y_train == 1]
    negSupport = X_train[Y_train == 0]
    predictions = np.zeros(len(Y_val))
    for idx, sample in enumerate(X_val):
        posPred = getSimilarity(makePredictArray(sample, posSupport, size), model)
        negPred = getSimilarity(makePredictArray(sample, negSupport, size), model)
        # predict positive when the positive-support similarity wins
        predictions[idx] = (posPred - negPred) > 0
    return np.sum(Y_val == predictions) / len(Y_val)
# --------------------------------------------------------
# Fine Refine Online Gushing
# Copyright (c) 2018 KAUST IVUL
# Licensed under The MIT License [see LICENSE for details]
# Written by Frost XU
# --------------------------------------------------------
"""The layer used during training to get proposal labels for classifier refinement.
FrogLayer implements a tensorflow Python layer.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import numpy.random as npr
from model.config import cfg
from model.bbox_transform import bbox_transform
from utils.cython_bbox import bbox_overlaps
DEBUG = False
def frog_layer(boxes, cls_prob, im_labels):
    """Generate per-RoI labels and loss weights for classifier refinement.

    For every image-level positive class the highest-scoring proposal is taken
    as a pseudo ground-truth box; each RoI is then labelled by its best
    overlapping pseudo GT, falling back to background (0) when the overlap is
    below cfg.TRAIN.FG_THRESH.

    Parameters
    ----------
    boxes : ndarray
        Proposal boxes; column 0 is dropped below (assumed batch index —
        TODO confirm layout against the caller).
    cls_prob : ndarray, (num_rois, num_classes incl. bg)
        Class probabilities; mutated in place (selected rows zeroed).
    im_labels : ndarray, (1, num_classes incl. bg)
        Image-level binary labels.

    Returns
    -------
    labels : ndarray, (num_rois,)
        Class index per RoI (0 = background).
    cls_loss_weights : ndarray, (num_rois,)
        Score of the assigned pseudo GT, used to weight the loss.
    """
    im_labels = im_labels[:, 1:]  # remove bg
    boxes = boxes[:, 1:]
    if DEBUG:
        print('im_labels', im_labels.shape)
    num_images, num_classes = im_labels.shape
    assert num_images == 1, 'batch size shoud be equal to 1'
    im_labels_tmp = im_labels[0, :]
    gt_boxes = np.zeros((0, 4), dtype=np.float32)
    gt_classes = np.zeros((0, 1), dtype=np.int32)
    gt_scores = np.zeros((0, 1), dtype=np.float32)
    # FIX: `xrange` does not exist in Python 3 (this file imports __future__
    # print_function/division, i.e. targets py3) — use `range`.
    for i in range(num_classes):
        if im_labels_tmp[i] == 1:
            cls_prob_tmp = cls_prob[:, i].copy()
            max_index = np.argmax(cls_prob_tmp)
            if DEBUG:
                print('max_index:', max_index, 'num_classes:', num_classes)
                print('boxes:', boxes.shape, 'cls_prob_tmp', cls_prob_tmp.shape)
            gt_boxes = np.vstack((gt_boxes, boxes[max_index, :].reshape(1, -1)))
            gt_classes = np.vstack((gt_classes, (i + 1) * np.ones((1, 1), dtype=np.int32)))
            gt_scores = np.vstack((gt_scores,
                                   cls_prob_tmp[max_index] * np.ones((1, 1), dtype=np.float32)))
            # zero the chosen proposal so it cannot be re-selected for another class
            cls_prob[max_index, :] = 0
    # overlaps: (rois x gt_boxes). FIX: the `np.float` alias was removed in
    # NumPy 1.24 — use the explicit np.float64.
    overlaps = bbox_overlaps(
        np.ascontiguousarray(boxes, dtype=np.float64),
        np.ascontiguousarray(gt_boxes, dtype=np.float64))
    gt_assignment = overlaps.argmax(axis=1)
    max_overlaps = overlaps.max(axis=1)
    labels = gt_classes[gt_assignment, 0]
    cls_loss_weights = gt_scores[gt_assignment, 0]
    # RoIs whose best overlap is below FG_THRESH become background
    bg_inds = np.where(max_overlaps < cfg.TRAIN.FG_THRESH)[0]
    labels[bg_inds] = 0
    if DEBUG:
        print('label', labels.shape, 'weight', cls_loss_weights.shape)
    return labels, cls_loss_weights
#
# def _get_highest_score_proposals(boxes, cls_prob, im_labels):
# """Get proposals with highest score."""
#
# num_images, num_classes = im_labels.shape
# assert num_images == 1, 'batch size shoud be equal to 1'
# im_labels_tmp = im_labels[0, :]
# gt_boxes = np.zeros((0, 4), dtype=np.float32)
# gt_classes = np.zeros((0, 1), dtype=np.int32)
# gt_scores = np.zeros((0, 1), dtype=np.float32)
# for i in xrange(num_classes):
# if im_labels_tmp[i] == 1:
# cls_prob_tmp = cls_prob[:, i].copy()
# max_index = np.argmax(cls_prob_tmp)
#
# if DEBUG:
# print( 'max_index:', max_index, 'cls_prob_tmp:', cls_prob_tmp[max_index])
#
# gt_boxes = np.vstack((gt_boxes, boxes[max_index, :].reshape(1, -1)))
# gt_classes = np.vstack((gt_classes, (i + 1) * np.ones((1, 1), dtype=np.int32)))
# gt_scores = np.vstack((gt_scores,
# cls_prob_tmp[max_index] * np.ones((1, 1), dtype=np.float32)))
# cls_prob[max_index, :] = 0
#
# proposals = {'gt_boxes' : gt_boxes,
# 'gt_classes': gt_classes,
# 'gt_scores': gt_scores}
#
# return proposals
#
# def _sample_rois(all_rois, proposals):
# """Generate a random sample of RoIs comprising foreground and background
# examples.
# """
# # overlaps: (rois x gt_boxes)
# gt_boxes = proposals['gt_boxes']
# gt_labels = proposals['gt_classes']
# gt_scores = proposals['gt_scores']
# overlaps = bbox_overlaps(
# np.ascontiguousarray(all_rois, dtype=np.float),
# np.ascontiguousarray(gt_boxes, dtype=np.float))
# gt_assignment = overlaps.argmax(axis=1)
# max_overlaps = overlaps.max(axis=1)
# labels = gt_labels[gt_assignment, 0]
# cls_loss_weights = gt_scores[gt_assignment, 0]
#
# # Select foreground RoIs as those with >= FG_THRESH overlap
# fg_inds = np.where(max_overlaps >= cfg.TRAIN.FG_THRESH)[0]
#
# # Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)
# bg_inds = np.where(max_overlaps < cfg.TRAIN.FG_THRESH)[0]
#
# if DEBUG:
# print( "number of fg:", len(fg_inds), 'number of bg:', len(bg_inds) )
#
# labels[bg_inds] = 0
#
# rois = all_rois
#
# return labels, rois, cls_loss_weights | |
# ABSOLUTE MAG -> APPARENT MAG WITH DISTANCE INFO.
#============================================================
import glob
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import ascii, fits
from astropy.table import Table, vstack
from astropy import units as u
def abs2app(M, Mer, d, der):
    """Convert absolute magnitude to apparent magnitude via the distance modulus.

    Parameters
    ----------
    M, Mer : array_like
        Absolute magnitude and its 1-sigma uncertainty.
    d, der : float
        Distance and its uncertainty in parsecs.

    Returns
    -------
    m, mer : array_like
        Apparent magnitude m = M + 5*log10(d) - 5 and its propagated
        uncertainty.
    """
    m = M + 5 * np.log10(d) - 5
    # FIX: the distance term must be squared inside the quadrature sum;
    # the original added (5*der)/(d*ln10) unsquared.
    mer = np.sqrt(Mer**2 + (5 * der / (d * np.log(10)))**2)
    return m, mer
# Input: GW170817 photometry table with absolute magnitudes; output directory
path_table = '/home/sonic/Research/gppy/table/phot_gw170817_Drout.abs.dat'
path_save = '/home/sonic/Research/gppy/table'
photbl = ascii.read(path_table)
# Distance (and uncertainty) to GW190425; convert Mpc -> pc for the modulus
dist, dister = 156 * u.Mpc, 41 * u.Mpc
d, der = dist.to(u.pc).value, dister.to(u.pc).value
# Scale GW170817 absolute magnitudes to apparent magnitudes at GW190425's distance
M, Mer = photbl['mag_abs'], photbl['mager_abs']
m, mer = abs2app(M, Mer, d, der)
photbl['mag_gw190425'] = m
photbl['mager_gw190425'] = mer
# Write the augmented table next to the originals
photbl.write('{}/phot_gw170817_Drout.gw190425.dat'.format(path_save), format='ascii.tab', overwrite=True)
'''
lambdata - a collection of data science helper functions
'''
import numpy as np
import pandas as pd
# sample code
# module-level example DataFrames: a 10x1 frame of ones and a 50x1 frame of zeros
ONES = pd.DataFrame(np.ones(10))
ZEROS = pd.DataFrame(np.zeros(50))
import numpy as np
import pandas as pd
class Metrics:
    """Verification metrics for predictions vs. observations.

    Every metric accepts either pandas objects (both inputs DataFrames or both
    Series, preserving index/columns in the result) or generic array-likes,
    which are converted to numpy arrays.
    """

    def __init__(self):
        pass

    @staticmethod
    def _is_pandas(pred, true):
        """True when both inputs are pandas DataFrames or both are Series."""
        # FIX: use isinstance instead of exact `type(...) ==` comparisons
        both_frames = isinstance(pred, pd.DataFrame) and isinstance(true, pd.DataFrame)
        both_series = isinstance(pred, pd.Series) and isinstance(true, pd.Series)
        return both_frames or both_series

    @staticmethod
    def pearson_correlation(y_true, y_pred, **kwargs):
        """Pearson correlation coefficient between `y_true` and `y_pred`."""
        # Wrap non-Series inputs as DataFrames so pd.concat can align them
        if not isinstance(y_true, pd.Series):
            y_true = pd.DataFrame(y_true)
        if not isinstance(y_pred, pd.Series):
            y_pred = pd.DataFrame(y_pred)
        return pd.concat([y_true, y_pred], axis=1).corr().iloc[0, 1]

    @staticmethod
    def absolute_error(pred, true, **kwargs):
        """Elementwise absolute error |pred - true|."""
        if Metrics._is_pandas(pred, true):
            return (pred - true).apply(np.abs)
        pred = np.array(pred)
        true = np.array(true)
        return np.abs(pred - true)

    @staticmethod
    def absolute_error_relative(pred, true, **kwargs):
        """Elementwise absolute error relative to the observation."""
        if Metrics._is_pandas(pred, true):
            return (pred - true).apply(np.abs) / true
        pred = np.array(pred)
        true = np.array(true)
        # Suppress the divide-by-zero warning: np.where evaluates both branches
        with np.errstate(divide='ignore', invalid='ignore'):
            return np.where(true != 0, np.abs(pred - true) / true, np.nan)

    @staticmethod
    def RMSE(pred, true, **kwargs):
        """Root-mean-square error (NaNs ignored on the array path)."""
        if Metrics._is_pandas(pred, true):
            return ((pred - true) ** 2).mean() ** 0.5
        pred = np.array(pred)
        true = np.array(true)
        return np.sqrt(np.nanmean((pred - true) ** 2))

    @staticmethod
    def mean_bias(pred, true, **kwargs):
        """Mean of (pred - true); NaNs ignored on the array path."""
        if Metrics._is_pandas(pred, true):
            return (pred - true).mean()
        pred = np.array(pred)
        true = np.array(true)
        return np.nanmean(pred - true)

    @staticmethod
    def bias(pred, true, **kwargs):
        """Elementwise bias (pred - true)."""
        if Metrics._is_pandas(pred, true):
            return pred - true
        pred = np.array(pred)
        true = np.array(true)
        return pred - true

    @staticmethod
    def bias_rel(pred, true, **kwargs):
        """Elementwise bias relative to the observation."""
        if Metrics._is_pandas(pred, true):
            return (pred - true) / true
        pred = np.array(pred)
        true = np.array(true)
        with np.errstate(divide='ignore', invalid='ignore'):
            return np.where(true != 0, (pred - true) / true, np.nan)

    def bias_rel_wind_1(self, pred, true, variable=None, min_speed=1):
        """
        Relative bias restricted to observations >= `min_speed`
        (`variable` selects the filtering column on the pandas path).
        """
        if self._is_pandas(pred, true):
            mask = true[variable] >= min_speed
            return self.bias_rel(pred[mask], true[mask])
        pred = np.array(pred)
        true = np.array(true)
        mask = true >= min_speed
        return self.bias_rel(pred[mask], true[mask])

    def abs_error_rel_wind_1(self, pred, true, variable=None, min_speed=1):
        """
        Absolute relative error restricted to observations >= `min_speed`.
        """
        if self._is_pandas(pred, true):
            mask = true[variable] >= min_speed
            return np.abs(self.bias_rel(pred[mask], true[mask]))
        pred = np.array(pred)
        true = np.array(true)
        mask = true >= min_speed
        return np.abs(self.bias_rel(pred[mask], true[mask]))

    def bias_direction(self, pred, true, **kwargs):
        """Smallest angular difference (degrees) between predicted and
        observed directions, elementwise."""
        diff1 = np.mod((pred - true), 360)
        diff2 = np.mod((true - pred), 360)
        if self._is_pandas(pred, true):
            # FIX: Series.min(level=0) was removed in pandas 2.0 —
            # use the equivalent groupby(level=0).min()
            return pd.concat([diff1, diff2]).groupby(level=0).min()
        return np.min([diff1, diff2], axis=0)

    def _select_metric(self, metric):
        # Dispatch table from metric name to bound callable. NOTE(review):
        # RMSE / mean_bias / abs_error_rel_wind_1 are not dispatchable here —
        # presumably by design of the callers; confirm before extending.
        if metric == "abs_error":
            metric_func = self.absolute_error
        elif metric == "bias":
            metric_func = self.bias
        elif metric == "abs_error_rel":
            metric_func = self.absolute_error_relative
        elif metric == "bias_rel":
            metric_func = self.bias_rel
        elif metric == "bias_rel_wind_1":
            metric_func = self.bias_rel_wind_1
        elif metric == "bias_direction":
            metric_func = self.bias_direction
        else:
            raise NotImplementedError(f"{metric} is not defined")
        return metric_func
##############################################################################
#######################bibliotecas
##############################################################################
import pandas as pd
import numpy as np
# from eod_historical_data import (get_api_key,
# get_eod_data,
# get_dividends,
# get_exchange_symbols,
# get_exchanges, get_currencies, get_indexes)
import datetime as dt
import requests_cache
# from io import StringIO
import requests
import io
# import yfinance as yf
from datetime import timedelta, datetime
##############################################################################
################update database
##############################################################################
def update_database(prices_segmento_base):
    """Update the prices database with the most recent EOD bulk data.

    Reads the saved base prices file, downloads the daily bulk quotes for
    every day after the last stored date, adjusts the Open/High/Low columns
    to the adjusted close, merges in segment information and appends the
    result to the base dataframe, which is written back to
    ``prices_segmento_base.json``.

    Parameters
    ----------
    prices_segmento_base : dataframe
        Prices dataframe.  NOTE(review): this argument is immediately
        overwritten by the JSON file read below — confirm whether the
        parameter is still needed.

    Returns
    -------
    pandas.DataFrame
        The updated prices dataframe.
    """
    # Cached HTTP session (avoids re-downloading the same day's bulk file).
    expire_after = dt.timedelta(days=1)
    session = requests_cache.CachedSession(cache_name='cache', backend='sqlite',
                                           expire_after=expire_after)
    # Read the last saved prices dataframe.
    prices_segmento_base = pd.read_json('prices_segmento_base.json')
    prices_segmento_base = prices_segmento_base[["data", "ativo", "segmento", "open adj", "low adj",
                                                 "high adj", "Adj Close", "Volume"]]
    # NOTE(review): hard-coded cut-off date — confirm it is still wanted.
    prices_segmento_base = prices_segmento_base[prices_segmento_base['data'] <= '2021-03-22']
    # Last stored date and the first date that must be fetched.
    ult_dt_prices_base = prices_segmento_base['data'].tail(1).iloc[0]
    inicio_new_prices = dt.datetime.strptime(ult_dt_prices_base, '%Y-%m-%d') + timedelta(days=1)
    hoje = dt.datetime.today()
    # Build the list of days that still need to be fetched.
    sdate = dt.datetime(2020, 1, 1)    # start date
    edate = dt.datetime(2030, 12, 31)  # end date
    date_range = pd.date_range(sdate, edate - timedelta(days=1), freq='d').to_frame()
    datas_df_new = date_range[(date_range.iloc[:, 0] >= inicio_new_prices) & (date_range.iloc[:, 0] <= hoje)]
    datas_new = datas_df_new.iloc[:, 0].to_list()
    # Check whether there is anything to update.
    if len(datas_new) == 0:
        print("não há dados para ser atualizado")
    else:
        print("executar o loop")
        # Download the bulk last-day file for each missing date.
        prices_new = []
        for data_atual in datas_new:
            # SECURITY: the API token is hard-coded in the URL; consider
            # moving it to an environment variable or a config file.
            url = "https://eodhistoricaldata.com/api/eod-bulk-last-day/SA?api_token=602ee71c4be599.37805282&date={}".format(data_atual)
            r = session.get(url)
            # BUGFIX: reuse the cached session's response body instead of
            # issuing a second, uncached request with requests.get().
            df_temp = pd.read_csv(io.StringIO(r.content.decode('utf-8')))
            df_temp = df_temp[df_temp.Open.notnull()]
            prices_new.append(df_temp)
        prices_new = pd.concat(prices_new)
        # Adjust Open/High/Low by the Adjusted_close / Close factor.
        prices_new = prices_new.set_index('Date')
        prices_new.index = pd.to_datetime(prices_new.index)

        def adjust(date, Close, Adjusted_close, in_col, rounding=4):
            '''Scale in_col by Adjusted_close/Close.

            If using forex or Crypto - Change the rounding accordingly!
            '''
            try:
                factor = Adjusted_close / Close
                return round(in_col * factor, rounding)
            except ZeroDivisionError:
                print('WARNING: DIRTY DATA >> {} Close: {} | Adj Close {} | in_col: {}'.format(date, Close, Adjusted_close, in_col))
                return 0
        prices_new['open adj'] = np.vectorize(adjust)(prices_new.index.date, prices_new['Close'],
                                                      prices_new['Adjusted_close'], prices_new['Open'])
        prices_new['high adj'] = np.vectorize(adjust)(prices_new.index.date, prices_new['Close'],
                                                      prices_new['Adjusted_close'], prices_new['High'])
        prices_new['low adj'] = np.vectorize(adjust)(prices_new.index.date, prices_new['Close'],
                                                     prices_new['Adjusted_close'], prices_new['Low'])
        prices_new = prices_new.sort_values(by=['Code', 'Date']).reset_index()
        prices_new.drop(columns=['Ex', 'Open', 'High', 'Low', 'Close'], inplace=True)
        prices_new = prices_new[['Date', 'Code', 'open adj', 'low adj', 'high adj', 'Adjusted_close', 'Volume']]
        prices_new = prices_new.rename(columns={'Date': 'data', 'Adjusted_close': 'Adj Close'})
        # Merge the new prices with the segment mapping.
        segmentos = pd.read_json("segmentos_eod - segmentos_eod.json")
        prices_segmento_new = prices_new.merge(segmentos, how='left', left_on='Code', right_on='ativo')
        prices_segmento_new = prices_segmento_new[prices_segmento_new['segmento'].notnull()]
        prices_segmento_new.drop(columns=['Code'], inplace=True)
        prices_segmento_new = prices_segmento_new[['data', 'ativo', 'segmento', 'open adj', 'low adj', 'high adj', 'Adj Close', 'Volume']]
        prices_segmento_new['data'] = prices_segmento_new['data'].dt.strftime('%Y-%m-%d')
        # Append the new rows to the base dataframe.
        # BUGFIX: DataFrame.append is deprecated (removed in pandas 2.x);
        # use pd.concat instead.
        prices_segmento_base = pd.concat([prices_segmento_base, prices_segmento_new])
        prices_segmento_base = prices_segmento_base.sort_values(by=['ativo', 'data'])
        # Save the updated base price dataframe.
        prices_segmento_base.reset_index(inplace=True)
        prices_segmento_base = prices_segmento_base[["data", "ativo", "segmento", "open adj", "low adj",
                                                     "high adj", "Adj Close", "Volume"]]
        prices_segmento_base.to_json(r'prices_segmento_base.json')
    return prices_segmento_base
# prices_segmento_base = pd.read_json('prices_segmento_base.json')
# prices_segmento_base = update_database(prices_segmento_base)
# ##############################################################################
# ################Cache session (to avoid too much data consumption)
# ##############################################################################
# expire_after = dt.timedelta(days=1)
# session = requests_cache.CachedSession(cache_name='cache', backend='sqlite',
# expire_after=expire_after)
# ##############################################################################
# ###################ler o df de preços base
# ##############################################################################
# ## le o price_segmento
# #segmentos = pd.read_json("segmentos_eod - segmentos_eod.json")
# # prices_segmento_base = pd.read_csv('prices_segmento_base.csv', index_col=0).reset_index()
# prices_segmento_base = pd.read_json('prices_segmento_base.json')
# prices_segmento_base = prices_segmento_base[["data", "ativo", "segmento", "open adj", "low adj",
# "high adj", "Adj Close", "Volume"]]
# prices_segmento_base = prices_segmento_base[prices_segmento_base['data']<='2021-03-18']
# # verificar a ultima data com dados
# ult_dt_prices_base = prices_segmento_base['data'].tail(1).iloc[0] # pega a última data do preço base
# inicio_new_prices = dt.datetime.strptime(ult_dt_prices_base, '%Y-%m-%d') + timedelta(days=1) # pega o próximo dia do último dia
# hoje = dt.datetime.today() # data mais recente
# # hoje = '2021-03-10'
# ##############################################################################
# ##################pegar os preços mais recentes
# ##############################################################################
# # pega a listad das novas datas
# sdate = dt.datetime(2020,1,1) # start date
# edate = dt.datetime(2030,12,31) # end date
# date_range = pd.date_range(sdate,edate-timedelta(days=1),freq='d').to_frame()
# datas_df_new = date_range[(date_range.iloc[:,0]>=inicio_new_prices)&(date_range.iloc[:,0]<=hoje)]
# datas_new = datas_df_new.iloc[:,0].to_list()
# ## checa se há dados a serem atualizados
# if len(datas_df_new.iloc[:,0])==0:
# print("não há dados para ser atualizado")
# else:
# print("executar o loop")
# # loop para pegar os prices new do eod
# prices_new = []
# for i in range(len(datas_new)):
# url="https://eodhistoricaldata.com/api/eod-bulk-last-day/SA?api_token=602ee71c4be599.37805282&date={}" .format(datas_new[i])
# r = session.get(url)
# s = requests.get(url).content
# df_temp = pd.read_csv(io.StringIO(s.decode('utf-8')))
# df_temp = df_temp[df_temp.Open.notnull()]
# prices_new.append(df_temp)
# prices_new = pd.concat(prices_new)
# ## tratamento de dado para pegar o HOL ajustado ao Adjusted Close
# prices_new = prices_new.set_index('Date')
# prices_new.index = pd.to_datetime(prices_new.index)
# def adjust(date, Close, Adjusted_close, in_col, rounding=4):
# '''
# If using forex or Crypto - Change the rounding accordingly!
# '''
# try:
# factor = Adjusted_close / Close
# return round(in_col * factor, rounding)
# except ZeroDivisionError:
# print('WARNING: DIRTY DATA >> {} Close: {} | Adj Close {} | in_col: {}'.format(date, Close, Adjusted_close, in_col))
# return 0
# prices_new['open adj'] = np.vectorize(adjust)(prices_new.index.date, prices_new['Close'],\
# prices_new['Adjusted_close'], prices_new['Open'])
# prices_new['high adj'] = np.vectorize(adjust)(prices_new.index.date, prices_new['Close'],\
# prices_new['Adjusted_close'], prices_new['High'])
# prices_new['low adj'] = np.vectorize(adjust)(prices_new.index.date, prices_new['Close'],\
# prices_new['Adjusted_close'], prices_new['Low'])
# prices_new = prices_new.sort_values(by=['Code','Date']).reset_index()
# prices_new.drop(columns={'Ex', 'Open', 'High', 'Low', 'Close'},inplace=True)
# prices_new = prices_new[['Date', 'Code','open adj', 'low adj','high adj', 'Adjusted_close','Volume']]
# prices_new = prices_new.rename(columns={'Date':'data', 'Adjusted_close':'Adj Close' })
# ## merge do prices_new com os segmentos
# segmentos = pd.read_json("segmentos_eod - segmentos_eod.json")
# prices_segmento_new = prices_new.merge(segmentos,how='left', left_on='Code',right_on='ativo')
# prices_segmento_new = prices_segmento_new[prices_segmento_new['segmento'].notnull()]
# prices_segmento_new.drop(columns={'Code'},inplace=True)
# prices_segmento_new = prices_segmento_new[['data','ativo','segmento','open adj', 'low adj', 'high adj', 'Adj Close','Volume']]
# prices_segmento_new['data']= prices_segmento_new['data'].dt.strftime('%Y-%m-%d')
# # append do prices_segmento com prices segmentos new
# prices_segmento_base = prices_segmento_base.append(prices_segmento_new)
# prices_segmento_base = prices_segmento_base.sort_values(by=['ativo','data'])
# # salva o df de preço base atualizado
# prices_segmento_base.reset_index(inplace=True)
# prices_segmento_base = prices_segmento_base[["data", "ativo", "segmento", "open adj", "low adj",
# "high adj", "Adj Close", "Volume"]]
# prices_segmento_base.to_json(r'prices_segmento_base.json')
# # prices_segmento_base_temp1 = pd.read_csv(r'prices_segmento_base.csv')
# # prices_segmento_base_temp1.to_json('prices_segmento_base_temp1.json')
# # prices_segmento_base_temp2 = pd.read_json("prices_segmento_base_temp1.json") | |
"""Model fitting engines
.. autosummary::
:toctree:
bayespy
numpy
"""
from . import bayespy
from . import numpy | |
import numpy as np
import os
import pickle
from delfi.summarystats.BaseSummaryStats import BaseSummaryStats
from scipy.signal import resample
class ChannelOmniStats(BaseSummaryStats):
    """SummaryStats class for Channel model.

    Calculates summary statistics based on PC reconstruction coefficients:
    each protocol's trace is least-squares-projected onto a precomputed
    principal-component basis (plus a constant offset column).
    """
    def __init__(self, seed=None):
        super().__init__(seed=seed)
        self.channel_type = 'k'
        path1 = os.path.dirname(__file__)
        # BUGFIX: close the pickle file after loading (the original leaked
        # the file handle by calling open() without ever closing it).
        with open(path1 + '/pca/pow1_sumstats_lfs.pkl', 'rb') as fh:
            self.pcs = pickle.load(fh)

    def calc(self, repetition_list):
        """Calculate summary statistics

        Parameters
        ----------
        repetition_list : list of dictionaries, one per repetition
            data list, returned by `gen` method of Simulator instance

        Returns
        -------
        np.array, 2d with n_reps x n_summary
        """
        stats = []
        protocols = ['v_act', 'v_inact', 'v_deact', 'v_ap', 'v_ramp']
        for r in range(len(repetition_list)):
            trace = repetition_list[r]
            for protocol in protocols:
                I = trace[protocol]['data']
                # PC basis for this protocol (key drops the 'v_' prefix),
                # augmented with a constant column for the offset term.
                a = self.pcs[protocol[2:]].pcs
                a = np.hstack((a, np.ones((a.shape[0], 1))))
                b = I.reshape(-1)
                coef, _, _, _ = np.linalg.lstsq(a, b, rcond=None)
                stats.append(coef)
        return np.asarray(stats).reshape(1, -1)
# coding: UTF-8
import numpy as np
import cPickle
import gzip
import random
import matplotlib.pyplot as plt
from copy import deepcopy
def relu(z):
    """Rectified linear unit: elementwise max(z, 0)."""
    return np.where(z > 0, z, 0)
def relu_prime(z):
    """Derivative of ReLU: 1 where z > 0, else 0 (0 at z == 0)."""
    return (z > 0).astype(float)
def sigmoid(z):
    """Logistic function, with the argument clipped to avoid overflow."""
    limit = 34.538776394910684
    clipped = np.clip(z, -limit, limit)
    return 1.0 / (1.0 + np.exp(-clipped))
def sigmoid_prime(z):
    """Derivative of the logistic function: s * (1 - s)."""
    s = sigmoid(z)
    return s * (1 - s)
def softmax(z):
    """Numerically stable softmax: shift by the max before exponentiating."""
    shifted = np.exp(z - np.max(z))
    return shifted / np.sum(shifted)
def vectorized_result(j):
    """Return a 10x1 one-hot column vector with a 1.0 at index j."""
    onehot = np.zeros((10, 1))
    onehot[j] = 1.0
    return onehot
def random_noise(data, d):
    """Return a copy of *data* where each of the 784 pixels is replaced by
    a uniform random value with probability d percent.

    Python 2 only (uses xrange).  Assumes each item of *data* is a pair
    whose first element is the 784-long pixel vector.
    """
    noised_data = deepcopy(data)
    for j in xrange(len(data)):
        for i in xrange(784):
            # d is a percentage: a pixel is randomised with probability d/100.
            if d > random.uniform(0,100):
                noised_data[j][0][i] = random.random()
    return noised_data
# Load the MNIST dataset (Python 2: cPickle, zip returns a list).
f = gzip.open('mnist.pkl.gz', 'rb')
tr_d, va_d, te_d = cPickle.load(f)
training_inputs = [np.reshape(x, (784, 1)) for x in tr_d[0]]  # flatten the pixels into a 784x1 column vector
training_results = [vectorized_result(y) for y in tr_d[1]]  # one-hot encode label n as a 10x1 vector
training_data = zip(training_inputs, training_results)  # pair up [pixels, label]
test_inputs = [np.reshape(x, (784, 1)) for x in te_d[0]]
test_data = zip(test_inputs, te_d[1])
#training_data = random_noise(training_data, 20)  # optionally add noise to the training data
class Network(object):
    """Fully connected feed-forward network trained with mini-batch SGD.

    Python 2 code (xrange and a print statement in SGD).  Hidden layers use
    the sigmoid activation; the output layer uses softmax (the commented
    lines switch to ReLU / sigmoid variants).
    """
    def __init__(self, sizes):
        # sizes: list of layer widths, e.g. [784, 30, 10].
        self.num_layers = len(sizes)
        self.sizes = sizes
        # One bias column vector per non-input layer; one weight matrix per
        # adjacent layer pair, initialised from a standard normal.
        self.biases = [np.random.randn(y, 1) for y in self.sizes[1:]]
        self.weights = [np.random.randn(y, x) for x, y in zip(self.sizes[:-1], self.sizes[1:])]
    def SGD(self, training_data, epoches, mini_batch_size, eta, noise, test_data=None):
        """Train with mini-batch SGD and return the best test score seen.

        noise is the percentage of test pixels to randomise before
        evaluation (see random_noise).
        """
        noised_test_data = self.random_noise(test_data, noise)
        max_rate = 0
        n_test = len(test_data)
        n = len(training_data)
        for j in xrange(epoches):
            shufful_training_data = np.random.permutation(training_data)  # shuffle training_data
            mini_batches = [shufful_training_data[k:k+mini_batch_size] for k in xrange(0, n, mini_batch_size)]  # split into mini-batches
            for mini_batch in mini_batches:
                self.update_mini_batch(mini_batch, eta)
            eva = self.evaluate(noised_test_data)
            print "Epoch {0}: {1} / {2}".format(j, eva, n_test)
            max_rate = max(max_rate, eva)
        print(max_rate)
        return max_rate
    def random_noise(self, test_data, d):
        # Replace each input pixel with a uniform random value with
        # probability d percent (same scheme as the module-level helper).
        noised_test_data = deepcopy(test_data)
        for j in xrange(len(test_data)):
            for i in xrange(self.sizes[0]):
                if d > random.uniform(0,100):
                    noised_test_data[j][0][i] = random.random()
        return noised_test_data
    def update_mini_batch(self, mini_batch, eta):
        nabla_b = [np.zeros(b.shape) for b in self.biases]  # zero vectors shaped like biases
        nabla_w = [np.zeros(w.shape) for w in self.weights]  # zero matrices shaped like weights
        for x, y in mini_batch:  # x: pixels, y: one-hot label
            delta_nabla_b, delta_nabla_w = self.backprop(x, y)  # backprop on each sample
            nabla_b = [nb+dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]  # accumulate bias gradients
            nabla_w = [nw+dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]  # accumulate weight gradients
        # Average gradient step over the mini-batch.
        self.biases = [b - (eta/len(mini_batch)) * nb for b, nb in zip(self.biases, nabla_b)]
        self.weights = [w - (eta/len(mini_batch)) * nw for w, nw in zip(self.weights, nabla_w)]
    def backprop(self, x, y):
        """Return (nabla_b, nabla_w): per-layer gradients for one sample."""
        nabla_b = [np.zeros(b.shape) for b in self.biases]
        nabla_w = [np.zeros(w.shape) for w in self.weights]
        # Forward pass.
        activation = x
        activations = []  # store each layer's output
        zs = []  # store each layer's weighted input
        for b, w in zip(self.biases[:-1], self.weights[:-1]):
            activations.append(activation)
            z = np.dot(w, activation)+b
            zs.append(z)
            activation = sigmoid(z)  # hidden layers: sigmoid
            #activation = relu(z)  # hidden layers: ReLU
        activations.append(activation)
        z = np.dot(self.weights[-1], activation)+self.biases[-1]
        zs.append(z)
        #activations.append(sigmoid(z))  # output layer: sigmoid
        activations.append(softmax(z))  # output layer: softmax
        # Backward pass.
        #delta = (activations[-1]-y)*sigmoid_prime(zs[-1])  # output layer: sigmoid
        delta = activations[-1] - y  # output layer: softmax (cross-entropy delta)
        nabla_b[-1] = delta
        nabla_w[-1] = np.dot(delta, activations[-2].transpose())
        for j in xrange(2, self.num_layers):
            z = zs[-j]
            pr = sigmoid_prime(z)  # hidden layers: sigmoid
            #pr = relu_prime(z)  # hidden layers: ReLU
            delta = np.dot(self.weights[-j+1].transpose(), delta) * pr
            nabla_b[-j] = delta
            nabla_w[-j] = np.dot(delta, activations[-j-1].transpose())
        return (nabla_b, nabla_w)
    def evaluate(self, test_data):
        # Count the number of correctly classified test samples.
        test_results = [(np.argmax(self.feedforward(x)), y)
                        for (x, y) in test_data]
        return sum(int(x == y) for (x, y) in test_results)
    def feedforward(self, a):
        # Forward pass only; note this applies sigmoid at the output layer,
        # which preserves the argmax used by evaluate().
        for b, w in zip(self.biases, self.weights):
            a = sigmoid(np.dot(w, a)+b)  # hidden layers: sigmoid
            #a = relu(np.dot(w, a)+b)  # hidden layers: ReLU
        return a
def plot_rate(y):
    """Plot accuracy vs hidden-layer width for the three noise levels."""
    hidden_sizes = np.arange(20, 150, 10)
    fig = plt.figure()
    axis = fig.add_subplot(1, 1, 1)
    axis.set_ylim(6000, 10000)
    for idx, noise_label in enumerate((0, 12.5, 25)):
        axis.plot(hidden_sizes, y[idx], label=noise_label)
    axis.legend()
    axis.set_xlabel("the number of neurons in a hidden layer")
    axis.set_ylabel("accuracy")
    axis.set_title("4 Layers NN (sigmoid-softmax)")
def nn3(eta):
    """Sweep the hidden-layer width (20..140) and three noise levels,
    training a network for each combination and plotting the results.

    Python 2 only (xrange); uses the module-level training_data/test_data.
    """
    res = [[],[],[]]
    for j in xrange(20,150,10):  # hidden layer width
        print(j)
        for n in xrange(3):
            print(n*12.5)  # noise level: 0, 12.5, 25 percent
            #net = Network([784, j, 10])  # 3-layer network
            net = Network([784, j, j, 10])  # 4-layer network
            print(net.sizes)
            res[n].append(net.SGD(training_data, 10, 10, eta, n*12.5, test_data))
    print(res)
    plot_rate(res)
    print("OK")
from __future__ import division
import numpy as np
from scipy import signal , linalg
from scipy.linalg import cho_factor, cho_solve
#from sep import extract
# Design matrix for a quadratic surface fit over a 3x3 pixel neighbourhood:
# rows are the monomials x^2, y^2, x*y, x, y, 1 evaluated at the 9 offsets.
x, y = np.meshgrid(range(-1, 2), range(-1, 2), indexing="ij")
x, y = x.flatten(), y.flatten()
AT = np.vstack((x*x, y*y, x*y, x, y, np.ones_like(x)))
C = np.identity(9)
# Normal-equation matrix (with C as the per-pixel covariance; currently the
# identity), Cholesky-factored once so fit_3x3 can reuse it.
ATA = np.dot(AT, np.dot(np.linalg.inv(C) , AT.T))
factor = cho_factor(ATA, overwrite_a=True)
#ATA = np.dot(AT, AT.T)
#factor = cho_factor(ATA, overwrite_a=True)
def fit_3x3(im):
    """Sub-pixel peak offset from a quadratic fit to a 3x3 image patch.

    Solves the precomputed normal equations for the quadratic surface
    coefficients and returns the (x, y) position of its extremum.
    """
    rhs = np.dot(AT, np.dot(np.linalg.inv(C), im.flatten()))
    a, b, c, d, e, f = cho_solve(factor, rhs)
    denom = 1. / (4 * a * b - c * c)
    x_peak = (c * e - 2 * b * d) * denom
    y_peak = (c * d - 2 * a * e) * denom
    return x_peak, y_peak
def makeGaussian(size, FWHM=3, e=0, center=None):
    """Return a size x size Gaussian image.

    FWHM is converted to a standard deviation; e elongates the profile
    along y.  If center is None the Gaussian is centred on the image.
    """
    sigma = FWHM / 2.35482  # FWHM -> standard deviation
    xs = np.linspace(0.5, size - .5, size)
    ys = xs[:, np.newaxis]
    if center is None:
        x0 = y0 = size / 2.
    else:
        x0, y0 = center[0], center[1]
    stretch = (1. + np.abs(e)) ** 2. / (1. - np.abs(e)) ** 2.
    r = ((xs - x0) ** 2. + ((ys - y0) ** 2.) * stretch) ** .5
    norm = 1. / (2. * np.pi * sigma ** 2.)
    return norm * np.exp((-1. * r ** 2.) / (2 * sigma ** 2.))
def MAD(a, axis=None):
    """Compute the median absolute deviation (scaled by 1/0.6745 so it
    estimates the standard deviation for normal data)."""
    arr = np.array(a, copy=False)
    med = np.median(arr, axis=axis)
    if axis is not None:
        # Re-broadcast the median so it subtracts along the reduced axis.
        med = med.reshape(list(med.shape) + [1])
    return np.median(np.abs(arr - med), axis=axis) / 0.6745
def find_centroid(data):
    """Locate the brightest source in *data* with sub-pixel precision.

    The image is smoothed with a Gaussian kernel, the brightest pixel is
    found, and a quadratic fit to the surrounding 3x3 patch refines the
    position.  Returns the (x, y) offset from the image centre.
    """
    # BUGFIX: np.linspace requires an integer sample count in modern NumPy,
    # so the kernel size is passed as int (was the float 17.).  Also removed
    # the unused locals `size` and `zero`.
    kernel = makeGaussian(17, 6., 0, np.array([8.5, 8.5]))
    img = signal.convolve2d(data, kernel, mode="same")
    xi, yi = np.unravel_index(np.argmax(img), img.shape)
    # Refine only when the peak is not on the border (a full 3x3 patch fits).
    if (xi >= 1 and xi < img.shape[0] - 1 and yi >= 1 and yi < img.shape[1] - 1):
        ox, oy = fit_3x3(img[xi-1:xi+2, yi-1:yi+2])
    else:
        ox, oy = 0., 0.
    return ox + xi + .5 - data.shape[0]/2., oy + yi + .5 - data.shape[1]/2.
if __name__ == "__main__":
    # Function-call form: identical output under Python 2 (parenthesised
    # expression) and valid under Python 3, unlike the old print statement.
    print('c3 main')
import time
import serial
import re
from matplotlib import pyplot as plt
import numpy as np
from matplotlib import style
import numpy
import openpyxl
from openpyxl import Workbook
# Set up the serial line to the data-acquisition board.
# NOTE(review): the port 'COM8' and the 9600 baud rate are hard-coded —
# confirm they match the target machine.
ser = serial.Serial('COM8', 9600)
print(ser)
time.sleep(3)
# Accumulators for each value parsed from the serial stream.
CO2Final = []
TimeFinal = []
A0_A4V_Final = []
A1_A5V_Final = []
A2_A6V_Final = []
Sensor_A0_A4V_Final = []
Sensor_A1_A5V_Final = []
Sensor_A2_A6V_Final = []
# Read 5 samples from the serial line, live-plotting each as it arrives.
for i in range(5):
    b = ser.readline()  # read a byte string
    string_n = b.decode()
    FullRow = string_n.rstrip()  # remove \n and \r
    SplitFullRow = re.split(',', FullRow)
    # The row alternates labels and values; values sit at the odd indices.
    # NOTE(review): assumes the device always sends at least 16 comma-
    # separated fields — confirm against the firmware.
    TotalTime = float(SplitFullRow[1])
    CO2 = float(SplitFullRow[3])
    A0_A4V = float(SplitFullRow[5])
    A1_A5V = float(SplitFullRow[7])
    A2_A6V = float(SplitFullRow[9])
    Sensor_A0_A4V = float(SplitFullRow[11])
    Sensor_A1_A5V = float(SplitFullRow[13])
    Sensor_A2_A6V = float(SplitFullRow[15])
    # Append each value to its accumulator list.
    TimeFinal.append(TotalTime)
    CO2Final.append(CO2)
    A0_A4V_Final.append(A0_A4V)
    A1_A5V_Final.append(A1_A5V)
    A2_A6V_Final.append(A2_A6V)
    Sensor_A0_A4V_Final.append(Sensor_A0_A4V)
    Sensor_A1_A5V_Final.append(Sensor_A1_A5V)
    Sensor_A2_A6V_Final.append(Sensor_A2_A6V)
    print( SplitFullRow)
    # Figure 1: the three voltage channels vs time.
    plt.figure(1)
    plt.title("Voltage vs time in Seconds")
    plt.subplot(311)
    plt.plot(TotalTime, A0_A4V, 'ro--')
    plt.ylabel("Sensor A0_A4v")
    plt.subplot(312)
    plt.plot(TotalTime, A1_A5V, 'ko--')
    plt.ylabel("Sensor A1_A5v")
    plt.subplot(313)
    plt.plot(TotalTime, A2_A6V, 'o--')
    plt.ylabel("Sensor A2_A6v")
    plt.xlabel("Time dif in hrs")
    plt.pause(5)
    # Figure 2: the three raw sensor channels vs time.
    plt.figure(2)
    plt.title("Sensor_values vs time in Seconds")
    plt.subplot(311)
    plt.plot(TotalTime, Sensor_A0_A4V, 'ro--')
    plt.ylabel("Voltage A0_A4v")
    plt.subplot(312)
    plt.plot(TotalTime, Sensor_A1_A5V, 'ko--')
    plt.ylabel("Voltage A1_A5v")
    plt.subplot(313)
    plt.plot(TotalTime, Sensor_A2_A6V, 'o--')
    plt.ylabel("Voltage A2_A6v")
    plt.xlabel("Time dif in hrs")
    plt.pause(5)
plt.show()
print(CO2Final)
print(TimeFinal)
# Save the collected data to an Excel workbook.
wb = Workbook()
ws = wb.active
ws.title = "Changed Sheet"
# Header row: label columns alternate with "-value" columns; the data loop
# below fills only the odd-numbered (label) columns.
ws['A1'] = "Time"
ws['B1'] = "Co2"
ws['C1'] = "CO2-Value"
ws['D1'] = "A0/A4V"
ws['E1'] = "A0/A4V-value"
ws['F1'] = "A1/A5V"
ws['G1'] = "A1/A5V-value"
ws['H1'] = "A2/A6V"
ws['I1'] = "A2/A6V-value"
ws['J1'] = "Sensor-A0/A4V"
ws['K1'] = "Sensor-A0/A4V-value"
ws['L1'] = "Sensor-A1/A5V"
ws['M1'] = "Sensor-A1/A5V-value"
ws['N1'] = "Sensor-A2/A6V"
ws['O1'] = "Sensor-A2/A6V-value"
# One data row per sample (5 samples were read above).
for i in range(5):
    ws.cell(row=i+2, column=1).value = TimeFinal[i]
    ws.cell(row=i+2, column=3).value = CO2Final[i]
    ws.cell(row=i + 2, column=5).value = A0_A4V_Final[i]
    ws.cell(row=i + 2, column=7).value = A1_A5V_Final[i]
    ws.cell(row=i + 2, column=9).value = A2_A6V_Final[i]
    ws.cell(row=i + 2, column=11).value = Sensor_A0_A4V_Final[i]
    ws.cell(row=i + 2, column=13).value = Sensor_A1_A5V_Final[i]
    ws.cell(row=i + 2, column=15).value = Sensor_A2_A6V_Final[i]
# Ask the user for a filename, save the workbook and close the port.
val = input("what should be the name of file: ")
print(val)
wb.save(filename = val)
ser.close()
import datetime
import json
import logging
import os
import pickle
import pprint
from copy import deepcopy

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import ray
from IPython.display import clear_output
from ray import tune
from ray.tune.logger import pretty_print
from ray.tune.registry import register_env
from ray.rllib.agents.ppo.ppo import PPOTrainer

import genetic as ga
from envs.particle_rllib.environment import ParticleEnv
from logger import info_logger, results_logger
"""## Helper functions"""
# Function that creates the environment
def create_env_fn(env_context=None):
    """Build a ParticleEnv from the module-level configuration values."""
    env = ParticleEnv(n_listeners=n_listeners,
                      n_landmarks=n_landmarks,
                      render_enable=render_enable)
    return env
# Function that maps a policy to its agent id
def policy_mapping_fn(agent_id):
    """Map an agent id to its policy: manager* ids get the manager policy,
    every other id gets the worker policy."""
    is_manager = agent_id.startswith('manager')
    return "manager_policy" if is_manager else "worker_policy"
"""## Parameters"""
# genetic algorithm parameters
n_pop=250
r_cross=0.9
r_mut=0.9
# training parameters
training_algo = "PPO"
env_name = "ParticleManagerListeners"
n_epochs = 10
n_episodes = 3000 # number of episodes in one epoch
n_steps = 25 # number of steps in one episode
learning_rate = 5e-4
tau = 0.01 # for updating the target network
gamma = 0.75 # discount factor
replay_buffer_size = 10000000
batch_size = 1024
hidden_layers = [16, 16]
# environment config parameters
n_listeners = 1
n_landmarks = 12
render_enable = False
# convergence parameters
window_size = 5 # size of the sliding window
min_rel_delta_reward = 0.02 # minimum acceptable variation of the reward
savedata_dir = './savedata/' # savedata directory
checkpoint_dir = './checkpoints/' # checkpoints directory
restore_checkpoint_n = 10
# Create savedata directory
if not os.path.exists(savedata_dir):
os.makedirs(savedata_dir)
# Create the checkpoint directory
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
"""## Trainers configuration"""
env = create_env_fn()
# According to environment implementation, there exists a different action space and observation space for each agent,
# action_space[0] (resp. observations_space[0]) is allocated for the manager, while the others are allocated for the workers
manager_action_space = env.action_space[0]
manager_observation_space = env.observation_space[0]
worker_action_space = env.action_space[1]
worker_observation_space = env.observation_space[1]
policies = {
"manager_policy": (None, manager_observation_space, manager_action_space, {"lr": 0.0,}),
"worker_policy": (None, worker_observation_space, worker_action_space, {"lr": learning_rate,})
}
training_config = {
"num_workers": 8,
"gamma": gamma,
"horizon": n_steps,
"train_batch_size": batch_size,
"model": {
"fcnet_hiddens": hidden_layers
},
"multiagent": {
"policies": policies,
"policy_mapping_fn": policy_mapping_fn,
"policies_to_train": list(policies.keys())
},
"no_done_at_end": True,
"log_level": "ERROR"
}
# Initialize and register the environment
register_env(env_name, create_env_fn)
def objective(i, individual):
    """GA fitness: total manager reward accrued while training the workers.

    Builds a fresh PPO trainer, overwrites the manager policy weights with
    the GA *individual*, trains for ``n_episodes`` episodes and returns the
    accumulated mean manager reward (weighted by episodes per iteration).

    Parameters
    ----------
    i : int
        Index of the individual (used only in the progress message).
    individual :
        GA genome, converted to manager weights by
        ``ga.convert_individual_to_manager_weights``.
    """
    print("Starting evaluation of the individual {}".format(i+1))
    elapsed_episodes = 0
    manager_total_reward = 0
    register_env(env_name, create_env_fn)
    trainer = PPOTrainer(env=env_name, config=training_config)
    # Inject the GA individual as the manager policy's weights.
    weights = trainer.get_weights()
    weights['manager_policy'] = ga.convert_individual_to_manager_weights(individual, weights['manager_policy'])
    trainer.set_weights(weights)
    # Loop for n_episodes
    while elapsed_episodes < n_episodes:
        result = trainer.train()
        elapsed_episodes = result['episodes_total']
        # Weight the per-iteration mean by how many episodes it covers.
        manager_total_reward += (result['policy_reward_mean']['manager_policy'] * result['episodes_this_iter'])
        print(pretty_print(result))
    trainer.stop()
    clear_output()
    return manager_total_reward
def genetic_algorithm(example, n_gen, n_pop, r_cross, r_mut, restore=False):
    """Run the genetic algorithm over manager-policy weight individuals.

    Parameters
    ----------
    example :
        Example manager weights used to size random individuals.
    n_gen, n_pop : int
        Number of generations and population size.
    r_cross, r_mut : float
        Crossover and mutation rates.
    restore : bool
        If True, resume from the pickled checkpoints on disk.

    Returns
    -------
    list
        ``[best_individual, best_score]``.
    """
    if restore:
        # Resume population, reward history and best individual from disk.
        with open(checkpoint_dir + "population", 'rb') as fp:
            pop = pickle.load(fp)
        with open(savedata_dir + "epoch-manager-rewards", 'rb') as fp:
            epoch_manager_rewards = pickle.load(fp)
        with open(checkpoint_dir + "best-individual", 'rb') as fp:
            best = pickle.load(fp)
        best_eval = epoch_manager_rewards[-1]
        gen = len(epoch_manager_rewards) + 1
    else:
        # initial population of random bitstring
        pop = [ga.generate_random_individual(example=example) for _ in range(n_pop)]
        # BUGFIX: seed `best` with the first individual instead of the int 0,
        # so the function can never return 0 when no candidate ever beats
        # the initial score.
        best, best_eval = pop[0], objective(-1, pop[0])
        # best total reward of the manager per each epoch
        epoch_manager_rewards = []
        gen = 1
    # enumerate generations
    while gen <= n_gen:
        info_logger.info("Current generation: {}".format(gen))
        # evaluate all candidates in the population
        scores = [objective(i, c) for (i, c) in enumerate(pop)]
        # check for new best solution
        for i in range(n_pop):
            if scores[i] > best_eval:
                best, best_eval = pop[i], scores[i]
                results_logger.info("Generation: {}".format(gen))
                results_logger.info("\tbest score = {:.3f}".format(best_eval))
        # save checkpoint (the former .format(gen) on this placeholder-free
        # filename was a no-op and has been removed)
        with open(checkpoint_dir + "best-individual", 'wb') as fp:
            pickle.dump(best, fp)
        info_logger.info("Saved checkpoint after the evaluation of the generation {}".format(gen))
        epoch_manager_rewards.append(best_eval)
        with open(savedata_dir + "epoch-manager-rewards", 'wb') as fp:
            pickle.dump(epoch_manager_rewards, fp)
        plt.plot(epoch_manager_rewards)
        plt.xlabel('Generation')
        plt.ylabel('Reward')
        plt.show()
        # select parents
        selected = [ga.selection(pop, scores, k=(n_pop//10)) for _ in range(n_pop)]
        # create the next generation
        children = list()
        for i in range(0, n_pop, 2):
            # get selected parents in pairs
            p1, p2 = selected[i], selected[i+1]
            # crossover and mutation
            for c in ga.crossover(p1, p2, r_cross):
                ga.mutation(c, r_mut)  # mutation
                children.append(c)  # store for next generation
        # replace population
        pop = children
        with open(checkpoint_dir + "population", 'wb') as fp:
            pickle.dump(pop, fp)
        gen += 1
    return [best, best_eval]
ray.init()
# Build a throwaway trainer to inspect the config and grab example weights.
trainer = PPOTrainer(env=env_name, config=training_config)
# Print the current configuration
pp = pprint.PrettyPrinter(indent=4)
# BUGFIX: corrected the "configiguration" typo in the banner text.
print("Current configuration\n-----------------------")
pp.pprint(trainer.get_config())
print("-----------------------\n")
# Example manager weights used to size the GA individuals.
manager_weights_ex = trainer.get_weights()['manager_policy']
trainer.stop()
best, best_eval = genetic_algorithm(example=manager_weights_ex,
                                    n_gen=n_epochs,
                                    n_pop=n_pop,
                                    r_cross=r_cross,
                                    r_mut=r_mut,
                                    restore=False)
ray.shutdown()
import pathlib
from functools import partial
from itertools import tee
import matplotlib.pyplot as plt
import numpy as np
import xarray as xr
from matplotlib.patches import Patch
from scipy.interpolate import interp1d
def pairwise(iterable):
    "s -> (s0,s1), (s1,s2), (s2, s3), ..."
    first, second = tee(iterable)
    next(second, None)
    return zip(first, second)
def plot_strat(
    filename,
    color_water=(0.8, 1.0, 1.0),
    color_land=(0.8, 1.0, 0.8),
    color_shoreface=(0.8, 0.8, 0.0),
    color_shelf=(0.75, 0.5, 0.5),
    layer_line_width=0.5,
    layer_line_color="k",
    layer_start=0,
    layer_stop=-1,
    n_layers=5,
    title="(unknown)",
    x_label="Distance (m)",
    y_label="Elevation (m)",
    legend_location="lower left",
):
    """Plot a stratigraphic cross-section from an output dataset.

    Fills the water, land, shoreface and shelf regions (passing a falsy
    colour disables that fill), overlays a subset of layer lines and shows
    the figure.

    Parameters
    ----------
    filename : str or Path
        Path to an xarray-readable dataset containing layer thicknesses,
        shore / shelf-edge positions and bedrock elevation.
    color_water, color_land, color_shoreface, color_shelf :
        Fill colours; a falsy value skips that region.
    layer_line_width, layer_line_color :
        Style of the drawn layer lines.
    layer_start, layer_stop : int
        Inclusive range of layers to draw; a negative stop counts from the
        end.
    n_layers : int
        Approximate number of layer lines to draw.
    title, x_label, y_label, legend_location :
        Figure annotations; *title* may contain ``{filename}``.
    """
    plot_land = bool(color_land)
    plot_shoreface = bool(color_shoreface)
    plot_shelf = bool(color_shelf)
    filename = pathlib.Path(filename)
    legend_item = partial(Patch, edgecolor="k", linewidth=0.5)
    with xr.open_dataset(filename) as ds:
        n_times = ds.dims["time"]
        thickness_at_layer = ds["at_layer:thickness"][:n_times]
        x_of_shore = ds["at_grid:x_of_shore"].data.squeeze()
        x_of_shelf_edge = ds["at_grid:x_of_shelf_edge"].data.squeeze()
        bedrock = ds["at_node:bedrock_surface__elevation"].data.squeeze()
        try:
            x_of_stack = ds["x_of_cell"].data.squeeze()
        except KeyError:
            # Files without cell coordinates fall back to plain indices.
            x_of_stack = np.arange(ds.dims["cell"])
        # Layer elevations: cumulative thickness stacked on the final
        # bedrock surface (edge nodes trimmed with 1:-1).
        elevation_at_layer = bedrock[-1, 1:-1] + np.cumsum(thickness_at_layer, axis=0)
        stack_of_shore = np.searchsorted(x_of_stack, x_of_shore)
        stack_of_shelf_edge = np.searchsorted(x_of_stack, x_of_shelf_edge)
        # Everything seaward of the final shore position is under water.
        water = x_of_stack > x_of_shore[-1]
        x_water = x_of_stack[water]
        y_water = elevation_at_layer[-1, water]
        if layer_stop < 0:
            layer_stop = len(elevation_at_layer) + layer_stop + 1
        layers_to_plot = _get_layers_to_plot(layer_start, layer_stop, num=n_layers)
        if color_water:
            plt.fill_between(
                x_water, y_water, np.full_like(x_water, y_water[0]), fc=color_water
            )
            plt.plot([x_water[0], x_water[-1]], [y_water[0], y_water[0]], color="k")
        if plot_land:
            fill_between_layers(
                x_of_stack,
                elevation_at_layer,
                lower=None,
                upper=stack_of_shore,
                fc=color_land,
            )
        if plot_shoreface:
            fill_between_layers(
                x_of_stack,
                elevation_at_layer,
                lower=stack_of_shore,
                upper=stack_of_shelf_edge,
                fc=color_shoreface,
            )
        if plot_shelf:
            fill_between_layers(
                x_of_stack,
                elevation_at_layer,
                lower=stack_of_shelf_edge,
                upper=None,
                fc=color_shelf,
            )
        if layers_to_plot:
            plt.plot(
                x_of_stack,
                elevation_at_layer[layers_to_plot].T,
                color=layer_line_color,
                linewidth=layer_line_width,
            )
        if legend_location:
            items = [
                ("Land", color_land),
                ("Shoreface", color_shoreface),
                ("Shelf", color_shelf),
            ]
            legend = [legend_item(label=label, fc=color) for label, color in items if color]
            legend and plt.legend(handles=legend, loc=legend_location)
        plt.xlabel(x_label)
        plt.ylabel(y_label)
        plt.title(title.format(filename=filename.name))
        plt.xlim((x_of_stack[0], x_of_stack[-1]))
        plt.show()
def _get_layers_to_plot(start, stop, num=-1):
    """Return a slice selecting about *num* layers in [start, stop), or
    None when num == 0.  A negative or too-large num means "all layers"."""
    if num == 0:
        return None
    total = stop - start + 1
    if num < 0 or num > total:
        num = total
    return slice(start, stop, int(total / num))
def fill_between_layers(x, y, lower=None, upper=None, fc=None):
    """Fill the regions between successive layers with colour *fc*.

    Parameters
    ----------
    x : array
        Horizontal coordinates of the layer stacks.
    y : 2-D array
        One row of elevations per layer.
    lower, upper : arrays of int or None
        Per-layer stack-index bounds of the fill; None means the full
        extent (0 and len(x) - 1 respectively).
    fc :
        Fill colour passed to matplotlib.
    """
    n_layers = len(y)
    if lower is None:
        lower = np.zeros(n_layers, dtype=int)
    if upper is None:
        upper = np.full(n_layers, len(x) - 1)
    for layer in range(n_layers - 1):
        # Outline the polygon between layer and layer + 1, then fill it.
        xi, yi = outline_layer(
            x,
            y[layer],
            y[layer + 1],
            bottom_limits=(lower[layer], upper[layer]),
            top_limits=(lower[layer + 1], upper[layer + 1]),
        )
        plt.fill(xi, yi, fc=fc)
def outline_layer(
    x, y_of_bottom_layer, y_of_top_layer, bottom_limits=None, top_limits=None
):
    """Build the closed polygon outlining the region between two layers.

    Walks the top layer right-to-left, down the left edge, the bottom layer
    left-to-right and up the right edge, interpolating along the slanted
    side edges where the two layers' index extents differ.

    Parameters
    ----------
    x : array
        Horizontal coordinates.
    y_of_bottom_layer, y_of_top_layer : arrays
        Elevations of the bounding layers.
    bottom_limits, top_limits : (int, int) or None
        Index limits of each layer; None entries default to the full range.

    Returns
    -------
    tuple of arrays
        The (x, y) vertices of the closed outline.
    """
    if bottom_limits is None:
        bottom_limits = (None, None)
    if top_limits is None:
        top_limits = (None, None)
    # Fill defaulted limit entries with the full index range.
    bottom_limits = (
        bottom_limits[0] if bottom_limits[0] is not None else 0,
        bottom_limits[1] if bottom_limits[1] is not None else len(x) - 1,
    )
    top_limits = (
        top_limits[0] if top_limits[0] is not None else 0,
        top_limits[1] if top_limits[1] is not None else len(x) - 1,
    )
    # Top edge traversed right-to-left so the polygon winds consistently.
    is_top = slice(top_limits[1], top_limits[0], -1)
    x_of_top = x[is_top]
    y_of_top = y_of_top_layer[is_top]
    is_bottom = slice(bottom_limits[0], bottom_limits[1])
    x_of_bottom = x[is_bottom]
    y_of_bottom = y_of_bottom_layer[is_bottom]
    # Left edge: interpolate between the layers across the index mismatch.
    if top_limits[0] > bottom_limits[0]:
        step = -1
        is_left = slice(bottom_limits[0], top_limits[0] + 1)
    else:
        step = 1
        is_left = slice(top_limits[0], bottom_limits[0] + 1)
    x_of_left = x[is_left]
    y_of_left = interp_between_layers(
        x_of_left[::step],
        y_of_top_layer[is_left][::step],
        y_of_bottom_layer[is_left][::step],
    )
    # Drop the last vertex to avoid duplicating the next edge's first one.
    x_of_left = x_of_left[::step][:-1]
    y_of_left = y_of_left[:-1]
    # Right edge: same construction on the other side.
    if bottom_limits[1] > top_limits[1]:
        step = -1
        is_right = slice(top_limits[1], bottom_limits[1] + 1)
    else:
        step = 1
        is_right = slice(bottom_limits[1], top_limits[1] + 1)
    x_of_right = x[is_right]
    y_of_right = interp_between_layers(
        x_of_right[::step],
        y_of_bottom_layer[is_right][::step],
        y_of_top_layer[is_right][::step],
    )
    x_of_right = x_of_right[::step][:-1]
    y_of_right = y_of_right[:-1]
    return (
        np.r_[x_of_top, x_of_left, x_of_bottom, x_of_right],
        np.r_[y_of_top, y_of_left, y_of_bottom, y_of_right],
    )
def interp_between_layers(x, y_of_bottom, y_of_top, kind="linear"):
    """Blend from the bottom layer to the top layer along x.

    The blend weight goes from 0 at x[0] to 1 at x[-1] (interpolated with
    *kind*).  Empty input returns an empty float array; a single point
    returns the bottom value unchanged.
    """
    x = np.asarray(x)
    bottom = np.asarray(y_of_bottom)
    top = np.asarray(y_of_top)
    assert len(top) == len(bottom) == len(x)
    if len(x) == 0:
        return np.array([], dtype=float)
    if len(x) == 1:
        return bottom
    weights = interp1d((x[0], x[-1]), (0.0, 1.0), kind=kind)(x)
    return bottom + (top - bottom) * weights
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.optimize import curve_fit
from scipy import stats
import re
import os
def plot_approx(X_data, Y_data, input_function, plot_name='plot_name', plot_title='plot_title', x_label='x_label', y_label='y_label', Y_absolute_sigma = 0, scientific_view = True, print_cross = True, save_as_csv = False, to_latex = False, save_fig=True):
    """Fit one or more datasets with a model curve, plot them, and report
    the fitted coefficients with their uncertainties.

    Parameters:
        X_data, Y_data : 2-D array-likes; row i is dataset i.
        input_function : preset name ('linear', 'poly_2', 'poly_3', 'exp',
            'ln') or a custom expression in x with coefficients a0..a9
            (supports 'e^', 'ln', 'log' notation).
        plot_name : base name of the saved PNG (under pictures/).
        plot_title, x_label, y_label : figure labels.
        Y_absolute_sigma : forwarded to curve_fit's ``absolute_sigma`` flag.
        scientific_view : use scientific notation in the legend/tables.
        print_cross : draw Y error bars equal to the fit standard error.
        save_as_csv, to_latex, save_fig : optional file outputs.

    Side effects: shows a matplotlib figure; prints coefficient tables;
    optionally writes PNG/CSV/TeX files to the working directory.

    WARNING: the expression is executed with eval(); never pass untrusted
    input as ``input_function``.
    """
    X_data = np.array(X_data)
    Y_data = np.array(Y_data)
    num_of_datasets = np.shape(X_data)[0]
    # Translate the requested function into a numpy-evaluable expression.
    fun_ex = {'linear': 'a0*x+a1', 'poly_2': 'a0*x**2+a1*x+a2', 'poly_3': 'a0*x**3+a1*x**2+a2*x+a3', 'exp': 'e^(a0*x+a1)+a2', 'ln': 'ln(a0*x+a1)+a2'}
    # Presets map to their expression; anything else is taken verbatim.
    # (The original reached the same state via a deliberate KeyError
    # fall-through of two chained dict lookups.)
    inp = fun_ex.get(input_function, input_function)
    fun = inp.replace('e', 'np.e')
    fun = fun.replace('^', '**')
    fun = fun.replace('log', 'np.log10')
    fun = fun.replace('ln', 'np.log')
    # Number of DISTINCT coefficients a0..a9; using a set fixes the bug
    # where a coefficient used twice was counted twice.
    num_coef = len(set(re.findall('a[0-9]', fun)))
    # One lambda per arity; each evaluates the prepared expression.
    approx1 = lambda x, a0: eval(fun)
    approx2 = lambda x, a0, a1: eval(fun)
    approx3 = lambda x, a0, a1, a2: eval(fun)
    approx4 = lambda x, a0, a1, a2, a3: eval(fun)
    approx5 = lambda x, a0, a1, a2, a3, a4: eval(fun)
    approx6 = lambda x, a0, a1, a2, a3, a4, a5: eval(fun)
    # Tuple lookup replaces the original eval('approx{n}') dispatch.
    approx = (approx1, approx2, approx3, approx4, approx5, approx6)[num_coef - 1]
    # Least-squares fit of every dataset.
    opt = np.zeros((num_of_datasets, num_coef))
    cov = np.zeros((num_of_datasets, num_coef, num_coef))
    for i in range(num_of_datasets):
        opt[i], cov[i] = curve_fit(approx, X_data[i], Y_data[i], absolute_sigma=Y_absolute_sigma)
    # Fitted coefficients.
    a = opt
    # Standard errors of the coefficients are the SQUARE ROOTS of the
    # covariance diagonal (the original stored the raw variances).
    sigma_a = np.zeros((num_of_datasets, num_coef))
    for i in range(num_of_datasets):
        sigma_a[i] = np.sqrt(np.diag(cov[i]))
    # Relative errors on the coefficients, in percent.
    rel_sigma_a = 100 * sigma_a / abs(a)
    # Standard error of the approximation for each dataset.
    S_e = []
    for i in range(num_of_datasets):
        residuals1 = Y_data[i] - approx(X_data[i], *opt[i])
        fres1 = sum(residuals1 ** 2)
        S_e.append(np.sqrt(fres1 / len(X_data[i])))
    # Legend template: the expression with coefficient placeholders.
    if scientific_view:
        tr1 = re.sub(r'a[0-9]', '{:.3E}', inp)
    else:
        tr1 = re.sub(r'a[0-9]', '{%.3f}', inp)
    tr1 = tr1.replace('e^', 'exp')
    tr1 = tr1.replace('**', '^')
    tr1 = tr1.replace('*', r' \cdot ')
    tr1 = '$ y(x) = ' + tr1 + '$'
    # Order the fitted coefficients by their index in the expression so
    # they line up with the placeholders left-to-right.
    order = re.findall('a([0-9])', fun)
    a_ord = [0 for i in range(num_of_datasets)]
    for i in range(num_of_datasets):
        a_ord[i] = dict(zip(order, a[i]))
        a_ord[i] = dict(sorted(a_ord[i].items()))
        a_ord[i] = tuple(a_ord[i].values())
    # Legend entries with the numeric coefficients substituted.
    if scientific_view:
        leg = [tr1.format(*a_ord[i]) for i in range(num_of_datasets)]
    else:
        leg = [tr1 % a_ord[i] for i in range(num_of_datasets)]
    # Draw the figure: fitted curve (dashed) plus raw data (scatter).
    fig, ax = plt.subplots(figsize=(10, 6))
    fig.patch.set_facecolor('white')
    for i in range(num_of_datasets):
        # Dense grid along X for a smooth fitted curve.
        dots = np.arange(X_data[i][0], max(X_data[i]) + 0.0001, 0.01)
        ax.plot(dots, approx(dots, *opt[i]), '--', lw=2, label=leg[i])
        ax.scatter(X_data[i], Y_data[i], s=15)
    plt.legend()
    # Title and axis labels.
    ax.set_title(plot_title)
    ax.set_ylabel(y_label)
    ax.set_xlabel(x_label)
    # Grid lines and minor tick marks.
    ax.minorticks_on()
    ax.grid(which='minor', color='gray', linestyle=':', linewidth=0.5)
    ax.grid(which='major', linewidth=0.5)
    # Error bars along Y only (the fit standard error).
    if print_cross:
        for i in range(num_of_datasets):
            plt.errorbar(X_data[i], Y_data[i], fmt='ro', markersize='4', yerr=S_e[i], capsize=2, elinewidth=1, capthick=1, ecolor='black')
    # Optionally save the figure under pictures/ (created on demand).
    if save_fig:
        os.makedirs('pictures', exist_ok=True)
        plt.savefig('pictures/' + plot_name + '.png', dpi=400)
    # Coefficient / uncertainty report, one table per dataset.
    pd.set_option('display.float_format', lambda x: '{:.3E}'.format(x))
    names = [r'a_{}'.format(i) for i in range(num_coef)]
    for i in range(num_of_datasets):
        # Columns: value, standard error, relative standard error (%).
        param = np.concatenate((np.array(a[i]), np.array(sigma_a[i]), np.array(rel_sigma_a[i]))).reshape(3, num_coef).T
        output = pd.DataFrame(param, columns=['coeffs_values', 'standard error', 'relative se, %'])
        output.insert(0, value=names, column='coeffs')
        if save_as_csv:
            output.to_csv('output_{}.csv'.format(i), index=False)
        print('Coeffs table {}: \n'.format(i))
        print(output)
        print('\nStandart_error_Y_{} = {:.3E}'.format(i, S_e[i]))
        # Optionally emit the table as LaTeX code and a .tex file.
        if to_latex:
            latex_output = output.to_latex(index=False, position='H', caption='Коэффициенты аппроксимации', label='coeffs_table')
            print('\n\nLatex code of coeffs table {}: \n'.format(i))
            print(latex_output)
            with open('coeffs_table_{}.tex'.format(i), 'w') as tf:
                tf.write(latex_output)
    # Display the figure.
    plt.show()
import numpy as np
import pandas as pd
import os
import csv
import sklearn
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from torch.utils.data import TensorDataset
from sklearn.metrics import f1_score
import random
from transformers import BertForSequenceClassification
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from transformers import AdamW, get_linear_schedule_with_warmup
from util import accuracy_per_class, f1_score_func
import argparse
from sklearn.model_selection import train_test_split
import torch
from tqdm import tqdm
import json
import torch.nn as nn
# Command-line interface for the evaluation script.
parser = argparse.ArgumentParser()
# Path to the fine-tuned checkpoint whose state_dict will be loaded.
parser.add_argument('--trained_model', default='/home/dsanyal/.pix2pix/off-by-one-bit-/finetune_multiclass/sample-partial-tunning/finetuned_BERT_epoch_1.model', help="saved model name from huggingface")
# CSV file containing the test set (BULLET_POINTS, PRODUCT_ID columns).
parser.add_argument('--csv',help="csv of test sets")
# NOTE(review): the help texts of the next two options look swapped or
# copy-pasted — '--model_type' is the huggingface base architecture and
# '--experiment_name' is the directory holding params.json. Verify before
# relying on --help output.
parser.add_argument('--model_type', default='distilbert-base-uncased', help="output directory")
parser.add_argument('--experiment_name', default='sample-partial-tunning', help="model name from huggingface")
def get_label(preds, label_dict):
    """Map a batch of class scores to label names.

    ``label_dict`` maps label name -> class index; the argmax index of
    each row of ``preds`` is translated back through the inverted dict.
    Returns a list of label names, one per row.
    """
    index_to_name = {idx: name for name, idx in label_dict.items()}
    winning_indices = np.argmax(preds, axis=1).flatten()
    return [index_to_name[idx] for idx in winning_indices]
def evaluate2(dataloader_test, model):
    """Run the model over a test dataloader and collect predictions.

    Each batch is (input_ids, attention_mask, ids). Returns a tuple
    (probabilities, ids): softmax class probabilities stacked over all
    batches, and the matching id column. Uses the module-level ``device``.
    """
    model.eval()
    softmax = nn.Softmax(dim=1)
    all_probs = []
    all_ids = []
    for batch in tqdm(dataloader_test, desc='evaluating', leave=False, disable=False):
        batch = tuple(tensor.to(device) for tensor in batch)
        with torch.no_grad():
            # No labels are passed, so outputs[0] holds the logits.
            outputs = model(input_ids=batch[0], attention_mask=batch[1])
        probs = softmax(outputs[0]).detach().cpu().numpy()
        all_probs.append(probs)
        all_ids.append(batch[2].cpu().numpy())
    return np.concatenate(all_probs, axis=0), np.concatenate(all_ids, axis=0)
if (__name__ == "__main__"):
    args = parser.parse_args()
    # Tokenizer must match the base architecture the checkpoint was trained on.
    tokenizer = AutoTokenizer.from_pretrained(args.model_type)
    #df = pd.read_csv(args.csv, names=['id', 'category','text'])
    df = pd.read_csv(args.csv, escapechar = "\\", quoting = csv.QUOTE_NONE)
    df = df[["BULLET_POINTS", "PRODUCT_ID"]]
    #df.set_index('id', inplace=True)
    #print(df["BULLET_POINTS"].values.tolist())
    # Encode the bullet-point text. str(x)[1:-1] drops the first and last
    # characters — presumably the brackets of a stored list-like string;
    # TODO confirm against the CSV format.
    encoded_data_test = tokenizer.batch_encode_plus(
        df.BULLET_POINTS.apply(lambda x: str(x)[1:-1]).tolist(),
        add_special_tokens=True,
        return_attention_mask=True,
        pad_to_max_length=True,
        max_length=256,
        return_tensors='pt')
    input_ids_test = encoded_data_test['input_ids']
    attention_masks_test = encoded_data_test['attention_mask']
    # PRODUCT_ID rides along as the third tensor so predictions can be
    # joined back to products after batching.
    labels_test = torch.tensor(df.PRODUCT_ID.values)
    dataset_test = TensorDataset(input_ids_test, attention_masks_test, labels_test)
    # params.json was written at training time and maps label -> class index.
    with open( args.experiment_name +'/params.json', 'r') as fp:
        label_dict = json.load(fp)
    model = AutoModelForSequenceClassification.from_pretrained(args.model_type,
                                                               num_labels=len(label_dict),
                                                               output_attentions=False,
                                                               output_hidden_states=False)
    print("ignore the above warning if you got the ----model loaded sucessingfully----")
    # Overwrite the freshly initialized head with the fine-tuned weights.
    model.load_state_dict(torch.load(args.trained_model))
    print("model loaded sucessfully")
    #device = torch.device('cpu')
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model.to(device)
    #print(label_dict)
    # Sequential sampling keeps predictions aligned with the input rows.
    dataloader_tester = DataLoader(dataset_test,
                                   sampler=SequentialSampler(dataset_test),
                                   batch_size=512)
    predictions, id_test = evaluate2(dataloader_tester,model)
    pred_label = get_label(predictions, label_dict)
    id_test = id_test.flatten()
    # check the dir results exit or not if not creat
    # accuracy_per_class(predictions, id_test)
    # _, accuracy_score = f1_score_func(predictions, id_test)
    # print("accuracy_score:", accuracy_score)
    if not os.path.exists('results'):
        os.makedirs('results')
    # Output CSV named after the input file, written under results/.
    filepath_out='results/'+args.csv.split('/')[-1]
    f =open(filepath_out,"w+")
    f.write("PRODUCT_ID,BROWSE_NODE_ID\n")
    for i,j in zip(id_test,pred_label):
        f.writelines('%s,%s\n' %(i, j))
    f.close()
    print("done one file")
import time
import numpy as np
from pySerialTransfer import pySerialTransfer as txfer
# please make sure to pip install pySerialTransfer==1.2
# connection will not work with pySerialTransfer==2.0
# requirement: pip install pyserial (works with 3.4 and most likely newer but not much older versions)
# on teensy: include "SerialTransfer.h" Version 2.0
def connect_to_arduino(comport,motor0_enable,motor0_direction,motor0_speed,
                       motor1_enable,motor1_direction,motor1_speed,motor2_enable,motor2_direction,motor2_speed,motor3_enable,motor3_direction,motor3_speed):
    """Send the 12 motor parameters to the microcontroller over serial.

    Opens the given COM port with pySerialTransfer, transmits the enable/
    direction/speed triplets for all four motors in a single packet, waits
    for the echoed response and returns it as a list. Returns None if the
    transfer was interrupted or an error occurred.
    """
    link = None  # defined up-front so the handlers can close it safely
    try:
        print(f"Connecting to {comport}")
        link = txfer.SerialTransfer(comport)
        link.open()
        time.sleep(1)  # allow some time for the Arduino to completely reset
        # reset send_size
        send_size = 0
        # Pack the 12 parameters as one list payload.
        list_ = [motor0_enable, motor0_direction, motor0_speed, motor1_enable, motor1_direction, motor1_speed,
                 motor2_enable, motor2_direction, motor2_speed, motor3_enable, motor3_direction, motor3_speed]
        list_size = link.tx_obj(list_)
        send_size += list_size
        # Transmit all the data to send in a single packet
        link.send(send_size)
        print("Message sent...")
        # Busy-wait for a response; report receive errors as they occur.
        # NOTE(review): errors are printed repeatedly each poll iteration
        # until data arrives — confirm this is the intended behavior.
        while not link.available():
            if link.status < 0:
                if link.status == -1:
                    print('ERROR: CRC_ERROR')
                elif link.status == -2:
                    print('ERROR: PAYLOAD_ERROR')
                elif link.status == -3:
                    print('ERROR: STOP_BYTE_ERROR')
        # Parse response list
        ###################################################################
        rec_list_ = link.rx_obj(obj_type=type(list_),
                                obj_byte_size=list_size,
                                list_format='i')
        print(f'SENT: {list_}')
        print(f'RCVD: {rec_list_}')
        link.close()
        return rec_list_
    except KeyboardInterrupt:
        # Only close the link if it was successfully created; the original
        # bare `link.close()` raised NameError when SerialTransfer() failed.
        if link is not None:
            link.close()
    except Exception:  # narrowed from a bare except: still logs the traceback
        import traceback
        traceback.print_exc()
        if link is not None:
            link.close()
def list_available_ports():
    """Print and return the serial ports visible to pySerialTransfer."""
    available = txfer.open_ports()
    print("Available ports:")
    print(available)
    return available
if __name__ == "__main__":
    # list_available_ports()
    # Target serial port; adjust to whatever port the board enumerates on.
    comport = 'COM17'
    # Demo parameters: all four motors enabled, direction 0, speed 1000.
    motor0_enable = 1
    motor0_direction = 0
    motor0_speed = 1000
    motor1_enable = 1
    motor1_direction = 0
    motor1_speed = 1000
    motor2_enable = 1
    motor2_direction = 0
    motor2_speed = 1000
    motor3_enable = 1
    motor3_direction = 0
    motor3_speed = 1000
    # Send the parameters and print the echoed values as a numpy array.
    results = np.array(connect_to_arduino(comport,motor0_enable,motor0_direction,motor0_speed,
        motor1_enable,motor1_direction,motor1_speed,motor2_enable,motor2_direction,motor2_speed,motor3_enable,motor3_direction,motor3_speed))
    print(results)
import numpy as np
import matplotlib.pyplot as plt
import torch
from torchvision.utils import make_grid
"""
Creates an object to sample and visualize the effect of the LSFs
by sampling from the conditional latent distributions.
"""
class vis_LatentSpace:
    """Sample and visualize the effect of latent space features (LSFs)
    by sweeping each latent dimension around its conditional mean."""

    def __init__(self, model, mu, sd, latent_dim=10, latent_range=3, input_dim=28):
        # model : trained autoencoder exposing .decode(latent) -> images
        # mu, sd : per-dimension mean and std of the latent distribution
        # latent_range : sweep width in units of sd
        # input_dim : side length of the square output images
        self.model = model
        self.model.eval()
        self.latent_dim = latent_dim
        self.latent_range = latent_range
        self.input_dim = input_dim
        self.mu = mu
        self.sd = sd

    def to_img(self, x):
        # Clamp reconstructions into the displayable [0, 1] range.
        x = x.clamp(0, 1)
        return x

    def show_image(self, img):
        img = self.to_img(img)
        npimg = img.numpy()
        # CHW -> HWC for matplotlib.
        plt.imshow(np.transpose(npimg, (1, 2, 0)))

    def visualise(self):
        """Decode a 20-step sweep per latent dimension and save the grid."""
        print("Visualizing LSFs...")
        recon = []
        for i in range(0, self.latent_dim, 1):
            # Sweep dimension i over mu[i] +/- latent_range * sd[i] while
            # every other dimension stays at its mean.
            latent = torch.transpose(self.mu.repeat(20, 1), 0, 1)
            latent[i, :] = torch.linspace(
                self.latent_range*-self.sd[i],
                self.latent_range*self.sd[i], 20) + self.mu[i]
            latent = torch.transpose(latent, 0, 1)
            img_recon = self.model.decode(latent)
            recon.append(img_recon.view(-1, 1, self.input_dim, self.input_dim))
        recon = torch.cat(recon)
        # Fix: the original called plt.subplots() here, leaking an extra
        # empty figure that the following plt.figure() replaced.
        plt.figure(figsize=(15, 15), dpi=200)
        plt.axis('off')
        self.show_image(make_grid(recon.data, 20, 8))
        # Vertical offset between row labels depends on the image size
        # (values tuned empirically for the grid layout — TODO confirm
        # for sizes other than 28/64).
        if self.input_dim == 28:
            step_size = 36
        elif self.input_dim == 64:
            step_size = 74
        else:
            step_size = 340
        for i in range(0, self.latent_dim, 1):
            plt.text(5, (self.input_dim/2.1) + (i*step_size), str(i), color="red", fontsize = 14)
        plt.savefig('./images/latent space features.png')
#!/usr/bin/env nemesis
#
# ----------------------------------------------------------------------
#
# Brad T. Aagaard, U.S. Geological Survey
# Charles A. Williams, GNS Science
# Matthew G. Knepley, University of Chicago
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2016 University of California, Davis
#
# See COPYING for license information.
#
# ----------------------------------------------------------------------
#
# @file tests/fullscale/linearelasticity/nofaults-3d/sheartraction_rate_gendb.py
#
# @brief Python script to generate spatial database with Dirichlet
# boundary conditions for the time-dependent shear test. The traction
# boundary conditions use UniformDB in the .cfg file.
import numpy
from pythia.pyre.units.time import year
class GenerateDB(object):
    """Python object to generate spatial database with Dirichlet
    boundary conditions for the time-dependent shear test.
    """

    def __init__(self):
        """Constructor.
        """
        return

    def run(self):
        """Generate the database.
        """
        # Domain: 5 km spacing spanning [-10 km, +10 km] in x and y, at z=0.
        x = numpy.arange(-1.0e+4, 1.01e+4, 5.0e+3)
        y = numpy.arange(-1.0e+4, 1.01e+4, 5.0e+3)
        z = numpy.array([0])
        x3, y3, z3 = numpy.meshgrid(x, y, z)
        nptsX = x.shape[0]
        nptsY = y.shape[0]
        nptsZ = z.shape[0]
        # Flatten the mesh into an (npts, 3) coordinate array.
        xyz = numpy.zeros((nptsX * nptsY * nptsZ, 3), dtype=numpy.float64)
        xyz[:, 0] = x3.ravel()
        xyz[:, 1] = y3.ravel()
        xyz[:, 2] = z3.ravel()
        # Evaluate the analytical solution at the grid points.
        from sheartraction_rate_soln import AnalyticalSoln
        soln = AnalyticalSoln()
        disp = soln.bc_initial_displacement(xyz)
        # Convert to the year-based units declared in 'values' below —
        # assumes the solution returns seconds-based SI values; TODO
        # confirm against AnalyticalSoln.
        velocity_time = soln.bc_rate_time(xyz) / year.value
        velocity = soln.bc_velocity(xyz) * year.value
        # 3-D Cartesian coordinate system for the database.
        from spatialdata.geocoords.CSCart import CSCart
        cs = CSCart()
        cs.inventory.spaceDim = 3
        cs._configure()
        # Database payload: initial displacement amplitudes, velocity
        # (rate) amplitudes, and the rate start time at each grid point.
        data = {
            "x": x,
            "y": y,
            "z": z,
            'points': xyz,
            'coordsys': cs,
            'data_dim': 2,
            'values': [
                {'name': "initial_amplitude_x",
                 'units': "m",
                 'data': disp[0, :, 0].ravel()},
                {'name': "initial_amplitude_y",
                 'units': "m",
                 'data': disp[0, :, 1].ravel()},
                {'name': "initial_amplitude_z",
                 'units': "m",
                 'data': disp[0, :, 2].ravel()},
                {'name': "rate_amplitude_x",
                 'units': "m/year",
                 'data': velocity[0, :, 0].ravel()},
                {'name': "rate_amplitude_y",
                 'units': "m/year",
                 'data': velocity[0, :, 1].ravel()},
                {'name': "rate_amplitude_z",
                 'units': "m/year",
                 'data': velocity[0, :, 2].ravel()},
                {'name': "rate_start_time",
                 'units': "year",
                 'data': velocity_time[0, :, 0].ravel()},
            ]}
        # Write the database to an ASCII SimpleGridDB file.
        from spatialdata.spatialdb.SimpleGridAscii import SimpleGridAscii
        io = SimpleGridAscii()
        io.inventory.filename = "sheartraction_rate_disp.spatialdb"
        io._configure()
        io.write(data)
        return
# ======================================================================
# Script entry point: generate and write the spatial database file.
if __name__ == "__main__":
    GenerateDB().run()
# End of file
# -*- coding: utf-8 -*-
import numpy as np
import array_comparison as ac
# Initial empty gamestate
_initial_gamestate = [
    ["-", "-", "-"],
    ["-", "-", "-"],
    ["-", "-", "-"]
]


class GameState:
    """A 3x3 tic-tac-toe board of "X", "O" and "-" (empty) cells.

    Validates the board on construction and raises ValueError for
    impossible states (wrong shape, wrong characters, move-count
    imbalance between X and O).
    """

    def __init__(self, array=_initial_gamestate):
        # np.array() copies `array`, so the shared default list is never
        # mutated by later operations on self.state.
        self.state = np.array(array)
        try:
            self.char_counts = self.count_chars()
        except TypeError as e:
            raise ValueError("Not a valid game state: " + str(e))
        self.rounds_played = self.get_rounds_played()
        self.is_valid, message = self.is_state_valid()
        if not self.is_valid:
            raise ValueError("Not a valid game state: " + message)

    def __eq__(self, other_gamestate):
        # Equality delegates to the array_comparison helper — presumably
        # symmetry-aware (see generate_equal_arrays in __hash__); confirm
        # against that module.
        if not isinstance(other_gamestate, GameState):
            return NotImplemented
        return ac.are_sqr_arrays_equal(self.state, other_gamestate.state)

    def __str__(self):
        return str(self.state)

    def __hash__(self):
        # Hash over the sorted string forms of all equivalent arrays so
        # that boards considered equal by __eq__ hash identically.
        arr = sorted(str(a) for a in ac.generate_equal_arrays(self.state))
        return hash("".join(arr))

    def is_state_valid(self):
        """Return (is_valid, message) for the current board."""
        if not self.state.shape == (3, 3):
            return False, "State is not a 3x3 square."
        # Count number of X's, O's and -'s
        if self.char_counts["X"] + self.char_counts["O"] + self.char_counts["-"] != 9:
            return False, "Wrong number of characters."
        # X always moves first, so O can never lead ...
        if self.char_counts["O"] > self.char_counts["X"]:
            return False, "Too many O's"
        # ... and X can be ahead by at most one move.
        if self.char_counts["O"] + 1 < self.char_counts["X"]:
            return False, "Too many X's"
        # TODO check that no two winning lines exist
        return True, "State is valid"

    def is_game_over(self):
        """Return (finished, message); message names the winner or a draw."""
        # Collect all eight winning lines: rows, columns, both diagonals.
        lines = list(self.state) + list(self.state.T)
        lines.append(np.diag(self.state))
        lines.append(np.diag(np.rot90(self.state)))
        for line in lines:
            if "-" != line[0] == line[1] == line[2]:
                return True, "{0} won!".format(line[0])
        # A full board with no winner is a draw.
        if self.char_counts["-"] == 0:
            return True, "Draw."
        return False, "Game continues"

    def count_chars(self):
        """Count board symbols; the result always has X, O and - keys."""
        unique, counts = np.unique(self.state, return_counts=True)
        counts_dict = dict(zip(unique, counts))
        for symbol in ("X", "O", "-"):
            counts_dict.setdefault(symbol, 0)
        return counts_dict

    def get_rounds_played(self):
        # np.unique yields numpy integers; normalize to a plain int
        # (replaces the original `type(rounds) is np.int64` check).
        return int(self.char_counts["O"] + self.char_counts["X"])

    def next_to_move(self):
        # X opens the game, so X moves on even rounds.
        return "X" if self.rounds_played % 2 == 0 else "O"

    def rotate(self, turns):
        # Rotation preserves char counts and validity, so nothing is
        # recomputed here.
        self.state = ac.rotate(self.state, turns=turns)
import os
import tempfile
import numpy as np
import pytest
import tensorboardX
from numpy.testing import assert_almost_equal
from tbparse import SummaryReader
from torch.utils.tensorboard import SummaryWriter
R = 5
N_STEPS = 100
@pytest.fixture
def prepare(testdir):
    """Generate a TensorBoard run with torch's SummaryWriter for the tests.

    Use torch for main tests; logs for tensorboard and tensorboardX are
    generated in their own tests.
    Ref: https://pytorch.org/docs/stable/tensorboard.html
    """
    log_dir = os.path.join(testdir.tmpdir, 'run')
    writer = SummaryWriter(log_dir)
    for i in range(N_STEPS):
        writer.add_scalars('run_14h', {'xsinx':i*np.sin(i/R),
                                       'xcosx':i*np.cos(i/R),
                                       'tanx': np.tan(i/R)}, i)
    writer.close()
    # This call adds three values to the same scalar plot with the tag
    # 'run_14h' in TensorBoard's scalar section.
    # The resulting on-disk layout is:
    """
    run
    ├── events.out.tfevents.<id-1>
    ├── run_14h_tanx
    │   └── events.out.tfevents.<id-2>
    ├── run_14h_xcosx
    │   └── events.out.tfevents.<id-3>
    └── run_14h_xsinx
        └── events.out.tfevents.<id-4>
    """
def test_tensorflow(prepare, testdir):
    """Intentionally empty placeholder for parity with the other backends."""
    pass
    # Note: tensorflow does not allow users to log multiple scalars.
def test_tensorboardX(prepare, testdir):
    """Parsed tensorboardX logs must match the torch-generated ones."""
    # Note: tensorboardX uses '/' instead of '_' for adding scalars.
    # Prepare an equivalent log written with tensorboardX.
    log_dir_th = os.path.join(testdir.tmpdir, 'run')
    tmpdir_tbx = tempfile.TemporaryDirectory()
    log_dir_tbx = os.path.join(tmpdir_tbx.name, 'run')
    writer = tensorboardX.SummaryWriter(log_dir_tbx)
    for step in range(N_STEPS):
        scalars = {'xsinx': step * np.sin(step / R),
                   'xcosx': step * np.cos(step / R),
                   'tanx': np.tan(step / R)}
        writer.add_scalars('run_14h', scalars, step)
    writer.close()

    def assert_dir_names_match(df_torch, df_x):
        # torch's child dir names differ from tensorboardX's only by the
        # separator right after the 'run_14h' prefix ('_' vs '/').
        prefix = len('run_14h')
        for idx in range(len(df_x)):
            name = df_torch['dir_name'][idx]
            assert name[:prefix] + '/' + name[prefix + 1:] == df_x['dir_name'][idx]

    # (default) Parse & Compare
    assert SummaryReader(log_dir_th).scalars.equals(
        SummaryReader(log_dir_tbx).scalars)
    # (pivot) Parse & Compare
    assert SummaryReader(log_dir_th, pivot=True).scalars.equals(
        SummaryReader(log_dir_tbx, pivot=True).scalars)
    # (dir_name) Parse & Compare
    df_th = SummaryReader(log_dir_th, extra_columns={'dir_name'}).scalars
    df_tbx = SummaryReader(log_dir_tbx, extra_columns={'dir_name'}).scalars
    assert_dir_names_match(df_th, df_tbx)
    df_th.drop(columns=['dir_name'], inplace=True)
    df_tbx.drop(columns=['dir_name'], inplace=True)
    assert df_th.equals(df_tbx)
    # (pivot & dir_name) Parse & Compare
    df_th = SummaryReader(log_dir_th, pivot=True, extra_columns={'dir_name'}).scalars
    df_tbx = SummaryReader(log_dir_tbx, pivot=True, extra_columns={'dir_name'}).scalars
    assert_dir_names_match(df_th, df_tbx)
    df_th.drop(columns=['dir_name'], inplace=True)
    df_tbx.drop(columns=['dir_name'], inplace=True)
    assert df_th.equals(df_tbx)
def test_log_dir(prepare, testdir):
    """End-to-end check of SummaryReader on the torch-generated run dir."""
    log_dir = os.path.join(testdir.tmpdir, 'run')
    # Test pivot: scalars collapse into one wide 'run_14h' column.
    reader = SummaryReader(log_dir, pivot=True, extra_columns={'dir_name'})
    # One child per event file: the parent run plus the three scalar subdirs.
    assert len(reader.children) == 4
    assert reader.scalars.columns.to_list() == ['step', 'run_14h', 'dir_name']
    df0 = reader.scalars
    assert df0.shape == (N_STEPS*3, 3)
    steps = [i for i in range(N_STEPS)]
    # xsinx: values and steps round-trip for this subdir.
    df = df0.loc[df0['dir_name'] == 'run_14h_xsinx', ['step', 'run_14h']]
    assert df.shape == (100, 2)
    assert df['step'].to_list() == steps
    assert_almost_equal(df['run_14h'].to_numpy(), [i*np.sin(i/R) for i in range(100)], 2)
    # xcosx
    df = df0.loc[df0['dir_name'] == 'run_14h_xcosx', ['step', 'run_14h']]
    assert df.shape == (100, 2)
    assert df['step'].to_list() == steps
    assert_almost_equal(df['run_14h'].to_numpy(), [i*np.cos(i/R) for i in range(100)], 2)
    # tanx
    df = df0.loc[df0['dir_name'] == 'run_14h_tanx', ['step', 'run_14h']]
    assert df.shape == (100, 2)
    assert df['step'].to_list() == steps
    assert_almost_equal(df['run_14h'].to_numpy(), [np.tan(i/R) for i in range(100)], 2)
    # Test all columns: long format with every optional column requested.
    reader = SummaryReader(log_dir, extra_columns={
        'wall_time', 'dir_name', 'file_name'})
    assert reader.scalars.columns.to_list() == ['step', 'tag', 'value', 'wall_time', 'dir_name', 'file_name']
import numpy as np
import pandas as pd
def repeat_df(df: pd.DataFrame, times: int) -> pd.DataFrame:
    """Repeat a DataFrame vertically and cyclically.

    Parameters:
        df : DataFrame to be repeated.
        times : The number of times to repeat ``df``.

    Returns:
        New DataFrame whose rows are the repeated ``df``.

    Example:
        >>> repeat_df(pd.DataFrame.from_dict([{'A': 1}, {'A': 2}]), 2)
           A
        0  1
        1  2
        2  1
        3  2
    """
    # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
    # pd.concat is the supported equivalent and produces the same result.
    return pd.concat([df] * times, ignore_index=True)
def tile_df(df: pd.DataFrame, times: int) -> pd.DataFrame:
    """Tile a DataFrame vertically by repeating each row contiguously.

    Parameters:
        df : DataFrame to be repeated.
        times : The number of times to repeat each element of ``df``.

    Returns:
        New DataFrame whose rows are the repeated elements of ``df``
        (original index labels are kept, hence the duplicated index).

    Example:
        >>> tile_df(pd.DataFrame.from_dict([{'A': 1}, {'A': 2}]), 2)
           A
        0  1
        0  1
        1  2
        1  2
    """
    # Build the positional index [0,0,...,1,1,...] and select by position.
    row_positions = np.repeat(np.arange(len(df)), times)
    return df.iloc[row_positions]
def product_df(df1: pd.DataFrame, df2: pd.DataFrame) -> pd.DataFrame:
    """Take the Cartesian product of the rows of the two DataFrames.

    Parameters:
        df1 : The first DataFrame.
        df2 : The second DataFrame.

    Returns:
        New DataFrame with one row per (df1 row, df2 row) pair.

    Example:
        >>> A = pd.DataFrame.from_dict([{'A': 1}, {'A': 2}])
        >>> B = pd.DataFrame.from_dict([{'B': 1}, {'B': 2}])
        >>> product_df(A, B)
           A  B
        0  1  1
        1  1  2
        2  2  1
        3  2  2
    """
    n1, n2 = len(df1), len(df2)
    # Left side: each df1 row repeated contiguously; right side: df2
    # cycled — aligned row-for-row, this enumerates every pair.
    left = tile_df(df1, n2).reset_index(drop=True)
    right = repeat_df(df2, n1).reset_index(drop=True)
    return pd.concat([left, right], axis=1)
def summarize_duplicates_df(df: pd.DataFrame, key: str, method: str = 'sum') -> pd.DataFrame:
    """Contract duplicate rows while aggregating one of the columns.

    Parameters:
        df : DataFrame to summarize.
        key : The key to be aggregated.
        method : Method to use for aggregating the ``key`` column.

    Returns:
        New DataFrame. The input ``df`` is left unmodified.

    Example:
        >>> A = pd.DataFrame.from_dict([{'A': 1}, {'A': 1}, {'A': 2}, {'A': 2}])
        >>> B = pd.DataFrame.from_dict([{'B': 1}, {'B': 3}, {'B': 5}, {'B': 7}])
        >>> df = pd.concat((A, B), axis=1)
        >>> df
           A  B
        0  1  1
        1  1  3
        2  2  5
        3  2  7
        >>> summarize_duplicates_df(df, 'B', 'sum')
           A  B
        0  1  4
        2  2  12
    """
    # Work on a copy: the original implementation wrote the aggregated
    # column back into the caller's DataFrame as a side effect, despite
    # the docstring promising a new DataFrame.
    df = df.copy()
    group_cols = list(df.columns)
    group_cols.remove(key)
    # Replace each key value by the aggregate over its duplicate group,
    # then collapse the now-identical rows.
    df[key] = df.groupby(group_cols)[key].transform(method)
    return df.drop_duplicates()
if __name__ == '__main__':
    # Run the docstring examples above as the module's test suite.
    import doctest
    doctest.testmod()
"""track_to_track_association
The module tests two tracks for track association. It uses hypothesis testing to decide whether the two tracks are of
the same target. See report for more mathematical derivation.
"""
import numpy as np
from scipy.stats.distributions import chi2
def test_association_independent_tracks(track1, track2, alpha=0.05):
    """
    Check whether two tracks stem from the same target, assuming their
    estimation errors are independent.
    :param track1: track to check for association (has state_vector, covar)
    :param track2: track to check for association
    :param alpha: significance level of the chi-square test
    :return: true if the tracks are from the same target, false else
    """
    # Under H0 the true states coincide, so the estimate difference
    # equals the estimation-error difference.
    diff = track1.state_vector - track2.state_vector
    # Independence assumption: covariances simply add.
    combined_covar = track1.covar + track2.covar
    mahalanobis_sq = (diff.transpose() @ np.linalg.inv(combined_covar) @ diff)[0]
    # chi-square with 4 dof — the state vector has 4 dimensions.
    threshold = chi2.ppf((1 - alpha), df=4)
    # Accept H0 when the distance stays below the threshold.
    return mahalanobis_sq <= threshold
def test_association_dependent_tracks(track1_mean, track1_cov, track2_mean, track2_cov, cross_cov_ij, cross_cov_ji,
                                      alpha=0.05):
    """
    Check whether two tracks stem from the same target when the
    dependence between their estimation errors is accounted for.
    :param track1_mean: state estimate of track 1
    :param track1_cov: covariance of track 1
    :param track2_mean: state estimate of track 2
    :param track2_cov: covariance of track 2
    :param cross_cov_ij: cross-covariance of the estimation errors. See article
    :param cross_cov_ji: cross-covariance in the opposite direction
    :param alpha: significance level of the chi-square test
    :return: true if the tracks are from the same target, false else
    """
    # Under H0 the true states coincide, so the estimate difference
    # equals the estimation-error difference.
    diff = track1_mean - track2_mean
    # Dependence-aware combination: subtract both cross-covariance terms.
    combined_covar = track1_cov + track2_cov - cross_cov_ij - cross_cov_ji
    mahalanobis_sq = (diff.transpose() @ np.linalg.inv(combined_covar) @ diff)[0]
    # chi-square with 4 dof — the state vector has 4 dimensions.
    threshold = chi2.ppf((1 - alpha), df=4)
    # Accept H0 when the distance stays below the threshold.
    return mahalanobis_sq <= threshold
import numpy as np
import pandas as pd
from .. import categorizer as cat
from ..census_helpers import Census
# TODO DOCSTRINGS!!
class Starter:
"""
This is a recipe for getting the marginals and joint distributions to use
to pass to the synthesizer using simple categories - population, age,
race, and sex for people, and children, income, cars, and workers for
households. This module is responsible for
Parameters
----------
c : object
census_helpers.Census object
state : string
FIPS code the state
county : string
FIPS code for the county
tract : string, optional
FIPS code for a specific track or None for all tracts in the county
acsyear : integer, optional
Final year in the 5-year estimates ACS dataset.
Default: 2016, which corresponds to 2011-2016 ACS dataset
Returns
-------
household_marginals : DataFrame
Marginals per block group for the household data (from ACS 5-year estimates)
person_marginals : DataFrame
Marginals per block group for the person data (from ACS 5-year estimates)
household_jointdist : DataFrame
joint distributions for the households (from PUMS 2010-2000), one joint
distribution for each PUMA (one row per PUMA)
person_jointdist : DataFrame
joint distributions for the persons (from PUMS 2010-2000), one joint
distribution for each PUMA (one row per PUMA)
tract_to_puma_map : dictionary
keys are tract ids and pumas are puma ids
"""
def __init__(self, key, state, county, tract=None, acsyear=2016):
self.c = c = Census(key)
self.state = state
self.county = county
self.tract = tract
self.acsyear = acsyear
structure_size_columns = ['B25032_0%02dE' % i for i in range(1, 24)]
age_of_head_columns = ['B25007_0%02dE' % i for i in range(1, 22)]
race_of_head_columns = ['B25006_0%02dE' % i for i in range(1, 11)]
hispanic_head_columns = ['B25003I_0%02dE' % i for i in range(1, 4)]
hh_size_columns = ['B25009_0%02dE' % i for i in range(1, 18)]
income_columns = ['B19001_0%02dE' % i for i in range(1, 18)]
vehicle_columns = ['B08201_0%02dE' % i for i in range(1, 7)]
workers_columns = ['B08202_0%02dE' % i for i in range(1, 6)]
presence_of_children_columns = ['B11005_001E', 'B11005_002E', 'B11005_011E']
presence_of_seniors_columns = ['B11007_002E', 'B11007_007E']
tenure_mover_columns = ['B25038_0%02dE' % i for i in range(1, 16)]
block_group_columns = (
income_columns + presence_of_children_columns +
hh_size_columns)
tract_columns = vehicle_columns + workers_columns
h_acs = c.block_group_and_tract_query(
block_group_columns,
tract_columns, state, county,
merge_columns=['tract', 'county', 'state'],
block_group_size_attr="B11005_001E",
tract_size_attr="B08201_001E",
tract=tract, year=acsyear)
self.h_acs = h_acs
self.h_acs_cat = cat.categorize(h_acs, {
("hh_children", "yes"): "B11005_002E",
("hh_children", "no"): "B11005_011E",
("hh_income", "lt30"):
"B19001_002E + B19001_003E + B19001_004E + "
"B19001_005E + B19001_006E",
("hh_income", "gt30-lt60"):
"B19001_007E + B19001_008E + B19001_009E + "
"B19001_010E + B19001_011E",
("hh_income", "gt60-lt100"): "B19001_012E + B19001_013E",
("hh_income", "gt100-lt150"): "B19001_014E + B19001_015E",
("hh_income", "gt150"): "B19001_016E + B19001_017E",
("hh_cars", "none"): "B08201_002E",
("hh_cars", "one"): "B08201_003E",
("hh_cars", "two"): "B08201_004E",
("hh_cars", "three or more"):
"B08201_005E + B08201_006E",
("hh_workers", "none"): "B08202_002E",
("hh_workers", "one"): "B08202_003E",
("hh_workers", "two"): "B08202_004E",
("hh_workers", "three or more"): "B08202_005E",
("hh_size", "one"): "B25009_003E + B25009_011E",
("hh_size", "two"): "B25009_004E + B25009_012E",
("hh_size", "three"): "B25009_005E + B25009_013E",
("hh_size", "four or more"): "B25009_006E + B25009_014E + "
"B25009_007E + B25009_015E + "
"B25009_008E + B25009_016E + "
"B25009_009E + B25009_017E"
}, index_cols=['state', 'county', 'tract', 'block group'])
# gq_population = ['B26001_001E']
# HH population, for the hhpop/totalpop adjustment
hh_population = ['B11002_001E']
population = ['B01001_001E'] # This includes GQ
hispanic = ['B03003_002E', 'B03003_003E']
sex = ['B01001_002E', 'B01001_026E']
race = ['B02001_0%02dE' % i for i in range(1, 11)]
male_age_columns = ['B01001_0%02dE' % i for i in range(3, 26)]
female_age_columns = ['B01001_0%02dE' % i for i in range(27, 50)]
industry = ['C24030_0%02dE' % i for i in range(1, 56)] + ['B23025_007E']
all_columns = population + sex + race + male_age_columns + \
female_age_columns + hh_population + hispanic + industry
p_acs = c.block_group_query(all_columns, state, county, tract=tract, year=acsyear)
self.p_acs = p_acs
self.p_acs_cat = cat.categorize(p_acs, {
("person_age", "19 and under"):
"(B01001_003E + B01001_004E + B01001_005E + "
"B01001_006E + B01001_007E + B01001_027E + "
"B01001_028E + B01001_029E + B01001_030E + "
"B01001_031E) * B11002_001E*1.0/B01001_001E",
("person_age", "20 to 35"):
"(B01001_008E + B01001_009E + B01001_010E + "
"B01001_011E + B01001_012E + B01001_032E + "
"B01001_033E + B01001_034E + B01001_035E + "
"B01001_036E) * B11002_001E*1.0/B01001_001E",
("person_age", "35 to 60"):
"(B01001_013E + B01001_014E + B01001_015E + "
"B01001_016E + B01001_017E + B01001_037E + "
"B01001_038E + B01001_039E + B01001_040E + "
"B01001_041E) * B11002_001E*1.0/B01001_001E",
("person_age", "above 60"):
"(B01001_018E + B01001_019E + B01001_020E + "
"B01001_021E + B01001_022E + B01001_023E + "
"B01001_024E + B01001_025E + B01001_042E + "
"B01001_043E + B01001_044E + B01001_045E + "
"B01001_046E + B01001_047E + B01001_048E + "
"B01001_049E) * B11002_001E*1.0/B01001_001E",
("race", "white"): "(B02001_002E) * B11002_001E*1.0/B01001_001E",
("race", "black"): "(B02001_003E) * B11002_001E*1.0/B01001_001E",
("race", "asian"): "(B02001_005E) * B11002_001E*1.0/B01001_001E",
("race", "other"): "(B02001_004E + B02001_006E + B02001_007E + "
"B02001_008E) * B11002_001E*1.0/B01001_001E",
("person_sex", "male"):
"(B01001_002E) * B11002_001E*1.0/B01001_001E",
("person_sex", "female"):
"(B01001_026E) * B11002_001E*1.0/B01001_001E",
("hispanic", "yes"):
"(B03003_003E) * B11002_001E*1.0/B01001_001E",
("hispanic", "no"):
"(B03003_002E) * B11002_001E*1.0/B01001_001E",
("industry", "agriculture"): "(C24030_003E + C24030_006E + C24030_030E + C24030_033E) * "
"B11002_001E*1.0/B01001_001E",
("industry", "manufacturing"): "(C24030_007E + C24030_034E) * B11002_001E*1.0/B01001_001E",
("industry", "retail / transportation"): "(C24030_008E + C24030_009E + C24030_010E + C24030_035E + "
"C24030_036E + C24030_037E) * B11002_001E*1.0/B01001_001E",
("industry", "information"): "(C24030_013E + C24030_014E + C24030_017E + C24030_040E + C24030_041E + "
"C24030_044E) * B11002_001E*1.0/B01001_001E",
("industry", "educational / health"): "(C24030_021E + C24030_048E) * B11002_001E*1.0/B01001_001E",
("industry", "arts"): "(C24030_024E + C24030_051E) * B11002_001E*1.0/B01001_001E",
("industry", "other services"): "(C24030_027E + C24030_028E + C24030_054E + C24030_055E) * "
"B11002_001E*1.0/B01001_001E",
("industry", "not employed"): "B11002_001E - C24030_001E * B11002_001E*1.0/B01001_001E"
}, index_cols=['state', 'county', 'tract', 'block group'])
# Put the needed PUMS variables here. These are also the PUMS variables
# that will be in the outputted synthetic population
self.h_pums_cols = ('serialno', 'PUMA00', 'PUMA10', 'RT', 'NP', 'TYPE',
'R65', 'HINCP', 'VEH', 'R18')
self.p_pums_cols = ('serialno', 'PUMA00', 'PUMA10', 'RELP', 'AGEP',
'ESR', 'RAC1P', 'HISP', 'SEX', 'SPORDER',
'PERNP', 'SCHL', 'WKHP', 'JWTR', 'SCH', 'NAICSP')
def get_geography_name(self):
# this synthesis is at the block group level for most variables
return "block_group"
def get_state(self):
return self.state
def get_county(self):
return self.county
def get_num_geographies(self):
return len(self.p_acs_cat)
def get_available_geography_ids(self):
# return the ids of the geographies, in this case a state, county,
# tract, block_group id tuple
for tup in self.p_acs_cat.index:
yield pd.Series(tup, index=self.p_acs_cat.index.names)
def get_household_marginal_for_geography(self, ind):
return self.h_acs_cat.loc[tuple(ind.values)]
def get_person_marginal_for_geography(self, ind):
return self.p_acs_cat.loc[tuple(ind.values)]
def get_household_joint_dist_for_geography(self, ind):
c = self.c
puma10, puma00 = c.tract_to_puma(ind.state, ind.county, ind.tract)
# this is cached so won't download more than once
if type(puma00) == str:
h_pums = self.c.download_household_pums(ind.state, puma10, puma00,
usecols=self.h_pums_cols)
p_pums = self.c.download_population_pums(ind.state, puma10, puma00,
usecols=self.p_pums_cols)
elif np.isnan(puma00): # only puma10 available
h_pums = self.c.download_household_pums(ind.state, puma10, None,
usecols=self.h_pums_cols)
p_pums = self.c.download_population_pums(ind.state, puma10, None,
usecols=self.p_pums_cols)
h_pums = h_pums.set_index('serialno')
# join persons to households,
# calculate needed household-level variables
age_of_head = p_pums[p_pums.RELP == 0].groupby('serialno').AGEP.max()
num_workers = p_pums[p_pums.ESR.isin([1, 2, 4, 5])].groupby(
'serialno').size()
h_pums['race_of_head'] = p_pums[p_pums.RELP == 0].groupby(
'serialno').RAC1P.max()
h_pums['hispanic_head'] = p_pums[p_pums.RELP == 0].groupby(
'serialno').HISP.max()
h_pums['age_of_head'] = age_of_head
h_pums['workers'] = num_workers
h_pums.workers = h_pums.workers.fillna(0)
h_pums = h_pums.reset_index()
def sf_detached_cat(r):
if r.BLD == 2:
return "yes"
return "no"
def age_of_head_cat(r):
if r.age_of_head < 35:
return "lt35"
elif r.age_of_head >= 65:
return "gt65"
return "gt35-lt65"
def race_of_head_cat(r):
if r.race_of_head == 1:
return "white"
elif r.race_of_head == 2:
return "black"
elif r.race_of_head == 6:
return "asian"
return "other"
def hispanic_head_cat(r):
if r.hispanic_head == 1:
return "no"
return "yes"
def hh_size_cat(r):
if r.NP == 1:
return "one"
elif r.NP == 2:
return "two"
elif r.NP == 3:
return "three"
return "four or more"
def cars_cat(r):
if r.VEH == 0:
return "none"
elif r.VEH == 1:
return "one"
elif r.VEH == 2:
return "two"
return "three or more"
def children_cat(r):
if r.R18 == 1:
return "yes"
return "no"
def seniors_cat(r):
if r.R65 > 0:
return "yes"
return "no"
def income_cat(r):
if r.HINCP >= 150000:
return "gt150"
elif (r.HINCP >= 100000) & (r.HINCP < 150000):
return "gt100-lt150"
elif (r.HINCP >= 60000) & (r.HINCP < 100000):
return "gt60-lt100"
elif (r.HINCP >= 30000) & (r.HINCP < 60000):
return "gt30-lt60"
return "lt30"
def workers_cat(r):
if r.workers >= 3:
return "two or more"
elif r.workers == 2:
return "two"
elif r.workers == 1:
return "one"
return "none"
def tenure_mover_cat(r):
if (r.MV < 4) & (r.TEN < 3):
return "own recent"
elif (r.MV >= 4) & (r.TEN < 3):
return "own not recent"
elif (r.MV < 4) & (r.TEN >= 3):
return "rent recent"
return "rent not recent"
h_pums, jd_households = cat.joint_distribution(
h_pums,
cat.category_combinations(self.h_acs_cat.columns),
{"hh_cars": cars_cat,
"hh_children": children_cat,
"hh_income": income_cat,
"hh_workers": workers_cat,
"hh_size": hh_size_cat}
)
return h_pums, jd_households
def get_person_joint_dist_for_geography(self, ind):
c = self.c
puma10, puma00 = c.tract_to_puma(ind.state, ind.county, ind.tract)
# this is cached so won't download more than once
if type(puma00) == str:
p_pums = self.c.download_population_pums(ind.state, puma10, puma00,
usecols=self.p_pums_cols)
elif np.isnan(puma00): # only puma10 available
p_pums = self.c.download_population_pums(ind.state, puma10, None,
usecols=self.p_pums_cols)
def age_cat(r):
if r.AGEP <= 19:
return "19 and under"
elif r.AGEP <= 35:
return "20 to 35"
elif r.AGEP <= 60:
return "35 to 60"
return "above 60"
def race_cat(r):
if r.RAC1P == 1:
return "white"
elif r.RAC1P == 2:
return "black"
elif r.RAC1P == 6:
return "asian"
return "other"
def sex_cat(r):
if r.SEX == 1:
return "male"
return "female"
def hispanic_cat(r):
if r.HISP == 1:
return "no"
return "yes"
def industry_cat(r):
try:
if r.NAICSP[0] == '1':
return "agriculture"
elif r.NAICSP[0] == '2':
return "agriculture"
elif r.NAICSP[0] == '3':
return "manufacturing"
elif r.NAICSP[0] == '4':
return "retail / transportation"
elif r.NAICSP[0] == '5':
return "information"
elif r.NAICSP[0] == '6':
return "educational / health"
elif r.NAICSP[0] == '7':
return "arts"
elif r.NAICSP[0] == '8':
return "other services"
elif r.NAICSP[0] == '9':
return "other services"
else:
return "not employed"
except:
return "not employed"
p_pums, jd_persons = cat.joint_distribution(
p_pums,
cat.category_combinations(self.p_acs_cat.columns),
{"person_age": age_cat, "race": race_cat, "person_sex": sex_cat,
"hispanic": hispanic_cat, "industry": industry_cat}
)
return p_pums, jd_persons | |
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
data = np.loadtxt("onda.dat")
print(np.shape(data))
x = np.linspace(0.0, 1.0, np.shape(data)[1])
t = np.linspace(0.0, 6.0, np.shape(data)[0])
X, T = np.meshgrid(x,t)
fig = plt.figure(figsize=(13,5))
ax = fig.add_subplot(121, projection="3d")
surf = ax.plot_surface(X, T, data, cmap=cm.coolwarm, linewidth=0, antialiased=False)
plt.xlabel("Posicion [metros[]")
plt.ylabel("Tiempo [segundos]")
ax.set_zlim3d(-1, 1)
ax.view_init(20, 20)
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.subplot(1,2,2)
plt.plot(x, data[0,:], label="tiempo inicial")
plt.plot(x, data[-1,:], label="tiempo final")
plt.xlabel("Posicion [metros]")
plt.ylabel("Desplazamiento [metros]")
plt.legend()
plt.savefig("plot.png", bbox_inches="tight") | |
#!/usr/bin/env python
from __future__ import print_function
import sys
sys.path.append('../')
import skimage as skimage
from skimage import transform, color, exposure
from skimage.viewer import ImageViewer
import random
from random import choice
import numpy as np
from collections import deque
import time
import csv
import os
import json
from keras.models import model_from_json
from keras.models import Sequential, load_model, Model
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, Dense, Flatten, merge, MaxPooling2D, Input, AveragePooling2D, Lambda, Merge, Activation, Embedding
from keras.optimizers import SGD, Adam, rmsprop
from keras import backend as K
import keras
import pandas as pd
from vizdoom import DoomGame, ScreenResolution
from vizdoom import *
import itertools as it
from time import sleep
import tensorflow as tf
from networks import Networks
# from segmentation_image import *
#to check if keras is using GPU
class DFPAgent:
def __init__(self, state_size, measurement_size, action_size, timesteps):
# get size of state, measurement, action, and timestep
self.state_size = state_size
self.measurement_size = measurement_size
self.action_size = action_size
self.timesteps = timesteps
# these is hyper parameters for the DFP
self.gamma = 0.99
self.learning_rate = 0.00001
self.epsilon = 1.0
self.initial_epsilon = 1.0
self.final_epsilon = 0.0001
self.batch_size = 64
self.observe = 50000 #2000
self.explore = 200000
self.frame_per_action = 4
self.timestep_per_train = 5 #5 # Number of timesteps between training interval
# experience replay buffer
self.memory = deque()
self.max_memory = 20000
# create model
self.model = None
# Performance Statistics
self.stats_window_size= 5 # window size for computing rolling statistics
self.mavg_score = [] # Moving Average of Survival Time
self.var_score = [] # Variance of Survival Time
def get_action(self, state, measurement, goal, inference_goal):
"""
Get action from model using epsilon-greedy policy
"""
if np.random.rand() <= self.epsilon:
#print("----------Random Action----------")
action_idx = random.randrange(self.action_size)
else:
measurement = np.expand_dims(measurement, axis=0)
goal = np.expand_dims(goal, axis=0)
f = self.model.predict([state, measurement, goal]) # [1x6, 1x6, 1x6]
f_pred = np.vstack(f) # 3x6
obj = np.sum(np.multiply(f_pred, inference_goal), axis=1) # num_action
action_idx = np.argmax(obj)
return action_idx
# Save trajectory sample <s,a,r,s'> to the replay memory
def replay_memory(self, s_t, action_idx, r_t, s_t1, m_t, is_terminated):
self.memory.append((s_t, action_idx, r_t, s_t1, m_t, is_terminated))
if self.epsilon > self.final_epsilon and t > self.observe:
self.epsilon -= (self.initial_epsilon - self.final_epsilon) / self.explore
if len(self.memory) > self.max_memory:
self.memory.popleft()
# Pick samples randomly from replay memory (with batch_size)
def train_minibatch_replay(self, goal):
"""
Train on a single minibatch
"""
batch_size = min(self.batch_size, len(self.memory))
rand_indices = np.random.choice(len(self.memory)-(self.timesteps[-1]+1), self.batch_size)
state_input = np.zeros(((batch_size,) + self.state_size)) # Shape batch_size, img_rows, img_cols, img_channels
measurement_input = np.zeros((batch_size, self.measurement_size))
goal_input = np.tile(goal, (batch_size, 1))
f_action_target = np.zeros((batch_size, (self.measurement_size * len(self.timesteps))))
action = []
for i, idx in enumerate(rand_indices):
future_measurements = []
last_offset = 0
done = False
for j in range(self.timesteps[-1]+1):
if not self.memory[idx+j][5]: # if episode is not finished
if j in self.timesteps: # 1,2,4,8,16,32
if not done:
future_measurements += list( (self.memory[idx+j][4] - self.memory[idx][4]) )
last_offset = j
else:
future_measurements += list( (self.memory[idx+last_offset][4] - self.memory[idx][4]) )
else:
done = True
if j in self.timesteps: # 1,2,4,8,16,32
future_measurements += list( (self.memory[idx+last_offset][4] - self.memory[idx][4]) )
f_action_target[i,:] = np.array(future_measurements)
state_input[i,:,:,:] = self.memory[idx][0][0,:,:,:]
measurement_input[i,:] = self.memory[idx][4]
action.append(self.memory[idx][1])
f_target = self.model.predict([state_input, measurement_input, goal_input]) # Shape [32x18,32x18,32x18]
for i in range(self.batch_size):
f_target[action[i]][i,:] = f_action_target[i]
loss = self.model.train_on_batch([state_input, measurement_input, goal_input], f_target)
return loss
# load the saved model
def load_model(self, name):
self.model.load_weights(name)
# save the model which is under training
def save_model(self, name):
self.model.save_weights(name, overwrite=True)
def preprocessImg(img, size):
img = np.rollaxis(img, 0, 3) # It becomes (640, 480, 3)
img = skimage.transform.resize(img,size)
img = skimage.color.rgb2gray(img)
return img
################################################################
#FOR COMPUTATION OF DEPTH MAP
from depth_map import *
################################################################""
#python3 dfp_extended_measures.py test 1 1
import argparse
import sys
if __name__ == '__main__':
title = sys.argv[1]
n_measures = int(sys.argv[2]) # number of measurements
depth_perception = int(sys.argv[3])
mask_perception = int(sys.argv[4])
test_phase = int(sys.argv[5])
d_env = int(sys.argv[6])
random_goal = int(sys.argv[7])
sess = tf.Session()
sess2 = tf.Session()
try:
sess.close()
except NameError:
pass
try:
sess2.close()
except NameError:
pass
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config) #session depth map
input_node, net = init_depth_map(sess)
# Avoid Tensorflow eats up GPU memory
config2 = tf.ConfigProto()
config2.gpu_options.allow_growth = True
sess2 = tf.Session(config=config2)
K.set_session(sess2)
game = DoomGame()
if d_env==1:
game.load_config("../vizdoom/scenarios/health_gathering_supreme.cfg")
elif d_env==0:
game.load_config("../vizdoom/scenarios/health_gathering.cfg")
elif d_env==2:
game.load_config("../vizdoom/scenarios/D3.cfg")
game.set_sound_enabled(False)
game.set_screen_resolution(ScreenResolution.RES_640X480)
game.set_window_visible(False)
# Enables labeling of in game objects labeling.
game.set_labels_buffer_enabled(True)
# Enables depth buffer.
game.set_depth_buffer_enabled(True)
# Enables buffer with top down map of he current episode/level .
# game.set_automap_buffer_enabled(True)
game.init()
game.new_episode()
game_state = game.get_state()
if not game.is_episode_finished():
labels = game_state.labels_buffer
# if labels is not None:
# plt.imshow(labels)
# plt.show()
misc = game_state.game_variables # [Health]
prev_misc = misc
action_size = game.get_available_buttons_size() # [Turn Left, Turn Right, Move Forward]
measurement_size = n_measures # [Health, Medkit, Poison]
timesteps = [1, 2, 4, 8, 16, 32]
goal_size = measurement_size * len(timesteps)
img_rows , img_cols = 84, 84
# Convert image into Black and white
img_channels = 1
if depth_perception:
img_channels += 1 # We stack 1 frame (then we will put 2 other channels: depth map and segmented image)
if mask_perception:
img_channels += 1
state_size = (img_rows, img_cols, img_channels)
agent = DFPAgent(state_size, measurement_size, action_size, timesteps)
agent.model = Networks.dfp_network(state_size, measurement_size, goal_size, action_size, len(timesteps),
agent.learning_rate)
if d_env==1:
agent.observe = 50000
agent.explore = 350000
tend = 500000
elif d_env==0:
agent.observe = 2000
agent.explore = 50000
tend = 60000
elif d_env==2:
agent.observe = 50000
agent.explore = 250000
tend = 300000
if test_phase:
print("Loading agent's weights for Test session...")
agent.epsilon = 0
agent.load_model('../../experiments/'+title+'/model/DFP.h5')
agent.tend = 50000
x_t = game_state.screen_buffer # 480 x 640
x_t = preprocessImg(x_t, size=(img_rows, img_cols))
if depth_perception and mask_perception and not game.is_episode_finished():
# Compute depth
depth = game_state.depth_buffer
depth = skimage.transform.resize(depth, (img_rows, img_cols))
# Compute mask
mask = game_state.labels_buffer
mask = skimage.transform.resize(mask, (img_rows, img_cols))
if depth is not None and mask is not None:
s_t = np.zeros((img_rows, img_cols,3))
s_t[:,:,0] = x_t
s_t[:,:,1] = depth
s_t[:,:,2] = mask
s_t = np.expand_dims(s_t, axis=0) # 1x64x64x3
elif depth is not None:
s_t = np.zeros((img_rows, img_cols,2))
s_t[:,:,0] = x_t
s_t[:,:,1] = depth
s_t = np.expand_dims(s_t, axis=0) # 1x64x64x3
elif mask is not None:
s_t = np.zeros((img_rows, img_cols,2))
s_t[:,:,0] = x_t
s_t[:,:,1] = mask
s_t = np.expand_dims(s_t, axis=0) # 1x64x64x3
else:
x_t = np.reshape(x_t, (1, img_rows, img_cols, 1))
s_t = x_t
elif depth_perception:
# Compute depth
depth = game_state.depth_buffer
depth = skimage.transform.resize(depth, (img_rows, img_cols))
if depth is not None:
s_t = np.zeros((img_rows, img_cols,2))
s_t[:,:,0] = x_t
s_t[:,:,1] = depth
s_t = np.expand_dims(s_t, axis=0) # 1x64x64x2
else:
x_t = np.reshape(x_t, (1, img_rows, img_cols, 1))
s_t = x_t
elif mask_perception and not game.is_episode_finished():
# Compute mask
mask = game_state.labels_buffer
mask = skimage.transform.resize(mask, (img_rows, img_cols))
if mask is not None:
s_t = np.zeros((img_rows, img_cols,2))
s_t[:,:,0] = x_t # It becomes 64x64x2
s_t[:,:,1] = mask
s_t = np.expand_dims(s_t, axis=0) # 1x64x64x2
else:
x_t = np.reshape(x_t, (1, img_rows, img_cols, 1))
s_t = x_t
else:
s_t = np.expand_dims(x_t, axis=2) # It becomes 64x64x1
s_t = np.expand_dims(s_t, axis=0) # 1x64x64x1
# Number of medkit pickup as measurement
medkit = 0
# Number of poison pickup as measurement
poison = 0
frags = 0
amo = 0
health = 0
# Initial normalized measurements
assert(n_measures in [1,3])
if n_measures==3:
if d_env != 2:
m_t = np.array([misc[0]/30.0, medkit/10.0, poison])
else:
m_t = np.array([misc[0]/10.0, misc[1]/30.0, misc[2]]) # [AMO, HEALTH, FRAGS]
elif n_measures==1:
if d_env != 2:
m_t = np.array([misc[0] / 30.0])
else:
m_t = np.array([misc[1]/30.0]) # [HEALTH]
# Goal
if n_measures == 3:
if d_env != 2:
goal = np.array([1.0, 1.0, -1.0] * len(timesteps))
else:
goal = np.array([0.5, 0.5, 1.0] * len(timesteps))
elif n_measures==1:
goal = np.array([1.0] * len(timesteps))
# Goal for Inference (Can change during test-time)
inference_goal = goal
is_terminated = game.is_episode_finished()
# Start training
epsilon = agent.initial_epsilon
GAME = 0
t = 0
max_life = 0 # Maximum episode life (Proxy for agent performance)
life = 0
# Buffer to compute rolling statistics
life_buffer = []
if not os.path.exists('../../experiments/'+title):
os.mkdir('../../experiments/'+title)
if not os.path.exists('../../experiments/'+title+'/model'):
os.mkdir('../../experiments/'+title+'/model')
if not os.path.exists('../../experiments/'+title+'/logs'):
os.mkdir('../../experiments/'+title+'/logs')
if not os.path.exists('../../experiments/'+title+'/statistics'):
os.mkdir('../../experiments/'+title+'/statistics')
csv_file = pd.DataFrame(columns=['Time', 'State', 'Epsilon', 'Action',
'Reward', 'Medkit', 'Poison', 'Frags',
'Amo', 'Max Life', 'Life', 'Mean Score',
'Var Score', 'Health', 'Loss'])
if test_phase:
csv_file.to_csv('../../experiments/' + title + '/logs/' + 'results_test.csv', sep=',', index=False)
else:
csv_file.to_csv('../../experiments/' + title + '/logs/' + 'results.csv', sep=',', index=False)
if random_goal:
inference_goal = goal = np.array(list(np.random.uniform(0, 1, n_measures)) * len(timesteps))
while not game.is_episode_finished():
loss = 0
r_t = 0
a_t = np.zeros([action_size])
# Epsilon Greedy
action_idx = agent.get_action(s_t, m_t, goal, inference_goal)
a_t[action_idx] = 1
a_t = a_t.astype(int)
game.set_action(a_t.tolist())
skiprate = agent.frame_per_action
game.advance_action(skiprate)
game_state = game.get_state() # Observe again after we take the action
if d_env == 2:
amo = misc[0]
health = misc[1]
frags = misc[2]
else:
health = misc[0]
is_terminated = game.is_episode_finished()
r_t = game.get_last_reward()
if (is_terminated):
if (life > max_life):
max_life = life
GAME += 1
life_buffer.append(life)
print ("Episode Finish ", misc)
if d_env == 2:
amo = misc[0]
health = misc[1]
frags = misc[2]
else:
health = misc[0]
game.new_episode()
if random_goal:
inference_goal = goal = np.array(list(np.random.uniform(0, 1, n_measures)) * len(timesteps))
game_state = game.get_state()
misc = game_state.game_variables
x_t1 = game_state.screen_buffer
x_t1 = game_state.screen_buffer
x_t1 = preprocessImg(x_t1, size=(img_rows, img_cols))
misc = game_state.game_variables
if depth_perception and mask_perception and not game.is_episode_finished():
# Compute depth
depth = game_state.depth_buffer
depth = skimage.transform.resize(depth, (img_rows, img_cols))
# Compute mask
mask = game_state.labels_buffer
mask = skimage.transform.resize(mask, (img_rows, img_cols))
if depth is not None and mask is not None:
s_t1 = np.zeros((img_rows, img_cols,3))
s_t1[:,:,0] = x_t1
s_t1[:,:,1] = depth
s_t1[:,:,2] = mask
s_t1 = np.expand_dims(s_t1, axis=0) # 1x64x64x3
elif depth is not None:
s_t1 = np.zeros((img_rows, img_cols,2))
s_t1[:,:,0] = x_t1
s_t1[:,:,1] = depth
s_t1 = np.expand_dims(s_t1, axis=0) # 1x64x64x2
elif mask is not None:
s_t1 = np.zeros((img_rows, img_cols,2))
s_t1[:,:,0] = x_t1
s_t1[:,:,1] = mask
s_t1 = np.expand_dims(s_t1, axis=0) # 1x64x64x2
else:
x_t1 = np.reshape(x_t1, (1, img_rows, img_cols, 1))
s_t1 = x_t1
elif depth_perception:
# Compute depth
depth = game_state.depth_buffer
depth = skimage.transform.resize(depth, (img_rows, img_cols))
if depth is not None:
s_t1 = np.zeros((img_rows, img_cols,2))
s_t1[:,:,0] = x_t1
s_t1[:,:,1] = depth
s_t1 = np.expand_dims(s_t1, axis=0) # 1x64x64x2
else:
x_t1 = np.reshape(x_t1, (1, img_rows, img_cols, 1))
s_t1 = x_t1
elif mask_perception and not game.is_episode_finished():
# Compute mask
mask = game_state.labels_buffer
mask = skimage.transform.resize(mask, (img_rows, img_cols))
if mask is not None:
s_t1 = np.zeros((img_rows, img_cols,2))
s_t1[:,:,0] = x_t1 # It becomes 64x64x2
s_t1[:,:,1] = mask
s_t1 = np.expand_dims(s_t1, axis=0) # 1x64x64x2
else:
x_t1 = np.reshape(x_t1, (1, img_rows, img_cols, 1))
s_t1 = x_t1
else:
x_t1 = np.reshape(x_t1, (1, img_rows, img_cols, 1))
s_t1 = x_t1
if d_env != 2:
if (prev_misc[0] - misc[0] > 8): # Pick up Poison
poison += 1
if (misc[0] > prev_misc[0]): # Pick up Health Pack
medkit += 1
else:
if (prev_misc[1] - misc[1] > 8): # Pick up Poison
poison += 1
if (misc[1] > prev_misc[1]): # Pick up Health Pack
medkit += 1
previous_life = life
if (is_terminated):
life = 0
else:
life += 1
# Update the cache
prev_misc = misc
if not test_phase:
# save the sample <s, a, r, s'> to the replay memory and decrease epsilon
agent.replay_memory(s_t, action_idx, r_t, s_t1, m_t, is_terminated)
if n_measures==3:
if d_env !=2:
m_t = np.array([misc[0] / 30.0, medkit/10.0, poison]) # Measurement after transition
else:
m_t = np.array([misc[0]/10.0, misc[1]/30.0, misc[2]]) # [AMO, HEALTH, FRAGS]
elif n_measures == 1:
m_t = np.array([misc[0] / 30.0])
# if depth_perception:
# plt.imshow(depth)
# plt.show()
# if mask_perception:
# plt.imshow(mask)
# plt.show()
if t > agent.observe and t % agent.timestep_per_train == 0 and not test_phase:
# print("DO TRAIN")
loss = agent.train_minibatch_replay(goal)
s_t = s_t1
t += 1
# save progress every 10000 iterations
if t % 10000 == 0 and not test_phase:
agent.save_model('../../experiments/'+title+'/model/DFP.h5')
# print info
state = ""
if t <= agent.observe:
state = "observe"
elif t > agent.observe and t <= agent.observe + agent.explore:
state = "explore/train" #train mais on continue à explorer
else:
state = "exploit/train" #train que en exploitant
if test_phase:
state = "test"
if (is_terminated):
print("TIME", t, "/ GAME", GAME, "/ STATE", state, \
"/ EPSILON", agent.epsilon, "/ ACTION", action_idx, "/ REWARD", r_t, \
"/ Medkit", medkit, "/ Poison", poison, "/ Amo", amo, "/Frags", frags, "/ MAX_LIFE", max_life, "/ LIFE", previous_life, "/ HEALTH", health, "/ LOSS", loss)
if GAME % agent.stats_window_size == 0 and t > agent.observe:
mean_life = np.mean(np.array(life_buffer))
var_life = np.var(np.array(life_buffer))
else:
mean_life = None
var_life = None
if test_phase:
path_result = '../../experiments/' + title + '/logs/' + 'results_test.csv'
else:
path_result = '../../experiments/' + title + '/logs/' + 'results.csv'
with open(path_result, mode='a') as log_file:
writer = csv.writer(log_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
writer.writerow([t, state, agent.epsilon, action_idx, r_t,
medkit, poison, frags, amo, max_life, previous_life,
mean_life, var_life, health, loss])
medkit = 0
poison = 0
# Save Agent's Performance Statistics
if GAME % agent.stats_window_size == 0 and t > agent.observe:
print("Update Rolling Statistics")
agent.mavg_score.append(np.mean(np.array(life_buffer)))
agent.var_score.append(np.var(np.array(life_buffer)))
# Reset rolling stats buffer
life_buffer = []
# Write Rolling Statistics to file
with open('../../experiments/'+title+'/statistics/stats.txt', 'w+') as stats_file:
stats_file.write('Game: ' + str(GAME) + '\n')
stats_file.write('Max Score: ' + str(max_life) + '\n')
stats_file.write('mavg_score: ' + str(agent.mavg_score) + '\n')
stats_file.write('var_score: ' + str(agent.var_score) + '\n')
if t == tend:
break
sess.close()
sess2.close() | |
from microprediction import MicroWriter
import numpy as np
from pprint import pprint
import matplotlib.pyplot as plt
import random
import time
import warnings
warnings.filterwarnings('ignore')
from copulas.multivariate import GaussianMultivariate
import pandas as pd
# Grab the Github secret
import os
WRITE_KEY = os.environ.get('WRITE_KEY') # <-- You need to add a Github secret
ANIMAL = MicroWriter.animal_from_key(WRITE_KEY) # <-- Your nom de plume
REPO = 'https://github.com/free-soellingeraj/microactors/blob/master/fit.py' # <--- Change your username
print('This is '+ANIMAL+' firing up')
STOP_LOSS = 25 # <--- Governs when we give up on a stream/horizon
# Get historical data, fit a copula, and submit
def fit_and_sample(lagged_zvalues:[[float]],num:int, copula=None):
""" Example of creating a "sample" of future values
lagged_zvalues: [ [z1,z2,z3] ] distributed N(0,1) margins, roughly
copula : Something from https://pypi.org/project/copulas/
returns: [ [z1, z2, z3] ] representative sample
Swap out this function for whatever you like.
"""
# Remark 1: It's lazy to just sample synthetic data
# Remark 2: Any multivariate density estimation could go here.
# Remark 3: If you prefer uniform margin, use mw.get_lagged_copulas(name=name, count= 5000)
#
# See https://www.microprediction.com/blog/lottery for discussion of this "game"
df = pd.DataFrame(data=lagged_zvalues)
if copula is None:
copula = GaussianMultivariate() # <---
copula.fit(df)
synthetic = copula.sample(num)
return synthetic.values.tolist()
if __name__ == "__main__":
mw = MicroWriter(write_key=WRITE_KEY)
mw.set_repository(REPO) # Just polite, creates a CODE badge on the leaderboard
NAMES = [ n for n in mw.get_stream_names() if 'z2~' in n or 'z3~' in n ]
for _ in range(1):
name = random.choice(NAMES)
lagged_zvalues = mw.get_lagged_zvalues(name=name, count= 5000)
if len(lagged_zvalues)>20:
zvalues = fit_and_sample(lagged_zvalues=lagged_zvalues, num=mw.num_predictions)
pprint( (name, len(lagged_zvalues), len(zvalues)))
try:
for delay in mw.DELAYS:
res = mw.submit_zvalues(name=name, zvalues=zvalues, delay=delay )
pprint(res)
except Exception as e:
print(e)
# Quit some stream/horizon combinations where we fare poorly
mw.cancel_worst_active(stop_loss=STOP_LOSS, num=3) | |
import sys
sys.path.append('../')
import utils
import numpy as np
import imageio
import os
class NucleiDataset(utils.Dataset):
"""Override:
load_image()
load_mask()
image_reference()
"""
def add_nuclei(self, root_dir, mode, split_ratio=0.9):
# Add classes
self.add_class("nuclei", 1, "nuclei") # source, id, name. id = 0s is BG
image_names = os.listdir(root_dir)
length = len(image_names)
np.random.seed(1000)
image_names = list(np.random.permutation(image_names))
np.random.seed(None)
if mode == 'train':
image_names = image_names[: int(split_ratio*length)]
if mode == 'val':
image_names = image_names[int(split_ratio*length):]
if mode == 'val_as_test':
image_names = image_names[int(split_ratio*length):]
mode = 'test'
dirs = [root_dir + img_name + '/images/' for img_name in image_names]
mask_dirs = [root_dir + img_name + '/masks/' for img_name in image_names]
# Add images
for i in range(len(image_names)):
self.add_image(
source = "nuclei",
image_id = i,
path = dirs[i] + image_names[i] + '.png',
mask_dir = mask_dirs[i],
name = image_names[i]
)
def load_image(self, image_id):
"""Load the specified image and return a [H,W,3] Numpy array.
"""
image = imageio.imread(self.image_info[image_id]['path'])
# RGBA to RGB
if image.shape[2] != 3:
image = image[:,:,:3]
return image
def image_reference(self, image_id):
"""Return the details of the image."""
info = self.image_info[image_id]
if info["source"] == "nuclei":
return info["path"]
else:
super(NucleiDataset, self).image_reference(self, image_id)
def load_mask(self, image_id):
"""
Returns:
masks: A binary array of shape [height, width, instance count] with
a binary mask per instance.
class_ids: a 1D array of class IDs of the instance masks.
"""
info = self.image_info[image_id]
mask_dir= info['mask_dir']
mask_names = os.listdir(mask_dir)
mask_paths = [mask_dir + mask_name for mask_name in mask_names]
count = len(mask_paths)
masks = [imageio.imread(path) for path in mask_paths]
mask = np.stack(masks, axis=-1)
# mask = mask.astype(bool)
mask = np.where(mask>128, 1, 0)
class_ids = np.ones(count,dtype=np.int32)
return mask, class_ids
def load_semantic(self, image_id):
info = self.image_info[image_id]
path = info['mask_dir'].replace('masks','images')
mask_path = path + 'mask.png'
mask = imageio.imread(mask_path)
mask = np.where(mask>128, 1, 0)
return mask
if __name__ == "__main__":
ds = NucleiDataset()
#ds.add_nuclei('data/stage1_train/','train')
ds.add_nuclei('data/stage1_train/','train')
ds.prepare()
print(ds.image_info[0])
image = ds.load_image(0)
print(image.shape)
# mask, _ = ds.load_mask(0)
# print(len(_))
# print(mask.shape)
means = []
for idx in ds.image_ids:
im = ds.load_image(idx)
means.append(np.mean(im[:,-1],axis=0))
print(np.mean(means,axis=0)) | |
import unittest
import scanner.logSetup as logSetup
from bitstring import Bits
import numpy as np
import random
random.seed()
import scanner.hashFile as hashFile
import scanner.unitConverters as unitConverters
def b2i(binaryStringIn):
if len(binaryStringIn) != 64:
raise ValueError("Input strings must be 64 chars long!")
val = Bits(bin=binaryStringIn)
return val.int
TEST_ARRS = [
(
[[ True, True, True, False, False, True, True, True],
[ True, False, False, False, False, True, True, True],
[ True, False, False, True, False, False, True, False],
[False, False, True, False, True, True, False, True],
[ True, True, False, False, True, False, False, True],
[False, False, False, False, True, True, False, True],
[False, True, False, True, False, False, False, True],
[ True, False, True, False, False, True, True, True]],
"1110011110000111100100100010110111001001000011010101000110100111"
),
(
[[False, True, False, False, False, False, False, False],
[False, True, True, False, False, True, False, True],
[ True, True, True, True, True, True, True, False],
[False, True, False, True, False, True, True, True],
[False, True, True, True, False, True, True, True],
[ True, False, True, True, True, False, True, False],
[ True, True, False, False, False, True, False, True],
[ True, True, True, True, True, False, True, True]],
"0100000001100101111111100101011101110111101110101100010111111011"
),
(
[[ True, False, True, False, True, False, True, True],
[False, True, True, False, False, False, True, False],
[ True, False, True, True, True, False, False, False],
[ True, False, True, False, True, True, False, False],
[False, False, False, True, False, True, False, False],
[False, False, True, True, False, False, True, True],
[ True, True, True, True, True, False, False, True],
[False, True, True, True, True, False, False, False]],
"1010101101100010101110001010110000010100001100111111100101111000"
),
(
[[ True, True, False, True, True, True, False, False],
[ True, True, False, False, True, True, False, False],
[False, False, True, False, True, True, False, True],
[ True, False, True, True, False, True, False, False],
[ True, False, True, False, True, False, False, True],
[ True, False, False, True, True, True, False, True],
[False, False, False, False, False, False, False, True],
[ True, True, True, False, False, True, True, True]],
"1101110011001100001011011011010010101001100111010000000111100111"
),
(
[[False, True, True, False, False, False, False, True],
[False, True, False, True, False, False, False, True],
[ True, False, False, False, False, True, True, True],
[ True, True, False, False, False, True, False, False],
[False, True, False, True, True, False, False, False],
[ True, True, True, True, False, True, False, False],
[ True, False, False, True, True, False, True, True],
[ True, True, False, True, True, True, True, False]],
"0110000101010001100001111100010001011000111101001001101111011110"
)
]
class TestSequenceFunctions(unittest.TestCase):
    """Cross-checks the binary-string/array -> integer conversion helpers.

    ``b2i`` (local reference implementation) and
    ``unitConverters.binStrToInt`` must agree on 64-bit two's-complement
    strings, and ``unitConverters.binary_array_to_int`` must agree on the
    equivalent boolean arrays.
    """

    def __init__(self, *args, **kwargs):
        # Configure logging once before any test in this case runs.
        logSetup.initLogging()
        super().__init__(*args, **kwargs)

    def test_binConversions(self):
        val = b2i("0000000000000000000000000000000000000000000000000000000000000000")
        self.assertEqual(val, 0)
        val = b2i("1111111111111111111111111111111111111111111111111111111111111111")
        self.assertEqual(val, -1)
        val = b2i("1000000000000000000000000000000000000000000000000000000000000000")
        self.assertEqual(val, -9223372036854775808)
        val = b2i("0111111111111111111111111111111111111111111111111111111111111111")
        self.assertEqual(val, 9223372036854775807)
        val = b2i("1100000000000000000000000000000000000000000000000000000000000000")
        self.assertEqual(val, -4611686018427387904)
        val = b2i("0100000000000000000000000000000000000000000000000000000000000000")
        self.assertEqual(val, 4611686018427387904)
        self.assertRaises(ValueError, b2i, "101")

    def test_binConversions2(self):
        val = unitConverters.binStrToInt("0000000000000000000000000000000000000000000000000000000000000000")
        self.assertEqual(val, 0)
        val = unitConverters.binStrToInt("1111111111111111111111111111111111111111111111111111111111111111")
        self.assertEqual(val, -1)
        val = unitConverters.binStrToInt("1000000000000000000000000000000000000000000000000000000000000000")
        self.assertEqual(val, -9223372036854775808)
        val = unitConverters.binStrToInt("0111111111111111111111111111111111111111111111111111111111111111")
        self.assertEqual(val, 9223372036854775807)
        val = unitConverters.binStrToInt("1100000000000000000000000000000000000000000000000000000000000000")
        self.assertEqual(val, -4611686018427387904)
        val = unitConverters.binStrToInt("0100000000000000000000000000000000000000000000000000000000000000")
        self.assertEqual(val, 4611686018427387904)
        # Fixed: this previously re-tested b2i (copy-paste from the test
        # above); the parallel intent is clearly to check that binStrToInt
        # also rejects non-64-bit strings.  TODO confirm binStrToInt raises
        # ValueError on short input like b2i does.
        self.assertRaises(ValueError, unitConverters.binStrToInt, "101")

    def test_binConversions3(self):
        # Kinda brute-forcey random testing, but it'll work for the moment.
        for x in range(1000):
            test = ''.join([str(random.randrange(0, 2, 1)) for x in range(64)])
            self.assertEqual(b2i(test), unitConverters.binStrToInt(test))

    def test_numpyConversions(self):
        for arr, valStr in TEST_ARRS:
            arr = np.array(arr)
            st = "".join(["1" if dat else '0' for dat in arr.flatten()])
            val = b2i(valStr)
            self.assertEqual(unitConverters.binary_array_to_int(arr), val)
            self.assertEqual(b2i(st), val)
            self.assertEqual(valStr, st)
import numpy as np
from rltools.policy import Policy
STAY_ON_ONE_LEG, PUT_OTHER_DOWN, PUSH_OFF = 1, 2, 3
SPEED = 0.29 # Will fall forward on higher speed
SUPPORT_KNEE_ANGLE = +0.1
class MultiWalkerHeuristicPolicy(Policy):
    """Hand-coded walking controller applied independently to each walker.

    A port of the classic Gym BipedalWalker heuristic: a three-state machine
    (stay on one leg / put the other down / push off) produces hip and knee
    angle targets which simple PD terms turn into joint torques.
    """

    def __init__(self, observation_space, action_space):
        super(MultiWalkerHeuristicPolicy, self).__init__(observation_space, action_space)

    def sample_actions(self, obs_B_Do, deterministic=True):
        """Return one 4-dim action per agent row of ``obs_B_Do``, plus a
        dummy action distribution (zero means, unit stds) to satisfy the
        caller's interface."""
        n_agents = obs_B_Do.shape[0]
        actions = np.zeros((n_agents, 4))
        for i in range(n_agents):  # fixed: `xrange` is Python 2 only
            a = np.zeros(4)
            s = obs_B_Do[i]
            state = STAY_ON_ONE_LEG
            moving_leg = 0
            supporting_leg = 1 - moving_leg
            supporting_knee_angle = SUPPORT_KNEE_ANGLE
            # Observation layout: s[0:4] appear to be body state, then 5
            # values per leg starting at index 4 -- TODO confirm against the
            # environment.  (Unused reads of the contact flags s[8]/s[13]
            # were removed.)
            moving_s_base = 4 + 5 * moving_leg
            supporting_s_base = 4 + 5 * supporting_leg
            hip_targ = [None, None]   # -0.8 .. +1.1
            knee_targ = [None, None]  # -0.6 .. +0.9
            hip_todo = [0.0, 0.0]
            knee_todo = [0.0, 0.0]
            if state == STAY_ON_ONE_LEG:
                hip_targ[moving_leg] = 1.1
                knee_targ[moving_leg] = -0.6
                supporting_knee_angle += 0.03
                if s[2] > SPEED:
                    supporting_knee_angle += 0.03
                supporting_knee_angle = min(supporting_knee_angle, SUPPORT_KNEE_ANGLE)
                knee_targ[supporting_leg] = supporting_knee_angle
                if s[supporting_s_base + 0] < 0.10:  # supporting leg is behind
                    state = PUT_OTHER_DOWN
            if state == PUT_OTHER_DOWN:
                hip_targ[moving_leg] = +0.1
                knee_targ[moving_leg] = SUPPORT_KNEE_ANGLE
                knee_targ[supporting_leg] = supporting_knee_angle
                if s[moving_s_base + 4]:
                    state = PUSH_OFF
                    supporting_knee_angle = min(s[moving_s_base + 2], SUPPORT_KNEE_ANGLE)
            if state == PUSH_OFF:
                knee_targ[moving_leg] = supporting_knee_angle
                knee_targ[supporting_leg] = +1.0
                if s[supporting_s_base + 2] > 0.88 or s[2] > 1.2 * SPEED:
                    state = STAY_ON_ONE_LEG
                    moving_leg = 1 - moving_leg
                    supporting_leg = 1 - moving_leg
            # NOTE(review): truthiness (not `is not None`) matches the
            # upstream Gym heuristic; a target of exactly 0.0 is skipped.
            if hip_targ[0]:
                hip_todo[0] = 0.9 * (hip_targ[0] - s[4]) - 0.25 * s[5]
            if hip_targ[1]:
                hip_todo[1] = 0.9 * (hip_targ[1] - s[9]) - 0.25 * s[10]
            if knee_targ[0]:
                knee_todo[0] = 4.0 * (knee_targ[0] - s[6]) - 0.25 * s[7]
            if knee_targ[1]:
                knee_todo[1] = 4.0 * (knee_targ[1] - s[11]) - 0.25 * s[12]
            hip_todo[0] -= 0.9 * (0 - s[0]) - 1.5 * s[1]  # PID to keep head strait
            hip_todo[1] -= 0.9 * (0 - s[0]) - 1.5 * s[1]
            knee_todo[0] -= 15.0 * s[3]  # vertical speed, to damp oscillations
            knee_todo[1] -= 15.0 * s[3]
            a[0] = hip_todo[0]
            a[1] = knee_todo[0]
            a[2] = hip_todo[1]
            a[3] = knee_todo[1]
            a = np.clip(0.5 * a, -1.0, 1.0)
            actions[i, :] = a
        fake_actiondist = np.concatenate([np.zeros((n_agents, 4)), np.ones((n_agents, 4))])
        return actions, fake_actiondist
if __name__ == '__main__':
    from madrl_environments.walker.multi_walker import MultiWalkerEnv
    from vis import Visualizer
    import pprint

    env = MultiWalkerEnv(n_walkers=3)
    train_args = {'discount': 0.99, 'control': 'decentralized'}
    vis = Visualizer(env, train_args, 500, 1, True, 'heuristic')
    # Fixed: the policy was constructed with the observation space passed
    # twice; the second argument must be the action space.
    rew, info = vis(None, hpolicy=MultiWalkerHeuristicPolicy(env.agents[0].observation_space,
                                                             env.agents[0].action_space))
    pprint.pprint(rew)
    pprint.pprint(info)
#!/usr/bin/env python3
import sys
import subprocess
import re
import os
from distutils.version import LooseVersion, StrictVersion
# Refuse to run under Python 2; the script relies on Python 3 semantics.
if sys.version_info < (3, 0):
    print("Error: Python 2 is not supported")
    sys.exit(1)
print("Python:", sys.version_info)
# numpy is required by the flow; bail out early with a clear message if missing.
try:
    import numpy
except ImportError:
    print("Error: Failed to import required modules: numpy")
    sys.exit(1)
# Function to test for tool installation and version
# ==============================================================================
def checkTool(toolName, cmd, pattern, requiredVersion):
    """Check that a tool is installed and at least ``requiredVersion``.

    Args:
        toolName: Human-readable tool name used in the printed status line.
        cmd: Shell command (with args) that prints the tool's version.
        pattern: Regex whose first group captures the version string.
        requiredVersion: Minimum acceptable version string.

    Returns:
        0 on PASS; 1 on FAIL (tool missing, version unparsable, or too old).
    """
    status = toolName + ": required " + requiredVersion + " (or greater), detected "
    output = ""
    error = 1
    ret = 1
    try:
        # Note that some tools print out the version information to stderr
        # (e.g spectre), so stderr is folded into stdout before capturing.
        process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        output, error = process.communicate()
    except (OSError, ValueError):
        # Fixed: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt.  A launch failure leaves error truthy, which is
        # reported as "no install" below.
        pass
    if (not error):
        m = re.search(pattern, output.decode('utf-8'))
        if m:
            version = m.group(1)
        else:
            version = "unknown"
        if version == "unknown":
            status += version + " - FAIL"
        elif LooseVersion(version) >= LooseVersion(requiredVersion):
            status += version + " - PASS"
            ret = 0
        else:
            status += version + " - FAIL"
    else:
        status += "no install - FAIL"
    print(status)
    return ret
# Main
# ==============================================================================
# Tool requirements for the flow.  Patterns are raw strings so regex escapes
# such as \S and \s are not treated as (invalid) Python string escapes; the
# pattern values themselves are unchanged.
toolList = [
    {
        "toolName": "PERL",
        "cmd": "perl -v",
        "pattern": r".*(v\S+)",
        "requiredVersion": "v5.10.1"
    },
    {
        "toolName": "Synopsys Design Compiler",
        "cmd": "dc_shell -v",
        "pattern": r"dc_shell\sversion\s+-\s+(\S+)",
        "requiredVersion": "O-2018.06"
    },
    {
        "toolName": "Synopsys Library Compiler",
        "cmd": "lc_shell -v",
        "pattern": r"lc_shell\sversion\s+-\s+(\S+)",
        "requiredVersion": "O-2018.06"
    },
    {
        "toolName": "Cadence Innovus",
        "cmd": "innovus -version | reset",
        "pattern": r"CDS: Innovus (v\S+)",
        "requiredVersion": "v18.10-p002_1"
    },
    {
        "toolName": "Synopsys Primetime",
        "cmd": "primetime -version",
        "pattern": r"pt_shell\sversion\s+-\s+(\S+)",
        "requiredVersion": "O-2018.06-1"
    },
    {
        "toolName": "Mentor Graphics Calibre",
        "cmd": "calibre -version",
        "pattern": r"Calibre (v\S+)",
        "requiredVersion": "v2019.3_25.15"
    },
    {
        "toolName": "Synopsys HSPICE",
        "cmd": "hspice -v",
        "pattern": r"HSPICE Version (\S+)",
        "requiredVersion": "N-2017.12-SP2-1"
    },
    {
        "toolName": "Cadence Spectre",
        "cmd": "spectre -version",
        "pattern": r"spectre\s+version\s+(\S+)",
        "requiredVersion": "15.1.0"
    }
    # {
    #     "toolName": "Cadence Liberate",
    #     "cmd": "liberate -v",
    #     "pattern": r"LIBERATE version (\S+)",
    #     "requiredVersion": "16.1.1.132"
    # }
]

# Each failing tool contributes 1 to the issue count.
status = 0
for tool in toolList:
    status += checkTool(tool["toolName"],
                        tool["cmd"],
                        tool["pattern"],
                        tool["requiredVersion"])

# Innovus will often leave the terminal in a bad state. Cleaning up the
# terminal
os.system("stty sane")

if status:
    print("\n\nTotal issues detected: " + str(status) + "\n")
else:
    print("\n\nEnvironment is successfully setup to run the FASoC flow\n")
""" a modified version of CRNN torch repository https://github.com/bgshih/crnn/blob/master/tool/create_dataset.py """
import fire
import os
import lmdb
import cv2
import numpy as np
def checkImageIsValid(imageBin):
    """Return True iff ``imageBin`` decodes to a non-empty grayscale image."""
    if imageBin is None:
        return False
    imageBuf = np.frombuffer(imageBin, dtype=np.uint8)
    img = cv2.imdecode(imageBuf, cv2.IMREAD_GRAYSCALE)
    if img is None:
        # Fixed: cv2.imdecode returns None for corrupt/undecodable data,
        # which previously raised AttributeError on img.shape.
        return False
    imgH, imgW = img.shape[0], img.shape[1]
    if imgH * imgW == 0:
        return False
    return True
def writeCache(env, cache):
    """Flush every (key, value) pair of ``cache`` into ``env`` in one write txn."""
    with env.begin(write=True) as txn:
        for key, value in cache.items():
            txn.put(key, value)
def createDataset(inputPath, gtFile, outputPath, checkValid=True):
    """
    Create LMDB dataset for training and evaluation.
    ARGS:
        inputPath  : input folder path where starts imagePath
        outputPath : LMDB output path
        gtFile     : list of image path and label (tab-separated, one per line)
        checkValid : if true, check the validity of every image
    """
    os.makedirs(outputPath, exist_ok=True)
    env = lmdb.open(outputPath, map_size=int(1e9))
    cache = {}
    cnt = 1
    with open(gtFile, 'r', encoding='utf-8') as data:
        datalist = data.readlines()
    nSamples = len(datalist)
    for i in range(nSamples):
        imagePath, label = datalist[i].strip('\n').split('\t')
        imagePath = os.path.join(inputPath, imagePath)
        # # only use alphanumeric data
        # if re.search('[^a-zA-Z0-9]', label):
        #     continue
        if not os.path.exists(imagePath):
            print('%s does not exist' % imagePath)
            continue
        with open(imagePath, 'rb') as f:
            imageBin = f.read()
        if checkValid:
            try:
                if not checkImageIsValid(imageBin):
                    print('%s is not a valid image' % imagePath)
                    continue
            except Exception:
                # Fixed: was a bare `except:` which also swallowed
                # SystemExit/KeyboardInterrupt.  Log the offending index
                # and keep going (deliberate best-effort behavior).
                print('error occured', i)
                with open(outputPath + '/error_image_log.txt', 'a') as log:
                    log.write('%s-th image data occured error\n' % str(i))
                continue
        imageKey = 'image-%09d'.encode() % cnt
        labelKey = 'label-%09d'.encode() % cnt
        cache[imageKey] = imageBin
        cache[labelKey] = label.encode()
        # Flush to LMDB in batches of 1000 to bound memory usage.
        if cnt % 1000 == 0:
            writeCache(env, cache)
            cache = {}
            print('Written %d / %d' % (cnt, nSamples))
        cnt += 1
    nSamples = cnt - 1
    cache['num-samples'.encode()] = str(nSamples).encode()
    writeCache(env, cache)
    print('Created dataset with %d samples' % nSamples)


if __name__ == '__main__':
    fire.Fire(createDataset)
from ldaUtils import LdaEncoder,LdaEncoding,createLabeledCorpDict
import numpy as np
from gensim import models
import pickle
import heapq
#Andrew O'Harney 28/04/14
#This scripts produces nExemplars for each of the topic models
#(Ordered by probability of belonging to a topic)
nExemplars = 10

# NOTE(review): the original script contained literal syntax errors - three
# settings were left blank ("= #") and it used Python-2 `print`/`file()`.
# The placeholders below must be filled in before running.
labeledDocuments = None  # path to the labelled corpus description - TODO set
imgaeSourceReg = None    # regex matching the image source files - TODO set
modelDir = None          # directory holding the trained LDA model - TODO set
modelName = None         # base name of the model files - TODO set

# Load documents to be examined
docs = createLabeledCorpDict(labeledDocuments, imgaeSourceReg, output=True)
fnames = docs.keys()  # Get file names

# Load LDA model and its dictionary (binary mode: the dictionary is pickled)
lda = models.LdaModel.load(modelDir + modelName + 'model')
ldaDict = pickle.load(open(modelDir + modelName + 'dictionary', 'rb'))
ldaEncoder = LdaEncoder(ldaDict, docs, lda)

# Probability encoding of each document
encoding = []
for fname in fnames:
    encoding.append(LdaEncoding(fname, ldaEncoder[{'label': fname}]))

# Output the top nExemplars for each topic
ntopics = lda.num_topics  # was undefined in the original script - TODO confirm
with open(modelDir + modelName + 'exemplars', 'w') as outf:
    for i in range(ntopics):
        print('Finding exemplars for topic ' + str(i))
        for e in encoding:
            e.setTopicN(i)  # set the topic number to be compared
        exemplars = heapq.nlargest(nExemplars, encoding)  # limited heap
        outf.write('Topic %d\n%s\n' % (i, '_' * 10))
        outf.write(str([exemplar.__str__(topicN=i) for exemplar in exemplars]) + '\n\n')
r"""Markov chain Monte Carlo methods for inference.
"""
import hypothesis
import numpy as np
import torch
from hypothesis.engine import Procedure
from hypothesis.summary.mcmc import Chain
from torch.distributions.multivariate_normal import MultivariateNormal
from torch.distributions.normal import Normal
from torch.multiprocessing import Pool
class ParallelSampler:
    """Runs several MCMC chains of ``sampler`` in parallel worker processes."""

    def __init__(self, sampler, chains=2, workers=torch.multiprocessing.cpu_count()):
        self.chains = chains
        self.sampler = sampler
        self.workers = workers

    def _prepare_arguments(self, observations, thetas, num_samples):
        # Fixed: previously iterated an undefined name (`inputs`) and called
        # list.append with four arguments; each worker needs one
        # (sampler, observations, theta, num_samples) tuple.
        arguments = []
        for theta in thetas:
            arguments.append((self.sampler, observations, theta, num_samples))
        return arguments

    def _prepare_inputs(self):
        # Draw one independent initial state per chain from the prior.
        inputs = []
        prior = self.sampler.prior
        for _ in range(self.chains):
            inputs.append(prior.sample())
        return inputs

    @torch.no_grad()
    def sample(self, observations, num_samples, thetas=None):
        """Run ``self.chains`` chains and return the list of Chain results."""
        # Fixed: `is` compared ints by identity; use equality.
        assert thetas is None or len(thetas) == self.chains
        self.sampler.reset()
        if thetas is None:
            inputs = self._prepare_inputs()
        else:
            # Fixed: caller-provided initial states were previously ignored,
            # leaving `inputs` unbound (NameError).
            inputs = thetas
        pool = Pool(processes=self.workers)
        arguments = self._prepare_arguments(observations, inputs, num_samples)
        chains = pool.map(self.sample_chain, arguments)
        del pool
        return chains

    @staticmethod
    def sample_chain(arguments):
        sampler, observations, theta, num_samples = arguments
        chain = sampler.sample(observations, theta, num_samples)
        return chain
class MarkovChainMonteCarlo(Procedure):
    r"""Base class for MCMC posterior samplers over a given prior."""

    def __init__(self, prior):
        super(MarkovChainMonteCarlo, self).__init__()
        self.prior = prior

    def _register_events(self):
        # This procedure emits no events.
        pass

    def _step(self, theta, observations):
        # Subclasses implement one transition of the chain.
        raise NotImplementedError

    def reset(self):
        pass

    @torch.no_grad()
    def sample(self, observations, input, num_samples):
        r"""Draw ``num_samples`` states starting from ``input`` and wrap
        them, with the per-step acceptance data, into a Chain."""
        probabilities = []
        decisions = []
        states = []
        self.reset()
        current = input.view(1, -1)
        for _ in range(num_samples):
            current, probability, accepted = self._step(current, observations)
            current = current.view(1, -1)
            states.append(current)
            probabilities.append(probability)
            decisions.append(accepted)
        stacked = torch.cat(states, dim=0)
        return Chain(stacked, probabilities, decisions)
class MetropolisHastings(MarkovChainMonteCarlo):
    r"""Classical Metropolis-Hastings with an explicit log-likelihood."""

    def __init__(self, prior, log_likelihood, transition):
        super(MetropolisHastings, self).__init__(prior)
        self.denominator = None
        self.log_likelihood = log_likelihood
        self.transition = transition

    def _step(self, input, observations):
        # Propose a candidate and evaluate its log posterior.
        proposal = self.transition.sample(input)
        proposal_lnl = self.log_likelihood(proposal, observations)
        numerator = self.prior.log_prob(proposal) + proposal_lnl
        if self.denominator is None:
            # Lazily evaluate the current state's log posterior once;
            # it is then cached across steps.
            current_lnl = self.log_likelihood(input, observations)
            self.denominator = self.prior.log_prob(input) + current_lnl
        log_ratio = numerator - self.denominator
        if not self.transition.is_symmetrical():
            raise NotImplementedError
        acceptance_probability = min([1, log_ratio.exp().item()])
        accepted = np.random.uniform() <= acceptance_probability
        if accepted:
            input = proposal
            self.denominator = numerator
        return input, acceptance_probability, accepted

    def reset(self):
        self.denominator = None
class AALRMetropolisHastings(MarkovChainMonteCarlo):
    r"""Amortized Approximate Likelihood Ratio Metropolis Hastings.

    Replaces the explicit likelihood of MetropolisHastings with a learned
    likelihood-to-evidence ratio estimator.
    https://arxiv.org/abs/1903.04057
    """

    def __init__(self, prior, ratio_estimator, transition):
        super(AALRMetropolisHastings, self).__init__(prior)
        # Cached log posterior of the current state (lazy, see _step).
        self.denominator = None
        self.prior = prior
        self.ratio_estimator = ratio_estimator
        self.transition = transition

    def _compute_ratio(self, input, outputs):
        # Broadcast the single parameter vector across all observations and
        # sum the per-observation log ratios: the total stands in for the
        # log likelihood of the whole dataset.
        num_observations = outputs.shape[0]
        inputs = input.repeat(num_observations, 1)
        inputs = inputs.to(hypothesis.accelerator)
        _, log_ratios = self.ratio_estimator(inputs=inputs, outputs=outputs)
        return log_ratios.sum().cpu()

    def _step(self, input, observations):
        accepted = False
        with torch.no_grad():
            input_next = self.transition.sample(input)
            lnl_input_next = self._compute_ratio(input_next, observations)
            numerator = self.prior.log_prob(input_next) + lnl_input_next
            if self.denominator is None:
                # Lazily evaluate the current state's log posterior once.
                lnl_input = self._compute_ratio(input, observations)
                self.denominator = self.prior.log_prob(input) + lnl_input
            acceptance_ratio = (numerator - self.denominator)
            # Only symmetric proposals are supported (no Hastings correction).
            if not self.transition.is_symmetrical():
                raise NotImplementedError
            acceptance_probability = min([1, acceptance_ratio.exp().item()])
            u = np.random.uniform()
            if u <= acceptance_probability:
                accepted = True
                input = input_next
                self.denominator = numerator
        return input, acceptance_probability, accepted

    def reset(self):
        self.denominator = None

    @torch.no_grad()
    def sample(self, outputs, input, num_samples):
        # The estimator must be in eval mode for deterministic ratios.
        assert(not self.ratio_estimator.training)
        outputs = outputs.to(hypothesis.accelerator)
        chain = super(AALRMetropolisHastings, self).sample(outputs, input, num_samples)
        return chain
from pathlib import Path
import configparser
import cv2
import numpy as np
import tensorflow as tf
import threading
import video_utils
import sys
import streamlit as st
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util
from object_detection.utils import ops as utils_ops
def model_load_into_memory(path_to_ckpt):
    """Deserialize a frozen TF detection graph from ``path_to_ckpt``.

    Returns:
        tf.Graph: The graph with the frozen weights imported into it.
    """
    graph = tf.Graph()
    with graph.as_default():
        graph_def = tf.GraphDef()
        with tf.gfile.GFile(str(path_to_ckpt), 'rb') as checkpoint_file:
            graph_def.ParseFromString(checkpoint_file.read())
        tf.import_graph_def(graph_def, name='')
    return graph
def run_inference_for_single_image(image, sess, graph, class_id=None):
    """Feed forward an image into the object detection model.
    Args:
        image (ndarray): Input image in numpy format (OpenCV format).
        sess: TF session.
        graph: Object detection model loaded before.
        class_id (list): Optional. Id's of the classes you want to detect.
            Refer to mscoco_label_map.pbtxt' to find out more.
            NOTE(review): currently unused inside this function; class
            filtering happens later in discriminate_class.
    Returns:
        output_dict (dict): Contains the info related to the detections.
        num_detections (int): Fixed to 100 for this net.
        detection_boxes (2D-ndarray): 100 arrays containing the detecion
        bounding boxes like [ymin, xmin, ymax, xmax] from 0 to 1.
        detection_scores (ndarray): Prediction scores associated with
        every detection.
        detection_classes (ndarray): Class' ID associated with
        every detection.
    """
    # Get handles to input and output tensors
    ops = tf.get_default_graph().get_operations()
    all_tensor_names = {output.name for op in ops for output in op.outputs}
    tensor_dict = {}
    # Keep only the output tensors this particular graph actually exposes.
    for key in ['num_detections', 'detection_boxes', 'detection_scores',
                'detection_classes', 'detection_masks']:
        tensor_name = key + ':0'
        if tensor_name in all_tensor_names:
            tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(tensor_name)
    if 'detection_masks' in tensor_dict:
        # The following processing is only for single image
        detection_boxes = tf.squeeze(tensor_dict['detection_boxes'], [0])
        detection_masks = tf.squeeze(tensor_dict['detection_masks'], [0])
        # Reframe is required to translate mask from box coordinates to image coordinates and fit the image size.
        real_num_detection = tf.cast(tensor_dict['num_detections'][0], tf.int32)
        detection_boxes = tf.slice(detection_boxes, [0, 0], [real_num_detection, -1])
        detection_masks = tf.slice(detection_masks, [0, 0, 0], [real_num_detection, -1, -1])
        detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(
            detection_masks, detection_boxes, image.shape[0], image.shape[1])
        # Binarize the reframed masks at 0.5.
        detection_masks_reframed = tf.cast(
            tf.greater(detection_masks_reframed, 0.5), tf.uint8)
        # Follow the convention by adding back the batch dimension
        tensor_dict['detection_masks'] = tf.expand_dims(detection_masks_reframed, 0)
    image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')
    # Run inference
    output_dict = sess.run(tensor_dict,
                           feed_dict={image_tensor: np.expand_dims(image, 0)})
    # All outputs are float32 numpy arrays, so convert types as appropriate
    output_dict['num_detections'] = int(output_dict['num_detections'][0])
    output_dict['detection_classes'] = output_dict['detection_classes'][0].astype(np.uint8)
    output_dict['detection_boxes'] = output_dict['detection_boxes'][0]
    output_dict['detection_scores'] = output_dict['detection_scores'][0]
    if 'detection_masks' in output_dict:
        output_dict['detection_masks'] = output_dict['detection_masks'][0].astype(np.float32)
    return output_dict
def discriminate_class(output_dict, classes_to_detect, category_index):
    """Keeps the classes of interest of the frame and ignores the others.

    Args:
        output_dict (dict): Output of the model once an image is processed.
        classes_to_detect (list): Names of the classes to be detected.
        category_index (dict): Contains X dicts corresponding to each one
            of the classes where the model's been trained on.
    Returns:
        output_dict (dict): Modified dictionary which just delivers the
            specified class detections.
    """
    for i in range(output_dict['detection_classes'].size):
        class_detected = category_index[output_dict['detection_classes'][i]]['name']
        if output_dict['detection_scores'][i] >= 0.5 and class_detected not in classes_to_detect:
            # Fixed comment: this branch handles a confident detection of an
            # *unwanted* class.  Zeroing its score keeps the visualizer
            # (which hides scores below 0.5) from displaying it; detections
            # below 0.5 are never shown, so they need no adjustment.
            output_dict['detection_scores'][i] = 0.0
    return output_dict
def visualize_results(image, output_dict, category_index):
    """Draw the detections contained in ``output_dict`` onto ``image``.

    Args:
        image (ndarray): Original image given to the model.
        output_dict (dict): Dictionary with all the information provided
            by the model.
        category_index (dict): Contains X dicts corresponding to each one
            of the classes where the model's been trained on.
    Returns:
        image (ndarray): The same array with boxes/labels drawn in place.
    """
    boxes = output_dict['detection_boxes']
    classes = output_dict['detection_classes']
    scores = output_dict['detection_scores']
    masks = output_dict.get('detection_masks')
    vis_util.visualize_boxes_and_labels_on_image_array(
        image, boxes, classes, scores, category_index,
        instance_masks=masks,
        use_normalized_coordinates=True,
        line_thickness=4)
    return image
def main():
    """Streamlit app: stream a camera, run TF object detection, show results."""
    # Initialization
    ## Load the configuration variables from 'config.ini'
    config = configparser.ConfigParser()
    config.read('config.ini')
    ## Loading label map
    num_classes = config.getint('net', 'num_classes')
    path_to_labels = config['net']['path_to_labels']
    label_map = label_map_util.load_labelmap(path_to_labels)
    categories = label_map_util.convert_label_map_to_categories(label_map,
        max_num_classes=num_classes, use_display_name=True)
    category_index = label_map_util.create_category_index(categories)
    # Streamlit initialization
    st.title("Object Detection")
    st.sidebar.title("Object Detection")
    ## Select classes to be detected by the model
    classes_names = [value['name'] for value in category_index.values()]
    classes_names.sort()
    classes_to_detect = st.sidebar.multiselect(
        "Select which classes to detect", classes_names, ['person'])
    ## Select camera to feed the model
    available_cameras = {'Camera 1': 0, 'Camera 2': 1, 'Camera 3': 2}
    cam_id = st.sidebar.selectbox(
        "Select which camera signal to use", list(available_cameras.keys()))
    ## Select a model to perform the inference
    # A model directory is offered only if it contains a frozen .pb graph.
    available_models = [str(i) for i in Path('./trained_model/').iterdir()
                        if i.is_dir() and list(Path(i).glob('*.pb'))]
    model_name = st.sidebar.selectbox(
        "Select which model to use", available_models)
    # Define holder for the processed image
    img_placeholder = st.empty()
    # Model load
    path_to_ckpt = '{}/frozen_inference_graph.pb'.format(model_name)
    detection_graph = model_load_into_memory(path_to_ckpt)
    # Load video source into a thread
    video_source = available_cameras[cam_id]
    ## Start video thread
    video_thread = video_utils.WebcamVideoStream(video_source)
    video_thread.start()
    # Detection code
    try:
        with detection_graph.as_default():
            with tf.Session(graph=detection_graph) as sess:
                while not video_thread.stopped():
                    # Camera detection loop
                    frame = video_thread.read()
                    if frame is None:
                        print("Frame stream interrupted")
                        break
                    # Change color gammut to feed the frame into the network
                    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                    output = run_inference_for_single_image(frame, sess,
                                                            detection_graph)
                    # Suppress confident detections of unselected classes.
                    output = discriminate_class(output,
                                                classes_to_detect, category_index)
                    processed_image = visualize_results(frame, output,
                                                        category_index)
                    # Display the image with the detections in the Streamlit app
                    img_placeholder.image(processed_image)
                    #cv2.imshow('Video', cv2.cvtColor(processed_image, cv2.COLOR_BGR2RGB))
                    # if cv2.waitKey(1) & 0xFF == ord('q'):
                    #     break
    except KeyboardInterrupt:
        pass
    # Cleanup: release UI message, OpenCV windows and the capture thread.
    print("Ending resources")
    st.text("Camera not detected")
    cv2.destroyAllWindows()
    video_thread.stop()
    sys.exit()


# Script entry point.
if __name__ == '__main__':
    main()
"""
Tests for numba.utils.
"""
from __future__ import print_function, absolute_import
from numba import utils
from numba import unittest_support as unittest
class C(object):
    """Value wrapper defining only ``==``, ``!=`` and ``>``.

    Deliberately leaves ``<``, ``<=`` and ``>=`` to ``object`` so that
    utils.total_ordering has something to fill in.
    """

    def __init__(self, value):
        self.value = value

    def __eq__(self, o):
        return self.value == o.value

    def __ne__(self, o):
        return self.value != o.value

    def __gt__(self, o):
        return self.value > o.value


class D(C):
    """Subclass used to check the comparisons behave identically when inherited."""
    pass
class TestTotalOrdering(unittest.TestCase):
    """Exercises utils.total_ordering and utils._is_inherited_from_object."""

    def test_is_inherited(self):
        is_inherited = utils._is_inherited_from_object
        for cls in (C, D):
            # C defines these three itself (D inherits them from C) ...
            for name in ('__eq__', '__gt__', '__ne__'):
                self.assertFalse(is_inherited(cls, name))
            # ... while the remaining comparisons still come from object.
            for name in ('__ge__', '__le__', '__lt__'):
                self.assertTrue(is_inherited(cls, name))

    def check_total_ordering(self, cls):
        # Duplicate the class-under-test, to avoid mutating the original
        clone = type(cls.__name__, cls.__bases__, dict(cls.__dict__))
        clone = utils.total_ordering(clone)
        a, b, c, d = clone(10), clone(5), clone(15), clone(10)
        # Expected truth table: (x, y, x<y, x<=y, x>y, x>=y).
        table = [
            (a, b, False, False, True, True),
            (a, c, True, True, False, False),
            (a, d, False, True, False, True),
            (b, c, True, True, False, False),
            (b, d, True, True, False, False),
            (c, d, False, False, True, True),
        ]
        for x, y, lt, le, gt, ge in table:
            self.assertEqual(x < y, lt)
            self.assertEqual(x <= y, le)
            self.assertEqual(x > y, gt)
            self.assertEqual(x >= y, ge)

    def test_total_ordering(self):
        self.check_total_ordering(C)

    def test_total_ordering_derived(self):
        self.check_total_ordering(D)


if __name__ == '__main__':
    unittest.main()
#! /usr/bin/env python
from netCDF4 import Dataset
import matplotlib.pyplot as plt
import numpy as np
import array
import matplotlib.cm as cm
from mpl_toolkits.basemap import Basemap
import glob
import struct
import time
import sys
from mpl_toolkits.basemap import Basemap, shiftgrid, addcyclic
from scipy import interpolate
import getopt
import string
from datetime import date
import scipy.interpolate as interp
import scipy.optimize as optm
import subprocess
import utils
import scipy.stats as scstats
#sys.path.append('/usr/local/other/SLES11/mpi4py-1.3/lib/python/')
#from mpi4py import MPI
#Ne=int(sys.argv[1])
# NOTE(review): this is Python 2 code (print statements below).
# Collect per-member background (bkg) and analysis (ana) ocean restart files
# for the 2012-11-30 analysis; the ??? glob matches the 3-digit member id.
bkg_flist=glob.glob('../wrkdir/oana-20121130_1/bkg/???/ocean_temp_salt.res.nc')
bkg_flist.sort()
ana_flist=glob.glob('../wrkdir/oana-20121130_1/ana/???/ocean_temp_salt.res.nc')
ana_flist.sort()
print bkg_flist
#dirname='../wrkdir/incr_adt/'
#subprocess.call(['mkdir',dirname])
index=0
# Walk analysis/background file pairs in lockstep (lists sorted above, so
# index i of each list is assumed to be the same member -- TODO confirm).
for ana_fname in ana_flist:
    print ana_fname, bkg_flist[index]
    Ta, Sa, SSHa = utils.get_state(ana_fname)
    #Ta, Sa, SSHa, Pba = np.squeeze(utils.get_state(ana_fname))
    Tb, Sb, SSHb = utils.get_state(bkg_flist[index])
    # Analysis-minus-background increments for temperature and salinity.
    dT=np.squeeze(Ta-Tb)
    dS=np.squeeze(Sa-Sb)
    #dPb=Pbb
    #rho0=1025.0
    #g=9.81
    #dPb[dPb==0.0]=np.nan
    #dPb=dPb-scstats.nanmean(dPb.flatten())
    #print np.max(dPb.flatten())
    #plt.sca(grid[])
    # Split the SSH increment into steric / halosteric / thermosteric parts.
    dsteric, dhalosteric, dthermosteric = utils.steric(Tb, Sb, dT, dS, SSHa-SSHb, SSHb)
    index+=1
    plt.clf()
from numpy import array

# Ten (x, y) rows; the second column counts down as the first counts up.
rows = [
    [0.1, 1.0],
    [0.2, 0.9],
    [0.3, 0.8],
    [0.4, 0.7],
    [0.5, 0.6],
    [0.6, 0.5],
    [0.7, 0.4],
    [0.8, 0.3],
    [0.9, 0.2],
    [1.0, 0.1],
]

# Add a leading sample axis: (samples, timesteps, features) = (1, 10, 2).
data = array(rows).reshape(1, 10, 2)
print(data.shape)
import scipy.io
import os
import numpy as np
def load_data(name, n, data_dir="data/steady", non_dim=True, scale_q=1.0):
    """Load steady-state experiment ``n`` of dataset ``name`` from a .mat file.

    Returns:
        X_data: (n_points, 2) array of (x, q) network inputs.
        u_data: measured head values.
        L, W: domain length and width from the file.
        K_truth: reference conductivity from the file.
    """
    mat = scipy.io.loadmat(data_dir + "/%s_exp%d.mat" % (name, n))
    Q = mat['Q'][0][0]
    K_truth = mat['K'][0][0]
    x_data = mat['xexp'][:, 0]
    u_data = mat['hexp']
    L = mat['L'][0][0]
    W = mat['W'][0][0]
    # Constant flux per unit width at every location, negated (down-gradient).
    q_data = -np.ones(x_data.shape) * Q / W / scale_q
    if non_dim:
        # Non-dimensionalize lengths by L and the flux by L*K.
        x_data /= L
        u_data /= L
        q_data /= L * K_truth
    X_data = np.stack((x_data, q_data)).T
    return X_data, u_data, L, W, K_truth
def load_all(name, n_max, non_dim=True, scale_q=1.0):
    """Load experiments 1..n_max into a dict keyed by experiment number.

    Each value is the (X, u, L, W, K) tuple returned by ``load_data``.
    """
    training_data = dict()
    for i in range(n_max):
        # Fixed: the options were previously passed positionally, so
        # ``non_dim`` was bound to load_data's ``data_dir`` parameter and
        # ``scale_q`` to ``non_dim``.
        training_data[i + 1] = load_data(name, i + 1, non_dim=non_dim, scale_q=scale_q)
    return training_data
def load_data_name(name, data_dir="data/steady", non_dim=False, subsample=200, scale_q=1.0):
    """ load dataset given full name of data file
    Notes:
        x_data is of shape (1,n)
        u_data is of shape (n,1)
        L is measured in mm (converted to m here, unlike load_data)
        Some entries of u_data are Nan. Remove these elements
    """
    MM_TO_M = 1000
    data = scipy.io.loadmat(data_dir + "/" + name)
    Q = data['Q'][0][0]
    K_truth = data['K'][0][0]
    x_data = data['xexp'][0,:]
    u_data = data['hexp']
    # Removing the Nan entries
    # (the boolean mask flattens u_data; it is reshaped back to (n, 1) below)
    x_data = x_data[~np.isnan(u_data[:,0])]
    u_data = u_data[~np.isnan(u_data)]
    # Randomly subsample measurement locations, keeping x sorted ascending.
    if subsample < x_data.shape[0]:
        inds = np.sort(np.random.choice(x_data.shape[0], size=subsample, replace=False))
        x_data = x_data[inds]
        u_data = u_data[inds]
    u_data = u_data.reshape((-1, 1))
    L = data['L'][0][0]/MM_TO_M
    W = data['W'][0][0]
    # Constant flux per unit width at every location, negated.
    q_data = -np.ones(x_data.shape) * Q/W / scale_q
    if non_dim:
        # Non-dimensionalize lengths by L and the flux by L*K.
        x_data /= L
        u_data /= L
        q_data /= L*K_truth
    X_data = np.stack((x_data, q_data)).T
    return X_data, u_data, L, W, K_truth
def load_all_dir(data_dir="data/steady", non_dim=False, subsample=200, scale_q=1.0):
    """Load every .mat file found in ``data_dir``.

    Returns:
        list: One (X, u, L, W, K) tuple per .mat file, in os.listdir order.
    """
    # Fixed: a dict was created and immediately overwritten with a list;
    # the dead assignment (and a stale commented-out variant) are removed.
    training_data = []
    for file_name in os.listdir(data_dir):
        if file_name.endswith(".mat"):
            training_data.append(load_data_name(file_name, data_dir, non_dim=non_dim,
                                                subsample=subsample, scale_q=scale_q))
    return training_data
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Modules to generate perturbations."""
import numpy as np
from scipy.ndimage.filters import gaussian_filter
_Array = np.ndarray
__all__ = [
'BaseReplacement',
'Constant',
'GaussianBlur',
'RandomPerturb',
]
class BaseReplacement:
"""
Base class of generator for generating different replacement for perturbations.
Args:
kwargs: Optional args for generating replacement. Derived class need to
add necessary arg names and default value to '_necessary_args'.
If the argument has no default value, the value should be set to
'EMPTY' to mark the required args. Initializing an object will
check the given kwargs w.r.t '_necessary_args'.
Raises:
ValueError: Raise when provided kwargs not contain necessary arg names with 'EMPTY' mark.
"""
_necessary_args = {}
def __init__(self, **kwargs):
self._replace_args = self._necessary_args.copy()
for key, value in self._replace_args.items():
if key in kwargs.keys():
self._replace_args[key] = kwargs[key]
elif key not in kwargs.keys() and value == 'EMPTY':
raise ValueError(f"Missing keyword arg {key} for {self.__class__.__name__}.")
def __call__(self, inputs):
raise NotImplementedError()
class Constant(BaseReplacement):
"""Generator to provide constant-value replacement for perturbations."""
_necessary_args = {'base_value': 'EMPTY'}
def __call__(self, inputs: _Array) -> _Array:
replacement = np.ones_like(inputs, dtype=np.float32)
replacement *= self._replace_args['base_value']
return replacement
class GaussianBlur(BaseReplacement):
"""Generator to provided gaussian blurred inputs for perturbation"""
_necessary_args = {'sigma': 0.7}
def __call__(self, inputs: _Array) -> _Array:
sigma = self._replace_args['sigma']
replacement = gaussian_filter(inputs, sigma=sigma)
return replacement
class RandomPerturb(BaseReplacement):
"""Generator to provide replacement by randomly adding noise."""
_necessary_args = {'radius': 0.2}
def __call__(self, inputs: _Array) -> _Array:
radius = self._replace_args['radius']
outputs = inputs + (2 * np.random.rand(*inputs.shape) - 1) * radius
return outputs | |
"""Truncated exponential distribution."""
import numpy
from scipy import special
from ..baseclass import SimpleDistribution, ShiftScaleDistribution
class truncexpon(SimpleDistribution):
    """Truncated exponential distribution (standardized form on [0, b])."""

    def __init__(self, b):
        super(truncexpon, self).__init__(dict(b=b))

    def _pdf(self, x, b):
        # Exponential density renormalized over the truncated support.
        norm = 1 - numpy.exp(-b)
        return numpy.exp(-x) / norm

    def _cdf(self, x, b):
        norm = 1 - numpy.exp(-b)
        return (1.0 - numpy.exp(-x)) / norm

    def _ppf(self, q, b):
        # Closed-form inverse of _cdf.
        return -numpy.log(1 - q + q * numpy.exp(-b))

    def _lower(self, b):
        return 0.

    def _upper(self, b):
        return b
class TruncExponential(ShiftScaleDistribution):
    """
    Truncated exponential distribution.
    Args:
        upper (float, Distribution):
            Location of upper threshold
        scale (float, Distribution):
            Scaling parameter in the exponential distribution
        shift (float, Distribution):
            Location parameter
    Examples:
        >>> distribution = chaospy.TruncExponential(1.5)
        >>> distribution
        TruncExponential(1.5)
        >>> uloc = numpy.linspace(0, 1, 6)
        >>> uloc
        array([0. , 0.2, 0.4, 0.6, 0.8, 1. ])
        >>> xloc = distribution.inv(uloc)
        >>> xloc.round(3)
        array([0.   , 0.169, 0.372, 0.628, 0.972, 1.5  ])
        >>> numpy.allclose(distribution.fwd(xloc), uloc)
        True
        >>> distribution.pdf(xloc).round(3)
        array([1.287, 1.087, 0.887, 0.687, 0.487, 0.287])
        >>> distribution.sample(4).round(3)
        array([0.709, 0.094, 1.34 , 0.469])
    """
    def __init__(self, upper=1, scale=1, shift=0):
        # Map user-facing (upper, scale, shift) onto the standard-form
        # truncation point: b = (upper - shift) / scale.
        super(TruncExponential, self).__init__(
            dist=truncexpon((upper-shift)*1./scale),
            scale=scale,
            shift=shift,
            repr_args=[upper],
        )
'''
Module:
Clip the input data
'''
import numpy as np
def set_clip(args, data, which='fore', dmin=0, dmax=1):
    """Compute the (min, max) plotting range for foreground or background data.

    Precedence per bound: an explicit cmin/cmax wins, then a symmetric
    +/- clip value, then a percentile-style window derived from cperc.

    Parameters
    ----------
    args : namespace
        Must carry cperc/cmin/cmax/clip (for ``which='fore'``) or the
        backcperc/backcmin/backcmax/backclip variants (for ``which='back'``).
    data : unused
        Kept for interface compatibility with existing callers.
    which : str
        'fore' or 'back' — selects which group of args is consulted.
    dmin, dmax : float
        The data value range.

    Returns
    -------
    (plot_min_value, plot_max_value) : tuple of float

    Raises
    ------
    ValueError
        If ``which`` is neither 'fore' nor 'back' (previously this fell
        through to an UnboundLocalError at the return).
    """
    # Pick the argument group; the two original branches were duplicates
    # differing only in the attribute names consulted.
    if which == 'fore':
        cperc, cmin, cmax, clip = args.cperc, args.cmin, args.cmax, args.clip
    elif which == 'back':
        cperc, cmin, cmax, clip = (args.backcperc, args.backcmin,
                                   args.backcmax, args.backclip)
    else:
        raise ValueError("which must be 'fore' or 'back', got %r" % (which,))
    # data value range
    dlen = dmax - dmin
    # Percentile-style default window, centred inside [dmin, dmax].
    margin = (1.0 - float(cperc) / 100.0) * 0.5 * dlen
    pmin = dmin + margin
    pmax = dmax - margin
    # minimum plot value: explicit cmin > symmetric clip > default window
    if cmin is not None:
        plot_min_value = float(cmin)
    elif clip is not None:
        plot_min_value = -float(clip)
    else:
        plot_min_value = pmin
    # maximum plot value: explicit cmax > symmetric clip > default window
    if cmax is not None:
        plot_max_value = float(cmax)
    elif clip is not None:
        plot_max_value = float(clip)
    else:
        plot_max_value = pmax
    # print clip information
    print('plot range ', "{:e}".format(plot_min_value), ' -- ', "{:e}".format(plot_max_value))
    return plot_min_value, plot_max_value
#import libraries
import tensorflow as tf
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
# Load the dataset from disk.
# NOTE(review): assumes columns 0-7 are features and column 8 is the
# binary label — confirm against the CSV header.
diabets_df = pd.read_csv('diabets.csv')
#diabets_df.head()
#diabets_df.tail()
X = diabets_df.iloc[:, 0:8].values
y = diabets_df.iloc[:, 8].values
# Standardize the features to zero mean / unit variance.
scaler = StandardScaler()
X = scaler.fit_transform(X)
# Hold out 20% of the rows for testing (random, unseeded split).
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
#print(X_train.shape[1])
import numpy as np
def _weighted_misclassification_error(values: np.ndarray, labels: np.ndarray) -> float:
    """
    Evaluate performance under the misclassification loss function.

    Sums |labels| over the positions where sign(labels) != sign(values).
    values are in (1,-1); labels don't have to be.

    Parameters
    ----------
    values: ndarray of shape (n_samples,)
        A feature vector to find a splitting threshold for
    labels: ndarray of shape (n_samples,)
        The labels to compare against
    """
    mismatch = np.sign(labels) != np.sign(values)
    return np.abs(labels)[mismatch].sum()
if __name__ == '__main__':
    # No script entry point: this module only provides
    # _weighted_misclassification_error.  A large block of commented-out
    # scratch/debug code (threshold-search experiments) used to live here
    # and has been removed.
    pass
"""
Linear least-square solvers
===========================
This module contains specialized solvers that work for the cases where the
values of the modelled properties depend linearly on all the model parameters.
These are the most robust of all the solvers, but they require the model to be
linear. None of the solvers in this module respects the initial guess for the
parameters, since all of them are based on singular value decomposition of the
matrices rather than iteration.
Two solvers are provided by this module,
.. autosummary::
:toctree:
numpy_lstsq
r_lm
and they are based on the utility functions in the module
:py:mod:`FFOMP.solver.utils`.
"""
import time
import numpy as np
from numpy.linalg import lstsq
from .utils import get_linear_sys, decompose_mat2cols, get_total_weight
#
# The plain least square solver based on numpy
# --------------------------------------------
#
def numpy_lstsq(**kwargs):
    """Generates a plain least-square solver based on numpy lstsq function

    Returns a plain linear least-square solver.  Note that this solver
    does not respect the given weights for the properties, and it only
    works for linear models.

    All keyword arguments given here are forwarded verbatim to
    ``numpy.linalg.lstsq``.

    :returns: The solver that can be called with the list of equations and
        parameters.
    :rtype: function
    """
    def solver(eqns, params):
        """The actual plain least square solver"""
        # Assemble the design matrix and the target vector.
        design, target = get_linear_sys(eqns, params)
        print(
            'Invoking the numpy.linalg.lstsq function...'
        )
        tic = time.process_time()
        solution = lstsq(design, target, **kwargs)
        print(
            'Finished: {!s}sec.'.format(time.process_time() - tic)
        )
        # lstsq returns (x, residuals, rank, singular values); keep x.
        return solution[0]
    return solver
#
# Solvers bases on GNU R
# ----------------------
#
def r_lm(prop_vec_name='props', use_weights=True, **kwargs):
    """Generates the linear solver based on RPy2

    This function will generate a solver that invokes the linear model fitting
    facility of GNU R based on the RPy2 interface. The weights will be
    respected for this sophisticated solver.

    All keyword arguments not for this function will be passed to the core R
    ``lm`` function.

    :param str prop_vec_name: The name for the property vector, default to
        ``props``, to be used in the left-hand side of the R formula.
    :param bool use_weights: If weights are to be added for the fitting.
    :returns: The linear solver based on R
    :rtype: function
    :raises ImportError: when rpy2 (and hence GNU R) is not installed.
    """
    # Import here so that users do not have to install R if they are not going
    # to use it.
    try:
        from rpy2.robjects import r, Formula, FloatVector
        from rpy2.robjects.packages import importr
    except ImportError:
        raise ImportError(
            'GNU R and RPy2 have to be installed to use the R solver!'
        )
    def solver(eqns, params):
        """The actual R solver"""
        # Generate the linear system first.
        mat, vec = get_linear_sys(eqns, params)
        # Decompose the matrix.
        coeff_vecs = decompose_mat2cols(mat, params)
        # Test the validity of the property vector name.
        if prop_vec_name in coeff_vecs:
            raise ValueError(
                'Invalid property vector name {}, conflicts with parameter '
                'name! '.format(prop_vec_name)
            )
        # Generate the R formula ("- 1" suppresses the intercept).
        fmla = Formula(''.join([
            prop_vec_name, ' ~ ',
            ' + '.join(coeff_vecs.keys()),
            ' - 1'
        ]))
        # Add the data vectors.
        env = fmla.environment
        env[prop_vec_name] = FloatVector(vec)
        for param_name, coeff_vec in coeff_vecs.items():
            env[param_name] = FloatVector(coeff_vec)
        # Generates the weights vector.
        tot_weight = get_total_weight(eqns)
        # FIX: np.float was removed from NumPy (1.24+); use builtin float.
        weights_vec = np.array(
            [i.weight / tot_weight for i in eqns],
            dtype=float
        )
        if use_weights:
            kwargs['weights'] = FloatVector(weights_vec)
        print('Invoking the R lm function...\n\n')
        start_time = time.process_time()
        # Invoke the R solver.
        stats = importr('stats')
        fit = stats.lm(
            fmla, **kwargs
        )
        print(
            'Finished: {!s}sec.\n'.format(time.process_time() - start_time)
        )
        # Print the summary.
        print('R modelling summary: \n')
        print(r.summary(fit))
        print('\n')
        # Return the values for the parameters.
        return np.array(
            [i for i in fit.rx('coefficients')[0]],
            dtype=float
        )
    return solver
import mock
import numpy as np
import matplotlib.pyplot as plt
from neupy import plots, layers, algorithms
from neupy.exceptions import InvalidConnection
from base import BaseTestCase
class SaliencyMapTestCase(BaseTestCase):
    """Tests for plots.saliency_map input validation and rendering paths."""
    single_thread = True

    def setUp(self):
        super(SaliencyMapTestCase, self).setUp()
        # Small conv network and a dummy all-ones RGB image reused by tests.
        self.network = layers.join(
            layers.Input((28, 28, 3)),
            layers.Convolution((3, 3, 8), name='conv') >> layers.Relu(),
            layers.Reshape(),
            layers.Softmax(10),
        )
        self.image = np.ones((28, 28, 3))

    def test_saliency_map_invalid_mode(self):
        message = "'invalid-mode' is invalid value for mode argument"
        # FIX: assertRaisesRegexp is a deprecated alias that was removed in
        # Python 3.12; use assertRaisesRegex throughout.
        with self.assertRaisesRegex(ValueError, message):
            plots.saliency_map(self.network, self.image, mode='invalid-mode')

    def test_saliency_map_invalid_n_outputs(self):
        # Fork the network into two output layers to trigger the check.
        new_network = layers.join(
            self.network,
            layers.parallel(
                layers.Sigmoid(1),
                layers.Sigmoid(2),
            )
        )
        message = (
            "Cannot build saliency map for the network that "
            "has more than one output layer."
        )
        with self.assertRaisesRegex(InvalidConnection, message):
            plots.saliency_map(new_network, self.image)

    def test_saliency_map_invalid_n_inputs(self):
        # Give the network two input layers to trigger the check.
        new_network = layers.join(
            layers.parallel(
                layers.Input((28, 28, 3)),
                layers.Input((28, 28, 3)),
            ),
            layers.Concatenate(),
            self.network.start('conv'),
        )
        message = (
            "Cannot build saliency map for the network that "
            "has more than one input layer."
        )
        with self.assertRaisesRegex(InvalidConnection, message):
            plots.saliency_map(new_network, self.image)

    def test_saliency_map_invalid_input_image(self):
        network = layers.join(
            layers.Input(10),
            layers.Relu(),
        )
        message = (
            "Input layer has to be 4 dimensions, but network expects "
            "2 dimensional input"
        )
        with self.assertRaisesRegex(InvalidConnection, message):
            plots.saliency_map(network, self.image)
        message = (
            "Invalid image shape. Image expected to be 3D, got 2D image"
        )
        with self.assertRaisesRegex(ValueError, message):
            plots.saliency_map(self.network, np.ones((28, 28)))

    def test_saliency_maps(self):
        # Record the sequence of matplotlib calls made by saliency_map.
        events = []
        original_gca = plt.gca

        def mocked_imshow(array, *args, **kwargs):
            events.append('imshow')
            # The rendered saliency map collapses the colour channel.
            self.assertEqual(array.shape, (28, 28))

        def mocked_show(*args, **kwargs):
            events.append('show')

        def mocked_gca(*args, **kwargs):
            events.append('gca')
            return original_gca()

        imshow_path = 'matplotlib.axes.Axes.imshow'
        with mock.patch(imshow_path, side_effect=mocked_imshow):
            plots.saliency_map(
                self.network, self.image,
                mode='heatmap', show=False,
            )
            self.assertSequenceEqual(events, ['imshow'])
            plots.saliency_map(
                self.network, self.image,
                mode='raw', show=False,
            )
            self.assertSequenceEqual(events, ['imshow', 'imshow'])
            with mock.patch('matplotlib.pyplot.show', side_effect=mocked_show):
                plots.saliency_map(self.network, self.image, show=True)
                self.assertSequenceEqual(
                    events, ['imshow', 'imshow', 'imshow', 'show'])
            with mock.patch('matplotlib.pyplot.gca', side_effect=mocked_gca):
                plots.saliency_map(self.network, self.image, show=False)
                self.assertSequenceEqual(
                    events,
                    ['imshow', 'imshow', 'imshow', 'show', 'gca', 'imshow'])
            # An optimizer wrapping the network must be accepted too.
            optimizer = algorithms.GradientDescent(self.network)
            plots.saliency_map(optimizer, self.image, show=False)
__author__ = "Angel Jimenez Escobar"
import sys
import networkx as nx
# Problem-size limits on the input file.
MaxNodes = pow(10, 5)
MaxColors = pow(10, 5)
# Module-level state used by read_file().
colors = []
numbers_nodes = 0
G = nx.Graph()
def read_file(filename):
    """Read the graph-colouring input file and print, for every node, the
    sum over all nodes of the number of distinct colours on the shortest
    path between them (uses the networkx library).

    Expected file format:
      line 0: number of nodes (integer)
      line 1: the colour of each node, space separated
      line 2+: one edge per line, two node ids separated by whitespace

    Parameters:
        filename (string): this is the path of the name to read

    Returns:
        void (results are printed; exits the process on invalid input)
    """
    # First check if the path of the file exists or not.
    try:
        f = open(filename)
    except OSError:
        print("El archivo no puede ser accedido o no existe")
        sys.exit()
    numbers_nodes = 0
    # `with` guarantees the file is closed (the original leaked the handle).
    with f:
        for iterator, line in enumerate(f):
            if iterator == 0:
                # First line: check it is an integer node count in range.
                try:
                    numbers_nodes = int(line)
                    if MaxNodes < numbers_nodes:
                        print("Excede la cantidad maxima de nodos")
                        sys.exit(0)
                except ValueError:
                    print("La primera linea debe ser un numero entero")
                    sys.exit(0)
            elif iterator == 1:
                # Second line: one colour per node; counts must match.
                colors = list(map(int, line.strip().split(' ')))
                if MaxColors < len(colors):
                    print("Excede la cantidad maxima de colores")
                    sys.exit(0)
                if numbers_nodes != len(colors):
                    print("La cantidad de nodos y de colores debe ser la misma")
                    sys.exit(0)
                # Input validated: create all nodes (1-based ids) in the
                # graph with their colour attribute.
                for x in range(numbers_nodes):
                    G.add_node(x + 1, color=colors[x])
            else:
                # Edge lines: connect the two listed nodes.
                # FIX: parse via split() so multi-digit node ids work — the
                # old line[0]/line[2] indexing handled single digits only.
                a, b = line.split()[:2]
                G.add_edge(int(a), int(b))
    # For each node, sum the number of distinct colours seen on the
    # shortest path to every node (including itself) and print the total.
    for x in range(numbers_nodes):
        total = 0  # renamed from `sum`, which shadowed the builtin
        for y in range(numbers_nodes):
            distinct = set()
            for z in nx.shortest_path(G, source=x + 1, target=y + 1):
                distinct.add(G.nodes[z]['color'])
            total += len(distinct)
        print(total)
def display_usage():
    """Print the usage hint asking for the input file argument."""
    print('Por favor pase como parametro el archivo a evaluar')
if __name__ == '__main__':
    """ Check if the program is running with the name of the file to read
    if the file doesn't pass, the program show message and finish
    """
    # Require the input filename as the first CLI argument.
    if len(sys.argv) < 2:
        display_usage()
        sys.exit(0)
    ''' Get the name of the file to read'''
    file_name = sys.argv[1]
    ''' excecute program '''
    read_file(file_name)
# -*- coding: utf-8 -*-
"""photometr.py - Simple Aperture photometry. Very old code, superceded by
astropy affiliated package `photutils.`
"""
# FIXME: kind of a stupid class dependence.
# Ideally a photometer object should take an image and a region object
# as arguments, where the region object is an instance of a particular aperture class and
# can return an in_region boolean (or perhaps a 'fraction') for
# any position(s). As it is now the 'photometer' object (called Aperture)
# is actually subclassed by the more detailed apertures/regions,
# and requires passing a shape dictionary as well. redundant
import sys
import numpy as np
from numpy import hypot, sqrt
#the below are for some of the more arcane sky measurement methods
try:
from scipy.optimize import curve_fit
import sklearn
from astroML.density_estimation import bayesian_blocks
import matplotlib.pyplot as pl
except ImportError:
pass
thismod = sys.modules[__name__]
class Photometer(object):
    """
    Trying for a better class dependence. Basically wraps the image
    in an object with photometry methods. Incomplete.

    :param image: 2-D ndarray of pixel values.
    :param wcs: optional WCS with a ``wcs_pix2world(x, y, origin)`` method.
    :param ivar: optional inverse-variance array (flattened like the image);
        assumed flattened to match ``self.image`` — TODO confirm with callers.
    """
    def __init__(self, image, wcs=None, ivar=None):
        self.nx, self.ny = image.shape
        self.image = image.flatten()
        self.wcs = wcs
        self.ivar = ivar
        # FIX: np.indices expects the shape as a single tuple; the old
        # call np.indices(self.nx, self.ny) raised a TypeError.
        yy, xx = np.indices((self.nx, self.ny))
        if wcs is not None:
            self._x, self._y = wcs.wcs_pix2world(xx, yy, 0)
        else:
            self._x, self._y = xx, yy
        # Flatten the coordinate grids so they line up with self.image.
        self._x = np.ravel(self._x)
        self._y = np.ravel(self._y)

    def measure_flux(self, aperture, background):
        """
        Measure background subtracted flux. Takes an aperture object,
        and a local background object.

        Returns (flux, flux_uncertainty).
        """
        o, a, e = self.object_flux(aperture)
        b, ba, be = background.evaluate(self)
        # flux = object - area * sky; variance adds the scaled sky error.
        flux = o - a*b
        flux_var = e*e + a*a*be*be/ba
        return flux, sqrt(flux_var)

    def object_flux(self, aperture, weights=1.0):
        """
        Measure total flux (source + background) within an aperture.
        Takes an image, and an aperture object.

        Returns (flux, effective_area, uncertainty); uncertainty is NaN
        when no inverse variance was supplied.
        """
        fracs = aperture.contains(self._x, self._y)
        inds = fracs > 0
        if self.ivar is not None:
            unc = sqrt((fracs[inds]/self.ivar[inds]).sum())
        else:
            unc = np.nan
        return (self.image * weights * fracs).sum(), fracs.sum(), unc
class Aperture(object):
    # Base class for photometric apertures.  Subclasses install a
    # ``pixnum`` callable (shape parameters -> (pixel indices, fractional
    # coverage)) and a ``background`` estimator.
    def world_to_pixels(self, shape, wcs):
        # Placeholder: world-to-pixel conversion is not implemented yet.
        pass
    def object_flux(self, shape, image, ivar=None):
        """Measure total flux within an aperture (source + background).

        Returns (flux, effective_area, uncertainty); uncertainty is 0
        when no inverse-variance image is given.
        """
        inds, fracs = self.pixnum(**shape)
        unc = 0
        if ivar is not None:
            unc = sqrt((fracs/ivar[inds[0], inds[1]]).sum())
        return (image[inds[0], inds[1]]*fracs).sum(), fracs.sum(), unc
    def measure_flux(self, shape, image, wcs=None, skypars=None, ivar=None):
        """Measure background subtracted flux."""
        # flux = object - area * sky; variance adds the scaled sky error.
        o, a, e = self.object_flux(shape, image, ivar=ivar)
        b, ba, be = self.background.evaluate(image, skypars)
        flux = o - a*b
        flux_var = e*e + a*a*be*be/ba
        return flux, sqrt(flux_var)
    #def get_flux(self, image, ivar = None):
    #    return self.measure_flux(self.shape, image, ivar = ivar, skypars = self.skypars, wcs = self.wcs)
class Circular(Aperture):
    """Circular aperture with selectable pixel-coverage method."""
    def __init__(self, exact=False):
        # `exact is True` deliberately requires the literal True, matching
        # the original dispatch behaviour.
        self.pixnum = circle_frac_exact if exact is True else circle_frac_quick
        self.background = ZeroSky()
class Elliptical(Aperture):
    """Elliptical aperture."""
    def __init__(self):
        # FIX: store the function itself; the original called
        # ellipse_frac_quick() at construction time, assigning its
        # return value instead of the callable.
        self.pixnum = ellipse_frac_quick
        self.background = ZeroSky()
class Box(Aperture):
    """Rectangular (box) aperture."""
    def __init__(self):
        # FIX: store the function itself; the original called
        # box_frac_quick() at construction time instead of assigning it.
        self.pixnum = box_frac_quick
        self.background = ZeroSky()
# --- Classes for sky measurement ---
class Background(object):
    """Base class for local-sky estimators.

    Subclasses install ``pixnum`` (sky parameters -> pixel indices and
    fractional coverage) and ``skystats`` (values -> (sky, sigma)).
    """
    def evaluate(self, image, skypars):
        """Return (sky_value, sky_area, sigma_per_pixel) for this region."""
        inds, fracs = self.pixnum(**skypars)
        value, sdpp = self.skystats(image[inds[0], inds[1]], **skypars)
        # FIX: len(inds) was always 2 because inds is an (x, y) tuple;
        # the sky area is the number of selected pixels.
        return value, len(inds[0]), sdpp
class Annulus(Background):
    """Circular-annulus sky region with a name-selected statistic."""
    def __init__(self, bgtype='quartile_sky'):
        self.pixnum = circle_frac_quick
        # Resolve the statistics function (e.g. 'quartile_sky') by name
        # from this module.
        self.skystats = getattr(thismod, bgtype)
class EllipticalAnnulus(Background):
    """Elliptical-annulus sky region with a name-selected statistic."""
    def __init__(self, bgtype='quartile_sky'):
        self.pixnum = ellipse_frac_quick
        # Resolve the statistics function by name from this module.
        self.skystats = getattr(thismod, bgtype)
class ZeroSky(Background):
    """A class for sky values of zero, or for user defined sky statistics.
    The return_value is a tuple giving (sky, sky_area, sigma_sky_per_pixel)"""
    def __init__(self, bgtype='quartile_sky', return_value=(0, 1, 0)):
        # No pixel selection or statistics are required: evaluate() just
        # echoes the preset tuple.
        self.pixnum = None
        self.skystats = None
        self.return_value = return_value
    def evaluate(self, image, skypars):
        """Ignore the inputs and return the fixed (sky, area, sigma)."""
        return self.return_value
# --- Pixnum methods ---
def circle_frac_quick(xcen=0, ycen=0, radius=1, inner_radius=None, subpixels=1,
                      **extras):
    """Obtain fractional pixel coverage of a circle (or annulus).

    Optionally use subpixels to increase precision (though this doesn't
    seem to help).  Assumes pixel centers have coordinates X.5, Y.5.

    Returns ((x, y), fracs): integer pixel coordinate arrays and the
    fractional coverage of each listed pixel.

    Raises ValueError when the aperture extends off the image edge.
    """
    # setup: working grid size and its lower-left corner
    center = np.array([xcen, ycen])
    sz = int(np.ceil((radius + 1) * 2))
    start = np.floor(center + 0.5 - radius)
    # scale everything into subpixel units
    center = center * subpixels
    radius = radius * subpixels
    sz = sz * subpixels
    start = (start - 1) * subpixels
    if (start < 0).any():
        raise ValueError('Aperture extends off image edge')
    off = center - start - 0.5
    yy, xx = np.ogrid[0:sz, 0:sz]
    rr = hypot(xx - off[0], yy - off[1])
    # find pixels within the radius; linear ramp approximates edge coverage
    within = (radius + 0.5) - rr
    within[within > 1.0] = 1.0
    within[within < 0] = 0.
    # if it's an annulus, subtract the coverage of the inner circle
    if inner_radius is not None:
        within_inner = inner_radius * subpixels + 0.5 - rr
        within_inner[within_inner < 0.0] = 0.0
        within_inner[within_inner > 1.0] = 1.0
        within = within - within_inner
    an = within
    # rebin if you used subpixels
    # FIX: use integer division for the reshape dimensions; float `/`
    # raised a TypeError on Python 3 whenever subpixels != 1.
    if subpixels != 1:
        an = an.reshape((an.shape[0] // subpixels, subpixels,
                         an.shape[1] // subpixels, subpixels)).mean(1).mean(2)
    # pick the pixels to return, and get their fractional coverage
    pix1 = np.where(an > 0.0)
    fracs = an[pix1[0], pix1[1]]
    x = (pix1[0] + start[0] / subpixels).astype('i8')
    y = (pix1[1] + start[1] / subpixels).astype('i8')
    return (x, y), fracs
def circle_frac_exact(xcen, ycen, radius):
    # Placeholder for an exact fractional-coverage computation; not
    # implemented (Circular(exact=True) currently gets None from this).
    pass
def ellipse_frac_quick(xcen=0, ycen=0, a=1, b=1, pa=0, precision=None,
                       subpixels=1, **extras):
    """Obtain approximate fractional pixel coverage of an ellipse.

    Analogue of ``circle_frac_quick`` for an ellipse with semi-axes
    ``a``/``b`` rotated by position angle ``pa`` (radians).

    FIX: the original body referenced sz/off/start/subpixels/an without
    ever defining them and raised NameError on every call; the setup and
    pixel-selection scaffolding mirroring circle_frac_quick is supplied
    here.  ``precision`` is accepted but unused, as before.

    Returns ((x, y), fracs); raises ValueError when the aperture extends
    off the image edge.
    """
    # setup: working grid sized by the larger semi-axis
    center = np.array([xcen, ycen])
    rmax = max(a, b)
    sz = int(np.ceil((rmax + 1) * 2))
    start = np.floor(center + 0.5 - rmax)
    # scale into subpixel units
    center = center * subpixels
    a = a * subpixels
    b = b * subpixels
    sz = sz * subpixels
    start = (start - 1) * subpixels
    if (start < 0).any():
        raise ValueError('Aperture extends off image edge')
    off = center - start - 0.5
    yy, xx = np.ogrid[0:sz, 0:sz]
    dx, dy = (xx - off[0]), (yy - off[1])
    # 1 - normalised elliptical radius; clipped to [0, 1] as a coverage ramp
    within = 1 - np.sqrt(((dx * np.cos(pa) - dy * np.sin(pa))/a)**2 +
                         ((dx * np.sin(pa) + dy * np.cos(pa))/b)**2)
    within[within > 1.0] = 1.0
    within[within < 0] = 0.
    an = within
    # rebin if you used subpixels
    if subpixels != 1:
        an = an.reshape((an.shape[0] // subpixels, subpixels,
                         an.shape[1] // subpixels, subpixels)).mean(1).mean(2)
    # pick the pixels to return, and get their fractional coverage
    pix1 = np.where(an > 0.0)
    fracs = an[pix1[0], pix1[1]]
    x = (pix1[0] + start[0] / subpixels).astype('i8')
    y = (pix1[1] + start[1] / subpixels).astype('i8')
    return (x, y), fracs
# --- SKY statistics determination methods ---
def quartile_sky(values, percentiles=(0.16, 0.5, 0.84), **extras):
    """Use the median and 16th percentile to estimate the standard
    deviation per pixel.

    Returns (median, median - 16th percentile) for the default
    ``percentiles``; only elements 0 and 1 of ``percentiles`` are used.
    The mutable-list default was replaced with an equivalent tuple.
    """
    percentiles = np.asarray(percentiles)
    npix = len(values)
    # FIX: clip the index so a percentile of 1.0 cannot address one
    # element past the end of the sorted array.
    idx = np.minimum(np.round(npix * percentiles).astype('i8'), npix - 1)
    qval = np.sort(values)[idx]
    return qval[1], qval[1] - qval[0]
def gaussfit_sky(values, p_thresh=0.65, plot=False, **extras):
    """Fit a gaussian to the lower part of a histogram of the sky values.

    The histogram bins are estimated using Bayesian blocks. p_thresh gives
    the percentile below which the gaussian is fitted to the data. Return
    central value and estimate of standard deviation per pixel.
    """
    bins = bayesian_blocks(values)
    print(len(bins), bins)
    # bin centers
    cbin = (bins[1:]+bins[:-1])/2
    hist = np.histogram(values, bins=bins, range=(bins.min(), bins.max()), density=True)
    # NOTE(review): np.percentile expects 0-100 while p_thresh looks like a
    # fraction — this probably wants p_thresh * 100; kept as-is, confirm.
    val_thresh = np.percentile(values, p_thresh)
    # FIX: select bins below the *value* threshold; the original compared
    # the bin centers (data units) against the probability p_thresh.
    lower = cbin < val_thresh
    def gauss(x, *p):
        A, mu, sigma = p
        return A*np.exp(-(x-mu)**2/(2.*sigma**2))
    # p0 is the initial guess for the fitting coefficients (A, mu and sigma above)
    p0 = [np.max(hist[0]), values.mean(), values.std()]
    coeff, var_matrix = curve_fit(gauss, cbin[lower], hist[0][lower], p0=p0)
    if plot:
        print(len(hist[1]), len(hist[0]), type(coeff))
        pl.figure()
        pl.plot(cbin, hist[0], color='b')
        # FIX: gauss takes varargs; passing a single list made the
        # 3-tuple unpack inside gauss fail.
        pl.plot(cbin, gauss(cbin, *coeff), color='r')
        pl.axvline(val_thresh)
    return coeff[1], coeff[2]
def gmm_sky(values, **extras):
    """Use a gaussian mixture model, via expectation maximization.
    of course, there's only one gaussian. could add another for
    faint sources, bad pixels, but..."""
    # FIX: sklearn.mixture.GMM was removed from scikit-learn; use
    # GaussianMixture, whose fit() wants a 2-D (n_samples, n_features)
    # array, and whose covariances_ attribute replaces covars_.
    gmm = sklearn.mixture.GaussianMixture(n_components=1)
    r = gmm.fit(np.asarray(values).reshape(-1, 1))
    return r.means_[0, 0], np.sqrt(r.covariances_[0, 0, 0])
def sigclip_sky(values, sigma=[3, 2.25], minlength=5, **extras):
    """Iteratively sigma-clip ``values`` and return (mean, std).

    ``sigma`` gives the (lower, upper) rejection thresholds in units of
    the standard deviation; iteration stops when nothing is rejected or
    only ``minlength`` samples would remain.
    """
    def keep_mask(vals, sigs):
        center, spread = vals.mean(), vals.std()
        low = center - sigs[0] * spread
        high = center + sigs[1] * spread
        return (vals < high) & (vals > low)
    while ((False in keep_mask(values, sigma)) & (len(values) > minlength)):
        values = values[keep_mask(values, sigma)]
    return values.mean(), values.std()
# --- Centroiding ---
def centroid(images):
    """Dumb dumb centroiding (unweighted first moments).

    Assumes the x and y axes are the last two dimensions of ``images``;
    any number of leading batch dimensions is supported.

    FIX: the original hard-coded 3-D broadcasting (and its docstring
    admitted "Something is wrong with the broadcasting"); the index grids
    now broadcast against the trailing axes only, so both single images
    and batches work.  Weighting is still *not* implemented.

    Returns (x, y): centroid coordinates, one per leading batch element
    (scalars for a single 2-D image).
    """
    nx, ny = images.shape[-2:]
    xg = np.arange(nx)
    yg = np.arange(ny)
    denom = images.sum(axis=(-1, -2))
    # yg broadcasts along the last axis; xg needs a trailing singleton
    # axis to line up with the second-to-last axis.
    y = (images * yg).sum(axis=(-2, -1)) / denom
    x = (images * xg[:, None]).sum(axis=(-2, -1)) / denom
    return x, y
"""Classes for handling telescope and eyepiece properties."""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
# Global matplotlib styling for all TCalc plots: larger font, inward
# major/minor ticks on all four sides, frameless legends, long dashes,
# and wide default figures.
matplotlib.rcParams.update({'font.size': 14})
matplotlib.rcParams.update({'xtick.direction':'in'})
matplotlib.rcParams.update({'ytick.direction':'in'})
matplotlib.rcParams.update({'xtick.minor.visible':True})
matplotlib.rcParams.update({'ytick.minor.visible':True})
matplotlib.rcParams.update({'xtick.top':True})
matplotlib.rcParams.update({'ytick.right':True})
matplotlib.rcParams.update({'legend.frameon':False})
matplotlib.rcParams.update({'lines.dashed_pattern':[8,3]})
matplotlib.rcParams.update({"figure.figsize": [12,6]})
from TCalc.functions import focal_ratio, dawes_lim, resolving_power
from TCalc.functions import Min_magnification, Max_magnification, Min_eyepiece, Max_eyepiece
from TCalc.functions import Lmag_limit
from TCalc.functions import magnification, true_fov, exit_pupil, surface_brightness
from TCalc.age_eye import age_to_eye_diameter, eye_to_age
# Reference wavelengths (presumably nanometres, matching the grid below
# — TODO confirm against TCalc.functions).
blue = 400
green = 550
red = 700
# Wavelength grid and a set of representative ages with the
# corresponding eye pupil diameters from age_to_eye_diameter.
wavelengths_list = np.linspace(350,800,46)
age_list = np.array([10,20,30,35,45,60,70])
eye_diameter_list = np.array([age_to_eye_diameter(age) for age in age_list])
class eyepiece:
    """Class representing a single eyepiece

    Args:
        f_e: focal length of the eyepiece (mm)
        fov_e: field of view of the eyepiece (deg). Defaults to 50 degrees.
    """
    def __init__(self, f_e, fov_e=50):
        # Validate before storing: both quantities must be positive.
        if not f_e > 0:
            raise ValueError("f_e must be larger than 0")
        if not fov_e > 0:
            raise ValueError("fov must be larger than 0")
        self.f_e = f_e
        self.fov_e = fov_e
class focal_reducer:
    """Class representing a single focal reducer

    Args:
        P_reducer (float between 0 and 1): the power of the focal reducer
    """
    def __init__(self, P_reducer):
        # Reject anything outside the half-open interval (0, 1].
        if not (0 < P_reducer <= 1):
            raise ValueError("P_reducer must be between 0 and 1")
        self.P = P_reducer
        self.optic_type = 'focal reducer'
class barlow_lens:
    """Class representing a single Barlow lens

    Args:
        barlow (float greater than 1): the Barlow factor, default is 2
    """
    def __init__(self, barlow=2):
        # A Barlow factor below 1 would shorten the focal length.
        if barlow < 1:
            raise ValueError("barlow must be at least 1")
        self.P = barlow
        self.optic_type = 'Barlow lens'
class telescope:
"""Class representing a telescope
Args:
D_o: the size of the telescope opening (mm)
f_o: focal length of the telescope (mm)
user_D_eye: diameter of telescope user's eye in mm. Default is 7 mm.
user_age: age of the telescope user. Will be used to compute user_D_eye if none is specified.
"""
def __init__(self, D_o, f_o, user_D_eye=None, user_age=None):
    """Validate the inputs, resolve the observer's pupil diameter, and
    precompute every telescope-only property; eyepiece and optic state
    start out empty (NaN for eyepiece-dependent quantities)."""
    # Check that inputs make sense then save them as class atributes
    if D_o <= 0:
        raise ValueError("aperature must be larger than 0")
    if f_o <= 0:
        raise ValueError("f_o must be larger than 0")
    self.D_o = D_o
    self.f_o = f_o
    # f_o_true keeps the unmodified focal length so that focal reducers /
    # Barlow lenses can rescale f_o and later be removed again.
    self.f_o_true = f_o
    # Some stuff about the user: derive pupil diameter from age unless an
    # explicit user_D_eye is given (which then takes precedence).
    if user_D_eye is None:
        if user_age is None:
            print("No user_age or user_D_eye specified, using defaults (25 year old eye)")
            self.user_age = 25
            self.user_D_eye = age_to_eye_diameter(self.user_age)
        else:
            if user_age <= 0:
                raise ValueError("user_age must be larger than 0")
            self.user_age = user_age
            self.user_D_eye = age_to_eye_diameter(self.user_age)
    else:
        if user_D_eye <= 0:
            raise ValueError("user_eye_aperature must be larger than 0")
        self.user_D_eye = user_D_eye
        if user_age is not None:
            print("Specified user_age and user_eye_aperature. The user_eye_aperature will be used for calculations.")
            self.user_age = user_age
    # Compute basic properties derived from telescope information alone
    self._compute_focal_ratio()
    self._compute_dawes_limit()
    self._compute_resolving_power()
    self._compute_min_mag()
    self._compute_max_mag()
    self._compute_min_eye()
    self._compute_max_eye()
    self._compute_magnitude_limit()
    # Initialize eyepiece information
    self.eyepieces = {}
    self.current_eyepiece_id = None
    self.current_eyepiece = None
    # Set properties that depend on eyepiece selection to NaNs
    self.M = np.nan
    self.compatible_eyepiece = False
    self.fov = np.nan
    self.D_EP = np.nan
    self.SB = np.nan
    # Initialize optic information
    self.optics = {}
    self.current_optic_id = None
    self.current_optic = None
def list_eyepiece(self):
    """List the eyepieces and other optics available to the telescope

    Args:
        None

    Returns:
        Prints out a list of eyepiece objects and the
        current eyepiece being used.
    """
    # Table of registered eyepieces.
    print("\n Currently included eyepieces:")
    print(" Name  Focal Length  FOV")
    print(" --------------  --------------  --------------")
    names = self.eyepieces.keys()
    for name in names:
        print("  {: <14} {: <14} {: <14} ".format("\'"+name+"\'", str(self.eyepieces[name].f_e)+" mm", str(self.eyepieces[name].fov_e)+" degrees"))
    if self.current_eyepiece is None:
        print("\n No eyepiece is selected\n")
    else:
        print("\n The currently selected eyepiece is '{}'\n".format(self.current_eyepiece_id))
    # Table of registered focal reducers / Barlow lenses.
    print("\n Additional optical parts available:")
    print(" Name  Type  Power")
    print(" --------------  --------------  --------------")
    names = self.optics.keys()
    for name in names:
        print("  {: <14} {: <14} {: <14}".format("\'"+name+"\'", self.optics[name].optic_type, self.optics[name].P))
    if self.current_optic is None:
        print("\n No optical part is selected\n")
    else:
        print("\n The currently selected optical part is '{}'\n".format(self.current_optic_id))
def select_eyepiece(self, id=None):
    """Set the current eyepiece

    Args:
        id: The id of the eyepiece to include. Default is None, which
            selects no eyepiece

    Returns:
        None

    Raises:
        ValueError: if id cannot be cast to str or is not a known eyepiece.
    """
    # If the ID is None, we'll get rid of the eyepiece
    if id is None:
        self.current_eyepiece = None
        self.current_eyepiece_id = None
        # Reset eyepiece dependent quantities to NaN
        self.M = np.nan
        self.compatible_eyepiece = False
        self.fov = np.nan
        self.D_EP = np.nan
        self.SB = np.nan
        return
    # Check that id is a valid input.
    # FIX: the original used `if ~isinstance(id, str)` — `~` is bitwise
    # NOT (~True == -2, ~False == -1, both truthy), so the branch always
    # ran; `not` is the boolean negation intended here.
    if not isinstance(id, str):
        try:
            id = str(id)
        except Exception:
            # FIX: narrowed the original bare `except:` clause.
            raise ValueError("id must be castable to type 'str'")
    # Check that id is in the eyepieces available
    if id not in self.eyepieces.keys():
        raise ValueError("id does not correspond to an eyepiece. Try self.list_eyepiece.")
    # Update eyepiece selection
    self.current_eyepiece_id = id
    self.current_eyepiece = self.eyepieces[id]
    # Update quantities dependent on eyepiece
    self._compute_magnification()
    if self.f_e_min <= self.current_eyepiece.f_e <= self.f_e_max:
        self.compatible_eyepiece = True
    else:
        self.compatible_eyepiece = False
        print("Note: The magnification produced by this eyepiece is not compatible with the telescope.")
    self._compute_true_fov()
    self._compute_exit_pupil()
    self._compute_surface_brightness_sensitivity()
def select_optic(self, id=None):
    """Set the current optical part

    Args:
        id: The id of the optical part to include. Default is None, which
            selects no optical part

    Returns:
        None

    Raises:
        ValueError: if id cannot be cast to str or is not a known part.
    """
    # If the ID is None, we'll get rid of the optical part and restore the
    # telescope's true focal length.
    if id is None:
        self.current_optic = None
        self.current_optic_id = None
        # Update f_o
        self.f_o = self.f_o_true
    else:
        # Check that id is a valid input.
        # FIX: `~isinstance(...)` was bitwise NOT (always truthy); the
        # intended boolean negation is `not isinstance(...)`.
        if not isinstance(id, str):
            try:
                id = str(id)
            except Exception:
                # FIX: narrowed the original bare `except:` clause.
                raise ValueError("id must be castable to type 'str'")
        # Check that id is in the optics available
        if id not in self.optics.keys():
            raise ValueError("id does not correspond to an optical part. Try self.list_eyepiece.")
        # Update optic selection
        self.current_optic_id = id
        self.current_optic = self.optics[id]
        # Update f_o: the part's power rescales the effective focal length.
        self.f_o = self.f_o_true * self.current_optic.P
    # Update other quantities
    self._compute_focal_ratio()
    self._compute_min_eye()
    self._compute_max_eye()
    if self.current_eyepiece is not None:
        self._compute_magnification()
        self._compute_true_fov()
        self._compute_exit_pupil()
        self._compute_surface_brightness_sensitivity()
        if self.f_e_min <= self.current_eyepiece.f_e <= self.f_e_max:
            self.compatible_eyepiece = True
        else:
            self.compatible_eyepiece = False
            print("Note: The magnification produced by this eyepiece is not compatible with the telescope.")
def add_eyepiece(self, piece, id=None, select=True):
"""Attach an eyepiece to the telescope class
The telescope class can have multiple eyepieces associated
with it, this method allows you to add a single eyepiece
object to the list.
Args:
piece (eyepiece class instance): the eyepiece object to add
id (string): the name to give the eyepiece - it will be identified by this name
when selecting and analyzing eyepiece configurations. If unspecified, it will be set to a number.
select (bool): if True (default) the added eyepiece will be selected by calling the select_eyepiece method.
Returns:
None
"""
# If no name is given for eyepiece, just give it the index number as a name
if id is None:
id = str(len(self.eyepieces))
# Check that inputs are formatted correctly
elif ~isinstance(id,str):
try:
id = str(id)
except:
raise ValueError("id must be castable to type 'str'")
if not isinstance(piece,eyepiece):
raise ValueError("piece must be an instance of eyepiece class")
# Add eyepiece to list
self.eyepieces[id] = piece
# If select==True, we'll make the new eyepiece the current eyepiece
if select:
self.select_eyepiece(id)
def add_optic(self, optic, id=None, select=True):
"""Attach an optical part to the telescope class
The telescope class can have multiple optical parts (focal reducers and Barlow lenses)
associated with it, this method allows you to add a single part to the list.
Args:
optic (focal_reducer or barlow_lens class instance): the optical part object to add
id (string): the name to give the part - it will be identified by this name
when selecting and analyzing optical configurations. If unspecified, it will be set to a number.
select (bool): if True (default) the added optical part will be selected by calling the select_eyepiece method.
Returns:
None
"""
# If no name is given for optic, just give it the index number as a name
if id is None:
id = str(len(self.optics))
# Check that inputs are formatted correctly
elif ~isinstance(id,str):
try:
id = str(id)
except:
raise ValueError("id must be castable to type 'str'")
if not isinstance(optic,barlow_lens):
if not isinstance(optic,focal_reducer):
raise ValueError("optic must be an instance of barlow_lens or focal_reducer class")
# Add eyepiece to list
self.optics[id] = optic
# If select==True, we'll make the new eyepiece the current eyepiece
if select:
self.select_optic(id)
def change_user_age(self,new_age):
"""Update the age of the user and the corresponding eye size
Args:
new_age (float > 0): the age of the user
Returns:
None
"""
# Some stuff about the user
if new_age <= 0:
raise ValueError("user_age must be larger than 0")
self.user_age = new_age
self.user_D_eye = age_to_eye_diameter(self.user_age)
# Update limits
self._compute_min_mag()
self._compute_max_eye()
# Update quantities dependent on eyepiece
if self.current_eyepiece is not None:
if self.f_e_min <= self.current_eyepiece.f_e <= self.f_e_max:
self.compatible_eyepiece = True
else:
self.compatible_eyepiece = False
print("Note: The magnification of the current eyepiece is not compatible.")
def say_configuration(self):
"""List properties of the telescope eyepiece pair
Args:
None
Returns:
Writes out the properties of the telescope
"""
print("\n The telescope has the following layout:")
print(" Aperture diameter: {} mm".format(self.D_o))
print(" Focal length: {} mm, corresponding to a focal ratio of {}".format(self.f_o_true,self.f_R_true))
if self.current_optic is not None:
if self.current_optic.optic_type == 'Barlow lens':
action = 'increases'
else:
action = 'decreases'
print(" '{}', a {}, has been added to the optical path. This {} the focal length by {}".format(self.current_optic_id,self.current_optic.optic_type,action,self.current_optic.P))
print(" This results in")
print(" Focal length: {} mm, corresponding to a focal ratio of {}".format(self.f_o,self.f_R))
print("")
print(" In good atmospheric conditions, the resolution of the telescope (Dawes limit) is {:.1f} arcseconds".format(self.Dawes_lim))
print(" By wavelength, the resolution is")
print(" {} nm (blue): {:.1f} arcsec".format(blue,self.blue_P_R))
print(" {} nm (green): {:.1f} arcsec".format(green,self.green_P_R))
print(" {} nm (red): {:.1f} arcsec".format(red,self.red_P_R))
print("")
age = eye_to_age(self.user_D_eye)
print(" The maximum possible magnification factor is {:.1f}".format(self.M_max))
print(" This means the minimum compatible eyepiece focal length is {:.1f} mm".format(self.f_e_min))
print("")
print(" The minimum magnification factor and corresponding maximum eyepiece focal length depend on the diameter of the observer's eye.")
print(" For a telescope user with an eye diameter of {} mm (apropriate for an age around {} years):".format(self.user_D_eye,age))
print(" The minimum magnification factor is {:.1f}".format(self.M_min))
print(" This means the maximum compatible eyepiece focal length is {:.1f} mm".format(self.M_max))
print("")
print(" The faintest star that can be seen by this telescope is {:.1f} mag".format(self.Lmag_limit))
if self.current_eyepiece is not None:
print("")
print(" The currently selected eyepiece is '{}', which has the following layout:".format(self.current_eyepiece_id))
print(" Focal length: {} mm".format(self.current_eyepiece.f_e))
print(" Field of view: {} degrees".format(self.current_eyepiece.fov_e))
print("")
if self.compatible_eyepiece:
compatible = 'is'
else:
compatible = 'IS NOT'
print(" With this eyepiece:")
print(" The magnification factor is {:.1f}. This {} compatible with the telescope limits.".format(self.M,compatible))
print(" The true field of view is {:.0f} degrees".format(self.fov))
print(" The exit pupil diameter is {:.1f} mm".format(self.D_EP))
print("")
print(" The faintest surface brightness that can be seen by this telescope is {:.2f}".format(self.SB))
print("")
def show_resolving_power(self,seeing=2.5):
"""Plots the resolution performance of the telescope for a specific seeing value
Args:
seeing (float): Seeing factor of sky. Default to 2.5
Returns:
A plot depicting variation of chromatic resolution or simply the resolution at different wavelengths
with respect to Dawes Limit and Limit due to seeing
"""
fig,ax = plt.subplots()
ax.set(xlabel='Wavelength [nm]', ylabel='Resolution [arcsec]',xlim=(380,750))
ax.title.set_text('Resolution performance of the telescope-eyepiece pair')
ax.plot(wavelengths_list,self.P_R,label='Chromatic Resolution')
ax.axhline(self.Dawes_lim,color='C0',ls='--',label='Dawes limit')
ax.axhline(seeing,color='.5',ls='--',label='Limit due to seeing')
ax.legend()
plt.show()
def show_magnification_limits(self):
"""Plots the magnification limits for a telescope-eyepiece pair according to user's age
Args:
None
Returns:
Plot of maximum achievable magnification as a function of pupil's diameter
which varies according to user's age. Also, plots the magnification strength's
of the current selected eyepice.
"""
fig,ax = plt.subplots()
ax.set(xlabel='Eye Diameter [mm]', ylabel='Magnification Factor',xlim=(5,7.5),yscale='log')
ax.title.set_text('Magnification Limits of the telescope-eyepiece pair')
ax.plot(eye_diameter_list,self.M_min_by_age,ls='--',label='Minimum')
ax.axhline(self.M_max,color='C0',label='Maximum')
ax.axhline(self.M,color='k',label='Current Eyepiece')
ax.legend()
plt.show()
def show_eyepiece_limits(self):
"""Plots the eyepiece limits for a telescope-eyepiece pair according to user's age and pupil diameter
Args:
None
Returns:
Plot of minimum achievable magnification as a function of pupil's diameter
which varies according to user's age. Also, plots the power of the current selected eyepice.
"""
fig,ax = plt.subplots()
ax.set(xlabel='Eye Diameter [mm]', ylabel='Eyepiece Focal Length [mm]',xlim=(5,7.5))
ax.title.set_text('Eyepiece Limits of the telescope-eyepiece pair')
ax.plot(eye_diameter_list,self.f_e_max_by_age,ls='--',label='Maximum')
ax.axhline(self.f_e_min,color='C0',label='Minimum')
ax.axhline(self.current_eyepiece.f_e,color='k',label='Current Eyepiece')
ax.legend()
plt.show()
    # The rest of these are internal wrappers for running calculations in
    # functions.py. They get called automatically when something
    # about the telescope/eyepiece changes.
def _compute_focal_ratio(self):
"""Compute the focal ratio of the telescope
Args:
None
Returns:
Updates the state of self.f_R
"""
self.f_R = focal_ratio(self.f_o,self.D_o)
self.f_R_true = focal_ratio(self.f_o_true,self.D_o)
def _compute_dawes_limit(self):
"""Compute the Dawes limit of the telescope
Args:
None
Returns:
Updates the state of self.Dawes_lim
"""
self.Dawes_lim = dawes_lim(self.D_o)
def _compute_resolving_power(self):
"""Compute the resolving power of the telescope vs wavelength
Args:
None
Returns:
Updates the state of self.resolving_power, and self.[color]_resolving_power
"""
self.P_R = resolving_power(wavelengths_list,self.D_o)
self.blue_P_R = resolving_power(blue,self.D_o)
self.green_P_R = resolving_power(green,self.D_o)
self.red_P_R = resolving_power(red,self.D_o)
def _compute_min_mag(self):
"""Compute the minimum magnification of the telescope
Args:
None
Returns:
Updates the state of self.M_min and self.M_min_by_age
"""
self.M_min = Min_magnification(self.D_o,self.user_D_eye)
self.M_min_by_age = np.zeros(len(age_list))
for i in range(len(age_list)):
self.M_min_by_age[i] = Min_magnification(self.D_o,age=age_list[i])
def _compute_max_mag(self):
"""Compute the maximum magnification of the telescope
Args:
None
Returns:
Updates the state of self.M_max
"""
self.M_max = Max_magnification(self.D_o)
def _compute_min_eye(self):
"""Compute the minimum eyepiece focal length compatible with the telescope
Args:
None
Returns:
Updates the state of self.f_e_min
"""
self.f_e_min = Min_eyepiece(self.f_o,self.M_max)
def _compute_max_eye(self):
"""Compute the maximum eyepiece focal length compatible with the telescope
Args:
None
Returns:
Updates the state of self.f_e_max and self.f_e_max_by_age
"""
self.f_e_max = Max_eyepiece(self.f_R,self.user_D_eye)
self.f_e_max_by_age = np.zeros(len(age_list))
for i in range(len(age_list)):
self.f_e_max_by_age[i] = Max_eyepiece(self.f_R,age=age_list[i])
def _compute_magnitude_limit(self):
"""Compute the magnitude limit of the telescope
Args:
None
Returns:
Updates the state of self.Lmag_limit
"""
self.Lmag_limit = Lmag_limit(self.D_o)
def _compute_magnification(self):
"""Compute the magnification for the current telescope-eyepiece combo
Args:
None
Returns:
Updates the state of self.M
"""
if self.current_eyepiece is None:
raise ValueError("No eyepiece selected, cannot compute magnification")
self.M = magnification(self.f_o,self.current_eyepiece.f_e)
def _compute_true_fov(self):
"""Compute the true field of view of the telescope/eyepiece combo
Args:
None
Returns:
Updates the state of self.fov
"""
if self.current_eyepiece is None:
raise ValueError("No eyepiece selected, cannot compute magnification")
self.fov = true_fov(self.M,self.current_eyepiece.fov_e)
def _compute_exit_pupil(self):
"""Compute the exit pupil of the telescope/eyepiece combo
Args:
None
Returns:
Updates the state of self.D_EP
"""
if self.current_eyepiece is None:
raise ValueError("No eyepiece selected, cannot compute magnification")
self.D_EP = exit_pupil(self.current_eyepiece.f_e,self.f_R)
def _compute_surface_brightness_sensitivity(self):
"""Compute the surface brightness limit of the telescope/eyepiece combo
Args:
None
Returns:
Updates the state of self.SB
"""
if self.current_eyepiece is None:
raise ValueError("No eyepiece selected, cannot compute magnification")
self.SB = surface_brightness(self.D_EP) |
# NOTE(review): removed trailing non-code boilerplate ("Subsets and Splits" /
# dataset-viewer text) that was accidentally appended to this file.