code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
from astropy.time import Time, TimeDelta
import astropy.coordinates as ascoord
import lunarsky
import lunarsky.tests as ltests
import numpy as np
import pytest
# Test fixtures: a grid of Nangs x Nangs selenodetic (lat, lon) pairs and
# Ntimes observation epochs spanning ten years.
Ntimes = 5
Nangs = 3
latitudes = np.linspace(0, 90, Nangs)
longitudes = np.linspace(0, 360, Nangs)
# All (lat, lon) combinations, longitude-major.
latlons = [(lat, lon) for lon in longitudes for lat in latitudes]
# Ten years of time, as Julian dates starting at the J2000 reference epoch.
times = Time(lunarsky.topo._J2000.jd + np.linspace(0, 10 * 365.25, Ntimes), format='jd')
@pytest.mark.parametrize('time', times)
@pytest.mark.parametrize('lat,lon', latlons)
def test_icrs_to_mcmf(time, lat, lon):
    """Check that ICRS -> TOPO matches ICRS -> MCMF -> TOPO."""
    catalog = ltests.get_catalog()
    location = lunarsky.MoonLocation.from_selenodetic(lon, lat)
    topo_frame = lunarsky.LunarTopo(location=location, obstime=time)
    direct = catalog.transform_to(topo_frame)
    via_mcmf = catalog.transform_to(lunarsky.MCMF(obstime=time)).transform_to(topo_frame)
    tolerance = ascoord.Angle(10.0, 'arcsec')
    assert ltests.positions_close(direct, via_mcmf, tolerance)
@pytest.mark.parametrize('time', times)
@pytest.mark.parametrize('lat,lon', latlons)
def test_topo_transform_loop(time, lat, lon):
    """Round-trip the remaining transformations: TOPO -> ICRS and TOPO -> MCMF."""
    observer_height = 10.0  # m
    catalog = ltests.get_catalog()
    location = lunarsky.MoonLocation.from_selenodetic(lon, lat, observer_height)
    topo = catalog.transform_to(lunarsky.LunarTopo(location=location, obstime=time))
    tolerance = ascoord.Angle(5.0, 'arcsec')
    # TOPO -> ICRS must recover the original catalog positions.
    back_to_icrs = topo.transform_to(ascoord.ICRS())
    assert ltests.positions_close(catalog, back_to_icrs, tolerance)
    # TOPO -> MCMF must agree with the direct ICRS -> MCMF transform.
    mcmf_via_topo = topo.transform_to(lunarsky.MCMF(obstime=time))
    mcmf_direct = catalog.transform_to(lunarsky.MCMF(obstime=time))
    assert ltests.positions_close(mcmf_via_topo, mcmf_direct, tolerance)
def test_earth_from_moon():
    """Track the Earth's apparent position from the lunar surface over time.

    For an observer at selenodetic (0, 0) the Earth should stay near zenith,
    its distance should remain between the extreme perigee/apogee bounds, and
    its azimuthal wobble should repeat with the Moon's orbital period.
    """
    n_steps = 100
    elapsed_sec = np.linspace(0, 4 * 28 * 24 * 3600., n_steps)  # four months
    epochs = Time.now() + TimeDelta(elapsed_sec, format='sec')
    # Earth-Moon distance bounds. The lunar apogee and perigee vary over
    # time; these are chosen from a table of minimum/maximum perigees over
    # a century: http://astropixels.com/ephemeris/moon/moonperap2001.html
    dist_min_km = 356425.0  # km, Dec 6 2052
    dist_max_km = 406709.0  # km, Dec 12 2061
    observer = lunarsky.MoonLocation.from_selenodetic(0, 0)
    zen_az_deg = np.zeros((n_steps, 2))
    for idx, epoch in enumerate(epochs):
        earth_mcmf = lunarsky.spice_utils.earth_pos_mcmf(epoch)
        separation_km = np.linalg.norm(earth_mcmf.cartesian.xyz.to('km').value)
        assert dist_min_km < separation_km < dist_max_km
        earth_topo = earth_mcmf.transform_to(lunarsky.LunarTopo(location=observer, obstime=epoch))
        zen_az_deg[idx] = [earth_topo.zen.deg, earth_topo.az.deg]
    # All zenith angles should be less than 10 degrees.
    assert np.all(zen_az_deg[:, 0] < 10)
    # Check that the periodicity of the Earth's motion around the zenith
    # is consistent with the Moon's orbit.
    orbit_freq = 1 / (28. * 24. * 3600.)  # Hz, frequency of the moon's orbit
    spectrum = np.fft.fft(zen_az_deg[:, 1])
    freqs = np.fft.fftfreq(n_steps, d=np.diff(elapsed_sec)[0])
    positive = freqs > 0
    expected_bin = np.argmin(np.abs(orbit_freq - freqs[positive]))
    peak_bin = np.argmax(np.abs(spectrum[positive]))
    assert peak_bin == expected_bin
| [
"lunarsky.tests.get_catalog",
"lunarsky.MCMF",
"numpy.abs",
"astropy.time.Time.now",
"astropy.coordinates.Angle",
"lunarsky.spice_utils.earth_pos_mcmf",
"numpy.fft.fft",
"astropy.time.TimeDelta",
"numpy.diff",
"pytest.mark.parametrize",
"numpy.linspace",
"numpy.zeros",
"lunarsky.LunarTopo",
... | [((196, 221), 'numpy.linspace', 'np.linspace', (['(0)', '(90)', 'Nangs'], {}), '(0, 90, Nangs)\n', (207, 221), True, 'import numpy as np\n'), ((235, 261), 'numpy.linspace', 'np.linspace', (['(0)', '(360)', 'Nangs'], {}), '(0, 360, Nangs)\n', (246, 261), True, 'import numpy as np\n'), ((442, 480), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""time"""', 'times'], {}), "('time', times)\n", (465, 480), False, 'import pytest\n'), ((482, 525), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""lat,lon"""', 'latlons'], {}), "('lat,lon', latlons)\n", (505, 525), False, 'import pytest\n'), ((1076, 1114), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""time"""', 'times'], {}), "('time', times)\n", (1099, 1114), False, 'import pytest\n'), ((1116, 1159), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""lat,lon"""', 'latlons'], {}), "('lat,lon', latlons)\n", (1139, 1159), False, 'import pytest\n'), ((696, 716), 'lunarsky.tests.get_catalog', 'ltests.get_catalog', ([], {}), '()\n', (714, 716), True, 'import lunarsky.tests as ltests\n'), ((728, 776), 'lunarsky.MoonLocation.from_selenodetic', 'lunarsky.MoonLocation.from_selenodetic', (['lon', 'lat'], {}), '(lon, lat)\n', (766, 776), False, 'import lunarsky\n'), ((1284, 1304), 'lunarsky.tests.get_catalog', 'ltests.get_catalog', ([], {}), '()\n', (1302, 1304), True, 'import lunarsky.tests as ltests\n'), ((1315, 1371), 'lunarsky.MoonLocation.from_selenodetic', 'lunarsky.MoonLocation.from_selenodetic', (['lon', 'lat', 'height'], {}), '(lon, lat, height)\n', (1353, 1371), False, 'import lunarsky\n'), ((1897, 1941), 'numpy.linspace', 'np.linspace', (['(0)', '(4 * 28 * 24 * 3600.0)', 'Ntimes'], {}), '(0, 4 * 28 * 24 * 3600.0, Ntimes)\n', (1908, 1941), True, 'import numpy as np\n'), ((2403, 2451), 'lunarsky.MoonLocation.from_selenodetic', 'lunarsky.MoonLocation.from_selenodetic', (['lat', 'lon'], {}), '(lat, lon)\n', (2441, 2451), False, 'import lunarsky\n'), ((2467, 2488), 'numpy.zeros', 
'np.zeros', (['(Ntimes, 2)'], {}), '((Ntimes, 2))\n', (2475, 2488), True, 'import numpy as np\n'), ((2844, 2871), 'numpy.all', 'np.all', (['(zaaz_deg[:, 0] < 10)'], {}), '(zaaz_deg[:, 0] < 10)\n', (2850, 2871), True, 'import numpy as np\n'), ((3129, 3155), 'numpy.fft.fft', 'np.fft.fft', (['zaaz_deg[:, 1]'], {}), '(zaaz_deg[:, 1])\n', (3139, 3155), True, 'import numpy as np\n'), ((389, 424), 'numpy.linspace', 'np.linspace', (['(0)', '(10 * 365.25)', 'Ntimes'], {}), '(0, 10 * 365.25, Ntimes)\n', (400, 424), True, 'import numpy as np\n'), ((809, 855), 'lunarsky.LunarTopo', 'lunarsky.LunarTopo', ([], {'location': 'loc', 'obstime': 'time'}), '(location=loc, obstime=time)\n', (827, 855), False, 'import lunarsky\n'), ((887, 914), 'lunarsky.MCMF', 'lunarsky.MCMF', ([], {'obstime': 'time'}), '(obstime=time)\n', (900, 914), False, 'import lunarsky\n'), ((946, 992), 'lunarsky.LunarTopo', 'lunarsky.LunarTopo', ([], {'location': 'loc', 'obstime': 'time'}), '(location=loc, obstime=time)\n', (964, 992), False, 'import lunarsky\n'), ((1042, 1071), 'astropy.coordinates.Angle', 'ascoord.Angle', (['(10.0)', '"""arcsec"""'], {}), "(10.0, 'arcsec')\n", (1055, 1071), True, 'import astropy.coordinates as ascoord\n'), ((1403, 1449), 'lunarsky.LunarTopo', 'lunarsky.LunarTopo', ([], {'location': 'loc', 'obstime': 'time'}), '(location=loc, obstime=time)\n', (1421, 1449), False, 'import lunarsky\n'), ((1482, 1496), 'astropy.coordinates.ICRS', 'ascoord.ICRS', ([], {}), '()\n', (1494, 1496), True, 'import astropy.coordinates as ascoord\n'), ((1546, 1574), 'astropy.coordinates.Angle', 'ascoord.Angle', (['(5.0)', '"""arcsec"""'], {}), "(5.0, 'arcsec')\n", (1559, 1574), True, 'import astropy.coordinates as ascoord\n'), ((1608, 1635), 'lunarsky.MCMF', 'lunarsky.MCMF', ([], {'obstime': 'time'}), '(obstime=time)\n', (1621, 1635), False, 'import lunarsky\n'), ((1668, 1695), 'lunarsky.MCMF', 'lunarsky.MCMF', ([], {'obstime': 'time'}), '(obstime=time)\n', (1681, 1695), False, 'import lunarsky\n'), 
((1745, 1773), 'astropy.coordinates.Angle', 'ascoord.Angle', (['(5.0)', '"""arcsec"""'], {}), "(5.0, 'arcsec')\n", (1758, 1773), True, 'import astropy.coordinates as ascoord\n'), ((1973, 1983), 'astropy.time.Time.now', 'Time.now', ([], {}), '()\n', (1981, 1983), False, 'from astropy.time import Time, TimeDelta\n'), ((1986, 2014), 'astropy.time.TimeDelta', 'TimeDelta', (['ets'], {'format': '"""sec"""'}), "(ets, format='sec')\n", (1995, 2014), False, 'from astropy.time import Time, TimeDelta\n'), ((2544, 2584), 'lunarsky.spice_utils.earth_pos_mcmf', 'lunarsky.spice_utils.earth_pos_mcmf', (['tim'], {}), '(tim)\n', (2579, 2584), False, 'import lunarsky\n'), ((3249, 3275), 'numpy.abs', 'np.abs', (['(moonfreq - ks[sel])'], {}), '(moonfreq - ks[sel])\n', (3255, 3275), True, 'import numpy as np\n'), ((3301, 3317), 'numpy.abs', 'np.abs', (['_az[sel]'], {}), '(_az[sel])\n', (3307, 3317), True, 'import numpy as np\n'), ((2733, 2778), 'lunarsky.LunarTopo', 'lunarsky.LunarTopo', ([], {'location': 'loc', 'obstime': 'tim'}), '(location=loc, obstime=tim)\n', (2751, 2778), False, 'import lunarsky\n'), ((3190, 3202), 'numpy.diff', 'np.diff', (['ets'], {}), '(ets)\n', (3197, 3202), True, 'import numpy as np\n')] |
import os
import csv
import numpy as np
import pandas as pd
from sklearn.preprocessing import MultiLabelBinarizer
from torch.utils.data import Dataset
from pytorch_pretrained_bert import BertTokenizer
class OrganicDataset(Dataset):
    """Organic-comments ABSA dataset for BERT-style models.

    Each item provides a tokenized, zero-padded input sequence plus multi-hot
    label vectors (and per-label weight vectors) for the aspect, polarity and
    joint aspect/polarity classification tasks.
    """

    # `train`: 0 for train, 1 for eval, 2 for test split.
    def __init__(self, path, tokenizer, seq_len=128, train=0):
        self.train = train
        # The training split is always loaded: its labels define the
        # binarizer vocabularies shared by every split.
        self.data_train = preprocess_organic_annotated(os.path.join(path, 'organic_train.csv'))
        self.tokenizer = tokenizer
        self.seq_len = seq_len
        # Weight assigned to negative labels in the per-sample weight vectors.
        self.neg_label_weight = 0.1
        # Label encoders for the aspect, polarity and joint tasks.
        self.aspect_encoder = MultiLabelBinarizer()
        self.polarity_encoder = MultiLabelBinarizer()
        self.joint_encoder = MultiLabelBinarizer()
        self.aspect_encoder.fit(self.data_train.aspect)
        self.polarity_encoder.fit(self.data_train.polarity)
        self.joint_encoder.fit(self.data_train.joint)
        if self.train == 0:
            self.data = self.data_train
        elif self.train == 1:
            self.data = preprocess_organic_annotated(os.path.join(path, 'organic_eval.csv'))
        else:
            self.data = preprocess_organic_annotated(os.path.join(path, 'organic_test.csv'))
        self.aspects = self.aspect_encoder.transform(self.data.aspect)
        self.polarity = self.polarity_encoder.transform(self.data.polarity)
        self.joint = self.joint_encoder.transform(self.data.joint)

    def __len__(self):
        return self.data.shape[0]

    def __getitem__(self, idx):
        aspect = self.aspects[idx]
        # Positive labels keep weight 1.0; negatives are down-weighted.
        aspect_weights = np.where(aspect == 1.0, aspect, self.neg_label_weight)
        polarity = self.polarity[idx]
        joint = self.joint[idx]
        joint_weights = np.where(joint == 1.0, joint, self.neg_label_weight)
        seq_raw = self.data.iloc[idx, 0]
        # Truncate to leave room for the [CLS]/[SEP] special tokens.
        seq = ['[CLS]'] + self.tokenizer.tokenize(seq_raw)[:self.seq_len - 2] + ['[SEP]']
        seq = self.tokenizer.convert_tokens_to_ids(seq)
        # Zero-pad sequence and attention mask up to seq_len.
        mask = [1] * len(seq)
        while len(mask) < self.seq_len:
            mask.append(0)
            seq.append(0)
        assert len(mask) == self.seq_len
        assert len(seq) == self.seq_len
        # BUG FIX: the np.float alias was removed in NumPy 1.24; the builtin
        # float is the documented, behavior-identical replacement.
        sample = {'aspect': aspect.astype(float), 'polarity': polarity.astype(float),
                  'joint': joint.astype(float), 'sequence': np.array(seq), 'mask': np.array(mask),
                  'aspect_weights': aspect_weights.astype(float), 'joint_weights': joint_weights.astype(float)}
        return sample
def preprocess_organic_annotated(path):
    """Load an annotated organic-comments CSV and group labels per sentence.

    The file is '|'-separated with (at least) the columns sequence, aspect
    and polarity. Returns a DataFrame with one row per unique sentence and
    list-valued 'aspect', 'polarity' and 'joint' (aspect/polarity product)
    columns.
    """
    data = pd.read_csv(path, sep='|', quoting=csv.QUOTE_NONE)
    # Drop trailing commas in polarity. (BUG FIX: the original kept only the
    # first character, which also truncated multi-character labels.)
    data['polarity'] = data.iloc[:, 2].apply(lambda x: x.rstrip(','))
    data = data.loc[:, ('sequence', 'aspect', 'polarity')]
    # Collect all aspects and corresponding polarities for every sentence into lists.
    data = data.groupby('sequence', as_index=False).agg({'aspect': list, 'polarity': list})
    data['joint'] = data.apply(lambda x: ['/'.join([a, b]) for a in x['aspect'] for b in x['polarity']], axis=1)
    return data
if __name__ == '__main__':
    # Smoke test: build the training dataset and inspect the first sample.
    data = OrganicDataset('../models/data/organic', BertTokenizer.from_pretrained('bert-base-uncased'))
    for i in range(1):
        # BUG FIX: the sample dict has no 'test' key (keys are 'aspect',
        # 'polarity', 'joint', 'sequence', 'mask', '*_weights'), so the old
        # data[i]['test'] raised KeyError. Print the whole sample instead.
        print(data[i])
| [
"pytorch_pretrained_bert.BertTokenizer.from_pretrained",
"pandas.read_csv",
"numpy.where",
"os.path.join",
"numpy.array",
"sklearn.preprocessing.MultiLabelBinarizer"
] | [((2683, 2733), 'pandas.read_csv', 'pd.read_csv', (['path'], {'sep': '"""|"""', 'quoting': 'csv.QUOTE_NONE'}), "(path, sep='|', quoting=csv.QUOTE_NONE)\n", (2694, 2733), True, 'import pandas as pd\n'), ((700, 721), 'sklearn.preprocessing.MultiLabelBinarizer', 'MultiLabelBinarizer', ([], {}), '()\n', (719, 721), False, 'from sklearn.preprocessing import MultiLabelBinarizer\n'), ((754, 775), 'sklearn.preprocessing.MultiLabelBinarizer', 'MultiLabelBinarizer', ([], {}), '()\n', (773, 775), False, 'from sklearn.preprocessing import MultiLabelBinarizer\n'), ((805, 826), 'sklearn.preprocessing.MultiLabelBinarizer', 'MultiLabelBinarizer', ([], {}), '()\n', (824, 826), False, 'from sklearn.preprocessing import MultiLabelBinarizer\n'), ((1662, 1716), 'numpy.where', 'np.where', (['(aspect == 1.0)', 'aspect', 'self.neg_label_weight'], {}), '(aspect == 1.0, aspect, self.neg_label_weight)\n', (1670, 1716), True, 'import numpy as np\n'), ((1811, 1863), 'numpy.where', 'np.where', (['(joint == 1.0)', 'joint', 'self.neg_label_weight'], {}), '(joint == 1.0, joint, self.neg_label_weight)\n', (1819, 1863), True, 'import numpy as np\n'), ((3280, 3330), 'pytorch_pretrained_bert.BertTokenizer.from_pretrained', 'BertTokenizer.from_pretrained', (['"""bert-base-uncased"""'], {}), "('bert-base-uncased')\n", (3309, 3330), False, 'from pytorch_pretrained_bert import BertTokenizer\n'), ((466, 505), 'os.path.join', 'os.path.join', (['path', '"""organic_train.csv"""'], {}), "(path, 'organic_train.csv')\n", (478, 505), False, 'import os\n'), ((2451, 2464), 'numpy.array', 'np.array', (['seq'], {}), '(seq)\n', (2459, 2464), True, 'import numpy as np\n'), ((2474, 2488), 'numpy.array', 'np.array', (['mask'], {}), '(mask)\n', (2482, 2488), True, 'import numpy as np\n'), ((1149, 1187), 'os.path.join', 'os.path.join', (['path', '"""organic_eval.csv"""'], {}), "(path, 'organic_eval.csv')\n", (1161, 1187), False, 'import os\n'), ((1256, 1294), 'os.path.join', 'os.path.join', (['path', 
'"""organic_test.csv"""'], {}), "(path, 'organic_test.csv')\n", (1268, 1294), False, 'import os\n')] |
# ---
# jupyter:
# jupytext:
# cell_markers: '"""'
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.7.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown]
"""
# Classification using importance-weighted SGPR
This notebook explains how to use Markovflow to build and optimise a GP classifier (in 1D of
course!) using importance-weighted variational inference.
"""
# %%
import numpy as np
import tensorflow as tf
from gpflow.ci_utils import ci_niter
from gpflow.likelihoods import Bernoulli
from markovflow.models.iwvi import ImportanceWeightedVI
from markovflow.kernels import Matern32
import matplotlib.pyplot as plt
# %%
# Setup
learning_rate = 1e-3
importance_K = 10  # number of importance samples used by the IW estimator

# Toy 1D classification data: a noisy cosine over t in [0, 10], thresholded
# at zero to produce binary labels.
num_data = 100
time_points = np.linspace(0, 10, num_data).reshape(-1,)
observations = np.cos(2*np.pi * time_points / 3.).reshape(-1, 1) + np.random.randn(num_data, 1) * .8
observations = (observations > 0).astype(float)  # binarize to {0., 1.}
data = (tf.convert_to_tensor(time_points), tf.convert_to_tensor(observations))
# %%
# Model setup: sparse GP with a Matern-3/2 kernel, Bernoulli likelihood and
# importance-weighted variational inference. The inducing points span a
# slightly wider range than the data.
num_inducing = 20
inducing_points = np.linspace(-1, 11, num_inducing).reshape(-1,)
kernel = Matern32(lengthscale=2.0, variance=4.0)
likelihood = Bernoulli()
m = ImportanceWeightedVI(kernel=kernel,
                         inducing_points=tf.constant(inducing_points, dtype=tf.float64),
                         likelihood=likelihood,
                         num_importance_samples=importance_K)
# %%
# Optimizer setup: separate Adam optimizers for the variational parameters
# and the kernel hyperparameters (updated with different objectives below).
variational_variables = m.dist_q.trainable_variables
hyperparam_variables = m.kernel.trainable_variables
adam_variational = tf.optimizers.Adam(learning_rate)
adam_hyper = tf.optimizers.Adam(learning_rate)
# Objectives, negated for minimization: the DREGs gradient estimator drives
# the variational parameters; the IW ELBO drives the hyperparameters.
_dregs = lambda: -m.dregs_objective(data)
_iwvi_elbo = lambda: -m.elbo(data)


@tf.function
def step():
    """One optimization step: variational params (DREGs), then hypers (ELBO)."""
    adam_variational.minimize(_dregs, var_list=variational_variables)
    adam_hyper.minimize(_iwvi_elbo, var_list=hyperparam_variables)


@tf.function
def elbo_eval():
    """Evaluate the (stochastic) importance-weighted ELBO on the data."""
    return m.elbo(data)
# %%
# a function to plot the data and model fit
def plot(model):
    """Plot the data with samples from the proposal and IW posteriors."""
    grid = np.linspace(0, 10, 200).reshape(-1,)
    n_draws = 50
    proposal_draws = model.posterior.proposal_process.sample_state(grid, n_draws)
    iwvi_draws = model.posterior.sample_f(grid, n_draws, input_data=data)
    _, axes = plt.subplots(2, 1, sharex=True, sharey=True)
    # Top panel: observations plus proposal-process state samples.
    axes[0].plot(time_points, observations, 'kx')
    axes[0].set_title('proposal')
    axes[0].plot(grid, proposal_draws[..., 0].numpy().T, alpha=.1, color='red')
    # Bottom panel: observations plus importance-weighted posterior samples.
    axes[1].plot(time_points, observations, 'kx')
    axes[1].set_title('importance-weighted')
    axes[1].plot(grid, iwvi_draws[..., 0].numpy().T, alpha=.1, color='blue')
    axes[1].set_ylim(-1.5, 2.5)
    # Posterior mean by numerically integrating the IWVI posterior; the
    # inverse link is clipped away from exactly 0 and 1.
    eps = 1e-3
    inv_link = lambda x: eps + (1 - eps) * likelihood.invlink(x)
    probs = m.posterior.expected_value(grid, data, inv_link)
    axes[1].plot(grid, probs, color='black', lw=1.6)
# %%
plot(m)
# %%
# The optimization loop: every 10 iterations, record the mean and standard
# deviation of the (stochastic) ELBO over 10 evaluations.
elbos, elbo_stds = [], []
max_iter = ci_niter(2000)
for i in range(max_iter):
    step()
    if i % 10 == 0:
        elbos_i = [elbo_eval().numpy() for _ in range(10)]
        elbos.append(np.mean(elbos_i))
        elbo_stds.append(np.std(elbos_i))
        print(i, elbos[-1], elbo_stds[-1])

# %%
plot(m)
| [
"numpy.mean",
"gpflow.ci_utils.ci_niter",
"numpy.std",
"markovflow.kernels.Matern32",
"numpy.linspace",
"tensorflow.optimizers.Adam",
"tensorflow.constant",
"numpy.cos",
"gpflow.likelihoods.Bernoulli",
"tensorflow.convert_to_tensor",
"numpy.random.randn",
"matplotlib.pyplot.subplots"
] | [((1280, 1319), 'markovflow.kernels.Matern32', 'Matern32', ([], {'lengthscale': '(2.0)', 'variance': '(4.0)'}), '(lengthscale=2.0, variance=4.0)\n', (1288, 1319), False, 'from markovflow.kernels import Matern32\n'), ((1333, 1344), 'gpflow.likelihoods.Bernoulli', 'Bernoulli', ([], {}), '()\n', (1342, 1344), False, 'from gpflow.likelihoods import Bernoulli\n'), ((1733, 1766), 'tensorflow.optimizers.Adam', 'tf.optimizers.Adam', (['learning_rate'], {}), '(learning_rate)\n', (1751, 1766), True, 'import tensorflow as tf\n'), ((1780, 1813), 'tensorflow.optimizers.Adam', 'tf.optimizers.Adam', (['learning_rate'], {}), '(learning_rate)\n', (1798, 1813), True, 'import tensorflow as tf\n'), ((3238, 3252), 'gpflow.ci_utils.ci_niter', 'ci_niter', (['(2000)'], {}), '(2000)\n', (3246, 3252), False, 'from gpflow.ci_utils import ci_niter\n'), ((1097, 1130), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['time_points'], {}), '(time_points)\n', (1117, 1130), True, 'import tensorflow as tf\n'), ((1132, 1166), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['observations'], {}), '(observations)\n', (1152, 1166), True, 'import tensorflow as tf\n'), ((2444, 2488), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {'sharex': '(True)', 'sharey': '(True)'}), '(2, 1, sharex=True, sharey=True)\n', (2456, 2488), True, 'import matplotlib.pyplot as plt\n'), ((898, 926), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', 'num_data'], {}), '(0, 10, num_data)\n', (909, 926), True, 'import numpy as np\n'), ((1007, 1035), 'numpy.random.randn', 'np.random.randn', (['num_data', '(1)'], {}), '(num_data, 1)\n', (1022, 1035), True, 'import numpy as np\n'), ((1224, 1257), 'numpy.linspace', 'np.linspace', (['(-1)', '(11)', 'num_inducing'], {}), '(-1, 11, num_inducing)\n', (1235, 1257), True, 'import numpy as np\n'), ((1426, 1472), 'tensorflow.constant', 'tf.constant', (['inducing_points'], {'dtype': 'tf.float64'}), '(inducing_points, dtype=tf.float64)\n', (1437, 1472), 
True, 'import tensorflow as tf\n'), ((955, 992), 'numpy.cos', 'np.cos', (['(2 * np.pi * time_points / 3.0)'], {}), '(2 * np.pi * time_points / 3.0)\n', (961, 992), True, 'import numpy as np\n'), ((2196, 2219), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(200)'], {}), '(0, 10, 200)\n', (2207, 2219), True, 'import numpy as np\n'), ((3390, 3406), 'numpy.mean', 'np.mean', (['elbos_i'], {}), '(elbos_i)\n', (3397, 3406), True, 'import numpy as np\n'), ((3433, 3448), 'numpy.std', 'np.std', (['elbos_i'], {}), '(elbos_i)\n', (3439, 3448), True, 'import numpy as np\n')] |
import abc
import os
import SimpleITK as sitk
import numpy as np
import pymia.data.conversion as conversion
import common.evalutation.numpyfunctions as np_fn
import common.utils.labelhelper as lh
import rechun.eval.helper as helper
import rechun.eval.evaldata as evdata
import rechun.directories as dirs
class Loader:
    """Caching loader for per-subject evaluation data.

    All entries read for a subject are cached; the cache is cleared as soon
    as data for a different subject is requested, so only one subject is
    kept in memory at a time. Cached arrays are returned as copies so that
    callers cannot corrupt the cache.
    """

    class Params:
        """Selects which entries `Loader.get_data` should provide."""

        def __init__(self, misc_entry='probabilities', need_target=True, need_prediction=True, need_t2_mask=False,
                     need_prediction_dist_and_boarder=False, need_gt_dist_and_boarder=False, images_needed: list=None,
                     need_img_props=False) -> None:
            super().__init__()
            self.misc_entry = misc_entry
            self.need_target = need_target
            self.need_prediction = need_prediction
            self.need_t2_mask = need_t2_mask
            self.need_gt_dist_and_boarder = need_gt_dist_and_boarder
            self.need_prediction_dist_and_boarder = need_prediction_dist_and_boarder
            self.images_needed = images_needed
            self.need_img_props = need_img_props

    def __init__(self) -> None:
        super().__init__()
        self.cached_entries = {}
        self.cached_subject_id = None

    def get_data(self, subject_file, params: Params):
        """Collect the entries requested by `params` for one subject.

        Returns a dict keyed by entry name ('target', 'prediction', the
        misc entry, ...). The cache is invalidated when the subject changes.
        """
        if subject_file.subject != self.cached_subject_id:
            self.cached_entries.clear()
            self.cached_subject_id = subject_file.subject

        to_eval = {}
        misc_np, props = self._get_misc_entry(subject_file, params.misc_entry, 'img_properties')
        to_eval[params.misc_entry] = misc_np
        if params.need_img_props:
            to_eval['img_properties'] = props
        if params.need_target:
            to_eval['target'] = self._get_target(subject_file, 'target')
        if params.need_prediction:
            to_eval['prediction'] = self._get_prediction(subject_file, 'prediction')
        if params.need_gt_dist_and_boarder:
            mask, distance = self._get_dist_and_boarder(subject_file, 'target_boarder', 'target_distance',
                                                        'target')
            to_eval['target_boarder'] = mask
            to_eval['target_distance'] = distance
        if params.need_prediction_dist_and_boarder:
            mask, distance = self._get_dist_and_boarder(subject_file, 'prediction_boarder', 'prediction_distance',
                                                        'prediction')
            to_eval['prediction_boarder'] = mask
            to_eval['prediction_distance'] = distance
        if params.need_t2_mask:
            to_eval['mask'] = self._get_t2_mask(subject_file, 'mask')
        if params.images_needed:
            for image_type in params.images_needed:
                to_eval[image_type] = self._get_image(subject_file, image_type)
        return to_eval

    def _get_misc_entry(self, subject_file, entry: str, property_entry: str):
        # Returns (entry array, image properties), reading from disk on a
        # cache miss.
        if entry in self.cached_entries:
            return self.cached_entries[entry].copy(), self.cached_entries[property_entry]  # copy just to be sure that it hasn't been modified
        file_path = subject_file.categories['misc'].entries[entry]
        np_misc, props = conversion.SimpleITKNumpyImageBridge.convert(sitk.ReadImage(file_path))
        self.cached_entries[entry] = np_misc
        self.cached_entries[property_entry] = props
        return self.cached_entries[entry], self.cached_entries[property_entry]

    def _get_target(self, subject_file, entry):
        if entry in self.cached_entries:
            return self.cached_entries[entry].copy()  # copy just to be sure that it hasn't been modified
        file_path = subject_file.categories['labels'].entries['gt']
        target_np = sitk.GetArrayFromImage(sitk.ReadImage(file_path, sitk.sitkUInt8))
        target_np[target_np > 0] = 1  # the labels are 0 to 4 but we only do 0 and 1
        self.cached_entries[entry] = target_np
        return target_np

    def _get_image(self, subject_file, entry):
        if entry in self.cached_entries:
            return self.cached_entries[entry].copy()  # copy just to be sure that it hasn't been modified
        file_path = subject_file.categories['images'].entries[entry]
        image_np = sitk.GetArrayFromImage(sitk.ReadImage(file_path))
        self.cached_entries[entry] = image_np
        return image_np

    def _get_prediction(self, subject_file, entry):
        if entry in self.cached_entries:
            return self.cached_entries[entry].copy()
        file_path = subject_file.categories['labels'].entries[entry]
        prediction_np = sitk.GetArrayFromImage(sitk.ReadImage(file_path, sitk.sitkUInt8))
        self.cached_entries[entry] = prediction_np
        return prediction_np

    def _get_dist_and_boarder(self, subject_file, boarder_entry, dist_entry, prediction_entry):
        # Returns (border mask, distance map) derived from the label entry.
        if boarder_entry in self.cached_entries and dist_entry in self.cached_entries:
            return self.cached_entries[boarder_entry].copy(), self.cached_entries[dist_entry].copy()
        prediction_np = self._get_prediction(subject_file, prediction_entry)
        # BUG FIX: the np.bool alias was removed in NumPy 1.24; the builtin
        # bool is the documented, behavior-identical replacement.
        distance, mask = lh.boarder_mask(prediction_np.astype(bool), distance_in=1, distance_out=1)
        self.cached_entries[boarder_entry] = mask
        self.cached_entries[dist_entry] = distance
        return mask, distance

    def _get_t2_mask(self, subject_file, entry):
        if entry in self.cached_entries:
            return self.cached_entries[entry].copy()  # copy just to be sure that it hasn't been modified
        file_path = subject_file.categories['images'].entries['t2']
        t2_np = sitk.GetArrayFromImage(sitk.ReadImage(file_path))
        mask_np = t2_np > 0  # foreground = non-zero T2 intensity
        self.cached_entries[entry] = mask_np
        return mask_np
class PrepareData(abc.ABC):
    """Interface for transformations applied to the `to_eval` dict."""

    @abc.abstractmethod
    def __call__(self, to_eval: dict) -> dict:
        """Return the (possibly modified) `to_eval` dict."""
class ComposePreparation(PrepareData):
    """Applies a sequence of preparation steps in list order."""

    def __init__(self, prepare_data_list: list) -> None:
        super().__init__()
        self.prepare_data_list = prepare_data_list

    def __call__(self, to_eval: dict) -> dict:
        """Pipe `to_eval` through every preparation step, in order."""
        for prepare_step in self.prepare_data_list:
            to_eval = prepare_step(to_eval)
        return to_eval
class AddBackgroundProbabilities(PrepareData):
    """Extends the foreground probabilities with a background channel."""

    def __call__(self, to_eval: dict) -> dict:
        with_background = helper.add_background_probability(to_eval['probabilities'])
        to_eval['probabilities'] = with_background
        return to_eval
class RescaleLinear(PrepareData):
    """Linearly rescales an entry using fixed (dataset-wide) bounds."""

    def __init__(self, entry: str, min_: float, max_: float, epsilon=1e-5) -> None:
        # epsilon keeps the rescaled values strictly inside (0, 1)
        self.epsilon = epsilon
        self.entry = entry
        self.min = min_
        self.max = max_

    def __call__(self, to_eval: dict) -> dict:
        rescaled = helper.rescale_uncertainties(to_eval[self.entry], self.min, self.max, self.epsilon)
        to_eval[self.entry] = rescaled
        return to_eval
class RescaleSubjectMinMax(PrepareData):
    """Rescales an entry using its own (per-subject) minimum and maximum."""

    def __init__(self, entry: str, epsilon=1e-5) -> None:
        self.entry = entry
        # epsilon keeps the rescaled values strictly inside (0, 1)
        self.epsilon = epsilon

    def __call__(self, to_eval: dict) -> dict:
        values = to_eval[self.entry]
        to_eval[self.entry] = helper.rescale_uncertainties(values, values.min(), values.max(), self.epsilon)
        return to_eval
class ToForegroundProbabilities(PrepareData):
    """Converts confidence values to foreground probabilities."""

    def __call__(self, to_eval: dict) -> dict:
        foreground = helper.uncertainty_to_foreground_probabilities(to_eval['probabilities'], to_eval['prediction'])
        to_eval['probabilities'] = foreground
        return to_eval
class ToEntropy(PrepareData):
    """Computes the normalized entropy of the class probabilities."""

    def __init__(self, entropy_entry='uncertainty') -> None:
        super().__init__()
        self.nb_classes = 2  # everything is binary until now
        self.entropy_entry = entropy_entry

    def __call__(self, to_eval: dict) -> dict:
        probabilities = to_eval['probabilities']
        if probabilities.shape[-1] != self.nb_classes:
            raise ValueError('last dimension of probability array ({}) must be equal to nb_classes ({})'
                             .format(probabilities.shape, self.nb_classes))
        # dividing by log(nb_classes) normalizes the entropy to [0, 1]
        to_eval[self.entropy_entry] = np_fn.entropy(probabilities) / np.log(self.nb_classes)
        helper.check_min_max(to_eval[self.entropy_entry], only_warn=True)
        return to_eval
class MoveEntry(PrepareData):
    """Copies the value stored under one dict key to another key."""

    def __init__(self, from_entry: str, to_entry: str) -> None:
        super().__init__()
        self.from_entry = from_entry
        self.to_entry = to_entry

    def __call__(self, to_eval: dict) -> dict:
        """Store `to_eval[from_entry]` under `to_entry` as well."""
        to_eval[self.to_entry] = to_eval[self.from_entry]
        return to_eval
def get_probability_preparation(eval_data: evdata.EvalData, rescale_confidence='subject', rescale_sigma='subject',
                                min_max_dir: str = None):
    """Builds the preparation pipeline that yields class probabilities.

    Returns a (ComposePreparation, identifier) tuple. For a 'probabilities'
    entry only a background channel is added; 'confidence' and
    sigma/log-variance entries are optionally rescaled and then converted
    to foreground/background probabilities.

    Note: the original 'confidence' and sigma branches were duplicated code
    differing only in the rescale mode, so they were merged.
    """
    prepare = []
    if eval_data.confidence_entry == 'probabilities':
        prepare.append(AddBackgroundProbabilities())
        return ComposePreparation(prepare), eval_data.id_

    # 'confidence' and sigma/log-variance share the same pipeline; they only
    # differ in which rescaling mode applies.
    rescale_type = rescale_confidence if eval_data.confidence_entry == 'confidence' else rescale_sigma
    id_ = eval_data.id_
    prep, prep_id = _get_rescale_prep_and_idstr(eval_data, rescale_type, min_max_dir)
    if prep is not None:
        prepare.append(prep)
        id_ += prep_id
    prepare.extend([
        MoveEntry(eval_data.confidence_entry, 'probabilities'),
        ToForegroundProbabilities(),
        AddBackgroundProbabilities()
    ])
    return ComposePreparation(prepare), id_
def get_uncertainty_preparation(eval_data: evdata.EvalData, rescale_confidence='', rescale_sigma='global',
                                min_max_dir: str = None):
    """Builds the preparation pipeline that yields an 'uncertainty' entry.

    Probabilities are converted to normalized entropy; 'confidence' and
    sigma/log-variance entries are optionally rescaled and moved to the
    'uncertainty' key. Returns a (ComposePreparation, identifier) tuple.

    Note: the original 'confidence' and sigma branches were duplicated code
    differing only in the rescale mode, so they were merged.
    """
    prepare = []
    if eval_data.confidence_entry == 'probabilities':
        prepare.append(AddBackgroundProbabilities())
        prepare.append(ToEntropy())
        return ComposePreparation(prepare), eval_data.id_

    # 'confidence' and sigma/log-variance only differ in the rescaling mode.
    rescale_type = rescale_confidence if eval_data.confidence_entry == 'confidence' else rescale_sigma
    id_ = eval_data.id_
    prep, prep_id = _get_rescale_prep_and_idstr(eval_data, rescale_type, min_max_dir)
    if prep is not None:
        prepare.append(prep)
        id_ += prep_id
    prepare.append(MoveEntry(eval_data.confidence_entry, 'uncertainty'))
    return ComposePreparation(prepare), id_
def _get_rescale_prep_and_idstr(eval_data: evdata.EvalData, rescale_type: str, min_max_dir: str = None):
    """Returns (rescale preparation, identifier suffix) for the given mode.

    'global' reads dataset-wide min/max bounds from `min_max_dir`;
    'subject' rescales with per-subject bounds; anything else yields
    (None, '').
    """
    if rescale_type == 'global':
        min_max_path = os.path.join(min_max_dir, dirs.MINMAX_PLACEHOLDER.format(eval_data.id_))
        global_min, global_max = helper.read_min_max(min_max_path)
        return RescaleLinear(eval_data.confidence_entry, global_min, global_max), '_globalrescale'
    if rescale_type == 'subject':
        return RescaleSubjectMinMax(eval_data.confidence_entry), '_rescale'
    return None, ''
def get_confidence_entry_preparation(eval_data: evdata.EvalData, to_entry):
    """Returns (MoveEntry, identifier) relocating the confidence entry.

    The original implementation branched on the entry type
    ('probabilities', 'confidence', sigma/log-variance), but every branch
    returned the identical value, so the branching was collapsed.
    """
    return MoveEntry(eval_data.confidence_entry, to_entry), eval_data.id_
| [
"rechun.eval.helper.add_background_probability",
"common.evalutation.numpyfunctions.entropy",
"rechun.eval.helper.rescale_uncertainties",
"rechun.eval.helper.uncertainty_to_foreground_probabilities",
"rechun.eval.helper.check_min_max",
"rechun.eval.helper.read_min_max",
"numpy.log",
"rechun.directorie... | [((6359, 6418), 'rechun.eval.helper.add_background_probability', 'helper.add_background_probability', (["to_eval['probabilities']"], {}), "(to_eval['probabilities'])\n", (6392, 6418), True, 'import rechun.eval.helper as helper\n'), ((6778, 6866), 'rechun.eval.helper.rescale_uncertainties', 'helper.rescale_uncertainties', (['to_eval[self.entry]', 'self.min', 'self.max', 'self.epsilon'], {}), '(to_eval[self.entry], self.min, self.max, self.\n epsilon)\n', (6806, 6866), True, 'import rechun.eval.helper as helper\n'), ((7491, 7590), 'rechun.eval.helper.uncertainty_to_foreground_probabilities', 'helper.uncertainty_to_foreground_probabilities', (["to_eval['probabilities']", "to_eval['prediction']"], {}), "(to_eval['probabilities'],\n to_eval['prediction'])\n", (7537, 7590), True, 'import rechun.eval.helper as helper\n'), ((8289, 8354), 'rechun.eval.helper.check_min_max', 'helper.check_min_max', (['to_eval[self.entropy_entry]'], {'only_warn': '(True)'}), '(to_eval[self.entropy_entry], only_warn=True)\n', (8309, 8354), True, 'import rechun.eval.helper as helper\n'), ((11341, 11374), 'rechun.eval.helper.read_min_max', 'helper.read_min_max', (['min_max_path'], {}), '(min_max_path)\n', (11360, 11374), True, 'import rechun.eval.helper as helper\n'), ((3248, 3273), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['file_path'], {}), '(file_path)\n', (3262, 3273), True, 'import SimpleITK as sitk\n'), ((3757, 3798), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['file_path', 'sitk.sitkUInt8'], {}), '(file_path, sitk.sitkUInt8)\n', (3771, 3798), True, 'import SimpleITK as sitk\n'), ((4262, 4287), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['file_path'], {}), '(file_path)\n', (4276, 4287), True, 'import SimpleITK as sitk\n'), ((4622, 4663), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['file_path', 'sitk.sitkUInt8'], {}), '(file_path, sitk.sitkUInt8)\n', (4636, 4663), True, 'import SimpleITK as sitk\n'), ((5645, 5670), 'SimpleITK.ReadImage', 'sitk.ReadImage', 
(['file_path'], {}), '(file_path)\n', (5659, 5670), True, 'import SimpleITK as sitk\n'), ((8232, 8254), 'common.evalutation.numpyfunctions.entropy', 'np_fn.entropy', (['prob_np'], {}), '(prob_np)\n', (8245, 8254), True, 'import common.evalutation.numpyfunctions as np_fn\n'), ((8257, 8280), 'numpy.log', 'np.log', (['self.nb_classes'], {}), '(self.nb_classes)\n', (8263, 8280), True, 'import numpy as np\n'), ((11273, 11318), 'rechun.directories.MINMAX_PLACEHOLDER.format', 'dirs.MINMAX_PLACEHOLDER.format', (['eval_data.id_'], {}), '(eval_data.id_)\n', (11303, 11318), True, 'import rechun.directories as dirs\n')] |
import math
import torch
import torch.nn.functional as F
from numpy import prod
from torch.nn import init
from .matrix import tt_to_matrix
class TTLinear(torch.nn.Module):
    """Linear layer whose weight matrix is stored in Tensor-Train format.

    The layer keeps one 4-d core per tensor mode instead of a dense weight.
    In ``forward`` the input is either contracted against the cores directly
    or, when ``reassemble`` is set, the dense matrix is rebuilt first and the
    fused ``F.linear`` kernel is used.

    References
    ----------
    .. [1] <NAME>., <NAME>., <NAME>., & <NAME>. (2015).
        'Tensorizing neural networks.'' In Advances in neural information
        processing systems (pp. 442-450).
    """
    def __init__(self, in_shape, out_shape, *, rank, bias=True,
                 reassemble=True):
        assert len(in_shape) == len(out_shape)
        if not isinstance(rank, (list, tuple)):
            # Scalar rank means "the same internal rank everywhere";
            # the two boundary ranks of a TT decomposition are always 1.
            rank = [1] + (len(in_shape) - 1) * [rank] + [1]
        if not all(isinstance(r, int) for r in rank):
            raise TypeError("`rank` must be an int or a list of ints.")
        assert len(rank) == len(out_shape) + 1
        assert rank[0] == rank[-1]
        super().__init__()
        self.reassemble, self.rank = reassemble, rank
        self.shape = in_shape, out_shape
        # One core of shape (r_left, n_in, n_out, r_right) per mode.
        core_params = []
        for r_left, n_in, n_out, r_right in zip(rank[:-1], in_shape,
                                                out_shape, rank[1:]):
            core_params.append(
                torch.nn.Parameter(torch.Tensor(r_left, n_in, n_out, r_right)))
        self.cores = torch.nn.ParameterList(core_params)
        if bias:
            self.bias = torch.nn.Parameter(torch.Tensor(*out_shape).flatten())
        else:
            self.register_parameter("bias", None)
        self.reset_parameters()
    def reset_parameters(self):
        """Re-initialize every core and the bias in place."""
        # WIP See the NIPS 2015 paper on the proper initialization
        for core in self.cores:
            init.normal_(core, std=0.02)
        if self.bias is None:
            return
        bound = 1. / math.sqrt(prod(self.shape[0]))
        init.uniform_(self.bias, -bound, bound)
    @property
    def weight(self):
        """Dense weight matrix reassembled from the TT cores."""
        return tt_to_matrix(self.shape, *self.cores).t()
    def forward(self, input, assemble=False):
        if self.reassemble:
            # Rebuild the dense matrix and use the standard linear kernel.
            return F.linear(input, self.weight, self.bias)
        *batch_dims, _ = input.shape
        result = input.view(-1, *self.shape[0], 1)
        for core in self.cores:
            # Contract the current input mode and the running TT-rank axis.
            result = torch.tensordot(result, core, dims=[[1, -1], [1, 0]])
        # The first dim of `result` is the flattened `batch_dims`; the rest
        # collapse to exactly prod(self.shape[1]) output features.
        result = result.reshape(*batch_dims, -1)
        if self.bias is None:
            return result
        return result + self.bias
    def extra_repr(self):
        dims = ["x".join(map(str, nm)) for nm in zip(*self.shape)]
        return "[" + "]x[".join(dims) + f"] ({repr(self.rank)[1:-1]})"
| [
"torch.nn.functional.linear",
"numpy.prod",
"torch.tensordot",
"torch.Tensor",
"torch.nn.init.uniform_",
"torch.nn.init.normal_"
] | [((1570, 1598), 'torch.nn.init.normal_', 'init.normal_', (['core'], {'std': '(0.02)'}), '(core, std=0.02)\n', (1582, 1598), False, 'from torch.nn import init\n'), ((1702, 1741), 'torch.nn.init.uniform_', 'init.uniform_', (['self.bias', '(-bound)', 'bound'], {}), '(self.bias, -bound, bound)\n', (1715, 1741), False, 'from torch.nn import init\n'), ((1930, 1969), 'torch.nn.functional.linear', 'F.linear', (['input', 'self.weight', 'self.bias'], {}), '(input, self.weight, self.bias)\n', (1938, 1969), True, 'import torch.nn.functional as F\n'), ((2105, 2156), 'torch.tensordot', 'torch.tensordot', (['data', 'core'], {'dims': '[[1, -1], [1, 0]]'}), '(data, core, dims=[[1, -1], [1, 0]])\n', (2120, 2156), False, 'import torch\n'), ((1112, 1138), 'torch.Tensor', 'torch.Tensor', (['r0', 'n', 'm', 'r1'], {}), '(r0, n, m, r1)\n', (1124, 1138), False, 'import torch\n'), ((1669, 1688), 'numpy.prod', 'prod', (['self.shape[0]'], {}), '(self.shape[0])\n', (1673, 1688), False, 'from numpy import prod\n'), ((1290, 1314), 'torch.Tensor', 'torch.Tensor', (['*out_shape'], {}), '(*out_shape)\n', (1302, 1314), False, 'import torch\n')] |
import torch
from torch import nn
import summary
import os
import sys
import numpy as np
import time
from torchvision import transforms
from torch.utils.data import DataLoader
import utils
from models import AutoEncoderCov3D, AutoEncoderCov3DMem
import data.utils as data_utils
import argparse
from tqdm import tqdm
import aug_data as aug_data
import utils.eval as eval_utils
# Evaluation script for a (Mem)AE video anomaly-detection model: reconstruct
# every test clip, record a per-clip reconstruction error, score the errors
# against the ground-truth labels (eval_utils.eval_video2) and cache them so
# a later run can skip the forward pass entirely.
parser = argparse.ArgumentParser(description="Memorizing_Normality")
parser.add_argument('--dataset', type=str, default="UCSDped2")
parser.add_argument('--dataset_augment_type', type=str, default="training", help='the augmented version or not augmented version')
parser.add_argument('--dataset_augment_test_type', type=str, default='original_testing', help='the augmented version')
parser.add_argument("--version", type=int, default=1)
parser.add_argument("--ckpt_step", type=int, default=59)
parser.add_argument("--data_path", type=str, default='/project/bo/anomaly_data/')
parser.add_argument("--EntropyLossWeight", type=float, default=0)
parser.add_argument("--lr", type=float, default=1e-4)
args = parser.parse_args()
device = "cuda"
height, width = 256, 256  # frames are resized to 256x256
ch = 1  # single grayscale channel
num_frame = 16  # clip length fed to the 3D conv model
batch_size=1
ModelName = "MemAE"
# Checkpoint directory is keyed by dataset, augmentation type, lr, entropy
# weight and run version so different training configs never collide.
model_dir = '/project/bo/exp_data/memory_normal/%s/%slr_%.5f_entropyloss_%.5f_version_%d/' % (args.dataset,
                                                                                             args.dataset_augment_type,
                                                                                             args.lr,
                                                                                             args.EntropyLossWeight,
                                                                                             args.version)
# Redirect stdout into a per-run log file inside the model directory;
# restored (and the file closed) at the bottom of the script.
orig_stdout = sys.stdout
if args.dataset_augment_test_type == "frames/testing/":
    first = "original_1.00"
else:
    first = args.dataset_augment_test_type
f = open(os.path.join(model_dir, 'output_%s_%d.txt' % (first, args.ckpt_step)),'w')
sys.stdout= f
ckpt_dir = model_dir + "model-00%d.pt" % args.ckpt_step
# Normalize any "...venue..." spelling of the dataset name to "Avenue".
if "venue" in args.dataset:
    args.dataset = "Avenue"
gt_file = "/project/bo/anomaly_data/%s/gt.npy" % args.dataset
if args.dataset_augment_test_type == "frames/testing/":
    save_path = model_dir + "recons_error_original_1.0.npy"
else:
    save_path = model_dir + "recons_error_%s.npy" % args.dataset_augment_test_type
# Fast path: if the reconstruction errors were already computed and cached,
# just re-score them and quit.
if os.path.isfile(save_path):
    recons_error = np.load(save_path)
    eval_utils.eval_video2(gt_file, recons_error, args.dataset)
    exit()
# For augmented Avenue testing, the rain/brightness frames are generated on
# demand if they are not cached on disk yet.
if args.dataset_augment_test_type != "frames/testing/" and "venue" in args.dataset:
    rain_type = str(args.dataset_augment_test_type.strip().split('_')[0])
    brightness = int(args.dataset_augment_test_type.strip().split('_')[-1])/10
    data_dir = args.data_path + "Avenue/frames/%s_testing/bright_%.2f/" % (rain_type, brightness)
    if not os.path.exists(data_dir):
        aug_data.save_avenue_rain_or_bright(args.data_path, rain_type, True, "testing", bright_space=brightness)
else:
    data_dir = args.data_path + '/%s/%s/' % (args.dataset, args.dataset_augment_test_type)
# Input preprocessing: resize, grayscale, to-tensor, normalize to [-1, 1].
frame_trans = transforms.Compose([
    transforms.Resize([height, width]),
    transforms.Grayscale(num_output_channels=1),
    transforms.ToTensor(),
    transforms.Normalize([0.5], [0.5]),
    ])
unorm_trans = utils.UnNormalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
print("------Data folder", data_dir)
print("------Model folder", model_dir)
print("------Restored ckpt", ckpt_dir)
data_loader = data_utils.DataLoader(data_dir, frame_trans, time_step=num_frame-1, num_pred=1)
# batch_size = [v for v in range(50)[1:] if len(data_loader) % v == 0][-1]
video_data_loader = DataLoader(data_loader, batch_size=batch_size, shuffle=False)
# Build the memory-augmented autoencoder and restore the checkpoint weights;
# inference only, so gradients are disabled.
chnum_in_ = 1
mem_dim_in = 2000
sparse_shrink_thres = 0.0025
model = AutoEncoderCov3DMem(chnum_in_, mem_dim_in, shrink_thres=sparse_shrink_thres)
model_para = torch.load(ckpt_dir)
model.load_state_dict(model_para)
model.requires_grad_(False)
model.to(device)
model.eval()
img_crop_size = 0
recon_error_list = [None] * len(video_data_loader)
# recon_error_list = []
time_init = time.time()
progress_bar = tqdm(video_data_loader)
for batch_idx, frames in enumerate(progress_bar):
    progress_bar.update()
    frames = frames.reshape([batch_size, num_frame, ch, height, width])
    frames = frames.permute(0, 2, 1, 3, 4)  # -> (N, C, T, H, W) for Conv3d
    frames = frames.to(device)
    if (ModelName == 'AE'):
        recon_frames = model(frames)
        ###### calculate reconstruction error (MSE)
        recon_np = utils.vframes2imgs(unorm_trans(recon_frames.data), step=1, batch_idx=0)
        input_np = utils.vframes2imgs(unorm_trans(frames.data), step=1, batch_idx=0)
        r = utils.crop_image(recon_np, img_crop_size) - utils.crop_image(input_np, img_crop_size)
        # recon_error = np.mean(sum(r**2)**0.5)
        recon_error = np.mean(r ** 2)  # **0.5
    elif (ModelName == 'MemAE'):
        recon_res = model(frames)
        recon_frames = recon_res['output']
        recon_np = utils.vframes2imgs(unorm_trans(recon_frames.data), step=1, batch_idx=0)
        input_np = utils.vframes2imgs(unorm_trans(frames.data), step=1, batch_idx=0)
        r = utils.crop_image(recon_np, img_crop_size) - utils.crop_image(input_np, img_crop_size)
        # NOTE(review): this branch scores the mean Euclidean error while the
        # AE branch above uses plain MSE -- presumably intentional, but the
        # two branches are not comparable; confirm before mixing models.
        sp_error_map = sum(r ** 2)**0.5
        recon_error = np.mean(sp_error_map.flatten())
    else:
        recon_error = -1
        print('Wrong ModelName.')
    # recon_error_list.append(recon_error)
    recon_error_list[batch_idx] = recon_error
# recon_error_list = [v for j in recon_error_list for v in j]
print("The length of the reconstruction error is ", len(recon_error_list))
print("The length of the testing images is", len(data_loader))
print("............start to checking the anomaly detection auc score...................")
print("............use ckpt dir at step %d" % args.ckpt_step)
eval_utils.eval_video2(gt_file, recon_error_list, args.dataset)
time_use = time.time() - time_init
print("FPS-------------", len(video_data_loader) / (time_use))
# Restore stdout and close the log file.
sys.stdout = orig_stdout
f.close()
# Cache the raw errors so a future run can take the fast path above.
if args.dataset_augment_test_type == "frames/testing/":
    save_path = model_dir + "recons_error_original_1.0"
else:
    save_path = model_dir + "recons_error_%s" % args.dataset_augment_test_type
np.save(save_path, recon_error_list)
| [
"utils.UnNormalize",
"torchvision.transforms.Grayscale",
"utils.crop_image",
"numpy.save",
"os.path.exists",
"numpy.mean",
"data.utils.DataLoader",
"aug_data.save_avenue_rain_or_bright",
"argparse.ArgumentParser",
"utils.eval.eval_video2",
"torchvision.transforms.ToTensor",
"os.path.isfile",
... | [((387, 446), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Memorizing_Normality"""'}), "(description='Memorizing_Normality')\n", (410, 446), False, 'import argparse\n'), ((2411, 2436), 'os.path.isfile', 'os.path.isfile', (['save_path'], {}), '(save_path)\n', (2425, 2436), False, 'import os\n'), ((3372, 3432), 'utils.UnNormalize', 'utils.UnNormalize', ([], {'mean': '(0.5, 0.5, 0.5)', 'std': '(0.5, 0.5, 0.5)'}), '(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))\n', (3389, 3432), False, 'import utils\n'), ((3563, 3648), 'data.utils.DataLoader', 'data_utils.DataLoader', (['data_dir', 'frame_trans'], {'time_step': '(num_frame - 1)', 'num_pred': '(1)'}), '(data_dir, frame_trans, time_step=num_frame - 1,\n num_pred=1)\n', (3584, 3648), True, 'import data.utils as data_utils\n'), ((3740, 3801), 'torch.utils.data.DataLoader', 'DataLoader', (['data_loader'], {'batch_size': 'batch_size', 'shuffle': '(False)'}), '(data_loader, batch_size=batch_size, shuffle=False)\n', (3750, 3801), False, 'from torch.utils.data import DataLoader\n'), ((3873, 3949), 'models.AutoEncoderCov3DMem', 'AutoEncoderCov3DMem', (['chnum_in_', 'mem_dim_in'], {'shrink_thres': 'sparse_shrink_thres'}), '(chnum_in_, mem_dim_in, shrink_thres=sparse_shrink_thres)\n', (3892, 3949), False, 'from models import AutoEncoderCov3D, AutoEncoderCov3DMem\n'), ((3963, 3983), 'torch.load', 'torch.load', (['ckpt_dir'], {}), '(ckpt_dir)\n', (3973, 3983), False, 'import torch\n'), ((4182, 4193), 'time.time', 'time.time', ([], {}), '()\n', (4191, 4193), False, 'import time\n'), ((4209, 4232), 'tqdm.tqdm', 'tqdm', (['video_data_loader'], {}), '(video_data_loader)\n', (4213, 4232), False, 'from tqdm import tqdm\n'), ((5929, 5992), 'utils.eval.eval_video2', 'eval_utils.eval_video2', (['gt_file', 'recon_error_list', 'args.dataset'], {}), '(gt_file, recon_error_list, args.dataset)\n', (5951, 5992), True, 'import utils.eval as eval_utils\n'), ((6324, 6360), 'numpy.save', 'np.save', (['save_path', 
'recon_error_list'], {}), '(save_path, recon_error_list)\n', (6331, 6360), True, 'import numpy as np\n'), ((1926, 1995), 'os.path.join', 'os.path.join', (['model_dir', "('output_%s_%d.txt' % (first, args.ckpt_step))"], {}), "(model_dir, 'output_%s_%d.txt' % (first, args.ckpt_step))\n", (1938, 1995), False, 'import os\n'), ((2457, 2475), 'numpy.load', 'np.load', (['save_path'], {}), '(save_path)\n', (2464, 2475), True, 'import numpy as np\n'), ((2480, 2539), 'utils.eval.eval_video2', 'eval_utils.eval_video2', (['gt_file', 'recons_error', 'args.dataset'], {}), '(gt_file, recons_error, args.dataset)\n', (2502, 2539), True, 'import utils.eval as eval_utils\n'), ((6004, 6015), 'time.time', 'time.time', ([], {}), '()\n', (6013, 6015), False, 'import time\n'), ((2907, 2931), 'os.path.exists', 'os.path.exists', (['data_dir'], {}), '(data_dir)\n', (2921, 2931), False, 'import os\n'), ((2941, 3049), 'aug_data.save_avenue_rain_or_bright', 'aug_data.save_avenue_rain_or_bright', (['args.data_path', 'rain_type', '(True)', '"""testing"""'], {'bright_space': 'brightness'}), "(args.data_path, rain_type, True,\n 'testing', bright_space=brightness)\n", (2976, 3049), True, 'import aug_data as aug_data\n'), ((3187, 3221), 'torchvision.transforms.Resize', 'transforms.Resize', (['[height, width]'], {}), '([height, width])\n', (3204, 3221), False, 'from torchvision import transforms\n'), ((3231, 3274), 'torchvision.transforms.Grayscale', 'transforms.Grayscale', ([], {'num_output_channels': '(1)'}), '(num_output_channels=1)\n', (3251, 3274), False, 'from torchvision import transforms\n'), ((3284, 3305), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3303, 3305), False, 'from torchvision import transforms\n'), ((3315, 3349), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['[0.5]', '[0.5]'], {}), '([0.5], [0.5])\n', (3335, 3349), False, 'from torchvision import transforms\n'), ((4916, 4931), 'numpy.mean', 'np.mean', (['(r ** 2)'], {}), '(r ** 
2)\n', (4923, 4931), True, 'import numpy as np\n'), ((4760, 4801), 'utils.crop_image', 'utils.crop_image', (['recon_np', 'img_crop_size'], {}), '(recon_np, img_crop_size)\n', (4776, 4801), False, 'import utils\n'), ((4804, 4845), 'utils.crop_image', 'utils.crop_image', (['input_np', 'img_crop_size'], {}), '(input_np, img_crop_size)\n', (4820, 4845), False, 'import utils\n'), ((5239, 5280), 'utils.crop_image', 'utils.crop_image', (['recon_np', 'img_crop_size'], {}), '(recon_np, img_crop_size)\n', (5255, 5280), False, 'import utils\n'), ((5283, 5324), 'utils.crop_image', 'utils.crop_image', (['input_np', 'img_crop_size'], {}), '(input_np, img_crop_size)\n', (5299, 5324), False, 'import utils\n')] |
import sys
import numpy as np
from six import StringIO
from gym_factored.envs.base import DiscreteEnv
# Action identifiers for the two discrete actions.  In state x == 1, action A
# always incurs a cost of 1, and which action is rewarded depends on y
# (B is rewarded when y == 0, A otherwise) -- see DifficultCMDPEnv.__init__.
A = 0
B = 1
class DifficultCMDPEnv(DiscreteEnv):
    """A small constrained MDP that is deliberately difficult.

    A state encodes a pair (x, y) with x in {0, 1, 2} and y in {0, 1}.
    From x == 0 the agent moves to x == 1, where y = 0 occurs with
    probability ``prob_y_zero``.  In x == 1 the episode terminates: action A
    carries a cost of 1, and the rewarded action depends on y (B pays off
    when y == 0, A pays off otherwise).  x == 2 is absorbing.
    """
    def __init__(self, prob_y_zero=0.1):
        self.domains = [[0, 1, 2], [0, 1]]
        self.ns = ns = 6
        na = 2
        # Start uniformly in one of the two x == 0 states.
        isd = np.array([0.5, 0.5, 0, 0, 0, 0])
        p = {state: {action: [] for action in range(na)}
             for state in range(ns)}
        for state in range(ns):
            x, y = list(self.decode(state))
            for action in range(na):
                transitions = p[state][action]
                if x == 0:
                    # Advance to x == 1; y is resampled with
                    # P(y = 0) = prob_y_zero.  Zero-probability branches
                    # are omitted from the transition list.
                    if prob_y_zero > 0:
                        transitions.append(
                            (prob_y_zero, self.encode(1, 0), 0, False, {}))
                    if 1 - prob_y_zero:
                        transitions.append(
                            (1 - prob_y_zero, self.encode(1, 1), 0, False, {}))
                elif x == 1:
                    # Terminal step: action A always costs 1; the reward is
                    # earned only by the action matching the hidden variable.
                    info = {'cost': int(action == A)}
                    rewarded_action = B if y == 0 else A
                    transitions.append((1, self.encode(2, y),
                                        int(action == rewarded_action),
                                        True, info))
                else:
                    # x == 2 is absorbing with zero reward.
                    transitions.append((1, state, 0, True, {}))
        DiscreteEnv.__init__(self, ns, na, p, isd, domains=self.domains)
    def render(self, mode='human'):
        """Draw the 3x2 grid, marking the current (x, y) cell with '*'."""
        outfile = sys.stdout if mode != 'ansi' else StringIO()
        grid = [["»"] * len(self.domains[0]) for _ in self.domains[1]]
        x, y = self.decode(self.s)
        grid[y][x] = "*"
        for row in grid:
            outfile.write("".join(row) + "\n")
        if self.lastaction is not None:
            outfile.write("last action: {}\n".format(self.lastaction))
        if mode != 'human':
            return outfile
| [
"numpy.array",
"six.StringIO",
"gym_factored.envs.base.DiscreteEnv.__init__"
] | [((358, 390), 'numpy.array', 'np.array', (['[0.5, 0.5, 0, 0, 0, 0]'], {}), '([0.5, 0.5, 0, 0, 0, 0])\n', (366, 390), True, 'import numpy as np\n'), ((1300, 1364), 'gym_factored.envs.base.DiscreteEnv.__init__', 'DiscreteEnv.__init__', (['self', 'ns', 'na', 'p', 'isd'], {'domains': 'self.domains'}), '(self, ns, na, p, isd, domains=self.domains)\n', (1320, 1364), False, 'from gym_factored.envs.base import DiscreteEnv\n'), ((1420, 1430), 'six.StringIO', 'StringIO', ([], {}), '()\n', (1428, 1430), False, 'from six import StringIO\n')] |
import os
import time
import math
import transforms3d
import cv2
import random
import numpy as np
from gym import spaces
from webots_web_log_interface.interface import WebotsGameLogParser
from active_soccer_vision.sim.ball import ball_position_gen, ball_position_player, Ball
from active_soccer_vision.sim.robot import robot_position_gen, robot_position_player, robot_orientation_gen, robot_orientation_player, Robot
from active_soccer_vision.sim.camera import Camera
# Absolute directory of this file; used below to resolve resources (the
# field map image and the Webots game logs) relative to the package instead
# of the current working directory.
__location__ = os.path.realpath(
    os.path.join(os.getcwd(), os.path.dirname(__file__)))
class SoccerWorldSim:
    """2D soccer-field simulation for training an active-vision camera policy.

    Holds a ball and several robots -- either replayed from recorded Webots
    game logs or synthetically generated -- plus a pan/tilt camera mounted on
    the first robot.  ``step`` advances the world one tick, applies the
    camera action and returns the RL observation; ``render`` draws a debug
    image of the field.  All dimensions and modes come from ``config``.
    """
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.render_resolution = self.config['sim']['render_resolution']
        self.field_size = self.config['sim']['field_size']
        self.time_delta = self.config['sim']['time_delta']
        self.num_robots = self.config['misc']['num_robots']
        # NOTE(review): both operands read the same key -- the second one
        # presumably should be config['robot']['recorded']; confirm.
        self.load_recordings = self.config['ball']['recorded'] or self.config['ball']['recorded']
        self.game_log_paths = self.config['player']['game_logs']
        random.shuffle(self.game_log_paths)
        # Load render background
        img = cv2.imread(os.path.join(__location__, "..", self.config['sim']['map']))
        self.field_map = cv2.resize(
            img,
            (self.render_resolution * self.field_size[0], self.render_resolution * self.field_size[1]))
        # Load the game log if needed; a random window long enough for the
        # configured episode length is selected from the recording.
        if self.load_recordings:
            self.webots_log_loader = WebotsGameLogParser(os.path.join(__location__, "..", self.game_log_paths[0]), verbose=False)
            self.webots_log_loader.start = random.randrange(0,
                int(self.webots_log_loader.get_max_player_timestamp() - (self.config['sim']['length'] + 1) * self.time_delta))
            self.webots_log_loader.stop = self.webots_log_loader.start + (self.config['sim']['length'] + 2) * self.time_delta
        # Check if we use the recorded or generated ball movements
        if self.config['ball']['recorded']:
            ball_position_generator = ball_position_player(
                game_log=self.webots_log_loader,
                time_delta=self.time_delta,
                start=self.webots_log_loader.start,
                stop=self.webots_log_loader.stop,
                ball_position_interval=(
                    self.field_size[0],
                    self.field_size[1]),
                ball_noise=self.config['ball']['gen']['ball_noise'])
        else:
            ball_position_generator = ball_position_gen(
                time_delta=self.time_delta,
                ball_init_position=(
                    random.uniform(0, self.field_size[0]),
                    random.uniform(0, self.field_size[1])),
                ball_position_interval=(
                    self.field_size[0],
                    self.field_size[1]),
                **self.config['ball']['gen'])
        self.ball = Ball(ball_position_generator, self.time_delta)
        # Load and shuffle robots from recording or create dummy identifiers
        if self.config['robot']['recorded']:
            robot_names = self.webots_log_loader.x3d.get_player_names()
            random.shuffle(robot_names)
            assert len(robot_names) >= self.num_robots, "More robots present in recording than in the config"
            robot_names = robot_names[:self.num_robots]
        else:
            robot_names = list(range(self.num_robots))
        # Create all robots
        self.robots = []
        for name in robot_names:
            # Check if we use the recorded or generated robot movements
            if self.config['robot']['recorded']:
                robot_orientation_generator = robot_orientation_player(
                    game_log=self.webots_log_loader,
                    start=self.webots_log_loader.start,
                    stop=self.webots_log_loader.stop,
                    time_delta=self.time_delta,
                    robot=name,
                    noise=self.config['robot']['gen']['position']['noise'])
                robot_position_generator = robot_position_player(
                    game_log=self.webots_log_loader,
                    start=self.webots_log_loader.start,
                    stop=self.webots_log_loader.stop,
                    robot=name,
                    time_delta=self.time_delta,
                    robot_position_interval=(
                        self.field_size[0],
                        self.field_size[1]),
                    noise=self.config['robot']['gen']['position']['noise'])
            else:
                robot_position_generator = robot_position_gen(
                    time_delta=self.time_delta,
                    robot_init_position=(
                        random.uniform(0, self.field_size[0]),
                        random.uniform(1, self.field_size[1])),
                    robot_position_interval=(
                        self.field_size[0],
                        self.field_size[1]),
                    **self.config['robot']['gen']['position'])
                robot_orientation_generator = robot_orientation_gen(
                    time_delta=self.time_delta,
                    robot_init_orientation=(0.0, 0.0, random.uniform(0, math.tau)),
                    **self.config['robot']['gen']['orientation'])
            self.robots.append(
                Robot(
                    robot_position_generator,
                    robot_orientation_generator,
                    self.config['robot']['height'],
                    self.time_delta))
        # The first robot carries the camera; the rest are observation targets.
        self.my_robot = self.robots[0]
        self.other_robots = self.robots[1:]
        self.camera = Camera(fov=math.radians(70), width=1920, height=1080, robot=self.my_robot)
        # History map recording how recently each field cell was viewed
        self.view_history = np.zeros((
            self.field_size[1] * self.config['rl']['observation']['maps']['resolution'],
            self.field_size[0] * self.config['rl']['observation']['maps']['resolution'], 1), dtype=np.uint8)
        self._last_pan = 0.5
        self._last_tilt = 0.5
        self._sim_step = 0
    def step(self, action):
        """Advance the world one tick, apply the camera action, build the observation.

        ``action`` is either a discrete index (mapped onto fixed pan/tilt
        set-points below) or a continuous array in [-1, 1]; interpretation
        is selected by ``config['rl']['action']``.  Returns a flat float32
        vector, or a dict with ``"vec"`` and ``"map"`` entries when
        observation maps are enabled.
        """
        if self.config['rl']['action']['space'] == "discrete":
            # Map the discrete action onto a (pan, tilt) set-point pair.
            tmp_action = np.zeros(2)
            if action == 0:
                tmp_action[0] = 0.5
                tmp_action[1] = 0.5
            elif action == 1:
                tmp_action[0] = 1.0
                tmp_action[1] = 0.5
            elif action == 2:
                tmp_action[0] = 0.0
                tmp_action[1] = 0.5
            elif action == 3:
                tmp_action[0] = 0.5
                tmp_action[1] = 1.0
            elif action == 4:
                tmp_action[0] = 0.5
                tmp_action[1] = 0.0
            else:
                print(action)
            action = tmp_action
        elif self.config['rl']['action']['space'] == "continuos":
            # Scale actions from [-1, 1] to [0, 1]
            action = (action + 1) / 2
        # Generate ball and robot pose
        self.ball.step()
        [bot.step() for bot in self.robots]
        if self.config['rl']['action']['mode'] == "Pattern":
            # Sinusoidal scan pattern; the action scales amplitude and offset.
            self.camera.set_pan(
                min(1,
                    max(0,
                        (math.sin(self._sim_step * math.pi * 0.5 * self.time_delta) + 1) * 0.5 * action[0] + (action[1] - 0.5))),
                normalized=True)
            self.camera.set_tilt(
                min(1,
                    max(0,
                        (math.sin(self._sim_step * math.pi * 0.25 * self.time_delta) + 1) * 0.5 * action[2] + (action[3] - 0.5))),
                normalized=True)
        elif self.config['rl']['action']['mode'] == "Position":
            self.camera.set_pan(action[0], normalized=True)
            self.camera.set_tilt(action[1], normalized=True)
        elif self.config['rl']['action']['mode'] == "Velocity":
            # NOTE(review): both pan and tilt are driven by action[0] here;
            # the tilt update presumably should read action[1] -- confirm
            # before relying on Velocity mode.
            self.camera.set_pan(self.camera.get_pan(normalize=True) + (action[0] - 0.5) * self.time_delta, normalized=True)
            self.camera.set_tilt(self.camera.get_tilt(normalize=True) + (action[0] - 0.5) * self.time_delta, normalized=True)
        elif self.config['rl']['action']['mode'] == "Absolute":
            self.camera.look_at(action[0] * self.field_size[0], action[1] * self.field_size[1])
        else:
            print("Unknown action mode")
        # Check if we are able to observe the ball
        if self.camera.check_if_point_is_visible(self.ball.get_2d_position()):
            self.ball.observe()
        # Check if we are able to observe any robots
        for robot in self.other_robots:
            if self.camera.check_if_point_is_visible(robot.get_2d_position()):
                robot.observe()
        # Build observation vector; each component is enabled by config.
        observation_vector = []
        observation_vector_config = self.config['rl']['observation']['vec']
        # Base position
        if observation_vector_config["base_position"]:
            observation_vector += [
                self.my_robot.get_2d_position()[0]/self.field_size[0],  # Base footprint position x
                self.my_robot.get_2d_position()[1]/self.field_size[1],  # Base footprint position y
            ]
        # Base heading, encoded as (sin, cos) rescaled to [0, 1]
        if observation_vector_config["base_heading"]:
            observation_vector += [
                (math.sin(self.my_robot.get_heading()) + 1)/2,  # Base footprint heading part 1
                (math.cos(self.my_robot.get_heading()) + 1)/2,  # Base footprint heading part 2
            ]
        # Camera position
        if observation_vector_config["camera_position"]:
            observation_vector += [
                self.camera.get_2d_position()[0]/self.field_size[0],  # Camera position x
                self.camera.get_2d_position()[1]/self.field_size[1],  # Camera position y
            ]
        # Neck state
        if observation_vector_config["neck_joint_position"]:
            observation_vector += [
                self.camera.get_pan(normalize=True),  # Current Camera Pan
                self.camera.get_tilt(normalize=True),  # Current Camera Tilt
            ]
        if observation_vector_config["neck_joint_position_history"]:
            observation_vector += [
                self._last_pan,
                self._last_tilt,
            ]
        # Phase signal in [0, 1] derived from the simulation step counter
        if observation_vector_config["sin_phase"]:
            observation_vector += [
                (math.sin(self._sim_step * math.pi * 0.2 * self.time_delta) + 1) * 0.5,
            ]
        # Action history
        if observation_vector_config["action_history"]:
            observation_vector += [
                (action[0] + 1)/2,
                (action[1] + 1)/2,
            ]
        # Ball world model
        if observation_vector_config["estimated_ball_state"]:
            observation_vector += [
                self.ball.get_last_observed_2d_position()[0][0]/self.field_size[0],  # Observed ball x
                self.ball.get_last_observed_2d_position()[0][1]/self.field_size[1],  # Observed ball y
                self.ball.get_last_observed_2d_position()[1],  # Observed ball confidence
            ]
        # Robots world model
        if observation_vector_config["estimated_robot_states"]:
            for robot in self.other_robots:
                observation_vector += [
                    robot.get_last_observed_2d_position()[0][0]/self.field_size[0],  # Observed x
                    robot.get_last_observed_2d_position()[0][1]/self.field_size[1],  # Observed y
                    robot.get_last_observed_2d_position()[1],  # Confidence
                ]
        # Render observation maps
        observation_maps = None
        observation_map_config = self.config['rl']['observation']['maps']
        # The view history is updated every step, whether or not it is part
        # of the observation.
        # Decay older history
        self.view_history = (self.view_history * observation_map_config["view_history_map_decay"]).astype(np.uint8)
        # Get corners of projected fov
        corners = (self.camera.get_projected_image_corners() * observation_map_config['resolution']).astype(np.int32)
        # Draw polygon of visible area
        cv2.fillPoly(self.view_history,[corners.reshape((-1,1,2))],(255,))
        #cv2.imshow("hist", self.view_history)
        # Render the other maps if necessary
        if observation_map_config["observation_maps"]:
            observation_maps = np.zeros((
                self.field_size[1] * observation_map_config["resolution"],
                self.field_size[0] * observation_map_config["resolution"], 1), dtype=np.uint8)
            # Robots world model for the map
            if observation_map_config["estimated_robot_states_map"]:
                for robot in self.other_robots:
                    # Draw robot on the map unless the cell already holds a
                    # more confident observation; cell value encodes
                    # confidence mapped to [1, 255].
                    idx =( int(min(self.field_size[1] - 1, robot.get_last_observed_2d_position()[0][1]) * observation_map_config["resolution"]),
                        int(min(self.field_size[0] - 1, robot.get_last_observed_2d_position()[0][0]) * observation_map_config["resolution"]))
                    if robot.get_last_observed_2d_position()[1] * 254 + 1 > observation_maps[idx]:
                        observation_maps[idx] = robot.get_last_observed_2d_position()[1] * 254 + 1
            # Include view history if wanted
            if observation_map_config["view_history_map"]:
                observation_maps = np.dstack((self.view_history, observation_maps))
        #cv2.imshow("map", cv2.resize(np.dstack((np.zeros_like(self.view_history), observation_maps)), (9*self.render_resolution, 6*self.render_resolution)))
        self._last_pan = self.camera.get_pan(normalize=True)  # Current Camera Pan
        self._last_tilt = self.camera.get_tilt(normalize=True)  # Current Camera Tilt
        self._sim_step += 1
        # Check if we have observation maps
        if observation_maps is None:
            return np.array(observation_vector, dtype=np.float32)
        else:
            return {
                "vec": np.array(observation_vector, dtype=np.float32),
                "map": observation_maps.transpose(2, 0, 1),
            }
    def render(self):
        """Draw a debug image of the field with camera, robots and ball overlaid."""
        # Options
        render_ball_grid = False
        mask_out = False
        # Create canvas
        canvas = self.field_map.copy()
        # Draw camera with fov indicator (two rays bounding the fov)
        yaw = self.camera.get_heading()
        camera_on_canvas = (self.camera.get_2d_position() * self.render_resolution).astype(np.int)
        fov = self.camera.fov
        length = 0.5 #m
        camera_in_image_heading_min_vector = camera_on_canvas + (np.array([math.cos(yaw - fov/2), math.sin(yaw - fov/2)]) * length * self.render_resolution).astype(np.int)
        camera_in_image_heading_max_vector = camera_on_canvas + (np.array([math.cos(yaw + fov/2), math.sin(yaw + fov/2)]) * length * self.render_resolution).astype(np.int)
        cv2.line(canvas, tuple(camera_on_canvas), tuple(camera_in_image_heading_min_vector), (255,255,255), 2)
        cv2.line(canvas, tuple(camera_on_canvas), tuple(camera_in_image_heading_max_vector), (255,255,255), 2)
        # Draw robot poses: green = visible, red = hidden, white = own robot
        def draw_robot(robot, length=0.5):
            robot_on_canvas = (robot.get_2d_position() * self.render_resolution).astype(np.int)  # TODO use a different one for the camera position
            robot_in_image_heading_vector = robot_on_canvas + (np.array([math.cos(robot.get_heading()), math.sin(robot.get_heading())]) * length * self.render_resolution).astype(np.int)
            if self.camera.check_if_point_is_visible(robot.get_2d_position()):
                color = (100, 255, 100)
            else:
                color = (100, 100, 255)
            if robot == self.my_robot:
                color = (255, 255, 255)
            cv2.arrowedLine(canvas, tuple(robot_on_canvas), tuple(robot_in_image_heading_vector), color, 4)
        # Draw all robots (own robot included)
        [draw_robot(robot) for robot in self.robots]
        # Draw approximated visible field area
        corners = (self.camera.get_projected_image_corners() * self.render_resolution).astype(np.int32)
        cv2.polylines(canvas,[corners.reshape((-1,1,2))],True,(0,255,255), 5)
        if render_ball_grid:
            # Sample a grid of hypothetical ball positions for testing purposes
            for i in [x / 5.0 for x in range(0, 50)]:
                for u in [x / 5.0 for x in range(0, 45)]:
                    ball_position = np.array([i, u], dtype=np.float)
                    if self.camera.check_if_point_is_visible(ball_position):
                        cv2.circle(canvas, tuple([int(e * self.render_resolution) for e in ball_position]), 5, (0,255,0), -1)
                    else:
                        cv2.circle(canvas, tuple([int(e * self.render_resolution) for e in ball_position]), 5, (0,0,255), -1)
        # Check if the ball is visible
        if self.camera.check_if_point_is_visible(self.ball.get_2d_position()):
            cv2.circle(canvas, tuple([int(e * self.render_resolution) for e in self.ball.get_2d_position()]), 10, (0,255,0), -1)
        else:
            cv2.circle(canvas, tuple([int(e * self.render_resolution) for e in self.ball.get_2d_position()]), 10, (0,0,255), -1)
        # Mask out invisible areas
        if mask_out:
            canvas *= cv2.fillPoly(np.zeros_like(canvas),[corners.reshape((-1,1,2))], color=(1,1,1))
        return canvas
| [
"active_soccer_vision.sim.robot.Robot",
"numpy.dstack",
"random.uniform",
"random.shuffle",
"active_soccer_vision.sim.ball.Ball",
"active_soccer_vision.sim.robot.robot_orientation_player",
"os.path.join",
"numpy.zeros_like",
"os.getcwd",
"active_soccer_vision.sim.robot.robot_position_player",
"o... | [((541, 552), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (550, 552), False, 'import os\n'), ((554, 579), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (569, 579), False, 'import os\n'), ((1122, 1157), 'random.shuffle', 'random.shuffle', (['self.game_log_paths'], {}), '(self.game_log_paths)\n', (1136, 1157), False, 'import random\n'), ((1303, 1415), 'cv2.resize', 'cv2.resize', (['img', '(self.render_resolution * self.field_size[0], self.render_resolution * self\n .field_size[1])'], {}), '(img, (self.render_resolution * self.field_size[0], self.\n render_resolution * self.field_size[1]))\n', (1313, 1415), False, 'import cv2\n'), ((2973, 3019), 'active_soccer_vision.sim.ball.Ball', 'Ball', (['ball_position_generator', 'self.time_delta'], {}), '(ball_position_generator, self.time_delta)\n', (2977, 3019), False, 'from active_soccer_vision.sim.ball import ball_position_gen, ball_position_player, Ball\n'), ((5886, 6079), 'numpy.zeros', 'np.zeros', (["(self.field_size[1] * self.config['rl']['observation']['maps']['resolution'\n ], self.field_size[0] * self.config['rl']['observation']['maps'][\n 'resolution'], 1)"], {'dtype': 'np.uint8'}), "((self.field_size[1] * self.config['rl']['observation']['maps'][\n 'resolution'], self.field_size[0] * self.config['rl']['observation'][\n 'maps']['resolution'], 1), dtype=np.uint8)\n", (5894, 6079), True, 'import numpy as np\n'), ((1217, 1276), 'os.path.join', 'os.path.join', (['__location__', '""".."""', "self.config['sim']['map']"], {}), "(__location__, '..', self.config['sim']['map'])\n", (1229, 1276), False, 'import os\n'), ((2105, 2388), 'active_soccer_vision.sim.ball.ball_position_player', 'ball_position_player', ([], {'game_log': 'self.webots_log_loader', 'time_delta': 'self.time_delta', 'start': 'self.webots_log_loader.start', 'stop': 'self.webots_log_loader.stop', 'ball_position_interval': '(self.field_size[0], self.field_size[1])', 'ball_noise': "self.config['ball']['gen']['ball_noise']"}), 
"(game_log=self.webots_log_loader, time_delta=self.\n time_delta, start=self.webots_log_loader.start, stop=self.\n webots_log_loader.stop, ball_position_interval=(self.field_size[0],\n self.field_size[1]), ball_noise=self.config['ball']['gen']['ball_noise'])\n", (2125, 2388), False, 'from active_soccer_vision.sim.ball import ball_position_gen, ball_position_player, Ball\n'), ((3224, 3251), 'random.shuffle', 'random.shuffle', (['robot_names'], {}), '(robot_names)\n', (3238, 3251), False, 'import random\n'), ((6299, 6310), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (6307, 6310), True, 'import numpy as np\n'), ((12487, 12639), 'numpy.zeros', 'np.zeros', (["(self.field_size[1] * observation_map_config['resolution'], self.field_size\n [0] * observation_map_config['resolution'], 1)"], {'dtype': 'np.uint8'}), "((self.field_size[1] * observation_map_config['resolution'], self.\n field_size[0] * observation_map_config['resolution'], 1), dtype=np.uint8)\n", (12495, 12639), True, 'import numpy as np\n'), ((14047, 14093), 'numpy.array', 'np.array', (['observation_vector'], {'dtype': 'np.float32'}), '(observation_vector, dtype=np.float32)\n', (14055, 14093), True, 'import numpy as np\n'), ((1565, 1621), 'os.path.join', 'os.path.join', (['__location__', '""".."""', 'self.game_log_paths[0]'], {}), "(__location__, '..', self.game_log_paths[0])\n", (1577, 1621), False, 'import os\n'), ((3741, 3979), 'active_soccer_vision.sim.robot.robot_orientation_player', 'robot_orientation_player', ([], {'game_log': 'self.webots_log_loader', 'start': 'self.webots_log_loader.start', 'stop': 'self.webots_log_loader.stop', 'time_delta': 'self.time_delta', 'robot': 'name', 'noise': "self.config['robot']['gen']['position']['noise']"}), "(game_log=self.webots_log_loader, start=self.\n webots_log_loader.start, stop=self.webots_log_loader.stop, time_delta=\n self.time_delta, robot=name, noise=self.config['robot']['gen'][\n 'position']['noise'])\n", (3765, 3979), False, 'from 
active_soccer_vision.sim.robot import robot_position_gen, robot_position_player, robot_orientation_gen, robot_orientation_player, Robot\n'), ((4129, 4433), 'active_soccer_vision.sim.robot.robot_position_player', 'robot_position_player', ([], {'game_log': 'self.webots_log_loader', 'start': 'self.webots_log_loader.start', 'stop': 'self.webots_log_loader.stop', 'robot': 'name', 'time_delta': 'self.time_delta', 'robot_position_interval': '(self.field_size[0], self.field_size[1])', 'noise': "self.config['robot']['gen']['position']['noise']"}), "(game_log=self.webots_log_loader, start=self.\n webots_log_loader.start, stop=self.webots_log_loader.stop, robot=name,\n time_delta=self.time_delta, robot_position_interval=(self.field_size[0],\n self.field_size[1]), noise=self.config['robot']['gen']['position']['noise']\n )\n", (4150, 4433), False, 'from active_soccer_vision.sim.robot import robot_position_gen, robot_position_player, robot_orientation_gen, robot_orientation_player, Robot\n'), ((5418, 5532), 'active_soccer_vision.sim.robot.Robot', 'Robot', (['robot_position_generator', 'robot_orientation_generator', "self.config['robot']['height']", 'self.time_delta'], {}), "(robot_position_generator, robot_orientation_generator, self.config[\n 'robot']['height'], self.time_delta)\n", (5423, 5532), False, 'from active_soccer_vision.sim.robot import robot_position_gen, robot_position_player, robot_orientation_gen, robot_orientation_player, Robot\n'), ((5728, 5744), 'math.radians', 'math.radians', (['(70)'], {}), '(70)\n', (5740, 5744), False, 'import math\n'), ((13532, 13580), 'numpy.dstack', 'np.dstack', (['(self.view_history, observation_maps)'], {}), '((self.view_history, observation_maps))\n', (13541, 13580), True, 'import numpy as np\n'), ((14152, 14198), 'numpy.array', 'np.array', (['observation_vector'], {'dtype': 'np.float32'}), '(observation_vector, dtype=np.float32)\n', (14160, 14198), True, 'import numpy as np\n'), ((17422, 17443), 'numpy.zeros_like', 'np.zeros_like', 
(['canvas'], {}), '(canvas)\n', (17435, 17443), True, 'import numpy as np\n'), ((16551, 16583), 'numpy.array', 'np.array', (['[i, u]'], {'dtype': 'np.float'}), '([i, u], dtype=np.float)\n', (16559, 16583), True, 'import numpy as np\n'), ((2685, 2722), 'random.uniform', 'random.uniform', (['(0)', 'self.field_size[0]'], {}), '(0, self.field_size[0])\n', (2699, 2722), False, 'import random\n'), ((2744, 2781), 'random.uniform', 'random.uniform', (['(0)', 'self.field_size[1]'], {}), '(0, self.field_size[1])\n', (2758, 2781), False, 'import random\n'), ((10474, 10532), 'math.sin', 'math.sin', (['(self._sim_step * math.pi * 0.2 * self.time_delta)'], {}), '(self._sim_step * math.pi * 0.2 * self.time_delta)\n', (10482, 10532), False, 'import math\n'), ((4801, 4838), 'random.uniform', 'random.uniform', (['(0)', 'self.field_size[0]'], {}), '(0, self.field_size[0])\n', (4815, 4838), False, 'import random\n'), ((4864, 4901), 'random.uniform', 'random.uniform', (['(1)', 'self.field_size[1]'], {}), '(1, self.field_size[1])\n', (4878, 4901), False, 'import random\n'), ((5273, 5300), 'random.uniform', 'random.uniform', (['(0)', 'math.tau'], {}), '(0, math.tau)\n', (5287, 5300), False, 'import random\n'), ((14745, 14768), 'math.cos', 'math.cos', (['(yaw - fov / 2)'], {}), '(yaw - fov / 2)\n', (14753, 14768), False, 'import math\n'), ((14768, 14791), 'math.sin', 'math.sin', (['(yaw - fov / 2)'], {}), '(yaw - fov / 2)\n', (14776, 14791), False, 'import math\n'), ((14917, 14940), 'math.cos', 'math.cos', (['(yaw + fov / 2)'], {}), '(yaw + fov / 2)\n', (14925, 14940), False, 'import math\n'), ((14940, 14963), 'math.sin', 'math.sin', (['(yaw + fov / 2)'], {}), '(yaw + fov / 2)\n', (14948, 14963), False, 'import math\n'), ((7318, 7376), 'math.sin', 'math.sin', (['(self._sim_step * math.pi * 0.5 * self.time_delta)'], {}), '(self._sim_step * math.pi * 0.5 * self.time_delta)\n', (7326, 7376), False, 'import math\n'), ((7565, 7624), 'math.sin', 'math.sin', (['(self._sim_step * math.pi * 0.25 * 
self.time_delta)'], {}), '(self._sim_step * math.pi * 0.25 * self.time_delta)\n', (7573, 7624), False, 'import math\n')] |
"""
Base class for the contexts as used in the paper "How to Train Your
Differentiable Filter". Contains code that is shared between all three
contexts.
"""
# this code only works with tensorflow 1
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import numpy as np
import os
import csv
from differentiable_filters.contexts import base_context as base
import differentiable_filters.utils.tensorflow_compatability as compat
class PaperBaseContext(base.BaseContext):
    """Shared base class for the contexts used in the paper "How to Train
    Your Differentiable Filter".

    Concrete contexts provide the actual keras layers (sensor model,
    observation/process models and their noise models); this class contains
    the dispatch logic and the loss utilities they all share.
    """
    def __init__(self, param, mode):
        """
        Base class for the contexts used in the paper containing shared
        functions.
        Parameters
        ----------
        param : dict
            A dictionary of arguments
        mode : string
            determines which parts of the model are trained. Use "filter" for
            the whole model, "pretrain_obs" for pretraining the observation
            related functions of the context in isolation or "pretrain_proc"
            for pretraining the process-related functions of the context.
        """
        # NOTE(review): super(base.BaseContext, self) starts the MRO lookup
        # *behind* BaseContext, i.e. BaseContext.__init__ is skipped and
        # object.__init__ runs instead. Left unchanged because BaseContext's
        # constructor signature is not visible here -- confirm intentional.
        super(base.BaseContext, self).__init__()
        # determine the loss function
        self.loss = param['loss']
        self.batch_size = param['batch_size']
        self.mixture_std = param['mixture_std']
        self.debug = param['debug']
        self.param = param
        self.update_ops = []
        # if we extract more than one training example from one record in the
        # dataset, we need to indicate this
        self.train_multiplier = 1
        self.test_multiplier = 1
        self.epoch_size = 1
        self.mode = mode
        self.scale = param['scale']
        self.sl = param['sequence_length']
    ###########################################################################
    # observation models
    ###########################################################################
    def run_sensor_model(self, raw_observations, training):
        """
        Process raw observations and return the predicted observations z
        for the filter and an encoding for predicting the observation noise
        Parameters
        ----------
        raw_observations : list of tensors
            Raw sensory observations
        training : boolean tensor
            flag that indicates if model is in training or test mode
        Returns
        -------
        z : tensor [batch_size, dim_z]
            Low-dimensional observations
        enc : tensor or list of tensors
            An encoding of the raw observations that can be used for predicting
            heteroscedastic observation noise or the learned observation update
            of the particle filter
        """
        z, enc = self.sensor_model_layer(raw_observations, training)
        return z, enc
    def get_observation_noise(self, encoding, training):
        """
        Observation noise model
        Parameters
        ----------
        encoding : tensor or list of tensors
            An encoding of the raw observations that can be used for predicting
            heteroscedastic observation noise
        training : bool
            training or testing?
        Returns
        -------
        R : tensor [batch_size, dim_z, dim_z]
            Observation noise covariance matrix
        """
        if not self.param['learn_r']:
            # fixed, hand-tuned noise: just broadcast over the batch
            return tf.tile(self.R[None, :, :], [self.batch_size, 1, 1])
        # Dispatch to one of the four noise layers by name instead of a
        # nested if/else tree; the layer attributes are
        # observation_noise_{hetero|const}_{diag|full}.
        kind = 'hetero' if self.param['hetero_r'] else 'const'
        shape = 'diag' if self.param['diagonal_covar'] else 'full'
        layer = getattr(self, 'observation_noise_%s_%s' % (kind, shape))
        return layer(encoding, training)
    def get_observation_likelihood(self, particles, encoding, training):
        """
        Learned observation update for the particle filter.
        Consumes an encoding of the raw observations and the predicted
        particles and returns the likelihood of each particle
        Parameters
        ----------
        particles : tensor [batch_size, num_particles, dim_z]
            Predicted observations for the particles
        encoding : tensor or list of tensors
            An encoding of the raw observations
        training : bool
            training or testing?
        Returns
        -------
        tf.keras.layer
            A layer that predicts the likelihood of the observations under each
            particle
        """
        return self.likelihood_layer([particles, encoding], training)
    def run_observation_model(self, state, training):
        """
        Predicts the observations for a given state
        Parameters
        ----------
        state : tensor [batch_size, dim_x]
            the predicted state
        training : bool
            training or testing?
        Returns
        -------
        tf.keras.layer
            A layer that computes the expected observations for the input
            state and the Jacobian of the observation model
        """
        return self.observation_model_layer(state, training)
    ###########################################################################
    # process models
    ###########################################################################
    def run_process_model(self, old_state, action, training):
        """
        Predicts the next state given the old state and actions performed
        Parameters
        ----------
        old_state : tensor [batch_size, dim_x]
            the previous state
        action : tensor [batch_size, dim_u]
            the performed actions
        training : bool
            training or testing?
        Returns
        -------
        new_state : tensor [batch_size, dim_x]
            the predicted next state
        F : tensor [batch_size, dim_x, dim_x]
            the jacobian of the process model
        """
        if self.param['learn_process']:
            new_state, F = \
                self.process_model_learned_layer([old_state, action], training)
        else:
            new_state, F = \
                self.process_model_analytical_layer([old_state, action],
                                                    training)
        # e.g. wrap angles back into their valid range
        new_state = self.correct_state(new_state, diff=False)
        return new_state, F
    def get_process_noise(self, old_state, action, training):
        """
        Consumes the old state and action and predicts the process
        noise with the desired attributes
        Parameters
        ----------
        old_state : tensor [batch_size, dim_x]
            the previous state
        action : tensor [batch_size, dim_u]
            the performed actions
        training : bool
            training or testing?
        Returns
        -------
        tf.keras.layer
            A layer that computes the desired process noise
        """
        if not self.param['learn_q']:
            # fixed, hand-tuned noise: just broadcast over the batch
            return tf.tile(self.Q[None, :, :], [self.batch_size, 1, 1])
        # Dispatch to one of the eight noise layers by name instead of the
        # three-level if/else tree; the layer attributes are
        # process_noise_{hetero|const}_{diag|full}_{lrn|ana}.
        kind = 'hetero' if self.param['hetero_q'] else 'const'
        shape = 'diag' if self.param['diagonal_covar'] else 'full'
        suffix = 'lrn' if self.param['learn_process'] else 'ana'
        layer = getattr(self,
                        'process_noise_%s_%s_%s' % (kind, shape, suffix))
        return layer([old_state, action], training)
    ###########################################################################
    # loss functions
    ###########################################################################
    def get_filter_loss(self, prediction, label, step, training):
        """
        Compute the loss for the filtering application - defined in the context
        Args:
            prediction: list of predicted tensors
            label: list of label tensors
            step: training step
            training: boolean tensor, indicates if we compute a loss for
            training or testing
        Returns:
            loss: the total loss for training the filtering application
            metrics: additional metrics we might want to log for evaluation
            metric-names: the names for those metrics
        """
        raise NotImplementedError("Please implement this method")
    def get_observation_loss(self, prediction, label, step, training):
        """
        Compute the loss for the observation functions - defined in the context
        Args:
            prediction: list of predicted tensors
            label: list of label tensors
            step: training step
            training: boolean tensor, indicates if we compute a loss for
            training or testing
        Returns:
            loss: the total loss for training the observation preprocessing
            metrics: additional metrics we might want to log for evaluation
            metric-names: the names for those metrics
        """
        raise NotImplementedError("Please implement this method")
    def get_process_loss(self, prediction, label, step, training):
        """
        Compute the loss for the process functions - defined in the context
        Args:
            prediction: list of predicted tensors
            label: list of label tensors
            step: training step
            training: boolean tensor, indicates if we compute a loss for
            training or testing
        Returns:
            loss: the total loss for training the process model
            metrics: additional metrics we might want to log for evaluation
            metric-names: the names for those metrics
        """
        raise NotImplementedError("Please implement this method")
    ###########################################################################
    # loss functions
    ###########################################################################
    def _mixture_likelihood(self, diffs, weights, reduce_mean=False):
        """
        Compute the negative log likelihood of y under a gaussian
        mixture model defined by a set of particles and their weights.
        Parameters
        ----------
        diffs : tensor
            difference between y and the states of the particles
        weights : tensor
            weights of the particles
        reduce_mean : bool, optional
            if true, return the mean likelihood loss over the complete tensor.
            The default is False.
        Returns
        -------
        likelihood : tensor
            the negative log likelihood
        """
        dim = compat.get_dim_int(diffs, -1)
        num = compat.get_dim_int(diffs, -2)
        # remove nans and infs and replace them with high values/zeros
        diffs = tf.where(tf.math.is_finite(diffs), diffs,
                         tf.ones_like(diffs)*1e5/self.scale)
        weights = tf.where(tf.math.is_finite(weights), weights,
                           tf.zeros_like(weights))
        weights /= tf.reduce_sum(weights, axis=-1, keepdims=True)
        # fixed isotropic mixture covariance (scaled to the state space)
        covar = np.full(dim, self.mixture_std/self.scale, dtype=np.float32)
        covar = tf.linalg.diag(tf.square(covar))
        if len(diffs.get_shape().as_list()) > 3:
            # sequence input: [batch, sequence, particles, dim]
            sl = compat.get_dim_int(diffs, 1)
            diffs = tf.reshape(diffs, [self.batch_size, -1, num, dim, 1])
            covar = tf.tile(covar[None, None, None, :, :],
                            [self.batch_size, sl, num, 1, 1])
        else:
            sl = 1
            diffs = tf.reshape(diffs, [self.batch_size, num, dim, 1])
            covar = tf.tile(covar[None, None, :, :],
                            [self.batch_size, num, 1, 1])
        # transfer to float 64 for higher accuracy
        covar = tf.cast(covar, tf.float64)
        diffs = tf.cast(diffs, tf.float64)
        weights = tf.cast(weights, tf.float64)
        # per-particle Mahalanobis term d^T C^-1 d
        exponent = tf.matmul(tf.matmul(tf.linalg.matrix_transpose(diffs),
                                        tf.linalg.inv(covar)), diffs)
        exponent = tf.reshape(exponent, [self.batch_size, sl, num])
        # log-normalizer of the gaussian; np.log replaces the removed
        # tf.log alias (constant anyway, no graph op needed)
        normalizer = tf.math.log(tf.linalg.det(covar)) + \
            tf.cast(dim * np.log(2 * np.pi), tf.float64)
        log_like = -0.5 * (exponent + normalizer)
        log_like = tf.reshape(log_like, [self.batch_size, sl, num])
        # clip to avoid underflow in the following exp
        log_like = tf.where(tf.greater_equal(log_like, -500), log_like,
                            tf.ones_like(log_like)*-500)
        exp = tf.exp(log_like)
        # the per particle likelihoods are weighted and summed in the particle
        # dimension
        weighted = weights * exp
        weighted = tf.reduce_sum(weighted, axis=-1)
        # compute the negative logarithm and undo the bias
        likelihood = - (tf.math.log(tf.maximum(weighted, 1e-300)))
        if reduce_mean:
            likelihood = tf.reduce_mean(likelihood)
        likelihood = tf.cast(likelihood, tf.float32)
        return likelihood
    ######################################
    # Evaluation
    ######################################
    def save_log(self, log_dict, out_dir, step, num=0, mode='filter'):
        """
        A helper to save the results of testing a filter on a given problem.
        Parameters
        ----------
        log_dict : dict
            dictionary of the losses that should be logged (as lists, one loss
            per test example)
        out_dir : str
            the directory where the results are written to
        step : int
            the training step of the model that has been evaluated
        num : int, optional
            The number of this test run (if the model is evaluated several
            times)
        mode : str, optional
            flag that indicates if the context is run in filtering or
            pretraining mode.
        """
        row = {}
        for k, v in log_dict.items():
            # Skip non-numeric entries. np.str/np.bool were removed in
            # numpy >= 1.24, so test against the supported scalar types.
            if not isinstance(v[0], (str, bool, np.str_, np.bool_)):
                row[k] = np.mean(v)
                row[k + '_std'] = np.std(v)
        out_path = os.path.join(out_dir, str(step) + '_res.csv')
        # context manager guarantees the file is closed even on error
        with open(out_path, 'w') as log_file:
            log = csv.DictWriter(log_file, sorted(row.keys()))
            log.writeheader()
            log.writerow(row)
        return
| [
"tensorflow.compat.v1.exp",
"tensorflow.compat.v1.disable_v2_behavior",
"tensorflow.compat.v1.ones_like",
"tensorflow.compat.v1.linalg.inv",
"tensorflow.compat.v1.maximum",
"tensorflow.compat.v1.log",
"numpy.mean",
"differentiable_filters.utils.tensorflow_compatability.get_dim_int",
"tensorflow.comp... | [((233, 257), 'tensorflow.compat.v1.disable_v2_behavior', 'tf.disable_v2_behavior', ([], {}), '()\n', (255, 257), True, 'import tensorflow.compat.v1 as tf\n'), ((12469, 12498), 'differentiable_filters.utils.tensorflow_compatability.get_dim_int', 'compat.get_dim_int', (['diffs', '(-1)'], {}), '(diffs, -1)\n', (12487, 12498), True, 'import differentiable_filters.utils.tensorflow_compatability as compat\n'), ((12513, 12542), 'differentiable_filters.utils.tensorflow_compatability.get_dim_int', 'compat.get_dim_int', (['diffs', '(-2)'], {}), '(diffs, -2)\n', (12531, 12542), True, 'import differentiable_filters.utils.tensorflow_compatability as compat\n'), ((12868, 12914), 'tensorflow.compat.v1.reduce_sum', 'tf.reduce_sum', (['weights'], {'axis': '(-1)', 'keepdims': '(True)'}), '(weights, axis=-1, keepdims=True)\n', (12881, 12914), True, 'import tensorflow.compat.v1 as tf\n'), ((12932, 12962), 'numpy.ones', 'np.ones', (['dim'], {'dtype': 'np.float32'}), '(dim, dtype=np.float32)\n', (12939, 12962), True, 'import numpy as np\n'), ((13666, 13692), 'tensorflow.compat.v1.cast', 'tf.cast', (['covar', 'tf.float64'], {}), '(covar, tf.float64)\n', (13673, 13692), True, 'import tensorflow.compat.v1 as tf\n'), ((13709, 13735), 'tensorflow.compat.v1.cast', 'tf.cast', (['diffs', 'tf.float64'], {}), '(diffs, tf.float64)\n', (13716, 13735), True, 'import tensorflow.compat.v1 as tf\n'), ((13754, 13782), 'tensorflow.compat.v1.cast', 'tf.cast', (['weights', 'tf.float64'], {}), '(weights, tf.float64)\n', (13761, 13782), True, 'import tensorflow.compat.v1 as tf\n'), ((13946, 13994), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['exponent', '[self.batch_size, sl, num]'], {}), '(exponent, [self.batch_size, sl, num])\n', (13956, 13994), True, 'import tensorflow.compat.v1 as tf\n'), ((14180, 14228), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['log_like', '[self.batch_size, sl, num]'], {}), '(log_like, [self.batch_size, sl, num])\n', (14190, 14228), True, 'import 
tensorflow.compat.v1 as tf\n'), ((14374, 14390), 'tensorflow.compat.v1.exp', 'tf.exp', (['log_like'], {}), '(log_like)\n', (14380, 14390), True, 'import tensorflow.compat.v1 as tf\n'), ((14543, 14575), 'tensorflow.compat.v1.reduce_sum', 'tf.reduce_sum', (['weighted'], {'axis': '(-1)'}), '(weighted, axis=-1)\n', (14556, 14575), True, 'import tensorflow.compat.v1 as tf\n'), ((14802, 14833), 'tensorflow.compat.v1.cast', 'tf.cast', (['likelihood', 'tf.float32'], {}), '(likelihood, tf.float32)\n', (14809, 14833), True, 'import tensorflow.compat.v1 as tf\n'), ((3329, 3381), 'tensorflow.compat.v1.tile', 'tf.tile', (['self.R[None, :, :]', '[self.batch_size, 1, 1]'], {}), '(self.R[None, :, :], [self.batch_size, 1, 1])\n', (3336, 3381), True, 'import tensorflow.compat.v1 as tf\n'), ((7086, 7138), 'tensorflow.compat.v1.tile', 'tf.tile', (['self.Q[None, :, :]', '[self.batch_size, 1, 1]'], {}), '(self.Q[None, :, :], [self.batch_size, 1, 1])\n', (7093, 7138), True, 'import tensorflow.compat.v1 as tf\n'), ((12640, 12664), 'tensorflow.compat.v1.math.is_finite', 'tf.math.is_finite', (['diffs'], {}), '(diffs)\n', (12657, 12664), True, 'import tensorflow.compat.v1 as tf\n'), ((12761, 12787), 'tensorflow.compat.v1.math.is_finite', 'tf.math.is_finite', (['weights'], {}), '(weights)\n', (12778, 12787), True, 'import tensorflow.compat.v1 as tf\n'), ((12825, 12847), 'tensorflow.compat.v1.zeros_like', 'tf.zeros_like', (['weights'], {}), '(weights)\n', (12838, 12847), True, 'import tensorflow.compat.v1 as tf\n'), ((13076, 13092), 'tensorflow.compat.v1.square', 'tf.square', (['covar'], {}), '(covar)\n', (13085, 13092), True, 'import tensorflow.compat.v1 as tf\n'), ((13160, 13188), 'differentiable_filters.utils.tensorflow_compatability.get_dim_int', 'compat.get_dim_int', (['diffs', '(1)'], {}), '(diffs, 1)\n', (13178, 13188), True, 'import differentiable_filters.utils.tensorflow_compatability as compat\n'), ((13209, 13262), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['diffs', 
'[self.batch_size, -1, num, dim, 1]'], {}), '(diffs, [self.batch_size, -1, num, dim, 1])\n', (13219, 13262), True, 'import tensorflow.compat.v1 as tf\n'), ((13283, 13355), 'tensorflow.compat.v1.tile', 'tf.tile', (['covar[None, None, None, :, :]', '[self.batch_size, sl, num, 1, 1]'], {}), '(covar[None, None, None, :, :], [self.batch_size, sl, num, 1, 1])\n', (13290, 13355), True, 'import tensorflow.compat.v1 as tf\n'), ((13437, 13486), 'tensorflow.compat.v1.reshape', 'tf.reshape', (['diffs', '[self.batch_size, num, dim, 1]'], {}), '(diffs, [self.batch_size, num, dim, 1])\n', (13447, 13486), True, 'import tensorflow.compat.v1 as tf\n'), ((13507, 13569), 'tensorflow.compat.v1.tile', 'tf.tile', (['covar[None, None, :, :]', '[self.batch_size, num, 1, 1]'], {}), '(covar[None, None, :, :], [self.batch_size, num, 1, 1])\n', (13514, 13569), True, 'import tensorflow.compat.v1 as tf\n'), ((14258, 14290), 'tensorflow.compat.v1.greater_equal', 'tf.greater_equal', (['log_like', '(-500)'], {}), '(log_like, -500)\n', (14274, 14290), True, 'import tensorflow.compat.v1 as tf\n'), ((14753, 14779), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['likelihood'], {}), '(likelihood)\n', (14767, 14779), True, 'import tensorflow.compat.v1 as tf\n'), ((13823, 13856), 'tensorflow.compat.v1.linalg.matrix_transpose', 'tf.linalg.matrix_transpose', (['diffs'], {}), '(diffs)\n', (13849, 13856), True, 'import tensorflow.compat.v1 as tf\n'), ((13897, 13917), 'tensorflow.compat.v1.linalg.inv', 'tf.linalg.inv', (['covar'], {}), '(covar)\n', (13910, 13917), True, 'import tensorflow.compat.v1 as tf\n'), ((14029, 14049), 'tensorflow.compat.v1.linalg.det', 'tf.linalg.det', (['covar'], {}), '(covar)\n', (14042, 14049), True, 'import tensorflow.compat.v1 as tf\n'), ((14330, 14352), 'tensorflow.compat.v1.ones_like', 'tf.ones_like', (['log_like'], {}), '(log_like)\n', (14342, 14352), True, 'import tensorflow.compat.v1 as tf\n'), ((14672, 14700), 'tensorflow.compat.v1.maximum', 'tf.maximum', 
(['weighted', '(1e-300)'], {}), '(weighted, 1e-300)\n', (14682, 14700), True, 'import tensorflow.compat.v1 as tf\n'), ((15902, 15912), 'numpy.mean', 'np.mean', (['v'], {}), '(v)\n', (15909, 15912), True, 'import numpy as np\n'), ((15947, 15956), 'numpy.std', 'np.std', (['v'], {}), '(v)\n', (15953, 15956), True, 'import numpy as np\n'), ((12698, 12717), 'tensorflow.compat.v1.ones_like', 'tf.ones_like', (['diffs'], {}), '(diffs)\n', (12710, 12717), True, 'import tensorflow.compat.v1 as tf\n'), ((14081, 14098), 'tensorflow.compat.v1.log', 'tf.log', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (14087, 14098), True, 'import tensorflow.compat.v1 as tf\n')] |
# COVID-19 Early Warning Score
# Date: 3/20/2020
# Contact: <EMAIL>
from functools import reduce
from typing import Tuple, Dict, Any
import numpy as np
import pandas as pd
import streamlit as st
#import plotly.graph_objects as go
#import ipywidgets as widgets
#from ipywidgets import AppLayout, Button
#from IPython.display import HTML, display, Markdown
#from bqplot import pyplot as plt
#import ipyvuetify as v
#from traitlets import Unicode, List
#matplotlib.use("Agg")
#import matplotlib.pyplot as plt
# --- Streamlit page setup and sidebar inputs -------------------------------
# Each categorical answer below is immediately re-coded into the 0/1 (or 5/0)
# score that feeds the total early-warning score further down the script.
# Hide Streamlit's default hamburger menu via inline CSS.
hide_menu_style = """
        <style>
        #MainMenu {visibility: hidden;}
        </style>
        """
st.markdown(hide_menu_style, unsafe_allow_html=True)
# Parameters
# Age: re-coded in place, 1 if older than 44 else 0
age = st.sidebar.radio("Age > 44?", ('Yes', 'No'))
if age == 'Yes':
    age = 1
else:
    age = 0
# Sex: re-coded in place, 1 for male, 0 for female
sex = st.sidebar.radio("Sex", ('Male', 'Female'))
if sex == 'Male':
    sex = 1
else:
    sex = 0
# CT Findings: CT_score (5 points) goes into the total score,
# CT_val (0/1) is only used for the Yes/No display table below.
CT = st.sidebar.radio(
    "Signs of Pneumonia on CT?", ('Yes', 'No'))
if CT == 'Yes':
    CT_score = 5
else:
    CT_score = 0
if CT == 'Yes':
    CT_val = 1
else:
    CT_val = 0
# Exposure: same split as CT -- 5-point score plus a 0/1 display value.
exposure = st.sidebar.radio(
    "Has the patient been in close contact with a confirmed COVID-19 case?", ('Yes', 'No'))
if exposure == 'Yes':
    exposure_score = 5
else:
    exposure_score = 0
if exposure == 'Yes':
    exposure_val = 1
else:
    exposure_val = 0
# Fever: the temperature input is only shown when fever is reported.
tmax = 0
fever = st.sidebar.radio("Fever?", ('Yes', 'No'))
if fever == 'Yes':
    tmax = (st.sidebar.number_input("TMax", 35.0, 45.0, value=36.5, step=0.1, format="%f"))
if fever == 'Yes':
    fever = 1
else:
    fever = 0
# NOTE(review): 'tmax' is reused -- first the measured temperature in deg C,
# then the 0/1 score. The 37.8 threshold presumably comes from the source
# paper (see reference at the bottom) -- TODO confirm.
if tmax > 37.8:
    tmax = 1
else:
    tmax = 0
# Respiratory Symptoms: scored 1 if at least one symptom is selected.
resp_symp = 0
resp_symp = st.sidebar.multiselect('Any respiratory symptoms?',
                                   ('Cough', 'Expectoration', 'Dyspnea'))
resp_symp = len(resp_symp)
if resp_symp >= 1:
    resp_symp = 1
else:
    resp_symp = 0
# NLR (neutrophil/lymphocyte ratio): nlr_value keeps the raw ratio for
# display; nlr becomes the 0/1 score (threshold 3.13 -- presumably from the
# source paper, TODO confirm).
abs_lym = (st.sidebar.number_input("Absolute Lymphocytes", 0.1, 10.0, value=3.0, step=0.1, format="%f"))
abs_neu = (st.sidebar.number_input("Absolute Neutrophils", 0.1, 10.0, value=3.0, step=0.1, format="%f"))
nlr = abs_neu/abs_lym
nlr_value = nlr
if nlr > 3.13:
    nlr = 1
else: nlr = 0
# --- Page header -----------------------------------------------------------
st.title("COVID-19 Early Warning Score")
st.markdown(
    """*This tool is intended for ....
For questions about this page, contact <EMAIL>.
""")
def ews(age, sex, CT_score, exposure_score, fever, tmax, resp_symp, nlr):
    """Total early-warning score: the plain sum of the component scores.

    All arguments are the already re-coded 0/1 (or 5/0) component scores.
    """
    components = (age, sex, CT_score, exposure_score, fever, tmax,
                  resp_symp, nlr)
    # NOTE: the published logistic-regression formula
    # 1 / (1 + exp(-[-9.106 + 2.79*fever + 4.58*exposure + 5.10*CT
    #                + 0.97*NLR + 0.94*tmax + 0.90*sex]))
    # is not implemented here; the score is a simple additive total.
    return np.sum(components)
# --- Result table and page output ------------------------------------------
# Lookup tables that translate each 0/1 score back into a human-readable
# label for the summary table.
bin_dict = dict({1:'Yes', 0:'No'})
age_dict = dict({1:'Age>44', 0:'Age<44'})
sex_dict = dict({1:'Male', 0:'Female'})
tmax_dict = dict({1:'Tmax>37.8', 0:'Tmax<37.8'})
resp_dict = dict({1:'At least one respiratory symptom', 0: 'No respiratory symptoms'})
nlr_dict = dict({1:'NLR>3.13', 0:'NLR<3.13'})
early_warning_score = ews(age, sex, CT_score, exposure_score, fever, tmax, resp_symp, nlr)
# One row per parameter plus a final 'Total Score' row; the three lists
# must stay index-aligned.
data = {
    'Parameters': ['Signs of Pneumonia on CT', 'History of close contact with confirmed COVID-19 patient',
                   'Fever', 'Tmax', 'Age', 'Sex','Respiratory Symptoms', 'Neutrophyl/Lymphocyte Ratio (NLR)', ''],
    'Assessment': [bin_dict[CT_val], bin_dict[exposure_val], bin_dict[fever], tmax_dict[tmax], age_dict[age], sex_dict[sex],
                   resp_dict[resp_symp], nlr_dict[nlr], 'Total Score'],
    'Score': [CT_score, exposure_score, fever, tmax, age, sex, resp_symp, nlr, early_warning_score]
}
df = pd.DataFrame(data)
st.subheader("COVID-19 Early Warning Calculator")
st.table(df)
st.markdown("""The COVID-19 Early Warning Score is **{early_warning_score:.0f}**, a score of more than 10 indicates...""".format(
    early_warning_score=early_warning_score)
)
st.markdown("""The calculated Neutrophyl/Lymphocyte Ratio is **{nlr_value:.2f}**.""".format(
    nlr_value=nlr_value)
)
# """Erie county has reported **{cases_erie:.2f}** cases of COVID-19.""".format(
#     cases_erie=cases_erie
# )
# Optional expander-style section with methodology notes and the reference
# to the underlying preprint.
if st.checkbox("Show Additional Information"):
    st.subheader("COVID-19 Early Warning Score Methodology")
    st.markdown(
        """* **Hospitalized.
    * **Currently
    * **Regional .
    """)
    st.subheader("References & Acknowledgements")
    st.markdown(
        """
        https://www.medrxiv.org/content/10.1101/2020.03.05.20031906v1.full.pdf
        """
) | [
"streamlit.checkbox",
"streamlit.markdown",
"streamlit.sidebar.multiselect",
"streamlit.table",
"pandas.DataFrame",
"numpy.sum",
"streamlit.subheader",
"streamlit.sidebar.number_input",
"streamlit.sidebar.radio",
"streamlit.title"
] | [((616, 668), 'streamlit.markdown', 'st.markdown', (['hide_menu_style'], {'unsafe_allow_html': '(True)'}), '(hide_menu_style, unsafe_allow_html=True)\n', (627, 668), True, 'import streamlit as st\n'), ((696, 740), 'streamlit.sidebar.radio', 'st.sidebar.radio', (['"""Age > 44?"""', "('Yes', 'No')"], {}), "('Age > 44?', ('Yes', 'No'))\n", (712, 740), True, 'import streamlit as st\n'), ((803, 846), 'streamlit.sidebar.radio', 'st.sidebar.radio', (['"""Sex"""', "('Male', 'Female')"], {}), "('Sex', ('Male', 'Female'))\n", (819, 846), True, 'import streamlit as st\n'), ((917, 977), 'streamlit.sidebar.radio', 'st.sidebar.radio', (['"""Signs of Pneumonia on CT?"""', "('Yes', 'No')"], {}), "('Signs of Pneumonia on CT?', ('Yes', 'No'))\n", (933, 977), True, 'import streamlit as st\n'), ((1118, 1231), 'streamlit.sidebar.radio', 'st.sidebar.radio', (['"""Has the patient been in close contact with a confirmed COVID-19 case?"""', "('Yes', 'No')"], {}), "(\n 'Has the patient been in close contact with a confirmed COVID-19 case?',\n ('Yes', 'No'))\n", (1134, 1231), True, 'import streamlit as st\n'), ((1402, 1443), 'streamlit.sidebar.radio', 'st.sidebar.radio', (['"""Fever?"""', "('Yes', 'No')"], {}), "('Fever?', ('Yes', 'No'))\n", (1418, 1443), True, 'import streamlit as st\n'), ((1712, 1806), 'streamlit.sidebar.multiselect', 'st.sidebar.multiselect', (['"""Any respiratory symptoms?"""', "('Cough', 'Expectoration', 'Dyspnea')"], {}), "('Any respiratory symptoms?', ('Cough',\n 'Expectoration', 'Dyspnea'))\n", (1734, 1806), True, 'import streamlit as st\n'), ((1934, 2031), 'streamlit.sidebar.number_input', 'st.sidebar.number_input', (['"""Absolute Lymphocytes"""', '(0.1)', '(10.0)'], {'value': '(3.0)', 'step': '(0.1)', 'format': '"""%f"""'}), "('Absolute Lymphocytes', 0.1, 10.0, value=3.0, step=\n 0.1, format='%f')\n", (1957, 2031), True, 'import streamlit as st\n'), ((2039, 2136), 'streamlit.sidebar.number_input', 'st.sidebar.number_input', (['"""Absolute Neutrophils"""', 
'(0.1)', '(10.0)'], {'value': '(3.0)', 'step': '(0.1)', 'format': '"""%f"""'}), "('Absolute Neutrophils', 0.1, 10.0, value=3.0, step=\n 0.1, format='%f')\n", (2062, 2136), True, 'import streamlit as st\n'), ((2215, 2255), 'streamlit.title', 'st.title', (['"""COVID-19 Early Warning Score"""'], {}), "('COVID-19 Early Warning Score')\n", (2223, 2255), True, 'import streamlit as st\n'), ((2256, 2368), 'streamlit.markdown', 'st.markdown', (['"""*This tool is intended for .... \n\nFor questions about this page, contact <EMAIL>. \n"""'], {}), '(\n """*This tool is intended for .... \n\nFor questions about this page, contact <EMAIL>. \n"""\n )\n', (2267, 2368), True, 'import streamlit as st\n'), ((3627, 3645), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (3639, 3645), True, 'import pandas as pd\n'), ((3647, 3696), 'streamlit.subheader', 'st.subheader', (['"""COVID-19 Early Warning Calculator"""'], {}), "('COVID-19 Early Warning Calculator')\n", (3659, 3696), True, 'import streamlit as st\n'), ((3698, 3710), 'streamlit.table', 'st.table', (['df'], {}), '(df)\n', (3706, 3710), True, 'import streamlit as st\n'), ((4144, 4186), 'streamlit.checkbox', 'st.checkbox', (['"""Show Additional Information"""'], {}), "('Show Additional Information')\n", (4155, 4186), True, 'import streamlit as st\n'), ((4366, 4411), 'streamlit.subheader', 'st.subheader', (['"""References & Acknowledgements"""'], {}), "('References & Acknowledgements')\n", (4378, 4411), True, 'import streamlit as st\n'), ((4412, 4521), 'streamlit.markdown', 'st.markdown', (['"""\n https://www.medrxiv.org/content/10.1101/2020.03.05.20031906v1.full.pdf\n """'], {}), '(\n """\n https://www.medrxiv.org/content/10.1101/2020.03.05.20031906v1.full.pdf\n """\n )\n', (4423, 4521), True, 'import streamlit as st\n'), ((1476, 1554), 'streamlit.sidebar.number_input', 'st.sidebar.number_input', (['"""TMax"""', '(35.0)', '(45.0)'], {'value': '(36.5)', 'step': '(0.1)', 'format': '"""%f"""'}), "('TMax', 35.0, 45.0, 
value=36.5, step=0.1, format='%f')\n", (1499, 1554), True, 'import streamlit as st\n'), ((2528, 2542), 'numpy.sum', 'np.sum', (['values'], {}), '(values)\n', (2534, 2542), True, 'import numpy as np\n'), ((4192, 4248), 'streamlit.subheader', 'st.subheader', (['"""COVID-19 Early Warning Score Methodology"""'], {}), "('COVID-19 Early Warning Score Methodology')\n", (4204, 4248), True, 'import streamlit as st\n'), ((4254, 4356), 'streamlit.markdown', 'st.markdown', (['"""* **Hospitalized.\n * **Currently \n * **Regional . \n """'], {}), '(\n """* **Hospitalized.\n * **Currently \n * **Regional . \n """\n )\n', (4265, 4356), True, 'import streamlit as st\n')] |
from components.mapping.point import Point
from components.mapping.map import Map
import random
import numpy as np
import copy
class DNA:
    '''
    The DNA represents one candidate solution to the Multiple Traveller
    Salesman Problem: ``n_buses`` routes over the stops of a shared Map.

    The solution is stored in ``self.routes`` as a numpy object array
    holding one integer stop-sequence per bus.
    Constraint: every route starts from the same ``start_position``.
    ``fitness_value`` is ``[max route cost, total cost]`` and is filled
    in lazily by ``fitness()``.
    '''
    def __init__(self, n_buses, _map, start_position, routes=None):
        '''
        Args:
            n_buses (int): number of buses/salesmen.
            _map (Map): stops and pairwise distances.
            start_position (int): index of the shared starting stop.
            routes: optional pre-built routes array (e.g. produced by
                crossover via decode()); when None a random feasible
                assignment is generated.

        Raises:
            AssertionError: if the argument types are unexpected.
        '''
        try:
            assert isinstance(n_buses, int)
            assert isinstance(start_position, int)
            assert isinstance(_map, Map)
        except AssertionError:
            raise AssertionError('Error Creating DNA Object - Unexpected Types')
        self.n_buses = n_buses
        self.map = _map
        self.start_position = start_position
        # [longest single-route cost, total cost]; inf == "not computed yet".
        self.fitness_value = [np.inf, np.inf]
        # FIX: use `is None` instead of `== None`.  When `routes` is a numpy
        # object array (decode() builds one), `routes == None` compares
        # elementwise and `if` then raises "truth value of an array with
        # more than one element is ambiguous".
        if routes is None:
            self.routes = self.initialize_routes()
        else:
            self.routes = routes

    def initialize_routes(self):
        '''Randomly assign every stop of the map to some bus.

        Returns:
            Object array of ``n_buses`` routes; each route begins with
            ``start_position`` and every remaining stop is appended to a
            uniformly random route.
        '''
        routes = np.empty(shape=self.n_buses, dtype=object)
        for i in range(len(routes)):
            routes[i] = np.array(self.start_position)
        visited_stops = np.zeros((self.map.adj_mat.shape[0]), dtype=bool)
        # All buses start from the same start position.
        visited_stops[self.start_position] = True
        while any(not s for s in visited_stops):  # non-visited stops remain
            # Get available stops
            non_visited_stops = [idx for idx, s in enumerate(visited_stops) if s == False]
            # Pick one stop randomly
            stop = random.sample(non_visited_stops, k=1)[0]
            # Pick one route randomly
            route = random.randint(0, len(routes) - 1)
            # Add stop to route
            routes[route] = np.append(routes[route], stop)
            # Update Visited Stops
            visited_stops[stop] = True
        return routes

    def fitness(self):
        '''
        Compute and cache ``fitness_value`` = [max route cost, total cost].
        Guarded by the inf sentinel so the costs are computed only once.
        '''
        if self.fitness_value[0] == np.inf:
            routes_cost = np.zeros(shape=self.routes.shape[0], dtype=float)
            for idx, r in enumerate(self.routes):
                routes_cost[idx] = DNA.get_route_cost(r, self.map)
            self.fitness_value = [max(routes_cost), sum(routes_cost)]

    def get_route_cost(r, _map):
        '''Total travelled distance along route ``r`` on ``_map``.

        NOTE: deliberately defined without ``self``; always invoked as
        ``DNA.get_route_cost(route, map)``.
        '''
        cost = 0.0
        for i in range(r.size - 1):
            cost += _map.get_distance_between_points(r[i], r[i + 1])
        return cost

    def mutation(self, n_perm=4):
        '''
        Mutate in place with ``n_perm`` random operations: either swap one
        stop between two routes, or move a stop from a longer route into a
        route that only holds the start position.
        '''
        for _ in range(n_perm):
            idx_1 = random.randint(0, len(self.routes) - 1)
            idx_2 = random.randint(0, len(self.routes) - 1)
            route_1 = self.routes[idx_1]
            route_2 = self.routes[idx_2]
            if route_1.size > 1:
                pos_1 = random.randint(1, route_1.size - 1)
                if route_2.size > 1:
                    # Swap one stop between the two routes (in-place).
                    pos_2 = random.randint(1, route_2.size - 1)
                    aux = route_1[pos_1]
                    route_1[pos_1] = route_2[pos_2]
                    route_2[pos_2] = aux
                else:
                    # FIX: write the rebuilt routes back into self.routes.
                    # np.append/np.delete return NEW arrays, so the original
                    # code's rebinding of the local variables silently
                    # discarded the move.
                    self.routes[idx_2] = np.append(route_2, route_1[pos_1])
                    self.routes[idx_1] = np.delete(route_1, pos_1)
            else:
                if route_2.size > 1:
                    # Same fix as above: persist the move into self.routes.
                    pos_2 = random.randint(1, route_2.size - 1)
                    self.routes[idx_1] = np.append(route_1, route_2[pos_2])
                    self.routes[idx_2] = np.delete(route_2, pos_2)
                else:
                    # Both routes hold only the start position: nothing to do.
                    pass

    def latter_city(encoded, k):
        '''
        Identify the stop that comes after stop ``k`` in the encoded tour
        (wraps around to the first element).  Defined without ``self``;
        called as ``DNA.latter_city(...)``.
        '''
        idx_of_city = np.where(encoded == k)[0][0]
        if idx_of_city == len(encoded) - 1:
            return encoded[0]
        else:
            return encoded[idx_of_city + 1]

    def former_city(encoded, k):
        '''
        Identify the stop that comes before stop ``k`` in the encoded tour
        (wraps around to the last element).  Defined without ``self``;
        called as ``DNA.former_city(...)``.
        '''
        idx_of_city = np.where(encoded == k)[0][0]
        if idx_of_city == 0:
            return encoded[len(encoded) - 1]
        else:
            return encoded[idx_of_city - 1]

    def encode(self):
        '''
        Encode the routes into a single flat integer array (routes are
        concatenated; each occurrence of the start position marks the
        beginning of the next route).  Used by crossover.
        '''
        aux = np.zeros(shape=0, dtype=int)
        for r in self.routes:
            aux = np.append(aux, r)
        return aux

    def decode(self, encoded_dna):
        '''
        Decode a flat encoded array back into the object array of routes
        (inverse of :meth:`encode`).
        '''
        route = -1
        routes = np.empty(shape=self.n_buses, dtype=object)
        for i in range(len(routes)):
            routes[i] = np.array(self.start_position)
        for idx, el in enumerate(encoded_dna):
            if el == self.start_position:
                route += 1
            else:
                routes[route] = np.append(routes[route], el)
        return routes

    def crossover(parent_1, parent_2, mark, _map):
        '''
        The crossover operation will generate two offspring.
        The first offspring is generated by starting from position
        K then filling the child array based on each parent's distance to
        the next point.
        E.g.
            P_1: 0->2->3->0->4->1
            P_2: 0->3->2->4->0->1
            if K = 3 and mark = 'latter':
                for P_1 the latter stop is 0
                for P_2 the latter stop is 2
            remove K from P_1 and P_2
            dx = distance of K->0
            dy = distance of K->2
            dx is less than dy, so the new K will be 0;
            loop until P_1 and P_2 are empty.
        Child 2 is a random permutation of child 1.

        NOTE: defined without ``self``; called as ``DNA.crossover(...)``.
        '''
        # Get Encoded parents and remove repeated starting
        # positions
        encoded_parent_1 = parent_1.encode()
        encoded_parent_1 = encoded_parent_1[1:len(encoded_parent_1)]
        encoded_parent_2 = parent_2.encode()
        encoded_parent_2 = encoded_parent_2[1:len(encoded_parent_2)]
        length = len(encoded_parent_1)
        # get a random K
        k = encoded_parent_1[random.randint(0, length - 1)]
        # result will store the child 1
        result = np.array([parent_1.start_position, k])
        while length > 1:
            if mark == 'latter':
                x = DNA.latter_city(encoded_parent_1, k)
                y = DNA.latter_city(encoded_parent_2, k)
            elif mark == 'former':
                x = DNA.former_city(encoded_parent_1, k)
                y = DNA.former_city(encoded_parent_2, k)
            # Find K in parents and remove.
            idx_of_k_parent_1 = np.where(encoded_parent_1 == k)[0][0]
            encoded_parent_1 = np.delete(encoded_parent_1, idx_of_k_parent_1)
            idx_of_k_parent_2 = np.where(encoded_parent_2 == k)[0][0]
            encoded_parent_2 = np.delete(encoded_parent_2, idx_of_k_parent_2)
            dx = _map.get_distance_between_points(k, x)
            dy = _map.get_distance_between_points(k, y)
            # Greedy choice: follow whichever parent's successor is closer.
            if dx < dy:
                k = x
            else:
                k = y
            length = len(encoded_parent_1)
            result = np.append(result, k)
        # child 2 generation based on random permutation of Child 1/Result
        child_2_encoded = np.append(result[0], np.random.permutation(result[1:len(result)]))
        child_1 = DNA(parent_1.n_buses, parent_1.map, parent_1.start_position, routes=parent_1.decode(result))
        child_2 = DNA(parent_1.n_buses, parent_1.map, parent_1.start_position, routes=parent_1.decode(child_2_encoded))
        return child_1, child_2
| [
"random.sample",
"numpy.where",
"numpy.delete",
"numpy.append",
"numpy.array",
"numpy.zeros",
"numpy.empty",
"random.randint"
] | [((1125, 1167), 'numpy.empty', 'np.empty', ([], {'shape': 'self.n_buses', 'dtype': 'object'}), '(shape=self.n_buses, dtype=object)\n', (1133, 1167), True, 'import numpy as np\n'), ((1292, 1339), 'numpy.zeros', 'np.zeros', (['self.map.adj_mat.shape[0]'], {'dtype': 'bool'}), '(self.map.adj_mat.shape[0], dtype=bool)\n', (1300, 1339), True, 'import numpy as np\n'), ((4597, 4625), 'numpy.zeros', 'np.zeros', ([], {'shape': '(0)', 'dtype': 'int'}), '(shape=0, dtype=int)\n', (4605, 4625), True, 'import numpy as np\n'), ((4877, 4919), 'numpy.empty', 'np.empty', ([], {'shape': 'self.n_buses', 'dtype': 'object'}), '(shape=self.n_buses, dtype=object)\n', (4885, 4919), True, 'import numpy as np\n'), ((6454, 6492), 'numpy.array', 'np.array', (['[parent_1.start_position, k]'], {}), '([parent_1.start_position, k])\n', (6462, 6492), True, 'import numpy as np\n'), ((1229, 1258), 'numpy.array', 'np.array', (['self.start_position'], {}), '(self.start_position)\n', (1237, 1258), True, 'import numpy as np\n'), ((1978, 2008), 'numpy.append', 'np.append', (['routes[route]', 'stop'], {}), '(routes[route], stop)\n', (1987, 2008), True, 'import numpy as np\n'), ((2308, 2357), 'numpy.zeros', 'np.zeros', ([], {'shape': 'self.routes.shape[0]', 'dtype': 'float'}), '(shape=self.routes.shape[0], dtype=float)\n', (2316, 2357), True, 'import numpy as np\n'), ((4673, 4690), 'numpy.append', 'np.append', (['aux', 'r'], {}), '(aux, r)\n', (4682, 4690), True, 'import numpy as np\n'), ((4981, 5010), 'numpy.array', 'np.array', (['self.start_position'], {}), '(self.start_position)\n', (4989, 5010), True, 'import numpy as np\n'), ((6368, 6397), 'random.randint', 'random.randint', (['(0)', '(length - 1)'], {}), '(0, length - 1)\n', (6382, 6397), False, 'import random\n'), ((6974, 7020), 'numpy.delete', 'np.delete', (['encoded_parent_1', 'idx_of_k_parent_1'], {}), '(encoded_parent_1, idx_of_k_parent_1)\n', (6983, 7020), True, 'import numpy as np\n'), ((7134, 7180), 'numpy.delete', 'np.delete', 
(['encoded_parent_2', 'idx_of_k_parent_2'], {}), '(encoded_parent_2, idx_of_k_parent_2)\n', (7143, 7180), True, 'import numpy as np\n'), ((7469, 7489), 'numpy.append', 'np.append', (['result', 'k'], {}), '(result, k)\n', (7478, 7489), True, 'import numpy as np\n'), ((1761, 1798), 'random.sample', 'random.sample', (['non_visited_stops'], {'k': '(1)'}), '(non_visited_stops, k=1)\n', (1774, 1798), False, 'import random\n'), ((3096, 3131), 'random.randint', 'random.randint', (['(1)', '(route_1.size - 1)'], {}), '(1, route_1.size - 1)\n', (3110, 3131), False, 'import random\n'), ((3950, 3972), 'numpy.where', 'np.where', (['(encoded == k)'], {}), '(encoded == k)\n', (3958, 3972), True, 'import numpy as np\n'), ((4249, 4271), 'numpy.where', 'np.where', (['(encoded == k)'], {}), '(encoded == k)\n', (4257, 4271), True, 'import numpy as np\n'), ((5186, 5214), 'numpy.append', 'np.append', (['routes[route]', 'el'], {}), '(routes[route], el)\n', (5195, 5214), True, 'import numpy as np\n'), ((3196, 3231), 'random.randint', 'random.randint', (['(1)', '(route_2.size - 1)'], {}), '(1, route_2.size - 1)\n', (3210, 3231), False, 'import random\n'), ((3416, 3450), 'numpy.append', 'np.append', (['route_2', 'route_1[pos_1]'], {}), '(route_2, route_1[pos_1])\n', (3425, 3450), True, 'import numpy as np\n'), ((3480, 3505), 'numpy.delete', 'np.delete', (['route_1', 'pos_1'], {}), '(route_1, pos_1)\n', (3489, 3505), True, 'import numpy as np\n'), ((3590, 3625), 'random.randint', 'random.randint', (['(1)', '(route_2.size - 1)'], {}), '(1, route_2.size - 1)\n', (3604, 3625), False, 'import random\n'), ((3674, 3708), 'numpy.append', 'np.append', (['route_1', 'route_2[pos_2]'], {}), '(route_1, route_2[pos_2])\n', (3683, 3708), True, 'import numpy as np\n'), ((3739, 3764), 'numpy.delete', 'np.delete', (['route_2', 'pos_2'], {}), '(route_2, pos_2)\n', (3748, 3764), True, 'import numpy as np\n'), ((6905, 6936), 'numpy.where', 'np.where', (['(encoded_parent_1 == k)'], {}), '(encoded_parent_1 == 
k)\n', (6913, 6936), True, 'import numpy as np\n'), ((7065, 7096), 'numpy.where', 'np.where', (['(encoded_parent_2 == k)'], {}), '(encoded_parent_2 == k)\n', (7073, 7096), True, 'import numpy as np\n')] |
import random
import numpy as np
import math
from PIL import ImageColor
from utils import shadeN
class GearSystem:
    """A stack of randomly-parameterised rotating gears whose compound tip
    position drives a pen location, speed and colour.

    Each gear contributes a rotating vector of fixed radius; the pen sits
    at the vector sum of all gears.  Speeds occasionally jitter or get
    re-randomised (a "regime switch"), at which point the active colour
    pair may change as well.
    """
    def __init__(self, palette):
        # Number of stacked gears.
        self.n_gears = random.choice([2, 3, 4, 5, 6])
        # Signed angular speed per gear: random direction times a magnitude
        # drawn from {0, 0, 1, 2, 4, 8, 16} (0 listed twice -> more likely).
        spin_dir = np.random.choice([-1, 1], self.n_gears)
        spin_mag = np.random.choice([0, 0, 1, 2, 4, 8, 16], self.n_gears)
        self.speeds = 1. * spin_dir * spin_mag
        raw_radii = np.random.uniform(0.05, 1, self.n_gears)
        # this will lead to a strong bias towards smaller gears, while keeping the largest ones:
        #raw_radii = raw_radii**4
        raw_radii = raw_radii / np.sum(raw_radii)
        # Normalise the radii to sum to 0.9 so the figure fills *most* of the image.
        self.radii = raw_radii * 0.9
        self.cur_loc = np.array([sum(self.radii), 0])
        self.current_speed = 0
        # Pre-resolve every palette pair to RGB tuples.
        self.colour_options = []
        for colour_pair in palette.line_options:
            self.colour_options.append([ImageColor.getcolor(v, "RGB") for v in colour_pair])
        self.cur_color_range = self.colour_options[0]
        self.current_colour = None
        self.performed_regime_switch = False
        self.cur_angles = np.zeros(len(self.speeds))

    def step(self, dt):
        """Advance the system by dt: rotate every gear, update pen location
        and speed, derive the colour from the travel direction, and with a
        small probability perturb or re-randomise the gear speeds."""
        self.cur_angles += self.speeds * dt
        # Pen position = vector sum of all gear arms.
        tip_x, tip_y = 0, 0
        for arm, ang in zip(self.radii, self.cur_angles):
            tip_x = tip_x + arm * math.cos(ang)
            tip_y = tip_y + arm * math.sin(ang)
        # Travel since the previous step gives the instantaneous speed.
        move_x = tip_x - self.cur_loc[0]
        move_y = tip_y - self.cur_loc[1]
        self.current_speed = ((move_x ** 2 + move_y ** 2) ** 0.5) / dt
        self.cur_loc = (tip_x, tip_y)
        # Direction of travel, offset by 45 degrees, then scaled onto [0, 1).
        heading = (math.atan2(move_y, move_x) - math.pi / 4) % (2 * math.pi)
        heading = heading / (2 * math.pi)
        self.current_colour = shadeN(colours=(self.cur_color_range[0], self.cur_color_range[1], self.cur_color_range[0]), centers=(0, 0.5, 1), v=heading)
        if random.random() < dt * 0.005:
            if random.random() < 0.5:
                # Small random jitter of every gear speed.
                sigma = np.random.choice([0.002, 0.01, 0.05])
                self.speeds += np.random.normal(0, sigma, self.n_gears)
            else:
                # Regime switch: keep a random subset of the current speeds
                # and redraw the rest from scratch.
                keep = np.random.choice([0, 1], self.n_gears)
                self.speeds = 1. * self.speeds * keep + (1 - keep) * np.random.choice([-1, 1], self.n_gears) * np.random.choice([0, 0, 1, 2, 4, 8, 16], self.n_gears)
                self.performed_regime_switch = True
                self.cur_color_range = random.choice(self.colour_options)
        else:
            self.performed_regime_switch = False
self.performed_regime_switch = False | [
"random.choice",
"numpy.random.choice",
"math.cos",
"numpy.sum",
"math.atan2",
"utils.shadeN",
"numpy.random.uniform",
"PIL.ImageColor.getcolor",
"random.random",
"math.sin"
] | [((166, 196), 'random.choice', 'random.choice', (['[2, 3, 4, 5, 6]'], {}), '([2, 3, 4, 5, 6])\n', (179, 196), False, 'import random\n'), ((323, 363), 'numpy.random.uniform', 'np.random.uniform', (['(0.05)', '(1)', 'self.n_gears'], {}), '(0.05, 1, self.n_gears)\n', (340, 363), True, 'import numpy as np\n'), ((1606, 1732), 'utils.shadeN', 'shadeN', ([], {'colours': '(self.cur_color_range[0], self.cur_color_range[1], self.cur_color_range[0])', 'centers': '(0, 0.5, 1)', 'v': 'angle'}), '(colours=(self.cur_color_range[0], self.cur_color_range[1], self.\n cur_color_range[0]), centers=(0, 0.5, 1), v=angle)\n', (1612, 1732), False, 'from utils import shadeN\n'), ((257, 311), 'numpy.random.choice', 'np.random.choice', (['[0, 0, 1, 2, 4, 8, 16]', 'self.n_gears'], {}), '([0, 0, 1, 2, 4, 8, 16], self.n_gears)\n', (273, 311), True, 'import numpy as np\n'), ((492, 505), 'numpy.sum', 'np.sum', (['radii'], {}), '(radii)\n', (498, 505), True, 'import numpy as np\n'), ((1734, 1749), 'random.random', 'random.random', ([], {}), '()\n', (1747, 1749), False, 'import random\n'), ((217, 256), 'numpy.random.choice', 'np.random.choice', (['[-1, 1]', 'self.n_gears'], {}), '([-1, 1], self.n_gears)\n', (233, 256), True, 'import numpy as np\n'), ((669, 698), 'PIL.ImageColor.getcolor', 'ImageColor.getcolor', (['v', '"""RGB"""'], {}), "(v, 'RGB')\n", (688, 698), False, 'from PIL import ImageColor\n'), ((1128, 1143), 'math.cos', 'math.cos', (['theta'], {}), '(theta)\n', (1136, 1143), False, 'import math\n'), ((1156, 1171), 'math.sin', 'math.sin', (['theta'], {}), '(theta)\n', (1164, 1171), False, 'import math\n'), ((1483, 1501), 'math.atan2', 'math.atan2', (['dy', 'dx'], {}), '(dy, dx)\n', (1493, 1501), False, 'import math\n'), ((1768, 1783), 'random.random', 'random.random', ([], {}), '()\n', (1781, 1783), False, 'import random\n'), ((1998, 2036), 'numpy.random.choice', 'np.random.choice', (['[0, 1]', 'self.n_gears'], {}), '([0, 1], self.n_gears)\n', (2014, 2036), True, 'import numpy as 
np\n'), ((2312, 2346), 'random.choice', 'random.choice', (['self.colour_options'], {}), '(self.colour_options)\n', (2325, 2346), False, 'import random\n'), ((1830, 1867), 'numpy.random.choice', 'np.random.choice', (['[0.002, 0.01, 0.05]'], {}), '([0.002, 0.01, 0.05])\n', (1846, 1867), True, 'import numpy as np\n'), ((2126, 2180), 'numpy.random.choice', 'np.random.choice', (['[0, 0, 1, 2, 4, 8, 16]', 'self.n_gears'], {}), '([0, 0, 1, 2, 4, 8, 16], self.n_gears)\n', (2142, 2180), True, 'import numpy as np\n'), ((2086, 2125), 'numpy.random.choice', 'np.random.choice', (['[-1, 1]', 'self.n_gears'], {}), '([-1, 1], self.n_gears)\n', (2102, 2125), True, 'import numpy as np\n')] |
"""Evaluate an Extreme Learning Machine (ELM) classifier on the Iris
dataset with repeated 3-fold cross-validation, report mean accuracy and
its standard deviation, and plot the confusion matrix of the single
best-scoring fold."""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import datasets
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split, KFold
from sklearn.preprocessing import StandardScaler
from elm import ELM
from mlxtend.plotting import plot_decision_regions, plot_confusion_matrix

iris_datasets = datasets.load_iris()
X = iris_datasets.data
# One-hot encode the 3 iris classes (rows = samples, cols = classes).
Y = np.zeros((iris_datasets.target.shape[0], 3))
Y[np.arange(iris_datasets.target.shape[0]), iris_datasets.target] = 1

iters = 20
n_folds = 3
# FIX: size the accuracy buffer from `iters` instead of the hard-coded 20,
# so changing the iteration count cannot silently mismatch the array.
accuracy = np.zeros((iters, 1))
# best = [confusion matrix of the best fold seen so far, its accuracy]
best = [[], 0]
for i in range(iters):
    # Re-shuffled K-fold split on every repetition.
    CVO = KFold(n_splits=n_folds, shuffle=True)
    acc_values = []
    for train_index, test_index in CVO.split(X):
        X_train, X_test = X[train_index], X[test_index]
        Y_train, Y_test = Y[train_index], Y[test_index]
        # Fit the scaler on the training fold only to avoid test leakage.
        scaler = StandardScaler()
        X_train = scaler.fit_transform(X_train)
        X_test = scaler.transform(X_test)
        # ELM with 10 hidden units; activation can also be "tan".
        elm = ELM(hidden_units=10, activation="log")
        elm.fit(X_train, Y_train)
        Y_hat = np.round(elm.predict(X_test))
        # Fold accuracy: fraction of samples whose argmax class matches.
        acc_values.append(np.sum(np.where(np.argmax(Y_hat, axis=1) == np.argmax(Y_test, axis=1), 1, 0)) / len(Y_test))
        if acc_values[-1] > best[1]:
            best[0] = confusion_matrix(np.argmax(Y_test, axis=1), np.argmax(Y_hat, axis=1))
            best[1] = acc_values[-1]
    accuracy[i] = np.mean(acc_values)

print("Accuracy", np.mean(accuracy))
print("Standard Deviation (accuracy)", np.std(accuracy, axis=0))
conf_matrix = best[0]
print("Confusion Matrix")
print(conf_matrix)
labels = ["Setosa", "Versicolor", "Virginica"]
fig, ax = plot_confusion_matrix(conf_mat=conf_matrix)
ax.set_xticklabels([""] + labels)
ax.set_yticklabels([""] + labels)
plt.xlabel("Predicted")
plt.ylabel("Desired")
plt.show()
| [
"sklearn.datasets.load_iris",
"numpy.mean",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"numpy.std",
"numpy.argmax",
"sklearn.preprocessing.StandardScaler",
"numpy.zeros",
"mlxtend.plotting.plot_confusion_matrix",
"elm.ELM",
"sklearn.model_selection.KFold",
"numpy.round",
"matplo... | [((391, 411), 'sklearn.datasets.load_iris', 'datasets.load_iris', ([], {}), '()\n', (409, 411), False, 'from sklearn import datasets\n'), ((439, 483), 'numpy.zeros', 'np.zeros', (['(iris_datasets.target.shape[0], 3)'], {}), '((iris_datasets.target.shape[0], 3))\n', (447, 483), True, 'import numpy as np\n'), ((604, 621), 'numpy.zeros', 'np.zeros', (['(20, 1)'], {}), '((20, 1))\n', (612, 621), True, 'import numpy as np\n'), ((1806, 1849), 'mlxtend.plotting.plot_confusion_matrix', 'plot_confusion_matrix', ([], {'conf_mat': 'conf_matrix'}), '(conf_mat=conf_matrix)\n', (1827, 1849), False, 'from mlxtend.plotting import plot_decision_regions, plot_confusion_matrix\n'), ((1918, 1941), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Predicted"""'], {}), "('Predicted')\n", (1928, 1941), True, 'import matplotlib.pyplot as plt\n'), ((1942, 1963), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Desired"""'], {}), "('Desired')\n", (1952, 1963), True, 'import matplotlib.pyplot as plt\n'), ((1964, 1974), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1972, 1974), True, 'import matplotlib.pyplot as plt\n'), ((694, 731), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'n_folds', 'shuffle': '(True)'}), '(n_splits=n_folds, shuffle=True)\n', (699, 731), False, 'from sklearn.model_selection import train_test_split, KFold\n'), ((1557, 1576), 'numpy.mean', 'np.mean', (['acc_values'], {}), '(acc_values)\n', (1564, 1576), True, 'import numpy as np\n'), ((1596, 1613), 'numpy.mean', 'np.mean', (['accuracy'], {}), '(accuracy)\n', (1603, 1613), True, 'import numpy as np\n'), ((1654, 1678), 'numpy.std', 'np.std', (['accuracy'], {'axis': '(0)'}), '(accuracy, axis=0)\n', (1660, 1678), True, 'import numpy as np\n'), ((933, 949), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (947, 949), False, 'from sklearn.preprocessing import StandardScaler\n'), ((1055, 1093), 'elm.ELM', 'ELM', ([], {'hidden_units': '(10)', 'activation': 
'"""log"""'}), "(hidden_units=10, activation='log')\n", (1058, 1093), False, 'from elm import ELM\n'), ((1238, 1253), 'numpy.round', 'np.round', (['Y_hat'], {}), '(Y_hat)\n', (1246, 1253), True, 'import numpy as np\n'), ((1449, 1474), 'numpy.argmax', 'np.argmax', (['Y_test'], {'axis': '(1)'}), '(Y_test, axis=1)\n', (1458, 1474), True, 'import numpy as np\n'), ((1476, 1500), 'numpy.argmax', 'np.argmax', (['Y_hat'], {'axis': '(1)'}), '(Y_hat, axis=1)\n', (1485, 1500), True, 'import numpy as np\n'), ((1298, 1322), 'numpy.argmax', 'np.argmax', (['Y_hat'], {'axis': '(1)'}), '(Y_hat, axis=1)\n', (1307, 1322), True, 'import numpy as np\n'), ((1326, 1351), 'numpy.argmax', 'np.argmax', (['Y_test'], {'axis': '(1)'}), '(Y_test, axis=1)\n', (1335, 1351), True, 'import numpy as np\n')] |
import time
from queue import Queue
from data_sender import send
import argparse
from threading import Thread, Event
import cv2
import numpy as np
from redis import Redis
import pickle
import torch
from models import TSN
from baseline_rpc_rgb import make_ucf, make_infer, make_hmdb
from sklearn.metrics import confusion_matrix
import os
#### DEVICE ####
def streaming(video_path=None, stop_event=None):
    """Record the Pi camera in rolling 2-second .h264 segments.

    Args:
        video_path: directory the segments are written to; defaults to
            './device-video' when None.
        stop_event: optional threading.Event; when set, recording stops.

    FIX: __main__ launches this as
    ``Thread(target=streaming, args=(args.video_path, need_stop))``, which
    the original zero-argument signature could not accept (the thread died
    with a TypeError on start).  Both parameters default so direct calls
    keep working.
    """
    # Device-only dependency, imported lazily so hub-side code (which never
    # calls streaming) can still import this module.  The original called
    # picamera without any import at all.
    import picamera
    if video_path is None:
        video_path = './device-video'
    while stop_event is None or not stop_event.is_set():
        with picamera.PiCamera() as camera:
            camera.resolution = (224, 224)
            camera.framerate = 40
            camera.start_recording('%s/1.h264' % video_path)
            camera.wait_recording(2)
            counter = 1
            # Keep splitting into new numbered segments every 2 seconds.
            while stop_event is None or not stop_event.is_set():
                counter += 1
                camera.split_recording('%s/%d.h264' % (video_path, counter))
                camera.wait_recording(2)
            camera.stop_recording()
#### DEVICE ####
def data_send():
    """Forward recorded camera segments ("<n>.h264") to the hub, in numeric
    order, via send().

    Polls the recording directory forever; `counter` is the index of the
    next file to transmit, so each file is sent at most once.
    """
    counter = 0
    while True:
        entries = os.listdir('./device-video')
        # Sort by the integer stem so "10.h264" follows "9.h264".
        stems = sorted(int(name.split('.')[0]) for name in entries)
        ordered = [str(stem) + '.h264' for stem in stems]
        # Require at least two files: the newest one may still be recording.
        if len(entries) > 1 and len(entries) > counter:
            item = ordered[counter]
            # NOTE(review): the listing above reads './device-video' but the
            # payload is opened from '/home/pi/venv/video' -- confirm both
            # resolve to the same directory on the device, otherwise this is
            # a path bug.
            path = os.path.join('/home/pi/venv/video', item)
            # FIX: context manager closes the handle; the original leaked one
            # open file descriptor per transmitted segment.
            with open(path, 'rb') as segment:
                payload = segment.read()
            if payload != b'':
                send('Frame', payload)
            counter += 1
#### HUB ####
def receive_and_save(rgb_net, redis):
    """Pop pickled video blobs off the redis 'Frame' list and persist each
    one to ./video/<counter>.h264 for the inference thread to consume.

    Args:
        rgb_net: unused here; kept for interface compatibility with the
            thread setup in __main__.
        redis: connected Redis client providing the 'Frame' list.
    """
    counter = 1
    while True:
        # NOTE: busy-waits (no sleep) while the queue is empty.
        if redis.llen('Frame') > 0:
            start_redis = time.time()
            incoming_video = redis.lpop('Frame')
            stop_redis = time.time()
            print("TYPE OF FRAME: ", type(incoming_video), "Popping Time: ", stop_redis - start_redis)
            start_load = time.time()
            # SECURITY: pickle.loads on data received over the network is
            # unsafe against untrusted peers; only acceptable on a trusted
            # LAN between the Pi and the hub.
            video = pickle.loads(incoming_video)
            end_load = time.time()
            # FIX: with-block closes the file handle; the original leaked one
            # descriptor per received segment.
            with open('./video/%d.h264' % counter, 'wb+') as out:
                opened = time.time()
                out.write(video)
                write_file = time.time()
            print("[Pickle Loading Time: ]", end_load - start_load)
            print("[Video Open Time: ]", opened - end_load)
            print("[Video Write Time: ]", write_file - opened)
            counter += 1
#### HUB ####
def read_file_and_run():
    """Hub-side consumer: read saved ./video/<n>.h264 segments in numeric
    order, decode their frames with OpenCV, and run TSN RGB inference on
    each fully-read segment.

    Relies on module-level globals assigned in __main__: ``args``,
    ``rgb_net`` and ``num_class``.
    """
    counter = 0
    while True:
        entries = sorted(os.listdir('./video'))
        # Sort by the integer stem so "10.h264" follows "9.h264".
        stems = sorted(int(name.split('.')[0]) for name in entries)
        ordered = [str(stem) + '.h264' for stem in stems]
        # Require at least two files: the newest may still be being written.
        if len(entries) > 1 and len(entries) > counter:
            item = ordered[counter]
            path = os.path.join('./video', item)
            start_capture = time.time()
            cap = cv2.VideoCapture(path)
            end_capture = time.time()
            print("[VIDEO CAPTURE OF ", item, end_capture - start_capture)
            counter += 1
            frames = []
            before_opening_cap = time.time()
            try:
                while cap.isOpened():
                    ret, frame = cap.read()
                    new_frame = time.time()
                    if ret == True:
                        frames.append(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
                        print("[APPENDING ONE FRAME EACH]: ", time.time() - new_frame)
                    else:
                        print("DONE READING ", item)
                        ready_to_infer = time.time()
                        # frames[1:] skips the first decoded frame, matching
                        # the original pipeline.
                        rst = make_infer(args.rgb_weights, frames[1:], rgb_net, 'RGB', args.test_segments, num_class)
                        inferred = time.time()
                        print("[TIME WHEN ALL FRAMES ARE READY]:{}, [INF TIME]: {}".format(ready_to_infer - before_opening_cap, inferred - ready_to_infer))
                        tmp_rst = np.argmax(np.mean(rst, axis=0))
                        print(make_hmdb()[tmp_rst])
                        break
            finally:
                # FIX: release the capture; the original leaked one
                # cv2.VideoCapture handle per processed segment.
                cap.release()
if __name__ == '__main__':
    # CLI: video location, whether this process is the hub or the camera
    # device, and the TSN model hyper-parameters.
    parser = argparse.ArgumentParser(description='Sending streaming images from pi to hub')
    parser.add_argument('--video_path', type=str)
    parser.add_argument('--hub_device', type=str, help='Specify where this will be run')
    parser.add_argument('--rgb_weights', type=str)
    parser.add_argument('--dataset', type=str, default='ucf101')
    parser.add_argument('--arch', type=str, default='BNInception')
    parser.add_argument('--crop_fusion_type', type=str, default='avg', choices=['avg', 'max', 'topk'])
    parser.add_argument('--dropout', type=float, default=0.7)
    parser.add_argument('--test_segments', type=int, default=5)
    args = parser.parse_args()
    # Number of action classes depends on the chosen dataset.
    if args.dataset == 'ucf101':
        num_class = 101
    elif args.dataset == 'hmdb51':
        num_class = 51
    else:
        raise ValueError('Unknown dataset' + args.dataset)
    # Build the TSN RGB model and load the checkpoint weights.
    rgb_net = TSN(num_class, 1, 'RGB',
              base_model=args.arch,
              consensus_type=args.crop_fusion_type,
              dropout=args.dropout)
    rgb_checkpoint = torch.load(args.rgb_weights)
    print("model epoch {} best prec@1: {}".format(rgb_checkpoint['epoch'], rgb_checkpoint['best_prec1']))
    # Strip the leading "module." prefix left by DataParallel checkpoints.
    base_dict = {'.'.join(k.split('.')[1:]): v for k,v in list(rgb_checkpoint['state_dict'].items())}
    rgb_net.load_state_dict(base_dict)
    # NOTE(review): neither worker thread ever appends to `output`/`label`,
    # so the confusion_matrix call at the bottom receives two empty lists
    # on the Hub path -- presumably a fuller version populated these.
    output = []
    label = []
    # NOTE(review): redis_queue is created but never used.
    redis_queue = Queue()
    need_stop = Event()
    # NOTE(review): '172.16.17.326' is not a valid IPv4 address (octet 326);
    # looks like an anonymisation placeholder -- replace with the real hub
    # address before deploying.
    host_address = '172.16.17.326'
    redis = Redis(host_address)
    redis.flushall()
    if args.hub_device == 'Hub':
        import shutil
        # Wipe any segments left over from a previous run.
        shutil.rmtree('./video')
        os.mkdir('./video')
        # Hub runs the receiver and the inference consumer concurrently.
        jobs = [ Thread(target=receive_and_save, args=(rgb_net, redis)),
                 Thread(target=read_file_and_run)]
    else:
        # Device side only records and streams.
        jobs = [ Thread(target=streaming, args=(args.video_path, need_stop))]
    [job.start() for job in jobs]
    [job.join() for job in jobs]
    print("Terminating..")
    if args.hub_device == 'Hub':
        # Per-class accuracy report from the collected predictions.
        cf = confusion_matrix(label, output).astype(float)
        cls_cnt = cf.sum(axis=1)
        cls_hit = np.diag(cf)
        print("CLS CNT, HIT", cls_cnt, cls_hit)
        cls_acc = cls_hit / cls_cnt
        print('Accuracy {:.02f}%'.format(np.mean(cls_acc) * 100))
| [
"pickle.loads",
"baseline_rpc_rgb.make_hmdb",
"models.TSN",
"numpy.mean",
"os.listdir",
"argparse.ArgumentParser",
"data_sender.send",
"os.mkdir",
"sklearn.metrics.confusion_matrix",
"baseline_rpc_rgb.make_infer",
"redis.Redis",
"cv2.cvtColor",
"time.time",
"torch.load",
"os.path.join",
... | [((4182, 4260), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Sending streaming images from pi to hub"""'}), "(description='Sending streaming images from pi to hub')\n", (4205, 4260), False, 'import argparse\n'), ((5048, 5159), 'models.TSN', 'TSN', (['num_class', '(1)', '"""RGB"""'], {'base_model': 'args.arch', 'consensus_type': 'args.crop_fusion_type', 'dropout': 'args.dropout'}), "(num_class, 1, 'RGB', base_model=args.arch, consensus_type=args.\n crop_fusion_type, dropout=args.dropout)\n", (5051, 5159), False, 'from models import TSN\n'), ((5230, 5258), 'torch.load', 'torch.load', (['args.rgb_weights'], {}), '(args.rgb_weights)\n', (5240, 5258), False, 'import torch\n'), ((5566, 5573), 'queue.Queue', 'Queue', ([], {}), '()\n', (5571, 5573), False, 'from queue import Queue\n'), ((5590, 5597), 'threading.Event', 'Event', ([], {}), '()\n', (5595, 5597), False, 'from threading import Thread, Event\n'), ((5650, 5669), 'redis.Redis', 'Redis', (['host_address'], {}), '(host_address)\n', (5655, 5669), False, 'from redis import Redis\n'), ((926, 954), 'os.listdir', 'os.listdir', (['"""./device-video"""'], {}), "('./device-video')\n", (936, 954), False, 'import os\n'), ((1611, 1622), 'time.time', 'time.time', ([], {}), '()\n', (1620, 1622), False, 'import time\n'), ((5763, 5787), 'shutil.rmtree', 'shutil.rmtree', (['"""./video"""'], {}), "('./video')\n", (5776, 5787), False, 'import shutil\n'), ((5796, 5815), 'os.mkdir', 'os.mkdir', (['"""./video"""'], {}), "('./video')\n", (5804, 5815), False, 'import os\n'), ((6267, 6278), 'numpy.diag', 'np.diag', (['cf'], {}), '(cf)\n', (6274, 6278), True, 'import numpy as np\n'), ((1240, 1281), 'os.path.join', 'os.path.join', (['"""/home/pi/venv/video"""', 'item'], {}), "('/home/pi/venv/video', item)\n", (1252, 1281), False, 'import os\n'), ((1685, 1696), 'time.time', 'time.time', ([], {}), '()\n', (1694, 1696), False, 'import time\n'), ((1771, 1782), 'time.time', 'time.time', ([], {}), '()\n', 
(1780, 1782), False, 'import time\n'), ((1909, 1920), 'time.time', 'time.time', ([], {}), '()\n', (1918, 1920), False, 'import time\n'), ((1941, 1969), 'pickle.loads', 'pickle.loads', (['incoming_video'], {}), '(incoming_video)\n', (1953, 1969), False, 'import pickle\n'), ((1993, 2004), 'time.time', 'time.time', ([], {}), '()\n', (2002, 2004), False, 'import time\n'), ((2081, 2092), 'time.time', 'time.time', ([], {}), '()\n', (2090, 2092), False, 'import time\n'), ((2145, 2156), 'time.time', 'time.time', ([], {}), '()\n', (2154, 2156), False, 'import time\n'), ((2525, 2546), 'os.listdir', 'os.listdir', (['"""./video"""'], {}), "('./video')\n", (2535, 2546), False, 'import os\n'), ((2832, 2861), 'os.path.join', 'os.path.join', (['"""./video"""', 'item'], {}), "('./video', item)\n", (2844, 2861), False, 'import os\n'), ((2890, 2901), 'time.time', 'time.time', ([], {}), '()\n', (2899, 2901), False, 'import time\n'), ((2920, 2942), 'cv2.VideoCapture', 'cv2.VideoCapture', (['PATH'], {}), '(PATH)\n', (2936, 2942), False, 'import cv2\n'), ((2969, 2980), 'time.time', 'time.time', ([], {}), '()\n', (2978, 2980), False, 'import time\n'), ((3147, 3158), 'time.time', 'time.time', ([], {}), '()\n', (3156, 3158), False, 'import time\n'), ((5833, 5887), 'threading.Thread', 'Thread', ([], {'target': 'receive_and_save', 'args': '(rgb_net, redis)'}), '(target=receive_and_save, args=(rgb_net, redis))\n', (5839, 5887), False, 'from threading import Thread, Event\n'), ((5906, 5938), 'threading.Thread', 'Thread', ([], {'target': 'read_file_and_run'}), '(target=read_file_and_run)\n', (5912, 5938), False, 'from threading import Thread, Event\n'), ((5967, 6026), 'threading.Thread', 'Thread', ([], {'target': 'streaming', 'args': '(args.video_path, need_stop)'}), '(target=streaming, args=(args.video_path, need_stop))\n', (5973, 6026), False, 'from threading import Thread, Event\n'), ((1411, 1434), 'data_sender.send', 'send', (['"""Frame"""', 'encoding'], {}), "('Frame', encoding)\n", (1415, 
1434), False, 'from data_sender import send\n'), ((3261, 3272), 'time.time', 'time.time', ([], {}), '()\n', (3270, 3272), False, 'import time\n'), ((6170, 6201), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['label', 'output'], {}), '(label, output)\n', (6186, 6201), False, 'from sklearn.metrics import confusion_matrix\n'), ((3333, 3371), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2RGB'], {}), '(frame, cv2.COLOR_BGR2RGB)\n', (3345, 3371), False, 'import cv2\n'), ((3458, 3469), 'time.time', 'time.time', ([], {}), '()\n', (3467, 3469), False, 'import time\n'), ((3661, 3672), 'time.time', 'time.time', ([], {}), '()\n', (3670, 3672), False, 'import time\n'), ((3699, 3801), 'baseline_rpc_rgb.make_infer', 'make_infer', (['args.rgb_weights', 'tmp_stack_frames[1:]', 'rgb_net', '"""RGB"""', 'args.test_segments', 'num_class'], {}), "(args.rgb_weights, tmp_stack_frames[1:], rgb_net, 'RGB', args.\n test_segments, num_class)\n", (3709, 3801), False, 'from baseline_rpc_rgb import make_ucf, make_infer, make_hmdb\n'), ((3828, 3839), 'time.time', 'time.time', ([], {}), '()\n', (3837, 3839), False, 'import time\n'), ((6405, 6421), 'numpy.mean', 'np.mean', (['cls_acc'], {}), '(cls_acc)\n', (6412, 6421), True, 'import numpy as np\n'), ((4028, 4048), 'numpy.mean', 'np.mean', (['rst'], {'axis': '(0)'}), '(rst, axis=0)\n', (4035, 4048), True, 'import numpy as np\n'), ((4076, 4087), 'baseline_rpc_rgb.make_hmdb', 'make_hmdb', ([], {}), '()\n', (4085, 4087), False, 'from baseline_rpc_rgb import make_ucf, make_infer, make_hmdb\n')] |
#Module cGSSD implements complex generalized CCSD for the Hubbard Hamiltonian in N^5 scaling
#by exploiting the sparsity of the two-electron integrals for Hubbard. Derivation and factorization
#of the spin-orbital CCSD equations were worked out by <NAME>.
import numpy as np
#import cCCutils
import CCDutils
import CCSDutils
import os
import pickle
from scf import onee_MO_tran
def ccsd(ham,ampfile="none"):
    """Solve the complex generalized (spin-orbital) CCSD equations for Hubbard.

    Exploits the sparsity of the Hubbard two-electron integrals (on-site U
    only) so the amplitude equations scale as N^5 rather than N^6.

    Parameters
    ----------
    ham : Hamiltonian object (project type).  Must have ``hamtype ==
        'Hubbard'``.  A UHF reference (``ham.wfn_type == 'uhf'``) is
        converted in place to a generalized (GHF) spin-orbital basis.
    ampfile : str, optional
        Path of a pickle file used to read initial T2/T1 amplitudes and to
        write the converged ones.  ``"none"`` (default) disables
        checkpointing.

    Side effects
    ------------
    Mutates ``ham`` (F, C, nso, nocc, nvirt, wfn_type) during the UHF->GHF
    conversion and stores the converged correlation energy in ``ham.ecorr``.
    """
    if (ham.hamtype != 'Hubbard'):
        print("This routine should only be used for Hubbard in the spin-orbital basis.")
    if (ham.wfn_type == 'uhf'):
        print("converting One-electron Integrals to spin-orbital basis")
        # We build the 2-e integrals on the fly,
        # so will only build the spin-orbital Fock matrix and MO coefficients.
        Fa_ao = onee_MO_tran(ham.F_a,np.linalg.inv(ham.C_a))
        Fb_ao = onee_MO_tran(ham.F_b,np.linalg.inv(ham.C_b))
        nvirta = ham.nbas-ham.nocca
        nvirtb = ham.nbas-ham.noccb
        C = np.zeros([ham.nbas*2,ham.nbas*2])
        F = np.zeros([ham.nbas*2,ham.nbas*2])
        # Build spin-orbital MO coefficients: occupied alpha, occupied beta,
        # then virtual alpha, virtual beta.
        C[:ham.nbas,:ham.nocca] = ham.C_a[:,:ham.nocca]
        C[ham.nbas:,ham.nocca:(ham.nocca+ham.noccb)] = ham.C_b[:,:ham.noccb]
        C[:ham.nbas,(ham.nocca+ham.noccb):(ham.nocca+ham.noccb+nvirta)] = ham.C_a[:,ham.nocca:]
        C[ham.nbas:,(ham.nocca+ham.noccb+nvirta):] = ham.C_b[:,ham.noccb:]
        # Build spin-orbital Fock matrix (block diagonal in spin in the AO
        # basis) and transform it to the spin-orbital MO basis.
        F[:ham.nbas,:ham.nbas] = Fa_ao
        F[ham.nbas:,ham.nbas:] = Fb_ao
        F = onee_MO_tran(F,C)
        print(np.diag(F))
        ham.F = np.copy(F)
        ham.C = np.copy(C)
        ham.nso = 2*ham.nbas
        ham.nocc = ham.nocca + ham.noccb
        ham.nvirt = ham.nvirta + ham.nvirtb
        ham.wfn_type = 'ghf'
    if (ham.F.dtype ==float):
        print("Converting real integrals to complex")
        # NOTE: the deprecated ``np.complex`` alias (removed in NumPy 1.24)
        # is replaced by the builtin ``complex`` throughout this routine.
        ham.F = ham.F.astype(complex)
        ham.C = ham.C.astype(complex)
    # Initialize/get amplitudes
    if ((ampfile != 'none') and(os.path.isfile(ampfile))):
        with open(ampfile, 'rb') as f:
            T2 = pickle.load(f)
            T1 = pickle.load(f)
    else:
        T2 = np.zeros([ham.nocc,ham.nocc,ham.nvirt,ham.nvirt],dtype=complex)
        T1 = np.zeros([ham.nocc,ham.nvirt],dtype=complex)
    # Set up for DIIS extrapolation of the doubles and singles amplitudes.
    diis_start, diis_dim, Errors, T2s, Err_vec = CCDutils.diis_setup(ham.nocc,ham.nvirt,ham.F.dtype)
    T1Errors, T1s, T1Err_vec = CCSDutils.diis_singles_setup(ham.nocc,ham.nvirt,diis_start,diis_dim,ham.F.dtype)
    # Build some initial intermediates.  ham.C is AOxMO, with alpha in the
    # first nbas rows, followed by beta.
    C_up = ham.C[:ham.nbas,:]
    C_down = ham.C[ham.nbas:,:]
    C_upq = np.zeros((ham.nbas,ham.nso,ham.nso),dtype=complex)
    C_pqu = np.zeros((ham.nso,ham.nso,ham.nbas),dtype=complex)
    C_up_up_pqu = np.zeros((ham.nso,ham.nso,ham.nbas),dtype=complex)
    C_up_down_pqu= np.zeros((ham.nso,ham.nso,ham.nbas),dtype=complex)
    C_down_up_pqu= np.zeros((ham.nso,ham.nso,ham.nbas),dtype=complex)
    C_down_down_pqu= np.zeros((ham.nso,ham.nso,ham.nbas),dtype=complex)
    # Site-factorized coefficient products; these carry all of the Hubbard
    # two-electron integral structure (one site index u instead of four
    # orbital indices), which is what gives the N^5 scaling.
    for u in range(ham.nbas):
        for p in range(ham.nso):
            for q in range(ham.nso):
                C_upq[u,p,q] = C_up[u,p]*C_down[u,q] - C_down[u,p]*C_up[u,q]
                C_pqu[p,q,u] = (np.conj(C_up[u,p])*np.conj(C_down[u,q])
                              - np.conj(C_down[u,p])*np.conj(C_up[u,q]))
                C_up_up_pqu[p,q,u] = np.conj(C_up[u,p])*C_up[u,q]
                C_up_down_pqu[p,q,u] = np.conj(C_down[u,p])*C_up[u,q]
                C_down_up_pqu[p,q,u] = np.conj(C_up[u,p])*C_down[u,q]
                C_down_down_pqu[p,q,u] = np.conj(C_down[u,p])*C_down[u,q]
    print("Beginning Complex GCCSD Iterations")
    eold = 0.0e0
    error = 1.0
    tol = 1.0e-8
    damping = 2
    niter = 1
    while (error > tol):
        # DIIS-extrapolate amplitudes from the stored history.
        T2, Errors, T2s = CCDutils.diis(diis_start,diis_dim,niter,Errors,T2s,T2,Err_vec)
        T1, T1Errors, T1s = CCSDutils.diis_singles(diis_start,diis_dim,niter,T1Errors,T1s,T1,T1Err_vec)
        # Effective amplitude intermediates (Tau = T2 + disconnected T1*T1).
        Tau = T2 + np.einsum('ia,jb->ijab',T1,T1) - np.einsum('ib,ja->ijab',T1,T1)
        Tau_uij = np.einsum('ijab,uab->uij',Tau,C_upq[:,ham.nocc:,ham.nocc:])
        Tau_abu = np.einsum('ijab,iju->abu',Tau,C_pqu[:ham.nocc,:ham.nocc,:])
        T_up_up_bju = np.einsum('ijab,iau->bju',T2,C_up_up_pqu[:ham.nocc,ham.nocc:,:])
        T_up_down_bju = np.einsum('ijab,iau->bju',T2,C_up_down_pqu[:ham.nocc,ham.nocc:,:])
        T_down_up_bju = np.einsum('ijab,iau->bju',T2,C_down_up_pqu[:ham.nocc,ham.nocc:,:])
        T_down_down_bju = np.einsum('ijab,iau->bju',T2,C_down_down_pqu[:ham.nocc,ham.nocc:,:])
        T_up_up_aiu = np.einsum('ijab,jbu->aiu',T2,C_up_up_pqu[:ham.nocc,ham.nocc:,:])
        T_up_down_aiu = np.einsum('ijab,jbu->aiu',T2,C_up_down_pqu[:ham.nocc,ham.nocc:,:])
        T_down_up_aiu = np.einsum('ijab,jbu->aiu',T2,C_down_up_pqu[:ham.nocc,ham.nocc:,:])
        T_down_down_aiu = np.einsum('ijab,jbu->aiu',T2,C_down_down_pqu[:ham.nocc,ham.nocc:,:])
        Tau_up_iu = np.zeros((ham.nocc,ham.nbas),dtype=complex)
        Tau_down_iu = np.zeros((ham.nocc,ham.nbas),dtype=complex)
        T_up_iu = np.zeros((ham.nocc,ham.nbas),dtype=complex)
        T_down_iu = np.zeros((ham.nocc,ham.nbas),dtype=complex)
        for i in range(ham.nocc):
            for u in range(ham.nbas):
                Tau_up_iu[i,u] = np.einsum('j,j',Tau_uij[u,i,:],np.conj(C_up[u,:ham.nocc]))
                Tau_down_iu[i,u] = np.einsum('j,j',Tau_uij[u,i,:],np.conj(C_down[u,:ham.nocc]))
                T_up_iu[i,u] = np.einsum('a,a',T1[i,:],C_up[u,ham.nocc:])
                T_down_iu[i,u] = np.einsum('a,a',T1[i,:],C_down[u,ham.nocc:])
        Tau_up_ua = np.zeros((ham.nbas,ham.nvirt),dtype=complex)
        Tau_down_ua = np.zeros((ham.nbas,ham.nvirt),dtype=complex)
        T_up_ua = np.zeros((ham.nbas,ham.nvirt),dtype=complex)
        T_down_ua = np.zeros((ham.nbas,ham.nvirt),dtype=complex)
        for u in range(ham.nbas):
            for a in range(ham.nvirt):
                Tau_up_ua[u,a] = np.einsum('b,b',Tau_abu[a,:,u],C_up[u,ham.nocc:])
                Tau_down_ua[u,a] = np.einsum('b,b',Tau_abu[a,:,u],C_down[u,ham.nocc:])
                T_up_ua[u,a] = np.einsum('i,i',T1[:,a],np.conj(C_up[u,:ham.nocc]))
                T_down_ua[u,a] = np.einsum('i,i',T1[:,a],np.conj(C_down[u,:ham.nocc]))
        T1_up_up_u = np.zeros((ham.nbas),dtype=complex)
        T1_up_down_u = np.zeros((ham.nbas),dtype=complex)
        T1_down_up_u = np.zeros((ham.nbas),dtype=complex)
        T1_down_down_u = np.zeros((ham.nbas),dtype=complex)
        for u in range(ham.nbas):
            T1_up_up_u[u] = np.einsum('ia,ia',C_up_up_pqu[:ham.nocc,ham.nocc:,u],T1)
            T1_up_down_u[u] = np.einsum('ia,ia',C_up_down_pqu[:ham.nocc,ham.nocc:,u],T1)
            T1_down_up_u[u] = np.einsum('ia,ia',C_down_up_pqu[:ham.nocc,ham.nocc:,u],T1)
            T1_down_down_u[u] = np.einsum('ia,ia',C_down_down_pqu[:ham.nocc,ham.nocc:,u],T1)
        # Intermediates for the singles equations
        J_kc = np.einsum('kcu,u->kc',C_up_up_pqu[:ham.nocc,ham.nocc:,:],T1_down_down_u)
        J_kc -= np.einsum('kcu,u->kc',C_up_down_pqu[:ham.nocc,ham.nocc:,:],T1_down_up_u)
        J_kc -= np.einsum('kcu,u->kc',C_down_up_pqu[:ham.nocc,ham.nocc:,:],T1_up_down_u)
        J_kc += np.einsum('kcu,u->kc',C_down_down_pqu[:ham.nocc,ham.nocc:,:],T1_up_up_u)
        J_kc *= ham.U
        J_ac = np.einsum('uc,ua->ac',C_up[:,ham.nocc:],Tau_down_ua)
        J_ac -= np.einsum('uc,ua->ac',C_down[:,ham.nocc:],Tau_up_ua)
        J_ac *= ham.U
        J_ki = np.einsum('uk,iu->ki',np.conj(C_up[:,:ham.nocc]),Tau_down_iu)
        J_ki -= np.einsum('uk,iu->ki',np.conj(C_down[:,:ham.nocc]),Tau_up_iu)
        J_ki *= ham.U
        # Singles residual G1
        G1 = np.copy(ham.F[:ham.nocc,ham.nocc:])
        F_offdiag = ham.F - np.diag(np.diag(ham.F))
        G1 += np.einsum('ac,ic->ia',F_offdiag[ham.nocc:,ham.nocc:],T1)
        G1 -= np.einsum('ki,ka->ia',F_offdiag[:ham.nocc,:ham.nocc],T1)
        G1 -= np.einsum('kc,ikac->ia',ham.F[:ham.nocc,ham.nocc:],(np.einsum('ic,ka->ikac',T1,T1) - T2))
        G1 += np.einsum('kc,ikac->ia',J_kc,(np.einsum('ic,ka->ikac',T1,T1) + T2))
        G1 -= 0.5e0*np.einsum('ic,ac->ia',T1,J_ac)
        G1 -= 0.5e0*np.einsum('ka,ki->ia',T1,J_ki)
        G1 += 0.5e0*ham.U*(np.einsum('ua,iu->ia',np.conj(C_up[:,ham.nocc:]),Tau_down_iu)
                         - np.einsum('ua,iu->ia',np.conj(C_down[:,ham.nocc:]),Tau_up_iu))
        G1 -= 0.5e0*ham.U*(np.einsum('ui,ua->ia',C_up[:,:ham.nocc],Tau_down_ua)
                         - np.einsum('ui,ua->ia',C_down[:,:ham.nocc],Tau_up_ua))
        G1 += ham.U*(np.einsum('aiu,u->ia',C_up_up_pqu[ham.nocc:,:ham.nocc,:],T1_down_down_u)
                   - np.einsum('aiu,u->ia',C_up_down_pqu[ham.nocc:,:ham.nocc,:],T1_down_up_u)
                   - np.einsum('aiu,u->ia',C_down_up_pqu[ham.nocc:,:ham.nocc,:],T1_up_down_u)
                   + np.einsum('aiu,u->ia',C_down_down_pqu[ham.nocc:,:ham.nocc,:],T1_up_up_u))
        # Intermediates for doubles equations
        X_up_up_aiu = C_up_up_pqu[ham.nocc:,:ham.nocc,:] + 0.50e0*T_up_up_bju
        X_up_down_aiu = C_up_down_pqu[ham.nocc:,:ham.nocc,:] + 0.50e0*T_up_down_bju
        X_down_up_aiu = C_down_up_pqu[ham.nocc:,:ham.nocc,:] + 0.50e0*T_down_up_bju
        X_down_down_aiu = C_down_down_pqu[ham.nocc:,:ham.nocc,:] + 0.50e0*T_down_down_bju
#        X_up_up_aiu = C_up_up_pqu[ham.nocc:,:ham.nocc,:] + 0.50e0*T_up_up_aiu
#        X_up_down_aiu = C_up_down_pqu[ham.nocc:,:ham.nocc,:] + 0.50e0*T_up_down_aiu
#        X_down_up_aiu = C_down_up_pqu[ham.nocc:,:ham.nocc,:] + 0.50e0*T_down_up_aiu
#        X_down_down_aiu = C_down_down_pqu[ham.nocc:,:ham.nocc,:] + 0.50e0*T_down_down_aiu
        for a in range(ham.nvirt):
            aa = a + ham.nocc
            for i in range(ham.nocc):
                for u in range(ham.nbas):
                    X_up_up_aiu[a,i,u] -= T_up_iu[i,u]*T_up_ua[u,a]
                    X_up_up_aiu[a,i,u] += T_up_iu[i,u]*np.conj(C_up[u,aa])
                    X_up_up_aiu[a,i,u] -= T_up_ua[u,a]*C_up[u,i]
                    X_up_down_aiu[a,i,u] -= T_up_iu[i,u]*T_down_ua[u,a]
                    X_up_down_aiu[a,i,u] += T_up_iu[i,u]*np.conj(C_down[u,aa])
                    X_up_down_aiu[a,i,u] -= T_down_ua[u,a]*C_up[u,i]
                    X_down_up_aiu[a,i,u] -= T_down_iu[i,u]*T_up_ua[u,a]
                    X_down_up_aiu[a,i,u] += T_down_iu[i,u]*np.conj(C_up[u,aa])
                    X_down_up_aiu[a,i,u] -= T_up_ua[u,a]*C_down[u,i]
                    X_down_down_aiu[a,i,u] -= T_down_iu[i,u]*T_down_ua[u,a]
                    X_down_down_aiu[a,i,u] += T_down_iu[i,u]*np.conj(C_down[u,aa])
                    X_down_down_aiu[a,i,u] -= T_down_ua[u,a]*C_down[u,i]
        K_ad = np.einsum('adu,u->ad',C_up_up_pqu[ham.nocc:,ham.nocc:,:],T1_down_down_u)
        K_ad -= np.einsum('adu,u->ad',C_up_down_pqu[ham.nocc:,ham.nocc:,:],T1_down_up_u)
        K_ad -= np.einsum('adu,u->ad',C_down_up_pqu[ham.nocc:,ham.nocc:,:],T1_up_down_u)
        K_ad += np.einsum('adu,u->ad',C_down_down_pqu[ham.nocc:,ham.nocc:,:],T1_up_up_u)
        K_ad -= 0.5e0*(np.einsum('ud,ua->ad',C_up[:,ham.nocc:],Tau_down_ua)
                     - np.einsum('ud,ua->ad',C_down[:,ham.nocc:],Tau_up_ua))
        K_ad *= ham.U
        K_ad -= np.einsum('ld,la',ham.F[:ham.nocc,ham.nocc:],T1)
        K_li = -np.einsum('liu,u->li', C_up_up_pqu[:ham.nocc,:ham.nocc,:],T1_down_down_u)
        K_li += np.einsum('liu,u->li', C_up_down_pqu[:ham.nocc,:ham.nocc,:],T1_down_up_u)
        K_li += np.einsum('liu,u->li', C_down_up_pqu[:ham.nocc,:ham.nocc,:],T1_up_down_u)
        K_li -= np.einsum('liu,u->li',C_down_down_pqu[:ham.nocc,:ham.nocc,:],T1_up_up_u)
        K_li -= 0.5e0*(np.einsum('ul,iu->li',np.conj(C_up[:,:ham.nocc]),Tau_down_iu)
                     - np.einsum('ul,iu->li',np.conj(C_down[:,:ham.nocc]),Tau_up_iu))
        K_li *= ham.U
        K_li -= np.einsum('ld,id',ham.F[:ham.nocc,ham.nocc:],T1)
        Xuij = C_upq[:,:ham.nocc,:ham.nocc] + 0.50e0*Tau_uij
        Xabu = C_pqu[ham.nocc:,ham.nocc:,:] + 0.50e0*Tau_abu
        for u in range(ham.nbas):
            for i in range(ham.nocc):
                for j in range(ham.nocc):
                    Xuij[u,i,j] += C_up[u,i]*T_down_iu[j,u]
                    Xuij[u,i,j] -= C_down[u,i]*T_up_iu[j,u]
                    Xuij[u,i,j] -= C_up[u,j]*T_down_iu[i,u]
                    Xuij[u,i,j] += C_down[u,j]*T_up_iu[i,u]
            for a in range(ham.nvirt):
                aa = a + ham.nocc
                for b in range(ham.nvirt):
                    bb = b + ham.nocc
                    Xabu[a,b,u] -= np.conj(C_up[u,aa])*T_down_ua[u,b]
                    Xabu[a,b,u] += np.conj(C_down[u,aa])*T_up_ua[u,b]
                    Xabu[a,b,u] += np.conj(C_up[u,bb])*T_down_ua[u,a]
#                    Xabu[a,b,u] += np.conj(C_down[u,bb])*T_up_ua[u,a]
                    # Check with Tom about notes, but I think this term
                    # should have the opposite sign.
                    Xabu[a,b,u] -= np.conj(C_down[u,bb])*T_up_ua[u,a]
        # Doubles residual G2
        # Ring terms
        Rings = np.einsum('aiu,bju->ijab',X_up_up_aiu,T_down_down_bju)
        Rings -= np.einsum('aiu,bju->ijab',X_up_down_aiu,T_down_up_bju)
        Rings -= np.einsum('aiu,bju->ijab',X_down_up_aiu,T_up_down_bju)
        Rings += np.einsum('aiu,bju->ijab',X_down_down_aiu,T_up_up_bju)
        Rings *= ham.U
        # Contractions (rings antisymmetrized over i<->j and a<->b)
        G2 = ham.U*np.einsum('uij,abu->ijab',Xuij,Xabu)
        G2 += (Rings - np.swapaxes(Rings,2,3) - np.swapaxes(Rings,0,1) + np.swapaxes(np.swapaxes(Rings,0,1),2,3))
        G2 += np.einsum('ad,ijdb->ijab',K_ad,T2)
        G2 += np.einsum('bd,ijad->ijab',K_ad,T2)
        G2 += np.einsum('li,ljab->ijab',K_li,T2)
        G2 += np.einsum('lj,ilab->ijab',K_li,T2)
        # Non-canonical terms (off-diagonal Fock contributions)
        G2 += np.einsum('ac,ijcb->ijab',F_offdiag[ham.nocc:,ham.nocc:],T2)
        G2 += np.einsum('bc,ijac->ijab',F_offdiag[ham.nocc:,ham.nocc:],T2)
        G2 -= np.einsum('ik,kjab->ijab',F_offdiag[:ham.nocc,:ham.nocc],T2)
        G2 -= np.einsum('jk,ikab->ijab',F_offdiag[:ham.nocc,:ham.nocc],T2)
        # Get error vecs (residuals HT-G)
        T2error, Err_vec = CCDutils.get_Err(ham.F,G2,T2,ham.nocc,ham.nvirt)
        T1error, T1Err_vec = CCSDutils.get_singles_Err(ham.F,G1,T1,ham.nocc,ham.nvirt)
        # Solve HT = G (with damping factor x)
        T2 = CCDutils.solveccd(ham.F,G2,T2,ham.nocc,ham.nvirt,x=damping)
        T1 = CCSDutils.solveccs(ham.F,G1,T1,ham.nocc,ham.nvirt,x=damping)
        # Correlation energy from the updated amplitudes
        Tau = T2 + np.einsum('ia,jb->ijab',T1,T1) - np.einsum('ib,ja->ijab',T1,T1)
        Tau_uij = np.einsum('ijab,uab->uij',Tau,C_upq[:,ham.nocc:,ham.nocc:])
        ecorr = 0.25*ham.U*np.einsum('iju,uij',C_pqu[:ham.nocc,:ham.nocc,:],Tau_uij)
        ecorr += np.einsum('ia,ia',ham.F[:ham.nocc,ham.nocc:],T1)
        error = np.abs(eold-ecorr)
        eold = ecorr
        print("Iteration ", niter, " Energy = ", ecorr, " Error = ", error)
        niter +=1
    # Checkpoint converged amplitudes for restarts.
    if ((ampfile != 'none')):
        with open(ampfile, 'wb') as f:
            pickle.dump(T2,f)
            pickle.dump(T1,f)
    ham.ecorr = ecorr
| [
"numpy.einsum",
"CCDutils.solveccd",
"scf.onee_MO_tran",
"CCSDutils.solveccs",
"CCSDutils.diis_singles",
"numpy.abs",
"numpy.conj",
"CCDutils.diis",
"pickle.load",
"os.path.isfile",
"numpy.copy",
"pickle.dump",
"CCDutils.get_Err",
"numpy.diag",
"CCDutils.diis_setup",
"numpy.swapaxes",
... | [((2125, 2178), 'CCDutils.diis_setup', 'CCDutils.diis_setup', (['ham.nocc', 'ham.nvirt', 'ham.F.dtype'], {}), '(ham.nocc, ham.nvirt, ham.F.dtype)\n', (2144, 2178), False, 'import CCDutils\n'), ((2205, 2294), 'CCSDutils.diis_singles_setup', 'CCSDutils.diis_singles_setup', (['ham.nocc', 'ham.nvirt', 'diis_start', 'diis_dim', 'ham.F.dtype'], {}), '(ham.nocc, ham.nvirt, diis_start, diis_dim, ham\n .F.dtype)\n', (2233, 2294), False, 'import CCSDutils\n'), ((2719, 2775), 'numpy.zeros', 'np.zeros', (['(ham.nbas, ham.nso, ham.nso)'], {'dtype': 'np.complex'}), '((ham.nbas, ham.nso, ham.nso), dtype=np.complex)\n', (2727, 2775), True, 'import numpy as np\n'), ((2782, 2838), 'numpy.zeros', 'np.zeros', (['(ham.nso, ham.nso, ham.nbas)'], {'dtype': 'np.complex'}), '((ham.nso, ham.nso, ham.nbas), dtype=np.complex)\n', (2790, 2838), True, 'import numpy as np\n'), ((2851, 2907), 'numpy.zeros', 'np.zeros', (['(ham.nso, ham.nso, ham.nbas)'], {'dtype': 'np.complex'}), '((ham.nso, ham.nso, ham.nbas), dtype=np.complex)\n', (2859, 2907), True, 'import numpy as np\n'), ((2921, 2977), 'numpy.zeros', 'np.zeros', (['(ham.nso, ham.nso, ham.nbas)'], {'dtype': 'np.complex'}), '((ham.nso, ham.nso, ham.nbas), dtype=np.complex)\n', (2929, 2977), True, 'import numpy as np\n'), ((2991, 3047), 'numpy.zeros', 'np.zeros', (['(ham.nso, ham.nso, ham.nbas)'], {'dtype': 'np.complex'}), '((ham.nso, ham.nso, ham.nbas), dtype=np.complex)\n', (2999, 3047), True, 'import numpy as np\n'), ((3063, 3119), 'numpy.zeros', 'np.zeros', (['(ham.nso, ham.nso, ham.nbas)'], {'dtype': 'np.complex'}), '((ham.nso, ham.nso, ham.nbas), dtype=np.complex)\n', (3071, 3119), True, 'import numpy as np\n'), ((919, 957), 'numpy.zeros', 'np.zeros', (['[ham.nbas * 2, ham.nbas * 2]'], {}), '([ham.nbas * 2, ham.nbas * 2])\n', (927, 957), True, 'import numpy as np\n'), ((959, 997), 'numpy.zeros', 'np.zeros', (['[ham.nbas * 2, ham.nbas * 2]'], {}), '([ham.nbas * 2, ham.nbas * 2])\n', (967, 997), True, 'import numpy as np\n'), ((1410, 
1428), 'scf.onee_MO_tran', 'onee_MO_tran', (['F', 'C'], {}), '(F, C)\n', (1422, 1428), False, 'from scf import onee_MO_tran\n'), ((1458, 1468), 'numpy.copy', 'np.copy', (['F'], {}), '(F)\n', (1465, 1468), True, 'import numpy as np\n'), ((1479, 1489), 'numpy.copy', 'np.copy', (['C'], {}), '(C)\n', (1486, 1489), True, 'import numpy as np\n'), ((1816, 1839), 'os.path.isfile', 'os.path.isfile', (['ampfile'], {}), '(ampfile)\n', (1830, 1839), False, 'import os\n'), ((1937, 2007), 'numpy.zeros', 'np.zeros', (['[ham.nocc, ham.nocc, ham.nvirt, ham.nvirt]'], {'dtype': 'np.complex'}), '([ham.nocc, ham.nocc, ham.nvirt, ham.nvirt], dtype=np.complex)\n', (1945, 2007), True, 'import numpy as np\n'), ((2011, 2060), 'numpy.zeros', 'np.zeros', (['[ham.nocc, ham.nvirt]'], {'dtype': 'np.complex'}), '([ham.nocc, ham.nvirt], dtype=np.complex)\n', (2019, 2060), True, 'import numpy as np\n'), ((3771, 3839), 'CCDutils.diis', 'CCDutils.diis', (['diis_start', 'diis_dim', 'niter', 'Errors', 'T2s', 'T2', 'Err_vec'], {}), '(diis_start, diis_dim, niter, Errors, T2s, T2, Err_vec)\n', (3784, 3839), False, 'import CCDutils\n'), ((3856, 3941), 'CCSDutils.diis_singles', 'CCSDutils.diis_singles', (['diis_start', 'diis_dim', 'niter', 'T1Errors', 'T1s', 'T1', 'T1Err_vec'], {}), '(diis_start, diis_dim, niter, T1Errors, T1s, T1,\n T1Err_vec)\n', (3878, 3941), False, 'import CCSDutils\n'), ((4061, 4124), 'numpy.einsum', 'np.einsum', (['"""ijab,uab->uij"""', 'Tau', 'C_upq[:, ham.nocc:, ham.nocc:]'], {}), "('ijab,uab->uij', Tau, C_upq[:, ham.nocc:, ham.nocc:])\n", (4070, 4124), True, 'import numpy as np\n'), ((4133, 4196), 'numpy.einsum', 'np.einsum', (['"""ijab,iju->abu"""', 'Tau', 'C_pqu[:ham.nocc, :ham.nocc, :]'], {}), "('ijab,iju->abu', Tau, C_pqu[:ham.nocc, :ham.nocc, :])\n", (4142, 4196), True, 'import numpy as np\n'), ((4213, 4281), 'numpy.einsum', 'np.einsum', (['"""ijab,iau->bju"""', 'T2', 'C_up_up_pqu[:ham.nocc, ham.nocc:, :]'], {}), "('ijab,iau->bju', T2, C_up_up_pqu[:ham.nocc, ham.nocc:, :])\n", 
(4222, 4281), True, 'import numpy as np\n'), ((4298, 4368), 'numpy.einsum', 'np.einsum', (['"""ijab,iau->bju"""', 'T2', 'C_up_down_pqu[:ham.nocc, ham.nocc:, :]'], {}), "('ijab,iau->bju', T2, C_up_down_pqu[:ham.nocc, ham.nocc:, :])\n", (4307, 4368), True, 'import numpy as np\n'), ((4385, 4455), 'numpy.einsum', 'np.einsum', (['"""ijab,iau->bju"""', 'T2', 'C_down_up_pqu[:ham.nocc, ham.nocc:, :]'], {}), "('ijab,iau->bju', T2, C_down_up_pqu[:ham.nocc, ham.nocc:, :])\n", (4394, 4455), True, 'import numpy as np\n'), ((4472, 4544), 'numpy.einsum', 'np.einsum', (['"""ijab,iau->bju"""', 'T2', 'C_down_down_pqu[:ham.nocc, ham.nocc:, :]'], {}), "('ijab,iau->bju', T2, C_down_down_pqu[:ham.nocc, ham.nocc:, :])\n", (4481, 4544), True, 'import numpy as np\n'), ((4561, 4629), 'numpy.einsum', 'np.einsum', (['"""ijab,jbu->aiu"""', 'T2', 'C_up_up_pqu[:ham.nocc, ham.nocc:, :]'], {}), "('ijab,jbu->aiu', T2, C_up_up_pqu[:ham.nocc, ham.nocc:, :])\n", (4570, 4629), True, 'import numpy as np\n'), ((4646, 4716), 'numpy.einsum', 'np.einsum', (['"""ijab,jbu->aiu"""', 'T2', 'C_up_down_pqu[:ham.nocc, ham.nocc:, :]'], {}), "('ijab,jbu->aiu', T2, C_up_down_pqu[:ham.nocc, ham.nocc:, :])\n", (4655, 4716), True, 'import numpy as np\n'), ((4733, 4803), 'numpy.einsum', 'np.einsum', (['"""ijab,jbu->aiu"""', 'T2', 'C_down_up_pqu[:ham.nocc, ham.nocc:, :]'], {}), "('ijab,jbu->aiu', T2, C_down_up_pqu[:ham.nocc, ham.nocc:, :])\n", (4742, 4803), True, 'import numpy as np\n'), ((4820, 4892), 'numpy.einsum', 'np.einsum', (['"""ijab,jbu->aiu"""', 'T2', 'C_down_down_pqu[:ham.nocc, ham.nocc:, :]'], {}), "('ijab,jbu->aiu', T2, C_down_down_pqu[:ham.nocc, ham.nocc:, :])\n", (4829, 4892), True, 'import numpy as np\n'), ((4904, 4952), 'numpy.zeros', 'np.zeros', (['(ham.nocc, ham.nbas)'], {'dtype': 'np.complex'}), '((ham.nocc, ham.nbas), dtype=np.complex)\n', (4912, 4952), True, 'import numpy as np\n'), ((4967, 5015), 'numpy.zeros', 'np.zeros', (['(ham.nocc, ham.nbas)'], {'dtype': 'np.complex'}), '((ham.nocc, ham.nbas), 
dtype=np.complex)\n', (4975, 5015), True, 'import numpy as np\n'), ((5026, 5074), 'numpy.zeros', 'np.zeros', (['(ham.nocc, ham.nbas)'], {'dtype': 'np.complex'}), '((ham.nocc, ham.nbas), dtype=np.complex)\n', (5034, 5074), True, 'import numpy as np\n'), ((5087, 5135), 'numpy.zeros', 'np.zeros', (['(ham.nocc, ham.nbas)'], {'dtype': 'np.complex'}), '((ham.nocc, ham.nbas), dtype=np.complex)\n', (5095, 5135), True, 'import numpy as np\n'), ((5505, 5554), 'numpy.zeros', 'np.zeros', (['(ham.nbas, ham.nvirt)'], {'dtype': 'np.complex'}), '((ham.nbas, ham.nvirt), dtype=np.complex)\n', (5513, 5554), True, 'import numpy as np\n'), ((5569, 5618), 'numpy.zeros', 'np.zeros', (['(ham.nbas, ham.nvirt)'], {'dtype': 'np.complex'}), '((ham.nbas, ham.nvirt), dtype=np.complex)\n', (5577, 5618), True, 'import numpy as np\n'), ((5631, 5680), 'numpy.zeros', 'np.zeros', (['(ham.nbas, ham.nvirt)'], {'dtype': 'np.complex'}), '((ham.nbas, ham.nvirt), dtype=np.complex)\n', (5639, 5680), True, 'import numpy as np\n'), ((5693, 5742), 'numpy.zeros', 'np.zeros', (['(ham.nbas, ham.nvirt)'], {'dtype': 'np.complex'}), '((ham.nbas, ham.nvirt), dtype=np.complex)\n', (5701, 5742), True, 'import numpy as np\n'), ((6114, 6150), 'numpy.zeros', 'np.zeros', (['ham.nbas'], {'dtype': 'np.complex'}), '(ham.nbas, dtype=np.complex)\n', (6122, 6150), True, 'import numpy as np\n'), ((6171, 6207), 'numpy.zeros', 'np.zeros', (['ham.nbas'], {'dtype': 'np.complex'}), '(ham.nbas, dtype=np.complex)\n', (6179, 6207), True, 'import numpy as np\n'), ((6228, 6264), 'numpy.zeros', 'np.zeros', (['ham.nbas'], {'dtype': 'np.complex'}), '(ham.nbas, dtype=np.complex)\n', (6236, 6264), True, 'import numpy as np\n'), ((6285, 6321), 'numpy.zeros', 'np.zeros', (['ham.nbas'], {'dtype': 'np.complex'}), '(ham.nbas, dtype=np.complex)\n', (6293, 6321), True, 'import numpy as np\n'), ((6730, 6806), 'numpy.einsum', 'np.einsum', (['"""kcu,u->kc"""', 'C_up_up_pqu[:ham.nocc, ham.nocc:, :]', 'T1_down_down_u'], {}), "('kcu,u->kc', 
C_up_up_pqu[:ham.nocc, ham.nocc:, :], T1_down_down_u)\n", (6739, 6806), True, 'import numpy as np\n'), ((6813, 6889), 'numpy.einsum', 'np.einsum', (['"""kcu,u->kc"""', 'C_up_down_pqu[:ham.nocc, ham.nocc:, :]', 'T1_down_up_u'], {}), "('kcu,u->kc', C_up_down_pqu[:ham.nocc, ham.nocc:, :], T1_down_up_u)\n", (6822, 6889), True, 'import numpy as np\n'), ((6896, 6972), 'numpy.einsum', 'np.einsum', (['"""kcu,u->kc"""', 'C_down_up_pqu[:ham.nocc, ham.nocc:, :]', 'T1_up_down_u'], {}), "('kcu,u->kc', C_down_up_pqu[:ham.nocc, ham.nocc:, :], T1_up_down_u)\n", (6905, 6972), True, 'import numpy as np\n'), ((6979, 7055), 'numpy.einsum', 'np.einsum', (['"""kcu,u->kc"""', 'C_down_down_pqu[:ham.nocc, ham.nocc:, :]', 'T1_up_up_u'], {}), "('kcu,u->kc', C_down_down_pqu[:ham.nocc, ham.nocc:, :], T1_up_up_u)\n", (6988, 7055), True, 'import numpy as np\n'), ((7080, 7135), 'numpy.einsum', 'np.einsum', (['"""uc,ua->ac"""', 'C_up[:, ham.nocc:]', 'Tau_down_ua'], {}), "('uc,ua->ac', C_up[:, ham.nocc:], Tau_down_ua)\n", (7089, 7135), True, 'import numpy as np\n'), ((7143, 7198), 'numpy.einsum', 'np.einsum', (['"""uc,ua->ac"""', 'C_down[:, ham.nocc:]', 'Tau_up_ua'], {}), "('uc,ua->ac', C_down[:, ham.nocc:], Tau_up_ua)\n", (7152, 7198), True, 'import numpy as np\n'), ((7399, 7435), 'numpy.copy', 'np.copy', (['ham.F[:ham.nocc, ham.nocc:]'], {}), '(ham.F[:ham.nocc, ham.nocc:])\n', (7406, 7435), True, 'import numpy as np\n'), ((7489, 7548), 'numpy.einsum', 'np.einsum', (['"""ac,ic->ia"""', 'F_offdiag[ham.nocc:, ham.nocc:]', 'T1'], {}), "('ac,ic->ia', F_offdiag[ham.nocc:, ham.nocc:], T1)\n", (7498, 7548), True, 'import numpy as np\n'), ((7554, 7613), 'numpy.einsum', 'np.einsum', (['"""ki,ka->ia"""', 'F_offdiag[:ham.nocc, :ham.nocc]', 'T1'], {}), "('ki,ka->ia', F_offdiag[:ham.nocc, :ham.nocc], T1)\n", (7563, 7613), True, 'import numpy as np\n'), ((10044, 10120), 'numpy.einsum', 'np.einsum', (['"""adu,u->ad"""', 'C_up_up_pqu[ham.nocc:, ham.nocc:, :]', 'T1_down_down_u'], {}), "('adu,u->ad', 
C_up_up_pqu[ham.nocc:, ham.nocc:, :], T1_down_down_u)\n", (10053, 10120), True, 'import numpy as np\n'), ((10127, 10203), 'numpy.einsum', 'np.einsum', (['"""adu,u->ad"""', 'C_up_down_pqu[ham.nocc:, ham.nocc:, :]', 'T1_down_up_u'], {}), "('adu,u->ad', C_up_down_pqu[ham.nocc:, ham.nocc:, :], T1_down_up_u)\n", (10136, 10203), True, 'import numpy as np\n'), ((10210, 10286), 'numpy.einsum', 'np.einsum', (['"""adu,u->ad"""', 'C_down_up_pqu[ham.nocc:, ham.nocc:, :]', 'T1_up_down_u'], {}), "('adu,u->ad', C_down_up_pqu[ham.nocc:, ham.nocc:, :], T1_up_down_u)\n", (10219, 10286), True, 'import numpy as np\n'), ((10293, 10369), 'numpy.einsum', 'np.einsum', (['"""adu,u->ad"""', 'C_down_down_pqu[ham.nocc:, ham.nocc:, :]', 'T1_up_up_u'], {}), "('adu,u->ad', C_down_down_pqu[ham.nocc:, ham.nocc:, :], T1_up_up_u)\n", (10302, 10369), True, 'import numpy as np\n'), ((10529, 10580), 'numpy.einsum', 'np.einsum', (['"""ld,la"""', 'ham.F[:ham.nocc, ham.nocc:]', 'T1'], {}), "('ld,la', ham.F[:ham.nocc, ham.nocc:], T1)\n", (10538, 10580), True, 'import numpy as np\n'), ((10676, 10752), 'numpy.einsum', 'np.einsum', (['"""liu,u->li"""', 'C_up_down_pqu[:ham.nocc, :ham.nocc, :]', 'T1_down_up_u'], {}), "('liu,u->li', C_up_down_pqu[:ham.nocc, :ham.nocc, :], T1_down_up_u)\n", (10685, 10752), True, 'import numpy as np\n'), ((10761, 10837), 'numpy.einsum', 'np.einsum', (['"""liu,u->li"""', 'C_down_up_pqu[:ham.nocc, :ham.nocc, :]', 'T1_up_down_u'], {}), "('liu,u->li', C_down_up_pqu[:ham.nocc, :ham.nocc, :], T1_up_down_u)\n", (10770, 10837), True, 'import numpy as np\n'), ((10846, 10922), 'numpy.einsum', 'np.einsum', (['"""liu,u->li"""', 'C_down_down_pqu[:ham.nocc, :ham.nocc, :]', 'T1_up_up_u'], {}), "('liu,u->li', C_down_down_pqu[:ham.nocc, :ham.nocc, :], T1_up_up_u)\n", (10855, 10922), True, 'import numpy as np\n'), ((11100, 11151), 'numpy.einsum', 'np.einsum', (['"""ld,id"""', 'ham.F[:ham.nocc, ham.nocc:]', 'T1'], {}), "('ld,id', ham.F[:ham.nocc, ham.nocc:], T1)\n", (11109, 11151), True, 'import 
numpy as np\n'), ((12033, 12089), 'numpy.einsum', 'np.einsum', (['"""aiu,bju->ijab"""', 'X_up_up_aiu', 'T_down_down_bju'], {}), "('aiu,bju->ijab', X_up_up_aiu, T_down_down_bju)\n", (12042, 12089), True, 'import numpy as np\n'), ((12099, 12155), 'numpy.einsum', 'np.einsum', (['"""aiu,bju->ijab"""', 'X_up_down_aiu', 'T_down_up_bju'], {}), "('aiu,bju->ijab', X_up_down_aiu, T_down_up_bju)\n", (12108, 12155), True, 'import numpy as np\n'), ((12165, 12221), 'numpy.einsum', 'np.einsum', (['"""aiu,bju->ijab"""', 'X_down_up_aiu', 'T_up_down_bju'], {}), "('aiu,bju->ijab', X_down_up_aiu, T_up_down_bju)\n", (12174, 12221), True, 'import numpy as np\n'), ((12231, 12287), 'numpy.einsum', 'np.einsum', (['"""aiu,bju->ijab"""', 'X_down_down_aiu', 'T_up_up_bju'], {}), "('aiu,bju->ijab', X_down_down_aiu, T_up_up_bju)\n", (12240, 12287), True, 'import numpy as np\n'), ((12489, 12525), 'numpy.einsum', 'np.einsum', (['"""ad,ijdb->ijab"""', 'K_ad', 'T2'], {}), "('ad,ijdb->ijab', K_ad, T2)\n", (12498, 12525), True, 'import numpy as np\n'), ((12532, 12568), 'numpy.einsum', 'np.einsum', (['"""bd,ijad->ijab"""', 'K_ad', 'T2'], {}), "('bd,ijad->ijab', K_ad, T2)\n", (12541, 12568), True, 'import numpy as np\n'), ((12575, 12611), 'numpy.einsum', 'np.einsum', (['"""li,ljab->ijab"""', 'K_li', 'T2'], {}), "('li,ljab->ijab', K_li, T2)\n", (12584, 12611), True, 'import numpy as np\n'), ((12618, 12654), 'numpy.einsum', 'np.einsum', (['"""lj,ilab->ijab"""', 'K_li', 'T2'], {}), "('lj,ilab->ijab', K_li, T2)\n", (12627, 12654), True, 'import numpy as np\n'), ((12690, 12753), 'numpy.einsum', 'np.einsum', (['"""ac,ijcb->ijab"""', 'F_offdiag[ham.nocc:, ham.nocc:]', 'T2'], {}), "('ac,ijcb->ijab', F_offdiag[ham.nocc:, ham.nocc:], T2)\n", (12699, 12753), True, 'import numpy as np\n'), ((12759, 12822), 'numpy.einsum', 'np.einsum', (['"""bc,ijac->ijab"""', 'F_offdiag[ham.nocc:, ham.nocc:]', 'T2'], {}), "('bc,ijac->ijab', F_offdiag[ham.nocc:, ham.nocc:], T2)\n", (12768, 12822), True, 'import numpy as np\n'), 
((12828, 12891), 'numpy.einsum', 'np.einsum', (['"""ik,kjab->ijab"""', 'F_offdiag[:ham.nocc, :ham.nocc]', 'T2'], {}), "('ik,kjab->ijab', F_offdiag[:ham.nocc, :ham.nocc], T2)\n", (12837, 12891), True, 'import numpy as np\n'), ((12897, 12960), 'numpy.einsum', 'np.einsum', (['"""jk,ikab->ijab"""', 'F_offdiag[:ham.nocc, :ham.nocc]', 'T2'], {}), "('jk,ikab->ijab', F_offdiag[:ham.nocc, :ham.nocc], T2)\n", (12906, 12960), True, 'import numpy as np\n'), ((13022, 13074), 'CCDutils.get_Err', 'CCDutils.get_Err', (['ham.F', 'G2', 'T2', 'ham.nocc', 'ham.nvirt'], {}), '(ham.F, G2, T2, ham.nocc, ham.nvirt)\n', (13038, 13074), False, 'import CCDutils\n'), ((13094, 13155), 'CCSDutils.get_singles_Err', 'CCSDutils.get_singles_Err', (['ham.F', 'G1', 'T1', 'ham.nocc', 'ham.nvirt'], {}), '(ham.F, G1, T1, ham.nocc, ham.nvirt)\n', (13119, 13155), False, 'import CCSDutils\n'), ((13181, 13245), 'CCDutils.solveccd', 'CCDutils.solveccd', (['ham.F', 'G2', 'T2', 'ham.nocc', 'ham.nvirt'], {'x': 'damping'}), '(ham.F, G2, T2, ham.nocc, ham.nvirt, x=damping)\n', (13198, 13245), False, 'import CCDutils\n'), ((13248, 13313), 'CCSDutils.solveccs', 'CCSDutils.solveccs', (['ham.F', 'G1', 'T1', 'ham.nocc', 'ham.nvirt'], {'x': 'damping'}), '(ham.F, G1, T1, ham.nocc, ham.nvirt, x=damping)\n', (13266, 13313), False, 'import CCSDutils\n'), ((13415, 13478), 'numpy.einsum', 'np.einsum', (['"""ijab,uab->uij"""', 'Tau', 'C_upq[:, ham.nocc:, ham.nocc:]'], {}), "('ijab,uab->uij', Tau, C_upq[:, ham.nocc:, ham.nocc:])\n", (13424, 13478), True, 'import numpy as np\n'), ((13565, 13616), 'numpy.einsum', 'np.einsum', (['"""ia,ia"""', 'ham.F[:ham.nocc, ham.nocc:]', 'T1'], {}), "('ia,ia', ham.F[:ham.nocc, ham.nocc:], T1)\n", (13574, 13616), True, 'import numpy as np\n'), ((13624, 13644), 'numpy.abs', 'np.abs', (['(eold - ecorr)'], {}), '(eold - ecorr)\n', (13630, 13644), True, 'import numpy as np\n'), ((772, 794), 'numpy.linalg.inv', 'np.linalg.inv', (['ham.C_a'], {}), '(ham.C_a)\n', (785, 794), True, 'import numpy as 
np\n'), ((829, 851), 'numpy.linalg.inv', 'np.linalg.inv', (['ham.C_b'], {}), '(ham.C_b)\n', (842, 851), True, 'import numpy as np\n'), ((1436, 1446), 'numpy.diag', 'np.diag', (['F'], {}), '(F)\n', (1443, 1446), True, 'import numpy as np\n'), ((1884, 1898), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1895, 1898), False, 'import pickle\n'), ((1907, 1921), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1918, 1921), False, 'import pickle\n'), ((4018, 4050), 'numpy.einsum', 'np.einsum', (['"""ib,ja->ijab"""', 'T1', 'T1'], {}), "('ib,ja->ijab', T1, T1)\n", (4027, 4050), True, 'import numpy as np\n'), ((6372, 6432), 'numpy.einsum', 'np.einsum', (['"""ia,ia"""', 'C_up_up_pqu[:ham.nocc, ham.nocc:, u]', 'T1'], {}), "('ia,ia', C_up_up_pqu[:ham.nocc, ham.nocc:, u], T1)\n", (6381, 6432), True, 'import numpy as np\n'), ((6450, 6512), 'numpy.einsum', 'np.einsum', (['"""ia,ia"""', 'C_up_down_pqu[:ham.nocc, ham.nocc:, u]', 'T1'], {}), "('ia,ia', C_up_down_pqu[:ham.nocc, ham.nocc:, u], T1)\n", (6459, 6512), True, 'import numpy as np\n'), ((6530, 6592), 'numpy.einsum', 'np.einsum', (['"""ia,ia"""', 'C_down_up_pqu[:ham.nocc, ham.nocc:, u]', 'T1'], {}), "('ia,ia', C_down_up_pqu[:ham.nocc, ham.nocc:, u], T1)\n", (6539, 6592), True, 'import numpy as np\n'), ((6614, 6678), 'numpy.einsum', 'np.einsum', (['"""ia,ia"""', 'C_down_down_pqu[:ham.nocc, ham.nocc:, u]', 'T1'], {}), "('ia,ia', C_down_down_pqu[:ham.nocc, ham.nocc:, u], T1)\n", (6623, 6678), True, 'import numpy as np\n'), ((7246, 7273), 'numpy.conj', 'np.conj', (['C_up[:, :ham.nocc]'], {}), '(C_up[:, :ham.nocc])\n', (7253, 7273), True, 'import numpy as np\n'), ((7318, 7347), 'numpy.conj', 'np.conj', (['C_down[:, :ham.nocc]'], {}), '(C_down[:, :ham.nocc])\n', (7325, 7347), True, 'import numpy as np\n'), ((7799, 7831), 'numpy.einsum', 'np.einsum', (['"""ic,ac->ia"""', 'T1', 'J_ac'], {}), "('ic,ac->ia', T1, J_ac)\n", (7808, 7831), True, 'import numpy as np\n'), ((7844, 7876), 'numpy.einsum', 'np.einsum', 
(['"""ka,ki->ia"""', 'T1', 'J_ki'], {}), "('ka,ki->ia', T1, J_ki)\n", (7853, 7876), True, 'import numpy as np\n'), ((10592, 10668), 'numpy.einsum', 'np.einsum', (['"""liu,u->li"""', 'C_up_up_pqu[:ham.nocc, :ham.nocc, :]', 'T1_down_down_u'], {}), "('liu,u->li', C_up_up_pqu[:ham.nocc, :ham.nocc, :], T1_down_down_u)\n", (10601, 10668), True, 'import numpy as np\n'), ((12335, 12373), 'numpy.einsum', 'np.einsum', (['"""uij,abu->ijab"""', 'Xuij', 'Xabu'], {}), "('uij,abu->ijab', Xuij, Xabu)\n", (12344, 12373), True, 'import numpy as np\n'), ((13372, 13404), 'numpy.einsum', 'np.einsum', (['"""ib,ja->ijab"""', 'T1', 'T1'], {}), "('ib,ja->ijab', T1, T1)\n", (13381, 13404), True, 'import numpy as np\n'), ((13496, 13557), 'numpy.einsum', 'np.einsum', (['"""iju,uij"""', 'C_pqu[:ham.nocc, :ham.nocc, :]', 'Tau_uij'], {}), "('iju,uij', C_pqu[:ham.nocc, :ham.nocc, :], Tau_uij)\n", (13505, 13557), True, 'import numpy as np\n'), ((13804, 13822), 'pickle.dump', 'pickle.dump', (['T2', 'f'], {}), '(T2, f)\n', (13815, 13822), False, 'import pickle\n'), ((13825, 13843), 'pickle.dump', 'pickle.dump', (['T1', 'f'], {}), '(T1, f)\n', (13836, 13843), False, 'import pickle\n'), ((3984, 4016), 'numpy.einsum', 'np.einsum', (['"""ia,jb->ijab"""', 'T1', 'T1'], {}), "('ia,jb->ijab', T1, T1)\n", (3993, 4016), True, 'import numpy as np\n'), ((5378, 5424), 'numpy.einsum', 'np.einsum', (['"""a,a"""', 'T1[i, :]', 'C_up[u, ham.nocc:]'], {}), "('a,a', T1[i, :], C_up[u, ham.nocc:])\n", (5387, 5424), True, 'import numpy as np\n'), ((5442, 5490), 'numpy.einsum', 'np.einsum', (['"""a,a"""', 'T1[i, :]', 'C_down[u, ham.nocc:]'], {}), "('a,a', T1[i, :], C_down[u, ham.nocc:])\n", (5451, 5490), True, 'import numpy as np\n'), ((5822, 5876), 'numpy.einsum', 'np.einsum', (['"""b,b"""', 'Tau_abu[a, :, u]', 'C_up[u, ham.nocc:]'], {}), "('b,b', Tau_abu[a, :, u], C_up[u, ham.nocc:])\n", (5831, 5876), True, 'import numpy as np\n'), ((5895, 5951), 'numpy.einsum', 'np.einsum', (['"""b,b"""', 'Tau_abu[a, :, u]', 'C_down[u, 
ham.nocc:]'], {}), "('b,b', Tau_abu[a, :, u], C_down[u, ham.nocc:])\n", (5904, 5951), True, 'import numpy as np\n'), ((7465, 7479), 'numpy.diag', 'np.diag', (['ham.F'], {}), '(ham.F)\n', (7472, 7479), True, 'import numpy as np\n'), ((7671, 7703), 'numpy.einsum', 'np.einsum', (['"""ic,ka->ikac"""', 'T1', 'T1'], {}), "('ic,ka->ikac', T1, T1)\n", (7680, 7703), True, 'import numpy as np\n'), ((7747, 7779), 'numpy.einsum', 'np.einsum', (['"""ic,ka->ikac"""', 'T1', 'T1'], {}), "('ic,ka->ikac', T1, T1)\n", (7756, 7779), True, 'import numpy as np\n'), ((8054, 8109), 'numpy.einsum', 'np.einsum', (['"""ui,ua->ia"""', 'C_up[:, :ham.nocc]', 'Tau_down_ua'], {}), "('ui,ua->ia', C_up[:, :ham.nocc], Tau_down_ua)\n", (8063, 8109), True, 'import numpy as np\n'), ((8118, 8173), 'numpy.einsum', 'np.einsum', (['"""ui,ua->ia"""', 'C_down[:, :ham.nocc]', 'Tau_up_ua'], {}), "('ui,ua->ia', C_down[:, :ham.nocc], Tau_up_ua)\n", (8127, 8173), True, 'import numpy as np\n'), ((8430, 8506), 'numpy.einsum', 'np.einsum', (['"""aiu,u->ia"""', 'C_down_down_pqu[ham.nocc:, :ham.nocc, :]', 'T1_up_up_u'], {}), "('aiu,u->ia', C_down_down_pqu[ham.nocc:, :ham.nocc, :], T1_up_up_u)\n", (8439, 8506), True, 'import numpy as np\n'), ((10383, 10438), 'numpy.einsum', 'np.einsum', (['"""ud,ua->ad"""', 'C_up[:, ham.nocc:]', 'Tau_down_ua'], {}), "('ud,ua->ad', C_up[:, ham.nocc:], Tau_down_ua)\n", (10392, 10438), True, 'import numpy as np\n'), ((10449, 10504), 'numpy.einsum', 'np.einsum', (['"""ud,ua->ad"""', 'C_down[:, ham.nocc:]', 'Tau_up_ua'], {}), "('ud,ua->ad', C_down[:, ham.nocc:], Tau_up_ua)\n", (10458, 10504), True, 'import numpy as np\n'), ((12415, 12439), 'numpy.swapaxes', 'np.swapaxes', (['Rings', '(0)', '(1)'], {}), '(Rings, 0, 1)\n', (12426, 12439), True, 'import numpy as np\n'), ((12452, 12476), 'numpy.swapaxes', 'np.swapaxes', (['Rings', '(0)', '(1)'], {}), '(Rings, 0, 1)\n', (12463, 12476), True, 'import numpy as np\n'), ((13338, 13370), 'numpy.einsum', 'np.einsum', (['"""ia,jb->ijab"""', 'T1', 
'T1'], {}), "('ia,jb->ijab', T1, T1)\n", (13347, 13370), True, 'import numpy as np\n'), ((3405, 3424), 'numpy.conj', 'np.conj', (['C_up[u, p]'], {}), '(C_up[u, p])\n', (3412, 3424), True, 'import numpy as np\n'), ((3463, 3484), 'numpy.conj', 'np.conj', (['C_down[u, p]'], {}), '(C_down[u, p])\n', (3470, 3484), True, 'import numpy as np\n'), ((3523, 3542), 'numpy.conj', 'np.conj', (['C_up[u, p]'], {}), '(C_up[u, p])\n', (3530, 3542), True, 'import numpy as np\n'), ((3583, 3604), 'numpy.conj', 'np.conj', (['C_down[u, p]'], {}), '(C_down[u, p])\n', (3590, 3604), True, 'import numpy as np\n'), ((5245, 5272), 'numpy.conj', 'np.conj', (['C_up[u, :ham.nocc]'], {}), '(C_up[u, :ham.nocc])\n', (5252, 5272), True, 'import numpy as np\n'), ((5327, 5356), 'numpy.conj', 'np.conj', (['C_down[u, :ham.nocc]'], {}), '(C_down[u, :ham.nocc])\n', (5334, 5356), True, 'import numpy as np\n'), ((5992, 6019), 'numpy.conj', 'np.conj', (['C_up[u, :ham.nocc]'], {}), '(C_up[u, :ham.nocc])\n', (5999, 6019), True, 'import numpy as np\n'), ((6065, 6094), 'numpy.conj', 'np.conj', (['C_down[u, :ham.nocc]'], {}), '(C_down[u, :ham.nocc])\n', (6072, 6094), True, 'import numpy as np\n'), ((7918, 7945), 'numpy.conj', 'np.conj', (['C_up[:, ham.nocc:]'], {}), '(C_up[:, ham.nocc:])\n', (7925, 7945), True, 'import numpy as np\n'), ((7992, 8021), 'numpy.conj', 'np.conj', (['C_down[:, ham.nocc:]'], {}), '(C_down[:, ham.nocc:])\n', (7999, 8021), True, 'import numpy as np\n'), ((8349, 8425), 'numpy.einsum', 'np.einsum', (['"""aiu,u->ia"""', 'C_down_up_pqu[ham.nocc:, :ham.nocc, :]', 'T1_up_down_u'], {}), "('aiu,u->ia', C_down_up_pqu[ham.nocc:, :ham.nocc, :], T1_up_down_u)\n", (8358, 8425), True, 'import numpy as np\n'), ((10958, 10985), 'numpy.conj', 'np.conj', (['C_up[:, :ham.nocc]'], {}), '(C_up[:, :ham.nocc])\n', (10965, 10985), True, 'import numpy as np\n'), ((11033, 11062), 'numpy.conj', 'np.conj', (['C_down[:, :ham.nocc]'], {}), '(C_down[:, :ham.nocc])\n', (11040, 11062), True, 'import numpy as np\n'), 
((12390, 12414), 'numpy.swapaxes', 'np.swapaxes', (['Rings', '(2)', '(3)'], {}), '(Rings, 2, 3)\n', (12401, 12414), True, 'import numpy as np\n'), ((3286, 3305), 'numpy.conj', 'np.conj', (['C_up[u, p]'], {}), '(C_up[u, p])\n', (3293, 3305), True, 'import numpy as np\n'), ((3305, 3326), 'numpy.conj', 'np.conj', (['C_down[u, q]'], {}), '(C_down[u, q])\n', (3312, 3326), True, 'import numpy as np\n'), ((3335, 3356), 'numpy.conj', 'np.conj', (['C_down[u, p]'], {}), '(C_down[u, p])\n', (3342, 3356), True, 'import numpy as np\n'), ((3356, 3375), 'numpy.conj', 'np.conj', (['C_up[u, q]'], {}), '(C_up[u, q])\n', (3363, 3375), True, 'import numpy as np\n'), ((8187, 8263), 'numpy.einsum', 'np.einsum', (['"""aiu,u->ia"""', 'C_up_up_pqu[ham.nocc:, :ham.nocc, :]', 'T1_down_down_u'], {}), "('aiu,u->ia', C_up_up_pqu[ham.nocc:, :ham.nocc, :], T1_down_down_u)\n", (8196, 8263), True, 'import numpy as np\n'), ((8268, 8344), 'numpy.einsum', 'np.einsum', (['"""aiu,u->ia"""', 'C_up_down_pqu[ham.nocc:, :ham.nocc, :]', 'T1_down_up_u'], {}), "('aiu,u->ia', C_up_down_pqu[ham.nocc:, :ham.nocc, :], T1_down_up_u)\n", (8277, 8344), True, 'import numpy as np\n'), ((9417, 9437), 'numpy.conj', 'np.conj', (['C_up[u, aa]'], {}), '(C_up[u, aa])\n', (9424, 9437), True, 'import numpy as np\n'), ((9588, 9610), 'numpy.conj', 'np.conj', (['C_down[u, aa]'], {}), '(C_down[u, aa])\n', (9595, 9610), True, 'import numpy as np\n'), ((9767, 9787), 'numpy.conj', 'np.conj', (['C_up[u, aa]'], {}), '(C_up[u, aa])\n', (9774, 9787), True, 'import numpy as np\n'), ((9950, 9972), 'numpy.conj', 'np.conj', (['C_down[u, aa]'], {}), '(C_down[u, aa])\n', (9957, 9972), True, 'import numpy as np\n'), ((11658, 11678), 'numpy.conj', 'np.conj', (['C_up[u, aa]'], {}), '(C_up[u, aa])\n', (11665, 11678), True, 'import numpy as np\n'), ((11713, 11735), 'numpy.conj', 'np.conj', (['C_down[u, aa]'], {}), '(C_down[u, aa])\n', (11720, 11735), True, 'import numpy as np\n'), ((11768, 11788), 'numpy.conj', 'np.conj', (['C_up[u, bb]'], {}), 
'(C_up[u, bb])\n', (11775, 11788), True, 'import numpy as np\n'), ((11965, 11987), 'numpy.conj', 'np.conj', (['C_down[u, bb]'], {}), '(C_down[u, bb])\n', (11972, 11987), True, 'import numpy as np\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import torch
import numpy as np
from src.agents.base import Agent, adjust_learning_rate
class DQNAgent(Agent):
def __init__(self, env_prototype, model_prototype, memory_prototype=None, **kwargs):
super(DQNAgent, self).__init__(env_prototype, model_prototype, memory_prototype, **kwargs)
self.logger.info('<===================================> DQNAgent')
def _get_loss(self, experiences, logging=True):
batch_size = len(experiences)
s0_batch = torch.from_numpy(np.asarray([experiences[i][0] for i in range(batch_size)])).type(self.dtype).to(self.device)
a_batch = torch.from_numpy(np.asarray([experiences[i][1] for i in range(batch_size)])).long().to(self.device)
r_batch = torch.from_numpy(np.asarray([experiences[i][2] for i in range(batch_size)])).type(self.dtype).to(self.device)
s1_batch = torch.from_numpy(np.asarray([experiences[i][3] for i in range(batch_size)])).type(self.dtype).to(self.device)
t1_batch = torch.from_numpy(np.asarray([experiences[i][4] for i in range(batch_size)])).type(self.dtype).to(self.device)
# Compute target Q values for mini-batch update.
if self.bootstrap_type == 'double_q':
q_values = self.model(s1_batch).detach() # Detach this variable from the current graph since we don't want gradients to propagate
_, q_max_actions = q_values.max(dim=1, keepdim=True)
next_max_q_values = self.target_model(s1_batch).detach()
next_max_q_values = next_max_q_values.gather(1, q_max_actions)
elif self.bootstrap_type == 'target_q':
next_max_q_values = self.target_model(s1_batch).detach()
next_max_q_values, _ = next_max_q_values.max(dim=1, keepdim=True)
elif self.bootstrap_type == 'learn_q':
next_max_q_values = self.model(s1_batch).detach()
next_max_q_values, _ = next_max_q_values.max(dim=1, keepdim=True)
else:
raise ValueError('Input bootstrapping type is not supported!')
# Compute r_t + gamma * max_a Q(s_t+1, a) and update the targets accordingly
current_q_values = self.model(s0_batch).gather(1, a_batch.unsqueeze(1)).squeeze()
# Set discounted reward to zero for all states that were terminal.
next_max_q_values = (next_max_q_values * t1_batch.unsqueeze(1)).squeeze()
expected_q_values = (r_batch + self.gamma * next_max_q_values).squeeze()
loss = self.value_criteria(current_q_values, expected_q_values)
error = (current_q_values - next_max_q_values).tolist()
if logging and self.step != 0 and self.step % self.log_step_interval == 0: # logging
self.window_max_abs_q.append(np.mean(np.abs(next_max_q_values.tolist())))
self.max_abs_q_log.append(np.max(self.window_max_abs_q))
self.loss_log.append(loss.item())
self.step_log.append(self.step)
return loss, error
def _forward(self, states):
states = torch.from_numpy(states).unsqueeze(0).type(self.dtype).to(self.device)
q_values = self.model(states).detach()
action = self._epsilon_greedy(q_values)
return action
def _backward(self, experiences=None, idxs=None, is_weights=None):
# Train the network on a single stochastic batch.
error = 0.
if self.step % self.train_interval == 0:
if experiences is None:
experiences, idxs, is_weights = self.memory.sample_batch(self.batch_size)
loss, error = self._get_loss(experiences)
if idxs is not None: # update priorities
error = np.abs(error) # abs error as priorities
for i in range(self.batch_size):
self.memory.update(idxs[i], error[i])
is_weights = np.ones(len(experiences)) if is_weights is None else is_weights
loss = (torch.from_numpy(is_weights).type(self.dtype).to(self.device) * loss).mean() # apply importance weights
self.optimizer.zero_grad()
loss.backward()
for param in self.model.parameters():
param.grad.data.clamp_(-self.clip_grad, self.clip_grad)
self.optimizer.step()
# adjust learning rate if enabled
if self.lr_decay:
self.lr_adjusted = max(self.lr * (self.steps - self.step) / self.steps, 1e-32)
adjust_learning_rate(self.optimizer, self.lr_adjusted)
if self.target_model_update >= 1 and self.step % self.target_model_update == 0:
self._update_target_model_hard() # Hard update every `target_model_update` steps.
if self.target_model_update < 1.:
self._update_target_model_soft() # Soft update with `(1 - target_model_update) * old + target_model_update * new`.
return error
def _random_initialization(self):
self.logger.info('<===================================> Random policy initialization for %d eps' % self.random_eps)
for e in range(self.random_eps):
self.experience = self.env.reset()
while not self.env.episode_ended:
action = self.env.sample_random_action()
self.experience = self.env.step(action)
error = self._backward([self.experience])
self._store_experience(self.experience, abs(error))
def fit_model(self):
self._init_model(training=True)
self.eps = self.eps_start
self.optimizer = self.optimizer_class(self.model.parameters(), lr=self.lr, weight_decay=self.weight_decay)
self.lr_adjusted = self.lr
self._reset_training_loggings()
self.start_time = time.time()
self.step = 0
total_reward = 0.
self._random_initialization()
self.logger.info('<===================================> Training ...')
for self.episode in range(self.episodes):
self.experience = self.env.reset()
episode_steps, episode_reward = 0, 0
while not self.env.episode_ended:
action = self._forward(self.experience[3])
self.experience = self.env.step(action)
_, error = self._get_loss([self.experience], logging=False) # compute priority
self._store_experience(self.experience, abs(error))
self._visualize(visualize=self.train_visualize)
if self.step > self.learn_start:
self._backward()
episode_steps += 1
episode_reward += self.experience[2]
self.step += 1
self.window_scores.append(episode_reward)
run_avg_reward = np.mean(self.window_scores)
total_reward += episode_reward
total_avg_reward = total_reward / self.episode if self.episode > 0 else 0
if self.episode % self.log_episode_interval == 0:
if self.use_tensorboard:
self.writer.add_scalar('run_avg_reward/episode', run_avg_reward, self.episode)
self.run_avg_score_log.append(run_avg_reward)
self.total_avg_score_log.append(total_avg_reward)
self.eps_log.append(self.episode)
if run_avg_reward > self.env.solved_criteria:
self._save_model(self.step, episode_reward)
if self.solved_stop:
break
if self.episode % self.prog_freq == 0:
self.logger.info('Reporting at episode %d | Elapsed Time: %s' % (self.episode, str(time.time() - self.start_time)))
self.logger.info('Training Stats: lr: %f epsilon: %f steps: %d ' % (self.lr_adjusted, self.eps, self.step))
self.logger.info('Training Stats: total_reward: %f total_avg_reward: %f run_avg_reward: %f ' % (total_reward, total_avg_reward, run_avg_reward))
if self.step > self.steps:
self.logger.info('Maximal steps reached. Training stop!')
break
self.logger.info('Saving model...')
self._save_model(self.step, episode_reward)
def test_model(self):
if not self.model:
self._init_model(training=False)
for episode in range(self.test_nepisodes):
episode_reward = 0
episode_steps = 0
self.experience = self.env.reset()
while not self.env.episode_ended:
action = self._forward(self.experience[3])
self.experience = self.env.step(action)
self._visualize(visualize=True)
episode_steps += 1
episode_reward += self.experience[2]
if episode_reward > self.env.solved_criteria:
self.logger.info('Test episode %d at %d step with reward %f ' % (episode, episode_steps, episode_reward))
| [
"numpy.mean",
"numpy.abs",
"torch.from_numpy",
"numpy.max",
"src.agents.base.adjust_learning_rate",
"time.time"
] | [((5800, 5811), 'time.time', 'time.time', ([], {}), '()\n', (5809, 5811), False, 'import time\n'), ((4515, 4569), 'src.agents.base.adjust_learning_rate', 'adjust_learning_rate', (['self.optimizer', 'self.lr_adjusted'], {}), '(self.optimizer, self.lr_adjusted)\n', (4535, 4569), False, 'from src.agents.base import Agent, adjust_learning_rate\n'), ((6800, 6827), 'numpy.mean', 'np.mean', (['self.window_scores'], {}), '(self.window_scores)\n', (6807, 6827), True, 'import numpy as np\n'), ((2917, 2946), 'numpy.max', 'np.max', (['self.window_max_abs_q'], {}), '(self.window_max_abs_q)\n', (2923, 2946), True, 'import numpy as np\n'), ((3759, 3772), 'numpy.abs', 'np.abs', (['error'], {}), '(error)\n', (3765, 3772), True, 'import numpy as np\n'), ((3115, 3139), 'torch.from_numpy', 'torch.from_numpy', (['states'], {}), '(states)\n', (3131, 3139), False, 'import torch\n'), ((7668, 7679), 'time.time', 'time.time', ([], {}), '()\n', (7677, 7679), False, 'import time\n'), ((4016, 4044), 'torch.from_numpy', 'torch.from_numpy', (['is_weights'], {}), '(is_weights)\n', (4032, 4044), False, 'import torch\n')] |
"""
NOTE: this is only used for testing the cython / c implementation.
"""
# Third-party
import astropy.units as u
import numpy as np
from twobody.wrap import cy_rv_from_elements
from astroML.utils import log_multivariate_gaussian
# from scipy.stats import multivariate_normal
# Project
from ...samples import JokerSamples
from ...distributions import FixedCompanionMass
from ...utils import DEFAULT_RNG
__all__ = ['get_ivar', 'likelihood_worker', 'marginal_ln_likelihood']
def get_ivar(data, s):
"""Return a copy of the inverse variance array with jitter included.
This is safe for zero'd out inverse variances.
Parameters
----------
data : `~thejoker.data.RVData`
s : numeric
Jitter in the same units as the RV data.
"""
return data.ivar.value / (1 + s**2 * data.ivar.value)
def likelihood_worker(y, ivar, M, mu, Lambda, make_aA=False):
"""
Internal function used to construct linear algebra objects
used to compute the marginal log-likelihood.
Parameters
----------
M : array_like
Design matrix.
ivar : array_like
Inverse-variance matrix.
y : array_like
Data (in this case, radial velocities).
mu : array_like
Prior mean for linear parameters.
Lambda : array_like
Prior variance matrix for linear parameters.
Returns
-------
B : `numpy.ndarray`
C + M Λ M^T - Variance of data gaussian.
b : `numpy.ndarray`
M µ - Optimal values of linear parameters.
chi2 : float
Chi-squared value.
Notes
-----
The linear parameter vector returned here (``b``) may have a negative
velocity semi-amplitude. I don't think there is anything we can do
about this if we want to preserve the simple linear algebra below and
it means that optimizing over the marginal likelihood below won't
work -- in the argument of periastron, there will be two peaks of
similar height, and so the highest marginal likelihood period might be
shifted from the truth.
"""
Λ = Lambda
Λinv = np.linalg.inv(Λ)
µ = mu
Cinv = np.diag(ivar)
C = np.diag(1 / ivar)
b = M @ µ
B = C + M @ Λ @ M.T
# Old implementation:
# old_marg_ll = multivariate_normal.logpdf(y, b, B)
# if make_aAinv:
# Ainv = Λinv + M.T @ Cinv @ M
# # Note: this is unstable! if cond num is high, could do:
# # p, *_ = np.linalg.lstsq(A, y)
# a = np.linalg.solve(Ainv, Λinv @ mu + M.T @ Cinv @ y)
# return marg_ll, b, B, a, Ainv
# else:
# return marg_ll, b, B
# New implementation:
Ainv = Λinv + M.T @ Cinv @ M
A = np.linalg.inv(Ainv)
Binv = Cinv - Cinv @ M @ A @ M.T @ Cinv
marg_ll = log_multivariate_gaussian(y, b, B, Vinv=Binv)
if make_aA:
# Note: this is unstable! if cond num is high, could do:
# p, *_ = np.linalg.lstsq(A, y)
a = np.linalg.solve(Ainv, Λinv @ mu + M.T @ Cinv @ y)
return marg_ll, b, B, a, A
else:
return marg_ll, b, B
def design_matrix(nonlinear_p, data, prior):
"""Compute the design matrix, M.
Parameters
----------
nonlinear_p : array_like
data : `~thejoker.RVData`
prior : `~thejoker.JokerPrior`
Returns
-------
M : `numpy.ndarray`
The design matrix with shape ``(n_times, n_params)``.
"""
P, ecc, omega, M0 = nonlinear_p[:4] # we don't need the jitter here
t = data._t_bmjd
t0 = data._t_ref_bmjd
zdot = cy_rv_from_elements(t, P, 1., ecc, omega, M0, t0, 1e-8, 128)
M1 = np.vander(t - t0, N=prior.poly_trend, increasing=True)
M = np.hstack((zdot[:, None], M1))
return M
def get_M_Lambda_ivar(samples, prior, data):
v_unit = data.rv.unit
units = {'K': v_unit, 's': v_unit}
for i, k in enumerate(list(prior._linear_equiv_units.keys())[1:]): # skip K
units[k] = v_unit / u.day**i
packed_samples, _ = samples.pack(units=units)
n_samples = packed_samples.shape[0]
n_linear = len(prior._linear_equiv_units)
Lambda = np.zeros(n_linear)
for i, k in enumerate(prior._linear_equiv_units.keys()):
if k == 'K':
continue # set below
Lambda[i] = prior.pars[k].distribution.sd.eval() ** 2
K_dist = prior.pars['K'].distribution
if isinstance(K_dist, FixedCompanionMass):
sigma_K0 = K_dist._sigma_K0.to_value(v_unit)
P0 = K_dist._P0.to_value(samples['P'].unit)
max_K2 = K_dist._max_K.to_value(v_unit) ** 2
else:
Lambda[0] = K_dist.sd.eval() ** 2
for n in range(n_samples):
M = design_matrix(packed_samples[n], data, prior)
if isinstance(K_dist, FixedCompanionMass):
P = samples['P'][n].value
e = samples['e'][n]
Lambda[0] = sigma_K0**2 / (1 - e**2) * (P / P0)**(-2/3)
Lambda[0] = min(max_K2, Lambda[0])
# jitter must be in same units as the data RV's / ivar!
s = packed_samples[n, 4]
ivar = get_ivar(data, s)
yield n, M, Lambda, ivar, packed_samples[n], units
def marginal_ln_likelihood(samples, prior, data):
"""
Internal function used to compute the likelihood marginalized
over the linear parameters.
Parameters
----------
samples : `~thejoker.JokerSamples`
prior : `~thejoker.JokerPrior`
data : `~thejoker.RVData`
Returns
-------
marg_ln_like : `numpy.ndarray`
Marginal log-likelihood values.
"""
n_samples = len(samples)
n_linear = len(prior._linear_equiv_units)
mu = np.zeros(n_linear)
marg_ll = np.zeros(n_samples)
for n, M, Lambda, ivar, *_ in get_M_Lambda_ivar(samples, prior, data):
try:
marg_ll[n], *_ = likelihood_worker(data.rv.value, ivar, M,
mu, np.diag(Lambda),
make_aA=False)
except np.linalg.LinAlgError as e:
raise e
return marg_ll
def rejection_sample(samples, prior, data, rnd=None):
"""
Parameters
----------
samples : `~thejoker.JokerSamples`
prior : `~thejoker.JokerPrior`
data : `~thejoker.RVData`
"""
n_linear = len(prior._linear_equiv_units)
mu = np.zeros(n_linear)
if rnd is None:
rnd = DEFAULT_RNG()
ll = marginal_ln_likelihood(samples, prior, data)
uu = rnd.uniform(size=len(ll))
mask = np.exp(ll - ll.max()) > uu
n_good_samples = mask.sum()
good_samples = samples[mask]
all_packed = np.zeros((n_good_samples, len(prior.par_names)))
for n, M, Lambda, ivar, packed_nonlinear, units in get_M_Lambda_ivar(
good_samples, prior, data):
try:
_, b, B, a, A = likelihood_worker(data.rv.value, ivar, M,
mu, np.diag(Lambda),
make_aA=True)
except np.linalg.LinAlgError as e:
raise e
linear_pars = rnd.multivariate_normal(a, A)
all_packed[n] = np.concatenate((packed_nonlinear, linear_pars))
unpack_units = dict()
for k in prior.par_names:
if k in units:
unpack_units[k] = units[k]
else:
unpack_units[k] = samples[k].unit
return JokerSamples.unpack(all_packed, unpack_units, prior.poly_trend,
data.t_ref)
def get_aAbB(samples, prior, data):
"""
For testing Cython against
Parameters
----------
samples : `~thejoker.JokerSamples`
prior : `~thejoker.JokerPrior`
data : `~thejoker.RVData`
"""
n_samples = len(samples)
n_linear = len(prior._linear_equiv_units)
n_times = len(data)
mu = np.zeros(n_linear)
out = {'a': np.zeros((n_samples, n_linear)),
'A': np.zeros((n_samples, n_linear, n_linear)),
'b': np.zeros((n_samples, n_times)),
'B': np.zeros((n_samples, n_times, n_times))}
for n, M, Lambda, ivar, *_ in get_M_Lambda_ivar(samples, prior, data):
try:
_, b, B, a, A = likelihood_worker(data.rv.value, ivar, M,
mu, np.diag(Lambda),
make_aA=True)
except np.linalg.LinAlgError as e:
raise e
out['a'][n] = a
out['A'][n] = A
out['b'][n] = b
out['B'][n] = B
return out
| [
"numpy.linalg.solve",
"numpy.vander",
"numpy.hstack",
"astroML.utils.log_multivariate_gaussian",
"twobody.wrap.cy_rv_from_elements",
"numpy.diag",
"numpy.zeros",
"numpy.linalg.inv",
"numpy.concatenate"
] | [((2079, 2095), 'numpy.linalg.inv', 'np.linalg.inv', (['Λ'], {}), '(Λ)\n', (2092, 2095), True, 'import numpy as np\n'), ((2118, 2131), 'numpy.diag', 'np.diag', (['ivar'], {}), '(ivar)\n', (2125, 2131), True, 'import numpy as np\n'), ((2140, 2157), 'numpy.diag', 'np.diag', (['(1 / ivar)'], {}), '(1 / ivar)\n', (2147, 2157), True, 'import numpy as np\n'), ((2666, 2685), 'numpy.linalg.inv', 'np.linalg.inv', (['Ainv'], {}), '(Ainv)\n', (2679, 2685), True, 'import numpy as np\n'), ((2745, 2790), 'astroML.utils.log_multivariate_gaussian', 'log_multivariate_gaussian', (['y', 'b', 'B'], {'Vinv': 'Binv'}), '(y, b, B, Vinv=Binv)\n', (2770, 2790), False, 'from astroML.utils import log_multivariate_gaussian\n'), ((3508, 3570), 'twobody.wrap.cy_rv_from_elements', 'cy_rv_from_elements', (['t', 'P', '(1.0)', 'ecc', 'omega', 'M0', 't0', '(1e-08)', '(128)'], {}), '(t, P, 1.0, ecc, omega, M0, t0, 1e-08, 128)\n', (3527, 3570), False, 'from twobody.wrap import cy_rv_from_elements\n'), ((3579, 3633), 'numpy.vander', 'np.vander', (['(t - t0)'], {'N': 'prior.poly_trend', 'increasing': '(True)'}), '(t - t0, N=prior.poly_trend, increasing=True)\n', (3588, 3633), True, 'import numpy as np\n'), ((3642, 3672), 'numpy.hstack', 'np.hstack', (['(zdot[:, None], M1)'], {}), '((zdot[:, None], M1))\n', (3651, 3672), True, 'import numpy as np\n'), ((4068, 4086), 'numpy.zeros', 'np.zeros', (['n_linear'], {}), '(n_linear)\n', (4076, 4086), True, 'import numpy as np\n'), ((5568, 5586), 'numpy.zeros', 'np.zeros', (['n_linear'], {}), '(n_linear)\n', (5576, 5586), True, 'import numpy as np\n'), ((5602, 5621), 'numpy.zeros', 'np.zeros', (['n_samples'], {}), '(n_samples)\n', (5610, 5621), True, 'import numpy as np\n'), ((6257, 6275), 'numpy.zeros', 'np.zeros', (['n_linear'], {}), '(n_linear)\n', (6265, 6275), True, 'import numpy as np\n'), ((7725, 7743), 'numpy.zeros', 'np.zeros', (['n_linear'], {}), '(n_linear)\n', (7733, 7743), True, 'import numpy as np\n'), ((2925, 2974), 'numpy.linalg.solve', 
'np.linalg.solve', (['Ainv', '(Λinv @ mu + M.T @ Cinv @ y)'], {}), '(Ainv, Λinv @ mu + M.T @ Cinv @ y)\n', (2940, 2974), True, 'import numpy as np\n'), ((7050, 7097), 'numpy.concatenate', 'np.concatenate', (['(packed_nonlinear, linear_pars)'], {}), '((packed_nonlinear, linear_pars))\n', (7064, 7097), True, 'import numpy as np\n'), ((7761, 7792), 'numpy.zeros', 'np.zeros', (['(n_samples, n_linear)'], {}), '((n_samples, n_linear))\n', (7769, 7792), True, 'import numpy as np\n'), ((7810, 7851), 'numpy.zeros', 'np.zeros', (['(n_samples, n_linear, n_linear)'], {}), '((n_samples, n_linear, n_linear))\n', (7818, 7851), True, 'import numpy as np\n'), ((7869, 7899), 'numpy.zeros', 'np.zeros', (['(n_samples, n_times)'], {}), '((n_samples, n_times))\n', (7877, 7899), True, 'import numpy as np\n'), ((7917, 7956), 'numpy.zeros', 'np.zeros', (['(n_samples, n_times, n_times)'], {}), '((n_samples, n_times, n_times))\n', (7925, 7956), True, 'import numpy as np\n'), ((5832, 5847), 'numpy.diag', 'np.diag', (['Lambda'], {}), '(Lambda)\n', (5839, 5847), True, 'import numpy as np\n'), ((6833, 6848), 'numpy.diag', 'np.diag', (['Lambda'], {}), '(Lambda)\n', (6840, 6848), True, 'import numpy as np\n'), ((8167, 8182), 'numpy.diag', 'np.diag', (['Lambda'], {}), '(Lambda)\n', (8174, 8182), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import numpy as np
import torch
import pandas as pd
import os
import learn2predict as l2p
# Use the device defined in learn2predict (should be the GPU with the most free memory)
device = l2p.device
simDir = "./sim_data"
predictor_name = "flam"
num_mc = 2000
gam = True
M = 10
wdim = 10
df = pd.read_csv(os.path.join(simDir,predictor_name+"_performance.csv"),dtype={'scenario':np.int,'est':object,'n':np.int,'s':np.int,'mse':np.float,'se':np.float})
for n_metatrain in [100,500]:
for s in [1,5]:
if gam:
Pi, Pi_opt, Pi_sched = l2p.initPi(s,s+2,wdim,M=M,gam=gam)
rank_based = True
else:
Pi, Pi_opt, Pi_sched = l2p.initPi(s,s+2,wdim,gam=gam)
rank_based = False
# initialize the procedure
T, T_opt, T_sched = l2p.initT(rank_based=rank_based,gam=gam)
fn_main = './estimators/'+(('Gam' + '_m' + str(M)) if gam else ('Linear')) + '_n' + str(n_metatrain) + '_s' + str(s) + '_wdim' + str(wdim)
iteration, loss_list = l2p.load_model(T, T_opt, T_sched, Pi, Pi_opt, Pi_sched, fn_main+'.tar', fl_backup = fn_main+'_backup.tar')
for scenario in [1,2,3,4]:
for n in [100,500]:
print(("n_metatrain:",n_metatrain,"s:",s,"scenario",scenario,"n",n))
losses = torch.zeros(num_mc)
for i in range(num_mc):
w = torch.tensor(pd.read_csv(os.path.join(simDir,"flam_"+str(scenario), "w_n"+str(n)+"_s"+str(s)+"_mcrep"+str(i)+".csv")).values,device=device,dtype=torch.float)
y = torch.tensor(pd.read_csv(os.path.join(simDir,"flam_"+str(scenario), "y_n"+str(n)+"_s"+str(s)+"_mcrep"+str(i)+".csv")).values,device=device,dtype=torch.float)
w_tilde = torch.tensor(pd.read_csv(os.path.join(simDir,"flam_"+str(scenario), "w_tilde_n"+str(n)+"_s"+str(s)+"_mcrep"+str(i)+".csv")).values,device=device,dtype=torch.float)
regfun = torch.tensor(pd.read_csv(os.path.join(simDir,"flam_"+str(scenario), "regfun_n"+str(n)+"_s"+str(s)+"_mcrep"+str(i)+".csv")).values,device=device,dtype=torch.float)
T_out_pos = T(w_tilde,w,y).squeeze().detach()
T_out_neg = -T(w_tilde,w,-y).squeeze().detach()
T_out = (T_out_pos + T_out_neg)/2
# losses[i] = ((T_out_sym - regfun.squeeze())**2).mean()
losses[i] = ((T_out - regfun.squeeze())**2).mean()
df = df.append(pd.DataFrame({"scenario":[np.int(scenario)],
"est":["AMC"+str(n_metatrain)],
"n":[np.int(n)],
"s":[np.int(s)],
"mse":[np.float(losses.mean().cpu().numpy())],
"se":[np.sqrt(np.float((losses.var()/losses.size()[0]).cpu().numpy()))]}))
df = df.sort_values(by=['s', 'scenario', 'n'])
df.to_csv('tables/flam_results_all_symmetrized.csv', index=False)
df.round(2).to_csv('tables/flam_results_all_symmetrized_rounded.csv', index=False)
| [
"learn2predict.initPi",
"learn2predict.initT",
"learn2predict.load_model",
"os.path.join",
"numpy.int",
"torch.zeros"
] | [((331, 388), 'os.path.join', 'os.path.join', (['simDir', "(predictor_name + '_performance.csv')"], {}), "(simDir, predictor_name + '_performance.csv')\n", (343, 388), False, 'import os\n'), ((819, 860), 'learn2predict.initT', 'l2p.initT', ([], {'rank_based': 'rank_based', 'gam': 'gam'}), '(rank_based=rank_based, gam=gam)\n', (828, 860), True, 'import learn2predict as l2p\n'), ((1039, 1151), 'learn2predict.load_model', 'l2p.load_model', (['T', 'T_opt', 'T_sched', 'Pi', 'Pi_opt', 'Pi_sched', "(fn_main + '.tar')"], {'fl_backup': "(fn_main + '_backup.tar')"}), "(T, T_opt, T_sched, Pi, Pi_opt, Pi_sched, fn_main + '.tar',\n fl_backup=fn_main + '_backup.tar')\n", (1053, 1151), True, 'import learn2predict as l2p\n'), ((579, 619), 'learn2predict.initPi', 'l2p.initPi', (['s', '(s + 2)', 'wdim'], {'M': 'M', 'gam': 'gam'}), '(s, s + 2, wdim, M=M, gam=gam)\n', (589, 619), True, 'import learn2predict as l2p\n'), ((693, 728), 'learn2predict.initPi', 'l2p.initPi', (['s', '(s + 2)', 'wdim'], {'gam': 'gam'}), '(s, s + 2, wdim, gam=gam)\n', (703, 728), True, 'import learn2predict as l2p\n'), ((1323, 1342), 'torch.zeros', 'torch.zeros', (['num_mc'], {}), '(num_mc)\n', (1334, 1342), False, 'import torch\n'), ((2549, 2565), 'numpy.int', 'np.int', (['scenario'], {}), '(scenario)\n', (2555, 2565), True, 'import numpy as np\n'), ((2695, 2704), 'numpy.int', 'np.int', (['n'], {}), '(n)\n', (2701, 2704), True, 'import numpy as np\n'), ((2757, 2766), 'numpy.int', 'np.int', (['s'], {}), '(s)\n', (2763, 2766), True, 'import numpy as np\n')] |
'''
Basic demonstration of the capabilities of the CRNN using TimeDistributed layers
Processes an MNIST image (or blank square) at each time step and sums the digits.
Learning is based on the sum of the digits, not explicit labels on each digit.
'''
from __future__ import print_function
import numpy as np
from keras.datasets import mnist
from keras.models import Sequential
#from keras.initializations import norRemal, identity
from keras.layers.recurrent import SimpleRNN, LSTM, GRU
from keras.optimizers import RMSprop, Adadelta
from keras.layers.convolutional import Convolution2D
from keras.layers.core import Dense, Activation, TimeDistributedDense, Dropout, Reshape, Flatten
from keras.layers.wrappers import TimeDistributed
from keras.models import model_from_json
#import json
def create_model(maxToAdd, size):
model = Sequential()
model.add(TimeDistributed(Convolution2D(8, 4, 1, border_mode='valid'), input_shape=(maxToAdd,1,size*size,1)))
model.add(Activation('relu'))
model.add(TimeDistributed(Convolution2D(16, 3, 1, border_mode='valid')))
model.add(Activation('relu'))
model.add(Reshape((maxToAdd,np.prod(model.output_shape[-3:]))))
model.add(TimeDistributed(Flatten()))
model.add(Activation('relu'))
model.add(GRU(output_dim=100,return_sequences=True))
model.add(GRU(output_dim=50,return_sequences=False))
model.add(Dropout(.2))
model.add(Dense(1))
rmsprop = RMSprop()
model.compile(loss='mean_squared_error', optimizer=rmsprop)
return model
def main():
    """Run the MNIST digit-sum experiment.

    Each example is a sequence of up to ``maxToAdd`` flattened MNIST images;
    the regression target is the sum of the depicted digits. One fresh random
    training and test set is assembled per epoch.
    """
    # for reproducibility
    np.random.seed(2016)
    # run parameters (batch_size / hidden_units kept for reference; the
    # training loop below uses explicit slices of 10 samples)
    batch_size = 32
    nb_epochs = 20
    examplesPer = 60000
    maxToAdd = 8
    hidden_units = 200
    size = 28
    model = create_model(maxToAdd=maxToAdd, size=size)
    # the data, shuffled and split between train and test sets
    (X_train_raw, y_train_temp), (X_test_raw, y_test_temp) = mnist.load_data()
    # basic image processing: scale grey values to [0, 1]
    X_train_raw = X_train_raw.astype('float32')
    X_test_raw = X_test_raw.astype('float32')
    X_train_raw /= 255
    X_test_raw /= 255
    print('X_train_raw shape:', X_train_raw.shape)
    print(X_train_raw.shape[0], 'train samples')
    print(X_test_raw.shape[0], 'test samples')
    print("Building model")
    for ep in range(0, nb_epochs):
        # ---- assemble a fresh random training set for this epoch ----
        X_train = np.zeros((examplesPer, maxToAdd, 1, size * size, 1))
        y_train = []
        for i in range(0, examplesPer):
            # one example: maxToAdd time steps of flattened images
            output = np.zeros((maxToAdd, 1, size * size, 1))
            # how many MNIST images go into this sequence; int() is required
            # because np.ceil returns a float, which numpy rejects as a
            # slice index and as the `size` argument of random.choice
            numToAdd = int(np.ceil(np.random.rand() * maxToAdd))
            # sample that many images
            indices = np.random.choice(X_train_raw.shape[0], size=numToAdd)
            example = np.reshape(X_train_raw[indices],
                                 [X_train_raw[indices].shape[0], size * size, 1])
            # target: sum of the sampled digit labels
            exampleY = y_train_temp[indices]
            output[0:numToAdd, 0, :, :] = example
            X_train[i, :, :, :, :] = output
            y_train.append(np.sum(exampleY))
        y_train = np.array(y_train)
        if ep == 0:
            print("X_train shape: ", X_train.shape)
            print("y_train shape: ", y_train.shape)
        # NOTE(review): batches overlap (stride 1, width 10) — kept as in the
        # original; presumably intended. TODO confirm.
        for i in range(60000):
            loss = model.train_on_batch(X_train[i:i + 10], y_train[i:i + 10])
            print("loss %f" % loss)
        # ---- test the model on freshly assembled sequences ----
        # The test tensors must match the model input shape
        # (maxToAdd, 1, size*size, 1); the original built (maxToAdd, 1, size,
        # size), which model.predict would reject.
        X_test = np.zeros((examplesPer, maxToAdd, 1, size * size, 1))
        y_test = []
        for i in range(0, examplesPer):
            output = np.zeros((maxToAdd, 1, size * size, 1))
            numToAdd = int(np.ceil(np.random.rand() * maxToAdd))
            indices = np.random.choice(X_test_raw.shape[0], size=numToAdd)
            example = np.reshape(X_test_raw[indices],
                                 [X_test_raw[indices].shape[0], size * size, 1])
            exampleY = y_test_temp[indices]
            output[0:numToAdd, 0, :, :] = example
            X_test[i, :, :, :, :] = output
            y_test.append(np.sum(exampleY))
        y_test = np.array(y_test)
        preds = model.predict(X_test)
        # report the RMSE of the predictions and of a naive mean-value guess
        print(np.sum(np.sqrt(np.mean([(y_test[i] - preds[i][0]) ** 2
                                    for i in range(0, len(preds))]))))
        print("naive guess", np.sum(np.sqrt(np.mean([(y_test[i] - np.mean(y_test)) ** 2
                                                   for i in range(0, len(y_test))]))))
if __name__ == '__main__':
    # Script entry point: run the full training/evaluation experiment.
    main()
| [
"keras.layers.recurrent.GRU",
"numpy.prod",
"keras.layers.core.Flatten",
"numpy.mean",
"numpy.reshape",
"keras.layers.core.Activation",
"keras.datasets.mnist.load_data",
"keras.layers.core.Dropout",
"numpy.random.choice",
"keras.layers.convolutional.Convolution2D",
"numpy.random.rand",
"keras.... | [((838, 850), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (848, 850), False, 'from keras.models import Sequential\n'), ((1434, 1443), 'keras.optimizers.RMSprop', 'RMSprop', ([], {}), '()\n', (1441, 1443), False, 'from keras.optimizers import RMSprop, Adadelta\n'), ((1570, 1590), 'numpy.random.seed', 'np.random.seed', (['(2016)'], {}), '(2016)\n', (1584, 1590), True, 'import numpy as np\n'), ((1985, 2002), 'keras.datasets.mnist.load_data', 'mnist.load_data', ([], {}), '()\n', (2000, 2002), False, 'from keras.datasets import mnist\n'), ((3918, 3966), 'numpy.zeros', 'np.zeros', (['(examplesPer, maxToAdd, 1, size, size)'], {}), '((examplesPer, maxToAdd, 1, size, size))\n', (3926, 3966), True, 'import numpy as np\n'), ((4402, 4418), 'numpy.array', 'np.array', (['X_test'], {}), '(X_test)\n', (4410, 4418), True, 'import numpy as np\n'), ((4433, 4449), 'numpy.array', 'np.array', (['y_test'], {}), '(y_test)\n', (4441, 4449), True, 'import numpy as np\n'), ((979, 997), 'keras.layers.core.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (989, 997), False, 'from keras.layers.core import Dense, Activation, TimeDistributedDense, Dropout, Reshape, Flatten\n'), ((1090, 1108), 'keras.layers.core.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (1100, 1108), False, 'from keras.layers.core import Dense, Activation, TimeDistributedDense, Dropout, Reshape, Flatten\n'), ((1234, 1252), 'keras.layers.core.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (1244, 1252), False, 'from keras.layers.core import Dense, Activation, TimeDistributedDense, Dropout, Reshape, Flatten\n'), ((1268, 1310), 'keras.layers.recurrent.GRU', 'GRU', ([], {'output_dim': '(100)', 'return_sequences': '(True)'}), '(output_dim=100, return_sequences=True)\n', (1271, 1310), False, 'from keras.layers.recurrent import SimpleRNN, LSTM, GRU\n'), ((1325, 1367), 'keras.layers.recurrent.GRU', 'GRU', ([], {'output_dim': '(50)', 'return_sequences': 
'(False)'}), '(output_dim=50, return_sequences=False)\n', (1328, 1367), False, 'from keras.layers.recurrent import SimpleRNN, LSTM, GRU\n'), ((1382, 1394), 'keras.layers.core.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (1389, 1394), False, 'from keras.layers.core import Dense, Activation, TimeDistributedDense, Dropout, Reshape, Flatten\n'), ((1409, 1417), 'keras.layers.core.Dense', 'Dense', (['(1)'], {}), '(1)\n', (1414, 1417), False, 'from keras.layers.core import Dense, Activation, TimeDistributedDense, Dropout, Reshape, Flatten\n'), ((2774, 2826), 'numpy.zeros', 'np.zeros', (['(examplesPer, maxToAdd, 1, size * size, 1)'], {}), '((examplesPer, maxToAdd, 1, size * size, 1))\n', (2782, 2826), True, 'import numpy as np\n'), ((3597, 3614), 'numpy.array', 'np.array', (['y_train'], {}), '(y_train)\n', (3605, 3614), True, 'import numpy as np\n'), ((4020, 4055), 'numpy.zeros', 'np.zeros', (['(maxToAdd, 1, size, size)'], {}), '((maxToAdd, 1, size, size))\n', (4028, 4055), True, 'import numpy as np\n'), ((4132, 4184), 'numpy.random.choice', 'np.random.choice', (['X_test_raw.shape[0]'], {'size': 'numToAdd'}), '(X_test_raw.shape[0], size=numToAdd)\n', (4148, 4184), True, 'import numpy as np\n'), ((881, 924), 'keras.layers.convolutional.Convolution2D', 'Convolution2D', (['(8)', '(4)', '(1)'], {'border_mode': '"""valid"""'}), "(8, 4, 1, border_mode='valid')\n", (894, 924), False, 'from keras.layers.convolutional import Convolution2D\n'), ((1029, 1073), 'keras.layers.convolutional.Convolution2D', 'Convolution2D', (['(16)', '(3)', '(1)'], {'border_mode': '"""valid"""'}), "(16, 3, 1, border_mode='valid')\n", (1042, 1073), False, 'from keras.layers.convolutional import Convolution2D\n'), ((1208, 1217), 'keras.layers.core.Flatten', 'Flatten', ([], {}), '()\n', (1215, 1217), False, 'from keras.layers.core import Dense, Activation, TimeDistributedDense, Dropout, Reshape, Flatten\n'), ((2968, 3007), 'numpy.zeros', 'np.zeros', (['(maxToAdd, 1, size * size, 1)'], {}), '((maxToAdd, 
1, size * size, 1))\n', (2976, 3007), True, 'import numpy as np\n'), ((3192, 3245), 'numpy.random.choice', 'np.random.choice', (['X_train_raw.shape[0]'], {'size': 'numToAdd'}), '(X_train_raw.shape[0], size=numToAdd)\n', (3208, 3245), True, 'import numpy as np\n'), ((3271, 3348), 'numpy.reshape', 'np.reshape', (['X_train_raw[indices]', '[X_train_raw[indices].shape[0], 28 * 28, 1]'], {}), '(X_train_raw[indices], [X_train_raw[indices].shape[0], 28 * 28, 1])\n', (3281, 3348), True, 'import numpy as np\n'), ((4369, 4385), 'numpy.sum', 'np.sum', (['exampleY'], {}), '(exampleY)\n', (4375, 4385), True, 'import numpy as np\n'), ((1142, 1174), 'numpy.prod', 'np.prod', (['model.output_shape[-3:]'], {}), '(model.output_shape[-3:])\n', (1149, 1174), True, 'import numpy as np\n'), ((3556, 3572), 'numpy.sum', 'np.sum', (['exampleY'], {}), '(exampleY)\n', (3562, 3572), True, 'import numpy as np\n'), ((4083, 4099), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (4097, 4099), True, 'import numpy as np\n'), ((3102, 3118), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (3116, 3118), True, 'import numpy as np\n'), ((4685, 4700), 'numpy.mean', 'np.mean', (['y_test'], {}), '(y_test)\n', (4692, 4700), True, 'import numpy as np\n')] |
# Author: <NAME>
# Collaborators: Prof. <NAME>, Prof. <NAME>, Dr. <NAME>
# Email : <EMAIL>/<EMAIL>
# Affiliation : Imperial Centre for Inference and Cosmology
# Status : Under Development
'''
Important linear algebra operations for Gaussian Process
'''
import numpy as np
from GPy.util import linalg as gpl
def solve(matrix: np.ndarray, b_vec: np.ndarray, return_chol: bool = False) -> np.ndarray:
    '''
    Solve the linear system Ax = b for x.

    Diagonal matrices are handled without any factorization; general matrices
    go through a jittered Cholesky decomposition (GPy's jitchol) followed by a
    triangular solve, for numerical stability.

    :param: matrix (np.ndarray) : 'A' matrix of size N x N
    :param: b_vec (np.ndarray) : 'b' vector of size N
    :param: return_chol (bool) : if True, the Cholesky factor is also returned
    :return: solution (np.ndarray) : 'x' in the equation above
    If we want the Cholesky factor:
    :return: chol_factor (np.ndarray) : the Cholesky factor
    '''
    if diagonal(matrix):
        # elementwise division by the diagonal entries - no inversion needed
        solution = 1. / np.atleast_2d(np.diag(matrix)).T * b_vec
        if not return_chol:
            return solution
        # Cholesky factor of a diagonal matrix is its elementwise square root
        return solution, np.sqrt(matrix)
    # stable Cholesky factorization with jitter (GPy), then solve for x
    chol_factor = gpl.jitchol(matrix)
    solution = gpl.dpotrs(chol_factor, b_vec, lower=True)[0]
    if return_chol:
        return solution, chol_factor
    return solution
def matrix_inverse(matrix: np.ndarray, return_chol: bool = False) -> np.ndarray:
    '''
    Sometimes, we would need the matrix inverse as well
    If we are dealing with diagonal matrix, inversion is simple
    :param: matrix (np.ndarray) : matrix of size N x N
    :param: return_chol (bool) : if True, the Cholesky factor will be returned
    :return: dummy (np.ndarray) : matrix inverse
    If we also want the Cholesky factor:
    :return: chol_factor (np.ndarray) : the Cholesky factor
    '''
    # check if matrix is diagonal first
    if diagonal(matrix):
        # simple matrix inversion
        dummy = np.diag(1. / np.diag(matrix))
        # FIX: honour return_chol here too (previously the flag was silently
        # ignored for diagonal matrices, contradicting the docstring and the
        # behaviour of solve()); the Cholesky factor of a diagonal matrix is
        # its elementwise square root
        if return_chol:
            return dummy, np.sqrt(matrix)
        return dummy
    # calculate the Cholesky factor using jitchol from GPy
    # for numerical stability
    chol_factor = gpl.jitchol(matrix)
    # perform matrix inversion via a triangular solve against the identity
    dummy = gpl.dpotrs(chol_factor, np.eye(chol_factor.shape[0]), lower=True)[0]
    if return_chol:
        return dummy, chol_factor
    return dummy
def diagonal(matrix: np.ndarray) -> bool:
    '''
    Check if a matrix is diagonal
    :param: matrix (np.ndarray) : matrix of size N x N
    :return: cond (bool) : True if all off-diagonal entries are zero
    '''
    # FIX: previously `cond` was only assigned inside the if-branch, so any
    # non-diagonal matrix raised UnboundLocalError on `return cond`; return
    # the comparison directly instead.
    return np.count_nonzero(matrix - np.diag(np.diagonal(matrix))) == 0
| [
"numpy.diagonal",
"numpy.eye",
"numpy.sqrt",
"GPy.util.linalg.jitchol",
"numpy.diag",
"GPy.util.linalg.dpotrs"
] | [((1402, 1421), 'GPy.util.linalg.jitchol', 'gpl.jitchol', (['matrix'], {}), '(matrix)\n', (1413, 1421), True, 'from GPy.util import linalg as gpl\n'), ((2420, 2439), 'GPy.util.linalg.jitchol', 'gpl.jitchol', (['matrix'], {}), '(matrix)\n', (2431, 2439), True, 'from GPy.util import linalg as gpl\n'), ((1214, 1229), 'numpy.sqrt', 'np.sqrt', (['matrix'], {}), '(matrix)\n', (1221, 1229), True, 'import numpy as np\n'), ((1463, 1505), 'GPy.util.linalg.dpotrs', 'gpl.dpotrs', (['chol_factor', 'b_vec'], {'lower': '(True)'}), '(chol_factor, b_vec, lower=True)\n', (1473, 1505), True, 'from GPy.util import linalg as gpl\n'), ((2250, 2265), 'numpy.diag', 'np.diag', (['matrix'], {}), '(matrix)\n', (2257, 2265), True, 'import numpy as np\n'), ((2516, 2544), 'numpy.eye', 'np.eye', (['chol_factor.shape[0]'], {}), '(chol_factor.shape[0])\n', (2522, 2544), True, 'import numpy as np\n'), ((2901, 2920), 'numpy.diagonal', 'np.diagonal', (['matrix'], {}), '(matrix)\n', (2912, 2920), True, 'import numpy as np\n'), ((1057, 1072), 'numpy.diag', 'np.diag', (['matrix'], {}), '(matrix)\n', (1064, 1072), True, 'import numpy as np\n')] |
import os
#from subprocess import call
import numpy as np
import matplotlib
# Select the non-interactive Agg backend before pyplot is imported so the
# module also works without a display (e.g. on a cluster).
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def convertToFullCartersianCoordinates(data, dofsnp=None, dtype=int, x_dim_red=60, x_dim_original=66):
    """Expand reduced Cartesian samples back to the full coordinate set.

    The degrees of freedom listed in *dofsnp* were removed from the reduced
    representation; they stay zero in the output while all other columns are
    copied over in order.

    :param data: (batch, x_dim_red) array of reduced samples
    :param dofsnp: indices of the removed DOFs; defaults to
        [18, 19, 20, 24, 25, 43] (built with *dtype*). FIX: the previous
        signature used a mutable ``np.array`` default with a stray ``dtype``
        parameter that was clearly meant as the array's dtype.
    :param dtype: dtype of the default *dofsnp* index array
    :param x_dim_red: reduced dimensionality (kept for interface compatibility)
    :param x_dim_original: full dimensionality of the output
    :return: (batch, x_dim_original) array with zeros at the removed DOFs
    """
    if dofsnp is None:
        dofsnp = np.array([18, 19, 20, 24, 25, 43], dtype=dtype)
    dofs = dofsnp
    batch_size = data.shape[0]
    data_ext = np.zeros([batch_size, x_dim_original])
    # copy the contiguous segments between the removed DOF indices
    data_ext[:, 0:dofs[0]] = data[:, 0:dofs[0]]
    data_ext[:, dofs[2] + 1:dofs[3]] = data[:, dofs[0]:dofs[0] + 3]
    data_ext[:, dofs[4] + 1:dofs[5]] = data[:, dofs[0] + 3:dofs[0] + 3 + (dofs[5] - dofs[4] - 1)]
    data_ext[:, dofs[5] + 1:] = data[:, dofs[0] + 3 + (dofs[5] - dofs[4] - 1):]
    return data_ext
def convert_given_representation(samples, coordrep, unitgiven=1., bredcoord=False):
    """Convert samples from the given coordinate representation to Cartesian.

    :param samples: sample array, one sample per row
    :param coordrep: 'ang', 'ang_augmented', 'ang_auggrouped' for the angular
        representations; anything else is treated as Cartesian
    :param unitgiven: unit conversion factor applied to lengths
    :param bredcoord: Cartesian branch only - expand reduced coordinates first
    :return: converted samples, one sample per column
    """
    scale = unitgiven
    if coordrep == 'ang':
        return convertangulardataset(samples.T)
    if coordrep == 'ang_augmented':
        return convertangularaugmenteddataset(samples.T)
    if coordrep == 'ang_auggrouped':
        return convertangularaugmenteddataset(samples.T, bgrouped=True, convertfactor=scale)
    # Cartesian representation: optionally expand reduced coordinates first.
    if bredcoord:
        samples = convertToFullCartersianCoordinates(data=samples)
    return samples.T / scale
def getcolorcodeALA15(ramapath, N, ssize=5):
    """Get color coding for the ALA-15 1527 dataset.

    Loads the Ramachandran angles, categorizes every configuration and
    returns per-sample colors, marker styles, legend scatter handles and
    alpha values.

    :param ramapath: directory containing 'rama_dataset_ala_15_1500.xvg'
    :param N: unused here; kept for interface compatibility
    :param ssize: marker size of the legend scatter handles
    :return: (colInd, marker, patchlist, alphaInd)
    """
    from analyse_ala_15 import AngleCategorizer
    nResidues = 15
    angles = np.loadtxt(os.path.join(ramapath, 'rama_dataset_ala_15_1500.xvg'),
                        skiprows=32, usecols=range(0, 2), delimiter=' ')
    # FIX: integer division - angles.shape[0] / 15 is a float under Python 3
    # and ndarray.resize rejects non-integer dimensions.
    nSamples = angles.shape[0] // nResidues
    angles.resize(nSamples, nResidues, 2)
    angCat = AngleCategorizer(angles)
    angCat.categorize()
    angCat.countConfigurations()
    colInd = angCat.getColorMatrix()
    alphaInd = angCat.getAlphaVals()
    marker = ['o', 'o', 'o']
    import matplotlib.patches as mpatches
    patchlist = [mpatches.Patch(color='black', label=r'$\alpha$'),
                 mpatches.Patch(color='blue', label=r'$\beta$-1'),
                 mpatches.Patch(color='red', label=r'$\beta$-2')]
    # legend handles built via throw-away scatter plots on the Agg backend
    alpha = plt.scatter(0, 1, c='k', marker=marker[0], s=ssize, label=r'$\alpha$')
    beta1 = plt.scatter(0, 1, c='b', marker=marker[1], s=ssize, label=r'$\beta\textnormal{-}1$')
    beta2 = plt.scatter(0, 1, c='r', marker=marker[2], s=ssize, label=r'$\beta\textnormal{-}2$')
    plt.close()
    patchlist = [alpha, beta1, beta2]
    return colInd, marker, patchlist, alphaInd
def getcolorcode1527(ssize=5):
    """Get color coding for the ALA-2 1527 dataset.

    Returns one color per sample (29 alpha, 932 beta-1, 566 beta-2
    configurations, in that order), the marker styles and the legend
    scatter handles.
    """
    n_alpha, n_beta1, n_beta2 = 29, 932, 566
    colInd = ['k'] * n_alpha + ['b'] * n_beta1 + ['r'] * n_beta2
    marker = ['o', 'v', 'x']
    import matplotlib.patches as mpatches
    patchlist = [mpatches.Patch(color='black', label=r'$\alpha$'),
                 mpatches.Patch(color='blue', label=r'$\beta$-1'),
                 mpatches.Patch(color='red', label=r'$\beta$-2')]
    # legend handles built via throw-away scatter plots
    alpha = plt.scatter(0, 1, c='k', marker=marker[0], s=ssize, label=r'$\alpha$')
    beta1 = plt.scatter(0, 1, c='b', marker=marker[1], s=ssize, label=r'$\beta\textnormal{-}1$')
    beta2 = plt.scatter(0, 1, c='r', marker=marker[2], s=ssize, label=r'$\beta\textnormal{-}2$')
    plt.close()
    patchlist = [alpha, beta1, beta2]
    return colInd, marker, patchlist
def estimateProperties(samples_name, cluster, datasetN, pathofsamples=None, postS=0, nCores=2, peptide='ala_2'):
    """Write shell scripts that run the property-estimation tools on a sample file.

    Two executable scripts are created in *pathofsamples*:
      - est_prop.sh          : estimates properties of the predicted samples
        (with an SGE job header and conda/gromacs setup when *cluster* is True)
      - est_prop_compare.sh  : compares them against reference dataset *datasetN*

    :param samples_name: file name of the predicted samples
    :param cluster: True -> use the CRC cluster paths and an SGE preamble
    :param datasetN: index of the reference dataset for the compare script
    :param pathofsamples: directory where the scripts (and samples) live
    :param postS: number of posterior samples (cluster run only)
    :param nCores: number of cores requested (cluster run only)
    :param peptide: peptide identifier passed to the estimation tool
    """
    # ---- first script: property estimation on the predicted samples ----
    command = ''
    if cluster == True:
        command += 'python /afs/crc.nd.edu/user/m/mschoebe/Private/projects/ganpepvae/estimate_properties.py'
        command += ' --referenceDirectory ' + '/afs/crc.nd.edu/user/m/mschoebe/Private/data/data_peptide/'
        command += ' --cluster ' + '2'
        command += ' --postS ' + str(postS)
        command += ' --nCores ' + str(nCores)
    else:
        command += 'python /home/schoeberl/predictive_cvs/prediction/propteinpropcal/estimate_properties.py'
        command += ' --referenceDirectory ' + '/home/schoeberl/predictive_cvs/prediction/propteinpropcal/'
    if pathofsamples is not None:
        command += ' --predFilePath ' + pathofsamples + '/'
    command += ' --dataCollected random'
    command += ' --fileNamePred ' + samples_name
    command += ' --conformation ' + 'm'
    command += ' --peptide ' + peptide
    # FIX: use context managers so the script files are always closed
    with open(pathofsamples + '/est_prop.sh', 'w') as f:
        if cluster == True:
            # SGE job header + environment setup for the cluster
            f.write('#!/bin/bash\n')
            f.write('#$ -N est_prop_' + samples_name + '\n')
            f.write('#$ -pe smp ' + str(nCores) + '\n')
            f.write('#$ -q debug\n\n')
            f.write('source activate work\n')
            f.write('module load gromacs\n\n')
        f.write(command)
    os.chmod(pathofsamples + '/est_prop.sh', 0o777)
    # ---- second script: comparison with the real (reference) dataset ----
    command = ''
    if cluster == True:
        command += 'python /afs/crc.nd.edu/user/m/mschoebe/Private/projects/ganpepvae/estimate_properties_compare.py'
        command += ' --referenceDirectory ' + '/afs/crc.nd.edu/user/m/mschoebe/Private/data/data_peptide/'
        command += ' --cluster ' + '2'
        command += ' --postS ' + str(postS)
        command += ' --nCores ' + str(nCores)
    else:
        command += 'python /home/schoeberl/predictive_cvs/prediction/propteinpropcal/estimate_properties_compare.py'
        command += ' --referenceDirectory ' + '/home/schoeberl/predictive_cvs/prediction/propteinpropcal/'
    if pathofsamples is not None:
        command += ' --predFilePath ' + pathofsamples + '/'
    command += ' --dataCollected random'
    command += ' --compareRefData dataset_' + str(datasetN)
    command += ' --fileNamePred ' + samples_name
    command += ' --conformation ' + 'm'
    command += ' --peptide ' + peptide
    with open(pathofsamples + '/est_prop_compare.sh', 'w') as f:
        f.write(command)
    os.chmod(pathofsamples + '/est_prop_compare.sh', 0o777)
def getAbsCoordinates(xyz):
    """Reconstruct absolute Cartesian atom positions of an ACE-(ALA)_n-NME
    chain (here nALA = 1) from relative bond vectors.

    Row i of *xyz* is the offset of one atom from its reference (parent) atom;
    the first ACE CH3 atom is pinned to the origin, which is why the output
    has one row more than the input.

    :param: xyz (np.ndarray) : (N, 3) relative coordinates
    :return: _xyzAbs (np.ndarray) : (N + 1, 3) absolute coordinates
    """
    _xyzAbs = np.zeros([xyz.shape[0] + 1, xyz.shape[1]])
    # number of residues
    nACE = 1
    nALA = 1
    nNME = 1
    # number of atoms per residue type
    ACEleng = 6
    ALAleng = 10
    NMEleng = 6
    # go through every residue; one coordinate buffer per residue type
    aACE = np.zeros([nACE * ACEleng, 3])
    aALA = np.zeros([nALA * ALAleng, 3])
    aNME = np.zeros([nNME * NMEleng, 3])
    # 1HH3 = CH3 + (1HH3 - CH3)
    aACE[0, :] = xyz[0, :]
    # CH3 = 0 (reference atom pinned at the origin)
    # aACE[1,:] = 0
    # 2HH3 = CH3 + (2HH3 - CH3)
    aACE[2, :] = xyz[1, :]
    # 3HH3 = CH3 + (3HH3 - CH3)
    aACE[3, :] = xyz[2, :]
    # C = CH3 + (C - CH3)
    aACE[4, :] = xyz[3, :]
    # O = C + (O - C)
    aACE[5, :] = aACE[4, :] + xyz[4, :]
    # first N coordinate (backbone N of the first ALA, anchored at ACE's C)
    aALA[0, :] = aACE[4, :] + xyz[5, :]
    for iALA in range(0, nALA):
        # N = C + (N - C)
        if iALA > 0:
            aALA[iALA * ALAleng + 0, :] = aALA[iALA * ALAleng - 2, :] + xyz[ACEleng + iALA * ALAleng - 1, :]
        # H = N + (H - N)
        aALA[iALA * ALAleng + 1, :] = aALA[iALA * ALAleng + 0, :] + xyz[ACEleng + iALA * ALAleng + 0, :]
        # CA = N + (CA - N)
        aALA[iALA * ALAleng + 2, :] = aALA[iALA * ALAleng + 0, :] + xyz[ACEleng + iALA * ALAleng + 1, :]
        # HA = CA + (HA - CA)
        aALA[iALA * ALAleng + 3, :] = aALA[iALA * ALAleng + 2, :] + xyz[ACEleng + iALA * ALAleng + 2, :]
        # CB = CA + (CB - CA)
        aALA[iALA * ALAleng + 4, :] = aALA[iALA * ALAleng + 2, :] + xyz[ACEleng + iALA * ALAleng + 3, :]
        # HB1 = CB + (HB1 - CB)
        aALA[iALA * ALAleng + 5, :] = aALA[iALA * ALAleng + 4, :] + xyz[ACEleng + iALA * ALAleng + 4, :]
        # HB2 = CB + (HB2 - CB)
        aALA[iALA * ALAleng + 6, :] = aALA[iALA * ALAleng + 4, :] + xyz[ACEleng + iALA * ALAleng + 5, :]
        # HB3 = CB + (HB3 - CB)
        aALA[iALA * ALAleng + 7, :] = aALA[iALA * ALAleng + 4, :] + xyz[ACEleng + iALA * ALAleng + 6, :]
        # C = CA + (C - CA)
        aALA[iALA * ALAleng + 8, :] = aALA[iALA * ALAleng + 2, :] + xyz[ACEleng + iALA * ALAleng + 7, :]
        # O = C + (O - C)
        aALA[iALA * ALAleng + 9, :] = aALA[iALA * ALAleng + 8, :] + xyz[ACEleng + iALA * ALAleng + 8, :]
    # Last part: the NME cap
    # N = C + (N - C)
    aNME[0, :] = aALA[nALA * ALAleng - 2, :] + xyz[ACEleng + nALA * ALAleng - 1, :]
    # H = N + (H - N)
    aNME[1, :] = aNME[0, :] + xyz[ACEleng + nALA * ALAleng + 0, :]
    # CH3 = N + (CH3 - N)
    aNME[2, :] = aNME[0, :] + xyz[ACEleng + nALA * ALAleng + 1, :]
    # 1HH3 = CH3 + (1HH3 - CH3)
    aNME[3, :] = aNME[2, :] + xyz[ACEleng + nALA * ALAleng + 2, :]
    # 2HH3 = CH3 + (2HH3 - CH3)
    aNME[4, :] = aNME[2, :] + xyz[ACEleng + nALA * ALAleng + 3, :]
    # 3HH3 = CH3 + (2HH3 - CH3)
    aNME[5, :] = aNME[2, :] + xyz[ACEleng + nALA * ALAleng + 4, :]
    # assemble the per-residue blocks into the full coordinate array
    _xyzAbs[0:(ACEleng), :] = aACE
    _xyzAbs[ACEleng:(ACEleng + nALA * ALAleng), :] = aALA
    _xyzAbs[(ACEleng + nALA * ALAleng):, :] = aNME
    return _xyzAbs
def getCartesian(rphitheta, dataaugmented=False):
    """Convert spherical bond coordinates to absolute Cartesian positions.

    :param rphitheta: per-bond coordinates; either (r, phi, theta) triples or,
        when *dataaugmented* is True, (r, sin phi, cos phi, sin theta,
        cos theta) tuples
    :param dataaugmented: select the augmented (precomputed sin/cos) layout
    :return: absolute coordinates as produced by getAbsCoordinates
    """
    if dataaugmented:
        rel = np.zeros([rphitheta.shape[0], 3])
        radius = rphitheta[:, 0]
        sin_phi, cos_phi = rphitheta[:, 1], rphitheta[:, 2]
        sin_theta, cos_theta = rphitheta[:, 3], rphitheta[:, 4]
        rel[:, 0] = radius * cos_theta * sin_phi
        rel[:, 1] = radius * sin_theta * sin_phi
        rel[:, 2] = radius * cos_phi
    else:
        rel = np.zeros(rphitheta.shape)
        radius = rphitheta[:, 0]
        phi = rphitheta[:, 1]
        theta = rphitheta[:, 2]
        rel[:, 0] = radius * np.cos(theta) * np.sin(phi)
        rel[:, 1] = radius * np.sin(theta) * np.sin(phi)
        rel[:, 2] = radius * np.cos(phi)
    return getAbsCoordinates(xyz=rel)
def convertangulardataset(data):
    """Convert an angular dataset to Cartesian coordinates.

    Each sample (column of *data*) is a flat sequence of (r, phi, theta)
    triples, one per bond vector.

    :param data: (dim x n) array, one sample per column; dim must be a
        multiple of 3
    :return: (dim + 3 x n) array of Cartesian coordinates (the extra three
        entries come from the pinned reference atom added by
        getAbsCoordinates)
    """
    dim = data.shape[0]
    n = data.shape[1]
    datacatout = np.zeros([dim + 3, n])
    for j in range(0, n):
        sample = data[:, j]
        # FIX: integer division - dim / 3 is a float under Python 3 and
        # numpy rejects non-integer array dimensions.
        rphitheta = np.zeros([dim // 3, 3])
        for i in range(0, rphitheta.shape[0]):
            rphitheta[i, 0] = sample[i * 3 + 0]
            rphitheta[i, 1] = sample[i * 3 + 1]
            rphitheta[i, 2] = sample[i * 3 + 2]
        datacoord = getCartesian(rphitheta=rphitheta)
        datacoordvec = np.reshape(datacoord, sample.shape[0] + 3)
        datacatout[:, j] = np.copy(datacoordvec)
    return datacatout
def convertangularaugmenteddataset(data, bgrouped=False, convertfactor=1.):
    """Convert an augmented angular dataset to Cartesian coordinates.

    Each coordinate tuple has ``sizeofcoord`` = 5 entries:
    (r, sin phi, cos phi, sin theta, cos theta).

    :param: data (np.ndarray) : (dim x n) array, one sample per column
    :param: bgrouped (bool) : if True, rows are grouped by quantity (all r
        first, then all sin phi, ...) instead of interleaved per tuple
    :param: convertfactor (float) : unit conversion divided out of the radii
        in the grouped layout
    :return: datacatout (np.ndarray) : (nparticles * 3 x n) Cartesian samples
    """
    #outname = 'samples.txt'
    #data = np.loadtxt('dataset_mixed_1527_ang.txt')
    dim = data.shape[0]
    n = data.shape[1]
    # specify the size of one coordinate point: here (r, sin \theta, cos \theta, sin \psi, cos \psi)
    sizeofcoord = 5
    nparticles = int(dim / sizeofcoord + 1)
    ncoordtuples = nparticles - 1
    datacatout = np.zeros([nparticles * 3, n])
    if bgrouped:
        dataUse = np.zeros(data.shape)
        # sorted dataset r1 r2 r3 r4 , sin sin sin
        # temporary dataset for the re-interleaved per-tuple layout:
        # slice out each quantity block first
        r = data[0 * ncoordtuples:1 * ncoordtuples, :]
        sinphi = data[1 * ncoordtuples:2 * ncoordtuples, :]
        cosphi = data[2 * ncoordtuples:3 * ncoordtuples, :]
        sintheta = data[3 * ncoordtuples:4 * ncoordtuples, :]
        costheta = data[4 * ncoordtuples:5 * ncoordtuples, :]
        # interleave back into per-tuple layout, converting the radii
        for i in range(0, ncoordtuples):
            dataUse[i * sizeofcoord + 0, :] = r[i, :] / convertfactor
            dataUse[i * sizeofcoord + 1, :] = sinphi[i, :]
            dataUse[i * sizeofcoord + 2, :] = cosphi[i, :]
            dataUse[i * sizeofcoord + 3, :] = sintheta[i, :]
            dataUse[i * sizeofcoord + 4, :] = costheta[i, :]
    else:
        dataUse = np.copy(data)
    # convert every sample column individually
    for j in range(0, n):
        sample = dataUse[:, j]
        rphithetaaugmented = np.zeros([int(dim/sizeofcoord), sizeofcoord])
        for i in range(0, rphithetaaugmented.shape[0]):
            rphithetaaugmented[i, 0] = sample[i * sizeofcoord + 0]
            rphithetaaugmented[i, 1] = sample[i * sizeofcoord + 1]
            rphithetaaugmented[i, 2] = sample[i * sizeofcoord + 2]
            rphithetaaugmented[i, 3] = sample[i * sizeofcoord + 3]
            rphithetaaugmented[i, 4] = sample[i * sizeofcoord + 4]
        datacoord = getCartesian(rphitheta=rphithetaaugmented, dataaugmented=True)
        datacoordvec = np.reshape(datacoord, nparticles * 3)
        datacatout[:, j] = np.copy(datacoordvec)
    return datacatout
    #np.savetxt(outname, datacatout)
| [
"numpy.copy",
"numpy.reshape",
"matplotlib.use",
"os.path.join",
"os.chmod",
"matplotlib.pyplot.close",
"numpy.array",
"numpy.zeros",
"analyse_ala_15.AngleCategorizer",
"matplotlib.pyplot.scatter",
"matplotlib.patches.Patch",
"numpy.sin",
"numpy.cos"
] | [((78, 99), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (92, 99), False, 'import matplotlib\n'), ((186, 220), 'numpy.array', 'np.array', (['[18, 19, 20, 24, 25, 43]'], {}), '([18, 19, 20, 24, 25, 43])\n', (194, 220), True, 'import numpy as np\n'), ((333, 371), 'numpy.zeros', 'np.zeros', (['[batch_size, x_dim_original]'], {}), '([batch_size, x_dim_original])\n', (341, 371), True, 'import numpy as np\n'), ((1769, 1793), 'analyse_ala_15.AngleCategorizer', 'AngleCategorizer', (['angles'], {}), '(angles)\n', (1785, 1793), False, 'from analyse_ala_15 import AngleCategorizer\n'), ((2307, 2377), 'matplotlib.pyplot.scatter', 'plt.scatter', (['(0)', '(1)'], {'c': '"""k"""', 'marker': 'marker[0]', 's': 'ssize', 'label': '"""$\\\\alpha$"""'}), "(0, 1, c='k', marker=marker[0], s=ssize, label='$\\\\alpha$')\n", (2318, 2377), True, 'import matplotlib.pyplot as plt\n'), ((2390, 2480), 'matplotlib.pyplot.scatter', 'plt.scatter', (['(0)', '(1)'], {'c': '"""b"""', 'marker': 'marker[1]', 's': 'ssize', 'label': '"""$\\\\beta\\\\textnormal{-}1$"""'}), "(0, 1, c='b', marker=marker[1], s=ssize, label=\n '$\\\\beta\\\\textnormal{-}1$')\n", (2401, 2480), True, 'import matplotlib.pyplot as plt\n'), ((2487, 2577), 'matplotlib.pyplot.scatter', 'plt.scatter', (['(0)', '(1)'], {'c': '"""r"""', 'marker': 'marker[2]', 's': 'ssize', 'label': '"""$\\\\beta\\\\textnormal{-}2$"""'}), "(0, 1, c='r', marker=marker[2], s=ssize, label=\n '$\\\\beta\\\\textnormal{-}2$')\n", (2498, 2577), True, 'import matplotlib.pyplot as plt\n'), ((2576, 2587), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2585, 2587), True, 'import matplotlib.pyplot as plt\n'), ((3369, 3439), 'matplotlib.pyplot.scatter', 'plt.scatter', (['(0)', '(1)'], {'c': '"""k"""', 'marker': 'marker[0]', 's': 'ssize', 'label': '"""$\\\\alpha$"""'}), "(0, 1, c='k', marker=marker[0], s=ssize, label='$\\\\alpha$')\n", (3380, 3439), True, 'import matplotlib.pyplot as plt\n'), ((3452, 3542), 
'matplotlib.pyplot.scatter', 'plt.scatter', (['(0)', '(1)'], {'c': '"""b"""', 'marker': 'marker[1]', 's': 'ssize', 'label': '"""$\\\\beta\\\\textnormal{-}1$"""'}), "(0, 1, c='b', marker=marker[1], s=ssize, label=\n '$\\\\beta\\\\textnormal{-}1$')\n", (3463, 3542), True, 'import matplotlib.pyplot as plt\n'), ((3549, 3639), 'matplotlib.pyplot.scatter', 'plt.scatter', (['(0)', '(1)'], {'c': '"""r"""', 'marker': 'marker[2]', 's': 'ssize', 'label': '"""$\\\\beta\\\\textnormal{-}2$"""'}), "(0, 1, c='r', marker=marker[2], s=ssize, label=\n '$\\\\beta\\\\textnormal{-}2$')\n", (3560, 3639), True, 'import matplotlib.pyplot as plt\n'), ((3638, 3649), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3647, 3649), True, 'import matplotlib.pyplot as plt\n'), ((5378, 5423), 'os.chmod', 'os.chmod', (["(pathofsamples + '/est_prop.sh')", '(511)'], {}), "(pathofsamples + '/est_prop.sh', 511)\n", (5386, 5423), False, 'import os\n'), ((6750, 6803), 'os.chmod', 'os.chmod', (["(pathofsamples + '/est_prop_compare.sh')", '(511)'], {}), "(pathofsamples + '/est_prop_compare.sh', 511)\n", (6758, 6803), False, 'import os\n'), ((6848, 6890), 'numpy.zeros', 'np.zeros', (['[xyz.shape[0] + 1, xyz.shape[1]]'], {}), '([xyz.shape[0] + 1, xyz.shape[1]])\n', (6856, 6890), True, 'import numpy as np\n'), ((7049, 7078), 'numpy.zeros', 'np.zeros', (['[nACE * ACEleng, 3]'], {}), '([nACE * ACEleng, 3])\n', (7057, 7078), True, 'import numpy as np\n'), ((7090, 7119), 'numpy.zeros', 'np.zeros', (['[nALA * ALAleng, 3]'], {}), '([nALA * ALAleng, 3])\n', (7098, 7119), True, 'import numpy as np\n'), ((7131, 7160), 'numpy.zeros', 'np.zeros', (['[nNME * NMEleng, 3]'], {}), '([nNME * NMEleng, 3])\n', (7139, 7160), True, 'import numpy as np\n'), ((10695, 10717), 'numpy.zeros', 'np.zeros', (['[dim + 3, n]'], {}), '([dim + 3, n])\n', (10703, 10717), True, 'import numpy as np\n'), ((11657, 11686), 'numpy.zeros', 'np.zeros', (['[nparticles * 3, n]'], {}), '([nparticles * 3, n])\n', (11665, 11686), True, 'import 
numpy as np\n'), ((1574, 1628), 'os.path.join', 'os.path.join', (['ramapath', '"""rama_dataset_ala_15_1500.xvg"""'], {}), "(ramapath, 'rama_dataset_ala_15_1500.xvg')\n", (1586, 1628), False, 'import os\n'), ((2103, 2151), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'color': '"""black"""', 'label': '"""$\\\\alpha$"""'}), "(color='black', label='$\\\\alpha$')\n", (2117, 2151), True, 'import matplotlib.patches as mpatches\n'), ((2174, 2222), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'color': '"""blue"""', 'label': '"""$\\\\beta$-1"""'}), "(color='blue', label='$\\\\beta$-1')\n", (2188, 2222), True, 'import matplotlib.patches as mpatches\n'), ((2245, 2292), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'color': '"""red"""', 'label': '"""$\\\\beta$-2"""'}), "(color='red', label='$\\\\beta$-2')\n", (2259, 2292), True, 'import matplotlib.patches as mpatches\n'), ((3165, 3213), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'color': '"""black"""', 'label': '"""$\\\\alpha$"""'}), "(color='black', label='$\\\\alpha$')\n", (3179, 3213), True, 'import matplotlib.patches as mpatches\n'), ((3236, 3284), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'color': '"""blue"""', 'label': '"""$\\\\beta$-1"""'}), "(color='blue', label='$\\\\beta$-1')\n", (3250, 3284), True, 'import matplotlib.patches as mpatches\n'), ((3307, 3354), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'color': '"""red"""', 'label': '"""$\\\\beta$-2"""'}), "(color='red', label='$\\\\beta$-2')\n", (3321, 3354), True, 'import matplotlib.patches as mpatches\n'), ((9846, 9878), 'numpy.zeros', 'np.zeros', (['[rphithetaShape[0], 3]'], {}), '([rphithetaShape[0], 3])\n', (9854, 9878), True, 'import numpy as np\n'), ((10186, 10210), 'numpy.zeros', 'np.zeros', (['rphithetaShape'], {}), '(rphithetaShape)\n', (10194, 10210), True, 'import numpy as np\n'), ((10790, 10812), 'numpy.zeros', 'np.zeros', (['[dim / 3, 3]'], {}), '([dim / 3, 3])\n', (10798, 10812), True, 'import numpy as 
np\n'), ((11080, 11122), 'numpy.reshape', 'np.reshape', (['datacoord', '(sample.shape[0] + 3)'], {}), '(datacoord, sample.shape[0] + 3)\n', (11090, 11122), True, 'import numpy as np\n'), ((11150, 11171), 'numpy.copy', 'np.copy', (['datacoordvec'], {}), '(datacoordvec)\n', (11157, 11171), True, 'import numpy as np\n'), ((11723, 11743), 'numpy.zeros', 'np.zeros', (['data.shape'], {}), '(data.shape)\n', (11731, 11743), True, 'import numpy as np\n'), ((12505, 12518), 'numpy.copy', 'np.copy', (['data'], {}), '(data)\n', (12512, 12518), True, 'import numpy as np\n'), ((13150, 13187), 'numpy.reshape', 'np.reshape', (['datacoord', '(nparticles * 3)'], {}), '(datacoord, nparticles * 3)\n', (13160, 13187), True, 'import numpy as np\n'), ((13215, 13236), 'numpy.copy', 'np.copy', (['datacoordvec'], {}), '(datacoordvec)\n', (13222, 13236), True, 'import numpy as np\n'), ((10276, 10299), 'numpy.sin', 'np.sin', (['rphitheta[:, 1]'], {}), '(rphitheta[:, 1])\n', (10282, 10299), True, 'import numpy as np\n'), ((10365, 10388), 'numpy.sin', 'np.sin', (['rphitheta[:, 1]'], {}), '(rphitheta[:, 1])\n', (10371, 10388), True, 'import numpy as np\n'), ((10428, 10451), 'numpy.cos', 'np.cos', (['rphitheta[:, 1]'], {}), '(rphitheta[:, 1])\n', (10434, 10451), True, 'import numpy as np\n'), ((10250, 10273), 'numpy.cos', 'np.cos', (['rphitheta[:, 2]'], {}), '(rphitheta[:, 2])\n', (10256, 10273), True, 'import numpy as np\n'), ((10339, 10362), 'numpy.sin', 'np.sin', (['rphitheta[:, 2]'], {}), '(rphitheta[:, 2])\n', (10345, 10362), True, 'import numpy as np\n')] |
# Script for preparing MERRA-2 reanalysis data:
# 1. Calculate effective wind speeds from u and v wind speeds in two heights (10 and 50m)
# 2. Calculate alpha friction coefficient
from paths_nz import mer_path
import glob
import numpy as np
import os
import xarray as xr
# 1. Effective wind speeds at 10 m and 50 m from the u/v components.
# 2. Friction coefficient alpha from the two heights.
files = sorted(glob.glob(mer_path + '/*.nc'))
out_dir = mer_path + '/eff_ws'
if not os.path.isdir(out_dir):
    os.mkdir(out_dir)
out_files = glob.glob(out_dir + '/*')
# period covered: 1987 - 2019
i1 = 1987
i2 = 2019
wfile = out_dir + '/merra2_wind_NZ_' + str(i1) + '-' + str(i2) + '.nc'
afile = out_dir + '/merra2_alpha_NZ_' + str(i1) + '-' + str(i2) + '.nc'
# FIX: open the (lazy) dataset once up front - previously `data` was only
# defined inside the wind branch, so computing alpha when the wind file
# already existed raised a NameError on `data.DISPH`.
data = xr.open_mfdataset(files, chunks={'time': 46})
if wfile not in out_files:
    print('calculating wind ' + str(i1) + '-' + str(i2))
    wh10 = ((data.U10M ** 2 + data.V10M ** 2) ** 0.5).compute()
    wh50 = ((data.U50M ** 2 + data.V50M ** 2) ** 0.5).compute()
    print('saving wind ' + str(i1) + '-' + str(i2))
    eff_ws = xr.Dataset({'wh10': wh10,
                        'wh50': wh50})
    eff_ws.to_netcdf(wfile)
    eff_ws.close()
    del eff_ws
if afile not in out_files:
    print('calculating alpha ' + str(i1) + '-' + str(i2))
    eff_ws = xr.open_dataset(wfile)
    # FIX: np.log works elementwise on DataArrays; the xr.ufuncs module was
    # removed from recent xarray versions.
    alpha = (np.log(eff_ws.wh50 / eff_ws.wh10) / np.log(50 / (10 + data.DISPH))).compute()
    print('saving alpha ' + str(i1) + '-' + str(i2))
    xr.Dataset({'alpha': alpha}).to_netcdf(afile)
    del alpha
| [
"numpy.log",
"xarray.Dataset",
"xarray.ufuncs.log",
"os.path.isdir",
"os.mkdir",
"xarray.open_dataset",
"xarray.open_mfdataset",
"glob.glob"
] | [((281, 310), 'glob.glob', 'glob.glob', (["(mer_path + '/*.nc')"], {}), "(mer_path + '/*.nc')\n", (290, 310), False, 'import glob\n'), ((415, 448), 'glob.glob', 'glob.glob', (["(mer_path + '/eff_ws/*')"], {}), "(mer_path + '/eff_ws/*')\n", (424, 448), False, 'import glob\n'), ((330, 365), 'os.path.isdir', 'os.path.isdir', (["(mer_path + '/eff_ws')"], {}), "(mer_path + '/eff_ws')\n", (343, 365), False, 'import os\n'), ((371, 401), 'os.mkdir', 'os.mkdir', (["(mer_path + '/eff_ws')"], {}), "(mer_path + '/eff_ws')\n", (379, 401), False, 'import os\n'), ((734, 779), 'xarray.open_mfdataset', 'xr.open_mfdataset', (['files'], {'chunks': "{'time': 46}"}), "(files, chunks={'time': 46})\n", (751, 779), True, 'import xarray as xr\n'), ((959, 999), 'xarray.Dataset', 'xr.Dataset', (["{'wh10': wh10, 'wh50': wh50}"], {}), "({'wh10': wh10, 'wh50': wh50})\n", (969, 999), True, 'import xarray as xr\n'), ((1208, 1230), 'xarray.open_dataset', 'xr.open_dataset', (['wfile'], {}), '(wfile)\n', (1223, 1230), True, 'import xarray as xr\n'), ((1378, 1406), 'xarray.Dataset', 'xr.Dataset', (["{'alpha': alpha}"], {}), "({'alpha': alpha})\n", (1388, 1406), True, 'import xarray as xr\n'), ((1244, 1284), 'xarray.ufuncs.log', 'xr.ufuncs.log', (['(eff_ws.wh50 / eff_ws.wh10)'], {}), '(eff_ws.wh50 / eff_ws.wh10)\n', (1257, 1284), True, 'import xarray as xr\n'), ((1283, 1313), 'numpy.log', 'np.log', (['(50 / (10 + data.DISPH))'], {}), '(50 / (10 + data.DISPH))\n', (1289, 1313), True, 'import numpy as np\n')] |
from netCDF4 import Dataset as dst
from matplotlib import pyplot as plt
from numpy import array, mean, reshape

# Monthly-mean meridional wind, opened read-only.
nc = dst('../vwnd.mon.mean.nc', mode='r')


def slice_per(source, step):
    """Split *source* into *step* interleaved subsequences.

    Element k of the result holds source[k], source[k + step], ...
    """
    return [source[i::step] for i in range(step)]


def mean_profile(raw):
    """Collapse a stack of monthly latitude rows to one 144-value profile.

    The flattened data are regrouped into 10512 (= 144 * 73) interleaved
    per-grid-cell series, each series is averaged, the averages are
    reshaped onto the 144 x 73 grid (Fortran order), and each of the 144
    rows is then averaged.
    """
    flat = array(raw).flatten(order='C')
    cells = array(slice_per(flat, 10512))
    cell_means = [mean(cell) for cell in cells]
    grid = reshape(array(cell_means), (144, 73), order='F')
    return [mean(row) for row in grid]


# 60 months out of the 853 available (indices -68 to -8), at level
# indices 2 and 8 -- presumably the 850 hPa and 250 hPa levels, going by
# the plot labels below; verify against the file's level coordinate.
months = range(853)[-68:-8]
levels = [2, 8]

profiles = []
for level in levels:
    profile = []
    for month in months:
        for latitude in nc.variables['vwnd'][month][level]:
            profile.append(latitude)
    profiles.append(profile)

# The two levels were previously processed by duplicated copy-paste
# blocks; both now share mean_profile().
p850 = mean_profile(profiles[0])
p250 = mean_profile(profiles[1])
print(len(p250))

plt.plot(p850, label="850")
plt.plot(p250, label="250")
plt.legend()
plt.savefig("htVlong.png")
plt.show() | [
"numpy.mean",
"matplotlib.pyplot.savefig",
"netCDF4.Dataset",
"matplotlib.pyplot.plot",
"numpy.array",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((117, 153), 'netCDF4.Dataset', 'dst', (['"""../vwnd.mon.mean.nc"""'], {'mode': '"""r"""'}), "('../vwnd.mon.mean.nc', mode='r')\n", (120, 153), True, 'from netCDF4 import Dataset as dst\n'), ((1062, 1089), 'matplotlib.pyplot.plot', 'plt.plot', (['p850'], {'label': '"""850"""'}), "(p850, label='850')\n", (1070, 1089), True, 'from matplotlib import pyplot as plt\n'), ((1089, 1116), 'matplotlib.pyplot.plot', 'plt.plot', (['p250'], {'label': '"""250"""'}), "(p250, label='250')\n", (1097, 1116), True, 'from matplotlib import pyplot as plt\n'), ((1116, 1128), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1126, 1128), True, 'from matplotlib import pyplot as plt\n'), ((1129, 1155), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""htVlong.png"""'], {}), "('htVlong.png')\n", (1140, 1155), True, 'from matplotlib import pyplot as plt\n'), ((1156, 1166), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1164, 1166), True, 'from matplotlib import pyplot as plt\n'), ((606, 616), 'numpy.mean', 'mean', (['elem'], {}), '(elem)\n', (610, 616), False, 'from numpy import array, mean, reshape\n'), ((660, 677), 'numpy.array', 'array', (['profile850'], {}), '(profile850)\n', (665, 677), False, 'from numpy import array, mean, reshape\n'), ((739, 749), 'numpy.mean', 'mean', (['elem'], {}), '(elem)\n', (743, 749), False, 'from numpy import array, mean, reshape\n'), ((882, 892), 'numpy.mean', 'mean', (['elem'], {}), '(elem)\n', (886, 892), False, 'from numpy import array, mean, reshape\n'), ((936, 953), 'numpy.array', 'array', (['profile250'], {}), '(profile250)\n', (941, 953), False, 'from numpy import array, mean, reshape\n'), ((1015, 1025), 'numpy.mean', 'mean', (['elem'], {}), '(elem)\n', (1019, 1025), False, 'from numpy import array, mean, reshape\n'), ((500, 511), 'numpy.array', 'array', (['p850'], {}), '(p850)\n', (505, 511), False, 'from numpy import array, mean, reshape\n'), ((776, 787), 'numpy.array', 'array', (['p250'], {}), '(p250)\n', (781, 787), 
False, 'from numpy import array, mean, reshape\n')] |
from __future__ import absolute_import, division
from toolz import memoize
import numpy as np
from .core import Expr
from .utils import ngjit, isreal
class Glyph(Expr):
    """Base class for glyphs.

    Concrete glyphs (e.g. ``Point``, ``Line`` below) describe how rows of
    the input data are rasterized onto the aggregation grid.
    """
    pass
class _PointLike(Glyph):
    """Shared methods between Point and Line"""
    def __init__(self, x, y):
        # Column names (strings) of the x and y coordinates in the input.
        self.x = x
        self.y = y

    @property
    def inputs(self):
        """The columns this glyph reads, as a tuple of names."""
        return (self.x, self.y)

    def validate(self, in_dshape):
        """Raise ValueError unless both coordinate columns are real-valued."""
        if not isreal(in_dshape.measure[self.x]):
            raise ValueError('x must be real')
        elif not isreal(in_dshape.measure[self.y]):
            raise ValueError('y must be real')

    @staticmethod
    @ngjit
    def _compute_x_bounds(xs):
        """Return (min, max) of ``xs``, ignoring NaNs.

        Raises ValueError if every element is NaN.
        """
        # Seed with xs[0]; if that is NaN, the isnan(minval)/isnan(maxval)
        # checks below replace it with the first finite value seen.
        minval = maxval = xs[0]
        for x in xs:
            if not np.isnan(x):
                if np.isnan(minval) or x < minval:
                    minval = x
                if np.isnan(maxval) or x > maxval:
                    maxval = x
        if np.isnan(minval) or np.isnan(maxval):
            raise ValueError('All x coordinates are NaN.')
        return minval, maxval

    @staticmethod
    @ngjit
    def _compute_y_bounds(ys):
        """Return (min, max) of ``ys``, ignoring NaNs.

        Raises ValueError if every element is NaN.
        """
        minval = maxval = ys[0]
        for y in ys:
            if not np.isnan(y):
                if np.isnan(minval) or y < minval:
                    minval = y
                if np.isnan(maxval) or y > maxval:
                    maxval = y
        if np.isnan(minval) or np.isnan(maxval):
            raise ValueError('All y coordinates are NaN.')
        return minval, maxval

    @memoize
    def _compute_x_bounds_dask(self, df):
        """Like ``_PointLike._compute_x_bounds``, but memoized because
        ``df`` is immutable/hashable (a Dask dataframe).
        """
        xs = df[self.x].values
        return np.nanmin(xs), np.nanmax(xs)

    @memoize
    def _compute_y_bounds_dask(self, df):
        """Like ``_PointLike._compute_y_bounds``, but memoized because
        ``df`` is immutable/hashable (a Dask dataframe).
        """
        ys = df[self.y].values
        return np.nanmin(ys), np.nanmax(ys)
class Point(_PointLike):
    """A point, with center at ``x`` and ``y``.

    Points map each record to a single bin.
    Points falling exactly on the upper bounds are treated as a special case,
    mapping into the previous bin rather than being cropped off.

    Parameters
    ----------
    x, y : str
        Column names for the x and y coordinates of each point.
    """
    @memoize
    def _build_extend(self, x_mapper, y_mapper, info, append):
        """Specialize and return an ``extend(aggs, df, vt, bounds)`` kernel
        that aggregates every in-bounds point of ``df`` into ``aggs``."""
        x_name = self.x
        y_name = self.y

        @ngjit
        def _extend(vt, bounds, xs, ys, *aggs_and_cols):
            # vt unpacks to (scale_x, translate_x, scale_y, translate_y);
            # bounds is the data-space extent (xmin, xmax, ymin, ymax).
            sx, tx, sy, ty = vt
            xmin, xmax, ymin, ymax = bounds

            def map_onto_pixel(x, y):
                xx = int(x_mapper(x) * sx + tx)
                yy = int(y_mapper(y) * sy + ty)
                # Points falling on upper bound are mapped into previous bin
                return (xx - 1 if x == xmax else xx,
                        yy - 1 if y == ymax else yy)

            for i in range(xs.shape[0]):
                x = xs[i]
                y = ys[i]
                # Out-of-bounds points are skipped (NaNs also fail this test).
                if (xmin <= x <= xmax) and (ymin <= y <= ymax):
                    xi, yi = map_onto_pixel(x, y)
                    append(i, xi, yi, *aggs_and_cols)

        def extend(aggs, df, vt, bounds):
            xs = df[x_name].values
            ys = df[y_name].values
            cols = aggs + info(df)
            _extend(vt, bounds, xs, ys, *cols)

        return extend
class Line(_PointLike):
    """A line, with vertices defined by ``x`` and ``y``.

    Parameters
    ----------
    x, y : str
        Column names for the x and y coordinates of each vertex.
    """
    @memoize
    def _build_extend(self, x_mapper, y_mapper, info, append):
        """Specialize and return an ``extend(aggs, df, vt, bounds)`` kernel
        that rasterizes the polyline connecting successive rows of ``df``."""
        map_onto_pixel = _build_map_onto_pixel(x_mapper, y_mapper)
        draw_line = _build_draw_line(append)
        extend_line = _build_extend_line(draw_line, map_onto_pixel)
        x_name = self.x
        y_name = self.y

        def extend(aggs, df, vt, bounds, plot_start=True):
            # plot_start=True also aggregates the very first vertex.
            xs = df[x_name].values
            ys = df[y_name].values
            cols = aggs + info(df)
            extend_line(vt, bounds, xs, ys, plot_start, *cols)

        return extend
# -- Helpers for computing line geometry --
def _build_map_onto_pixel(x_mapper, y_mapper):
    @ngjit
    def map_onto_pixel(vt, bounds, x, y):
        """Scale/translate a data-space point into integer pixel indices."""
        sx, tx, sy, ty = vt
        _, xmax, _, ymax = bounds
        xi = int(x_mapper(x) * sx + tx)
        yi = int(y_mapper(y) * sy + ty)
        # A point sitting exactly on an upper bound belongs to the last
        # bin, not one past it.
        if x == xmax:
            xi -= 1
        if y == ymax:
            yi -= 1
        return xi, yi
    return map_onto_pixel
def _build_draw_line(append):
    """Specialize a line plotting kernel for a given append/axis combination"""
    @ngjit
    def draw_line(x0i, y0i, x1i, y1i, i, plot_start, clipped, *aggs_and_cols):
        """Draw a line using Bresenham's algorithm

        This method plots a line segment with integer coordinates onto a pixel
        grid. The vertices are assumed to have already been scaled, transformed,
        and clipped within the bounds.

        The following algorithm is the more general Bresenham's algorithm that
        works with both float and integer coordinates. A future performance
        improvement would replace this algorithm with the integer-specific one.
        """
        # Signed unit step (ix, iy) and doubled absolute delta per axis.
        dx = x1i - x0i
        ix = (dx > 0) - (dx < 0)
        dx = abs(dx) * 2
        dy = y1i - y0i
        iy = (dy > 0) - (dy < 0)
        dy = abs(dy) * 2
        if plot_start:
            append(i, x0i, y0i, *aggs_and_cols)
        if dx >= dy:
            # If vertices weren't clipped and are concurrent in integer space,
            # call append and return, as the second vertex won't be hit below.
            if not clipped and not (dx | dy):
                append(i, x0i, y0i, *aggs_and_cols)
                return
            error = 2*dy - dx
            while x0i != x1i:
                # ``(error or ix > 0)`` breaks the error == 0 tie so the
                # same pixels are hit regardless of traversal direction.
                if error >= 0 and (error or ix > 0):
                    error -= 2 * dx
                    y0i += iy
                error += 2 * dy
                x0i += ix
                append(i, x0i, y0i, *aggs_and_cols)
        else:
            # Same loop with the roles of x and y swapped (steep lines).
            error = 2*dx - dy
            while y0i != y1i:
                if error >= 0 and (error or iy > 0):
                    error -= 2 * dy
                    x0i += ix
                error += 2 * dx
                y0i += iy
                append(i, x0i, y0i, *aggs_and_cols)
    return draw_line
def _build_extend_line(draw_line, map_onto_pixel):
    @ngjit
    def outside_bounds(x0, y0, x1, y1, xmin, xmax, ymin, ymax):
        """Trivial-reject test: True when the whole segment lies on one
        side of the bounding box."""
        if x0 < xmin and x1 < xmin:
            return True
        if x0 > xmax and x1 > xmax:
            return True
        if y0 < ymin and y1 < ymin:
            return True
        return y0 > ymax and y1 > ymax
    @ngjit
    def clipt(p, q, t0, t1):
        """Liang-Barsky clip test against one box edge.

        Narrows the parametric interval [t0, t1] of the visible part of
        the segment; ``accept`` is False when the segment is entirely
        outside this edge.
        """
        accept = True
        if p < 0 and q < 0:
            r = q / p
            if r > t1:
                accept = False
            elif r > t0:
                t0 = r
        elif p > 0 and q < p:
            r = q / p
            if r < t0:
                accept = False
            elif r < t1:
                t1 = r
        elif q < 0:
            accept = False
        return t0, t1, accept
    @ngjit
    def extend_line(vt, bounds, xs, ys, plot_start, *aggs_and_cols):
        """Aggregate along a line formed by ``xs`` and ``ys``"""
        xmin, xmax, ymin, ymax = bounds
        nrows = xs.shape[0]
        i = 0
        while i < nrows - 1:
            x0 = xs[i]
            y0 = ys[i]
            x1 = xs[i + 1]
            y1 = ys[i + 1]
            # If any of the coordinates are NaN, there's a discontinuity. Skip
            # the entire segment. (plot_start = True re-arms drawing of the
            # next segment's first vertex.)
            if np.isnan(x0) or np.isnan(y0) or np.isnan(x1) or np.isnan(y1):
                plot_start = True
                i += 1
                continue
            # Use Liang-Barsky (1992) to clip the segment to a bounding box
            if outside_bounds(x0, y0, x1, y1, xmin, xmax, ymin, ymax):
                plot_start = True
                i += 1
                continue
            clipped = False
            t0, t1 = 0, 1
            dx = x1 - x0
            t0, t1, accept = clipt(-dx, x0 - xmin, t0, t1)
            if not accept:
                i += 1
                continue
            t0, t1, accept = clipt(dx, xmax - x0, t0, t1)
            if not accept:
                i += 1
                continue
            dy = y1 - y0
            t0, t1, accept = clipt(-dy, y0 - ymin, t0, t1)
            if not accept:
                i += 1
                continue
            t0, t1, accept = clipt(dy, ymax - y0, t0, t1)
            if not accept:
                i += 1
                continue
            # t1 < 1: the far end was cut back to the box boundary.
            if t1 < 1:
                clipped = True
                x1 = x0 + t1 * dx
                y1 = y0 + t1 * dy
            if t0 > 0:
                # If x0 is clipped, we need to plot the new start
                clipped = True
                plot_start = True
                x0 = x0 + t0 * dx
                y0 = y0 + t0 * dy
            x0i, y0i = map_onto_pixel(vt, bounds, x0, y0)
            x1i, y1i = map_onto_pixel(vt, bounds, x1, y1)
            draw_line(x0i, y0i, x1i, y1i, i, plot_start, clipped, *aggs_and_cols)
            plot_start = False
            i += 1
    return extend_line
| [
"numpy.nanmin",
"numpy.isnan",
"numpy.nanmax"
] | [((980, 996), 'numpy.isnan', 'np.isnan', (['minval'], {}), '(minval)\n', (988, 996), True, 'import numpy as np\n'), ((1000, 1016), 'numpy.isnan', 'np.isnan', (['maxval'], {}), '(maxval)\n', (1008, 1016), True, 'import numpy as np\n'), ((1428, 1444), 'numpy.isnan', 'np.isnan', (['minval'], {}), '(minval)\n', (1436, 1444), True, 'import numpy as np\n'), ((1448, 1464), 'numpy.isnan', 'np.isnan', (['maxval'], {}), '(maxval)\n', (1456, 1464), True, 'import numpy as np\n'), ((1796, 1809), 'numpy.nanmin', 'np.nanmin', (['xs'], {}), '(xs)\n', (1805, 1809), True, 'import numpy as np\n'), ((1811, 1824), 'numpy.nanmax', 'np.nanmax', (['xs'], {}), '(xs)\n', (1820, 1824), True, 'import numpy as np\n'), ((2066, 2079), 'numpy.nanmin', 'np.nanmin', (['ys'], {}), '(ys)\n', (2075, 2079), True, 'import numpy as np\n'), ((2081, 2094), 'numpy.nanmax', 'np.nanmax', (['ys'], {}), '(ys)\n', (2090, 2094), True, 'import numpy as np\n'), ((792, 803), 'numpy.isnan', 'np.isnan', (['x'], {}), '(x)\n', (800, 803), True, 'import numpy as np\n'), ((1240, 1251), 'numpy.isnan', 'np.isnan', (['y'], {}), '(y)\n', (1248, 1251), True, 'import numpy as np\n'), ((7950, 7962), 'numpy.isnan', 'np.isnan', (['x0'], {}), '(x0)\n', (7958, 7962), True, 'import numpy as np\n'), ((7966, 7978), 'numpy.isnan', 'np.isnan', (['y0'], {}), '(y0)\n', (7974, 7978), True, 'import numpy as np\n'), ((7982, 7994), 'numpy.isnan', 'np.isnan', (['x1'], {}), '(x1)\n', (7990, 7994), True, 'import numpy as np\n'), ((7998, 8010), 'numpy.isnan', 'np.isnan', (['y1'], {}), '(y1)\n', (8006, 8010), True, 'import numpy as np\n'), ((824, 840), 'numpy.isnan', 'np.isnan', (['minval'], {}), '(minval)\n', (832, 840), True, 'import numpy as np\n'), ((906, 922), 'numpy.isnan', 'np.isnan', (['maxval'], {}), '(maxval)\n', (914, 922), True, 'import numpy as np\n'), ((1272, 1288), 'numpy.isnan', 'np.isnan', (['minval'], {}), '(minval)\n', (1280, 1288), True, 'import numpy as np\n'), ((1354, 1370), 'numpy.isnan', 'np.isnan', (['maxval'], {}), 
'(maxval)\n', (1362, 1370), True, 'import numpy as np\n')] |
import pandas as pd
import tapnx as tapnx
import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
import time as time

# Network to solve; the last assignment wins (earlier names are kept for
# quickly switching between test cases).
filename = 'MATLAB_test'
filename = 'siouxfallswithspeeds'
#filename = 'smallsiouxfalls'
#filename = 'siouxfalls'
nodes = None
G = tapnx.graph_from_csv(filename, nodes=nodes, trips=True, edge_attr=True)
# Only plot when node data (coordinates/labels) was supplied.
if nodes:
    fig, ax = tapnx.plot_graph(G, node_size=200, node_labels=True)
    plt.show()
G.graph['no_edges'] = G.number_of_edges()
remote = True   # passed through to the solver (presumably GEKKO's remote option)
otol=1.0e-3     # optimizer tolerance
# Anchor points for normalization: best and worst value of each single
# objective. Going by the result names, lam=1 optimizes distance (d) and
# lam=0 optimizes travel time; min_max_type=-1 maximizes instead of
# minimizes, returning a negated objective (restored below).
f,cf,x,cx,min_d,_,_ = tapnx.gekko_optimise_column_gen(G,lam=1, remote=remote,d=True,otol=otol)
f,cf,x,cx,max_d,_,_ = tapnx.gekko_optimise_column_gen(G,min_max_type=-1, lam=1, remote=remote,d=True,otol=otol)
f,cf,x,cx,min_tt,_,_ = tapnx.gekko_optimise_column_gen(G,lam=0, remote=remote,d=True)
f,cf,x,cx,max_tt,_,_ = tapnx.gekko_optimise_column_gen(G,min_max_type=-1, lam=0, remote=remote,d=True,otol=otol)
# Undo the sign flip from the maximization runs.
max_tt = -max_tt
max_d = -max_d
dists = []
tts = []
# Sweep the trade-off parameter lambda over [0, 1].
lams = np.round(np.arange(0,1.01,0.2),2)
#lams = np.round(np.arange(0.15,0.3,0.01),2)
#lams = np.arange(0,1.01,0.1)
for lam in lams:
    #print(lam)
    #time.sleep(1)
    f,cf,x,cx,min_ws,dist,tt = tapnx.gekko_optimise_column_gen(G,lam=lam,remote=remote,min_d=min_d,max_d=max_d,min_tt=min_tt,max_tt=max_tt,d=True,otol=otol)
    dists.append(dist)
    tts.append(tt)
print(min_d)
print(max_d)
print(dists)
print(min_tt)
print(max_tt)
print(tts)
# Normalized objectives (0 = best, 1 = worst), for inspection.
print((np.array(dists)-min_d)/(max_d-min_d))
print((np.array(tts)-min_tt)/(max_tt-min_tt))
# Trade-off curve, then each objective as a function of lambda.
plt.figure()
plt.plot(dists,tts, 'o')
plt.figure()
plt.plot(lams,dists)
plt.figure()
plt.plot(lams,tts)
plt.show() | [
"matplotlib.pyplot.plot",
"tapnx.graph_from_csv",
"numpy.array",
"matplotlib.pyplot.figure",
"tapnx.plot_graph",
"tapnx.gekko_optimise_column_gen",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((272, 343), 'tapnx.graph_from_csv', 'tapnx.graph_from_csv', (['filename'], {'nodes': 'nodes', 'trips': '(True)', 'edge_attr': '(True)'}), '(filename, nodes=nodes, trips=True, edge_attr=True)\n', (292, 343), True, 'import tapnx as tapnx\n'), ((528, 603), 'tapnx.gekko_optimise_column_gen', 'tapnx.gekko_optimise_column_gen', (['G'], {'lam': '(1)', 'remote': 'remote', 'd': '(True)', 'otol': 'otol'}), '(G, lam=1, remote=remote, d=True, otol=otol)\n', (559, 603), True, 'import tapnx as tapnx\n'), ((623, 720), 'tapnx.gekko_optimise_column_gen', 'tapnx.gekko_optimise_column_gen', (['G'], {'min_max_type': '(-1)', 'lam': '(1)', 'remote': 'remote', 'd': '(True)', 'otol': 'otol'}), '(G, min_max_type=-1, lam=1, remote=remote, d\n =True, otol=otol)\n', (654, 720), True, 'import tapnx as tapnx\n'), ((737, 801), 'tapnx.gekko_optimise_column_gen', 'tapnx.gekko_optimise_column_gen', (['G'], {'lam': '(0)', 'remote': 'remote', 'd': '(True)'}), '(G, lam=0, remote=remote, d=True)\n', (768, 801), True, 'import tapnx as tapnx\n'), ((823, 920), 'tapnx.gekko_optimise_column_gen', 'tapnx.gekko_optimise_column_gen', (['G'], {'min_max_type': '(-1)', 'lam': '(0)', 'remote': 'remote', 'd': '(True)', 'otol': 'otol'}), '(G, min_max_type=-1, lam=0, remote=remote, d\n =True, otol=otol)\n', (854, 920), True, 'import tapnx as tapnx\n'), ((1507, 1519), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1517, 1519), True, 'import matplotlib.pyplot as plt\n'), ((1520, 1545), 'matplotlib.pyplot.plot', 'plt.plot', (['dists', 'tts', '"""o"""'], {}), "(dists, tts, 'o')\n", (1528, 1545), True, 'import matplotlib.pyplot as plt\n'), ((1546, 1558), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1556, 1558), True, 'import matplotlib.pyplot as plt\n'), ((1559, 1580), 'matplotlib.pyplot.plot', 'plt.plot', (['lams', 'dists'], {}), '(lams, dists)\n', (1567, 1580), True, 'import matplotlib.pyplot as plt\n'), ((1581, 1593), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1591, 
1593), True, 'import matplotlib.pyplot as plt\n'), ((1594, 1613), 'matplotlib.pyplot.plot', 'plt.plot', (['lams', 'tts'], {}), '(lams, tts)\n', (1602, 1613), True, 'import matplotlib.pyplot as plt\n'), ((1614, 1624), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1622, 1624), True, 'import matplotlib.pyplot as plt\n'), ((368, 420), 'tapnx.plot_graph', 'tapnx.plot_graph', (['G'], {'node_size': '(200)', 'node_labels': '(True)'}), '(G, node_size=200, node_labels=True)\n', (384, 420), True, 'import tapnx as tapnx\n'), ((425, 435), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (433, 435), True, 'import matplotlib.pyplot as plt\n'), ((985, 1008), 'numpy.arange', 'np.arange', (['(0)', '(1.01)', '(0.2)'], {}), '(0, 1.01, 0.2)\n', (994, 1008), True, 'import numpy as np\n'), ((1168, 1305), 'tapnx.gekko_optimise_column_gen', 'tapnx.gekko_optimise_column_gen', (['G'], {'lam': 'lam', 'remote': 'remote', 'min_d': 'min_d', 'max_d': 'max_d', 'min_tt': 'min_tt', 'max_tt': 'max_tt', 'd': '(True)', 'otol': 'otol'}), '(G, lam=lam, remote=remote, min_d=min_d,\n max_d=max_d, min_tt=min_tt, max_tt=max_tt, d=True, otol=otol)\n', (1199, 1305), True, 'import tapnx as tapnx\n'), ((1423, 1438), 'numpy.array', 'np.array', (['dists'], {}), '(dists)\n', (1431, 1438), True, 'import numpy as np\n'), ((1468, 1481), 'numpy.array', 'np.array', (['tts'], {}), '(tts)\n', (1476, 1481), True, 'import numpy as np\n')] |
# Copyright (C) 2015, UChicago Argonne, LLC
# All Rights Reserved
#
# Generic IO (ANL-15-066)
# <NAME>, Argonne National Laboratory
#
# OPEN SOURCE LICENSE
#
# Under the terms of Contract No. DE-AC02-06CH11357 with UChicago Argonne,
# LLC, the U.S. Government retains certain rights in this software.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the names of UChicago Argonne, LLC or the Department of Energy
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# *****************************************************************************
#
# DISCLAIMER
# THE SOFTWARE IS SUPPLIED "AS IS" WITHOUT WARRANTY OF ANY KIND. NEITHER THE
# UNTED STATES GOVERNMENT, NOR THE UNITED STATES DEPARTMENT OF ENERGY, NOR
# UCHICAGO ARGONNE, LLC, NOR ANY OF THEIR EMPLOYEES, MAKES ANY WARRANTY,
# EXPRESS OR IMPLIED, OR ASSUMES ANY LEGAL LIABILITY OR RESPONSIBILITY FOR THE
# ACCURACY, COMPLETENESS, OR USEFULNESS OF ANY INFORMATION, DATA, APPARATUS,
# PRODUCT, OR PROCESS DISCLOSED, OR REPRESENTS THAT ITS USE WOULD NOT INFRINGE
# PRIVATELY OWNED RIGHTS.
#
# *****************************************************************************
from numpy.ctypeslib import ndpointer
import numpy as np
import ctypes as ct
import os
#Define where the library is and load it
_path = os.path.dirname(__file__)
libpygio = ct.CDLL(_path + '/../frontend/libpygio.so')
# We need to define the return type ("restype") and
# the argument types for every C entry point we call.

# Metadata queries: element counts, per-variable type and field count.
libpygio.get_elem_num.restype=ct.c_int64
libpygio.get_elem_num.argtypes=[ct.c_char_p]
libpygio.get_elem_num_in_leaf.restype=ct.c_int64
libpygio.get_elem_num_in_leaf.argtypes=[ct.c_char_p, ct.c_int]
libpygio.get_variable_type.restype=ct.c_int
libpygio.get_variable_type.argtypes=[ct.c_char_p, ct.c_char_p]
libpygio.get_variable_field_count.restype=ct.c_int
libpygio.get_variable_field_count.argtypes=[ct.c_char_p, ct.c_char_p]

# Whole-variable readers: fill a caller-allocated buffer in place.
libpygio.read_gio_int32.restype=None
libpygio.read_gio_int32.argtypes=[ct.c_char_p, ct.c_char_p, ct.POINTER(ct.c_int), ct.c_int]
libpygio.read_gio_int64.restype=None
libpygio.read_gio_int64.argtypes=[ct.c_char_p, ct.c_char_p, ct.POINTER(ct.c_int64), ct.c_int]
libpygio.read_gio_float.restype=None
libpygio.read_gio_float.argtypes=[ct.c_char_p, ct.c_char_p, ct.POINTER(ct.c_float), ct.c_int]
libpygio.read_gio_double.restype=None
libpygio.read_gio_double.argtypes=[ct.c_char_p, ct.c_char_p, ct.POINTER(ct.c_double), ct.c_int]

# Per-octree-leaf readers.
libpygio.read_gio_oct_int32.restype=None
libpygio.read_gio_oct_int32.argtypes=[ct.c_char_p, ct.c_int, ct.c_char_p, ct.POINTER(ct.c_int)]
libpygio.read_gio_oct_int64.restype=None
libpygio.read_gio_oct_int64.argtypes=[ct.c_char_p, ct.c_int, ct.c_char_p, ct.POINTER(ct.c_int64)]
libpygio.read_gio_oct_float.restype=None
libpygio.read_gio_oct_float.argtypes=[ct.c_char_p, ct.c_int, ct.c_char_p, ct.POINTER(ct.c_float)]
libpygio.read_gio_oct_double.restype=None
libpygio.read_gio_oct_double.argtypes=[ct.c_char_p, ct.c_int, ct.c_char_p, ct.POINTER(ct.c_double)]

# Miscellaneous helpers and octree queries.
libpygio.inspect_gio.restype=None
libpygio.inspect_gio.argtypes=[ct.c_char_p]
libpygio.get_octree.restype=ct.c_char_p
libpygio.get_octree.argtypes=[ct.c_char_p]
libpygio.get_variable.restype=ct.c_char_p
libpygio.get_variable.argtypes=[ct.c_char_p, ct.c_int]
libpygio.get_num_octree_leaves.restype=ct.c_int
libpygio.get_num_octree_leaves.argtypes=[ct.c_char_p, ct.POINTER(ct.c_int)]
libpygio.get_octree_leaves.restype=ct.POINTER(ct.c_int)
libpygio.get_octree_leaves.argtypes=[ct.c_char_p, ct.POINTER(ct.c_int)]
def gio_read_oct(file_name, var_name, leaf_id):
    """Read one octree leaf of a variable into a new numpy array.

    file_name, var_name - file/variable identifiers handed to the C library
    leaf_id             - index of the octree leaf to read

    Returns a 1-D numpy array, or None (after printing a message) when the
    variable is missing or of an unsupported type.
    """
    var_size = libpygio.get_elem_num_in_leaf(file_name, leaf_id)
    var_type = libpygio.get_variable_type(file_name, var_name)
    if var_type == 10:
        print("Variable not found")
        return
    if var_type == 9:
        print("variable type not known (not int32/int64/float/double)")
        return
    # One table instead of four copy-pasted branches:
    # GenericIO type code -> (numpy dtype, ctypes element type, C reader).
    readers = {
        0: (np.float32, ct.c_float, libpygio.read_gio_oct_float),
        1: (np.float64, ct.c_double, libpygio.read_gio_oct_double),
        2: (np.int32, ct.c_int32, libpygio.read_gio_oct_int32),
        3: (np.int64, ct.c_int64, libpygio.read_gio_oct_int64),
    }
    if var_type not in readers:
        # Unknown code: silently return None, as the original elif chain did.
        return
    dtype, c_type, reader = readers[var_type]
    result = np.ndarray(var_size, dtype=dtype)
    # The C routine fills the array in place through its raw data pointer.
    reader(file_name, leaf_id, var_name, result.ctypes.data_as(ct.POINTER(c_type)))
    return result
def gio_read(file_name, var_name):
    """Read variable ``var_name`` from a GenericIO file into a numpy array.

    Returns a 1-D numpy array of get_elem_num(file_name) elements, or None
    (after printing a message) when the variable is missing or of an
    unsupported type.
    """
    var_size = libpygio.get_elem_num(file_name)
    var_type = libpygio.get_variable_type(file_name, var_name)
    field_count = libpygio.get_variable_field_count(file_name, var_name)
    if var_type == 10:
        print("Variable not found")
        return
    if var_type == 9:
        print("variable type not known (not int32/int64/float/double)")
        return
    # One table instead of four copy-pasted branches:
    # GenericIO type code -> (numpy dtype, ctypes element type, C reader).
    readers = {
        0: (np.float32, ct.c_float, libpygio.read_gio_float),
        1: (np.float64, ct.c_double, libpygio.read_gio_double),
        2: (np.int32, ct.c_int32, libpygio.read_gio_int32),
        3: (np.int64, ct.c_int64, libpygio.read_gio_int64),
    }
    if var_type not in readers:
        # Unknown code: silently return None, as the original elif chain did.
        return
    dtype, c_type, reader = readers[var_type]
    result = np.ndarray(var_size, dtype=dtype)
    # The C routine fills the array in place through its raw data pointer.
    reader(file_name, var_name, result.ctypes.data_as(ct.POINTER(c_type)), field_count)
    return result
def gio_has_variable(file_name, var_name):
    """Return True if ``var_name`` exists in the GenericIO file.

    The C library reports type code 10 for "variable not found".
    """
    # The original also called get_elem_num(file_name) and discarded the
    # result; that unused lookup has been removed.
    var_type = libpygio.get_variable_type(file_name, var_name)
    return var_type != 10
def gio_inspect(file_name):
    """Inspect the GenericIO file (delegates to the C library's inspect_gio)."""
    libpygio.inspect_gio(file_name)
def gio_get_num_variables(file_name):
    """Return the number of variables in the GenericIO file.

    NOTE(review): ``libpygio.get_num_variables`` has no restype/argtypes
    declared above, so ctypes defaults to a C int return -- confirm.
    """
    return ( libpygio.get_num_variables(file_name) )
def gio_get_variable(file_name, i):
    """Return the name (bytes) of variable number ``i`` in the file."""
    # Override the module-level restype (c_char_p) so string_at receives a
    # raw char pointer. Note this mutates the shared libpygio handle.
    libpygio.get_variable.restype = ct.POINTER(ct.c_char)
    temp_str = libpygio.get_variable(file_name, i)
    return ct.string_at(temp_str)
def gio_get_octree(file_name):
    """Return the octree description of the file as bytes."""
    # Bug fix: the original set ``libpygio.get_variable.restype`` here (a
    # copy-paste from gio_get_variable), clobbering get_variable's restype
    # while leaving get_octree's at its module-level declaration.
    libpygio.get_octree.restype = ct.POINTER(ct.c_char)
    temp_str = libpygio.get_octree(file_name)
    return ct.string_at(temp_str)
def gio_get_octree_leaves(file_name, extents):
    """Return (num_leaves, leaves) for the octree leaves covering ``extents``.

    extents - sequence of ints marshalled to the C library as a C int array
    leaves  - POINTER(c_int) returned by the C library with ``num_leaves``
              entries (ownership/lifetime not documented here -- verify)
    """
    exts = (ct.c_int * len(extents))(*extents)
    num_leaves = libpygio.get_num_octree_leaves(file_name, exts)
    # The original allocated a numpy array here and immediately discarded it
    # (the data_as() result was unused and ``result`` was rebound by the C
    # call); that dead allocation has been removed.
    leaves = libpygio.get_octree_leaves(file_name, exts)
    return num_leaves, leaves
return num_leaves, result
| [
"ctypes.POINTER",
"ctypes.string_at",
"os.path.dirname",
"numpy.ndarray",
"ctypes.CDLL"
] | [((2039, 2064), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2054, 2064), False, 'import os\n'), ((2076, 2119), 'ctypes.CDLL', 'ct.CDLL', (["(_path + '/../frontend/libpygio.so')"], {}), "(_path + '/../frontend/libpygio.so')\n", (2083, 2119), True, 'import ctypes as ct\n'), ((4142, 4162), 'ctypes.POINTER', 'ct.POINTER', (['ct.c_int'], {}), '(ct.c_int)\n', (4152, 4162), True, 'import ctypes as ct\n'), ((2719, 2739), 'ctypes.POINTER', 'ct.POINTER', (['ct.c_int'], {}), '(ct.c_int)\n', (2729, 2739), True, 'import ctypes as ct\n'), ((2849, 2871), 'ctypes.POINTER', 'ct.POINTER', (['ct.c_int64'], {}), '(ct.c_int64)\n', (2859, 2871), True, 'import ctypes as ct\n'), ((2981, 3003), 'ctypes.POINTER', 'ct.POINTER', (['ct.c_float'], {}), '(ct.c_float)\n', (2991, 3003), True, 'import ctypes as ct\n'), ((3115, 3138), 'ctypes.POINTER', 'ct.POINTER', (['ct.c_double'], {}), '(ct.c_double)\n', (3125, 3138), True, 'import ctypes as ct\n'), ((3268, 3288), 'ctypes.POINTER', 'ct.POINTER', (['ct.c_int'], {}), '(ct.c_int)\n', (3278, 3288), True, 'import ctypes as ct\n'), ((3406, 3428), 'ctypes.POINTER', 'ct.POINTER', (['ct.c_int64'], {}), '(ct.c_int64)\n', (3416, 3428), True, 'import ctypes as ct\n'), ((3546, 3568), 'ctypes.POINTER', 'ct.POINTER', (['ct.c_float'], {}), '(ct.c_float)\n', (3556, 3568), True, 'import ctypes as ct\n'), ((3688, 3711), 'ctypes.POINTER', 'ct.POINTER', (['ct.c_double'], {}), '(ct.c_double)\n', (3698, 3711), True, 'import ctypes as ct\n'), ((4084, 4104), 'ctypes.POINTER', 'ct.POINTER', (['ct.c_int'], {}), '(ct.c_int)\n', (4094, 4104), True, 'import ctypes as ct\n'), ((4213, 4233), 'ctypes.POINTER', 'ct.POINTER', (['ct.c_int'], {}), '(ct.c_int)\n', (4223, 4233), True, 'import ctypes as ct\n'), ((7219, 7240), 'ctypes.POINTER', 'ct.POINTER', (['ct.c_char'], {}), '(ct.c_char)\n', (7229, 7240), True, 'import ctypes as ct\n'), ((7304, 7326), 'ctypes.string_at', 'ct.string_at', (['temp_str'], {}), '(temp_str)\n', (7316, 7326), True, 
'import ctypes as ct\n'), ((7396, 7417), 'ctypes.POINTER', 'ct.POINTER', (['ct.c_char'], {}), '(ct.c_char)\n', (7406, 7417), True, 'import ctypes as ct\n'), ((7476, 7498), 'ctypes.string_at', 'ct.string_at', (['temp_str'], {}), '(temp_str)\n', (7488, 7498), True, 'import ctypes as ct\n'), ((7675, 7713), 'numpy.ndarray', 'np.ndarray', (['num_leaves'], {'dtype': 'np.int32'}), '(num_leaves, dtype=np.int32)\n', (7685, 7713), True, 'import numpy as np\n'), ((7741, 7763), 'ctypes.POINTER', 'ct.POINTER', (['ct.c_int32'], {}), '(ct.c_int32)\n', (7751, 7763), True, 'import ctypes as ct\n'), ((4639, 4677), 'numpy.ndarray', 'np.ndarray', (['var_size'], {'dtype': 'np.float32'}), '(var_size, dtype=np.float32)\n', (4649, 4677), True, 'import numpy as np\n'), ((5948, 5986), 'numpy.ndarray', 'np.ndarray', (['var_size'], {'dtype': 'np.float32'}), '(var_size, dtype=np.float32)\n', (5958, 5986), True, 'import numpy as np\n'), ((4870, 4908), 'numpy.ndarray', 'np.ndarray', (['var_size'], {'dtype': 'np.float64'}), '(var_size, dtype=np.float64)\n', (4880, 4908), True, 'import numpy as np\n'), ((6176, 6214), 'numpy.ndarray', 'np.ndarray', (['var_size'], {'dtype': 'np.float64'}), '(var_size, dtype=np.float64)\n', (6186, 6214), True, 'import numpy as np\n'), ((4767, 4789), 'ctypes.POINTER', 'ct.POINTER', (['ct.c_float'], {}), '(ct.c_float)\n', (4777, 4789), True, 'import ctypes as ct\n'), ((5102, 5138), 'numpy.ndarray', 'np.ndarray', (['var_size'], {'dtype': 'np.int32'}), '(var_size, dtype=np.int32)\n', (5112, 5138), True, 'import numpy as np\n'), ((6061, 6083), 'ctypes.POINTER', 'ct.POINTER', (['ct.c_float'], {}), '(ct.c_float)\n', (6071, 6083), True, 'import ctypes as ct\n'), ((6405, 6441), 'numpy.ndarray', 'np.ndarray', (['var_size'], {'dtype': 'np.int32'}), '(var_size, dtype=np.int32)\n', (6415, 6441), True, 'import numpy as np\n'), ((4999, 5022), 'ctypes.POINTER', 'ct.POINTER', (['ct.c_double'], {}), '(ct.c_double)\n', (5009, 5022), True, 'import ctypes as ct\n'), ((5330, 5366), 
'numpy.ndarray', 'np.ndarray', (['var_size'], {'dtype': 'np.int64'}), '(var_size, dtype=np.int64)\n', (5340, 5366), True, 'import numpy as np\n'), ((6290, 6313), 'ctypes.POINTER', 'ct.POINTER', (['ct.c_double'], {}), '(ct.c_double)\n', (6300, 6313), True, 'import ctypes as ct\n'), ((6630, 6666), 'numpy.ndarray', 'np.ndarray', (['var_size'], {'dtype': 'np.int64'}), '(var_size, dtype=np.int64)\n', (6640, 6666), True, 'import numpy as np\n'), ((5228, 5250), 'ctypes.POINTER', 'ct.POINTER', (['ct.c_int32'], {}), '(ct.c_int32)\n', (5238, 5250), True, 'import ctypes as ct\n'), ((6516, 6538), 'ctypes.POINTER', 'ct.POINTER', (['ct.c_int32'], {}), '(ct.c_int32)\n', (6526, 6538), True, 'import ctypes as ct\n'), ((5456, 5478), 'ctypes.POINTER', 'ct.POINTER', (['ct.c_int64'], {}), '(ct.c_int64)\n', (5466, 5478), True, 'import ctypes as ct\n'), ((6741, 6763), 'ctypes.POINTER', 'ct.POINTER', (['ct.c_int64'], {}), '(ct.c_int64)\n', (6751, 6763), True, 'import ctypes as ct\n')] |
import numpy as np
import math
import matplotlib.pyplot as plt
import itertools as it
"""
class of functions to statistically compare cutoff 2-d spatiotemporal point processes to simulated complete spatial randomness. This is an implementation of Ripley's K function for 2-D space time.
"""
class RipleysKEstimator_spacetime:
    def __init__(self,t_max, d_max, t_min, d_min, width, dt):
        """Initialize the estimator.

        t_max - int, last year of model iteration
        d_max - int, longest centerline length
        t_min - int, 0 (start of the time window)
        d_min - int, 0 (start of the space window)
        width - constant channel width in m
        dt    - model time step (units not documented -- presumably years;
                verify against the caller)
        """
        self.t_max = t_max # last year
        self.d_max = d_max # max centerline length
        self.t_min = t_min # 0
        self.d_min = d_min # 0
        self.width = width
        self.dt = dt
    def __call__(self, cutoffs, mode, max_search_d, max_search_t, plotornot):
        """Compare ``cutoffs`` against 99 Monte Carlo realizations of
        complete spatial randomness.

        Delegates to ``self.mc_env`` (defined later in this class) with
        nit=99 simulated iterations; all other arguments pass through.
        """
        return self.mc_env(cutoffs = cutoffs, nit=99, mode = mode, max_search_d=max_search_d, max_search_t=max_search_t, plotornot=plotornot)
def _pairwise_diffs(self, data):
"""
compute array of distances between every point in 1D
data - 1-D array of deltas, space or time
"""
npts = len(data)
diff = np.zeros(shape=(npts * (npts - 1) // 2), dtype=np.double)
datai = np.zeros(shape=(npts * (npts - 1) // 2), dtype=np.double)
k = 0
for i in range(npts - 1):
for j in range(i+1, npts):
diff[k] = abs(data[i] - data[j])
datai[k] = data[i]
k += 1
return datai, diff
def _near_neigh(self,data):
"""
compute array of distance between every point and its nearest neighbor in 1D
data - 1-D array of deltas, space or time
"""
npts = len(data)
diff = np.zeros(shape=npts, dtype=np.double)
for i in range(npts):
others= np.hstack((data[i::-1], data[i+1:]))
mask = np.ones(len(data), dtype=bool)
mask[i] = False
others = others[mask]
diff[i] = np.min(abs(data[i] - others))
return diff
def _weights(self, xi, yi, diff_d, diff_t):
"""
compute weights for edge effect correction. one over intersecting area
"""
npts = 500
weights = np.ones(shape=(npts * (npts - 1) // 2), dtype=np.double)
#top
weights[(((self.t_max- yi) - diff_t )<= 0)] =2
#bottom
weights[((yi-diff_t) <= 0)] =2
weights[(((self.d_max- xi) - diff_d )<= 0)] =2
weights[((xi-diff_d) <= 0)] =2
return weights
def _evaluate(self, data, dist_space, dist_time, mode):
"""
INPUTS
data - 2-D array of N by 2 size where N is number of cutoffs in dataset, column [:,0] records distance downstream and [:,1] is time.
dist_space - 1-D array of radii to search in for intensity estimates
dist_time - 1-D array of durations to search in for intensity estimates
mode - statistical measurement to be made, str, either 'K_st, 'K', 'G', or 'H'
OUTPUTS
stat_d - 1-D Ripleys K at distances dist_space
stat_t - 1-D temporal Ripleys K at durations dist_time
stat_dt - 2-D spatiotemporal Ripleys K of M by N{n} size, where M{t} is dist_space, T is dist_time, and each array member is the intensity of the sampled point process at n search distance and t search time.
"""
data = np.asarray(data)
npts = len(data)
stat_d = np.zeros(len(dist_space)) #1-D spatial statistic
stat_t = np.zeros(len(dist_time))#1-D temporal statistic
stat_dt = np.zeros((len(dist_space), len(dist_time))) #2D space-time statistic
null = stat_dt.copy()
if mode == "H":
"""
H , the probability of finding neighbors in search dist
"""
deltaspace = self._pairwise_diffs(data[:,0])
deltatime = self._pairwise_diffs(data[:,1])
for i in range(len(dist_space)):
d_indicator = (deltaspace <=dist_space[i])
stat_d[i] = (d_indicator).sum()
for i in range(len(dist_time)):
t_indicator = (deltatime<=dist_time[i])
stat_t[i] = (t_indicator).sum()
stat_t = 2*stat_t/(npts*(npts-1))
stat_d = 2*stat_d/(npts*(npts-1))
return (stat_d, stat_t)
if mode == "G":
"""
G, probability the nearest neighbor is within search dist
"""
deltaspace = self._near_neigh(data[:,0])
deltatime = self._near_neigh(data[:,1])
for i in range(len(dist_space)):
d_indicator = (deltaspace <=dist_space[i])
stat_d[i] = (d_indicator).sum()
for i in range(len(dist_time)):
t_indicator = (deltatime<=dist_time[i])
stat_t[i] = (t_indicator).sum()
stat_t = stat_t/(npts)
stat_d = stat_d/(npts)
return (stat_d, stat_t)
if mode == "K":
"""
number of additional events near other events on time scales of dist_time and spatial scales of dist_space, 2 1-d plots
"""
xi, deltaspace = self._pairwise_diffs(data[:,0])
yi, deltatime = self._pairwise_diffs(data[:,1])
for i in range(len(dist_space)):
d_indicator = (deltaspace <=dist_space[i])
stat_d[i] = (d_indicator*xi).sum()
for i in range(len(dist_time)):
t_indicator = (deltatime<=dist_time[i])
stat_t[i] = (t_indicator*yi).sum()
stat_t = 2*(self.t_max*stat_t/(npts*(npts-1)))
stat_d = 2*(self.d_max*stat_d/((npts-1)*npts))
return (stat_d, stat_t)
if mode == "K_st":
"""
number of additional events near other events given sepcific search distances and durations. 2D heatmap
"""
xi, deltaspace = self._pairwise_diffs(data[:,0])
yi, deltatime = self._pairwise_diffs(data[:,1])
weights = self._weights(xi, yi, deltaspace, deltatime)
for x in range(len(dist_space)):
for t in range(len(dist_time)):
dt_indicator = (deltatime<=dist_time[t])&(deltaspace <=dist_space[x])
stat_dt[x,t] = (dt_indicator*weights).sum()
stat_dt = (self.d_max*self.t_max*stat_dt)/(npts*(npts-1))
return(stat_dt)
    def mc_env(self,cutoffs, nit, mode, max_search_d, max_search_t, plotornot):
        """Compare the observed cutoff pattern to `nit` random (CSR) realizations.

        cutoffs      - DataFrame with 'downstream_distance' and 'time' columns
        nit          - number of Monte-Carlo realizations for the envelope
        mode         - 'K_st' for the 2-D space-time statistic, otherwise the
                       two 1-D K statistics are computed and plotted
        max_search_d - number of spatial search radii (multiples of width)
        max_search_t - number of temporal search durations (multiples of dt)
        plotornot    - 1 to plot the space-time result, anything else skips it

        Returns [normalized, middle, upper-middle, lower-middle] in 'K_st'
        mode; in other modes only plots (returns None).
        """
        # Fixed seed so the Monte-Carlo envelope is reproducible.
        rng = np.random.default_rng(seed = 80)
        data = cutoffs[['downstream_distance', 'time']].to_numpy()
        num_samples = len(cutoffs.time)
        # Search durations and radii in physical units (multiples of dt / width).
        r_time = np.linspace(self.dt,self.dt*max_search_t, max_search_t)
        r_space = np.linspace(self.width,self.width*max_search_d, max_search_d)
        # Per-realization statistics: 1-D space, 1-D time, and 2-D space-time.
        mc_d = np.zeros((len(r_space), nit))
        mc_t = np.zeros((len(r_time), nit))
        mc_dt = np.zeros((len(r_space), len(r_time), nit))
        z = np.zeros((num_samples, 2))
        for i in range(nit):
            # Uniform random events over the same space-time window as the data.
            z[:,0] = rng.random(size = num_samples)*self.d_max
            z[:,1] = rng.random(size = num_samples)*self.t_max
            k_dt = self._evaluate(data=z, dist_time=r_time, dist_space=r_space, mode='K_st')
            mc_dt[:, :, i]= k_dt
            k_d, k_t = self._evaluate(data=z, dist_time=r_time, dist_space=r_space, mode='K')
            mc_d[:,i] = k_d
            mc_t[:,i] = k_t
        if mode == 'K_st':
            ## monte carlo envelope - limits on probable randomness
            upper_dt = np.percentile(mc_dt, 97.5, axis = 2)
            lower_dt = np.percentile(mc_dt, 2.5, axis = 2)
            middle_dt = np.ma.mean(mc_dt, axis = 2)
            upper_d = np.ma.max(mc_d, axis = 1)
            lower_d = np.ma.min(mc_d, axis = 1)
            upper_t = np.ma.max(mc_t, axis = 1)
            lower_t = np.ma.min(mc_t, axis = 1)
            #K values
            stat_d, stat_t = self._evaluate(data=data, dist_time=r_time, dist_space=r_space, mode='K')
            #space-time K
            stat_dt = self._evaluate(data=data, dist_time=r_time, dist_space=r_space, mode=mode)
            #dependent_clustered = (stat_dt>np.multiply(upper_d.reshape(len(upper_d),1),upper_t))
            #dependent_regular = (stat_dt<np.multiply(lower_d.reshape(len(lower_d),1),lower_t))
            #locations of statictically nonrandom, dependent K values
            #significantly more aggregated than upper mc env, and
            # Cells outside the 95% envelope are statistically non-random.
            clustered = (stat_dt>upper_dt)
            regular = (stat_dt<lower_dt)
            sig_mask = (clustered+regular)
            #stat_d_times_stat_t = np.multiply(stat_d.reshape(len(stat_d),1),stat_t.reshape(1,len(stat_t)))
            # Center the observed statistic on the Monte-Carlo mean.
            normalized = stat_dt-middle_dt
            if plotornot == 1:
                self.plot_st(r_space, r_time, normalized, sig_mask, np.sum(normalized), lower_dt-middle_dt, upper_dt-middle_dt)#
            return [normalized,middle_dt, upper_dt-middle_dt, lower_dt-middle_dt]
        else:
            #monte carlo envelope
            upper_d = np.ma.max(mc_d, axis = 1)/(r_space*2)-1
            upper_t = np.ma.max(mc_t, axis = 1)/(r_time*2)-1
            lower_d = np.ma.min(mc_d, axis = 1)/(r_space*2)-1
            lower_t = np.ma.min(mc_t, axis = 1)/(r_time*2)-1
            #Simulated Poisson distribution (Monte-Carlo mean)
            middle_d = np.ma.mean(mc_d, axis = 1)/(r_space*2)-1
            middle_t = np.ma.mean(mc_t, axis = 1)/(r_time*2)-1
            #K values
            stat_d, stat_t = self._evaluate(data=data, dist_time=r_time, dist_space=r_space, mode=mode)
            #normalize to what's expected under poisson
            stat_d = (stat_d)/(r_space*2) -1
            stat_t = (stat_t)/(r_time*2) -1
            self.plot(upper_d,upper_t, lower_d, lower_t, middle_d, middle_t, r_space, r_time, stat_d, stat_t, num_samples)
def plot_st(self, r_space, r_time, normalized, sig_mask, D, lowerlim, upperlim):
plt.rcParams.update({'font.size': 10})
fig,ax = plt.subplots(figsize = (8,4))
cmap = plt.get_cmap('PiYG')
vscale = np.max(abs(normalized))
#im = ax.imshow(np.ma.masked_values(normalized, 0),origin='lower' cmap = cmap)
im =ax.pcolormesh(np.swapaxes(normalized,0,1), cmap = cmap,vmin = -np.max(np.abs(lowerlim, upperlim)), vmax = np.max(np.abs(lowerlim, upperlim)))
#plt.pcolor(np.ma.masked_values(np.swapaxes(normalized*sig_mask,0,1),0), edgecolors='k', linewidths=4, alpha=0.)
im2 =ax.pcolormesh(np.ma.masked_values(np.swapaxes(normalized*sig_mask,0,1)/np.swapaxes(normalized*sig_mask,0,1),0), zorder=2, linewidths = .01,facecolor='none', edgecolors='k',cmap='gray')
plt.title('D ='+str(D), pad = 10)
cbar = ax.figure.colorbar(im, ax=ax)#, ticks = [-2,-1,0,1,2])
cbar.ax.set_ylabel("D(d,t) = K_hat(d,t)-K(d,t)", va="bottom", rotation=-90)
#cbar.ax.set_yticklabels(['<-2', '-1', '0','1','>2'])
ax.set_ylim(bottom=0, top=max(r_time/self.dt))
ax.set_xlim(left=0, right=max(r_space/self.width))
ax.set_ylabel('time window (years)')
ax.set_xlabel('search distance (ch-w)')
ax.set_xticks(r_space/(self.width), minor=True)
ax.set_yticks(r_time/self.dt, minor=True)
ax.set_xticks(r_space/self.width-.5)
ax.set_yticks(r_time/self.dt-.5)
ax.set_yticklabels((r_time).astype(int))
ax.set_xticklabels((r_space/100).astype(int))#, rotation='vertical')
ax.tick_params(axis = 'both', which = 'major', top =False, bottom = False, left = False, right = False)
#ax.grid(True, which='minor', color='k', linewidth=.1)
return fig,
def plot(self,upper_d,upper_t, lower_d, lower_t, middle_d, middle_t, r_space, r_time, stat_d, stat_t, num_samples):
#1-d spatial Ripley's K
fig = plt.figure()
#plot CSR envelope
plt.plot(r_space/self.width, upper_d, color='red', ls=':', label='_nolegend_', linewidth = .5)
plt.plot(r_space/self.width, lower_d, color='red', ls=':', label='_nolegend_', linewidth = .5)
plt.plot(r_space/self.width, middle_d, color='red', ls=':', label='CSR', linewidth = .5)
plt.plot(r_space/self.width, stat_d, color = "black", linewidth = .5,label = str(num_samples)+ ' cutoffs')
plt.legend(loc = 'lower right')
plt.xlabel("d along centerline (ch-w)")
plt.ylabel('K/2-d')
plt.title("Homegrown 1D space EDF")
#plt.savefig(resultdir + str(year)+"yrs_Space_Ripley_"+mode+".jpg", dpi = 500)
plt.show()
#1-D Temporal Ripley's K
fig2 = plt.figure()
#plot CSR envelope
plt.plot(r_time, upper_t, color='red', ls=':',linewidth = .5, label='_nolegend_')
plt.plot(r_time, lower_t, color='red', ls=':',linewidth = .5, label='_nolegend_')
plt.plot(r_time, middle_t, color='red', ls=':',linewidth = .5, label='CSR')
plt.plot(r_time, stat_t, color = "black", linewidth = .5, label =str(num_samples)+ ' cutoffs')
plt.legend(loc = 'lower right')
plt.xlabel("t in years")
plt.ylabel('K/2 -t')
plt.title("Homegrown 1D time EDF")
plt.show() | [
"numpy.random.default_rng",
"matplotlib.pyplot.ylabel",
"numpy.hstack",
"numpy.ma.min",
"numpy.ma.max",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.asarray",
"numpy.linspace",
"numpy.abs",
"numpy.ones",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend",
"matplotlib.pyp... | [((1348, 1403), 'numpy.zeros', 'np.zeros', ([], {'shape': '(npts * (npts - 1) // 2)', 'dtype': 'np.double'}), '(shape=npts * (npts - 1) // 2, dtype=np.double)\n', (1356, 1403), True, 'import numpy as np\n'), ((1424, 1479), 'numpy.zeros', 'np.zeros', ([], {'shape': '(npts * (npts - 1) // 2)', 'dtype': 'np.double'}), '(shape=npts * (npts - 1) // 2, dtype=np.double)\n', (1432, 1479), True, 'import numpy as np\n'), ((1949, 1986), 'numpy.zeros', 'np.zeros', ([], {'shape': 'npts', 'dtype': 'np.double'}), '(shape=npts, dtype=np.double)\n', (1957, 1986), True, 'import numpy as np\n'), ((2471, 2525), 'numpy.ones', 'np.ones', ([], {'shape': '(npts * (npts - 1) // 2)', 'dtype': 'np.double'}), '(shape=npts * (npts - 1) // 2, dtype=np.double)\n', (2478, 2525), True, 'import numpy as np\n'), ((3693, 3709), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (3703, 3709), True, 'import numpy as np\n'), ((7084, 7114), 'numpy.random.default_rng', 'np.random.default_rng', ([], {'seed': '(80)'}), '(seed=80)\n', (7105, 7114), True, 'import numpy as np\n'), ((7245, 7303), 'numpy.linspace', 'np.linspace', (['self.dt', '(self.dt * max_search_t)', 'max_search_t'], {}), '(self.dt, self.dt * max_search_t, max_search_t)\n', (7256, 7303), True, 'import numpy as np\n'), ((7320, 7384), 'numpy.linspace', 'np.linspace', (['self.width', '(self.width * max_search_d)', 'max_search_d'], {}), '(self.width, self.width * max_search_d, max_search_d)\n', (7331, 7384), True, 'import numpy as np\n'), ((7546, 7572), 'numpy.zeros', 'np.zeros', (['(num_samples, 2)'], {}), '((num_samples, 2))\n', (7554, 7572), True, 'import numpy as np\n'), ((10682, 10720), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 10}"], {}), "({'font.size': 10})\n", (10701, 10720), True, 'import matplotlib.pyplot as plt\n'), ((10739, 10767), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(8, 4)'}), '(figsize=(8, 4))\n', (10751, 10767), True, 'import 
matplotlib.pyplot as plt\n'), ((10795, 10815), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""PiYG"""'], {}), "('PiYG')\n", (10807, 10815), True, 'import matplotlib.pyplot as plt\n'), ((12598, 12610), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (12608, 12610), True, 'import matplotlib.pyplot as plt\n'), ((12648, 12748), 'matplotlib.pyplot.plot', 'plt.plot', (['(r_space / self.width)', 'upper_d'], {'color': '"""red"""', 'ls': '""":"""', 'label': '"""_nolegend_"""', 'linewidth': '(0.5)'}), "(r_space / self.width, upper_d, color='red', ls=':', label=\n '_nolegend_', linewidth=0.5)\n", (12656, 12748), True, 'import matplotlib.pyplot as plt\n'), ((12752, 12852), 'matplotlib.pyplot.plot', 'plt.plot', (['(r_space / self.width)', 'lower_d'], {'color': '"""red"""', 'ls': '""":"""', 'label': '"""_nolegend_"""', 'linewidth': '(0.5)'}), "(r_space / self.width, lower_d, color='red', ls=':', label=\n '_nolegend_', linewidth=0.5)\n", (12760, 12852), True, 'import matplotlib.pyplot as plt\n'), ((12856, 12949), 'matplotlib.pyplot.plot', 'plt.plot', (['(r_space / self.width)', 'middle_d'], {'color': '"""red"""', 'ls': '""":"""', 'label': '"""CSR"""', 'linewidth': '(0.5)'}), "(r_space / self.width, middle_d, color='red', ls=':', label='CSR',\n linewidth=0.5)\n", (12864, 12949), True, 'import matplotlib.pyplot as plt\n'), ((13070, 13099), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (13080, 13099), True, 'import matplotlib.pyplot as plt\n'), ((13111, 13150), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""d along centerline (ch-w)"""'], {}), "('d along centerline (ch-w)')\n", (13121, 13150), True, 'import matplotlib.pyplot as plt\n'), ((13160, 13179), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""K/2-d"""'], {}), "('K/2-d')\n", (13170, 13179), True, 'import matplotlib.pyplot as plt\n'), ((13189, 13224), 'matplotlib.pyplot.title', 'plt.title', (['"""Homegrown 1D space EDF"""'], {}), "('Homegrown 1D space 
EDF')\n", (13198, 13224), True, 'import matplotlib.pyplot as plt\n'), ((13322, 13332), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13330, 13332), True, 'import matplotlib.pyplot as plt\n'), ((13399, 13411), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (13409, 13411), True, 'import matplotlib.pyplot as plt\n'), ((13449, 13535), 'matplotlib.pyplot.plot', 'plt.plot', (['r_time', 'upper_t'], {'color': '"""red"""', 'ls': '""":"""', 'linewidth': '(0.5)', 'label': '"""_nolegend_"""'}), "(r_time, upper_t, color='red', ls=':', linewidth=0.5, label=\n '_nolegend_')\n", (13457, 13535), True, 'import matplotlib.pyplot as plt\n'), ((13540, 13626), 'matplotlib.pyplot.plot', 'plt.plot', (['r_time', 'lower_t'], {'color': '"""red"""', 'ls': '""":"""', 'linewidth': '(0.5)', 'label': '"""_nolegend_"""'}), "(r_time, lower_t, color='red', ls=':', linewidth=0.5, label=\n '_nolegend_')\n", (13548, 13626), True, 'import matplotlib.pyplot as plt\n'), ((13631, 13706), 'matplotlib.pyplot.plot', 'plt.plot', (['r_time', 'middle_t'], {'color': '"""red"""', 'ls': '""":"""', 'linewidth': '(0.5)', 'label': '"""CSR"""'}), "(r_time, middle_t, color='red', ls=':', linewidth=0.5, label='CSR')\n", (13639, 13706), True, 'import matplotlib.pyplot as plt\n'), ((13820, 13849), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (13830, 13849), True, 'import matplotlib.pyplot as plt\n'), ((13861, 13885), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t in years"""'], {}), "('t in years')\n", (13871, 13885), True, 'import matplotlib.pyplot as plt\n'), ((13895, 13915), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""K/2 -t"""'], {}), "('K/2 -t')\n", (13905, 13915), True, 'import matplotlib.pyplot as plt\n'), ((13925, 13959), 'matplotlib.pyplot.title', 'plt.title', (['"""Homegrown 1D time EDF"""'], {}), "('Homegrown 1D time EDF')\n", (13934, 13959), True, 'import matplotlib.pyplot as plt\n'), ((13969, 13979), 
'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13977, 13979), True, 'import matplotlib.pyplot as plt\n'), ((2039, 2077), 'numpy.hstack', 'np.hstack', (['(data[i::-1], data[i + 1:])'], {}), '((data[i::-1], data[i + 1:]))\n', (2048, 2077), True, 'import numpy as np\n'), ((8145, 8179), 'numpy.percentile', 'np.percentile', (['mc_dt', '(97.5)'], {'axis': '(2)'}), '(mc_dt, 97.5, axis=2)\n', (8158, 8179), True, 'import numpy as np\n'), ((8206, 8239), 'numpy.percentile', 'np.percentile', (['mc_dt', '(2.5)'], {'axis': '(2)'}), '(mc_dt, 2.5, axis=2)\n', (8219, 8239), True, 'import numpy as np\n'), ((8267, 8292), 'numpy.ma.mean', 'np.ma.mean', (['mc_dt'], {'axis': '(2)'}), '(mc_dt, axis=2)\n', (8277, 8292), True, 'import numpy as np\n'), ((8332, 8355), 'numpy.ma.max', 'np.ma.max', (['mc_d'], {'axis': '(1)'}), '(mc_d, axis=1)\n', (8341, 8355), True, 'import numpy as np\n'), ((8381, 8404), 'numpy.ma.min', 'np.ma.min', (['mc_d'], {'axis': '(1)'}), '(mc_d, axis=1)\n', (8390, 8404), True, 'import numpy as np\n'), ((8430, 8453), 'numpy.ma.max', 'np.ma.max', (['mc_t'], {'axis': '(1)'}), '(mc_t, axis=1)\n', (8439, 8453), True, 'import numpy as np\n'), ((8479, 8502), 'numpy.ma.min', 'np.ma.min', (['mc_t'], {'axis': '(1)'}), '(mc_t, axis=1)\n', (8488, 8502), True, 'import numpy as np\n'), ((10973, 11002), 'numpy.swapaxes', 'np.swapaxes', (['normalized', '(0)', '(1)'], {}), '(normalized, 0, 1)\n', (10984, 11002), True, 'import numpy as np\n'), ((9511, 9529), 'numpy.sum', 'np.sum', (['normalized'], {}), '(normalized)\n', (9517, 9529), True, 'import numpy as np\n'), ((9742, 9765), 'numpy.ma.max', 'np.ma.max', (['mc_d'], {'axis': '(1)'}), '(mc_d, axis=1)\n', (9751, 9765), True, 'import numpy as np\n'), ((9805, 9828), 'numpy.ma.max', 'np.ma.max', (['mc_t'], {'axis': '(1)'}), '(mc_t, axis=1)\n', (9814, 9828), True, 'import numpy as np\n'), ((9877, 9900), 'numpy.ma.min', 'np.ma.min', (['mc_d'], {'axis': '(1)'}), '(mc_d, axis=1)\n', (9886, 9900), True, 'import numpy as np\n'), 
((9940, 9963), 'numpy.ma.min', 'np.ma.min', (['mc_t'], {'axis': '(1)'}), '(mc_t, axis=1)\n', (9949, 9963), True, 'import numpy as np\n'), ((10048, 10072), 'numpy.ma.mean', 'np.ma.mean', (['mc_d'], {'axis': '(1)'}), '(mc_d, axis=1)\n', (10058, 10072), True, 'import numpy as np\n'), ((10113, 10137), 'numpy.ma.mean', 'np.ma.mean', (['mc_t'], {'axis': '(1)'}), '(mc_t, axis=1)\n', (10123, 10137), True, 'import numpy as np\n'), ((11072, 11098), 'numpy.abs', 'np.abs', (['lowerlim', 'upperlim'], {}), '(lowerlim, upperlim)\n', (11078, 11098), True, 'import numpy as np\n'), ((11271, 11311), 'numpy.swapaxes', 'np.swapaxes', (['(normalized * sig_mask)', '(0)', '(1)'], {}), '(normalized * sig_mask, 0, 1)\n', (11282, 11311), True, 'import numpy as np\n'), ((11308, 11348), 'numpy.swapaxes', 'np.swapaxes', (['(normalized * sig_mask)', '(0)', '(1)'], {}), '(normalized * sig_mask, 0, 1)\n', (11319, 11348), True, 'import numpy as np\n'), ((11029, 11055), 'numpy.abs', 'np.abs', (['lowerlim', 'upperlim'], {}), '(lowerlim, upperlim)\n', (11035, 11055), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from typing import Tuple
import jax.numpy as jnp
import numpy as np
import numpyro
import numpyro.distributions as dist
from astropy.io import fits
from jax import random
from jax.config import config as jax_config
from numpyro.distributions.transforms import AffineTransform
from numpyro.infer import SVI
from numpyro_ncx2 import NoncentralChi2
from tqdm import tqdm
# Use double precision throughout JAX for numerical stability of the fits.
jax_config.update("jax_enable_x64", True)
# Default selection window: bp_rp color and phot_g_mean_mag magnitude.
MIN_COLOR: float = 0.0
MAX_COLOR: float = 5.5
MIN_MAG: float = 4.5
MAX_MAG: float = 16.0
def setup_model(sample_variance) -> SVI:
    """Build an SVI object for the per-target RV-variance model.

    sample_variance: 1-D array of per-target sample variances used only to
    initialize the variational parameters.

    The model: a shared ``log_sigma`` plus one ``log_k`` per target; the
    observed statistic follows a scaled noncentral chi-squared distribution.
    """
    def model(num_transit, statistic=None):
        # Shared (population-level) log noise scale.
        log_sigma = numpyro.sample("log_sigma", dist.Normal(0.0, 10.0))
        with numpyro.plate("targets", len(num_transit)):
            # Per-target log amplitude.
            log_k = numpyro.sample("log_k", dist.Normal(0.0, 10.0))
            # Noncentrality parameter of the chi-squared likelihood.
            lam = num_transit * 0.5 * jnp.exp(2 * (log_k - log_sigma))
            numpyro.sample(
                "obs",
                dist.TransformedDistribution(
                    NoncentralChi2(num_transit - 1, lam),
                    AffineTransform(loc=0.0, scale=jnp.exp(2 * log_sigma)),
                ),
                obs=statistic,
            )
    # Initialize the guide near the empirical variances so SVI starts in a
    # sensible region.
    init = {
        "log_sigma": 0.5 * np.log(np.median(sample_variance)),
        "log_k": np.log(np.sqrt(sample_variance)),
    }
    guide = numpyro.infer.autoguide.AutoNormal(
        model, init_loc_fn=numpyro.infer.init_to_value(values=init)
    )
    optimizer = numpyro.optim.Adam(step_size=1e-3)
    return SVI(model, guide, optimizer, loss=numpyro.infer.Trace_ELBO())
def load_data(
    data_path: str,
    *,
    min_nb_transits: int = 3,
    color_range: Tuple[float, float] = (MIN_COLOR, MAX_COLOR),
    mag_range: Tuple[float, float] = (MIN_MAG, MAX_MAG),
) -> fits.fitsrec.FITS_rec:
    """Load the FITS catalog at `data_path` and apply quality cuts.

    Keeps rows with finite magnitude, color and RV error, more than
    `min_nb_transits` RV transits, and color/magnitude strictly inside the
    given (open) ranges.
    """
    print("Loading data...")
    with fits.open(data_path) as f:
        data = f[1].data
    mag = data["phot_g_mean_mag"]
    color = data["bp_rp"]
    rv_err = data["dr2_radial_velocity_error"]
    # All three photometric/spectroscopic quantities must be finite.
    keep = np.isfinite(mag) & np.isfinite(color) & np.isfinite(rv_err)
    keep &= data["dr2_rv_nb_transits"] > min_nb_transits
    # Open-interval color and magnitude cuts.
    keep &= (color_range[0] < color) & (color < color_range[1])
    keep &= (mag_range[0] < mag) & (mag < mag_range[1])
    return data[keep]
def fit_data(
    data: fits.fitsrec.FITS_rec,
    *,
    num_mag_bins: int,
    num_color_bins: int,
    color_range: Tuple[float, float] = (MIN_COLOR, MAX_COLOR),
    mag_range: Tuple[float, float] = (MIN_MAG, MAX_MAG),
    num_iter: int = 5,
    targets_per_fit: int = 1000,
    num_optim: int = 5000,
    seed: int = 11239,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
    """Fit the noise model independently in each (magnitude, color) bin.

    Returns (mu, sigma, count): the variational location and scale of
    ``log_sigma`` per bin and iteration, and the number of targets per bin.
    Bins with fewer than 50 targets are left as NaN; bins with at most
    `targets_per_fit` targets get a single fit broadcast across iterations;
    larger bins are fit `num_iter` times on random subsamples.
    """
    # Parse data
    num_transit = np.ascontiguousarray(
        data["dr2_rv_nb_transits"], dtype=np.int32
    )
    eps = np.ascontiguousarray(
        data["dr2_radial_velocity_error"], dtype=np.float32
    )
    # Per-target sample variance implied by the catalog RV error.
    # NOTE(review): 0.11 looks like a noise floor in km/s — confirm.
    sample_variance = 2 * num_transit * (eps ** 2 - 0.11 ** 2) / np.pi
    mag = np.ascontiguousarray(data["phot_g_mean_mag"], dtype=np.float32)
    color = np.ascontiguousarray(data["bp_rp"], dtype=np.float32)
    # Setup the grid and allocate the memory
    mag_bins = np.linspace(mag_range[0], mag_range[1], num_mag_bins + 1)
    color_bins = np.linspace(
        color_range[0], color_range[1], num_color_bins + 1
    )
    mu = np.empty((len(mag_bins) - 1, len(color_bins) - 1, num_iter))
    sigma = np.empty_like(mu)
    count = np.empty((len(mag_bins) - 1, len(color_bins) - 1), dtype=np.int64)
    # Seed NumPy's global RNG for reproducible subsampling.
    np.random.seed(seed)
    inds = np.arange(len(data))
    for n in tqdm(range(len(mag_bins) - 1), desc="magnitudes"):
        for m in tqdm(range(len(color_bins) - 1), desc="colors", leave=False):
            # Targets falling in this (magnitude, color) bin.
            mask = mag_bins[n] <= mag
            mask &= mag <= mag_bins[n + 1]
            mask &= color_bins[m] <= color
            mask &= color <= color_bins[m + 1]
            count[n, m] = mask.sum()
            # For small amounts of data
            if count[n, m] <= targets_per_fit:
                # Too few targets to fit at all: mark the bin as empty.
                if count[n, m] < 50:
                    mu[n, m, :] = np.nan
                    sigma[n, m, :] = np.nan
                    continue
                # One fit on all targets, broadcast to every iteration slot.
                svi = setup_model(sample_variance[mask])
                svi_result = svi.run(
                    random.PRNGKey(seed + n + m),
                    num_optim,
                    num_transit[mask],
                    statistic=(num_transit[mask] - 1) * sample_variance[mask],
                    progress_bar=False,
                )
                params = svi_result.params
                mu[n, m, :] = params["log_sigma_auto_loc"]
                sigma[n, m, :] = params["log_sigma_auto_scale"]
                continue
            # Large bin: repeat the fit on random subsamples of fixed size.
            for k in tqdm(range(num_iter), desc="iterations", leave=False):
                masked_inds = np.random.choice(
                    inds[mask],
                    size=targets_per_fit,
                    replace=mask.sum() <= targets_per_fit,
                )
                svi = setup_model(sample_variance[masked_inds])
                svi_result = svi.run(
                    random.PRNGKey(seed + n + m + k),
                    num_optim,
                    num_transit[masked_inds],
                    statistic=(num_transit[masked_inds] - 1)
                    * sample_variance[masked_inds],
                    progress_bar=False,
                )
                params = svi_result.params
                mu[n, m, k] = params["log_sigma_auto_loc"]
                sigma[n, m, k] = params["log_sigma_auto_scale"]
    return mu, sigma, count
if __name__ == "__main__":
    import argparse
    import yaml
    # Command line: which magnitude bin to fit, input/output FITS paths,
    # and a YAML config describing the grid and fit settings.
    parser = argparse.ArgumentParser()
    parser.add_argument("-b", "--mag-bin", required=True, type=int)
    parser.add_argument("-i", "--input", required=True, type=str)
    parser.add_argument("-o", "--output", required=True, type=str)
    parser.add_argument("-c", "--config", required=True, type=str)
    args = parser.parse_args()
    with open(args.config, "r") as f:
        config = yaml.load(f.read(), Loader=yaml.FullLoader)
    # Select this job's magnitude slice from the global grid.
    mag_bins = np.linspace(
        config["min_mag"], config["max_mag"], config["num_mag"] + 1
    )
    min_mag = mag_bins[args.mag_bin]
    max_mag = mag_bins[args.mag_bin + 1]
    data = load_data(
        data_path=args.input,
        min_nb_transits=config["min_nb_transits"],
        color_range=(
            config["min_color"],
            config["max_color"],
        ),
        mag_range=(min_mag, max_mag),
    )
    # Fit a single magnitude bin across the full color grid.
    mu, sigma, count = fit_data(
        data,
        num_mag_bins=1,
        num_color_bins=config["num_color"],
        color_range=(
            config["min_color"],
            config["max_color"],
        ),
        mag_range=(min_mag, max_mag),
        num_iter=config["num_iter"],
        targets_per_fit=config["targets_per_fit"],
        num_optim=config["num_optim"],
        seed=config["seed"],
    )
    # Save the results
    # Record the full configuration in the FITS header for provenance.
    hdr = fits.Header()
    hdr["min_tra"] = config["min_nb_transits"]
    hdr["min_col"] = config["min_color"]
    hdr["max_col"] = config["max_color"]
    hdr["num_col"] = config["num_color"]
    hdr["min_mag"] = config["min_mag"]
    hdr["max_mag"] = config["max_mag"]
    hdr["num_mag"] = config["num_mag"]
    hdr["num_itr"] = config["num_iter"]
    hdr["num_per"] = config["targets_per_fit"]
    hdr["num_opt"] = config["num_optim"]
    hdr["seed"] = config["seed"]
    fits.HDUList(
        [
            fits.PrimaryHDU(header=hdr),
            fits.ImageHDU(mu),
            fits.ImageHDU(sigma),
            fits.ImageHDU(count),
        ]
    ).writeto(args.output, overwrite=True)
| [
"numpy.sqrt",
"numpy.ascontiguousarray",
"numpy.isfinite",
"astropy.io.fits.open",
"numpyro.distributions.Normal",
"jax.random.PRNGKey",
"argparse.ArgumentParser",
"numpy.linspace",
"numpy.random.seed",
"numpyro.infer.Trace_ELBO",
"astropy.io.fits.PrimaryHDU",
"astropy.io.fits.ImageHDU",
"nu... | [((417, 458), 'jax.config.config.update', 'jax_config.update', (['"""jax_enable_x64"""', '(True)'], {}), "('jax_enable_x64', True)\n", (434, 458), True, 'from jax.config import config as jax_config\n'), ((1473, 1508), 'numpyro.optim.Adam', 'numpyro.optim.Adam', ([], {'step_size': '(0.001)'}), '(step_size=0.001)\n', (1491, 1508), False, 'import numpyro\n'), ((1902, 1938), 'numpy.isfinite', 'np.isfinite', (["data['phot_g_mean_mag']"], {}), "(data['phot_g_mean_mag'])\n", (1913, 1938), True, 'import numpy as np\n'), ((1948, 1974), 'numpy.isfinite', 'np.isfinite', (["data['bp_rp']"], {}), "(data['bp_rp'])\n", (1959, 1974), True, 'import numpy as np\n'), ((1984, 2030), 'numpy.isfinite', 'np.isfinite', (["data['dr2_radial_velocity_error']"], {}), "(data['dr2_radial_velocity_error'])\n", (1995, 2030), True, 'import numpy as np\n'), ((2696, 2760), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (["data['dr2_rv_nb_transits']"], {'dtype': 'np.int32'}), "(data['dr2_rv_nb_transits'], dtype=np.int32)\n", (2716, 2760), True, 'import numpy as np\n'), ((2785, 2858), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (["data['dr2_radial_velocity_error']"], {'dtype': 'np.float32'}), "(data['dr2_radial_velocity_error'], dtype=np.float32)\n", (2805, 2858), True, 'import numpy as np\n'), ((2954, 3017), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (["data['phot_g_mean_mag']"], {'dtype': 'np.float32'}), "(data['phot_g_mean_mag'], dtype=np.float32)\n", (2974, 3017), True, 'import numpy as np\n'), ((3030, 3083), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (["data['bp_rp']"], {'dtype': 'np.float32'}), "(data['bp_rp'], dtype=np.float32)\n", (3050, 3083), True, 'import numpy as np\n'), ((3145, 3202), 'numpy.linspace', 'np.linspace', (['mag_range[0]', 'mag_range[1]', '(num_mag_bins + 1)'], {}), '(mag_range[0], mag_range[1], num_mag_bins + 1)\n', (3156, 3202), True, 'import numpy as np\n'), ((3220, 3283), 'numpy.linspace', 'np.linspace', (['color_range[0]', 
'color_range[1]', '(num_color_bins + 1)'], {}), '(color_range[0], color_range[1], num_color_bins + 1)\n', (3231, 3283), True, 'import numpy as np\n'), ((3380, 3397), 'numpy.empty_like', 'np.empty_like', (['mu'], {}), '(mu)\n', (3393, 3397), True, 'import numpy as np\n'), ((3482, 3502), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (3496, 3502), True, 'import numpy as np\n'), ((5626, 5651), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (5649, 5651), False, 'import argparse\n'), ((6067, 6139), 'numpy.linspace', 'np.linspace', (["config['min_mag']", "config['max_mag']", "(config['num_mag'] + 1)"], {}), "(config['min_mag'], config['max_mag'], config['num_mag'] + 1)\n", (6078, 6139), True, 'import numpy as np\n'), ((6928, 6941), 'astropy.io.fits.Header', 'fits.Header', ([], {}), '()\n', (6939, 6941), False, 'from astropy.io import fits\n'), ((1841, 1861), 'astropy.io.fits.open', 'fits.open', (['data_path'], {}), '(data_path)\n', (1850, 1861), False, 'from astropy.io import fits\n'), ((685, 707), 'numpyro.distributions.Normal', 'dist.Normal', (['(0.0)', '(10.0)'], {}), '(0.0, 10.0)\n', (696, 707), True, 'import numpyro.distributions as dist\n'), ((1302, 1326), 'numpy.sqrt', 'np.sqrt', (['sample_variance'], {}), '(sample_variance)\n', (1309, 1326), True, 'import numpy as np\n'), ((1410, 1450), 'numpyro.infer.init_to_value', 'numpyro.infer.init_to_value', ([], {'values': 'init'}), '(values=init)\n', (1437, 1450), False, 'import numpyro\n'), ((1553, 1579), 'numpyro.infer.Trace_ELBO', 'numpyro.infer.Trace_ELBO', ([], {}), '()\n', (1577, 1579), False, 'import numpyro\n'), ((811, 833), 'numpyro.distributions.Normal', 'dist.Normal', (['(0.0)', '(10.0)'], {}), '(0.0, 10.0)\n', (822, 833), True, 'import numpyro.distributions as dist\n'), ((873, 905), 'jax.numpy.exp', 'jnp.exp', (['(2 * (log_k - log_sigma))'], {}), '(2 * (log_k - log_sigma))\n', (880, 905), True, 'import jax.numpy as jnp\n'), ((1249, 1275), 'numpy.median', 
'np.median', (['sample_variance'], {}), '(sample_variance)\n', (1258, 1275), True, 'import numpy as np\n'), ((1023, 1059), 'numpyro_ncx2.NoncentralChi2', 'NoncentralChi2', (['(num_transit - 1)', 'lam'], {}), '(num_transit - 1, lam)\n', (1037, 1059), False, 'from numpyro_ncx2 import NoncentralChi2\n'), ((4242, 4270), 'jax.random.PRNGKey', 'random.PRNGKey', (['(seed + n + m)'], {}), '(seed + n + m)\n', (4256, 4270), False, 'from jax import random\n'), ((5069, 5101), 'jax.random.PRNGKey', 'random.PRNGKey', (['(seed + n + m + k)'], {}), '(seed + n + m + k)\n', (5083, 5101), False, 'from jax import random\n'), ((7430, 7457), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', ([], {'header': 'hdr'}), '(header=hdr)\n', (7445, 7457), False, 'from astropy.io import fits\n'), ((7471, 7488), 'astropy.io.fits.ImageHDU', 'fits.ImageHDU', (['mu'], {}), '(mu)\n', (7484, 7488), False, 'from astropy.io import fits\n'), ((7502, 7522), 'astropy.io.fits.ImageHDU', 'fits.ImageHDU', (['sigma'], {}), '(sigma)\n', (7515, 7522), False, 'from astropy.io import fits\n'), ((7536, 7556), 'astropy.io.fits.ImageHDU', 'fits.ImageHDU', (['count'], {}), '(count)\n', (7549, 7556), False, 'from astropy.io import fits\n'), ((1112, 1134), 'jax.numpy.exp', 'jnp.exp', (['(2 * log_sigma)'], {}), '(2 * log_sigma)\n', (1119, 1134), True, 'import jax.numpy as jnp\n')] |
import numpy as np
import pandas as pd
from .ode import ODE
from .tools import Options_Container, gridmake
import time
class OCoptions(Options_Container):
    """Numerical solver options for an OCmodel, plus its iteration logging."""
    description = "Solver options for a OCmodel"

    def __init__(self, tol=np.sqrt(np.spacing(1)), maxit=80, show=True, nr=10):
        # tol   -- convergence tolerance on the coefficient change
        # maxit -- maximum number of solver iterations
        # show  -- whether progress is printed while solving
        # nr    -- grid-refinement factor used when reporting the solution
        self.tol = tol
        self.maxit = maxit
        self.show = show
        self.nr = nr

    def print_header(self):
        """Print the iteration-log table header (no-op when not verbose)."""
        if not self.show:
            return
        print('Solving optimal control model')
        print('{:4s} {:12s} {:8s}'.format('iter', 'change', 'time'))
        print('-' * 30)

    def print_current_iteration(self, it, change, tic):
        """Print one iteration-log row (no-op when not verbose)."""
        if not self.show:
            return
        print(f'{it:4d} {change:12.1e} {time.time() - tic:8.4f}')

    def print_last_iteration(self, tic, change):
        """Print the closing summary, flagging non-convergence (no-op when not verbose)."""
        if not self.show:
            return
        if change >= self.tol:
            print('Failure to converge in OCmodel.solve()')
        print(f'Elapsed Time = {time.time() - tic:7.2f} Seconds')
print(f'Elapsed Time = {time.time() - tic:7.2f} Seconds')
class OCmodel(object):
    """Continuous-time optimal control model solved by collocation.

    The value function V(s) is approximated on a functional basis and the
    Bellman-type equation  rho*V = f(s, x) + V'(s) g(s, x)  is solved by
    iterating on the basis coefficients (policy iteration).

    Attributes:
        Value   : basis approximant for the value function (project basis type)
        Policy  : basis approximant for the optimal policy
        rho     : continuous-time discount rate
        options : OCoptions with the numerical solver settings
        params  : extra parameters forwarded to the user model functions
    """
    def __init__(self, basis, control, reward, transition, rho=0.0, params=[]):
        """Build the model from user-supplied functions.

        Args:
            basis      : function-approximation basis; assumed to provide
                         duplicate(), nodes, Phi(), d, c, a, b, n -- TODO confirm
                         against the project's basis class.
            control    : x(s, Vs, *params) -> optimal control given value gradient
            reward     : f(s, x, *params) -> flow reward
            transition : g(s, x, *params) -> state drift ds/dt
            rho        : discount rate
            params     : extra positional args for the three functions.
                         NOTE(review): mutable default argument; it is only read
                         here, but params=None would be safer.
        """
        assert callable(control), 'control must be a function'
        assert callable(reward), 'reward must be a function'
        assert callable(transition), 'transition must be a function'
        # Bind the user functions with params frozen in (name-mangled privates).
        self.__x = lambda s, Vs: control(s, Vs, *params)
        self.__f = lambda s, x: reward(s, x, *params)
        self.__g = lambda s, x: transition(s, x, *params)
        # Value and policy functions
        self.Value = basis.duplicate()
        self.Policy = basis.duplicate()
        # Time parameters
        # self.time = OCtime(discount, horizon)
        self.rho = rho
        # Labels for model variables
        # self.labels = OClabels(basis.opts.labels, x, i, j)
        # Default numerical solution parameters and parameters for model functions
        self.options = OCoptions()
        self.params = params
    ''' <<<<<<<<<<<<<<<<<<< END OF CONSTRUCTOR >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>'''
    def solve(self, **kwargs):
        """Solve for the value-function coefficients by policy iteration.

        Keyword arguments override fields of self.options (tol, maxit, show, nr).
        Returns the refined-grid solution DataFrame from self.solution(), or
        None if NaNs/Infs appear in the coefficients.
        """
        tic = time.time()
        # Set user options to defaults, if not set by user with OPTSET (see above)
        self.options[kwargs.keys()] = kwargs.values()
        self.options.print_header()
        ds = self.Value.d  # dimension of state variable s
        s = self.Value.nodes  # collocation nodes
        order = np.eye(ds, dtype=int)  # one first-derivative order per state dimension
        # Derive interpolation matrices
        Phi1 = self.Value.Phi(order=np.eye(ds, dtype=int), dropdim=False)
        c = self.Value.c
        # Policy iteration
        for it in range(self.options.maxit):
            cold = c.copy()
            # Gradient of the value function at the collocation nodes.
            Vs = self.Value(s, order, dropdim=False)
            if Vs.shape[1] == 1:
                Vs = Vs[:, 0]
            else:
                print('ERROR CON LAS DIMENSIONES DE Vs')
            # Optimal control, reward and drift at the current value gradient.
            x = self.__x(s, Vs)
            f = self.__f(s, x)
            g = self.__g(s, x)
            # Collocation operator: B c = f with B = rho*Phi - sum_i diag(g_i) dPhi_i
            B = self.rho * self.Value.Phi()
            for _is in range(ds):
                B -= np.diag(g[_is]) @ Phi1[_is]
            c = np.linalg.solve(B, f.T).T
            self.Value.c = c
            if np.isnan(c).any() or np.isinf(c).any():
                print('NaNs or Infs encountered')
                return
            change = np.abs(c - cold).max()
            self.options.print_current_iteration(it, change, tic)
            if change < self.options.tol:
                break
        # NOTE(review): `change` is undefined here if maxit == 0 (loop never runs).
        self.options.print_last_iteration(tic, change)
        self.Policy.y = x
        return self.solution()
    def solution(self, nr=10, resid=True):
        """
        Computes solution over a refined grid

        Args:
            nr    : refinement factor (nr * n nodes per state dimension)
            resid : if True, also compute the Bellman-equation residual
        Returns:
            pandas DataFrame with columns for the state(s), 'value', 'control'
            and optionally 'resid'.
        """
        ds = self.Value.d
        labels = self.Value.opts._labels
        order = np.eye(ds, dtype=int)
        a = self.Value.a
        b = self.Value.b
        n = self.Value.n
        # Cartesian product of refined per-dimension grids.
        sr = np.atleast_2d(gridmake(*[np.linspace(a[i], b[i], nr * n[i]) for i in range(self.Value.d)]))
        ''' MAKE DATABASE'''
        # ADD CONTINUOUS STATE VARIABLE
        DATA = pd.DataFrame(sr.T, columns=labels)
        # SET INDEX FOR DATA
        if ds == 1:
            slab = DATA[labels[0]]
            DATA.index = slab
        # ADD VALUE FUNCTION
        DATA['value'] = self.Value(sr)
        Vs = self.Value(sr, order, dropdim=False)
        if Vs.shape[1] == 1:
            Vs = Vs[:, 0]
        else:
            print('ERROR CON LAS DIMENSIONES DE Vs')
        # ADD CONTROL
        xr = self.__x(sr, Vs)
        DATA['control'] = xr.T
        # ADD RESIDUAL IF REQUESTED
        if resid:
            # Residual of rho*V = f + V'(s) g on the refined grid.
            f = self.__f(sr, xr)
            g = self.__g(sr, xr)
            DATA['resid'] = self.rho * DATA['value'] - (f + (Vs * g).sum(axis=0)).flatten()
        return DATA
    def simulate(self, sinit, T, N=1000):
        """Simulate the controlled state dynamics from initial state(s) sinit.

        Args:
            sinit : initial state, (ds,) or (ds, nrep) array-like
            T     : simulation horizon
            N     : number of Runge-Kutta time steps
        Returns:
            DataFrame of the simulated state path with an added 'control' column.
        """
        # ****** 1: Preparation***********************************************************
        ds = self.Value.d
        # Determine number of replications nrep and periods nper to be simulated.
        sinit = np.atleast_2d(sinit).astype(float)
        ds2, nrep = sinit.shape
        assert ds == ds2, 'initial continous state must have {} rows'.format(ds)
        # ***** *2: Simulate the model ***************************************************
        # Closed-loop dynamics: drift evaluated at the solved policy.
        problem = ODE(lambda s: self.__g(s, self.Policy(s)), T, sinit)
        problem.rk4(N, self.Value.opts._labels)
        DATA = problem.x
        DATA['control'] = self.Policy(DATA.values)
        return DATA
| [
"numpy.atleast_2d",
"numpy.eye",
"numpy.linalg.solve",
"numpy.abs",
"numpy.diag",
"numpy.linspace",
"numpy.isnan",
"pandas.DataFrame",
"numpy.isinf",
"time.time",
"numpy.spacing"
] | [((2071, 2082), 'time.time', 'time.time', ([], {}), '()\n', (2080, 2082), False, 'import time\n'), ((2383, 2404), 'numpy.eye', 'np.eye', (['ds'], {'dtype': 'int'}), '(ds, dtype=int)\n', (2389, 2404), True, 'import numpy as np\n'), ((3751, 3772), 'numpy.eye', 'np.eye', (['ds'], {'dtype': 'int'}), '(ds, dtype=int)\n', (3757, 3772), True, 'import numpy as np\n'), ((4039, 4073), 'pandas.DataFrame', 'pd.DataFrame', (['sr.T'], {'columns': 'labels'}), '(sr.T, columns=labels)\n', (4051, 4073), True, 'import pandas as pd\n'), ((247, 260), 'numpy.spacing', 'np.spacing', (['(1)'], {}), '(1)\n', (257, 260), True, 'import numpy as np\n'), ((2482, 2503), 'numpy.eye', 'np.eye', (['ds'], {'dtype': 'int'}), '(ds, dtype=int)\n', (2488, 2503), True, 'import numpy as np\n'), ((3080, 3103), 'numpy.linalg.solve', 'np.linalg.solve', (['B', 'f.T'], {}), '(B, f.T)\n', (3095, 3103), True, 'import numpy as np\n'), ((5009, 5029), 'numpy.atleast_2d', 'np.atleast_2d', (['sinit'], {}), '(sinit)\n', (5022, 5029), True, 'import numpy as np\n'), ((3035, 3050), 'numpy.diag', 'np.diag', (['g[_is]'], {}), '(g[_is])\n', (3042, 3050), True, 'import numpy as np\n'), ((3285, 3301), 'numpy.abs', 'np.abs', (['(c - cold)'], {}), '(c - cold)\n', (3291, 3301), True, 'import numpy as np\n'), ((3150, 3161), 'numpy.isnan', 'np.isnan', (['c'], {}), '(c)\n', (3158, 3161), True, 'import numpy as np\n'), ((3171, 3182), 'numpy.isinf', 'np.isinf', (['c'], {}), '(c)\n', (3179, 3182), True, 'import numpy as np\n'), ((3887, 3921), 'numpy.linspace', 'np.linspace', (['a[i]', 'b[i]', '(nr * n[i])'], {}), '(a[i], b[i], nr * n[i])\n', (3898, 3921), True, 'import numpy as np\n'), ((716, 727), 'time.time', 'time.time', ([], {}), '()\n', (725, 727), False, 'import time\n'), ((949, 960), 'time.time', 'time.time', ([], {}), '()\n', (958, 960), False, 'import time\n')] |
import numpy as np
import numpy.random as rng
from abc import ABC, abstractmethod
from copy import deepcopy
import os
import logging
import math
def TPA_1(bmin, bmax, gibbsChain, tvd):
    """Run a single TPA pass and record a cooling schedule.

    Assumes the Hamiltonian is non-negative.

    params:
        bmin, bmax -> inverse-temperature range to traverse
        gibbsChain -> oracle providing approximate Gibbs samples
        tvd        -> total-variation-distance bound for sampling
    """
    beta = bmin
    schedule = []
    total_steps = 0
    while beta < bmax:
        draw = rng.random()
        gibbsChain.beta = beta
        mixingtime = gibbsChain.compute_mixingtime(tvd=tvd)
        gibbsChain.restart_and_sample(steps=mixingtime)
        total_steps += int(mixingtime)
        energy = gibbsChain.get_Hamiltonian(gibbsChain.current)
        if energy == 0:
            break
        beta -= np.log(draw) / energy
        if bmin <= beta <= bmax:
            schedule.append(beta)
    return {"steps": total_steps, "schedule": schedule}
def TPA_k_d(bmin, bmax, k, d, gibbsChain, tvd):
    """Pool k independent TPA runs into one thinned cooling schedule.

    Assumes H(x) >= 0.  The pooled schedule points are sorted and every
    d-th point (from a random starting offset) is kept, bracketed by
    bmin and bmax.

    params:
        bmin, bmax -> inverse-temperature range
        k          -> number of independent TPA runs to pool
        d          -> thinning stride between kept schedule points
        gibbsChain -> oracle providing approximate Gibbs samples
        tvd        -> total-variation-distance bound for sampling
    """
    pooled = []
    total_steps = 0
    for _ in range(k):
        run = TPA_1(bmin, bmax, gibbsChain, tvd)
        pooled.extend(run["schedule"])
        total_steps += run["steps"]
    pooled.sort()
    offset = rng.randint(0, d)
    # Every d-th pooled point starting at `offset`, bracketed by the endpoints.
    thinned = [bmin] + pooled[offset::d] + [bmax]
    return {"steps": total_steps, "schedule": thinned}
| [
"numpy.random.random",
"numpy.random.randint",
"numpy.log"
] | [((1532, 1549), 'numpy.random.randint', 'rng.randint', (['(0)', 'd'], {}), '(0, d)\n', (1543, 1549), True, 'import numpy.random as rng\n'), ((500, 512), 'numpy.random.random', 'rng.random', ([], {}), '()\n', (510, 512), True, 'import numpy.random as rng\n'), ((791, 800), 'numpy.log', 'np.log', (['u'], {}), '(u)\n', (797, 800), True, 'import numpy as np\n')] |
"""
Render a PAG + SC scene and generate a virtual slice.
Would be useful to plot the aspirated cells and visualize them in a slice using the CCF coordinates after registration with SHARP-TRACK.
Would also be possible to then slice the PAG at 25-50um thick sections to check how well do the subdivisions from manual registration (done by eye upon comparing with the Paxinos Atlas) match the ones from brainglobe's `structure_from_coords()` function.
"""
# %%
# https://github.com/marcomusy/vedo/issues/430#issuecomment-892719236
import brainrender
from brainrender import Scene, Animation
from vedo import settings as vsettings
from brainrender.video import VideoMaker
import numpy as np
# // DEFAULT SETTINGS //
# You can see all the default settings here: https://github.com/brainglobe/brainrender/blob/19c63b97a34336898871d66fb24484e8a55d4fa7/brainrender/settings.py
# --------------------------- brainrender settings --------------------------- #
# Change some of the default settings
brainrender.settings.BACKGROUND_COLOR = "white" # color of the background window (defaults to "white", try "blackboard")
brainrender.settings.DEFAULT_ATLAS = "allen_mouse_25um" # default atlas
brainrender.settings.DEFAULT_CAMERA = "three_quarters" # Default camera settings (orientation etc. see brainrender.camera.py)
brainrender.settings.INTERACTIVE = False # rendering interactive ?
brainrender.settings.LW = 2 # e.g. for silhouettes
brainrender.settings.ROOT_COLOR = [0.4, 0.4, 0.4] # color of the overall brain model's actor (defaults to [0.8, 0.8, 0.8])
brainrender.settings.ROOT_ALPHA = 0.2 # transparency of the overall brain model's actor (defaults to 0.2)
brainrender.settings.SCREENSHOT_SCALE = 1 # values >1 yield higher resolution screenshots
brainrender.settings.SHADER_STYLE = "cartoon" # affects the look of rendered brain regions, values can be: ["metallic", "plastic", "shiny", "glossy", "cartoon"] and can be changed in interactive mode
brainrender.settings.SHOW_AXES = False
brainrender.settings.WHOLE_SCREEN = True # If true render window is full screen
brainrender.settings.OFFSCREEN = False
# ------------------------------- vedo settings ------------------------------ #
# For transparent background with screenshots
vsettings.screenshotTransparentBackground = True # vedo for transparent bg
vsettings.useFXAA = False # This needs to be false for transparent bg
vsettings.immediateRendering = False
# // SET PARAMETERS //
# Save folder
save_folder = r"D:\Dropbox (UCL)\Project_transcriptomics\analysis\PAG_scRNAseq_brainrender\output"
# // CREATE SCENE //
scene = Scene(root = True, atlas_name = 'allen_mouse_10um', inset = False, title = 'PAG_areas_overview', screenshots_folder = save_folder, plotter = None)
# // ADD BRAIN REGIONS //
pag = scene.add_brain_region("PAG", alpha = 0.4, color = "darkgoldenrod", silhouette = None, hemisphere = "both")
sc = scene.add_brain_region("SCm", alpha = 0.4, color = "olivedrab", silhouette = None, hemisphere = "both")
# // MAKE SLICE //
slice_start = scene.root.mesh.centerOfMass() + np.array([+1000, 0, 0]) # X microns from center of mass towards the nose (if positive) or cerebellum (if negative)
slice_end = scene.root.mesh.centerOfMass() + np.array([+2000, 0, 0]) # X microns from center of mass towards the nose (if adding) or cerebellum (if subtracting)
for p, n in zip((slice_start, slice_end), (1, -1)):
plane = scene.atlas.get_plane(pos = p, norm = (n, 0, 0))
scene.slice(plane, actors = pag, close_actors = True)
scene.slice(plane, actors = sc, close_actors = True)
scene.slice(plane, actors = scene.root, close_actors = False)
# // RENDER INTERACTIVELY //
scene.render(interactive = True, camera = "frontal", zoom = 1)
# %%
| [
"numpy.array",
"brainrender.Scene"
] | [((2608, 2747), 'brainrender.Scene', 'Scene', ([], {'root': '(True)', 'atlas_name': '"""allen_mouse_10um"""', 'inset': '(False)', 'title': '"""PAG_areas_overview"""', 'screenshots_folder': 'save_folder', 'plotter': 'None'}), "(root=True, atlas_name='allen_mouse_10um', inset=False, title=\n 'PAG_areas_overview', screenshots_folder=save_folder, plotter=None)\n", (2613, 2747), False, 'from brainrender import Scene, Animation\n'), ((3074, 3097), 'numpy.array', 'np.array', (['[+1000, 0, 0]'], {}), '([+1000, 0, 0])\n', (3082, 3097), True, 'import numpy as np\n'), ((3234, 3257), 'numpy.array', 'np.array', (['[+2000, 0, 0]'], {}), '([+2000, 0, 0])\n', (3242, 3257), True, 'import numpy as np\n')] |
import numpy as np
def uniform_mutation(gen, *args):
    """Replace one randomly chosen gene with a uniform draw from its limits.

    Picks a random position in ``gen`` and sets ``gen.values`` at that
    position to a value drawn uniformly from ``[left, right]`` given by
    ``gen.limits``.  Mutates ``gen`` in place.
    """
    rnd_idx = np.random.randint(len(gen))
    left, right = gen.limits[rnd_idx]
    # Map a U[0, 1) draw onto [left, right].  (The previous code computed
    # rnd_val * ((left - right) + left), which falls outside the limits.)
    gen.values[rnd_idx] = left + np.random.rand() * (right - left)
def frontier_mutation(gen, *args):
    """Snap one randomly chosen gene to an endpoint of its limits.

    With probability 0.5 the selected position is set to its lower bound,
    otherwise to its upper bound.  Mutates ``gen`` in place.
    """
    coin = np.random.rand()
    pos = np.random.randint(len(gen))
    lower, upper = gen.limits[pos]
    gen.values[pos] = lower if coin < 0.5 else upper
def _non_uni_op(t, y, max_epoch, beta, *args):
rnd = np.random.rand()
return y * rnd * (1 - t/max_epoch) ** beta
def non_uniform_mutation(gen, epoch, max_epoch, beta, *args):
    """Perturb one gene by a non-uniform step that decays with epoch.

    With probability 0.5 the value moves toward its upper limit, otherwise
    toward its lower limit; the step magnitude shrinks as ``epoch``
    approaches ``max_epoch``.  Mutates ``gen`` in place.
    """
    direction_draw = np.random.rand()
    pos = np.random.randint(len(gen))
    lower, upper = gen.limits[pos]
    current = gen.values[pos]
    if direction_draw < 0.5:
        step = _non_uni_op(epoch, upper - current, max_epoch, beta)
    else:
        step = -_non_uni_op(epoch, current - lower, max_epoch, beta)
    gen.values[pos] = current + step
def arith_crossover(gen1, gen2, *args):
    """Blend two genomes' value vectors with a random convex weight.

    Draws w ~ U[0, 1) and replaces the genomes' values with the two
    complementary convex combinations w*g1 + (1-w)*g2 and
    (1-w)*g1 + w*g2.  Mutates both genomes in place.
    """
    w = np.random.rand()
    blended_a = w * gen1.values + (1 - w) * gen2.values
    blended_b = (1 - w) * gen1.values + w * gen2.values
    gen1.values, gen2.values = blended_a, blended_b
def simple_crossover(gen1, gen2, *args):
    """Swap the leading segments of two genomes' value vectors.

    Draws a cut point uniformly in [0, len(gen1)) and exchanges every
    value before the cut between the two genomes, in place.
    """
    cut = np.random.randint(len(gen1))
    for idx in range(cut):
        gen2.values[idx], gen1.values[idx] = gen1.values[idx], gen2.values[idx]
def heuristic_crossover(gen1, gen2, max_iter=100, *args):
    """Heuristic crossover: extrapolate past the better parent.

    Assumes ``gen2`` evaluates better than ``gen1`` and proposes
    ``rnd * (gen2.values - gen1.values) + gen2.values`` until the proposal
    falls inside every gene's ``(lower, upper)`` limits.

    Returns the proposed value vector, or ``None`` if no feasible proposal
    is found within ``max_iter`` attempts.
    """
    attempts = 0
    while True:
        rnd = np.random.rand()
        candidate = rnd * (gen2.values - gen1.values) + gen2.values
        # Bug fix: limits are stored as (lower, upper) -- see the mutation
        # operators in this module.  The old code unpacked them as (r, l),
        # making the test 'upper <= val <= lower', which is never true for a
        # proper range, so the function always returned None.
        if all(
            lower <= val <= upper
            for (lower, upper), val in zip(gen1.limits, candidate)
        ):
            return candidate
        attempts += 1
        if max_iter is not None and attempts >= max_iter:
            return None
# Registries mapping operator names to implementations, so callers can look
# up crossover / mutation operators by string key.
crossovers = {
    "arith_crossover": arith_crossover,
    "simple_crossover": simple_crossover,
    "heuristic_crossover": heuristic_crossover,
}
mutations = {
    "uniform_mutation": uniform_mutation,
    "frontier_mutation": frontier_mutation,
    "non_uniform_mutation": non_uniform_mutation,
}
"numpy.random.rand"
] | [((111, 127), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (125, 127), True, 'import numpy as np\n'), ((285, 301), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (299, 301), True, 'import numpy as np\n'), ((541, 557), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (555, 557), True, 'import numpy as np\n'), ((679, 695), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (693, 695), True, 'import numpy as np\n'), ((1057, 1073), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1071, 1073), True, 'import numpy as np\n'), ((1569, 1585), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1583, 1585), True, 'import numpy as np\n')] |
# --------------------------------------------------------
# Copyright (C) 2020 NVIDIA Corporation. All rights reserved.
# Nvidia Source Code License-NC
# Code written by <NAME> and <NAME>
# --------------------------------------------------------
import torch
import os
from torch import distributed, nn
from collections import defaultdict
import random
import numpy as np
from PIL import Image
import torchvision.transforms as T
import imageio
from collections.abc import Iterable
def create_folder(directory):
    """Create *directory* (including parents) if it does not already exist.

    Uses ``exist_ok=True`` so concurrent callers cannot race between the
    existence check and the creation (the previous check-then-create
    pattern could raise ``FileExistsError`` under such a race).
    """
    os.makedirs(directory, exist_ok=True)
def distributed_is_initialized():
    """Return True iff torch.distributed is available and initialized."""
    return distributed.is_available() and distributed.is_initialized()
def lr_policy(lr_fn):
    """Wrap an LR schedule function into an optimizer setter.

    The returned callable evaluates ``lr_fn(iteration, epoch)`` and writes
    the result into every parameter group of the optimizer.
    """
    def _apply(optimizer, iteration, epoch):
        new_lr = lr_fn(iteration, epoch)
        for group in optimizer.param_groups:
            group['lr'] = new_lr
    return _apply
def lr_cosine_policy(base_lr, warmup_length, epochs):
    """Cosine-annealed learning-rate schedule with a linear warmup.

    During the first ``warmup_length`` epochs the LR ramps linearly up to
    ``base_lr``; afterwards it follows a half-cosine decay toward zero at
    ``epochs``.
    """
    def _schedule(iteration, epoch):
        if epoch < warmup_length:
            return base_lr * (epoch + 1) / warmup_length
        e = epoch - warmup_length
        es = epochs - warmup_length
        return 0.5 * (1 + np.cos(np.pi * e / es)) * base_lr
    return lr_policy(_schedule)
def beta_policy(mom_fn):
    """Wrap a momentum schedule into an optimizer option setter.

    The returned callable writes ``mom_fn(iteration, epoch)`` into entry
    ``indx`` of option ``param`` (e.g. ``betas``) of every param group.
    """
    def _apply(optimizer, iteration, epoch, param, indx):
        value = mom_fn(iteration, epoch)
        for group in optimizer.param_groups:
            group[param][indx] = value
    return _apply
def mom_cosine_policy(base_beta, warmup_length, epochs):
    """Momentum schedule: linear warmup to ``base_beta``, then constant."""
    def _schedule(iteration, epoch):
        if epoch < warmup_length:
            return base_beta * (epoch + 1) / warmup_length
        return base_beta
    return beta_policy(_schedule)
def clip(image_tensor, use_fp16=False):
    """Clamp a normalized image batch into the valid pixel range per channel.

    For each RGB channel c (ImageNet statistics) the valid normalized range
    is [-mean[c]/std[c], (1 - mean[c])/std[c]].  Modifies ``image_tensor``
    in place and returns it.
    """
    dtype = np.float16 if use_fp16 else np.float64
    mean = np.array([0.485, 0.456, 0.406], dtype=dtype)
    std = np.array([0.229, 0.224, 0.225], dtype=dtype)
    for channel in range(3):
        m, s = mean[channel], std[channel]
        image_tensor[:, channel] = torch.clamp(image_tensor[:, channel], -m / s, (1 - m) / s)
    return image_tensor
def denormalize(image_tensor, use_fp16=False):
    """Undo ImageNet normalization and clamp the result into [0, 1].

    Applies x * std + mean per RGB channel, in place, and returns the
    tensor.
    """
    dtype = np.float16 if use_fp16 else np.float64
    mean = np.array([0.485, 0.456, 0.406], dtype=dtype)
    std = np.array([0.229, 0.224, 0.225], dtype=dtype)
    for channel in range(3):
        m, s = mean[channel], std[channel]
        image_tensor[:, channel] = torch.clamp(image_tensor[:, channel] * s + m, 0, 1)
    return image_tensor
def batch_images_as_tensors(paths):
    """Load images from *paths*, resize, normalize and stack into one batch.

    Each image is converted to RGB, resized to 256x128, converted to a
    tensor and normalized with ImageNet statistics; the results are
    concatenated along a new leading batch dimension.
    """
    transform = T.Compose([
        T.Resize((256, 128)),
        T.ToTensor(),
        T.Normalize(
            mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225]
        ),
    ])
    frames = [transform(Image.open(p).convert('RGB')).unsqueeze(dim=0) for p in paths]
    return torch.cat(frames, dim=0)
class GIFGenerator(object):
    """Collect matplotlib figure snapshots and merge them into a GIF."""

    def __init__(self, fig):
        self.fig = fig
        self.snapshots = []

    def reset_fig(self, fig):
        """Point the generator at a new figure."""
        self.fig = fig

    def snapshot(self):
        """Render the current figure and store it as an RGB frame."""
        self.fig.canvas.draw()
        buffer, (width, height) = self.fig.canvas.print_to_buffer()
        frame = Image.frombuffer("RGBA", size=(width, height), data=buffer).convert('RGB')
        self.snapshots.append(frame)

    def merge(self, path, duration=0.2):
        """Write all collected frames to *path* as an animated GIF and reset."""
        assert len(self.snapshots) > 0, "Empty snapshots."
        imageio.mimsave(path, self.snapshots, 'GIF', duration=duration)
        del self.snapshots
        self.snapshots = []
class InversionMetricSampler(object):
    """Infinite sampler yielding identity-balanced (inputs, targets) batches.

    Every batch contains ``identities_per_batch`` distinct identities with
    ``bs // identities_per_batch`` samples drawn per identity.
    """

    def __init__(self, inputs, targets, bs=64, identities_per_batch=8):
        assert inputs.size(0) == targets.size(0), "Size of inputs must be equal to that of targets."
        assert bs % identities_per_batch == 0, "Bs should be divisible by identities_per_batch."
        self.inputs = inputs
        self.targets = targets
        self.bs = bs
        self.ids_per_batch = identities_per_batch
        self.samples_per_id = self.bs // self.ids_per_batch
        self.ids = self.targets.squeeze().unique().tolist()
        # Map each identity to the list of row indices carrying it.
        self.ids2index = defaultdict(list)
        for row, identity in enumerate(self.targets):
            self.ids2index[identity.item()].append(row)

    def __iter__(self):
        return self

    def __next__(self):
        chosen_ids = random.sample(self.ids, self.ids_per_batch)
        per_id = [
            torch.tensor(random.sample(self.ids2index[identity], self.samples_per_id), dtype=torch.long)
            for identity in chosen_ids
        ]
        gathered = torch.cat(per_id, dim=0)
        return self.inputs[gathered], self.targets[gathered]
class InversionSampler(object):
    """Infinite sampler yielding shuffled (inputs, targets) batches.

    Consumes a shuffled index pool batch by batch and reshuffles a fresh
    pool whenever fewer than ``bs`` indices remain.
    """

    def __init__(self, inputs, targets, bs=64):
        assert inputs.size(0) == targets.size(0), "Size of inputs must be equal to that of targets."
        self.inputs = inputs
        self.targets = targets
        self.bs = bs
        self.pool = list(range(len(self.targets)))
        random.shuffle(self.pool)

    def __iter__(self):
        return self

    def __next__(self):
        if len(self.pool) < self.bs:
            # Refill and reshuffle once the pool runs low.
            self.pool = list(range(len(self.targets)))
            random.shuffle(self.pool)
        batch, self.pool = self.pool[:self.bs], self.pool[self.bs:]
        return self.inputs[batch], self.targets[batch]
class EmbeddingsContainer(object):
    """Collect per-identity embeddings produced by a model over a dataloader.

    After construction, ``container`` maps each person id (int) to a tensor of
    all embeddings observed for that id, concatenated along dim 0.
    NOTE(review): requires a CUDA device (``_parse_data`` calls ``.cuda()``) --
    confirm this is intended for CPU-only environments.
    """
    def __init__(self, dataloader, model):
        """Run *model* over *dataloader* and bucket embeddings by person id.

        Args:
            dataloader: iterable of batches; each batch is unpacked as
                        (imgs, _, pids, _, *rest) -- see _parse_data.
            model     : callable returning a dict with an 'embedding' entry.
        """
        self.model = model
        self.container = defaultdict(list)
        self.model.eval()
        with torch.no_grad():
            for i, inputs in enumerate(dataloader, 1):
                inputs, pids = self._parse_data(inputs)
                embeddings = self.model(inputs)['embedding']
                for embedding, pid in zip(embeddings, pids):
                    self.container[int(pid)].append(embedding.unsqueeze(dim=0).detach())
                print(f"Loading embeddings [{i}] / [{len(dataloader)}] ...")
        # Collapse each id's list of (1, D) tensors into a single (N_id, D) tensor.
        # NOTE(review): this replaces the defaultdict with a plain dict, so
        # update() with an unseen pid will raise KeyError -- confirm whether
        # new pids are ever expected after construction.
        self.container = {pid: torch.cat(embeddings, dim=0) for pid, embeddings in self.container.items()}
    def update(self, embeddings, pids):
        """Append new embeddings to the tensors of their (already known) pids."""
        print(f"Updating {embeddings.size(0)} embeddings ...")
        for embedding, pid in zip(embeddings, pids):
            self.container[int(pid)] = torch.cat([self.container[int(pid)], embedding.unsqueeze(dim=0).detach()], dim=0)
    def _parse_data(self, inputs):
        # Batch layout assumed: (imgs, _, pids, _, *rest) -- TODO confirm
        # against the dataloader used by the project.
        imgs, _, pids, _, *_ = inputs
        inputs = imgs.cuda()
        pids = pids.cuda()
        return inputs, pids
    def __getitem__(self, key):
        """Look up embeddings by a single int pid or any iterable of pids."""
        if isinstance(key, int):
            return self.container[key]
        elif isinstance(key, Iterable):
            return [self.container[int(individual_key)] for individual_key in key]
        else:
            raise RuntimeError("Only Int or Iterable instance is allowed for subscript.")
if __name__ == '__main__':
    # Manual smoke test: batch a single local image into a tensor.
    # NOTE(review): hard-coded absolute path -- only valid on the author's machine.
    p = ['/home/luyichen/datasets/Market-1501-v15.09.15/bounding_box_train/0002_c1s1_000551_01.jpg']
    b = batch_images_as_tensors(p)
    pass
"os.path.exists",
"random.sample",
"PIL.Image.open",
"PIL.Image.frombuffer",
"random.shuffle",
"os.makedirs",
"torch.distributed.is_initialized",
"numpy.array",
"torch.cat",
"torch.no_grad",
"collections.defaultdict",
"numpy.cos",
"torchvision.transforms.Normalize",
"torchvision.transforms... | [((628, 654), 'torch.distributed.is_available', 'distributed.is_available', ([], {}), '()\n', (652, 654), False, 'from torch import distributed, nn\n'), ((2916, 2982), 'torchvision.transforms.Normalize', 'T.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (2927, 2982), True, 'import torchvision.transforms as T\n'), ((527, 552), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (541, 552), False, 'import os\n'), ((562, 584), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (573, 584), False, 'import os\n'), ((667, 695), 'torch.distributed.is_initialized', 'distributed.is_initialized', ([], {}), '()\n', (693, 695), False, 'from torch import distributed, nn\n'), ((1982, 2031), 'numpy.array', 'np.array', (['[0.485, 0.456, 0.406]'], {'dtype': 'np.float16'}), '([0.485, 0.456, 0.406], dtype=np.float16)\n', (1990, 2031), True, 'import numpy as np\n'), ((2046, 2095), 'numpy.array', 'np.array', (['[0.229, 0.224, 0.225]'], {'dtype': 'np.float16'}), '([0.229, 0.224, 0.225], dtype=np.float16)\n', (2054, 2095), True, 'import numpy as np\n'), ((2121, 2152), 'numpy.array', 'np.array', (['[0.485, 0.456, 0.406]'], {}), '([0.485, 0.456, 0.406])\n', (2129, 2152), True, 'import numpy as np\n'), ((2167, 2198), 'numpy.array', 'np.array', (['[0.229, 0.224, 0.225]'], {}), '([0.229, 0.224, 0.225])\n', (2175, 2198), True, 'import numpy as np\n'), ((2282, 2334), 'torch.clamp', 'torch.clamp', (['image_tensor[:, c]', '(-m / s)', '((1 - m) / s)'], {}), '(image_tensor[:, c], -m / s, (1 - m) / s)\n', (2293, 2334), False, 'import torch\n'), ((2489, 2538), 'numpy.array', 'np.array', (['[0.485, 0.456, 0.406]'], {'dtype': 'np.float16'}), '([0.485, 0.456, 0.406], dtype=np.float16)\n', (2497, 2538), True, 'import numpy as np\n'), ((2553, 2602), 'numpy.array', 'np.array', (['[0.229, 0.224, 0.225]'], {'dtype': 'np.float16'}), '([0.229, 
0.224, 0.225], dtype=np.float16)\n', (2561, 2602), True, 'import numpy as np\n'), ((2628, 2659), 'numpy.array', 'np.array', (['[0.485, 0.456, 0.406]'], {}), '([0.485, 0.456, 0.406])\n', (2636, 2659), True, 'import numpy as np\n'), ((2674, 2705), 'numpy.array', 'np.array', (['[0.229, 0.224, 0.225]'], {}), '([0.229, 0.224, 0.225])\n', (2682, 2705), True, 'import numpy as np\n'), ((2790, 2835), 'torch.clamp', 'torch.clamp', (['(image_tensor[:, c] * s + m)', '(0)', '(1)'], {}), '(image_tensor[:, c] * s + m, 0, 1)\n', (2801, 2835), False, 'import torch\n'), ((3776, 3839), 'imageio.mimsave', 'imageio.mimsave', (['path', 'self.snapshots', '"""GIF"""'], {'duration': 'duration'}), "(path, self.snapshots, 'GIF', duration=duration)\n", (3791, 3839), False, 'import imageio\n'), ((4481, 4498), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (4492, 4498), False, 'from collections import defaultdict\n'), ((4695, 4738), 'random.sample', 'random.sample', (['self.ids', 'self.ids_per_batch'], {}), '(self.ids, self.ids_per_batch)\n', (4708, 4738), False, 'import random\n'), ((4915, 4940), 'torch.cat', 'torch.cat', (['indices'], {'dim': '(0)'}), '(indices, dim=0)\n', (4924, 4940), False, 'import torch\n'), ((5323, 5348), 'random.shuffle', 'random.shuffle', (['self.pool'], {}), '(self.pool)\n', (5337, 5348), False, 'import random\n'), ((5818, 5835), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (5829, 5835), False, 'from collections import defaultdict\n'), ((3044, 3064), 'torchvision.transforms.Resize', 'T.Resize', (['(256, 128)'], {}), '((256, 128))\n', (3052, 3064), True, 'import torchvision.transforms as T\n'), ((3074, 3086), 'torchvision.transforms.ToTensor', 'T.ToTensor', ([], {}), '()\n', (3084, 3086), True, 'import torchvision.transforms as T\n'), ((5523, 5548), 'random.shuffle', 'random.shuffle', (['self.pool'], {}), '(self.pool)\n', (5537, 5548), False, 'import random\n'), ((5876, 5891), 'torch.no_grad', 'torch.no_grad', ([], {}), 
'()\n', (5889, 5891), False, 'import torch\n'), ((6324, 6352), 'torch.cat', 'torch.cat', (['embeddings'], {'dim': '(0)'}), '(embeddings, dim=0)\n', (6333, 6352), False, 'import torch\n'), ((4771, 4831), 'random.sample', 'random.sample', (['self.ids2index[batch_id]', 'self.samples_per_id'], {}), '(self.ids2index[batch_id], self.samples_per_id)\n', (4784, 4831), False, 'import random\n'), ((3141, 3157), 'PIL.Image.open', 'Image.open', (['path'], {}), '(path)\n', (3151, 3157), False, 'from PIL import Image\n'), ((3599, 3650), 'PIL.Image.frombuffer', 'Image.frombuffer', (['"""RGBA"""'], {'size': '(w, h)', 'data': 'image_b'}), "('RGBA', size=(w, h), data=image_b)\n", (3615, 3650), False, 'from PIL import Image\n'), ((1244, 1266), 'numpy.cos', 'np.cos', (['(np.pi * e / es)'], {}), '(np.pi * e / es)\n', (1250, 1266), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import numpy as np
def largest_prime_factor(n):
    """Return the largest prime factor of ``n``.

    For n < 2 the input is returned unchanged (no factors to strip).
    """
    divisor = 2
    # Strip each factor as long as a divisor no larger than sqrt(n) exists;
    # whatever remains is the largest prime factor.
    while divisor * divisor <= n:
        if n % divisor == 0:
            n //= divisor
        else:
            divisor += 1
    return n
def acf_slow(d):
    """ Calculate the autocorrelation function of a time series. The speed of this method is O(n^2)

    :param d: numpy array of length n, with time series values {x1, x2 ... xn}

    :return: normalized autocorrelation function
    """
    # Work on a de-meaned copy so the caller's array is not modified
    # (the previous implementation subtracted the mean in place).
    d = d - d.mean(axis=0)
    n_points = d.shape[0]
    autocorr = np.zeros(n_points)
    for lag in range(n_points):  # cycle through lags
        # Vectorized dot product replaces the inner Python loop.
        autocorr[lag] = np.dot(d[:n_points - lag], d[lag:]) / (n_points - lag)
    return autocorr / d.var()
return autocorr
def acf(t, largest_prime=500):
    """ Quickly calculate the autocorrelation function of a time series, t. Gives the same result
    as acf_slow() but uses FFTs, so it is much faster than numpy.correlate for long series.

    :param t: time series : ndarray [npoints] or [npoints, nseries]
    :param largest_prime: the largest prime factor of the FFT length allowed. The smaller the
    faster, at the cost of truncating a few trailing data points.

    :return: normalized autocorrelation function with the same trailing dimensions as t
    """
    # Copy as float so the in-place demeaning below is safe even for integer input.
    T = np.array(t, dtype=float)
    # Don't allow a prime factor larger than 'largest_prime'. Truncate data until that
    # condition is met (FFTs are slow for lengths with large prime factors).
    l = 2 * T.shape[0] - 1
    while largest_prime_factor(l) >= largest_prime or l % 2 == 0:
        l -= 1
    T = T[:(l + 1) // 2, ...]  # '...' allows for no second dimension if only a single time series is analysed
    length = T.shape[0] * 2 - 1
    T -= np.mean(T, axis=0)
    # Wiener-Khinchin: the autocorrelation is the inverse FFT of the power spectrum.
    fftx = np.fft.fft(T, n=length, axis=0)
    ret = np.fft.ifft(fftx * np.conjugate(fftx), axis=0)
    ret = np.fft.fftshift(ret, axes=(0,))
    autocorr_fxn = ret[length // 2:].real
    # Divide lag k by the number of overlapping points (N - k).  The counts are reshaped to a
    # column vector so broadcasting also works for multi-series (2-D) input; the previous
    # '[:, ...]' indexing left a 1-D divisor that failed to broadcast against (N, nseries).
    counts = np.arange(T.shape[0], 0, -1).reshape((-1,) + (1,) * (autocorr_fxn.ndim - 1))
    autocorr_fxn /= counts
    autocorr_fxn /= np.var(T, axis=0)
    return autocorr_fxn  # normalized
return autocorr_fxn # normalized
def autocov(joint_distribution):
    """Autocovariance of an ensemble of time-series realizations, by lag.

    See pp. 45-46 of Time Series Analysis: the autocovariance at lag j is
    E[(y_t - mu)(y_{t-j} - mu)], estimated here by averaging over the
    independent realizations (rows).

    :param joint_distribution: n x m numpy array -- n independent realizations
    of a time series with m observations each

    :return: autocovariance of the joint distribution as a function of lag j
    """
    n_obs = joint_distribution.shape[1]
    result = np.zeros(n_obs)
    for lag in range(n_obs):
        dev_t = joint_distribution[:, -1] - joint_distribution[:, -1].mean()
        # NOTE(review): for lag == 0 the index -lag selects the FIRST column,
        # not the last -- verify this matches the intended lag convention.
        dev_lag = joint_distribution[:, -lag] - joint_distribution[:, -lag].mean()
        result[lag] = (dev_t * dev_lag).mean()
    return result
"numpy.mean",
"numpy.arange",
"numpy.fft.fft",
"numpy.conjugate",
"numpy.array",
"numpy.zeros",
"numpy.fft.fftshift",
"numpy.var"
] | [((1351, 1362), 'numpy.array', 'np.array', (['t'], {}), '(t)\n', (1359, 1362), True, 'import numpy as np\n'), ((1731, 1749), 'numpy.mean', 'np.mean', (['T'], {'axis': '(0)'}), '(T, axis=0)\n', (1738, 1749), True, 'import numpy as np\n'), ((1762, 1793), 'numpy.fft.fft', 'np.fft.fft', (['T'], {'n': 'length', 'axis': '(0)'}), '(T, n=length, axis=0)\n', (1772, 1793), True, 'import numpy as np\n'), ((1861, 1892), 'numpy.fft.fftshift', 'np.fft.fftshift', (['ret'], {'axes': '(0,)'}), '(ret, axes=(0,))\n', (1876, 1892), True, 'import numpy as np\n'), ((2013, 2030), 'numpy.var', 'np.var', (['T'], {'axis': '(0)'}), '(T, axis=0)\n', (2019, 2030), True, 'import numpy as np\n'), ((2900, 2924), 'numpy.zeros', 'np.zeros', (['[observations]'], {}), '([observations])\n', (2908, 2924), True, 'import numpy as np\n'), ((1956, 1984), 'numpy.arange', 'np.arange', (['T.shape[0]', '(0)', '(-1)'], {}), '(T.shape[0], 0, -1)\n', (1965, 1984), True, 'import numpy as np\n'), ((1823, 1841), 'numpy.conjugate', 'np.conjugate', (['fftx'], {}), '(fftx)\n', (1835, 1841), True, 'import numpy as np\n')] |
import numpy as np
import scipy.stats
from scipy.interpolate import interp1d, UnivariateSpline
NFILES = 5
FILENAME = "omw_" # prefix for files followed by filenumber
class Prior_class(object):
    '''Prior distribution over a single model parameter.

    Supports the analytic priors "gamma", "normal" and "uniform", plus a
    "nonstandard" prior built from discrete samples read from MCMC chain
    files (e.g. a Planck/WMAP chain).
    '''

    def __init__(self, priorname, hyperparams):
        '''Input:
        priorname - one of "gamma", "normal", "uniform", "nonstandard"
        hyperparams - hyperparameters of the chosen prior:
            gamma: [shape, rate]; normal: [mean, std]; uniform: [min, max];
            nonstandard: [column number to read from the chain files]
        '''
        self.priorname = priorname
        self.hyperparams = hyperparams
        if self.priorname == "nonstandard":
            # Sample from a non-standard discrete pdf, e.g. a Planck/WMAP chain;
            # the first hyperparameter is the file column to read (see NFILES/FILENAME).
            self.read_data(hyperparams[0])
            self.inv_transform_spline()
            self.pdf_spline()

    def read_data(self, colnum):
        '''Only for "nonstandard". Read the discrete parameter samples from file.
        Input: colnum: column number to be read from each file
        '''
        self.param = []
        for i in range(1, NFILES):
            d = np.loadtxt(FILENAME + str(i) + ".txt")
            for value in d[:, colnum]:
                self.param.append(value)

    def inv_transform_spline(self):
        '''Only for "nonstandard". Build a spline of the inverse CDF so random
        variables can be drawn by inverse-transform sampling.
        Warning: user should check that the spline faithfully matches the actual cdf.
        '''
        srt_param = np.sort(self.param)
        cdf = np.array(range(len(self.param))) / float(len(self.param))
        # create a spline
        self.spline2_cdf = UnivariateSpline(cdf, srt_param, k=5)

    def pdf_spline(self):
        '''Only for "nonstandard". Build a spline of the normalised PDF of the
        discrete parameter values.
        Warning: user should check that the spline faithfully matches the actual pdf.
        '''
        # np.histogram's `normed` keyword was deprecated and removed in NumPy >= 1.24;
        # `density=True` is the supported equivalent (normalised histogram).
        hist, nbins = np.histogram(self.param, density=True, bins=200)
        self.spline2_pdf = interp1d(nbins[1:], hist)

    def return_priorprob(self, value):
        '''Input:
        value - random variable
        Returns:
        probability density of `value` under the prior distribution
        '''
        if self.priorname == "gamma":
            # scipy parametrises gamma by scale = 1 / rate.
            scale = 1. / self.hyperparams[1]
            return scipy.stats.gamma.pdf(value, self.hyperparams[0], scale=scale)
        elif self.priorname == "normal":
            return scipy.stats.norm.pdf(value, loc=self.hyperparams[0], scale=self.hyperparams[1])
        elif self.priorname == "uniform":
            width = self.hyperparams[1] - self.hyperparams[0]
            return scipy.stats.uniform.pdf(value, loc=self.hyperparams[0], scale=width)
        elif self.priorname == "nonstandard":
            return self.spline2_pdf(value)

    def prior(self):
        '''
        Returns a random variable drawn from the prior distribution
        '''
        # Reseed from OS entropy so forked worker processes do not share a stream.
        np.random.seed()
        if self.priorname == "gamma":
            k = self.hyperparams[0]
            scale = 1. / self.hyperparams[1]
            return float(np.random.gamma(k, scale))
        elif self.priorname == "normal":
            return float(np.random.normal(self.hyperparams[0], self.hyperparams[1], size=1))
        elif self.priorname == "uniform":
            return float(np.random.uniform(low=self.hyperparams[0], high=self.hyperparams[1], size=1))
        elif self.priorname == "nonstandard":
            uni_rvs = np.random.uniform()
            return float(self.spline2_cdf(uni_rvs))
| [
"numpy.random.normal",
"numpy.histogram",
"numpy.sort",
"scipy.interpolate.interp1d",
"numpy.random.gamma",
"numpy.random.uniform",
"numpy.random.seed",
"scipy.interpolate.UnivariateSpline"
] | [((1800, 1819), 'numpy.sort', 'np.sort', (['self.param'], {}), '(self.param)\n', (1807, 1819), True, 'import numpy as np\n'), ((1966, 2003), 'scipy.interpolate.UnivariateSpline', 'UnivariateSpline', (['cdf', 'srt_param'], {'k': '(5)'}), '(cdf, srt_param, k=5)\n', (1982, 2003), False, 'from scipy.interpolate import interp1d, UnivariateSpline\n'), ((2288, 2335), 'numpy.histogram', 'np.histogram', (['self.param'], {'normed': '(True)', 'bins': '(200)'}), '(self.param, normed=True, bins=200)\n', (2300, 2335), True, 'import numpy as np\n'), ((2369, 2394), 'scipy.interpolate.interp1d', 'interp1d', (['nbins[1:]', 'hist'], {}), '(nbins[1:], hist)\n', (2377, 2394), False, 'from scipy.interpolate import interp1d, UnivariateSpline\n'), ((3442, 3458), 'numpy.random.seed', 'np.random.seed', ([], {}), '()\n', (3456, 3458), True, 'import numpy as np\n'), ((3642, 3667), 'numpy.random.gamma', 'np.random.gamma', (['k', 'scale'], {}), '(k, scale)\n', (3657, 3667), True, 'import numpy as np\n'), ((3753, 3819), 'numpy.random.normal', 'np.random.normal', (['self.hyperparams[0]', 'self.hyperparams[1]'], {'size': '(1)'}), '(self.hyperparams[0], self.hyperparams[1], size=1)\n', (3769, 3819), True, 'import numpy as np\n'), ((3905, 3981), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': 'self.hyperparams[0]', 'high': 'self.hyperparams[1]', 'size': '(1)'}), '(low=self.hyperparams[0], high=self.hyperparams[1], size=1)\n', (3922, 3981), True, 'import numpy as np\n'), ((4069, 4088), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (4086, 4088), True, 'import numpy as np\n')] |
# Copyright (c) 2016, Science and Technology Facilities Council
# This software is distributed under a BSD licence. See LICENSE.txt.
"""
mrcinterpreter
--------------
Module which exports the :class:`MrcInterpreter` class.
Classes:
:class:`MrcInterpreter`: An object which can interpret an I/O stream as MRC
data.
"""
# Import Python 3 features for future-proofing
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import warnings
import numpy as np
from . import utils
from .dtypes import HEADER_DTYPE, FEI_EXTENDED_HEADER_DTYPE
from .mrcobject import MrcObject
from .constants import MAP_ID
class MrcInterpreter(MrcObject):
    """An object which interprets an I/O stream as MRC / CCP4 map data.
    The header and data are handled as numpy arrays - see
    :class:`~mrcfile.mrcobject.MrcObject` for details.
    :class:`MrcInterpreter` can be used directly, but it is mostly intended as
    a superclass to provide common stream-handling functionality. This can be
    used by subclasses which will handle opening and closing the stream.
    This class implements the :meth:`__enter__` and :meth:`__exit__` special
    methods which allow it to be used by the Python context manager in a
    :keyword:`with` block. This ensures that :meth:`close` is called after the
    object is finished with.
    When reading the I/O stream, a :class:`~exceptions.ValueError` is raised if
    the data is invalid in one of the following ways:
    #. The header's ``map`` field is not set correctly to confirm the file
       type.
    #. The machine stamp is invalid and so the data's byte order cannot be
       determined.
    #. The mode number is not recognised. Currently accepted modes are 0, 1, 2,
       4 and 6.
    #. The data block is not large enough for the specified data type and
       dimensions.
    :class:`MrcInterpreter` offers a permissive read mode for handling
    problematic files. If ``permissive`` is set to :data:`True` and any of the
    validity checks fails, a :mod:`warning <warnings>` is issued instead of an
    exception, and file interpretation continues. If the mode number is invalid
    or the data block is too small, the :attr:`data` attribute will be set to
    :data:`None`. In this case, it might be possible to inspect and correct the
    header, and then call :meth:`_read` again to read the data correctly. See
    the :doc:`usage guide <../usage_guide>` for more details.
    Methods:
    * :meth:`flush`
    * :meth:`close`
    Methods relevant to subclasses:
    * :meth:`_read`
    * :meth:`_read_data`
    """
    def __init__(self, iostream=None, permissive=False, **kwargs):
        """Initialise a new MrcInterpreter object.
        This initialiser reads the stream if it is given. In general,
        subclasses should call :meth:`super().__init__` without giving an
        ``iostream`` argument, then set the :attr:`_iostream` attribute
        themselves and call :meth:`_read` when ready.
        To use the MrcInterpreter class directly, pass a stream when creating
        the object (or for a write-only stream, create an MrcInterpreter with
        no stream, call :meth:`_create_default_attributes` and set the
        :attr:`_iostream` attribute directly).
        Args:
            iostream: The I/O stream to use to read and write MRC data. The
                default is :data:`None`.
            permissive: Read the stream in permissive mode. The default is
                :data:`False`.
        Raises:
            :class:`~exceptions.ValueError`: If ``iostream`` is given and the
                data it contains cannot be interpreted as a valid MRC file.
        """
        super(MrcInterpreter, self).__init__(**kwargs)
        self._iostream = iostream
        self._permissive = permissive
        # If iostream is given, initialise by reading it
        if self._iostream is not None:
            self._read()
    def __enter__(self):
        """Called by the context manager at the start of a :keyword:`with`
        block.
        Returns:
            This object (``self``).
        """
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        """Called by the context manager at the end of a :keyword:`with`
        block.
        This ensures that the :meth:`close` method is called.
        """
        self.close()
    def __del__(self):
        """Attempt to flush the stream when this object is garbage collected.
        It's better not to rely on this - instead, use a :keyword:`with`
        block or explicitly call the :meth:`close` method.
        """
        try:
            self.close()
        except Exception:
            # Suppress all errors: the stream may already be closed or gone
            # by the time the garbage collector runs.
            pass
    def _read(self):
        """Read the header, extended header and data from the I/O stream.
        Before calling this method, the stream should be open and positioned at
        the start of the header. This method will advance the stream to the end
        of the data block.
        Raises:
            :class:`~exceptions.ValueError`: If the file is not a valid MRC
                file.
        """
        self._read_header()
        self._read_extended_header()
        self._read_data()
    def _read_header(self):
        """Read the MRC header from the I/O stream.
        The header will be read from the current stream position, and the
        stream will be advanced by 1024 bytes.
        Raises:
            :class:`~exceptions.ValueError`: If the file is not a valid MRC
                file.
        """
        # Read 1024 bytes from the stream
        header_str = self._iostream.read(HEADER_DTYPE.itemsize)
        if len(header_str) < HEADER_DTYPE.itemsize:
            raise ValueError("Couldn't read enough bytes for MRC header")
        # Use a recarray to allow access to fields as attributes
        # (e.g. header.mode instead of header['mode'])
        header = np.rec.fromstring(header_str, dtype=HEADER_DTYPE, shape=())
        # Make header writeable, because fromstring() creates a read-only array
        header.flags.writeable = True
        # Check this is an MRC file, and read machine stamp to get byte order
        if header.map != MAP_ID:
            msg = ("Map ID string not found - "
                   "not an MRC file, or file is corrupt")
            if self._permissive:
                warnings.warn(msg, RuntimeWarning)
            else:
                raise ValueError(msg)
        try:
            byte_order = utils.byte_order_from_machine_stamp(header.machst)
        except ValueError as err:
            if self._permissive:
                byte_order = '<'  # try little-endian as a sensible default
                warnings.warn(str(err), RuntimeWarning)
            else:
                raise
        # Create a new dtype with the correct byte order and update the header
        header.dtype = header.dtype.newbyteorder(byte_order)
        header.flags.writeable = not self._read_only
        self._header = header
    def _read_extended_header(self):
        """Read the extended header from the stream.
        If there is no extended header, a zero-length array is assigned to the
        extended_header attribute.
        If the extended header is recognised as FEI microscope metadata (by
        'FEI1' in the header's ``exttyp`` field), its dtype is set
        appropriately. Otherwise, the dtype is set as void (``'V1'``).
        """
        # nsymbt gives the extended header length in bytes; zero means none.
        ext_header_str = self._iostream.read(int(self.header.nsymbt))
        if self.header['exttyp'] == b'FEI1':
            dtype = FEI_EXTENDED_HEADER_DTYPE
        else:
            dtype = 'V1'
        self._extended_header = np.frombuffer(ext_header_str, dtype=dtype)
        self._extended_header.flags.writeable = not self._read_only
    def _read_data(self):
        """Read the data array from the stream.
        This method uses information from the header to set the data array's
        shape and dtype.
        """
        try:
            dtype = utils.data_dtype_from_header(self.header)
        except ValueError as err:
            if self._permissive:
                warnings.warn("{0} - data block cannot be read".format(err),
                              RuntimeWarning)
                self._data = None
                return
            else:
                raise
        shape = utils.data_shape_from_header(self.header)
        # Total data size = itemsize multiplied by every dimension length.
        nbytes = dtype.itemsize
        for axis_length in shape:
            nbytes *= axis_length
        # A short read here means the file was truncated.
        data_bytes = self._iostream.read(nbytes)
        if len(data_bytes) < nbytes:
            msg = ("Expected {0} bytes in data block but could only read {1}"
                   .format(nbytes, len(data_bytes)))
            if self._permissive:
                warnings.warn(msg, RuntimeWarning)
                self._data = None
                return
            else:
                raise ValueError(msg)
        self._data = np.frombuffer(data_bytes, dtype=dtype).reshape(shape)
        self._data.flags.writeable = not self._read_only
    def close(self):
        """Flush to the stream and clear the header and data attributes."""
        # Flush pending changes before dropping the header/data references.
        if self._header is not None and not self._iostream.closed:
            self.flush()
        self._header = None
        self._extended_header = None
        self._close_data()
    def flush(self):
        """Flush the header and data arrays to the I/O stream.
        This implementation seeks to the start of the stream, writes the
        header, extended header and data arrays, and then truncates the stream.
        Subclasses should override this implementation for streams which do not
        support :meth:`~io.IOBase.seek` or :meth:`~io.IOBase.truncate`.
        """
        if not self._read_only:
            self._iostream.seek(0)
            self._iostream.write(self.header)
            self._iostream.write(self.extended_header)
            # ascontiguousarray guarantees a single C-ordered block of bytes.
            self._iostream.write(np.ascontiguousarray(self.data))
            self._iostream.truncate()
        self._iostream.flush()
| [
"warnings.warn",
"numpy.frombuffer",
"numpy.rec.fromstring",
"numpy.ascontiguousarray"
] | [((6138, 6197), 'numpy.rec.fromstring', 'np.rec.fromstring', (['header_str'], {'dtype': 'HEADER_DTYPE', 'shape': '()'}), '(header_str, dtype=HEADER_DTYPE, shape=())\n', (6155, 6197), True, 'import numpy as np\n'), ((7975, 8017), 'numpy.frombuffer', 'np.frombuffer', (['ext_header_str'], {'dtype': 'dtype'}), '(ext_header_str, dtype=dtype)\n', (7988, 8017), True, 'import numpy as np\n'), ((6600, 6634), 'warnings.warn', 'warnings.warn', (['msg', 'RuntimeWarning'], {}), '(msg, RuntimeWarning)\n', (6613, 6634), False, 'import warnings\n'), ((9110, 9144), 'warnings.warn', 'warnings.warn', (['msg', 'RuntimeWarning'], {}), '(msg, RuntimeWarning)\n', (9123, 9144), False, 'import warnings\n'), ((9288, 9326), 'numpy.frombuffer', 'np.frombuffer', (['data_bytes'], {'dtype': 'dtype'}), '(data_bytes, dtype=dtype)\n', (9301, 9326), True, 'import numpy as np\n'), ((10310, 10341), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['self.data'], {}), '(self.data)\n', (10330, 10341), True, 'import numpy as np\n')] |
#Evaluation Day XX - DD.MM.YYYY - Time: HH:MM
#Conditions: Ambient or nitrogen Atmosphere
#Chemicals: Solution X / Redox Probe Y with Concentration Z
#Working Electrode: GCE, Flat ITO, Porous ITO or Ultra-Micro-Electrode
#Import packages
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
import pylab
import os
import math
################ settings ################
# Cyclic-voltammetry peak evaluation: for each current column, fit a linear
# baseline over a datapoint window, locate the peak current, report the peak
# height above the baseline (delta i,p) and save an annotated plot per sweep.
# filename without ending
filename = 'Test-CV-with_Porous-ITO'
# start position of datapoints used for the baseline fit
a = 90
# end position of datapoints used to determine the linear regression function
n = 150
# sweep rate patterns (srp):
srp = ([1000, 900, 800, 700, 600, 500, 400, 300, 200, 100],
       [1000, 900, 800, 700, 600, 500, 400, 300, 200, 100, 50, 30, 20, 10],
       [1000, 900, 800, 700, 600, 500, 400, 300, 200, 150, 100, 80, 60, 50, 30, 20, 10, 5, 3, 2, 1],
       [1000],
       [  # insert here your own scan rate pattern
       ]
       )
# select scan rate line 1, 2, 3, 4, etc.
srl = 3
# significant digits of result
significant_digits = 5
##########################################
# load data from file (tab-separated, one header row; column 0 = potential E,
# columns 1..l = measured currents, one column per sweep rate)
data = np.loadtxt(filename + '.txt', delimiter='\t', skiprows=1, dtype=np.double)
# datapoints used for the baseline
regressiondata = data[a:n, :]
# number of current columns
l = len(data[1, :]) - 1
# output directory for the evaluation plots
dirname = os.path.dirname('____file______')
evaldir = os.path.join(dirname, filename + '_evaluation')
if not os.path.exists(evaldir):
    os.makedirs(evaldir)
# scan rates of the selected pattern
r = srp[srl - 1]
print("i,p in Ampere from Sweep rates " + str(r[0]) + "mV/s to " + str(r[-1]) + "mV/s:")
p = 0
for i in range(1, l + 1):
    # baseline: linear fit over the selected window
    # (np.polyfit replaces the deprecated pylab.polyfit alias; same function)
    m, b = np.polyfit(regressiondata[:, 0], regressiondata[:, i], 1)
    # peak current and the potential where it occurs
    y_max = np.max(data[:, i])
    x_max = data[data[:, i] == y_max, 0]
    if len(x_max) > 1:
        x_max = x_max[0]
    # baseline current at the peak potential
    y_lin = m * x_max + b
    # peak height above the baseline
    diff = float(y_max - y_lin)
    x = data[:, 0]
    fig = plt.figure()
    ax = fig.add_subplot(111)
    plt.plot(x, data[:, i], x, m * x + b, ':', regressiondata[:, 0], regressiondata[:, i], x_max, y_lin, 'ro', x_max, y_max, 'ro', regressiondata[0, 0], regressiondata[0, i], 'gx', regressiondata[-1, 0], regressiondata[-1, i], 'gx')
    left, right = plt.xlim()
    bottom, top = plt.ylim()
    # round to the requested number of significant figures
    rounded_number = round(diff, significant_digits - int(math.floor(math.log10(abs(diff)))) - 1)
    plt.text(left + (0.01 * (right - left)), bottom + (0.95 * (top - bottom)), "\u0394i (" + str(significant_digits) + " sf): " + str(rounded_number) + " A")
    plt.title(filename + ".txt: measurement " + str(r[p]) + " mV/s", y=1.07)
    plt.xlabel('E in [V]')
    plt.ylabel('i in [A]')
    plt.subplots_adjust(left=0.15)
    ax.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.1e'))
    plt.savefig(os.path.join(evaldir, filename + "_meas" + str(r[p]) + '.png'))
    # close the figure: plt.clf() only cleared it but left every Figure
    # object alive, so long measurement series leaked memory
    plt.close(fig)
    # Output delta i,p
    # print(str(r[p]) + "=" + str(rounded_number))
    print(str(rounded_number))
    # Output delta i,p/v^(0.5)
    # print(str(rounded_number/(r[p]*0.001)**(0.5)))
    p += 1
| [
"os.path.exists",
"pylab.polyfit",
"os.makedirs",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"os.path.join",
"matplotlib.pyplot.clf",
"numpy.max",
"os.path.dirname",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.ylim",
"nump... | [((1104, 1178), 'numpy.loadtxt', 'np.loadtxt', (["(filename + '.txt')"], {'delimiter': '"""\t"""', 'skiprows': '(1)', 'dtype': 'np.double'}), "(filename + '.txt', delimiter='\\t', skiprows=1, dtype=np.double)\n", (1114, 1178), True, 'import numpy as np\n'), ((1327, 1360), 'os.path.dirname', 'os.path.dirname', (['"""____file______"""'], {}), "('____file______')\n", (1342, 1360), False, 'import os\n'), ((1371, 1418), 'os.path.join', 'os.path.join', (['dirname', "(filename + '_evaluation')"], {}), "(dirname, filename + '_evaluation')\n", (1383, 1418), False, 'import os\n'), ((1426, 1449), 'os.path.exists', 'os.path.exists', (['evaldir'], {}), '(evaldir)\n', (1440, 1449), False, 'import os\n'), ((1455, 1475), 'os.makedirs', 'os.makedirs', (['evaldir'], {}), '(evaldir)\n', (1466, 1475), False, 'import os\n'), ((1686, 1746), 'pylab.polyfit', 'pylab.polyfit', (['regressiondata[:, 0]', 'regressiondata[:, i]', '(1)'], {}), '(regressiondata[:, 0], regressiondata[:, i], 1)\n', (1699, 1746), False, 'import pylab\n'), ((1794, 1812), 'numpy.max', 'np.max', (['data[:, i]'], {}), '(data[:, i])\n', (1800, 1812), True, 'import numpy as np\n'), ((2059, 2071), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2069, 2071), True, 'import matplotlib.pyplot as plt\n'), ((2106, 2346), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'data[:, i]', 'x', '(m * x + b)', '""":"""', 'regressiondata[:, 0]', 'regressiondata[:, i]', 'x_max', 'y_lin', '"""ro"""', 'x_max', 'y_max', '"""ro"""', 'regressiondata[0, 0]', 'regressiondata[0, i]', '"""gx"""', 'regressiondata[-1, 0]', 'regressiondata[-1, i]', '"""gx"""'], {}), "(x, data[:, i], x, m * x + b, ':', regressiondata[:, 0],\n regressiondata[:, i], x_max, y_lin, 'ro', x_max, y_max, 'ro',\n regressiondata[0, 0], regressiondata[0, i], 'gx', regressiondata[-1, 0],\n regressiondata[-1, i], 'gx')\n", (2114, 2346), True, 'import matplotlib.pyplot as plt\n'), ((2339, 2349), 'matplotlib.pyplot.xlim', 'plt.xlim', ([], {}), '()\n', (2347, 
2349), True, 'import matplotlib.pyplot as plt\n'), ((2368, 2378), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {}), '()\n', (2376, 2378), True, 'import matplotlib.pyplot as plt\n'), ((2715, 2737), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""E in [V]"""'], {}), "('E in [V]')\n", (2725, 2737), True, 'import matplotlib.pyplot as plt\n'), ((2742, 2764), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""i in [A]"""'], {}), "('i in [A]')\n", (2752, 2764), True, 'import matplotlib.pyplot as plt\n'), ((2769, 2799), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.15)'}), '(left=0.15)\n', (2788, 2799), True, 'import matplotlib.pyplot as plt\n'), ((2951, 2960), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (2958, 2960), True, 'import matplotlib.pyplot as plt\n'), ((2833, 2865), 'matplotlib.ticker.FormatStrFormatter', 'mtick.FormatStrFormatter', (['"""%.1e"""'], {}), "('%.1e')\n", (2857, 2865), True, 'import matplotlib.ticker as mtick\n')] |
import re
import string
import nltk
import numpy as np
import pandas as pd
from nltk.stem import RSLPStemmer
from nltk.tokenize import word_tokenize
from rank_bm25 import BM25L, BM25Okapi, BM25Plus
from .base import Similarity
class Bm25Similarity(Similarity):
    """Rank dataset documents against their query using BM25 (Okapi).

    ``score`` builds a BM25 index over the ``ementa2`` column, scores every
    document against the dataset's shared ``query_text`` and stores the
    min-max normalized result in a new ``score`` column.
    """

    def __init__(self):
        self.index = None
        self.text_processor = Textprocessor()

    def score(self, dataset: pd.DataFrame) -> pd.DataFrame:
        """Return *dataset* with a normalized BM25 ``score`` column added."""
        self.__create_index(dataset)
        dataset = self.__add_score_column(dataset)
        dataset = self.__normalize_score(dataset)
        return dataset

    def __create_index(self, dataset: pd.DataFrame):
        # Tokenize and normalize each document before building the index.
        text_corpus = dataset['ementa2'].tolist()
        tokenized_corpus = [self.__clean(text) for text in text_corpus]
        self.index = BM25Okapi(tokenized_corpus)

    def __add_score_column(self, dataset: pd.DataFrame):
        # All rows share the same query text, so the first row suffices.
        query_text = dataset['query_text'].iloc[0]
        query_tokens = self.__clean(query_text)
        scores = self.index.get_scores(query_tokens)
        scores = np.array(scores)
        dataset['score'] = scores
        return dataset

    def __clean(self, text: str) -> list:
        # Full normalization pipeline: heading fix, tokenize, then drop
        # stopwords, punctuation and numbers, and stem what remains.
        # (Returns a token list, not a string - annotation fixed.)
        text = self.text_processor.spelling(text)
        tokens = self.text_processor.tokenize(text)
        tokens = self.text_processor.remove_stopwords(tokens)
        tokens = self.text_processor.remove_ponctuation(tokens)
        tokens = self.text_processor.remove_numbers(tokens)
        tokens = self.text_processor.stem(tokens)
        return tokens

    @staticmethod
    def __normalize_score(dataset: pd.DataFrame):
        """Min-max normalize the ``score`` column.

        Guards against a zero score range (all scores equal), which in the
        original produced NaNs from a 0/0 division; in that case every
        score is set to 0.0.
        """
        min_score = dataset['score'].min()
        score_range = dataset['score'].max() - min_score
        if score_range == 0:
            dataset['score'] = 0.0
        else:
            dataset['score'] = (dataset['score'] - min_score) / score_range
        return dataset
class Textprocessor:
    """Portuguese text normalization helpers built on top of NLTK."""

    def __init__(self):
        nltk.download('stopwords')
        # Domain-specific tokens treated as stopwords on top of NLTK's list.
        extra_stopwords = ('–', 'art', 'ementa')
        self.stop_words = nltk.corpus.stopwords.words('portuguese')
        for word in extra_stopwords:
            self.stop_words.append(word)
        self.stemmer = RSLPStemmer()

    @staticmethod
    def spelling(text):
        # Collapse the spaced-out heading "E M E N T A " into "ementa ".
        normalized = re.sub(r'E\sM\sE\sN\sT\sA\s', 'ementa ', text)
        return normalized.strip()

    @staticmethod
    def tokenize(text):
        return word_tokenize(text, language='portuguese')

    @staticmethod
    def join(tokens):
        return ' '.join(tokens)

    def remove_stopwords(self, tokens):
        stop = self.stop_words
        return [tok for tok in tokens if tok.lower() not in stop]

    @staticmethod
    def remove_ponctuation(tokens):
        # Drop tokens that consist entirely of punctuation characters.
        def has_content(tok):
            return len(tok.strip(string.punctuation)) > 0
        return [tok for tok in tokens if has_content(tok)]

    @staticmethod
    def remove_numbers(tokens):
        # Drop tokens containing a digit run that ends at a word boundary.
        digit_pattern = re.compile(r'\d+\b')
        return [tok for tok in tokens if digit_pattern.search(tok) is None]

    def stem(self, tokens):
        stemmed = []
        for tok in tokens:
            stemmed.append(self.stemmer.stem(tok))
        return stemmed
| [
"nltk.corpus.stopwords.words",
"nltk.download",
"rank_bm25.BM25Okapi",
"nltk.tokenize.word_tokenize",
"numpy.array",
"re.sub",
"nltk.stem.RSLPStemmer",
"re.search"
] | [((780, 807), 'rank_bm25.BM25Okapi', 'BM25Okapi', (['tokenized_corpus'], {}), '(tokenized_corpus)\n', (789, 807), False, 'from rank_bm25 import BM25L, BM25Okapi, BM25Plus\n'), ((1035, 1051), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (1043, 1051), True, 'import numpy as np\n'), ((1802, 1828), 'nltk.download', 'nltk.download', (['"""stopwords"""'], {}), "('stopwords')\n", (1815, 1828), False, 'import nltk\n'), ((1855, 1896), 'nltk.corpus.stopwords.words', 'nltk.corpus.stopwords.words', (['"""portuguese"""'], {}), "('portuguese')\n", (1882, 1896), False, 'import nltk\n'), ((2035, 2048), 'nltk.stem.RSLPStemmer', 'RSLPStemmer', ([], {}), '()\n', (2046, 2048), False, 'from nltk.stem import RSLPStemmer\n'), ((2220, 2262), 'nltk.tokenize.word_tokenize', 'word_tokenize', (['text'], {'language': '"""portuguese"""'}), "(text, language='portuguese')\n", (2233, 2262), False, 'from nltk.tokenize import word_tokenize\n'), ((2107, 2158), 're.sub', 're.sub', (['"""E\\\\sM\\\\sE\\\\sN\\\\sT\\\\sA\\\\s"""', '"""ementa """', 'text'], {}), "('E\\\\sM\\\\sE\\\\sN\\\\sT\\\\sA\\\\s', 'ementa ', text)\n", (2113, 2158), False, 'import re\n'), ((2698, 2725), 're.search', 're.search', (['"""\\\\d+\\\\b"""', 'token'], {}), "('\\\\d+\\\\b', token)\n", (2707, 2725), False, 'import re\n')] |
'''
Created on Jul 3, 2014
@author: roj-idl71
'''
# SUBCHANNELS EN VEZ DE CHANNELS
# BENCHMARKS -> PROBLEMAS CON ARCHIVOS GRANDES -> INCONSTANTE EN EL TIEMPO
# ACTUALIZACION DE VERSION
# HEADERS
# MODULO DE ESCRITURA
# METADATA
import os
import time
import datetime
import numpy
import timeit
from fractions import Fraction
from time import time
from time import sleep
import schainpy.admin
from schainpy.model.data.jroheaderIO import RadarControllerHeader, SystemHeader
from schainpy.model.data.jrodata import Voltage
from schainpy.model.proc.jroproc_base import ProcessingUnit, Operation, MPDecorator
import pickle
try:
import digital_rf
except:
pass
class DigitalRFReader(ProcessingUnit):
'''
classdocs
'''
def __init__(self):
'''
Constructor
'''
ProcessingUnit.__init__(self)
self.dataOut = Voltage()
self.__printInfo = True
self.__flagDiscontinuousBlock = False
self.__bufferIndex = 9999999
self.__codeType = 0
self.__ippKm = None
self.__nCode = None
self.__nBaud = None
self.__code = None
self.dtype = None
self.oldAverage = None
self.path = None
def close(self):
print('Average of writing to digital rf format is ', self.oldAverage * 1000)
return
    def __getCurrentSecond(self):
        # Position of the read cursor expressed in seconds (samples / rate).
        return self.__thisUnixSample / self.__sample_rate
    thisSecond = property(__getCurrentSecond, "I'm the 'thisSecond' property.")
def __setFileHeader(self):
'''
In this method will be initialized every parameter of dataOut object (header, no data)
'''
ippSeconds = 1.0 * self.__nSamples / self.__sample_rate
nProfiles = 1.0 / ippSeconds # Number of profiles in one second
try:
self.dataOut.radarControllerHeaderObj = RadarControllerHeader(
self.__radarControllerHeader)
except:
self.dataOut.radarControllerHeaderObj = RadarControllerHeader(
txA=0,
txB=0,
nWindows=1,
nHeights=self.__nSamples,
firstHeight=self.__firstHeigth,
deltaHeight=self.__deltaHeigth,
codeType=self.__codeType,
nCode=self.__nCode, nBaud=self.__nBaud,
code=self.__code)
try:
self.dataOut.systemHeaderObj = SystemHeader(self.__systemHeader)
except:
self.dataOut.systemHeaderObj = SystemHeader(nSamples=self.__nSamples,
nProfiles=nProfiles,
nChannels=len(
self.__channelList),
adcResolution=14)
self.dataOut.type = "Voltage"
self.dataOut.data = None
self.dataOut.dtype = self.dtype
# self.dataOut.nChannels = 0
# self.dataOut.nHeights = 0
self.dataOut.nProfiles = int(nProfiles)
self.dataOut.heightList = self.__firstHeigth + \
numpy.arange(self.__nSamples, dtype=numpy.float) * \
self.__deltaHeigth
self.dataOut.channelList = list(range(self.__num_subchannels))
self.dataOut.blocksize = self.dataOut.nChannels * self.dataOut.nHeights
# self.dataOut.channelIndexList = None
self.dataOut.flagNoData = True
self.dataOut.flagDataAsBlock = False
# Set to TRUE if the data is discontinuous
self.dataOut.flagDiscontinuousBlock = False
self.dataOut.utctime = None
# timezone like jroheader, difference in minutes between UTC and localtime
self.dataOut.timeZone = self.__timezone / 60
self.dataOut.dstFlag = 0
self.dataOut.errorCount = 0
try:
self.dataOut.nCohInt = self.fixed_metadata_dict.get(
'nCohInt', self.nCohInt)
# asumo que la data esta decodificada
self.dataOut.flagDecodeData = self.fixed_metadata_dict.get(
'flagDecodeData', self.flagDecodeData)
# asumo que la data esta sin flip
self.dataOut.flagDeflipData = self.fixed_metadata_dict['flagDeflipData']
self.dataOut.flagShiftFFT = self.fixed_metadata_dict['flagShiftFFT']
self.dataOut.useLocalTime = self.fixed_metadata_dict['useLocalTime']
except:
pass
self.dataOut.ippSeconds = ippSeconds
# Time interval between profiles
# self.dataOut.timeInterval = self.dataOut.ippSeconds * self.dataOut.nCohInt
self.dataOut.frequency = self.__frequency
self.dataOut.realtime = self.__online
def findDatafiles(self, path, startDate=None, endDate=None):
if not os.path.isdir(path):
return []
try:
digitalReadObj = digital_rf.DigitalRFReader(
path, load_all_metadata=True)
except:
digitalReadObj = digital_rf.DigitalRFReader(path)
channelNameList = digitalReadObj.get_channels()
if not channelNameList:
return []
metadata_dict = digitalReadObj.get_rf_file_metadata(channelNameList[0])
sample_rate = metadata_dict['sample_rate'][0]
this_metadata_file = digitalReadObj.get_metadata(channelNameList[0])
try:
timezone = this_metadata_file['timezone'].value
except:
timezone = 0
startUTCSecond, endUTCSecond = digitalReadObj.get_bounds(
channelNameList[0]) / sample_rate - timezone
startDatetime = datetime.datetime.utcfromtimestamp(startUTCSecond)
endDatatime = datetime.datetime.utcfromtimestamp(endUTCSecond)
if not startDate:
startDate = startDatetime.date()
if not endDate:
endDate = endDatatime.date()
dateList = []
thisDatetime = startDatetime
while(thisDatetime <= endDatatime):
thisDate = thisDatetime.date()
if thisDate < startDate:
continue
if thisDate > endDate:
break
dateList.append(thisDate)
thisDatetime += datetime.timedelta(1)
return dateList
def setup(self, path=None,
startDate=None,
endDate=None,
startTime=datetime.time(0, 0, 0),
endTime=datetime.time(23, 59, 59),
channelList=None,
nSamples=None,
online=False,
delay=60,
buffer_size=1024,
ippKm=None,
nCohInt=1,
nCode=1,
nBaud=1,
flagDecodeData=False,
code=numpy.ones((1, 1), dtype=numpy.int),
**kwargs):
'''
In this method we should set all initial parameters.
Inputs:
path
startDate
endDate
startTime
endTime
set
expLabel
ext
online
delay
'''
self.path = path
self.nCohInt = nCohInt
self.flagDecodeData = flagDecodeData
self.i = 0
if not os.path.isdir(path):
raise ValueError("[Reading] Directory %s does not exist" % path)
try:
self.digitalReadObj = digital_rf.DigitalRFReader(
path, load_all_metadata=True)
except:
self.digitalReadObj = digital_rf.DigitalRFReader(path)
channelNameList = self.digitalReadObj.get_channels()
if not channelNameList:
raise ValueError("[Reading] Directory %s does not have any files" % path)
if not channelList:
channelList = list(range(len(channelNameList)))
########## Reading metadata ######################
top_properties = self.digitalReadObj.get_properties(
channelNameList[channelList[0]])
self.__num_subchannels = top_properties['num_subchannels']
self.__sample_rate = 1.0 * \
top_properties['sample_rate_numerator'] / \
top_properties['sample_rate_denominator']
# self.__samples_per_file = top_properties['samples_per_file'][0]
self.__deltaHeigth = 1e6 * 0.15 / self.__sample_rate # why 0.15?
this_metadata_file = self.digitalReadObj.get_digital_metadata(
channelNameList[channelList[0]])
metadata_bounds = this_metadata_file.get_bounds()
self.fixed_metadata_dict = this_metadata_file.read(
metadata_bounds[0])[metadata_bounds[0]] # GET FIRST HEADER
try:
self.__processingHeader = self.fixed_metadata_dict['processingHeader']
self.__radarControllerHeader = self.fixed_metadata_dict['radarControllerHeader']
self.__systemHeader = self.fixed_metadata_dict['systemHeader']
self.dtype = pickle.loads(self.fixed_metadata_dict['dtype'])
except:
pass
self.__frequency = None
self.__frequency = self.fixed_metadata_dict.get('frequency', 1)
self.__timezone = self.fixed_metadata_dict.get('timezone', 18000)
try:
nSamples = self.fixed_metadata_dict['nSamples']
except:
nSamples = None
self.__firstHeigth = 0
try:
codeType = self.__radarControllerHeader['codeType']
except:
codeType = 0
try:
if codeType:
nCode = self.__radarControllerHeader['nCode']
nBaud = self.__radarControllerHeader['nBaud']
code = self.__radarControllerHeader['code']
except:
pass
if not ippKm:
try:
# seconds to km
ippKm = self.__radarControllerHeader['ipp']
except:
ippKm = None
####################################################
self.__ippKm = ippKm
startUTCSecond = None
endUTCSecond = None
if startDate:
startDatetime = datetime.datetime.combine(startDate, startTime)
startUTCSecond = (
startDatetime - datetime.datetime(1970, 1, 1)).total_seconds() + self.__timezone
if endDate:
endDatetime = datetime.datetime.combine(endDate, endTime)
endUTCSecond = (endDatetime - datetime.datetime(1970,
1, 1)).total_seconds() + self.__timezone
start_index, end_index = self.digitalReadObj.get_bounds(
channelNameList[channelList[0]])
if not startUTCSecond:
startUTCSecond = start_index / self.__sample_rate
if start_index > startUTCSecond * self.__sample_rate:
startUTCSecond = start_index / self.__sample_rate
if not endUTCSecond:
endUTCSecond = end_index / self.__sample_rate
if end_index < endUTCSecond * self.__sample_rate:
endUTCSecond = end_index / self.__sample_rate
if not nSamples:
if not ippKm:
raise ValueError("[Reading] nSamples or ippKm should be defined")
nSamples = int(ippKm / (1e6 * 0.15 / self.__sample_rate))
channelBoundList = []
channelNameListFiltered = []
for thisIndexChannel in channelList:
thisChannelName = channelNameList[thisIndexChannel]
start_index, end_index = self.digitalReadObj.get_bounds(
thisChannelName)
channelBoundList.append((start_index, end_index))
channelNameListFiltered.append(thisChannelName)
self.profileIndex = 0
self.i = 0
self.__delay = delay
self.__codeType = codeType
self.__nCode = nCode
self.__nBaud = nBaud
self.__code = code
self.__datapath = path
self.__online = online
self.__channelList = channelList
self.__channelNameList = channelNameListFiltered
self.__channelBoundList = channelBoundList
self.__nSamples = nSamples
self.__samples_to_read = int(nSamples) # FIJO: AHORA 40
self.__nChannels = len(self.__channelList)
self.__startUTCSecond = startUTCSecond
self.__endUTCSecond = endUTCSecond
self.__timeInterval = 1.0 * self.__samples_to_read / \
self.__sample_rate # Time interval
if online:
# self.__thisUnixSample = int(endUTCSecond*self.__sample_rate - 4*self.__samples_to_read)
startUTCSecond = numpy.floor(endUTCSecond)
# por que en el otro metodo lo primero q se hace es sumar samplestoread
self.__thisUnixSample = int(startUTCSecond * self.__sample_rate) - self.__samples_to_read
self.__data_buffer = numpy.zeros(
(self.__num_subchannels, self.__samples_to_read), dtype=numpy.complex)
self.__setFileHeader()
self.isConfig = True
print("[Reading] Digital RF Data was found from %s to %s " % (
datetime.datetime.utcfromtimestamp(
self.__startUTCSecond - self.__timezone),
datetime.datetime.utcfromtimestamp(
self.__endUTCSecond - self.__timezone)
))
print("[Reading] Starting process from %s to %s" % (datetime.datetime.utcfromtimestamp(startUTCSecond - self.__timezone),
datetime.datetime.utcfromtimestamp(
endUTCSecond - self.__timezone)
))
self.oldAverage = None
self.count = 0
self.executionTime = 0
    def __reload(self):
        # Refresh the Digital RF metadata (used in online mode when the read
        # cursor catches up with the end of the data) and extend the known
        # time range. Returns True only if the end bound moved forward.
        # print
        # print "%s not in range [%s, %s]" %(
        #    datetime.datetime.utcfromtimestamp(self.thisSecond - self.__timezone),
        #    datetime.datetime.utcfromtimestamp(self.__startUTCSecond - self.__timezone),
        #    datetime.datetime.utcfromtimestamp(self.__endUTCSecond - self.__timezone)
        # )
        print("[Reading] reloading metadata ...")
        try:
            self.digitalReadObj.reload(complete_update=True)
        except:
            # Fall back to recreating the reader from scratch.
            self.digitalReadObj = digital_rf.DigitalRFReader(self.path)
        start_index, end_index = self.digitalReadObj.get_bounds(
            self.__channelNameList[self.__channelList[0]])
        if start_index > self.__startUTCSecond * self.__sample_rate:
            self.__startUTCSecond = 1.0 * start_index / self.__sample_rate
        if end_index > self.__endUTCSecond * self.__sample_rate:
            self.__endUTCSecond = 1.0 * end_index / self.__sample_rate
            print()
            print("[Reading] New timerange found [%s, %s] " % (
                datetime.datetime.utcfromtimestamp(
                    self.__startUTCSecond - self.__timezone),
                datetime.datetime.utcfromtimestamp(
                    self.__endUTCSecond - self.__timezone)
            ))
            return True
        return False
def timeit(self, toExecute):
t0 = time.time()
toExecute()
self.executionTime = time.time() - t0
if self.oldAverage is None:
self.oldAverage = self.executionTime
self.oldAverage = (self.executionTime + self.count *
self.oldAverage) / (self.count + 1.0)
self.count = self.count + 1.0
return
    def __readNextBlock(self, seconds=30, volt_scale=1):
        '''
        Advance the read cursor by one block and fill self.__data_buffer with
        samples (scaled by volt_scale) for every subchannel. Returns True when
        a full block was read, False when the end of the selected time range
        was reached; sets __flagDiscontinuousBlock on gaps or short reads.
        '''
        # Set the next data
        self.__flagDiscontinuousBlock = False
        self.__thisUnixSample += self.__samples_to_read
        if self.__thisUnixSample + 2 * self.__samples_to_read > self.__endUTCSecond * self.__sample_rate:
            print ("[Reading] There are no more data into selected time-range")
            if self.__online:
                # Online mode: wait briefly and refresh the metadata bounds.
                sleep(3)
                self.__reload()
            else:
                return False
            if self.__thisUnixSample + 2 * self.__samples_to_read > self.__endUTCSecond * self.__sample_rate:
                return False
            # Reload succeeded: step back so the block is re-read below.
            self.__thisUnixSample -= self.__samples_to_read
        indexChannel = 0
        dataOk = False
        for thisChannelName in self.__channelNameList:  # TODO: several channels?
            for indexSubchannel in range(self.__num_subchannels):
                try:
                    # Timed read; the timing feeds the running average below.
                    t0 = time()
                    result = self.digitalReadObj.read_vector_c81d(self.__thisUnixSample,
                                                                  self.__samples_to_read,
                                                                  thisChannelName, sub_channel=indexSubchannel)
                    self.executionTime = time() - t0
                    if self.oldAverage is None:
                        self.oldAverage = self.executionTime
                    self.oldAverage = (
                        self.executionTime + self.count * self.oldAverage) / (self.count + 1.0)
                    self.count = self.count + 1.0
                except IOError as e:
                    # read next profile
                    self.__flagDiscontinuousBlock = True
                    print("[Reading] %s" % datetime.datetime.utcfromtimestamp(self.thisSecond - self.__timezone), e)
                    break
                if result.shape[0] != self.__samples_to_read:
                    self.__flagDiscontinuousBlock = True
                    print("[Reading] %s: Too few samples were found, just %d/%d samples" % (datetime.datetime.utcfromtimestamp(self.thisSecond - self.__timezone),
                                                                                            result.shape[0],
                                                                                            self.__samples_to_read))
                    break
                self.__data_buffer[indexSubchannel, :] = result * volt_scale
            indexChannel+=1
            dataOk = True
        self.__utctime = self.__thisUnixSample / self.__sample_rate
        if not dataOk:
            return False
        print("[Reading] %s: %d samples <> %f sec" % (datetime.datetime.utcfromtimestamp(self.thisSecond - self.__timezone),
                                                      self.__samples_to_read,
                                                      self.__timeInterval))
        self.__bufferIndex = 0
        return True
def __isBufferEmpty(self):
return self.__bufferIndex > self.__samples_to_read - self.__nSamples # 40960 - 40
def getData(self, seconds=30, nTries=5):
'''
This method gets the data from files and put the data into the dataOut object
In addition, increase el the buffer counter in one.
Return:
data : retorna un perfil de voltages (alturas * canales) copiados desde el
buffer. Si no hay mas archivos a leer retorna None.
Affected:
self.dataOut
self.profileIndex
self.flagDiscontinuousBlock
self.flagIsNewBlock
'''
#print("getdata")
err_counter = 0
self.dataOut.flagNoData = True
if self.__isBufferEmpty():
#print("hi")
self.__flagDiscontinuousBlock = False
while True:
#print ("q ha pasado")
if self.__readNextBlock():
break
if self.__thisUnixSample > self.__endUTCSecond * self.__sample_rate:
raise schainpy.admin.SchainError('Error')
return
if self.__flagDiscontinuousBlock:
raise schainpy.admin.SchainError('discontinuous block found')
return
if not self.__online:
raise schainpy.admin.SchainError('Online?')
return
err_counter += 1
if err_counter > nTries:
raise schainpy.admin.SchainError('Max retrys reach')
return
print('[Reading] waiting %d seconds to read a new block' % seconds)
time.sleep(seconds)
self.dataOut.data = self.__data_buffer[:, self.__bufferIndex:self.__bufferIndex + self.__nSamples]
self.dataOut.utctime = ( self.__thisUnixSample + self.__bufferIndex) / self.__sample_rate
self.dataOut.flagNoData = False
self.dataOut.flagDiscontinuousBlock = self.__flagDiscontinuousBlock
self.dataOut.profileIndex = self.profileIndex
self.__bufferIndex += self.__nSamples
self.profileIndex += 1
if self.profileIndex == self.dataOut.nProfiles:
self.profileIndex = 0
return True
def printInfo(self):
'''
'''
if self.__printInfo == False:
return
# self.systemHeaderObj.printInfo()
# self.radarControllerHeaderObj.printInfo()
self.__printInfo = False
def printNumberOfBlock(self):
'''
'''
return
# print self.profileIndex
def run(self, **kwargs):
'''
This method will be called many times so here you should put all your code
'''
if not self.isConfig:
self.setup(**kwargs)
#self.i = self.i+1
self.getData(seconds=self.__delay)
return
@MPDecorator
class DigitalRFWriter(Operation):
    '''
    Writes Voltage data to a Digital RF dataset on disk, together with a
    Digital Metadata channel describing the acquisition headers.
    '''

    def __init__(self, **kwargs):
        '''
        Constructor
        '''
        Operation.__init__(self, **kwargs)
        self.metadata_dict = {}
        self.dataOut = None
        self.dtype = None
        self.oldAverage = 0  # running average of write times (see timeit)

    def setHeader(self):
        # Collect the dataOut attributes that are persisted as Digital Metadata.
        self.metadata_dict['frequency'] = self.dataOut.frequency
        self.metadata_dict['timezone'] = self.dataOut.timeZone
        self.metadata_dict['dtype'] = pickle.dumps(self.dataOut.dtype)
        self.metadata_dict['nProfiles'] = self.dataOut.nProfiles
        self.metadata_dict['heightList'] = self.dataOut.heightList
        self.metadata_dict['channelList'] = self.dataOut.channelList
        self.metadata_dict['flagDecodeData'] = self.dataOut.flagDecodeData
        self.metadata_dict['flagDeflipData'] = self.dataOut.flagDeflipData
        self.metadata_dict['flagShiftFFT'] = self.dataOut.flagShiftFFT
        self.metadata_dict['useLocalTime'] = self.dataOut.useLocalTime
        self.metadata_dict['nCohInt'] = self.dataOut.nCohInt
        self.metadata_dict['type'] = self.dataOut.type
        self.metadata_dict['flagDataAsBlock'] = getattr(
            self.dataOut, 'flagDataAsBlock', None)  # TODO: check

    def setup(self, dataOut, path, frequency, fileCadence, dirCadence, metadataCadence, set=0, metadataFile='metadata', ext='.h5'):
        '''
        Set all initial parameters and create the Digital RF writer objects.

        Input:
            dataOut: Input data will also be output data

        Raises:
            ValueError: if dataOut is not of type 'Voltage'.
        '''
        self.setHeader()
        self.__ippSeconds = dataOut.ippSeconds
        self.__deltaH = dataOut.getDeltaH()
        self.__sample_rate = 1e6 * 0.15 / self.__deltaH
        self.__dtype = dataOut.dtype
        if len(dataOut.dtype) == 2:
            # structured dtype: keep only the component type ('r'/'i')
            self.__dtype = dataOut.dtype[0]
        self.__nSamples = dataOut.systemHeaderObj.nSamples
        self.__nProfiles = dataOut.nProfiles

        if self.dataOut.type != 'Voltage':
            # BUG FIX: raising a plain string is a TypeError in Python 3;
            # raise a real exception instead. The nFFTPoints-sized array
            # allocation that followed the raise was unreachable and has
            # been removed.
            raise ValueError('Digital RF cannot be used with this data type')
        # One structured (r, i) sample per (height, channel) pair.
        self.arr_data = numpy.ones((self.__nSamples, len(
            self.dataOut.channelList)), dtype=[('r', self.__dtype), ('i', self.__dtype)])

        file_cadence_millisecs = 1000  # currently unused; see metadata writer call below

        sample_rate_fraction = Fraction(self.__sample_rate).limit_denominator()
        sample_rate_numerator = int(sample_rate_fraction.numerator)
        sample_rate_denominator = int(sample_rate_fraction.denominator)
        # NOTE(review): digital_rf expects an integer global index — this may
        # be a float when utctime is; confirm against the digital_rf API.
        start_global_index = dataOut.utctime * self.__sample_rate

        uuid = 'prueba'
        compression_level = 0
        checksum = False
        is_complex = True
        num_subchannels = len(dataOut.channelList)
        is_continuous = True
        marching_periods = False

        self.digitalWriteObj = digital_rf.DigitalRFWriter(path, self.__dtype, dirCadence,
                                                          fileCadence, start_global_index,
                                                          sample_rate_numerator, sample_rate_denominator, uuid, compression_level, checksum,
                                                          is_complex, num_subchannels, is_continuous, marching_periods)

        metadata_dir = os.path.join(path, 'metadata')
        # ROBUSTNESS FIX: os.system('mkdir ...') is shell-dependent and fails
        # silently; create the directory portably instead.
        os.makedirs(metadata_dir, exist_ok=True)

        self.digitalMetadataWriteObj = digital_rf.DigitalMetadataWriter(metadata_dir, dirCadence, 1,  # 236, file_cadence_millisecs / 1000
                                                                        sample_rate_numerator, sample_rate_denominator,
                                                                        metadataFile)
        self.isConfig = True
        self.currentSample = 0
        self.oldAverage = 0
        self.count = 0
        return

    def writeMetadata(self):
        # Global sample index at which this metadata record applies.
        start_idx = self.__sample_rate * self.dataOut.utctime

        self.metadata_dict['processingHeader'] = self.dataOut.processingHeaderObj.getAsDict(
        )
        self.metadata_dict['radarControllerHeader'] = self.dataOut.radarControllerHeaderObj.getAsDict(
        )
        self.metadata_dict['systemHeader'] = self.dataOut.systemHeaderObj.getAsDict(
        )
        self.digitalMetadataWriteObj.write(start_idx, self.metadata_dict)
        return

    def timeit(self, toExecute):
        '''
        Execute `toExecute` and fold its wall-clock duration into the
        running average kept in self.oldAverage / self.count.
        '''
        t0 = time()
        toExecute()
        self.executionTime = time() - t0
        if self.oldAverage is None:
            self.oldAverage = self.executionTime
        self.oldAverage = (self.executionTime + self.count *
                           self.oldAverage) / (self.count + 1.0)
        self.count = self.count + 1.0
        return

    def writeData(self):
        '''
        Copy the current Voltage profile into the structured staging array
        and write it to the Digital RF channel, timing the write.

        Raises:
            ValueError: if dataOut is not of type 'Voltage'.
        '''
        if self.dataOut.type != 'Voltage':
            # BUG FIX: raising a plain string is a TypeError in Python 3.
            # The nFFTPoints copy loop that followed the raise was
            # unreachable dead code and has been removed.
            raise ValueError('Digital RF cannot be used with this data type')
        # Split every complex sample into its real/imag components.
        for i in range(self.dataOut.systemHeaderObj.nSamples):
            for channel in self.dataOut.channelList:
                self.arr_data[i][channel]['r'] = self.dataOut.data[channel][i].real
                self.arr_data[i][channel]['i'] = self.dataOut.data[channel][i].imag

        def f():
            return self.digitalWriteObj.rf_write(self.arr_data)
        self.timeit(f)
        return

    def run(self, dataOut, frequency=49.92e6, path=None, fileCadence=1000, dirCadence=36000, metadataCadence=1, **kwargs):
        '''
        This method will be called many times so here you should put all your code

        Inputs:
            dataOut: object with the data
        '''
        self.dataOut = dataOut
        if not self.isConfig:
            self.setup(dataOut, path, frequency, fileCadence,
                       dirCadence, metadataCadence, **kwargs)
            self.writeMetadata()

        self.writeData()

        ## self.currentSample += 1
        # if self.dataOut.flagDataAsBlock or self.currentSample == 1:
        # self.writeMetadata()
        ## if self.currentSample == self.__nProfiles: self.currentSample = 0

        # Return dataOut so downstream operations keep receiving the data
        # (this return was missing in the Python 2.7 version).
        return dataOut

    def close(self):
        print('[Writing] - Closing files ')
        print('Average of writing to digital rf format is ', self.oldAverage * 1000)
        try:
            self.digitalWriteObj.close()
        except Exception:
            # Best-effort close: the writer may never have been created
            # (narrowed from a bare `except:` that also swallowed
            # KeyboardInterrupt / SystemExit).
            pass
| [
"datetime.datetime.utcfromtimestamp",
"pickle.dumps",
"time.sleep",
"schainpy.model.data.jroheaderIO.SystemHeader",
"digital_rf.DigitalRFReader",
"pickle.loads",
"datetime.timedelta",
"numpy.arange",
"datetime.datetime",
"datetime.time",
"fractions.Fraction",
"os.path.isdir",
"datetime.datet... | [((814, 843), 'schainpy.model.proc.jroproc_base.ProcessingUnit.__init__', 'ProcessingUnit.__init__', (['self'], {}), '(self)\n', (837, 843), False, 'from schainpy.model.proc.jroproc_base import ProcessingUnit, Operation, MPDecorator\n'), ((885, 894), 'schainpy.model.data.jrodata.Voltage', 'Voltage', ([], {}), '()\n', (892, 894), False, 'from schainpy.model.data.jrodata import Voltage\n'), ((5837, 5887), 'datetime.datetime.utcfromtimestamp', 'datetime.datetime.utcfromtimestamp', (['startUTCSecond'], {}), '(startUTCSecond)\n', (5871, 5887), False, 'import datetime\n'), ((5917, 5965), 'datetime.datetime.utcfromtimestamp', 'datetime.datetime.utcfromtimestamp', (['endUTCSecond'], {}), '(endUTCSecond)\n', (5951, 5965), False, 'import datetime\n'), ((6646, 6668), 'datetime.time', 'datetime.time', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (6659, 6668), False, 'import datetime\n'), ((6692, 6717), 'datetime.time', 'datetime.time', (['(23)', '(59)', '(59)'], {}), '(23, 59, 59)\n', (6705, 6717), False, 'import datetime\n'), ((7016, 7051), 'numpy.ones', 'numpy.ones', (['(1, 1)'], {'dtype': 'numpy.int'}), '((1, 1), dtype=numpy.int)\n', (7026, 7051), False, 'import numpy\n'), ((13355, 13442), 'numpy.zeros', 'numpy.zeros', (['(self.__num_subchannels, self.__samples_to_read)'], {'dtype': 'numpy.complex'}), '((self.__num_subchannels, self.__samples_to_read), dtype=numpy.\n complex)\n', (13366, 13442), False, 'import numpy\n'), ((15794, 15805), 'time.time.time', 'time.time', ([], {}), '()\n', (15803, 15805), False, 'from time import time\n'), ((22487, 22521), 'schainpy.model.proc.jroproc_base.Operation.__init__', 'Operation.__init__', (['self'], {}), '(self, **kwargs)\n', (22505, 22521), False, 'from schainpy.model.proc.jroproc_base import ProcessingUnit, Operation, MPDecorator\n'), ((22866, 22898), 'pickle.dumps', 'pickle.dumps', (['self.dataOut.dtype'], {}), '(self.dataOut.dtype)\n', (22878, 22898), False, 'import pickle\n'), ((25424, 25672), 
'digital_rf.DigitalRFWriter', 'digital_rf.DigitalRFWriter', (['path', 'self.__dtype', 'dirCadence', 'fileCadence', 'start_global_index', 'sample_rate_numerator', 'sample_rate_denominator', 'uuid', 'compression_level', 'checksum', 'is_complex', 'num_subchannels', 'is_continuous', 'marching_periods'], {}), '(path, self.__dtype, dirCadence, fileCadence,\n start_global_index, sample_rate_numerator, sample_rate_denominator,\n uuid, compression_level, checksum, is_complex, num_subchannels,\n is_continuous, marching_periods)\n', (25450, 25672), False, 'import digital_rf\n'), ((25866, 25896), 'os.path.join', 'os.path.join', (['path', '"""metadata"""'], {}), "(path, 'metadata')\n", (25878, 25896), False, 'import os\n'), ((25905, 25941), 'os.system', 'os.system', (["('mkdir %s' % metadata_dir)"], {}), "('mkdir %s' % metadata_dir)\n", (25914, 25941), False, 'import os\n'), ((25983, 26110), 'digital_rf.DigitalMetadataWriter', 'digital_rf.DigitalMetadataWriter', (['metadata_dir', 'dirCadence', '(1)', 'sample_rate_numerator', 'sample_rate_denominator', 'metadataFile'], {}), '(metadata_dir, dirCadence, 1,\n sample_rate_numerator, sample_rate_denominator, metadataFile)\n', (26015, 26110), False, 'import digital_rf\n'), ((27019, 27025), 'time.time', 'time', ([], {}), '()\n', (27023, 27025), False, 'from time import time\n'), ((1944, 1995), 'schainpy.model.data.jroheaderIO.RadarControllerHeader', 'RadarControllerHeader', (['self.__radarControllerHeader'], {}), '(self.__radarControllerHeader)\n', (1965, 1995), False, 'from schainpy.model.data.jroheaderIO import RadarControllerHeader, SystemHeader\n'), ((2505, 2538), 'schainpy.model.data.jroheaderIO.SystemHeader', 'SystemHeader', (['self.__systemHeader'], {}), '(self.__systemHeader)\n', (2517, 2538), False, 'from schainpy.model.data.jroheaderIO import RadarControllerHeader, SystemHeader\n'), ((4977, 4996), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (4990, 4996), False, 'import os\n'), ((5063, 5119), 
'digital_rf.DigitalRFReader', 'digital_rf.DigitalRFReader', (['path'], {'load_all_metadata': '(True)'}), '(path, load_all_metadata=True)\n', (5089, 5119), False, 'import digital_rf\n'), ((6485, 6506), 'datetime.timedelta', 'datetime.timedelta', (['(1)'], {}), '(1)\n', (6503, 6506), False, 'import datetime\n'), ((7536, 7555), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (7549, 7555), False, 'import os\n'), ((7682, 7738), 'digital_rf.DigitalRFReader', 'digital_rf.DigitalRFReader', (['path'], {'load_all_metadata': '(True)'}), '(path, load_all_metadata=True)\n', (7708, 7738), False, 'import digital_rf\n'), ((9316, 9363), 'pickle.loads', 'pickle.loads', (["self.fixed_metadata_dict['dtype']"], {}), "(self.fixed_metadata_dict['dtype'])\n", (9328, 9363), False, 'import pickle\n'), ((10501, 10548), 'datetime.datetime.combine', 'datetime.datetime.combine', (['startDate', 'startTime'], {}), '(startDate, startTime)\n', (10526, 10548), False, 'import datetime\n'), ((10726, 10769), 'datetime.datetime.combine', 'datetime.datetime.combine', (['endDate', 'endTime'], {}), '(endDate, endTime)\n', (10751, 10769), False, 'import datetime\n'), ((13117, 13142), 'numpy.floor', 'numpy.floor', (['endUTCSecond'], {}), '(endUTCSecond)\n', (13128, 13142), False, 'import numpy\n'), ((15856, 15867), 'time.time.time', 'time.time', ([], {}), '()\n', (15865, 15867), False, 'from time import time\n'), ((27076, 27082), 'time.time', 'time', ([], {}), '()\n', (27080, 27082), False, 'from time import time\n'), ((2081, 2315), 'schainpy.model.data.jroheaderIO.RadarControllerHeader', 'RadarControllerHeader', ([], {'txA': '(0)', 'txB': '(0)', 'nWindows': '(1)', 'nHeights': 'self.__nSamples', 'firstHeight': 'self.__firstHeigth', 'deltaHeight': 'self.__deltaHeigth', 'codeType': 'self.__codeType', 'nCode': 'self.__nCode', 'nBaud': 'self.__nBaud', 'code': 'self.__code'}), '(txA=0, txB=0, nWindows=1, nHeights=self.__nSamples,\n firstHeight=self.__firstHeigth, deltaHeight=self.__deltaHeigth,\n 
codeType=self.__codeType, nCode=self.__nCode, nBaud=self.__nBaud, code=\n self.__code)\n', (2102, 2315), False, 'from schainpy.model.data.jroheaderIO import RadarControllerHeader, SystemHeader\n'), ((3252, 3300), 'numpy.arange', 'numpy.arange', (['self.__nSamples'], {'dtype': 'numpy.float'}), '(self.__nSamples, dtype=numpy.float)\n', (3264, 3300), False, 'import numpy\n'), ((5182, 5214), 'digital_rf.DigitalRFReader', 'digital_rf.DigitalRFReader', (['path'], {}), '(path)\n', (5208, 5214), False, 'import digital_rf\n'), ((7806, 7838), 'digital_rf.DigitalRFReader', 'digital_rf.DigitalRFReader', (['path'], {}), '(path)\n', (7832, 7838), False, 'import digital_rf\n'), ((14892, 14929), 'digital_rf.DigitalRFReader', 'digital_rf.DigitalRFReader', (['self.path'], {}), '(self.path)\n', (14918, 14929), False, 'import digital_rf\n'), ((16611, 16619), 'time.sleep', 'sleep', (['(3)'], {}), '(3)\n', (16616, 16619), False, 'from time import sleep\n'), ((21032, 21051), 'time.time.sleep', 'time.sleep', (['seconds'], {}), '(seconds)\n', (21042, 21051), False, 'from time import time\n'), ((24875, 24903), 'fractions.Fraction', 'Fraction', (['self.__sample_rate'], {}), '(self.__sample_rate)\n', (24883, 24903), False, 'from fractions import Fraction\n'), ((13596, 13671), 'datetime.datetime.utcfromtimestamp', 'datetime.datetime.utcfromtimestamp', (['(self.__startUTCSecond - self.__timezone)'], {}), '(self.__startUTCSecond - self.__timezone)\n', (13630, 13671), False, 'import datetime\n'), ((13702, 13775), 'datetime.datetime.utcfromtimestamp', 'datetime.datetime.utcfromtimestamp', (['(self.__endUTCSecond - self.__timezone)'], {}), '(self.__endUTCSecond - self.__timezone)\n', (13736, 13775), False, 'import datetime\n'), ((13865, 13933), 'datetime.datetime.utcfromtimestamp', 'datetime.datetime.utcfromtimestamp', (['(startUTCSecond - self.__timezone)'], {}), '(startUTCSecond - self.__timezone)\n', (13899, 13933), False, 'import datetime\n'), ((13995, 14061), 
'datetime.datetime.utcfromtimestamp', 'datetime.datetime.utcfromtimestamp', (['(endUTCSecond - self.__timezone)'], {}), '(endUTCSecond - self.__timezone)\n', (14029, 14061), False, 'import datetime\n'), ((17158, 17164), 'time.time', 'time', ([], {}), '()\n', (17162, 17164), False, 'from time import time\n'), ((18967, 19036), 'datetime.datetime.utcfromtimestamp', 'datetime.datetime.utcfromtimestamp', (['(self.thisSecond - self.__timezone)'], {}), '(self.thisSecond - self.__timezone)\n', (19001, 19036), False, 'import datetime\n'), ((15459, 15534), 'datetime.datetime.utcfromtimestamp', 'datetime.datetime.utcfromtimestamp', (['(self.__startUTCSecond - self.__timezone)'], {}), '(self.__startUTCSecond - self.__timezone)\n', (15493, 15534), False, 'import datetime\n'), ((15573, 15646), 'datetime.datetime.utcfromtimestamp', 'datetime.datetime.utcfromtimestamp', (['(self.__endUTCSecond - self.__timezone)'], {}), '(self.__endUTCSecond - self.__timezone)\n', (15607, 15646), False, 'import datetime\n'), ((17498, 17504), 'time.time', 'time', ([], {}), '()\n', (17502, 17504), False, 'from time import time\n'), ((10612, 10641), 'datetime.datetime', 'datetime.datetime', (['(1970)', '(1)', '(1)'], {}), '(1970, 1, 1)\n', (10629, 10641), False, 'import datetime\n'), ((10813, 10842), 'datetime.datetime', 'datetime.datetime', (['(1970)', '(1)', '(1)'], {}), '(1970, 1, 1)\n', (10830, 10842), False, 'import datetime\n'), ((17987, 18056), 'datetime.datetime.utcfromtimestamp', 'datetime.datetime.utcfromtimestamp', (['(self.thisSecond - self.__timezone)'], {}), '(self.thisSecond - self.__timezone)\n', (18021, 18056), False, 'import datetime\n'), ((18300, 18369), 'datetime.datetime.utcfromtimestamp', 'datetime.datetime.utcfromtimestamp', (['(self.thisSecond - self.__timezone)'], {}), '(self.thisSecond - self.__timezone)\n', (18334, 18369), False, 'import datetime\n')] |
# Interactive apps to show solutions of Riemann problems.
# Left and right states in the phase plane can be dragged
# and dropped, as well as the time bar on the x-t plane.
# MPLD3: D3-javascript plugin for matplotlib http://mpld3.github.io/
# Questions contact <NAME>. @maojrs: <EMAIL>
"""
In order to run the following code in python, try running the tests at
the end of the code. If you are using the jupyter notebooks, remember
switching mpld3.show() to mpld3.display()
"""
import numpy as np
import jinja2
import json
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.path as mpath
import matplotlib.patches as mpatches
import mpld3
from mpld3 import plugins, utils
# Shallow water interactive phase plane plugin in mpld3
class PPlaneNLPlugin(plugins.PluginBase):
JAVASCRIPT = r"""
// init custom PPLaneNL plugin
mpld3.register_plugin("drag", PPlaneNLPlugin);
PPlaneNLPlugin.prototype = Object.create(mpld3.Plugin.prototype);
PPlaneNLPlugin.prototype.constructor = PPlaneNLPlugin;
PPlaneNLPlugin.prototype.requiredProps = ["id", "idmpoint",
"idg", "iditer", "iditer_charac", "idtime", "idhmax", "idoffm",
"idhugol", "idhugor", "idintcl", "idintcr",
"idhugol2", "idhugor2", "idintcl2", "idintcr2",
"idqlm", "idqmm", "idqrm",
"idshock1", "idshock2", "idrar1", "idrar2",
"idrar1a", "idrar2a", "idrar1b", "idrar2b",
"idrar1c", "idrar2c", "idrar1d", "idrar2d",
"idtimedot", "idtimeline",
"idq1", "idq2"];
PPlaneNLPlugin.prototype.defaultProps = {}
function PPlaneNLPlugin(fig, props){
mpld3.Plugin.call(this, fig, props);
mpld3.insert_css("#" + fig.figid + " path.dragging",
{"fill-opacity": "1.0 !important",
"stroke-opacity": "1.0 !important"});
};
// Call draw function, this function is being looped all the time
PPlaneNLPlugin.prototype.draw = function(){
// Get elements into script variables
var obj = mpld3.get_element(this.props.id);
var midpoint = mpld3.get_element(this.props.idmpoint);
var g = this.props.idg;
var iter = this.props.iditer;
var iter_charac = this.props.iditer_charac;
var time = this.props.idtime;
var hmax = this.props.idhmax;
var offm = this.props.idoffm;
var hugol = mpld3.get_element(this.props.idhugol);
var hugor = mpld3.get_element(this.props.idhugor);
var intcl = mpld3.get_element(this.props.idintcl);
var intcr = mpld3.get_element(this.props.idintcr);
var hugol2 = mpld3.get_element(this.props.idhugol2);
var hugor2 = mpld3.get_element(this.props.idhugor2);
var intcl2 = mpld3.get_element(this.props.idintcl2);
var intcr2 = mpld3.get_element(this.props.idintcr2);
var qlm = mpld3.get_element(this.props.idqlm);
var qmm = mpld3.get_element(this.props.idqmm);
var qrm = mpld3.get_element(this.props.idqrm);
var shock1 = mpld3.get_element(this.props.idshock1);
var shock2 = mpld3.get_element(this.props.idshock2);
var rar1 = mpld3.get_element(this.props.idrar1);
var rar2 = mpld3.get_element(this.props.idrar2);
var rar1a = mpld3.get_element(this.props.idrar1a);
var rar2a = mpld3.get_element(this.props.idrar2a);
var rar1b = mpld3.get_element(this.props.idrar1b);
var rar2b = mpld3.get_element(this.props.idrar2b);
var rar1c = mpld3.get_element(this.props.idrar1c);
var rar2c = mpld3.get_element(this.props.idrar2c);
var rar1d = mpld3.get_element(this.props.idrar1d);
var rar2d = mpld3.get_element(this.props.idrar2d);
var timedot = mpld3.get_element(this.props.idtimedot);
var timeline = mpld3.get_element(this.props.idtimeline);
var q1 = mpld3.get_element(this.props.idq1);
var q2 = mpld3.get_element(this.props.idq2);
// Set initial conditions for javascript calculations
//var qleft = obj.offsets[0];
//var qright = obj.offsets[1];
//var qmid = midpoint.offsets[0];
var offx = obj.ax.x(offm);
var offy = offx;
// Define initial values of hl, hr, hul and hur
var hl = obj.offsets[0][0];
var hul = obj.offsets[0][1];
var hr = obj.offsets[1][0];
var hur = obj.offsets[1][1];
hmfinal = 0;
humfinal = 0;
// Main d3 drag function
var drag = d3.behavior.drag()
.origin(function(d) { return {x:obj.ax.x(d[0]),
y:obj.ax.y(d[1])}; })
.on("dragstart", dragstarted)
.on("drag", dragged)
.on("dragend", dragended);
// Dragtime function for draggable time-dot (analog to main)
var dragtime = d3.behavior.drag()
.origin(function(d) { return {x:timedot.ax.x(d[0]),
y:timedot.ax.y(d[1])}; })
.on("dragstart", dragstarted)
.on("drag", dragged)
.on("dragend", dragended);
// Set elements of ql and qr draggable points and call main drag function
obj.elements()
.data(obj.offsets)
.style("cursor", "default")
.call(drag);
// Set elements for timedot draggable point and call dragtime function
timedot.elements()
.data(timedot.offsets)
.style("cursor", "default")
.call(dragtime);
// Begin phi and phi prime function
function phi(h,hl,hr,hul,hur) {
var ul = hul/hl;
var ur = hur/hr;
if (h < hl) {
var termIC1 = 2.0*Math.sqrt(g*hl);
var termIC2 = 2.0*Math.sqrt(g*h);
var phil = ul + termIC1 - termIC2;
var philp = -Math.sqrt(g/h);
}
if (h >= hl) {
var termHL1 = (h - hl);
var termHL2 = Math.sqrt(0.5*g*(1.0/h + 1.0/hl));
var termHL3 = termHL1*termHL2;
var phil = ul - termHL3 ;
var nume = termHL2*4*h*h;
var philp = -termHL2 + termHL1*g/nume;
}
if (h < hr) {
var termIC1 = 2.0*Math.sqrt(g*hr);
var termIC2 = 2.0*Math.sqrt(g*h);
var phir = ur - termIC1 + termIC2;
var phirp = Math.sqrt(g/h);
}
if (h >= hr) {
var termHL1 = (h - hr);
var termHL2 = Math.sqrt(0.5*g*(1.0/h + 1.0/hr));
var termHL3 = termHL1*termHL2;
var phir = ur + termHL3 ;
var nume = termHL2*4*h*h;
var phirp = termHL2 - termHL1*g/nume;
}
var phi = phil-phir;
var phiprime = philp - phirp
var um = 0.5*(phil + phir);
return [phi,phiprime,um];
}
// Begin Newton iteration
function newton(hstar,hl,hr,hul,hur) {
var hn = hstar;
var error = 1;
while (error > 0.005) {
var hold = hn;
var phin = phi(hn,hl,hr,hul,hur);
var hn = hold - phin[0]/phin[1];
var um = phin[2];
error = Math.abs(hold - hn);
}
return [hn,um];
}
// Calculate hugoniot loci
function hugoloci(hm,hside,huside,sign) {
var termHL1 = hm*(hm - hside);
if (hm == 0) {
var termHL2 = 1.0; }
else {
var termHL2 = Math.sqrt(0.5*g*(1.0/hm + 1.0/hside));}
var termHL3 = termHL1*termHL2;
var hloci = hm*huside/hside + sign*termHL3;
return hloci;}
// Calculate integral curve
function integralcurve(hm,hside,huside,sign) {
var termIC1 = 2.0*hm*Math.sqrt(g*hside);
var termIC2 = 2.0*hm*Math.sqrt(g*hm);
var intcurve = hm*huside/hside - sign*(termIC1 - termIC2);
return intcurve;}
// Calculate solution plot of h and hu as function of x
function solplot(hl,hm,hr,hul,hum,hur) {
var lam = [];
lam.push([-1000000, 0,0])
var ul = hul/hl;
var um = hum/hm;
var ur = hur/hr;
var shock1 = false;
var shock2 = false;
var qsol1 = d3.range(iter);
var qsol2 = d3.range(iter);
if (hm >= hl) {
var s1 = time*(hum - hul)/(hm - hl);
shock1 = true;
} else {
var s1b = [0,0];
s1b[0] = time*(ul - Math.sqrt(g*hl));
s1b[1] = time*(um - Math.sqrt(g*hm));
s1b.sort(d3.ascending);
}
if (hm >= hr) {
var s2 = time*(hur - hum)/(hr - hm);
shock2 = true;
} else {
var s2b = [0,0];
s2b[0] = time*(um + Math.sqrt(g*hm));
s2b[1] = time*(ur + Math.sqrt(g*hr));
s2b.sort(d3.ascending);
}
for (var ii=0; ii <2*iter; ii++){
var xx = q1.data[ii][0];
// Calculate plot solution for 1 charactersitic (shock or rarefaction)
if (shock1) {
if (xx <= s1){ qsol1[ii] = hl; qsol2[ii] = hul;}
else { qsol1[ii] = hm; qsol2[ii] = hum;}
}
else {
if (xx <= s1b[0]) { qsol1[ii] = hl; qsol2[ii] = hul;}
else if (xx <= s1b[1]) {
var m1 = (hm - hl)/(s1b[1] - s1b[0]);
var m2 = (hum - hul)/(s1b[1] - s1b[0]);
qsol1[ii] = m1*(xx - s1b[0]) + hl;
qsol2[ii] = m2*(xx - s1b[0]) + hul;}
else { qsol1[ii] = hm; qsol2[ii] = hum;}
}
// Calculate plot solution for 2 charactersitic (shock or rarefaction)
if (shock2) {
if (xx > s2) {qsol1[ii] = hr; qsol2[ii] = hur;}
}
else{
if (xx > s2b[0] && xx < s2b[1]) {
var m1 = (hr - hm)/(s2b[1] - s2b[0]);
var m2 = (hur - hum)/(s2b[1] - s2b[0]);
qsol1[ii] = m1*(xx - s2b[0]) + hm;
qsol2[ii] = m2*(xx - s2b[0]) + hum;}
else if (xx > s2b[1]) { qsol1[ii] = hr; qsol2[ii] = hur;}
}
}
var solution = [qsol1,qsol2];
return solution;
}
// Function to update middle state
function update_midstate(){
var hstar = 0.05*(hl + hr);
var solution = newton(hstar,hl,hr,hul,hur);
hmfinal = solution[0];
humfinal = solution[1]*hmfinal;
var xx = obj.ax.x(hmfinal);
var yy = obj.ax.y(humfinal);
// Update middle state point and marker position
midpoint.elements().transition().duration(5)
.attr("transform", "translate(" + [xx, yy] + ")");
// Move marker
qmm.elements().transition().duration(1)
.attr("transform", "translate(" + [xx + 0.7*offx, yy + 0.7*offy] + ")");
}
// Functon to update xt-plane
function update_xtplane() {
// Calculate shcok speeds from R-H conditions
var lam1 = (humfinal - hul)/(hmfinal - hl);
var lam2 = (hur - humfinal)/(hr - hmfinal);
var lam1m = humfinal/hmfinal - Math.sqrt(g*hmfinal);
var lam2m = humfinal/hmfinal + Math.sqrt(g*hmfinal);
var lam1l = hul/hl - Math.sqrt(g*hl);
var lam2r = hur/hr + Math.sqrt(g*hr);
var color1 = "red";
var color2 = "red";
var thick1 = 4;
var thick2 = 4;
var fan = 0;
for (var ii=0; ii<iter_charac; ii++) {
if (hmfinal >= hl) {
shock1.data[ii][1] = shock1.data[ii][0]/lam1;
rar1.data[ii][1] = rar1.data[ii][0]/lam1;
rar1a.data[ii][1] = rar1a.data[ii][0]/lam1;
rar1b.data[ii][1] = rar1b.data[ii][0]/lam1;
rar1c.data[ii][1] = rar1c.data[ii][0]/lam1;
rar1d.data[ii][1] = rar1d.data[ii][0]/lam1;
console.log(rar2a.data);
color1 = "red";
thick1 = 4;
} else {
shock1.data[ii][1] = shock1.data[ii][0]/lam1l;
rar1.data[ii][1] = rar1.data[ii][0]/lam1m;
fan = Math.abs(lam1m - lam1l)/5;
rar1a.data[ii][1] = rar1a.data[ii][0]/(lam1l + fan);
rar1b.data[ii][1] = rar1b.data[ii][0]/(lam1l + 2*fan);
rar1c.data[ii][1] = rar1c.data[ii][0]/(lam1l + 3*fan);
rar1d.data[ii][1] = rar1d.data[ii][0]/(lam1l + 4*fan);
color1 = "blue";
thick1 = 1;
}
if (hmfinal >= hr) {
shock2.data[ii][2] = shock2.data[ii][0]/lam2;
rar2.data[ii][2] = rar2.data[ii][0]/lam2;
rar2a.data[ii][2] = rar2a.data[ii][0]/lam2;
rar2b.data[ii][2] = rar2b.data[ii][0]/lam2;
rar2c.data[ii][2] = rar2c.data[ii][0]/lam2;
rar2d.data[ii][2] = rar2d.data[ii][0]/lam2;
color2 = "red";
thick2 = 4;
} else {
shock2.data[ii][2] = shock2.data[ii][0]/lam2r;
rar2.data[ii][2] = rar2.data[ii][0]/lam2m;
fan = Math.abs(lam2r - lam2m)/5;
rar2a.data[ii][2] = rar2a.data[ii][0]/(lam2m + fan);
rar2b.data[ii][2] = rar2b.data[ii][0]/(lam2m + 2*fan);
rar2c.data[ii][2] = rar2c.data[ii][0]/(lam2m + 3*fan);
rar2d.data[ii][2] = rar2d.data[ii][0]/(lam2m + 4*fan);
color2 = "blue";
thick2 = 1;
}
}
// Do transitions
shock1.elements().transition().duration(5)
.attr("d", shock1.datafunc(shock1.data))
.style("stroke", color1)
.style("stroke-width", thick1);
shock2.elements().transition().duration(5)
.attr("d", shock2.datafunc(shock2.data))
.style("stroke", color2)
.style("stroke-width", thick2);
rar1.elements().transition().duration(5)
.attr("d", rar1.datafunc(rar1.data))
.style("stroke", color1)
.style("stroke-width", thick1);
rar2.elements().transition().duration(5)
.attr("d", rar2.datafunc(rar2.data))
.style("stroke", color2)
.style("stroke-width", thick2);
rar1a.elements().transition().duration(5)
.attr("d", rar1a.datafunc(rar1a.data))
.style("stroke", color1)
.style("stroke-width", thick1);
rar2a.elements().transition().duration(5)
.attr("d", rar2a.datafunc(rar2a.data))
.style("stroke", color2)
.style("stroke-width", thick2);
rar1b.elements().transition().duration(5)
.attr("d", rar1b.datafunc(rar1b.data))
.style("stroke", color1)
.style("stroke-width", thick1);
rar2b.elements().transition().duration(5)
.attr("d", rar2b.datafunc(rar2b.data))
.style("stroke", color2)
.style("stroke-width", thick2);
rar1c.elements().transition().duration(5)
.attr("d", rar1c.datafunc(rar1c.data))
.style("stroke", color1)
.style("stroke-width", thick1);
rar2c.elements().transition().duration(5)
.attr("d", rar2c.datafunc(rar2c.data))
.style("stroke", color2)
.style("stroke-width", thick2);
rar1d.elements().transition().duration(5)
.attr("d", rar1d.datafunc(rar1d.data))
.style("stroke", color1)
.style("stroke-width", thick1);
rar2d.elements().transition().duration(5)
.attr("d", rar2d.datafunc(rar2d.data))
.style("stroke", color2)
.style("stroke-width", thick2);
}
// Function to update solution plots
function update_solplots() {
var qsol = solplot(hl,hmfinal,hr,hul,humfinal,hur);
for (var ii=0; ii<2*iter; ii++){
q1.data[ii][1] = qsol[0][ii];
q2.data[ii][2] = qsol[1][ii];
}
// Do transitions
q1.elements().transition().duration(5)
.attr("d", q1.datafunc(q1.data));
q2.elements().transition().duration(5)
.attr("d", q2.datafunc(q2.data));
}
// Initialize solution with given initial states before interacting
update_midstate();
update_xtplane();
update_solplots();
// Begin drag function
function dragstarted(d) {
d3.event.sourceEvent.stopPropagation();
d3.select(this).classed("dragging", true);
}
// The drag function called while dragging is happening (meat of code here)
function dragged(d,i) {
if (i == 0 || i ==1) {
// Convert mouse coordinates in drag event (d3.event) to python coordinates d
d[0] = obj.ax.x.invert(d3.event.x);
d[1] = obj.ax.y.invert(d3.event.y);
// Move ql and qr stored in obj (they have been selected in drag)
d3.select(this)
.attr("transform", "translate(" + [d3.event.x,d3.event.y] + ")");
// If obj corresponds to ql, move all the other left elements
}
if (i==0){
// Move marker
qlm.elements().transition().duration(1)
.attr("transform", "translate(" + [d3.event.x + offx, d3.event.y + offy] + ")");
// Re-calculate inital left variables when dragging
hl = obj.offsets[0][0];
hul = obj.offsets[0][1];
// Draw Hugoniot loci through left state
for (var ii=0; ii<iter; ii++) {
//Left from hl
hugol.data[ii][0] = ii*d[0]/(1.0*iter);
var hm = hugol.data[ii][0];
hugol.data[ii][1] = hugoloci(hm,hl,hul,-1) ;
//Right from hl
hugol2.data[ii][0] = d[0] + ii*(hmax-d[0])/(1.0*iter);
var hm2 = hugol2.data[ii][0];
hugol2.data[ii][1] = hugoloci(hm2,hl,hul,-1) ;
}
// Do transitions
hugol.elements().transition().duration(5)
.attr("d", hugol.datafunc(hugol.data))
.style("stroke-dasharray", ("7,7"));
hugol2.elements().transition().duration(5)
.attr("d", hugol2.datafunc(hugol2.data));
// Draw integral curve through left state (note index bug for intcl.data[ii][j])
// and intcl2 it should be j =0,1 not j=1,2. Arrays somehow grow of sixe in mpld3
for (var ii=0; ii<iter; ii++) {
//Left from hl
intcl.data[ii][1] = ii*d[0]/(1.0*iter);
var hm = intcl.data[ii][1];
intcl.data[ii][2] = integralcurve(hm,hl,hul,-1);
//Right from hl
intcl2.data[ii][1] = d[0] + ii*(hmax-d[0])/(1.0*iter);
var hm2 = intcl2.data[ii][1];
intcl2.data[ii][2] = integralcurve(hm2,hl,hul,-1);
}
// Do transitions
intcl.elements().transition().duration(5)
.attr("d", intcl.datafunc(intcl.data));
intcl2.elements().transition().duration(5)
.attr("d", intcl2.datafunc(intcl2.data))
.style("stroke-dasharray", ("7,7"));
}
// if element corresponds to qr
else if (i==1) {
// Move marker
qrm.elements().transition().duration(1)
.attr("transform", "translate(" + [d3.event.x + offx, d3.event.y + offy] + ")");
// Re-calculate inital right variables when dragging
hr = obj.offsets[1][0];
hur = obj.offsets[1][1];
// Draw Hugoniot loci through right state
for (var ii=0; ii<iter; ii++) {
//Left from hr
hugor.data[ii][0] = ii*d[0]/(1.0*iter);
var hm = hugor.data[ii][0];
hugor.data[ii][1] = hugoloci(hm,hr,hur,1) ;
//Right from hr
hugor2.data[ii][0] = d[0] + ii*(hmax-d[0])/(1.0*iter);
var hm2 = hugor2.data[ii][0];
hugor2.data[ii][1] = hugoloci(hm2,hr,hur,1) ;
}
// Do transitions
hugor.elements().transition().duration(5)
.attr("d", hugor.datafunc(hugor.data))
.style("stroke-dasharray", ("7,7"));
hugor2.elements().transition().duration(5)
.attr("d", hugor2.datafunc(hugor2.data));
// Draw integral curve through right state (note index bug for intcr.data[ii][j])
// and intcr2 it should be j =0,1 not j=1,2. Arrays somehow grow of sixe in mpld3
for (var ii=0; ii<iter; ii++) {
//Left from hr
intcr.data[ii][1] = ii*d[0]/(1.0*iter);
var hm = intcr.data[ii][1];
intcr.data[ii][2] = integralcurve(hm,hr,hur,1);
//Right from hr
intcr2.data[ii][1] = d[0] + ii*(hmax-d[0])/(1.0*iter);
var hm2 = intcr2.data[ii][1];
intcr2.data[ii][2] = integralcurve(hm2,hr,hur,1);
}
// Do transitions
intcr.elements().transition().duration(5)
.attr("d", intcl.datafunc(intcr.data));
intcr2.elements().transition().duration(5)
.attr("d", intcr2.datafunc(intcr2.data))
.style("stroke-dasharray", ("7,7"));
}
// If time marker is moved
else if (i==2) {
// Convert mouse coordinates in drag event (d3.event) to python coordinates d
d[0] = timedot.ax.x.invert(d3.event.x);
d[1] = timedot.ax.y.invert(d3.event.y);
d3.select(this)
.attr("transform", "translate(" + [d3.event.x,d3.event.y] + ")");
// Calculate timedot position and assign it
var ty = timedot.ax.y.invert(d3.event.y);
if (ty >=0) {
timeline.data[0][1] = ty;
timeline.data[1][1] = ty;
timeline.data[2][1] = ty;
time = ty;
}
// Do transitions
timeline.elements().transition().duration(5)
.attr("d", timeline.datafunc(timeline.data));
}
// Calculate middle state
update_midstate();
// Calculate solution plots of h and hu
update_solplots();
// Update characteristic in x-t plane
update_xtplane();
}
// End dragging
function dragended(d) {
d3.select(this).classed("dragging", false);
}
}
mpld3.register_plugin("drag", PPlaneNLPlugin);
"""
def __init__(self, points, midpoint,
g, iters, iter_charac, time, hmax, offm,
hugol, hugor, intcl, intcr,
hugol2, hugor2, intcl2, intcr2,
qlmarker, qmmarker ,qrmarker,
shock1, shock2, rar1, rar2,
rar1a,rar2a,rar1b,rar2b,
rar1c,rar2c,rar1d,rar2d,
timedot, timeline,
q1, q2):
if isinstance(points, mpl.lines.Line2D):
suffix = "pts"
else:
suffix = None
self.dict_ = {"type": "drag",
"id": utils.get_id(points, suffix),
"idmpoint": utils.get_id(midpoint,suffix),
"idg" : g,
"iditer" : iters,
"iditer_charac" : iter_charac,
"idtime" : time,
"idhmax" : hmax,
"idoffm": offm,
"idhugol" : utils.get_id(hugol),
"idhugor" : utils.get_id(hugor),
"idintcl" : utils.get_id(intcl),
"idintcr" : utils.get_id(intcr),
"idhugol2" : utils.get_id(hugol2),
"idhugor2" : utils.get_id(hugor2),
"idintcl2" : utils.get_id(intcl2),
"idintcr2" : utils.get_id(intcr2),
"idqlm": utils.get_id(qlmarker,suffix),
"idqmm": utils.get_id(qmmarker,suffix),
"idqrm": utils.get_id(qrmarker,suffix),
"idshock1" : utils.get_id(shock1),
"idshock2" : utils.get_id(shock2),
"idrar1" : utils.get_id(rar1),
"idrar2" : utils.get_id(rar2),
"idrar1a" : utils.get_id(rar1a),
"idrar2a" : utils.get_id(rar2a),
"idrar1b" : utils.get_id(rar1b),
"idrar2b" : utils.get_id(rar2b),
"idrar1c" : utils.get_id(rar1c),
"idrar2c" : utils.get_id(rar2c),
"idrar1d" : utils.get_id(rar1d),
"idrar2d" : utils.get_id(rar2d),
"idtimedot" : utils.get_id(timedot,suffix),
"idtimeline" : utils.get_id(timeline),
"idq1" : utils.get_id(q1),
"idq2" : utils.get_id(q2)
}
# Python part for shallow water interactive part
# Define hugoloci and integral curve functions
def hugoloci(g, hm, hside, huside, sign):
    """Evaluate the Hugoniot locus through the state (hside, huside).

    Parameters
    ----------
    g : float
        Gravity constant.
    hm : np.ndarray
        Depths at which to evaluate the locus.  NOTE: zero entries are
        shifted to 1e-8 *in place* (the caller's array is mutated) so the
        1/hm term below stays finite.
    hside, huside : float
        Depth and momentum of the reference state.
    sign : int
        +1 or -1, selecting the wave family.

    Returns
    -------
    np.ndarray of hu values along the locus.
    """
    # Computed before the zero shift, matching the original evaluation order
    # (for hm == 0 this factor is 0 anyway).
    jump = hm * (hm - hside)
    # Nudge zero depths away from 0 to avoid division by zero below.
    hm[hm == 0] = hm[hm == 0] + 0.00000001
    speed = np.sqrt(0.5 * g * (1.0 / hm + 1.0 / hside))
    return hm * huside / hside + sign * jump * speed
def intcurve(g, hm, hside, huside, sign):
    """Evaluate the integral curve through the state (hside, huside).

    Parameters
    ----------
    g : float
        Gravity constant.
    hm : np.ndarray
        Depths at which to evaluate the curve.
    hside, huside : float
        Depth and momentum of the reference state.
    sign : int
        +1 or -1, selecting the wave family.

    Returns
    -------
    np.ndarray of hu values along the integral curve.
    """
    outer = 2.0 * hm * np.sqrt(g * hside)
    inner = 2.0 * hm * np.sqrt(g * hm)
    return hm * huside / hside - sign * (outer - inner)
# Define interactive plot routine in python (calls mpld3 plugin)
# Loaded with default values, can be called without any argument
def shallow_water(ql=None, qr=None, g=1.0, time=2.0, tmax=5.0,
                  hmax=None, humin=None, humax=None):
    """Build the interactive shallow-water Riemann figure.

    Draws the phase plane (Hugoniot loci + integral curves), the x-t plane
    characteristics and the h/hu solution subplots, then wires the figure
    to the mpld3 ``PPlaneNLPlugin`` so ql/qr can be dragged in the browser.

    Parameters
    ----------
    ql, qr : array_like (h, hu), optional
        Left/right states; default to [3, 5] and [3, -5].
    g : float
        Gravity constant.
    time : float
        Initial time shown in the solution plots.
    tmax : float
        Upper limit of the t axis in the x-t plane.
    hmax, humin, humax : float, optional
        Plot limits; derived from ql/qr when omitted.

    Returns
    -------
    matplotlib Figure connected to the mpld3 plugin.
    """
    # Fix: the defaults used to be mutable np.array objects (shared across
    # calls); resolve them here instead.
    if ql is None:
        ql = np.array([3.0, 5.0])
    if qr is None:
        qr = np.array([3.0, -5.0])
    # Create a figure
    #fig, axfull = plt.subplots(2,2, figsize=(13, 12))
    fig, axfull = plt.subplots(2, 2, figsize=(10, 8))
    fig.subplots_adjust(left=0.05, right=0.9, bottom=0.1, top=0.9,
                        hspace=0.3, wspace=0.15)
    # Have to do this to avoid issue with mpld3
    ax = axfull[0]     # First row of plots
    axsol = axfull[1]  # Second row of plots
    iters = 100
    iter_charac = 2
    # eps required for bug in mpld3 (ql[0] cannot be the same than qr[0])
    eps = 0.00000001
    # Calculate plotting boundaries if not specified
    if hmax is None:
        hmax = max(ql[0], qr[0]) + 7.0
    if humax is None:
        humax = 3 * max(abs(ql[1]), abs(qr[1]))
    if humin is None:
        humin = -3 * max(abs(ql[1]), abs(qr[1]))
    # PLOT PHASE PLANE
    xxL = np.linspace(0, ql[0], iters)
    xxL2 = np.linspace(ql[0], hmax, iters)
    xxR = np.linspace(0, qr[0] + eps, iters)
    xxR2 = np.linspace(qr[0] + eps, hmax, iters)
    # Plot midpoint (starts off-screen at (-1, -1) until the JS computes it)
    qm = -1 + 0.0 * ql
    midpoint = ax[0].plot(qm[0], qm[1], 'ok', alpha=0.9, markersize=8, markeredgewidth=1)
    # Plot hugoloci initial state
    yyL = hugoloci(g, xxL, ql[0], ql[1], -1)
    yyL2 = hugoloci(g, xxL2, ql[0], ql[1], -1)
    yyR = hugoloci(g, xxR, qr[0], qr[1], 1)
    yyR2 = hugoloci(g, xxR2, qr[0], qr[1], 1)
    hugol = ax[0].plot(xxL, yyL, '--r', linewidth=1.5)
    hugol2 = ax[0].plot(xxL2, yyL2, '-r', linewidth=2, label='Hugoniot Loci')
    hugor = ax[0].plot(xxR, yyR, '--r', linewidth=1.5, label='Hugoniot Loci (unphysical)')
    hugor2 = ax[0].plot(xxR2, yyR2, '-r', linewidth=2)
    # Plot integral curve initial state
    yyL = intcurve(g, xxL, ql[0], ql[1], -1)
    yyL2 = intcurve(g, xxL2, ql[0], ql[1], -1)
    yyR = intcurve(g, xxR, qr[0], qr[1], 1)
    yyR2 = intcurve(g, xxR2, qr[0], qr[1], 1)
    intcl = ax[0].plot(xxL, yyL, '-b', linewidth=2, label='Integral Curves')
    intcl2 = ax[0].plot(xxL2, yyL2, '--b', linewidth=1.5, label='Integral Curves (unphysical)')
    intcr = ax[0].plot(xxR, yyR, '-b', linewidth=2)
    intcr2 = ax[0].plot(xxR2, yyR2, '--b', linewidth=1.5)
    # Plot ql and qr
    points = ax[0].plot([ql[0], qr[0]], [ql[1], qr[1]], 'ok', alpha=0.7, markersize=10, markeredgewidth=1)
    #data = ["q_l", "q_r"]
    # Plot markers
    offsetx = 0.3 * hmax / 10
    offsety = -3 * offsetx
    qlmarker = ax[0].plot(ql[0] + offsetx, ql[1] + offsety, 'ok', marker=(r"$ q_l $"), markersize=15)
    qmmarker = ax[0].plot(qm[0] + offsetx, qm[1] + offsety, 'ok', marker=(r"$ q_m $"), markersize=15)
    qrmarker = ax[0].plot(qr[0] + offsetx, qr[1] + offsety, 'ok', marker=(r"$ q_r $"), markersize=15)
    # Set axis 1 properties
    ax[0].set_title("Phase plane", fontsize=18)
    ax[0].axis([0, hmax, humin, humax])
    ax[0].set_xlabel('h', fontsize=17)
    ax[0].set_ylabel('hu', fontsize=17)
    #ax[0].set_aspect('equal')
    ax[0].grid(alpha=0.1, color='k', linestyle='--')
    ax[0].legend(loc='upper left', shadow=True, fontsize=8)
    # PLOT x-t PLANE
    x_xtp = np.linspace(-10, 10, iter_charac)
    x_xtp2 = np.linspace(-11, 11, iter_charac)
    # Shock speeds lam1 and lam2
    lam2 = ql[1] / ql[0] - np.sqrt(g * ql[0])
    lam1 = qr[1] / qr[0] + np.sqrt(g * qr[0])
    char1 = x_xtp / lam1
    char2 = x_xtp / lam2
    char3 = x_xtp2 / lam1
    char4 = x_xtp2 / lam2
    shock1 = ax[1].plot(x_xtp, char1, '-k', linewidth=4, label="1 or 2 shock")
    shock2 = ax[1].plot(x_xtp, char2, '-k', linewidth=4)
    rar1 = ax[1].plot(x_xtp2, char3, '-k', linewidth=1, label="1 or 2 rarefaction")
    rar2 = ax[1].plot(x_xtp2, char4, '-k', linewidth=1)
    # Four extra rarefaction lines on each side; the slightly different x
    # ranges keep mpld3 from merging the line elements.
    x_xtp3 = np.linspace(-11.1, 11.1, iter_charac)
    x_xtp4 = np.linspace(-11.2, 11.2, iter_charac)
    x_xtp5 = np.linspace(-11.3, 11.3, iter_charac)
    x_xtp6 = np.linspace(-11.4, 11.4, iter_charac)
    char1a = x_xtp3 / lam1; char2a = x_xtp3 / lam2
    char1b = x_xtp4 / lam1; char2b = x_xtp4 / lam2
    char1c = x_xtp5 / lam1; char2c = x_xtp5 / lam2
    char1d = x_xtp6 / lam1; char2d = x_xtp6 / lam2
    rar1a = ax[1].plot(x_xtp3, char1a, '-k', linewidth=1)
    rar2a = ax[1].plot(x_xtp3, char2a, '-k', linewidth=1)
    rar1b = ax[1].plot(x_xtp4, char1b, '-k', linewidth=1)
    rar2b = ax[1].plot(x_xtp4, char2b, '-k', linewidth=1)
    rar1c = ax[1].plot(x_xtp5, char1c, '-k', linewidth=1)
    rar2c = ax[1].plot(x_xtp5, char2c, '-k', linewidth=1)
    rar1d = ax[1].plot(x_xtp6, char1d, '-k', linewidth=1)
    rar2d = ax[1].plot(x_xtp6, char2d, '-k', linewidth=1)
    # The two off-screen points keep the draggable time marker at index 2.
    timedot = ax[1].plot([100000, 1000000, 9], [-10, -10, time], 'ok', alpha=0.7, markersize=10)
    timeline = ax[1].plot([-12, 0, 12], [time, time, time], '--k', linewidth=3, label="time")
    # Set axis 2 properties
    ax[1].set_title("x-t plane", fontsize=18)
    ax[1].set_xlabel('x', fontsize=17)
    ax[1].set_ylabel('t', fontsize=17)
    ax[1].axis([-10, 10, -0, tmax])
    ax[1].grid(alpha=0.1, color='k', linestyle='--')
    ax[1].legend(loc='upper center', shadow=True, fontsize=8)
    # PLOT SOLUTIONS (initially flat at the left state; the JS updates them)
    xsol = np.linspace(-10, 10, 2 * iters)
    hsol = 0 * xsol + ql[0]
    husol = 0 * xsol + ql[1]
    q1 = axsol[0].plot(xsol, hsol, '-k', linewidth=4, alpha=1.0)
    q2 = axsol[1].plot(xsol, husol, '-k', linewidth=4, alpha=1.0)
    # NOTE(review): solplot is defined but never called anywhere in this
    # function — it looks like an unfinished Python-side solution plotter
    # (the interactive update happens in the plugin's JS instead).
    def solplot(xsol, ql, qr, qm, g):
        hl = ql[0]; hm = qm[0]; hr = qr[0]
        ul = ql[1] / hl; um = qm[1] / hm; ur = qr[1] / hr
        lam = np.empty(4, dtype=float)
    # Set axis 3 properties
    axsol[0].set_title("Depth h at time = t", fontsize=18)
    axsol[0].set_xlabel('x', fontsize=17)
    axsol[0].set_ylabel('h', fontsize=17)
    axsol[0].axis([-10, 10, 0, hmax])
    axsol[0].grid(alpha=0.1, color='k', linestyle='--')
    # Set axis 4 properties
    axsol[1].set_title("Momentum hu at time = t", fontsize=18)
    axsol[1].set_xlabel('x', fontsize=17)
    axsol[1].set_ylabel('hu', fontsize=17)
    axsol[1].axis([-10, 10, humin, humax])
    axsol[1].grid(alpha=0.1, color='k', linestyle='--')
    # Remove default mpld3 plugins
    plugins.clear(fig)
    # Call mpld3 custom PPLane plugin to interact with plot.  Argument order
    # must match PPlaneNLPlugin.__init__.
    plugins.connect(fig, PPlaneNLPlugin(points[0], midpoint[0],
                    g, iters, iter_charac, time, hmax, offsetx,
                    hugol[0], hugor[0], intcl[0], intcr[0],
                    hugol2[0], hugor2[0], intcl2[0], intcr2[0],
                    qlmarker[0], qmmarker[0], qrmarker[0],
                    shock1[0], shock2[0], rar1[0], rar2[0],
                    rar1a[0], rar2a[0], rar1b[0], rar2b[0],
                    rar1c[0], rar2c[0], rar1d[0], rar2d[0],
                    timedot[0], timeline[0],
                    q1[0], q2[0]))
    return fig
##########################################
# Plugin for interactive phase plane plot for 2D linear case
class PPlanePlugin(plugins.PluginBase):
    """mpld3 plugin: draggable phase-plane plot for the 2D linear Riemann problem.

    Dragging the ql/qr points recomputes the middle state
    qm = ql + alpha_l * rl in the browser and updates the eigenlines and
    the q1/q2 solution subplots.  All interactive computation happens in
    the JAVASCRIPT below; __init__ only records the mpld3 element ids the
    script reads through ``this.props``.
    """
    # NOTE: the prop names in requiredProps must match the dict_ keys
    # written in __init__ below.
    JAVASCRIPT = r"""
    // init custom PPLane plugin
    mpld3.register_plugin("drag", PPlanePlugin);
    PPlanePlugin.prototype = Object.create(mpld3.Plugin.prototype);
    PPlanePlugin.prototype.constructor = PPlanePlugin;
    PPlanePlugin.prototype.requiredProps = ["id", "idmpoint",
    "idlinesla", "idlineslb", "idlinesra", "idlinesrb",
    "idqlm", "idqmm", "idqrm", "idqone", "idqtwo",
    "idrl0", "idrl1", "idrr0", "idrr1"];
    PPlanePlugin.prototype.defaultProps = {}
    function PPlanePlugin(fig, props){
        mpld3.Plugin.call(this, fig, props);
        mpld3.insert_css("#" + fig.figid + " path.dragging",
        {"fill-opacity": "1.0 !important",
        "stroke-opacity": "1.0 !important"});
    };
    // Call draw function, this function is being looped all the time
    PPlanePlugin.prototype.draw = function(){
        // Get elements into script variables
        var obj = mpld3.get_element(this.props.id);
        var midpoint = mpld3.get_element(this.props.idmpoint);
        var linesla = mpld3.get_element(this.props.idlinesla);
        var lineslb = mpld3.get_element(this.props.idlineslb);
        var linesra = mpld3.get_element(this.props.idlinesra);
        var linesrb = mpld3.get_element(this.props.idlinesrb);
        var qlm = mpld3.get_element(this.props.idqlm);
        var qmm = mpld3.get_element(this.props.idqmm);
        var qrm = mpld3.get_element(this.props.idqrm);
        var qone = mpld3.get_element(this.props.idqone);
        var qtwo = mpld3.get_element(this.props.idqtwo);
        var rl0 = this.props.idrl0;
        var rl1 = this.props.idrl1;
        var rr0 = this.props.idrr0;
        var rr1 = this.props.idrr1;
        // Set initial conditions for javascript calculations
        var qleft = obj.offsets[0];
        var qright = obj.offsets[1];
        var qmid = midpoint.offsets[0];
        var off = 13;
        // Calculate slopes for eigenlines
        var ml = rl1/rl0;
        var mr = rr1/rr0;
        // Main d3 drag function
        var drag = d3.behavior.drag()
            .origin(function(d) { return {x:obj.ax.x(d[0]),
                                          y:obj.ax.y(d[1])}; })
            .on("dragstart", dragstarted)
            .on("drag", dragged)
            .on("dragend", dragended);
        // Set elements of ql and qr points and call main drag function
        obj.elements()
           .data(obj.offsets)
           .style("cursor", "default")
           .call(drag);
        // Begin drag function
        function dragstarted(d) {
            d3.event.sourceEvent.stopPropagation();
            d3.select(this).classed("dragging", true);
        }
        // The drag function called while dragging is happening (meat of code here)
        function dragged(d,i) {
            // Convert mouse coordinates in drag event (d3.event) to python coordinates d
            d[0] = obj.ax.x.invert(d3.event.x);
            d[1] = obj.ax.y.invert(d3.event.y);
            // Move ql and qr stored in obj (they have been selected in drag)
            d3.select(this)
              .attr("transform", "translate(" + [d3.event.x, d3.event.y] + ")");
            // If obj corresponds to ql, move all the other left elements
            if (i==0){
                // Move text marker
                qlm.elements().transition().duration(1)
                   .attr("transform", "translate(" + [d3.event.x + off, d3.event.y + off] + ")");
                // Move eigenlines
                for (var ii=0; ii<2; ii++) {
                    linesla.data[ii][1] = ml*(linesla.data[ii][0] - d[0]) + d[1];
                    lineslb.data[ii][1] = mr*(lineslb.data[ii][0] - d[0]) + d[1]; }
                // In script calculations of middle state
                qleft = [d[0], d[1]];
                var det = rl0*rr1 - rr0*rl1;
                var alphal = (rr1*(qright[0] - qleft[0]) - rr0*(qright[1] - qleft[1]))/det
                qmid[0] = qleft[0] + alphal*rl0;
                qmid[1] = qleft[1] + alphal*rl1;
                var xx = obj.ax.x(qmid[0]);
                var yy = obj.ax.y(qmid[1]);
            }
            // if element corresponds to qr
            else {
                // Move text marker
                qrm.elements().transition().duration(1)
                   .attr("transform", "translate(" + [d3.event.x + off, d3.event.y + off] + ")");
                // Move eigenlines
                for (var ii=0; ii<2; ii++) {
                    linesra.data[ii][1] = ml*(linesra.data[ii][0] - d[0]) + d[1];
                    linesrb.data[ii][1] = mr*(linesrb.data[ii][0] - d[0]) + d[1]; }
                // In script calculations of middle state
                qright = [d[0], d[1]];
                var det = rl0*rr1 - rr0*rl1;
                var alphal = (rr1*(qright[0] - qleft[0]) - rr0*(qright[1] - qleft[1]))/det
                qmid[0] = qleft[0] + alphal*rl0;
                qmid[1] = qleft[1] + alphal*rl1;
                var xx = obj.ax.x(qmid[0]);
                var yy = obj.ax.y(qmid[1]);
            }
            // Update middle state point and marker position
            midpoint.elements().transition().duration(5)
                    .attr("transform", "translate(" + [xx, yy] + ")");
            qmm.elements().transition().duration(5)
               .attr("transform", "translate(" + [xx + 0.7*off, yy + 0.7*off] + ")");
            // Update eigenlines
            linesla.elements().transition().duration(5)
                   .attr("d", linesla.datafunc(linesla.data));
            lineslb.elements().transition().duration(5)
                   .attr("d", lineslb.datafunc(lineslb.data));
            linesra.elements().transition().duration(5)
                   .attr("d", linesra.datafunc(linesra.data));
            linesrb.elements().transition().duration(5)
                   .attr("d", linesrb.datafunc(linesrb.data));
            // Update subplots of q1 and q2
            qone.data[0][1] = qleft[0];
            qone.data[1][1] = qleft[0];
            qone.data[2][1] = qmid[0];
            qone.data[3][1] = qmid[0];
            qone.data[4][1] = qright[0];
            qone.data[5][1] = qright[0];
            qtwo.data[0][2] = qleft[1];
            qtwo.data[1][2] = qleft[1];
            qtwo.data[2][2] = qmid[1];
            qtwo.data[3][2] = qmid[1];
            qtwo.data[4][2] = qright[1];
            qtwo.data[5][2] = qright[1];
            qone.elements().transition().duration(5)
                .attr("d", qone.datafunc(qone.data));
            qtwo.elements().transition().duration(5)
                .attr("d", qtwo.datafunc(qtwo.data));
        }
        // End dragging
        function dragended(d) {
            d3.select(this).classed("dragging", false);
        }
    }
    mpld3.register_plugin("drag", PPlanePlugin);
    """
    def __init__(self, points, midpoint,
                 linesla, lineslb, linesra, linesrb,
                 qlmarker, qmmarker, qrmarker,
                 qone, qtwo, rl0, rl1, rr0, rr1):
        """Store the mpld3 ids (and raw eigenvector components rl*/rr*)
        read by the JAVASCRIPT above through ``this.props``."""
        # mpld3 identifies Line2D artists with a "pts" suffix.
        if isinstance(points, mpl.lines.Line2D):
            suffix = "pts"
        else:
            suffix = None
        self.dict_ = {"type": "drag",
                      "id": utils.get_id(points, suffix),
                      "idmpoint": utils.get_id(midpoint,suffix),
                      "idlinesla": utils.get_id(linesla),
                      "idlineslb": utils.get_id(lineslb),
                      "idlinesra": utils.get_id(linesra),
                      "idlinesrb": utils.get_id(linesrb),
                      "idqlm": utils.get_id(qlmarker,suffix),
                      "idqmm": utils.get_id(qmmarker,suffix),
                      "idqrm": utils.get_id(qrmarker,suffix),
                      "idqone": utils.get_id(qone),
                      "idqtwo": utils.get_id(qtwo),
                      "idrl0": rl0,
                      "idrl1": rl1,
                      "idrr0": rr0,
                      "idrr1": rr1}
def linear_phase_plane(ql=None, qr=None,
                       r1=None, r2=None, lam1=None, lam2=None,
                       q1min=-5, q1max=5, q2min=-5, q2max=5, domain=5, time=1.0,
                       title1=None, title2=None):
    """Build the interactive phase-plane figure for a 2D linear system.

    Plots the eigenlines through ql and qr, the middle state
    qm = ql + alpha_l * rl, and the piecewise-constant q1/q2 solutions at
    the given time, then wires the figure to the mpld3 ``PPlanePlugin``.

    Parameters
    ----------
    ql, qr : array_like (q1, q2), optional
        Left/right states; default to [-2, 2] and [0, 3].
    r1, r2 : array_like, optional
        Eigenvectors; default to the acoustics choice [-zz, 1], [zz, 1].
    lam1, lam2 : float, optional
        Eigenvalues; default to -zz/rho0 and zz/rho0 (acoustics).
    q1min, q1max, q2min, q2max : float
        Phase-plane / solution plot limits.
    domain : float
        Half-width of the x axis (enlarged if the waves leave it).
    time : float
        Time at which the solution subplots are drawn.
    title1, title2 : str, optional
        Labels for the two components; default "q1" and "q2".

    Returns
    -------
    matplotlib Figure connected to the mpld3 plugin.
    """
    # Fix: the defaults used to be mutable np.array objects (shared across
    # calls); resolve them here instead.
    if ql is None:
        ql = np.array([-2.0, 2.0])
    if qr is None:
        qr = np.array([0.0, 3.0])
    # Create a figure
    fig, ax = plt.subplots(1, 3, figsize=(13.5, 5.0))
    fig.subplots_adjust(left=0.05, right=0.9, bottom=0.2, top=0.9,
                        hspace=0.3, wspace=0.15)
    # Calculate dq from ql and qr
    dq = np.array([qr[0] - ql[0], qr[1] - ql[1]])
    # If eigenvectors not specified, default to Acoustics
    rho0 = 1.0
    zz = 2.0
    if r1 is None:
        r1 = np.array([-zz, 1])
    if r2 is None:
        r2 = np.array([zz, 1])
    if lam1 is None:
        lam1 = -zz / rho0
    if lam2 is None:
        lam2 = zz / rho0
    # Choose left and right eigenvectors (sorted by wave speed)
    if (lam1 < lam2):
        laml = lam1; lamr = lam2
        rl = r1; rr = r2
    else:
        laml = lam2; lamr = lam1
        rl = r2; rr = r1
    # Assign titles to solution plots
    if title1 is None:
        title1 = "q1"
    if title2 is None:
        title2 = "q2"
    # Plot eigenlines across ql and qr; the tiny eps offsets keep mpld3
    # from treating coincident lines as the same element (D3 bug).
    eps = 0.00000000001
    ml = rl[1] / rl[0]
    mr = rr[1] / rr[0] + eps
    linesla = ax[0].plot([q1min, q1max], [ml * (q1min - ql[0]) + ql[1], ml * (q1max - ql[0]) + ql[1]], '-k')
    lineslb = ax[0].plot([q1min - eps, q1max + eps], [mr * (q1min - ql[0]) + ql[1], mr * (q1max - ql[0]) + ql[1]], '-k')
    linesra = ax[0].plot([q1min - 2 * eps, q1max + 2 * eps], [mr * (q1min - qr[0]) + qr[1], mr * (q1max - qr[0]) + qr[1]], '-k')
    linesrb = ax[0].plot([q1min - 3 * eps, q1max + 3 * eps], [ml * (q1min - qr[0]) + qr[1], ml * (q1max - qr[0]) + qr[1]], '-k')
    # Plot ql and qr
    points = ax[0].plot([ql[0], qr[0]], [ql[1], qr[1]], 'ok', alpha=0.7, markersize=10, markeredgewidth=1)
    offset = 0.4 * 0.5 * (q1max - q1min) / 5.0
    qlmarker = ax[0].plot(ql[0] + offset, ql[1] - offset, 'ok', marker=(r"$ q_l $"), markersize=15)
    qrmarker = ax[0].plot(qr[0] + offset, qr[1] - offset, 'ok', marker=(r"$ q_r $"), markersize=15)
    # Plot midpoint qm = ql + alpha_l * rl
    det = rl[0] * rr[1] - rr[0] * rl[1]
    alL = (rr[1] * dq[0] - rr[0] * dq[1]) / det
    qm = ql + alL * rl
    midpoint = ax[0].plot(qm[0], qm[1], 'ok', alpha=0.9, markersize=8, markeredgewidth=1)
    qmmarker = ax[0].plot(qm[0] + offset, qm[1] - 0.7 * offset, 'k', marker=(r"$ q_m $"), markersize=20)
    # Set axis 1 properties
    ax[0].set_title("Phase Plane", fontsize=18)
    ax[0].axis([q1min, q1max, q2min, q2max])
    ax[0].grid(alpha=0.1, color='k', linestyle='--')
    ax[0].set_xlabel(title1)
    ax[0].set_ylabel(title2)
    # Remove default mpld3 plugins
    plugins.clear(fig)
    # Create solution line with six points where solution should be
    ctleft = laml * time
    ctright = lamr * time
    domain = max(domain, abs(ctleft) + 1, abs(ctright) + 1)  # Readjust xdomain if necessary
    xsol = np.array([-domain, ctleft, ctleft, ctright, ctright, domain])
    qsol1 = 1 * xsol
    qsol1[0:2] = ql[0]
    qsol1[2:4] = qm[0]
    qsol1[4:6] = qr[0]
    # Set axis 2 properties
    ax[1].set_title(title1, fontsize=18)
    ax[1].axis([-domain, domain, q1min, q1max])
    ax[1].grid(alpha=0.1, color='k', linestyle='--')
    ax[1].set_xlabel('x')
    # Plot solution
    qone = ax[1].plot(xsol, qsol1, '-k', linewidth=4, alpha=1.0)
    # Create solution line with six points for the second component
    xsol2 = np.array([-domain, ctleft, ctleft, ctright, ctright, domain])
    qsol2 = 1 * xsol2
    qsol2[0:2] = ql[1]
    qsol2[2:4] = qm[1]
    qsol2[4:6] = qr[1]
    # Set axis 3 properties
    ax[2].set_title(title2, fontsize=18)
    ax[2].axis([-domain, domain, q2min, q2max])
    ax[2].grid(alpha=0.1, color='k', linestyle='--')
    ax[2].set_xlabel('x')
    # Plot solution
    qtwo = ax[2].plot(xsol2, qsol2, '-k', linewidth=4, alpha=1.0)
    # Call mpld3 custom PPLane plugin to interact with plot.  Argument order
    # must match PPlanePlugin.__init__.
    plugins.connect(fig, PPlanePlugin(points[0], midpoint[0],
                    linesla[0], lineslb[0], linesra[0], linesrb[0],
                    qlmarker[0], qmmarker[0], qrmarker[0], qone[0], qtwo[0],
                    rl[0], rl[1], rr[0], rr[1]))
    return fig
### Tests for interactive apps
def test_interactive_shallowPP(save_to_html=False):
    """Smoke-test the interactive shallow water app.

    Builds the figure for a symmetric dam-break-like pair of states and
    shows it with mpld3 (optionally also saving a standalone HTML file).
    """
    # Left and right states (h, hu)
    left_state = np.array([3.0, 5.0])
    right_state = np.array([3.0, -5.0])
    # Pass the plotting options directly as keyword arguments
    fig = shallow_water(left_state, right_state,
                        g=1.0, time=2.0, tmax=5, hmax=10, humin=-15, humax=15)
    if save_to_html:
        mpld3.save_html(fig, "test_shallow.html")
    mpld3.show()
def test_interactive_linearPP(save_to_html=False):
    """Smoke-test the interactive linear phase plane app.

    Uses the acoustics eigenstructure (impedance zz, density rho0) and
    shows the figure with mpld3 (optionally saving a standalone HTML file).
    """
    # Left and right states
    left_state = np.array([-2.0, 2.0])
    right_state = np.array([0.0, -3.0])
    # Acoustics eigenvectors and eigenvalues
    zz = 2.0
    rho0 = 1.0
    r1 = np.array([zz, 1.0])
    r2 = np.array([-zz, 1.0])
    lam1 = zz / rho0
    lam2 = -zz / rho0
    # Pass the plotting options directly as keyword arguments
    fig = linear_phase_plane(left_state, right_state, r1, r2, lam1, lam2,
                             q1min=-5, q1max=5, q2min=-5, q2max=5,
                             domain=5, time=1,
                             title1="Pressure", title2="Velocity")
    if save_to_html:
        mpld3.save_html(fig, "test_linearPP.html")
    mpld3.show()
| [
"numpy.sqrt",
"mpld3.save_html",
"numpy.array",
"numpy.linspace",
"mpld3.utils.get_id",
"numpy.empty",
"mpld3.plugins.clear",
"mpld3.show",
"matplotlib.pyplot.subplots"
] | [((27219, 27262), 'numpy.sqrt', 'np.sqrt', (['(0.5 * g * (1.0 / hm + 1.0 / hside))'], {}), '(0.5 * g * (1.0 / hm + 1.0 / hside))\n', (27226, 27262), True, 'import numpy as np\n'), ((27656, 27676), 'numpy.array', 'np.array', (['[3.0, 5.0]'], {}), '([3.0, 5.0])\n', (27664, 27676), True, 'import numpy as np\n'), ((27681, 27702), 'numpy.array', 'np.array', (['[3.0, -5.0]'], {}), '([3.0, -5.0])\n', (27689, 27702), True, 'import numpy as np\n'), ((27901, 27936), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {'figsize': '(10, 8)'}), '(2, 2, figsize=(10, 8))\n', (27913, 27936), True, 'import matplotlib.pyplot as plt\n'), ((28251, 28291), 'numpy.array', 'np.array', (['[qr[0] - ql[0], qr[1] - ql[1]]'], {}), '([qr[0] - ql[0], qr[1] - ql[1]])\n', (28259, 28291), True, 'import numpy as np\n'), ((28709, 28737), 'numpy.linspace', 'np.linspace', (['(0)', 'ql[0]', 'iters'], {}), '(0, ql[0], iters)\n', (28720, 28737), True, 'import numpy as np\n'), ((28747, 28778), 'numpy.linspace', 'np.linspace', (['ql[0]', 'hmax', 'iters'], {}), '(ql[0], hmax, iters)\n', (28758, 28778), True, 'import numpy as np\n'), ((28787, 28821), 'numpy.linspace', 'np.linspace', (['(0)', '(qr[0] + eps)', 'iters'], {}), '(0, qr[0] + eps, iters)\n', (28798, 28821), True, 'import numpy as np\n'), ((28829, 28866), 'numpy.linspace', 'np.linspace', (['(qr[0] + eps)', 'hmax', 'iters'], {}), '(qr[0] + eps, hmax, iters)\n', (28840, 28866), True, 'import numpy as np\n'), ((30864, 30897), 'numpy.linspace', 'np.linspace', (['(-10)', '(10)', 'iter_charac'], {}), '(-10, 10, iter_charac)\n', (30875, 30897), True, 'import numpy as np\n'), ((30909, 30942), 'numpy.linspace', 'np.linspace', (['(-11)', '(11)', 'iter_charac'], {}), '(-11, 11, iter_charac)\n', (30920, 30942), True, 'import numpy as np\n'), ((31495, 31532), 'numpy.linspace', 'np.linspace', (['(-11.1)', '(11.1)', 'iter_charac'], {}), '(-11.1, 11.1, iter_charac)\n', (31506, 31532), True, 'import numpy as np\n'), ((31544, 31581), 'numpy.linspace', 
'np.linspace', (['(-11.2)', '(11.2)', 'iter_charac'], {}), '(-11.2, 11.2, iter_charac)\n', (31555, 31581), True, 'import numpy as np\n'), ((31593, 31630), 'numpy.linspace', 'np.linspace', (['(-11.3)', '(11.3)', 'iter_charac'], {}), '(-11.3, 11.3, iter_charac)\n', (31604, 31630), True, 'import numpy as np\n'), ((31642, 31679), 'numpy.linspace', 'np.linspace', (['(-11.4)', '(11.4)', 'iter_charac'], {}), '(-11.4, 11.4, iter_charac)\n', (31653, 31679), True, 'import numpy as np\n'), ((32871, 32902), 'numpy.linspace', 'np.linspace', (['(-10)', '(10)', '(2 * iters)'], {}), '(-10, 10, 2 * iters)\n', (32882, 32902), True, 'import numpy as np\n'), ((33843, 33861), 'mpld3.plugins.clear', 'plugins.clear', (['fig'], {}), '(fig)\n', (33856, 33861), False, 'from mpld3 import plugins, utils\n'), ((43106, 43127), 'numpy.array', 'np.array', (['[-2.0, 2.0]'], {}), '([-2.0, 2.0])\n', (43114, 43127), True, 'import numpy as np\n'), ((43131, 43151), 'numpy.array', 'np.array', (['[0.0, 3.0]'], {}), '([0.0, 3.0])\n', (43139, 43151), True, 'import numpy as np\n'), ((43407, 43446), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {'figsize': '(13.5, 5.0)'}), '(1, 3, figsize=(13.5, 5.0))\n', (43419, 43446), True, 'import matplotlib.pyplot as plt\n'), ((43684, 43724), 'numpy.array', 'np.array', (['[qr[0] - ql[0], qr[1] - ql[1]]'], {}), '([qr[0] - ql[0], qr[1] - ql[1]])\n', (43692, 43724), True, 'import numpy as np\n'), ((45879, 45897), 'mpld3.plugins.clear', 'plugins.clear', (['fig'], {}), '(fig)\n', (45892, 45897), False, 'from mpld3 import plugins, utils\n'), ((46113, 46174), 'numpy.array', 'np.array', (['[-domain, ctleft, ctleft, ctright, ctright, domain]'], {}), '([-domain, ctleft, ctleft, ctright, ctright, domain])\n', (46121, 46174), True, 'import numpy as np\n'), ((46713, 46774), 'numpy.array', 'np.array', (['[-domain, ctleft, ctleft, ctright, ctright, domain]'], {}), '([-domain, ctleft, ctleft, ctright, ctright, domain])\n', (46721, 46774), True, 'import numpy as np\n'), 
((47688, 47708), 'numpy.array', 'np.array', (['[3.0, 5.0]'], {}), '([3.0, 5.0])\n', (47696, 47708), True, 'import numpy as np\n'), ((47718, 47739), 'numpy.array', 'np.array', (['[3.0, -5.0]'], {}), '([3.0, -5.0])\n', (47726, 47739), True, 'import numpy as np\n'), ((48075, 48087), 'mpld3.show', 'mpld3.show', ([], {}), '()\n', (48085, 48087), False, 'import mpld3\n'), ((48185, 48206), 'numpy.array', 'np.array', (['[-2.0, 2.0]'], {}), '([-2.0, 2.0])\n', (48193, 48206), True, 'import numpy as np\n'), ((48217, 48238), 'numpy.array', 'np.array', (['[0.0, -3.0]'], {}), '([0.0, -3.0])\n', (48225, 48238), True, 'import numpy as np\n'), ((48334, 48353), 'numpy.array', 'np.array', (['[zz, 1.0]'], {}), '([zz, 1.0])\n', (48342, 48353), True, 'import numpy as np\n'), ((48362, 48382), 'numpy.array', 'np.array', (['[-zz, 1.0]'], {}), '([-zz, 1.0])\n', (48370, 48382), True, 'import numpy as np\n'), ((48695, 48707), 'mpld3.show', 'mpld3.show', ([], {}), '()\n', (48705, 48707), False, 'import mpld3\n'), ((27377, 27395), 'numpy.sqrt', 'np.sqrt', (['(g * hside)'], {}), '(g * hside)\n', (27384, 27395), True, 'import numpy as np\n'), ((27414, 27429), 'numpy.sqrt', 'np.sqrt', (['(g * hm)'], {}), '(g * hm)\n', (27421, 27429), True, 'import numpy as np\n'), ((30999, 31017), 'numpy.sqrt', 'np.sqrt', (['(g * ql[0])'], {}), '(g * ql[0])\n', (31006, 31017), True, 'import numpy as np\n'), ((31041, 31059), 'numpy.sqrt', 'np.sqrt', (['(g * qr[0])'], {}), '(g * qr[0])\n', (31048, 31059), True, 'import numpy as np\n'), ((33244, 33268), 'numpy.empty', 'np.empty', (['(4)'], {'dtype': 'float'}), '(4, dtype=float)\n', (33252, 33268), True, 'import numpy as np\n'), ((43840, 43858), 'numpy.array', 'np.array', (['[-zz, 1]'], {}), '([-zz, 1])\n', (43848, 43858), True, 'import numpy as np\n'), ((43891, 43908), 'numpy.array', 'np.array', (['[zz, 1]'], {}), '([zz, 1])\n', (43899, 43908), True, 'import numpy as np\n'), ((48030, 48070), 'mpld3.save_html', 'mpld3.save_html', (['pt', '"""test_shallow.html"""'], 
{}), "(pt, 'test_shallow.html')\n", (48045, 48070), False, 'import mpld3\n'), ((48649, 48690), 'mpld3.save_html', 'mpld3.save_html', (['pt', '"""test_linearPP.html"""'], {}), "(pt, 'test_linearPP.html')\n", (48664, 48690), False, 'import mpld3\n'), ((25126, 25154), 'mpld3.utils.get_id', 'utils.get_id', (['points', 'suffix'], {}), '(points, suffix)\n', (25138, 25154), False, 'from mpld3 import plugins, utils\n'), ((25190, 25220), 'mpld3.utils.get_id', 'utils.get_id', (['midpoint', 'suffix'], {}), '(midpoint, suffix)\n', (25202, 25220), False, 'from mpld3 import plugins, utils\n'), ((25497, 25516), 'mpld3.utils.get_id', 'utils.get_id', (['hugol'], {}), '(hugol)\n', (25509, 25516), False, 'from mpld3 import plugins, utils\n'), ((25552, 25571), 'mpld3.utils.get_id', 'utils.get_id', (['hugor'], {}), '(hugor)\n', (25564, 25571), False, 'from mpld3 import plugins, utils\n'), ((25607, 25626), 'mpld3.utils.get_id', 'utils.get_id', (['intcl'], {}), '(intcl)\n', (25619, 25626), False, 'from mpld3 import plugins, utils\n'), ((25662, 25681), 'mpld3.utils.get_id', 'utils.get_id', (['intcr'], {}), '(intcr)\n', (25674, 25681), False, 'from mpld3 import plugins, utils\n'), ((25718, 25738), 'mpld3.utils.get_id', 'utils.get_id', (['hugol2'], {}), '(hugol2)\n', (25730, 25738), False, 'from mpld3 import plugins, utils\n'), ((25775, 25795), 'mpld3.utils.get_id', 'utils.get_id', (['hugor2'], {}), '(hugor2)\n', (25787, 25795), False, 'from mpld3 import plugins, utils\n'), ((25832, 25852), 'mpld3.utils.get_id', 'utils.get_id', (['intcl2'], {}), '(intcl2)\n', (25844, 25852), False, 'from mpld3 import plugins, utils\n'), ((25889, 25909), 'mpld3.utils.get_id', 'utils.get_id', (['intcr2'], {}), '(intcr2)\n', (25901, 25909), False, 'from mpld3 import plugins, utils\n'), ((25942, 25972), 'mpld3.utils.get_id', 'utils.get_id', (['qlmarker', 'suffix'], {}), '(qlmarker, suffix)\n', (25954, 25972), False, 'from mpld3 import plugins, utils\n'), ((26004, 26034), 'mpld3.utils.get_id', 'utils.get_id', 
(['qmmarker', 'suffix'], {}), '(qmmarker, suffix)\n', (26016, 26034), False, 'from mpld3 import plugins, utils\n'), ((26066, 26096), 'mpld3.utils.get_id', 'utils.get_id', (['qrmarker', 'suffix'], {}), '(qrmarker, suffix)\n', (26078, 26096), False, 'from mpld3 import plugins, utils\n'), ((26132, 26152), 'mpld3.utils.get_id', 'utils.get_id', (['shock1'], {}), '(shock1)\n', (26144, 26152), False, 'from mpld3 import plugins, utils\n'), ((26189, 26209), 'mpld3.utils.get_id', 'utils.get_id', (['shock2'], {}), '(shock2)\n', (26201, 26209), False, 'from mpld3 import plugins, utils\n'), ((26244, 26262), 'mpld3.utils.get_id', 'utils.get_id', (['rar1'], {}), '(rar1)\n', (26256, 26262), False, 'from mpld3 import plugins, utils\n'), ((26297, 26315), 'mpld3.utils.get_id', 'utils.get_id', (['rar2'], {}), '(rar2)\n', (26309, 26315), False, 'from mpld3 import plugins, utils\n'), ((26351, 26370), 'mpld3.utils.get_id', 'utils.get_id', (['rar1a'], {}), '(rar1a)\n', (26363, 26370), False, 'from mpld3 import plugins, utils\n'), ((26406, 26425), 'mpld3.utils.get_id', 'utils.get_id', (['rar2a'], {}), '(rar2a)\n', (26418, 26425), False, 'from mpld3 import plugins, utils\n'), ((26461, 26480), 'mpld3.utils.get_id', 'utils.get_id', (['rar1b'], {}), '(rar1b)\n', (26473, 26480), False, 'from mpld3 import plugins, utils\n'), ((26516, 26535), 'mpld3.utils.get_id', 'utils.get_id', (['rar2b'], {}), '(rar2b)\n', (26528, 26535), False, 'from mpld3 import plugins, utils\n'), ((26571, 26590), 'mpld3.utils.get_id', 'utils.get_id', (['rar1c'], {}), '(rar1c)\n', (26583, 26590), False, 'from mpld3 import plugins, utils\n'), ((26626, 26645), 'mpld3.utils.get_id', 'utils.get_id', (['rar2c'], {}), '(rar2c)\n', (26638, 26645), False, 'from mpld3 import plugins, utils\n'), ((26681, 26700), 'mpld3.utils.get_id', 'utils.get_id', (['rar1d'], {}), '(rar1d)\n', (26693, 26700), False, 'from mpld3 import plugins, utils\n'), ((26736, 26755), 'mpld3.utils.get_id', 'utils.get_id', (['rar2d'], {}), '(rar2d)\n', (26748, 
26755), False, 'from mpld3 import plugins, utils\n'), ((26793, 26822), 'mpld3.utils.get_id', 'utils.get_id', (['timedot', 'suffix'], {}), '(timedot, suffix)\n', (26805, 26822), False, 'from mpld3 import plugins, utils\n'), ((26860, 26882), 'mpld3.utils.get_id', 'utils.get_id', (['timeline'], {}), '(timeline)\n', (26872, 26882), False, 'from mpld3 import plugins, utils\n'), ((26915, 26931), 'mpld3.utils.get_id', 'utils.get_id', (['q1'], {}), '(q1)\n', (26927, 26931), False, 'from mpld3 import plugins, utils\n'), ((26964, 26980), 'mpld3.utils.get_id', 'utils.get_id', (['q2'], {}), '(q2)\n', (26976, 26980), False, 'from mpld3 import plugins, utils\n'), ((42317, 42345), 'mpld3.utils.get_id', 'utils.get_id', (['points', 'suffix'], {}), '(points, suffix)\n', (42329, 42345), False, 'from mpld3 import plugins, utils\n'), ((42381, 42411), 'mpld3.utils.get_id', 'utils.get_id', (['midpoint', 'suffix'], {}), '(midpoint, suffix)\n', (42393, 42411), False, 'from mpld3 import plugins, utils\n'), ((42447, 42468), 'mpld3.utils.get_id', 'utils.get_id', (['linesla'], {}), '(linesla)\n', (42459, 42468), False, 'from mpld3 import plugins, utils\n'), ((42505, 42526), 'mpld3.utils.get_id', 'utils.get_id', (['lineslb'], {}), '(lineslb)\n', (42517, 42526), False, 'from mpld3 import plugins, utils\n'), ((42563, 42584), 'mpld3.utils.get_id', 'utils.get_id', (['linesra'], {}), '(linesra)\n', (42575, 42584), False, 'from mpld3 import plugins, utils\n'), ((42621, 42642), 'mpld3.utils.get_id', 'utils.get_id', (['linesrb'], {}), '(linesrb)\n', (42633, 42642), False, 'from mpld3 import plugins, utils\n'), ((42675, 42705), 'mpld3.utils.get_id', 'utils.get_id', (['qlmarker', 'suffix'], {}), '(qlmarker, suffix)\n', (42687, 42705), False, 'from mpld3 import plugins, utils\n'), ((42737, 42767), 'mpld3.utils.get_id', 'utils.get_id', (['qmmarker', 'suffix'], {}), '(qmmarker, suffix)\n', (42749, 42767), False, 'from mpld3 import plugins, utils\n'), ((42799, 42829), 'mpld3.utils.get_id', 'utils.get_id', 
(['qrmarker', 'suffix'], {}), '(qrmarker, suffix)\n', (42811, 42829), False, 'from mpld3 import plugins, utils\n'), ((42862, 42880), 'mpld3.utils.get_id', 'utils.get_id', (['qone'], {}), '(qone)\n', (42874, 42880), False, 'from mpld3 import plugins, utils\n'), ((42914, 42932), 'mpld3.utils.get_id', 'utils.get_id', (['qtwo'], {}), '(qtwo)\n', (42926, 42932), False, 'from mpld3 import plugins, utils\n')] |
import mdp.utils as utils
from mdp.lmdps import *
import numpy as np
class TestMDPEmbeddeding():
    """Checks that `mdp_encoder` embeds an MDP (P, r) into an LMDP (p, q)."""
    def __init__(self):
        self.simple_test()
        self.random_test()

    @staticmethod
    def simple_test():
        """
        Explore how the unconstrained dynamics behave in a simple setting.
        """
        # What about when p(s'| s) = 0, is not possible under the true dynamics?!
        r = np.array([
            [1, 0],
            [0, 0]
        ])
        # Indexed by [s' x s x a]
        # ensure we have a distribution over s'
        p000 = 1
        p100 = 1 - p000
        p001 = 0
        p101 = 1 - p001
        p010 = 0
        p110 = 1 - p010
        p011 = 1
        p111 = 1 - p011
        P = np.array([
            [[p000, p001],
             [p010, p011]],
            [[p100, p101],
             [p110, p111]],
        ])
        # BUG ??? only seems to work for deterministic transitions!?
        # oh, this is because deterministic transitions satisfy the row rank requirement??!
        # a distribution over future states
        assert np.isclose(np.sum(P, axis=0), np.ones((2,2))).all()
        pi = utils.softmax(r, axis=1)  # exp Q vals w gamma = 0
        # a distribution over actions
        assert np.isclose(np.sum(pi, axis=1), np.ones((2,))).all()
        p, q = mdp_encoder(P, r)
        print('q', q)
        print('p', p)
        print('P', P)
        P_pi = np.einsum('ijk,jk->ij', P, pi)
        print('P_pi', P_pi)
        # the unconstrained dynamics with deterministic transitions,
        # are the same as using a gamma = 0 boltzman Q vals
        print("exp(r) is close to p? {}".format(np.isclose(p, P_pi, atol=1e-4).all()))
        # r(s, a) = q(s) - KL(P(. | s, a) || p(. | s))
        # FIX: was `numpy.zeros(...)`; this module imports numpy as `np` only.
        ce = np.zeros((2, 2))
        for j in range(2):
            for k in range(2): # actions
                ce[j, k] = CE(P[:, j, k], p[:, j])
        r_approx = q[:, np.newaxis] + ce
        print(np.around(r, 3))
        print(np.around(r_approx, 3))
        print('r ~= q - CE(P || p): {}'.format(np.isclose(r, r_approx, atol=1e-2).all()))
        print('\n\n')

    @staticmethod
    def random_test():
        """
        Explore how the unconstrained dynamics behave in a random setting.
        """
        n_states, n_actions = 3, 2
        mdp = utils.build_random_mdp(n_states, n_actions, 0.9)
        P = mdp.P
        r = mdp.r
        # a distribution over future states
        assert np.isclose(np.sum(P, axis=0), np.ones((n_states, n_actions))).all()
        p, q = mdp_encoder(P, r)
        # r(s, a) = q(s) - KL(P(. | s, a) || p(. | s))
        # TODO how to do with matrices!?
        # FIX: was `numpy.zeros(...)`; this module imports numpy as `np` only.
        ce = np.zeros((n_states, n_actions))
        for j in range(n_states):
            for k in range(n_actions): # actions
                ce[j, k] = CE(P[:, j, k], p[:, j])
        r_approx = q[:, np.newaxis] + ce
        print('r', np.around(r, 3), r.shape)
        print('r_approx', np.around(r_approx, 3), r_approx.shape)
        print('r ~= q - CE(P || p): {}'.format(np.isclose(r, r_approx, atol=1e-3).all()))
class TestLMDPSolver():
    """Sanity checks for `lmdp_solver`."""

    def __init__(self):
        self.simple_solve_test()
        self.random_solve_test()

    @staticmethod
    def simple_solve_test():
        """The optimal value function should peak at the rewarding state."""
        uncontrolled = np.array([
            [0.75, 0.5],
            [0.25, 0.5]
        ])
        state_rewards = np.array([1, 0])
        _, values = lmdp_solver(uncontrolled, state_rewards, 0.9)
        assert np.argmax(values) == 0

    @staticmethod
    def random_solve_test():
        """
        Want to set up a env that will test long term value over short term rewards.
        """
        n_states, n_actions = 12, 3
        p, q = rnd_lmdp(n_states, n_actions)
        u, v = lmdp_solver(p, q, 0.99)
        print(u)
        print(v)

    # NOTE(review): placeholder — never called; lacks self/@staticmethod.
    def long_term_test():
        pass
class DecodeLMDPControl():
    """Exercises the decoders that turn LMDP controls `u` back into MDP policies."""

    def __init__(self):
        # self.test_decoder_simple()
        # self.test_decoder_rnd()
        self.option_decoder()

    @staticmethod
    def test_decoder_simple():
        """Decode a policy under deterministic dynamics and check P_pi matches u."""
        # Transition tensor indexed [s' x s x a]; each (s, a) column is a
        # distribution over s'. Action 0 keeps the state, action 1 swaps it.
        P = np.array([
            [[1, 0],
             [0, 1]],
            [[0, 1],
             [1, 0]],
        ])
        # Target state-transition distribution to realise.
        u = np.array([
            [0.95, 0.25],
            [0.05, 0.75]
        ])
        policy = lmdp_decoder(u, P, lr=1)
        P_pi = np.einsum('ijk,jk->ij', P, policy)
        assert np.isclose(P_pi, u, atol=1e-4).all()
        print(P_pi)
        print(u)

    @staticmethod
    def test_decoder_rnd():
        """Decode a policy for random dynamics and a random target control."""
        n_states, n_actions = 6, 6
        P = rnd.random((n_states, n_states, n_actions))
        P /= P.sum(0, keepdims=True)
        u = rnd.random((n_states, n_states))
        u /= u.sum(0, keepdims=True)
        policy = lmdp_decoder(u, P, lr=1)
        P_pi = np.einsum('ijk,jk->ij', P, policy)
        print(P_pi)
        print(u)
        print(KL(P_pi,u))
        assert np.isclose(P_pi, u, atol=1e-2).all()

    @staticmethod
    def option_decoder():
        """Decode via `lmdp_option_decoder` on a larger random problem."""
        n_states, n_actions = 32, 4
        P = rnd.random((n_states, n_states, n_actions))
        P /= P.sum(0, keepdims=True)
        u = rnd.random((n_states, n_states))
        u /= u.sum(0, keepdims=True)
        options = lmdp_option_decoder(u, P)
        print(options)
def construct_chain(n_states, r_max):
n_actions = 2
r = 0*np.ones((n_states, n_actions))
# r[1, :] = r_max//n_states
r[n_states-3, 1] = -r_max/2
r[n_states-2, 1] = r_max
r[0, :] = 0
r[n_states-1, :] = 0
p = 0.9
P = np.zeros((n_states, n_states, n_actions))
# absorbing states
P[0, 0, 0] = 1
P[1, 0, 0] = 0
P[0, 0, 1] = 1
P[1, 0, 1] = 0
m = n_states-1
P[m, m, 0] = 1
P[m-1, m, 0] = 0
P[m, m, 1] = 1
P[m-1, m, 1] = 0
# go left
for i in range(1, n_states-1):
P[i-1, i, 0] = p
P[i+1, i, 0] = 1-p
# go right
for i in range(1, n_states-1):
P[i+1, i, 1] = p
P[i-1, i, 1] = 1-p
for a in range(n_actions):
assert np.isclose(np.sum(P[:, :, a], axis=0), np.ones((n_states, ))).all()
return P, r
class DiscountingTest():
    """Visual check of discounting behaviour on a chain MDP."""

    def __init__(self):
        self.chain_test()

    @staticmethod
    def chain_test():
        """Embed a 16-state chain MDP, solve the LMDP, and plot the pieces."""
        n_states = 16
        P, r = construct_chain(n_states, 20)
        p, q = mdp_encoder(P, r)
        u, v = lmdp_solver(p, q, 0.75)

        # (subplot position, title, image) for each quantity of interest.
        panels = [
            (1, 'P: transition function', np.sum(P, axis=-1)),
            (2, 'r: reward function', r),
            (3, 'p: unconstrained dynamics', p),
            (4, 'u: optimal control', u),
        ]
        plt.figure(figsize=(16,16))
        for pos, title, img in panels:
            plt.subplot(2, 2, pos)
            plt.title(title)
            plt.imshow(img)
        plt.show()
"""
PROBLEM!
hmm. maybe this would be solved with finite horizon MDPs?!
"""
# Script entry point: instantiating a test class runs its checks in __init__.
# NOTE: plt is imported only here, so the plotting test (DiscountingTest)
# has plt available only when this module is run as a script.
if __name__ == "__main__":
    import matplotlib.pyplot as plt
    # TestMDPEmbeddeding()
    TestLMDPSolver()
    # DecodeLMDPControl()
    # DiscountingTest()
| [
"matplotlib.pyplot.imshow",
"numpy.isclose",
"numpy.ones",
"mdp.utils.build_random_mdp",
"numpy.argmax",
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.einsum",
"numpy.around",
"numpy.sum",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"mdp.utils.softmax",
"ma... | [((6028, 6069), 'numpy.zeros', 'np.zeros', (['(n_states, n_states, n_actions)'], {}), '((n_states, n_states, n_actions))\n', (6036, 6069), True, 'import numpy as np\n'), ((405, 431), 'numpy.array', 'np.array', (['[[1, 0], [0, 0]]'], {}), '([[1, 0], [0, 0]])\n', (413, 431), True, 'import numpy as np\n'), ((729, 799), 'numpy.array', 'np.array', (['[[[p000, p001], [p010, p011]], [[p100, p101], [p110, p111]]]'], {}), '([[[p000, p001], [p010, p011]], [[p100, p101], [p110, p111]]])\n', (737, 799), True, 'import numpy as np\n'), ((1224, 1248), 'mdp.utils.softmax', 'utils.softmax', (['r'], {'axis': '(1)'}), '(r, axis=1)\n', (1237, 1248), True, 'import mdp.utils as utils\n'), ((1496, 1526), 'numpy.einsum', 'np.einsum', (['"""ijk,jk->ij"""', 'P', 'pi'], {}), "('ijk,jk->ij', P, pi)\n", (1505, 1526), True, 'import numpy as np\n'), ((2388, 2436), 'mdp.utils.build_random_mdp', 'utils.build_random_mdp', (['n_states', 'n_actions', '(0.9)'], {}), '(n_states, n_actions, 0.9)\n', (2410, 2436), True, 'import mdp.utils as utils\n'), ((3602, 3638), 'numpy.array', 'np.array', (['[[0.75, 0.5], [0.25, 0.5]]'], {}), '([[0.75, 0.5], [0.25, 0.5]])\n', (3610, 3638), True, 'import numpy as np\n'), ((3685, 3701), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (3693, 3701), True, 'import numpy as np\n'), ((4589, 4659), 'numpy.array', 'np.array', (['[[[p000, p001], [p010, p011]], [[p100, p101], [p110, p111]]]'], {}), '([[[p000, p001], [p010, p011]], [[p100, p101], [p110, p111]]])\n', (4597, 4659), True, 'import numpy as np\n'), ((4734, 4772), 'numpy.array', 'np.array', (['[[0.95, 0.25], [0.05, 0.75]]'], {}), '([[0.95, 0.25], [0.05, 0.75]])\n', (4742, 4772), True, 'import numpy as np\n'), ((4861, 4891), 'numpy.einsum', 'np.einsum', (['"""ijk,jk->ij"""', 'P', 'pi'], {}), "('ijk,jk->ij', P, pi)\n", (4870, 4891), True, 'import numpy as np\n'), ((5303, 5333), 'numpy.einsum', 'np.einsum', (['"""ijk,jk->ij"""', 'P', 'pi'], {}), "('ijk,jk->ij', P, pi)\n", (5312, 5333), True, 'import 
numpy as np\n'), ((5841, 5871), 'numpy.ones', 'np.ones', (['(n_states, n_actions)'], {}), '((n_states, n_actions))\n', (5848, 5871), True, 'import numpy as np\n'), ((6872, 6900), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 16)'}), '(figsize=(16, 16))\n', (6882, 6900), True, 'import matplotlib.pyplot as plt\n'), ((6909, 6929), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(1)'], {}), '(2, 2, 1)\n', (6920, 6929), True, 'import matplotlib.pyplot as plt\n'), ((6938, 6973), 'matplotlib.pyplot.title', 'plt.title', (['"""P: transition function"""'], {}), "('P: transition function')\n", (6947, 6973), True, 'import matplotlib.pyplot as plt\n'), ((7022, 7042), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (7033, 7042), True, 'import matplotlib.pyplot as plt\n'), ((7051, 7082), 'matplotlib.pyplot.title', 'plt.title', (['"""r: reward function"""'], {}), "('r: reward function')\n", (7060, 7082), True, 'import matplotlib.pyplot as plt\n'), ((7091, 7104), 'matplotlib.pyplot.imshow', 'plt.imshow', (['r'], {}), '(r)\n', (7101, 7104), True, 'import matplotlib.pyplot as plt\n'), ((7114, 7134), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(3)'], {}), '(2, 2, 3)\n', (7125, 7134), True, 'import matplotlib.pyplot as plt\n'), ((7141, 7179), 'matplotlib.pyplot.title', 'plt.title', (['"""p: unconstrained dynamics"""'], {}), "('p: unconstrained dynamics')\n", (7150, 7179), True, 'import matplotlib.pyplot as plt\n'), ((7188, 7201), 'matplotlib.pyplot.imshow', 'plt.imshow', (['p'], {}), '(p)\n', (7198, 7201), True, 'import matplotlib.pyplot as plt\n'), ((7211, 7231), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(4)'], {}), '(2, 2, 4)\n', (7222, 7231), True, 'import matplotlib.pyplot as plt\n'), ((7240, 7271), 'matplotlib.pyplot.title', 'plt.title', (['"""u: optimal control"""'], {}), "('u: optimal control')\n", (7249, 7271), True, 'import matplotlib.pyplot as plt\n'), ((7280, 7293), 
'matplotlib.pyplot.imshow', 'plt.imshow', (['u'], {}), '(u)\n', (7290, 7293), True, 'import matplotlib.pyplot as plt\n'), ((7303, 7313), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7311, 7313), True, 'import matplotlib.pyplot as plt\n'), ((2038, 2053), 'numpy.around', 'np.around', (['r', '(3)'], {}), '(r, 3)\n', (2047, 2053), True, 'import numpy as np\n'), ((2069, 2091), 'numpy.around', 'np.around', (['r_approx', '(3)'], {}), '(r_approx, 3)\n', (2078, 2091), True, 'import numpy as np\n'), ((3171, 3186), 'numpy.around', 'np.around', (['r', '(3)'], {}), '(r, 3)\n', (3180, 3186), True, 'import numpy as np\n'), ((3223, 3245), 'numpy.around', 'np.around', (['r_approx', '(3)'], {}), '(r_approx, 3)\n', (3232, 3245), True, 'import numpy as np\n'), ((3755, 3767), 'numpy.argmax', 'np.argmax', (['v'], {}), '(v)\n', (3764, 3767), True, 'import numpy as np\n'), ((6993, 7011), 'numpy.sum', 'np.sum', (['P'], {'axis': '(-1)'}), '(P, axis=-1)\n', (6999, 7011), True, 'import numpy as np\n'), ((4908, 4940), 'numpy.isclose', 'np.isclose', (['P_pi', 'u'], {'atol': '(0.0001)'}), '(P_pi, u, atol=0.0001)\n', (4918, 4940), True, 'import numpy as np\n'), ((5413, 5443), 'numpy.isclose', 'np.isclose', (['P_pi', 'u'], {'atol': '(0.01)'}), '(P_pi, u, atol=0.01)\n', (5423, 5443), True, 'import numpy as np\n'), ((1169, 1186), 'numpy.sum', 'np.sum', (['P'], {'axis': '(0)'}), '(P, axis=0)\n', (1175, 1186), True, 'import numpy as np\n'), ((1188, 1203), 'numpy.ones', 'np.ones', (['(2, 2)'], {}), '((2, 2))\n', (1195, 1203), True, 'import numpy as np\n'), ((1339, 1357), 'numpy.sum', 'np.sum', (['pi'], {'axis': '(1)'}), '(pi, axis=1)\n', (1345, 1357), True, 'import numpy as np\n'), ((1359, 1372), 'numpy.ones', 'np.ones', (['(2,)'], {}), '((2,))\n', (1366, 1372), True, 'import numpy as np\n'), ((2544, 2561), 'numpy.sum', 'np.sum', (['P'], {'axis': '(0)'}), '(P, axis=0)\n', (2550, 2561), True, 'import numpy as np\n'), ((2563, 2593), 'numpy.ones', 'np.ones', (['(n_states, n_actions)'], {}), 
'((n_states, n_actions))\n', (2570, 2593), True, 'import numpy as np\n'), ((6532, 6558), 'numpy.sum', 'np.sum', (['P[:, :, a]'], {'axis': '(0)'}), '(P[:, :, a], axis=0)\n', (6538, 6558), True, 'import numpy as np\n'), ((6560, 6580), 'numpy.ones', 'np.ones', (['(n_states,)'], {}), '((n_states,))\n', (6567, 6580), True, 'import numpy as np\n'), ((1734, 1766), 'numpy.isclose', 'np.isclose', (['p', 'P_pi'], {'atol': '(0.0001)'}), '(p, P_pi, atol=0.0001)\n', (1744, 1766), True, 'import numpy as np\n'), ((2140, 2174), 'numpy.isclose', 'np.isclose', (['r', 'r_approx'], {'atol': '(0.01)'}), '(r, r_approx, atol=0.01)\n', (2150, 2174), True, 'import numpy as np\n'), ((3310, 3345), 'numpy.isclose', 'np.isclose', (['r', 'r_approx'], {'atol': '(0.001)'}), '(r, r_approx, atol=0.001)\n', (3320, 3345), True, 'import numpy as np\n')] |
import numpy
import matplotlib.pyplot as plt
def _calib_axis(fig, pos, xlabel, xlim):
    """Create one log-scaled |m| subplot with the shared axis cosmetics."""
    ax = fig.add_subplot(pos)
    ax.set_yscale('log')
    ax.set_ylabel('|m|')
    ax.set_ylim(5.e-5, 1.e-2)
    ax.set_xlabel(xlabel)
    ax.set_xlim(*xlim)
    return ax

def _shade_bands(ax, xlim):
    """Shade the grey |m| requirement bands (1e-3, 1e-3/2, 1e-3/5, each down to 1e-5)."""
    for level in (1.e-3, 1.e-3/2, 1.e-3/5):
        ax.fill_between(list(xlim), [level, level], [1.e-5, 1.e-5],
                        color='grey', alpha=0.2, edgecolor='None')

def _load_calib(path, key):
    """
    Parse a calibration .dat file into a dict of float lists.

    Each line carries `(x, c1, c2, m1, m2)`-style values; parentheses and
    commas are stripped before splitting. A missing file yields empty lists,
    leaving that panel blank (same behaviour as the original per-panel code).
    """
    calib = {key: [], 'c1': [], 'c2': [], 'm1': [], 'm2': []}
    try:
        with open(path) as fil:
            for line in fil:
                line = line.replace('(', ' ').replace(')', ' ').replace(',', ' ')
                line = ' '.join(line.split())
                x, c1, c2, m1, m2 = line.split(' ')
                calib[key].append(float(x))
                calib['c1'].append(float(c1))
                calib['c2'].append(float(c2))
                calib['m1'].append(float(m1))
                calib['m2'].append(float(m2))
    except IOError:
        pass
    return calib

def _plot_calib(ax, calib, key):
    """Plot |m1| (squares, solid line) and |m2| (crosses, dashed) vs calib[key]."""
    x = calib[key]
    m1 = abs(numpy.array(calib['m1']))
    m2 = abs(numpy.array(calib['m2']))
    ax.plot(x, m1, 's', mfc='None', mec='red', mew=1.3)
    ax.plot(x, m1, color='red')
    ax.plot(x, m2, 'x', mfc='None', mec='red', mew=1.3)
    ax.plot(x, m2, color='red', ls='--')

def fig4plot():
    """
    Build figure 4: |m| calibration values versus four model parameters
    (bulge sersic index, B/T flux ratio, galaxy ellipticity, y0), read from
    the exported output/fig4_*.dat files, and save it to output/fig4.pdf.

    The four panels previously used copy-pasted setup/load/plot code; this
    version drives them from a single table. Labels use raw strings so the
    mathtext backslashes are not (invalid) Python escapes.
    """
    # (subplot, x-label, x-limits, data file, x-column name)
    panels = [
        (221, r'n$_{\mathrm{s, b}}$', (1.5, 4.0),
         'output/fig4_bulge_sersic_index.dat', 'bulge_n'),
        (222, 'B/T', (0.0, 1.0),
         'output/fig4_bulge_flux.dat', 'bulge_flux'),
        (223, r'e$_{\mathrm{g}}$', (0.1, 0.6),
         'output/fig4_gal_ellip.dat', 'gal_ellip'),
        (224, 'y$_0$', (0.0, 0.5),
         'output/fig4_y0.dat', 'y0'),
    ]
    fig = plt.figure(figsize=(10.0, 7.5))
    fig.subplots_adjust(left=0.1, right=0.9, wspace=0.3)
    for pos, xlabel, xlim, path, key in panels:
        ax = _calib_axis(fig, pos, xlabel, xlim)
        _shade_bands(ax, xlim)
        _plot_calib(ax, _load_calib(path, key), key)
    plt.savefig('output/fig4.pdf', dpi=220)
# Script entry point: regenerate the figure from the exported .dat files.
if __name__ == '__main__':
    fig4plot()
| [
"numpy.array",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.savefig"
] | [((89, 120), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10.0, 7.5)'}), '(figsize=(10.0, 7.5))\n', (99, 120), True, 'import matplotlib.pyplot as plt\n'), ((6712, 6751), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""output/fig4.pdf"""'], {'dpi': '(220)'}), "('output/fig4.pdf', dpi=220)\n", (6723, 6751), True, 'import matplotlib.pyplot as plt\n'), ((3217, 3241), 'numpy.array', 'numpy.array', (["calib['m1']"], {}), "(calib['m1'])\n", (3228, 3241), False, 'import numpy\n'), ((3316, 3340), 'numpy.array', 'numpy.array', (["calib['m1']"], {}), "(calib['m1'])\n", (3327, 3340), False, 'import numpy\n'), ((3391, 3415), 'numpy.array', 'numpy.array', (["calib['m2']"], {}), "(calib['m2'])\n", (3402, 3415), False, 'import numpy\n'), ((3490, 3514), 'numpy.array', 'numpy.array', (["calib['m2']"], {}), "(calib['m2'])\n", (3501, 3514), False, 'import numpy\n'), ((4291, 4315), 'numpy.array', 'numpy.array', (["calib['m1']"], {}), "(calib['m1'])\n", (4302, 4315), False, 'import numpy\n'), ((4393, 4417), 'numpy.array', 'numpy.array', (["calib['m1']"], {}), "(calib['m1'])\n", (4404, 4417), False, 'import numpy\n'), ((4471, 4495), 'numpy.array', 'numpy.array', (["calib['m2']"], {}), "(calib['m2'])\n", (4482, 4495), False, 'import numpy\n'), ((4573, 4597), 'numpy.array', 'numpy.array', (["calib['m2']"], {}), "(calib['m2'])\n", (4584, 4597), False, 'import numpy\n'), ((5376, 5400), 'numpy.array', 'numpy.array', (["calib['m1']"], {}), "(calib['m1'])\n", (5387, 5400), False, 'import numpy\n'), ((5477, 5501), 'numpy.array', 'numpy.array', (["calib['m1']"], {}), "(calib['m1'])\n", (5488, 5501), False, 'import numpy\n'), ((5554, 5578), 'numpy.array', 'numpy.array', (["calib['m2']"], {}), "(calib['m2'])\n", (5565, 5578), False, 'import numpy\n'), ((5655, 5679), 'numpy.array', 'numpy.array', (["calib['m2']"], {}), "(calib['m2'])\n", (5666, 5679), False, 'import numpy\n'), ((6400, 6424), 'numpy.array', 'numpy.array', (["calib['m1']"], {}), "(calib['m1'])\n", (6411, 6424), 
False, 'import numpy\n'), ((6494, 6518), 'numpy.array', 'numpy.array', (["calib['m1']"], {}), "(calib['m1'])\n", (6505, 6518), False, 'import numpy\n'), ((6564, 6588), 'numpy.array', 'numpy.array', (["calib['m2']"], {}), "(calib['m2'])\n", (6575, 6588), False, 'import numpy\n'), ((6658, 6682), 'numpy.array', 'numpy.array', (["calib['m2']"], {}), "(calib['m2'])\n", (6669, 6682), False, 'import numpy\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
The Phantom class is instantiated with a ground-truth phantom and corresponding material properties data. The get_projections method simulates data acquisition and returns radiographs for the specified theta values.
"""
import sys
import os
import numpy as np
import pandas as pd
from scipy import misc
import h5py
import time
from scipy.integrate import simps
import matplotlib.pyplot as plt
import cv2
from tomopy import project
from scipy.ndimage.filters import gaussian_filter
from tomo_twin.pg_filter import add_phase_contrast
model_data_path = '../model_data'
class Phantom:
    '''
    Ground-truth phantom for simulating X-ray radiograph acquisition.

    Wraps a labeled (segmented) volume together with per-material attenuation
    data (via `Material`); `get_projections` forward-projects the sigma volume
    over a set of rotation angles and applies detector effects.
    '''
    def __init__(self, vol, materials, res, energy_pts, bits = 16, data_path = model_data_path):
        '''
        Parameters
        ----------
        vol : np.array
            labeled (segmented / ground-truth) volume. voxel values are in finite range [0,...n_materials-1].
        materials : dict
            dict of material names and their respective density g/cc, e.g. {"Fe" : 7.87, "Al": 2.7}
        res : float
            voxel size in microns
        energy_pts : float or np.array
            list of energies
        bits : int
            16 for 16 bit camera
        data_path : str
            path to exported XOP data
        '''
        # deal with materials; one Material per label value, in dict order
        self.bits = bits
        self.res = res
        self.data_path = data_path
        self.energy_pts = np.asarray(energy_pts) if type(energy_pts) is float else energy_pts
        self.materials = [Material(key, value, \
                                   self.res, \
                                   self.energy_pts, \
                                   data_path = self.data_path) for key, value in materials.items()]
        # sigma_mat[ie, imat]: per-energy, per-material attenuation factor
        # (Material.sigma columns concatenated; see Material.calc_sigma)
        self.sigma_mat = np.concatenate([material.sigma for material in self.materials], axis = 1)
        # some numbers
        self.n_mat = len(self.materials)
        self.n_energies = np.size(self.energy_pts)
        # deal with labeled volume: labels must span exactly 0..n_mat-1
        self.vol = vol
        self.vol_shape = self.vol.shape
        if self.vol.max() != (len(self.materials)-1):
            raise ValueError("Number of materials does not match voxel value range.")
        if len(self.vol_shape) not in (2,3): raise ValueError("vol must have either 2 or 3 dimensions.")
        # rays travel along axis 1 for 3D volumes, axis 0 for 2D slices
        self.ray_axis = 1 if len(self.vol_shape) == 3 else 0
        if len(self.vol_shape) == 3:
            self.proj_shape = (self.vol_shape[0], self.vol_shape[-1])
        else:
            self.proj_shape = (self.vol_shape[-1],)
        self.make_volume() # blows up volume into individual energies

    def make_volume(self):
        '''
        Converts the labeled GT volume provided into a volume of sigma values (attenuation coefficient, density and pixel size as pathlength). The resulting shape is (nz, ny, nx) or (n_energies, nz, ny, nx). The "energy" channel is added if multiple energies are requested.
        '''
        voxel_vals = np.arange(self.n_mat)
        # one float32 copy of the labeled volume per energy point
        self.vol = np.asarray([self.vol]*self.n_energies, dtype = np.float32)
        for ie in range(self.n_energies):
            for voxel in voxel_vals:
                # In-place label -> sigma replacement.
                # NOTE(review): assumes no sigma value coincides with a
                # not-yet-replaced integer label; sigma values are typically
                # << 1 so collisions are unlikely, but verify if that changes.
                self.vol[ie, self.vol[ie] == voxel] = self.sigma_mat[ie,voxel]
        if self.n_energies == 1:
            # drop the singleton energy axis for the monochromatic case
            self.vol = self.vol[0]
            return
        else:
            return

    def get_projections(self, theta = (0,180,180), beam = None, noise = 0.01, blur_size = 5, detector_dist = 0.0):
        '''
        Acquire projections on the phantom.
        Returns
        -------
        np.array
            output shape is a stack of radiographs (nthetas, nrows, ncols); dtype uint16, clipped to the camera bit depth.
        Parameters
        ----------
        theta : tuple
            The tuple must be defined as (starting_theta, ending_theta, number_projections). The angle is interpreted as degrees.
        beam : np.array
            The flat-field (beam array) must be provided with shape (1, nrows, ncols) or (n_energies, nrows, ncols). If None, a uniform beam at full range (2**bits - 1) is used.
        noise : float
            The noise parameter is interpreted as a fraction (0,1). NOTE: the monochromatic path actually applies N(projs, noise*sqrt(projs)) — a normal approximation of Poisson noise (see code below).
        blur_size : int
            window size from which the gaussian blur sigma is derived as 0.3*(0.5*(blur_size-1)-1)+0.8; 0 disables blurring. Monochromatic path only.
        detector_dist : float
            sample-to-detector distance (cm, per the comment below); > 0 adds in-line phase contrast. Monochromatic path only.
        '''
        # make theta array in radians
        theta = np.linspace(*theta, endpoint = True)
        theta = np.radians(theta)
        # make beam array (if not passed)
        if beam is None:
            beam = np.ones(self.proj_shape, dtype = np.float32)
            beam = beam*(2**self.bits-1)
        # if monochromatic beam
        if self.n_energies == 1:
            # NOTE(review): projections are multiplied directly by the beam;
            # no exp(-line_integral) is applied here — verify whether
            # Beer-Lambert attenuation is handled elsewhere.
            projs = project(self.vol, theta, pad = False, emission = False)
            projs = projs*beam
            # scintillator / detector blurring
            if blur_size > 0:
                projs = [proj for proj in projs]
                projs = Parallelize(projs, gaussian_filter, \
                                    procs = 12, \
                                    sigma = 0.3*(0.5*(blur_size - 1) - 1) + 0.8, \
                                    order = 0)
                projs = np.asarray(projs)
            # in-line phase contrast based on detector-sample distance (cm)
            if detector_dist > 0.0:
                # pad vertically (reflect) so the propagator does not wrap
                pad_h = int(projs.shape[1]*0.4)
                projs = np.pad(projs, ((0,0), (pad_h,pad_h), (0,0)), mode = 'reflect')
                projs = add_phase_contrast(projs, \
                                           pixel_size = self.res*1e-04, \
                                           energy = float(self.energy_pts), \
                                           dist = detector_dist)
                projs = projs[:,pad_h:-pad_h,:]
            # Poisson noise model (approximated as normal distribution)
            projs = np.random.normal(projs, noise*np.sqrt(projs))
            # projs = np.random.poisson(projs)
            # This actually worked fine
            # projs = projs*beam*(1 + np.random.normal(0, noise, projs.shape))
        # if polychromatic beam
        else:
            projs = Parallelize(theta.tolist(), \
                                _project_at_theta, \
                                vol = self.vol, \
                                n_energies = self.n_energies, \
                                beam = beam, \
                                noise = noise, procs = 12)
            projs = np.asarray(projs)
        # saturated pixels: clip to the camera's dynamic range
        projs = np.clip(projs, 0, 2**self.bits-1)
        return projs.astype(np.uint16)
class Material:
    # Ideas borrowed from <NAME>'s code for BeamHardeningCorrections (7-BM github)
    def __init__(self, name, density, path_len, energy_pts, scintillator_flag = False, data_path = None):
        """
        A material with X-ray attenuation data interpolated onto the
        requested energy points.

        Parameters
        ----------
        name : str
            string describing material name. Typically, use chemical formula, e.g. Fe, Cu, etc. Must match the XOP data filename "<name>_properties_xCrossSec.dat" under data_path/materials.
        density : float
            g/cm3 units
        path_len : float
            thickness for components (filters, scintillators, etc.) and pixel size for materials in phantom (microns)
        energy_pts : np.array
            listing the energy_pts requested. shape is (n,)
        scintillator_flag : bool
            return absorption data instead of attenuation, if material is scintillator
        sigma : np.array
            sigma array with dimensions (n_energies, 1)
        att_coeff : np.array
            mass attenuation coefficient array (n_energies, 1)
        data_path : str
            path to exported XOP data
        """
        self.name = name
        self.data_path = data_path
        self.density = density # g/cc
        self.scintillator_flag = scintillator_flag
        self.path_len = path_len # um
        self.energy_pts = energy_pts
        self.calc_sigma()

    def read_attcoeff(self):
        """
        Read the mass attenuation (or absorption, for scintillators)
        coefficient from the exported XOP data file and interpolate it
        onto self.energy_pts.

        # att_coeff : cm2/g units, array dimensions of (n_energies,)
        """
        # NOTE(review): both sep and delimiter are passed; pandas uses
        # delimiter=" " and ignores sep — presumably intentional, verify.
        df = pd.read_csv(os.path.join(self.data_path, 'materials', self.name + "_properties_xCrossSec.dat"), sep = '\t', delimiter = " ", header = None)
        # column 0 divided by 1000 — presumably eV -> keV; confirm against the exported files
        old_energy_pts = np.asarray(df[0])/1000.0
        if self.scintillator_flag:
            # column 3: absorption data (scintillator response)
            att_coeff = np.asarray(df[3])
        else:
            # column 6: attenuation data
            att_coeff = np.asarray(df[6])
        self.att_coeff = np.interp(self.energy_pts, old_energy_pts, att_coeff).reshape(-1,1)

    def calc_sigma(self):
        # sigma = (mu/rho) * rho * t: dimensionless attenuation factor per path_len
        self.read_attcoeff()
        self.sigma = np.multiply(self.att_coeff, self.density)*(self.path_len*1e-4) # att_coeff in cm2/g, rho in g/cm3, res in cm
def read_source(file_path, energy_pts, res = 1.17, img_shape = (1200,1920), bits = 16, exp_fac = 0.92):
    """
    Read beam data from a source hdf5 file (format specific to this code;
    original data adapted from DABAX in XOP) and return a flat-field array.

    Two choices for img_shape:
    1. 2D shape (V, H): incorporates the vertically varying fan-beam profile
       and spectral variation. Crops the source to the camera FOV defined by
       (res, img_shape), assuming the FOV is vertically centered in the fan.
       Returns b with shape (n_energies, V, H).
    2. 1D shape (H,): ignores the vertical profile; spectral variation only.
       Returns b with shape (n_energies, 1).

    Parameters
    ----------
    file_path : str
        filepath for reading beam source, e.g. bending magnet, undulator or monochromatic source, etc.
    energy_pts : float or np.array
        energy points in keV, array with dimensions (n_energies,)
    res : float
        pixel resolution of camera in micrometers
    img_shape : tuple
        pixel array size (V, H), or (H,) for the profile-free case
    bits : int
        camera bit depth; output is scaled toward 2**bits - 1
    exp_fac : float
        exposure factor in (0, 1]; blends the normalized beam profile with a
        constant pedestal before scaling.
    """
    if type(energy_pts) is float:
        energy_pts = np.asarray([energy_pts])
    # A 1D img_shape means "no vertical profile"; V = 0 selects that branch.
    if len(img_shape) == 1:
        # FIX: was `H = img_shape`, which bound the tuple itself, not its element.
        H, = img_shape
        V = 0
    else:
        V, H = img_shape
    # Read from hdf5 file. This is a specific format for this code. Use "make_sourcefile.py" to create your own with XOP (X,Y,Z) csv file as input.
    with h5py.File(file_path, 'r') as hf:
        b = np.asarray(hf["power"][:])
        old_energy_pts = np.asarray(hf["energy_pts"][:])/1000.0
        mm = np.asarray(hf["vert_mm"][:])
    if V != 0:
        # Crop out the beam which is not in field of view. E.g. V = 1200 pixels, res = 1.17 um --> size = 1.4 mm, which is +/- 0.02 mrads
        mm_low, mm_high = -res*0.001*V/2, res*0.001*V/2 # 0.001 to convert res in um to mm
        idx = np.where((mm > mm_low) & (mm < mm_high))
        b = b[idx,...][0]
        # Resample each vertical position onto the requested energy_pts
        b = np.asarray([np.interp(energy_pts, old_energy_pts, b[ii,...]) for ii in range(b.shape[0])])
        # Create bright-field image with beam profile along Y, duplicates along X
        b = cv2.resize(b, (energy_pts.size, V))
        b = np.tile(b[:,np.newaxis,:], (1, H, 1))
        b = np.moveaxis(b, 2, 0)
    else:
        # average over the vertical profile, keep spectral variation only
        b = np.mean(b, axis = 0)
        b = np.interp(energy_pts, old_energy_pts, b)
        b = b.reshape(-1,1)
    # normalize to [0, 1], split power equally across energies, then scale to
    # counts (the -2000 leaves headroom below saturation — presumably to
    # avoid clipping; verify against the camera model)
    b_min = np.min(b)
    b_max = np.max(b)
    b = (b - b_min) / (b_max - b_min)
    b = b/energy_pts.size
    b = (2**bits-1 - 2000)*(exp_fac*b + (1-exp_fac))
    return b
import functools
from multiprocessing import Pool, cpu_count
def _project_at_theta(theta_val, vol = None, n_energies = None, beam = None, noise = None, energy_pts = None):
    """
    Forward-project a multi-energy volume at a single angle and integrate
    over energy to produce one polychromatic radiograph.

    Parameters
    ----------
    theta_val : float
        projection angle in degrees.
    vol : np.array
        per-energy sigma volume; indexed as vol[ie] for ie in range(n_energies).
    n_energies : int
        number of energy points (length of vol's first axis).
    beam : np.array
        per-energy flat-field; multiplied into the projections.
    noise : float
        gaussian noise scale (divided by n_energies per energy channel).
    energy_pts : np.array, optional
        energy grid used as the integration abscissa for Simpson's rule.
        FIX: the original referenced an undefined global `energy_pts`
        (NameError at runtime). With the default None, simps integrates
        assuming unit sample spacing; callers should pass the actual energy
        grid for a physically scaled result.

    Returns
    -------
    np.array
        the energy-integrated projection.
    """
    proj = np.asarray([project(vol[ie], \
                                np.radians([theta_val]), \
                                pad = False, \
                                emission = False)[0] for ie in range(n_energies)])
    proj = proj*beam + np.random.normal(0, noise/n_energies, beam.shape)
    proj = simps(proj, x = energy_pts, axis = 0)
    return proj
def Parallelize(ListIn, f, procs = -1, **kwargs):
    """
    Run `f` over a list of argument tuples, optionally in parallel.

    This packages the "starmap" function in multiprocessing, to allow
    multiple iterable inputs for the parallelized function.

    Parameters
    ----------
    ListIn : list
        each item in the list is a tuple of non-keyworded arguments for f.
        Bare (non-tuple) items are wrapped into 1-tuples automatically.
    f : function
        function to be parallelized. Signature must not contain any other
        non-keyworded arguments other than those passed as iterables.
    procs : int
        number of worker processes. -1 picks a heuristic count from the
        workload size; 1 runs serially in this process (no pickling needed).
    **kwargs
        keyword arguments bound to `f` via functools.partial before mapping.

    Returns
    -------
    list
        [f(*args, **kwargs) for args in ListIn], in input order.
    """
    if type(ListIn[0]) is not tuple:
        ListIn = [(item,) for item in ListIn]
    reduced_argfunc = functools.partial(f, **kwargs)
    if procs == -1:
        # heuristic: scale the worker count with the workload size
        opt_procs = int(np.interp(len(ListIn), [1,100,500,1000,3000,5000,10000] ,[1,2,4,8,12,36,48]))
        procs = min(opt_procs, cpu_count())
    if procs == 1:
        return [reduced_argfunc(*args) for args in ListIn]
    # FIX: context manager guarantees worker cleanup even if starmap raises
    # (the original leaked the pool on exception).
    with Pool(processes = procs) as p:
        return p.starmap(reduced_argfunc, ListIn)
| [
"numpy.radians",
"numpy.clip",
"numpy.sqrt",
"multiprocessing.cpu_count",
"numpy.moveaxis",
"numpy.arange",
"numpy.mean",
"numpy.multiply",
"numpy.where",
"numpy.asarray",
"numpy.max",
"numpy.linspace",
"numpy.concatenate",
"numpy.min",
"numpy.random.normal",
"numpy.tile",
"numpy.one... | [((11607, 11616), 'numpy.min', 'np.min', (['b'], {}), '(b)\n', (11613, 11616), True, 'import numpy as np\n'), ((11629, 11638), 'numpy.max', 'np.max', (['b'], {}), '(b)\n', (11635, 11638), True, 'import numpy as np\n'), ((12254, 12287), 'scipy.integrate.simps', 'simps', (['proj'], {'x': 'energy_pts', 'axis': '(0)'}), '(proj, x=energy_pts, axis=0)\n', (12259, 12287), False, 'from scipy.integrate import simps\n'), ((12936, 12966), 'functools.partial', 'functools.partial', (['f'], {}), '(f, **kwargs)\n', (12953, 12966), False, 'import functools\n'), ((1883, 1954), 'numpy.concatenate', 'np.concatenate', (['[material.sigma for material in self.materials]'], {'axis': '(1)'}), '([material.sigma for material in self.materials], axis=1)\n', (1897, 1954), True, 'import numpy as np\n'), ((2056, 2080), 'numpy.size', 'np.size', (['self.energy_pts'], {}), '(self.energy_pts)\n', (2063, 2080), True, 'import numpy as np\n'), ((3146, 3167), 'numpy.arange', 'np.arange', (['self.n_mat'], {}), '(self.n_mat)\n', (3155, 3167), True, 'import numpy as np\n'), ((3187, 3245), 'numpy.asarray', 'np.asarray', (['([self.vol] * self.n_energies)'], {'dtype': 'np.float32'}), '([self.vol] * self.n_energies, dtype=np.float32)\n', (3197, 3245), True, 'import numpy as np\n'), ((4502, 4536), 'numpy.linspace', 'np.linspace', (['*theta'], {'endpoint': '(True)'}), '(*theta, endpoint=True)\n', (4513, 4536), True, 'import numpy as np\n'), ((4555, 4572), 'numpy.radians', 'np.radians', (['theta'], {}), '(theta)\n', (4565, 4572), True, 'import numpy as np\n'), ((6758, 6795), 'numpy.clip', 'np.clip', (['projs', '(0)', '(2 ** self.bits - 1)'], {}), '(projs, 0, 2 ** self.bits - 1)\n', (6765, 6795), True, 'import numpy as np\n'), ((10177, 10201), 'numpy.asarray', 'np.asarray', (['[energy_pts]'], {}), '([energy_pts])\n', (10187, 10201), True, 'import numpy as np\n'), ((10579, 10604), 'h5py.File', 'h5py.File', (['file_path', '"""r"""'], {}), "(file_path, 'r')\n", (10588, 10604), False, 'import 
h5py\n'), ((10624, 10650), 'numpy.asarray', 'np.asarray', (["hf['power'][:]"], {}), "(hf['power'][:])\n", (10634, 10650), True, 'import numpy as np\n'), ((10728, 10756), 'numpy.asarray', 'np.asarray', (["hf['vert_mm'][:]"], {}), "(hf['vert_mm'][:])\n", (10738, 10756), True, 'import numpy as np\n'), ((11020, 11060), 'numpy.where', 'np.where', (['((mm > mm_low) & (mm < mm_high))'], {}), '((mm > mm_low) & (mm < mm_high))\n', (11028, 11060), True, 'import numpy as np\n'), ((11327, 11362), 'cv2.resize', 'cv2.resize', (['b', '(energy_pts.size, V)'], {}), '(b, (energy_pts.size, V))\n', (11337, 11362), False, 'import cv2\n'), ((11375, 11414), 'numpy.tile', 'np.tile', (['b[:, np.newaxis, :]', '(1, H, 1)'], {}), '(b[:, np.newaxis, :], (1, H, 1))\n', (11382, 11414), True, 'import numpy as np\n'), ((11425, 11445), 'numpy.moveaxis', 'np.moveaxis', (['b', '(2)', '(0)'], {}), '(b, 2, 0)\n', (11436, 11445), True, 'import numpy as np\n'), ((11473, 11491), 'numpy.mean', 'np.mean', (['b'], {'axis': '(0)'}), '(b, axis=0)\n', (11480, 11491), True, 'import numpy as np\n'), ((11506, 11546), 'numpy.interp', 'np.interp', (['energy_pts', 'old_energy_pts', 'b'], {}), '(energy_pts, old_energy_pts, b)\n', (11515, 11546), True, 'import numpy as np\n'), ((12193, 12244), 'numpy.random.normal', 'np.random.normal', (['(0)', '(noise / n_energies)', 'beam.shape'], {}), '(0, noise / n_energies, beam.shape)\n', (12209, 12244), True, 'import numpy as np\n'), ((13258, 13279), 'multiprocessing.Pool', 'Pool', ([], {'processes': 'procs'}), '(processes=procs)\n', (13262, 13279), False, 'from multiprocessing import Pool, cpu_count\n'), ((1540, 1562), 'numpy.asarray', 'np.asarray', (['energy_pts'], {}), '(energy_pts)\n', (1550, 1562), True, 'import numpy as np\n'), ((4668, 4710), 'numpy.ones', 'np.ones', (['self.proj_shape'], {'dtype': 'np.float32'}), '(self.proj_shape, dtype=np.float32)\n', (4675, 4710), True, 'import numpy as np\n'), ((4861, 4912), 'tomopy.project', 'project', (['self.vol', 'theta'], {'pad': 
'(False)', 'emission': '(False)'}), '(self.vol, theta, pad=False, emission=False)\n', (4868, 4912), False, 'from tomopy import project\n'), ((6688, 6705), 'numpy.asarray', 'np.asarray', (['projs'], {}), '(projs)\n', (6698, 6705), True, 'import numpy as np\n'), ((8454, 8540), 'os.path.join', 'os.path.join', (['self.data_path', '"""materials"""', "(self.name + '_properties_xCrossSec.dat')"], {}), "(self.data_path, 'materials', self.name +\n '_properties_xCrossSec.dat')\n", (8466, 8540), False, 'import os\n'), ((8612, 8629), 'numpy.asarray', 'np.asarray', (['df[0]'], {}), '(df[0])\n', (8622, 8629), True, 'import numpy as np\n'), ((8696, 8713), 'numpy.asarray', 'np.asarray', (['df[3]'], {}), '(df[3])\n', (8706, 8713), True, 'import numpy as np\n'), ((8752, 8769), 'numpy.asarray', 'np.asarray', (['df[6]'], {}), '(df[6])\n', (8762, 8769), True, 'import numpy as np\n'), ((8962, 9003), 'numpy.multiply', 'np.multiply', (['self.att_coeff', 'self.density'], {}), '(self.att_coeff, self.density)\n', (8973, 9003), True, 'import numpy as np\n'), ((10676, 10707), 'numpy.asarray', 'np.asarray', (["hf['energy_pts'][:]"], {}), "(hf['energy_pts'][:])\n", (10686, 10707), True, 'import numpy as np\n'), ((13125, 13136), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (13134, 13136), False, 'from multiprocessing import Pool, cpu_count\n'), ((5355, 5372), 'numpy.asarray', 'np.asarray', (['projs'], {}), '(projs)\n', (5365, 5372), True, 'import numpy as np\n'), ((5587, 5650), 'numpy.pad', 'np.pad', (['projs', '((0, 0), (pad_h, pad_h), (0, 0))'], {'mode': '"""reflect"""'}), "(projs, ((0, 0), (pad_h, pad_h), (0, 0)), mode='reflect')\n", (5593, 5650), True, 'import numpy as np\n'), ((8804, 8857), 'numpy.interp', 'np.interp', (['self.energy_pts', 'old_energy_pts', 'att_coeff'], {}), '(self.energy_pts, old_energy_pts, att_coeff)\n', (8813, 8857), True, 'import numpy as np\n'), ((11153, 11202), 'numpy.interp', 'np.interp', (['energy_pts', 'old_energy_pts', 'b[ii, ...]'], {}), 
'(energy_pts, old_energy_pts, b[ii, ...])\n', (11162, 11202), True, 'import numpy as np\n'), ((6102, 6116), 'numpy.sqrt', 'np.sqrt', (['projs'], {}), '(projs)\n', (6109, 6116), True, 'import numpy as np\n'), ((12013, 12036), 'numpy.radians', 'np.radians', (['[theta_val]'], {}), '([theta_val])\n', (12023, 12036), True, 'import numpy as np\n')] |
from __future__ import absolute_import
# -- EXTERN IMPORT -- #
import numpy as np
import keras.backend as K
# -- IMPORT -- #
from .. import __verbose__ as vrb
from ..utils.data import load_image, deprocess_image, visualize_heatmap
from .gradient import Gradient
# -- SMOOTHGRAD METHOD -- #
class SmoothGrad(Gradient):
"""CLASS::SmoothGrad:
---
Description:
---
> Method that reduces the noise from Gradient results:
Arguments:
---
>- model {keras.Model} -- Model to analyze.
>- layerName {string} -- The selected layer to analyze.
Link:
---
>- http://arxiv.org/abs/1706.03825."""
def __init__(self,model,layerName):
super().__init__(model, layerName)
def interpret(self,fileName,samples=50,stdNoise=10):
"""METHOD::INTERPRET:
---
Arguments:
---
>- fileName {string} -- The path to the image file.
>- samples {int} -- Number of times Gradient will be applied. (default:{50})
>- stdNoise {float} -- The standard deviation of the noise added. (default:{10})
Returns:
---
>- {np.array} -- The saliency map."""
vrb.print_msg(self.__class__.__name__+' Analyzing')
vrb.print_msg('--------------------------')
self.rawData,imgData = load_image(fileName,preprocess=True)
SmoothGrad = []
for _ in range(samples):
noiseSignal = np.random.normal(0,stdNoise,imgData.shape)
img = imgData+noiseSignal
gradVal = self.gradient([img])[0]
SmoothGrad.append(gradVal)
self.heatMap = np.mean(np.array(SmoothGrad),axis=0)
self.heatMap = np.sum(self.heatMap[0],axis=-1)
self.heatMap[self.heatMap < np.mean(self.heatMap)] = 0
vrb.print_msg('========== DONE ==========\n')
return self.heatMap
def visualize(self,savePath,cmap='bone'):
"""METHOD::VISUALIZE:
---
Arguments:
---
>- savePath {string} -- The path where the graph will be saved.
>- cmap {string} -- The color map used to represent the graph.
Returns:
---
>- {NONE}."""
vrb.print_msg('Visualize '+self.__class__.__name__+' Result...')
vrb.print_msg('--------------------------')
heatMap = deprocess_image(self.heatMap.copy())
visualize_heatmap(self.rawData,heatMap,self.__class__.__name__,cmap,savePath)
vrb.print_msg('========== DONE ==========\n')
def __repr__(self):
return super().__repr__()+'Smooth Gradients>' | [
"numpy.random.normal",
"numpy.sum",
"numpy.array",
"numpy.mean"
] | [((1500, 1532), 'numpy.sum', 'np.sum', (['self.heatMap[0]'], {'axis': '(-1)'}), '(self.heatMap[0], axis=-1)\n', (1506, 1532), True, 'import numpy as np\n'), ((1290, 1334), 'numpy.random.normal', 'np.random.normal', (['(0)', 'stdNoise', 'imgData.shape'], {}), '(0, stdNoise, imgData.shape)\n', (1306, 1334), True, 'import numpy as np\n'), ((1454, 1474), 'numpy.array', 'np.array', (['SmoothGrad'], {}), '(SmoothGrad)\n', (1462, 1474), True, 'import numpy as np\n'), ((1562, 1583), 'numpy.mean', 'np.mean', (['self.heatMap'], {}), '(self.heatMap)\n', (1569, 1583), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals
from eight import *
from . import bw2test, BW2DataTest
from .fixtures import food as food_data, biosphere, get_naughty
from bw2data import config, projects
from bw2data.database import DatabaseChooser
from bw2data.backends.peewee import (
Activity as PWActivity,
ActivityDataset,
Exchange as PWExchange,
ExchangeDataset,
sqlite3_lci_db,
)
from bw2data.backends.utils import convert_backend
from bw2data.backends.single_file.database import SingleFileDatabase
from bw2data.errors import (
InvalidExchange,
MissingIntermediateData,
UnknownObject,
UntypedExchange,
ValidityError,
)
from bw2data.backends.single_file import (
Activity as SFActivity,
Exchange as SFExchange,
)
from bw2data.errors import NotAllowed, WrongDatabase
from bw2data.meta import mapping, geomapping, databases, methods
from bw2data.parameters import (
ActivityParameter,
DatabaseParameter,
ParameterizedExchange,
parameters,
)
from bw2data.serialization import JsonWrapper, JsonSanitizer
from bw2data.utils import numpy_string, get_activity
from bw2data.validate import db_validator
from peewee import DoesNotExist
import copy
import datetime
import numpy as np
import os
import pickle
import pytest
import warnings
@pytest.fixture
@bw2test
def food():
d = DatabaseChooser("biosphere")
d.write(biosphere)
d = DatabaseChooser("food")
d.write(food_data)
def test_food(food):
assert len(databases) == 2
assert sorted(x for x in databases) == ['biosphere', 'food']
### Basic functions
@bw2test
def test_get():
d = DatabaseChooser("biosphere")
d.write(biosphere)
activity = d.get('1')
assert isinstance(activity, PWActivity)
assert activity['name'] == 'an emission'
@bw2test
def test_iter():
d = DatabaseChooser("biosphere")
d.write(biosphere)
activity = next(iter(d))
assert isinstance(activity, PWActivity)
assert activity['name'] in ('an emission', 'another emission')
@bw2test
def test_get_random():
d = DatabaseChooser("biosphere")
d.write(biosphere)
activity = d.random()
assert isinstance(activity, PWActivity)
assert activity['name'] in ('an emission', 'another emission')
def test_copy(food):
d = DatabaseChooser("food")
with pytest.raises(AssertionError):
d.copy("food")
d.copy("repas")
assert "repas" in databases
@bw2test
def test_copy_does_deepcopy():
data = {
("old name", '1'): {
"exchanges": [{
"input": ("old name", '1'),
"amount": 1.0,
'type': 'technosphere'
}]
}
}
d = DatabaseChooser("old name")
d.write(data)
new_db = d.copy("new name")
new_data = new_db.load()
assert list(new_data.values())[0]['exchanges'][0]['input'] == ('new name', '1')
assert list(data.values())[0]['exchanges'][0]['input'] == ('old name', '1')
assert list(d.load().values())[0]['exchanges'][0]['input'] == ('old name', '1')
@bw2test
def test_raise_wrong_database():
data = {
("foo", '1'): {}
}
d = DatabaseChooser("bar")
with pytest.raises(WrongDatabase):
d.write(data)
@bw2test
def test_deletes_from_database():
d = DatabaseChooser("biosphere")
d.write(biosphere)
assert "biosphere" in databases
del databases['biosphere']
assert next(sqlite3_lci_db.execute_sql(
"select count(*) from activitydataset where database = 'biosphere'"
)) == (0,)
assert next(sqlite3_lci_db.execute_sql(
"select count(*) from exchangedataset where output_database = 'biosphere'"
)) == (0,)
@bw2test
def test_delete_warning():
d = DatabaseChooser("biosphere")
d.write(biosphere)
with pytest.warns(UserWarning):
d.delete()
@bw2test
def test_relabel_data():
old_data = {
("old and boring", '1'): {
"exchanges": [{"input": ("old and boring", '42'), "amount": 1.0}]
},
("old and boring", '2'): {
"exchanges": [{"input": ("old and boring", '1'), "amount": 4.0}]
}
}
shiny_new = {
("shiny new", '1'): {
"exchanges": [{"input": ("old and boring", '42'), "amount": 1.0}]
},
("shiny new", '2'): {
"exchanges": [{"input": ("shiny new", '1'), "amount": 4.0}]
}
}
db = DatabaseChooser("foo")
assert shiny_new == db.relabel_data(old_data, "shiny new")
### Metadata
@bw2test
def test_find_graph_dependents():
databases['one'] = {'depends': ['two', 'three']}
databases['two'] = {'depends': ['four', 'five']}
databases['three'] = {'depends': ['four']}
databases['four'] = {'depends': ['six']}
databases['five'] = {'depends': ['two']}
databases['six'] = {'depends': []}
assert (
DatabaseChooser('one').find_graph_dependents() ==
{'one', 'two', 'three', 'four', 'five', 'six'}
)
@bw2test
def test_register():
database = DatabaseChooser("testy")
database.register()
assert "testy" in databases
assert 'depends' in databases['testy']
@bw2test
def test_deregister():
d = DatabaseChooser("food")
d.register()
assert "food" in databases
d.deregister()
assert "food" not in databases
@bw2test
def test_write_sets_databases_number_attribute():
d = DatabaseChooser("biosphere")
d.write(biosphere)
assert databases["biosphere"]["number"] == len(biosphere)
### Processed arrays
@bw2test
def test_sqlite_processed_array_order():
database = DatabaseChooser("testy")
data = {
("testy", "C"): {},
("testy", "A"): {'type': 'biosphere'},
("testy", "B"): {'exchanges': [
{'input': ("testy", "A"),
'amount': 1,
'type': 'technosphere'},
{'input': ("testy", "A"),
'amount': 2,
'type': 'technosphere'},
{'input': ("testy", "C"),
'amount': 2,
'type': 'biosphere'},
{'input': ("testy", "C"),
'amount': 3,
'type': 'biosphere'},
{'input': ("testy", "B"),
'amount': 4,
'type': 'production'},
{'input': ("testy", "B"),
'amount': 1,
'type': 'production'},
]}
}
database.write(data)
lookup = {k: mapping[("testy", k)] for k in "ABC"}
expected = sorted([
(lookup['A'], lookup['B'], 1),
(lookup['A'], lookup['B'], 2),
(lookup['B'], lookup['B'], 1),
(lookup['B'], lookup['B'], 4),
(lookup['C'], lookup['C'], 1),
(lookup['C'], lookup['B'], 2),
(lookup['C'], lookup['B'], 3),
])
array = np.load(database.filepath_processed())
assert array.shape == (7,)
result = [(array['input'][x], array['output'][x], array['amount'][x])
for x in range(7)]
assert expected == result
@bw2test
def test_singlefile_processed_array_order():
database = DatabaseChooser("testy", "singlefile")
data = {
("testy", "C"): {},
("testy", "A"): {'type': 'biosphere'},
("testy", "B"): {'exchanges': [
{'input': ("testy", "A"),
'amount': 1,
'type': 'technosphere'},
{'input': ("testy", "A"),
'amount': 2,
'type': 'technosphere'},
{'input': ("testy", "C"),
'amount': 2,
'type': 'biosphere'},
{'input': ("testy", "C"),
'amount': 3,
'type': 'biosphere'},
{'input': ("testy", "B"),
'amount': 4,
'type': 'production'},
{'input': ("testy", "B"),
'amount': 1,
'type': 'production'},
]}
}
database.write(data)
lookup = {k: mapping[("testy", k)] for k in "ABC"}
expected = sorted([
(lookup['A'], lookup['B'], 1),
(lookup['A'], lookup['B'], 2),
(lookup['B'], lookup['B'], 1),
(lookup['B'], lookup['B'], 4),
(lookup['C'], lookup['C'], 1),
(lookup['C'], lookup['B'], 2),
(lookup['C'], lookup['B'], 3),
])
array = np.load(database.filepath_processed())
assert array.shape == (7,)
result = [(array['input'][x], array['output'][x], array['amount'][x])
for x in range(7)]
assert expected == result
@bw2test
def test_process_adds_to_mappings():
database = DatabaseChooser("testy")
database_data = {
("testy", "A"): {'location': 'CH'},
("testy", "B"): {'location': 'DE'},
}
database.write(database_data)
assert ("testy", "A") in mapping and ("testy", "B") in mapping
assert "CH" in geomapping and "DE" in geomapping
@bw2test
def test_process_unknown_object():
database = DatabaseChooser("testy")
data = {
("testy", "A"): {},
("testy", "B"): {'exchanges': [
{'input': ("testy", "A"),
'amount': 1,
'type': 'technosphere'},
{'input': ("testy", "C"),
'amount': 1,
'type': 'technosphere'},
]},
}
with pytest.raises(UnknownObject):
database.write(data)
### String handling
@bw2test
def test_naughty_activity_codes():
db = DatabaseChooser("foo")
data = {("foo", str(i)): {'name': x} for i, x in enumerate(get_naughty())}
db.write(data)
assert set(get_naughty()) == set(x['name'] for x in db)
class DatabaseTest(BW2DataTest):
def test_setup(self):
d = DatabaseChooser("biosphere")
d.write(biosphere)
d = DatabaseChooser("food")
d.write(food_data)
def test_rename(self):
d = DatabaseChooser("biosphere")
d.write(biosphere)
d = DatabaseChooser("food")
d.write(copy.deepcopy(food_data))
ndb = d.rename("buildings")
ndb_data = ndb.load()
self.assertEqual(ndb.name, "buildings")
self.assertEqual(d.name, "buildings")
self.assertEqual(len(ndb_data), len(food_data))
for key in ndb_data:
self.assertEqual(key[0], "buildings")
for exc in ndb_data[key]['exchanges']:
self.assertTrue(exc['input'][0] in ('biosphere', 'buildings'))
def test_exchange_save(self):
database = DatabaseChooser("testy")
data = {
("testy", "A"): {},
("testy", "C"): {'type': 'biosphere'},
("testy", "B"): {'exchanges': [
{'input': ("testy", "A"),
'amount': 1,
'type': 'technosphere'},
{'input': ("testy", "B"),
'amount': 1,
'type': 'production'},
{'input': ("testy", "C"),
'amount': 1,
'type': 'biosphere'},
]},
}
then = datetime.datetime.now().isoformat()
database.write(data)
act = database.get("B")
exc = [x for x in act.production()][0]
exc['amount'] = 2
exc.save()
self.assertTrue(databases[database.name].get("dirty"))
self.assertTrue(database.metadata.get("dirty"))
self.assertTrue(database.metadata['modified'] > then)
exc = [x for x in act.production()][0]
self.assertEqual(exc['amount'], 2)
def test_dirty_activities(self):
database = DatabaseChooser("testy")
data = {
("testy", "A"): {},
("testy", "C"): {'type': 'biosphere'},
("testy", "B"): {'exchanges': [
{'input': ("testy", "A"),
'amount': 1,
'type': 'technosphere'},
{'input': ("testy", "B"),
'amount': 1,
'type': 'production'},
{'input': ("testy", "C"),
'amount': 1,
'type': 'biosphere'},
]},
}
database.write(data)
act = database.get("B")
exc = [x for x in act.production()][0]
exc['amount'] = 2
exc.save()
self.assertTrue(databases['testy']['dirty'])
lca = act.lca()
self.assertFalse(databases['testy'].get('dirty'))
self.assertEqual(
lca.supply_array[lca.activity_dict[("testy", "A")]],
0.5
)
def test_process_unknown_object_singlefile(self):
database = DatabaseChooser("testy", backend="singlefile")
database.register()
data = {
("testy", "A"): {},
("testy", "B"): {'exchanges': [
{'input': ("testy", "A"),
'amount': 1,
'type': 'technosphere'},
{'input': ("testy", "C"),
'amount': 1,
'type': 'technosphere'},
]},
}
with self.assertRaises(UnknownObject):
database.write(data)
def test_process_invalid_exchange_value(self):
database = DatabaseChooser("testy")
database.register()
data = {
("testy", "A"): {},
("testy", "B"): {'exchanges': [
{'input': ("testy", "A"),
'amount': np.nan,
'type': 'technosphere'},
{'input': ("testy", "C"),
'amount': 1,
'type': 'technosphere'},
]},
}
with self.assertRaises(ValueError):
database.write(data)
def test_untyped_exchange_error(self):
database = DatabaseChooser("testy")
database.register()
database_data = {
("testy", "A"): {'exchanges': [
{'amount': 1, 'input': ('testy', 'A')}
]},
}
with self.assertRaises(UntypedExchange):
database.write(database_data, process=False)
def test_no_input_raises_invalid_exchange(self):
database = DatabaseChooser("testy")
database.register()
database_data = {
("testy", "A"): {'exchanges': [
{'amount': 1}
]},
}
with self.assertRaises(InvalidExchange):
database.write(database_data, process=False)
def test_no_amount_raises_invalid_exchange(self):
database = DatabaseChooser("testy")
database.register()
database_data = {
("testy", "A"): {'exchanges': [
{'input': ('testy', 'A'), 'type': 'technosphere'}
]},
}
with self.assertRaises(InvalidExchange):
database.write(database_data, process=False)
def test_zero_amount_is_valid_exchange(self):
database = DatabaseChooser("testy")
database.register()
database_data = {
("testy", "A"): {'exchanges': [
{'input': ('testy', 'A'), 'type': 'technosphere', 'amount': 0.}
]},
}
database.write(database_data, process=False)
def test_process_geomapping_array(self):
database = DatabaseChooser("a database")
database.register()
database.write({})
fp = os.path.join(
projects.dir,
"processed",
database.filename + ".geomapping.npy"
)
array = np.load(fp)
fieldnames = {'activity', 'geo', 'row', 'col'}
self.assertFalse(fieldnames.difference(set(array.dtype.names)))
def test_process_checks_process_type(self):
database = DatabaseChooser("a database")
database.register()
database.write({
("a database", "foo"): {
'exchanges': [],
'type': 'process'
},
("a database", "bar"): {
'type': 'definitely not a process'
},
}, process=True)
# This shouldn't raise an error
self.assertEqual(database.process(), None)
def test_geomapping_array_includes_only_processes(self):
database = DatabaseChooser("a database")
database.register()
database.write({
("a database", "foo"): {
'exchanges': [],
'type': 'process',
'location': 'bar'
},
("a database", "baz"): {
'exchanges': [],
'type': 'emission'
},
})
fp = os.path.join(
projects.dir,
"processed",
database.filename + ".geomapping.npy"
)
array = np.load(fp)
self.assertEqual(array.shape, (1,))
self.assertEqual(array[0]['geo'], geomapping['bar'])
def test_processed_array(self):
database = DatabaseChooser("a database")
database.register()
database.write({("a database", '2'): {
'type': 'process',
'exchanges': [{
'input': ("a database", '2'),
'amount': 42,
'uncertainty type': 7,
'type': 'production'
}]
}})
fp = os.path.join(
projects.dir,
"processed",
database.filename + ".npy"
)
array = np.load(fp)
fieldnames = {'input', 'output', 'row', 'col', 'type'}
self.assertFalse(fieldnames.difference(set(array.dtype.names)))
self.assertEqual(array.shape, (1,))
self.assertEqual(array[0]['uncertainty_type'], 7)
self.assertEqual(array[0]['amount'], 42)
def test_loc_value_if_no_uncertainty(self):
database = DatabaseChooser("a database")
database.register()
database.write({("a database", '2'): {
'type': 'process',
'exchanges': [{
'input': ("a database", '2'),
'amount': 42.,
'type': 'technosphere'
}]
}})
fp = os.path.join(
projects.dir,
"processed",
database.filename + ".npy"
)
array = np.load(fp)
self.assertEqual(array.shape, (2,))
self.assertEqual(array['loc'][1], 42.)
self.assertEqual(array['loc'][0], 1.)
def test_base_class(self):
database = DatabaseChooser("a database")
self.assertEqual(database._metadata, databases)
self.assertEqual(
[x[0] for x in database.dtype_fields],
[numpy_string(x) for x in ('input', 'output', 'row', 'col', 'type')]
)
def test_find_dependents(self):
database = DatabaseChooser("a database")
database.register()
database.write({
("a database", "foo"): {
'exchanges': [
{
'input': ("foo", "bar"),
'type': 'technosphere',
'amount': 0,
},
{
'input': ("biosphere", "bar"),
'type': 'technosphere',
'amount': 0,
},
# Ignore becuase of 'ignore'
{
'input': ("awkward", "silence"),
'type': 'technosphere',
'amount': 0,
},
# Ignored because of 'unknown' type
{
'input': ("who", "am I?"),
"type": "unknown",
'amount': 0,
},
{
'input': ("biosphere", "bar"),
'type': 'technosphere',
'amount': 0,
},
],
'location': 'bar'
},
("a database", "baz"): {
'exchanges': [{
'input': ("baz", "w00t"),
'type': 'technosphere',
'amount': 0,
}],
'type': 'emission' # Ignored because of type
},
("a database", "nonce"): {}, # OK not to have 'exchanges'
}, process=False)
self.assertEqual(
database.find_dependents(ignore={"awkward"}),
["biosphere", "foo"]
)
def test_set_dependents(self):
database = DatabaseChooser("a database")
database.register()
self.assertEqual(databases['a database']['depends'], [])
keys = [("biosphere", "bar"), ("<KEY>"), ("foo", "bar")]
mapping.add(keys)
database.write({
("a database", "foo"): {
'exchanges': [
{'input': ("foo", "bar"), 'type': 'technosphere', 'amount': 1},
{'input': ("biosphere", "bar"), 'type': 'biosphere', 'amount': 1}
],
'type': 'process',
'location': 'bar'
},
("a database", "baz"): {
'exchanges': [{'input': ("baz", "w00t"), 'type': 'technosphere', 'amount': 1}],
'type': 'emission'
},
})
self.assertEqual(
databases['a database']['depends'],
["baz", "biosphere", "foo"]
)
def test_process_without_exchanges_still_in_processed_array(self):
database = DatabaseChooser("a database")
database.write({("a database", "foo"): {}})
fp = os.path.join(
projects.dir,
"processed",
database.filename + ".npy"
)
array = np.load(fp)
self.assertEqual(array['amount'][0], 1)
self.assertEqual(array.shape, (1,))
def test_random_empty(self):
database = DatabaseChooser("a database")
database.write({})
with warnings.catch_warnings() as w:
warnings.simplefilter("ignore")
self.assertEqual(database.random(), None)
def test_new_activity(self):
database = DatabaseChooser("a database")
database.register()
act = database.new_activity('foo', this="that", name='something')
act.save()
act = database.get('foo')
self.assertEqual(act['database'], 'a database')
self.assertEqual(act['code'], 'foo')
self.assertEqual(act['location'], 'GLO')
self.assertEqual(act['this'], 'that')
def test_can_split_processes_products(self):
database = DatabaseChooser("a database")
database.write({
("a database", "product"): {'type': 'product'},
("a database", "foo"): {
'exchanges': [{
'input': ("a database", "product"),
'output': ("a database", "product"),
'type': 'production',
'amount': 1
}],
'type': 'process',
},
})
self.assertTrue(("a database", "product") in mapping)
fp = os.path.join(
projects.dir,
"processed",
database.filename + ".npy"
)
array = np.load(fp)
self.assertEqual(array.shape, (1,))
self.assertEqual(array['output'][0], mapping[("a database", "foo")])
self.assertEqual(array['input'][0], mapping[("a database", "product")])
@bw2test
def test_database_delete_parameters():
db = DatabaseChooser("example")
db.register()
a = db.new_activity(code="A", name="An activity")
a.save()
b = db.new_activity(code="B", name="Another activity")
b.save()
a.new_exchange(amount=0, input=b, type="technosphere", formula="foo * bar + 4").save()
database_data = [{
'name': 'red',
'formula': '(blue ** 2) / 5',
}, {
'name': 'blue',
'amount': 12
}]
parameters.new_database_parameters(database_data, "example")
activity_data = [{
'name': 'reference_me',
'formula': 'sqrt(red - 20)',
'database': 'example',
'code': "B",
}, {
'name': 'bar',
'formula': 'reference_me + 2',
'database': 'example',
'code': "A",
}]
parameters.new_activity_parameters(activity_data, "my group")
parameters.add_exchanges_to_group("my group", a)
assert ActivityParameter.select().count() == 2
assert ParameterizedExchange.select().count() == 1
assert DatabaseParameter.select().count() == 2
assert len(parameters) == 4
del databases['example']
assert not len(parameters)
assert not ParameterizedExchange.select().count()
| [
"bw2data.parameters.DatabaseParameter.select",
"bw2data.utils.numpy_string",
"bw2data.backends.peewee.sqlite3_lci_db.execute_sql",
"bw2data.parameters.parameters.new_activity_parameters",
"bw2data.meta.mapping.add",
"os.path.join",
"warnings.catch_warnings",
"bw2data.parameters.parameters.add_exchange... | [((1379, 1407), 'bw2data.database.DatabaseChooser', 'DatabaseChooser', (['"""biosphere"""'], {}), "('biosphere')\n", (1394, 1407), False, 'from bw2data.database import DatabaseChooser\n'), ((1439, 1462), 'bw2data.database.DatabaseChooser', 'DatabaseChooser', (['"""food"""'], {}), "('food')\n", (1454, 1462), False, 'from bw2data.database import DatabaseChooser\n'), ((1659, 1687), 'bw2data.database.DatabaseChooser', 'DatabaseChooser', (['"""biosphere"""'], {}), "('biosphere')\n", (1674, 1687), False, 'from bw2data.database import DatabaseChooser\n'), ((1861, 1889), 'bw2data.database.DatabaseChooser', 'DatabaseChooser', (['"""biosphere"""'], {}), "('biosphere')\n", (1876, 1889), False, 'from bw2data.database import DatabaseChooser\n'), ((2094, 2122), 'bw2data.database.DatabaseChooser', 'DatabaseChooser', (['"""biosphere"""'], {}), "('biosphere')\n", (2109, 2122), False, 'from bw2data.database import DatabaseChooser\n'), ((2313, 2336), 'bw2data.database.DatabaseChooser', 'DatabaseChooser', (['"""food"""'], {}), "('food')\n", (2328, 2336), False, 'from bw2data.database import DatabaseChooser\n'), ((2716, 2743), 'bw2data.database.DatabaseChooser', 'DatabaseChooser', (['"""old name"""'], {}), "('old name')\n", (2731, 2743), False, 'from bw2data.database import DatabaseChooser\n'), ((3166, 3188), 'bw2data.database.DatabaseChooser', 'DatabaseChooser', (['"""bar"""'], {}), "('bar')\n", (3181, 3188), False, 'from bw2data.database import DatabaseChooser\n'), ((3302, 3330), 'bw2data.database.DatabaseChooser', 'DatabaseChooser', (['"""biosphere"""'], {}), "('biosphere')\n", (3317, 3330), False, 'from bw2data.database import DatabaseChooser\n'), ((3743, 3771), 'bw2data.database.DatabaseChooser', 'DatabaseChooser', (['"""biosphere"""'], {}), "('biosphere')\n", (3758, 3771), False, 'from bw2data.database import DatabaseChooser\n'), ((4418, 4440), 'bw2data.database.DatabaseChooser', 'DatabaseChooser', (['"""foo"""'], {}), "('foo')\n", 
(4433, 4440), False, 'from bw2data.database import DatabaseChooser\n'), ((5022, 5046), 'bw2data.database.DatabaseChooser', 'DatabaseChooser', (['"""testy"""'], {}), "('testy')\n", (5037, 5046), False, 'from bw2data.database import DatabaseChooser\n'), ((5187, 5210), 'bw2data.database.DatabaseChooser', 'DatabaseChooser', (['"""food"""'], {}), "('food')\n", (5202, 5210), False, 'from bw2data.database import DatabaseChooser\n'), ((5381, 5409), 'bw2data.database.DatabaseChooser', 'DatabaseChooser', (['"""biosphere"""'], {}), "('biosphere')\n", (5396, 5409), False, 'from bw2data.database import DatabaseChooser\n'), ((5583, 5607), 'bw2data.database.DatabaseChooser', 'DatabaseChooser', (['"""testy"""'], {}), "('testy')\n", (5598, 5607), False, 'from bw2data.database import DatabaseChooser\n'), ((7026, 7064), 'bw2data.database.DatabaseChooser', 'DatabaseChooser', (['"""testy"""', '"""singlefile"""'], {}), "('testy', 'singlefile')\n", (7041, 7064), False, 'from bw2data.database import DatabaseChooser\n'), ((8475, 8499), 'bw2data.database.DatabaseChooser', 'DatabaseChooser', (['"""testy"""'], {}), "('testy')\n", (8490, 8499), False, 'from bw2data.database import DatabaseChooser\n'), ((8830, 8854), 'bw2data.database.DatabaseChooser', 'DatabaseChooser', (['"""testy"""'], {}), "('testy')\n", (8845, 8854), False, 'from bw2data.database import DatabaseChooser\n'), ((9301, 9323), 'bw2data.database.DatabaseChooser', 'DatabaseChooser', (['"""foo"""'], {}), "('foo')\n", (9316, 9323), False, 'from bw2data.database import DatabaseChooser\n'), ((23281, 23307), 'bw2data.database.DatabaseChooser', 'DatabaseChooser', (['"""example"""'], {}), "('example')\n", (23296, 23307), False, 'from bw2data.database import DatabaseChooser\n'), ((23707, 23767), 'bw2data.parameters.parameters.new_database_parameters', 'parameters.new_database_parameters', (['database_data', '"""example"""'], {}), "(database_data, 'example')\n", (23741, 23767), False, 'from bw2data.parameters import ActivityParameter, 
DatabaseParameter, ParameterizedExchange, parameters\n'), ((24047, 24108), 'bw2data.parameters.parameters.new_activity_parameters', 'parameters.new_activity_parameters', (['activity_data', '"""my group"""'], {}), "(activity_data, 'my group')\n", (24081, 24108), False, 'from bw2data.parameters import ActivityParameter, DatabaseParameter, ParameterizedExchange, parameters\n'), ((24113, 24161), 'bw2data.parameters.parameters.add_exchanges_to_group', 'parameters.add_exchanges_to_group', (['"""my group"""', 'a'], {}), "('my group', a)\n", (24146, 24161), False, 'from bw2data.parameters import ActivityParameter, DatabaseParameter, ParameterizedExchange, parameters\n'), ((2346, 2375), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (2359, 2375), False, 'import pytest\n'), ((3198, 3226), 'pytest.raises', 'pytest.raises', (['WrongDatabase'], {}), '(WrongDatabase)\n', (3211, 3226), False, 'import pytest\n'), ((3804, 3829), 'pytest.warns', 'pytest.warns', (['UserWarning'], {}), '(UserWarning)\n', (3816, 3829), False, 'import pytest\n'), ((9167, 9195), 'pytest.raises', 'pytest.raises', (['UnknownObject'], {}), '(UnknownObject)\n', (9180, 9195), False, 'import pytest\n'), ((9557, 9585), 'bw2data.database.DatabaseChooser', 'DatabaseChooser', (['"""biosphere"""'], {}), "('biosphere')\n", (9572, 9585), False, 'from bw2data.database import DatabaseChooser\n'), ((9625, 9648), 'bw2data.database.DatabaseChooser', 'DatabaseChooser', (['"""food"""'], {}), "('food')\n", (9640, 9648), False, 'from bw2data.database import DatabaseChooser\n'), ((9716, 9744), 'bw2data.database.DatabaseChooser', 'DatabaseChooser', (['"""biosphere"""'], {}), "('biosphere')\n", (9731, 9744), False, 'from bw2data.database import DatabaseChooser\n'), ((9784, 9807), 'bw2data.database.DatabaseChooser', 'DatabaseChooser', (['"""food"""'], {}), "('food')\n", (9799, 9807), False, 'from bw2data.database import DatabaseChooser\n'), ((10329, 10353), 'bw2data.database.DatabaseChooser', 
'DatabaseChooser', (['"""testy"""'], {}), "('testy')\n", (10344, 10353), False, 'from bw2data.database import DatabaseChooser\n'), ((11394, 11418), 'bw2data.database.DatabaseChooser', 'DatabaseChooser', (['"""testy"""'], {}), "('testy')\n", (11409, 11418), False, 'from bw2data.database import DatabaseChooser\n'), ((12405, 12451), 'bw2data.database.DatabaseChooser', 'DatabaseChooser', (['"""testy"""'], {'backend': '"""singlefile"""'}), "('testy', backend='singlefile')\n", (12420, 12451), False, 'from bw2data.database import DatabaseChooser\n'), ((12978, 13002), 'bw2data.database.DatabaseChooser', 'DatabaseChooser', (['"""testy"""'], {}), "('testy')\n", (12993, 13002), False, 'from bw2data.database import DatabaseChooser\n'), ((13523, 13547), 'bw2data.database.DatabaseChooser', 'DatabaseChooser', (['"""testy"""'], {}), "('testy')\n", (13538, 13547), False, 'from bw2data.database import DatabaseChooser\n'), ((13906, 13930), 'bw2data.database.DatabaseChooser', 'DatabaseChooser', (['"""testy"""'], {}), "('testy')\n", (13921, 13930), False, 'from bw2data.database import DatabaseChooser\n'), ((14265, 14289), 'bw2data.database.DatabaseChooser', 'DatabaseChooser', (['"""testy"""'], {}), "('testy')\n", (14280, 14289), False, 'from bw2data.database import DatabaseChooser\n'), ((14656, 14680), 'bw2data.database.DatabaseChooser', 'DatabaseChooser', (['"""testy"""'], {}), "('testy')\n", (14671, 14680), False, 'from bw2data.database import DatabaseChooser\n'), ((15003, 15032), 'bw2data.database.DatabaseChooser', 'DatabaseChooser', (['"""a database"""'], {}), "('a database')\n", (15018, 15032), False, 'from bw2data.database import DatabaseChooser\n'), ((15101, 15179), 'os.path.join', 'os.path.join', (['projects.dir', '"""processed"""', "(database.filename + '.geomapping.npy')"], {}), "(projects.dir, 'processed', database.filename + '.geomapping.npy')\n", (15113, 15179), False, 'import os\n'), ((15242, 15253), 'numpy.load', 'np.load', (['fp'], {}), '(fp)\n', (15249, 15253), True, 
'import numpy as np\n'), ((15449, 15478), 'bw2data.database.DatabaseChooser', 'DatabaseChooser', (['"""a database"""'], {}), "('a database')\n", (15464, 15478), False, 'from bw2data.database import DatabaseChooser\n'), ((15951, 15980), 'bw2data.database.DatabaseChooser', 'DatabaseChooser', (['"""a database"""'], {}), "('a database')\n", (15966, 15980), False, 'from bw2data.database import DatabaseChooser\n'), ((16332, 16410), 'os.path.join', 'os.path.join', (['projects.dir', '"""processed"""', "(database.filename + '.geomapping.npy')"], {}), "(projects.dir, 'processed', database.filename + '.geomapping.npy')\n", (16344, 16410), False, 'import os\n'), ((16473, 16484), 'numpy.load', 'np.load', (['fp'], {}), '(fp)\n', (16480, 16484), True, 'import numpy as np\n'), ((16646, 16675), 'bw2data.database.DatabaseChooser', 'DatabaseChooser', (['"""a database"""'], {}), "('a database')\n", (16661, 16675), False, 'from bw2data.database import DatabaseChooser\n'), ((17002, 17069), 'os.path.join', 'os.path.join', (['projects.dir', '"""processed"""', "(database.filename + '.npy')"], {}), "(projects.dir, 'processed', database.filename + '.npy')\n", (17014, 17069), False, 'import os\n'), ((17132, 17143), 'numpy.load', 'np.load', (['fp'], {}), '(fp)\n', (17139, 17143), True, 'import numpy as np\n'), ((17498, 17527), 'bw2data.database.DatabaseChooser', 'DatabaseChooser', (['"""a database"""'], {}), "('a database')\n", (17513, 17527), False, 'from bw2data.database import DatabaseChooser\n'), ((17818, 17885), 'os.path.join', 'os.path.join', (['projects.dir', '"""processed"""', "(database.filename + '.npy')"], {}), "(projects.dir, 'processed', database.filename + '.npy')\n", (17830, 17885), False, 'import os\n'), ((17948, 17959), 'numpy.load', 'np.load', (['fp'], {}), '(fp)\n', (17955, 17959), True, 'import numpy as np\n'), ((18148, 18177), 'bw2data.database.DatabaseChooser', 'DatabaseChooser', (['"""a database"""'], {}), "('a database')\n", (18163, 18177), False, 'from bw2data.database 
import DatabaseChooser\n'), ((18458, 18487), 'bw2data.database.DatabaseChooser', 'DatabaseChooser', (['"""a database"""'], {}), "('a database')\n", (18473, 18487), False, 'from bw2data.database import DatabaseChooser\n'), ((20261, 20290), 'bw2data.database.DatabaseChooser', 'DatabaseChooser', (['"""a database"""'], {}), "('a database')\n", (20276, 20290), False, 'from bw2data.database import DatabaseChooser\n'), ((20457, 20474), 'bw2data.meta.mapping.add', 'mapping.add', (['keys'], {}), '(keys)\n', (20468, 20474), False, 'from bw2data.meta import mapping, geomapping, databases, methods\n'), ((21250, 21279), 'bw2data.database.DatabaseChooser', 'DatabaseChooser', (['"""a database"""'], {}), "('a database')\n", (21265, 21279), False, 'from bw2data.database import DatabaseChooser\n'), ((21345, 21412), 'os.path.join', 'os.path.join', (['projects.dir', '"""processed"""', "(database.filename + '.npy')"], {}), "(projects.dir, 'processed', database.filename + '.npy')\n", (21357, 21412), False, 'import os\n'), ((21475, 21486), 'numpy.load', 'np.load', (['fp'], {}), '(fp)\n', (21482, 21486), True, 'import numpy as np\n'), ((21632, 21661), 'bw2data.database.DatabaseChooser', 'DatabaseChooser', (['"""a database"""'], {}), "('a database')\n", (21647, 21661), False, 'from bw2data.database import DatabaseChooser\n'), ((21885, 21914), 'bw2data.database.DatabaseChooser', 'DatabaseChooser', (['"""a database"""'], {}), "('a database')\n", (21900, 21914), False, 'from bw2data.database import DatabaseChooser\n'), ((22336, 22365), 'bw2data.database.DatabaseChooser', 'DatabaseChooser', (['"""a database"""'], {}), "('a database')\n", (22351, 22365), False, 'from bw2data.database import DatabaseChooser\n'), ((22879, 22946), 'os.path.join', 'os.path.join', (['projects.dir', '"""processed"""', "(database.filename + '.npy')"], {}), "(projects.dir, 'processed', database.filename + '.npy')\n", (22891, 22946), False, 'import os\n'), ((23009, 23020), 'numpy.load', 'np.load', (['fp'], {}), 
'(fp)\n', (23016, 23020), True, 'import numpy as np\n'), ((3437, 3537), 'bw2data.backends.peewee.sqlite3_lci_db.execute_sql', 'sqlite3_lci_db.execute_sql', (['"""select count(*) from activitydataset where database = \'biosphere\'"""'], {}), '(\n "select count(*) from activitydataset where database = \'biosphere\'")\n', (3463, 3537), False, 'from bw2data.backends.peewee import Activity as PWActivity, ActivityDataset, Exchange as PWExchange, ExchangeDataset, sqlite3_lci_db\n'), ((3572, 3679), 'bw2data.backends.peewee.sqlite3_lci_db.execute_sql', 'sqlite3_lci_db.execute_sql', (['"""select count(*) from exchangedataset where output_database = \'biosphere\'"""'], {}), '(\n "select count(*) from exchangedataset where output_database = \'biosphere\'")\n', (3598, 3679), False, 'from bw2data.backends.peewee import Activity as PWActivity, ActivityDataset, Exchange as PWExchange, ExchangeDataset, sqlite3_lci_db\n'), ((9824, 9848), 'copy.deepcopy', 'copy.deepcopy', (['food_data'], {}), '(food_data)\n', (9837, 9848), False, 'import copy\n'), ((21702, 21727), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (21725, 21727), False, 'import warnings\n'), ((21746, 21777), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (21767, 21777), False, 'import warnings\n'), ((4865, 4887), 'bw2data.database.DatabaseChooser', 'DatabaseChooser', (['"""one"""'], {}), "('one')\n", (4880, 4887), False, 'from bw2data.database import DatabaseChooser\n'), ((10876, 10899), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (10897, 10899), False, 'import datetime\n'), ((18324, 18339), 'bw2data.utils.numpy_string', 'numpy_string', (['x'], {}), '(x)\n', (18336, 18339), False, 'from bw2data.utils import numpy_string, get_activity\n'), ((24174, 24200), 'bw2data.parameters.ActivityParameter.select', 'ActivityParameter.select', ([], {}), '()\n', (24198, 24200), False, 'from bw2data.parameters import ActivityParameter, 
DatabaseParameter, ParameterizedExchange, parameters\n'), ((24225, 24255), 'bw2data.parameters.ParameterizedExchange.select', 'ParameterizedExchange.select', ([], {}), '()\n', (24253, 24255), False, 'from bw2data.parameters import ActivityParameter, DatabaseParameter, ParameterizedExchange, parameters\n'), ((24280, 24306), 'bw2data.parameters.DatabaseParameter.select', 'DatabaseParameter.select', ([], {}), '()\n', (24304, 24306), False, 'from bw2data.parameters import ActivityParameter, DatabaseParameter, ParameterizedExchange, parameters\n'), ((24428, 24458), 'bw2data.parameters.ParameterizedExchange.select', 'ParameterizedExchange.select', ([], {}), '()\n', (24456, 24458), False, 'from bw2data.parameters import ActivityParameter, DatabaseParameter, ParameterizedExchange, parameters\n')] |
#!/usr/bin/env python3
"""Demo script for the Solo8 robot
Moves the Solo8 robot with a hard-coded choreography for show-casing and
testing.
"""
import argparse
import numpy as np
import robot_interfaces
import robot_fingers
def run_choreography(frontend):
    """Drive the legs through a simple sinusoidal, hard-coded choreography.

    Runs forever: every iteration sends one desired joint position to the
    robot and waits until that action has been applied.
    """

    def send_position(position):
        # NOTE(review): the original comment claimed the action is repeated
        # 1000 times per step, but only a single append/wait cycle runs here.
        for _ in range(1):
            step = frontend.append_desired_action(
                robot_interfaces.solo_eight.Action(position=position)
            )
            frontend.wait_until_timeindex(step)

    printer = robot_fingers.utils.TimePrinter()

    # Random per-joint phase offset so the eight joints do not move in lockstep.
    phase_offset = np.random.uniform(-0.1, 0.1, size=8)

    tick = 0
    while True:
        send_position(np.sin(tick * 0.01 + phase_offset) * 0.5)
        printer.update()
        tick += 1
def main():
    """Entry point: set up a Solo8 frontend and run the choreography demo."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--multi-process",
        action="store_true",
        help="""If set run only frontend with multi-process
            robot data.  Otherwise run everything within a single
            process.""",
    )
    args = parser.parse_args()

    if args.multi_process:
        # The backend is assumed to already run in a separate process;
        # attach only a frontend to the shared multi-process robot data.
        shared_data = robot_interfaces.solo_eight.MultiProcessData(
            "solo8", False
        )
        frontend = robot_interfaces.solo_eight.Frontend(shared_data)
    else:
        # Single-process mode: the Robot helper hosts backend and frontend
        # together in this process.
        robot = robot_fingers.Robot(
            robot_interfaces.solo_eight,
            robot_fingers.create_solo_eight_backend,
            "soloeight.yml",
        )
        robot.initialize()
        frontend = robot.frontend

    # move around
    print("Running")
    run_choreography(frontend)


if __name__ == "__main__":
    main()
| [
"robot_interfaces.solo_eight.Frontend",
"argparse.ArgumentParser",
"robot_interfaces.solo_eight.Action",
"robot_fingers.utils.TimePrinter",
"robot_fingers.Robot",
"robot_interfaces.solo_eight.MultiProcessData",
"numpy.random.uniform",
"numpy.sin"
] | [((643, 676), 'robot_fingers.utils.TimePrinter', 'robot_fingers.utils.TimePrinter', ([], {}), '()\n', (674, 676), False, 'import robot_fingers\n'), ((701, 737), 'numpy.random.uniform', 'np.random.uniform', (['(-0.1)', '(0.1)'], {'size': '(8)'}), '(-0.1, 0.1, size=8)\n', (718, 737), True, 'import numpy as np\n'), ((906, 931), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (929, 931), False, 'import argparse\n'), ((1408, 1468), 'robot_interfaces.solo_eight.MultiProcessData', 'robot_interfaces.solo_eight.MultiProcessData', (['"""solo8"""', '(False)'], {}), "('solo8', False)\n", (1452, 1468), False, 'import robot_interfaces\n'), ((1510, 1558), 'robot_interfaces.solo_eight.Frontend', 'robot_interfaces.solo_eight.Frontend', (['robot_data'], {}), '(robot_data)\n', (1546, 1558), False, 'import robot_interfaces\n'), ((1708, 1819), 'robot_fingers.Robot', 'robot_fingers.Robot', (['robot_interfaces.solo_eight', 'robot_fingers.create_solo_eight_backend', '"""soloeight.yml"""'], {}), "(robot_interfaces.solo_eight, robot_fingers.\n create_solo_eight_backend, 'soloeight.yml')\n", (1727, 1819), False, 'import robot_fingers\n'), ((771, 796), 'numpy.sin', 'np.sin', (['(t * 0.01 + offset)'], {}), '(t * 0.01 + offset)\n', (777, 796), True, 'import numpy as np\n'), ((510, 563), 'robot_interfaces.solo_eight.Action', 'robot_interfaces.solo_eight.Action', ([], {'position': 'position'}), '(position=position)\n', (544, 563), False, 'import robot_interfaces\n')] |
import numpy as np
from collections import namedtuple
from pomegranate import GeneralMixtureModel,NormalDistribution
import pandas as pd
def smooth(ser, sc):
    """Centered rolling mean of *ser* over a window of *sc* samples.

    Partial windows at the edges are allowed (``min_periods=1``), so the
    output has the same length as the input.  Returns a NumPy array.
    """
    windowed = pd.Series(ser).rolling(sc, min_periods=1, center=True)
    return windowed.mean().to_numpy()
# An origin of replication: its position (in resolution units), its firing
# time, and the speeds of the left- and right-moving forks it spawns.
origin = namedtuple("origin",["pos","firing_time","L_fork_speed","R_fork_speed"])
# A fork pause: the position where a fork stalls and for how long.
Pause = namedtuple("pause",["pos","duration"])
def track(time, start_time=2, end_time=15, maxv=0.8, minv=0.1, inct=1, pulselen=4, dect=5):
    """Generate a single fork track over a 1D array of times.

    The signal is zero up to ``start_time``, rises exponentially toward
    ``maxv`` (characteristic time ``inct``) for ``pulselen`` minutes, then
    decays exponentially toward ``minv`` (characteristic time ``dect``)
    until ``end_time``, after which it is zero again.

    Returns
    -------
    (signal, n_rising, n_background)
        The signal array, the (possibly truncated) length of the rising
        segment, and the length of the leading background segment.
    """
    pulse_end = start_time + pulselen

    background = time[time <= start_time]
    rising = time[(time > start_time) & (time < pulse_end)]
    n_rising = len(rising)
    if n_rising != 0:
        rising = maxv * (1 - np.exp(-(rising - start_time) / inct))

    falling = time[(time >= pulse_end) & (time < end_time)]
    # Level reached at the end of the pulse: last rising sample if any,
    # otherwise the analytic plateau value after a full pulse.
    if n_rising != 0:
        top = rising[-1]
    else:
        top = maxv * (1 - np.exp(-pulselen / inct))
    if len(falling) != 0:
        falling = top + (minv - top) * (1 - np.exp(-(falling - pulse_end) / dect))

    tail = time[time >= end_time]
    signal = np.concatenate(
        [np.zeros_like(background), rising, falling, np.zeros_like(tail)]
    )
    return signal, n_rising, len(background)
def intersection(p1, p2, pause=(0, 0)):
    """Compute where two converging forks meet.

    Parameters
    ----------
    p1, p2 : origin
        Left and right origins; ``p1`` contributes its right-moving fork
        (``R_fork_speed``), ``p2`` its left-moving fork (``L_fork_speed``).
        Requires ``p1.pos < p2.pos`` (asserted).
    pause : sequence of two floats, optional
        Extra delays added to the effective firing times of ``p1`` and
        ``p2`` respectively.
        Fix: the default used to be the mutable list ``[0, 0]``; an
        immutable tuple avoids the shared-mutable-default pitfall while
        remaining fully backward compatible (callers may still pass lists).

    Returns
    -------
    (bool, [x, t])
        ``(True, [x, t])`` with the meeting position and time, or
        ``(False, [None, None])`` if the meeting point falls outside
        ``(p1.pos, p2.pos)``.
    """
    x1, t1, R_fork_speed = p1.pos, p1.firing_time, p1.R_fork_speed
    x2, t2, L_fork_speed = p2.pos, p2.firing_time, p2.L_fork_speed
    t1 += pause[0]
    t2 += pause[1]
    assert (x2 > x1)
    # Meeting position of the two linear fork trajectories.
    # x = (x1+x2)/2 + (t2-t1)*v/2 in the equal-speed case.
    x = 1 / (1 / L_fork_speed + 1 / R_fork_speed) * (t2 - t1 + x1 / L_fork_speed + x2 / R_fork_speed)
    if not (x1 < x < x2):
        return False, [None, None]
    t = (x2 - x1) / (R_fork_speed + L_fork_speed) + (t1 * R_fork_speed + t2 * L_fork_speed) / (R_fork_speed + L_fork_speed)
    return True, [x, t]
def generate_mrt(pos_time, end=1000, start_at_zero=True):
    """Mean replication time along the fiber for a set of origins.

    Builds the list of (position, time) anchors — fiber ends, origins and
    fork-termination points — and linearly interpolates between them.
    Returns ``False`` if two consecutive origins cannot produce a valid
    termination point; otherwise a 1D array of length ``end`` (shifted so
    its minimum is zero when ``start_at_zero`` is True).
    """
    leftmost = pos_time[0]
    # Time at which the leftmost fork reaches the left end of the fiber.
    anchors = [[0, leftmost.firing_time + leftmost.pos / leftmost.L_fork_speed]]

    for left, right in zip(pos_time[:-1], pos_time[1:]):
        ok, term = intersection(left, right)
        anchors.append([left.pos, left.firing_time])
        anchors.append(list(term))
        if not ok:
            return False

    if len(pos_time) == 1:
        right = pos_time[0]
    anchors.append([right.pos, right.firing_time])
    # Time at which the rightmost fork reaches the right end of the fiber.
    anchors.append([end, right.firing_time + (end - right.pos) / right.R_fork_speed])

    pts = np.array(anchors)
    mrt = np.interp(np.arange(end), pts[:, 0], pts[:, 1])
    if start_at_zero:
        return mrt - np.min(mrt)
    return mrt
def generate_rfd(pos_time, end=1000):
    """Replication fork direction along the fiber: +1 rightward, -1 leftward."""
    direction = np.zeros(end)
    first = pos_time[0]
    x1, t1, L_fork_speed = first.pos, first.firing_time, first.L_fork_speed
    # Left of the first origin everything is replicated by a left-moving fork.
    direction[:x1] = -1

    last = first
    for left, right in zip(pos_time[:-1], pos_time[1:]):
        possible, meet = intersection(left, right)
        middle = int(round(meet[0], 0))
        direction[left.pos:middle] = 1
        direction[middle:right.pos] = -1
        last = right

    if len(pos_time) == 1:
        x2, t2 = x1, t1
    else:
        x2, t2 = last.pos, last.firing_time
    # Right of the last origin everything moves rightward.
    direction[x2:] = 1
    return direction
def generate_track(pos_time,start_time=10,end=1000,params={},same_parameters=True,pauses=[]):
    """
    Given a list of origin and firing times and fork speed
    and a start_time for the injection of Brdu return the incorporation
    of Brdu corresponding.

    Parameters
    ----------
    pos_time : list of origin
        Origins (pos, firing_time, fork speeds), sorted by position.
    start_time : float
        Time at which the BrdU pulse starts.
    end : int
        Fiber length in resolution units.
    params : dict
        Track-shape parameters (maxv, minv, pulselen, inct, dect); a value
        may be a [low, high] list, drawn uniformly per fork.
        NOTE(review): mutable default argument (shared dict across calls).
    same_parameters : bool
        If True, draw one parameter set and reuse it for every fork.
    pauses : list of Pause
        At most one pause between two consecutive origins.
        NOTE(review): mutable default, and the caller's list is mutated in
        place below (``pauses[interval] = ...``).

    Returns
    -------
    (trac, [len_initial, pos_s], kw, mrt)
        BrdU incorporation signal, rising-segment lengths and their
        [start, end] positions per fork, the parameter dict(s) used, and
        the list of per-fork replication-time arrays.
    """
    param_k = ["maxv","minv","pulselen","inct","dect"]
    list_param_generated=[]

    def generate_params(param_k,already_done={}):
        # Returns already_done unchanged when non-empty (same_parameters mode);
        # otherwise draws a fresh parameter set from `params`.
        if already_done != {}:
            return already_done
        kw={}
        for p in param_k:
            if type(params[p]) == list:
                # [low, high] -> uniform draw
                kw[p] = params[p][0] + (params[p][1]-params[p][0])*np.random.rand()
            else:
                kw[p] = params[p]
        list_param_generated.append(kw)
        return kw

    kw = {}
    if same_parameters:
        kw = generate_params(param_k)

    if len(pauses) ==0:
        # No pauses requested: one empty pause per fork interval.
        pauses=[Pause(pos=None,duration=0)] * (len(pos_time)+1)

    #CHeck that pauses are ordered
    if len(pauses)>1:
        for p1,p2 in zip(pauses[:-1],pauses[1:]):
            if p1.pos != None and p2.pos != None:
                assert(p1.pos<p2.pos)

    #insert empty pauses and order pause. check only one pause between all ori
    #print("Before",pauses)
    #print(pauses)
    if len(pauses) != (len(pos_time)+1):
        # Pad the pause list to exactly one slot per inter-origin interval.
        f_pauses=[None]*(len(pos_time)+1)
        p_ori_order=[ori.pos for ori in pos_time]
        #print(p_ori_order)
        startp=0
        if pauses[0].pos<p_ori_order[0]:
            f_pauses[0]=pauses[0]
            startp=1
        for pause in pauses[startp:]:
            for ipos in range(len(p_ori_order)-1):
                if p_ori_order[ipos+1]>pause.pos>=p_ori_order[ipos]:
                    if f_pauses[ipos+1] != None:
                        print("At least two pauses located between two origins")
                        # NOTE(review): `pos_ori` is undefined here — this
                        # line would raise NameError before the bare `raise`.
                        print("Origins",pos_ori)
                        print("Pauses",pauses)
                        raise
                    else:
                        f_pauses[ipos+1]=pause
        if pauses[-1].pos>p_ori_order[-1]:
            f_pauses[-1]=pauses[-1]
        for i in range(len(f_pauses)):
            if f_pauses[i] == None:
                f_pauses[i]=Pause(pos=None,duration=0)
        #print("After",f_pauses)
        pauses=f_pauses
    else:
        #Pauses must be located between origins
        for pause,ori in zip(pauses,pos_time):
            if pause.pos != None:
                assert(pause.pos<=ori.pos)
        for pause,ori in zip(pauses[1:],pos_time[:]):
            if pause.pos != None:
                assert(pause.pos>=ori.pos)
    assert(len(pauses)==len(pos_time)+1)

    #def generate_time(start_t,pos_end,speed,pause=0):
    #    return np.arange(start_t,start_t+pos_end/speed,1/speed)

    trac = np.zeros(end)
    # Leftmost fork: travels leftward from the first origin to position 0.
    x1,t1,L_fork_speed = pos_time[0].pos,pos_time[0].firing_time,pos_time[0].L_fork_speed
    time= np.arange(t1,t1+x1/L_fork_speed,1/L_fork_speed)
    if pauses[0].duration != 0:
        #print(pauses)
        # Delay every sample past the pause position by the pause duration.
        time[x1-pauses[0].pos:]+=pauses[0].duration
    t,len_init,before = track(time,start_time=start_time,end_time=t1+x1/L_fork_speed+pauses[0].duration,
                              **generate_params(param_k,kw))
    trac[:x1] = t[:x1][::-1]
    #print(len_init)
    mrt = [time[:x1][::-1]]
    #mrt[:x1] = time[:x1][::-1]
    len_initial = [len_init + 0] #store the length of the increasing parts
    pos_s = [[x1-len_init-before,x1-before]]

    # Interior intervals: a rightward fork from p1 and a leftward fork from
    # p2 meet at `middle`, possibly shifted by a pause in between.
    for interval,(p1,p2,pause) in enumerate(zip(pos_time[:-1],pos_time[1:],pauses[1:]),1):
        if pause.duration !=0:
            assert(p2.pos>pause.pos>p1.pos)
        possible,inte = intersection(p1,p2)
        middle = int(round(inte[0]))
        first_encounter_pause=True
        if pause.duration!=0:
            if middle > pause.pos:
                #First fork get their first
                delta=middle-pause.pos
                delta_t=delta/p2.L_fork_speed
                if delta_t>pause.duration:
                    #Then fork1 finish its pause
                    #Equivalent to starting late of time pause
                    possible,inte = intersection(p1,p2,pause=[pause.duration,0])
                    middle = int(round(inte[0]))
                else:
                    # Fork 2 arrives before fork 1 resumes: termination is
                    # pinned at the pause site; shorten the stored pause.
                    pauses[interval] = Pause(pos=pause.pos,duration=delta/p2.L_fork_speed)
                    pause=pauses[interval]
                    middle = pause.pos
            else:
                first_encounter_pause = False
                delta=pause.pos-middle
                delta_t=delta/p1.L_fork_speed
                if delta_t >pause.duration:
                    #Then fork2 finish its pause
                    possible,inte = intersection(p1,p2,pause=[0,pause.duration])
                    middle = int(round(inte[0]))
                else:
                    pauses[interval] = Pause(pos=pause.pos,duration=delta/p1.L_fork_speed)
                    pause=pauses[interval]
                    middle = pause.pos

        # Rightward fork from p1 up to the meeting point.
        size = len(trac[p1[0]:middle])
        starto = p1.firing_time
        R_fork_speed = p1.R_fork_speed
        time= np.arange(starto,starto+size/R_fork_speed,1/R_fork_speed)
        end_cover=0
        if pause.duration != 0:
            end_cover=pause.duration
        if first_encounter_pause and pause.duration !=0:
            time[pause.pos-p1.pos:] += pause.duration
        mrt.append(time[:size])
        #print(time)
        #print(time,len(time))
        #print(p1[0],p2[0],middle)
        #print(track(time,start_time=start_time,end_time=starto+size/v)[:size])
        #trac[p1.pos:middle]
        t,len_init,before= track(time,start_time=start_time,end_time=starto+size/R_fork_speed+end_cover,
                                 **generate_params(param_k,kw))
        trac[p1.pos:middle] = t[:size]
        len_initial.append(len_init + 0)
        pos_s += [[p1.pos+before,p1.pos+len_init+before]]

        # Leftward fork from p2 down to the meeting point.
        size = len(trac[middle:p2.pos])
        starto = p2.firing_time
        L_fork_speed = p2.L_fork_speed
        time= np.arange(starto,starto+size/L_fork_speed,1/L_fork_speed)
        if not first_encounter_pause and pause.duration !=0:
            time[p2.pos-pause.pos:] += pause.duration
        mrt.append(time[:size][::-1])
        #print(time,len(time))
        # NOTE(review): no-op expression below (left from debugging?).
        trac[middle:p2.pos]
        t,len_init,before = track(time,start_time=start_time,end_time=starto+size/L_fork_speed+end_cover,
                                  **generate_params(param_k,kw))
        trac[middle:p2.pos] = t[:size][::-1]
        len_initial.append(len_init + 0)
        pos_s += [[p2.pos-len_init-before,p2.pos-before]]

    # Rightmost fork: from the last origin to the right end of the fiber.
    if len(pos_time) == 1:
        x2,t2 = x1,t1
        R_fork_speed = pos_time[0].R_fork_speed
    else:
        x2,t2=p2.pos,p2.firing_time
        R_fork_speed = p2.R_fork_speed
    size = len(trac[x2:])
    time= np.arange(t2,t2+size/R_fork_speed,1/R_fork_speed)
    if pauses[-1].duration != 0:
        #print(pauses)
        time[pauses[-1].pos-x2:]+=pauses[-1].duration
    mrt.append(time[:size])
    #mrt[x2:] = time[:size]
    t,len_init,before = track(time,start_time=start_time,end_time=t2+size/R_fork_speed+pauses[-1].duration,
                              **generate_params(param_k,kw))
    trac[x2:] = t[:size]
    len_initial.append(len_init + 0)
    pos_s += [[x2+before,x2+len_init+before]]

    if not same_parameters:
        # Return the full list of per-fork parameter draws instead.
        kw = list_param_generated
    #print(len(trac),len(np.concatenate(mrt)))
    #print(pauses,R_fork_speed)
    return trac,[len_initial,pos_s],kw,mrt
def create_possible_origins(n_ori, n_sim, average_fork_speed, chlen, scaling=15):
    """Draw random origin configurations until ``n_sim`` valid ones are found.

    Positions are drawn uniformly on the fiber and firing times uniformly in
    [0, chlen / n_ori / scaling); draws with duplicated positions or with an
    impossible replication-time profile are rejected.
    """
    accepted = []
    while len(accepted) != n_sim:
        positions = np.random.randint(0, chlen, n_ori)
        firing = np.random.randint(0, chlen / n_ori / scaling, n_ori)
        firing -= min(firing)
        positions.sort()
        # Reject draws that place two origins at the same position.
        if len(set(positions)) != len(positions):
            continue
        #print(pos)
        candidate = [
            origin(p, t, average_fork_speed, average_fork_speed)
            for p, t in zip(positions, firing)
        ]
        # generate_mrt returns False (a bool) for impossible configurations.
        if type(generate_mrt(candidate)) != bool:
            accepted.append(candidate)
    return accepted
if __name__ == "__main__":
import argparse
import uuid
import json
from scipy import stats
import pandas as pd
import pylab
import ast
np.random.seed(0)
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--prefix', type=str,default="mock")
parser.add_argument('--parameter_file', type=str,default='data/params.json')
parser.add_argument('--average_distance_between_ori', type=float, default=50000)
parser.add_argument('--multi',dest="one_fork", action="store_false")
parser.add_argument('--correct_for_height', action="store_true")
parser.add_argument('--ground_truth', action="store_true")
parser.add_argument('--fork_position', action="store_true",
help="record fork positions")
parser.add_argument('--resolution', type=int, default=100,
help="resolution in bp of the simulation")
parser.add_argument('--n_conf_ori', type=int, default=400,
help="Generate set of ori and firing times")
parser.add_argument('--time_per_mrt', type=int, default=400,
help="Generate time of starting pulse per configuration")
parser.add_argument('--read_per_time', type=int, default=1,
help="Correspond to truncated fiber when the option whole_length"
"is set to False")
parser.add_argument('--draw_sample', type=int,default=0)
parser.add_argument('--conf',type=str,default=None,help="configuration of origins to simulate from")
parser.add_argument('--test', action="store_true")
parser.add_argument('--whole_length', action="store_true")
parser.add_argument('--length', type=int,default=None)
args = parser.parse_args()
##############################################
# Generate track parameters
with open(args.parameter_file,"r") as f:
params = json.load(f)
# maxv: lowest highest value of the plateau when increasing
# minv:[0.12-0.05,0.15-0.05], #l owest highest value of the plateau when decreasing
# pulselen":[2,2], # smallest longest value of the pulse length in minute
# inct : [.25,1.25], # lowest highest value ofcharacteristic time of the increasing exponential
# dect : [2.,5]
#############################################
#Either create ori at specific position and firing time
average_fork_speed=15 # in 100 bp/min
Sim = [[origin(50,2,average_fork_speed,average_fork_speed),
origin(70,2,average_fork_speed,average_fork_speed)]]
##############################################
#Choose fiber size and distributions
resolution = args.resolution
if not args.one_fork:
chlen=300000 // resolution
whole_length=False
else:
chlen = 50000 // resolution
whole_length=True
if args.test:
chlen=50000 //resolution
whole_length=True
if args.whole_length:
whole_length=True
if args.length != None:
chlen=int(args.length/resolution)
possiblesize = np.arange(5000//resolution,chlen)
distribsize = stats.lognorm(0.5,scale=35000/resolution).pdf(possiblesize)
distribsize /= np.sum(distribsize)
nfork = {}
pos={}
fiber = {}
rfd = {}
mrts = {}
gt = {}
parameters = {}
all_speeds={}
positions = {}
def draw(law):
if law["type"] == "pomegranate":
return GeneralMixtureModel.from_json(law["params"]).sample(1)
if law["type"] == "choices":
return np.random.choices(law["params"])
if law["type"] == "uniform":
return law["params"][0] + (law["params"][1]-law["params"][0])*np.random.rand()
if law["type"] == "normal":
return np.random.normal(loc=law["params"][0] ,scale=law["params"][1])
if law["type"] == "exp":
if "data" not in law:
law["data"] = pd.read_csv(law["params"])["data"]
which = int(np.random.randint(len(law["data"])))
shift=0
if "shift" in law:
shift= law["shift"]
return law["data"][which]+shift
if args.conf != None:
Confs = []
Pauses = []
with open(args.conf,"r") as f:
for line in f.readlines():
new_conf = ast.literal_eval(line)
average_fork_speed = draw(params["speed"]) / resolution
ori_pos =[]
for ori in new_conf[0]:
if len(ori)==4:
ori[0] = int(ori[0]/resolution)
ori_pos.append(origin(*ori))
elif len(ori)==2:
ori[0] /=resolution
ori_pos.append(origin(int(ori[0]),ori[1],average_fork_speed,average_fork_speed))
else:
raise
Confs.append(ori_pos)
if len(new_conf)==2:
pt = []
for p in new_conf[1]:
p[0]/=resolution
pt.append(Pause(int(p[0]),p[1]))
Pauses.append(pt)
n_conf=len(Confs)
#print(Confs)
#print(Pauses)
else:
n_conf = args.n_conf_ori
for sim_number in range(n_conf): # [current]:
average_fork_speed = draw(params["speed"]) / resolution
if average_fork_speed<=0:
continue
pauses=[]
if not args.one_fork:
if args.test:
sim=[origin(100,0,average_fork_speed,average_fork_speed)]
#pauses=[Pause(pos=49,duration=20),Pause(pos=120,duration=4)]
sim=[origin(30,0,average_fork_speed,average_fork_speed),origin(150,0,average_fork_speed,average_fork_speed)]
pauses=[]
#pauses=[Pause(pos=140,duration=10)]
#pauses=[Pause(pos=0,duration=0),Pause(pos=140,duration=10),Pause(pos=180,duration=0)]
#pauses=[Pause(pos=140,duration=10)]
#pauses=[Pause(pos=0,duration=0),Pause(pos=120,duration=0.5),Pause(pos=180,duration=0)]
#pauses=[Pause(pos=0,duration=0),Pause(pos=80,duration=10),Pause(pos=180,duration=0)]
#pauses=[Pause(pos=0,duration=0),Pause(pos=80,duration=0.5),Pause(pos=180,duration=0)]
elif args.conf != None:
sim=Confs[sim_number]
pauses=Pauses[sim_number]
average_fork_speed = np.mean(np.concatenate([[ori.L_fork_speed,ori.R_fork_speed] for ori in ori_pos]))
else:
sim = create_possible_origins(int(chlen / (args.average_distance_between_ori / resolution)),1,
average_fork_speed,chlen)[0]
mrt = generate_mrt(sim,end=chlen)
# Draw time between the first 3/5 of the MRT
minn = min(mrt)
maxi = minn + 3* (max(mrt)-min(mrt)) / 5
else:
minn=0
maxi=10 #Not used
for i in np.random.randint(minn,maxi,args.time_per_mrt):
kw={}
param_k = ["maxv","minv","pulselen","inct","dect"]
for p in param_k:
kw[p] = draw(params[p])
if args.correct_for_height:
kw["maxv"] = kw["maxv"]/(1-np.exp(-2/kw["inct"]))
if not args.one_fork:
tc,len_initial,kw,mrt = generate_track(sim,start_time=i,
end=chlen,params=kw,pauses=pauses)
rfds = generate_rfd(sim,end=chlen)
else:
time= np.arange(0,chlen/average_fork_speed,1/average_fork_speed)
#start at 1kb
tc,len_init,_ = track(time,start_time=time[1000//resolution-1],
end_time=chlen/average_fork_speed,**kw)
rfds=None
kw["speed"]=len_init / kw["pulselen"] * resolution
mrt=time
for size in np.random.choice(possiblesize,p=distribsize,size=args.read_per_time):
start = np.random.randint(0,len(tc)-size)
if whole_length:
start=0
size = len(tc)
else:
attemp=0
while (rfds is not None and np.sum(rfds[start:start+size]) == 0) or ((np.max(tc[start:start+size]) - np.min(tc[start:start+size])) < 0.3) :
start = np.random.randint(0,len(tc)-size)
attemp += 1
if attemp > 100:
break
if not args.one_fork:
# Get speeds of non nul forks
kw["speed"]=[li / kw["pulselen"] * resolution\
for li,[startf,endf] in zip(len_initial[0],
len_initial[1]) \
if (li != 0) and startf>start and endf<start+size ]
ui = str(uuid.uuid4())
gt[ui] = tc[start:start+size]
f = tc[start:start+size].copy()
val_background = stats.lognorm.rvs(s=1,scale=0.017*1.48,loc=0.015, size=1)[0]
while val_background>0.2:
val_background = stats.lognorm.rvs(s=1,scale=0.017*1.48,loc=0.015, size=1)[0]
f+=val_background
f[f>1]=1
kw["val_background"]=val_background
n_info = np.random.randint(15,60)
f= np.random.binomial(n_info,f) / n_info
f = smooth(f,2)
fiber[ui] = f
kw["speed_th"] = average_fork_speed * resolution
parameters[ui] = kw
pos[ui] = np.arange(start,start+size)
mrts[ui]=mrt
if args.one_fork:
positions[ui]=[1000//resolution,1000//resolution+len_init,1]
all_speeds[ui]=[kw.pop("speed")]
else:
all_speeds[ui]=kw.pop("speed")
positions[ui]=[[startf-start,endf-start,(-1)**(posn+1)] for posn,(li,[startf,endf]) in enumerate(zip(len_initial[0],
len_initial[1])) \
if (li != 0) and startf>start and endf<start+size ]
if not args.one_fork and rfds is not None:
rfd[ui] = rfds[start:start+size]
k = list(fiber.keys())
print(len(fiber.keys()),"len")
if args.conf !=None:
permuted = np.array(k)
else:
kp = np.random.permutation(len(k))
permuted = np.array(k)[kp]
if args.ground_truth:
with open(f"{args.prefix}_gt.fa","w") as h:
for p in permuted:
formated = ["%.2f"%v for v in gt[p]]
h.writelines(f"{p}\n {' '.join(formated)}\n")
if args.fork_position:
with open(f"{args.prefix}_positions.fa","w") as h:
for p in permuted:
formated = [str(v) for v in positions[p]]
h.writelines(f"{p}\n {str(positions[p])}\n")
with open(f"{args.prefix}.fa","w") as f, \
open(f"{args.prefix}_parameters.txt","w") as g, \
open(f"{args.prefix}_all_speeds.txt","w") as j:
for p in permuted:
g.writelines(f"{p} {str(parameters[p])}\n")
formated = ["%.2f"%v for v in fiber[p]]
f.writelines(f"{p}\n {' '.join(formated)}\n")
formated = ["%.2f"%v for v in all_speeds[p]]
j.writelines(f"{p}\n {' '.join(formated)}\n")
if args.draw_sample != 0:
maxi = min(args.draw_sample,len(k))
f = pylab.figure(figsize=(20,maxi))
#k1 = list(speed.keys())
#print(k[:10],k1[:10])
for i in range(maxi):
f.add_subplot(maxi//2,2,i+1)
ui = np.array(permuted)[i]
ftp = fiber[ui]
#print(len(mrts[ui]))
if len(mrts[ui])>1:
mrt=np.concatenate(mrts[ui])
else:
mrt=mrts[ui].flatten()
pylab.plot(np.arange(len(ftp))*resolution/1000,ftp,label=parameters[ui]["maxv"])
#pylab.plot(np.arange(len(ftp))/10,gt[ui],label=parameters[ui]["maxv"])
pylab.plot(np.arange(len(mrt))*resolution/1000,mrt/max(mrt),label=parameters[ui]["maxv"])
#plot(np.arange(len(ftp))/10,rfd[np.array(k)[kp][i]])
pylab.ylim(0,1.1)
#xlim(0,50)
pylab.xlabel("kb")
pylab.ylabel("% Brdu")
pylab.legend()
f.tight_layout()
pylab.savefig(f"{args.prefix}_sample.pdf")
| [
"numpy.random.rand",
"pandas.read_csv",
"pylab.savefig",
"pylab.xlabel",
"scipy.stats.lognorm.rvs",
"numpy.array",
"numpy.arange",
"numpy.random.binomial",
"pylab.ylim",
"argparse.ArgumentParser",
"pomegranate.GeneralMixtureModel.from_json",
"numpy.exp",
"numpy.random.choices",
"numpy.max"... | [((253, 329), 'collections.namedtuple', 'namedtuple', (['"""origin"""', "['pos', 'firing_time', 'L_fork_speed', 'R_fork_speed']"], {}), "('origin', ['pos', 'firing_time', 'L_fork_speed', 'R_fork_speed'])\n", (263, 329), False, 'from collections import namedtuple\n'), ((334, 374), 'collections.namedtuple', 'namedtuple', (['"""pause"""', "['pos', 'duration']"], {}), "('pause', ['pos', 'duration'])\n", (344, 374), False, 'from collections import namedtuple\n'), ((3541, 3565), 'numpy.array', 'np.array', (['pos_with_terms'], {}), '(pos_with_terms)\n', (3549, 3565), True, 'import numpy as np\n'), ((3896, 3909), 'numpy.zeros', 'np.zeros', (['end'], {}), '(end)\n', (3904, 3909), True, 'import numpy as np\n'), ((7082, 7095), 'numpy.zeros', 'np.zeros', (['end'], {}), '(end)\n', (7090, 7095), True, 'import numpy as np\n'), ((7197, 7252), 'numpy.arange', 'np.arange', (['t1', '(t1 + x1 / L_fork_speed)', '(1 / L_fork_speed)'], {}), '(t1, t1 + x1 / L_fork_speed, 1 / L_fork_speed)\n', (7206, 7252), True, 'import numpy as np\n'), ((11136, 11193), 'numpy.arange', 'np.arange', (['t2', '(t2 + size / R_fork_speed)', '(1 / R_fork_speed)'], {}), '(t2, t2 + size / R_fork_speed, 1 / R_fork_speed)\n', (11145, 11193), True, 'import numpy as np\n'), ((12639, 12656), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (12653, 12656), True, 'import numpy as np\n'), ((12691, 12716), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (12714, 12716), False, 'import argparse\n'), ((15589, 15625), 'numpy.arange', 'np.arange', (['(5000 // resolution)', 'chlen'], {}), '(5000 // resolution, chlen)\n', (15598, 15625), True, 'import numpy as np\n'), ((15720, 15739), 'numpy.sum', 'np.sum', (['distribsize'], {}), '(distribsize)\n', (15726, 15739), True, 'import numpy as np\n'), ((3600, 3614), 'numpy.arange', 'np.arange', (['end'], {}), '(end)\n', (3609, 3614), True, 'import numpy as np\n'), ((9403, 9468), 'numpy.arange', 'np.arange', (['starto', '(starto + 
size / R_fork_speed)', '(1 / R_fork_speed)'], {}), '(starto, starto + size / R_fork_speed, 1 / R_fork_speed)\n', (9412, 9468), True, 'import numpy as np\n'), ((10326, 10391), 'numpy.arange', 'np.arange', (['starto', '(starto + size / L_fork_speed)', '(1 / L_fork_speed)'], {}), '(starto, starto + size / L_fork_speed, 1 / L_fork_speed)\n', (10335, 10391), True, 'import numpy as np\n'), ((12067, 12101), 'numpy.random.randint', 'np.random.randint', (['(0)', 'chlen', 'n_ori'], {}), '(0, chlen, n_ori)\n', (12084, 12101), True, 'import numpy as np\n'), ((12116, 12168), 'numpy.random.randint', 'np.random.randint', (['(0)', '(chlen / n_ori / scaling)', 'n_ori'], {}), '(0, chlen / n_ori / scaling, n_ori)\n', (12133, 12168), True, 'import numpy as np\n'), ((14433, 14445), 'json.load', 'json.load', (['f'], {}), '(f)\n', (14442, 14445), False, 'import json\n'), ((19558, 19606), 'numpy.random.randint', 'np.random.randint', (['minn', 'maxi', 'args.time_per_mrt'], {}), '(minn, maxi, args.time_per_mrt)\n', (19575, 19606), True, 'import numpy as np\n'), ((23164, 23175), 'numpy.array', 'np.array', (['k'], {}), '(k)\n', (23172, 23175), True, 'import numpy as np\n'), ((24287, 24319), 'pylab.figure', 'pylab.figure', ([], {'figsize': '(20, maxi)'}), '(figsize=(20, maxi))\n', (24299, 24319), False, 'import pylab\n'), ((25218, 25260), 'pylab.savefig', 'pylab.savefig', (['f"""{args.prefix}_sample.pdf"""'], {}), "(f'{args.prefix}_sample.pdf')\n", (25231, 25260), False, 'import pylab\n'), ((1602, 1623), 'numpy.zeros_like', 'np.zeros_like', (['before'], {}), '(before)\n', (1615, 1623), True, 'import numpy as np\n'), ((1638, 1656), 'numpy.zeros_like', 'np.zeros_like', (['end'], {}), '(end)\n', (1651, 1656), True, 'import numpy as np\n'), ((3671, 3682), 'numpy.min', 'np.min', (['mrt'], {}), '(mrt)\n', (3677, 3682), True, 'import numpy as np\n'), ((15641, 15685), 'scipy.stats.lognorm', 'stats.lognorm', (['(0.5)'], {'scale': '(35000 / resolution)'}), '(0.5, scale=35000 / resolution)\n', (15654, 
15685), False, 'from scipy import stats\n'), ((16069, 16101), 'numpy.random.choices', 'np.random.choices', (["law['params']"], {}), "(law['params'])\n", (16086, 16101), True, 'import numpy as np\n'), ((16285, 16347), 'numpy.random.normal', 'np.random.normal', ([], {'loc': "law['params'][0]", 'scale': "law['params'][1]"}), "(loc=law['params'][0], scale=law['params'][1])\n", (16301, 16347), True, 'import numpy as np\n'), ((20542, 20612), 'numpy.random.choice', 'np.random.choice', (['possiblesize'], {'p': 'distribsize', 'size': 'args.read_per_time'}), '(possiblesize, p=distribsize, size=args.read_per_time)\n', (20558, 20612), True, 'import numpy as np\n'), ((23248, 23259), 'numpy.array', 'np.array', (['k'], {}), '(k)\n', (23256, 23259), True, 'import numpy as np\n'), ((25050, 25068), 'pylab.ylim', 'pylab.ylim', (['(0)', '(1.1)'], {}), '(0, 1.1)\n', (25060, 25068), False, 'import pylab\n'), ((25104, 25122), 'pylab.xlabel', 'pylab.xlabel', (['"""kb"""'], {}), "('kb')\n", (25116, 25122), False, 'import pylab\n'), ((25135, 25157), 'pylab.ylabel', 'pylab.ylabel', (['"""% Brdu"""'], {}), "('% Brdu')\n", (25147, 25157), False, 'import pylab\n'), ((25170, 25184), 'pylab.legend', 'pylab.legend', ([], {}), '()\n', (25182, 25184), False, 'import pylab\n'), ((1206, 1244), 'numpy.exp', 'np.exp', (['(-(initial - start_time) / inct)'], {}), '(-(initial - start_time) / inct)\n', (1212, 1244), True, 'import numpy as np\n'), ((1402, 1426), 'numpy.exp', 'np.exp', (['(-pulselen / inct)'], {}), '(-pulselen / inct)\n', (1408, 1426), True, 'import numpy as np\n'), ((16845, 16867), 'ast.literal_eval', 'ast.literal_eval', (['line'], {}), '(line)\n', (16861, 16867), False, 'import ast\n'), ((20154, 20218), 'numpy.arange', 'np.arange', (['(0)', '(chlen / average_fork_speed)', '(1 / average_fork_speed)'], {}), '(0, chlen / average_fork_speed, 1 / average_fork_speed)\n', (20163, 20218), True, 'import numpy as np\n'), ((22069, 22094), 'numpy.random.randint', 'np.random.randint', (['(15)', '(60)'], 
{}), '(15, 60)\n', (22086, 22094), True, 'import numpy as np\n'), ((22341, 22371), 'numpy.arange', 'np.arange', (['start', '(start + size)'], {}), '(start, start + size)\n', (22350, 22371), True, 'import numpy as np\n'), ((24471, 24489), 'numpy.array', 'np.array', (['permuted'], {}), '(permuted)\n', (24479, 24489), True, 'import numpy as np\n'), ((24608, 24632), 'numpy.concatenate', 'np.concatenate', (['mrts[ui]'], {}), '(mrts[ui])\n', (24622, 24632), True, 'import numpy as np\n'), ((1496, 1543), 'numpy.exp', 'np.exp', (['(-(final - start_time - pulselen) / dect)'], {}), '(-(final - start_time - pulselen) / dect)\n', (1502, 1543), True, 'import numpy as np\n'), ((15958, 16002), 'pomegranate.GeneralMixtureModel.from_json', 'GeneralMixtureModel.from_json', (["law['params']"], {}), "(law['params'])\n", (15987, 16002), False, 'from pomegranate import GeneralMixtureModel, NormalDistribution\n'), ((16213, 16229), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (16227, 16229), True, 'import numpy as np\n'), ((16445, 16471), 'pandas.read_csv', 'pd.read_csv', (["law['params']"], {}), "(law['params'])\n", (16456, 16471), True, 'import pandas as pd\n'), ((21586, 21598), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (21596, 21598), False, 'import uuid\n'), ((21729, 21790), 'scipy.stats.lognorm.rvs', 'stats.lognorm.rvs', ([], {'s': '(1)', 'scale': '(0.017 * 1.48)', 'loc': '(0.015)', 'size': '(1)'}), '(s=1, scale=0.017 * 1.48, loc=0.015, size=1)\n', (21746, 21790), False, 'from scipy import stats\n'), ((22113, 22142), 'numpy.random.binomial', 'np.random.binomial', (['n_info', 'f'], {}), '(n_info, f)\n', (22131, 22142), True, 'import numpy as np\n'), ((179, 193), 'pandas.Series', 'pd.Series', (['ser'], {}), '(ser)\n', (188, 193), True, 'import pandas as pd\n'), ((4948, 4964), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (4962, 4964), True, 'import numpy as np\n'), ((19026, 19099), 'numpy.concatenate', 'np.concatenate', (['[[ori.L_fork_speed, ori.R_fork_speed] 
for ori in ori_pos]'], {}), '([[ori.L_fork_speed, ori.R_fork_speed] for ori in ori_pos])\n', (19040, 19099), True, 'import numpy as np\n'), ((19844, 19867), 'numpy.exp', 'np.exp', (["(-2 / kw['inct'])"], {}), "(-2 / kw['inct'])\n", (19850, 19867), True, 'import numpy as np\n'), ((21870, 21931), 'scipy.stats.lognorm.rvs', 'stats.lognorm.rvs', ([], {'s': '(1)', 'scale': '(0.017 * 1.48)', 'loc': '(0.015)', 'size': '(1)'}), '(s=1, scale=0.017 * 1.48, loc=0.015, size=1)\n', (21887, 21931), False, 'from scipy import stats\n'), ((20866, 20898), 'numpy.sum', 'np.sum', (['rfds[start:start + size]'], {}), '(rfds[start:start + size])\n', (20872, 20898), True, 'import numpy as np\n'), ((20909, 20939), 'numpy.max', 'np.max', (['tc[start:start + size]'], {}), '(tc[start:start + size])\n', (20915, 20939), True, 'import numpy as np\n'), ((20940, 20970), 'numpy.min', 'np.min', (['tc[start:start + size]'], {}), '(tc[start:start + size])\n', (20946, 20970), True, 'import numpy as np\n')] |
# Finds the E-LPIPS average (barycenter) of two images.
#
# Runs the iteration for 100 000 steps. Outputs are generated by default into directory out_bary2,
# but this directory may be changed with --outdir.
#
# The final result will be outdir/100000.png by default.
#
# This code also supports the LPIPS metric to facilitate comparisons.
#
# Usage:
# python ex_pairwise_average.py image1 image2
# python ex_pairwise_average.py image1 image2 --metric=[elpips_vgg|lpips_vgg|lpips_squeeze]
import tensorflow as tf
import numpy as np
import pdb
import os
import csv
import itertools
import time
import sys
import argparse
import elpips
import scipy.misc
import imageio
TOLERANCE = 0.00001 # How far to clip images from 0 and 1.
# Command-line interface: two positional input images plus optimization settings.
parser = argparse.ArgumentParser()
parser.add_argument('images', type=str, nargs=2, help='input images to average')
parser.add_argument('--outdir', type=str, default="out_bary2", help='output directory for intermediate files. Default: out_bary2')
parser.add_argument('--steps', type=int, default=100000, help='number of iterations to run')
parser.add_argument('--metric', type=str, default='elpips_vgg', help='(elpips_vgg, lpips_vgg, lpips_squeeze)')
parser.add_argument('--seed', type=int, default=-1, help='random seed (-1 for random)')
parser.add_argument('--learning_rate', type=float, default=0.03, help='step size multiplier for the optimization')
args = parser.parse_args()
# NOTE(review): 'elpips_squeeze_maxpool' is accepted here but not listed in the
# --metric help text above — confirm which list is authoritative.
if args.metric not in ('elpips_vgg', 'elpips_squeeze_maxpool', 'lpips_vgg', 'lpips_squeeze'):
	raise Exception('Unsupported metric.')
def load_image(path):
	"""Load an image from disk as a float array in [0, 1].

	Supports raw numpy dumps (.npy), loaded verbatim, and 8-bit
	.png/.jpg/.jpeg files, which are converted to float32 and scaled
	from [0, 255] down to [0.0, 1.0].

	Args:
		path: Path to the image file; the format is chosen by extension.

	Returns:
		A numpy array with the image data.

	Raises:
		Exception: If the file extension is not recognized.
	"""
	_, ext = os.path.splitext(path)
	ext = ext.lower()
	if ext == '.npy':
		image = np.load(path)
	elif ext in ('.png', '.jpg', '.jpeg'):
		# Normalize 8-bit images to the [0, 1] float range.
		image = imageio.imread(path).astype(np.float32) / 255.0
	else:
		raise Exception('Unknown image type.')
	return image
# Create output directory.
os.makedirs(args.outdir, exist_ok=True)
# Load inputs.
images = []
# Keep only the RGB channels (drop a possible alpha channel).
src_image = load_image(args.images[0])[:,:,0:3]
dest_image = load_image(args.images[1])[:,:,0:3]
# Add a leading batch dimension: (1, H, W, C).
images.append(np.expand_dims(src_image, 0))
images.append(np.expand_dims(dest_image, 0))
for i, image in enumerate(images):
	if image.shape != images[0].shape:
		raise Exception("Image '{}' has wrong shape.".format(args.images[i]))
# The 0.5 offset rounds to nearest when casting to uint8.
imageio.imwrite(os.path.join(args.outdir, "src_image.png"), (0.5 + 255.0 * src_image).astype(np.uint8))
imageio.imwrite(os.path.join(args.outdir, "dest_image.png"), (0.5 + 255.0 * dest_image).astype(np.uint8))
# Set random seed.
if args.seed >= 0:
	np.random.seed(args.seed)
# Initial image: random noise around mid-gray, clipped away from 0/1 so the
# optimizer never starts at the color-range boundary.
init_image = 0.5 + 0.2 * np.random.randn(images[0].shape[0], images[0].shape[1], images[0].shape[2], images[0].shape[3]).astype(np.float32)
init_image = np.clip(init_image, TOLERANCE, 1.0 - TOLERANCE)
imageio.imwrite(os.path.join(args.outdir, "initial_image.png"), (0.5 + 255.0 * init_image[0,:,:,:]).astype(np.uint8))
# Create the graph.
print("Creating graph.")
tf_images = []
for i in range(len(images)):
	tf_images.append(tf.constant(images[i], dtype=tf.float32))
tf_images = tuple(tf_images)
with tf.variable_scope('variables'):
	# tf_X is the barycenter estimate being optimized.
	tf_X = tf.get_variable('tf_X', dtype=tf.float32, initializer=init_image, trainable=True)
	# Quantized (uint8) view of the current estimate, for snapshot output.
	tf_X_uint8 = tf.cast(tf.floor(255.0 * tf.clip_by_value(tf_X, 0.0, 1.0) + 0.5), tf.uint8)[0, :, :, :]
	tf_step = tf.get_variable('step', dtype=tf.int32, initializer=0, trainable=False)
	tf_increase_step = tf.assign(tf_step, tf_step + 1)
	tf_step_f32 = tf.cast(tf_step, tf.float32)
	tf_step_f32 = tf.sqrt(100.0 ** 2 + tf_step_f32**2) - 100 # Gradual start.
	#Learning rate schedule between 1/t and 1/sqrt(t).
	tf_learning_rate = args.learning_rate / (1.0 + 0.02 * tf_step_f32 ** 0.75)
metric_config = elpips.get_config(args.metric)
# NOTE(review): 'image' below is the leftover loop variable from the shape-check
# loop above (the last batched image, shape (1, H, W, C)), so image[0].shape[1]
# and image[0].shape[2] are (width, channels). Confirm whether
# images[0].shape[1], images[0].shape[2] (height, width) were intended.
metric_config.set_scale_levels_by_image_size(image[0].shape[1], image[0].shape[2])
model = elpips.Metric(metric_config)
# Evaluate the distances between the source images and tf_X.
tf_dists = model.forward(tf_images, tf_X)
# Since tf_image is a tuple, tf_dists is also a tuple.
tf_dist1, tf_dist2 = tf_dists
# Get the distance of the first (and only) elements in the minibatch.
tf_dist1 = tf_dist1[0]
tf_dist2 = tf_dist2[0]
# Barycenter objective: sum of squared distances to both inputs.
tf_loss = tf.square(tf_dist1) + tf.square(tf_dist2)
with tf.control_dependencies([tf_increase_step]):
	tf_optimizer = tf.train.AdamOptimizer(tf_learning_rate)
	tf_minimize = tf_optimizer.minimize(tf_loss)
# Project to a safe distance from invalid colors.
tf_fix_X = tf.assign(tf_X, tf.clip_by_value(tf_X, TOLERANCE, 1.0 - TOLERANCE))
print("Starting session.")
gpu_options = tf.GPUOptions(allow_growth=True)
session_config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False, gpu_options=gpu_options)
with tf.Session(config=session_config) as sess:
	sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])
	# No more modifications to the computation graph.
	tf.get_default_graph().finalize()
	# Specify checkpoint for visualizing intermediate results.
	checkpoints = [0, 1, 2, 3, 5, 7, 10, 15, 20, 30, 50, 70, 100, 150, 200, 300, 400, 600, 900, 1200, 1500]
	for i in range(2000, 1 + args.steps, 500):
		checkpoints.append(i)
	checkpoints.append(args.steps)
	checkpoints = set(checkpoints)
	# Run the iteration.
	stime = time.time()
	for i in range(1 + args.steps):
		# Clamp the estimate back into the valid color range before each step.
		sess.run([tf_fix_X])
		if i not in checkpoints:
			# Iterate.
			sess.run([tf_minimize])
		else:
			# Also output statistics.
			# 'kernels' collects tensors to fetch in one session run; 'ops'
			# collects one callback per tensor that stores the fetched value
			# into 'results'. Each 'def op' rebinds the name; the appended
			# reference keeps the right function.
			kernels = []
			ops = []
			results = {}
			kernels.append(tf_loss)
			def op(x):
				results['loss'] = x
			ops.append(op)
			kernels.append(tf_learning_rate)
			def op(x):
				results['learning_rate'] = x
			ops.append(op)
			kernels.append(tf_X_uint8)
			def op(x):
				results['X_uint8'] = x
			ops.append(op)
			kernels.append(tf_minimize)
			def op(x):
				pass
			ops.append(op)
			# Fetch everything at once and dispatch to the callbacks.
			for x, op in zip(sess.run(kernels), ops):
				op(x)
			# Display results.
			loss, X_uint8 = results['loss'], results['X_uint8']
			etime = time.time()
			print("Elapsed: {} s. Step {}/{}. Loss: {}. Learning rate: {}".format(int(etime - stime), i, args.steps, loss, results['learning_rate']))
			imageio.imwrite(os.path.join(args.outdir, "{:06d}.png".format(i)), X_uint8)
		# Periodically save the raw float estimate for exact restarts.
		if i % 10000 == 0:
			X = sess.run([tf_X])
			np.save(os.path.join(args.outdir, "save_{:06d}.npy".format(i)), X)
| [
"numpy.clip",
"tensorflow.local_variables_initializer",
"tensorflow.get_variable",
"elpips.get_config",
"tensorflow.control_dependencies",
"tensorflow.cast",
"tensorflow.GPUOptions",
"argparse.ArgumentParser",
"tensorflow.Session",
"tensorflow.assign",
"numpy.random.seed",
"tensorflow.clip_by_... | [((749, 774), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (772, 774), False, 'import argparse\n'), ((1859, 1898), 'os.makedirs', 'os.makedirs', (['args.outdir'], {'exist_ok': '(True)'}), '(args.outdir, exist_ok=True)\n', (1870, 1898), False, 'import os\n'), ((2706, 2753), 'numpy.clip', 'np.clip', (['init_image', 'TOLERANCE', '(1.0 - TOLERANCE)'], {}), '(init_image, TOLERANCE, 1.0 - TOLERANCE)\n', (2713, 2753), True, 'import numpy as np\n'), ((3297, 3368), 'tensorflow.get_variable', 'tf.get_variable', (['"""step"""'], {'dtype': 'tf.int32', 'initializer': '(0)', 'trainable': '(False)'}), "('step', dtype=tf.int32, initializer=0, trainable=False)\n", (3312, 3368), True, 'import tensorflow as tf\n'), ((3388, 3419), 'tensorflow.assign', 'tf.assign', (['tf_step', '(tf_step + 1)'], {}), '(tf_step, tf_step + 1)\n', (3397, 3419), True, 'import tensorflow as tf\n'), ((3435, 3463), 'tensorflow.cast', 'tf.cast', (['tf_step', 'tf.float32'], {}), '(tf_step, tf.float32)\n', (3442, 3463), True, 'import tensorflow as tf\n'), ((3683, 3713), 'elpips.get_config', 'elpips.get_config', (['args.metric'], {}), '(args.metric)\n', (3700, 3713), False, 'import elpips\n'), ((3805, 3833), 'elpips.Metric', 'elpips.Metric', (['metric_config'], {}), '(metric_config)\n', (3818, 3833), False, 'import elpips\n'), ((4527, 4559), 'tensorflow.GPUOptions', 'tf.GPUOptions', ([], {'allow_growth': '(True)'}), '(allow_growth=True)\n', (4540, 4559), True, 'import tensorflow as tf\n'), ((4577, 4675), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)', 'log_device_placement': '(False)', 'gpu_options': 'gpu_options'}), '(allow_soft_placement=True, log_device_placement=False,\n gpu_options=gpu_options)\n', (4591, 4675), True, 'import tensorflow as tf\n'), ((1593, 1615), 'os.path.splitext', 'os.path.splitext', (['path'], {}), '(path)\n', (1609, 1615), False, 'import os\n'), ((2039, 2067), 'numpy.expand_dims', 'np.expand_dims', 
(['src_image', '(0)'], {}), '(src_image, 0)\n', (2053, 2067), True, 'import numpy as np\n'), ((2083, 2112), 'numpy.expand_dims', 'np.expand_dims', (['dest_image', '(0)'], {}), '(dest_image, 0)\n', (2097, 2112), True, 'import numpy as np\n'), ((2275, 2317), 'os.path.join', 'os.path.join', (['args.outdir', '"""src_image.png"""'], {}), "(args.outdir, 'src_image.png')\n", (2287, 2317), False, 'import os\n'), ((2379, 2422), 'os.path.join', 'os.path.join', (['args.outdir', '"""dest_image.png"""'], {}), "(args.outdir, 'dest_image.png')\n", (2391, 2422), False, 'import os\n'), ((2509, 2534), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (2523, 2534), True, 'import numpy as np\n'), ((2770, 2816), 'os.path.join', 'os.path.join', (['args.outdir', '"""initial_image.png"""'], {}), "(args.outdir, 'initial_image.png')\n", (2782, 2816), False, 'import os\n'), ((3061, 3091), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""variables"""'], {}), "('variables')\n", (3078, 3091), True, 'import tensorflow as tf\n'), ((3101, 3187), 'tensorflow.get_variable', 'tf.get_variable', (['"""tf_X"""'], {'dtype': 'tf.float32', 'initializer': 'init_image', 'trainable': '(True)'}), "('tf_X', dtype=tf.float32, initializer=init_image, trainable\n =True)\n", (3116, 3187), True, 'import tensorflow as tf\n'), ((3478, 3516), 'tensorflow.sqrt', 'tf.sqrt', (['(100.0 ** 2 + tf_step_f32 ** 2)'], {}), '(100.0 ** 2 + tf_step_f32 ** 2)\n', (3485, 3516), True, 'import tensorflow as tf\n'), ((4154, 4173), 'tensorflow.square', 'tf.square', (['tf_dist1'], {}), '(tf_dist1)\n', (4163, 4173), True, 'import tensorflow as tf\n'), ((4176, 4195), 'tensorflow.square', 'tf.square', (['tf_dist2'], {}), '(tf_dist2)\n', (4185, 4195), True, 'import tensorflow as tf\n'), ((4204, 4247), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[tf_increase_step]'], {}), '([tf_increase_step])\n', (4227, 4247), True, 'import tensorflow as tf\n'), ((4265, 4305), 
'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['tf_learning_rate'], {}), '(tf_learning_rate)\n', (4287, 4305), True, 'import tensorflow as tf\n'), ((4431, 4481), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['tf_X', 'TOLERANCE', '(1.0 - TOLERANCE)'], {}), '(tf_X, TOLERANCE, 1.0 - TOLERANCE)\n', (4447, 4481), True, 'import tensorflow as tf\n'), ((4677, 4710), 'tensorflow.Session', 'tf.Session', ([], {'config': 'session_config'}), '(config=session_config)\n', (4687, 4710), True, 'import tensorflow as tf\n'), ((5219, 5230), 'time.time', 'time.time', ([], {}), '()\n', (5228, 5230), False, 'import time\n'), ((1653, 1666), 'numpy.load', 'np.load', (['path'], {}), '(path)\n', (1660, 1666), True, 'import numpy as np\n'), ((2982, 3022), 'tensorflow.constant', 'tf.constant', (['images[i]'], {'dtype': 'tf.float32'}), '(images[i], dtype=tf.float32)\n', (2993, 3022), True, 'import tensorflow as tf\n'), ((4731, 4764), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (4762, 4764), True, 'import tensorflow as tf\n'), ((4766, 4798), 'tensorflow.local_variables_initializer', 'tf.local_variables_initializer', ([], {}), '()\n', (4796, 4798), True, 'import tensorflow as tf\n'), ((4854, 4876), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (4874, 4876), True, 'import tensorflow as tf\n'), ((5948, 5959), 'time.time', 'time.time', ([], {}), '()\n', (5957, 5959), False, 'import time\n'), ((2578, 2677), 'numpy.random.randn', 'np.random.randn', (['images[0].shape[0]', 'images[0].shape[1]', 'images[0].shape[2]', 'images[0].shape[3]'], {}), '(images[0].shape[0], images[0].shape[1], images[0].shape[2],\n images[0].shape[3])\n', (2593, 2677), True, 'import numpy as np\n'), ((1716, 1736), 'imageio.imread', 'imageio.imread', (['path'], {}), '(path)\n', (1730, 1736), False, 'import imageio\n'), ((3222, 3254), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['tf_X', '(0.0)', '(1.0)'], {}), '(tf_X, 0.0, 
1.0)\n', (3238, 3254), True, 'import tensorflow as tf\n')] |
"""A module containing classes that specify the components of a Factor Graph."""
from dataclasses import asdict, dataclass
from typing import Mapping, Tuple, Union
import jax
import jax.numpy as jnp
import numpy as np
from pgmax import utils
@dataclass(frozen=True, eq=False)
class Variable:
    """Base class for variables.
    If desired, this can be sub-classed to add additional concrete
    meta-information.
    Args:
        num_states: an int representing the number of states this variable
            has.
    """
    # Number of discrete states this variable can take.
    num_states: int
@jax.tree_util.register_pytree_node_class
@dataclass(frozen=True, eq=False)
class EnumerationWiring:
    """Wiring for enumeration factors.
    Registered as a JAX pytree so instances can pass through jit/vmap.
    Args:
        edges_num_states: Array of shape (num_edges,)
            Number of states for the variables connected to each edge
        var_states_for_edges: Array of shape (num_edge_states,)
            Global variable state indices for each edge state
        factor_configs_edge_states: Array of shape (num_factor_configs, 2)
            factor_configs_edge_states[ii] contains a pair of global factor_config and edge_state indices
            factor_configs_edge_states[ii, 0] contains the global factor config index
            factor_configs_edge_states[ii, 1] contains the corresponding global edge_state index
    """
    edges_num_states: Union[np.ndarray, jnp.ndarray]
    var_states_for_edges: Union[np.ndarray, jnp.ndarray]
    factor_configs_edge_states: Union[np.ndarray, jnp.ndarray]
    def __post_init__(self):
        # Freeze every numpy array field so this (frozen) dataclass cannot be
        # mutated through its array contents; jnp arrays are already immutable.
        for field in self.__dataclass_fields__:
            if isinstance(getattr(self, field), np.ndarray):
                getattr(self, field).flags.writeable = False
    def tree_flatten(self):
        """Flatten the wiring into (children, aux_data) for the JAX pytree API."""
        return jax.tree_util.tree_flatten(asdict(self))
    @classmethod
    def tree_unflatten(cls, aux_data, children):
        """Rebuild an EnumerationWiring from a flattened pytree."""
        return cls(**aux_data.unflatten(children))
@dataclass(frozen=True, eq=False)
class EnumerationFactor:
    """An enumeration factor
    Args:
        variables: List of connected variables
        configs: Array of shape (num_val_configs, num_variables)
            An array containing an explicit enumeration of all valid configurations
        log_potentials: Array of shape (num_val_configs,). An array containing
            the log of the potential value for every possible configuration
    Raises:
        ValueError: If:
            (1) The dtype of the configs array is not int
            (2) The dtype of the potential array is not float
            (3) Configs does not have the correct shape
            (4) The potential array does not have the correct shape
            (5) The configs array contains invalid values
    """
    variables: Tuple[Variable, ...]
    configs: np.ndarray
    log_potentials: np.ndarray
    def __post_init__(self):
        # Freeze configs so the valid-configuration table cannot change after
        # construction.
        self.configs.flags.writeable = False
        if not np.issubdtype(self.configs.dtype, np.integer):
            raise ValueError(
                f"Configurations should be integers. Got {self.configs.dtype}."
            )
        if not np.issubdtype(self.log_potentials.dtype, np.floating):
            raise ValueError(
                f"Potential should be floats. Got {self.log_potentials.dtype}."
            )
        if self.configs.ndim != 2:
            raise ValueError(
                "configs should be a 2D array containing a list of valid configurations for "
                f"EnumerationFactor. Got a configs array of shape {self.configs.shape}."
            )
        if len(self.variables) != self.configs.shape[1]:
            raise ValueError(
                f"Number of variables {len(self.variables)} doesn't match given configurations {self.configs.shape}"
            )
        # One log potential per valid configuration.
        if self.log_potentials.shape != (self.configs.shape[0],):
            raise ValueError(
                f"Expected log potentials of shape {(self.configs.shape[0],)} for "
                f"({self.configs.shape[0]}) valid configurations. Got log potentials of "
                f"shape {self.log_potentials.shape}."
            )
        # Every entry of configs must index a valid state of its variable:
        # 0 <= configs[:, v] < variables[v].num_states.
        vars_num_states = np.array([variable.num_states for variable in self.variables])
        if not np.logical_and(
            self.configs >= 0, self.configs < vars_num_states[None]
        ).all():
            raise ValueError("Invalid configurations for given variables")
    @utils.cached_property
    def edges_num_states(self) -> np.ndarray:
        """Number of states for the variables connected to each edge
        Returns:
            Array of shape (num_edges,)
                Number of states for the variables connected to each edge
        """
        edges_num_states = np.array(
            [variable.num_states for variable in self.variables], dtype=int
        )
        return edges_num_states
    @utils.cached_property
    def factor_configs_edge_states(self) -> np.ndarray:
        """Array containing factor configs and edge states pairs
        Returns:
            Array of shape (num_factor_configs, 2)
                factor_configs_edge_states[ii] contains a pair of global factor_config and edge_state indices
                factor_configs_edge_states[ii, 0] contains the global factor config index
                factor_configs_edge_states[ii, 1] contains the corresponding global edge_state index
        """
        # edges_starts[e] is the offset of edge e's first state in the
        # flattened edge-state vector (exclusive cumulative sum).
        edges_starts = np.insert(self.edges_num_states.cumsum(), 0, 0)[:-1]
        # Column 0: each config index repeated once per variable;
        # column 1: the per-edge state of that config, shifted by its
        # edge's offset to give a global edge-state index.
        factor_configs_edge_states = np.stack(
            [
                np.repeat(np.arange(self.configs.shape[0]), self.configs.shape[1]),
                (self.configs + edges_starts[None]).flatten(),
            ],
            axis=1,
        )
        return factor_configs_edge_states
    def compile_wiring(
        self, vars_to_starts: Mapping[Variable, int]
    ) -> EnumerationWiring:
        """Compile enumeration wiring for the enumeration factor
        Args:
            vars_to_starts: A dictionary that maps variables to their global starting indices
                For an n-state variable, a global start index of m means the global indices
                of its n variable states are m, m + 1, ..., m + n - 1
        Returns:
            Enumeration wiring for the enumeration factor
        """
        # For each connected variable, enumerate the global indices of all of
        # its states, concatenated in edge order.
        var_states_for_edges = np.concatenate(
            [
                np.arange(variable.num_states) + vars_to_starts[variable]
                for variable in self.variables
            ]
        )
        return EnumerationWiring(
            edges_num_states=self.edges_num_states,
            var_states_for_edges=var_states_for_edges,
            factor_configs_edge_states=self.factor_configs_edge_states,
        )
| [
"dataclasses.asdict",
"numpy.logical_and",
"dataclasses.dataclass",
"numpy.array",
"numpy.issubdtype",
"numpy.arange"
] | [((248, 280), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)', 'eq': '(False)'}), '(frozen=True, eq=False)\n', (257, 280), False, 'from dataclasses import asdict, dataclass\n'), ((595, 627), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)', 'eq': '(False)'}), '(frozen=True, eq=False)\n', (604, 627), False, 'from dataclasses import asdict, dataclass\n'), ((1905, 1937), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)', 'eq': '(False)'}), '(frozen=True, eq=False)\n', (1914, 1937), False, 'from dataclasses import asdict, dataclass\n'), ((4097, 4159), 'numpy.array', 'np.array', (['[variable.num_states for variable in self.variables]'], {}), '([variable.num_states for variable in self.variables])\n', (4105, 4159), True, 'import numpy as np\n'), ((4661, 4734), 'numpy.array', 'np.array', (['[variable.num_states for variable in self.variables]'], {'dtype': 'int'}), '([variable.num_states for variable in self.variables], dtype=int)\n', (4669, 4734), True, 'import numpy as np\n'), ((1770, 1782), 'dataclasses.asdict', 'asdict', (['self'], {}), '(self)\n', (1776, 1782), False, 'from dataclasses import asdict, dataclass\n'), ((2883, 2928), 'numpy.issubdtype', 'np.issubdtype', (['self.configs.dtype', 'np.integer'], {}), '(self.configs.dtype, np.integer)\n', (2896, 2928), True, 'import numpy as np\n'), ((3070, 3123), 'numpy.issubdtype', 'np.issubdtype', (['self.log_potentials.dtype', 'np.floating'], {}), '(self.log_potentials.dtype, np.floating)\n', (3083, 3123), True, 'import numpy as np\n'), ((4175, 4246), 'numpy.logical_and', 'np.logical_and', (['(self.configs >= 0)', '(self.configs < vars_num_states[None])'], {}), '(self.configs >= 0, self.configs < vars_num_states[None])\n', (4189, 4246), True, 'import numpy as np\n'), ((5471, 5503), 'numpy.arange', 'np.arange', (['self.configs.shape[0]'], {}), '(self.configs.shape[0])\n', (5480, 5503), True, 'import numpy as np\n'), ((6286, 6316), 'numpy.arange', 'np.arange', 
(['variable.num_states'], {}), '(variable.num_states)\n', (6295, 6316), True, 'import numpy as np\n')] |
#!/usr/bin/python3
#
# Read 0.1° occurrence density counts from a CSV. Split by country/gbifRegion etc, and further
# by about/publishedBy, and futher by snapshot.
#
# For each of these create a matrix of 3600×1800 pixels, with the value equal to the density.
#
# Write this as a GeoTIFF.
#
import csv
import numpy as np
import os, sys
import pandas as pd
from osgeo import gdal
from osgeo import osr
# Input CSVs come from the Hadoop export; outputs go under the report tree.
sourceDir = "hadoop"
targetDir = "report"
# Raster dimensions (rows, cols): 0.1 degree resolution -> 1800 x 3600 pixels.
image_size = (1800, 3600)
# Extent of our data
lat = [-90,90]
lon = [-180,180]
def writeImage(r_pixels, outputFile):
    """Write a single-band UInt32 density raster as a tiled, Deflate-compressed GeoTIFF."""
    ny, nx = image_size
    xmin, ymin, xmax, ymax = [min(lon), min(lat), max(lon), max(lat)]
    xres = (xmax - xmin) / float(nx)
    yres = (ymax - ymin) / float(ny)
    # (top-left x, pixel width, 0, top-left y, 0, negative pixel height)
    geotransform = (xmin, xres, 0, ymax, 0, -yres)
    driver = gdal.GetDriverByName('GTiff')
    dataset = driver.Create(outputFile, nx, ny, 1, gdal.GDT_UInt32,
                            options=['COMPRESS=Deflate', 'PREDICTOR=1', 'TILED=YES'])
    dataset.SetGeoTransform(geotransform)
    # Tag the raster as WGS84 lat/long (EPSG:4326).
    srs = osr.SpatialReference()
    srs.ImportFromEPSG(4326)
    dataset.SetProjection(srs.ExportToWkt())
    dataset.GetRasterBand(1).WriteArray(r_pixels)
    dataset.FlushCache()  # write to disk
    dataset = None
def extractAreaGeoTIFF(areaType, sourceFile, sourceSchema, targetFile, group, groupLabel):
    """Split per-area 0.1-degree density counts into one GeoTIFF per area and snapshot.

    Reads the CSV export, groups rows by the `group` column (e.g. country or
    gbifRegion), then by snapshot, and rasterizes each snapshot's
    (latitude, longitude, count) rows into a 1800x3600 pixel grid which is
    written under targetDir/areaType/<area>/groupLabel/geotiff.
    """
    inputFile = "/".join([sourceDir, sourceFile])
    print("Reading occurrence density data from", inputFile)
    frame = pd.read_csv(inputFile, names=sourceSchema, keep_default_na=False)
    by_area = frame.groupby(by=group)
    print("Writing group…", end=" ")
    for area in by_area.groups:
        print(area, end=" ", flush=True)
        outDir = "/".join([targetDir, areaType, area, groupLabel, "geotiff"])
        os.makedirs(outDir, exist_ok=True)
        by_snapshot = by_area.get_group(area).groupby(by='snapshot')
        for snapshot in by_snapshot.groups:
            pixels = np.zeros(image_size, dtype=np.uint32)
            for row in by_snapshot.get_group(snapshot).itertuples(index=False):
                (_snapshot, _area, latitude, longitude, count) = row
                if (longitude == '\\Nx' or latitude == '\\Nx;'):
                    print("Null row")
                    continue
                # Longitudes -180.0 to 179.9 map to columns 0-3599.
                x = 1800 + int(float(longitude) * 10)
                # Latitudes map to rows counted from the top of the grid.
                y = 1800 - 901 - int(float(latitude) * 10)
                if (x < 3600 and y < 1800):
                    pixels[y, x] = int(float(count))
                else:
                    print("Out of range:", area, snapshot, longitude, latitude, count, x, y)
            writeImage(pixels, "/".join([outDir, targetFile % snapshot]))
    print()
def extractGlobalGeoTIFF(sourceFile, sourceSchema, targetFile):
    """Write one global 0.1-degree density GeoTIFF per snapshot.

    Reads the CSV export, groups rows by snapshot, and rasterizes each
    snapshot's (latitude, longitude, count) rows into a 1800x3600 pixel grid
    written under targetDir/global/geotiff.
    """
    inputFile = "/".join([sourceDir, sourceFile])
    print("Reading occurrence density data from", inputFile)
    frame = pd.read_csv(inputFile, names=sourceSchema, keep_default_na=False)
    by_snapshot = frame.groupby(by='snapshot')
    print("Writing global…", end=" ")
    for snapshot in by_snapshot.groups:
        print(snapshot, end=" ", flush=True)
        outDir = "/".join([targetDir, "global", "geotiff"])
        os.makedirs(outDir, exist_ok=True)
        pixels = np.zeros(image_size, dtype=np.uint32)
        for row in by_snapshot.get_group(snapshot).itertuples(index=False):
            (_snapshot, latitude, longitude, count) = row
            if (longitude == '\\Nx' or latitude == '\\Nx;'):
                print("Null row")
                continue
            # Longitudes -180.0 to 179.9 map to columns 0-3599.
            x = 1800 + int(float(longitude) * 10)
            # Latitudes map to rows counted from the top of the grid.
            y = 1800 - 901 - int(float(latitude) * 10)
            if (x < 3600 and y < 1800):
                pixels[y, x] = int(float(count))
            else:
                print("Out of range:", snapshot, snapshot, longitude, latitude, count, x, y)
        writeImage(pixels, "/".join([outDir, targetFile % snapshot]))
    print()
# Drive all extractions: one call per (area type, direction) combination,
# plus one global pass. Each writes per-snapshot GeoTIFFs under targetDir.
# Density map, 0.1°, about country
extractAreaGeoTIFF(
    areaType = "country",
    sourceFile = "occ_density_country_point_one_deg.csv",
    sourceSchema = ["snapshot", "country", "latitude", "longitude", "count"],
    targetFile = "occ_density_point_one_deg_%s.tiff",
    group = "country",
    groupLabel = "about"
)
# Density map, 0.1°, published by country
extractAreaGeoTIFF(
    areaType = "country",
    sourceFile = "occ_density_publisherCountry_point_one_deg.csv",
    sourceSchema = ["snapshot", "publisherCountry", "latitude", "longitude", "count"],
    targetFile = "occ_density_point_one_deg_%s.tiff",
    group = "publisherCountry",
    groupLabel = "publishedBy"
)
# Density map, 0.1°, about region
extractAreaGeoTIFF(
    areaType = "gbifRegion",
    sourceFile = "occ_density_gbifRegion_point_one_deg.csv",
    sourceSchema = ["snapshot", "gbifRegion", "latitude", "longitude", "count"],
    targetFile = "occ_density_point_one_deg_%s.tiff",
    group = "gbifRegion",
    groupLabel = "about"
)
# Density map, 0.1°, published by region
extractAreaGeoTIFF(
    areaType = "gbifRegion",
    sourceFile = "occ_density_publisherGbifRegion_point_one_deg.csv",
    sourceSchema = ["snapshot", "publisherGbifRegion", "latitude", "longitude", "count"],
    targetFile = "occ_density_point_one_deg_%s.tiff",
    group = "publisherGbifRegion",
    groupLabel = "publishedBy"
)
# Density map, 0.1°, global
extractGlobalGeoTIFF(
    sourceFile = "occ_density_point_one_deg.csv",
    sourceSchema = ["snapshot", "latitude", "longitude", "count"],
    targetFile = "occ_density_point_one_deg_%s.tiff"
)
| [
"pandas.read_csv",
"os.makedirs",
"osgeo.osr.SpatialReference",
"numpy.zeros",
"osgeo.gdal.GetDriverByName"
] | [((1088, 1110), 'osgeo.osr.SpatialReference', 'osr.SpatialReference', ([], {}), '()\n', (1108, 1110), False, 'from osgeo import osr\n'), ((1611, 1676), 'pandas.read_csv', 'pd.read_csv', (['inputFile'], {'names': 'sourceSchema', 'keep_default_na': '(False)'}), '(inputFile, names=sourceSchema, keep_default_na=False)\n', (1622, 1676), True, 'import pandas as pd\n'), ((3384, 3449), 'pandas.read_csv', 'pd.read_csv', (['inputFile'], {'names': 'sourceSchema', 'keep_default_na': '(False)'}), '(inputFile, names=sourceSchema, keep_default_na=False)\n', (3395, 3449), True, 'import pandas as pd\n'), ((1915, 1946), 'os.makedirs', 'os.makedirs', (['dir'], {'exist_ok': '(True)'}), '(dir, exist_ok=True)\n', (1926, 1946), False, 'import os, sys\n'), ((3681, 3712), 'os.makedirs', 'os.makedirs', (['dir'], {'exist_ok': '(True)'}), '(dir, exist_ok=True)\n', (3692, 3712), False, 'import os, sys\n'), ((3810, 3847), 'numpy.zeros', 'np.zeros', (['image_size'], {'dtype': 'np.uint32'}), '(image_size, dtype=np.uint32)\n', (3818, 3847), True, 'import numpy as np\n'), ((881, 910), 'osgeo.gdal.GetDriverByName', 'gdal.GetDriverByName', (['"""GTiff"""'], {}), "('GTiff')\n", (901, 910), False, 'from osgeo import gdal\n'), ((2234, 2271), 'numpy.zeros', 'np.zeros', (['image_size'], {'dtype': 'np.uint32'}), '(image_size, dtype=np.uint32)\n', (2242, 2271), True, 'import numpy as np\n')] |
import numpy as np
import gym
import cv2
from baselines.common.atari_wrappers import FrameStack
from retro_contest.local import make as make_local
cv2.ocl.setUseOpenCL(False) # No GPU use
class PreprocessFrame(gym.ObservationWrapper):
    """
    Grayscales and resizes each observation frame.

    Observations become (height, width, 1) uint8 arrays.
    """

    def __init__(self, env, width=96, height=96):
        # Bug fix: zero-argument super() already binds self implicitly;
        # the previous super().__init__(self, env) passed self twice and
        # raised a TypeError on construction.
        super().__init__(env)
        self.width = width
        self.height = height
        self.observation_space = gym.spaces.Box(
            low=0, high=255,
            shape=(self.height, self.width, 1), dtype=np.uint8)

    def observation(self, frame):
        """
        Returns the preprocessed frame: grayscale, resized, channel axis kept.
        """
        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        frame = cv2.resize(frame, (self.width, self.height),
                           interpolation=cv2.INTER_AREA)
        # cvtColor drops the channel axis; restore it so shape is (H, W, 1).
        frame = frame[:, :, None]
        return frame
class ActionDiscretizer(gym.ActionWrapper):
    """
    Wraps a retro environment so it uses a small discrete action set.

    Each discrete action maps to a 12-element boolean mask, one entry per
    Genesis controller button.
    """

    def __init__(self, env):
        super(ActionDiscretizer, self).__init__(env)
        buttons = ['B', 'A', 'MODE', 'START',
                   'UP', 'DOWN', 'LEFT', 'RIGHT',
                   'C', 'Y', 'X', 'Z'
                   ]
        combos = [['LEFT'], ['RIGHT'], ['LEFT', 'DOWN'], [
            'RIGHT', 'DOWN'], ['DOWN'], ['DOWN', 'B'], ['B']]
        # Translate each button combo into a boolean button mask where the
        # True entries are the pressed buttons.
        self.actions_ = []
        for combo in combos:
            mask = np.array([False] * 12)
            for name in combo:
                mask[buttons.index(name)] = True
            self.actions_.append(mask)
        self.action_space = gym.spaces.Discrete(len(self.actions_))

    def action(self, a_id):
        """
        Returns a copy of the button mask for the given discrete action id.
        """
        return self.actions_[a_id].copy()
class RewardScaler(gym.RewardWrapper):
    """
    Multiplies every reward by a small constant factor.

    PPO is sensitive to reward magnitude, so this scaling affects performance.
    """

    def reward(self, reward):
        scale = 0.01
        return scale * reward
class AllowBackTracking(gym.Wrapper):
    """
    Rewards deltas in max(X) rather than deltas in X.

    The agent is not heavily discouraged from briefly exploring backwards
    when there is no way to advance forward directly: reward is only paid
    for new progress past the best position seen so far.
    """

    def __init__(self, env):
        super(AllowBackTracking, self).__init__(env)
        self._cur_x = 0
        self._max_x = 0

    def reset(self, **kwargs):
        """Reset the progress trackers along with the wrapped environment."""
        self._cur_x = 0
        self._max_x = 0
        return self.env.reset(**kwargs)

    def step(self, action):
        obs, reward, done, info = self.env.step(action)
        self._cur_x += reward
        # Pay out only the progress beyond the best-so-far position.
        progress = self._cur_x - self._max_x
        if progress > 0:
            self._max_x = self._cur_x
            return obs, progress, done, info
        return obs, 0, done, info
def create_env(env_idx):
    """
    Build one Sonic level environment (selected by ``env_idx``) with the
    standard preprocessing wrappers applied.
    """
    levels = [
        ('SonicTheHedgehog-Genesis', 'SpringYardZone.Act3'),
        ('SonicTheHedgehog-Genesis', 'SpringYardZone.Act2'),
        ('SonicTheHedgehog-Genesis', 'GreenHillZone.Act3'),
        ('SonicTheHedgehog-Genesis', 'GreenHillZone.Act1'),
        ('SonicTheHedgehog-Genesis', 'StarLightZone.Act2'),
        ('SonicTheHedgehog-Genesis', 'StarLightZone.Act1'),
        ('SonicTheHedgehog-Genesis', 'MarbleZone.Act2'),
        ('SonicTheHedgehog-Genesis', 'MarbleZone.Act1'),
        ('SonicTheHedgehog-Genesis', 'MarbleZone.Act3'),
        ('SonicTheHedgehog-Genesis', 'ScrapBrainZone.Act2'),
        ('SonicTheHedgehog-Genesis', 'LabyrinthZone.Act2'),
        ('SonicTheHedgehog-Genesis', 'LabyrinthZone.Act1'),
        ('SonicTheHedgehog-Genesis', 'LabyrinthZone.Act3'),
    ]
    game, state = levels[env_idx]
    print(game, state, flush=True)
    env = make_local(game=game,
                     state=state,
                     bk2dir='/records')
    env = ActionDiscretizer(env)   # discrete button combinations
    env = RewardScaler(env)        # scale rewards for PPO
    env = PreprocessFrame(env)     # grayscale + resize
    env = FrameStack(env, 4)       # stack the last 4 frames
    env = AllowBackTracking(env)   # reward deltas in max(X)
    return env
def make_train(env_indices=None, all_=False):
    """
    Build a list of environments for the given level indices.

    Args:
        env_indices: iterable of level indices; defaults to ``[0]``.
            (Fix: the previous mutable-list default argument is replaced
            with the ``None`` sentinel idiom; behavior is unchanged.)
        all_: when True, ignore ``env_indices`` and build all 13 levels.
    """
    if env_indices is None:
        env_indices = [0]
    env_indices = np.arange(0, 13) if all_ else env_indices
    return [create_env(idx) for idx in env_indices]
| [
"cv2.ocl.setUseOpenCL",
"gym.spaces.Box",
"baselines.common.atari_wrappers.FrameStack",
"numpy.array",
"retro_contest.local.make",
"cv2.cvtColor",
"cv2.resize",
"numpy.arange"
] | [((150, 177), 'cv2.ocl.setUseOpenCL', 'cv2.ocl.setUseOpenCL', (['(False)'], {}), '(False)\n', (170, 177), False, 'import cv2\n'), ((4262, 4361), 'retro_contest.local.make', 'make_local', ([], {'game': "wrappers[env_idx]['game']", 'state': "wrappers[env_idx]['state']", 'bk2dir': '"""/records"""'}), "(game=wrappers[env_idx]['game'], state=wrappers[env_idx]['state'],\n bk2dir='/records')\n", (4272, 4361), True, 'from retro_contest.local import make as make_local\n'), ((4585, 4603), 'baselines.common.atari_wrappers.FrameStack', 'FrameStack', (['env', '(4)'], {}), '(env, 4)\n', (4595, 4603), False, 'from baselines.common.atari_wrappers import FrameStack\n'), ((470, 558), 'gym.spaces.Box', 'gym.spaces.Box', ([], {'low': '(0)', 'high': '(255)', 'shape': '(self.height, self.width, 1)', 'dtype': 'np.uint8'}), '(low=0, high=255, shape=(self.height, self.width, 1), dtype=\n np.uint8)\n', (484, 558), False, 'import gym\n'), ((693, 732), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_RGB2GRAY'], {}), '(frame, cv2.COLOR_RGB2GRAY)\n', (705, 732), False, 'import cv2\n'), ((749, 823), 'cv2.resize', 'cv2.resize', (['frame', '(self.width, self.height)'], {'interpolation': 'cv2.INTER_AREA'}), '(frame, (self.width, self.height), interpolation=cv2.INTER_AREA)\n', (759, 823), False, 'import cv2\n'), ((4793, 4809), 'numpy.arange', 'np.arange', (['(0)', '(13)'], {}), '(0, 13)\n', (4802, 4809), True, 'import numpy as np\n'), ((1786, 1808), 'numpy.array', 'np.array', (['([False] * 12)'], {}), '([False] * 12)\n', (1794, 1808), True, 'import numpy as np\n')] |
import logging
import os
import random
from typing import List, Tuple
import attr
import numpy as np
import pandas as pd
from flair.data import Sentence
from sklearn.datasets import dump_svmlight_file
from torch.utils.data import Dataset
from gleipnir.corpora import *
from gleipnir.data import CandidateGenerator
from gleipnir.kb import FusekiKnowledgeBase, WikidataKnowledgeBase, KnowledgeBase
from gleipnir.util import get_logger
# Module-wide logger, named after this module.
logger = get_logger(__name__)
class LetorDataset:
    """Marker base class for learning-to-rank (LETOR) datasets; carries no behavior."""
    pass
@attr.s
class TrainingData:
    """Bundle of a corpus' splits together with its knowledge base and candidate generator."""
    corpus_name: str = attr.ib()
    corpus_train: Corpus = attr.ib()
    corpus_dev: Corpus = attr.ib()
    corpus_test: Corpus = attr.ib()
    corpus_all: Corpus = attr.ib()
    kb: KnowledgeBase = attr.ib()
    cg: CandidateGenerator = attr.ib()
@pd.api.extensions.register_dataframe_accessor("ext")
class HandcraftedExtensionAccessor:
def __init__(self, pandas_obj: pd.DataFrame):
self._df = pandas_obj
self.name = ""
@property
def candidate_ids(self):
return self._df["candidate_id"]
@property
def number_of_groups(self) -> int:
return self._df["qid"].nunique()
@property
def X(self):
return self._df[self.features].astype('float32').values
@property
def y(self):
return self._df["score"].astype('float32').values
@property
def uris(self):
return self._df["uri"].values
@property
def features(self) -> List[str]:
return [f for f in self._df.columns if f.startswith("feat_")]
@property
def num_features(self) -> int:
return len(self.features)
@property
def group_sizes(self) -> List[int]:
return [int(x) for x in self._df["qid"].value_counts(sort=False)]
@property
def groupby_qid(self) -> List[pd.DataFrame]:
return [_slice for (_, _slice) in self._df.groupby(["qid"])]
@property
def group_X(self) -> List[np.array]:
return [_slice.ext.X for (_, _slice) in self._df.groupby(["qid"])]
@property
def group_y(self) -> List[np.array]:
return [_slice.ext.y for (_, _slice) in self._df.groupby(["qid"])]
@property
def mentions(self) -> List[str]:
return [_slice["mention"].iloc[0] for (_, _slice) in self._df.groupby(["qid"])]
@property
def labels(self) -> List[List[str]]:
return [_slice["label"] for (_, _slice) in self._df.groupby(["qid"])]
@property
def contexts(self) -> List[str]:
return [_slice["context"].iloc[0] for (_, _slice) in self._df.groupby(["qid"])]
@property
def gold_uris(self) -> List[str]:
return [_slice["gold"].iloc[0] for (_, _slice) in self._df.groupby(["qid"])]
@property
def gold_indices(self) -> List[int]:
return [_slice["gold_idx"].iloc[0] for (_, _slice) in self._df.groupby(["qid"])]
def split_by_qid(self, qid) -> Tuple[pd.DataFrame, pd.DataFrame]:
p1 = self._df.query(f"qid < {qid}")
p2 = self._df.query(f"qid >= {qid}")
return p1, p2
def to_csv(self):
assert self.name, "Need to set name when saving to csv"
self._df.to_csv(os.path.join(PATH_HANDCRAFTED, f"{self.name}.csv"), index=False, sep="\t")
def subsample(self, number_of_groups: int) -> pd.DataFrame:
""" Selects the first `number_of_groups` groups. """
# We assume that qids are sorted ascending
limit = self._df["qid"].values[0] + number_of_groups
result = self._df.query(f"qid < {limit}")
assert len(result.ext.groupby_qid) == number_of_groups
return result
def slice_by_qid(self, lower: int, upper: int) -> pd.DataFrame:
# We need to find the first qid as the offset
offset = self._df["qid"].values[0]
return self._df.query(f"qid >= {offset + lower} and qid < {offset + upper}")
def to_svmlight(self):
assert self.name, "Need to set name when saving to csv"
dump_svmlight_file(self.X, self.y, os.path.join(PATH_HANDCRAFTED, f"{self.name}.dat"), query_id=self._df["qid"])
def get_raw_corpus_data(s: str, caching: bool = True):
    """Load train/dev/test/all splits plus knowledge base and candidate generator for corpus ``s``.

    Raises:
        Exception: if ``s`` names no known corpus.
    """
    if s == "aida":
        splits = (load_aida_train(), load_aida_dev(),
                  load_aida_test(), load_aida_all())
        kb = WikidataKnowledgeBase(caching=caching)
    elif s == "wwo-fuseki":
        splits = (load_wwo_train(), load_wwo_dev(),
                  load_wwo_test(), load_wwo_all())
        kb = FusekiKnowledgeBase(name="wwo", caching=caching)
    elif s == "1641-fuseki":
        splits = (load_depositions_train(), load_depositions_dev(),
                  load_depositions_test(), load_depositions_all())
        kb = FusekiKnowledgeBase(name="depositions", caching=caching)
    else:
        raise Exception(f"Unknown corpus name: {s}")
    cg = CandidateGenerator(kb)
    return TrainingData(s, *splits, kb, cg)
def load_dataframe_from_csv(name: str) -> pd.DataFrame:
    """Read the handcrafted tab-separated dataframe ``name`` and tag it with its name."""
    df = pd.read_csv(os.path.join(PATH_HANDCRAFTED, f"{name}.csv"), sep="\t")
    df.ext.name = name
    return df
def load_handcrafted_data(name: str, evaluate_on_test: bool = False) -> Tuple[pd.DataFrame, pd.DataFrame]:
    """Load the train split of ``name`` plus its dev split (default) or test split."""
    logger.info("Loading [%s]", name)
    eval_suffix = "_test" if evaluate_on_test else "_dev"
    df_train = load_dataframe_from_csv(name + "_train")
    df_eval = load_dataframe_from_csv(name + eval_suffix)
    # Missing values are replaced by an explicit unknown token.
    df_train.fillna('<unk>', inplace=True)
    df_eval.fillna('<unk>', inplace=True)
    return df_train, df_eval
def load_handcrafted_simulation_data(name: str) -> pd.DataFrame:
    """Load the full simulation split for ``name`` with NaNs replaced by '<unk>'."""
    logger.info("Loading [%s]", name)
    df = load_dataframe_from_csv(f"{name}_full_sim")
    df.fillna('<unk>', inplace=True)
    return df
class HandcraftedLetorDataset(Dataset):
    """Pairwise LETOR dataset over handcrafted features.

    Each item pairs a group's gold candidate with one uniformly sampled
    negative candidate from the same group.
    (Sampling scheme as in https://github.com/yutayamazaki/RankNet-PyTorch/)
    """

    def __init__(self, df: pd.DataFrame):
        # Groups with fewer than two candidates cannot yield a
        # (positive, negative) pair, so they are filtered out.
        sizes = df.ext.group_sizes
        qids = df["qid"]
        keep = {qid for qid, size in zip(qids.unique(), sizes) if size >= 2}
        df = df[qids.isin(keep)]
        self.X_grouped = df.ext.group_X
        self.y_grouped = df.ext.group_y
        self.gold_indices = df.ext.gold_indices
        assert len(self.X_grouped) == len(self.y_grouped) == len(self.gold_indices), "Groups have to have the same length!"

    def __len__(self) -> int:
        return len(self.y_grouped)

    def __getitem__(self, group_idx: int):
        X = self.X_grouped[group_idx]
        y = self.y_grouped[group_idx]
        gold_idx = self.gold_indices[group_idx]
        assert gold_idx >= 0, "Group does not have gold label!"
        assert y[gold_idx] == 1.0, "Gold should have score of 1!"
        # Sample a random negative from every candidate except the gold one.
        negative_candidates = list(range(len(y)))
        negative_candidates.remove(gold_idx)
        idx_n = random.choice(negative_candidates)
        assert idx_n != gold_idx
        return {
            "x_p": X[gold_idx],
            "x_n": X[idx_n],
            "y_p": y[gold_idx],
            "y_n": y[idx_n]
        }
class PairwiseFlairLetorDataset(Dataset):
    """Pairwise LETOR dataset yielding Flair ``Sentence`` objects.

    Each item pairs a group's gold candidate (label + description) with one
    randomly sampled negative candidate from the same group.
    (Sampling scheme as in https://github.com/yutayamazaki/RankNet-PyTorch/)
    """

    def __init__(self, df: pd.DataFrame):
        mentions = []
        grouped_kb_labels = []
        grouped_descriptions = []
        contexts = []
        for group in df.ext.groupby_qid:
            # The mention is identical for all items in the group
            mentions.append(Sentence(group["mention"].values[0], use_tokenizer=False))
            grouped_kb_labels.append([Sentence(x, use_tokenizer=True) for x in group["label"]])
            grouped_descriptions.append([Sentence(x, use_tokenizer=True) for x in group["description"]])
            contexts.append(Sentence(group["context"].values[0], use_tokenizer=True))
        self.mentions = mentions
        self.grouped_kb_labels = grouped_kb_labels
        self.grouped_descriptions = grouped_descriptions
        self.contexts = contexts
        self.y_grouped = df.ext.group_y
        self.gold_indices = df.ext.gold_indices

    def __len__(self) -> int:
        return len(self.y_grouped)

    def __getitem__(self, group_idx: int):
        mention = self.mentions[group_idx]
        labels = self.grouped_kb_labels[group_idx]
        descriptions = self.grouped_descriptions[group_idx]
        context = self.contexts[group_idx]
        y = self.y_grouped[group_idx]
        gold_idx = self.gold_indices[group_idx]
        assert gold_idx >= 0, "Group does not have gold label!"
        assert y[gold_idx] == 1.0, "Gold should have score of 1!"
        label_p = labels[gold_idx]
        description_p = descriptions[gold_idx]
        y_p = y[gold_idx]
        # Fix: sample the negative from all candidates EXCEPT the gold one.
        # The previous ``np.arange(start=1, stop=len(y))`` only excluded
        # index 0 and could therefore pick the gold candidate as the
        # "negative" whenever gold_idx != 0 (the sibling
        # HandcraftedLetorDataset already excludes gold_idx correctly).
        indices = [i for i in range(len(y)) if i != gold_idx]
        idx_n = np.random.choice(indices)
        label_n = labels[idx_n]
        description_n = descriptions[idx_n]
        y_n = y[idx_n]
        return {
            "mention": mention,
            "label_p": label_p,
            "description_p": description_p,
            "y_p": y_p,
            "label_n": label_n,
            "description_n": description_n,
            "y_n": y_n,
            "context": context
        }
| [
"random.choice",
"gleipnir.data.CandidateGenerator",
"pandas.read_csv",
"numpy.random.choice",
"gleipnir.kb.WikidataKnowledgeBase",
"os.path.join",
"gleipnir.kb.FusekiKnowledgeBase",
"gleipnir.util.get_logger",
"pandas.api.extensions.register_dataframe_accessor",
"flair.data.Sentence",
"attr.ib"... | [((448, 468), 'gleipnir.util.get_logger', 'get_logger', (['__name__'], {}), '(__name__)\n', (458, 468), False, 'from gleipnir.util import get_logger\n'), ((781, 833), 'pandas.api.extensions.register_dataframe_accessor', 'pd.api.extensions.register_dataframe_accessor', (['"""ext"""'], {}), "('ext')\n", (826, 833), True, 'import pandas as pd\n'), ((552, 561), 'attr.ib', 'attr.ib', ([], {}), '()\n', (559, 561), False, 'import attr\n'), ((589, 598), 'attr.ib', 'attr.ib', ([], {}), '()\n', (596, 598), False, 'import attr\n'), ((624, 633), 'attr.ib', 'attr.ib', ([], {}), '()\n', (631, 633), False, 'import attr\n'), ((660, 669), 'attr.ib', 'attr.ib', ([], {}), '()\n', (667, 669), False, 'import attr\n'), ((695, 704), 'attr.ib', 'attr.ib', ([], {}), '()\n', (702, 704), False, 'import attr\n'), ((729, 738), 'attr.ib', 'attr.ib', ([], {}), '()\n', (736, 738), False, 'import attr\n'), ((768, 777), 'attr.ib', 'attr.ib', ([], {}), '()\n', (775, 777), False, 'import attr\n'), ((4900, 4922), 'gleipnir.data.CandidateGenerator', 'CandidateGenerator', (['kb'], {}), '(kb)\n', (4918, 4922), False, 'from gleipnir.data import CandidateGenerator\n'), ((5068, 5113), 'os.path.join', 'os.path.join', (['PATH_HANDCRAFTED', 'f"""{name}.csv"""'], {}), "(PATH_HANDCRAFTED, f'{name}.csv')\n", (5080, 5113), False, 'import os\n'), ((5123, 5147), 'pandas.read_csv', 'pd.read_csv', (['p'], {'sep': '"""\t"""'}), "(p, sep='\\t')\n", (5134, 5147), True, 'import pandas as pd\n'), ((4281, 4319), 'gleipnir.kb.WikidataKnowledgeBase', 'WikidataKnowledgeBase', ([], {'caching': 'caching'}), '(caching=caching)\n', (4302, 4319), False, 'from gleipnir.kb import FusekiKnowledgeBase, WikidataKnowledgeBase, KnowledgeBase\n'), ((7044, 7066), 'random.choice', 'random.choice', (['indices'], {}), '(indices)\n', (7057, 7066), False, 'import random\n'), ((8963, 8988), 'numpy.random.choice', 'np.random.choice', (['indices'], {}), '(indices)\n', (8979, 8988), True, 'import numpy as np\n'), ((3132, 3182), 
'os.path.join', 'os.path.join', (['PATH_HANDCRAFTED', 'f"""{self.name}.csv"""'], {}), "(PATH_HANDCRAFTED, f'{self.name}.csv')\n", (3144, 3182), False, 'import os\n'), ((3966, 4016), 'os.path.join', 'os.path.join', (['PATH_HANDCRAFTED', 'f"""{self.name}.dat"""'], {}), "(PATH_HANDCRAFTED, f'{self.name}.dat')\n", (3978, 4016), False, 'import os\n'), ((4504, 4552), 'gleipnir.kb.FusekiKnowledgeBase', 'FusekiKnowledgeBase', ([], {'name': '"""wwo"""', 'caching': 'caching'}), "(name='wwo', caching=caching)\n", (4523, 4552), False, 'from gleipnir.kb import FusekiKnowledgeBase, WikidataKnowledgeBase, KnowledgeBase\n'), ((4770, 4826), 'gleipnir.kb.FusekiKnowledgeBase', 'FusekiKnowledgeBase', ([], {'name': '"""depositions"""', 'caching': 'caching'}), "(name='depositions', caching=caching)\n", (4789, 4826), False, 'from gleipnir.kb import FusekiKnowledgeBase, WikidataKnowledgeBase, KnowledgeBase\n'), ((7654, 7711), 'flair.data.Sentence', 'Sentence', (["group['mention'].values[0]"], {'use_tokenizer': '(False)'}), "(group['mention'].values[0], use_tokenizer=False)\n", (7662, 7711), False, 'from flair.data import Sentence\n'), ((7942, 7998), 'flair.data.Sentence', 'Sentence', (["group['context'].values[0]"], {'use_tokenizer': '(True)'}), "(group['context'].values[0], use_tokenizer=True)\n", (7950, 7998), False, 'from flair.data import Sentence\n'), ((7751, 7782), 'flair.data.Sentence', 'Sentence', (['x'], {'use_tokenizer': '(True)'}), '(x, use_tokenizer=True)\n', (7759, 7782), False, 'from flair.data import Sentence\n'), ((7850, 7881), 'flair.data.Sentence', 'Sentence', (['x'], {'use_tokenizer': '(True)'}), '(x, use_tokenizer=True)\n', (7858, 7881), False, 'from flair.data import Sentence\n')] |
import datetime
from sklearn.metrics import mean_squared_error, mean_absolute_error, f1_score
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from .dataset import DataSet
from .metergroup import MeterGroup
from .disaggregate import (
CombinatorialOptimisation,
Mean,
FHMM,
Zero,
DAE,
Seq2Point,
Seq2Seq,
DSC,
Disaggregator,
) # , AFHMM,AFHMM_SAC
class API:
"""
The API ia designed for rapid experimentation with NILM Algorithms.
"""
    def __init__(self, params):
        """
        Initialize the API with default experiment state, then immediately
        run the experiment described by ``params``.
        """
        self.power = {}
        self.sample_period = 1
        self.appliances = []
        self.methods = {}
        self.chunk_size = None
        # Per-method parameter overrides; keys are the supported model names.
        self.method_dict = {
            "CO": {},
            "FHMM": {},
            "Hart85": {},
            "DAE": {},
            "Mean": {},
            "Zero": {},
            "WindowGRU": {},
            "Seq2Point": {},
            "RNN": {},
            "Seq2Seq": {},
            "DSC": {},
            "AFHMM": {},
            "AFHMM_SAC": {},
        }
        self.pre_trained = False
        self.metrics = []
        self.train_datasets_dict = {}
        self.test_datasets_dict = {}
        # When True, the aggregate is synthesized by summing the individual
        # appliance readings instead of using the measured mains.
        self.artificial_aggregate = False
        self.train_submeters = []
        self.train_mains = pd.DataFrame()
        self.test_submeters = []
        self.test_mains = pd.DataFrame()
        self.gt_overall = {}
        self.pred_overall = {}
        # Filled by store_classifier_instances() with (name, model) tuples.
        self.classifiers = []
        self.DROP_ALL_NANS = True
        self.mae = pd.DataFrame()
        self.rmse = pd.DataFrame()
        # Kick off the experiment right away on construction.
        self.experiment(params)
def initialise(self, params):
"""
Instantiates the API with the specified Parameters
"""
for elems in params["params"]["power"]:
self.power = params["params"]["power"]
self.sample_period = params["sample_rate"]
for elems in params["appliances"]:
self.appliances.append(elems)
self.pre_trained = ["pre_trained"]
self.train_datasets_dict = params["train"]["datasets"]
self.test_datasets_dict = params["test"]["datasets"]
self.metrics = params["test"]["metrics"]
self.methods = params["methods"]
self.artificial_aggregate = params.get(
"artificial_aggregate", self.artificial_aggregate
)
self.chunk_size = params.get("chunk_size", self.chunk_size)
def experiment(self, params):
"""
Calls the Experiments with the specified parameters
"""
self.params = params
self.initialise(params)
if params["chunk_size"]:
# This is for training and Testing in Chunks
self.load_datasets_chunks()
else:
# This is to load all the data from all buildings and use it for training and testing. This might not be possible to execute on computers with low specs
self.load_datasets()
def load_datasets_chunks(self):
"""
This function loads the data from buildings and datasets with the specified chunk size and trains on each of them.
After the training process is over, it tests on the specified testing set whilst loading it in chunks.
"""
# First, we initialize all the models
self.store_classifier_instances()
d = self.train_datasets_dict
for model_name, clf in self.classifiers:
# If the model is a neural net, it has an attribute n_epochs, Ex: DAE, Seq2Point
if hasattr(clf, "n_epochs"):
epochs = clf.n_epochs
# If it doesn't have the attribute n_epochs, this is executed. Ex: Mean, Zero
else:
epochs = 1
# If the model has the filename specified for loading the pretrained model, then we don't need to load training data
if clf.load_model_path:
print(clf.MODEL_NAME, " is loading the pretrained model")
continue
for q in range(epochs):
for dataset in d:
print("Loading data for ", dataset, " dataset")
for building in d[dataset]["buildings"]:
train = DataSet(d[dataset]["path"])
print("Loading building ... ", building)
train.set_window(
start=d[dataset]["buildings"][building]["start_time"],
end=d[dataset]["buildings"][building]["end_time"],
)
mains_iterator = (
train.buildings[building]
.elec.mains()
.load(
chunksize=self.chunk_size,
physical_quantity="power",
ac_type=self.power["mains"],
sample_period=self.sample_period,
)
)
print(self.appliances)
appliance_iterators = [
train.buildings[building]
.elec.select_using_appliances(type=app_name)
.load(
chunksize=self.chunk_size,
physical_quantity="power",
ac_type=self.power["appliance"],
sample_period=self.sample_period,
)
for app_name in self.appliances
]
print(train.buildings[building].elec.mains())
for chunk_num, chunk in enumerate(
train.buildings[building]
.elec.mains()
.load(
chunksize=self.chunk_size,
physical_quantity="power",
ac_type=self.power["mains"],
sample_period=self.sample_period,
)
):
# Dummry loop for executing on outer level. Just for looping till end of a chunk
print("starting enumeration..........")
train_df = next(mains_iterator)
appliance_readings = []
for i in appliance_iterators:
try:
appliance_df = next(i)
except StopIteration:
pass
appliance_readings.append(appliance_df)
if self.DROP_ALL_NANS:
train_df, appliance_readings = self.dropna(
train_df, appliance_readings
)
if self.artificial_aggregate:
print("Creating an Artificial Aggregate")
train_df = pd.DataFrame(
np.zeros(appliance_readings[0].shape),
index=appliance_readings[0].index,
columns=appliance_readings[0].columns,
)
for app_reading in appliance_readings:
train_df += app_reading
train_appliances = []
for cnt, i in enumerate(appliance_readings):
train_appliances.append((self.appliances[cnt], [i]))
self.train_mains = [train_df]
self.train_submeters = train_appliances
clf.partial_fit(self.train_mains, self.train_submeters)
print("...............Finished the Training Process ...................")
print("...............Started the Testing Process ...................")
d = self.test_datasets_dict
for dataset in d:
print("Loading data for ", dataset, " dataset")
for building in d[dataset]["buildings"]:
test = DataSet(d[dataset]["path"])
test.set_window(
start=d[dataset]["buildings"][building]["start_time"],
end=d[dataset]["buildings"][building]["end_time"],
)
mains_iterator = (
test.buildings[building]
.elec.mains()
.load(
chunksize=self.chunk_size,
physical_quantity="power",
ac_type=self.power["mains"],
sample_period=self.sample_period,
)
)
appliance_iterators = [
test.buildings[building]
.elec.select_using_appliances(type=app_name)
.load(
chunksize=self.chunk_size,
physical_quantity="power",
ac_type=self.power["appliance"],
sample_period=self.sample_period,
)
for app_name in self.appliances
]
for chunk_num, chunk in enumerate(
test.buildings[building]
.elec.mains()
.load(
chunksize=self.chunk_size,
physical_quantity="power",
ac_type=self.power["mains"],
sample_period=self.sample_period,
)
):
test_df = next(mains_iterator)
appliance_readings = []
for i in appliance_iterators:
try:
appliance_df = next(i)
except StopIteration:
appliance_df = pd.DataFrame()
appliance_readings.append(appliance_df)
if self.DROP_ALL_NANS:
test_df, appliance_readings = self.dropna(
test_df, appliance_readings
)
if self.artificial_aggregate:
print("Creating an Artificial Aggregate")
test_df = pd.DataFrame(
np.zeros(appliance_readings[0].shape),
index=appliance_readings[0].index,
columns=appliance_readings[0].columns,
)
for app_reading in appliance_readings:
test_df += app_reading
test_appliances = []
for cnt, i in enumerate(appliance_readings):
test_appliances.append((self.appliances[cnt], [i]))
self.test_mains = [test_df]
self.test_submeters = test_appliances
print(
"Results for Dataset {dataset} Building {building} Chunk {chunk_num}".format(
dataset=dataset, building=building, chunk_num=chunk_num
)
)
self.call_predict(self.classifiers)
def dropna(self, mains_df, appliance_dfs):
"""
Drops the missing values in the Mains reading and appliance readings and returns consistent data by copmuting the intersection
"""
print("Dropping missing values")
# The below steps are for making sure that data is consistent by doing intersection across appliances
mains_df = mains_df.dropna()
for i in range(len(appliance_dfs)):
appliance_dfs[i] = appliance_dfs[i].dropna()
ix = mains_df.index
for app_df in appliance_dfs:
ix = ix.intersection(app_df.index)
mains_df = mains_df.loc[ix]
new_appliances_list = []
for app_df in appliance_dfs:
new_appliances_list.append(app_df.loc[ix])
return mains_df, new_appliances_list
def load_datasets(self):
# This function has a few issues, which should be addressed soon
self.store_classifier_instances()
d = self.train_datasets_dict
print("............... Loading Data for training ...................")
# store the train_main readings for all buildings
for dataset in d:
print("Loading data for ", dataset, " dataset")
train = DataSet(d[dataset]["path"])
for building in d[dataset]["buildings"]:
print("Loading building ... ", building)
train.set_window(
start=d[dataset]["buildings"][building]["start_time"],
end=d[dataset]["buildings"][building]["end_time"],
)
self.train_mains = self.train_mains.append(
next(
train.buildings[building]
.elec.mains()
.load(
physical_quantity="power",
ac_type=self.power["mains"],
sample_period=self.sample_period,
)
)
)
# store train submeters reading
train_buildings = pd.DataFrame()
for appliance in self.appliances:
train_df = pd.DataFrame()
print("For appliance .. ", appliance)
for dataset in d:
print("Loading data for ", dataset, " dataset")
train = DataSet(d[dataset]["path"])
for building in d[dataset]["buildings"]:
print("Loading building ... ", building)
# store data for submeters
train.set_window(
start=d[dataset]["buildings"][building]["start_time"],
end=d[dataset]["buildings"][building]["end_time"],
)
train_df = train_df.append(
next(
train.buildings[building]
.elec.submeters()
.select_using_appliances(type=appliance)
.load(
physical_quantity="power",
ac_type=self.power["appliance"],
sample_period=self.sample_period,
)
)
)
self.train_submeters.append((appliance, [train_df]))
# create instance of the training methods
# train models
# store data for mains
self.train_mains = [self.train_mains]
self.call_partial_fit()
d = self.test_datasets_dict
# store the test_main readings for all buildings
for dataset in d:
print("Loading data for ", dataset, " dataset")
test = DataSet(d[dataset]["path"])
for building in d[dataset]["buildings"]:
test.set_window(
start=d[dataset]["buildings"][building]["start_time"],
end=d[dataset]["buildings"][building]["end_time"],
)
self.test_mains = next(
test.buildings[building]
.elec.mains()
.load(
physical_quantity="power",
ac_type=self.power["mains"],
sample_period=self.sample_period,
)
)
self.test_submeters = []
for appliance in self.appliances:
test_df = next(
(
test.buildings[building]
.elec.submeters()
.select_using_appliances(type=appliance)
.load(
physical_quantity="power",
ac_type=self.power["appliance"],
sample_period=self.sample_period,
)
)
)
self.test_submeters.append((appliance, [test_df]))
self.test_mains = [self.test_mains]
self.call_predict(self.classifiers)
def store_classifier_instances(self):
"""
This function is reponsible for initializing the models with the specified model parameters
"""
method_dict = {}
for i in self.method_dict:
if i in self.methods:
self.method_dict[i].update(self.methods[i])
method_dict = {
"CO": CombinatorialOptimisation(self.method_dict["CO"]),
"FHMM": FHMM(self.method_dict["FHMM"]),
"DAE": DAE(self.method_dict["DAE"]),
"Mean": Mean(self.method_dict["Mean"]),
"Zero": Zero(self.method_dict["Zero"]),
"Seq2Seq": Seq2Seq(self.method_dict["Seq2Seq"]),
"Seq2Point": Seq2Point(self.method_dict["Seq2Point"]),
"DSC": DSC(self.method_dict["DSC"]),
# 'AFHMM':AFHMM(self.method_dict['AFHMM']),
# 'AFHMM_SAC':AFHMM_SAC(self.method_dict['AFHMM_SAC'])
#'RNN':RNN(self.method_dict['RNN'])
}
for name in self.methods:
if name in method_dict:
clf = method_dict[name]
self.classifiers.append((name, clf))
else:
print(
"\n\nThe method {model_name} specied does not exist. \n\n".format(
model_name=i
)
)
def call_predict(self, classifiers):
"""
This functions computers the predictions on the self.test_mains using all the trained models and then compares different learn't models using the metrics specified
"""
pred_overall = {}
gt_overall = {}
for name, clf in classifiers:
gt_overall, pred_overall[name] = self.predict(
clf,
self.test_mains,
self.test_submeters,
self.sample_period,
"Europe/London",
)
self.gt_overall = gt_overall
self.pred_overall = pred_overall
for i in gt_overall.columns:
plt.figure()
plt.plot(gt_overall[i], label="truth")
for clf in pred_overall:
plt.plot(pred_overall[clf][i], label=clf)
plt.title(i)
plt.legend()
if gt_overall.size == 0:
print("No samples found in ground truth")
return None
for metric in self.metrics:
if metric == "f1-score":
f1_score = {}
for clf_name, clf in classifiers:
f1_score[clf_name] = self.compute_f1_score(
gt_overall, pred_overall[clf_name]
)
f1_score = pd.DataFrame(f1_score)
print("............ ", metric, " ..............")
print(f1_score)
elif metric == "rmse":
rmse = {}
for clf_name, clf in classifiers:
rmse[clf_name] = self.compute_rmse(
gt_overall, pred_overall[clf_name]
)
rmse = pd.DataFrame(rmse)
self.rmse = rmse
print("............ ", metric, " ..............")
print(rmse)
elif metric == "mae":
mae = {}
for clf_name, clf in classifiers:
mae[clf_name] = self.compute_mae(gt_overall, pred_overall[clf_name])
mae = pd.DataFrame(mae)
self.mae = mae
print("............ ", metric, " ..............")
print(mae)
elif metric == "rel_error":
rel_error = {}
for clf_name, clf in classifiers:
rel_error[clf_name] = self.compute_rel_error(
gt_overall, pred_overall[clf_name]
)
rel_error = pd.DataFrame(rel_error)
print("............ ", metric, " ..............")
print(rel_error)
else:
print(
"The requested metric {metric} does not exist.".format(
metric=metric
)
)
def predict(self, clf, test_elec, test_submeters, sample_period, timezone):
"""
Generates predictions on the test dataset using the specified classifier.
"""
# "ac_type" varies according to the dataset used.
# Make sure to use the correct ac_type before using the default parameters in this code.
pred_list = clf.disaggregate_chunk(test_elec)
# It might not have time stamps sometimes due to neural nets
# It has the readings for all the appliances
concat_pred_df = pd.concat(pred_list, axis=0)
gt = {}
for meter, data in test_submeters:
concatenated_df_app = pd.concat(data, axis=1)
index = concatenated_df_app.index
gt[meter] = pd.Series(concatenated_df_app.values.flatten(), index=index)
gt_overall = pd.DataFrame(gt, dtype="float32")
pred = {}
for app_name in concat_pred_df.columns:
app_series_values = concat_pred_df[app_name].values.flatten()
# Neural nets do extra padding sometimes, to fit, so get rid of extra predictions
app_series_values = app_series_values[: len(gt_overall[app_name])]
# print (len(gt_overall[app_name]),len(app_series_values))
pred[app_name] = pd.Series(app_series_values, index=gt_overall.index)
pred_overall = pd.DataFrame(pred, dtype="float32")
# gt[i] = pd.DataFrame({k:v.squeeze() for k,v in iteritems(gt[i]) if len(v)}, index=next(iter(gt[i].values())).index).dropna()
# If everything can fit in memory
# gt_overall = pd.concat(gt)
# gt_overall.index = gt_overall.index.droplevel()
# #pred_overall = pd.concat(pred)
# pred_overall.index = pred_overall.index.droplevel()
# Having the same order of columns
# gt_overall = gt_overall[pred_overall.columns]
# #Intersection of index
# gt_index_utc = gt_overall.index.tz_convert("UTC")
# pred_index_utc = pred_overall.index.tz_convert("UTC")
# common_index_utc = gt_index_utc.intersection(pred_index_utc)
# common_index_local = common_index_utc.tz_convert(timezone)
# gt_overall = gt_overall.loc[common_index_local]
# pred_overall = pred_overall.loc[common_index_local]
# appliance_labels = [m for m in gt_overall.columns.values]
# gt_overall.columns = appliance_labels
# pred_overall.columns = appliance_labels
return gt_overall, pred_overall
# metrics
def compute_mae(self, gt, pred):
"""
Computes the Mean Absolute Error between Ground truth and Prediction
"""
mae = {}
for appliance in gt.columns:
mae[appliance] = mean_absolute_error(gt[appliance], pred[appliance])
return pd.Series(mae)
def compute_rmse(self, gt, pred):
"""
Computes the Root Mean Squared Error between Ground truth and Prediction
"""
rms_error = {}
for appliance in gt.columns:
rms_error[appliance] = np.sqrt(
mean_squared_error(gt[appliance], pred[appliance])
)
# print (gt['sockets'])
# print (pred[])
return pd.Series(rms_error)
def compute_f1_score(self, gt, pred):
"""
Computes the F1 Score between Ground truth and Prediction
"""
f1 = {}
gttemp = {}
predtemp = {}
for appliance in gt.columns:
gttemp[appliance] = np.array(gt[appliance])
gttemp[appliance] = np.where(gttemp[appliance] < 10, 0, 1)
predtemp[appliance] = np.array(pred[appliance])
predtemp[appliance] = np.where(predtemp[appliance] < 10, 0, 1)
f1[appliance] = f1_score(gttemp[appliance], predtemp[appliance])
return pd.Series(f1)
def compute_rel_error(self, gt, pred):
"""
Computes the Relative Error between Ground truth and Prediction
"""
rel_error = {}
for appliance in gt.columns:
rel_error[appliance] = np.sum(
np.sum(abs(gt[appliance] - pred[appliance])) / len(gt[appliance])
)
return pd.Series(rel_error)
| [
"pandas.Series",
"sklearn.metrics.f1_score",
"numpy.where",
"matplotlib.pyplot.plot",
"sklearn.metrics.mean_squared_error",
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.zeros",
"pandas.DataFrame",
"matplotlib.pyplot.title",
"pandas.concat",
"matplotlib.pyplot.legend",
"sklearn.metrics.m... | [((1364, 1378), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1376, 1378), True, 'import pandas as pd\n'), ((1438, 1452), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1450, 1452), True, 'import pandas as pd\n'), ((1596, 1610), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1608, 1610), True, 'import pandas as pd\n'), ((1631, 1645), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1643, 1645), True, 'import pandas as pd\n'), ((13764, 13778), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (13776, 13778), True, 'import pandas as pd\n'), ((21613, 21641), 'pandas.concat', 'pd.concat', (['pred_list'], {'axis': '(0)'}), '(pred_list, axis=0)\n', (21622, 21641), True, 'import pandas as pd\n'), ((21913, 21946), 'pandas.DataFrame', 'pd.DataFrame', (['gt'], {'dtype': '"""float32"""'}), "(gt, dtype='float32')\n", (21925, 21946), True, 'import pandas as pd\n'), ((22444, 22479), 'pandas.DataFrame', 'pd.DataFrame', (['pred'], {'dtype': '"""float32"""'}), "(pred, dtype='float32')\n", (22456, 22479), True, 'import pandas as pd\n'), ((23888, 23902), 'pandas.Series', 'pd.Series', (['mae'], {}), '(mae)\n', (23897, 23902), True, 'import pandas as pd\n'), ((24304, 24324), 'pandas.Series', 'pd.Series', (['rms_error'], {}), '(rms_error)\n', (24313, 24324), True, 'import pandas as pd\n'), ((24907, 24920), 'pandas.Series', 'pd.Series', (['f1'], {}), '(f1)\n', (24916, 24920), True, 'import pandas as pd\n'), ((25276, 25296), 'pandas.Series', 'pd.Series', (['rel_error'], {}), '(rel_error)\n', (25285, 25296), True, 'import pandas as pd\n'), ((13844, 13858), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (13856, 13858), True, 'import pandas as pd\n'), ((18899, 18911), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (18909, 18911), True, 'import matplotlib.pyplot as plt\n'), ((18924, 18962), 'matplotlib.pyplot.plot', 'plt.plot', (['gt_overall[i]'], {'label': '"""truth"""'}), "(gt_overall[i], label='truth')\n", 
(18932, 18962), True, 'import matplotlib.pyplot as plt\n'), ((19070, 19082), 'matplotlib.pyplot.title', 'plt.title', (['i'], {}), '(i)\n', (19079, 19082), True, 'import matplotlib.pyplot as plt\n'), ((19095, 19107), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (19105, 19107), True, 'import matplotlib.pyplot as plt\n'), ((21736, 21759), 'pandas.concat', 'pd.concat', (['data'], {'axis': '(1)'}), '(data, axis=1)\n', (21745, 21759), True, 'import pandas as pd\n'), ((22367, 22419), 'pandas.Series', 'pd.Series', (['app_series_values'], {'index': 'gt_overall.index'}), '(app_series_values, index=gt_overall.index)\n', (22376, 22419), True, 'import pandas as pd\n'), ((23821, 23872), 'sklearn.metrics.mean_absolute_error', 'mean_absolute_error', (['gt[appliance]', 'pred[appliance]'], {}), '(gt[appliance], pred[appliance])\n', (23840, 23872), False, 'from sklearn.metrics import mean_squared_error, mean_absolute_error, f1_score\n'), ((24585, 24608), 'numpy.array', 'np.array', (['gt[appliance]'], {}), '(gt[appliance])\n', (24593, 24608), True, 'import numpy as np\n'), ((24641, 24679), 'numpy.where', 'np.where', (['(gttemp[appliance] < 10)', '(0)', '(1)'], {}), '(gttemp[appliance] < 10, 0, 1)\n', (24649, 24679), True, 'import numpy as np\n'), ((24714, 24739), 'numpy.array', 'np.array', (['pred[appliance]'], {}), '(pred[appliance])\n', (24722, 24739), True, 'import numpy as np\n'), ((24774, 24814), 'numpy.where', 'np.where', (['(predtemp[appliance] < 10)', '(0)', '(1)'], {}), '(predtemp[appliance] < 10, 0, 1)\n', (24782, 24814), True, 'import numpy as np\n'), ((24843, 24891), 'sklearn.metrics.f1_score', 'f1_score', (['gttemp[appliance]', 'predtemp[appliance]'], {}), '(gttemp[appliance], predtemp[appliance])\n', (24851, 24891), False, 'from sklearn.metrics import mean_squared_error, mean_absolute_error, f1_score\n'), ((19016, 19057), 'matplotlib.pyplot.plot', 'plt.plot', (['pred_overall[clf][i]'], {'label': 'clf'}), '(pred_overall[clf][i], label=clf)\n', (19024, 
19057), True, 'import matplotlib.pyplot as plt\n'), ((19548, 19570), 'pandas.DataFrame', 'pd.DataFrame', (['f1_score'], {}), '(f1_score)\n', (19560, 19570), True, 'import pandas as pd\n'), ((24167, 24217), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['gt[appliance]', 'pred[appliance]'], {}), '(gt[appliance], pred[appliance])\n', (24185, 24217), False, 'from sklearn.metrics import mean_squared_error, mean_absolute_error, f1_score\n'), ((19941, 19959), 'pandas.DataFrame', 'pd.DataFrame', (['rmse'], {}), '(rmse)\n', (19953, 19959), True, 'import pandas as pd\n'), ((20308, 20325), 'pandas.DataFrame', 'pd.DataFrame', (['mae'], {}), '(mae)\n', (20320, 20325), True, 'import pandas as pd\n'), ((10756, 10793), 'numpy.zeros', 'np.zeros', (['appliance_readings[0].shape'], {}), '(appliance_readings[0].shape)\n', (10764, 10793), True, 'import numpy as np\n'), ((20747, 20770), 'pandas.DataFrame', 'pd.DataFrame', (['rel_error'], {}), '(rel_error)\n', (20759, 20770), True, 'import pandas as pd\n'), ((10290, 10304), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (10302, 10304), True, 'import pandas as pd\n'), ((7325, 7362), 'numpy.zeros', 'np.zeros', (['appliance_readings[0].shape'], {}), '(appliance_readings[0].shape)\n', (7333, 7362), True, 'import numpy as np\n')] |
"""Computation of graph embeddings and graph kernels.
Author : <NAME>, <NAME>
"""
import numpy as np
import networkx as nx
from gk_weisfeiler_lehman import GK_WL
from gk_shortest_path import GK_SP
def DCE_embedding(X, th=0.0):
"""
Direct connection label embedding.
"""
return np.where(X > th, X, 0.0)
def DR_embedding(X, th=0.0, K=1):
"""Dissimilarity representation based embedding.
From: <NAME>.; <NAME>.; <NAME>.; <NAME>.:
Vector Space Embedding of Undirected Graphs with Fixed-cardinality
Vertex Sequences for Classification, Proceddings of 20th
International Conference on Pattern Recognition (ICPR),
pp.902,905, 23-26, Aug. 2010.
Parameters:
----------
X: ndarray of dimensions (n, d), where n is the number of samples
and d is the number lenght of the vector obtained after
unfolding the upper triangular matrix of the adjancency matrix
of each graph. Dataset.
th: float
Threshold to be applied in the edge weights. Edges with weights
below the given threshold are removed.
K: int
A value to be used when
Return:
------
X: ndarray
Dataset embedded into a vector space
"""
# Application of threshold and changing from similarities to
# dissimilarities the weight edges values.
X = np.where(X > th, np.max(X) - X, 0.0)
XX = np.zeros((X.shape[0], X.shape[0]))
for t, v in enumerate(X):
for q, u in enumerate(X):
aux = 0
for i in range(len(v)):
if v[i] == 0 or u[i] == 0:
aux += K
else:
aux += np.abs(u[i] - v[i])
XX[t, q] = aux
return XX
def WL_K_embedding(X, th=0.):
"""Computation of Weisfeiler-Lehman graph kernel. The kernel matrix is
used as an embedding.
Parameters:
----------
X: ndarray of dimensions (n, d), where n is the number of samples
and d is the lenght of the vector obtained after unfolding the
upper triangular matrix of the adjancency matrix of each graph.
Dataset.
th: float
Threshold to be applied in the edge weights. Edges with weights
below the given threshold are removed.
Return:
------
X: ndarray
Dataset embedded into a vector space
"""
dim = int(np.sqrt(X.shape[1]*2)+1)
graphs = []
for t, v in enumerate(X):
# Compute adjacency matrix
mat = np.zeros((dim, dim))
cont = 0
for i in range(dim-1):
for j in range(i+1, dim):
mat[i, j] = v[cont]
mat[j, i] = v[cont]
cont += 1
# Applying the threshold and keeping binary edges
adj_mat = np.where(mat > th, 1.0, 0)
g = nx.from_numpy_matrix(adj_mat)
graphs.append(g)
gk_wl = GK_WL()
XX = gk_wl.compare_list(graphs, node_label=False)
return XX
def SP_K_embedding(X, th=0.):
"""Computation of Shortest_Path graph kernel. The kernel matrix is
used as an embedding.
Parameters:
----------
X: ndarray of dimensions (n, d), where n is the number of samples
and d is the lenght of the vector obtained after unfolding the
upper triangular matrix of the adjancency matrix of each graph.
Dataset.
th: float
Threshold to be applied in the edge weights. Edges with weights
below the given threshold are removed.
Return:
------
X: ndarray
Dataset embedded into a vector space
"""
dim = int(np.sqrt(X.shape[1] * 2) + 1)
graphs = []
for t, v in enumerate(X):
# Compute adjacency matrix
mat = np.zeros((dim, dim))
cont = 0
for i in range(dim-1):
for j in range(i+1, dim):
mat[i, j] = v[cont]
mat[j, i] = v[cont]
cont += 1
# Applying the threshold and keeping binary edges
adj_mat = np.where(mat > th, 1.0, 0)
g = nx.from_numpy_matrix(adj_mat)
graphs.append(g)
gk_sp = GK_SP()
XX = gk_sp.compare_list(graphs)
return XX
| [
"numpy.abs",
"gk_shortest_path.GK_SP",
"numpy.sqrt",
"numpy.where",
"numpy.max",
"numpy.zeros",
"networkx.from_numpy_matrix",
"gk_weisfeiler_lehman.GK_WL"
] | [((297, 321), 'numpy.where', 'np.where', (['(X > th)', 'X', '(0.0)'], {}), '(X > th, X, 0.0)\n', (305, 321), True, 'import numpy as np\n'), ((1382, 1416), 'numpy.zeros', 'np.zeros', (['(X.shape[0], X.shape[0])'], {}), '((X.shape[0], X.shape[0]))\n', (1390, 1416), True, 'import numpy as np\n'), ((2861, 2868), 'gk_weisfeiler_lehman.GK_WL', 'GK_WL', ([], {}), '()\n', (2866, 2868), False, 'from gk_weisfeiler_lehman import GK_WL\n'), ((4076, 4083), 'gk_shortest_path.GK_SP', 'GK_SP', ([], {}), '()\n', (4081, 4083), False, 'from gk_shortest_path import GK_SP\n'), ((2472, 2492), 'numpy.zeros', 'np.zeros', (['(dim, dim)'], {}), '((dim, dim))\n', (2480, 2492), True, 'import numpy as np\n'), ((2754, 2780), 'numpy.where', 'np.where', (['(mat > th)', '(1.0)', '(0)'], {}), '(mat > th, 1.0, 0)\n', (2762, 2780), True, 'import numpy as np\n'), ((2793, 2822), 'networkx.from_numpy_matrix', 'nx.from_numpy_matrix', (['adj_mat'], {}), '(adj_mat)\n', (2813, 2822), True, 'import networkx as nx\n'), ((3687, 3707), 'numpy.zeros', 'np.zeros', (['(dim, dim)'], {}), '((dim, dim))\n', (3695, 3707), True, 'import numpy as np\n'), ((3969, 3995), 'numpy.where', 'np.where', (['(mat > th)', '(1.0)', '(0)'], {}), '(mat > th, 1.0, 0)\n', (3977, 3995), True, 'import numpy as np\n'), ((4008, 4037), 'networkx.from_numpy_matrix', 'nx.from_numpy_matrix', (['adj_mat'], {}), '(adj_mat)\n', (4028, 4037), True, 'import networkx as nx\n'), ((1353, 1362), 'numpy.max', 'np.max', (['X'], {}), '(X)\n', (1359, 1362), True, 'import numpy as np\n'), ((2352, 2375), 'numpy.sqrt', 'np.sqrt', (['(X.shape[1] * 2)'], {}), '(X.shape[1] * 2)\n', (2359, 2375), True, 'import numpy as np\n'), ((3563, 3586), 'numpy.sqrt', 'np.sqrt', (['(X.shape[1] * 2)'], {}), '(X.shape[1] * 2)\n', (3570, 3586), True, 'import numpy as np\n'), ((1658, 1677), 'numpy.abs', 'np.abs', (['(u[i] - v[i])'], {}), '(u[i] - v[i])\n', (1664, 1677), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# file: nms.py
# author: JinTian
# time: 2018/6/11 8:42 PM
# Copyright 2018 JinTian. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
"""
A quite normal work of nms
"""
import numpy as np
from .config import global_config
class NMS(object):
def __init__(self):
pass
@staticmethod
def _iou(box1, box2):
b1_x0, b1_y0, b1_x1, b1_y1 = box1
b2_x0, b2_y0, b2_x1, b2_y1 = box2
int_x0 = max(b1_x0, b2_x0)
int_y0 = max(b1_y0, b2_y0)
int_x1 = min(b1_x1, b2_x1)
int_y1 = min(b1_y1, b2_y1)
int_area = (int_x1 - int_x0) * (int_y1 - int_y0)
b1_area = (b1_x1 - b1_x0) * (b1_y1 - b1_y0)
b2_area = (b2_x1 - b2_x0) * (b2_y1 - b2_y0)
iou = int_area / (b1_area + b2_area - int_area + 1e-05)
return iou
def nms(self, predictions_with_boxes):
"""
do nms
:param predictions_with_boxes:
:param confidence_threshold:
:param iou_threshold:
:return:
"""
conf_mask = np.expand_dims((predictions_with_boxes[:, :, 4] > global_config.nms_cf_threshold), -1)
predictions = predictions_with_boxes * conf_mask
result = {}
for i, image_pred in enumerate(predictions):
shape = image_pred.shape
non_zero_idxs = np.nonzero(image_pred)
image_pred = image_pred[non_zero_idxs]
image_pred = image_pred.reshape(-1, shape[-1])
bbox_attrs = image_pred[:, :5]
classes = image_pred[:, 5:]
classes = np.argmax(classes, axis=-1)
unique_classes = list(set(classes.reshape(-1)))
for cls in unique_classes:
cls_mask = classes == cls
cls_boxes = bbox_attrs[np.nonzero(cls_mask)]
cls_boxes = cls_boxes[cls_boxes[:, -1].argsort()[::-1]]
cls_scores = cls_boxes[:, -1]
cls_boxes = cls_boxes[:, :-1]
while len(cls_boxes) > 0:
box = cls_boxes[0]
score = cls_scores[0]
if not cls in result:
result[cls] = []
result[cls].append((box, score))
cls_boxes = cls_boxes[1:]
ious = np.array([self._iou(box, x) for x in cls_boxes])
iou_mask = ious < global_config.nms_iou_threshold
cls_boxes = cls_boxes[np.nonzero(iou_mask)]
cls_scores = cls_scores[np.nonzero(iou_mask)]
return result
nms = NMS()
| [
"numpy.nonzero",
"numpy.argmax",
"numpy.expand_dims"
] | [((1641, 1730), 'numpy.expand_dims', 'np.expand_dims', (['(predictions_with_boxes[:, :, 4] > global_config.nms_cf_threshold)', '(-1)'], {}), '(predictions_with_boxes[:, :, 4] > global_config.\n nms_cf_threshold, -1)\n', (1655, 1730), True, 'import numpy as np\n'), ((1924, 1946), 'numpy.nonzero', 'np.nonzero', (['image_pred'], {}), '(image_pred)\n', (1934, 1946), True, 'import numpy as np\n'), ((2163, 2190), 'numpy.argmax', 'np.argmax', (['classes'], {'axis': '(-1)'}), '(classes, axis=-1)\n', (2172, 2190), True, 'import numpy as np\n'), ((2373, 2393), 'numpy.nonzero', 'np.nonzero', (['cls_mask'], {}), '(cls_mask)\n', (2383, 2393), True, 'import numpy as np\n'), ((3053, 3073), 'numpy.nonzero', 'np.nonzero', (['iou_mask'], {}), '(iou_mask)\n', (3063, 3073), True, 'import numpy as np\n'), ((3119, 3139), 'numpy.nonzero', 'np.nonzero', (['iou_mask'], {}), '(iou_mask)\n', (3129, 3139), True, 'import numpy as np\n')] |
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.svm import SVR
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os.path as osp
import gc
def get_model(x, y):
regr = SVR()
pipe = Pipeline(steps=[('reg', regr)])
param_grid = {
'reg__kernel':('linear', 'rbf'),
'reg__C': [0.01, 0.1, 1, 10],
'reg__epsilon': [0.1, 0.2, 0.4, 0.5, 0.8, 1., 1.5, 2, 3],
'reg__gamma': ['auto', 'scale'],
}
search = GridSearchCV(pipe, param_grid, iid=False, cv=5,
return_train_score=False, n_jobs = 4)
search.fit(x, y)
return search.best_estimator_
def read(file_name):
data = pd.read_csv(file_name, sep = '\t')
x = np.array([[float(year)] for year in list(data)])
y = np.array([[year] for year in np.array(data).reshape(-1)]).reshape(-1, )
return x, y
if __name__ == '__main__':
data_root = '../data/machine_learning'
file_names = ['black_african_american.tsv', 'female.tsv', 'hispanic_latino.tsv', 'male.tsv', 'under_18_years.tsv', 'white.tsv']
names = ['black african american', 'female ', 'hispanic latino', 'male', 'under 18 years', 'white']
query = np.array([[2018], [2019], [2020]]).reshape(-1, )
for fn, n in zip(file_names, names):
x, y = read(osp.join(data_root, fn))
#predict(x, y, np.array([[2018], [2020]]))
model = get_model(x, y)
y_model = model.predict(x)
#y_query = model.predict(query)
fig =plt.figure()
plt.title(n)
plt.scatter(x, y, color='green')
#plt.scatter(query, y_query, color='black')
plt.plot(x, y_model, color='blue', linewidth=2)
plt.savefig(fn.split('.')[0] + '.jpg')
# Clean RAM
fig.clf()
plt.close()
gc.collect() | [
"sklearn.model_selection.GridSearchCV",
"pandas.read_csv",
"matplotlib.pyplot.plot",
"os.path.join",
"matplotlib.pyplot.close",
"numpy.array",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.scatter",
"gc.collect",
"sklearn.pipeline.Pipeline",
"matplotlib.pyplot.title",
"sklearn.svm.SVR"
] | [((351, 356), 'sklearn.svm.SVR', 'SVR', ([], {}), '()\n', (354, 356), False, 'from sklearn.svm import SVR\n'), ((365, 396), 'sklearn.pipeline.Pipeline', 'Pipeline', ([], {'steps': "[('reg', regr)]"}), "(steps=[('reg', regr)])\n", (373, 396), False, 'from sklearn.pipeline import Pipeline\n'), ((588, 675), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['pipe', 'param_grid'], {'iid': '(False)', 'cv': '(5)', 'return_train_score': '(False)', 'n_jobs': '(4)'}), '(pipe, param_grid, iid=False, cv=5, return_train_score=False,\n n_jobs=4)\n', (600, 675), False, 'from sklearn.model_selection import GridSearchCV\n'), ((755, 787), 'pandas.read_csv', 'pd.read_csv', (['file_name'], {'sep': '"""\t"""'}), "(file_name, sep='\\t')\n", (766, 787), True, 'import pandas as pd\n'), ((1513, 1525), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1523, 1525), True, 'import matplotlib.pyplot as plt\n'), ((1528, 1540), 'matplotlib.pyplot.title', 'plt.title', (['n'], {}), '(n)\n', (1537, 1540), True, 'import matplotlib.pyplot as plt\n'), ((1543, 1575), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y'], {'color': '"""green"""'}), "(x, y, color='green')\n", (1554, 1575), True, 'import matplotlib.pyplot as plt\n'), ((1626, 1673), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y_model'], {'color': '"""blue"""', 'linewidth': '(2)'}), "(x, y_model, color='blue', linewidth=2)\n", (1634, 1673), True, 'import matplotlib.pyplot as plt\n'), ((1745, 1756), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1754, 1756), True, 'import matplotlib.pyplot as plt\n'), ((1759, 1771), 'gc.collect', 'gc.collect', ([], {}), '()\n', (1769, 1771), False, 'import gc\n'), ((1242, 1276), 'numpy.array', 'np.array', (['[[2018], [2019], [2020]]'], {}), '([[2018], [2019], [2020]])\n', (1250, 1276), True, 'import numpy as np\n'), ((1343, 1366), 'os.path.join', 'osp.join', (['data_root', 'fn'], {}), '(data_root, fn)\n', (1351, 1366), True, 'import os.path as osp\n'), ((878, 892), 
'numpy.array', 'np.array', (['data'], {}), '(data)\n', (886, 892), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
import matplotlib_venn as vplt
x = np.random.randint(2, size=(10, 3))
df = pd.DataFrame(x, columns=['A', 'B', 'C'])
print(df)
v = vplt.venn3(subsets=(1, 1, 1, 1, 1, 1, 1))
df = pd.DataFrame([[1, 1], [1, 0], [0, 1], [0, 0]], columns=['A', 'B'])
sets = [set(np.argwhere(v).ravel()) for k, v in df.items()]
venn3(sets, df.columns)
plt.show()
| [
"pandas.DataFrame",
"matplotlib_venn.venn3",
"numpy.random.randint",
"numpy.argwhere"
] | [((75, 109), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': '(10, 3)'}), '(2, size=(10, 3))\n', (92, 109), True, 'import numpy as np\n'), ((115, 155), 'pandas.DataFrame', 'pd.DataFrame', (['x'], {'columns': "['A', 'B', 'C']"}), "(x, columns=['A', 'B', 'C'])\n", (127, 155), True, 'import pandas as pd\n'), ((170, 211), 'matplotlib_venn.venn3', 'vplt.venn3', ([], {'subsets': '(1, 1, 1, 1, 1, 1, 1)'}), '(subsets=(1, 1, 1, 1, 1, 1, 1))\n', (180, 211), True, 'import matplotlib_venn as vplt\n'), ((218, 284), 'pandas.DataFrame', 'pd.DataFrame', (['[[1, 1], [1, 0], [0, 1], [0, 0]]'], {'columns': "['A', 'B']"}), "([[1, 1], [1, 0], [0, 1], [0, 0]], columns=['A', 'B'])\n", (230, 284), True, 'import pandas as pd\n'), ((298, 312), 'numpy.argwhere', 'np.argwhere', (['v'], {}), '(v)\n', (309, 312), True, 'import numpy as np\n')] |
import collections
import copy
import numpy
from typing import Dict, List
import warnings
from .constants import Messages
from .datatypes import Measurement
from .datatypes import TimeSeries
from .datatypes import Sensitivity
from .constants import Constants
SINGLE_ID = Constants.single_id
class OwnDict(collections.OrderedDict):
"""
Extendeds OrderedDict with `to_numpy()` method
"""
def to_numpy(self) -> numpy.ndarray:
return numpy.array(list((self.values())))
class Calculations():
@staticmethod
def cov_into_corr(Cov:numpy.ndarray) -> numpy.ndarray:
"""
Calculates correlation matrix from variance-covariance matrix.
Arguments
---------
Cov : numpy.ndarray
Variance-covariance matrix, must be sqaure and positive semi-definite.
Returns
-------
Corr : numpy.ndarray
Correlation matrix for Cov.
Raises
------
ValueError
Cov is not square.
"""
if Cov.shape[0] != Cov.shape[1]:
raise ValueError('Cov must be square')
Corr = numpy.zeros_like(Cov) * numpy.nan
for i in range(Cov.shape[0]):
for j in range(Cov.shape[0]):
Corr[i, j] = Cov[i, j] / (numpy.sqrt(Cov[i, i]) * numpy.sqrt(Cov[j, j]))
return Corr
class Helpers():
@staticmethod
def bounds_to_floats(bounds:List[tuple]) -> List[tuple]:
"""
Casts bounds from int to float.
"""
new_bounds = []
for _bounds in bounds:
lower, upper = _bounds
new_bounds.append((float(lower), float(upper)))
return new_bounds
@staticmethod
def has_unique_ids(values, report:bool=True) -> bool:
"""
Verifies that a list or dict has only (case-insensitive) unique items or keys, respectively.
Keyword arguments
-----------------
report : bool
To show the non-unique ids.
"""
success = True
if isinstance(values, set):
return success
if len(values) == 1:
return success
_values = copy.deepcopy(values)
if isinstance(_values, list):
_values.sort(key=str.lower)
values_str_lower = [_value.lower() for _value in _values]
if len(_values) > len(set(values_str_lower)):
success = False
elif isinstance(_values, (dict, OwnDict)):
_values = list(_values.keys())
values_str_lower = [_value.lower() for _value in _values]
if len(_values) > len(set(values_str_lower)):
success = False
else:
raise TypeError(f'Type {type(values)} cannot be handled.')
if not success and report:
print(f'Bad, non-unique (case-insensitive) ids: {_values}')
return success
@staticmethod
def all_measurements_have_errors(measurements:List[Measurement]) -> bool:
"""
Checks whether if Measurement objects have errors.
"""
with_errors = []
for measurement in measurements:
if measurement.errors is None:
with_errors.append(False)
else:
with_errors.append(True)
return all(with_errors)
@staticmethod
def get_unique_timepoints(time_series:List[TimeSeries]) -> numpy.ndarray:
"""
Creates a joint unique time vector from all timepoints of a list of TimeSeries objects.
Arguments
---------
time_series : List[TimeSeries]
The list of TimeSeries (and subclasses thereof) for which a joint time vector is wanted.
Returns
-------
t_all : numpy.ndarray
The joint vector of time points.
"""
_t = [
_timepoint
for _time_series in time_series
for _timepoint in _time_series.timepoints.flatten()
]
return numpy.unique(_t)
@staticmethod
def extract_time_series(time_series:List[TimeSeries], name:str, replicate_id:str, no_extraction_warning:bool=False) -> TimeSeries:
"""
Extract a specific TimeSeries object, identified by its properties `name` and `replicate_id`.
In case no match is found, None is returned.
Arguments
---------
time_series : List[TimeSeries]
The list from which the specific TimeSeries object shall be extracted.
name : str
The identifying `name` property.
replicate_id : str
The identifying `replicate_id` property.
Keyword arguments
-----------------
no_extraction_warning : bool
Whether to raise a warning when no TimeSeries object can be extracted.
Default is False
Returns
-------
extracted_time_series : TimeSeries or None
Raises
------
ValueError
Multiple TimeSeries objects have the same `name` and `replicate_id` property.
Warns
-----
UserWarning
No TimeSeries object match the criteria.
Only raised for `no_extraction_warning` set to True.
"""
_extracted_time_series = [
_time_series for _time_series in time_series
if _time_series.name == name and _time_series.replicate_id == replicate_id
]
if len(_extracted_time_series) > 1:
raise ValueError('List of (subclassed) TimeSeries objects is ambigous. Found multiple occurences ')
if len(_extracted_time_series) == 0:
extracted_time_series = None
if no_extraction_warning:
warnings.warn(f'Could not extract a TimeSeries object with replicate_id {replicate_id} and name {name}')
else:
extracted_time_series = _extracted_time_series[0]
return extracted_time_series
@staticmethod
def get_parameters_length(parameter_collections:Dict[str, numpy.ndarray]) -> int:
"""
Arguments
---------
parameter_collections : Dict[str, numpy.ndarray]
A set of parameters (model parameters, initial values, observation parameters).
Returns
-------
length : int
The number of values for each parameter
Raises
------
ValueError
Parameters have different number of estimated values.
"""
lengths = set([len(parameter_collections[_p]) for _p in parameter_collections])
if len(lengths) > 1:
raise ValueError('Parameters have different number of estimated values.')
length = list(lengths)[0]
return length
@staticmethod
def split_parameters_distributions(parameter_collections:Dict[str, numpy.ndarray]) -> List[Dict]:
"""
Arguments
---------
parameter_collections : Dict[str, numpy.ndarray]
A set of parameters (model parameters, initial values, observation parameters).
Returns
-------
splits : List[Dict]
A list of separate parameter dictonaries for each slice of the parameter collections.
"""
_length = Helpers.get_parameters_length(parameter_collections)
splits = [
{
_p : parameter_collections[_p][i]
for _p in parameter_collections
}
for i in range(_length)
]
return splits | [
"numpy.sqrt",
"numpy.unique",
"copy.deepcopy",
"warnings.warn",
"numpy.zeros_like"
] | [((2319, 2340), 'copy.deepcopy', 'copy.deepcopy', (['values'], {}), '(values)\n', (2332, 2340), False, 'import copy\n'), ((4227, 4243), 'numpy.unique', 'numpy.unique', (['_t'], {}), '(_t)\n', (4239, 4243), False, 'import numpy\n'), ((1213, 1234), 'numpy.zeros_like', 'numpy.zeros_like', (['Cov'], {}), '(Cov)\n', (1229, 1234), False, 'import numpy\n'), ((6078, 6192), 'warnings.warn', 'warnings.warn', (['f"""Could not extract a TimeSeries object with replicate_id {replicate_id} and name {name}"""'], {}), "(\n f'Could not extract a TimeSeries object with replicate_id {replicate_id} and name {name}'\n )\n", (6091, 6192), False, 'import warnings\n'), ((1372, 1393), 'numpy.sqrt', 'numpy.sqrt', (['Cov[i, i]'], {}), '(Cov[i, i])\n', (1382, 1393), False, 'import numpy\n'), ((1396, 1417), 'numpy.sqrt', 'numpy.sqrt', (['Cov[j, j]'], {}), '(Cov[j, j])\n', (1406, 1417), False, 'import numpy\n')] |
from runner.koan import *
from numpy import ones
class AboutOnes(Koan):
def test_ones(self):
numpy_ones = ones([[3,2],[3,4]])
self.assertEquals(__, numpy_ones.tolist())
def test_ones_from_int(self):
numpy_ones = ones(3)
self.assertEquals(__, numpy_ones.tolist())
def test_ones_specifying_type(self):
numpy_ones = ones([7,1], dtype=float)
self.assertEquals(__, numpy_ones.tolist())
def test_ones_from_tuple(self):
numpy_ones = ones((1,2))
self.assertEquals(__, numpy_ones.tolist()) | [
"numpy.ones"
] | [((120, 142), 'numpy.ones', 'ones', (['[[3, 2], [3, 4]]'], {}), '([[3, 2], [3, 4]])\n', (124, 142), False, 'from numpy import ones\n'), ((247, 254), 'numpy.ones', 'ones', (['(3)'], {}), '(3)\n', (251, 254), False, 'from numpy import ones\n'), ((369, 394), 'numpy.ones', 'ones', (['[7, 1]'], {'dtype': 'float'}), '([7, 1], dtype=float)\n', (373, 394), False, 'from numpy import ones\n'), ((503, 515), 'numpy.ones', 'ones', (['(1, 2)'], {}), '((1, 2))\n', (507, 515), False, 'from numpy import ones\n')] |
import os
import numpy as np
# set ratio of the labeled samples
numerator = 1
denominator = 8
labeled_ratio = numerator / denominator
# read the samples list
samples_list = 'VOCdevkit/VOC2012/ImageSets/Segmentation/train_aug.txt'
if not os.path.exists(samples_list):
print('The PascalVOC 2012 dataset is not prepared.\n'
'Please run \'sh prepare.sh\' to prepare it.')
with open(samples_list, 'r') as f:
samples = f.read().splitlines()
np.random.shuffle(samples)
# get the sublabeled list
labeled_num = int(len(samples) * labeled_ratio + 1)
labeled_list = samples[:labeled_num]
# create the output path and save the sublabeled list
out_path = 'sublabeled_prefix/{0}-{1}'.format(numerator, denominator)
if not os.path.exists(out_path):
os.makedirs(out_path)
out_file = os.path.join(out_path, '{0}.txt'.format(len(os.listdir(out_path))))
with open(out_file, 'w') as f:
for sample in labeled_list:
f.write(sample + '\n')
| [
"os.path.exists",
"os.listdir",
"os.makedirs",
"numpy.random.shuffle"
] | [((456, 482), 'numpy.random.shuffle', 'np.random.shuffle', (['samples'], {}), '(samples)\n', (473, 482), True, 'import numpy as np\n'), ((239, 267), 'os.path.exists', 'os.path.exists', (['samples_list'], {}), '(samples_list)\n', (253, 267), False, 'import os\n'), ((731, 755), 'os.path.exists', 'os.path.exists', (['out_path'], {}), '(out_path)\n', (745, 755), False, 'import os\n'), ((761, 782), 'os.makedirs', 'os.makedirs', (['out_path'], {}), '(out_path)\n', (772, 782), False, 'import os\n'), ((839, 859), 'os.listdir', 'os.listdir', (['out_path'], {}), '(out_path)\n', (849, 859), False, 'import os\n')] |
from sympy import *
import numpy as np
import cmath
import math
import matplotlib.pyplot as plt
import matplotlib.axes as ax
from mpl_toolkits.mplot3d import Axes3D
from scipy.integrate import ode
global dx
global tau
tau = 100
dx = 0.1
num = int(2 / dx) + 1
def ta(t):
return -math.log(3.54466 - t)
def xi(x, t):
return x / math.sqrt(4 * ta(t) * (3.54466 - t))
def u_exact(x, t):
return ta(t) - math.log(1 + xi(x, t)**2)
def MMPDE(u, x, dt):
num = int(u.shape[0])
A = np.zeros((num, num))
for i in range(num - 1):
A[i][i + 1] = -dt * 0.5 * (math.exp(u[i + 1]) + math.exp(u[i])) / (math.exp(u[i]) * tau * dx**2)
A[i + 1][i] = -dt * 0.5 * (math.exp(u[i]) + math.exp(u[i - 1])) / (math.exp(u[i]) * tau * dx**2)
for i in range(1, num - 1):
A[i][i] = 1 - A[i][i + 1] - A[i][i - 1]
A[0][0] = 1
A[num - 1][num - 1] = 1
A = np.mat(A)
x = np.mat(x)
U = A.I * x.T
U = np.array(U)
soln = np.zeros(num)
soln[0] = -1
soln[num - 1] = 1
for i in range(1, num - 1):
soln[i] = U[i][0]
return soln
def f(t, y):
n = int(y.shape[0])
U = np.zeros(n)
U[0] = 0
U[n - 1] = 0
for i in range(1, n - 1):
U[i] = (y[i + 1] - 2 * y[i] + y[i - 1]) / dx**2 + math.exp(y[i])
return U
def amm_f(t, y, x):
n = int(y.shape[0])
U = np.zeros(n)
U[0] = 0
U[n - 1] = 0
for i in range(1, n - 1):
U[i] = ( 0.5 * (math.exp(y[i + 1]) + math.exp(y[i])) * (x[i + 1] - x[i]) - 0.5 * (math.exp(y[i]) + math.exp(y[i - 1])) * (x[i] - x[i - 1])) / (math.exp(y[i]) * tau * dx**2) * (y[i + 1] - y[i - 1]) / (x[i + 1] - x[i - 1]) + 2 * ((y[i + 1] - y[i]) / (x[i + 1] - x[i]) - (y[i] - y[i - 1]) / (x[i] - x[i - 1])) / (x[i + 1] - x[i - 1]) + math.exp(y[i])
return U
if __name__ == "__main__":
t_0 = 0
e1 = 0
e2 = 0
X = np.linspace(-1, 1, num)
mesh = X
y_0 = np.zeros(num)
y_1 = np.zeros(num)
y_2 = np.zeros(num)
r = ode(f).set_integrator('zvode', method='bdf')
r.set_initial_value(y_1, t_0)
#s = ode(f).set_integrator('zvode', method='bdf')
#s.set_initial_value(y_2, t_0)
t_1 = 3.54466
dt = 0.0001
while r.successful() and r.t <= t_1:
r.integrate(r.t + dt)
print(r.t)
#s.integrate(s.t + dt)
#mesh = MMPDE(s.y, mesh, dt)
Y = r.y
T = r.t
r = ode(f).set_integrator('zvode', method='bdf')
r.set_initial_value(Y, T).set_f_params(mesh)
if abs(r.t - 3.5) < 1e-6:
for i in range(1, num - 1):
y_0[i] = u_exact(-1 + i * dx, r.t)
for i in range(int((num - 1) / 2), int((num - 1) * 3 / 4)):
e1 += (y_0[i] - r.y[i])**2
e1 = e1 * 4 / int(num - 1)
print(math.sqrt(e1))
#for i in range(1, num - 1):
# y_0[i] = u_exact(mesh[i], s.t)
#for i in range(int((num - 1) / 2), int((num - 1) * 3 / 4)):
# e2 += (y_0[i] - s.y[i])**2
#e2 = e2 * 4 / int(num - 1)
#print(math.sqrt(e2))
break
| [
"numpy.mat",
"scipy.integrate.ode",
"math.sqrt",
"math.log",
"numpy.array",
"numpy.zeros",
"numpy.linspace",
"math.exp"
] | [((494, 514), 'numpy.zeros', 'np.zeros', (['(num, num)'], {}), '((num, num))\n', (502, 514), True, 'import numpy as np\n'), ((886, 895), 'numpy.mat', 'np.mat', (['A'], {}), '(A)\n', (892, 895), True, 'import numpy as np\n'), ((904, 913), 'numpy.mat', 'np.mat', (['x'], {}), '(x)\n', (910, 913), True, 'import numpy as np\n'), ((940, 951), 'numpy.array', 'np.array', (['U'], {}), '(U)\n', (948, 951), True, 'import numpy as np\n'), ((963, 976), 'numpy.zeros', 'np.zeros', (['num'], {}), '(num)\n', (971, 976), True, 'import numpy as np\n'), ((1136, 1147), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (1144, 1147), True, 'import numpy as np\n'), ((1347, 1358), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (1355, 1358), True, 'import numpy as np\n'), ((1859, 1882), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', 'num'], {}), '(-1, 1, num)\n', (1870, 1882), True, 'import numpy as np\n'), ((1906, 1919), 'numpy.zeros', 'np.zeros', (['num'], {}), '(num)\n', (1914, 1919), True, 'import numpy as np\n'), ((1930, 1943), 'numpy.zeros', 'np.zeros', (['num'], {}), '(num)\n', (1938, 1943), True, 'import numpy as np\n'), ((1954, 1967), 'numpy.zeros', 'np.zeros', (['num'], {}), '(num)\n', (1962, 1967), True, 'import numpy as np\n'), ((284, 305), 'math.log', 'math.log', (['(3.54466 - t)'], {}), '(3.54466 - t)\n', (292, 305), False, 'import math\n'), ((1266, 1280), 'math.exp', 'math.exp', (['y[i]'], {}), '(y[i])\n', (1274, 1280), False, 'import math\n'), ((1760, 1774), 'math.exp', 'math.exp', (['y[i]'], {}), '(y[i])\n', (1768, 1774), False, 'import math\n'), ((1976, 1982), 'scipy.integrate.ode', 'ode', (['f'], {}), '(f)\n', (1979, 1982), False, 'from scipy.integrate import ode\n'), ((2380, 2386), 'scipy.integrate.ode', 'ode', (['f'], {}), '(f)\n', (2383, 2386), False, 'from scipy.integrate import ode\n'), ((2775, 2788), 'math.sqrt', 'math.sqrt', (['e1'], {}), '(e1)\n', (2784, 2788), False, 'import math\n'), ((579, 597), 'math.exp', 'math.exp', (['u[i + 1]'], {}), '(u[i + 
1])\n', (587, 597), False, 'import math\n'), ((600, 614), 'math.exp', 'math.exp', (['u[i]'], {}), '(u[i])\n', (608, 614), False, 'import math\n'), ((619, 633), 'math.exp', 'math.exp', (['u[i]'], {}), '(u[i])\n', (627, 633), False, 'import math\n'), ((684, 698), 'math.exp', 'math.exp', (['u[i]'], {}), '(u[i])\n', (692, 698), False, 'import math\n'), ((701, 719), 'math.exp', 'math.exp', (['u[i - 1]'], {}), '(u[i - 1])\n', (709, 719), False, 'import math\n'), ((724, 738), 'math.exp', 'math.exp', (['u[i]'], {}), '(u[i])\n', (732, 738), False, 'import math\n'), ((1570, 1584), 'math.exp', 'math.exp', (['y[i]'], {}), '(y[i])\n', (1578, 1584), False, 'import math\n'), ((1443, 1461), 'math.exp', 'math.exp', (['y[i + 1]'], {}), '(y[i + 1])\n', (1451, 1461), False, 'import math\n'), ((1464, 1478), 'math.exp', 'math.exp', (['y[i]'], {}), '(y[i])\n', (1472, 1478), False, 'import math\n'), ((1509, 1523), 'math.exp', 'math.exp', (['y[i]'], {}), '(y[i])\n', (1517, 1523), False, 'import math\n'), ((1526, 1544), 'math.exp', 'math.exp', (['y[i - 1]'], {}), '(y[i - 1])\n', (1534, 1544), False, 'import math\n')] |
import numpy as np
from keras import backend as K
from keras.models import Model
from keras.losses import binary_crossentropy
from keras.layers import Input, Dense, Dropout
from keras.regularizers import l2
from sklearn.model_selection import train_test_split
class DropoutVAE:
    """Variational-autoencoder-style model with MC-dropout in the decoder.

    The encoder maps inputs to a latent mean and log-variance; unlike a
    standard VAE there is no stochastic sampling ("z") layer -- the decoder
    is fed the latent mean directly. Dropout layers in the decoder can be
    kept active at prediction time (see ``mean_predict``) to obtain
    stochastic forward passes.
    """
    def __init__(self, original_dim, input_shape,
                 intermediate_dim=32, latent_dim=3, dropout=0.05,
                 summary=False):
        # All construction work is delegated to _build_model.
        self._build_model(original_dim, input_shape,
                          intermediate_dim,
                          latent_dim, summary,
                          dropout)
    def _build_model(self, original_dim, input_shape, intermediate_dim, latent_dim,
                     summary=False, dropout=0.05):
        """Build the encoder, decoder and combined model, then compile.

        Args:
            original_dim: dimensionality of the reconstruction target.
            input_shape: shape tuple of the encoder input.
            intermediate_dim: width of the first hidden layer (the second
                hidden layer uses half of it).
            latent_dim: size of the latent representation.
            summary: if True, print the combined model summary.
            dropout: dropout rate used in the decoder.
        """
        # ---- encoder: input -> (z_mean, z_log_var) ----
        inputs = Input(shape=input_shape, name='encoder_input')
        x = inputs
        x = Dense(intermediate_dim, activation='relu')(x)
        x = Dense(intermediate_dim//2, activation='relu')(x)
        z_mean = Dense(latent_dim, name='z_mean')(x)
        z_log_var = Dense(latent_dim, name='z_log_var')(x)
        # We remove the z layer (the sampling "z" layer is used in a
        # standard VAE but not here).
        self.encoder = Model(inputs, [z_mean, z_log_var],
                             name='encoder')
        # ---- decoder: latent -> reconstruction, with dropout + L2 ----
        latent_inputs = Input(shape=(latent_dim,),
                              name='z_sampling')
        x = latent_inputs
        x = Dense(intermediate_dim//2, activation='relu',
                  kernel_regularizer=l2(1e-4),
                  bias_regularizer=l2(1e-4))(x)
        x = Dropout(dropout)(x)
        x = Dense(intermediate_dim, activation='relu',
                  kernel_regularizer=l2(1e-4),
                  bias_regularizer=l2(1e-4))(x)
        x = Dropout(dropout)(x)
        outputs = Dense(original_dim, activation='sigmoid',
                        kernel_regularizer=l2(1e-4),
                        bias_regularizer=l2(1e-4))(x)
        self.decoder = Model(latent_inputs,
                             outputs,
                             name='decoder')
        # Here we take the mean (not the z-layer) as the decoder input.
        outputs = self.decoder(self.encoder(inputs)[0])
        self.vae = Model(inputs, outputs,
                         name='vae_mlp')
        # Loss = scaled reconstruction term + KL divergence of the latent
        # distribution from a standard normal, added via add_loss.
        reconstruction_loss = binary_crossentropy(inputs, outputs)
        reconstruction_loss *= original_dim
        kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)
        kl_loss = K.sum(kl_loss, axis=-1)
        kl_loss *= -0.5
        vae_loss = K.mean(reconstruction_loss + kl_loss)
        self.vae.add_loss(vae_loss)
        self.vae.compile(optimizer='adam')
        if summary:
            print(self.vae.summary())
    def fit(self, x_train, x_test, epochs=100, batch_size=100,
            verbose=1):
        """Train the combined model on x_train, validating on x_test."""
        self.vae.fit(x_train,
                     shuffle=True,
                     epochs=epochs,
                     batch_size=batch_size,
                     verbose=verbose,
                     validation_data=(x_test, None))
    def fit_unsplit(self, X, epochs=100, batch_size=100, verbose=1):
        """Split X 50/50 into train/validation, then train (see fit)."""
        x_train, x_test = train_test_split(X, test_size = 0.5)
        self.fit(x_train, x_test, epochs, batch_size, verbose)
    def encoder_predict(self, x_test, batch_size=100):
        """Return [z_mean, z_log_var] predicted by the encoder for x_test."""
        return self.encoder.predict(x_test,
                                    batch_size=batch_size)
    def generate(self, latent_val, batch_size=100):
        """Decode latent vectors into reconstructions.

        Note: batch_size is accepted for API symmetry but is not forwarded
        to the underlying predict call.
        """
        return self.decoder.predict(latent_val)
    def predict(self, x_test, batch_size=1, nums=1000):
        """Run the full model ``nums`` times on x_test and stack all outputs.

        Note: batch_size is accepted but not forwarded to predict.
        """
        Yt_hat = []
        for _ in range(nums):
            Yt_hat.extend(self.vae.predict(x_test))
        return np.asarray(Yt_hat)
    def mean_predict(self, x_test, batch_size=1, nums=1000):
        """Stochastic decoding: ``nums`` decoder passes with dropout active.

        A backend function is built with learning_phase as an extra input;
        calling it with 1 runs the decoder in training mode, so its Dropout
        layers stay active and each pass differs (MC-dropout).
        """
        predict_stochastic = K.function([self.decoder.layers[0].input,
                                        K.learning_phase()],
                                       [self.decoder.get_output_at(0)])
        latents = self.encoder.predict(x_test)[0]
        Yt_hat = []
        for _ in range(nums):
            Yt_hat.append(predict_stochastic([latents, 1]))
        return np.asarray(Yt_hat) | [
"keras.losses.binary_crossentropy",
"keras.backend.sum",
"keras.backend.exp",
"sklearn.model_selection.train_test_split",
"keras.backend.mean",
"keras.backend.square",
"keras.backend.learning_phase",
"numpy.asarray",
"keras.layers.Input",
"keras.models.Model",
"keras.regularizers.l2",
"keras.l... | [((771, 817), 'keras.layers.Input', 'Input', ([], {'shape': 'input_shape', 'name': '"""encoder_input"""'}), "(shape=input_shape, name='encoder_input')\n", (776, 817), False, 'from keras.layers import Input, Dense, Dropout\n'), ((1180, 1230), 'keras.models.Model', 'Model', (['inputs', '[z_mean, z_log_var]'], {'name': '"""encoder"""'}), "(inputs, [z_mean, z_log_var], name='encoder')\n", (1185, 1230), False, 'from keras.models import Model\n'), ((1289, 1334), 'keras.layers.Input', 'Input', ([], {'shape': '(latent_dim,)', 'name': '"""z_sampling"""'}), "(shape=(latent_dim,), name='z_sampling')\n", (1294, 1334), False, 'from keras.layers import Input, Dense, Dropout\n'), ((1944, 1989), 'keras.models.Model', 'Model', (['latent_inputs', 'outputs'], {'name': '"""decoder"""'}), "(latent_inputs, outputs, name='decoder')\n", (1949, 1989), False, 'from keras.models import Model\n'), ((2185, 2223), 'keras.models.Model', 'Model', (['inputs', 'outputs'], {'name': '"""vae_mlp"""'}), "(inputs, outputs, name='vae_mlp')\n", (2190, 2223), False, 'from keras.models import Model\n'), ((2289, 2325), 'keras.losses.binary_crossentropy', 'binary_crossentropy', (['inputs', 'outputs'], {}), '(inputs, outputs)\n', (2308, 2325), False, 'from keras.losses import binary_crossentropy\n'), ((2458, 2481), 'keras.backend.sum', 'K.sum', (['kl_loss'], {'axis': '(-1)'}), '(kl_loss, axis=-1)\n', (2463, 2481), True, 'from keras import backend as K\n'), ((2534, 2571), 'keras.backend.mean', 'K.mean', (['(reconstruction_loss + kl_loss)'], {}), '(reconstruction_loss + kl_loss)\n', (2540, 2571), True, 'from keras import backend as K\n'), ((3103, 3137), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X'], {'test_size': '(0.5)'}), '(X, test_size=0.5)\n', (3119, 3137), False, 'from sklearn.model_selection import train_test_split\n'), ((3675, 3693), 'numpy.asarray', 'np.asarray', (['Yt_hat'], {}), '(Yt_hat)\n', (3685, 3693), True, 'import numpy as np\n'), ((4147, 4165), 
'numpy.asarray', 'np.asarray', (['Yt_hat'], {}), '(Yt_hat)\n', (4157, 4165), True, 'import numpy as np\n'), ((849, 891), 'keras.layers.Dense', 'Dense', (['intermediate_dim'], {'activation': '"""relu"""'}), "(intermediate_dim, activation='relu')\n", (854, 891), False, 'from keras.layers import Input, Dense, Dropout\n'), ((907, 954), 'keras.layers.Dense', 'Dense', (['(intermediate_dim // 2)'], {'activation': '"""relu"""'}), "(intermediate_dim // 2, activation='relu')\n", (912, 954), False, 'from keras.layers import Input, Dense, Dropout\n'), ((982, 1014), 'keras.layers.Dense', 'Dense', (['latent_dim'], {'name': '"""z_mean"""'}), "(latent_dim, name='z_mean')\n", (987, 1014), False, 'from keras.layers import Input, Dense, Dropout\n'), ((1038, 1073), 'keras.layers.Dense', 'Dense', (['latent_dim'], {'name': '"""z_log_var"""'}), "(latent_dim, name='z_log_var')\n", (1043, 1073), False, 'from keras.layers import Input, Dense, Dropout\n'), ((1555, 1571), 'keras.layers.Dropout', 'Dropout', (['dropout'], {}), '(dropout)\n', (1562, 1571), False, 'from keras.layers import Input, Dense, Dropout\n'), ((1735, 1751), 'keras.layers.Dropout', 'Dropout', (['dropout'], {}), '(dropout)\n', (1742, 1751), False, 'from keras.layers import Input, Dense, Dropout\n'), ((2423, 2439), 'keras.backend.exp', 'K.exp', (['z_log_var'], {}), '(z_log_var)\n', (2428, 2439), True, 'from keras import backend as K\n'), ((2404, 2420), 'keras.backend.square', 'K.square', (['z_mean'], {}), '(z_mean)\n', (2412, 2420), True, 'from keras import backend as K\n'), ((3885, 3903), 'keras.backend.learning_phase', 'K.learning_phase', ([], {}), '()\n', (3901, 3903), True, 'from keras import backend as K\n'), ((1486, 1496), 'keras.regularizers.l2', 'l2', (['(0.0001)'], {}), '(0.0001)\n', (1488, 1496), False, 'from keras.regularizers import l2\n'), ((1530, 1540), 'keras.regularizers.l2', 'l2', (['(0.0001)'], {}), '(0.0001)\n', (1532, 1540), False, 'from keras.regularizers import l2\n'), ((1666, 1676), 
'keras.regularizers.l2', 'l2', (['(0.0001)'], {}), '(0.0001)\n', (1668, 1676), False, 'from keras.regularizers import l2\n'), ((1710, 1720), 'keras.regularizers.l2', 'l2', (['(0.0001)'], {}), '(0.0001)\n', (1712, 1720), False, 'from keras.regularizers import l2\n'), ((1857, 1867), 'keras.regularizers.l2', 'l2', (['(0.0001)'], {}), '(0.0001)\n', (1859, 1867), False, 'from keras.regularizers import l2\n'), ((1907, 1917), 'keras.regularizers.l2', 'l2', (['(0.0001)'], {}), '(0.0001)\n', (1909, 1917), False, 'from keras.regularizers import l2\n')] |
# Text-classification pipeline: loads a labelled dataset and pre-trained word
# embeddings, then prepares padded token sequences for the CNN defined below.
from sklearn import model_selection, preprocessing, metrics
import pandas, numpy, keras
from keras.preprocessing import text, sequence
from keras import layers, models, optimizers, initializers
# load the dataset: one "<label> <text>" pair per line
data = open('Dataset teste', encoding="utf8").read()
classes, textos = [], []
for i, line in enumerate(data.split("\n")):
    content = line.split()
    classes.append(content[0])
    # NOTE(review): only the second whitespace token is kept as the text; if
    # documents can contain more than one word this drops the rest -- confirm
    # the dataset format.
    textos.append(content[1])
# create a dataframe from the texts and labels
trainDF = pandas.DataFrame()
trainDF['texto'] = textos
trainDF['classe'] = classes
# split the dataset into training and validation sets
train_x, valid_x, train_y, valid_y = model_selection.train_test_split( trainDF['texto'], trainDF['classe'])
# encode the target variable labels as integers
encoder = preprocessing.LabelEncoder()
train_y = encoder.fit_transform(train_y)
# NOTE(review): fit_transform is called again on the validation labels, which
# refits the encoder instead of reusing the training mapping -- transform
# alone would be the usual choice; confirm this is intentional.
valid_y = encoder.fit_transform(valid_y)
# load the pre-trained word embedding vectors (word followed by its vector)
embeddings_index = {}
for i, line in enumerate(open('Dataset Treino', encoding="utf8")):
    values = line.split()
    embeddings_index[values[0]] = numpy.asarray(values[1:], dtype='float32')
# create a tokenizer fitted on the training texts
token = text.Tokenizer()
token.fit_on_texts(trainDF['texto'])
word_index = token.word_index
# convert texts into token-id sequences and pad them to equal length (70)
train_seq_x = sequence.pad_sequences(token.texts_to_sequences(train_x), maxlen=70)
valid_seq_x = sequence.pad_sequences(token.texts_to_sequences(valid_x), maxlen=70)
# build the token-id -> embedding-vector matrix (row 0 left as zeros)
embedding_matrix = numpy.zeros((len(word_index) + 1, 300))
for word, i in word_index.items():
    embedding_vector = embeddings_index.get(word)
    if embedding_vector is not None:
        embedding_matrix[i] = embedding_vector
def train_model(classifier, feature_vector_train, label, feature_vector_valid, is_neural_net=False):
    """Fit *classifier* on the training data and return validation predictions.

    Args:
        classifier: any object exposing ``fit(X, y, epochs=...)`` and
            ``predict(X)`` (here, a compiled Keras model).
        feature_vector_train: training feature matrix.
        label: training labels.
        feature_vector_valid: validation feature matrix.
        is_neural_net (bool): kept for interface compatibility; currently
            unused.

    Returns:
        The classifier's predictions on ``feature_vector_valid``. (The
        original version discarded this value, making the call useless for
        evaluation; returning it is backward-compatible since callers that
        ignore the result are unaffected.)
    """
    # fit the training dataset on the classifier
    classifier.fit(feature_vector_train, label, epochs=5)
    # predict the labels on the validation dataset and hand them back
    return classifier.predict(feature_vector_valid)
def create_cnn():
    """Build and compile a 1-D CNN for binary text classification.

    The network maps padded token sequences (length 70) through a frozen
    pre-trained embedding, a single convolution + global-max-pool stage,
    and a dropout-regularised sigmoid head.
    """
    # Sequence input: 70 token ids per example.
    sequence_input = layers.Input((70, ))
    # Embedding initialised from the pre-trained vectors and kept frozen.
    embedded = layers.Embedding(len(word_index) + 1, 300, weights=[embedding_matrix], trainable=False)(sequence_input)
    embedded = layers.SpatialDropout1D(0.3)(embedded)
    # Convolutional stage.
    features = layers.Convolution1D(90, 3, activation="relu")(embedded)
    # Global max pooling keeps the strongest activation per filter.
    pooled = layers.GlobalMaxPool1D()(features)
    # Classification head: heavy dropout, then a sigmoid unit.
    head = layers.Dropout(0.7)(pooled)
    prediction = layers.Dense(1, activation="sigmoid")(head)
    # Assemble and compile the model.
    cnn = models.Model(inputs=sequence_input, outputs=prediction)
    cnn.compile(optimizer=optimizers.Adamax(), loss='binary_crossentropy', metrics=['accuracy'])
    return cnn
# Build the CNN, print its architecture, then train and predict on the
# prepared sequences.
classifier = create_cnn()
classifier.summary()
train_model(classifier, train_seq_x, train_y, valid_seq_x, is_neural_net=True)
| [
"sklearn.preprocessing.LabelEncoder",
"keras.preprocessing.text.Tokenizer",
"sklearn.model_selection.train_test_split",
"numpy.asarray",
"keras.layers.Convolution1D",
"keras.layers.Input",
"keras.layers.GlobalMaxPool1D",
"keras.layers.Dense",
"keras.models.Model",
"keras.layers.SpatialDropout1D",
... | [((507, 525), 'pandas.DataFrame', 'pandas.DataFrame', ([], {}), '()\n', (523, 525), False, 'import pandas, numpy, keras\n'), ((702, 771), 'sklearn.model_selection.train_test_split', 'model_selection.train_test_split', (["trainDF['texto']", "trainDF['classe']"], {}), "(trainDF['texto'], trainDF['classe'])\n", (734, 771), False, 'from sklearn import model_selection, preprocessing, metrics\n'), ((830, 858), 'sklearn.preprocessing.LabelEncoder', 'preprocessing.LabelEncoder', ([], {}), '()\n', (856, 858), False, 'from sklearn import model_selection, preprocessing, metrics\n'), ((1238, 1254), 'keras.preprocessing.text.Tokenizer', 'text.Tokenizer', ([], {}), '()\n', (1252, 1254), False, 'from keras.preprocessing import text, sequence\n'), ((1163, 1205), 'numpy.asarray', 'numpy.asarray', (['values[1:]'], {'dtype': '"""float32"""'}), "(values[1:], dtype='float32')\n", (1176, 1205), False, 'import pandas, numpy, keras\n'), ((2295, 2314), 'keras.layers.Input', 'layers.Input', (['(70,)'], {}), '((70,))\n', (2307, 2314), False, 'from keras import layers, models, optimizers, initializers\n'), ((3114, 3169), 'keras.models.Model', 'models.Model', ([], {'inputs': 'input_layer', 'outputs': 'output_layer2'}), '(inputs=input_layer, outputs=output_layer2)\n', (3126, 3169), False, 'from keras import layers, models, optimizers, initializers\n'), ((2518, 2546), 'keras.layers.SpatialDropout1D', 'layers.SpatialDropout1D', (['(0.3)'], {}), '(0.3)\n', (2541, 2546), False, 'from keras import layers, models, optimizers, initializers\n'), ((2623, 2669), 'keras.layers.Convolution1D', 'layers.Convolution1D', (['(90)', '(3)'], {'activation': '"""relu"""'}), "(90, 3, activation='relu')\n", (2643, 2669), False, 'from keras import layers, models, optimizers, initializers\n'), ((2806, 2830), 'keras.layers.GlobalMaxPool1D', 'layers.GlobalMaxPool1D', ([], {}), '()\n', (2828, 2830), False, 'from keras import layers, models, optimizers, initializers\n'), ((2966, 2985), 'keras.layers.Dropout', 
'layers.Dropout', (['(0.7)'], {}), '(0.7)\n', (2980, 2985), False, 'from keras import layers, models, optimizers, initializers\n'), ((3022, 3059), 'keras.layers.Dense', 'layers.Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (3034, 3059), False, 'from keras import layers, models, optimizers, initializers\n'), ((3199, 3218), 'keras.optimizers.Adamax', 'optimizers.Adamax', ([], {}), '()\n', (3216, 3218), False, 'from keras import layers, models, optimizers, initializers\n')] |
import pickle
import numpy as np
# Module-level defaults; not referenced by the functions visible in this
# file, presumably consumed by callers elsewhere -- TODO confirm.
DEFAULT_THRESHOLD_MULTIPLIER = 4
DEFAULT_RELATIVE_SCALE = 0.1
def uniform_on_sphere(center, radius, num_samples=1, reflecting_boundary_radius=np.inf):
    """Draw points uniformly from the surface of a sphere.

    Points are produced by normalising isotropic Gaussian draws to the
    requested radius and translating them to ``center``. Draws that fall
    outside the reflecting boundary (a sphere of radius
    ``reflecting_boundary_radius`` about the origin) are rejected and
    redrawn until enough valid points exist.

    Parameters
    ----------
    center : np.ndarray
        Centre of the sphere, shape (n,).
    radius : float
        Radius of the sphere.
    num_samples : int
        Number of points to return.
    reflecting_boundary_radius : float
        Every returned point must lie strictly inside this radius from the
        origin.

    Returns
    -------
    np.ndarray
        Array of shape (num_samples, n); one sample per row.
    """
    dim = center.size
    collected = []
    total = 0
    while total < num_samples:
        draws = np.random.randn(num_samples, dim)
        # Project onto the sphere of the requested radius, then recentre.
        draws *= radius / np.linalg.norm(draws, axis=1, keepdims=True)
        draws += center.reshape((1, dim))
        # Keep only the draws inside the reflecting boundary.
        inside = draws[np.linalg.norm(draws, axis=1) < reflecting_boundary_radius]
        collected.append(inside)
        total += len(inside)
    result = np.concatenate(collected)
    assert np.all(np.linalg.norm(result, axis=1) < reflecting_boundary_radius)
    return result[:num_samples]
def sample_uniform_initial_location(centers, radiuses, boundary_radius):
    """Sample a location inside the boundary ball but outside every target.

    Uses rejection sampling: candidates are drawn uniformly from the cube
    ``[-boundary_radius, boundary_radius]^n`` and accepted once one lies
    strictly inside the boundary sphere and strictly outside every target
    sphere.

    Parameters
    ----------
    centers : np.ndarray
        Target centres, shape (num_spheres, n_dim).
    radiuses : np.ndarray
        Target radii, shape (num_spheres,).
    boundary_radius : float
        Radius of the reflecting boundary.

    Returns
    -------
    np.ndarray
        Accepted location, shape (n_dim,).
    """
    n_dim = centers.shape[1]
    while True:
        candidate = 2 * boundary_radius * np.random.rand(n_dim) - boundary_radius
        # Reject candidates outside the boundary sphere.
        if np.linalg.norm(candidate, ord=2) >= boundary_radius:
            continue
        # Accept only if the candidate clears every target sphere.
        gaps = np.linalg.norm(
            candidate.reshape((1, n_dim)) - centers, ord=2, axis=1
        )
        if all(gaps > radiuses):
            return candidate
def sample_random_locations(center, radiuses, n_samples):
    """Sample points on spheres of random radii within a radius band.

    For each of the ``n_samples`` points, a radius is drawn uniformly from
    ``[radiuses[0], radiuses[1])`` and one point is placed uniformly on the
    sphere of that radius about ``center``.

    Parameters
    ----------
    center : np.ndarray
        Common centre, shape (n_dim,).
    radiuses : sequence
        Two-element band ``(r_min, r_max)`` with ``r_min < r_max``.
    n_samples : int
        Number of points to draw.

    Returns
    -------
    np.ndarray
        Sampled points, shape (n_samples, n_dim).
    """
    assert radiuses[0] < radiuses[1]
    r_min, r_max = radiuses[0], radiuses[1]
    out = np.zeros((n_samples, center.size))
    for row in range(n_samples):
        band_radius = (r_max - r_min) * np.random.rand(1) + r_min
        out[row] = uniform_on_sphere(center, band_radius)
    return out
def load_model_params(model_params_fname):
    """Load simulation parameters from a pickled model-parameter file.

    Parameters
    ----------
    model_params_fname : str
        Path to a pickle file holding a dict with keys ``'time_step'`` and
        ``'target_param_list'``.

    Returns
    -------
    tuple
        ``(time_step, target_param_list)`` exactly as stored in the file.
    """
    with open(model_params_fname, 'rb') as handle:
        params = pickle.load(handle)
    return params['time_step'], params['target_param_list']
| [
"numpy.random.rand",
"pickle.load",
"numpy.zeros",
"numpy.concatenate",
"numpy.linalg.norm",
"numpy.random.randn"
] | [((1336, 1365), 'numpy.concatenate', 'np.concatenate', (['valid_samples'], {}), '(valid_samples)\n', (1350, 1365), True, 'import numpy as np\n'), ((2756, 2784), 'numpy.zeros', 'np.zeros', (['(n_samples, n_dim)'], {}), '((n_samples, n_dim))\n', (2764, 2784), True, 'import numpy as np\n'), ((954, 985), 'numpy.random.randn', 'np.random.randn', (['num_samples', 'n'], {}), '(num_samples, n)\n', (969, 985), True, 'import numpy as np\n'), ((1009, 1055), 'numpy.linalg.norm', 'np.linalg.norm', (['samples'], {'axis': '(1)', 'keepdims': '(True)'}), '(samples, axis=1, keepdims=True)\n', (1023, 1055), True, 'import numpy as np\n'), ((3104, 3118), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (3115, 3118), False, 'import pickle\n'), ((1384, 1421), 'numpy.linalg.norm', 'np.linalg.norm', (['valid_samples'], {'axis': '(1)'}), '(valid_samples, axis=1)\n', (1398, 1421), True, 'import numpy as np\n'), ((2339, 2378), 'numpy.linalg.norm', 'np.linalg.norm', (['initial_location'], {'ord': '(2)'}), '(initial_location, ord=2)\n', (2353, 2378), True, 'import numpy as np\n'), ((1183, 1214), 'numpy.linalg.norm', 'np.linalg.norm', (['samples'], {'axis': '(1)'}), '(samples, axis=1)\n', (1197, 1214), True, 'import numpy as np\n'), ((2292, 2309), 'numpy.random.rand', 'np.random.rand', (['n'], {}), '(n)\n', (2306, 2309), True, 'import numpy as np\n'), ((2871, 2888), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (2885, 2888), True, 'import numpy as np\n')] |
"""
(first step)
parse the Kounkel & Covey (2019) catalogs to get metadata needed to make
"interesting" cuts good for rotation period measurements.
"""
import numpy as np, pandas as pd
from astropy.io.votable import from_table, writeto, parse
getfile = '../data/gaia_archive_kc19_string_table1-result.vot.gz'
vot = parse(getfile)
tab = vot.get_first_table().to_table()
# NOTE: "to pandas" with gaia source IDs is a BAD BAD idea, because it
# literally converts a large fraction of them wrong. (probably int32 vs int64
# type errors? super unclear. i'm also having difficulty making a simple
# working reproducible example for this bug.... regardless, reassigning the
# sourceid seems to work)
gaia_df = tab.to_pandas()
gaia_df['source_id'] = tab['source_id']
# source_id,ra,dec,group_id
df1 = pd.read_csv('../data/string_table1.csv')
df1['source_id'] = np.int64(df1['source_id'])
# group_id,name,age,av,string,width,height,length,l,b,parallax,vl,vb,vr,x,y,z,u,v,w
df2 = pd.read_csv('../data/string_table2.csv')
selcols = 'group_id,name,age,av,string'.split(',')
df2 = df2[selcols]
#
# merge against these
#
# mdf = df1.merge(gaia_df, on='source_id', how='left') #FIXME
mdf = gaia_df.merge(df1, on='source_id', how='left')
print('{} entries from Kounkel table 1'.format(len(df1)))
print('{} entries from gaia match of Kounkel table 1'.format(len(gaia_df)))
print('\n{} entries in merge of the two...'.format(len(mdf)))
#
# get T mag using Stassun+2019 Eq1
#
Tmag_pred = (
mdf['phot_g_mean_mag']
- 0.00522555 * (mdf['phot_bp_mean_mag'] - mdf['phot_rp_mean_mag'])**3
+ 0.0891337 * (mdf['phot_bp_mean_mag'] - mdf['phot_rp_mean_mag'])**2
- 0.633923 * (mdf['phot_bp_mean_mag'] - mdf['phot_rp_mean_mag'])
+ 0.0324473
)
mdf['Tmag_pred'] = Tmag_pred
#
# merge on group_id to get ages, and whether it is in a "string" or not
#
amdf = mdf.merge(df2, on='group_id', how='left')
#
# get "absolute G" analog (ignoring extinction). note that it's a bit noisy,
# but it's better than ignoring the parallax entirely. note also that i'm
# ignoring the extinction correction, b/c it's basically not known. (though
# Green+19 might differ).
#
amdf['M_G'] = (
amdf['phot_g_mean_mag'] +
+ 5*np.log10(amdf['parallax']/1e3)
)
print('\n{} entries after merging outside to get ages...'.format(len(amdf)))
outpath = '../data/kounkel_table1_sourceinfo.csv'
amdf.to_csv(outpath, index=False, header=True)
print('made {}'.format(outpath))
| [
"numpy.int64",
"numpy.log10",
"astropy.io.votable.parse",
"pandas.read_csv"
] | [((317, 331), 'astropy.io.votable.parse', 'parse', (['getfile'], {}), '(getfile)\n', (322, 331), False, 'from astropy.io.votable import from_table, writeto, parse\n'), ((796, 836), 'pandas.read_csv', 'pd.read_csv', (['"""../data/string_table1.csv"""'], {}), "('../data/string_table1.csv')\n", (807, 836), True, 'import numpy as np, pandas as pd\n'), ((856, 882), 'numpy.int64', 'np.int64', (["df1['source_id']"], {}), "(df1['source_id'])\n", (864, 882), True, 'import numpy as np, pandas as pd\n'), ((974, 1014), 'pandas.read_csv', 'pd.read_csv', (['"""../data/string_table2.csv"""'], {}), "('../data/string_table2.csv')\n", (985, 1014), True, 'import numpy as np, pandas as pd\n'), ((2195, 2230), 'numpy.log10', 'np.log10', (["(amdf['parallax'] / 1000.0)"], {}), "(amdf['parallax'] / 1000.0)\n", (2203, 2230), True, 'import numpy as np, pandas as pd\n')] |
# Rank Fit-Hi-C contacts by significance and write the top-N to a file.
# Usage: script.py <fithic_output> <top_n> <output_file>
import sys
import numpy as np
infil = sys.argv[1] # original output from Fit-Hi-C
top_n = int(sys.argv[2]) # an integer
outfil = sys.argv[3] # output file name
# q-values sit in the last column of the Fit-Hi-C output
qvalues = np.loadtxt(infil, usecols=[-1])
# smallest strictly-positive q-value; substituted for q <= 0 below so that
# -log(q) stays finite
minq = qvalues[qvalues>0].min()
pool = []
with open(infil, 'r') as source:
    for line in source:
        parse = line.rstrip().split()
        count = int(parse[6])
        qvalue = float(parse[7])
        if qvalue <= 0:
            qvalue = minq
        # rank key: -log(q) (so smaller q sorts first after reverse sort),
        # then contact count, then the remaining fields lexicographically
        record = (-np.log(qvalue), count) + tuple(parse[:6])
        pool.append(record)
pool.sort(reverse=True)
selected = pool[:top_n]
with open(outfil, 'w') as out:
    for line in selected:
        out.write('\t'.join(list(line[2:])+[str(line[0])])+'\n') | [
"numpy.loadtxt",
"numpy.log"
] | [((172, 203), 'numpy.loadtxt', 'np.loadtxt', (['infil'], {'usecols': '[-1]'}), '(infil, usecols=[-1])\n', (182, 203), True, 'import numpy as np\n'), ((474, 488), 'numpy.log', 'np.log', (['qvalue'], {}), '(qvalue)\n', (480, 488), True, 'import numpy as np\n')] |
import numpy as np

# Sample indices 0..1023, built vectorised instead of filling element by
# element.
samples = np.arange(1024, dtype=int)
# Ramp waveform: wraps every 128 samples, scaled by 32 and offset by
# 36864 (0x9000, matching the output filename).
np.savetxt("ramp_9000h_b.csv", (samples % 128) * 32 + 36864, fmt='%d')
# Sine waveform: 64-sample period, one third of 32767 amplitude, offset to
# 24576; '%d' truncates the float values on write.
sine = np.sin(samples * 2 * 3.14159265 / 64) * (32767 / 3) + 24576
np.savetxt("sin128.csv", sine, fmt='%d')
| [
"numpy.sin",
"numpy.zeros",
"numpy.savetxt"
] | [((24, 49), 'numpy.zeros', 'np.zeros', (['(1024)'], {'dtype': 'int'}), '(1024, dtype=int)\n', (32, 49), True, 'import numpy as np\n'), ((106, 168), 'numpy.savetxt', 'np.savetxt', (['"""ramp_9000h_b.csv"""', '(t % 128 * 32 + 36864)'], {'fmt': '"""%d"""'}), "('ramp_9000h_b.csv', t % 128 * 32 + 36864, fmt='%d')\n", (116, 168), True, 'import numpy as np\n'), ((232, 269), 'numpy.savetxt', 'np.savetxt', (['"""sin128.csv"""', 's'], {'fmt': '"""%d"""'}), "('sin128.csv', s, fmt='%d')\n", (242, 269), True, 'import numpy as np\n'), ((187, 218), 'numpy.sin', 'np.sin', (['(t * 2 * 3.14159265 / 64)'], {}), '(t * 2 * 3.14159265 / 64)\n', (193, 218), True, 'import numpy as np\n')] |
"""
- Script to generate predictions for long answer type questions in simplified validation dataset
(v1.0-simplified_nq-dev-all.jsonl) and save the predictions to a predictions.json file
- Pass arguments using command line
- Restricted for device with CUDA
- Make sure the loaded config, tokenizer and weights belong to the same model
- Runs with the model trained with the train_v0.py script
"""
import os
import json
import logging
import argparse
import jsonlines
import numpy as np
from tqdm import tqdm
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from transformers import BertTokenizer, BertConfig
from custom_typing.datasets import SimplifiedNaturalQADataset
from models.bert.bert_for_qa import BertForQuestionAnswering
from utils.collator import CollatorForValidation
# Configure root logging once at import time; every progress message below
# uses this timestamped format.
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(message)s', datefmt='%d-%b-%y %H:%M:%S')
def parse_data_from_json_file(val_dataset: str, max_data: int = 10_000_000_000):
    """Read and parse the simplified Natural Questions validation jsonl file.

    For every document the question text, the whitespace-joined document
    text and the list of long-answer candidates are collected. Every
    (document id, candidate index) pair is recorded together with an
    estimated input length (question length plus candidate span length,
    both in whitespace tokens), and the pair list is returned sorted by
    that length -- presumably so that batched inference over similarly
    sized inputs wastes less padding.

    (The original docstring claimed negative examples were sampled here;
    that text was copied from a training script and did not describe this
    function, which only parses and sorts.)

    Args:
        val_dataset (str): path to the simplified validation dataset jsonl file
        max_data (int): cap on the number of examples to parse, mainly for
            debugging. Note: the loop condition (``n > max_data``) admits
            ``max_data + 1`` examples; that original behaviour is kept.

    Returns:
        id_list (List[int]): list of document ids, in file order
        id_candidate_list_sorted (List[Tuple[int, int]]): (document id,
            long answer candidate index) pairs sorted by estimated length
        data_dict (dict): document id -> {'document_text', 'question_text',
            'long_answer_candidates'}
    """
    # check if input file is of type jsonl
    assert os.path.splitext(val_dataset)[-1] == ".jsonl", "dataset file type is not jsonl, check the file provided"
    id_list = []                # all document ids, in file order
    id_candidate_list = []      # (doc_id, candidate_index) pairs, in file order
    id_candidate_len_list = []  # estimated candidate lengths, parallel to id_candidate_list
    data_dict = {}
    with jsonlines.open(val_dataset) as reader:
        for n, data_line in enumerate(tqdm(reader)):
            if n > max_data:
                break
            doc_id = data_line['example_id']
            id_list.append(doc_id)
            data_dict[doc_id] = {
                'document_text': ' '.join([item['token'] for item in data_line['document_tokens']]),
                'question_text': data_line['question_text'],
                'long_answer_candidates': data_line['long_answer_candidates'],
            }
            question_len = len(data_line['question_text'].split())
            # The whitespace-tokenized version is used to estimate candidate length.
            for i, candidate in enumerate(data_line['long_answer_candidates']):
                id_candidate_list.append((doc_id, i))
                candidate_length = question_len + candidate['end_token'] - candidate['start_token']
                id_candidate_len_list.append(candidate_length)
    # sort the candidate pairs by estimated length (shortest first)
    sorted_index = np.argsort(np.array(id_candidate_len_list))
    id_candidate_list_sorted = [id_candidate_list[sorted_index[i]]
                                for i in range(len(id_candidate_list))]
    return id_list, id_candidate_list_sorted, data_dict
if __name__=='__main__':
    # NOTE(review): the description says "mine hard examples" but this script
    # generates predictions (see module docstring) -- the text was likely
    # copied from a sibling script.
    parser = argparse.ArgumentParser(description="parser to mine hard examples from a set of examples")
    parser.add_argument('-d', '--val_dataset', help='path to dataset examples json file', type=str,
                        default='../datasets/natural_questions_simplified/v1.0-simplified_nq-dev-all.jsonl')
    parser.add_argument('-o', '--output_path', help='path to store predictions', type=str,
                        default='../predictions/bert_base_uncased/')
    parser.add_argument('-m', '--model_path', help='path to a saved model', type=str, default='bert-base-uncased')
    parser.add_argument('-w', '--weights', help='path to saved weights for the model', type=str,
                        default='../weights/bert-base-uncased/epoch1/')
    parser.add_argument('--fp16', action='store_true', help='mention if loaded model is trained on half precision')
    args = parser.parse_args()
    logging.info("parsing validation dataset")
    id_list, id_candidate_list_sorted, data_dict = parse_data_from_json_file(args.val_dataset)
    # hyperparameters
    max_seq_length = 384
    max_question_length = 64
    batch_size = 768
    # load model: config, tokenizer and weights must all belong to the same
    # checkpoint (see module docstring)
    logging.info("loading config")
    config = BertConfig.from_pretrained(args.model_path)
    config.num_labels = 5
    tokenizer = BertTokenizer.from_pretrained(args.model_path, do_lower_case=True)
    model = BertForQuestionAnswering.from_pretrained(args.weights, config=config)
    if torch.cuda.is_available():
        model.cuda()
    logging.info(f"fp16: {args.fp16}")
    if args.fp16:
        # apex is only imported when mixed precision is requested
        from apex import amp
        model = amp.initialize(model, opt_level="O1", verbosity=0)
    if torch.cuda.device_count() > 1:
        model = torch.nn.DataParallel(model)
    # testing
    # iterator for testing; id_candidate_list_sorted is length-sorted
    # (see parse_data_from_json_file)
    test_data_generator = SimplifiedNaturalQADataset(id_list=id_candidate_list_sorted)
    test_collator = CollatorForValidation(data_dict=data_dict,
                                          tokenizer=tokenizer,
                                          max_seq_length=max_seq_length,
                                          max_question_length=max_question_length)
    # shuffle=False is essential: the row bookkeeping below assumes batches
    # appear in the same order as id_candidate_list_sorted
    test_generator = DataLoader(dataset=test_data_generator,
                                collate_fn=test_collator,
                                batch_size=batch_size,
                                shuffle=False,
                                num_workers=16,
                                pin_memory=True)
    # evaluating model on dataset
    logging.info("Evaluating")
    model.eval()
    # one row of 5 class probabilities per (doc, candidate) pair
    classifier_probs = np.zeros((len(id_candidate_list_sorted), 5), dtype=np.float32) # class
    for j, (batch_input_ids, batch_attention_mask, batch_token_type_ids) in enumerate(tqdm(test_generator)):
        with torch.no_grad():
            # rows [start, end) of classifier_probs belong to this batch;
            # the last batch may be smaller than batch_size
            start = j*batch_size
            end = start + batch_size
            if j == len(test_generator) - 1:
                end = len(test_generator.dataset)
            if torch.cuda.is_available():
                batch_input_ids = batch_input_ids.cuda()
                batch_attention_mask = batch_attention_mask.cuda()
                batch_token_type_ids = batch_token_type_ids.cuda()
            start_position_logits, end_position_logits, classifier_logits = model(batch_input_ids, batch_attention_mask, batch_token_type_ids)
            # batches cover disjoint row ranges, so += writes each row once
            classifier_probs[start:end] += nn.functional.softmax(classifier_logits, dim=1).cpu().data.numpy()
    # Processing long answers:
    # initialize a temporary dictionary to store prediction values.
    temp_dict = {}
    for doc_id in id_list:
        temp_dict[doc_id] = {
            'long_answer': {'start_token': -1, 'end_token': -1},
            'long_answer_score': -1.0,
            'short_answers': [{'start_token': -1, 'end_token': -1}],
            'short_answers_score': -1.0,
            'yes_no_answer': 'NONE'
        }
    # from candidates to document: keep, per document, the candidate with
    # the highest long-answer score
    for i, (doc_id, candidate_index) in enumerate(tqdm(id_candidate_list_sorted)):
        # process long answer
        la_candidate = data_dict[doc_id]['long_answer_candidates'][candidate_index]
        long_answer_score = 1.0 - classifier_probs[i,0] # 1- no_answer_score
        if long_answer_score > temp_dict[doc_id]['long_answer_score']:
            temp_dict[doc_id]['long_answer_score'] = long_answer_score
            temp_dict[doc_id]['long_answer']['start_token'] = la_candidate['start_token']
            temp_dict[doc_id]['long_answer']['end_token'] = la_candidate['end_token']
    # Copy the temporary dictionary into the final dictionary that meets the required format for validation.
    final_dict = {'predictions': []}
    for doc_id in id_list:
        prediction_dict = {
            'example_id': doc_id,
            'long_answer': {'start_byte': -1, 'end_byte': -1,
                            'start_token': temp_dict[doc_id]['long_answer']['start_token'],
                            'end_token': temp_dict[doc_id]['long_answer']['end_token']},
            'long_answer_score': temp_dict[doc_id]['long_answer_score'],
            'short_answers': [{'start_byte': -1, 'end_byte': -1, 'start_token': -1, 'end_token': -1}],
            'short_answers_score': -1.0,
            'yes_no_answer': 'NONE'
        }
        final_dict['predictions'].append(prediction_dict)
    # make sure output directory exists
    if not os.path.exists(args.output_path):
        os.makedirs(args.output_path, exist_ok=True)
    logging.info(f"writing predictions to {args.output_path}")
    # write to json file
    with open(os.path.join(args.output_path, 'predictions.json'), 'w') as f:
        json.dump(final_dict, f)
| [
"utils.collator.CollatorForValidation",
"torch.cuda.device_count",
"jsonlines.open",
"numpy.array",
"torch.cuda.is_available",
"apex.amp.initialize",
"logging.info",
"torch.nn.functional.softmax",
"os.path.exists",
"argparse.ArgumentParser",
"os.path.splitext",
"models.bert.bert_for_qa.BertFor... | [((812, 921), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG', 'format': '"""%(asctime)s - %(message)s"""', 'datefmt': '"""%d-%b-%y %H:%M:%S"""'}), "(level=logging.DEBUG, format='%(asctime)s - %(message)s',\n datefmt='%d-%b-%y %H:%M:%S')\n", (831, 921), False, 'import logging\n'), ((3633, 3728), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""parser to mine hard examples from a set of examples"""'}), "(description=\n 'parser to mine hard examples from a set of examples')\n", (3656, 3728), False, 'import argparse\n'), ((4529, 4571), 'logging.info', 'logging.info', (['"""parsing validation dataset"""'], {}), "('parsing validation dataset')\n", (4541, 4571), False, 'import logging\n'), ((4787, 4817), 'logging.info', 'logging.info', (['"""loading config"""'], {}), "('loading config')\n", (4799, 4817), False, 'import logging\n'), ((4831, 4874), 'transformers.BertConfig.from_pretrained', 'BertConfig.from_pretrained', (['args.model_path'], {}), '(args.model_path)\n', (4857, 4874), False, 'from transformers import BertTokenizer, BertConfig\n'), ((4917, 4983), 'transformers.BertTokenizer.from_pretrained', 'BertTokenizer.from_pretrained', (['args.model_path'], {'do_lower_case': '(True)'}), '(args.model_path, do_lower_case=True)\n', (4946, 4983), False, 'from transformers import BertTokenizer, BertConfig\n'), ((4996, 5065), 'models.bert.bert_for_qa.BertForQuestionAnswering.from_pretrained', 'BertForQuestionAnswering.from_pretrained', (['args.weights'], {'config': 'config'}), '(args.weights, config=config)\n', (5036, 5065), False, 'from models.bert.bert_for_qa import BertForQuestionAnswering\n'), ((5074, 5099), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5097, 5099), False, 'import torch\n'), ((5126, 5160), 'logging.info', 'logging.info', (['f"""fp16: {args.fp16}"""'], {}), "(f'fp16: {args.fp16}')\n", (5138, 5160), False, 'import logging\n'), ((5426, 
5486), 'custom_typing.datasets.SimplifiedNaturalQADataset', 'SimplifiedNaturalQADataset', ([], {'id_list': 'id_candidate_list_sorted'}), '(id_list=id_candidate_list_sorted)\n', (5452, 5486), False, 'from custom_typing.datasets import SimplifiedNaturalQADataset\n'), ((5507, 5646), 'utils.collator.CollatorForValidation', 'CollatorForValidation', ([], {'data_dict': 'data_dict', 'tokenizer': 'tokenizer', 'max_seq_length': 'max_seq_length', 'max_question_length': 'max_question_length'}), '(data_dict=data_dict, tokenizer=tokenizer,\n max_seq_length=max_seq_length, max_question_length=max_question_length)\n', (5528, 5646), False, 'from utils.collator import CollatorForValidation\n'), ((5748, 5888), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'test_data_generator', 'collate_fn': 'test_collator', 'batch_size': 'batch_size', 'shuffle': '(False)', 'num_workers': '(16)', 'pin_memory': '(True)'}), '(dataset=test_data_generator, collate_fn=test_collator,\n batch_size=batch_size, shuffle=False, num_workers=16, pin_memory=True)\n', (5758, 5888), False, 'from torch.utils.data import DataLoader\n'), ((6084, 6110), 'logging.info', 'logging.info', (['"""Evaluating"""'], {}), "('Evaluating')\n", (6096, 6110), False, 'import logging\n'), ((9021, 9079), 'logging.info', 'logging.info', (['f"""writing predictions to {args.output_path}"""'], {}), "(f'writing predictions to {args.output_path}')\n", (9033, 9079), False, 'import logging\n'), ((2191, 2218), 'jsonlines.open', 'jsonlines.open', (['val_dataset'], {}), '(val_dataset)\n', (2205, 2218), False, 'import jsonlines\n'), ((3349, 3380), 'numpy.array', 'np.array', (['id_candidate_len_list'], {}), '(id_candidate_len_list)\n', (3357, 3380), True, 'import numpy as np\n'), ((5224, 5274), 'apex.amp.initialize', 'amp.initialize', (['model'], {'opt_level': '"""O1"""', 'verbosity': '(0)'}), "(model, opt_level='O1', verbosity=0)\n", (5238, 5274), False, 'from apex import amp\n'), ((5282, 5307), 'torch.cuda.device_count', 
'torch.cuda.device_count', ([], {}), '()\n', (5305, 5307), False, 'import torch\n'), ((5329, 5357), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model'], {}), '(model)\n', (5350, 5357), False, 'import torch\n'), ((6311, 6331), 'tqdm.tqdm', 'tqdm', (['test_generator'], {}), '(test_generator)\n', (6315, 6331), False, 'from tqdm import tqdm\n'), ((7536, 7566), 'tqdm.tqdm', 'tqdm', (['id_candidate_list_sorted'], {}), '(id_candidate_list_sorted)\n', (7540, 7566), False, 'from tqdm import tqdm\n'), ((8930, 8962), 'os.path.exists', 'os.path.exists', (['args.output_path'], {}), '(args.output_path)\n', (8944, 8962), False, 'import os\n'), ((8972, 9016), 'os.makedirs', 'os.makedirs', (['args.output_path'], {'exist_ok': '(True)'}), '(args.output_path, exist_ok=True)\n', (8983, 9016), False, 'import os\n'), ((9190, 9214), 'json.dump', 'json.dump', (['final_dict', 'f'], {}), '(final_dict, f)\n', (9199, 9214), False, 'import json\n'), ((1813, 1842), 'os.path.splitext', 'os.path.splitext', (['val_dataset'], {}), '(val_dataset)\n', (1829, 1842), False, 'import os\n'), ((2268, 2280), 'tqdm.tqdm', 'tqdm', (['reader'], {}), '(reader)\n', (2272, 2280), False, 'from tqdm import tqdm\n'), ((6347, 6362), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6360, 6362), False, 'import torch\n'), ((6544, 6569), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (6567, 6569), False, 'import torch\n'), ((9119, 9169), 'os.path.join', 'os.path.join', (['args.output_path', '"""predictions.json"""'], {}), "(args.output_path, 'predictions.json')\n", (9131, 9169), False, 'import os\n'), ((6948, 6995), 'torch.nn.functional.softmax', 'nn.functional.softmax', (['classifier_logits'], {'dim': '(1)'}), '(classifier_logits, dim=1)\n', (6969, 6995), True, 'import torch.nn as nn\n')] |
import numpy as np
def _F1_eval(preds, labels):
t = np.arange(0, 1, 0.005)
f = np.repeat(0, 200)
results = np.vstack([t, f]).T
# assuming labels only containing 0's and 1's
n_pos_examples = sum(labels)
if n_pos_examples == 0:
raise ValueError("labels not containing positive examples")
for i in range(200):
pred_indexes = (preds >= results[i, 0])
TP = sum(labels[pred_indexes])
FP = len(labels[pred_indexes]) - TP
precision = 0
recall = TP / n_pos_examples
if (FP + TP) > 0:
precision = TP / (FP + TP)
if (precision + recall > 0):
F1 = 2 * precision * recall / (precision + recall)
else:
F1 = 0
results[i, 1] = F1
return (max(results[:, 1]))
# Xgboost version
def F1_eval(preds, dtrain):
    """XGBoost custom-metric wrapper around _F1_eval.

    Returns the pair ('f1_err', 1 - best_F1) because XGBoost minimizes
    the reported metric value.
    """
    labels = dtrain.get_label()
    best_f1 = _F1_eval(preds, labels)
    return 'f1_err', 1. - best_f1
"numpy.repeat",
"numpy.vstack",
"numpy.arange"
] | [((57, 79), 'numpy.arange', 'np.arange', (['(0)', '(1)', '(0.005)'], {}), '(0, 1, 0.005)\n', (66, 79), True, 'import numpy as np\n'), ((88, 105), 'numpy.repeat', 'np.repeat', (['(0)', '(200)'], {}), '(0, 200)\n', (97, 105), True, 'import numpy as np\n'), ((120, 137), 'numpy.vstack', 'np.vstack', (['[t, f]'], {}), '([t, f])\n', (129, 137), True, 'import numpy as np\n')] |
import os
import json
import cv2
import time
import base64
import numpy as np
from elasticsearch import Elasticsearch
from datetime import datetime
from flask import Flask, request, Response
app = Flask(__name__)
count = 0  # used only by the commented-out per-request save code below
# Folder where incoming images are stored
image_dir = "./images"
# Connect to Elasticsearch
# NOTE(review): host and credentials are hard-coded here; move them to
# configuration/environment variables before deploying.
es = Elasticsearch(host='172.16.58.3', port=9200,http_auth=('elastic','InfoNetworking'))
if not os.path.isdir(image_dir):
    os.mkdir(image_dir)
@app.route('/save', methods=['POST'])
def save_image():
    """Decode a base64 image from the POST body and save it to disk.

    Expects a JSON body with an 'image' key holding base64-encoded image
    bytes.  The decoded image is written to
    <cwd>/<YYYY>/<mm>/<dd>/<YYYY_mm_dd>.png, creating the directory tree
    as needed, and a JSON confirmation message is returned.
    """
    # Parse the request payload
    data = request.data.decode('utf-8')
    data_json = json.loads(data)
    image = data_json['image']
    image_dec = base64.b64decode(image)
    # np.frombuffer replaces the deprecated np.fromstring
    data_np = np.frombuffer(image_dec, dtype='uint8')
    decimg = cv2.imdecode(data_np, 1)
    # Take a single timestamp so the directory path and the file name
    # cannot disagree (the old code called datetime.now() several times,
    # which could straddle a midnight boundary).
    nowtime = datetime.now()
    savedir = os.path.join(os.getcwd(),
                           nowtime.strftime("%Y"),
                           nowtime.strftime("%m"),
                           nowtime.strftime("%d"))
    # Build <year>/<month>/<day> in one call instead of four mkdir blocks.
    os.makedirs(savedir, exist_ok=True)
    # One file per day; a later upload on the same day overwrites it,
    # matching the original behavior.
    saveFileName = os.path.join(savedir, nowtime.strftime("%Y_%m_%d.png"))
    cv2.imwrite(saveFileName, decimg)
    print(str(savedir) +"に保存しました")
    return Response(response=json.dumps({"message": "{} was saved".format(saveFileName)}), status=200)
@app.route('/Info', methods=['POST'])
def save_info():
    """Index a posted fish-detection record into Elasticsearch.

    Reads a JSON body with 'id', 'vertical', 'horizon' and 'categories'
    fields, stamps it with the current time in epoch milliseconds and
    stores it in the 'aquaponics' index.  Indexing failures are printed
    but never fail the request.
    """
    body = request.data.decode('utf-8')
    parsed = json.loads(body)
    prediction = {
        "date": time.time() * 1000,  # epoch milliseconds
        "id": parsed['id'],
        "vertical": parsed['vertical'],
        "horizon": parsed['horizon'],
        "categories": parsed['categories'],
    }
    outdata = json.dumps(prediction)
    print(outdata)
    try:
        es.index(index="aquaponics", doc_type='fish', body=outdata)
    except Exception as e:
        # Best-effort: log the failure and still acknowledge the client.
        print(e)
    return Response(response=json.dumps({"message": "ok!"}), status=200)
#return Response(response=json.dumps({"message": "{} was saved".format(id)}), status=200)
# 画像ファイルを保存
# global count
# filename = "./images/image{}.png".format(count)
# cv2.imwrite(filename, decimg)
# count += 1
# HTTPレスポンスを送信
#return Response(response=json.dumps({"message": "{} was saved".format(saveFileName)}), status=200)
if __name__ == '__main__':
    # Run the Flask development server on all interfaces, port 8082.
    app.run(host='0.0.0.0', port=8082)
"cv2.imwrite",
"json.loads",
"flask.Flask",
"elasticsearch.Elasticsearch",
"flask.request.data.decode",
"os.path.join",
"base64.b64decode",
"json.dumps",
"os.getcwd",
"datetime.datetime.now",
"os.path.isdir",
"os.mkdir",
"cv2.imdecode",
"numpy.fromstring",
"time.time"
] | [((197, 212), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (202, 212), False, 'from flask import Flask, request, Response\n'), ((287, 376), 'elasticsearch.Elasticsearch', 'Elasticsearch', ([], {'host': '"""172.16.58.3"""', 'port': '(9200)', 'http_auth': "('elastic', 'InfoNetworking')"}), "(host='172.16.58.3', port=9200, http_auth=('elastic',\n 'InfoNetworking'))\n", (300, 376), False, 'from elasticsearch import Elasticsearch\n'), ((378, 402), 'os.path.isdir', 'os.path.isdir', (['image_dir'], {}), '(image_dir)\n', (391, 402), False, 'import os\n'), ((406, 425), 'os.mkdir', 'os.mkdir', (['image_dir'], {}), '(image_dir)\n', (414, 425), False, 'import os\n'), ((509, 537), 'flask.request.data.decode', 'request.data.decode', (['"""utf-8"""'], {}), "('utf-8')\n", (528, 537), False, 'from flask import Flask, request, Response\n'), ((554, 570), 'json.loads', 'json.loads', (['data'], {}), '(data)\n', (564, 570), False, 'import json\n'), ((618, 641), 'base64.b64decode', 'base64.b64decode', (['image'], {}), '(image)\n', (634, 641), False, 'import base64\n'), ((656, 695), 'numpy.fromstring', 'np.fromstring', (['image_dec'], {'dtype': '"""uint8"""'}), "(image_dec, dtype='uint8')\n", (669, 695), True, 'import numpy as np\n'), ((709, 733), 'cv2.imdecode', 'cv2.imdecode', (['data_np', '(1)'], {}), '(data_np, 1)\n', (721, 733), False, 'import cv2\n'), ((780, 794), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (792, 794), False, 'from datetime import datetime\n'), ((809, 820), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (818, 820), False, 'import os\n'), ((1696, 1731), 'os.path.join', 'os.path.join', (['savedir', 'saveFileName'], {}), '(savedir, saveFileName)\n', (1708, 1731), False, 'import os\n'), ((1736, 1769), 'cv2.imwrite', 'cv2.imwrite', (['saveFileName', 'decimg'], {}), '(saveFileName, decimg)\n', (1747, 1769), False, 'import cv2\n'), ((1976, 2004), 'flask.request.data.decode', 'request.data.decode', (['"""utf-8"""'], {}), "('utf-8')\n", (1995, 
2004), False, 'from flask import Flask, request, Response\n'), ((2021, 2037), 'json.loads', 'json.loads', (['data'], {}), '(data)\n', (2031, 2037), False, 'import json\n'), ((2312, 2334), 'json.dumps', 'json.dumps', (['prediction'], {}), '(prediction)\n', (2322, 2334), False, 'import json\n'), ((2083, 2094), 'time.time', 'time.time', ([], {}), '()\n', (2092, 2094), False, 'import time\n'), ((849, 870), 'os.path.join', 'os.path.join', (['savedir'], {}), '(savedir)\n', (861, 870), False, 'import os\n'), ((890, 911), 'os.path.join', 'os.path.join', (['savedir'], {}), '(savedir)\n', (902, 911), False, 'import os\n'), ((981, 995), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (993, 995), False, 'from datetime import datetime\n'), ((1038, 1059), 'os.path.join', 'os.path.join', (['savedir'], {}), '(savedir)\n', (1050, 1059), False, 'import os\n'), ((1079, 1100), 'os.path.join', 'os.path.join', (['savedir'], {}), '(savedir)\n', (1091, 1100), False, 'import os\n'), ((1219, 1240), 'os.path.join', 'os.path.join', (['savedir'], {}), '(savedir)\n', (1231, 1240), False, 'import os\n'), ((1260, 1281), 'os.path.join', 'os.path.join', (['savedir'], {}), '(savedir)\n', (1272, 1281), False, 'import os\n'), ((1402, 1423), 'os.path.join', 'os.path.join', (['savedir'], {}), '(savedir)\n', (1414, 1423), False, 'import os\n'), ((1443, 1464), 'os.path.join', 'os.path.join', (['savedir'], {}), '(savedir)\n', (1455, 1464), False, 'import os\n'), ((1637, 1651), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1649, 1651), False, 'from datetime import datetime\n'), ((2510, 2540), 'json.dumps', 'json.dumps', (["{'message': 'ok!'}"], {}), "({'message': 'ok!'})\n", (2520, 2540), False, 'import json\n')] |
import unittest
import numpy.testing as npt
import numpy as np
from doatools.model import UniformLinearArray, FarField1DSourcePlacement
from doatools.performance import crb_det_farfield_1d, crb_sto_farfield_1d, crb_stouc_farfield_1d
class TestCRB(unittest.TestCase):
    """Regression tests for the far-field 1D Cramér-Rao bound helpers.

    The expected CRB matrices are precomputed reference values, so these
    tests guard against numerical regressions rather than re-deriving
    the bounds analytically.
    """
    def setUp(self):
        # All cases below use a unit wavelength with lambda/2 arrays.
        self.wavelength = 1.0
    def test_det_farfield_1d(self):
        """Deterministic (conditional) signal model CRB vs. reference."""
        ula = UniformLinearArray(10, self.wavelength / 2)
        sources = FarField1DSourcePlacement(np.linspace(-np.pi/3, np.pi/4, 3))
        P = np.array([
            [10.0 , 1.0+0.3j, 0.5-0.1j],
            [1.0-0.3j, 11.0, 0.9-0.2j],
            [0.5+0.1j, 0.9+0.2j, 9.0]
        ])
        sigma = 1.0
        n_snapshots = 100
        CRB_actual = crb_det_farfield_1d(ula, sources, self.wavelength, P, sigma, n_snapshots)
        CRB_expected = np.array([
            [ 2.636339e-06, -2.947961e-08, -2.894480e-08],
            [-2.947961e-08, 5.868361e-07, -1.254243e-08],
            [-2.894480e-08, -1.254243e-08, 1.495266e-06]
        ])
        npt.assert_allclose(CRB_actual, CRB_expected, rtol=1e-6)
    def test_sto_farfield_1d(self):
        """Stochastic signal model CRB vs. reference."""
        ula = UniformLinearArray(10, self.wavelength / 2)
        sources = FarField1DSourcePlacement(np.linspace(-np.pi/3, np.pi/4, 3))
        P = np.array([
            [10.0 , 1.0+0.3j, 0.5-0.1j],
            [1.0-0.3j, 11.0, 0.9-0.2j],
            [0.5+0.1j, 0.9+0.2j, 9.0]
        ])
        sigma = 1.0
        n_snapshots = 100
        CRB_actual = crb_sto_farfield_1d(ula, sources, self.wavelength, P, sigma, n_snapshots)
        CRB_expected = np.array([
            [ 2.663109e-06, -3.037374e-08, -2.994223e-08],
            [-3.037374e-08, 5.922466e-07, -1.287051e-08],
            [-2.994223e-08, -1.287051e-08, 1.512018e-06]
        ])
        npt.assert_allclose(CRB_actual, CRB_expected, rtol=1e-6)
    def test_stouc_farfield_1d(self):
        """Stochastic uncorrelated signal model CRB vs. reference."""
        ula = UniformLinearArray(10, self.wavelength / 2)
        sources = FarField1DSourcePlacement(np.linspace(-np.pi/3, np.pi/4, 3))
        p = np.array([2.0, 3.0, 1.0])
        sigma = 1.0
        n_snapshots = 100
        CRB_actual = crb_stouc_farfield_1d(ula, sources, self.wavelength, p, sigma, n_snapshots)
        CRB_expected = np.array([
            [ 1.3757938e-05, -3.7302575e-09, 4.3845873e-08],
            [-3.7302575e-09, 2.2173076e-06, 5.0642214e-09],
            [ 4.3845873e-08, 5.0642214e-09, 1.4740719e-05]
        ])
        npt.assert_allclose(CRB_actual, CRB_expected, rtol=1e-6)
    def test_convergence_farfield_1d(self):
        """All three CRB variants agree (to ~1%) at 30 dB SNR."""
        # The three CRBs should converge when SNR is sufficiently high.
        ula = UniformLinearArray(16, self.wavelength / 2)
        sources = FarField1DSourcePlacement(np.linspace(-np.pi/3, np.pi/4, 5))
        p = np.diag(np.full((sources.size,), 1000.0))
        sigma = 1.0 # 30 dB SNR
        n_snapshots = 10
        CRB_stouc = crb_stouc_farfield_1d(ula, sources, self.wavelength, p, sigma, n_snapshots)
        CRB_sto = crb_sto_farfield_1d(ula, sources, self.wavelength, p, sigma, n_snapshots)
        CRB_det = crb_det_farfield_1d(ula, sources, self.wavelength, p, sigma, n_snapshots)
        npt.assert_allclose(np.diag(CRB_sto), np.diag(CRB_stouc), rtol=1e-2)
        npt.assert_allclose(np.diag(CRB_det), np.diag(CRB_stouc), rtol=1e-2)
if __name__ == '__main__':
    unittest.main()
| [
"numpy.testing.assert_allclose",
"numpy.diag",
"numpy.array",
"doatools.performance.crb_stouc_farfield_1d",
"numpy.linspace",
"doatools.model.UniformLinearArray",
"doatools.performance.crb_sto_farfield_1d",
"unittest.main",
"numpy.full",
"doatools.performance.crb_det_farfield_1d"
] | [((3333, 3348), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3346, 3348), False, 'import unittest\n'), ((372, 415), 'doatools.model.UniformLinearArray', 'UniformLinearArray', (['(10)', '(self.wavelength / 2)'], {}), '(10, self.wavelength / 2)\n', (390, 415), False, 'from doatools.model import UniformLinearArray, FarField1DSourcePlacement\n'), ((507, 617), 'numpy.array', 'np.array', (['[[10.0, 1.0 + 0.3j, 0.5 - 0.1j], [1.0 - 0.3j, 11.0, 0.9 - 0.2j], [0.5 + \n 0.1j, 0.9 + 0.2j, 9.0]]'], {}), '([[10.0, 1.0 + 0.3j, 0.5 - 0.1j], [1.0 - 0.3j, 11.0, 0.9 - 0.2j], [\n 0.5 + 0.1j, 0.9 + 0.2j, 9.0]])\n', (515, 617), True, 'import numpy as np\n'), ((727, 800), 'doatools.performance.crb_det_farfield_1d', 'crb_det_farfield_1d', (['ula', 'sources', 'self.wavelength', 'P', 'sigma', 'n_snapshots'], {}), '(ula, sources, self.wavelength, P, sigma, n_snapshots)\n', (746, 800), False, 'from doatools.performance import crb_det_farfield_1d, crb_sto_farfield_1d, crb_stouc_farfield_1d\n'), ((824, 975), 'numpy.array', 'np.array', (['[[2.636339e-06, -2.947961e-08, -2.89448e-08], [-2.947961e-08, 5.868361e-07,\n -1.254243e-08], [-2.89448e-08, -1.254243e-08, 1.495266e-06]]'], {}), '([[2.636339e-06, -2.947961e-08, -2.89448e-08], [-2.947961e-08, \n 5.868361e-07, -1.254243e-08], [-2.89448e-08, -1.254243e-08, 1.495266e-06]])\n', (832, 975), True, 'import numpy as np\n'), ((1030, 1087), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['CRB_actual', 'CRB_expected'], {'rtol': '(1e-06)'}), '(CRB_actual, CRB_expected, rtol=1e-06)\n', (1049, 1087), True, 'import numpy.testing as npt\n'), ((1138, 1181), 'doatools.model.UniformLinearArray', 'UniformLinearArray', (['(10)', '(self.wavelength / 2)'], {}), '(10, self.wavelength / 2)\n', (1156, 1181), False, 'from doatools.model import UniformLinearArray, FarField1DSourcePlacement\n'), ((1273, 1383), 'numpy.array', 'np.array', (['[[10.0, 1.0 + 0.3j, 0.5 - 0.1j], [1.0 - 0.3j, 11.0, 0.9 - 0.2j], [0.5 + \n 0.1j, 0.9 + 0.2j, 9.0]]'], {}), 
'([[10.0, 1.0 + 0.3j, 0.5 - 0.1j], [1.0 - 0.3j, 11.0, 0.9 - 0.2j], [\n 0.5 + 0.1j, 0.9 + 0.2j, 9.0]])\n', (1281, 1383), True, 'import numpy as np\n'), ((1493, 1566), 'doatools.performance.crb_sto_farfield_1d', 'crb_sto_farfield_1d', (['ula', 'sources', 'self.wavelength', 'P', 'sigma', 'n_snapshots'], {}), '(ula, sources, self.wavelength, P, sigma, n_snapshots)\n', (1512, 1566), False, 'from doatools.performance import crb_det_farfield_1d, crb_sto_farfield_1d, crb_stouc_farfield_1d\n'), ((1590, 1748), 'numpy.array', 'np.array', (['[[2.663109e-06, -3.037374e-08, -2.994223e-08], [-3.037374e-08, 5.922466e-07,\n -1.287051e-08], [-2.994223e-08, -1.287051e-08, 1.512018e-06]]'], {}), '([[2.663109e-06, -3.037374e-08, -2.994223e-08], [-3.037374e-08, \n 5.922466e-07, -1.287051e-08], [-2.994223e-08, -1.287051e-08, 1.512018e-06]]\n )\n', (1598, 1748), True, 'import numpy as np\n'), ((1796, 1853), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['CRB_actual', 'CRB_expected'], {'rtol': '(1e-06)'}), '(CRB_actual, CRB_expected, rtol=1e-06)\n', (1815, 1853), True, 'import numpy.testing as npt\n'), ((1906, 1949), 'doatools.model.UniformLinearArray', 'UniformLinearArray', (['(10)', '(self.wavelength / 2)'], {}), '(10, self.wavelength / 2)\n', (1924, 1949), False, 'from doatools.model import UniformLinearArray, FarField1DSourcePlacement\n'), ((2041, 2066), 'numpy.array', 'np.array', (['[2.0, 3.0, 1.0]'], {}), '([2.0, 3.0, 1.0])\n', (2049, 2066), True, 'import numpy as np\n'), ((2134, 2209), 'doatools.performance.crb_stouc_farfield_1d', 'crb_stouc_farfield_1d', (['ula', 'sources', 'self.wavelength', 'p', 'sigma', 'n_snapshots'], {}), '(ula, sources, self.wavelength, p, sigma, n_snapshots)\n', (2155, 2209), False, 'from doatools.performance import crb_det_farfield_1d, crb_sto_farfield_1d, crb_stouc_farfield_1d\n'), ((2233, 2396), 'numpy.array', 'np.array', (['[[1.3757938e-05, -3.7302575e-09, 4.3845873e-08], [-3.7302575e-09, \n 2.2173076e-06, 5.0642214e-09], [4.3845873e-08, 
5.0642214e-09, \n 1.4740719e-05]]'], {}), '([[1.3757938e-05, -3.7302575e-09, 4.3845873e-08], [-3.7302575e-09, \n 2.2173076e-06, 5.0642214e-09], [4.3845873e-08, 5.0642214e-09, \n 1.4740719e-05]])\n', (2241, 2396), True, 'import numpy as np\n'), ((2445, 2502), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['CRB_actual', 'CRB_expected'], {'rtol': '(1e-06)'}), '(CRB_actual, CRB_expected, rtol=1e-06)\n', (2464, 2502), True, 'import numpy.testing as npt\n'), ((2633, 2676), 'doatools.model.UniformLinearArray', 'UniformLinearArray', (['(16)', '(self.wavelength / 2)'], {}), '(16, self.wavelength / 2)\n', (2651, 2676), False, 'from doatools.model import UniformLinearArray, FarField1DSourcePlacement\n'), ((2887, 2962), 'doatools.performance.crb_stouc_farfield_1d', 'crb_stouc_farfield_1d', (['ula', 'sources', 'self.wavelength', 'p', 'sigma', 'n_snapshots'], {}), '(ula, sources, self.wavelength, p, sigma, n_snapshots)\n', (2908, 2962), False, 'from doatools.performance import crb_det_farfield_1d, crb_sto_farfield_1d, crb_stouc_farfield_1d\n'), ((2981, 3054), 'doatools.performance.crb_sto_farfield_1d', 'crb_sto_farfield_1d', (['ula', 'sources', 'self.wavelength', 'p', 'sigma', 'n_snapshots'], {}), '(ula, sources, self.wavelength, p, sigma, n_snapshots)\n', (3000, 3054), False, 'from doatools.performance import crb_det_farfield_1d, crb_sto_farfield_1d, crb_stouc_farfield_1d\n'), ((3073, 3146), 'doatools.performance.crb_det_farfield_1d', 'crb_det_farfield_1d', (['ula', 'sources', 'self.wavelength', 'p', 'sigma', 'n_snapshots'], {}), '(ula, sources, self.wavelength, p, sigma, n_snapshots)\n', (3092, 3146), False, 'from doatools.performance import crb_det_farfield_1d, crb_sto_farfield_1d, crb_stouc_farfield_1d\n'), ((460, 497), 'numpy.linspace', 'np.linspace', (['(-np.pi / 3)', '(np.pi / 4)', '(3)'], {}), '(-np.pi / 3, np.pi / 4, 3)\n', (471, 497), True, 'import numpy as np\n'), ((1226, 1263), 'numpy.linspace', 'np.linspace', (['(-np.pi / 3)', '(np.pi / 4)', '(3)'], {}), 
'(-np.pi / 3, np.pi / 4, 3)\n', (1237, 1263), True, 'import numpy as np\n'), ((1994, 2031), 'numpy.linspace', 'np.linspace', (['(-np.pi / 3)', '(np.pi / 4)', '(3)'], {}), '(-np.pi / 3, np.pi / 4, 3)\n', (2005, 2031), True, 'import numpy as np\n'), ((2721, 2758), 'numpy.linspace', 'np.linspace', (['(-np.pi / 3)', '(np.pi / 4)', '(5)'], {}), '(-np.pi / 3, np.pi / 4, 5)\n', (2732, 2758), True, 'import numpy as np\n'), ((2776, 2808), 'numpy.full', 'np.full', (['(sources.size,)', '(1000.0)'], {}), '((sources.size,), 1000.0)\n', (2783, 2808), True, 'import numpy as np\n'), ((3175, 3191), 'numpy.diag', 'np.diag', (['CRB_sto'], {}), '(CRB_sto)\n', (3182, 3191), True, 'import numpy as np\n'), ((3193, 3211), 'numpy.diag', 'np.diag', (['CRB_stouc'], {}), '(CRB_stouc)\n', (3200, 3211), True, 'import numpy as np\n'), ((3252, 3268), 'numpy.diag', 'np.diag', (['CRB_det'], {}), '(CRB_det)\n', (3259, 3268), True, 'import numpy as np\n'), ((3270, 3288), 'numpy.diag', 'np.diag', (['CRB_stouc'], {}), '(CRB_stouc)\n', (3277, 3288), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
#
# STS spectrometer
# ================
#
# To implement further functionality you basically just need to look at
# the two communication layer functions:
# self._send_command and self._query_data
#
# based on those you can implement other commands.
# call them with the right message constant in self._const.MSG_...
# and the payload data packed in a string.
#
""" File: STS.py
Author: <NAME>, <NAME>
Last change: 2014/08/15
   Python Interface for STS OceanOptics Spectrometers.
Current device classes:
* STS
"""
#----------------------------------------------------------
import struct
from oceanoptics.defines import OceanOpticsError as _OOError
from oceanoptics.defines import OceanOpticsMinMaxIntegrationTime as _OOMinMaxIT
from oceanoptics.base import OceanOpticsSpectrometer as _OOSpec
from oceanoptics.base import OceanOpticsUSBComm as _OOUSBComm
import numpy as np
import time
import hashlib
import warnings
#----------------------------------------------------------
class _STSCONSTANTS(object):
    """All relevant constants are stored here.

    Values mirror the Ocean Optics STS datasheet: protocol header/footer
    magic values, status flags, error codes, message-type identifiers and
    the struct format strings used to (un)pack the 44-byte header and the
    20-byte (checksum + footer) trailer.
    """
    HEADER_START_BYTES = 0xC0C1
    HEADER_PROTOCOL_VERSION = 0x1100  # XXX: this seems to be the newest protocol version!!!
    # Header flag bits (bitmask)
    FLAG_RESPONSE_TO_REQUEST = 0x0001
    FLAG_ACK = 0x0002
    FLAG_REQUEST_ACK = 0x0004
    FLAG_NACK = 0x0008
    FLAG_HW_EXCEPTION = 0x0010
    FLAG_PROTOCOL_DEPRECATED = 0x0020
    # Device error numbers -> human readable description
    ERROR_CODES = { 0: 'Success (no detectable errors)',
                    1: 'Invalid/unsupported protocol',
                    2: 'Unknown message type',
                    3: 'Bad checksum',
                    4: 'Message too large',
                    5: 'Payload length does not match message type',
                    6: 'Payload data invalid',
                    7: 'Device not ready for given message type',
                    8: 'Unknown checksum type',
                    9: 'Device reset unexpectedly',
                    10: 'Too many buses (Commands have come from too many bus interfaces)',
                    11: 'Out of memory. Failed to allocate enough space to complete request.',
                    12: 'Command is valid, but desired information does not exist.',
                    13: 'Int Device Error. May be unrecoverable.',
                    100: 'Could not decrypt properly',
                    101: 'Firmware layout invalid',
                    102: 'Data packet was wrong size',
                    103: 'hardware revision not compatible with firmware',
                    104: 'Existing flash map not compatible with firmware',
                    255: 'Operation/Response Deferred. Operation will take some time to complete. Do not ACK or NACK yet.',
                  }
    NO_ERROR = 0x0000
    RESERVED = ""
    CHECKSUM_TYPE_NONE = 0x00
    CHECKSUM_TYPE_MD5 = 0x01
    NO_CHECKSUM = ""
    FOOTER = 0xC2C3C4C5 # the datasheet specifies it in this order...
    # Generic Device Commands
    MSG_RESET = 0x00000000
    MSG_RESET_DEFAULTS = 0x00000001
    MSG_GET_HARDWARE_REVISION = 0x00000080
    MSG_GET_FIRMWARE_REVISION = 0x00000090
    MSG_GET_SERIAL_NUMBER = 0x00000100
    MSG_GET_DEVICE_ALIAS = 0x00000200
    MSG_GET_DEVICE_ALIAS_LENGTH = 0x00000201
    MSG_SET_DEVICE_ALIAS = 0x00000210
    MSG_GET_NUMBER_USER_STRINGS = 0x00000300
    MSG_GET_USER_STRING_LENGTH = 0x00000301
    MSG_GET_USER_STRING = 0x00000302
    MSG_SET_USER_STRING = 0x00000310
    MSG_GET_RS232_BAUDRATE = 0x00000800
    MSG_GET_RS232_FLOW_CONTROL_MODE = 0x00000804
    MSG_SET_RS232_BAUDRATE = 0x00000810
    MSG_SET_RS232_FLOW_CONTROL_MODE = 0x00000814
    MSG_SAVE_RS232_SETTINGS = 0x000008F0
    MSG_CONFIGURE_STATUS_LED = 0x00001010
    MSG_REPROGRAMMING_MODE = 0x000FFF00
    # Spectrometer Commands
    MSG_GET_AND_SEND_CORRECTED_SPECTRUM = 0x00101000
    MSG_GET_AND_SEND_RAW_SPECTRUM = 0x00101100
    MSG_GET_PARTIAL_SPECTRUM_MODE = 0x00102000
    MSG_GET_AND_SEND_PARTIAL_CORRECTED_SPECTRUM = 0x00102080
    MSG_SET_INTEGRATION_TIME = 0x00110010
    MSG_SET_TRIGGER_MODE = 0x00110110
    MSG_SIMULATE_TRIGGER_PULSE = 0x00110120
    MSG_GET_PIXEL_BINNING_FACTOR = 0x00110280
    MSG_GET_MAXIMUM_BINNING_FACTOR = 0x00110281
    MSG_SET_BINNING_FACTOR = 0x00110290
    MSG_SET_DEFAULT_BINNING_FACTOR = 0x00110295
    MSG_SET_LAMP_ENABLE = 0x00110410
    MSG_SET_TRIGGER_DELAY = 0x00110510
    MSG_GET_SCANS_TO_AVERAGE = 0x00120000
    MSG_SET_SCANS_TO_AVERAGE = 0x00120010
    MSG_GET_BOXCAR_WIDTH = 0x00121000
    MSG_SET_BOXCAR_WIDTH = 0x00121010
    MSG_GET_WAVELENGTH_COEFFICIENT_COUNT = 0x00180100
    MSG_GET_WAVELENGTH_COEFFICIENT = 0x00180101
    MSG_SET_WAVELENGTH_COEFFICIENT = 0x00180111
    MSG_GET_NONLINEARITY_COEFFICIENT_COUNT = 0x00181100
    MSG_GET_NONLINEARITY_COEFFICIENT = 0x00181101
    MSG_SET_NONLINEARITY_COEFFICIENT = 0x00181111
    MSG_GET_IRRADIANCE_CALIBRATION = 0x00182001
    MSG_GET_IRRADIANCE_CALIBRATION_COUNT = 0x00182002
    MSG_GET_IRRADIANCE_CALIBRATION_COLLECTION_AREA = 0x00182003
    MSG_SET_IRRADIANCE_CALIBRATION = 0x00182011
    MSG_SET_IRRADIANCE_CALIBRATION_COLLECTION_AREA = 0x00182013
    MSG_GET_NUMBER_STRAY_LIGHT_COEFFICIENTS = 0x00183100
    MSG_GET_STRAY_LIGHT_COEFFICIENT = 0x00183101
    MSG_SET_STRAY_LIGHT_COEFFICIENT = 0x00183111
    MSG_GET_HOT_PIXEL_INDICES = 0x00186000
    MSG_SET_HOT_PIXEL_INDICES = 0x00186010
    MSG_GET_BENCH_ID = 0x001B0000
    MSG_GET_BENCH_SERIAL_NUMBER = 0x001B0100
    MSG_GET_SLIT_WIDTH_MICRONS = 0x001B0200
    MSG_GET_FIBER_DIAMETER_MICRONS = 0x001B0300
    MSG_GET_FILTER = 0x001B0500
    MSG_GET_COATING = 0x001B0600
    # GPIO commands
    MSG_GET_NUMBER_GPIO_PINS = 0x00200000
    MSG_GET_OUTPUT_ENABLE_VECTOR = 0x00200100
    MSG_SET_OUTPUT_ENABLE_VECTOR = 0x00200110
    MSG_GET_VALUE_VECTOR = 0x00200300
    MSG_SET_VALUE_VECTOR = 0x00200310
    # Strobe commands
    MSG_SET_SINGLE_STROBE_PULSE_DELAY = 0x00300010
    MSG_SET_SINGLE_STROBE_PULSE_WIDTH = 0x00300011
    MSG_SET_SINGLE_STROBE_ENABLE = 0x00300012
    MSG_SET_CONTINUOUS_STROBE_PERIOD = 0x00310010
    MSG_SET_CONTINUOUS_STROBE_ENABLE = 0x00310011
    # Temperature Commands
    MSG_GET_TEMPERATURE_SENSOR_COUNT = 0x00400000
    MSG_READ_TEMPERATURE_SENSOR = 0x00400001
    MSG_READ_ALL_TEMPERATURE_SENSORS = 0x00400002
    # 44-byte little-endian message header layout
    HEADER_FMT = ("<H"   # start_bytes
                  "H"    # protocol_version
                  "H"    # flags
                  "H"    # error number
                  "L"    # message type
                  "L"    # regarding
                  "6s"   # reserved
                  "B"    # checksum type
                  "B"    # immediate length
                  "16s"  # immediate data
                  "L"    # bytes remaining
                 )
    # 20-byte trailer: 16-byte checksum followed by the 4-byte footer
    FOOTER_FMT = ("16s"  # checksum
                  "L"    # footer
                 )
class STS(_OOSpec, _OOUSBComm):
"""Rewrite of STS class"""
    def __init__(self, integration_time=0.001):
        """Open the spectrometer and apply *integration_time* (seconds)."""
        super(STS, self).__init__('STS')
        self._const = _STSCONSTANTS
        # we can't query this info:
        self._pixels = 1024
        # get wavelengths
        self._wl = self._get_wavelengths()
        # set the integration time
        self._integration_time = self._set_integration_time(integration_time)
        self._min_integration_time, self._max_integration_time = _OOMinMaxIT[self.model]
#------------------------------------
# Implement High level functionality
#------------------------------------
def integration_time(self, time_sec=None):
"""get or set the integration_time in seconds
"""
if time_sec is not None:
if self._min_integration_time <= time_sec < self._max_integration_time:
self._integration_time = self._set_integration_time(time_sec)
else:
raise _OOError("Integration time for %s required to be %f <= t < %f" %
(self.model, self._min_integration_time, self._max_integration_time))
return self._integration_time
    def wavelengths(self, *args, **kwargs):
        """Return the cached wavelength array computed from the device's
        calibration coefficients."""
        # TODO: add function parameters
        return self._wl
    def intensities(self, *args, **kwargs):
        """Acquire and return one raw spectrum from the device."""
        # TODO: add function parameters
        return self._request_spectrum()
    def spectrum(self, *args, **kwargs):
        """Acquire one spectrum and return it stacked with the wavelength
        axis (row 0: wavelengths, row 1: intensities)."""
        # TODO: add function parameters
        return np.vstack((self._wl, self._request_spectrum()))
#-----------------
# low level layer
#-----------------
def _set_integration_time(self, time_sec):
"""Sets the integration time in seconds
"""
integration_time_us = int(time_sec * 1000000)
self._send_command(self._const.MSG_SET_INTEGRATION_TIME, struct.pack("<L", integration_time_us))
return integration_time_us * 1e-6
    def _get_wavelengths(self):
        """returns an array of wavelengths for the STS spectrometer

        Queries the polynomial calibration coefficients from the device
        and evaluates the polynomial over the pixel indices.
        """
        # Get the number of wavelength coefficients first
        data = self._query_data(self._const.MSG_GET_WAVELENGTH_COEFFICIENT_COUNT, "")
        N_wlcoeff = struct.unpack("<B", data)[0]
        # Then query the coefficients
        wlcoefficients = []
        for i in range(N_wlcoeff):
            data = self._query_data(self._const.MSG_GET_WAVELENGTH_COEFFICIENT, struct.pack("<B", i))
            wlcoefficients.append(struct.unpack("<f", data)[0])
        # Now, generate the wavelength array: sum_i c_i * pixel**i
        return sum( wlcoefficients[i] * np.arange(self._pixels, dtype=np.float64)**i for i in range(N_wlcoeff) )
    def _request_spectrum(self):
        """returns the spectrum array.

        Sends a raw-spectrum request, sleeps roughly one integration
        time, then reads the (possibly multi-packet) reply, checks its
        length and checksum and unpacks the pixel values.
        """
        # Get all data
        msg = self._construct_outgoing_message(self._const.MSG_GET_AND_SEND_RAW_SPECTRUM, "")
        self._usb_send(msg)
        # Wait while the device integrates (minus the USB read timeout).
        time.sleep(max(self._integration_time - self._USBTIMEOUT, 0))
        ret = self._usb_read()
        # The first 44 bytes are the protocol header (see HEADER_FMT).
        remaining_bytes, checksumtype = self._check_incoming_message_header(ret[:44])
        length_payload_footer = remaining_bytes
        remaining_bytes -= len(ret[44:])
        # Keep reading until payload + footer have fully arrived.
        while True:
            if remaining_bytes <= 0:
                break
            N_bytes = min(remaining_bytes, self._EPin0_size)
            ret += self._usb_read(epi_size=N_bytes)
            remaining_bytes -= N_bytes
        if length_payload_footer != len(ret[44:]):
            raise _OOError("There is a remaining packet length error: %d vs %d" % (remaining_bytes, len(ret[44:])))
        # The last 20 bytes are the 16-byte checksum plus 4-byte footer.
        checksum = self._check_incoming_message_footer(ret[-20:])
        if (checksumtype == self._const.CHECKSUM_TYPE_MD5) and (checksum != hashlib.md5(ret[:-20]).digest()):
            # TODO: raise Error
            warnings.warn("The checksums differ, but we ignore this for now.")
        data = self._extract_message_data(ret)
        spectrum = struct.unpack("<%dH" % self._pixels, data)
        return np.array(spectrum, dtype=np.float64)
#-----------------------------
# communication functionality
#-----------------------------
    def _query_data(self, msgtype, payload):
        """recommended query function

        Sends *msgtype* with *payload*, reads the reply, validates the
        header, length and (optional MD5) checksum, and returns the
        extracted payload bytes.
        """
        msg = self._construct_outgoing_message(msgtype, payload, request_ACK=False)
        ret = self._usb_query(msg)
        # The first 44 bytes are the protocol header (see HEADER_FMT).
        remaining_bytes, checksumtype = self._check_incoming_message_header(ret[:44])
        if remaining_bytes != len(ret[44:]):
            raise _OOError("There is a remaining packet length error: %d vs %d" % (remaining_bytes, len(ret[44:])))
        # The last 20 bytes are the 16-byte checksum plus 4-byte footer.
        checksum = self._check_incoming_message_footer(ret[-20:])
        if (checksumtype == self._const.CHECKSUM_TYPE_MD5) and (checksum != hashlib.md5(ret[:-20]).digest()):
            # TODO: raise Error
            warnings.warn("The checksums differ, but we ignore this for now.")
        data = self._extract_message_data(ret)
        return data
    def _send_command(self, msgtype, payload):
        """recommended command function

        Sends *msgtype* with *payload* requesting an ACK, then validates
        the ACK's header and (optional MD5) checksum.  Returns nothing.
        """
        msg = self._construct_outgoing_message(msgtype, payload, request_ACK=True)
        ret = self._usb_query(msg)
        _, checksumtype = self._check_incoming_message_header(ret[:44])
        checksum = self._check_incoming_message_footer(ret[-20:])
        if (checksumtype == self._const.CHECKSUM_TYPE_MD5) and (checksum != hashlib.md5(ret[:-20]).digest()):
            # TODO: raise Error
            warnings.warn("The checksums differ, but we ignore this for now.")
        return
    def _construct_outgoing_message(self, msgtype, payload, request_ACK=False, regarding=None):
        """Build a raw protocol packet: 44-byte header, optional payload, 20-byte footer.

        message layout, see STS datasheet

        :param msgtype: protocol message-type code
        :param payload: message body; up to 16 bytes travel in the header's
            fixed immediate-data field, longer data becomes a payload section
        :param request_ACK: if True, set the flag asking the device to ACK
        :param regarding: echo field used to match responses (0 if None)
        :return: the packed message
        """
        if request_ACK == True:
            flags = self._const.FLAG_REQUEST_ACK
        else:
            flags = 0
        if regarding is None:
            regarding = 0
        # Short data fits into the 16-byte immediate-data header field; the
        # variable-length payload section is then empty ("0s").
        if len(payload) <= 16:
            payload_fmt = "0s"
            immediate_length = len(payload)
            immediate_data = payload
            # NOTE(review): empty str literals here suggest Python 2 semantics;
            # struct "s" fields require bytes on Python 3 -- confirm target version.
            payload = ""
            bytes_remaining = 20 # Checksum + footer
        else:
            payload_fmt = "%ds" % len(payload)
            immediate_length = 0
            immediate_data = ""
            bytes_remaining = 20 + len(payload)
        FMT = self._const.HEADER_FMT + payload_fmt + self._const.FOOTER_FMT
        msg = struct.pack(FMT, self._const.HEADER_START_BYTES,
                          self._const.HEADER_PROTOCOL_VERSION,
                          flags,
                          self._const.NO_ERROR,
                          msgtype,
                          regarding,
                          self._const.RESERVED,
                          self._const.CHECKSUM_TYPE_NONE,
                          immediate_length,
                          immediate_data,
                          bytes_remaining,
                          payload,
                          self._const.NO_CHECKSUM,
                          self._const.FOOTER)
        return msg
    def _check_incoming_message_header(self, header):
        """Validate a 44-byte incoming message header.

        message layout, see STS datasheet

        :param header: the first 44 bytes of an incoming message
        :return: (bytes_remaining, checksumtype) -- bytes that follow the
            header (payload + footer) and the checksum scheme in use
        :raises _OOError: on a NACK/HW-exception carrying a non-zero error
            code, or if the device flags the protocol as deprecated
        """
        assert len(header) == 44, "header has wrong length! len(header): %d" % len(header)
        data = struct.unpack(self._const.HEADER_FMT, header)
        assert data[0] == self._const.HEADER_START_BYTES, 'header start_bytes wrong: %d' % data[0]
        assert data[1] == self._const.HEADER_PROTOCOL_VERSION, 'header protocol version wrong: %d' % data[1]
        flags = data[2]
        if flags == 0:
            pass
        if flags & self._const.FLAG_RESPONSE_TO_REQUEST:
            pass # TODO: propagate?
        if flags & self._const.FLAG_ACK:
            pass # TODO: propagate?
        if flags & self._const.FLAG_REQUEST_ACK:
            pass # TODO: only the host should be able to set this?
        # NACK or hardware exception: surface the device's error code, if any.
        if (flags & self._const.FLAG_NACK) or (flags & self._const.FLAG_HW_EXCEPTION):
            error = data[3]
            if error != 0: # != SUCCESS
                raise _OOError(self._const.ERROR_CODES[error])
            else:
                pass # TODO: should we do something here?
        if flags & self._const.FLAG_PROTOCOL_DEPRECATED:
            raise _OOError("Protocol deprecated?!?")
        # msgtype = data[4]
        # regarding = data[5]
        checksumtype = data[7] # TODO: implement checksums.
        assert checksumtype in [self._const.CHECKSUM_TYPE_NONE, self._const.CHECKSUM_TYPE_MD5], 'the checksum type is unkown: %d' % checksumtype
        # immediate_length = data[8]
        # immediate_data = data[9]
        bytes_remaining = data[10]
        return bytes_remaining, checksumtype
def _check_incoming_message_footer(self, footer):
"""message layout, see STS datasheet
"""
assert len(footer) == 20, "footer has wrong length! len(footer): %d" % len(footer)
data = struct.unpack("<" + self._const.FOOTER_FMT, footer)
checksum = data[0]
assert data[1] == self._const.FOOTER, "the device returned a wrong footer: %d" % data[1]
return checksum
def _extract_message_data(self, msg):
"""message layout, see STS datasheet
"""
payload_length = len(msg) - 44 - 20 # - HeaderLength - FooterLength
assert payload_length >= 0, "the received message was shorter than 64 bytes: %d" % payload_length
payload_fmt = "%ds" % payload_length
FMT = self._const.HEADER_FMT + payload_fmt + self._const.FOOTER_FMT
data = struct.unpack(FMT, msg)
msgtype = data[4]
immediate_length = data[8]
immediate_data = data[9]
payload = data[11]
if (immediate_length > 0) and len(payload) > 0:
raise _OOError("the device returned immediate data and payload data? cmd: %d" % msgtype)
elif immediate_length > 0:
return immediate_data[:immediate_length]
elif payload_length > 0:
return payload
else:
return ""
| [
"hashlib.md5",
"struct.pack",
"numpy.array",
"struct.unpack",
"oceanoptics.defines.OceanOpticsError",
"warnings.warn",
"numpy.arange"
] | [((10797, 10839), 'struct.unpack', 'struct.unpack', (["('<%dH' % self._pixels)", 'data'], {}), "('<%dH' % self._pixels, data)\n", (10810, 10839), False, 'import struct\n'), ((10855, 10891), 'numpy.array', 'np.array', (['spectrum'], {'dtype': 'np.float64'}), '(spectrum, dtype=np.float64)\n', (10863, 10891), True, 'import numpy as np\n'), ((13207, 13518), 'struct.pack', 'struct.pack', (['FMT', 'self._const.HEADER_START_BYTES', 'self._const.HEADER_PROTOCOL_VERSION', 'flags', 'self._const.NO_ERROR', 'msgtype', 'regarding', 'self._const.RESERVED', 'self._const.CHECKSUM_TYPE_NONE', 'immediate_length', 'immediate_data', 'bytes_remaining', 'payload', 'self._const.NO_CHECKSUM', 'self._const.FOOTER'], {}), '(FMT, self._const.HEADER_START_BYTES, self._const.\n HEADER_PROTOCOL_VERSION, flags, self._const.NO_ERROR, msgtype,\n regarding, self._const.RESERVED, self._const.CHECKSUM_TYPE_NONE,\n immediate_length, immediate_data, bytes_remaining, payload, self._const\n .NO_CHECKSUM, self._const.FOOTER)\n', (13218, 13518), False, 'import struct\n'), ((14143, 14188), 'struct.unpack', 'struct.unpack', (['self._const.HEADER_FMT', 'header'], {}), '(self._const.HEADER_FMT, header)\n', (14156, 14188), False, 'import struct\n'), ((15798, 15849), 'struct.unpack', 'struct.unpack', (["('<' + self._const.FOOTER_FMT)", 'footer'], {}), "('<' + self._const.FOOTER_FMT, footer)\n", (15811, 15849), False, 'import struct\n'), ((16421, 16444), 'struct.unpack', 'struct.unpack', (['FMT', 'msg'], {}), '(FMT, msg)\n', (16434, 16444), False, 'import struct\n'), ((8717, 8755), 'struct.pack', 'struct.pack', (['"""<L"""', 'integration_time_us'], {}), "('<L', integration_time_us)\n", (8728, 8755), False, 'import struct\n'), ((9077, 9102), 'struct.unpack', 'struct.unpack', (['"""<B"""', 'data'], {}), "('<B', data)\n", (9090, 9102), False, 'import struct\n'), ((10663, 10729), 'warnings.warn', 'warnings.warn', (['"""The checksums differ, but we ignore this for now."""'], {}), "('The checksums differ, but we 
ignore this for now.')\n", (10676, 10729), False, 'import warnings\n'), ((11672, 11738), 'warnings.warn', 'warnings.warn', (['"""The checksums differ, but we ignore this for now."""'], {}), "('The checksums differ, but we ignore this for now.')\n", (11685, 11738), False, 'import warnings\n'), ((12307, 12373), 'warnings.warn', 'warnings.warn', (['"""The checksums differ, but we ignore this for now."""'], {}), "('The checksums differ, but we ignore this for now.')\n", (12320, 12373), False, 'import warnings\n'), ((15123, 15157), 'oceanoptics.defines.OceanOpticsError', '_OOError', (['"""Protocol deprecated?!?"""'], {}), "('Protocol deprecated?!?')\n", (15131, 15157), True, 'from oceanoptics.defines import OceanOpticsError as _OOError\n'), ((16643, 16729), 'oceanoptics.defines.OceanOpticsError', '_OOError', (["('the device returned immediate data and payload data? cmd: %d' % msgtype)"], {}), "('the device returned immediate data and payload data? cmd: %d' %\n msgtype)\n", (16651, 16729), True, 'from oceanoptics.defines import OceanOpticsError as _OOError\n'), ((7840, 7979), 'oceanoptics.defines.OceanOpticsError', '_OOError', (["('Integration time for %s required to be %f <= t < %f' % (self.model, self.\n _min_integration_time, self._max_integration_time))"], {}), "('Integration time for %s required to be %f <= t < %f' % (self.\n model, self._min_integration_time, self._max_integration_time))\n", (7848, 7979), True, 'from oceanoptics.defines import OceanOpticsError as _OOError\n'), ((9288, 9308), 'struct.pack', 'struct.pack', (['"""<B"""', 'i'], {}), "('<B', i)\n", (9299, 9308), False, 'import struct\n'), ((14930, 14970), 'oceanoptics.defines.OceanOpticsError', '_OOError', (['self._const.ERROR_CODES[error]'], {}), '(self._const.ERROR_CODES[error])\n', (14938, 14970), True, 'from oceanoptics.defines import OceanOpticsError as _OOError\n'), ((9344, 9369), 'struct.unpack', 'struct.unpack', (['"""<f"""', 'data'], {}), "('<f', data)\n", (9357, 9369), False, 'import 
struct\n'), ((9460, 9501), 'numpy.arange', 'np.arange', (['self._pixels'], {'dtype': 'np.float64'}), '(self._pixels, dtype=np.float64)\n', (9469, 9501), True, 'import numpy as np\n'), ((10585, 10607), 'hashlib.md5', 'hashlib.md5', (['ret[:-20]'], {}), '(ret[:-20])\n', (10596, 10607), False, 'import hashlib\n'), ((11594, 11616), 'hashlib.md5', 'hashlib.md5', (['ret[:-20]'], {}), '(ret[:-20])\n', (11605, 11616), False, 'import hashlib\n'), ((12229, 12251), 'hashlib.md5', 'hashlib.md5', (['ret[:-20]'], {}), '(ret[:-20])\n', (12240, 12251), False, 'import hashlib\n')] |
import streamlit as st
import pandas as pd
import numpy as np
# 1000 normally distributed points scattered around San Francisco.
coords = np.random.randn(1000, 2) / [50, 50] + [37.76, -122.4]
df = pd.DataFrame(coords, columns=["lat", "lon"])

# TODO: Use this instead of the example below. Need to autodetect viewport
# first, though.
# st.deck_gl_chart(df)

viewport = {"latitude": 37.76, "longitude": -122.4, "zoom": 11, "pitch": 50}
st.deck_gl_chart(
    viewport=viewport,
    layers=[{"type": "ScatterplotLayer", "data": df}],
)
| [
"numpy.random.randn",
"streamlit.deck_gl_chart"
] | [((284, 432), 'streamlit.deck_gl_chart', 'st.deck_gl_chart', ([], {'viewport': "{'latitude': 37.76, 'longitude': -122.4, 'zoom': 11, 'pitch': 50}", 'layers': "[{'type': 'ScatterplotLayer', 'data': df}]"}), "(viewport={'latitude': 37.76, 'longitude': -122.4, 'zoom': \n 11, 'pitch': 50}, layers=[{'type': 'ScatterplotLayer', 'data': df}])\n", (300, 432), True, 'import streamlit as st\n'), ((86, 110), 'numpy.random.randn', 'np.random.randn', (['(1000)', '(2)'], {}), '(1000, 2)\n', (101, 110), True, 'import numpy as np\n')] |
"""
Test file that uses pytest to ensure that the MPyC implementation of the matrix inverse
correctly computes the matrix inverse.
"""
from typing import Any, List, Tuple, Type, Union
import pytest
from mpyc.runtime import mpc
from mpyc.sectypes import SecureFixedPoint
from numpy import abs as abs_
from numpy import array, divide, linalg, random, zeros_like
from tno.mpc.mpyc.matrix_inverse.matrix_inverse import (
SecureFixedPointMatrix,
matrix_inverse,
)
Matrix = List[List[Union[float, int]]]

# Diagonal matrices with easily verified inverses (plus negated/reciprocal
# variants) and one dense random 5x5 matrix with entries in [-100, 100).
_random_matrix: Matrix = (random.randint(low=-1000, high=1000, size=(5, 5)) / 10).tolist()
matrices: List[Matrix] = [
    [[2, 0], [0, 3]],
    [[1 / 2, 0], [0, 1 / 3]],
    [[-2, 0], [0, 3]],
    [[-1 / 2, 0], [0, 1 / 3]],
    _random_matrix,
]
async def async_test_matrix_inverse(testcase: Tuple[Matrix, Any]) -> None:
    """
    Check that the secure matrix inverse agrees with the plaintext inverse up
    to a small absolute and relative margin.

    :param testcase: tuple of a matrix and its correct inverse
    """
    plain, expected_inverse = testcase
    bit_length = 32
    frac_length = 16
    await mpc.start()
    secfxp: Type[SecureFixedPoint] = mpc.SecFxp(l=bit_length, f=frac_length)
    shares: SecureFixedPointMatrix = [[secfxp(entry) for entry in row] for row in plain]
    shares = [mpc.input(row, 0) for row in shares]
    # noinspection PyTypeChecker
    secure_inverse: SecureFixedPointMatrix = matrix_inverse(shares)
    inverse: List[List[float]] = [await mpc.output(row) for row in secure_inverse]
    secure_product = mpc.matrix_prod(shares, secure_inverse)
    product: List[List[float]] = [await mpc.output(row) for row in secure_product]
    diff = array(expected_inverse) - array(inverse)
    # Relative difference only where the expected inverse is non-zero.
    rel_diff = divide(
        diff,
        array(expected_inverse),
        out=zeros_like(diff),
        where=array(expected_inverse) != 0,
    )
    await mpc.shutdown()
    max_abs_diff = abs_(diff).max()
    max_rel_diff = abs_(rel_diff).max()
    print(f"X = \n{array(plain)}\n")
    print(f"Xinv = \n{array(expected_inverse)}\n")
    print(f"Xinv_mpc = \n{array(inverse)}\n")
    print(f"X * Xinv_mpc = \n{array(product)}\n")
    print(f"max absolute diff = {max_abs_diff}")
    print(f"max relative diff (nonzero entries) = {max_rel_diff}")
    assert max_abs_diff < 0.05 and max_rel_diff < 0.5
@pytest.mark.parametrize(
    "test_case",
    [(mat, linalg.inv(mat).tolist()) for mat in matrices],
)
def test_matrix_inverse(test_case: Tuple[Matrix, Any]) -> None:
    """
    Synchronously drive the asynchronous matrix-inverse check.

    :param test_case: tuple containing a matrix and its inverse
    """
    mpc.run(async_test_matrix_inverse(test_case))
| [
"mpyc.runtime.mpc.output",
"numpy.abs",
"mpyc.runtime.mpc.start",
"mpyc.runtime.mpc.shutdown",
"mpyc.runtime.mpc.input",
"mpyc.runtime.mpc.SecFxp",
"numpy.array",
"numpy.random.randint",
"numpy.linalg.inv",
"mpyc.runtime.mpc.matrix_prod",
"tno.mpc.mpyc.matrix_inverse.matrix_inverse.matrix_invers... | [((1177, 1216), 'mpyc.runtime.mpc.SecFxp', 'mpc.SecFxp', ([], {'l': 'bit_length', 'f': 'frac_length'}), '(l=bit_length, f=frac_length)\n', (1187, 1216), False, 'from mpyc.runtime import mpc\n'), ((1450, 1479), 'tno.mpc.mpyc.matrix_inverse.matrix_inverse.matrix_inverse', 'matrix_inverse', (['secure_matrix'], {}), '(secure_matrix)\n', (1464, 1479), False, 'from tno.mpc.mpyc.matrix_inverse.matrix_inverse import SecureFixedPointMatrix, matrix_inverse\n'), ((1585, 1631), 'mpyc.runtime.mpc.matrix_prod', 'mpc.matrix_prod', (['secure_matrix', 'secure_inverse'], {}), '(secure_matrix, secure_inverse)\n', (1600, 1631), False, 'from mpyc.runtime import mpc\n'), ((1127, 1138), 'mpyc.runtime.mpc.start', 'mpc.start', ([], {}), '()\n', (1136, 1138), False, 'from mpyc.runtime import mpc\n'), ((1327, 1344), 'mpyc.runtime.mpc.input', 'mpc.input', (['row', '(0)'], {}), '(row, 0)\n', (1336, 1344), False, 'from mpyc.runtime import mpc\n'), ((1727, 1756), 'numpy.array', 'array', (['correct_matrix_inverse'], {}), '(correct_matrix_inverse)\n', (1732, 1756), False, 'from numpy import array, divide, linalg, random, zeros_like\n'), ((1759, 1773), 'numpy.array', 'array', (['inverse'], {}), '(inverse)\n', (1764, 1773), False, 'from numpy import array, divide, linalg, random, zeros_like\n'), ((1819, 1848), 'numpy.array', 'array', (['correct_matrix_inverse'], {}), '(correct_matrix_inverse)\n', (1824, 1848), False, 'from numpy import array, divide, linalg, random, zeros_like\n'), ((1947, 1961), 'mpyc.runtime.mpc.shutdown', 'mpc.shutdown', ([], {}), '()\n', (1959, 1961), False, 'from mpyc.runtime import mpc\n'), ((1520, 1535), 'mpyc.runtime.mpc.output', 'mpc.output', (['row'], {}), '(row)\n', (1530, 1535), False, 'from mpyc.runtime import mpc\n'), ((1672, 1687), 'mpyc.runtime.mpc.output', 'mpc.output', (['row'], {}), '(row)\n', (1682, 1687), False, 'from mpyc.runtime import mpc\n'), ((1862, 1878), 'numpy.zeros_like', 'zeros_like', 
(['diff'], {}), '(diff)\n', (1872, 1878), False, 'from numpy import array, divide, linalg, random, zeros_like\n'), ((1981, 1991), 'numpy.abs', 'abs_', (['diff'], {}), '(diff)\n', (1985, 1991), True, 'from numpy import abs as abs_\n'), ((2017, 2031), 'numpy.abs', 'abs_', (['rel_diff'], {}), '(rel_diff)\n', (2021, 2031), True, 'from numpy import abs as abs_\n'), ((648, 697), 'numpy.random.randint', 'random.randint', ([], {'low': '(-1000)', 'high': '(1000)', 'size': '(5, 5)'}), '(low=-1000, high=1000, size=(5, 5))\n', (662, 697), False, 'from numpy import array, divide, linalg, random, zeros_like\n'), ((1894, 1923), 'numpy.array', 'array', (['correct_matrix_inverse'], {}), '(correct_matrix_inverse)\n', (1899, 1923), False, 'from numpy import array, divide, linalg, random, zeros_like\n'), ((2058, 2071), 'numpy.array', 'array', (['matrix'], {}), '(matrix)\n', (2063, 2071), False, 'from numpy import array, divide, linalg, random, zeros_like\n'), ((2099, 2128), 'numpy.array', 'array', (['correct_matrix_inverse'], {}), '(correct_matrix_inverse)\n', (2104, 2128), False, 'from numpy import array, divide, linalg, random, zeros_like\n'), ((2160, 2174), 'numpy.array', 'array', (['inverse'], {}), '(inverse)\n', (2165, 2174), False, 'from numpy import array, divide, linalg, random, zeros_like\n'), ((2210, 2224), 'numpy.array', 'array', (['checker'], {}), '(checker)\n', (2215, 2224), False, 'from numpy import array, divide, linalg, random, zeros_like\n'), ((2455, 2473), 'numpy.linalg.inv', 'linalg.inv', (['matrix'], {}), '(matrix)\n', (2465, 2473), False, 'from numpy import array, divide, linalg, random, zeros_like\n')] |
#%%
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np # numpy for arrays
from tqdm import tqdm
import SpecCAF.Solver as Solver
import SpecCAF.spherical as spherical
from matplotlib import pyplot as plt
from scipy.interpolate import interp1d,interp2d
from scipy.signal import find_peaks
from scipy.ndimage import median_filter,gaussian_filter1d
import matplotlib as mpl
# Global matplotlib style: LaTeX text rendering with Palatino serif font,
# 12 pt base size, automatic tight layout for all figures.
plt.rcParams.update({
    "text.usetex": True,
    "font.family": "serif",
    "font.serif": ["Palatino"],
    "font.size" : 12,
    "figure.autolayout" : True,
})
# Parameter-set label; also used as a filename suffix for the cached 'F*.npy'
# coefficient file and the output figure PDFs below.
paramtype = 'mean'
def peakfind(f, tol=0.1):
    """Locate the two largest peaks of each synthesized distribution column.

    Uses the module-level spherical-harmonic object `sh` to synthesize the
    distribution, then classifies each column by the secondary/primary peak
    height ratio: no peaks -> 'Isotropic'; ratio > 1-tol -> '2D Cone';
    ratio < tol -> 'Single Maxima'; otherwise 'Secondary Cluster'.

    Returns (fabric_types, peak_heights, peak_angles) where the arrays have
    shape (2, ncols): row 0 is the primary peak, row 1 the secondary.
    """
    rho, theta = sh.y0synth(f)
    ncols = rho.shape[1]
    peak_angles = np.zeros((2, ncols))
    peak_heights = np.zeros((2, ncols))
    fabric_types = []
    for col in range(ncols):
        idx = find_peaks(rho[:, col])[0]
        if idx.size == 0:
            # No local maxima: flat (isotropic) distribution.
            peak_angles[:, col] = np.nan
            peak_heights[:, col] = np.mean(rho[:, col])
            fabric_types.append('Isotropic')
            continue
        heights = rho[idx, col]
        order = np.argsort(heights)
        peak_angles[0, col] = theta[idx[order[-1]]]
        peak_heights[0, col] = heights[order[-1]]
        if idx.size > 1:
            peak_angles[1, col] = theta[idx[order[-2]]]
            peak_heights[1, col] = heights[order[-2]]
        # With a single peak the secondary height stays 0, so the ratio is 0.
        ratio = peak_heights[1, col] / peak_heights[0, col]
        if ratio > 1 - tol:
            fabric_types.append('2D Cone')
        elif ratio < tol:
            fabric_types.append('Single Maxima')
        else:
            fabric_types.append('Secondary Cluster')
    return fabric_types, peak_heights, peak_angles
def cpoIdentify(ratios, tol=0.1):
    """Classify fabric type from secondary/primary peak height ratios.

    NaN -> 'Isotropic'; ratio > 1-tol -> '2D Cone'; ratio < tol ->
    'Single Maxima'; anything else -> 'Secondary Cluster'. Returns an
    object-dtype array of labels with the same shape as `ratios`.
    """
    labels = np.zeros_like(ratios, dtype='object')
    labels[np.isnan(ratios)] = 'Isotropic'
    labels[ratios > 1 - tol] = '2D Cone'
    labels[ratios < tol] = 'Single Maxima'
    # Entries not matched above are still the initial zeros.
    labels[labels == 0] = 'Secondary Cluster'
    return labels
# Build a three-band colormap for the fabric-type maps: the bottom `tol`
# fraction of the range gets col1, the top `tol` fraction gets col3 and the
# middle gets col2; the band edges are softened with a 1-D Gaussian filter.
newcols = np.zeros((256,4))
col1 = np.array([252/256, 141/256, 89/256, 1])
col2 = np.array([255/256, 255/256, 191/256, 1])
col3 = np.array([145/256, 191/256, 219/256, 1])
# Ratio threshold that also separates the fabric classes in cpoIdentify.
tol=0.1
newcols[:int(256*tol),:]=col1
newcols[int(256*tol):int((1-tol)*256),:]=col2
newcols[int((1-tol)*256):]=col3
newcols = gaussian_filter1d(newcols, 2.1,axis=0)
newcmp = mpl.colors.ListedColormap(newcols)
# Spherical-harmonic machinery truncated at degree 12.
sh = spherical.spherical(12)
# Set initial condition: only the first spectral coefficient is non-zero.
f0=sh.spec_array()
f0[0]=1
# Strain, temperature and vorticity-number axes of the parameter sweep.
strainvec = np.linspace(0,10,100)
Tvec = np.linspace(-30,-5,50)
#Wvec = np.linspace(0,1,30)
Wvec = np.logspace(-1,1,50)
#Wvec = np.concatenate([np.linspace(0,0.1,10),np.logspace(-1,1,20)])
Wgrid,Tgrid,Sgrid=np.meshgrid(Wvec,Tvec,strainvec)
# Per-(T, W, strain) outputs filled by the classification loop below.
cpotypes = np.zeros((Tvec.size,Wvec.size,strainvec.size),dtype=object)
ratios = np.zeros((Tvec.size,Wvec.size,strainvec.size))
peakth = np.zeros_like(ratios)
F = np.zeros((Tvec.size,Wvec.size,sh.nlm,strainvec.size))
# High-resolution axes used for the interpolated contour plots.
Whd = np.logspace(-1,1,1000)
Thd = np.linspace(-30,-5,1000)
# Load precomputed spectral coefficients from cache (replaces the zeros above).
F = np.load('F' + str(Wvec.size) + paramtype + '.npy')
# ratios = np.load('ratios100log12.npy')
# peakth = np.load('peakth100log12.npy')
# NOTE(review): `ratios` is still all zeros here, so this labels everything
# 'Single Maxima'; `cpotypes` is overwritten entry-by-entry in the loop below.
cpotypes = cpoIdentify(ratios,tol)
# Classify the fabric at every (temperature, vorticity) grid point and every
# strain from the cached spectral coefficients F. The commented lines show the
# original on-the-fly solve that produced F.
for i in tqdm(range(Tvec.size)):
    for j in range(Wvec.size):
        # gradu = Solver.gradufromW(Wvec[j])
        # p=Solver.params(Tvec[i],strainvec,gradu,paramtype)
        # sol=Solver.rk3solve(p,sh,f0)
        # interp = interp1d(sol.t,sol.y)
        # F[i,j,:,:] = interp(strainvec)
        cpotype,peaksrh,peaksth = peakfind(F[i,j,:,:])
        cpotypes[i,j,:] = cpotype
        # Secondary-to-primary peak ratio and primary-peak angle per strain.
        ratios[i,j,:] = peaksrh[1,:]/peaksrh[0,:]
        peakth[i,j,:] = peaksth[0,:]
# Figure 9: fabric-type maps (filled colour) with contours of the primary-peak
# angle overlaid, one panel per strain value.
strainplots = np.array([0.3, 0.5, 1, 2,5,10])
# Half-size of the (currently disabled) inset pole figures, in axes coords.
r=0.07
ncol=2
nrow = strainplots.size//ncol
fig2,ax2 = plt.subplots(nrow,ncol,figsize=(8,9))
# (T, W) positions for the inset pole figures of each panel (insets disabled).
Tins = np.array([[-25,-10],[-25,-10],[-25,-15],[-25,-10],[-20,-15],[-20,-15]])
Wins = np.array([[0.3,2],[0.2,1],[1,0.2],[0.3,1],[0.2,5],[0.2,4]])
subfiglabels=('a','b','c','d','e','f')
i=0
#for i in range(strainplots.size):
for ax in ax2.flat:
    # Grid index of the strain closest to the requested plot strain.
    strainind = np.argmin(np.abs(strainvec-strainplots[i]))
    # Interpolate the peak ratio onto the high-resolution axes and smooth it.
    R = interp2d(Tvec,Wvec,ratios[:,:,strainind])
    ratioshd = R(Thd,Whd).T
    med = median_filter(ratioshd,35)
    #cpotypeshd = cpoIdentify(med)
    #Zlocal = fv(cpotypeshd).T
    im2 = ax.contourf(Thd,Whd,med,vmin=0,vmax=1,cmap=newcmp)
    # Overlay 20 and 50 degree contours of the primary-peak angle.
    # NOTE(review): `lw` is not a documented Axes.contour kwarg (`linewidths`
    # is) -- confirm it has any effect on the installed matplotlib version.
    con = ax.contour(Tvec,Wvec,(180/np.pi)*peakth[:,:,strainind].T,[20,50],colors='black',lw=3)
    if i==1:
        # Manual label placement avoids overlapping labels in this panel.
        ax.clabel(con,inline=1,fmt='%1.0f',inline_spacing=0,manual=[(-29,0.6),(-15,4)])
    else:
        ax.clabel(con,inline=1,fmt='%1.0f',inline_spacing=0)
    ax.set_ylabel('$\mathcal{W}$')
    ax.set_xlabel('$T(^{\circ}C)$')
    ax.set_yscale('symlog', linthresh=0.1)
    ax.set_title('(' + subfiglabels[i] + '), $\gamma=' + ('%.2f' % strainplots[i]) + '$')
    ax.set_yticks(np.logspace(-1,1,5))
    #ax.grid(b=True, which='both',axis='both')
    ##Inset polefigures
    # for j in range(2):
    #     Wind = np.abs(Wvec-Wins[i,j]).argmin()
    #     Tind = np.abs(Tvec-Tins[i,j]).argmin()
    #     xx,yy,fgrid=sh.polefigure(F[Tind,Wind,:,strainind])
    #     axcentre = ax.transData.transform((Tvec[Tind],Wvec[Wind]))
    #     inv=ax.transAxes.inverted()
    #     axcentre = inv.transform(axcentre)
    #     ins = ax.inset_axes([axcentre[0]-r,axcentre[1]-r,2*r,2.926*r])
    #     cs=ins.contour(xx,yy,fgrid,levels=np.linspace(0,1,11),vmin=0)
    #     circle=plt.Circle((0,0),radius=0.98*np.pi/2,alpha=0.5,fc='w',ec='black')
    #     ins.add_patch(circle)
    #     ins.axis('off')
    i=i+1
# Shared legend for the three fabric-type colours, expanded below the panels.
legend_elements = [mpl.patches.Patch(facecolor=col1,label='Single Maxima'),
                   mpl.patches.Patch(facecolor=col2,label='Secondary Cluster'),
                   mpl.patches.Patch(facecolor=col3,label='Double Maxima')]
#leg_ax = fig2.add_axes([0.1, -0.05, 0.8, 0.05])
leg = fig2.legend(handles=legend_elements,bbox_to_anchor=(0.1,0,0.8,0.02),ncol=3,mode="expand")
fig2.savefig('fig09' + paramtype + '.pdf',format='pdf',bbox_inches='tight')
# Figure 8: maps of the angle between the primary cluster and the compression
# axis (degrees), one panel per strain, with a shared horizontal colourbar.
fig3,ax3= plt.subplots(nrow,ncol,figsize=(8,9))
i=0
for ax in ax3.flat:
    strainind = np.argmin(np.abs(strainvec-strainplots[i]))
    cs=ax.contourf(Tvec,Wvec,(180/np.pi)*peakth[:,:,strainind].T,[0, 10, 20, 30, 40, 50, 60, 70],cmap='magma')
    # Avoid thin white seams between filled contour levels in vector output.
    for c in cs.collections:
        c.set_edgecolor("face")
    ax.set_aspect('auto')
    ax.set_yscale('log')
    ax.set_xlabel('$T(^{\circ}C)$')
    ax.set_ylabel('$\mathcal{W}$')
    ax.set_title('(' + subfiglabels[i] + '), $\gamma=' + ('%.2f' % strainplots[i]) + '$')
    i=i+1
cbar_ax = fig3.add_axes([0.1,-0.01, 0.8, 0.02])
cbar=fig3.colorbar(cs,cax=cbar_ax,orientation='horizontal')
cbar.set_label('Angle between primary cluster and compression axis $(^{\\circ})$')
cbar.solids.set_edgecolor("face")
#fig3.colorbar(cs,bbox_to_anchor=(0.1,0,0.8,0.02))
fig3.savefig('fig08' + paramtype + '.pdf',format='pdf',bbox_inches='tight')
#np.save('F' + str(Wvec.size) + paramtype + '.npy',F) | [
"SpecCAF.spherical.spherical",
"numpy.argsort",
"numpy.array",
"scipy.interpolate.interp2d",
"numpy.mean",
"matplotlib.colors.ListedColormap",
"numpy.linspace",
"scipy.signal.find_peaks",
"numpy.meshgrid",
"scipy.ndimage.gaussian_filter1d",
"numpy.logspace",
"numpy.abs",
"numpy.isnan",
"ma... | [((389, 531), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'text.usetex': True, 'font.family': 'serif', 'font.serif': ['Palatino'],\n 'font.size': 12, 'figure.autolayout': True}"], {}), "({'text.usetex': True, 'font.family': 'serif',\n 'font.serif': ['Palatino'], 'font.size': 12, 'figure.autolayout': True})\n", (408, 531), True, 'from matplotlib import pyplot as plt\n'), ((1974, 1992), 'numpy.zeros', 'np.zeros', (['(256, 4)'], {}), '((256, 4))\n', (1982, 1992), True, 'import numpy as np\n'), ((1999, 2044), 'numpy.array', 'np.array', (['[252 / 256, 141 / 256, 89 / 256, 1]'], {}), '([252 / 256, 141 / 256, 89 / 256, 1])\n', (2007, 2044), True, 'import numpy as np\n'), ((2046, 2092), 'numpy.array', 'np.array', (['[255 / 256, 255 / 256, 191 / 256, 1]'], {}), '([255 / 256, 255 / 256, 191 / 256, 1])\n', (2054, 2092), True, 'import numpy as np\n'), ((2094, 2140), 'numpy.array', 'np.array', (['[145 / 256, 191 / 256, 219 / 256, 1]'], {}), '([145 / 256, 191 / 256, 219 / 256, 1])\n', (2102, 2140), True, 'import numpy as np\n'), ((2262, 2301), 'scipy.ndimage.gaussian_filter1d', 'gaussian_filter1d', (['newcols', '(2.1)'], {'axis': '(0)'}), '(newcols, 2.1, axis=0)\n', (2279, 2301), False, 'from scipy.ndimage import median_filter, gaussian_filter1d\n'), ((2310, 2344), 'matplotlib.colors.ListedColormap', 'mpl.colors.ListedColormap', (['newcols'], {}), '(newcols)\n', (2335, 2344), True, 'import matplotlib as mpl\n'), ((2352, 2375), 'SpecCAF.spherical.spherical', 'spherical.spherical', (['(12)'], {}), '(12)\n', (2371, 2375), True, 'import SpecCAF.spherical as spherical\n'), ((2441, 2464), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(100)'], {}), '(0, 10, 100)\n', (2452, 2464), True, 'import numpy as np\n'), ((2470, 2494), 'numpy.linspace', 'np.linspace', (['(-30)', '(-5)', '(50)'], {}), '(-30, -5, 50)\n', (2481, 2494), True, 'import numpy as np\n'), ((2528, 2550), 'numpy.logspace', 'np.logspace', (['(-1)', '(1)', '(50)'], {}), '(-1, 1, 50)\n', 
(2539, 2550), True, 'import numpy as np\n'), ((2638, 2672), 'numpy.meshgrid', 'np.meshgrid', (['Wvec', 'Tvec', 'strainvec'], {}), '(Wvec, Tvec, strainvec)\n', (2649, 2672), True, 'import numpy as np\n'), ((2682, 2744), 'numpy.zeros', 'np.zeros', (['(Tvec.size, Wvec.size, strainvec.size)'], {'dtype': 'object'}), '((Tvec.size, Wvec.size, strainvec.size), dtype=object)\n', (2690, 2744), True, 'import numpy as np\n'), ((2751, 2799), 'numpy.zeros', 'np.zeros', (['(Tvec.size, Wvec.size, strainvec.size)'], {}), '((Tvec.size, Wvec.size, strainvec.size))\n', (2759, 2799), True, 'import numpy as np\n'), ((2807, 2828), 'numpy.zeros_like', 'np.zeros_like', (['ratios'], {}), '(ratios)\n', (2820, 2828), True, 'import numpy as np\n'), ((2833, 2889), 'numpy.zeros', 'np.zeros', (['(Tvec.size, Wvec.size, sh.nlm, strainvec.size)'], {}), '((Tvec.size, Wvec.size, sh.nlm, strainvec.size))\n', (2841, 2889), True, 'import numpy as np\n'), ((2895, 2919), 'numpy.logspace', 'np.logspace', (['(-1)', '(1)', '(1000)'], {}), '(-1, 1, 1000)\n', (2906, 2919), True, 'import numpy as np\n'), ((2924, 2950), 'numpy.linspace', 'np.linspace', (['(-30)', '(-5)', '(1000)'], {}), '(-30, -5, 1000)\n', (2935, 2950), True, 'import numpy as np\n'), ((3656, 3689), 'numpy.array', 'np.array', (['[0.3, 0.5, 1, 2, 5, 10]'], {}), '([0.3, 0.5, 1, 2, 5, 10])\n', (3664, 3689), True, 'import numpy as np\n'), ((3745, 3785), 'matplotlib.pyplot.subplots', 'plt.subplots', (['nrow', 'ncol'], {'figsize': '(8, 9)'}), '(nrow, ncol, figsize=(8, 9))\n', (3757, 3785), True, 'from matplotlib import pyplot as plt\n'), ((3790, 3876), 'numpy.array', 'np.array', (['[[-25, -10], [-25, -10], [-25, -15], [-25, -10], [-20, -15], [-20, -15]]'], {}), '([[-25, -10], [-25, -10], [-25, -15], [-25, -10], [-20, -15], [-20,\n -15]])\n', (3798, 3876), True, 'import numpy as np\n'), ((3869, 3939), 'numpy.array', 'np.array', (['[[0.3, 2], [0.2, 1], [1, 0.2], [0.3, 1], [0.2, 5], [0.2, 4]]'], {}), '([[0.3, 2], [0.2, 1], [1, 0.2], [0.3, 1], [0.2, 5], 
[0.2, 4]])\n', (3877, 3939), True, 'import numpy as np\n'), ((6034, 6074), 'matplotlib.pyplot.subplots', 'plt.subplots', (['nrow', 'ncol'], {'figsize': '(8, 9)'}), '(nrow, ncol, figsize=(8, 9))\n', (6046, 6074), True, 'from matplotlib import pyplot as plt\n'), ((685, 711), 'numpy.zeros', 'np.zeros', (['(2, rh.shape[1])'], {}), '((2, rh.shape[1]))\n', (693, 711), True, 'import numpy as np\n'), ((725, 751), 'numpy.zeros', 'np.zeros', (['(2, rh.shape[1])'], {}), '((2, rh.shape[1]))\n', (733, 751), True, 'import numpy as np\n'), ((1726, 1763), 'numpy.zeros_like', 'np.zeros_like', (['ratios'], {'dtype': '"""object"""'}), "(ratios, dtype='object')\n", (1739, 1763), True, 'import numpy as np\n'), ((4107, 4152), 'scipy.interpolate.interp2d', 'interp2d', (['Tvec', 'Wvec', 'ratios[:, :, strainind]'], {}), '(Tvec, Wvec, ratios[:, :, strainind])\n', (4115, 4152), False, 'from scipy.interpolate import interp1d, interp2d\n'), ((4187, 4214), 'scipy.ndimage.median_filter', 'median_filter', (['ratioshd', '(35)'], {}), '(ratioshd, 35)\n', (4200, 4214), False, 'from scipy.ndimage import median_filter, gaussian_filter1d\n'), ((5586, 5642), 'matplotlib.patches.Patch', 'mpl.patches.Patch', ([], {'facecolor': 'col1', 'label': '"""Single Maxima"""'}), "(facecolor=col1, label='Single Maxima')\n", (5603, 5642), True, 'import matplotlib as mpl\n'), ((5662, 5722), 'matplotlib.patches.Patch', 'mpl.patches.Patch', ([], {'facecolor': 'col2', 'label': '"""Secondary Cluster"""'}), "(facecolor=col2, label='Secondary Cluster')\n", (5679, 5722), True, 'import matplotlib as mpl\n'), ((5742, 5798), 'matplotlib.patches.Patch', 'mpl.patches.Patch', ([], {'facecolor': 'col3', 'label': '"""Double Maxima"""'}), "(facecolor=col3, label='Double Maxima')\n", (5759, 5798), True, 'import matplotlib as mpl\n'), ((1780, 1796), 'numpy.isnan', 'np.isnan', (['ratios'], {}), '(ratios)\n', (1788, 1796), True, 'import numpy as np\n'), ((4059, 4093), 'numpy.abs', 'np.abs', (['(strainvec - strainplots[i])'], {}), 
'(strainvec - strainplots[i])\n', (4065, 4093), True, 'import numpy as np\n'), ((4840, 4861), 'numpy.logspace', 'np.logspace', (['(-1)', '(1)', '(5)'], {}), '(-1, 1, 5)\n', (4851, 4861), True, 'import numpy as np\n'), ((6129, 6163), 'numpy.abs', 'np.abs', (['(strainvec - strainplots[i])'], {}), '(strainvec - strainplots[i])\n', (6135, 6163), True, 'import numpy as np\n'), ((826, 846), 'scipy.signal.find_peaks', 'find_peaks', (['rh[:, i]'], {}), '(rh[:, i])\n', (836, 846), False, 'from scipy.signal import find_peaks\n'), ((940, 957), 'numpy.mean', 'np.mean', (['rh[:, i]'], {}), '(rh[:, i])\n', (947, 957), True, 'import numpy as np\n'), ((1097, 1115), 'numpy.argsort', 'np.argsort', (['rhtemp'], {}), '(rhtemp)\n', (1107, 1115), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import re
import glob
import pandas as pd
import numpy as np
class IqaLoggingProcessor:
    '''A tool to process logfiles of different IQA tools for image quality assessment
    into a pandas dataframe. The IqaLoggingProcessor needs an absolute filepath
    as input and will check for certain logfiles at this location.'''

    def __init__(self, inpath):
        # Root directory that is scanned for IQA log files.
        self.inpath = inpath

    def getSubPathList(self):
        """Return all immediate subdirectories of the input path (with trailing slash)."""
        return glob.glob(self.inpath + "/**/")

    def getFilenameList(self, path):
        """Return the bare filenames of all *.txt log files found in *path*."""
        filepath = glob.glob(path + "/*.txt")
        return [filelocation.split("/")[-1] for filelocation in filepath]

    def getFilenameComponents(self, filename):
        """Split a log filename of the form
        <metric>_<size>_<algo>_<target>_<tool1>-<tool2>_<opt>_<os>.txt
        into its components; exits on any other naming scheme.

        :return: (metric, size, algo, target, tool1, tool2, opt, os)
        """
        components = filename.split("_")
        if len(components) != 7:
            print(f"--> Unsupported IQA log file type: (unknown)")
            # NOTE(review): sys.exit(1) would be cleaner; exit() relies on the site module.
            exit(1)
        # 'opsys' instead of 'os' so the imported os module is not shadowed.
        metric, size, algo, target, tools, opt, opsys = components
        tools = tools.split("-")
        opsys = opsys.split(".")[0]  # strip the ".txt" extension
        return metric, size, algo, target, tools[0], tools[1], opt, opsys

    def getSubPathId(self, subPath):
        """Return the last directory name of each path in *subPath*."""
        return [p.split('/')[-2] for p in subPath]

    def getDeltaE2000Values(self, filepath, filename):
        """Parse a DeltaE CIE2000 log file.

        :return: [frames, meanDeltaE, maxDeltaE, smDeltaE]; exits if the
            log file is incomplete.
        """
        values = []
        frames = []
        meanDeltaE = []
        maxDeltaE = []
        # sm uses a smoothening filter size 11, sigma 3
        smDeltaE = []
        # "  <frame> <mean> <max>" data rows.
        regex1 = r"^\s+\d+\s\d+\.\d+\s\d+\.\d+"
        # "sm <value>" smoothed-DeltaE rows.
        regex2 = r"(?P<index>sm\s+)(?P<value>\d+\.\d+)"
        # Context manager so the file handle is closed even on early exit.
        with open(filepath + filename) as readLog:
            for line in readLog:
                values.extend(v for v in re.findall(regex1, line) if v)
                for sm in re.findall(regex2, line):
                    smDeltaE.append(float(sm[1]))
        for v in values:
            frame, mean, max_ = v.split()
            frames.append(int(frame))
            meanDeltaE.append(float(mean))
            maxDeltaE.append(float(max_))
        try:
            # The final "sm" entry has no matching frame row; drop it.
            smDeltaE.pop(-1)
        except IndexError:
            print("Error Id1 --> Logfile " + filename + " incomplete!")
            exit(1)
        if len(frames) == len(meanDeltaE) == len(maxDeltaE) == len(smDeltaE):
            print("Reading --> " + filename)
            return [frames, meanDeltaE, maxDeltaE, smDeltaE]
        print("Error Id2 --> Logfile " + filename + " incomplete!")
        print(f'Detected --> {len(frames)} Frames, {len(meanDeltaE)} mean dE, {len(maxDeltaE)} max dE, {len(smDeltaE)} sm dE')
        exit(1)

    def getIctcpValues(self, filepath, filename):
        """Parse an ICtCp log file.

        :return: [frames, maxDeltaE, aveDeltaE]; exits if the log file is
            incomplete.
        """
        frames = []
        maxDeltaE = []
        aveDeltaE = []
        # Lookbehind anchors for the "frame"/"max"/"ave" value fields.
        regex1 = r"frame "
        regex3 = r" max "
        regex4 = r" ave "
        with open(filepath + filename) as readLog:
            for line in readLog:
                for f in re.findall(r"(?<={})\d+".format(regex1), line):
                    frames.append(int(f))
                for maxd in re.findall(r"(?<={})\d+\.\d+\D+?\d+".format(regex3), line):
                    maxDeltaE.append(float(maxd))
                for ave in re.findall(r"(?<={})\d+\.\d+\D+?\d+".format(regex4), line):
                    aveDeltaE.append(float(ave))
        if len(frames) == len(maxDeltaE) == len(aveDeltaE):
            print("Reading --> " + filename)
            return [frames, maxDeltaE, aveDeltaE]
        print("Error Id3 --> Logfile " + filename + " incomplete!")
        exit(1)

    def createDataframe(self, filepath, filename, nameComponents):
        """Build a dataframe for one log file, dispatching on the metric
        encoded in the filename components.

        :param nameComponents: tuple as returned by getFilenameComponents;
            only the leading metric entry is used here.
        """
        metric = nameComponents[0]
        if metric == "ictcp":
            columnames = ['Frame', 'Delta ICTCP max', 'Delta ICTCP ave']
            data = self.getIctcpValues(filepath, filename)
        elif metric == "dE2000":
            columnames = ['Frame', 'DeltaE CIE2000 mean', 'DeltaE CIE2000 max', 'smoothed DeltaE CIE2000']
            data = self.getDeltaE2000Values(filepath, filename)
        else:
            print(f"Error Id4 --> Unsupported metric: {metric}. Please check log file name string.")
            exit(1)
        content = list(zip(*data))
        # np.array coerces all columns to a common dtype, matching the
        # historical output; keep it rather than passing `content` directly.
        df = pd.DataFrame(list(np.array(content)), columns=columnames)
        return df
"numpy.array",
"re.findall",
"glob.glob"
] | [((504, 535), 'glob.glob', 'glob.glob', (["(self.inpath + '/**/')"], {}), "(self.inpath + '/**/')\n", (513, 535), False, 'import glob\n'), ((619, 645), 'glob.glob', 'glob.glob', (["(path + '/*.txt')"], {}), "(path + '/*.txt')\n", (628, 645), False, 'import glob\n'), ((1730, 1754), 're.findall', 're.findall', (['regex1', 'line'], {}), '(regex1, line)\n', (1740, 1754), False, 'import re\n'), ((1783, 1807), 're.findall', 're.findall', (['regex2', 'line'], {}), '(regex2, line)\n', (1793, 1807), False, 'import re\n'), ((4571, 4588), 'numpy.array', 'np.array', (['content'], {}), '(content)\n', (4579, 4588), True, 'import numpy as np\n')] |
import os
import simpy
import numpy as np
from pathlib import Path
from RideSimulator.Grid import Grid
from RideSimulator.Trip import Trip
lat_points, lon_points, trip_distances, trips_per_min = None, None, None, None
def read_data(directory="data", lon_file="lon_points", lat_file="lat_points", distance_file="trip_distances",
              min_file="trips_per_min"):
    """Load the trip data files into the module-level arrays.

    Populates lat_points, lon_points, trip_distances and trips_per_min.
    When the current working directory is not the RideSimulator package
    itself, the data directory is resolved relative to its parent.
    """
    global lat_points, lon_points, trip_distances, trips_per_min
    cwd = Path(os.getcwd())
    if cwd.parts[-1] == "RideSimulator":
        data_path = directory
    else:
        data_path = os.path.join(cwd.parent, os.path.join('RideSimulator', directory))
    print("Loading trip data...")

    def load(name):
        # All four files share the same directory and plain-text format.
        return np.loadtxt(os.path.join(data_path, name))

    lat_points = load(lat_file)
    lon_points = load(lon_file)
    trip_distances = load(distance_file)
    trips_per_min = load(min_file)
    print("Data loading complete")
class TripGenerator(object):
    """Generates ride-hailing trips on a Grid.

    Pickup/drop coordinates and per-minute trip volumes are derived from
    the module-level empirical arrays (lat_points, lon_points,
    trip_distances, trips_per_min) loaded by read_data().
    """
    def __init__(self, grid: Grid, time_unit, trips_per_week=20000, seed: int = None):
        # Seeding here makes the shuffles and random draws below reproducible.
        if seed is not None:
            np.random.seed(seed)
        self.id = 0  # monotonically increasing trip id (see generate_trips)
        self.grid = grid
        self.width = grid.width
        self.height = grid.height
        # Scale factor between map units and the integer jitter drawn in
        # get_trip_locations.
        self.granularity = 1000
        self.min_trip_distance = self.width / 100
        self.max_displacement = self.width * 0.05 * self.granularity
        self.time_unit = time_unit
        self.units_per_day = time_unit * 60 * 24
        time_slice = self.units_per_day // 24
        # NOTE(review): peak_times is stored but not consulted anywhere in
        # this class (generate_trips always returns peak_time=False).
        self.peak_times = [time_slice * 4, time_slice * 5, time_slice * 14, time_slice * 15]
        self.lat_points, self.lon_points, self.trip_distances = self.import_data()
        self.trips_per_min = self.scale_trip_count(trips_per_week)
        self.updated_hex_ids = set()
    def import_data(self):
        """Shuffle and rescale the raw data arrays to this grid's extents.

        Returns:
            (lat_points_scaled, lon_points_scaled, distances_scaled)
        """
        lat_points_copy = lat_points.copy()
        lon_points_copy = lon_points.copy()
        trip_distances_copy = trip_distances.copy()
        np.random.shuffle(lat_points_copy)
        np.random.shuffle(lon_points_copy)
        np.random.shuffle(trip_distances_copy)
        lat_points_scaled = lat_points_copy - lat_points_copy.min()
        # NOTE(review): uses the unshuffled global lon_points here, while the
        # latitude branch uses the shuffled copy -- confirm this asymmetry is
        # intentional.
        lon_points_scaled = lon_points - lon_points_copy.min()
        lat_scale = self.height / lat_points_scaled.max()
        lon_scale = self.width / lon_points_scaled.max()
        distance_scale = self.width / trip_distances_copy.max()
        lat_points_scaled = lat_points_scaled * lat_scale
        lon_points_scaled = lon_points_scaled * lon_scale
        # NOTE(review): scales the unshuffled global trip_distances although a
        # shuffled copy was prepared above -- verify which one is wanted.
        distances_scaled = trip_distances * distance_scale
        return lat_points_scaled, lon_points_scaled, distances_scaled
    @staticmethod
    def scale_trip_count(trips_per_week):
        """Scale the empirical per-minute trip counts toward trips_per_week.

        Fractional trips are realised stochastically: each minute keeps the
        integer part and gains one extra trip with probability equal to the
        fractional part.
        """
        scale = trips_per_min.sum() * 7 // trips_per_week
        scaled_hourly = (trips_per_min / scale).astype(int)
        dif = 0  # NOTE(review): accumulated but never used or returned
        sampled_trips = []
        for i in range(24):
            h = i * 60
            full_trips = (trips_per_min[h:h + 60] / scale) // 1
            prob_trips = (trips_per_min[h:h + 60] / scale) % 1
            total_trips = []
            # One Bernoulli draw per minute of the hour.
            sample_trips = (np.random.rand(60) < prob_trips).astype(int)
            total_trips += (full_trips + sample_trips).tolist()
            sampled_trips += (full_trips + sample_trips).tolist()
            trip_count = int(sum(total_trips) / 100)
            dif += trip_count - scaled_hourly[i]
        return np.array(sampled_trips).astype(int)
    def get_trip_locations(self, x, y, distance):
        """Pick jittered pickup/drop coordinates for one trip.

        Returns:
            3x2 ndarray: row 0 = pickup, row 1 = drop (roles swapped with
            probability 0.2), row 2 = [start hex id, its additional reward].
        """
        # Jitter the sampled point by up to max_displacement/granularity map units,
        # clamped to the grid.
        x = min(self.width, x + np.random.randint(0, self.max_displacement) / self.granularity)
        y = min(self.height, y + np.random.randint(0, self.max_displacement) / self.granularity)
        start_hex = self.grid.hex_overlay.get_closest_hex([x, y])
        start_hex.trip_count += 1
        # NOTE(review): np.rad2deg converts radians to degrees, so theta is in
        # degrees here, yet it is passed to np.cos/np.sin below (which expect
        # radians) -- confirm whether this is intended.
        theta = np.rad2deg(np.random.random() * 2 * np.pi)
        # Enforce a minimum trip length.
        while distance < self.min_trip_distance:
            distance = distance * 1.5
        while True:
            d_x = x + distance * np.cos(theta)
            d_y = y + distance * np.sin(theta)
            x_count = 0
            y_count = 0
            # If the destination leaves the grid, try the mirrored direction
            # (at most twice per axis) before shrinking the distance.
            while (d_x > self.width or d_x < 0) and x_count < 2:
                # print("switching x direction", d_x)
                d_x = x - distance * np.cos(theta)
                x_count += 1
            while (d_y > self.height or d_y < 0) and y_count < 2:
                # print("switching y direction", d_y)
                d_y = y - distance * np.sin(theta)
                y_count += 1
            if x_count == 2 or y_count == 2:
                # print("Reducing distance", distance)
                distance = distance / 2
            else:
                prob = np.random.random()
                if prob < 0.2:
                    return np.array([[d_x, d_y], [x, y], [start_hex.id, start_hex.additional_reward]])
                else:
                    return np.array([[x, y], [d_x, d_y], [start_hex.id, start_hex.additional_reward]])
    def create_trip(self, env: simpy.Environment, trip_id: int) -> Trip:
        """
        Creates a trip in the env with the given trip_id.
        The trip will have randomly generated pickup and drop locations, and the id of the nearest driver pool is
        assigned to the trip.
        Pickup and drop locations will not be the same.
        :param env: simpy environment
        :param trip_id: trip id
        :return: trip object
        """
        distance = self.trip_distances[trip_id]
        pick_up_loc, drop_loc, hex_data = self.get_trip_locations(self.lon_points[trip_id], self.lat_points[trip_id],
                                                                  distance)
        self.updated_hex_ids.add(hex_data[0])
        nearest_spot = self.grid.get_nearest_spot(pick_up_loc)[0]
        trip_i = Trip(env, trip_id, pick_up_loc, drop_loc, [nearest_spot], hex_data[0], hex_data[1])
        return trip_i
    def generate_trips(self, env: simpy.Environment):
        """Create all trips for the current simulation minute.

        Returns:
            (trips, peak_time): list of new Trip objects and a flag that is
            currently always False.
        """
        peak_time = False  # NOTE(review): never updated; see peak_times note in __init__
        # Minute-of-day index into the per-minute trip counts.
        day_time = int((env.now % self.units_per_day) / self.time_unit)
        num_trips = self.trips_per_min[day_time]
        trips = []
        for _ in range(num_trips):
            trips.append(self.create_trip(env, self.id))
            self.id += 1
        return trips, peak_time
| [
"numpy.random.rand",
"numpy.random.random",
"os.path.join",
"os.getcwd",
"numpy.array",
"numpy.random.randint",
"numpy.random.seed",
"numpy.cos",
"numpy.sin",
"RideSimulator.Trip.Trip",
"numpy.random.shuffle"
] | [((682, 715), 'os.path.join', 'os.path.join', (['data_path', 'lat_file'], {}), '(data_path, lat_file)\n', (694, 715), False, 'import os\n'), ((745, 778), 'os.path.join', 'os.path.join', (['data_path', 'lon_file'], {}), '(data_path, lon_file)\n', (757, 778), False, 'import os\n'), ((812, 850), 'os.path.join', 'os.path.join', (['data_path', 'distance_file'], {}), '(data_path, distance_file)\n', (824, 850), False, 'import os\n'), ((883, 916), 'os.path.join', 'os.path.join', (['data_path', 'min_file'], {}), '(data_path, min_file)\n', (895, 916), False, 'import os\n'), ((1986, 2020), 'numpy.random.shuffle', 'np.random.shuffle', (['lat_points_copy'], {}), '(lat_points_copy)\n', (2003, 2020), True, 'import numpy as np\n'), ((2029, 2063), 'numpy.random.shuffle', 'np.random.shuffle', (['lon_points_copy'], {}), '(lon_points_copy)\n', (2046, 2063), True, 'import numpy as np\n'), ((2072, 2110), 'numpy.random.shuffle', 'np.random.shuffle', (['trip_distances_copy'], {}), '(trip_distances_copy)\n', (2089, 2110), True, 'import numpy as np\n'), ((5804, 5891), 'RideSimulator.Trip.Trip', 'Trip', (['env', 'trip_id', 'pick_up_loc', 'drop_loc', '[nearest_spot]', 'hex_data[0]', 'hex_data[1]'], {}), '(env, trip_id, pick_up_loc, drop_loc, [nearest_spot], hex_data[0],\n hex_data[1])\n', (5808, 5891), False, 'from RideSimulator.Trip import Trip\n'), ((577, 617), 'os.path.join', 'os.path.join', (['"""RideSimulator"""', 'directory'], {}), "('RideSimulator', directory)\n", (589, 617), False, 'import os\n'), ((1112, 1132), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1126, 1132), True, 'import numpy as np\n'), ((3422, 3445), 'numpy.array', 'np.array', (['sampled_trips'], {}), '(sampled_trips)\n', (3430, 3445), True, 'import numpy as np\n'), ((4699, 4717), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (4715, 4717), True, 'import numpy as np\n'), ((475, 486), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (484, 486), False, 'import os\n'), ((556, 567), 
'os.getcwd', 'os.getcwd', ([], {}), '()\n', (565, 567), False, 'import os\n'), ((3542, 3585), 'numpy.random.randint', 'np.random.randint', (['(0)', 'self.max_displacement'], {}), '(0, self.max_displacement)\n', (3559, 3585), True, 'import numpy as np\n'), ((3639, 3682), 'numpy.random.randint', 'np.random.randint', (['(0)', 'self.max_displacement'], {}), '(0, self.max_displacement)\n', (3656, 3682), True, 'import numpy as np\n'), ((3832, 3850), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (3848, 3850), True, 'import numpy as np\n'), ((4006, 4019), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (4012, 4019), True, 'import numpy as np\n'), ((4053, 4066), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (4059, 4066), True, 'import numpy as np\n'), ((4776, 4851), 'numpy.array', 'np.array', (['[[d_x, d_y], [x, y], [start_hex.id, start_hex.additional_reward]]'], {}), '([[d_x, d_y], [x, y], [start_hex.id, start_hex.additional_reward]])\n', (4784, 4851), True, 'import numpy as np\n'), ((4901, 4976), 'numpy.array', 'np.array', (['[[x, y], [d_x, d_y], [start_hex.id, start_hex.additional_reward]]'], {}), '([[x, y], [d_x, d_y], [start_hex.id, start_hex.additional_reward]])\n', (4909, 4976), True, 'import numpy as np\n'), ((3128, 3146), 'numpy.random.rand', 'np.random.rand', (['(60)'], {}), '(60)\n', (3142, 3146), True, 'import numpy as np\n'), ((4273, 4286), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (4279, 4286), True, 'import numpy as np\n'), ((4474, 4487), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (4480, 4487), True, 'import numpy as np\n')] |
from itertools import product
from tqdm.notebook import tqdm
import numpy as np
import pandas as pd
import torch
from torch.utils.data import DataLoader
import torch.nn.functional as F
from cotrain_helper import get_learner_by_query
def unipole(points, B0, r, x, y):
    """Scalar Gaussian pole of amplitude B0 and width r centred at (x, y).

    Note: np.sum reduces over *all* elements of `points`, so the result is
    a single scalar, not a per-point field.
    """
    squared_distance = np.sum((points - np.array([x, y])) ** 2)
    return B0 * np.exp(-squared_distance / (2 * r ** 2))
def bipole_gaussian(points, B0, rho, gamma, sigma):
    """Bipolar field: two Gaussians of opposite sign, mirrored about the origin.

    Args:
        points: array with the (x, y) coordinates in the last dimension.
        B0: peak magnitude of each pole (poles may partially cancel for small rho).
        rho: polarity separation.
        gamma: tilt angle of the positive pole, in radians.
        sigma: standard deviation of each Gaussian.

    Returns:
        ndarray of shape points.shape[:-1] (last axis reduced).
    """
    # Positive pole sits at +pole, negative pole at -pole.
    pole = np.array([0, 0]) + rho / 2 * np.array([np.cos(gamma), np.sin(gamma)])
    positive = np.exp(-np.sum((points - pole) ** 2 / (2 * sigma ** 2), axis=-1))
    negative = np.exp(-np.sum((points + pole) ** 2 / (2 * sigma ** 2), axis=-1))
    return B0 * (positive - negative)
def bipole_yeates2020(points, B0, rho, gamma, a=0.56):
    """Bipolar magnetic region following Yeates 2020, eq (5).

    Args:
        points: array of shape (k, l, 2) with (x, y) in the last dimension.
        B0: amplitude (not the maximum absolute field value).
        rho: polarity separation in radians.
        gamma: tilt angle with respect to the equator, in radians.
        a: BMR size relative to rho.
    """
    cos_g, sin_g = np.cos(gamma), np.sin(gamma)
    # Rotation by -gamma, applied to every point.
    rotation = np.array([
        [cos_g, sin_g],
        [-sin_g, cos_g],
    ])
    rotated = np.einsum('ij,klj->kli', rotation, points)
    amplitude = -(B0 / rho) * rotated[:, :, 0]
    # x^2 + 2 * y^2 in the rotated frame.
    numerator = (rotated ** 2).dot([1, 2])
    return amplitude * np.exp(-numerator / (a * rho) ** 2)
def sweep_constant_images(learner, lim=10):
    """Evaluate `learner` on constant-valued 1x128x128 images.

    Args:
        learner: callable model with a `.device` attribute; must return
            per-sample logits with the class dimension last.
        lim: the constants swept are np.linspace(-lim, lim, 51); must be > 0.

    Returns:
        (const, probs): the 51 constants and the matching class-1
        softmax probabilities.
    """
    assert lim > 0
    with torch.no_grad():
        base_image = (torch.ones(1, 1, 128, 128)  # renamed from `input` (shadowed builtin)
                      .to(learner.device))
        const = np.linspace(-lim, lim, 51)
        dataloader = DataLoader(
            [base_image * c for c in const],
            batch_size=16,
        )
        probs = []
        for batch in dataloader:
            output = learner(batch)
            # dim=-1 made explicit: consistent with sweep_Z_list and avoids
            # the implicit-dim softmax deprecation warning.
            prob = F.softmax(output, dim=-1)[:, 1].tolist()
            probs.extend(prob)
    return const, probs
def sweep_Z_list(learner, Z_list):
    """Classify every 2-D array in `Z_list` with `learner`.

    Each array is given singleton batch and channel axes, moved to the
    learner's device as float32, and batched through a DataLoader.

    Returns:
        numpy array of class-1 softmax probabilities, one per input array.
    """
    def to_tensor(Z):
        expanded = np.expand_dims(Z, axis=(0, 1))
        return torch.tensor(expanded).to(learner.device).to(torch.float32)

    dataset = [to_tensor(Z) for Z in Z_list]
    # num_workers is left at its default of 0: worker processes interact
    # badly with these tensors (fork must be changed to spawn, and spawn
    # makes loading very slow).
    dataloader = DataLoader(
        dataset,
        batch_size=16,
    )
    probs = []
    for batch in tqdm(dataloader):
        scores = F.softmax(learner(batch), dim=-1)
        probs.extend(scores[:, 1].tolist())
    return np.array(probs)
def sweep_learner_and_Z_list(function, XY, params, names):
    """Evaluate a field function over a parameter grid with every CV learner.

    Args:
        function: unipole, bipole_gaussian, or bipole_yeates2020.
        XY: first argument of function (the coordinate system).
        params: other args of function; a list of parameter tuples.
        names: names of the parameters, used to name columns in df.

    Returns:
        (df, Z_list): df holds one row per parameter tuple with a
        ``prob_<val>_<test>`` column per learner plus their mean in
        ``prob``; Z_list holds the generated field arrays.
    """
    df = pd.DataFrame(data=params, columns=names)
    Z_list = [function(XY, *args) for args in params]
    # NOTE(review): dataset name and CUDA device are hardcoded here --
    # consider promoting them to parameters.
    dataset = 'sharp'
    # One learner per (validation split, test split) pair of the 5x5 CV grid.
    for val_split, test_split in tqdm(list(product(range(5), range(5)))):
        query = f'cv/base/{dataset}/0/{val_split}/{test_split}/CNN'
        learner = get_learner_by_query(query, eval_mode=True, device='cuda:1')
        probs = sweep_Z_list(learner, Z_list)
        df[f'prob_{val_split}_{test_split}'] = probs
    # Mean probability across all 25 learners.
    df['prob'] = df[[c for c in df.columns if c[:5] == 'prob_']].mean(axis=1)
    #(df[[c for c in df.columns if c[:5] == 'prob_']]
    # .describe()
    # .style
    # .background_gradient(axis=1)
    #)
    #df['prob'].hist(bins=20)
    #df_pos = df[df['prob'] > 0.5]
    #Z_list_pos = [Z_list[i] for i in df_pos.index]
    #print(len(df_pos))
    return df, Z_list
"numpy.sin",
"cotrain_helper.get_learner_by_query",
"numpy.exp",
"numpy.sum",
"numpy.array",
"numpy.linspace",
"numpy.einsum",
"numpy.cos",
"numpy.expand_dims",
"torch.utils.data.DataLoader",
"pandas.DataFrame",
"torch.no_grad",
"tqdm.notebook.tqdm",
"torch.nn.functional.softmax",
"torch... | [((286, 316), 'numpy.sum', 'np.sum', (['((points - [x, y]) ** 2)'], {}), '((points - [x, y]) ** 2)\n', (292, 316), True, 'import numpy as np\n'), ((920, 936), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (928, 936), True, 'import numpy as np\n'), ((1773, 1808), 'numpy.einsum', 'np.einsum', (['"""ij,klj->kli"""', 'R', 'points'], {}), "('ij,klj->kli', R, points)\n", (1782, 1808), True, 'import numpy as np\n'), ((2864, 2898), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': '(16)'}), '(dataset, batch_size=16)\n', (2874, 2898), False, 'from torch.utils.data import DataLoader\n'), ((3179, 3195), 'tqdm.notebook.tqdm', 'tqdm', (['dataloader'], {}), '(dataloader)\n', (3183, 3195), False, 'from tqdm.notebook import tqdm\n'), ((3282, 3297), 'numpy.array', 'np.array', (['probs'], {}), '(probs)\n', (3290, 3297), True, 'import numpy as np\n'), ((3665, 3705), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'params', 'columns': 'names'}), '(data=params, columns=names)\n', (3677, 3705), True, 'import pandas as pd\n'), ((330, 363), 'numpy.exp', 'np.exp', (['(-numerator / (2 * r ** 2))'], {}), '(-numerator / (2 * r ** 2))\n', (336, 363), True, 'import numpy as np\n'), ((1939, 1974), 'numpy.exp', 'np.exp', (['(-numerator / (a * rho) ** 2)'], {}), '(-numerator / (a * rho) ** 2)\n', (1945, 1974), True, 'import numpy as np\n'), ((2061, 2076), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2074, 2076), False, 'import torch\n'), ((2176, 2202), 'numpy.linspace', 'np.linspace', (['(-lim)', 'lim', '(51)'], {}), '(-lim, lim, 51)\n', (2187, 2202), True, 'import numpy as np\n'), ((2224, 2279), 'torch.utils.data.DataLoader', 'DataLoader', (['[(input * c) for c in const]'], {'batch_size': '(16)'}), '([(input * c) for c in const], batch_size=16)\n', (2234, 2279), False, 'from torch.utils.data import DataLoader\n'), ((3943, 4003), 'cotrain_helper.get_learner_by_query', 'get_learner_by_query', (['query'], {'eval_mode': '(True)', 'device': 
'"""cuda:1"""'}), "(query, eval_mode=True, device='cuda:1')\n", (3963, 4003), False, 'from cotrain_helper import get_learner_by_query\n'), ((1092, 1146), 'numpy.sum', 'np.sum', (['((points - c1) ** 2 / (2 * sigma ** 2))'], {'axis': '(-1)'}), '((points - c1) ** 2 / (2 * sigma ** 2), axis=-1)\n', (1098, 1146), True, 'import numpy as np\n'), ((1165, 1219), 'numpy.sum', 'np.sum', (['((points - c2) ** 2 / (2 * sigma ** 2))'], {'axis': '(-1)'}), '((points - c2) ** 2 / (2 * sigma ** 2), axis=-1)\n', (1171, 1219), True, 'import numpy as np\n'), ((1653, 1666), 'numpy.cos', 'np.cos', (['gamma'], {}), '(gamma)\n', (1659, 1666), True, 'import numpy as np\n'), ((1668, 1681), 'numpy.sin', 'np.sin', (['gamma'], {}), '(gamma)\n', (1674, 1681), True, 'import numpy as np\n'), ((1709, 1722), 'numpy.cos', 'np.cos', (['gamma'], {}), '(gamma)\n', (1715, 1722), True, 'import numpy as np\n'), ((2095, 2121), 'torch.ones', 'torch.ones', (['(1)', '(1)', '(128)', '(128)'], {}), '(1, 1, 128, 128)\n', (2105, 2121), False, 'import torch\n'), ((970, 983), 'numpy.cos', 'np.cos', (['gamma'], {}), '(gamma)\n', (976, 983), True, 'import numpy as np\n'), ((985, 998), 'numpy.sin', 'np.sin', (['gamma'], {}), '(gamma)\n', (991, 998), True, 'import numpy as np\n'), ((1694, 1707), 'numpy.sin', 'np.sin', (['gamma'], {}), '(gamma)\n', (1700, 1707), True, 'import numpy as np\n'), ((2478, 2495), 'torch.nn.functional.softmax', 'F.softmax', (['output'], {}), '(output)\n', (2487, 2495), True, 'import torch.nn.functional as F\n'), ((2711, 2741), 'numpy.expand_dims', 'np.expand_dims', (['Z'], {'axis': '(0, 1)'}), '(Z, axis=(0, 1))\n', (2725, 2741), True, 'import numpy as np\n')] |
#------------------------------------------------------------------------------
# Copyright (c) 2015, 2016, The University of Manchester, UK.
#
# BSD licenced. See LICENCE for details.
#
# Authors: <NAME>, <NAME>
#------------------------------------------------------------------------------
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
def regression(file_name, second_index_quality, order):
    """Plot polynomial fits of one quality metric against both coverages.

    Reads a headerless CSV of per-class metrics (column 0: index,
    columns 1-2: test/branch coverage, columns 3-9: quality metrics),
    fits a polynomial of the given order of the chosen metric against each
    coverage column, and draws both fits in one figure.

    Args:
        file_name: path to the merged metrics CSV file.
        second_index_quality: column index (3-9) of the quality metric.
        order: degree of the fitted polynomial.
    """
    COLUMN_SEPARATOR = ','
    # pd.DataFrame.from_csv was deprecated in pandas 0.21 and removed in
    # 1.0; read_csv with index_col=0 is the documented replacement.
    metrics_data = pd.read_csv(file_name, sep=COLUMN_SEPARATOR, header=None,
                               index_col=0)
    labels = ['',
              'Test Coverage',
              'Branch Coverage',
              'McCabe\'s Cyclomatic Complexity (CC)',
              'Lines of Code (LOC)',
              'Number of local Methods (NOM)',
              'Improvement of Lack of Cohesion in Methods (ILCOM)',
              'Lack of Documentation (LOD)',
              'Depth of Inheritance Tree (DIT)',
              'Coupling Between Objects (CBO)'
              ]
    x = [0, 0]
    ys = [0, 0]
    # Hoisted out of the loop: the quality column does not depend on i.
    y = metrics_data[second_index_quality]
    for i in range(0, 2):
        x[i] = metrics_data[i + 1]
        coeffs = np.polyfit(x[i], y, order)
        polynomial = np.poly1d(coeffs)
        ys[i] = polynomial(x[i])
    plt.figure(second_index_quality - 2)
    plt.xlabel("Coverage")
    plt.ylabel(labels[second_index_quality])
    plt.title(labels[second_index_quality])
    plt.plot(x[0], ys[0])
    plt.plot(x[1], ys[1])
    plt.ylim(ymin=0)
    plt.legend([labels[1], labels[2]], loc='upper right')
# Path to the merged per-frame quality metrics for all projects.
file_path = '../data/combined/Merge/Merge all projects.csv'
# Columns 3-9 hold the quality metrics; fit and plot a degree-1 polynomial
# for each against the coverage columns.
for x in range(3,10):
    regression(file_path, x, 1)
# Display all figures at once.
plt.show()
| [
"matplotlib.pyplot.ylabel",
"numpy.polyfit",
"matplotlib.pyplot.xlabel",
"pandas.DataFrame.from_csv",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.figure",
"numpy.poly1d",
"matplotlib.pyplot.title",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((469, 536), 'pandas.DataFrame.from_csv', 'pd.DataFrame.from_csv', (['file_name'], {'sep': 'COLUMN_SEPARATOR', 'header': 'None'}), '(file_name, sep=COLUMN_SEPARATOR, header=None)\n', (490, 536), True, 'import pandas as pd\n'), ((1241, 1277), 'matplotlib.pyplot.figure', 'plt.figure', (['(second_index_quality - 2)'], {}), '(second_index_quality - 2)\n', (1251, 1277), True, 'import matplotlib.pyplot as plt\n'), ((1282, 1304), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Coverage"""'], {}), "('Coverage')\n", (1292, 1304), True, 'import matplotlib.pyplot as plt\n'), ((1309, 1349), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['labels[second_index_quality]'], {}), '(labels[second_index_quality])\n', (1319, 1349), True, 'import matplotlib.pyplot as plt\n'), ((1354, 1393), 'matplotlib.pyplot.title', 'plt.title', (['labels[second_index_quality]'], {}), '(labels[second_index_quality])\n', (1363, 1393), True, 'import matplotlib.pyplot as plt\n'), ((1398, 1419), 'matplotlib.pyplot.plot', 'plt.plot', (['x[0]', 'ys[0]'], {}), '(x[0], ys[0])\n', (1406, 1419), True, 'import matplotlib.pyplot as plt\n'), ((1424, 1445), 'matplotlib.pyplot.plot', 'plt.plot', (['x[1]', 'ys[1]'], {}), '(x[1], ys[1])\n', (1432, 1445), True, 'import matplotlib.pyplot as plt\n'), ((1450, 1466), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {'ymin': '(0)'}), '(ymin=0)\n', (1458, 1466), True, 'import matplotlib.pyplot as plt\n'), ((1471, 1524), 'matplotlib.pyplot.legend', 'plt.legend', (['[labels[1], labels[2]]'], {'loc': '"""upper right"""'}), "([labels[1], labels[2]], loc='upper right')\n", (1481, 1524), True, 'import matplotlib.pyplot as plt\n'), ((1645, 1655), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1653, 1655), True, 'import matplotlib.pyplot as plt\n'), ((1137, 1163), 'numpy.polyfit', 'np.polyfit', (['x[i]', 'y', 'order'], {}), '(x[i], y, order)\n', (1147, 1163), True, 'import numpy as np\n'), ((1185, 1202), 'numpy.poly1d', 'np.poly1d', (['coeffs'], {}), '(coeffs)\n', (1194, 1202), 
True, 'import numpy as np\n')] |
"""
Measure several resonators per LO frequency and record SweepStreamArrays.
"""
import time
import numpy as np
from kid_readout.roach import hardware_tools, analog
from kid_readout.measurement import acquire, basic
from kid_readout.equipment import hardware
logger = acquire.get_script_logger(__file__)
# Parameters
suffix = 'interactive'
# Candidate resonance frequencies (MHz) for the low and high LO bands.
low_f0_MHz = np.array([2254.837, 2326.842, 2483.490, 2580])
high_f0_MHz = np.array([3313.270, 3378.300, 3503.600, 3524.435])
# This run targets the first resonator of the high band.
f0_MHz = high_f0_MHz[0]
f_minimum = 10e6  # Keep the tones away from the LO by at least this frequency.
f_stream_offset_MHz = 10 # Set a second tone away from the resonance by this amount
df_lo_MHz = 0.1
sweep_interval = 6
dac_attenuation = 33
fft_gain = 0
tone_sample_exponent = 18
sweep_length_seconds = 0.1
num_sweep_tones = 255
# Hardware
conditioner = analog.HeterodyneMarkII()
magnet = hardware.Thing(name='magnet_array', state={'orientation': 'up',
                                                   'distance_from_base_mm': 276})
hw = hardware.Hardware(conditioner, magnet)
ri = hardware_tools.r1h11_with_mk2(initialize=True, use_config=False)
ri.set_dac_attenuator(dac_attenuation)
ri.set_fft_gain(fft_gain)
# Calculate LO and baseband frequencies
# Baseband tone resolution set by the ADC sample rate and tone bank size.
f_resolution = ri.state.adc_sample_rate / 2**tone_sample_exponent
minimum_integer = int(f_minimum / f_resolution)
# Tone positions as integer multiples of f_resolution, spaced
# sweep_interval bins apart starting at f_minimum.
offset_integers = minimum_integer + sweep_interval * np.arange(num_sweep_tones)
offset_frequencies_MHz = 1e-6 * f_resolution * offset_integers
# Center the sweep on the target resonance, rounded to the df_lo_MHz
# synthesizer channel grid.
f_lo_MHz = df_lo_MHz * np.round((f0_MHz - offset_frequencies_MHz.mean()) / df_lo_MHz)
logger.info("Frequency spacing is {:.1f} kHz".format(1e-3 * sweep_interval * f_resolution))
logger.info("Sweep span is {:.1f} MHz".format(offset_frequencies_MHz.ptp()))
ri.set_lo(lomhz=f_lo_MHz, chan_spacing=df_lo_MHz)
logger.info("Set LO to {:.3f} MHz".format(f_lo_MHz))
ri.set_tone_baseband_freqs(offset_frequencies_MHz[:, np.newaxis], nsamp=2 ** tone_sample_exponent)
sweep_array = acquire.run_loaded_sweep(ri, length_seconds=sweep_length_seconds,
                                       tone_bank_indices=np.arange(num_sweep_tones))
# Take the fitted resonance frequency from the first sweep result.
fit_f0_MHz = 1e-6 * sweep_array[0].resonator.f_0
logger.info("Fit resonance frequency in MHz is {}".format(fit_f0_MHz))
# Add a tone at the fitted resonance and select it for streaming.
f_stream_MHz = ri.add_tone_freqs(freqs=np.array([fit_f0_MHz]))
ri.select_bank(num_sweep_tones)
ri.select_fft_bins(np.array([0]))
| [
"kid_readout.roach.analog.HeterodyneMarkII",
"kid_readout.measurement.acquire.get_script_logger",
"kid_readout.equipment.hardware.Hardware",
"numpy.array",
"kid_readout.equipment.hardware.Thing",
"numpy.arange",
"kid_readout.roach.hardware_tools.r1h11_with_mk2"
] | [((272, 307), 'kid_readout.measurement.acquire.get_script_logger', 'acquire.get_script_logger', (['__file__'], {}), '(__file__)\n', (297, 307), False, 'from kid_readout.measurement import acquire, basic\n'), ((358, 403), 'numpy.array', 'np.array', (['[2254.837, 2326.842, 2483.49, 2580]'], {}), '([2254.837, 2326.842, 2483.49, 2580])\n', (366, 403), True, 'import numpy as np\n'), ((419, 464), 'numpy.array', 'np.array', (['[3313.27, 3378.3, 3503.6, 3524.435]'], {}), '([3313.27, 3378.3, 3503.6, 3524.435])\n', (427, 464), True, 'import numpy as np\n'), ((829, 854), 'kid_readout.roach.analog.HeterodyneMarkII', 'analog.HeterodyneMarkII', ([], {}), '()\n', (852, 854), False, 'from kid_readout.roach import hardware_tools, analog\n'), ((864, 962), 'kid_readout.equipment.hardware.Thing', 'hardware.Thing', ([], {'name': '"""magnet_array"""', 'state': "{'orientation': 'up', 'distance_from_base_mm': 276}"}), "(name='magnet_array', state={'orientation': 'up',\n 'distance_from_base_mm': 276})\n", (878, 962), False, 'from kid_readout.equipment import hardware\n'), ((1016, 1054), 'kid_readout.equipment.hardware.Hardware', 'hardware.Hardware', (['conditioner', 'magnet'], {}), '(conditioner, magnet)\n', (1033, 1054), False, 'from kid_readout.equipment import hardware\n'), ((1060, 1124), 'kid_readout.roach.hardware_tools.r1h11_with_mk2', 'hardware_tools.r1h11_with_mk2', ([], {'initialize': '(True)', 'use_config': '(False)'}), '(initialize=True, use_config=False)\n', (1089, 1124), False, 'from kid_readout.roach import hardware_tools, analog\n'), ((2346, 2359), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (2354, 2359), True, 'import numpy as np\n'), ((1398, 1424), 'numpy.arange', 'np.arange', (['num_sweep_tones'], {}), '(num_sweep_tones)\n', (1407, 1424), True, 'import numpy as np\n'), ((2084, 2110), 'numpy.arange', 'np.arange', (['num_sweep_tones'], {}), '(num_sweep_tones)\n', (2093, 2110), True, 'import numpy as np\n'), ((2271, 2293), 'numpy.array', 'np.array', 
(['[fit_f0_MHz]'], {}), '([fit_f0_MHz])\n', (2279, 2293), True, 'import numpy as np\n')] |
"""
Converts individual mask files into one label map.
The fourth and last step in the labeling workflow.
"""
import click
import glob
import logging
import numpy as np
import os
import skimage.io
# Shared log format: level, timestamp, source location, message.
LOG_FORMAT = (
    "%(levelname)s %(asctime)s - %(filename)s %(funcName)s %(lineno)s - %(message)s"
)
# Append DEBUG-and-above records to ./data.log in the working directory.
logging.basicConfig(
    filename="./data.log", level=logging.DEBUG, format=LOG_FORMAT, filemode="a"
)
# Root logger; the functions below log through this.
log = logging.getLogger()
def import_masks(dir_masks: str) -> np.ndarray:
    """
    Imports masks in the format of Fiji labels (unique file for each label as png).
    Converts them into one label mask with unique valued items.

    Args:
        dir_masks: directory containing one binary mask_*.png file per label.

    Returns:
        2-D label image in which the pixels of mask i carry the value i + 1.

    Raises:
        TypeError: if dir_masks is not a str.
        ValueError: if the directory does not exist or contains no masks.
    """
    if not isinstance(dir_masks, str):
        raise TypeError(f"dir_masks must be str but is {type(dir_masks)}.")
    if not os.path.exists(dir_masks):
        raise ValueError(f"dir_masks must exist, {dir_masks} does not.")
    # Sorted for a deterministic file -> label assignment; glob order is
    # filesystem-dependent.
    mask_files = sorted(glob.glob(f"{dir_masks}/mask_*.png"))
    if not mask_files:
        raise ValueError("Empty masks directory. Must contain at least one item.")
    masks = [skimage.io.imread(f) for f in mask_files]
    log.info(f"Found {len(masks)} masks.")
    # Give every mask a unique positive label, then merge; np.max resolves
    # overlapping pixels in favour of the higher label.
    masks = [np.where(m, i + 1, 0) for i, m in enumerate(masks)]
    return np.max(masks, axis=0)
@click.command()
@click.option(
    "--base_dir",
    type=click.Path(exists=True),
    prompt="Path to the base directory",
    required=True,
    help="Path to the base directory.",
)
def main(base_dir: str):
    """Convert per-label Fiji masks under base_dir/Labeling into single
    label images, writing 16-bit images and masks to base_dir/Processed.

    Expects base_dir/Labeling/<img_id>/images/<img_id>.tif and
    base_dir/Labeling/<img_id>/masks/mask_*.png for every item.
    """
    if not os.path.exists(base_dir):
        raise ValueError("base_dir must exist")
    log.info(f'Started with base_dir "{base_dir}".')
    dir_processed = os.path.join(base_dir, "Processed")
    dir_labeling = os.path.join(base_dir, "Labeling")
    # os.walk's first yield lists the immediate sub-directories (one per image).
    dir_labeling_items = next(os.walk(dir_labeling))[1]
    log.info(f'Found labeling items - "{dir_labeling_items}".')
    for img_id in dir_labeling_items:
        dir_curr = os.path.join(dir_labeling, img_id)
        log.info(f"Current item - {dir_curr}.")
        # Images
        try:
            img = skimage.io.imread(f"{dir_curr}/images/{img_id}.tif")
        except Exception:
            raise ValueError(f"Could not read file {img_id}, please check file type.")
        # Re-saved as 16-bit; check_contrast=False silences low-contrast warnings.
        skimage.io.imsave(
            f"{dir_processed}/images/{img_id}.tif",
            img.astype(dtype=np.uint16),
            check_contrast=False,
        )
        log.info(f"Image processed.")
        # Masks
        mask = import_masks(f"{dir_curr}/masks")
        skimage.io.imsave(
            f"{dir_processed}/masks/{img_id}.tif",
            mask.astype(dtype=np.uint16),
            check_contrast=False,
        )
        log.info(f"Mask processed.")
    print("\U0001F3C1 Programm finished successfully \U0001F603 \U0001F3C1")
if __name__ == "__main__":
    main()
| [
"logging.basicConfig",
"logging.getLogger",
"os.path.exists",
"numpy.where",
"os.walk",
"os.path.join",
"numpy.max",
"click.Path",
"click.command",
"glob.glob"
] | [((301, 402), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': '"""./data.log"""', 'level': 'logging.DEBUG', 'format': 'LOG_FORMAT', 'filemode': '"""a"""'}), "(filename='./data.log', level=logging.DEBUG, format=\n LOG_FORMAT, filemode='a')\n", (320, 402), False, 'import logging\n'), ((410, 429), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (427, 429), False, 'import logging\n'), ((1249, 1264), 'click.command', 'click.command', ([], {}), '()\n', (1262, 1264), False, 'import click\n'), ((889, 925), 'glob.glob', 'glob.glob', (['f"""{dir_masks}/mask_*.png"""'], {}), "(f'{dir_masks}/mask_*.png')\n", (898, 925), False, 'import glob\n'), ((1207, 1228), 'numpy.max', 'np.max', (['masks'], {'axis': '(0)'}), '(masks, axis=0)\n', (1213, 1228), True, 'import numpy as np\n'), ((1618, 1653), 'os.path.join', 'os.path.join', (['base_dir', '"""Processed"""'], {}), "(base_dir, 'Processed')\n", (1630, 1653), False, 'import os\n'), ((1673, 1707), 'os.path.join', 'os.path.join', (['base_dir', '"""Labeling"""'], {}), "(base_dir, 'Labeling')\n", (1685, 1707), False, 'import os\n'), ((771, 796), 'os.path.exists', 'os.path.exists', (['dir_masks'], {}), '(dir_masks)\n', (785, 796), False, 'import os\n'), ((1143, 1164), 'numpy.where', 'np.where', (['m', '(i + 1)', '(0)'], {}), '(m, i + 1, 0)\n', (1151, 1164), True, 'import numpy as np\n'), ((1470, 1494), 'os.path.exists', 'os.path.exists', (['base_dir'], {}), '(base_dir)\n', (1484, 1494), False, 'import os\n'), ((1887, 1921), 'os.path.join', 'os.path.join', (['dir_labeling', 'img_id'], {}), '(dir_labeling, img_id)\n', (1899, 1921), False, 'import os\n'), ((1307, 1330), 'click.Path', 'click.Path', ([], {'exists': '(True)'}), '(exists=True)\n', (1317, 1330), False, 'import click\n'), ((1738, 1759), 'os.walk', 'os.walk', (['dir_labeling'], {}), '(dir_labeling)\n', (1745, 1759), False, 'import os\n')] |
#!/usr/bin/env python3
import argparse
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
import torchvision.datasets as dset
from datasets import CIFAR100
import torchvision.transforms as transforms
from torchvision.utils import save_image
from torch.utils.data import DataLoader
import os
import sys
import copy
import math
import numpy as np
from random import shuffle
import shutil
import densenet_v1 as densenet
from train import SplitCifarDataSet
# Location of the CIFAR data and of this run's output artifacts.
data_dir = '/root/Desktop/data'
results_dir = 'results_v2'
# Training hyper-parameters.
batch = 64
bin_weight = 0.4  # NOTE(review): presumably a loss-weighting factor -- confirm against the training code
baseline_epocs = 130
ft_epochs = 100
# Fixed list of CIFAR-100 class ids assigned to group A.
classes_a = np.array([
    4, 98, 75, 9, 25, 21, 76, 23, 24, 10, 8, 28, 63, 33, 82, 87, 19,
    13, 3, 81, 49, 27, 91, 74, 95, 52, 79, 90, 51, 61, 39, 72, 16,
    93, 70, 67, 59, 34, 37, 94, 30, 12, 5, 46, 96, 48, 32, 20, 71, 85],
    dtype=np.int64)
# First 25 group-A classes, passed to CIFAR100(..., classes_a_bin=...) below.
classes_a_bin = classes_a[:25]
use_best_state = False
def main():
    """Experiment driver.

    Trains a DenseNet baseline on CIFAR-100 group A, saves the weights,
    then fine-tunes the saved model on group B.  The whole sequence is run
    twice: once with the plain classifier and once with the auxiliary
    binary output head enabled.

    Side effects: wipes and recreates `results_dir`, writes CSV logs and
    model checkpoints there, and runs all computation on the GPU.
    """
    # Fixed seeds for reproducibility (CPU and CUDA RNGs).
    torch.manual_seed(37)
    torch.cuda.manual_seed(37)
    # Start from a clean results directory on every run.
    if os.path.exists(results_dir):
        shutil.rmtree(results_dir)
    os.makedirs(results_dir, exist_ok=True)
    # Per-channel normalization statistics -- presumably computed over the
    # training set; TODO confirm against the dataset preprocessing.
    normMean = [0.5423671, 0.53410053, 0.52827841]
    normStd = [0.30129549, 0.29579896, 0.29065931]
    normTransform = transforms.Normalize(normMean, normStd)
    # Training uses standard CIFAR augmentation (random crop + flip);
    # evaluation only converts to tensor and normalizes.
    trainTransform = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normTransform
    ])
    testTransform = transforms.Compose([
        transforms.ToTensor(),
        normTransform
    ])
    kwargs = {'num_workers': 1, 'pin_memory': True}
    # Group 'A' loaders: baseline classes (see module-level `classes_a`).
    trainLoaderA = DataLoader(
        CIFAR100(root=data_dir, group='A', train=True, download=False,
                 transform=trainTransform, classes_a=classes_a,
                 classes_a_bin=classes_a_bin),
        batch_size=batch, shuffle=True, **kwargs)
    testLoaderA = DataLoader(
        CIFAR100(root=data_dir, group='A', train=False, download=False,
                 transform=testTransform, classes_a=classes_a,
                 classes_a_bin=classes_a_bin),
        batch_size=batch, shuffle=False, **kwargs)
    # Group 'B' loaders: the classes used for the transfer phase.
    trainLoaderB = DataLoader(
        CIFAR100(root=data_dir, group='B', train=True, download=False,
                 transform=trainTransform, classes_a=classes_a,
                 classes_a_bin=classes_a_bin),
        batch_size=batch, shuffle=True, **kwargs)
    testLoaderB = DataLoader(
        CIFAR100(root=data_dir, group='B', train=False, download=False,
                 transform=testTransform, classes_a=classes_a,
                 classes_a_bin=classes_a_bin),
        batch_size=batch, shuffle=False, **kwargs)
    def get_net(transfer=False, binary=False):
        # Build a DenseNet (depth 100, growth rate 12, bottleneck) on the GPU
        # together with its SGD optimizer.  In transfer mode the reset layers
        # train at lr=1e-1 while the fine-tuned parameters use lr=1e-2.
        net = densenet.DenseNet(growthRate=12, depth=100, reduction=0.5,
                                bottleneck=True, nClasses=100, binary=binary)
        if transfer:
            ft_params, reset_params = net.split_parmeters()
            params = [
                {'params': reset_params, 'lr': 1e-1},
                {'params': ft_params, 'lr': 1e-2}
            ]
        else:
            params = net.parameters()
        optimizer = optim.SGD(params, lr=1e-1,
                              momentum=0.9, weight_decay=1e-4)
        net = net.cuda()
        print(net)
        return net, optimizer
    # --- Run 1: plain classifier (no binary head) ---
    net, optimizer = get_net(transfer=False)
    print(' + Number of params: {}'.format(
        sum([p.data.nelement() for p in net.parameters()])))
    best_state = run_base(net, trainLoaderA, testLoaderA, optimizer, binary=False)
    # Save either the best checkpoint seen during training or the final
    # weights, controlled by the module-level `use_best_state` flag.
    state = best_state if use_best_state else net.state_dict()
    torch.save(state, os.path.join(results_dir, 'model_state.t7'))
    # Fine-tune the saved group-A model on group B.
    net, optimizer = get_net(transfer=True)
    net.load_state_dict(torch.load(os.path.join(results_dir, 'model_state.t7')))
    net.reset_last_layer()
    run_transfer(net, trainLoaderB, testLoaderB, optimizer, binary=False)
    # --- Run 2: same pipeline with the auxiliary binary head enabled ---
    net, optimizer = get_net(binary=True, transfer=False)
    best_state = run_base(net, trainLoaderA, testLoaderA, optimizer, binary=True)
    state = best_state if use_best_state else net.state_dict()
    torch.save(state, os.path.join(results_dir, 'model_state_binary.t7'))
    net, optimizer = get_net(transfer=True, binary=True)
    net.load_state_dict(torch.load(os.path.join(results_dir, 'model_state_binary.t7')))
    net.reset_last_layer()
    run_transfer(net, trainLoaderB, testLoaderB, optimizer, binary=True)
def run_base(net, trainLoader, testLoader, optimizer, binary):
    """Run baseline (group A) training for `baseline_epocs` epochs.

    Per-epoch metrics are appended to trainA/testA CSV logs under
    `results_dir` (with a '_bin' suffix when `binary` is set).  Returns a
    deep copy of the state dict that achieved the lowest test loss.
    """
    suffix = '_bin' if binary else ''
    train_log = open(os.path.join(results_dir, 'trainA{}.csv'.format(suffix)), 'w')
    test_log = open(os.path.join(results_dir, 'testA{}.csv'.format(suffix)), 'w')
    best_loss = 1000
    best_state = net.state_dict()
    for epoch in range(1, baseline_epocs + 1):
        adjust_opt(optimizer, epoch)
        train(epoch, net, trainLoader, optimizer, train_log, binary, False)
        current_loss = test(epoch, net, testLoader, test_log, binary, False)
        # Keep a snapshot of the best-performing weights so far.
        if current_loss < best_loss:
            best_loss = current_loss
            best_state = copy.deepcopy(net.state_dict())
            print('new best results')
    train_log.close()
    test_log.close()
    return best_state
def run_transfer(net, trainLoader, testLoader, optimizer, binary):
    """Fine-tune `net` on group B for `ft_epochs` epochs.

    Per-epoch metrics are appended to trainB/testB CSV logs under
    `results_dir` (with a '_bin' suffix when `binary` is set).
    """
    suffix = '_bin' if binary else ''
    train_log = open(os.path.join(results_dir, 'trainB{}.csv'.format(suffix)), 'w')
    test_log = open(os.path.join(results_dir, 'testB{}.csv'.format(suffix)), 'w')
    for epoch in range(1, ft_epochs + 1):
        adjust_opt_transfer(optimizer, epoch)
        # Transfer mode: the binary flag is irrelevant here (always False).
        train(epoch, net, trainLoader, optimizer, train_log, False, True)
        test(epoch, net, testLoader, test_log, False, True)
    train_log.close()
    test_log.close()
def train(epoch, net, trainLoader, optimizer, trainF, binary, transfer):
    """Run one training epoch and append per-batch `epoch,loss,error%` rows to trainF.

    The (binary, transfer) flags select how each batch's target is unpacked:
      * binary and not transfer -- group A with an auxiliary binary head;
        target is a (class, binary) pair and the losses are blended with
        `bin_weight`.
      * not transfer            -- group A without the binary head; target is
        a (class, binary) pair but only the class part is used.
      * otherwise               -- group B (transfer); target is a plain
        class-index tensor.
    Uses legacy (pre-0.4) PyTorch idioms: Variable wrappers and loss.data[0].
    """
    net.train()
    nProcessed = 0
    nTrain = len(trainLoader.dataset)
    for batch_idx, (data, target) in enumerate(trainLoader):
        if binary and not transfer: # A binary
            data, target, target_bin = data.cuda(), target[0].cuda(), target[1].cuda()
            data, target, target_bin = Variable(data), Variable(target), Variable(target_bin)
            optimizer.zero_grad()
            fc_out, bin_out = net(data)
            loss_fc = F.nll_loss(fc_out, target)
            loss_bin = F.nll_loss(bin_out, target_bin)
            # Blend classifier and binary-head losses.
            loss = bin_weight * loss_bin + (1. - bin_weight) * loss_fc
        elif not transfer: # A not binary
            #print(type(target), target[0].size(), target[1].size())
            data, target = data.cuda(), target[0].cuda()
            data, target = Variable(data), Variable(target)
            optimizer.zero_grad()
            fc_out, bin_out = net(data)
            #print(type(fc_out), fc_out.size(), type(target), target.size())
            #print(target)
            loss = F.nll_loss(fc_out, target)
        else: # B
            data, target = data.cuda(), target.cuda()
            data, target = Variable(data), Variable(target)
            optimizer.zero_grad()
            fc_out, bin_out = net(data)
            loss = F.nll_loss(fc_out, target)
        loss.backward()
        optimizer.step()
        nProcessed += len(data)
        pred = fc_out.data.max(1)[1] # get the index of the max log-probability
        incorrect = pred.ne(target.data).cpu().sum()
        err = 100.*incorrect/len(data)
        # Fractional epoch for smooth logging curves.
        partialEpoch = epoch + batch_idx / len(trainLoader) - 1
        print('Train Epoch: {:.2f} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tError: {:.6f}'.format(
            partialEpoch, nProcessed, nTrain, 100. * batch_idx / len(trainLoader),
            loss.data[0], err))
        trainF.write('{},{},{}\n'.format(partialEpoch, loss.data[0], err))
        trainF.flush()
def test(epoch, net, testLoader, testF, binary, transfer):
    """Evaluate `net` over testLoader and append `epoch,loss,error%` to testF.

    The (binary, transfer) flags select how each batch's target is
    unpacked, mirroring the three branches in train().  Only the
    classifier loss is accumulated (the binary head is ignored here).
    Returns the average test loss.
    """
    net.eval()
    test_loss = 0
    incorrect = 0
    for data, target in testLoader:
        if binary and not transfer:
            # Group A with binary head: target is a (class, binary) pair.
            data, target, target_bin = data.cuda(), target[0].cuda(), target[1].cuda()
            data, target, target_bin = Variable(data, volatile=True), Variable(target), Variable(target_bin)
            fc_out, bin_out = net(data)
            # print(data[0])
            # raise Exception
            test_loss += F.nll_loss(fc_out, target).data[0]
        elif not transfer:
            # Group A without binary head: the binary part of the target is dropped.
            data, target = data.cuda(), target[0].cuda()
            data, target = Variable(data, volatile=True), Variable(target)
            fc_out, bin_out = net(data)
            test_loss += F.nll_loss(fc_out, target).data[0]
        else:
            # Group B (transfer): target is a plain class-index tensor.
            data, target = data.cuda(), target.cuda()
            data, target = Variable(data, volatile=True), Variable(target)
            fc_out, bin_out = net(data)
            test_loss += F.nll_loss(fc_out, target).data[0]
        pred = fc_out.data.max(1)[1] # get the index of the max log-probability
        incorrect += pred.ne(target.data).cpu().sum()
    test_loss /= len(testLoader) # loss function already averages over batch size
    nTotal = len(testLoader.dataset)
    err = 100.*incorrect/nTotal
    print('\nTest set: Average loss: {:.4f}, Error: {}/{} ({:.0f}%)\n'.format(
        test_loss, incorrect, nTotal, err))
    testF.write('{},{},{}\n'.format(epoch, test_loss, err))
    testF.flush()
    return test_loss
def adjust_opt(optimizer, epoch):
    """Step-wise learning-rate schedule for baseline training.

    Sets every parameter group's lr to 1e-1 at epoch 1, 1e-2 at epoch
    126 and 1e-3 at epoch 151; all other epochs leave the rate untouched.
    """
    schedule = {1: 1e-1, 126: 1e-2, 151: 1e-3}
    new_lr = schedule.get(epoch)
    if new_lr is None:
        return
    for group in optimizer.param_groups:
        group['lr'] = new_lr
def adjust_opt_transfer(optimizer, epoch):
    """Learning-rate schedule for the transfer phase.

    Expects exactly two parameter groups; both are dropped to 1e-2 at
    epoch 51 and to 1e-3 at epoch 76.  Other epochs are no-ops.
    """
    first_group, second_group = optimizer.param_groups
    drops = {51: 1e-2, 76: 1e-3}
    new_lr = drops.get(epoch)
    if new_lr is not None:
        first_group['lr'] = new_lr
        second_group['lr'] = new_lr
if __name__=='__main__':
main() | [
"os.path.exists",
"torch.manual_seed",
"torch.optim.SGD",
"os.makedirs",
"torch.nn.functional.nll_loss",
"densenet_v1.DenseNet",
"os.path.join",
"torchvision.transforms.RandomHorizontalFlip",
"torchvision.transforms.RandomCrop",
"numpy.array",
"torchvision.transforms.Normalize",
"shutil.rmtree... | [((677, 908), 'numpy.array', 'np.array', (['[4, 98, 75, 9, 25, 21, 76, 23, 24, 10, 8, 28, 63, 33, 82, 87, 19, 13, 3, 81,\n 49, 27, 91, 74, 95, 52, 79, 90, 51, 61, 39, 72, 16, 93, 70, 67, 59, 34,\n 37, 94, 30, 12, 5, 46, 96, 48, 32, 20, 71, 85]'], {'dtype': 'np.int64'}), '([4, 98, 75, 9, 25, 21, 76, 23, 24, 10, 8, 28, 63, 33, 82, 87, 19, \n 13, 3, 81, 49, 27, 91, 74, 95, 52, 79, 90, 51, 61, 39, 72, 16, 93, 70, \n 67, 59, 34, 37, 94, 30, 12, 5, 46, 96, 48, 32, 20, 71, 85], dtype=np.int64)\n', (685, 908), True, 'import numpy as np\n'), ((988, 1009), 'torch.manual_seed', 'torch.manual_seed', (['(37)'], {}), '(37)\n', (1005, 1009), False, 'import torch\n'), ((1014, 1040), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['(37)'], {}), '(37)\n', (1036, 1040), False, 'import torch\n'), ((1049, 1076), 'os.path.exists', 'os.path.exists', (['results_dir'], {}), '(results_dir)\n', (1063, 1076), False, 'import os\n'), ((1117, 1156), 'os.makedirs', 'os.makedirs', (['results_dir'], {'exist_ok': '(True)'}), '(results_dir, exist_ok=True)\n', (1128, 1156), False, 'import os\n'), ((1280, 1319), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['normMean', 'normStd'], {}), '(normMean, normStd)\n', (1300, 1319), True, 'import torchvision.transforms as transforms\n'), ((1086, 1112), 'shutil.rmtree', 'shutil.rmtree', (['results_dir'], {}), '(results_dir)\n', (1099, 1112), False, 'import shutil\n'), ((1706, 1849), 'datasets.CIFAR100', 'CIFAR100', ([], {'root': 'data_dir', 'group': '"""A"""', 'train': '(True)', 'download': '(False)', 'transform': 'trainTransform', 'classes_a': 'classes_a', 'classes_a_bin': 'classes_a_bin'}), "(root=data_dir, group='A', train=True, download=False, transform=\n trainTransform, classes_a=classes_a, classes_a_bin=classes_a_bin)\n", (1714, 1849), False, 'from datasets import CIFAR100\n'), ((1968, 2111), 'datasets.CIFAR100', 'CIFAR100', ([], {'root': 'data_dir', 'group': '"""A"""', 'train': '(False)', 'download': '(False)', 
'transform': 'testTransform', 'classes_a': 'classes_a', 'classes_a_bin': 'classes_a_bin'}), "(root=data_dir, group='A', train=False, download=False, transform=\n testTransform, classes_a=classes_a, classes_a_bin=classes_a_bin)\n", (1976, 2111), False, 'from datasets import CIFAR100\n'), ((2233, 2376), 'datasets.CIFAR100', 'CIFAR100', ([], {'root': 'data_dir', 'group': '"""B"""', 'train': '(True)', 'download': '(False)', 'transform': 'trainTransform', 'classes_a': 'classes_a', 'classes_a_bin': 'classes_a_bin'}), "(root=data_dir, group='B', train=True, download=False, transform=\n trainTransform, classes_a=classes_a, classes_a_bin=classes_a_bin)\n", (2241, 2376), False, 'from datasets import CIFAR100\n'), ((2495, 2638), 'datasets.CIFAR100', 'CIFAR100', ([], {'root': 'data_dir', 'group': '"""B"""', 'train': '(False)', 'download': '(False)', 'transform': 'testTransform', 'classes_a': 'classes_a', 'classes_a_bin': 'classes_a_bin'}), "(root=data_dir, group='B', train=False, download=False, transform=\n testTransform, classes_a=classes_a, classes_a_bin=classes_a_bin)\n", (2503, 2638), False, 'from datasets import CIFAR100\n'), ((2782, 2890), 'densenet_v1.DenseNet', 'densenet.DenseNet', ([], {'growthRate': '(12)', 'depth': '(100)', 'reduction': '(0.5)', 'bottleneck': '(True)', 'nClasses': '(100)', 'binary': 'binary'}), '(growthRate=12, depth=100, reduction=0.5, bottleneck=True,\n nClasses=100, binary=binary)\n', (2799, 2890), True, 'import densenet_v1 as densenet\n'), ((3213, 3273), 'torch.optim.SGD', 'optim.SGD', (['params'], {'lr': '(0.1)', 'momentum': '(0.9)', 'weight_decay': '(0.0001)'}), '(params, lr=0.1, momentum=0.9, weight_decay=0.0001)\n', (3222, 3273), True, 'import torch.optim as optim\n'), ((3696, 3739), 'os.path.join', 'os.path.join', (['results_dir', '"""model_state.t7"""'], {}), "(results_dir, 'model_state.t7')\n", (3708, 3739), False, 'import os\n'), ((4195, 4245), 'os.path.join', 'os.path.join', (['results_dir', '"""model_state_binary.t7"""'], {}), 
"(results_dir, 'model_state_binary.t7')\n", (4207, 4245), False, 'import os\n'), ((1371, 1407), 'torchvision.transforms.RandomCrop', 'transforms.RandomCrop', (['(32)'], {'padding': '(4)'}), '(32, padding=4)\n', (1392, 1407), True, 'import torchvision.transforms as transforms\n'), ((1417, 1450), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (1448, 1450), True, 'import torchvision.transforms as transforms\n'), ((1460, 1481), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1479, 1481), True, 'import torchvision.transforms as transforms\n'), ((1561, 1582), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1580, 1582), True, 'import torchvision.transforms as transforms\n'), ((3821, 3864), 'os.path.join', 'os.path.join', (['results_dir', '"""model_state.t7"""'], {}), "(results_dir, 'model_state.t7')\n", (3833, 3864), False, 'import os\n'), ((4340, 4390), 'os.path.join', 'os.path.join', (['results_dir', '"""model_state_binary.t7"""'], {}), "(results_dir, 'model_state_binary.t7')\n", (4352, 4390), False, 'import os\n'), ((6404, 6430), 'torch.nn.functional.nll_loss', 'F.nll_loss', (['fc_out', 'target'], {}), '(fc_out, target)\n', (6414, 6430), True, 'import torch.nn.functional as F\n'), ((6454, 6485), 'torch.nn.functional.nll_loss', 'F.nll_loss', (['bin_out', 'target_bin'], {}), '(bin_out, target_bin)\n', (6464, 6485), True, 'import torch.nn.functional as F\n'), ((6253, 6267), 'torch.autograd.Variable', 'Variable', (['data'], {}), '(data)\n', (6261, 6267), False, 'from torch.autograd import Variable\n'), ((6269, 6285), 'torch.autograd.Variable', 'Variable', (['target'], {}), '(target)\n', (6277, 6285), False, 'from torch.autograd import Variable\n'), ((6287, 6307), 'torch.autograd.Variable', 'Variable', (['target_bin'], {}), '(target_bin)\n', (6295, 6307), False, 'from torch.autograd import Variable\n'), ((6983, 7009), 'torch.nn.functional.nll_loss', 'F.nll_loss', 
(['fc_out', 'target'], {}), '(fc_out, target)\n', (6993, 7009), True, 'import torch.nn.functional as F\n'), ((7236, 7262), 'torch.nn.functional.nll_loss', 'F.nll_loss', (['fc_out', 'target'], {}), '(fc_out, target)\n', (7246, 7262), True, 'import torch.nn.functional as F\n'), ((8199, 8228), 'torch.autograd.Variable', 'Variable', (['data'], {'volatile': '(True)'}), '(data, volatile=True)\n', (8207, 8228), False, 'from torch.autograd import Variable\n'), ((8230, 8246), 'torch.autograd.Variable', 'Variable', (['target'], {}), '(target)\n', (8238, 8246), False, 'from torch.autograd import Variable\n'), ((8248, 8268), 'torch.autograd.Variable', 'Variable', (['target_bin'], {}), '(target_bin)\n', (8256, 8268), False, 'from torch.autograd import Variable\n'), ((6753, 6767), 'torch.autograd.Variable', 'Variable', (['data'], {}), '(data)\n', (6761, 6767), False, 'from torch.autograd import Variable\n'), ((6769, 6785), 'torch.autograd.Variable', 'Variable', (['target'], {}), '(target)\n', (6777, 6785), False, 'from torch.autograd import Variable\n'), ((7110, 7124), 'torch.autograd.Variable', 'Variable', (['data'], {}), '(data)\n', (7118, 7124), False, 'from torch.autograd import Variable\n'), ((7126, 7142), 'torch.autograd.Variable', 'Variable', (['target'], {}), '(target)\n', (7134, 7142), False, 'from torch.autograd import Variable\n'), ((8393, 8419), 'torch.nn.functional.nll_loss', 'F.nll_loss', (['fc_out', 'target'], {}), '(fc_out, target)\n', (8403, 8419), True, 'import torch.nn.functional as F\n'), ((8539, 8568), 'torch.autograd.Variable', 'Variable', (['data'], {'volatile': '(True)'}), '(data, volatile=True)\n', (8547, 8568), False, 'from torch.autograd import Variable\n'), ((8570, 8586), 'torch.autograd.Variable', 'Variable', (['target'], {}), '(target)\n', (8578, 8586), False, 'from torch.autograd import Variable\n'), ((8782, 8811), 'torch.autograd.Variable', 'Variable', (['data'], {'volatile': '(True)'}), '(data, volatile=True)\n', (8790, 8811), False, 'from 
torch.autograd import Variable\n'), ((8813, 8829), 'torch.autograd.Variable', 'Variable', (['target'], {}), '(target)\n', (8821, 8829), False, 'from torch.autograd import Variable\n'), ((8652, 8678), 'torch.nn.functional.nll_loss', 'F.nll_loss', (['fc_out', 'target'], {}), '(fc_out, target)\n', (8662, 8678), True, 'import torch.nn.functional as F\n'), ((8895, 8921), 'torch.nn.functional.nll_loss', 'F.nll_loss', (['fc_out', 'target'], {}), '(fc_out, target)\n', (8905, 8921), True, 'import torch.nn.functional as F\n')] |
import ravestate as rs
import ravestate_interloc as interloc
import ravestate_rawio as rawio
import ravestate_idle as idle
import ravestate_ontology as mem
import ravestate_verbaliser as verbaliser
import ravestate_visionio as visionio
import ravestate_hibye as hibye
from scientio.ontology.ontology import Ontology
from scientio.session import Session
from scientio.ontology.node import Node
import numpy as np
import rospy
import random
from roboy_cognition_msgs.msg import Faces, FacialFeatures
from reggol import get_logger, set_default_loglevel
# Module-level logger for this test module.
logger = get_logger(__name__)
def test_known_person():
    """Integration test: a face already registered in the ontology DB is
    published to VisionIO, and the dialog system is expected to greet that
    person by name.

    Requires a running Neo4j instance (password 'test') and the ravestate
    module stack; the ROS message types are constructed directly, so no
    live camera is needed.
    """
    # Captured output of the dialog system, filled in by raw_out below.
    last_output = ""
    with rs.Module(name="visionio_test"):
        # Records everything the system says so we can assert on it later.
        @rs.state(read=rawio.prop_out)
        def raw_out(ctx: rs.ContextWrapper):
            nonlocal last_output
            last_output = ctx[rawio.prop_out]
            logger.info(f"Output: {ctx[rawio.prop_out]}")
    # Unfortunately needed until Context adopts Properties as clones.
    interloc.prop_all.children.clear()
    ctx = rs.Context(
        "rawio",
        "ontology",
        "verbaliser",
        "idle",
        "interloc",
        "nlp",
        "hibye",
        "visionio",
        "visionio_test",
        "-d", "ontology", "neo4j_pw", "test"
    )
    # Create the 'known' person node in the Scientio session so the face id
    # published below resolves to an existing record.
    def register_dummy_known_person_to_db():
        onto: Ontology = mem.get_ontology()
        sess: Session = mem.get_session()
        person_node = Node(metatype=onto.get_type("Person"))
        person_node.set_properties({'name': 'visionio_test_person'})
        person_node = sess.create(person_node)
        return person_node
    # Cleanup counterpart of the registration helper.
    def delete_dummy_people():
        onto: Ontology = mem.get_ontology()
        sess: Session = mem.get_session()
        person_node = Node(metatype=onto.get_type("Person"))
        person_node.set_properties({'name': 'visionio_test_person'})
        # TODO: Delete method is not working!
        sess.delete(person_node)
    # Simulates the face-recognition topic: publishes one high-confidence
    # detection whose id matches the freshly registered person.
    @rs.receptor(ctx_wrap=ctx, write=visionio.prop_subscribe_faces)
    def known_person_approaches(ctx: rs.ContextWrapper):
        person = register_dummy_known_person_to_db()
        faces = Faces()
        faces.confidence = [0.85]
        faces.ids = [person.get_id()]
        facial_features = FacialFeatures()
        facial_features.ff = np.zeros(128)
        faces.face_encodings = [facial_features]
        ctx[visionio.prop_subscribe_faces] = faces
    mem.initialized.clear()
    ctx.emit(rs.sig_startup)
    ctx.run_once()
    # Block until the ontology backend is ready.
    assert mem.initialized.wait()
    # Vision io is started
    assert visionio.reset.wait()
    known_person_approaches()
    # Wait until greeted
    counter = 0
    while not raw_out.wait(.1) and counter < 100:
        ctx.run_once()
        counter += 1
    # The greeting must be one of the name-bearing greeting templates with
    # the test person's name substituted in.
    greeting_phrases = [phrase.replace('{name}', 'visionio_test_person') for phrase in verbaliser.get_phrase_list("greeting-with-name")]
    assert last_output in greeting_phrases
    assert visionio.recognize_faces.wait(0)
    ctx.shutdown()
    delete_dummy_people()
    # Unfortunately needed until Context adopts Properties as clones.
    interloc.prop_all.children.clear()
if __name__ == "__main__":
set_default_loglevel("DEBUG")
test_known_person()
exit()
| [
"reggol.set_default_loglevel",
"ravestate_ontology.initialized.clear",
"reggol.get_logger",
"ravestate.Context",
"ravestate.Module",
"ravestate_ontology.get_ontology",
"ravestate_ontology.initialized.wait",
"roboy_cognition_msgs.msg.Faces",
"ravestate_verbaliser.get_phrase_list",
"ravestate_vision... | [((560, 580), 'reggol.get_logger', 'get_logger', (['__name__'], {}), '(__name__)\n', (570, 580), False, 'from reggol import get_logger, set_default_loglevel\n'), ((969, 1003), 'ravestate_interloc.prop_all.children.clear', 'interloc.prop_all.children.clear', ([], {}), '()\n', (1001, 1003), True, 'import ravestate_interloc as interloc\n'), ((1014, 1166), 'ravestate.Context', 'rs.Context', (['"""rawio"""', '"""ontology"""', '"""verbaliser"""', '"""idle"""', '"""interloc"""', '"""nlp"""', '"""hibye"""', '"""visionio"""', '"""visionio_test"""', '"""-d"""', '"""ontology"""', '"""neo4j_pw"""', '"""test"""'], {}), "('rawio', 'ontology', 'verbaliser', 'idle', 'interloc', 'nlp',\n 'hibye', 'visionio', 'visionio_test', '-d', 'ontology', 'neo4j_pw', 'test')\n", (1024, 1166), True, 'import ravestate as rs\n'), ((1918, 1980), 'ravestate.receptor', 'rs.receptor', ([], {'ctx_wrap': 'ctx', 'write': 'visionio.prop_subscribe_faces'}), '(ctx_wrap=ctx, write=visionio.prop_subscribe_faces)\n', (1929, 1980), True, 'import ravestate as rs\n'), ((2381, 2404), 'ravestate_ontology.initialized.clear', 'mem.initialized.clear', ([], {}), '()\n', (2402, 2404), True, 'import ravestate_ontology as mem\n'), ((2464, 2486), 'ravestate_ontology.initialized.wait', 'mem.initialized.wait', ([], {}), '()\n', (2484, 2486), True, 'import ravestate_ontology as mem\n'), ((2526, 2547), 'ravestate_visionio.reset.wait', 'visionio.reset.wait', ([], {}), '()\n', (2545, 2547), True, 'import ravestate_visionio as visionio\n'), ((2907, 2939), 'ravestate_visionio.recognize_faces.wait', 'visionio.recognize_faces.wait', (['(0)'], {}), '(0)\n', (2936, 2939), True, 'import ravestate_visionio as visionio\n'), ((3060, 3094), 'ravestate_interloc.prop_all.children.clear', 'interloc.prop_all.children.clear', ([], {}), '()\n', (3092, 3094), True, 'import ravestate_interloc as interloc\n'), ((3128, 3157), 'reggol.set_default_loglevel', 'set_default_loglevel', (['"""DEBUG"""'], {}), "('DEBUG')\n", (3148, 
3157), False, 'from reggol import get_logger, set_default_loglevel\n'), ((639, 670), 'ravestate.Module', 'rs.Module', ([], {'name': '"""visionio_test"""'}), "(name='visionio_test')\n", (648, 670), True, 'import ravestate as rs\n'), ((682, 711), 'ravestate.state', 'rs.state', ([], {'read': 'rawio.prop_out'}), '(read=rawio.prop_out)\n', (690, 711), True, 'import ravestate as rs\n'), ((1320, 1338), 'ravestate_ontology.get_ontology', 'mem.get_ontology', ([], {}), '()\n', (1336, 1338), True, 'import ravestate_ontology as mem\n'), ((1363, 1380), 'ravestate_ontology.get_session', 'mem.get_session', ([], {}), '()\n', (1378, 1380), True, 'import ravestate_ontology as mem\n'), ((1642, 1660), 'ravestate_ontology.get_ontology', 'mem.get_ontology', ([], {}), '()\n', (1658, 1660), True, 'import ravestate_ontology as mem\n'), ((1685, 1702), 'ravestate_ontology.get_session', 'mem.get_session', ([], {}), '()\n', (1700, 1702), True, 'import ravestate_ontology as mem\n'), ((2108, 2115), 'roboy_cognition_msgs.msg.Faces', 'Faces', ([], {}), '()\n', (2113, 2115), False, 'from roboy_cognition_msgs.msg import Faces, FacialFeatures\n'), ((2215, 2231), 'roboy_cognition_msgs.msg.FacialFeatures', 'FacialFeatures', ([], {}), '()\n', (2229, 2231), False, 'from roboy_cognition_msgs.msg import Faces, FacialFeatures\n'), ((2261, 2274), 'numpy.zeros', 'np.zeros', (['(128)'], {}), '(128)\n', (2269, 2274), True, 'import numpy as np\n'), ((2802, 2850), 'ravestate_verbaliser.get_phrase_list', 'verbaliser.get_phrase_list', (['"""greeting-with-name"""'], {}), "('greeting-with-name')\n", (2828, 2850), True, 'import ravestate_verbaliser as verbaliser\n')] |
import unittest
import numpy as np
from abslib.kp import KnowledgePatternManager, DisjunctKnowledgePatternItem, MatrixProducer, QuantKnowledgePatternItem, \
ConjunctKnowledgePatternItem
# Tests for knowledge pattern part of abslib
class KnowledgePatternManagerTest(unittest.TestCase):
    """Unit tests for KnowledgePatternManager consistency checking and for
    MatrixProducer's quant/disjunct/conjunct transformation matrices."""

    def _assert_consistent(self, knowledge_pattern, intervals):
        """Assert the pattern is consistent and the resulting intervals are
        only narrowed (never widened) relative to `intervals`."""
        result = KnowledgePatternManager.checkConsistency(knowledge_pattern)
        self.assertTrue(result.consistent, "False negative consistency result")
        self.assertTrue(np.array(result.array).shape == np.array(intervals).shape,
                        "Incorrect result array size")
        for i in range(len(result.array)):
            self.assertTrue(intervals[i][0] <= result.array[i][0]
                            and result.array[i][1] <= intervals[i][1],
                            "Intervals couldn't become larger")

    def _assert_inconsistent(self, knowledge_pattern):
        """Assert the pattern is reported inconsistent."""
        result = KnowledgePatternManager.checkConsistency(knowledge_pattern)
        self.assertFalse(result.consistent, "False positive consistency result")

    def _assert_matrix_equal(self, expected, generated):
        """Element-wise comparison of an expected and a generated matrix."""
        for i in range(expected.shape[0]):
            for j in range(expected.shape[1]):
                self.assertEqual(expected[i][j], generated[i][j],
                                 "Wrong matrix generation algorithm")

    def testDisjunctsConsistent(self):
        arrays = [[[1.0, 1.0], [0.1, 0.2], [0.2, 0.4], [0.5, 0.7]]]
        for intervals in arrays:
            self._assert_consistent(DisjunctKnowledgePatternItem(intervals), intervals)

    def testDisjunctsInconsistent(self):
        arrays = [[[1, 1], [0.1, 0.2], [0.2, 0.4], [0.7, 0.7]]]
        for intervals in arrays:
            self._assert_inconsistent(DisjunctKnowledgePatternItem(intervals))

    def testQuantsConsistent(self):
        arrays = [[[0.24, 0.25], [0.25, 0.25], [0.25, 0.25], [0.25, 0.25]]]
        for intervals in arrays:
            self._assert_consistent(QuantKnowledgePatternItem(intervals), intervals)

    def testQuantsInconsistent(self):
        arrays = [[[0.2, 0.3], [0.2, 0.3], [0.2, 0.3], [0.6, 0.7]]]
        for intervals in arrays:
            self._assert_inconsistent(QuantKnowledgePatternItem(intervals))

    def testConjunctsConsistent(self):
        arrays = [[[1.0, 1.0], [0.6, 0.9], [0.6, 0.9], [0.2, 0.3]]]
        for intervals in arrays:
            self._assert_consistent(ConjunctKnowledgePatternItem(intervals), intervals)

    def testConjunctsInconsistent(self):
        # BUG FIX: this test previously built a DisjunctKnowledgePatternItem,
        # so the conjunct consistency check was never exercised.  The interval
        # set remains inconsistent as conjuncts: p(x1&x2) = 0.8 exceeds the
        # upper bound of p(x1) = 0.2.
        arrays = [[[1, 1], [0.1, 0.2], [0.2, 0.4], [0.8, 0.8]]]
        for intervals in arrays:
            self._assert_inconsistent(ConjunctKnowledgePatternItem(intervals))

    def testDisjunctsToQuantsMatrix(self):
        matrices = [(np.array([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0],
                              [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0, 1.0],
                              [0.0, 0.0, 0.0, 0.0, 0.0, -1.0, 0.0, 1.0],
                              [-0.0, -0.0, -0.0, -0.0, -1.0, 1.0, 1.0, -1.0],
                              [0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, 1.0],
                              [-0.0, -0.0, -1.0, 1.0, -0.0, -0.0, 1.0, -1.0],
                              [-0.0, -1.0, -0.0, 1.0, -0.0, 1.0, -0.0, -1.0],
                              [0.0, 1.0, 1.0, -1.0, 1.0, -1.0, -1.0, 1.0]], dtype=np.double), 3)]
        for matrix, n in matrices:
            self._assert_matrix_equal(matrix, MatrixProducer.getDisjunctsToQuantsMatrix(n))

    def testConjunctsToQuantsMatrix(self):
        matrices = [(np.array([[1.0, -1.0, -1.0, 1.0, -1.0, 1.0, 1.0, -1.0],
                              [0.0, 1.0, -0.0, -1.0, -0.0, -1.0, 0.0, 1.0],
                              [0.0, 0.0, 1.0, -1.0, -0.0, -0.0, -1.0, 1.0],
                              [0.0, 0.0, 0.0, 1.0, -0.0, -0.0, -0.0, -1.0],
                              [0.0, 0.0, 0.0, 0.0, 1.0, -1.0, -1.0, 1.0],
                              [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, -0.0, -1.0],
                              [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, -1.0],
                              [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]], dtype=np.double), 3)]
        for matrix, n in matrices:
            self._assert_matrix_equal(matrix, MatrixProducer.getConjunctsToQuantsMatrix(n))

    def testQuantsToDisjunctsMatrix(self):
        matrices = [(np.array([[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
                              [0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0],
                              [0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0],
                              [0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0],
                              [0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
                              [0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0],
                              [0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
                              [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]], dtype=np.double), 3)]
        for matrix, n in matrices:
            self._assert_matrix_equal(matrix, MatrixProducer.getQuantsToDisjunctsMatrix(n))
if __name__ == '__main__':
    # Discover and run all test cases when executed as a script.
    unittest.main()
| [
"abslib.kp.ConjunctKnowledgePatternItem",
"abslib.kp.MatrixProducer.getDisjunctsToQuantsMatrix",
"abslib.kp.MatrixProducer.getQuantsToDisjunctsMatrix",
"abslib.kp.MatrixProducer.getConjunctsToQuantsMatrix",
"numpy.array",
"abslib.kp.DisjunctKnowledgePatternItem",
"abslib.kp.KnowledgePatternManager.check... | [((7154, 7169), 'unittest.main', 'unittest.main', ([], {}), '()\n', (7167, 7169), False, 'import unittest\n'), ((485, 544), 'abslib.kp.DisjunctKnowledgePatternItem', 'DisjunctKnowledgePatternItem', (['disjunct_intervals_consistent'], {}), '(disjunct_intervals_consistent)\n', (513, 544), False, 'from abslib.kp import KnowledgePatternManager, DisjunctKnowledgePatternItem, MatrixProducer, QuantKnowledgePatternItem, ConjunctKnowledgePatternItem\n'), ((566, 624), 'abslib.kp.KnowledgePatternManager.checkConsistency', 'KnowledgePatternManager.checkConsistency', (['knowledgePattern'], {}), '(knowledgePattern)\n', (606, 624), False, 'from abslib.kp import KnowledgePatternManager, DisjunctKnowledgePatternItem, MatrixProducer, QuantKnowledgePatternItem, ConjunctKnowledgePatternItem\n'), ((1367, 1428), 'abslib.kp.DisjunctKnowledgePatternItem', 'DisjunctKnowledgePatternItem', (['disjunct_intervals_inconsistent'], {}), '(disjunct_intervals_inconsistent)\n', (1395, 1428), False, 'from abslib.kp import KnowledgePatternManager, DisjunctKnowledgePatternItem, MatrixProducer, QuantKnowledgePatternItem, ConjunctKnowledgePatternItem\n'), ((1450, 1508), 'abslib.kp.KnowledgePatternManager.checkConsistency', 'KnowledgePatternManager.checkConsistency', (['knowledgePattern'], {}), '(knowledgePattern)\n', (1490, 1508), False, 'from abslib.kp import KnowledgePatternManager, DisjunctKnowledgePatternItem, MatrixProducer, QuantKnowledgePatternItem, ConjunctKnowledgePatternItem\n'), ((1788, 1841), 'abslib.kp.QuantKnowledgePatternItem', 'QuantKnowledgePatternItem', (['quant_intervals_consistent'], {}), '(quant_intervals_consistent)\n', (1813, 1841), False, 'from abslib.kp import KnowledgePatternManager, DisjunctKnowledgePatternItem, MatrixProducer, QuantKnowledgePatternItem, ConjunctKnowledgePatternItem\n'), ((1863, 1921), 'abslib.kp.KnowledgePatternManager.checkConsistency', 'KnowledgePatternManager.checkConsistency', (['knowledgePattern'], {}), 
'(knowledgePattern)\n', (1903, 1921), False, 'from abslib.kp import KnowledgePatternManager, DisjunctKnowledgePatternItem, MatrixProducer, QuantKnowledgePatternItem, ConjunctKnowledgePatternItem\n'), ((2653, 2708), 'abslib.kp.QuantKnowledgePatternItem', 'QuantKnowledgePatternItem', (['quant_intervals_inconsistent'], {}), '(quant_intervals_inconsistent)\n', (2678, 2708), False, 'from abslib.kp import KnowledgePatternManager, DisjunctKnowledgePatternItem, MatrixProducer, QuantKnowledgePatternItem, ConjunctKnowledgePatternItem\n'), ((2730, 2788), 'abslib.kp.KnowledgePatternManager.checkConsistency', 'KnowledgePatternManager.checkConsistency', (['knowledgePattern'], {}), '(knowledgePattern)\n', (2770, 2788), False, 'from abslib.kp import KnowledgePatternManager, DisjunctKnowledgePatternItem, MatrixProducer, QuantKnowledgePatternItem, ConjunctKnowledgePatternItem\n'), ((3066, 3125), 'abslib.kp.ConjunctKnowledgePatternItem', 'ConjunctKnowledgePatternItem', (['conjunct_intervals_consistent'], {}), '(conjunct_intervals_consistent)\n', (3094, 3125), False, 'from abslib.kp import KnowledgePatternManager, DisjunctKnowledgePatternItem, MatrixProducer, QuantKnowledgePatternItem, ConjunctKnowledgePatternItem\n'), ((3147, 3205), 'abslib.kp.KnowledgePatternManager.checkConsistency', 'KnowledgePatternManager.checkConsistency', (['knowledgePattern'], {}), '(knowledgePattern)\n', (3187, 3205), False, 'from abslib.kp import KnowledgePatternManager, DisjunctKnowledgePatternItem, MatrixProducer, QuantKnowledgePatternItem, ConjunctKnowledgePatternItem\n'), ((3946, 4005), 'abslib.kp.DisjunctKnowledgePatternItem', 'DisjunctKnowledgePatternItem', (['conjunct_intervals_consistent'], {}), '(conjunct_intervals_consistent)\n', (3974, 4005), False, 'from abslib.kp import KnowledgePatternManager, DisjunctKnowledgePatternItem, MatrixProducer, QuantKnowledgePatternItem, ConjunctKnowledgePatternItem\n'), ((4027, 4085), 'abslib.kp.KnowledgePatternManager.checkConsistency', 
'KnowledgePatternManager.checkConsistency', (['knowledgePattern'], {}), '(knowledgePattern)\n', (4067, 4085), False, 'from abslib.kp import KnowledgePatternManager, DisjunctKnowledgePatternItem, MatrixProducer, QuantKnowledgePatternItem, ConjunctKnowledgePatternItem\n'), ((4913, 4957), 'abslib.kp.MatrixProducer.getDisjunctsToQuantsMatrix', 'MatrixProducer.getDisjunctsToQuantsMatrix', (['n'], {}), '(n)\n', (4954, 4957), False, 'from abslib.kp import KnowledgePatternManager, DisjunctKnowledgePatternItem, MatrixProducer, QuantKnowledgePatternItem, ConjunctKnowledgePatternItem\n'), ((5902, 5946), 'abslib.kp.MatrixProducer.getConjunctsToQuantsMatrix', 'MatrixProducer.getConjunctsToQuantsMatrix', (['n'], {}), '(n)\n', (5943, 5946), False, 'from abslib.kp import KnowledgePatternManager, DisjunctKnowledgePatternItem, MatrixProducer, QuantKnowledgePatternItem, ConjunctKnowledgePatternItem\n'), ((6870, 6914), 'abslib.kp.MatrixProducer.getQuantsToDisjunctsMatrix', 'MatrixProducer.getQuantsToDisjunctsMatrix', (['n'], {}), '(n)\n', (6911, 6914), False, 'from abslib.kp import KnowledgePatternManager, DisjunctKnowledgePatternItem, MatrixProducer, QuantKnowledgePatternItem, ConjunctKnowledgePatternItem\n'), ((4236, 4646), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n -1.0, 1.0], [0.0, 0.0, 0.0, 0.0, 0.0, -1.0, 0.0, 1.0], [-0.0, -0.0, -\n 0.0, -0.0, -1.0, 1.0, 1.0, -1.0], [0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, \n 1.0], [-0.0, -0.0, -1.0, 1.0, -0.0, -0.0, 1.0, -1.0], [-0.0, -1.0, -0.0,\n 1.0, -0.0, 1.0, -0.0, -1.0], [0.0, 1.0, 1.0, -1.0, 1.0, -1.0, -1.0, 1.0]]'], {'dtype': 'np.double'}), '([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0], [0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0, -1.0, 1.0], [0.0, 0.0, 0.0, 0.0, 0.0, -1.0, 0.0, 1.0], [-0.0,\n -0.0, -0.0, -0.0, -1.0, 1.0, 1.0, -1.0], [0.0, 0.0, 0.0, -1.0, 0.0, 0.0,\n 0.0, 1.0], [-0.0, -0.0, -1.0, 1.0, -0.0, -0.0, 1.0, -1.0], [-0.0, -1.0,\n -0.0, 1.0, -0.0, 1.0, -0.0, -1.0], [0.0, 1.0, 
1.0, -1.0, 1.0, -1.0, -\n 1.0, 1.0]], dtype=np.double)\n', (4244, 4646), True, 'import numpy as np\n'), ((5229, 5636), 'numpy.array', 'np.array', (['[[1.0, -1.0, -1.0, 1.0, -1.0, 1.0, 1.0, -1.0], [0.0, 1.0, -0.0, -1.0, -0.0,\n -1.0, 0.0, 1.0], [0.0, 0.0, 1.0, -1.0, -0.0, -0.0, -1.0, 1.0], [0.0, \n 0.0, 0.0, 1.0, -0.0, -0.0, -0.0, -1.0], [0.0, 0.0, 0.0, 0.0, 1.0, -1.0,\n -1.0, 1.0], [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, -0.0, -1.0], [0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 1.0, -1.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]]'], {'dtype': 'np.double'}), '([[1.0, -1.0, -1.0, 1.0, -1.0, 1.0, 1.0, -1.0], [0.0, 1.0, -0.0, -\n 1.0, -0.0, -1.0, 0.0, 1.0], [0.0, 0.0, 1.0, -1.0, -0.0, -0.0, -1.0, 1.0\n ], [0.0, 0.0, 0.0, 1.0, -0.0, -0.0, -0.0, -1.0], [0.0, 0.0, 0.0, 0.0, \n 1.0, -1.0, -1.0, 1.0], [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, -0.0, -1.0], [0.0,\n 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, -1.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 1.0]], dtype=np.double)\n', (5237, 5636), True, 'import numpy as np\n'), ((6218, 6605), 'numpy.array', 'np.array', (['[[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], [0.0, 1.0, 0.0, 1.0, 0.0, 1.0, \n 0.0, 1.0], [0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0], [0.0, 1.0, 1.0, \n 1.0, 0.0, 1.0, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0], [\n 0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0, 1.0, 1.0,\n 1.0, 1.0], [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]]'], {'dtype': 'np.double'}), '([[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], [0.0, 1.0, 0.0, 1.0, \n 0.0, 1.0, 0.0, 1.0], [0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 1.0], [0.0, \n 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0,\n 1.0], [0.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0, \n 1.0, 1.0, 1.0, 1.0], [0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]], dtype=\n np.double)\n', (6226, 6605), True, 'import numpy as np\n'), ((737, 759), 'numpy.array', 'np.array', (['result.array'], {}), '(result.array)\n', (745, 759), True, 'import numpy as np\n'), ((769, 808), 'numpy.array', 'np.array', 
(['disjunct_intervals_consistent'], {}), '(disjunct_intervals_consistent)\n', (777, 808), True, 'import numpy as np\n'), ((2034, 2056), 'numpy.array', 'np.array', (['result.array'], {}), '(result.array)\n', (2042, 2056), True, 'import numpy as np\n'), ((2066, 2102), 'numpy.array', 'np.array', (['quant_intervals_consistent'], {}), '(quant_intervals_consistent)\n', (2074, 2102), True, 'import numpy as np\n'), ((3318, 3340), 'numpy.array', 'np.array', (['result.array'], {}), '(result.array)\n', (3326, 3340), True, 'import numpy as np\n'), ((3350, 3389), 'numpy.array', 'np.array', (['conjunct_intervals_consistent'], {}), '(conjunct_intervals_consistent)\n', (3358, 3389), True, 'import numpy as np\n')] |
from collections.abc import Iterable
from scipy.ndimage.filters import gaussian_filter1d
import numpy as np
from sklearn.kernel_approximation import RBFSampler
def events_to_rates(event_times, filter_bandwidth=1, num_bins=72, min_time=None, max_time=None, density=True):
    """Convert a list of event times into a smoothed, discretely sampled rate function.

    The time span [min_time, max_time] is split into `num_bins` equal bins; the
    per-bin event counts are normalized and then smoothed with a 1-D Gaussian
    filter (`mode="nearest"` so the edges are not pulled toward zero).

    Parameters
    ----------
    event_times : sequence of float
        Times at which events occurred.
    filter_bandwidth : float
        Standard deviation (in bins) of the Gaussian smoothing kernel.
    num_bins : int
        Number of discrete time bins.
    min_time, max_time : float or None
        Explicit time range; when None, the min/max of `event_times` is used.
    density : bool
        If True, normalize counts by the total count (fractions summing to 1
        before smoothing); otherwise divide by the bin width (events per unit time).

    Returns
    -------
    (rate_vals, rate_times) : tuple of ndarray, each of shape (num_bins,)
        Smoothed rate samples and the bin-center times. For empty input,
        two zero arrays are returned.
    """
    if len(event_times) == 0:  # empty list or array: nothing to estimate
        print("empty event_times list/array")
        return np.zeros(num_bins), np.zeros(num_bins)
    # Compare against None (not truthiness) so an explicit bound of 0 is honored.
    if max_time is None:
        max_time = max(event_times)
    if min_time is None:
        min_time = min(event_times)
    bins = np.linspace(min_time, max_time, num=num_bins + 1)
    rate_times = (bins[1:] + bins[:-1]) / 2  # bin centers
    bin_size = (max_time - min_time) / num_bins
    counts = np.array(np.histogram(event_times, bins=bins)[0])
    if density:
        # NOTE: divides by the total count; yields zero-division NaNs if the
        # explicit range excludes every event.
        sampled_rates = counts / sum(counts)
    else:
        sampled_rates = counts / bin_size
    rate_vals = gaussian_filter1d(sampled_rates, filter_bandwidth, mode="nearest")
    return rate_vals, rate_times
def rand_fourier_features(rate_vals, dim=1000, random_state=0):
    """Map rate vectors to `dim` random Fourier features (RBF-kernel approximation).

    A 1-D input is treated as a single sample; the result is seeded via
    `random_state`, so repeated calls are deterministic.
    """
    samples = rate_vals[None, :] if rate_vals.ndim == 1 else rate_vals
    sampler = RBFSampler(n_components=dim, random_state=random_state)
    return sampler.fit_transform(samples)
| [
"numpy.histogram",
"sklearn.kernel_approximation.RBFSampler",
"numpy.zeros",
"numpy.linspace",
"scipy.ndimage.filters.gaussian_filter1d"
] | [((766, 815), 'numpy.linspace', 'np.linspace', (['min_time', 'max_time'], {'num': '(num_bins + 1)'}), '(min_time, max_time, num=num_bins + 1)\n', (777, 815), True, 'import numpy as np\n'), ((1172, 1238), 'scipy.ndimage.filters.gaussian_filter1d', 'gaussian_filter1d', (['sampled_rates', 'filter_bandwidth'], {'mode': '"""nearest"""'}), "(sampled_rates, filter_bandwidth, mode='nearest')\n", (1189, 1238), False, 'from scipy.ndimage.filters import gaussian_filter1d\n'), ((1423, 1478), 'sklearn.kernel_approximation.RBFSampler', 'RBFSampler', ([], {'n_components': 'dim', 'random_state': 'random_state'}), '(n_components=dim, random_state=random_state)\n', (1433, 1478), False, 'from sklearn.kernel_approximation import RBFSampler\n'), ((601, 619), 'numpy.zeros', 'np.zeros', (['num_bins'], {}), '(num_bins)\n', (609, 619), True, 'import numpy as np\n'), ((621, 639), 'numpy.zeros', 'np.zeros', (['num_bins'], {}), '(num_bins)\n', (629, 639), True, 'import numpy as np\n'), ((951, 987), 'numpy.histogram', 'np.histogram', (['event_times'], {'bins': 'bins'}), '(event_times, bins=bins)\n', (963, 987), True, 'import numpy as np\n'), ((1073, 1109), 'numpy.histogram', 'np.histogram', (['event_times'], {'bins': 'bins'}), '(event_times, bins=bins)\n', (1085, 1109), True, 'import numpy as np\n')] |
import dill as pickle
import numpy as np
from datasets.pandas_dataset import PandasData
from mf_tree.simplex_partitioning_strategies import DelaunayPartitioningStrategy, CoordinateHalvingPartitioningStrategy, \
ConstantPartitioningStrategy
class ExperimentConfiguration:
    """
    Bundles the settings for one experiment run: the budget schedule, the
    mixture-selection strategy, and — once `configure(data)` has run — the
    simplex-partitioning strategy derived from the data's mixture dimension.
    """

    def __init__(self,
                 experiment_type,
                 record_test_error,
                 evaluate_best_result_again,
                 num_repeats,
                 mixture_selection_strategy,
                 custom_mixture,
                 budget_min,
                 budget_max,
                 budget_step,
                 actual_budgets_and_mixtures_path):
        """
        Parameters
        ----------
        experiment_type : identifier of the experiment variant.
        record_test_error : bool, whether test error is tracked.
        evaluate_best_result_again : bool; if True the recording grid is
            stretched to twice the budget range (re-evaluation doubles cost).
        num_repeats : repetitions per budget (ignored for
            "all-individual-sources", where it becomes the number of sources
            in `configure`).
        mixture_selection_strategy : str, one of the strategies handled in
            `configure`.
        custom_mixture : comma-separated floats, used only with the "custom"
            strategy.
        budget_min, budget_max, budget_step : budget grid, half-open like
            `range`.
        actual_budgets_and_mixtures_path : optional path to a pickle holding
            {"budgets": ..., "mixtures": ...} from a previous run.
        """
        self.experiment_type = experiment_type
        self.record_test_error = record_test_error
        self.train_on_validation = mixture_selection_strategy == "validation"
        self.experiment_budgets = range(budget_min, budget_max, budget_step)
        if evaluate_best_result_again:
            # Re-evaluating doubles the work per point, so stretch the grid.
            recording_budget_step = 2 * budget_step
            recording_budget_min = budget_min
            recording_budget_max = 2 * (budget_max - 1) + 1
            self.recording_times = range(recording_budget_min, recording_budget_max, recording_budget_step)
        else:
            self.recording_times = self.experiment_budgets
        self.budget_min = budget_min
        self.budget_max = budget_max - 1  # inclusive upper bound
        self.budget_step = budget_step
        if actual_budgets_and_mixtures_path:
            print("Using actual budgets path:", actual_budgets_and_mixtures_path)
            with open(actual_budgets_and_mixtures_path, 'rb') as bm:
                budget_mixture_map = pickle.load(bm)
            self.actual_budgets = budget_mixture_map["budgets"]
            self.actual_mixtures = budget_mixture_map["mixtures"]
        else:
            self.actual_budgets = None
            self.actual_mixtures = None
        self.alt_budgets_to_use = self.actual_budgets
        self.mixture_selection_strategy = mixture_selection_strategy
        if custom_mixture and mixture_selection_strategy == "custom":
            self.custom_mixture = [float(el) for el in custom_mixture.split(',')]
            print("Using custom mixture:", self.custom_mixture)
        else:
            self.custom_mixture = None
        if mixture_selection_strategy == "all-individual-sources":
            # Set num_repeats once the mixture dimension is known -- see configure().
            self.num_repeats = None
        else:
            self.num_repeats = num_repeats
        # Filled in by configure(data).
        self.partitioning_strategy = None
        self.initial_simplex = None
        self.alpha_star = None
        self.alpha_dim = None

    def configure(self, data: "PandasData"):
        """
        Finish configuration from the dataset: read the mixture dimension and
        build the partitioning strategy matching `mixture_selection_strategy`.

        Raises
        ------
        ValueError
            If `mixture_selection_strategy` is not a recognized strategy.
        """
        self.alpha_star = data.get_alpha_star()
        self.alpha_dim = len(self.alpha_star)
        self.initial_simplex = np.identity(self.alpha_dim)
        self.test_mixture = data.get_test_mixture()
        if self.mixture_selection_strategy == "delaunay-partitioning":
            self.partitioning_strategy = DelaunayPartitioningStrategy(dim=self.alpha_dim)
        elif self.mixture_selection_strategy == "coordinate-halving":
            self.partitioning_strategy = CoordinateHalvingPartitioningStrategy(dim=self.alpha_dim)
        elif self.mixture_selection_strategy == "alpha-star":
            self.partitioning_strategy = ConstantPartitioningStrategy(dim=self.alpha_dim,
                                                                      simplex_point=self.alpha_star)
        elif self.mixture_selection_strategy == "validation":
            self.partitioning_strategy = ConstantPartitioningStrategy(dim=self.alpha_dim,
                                                                      simplex_point=data.get_validate_mixture())
        elif self.mixture_selection_strategy == "tree-results":
            # One constant strategy per previously recorded mixture.
            self.partitioning_strategy = [ConstantPartitioningStrategy(dim=self.alpha_dim, simplex_point=mixture) for
                                          mixture in self.actual_mixtures]
        elif self.mixture_selection_strategy == "uniform":
            self.partitioning_strategy = ConstantPartitioningStrategy(dim=self.alpha_dim)
        elif self.mixture_selection_strategy == "custom":
            self.partitioning_strategy = ConstantPartitioningStrategy(dim=self.alpha_dim,
                                                                      simplex_point=self.custom_mixture)
        elif self.mixture_selection_strategy == "all-individual-sources":
            # One strategy per source, each concentrated on one simplex vertex.
            self.partitioning_strategy = [ConstantPartitioningStrategy(dim=self.alpha_dim,
                                                                       simplex_point=np.eye(1, self.alpha_dim, src_idx)[0])
                                          for src_idx in range(self.alpha_dim)]
            self.num_repeats = self.alpha_dim
        else:
            # Fail loudly; an `assert False` would be stripped under `python -O`.
            raise ValueError("Invalid mixture selection strategy: {}".format(self.mixture_selection_strategy))
| [
"numpy.identity",
"numpy.eye",
"mf_tree.simplex_partitioning_strategies.CoordinateHalvingPartitioningStrategy",
"mf_tree.simplex_partitioning_strategies.DelaunayPartitioningStrategy",
"mf_tree.simplex_partitioning_strategies.ConstantPartitioningStrategy",
"dill.load"
] | [((2842, 2869), 'numpy.identity', 'np.identity', (['self.alpha_dim'], {}), '(self.alpha_dim)\n', (2853, 2869), True, 'import numpy as np\n'), ((3034, 3082), 'mf_tree.simplex_partitioning_strategies.DelaunayPartitioningStrategy', 'DelaunayPartitioningStrategy', ([], {'dim': 'self.alpha_dim'}), '(dim=self.alpha_dim)\n', (3062, 3082), False, 'from mf_tree.simplex_partitioning_strategies import DelaunayPartitioningStrategy, CoordinateHalvingPartitioningStrategy, ConstantPartitioningStrategy\n'), ((1646, 1661), 'dill.load', 'pickle.load', (['bm'], {}), '(bm)\n', (1657, 1661), True, 'import dill as pickle\n'), ((3194, 3251), 'mf_tree.simplex_partitioning_strategies.CoordinateHalvingPartitioningStrategy', 'CoordinateHalvingPartitioningStrategy', ([], {'dim': 'self.alpha_dim'}), '(dim=self.alpha_dim)\n', (3231, 3251), False, 'from mf_tree.simplex_partitioning_strategies import DelaunayPartitioningStrategy, CoordinateHalvingPartitioningStrategy, ConstantPartitioningStrategy\n'), ((3355, 3434), 'mf_tree.simplex_partitioning_strategies.ConstantPartitioningStrategy', 'ConstantPartitioningStrategy', ([], {'dim': 'self.alpha_dim', 'simplex_point': 'self.alpha_star'}), '(dim=self.alpha_dim, simplex_point=self.alpha_star)\n', (3383, 3434), False, 'from mf_tree.simplex_partitioning_strategies import DelaunayPartitioningStrategy, CoordinateHalvingPartitioningStrategy, ConstantPartitioningStrategy\n'), ((3876, 3947), 'mf_tree.simplex_partitioning_strategies.ConstantPartitioningStrategy', 'ConstantPartitioningStrategy', ([], {'dim': 'self.alpha_dim', 'simplex_point': 'mixture'}), '(dim=self.alpha_dim, simplex_point=mixture)\n', (3904, 3947), False, 'from mf_tree.simplex_partitioning_strategies import DelaunayPartitioningStrategy, CoordinateHalvingPartitioningStrategy, ConstantPartitioningStrategy\n'), ((4127, 4175), 'mf_tree.simplex_partitioning_strategies.ConstantPartitioningStrategy', 'ConstantPartitioningStrategy', ([], {'dim': 'self.alpha_dim'}), '(dim=self.alpha_dim)\n', 
(4155, 4175), False, 'from mf_tree.simplex_partitioning_strategies import DelaunayPartitioningStrategy, CoordinateHalvingPartitioningStrategy, ConstantPartitioningStrategy\n'), ((4275, 4363), 'mf_tree.simplex_partitioning_strategies.ConstantPartitioningStrategy', 'ConstantPartitioningStrategy', ([], {'dim': 'self.alpha_dim', 'simplex_point': 'self.custom_mixture'}), '(dim=self.alpha_dim, simplex_point=self.\n custom_mixture)\n', (4303, 4363), False, 'from mf_tree.simplex_partitioning_strategies import DelaunayPartitioningStrategy, CoordinateHalvingPartitioningStrategy, ConstantPartitioningStrategy\n'), ((4679, 4713), 'numpy.eye', 'np.eye', (['(1)', 'self.alpha_dim', 'src_idx'], {}), '(1, self.alpha_dim, src_idx)\n', (4685, 4713), True, 'import numpy as np\n')] |
import numpy as np
# import plotly.plotly as py
# import plotly.graph_objs as go
# import glob
from numpy.linalg import eig, inv
# from sklearn.decomposition import PCA
# import mdtraj as md
# from scipy.spatial import ConvexHull
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
# import pandas as pd
from scipy.optimize import fsolve
from scipy.spatial.distance import cdist
"""
ellipse:
https://math.stackexchange.com/questions/2349022/is-it-possible-to-find-the-distance-between-two-points-on-the-circumference-of-a
https://www.mathsisfun.com/geometry/ellipse-perimeter.html
https://blender.stackexchange.com/questions/60562/ellipse-construction-advice/60624#60624
http://mathworld.wolfram.com/Ellipse.html
"""
def bmat_from_conformer(conf):
    """
    Pairwise inter-atomic Euclidean distance matrix for a conformer.

    Parameters
    -----------
    conf : RDKit conformer object (anything exposing GetPositions() returning
        an (n_atoms, 3) array of coordinates)

    Returns
    -------
    ndarray of shape (n_atoms, n_atoms)
        Symmetric distance matrix with a zero diagonal.
    """
    positions = conf.GetPositions()
    # cdist yields the full symmetric distance matrix in one C-level call.
    return cdist(positions, positions)
def calcualte_ellipse_radii(guess, eccentricity=0, perimeter=2 * np.pi * 1):
    """
    Numerically solve for the semi-axes (a, b) of an ellipse that has the
    given eccentricity and perimeter, starting fsolve at `guess = (a0, b0)`.
    """
    return fsolve(ellipse_radii_test, guess, args=(eccentricity, perimeter))


def ellipse_radii_test(radii, eccentricity=0, perimeter=2 * np.pi * 1):
    """
    Residuals for the semi-axis solve; both components vanish at a solution.
    """
    a, b = radii
    ecc_residual = np.sqrt(np.absolute(1 - (b ** 2) / (a ** 2))) - eccentricity
    # Perimeter approximation from https://www.mathsisfun.com/geometry/ellipse-perimeter.html
    per_residual = np.pi * (3 * (a + b) - np.sqrt(np.absolute((3 * a + b) * (a + 3 * b)))) - perimeter
    return (ecc_residual, per_residual)
def get_points_on_ellipse(a, b, numPoints, startAngle=0, verbose=False, increment=0.01):
    """
    Sample `numPoints` points spaced at (approximately) equal arc length
    along the origin-centred ellipse x = a*cos(t), y = b*sin(t).

    Parameters
    ----------
    a, b : float
        Semi-axis lengths along x and y.
    numPoints : int
        Number of points to return.
    startAngle : float
        Angle in degrees at which the walk begins (default 0, the +x axis).
    verbose : bool
        Print the estimated circumference and per-point progress.
    increment : float
        Angular step in degrees used for the numeric arc-length integration.

    Returns
    -------
    ndarray of shape (numPoints, 2)
    """
    def chord(x1, y1, x2, y2):
        # straight-line distance between consecutive samples
        return np.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)

    # Pass 1: estimate the circumference by summing small chords around 0..360 deg.
    prev_x, prev_y = a, 0
    theta = 0
    circumference = 0
    while theta <= 360:
        cur_x = a * np.cos(np.radians(theta))
        cur_y = b * np.sin(np.radians(theta))
        circumference += chord(prev_x, prev_y, cur_x, cur_y)
        prev_x = cur_x
        prev_y = cur_y
        theta += increment
    if verbose:
        print("The estimated circumference of ellipse is {:f}".format(circumference))

    target_arc = circumference / numPoints
    points = []
    theta = 0
    prev_x, prev_y = a, 0
    prev_theta = 0
    # Skip ahead until the requested start angle is reached.
    while prev_theta < startAngle:
        theta += increment
        prev_x = a * np.cos(np.radians(theta))
        prev_y = b * np.sin(np.radians(theta))
        prev_theta = theta
    # Pass 2: walk the ellipse, emitting one point per `target_arc` of arc.
    for idx in range(numPoints):
        travelled = 0
        while travelled < target_arc:
            theta += increment
            cur_x = a * np.cos(np.radians(theta))
            cur_y = b * np.sin(np.radians(theta))
            travelled += chord(prev_x, prev_y, cur_x, cur_y)
            prev_x = cur_x
            prev_y = cur_y
        if verbose:
            print(
                "{} : angle = {:.2f}\tdifference = {:.2f}\tDistance {:.2f}"
                .format(idx + 1, theta, theta - prev_theta, travelled))
        points.append([prev_x, prev_y])
        prev_theta = theta
    return np.array(points)
def eccentricity(fitted_ellipse_obj):
    """
    Eccentricity sqrt(|1 - b^2/a^2|) of a fitted conic, where (a, b) are the
    two semi-axis lengths returned by ellipse_axis_length.
    """
    ax_a, ax_b = ellipse_axis_length(fitted_ellipse_obj)
    return np.sqrt(np.absolute(1 - ax_b ** 2 / ax_a ** 2))
def fitEllipse(x, y):
    """
    Algebraic least-squares fit of the general conic
    a0*x^2 + a1*x*y + a2*y^2 + a3*x + a4*y + a5 = 0 to the given points.
    Returns the (unit-norm) coefficient vector of length 6.
    """
    col_x = x[:, np.newaxis]
    col_y = y[:, np.newaxis]
    # Design matrix: one row [x^2, xy, y^2, x, y, 1] per point.
    design = np.hstack((col_x * col_x, col_x * col_y, col_y * col_y,
                        col_x, col_y, np.ones_like(col_x)))
    scatter = np.dot(design.T, design)
    # Ellipse constraint matrix (4ac - b^2 form).
    constraint = np.zeros([6, 6])
    constraint[0, 2] = constraint[2, 0] = 2
    constraint[1, 1] = -1
    eigvals, eigvecs = eig(np.dot(inv(scatter), constraint))
    best = np.argmax(np.abs(eigvals))
    return eigvecs[:, best]
def ellipse_center(a):
    """
    Centre (x0, y0) of the conic a[0]x^2 + a[1]xy + a[2]y^2 + a[3]x + a[4]y + a[5] = 0.
    """
    # Normalized coefficients: the standard form uses b = a1/2, d = a3/2, f = a4/2.
    A, B, C, D, F = a[0], a[1] / 2, a[2], a[3] / 2, a[4] / 2
    denom = B * B - A * C
    cx = (C * D - B * F) / denom
    cy = (A * F - B * D) / denom
    return np.array([cx, cy])
def ellipse_angle_of_rotation(a):
    """
    Rotation angle 0.5*arctan(2b/(A-C)) of the conic's axes, in radians.
    NOTE: divides by (A - C); a circle-like conic (A == C) yields a warning/inf.
    """
    A, B, C = a[0], a[1] / 2, a[2]
    return 0.5 * np.arctan(2 * B / (A - C))
def ellipse_axis_length(a):
    """
    Semi-axis lengths [res1, res2] of the conic with coefficient vector `a`
    (standard closed-form expressions; see mathworld.wolfram.com/Ellipse.html).
    NOTE: divides by (A - C)^2, so a perfect circle (A == C) is degenerate here.
    """
    A, B, C, D, F, G = a[0], a[1] / 2, a[2], a[3] / 2, a[4] / 2, a[5]
    numer = 2 * (A * F * F + C * D * D + G * B * B - 2 * B * D * F - A * C * G)
    root = np.sqrt(1 + 4 * B * B / ((A - C) * (A - C)))
    denom1 = (B * B - A * C) * ((C - A) * root - (C + A))
    denom2 = (B * B - A * C) * ((A - C) * root - (C + A))
    return np.array([np.sqrt(numer / denom1), np.sqrt(numer / denom2)])
def ellipse_angle_of_rotation2(a):
    """
    Quadrant-aware variant of the conic rotation angle: handles the B == 0
    case explicitly and offsets by pi/2 when A <= C.
    """
    A, B, C = a[0], a[1] / 2, a[2]
    if B == 0:
        return 0 if A > C else np.pi / 2
    half = np.arctan(2 * B / (A - C)) / 2
    return half if A > C else np.pi / 2 + half
import numpy as np
from scipy.stats import chi2
#import pylab as mp
def plot_ellipse(semimaj=1, semimin=1, phi=0, x_cent=0, y_cent=0, theta_num=1000, ax=None,
                 plot_kwargs=None, fill=False, fill_kwargs=None, data_out=False, cov=None,
                 mass_level=0.68):
    '''
    Plot an ellipse (or return its sampled coordinates).

    The ellipse is created in polar coordinates and transformed to cartesian.
    A 2x2 covariance matrix may be given instead, in which case the contour
    enclosing `mass_level` of the probability mass is drawn.

    semimaj : float
        length of semimajor axis (always taken to be some phi (-90<phi<90 deg) from positive x-axis!)
    semimin : float
        length of semiminor axis
    phi : float
        angle in radians of semimajor axis above positive x axis
    x_cent : float
        X coordinate center
    y_cent : float
        Y coordinate center
    theta_num : int
        Number of points to sample along ellipse from 0-2pi
        (floats such as 1e3 are accepted and truncated to int)
    ax : matplotlib axis property
        A pre-created matplotlib axis
    plot_kwargs : dictionary
        matplotlib.plot() keyword arguments
    fill : bool
        A flag to fill the inside of the ellipse
    fill_kwargs : dictionary
        Keyword arguments for matplotlib.fill()
    data_out : bool
        A flag to return the ellipse samples without plotting
    cov : ndarray of shape (2,2)
        A 2x2 covariance matrix, if given this will overwrite semimaj, semimin and phi
    mass_level : float
        if supplied cov, mass_level is the contour defining fractional probability mass enclosed
        for example: mass_level = 0.68 is the standard 68% mass
    '''
    # Get Ellipse Properties from cov matrix
    if cov is not None:
        eig_vec, eig_val, u = np.linalg.svd(cov)
        # Make sure 0th eigenvector has positive x-coordinate
        if eig_vec[0][0] < 0:
            eig_vec[0] *= -1
        semimaj = np.sqrt(eig_val[0])
        semimin = np.sqrt(eig_val[1])
        if mass_level is None:
            # 2.279 is the chi^2 (df=2) quantile for the default 68% mass
            multiplier = np.sqrt(2.279)
        else:
            # Invert the chi^2 (df=2) CDF numerically on a fine grid.
            distances = np.linspace(0, 20, 20001)
            chi2_cdf = chi2.cdf(distances, df=2)
            # first grid point whose CDF is closest to the requested mass
            multiplier = np.sqrt(distances[np.argmin(np.abs(chi2_cdf - mass_level))])
        semimaj *= multiplier
        semimin *= multiplier
        phi = np.arccos(np.dot(eig_vec[0], np.array([1, 0])))
        if eig_vec[0][1] < 0 and phi > 0:
            phi *= -1
    # Generate data for ellipse structure: unit circle, scaled, then rotated.
    # int() cast: np.linspace requires an integer sample count (a float like
    # the old 1e3 default raises TypeError in modern NumPy).
    theta = np.linspace(0, 2 * np.pi, int(theta_num))
    r = 1 / np.sqrt(np.cos(theta) ** 2 + np.sin(theta) ** 2)
    x = r * np.cos(theta)
    y = r * np.sin(theta)
    data = np.array([x, y])
    S = np.array([[semimaj, 0], [0, semimin]])  # axis scaling
    R = np.array([[np.cos(phi), -np.sin(phi)], [np.sin(phi), np.cos(phi)]])  # rotation
    data = np.dot(np.dot(R, S), data)
    data[0] += x_cent
    data[1] += y_cent
    # Output data?
    if data_out:
        return data
    # Plot!
    return_fig = False
    if ax is None:
        return_fig = True
        fig, ax = plt.subplots()
    if plot_kwargs is None:
        ax.plot(data[0], data[1], color='b', linestyle='-')
    else:
        ax.plot(data[0], data[1], **plot_kwargs)
    if fill:
        # Guard against fill=True with no kwargs (the original crashed on **None).
        ax.fill(data[0], data[1], **(fill_kwargs if fill_kwargs is not None else {}))
    if return_fig:
        return fig
| [
"scipy.optimize.fsolve",
"numpy.abs",
"numpy.ones_like",
"scipy.stats.chi2.cdf",
"numpy.sqrt",
"numpy.radians",
"scipy.spatial.distance.cdist",
"numpy.sin",
"numpy.absolute",
"numpy.array",
"numpy.dot",
"numpy.zeros",
"numpy.linspace",
"numpy.cos",
"numpy.linalg.inv",
"numpy.linalg.svd... | [((931, 958), 'scipy.spatial.distance.cdist', 'cdist', (['positions', 'positions'], {}), '(positions, positions)\n', (936, 958), False, 'from scipy.spatial.distance import cdist\n'), ((1113, 1178), 'scipy.optimize.fsolve', 'fsolve', (['ellipse_radii_test', 'guess'], {'args': '(eccentricity, perimeter)'}), '(ellipse_radii_test, guess, args=(eccentricity, perimeter))\n', (1119, 1178), False, 'from scipy.optimize import fsolve\n'), ((3272, 3288), 'numpy.array', 'np.array', (['points'], {}), '(points)\n', (3280, 3288), True, 'import numpy as np\n'), ((3583, 3597), 'numpy.dot', 'np.dot', (['D.T', 'D'], {}), '(D.T, D)\n', (3589, 3597), True, 'import numpy as np\n'), ((3605, 3621), 'numpy.zeros', 'np.zeros', (['[6, 6]'], {}), '([6, 6])\n', (3613, 3621), True, 'import numpy as np\n'), ((3903, 3921), 'numpy.array', 'np.array', (['[x0, y0]'], {}), '([x0, y0])\n', (3911, 3921), True, 'import numpy as np\n'), ((4329, 4348), 'numpy.sqrt', 'np.sqrt', (['(up / down1)'], {}), '(up / down1)\n', (4336, 4348), True, 'import numpy as np\n'), ((4356, 4375), 'numpy.sqrt', 'np.sqrt', (['(up / down2)'], {}), '(up / down2)\n', (4363, 4375), True, 'import numpy as np\n'), ((4385, 4407), 'numpy.array', 'np.array', (['[res1, res2]'], {}), '([res1, res2])\n', (4393, 4407), True, 'import numpy as np\n'), ((7338, 7374), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', 'theta_num'], {}), '(0, 2 * np.pi, theta_num)\n', (7349, 7374), True, 'import numpy as np\n'), ((7491, 7507), 'numpy.array', 'np.array', (['[x, y]'], {}), '([x, y])\n', (7499, 7507), True, 'import numpy as np\n'), ((7515, 7553), 'numpy.array', 'np.array', (['[[semimaj, 0], [0, semimin]]'], {}), '([[semimaj, 0], [0, semimin]])\n', (7523, 7553), True, 'import numpy as np\n'), ((7632, 7644), 'numpy.dot', 'np.dot', (['R', 'S'], {}), '(R, S)\n', (7638, 7644), True, 'import numpy as np\n'), ((7655, 7670), 'numpy.dot', 'np.dot', (['T', 'data'], {}), '(T, data)\n', (7661, 7670), True, 'import numpy as np\n'), 
((2095, 2135), 'numpy.sqrt', 'np.sqrt', (['((x2 - x1) ** 2 + (y2 - y1) ** 2)'], {}), '((x2 - x1) ** 2 + (y2 - y1) ** 2)\n', (2102, 2135), True, 'import numpy as np\n'), ((3398, 3430), 'numpy.absolute', 'np.absolute', (['(1 - b ** 2 / a ** 2)'], {}), '(1 - b ** 2 / a ** 2)\n', (3409, 3430), True, 'import numpy as np\n'), ((3711, 3720), 'numpy.abs', 'np.abs', (['E'], {}), '(E)\n', (3717, 3720), True, 'import numpy as np\n'), ((4031, 4057), 'numpy.arctan', 'np.arctan', (['(2 * b / (a - c))'], {}), '(2 * b / (a - c))\n', (4040, 4057), True, 'import numpy as np\n'), ((6578, 6596), 'numpy.linalg.svd', 'np.linalg.svd', (['cov'], {}), '(cov)\n', (6591, 6596), True, 'import numpy as np\n'), ((6736, 6755), 'numpy.sqrt', 'np.sqrt', (['eig_val[0]'], {}), '(eig_val[0])\n', (6743, 6755), True, 'import numpy as np\n'), ((6774, 6793), 'numpy.sqrt', 'np.sqrt', (['eig_val[1]'], {}), '(eig_val[1])\n', (6781, 6793), True, 'import numpy as np\n'), ((7442, 7455), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (7448, 7455), True, 'import numpy as np\n'), ((7466, 7479), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (7472, 7479), True, 'import numpy as np\n'), ((7877, 7891), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (7889, 7891), True, 'import matplotlib.pyplot as plt\n'), ((3557, 3572), 'numpy.ones_like', 'np.ones_like', (['x'], {}), '(x)\n', (3569, 3572), True, 'import numpy as np\n'), ((3681, 3687), 'numpy.linalg.inv', 'inv', (['S'], {}), '(S)\n', (3684, 3687), False, 'from numpy.linalg import eig, inv\n'), ((6850, 6864), 'numpy.sqrt', 'np.sqrt', (['(2.279)'], {}), '(2.279)\n', (6857, 6864), True, 'import numpy as np\n'), ((6903, 6928), 'numpy.linspace', 'np.linspace', (['(0)', '(20)', '(20001)'], {}), '(0, 20, 20001)\n', (6914, 6928), True, 'import numpy as np\n'), ((6950, 6975), 'scipy.stats.chi2.cdf', 'chi2.cdf', (['distances'], {'df': '(2)'}), '(distances, df=2)\n', (6958, 6975), False, 'from scipy.stats import chi2\n'), ((1348, 1380), 
'numpy.absolute', 'np.absolute', (['(1 - b ** 2 / a ** 2)'], {}), '(1 - b ** 2 / a ** 2)\n', (1359, 1380), True, 'import numpy as np\n'), ((2222, 2239), 'numpy.radians', 'np.radians', (['angle'], {}), '(angle)\n', (2232, 2239), True, 'import numpy as np\n'), ((2264, 2281), 'numpy.radians', 'np.radians', (['angle'], {}), '(angle)\n', (2274, 2281), True, 'import numpy as np\n'), ((2640, 2657), 'numpy.radians', 'np.radians', (['angle'], {}), '(angle)\n', (2650, 2657), True, 'import numpy as np\n'), ((2682, 2699), 'numpy.radians', 'np.radians', (['angle'], {}), '(angle)\n', (2692, 2699), True, 'import numpy as np\n'), ((4216, 4260), 'numpy.sqrt', 'np.sqrt', (['(1 + 4 * b * b / ((a - c) * (a - c)))'], {}), '(1 + 4 * b * b / ((a - c) * (a - c)))\n', (4223, 4260), True, 'import numpy as np\n'), ((4282, 4326), 'numpy.sqrt', 'np.sqrt', (['(1 + 4 * b * b / ((a - c) * (a - c)))'], {}), '(1 + 4 * b * b / ((a - c) * (a - c)))\n', (4289, 4326), True, 'import numpy as np\n'), ((4647, 4673), 'numpy.arctan', 'np.arctan', (['(2 * b / (a - c))'], {}), '(2 * b / (a - c))\n', (4656, 4673), True, 'import numpy as np\n'), ((7201, 7217), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (7209, 7217), True, 'import numpy as np\n'), ((7570, 7581), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (7576, 7581), True, 'import numpy as np\n'), ((7597, 7608), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (7603, 7608), True, 'import numpy as np\n'), ((7609, 7620), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (7615, 7620), True, 'import numpy as np\n'), ((2893, 2910), 'numpy.radians', 'np.radians', (['angle'], {}), '(angle)\n', (2903, 2910), True, 'import numpy as np\n'), ((2939, 2956), 'numpy.radians', 'np.radians', (['angle'], {}), '(angle)\n', (2949, 2956), True, 'import numpy as np\n'), ((4713, 4739), 'numpy.arctan', 'np.arctan', (['(2 * b / (a - c))'], {}), '(2 * b / (a - c))\n', (4722, 4739), True, 'import numpy as np\n'), ((7392, 7405), 'numpy.cos', 'np.cos', (['theta'], 
{}), '(theta)\n', (7398, 7405), True, 'import numpy as np\n'), ((7413, 7426), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (7419, 7426), True, 'import numpy as np\n'), ((7583, 7594), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (7589, 7594), True, 'import numpy as np\n'), ((1541, 1579), 'numpy.absolute', 'np.absolute', (['((3 * a + b) * (a + 3 * b))'], {}), '((3 * a + b) * (a + 3 * b))\n', (1552, 1579), True, 'import numpy as np\n'), ((7027, 7056), 'numpy.abs', 'np.abs', (['(chi2_cdf - mass_level)'], {}), '(chi2_cdf - mass_level)\n', (7033, 7056), True, 'import numpy as np\n'), ((7056, 7085), 'numpy.abs', 'np.abs', (['(chi2_cdf - mass_level)'], {}), '(chi2_cdf - mass_level)\n', (7062, 7085), True, 'import numpy as np\n')] |
import jax
import numpy as np
import sys
# Make the sibling checkout of theanoxla importable ahead of any installed copy.
sys.path.insert(0, "../")
import theanoxla
import theanoxla.tensor as T
# Symbolic input: a flattened 512x512 single-channel image.
image = T.Placeholder((512 ** 2,), "float32")
output = image.reshape((1, 1, 512, 512))
# Compile the reshape into a callable (theanoxla presumably follows Theano's
# function(inputs, outputs) semantics -- TODO confirm).
f = theanoxla.function(image, outputs=[output])
# Stress loop: repeatedly feed random images through the compiled function.
for i in range(10000):
    print(i)
    f(np.random.randn(512 ** 2))
| [
"theanoxla.tensor.Placeholder",
"theanoxla.function",
"sys.path.insert",
"numpy.random.randn"
] | [((42, 67), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../"""'], {}), "(0, '../')\n", (57, 67), False, 'import sys\n'), ((125, 162), 'theanoxla.tensor.Placeholder', 'T.Placeholder', (['(512 ** 2,)', '"""float32"""'], {}), "((512 ** 2,), 'float32')\n", (138, 162), True, 'import theanoxla.tensor as T\n'), ((208, 251), 'theanoxla.function', 'theanoxla.function', (['image'], {'outputs': '[output]'}), '(image, outputs=[output])\n', (226, 251), False, 'import theanoxla\n'), ((294, 319), 'numpy.random.randn', 'np.random.randn', (['(512 ** 2)'], {}), '(512 ** 2)\n', (309, 319), True, 'import numpy as np\n')] |
import dynet_config
# Configure DyNet memory / autobatching; presumably must run before
# `import dynet` for the settings to take effect -- TODO confirm.
dynet_config.set(mem='2048', autobatch=0)
import dynet as dy
import numpy as np

# Supported recurrent cell types for RNNClassifier.
nn_type_options = ["LSTM", "GRU"]
class RNNClassifier:
"""
Class used to train RNNs (GRU or LSTM). Input and output data are one-hot encoded.
"""
def __init__(self, input_alphabet, num_layers, output_dim, hidden_dim, x_train, y_train, x_test=None, y_test=None,
batch_size=32, nn_type="LSTM"):
assert nn_type in nn_type_options
self.vocab_size = len(input_alphabet) + 1
input_dim = self.vocab_size - 1
output_dim = output_dim
num_of_classes = len(set(y_train)) if not y_test else len(set(y_test).union(set(y_train)))
self.state = None
self.state_size = hidden_dim
self.token_dict = dict((c, i) for i, c in enumerate(input_alphabet))
self.pc = dy.ParameterCollection()
self.input_lookup = self.pc.add_lookup_parameters((self.vocab_size, input_dim)) # TODO DOUBLE-CHECK
self.W = self.pc.add_parameters((output_dim, hidden_dim)) # TODO DOUBLE-CHECK
nn_fun = dy.LSTMBuilder if nn_type == "LSTM" else dy.GRUBuilder
self.rnn = nn_fun(num_layers, input_dim, hidden_dim, self.pc)
self.x_train, self.y_train = self._to_batch(x_train, y_train, batch_size)
self.x_test, self.y_test = None, None
if x_test:
self.x_test, self.y_test = self._to_batch(x_test, y_test, batch_size)
# self.x_test = list(map(self._pad_batch, b_x))
# self.y_test = b_y
def _to_batch(self, x, y, batch_size):
data = list(zip(*sorted(zip(x, y), key=lambda k: len(k[0]))))
batched_X = []
batched_Y = []
for i in range(int(np.ceil(len(x) / batch_size))):
batched_X.append(data[0][i * batch_size:(i + 1) * batch_size])
batched_Y.append(data[1][i * batch_size:(i + 1) * batch_size])
# to prevent bug in validate
if len(batched_X[-1]) == 1:
batched_X.pop()
batched_Y.pop()
padded_x = []
for batch in batched_X:
max_len = len(max(batch, key=len))
tmp = []
for x in batch:
tmp.append([self.vocab_size - 1] * (max_len - len(x)) + x)
padded_x.append(tmp)
return padded_x, batched_Y
def _get_probabilities_over_batch(self, batch):
dy.renew_cg()
# The I iteration embed all the i-th items in all batches
embedded = [dy.lookup_batch(self.input_lookup, chars) for chars in zip(*batch)]
state = self.rnn.initial_state()
output_vec = state.transduce(embedded)[-1]
w = self.W.expr(update=False)
return w * output_vec
    # either define stop loss, or stop acc and for how many epochs acc must not fall lower than it
    def train(self, epochs=10000, stop_acc=0.99, stop_epochs=3, stop_loss=0.0005, verbose=True):
        """
        Train with Adam over the pre-batched training data.

        Stops when the first of these criteria is met:
        * train AND validation accuracy stay >= `stop_acc` for `stop_epochs`
          consecutive epochs,
        * the average epoch loss falls to `stop_loss` or below (while > 0),
        * `epochs` epochs have run.

        Parameters
        ----------
        epochs : int
            Hard cap on the number of passes over the training batches.
        stop_acc : float
            Accuracy threshold for early stopping (must satisfy 0 < stop_acc <= 1.1).
        stop_epochs : int
            Consecutive epochs accuracy must stay at/above `stop_acc`.
        stop_loss : float
            Early-stop threshold on the average epoch loss.
        verbose : bool
            Print per-epoch progress.
        """
        assert 0 < stop_acc <= 1.1
        if verbose:
            print('Starting train')
        trainer = dy.AdamTrainer(self.pc)
        avg_loss = []
        num_epos_above_threshold = 0
        for i in range(epochs):
            loss_values = []
            for sequence, label in zip(self.x_train, self.y_train):
                probabilities = self._get_probabilities_over_batch(sequence)
                # negative log-softmax per example, summed over the batch into one loss node
                loss = dy.sum_batches(dy.pickneglogsoftmax_batch(probabilities, label))
                loss_values.append(loss.value())
                loss.backward()
                trainer.update()
            avg_loss.append(np.mean(loss_values))
            acc_train, acc_test = self.validate()
            if verbose:
                # validate() returns the sentinel 999.0 as acc_test when no test set was given
                if acc_test != 999.0:
                    print(f'Epoch {i}: Accuracy {acc_train.round(5)}, Avg. Loss {avg_loss[-1].round(5)} '
                          f'Validation Accuracy {acc_test.round(5)}')
                else:
                    print(f'Epoch {i}: Accuracy {acc_train.round(5)}, Avg. Loss {avg_loss[-1].round(5)} ')
            if acc_train >= stop_acc and acc_test >= stop_acc:
                num_epos_above_threshold += 1
                if num_epos_above_threshold == stop_epochs:
                    break
            # reset the above-threshold streak as soon as either accuracy drops again
            if num_epos_above_threshold > 0 and acc_train < stop_acc or acc_test < stop_acc:
                num_epos_above_threshold = 0
            if stop_loss >= avg_loss[-1] > 0:
                break
        if verbose:
            print('Done training!')
def validate(self):
    """Return (train_accuracy, test_accuracy).

    test_accuracy is 999.0 when no test set was provided (sentinel used
    by train() to skip printing a validation column).

    Refactor: the duplicated train/test accuracy loops are extracted into
    the _batch_accuracy helper; return values are unchanged.
    """
    acc_train = self._batch_accuracy(self.x_train, self.y_train)
    if self.x_test:
        return acc_train, self._batch_accuracy(self.x_test, self.y_test)
    return acc_train, np.mean(999.0)

def _batch_accuracy(self, batches_x, batches_y):
    # Fraction of per-sequence argmax predictions matching the labels,
    # accumulated over all batches.
    hits = []
    for X, Y in zip(batches_x, batches_y):
        probabilities = self._get_probabilities_over_batch(X).npvalue()
        # column i of the npvalue matrix holds the scores for sequence i
        for i in range(len(probabilities[0])):
            hits.append(1 if np.argmax(probabilities[:, i]) == Y[i] else 0)
    return np.mean(hits)
def predict(self, string: str):
    """Classify *string* from a fresh RNN state and return the argmax class.

    An empty string is embedded as the single padding token."""
    token_ids = [self.token_dict[ch] for ch in string]
    if token_ids:
        embedded = [self.input_lookup[t] for t in token_ids]
    else:
        embedded = [self.input_lookup[self.vocab_size - 1]]
    final = self.rnn.initial_state().transduce(embedded)[-1]
    scores = self.W.expr(update=False) * final
    return np.argmax(scores.npvalue())
def step(self, inp):
    """Feed one token into the persistent RNN state (self.state) and
    return the argmax class for the updated state."""
    # Any falsy inp (e.g. None) maps to the padding index — presumably
    # the intended "no input" behaviour; confirm for token 0.
    token = self.token_dict[inp] if inp else max(self.token_dict.values()) + 1
    self.state = self.state.add_input(self.input_lookup[token])
    logits = self.W.expr(update=False) * self.state.output()
    return np.argmax(logits.npvalue())
def step_internal(self, inp):
    """Feed one token into the persistent RNN state and return the first
    two values of the first hidden-state vector as a "a,b" string.

    Fixes vs. original: everything after the first ``return`` (a second
    return, two debug prints, and a final return of the argmax) was
    unreachable and has been removed, along with the ``w`` local that was
    only used by the dead code.
    """
    # Falsy inp maps to the padding index, mirroring step().
    token = self.token_dict[inp] if inp else max(self.token_dict.values()) + 1
    self.state = self.state.add_input(self.input_lookup[token])
    return ",".join([str(round(r, 2)) for r in [s.vec_value() for s in self.state.h()][0][:2]])
def renew(self):
    # Start a fresh dynet computation graph (invalidates prior expressions).
    dy.renew_cg()
def save(self, path):
    # Persist all model parameters to *path*.
    self.pc.save(path)
def load(self, path):
    # Restore parameters previously written by save() into this collection.
    self.pc.populate(path)
| [
"numpy.mean",
"dynet.pickneglogsoftmax_batch",
"dynet.lookup_batch",
"numpy.argmax",
"dynet.AdamTrainer",
"dynet.renew_cg",
"dynet.ParameterCollection",
"dynet_config.set"
] | [((21, 62), 'dynet_config.set', 'dynet_config.set', ([], {'mem': '"""2048"""', 'autobatch': '(0)'}), "(mem='2048', autobatch=0)\n", (37, 62), False, 'import dynet_config\n'), ((854, 878), 'dynet.ParameterCollection', 'dy.ParameterCollection', ([], {}), '()\n', (876, 878), True, 'import dynet as dy\n'), ((2395, 2408), 'dynet.renew_cg', 'dy.renew_cg', ([], {}), '()\n', (2406, 2408), True, 'import dynet as dy\n'), ((3029, 3052), 'dynet.AdamTrainer', 'dy.AdamTrainer', (['self.pc'], {}), '(self.pc)\n', (3043, 3052), True, 'import dynet as dy\n'), ((6986, 6999), 'dynet.renew_cg', 'dy.renew_cg', ([], {}), '()\n', (6997, 6999), True, 'import dynet as dy\n'), ((2495, 2536), 'dynet.lookup_batch', 'dy.lookup_batch', (['self.input_lookup', 'chars'], {}), '(self.input_lookup, chars)\n', (2510, 2536), True, 'import dynet as dy\n'), ((5401, 5419), 'numpy.mean', 'np.mean', (['acc_train'], {}), '(acc_train)\n', (5408, 5419), True, 'import numpy as np\n'), ((3548, 3568), 'numpy.mean', 'np.mean', (['loss_values'], {}), '(loss_values)\n', (3555, 3568), True, 'import numpy as np\n'), ((4717, 4747), 'numpy.argmax', 'np.argmax', (['probabilities[:, i]'], {}), '(probabilities[:, i])\n', (4726, 4747), True, 'import numpy as np\n'), ((5421, 5438), 'numpy.mean', 'np.mean', (['acc_test'], {}), '(acc_test)\n', (5428, 5438), True, 'import numpy as np\n'), ((5459, 5473), 'numpy.mean', 'np.mean', (['(999.0)'], {}), '(999.0)\n', (5466, 5473), True, 'import numpy as np\n'), ((3356, 3404), 'dynet.pickneglogsoftmax_batch', 'dy.pickneglogsoftmax_batch', (['probabilities', 'label'], {}), '(probabilities, label)\n', (3382, 3404), True, 'import dynet as dy\n'), ((5166, 5196), 'numpy.argmax', 'np.argmax', (['probabilities[:, i]'], {}), '(probabilities[:, i])\n', (5175, 5196), True, 'import numpy as np\n')] |
import numpy as np
import time
import argparse
import roboverse
import roboverse.bullet as bullet
# Keyboard -> 7-dof delta-action vectors. Arrow keys change the first two
# components, j/k the third, and h/l the last component — presumably
# x/y/z translation plus gripper open/close; confirm against the env's
# action space.
KEY_TO_ACTION_MAPPING = {
    bullet.p.B3G_LEFT_ARROW: np.array([0.1, 0, 0, 0, 0, 0, 0]),
    bullet.p.B3G_RIGHT_ARROW: np.array([-0.1, 0, 0, 0, 0, 0, 0]),
    bullet.p.B3G_UP_ARROW: np.array([0, -0.1, 0, 0, 0, 0, 0]),
    bullet.p.B3G_DOWN_ARROW: np.array([0, 0.1, 0, 0, 0, 0, 0]),
    ord('j'): np.array([0, 0, 0.2, 0, 0, 0, 0]),
    ord('k'): np.array([0, 0, -0.2, 0, 0, 0, 0]),
    ord('h'): np.array([0, 0, 0, 0, 0, 0, -0.7]),
    ord('l'): np.array([0, 0, 0, 0, 0, 0, 0.7])
}
# Keys that trigger an environment-level command instead of an action.
ENV_COMMANDS = {
    ord('r'): lambda env: env.reset()
}
def keyboard_control(args):
    """Drive the environment interactively from keyboard input.

    Polls pybullet keyboard events in an endless loop, accumulates the
    mapped deltas into one action per tick, steps the env when at least
    one movement key was pressed, and prints the reward.
    """
    env = roboverse.make(args.env_name, gui=True)
    while True:
        action = np.array([0, 0, 0, 0, 0, 0, 0], dtype='float32')
        take_action = False
        for key in bullet.p.getKeyboardEvents():
            if key in KEY_TO_ACTION_MAPPING:
                action += KEY_TO_ACTION_MAPPING[key]
                take_action = True
            elif key in ENV_COMMANDS:
                ENV_COMMANDS[key](env)
                take_action = False
        if take_action:
            obs, rew, done, info = env.step(action)
            print(rew)
        time.sleep(0.1)
# Script entry point: parse the env name and hand control to the keyboard loop.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("-e", "--env-name", type=str,
                        default='Widow250MultiTaskGrasp-v0')
    args = parser.parse_args()
    keyboard_control(args)
| [
"roboverse.bullet.p.getKeyboardEvents",
"argparse.ArgumentParser",
"roboverse.make",
"time.sleep",
"numpy.array"
] | [((155, 188), 'numpy.array', 'np.array', (['[0.1, 0, 0, 0, 0, 0, 0]'], {}), '([0.1, 0, 0, 0, 0, 0, 0])\n', (163, 188), True, 'import numpy as np\n'), ((220, 254), 'numpy.array', 'np.array', (['[-0.1, 0, 0, 0, 0, 0, 0]'], {}), '([-0.1, 0, 0, 0, 0, 0, 0])\n', (228, 254), True, 'import numpy as np\n'), ((283, 317), 'numpy.array', 'np.array', (['[0, -0.1, 0, 0, 0, 0, 0]'], {}), '([0, -0.1, 0, 0, 0, 0, 0])\n', (291, 317), True, 'import numpy as np\n'), ((348, 381), 'numpy.array', 'np.array', (['[0, 0.1, 0, 0, 0, 0, 0]'], {}), '([0, 0.1, 0, 0, 0, 0, 0])\n', (356, 381), True, 'import numpy as np\n'), ((397, 430), 'numpy.array', 'np.array', (['[0, 0, 0.2, 0, 0, 0, 0]'], {}), '([0, 0, 0.2, 0, 0, 0, 0])\n', (405, 430), True, 'import numpy as np\n'), ((446, 480), 'numpy.array', 'np.array', (['[0, 0, -0.2, 0, 0, 0, 0]'], {}), '([0, 0, -0.2, 0, 0, 0, 0])\n', (454, 480), True, 'import numpy as np\n'), ((496, 530), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, -0.7]'], {}), '([0, 0, 0, 0, 0, 0, -0.7])\n', (504, 530), True, 'import numpy as np\n'), ((546, 579), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0.7]'], {}), '([0, 0, 0, 0, 0, 0, 0.7])\n', (554, 579), True, 'import numpy as np\n'), ((680, 719), 'roboverse.make', 'roboverse.make', (['args.env_name'], {'gui': '(True)'}), '(args.env_name, gui=True)\n', (694, 719), False, 'import roboverse\n'), ((1331, 1356), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1354, 1356), False, 'import argparse\n'), ((782, 830), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 0]'], {'dtype': '"""float32"""'}), "([0, 0, 0, 0, 0, 0, 0], dtype='float32')\n", (790, 830), True, 'import numpy as np\n'), ((846, 874), 'roboverse.bullet.p.getKeyboardEvents', 'bullet.p.getKeyboardEvents', ([], {}), '()\n', (872, 874), True, 'import roboverse.bullet as bullet\n'), ((1273, 1288), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (1283, 1288), False, 'import time\n')] |
import cv2
import numpy as np
from PIL import Image, ImageDraw, ImageFont
import os
def print_utf8_text(image, xy, text, color): # draw UTF-8 characters
    """Draw *text* on the cv2 image at *xy* via Pillow (supports non-ASCII)
    and return the result as a numpy array. color is an (r, g, b) triple."""
    #fontName = "FreeSerif.ttf" #'FreeSansBold.ttf' # 'FreeMono.ttf' 'FreeSerifBold.ttf'
    font = ImageFont.truetype("arial.ttf", 24) # font selection
    img_pil =Image.fromarray(image) # convert the image to Pillow form
    draw = ImageDraw.Draw(img_pil) # prepare a drawing context
    draw.text((xy[0],xy[1]), text, font=font, fill=(color[0], color[1], color[2], 0)) #b,g,r,a
    image = np.array(img_pil) # convert back to cv2 form (numpy.array())
    return image
# Live face recognition: load a trained LBPH recognizer, detect faces with
# a Haar cascade on the webcam feed, and overlay the recognised name and
# confidence. (Original Turkish comments translated to English.)
recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read("egitim/egitim.yml")
faceCascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
font = cv2.FONT_HERSHEY_SIMPLEX
# initialise the id counter
imagePaths = [os.path.join("veriseti",f) for f in os.listdir("veriseti")]
names = [None]
for imagePath in imagePaths:
    # filenames are assumed to look like <prefix>.<id>.<name>.<ext> — TODO confirm
    name = os.path.split(imagePath)[-1].split(".")[2]
    if name not in names:
        names.append(name)
id = 0
#names = [None, "<NAME>", "<NAME>", "<NAME>", "<NAME>", "<NAME>", "Berço","<NAME>,"]
# start live video capture
kamera = cv2.VideoCapture(0)
kamera.set(3, 1000) # set the video width
kamera.set(4, 800) # set the video height
# define the minimum face-window size (10% of the frame)
minW = 0.1 * kamera.get(3) # width
minH = 0.1 * kamera.get(4) # height
while True:
    ret, img = kamera.read()
    gri = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    yuzler = faceCascade.detectMultiScale(
        gri,
        scaleFactor=1.2,
        minNeighbors=5,
        minSize=(int(minW), int(minH)),
    )
    for (x,y,w,h) in yuzler:
        cv2.rectangle(img, (x,y), (x + w, y + h), (0, 255, 0), 2)
        # lower LBPH distance = better match; < 100 is treated as recognised
        id, uyum= recognizer.predict(gri[y:y + h, x:x + w])
        if (uyum < 100):
            id = names[id]
            uyum = f"Uyum= {round(uyum,0)}%"
        else:
            id = "Access Denied"
            uyum = f"Uyum= {round(uyum,0)}%"
        color = (255,255,255)
        img=print_utf8_text(img,(x + 5, y - 25),str(id),color) # Turkish characters
        cv2.putText(img, str(uyum), (x + 5, y + h + 25), font, 1, (255, 255, 0), 1)
    cv2.imshow("Kamera", img)
    k = cv2.waitKey(10) & 0xff # ESC or the q key to quit
    if k == 27 or k==ord("q"):
        break
# Clean up
print("\n [INFO] Programdan çıkıyor ve ortalığı temizliyorum")
kamera.release()
cv2.destroyAllWindows()
| [
"cv2.rectangle",
"PIL.Image.fromarray",
"os.listdir",
"os.path.join",
"PIL.ImageFont.truetype",
"cv2.face.LBPHFaceRecognizer_create",
"cv2.imshow",
"os.path.split",
"numpy.array",
"PIL.ImageDraw.Draw",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"cv2.cvtColor",
"cv2.CascadeClassifier",
... | [((615, 651), 'cv2.face.LBPHFaceRecognizer_create', 'cv2.face.LBPHFaceRecognizer_create', ([], {}), '()\n', (649, 651), False, 'import cv2\n'), ((703, 791), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (["(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')"], {}), "(cv2.data.haarcascades +\n 'haarcascade_frontalface_default.xml')\n", (724, 791), False, 'import cv2\n'), ((1198, 1217), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (1214, 1217), False, 'import cv2\n'), ((2463, 2486), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2484, 2486), False, 'import cv2\n'), ((250, 285), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['"""arial.ttf"""', '(24)'], {}), "('arial.ttf', 24)\n", (268, 285), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((314, 336), 'PIL.Image.fromarray', 'Image.fromarray', (['image'], {}), '(image)\n', (329, 336), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((380, 403), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['img_pil'], {}), '(img_pil)\n', (394, 403), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((526, 543), 'numpy.array', 'np.array', (['img_pil'], {}), '(img_pil)\n', (534, 543), True, 'import numpy as np\n'), ((854, 881), 'os.path.join', 'os.path.join', (['"""veriseti"""', 'f'], {}), "('veriseti', f)\n", (866, 881), False, 'import os\n'), ((1474, 1511), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (1486, 1511), False, 'import cv2\n'), ((2234, 2259), 'cv2.imshow', 'cv2.imshow', (['"""Kamera"""', 'img'], {}), "('Kamera', img)\n", (2244, 2259), False, 'import cv2\n'), ((890, 912), 'os.listdir', 'os.listdir', (['"""veriseti"""'], {}), "('veriseti')\n", (900, 912), False, 'import os\n'), ((1701, 1759), 'cv2.rectangle', 'cv2.rectangle', (['img', '(x, y)', '(x + w, y + h)', '(0, 255, 0)', '(2)'], {}), '(img, (x, y), (x + w, y + h), (0, 255, 0), 2)\n', (1714, 1759), False, 'import cv2\n'), ((2268, 
2283), 'cv2.waitKey', 'cv2.waitKey', (['(10)'], {}), '(10)\n', (2279, 2283), False, 'import cv2\n'), ((969, 993), 'os.path.split', 'os.path.split', (['imagePath'], {}), '(imagePath)\n', (982, 993), False, 'import os\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# %load_ext autoreload
# %autoreload 2
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import collections
import itertools
import time
import pickle
import copy
from itertools import permutations
import datetime
import tensorflow as tf
# from keras.backend.tensorflow_backend import set_session
from tensorflow.compat.v1.keras.backend import set_session
config2 = tf.ConfigProto()
config2.gpu_options.allow_growth = True
set_session(tf.Session(config=config2))
from graph_nets import graphs
from graph_nets import utils_np
from graph_nets import utils_tf
from graph_nets.demos import models
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
from scipy import spatial
import tensorflow as tf
from collections import Counter
SEED = 1
np.random.seed(SEED)
tf.set_random_seed(SEED)
print(tf.__version__)
# Models and loss curves are saved in the <taskname> folder.
taskname = 'ArtLabel1'
if not os.path.exists(taskname):
    os.mkdir(taskname)
# Number of node and edge classes — presumably bifurcation-of-interest
# and vessel types; confirm against the labelling scheme.
BOITYPENUM = 23
VESTYPENUM = 25
# Cache of loaded graphs keyed by pickle path (filled by generate_graph).
graphcache = {}
graphfoldername = 'graph/graphsim'
# Dedicated RNG for data augmentation, fixed seed for reproducibility.
randaug = np.random.RandomState(seed=0)
# {0:
# { 1:
# {'dist': 40.223774241240335,
# 'rad': 1.6151277133333335,
# 'vestype': array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0.]),
# # 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25
# 'dir': [0.33139994500635567, -0.5581841181772571, 0.7606606120106799]},
# 34: {'dist': 36.04070189744566,
# 'rad': 1.9435775517241378,
# 'vestype': array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1.,0., 0., 0., 0., 0., 0., 0., 0.]),
# 'dir': [-0.007138775367677217, -0.5506059360733849, 0.8347347728751937]},
# 96: {'dist': 42.32152785546142,
# 'rad': 1.883450929411765,
# 'vestype': array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0.,0., 0., 0., 0., 0., 0., 0., 0.]),
# 'dir': [-0.4512000651830446, -0.5269815194298534, 0.7202145370361663]}},
# 1:
# { 0: {'dist': 40.223774241240335,
# 'rad': 1.6151277133333335,
# 'vestype': array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0.,0., 0., 0., 0., 0., 0., 0., 0.]),
# 'dir': [0.33139994500635567, ...
# load datasets
# load datasets for training: all_db maps split name ('train'/'val'/'test')
# to a list of graph pickle paths.
from load_graph import prepare_graphs
# dbnames = ['BRAVE','CROPCheck','ArizonaCheck','Parkinson2TPCheck','Anzhen']
dbnames = ['CROPCheck','ArizonaCheck','Parkinson2TPCheck','Anzhen']
all_db = prepare_graphs(dbnames, graphfoldername)
print('='*50)
for key in all_db.keys():
    print('All db ',key,'len',len(all_db[key]))
# sanity check: every referenced pickle graph file must exist on disk
assert np.array([os.path.exists(i) for i in np.concatenate([all_db[j] for j in all_db])]).all()
# np.concatenate([all_db[j] for j in all_db]).shape
from load_graph import generate_graph
# print(all_db['train'][0])
# G = generate_graph(graphcache,all_db['train'][0],randaug)
#
# graphcache
# frequency count from node types: inverse-frequency class weights for the
# node loss, normalised and floored at 0.01 so no class is ignored.
boitypes = []
for picklei in all_db['train'][::-1]:
    G = generate_graph(graphcache, picklei, randaug)
    boitypes.extend([ni['boitype'] for i,ni in G.nodes(data=True)])
node_class_ct = Counter(boitypes)
node_class_weightr = []
for i in range(BOITYPENUM):
    if node_class_ct[i]!=0:
        node_class_weightr.append(1/node_class_ct[i])
    else:
        node_class_weightr.append(0)
sumweights = np.sum(node_class_weightr)
node_class_weight = np.array([max(0.01,i/sumweights) for i in node_class_weightr]).astype(np.float64)
plt.bar(np.arange(BOITYPENUM),node_class_weight)
# frequency count from edge types: same inverse-frequency weighting as for
# nodes, floored at 0.005. Edge type = argmax of the one-hot 'vestype'.
vestypes = []
for picklei in all_db['train']:
    G = generate_graph(graphcache,picklei,randaug)
    vestypes.extend([np.argmax(ni['vestype']) for i,j,ni in G.edges(data=True)])
edge_class_ct = Counter(vestypes)
edge_class_weightr = []
for i in range(VESTYPENUM):
    if edge_class_ct[i]!=0:
        edge_class_weightr.append(1/edge_class_ct[i])
    else:
        edge_class_weightr.append(0)
sumweights = np.sum(edge_class_weightr)
edge_class_weight = np.array([max(0.005,i/sumweights) for i in edge_class_weightr]).astype(np.float64)
plt.bar(np.arange(VESTYPENUM),edge_class_weight)
## Visualize example graphs (plotting loop itself is commented out below)
from gnn_utils import *
seed = 1 #@param{type: 'integer'}
rand = np.random.RandomState(seed=seed)
num_examples = 15 #@param{type: 'integer'}
input_graphs, target_graphs, graphs, selids = generate_networkx_graphs(graphcache, all_db, rand, num_examples,'train')
# lay out up to 16 examples on a 3-column grid
num = min(num_examples, 16)
w = 3
h = int(np.ceil(num / w))
fig = plt.figure(40, figsize=(w * 4, h * 4))
fig.clf()
# for j, graph in enumerate(graphs):
#     ax = fig.add_subplot(h, w, j + 1)
#     pos = get_node_dict(graph, "pos", ignoreaxis = 2)
#     plotter = GraphPlotter(ax, graph, pos)
#     plotter.title = os.path.basename(all_db['train'][selids[j]])[:-7]
#     plotter.draw_graph_with_solution()
# Build the Encode-Process-Decode GNN, losses, optimizer and session state.
tf.reset_default_graph()
seed = 2
rand = np.random.RandomState(seed=seed)
# Model parameters.
# Number of processing (message-passing) steps.
num_processing_steps_tr = 10
num_processing_steps_ge = 10
# Data / training parameters.
num_training_iterations = 70000
batch_size_tr = min(len(all_db['train']),32)
batch_size_ge = len(all_db['val'])
# Data.
# Input and target placeholders.
input_ph, target_ph = create_placeholders(graphcache, rand, batch_size_tr, all_db)
# Connect the data to the model.
# Instantiate the model.
model = models.EncodeProcessDecode(edge_output_size=VESTYPENUM, node_output_size=BOITYPENUM)
# A list of outputs, one per processing step.
output_ops_tr = model(input_ph, num_processing_steps_tr)
output_ops_ge = model(input_ph, num_processing_steps_ge)
# Training loss: class-weighted, averaged over all processing steps.
loss_ops_tr = create_loss_ops(target_ph, output_ops_tr,node_class_weight,edge_class_weight,weighted=True)
# Loss across processing steps.
loss_op_tr = sum(loss_ops_tr) / num_processing_steps_tr
# Test/generalization loss: unweighted, final step only.
loss_ops_ge = create_loss_ops(target_ph, output_ops_ge,node_class_weight,edge_class_weight,weighted=False)
loss_op_ge = loss_ops_ge[-1] # Loss from final processing step.
# Optimizer.
learning_rate = 1e-3
optimizer = tf.train.AdamOptimizer(learning_rate)
step_op = optimizer.minimize(loss_op_tr)
# Lets an iterable of TF graphs be output from a session as NP graphs.
input_ph, target_ph = make_all_runnable_in_session(input_ph, target_ph)
# Close a leftover session from a previous (notebook) run, if any.
try:
    sess.close()
except NameError:
    pass
sess = tf.Session()
sess.run(tf.global_variables_initializer())
# Training/validation history, appended to at every logging interval
# (read by pltloss() and saved to losses.npy).
last_iteration = 0
logged_iterations = []
losses_tr = []
corrects_tr = []
solveds_tr = []
corrects_tr_e = []
solveds_tr_e = []
corrects_tr_n = []
solveds_tr_n = []
losses_ge = []
corrects_ge = []
solveds_ge = []
corrects_ge_e = []
solveds_ge_e = []
corrects_ge_n = []
solveds_ge_n = []
saver = tf.train.Saver()
def _plot_metric(fig, position, x, y_tr, y_ge, title, ylabel, legend=False):
    # One training-vs-generalization curve pair on subplot `position` of a
    # 2x3 grid (shared helper for pltloss).
    ax = fig.add_subplot(2, 3, position)
    ax.plot(x, y_tr, "k", label="Training")
    ax.plot(x, y_ge, "k--", label="Test/generalization")
    ax.set_title(title)
    ax.set_xlabel("Training iteration")
    ax.set_ylabel(ylabel)
    if legend:
        ax.legend()

def pltloss():
    """Visualize training/validation curves from the module-level history
    lists (losses, node/edge accuracy, solved fractions).

    Can be run mid-training to inspect intermediate results; training can
    then be resumed. Refactor: the five copy-pasted subplot stanzas of the
    original are now one `_plot_metric` helper — figures are unchanged.
    """
    fig = plt.figure(1, figsize=(18, 10))
    fig.clf()
    x = np.array(logged_iterations)
    _plot_metric(fig, 1, x, losses_tr, losses_ge,
                 "Loss across training", "Loss (binary cross-entropy)", legend=True)
    _plot_metric(fig, 2, x, corrects_tr_n, corrects_ge_n,
                 "Fraction correct across training", "Fraction nodes correct")
    _plot_metric(fig, 3, x, solveds_tr_n, solveds_ge_n,
                 "Fraction solved across training", "Fraction examples solved")
    _plot_metric(fig, 5, x, corrects_tr_e, corrects_ge_e,
                 "Fraction correct across training", "Fraction edges correct")
    _plot_metric(fig, 6, x, solveds_tr_e, solveds_ge_e,
                 "Fraction solved across training", "Fraction examples solved")
    plt.show()
pltloss()
# @title Run training { form-width: "30%" }
# You can interrupt this cell's training loop at any time, and visualize the
# intermediate results by running the next cell (below). You can then resume
# training by simply executing this cell again.
# How much time between logging and printing the current results.
log_every_seconds = 60
# NOTE(review): this overrides the 70000 set during model setup.
num_training_iterations = 2000
print("# (iteration number), T (elapsed seconds), "
      "Ltr (training loss), Lge (test/generalization loss), "
      "Ctr (training fraction nodes/edges labeled correctly), "
      "Str (training fraction examples solved correctly), "
      "Cge (test/generalization fraction nodes/edges labeled correctly), "
      "Sge (test/generalization fraction examples solved correctly)")
start_time = time.time()
last_log_time = start_time
for iteration in range(last_iteration, num_training_iterations):
    last_iteration = iteration
    feed_dict, _, _ = create_feed_dict(all_db, rand, batch_size_tr, input_ph, target_ph, 'train', graphcache)
    train_values = sess.run({
        "step": step_op,
        "target": target_ph,
        "loss": loss_op_tr,
        "outputs": output_ops_tr
    },
        feed_dict=feed_dict)
    the_time = time.time()
    elapsed_since_last_log = the_time - last_log_time
    # evaluate/log/checkpoint only every log_every_seconds of wall time
    if elapsed_since_last_log > log_every_seconds:
        last_log_time = the_time
        feed_dict, raw_graphs, _ = create_feed_dict(all_db, rand, batch_size_ge, input_ph, target_ph, 'val', graphcache)
        test_values = sess.run({
            "target": target_ph,
            "loss": loss_op_ge,
            "outputs": output_ops_ge
        },
            feed_dict=feed_dict)
        # accuracy on nodes+edges, nodes only, and edges only
        correct_tr, solved_tr = compute_accuracy(
            train_values["target"], train_values["outputs"][-1], use_nodes=True, use_edges=True)
        correct_ge, solved_ge = compute_accuracy(
            test_values["target"], test_values["outputs"][-1], use_nodes=True, use_edges=True)
        correct_tr_n, solved_tr_n = compute_accuracy(
            train_values["target"], train_values["outputs"][-1], use_nodes=True, use_edges=False)
        correct_ge_n, solved_ge_n = compute_accuracy(
            test_values["target"], test_values["outputs"][-1], use_nodes=True, use_edges=False)
        correct_tr_e, solved_tr_e = compute_accuracy(
            train_values["target"], train_values["outputs"][-1], use_nodes=False, use_edges=True)
        correct_ge_e, solved_ge_e = compute_accuracy(
            test_values["target"], test_values["outputs"][-1], use_nodes=False, use_edges=True)
        elapsed = time.time() - start_time
        losses_tr.append(train_values["loss"])
        corrects_tr.append(correct_tr)
        solveds_tr.append(solved_tr)
        corrects_tr_n.append(correct_tr_n)
        solveds_tr_n.append(solved_tr_n)
        corrects_tr_e.append(correct_tr_e)
        solveds_tr_e.append(solved_tr_e)
        losses_ge.append(test_values["loss"])
        corrects_ge.append(correct_ge)
        solveds_ge.append(solved_ge)
        corrects_ge_n.append(correct_ge_n)
        solveds_ge_n.append(solved_ge_n)
        corrects_ge_e.append(correct_ge_e)
        solveds_ge_e.append(solved_ge_e)
        logged_iterations.append(iteration)
        print("# {:05d}, T {:.1f}, Ltr {:.4f}, Lge {:.4f},\n Ctr {:.4f}, Str {:.4f}, Cge {:.4f}, Sge {:.4f}\n"
              " CtrN {:.4f}, StrN {:.4f}, CgeN {:.4f}, SgeN {:.4f} \n CtrE {:.4f}, StrE {:.4f}, CgeE {:.4f}, SgeE {:.4f}".format(
            iteration, elapsed, train_values["loss"], test_values["loss"],
            correct_tr, solved_tr, correct_ge, solved_ge,
            correct_tr_n, solved_tr_n, correct_ge_n, solved_ge_n,
            correct_tr_e, solved_tr_e, correct_ge_e, solved_ge_e))
        # checkpoint whenever val loss or node/edge accuracy hits a new best
        # (only after a warm-up of 10000 iterations)
        if iteration > 10000 and \
                (test_values["loss"] == np.min(losses_ge) or \
                correct_ge_n == np.max(corrects_ge_n) or \
                correct_ge_e == np.max(corrects_ge_e)):
            save_path = saver.save(sess, taskname + "/model%d-%.4f-%.4f-%.4f.ckpt" % (
                iteration, test_values["loss"], correct_ge_n, correct_ge_e))
            print("Val loss decrease. Model saved in path: %s" % save_path)
        print('Cge {:.4f} {:.4f} {:.4f}, Sge {:.4f} Loss_gen {:.4f}'
              .format(correct_ge, correct_ge_n, correct_ge_e, solved_ge, test_values["loss"]))
        pltloss()
        np.save(taskname + '/losses.npy',
                [last_iteration, logged_iterations, losses_tr, corrects_tr, solveds_tr, corrects_tr_e, solveds_tr_e,
                 corrects_tr_n, solveds_tr_n, losses_ge, corrects_ge, solveds_ge, corrects_ge_e, solveds_ge_e,
                 corrects_ge_n, solveds_ge_n])
#save model for current iteration
#save_path = saver.save(sess, taskname+"/model%d.ckpt"%iteration)
save_path = saver.save(sess, taskname+"/model%d-%.4f-%.4f-%.4f.ckpt"%(iteration,test_values["loss"],correct_ge_n,correct_ge_e))
np.save(taskname+'/losses.npy',[last_iteration,logged_iterations,losses_tr,corrects_tr,solveds_tr,corrects_tr_e,solveds_tr_e,corrects_tr_n,solveds_tr_n,losses_ge,corrects_ge,solveds_ge,corrects_ge_e,solveds_ge_e,corrects_ge_n,solveds_ge_n])
print("Model saved in path: %s" % save_path)
pltloss()
# Evaluate on test set: pick the best checkpoint by the chosen criterion.
# Checkpoint names encode "model<iter>-<valloss>-<nodeacc>-<edgeacc>.ckpt",
# so the dash-separated fields are parsed back out of the filename.
import glob
valbest = 'valloss'
#valbest = 'nodeacc'
savedmodels = glob.glob(taskname+'/*ckpt.index')
if valbest == 'valloss':
    bestvalid = np.argmin([float(os.path.basename(i)[:-11].split('-')[1]) for i in savedmodels])
elif valbest == 'nodeacc':
    bestvalid = np.argmax([float(os.path.basename(i)[:-11].split('-')[2]) for i in savedmodels])
elif valbest == 'edgeacc':
    bestvalid = np.argmax([float(os.path.basename(i)[:-11].split('-')[3]) for i in savedmodels])
# strip the trailing '.index' to get the checkpoint prefix
valmodelfilename = savedmodels[bestvalid][:-6]
# Restore variables from disk.
saver.restore(sess, valmodelfilename)
print("Model restored epoch",valmodelfilename)
# load the full test set, run one forward pass, and report per-graph latency
starttime = datetime.datetime.now()
feed_dict, raw_graphs, selids = create_feed_dict(all_db, rand, len(all_db['test']), input_ph, target_ph, 'test',graphcache)
test_values = sess.run({
    "target": target_ph,
    "loss": loss_op_ge,
    "outputs": output_ops_ge
},
    feed_dict=feed_dict)
targets = utils_np.graphs_tuple_to_data_dicts(test_values["target"])
# outputs[g] is a tuple of per-processing-step data dicts for graph g
outputs = list(
    zip(*(utils_np.graphs_tuple_to_data_dicts(test_values["outputs"][i])
          for i in range(len(test_values["outputs"])))))
endtime = datetime.datetime.now()
print((endtime-starttime).total_seconds()/len(all_db['test']))
# direct output from GNN: keep a deep copy of the final processing step's
# prediction for every test graph (rawoutputs feeds the refinement below).
rawoutputs = []
for ti,outputr in enumerate(outputs):
    output = copy.copy(outputr[-1])
    # NOTE(review): the triple-quoted string below is disabled code (a
    # runtime no-op) that would have merged M3 probabilities into M2.
    '''for edgei in range(output['edges'].shape[0]):
        #add prob m3 to m2
        output['edges'][edgei][5] += output['edges'][edgei][12]
        output['edges'][edgei][12] = 0
        output['edges'][edgei][6] += output['edges'][edgei][13]
        output['edges'][edgei][13] = 0'''
    rawoutputs.append(copy.deepcopy(output))
# Hierarchical Refinement
## collect ves branch length: per-vessel-type distance statistics
## (mean/std) between labelled landmark nodes over the training graphs.
from hr_utils import nodeconnection, matchvestype, nodedist, edgemap
branch_dist_mean = {}
branch_dist_std = {}
branchdists = [[] for i in range(VESTYPENUM)]
for gi in all_db['train'][:]:
    G = generate_graph(graphcache, gi, randaug)
    # map landmark node type -> node id for this graph (type 0 = unlabeled)
    cnode_type_to_id = {}
    for nodei in G.nodes():
        cnodetype = G.nodes[nodei]['boitype']
        if cnodetype != 0:
            cnode_type_to_id[cnodetype] = nodei
    # print(cnode_type_to_id)
    # edge according to node
    for nodetypei in range(len(nodeconnection)):
        if len(nodeconnection[nodetypei]) == 0:
            continue
        if nodetypei not in cnode_type_to_id.keys():
            continue
        for branchnodetypei in nodeconnection[nodetypei]:
            if branchnodetypei not in cnode_type_to_id.keys() or nodetypei not in cnode_type_to_id.keys():
                continue
            edgetype = matchvestype(nodetypei, branchnodetypei)
            # only record the distance when the two landmarks are connected
            try:
                sp = nx.shortest_path(G, cnode_type_to_id[nodetypei], cnode_type_to_id[branchnodetypei])
            except nx.NetworkXNoPath:
                print('no path between', nodetypei, branchnodetypei, gi)
                continue
            cdist = nodedist(G, cnode_type_to_id[nodetypei], cnode_type_to_id[branchnodetypei])
            branchdists[edgetype].append(cdist)
    # additional edges based on node types
    for edgetype, nodetypes in edgemap.items():
        if nodetypes[0] not in cnode_type_to_id.keys() or nodetypes[1] not in cnode_type_to_id.keys():
            continue
        edgetype = matchvestype(nodetypes[0], nodetypes[1])
        try:
            sp = nx.shortest_path(G, cnode_type_to_id[nodetypes[0]], cnode_type_to_id[nodetypes[1]])
        except nx.NetworkXNoPath:
            print('no path between', nodetypes, gi)
            continue
        cdist = nodedist(G, cnode_type_to_id[nodetypes[0]], cnode_type_to_id[nodetypes[1]])
        branchdists[edgetype].append(cdist)
# summarise per-type distances; types with no observations are skipped
for vestype, dists in enumerate(branchdists):
    if len(dists) == 0:
        continue
    branch_dist_mean[vestype] = np.mean(dists)
    branch_dist_std[vestype] = np.std(dists)
    print(vestype, branch_dist_mean[vestype], branch_dist_std[vestype])
## ground truth construction from graph: build cleaned node/edge labels
## (gtsr) for every test graph, fixing known labelling inconsistencies.
#set the range for test
trange = np.arange(len(raw_graphs))
#trange = np.arange(2)
from hr_utils import findedgeid, edgefromnode
gtsr = []
for ti in trange:
    graph = raw_graphs[ti]
    target = targets[ti]
    # strip the '.pickle' suffix — presumably 7 chars; confirm
    graphname = os.path.basename(all_db['test'][selids[ti]])[:-7]
    print('=' * 40, '\n', 'ti', ti, graphname)
    # prob for all edges and nodes
    targetnodes = np.argmax(target['nodes'], axis=1)
    ground_truth = [np.argmax(ni) for ni in target["nodes"]]
    ground_truth_edge = []
    for ni in target["edges"]:
        if np.max(ni) != 0:
            ground_truth_edge.append(np.argwhere(ni)[0])
        else:
            ground_truth_edge.append(0)
    # merge m23: fold edge types 12/13 (M3) into 5/6 (M2)
    for i, v in enumerate(ground_truth_edge):
        if v == 12:
            ground_truth_edge[i] = 5
        if v == 13:
            ground_truth_edge[i] = 6
    # correct wrong pcomm labels, p1/2/pcomm at node 1
    if 19 in ground_truth:
        pcomp1p2id = ground_truth.index(19)
        if graph.nodes[pcomp1p2id]['deg'] == 1:
            print('pcomml has deg 1')
            ground_truth[pcomp1p2id] = 0
            if 21 in ground_truth:
                pcomicaid = ground_truth.index(21)
                ground_truth[pcomicaid] = 0
                # edge
                edgeid = findedgeid(graph, pcomp1p2id, pcomicaid)
                ground_truth_edge[edgeid] = 0
    # pcomm to p2
    if 21 in ground_truth and 19 not in ground_truth:
        print('pcomml to p2')
    # correct wrong pcomm labels (right side, mirror of the block above)
    if 20 in ground_truth:
        pcomp1p2id = ground_truth.index(20)
        if graph.nodes[pcomp1p2id]['deg'] == 1:
            print('pcommr has deg 1')
            ground_truth[pcomp1p2id] = 0
            if 22 in ground_truth:
                pcomicaid = ground_truth.index(22)
                ground_truth[pcomicaid] = 0
                # edge
                edgeid = findedgeid(graph, pcomp1p2id, pcomicaid)
                ground_truth_edge[edgeid] = 0
    # pcomm to p2
    if 22 in ground_truth and 20 not in ground_truth:
        print('pcomml to p2')
    for i, n in enumerate(graph.nodes()):
        # ignore abnormal ground truth label, likely to be a manual error in labeling
        if ground_truth[i] != 0 and graph.nodes[i]['deg'] != len(edgefromnode[ground_truth[i]]):
            print('Gt', ground_truth[i], 'deg', graph.nodes[i]['deg'], '!=', len(edgefromnode[ground_truth[i]]))
    # search type A12 P12, set edge gt type from ap1 to ap2 if node is not deg 3
    for i, n in enumerate(graph.edges()):
        ap12edgetype = [7, 8, 17, 18]
        if ground_truth_edge[i] in ap12edgetype:
            # if deg 3 p1/2 a1/2 exist, edge correct label
            # ap12edgetype-2 node type a1/2/acomm
            if ground_truth_edge[i] in [7, 8] and ground_truth_edge[i] - 2 in ground_truth and \
                    graph.nodes[ground_truth.index(ground_truth_edge[i] - 2)]['deg'] == 3:
                continue
            if ground_truth_edge[i] in [17, 18] and ground_truth_edge[i] + 2 in ground_truth and \
                    graph.nodes[ground_truth.index(ground_truth_edge[i] + 2)]['deg'] == 3:
                continue
            ground_truth_edge[i] = ground_truth_edge[i] + 2
            print('ap12 set to ap2', ground_truth_edge[i])
    gt = {}
    gt['nodes'] = ground_truth
    gt['edges'] = ground_truth_edge
    gt['name'] = graphname
    gtsr.append(gt)
## refinement using HR
from hr_utils import softmax_probs, find_node_connection_ids, findmaxprob, findallnei, find_nei_nodes
# test confident node acc
compset = set([1, 2, 3, 4, 5, 6, 7, 8, 17, 18, 19, 20])
# edgetype:[nodetypes]
edgemap = {11: [5, 6], 21: [19, 21], 22: [20, 22], 23: [9, 11], 24: [10, 12], 14: [15, 17], 15: [16, 17]}
# for M2 A2 P2, from proximal to distal, third number is edge type
fillmap = [[3, 7, 5], [4, 8, 6], [3, 5, 9], [4, 6, 10], [18, 19, 19], [18, 20, 20]]
center_node_prob_thres = 1e-10
fitacc = 0
errfitacc = 0
errlist = []
# refined output
refoutputs = []
starttime = datetime.datetime.now()
# Main HR loop: for each test graph, seed with "confident" node predictions
# (node type agrees with the set of predicted incident edge types), then
# hierarchically complete the remaining anatomy.
for ti in trange:
    graph = raw_graphs[ti]
    target = targets[ti]
    output = outputs[ti]
    # find vestype in degree 2
    deg2edge = []
    deg2node = []
    # Case name from the test DB path (strip a 7-character suffix/extension).
    graphname = os.path.basename(all_db['test'][selids[ti]])[:-7]
    print('=' * 40, '\n', 'ti', ti, graphname)
    # prob for all edges and nodes
    # output[-1] is the final GNN message-passing step; rows are softmaxed.
    probedges = softmax_probs(output[-1]["edges"])
    probnodes = softmax_probs(output[-1]["nodes"])
    prededges = np.argmax(output[-1]["edges"], axis=1)
    prednodes = np.argmax(output[-1]["nodes"], axis=1)
    targetnodes = np.argmax(target['nodes'], axis=1)
    edges = list(graph.edges())
    # key:nodeid, value:landmarkid
    node_type_to_id = {}
    prob_conf_type = {}
    coredge = [[] for i in range(output[-1]["edges"].shape[0])]
    # A node is "confident" when its predicted incident-edge types exactly
    # match the canonical edge set for its predicted node type.
    for nodei in graph.nodes():
        # probability of each edge
        probedge = {}
        # predicted as edge type
        prededge = []
        # neighboring edge ids
        nei_edge_ids = find_node_connection_ids(graph, nodei)
        for edgeid in nei_edge_ids:
            probedge[edgeid] = np.max(probedges[edgeid])
            prededge.append(prededges[edgeid])
        # node prob
        probnode = np.max(probnodes[nodei])
        prednode = prednodes[nodei]
        # print('node predict',np.argmax(probnodes[nodei]))
        # conf nodes predicted in [0,9,10,11,12,21,22] are not useful in hierachical framework
        if prednode not in [0] and sorted(prededge) == edgefromnode[prednode]:
            if prednode not in [0, 9, 10, 11, 12, 15, 16, 21, 22]:
                if prednode in node_type_to_id.keys():
                    # Duplicate confident prediction for the same type: keep
                    # the higher-probability node.
                    print('pred node type exist', probnode, 'exist prob', prob_conf_type[prednode])
                    if probnode < prob_conf_type[prednode]:
                        print('skip')
                        continue
                    else:
                        print('override')
                node_type_to_id[prednode] = nodei
                prob_conf_type[prednode] = probnode
            print('Fit edge set', prededge, 'node', prednode, 'gt', targetnodes[nodei])
            if targetnodes[nodei] != 0:
                fitacc += 1
        if prednode != targetnodes[nodei]:
            if targetnodes[nodei] != 0:
                errfitacc += 1
            errlist.append([prededge, prednode, targetnodes[nodei], all_db['test'][ti]])
    # from nodetype to nodeid
    node_id_to_type = {node_type_to_id[i]: i for i in node_type_to_id}
    print('key node unconfident', compset - set(node_id_to_type.values()))
    ##########################################
    # start with key nodes
    # anterior L(3) R(4)/posterior(18) circulation
    # center node type -> its three tree branch node types (root, major, minor).
    treenodes = {3: [1, 5, 7], 4: [2, 6, 8], 18: [17, 19, 20]}
    # Predict the three circulation "center" nodes (ICA/M1/A1 left=3, right=4,
    # basilar tip=18) from confident branch nodes, falling back to max
    # probability, then resolve their missing branch nodes.
    for center_node_type in [3, 4, 18]:
        failcenter = 0
        print('@@@Predicting centernode', center_node_type)
        # if center node not exist, find from neighbor first
        if center_node_type not in node_id_to_type.values():
            # from confident branch nodes find center nodes
            cofnodes = [i for i in node_id_to_type.values() if i in nodeconnection[center_node_type]]
            print('center node not confident, branch confident nodes', cofnodes)
            if len(cofnodes) == 0:
                print('whole circulation cannot be confidently predicted, use max prob')
                failcenter = 1
            elif len(cofnodes) == 1:
                # Walk outward from the single confident branch node along the
                # edge whose predicted type matches the expected vessel type.
                print('predict using single node')
                cedgetype = matchvestype(cofnodes[0], center_node_type)
                nei_edge_ids = list(graph.edges(node_type_to_id[cofnodes[0]]))
                nodestart = [edgei[1] for edgei in nei_edge_ids if
                             prededges[findedgeid(graph, edgei[0], edgei[1])] == cedgetype]
                assert len(nodestart) == 1
                otherbranchnodes = set(np.concatenate(list(treenodes.values()) + [list(treenodes.keys())])) - set(
                    treenodes[center_node_type])
                visited = [node_type_to_id[cofnodes[0]], nodestart[0]] + [node_type_to_id[i] for i in otherbranchnodes
                                                                          if i in node_type_to_id.keys()]
                print('exclude other branch nodeid',
                      [[i, node_type_to_id[i]] for i in otherbranchnodes if i in node_type_to_id.keys()])
                exp_edge_type = matchvestype(cofnodes[0], center_node_type)
                # neighbors of major nodes have degree of 1(branch length > 10),3,3 or 3,3,3
                center_node_pred = findmaxprob(graph, nodestart[0], visited, center_node_type,
                                               len(edgefromnode[center_node_type]), exp_edge_type,
                                               probnodes, branch_dist_mean, branch_dist_std, majornode=True)
            elif len(cofnodes) == 2:
                # Center node is the max-probability node on the path between
                # the two confident branch nodes.
                print('predict using two nodes')
                try:
                    # 1:-1 remove start and end node
                    pathnodeid = nx.shortest_path(graph, node_type_to_id[cofnodes[0]], node_type_to_id[cofnodes[1]])[
                                 1:-1]
                except nx.NetworkXNoPath:
                    print('no shortest path between confident nodes, skip')
                    failcenter = 1
                # NOTE(review): if NetworkXNoPath was raised above, pathnodeid
                # is undefined here and this line raises NameError — likely a
                # missing else/guard; confirm.
                center_node_pred = pathnodeid[np.argmax([probnodes[ni][center_node_type] for ni in pathnodeid])]
            elif len(cofnodes) == 3:
                # Center node is the unique common node of the three pairwise
                # shortest paths.
                print('predict using three nodes')
                center_node_pred_set = list(
                    set(nx.shortest_path(graph, node_type_to_id[cofnodes[0]], node_type_to_id[cofnodes[1]])) & \
                    set(nx.shortest_path(graph, node_type_to_id[cofnodes[0]], node_type_to_id[cofnodes[2]])) & \
                    set(nx.shortest_path(graph, node_type_to_id[cofnodes[1]], node_type_to_id[cofnodes[2]])))
                if len(center_node_pred_set) == 1:
                    center_node_pred = center_node_pred_set[0]
                elif len(center_node_pred_set) == 0:
                    print('no common node for three path')
                    failcenter = 1
                else:
                    # NOTE(review): center_node_pred is not set in this branch,
                    # so a stale value from a previous iteration may be used
                    # below; confirm intended.
                    print('center_node_pred_set has more than 1 node', center_node_pred_set)
            else:
                print('more than three nodes.possible?')
            low_prob_center = 0
            if failcenter:
                # center_node_pred = np.argmax([probnodes[i][center_node_type] for i in range(probnodes.shape[0])])
                # Fallback: highest-probability unassigned node.
                center_probs = {i: probnodes[i][center_node_type] for i in range(probnodes.shape[0]) if
                                i not in list(node_id_to_type.keys())}
                center_node_pred = sorted(center_probs.items(), key=lambda item: -item[1])[0][0]
                print('center predict by most prob')
            if probnodes[center_node_pred][center_node_type] < center_node_prob_thres or low_prob_center:
                # Center node likely absent: instead predict the branch nodes
                # directly (root first, then the major/minor branches), and
                # queue a degree-2 search for the missing center.
                print('center node prob too low', probnodes[center_node_pred][center_node_type],
                      ', this may because of missing such center node type. use default')
                # need to ensure branch nodes on ICA-M1, BA-P1 are oredicted
                # start from root nodes
                branch_node_type = treenodes[center_node_type][0]
                if branch_node_type not in node_type_to_id.keys():
                    print('Use max prob to predict root node type', branch_node_type)
                    if center_node_type in [3, 4]:
                        branch_probs = {i: probnodes[i][branch_node_type] for i in range(probnodes.shape[0]) if
                                        graph.nodes[i]['deg'] == 1 and i not in list(node_id_to_type.keys())}
                    else:
                        branch_probs = {i: probnodes[i][branch_node_type] for i in range(probnodes.shape[0]) if
                                        i not in list(node_id_to_type.keys())}
                    branch_node_pred = sorted(branch_probs.items(), key=lambda item: -item[1])[0][0]
                    if probnodes[branch_node_pred][branch_node_type] < center_node_prob_thres:
                        print('no prob over thres for branch node type', branch_node_type)
                    else:
                        print('Branch node type', branch_node_type, 'nodeid', branch_node_pred, 'gt',
                              targetnodes[branch_node_pred])
                        node_type_to_id[branch_node_type] = branch_node_pred
                        node_id_to_type[branch_node_pred] = branch_node_type
                # A1 can be missing, M1 cannot
                if center_node_type in [3, 4]:
                    branch_node_type2 = treenodes[center_node_type][1]
                    if branch_node_type2 not in node_type_to_id.keys():
                        print('Use max prob to predict root node type', branch_node_type2)
                        if branch_node_type in node_type_to_id.keys():
                            visited = [node_type_to_id[branch_node_type]] + list(node_type_to_id.keys())
                            print('search neighbors of branch nodeid', node_type_to_id[branch_node_type])
                            nodestart = [i[1] for i in list(graph.edges(node_type_to_id[branch_node_type]))]
                            assert len(nodestart) == 1
                            # NOTE(review): argument order differs from the
                            # findmaxprob calls above/below (no exp_edge_type
                            # here) — verify against the hr_utils signature.
                            branch_node_pred2 = findmaxprob(graph, nodestart[0], visited, branch_node_type2, \
                                                            len(edgefromnode[branch_node_type2]), probnodes,
                                                            branch_dist_mean, branch_dist_std)
                        else:
                            print('Root Branch not confident, use max prob for node type', branch_node_type2)
                            branch_probs2 = {i: probnodes[i][branch_node_type2] for i in range(probnodes.shape[0]) if
                                             graph.nodes[i]['deg'] == 3 and i not in list(node_id_to_type.keys())}
                            branch_node_pred2 = sorted(branch_probs2.items(), key=lambda item: -item[1])[0][0]
                        print('Minor Branch node type', branch_node_type2, 'nodeid', branch_node_pred2, 'gt',
                              targetnodes[branch_node_pred2])
                        node_type_to_id[branch_node_type2] = branch_node_pred2
                        node_id_to_type[branch_node_pred2] = branch_node_type2
                        # tentative A1
                        branch_node_type3 = treenodes[center_node_type][2]
                        if branch_node_type3 not in node_type_to_id.keys():
                            print('Pred A1')
                            visited = list(node_type_to_id.keys())
                            print('search neighbors of branch nodeid', node_type_to_id[branch_node_type2])
                            branch_node_pred2 = findmaxprob(graph, node_type_to_id[branch_node_type2], visited, \
                                                            branch_node_type3, \
                                                            len(edgefromnode[branch_node_type3]), probnodes,
                                                            branch_dist_mean, branch_dist_std)
                            print('prob a1/2', probnodes[branch_node_pred2][branch_node_type2])
                            if probnodes[branch_node_pred2][branch_node_type2] < center_node_prob_thres:
                                print('no prob over thres for branch node type', branch_node_type2)
                            else:
                                print('Branch node type', branch_node_type2, 'nodeid', branch_node_pred2, 'gt',
                                      targetnodes[branch_node_pred2])
                                node_type_to_id[branch_node_type2] = branch_node_pred2
                                node_id_to_type[branch_node_pred2] = branch_node_type2
                                # add center node back
                                try:
                                    spam = nx.shortest_path(graph, node_type_to_id[branch_node_type2],
                                                            node_type_to_id[branch_node_type3])[1:-1]
                                    if center_node_pred in spam:
                                        print('center node in sp between A1 M1, confirm type', center_node_type,
                                              'nodeid', center_node_pred, 'gt', targetnodes[center_node_pred])
                                        node_type_to_id[center_node_type] = center_node_pred
                                        node_id_to_type[center_node_pred] = center_node_type
                                except nx.NetworkXNoPath:
                                    print('no shortest path between A1 M1 nodes, skip')
                # Queue a degree-2 search for the missing center node/edge.
                if center_node_type in [3, 4]:
                    # if ICA root exist, ICA search deg2
                    if center_node_type + 4 in node_type_to_id.keys() and center_node_type - 2 not in node_type_to_id.keys():
                        # M1 edge is center_node_type
                        deg2edge.append(center_node_type)
                    else:
                        # ICA edge is center_node_type-2
                        deg2edge.append(center_node_type - 2)
                    deg2node.append(center_node_type)
                elif center_node_type == 18:
                    # BA search deg2
                    deg2edge.append(16)
                    deg2node.append(center_node_type)
                continue
            else:
                print('pred centernode as nodeid', center_node_pred, 'prob',
                      probnodes[center_node_pred][center_node_type], 'gt', targetnodes[center_node_pred])
                node_type_to_id[center_node_type] = center_node_pred
                node_id_to_type[center_node_pred] = center_node_type
        else:
            print('Centernode confident')
        # With the center fixed, assign its still-missing branch node types by
        # trying every permutation of unclaimed neighbor nodes and keeping the
        # highest total probability.
        nei_edges = find_node_connection_ids(graph, node_type_to_id[center_node_type])
        branch_node_types = nodeconnection[center_node_type]
        print('branch_node_types', branch_node_types)
        nei_node_ids = [i[1] for i in graph.edges(node_type_to_id[center_node_type])]
        print('nei_node_ids', nei_node_ids)
        branch_match_type = list(set(branch_node_types) & set(node_id_to_type.values()))
        branch_miss_type = list(set(branch_node_types) - set(node_id_to_type.values()))
        print('confident branch node type', branch_match_type, 'miss', branch_miss_type)
        # remove match branch nodeid
        for bi in branch_match_type:
            sp = nx.shortest_path(graph, node_type_to_id[center_node_type], node_type_to_id[bi])
            print('path for type', bi, sp)
            for neini in nei_node_ids:
                if neini in sp:
                    del nei_node_ids[nei_node_ids.index(neini)]
                    # print('remove node id',neini)
        print('nei node id remaining', nei_node_ids)
        if len(nei_node_ids) != len(branch_miss_type):
            print('nei_node_ids', nei_node_ids, 'not missing mactch branch type', branch_miss_type)
            if len(nei_node_ids) < len(branch_miss_type):
                print('ERR, len(nei_node_ids)<len(branch_miss_type), abort')
                continue
        if len(nei_node_ids):
            # finding node from remaining branch
            # matchnodeids = matchbranchtype(nei_node_ids,branch_miss_type,node_type_to_id[center_node_type])
            matchnodeidsperms = list(permutations(nei_node_ids, len(branch_miss_type)))
            maxbranchmatch = 0
            maxbranchnodeid = None
            for matchnodeids in matchnodeidsperms:
                print('--Test branch case', matchnodeids)
                cbranchmatch = 0
                cbranchnodeid = []
                for bi, branchtypei in enumerate(branch_miss_type):
                    # find best matching nei node id
                    nodestart = matchnodeids[bi]
                    print('branchtypei', branchtypei, 'max match node id ', nodestart)
                    visited = [node_type_to_id[center_node_type], nodestart] + list(node_type_to_id.values())
                    exp_edge_type = matchvestype(branchtypei, center_node_type)
                    # print('visited nodes id',visited,'exp ves type',exp_edge_type,'exp deg',len(edgefromnode[branchtypei]))
                    # ba/va can be either 1 or 3 in degree
                    if branchtypei == 17:
                        tnodeid = findmaxprob(graph, nodestart, visited, branchtypei, None, probnodes, branch_dist_mean,
                                              branch_dist_std, exp_edge_type)
                    else:
                        tnodeid = findmaxprob(graph, nodestart, visited, branchtypei, len(edgefromnode[branchtypei]),
                                              probnodes, branch_dist_mean, branch_dist_std, exp_edge_type)
                    cbranchmatch += probnodes[tnodeid][branchtypei]
                    cbranchnodeid.append(tnodeid)
                    print('pred branch', branchtypei, 'nodeid', tnodeid, 'prob', cbranchmatch)
                # restrict certain type matches
                if cbranchmatch > maxbranchmatch:
                    maxbranchmatch = cbranchmatch
                    maxbranchnodeid = cbranchnodeid
            # Commit the best permutation; demote branches whose path length is
            # an outlier (>1.5 sigma) to a degree-2 search.
            for bi in range(len(maxbranchnodeid)):
                tnodeid = maxbranchnodeid[bi]
                branchtypei = branch_miss_type[bi]
                # check A1/2 P1/2 deg2
                if branchtypei in [5, 6, 19, 20]:
                    cdist = 0
                    sp = nx.shortest_path(graph, node_type_to_id[center_node_type], tnodeid)
                    for spi in range(1, len(sp)):
                        cdist += graph.edges()[sp[spi - 1], sp[spi]]['dist']
                    edgetype = matchvestype(branchtypei, center_node_type)
                    if cdist > branch_dist_mean[edgetype] + 1.5 * branch_dist_std[edgetype] or \
                            cdist < branch_dist_mean[edgetype] - 1.5 * branch_dist_std[edgetype]:
                        print(tnodeid, 'branch dist', cdist, 'over thres', branch_dist_mean[edgetype],
                              branch_dist_std[edgetype])
                        print('add deg2 edge', edgetype, 'node', branchtypei)
                        deg2edge.append(edgetype)
                        deg2node.append(branchtypei)
                        tnodeid = sp[1]
                        print('tnodeid change to nearest', tnodeid)
                node_id_to_type[tnodeid] = branchtypei
                node_type_to_id[branchtypei] = tnodeid
                print('###Best pred branch', branchtypei, 'nodeid', tnodeid, 'gt', targetnodes[tnodeid])
    # check p1/2 left right
    # Left/right sanity check for the P1/P2 junctions: in the iCafe coordinate
    # frame the left node should have the larger x; swap labels if violated.
    if 19 in node_type_to_id.keys() and 20 in node_type_to_id:
        op12lnodeid = node_type_to_id[19]
        op12rnodeid = node_type_to_id[20]
        p12lnodex = graph.nodes[op12lnodeid]['pos'][0]
        p12rnodex = graph.nodes[op12rnodeid]['pos'][0]
        # left is larger in icafe axis, if p12L x cordinate is smaller, means error
        if p12lnodex < p12rnodex:
            print('P12LR swap')
            node_type_to_id[19] = op12rnodeid
            node_type_to_id[20] = op12lnodeid
            node_id_to_type[op12lnodeid] = 20
            node_id_to_type[op12rnodeid] = 19
    # check major branch between M1 ICA/M1/A1, and A1 ICA/M1/A1
    # If an intermediate node on the proximal path carries a large subtree
    # (>4 nodes), it is the more plausible M1/2 (or A1/2) junction — relocate
    # the label there.
    for mtype in [5, 6, 7, 8]:
        if mtype in [5, 6]:
            # ICA/M1/A1 -2
            icamatype = mtype - 2
        if mtype in [7, 8]:
            # ICA/M1/A1 -4
            icamatype = mtype - 4
        if icamatype not in node_type_to_id.keys() or mtype not in node_type_to_id.keys():
            continue
        spm1 = nx.shortest_path(graph, node_type_to_id[icamatype], node_type_to_id[mtype])
        if len(spm1) > 2:
            print('AM12', mtype, 'nodes between AM1 ICA/M1/A1', len(spm1))
            # current m2 nodes, spm1[-2] is closest node to m1/2
            keyids = [nodeids for nodeids in node_type_to_id.values()]
            fillnodes, filledges = findallnei(graph, node_type_to_id[mtype], [spm1[-2]] + keyids)
            print('current M2 # nodes', len(fillnodes))
            # search nodes between M1/2 and ICA/M1/A1 with most neighbor nodes
            bestm1 = node_type_to_id[mtype]
            nodesm2 = 0
            for pm1 in spm1[1:-1]:
                fillnodes, filledges = findallnei(graph, pm1, spm1 + keyids)
                print(pm1, 'Potential AM1 in between has ', len(fillnodes), 'nodes')
                if len(fillnodes) > 4:
                    bestm1 = pm1
                    nodesm2 = len(fillnodes)
                    break
            if nodesm2 > 4:
                print('AM2L node', bestm1, 'is a major branch, node', bestm1, 'set to', mtype)
                # replace current m1/2
                del node_id_to_type[node_type_to_id[mtype]]
                node_type_to_id[mtype] = bestm1
                node_id_to_type[bestm1] = mtype
    remain_keynodes = list(compset - set(node_type_to_id.keys()))
    print('key set remaining', remain_keynodes)
    ##########################################
    # additional node connection based on main tree
    # acomm check
    # Anterior communicating artery: verify a clean path between the A1/2
    # junctions (types 5 and 6); otherwise queue a degree-2 search (edge 11).
    if 5 in node_type_to_id.keys() and 6 in node_type_to_id.keys():
        try:
            sp = nx.shortest_path(graph, node_type_to_id[5], node_type_to_id[6])
            if len(sp) >= 2:
                for spi in sp[1:-1]:
                    if spi in node_type_to_id.values():
                        print('shortest acomm path come through confident nodes, abort')
                        deg2edge.append(11)
                        break
            else:
                deg2edge.append(11)
            print('len between a12lr is', len(sp))
            if 11 not in deg2edge:
                acommdist = nodedist(graph, node_type_to_id[5], node_type_to_id[6])
                print('acomm dist', acommdist)
                # Path far longer than the Acomm statistics (>3 sigma) suggests
                # overlapping A2 segments rather than a true Acomm.
                if acommdist > branch_dist_mean[11] + 3 * branch_dist_std[11]:
                    print('likely to have overlap in A2LR')
                    deg2edge.append(11)
        except nx.NetworkXNoPath:
            print('5,6 not connected, deg2 search needed fo edge 11 and node 5, 6')
            deg2edge.append(11)
        # A valid Acomm path makes the deg-2 searches for nodes 5/6 redundant.
        if 11 not in deg2edge:
            if 5 in deg2node:
                del deg2node[deg2node.index(5)]
                del deg2edge[deg2edge.index(7)]
                print('acomm has path, remove deg 2 search for node 5')
            if 6 in deg2node:
                del deg2node[deg2node.index(6)]
                del deg2edge[deg2edge.index(8)]
                print('acomm has path, remove deg 2 search for node 6')
            spa1l = nx.shortest_path(graph, node_type_to_id[5], node_type_to_id[3])
            spa1r = nx.shortest_path(graph, node_type_to_id[6], node_type_to_id[4])
            # check common path between acomm and a1
            if len(sp) > 2:
                for spi in sp[1:-1]:
                    # in case a1/2 is more distal than expected
                    if spi in spa1l:
                        print('set a1L/acomm to last common path', spi)
                        node_type_to_id[5] = spi
                        node_id_to_type[spi] = 5
                    if spi in spa1r:
                        print('set a1R/acomm to last common path', spi)
                        node_type_to_id[6] = spi
                        node_id_to_type[spi] = 6
    # Check OA
    # Ophthalmic artery: scan the ICA path (root 1/2 to ICA/M1/A1 3/4) for a
    # side node whose single off-path neighbor is degree-1 and anterior
    # (smaller y) — the best-probability candidate becomes the OA/ICA junction
    # (type 9/10), and its distal end gets type +2 (11/12).
    for oanodetype in [9, 10]:
        if oanodetype == 9:
            if 1 not in node_type_to_id.keys() or 3 not in node_type_to_id.keys():
                continue
            sp = nx.shortest_path(graph, node_type_to_id[1], node_type_to_id[3])
        elif oanodetype == 10:
            if 2 not in node_type_to_id.keys() or 4 not in node_type_to_id.keys():
                continue
            sp = nx.shortest_path(graph, node_type_to_id[2], node_type_to_id[4])
        else:
            print('no such oanodetype')
            sp = []
        maxprob = 0
        oaid = -1
        if len(sp) > 2:
            for spid in range(1, len(sp) - 1):
                spi = sp[spid]
                # find oa from nei of oa/ica
                neinodes = list(set(find_nei_nodes(graph, spi)) - set([sp[spid - 1], sp[spid + 1]]))
                if len(neinodes) == 0:
                    print('Self loop', spi, 'ERR NEED CORR')
                    continue
                if len(neinodes) > 1:
                    print('node 3+ at oa/ica', neinodes)
                # oa to the front
                print(spi, 'oa/ica search deg', graph.nodes[neinodes[0]]['deg'], graph.nodes[neinodes[0]]['pos'],
                      graph.nodes[spi]['pos'])
                if graph.nodes[neinodes[0]]['deg'] == 1 and graph.nodes[neinodes[0]]['pos'][1] < \
                        graph.nodes[spi]['pos'][1]:
                    # if prednodes[spi]==oanodetype:
                    # print('oa/ica prob',probnodes[spi][oanodetype])
                    if probnodes[spi][oanodetype] > maxprob:
                        # print('max',spi)
                        maxprob = probnodes[spi][oanodetype]
                        oaid = spi
        if oaid != -1:
            print(oanodetype, 'oai/ica node', oaid, 'gt', targetnodes[oaid])
            node_type_to_id[oanodetype] = oaid
            node_id_to_type[oaid] = oanodetype
            # find ending node of oa
            neinodeids = [i[1] for i in graph.edges(oaid) if i[1] not in sp]
            assert len(neinodeids) == 1
            visited = [oaid, neinodeids[0]]
            exp_edge_type = matchvestype(oanodetype, oanodetype + 2)
            # oa end id +2
            oaendid = findmaxprob(graph, neinodeids[0], visited, oanodetype + 2, 1, probnodes, branch_dist_mean,
                                  branch_dist_std, exp_edge_type)
            print(oanodetype + 2, 'oai end node', oaendid)
            node_type_to_id[oanodetype + 2] = oaendid
            node_id_to_type[oaendid] = oanodetype + 2
    # Check Pcomm
    # Posterior communicating artery: locate the Pcomm/ICA junction (21/22) as
    # the first node shared between the P1/2-to-anterior path and the
    # anterior-to-ICA-root path.
    for pcommnodetype in [21, 22]:
        # -2 is pcomm/p1/p2 node type
        if pcommnodetype - 2 not in node_type_to_id.keys():
            continue
        # -18 is ICA/M1/A1 node type
        # Anterior anchor preference: ICA/M1/A1, then M1/2 (-14), then A1/2 (-16).
        antnodeid = pcommnodetype - 18
        if antnodeid not in node_type_to_id.keys():
            print('no ica/mca/aca')
            antnodeid = pcommnodetype - 14
            if antnodeid not in node_type_to_id.keys():
                antnodeid = pcommnodetype - 16
                if antnodeid not in node_type_to_id.keys():
                    print('no a1/2, m1/2 and ica/mca/aca, skip')
                    continue
        if pcommnodetype - 20 not in node_type_to_id.keys():
            print('no ica root, skip')
            continue
        try:
            sp = nx.shortest_path(graph, node_type_to_id[pcommnodetype - 2], node_type_to_id[antnodeid])
            # shortest path should not include PCA/BA
            if 18 in node_type_to_id.keys() and node_type_to_id[18] in sp:
                print('has path from p1/2 to anterior, but through pca/ba, skip')
                continue
            # if p1/2 exist in deg2, remove
            if pcommnodetype - 2 in deg2node:
                del deg2node[deg2node.index(pcommnodetype - 2)]
                del deg2edge[deg2edge.index(pcommnodetype - 4)]
                print('pcomm has path, remove deg 2 search for node p1/2')
        except nx.NetworkXNoPath:
            print(pcommnodetype, 'pcomm missing, and p1/2 deg2 search needed')
            # no need to add deg2 for P1, as some p1/2 exist but not connect to pcomm
            continue
        spica = nx.shortest_path(graph, node_type_to_id[antnodeid], node_type_to_id[pcommnodetype - 20])
        print('spica', spica, 'sp pos to ant', sp)
        assert len(set(spica) & set(sp)) > 0
        # First node of sp that also lies on the ICA path is the junction.
        for pcommnodeid in sp:
            if pcommnodeid in spica:
                break
        print(pcommnodetype, 'pcomm/ica node id', pcommnodeid)
        node_type_to_id[pcommnodetype] = pcommnodeid
        node_id_to_type[pcommnodeid] = pcommnodetype
    # Check VA
    # Vertebral arteries: from a degree-3 BA/VA junction (17), find or verify
    # the two VA root endpoints (15 = left, 16 = right) and enforce the
    # left/right convention on the x coordinate.
    if 17 in node_type_to_id.keys() and graph.nodes[node_type_to_id[17]]['deg'] == 3:
        # check exisiting conf node type 15 16 compatibility
        for va_cf_type in [15, 16]:
            if va_cf_type not in node_type_to_id.keys():
                continue
            try:
                sp = nx.shortest_path(graph, node_type_to_id[17], node_type_to_id[va_cf_type])
            except nx.NetworkXNoPath:
                print('va through conf nodes are not connected, remove conf node va root')
                del node_id_to_type[node_type_to_id[va_cf_type]]
                del node_type_to_id[va_cf_type]
        if 15 not in node_type_to_id.keys():
            # BA/VA and PCA/BA
            visited = list(node_type_to_id.values())
            vaendid = findmaxprob(graph, node_type_to_id[17], visited, 15, 1, probnodes, branch_dist_mean,
                                  branch_dist_std)
            print(15, 'VAL end node', vaendid, 'gt', targetnodes[vaendid])
            node_type_to_id[15] = vaendid
            node_id_to_type[vaendid] = 15
        if 16 not in node_type_to_id.keys():
            visited = list(node_type_to_id.values())
            vaendid = findmaxprob(graph, node_type_to_id[17], visited, 16, 1, probnodes, branch_dist_mean,
                                  branch_dist_std)
            print(16, 'VAR end node', vaendid, 'gt', targetnodes[vaendid])
            node_type_to_id[16] = vaendid
            node_id_to_type[vaendid] = 16
        # check LR
        if 15 in node_type_to_id.keys() and 16 in node_type_to_id.keys():
            valnodeid = node_type_to_id[15]
            varnodeid = node_type_to_id[16]
            if graph.nodes[node_type_to_id[15]]['pos'][0] < graph.nodes[node_type_to_id[16]]['pos'][0]:
                print('VALR swap')
                node_type_to_id[15] = varnodeid
                node_type_to_id[16] = valnodeid
                node_id_to_type[valnodeid] = 16
                node_id_to_type[varnodeid] = 15
    # in full graph
    # TODO
    if len(deg2edge):
        print('#Deg 2 search edge', deg2edge, 'node', deg2node)
    # Rebuild the reverse map in case two types were assigned the same node.
    if len(node_id_to_type) != len(node_type_to_id):
        print('len(node_id_to_type)!=len(node_type_to_id), conflict of nodes')
        node_id_to_type = {node_type_to_id[i]: i for i in node_type_to_id}
    ##########################################
    # apply confident predictions to confoutput
    # Build one-hot refined outputs: assigned nodes get their type, others fall
    # back to raw probabilities (if the GNN said background) or to background.
    confoutput = {}
    confoutput['nodes'] = np.zeros(probnodes.shape)
    confoutput['edges'] = np.zeros(probedges.shape)
    for nodei in range(confoutput['nodes'].shape[0]):
        if nodei in node_id_to_type.keys() and node_id_to_type[nodei] not in deg2node:
            confoutput['nodes'][nodei][node_id_to_type[nodei]] = 1
        else:
            if nodei in node_id_to_type.keys() and node_id_to_type[nodei] in deg2node:
                print('nodei in deg2node', node_id_to_type[nodei], 'skip setting node id')
            if prednodes[nodei] == 0:
                confoutput['nodes'][nodei] = probnodes[nodei]
            else:
                # set as non type if original max prob not nontype
                confoutput['nodes'][nodei][0] = 1
    # fill edge according to node
    # Label every edge on the shortest path between each connected pair of
    # assigned node types with the vessel type implied by the pair.
    for nodetypei in range(len(nodeconnection)):
        if len(nodeconnection[nodetypei]) == 0:
            continue
        if nodetypei not in node_id_to_type.values():
            continue
        for branchnodetypei in nodeconnection[nodetypei]:
            if branchnodetypei not in node_id_to_type.values():
                continue
            edgetype = matchvestype(nodetypei, branchnodetypei)
            if edgetype in [7, 8] and 11 in deg2edge and branchnodetypei in deg2node:
                print('edge', edgetype, 'needs deg 2 prediction, set edge to distal type')
                edgetype += 2
            if edgetype in [17, 18] and edgetype in deg2edge and branchnodetypei in deg2node:
                print('edge', edgetype, 'needs deg 2 prediction, set edge to distal type')
                edgetype += 2
            try:
                sp = nx.shortest_path(graph, node_type_to_id[nodetypei], node_type_to_id[branchnodetypei])
            except nx.NetworkXNoPath:
                # NOTE(review): no continue here — the loop below reuses a
                # stale `sp` from a previous pair (or NameErrors on the first);
                # likely a missing `continue`. Confirm.
                print('no shortest path between connection nodes, skip', nodetypei, branchnodetypei)
            # print('sp',sp)
            for spi in range(1, len(sp)):
                edgei = findedgeid(graph, sp[spi - 1], sp[spi])
                if edgei != -1:
                    confoutput['edges'][edgei][edgetype] = 1
                else:
                    print('cannot find id for edge', sp[spi - 1], sp[spi])
    # fill additional edges based on node types
    # Same idea for the extra pairs in edgemap (Acomm, Pcomm, OA, VA), but stop
    # if the path runs through another confident node.
    for edgetype, nodetypes in edgemap.items():
        if nodetypes[0] in node_type_to_id.keys() and nodetypes[1] in node_type_to_id.keys():
            try:
                sp = nx.shortest_path(graph, node_type_to_id[nodetypes[0]], node_type_to_id[nodetypes[1]])
                for spid in range(1, len(sp)):
                    spi = sp[spid - 1]
                    if spi in list(set(node_type_to_id.keys()) - set(
                            [node_type_to_id[nodetypes[0]], node_type_to_id[nodetypes[1]]])):
                        # print(edgetype,nodetypes,'path through other confident nodes')
                        break
                    spj = sp[spid]
                    edgei = findedgeid(graph, spi, spj)
                    if edgei != -1:
                        print(edgetype, nodetypes, 'label edgei', edgei, 'edgetype', edgetype)
                        confoutput['edges'][edgei][edgetype] = 1
                    else:
                        print('cannot find edge id, possible?', spi, spj)
            except nx.NetworkXNoPath:
                print('no path between edgetype,nodetypes', edgetype, nodetypes)
                continue
    # fill M2 A2 P2
    # add keyids to visted list to avoid propogate through acomm pcomm
    keyids = []
    for nodeids in node_type_to_id.values():
        keyids.append(nodeids)
    print('keyids', node_id_to_type, node_type_to_id)
    print('keyids', keyids)
    # Propagate distal-segment labels (M2/A2/P2): flood-fill the subtree beyond
    # each proximal->distal key-node pair; fi = [proximal type, distal type,
    # edge type] per fillmap.
    for fi in fillmap:
        if fi[1] not in node_type_to_id.keys() or fi[0] not in node_type_to_id.keys():
            continue
        sp = nx.shortest_path(graph, node_type_to_id[fi[1]], node_type_to_id[fi[0]])
        assert len(sp) >= 2
        fillnodes, filledges = findallnei(graph, node_type_to_id[fi[1]], sp[:2] + keyids)
        # print('fill node/edge',fi,fillnodes,filledges)
        # node set 0
        for nodeid in fillnodes:
            if np.argmax(confoutput['nodes'][nodeid]) != 0:
                print('node already assigned', nodeid, 'no fill needed')
                continue
            nodez = np.zeros((BOITYPENUM))
            nodez[0] = 1
            confoutput['nodes'][nodeid] = nodez
        # edge set to edgetype
        edgetype = fi[2]
        for edgeid in filledges:
            edgei = findedgeid(graph, edgeid[0], edgeid[1])
            if np.argmax(confoutput['edges'][edgei]) != 0:
                print(edgei, 'assign', edgetype, 'edge already assigned to ', np.argmax(confoutput['edges'][edgei]))
                # Conflict between A2L and A2R fills means the left/right A2
                # trees touch — resolve by distance to the A1/2 junctions.
                if np.argmax(confoutput['edges'][edgei]) in [9, 10] and edgetype in [9, 10]:
                    print('ERR NEED CORR, A2 touch')
                    # a2LR touches, compare dist to A1/2 L and R
                    edgenodei = list(graph.edges())[edgei][0]
                    cdistL = nodedist(graph, edgenodei, node_type_to_id[5])
                    cdistR = nodedist(graph, edgenodei, node_type_to_id[6])
                    edgez = np.zeros((VESTYPENUM))
                    if cdistL < cdistR:
                        print('A2LR touch, set to A2L')
                        edgez[9] = 1
                    else:
                        print('A2LR touch, set to A2R')
                        edgez[10] = 1
                    confoutput['edges'][edgei] = edgez
            else:
                edgez = np.zeros((VESTYPENUM))
                edgez[edgetype] = 1
                confoutput['edges'][edgei] = edgez
    # fill remaining with prob if not exist, if pred non-type, force to set zero type
    # Any still-unlabeled edge: fold M1/2-distal classes (12/13) into M2 (5/6),
    # then test loop connectivity to each distal key node; otherwise keep the
    # (adjusted) raw edge probabilities.
    for edgei in range(confoutput['edges'].shape[0]):
        # if unset, check connection to nodetype M12 A12 P12, set to closest
        if np.max(confoutput['edges'][edgei]) == 0:
            # if prednodes[nodei]==0:
            cprobedge = probedges[edgei]
            if np.argmax(cprobedge) == 12:
                cprobedge[5] += cprobedge[12]
                cprobedge[12] = 0
            if np.argmax(cprobedge) == 13:
                cprobedge[6] += cprobedge[13]
                cprobedge[13] = 0
            enodei = list(graph.edges())[edgei][0]
            # Each try below: if this edge's node reaches a distal key node
            # without passing the expected proximal junction, the edge sits on
            # a loop and is forced to the corresponding distal type. Bare
            # except also swallows KeyError when the key node is missing.
            try:
                sp = nx.shortest_path(graph, enodei, node_type_to_id[7])
                if node_type_to_id[3] not in sp:
                    print(edgei, 'has loop, remaining edge set to m2l')
                    zprobedge = np.zeros((VESTYPENUM))
                    zprobedge[5] = 1
                    confoutput['edges'][edgei] = zprobedge
                    continue
            except:
                pass
            try:
                sp = nx.shortest_path(graph, enodei, node_type_to_id[8])
                if node_type_to_id[4] not in sp:
                    print(edgei, 'has loop, remaining edge set to m2r')
                    zprobedge = np.zeros((VESTYPENUM))
                    zprobedge[6] = 1
                    confoutput['edges'][edgei] = zprobedge
                    continue
            except:
                pass
            try:
                sp = nx.shortest_path(graph, enodei, node_type_to_id[5])
                if node_type_to_id[3] not in sp and node_type_to_id[4] not in sp:
                    print(edgei, 'has loop, remaining edge set to a2l')
                    zprobedge = np.zeros((VESTYPENUM))
                    zprobedge[7] = 1
                    confoutput['edges'][edgei] = zprobedge
                    continue
            except:
                pass
            try:
                sp = nx.shortest_path(graph, enodei, node_type_to_id[6])
                if node_type_to_id[3] not in sp and node_type_to_id[4] not in sp:
                    print(edgei, 'has loop, remaining edge set to a2r')
                    zprobedge = np.zeros((VESTYPENUM))
                    zprobedge[8] = 1
                    confoutput['edges'][edgei] = zprobedge
                    continue
            except:
                pass
            try:
                sp = nx.shortest_path(graph, enodei, node_type_to_id[19])
                if node_type_to_id[18] not in sp and node_type_to_id[3] not in sp:
                    print(edgei, 'has loop, remaining edge set to p2l')
                    zprobedge = np.zeros((VESTYPENUM))
                    zprobedge[19] = 1
                    confoutput['edges'][edgei] = zprobedge
                    continue
            except:
                pass
            try:
                sp = nx.shortest_path(graph, enodei, node_type_to_id[20])
                if node_type_to_id[18] not in sp and node_type_to_id[4] not in sp:
                    print(edgei, 'has loop, remaining edge set to p2r')
                    zprobedge = np.zeros((VESTYPENUM))
                    zprobedge[20] = 1
                    confoutput['edges'][edgei] = zprobedge
                    continue
            except:
                pass
            # if not connected to any of the 1/2 branch, set to edge pred value
            confoutput['edges'][edgei] = cprobedge
            if np.argmax(cprobedge) != 0:
                print('remaining edge', edgei, np.argmax(cprobedge))
        # else:
        # #set as non type if original max prob not nontype
        # confoutput['edges'][edgei][0] = 1
    # Deep-copy so later mutation of confoutput does not alter stored results.
    refoutputs.append(copy.deepcopy(confoutput))
    print('node type to id', node_type_to_id)
# --- Refinement timing and confident-node summary ---
endtime = datetime.datetime.now()
# Average refinement wall-clock seconds per test case.
reftime = (endtime - starttime).total_seconds() / len(all_db['test'])
print('time per case', reftime)
print('confnodes', fitacc, 'err', errfitacc)
# mean confident node per case
# BUG FIX: the original line computed `fitacc/len(all_db['test'])` and
# discarded the result (a leftover notebook display cell, a no-op in a
# script); bind and report the value instead.
mean_conf_nodes = fitacc / len(all_db['test'])
print('mean confident nodes per case', mean_conf_nodes)
print(errlist)
# Check and display results
## check and display HR results
# --- Evaluation setup: compare refined (or raw) predictions to ground truth ---
gts = gtsr
# using refinement
predoutputs = refoutputs
# direct from GNN
# predoutputs = rawoutputs
# display figure
DSP = 0
# cases solve key nodes
solvekey = 0
# cases identify key nodes using the three tree.
identifykey = 0
# cases solve all nodes
solveall = 0
# eval
# confusion matrix
from cnfx import cnfx
# test set
# confusion matrix for all nodes/edges
cnfx_node_all = []
cnfx_edge_all = []
# case based detection
# One 2x2 (present/absent) confusion matrix per node type.
cnfx_node_case = [cnfx(size=2) for i in range(BOITYPENUM)]
# list of node/edge acc for each scan
nodeacc_all = []
edgeacc_all = []
# list of scans with solved nodes/edges, True or False
nodesolve = []
assert len(gts) == len(predoutputs)
# Per-case evaluation of the (refined) predictions against ground truth:
# builds one node and one edge confusion matrix per case, accumulates
# per-type detection counts, and optionally plots the annotated graph.
for ti in range(len(predoutputs)):
    confoutput = predoutputs[ti]
    # predicted class = argmax over each per-node / per-edge probability vector
    pred = [np.argmax(ni) for ni in confoutput["nodes"]]
    pred_edge = [np.argmax(ni) for ni in confoutput["edges"]]
    ground_truth = gts[ti]['nodes']
    ground_truth_edge = gts[ti]['edges']
    graph = raw_graphs[trange[ti]]
    target = targets[trange[ti]]
    # output = outputs[ti]
    graphname = gts[ti]['name']
    print('=' * 40, '\n', 'ti', ti, graphname)
    if DSP:
        fig = plt.figure(101, figsize=(12, 4))
        fig.clf()
    node_size = 80
    # fresh confusion matrices for this case
    cnfx_node = cnfx(size=BOITYPENUM)
    cnfx_edge = cnfx(size=VESTYPENUM)
    color = {}
    nodeerr = 0
    # node-level comparison: RGB [0,0,1] (blue) = correct, [1,0,0] (red) = wrong
    for i, n in enumerate(graph.nodes()):
        if ground_truth[i] == pred[i]:
            color[n] = np.array([0.0, 0.0, 1.0])
        else:
            print(i, 'node', ground_truth[i], pred[i])
            color[n] = np.array([1.0, 0.0, 0.0])
            nodeerr += 1
        cnfx_node.add(ground_truth[i], pred[i])
    # Per-type binary detection counts: only nodes where either the ground
    # truth or the prediction matches the type contribute an entry.
    # NOTE(review): unlike the raw-GNN evaluation loop later in this file,
    # a type absent from both ground truth and prediction adds no (0, 0)
    # entry here — presumably intentional; confirm before comparing counts.
    for typei in range(BOITYPENUM):
        if typei in ground_truth:
            for i in range(len(ground_truth)):
                if ground_truth[i] != typei and pred[i] != typei:
                    continue
                cnfx_node_case[typei].add(int(ground_truth[i] == typei), int(pred[i] == typei))
    color_edge = {}
    edgeerr = 0
    # edge-level comparison; ground-truth type 0 is drawn green, skipped,
    # and excluded from the edge confusion matrix.
    for i, n in enumerate(graph.edges()):
        if ground_truth_edge[i] == 0:
            color_edge[n] = np.array([0.0, 1.0, 0.0])
            continue
        elif ground_truth_edge[i] == pred_edge[i]:
            color_edge[n] = np.array([0.0, 0.0, 1.0])
        else:
            print(i, 'edge', ground_truth_edge[i], pred_edge[i])
            color_edge[n] = np.array([1.0, 0.0, 0.0])
            edgeerr += 1
        cnfx_edge.add(ground_truth_edge[i], pred_edge[i])
    print('Node error', nodeerr, 'Edge error', edgeerr)
    cnfx_node_all.append(cnfx_node)
    cnfx_edge_all.append(cnfx_edge)
    if DSP:
        # three 2-D projections of the 3-D graph, one per ignored axis
        for ignoreaxisi in range(3):
            pos = get_node_dict(graph, "pos", ignoreaxis=ignoreaxisi)
            ax = fig.add_subplot(1, 3, ignoreaxisi + 1)
            plotter = GraphPlotter(ax, graph, pos)
            plotter.draw_graph_with_solution(node_size=node_size, node_color=color, edge_color=color_edge)
            # ax.set_axis_on()
            # ax.set_xticks([])
            # ax.set_yticks([])
            try:
                ax.set_facecolor([0.9] * 3 + [1.0])
            except AttributeError:
                # fallback for older matplotlib versions
                ax.set_axis_bgcolor([0.9] * 3 + [1.0])
            ax.grid(None)
            ax.set_title("Ground truth\n" + graphname[:] + ' N%d E%d' % (nodeerr, edgeerr))
        plt.show()
# --- Aggregate accuracy summary over all evaluated cases -------------------
# Sum the per-case confusion matrices into one overall matrix each for
# nodes and edges (cnfx objects support addition).
cnfx_node_all_sum = np.sum(cnfx_node_all)
cnfx_edge_all_sum = np.sum(cnfx_edge_all)
# overall node / edge accuracy and weighted precision-recall
correct_node = cnfx_node_all_sum.acc()
correct_edge = cnfx_edge_all_sum.acc()
node_wpr = cnfx_node_all_sum.wpr()
# fraction of cases with every node solved
solve_node = [case_mat.solveall() for case_mat in cnfx_node_all]
solve_node_p = np.sum(solve_node) / len(solve_node)
# fraction of cases with the 'cow' node subset solved
# (presumably circle-of-Willis — confirm against cnfx.solvecow)
solve_cow_node = [case_mat.solvecow() for case_mat in cnfx_node_all]
solve_cow_node_p = np.sum(solve_cow_node) / len(solve_cow_node)
# fraction of cases with every edge solved
solve_edge = [case_mat.solveall() for case_mat in cnfx_edge_all]
solve_edge_p = np.sum(solve_edge) / len(solve_edge)
print('%.4f\t' % correct_node, '%.4f\t' % node_wpr, '%.4f\t' % solve_node_p, '%.4f\t' % solve_cow_node_p,
      '%.4f\t' % correct_edge, '%.4f\t' % solve_edge_p)
# %%
# Distribution of per-case node errors: a case's wrong-node count is its
# total confusion-matrix mass minus the true positives on the diagonal.
wrongnums = [np.sum(case_mat.matrix) - np.sum(case_mat.TP()) for case_mat in cnfx_node_all]
ct = Counter(wrongnums)
# bar chart: x = number of wrong nodes in a case, y = number of such cases
plt.bar(ct.keys(), list(ct.values()))
# BUGFIX: corrected title typo 'Histgram' -> 'Histogram'
plt.title('Histogram of wrongly predicted nodes per case')
plt.xlabel('Number of wrongly predicted nodes')
plt.ylabel('Counts')
# %%
# Detection metrics for grouped bifurcation types: the node types listed for
# each group are pooled into one binary confusion matrix, then
# (accuracy, precision, recall) is computed per group; NaN components are
# excluded from the final averages.
btype = {'ICA-OA': [9, 10], 'ICA-M1': [3, 4], 'ICA-PComA': [21, 22], 'ACA1-AComA': [5, 6], 'M1-M2': [7, 8],
         'VBA-PCA1': [18], 'PCA1-PComA': [19, 20]}
accs = []
precisions = []
recalls = []
for ctype, valtypes in btype.items():
    pooled = cnfx(size=2)
    for member_type in valtypes:
        pooled += cnfx_node_case[member_type]
    cmetric = pooled.metrictype(1)
    print(ctype, '\t', '%.4f\t%.4f\t%.4f' % cmetric)
    # collect each finite metric component into its bucket
    for value, bucket in zip(cmetric, (accs, precisions, recalls)):
        if not np.isnan(value):
            bucket.append(value)
print('\t', np.mean(accs), '\t', np.mean(precisions), '\t', np.mean(recalls))
# %%
# Per-node-type (accuracy, precision, recall) from the summed confusion
# matrix, printed with the human-readable type name. NaN components are
# skipped when accumulating into accs/precisions/recalls.
from hr_utils import NodeName
accs = []
precisions = []
recalls = []
for type_idx in range(BOITYPENUM):
    cmetric = cnfx_node_all_sum.metrictype(type_idx)
    # keep only finite metric components
    for value, bucket in zip(cmetric, (accs, precisions, recalls)):
        if not np.isnan(value):
            bucket.append(value)
    ctype = NodeName[type_idx]
    print(ctype, '\t', '%.4f\t%.4f\t%.4f' % cmetric)
# %% md
## check and display direct from GNN
# %%
gts = gtsr
# using refinement
# predoutputs = refoutputs
# direct from GNN
predoutputs = rawoutputs
# display figure
DSP = 0
# cases solve key nodes
solvekey = 0
# cases identify key nodes using the three tree.
identifykey = 0
# cases solve all nodes
solveall = 0
# eval
# confusion matrix
from cnfx import cnfx
# test set
# confusion matric for all nodes/edges
cnfx_node_all = []
cnfx_edge_all = []
# case based detection
cnfx_node_case = [cnfx(size=2) for i in range(BOITYPENUM)]
# list of node/edge acc for each scan
nodeacc_all = []
edgeacc_all = []
# list of scans with solved nodes/edges, True or False
nodesolve = []
assert len(gts) == len(predoutputs)
for ti in range(len(predoutputs)):
confoutput = predoutputs[ti]
pred = [np.argmax(ni) for ni in confoutput["nodes"]]
pred_edge = [np.argmax(ni) for ni in confoutput["edges"]]
ground_truth = gts[ti]['nodes']
ground_truth_edge = gts[ti]['edges']
graph = raw_graphs[trange[ti]]
target = targets[trange[ti]]
# output = outputs[ti]
graphname = gts[ti]['name']
print('=' * 40, '\n', 'ti', ti, graphname)
if DSP:
fig = plt.figure(101, figsize=(12, 4))
fig.clf()
node_size = 80
cnfx_node = cnfx(size=BOITYPENUM)
cnfx_edge = cnfx(size=VESTYPENUM)
color = {}
nodeerr = 0
for i, n in enumerate(graph.nodes()):
if ground_truth[i] == pred[i]:
color[n] = np.array([0.0, 0.0, 1.0])
else:
print(i, 'node', ground_truth[i], pred[i])
color[n] = np.array([1.0, 0.0, 0.0])
nodeerr += 1
cnfx_node.add(ground_truth[i], pred[i])
for typei in range(BOITYPENUM):
if typei in ground_truth:
i = ground_truth.index(typei)
if ground_truth[i] == pred[i]:
cnfx_node_case[typei].add(1, 1)
else:
cnfx_node_case[typei].add(1, 0)
else:
if typei in pred:
cnfx_node_case[typei].add(0, 1)
else:
cnfx_node_case[typei].add(0, 0)
color_edge = {}
edgeerr = 0
for i, n in enumerate(graph.edges()):
if ground_truth_edge[i] == 0:
color_edge[n] = np.array([0.0, 1.0, 0.0])
continue
elif ground_truth_edge[i] == pred_edge[i]:
color_edge[n] = np.array([0.0, 0.0, 1.0])
else:
print(i, 'edge', ground_truth_edge[i], pred_edge[i])
color_edge[n] = np.array([1.0, 0.0, 0.0])
edgeerr += 1
cnfx_edge.add(ground_truth_edge[i], pred_edge[i])
print('Node error', nodeerr, 'Edge error', edgeerr)
cnfx_node_all.append(cnfx_node)
cnfx_edge_all.append(cnfx_edge)
if DSP:
for ignoreaxisi in range(3):
pos = get_node_dict(graph, "pos", ignoreaxis=ignoreaxisi)
ax = fig.add_subplot(1, 3, ignoreaxisi + 1)
plotter = GraphPlotter(ax, graph, pos)
plotter.draw_graph_with_solution(node_size=node_size, node_color=color, edge_color=color_edge)
# ax.set_axis_on()
# ax.set_xticks([])
# ax.set_yticks([])
try:
ax.set_facecolor([0.9] * 3 + [1.0])
except AttributeError:
ax.set_axis_bgcolor([0.9] * 3 + [1.0])
ax.grid(None)
ax.set_title("Ground truth\n" + graphname[:] + ' N%d E%d' % (nodeerr, edgeerr))
plt.show()
# --- Aggregate accuracy summary for the raw-GNN evaluation -----------------
# Sum the per-case confusion matrices into one overall matrix each for
# nodes and edges (cnfx objects support addition).
cnfx_node_all_sum = np.sum(cnfx_node_all)
cnfx_edge_all_sum = np.sum(cnfx_edge_all)
# overall node / edge accuracy and weighted precision-recall
correct_node = cnfx_node_all_sum.acc()
correct_edge = cnfx_edge_all_sum.acc()
node_wpr = cnfx_node_all_sum.wpr()
# fraction of cases with every node solved
solve_node = [case_mat.solveall() for case_mat in cnfx_node_all]
solve_node_p = np.sum(solve_node) / len(solve_node)
# fraction of cases with the 'cow' node subset solved
# (presumably circle-of-Willis — confirm against cnfx.solvecow)
solve_cow_node = [case_mat.solvecow() for case_mat in cnfx_node_all]
solve_cow_node_p = np.sum(solve_cow_node) / len(solve_cow_node)
# fraction of cases with every edge solved
solve_edge = [case_mat.solveall() for case_mat in cnfx_edge_all]
solve_edge_p = np.sum(solve_edge) / len(solve_edge)
print('%.4f\t' % correct_node, '%.4f\t' % node_wpr, '%.4f\t' % solve_node_p, '%.4f\t' % solve_cow_node_p,
      '%.4f\t' % correct_edge, '%.4f\t' % solve_edge_p)
| [
"hr_utils.findmaxprob",
"hr_utils.edgemap.items",
"matplotlib.pyplot.ylabel",
"hr_utils.find_node_connection_ids",
"cnfx.cnfx",
"numpy.array",
"networkx.shortest_path",
"hr_utils.findedgeid",
"copy.deepcopy",
"copy.copy",
"tensorflow.set_random_seed",
"numpy.random.RandomState",
"numpy.arang... | [((527, 543), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (541, 543), True, 'import tensorflow as tf\n'), ((920, 940), 'numpy.random.seed', 'np.random.seed', (['SEED'], {}), '(SEED)\n', (934, 940), True, 'import numpy as np\n'), ((941, 965), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['SEED'], {}), '(SEED)\n', (959, 965), True, 'import tensorflow as tf\n'), ((1206, 1235), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': '(0)'}), '(seed=0)\n', (1227, 1235), True, 'import numpy as np\n'), ((2786, 2826), 'load_graph.prepare_graphs', 'prepare_graphs', (['dbnames', 'graphfoldername'], {}), '(dbnames, graphfoldername)\n', (2800, 2826), False, 'from load_graph import prepare_graphs\n'), ((3463, 3480), 'collections.Counter', 'Counter', (['boitypes'], {}), '(boitypes)\n', (3470, 3480), False, 'from collections import Counter\n'), ((3675, 3701), 'numpy.sum', 'np.sum', (['node_class_weightr'], {}), '(node_class_weightr)\n', (3681, 3701), True, 'import numpy as np\n'), ((4082, 4099), 'collections.Counter', 'Counter', (['vestypes'], {}), '(vestypes)\n', (4089, 4099), False, 'from collections import Counter\n'), ((4294, 4320), 'numpy.sum', 'np.sum', (['edge_class_weightr'], {}), '(edge_class_weightr)\n', (4300, 4320), True, 'import numpy as np\n'), ((4570, 4602), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': 'seed'}), '(seed=seed)\n', (4591, 4602), True, 'import numpy as np\n'), ((4835, 4873), 'matplotlib.pyplot.figure', 'plt.figure', (['(40)'], {'figsize': '(w * 4, h * 4)'}), '(40, figsize=(w * 4, h * 4))\n', (4845, 4873), True, 'import matplotlib.pyplot as plt\n'), ((5177, 5201), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (5199, 5201), True, 'import tensorflow as tf\n'), ((5219, 5251), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': 'seed'}), '(seed=seed)\n', (5240, 5251), True, 'import numpy as np\n'), ((5714, 5803), 
'graph_nets.demos.models.EncodeProcessDecode', 'models.EncodeProcessDecode', ([], {'edge_output_size': 'VESTYPENUM', 'node_output_size': 'BOITYPENUM'}), '(edge_output_size=VESTYPENUM, node_output_size=\n BOITYPENUM)\n', (5740, 5803), False, 'from graph_nets.demos import models\n'), ((6418, 6455), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['learning_rate'], {}), '(learning_rate)\n', (6440, 6455), True, 'import tensorflow as tf\n'), ((6698, 6710), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (6708, 6710), True, 'import tensorflow as tf\n'), ((7052, 7068), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (7066, 7068), True, 'import tensorflow as tf\n'), ((9984, 9995), 'time.time', 'time.time', ([], {}), '()\n', (9993, 9995), False, 'import time\n'), ((14622, 14658), 'glob.glob', 'glob.glob', (["(taskname + '/*ckpt.index')"], {}), "(taskname + '/*ckpt.index')\n", (14631, 14658), False, 'import glob\n'), ((15231, 15254), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (15252, 15254), False, 'import datetime\n'), ((15580, 15638), 'graph_nets.utils_np.graphs_tuple_to_data_dicts', 'utils_np.graphs_tuple_to_data_dicts', (["test_values['target']"], {}), "(test_values['target'])\n", (15615, 15638), False, 'from graph_nets import utils_np\n'), ((15796, 15819), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (15817, 15819), False, 'import datetime\n'), ((22673, 22696), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (22694, 22696), False, 'import datetime\n'), ((63060, 63083), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (63081, 63083), False, 'import datetime\n'), ((66703, 66724), 'numpy.sum', 'np.sum', (['cnfx_node_all'], {}), '(cnfx_node_all)\n', (66709, 66724), True, 'import numpy as np\n'), ((66745, 66766), 'numpy.sum', 'np.sum', (['cnfx_edge_all'], {}), '(cnfx_edge_all)\n', (66751, 66766), True, 'import numpy as np\n'), ((67625, 67643), 
'collections.Counter', 'Counter', (['wrongnums'], {}), '(wrongnums)\n', (67632, 67643), False, 'from collections import Counter\n'), ((67682, 67739), 'matplotlib.pyplot.title', 'plt.title', (['"""Histgram of wrongly predicted nodes per case"""'], {}), "('Histgram of wrongly predicted nodes per case')\n", (67691, 67739), True, 'import matplotlib.pyplot as plt\n'), ((67740, 67787), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of wrongly predicted nodes"""'], {}), "('Number of wrongly predicted nodes')\n", (67750, 67787), True, 'import matplotlib.pyplot as plt\n'), ((67788, 67808), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Counts"""'], {}), "('Counts')\n", (67798, 67808), True, 'import matplotlib.pyplot as plt\n'), ((72450, 72471), 'numpy.sum', 'np.sum', (['cnfx_node_all'], {}), '(cnfx_node_all)\n', (72456, 72471), True, 'import numpy as np\n'), ((72492, 72513), 'numpy.sum', 'np.sum', (['cnfx_edge_all'], {}), '(cnfx_edge_all)\n', (72498, 72513), True, 'import numpy as np\n'), ((596, 622), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config2'}), '(config=config2)\n', (606, 622), True, 'import tensorflow as tf\n'), ((1060, 1084), 'os.path.exists', 'os.path.exists', (['taskname'], {}), '(taskname)\n', (1074, 1084), False, 'import os\n'), ((1090, 1108), 'os.mkdir', 'os.mkdir', (['taskname'], {}), '(taskname)\n', (1098, 1108), False, 'import os\n'), ((3334, 3378), 'load_graph.generate_graph', 'generate_graph', (['graphcache', 'picklei', 'randaug'], {}), '(graphcache, picklei, randaug)\n', (3348, 3378), False, 'from load_graph import generate_graph\n'), ((3812, 3833), 'numpy.arange', 'np.arange', (['BOITYPENUM'], {}), '(BOITYPENUM)\n', (3821, 3833), True, 'import numpy as np\n'), ((3942, 3986), 'load_graph.generate_graph', 'generate_graph', (['graphcache', 'picklei', 'randaug'], {}), '(graphcache, picklei, randaug)\n', (3956, 3986), False, 'from load_graph import generate_graph\n'), ((4432, 4453), 'numpy.arange', 'np.arange', (['VESTYPENUM'], {}), 
'(VESTYPENUM)\n', (4441, 4453), True, 'import numpy as np\n'), ((4811, 4827), 'numpy.ceil', 'np.ceil', (['(num / w)'], {}), '(num / w)\n', (4818, 4827), True, 'import numpy as np\n'), ((6720, 6753), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (6751, 6753), True, 'import tensorflow as tf\n'), ((7429, 7460), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {'figsize': '(18, 10)'}), '(1, figsize=(18, 10))\n', (7439, 7460), True, 'import matplotlib.pyplot as plt\n'), ((7483, 7510), 'numpy.array', 'np.array', (['logged_iterations'], {}), '(logged_iterations)\n', (7491, 7510), True, 'import numpy as np\n'), ((9195, 9205), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9203, 9205), True, 'import matplotlib.pyplot as plt\n'), ((10425, 10436), 'time.time', 'time.time', ([], {}), '()\n', (10434, 10436), False, 'import time\n'), ((16004, 16026), 'copy.copy', 'copy.copy', (['outputr[-1]'], {}), '(outputr[-1])\n', (16013, 16026), False, 'import copy\n'), ((16615, 16654), 'load_graph.generate_graph', 'generate_graph', (['graphcache', 'gi', 'randaug'], {}), '(graphcache, gi, randaug)\n', (16629, 16654), False, 'from load_graph import generate_graph\n'), ((17812, 17827), 'hr_utils.edgemap.items', 'edgemap.items', ([], {}), '()\n', (17825, 17827), False, 'from hr_utils import nodeconnection, matchvestype, nodedist, edgemap\n'), ((18490, 18504), 'numpy.mean', 'np.mean', (['dists'], {}), '(dists)\n', (18497, 18504), True, 'import numpy as np\n'), ((18536, 18549), 'numpy.std', 'np.std', (['dists'], {}), '(dists)\n', (18542, 18549), True, 'import numpy as np\n'), ((19040, 19074), 'numpy.argmax', 'np.argmax', (["target['nodes']"], {'axis': '(1)'}), "(target['nodes'], axis=1)\n", (19049, 19074), True, 'import numpy as np\n'), ((23023, 23057), 'hr_utils.softmax_probs', 'softmax_probs', (["output[-1]['edges']"], {}), "(output[-1]['edges'])\n", (23036, 23057), False, 'from hr_utils import softmax_probs, 
find_node_connection_ids, findmaxprob, findallnei, find_nei_nodes\n'), ((23074, 23108), 'hr_utils.softmax_probs', 'softmax_probs', (["output[-1]['nodes']"], {}), "(output[-1]['nodes'])\n", (23087, 23108), False, 'from hr_utils import softmax_probs, find_node_connection_ids, findmaxprob, findallnei, find_nei_nodes\n'), ((23125, 23163), 'numpy.argmax', 'np.argmax', (["output[-1]['edges']"], {'axis': '(1)'}), "(output[-1]['edges'], axis=1)\n", (23134, 23163), True, 'import numpy as np\n'), ((23180, 23218), 'numpy.argmax', 'np.argmax', (["output[-1]['nodes']"], {'axis': '(1)'}), "(output[-1]['nodes'], axis=1)\n", (23189, 23218), True, 'import numpy as np\n'), ((23237, 23271), 'numpy.argmax', 'np.argmax', (["target['nodes']"], {'axis': '(1)'}), "(target['nodes'], axis=1)\n", (23246, 23271), True, 'import numpy as np\n'), ((53572, 53597), 'numpy.zeros', 'np.zeros', (['probnodes.shape'], {}), '(probnodes.shape)\n', (53580, 53597), True, 'import numpy as np\n'), ((53624, 53649), 'numpy.zeros', 'np.zeros', (['probedges.shape'], {}), '(probedges.shape)\n', (53632, 53649), True, 'import numpy as np\n'), ((55815, 55830), 'hr_utils.edgemap.items', 'edgemap.items', ([], {}), '()\n', (55828, 55830), False, 'from hr_utils import nodeconnection, matchvestype, nodedist, edgemap\n'), ((63813, 63825), 'cnfx.cnfx', 'cnfx', ([], {'size': '(2)'}), '(size=2)\n', (63817, 63825), False, 'from cnfx import cnfx\n'), ((64594, 64615), 'cnfx.cnfx', 'cnfx', ([], {'size': 'BOITYPENUM'}), '(size=BOITYPENUM)\n', (64598, 64615), False, 'from cnfx import cnfx\n'), ((64632, 64653), 'cnfx.cnfx', 'cnfx', ([], {'size': 'VESTYPENUM'}), '(size=VESTYPENUM)\n', (64636, 64653), False, 'from cnfx import cnfx\n'), ((66973, 66991), 'numpy.sum', 'np.sum', (['solve_node'], {}), '(solve_node)\n', (66979, 66991), True, 'import numpy as np\n'), ((67111, 67133), 'numpy.sum', 'np.sum', (['solve_cow_node'], {}), '(solve_cow_node)\n', (67117, 67133), True, 'import numpy as np\n'), ((67249, 67267), 'numpy.sum', 'np.sum', 
(['solve_edge'], {}), '(solve_edge)\n', (67255, 67267), True, 'import numpy as np\n'), ((68063, 68075), 'cnfx.cnfx', 'cnfx', ([], {'size': '(2)'}), '(size=2)\n', (68067, 68075), False, 'from cnfx import cnfx\n'), ((68443, 68456), 'numpy.mean', 'np.mean', (['accs'], {}), '(accs)\n', (68450, 68456), True, 'import numpy as np\n'), ((68464, 68483), 'numpy.mean', 'np.mean', (['precisions'], {}), '(precisions)\n', (68471, 68483), True, 'import numpy as np\n'), ((68491, 68507), 'numpy.mean', 'np.mean', (['recalls'], {}), '(recalls)\n', (68498, 68507), True, 'import numpy as np\n'), ((69441, 69453), 'cnfx.cnfx', 'cnfx', ([], {'size': '(2)'}), '(size=2)\n', (69445, 69453), False, 'from cnfx import cnfx\n'), ((70222, 70243), 'cnfx.cnfx', 'cnfx', ([], {'size': 'BOITYPENUM'}), '(size=BOITYPENUM)\n', (70226, 70243), False, 'from cnfx import cnfx\n'), ((70260, 70281), 'cnfx.cnfx', 'cnfx', ([], {'size': 'VESTYPENUM'}), '(size=VESTYPENUM)\n', (70264, 70281), False, 'from cnfx import cnfx\n'), ((72720, 72738), 'numpy.sum', 'np.sum', (['solve_node'], {}), '(solve_node)\n', (72726, 72738), True, 'import numpy as np\n'), ((72858, 72880), 'numpy.sum', 'np.sum', (['solve_cow_node'], {}), '(solve_cow_node)\n', (72864, 72880), True, 'import numpy as np\n'), ((72996, 73014), 'numpy.sum', 'np.sum', (['solve_edge'], {}), '(solve_edge)\n', (73002, 73014), True, 'import numpy as np\n'), ((16338, 16359), 'copy.deepcopy', 'copy.deepcopy', (['output'], {}), '(output)\n', (16351, 16359), False, 'import copy\n'), ((17973, 18013), 'hr_utils.matchvestype', 'matchvestype', (['nodetypes[0]', 'nodetypes[1]'], {}), '(nodetypes[0], nodetypes[1])\n', (17985, 18013), False, 'from hr_utils import nodeconnection, matchvestype, nodedist, edgemap\n'), ((18251, 18326), 'hr_utils.nodedist', 'nodedist', (['G', 'cnode_type_to_id[nodetypes[0]]', 'cnode_type_to_id[nodetypes[1]]'], {}), '(G, cnode_type_to_id[nodetypes[0]], cnode_type_to_id[nodetypes[1]])\n', (18259, 18326), False, 'from hr_utils import nodeconnection, 
matchvestype, nodedist, edgemap\n'), ((18890, 18934), 'os.path.basename', 'os.path.basename', (["all_db['test'][selids[ti]]"], {}), "(all_db['test'][selids[ti]])\n", (18906, 18934), False, 'import os\n'), ((19096, 19109), 'numpy.argmax', 'np.argmax', (['ni'], {}), '(ni)\n', (19105, 19109), True, 'import numpy as np\n'), ((22875, 22919), 'os.path.basename', 'os.path.basename', (["all_db['test'][selids[ti]]"], {}), "(all_db['test'][selids[ti]])\n", (22891, 22919), False, 'import os\n'), ((23653, 23691), 'hr_utils.find_node_connection_ids', 'find_node_connection_ids', (['graph', 'nodei'], {}), '(graph, nodei)\n', (23677, 23691), False, 'from hr_utils import softmax_probs, find_node_connection_ids, findmaxprob, findallnei, find_nei_nodes\n'), ((23873, 23897), 'numpy.max', 'np.max', (['probnodes[nodei]'], {}), '(probnodes[nodei])\n', (23879, 23897), True, 'import numpy as np\n'), ((36777, 36843), 'hr_utils.find_node_connection_ids', 'find_node_connection_ids', (['graph', 'node_type_to_id[center_node_type]'], {}), '(graph, node_type_to_id[center_node_type])\n', (36801, 36843), False, 'from hr_utils import softmax_probs, find_node_connection_ids, findmaxprob, findallnei, find_nei_nodes\n'), ((42626, 42701), 'networkx.shortest_path', 'nx.shortest_path', (['graph', 'node_type_to_id[icamatype]', 'node_type_to_id[mtype]'], {}), '(graph, node_type_to_id[icamatype], node_type_to_id[mtype])\n', (42642, 42701), True, 'import networkx as nx\n'), ((50649, 50742), 'networkx.shortest_path', 'nx.shortest_path', (['graph', 'node_type_to_id[antnodeid]', 'node_type_to_id[pcommnodetype - 20]'], {}), '(graph, node_type_to_id[antnodeid], node_type_to_id[\n pcommnodetype - 20])\n', (50665, 50742), True, 'import networkx as nx\n'), ((57360, 57431), 'networkx.shortest_path', 'nx.shortest_path', (['graph', 'node_type_to_id[fi[1]]', 'node_type_to_id[fi[0]]'], {}), '(graph, node_type_to_id[fi[1]], node_type_to_id[fi[0]])\n', (57376, 57431), True, 'import networkx as nx\n'), ((57491, 57549), 
'hr_utils.findallnei', 'findallnei', (['graph', 'node_type_to_id[fi[1]]', '(sp[:2] + keyids)'], {}), '(graph, node_type_to_id[fi[1]], sp[:2] + keyids)\n', (57501, 57549), False, 'from hr_utils import softmax_probs, find_node_connection_ids, findmaxprob, findallnei, find_nei_nodes\n'), ((62976, 63001), 'copy.deepcopy', 'copy.deepcopy', (['confoutput'], {}), '(confoutput)\n', (62989, 63001), False, 'import copy\n'), ((64115, 64128), 'numpy.argmax', 'np.argmax', (['ni'], {}), '(ni)\n', (64124, 64128), True, 'import numpy as np\n'), ((64177, 64190), 'numpy.argmax', 'np.argmax', (['ni'], {}), '(ni)\n', (64186, 64190), True, 'import numpy as np\n'), ((64503, 64535), 'matplotlib.pyplot.figure', 'plt.figure', (['(101)'], {'figsize': '(12, 4)'}), '(101, figsize=(12, 4))\n', (64513, 64535), True, 'import matplotlib.pyplot as plt\n'), ((66671, 66681), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (66679, 66681), True, 'import matplotlib.pyplot as plt\n'), ((67523, 67555), 'numpy.sum', 'np.sum', (['cnfx_node_all[ti].matrix'], {}), '(cnfx_node_all[ti].matrix)\n', (67529, 67555), True, 'import numpy as np\n'), ((68238, 68258), 'numpy.isnan', 'np.isnan', (['cmetric[0]'], {}), '(cmetric[0])\n', (68246, 68258), True, 'import numpy as np\n'), ((68303, 68323), 'numpy.isnan', 'np.isnan', (['cmetric[1]'], {}), '(cmetric[1])\n', (68311, 68323), True, 'import numpy as np\n'), ((68374, 68394), 'numpy.isnan', 'np.isnan', (['cmetric[2]'], {}), '(cmetric[2])\n', (68382, 68394), True, 'import numpy as np\n'), ((68671, 68691), 'numpy.isnan', 'np.isnan', (['cmetric[0]'], {}), '(cmetric[0])\n', (68679, 68691), True, 'import numpy as np\n'), ((68736, 68756), 'numpy.isnan', 'np.isnan', (['cmetric[1]'], {}), '(cmetric[1])\n', (68744, 68756), True, 'import numpy as np\n'), ((68807, 68827), 'numpy.isnan', 'np.isnan', (['cmetric[2]'], {}), '(cmetric[2])\n', (68815, 68827), True, 'import numpy as np\n'), ((69743, 69756), 'numpy.argmax', 'np.argmax', (['ni'], {}), '(ni)\n', (69752, 69756), 
True, 'import numpy as np\n'), ((69805, 69818), 'numpy.argmax', 'np.argmax', (['ni'], {}), '(ni)\n', (69814, 69818), True, 'import numpy as np\n'), ((70131, 70163), 'matplotlib.pyplot.figure', 'plt.figure', (['(101)'], {'figsize': '(12, 4)'}), '(101, figsize=(12, 4))\n', (70141, 70163), True, 'import matplotlib.pyplot as plt\n'), ((72418, 72428), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (72426, 72428), True, 'import matplotlib.pyplot as plt\n'), ((4006, 4030), 'numpy.argmax', 'np.argmax', (["ni['vestype']"], {}), "(ni['vestype'])\n", (4015, 4030), True, 'import numpy as np\n'), ((11793, 11804), 'time.time', 'time.time', ([], {}), '()\n', (11802, 11804), False, 'import time\n'), ((13612, 13882), 'numpy.save', 'np.save', (["(taskname + '/losses.npy')", '[last_iteration, logged_iterations, losses_tr, corrects_tr, solveds_tr,\n corrects_tr_e, solveds_tr_e, corrects_tr_n, solveds_tr_n, losses_ge,\n corrects_ge, solveds_ge, corrects_ge_e, solveds_ge_e, corrects_ge_n,\n solveds_ge_n]'], {}), "(taskname + '/losses.npy', [last_iteration, logged_iterations,\n losses_tr, corrects_tr, solveds_tr, corrects_tr_e, solveds_tr_e,\n corrects_tr_n, solveds_tr_n, losses_ge, corrects_ge, solveds_ge,\n corrects_ge_e, solveds_ge_e, corrects_ge_n, solveds_ge_n])\n", (13619, 13882), True, 'import numpy as np\n'), ((14210, 14480), 'numpy.save', 'np.save', (["(taskname + '/losses.npy')", '[last_iteration, logged_iterations, losses_tr, corrects_tr, solveds_tr,\n corrects_tr_e, solveds_tr_e, corrects_tr_n, solveds_tr_n, losses_ge,\n corrects_ge, solveds_ge, corrects_ge_e, solveds_ge_e, corrects_ge_n,\n solveds_ge_n]'], {}), "(taskname + '/losses.npy', [last_iteration, logged_iterations,\n losses_tr, corrects_tr, solveds_tr, corrects_tr_e, solveds_tr_e,\n corrects_tr_n, solveds_tr_n, losses_ge, corrects_ge, solveds_ge,\n corrects_ge_e, solveds_ge_e, corrects_ge_n, solveds_ge_n])\n", (14217, 14480), True, 'import numpy as np\n'), ((17294, 17334), 'hr_utils.matchvestype', 
'matchvestype', (['nodetypei', 'branchnodetypei'], {}), '(nodetypei, branchnodetypei)\n', (17306, 17334), False, 'from hr_utils import nodeconnection, matchvestype, nodedist, edgemap\n'), ((17613, 17688), 'hr_utils.nodedist', 'nodedist', (['G', 'cnode_type_to_id[nodetypei]', 'cnode_type_to_id[branchnodetypei]'], {}), '(G, cnode_type_to_id[nodetypei], cnode_type_to_id[branchnodetypei])\n', (17621, 17688), False, 'from hr_utils import nodeconnection, matchvestype, nodedist, edgemap\n'), ((18044, 18132), 'networkx.shortest_path', 'nx.shortest_path', (['G', 'cnode_type_to_id[nodetypes[0]]', 'cnode_type_to_id[nodetypes[1]]'], {}), '(G, cnode_type_to_id[nodetypes[0]], cnode_type_to_id[\n nodetypes[1]])\n', (18060, 18132), True, 'import networkx as nx\n'), ((19206, 19216), 'numpy.max', 'np.max', (['ni'], {}), '(ni)\n', (19212, 19216), True, 'import numpy as np\n'), ((23760, 23785), 'numpy.max', 'np.max', (['probedges[edgeid]'], {}), '(probedges[edgeid])\n', (23766, 23785), True, 'import numpy as np\n'), ((37447, 37526), 'networkx.shortest_path', 'nx.shortest_path', (['graph', 'node_type_to_id[center_node_type]', 'node_type_to_id[bi]'], {}), '(graph, node_type_to_id[center_node_type], node_type_to_id[bi])\n', (37463, 37526), True, 'import networkx as nx\n'), ((42974, 43036), 'hr_utils.findallnei', 'findallnei', (['graph', 'node_type_to_id[mtype]', '([spm1[-2]] + keyids)'], {}), '(graph, node_type_to_id[mtype], [spm1[-2]] + keyids)\n', (42984, 43036), False, 'from hr_utils import softmax_probs, find_node_connection_ids, findmaxprob, findallnei, find_nei_nodes\n'), ((44229, 44292), 'networkx.shortest_path', 'nx.shortest_path', (['graph', 'node_type_to_id[5]', 'node_type_to_id[6]'], {}), '(graph, node_type_to_id[5], node_type_to_id[6])\n', (44245, 44292), True, 'import networkx as nx\n'), ((45630, 45693), 'networkx.shortest_path', 'nx.shortest_path', (['graph', 'node_type_to_id[5]', 'node_type_to_id[3]'], {}), '(graph, node_type_to_id[5], node_type_to_id[3])\n', (45646, 
45693), True, 'import networkx as nx\n'), ((45714, 45777), 'networkx.shortest_path', 'nx.shortest_path', (['graph', 'node_type_to_id[6]', 'node_type_to_id[4]'], {}), '(graph, node_type_to_id[6], node_type_to_id[4])\n', (45730, 45777), True, 'import networkx as nx\n'), ((46574, 46637), 'networkx.shortest_path', 'nx.shortest_path', (['graph', 'node_type_to_id[1]', 'node_type_to_id[3]'], {}), '(graph, node_type_to_id[1], node_type_to_id[3])\n', (46590, 46637), True, 'import networkx as nx\n'), ((49795, 49887), 'networkx.shortest_path', 'nx.shortest_path', (['graph', 'node_type_to_id[pcommnodetype - 2]', 'node_type_to_id[antnodeid]'], {}), '(graph, node_type_to_id[pcommnodetype - 2], node_type_to_id\n [antnodeid])\n', (49811, 49887), True, 'import networkx as nx\n'), ((51880, 51985), 'hr_utils.findmaxprob', 'findmaxprob', (['graph', 'node_type_to_id[17]', 'visited', '(15)', '(1)', 'probnodes', 'branch_dist_mean', 'branch_dist_std'], {}), '(graph, node_type_to_id[17], visited, 15, 1, probnodes,\n branch_dist_mean, branch_dist_std)\n', (51891, 51985), False, 'from hr_utils import softmax_probs, find_node_connection_ids, findmaxprob, findallnei, find_nei_nodes\n'), ((52296, 52401), 'hr_utils.findmaxprob', 'findmaxprob', (['graph', 'node_type_to_id[17]', 'visited', '(16)', '(1)', 'probnodes', 'branch_dist_mean', 'branch_dist_std'], {}), '(graph, node_type_to_id[17], visited, 16, 1, probnodes,\n branch_dist_mean, branch_dist_std)\n', (52307, 52401), False, 'from hr_utils import softmax_probs, find_node_connection_ids, findmaxprob, findallnei, find_nei_nodes\n'), ((54683, 54723), 'hr_utils.matchvestype', 'matchvestype', (['nodetypei', 'branchnodetypei'], {}), '(nodetypei, branchnodetypei)\n', (54695, 54723), False, 'from hr_utils import nodeconnection, matchvestype, nodedist, edgemap\n'), ((57839, 57859), 'numpy.zeros', 'np.zeros', (['BOITYPENUM'], {}), '(BOITYPENUM)\n', (57847, 57859), True, 'import numpy as np\n'), ((58044, 58083), 'hr_utils.findedgeid', 'findedgeid', 
(['graph', 'edgeid[0]', 'edgeid[1]'], {}), '(graph, edgeid[0], edgeid[1])\n', (58054, 58083), False, 'from hr_utils import findedgeid, edgefromnode\n'), ((59425, 59459), 'numpy.max', 'np.max', (["confoutput['edges'][edgei]"], {}), "(confoutput['edges'][edgei])\n", (59431, 59459), True, 'import numpy as np\n'), ((64790, 64815), 'numpy.array', 'np.array', (['[0.0, 0.0, 1.0]'], {}), '([0.0, 0.0, 1.0])\n', (64798, 64815), True, 'import numpy as np\n'), ((64908, 64933), 'numpy.array', 'np.array', (['[1.0, 0.0, 0.0]'], {}), '([1.0, 0.0, 0.0])\n', (64916, 64933), True, 'import numpy as np\n'), ((65460, 65485), 'numpy.array', 'np.array', (['[0.0, 1.0, 0.0]'], {}), '([0.0, 1.0, 0.0])\n', (65468, 65485), True, 'import numpy as np\n'), ((70418, 70443), 'numpy.array', 'np.array', (['[0.0, 0.0, 1.0]'], {}), '([0.0, 0.0, 1.0])\n', (70426, 70443), True, 'import numpy as np\n'), ((70536, 70561), 'numpy.array', 'np.array', (['[1.0, 0.0, 0.0]'], {}), '([1.0, 0.0, 0.0])\n', (70544, 70561), True, 'import numpy as np\n'), ((71207, 71232), 'numpy.array', 'np.array', (['[0.0, 1.0, 0.0]'], {}), '([0.0, 1.0, 0.0])\n', (71215, 71232), True, 'import numpy as np\n'), ((2965, 2982), 'os.path.exists', 'os.path.exists', (['i'], {}), '(i)\n', (2979, 2982), False, 'import os\n'), ((15665, 15727), 'graph_nets.utils_np.graphs_tuple_to_data_dicts', 'utils_np.graphs_tuple_to_data_dicts', (["test_values['outputs'][i]"], {}), "(test_values['outputs'][i])\n", (15700, 15727), False, 'from graph_nets import utils_np\n'), ((17373, 17461), 'networkx.shortest_path', 'nx.shortest_path', (['G', 'cnode_type_to_id[nodetypei]', 'cnode_type_to_id[branchnodetypei]'], {}), '(G, cnode_type_to_id[nodetypei], cnode_type_to_id[\n branchnodetypei])\n', (17389, 17461), True, 'import networkx as nx\n'), ((19943, 19983), 'hr_utils.findedgeid', 'findedgeid', (['graph', 'pcomp1p2id', 'pcomicaid'], {}), '(graph, pcomp1p2id, pcomicaid)\n', (19953, 19983), False, 'from hr_utils import findedgeid, edgefromnode\n'), ((20543, 
20583), 'hr_utils.findedgeid', 'findedgeid', (['graph', 'pcomp1p2id', 'pcomicaid'], {}), '(graph, pcomp1p2id, pcomicaid)\n', (20553, 20583), False, 'from hr_utils import findedgeid, edgefromnode\n'), ((43314, 43351), 'hr_utils.findallnei', 'findallnei', (['graph', 'pm1', '(spm1 + keyids)'], {}), '(graph, pm1, spm1 + keyids)\n', (43324, 43351), False, 'from hr_utils import softmax_probs, find_node_connection_ids, findmaxprob, findallnei, find_nei_nodes\n'), ((44750, 44805), 'hr_utils.nodedist', 'nodedist', (['graph', 'node_type_to_id[5]', 'node_type_to_id[6]'], {}), '(graph, node_type_to_id[5], node_type_to_id[6])\n', (44758, 44805), False, 'from hr_utils import nodeconnection, matchvestype, nodedist, edgemap\n'), ((46794, 46857), 'networkx.shortest_path', 'nx.shortest_path', (['graph', 'node_type_to_id[2]', 'node_type_to_id[4]'], {}), '(graph, node_type_to_id[2], node_type_to_id[4])\n', (46810, 46857), True, 'import networkx as nx\n'), ((48569, 48609), 'hr_utils.matchvestype', 'matchvestype', (['oanodetype', '(oanodetype + 2)'], {}), '(oanodetype, oanodetype + 2)\n', (48581, 48609), False, 'from hr_utils import nodeconnection, matchvestype, nodedist, edgemap\n'), ((48667, 48793), 'hr_utils.findmaxprob', 'findmaxprob', (['graph', 'neinodeids[0]', 'visited', '(oanodetype + 2)', '(1)', 'probnodes', 'branch_dist_mean', 'branch_dist_std', 'exp_edge_type'], {}), '(graph, neinodeids[0], visited, oanodetype + 2, 1, probnodes,\n branch_dist_mean, branch_dist_std, exp_edge_type)\n', (48678, 48793), False, 'from hr_utils import softmax_probs, find_node_connection_ids, findmaxprob, findallnei, find_nei_nodes\n'), ((51412, 51485), 'networkx.shortest_path', 'nx.shortest_path', (['graph', 'node_type_to_id[17]', 'node_type_to_id[va_cf_type]'], {}), '(graph, node_type_to_id[17], node_type_to_id[va_cf_type])\n', (51428, 51485), True, 'import networkx as nx\n'), ((55184, 55274), 'networkx.shortest_path', 'nx.shortest_path', (['graph', 'node_type_to_id[nodetypei]', 
'node_type_to_id[branchnodetypei]'], {}), '(graph, node_type_to_id[nodetypei], node_type_to_id[\n branchnodetypei])\n', (55200, 55274), True, 'import networkx as nx\n'), ((55505, 55544), 'hr_utils.findedgeid', 'findedgeid', (['graph', 'sp[spi - 1]', 'sp[spi]'], {}), '(graph, sp[spi - 1], sp[spi])\n', (55515, 55544), False, 'from hr_utils import findedgeid, edgefromnode\n'), ((55964, 56054), 'networkx.shortest_path', 'nx.shortest_path', (['graph', 'node_type_to_id[nodetypes[0]]', 'node_type_to_id[nodetypes[1]]'], {}), '(graph, node_type_to_id[nodetypes[0]], node_type_to_id[\n nodetypes[1]])\n', (55980, 56054), True, 'import networkx as nx\n'), ((57676, 57714), 'numpy.argmax', 'np.argmax', (["confoutput['nodes'][nodeid]"], {}), "(confoutput['nodes'][nodeid])\n", (57685, 57714), True, 'import numpy as np\n'), ((58099, 58136), 'numpy.argmax', 'np.argmax', (["confoutput['edges'][edgei]"], {}), "(confoutput['edges'][edgei])\n", (58108, 58136), True, 'import numpy as np\n'), ((59086, 59106), 'numpy.zeros', 'np.zeros', (['VESTYPENUM'], {}), '(VESTYPENUM)\n', (59094, 59106), True, 'import numpy as np\n'), ((59560, 59580), 'numpy.argmax', 'np.argmax', (['cprobedge'], {}), '(cprobedge)\n', (59569, 59580), True, 'import numpy as np\n'), ((59683, 59703), 'numpy.argmax', 'np.argmax', (['cprobedge'], {}), '(cprobedge)\n', (59692, 59703), True, 'import numpy as np\n'), ((59880, 59931), 'networkx.shortest_path', 'nx.shortest_path', (['graph', 'enodei', 'node_type_to_id[7]'], {}), '(graph, enodei, node_type_to_id[7])\n', (59896, 59931), True, 'import networkx as nx\n'), ((60312, 60363), 'networkx.shortest_path', 'nx.shortest_path', (['graph', 'enodei', 'node_type_to_id[8]'], {}), '(graph, enodei, node_type_to_id[8])\n', (60328, 60363), True, 'import networkx as nx\n'), ((60744, 60795), 'networkx.shortest_path', 'nx.shortest_path', (['graph', 'enodei', 'node_type_to_id[5]'], {}), '(graph, enodei, node_type_to_id[5])\n', (60760, 60795), True, 'import networkx as nx\n'), ((61209, 
61260), 'networkx.shortest_path', 'nx.shortest_path', (['graph', 'enodei', 'node_type_to_id[6]'], {}), '(graph, enodei, node_type_to_id[6])\n', (61225, 61260), True, 'import networkx as nx\n'), ((61674, 61726), 'networkx.shortest_path', 'nx.shortest_path', (['graph', 'enodei', 'node_type_to_id[19]'], {}), '(graph, enodei, node_type_to_id[19])\n', (61690, 61726), True, 'import networkx as nx\n'), ((62142, 62194), 'networkx.shortest_path', 'nx.shortest_path', (['graph', 'enodei', 'node_type_to_id[20]'], {}), '(graph, enodei, node_type_to_id[20])\n', (62158, 62194), True, 'import networkx as nx\n'), ((62719, 62739), 'numpy.argmax', 'np.argmax', (['cprobedge'], {}), '(cprobedge)\n', (62728, 62739), True, 'import numpy as np\n'), ((65586, 65611), 'numpy.array', 'np.array', (['[0.0, 0.0, 1.0]'], {}), '([0.0, 0.0, 1.0])\n', (65594, 65611), True, 'import numpy as np\n'), ((65719, 65744), 'numpy.array', 'np.array', (['[1.0, 0.0, 0.0]'], {}), '([1.0, 0.0, 0.0])\n', (65727, 65744), True, 'import numpy as np\n'), ((71333, 71358), 'numpy.array', 'np.array', (['[0.0, 0.0, 1.0]'], {}), '([0.0, 0.0, 1.0])\n', (71341, 71358), True, 'import numpy as np\n'), ((71466, 71491), 'numpy.array', 'np.array', (['[1.0, 0.0, 0.0]'], {}), '([1.0, 0.0, 0.0])\n', (71474, 71491), True, 'import numpy as np\n'), ((2992, 3035), 'numpy.concatenate', 'np.concatenate', (['[all_db[j] for j in all_db]'], {}), '([all_db[j] for j in all_db])\n', (3006, 3035), True, 'import numpy as np\n'), ((13030, 13047), 'numpy.min', 'np.min', (['losses_ge'], {}), '(losses_ge)\n', (13036, 13047), True, 'import numpy as np\n'), ((13086, 13107), 'numpy.max', 'np.max', (['corrects_ge_n'], {}), '(corrects_ge_n)\n', (13092, 13107), True, 'import numpy as np\n'), ((13146, 13167), 'numpy.max', 'np.max', (['corrects_ge_e'], {}), '(corrects_ge_e)\n', (13152, 13167), True, 'import numpy as np\n'), ((19260, 19275), 'numpy.argwhere', 'np.argwhere', (['ni'], {}), '(ni)\n', (19271, 19275), True, 'import numpy as np\n'), ((26197, 
26240), 'hr_utils.matchvestype', 'matchvestype', (['cofnodes[0]', 'center_node_type'], {}), '(cofnodes[0], center_node_type)\n', (26209, 26240), False, 'from hr_utils import nodeconnection, matchvestype, nodedist, edgemap\n'), ((27102, 27145), 'hr_utils.matchvestype', 'matchvestype', (['cofnodes[0]', 'center_node_type'], {}), '(cofnodes[0], center_node_type)\n', (27114, 27145), False, 'from hr_utils import nodeconnection, matchvestype, nodedist, edgemap\n'), ((39049, 39092), 'hr_utils.matchvestype', 'matchvestype', (['branchtypei', 'center_node_type'], {}), '(branchtypei, center_node_type)\n', (39061, 39092), False, 'from hr_utils import nodeconnection, matchvestype, nodedist, edgemap\n'), ((40476, 40543), 'networkx.shortest_path', 'nx.shortest_path', (['graph', 'node_type_to_id[center_node_type]', 'tnodeid'], {}), '(graph, node_type_to_id[center_node_type], tnodeid)\n', (40492, 40543), True, 'import networkx as nx\n'), ((40702, 40745), 'hr_utils.matchvestype', 'matchvestype', (['branchtypei', 'center_node_type'], {}), '(branchtypei, center_node_type)\n', (40714, 40745), False, 'from hr_utils import nodeconnection, matchvestype, nodedist, edgemap\n'), ((56482, 56509), 'hr_utils.findedgeid', 'findedgeid', (['graph', 'spi', 'spj'], {}), '(graph, spi, spj)\n', (56492, 56509), False, 'from hr_utils import findedgeid, edgefromnode\n'), ((58221, 58258), 'numpy.argmax', 'np.argmax', (["confoutput['edges'][edgei]"], {}), "(confoutput['edges'][edgei])\n", (58230, 58258), True, 'import numpy as np\n'), ((58562, 58608), 'hr_utils.nodedist', 'nodedist', (['graph', 'edgenodei', 'node_type_to_id[5]'], {}), '(graph, edgenodei, node_type_to_id[5])\n', (58570, 58608), False, 'from hr_utils import nodeconnection, matchvestype, nodedist, edgemap\n'), ((58638, 58684), 'hr_utils.nodedist', 'nodedist', (['graph', 'edgenodei', 'node_type_to_id[6]'], {}), '(graph, edgenodei, node_type_to_id[6])\n', (58646, 58684), False, 'from hr_utils import nodeconnection, matchvestype, nodedist, 
edgemap\n'), ((58713, 58733), 'numpy.zeros', 'np.zeros', (['VESTYPENUM'], {}), '(VESTYPENUM)\n', (58721, 58733), True, 'import numpy as np\n'), ((60085, 60105), 'numpy.zeros', 'np.zeros', (['VESTYPENUM'], {}), '(VESTYPENUM)\n', (60093, 60105), True, 'import numpy as np\n'), ((60517, 60537), 'numpy.zeros', 'np.zeros', (['VESTYPENUM'], {}), '(VESTYPENUM)\n', (60525, 60537), True, 'import numpy as np\n'), ((60982, 61002), 'numpy.zeros', 'np.zeros', (['VESTYPENUM'], {}), '(VESTYPENUM)\n', (60990, 61002), True, 'import numpy as np\n'), ((61447, 61467), 'numpy.zeros', 'np.zeros', (['VESTYPENUM'], {}), '(VESTYPENUM)\n', (61455, 61467), True, 'import numpy as np\n'), ((61914, 61934), 'numpy.zeros', 'np.zeros', (['VESTYPENUM'], {}), '(VESTYPENUM)\n', (61922, 61934), True, 'import numpy as np\n'), ((62382, 62402), 'numpy.zeros', 'np.zeros', (['VESTYPENUM'], {}), '(VESTYPENUM)\n', (62390, 62402), True, 'import numpy as np\n'), ((62793, 62813), 'numpy.argmax', 'np.argmax', (['cprobedge'], {}), '(cprobedge)\n', (62802, 62813), True, 'import numpy as np\n'), ((39354, 39476), 'hr_utils.findmaxprob', 'findmaxprob', (['graph', 'nodestart', 'visited', 'branchtypei', 'None', 'probnodes', 'branch_dist_mean', 'branch_dist_std', 'exp_edge_type'], {}), '(graph, nodestart, visited, branchtypei, None, probnodes,\n branch_dist_mean, branch_dist_std, exp_edge_type)\n', (39365, 39476), False, 'from hr_utils import softmax_probs, find_node_connection_ids, findmaxprob, findallnei, find_nei_nodes\n'), ((58279, 58316), 'numpy.argmax', 'np.argmax', (["confoutput['edges'][edgei]"], {}), "(confoutput['edges'][edgei])\n", (58288, 58316), True, 'import numpy as np\n'), ((28060, 28125), 'numpy.argmax', 'np.argmax', (['[probnodes[ni][center_node_type] for ni in pathnodeid]'], {}), '([probnodes[ni][center_node_type] for ni in pathnodeid])\n', (28069, 28125), True, 'import numpy as np\n'), ((47154, 47180), 'hr_utils.find_nei_nodes', 'find_nei_nodes', (['graph', 'spi'], {}), '(graph, spi)\n', (47168, 
47180), False, 'from hr_utils import softmax_probs, find_node_connection_ids, findmaxprob, findallnei, find_nei_nodes\n'), ((14715, 14734), 'os.path.basename', 'os.path.basename', (['i'], {}), '(i)\n', (14731, 14734), False, 'import os\n'), ((27735, 27823), 'networkx.shortest_path', 'nx.shortest_path', (['graph', 'node_type_to_id[cofnodes[0]]', 'node_type_to_id[cofnodes[1]]'], {}), '(graph, node_type_to_id[cofnodes[0]], node_type_to_id[\n cofnodes[1]])\n', (27751, 27823), True, 'import networkx as nx\n'), ((14839, 14858), 'os.path.basename', 'os.path.basename', (['i'], {}), '(i)\n', (14855, 14858), False, 'import os\n'), ((26426, 26463), 'hr_utils.findedgeid', 'findedgeid', (['graph', 'edgei[0]', 'edgei[1]'], {}), '(graph, edgei[0], edgei[1])\n', (26436, 26463), False, 'from hr_utils import findedgeid, edgefromnode\n'), ((14963, 14982), 'os.path.basename', 'os.path.basename', (['i'], {}), '(i)\n', (14979, 14982), False, 'import os\n'), ((28510, 28598), 'networkx.shortest_path', 'nx.shortest_path', (['graph', 'node_type_to_id[cofnodes[1]]', 'node_type_to_id[cofnodes[2]]'], {}), '(graph, node_type_to_id[cofnodes[1]], node_type_to_id[\n cofnodes[2]])\n', (28526, 28598), True, 'import networkx as nx\n'), ((34838, 34938), 'networkx.shortest_path', 'nx.shortest_path', (['graph', 'node_type_to_id[branch_node_type2]', 'node_type_to_id[branch_node_type3]'], {}), '(graph, node_type_to_id[branch_node_type2], node_type_to_id\n [branch_node_type3])\n', (34854, 34938), True, 'import networkx as nx\n'), ((28284, 28372), 'networkx.shortest_path', 'nx.shortest_path', (['graph', 'node_type_to_id[cofnodes[0]]', 'node_type_to_id[cofnodes[1]]'], {}), '(graph, node_type_to_id[cofnodes[0]], node_type_to_id[\n cofnodes[1]])\n', (28300, 28372), True, 'import networkx as nx\n'), ((28397, 28485), 'networkx.shortest_path', 'nx.shortest_path', (['graph', 'node_type_to_id[cofnodes[0]]', 'node_type_to_id[cofnodes[2]]'], {}), '(graph, node_type_to_id[cofnodes[0]], node_type_to_id[\n 
cofnodes[2]])\n', (28413, 28485), True, 'import networkx as nx\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Apr  8 15:11:25 2019
@author: Titus

Load the 58-32 well temperature/pressure log and plot each channel
against depth, one subplot per channel.
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# Column layout of the log file: Depth T LSPD DTEMP PRES DPRES
NODATA = -999.25  # sentinel value used in the log for missing samples

data = np.genfromtxt(r"..\..\58-32_Temp_Pres_11_08_18\UOFU_MU-ESW1_PT118.txt", skip_header=70)
names = ['Depth', 'T', 'LSPD', 'DTEMP', 'PRES', 'DPRES']

# Build a DataFrame with one column per channel.
df = pd.DataFrame({name: data[:, ind] for ind, name in enumerate(names)})

# Drop every row that contains the no-data sentinel in any channel.
for name in names:
    df = df[df[name] != NODATA]

# One subplot per channel, using Depth as the common x-axis.
# (Removed dead `i = 1` assignment that was immediately shadowed by the loop.)
nchan = len(data[0]) - 1
fig, ax = plt.subplots(nchan)
for i in range(nchan):
    ax[i].plot(df['Depth'], df[names[i + 1]])
    ax[i].set_ylabel(names[i + 1])
plt.show()
| [
"pandas.DataFrame",
"numpy.genfromtxt",
"matplotlib.pyplot.show"
] | [((231, 324), 'numpy.genfromtxt', 'np.genfromtxt', (['"""..\\\\..\\\\58-32_Temp_Pres_11_08_18\\\\UOFU_MU-ESW1_PT118.txt"""'], {'skip_header': '(70)'}), "('..\\\\..\\\\58-32_Temp_Pres_11_08_18\\\\UOFU_MU-ESW1_PT118.txt',\n skip_header=70)\n", (244, 324), True, 'import numpy as np\n'), ((436, 453), 'pandas.DataFrame', 'pd.DataFrame', (['dic'], {}), '(dic)\n', (448, 453), True, 'import pandas as pd\n'), ((647, 657), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (655, 657), True, 'import matplotlib.pyplot as plt\n')] |
from ._accumulate_data import AccumulateData
from ..util import MaxSamplesWarning
from numpy import array, nan
import warnings
import numpy as np
class LDTransformBayesData(AccumulateData):
    """
    Update and store transformation data based on low-discrepancy sequences.
    Tracks the sample points, the FFT of the integrand values at those points,
    and the running sample-size exponent ``m`` across iterations.
    See the stopping criterion that utilize this object for references.
    """
    parameters = ['n_total', 'solution', 'error_bound']
    def __init__(self, stopping_criterion, integrand, m_min: int, m_max: int):
        """
        Args:
            stopping_criterion (StoppingCriterion): a StoppingCriterion instance
            integrand (Integrand): an Integrand instance
            m_min (int): initial n == 2^m_min
            m_max (int): max n == 2^m_max
        """
        # Extract attributes from integrand
        self.stopping_criterion = stopping_criterion
        self.integrand = integrand
        self.measure = self.integrand.measure
        self.distribution = self.measure.distribution
        self.dim = stopping_criterion.dim
        # Set Attributes
        self.m_min = m_min
        self.m_max = m_max
        self.debugEnable = True
        self.n_total = 0 # total number of samples generated
        self.solution = nan
        self.iter = 0  # index into self.mvec; each iteration doubles n
        self.m = self.m_min
        # Sample-size exponents to sweep: n = 2^m for m in [m_min, m_max].
        self.mvec = np.arange(self.m_min, self.m_max + 1, dtype=int)
        # Initialize various temporary storage between iterations
        self.xpts_ = array([]) # shifted lattice points
        self.xun_ = array([]) # un-shifted lattice points
        self.ftilde_ = array([]) # fourier transformed integrand values
        self.ff = self.integrand.period_transform(stopping_criterion.ptransform) # integrand after the periodization transform
        super(LDTransformBayesData, self).__init__()
    def update_data(self):
        """ See abstract method.

        Doubles the sample size (up to 2^m_max) and incrementally updates the
        FFT of the integrand values.

        Returns:
            tuple: (xun_, ftilde_, m) -- un-shifted points, FFT of integrand
            values, and the current sample-size exponent.
        """
        # Generate sample values
        if self.iter < len(self.mvec):
            self.ftilde_, self.xun_, self.xpts_ = self.iter_fft(self.iter, self.xun_, self.xpts_, self.ftilde_)
            self.m = self.mvec[self.iter]
            self.iter += 1
            # update total samples
            self.n_total = 2 ** self.m # updated the total evaluations
        else:
            # Sample budget exhausted: warn but still return the latest state.
            warnings.warn(f'Already used maximum allowed sample size {2 ** self.m_max}.'
                          f' Note that error tolerances may no longer be satisfied',
                          MaxSamplesWarning)
        return self.xun_, self.ftilde_, self.m
    # Efficient FFT computation algorithm, avoids recomputing the full fft
    def iter_fft(self, iter, xun, xpts, ftilde_prev):
        """Evaluate the integrand on the newly added points and update the FFT.

        Args:
            iter (int): iteration index into self.mvec
            xun (ndarray): un-shifted points accumulated so far
            xpts (ndarray): randomized points accumulated so far
            ftilde_prev (ndarray): FFT of integrand values from the previous
                iteration (ignored when iter == 0)

        Returns:
            tuple: (ftilde_, xun_, xpts_) -- updated FFT values and point
            sets, each of length n == 2^self.mvec[iter].
        """
        m = self.mvec[iter]
        n = 2 ** m
        # In every iteration except the first one, "n" number_of_points is doubled,
        # but FFT is only computed for the newly added points.
        # Previously computed FFT is reused.
        if iter == 0:
            # In the first iteration compute full FFT
            # xun_ = mod(bsxfun( @ times, (0:1 / n:1-1 / n)',self.gen_vec),1)
            # xun_ = np.arange(0, 1, 1 / n).reshape((n, 1))
            # xun_ = np.mod((xun_ * self.gen_vec), 1)
            # xpts_ = np.mod(bsxfun( @ plus, xun_, shift), 1)  # shifted
            xun_ = self.distribution.gen_samples(n_min=0, n_max=n, warn=False)
            xpts_ = self.distribution.apply_randomization(xun_)
            # Compute initial FFT
            ftilde_ = np.fft.fft(self.ff(xpts_))  # evaluate integrand's fft
            ftilde_ = ftilde_.reshape((n, 1))
        else:
            # xunnew = np.mod(bsxfun( @ times, (1/n : 2/n : 1-1/n)',self.gen_vec),1)
            # xunnew = np.arange(1 / n, 1, 2 / n).reshape((n // 2, 1))
            # xunnew = np.mod(xunnew * self.gen_vec, 1)
            # xnew = np.mod(bsxfun( @ plus, xunnew, shift), 1)
            xunnew = self.distribution.gen_samples(n_min=n // 2, n_max=n)
            xnew = self.distribution.apply_randomization(xunnew)
            # Interleave old and new points so ordering matches the FFT merge.
            [xun_, xpts_] = self.merge_pts(xun, xunnew, xpts, xnew, n, self.dim)
            mnext = m - 1
            # Compute FFT on next set of new points
            ftilde_next_new = np.fft.fft(self.ff(xnew))
            ftilde_next_new = ftilde_next_new.reshape((n // 2, 1))
            if self.debugEnable:
                self.alert_msg(ftilde_next_new, 'Nan', 'Inf')
            # combine the previous batch and new batch to get FFT on all points
            ftilde_ = self.merge_fft(ftilde_prev, ftilde_next_new, mnext)
        return ftilde_, xun_, xpts_
    # using FFT butterfly plot technique merges two halves of fft
    @staticmethod
    def merge_fft(ftilde_new, ftilde_next_new, mnext):
        """Combine FFTs of the old and new point halves via one butterfly stage.

        Args:
            ftilde_new (ndarray): FFT of the first 2^mnext points
            ftilde_next_new (ndarray): FFT of the newly added 2^mnext points
            mnext (int): log2 of the length of each half

        Returns:
            ndarray: FFT of all 2^(mnext+1) points.
        """
        ftilde_new = np.vstack([ftilde_new, ftilde_next_new])
        nl = 2 ** mnext
        # Boolean mask selecting the first half (the "even" butterfly inputs).
        ptind = np.ndarray(shape=(2 * nl, 1), buffer=np.array([True] * nl + [False] * nl), dtype=bool)
        # Twiddle factors exp(-2*pi*i*k / (2*nl)) for k = 0 .. nl-1.
        coef = np.exp(-2 * np.pi * 1j * np.ndarray(shape=(nl, 1), buffer=np.arange(0, nl), dtype=int) / (2 * nl))
        coefv = np.tile(coef, (1, 1))
        evenval = ftilde_new[ptind].reshape((nl, 1))
        oddval = ftilde_new[~ptind].reshape((nl, 1))
        # Butterfly update: first half = even + w*odd, second half = even - w*odd.
        ftilde_new[ptind] = np.squeeze(evenval + coefv * oddval)
        ftilde_new[~ptind] = np.squeeze(evenval - coefv * oddval)
        return ftilde_new
    # inserts newly generated points with the old set by interleaving them
    # xun - unshifted points
    @staticmethod
    def merge_pts(xun, xunnew, x, xnew, n, d):
        """Interleave old and new point sets (old at even rows, new at odd rows).

        Args:
            xun, xunnew (ndarray): old / new un-shifted points, n//2 rows each
            x, xnew (ndarray): old / new shifted points, n//2 rows each
            n (int): total number of points after the merge
            d (int): dimension of each point

        Returns:
            tuple: (xun, x) -- merged (n, d) arrays.
        """
        temp = np.zeros((n, d))
        temp[0::2, :] = xun
        temp[1::2, :] = xunnew
        xun = temp
        temp = np.zeros((n, d))
        temp[0::2, :] = x
        temp[1::2, :] = xnew
        x = temp
        return xun, x
    # prints debug message if the given variable is Inf, Nan or complex, etc
    # Example: alertMsg(x, 'Inf', 'Imag')
    # prints if variable 'x' is either Infinite or Imaginary
    @staticmethod
    def alert_msg(*args):
        """Print a debug warning when a variable contains NaN/Inf/complex values.

        Args:
            *args: the variable to check, followed by one or more of the
                check names 'Nan', 'Inf', 'Imag'.
        """
        varargin = args
        nargin = len(varargin)
        if nargin > 1:
            i_start = 0
            var_tocheck = varargin[i_start]
            i_start = i_start + 1
            inpvarname = 'variable'
            while i_start < nargin:
                var_type = varargin[i_start]
                i_start = i_start + 1
                if var_type == 'Nan':
                    if np.any(np.isnan(var_tocheck)):
                        print(f'{inpvarname} has NaN values')
                elif var_type == 'Inf':
                    if np.any(np.isinf(var_tocheck)):
                        print(f'{inpvarname} has Inf values')
                elif var_type == 'Imag':
                    if not np.all(np.isreal(var_tocheck)):
                        print(f'{inpvarname} has complex values')
                else:
                    print('unknown type check requested !')
"numpy.tile",
"numpy.squeeze",
"numpy.array",
"numpy.zeros",
"numpy.isnan",
"numpy.vstack",
"numpy.isreal",
"warnings.warn",
"numpy.isinf",
"numpy.arange"
] | [((1307, 1355), 'numpy.arange', 'np.arange', (['self.m_min', '(self.m_max + 1)'], {'dtype': 'int'}), '(self.m_min, self.m_max + 1, dtype=int)\n', (1316, 1355), True, 'import numpy as np\n'), ((1444, 1453), 'numpy.array', 'array', (['[]'], {}), '([])\n', (1449, 1453), False, 'from numpy import array, nan\n'), ((1500, 1509), 'numpy.array', 'array', (['[]'], {}), '([])\n', (1505, 1509), False, 'from numpy import array, nan\n'), ((1562, 1571), 'numpy.array', 'array', (['[]'], {}), '([])\n', (1567, 1571), False, 'from numpy import array, nan\n'), ((4674, 4714), 'numpy.vstack', 'np.vstack', (['[ftilde_new, ftilde_next_new]'], {}), '([ftilde_new, ftilde_next_new])\n', (4683, 4714), True, 'import numpy as np\n'), ((4972, 4993), 'numpy.tile', 'np.tile', (['coef', '(1, 1)'], {}), '(coef, (1, 1))\n', (4979, 4993), True, 'import numpy as np\n'), ((5128, 5164), 'numpy.squeeze', 'np.squeeze', (['(evenval + coefv * oddval)'], {}), '(evenval + coefv * oddval)\n', (5138, 5164), True, 'import numpy as np\n'), ((5194, 5230), 'numpy.squeeze', 'np.squeeze', (['(evenval - coefv * oddval)'], {}), '(evenval - coefv * oddval)\n', (5204, 5230), True, 'import numpy as np\n'), ((5442, 5458), 'numpy.zeros', 'np.zeros', (['(n, d)'], {}), '((n, d))\n', (5450, 5458), True, 'import numpy as np\n'), ((5552, 5568), 'numpy.zeros', 'np.zeros', (['(n, d)'], {}), '((n, d))\n', (5560, 5568), True, 'import numpy as np\n'), ((2247, 2407), 'warnings.warn', 'warnings.warn', (['f"""Already used maximum allowed sample size {2 ** self.m_max}. Note that error tolerances may no longer be satisfied"""', 'MaxSamplesWarning'], {}), "(\n f'Already used maximum allowed sample size {2 ** self.m_max}. 
Note that error tolerances may no longer be satisfied'\n , MaxSamplesWarning)\n", (2260, 2407), False, 'import warnings\n'), ((4792, 4828), 'numpy.array', 'np.array', (['([True] * nl + [False] * nl)'], {}), '([True] * nl + [False] * nl)\n', (4800, 4828), True, 'import numpy as np\n'), ((6302, 6323), 'numpy.isnan', 'np.isnan', (['var_tocheck'], {}), '(var_tocheck)\n', (6310, 6323), True, 'import numpy as np\n'), ((4915, 4931), 'numpy.arange', 'np.arange', (['(0)', 'nl'], {}), '(0, nl)\n', (4924, 4931), True, 'import numpy as np\n'), ((6458, 6479), 'numpy.isinf', 'np.isinf', (['var_tocheck'], {}), '(var_tocheck)\n', (6466, 6479), True, 'import numpy as np\n'), ((6619, 6641), 'numpy.isreal', 'np.isreal', (['var_tocheck'], {}), '(var_tocheck)\n', (6628, 6641), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
__version__ = "0.1.0"
import sys, ntpath, glob
from scipy import spatial, linalg
import numpy as np
import pandas as pd
import datetime as dt
# =========================================================
class mbdata:
    """Time series of multibeam bathymetry surveys.

    Loads per-survey xyz point clouds (``.npy`` files named by timestamp),
    rotates them into a streamwise coordinate frame, tracks the overall
    spatial extent, and grids each survey onto a common raster.
    """

    def __init__(self, xyzDir, xyzFmt, rastDir, rastFmt, flowAz):
        """
        Parameters
        ----------
        xyzDir : str
            Directory containing per-survey xyz ``.npy`` files.
        xyzFmt : str
            ``datetime.strptime`` format matching the xyz file names.
        rastDir : str
            Directory holding (possibly pre-existing) gridded rasters.
        rastFmt : str
            ``datetime.strptime`` format matching the raster file names.
        flowAz : float
            Flow azimuth in degrees; xyz data are rotated by this angle so
            that the x-axis is streamwise.
        """
        self.xyzDir = xyzDir
        self.xyzFmt = xyzFmt
        self.rastDir = rastDir
        self.rastFmt = rastFmt
        self.flowAz = flowAz
        xyzPaths = glob.glob('{0}/*.npy'.format(xyzDir))
        self.nt = len(xyzPaths)
        self.data = pd.DataFrame({'xyzPaths': xyzPaths})
        self.data['time'] = None
        self.data['gridded'] = False
        self.data['xmin'] = None
        self.data['xmax'] = None
        self.data['ymin'] = None
        self.data['ymax'] = None
        self.data['zmin'] = None
        self.data['zmax'] = None
        self.data['raster'] = None
        # Timestamps of rasters that were already gridded in a previous run.
        rastPaths = glob.glob('{0}/*.npy'.format(rastDir))
        tGridded = np.empty(len(rastPaths), 'object')
        for i in range(len(rastPaths)):
            fname = ntpath.basename(rastPaths[i])
            tGridded[i] = dt.datetime.strptime(fname, rastFmt)
        self.tGridded = tGridded
        for i in range(self.nt):
            fname = ntpath.basename(xyzPaths[i])
            # NOTE: `DataFrame.set_value` / `.ix` were removed from pandas;
            # use `.at` / `.loc` instead.
            self.data.at[i, 'time'] = dt.datetime.strptime(fname, xyzFmt)
            xyz = np.load(self.data.loc[i, 'xyzPaths'])
            sw_xyz = self.rotateXY(xyz, flowAz)  # streamwise frame
            self.data.at[i, 'xmin'] = np.nanmin(sw_xyz[:, 0])
            self.data.at[i, 'xmax'] = np.nanmax(sw_xyz[:, 0])
            self.data.at[i, 'ymin'] = np.nanmin(sw_xyz[:, 1])
            self.data.at[i, 'ymax'] = np.nanmax(sw_xyz[:, 1])
            self.data.at[i, 'zmin'] = np.nanmin(sw_xyz[:, 2])
            self.data.at[i, 'zmax'] = np.nanmax(sw_xyz[:, 2])
            # Reuse the raster if this survey was gridded previously.
            if (self.data.at[i, 'time'] == tGridded).any():
                self.data.at[i, 'gridded'] = True
                fname = dt.datetime.strftime(self.data.at[i, 'time'], rastFmt)
                rast = np.load('{0}/{1}'.format(self.rastDir, fname))
                self.data.at[i, 'raster'] = rast
        # Overall extent across all surveys (streamwise frame).
        self.xmin = np.min(self.data.xmin)
        self.xmax = np.max(self.data.xmax)
        self.ymin = np.min(self.data.ymin)
        self.ymax = np.max(self.data.ymax)
        self.zmin = np.min(self.data.zmin)
        self.zmax = np.max(self.data.zmax)
        # BUG FIX: `rotateXY` is a method, not a free function; the original
        # unqualified call raised NameError. Rotate the lower-left corner
        # back into the original (un-rotated) frame.
        self.origin = self.rotateXY(np.array([[self.xmin, self.ymin, self.zmin]]), -flowAz)

    def rotateXY(self, xyz, angle):
        """Rotate every (x, y) point about the origin by `angle` degrees.

        z values pass through unchanged. Returns a new array; the input is
        not modified.
        """
        rot_xyz = np.empty(xyz.shape)
        theta = np.deg2rad(angle)
        x = xyz[:, 0]
        y = xyz[:, 1]
        rot_xyz[:, 0] = x * np.cos(theta) - y * np.sin(theta)
        rot_xyz[:, 1] = x * np.sin(theta) + y * np.cos(theta)
        rot_xyz[:, 2] = xyz[:, 2]
        return rot_xyz

    def translate(self, xyz, xbnd, ybnd):
        """Move data so [xbnd, ybnd] is at the origin (modifies xyz in place)."""
        xyz[:, 0] -= xbnd
        xyz[:, 1] -= ybnd
        return xyz

    def rescaleXY(self, xyz, old_dx, new_dx, old_dy, new_dy):
        """Transform x and y coordinates to grid unit intervals (in place).

        Can handle anisotropic scales (different x and y spacing).
        """
        xyz[:, 0] *= (old_dx / new_dx)
        xyz[:, 1] *= (old_dy / new_dy)
        return xyz

    def surfPoint(self, xyz, x, y):
        """Fit a best-fit plane to points in array([[x, y, z]]) format and
        evaluate it at (x, y)."""
        A = np.c_[xyz[:, 0], xyz[:, 1], np.ones(xyz.shape[0])]
        C, _, _, _ = linalg.lstsq(A, xyz[:, 2])  # plane coefficients
        return C[0] * x + C[1] * y + C[2]

    def genGrid(self, xmin, xmax, dx, ymin, ymax, dy):
        """Generate a regular cell-center coordinate grid (xgrid, ygrid)."""
        nx = int((xmax - xmin) // dx)
        ny = int((ymax - ymin) // dy)
        # BUG FIX: the x remainder must use dx (was `% dy`), and the y
        # coordinates must start from ymin (was `xmin`). Also removed a
        # duplicated, dead `np.meshgrid` call.
        remx = (xmax - xmin) % dx
        remy = (ymax - ymin) % dy
        xcords = np.linspace(xmin + dx / 2, xmax - remx - dx / 2, nx)
        ycords = np.linspace(ymin + dy / 2, ymax - remy - dy / 2, ny)
        return np.meshgrid(xcords, ycords)

    def kdtree(self, xyz):
        """Build a nearest-neighbor KDTree on (x, y) plus an indexable z array."""
        ztree = xyz[:, 2]
        tree = spatial.KDTree(xyz[:, 0:2])
        return tree, ztree

    def gridXYZ(self, xyz, xgrid, ygrid, rSearch):
        """Grid irregular xyz data via a local plane fit at each cell center.

        Cells with fewer than 4 neighbors within rSearch become NaN.
        """
        tree, ztree = self.kdtree(xyz)  # nearest-neighbor search tree
        ny, nx = xgrid.shape
        rast = np.empty((ny, nx), 'float')
        for i in range(ny):
            for j in range(nx):
                x = xgrid[i, j]
                y = ygrid[i, j]
                d = tree.query_ball_point([x, y], rSearch)  # neighbors within rSearch
                if len(d) >= 4:
                    xyz_near = xyz[d]
                    try:
                        # BUG FIX: `surfPoint` is a method; the original
                        # unqualified call raised NameError, so every cell
                        # silently fell through to NaN.
                        rast[i, j] = self.surfPoint(xyz_near, x, y)
                    except Exception:
                        # Degenerate neighborhood (e.g. collinear points).
                        rast[i, j] = np.nan
                else:
                    rast[i, j] = np.nan
        return rast

    def grid_single(self, streamwise_oriented_xyz, nr, xmin, xmax, dx, ymin, ymax, dy):
        """Grid a single streamwise-oriented xyz survey onto the common raster."""
        # Move to minimum upper right quadrant.
        xyz = self.translate(streamwise_oriented_xyz, xmin, ymin)
        # Rescale to coordinates of final DEM cells.
        scaled_xyz = self.rescaleXY(xyz, 1, dx, 1, dy)
        scaled_xext = (xmax - xmin) / dx
        scaled_yext = (ymax - ymin) / dy
        # Generate unit coordinate grid at the origin.
        [scaled_xgrid, scaled_ygrid] = self.genGrid(0, scaled_xext, 1, 0, scaled_yext, 1)
        # Search radius for interpolation (nr = number of half-cell diagonals).
        scaled_rsearch = np.sqrt(0.5) * nr
        return self.gridXYZ(scaled_xyz, scaled_xgrid, scaled_ygrid, scaled_rsearch)

    def grid_data(self, dx, dy, nr):
        """Grid all not-yet-gridded surveys at cell size (dx, dy).

        Parameters
        ----------
        dx, dy : float
            Output cell size in x and y.
        nr : float
            Interpolation search-radius multiplier (see grid_single).

        Returns
        -------
        tuple
            (Z, times, dx, dy, origin) where Z is an (nt, ny, nx) stack.
        """
        # BUG FIX: `self.dx` / `self.dy` were returned but never assigned.
        self.dx = dx
        self.dy = dy
        incomplete = self.data.gridded[self.data.gridded == False].index.tolist()
        # BUG FIX: an empty `incomplete` list previously raised IndexError,
        # which a bare `except:` silently swallowed -- along with any real
        # gridding error. Guard explicitly instead.
        if incomplete:
            print("{0}/{1} complete. Resuming.".format(incomplete[0], self.nt))
            for i in incomplete:
                streamwise_xyz = self.rotateXY(np.load(self.data.loc[i, 'xyzPaths']),
                                               self.flowAz)
                rast = self.grid_single(streamwise_xyz, nr,
                                        self.xmin, self.xmax, dx,
                                        self.ymin, self.ymax, dy)
                self.data.at[i, 'raster'] = rast
                self.data.loc[i, 'gridded'] = True
                fname = dt.datetime.strftime(self.data.loc[i, 'time'], self.rastFmt)
                outfile = '{0}/{1}'.format(self.rastDir, fname)
                np.save(outfile, rast)
                print("({0}/{1}) completed at {2})".format(i + 1, self.nt, dt.datetime.now()))
        print("Gridding Complete")
        # Stack all rasters into a single (nt, ny, nx) array.
        Z = np.empty((self.nt, self.data.raster[0].shape[0],
                      self.data.raster[0].shape[1]), 'float')
        for t in range(self.nt):
            Z[t] = self.data.raster[t]
        return Z, self.data.time, self.dx, self.dy, self.origin
# =============================================================================
def load_data(xyzDir, xyzFmt, rastDir, rastFmt, flowAz):
    """Build and return an mbdata survey collection.

    Thin factory wrapper around the mbdata constructor; see
    mbdata.__init__ for parameter descriptions.
    """
    return mbdata(xyzDir, xyzFmt, rastDir, rastFmt, flowAz)
| [
"numpy.sqrt",
"scipy.spatial.KDTree",
"numpy.array",
"numpy.sin",
"numpy.nanmin",
"numpy.save",
"numpy.load",
"scipy.linalg.lstsq",
"numpy.max",
"numpy.linspace",
"numpy.empty",
"numpy.nanmax",
"numpy.min",
"pandas.DataFrame",
"numpy.meshgrid",
"numpy.ones",
"numpy.deg2rad",
"numpy... | [((573, 609), 'pandas.DataFrame', 'pd.DataFrame', (["{'xyzPaths': xyzPaths}"], {}), "({'xyzPaths': xyzPaths})\n", (585, 609), True, 'import pandas as pd\n'), ((2222, 2244), 'numpy.min', 'np.min', (['self.data.xmin'], {}), '(self.data.xmin)\n', (2228, 2244), True, 'import numpy as np\n'), ((2265, 2287), 'numpy.max', 'np.max', (['self.data.xmax'], {}), '(self.data.xmax)\n', (2271, 2287), True, 'import numpy as np\n'), ((2308, 2330), 'numpy.min', 'np.min', (['self.data.ymin'], {}), '(self.data.ymin)\n', (2314, 2330), True, 'import numpy as np\n'), ((2351, 2373), 'numpy.max', 'np.max', (['self.data.ymax'], {}), '(self.data.ymax)\n', (2357, 2373), True, 'import numpy as np\n'), ((2394, 2416), 'numpy.min', 'np.min', (['self.data.zmin'], {}), '(self.data.zmin)\n', (2400, 2416), True, 'import numpy as np\n'), ((2437, 2459), 'numpy.max', 'np.max', (['self.data.zmax'], {}), '(self.data.zmax)\n', (2443, 2459), True, 'import numpy as np\n'), ((2746, 2765), 'numpy.empty', 'np.empty', (['xyz.shape'], {}), '(xyz.shape)\n', (2754, 2765), True, 'import numpy as np\n'), ((2782, 2799), 'numpy.deg2rad', 'np.deg2rad', (['angle'], {}), '(angle)\n', (2792, 2799), True, 'import numpy as np\n'), ((3859, 3885), 'scipy.linalg.lstsq', 'linalg.lstsq', (['A', 'xyz[:, 2]'], {}), '(A, xyz[:, 2])\n', (3871, 3885), False, 'from scipy import spatial, linalg\n'), ((4307, 4359), 'numpy.linspace', 'np.linspace', (['(xmin + dx / 2)', '(xmax - remx - dx / 2)', 'nx'], {}), '(xmin + dx / 2, xmax - remx - dx / 2, nx)\n', (4318, 4359), True, 'import numpy as np\n'), ((4373, 4425), 'numpy.linspace', 'np.linspace', (['(xmin + dy / 2)', '(ymax - remy - dy / 2)', 'ny'], {}), '(xmin + dy / 2, ymax - remy - dy / 2, ny)\n', (4384, 4425), True, 'import numpy as np\n'), ((4431, 4458), 'numpy.meshgrid', 'np.meshgrid', (['xcords', 'ycords'], {}), '(xcords, ycords)\n', (4442, 4458), True, 'import numpy as np\n'), ((4475, 4502), 'numpy.meshgrid', 'np.meshgrid', (['xcords', 'ycords'], {}), '(xcords, 
ycords)\n', (4486, 4502), True, 'import numpy as np\n'), ((4737, 4764), 'scipy.spatial.KDTree', 'spatial.KDTree', (['xyz[:, 0:2]'], {}), '(xyz[:, 0:2])\n', (4751, 4764), False, 'from scipy import spatial, linalg\n'), ((5073, 5100), 'numpy.empty', 'np.empty', (['(ny, nx)', '"""float"""'], {}), "((ny, nx), 'float')\n", (5081, 5100), True, 'import numpy as np\n'), ((7601, 7694), 'numpy.empty', 'np.empty', (['(self.nt, self.data.raster[0].shape[0], self.data.raster[0].shape[1])', '"""float"""'], {}), "((self.nt, self.data.raster[0].shape[0], self.data.raster[0].shape[\n 1]), 'float')\n", (7609, 7694), True, 'import numpy as np\n'), ((1087, 1116), 'ntpath.basename', 'ntpath.basename', (['rastPaths[i]'], {}), '(rastPaths[i])\n', (1102, 1116), False, 'import sys, ntpath, glob\n'), ((1143, 1179), 'datetime.datetime.strptime', 'dt.datetime.strptime', (['fname', 'rastFmt'], {}), '(fname, rastFmt)\n', (1163, 1179), True, 'import datetime as dt\n'), ((1267, 1295), 'ntpath.basename', 'ntpath.basename', (['xyzPaths[i]'], {}), '(xyzPaths[i])\n', (1282, 1295), False, 'import sys, ntpath, glob\n'), ((1395, 1431), 'numpy.load', 'np.load', (["self.data.ix[i, 'xyzPaths']"], {}), "(self.data.ix[i, 'xyzPaths'])\n", (1402, 1431), True, 'import numpy as np\n'), ((2491, 2536), 'numpy.array', 'np.array', (['[[self.xmin, self.ymin, self.zmin]]'], {}), '([[self.xmin, self.ymin, self.zmin]])\n', (2499, 2536), True, 'import numpy as np\n'), ((6398, 6410), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (6405, 6410), True, 'import numpy as np\n'), ((1339, 1374), 'datetime.datetime.strptime', 'dt.datetime.strptime', (['fname', 'xyzFmt'], {}), '(fname, xyzFmt)\n', (1359, 1374), True, 'import datetime as dt\n'), ((1523, 1546), 'numpy.nanmin', 'np.nanmin', (['sw_xyz[:, 0]'], {}), '(sw_xyz[:, 0])\n', (1532, 1546), True, 'import numpy as np\n'), ((1590, 1613), 'numpy.nanmax', 'np.nanmax', (['sw_xyz[:, 0]'], {}), '(sw_xyz[:, 0])\n', (1599, 1613), True, 'import numpy as np\n'), ((1657, 1680), 
'numpy.nanmin', 'np.nanmin', (['sw_xyz[:, 1]'], {}), '(sw_xyz[:, 1])\n', (1666, 1680), True, 'import numpy as np\n'), ((1724, 1747), 'numpy.nanmax', 'np.nanmax', (['sw_xyz[:, 1]'], {}), '(sw_xyz[:, 1])\n', (1733, 1747), True, 'import numpy as np\n'), ((1791, 1814), 'numpy.nanmin', 'np.nanmin', (['sw_xyz[:, 2]'], {}), '(sw_xyz[:, 2])\n', (1800, 1814), True, 'import numpy as np\n'), ((1858, 1881), 'numpy.nanmax', 'np.nanmax', (['sw_xyz[:, 2]'], {}), '(sw_xyz[:, 2])\n', (1867, 1881), True, 'import numpy as np\n'), ((2022, 2076), 'datetime.datetime.strftime', 'dt.datetime.strftime', (["self.data.ix[i, 'time']", 'rastFmt'], {}), "(self.data.ix[i, 'time'], rastFmt)\n", (2042, 2076), True, 'import datetime as dt\n'), ((2869, 2882), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (2875, 2882), True, 'import numpy as np\n'), ((2889, 2902), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (2895, 2902), True, 'import numpy as np\n'), ((2930, 2943), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (2936, 2943), True, 'import numpy as np\n'), ((2950, 2963), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (2956, 2963), True, 'import numpy as np\n'), ((3818, 3839), 'numpy.ones', 'np.ones', (['xyz.shape[0]'], {}), '(xyz.shape[0])\n', (3825, 3839), True, 'import numpy as np\n'), ((7279, 7338), 'datetime.datetime.strftime', 'dt.datetime.strftime', (["self.data.ix[i, 'time']", 'self.rastFmt'], {}), "(self.data.ix[i, 'time'], self.rastFmt)\n", (7299, 7338), True, 'import datetime as dt\n'), ((7418, 7440), 'numpy.save', 'np.save', (['outfile', 'rast'], {}), '(outfile, rast)\n', (7425, 7440), True, 'import numpy as np\n'), ((6876, 6912), 'numpy.load', 'np.load', (["self.data.ix[i, 'xyzPaths']"], {}), "(self.data.ix[i, 'xyzPaths'])\n", (6883, 6912), True, 'import numpy as np\n'), ((7513, 7530), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (7528, 7530), True, 'import datetime as dt\n')] |
#!/usr/bin/env python
"""Script to generate melody annotations from pitch annotations files"""
from __future__ import print_function
import argparse
import os
import csv
import numpy as np
import medleydb
HOP = 256.0 # samples
FS = 44100.0 # samples/second
def get_time_stamps(total_duration, fs, hop):
    """Get an array of evenly spaced time stamps.

    Parameters
    ----------
    total_duration : float
        Duration in seconds
    fs : float
        Sample rate (samples/second)
    hop : float
        Hop size (samples)

    Returns
    -------
    time_stamps : np.array
        Array of evenly spaced time stamps, hop/fs seconds apart.
    """
    # Number of frames needed to cover the whole duration; round up so the
    # final partial frame is still represented.
    n_stamps = int(np.ceil((total_duration * fs) / hop))
    # Frame i sits at time i * hop / fs seconds.  (The original also created
    # a dead `time_stamps = []` list that was immediately overwritten.)
    return np.arange(n_stamps) * (hop / fs)
def make_blank_melody_sequence(total_duration, fs, hop):
    """Make a melody sequence with f0 values equal to 0.

    Parameters
    ----------
    total_duration : float
        Duration in seconds
    fs : float
        Sample rate (samples/second)
    hop : float
        Hop size (samples)

    Returns
    -------
    melody_sequence : np.array
        A melody sequence with a column of time stamps and a column of zeros
    """
    time_stamps = get_time_stamps(total_duration, fs, hop)
    melody_sequence = np.zeros((len(time_stamps), 2))
    # Vectorized column assignment instead of a Python loop over rows.
    melody_sequence[:, 0] = time_stamps
    return melody_sequence
def sec_to_idx(time_in_seconds, fs, hop):
    """Convert a time in seconds to the nearest evenly spaced frame index.

    Parameters
    ----------
    time_in_seconds : float
        Time value in seconds
    fs : float
        Sample rate (samples/second)
    hop : float
        Hop size (samples)

    Returns
    -------
    time_idx : int
        Index of the closest time frame
    """
    frames_per_second = float(fs) / float(hop)
    return int(np.round(time_in_seconds * frames_per_second))
def add_sequence_to_melody(total_duration, f0_sequence, melody_sequence, fs,
                           hop, dim=1, start_t=0, end_t=-1):
    """Write an f0 sequence into a melody container.

    Parameters
    ----------
    total_duration : float
        Track duration in seconds
    f0_sequence : list or None
        List of (time in seconds, frequency in Hz) pairs
    melody_sequence : np.array
        Melody container; the first column holds time stamps
    fs : float
        Sample rate (samples/second)
    hop : float
        Hop size (samples)
    dim : int, default=1
        Column index the frequencies are written into
    start_t : float, default=0
        Start time of the interval to fill
    end_t : float, default=-1
        End time of the interval; -1 means the full track duration

    Returns
    -------
    melody_sequence : np.array
        The container with the new frequency values written in
    """
    # Clamp the requested interval to [0, total_duration].
    start_t = max(start_t, 0)
    if end_t > total_duration:
        end_t = total_duration
    start_idx = sec_to_idx(start_t, fs, hop)
    end_idx = sec_to_idx(total_duration if end_t == -1 else end_t, fs, hop)
    if f0_sequence:
        for entry in f0_sequence:
            frame = sec_to_idx(entry[0], fs, hop)
            # Only frames inside the requested interval are overwritten.
            if start_idx <= frame < end_idx:
                melody_sequence[frame][dim] = entry[1]
    return melody_sequence
def create_melody1_annotation(mtrack, fs=FS, hop=HOP):
    """Create a melody1 annotation from pitch annotations

    Parameters
    ----------
    mtrack : Multitrack
        Multitrack object
    fs : float
        Sample rate (samples/second)
    hop : float
        Hop size (samples)

    Returns
    -------
    melody1 : np.array or None
        Melody 1 annotation if a predominant stem exists, else None
    """
    predominant_stem = mtrack.predominant_stem
    if predominant_stem is None:
        return None
    f0_annotation = predominant_stem.pitch_annotation
    # (A leftover debug `print(f0_annotation)` that dumped the whole pitch
    # annotation to stdout was removed here.)
    melody_sequence = make_blank_melody_sequence(mtrack.duration, fs, hop)
    return add_sequence_to_melody(
        mtrack.duration, f0_annotation, melody_sequence, fs, hop
    )
def create_melody2_annotation(mtrack, fs=FS, hop=HOP):
    """Create a melody2 annotation from pitch annotations

    Parameters
    ----------
    mtrack : Multitrack
        Multitrack object
    fs : float
        Sample rate (samples/second)
    hop : float
        Hop size (samples)

    Returns
    -------
    melody2 : np.array or None
        Melody 2 annotation if an intervals file exists, else None
    """
    if not os.path.exists(mtrack.melody_intervals_fpath):
        return None
    intervals = []
    # 'rU' (universal newlines) was removed in Python 3.11; newline='' is
    # the documented open() mode for files handed to the csv module.
    with open(mtrack.melody_intervals_fpath, 'r', newline='') as fhandle:
        linereader = csv.reader(fhandle, delimiter='\t')
        for line in linereader:
            start_t = float(line[0])
            end_t = float(line[1])
            stem_idx = int(line[2])
            intervals.append([stem_idx, start_t, end_t])
    melody_sequence = make_blank_melody_sequence(mtrack.duration, fs, hop)
    for stem_idx, start_t, end_t in intervals:
        stem = mtrack.stems[stem_idx]
        f0_annotation = stem.pitch_annotation
        if f0_annotation is not None:
            melody_sequence = add_sequence_to_melody(
                mtrack.duration, f0_annotation, melody_sequence, fs, hop,
                start_t=start_t, end_t=end_t
            )
        else:
            print("Warning: stem %s has no annotation" % stem_idx)
    return melody_sequence
def create_melody3_annotation(mtrack, fs=FS, hop=HOP):
    """Create a melody3 annotation from pitch annotations

    Parameters
    ----------
    mtrack : Multitrack
        Multitrack object
    fs : float
        Sample rate (samples/second)
    hop : float
        Hop size (samples)

    Returns
    -------
    melody3 : np.array or None
        Melody 3 annotation if a rankings file exists, else None.
        Column 0 is time stamps; one extra frequency column is appended
        per ranked stem, in rank order.
    """
    melody_rankings = mtrack.melody_rankings
    if melody_rankings != {}:
        # Invert the mapping so we can iterate stems in rank order.
        # presumably melody_rankings maps stem -> rank; verify against the
        # medleydb MultiTrack API.
        inverse_rankings = dict((v, k) for k, v in melody_rankings.items())
        melody_sequence = make_blank_melody_sequence(mtrack.duration, fs, hop)
        dim = 1
        first = True
        for k in sorted(inverse_rankings.keys()):
            if not first:
                # Grow the container by one frequency column for each
                # additional ranked stem; existing columns are preserved.
                n, m = melody_sequence.shape
                temp_mel = np.zeros((n, m + 1))
                temp_mel[:, :-1] = melody_sequence
                melody_sequence = temp_mel
                dim += 1
            first = False
            stem = mtrack.stems[inverse_rankings[k]]
            f0_annotation = stem.pitch_annotation
            # Write this stem's pitch track into its dedicated column.
            melody_sequence = add_sequence_to_melody(
                mtrack.duration, f0_annotation, melody_sequence,
                fs, hop, dim=dim
            )
        melody3 = melody_sequence
    else:
        melody3 = None
    return melody3
def write_melodies_to_csv(mtrack, melody1, melody2, melody3):
    """Write melodies to csv files in the correct directory.

    Parameters
    ----------
    mtrack : Multitrack
        Multitrack object (supplies melody1_fpath/melody2_fpath/melody3_fpath)
    melody1 : np.array or None
        Melody 1 annotation
    melody2 : np.array or None
        Melody 2 annotation
    melody3 : np.array or None
        Melody 3 annotation
    """
    # The three branches were identical except for the melody number and
    # target path, so write them in one loop.
    melodies = [
        (1, melody1, mtrack.melody1_fpath),
        (2, melody2, mtrack.melody2_fpath),
        (3, melody3, mtrack.melody3_fpath),
    ]
    for num, melody, fpath in melodies:
        if melody is not None:
            print("writing melody %s..." % num)
            # newline='' lets csv.writer control line endings (avoids the
            # blank-row artifact on Windows).
            with open(fpath, "w", newline='') as fhandle:
                writer = csv.writer(fhandle)
                writer.writerows(melody)
        else:
            print("melody %s empty" % num)
def main(args):
    """Create melody 1/2/3 annotations for one multitrack and optionally
    write them to disk.

    Parameters
    ----------
    args : argparse.Namespace
        Parsed CLI arguments with ``track_id`` and ``write_output``.
    """
    mtrack = medleydb.MultiTrack(args.track_id)
    melodies = (
        create_melody1_annotation(mtrack),
        create_melody2_annotation(mtrack),
        create_melody3_annotation(mtrack),
    )
    if args.write_output:
        write_melodies_to_csv(mtrack, *melodies)
if __name__ == "__main__":
    # CLI entry point: generate melody annotations for a single track.
    parser = argparse.ArgumentParser(description="")
    parser.add_argument("track_id",
                        type=str,
                        help="MedleyDB track id. Ex. MusicDelta_Rock")
    # NOTE(review): argparse `type=bool` is a known pitfall -- bool("False")
    # is True, so any non-empty value parses as True. Confirm intent before
    # relying on passing a falsy value here.
    parser.add_argument("write_output",
                        type=bool,
                        default=True,
                        help="If true, write the output to a file")
    main(parser.parse_args())
| [
"os.path.exists",
"numpy.ceil",
"medleydb.MultiTrack",
"argparse.ArgumentParser",
"csv.writer",
"numpy.zeros",
"csv.reader",
"numpy.arange"
] | [((4718, 4763), 'os.path.exists', 'os.path.exists', (['mtrack.melody_intervals_fpath'], {}), '(mtrack.melody_intervals_fpath)\n', (4732, 4763), False, 'import os\n'), ((8408, 8442), 'medleydb.MultiTrack', 'medleydb.MultiTrack', (['args.track_id'], {}), '(args.track_id)\n', (8427, 8442), False, 'import medleydb\n'), ((8720, 8759), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '""""""'}), "(description='')\n", (8743, 8759), False, 'import argparse\n'), ((687, 721), 'numpy.ceil', 'np.ceil', (['(total_duration * fs / hop)'], {}), '(total_duration * fs / hop)\n', (694, 721), True, 'import numpy as np\n'), ((743, 762), 'numpy.arange', 'np.arange', (['n_stamps'], {}), '(n_stamps)\n', (752, 762), True, 'import numpy as np\n'), ((4881, 4916), 'csv.reader', 'csv.reader', (['fhandle'], {'delimiter': '"""\t"""'}), "(fhandle, delimiter='\\t')\n", (4891, 4916), False, 'import csv\n'), ((7713, 7732), 'csv.writer', 'csv.writer', (['fhandle'], {}), '(fhandle)\n', (7723, 7732), False, 'import csv\n'), ((7957, 7976), 'csv.writer', 'csv.writer', (['fhandle'], {}), '(fhandle)\n', (7967, 7976), False, 'import csv\n'), ((8201, 8220), 'csv.writer', 'csv.writer', (['fhandle'], {}), '(fhandle)\n', (8211, 8220), False, 'import csv\n'), ((6678, 6698), 'numpy.zeros', 'np.zeros', (['(n, m + 1)'], {}), '((n, m + 1))\n', (6686, 6698), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""hicstraw.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1V4CRdM_hOt4KcM7jBFWjQ6NQyWxECim3
# 1. Setup and Metadata
"""
import numpy as np
import plotly.graph_objs as go
from plotly.offline import iplot
"""
Straw module
Straw enables programmatic access to .hic files.
.hic files store the contact matrices from Hi-C experiments and the
normalization and expected vectors, along with meta-data in the header.
Usage: strawObj = straw <hicFile(s)>
matrixObj = strawObj.getNormalizedMatrix <chr1> <chr2> <NONE/VC/VC_SQRT/KR> <BP/FRAG> <binsize>
data = matrixObj.getDataFromBinRegion <x1,x2,y1,y2>
Example:
import straw
strawObj = straw(filename)
matrixObj = strawObj.getNormalizedMatrix('5', '5', 'KR', 'BP', 5000)
result = matrixObj.getDataFromBinRegion(0,500,0,500)
for i in range(len(result[0])):
... print("{0}\t{1}\t{2}".format(result[0][i], result[1][i], result[2][i]))
See https://github.com/theaidenlab/straw/wiki/Python for more documentation
"""
from __future__ import absolute_import, division, print_function, unicode_literals
__author__ = "<NAME>, <NAME>, <NAME>, <NAME>, <NAME>"
__license__ = "MIT"
import struct
import zlib
import requests
import io
import concurrent.futures
import math
def __readcstr(f):
    """ Helper function for reading in C-style (null-terminated) string
    from a binary file-like object.

    Returns the decoded UTF-8 string without the terminator.
    Raises EOFError if the stream ends before a terminator is seen.
    """
    buf = b""
    while True:
        b = f.read(1)
        if b is None or b == b"\0":
            return buf.decode("utf-8")
        elif b == b"":
            # BUG FIX: the original compared against the str "" which is
            # never equal to a bytes object, so hitting EOF made this loop
            # spin forever instead of raising.
            raise EOFError("Buffer unexpectedly empty while trying to read null-terminated string")
        else:
            buf += b
"""
functions for chrom.sizes
internal representation is a dictionary with
chromosome name as the key
value maps to a tuple containing the index and chromosome length
"""
class ChromDotSizes:
    """Lookup table of chromosome information.

    ``data`` maps chromosome name -> (index, length).
    """

    def __init__(self, data):
        self.data = data

    def _lookup(self, chrom, field):
        # Shared accessor: field 0 is the chromosome index, field 1 its length.
        try:
            return int(self.data[chrom][field])
        except:
            raise ValueError(str(chrom) + " not in chrom.sizes. Check that the chromosome name matches the genome.\n")

    def getLength(self, chrom):
        """Length (bp) of *chrom*; raises ValueError for unknown names."""
        return self._lookup(chrom, 1)

    def getIndex(self, chrom):
        """Index of *chrom*; raises ValueError for unknown names."""
        return self._lookup(chrom, 0)

    def figureOutEndpoints(self, chrAndPositions):
        """Parse 'chrom' or 'chrom:start:end' into (chrom, start, end),
        defaulting to the whole chromosome."""
        parts = chrAndPositions.split(":")
        chrom = parts[0]
        start, end = 0, self.getLength(chrom)
        if len(parts) == 3:
            start, end = int(parts[1]), int(parts[2])
        return chrom, start, end
def read_metadata(infile, verbose=False):
    """
    Reads the metadata of HiC file from header.

    Args
        infile: str, path to the HiC file (local path or http(s) URL)
        verbose: bool, if True also print the attribute dictionary

    Returns
        metadata: dict, containing the metadata.
            Keys of the metadata:
            HiC version,
            Master index,
            Genome ID (str),
            NVI / NVI size (only present for version > 8),
            Attribute dictionary (dict),
            Chromosomes (dict),
            Base pair-delimited resolutions (list),
            Fragment-delimited resolutions (list).

    Raises
        SystemExit: if the URL cannot be fetched or the magic string is wrong.
    """
    def read_cstr(stream):
        # Local null-terminated string reader so this function is
        # self-contained (no dependency on the module-private helper).
        buf = b""
        while True:
            b = stream.read(1)
            if b is None or b == b"\0":
                return buf.decode("utf-8")
            if b == b"":
                raise EOFError("Buffer unexpectedly empty while trying to read null-terminated string")
            buf += b

    metadata = {}
    if infile.startswith("http"):
        # try URL first. 100K should be sufficient for the header
        headers = {'range': 'bytes=0-100000', 'x-amz-meta-requester': 'straw'}
        s = requests.Session()
        r = s.get(infile, headers=headers)
        if r.status_code >= 400:
            print("Error accessing " + infile)
            print("HTTP status code " + str(r.status_code))
            # BUG FIX: the original called sys.exit(1), but `sys` is never
            # imported in this module; raise SystemExit directly instead.
            raise SystemExit(1)
        req = io.BytesIO(r.content)
    else:
        req = open(infile, 'rb')
    magic_string = struct.unpack('<3s', req.read(3))[0]
    req.read(1)  # skip the null terminator after the magic string
    if magic_string != b"HIC":
        raise SystemExit('This does not appear to be a HiC file magic string is incorrect')
    version = struct.unpack('<i', req.read(4))[0]
    metadata['HiC version'] = version
    masterindex = struct.unpack('<q', req.read(8))[0]
    metadata['Master index'] = masterindex
    metadata['Genome ID'] = read_cstr(req)
    if version > 8:
        # Normalization vector index location and size (v9+ headers only).
        metadata['NVI'] = struct.unpack('<q', req.read(8))[0]
        metadata['NVI size'] = struct.unpack('<q', req.read(8))[0]
    # Attribute dictionary (stats + graphs): key/value C-strings.
    nattributes = struct.unpack('<i', req.read(4))[0]
    attrs = {}
    for _ in range(nattributes):
        key = read_cstr(req)
        attrs[key] = read_cstr(req)
    metadata['Attribute dictionary'] = attrs
    nChrs = struct.unpack('<i', req.read(4))[0]
    chroms = {}
    for _ in range(nChrs):
        key = read_cstr(req)
        # Chromosome length is 64-bit in v9+, 32-bit before.
        if version > 8:
            chroms[key] = struct.unpack('q', req.read(8))[0]
        else:
            chroms[key] = struct.unpack('<i', req.read(4))[0]
    metadata["Chromosomes"] = chroms
    nBpRes = struct.unpack('<i', req.read(4))[0]
    metadata["Base pair-delimited resolutions"] = [
        struct.unpack('<i', req.read(4))[0] for _ in range(nBpRes)]
    nFrag = struct.unpack('<i', req.read(4))[0]
    metadata["Fragment-delimited resolutions"] = [
        struct.unpack('<i', req.read(4))[0] for _ in range(nFrag)]
    for k in metadata:
        if k != 'Attribute dictionary':
            print(k, ':', metadata[k])
    if verbose:
        print('Attribute dictionary', ':', metadata['Attribute dictionary'])
    return metadata
# Downloads Hi-C file
#!wget -cq https://www.dropbox.com/s/t3d3kmoerm54dlr/GSM1551620_HIC071.hic
# Define files and HiC read settings
path = '/content/'
hicname = 'GSM1551620_HIC071.hic'
hic_metadata = read_metadata(hicname)
chromosomes = list(hic_metadata["Chromosomes"].keys())
base_resolutions = hic_metadata["Base pair-delimited resolutions"]
fragment_resolutions = hic_metadata["Fragment-delimited resolutions"]
"""# 2. Hi-C Data Processing"""
# Commented out IPython magic to ensure Python compatibility.
# %%capture
# !pip install hic-straw
# import straw
# Saves all self-contact data in Hi-C file
def self_chromosome_interactions(chromosomes, hicname, res):
    """Collect intra-chromosome (self) contact records for each chromosome.

    Returns a dict mapping chromosome name -> straw contact records;
    chromosomes whose data cannot be fetched are skipped with a warning.
    """
    genome = {}
    for chrom in chromosomes:
        try:
            # Contact records for chrom vs itself at the given resolution.
            genome[chrom] = straw.straw('observed', 'KR', hicname, chrom, chrom, 'BP', res)
        except:
            print(f"Interaction data for chromosome {chrom} not found")
    return genome
# Converts chromosome interactions from contactRecords into a list
def contactRecords_to_list(records):
    """Convert straw contactRecord objects into an (N, 3) numpy array of
    (binX, binY, counts) rows."""
    table = np.zeros((len(records), 3))
    for row, contact in enumerate(records):
        table[row] = (int(contact.binX), int(contact.binY), contact.counts)
    return table
# Converts data in resolution units into particle numbers
def res_correct(table, res):
    """Convert bin coordinates (in resolution units) into 1-based particle
    numbers, modifying *table* in place and returning it."""
    for col in (0, 1):
        table[:, col] //= res
        table[:, col] += 1
    return table
# Keeps entries where contact counts are higher than user-given threshold
# and gets rid of atom self contacts
def lammps_correct(table, threshold):
    """Return the rows of *table* whose contact count exceeds *threshold*
    and whose two particle ids differ."""
    keep = (table[:, 2] > threshold) & (table[:, 0] != table[:, 1])
    return table[keep]
res = 250000  # Hi-C bin size in base pairs
threshold = 3.3  # minimum contact count kept for the LAMMPS model
#genome = self_chromosome_interactions(chromosomes, hicname, res)
genome = {}
for i in chromosomes:
    # Adds chromosome interaction to genome dictionary,
    # interactions stored as list of contactRecord objects.
    # NOTE(review): `straw` is only imported inside the commented-out
    # notebook cell above; confirm it is available at runtime.
    try:
        genome[i] = straw.straw('observed','KR', hicname, i, i, 'BP', res)
    except:
        print(f"Interaction data for chromosome {i} not found")
x_correct = lammps_correct(res_correct(contactRecords_to_list(genome["X"]), res), threshold)
"""# 3. LAMMPS Input & Data File Construction"""
import time
# function to plot 3D structure
def plot_3D(coords):
    """Render an (N, 3) coordinate array as a 3D lines+markers plot."""
    n = len(coords)
    x, y, z = np.zeros(n), np.zeros(n), np.zeros(n)
    for idx, point in enumerate(coords):
        x[idx], y[idx], z[idx] = point[0], point[1], point[2]
    trace = go.Scatter3d(
        x=x, y=y, z=z, mode='lines+markers',
        marker=dict(
            size=5,
            colorscale='Viridis',
        ),
    )
    layout = go.Layout(title='Initial Random Structure')
    iplot(go.Figure(data=[trace], layout=layout))
    return None
# function to check if next site is already occupied
def check_if_free(lattice_coords, next_coords, index):
    """Return True if *next_coords* does not collide with any of the first
    *index* already-occupied lattice sites."""
    for occupied in lattice_coords[:index]:
        if (occupied[0] == next_coords[0]
                and occupied[1] == next_coords[1]
                and occupied[2] == next_coords[2]):
            return False
    return True
# function to create random 3D walk on lattice
def random_walk(n):
    """Build an n-site self-avoiding random walk on the integer lattice.

    Returns an (n, 3) array of lattice coordinates starting at the origin.
    When the walk traps itself (no free neighbor found within ~0.1 s of
    retries), the last `backtrack` sites are erased and regrown.
    """
    backtrack = 10  # how many sites to undo when the walk gets stuck
    lattice_coords = np.zeros([n, 3])
    # The six unit steps along +/- x, y, z.
    steps = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0],
                    [-1.0, 0.0, 0.0], [0.0, -1.0, 0.0], [0.0, 0.0, -1.0]])
    i = 1
    while i < n:
        issue = False
        s = time.time()
        # (debug logging, kept for reference)
        # if i % 100 == 0: print(i, s-start, 's')
        rand = np.random.randint(0, 6)
        next_coords = lattice_coords[i - 1] + steps[rand]
        while check_if_free(lattice_coords, next_coords, i) == False:
            # Keep re-rolling until a free neighbor is found.
            rand = np.random.randint(0, 6)
            next_coords = lattice_coords[i-1] + steps[rand]
            e = time.time()
            # Wall-clock heuristic: if we spin for >0.1 s, assume trapped.
            # NOTE(review): this makes the result timing-dependent and thus
            # non-reproducible even with a seeded RNG.
            if e - s > 0.1:
                issue = True
                #print('Stuck! Go back and find a new way... %s' % i)
                for k in range(1, backtrack + 1):
                    lattice_coords[i-k] = np.zeros(3)
                # NOTE(review): i can go below 1 here if the walk gets stuck
                # within the first `backtrack` sites -- confirm intended.
                i -= backtrack + 1
                break
        if issue == False:
            lattice_coords[i] = next_coords
            i += 1
    return lattice_coords
# function to create molecule tags
def create_molecule_tags(n, lengths):
    """Assign a molecule (chain) tag to each of the *n* particles; the tag
    increments right after each cumulative chain-length boundary."""
    boundaries = set(np.cumsum(lengths))
    tags = []
    tag = 1
    for particle in range(1, n + 1):
        if particle - 1 in boundaries:
            tag += 1
        tags.append(tag)
    return tags
# function to create bonds
def create_bonds(n, lengths):
    """Return [i, i+1] bond pairs between consecutive particles, skipping
    pairs that straddle a chain boundary."""
    boundaries = set(np.cumsum(lengths))
    return [[i, i + 1] for i in range(1, n) if i not in boundaries]
# function to create angles
def create_angles(n, lengths):
    """Return [i, i+1, i+2] angle triples between consecutive particles,
    skipping triples that straddle a chain boundary."""
    boundaries = set(np.cumsum(lengths))
    return [[i, i + 1, i + 2] for i in range(1, n - 1)
            if i not in boundaries and i + 1 not in boundaries]
# function to create data file
def create_datafile(n, lengths, spacing, lattice_numbers, dimensions):
    """Generate a LAMMPS data file (random_coil_N{n}.dat) describing a
    random-coil initial structure, and plot the structure.

    n: total particle count; lengths: per-chain lengths; spacing: lattice
    spacing multiplier; dimensions: simulation box edge lengths.
    """
    chains = int(len(lengths)) # number of chains
    bond_number = int(sum(lengths) - chains) # number of bonds
    angle_number = 0
    for length in lengths:
        if length > 2.0:
            angle_number += int(length - 2) # number of bond angles
    lattice_coords = random_walk(n) * spacing # coordinates of lattice points
    tags = create_molecule_tags(n, lengths) # molecule tags
    bonds = create_bonds(n, lengths) # indicates bonds between particles
    angles = create_angles(n, lengths) # indicates angles between particles
    # open datafile to write to
    datafile = open(f'random_coil_N{n}.dat', 'w')
    # NOTE(review): {length} below is the leftover loop variable, i.e. the
    # LAST chain's length, not a per-file property -- confirm intended.
    datafile.write(f'LAMMPS data file for random 3D walk on lattice: N = {n}, Chain length = {length}\n\n')
    datafile.write(f'{n} atoms\n1 atom types\n{bond_number} bonds\n2 bond types\n1000 extra bond per atom\n{angle_number} angles\n1 angle types\n\n')
    # Box bounds are centered on the origin.
    datafile.write(f'{-dimensions[0] / 2} {dimensions[0] / 2} xlo xhi\n{-dimensions[1] / 2} {dimensions[1] / 2} ylo yhi\n{-dimensions[2] / 2} {dimensions[2] / 2} zlo zhi\n\n')
    datafile.write('Masses\n\n1 1\n\nAtoms\n')
    for i in range(n):
        # atom-id, molecule tag, atom type, x, y, z, image flags
        datafile.write(f'\n{i + 1}\t{tags[i]}\t1\t{lattice_coords[i][0]}\t{lattice_coords[i][1]}\t{lattice_coords[i][2]}\t0\t0\t0')
    if bond_number > 0:
        datafile.write('\n\nBonds\n')
        for i in range(len(bonds)):
            datafile.write(f'\n{i + 1}\t1\t{bonds[i][0]}\t{bonds[i][1]}')
    if angle_number > 0:
        datafile.write('\n\nAngles\n')
        for i in range(len(angles)):
            datafile.write(f'\n{i + 1}\t1\t{angles[i][0]}\t{angles[i][1]}\t{angles[i][2]}')
    datafile.close()
    plot_3D(lattice_coords)
    return None
# function to create input file
def create_inputfile(n, timesteps, bondconnect):
    """Write the LAMMPS input script 'in.input' for an n-particle run.

    Parameters
    ----------
    n : int
        Total particle count (used to name the data/restart files).
    timesteps : int
        Number of simulation timesteps to run.
    bondconnect : array-like of shape (M, 2)
        Pairs of particle ids to connect with type-2 (harmonic) bonds,
        derived from the Hi-C contact map.
    """
    dataname = f'random_coil_N{n}.dat'  # data file name
    lang = np.random.randint(1, 1000000)  # generates noise term for langevin
    # BUG FIX: the original never closed the file handle, so the script
    # could be left unflushed; `with` guarantees close on all paths.
    with open('in.input', 'w') as datafile:
        datafile.write('log sim.log\nunits lj\n\n')
        datafile.write('atom_style angle\nboundary p p p\n\n')
        datafile.write('neighbor 4 bin\nneigh_modify every 1 delay 1 check yes\n\n')
        datafile.write('atom_modify sort 0 0\n\n')
        datafile.write(f'#restart 1000000 N{n}.restart\n\n')
        datafile.write(f'read_data {dataname}\n')
        datafile.write('reset_timestep 0\n\n')
        datafile.write(f'write_data equilibrated_N{n}.dat\n\n')
        datafile.write('group all type 1\n\n')
        datafile.write('dump 1 all custom 1000 sim.dump id x y z ix iy iz\n')
        datafile.write('dump_modify 1 format line "%d %.5f %.5f %.5f %d %d %d"\n\n')
        datafile.write('angle_style cosine\nangle_coeff 1 0.0\n\n')
        datafile.write('pair_style lj/cut 1.12246152962189\n')
        datafile.write('pair_modify shift yes\n')
        datafile.write('pair_coeff * * 1.0 1.0\n\n')
        datafile.write('bond_style hybrid harmonic fene\n')
        datafile.write('bond_coeff 1 fene 30.0 10 1.0 1.0\n')
        datafile.write('bond_coeff 2 harmonic 1.0 2.2\n')
        datafile.write('special_bonds fene\n\n')
        datafile.write('fix 1 all nve\n')
        datafile.write(f'fix 2 all langevin 1.0 1.0 1.0 {lang}\n\n')
        datafile.write('thermo 50000\n\n')
        # One type-2 bond per Hi-C contact pair.
        for i in range(len(bondconnect)):
            datafile.write(f'create_bonds single/bond 2 {int(bondconnect[i][0])} {int(bondconnect[i][1])} special yes\n')
        datafile.write('thermo_style custom step temp etotal epair emol press pxx pyy pzz lx ly lz pe ke ebond evdwl\n\n')
        datafile.write('timestep 0.00001\n')
        datafile.write(f'run {timesteps}')
# Defining LAMMPS properties
# Particle count = largest particle id appearing in the contact table.
n = int(max(x_correct.max(axis = 0)[0], x_correct.max(axis = 0)[1])) # total number of particles
lengths = [n] # length of chains (single chain covering all particles)
spacing = 3.0 # lattice spacing
lattice_numbers = np.array([200, 200, 200])
dimensions = lattice_numbers * 2 # dimensions of box
timesteps = 1000000
# Creating input and data files for simulation (timed for reference).
start = time.time()
create_datafile(n, lengths, spacing, lattice_numbers, dimensions)
create_inputfile(n, timesteps, x_correct)
end = time.time()
"""# 4. LAMMPS Simulation"""
# Commented out IPython magic to ensure Python compatibility.
# %%capture
# # Creates LAMMPS executable
# !wget https://github.com/lmhale99/atomman-demo/raw/main/lmp.gz
# !gunzip lmp.gz
# !chmod 755 lmp
#
# import subprocess
# import shlex
# Runs LAMMPS simulation based on input file
def run_lammps(infile):
    """Run the local LAMMPS executable `./lmp` on *infile*.

    Raises subprocess.CalledProcessError if LAMMPS exits non-zero.
    """
    # BUG FIX: `import subprocess` / `import shlex` only appear inside a
    # commented-out notebook cell above, so referencing them as globals
    # raised NameError; import them locally instead.
    import shlex
    import subprocess
    command = shlex.split(f'./lmp -in {infile}')
    subprocess.run(command, check=True, capture_output=False, text=True)
# Reads dump file and extracts coordinate data
def readdump(file):
    """Parse a LAMMPS dump file into {timestep: (N, 7) array} where each
    row is (id, x, y, z, ix, iy, iz)."""
    lines = open(file).read().splitlines()
    dump = {}
    pos = 0
    # Each frame starts at an "ITEM: TIMESTEP" marker; atom rows run from
    # the ATOMS header to the next frame (or end of file).
    while pos < len(lines):
        timestep = int(lines[lines.index("ITEM: TIMESTEP", pos) + 1].strip())
        begin = lines.index("ITEM: ATOMS id x y z ix iy iz", pos)
        try:
            end = lines.index("ITEM: TIMESTEP", pos + 1)
        except ValueError:
            end = len(lines)
        atom_rows = lines[begin + 1:end]
        data = np.zeros((len(atom_rows), 7))
        for row, text in enumerate(atom_rows):
            fields = text.split()
            data[row][0] = int(fields[0])
            for col in range(1, 7):
                data[row][col] = float(fields[col])
        dump[timestep] = data
        pos = end
    return dump
# Displays simulation at timestep
def dumpplot(dump, timestep):
    """Plot the particle positions stored in *dump* at *timestep*."""
    snapshot = dump[timestep]
    trace = go.Scatter3d(
        x=snapshot[:, 1], y=snapshot[:, 2], z=snapshot[:, 3],
        mode='lines+markers',
        marker=dict(
            size=5,
            color=snapshot[:, 0],
            colorscale='Viridis',
        ),
    )
    layout = go.Layout(title=f'Simulation at timestep {timestep}')
    iplot(go.Figure(data=[trace], layout=layout))
# Run the simulation, then parse and visualize the resulting trajectory.
run_lammps("in.input")
dump = readdump("sim.dump")
dumpplot(dump, 100000)
"""# 5. Analysis"""
#!wget -cq https://www.dropbox.com/s/nkbuklgq2c9ewvw/nWTXa
from scipy.stats import pearsonr
from scipy.spatial.distance import pdist, squareform
#Finds all contacts
def find_contacts(data, timestep, dist):
    """Return an (C, 3) array of (id_i, id_j, distance) for every particle
    pair at *timestep* whose distance is strictly between 0 and *dist*."""
    snapshot = data[timestep]
    coords = snapshot[:, 1:4]
    ids = snapshot[:, 0]
    # Upper triangle so each pair is counted once.
    dmat = np.triu(squareform(pdist(coords)))
    rows, cols = np.where((dmat < dist) & (dmat > 0))
    npairs = rows.size
    id_a = np.take(ids, rows).reshape(npairs, 1)
    id_b = np.take(ids, cols).reshape(npairs, 1)
    dvals = dmat[rows.reshape(npairs, 1), cols.reshape(npairs, 1)]
    return np.hstack((id_a, id_b, dvals))
def find_nwtxa_contacts(file, dist):
    """Parse a whitespace-delimited contact file into an (N, 3) array,
    keeping rows where id0 > id1, the value is present (not 'NA'), and the
    value exceeds *dist*."""
    raw_lines = open(file).read().splitlines()
    table = np.zeros((len(raw_lines), 3))
    count = 0
    for raw in raw_lines:
        fields = raw.split()
        if (float(fields[0]) > float(fields[1])
                and fields[2] != 'NA'
                and float(fields[2]) > dist):
            table[count] = (int(fields[0]), int(fields[1]), float(fields[2]))
            count += 1
    # Trim the unused tail of the preallocated table.
    return table[0:count]
def plotcontactmap(contacts):
    """Draw a square scatter plot of contact pairs (column 0 vs column 1)."""
    scatter = go.Scatter(
        x=contacts[:, 0],
        y=contacts[:, 1],
        mode='markers',
        marker=dict(
            opacity=0.9,
            color='Black',
            size=1,
        ),
    )
    fig = go.Figure(data=[scatter])
    fig.update_layout(
        width=600,
        height=600,
        title="Contact Map",
    )
    # Lock the aspect ratio so the map is square.
    fig.update_yaxes(
        scaleanchor="x",
        scaleratio=1,
    )
    fig.show()
def combine(mapone, maptwo):
    """Rescale maptwo's particle ids onto mapone's id range (in place) and
    stack the two contact maps vertically."""
    def _max_id(contact_map):
        col_max = contact_map.max(axis=0)
        return int(max(col_max[0], col_max[1]))
    scale = _max_id(mapone) / _max_id(maptwo)
    maptwo[:, 0] *= scale
    maptwo[:, 1] *= scale
    return np.vstack((mapone, maptwo))
# Compare simulated contacts at the final timestep with experimental
# contacts from the nWTXa file, on a shared id scale.
contacts = find_contacts(dump, 100000, 3.3)
nwtxa = find_nwtxa_contacts("nWTXa", 5)
combined = combine(nwtxa, contacts)
plotcontactmap(combined) | [
"numpy.reshape",
"plotly.offline.iplot",
"requests.Session",
"numpy.where",
"scipy.spatial.distance.pdist",
"io.BytesIO",
"numpy.ma.size",
"numpy.take",
"numpy.array",
"numpy.zeros",
"numpy.random.randint",
"numpy.cumsum",
"numpy.vstack",
"plotly.graph_objs.Layout",
"plotly.graph_objs.Fi... | [((14996, 15021), 'numpy.array', 'np.array', (['[200, 200, 200]'], {}), '([200, 200, 200])\n', (15004, 15021), True, 'import numpy as np\n'), ((15152, 15163), 'time.time', 'time.time', ([], {}), '()\n', (15161, 15163), False, 'import time\n'), ((15278, 15289), 'time.time', 'time.time', ([], {}), '()\n', (15287, 15289), False, 'import time\n'), ((8745, 8789), 'plotly.graph_objs.Layout', 'go.Layout', ([], {'title': 'f"""Initial Random Structure"""'}), "(title=f'Initial Random Structure')\n", (8754, 8789), True, 'import plotly.graph_objs as go\n'), ((8802, 8840), 'plotly.graph_objs.Figure', 'go.Figure', ([], {'data': '[trace]', 'layout': 'layout'}), '(data=[trace], layout=layout)\n', (8811, 8840), True, 'import plotly.graph_objs as go\n'), ((8849, 8859), 'plotly.offline.iplot', 'iplot', (['fig'], {}), '(fig)\n', (8854, 8859), False, 'from plotly.offline import iplot\n'), ((9316, 9332), 'numpy.zeros', 'np.zeros', (['[n, 3]'], {}), '([n, 3])\n', (9324, 9332), True, 'import numpy as np\n'), ((9345, 9465), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0], [-1.0, 0.0, 0.0], [0.0,\n -1.0, 0.0], [0.0, 0.0, -1.0]]'], {}), '([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0], [-1.0, 0.0, \n 0.0], [0.0, -1.0, 0.0], [0.0, 0.0, -1.0]])\n', (9353, 9465), True, 'import numpy as np\n'), ((10436, 10454), 'numpy.cumsum', 'np.cumsum', (['lengths'], {}), '(lengths)\n', (10445, 10454), True, 'import numpy as np\n'), ((10666, 10684), 'numpy.cumsum', 'np.cumsum', (['lengths'], {}), '(lengths)\n', (10675, 10684), True, 'import numpy as np\n'), ((10887, 10905), 'numpy.cumsum', 'np.cumsum', (['lengths'], {}), '(lengths)\n', (10896, 10905), True, 'import numpy as np\n'), ((13080, 13109), 'numpy.random.randint', 'np.random.randint', (['(1)', '(1000000)'], {}), '(1, 1000000)\n', (13097, 13109), True, 'import numpy as np\n'), ((17007, 17060), 'plotly.graph_objs.Layout', 'go.Layout', ([], {'title': 'f"""Simulation at timestep 
{timestep}"""'}), "(title=f'Simulation at timestep {timestep}')\n", (17016, 17060), True, 'import plotly.graph_objs as go\n'), ((17071, 17109), 'plotly.graph_objs.Figure', 'go.Figure', ([], {'data': '[trace]', 'layout': 'layout'}), '(data=[trace], layout=layout)\n', (17080, 17109), True, 'import plotly.graph_objs as go\n'), ((17116, 17126), 'plotly.offline.iplot', 'iplot', (['fig'], {}), '(fig)\n', (17121, 17126), False, 'from plotly.offline import iplot\n'), ((17571, 17617), 'numpy.where', 'np.where', (['((distances < dist) & (distances > 0))'], {}), '((distances < dist) & (distances > 0))\n', (17579, 17617), True, 'import numpy as np\n'), ((17622, 17653), 'numpy.ma.size', 'np.ma.size', (['contact_IDarrays[0]'], {}), '(contact_IDarrays[0])\n', (17632, 17653), True, 'import numpy as np\n'), ((19096, 19123), 'numpy.vstack', 'np.vstack', (['(mapone, maptwo)'], {}), '((mapone, maptwo))\n', (19105, 19123), True, 'import numpy as np\n'), ((3598, 3616), 'requests.Session', 'requests.Session', ([], {}), '()\n', (3614, 3616), False, 'import requests\n'), ((3836, 3857), 'io.BytesIO', 'io.BytesIO', (['r.content'], {}), '(r.content)\n', (3846, 3857), False, 'import io\n'), ((9544, 9555), 'time.time', 'time.time', ([], {}), '()\n', (9553, 9555), False, 'import time\n'), ((9633, 9656), 'numpy.random.randint', 'np.random.randint', (['(0)', '(6)'], {}), '(0, 6)\n', (9650, 9656), True, 'import numpy as np\n'), ((9804, 9827), 'numpy.random.randint', 'np.random.randint', (['(0)', '(6)'], {}), '(0, 6)\n', (9821, 9827), True, 'import numpy as np\n'), ((9904, 9915), 'time.time', 'time.time', ([], {}), '()\n', (9913, 9915), False, 'import time\n'), ((17532, 17545), 'scipy.spatial.distance.pdist', 'pdist', (['coords'], {}), '(coords)\n', (17537, 17545), False, 'from scipy.spatial.distance import pdist, squareform\n'), ((17819, 17856), 'numpy.reshape', 'np.reshape', (['contact_IDs[:, 0]', '(C, 1)'], {}), '(contact_IDs[:, 0], (C, 1))\n', (17829, 17856), True, 'import numpy as np\n'), 
((17856, 17893), 'numpy.reshape', 'np.reshape', (['contact_IDs[:, 1]', '(C, 1)'], {}), '(contact_IDs[:, 1], (C, 1))\n', (17866, 17893), True, 'import numpy as np\n'), ((17694, 17727), 'numpy.take', 'np.take', (['IDs', 'contact_IDarrays[0]'], {}), '(IDs, contact_IDarrays[0])\n', (17701, 17727), True, 'import numpy as np\n'), ((17747, 17780), 'numpy.take', 'np.take', (['IDs', 'contact_IDarrays[1]'], {}), '(IDs, contact_IDarrays[1])\n', (17754, 17780), True, 'import numpy as np\n'), ((10135, 10146), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (10143, 10146), True, 'import numpy as np\n'), ((17903, 17942), 'numpy.reshape', 'np.reshape', (['contact_IDarrays[0]', '(C, 1)'], {}), '(contact_IDarrays[0], (C, 1))\n', (17913, 17942), True, 'import numpy as np\n'), ((17943, 17982), 'numpy.reshape', 'np.reshape', (['contact_IDarrays[1]', '(C, 1)'], {}), '(contact_IDarrays[1], (C, 1))\n', (17953, 17982), True, 'import numpy as np\n')] |
import functools
import math
import os
import random
import time
import json
import sys
import inspect
import shutil
import torch
import infirunner.steppers
import infirunner.param
import numpy as np
from contextlib import contextmanager
from torch.utils.tensorboard import SummaryWriter
from infirunner.util import make_trial_id
DEBUG_MODE = 'debug'
TRAIN_MODE = 'train'
TURBO_MODE = 'turbo'
class RunnerCapsule:
    def __init__(self, mode, turbo_index, exp_path, trial_id, budget_start, budget_end, start_params):
        """Set up a trial capsule.

        mode: one of DEBUG_MODE / TRAIN_MODE / TURBO_MODE (validated by
        set_mode). turbo_index 0 marks the leader process. exp_path is the
        experiment root; per-trial files live under exp_path/trial_id.
        start_params seeds the hyperparameter dict; budget_start/budget_end
        bound the trial's budget window.
        """
        self.param = infirunner.param.ParamGenerator(self)
        self.params = start_params or {}
        self.turbo_index = turbo_index
        self.initialized = False
        self.exp_path = os.path.abspath(exp_path)
        self.trial_id = trial_id
        # wrappers registered by the param generator, keyed by param name
        self._param_wrappers = {}
        self._tb_writer = None  # lazily created tensorboard SummaryWriter
        self.steps = 0
        self.log_files = {}
        # state getters are installed later via set_state_getter()
        self.get_model_state = None
        self.get_metadata_state = None
        # stream handles managed by redirect_io()/restore_io()
        self.stdout = self.stderr = self.orig_stdout = self.orig_stderr = None
        self.mode = self.set_mode(mode)
        self.prev_time = 0
        self.start_time = time.time()
        self.budget_current = self.budget_start = budget_start
        self.budget_end = budget_end
        self.metric = None
@property
def var_params(self):
res = {}
for k, v in self.param_gen.items():
if not isinstance(v, infirunner.param.ConstParam):
res[k] = self.params[k]
return res
    @property
    def param_gen(self):
        # Mapping of param name -> registered wrapper object.
        return self._param_wrappers
    @property
    def save_path(self):
        # Per-trial directory under the experiment root.
        return os.path.join(self.exp_path, self.trial_id)
    def is_leader(self):
        # Turbo worker 0 is the leader (e.g. the only one saving sources).
        return self.turbo_index == 0
    def is_debug(self):
        return self.mode == DEBUG_MODE
    def is_train(self):
        return self.mode == TRAIN_MODE
    def is_turbo(self):
        return self.mode == TURBO_MODE
    def set_mode(self, mode):
        """Validate and set the run mode; returns the mode for chaining."""
        # NOTE(review): assert-based validation disappears under `python -O`;
        # consider raising ValueError if callers rely on this check.
        assert mode in (DEBUG_MODE, TRAIN_MODE, TURBO_MODE)
        self.mode = mode
        return mode
def redirect_io(self):
os.makedirs(self.save_path, exist_ok=True)
stdout_file = os.path.join(self.save_path, f'std_out_{self.turbo_index}.log')
stderr_file = os.path.join(self.save_path, f'std_err_{self.turbo_index}.log')
self.orig_stdout = sys.stdout
self.orig_stderr = sys.stderr
self.stdout = sys.stdout = open(stdout_file, 'a', encoding='utf-8')
self.stderr = sys.stderr = open(stderr_file, 'a', encoding='utf-8')
def restore_io(self):
if self.stdout:
sys.stdout = self.orig_stdout
if self.stderr:
sys.stderr = self.orig_stderr
try:
self.stdout.close()
except:
pass
try:
self.stderr.close()
except:
pass
del self.stdout
del self.stderr
def save_sources(self, white_list=None):
if not self.is_leader():
return
if white_list is None:
frm = inspect.stack()[1]
mod = inspect.getmodule(frm[0])
if not mod:
raise AttributeError('save_sources needs to be called from a module!')
white_list = [mod.__name__.split('.')[0]]
source_dir = os.path.join(self.save_path, 'src')
if os.path.exists(source_dir):
shutil.rmtree(source_dir, ignore_errors=True)
mods = {k: m for k, m in sys.modules.items()
if any(k.startswith(p) for p in white_list)}
for k, m in mods.items():
try:
source = inspect.getsource(m)
except OSError:
continue
except TypeError:
continue
m_comps = k.split('.')
source_save_path = os.path.join(source_dir, *m_comps[:-1])
os.makedirs(source_save_path, exist_ok=True)
with open(os.path.join(source_save_path, m_comps[-1] + '.py'), 'w', encoding='utf-8') as f:
f.write(source)
def set_state_getter(self, model_state_getter, metadata_state_getter):
self.get_model_state = model_state_getter
self.get_metadata_state = metadata_state_getter
def serialize_state(self):
now = time.time()
return {
'budget_start': self.budget_start,
'budget_end': self.budget_end,
'budget_current': self.budget_current,
'save_path': self.save_path,
'trial_id': self.trial_id,
'prev_time': self.prev_time,
'start_time': self.start_time,
'cur_time': now,
'relative_time': self.prev_time + now - self.start_time,
'mode': self.mode,
'steps': self.steps,
'metric': self.metric,
'params': self.params,
'param_gens': self.serialize_param_gen()
}
def running_average(self, key):
return infirunner.steppers.RunningAverage(self, key)
def running_averages(self, *keys, prefix=''):
return infirunner.steppers.RunningAverageGroup(self, keys, prefix=prefix)
def step(self, size=1):
self.steps += size
def __del__(self):
for file in self.log_files.values():
try:
file.close()
except:
pass
def get_log_file_handle(self, key):
log_file = self.log_files.get(key)
if log_file is None:
os.makedirs(os.path.join(self.save_path, 'logs'), exist_ok=True)
log_file = open(os.path.join(self.save_path, 'logs', f'{key}.tsv'), 'a', encoding='utf-8')
return log_file
def log_scalar(self, key, value):
if not self.is_leader():
return
now = time.time()
rel_time = self.prev_time + now - self.start_time
self.get_log_file_handle(key).write(f'{self.steps}\t{now}\t{rel_time}\t{value}\n')
def log_scalars(self, key, values):
if not self.is_leader():
return
log_file = self.get_log_file_handle(key)
now = time.time()
rel_time = self.prev_time + now - self.start_time
for v in values:
log_file.write(f'{self.steps}\t{now}\t{rel_time}\t{v}\n')
def log_file(self, key, ext, data):
if not self.is_leader():
return
p = os.path.join(self.save_path, 'logs', key)
os.makedirs(p, exist_ok=True)
with open(os.path.join(p, f'{self.steps:015}.{ext}', 'wb')) as file:
file.write(data)
def serialize_param_gen(self):
ret = {}
for k, v in self._param_wrappers.items():
ret[k] = {'type': v.__class__.__name__,
'opts': v.serialize()}
return ret
def _guard_params(self):
if self.initialized:
raise RuntimeError('Cannot produce/set parameters after it is used')
def set_params(self, param_map):
self._guard_params()
self.params.update(param_map)
def report_metric(self, metric, save_state=True, consume_budget=1):
self.budget_current += consume_budget
self.metric = metric
with open(os.path.join(self.save_path, 'metric.tsv'), 'a', encoding='ascii') as f:
f.write(f'{self.budget_current}\t{time.time()}\t{metric}\n')
if save_state:
self.save_state()
if not math.isfinite(metric) or self.budget_current >= self.budget_end:
if self._tb_writer is not None:
self._tb_writer.close()
print('exit at metric', metric, 'budget', self.budget_current, file=sys.stderr)
sys.exit()
def save_state(self, empty_ok=False):
if self.turbo_index != 0:
return
out_dir = os.path.join(self.save_path, 'saves', f'{self.budget_current:05}')
os.makedirs(out_dir, exist_ok=True)
with open(os.path.join(out_dir, 'state.json'), 'w', encoding='utf-8') as f:
json.dump(self.serialize_state(), f, ensure_ascii=False, indent=2, allow_nan=True)
if self.get_metadata_state:
with open(os.path.join(out_dir, 'metadata.json'), 'w', encoding='utf-8') as f:
json.dump(self.get_metadata_state(), f, ensure_ascii=False, indent=2, allow_nan=True)
if not empty_ok:
assert self.get_model_state
if self.get_model_state:
torch.save(self.get_model_state(), os.path.join(out_dir, 'model.pt'))
def load_state(self, load_path, override_params=True):
if load_path is None:
return None, None
with open(os.path.join(load_path, 'state.json'), 'r', encoding='utf-8') as f:
meta = json.load(f)
self.steps = meta['steps']
self.prev_time = meta['relative_time']
print('loading saved states from', load_path, 'steps', self.steps, 'prev_time', self.prev_time)
if override_params:
self.params = meta['params']
try:
with open(os.path.join(load_path, 'metadata.json'), 'r', encoding='utf-8') as f:
metadata = json.load(f)
except FileNotFoundError:
metadata = None
try:
model_state = torch.load(os.path.join(load_path, 'model.pt'), map_location='cpu')
except FileNotFoundError:
model_state = None
return model_state, metadata
def get_tb_writer(self):
if not self.is_leader():
return None
if self._tb_writer is None:
self._tb_writer = SummaryWriter(log_dir=self.save_path)
return self._tb_writer
def gen_params(self, use_default=False, skip_const=False):
new_params = {}
for k, w in self._param_wrappers.items():
if skip_const and type(w) == infirunner.param.ConstParam:
continue
new_params[k] = w.default if use_default else w.get_next_value()
return new_params
def initialize(self):
if not self.initialized:
self.initialized = True
new_params = self.gen_params(use_default=self.mode == DEBUG_MODE, skip_const=False)
new_params.update(self.params)
self.params = new_params
@contextmanager
def deterministically_stochastic(self):
old_cuda_state, old_np_state, old_state, old_th_state = self.seed_random()
yield
if torch.cuda.is_available():
torch.cuda.set_rng_state(old_cuda_state)
torch.set_rng_state(old_th_state)
np.random.set_state(old_np_state)
random.setstate(old_state)
def seed_random(self):
old_state = random.getstate()
old_np_state = np.random.get_state()
old_th_state = torch.get_rng_state()
if torch.cuda.is_available():
old_cuda_state = torch.cuda.get_rng_state()
else:
old_cuda_state = None
seed = self.turbo_index + 1
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(seed)
return old_cuda_state, old_np_state, old_state, old_th_state
def load(self, load_budget=None, initialize=True):
if self.initialized:
raise RuntimeError('Cannot call load after initialization')
if load_budget is None:
load_budget = self.budget_start
if load_budget > 0:
load_path = os.path.join(self.save_path, 'saves', f'{load_budget:05}')
else:
load_path = None
if initialize:
self.initialize()
return self.load_state(load_path)
class DynamicStateGetter:
    """Callable aggregator over a set of zero-argument state getters.

    Each registered getter is a no-arg callable returning a dict; calling
    the instance merges all results into one dict.  Iteration order over
    the underlying set is arbitrary, so overlapping keys come from an
    unspecified getter.
    """

    def __init__(self):
        self.state_getters = set()

    def add_state_getter(self, new_state_getter):
        """Register *new_state_getter* for inclusion in future calls."""
        self.state_getters.add(new_state_getter)

    def remove_state_getter(self, state_getter):
        """Unregister a previously added getter (KeyError if absent)."""
        self.state_getters.remove(state_getter)

    def __call__(self):
        merged = {}
        for getter in self.state_getters:
            merged.update(getter())
        return merged
# Module-level handle to the most recently constructed capsule; set by
# make_capsule(), None until make_capsule() has run.
active_capsule = None
def make_capsule():
    """Build a RunnerCapsule from the INFR_* environment variables and
    install it as the module-global ``active_capsule``.

    Environment variables consulted:
      INFR_EXP_PATH     experiment directory (default: <cwd>/_infirunner)
      INFR_MODE         one of debug/train/turbo (default: debug)
      INFR_TURBO_INDEX  integer worker index (default: 0)
      INFR_TRIAL        trial id (default: freshly generated)
      INFR_START_STATE  path to a JSON file with start_budget/end_budget/params
      INFR_BUDGET       "start,end" override for the budget range
      INFR_REDIRECT_IO  when set, redirect stdout/stderr into the trial dir
    """
    env = os.environ.get
    exp_path = env('INFR_EXP_PATH')
    if exp_path is None:
        exp_path = os.path.join(os.getcwd(), '_infirunner')
    mode = env('INFR_MODE')
    assert mode is None or mode in (DEBUG_MODE, TRAIN_MODE, TURBO_MODE)
    if mode is None:
        mode = DEBUG_MODE
    turbo_index = int(env('INFR_TURBO_INDEX', '0'))
    trial_id = env('INFR_TRIAL')
    if trial_id is None:
        trial_id = make_trial_id()
    # Budget window and starting parameters: either from a start-state
    # file, or open-ended defaults.
    budget_start, budget_end, start_params = 0, sys.maxsize, {}
    start_state_path = env('INFR_START_STATE')
    if start_state_path:
        with open(start_state_path, 'r', encoding='utf-8') as f:
            start_obj = json.load(f)
        budget_start = start_obj['start_budget']
        budget_end = start_obj['end_budget']
        start_params = start_obj['params']
    # An explicit INFR_BUDGET takes precedence over the start-state file.
    budget_spec = env('INFR_BUDGET')
    if budget_spec is not None:
        start_str, end_str = budget_spec.split(',')
        budget_start = int(start_str)
        budget_end = int(end_str)
    capsule = RunnerCapsule(mode, turbo_index, exp_path, trial_id,
                            budget_start, budget_end, start_params)
    if env('INFR_REDIRECT_IO'):
        capsule.redirect_io()
    global active_capsule
    active_capsule = capsule
    return capsule
def chain(*decorators):
    """Compose *decorators* into a single decorator.

    ``chain(a, b)(f)`` is equivalent to stacking ``@a`` above ``@b``:
    the last decorator listed is applied first, yielding ``a(b(f))``.
    """
    def apply_all(func):
        result = func
        for decorator in reversed(decorators):
            result = decorator(result)
        return result
    return apply_all
| [
"numpy.random.get_state",
"numpy.random.set_state",
"random.setstate",
"torch.cuda.is_available",
"torch.get_rng_state",
"sys.exit",
"os.path.exists",
"torch.utils.tensorboard.SummaryWriter",
"torch.cuda.get_rng_state",
"random.getstate",
"numpy.random.seed",
"infirunner.util.make_trial_id",
... | [((12159, 12190), 'os.environ.get', 'os.environ.get', (['"""INFR_EXP_PATH"""'], {}), "('INFR_EXP_PATH')\n", (12173, 12190), False, 'import os\n'), ((12288, 12315), 'os.environ.get', 'os.environ.get', (['"""INFR_MODE"""'], {}), "('INFR_MODE')\n", (12302, 12315), False, 'import os\n'), ((12513, 12541), 'os.environ.get', 'os.environ.get', (['"""INFR_TRIAL"""'], {}), "('INFR_TRIAL')\n", (12527, 12541), False, 'import os\n'), ((12624, 12658), 'os.environ.get', 'os.environ.get', (['"""INFR_START_STATE"""'], {}), "('INFR_START_STATE')\n", (12638, 12658), False, 'import os\n'), ((13044, 13073), 'os.environ.get', 'os.environ.get', (['"""INFR_BUDGET"""'], {}), "('INFR_BUDGET')\n", (13058, 13073), False, 'import os\n'), ((13351, 13385), 'os.environ.get', 'os.environ.get', (['"""INFR_REDIRECT_IO"""'], {}), "('INFR_REDIRECT_IO')\n", (13365, 13385), False, 'import os\n'), ((720, 745), 'os.path.abspath', 'os.path.abspath', (['exp_path'], {}), '(exp_path)\n', (735, 745), False, 'import os\n'), ((1142, 1153), 'time.time', 'time.time', ([], {}), '()\n', (1151, 1153), False, 'import time\n'), ((1636, 1678), 'os.path.join', 'os.path.join', (['self.exp_path', 'self.trial_id'], {}), '(self.exp_path, self.trial_id)\n', (1648, 1678), False, 'import os\n'), ((2106, 2148), 'os.makedirs', 'os.makedirs', (['self.save_path'], {'exist_ok': '(True)'}), '(self.save_path, exist_ok=True)\n', (2117, 2148), False, 'import os\n'), ((2171, 2234), 'os.path.join', 'os.path.join', (['self.save_path', 'f"""std_out_{self.turbo_index}.log"""'], {}), "(self.save_path, f'std_out_{self.turbo_index}.log')\n", (2183, 2234), False, 'import os\n'), ((2257, 2320), 'os.path.join', 'os.path.join', (['self.save_path', 'f"""std_err_{self.turbo_index}.log"""'], {}), "(self.save_path, f'std_err_{self.turbo_index}.log')\n", (2269, 2320), False, 'import os\n'), ((3310, 3345), 'os.path.join', 'os.path.join', (['self.save_path', '"""src"""'], {}), "(self.save_path, 'src')\n", (3322, 3345), False, 'import os\n'), ((3357, 
3383), 'os.path.exists', 'os.path.exists', (['source_dir'], {}), '(source_dir)\n', (3371, 3383), False, 'import os\n'), ((4290, 4301), 'time.time', 'time.time', ([], {}), '()\n', (4299, 4301), False, 'import time\n'), ((5784, 5795), 'time.time', 'time.time', ([], {}), '()\n', (5793, 5795), False, 'import time\n'), ((6101, 6112), 'time.time', 'time.time', ([], {}), '()\n', (6110, 6112), False, 'import time\n'), ((6371, 6412), 'os.path.join', 'os.path.join', (['self.save_path', '"""logs"""', 'key'], {}), "(self.save_path, 'logs', key)\n", (6383, 6412), False, 'import os\n'), ((6421, 6450), 'os.makedirs', 'os.makedirs', (['p'], {'exist_ok': '(True)'}), '(p, exist_ok=True)\n', (6432, 6450), False, 'import os\n'), ((7779, 7845), 'os.path.join', 'os.path.join', (['self.save_path', '"""saves"""', 'f"""{self.budget_current:05}"""'], {}), "(self.save_path, 'saves', f'{self.budget_current:05}')\n", (7791, 7845), False, 'import os\n'), ((7854, 7889), 'os.makedirs', 'os.makedirs', (['out_dir'], {'exist_ok': '(True)'}), '(out_dir, exist_ok=True)\n', (7865, 7889), False, 'import os\n'), ((10415, 10440), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (10438, 10440), False, 'import torch\n'), ((10503, 10536), 'torch.set_rng_state', 'torch.set_rng_state', (['old_th_state'], {}), '(old_th_state)\n', (10522, 10536), False, 'import torch\n'), ((10545, 10578), 'numpy.random.set_state', 'np.random.set_state', (['old_np_state'], {}), '(old_np_state)\n', (10564, 10578), True, 'import numpy as np\n'), ((10587, 10613), 'random.setstate', 'random.setstate', (['old_state'], {}), '(old_state)\n', (10602, 10613), False, 'import random\n'), ((10662, 10679), 'random.getstate', 'random.getstate', ([], {}), '()\n', (10677, 10679), False, 'import random\n'), ((10703, 10724), 'numpy.random.get_state', 'np.random.get_state', ([], {}), '()\n', (10722, 10724), True, 'import numpy as np\n'), ((10748, 10769), 'torch.get_rng_state', 'torch.get_rng_state', ([], {}), '()\n', (10767, 
10769), False, 'import torch\n'), ((10781, 10806), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (10804, 10806), False, 'import torch\n'), ((10956, 10973), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (10967, 10973), False, 'import random\n'), ((10982, 11002), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (10996, 11002), True, 'import numpy as np\n'), ((11011, 11034), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (11028, 11034), False, 'import torch\n'), ((11046, 11071), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (11069, 11071), False, 'import torch\n'), ((12457, 12496), 'os.environ.get', 'os.environ.get', (['"""INFR_TURBO_INDEX"""', '"""0"""'], {}), "('INFR_TURBO_INDEX', '0')\n", (12471, 12496), False, 'import os\n'), ((12586, 12601), 'infirunner.util.make_trial_id', 'make_trial_id', ([], {}), '()\n', (12599, 12601), False, 'from infirunner.util import make_trial_id\n'), ((3097, 3122), 'inspect.getmodule', 'inspect.getmodule', (['frm[0]'], {}), '(frm[0])\n', (3114, 3122), False, 'import inspect\n'), ((3397, 3442), 'shutil.rmtree', 'shutil.rmtree', (['source_dir'], {'ignore_errors': '(True)'}), '(source_dir, ignore_errors=True)\n', (3410, 3442), False, 'import shutil\n'), ((3829, 3868), 'os.path.join', 'os.path.join', (['source_dir', '*m_comps[:-1]'], {}), '(source_dir, *m_comps[:-1])\n', (3841, 3868), False, 'import os\n'), ((3881, 3925), 'os.makedirs', 'os.makedirs', (['source_save_path'], {'exist_ok': '(True)'}), '(source_save_path, exist_ok=True)\n', (3892, 3925), False, 'import os\n'), ((7654, 7664), 'sys.exit', 'sys.exit', ([], {}), '()\n', (7662, 7664), False, 'import sys\n'), ((8703, 8715), 'json.load', 'json.load', (['f'], {}), '(f)\n', (8712, 8715), False, 'import json\n'), ((9563, 9600), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', ([], {'log_dir': 'self.save_path'}), '(log_dir=self.save_path)\n', (9576, 9600), False, 'from 
torch.utils.tensorboard import SummaryWriter\n'), ((10454, 10494), 'torch.cuda.set_rng_state', 'torch.cuda.set_rng_state', (['old_cuda_state'], {}), '(old_cuda_state)\n', (10478, 10494), False, 'import torch\n'), ((10837, 10863), 'torch.cuda.get_rng_state', 'torch.cuda.get_rng_state', ([], {}), '()\n', (10861, 10863), False, 'import torch\n'), ((11085, 11113), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['seed'], {}), '(seed)\n', (11107, 11113), False, 'import torch\n'), ((11468, 11526), 'os.path.join', 'os.path.join', (['self.save_path', '"""saves"""', 'f"""{load_budget:05}"""'], {}), "(self.save_path, 'saves', f'{load_budget:05}')\n", (11480, 11526), False, 'import os\n'), ((12248, 12259), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (12257, 12259), False, 'import os\n'), ((12771, 12783), 'json.load', 'json.load', (['f'], {}), '(f)\n', (12780, 12783), False, 'import json\n'), ((3060, 3075), 'inspect.stack', 'inspect.stack', ([], {}), '()\n', (3073, 3075), False, 'import inspect\n'), ((3476, 3495), 'sys.modules.items', 'sys.modules.items', ([], {}), '()\n', (3493, 3495), False, 'import sys\n'), ((3634, 3654), 'inspect.getsource', 'inspect.getsource', (['m'], {}), '(m)\n', (3651, 3654), False, 'import inspect\n'), ((5499, 5535), 'os.path.join', 'os.path.join', (['self.save_path', '"""logs"""'], {}), "(self.save_path, 'logs')\n", (5511, 5535), False, 'import os\n'), ((5580, 5630), 'os.path.join', 'os.path.join', (['self.save_path', '"""logs"""', 'f"""{key}.tsv"""'], {}), "(self.save_path, 'logs', f'{key}.tsv')\n", (5592, 5630), False, 'import os\n'), ((6469, 6517), 'os.path.join', 'os.path.join', (['p', 'f"""{self.steps:015}.{ext}"""', '"""wb"""'], {}), "(p, f'{self.steps:015}.{ext}', 'wb')\n", (6481, 6517), False, 'import os\n'), ((7187, 7229), 'os.path.join', 'os.path.join', (['self.save_path', '"""metric.tsv"""'], {}), "(self.save_path, 'metric.tsv')\n", (7199, 7229), False, 'import os\n'), ((7401, 7422), 'math.isfinite', 'math.isfinite', (['metric'], 
{}), '(metric)\n', (7414, 7422), False, 'import math\n'), ((7908, 7943), 'os.path.join', 'os.path.join', (['out_dir', '"""state.json"""'], {}), "(out_dir, 'state.json')\n", (7920, 7943), False, 'import os\n'), ((8443, 8476), 'os.path.join', 'os.path.join', (['out_dir', '"""model.pt"""'], {}), "(out_dir, 'model.pt')\n", (8455, 8476), False, 'import os\n'), ((8616, 8653), 'os.path.join', 'os.path.join', (['load_path', '"""state.json"""'], {}), "(load_path, 'state.json')\n", (8628, 8653), False, 'import os\n'), ((9125, 9137), 'json.load', 'json.load', (['f'], {}), '(f)\n', (9134, 9137), False, 'import json\n'), ((9251, 9286), 'os.path.join', 'os.path.join', (['load_path', '"""model.pt"""'], {}), "(load_path, 'model.pt')\n", (9263, 9286), False, 'import os\n'), ((3948, 3999), 'os.path.join', 'os.path.join', (['source_save_path', "(m_comps[-1] + '.py')"], {}), "(source_save_path, m_comps[-1] + '.py')\n", (3960, 3999), False, 'import os\n'), ((8127, 8165), 'os.path.join', 'os.path.join', (['out_dir', '"""metadata.json"""'], {}), "(out_dir, 'metadata.json')\n", (8139, 8165), False, 'import os\n'), ((9027, 9067), 'os.path.join', 'os.path.join', (['load_path', '"""metadata.json"""'], {}), "(load_path, 'metadata.json')\n", (9039, 9067), False, 'import os\n'), ((7306, 7317), 'time.time', 'time.time', ([], {}), '()\n', (7315, 7317), False, 'import time\n')] |
#-*- coding: utf-8 -*-
# --------------------------------------------------------------------#
# --------------------------------------------------------------------#
# ---------- Made by <NAME> @ircam on 11/2015
# ---------- Copyright (c) 2018 CREAM Lab // CNRS / IRCAM / Sorbonne Universite
# ----------
# ---------- execute different super vp commands
# ---------- to use this don't forget to include these lines before your script:
# ----------
# --------------------------------------------------------------------#
# --------------------------------------------------------------------#
from __future__ import absolute_import
from __future__ import print_function
from subprocess import call
import parse_sdif
import shlex, subprocess
import os
import numpy as np
from super_vp_path import get_super_vp_path
# Path of the SuperVP executable, resolved once at module import time.
super_vp_path = get_super_vp_path()
# ---------- generate_f0_analysis
def generate_f0_analysis(audio_file, analysis="", f_min=80, f_max=1500, F=3000, wait=True):
    """Run a SuperVP fundamental-frequency (f0) analysis on *audio_file*.

    audio_file : sound file to analyse
    analysis   : SDIF file to generate; if empty, it is written next to the
                 audio file with the same base name and a ``.sdif`` extension
    f_min      : minimum frequency value for pitch analysis in Hz
    f_max      : maximum frequency value for pitch analysis in Hz
    F          : maximum frequency in spectrum in Hz
    wait       : block until the SuperVP process terminates

    Returns the path of the generated SDIF analysis file.
    """
    if analysis == "":
        file_tag = os.path.splitext(os.path.basename(audio_file))[0]
        analysis = os.path.join(os.path.dirname(audio_file), file_tag + ".sdif")
    # Build the argument list directly instead of shlex-splitting one big
    # command string: this keeps paths containing spaces intact.
    args = [
        super_vp_path,
        "-t", "-ns",
        "-S" + audio_file,
        "-Af0", "fm" + str(f_min) + ", fM" + str(f_max) + ", F" + str(F)
        + ", sn120, smooth3 ,Cdem, M0.26 , E0.14",
        "-Np2", "-M0.0464299991726875s", "-oversamp", "8", "-Wblackman", "-Of4",
        analysis,
    ]
    p = subprocess.Popen(args)
    if wait:
        p.wait()
    return analysis
# ---------- generate_LPC_analysis
def generate_LPC_analysis(audio_file, analysis="", wait=True, nb_coefs=45):
    """Run a SuperVP LPC envelope analysis on *audio_file*.

    audio_file : sound file to analyse
    analysis   : SDIF file to generate; if empty, it is written next to the
                 audio file with the same base name and a ``.sdif`` extension
    wait       : block until the SuperVP process terminates
    nb_coefs   : LPC order passed to ``-Alpc``

    Returns the path of the generated SDIF analysis file.
    """
    if analysis == "":
        file_tag = os.path.splitext(os.path.basename(audio_file))[0]
        # BUG FIX: os.path.join instead of dirname + "/" — the old string
        # concatenation produced a root-absolute "/name.sdif" path when
        # the audio file had no directory component.
        analysis = os.path.join(os.path.dirname(audio_file), file_tag + ".sdif")
    # Argument list (not shlex on a joined string) so paths with spaces work.
    args = [
        super_vp_path,
        "-t", "-ns",
        "-S" + audio_file,
        "-Alpc", str(nb_coefs),
        "-Np2", "-M0.0907029509544373s", "-oversamp", "16", "-Whanning", "-OS1",
        analysis,
    ]
    p = subprocess.Popen(args)
    if wait:
        p.wait()
    # Return the path for consistency with generate_f0_analysis/fft.
    return analysis
# ---------- generate_formant_analysis
def generate_formant_analysis(audio_file, analysis="", nb_formants=5, wait=True, ana_winsize=512):
    """Run a SuperVP LPC-based formant analysis on *audio_file*.

    audio_file  : sound file to analyse (mono only)
    analysis    : SDIF file to generate; if empty, it is written next to the
                  audio file with the same base name and a ``.sdif`` extension
    nb_formants : number of formants to track
    wait        : block until the SuperVP process terminates
    ana_winsize : analysis window size in samples

    Returns the path of the generated SDIF analysis file.
    """
    if analysis == "":
        # os.path.join handles both the bare-filename and the with-directory
        # cases, replacing the old duplicated if/else branches.
        file_tag = os.path.splitext(os.path.basename(audio_file))[0]
        analysis = os.path.join(os.path.dirname(audio_file), file_tag + ".sdif")
    # Argument list (not shlex on a joined string) so paths with spaces work.
    args = [
        super_vp_path,
        "-t", "-ns",
        "-S" + audio_file,
        "-Aformant_lpc", "n" + str(nb_formants), "45",
        "-Np0", "-M" + str(ana_winsize), "-oversamp", "8", "-Whanning", "-OS1",
        analysis,
    ]
    p = subprocess.Popen(args)
    if wait:
        p.wait()
    return analysis
# ---------- generate_formant_analysis
def generate_tenv_formant_analysis(audio_file, analysis="", nb_formants=5, wait=True, f0="max", ana_winsize=512):
    """Generate an SDIF formant analysis using the true-envelope method.

    audio_file  : sound file to analyse (mono only)
    analysis    : SDIF file to generate; if empty, it is written next to the
                  audio file with the same base name and a ``.sdif`` extension
    nb_formants : number of formants to track
    wait        : block until the SuperVP process terminates
    f0          : numeric f0 in Hz, or "mean"/"max" to derive it from an
                  automatic f0 analysis of the file
    ana_winsize : analysis window size in samples

    Raises ValueError when *f0* is neither a number nor "mean"/"max".
    Returns the path of the generated SDIF analysis file.  The intermediate
    ``<name>f0.sdif`` file is left on disk (as in the original behavior).
    """
    file_tag = os.path.splitext(os.path.basename(audio_file))[0]
    dir_name = os.path.dirname(audio_file)
    f0_analysis = os.path.join(dir_name, file_tag + "f0.sdif")
    if analysis == "":
        analysis = os.path.join(dir_name, file_tag + ".sdif")
    generate_f0_analysis(audio_file, f0_analysis)
    if not isinstance(f0, (int, float)):
        from parse_sdif import get_f0_info
        f0times, f0harm, f0val = get_f0_info(f0_analysis)
        if f0 == "mean":
            f0 = np.mean(f0val)
        elif f0 == "max":
            f0 = np.max(f0val)
        else:
            # BUG FIX: previously this only printed a warning and then built
            # a broken command line with the invalid f0 string.
            raise ValueError('f0 should be an int, a float or the strings "mean" or "max"')
    # NOTE: the original re-derived `analysis` a second time here when empty;
    # that branch was dead code (analysis is always set above) and is removed.
    args = [
        super_vp_path,
        "-t", "-ns",
        "-S" + audio_file,
        "-Atenv", "+" + str(f0) + "Hz", "-F0", f0_analysis,
        "-Aformant_tenv", "n" + str(nb_formants), "+2,1",
        "-Np0", "-M" + str(ana_winsize), "-oversamp", "8", "-Whanning", "-OS1",
        analysis,
    ]
    p = subprocess.Popen(args)
    if wait:
        p.wait()
    return analysis
# ---------- generate_true_env_analysis
def generate_true_env_analysis(audio_file, analysis="", wait=True, f0="max", oversamp=16):
    """Run a SuperVP true-envelope analysis on *audio_file*.

    audio_file : sound file to analyse (mono only)
    analysis   : SDIF file to generate; if empty, it is written next to the
                 audio file with the same base name and a ``.sdif`` extension
    wait       : block until the SuperVP process terminates
    f0         : the f0 ceiling is important for the true envelope; either a
                 number in Hz, or "mean"/"max" to derive it from an automatic
                 f0 analysis of the file
    oversamp   : window oversampling factor

    Raises ValueError when *f0* is neither a number nor "mean"/"max".
    Returns the path of the generated SDIF analysis file.
    """
    file_tag = os.path.splitext(os.path.basename(audio_file))[0]
    dir_name = os.path.dirname(audio_file)
    f0_analysis = os.path.join(dir_name, file_tag + "f0.sdif")
    if analysis == "":
        analysis = os.path.join(dir_name, file_tag + ".sdif")
    generate_f0_analysis(audio_file, f0_analysis)
    if not isinstance(f0, (int, float)):
        from parse_sdif import get_f0_info
        f0times, f0harm, f0val = get_f0_info(f0_analysis)
        if f0 == "mean":
            f0 = np.mean(f0val)
        elif f0 == "max":
            f0 = np.max(f0val)
        else:
            # BUG FIX: previously this only printed a warning and then built
            # a broken command line with the invalid f0 string.
            raise ValueError('f0 should be an int, a float or the strings "mean" or "max"')
    args = [
        super_vp_path,
        "-t", "-ns",
        "-S" + audio_file,
        "-Atenv", "+" + str(f0) + "Hz", "-F0", f0_analysis,
        "-Np2", "-M0.0907029509544373s", "-oversamp", str(oversamp),
        "-Whanning", "-OS1",
        analysis,
    ]
    p = subprocess.Popen(args)
    if wait:
        p.wait()
        # BUG FIX: only delete the temporary f0 file once SuperVP has
        # finished; the original removed it even when wait=False, racing
        # the still-running process.  With wait=False the file is kept.
        os.remove(f0_analysis)
    return analysis
# ---------- generate_fft_analysis
def generate_fft_analysis(audio_file, analysis="", wait=True):
    """Run a SuperVP FFT analysis on *audio_file*.

    audio_file : sound file to analyse
    analysis   : SDIF file to generate; if empty, it is written next to the
                 audio file with the same base name and a ``.sdif`` extension
    wait       : block until the SuperVP process terminates

    Returns the path of the generated SDIF analysis file.
    """
    if analysis == "":
        # os.path.join handles both the bare-filename and the with-directory
        # cases, replacing the old duplicated if/else branches.
        file_tag = os.path.splitext(os.path.basename(audio_file))[0]
        analysis = os.path.join(os.path.dirname(audio_file), file_tag + ".sdif")
    # Argument list (not shlex on a joined string) so paths with spaces work.
    args = [
        super_vp_path,
        "-t", "-ns",
        "-S" + audio_file,
        "-Afft", "-Np4", "-M0.0454544983804226s", "-oversamp", "8",
        "-Whanning", "-OS1",
        analysis,
    ]
    p = subprocess.Popen(args)
    if wait:
        p.wait()
    return analysis
# ---------- transpose_sound
def transpose_sound(source_sound, nb_cents, target_sound, wait=True, env_warp_ws=512, lpc_order=25, win_oversampling=64, fft_oversampling=2):
    """Transpose *source_sound* by *nb_cents* cents into *target_sound*.

    source_sound     : audio source file
    nb_cents         : transposition amount in cents
    target_sound     : audio file to be created
    wait             : block until the SuperVP process terminates
    env_warp_ws      : analysis window size (``-M``)
    lpc_order        : order handed to ``-Afft``
    win_oversampling : window oversampling (``-oversamp``)
    fft_oversampling : FFT oversampling exponent (``-Np``)
    """
    command_line = (super_vp_path
                    + " -t -Afft " + str(lpc_order)
                    + " -M" + str(env_warp_ws)
                    + " -Np" + str(int(fft_oversampling))
                    + " -oversamp " + str(win_oversampling)
                    + " -Z -trans " + str(nb_cents)
                    + " -S" + source_sound
                    + " " + target_sound)
    process = subprocess.Popen(shlex.split(command_line))
    if wait:
        process.wait()
# ---------- transpose_sound
def env_transpose(source_sound, target_sound, nb_cents, wait=True, env_warp_ws=512, lpc_order=25, win_oversampling=64, fft_oversampling=2):
    """Transpose the spectral envelope of *source_sound* by *nb_cents* cents
    into *target_sound* (SuperVP ``-transenv``).

    source_sound     : audio source file
    target_sound     : audio file to be created
    nb_cents         : transposition amount in cents
    wait             : block until the SuperVP process terminates
    env_warp_ws      : analysis window size (``-M``)
    lpc_order        : order handed to ``-Afft``
    win_oversampling : window oversampling (``-oversamp``)
    fft_oversampling : FFT oversampling exponent (``-Np``)
    """
    command_line = (super_vp_path
                    + " -t -Afft " + str(lpc_order)
                    + " -M" + str(env_warp_ws)
                    + " -Np" + str(int(fft_oversampling))
                    + " -oversamp " + str(win_oversampling)
                    + " -Z -transenv " + str(nb_cents)
                    + " -S" + source_sound
                    + " " + target_sound)
    process = subprocess.Popen(shlex.split(command_line))
    if wait:
        process.wait()
# ---------- freq_warp
def freq_warp(source_sound, target_sound, warp_file, freq_warp_ws=512, lpc_order=25, wait=True, warp_method="lpc", win_oversampling=64, fft_oversampling=2):
    """Apply a SuperVP frequency-envelope warp to *source_sound*.

    source_sound     : audio source file
    target_sound     : audio file to be created
    warp_file        : file describing the warp (``-envwarp``)
    freq_warp_ws     : size of the signal segments treated by SuperVP
    lpc_order        : order for the LPC analysis
    wait             : block until the SuperVP process terminates
    warp_method      : "lpc" to warp with the LPC envelope, "t_env" with the
                       true envelope (requires a temporary f0 analysis)
    fft_oversampling : FFT-size exponent (FFTsize = freq_warp_ws * 2**fft_oversampling)
    win_oversampling : window oversampling (hopsize = freq_warp_ws / win_oversampling)

    Raises ValueError when *warp_method* is unknown.
    """
    # BUG FIX: an invalid warp_method used to print a message and then crash
    # with NameError on the unbound `parameters`; validate up front instead.
    if warp_method not in ("lpc", "t_env"):
        raise ValueError("wrong warping method specified: %r" % (warp_method,))
    file_tag = os.path.splitext(os.path.basename(source_sound))[0]
    f0_analysis = os.path.join(os.path.dirname(source_sound), file_tag + "f0.sdif")
    freq_warp_ws = str(freq_warp_ws)
    lpc_order = str(lpc_order)
    fft_oversampling = str(int(fft_oversampling))
    win_oversampling = str(win_oversampling)
    # See the SuperVP help for the meaning of the parameters.
    if warp_method == "lpc":
        parameters = ("-t -Afft " + lpc_order + " -M" + freq_warp_ws
                      + " -Np" + fft_oversampling + " -oversamp " + win_oversampling
                      + " -Z -envwarp " + warp_file
                      + " -S" + source_sound + " " + target_sound)
    else:  # "t_env": needs an f0 analysis for the true envelope
        generate_f0_analysis(source_sound, f0_analysis)
        parameters = ("-t -Afft -atenv -F0 " + f0_analysis + " -M" + freq_warp_ws
                      + " -Np" + fft_oversampling + " -oversamp " + win_oversampling
                      + " -F0 " + f0_analysis + " -Z -envwarp " + warp_file
                      + " -S" + source_sound + " " + target_sound)
    cmd = super_vp_path + " " + parameters
    p = subprocess.Popen(shlex.split(cmd))
    if wait:
        p.wait()
        # Only delete the temporary f0 file once SuperVP has finished;
        # removing it while the process may still be running was a race.
        if warp_method == "t_env":
            os.remove(f0_analysis)
def stretch_sound_to_target_duration(source_sound, target_sound, target_duration, extreme_precision = True):
    """
    Time-stretch source_sound so that its duration becomes target_duration.

    source_sound: path of the audio file to stretch
    target_sound: path of the audio file to be created
    target_duration: desired output duration, in seconds
    extreme_precision: when True use the slower, transient-preserving
        super-vp settings; otherwise use the faster "good precision" settings
    """
    import soundfile
    samples, samplerate = soundfile.read(source_sound)
    source_sound_duration = len(samples)/float(samplerate)
    change_factor = target_duration/source_sound_duration
    # write a temporary breakpoint (bpf) file describing the stretch factor over time
    bpf_file = 'aux_file_for_stretch'
    factor_str = str(change_factor)
    duration_str = str(source_sound_duration)
    with open(bpf_file,'w+') as bpf:
        bpf.writelines([
            "-10.000000000000000 1.000000000000000\n",
            "0.000000000000000 " + factor_str + "\n",
            duration_str + " " + factor_str + "\n",
            duration_str + " " + factor_str + "\n",
        ])
    if extreme_precision:
        #use this parameters for extreme precision
        parameters = "-t -Z -S"+source_sound+" -Afft -Np4 -M0.095238097012043s -oversamp 8 -Whanning -P1 -td_thresh 2.90000009536743 -td_G 3.79999995231628 -td_band 0,4500.0078125 -td_nument 33.3333320617676 -td_minoff 0.02s -td_mina 0 -td_minren 0.100000001490116 -td_evstre 1 -td_ampfac 1.20000004768372 -FCombineMul -shape 1 -D"+ bpf_file + " "+target_sound
    else:
        #use this parameters for good precision
        parameters = "-t -Z -S"+source_sound+" -Afft -M16384 -Np2 -M0.0238095000386238s -oversamp 8 -Whanning -P0 -td_ampfac 1.20000004768372 -FCombineMul -shape 1 -D"+bpf_file +" "+ target_sound
    command_args = shlex.split(super_vp_path + " " + parameters)
    process = subprocess.Popen(command_args)
    process.wait()  # block until super-vp finishes
    os.remove(bpf_file)
def stretch_sound_with_bpf_file(source_sound, target_sound, bpf_file, extreme_precision = True):
    """
    Time-stretch source_sound following the stretch factors in bpf_file.

    source_sound: path of the audio file to stretch
    target_sound: path of the audio file to be created
    bpf_file: breakpoint file mapping time to stretch factor (super-vp -D format)
    extreme_precision: when True use the slower, transient-preserving
        super-vp settings; otherwise use the faster "good precision" settings
    """
    # NOTE: a previous version decoded the source file via the long-dead
    # scikits.audiolab package; the decoded samples were never used, so the
    # read (and the dependency on scikits.audiolab) has been removed.
    if not extreme_precision:
        #use this parameters for good precision
        parameters = "-t -Z -S"+source_sound+" -Afft -M16384 -Np2 -M0.0238095000386238s -oversamp 8 -Whanning -P0 -td_ampfac 1.20000004768372 -FCombineMul -shape 1 -D"+bpf_file +" "+ target_sound
    else:
        #use this parameters for extreme precision
        parameters = "-t -Z -S"+source_sound+" -Afft -Np4 -M0.095238097012043s -oversamp 8 -Whanning -P1 -td_thresh 2.90000009536743 -td_G 3.79999995231628 -td_band 0,4500.0078125 -td_nument 33.3333320617676 -td_minoff 0.02s -td_mina 0 -td_minren 0.100000001490116 -td_evstre 1 -td_ampfac 1.20000004768372 -FCombineMul -shape 1 -D"+ bpf_file + " "+target_sound
    cmd = super_vp_path + " " + parameters
    args = shlex.split(cmd)
    p = subprocess.Popen(args)
    p.wait() #wait
## denoise sound file
def denoise_sound(source_sound, target_sound, gain_reduction =10, amp_threshold = -55, wnd_size = 8192 ):
    """
    Denoise source_sound via super-vp spectral subtraction.

    source_sound : audio source file
    target_sound : audio file to be created
    gain_reduction : gain reduction in dB to apply to the noise (must be positive)
    amp_threshold : amplitude to search for silent sounds
    wnd_size : when searching for silent parts in the sound, the size of moving window
               for short sounds use small windows and viceversa

    Raises ValueError when no silent segment is found, since a silent segment
    is required as the noise fingerprint for the subtraction.
    """
    import os
    from audio_analysis import find_silence, get_sound_duration
    #find silence
    silence_tags = find_silence(source_sound, threshold = amp_threshold, wnd_size=wnd_size)
    #get longest silence tag (it serves as the noise fingerprint)
    max_tag = 0
    beg = end = None
    for tag in silence_tags:
        if (tag[1]-tag[0]) > max_tag:
            max_tag = tag[1]-tag[0]
            beg = tag[0]
            end = tag[1]
    if beg is None:
        # previously beg/end were left unbound here and the code crashed below
        # with UnboundLocalError; fail early with an actionable message instead
        raise ValueError("no silent segment found in %s; try raising amp_threshold "
                         "or adjusting wnd_size" % source_sound)
    #create noise key sdif
    noise_sdif = "aux_noise_sdif_13451.sdif"
    parameters = "-S"+source_sound+" -A -Np4 -M0.0500000007450581s -oversamp 8 -Whanning -avseg "+str(beg)+","+str(end)+" -OM2 "+noise_sdif
    cmd = super_vp_path + " " + parameters
    args = shlex.split(cmd)
    p = subprocess.Popen(args)
    p.wait() #wait
    sound_duration = str(get_sound_duration(source_sound))
    #denoise
    parameters = "-t -A -Z -B0.000000 -E"+sound_duration+" -S"+source_sound+" -Np4 -M0.050000000745058s -oversamp 8 -Whanning -norm -Fsub "+noise_sdif+" -avgamma 1 -avbeta "+str(gain_reduction)+" -avsfac 0.000000000000000 "+target_sound
    cmd = super_vp_path + " " + parameters
    args = shlex.split(cmd)
    p = subprocess.Popen(args)
    p.wait() #wait
    #delete noise key
    os.remove(noise_sdif)
| [
"numpy.mean",
"shlex.split",
"subprocess.Popen",
"os.path.splitext",
"parse_sdif.get_f0_info",
"numpy.max",
"os.path.dirname",
"super_vp_path.get_super_vp_path",
"audio_analysis.find_silence",
"os.path.basename",
"soundfile.read",
"audio_analysis.get_sound_duration",
"os.remove"
] | [((831, 850), 'super_vp_path.get_super_vp_path', 'get_super_vp_path', ([], {}), '()\n', (848, 850), False, 'from super_vp_path import get_super_vp_path\n'), ((2004, 2020), 'shlex.split', 'shlex.split', (['cmd'], {}), '(cmd)\n', (2015, 2020), False, 'import shlex, subprocess\n'), ((2029, 2051), 'subprocess.Popen', 'subprocess.Popen', (['args'], {}), '(args)\n', (2045, 2051), False, 'import shlex, subprocess\n'), ((2765, 2781), 'shlex.split', 'shlex.split', (['cmd'], {}), '(cmd)\n', (2776, 2781), False, 'import shlex, subprocess\n'), ((2790, 2812), 'subprocess.Popen', 'subprocess.Popen', (['args'], {}), '(args)\n', (2806, 2812), False, 'import shlex, subprocess\n'), ((3902, 3918), 'shlex.split', 'shlex.split', (['cmd'], {}), '(cmd)\n', (3913, 3918), False, 'import shlex, subprocess\n'), ((3927, 3949), 'subprocess.Popen', 'subprocess.Popen', (['args'], {}), '(args)\n', (3943, 3949), False, 'import shlex, subprocess\n'), ((6181, 6197), 'shlex.split', 'shlex.split', (['cmd'], {}), '(cmd)\n', (6192, 6197), False, 'import shlex, subprocess\n'), ((6206, 6228), 'subprocess.Popen', 'subprocess.Popen', (['args'], {}), '(args)\n', (6222, 6228), False, 'import shlex, subprocess\n'), ((8322, 8338), 'shlex.split', 'shlex.split', (['cmd'], {}), '(cmd)\n', (8333, 8338), False, 'import shlex, subprocess\n'), ((8347, 8369), 'subprocess.Popen', 'subprocess.Popen', (['args'], {}), '(args)\n', (8363, 8369), False, 'import shlex, subprocess\n'), ((8400, 8422), 'os.remove', 'os.remove', (['f0_analysis'], {}), '(f0_analysis)\n', (8409, 8422), False, 'import os\n'), ((9237, 9253), 'shlex.split', 'shlex.split', (['cmd'], {}), '(cmd)\n', (9248, 9253), False, 'import shlex, subprocess\n'), ((9262, 9284), 'subprocess.Popen', 'subprocess.Popen', (['args'], {}), '(args)\n', (9278, 9284), False, 'import shlex, subprocess\n'), ((10088, 10104), 'shlex.split', 'shlex.split', (['cmd'], {}), '(cmd)\n', (10099, 10104), False, 'import shlex, subprocess\n'), ((10113, 10135), 'subprocess.Popen', 
'subprocess.Popen', (['args'], {}), '(args)\n', (10129, 10135), False, 'import shlex, subprocess\n'), ((10922, 10938), 'shlex.split', 'shlex.split', (['cmd'], {}), '(cmd)\n', (10933, 10938), False, 'import shlex, subprocess\n'), ((10947, 10969), 'subprocess.Popen', 'subprocess.Popen', (['args'], {}), '(args)\n', (10963, 10969), False, 'import shlex, subprocess\n'), ((13434, 13450), 'shlex.split', 'shlex.split', (['cmd'], {}), '(cmd)\n', (13445, 13450), False, 'import shlex, subprocess\n'), ((13459, 13481), 'subprocess.Popen', 'subprocess.Popen', (['args'], {}), '(args)\n', (13475, 13481), False, 'import shlex, subprocess\n'), ((13828, 13856), 'soundfile.read', 'soundfile.read', (['source_sound'], {}), '(source_sound)\n', (13842, 13856), False, 'import soundfile\n'), ((15028, 15044), 'shlex.split', 'shlex.split', (['cmd'], {}), '(cmd)\n', (15039, 15044), False, 'import shlex, subprocess\n'), ((15053, 15075), 'subprocess.Popen', 'subprocess.Popen', (['args'], {}), '(args)\n', (15069, 15075), False, 'import shlex, subprocess\n'), ((15094, 15113), 'os.remove', 'os.remove', (['bpf_file'], {}), '(bpf_file)\n', (15103, 15113), False, 'import os\n'), ((16150, 16166), 'shlex.split', 'shlex.split', (['cmd'], {}), '(cmd)\n', (16161, 16166), False, 'import shlex, subprocess\n'), ((16175, 16197), 'subprocess.Popen', 'subprocess.Popen', (['args'], {}), '(args)\n', (16191, 16197), False, 'import shlex, subprocess\n'), ((16827, 16897), 'audio_analysis.find_silence', 'find_silence', (['source_sound'], {'threshold': 'amp_threshold', 'wnd_size': 'wnd_size'}), '(source_sound, threshold=amp_threshold, wnd_size=wnd_size)\n', (16839, 16897), False, 'from audio_analysis import find_silence, get_sound_duration\n'), ((17314, 17330), 'shlex.split', 'shlex.split', (['cmd'], {}), '(cmd)\n', (17325, 17330), False, 'import shlex, subprocess\n'), ((17339, 17361), 'subprocess.Popen', 'subprocess.Popen', (['args'], {}), '(args)\n', (17355, 17361), False, 'import shlex, subprocess\n'), ((17733, 
17749), 'shlex.split', 'shlex.split', (['cmd'], {}), '(cmd)\n', (17744, 17749), False, 'import shlex, subprocess\n'), ((17758, 17780), 'subprocess.Popen', 'subprocess.Popen', (['args'], {}), '(args)\n', (17774, 17780), False, 'import shlex, subprocess\n'), ((17819, 17840), 'os.remove', 'os.remove', (['noise_sdif'], {}), '(noise_sdif)\n', (17828, 17840), False, 'import os\n'), ((1421, 1449), 'os.path.basename', 'os.path.basename', (['audio_file'], {}), '(audio_file)\n', (1437, 1449), False, 'import os\n'), ((2436, 2464), 'os.path.basename', 'os.path.basename', (['audio_file'], {}), '(audio_file)\n', (2452, 2464), False, 'import os\n'), ((4529, 4556), 'os.path.dirname', 'os.path.dirname', (['audio_file'], {}), '(audio_file)\n', (4544, 4556), False, 'import os\n'), ((4578, 4606), 'os.path.basename', 'os.path.basename', (['audio_file'], {}), '(audio_file)\n', (4594, 4606), False, 'import os\n'), ((4763, 4791), 'os.path.basename', 'os.path.basename', (['audio_file'], {}), '(audio_file)\n', (4779, 4791), False, 'import os\n'), ((5169, 5193), 'parse_sdif.get_f0_info', 'get_f0_info', (['f0_analysis'], {}), '(f0_analysis)\n', (5180, 5193), False, 'from parse_sdif import get_f0_info\n'), ((6954, 6981), 'os.path.dirname', 'os.path.dirname', (['audio_file'], {}), '(audio_file)\n', (6969, 6981), False, 'import os\n'), ((7003, 7031), 'os.path.basename', 'os.path.basename', (['audio_file'], {}), '(audio_file)\n', (7019, 7031), False, 'import os\n'), ((7188, 7216), 'os.path.basename', 'os.path.basename', (['audio_file'], {}), '(audio_file)\n', (7204, 7216), False, 'import os\n'), ((7595, 7619), 'parse_sdif.get_f0_info', 'get_f0_info', (['f0_analysis'], {}), '(f0_analysis)\n', (7606, 7619), False, 'from parse_sdif import get_f0_info\n'), ((11829, 11858), 'os.path.dirname', 'os.path.dirname', (['source_sound'], {}), '(source_sound)\n', (11844, 11858), False, 'import os\n'), ((11880, 11910), 'os.path.basename', 'os.path.basename', (['source_sound'], {}), '(source_sound)\n', (11896, 
11910), False, 'import os\n'), ((12012, 12042), 'os.path.basename', 'os.path.basename', (['source_sound'], {}), '(source_sound)\n', (12028, 12042), False, 'import os\n'), ((13542, 13564), 'os.remove', 'os.remove', (['f0_analysis'], {}), '(f0_analysis)\n', (13551, 13564), False, 'import os\n'), ((17401, 17433), 'audio_analysis.get_sound_duration', 'get_sound_duration', (['source_sound'], {}), '(source_sound)\n', (17419, 17433), False, 'from audio_analysis import find_silence, get_sound_duration\n'), ((1463, 1489), 'os.path.splitext', 'os.path.splitext', (['file_tag'], {}), '(file_tag)\n', (1479, 1489), False, 'import os\n'), ((1519, 1546), 'os.path.dirname', 'os.path.dirname', (['audio_file'], {}), '(audio_file)\n', (1534, 1546), False, 'import os\n'), ((2478, 2504), 'os.path.splitext', 'os.path.splitext', (['file_tag'], {}), '(file_tag)\n', (2494, 2504), False, 'import os\n'), ((3388, 3415), 'os.path.dirname', 'os.path.dirname', (['audio_file'], {}), '(audio_file)\n', (3403, 3415), False, 'import os\n'), ((3438, 3466), 'os.path.basename', 'os.path.basename', (['audio_file'], {}), '(audio_file)\n', (3454, 3466), False, 'import os\n'), ((3567, 3595), 'os.path.basename', 'os.path.basename', (['audio_file'], {}), '(audio_file)\n', (3583, 3595), False, 'import os\n'), ((4620, 4646), 'os.path.splitext', 'os.path.splitext', (['file_tag'], {}), '(file_tag)\n', (4636, 4646), False, 'import os\n'), ((4805, 4831), 'os.path.splitext', 'os.path.splitext', (['file_tag'], {}), '(file_tag)\n', (4821, 4831), False, 'import os\n'), ((5221, 5235), 'numpy.mean', 'np.mean', (['f0val'], {}), '(f0val)\n', (5228, 5235), True, 'import numpy as np\n'), ((5407, 5434), 'os.path.dirname', 'os.path.dirname', (['audio_file'], {}), '(audio_file)\n', (5422, 5434), False, 'import os\n'), ((5457, 5485), 'os.path.basename', 'os.path.basename', (['audio_file'], {}), '(audio_file)\n', (5473, 5485), False, 'import os\n'), ((5586, 5614), 'os.path.basename', 'os.path.basename', (['audio_file'], {}), 
'(audio_file)\n', (5602, 5614), False, 'import os\n'), ((7045, 7071), 'os.path.splitext', 'os.path.splitext', (['file_tag'], {}), '(file_tag)\n', (7061, 7071), False, 'import os\n'), ((7230, 7256), 'os.path.splitext', 'os.path.splitext', (['file_tag'], {}), '(file_tag)\n', (7246, 7256), False, 'import os\n'), ((7647, 7661), 'numpy.mean', 'np.mean', (['f0val'], {}), '(f0val)\n', (7654, 7661), True, 'import numpy as np\n'), ((8748, 8775), 'os.path.dirname', 'os.path.dirname', (['audio_file'], {}), '(audio_file)\n', (8763, 8775), False, 'import os\n'), ((8798, 8826), 'os.path.basename', 'os.path.basename', (['audio_file'], {}), '(audio_file)\n', (8814, 8826), False, 'import os\n'), ((8927, 8955), 'os.path.basename', 'os.path.basename', (['audio_file'], {}), '(audio_file)\n', (8943, 8955), False, 'import os\n'), ((11924, 11950), 'os.path.splitext', 'os.path.splitext', (['file_tag'], {}), '(file_tag)\n', (11940, 11950), False, 'import os\n'), ((12056, 12082), 'os.path.splitext', 'os.path.splitext', (['file_tag'], {}), '(file_tag)\n', (12072, 12082), False, 'import os\n'), ((3481, 3507), 'os.path.splitext', 'os.path.splitext', (['file_tag'], {}), '(file_tag)\n', (3497, 3507), False, 'import os\n'), ((3610, 3636), 'os.path.splitext', 'os.path.splitext', (['file_tag'], {}), '(file_tag)\n', (3626, 3636), False, 'import os\n'), ((5264, 5277), 'numpy.max', 'np.max', (['f0val'], {}), '(f0val)\n', (5270, 5277), True, 'import numpy as np\n'), ((5500, 5526), 'os.path.splitext', 'os.path.splitext', (['file_tag'], {}), '(file_tag)\n', (5516, 5526), False, 'import os\n'), ((5629, 5655), 'os.path.splitext', 'os.path.splitext', (['file_tag'], {}), '(file_tag)\n', (5645, 5655), False, 'import os\n'), ((7690, 7703), 'numpy.max', 'np.max', (['f0val'], {}), '(f0val)\n', (7696, 7703), True, 'import numpy as np\n'), ((8841, 8867), 'os.path.splitext', 'os.path.splitext', (['file_tag'], {}), '(file_tag)\n', (8857, 8867), False, 'import os\n'), ((8970, 8996), 'os.path.splitext', 
'os.path.splitext', (['file_tag'], {}), '(file_tag)\n', (8986, 8996), False, 'import os\n'), ((2521, 2548), 'os.path.dirname', 'os.path.dirname', (['audio_file'], {}), '(audio_file)\n', (2536, 2548), False, 'import os\n'), ((4851, 4878), 'os.path.dirname', 'os.path.dirname', (['audio_file'], {}), '(audio_file)\n', (4866, 4878), False, 'import os\n'), ((7276, 7303), 'os.path.dirname', 'os.path.dirname', (['audio_file'], {}), '(audio_file)\n', (7291, 7303), False, 'import os\n'), ((12102, 12131), 'os.path.dirname', 'os.path.dirname', (['source_sound'], {}), '(source_sound)\n', (12117, 12131), False, 'import os\n'), ((3654, 3681), 'os.path.dirname', 'os.path.dirname', (['audio_file'], {}), '(audio_file)\n', (3669, 3681), False, 'import os\n'), ((4942, 4969), 'os.path.dirname', 'os.path.dirname', (['audio_file'], {}), '(audio_file)\n', (4957, 4969), False, 'import os\n'), ((5673, 5700), 'os.path.dirname', 'os.path.dirname', (['audio_file'], {}), '(audio_file)\n', (5688, 5700), False, 'import os\n'), ((7367, 7394), 'os.path.dirname', 'os.path.dirname', (['audio_file'], {}), '(audio_file)\n', (7382, 7394), False, 'import os\n'), ((9014, 9041), 'os.path.dirname', 'os.path.dirname', (['audio_file'], {}), '(audio_file)\n', (9029, 9041), False, 'import os\n')] |
import numpy as np
import networkx as nx
import pextant_cpp
from .SEXTANTsolver import sextantSearch, SEXTANTSolver, sextantSearchList
from .astar import aStarSearchNode, aStarNodeCollection, aStarCostFunction, aStarSearch
from pextant.EnvironmentalModel import EnvironmentalModel, GridMeshModel
from pextant.lib.geoshapely import GeoPoint, GeoPolygon, LONG_LAT
from pextant.solvers.nxastar import GG, astar_path
from time import time
class MeshSearchElement(aStarSearchNode):
    """A* search node wrapping a single mesh element of the environment model."""
    def __init__(self, mesh_element, parent=None, cost_from_parent=0):
        self.mesh_element = mesh_element
        # memo for expensive per-node values (path length, time, energy),
        # filled in externally by the cost function via the node collection
        self.derived = {}
        super(MeshSearchElement, self).__init__(
            mesh_element.mesh_coordinate, parent, cost_from_parent)
    def goalTest(self, goal):
        """True when this node sits on the same mesh cell as *goal*."""
        return goal.mesh_element.mesh_coordinate == self.mesh_element.mesh_coordinate
        #return self.mesh_element.distanceToElt(goal.mesh_element) < self.mesh_element.parentMesh.resolution*3
    def getChildren(self):
        """Wrap the neighbouring mesh elements in a search collection."""
        neighbours = self.mesh_element.getNeighbours()
        return MeshSearchCollection(neighbours, self)
    def __getattr__(self, item):
        # delegate any attribute not found on the node to the wrapped mesh element
        try:
            return MeshSearchElement.__getattribute__(self, item)
        except AttributeError:
            return getattr(self.mesh_element, item)
    def __str__(self):
        return str(self.mesh_element)
class MeshSearchCollection(aStarNodeCollection):
    """Collection of neighbour mesh elements produced by expanding one node."""
    def __init__(self, collection, parent=None):
        super(MeshSearchCollection, self).__init__(collection)
        self.derived = None  # per-neighbour cost columns, set by the cost function
        self.parent = parent
    def __getitem__(self, index):
        """Build a MeshSearchElement for *index*, attaching its cached cost column."""
        element = MeshSearchElement(self.collection[index], self.parent)
        cost_column = self.derived[:, index]
        element.derived = dict(zip(['pathlength', 'time', 'energy'], cost_column))
        return element
class ExplorerCost(aStarCostFunction):
    """A* cost function combining distance, time, and energy for an explorer
    traversing a grid mesh environment model. The three terms are blended with
    the weight vector produced by astronaut.optimizevector(optimize_on)."""
    def __init__(self, astronaut, environment, optimize_on, cached=False, heuristic_accelerate=1):
        """
        :param astronaut: explorer model supplying mass, velocity and energy curves
        :type astronaut: Astronaut
        :param environment: grid mesh being traversed
        :type environment: GridMeshModel
        :param optimize_on: metric selector; converted to a 3-weight vector
            (distance, time, energy) by the astronaut model
        :param cached: when True, precompute the per-cell cost arrays now
        :param heuristic_accelerate: heuristic multiplier (weighted A*); values
            > 1 speed up search at the possible cost of optimality
        """
        super(ExplorerCost, self).__init__()
        self.explorer = astronaut
        self.map = environment
        self.optimize_vector = astronaut.optimizevector(optimize_on)
        self.heuristic_accelerate = heuristic_accelerate
        self.cache = cached
        if cached:
            self.cache_costs()
    def cache_all(self):
        # precompute both caches; requires self.end_node to have been set
        end_y, end_x = self.end_node.y, self.end_node.x
        self.cache_costs()
        self.cache_heuristic((end_x, end_y))
    def cache_costs(self):
        # store the per-cell neighbour cost arrays under the "costs" cache key
        self.cached["costs"] = self.create_costs_cache()
    def create_costs_cache(self):
        """Compute, for every cell and every kernel offset, the cost of stepping
        to that neighbour.

        Returns a dict of arrays keyed 'time', 'path', and 'energy', each of
        shape (rows, cols, n_offsets).
        """
        kernel = self.map.searchKernel
        offsets = kernel.getKernel()
        dem = self.map
        # planar (i.e. x-y) distances to all neighbors (by kernel-index)
        dr = np.apply_along_axis(np.linalg.norm, 1, offsets) * self.map.resolution
        # elevations
        z = self.map.dataset_unmasked
        # stored gravity value
        g = self.map.getGravity()
        # initialize arrays for holding costs
        neighbour_size = len(self.map.searchKernel.getKernel())
        slopes_rad = np.empty((dem.shape[0], dem.shape[1], neighbour_size))
        energy_cost = np.empty((dem.shape[0], dem.shape[1], neighbour_size))
        time_cost = np.empty((dem.shape[0], dem.shape[1], neighbour_size))
        path_cost = np.empty((dem.shape[0], dem.shape[1], neighbour_size))
        for idx, offset in enumerate(offsets):
            # planar distance to neighbor at {offset}
            dri = dr[idx]
            # angle (in radians) between each node and neighbor at {offset};
            # np.roll shifts the elevation grid so each cell lines up with its neighbor
            slopes_rad[:, :, idx] = np.arctan2(np.roll(np.roll(z, -offset[0], axis=0), -offset[1], axis=1) - z, dri)
            # calculate {energy cost} and {planar velocity} from slope, distance, and gravity
            energy_cost[:, :, idx], v = self.explorer.energy_expenditure(dri, slopes_rad[:, :, idx], g)
            # time = distance / rate
            time_cost[:,:,idx] = dri/v
            # total, 3-dimensional distance traveled (hypotenuse of planar distance and rise)
            path_cost[:,:,idx] = dri/np.cos(slopes_rad[:, :, idx])*np.ones_like(z)
        return {'time': time_cost, 'path': path_cost, 'energy': energy_cost}
    def cache_heuristic(self, goal):
        # store the per-cell heuristic-to-goal array under the "heuristics" key
        self.cached["heuristics"] = self.create_heuristic_cache(goal)
    def create_heuristic_cache(self, goal):
        """Return an array of heuristic costs from every grid cell to *goal*."""
        # get planar distance to goal from each grid location
        oct_grid_distance = self.map.get_oct_grid_distance_to_point(goal)
        # Adding the energy weight
        explorer = self.explorer
        m = explorer.mass
        planet = self.map.planet
        energy_weight = explorer.minenergy[planet](m) # to minimize energy cost
        max_velocity = explorer.maxvelocity # to minimize time cost
        optimize_weights = self.optimize_vector
        optimize_values = np.array([
            1,  # Distance per m
            max_velocity,  # time per m
            energy_weight  # energy per m
        ])
        optimize_cost = oct_grid_distance * np.dot(optimize_values, optimize_weights)
        heuristic_cost = self.heuristic_accelerate * optimize_cost
        return heuristic_cost
    def get_cache_heuristic(self, start_row, start_col):
        # cached heuristic lookup; requires cache_heuristic to have run
        return self.cached["heuristics"][start_row, start_col]
    def getHeuristicCost(self, elt):
        """Heuristic cost for a MeshSearchElement, cached or computed on the fly."""
        node = elt.mesh_element
        start_row, start_col = node.mesh_coordinate
        heuristic_fx = self.get_cache_heuristic if self.cache else self._getHeuristicCost
        return heuristic_fx(start_row, start_col)
    def getHeuristicCostRaw(self, rowcol):
        """Heuristic cost for a raw (row, col) tuple, cached or computed on the fly."""
        start_row, start_col = rowcol
        heuristic_fx = self.get_cache_heuristic if self.cache else self._getHeuristicCost
        return heuristic_fx(start_row, start_col)
    def _getHeuristicCost(self, start_row, start_col):
        """Admissible octile-grid heuristic from (start_row, start_col) to the end node."""
        r = self.map.resolution
        start_x, start_y = r*start_col, r*start_row
        end_x, end_y = self.end_node.x, self.end_node.y
        optimize_vector = self.optimize_vector
        # max number of diagonal steps that can be taken
        h_diagonal = min(abs(start_y - end_y), abs(start_x - end_x))
        h_straight = abs(start_y - end_y) + abs(start_x - end_x) # Manhattan distance
        # octile distance: diagonals cost sqrt(2), the remainder is straight
        h_oct_grid = np.sqrt(2) * h_diagonal + (h_straight - 2 * h_diagonal)
        # Adding the energy weight
        m = self.explorer.mass
        min_energy_function = self.explorer.minenergy[self.map.planet]
        min_energy = min_energy_function(m) # min to keep heuristic admissible
        max_velocity = self.explorer.maxvelocity # max v => min time, also to keep heuristic admissible
        # determine value to multiply 'optimal distance' value by to get best admissible heuristic
        admissible_values = np.array([1, max_velocity, min_energy])
        admissible_weight = np.dot(admissible_values, optimize_vector)
        # Patel 2010. See page 49 of Aaron's thesis
        heuristic_weight = self.heuristic_accelerate
        heuristic_cost = heuristic_weight * admissible_weight * h_oct_grid
        return heuristic_cost
    def getCostBetween(self, fromnode, tonodes):
        """Return [(node, state, cost), ...] for stepping from *fromnode* to each
        neighbour in *tonodes*.

        :type fromnode: MeshSearchElement
        """
        from_elt = fromnode.mesh_element
        to_cllt = tonodes.collection
        if self.cache:
            row, col = from_elt.mesh_coordinate
            selection = self.map.cached_neighbours[row,col]
            costs = self.cached["costs"]
            optimize_vector = np.array([
                costs['path'][row, col][selection],
                costs['time'][row, col][selection],
                costs['energy'][row, col][selection]
            ])
        else:
            optimize_vector = self.calculateCostBetween(from_elt, to_cllt)
        optimize_weights = self.optimize_vector
        costs = np.dot(optimize_vector.transpose(), optimize_weights)
        # stash the raw (path, time, energy) columns so the collection's
        # __getitem__ can attach them to each node it builds
        tonodes.derived = optimize_vector
        return list(zip(tonodes, to_cllt.get_states(), costs))
    def getCostToNeighbours(self, from_node):
        # NOTE(review): indexes self.cached with a tuple and *calls*
        # cached_neighbours (elsewhere it is subscripted) — looks stale/unused
        # relative to getCostBetween; confirm before relying on it
        row, col = from_node.state
        neighbours = self.map.cached_neighbours(from_node.state)
        return self.cached[row, col, neighbours]
    def calculateCostBetween(self, from_elt, to_elts):
        """
        Given the start and end states, returns the cost of travelling between them.
        Allows for states which are not adjacent to each other.
        optimize_vector is a list or tuple of length 3, representing the weights of
        Distance, Time, and Energy
        Performance optimization: tonodes instead of tonode, potentially numpy optimized, only need to load info
        from fromnode once
        """
        explorer = self.explorer
        slopes, path_lengths = from_elt.slopeTo(to_elts)
        times = explorer.time(path_lengths, slopes)
        g = self.map.getGravity()
        energy_cost, _ = explorer.energy_expenditure(path_lengths, slopes, g)
        #TODO: rewrite this so not all functions need to get evaluated(expensive)
        optimize_vector = np.array([
            path_lengths,
            times,
            energy_cost
        ])
        return optimize_vector
class astarSolver(SEXTANTSolver):
    """A*-based SEXTANT route solver with three selectable back ends:
    the in-house Python A* (PY_INHOUSE), networkx astar_path over a lazy
    graph wrapper (PY_NETWORKX), or the external pextant_cpp native module
    (CPP_NETWORKX)."""
    # algorithm type 'enum' rather than bool (previously: inhouse=true/false)
    PY_INHOUSE = 1
    PY_NETWORKX = 2
    CPP_NETWORKX = 3
    def __init__(self, env_model, explorer_model, viz=None, optimize_on='Energy',
                 cached=False, algorithm_type=PY_INHOUSE, heuristic_accelerate=1):
        """
        :param env_model: environment mesh to search over
        :param explorer_model: astronaut/rover model supplying cost curves
        :param viz: optional visualizer forwarded to the in-house search
        :param optimize_on: metric selector forwarded to ExplorerCost
        :param cached: NOTE(review): this argument is ignored — caching follows
            env_model.cached instead; confirm whether that is intended
        :param algorithm_type: one of PY_INHOUSE, PY_NETWORKX, CPP_NETWORKX
        :param heuristic_accelerate: heuristic multiplier (weighted A*)
        """
        self.explorer_model = explorer_model
        self.optimize_on = optimize_on
        self.cache = env_model.cached
        self.algorithm_type = algorithm_type
        self.G = None
        cost_function = ExplorerCost(explorer_model, env_model, optimize_on, env_model.cached, heuristic_accelerate)
        super(astarSolver, self).__init__(env_model, cost_function, viz)
        # if using networkx-based implementation, set G
        if algorithm_type == astarSolver.PY_NETWORKX or algorithm_type == astarSolver.CPP_NETWORKX:
            self.G = GG(self)
        # if we're using CPP external module
        if algorithm_type == astarSolver.CPP_NETWORKX:
            # create CPP object
            self.path_finder = pextant_cpp.PathFinder()
            # set kernel
            kernel_list = self.env_model.searchKernel.getKernel().tolist()
            self.path_finder.set_kernel(kernel_list)
            # cache data
            cached_costs = self.cost_function.cached["costs"]
            if cached_costs is None:
                cached_costs = self.cost_function.create_costs_cache()
            cost_map = cached_costs["energy"].tolist()
            self.path_finder.cache_costs(cost_map)
            obstacle_map = self.env_model.obstacles.astype(int).tolist()
            self.path_finder.cache_obstacles(obstacle_map)
    def accelerate(self, weight=10):
        """Rebuild the cost function with a stronger heuristic multiplier."""
        self.cost_function = ExplorerCost(self.explorer_model, self.env_model, self.optimize_on,
                                          self.cache, heuristic_accelerate=weight)
    def solve(self, startpoint, endpoint):
        """Dispatch to the configured back end; returns a sextantSearch or False."""
        if self.algorithm_type == astarSolver.CPP_NETWORKX:
            solver = self.solvenx_cpp
        elif self.algorithm_type == astarSolver.PY_NETWORKX:
            solver = self.solvenx
        else: # self.algorithm_type == astarSolver.PY_INHOUSE
            solver = self.solveinhouse
        return solver(startpoint, endpoint)
    def solveinhouse(self, startpoint, endpoint):
        """Search with the in-house Python A*; returns a sextantSearch, or False
        when either endpoint lies outside the model's data."""
        env_model = self.env_model
        if env_model.elt_hasdata(startpoint) and env_model.elt_hasdata(endpoint):
            node1, node2 = MeshSearchElement(env_model.getMeshElement(startpoint)), \
                           MeshSearchElement(env_model.getMeshElement(endpoint))
            solution_path, expanded_items = aStarSearch(node1, node2, self.cost_function, self.viz)
            raw, nodes = solution_path
            if len(raw) == 0:
                coordinates = []
            else:
                coordinates = GeoPolygon(env_model.ROW_COL, *np.array(raw).transpose())
            search = sextantSearch(raw, nodes, coordinates, expanded_items)
            self.searches.append(search)
            return search
        else:
            return False
    def solvenx(self, startpoint, endpoint):
        """Search with networkx astar_path over the lazy GG graph; returns a
        sextantSearch, or False on no path / missing data."""
        env_model = self.env_model
        cost_function = self.cost_function
        start = env_model.getMeshElement(startpoint).mesh_coordinate
        target = env_model.getMeshElement(endpoint).mesh_coordinate
        if env_model.elt_hasdata(startpoint) and env_model.elt_hasdata(endpoint):
            if self.G == None:
                self.G = GG(self)
            cost_function.setEndNode(MeshSearchElement(env_model.getMeshElement(endpoint)))
            try:
                raw = astar_path(self.G, start, target, lambda a, b: cost_function.getHeuristicCostRaw(a))
                coordinates = GeoPolygon(self.env_model.COL_ROW, *np.array(raw).transpose()[::-1])
                search = sextantSearch(raw, [], coordinates, [])
                self.searches.append(search)
                return search
            except nx.NetworkXNoPath:
                return False
        else:
            return False
    def solvenx_cpp(self, startpoint, endpoint):
        """Search with the pextant_cpp native solver; returns a sextantSearch
        or False."""
        # reset any prior progress
        self.path_finder.reset_progress()
        # get source and target coordinates
        source = self.env_model.getMeshElement(startpoint).mesh_coordinate # unscaled (row, column)
        target = self.env_model.getMeshElement(endpoint).mesh_coordinate # unscaled (row, column)
        # check that we have data at both start and end
        if self.env_model.elt_hasdata(startpoint) and self.env_model.elt_hasdata(endpoint):
            # cache heuristic
            heuristics_map = self.cost_function.create_heuristic_cache(target).tolist()
            self.path_finder.cache_heuristics(heuristics_map)
            # perform search
            raw = self.path_finder.astar_solve(source, target)
            # if we have a good result
            if len(raw) > 0:
                # append result to 'searches' list and return
                coordinates = GeoPolygon(self.env_model.COL_ROW, *np.array(raw).transpose()[::-1])
                search = sextantSearch(raw, [], coordinates, [])
                self.searches.append(search)
                return search
        # default to fail result
        return False
    def weight(self, a, b):
        """Edge weight between grid cells *a* and *b*, used by the GG graph."""
        # locate b's kernel-offset index relative to a
        selection = (np.array(a) + self.env_model.searchKernel.getKernel()).tolist().index(list(b))
        costs = self.cost_function.cached["costs"]
        optimize_weights = self.cost_function.optimize_vector
        optimize_vector = np.array([
            costs['path'][a][selection],
            costs['time'][a][selection],
            costs['energy'][a][selection]
        ])
        costs = np.dot(optimize_vector.transpose(), optimize_weights)
        return costs
def generateGraph(em, weightfx):
    """
    Build a weighted networkx DiGraph over the grid of an environmental model.

    em: environmental model providing y_size/x_size, the search kernel, and
        the cached neighbour mask per cell
    weightfx: callable (from_rowcol, to_rowcol) -> edge weight
    Returns the graph; prints the elapsed build time in seconds.
    """
    t1 = time()
    G = nx.DiGraph()
    rows, cols = list(range(em.y_size)), list(range(em.x_size))
    G.add_nodes_from((i, j) for i in rows for j in cols)
    # hoisted out of the loop: the kernel offsets are invariant across cells
    kernel = em.searchKernel.getKernel()
    for i in rows:
        for j in cols:
            # absolute coordinates of the valid neighbours of cell (i, j)
            n = np.array((i, j)) + kernel[em.cached_neighbours[i, j]]
            G.add_weighted_edges_from(((i, j), tuple(k), weightfx((i, j), tuple(k))) for k in n)
    t2 = time()
    print(t2 - t1)
    return G
if __name__ == '__main__':
    # Demo: solve the Hawaii (HI) waypoint set #7 over a low-quality DEM
    # and display the resulting path on a raster grid.
    from pextant.settings import WP_HI, HI_DEM_LOWQUAL_PATH
    from pextant.EnvironmentalModel import GDALMesh
    from pextant.explorers import Astronaut
    from pextant.mesh.MeshVisualizer import ExpandViz, MeshVizM
    jloader = WP_HI[7]  # waypoint loader for one of the HI traversals
    waypoints = jloader.get_waypoints()
    envelope = waypoints.geoEnvelope()#.addMargin(0.5, 30)
    # load only the DEM subsection covering the waypoints, masking slopes > 35 deg
    env_model = GDALMesh(HI_DEM_LOWQUAL_PATH).loadSubSection(envelope, maxSlope=35)
    astronaut = Astronaut(80)  # 80 kg explorer
    solver = astarSolver(env_model, astronaut, ExpandViz(env_model, 10000))
    segmentsout, rawpoints, items = solver.solvemultipoint(waypoints)
    jsonout = jloader.add_search_sol(segmentsout, True)
    # rasterize the solved path onto a grid and display it
    matviz = MeshVizM()
    solgrid = np.zeros((env_model.y_size, env_model.x_size))
    for i in rawpoints:
        solgrid[i] = 1
    matviz.viz(solgrid)
| [
"pextant.EnvironmentalModel.GDALMesh",
"numpy.ones_like",
"numpy.sqrt",
"numpy.roll",
"pextant.mesh.MeshVisualizer.ExpandViz",
"pextant.solvers.nxastar.GG",
"networkx.DiGraph",
"pextant_cpp.PathFinder",
"pextant.explorers.Astronaut",
"numpy.array",
"numpy.zeros",
"numpy.dot",
"numpy.empty",
... | [((15181, 15187), 'time.time', 'time', ([], {}), '()\n', (15185, 15187), False, 'from time import time\n'), ((15196, 15208), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (15206, 15208), True, 'import networkx as nx\n'), ((15675, 15681), 'time.time', 'time', ([], {}), '()\n', (15679, 15681), False, 'from time import time\n'), ((16185, 16198), 'pextant.explorers.Astronaut', 'Astronaut', (['(80)'], {}), '(80)\n', (16194, 16198), False, 'from pextant.explorers import Astronaut\n'), ((16416, 16426), 'pextant.mesh.MeshVisualizer.MeshVizM', 'MeshVizM', ([], {}), '()\n', (16424, 16426), False, 'from pextant.mesh.MeshVisualizer import ExpandViz, MeshVizM\n'), ((16441, 16487), 'numpy.zeros', 'np.zeros', (['(env_model.y_size, env_model.x_size)'], {}), '((env_model.y_size, env_model.x_size))\n', (16449, 16487), True, 'import numpy as np\n'), ((3289, 3343), 'numpy.empty', 'np.empty', (['(dem.shape[0], dem.shape[1], neighbour_size)'], {}), '((dem.shape[0], dem.shape[1], neighbour_size))\n', (3297, 3343), True, 'import numpy as np\n'), ((3366, 3420), 'numpy.empty', 'np.empty', (['(dem.shape[0], dem.shape[1], neighbour_size)'], {}), '((dem.shape[0], dem.shape[1], neighbour_size))\n', (3374, 3420), True, 'import numpy as np\n'), ((3441, 3495), 'numpy.empty', 'np.empty', (['(dem.shape[0], dem.shape[1], neighbour_size)'], {}), '((dem.shape[0], dem.shape[1], neighbour_size))\n', (3449, 3495), True, 'import numpy as np\n'), ((3516, 3570), 'numpy.empty', 'np.empty', (['(dem.shape[0], dem.shape[1], neighbour_size)'], {}), '((dem.shape[0], dem.shape[1], neighbour_size))\n', (3524, 3570), True, 'import numpy as np\n'), ((5029, 5071), 'numpy.array', 'np.array', (['[1, max_velocity, energy_weight]'], {}), '([1, max_velocity, energy_weight])\n', (5037, 5071), True, 'import numpy as np\n'), ((6940, 6979), 'numpy.array', 'np.array', (['[1, max_velocity, min_energy]'], {}), '([1, max_velocity, min_energy])\n', (6948, 6979), True, 'import numpy as np\n'), ((7008, 7050), 'numpy.dot', 
'np.dot', (['admissible_values', 'optimize_vector'], {}), '(admissible_values, optimize_vector)\n', (7014, 7050), True, 'import numpy as np\n'), ((9209, 9253), 'numpy.array', 'np.array', (['[path_lengths, times, energy_cost]'], {}), '([path_lengths, times, energy_cost])\n', (9217, 9253), True, 'import numpy as np\n'), ((14901, 15005), 'numpy.array', 'np.array', (["[costs['path'][a][selection], costs['time'][a][selection], costs['energy'][\n a][selection]]"], {}), "([costs['path'][a][selection], costs['time'][a][selection], costs[\n 'energy'][a][selection]])\n", (14909, 15005), True, 'import numpy as np\n'), ((16247, 16274), 'pextant.mesh.MeshVisualizer.ExpandViz', 'ExpandViz', (['env_model', '(10000)'], {}), '(env_model, 10000)\n', (16256, 16274), False, 'from pextant.mesh.MeshVisualizer import ExpandViz, MeshVizM\n'), ((2961, 3008), 'numpy.apply_along_axis', 'np.apply_along_axis', (['np.linalg.norm', '(1)', 'offsets'], {}), '(np.linalg.norm, 1, offsets)\n', (2980, 3008), True, 'import numpy as np\n'), ((5210, 5251), 'numpy.dot', 'np.dot', (['optimize_values', 'optimize_weights'], {}), '(optimize_values, optimize_weights)\n', (5216, 5251), True, 'import numpy as np\n'), ((7641, 7766), 'numpy.array', 'np.array', (["[costs['path'][row, col][selection], costs['time'][row, col][selection],\n costs['energy'][row, col][selection]]"], {}), "([costs['path'][row, col][selection], costs['time'][row, col][\n selection], costs['energy'][row, col][selection]])\n", (7649, 7766), True, 'import numpy as np\n'), ((10229, 10237), 'pextant.solvers.nxastar.GG', 'GG', (['self'], {}), '(self)\n', (10231, 10237), False, 'from pextant.solvers.nxastar import GG, astar_path\n'), ((10403, 10427), 'pextant_cpp.PathFinder', 'pextant_cpp.PathFinder', ([], {}), '()\n', (10425, 10427), False, 'import pextant_cpp\n'), ((15362, 15368), 'time.time', 'time', ([], {}), '()\n', (15366, 15368), False, 'from time import time\n'), ((16101, 16130), 'pextant.EnvironmentalModel.GDALMesh', 'GDALMesh', 
(['HI_DEM_LOWQUAL_PATH'], {}), '(HI_DEM_LOWQUAL_PATH)\n', (16109, 16130), False, 'from pextant.EnvironmentalModel import GDALMesh\n'), ((4291, 4306), 'numpy.ones_like', 'np.ones_like', (['z'], {}), '(z)\n', (4303, 4306), True, 'import numpy as np\n'), ((6433, 6443), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (6440, 6443), True, 'import numpy as np\n'), ((12841, 12849), 'pextant.solvers.nxastar.GG', 'GG', (['self'], {}), '(self)\n', (12843, 12849), False, 'from pextant.solvers.nxastar import GG, astar_path\n'), ((15501, 15517), 'numpy.array', 'np.array', (['(i, j)'], {}), '((i, j))\n', (15509, 15517), True, 'import numpy as np\n'), ((4261, 4290), 'numpy.cos', 'np.cos', (['slopes_rad[:, :, idx]'], {}), '(slopes_rad[:, :, idx])\n', (4267, 4290), True, 'import numpy as np\n'), ((3832, 3862), 'numpy.roll', 'np.roll', (['z', '(-offset[0])'], {'axis': '(0)'}), '(z, -offset[0], axis=0)\n', (3839, 3862), True, 'import numpy as np\n'), ((14683, 14694), 'numpy.array', 'np.array', (['a'], {}), '(a)\n', (14691, 14694), True, 'import numpy as np\n'), ((12233, 12246), 'numpy.array', 'np.array', (['raw'], {}), '(raw)\n', (12241, 12246), True, 'import numpy as np\n'), ((13132, 13145), 'numpy.array', 'np.array', (['raw'], {}), '(raw)\n', (13140, 13145), True, 'import numpy as np\n'), ((14405, 14418), 'numpy.array', 'np.array', (['raw'], {}), '(raw)\n', (14413, 14418), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.